#include <linux/mm.h>
#include <linux/rcupdate.h>
#include <linux/kallsyms.h>
+#include <linux/resource.h>
#include <linux/module.h>
#include <linux/mount.h>
#include <linux/security.h>
#include <linux/ptrace.h>
-#include <linux/seccomp.h>
+#include <linux/cgroup.h>
#include <linux/cpuset.h>
#include <linux/audit.h>
#include <linux/poll.h>
#include <linux/nsproxy.h>
#include <linux/oom.h>
+#include <linux/elf.h>
+#include <linux/pid_namespace.h>
#include "internal.h"
/* NOTE:
(task->state == TASK_STOPPED || task->state == TASK_TRACED) && \
security_ptrace(current,task) == 0))
-static int proc_pid_environ(struct task_struct *task, char * buffer)
-{
- int res = 0;
- struct mm_struct *mm = get_task_mm(task);
- if (mm) {
- unsigned int len = mm->env_end - mm->env_start;
- if (len > PAGE_SIZE)
- len = PAGE_SIZE;
- res = access_process_vm(task, mm->env_start, buffer, len, 0);
- if (!ptrace_may_attach(task))
- res = -ESRCH;
- mmput(mm);
- }
- return res;
-}
-
static int proc_pid_cmdline(struct task_struct *task, char * buffer)
{
int res = 0;
static int proc_pid_wchan(struct task_struct *task, char *buffer)
{
unsigned long wchan;
- char symname[KSYM_NAME_LEN+1];
+ char symname[KSYM_NAME_LEN];
wchan = get_wchan(task);
*/
static int proc_pid_schedstat(struct task_struct *task, char *buffer)
{
- return sprintf(buffer, "%lu %lu %lu\n",
+ return sprintf(buffer, "%llu %llu %lu\n",
task->sched_info.cpu_time,
task->sched_info.run_delay,
- task->sched_info.pcnt);
+ task->sched_info.pcount);
}
#endif
return sprintf(buffer, "%lu\n", points);
}
+struct limit_names {
+ char *name;
+ char *unit;
+};
+
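+/* Human-readable name and unit shown for each RLIMIT_* index. */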
+static const struct limit_names lnames[RLIM_NLIMITS] = {
+	[RLIMIT_CPU] = {"Max cpu time", "seconds"},
+ [RLIMIT_FSIZE] = {"Max file size", "bytes"},
+ [RLIMIT_DATA] = {"Max data size", "bytes"},
+ [RLIMIT_STACK] = {"Max stack size", "bytes"},
+ [RLIMIT_CORE] = {"Max core file size", "bytes"},
+ [RLIMIT_RSS] = {"Max resident set", "bytes"},
+ [RLIMIT_NPROC] = {"Max processes", "processes"},
+ [RLIMIT_NOFILE] = {"Max open files", "files"},
+ [RLIMIT_MEMLOCK] = {"Max locked memory", "bytes"},
+ [RLIMIT_AS] = {"Max address space", "bytes"},
+ [RLIMIT_LOCKS] = {"Max file locks", "locks"},
+ [RLIMIT_SIGPENDING] = {"Max pending signals", "signals"},
+ [RLIMIT_MSGQUEUE] = {"Max msgqueue size", "bytes"},
+ [RLIMIT_NICE] = {"Max nice priority", NULL},
+ [RLIMIT_RTPRIO] = {"Max realtime priority", NULL},
+};
+
+/* Display limits for a process */
+static int proc_pid_limits(struct task_struct *task, char *buffer)
+{
+ unsigned int i;
+ int count = 0;
+ unsigned long flags;
+ char *bufptr = buffer;
+
+ struct rlimit rlim[RLIM_NLIMITS];
+
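+	/*
+	 * Take a snapshot of the rlimit array under the sighand lock so
+	 * the values printed below are consistent with one another.
+	 */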
+ rcu_read_lock();
+	if (!lock_task_sighand(task, &flags)) {
+ rcu_read_unlock();
+ return 0;
+ }
+ memcpy(rlim, task->signal->rlim, sizeof(struct rlimit) * RLIM_NLIMITS);
+ unlock_task_sighand(task, &flags);
+ rcu_read_unlock();
+
+ /*
+ * print the file header
+ */
+ count += sprintf(&bufptr[count], "%-25s %-20s %-20s %-10s\n",
+ "Limit", "Soft Limit", "Hard Limit", "Units");
+
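+	/* One row per rlimit: soft limit, hard limit and units (if any). */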
+ for (i = 0; i < RLIM_NLIMITS; i++) {
+ if (rlim[i].rlim_cur == RLIM_INFINITY)
+ count += sprintf(&bufptr[count], "%-25s %-20s ",
+ lnames[i].name, "unlimited");
+ else
+ count += sprintf(&bufptr[count], "%-25s %-20lu ",
+ lnames[i].name, rlim[i].rlim_cur);
+
+ if (rlim[i].rlim_max == RLIM_INFINITY)
+ count += sprintf(&bufptr[count], "%-20s ", "unlimited");
+ else
+ count += sprintf(&bufptr[count], "%-20lu ",
+ rlim[i].rlim_max);
+
+ if (lnames[i].unit)
+ count += sprintf(&bufptr[count], "%-10s\n",
+ lnames[i].unit);
+ else
+ count += sprintf(&bufptr[count], "\n");
+ }
+
+ return count;
+}
+
/************************************************************************/
/* Here the fs part begins */
/************************************************************************/
static int mounts_open(struct inode *inode, struct file *file)
{
struct task_struct *task = get_proc_task(inode);
+ struct nsproxy *nsp;
struct mnt_namespace *ns = NULL;
struct proc_mounts *p;
int ret = -EINVAL;
if (task) {
- task_lock(task);
- if (task->nsproxy) {
- ns = task->nsproxy->mnt_ns;
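+		/* task_nsproxy() must be called under rcu_read_lock() */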
+ rcu_read_lock();
+ nsp = task_nsproxy(task);
+ if (nsp) {
+ ns = nsp->mnt_ns;
if (ns)
get_mnt_ns(ns);
}
- task_unlock(task);
+ rcu_read_unlock();
+
put_task_struct(task);
}
if (!ret) {
struct seq_file *m = file->private_data;
+ struct nsproxy *nsp;
struct mnt_namespace *mnt_ns = NULL;
struct task_struct *task = get_proc_task(inode);
if (task) {
- task_lock(task);
- if (task->nsproxy)
- mnt_ns = task->nsproxy->mnt_ns;
- if (mnt_ns)
- get_mnt_ns(mnt_ns);
- task_unlock(task);
+ rcu_read_lock();
+ nsp = task_nsproxy(task);
+ if (nsp) {
+ mnt_ns = nsp->mnt_ns;
+ if (mnt_ns)
+ get_mnt_ns(mnt_ns);
+ }
+ rcu_read_unlock();
+
put_task_struct(task);
}
count = PROC_BLOCK_SIZE;
length = -ENOMEM;
- if (!(page = __get_free_page(GFP_KERNEL)))
+ if (!(page = __get_free_page(GFP_TEMPORARY)))
goto out;
length = PROC_I(inode)->op.proc_read(task, (char*)page);
goto out;
ret = -ENOMEM;
- page = (char *)__get_free_page(GFP_USER);
+ page = (char *)__get_free_page(GFP_TEMPORARY);
if (!page)
goto out;
goto out;
copied = -ENOMEM;
- page = (char *)__get_free_page(GFP_USER);
+ page = (char *)__get_free_page(GFP_TEMPORARY);
if (!page)
goto out;
.open = mem_open,
};
+static ssize_t environ_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct task_struct *task = get_proc_task(file->f_dentry->d_inode);
+ char *page;
+ unsigned long src = *ppos;
+ int ret = -ESRCH;
+ struct mm_struct *mm;
+
+ if (!task)
+ goto out_no_task;
+
+ if (!ptrace_may_attach(task))
+ goto out;
+
+ ret = -ENOMEM;
+ page = (char *)__get_free_page(GFP_TEMPORARY);
+ if (!page)
+ goto out;
+
+ ret = 0;
+
+ mm = get_task_mm(task);
+ if (!mm)
+ goto out_free;
+
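+	/*
+	 * Copy the environment out in chunks of at most one page: read
+	 * each chunk from the target with access_process_vm() and then
+	 * copy it to userspace.
+	 */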
+ while (count > 0) {
+ int this_len, retval, max_len;
+
+ this_len = mm->env_end - (mm->env_start + src);
+
+ if (this_len <= 0)
+ break;
+
+ max_len = (count > PAGE_SIZE) ? PAGE_SIZE : count;
+ this_len = (this_len > max_len) ? max_len : this_len;
+
+ retval = access_process_vm(task, (mm->env_start + src),
+ page, this_len, 0);
+
+ if (retval <= 0) {
+ ret = retval;
+ break;
+ }
+
+ if (copy_to_user(buf, page, retval)) {
+ ret = -EFAULT;
+ break;
+ }
+
+ ret += retval;
+ src += retval;
+ buf += retval;
+ count -= retval;
+ }
+ *ppos = src;
+
+ mmput(mm);
+out_free:
+ free_page((unsigned long) page);
+out:
+ put_task_struct(task);
+out_no_task:
+ return ret;
+}
+
+static const struct file_operations proc_environ_operations = {
+ .read = environ_read,
+};
+
static ssize_t oom_adjust_read(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
{
/* No partial writes. */
return -EINVAL;
}
- page = (char*)__get_free_page(GFP_USER);
+ page = (char*)__get_free_page(GFP_TEMPORARY);
if (!page)
return -ENOMEM;
length = -EFAULT;
};
#endif
-#ifdef CONFIG_SECCOMP
-static ssize_t seccomp_read(struct file *file, char __user *buf,
- size_t count, loff_t *ppos)
-{
- struct task_struct *tsk = get_proc_task(file->f_dentry->d_inode);
- char __buf[20];
- size_t len;
-
- if (!tsk)
- return -ESRCH;
- /* no need to print the trailing zero, so use only len */
- len = sprintf(__buf, "%u\n", tsk->seccomp.mode);
- put_task_struct(tsk);
-
- return simple_read_from_buffer(buf, count, ppos, __buf, len);
-}
-
-static ssize_t seccomp_write(struct file *file, const char __user *buf,
- size_t count, loff_t *ppos)
-{
- struct task_struct *tsk = get_proc_task(file->f_dentry->d_inode);
- char __buf[20], *end;
- unsigned int seccomp_mode;
- ssize_t result;
-
- result = -ESRCH;
- if (!tsk)
- goto out_no_task;
-
- /* can set it only once to be even more secure */
- result = -EPERM;
- if (unlikely(tsk->seccomp.mode))
- goto out;
-
- result = -EFAULT;
- memset(__buf, 0, sizeof(__buf));
- count = min(count, sizeof(__buf) - 1);
- if (copy_from_user(__buf, buf, count))
- goto out;
-
- seccomp_mode = simple_strtoul(__buf, &end, 0);
- if (*end == '\n')
- end++;
- result = -EINVAL;
- if (seccomp_mode && seccomp_mode <= NR_SECCOMP_MODES) {
- tsk->seccomp.mode = seccomp_mode;
- set_tsk_thread_flag(tsk, TIF_SECCOMP);
- } else
- goto out;
- result = -EIO;
- if (unlikely(!(end - __buf)))
- goto out;
- result = end - __buf;
-out:
- put_task_struct(tsk);
-out_no_task:
- return result;
-}
-
-static const struct file_operations proc_seccomp_operations = {
- .read = seccomp_read,
- .write = seccomp_write,
-};
-#endif /* CONFIG_SECCOMP */
-
#ifdef CONFIG_FAULT_INJECTION
static ssize_t proc_fault_inject_read(struct file * file, char __user * buf,
size_t count, loff_t *ppos)
};
#endif
+#ifdef CONFIG_SCHED_DEBUG
+/*
+ * Print out various scheduling-related per-task fields:
+ */
+static int sched_show(struct seq_file *m, void *v)
+{
+ struct inode *inode = m->private;
+ struct task_struct *p;
+
+ WARN_ON(!inode);
+
+ p = get_proc_task(inode);
+ if (!p)
+ return -ESRCH;
+ proc_sched_show_task(p, m);
+
+ put_task_struct(p);
+
+ return 0;
+}
+
+static ssize_t
+sched_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *offset)
+{
+ struct inode *inode = file->f_path.dentry->d_inode;
+ struct task_struct *p;
+
+ WARN_ON(!inode);
+
+ p = get_proc_task(inode);
+ if (!p)
+ return -ESRCH;
+ proc_sched_set_task(p);
+
+ put_task_struct(p);
+
+ return count;
+}
+
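+/*
+ * single_open() gets no data pointer here; the inode is stored in
+ * m->private afterwards so that sched_show() can find the task.
+ */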
+static int sched_open(struct inode *inode, struct file *filp)
+{
+ int ret;
+
+ ret = single_open(filp, sched_show, NULL);
+ if (!ret) {
+ struct seq_file *m = filp->private_data;
+
+ m->private = inode;
+ }
+ return ret;
+}
+
+static const struct file_operations proc_pid_sched_operations = {
+ .open = sched_open,
+ .read = seq_read,
+ .write = sched_write,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+#endif
+
static void *proc_pid_follow_link(struct dentry *dentry, struct nameidata *nd)
{
struct inode *inode = dentry->d_inode;
char __user *buffer, int buflen)
{
struct inode * inode;
- char *tmp = (char*)__get_free_page(GFP_KERNEL), *path;
+ char *tmp = (char*)__get_free_page(GFP_TEMPORARY);
+ char *path;
int len;
if (!tmp)
task_lock(task);
mm = task->mm;
if (mm)
- dumpable = mm->dumpable;
+ dumpable = get_dumpable(mm);
task_unlock(task);
	if (dumpable == 1)
return 1;
struct dentry *dentry = filp->f_path.dentry;
struct inode *inode = dentry->d_inode;
struct task_struct *p = get_proc_task(inode);
- unsigned int fd, tid, ino;
+ unsigned int fd, ino;
int retval;
struct files_struct * files;
struct fdtable *fdt;
if (!p)
goto out_no_task;
retval = 0;
- tid = p->pid;
fd = filp->f_pos;
switch (fd) {
const struct pid_entry *ents, unsigned int nents)
{
int i;
- int pid;
struct dentry *dentry = filp->f_path.dentry;
struct inode *inode = dentry->d_inode;
struct task_struct *task = get_proc_task(inode);
goto out_no_task;
ret = 0;
- pid = task->pid;
i = filp->f_pos;
switch (i) {
case 0:
goto out;
length = -ENOMEM;
- page = (char*)__get_free_page(GFP_USER);
+ page = (char*)__get_free_page(GFP_TEMPORARY);
if (!page)
goto out;
#endif
+#if defined(USE_ELF_CORE_DUMP) && defined(CONFIG_ELF_CORE)
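+/*
+ * /proc/<pid>/coredump_filter: expose the MMF_DUMP_FILTER bits of
+ * mm->flags as a hexadecimal bitmask.
+ */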
+static ssize_t proc_coredump_filter_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct task_struct *task = get_proc_task(file->f_dentry->d_inode);
+ struct mm_struct *mm;
+ char buffer[PROC_NUMBUF];
+ size_t len;
+ int ret;
+
+ if (!task)
+ return -ESRCH;
+
+ ret = 0;
+ mm = get_task_mm(task);
+ if (mm) {
+ len = snprintf(buffer, sizeof(buffer), "%08lx\n",
+ ((mm->flags & MMF_DUMP_FILTER_MASK) >>
+ MMF_DUMP_FILTER_SHIFT));
+ mmput(mm);
+ ret = simple_read_from_buffer(buf, count, ppos, buffer, len);
+ }
+
+ put_task_struct(task);
+
+ return ret;
+}
+
+static ssize_t proc_coredump_filter_write(struct file *file,
+ const char __user *buf,
+ size_t count,
+ loff_t *ppos)
+{
+ struct task_struct *task;
+ struct mm_struct *mm;
+ char buffer[PROC_NUMBUF], *end;
+ unsigned int val;
+ int ret;
+ int i;
+ unsigned long mask;
+
+ ret = -EFAULT;
+ memset(buffer, 0, sizeof(buffer));
+ if (count > sizeof(buffer) - 1)
+ count = sizeof(buffer) - 1;
+ if (copy_from_user(buffer, buf, count))
+ goto out_no_task;
+
+ ret = -EINVAL;
+ val = (unsigned int)simple_strtoul(buffer, &end, 0);
+ if (*end == '\n')
+ end++;
+ if (end - buffer == 0)
+ goto out_no_task;
+
+ ret = -ESRCH;
+ task = get_proc_task(file->f_dentry->d_inode);
+ if (!task)
+ goto out_no_task;
+
+ ret = end - buffer;
+ mm = get_task_mm(task);
+ if (!mm)
+ goto out_no_mm;
+
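+	/* Set or clear each dump filter bit according to the mask written. */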
+ for (i = 0, mask = 1; i < MMF_DUMP_FILTER_BITS; i++, mask <<= 1) {
+ if (val & mask)
+ set_bit(i + MMF_DUMP_FILTER_SHIFT, &mm->flags);
+ else
+ clear_bit(i + MMF_DUMP_FILTER_SHIFT, &mm->flags);
+ }
+
+ mmput(mm);
+ out_no_mm:
+ put_task_struct(task);
+ out_no_task:
+ return ret;
+}
+
+static const struct file_operations proc_coredump_filter_operations = {
+ .read = proc_coredump_filter_read,
+ .write = proc_coredump_filter_write,
+};
+#endif
+
/*
* /proc/self:
*/
int buflen)
{
char tmp[PROC_NUMBUF];
- sprintf(tmp, "%d", current->tgid);
+ sprintf(tmp, "%d", task_tgid_vnr(current));
return vfs_readlink(dentry,buffer,buflen,tmp);
}
static void *proc_self_follow_link(struct dentry *dentry, struct nameidata *nd)
{
char tmp[PROC_NUMBUF];
- sprintf(tmp, "%d", current->tgid);
+ sprintf(tmp, "%d", task_tgid_vnr(current));
return ERR_PTR(vfs_follow_link(nd,tmp));
}
DIR("task", S_IRUGO|S_IXUGO, task),
DIR("fd", S_IRUSR|S_IXUSR, fd),
DIR("fdinfo", S_IRUSR|S_IXUSR, fdinfo),
- INF("environ", S_IRUSR, pid_environ),
+ REG("environ", S_IRUSR, environ),
INF("auxv", S_IRUSR, pid_auxv),
INF("status", S_IRUGO, pid_status),
+ INF("limits", S_IRUSR, pid_limits),
+#ifdef CONFIG_SCHED_DEBUG
+ REG("sched", S_IRUGO|S_IWUSR, pid_sched),
+#endif
INF("cmdline", S_IRUGO, pid_cmdline),
INF("stat", S_IRUGO, tgid_stat),
INF("statm", S_IRUGO, pid_statm),
REG("numa_maps", S_IRUGO, numa_maps),
#endif
REG("mem", S_IRUSR|S_IWUSR, mem),
-#ifdef CONFIG_SECCOMP
- REG("seccomp", S_IRUSR|S_IWUSR, seccomp),
-#endif
LNK("cwd", cwd),
LNK("root", root),
LNK("exe", exe),
#ifdef CONFIG_SCHEDSTATS
INF("schedstat", S_IRUGO, pid_schedstat),
#endif
-#ifdef CONFIG_CPUSETS
+#ifdef CONFIG_PROC_PID_CPUSET
REG("cpuset", S_IRUGO, cpuset),
+#endif
+#ifdef CONFIG_CGROUPS
+ REG("cgroup", S_IRUGO, cgroup),
#endif
INF("oom_score", S_IRUGO, oom_score),
REG("oom_adj", S_IRUGO|S_IWUSR, oom_adjust),
#ifdef CONFIG_FAULT_INJECTION
REG("make-it-fail", S_IRUGO|S_IWUSR, fault_inject),
#endif
+#if defined(USE_ELF_CORE_DUMP) && defined(CONFIG_ELF_CORE)
+ REG("coredump_filter", S_IRUGO|S_IWUSR, coredump_filter),
+#endif
#ifdef CONFIG_TASK_IO_ACCOUNTING
INF("io", S_IRUGO, pid_io_accounting),
#endif
.setattr = proc_setattr,
};
-/**
- * proc_flush_task - Remove dcache entries for @task from the /proc dcache.
- *
- * @task: task that should be flushed.
- *
- * Looks in the dcache for
- * /proc/@pid
- * /proc/@tgid/task/@pid
- * if either directory is present flushes it and all of it'ts children
- * from the dcache.
- *
- * It is safe and reasonable to cache /proc entries for a task until
- * that task exits. After that they just clog up the dcache with
- * useless entries, possibly causing useful dcache entries to be
- * flushed instead. This routine is proved to flush those useless
- * dcache entries at process exit time.
- *
- * NOTE: This routine is just an optimization so it does not guarantee
- * that no dcache entries will exist at process exit time it
- * just makes it very unlikely that any will persist.
- */
-void proc_flush_task(struct task_struct *task)
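+/*
+ * Flush /proc/<pid> and /proc/<tgid>/task/<pid> dentries from a single
+ * proc mount.
+ */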
+static void proc_flush_task_mnt(struct vfsmount *mnt, pid_t pid, pid_t tgid)
{
struct dentry *dentry, *leader, *dir;
char buf[PROC_NUMBUF];
struct qstr name;
name.name = buf;
- name.len = snprintf(buf, sizeof(buf), "%d", task->pid);
- dentry = d_hash_and_lookup(proc_mnt->mnt_root, &name);
+ name.len = snprintf(buf, sizeof(buf), "%d", pid);
+ dentry = d_hash_and_lookup(mnt->mnt_root, &name);
if (dentry) {
shrink_dcache_parent(dentry);
d_drop(dentry);
dput(dentry);
}
- if (thread_group_leader(task))
+ if (tgid == 0)
goto out;
name.name = buf;
- name.len = snprintf(buf, sizeof(buf), "%d", task->tgid);
- leader = d_hash_and_lookup(proc_mnt->mnt_root, &name);
+ name.len = snprintf(buf, sizeof(buf), "%d", tgid);
+ leader = d_hash_and_lookup(mnt->mnt_root, &name);
if (!leader)
goto out;
goto out_put_leader;
name.name = buf;
- name.len = snprintf(buf, sizeof(buf), "%d", task->pid);
+ name.len = snprintf(buf, sizeof(buf), "%d", pid);
dentry = d_hash_and_lookup(dir, &name);
if (dentry) {
shrink_dcache_parent(dentry);
return;
}
+/**
+ * proc_flush_task - Remove dcache entries for @task from the /proc dcache.
+ * @task: task that should be flushed.
+ *
+ * When flushing dentries from proc, they need to be flushed from the
+ * global proc mount (proc_mnt) and from the proc mount of every pid
+ * namespace this task was visible in.  This routine handles all of them.
+ *
+ * Looks in the dcache for
+ * /proc/@pid
+ * /proc/@tgid/task/@pid
+ * if either directory is present, flushes it and all of its children
+ * from the dcache.
+ *
+ * It is safe and reasonable to cache /proc entries for a task until
+ * that task exits. After that they just clog up the dcache with
+ * useless entries, possibly causing useful dcache entries to be
+ * flushed instead. This routine is provided to flush those useless
+ * dcache entries at process exit time.
+ *
+ * NOTE: This routine is just an optimization, so it does not guarantee
+ * that no dcache entries will exist at process exit time; it just
+ * makes it very unlikely that any will persist.
+ */
+void proc_flush_task(struct task_struct *task)
+{
+ int i;
+ struct pid *pid, *tgid = NULL;
+ struct upid *upid;
+
+ pid = task_pid(task);
+ if (thread_group_leader(task))
+ tgid = task_tgid(task);
+
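+	/*
+	 * Flush the entries from the proc mount of every pid namespace
+	 * the task was visible in, using the pid/tgid numbers valid in
+	 * that namespace.
+	 */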
+ for (i = 0; i <= pid->level; i++) {
+ upid = &pid->numbers[i];
+ proc_flush_task_mnt(upid->ns->proc_mnt, upid->nr,
+ tgid ? tgid->numbers[i].nr : 0);
+ }
+
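+	/*
+	 * If the task was pid 1 in its own pid namespace, release that
+	 * namespace's proc mount.
+	 */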
+ upid = &pid->numbers[pid->level];
+ if (upid->nr == 1)
+ pid_ns_release_proc(upid->ns);
+}
+
static struct dentry *proc_pid_instantiate(struct inode *dir,
struct dentry * dentry,
struct task_struct *task, const void *ptr)
struct dentry *result = ERR_PTR(-ENOENT);
struct task_struct *task;
unsigned tgid;
+ struct pid_namespace *ns;
result = proc_base_lookup(dir, dentry);
if (!IS_ERR(result) || PTR_ERR(result) != -ENOENT)
if (tgid == ~0U)
goto out;
+ ns = dentry->d_sb->s_fs_info;
rcu_read_lock();
- task = find_task_by_pid(tgid);
+ task = find_task_by_pid_ns(tgid, ns);
if (task)
get_task_struct(task);
rcu_read_unlock();
* Find the first task with tgid >= tgid
*
*/
-static struct task_struct *next_tgid(unsigned int tgid)
-{
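+/*
+ * Iterator state for walking thread group leaders during readdir: the
+ * next tgid to search from and a reference to the task found (if any).
+ */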
+struct tgid_iter {
+ unsigned int tgid;
struct task_struct *task;
+};
+static struct tgid_iter next_tgid(struct pid_namespace *ns, struct tgid_iter iter)
+{
struct pid *pid;
+ if (iter.task)
+ put_task_struct(iter.task);
rcu_read_lock();
retry:
- task = NULL;
- pid = find_ge_pid(tgid);
+ iter.task = NULL;
+ pid = find_ge_pid(iter.tgid, ns);
if (pid) {
- tgid = pid->nr + 1;
- task = pid_task(pid, PIDTYPE_PID);
+ iter.tgid = pid_nr_ns(pid, ns);
+ iter.task = pid_task(pid, PIDTYPE_PID);
		/* What we want to know is whether the pid we have found is the
* pid of a thread_group_leader. Testing for task
* being a thread_group_leader is the obvious thing
* found doesn't happen to be a thread group leader.
* As we don't care in the case of readdir.
*/
- if (!task || !has_group_leader_pid(task))
+ if (!iter.task || !has_group_leader_pid(iter.task)) {
+ iter.tgid += 1;
goto retry;
- get_task_struct(task);
+ }
+ get_task_struct(iter.task);
}
rcu_read_unlock();
- return task;
+ return iter;
}
#define TGID_OFFSET (FIRST_PROCESS_ENTRY + ARRAY_SIZE(proc_base_stuff))
static int proc_pid_fill_cache(struct file *filp, void *dirent, filldir_t filldir,
- struct task_struct *task, int tgid)
+ struct tgid_iter iter)
{
char name[PROC_NUMBUF];
- int len = snprintf(name, sizeof(name), "%d", tgid);
+ int len = snprintf(name, sizeof(name), "%d", iter.tgid);
return proc_fill_cache(filp, dirent, filldir, name, len,
- proc_pid_instantiate, task, NULL);
+ proc_pid_instantiate, iter.task, NULL);
}
/* for the /proc/ directory itself, after non-process stuff has been done */
{
unsigned int nr = filp->f_pos - FIRST_PROCESS_ENTRY;
struct task_struct *reaper = get_proc_task(filp->f_path.dentry->d_inode);
- struct task_struct *task;
- int tgid;
+ struct tgid_iter iter;
+ struct pid_namespace *ns;
if (!reaper)
goto out_no_task;
goto out;
}
- tgid = filp->f_pos - TGID_OFFSET;
- for (task = next_tgid(tgid);
- task;
- put_task_struct(task), task = next_tgid(tgid + 1)) {
- tgid = task->pid;
- filp->f_pos = tgid + TGID_OFFSET;
- if (proc_pid_fill_cache(filp, dirent, filldir, task, tgid) < 0) {
- put_task_struct(task);
+ ns = filp->f_dentry->d_sb->s_fs_info;
+ iter.task = NULL;
+ iter.tgid = filp->f_pos - TGID_OFFSET;
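+	/* Walk the thread group leaders of this namespace, resuming at f_pos. */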
+ for (iter = next_tgid(ns, iter);
+ iter.task;
+ iter.tgid += 1, iter = next_tgid(ns, iter)) {
+ filp->f_pos = iter.tgid + TGID_OFFSET;
+ if (proc_pid_fill_cache(filp, dirent, filldir, iter) < 0) {
+ put_task_struct(iter.task);
goto out;
}
}
static const struct pid_entry tid_base_stuff[] = {
DIR("fd", S_IRUSR|S_IXUSR, fd),
DIR("fdinfo", S_IRUSR|S_IXUSR, fdinfo),
- INF("environ", S_IRUSR, pid_environ),
+ REG("environ", S_IRUSR, environ),
INF("auxv", S_IRUSR, pid_auxv),
INF("status", S_IRUGO, pid_status),
+ INF("limits", S_IRUSR, pid_limits),
+#ifdef CONFIG_SCHED_DEBUG
+ REG("sched", S_IRUGO|S_IWUSR, pid_sched),
+#endif
INF("cmdline", S_IRUGO, pid_cmdline),
INF("stat", S_IRUGO, tid_stat),
INF("statm", S_IRUGO, pid_statm),
REG("numa_maps", S_IRUGO, numa_maps),
#endif
REG("mem", S_IRUSR|S_IWUSR, mem),
-#ifdef CONFIG_SECCOMP
- REG("seccomp", S_IRUSR|S_IWUSR, seccomp),
-#endif
LNK("cwd", cwd),
LNK("root", root),
LNK("exe", exe),
#ifdef CONFIG_SCHEDSTATS
INF("schedstat", S_IRUGO, pid_schedstat),
#endif
-#ifdef CONFIG_CPUSETS
+#ifdef CONFIG_PROC_PID_CPUSET
REG("cpuset", S_IRUGO, cpuset),
+#endif
+#ifdef CONFIG_CGROUPS
+ REG("cgroup", S_IRUGO, cgroup),
#endif
INF("oom_score", S_IRUGO, oom_score),
REG("oom_adj", S_IRUGO|S_IWUSR, oom_adjust),
struct task_struct *task;
struct task_struct *leader = get_proc_task(dir);
unsigned tid;
+ struct pid_namespace *ns;
if (!leader)
goto out_no_task;
if (tid == ~0U)
goto out;
+ ns = dentry->d_sb->s_fs_info;
rcu_read_lock();
- task = find_task_by_pid(tid);
+ task = find_task_by_pid_ns(tid, ns);
if (task)
get_task_struct(task);
rcu_read_unlock();
if (!task)
goto out;
- if (leader->tgid != task->tgid)
+ if (!same_thread_group(leader, task))
goto out_drop_task;
result = proc_task_instantiate(dir, dentry, task, NULL);
* threads past it.
*/
static struct task_struct *first_tid(struct task_struct *leader,
- int tid, int nr)
+ int tid, int nr, struct pid_namespace *ns)
{
struct task_struct *pos;
rcu_read_lock();
/* Attempt to start with the pid of a thread */
if (tid && (nr > 0)) {
- pos = find_task_by_pid(tid);
+ pos = find_task_by_pid_ns(tid, ns);
if (pos && (pos->group_leader == leader))
goto found;
}
ino_t ino;
int tid;
unsigned long pos = filp->f_pos; /* avoiding "long long" filp->f_pos */
+ struct pid_namespace *ns;
task = get_proc_task(inode);
if (!task)
/* f_version caches the tgid value that the last readdir call couldn't
* return. lseek aka telldir automagically resets f_version to 0.
*/
- tid = filp->f_version;
+ ns = filp->f_dentry->d_sb->s_fs_info;
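+	/* The cached value is a namespace-local tid, which fits in an int. */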
+ tid = (int)filp->f_version;
filp->f_version = 0;
- for (task = first_tid(leader, tid, pos - 2);
+ for (task = first_tid(leader, tid, pos - 2, ns);
task;
task = next_tid(task), pos++) {
- tid = task->pid;
+ tid = task_pid_nr_ns(task, ns);
if (proc_task_fill_cache(filp, dirent, filldir, task, tid) < 0) {
/* returning this tgid failed, save it as the first
			 * pid for the next readdir call */
- filp->f_version = tid;
+ filp->f_version = (u64)tid;
put_task_struct(task);
break;
}