if (ret <= 0)
return NULL;
- if (write) {
- unsigned long size = bprm->vma->vm_end - bprm->vma->vm_start;
- unsigned long ptr_size, limit;
-
- /*
- * Since the stack will hold pointers to the strings, we
- * must account for them as well.
- *
- * The size calculation is the entire vma while each arg page is
- * built, so each time we get here it's calculating how far it
- * is currently (rather than each call being just the newly
- * added size from the arg page). As a result, we need to
- * always add the entire size of the pointers, so that on the
- * last call to get_arg_page() we'll actually have the entire
- * correct size.
- */
- ptr_size = (bprm->argc + bprm->envc) * sizeof(void *);
- if (ptr_size > ULONG_MAX - size)
- goto fail;
- size += ptr_size;
-
- acct_arg_size(bprm, size / PAGE_SIZE);
-
- /*
- * We've historically supported up to 32 pages (ARG_MAX)
- * of argument strings even with small stacks
- */
- if (size <= ARG_MAX)
- return page;
-
- /*
- * Limit to 1/4 of the max stack size or 3/4 of _STK_LIM
- * (whichever is smaller) for the argv+env strings.
- * This ensures that:
- * - the remaining binfmt code will not run out of stack space,
- * - the program will have a reasonable amount of stack left
- * to work from.
- */
- limit = _STK_LIM / 4 * 3;
- limit = min(limit, bprm->rlim_stack.rlim_cur / 4);
- if (size > limit)
- goto fail;
- }
+ if (write)
+ acct_arg_size(bprm, vma_pages(bprm->vma));
return page;
-
-fail:
- put_page(page);
- return NULL;
}
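With the limit logic hoisted out of the page-fault path, the write branch above only has to charge the current extent of the argument VMA. For reference, vma_pages() is the stock helper from include/linux/mm.h and simply converts the VMA extent into a page count:

    static inline unsigned long vma_pages(struct vm_area_struct *vma)
    {
        return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
    }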
static void put_arg_page(struct page *page)
return i;
}
+static int prepare_arg_pages(struct linux_binprm *bprm,
+ struct user_arg_ptr argv, struct user_arg_ptr envp)
+{
+ unsigned long limit, ptr_size;
+
+ bprm->argc = count(argv, MAX_ARG_STRINGS);
+ if (bprm->argc < 0)
+ return bprm->argc;
+
+ bprm->envc = count(envp, MAX_ARG_STRINGS);
+ if (bprm->envc < 0)
+ return bprm->envc;
+
+ /*
+ * Limit to 1/4 of the max stack size or 3/4 of _STK_LIM
+ * (whichever is smaller) for the argv+env strings.
+ * This ensures that:
+ * - the remaining binfmt code will not run out of stack space,
+ * - the program will have a reasonable amount of stack left
+ * to work from.
+ */
+ limit = _STK_LIM / 4 * 3;
+ limit = min(limit, bprm->rlim_stack.rlim_cur / 4);
+ /*
+ * We've historically supported up to 32 pages (ARG_MAX)
+ * of argument strings even with small stacks
+ */
+ limit = max_t(unsigned long, limit, ARG_MAX);
+ /*
+ * We must account for the size of all the argv and envp pointers to
+ * the argv and envp strings, since they will also take up space in
+ * the stack. They aren't stored until much later when we can't
+ * signal to the parent that the child has run out of stack space.
+ * Instead, calculate it here so it's possible to fail gracefully.
+ */
+ ptr_size = (bprm->argc + bprm->envc) * sizeof(void *);
+ if (limit <= ptr_size)
+ return -E2BIG;
+ limit -= ptr_size;
+
+ bprm->argmin = bprm->p - limit;
+ return 0;
+}
+
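To make the budget arithmetic concrete, here is a small userspace model with illustrative numbers. The constants mirror their usual mainline values (_STK_LIM = 8 MiB, ARG_MAX = 32 pages of 4 KiB = 128 KiB), but treat this as a sketch of the calculation, not kernel code:

    #include <stdio.h>

    #define STK_LIM  (8UL * 1024 * 1024)   /* _STK_LIM: 8 MiB */
    #define ARG_MAX_ (128UL * 1024)        /* ARG_MAX: 32 pages of 4 KiB */

    int main(void)
    {
        unsigned long rlim_cur = 8UL * 1024 * 1024; /* sample RLIMIT_STACK */
        unsigned long argc = 50000, envc = 50;

        unsigned long limit = STK_LIM / 4 * 3;      /* 6 MiB */
        if (rlim_cur / 4 < limit)
            limit = rlim_cur / 4;                   /* 2 MiB */
        if (limit < ARG_MAX_)
            limit = ARG_MAX_;                       /* never below 128 KiB */

        unsigned long ptr_size = (argc + envc) * sizeof(void *);
        if (limit <= ptr_size)
            return 1;                               /* kernel returns -E2BIG here */
        limit -= ptr_size;                          /* budget left for the strings */

        printf("string budget: %lu bytes\n", limit);
        return 0;
    }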
/*
* 'copy_strings()' copies argument/environment strings from the old
* process's memory to the new process's stack. The call to get_user_pages()
pos = bprm->p;
str += len;
bprm->p -= len;
+#ifdef CONFIG_MMU
+ if (bprm->p < bprm->argmin)
+ goto out;
+#endif
while (len > 0) {
int offset, bytes_to_copy;
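From userspace, the new bprm->argmin cutoff surfaces as execve() failing with E2BIG once the strings plus their pointers overrun the budget. A minimal, hypothetical demo (the exact threshold depends on RLIMIT_STACK; here the pointers alone exceed a 2 MiB budget):

    #include <errno.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        enum { N = 400000 };            /* ~3.2 MB of pointers on 64-bit */
        static char *argv[N + 2];

        argv[0] = "/bin/true";
        for (int i = 1; i <= N; i++)
            argv[i] = "x";
        argv[N + 1] = NULL;

        execv(argv[0], argv);           /* returns only on failure */
        perror("execv");
        return errno == E2BIG ? 0 : 1;  /* expect E2BIG on a limit-enforcing kernel */
    }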
__set_current_state(TASK_KILLABLE);
spin_unlock_irq(lock);
schedule();
- if (unlikely(__fatal_signal_pending(tsk)))
+ if (__fatal_signal_pending(tsk))
goto killed;
spin_lock_irq(lock);
}
write_unlock_irq(&tasklist_lock);
cgroup_threadgroup_change_end(tsk);
schedule();
- if (unlikely(__fatal_signal_pending(tsk)))
+ if (__fatal_signal_pending(tsk))
goto killed;
}
* Or, if exec fails before, free_bprm() should release ->cred and
* unlock.
*/
-int prepare_bprm_creds(struct linux_binprm *bprm)
+static int prepare_bprm_creds(struct linux_binprm *bprm)
{
if (mutex_lock_interruptible(&current->signal->cred_guard_mutex))
return -ERESTARTNOINTR;
if (retval)
goto out_unmark;
- bprm->argc = count(argv, MAX_ARG_STRINGS);
- if ((retval = bprm->argc) < 0)
- goto out;
-
- bprm->envc = count(envp, MAX_ARG_STRINGS);
- if ((retval = bprm->envc) < 0)
+ retval = prepare_arg_pages(bprm, argv, envp);
+ if (retval < 0)
goto out;
retval = prepare_binprm(bprm);
hlist_for_each_entry(mp, chain, m_hash) {
if (mp->m_dentry == dentry) {
- /* might be worth a WARN_ON() */
- if (d_unlinked(dentry))
- return ERR_PTR(-ENOENT);
mp->m_count++;
return mp;
}
int ret;
if (d_mountpoint(dentry)) {
+ /* might be worth a WARN_ON() */
+ if (d_unlinked(dentry))
+ return ERR_PTR(-ENOENT);
mountpoint:
read_seqlock_excl(&mount_lock);
mp = lookup_mountpoint(dentry);
if (likely(hlist_empty(&head)))
return;
- synchronize_rcu();
+ synchronize_rcu_expedited();
group_pin_kill(&head);
}
namespace_lock();
lock_mount_hash();
- event++;
+ /* Recheck MNT_LOCKED with the locks held */
+ retval = -EINVAL;
+ if (mnt->mnt.mnt_flags & MNT_LOCKED)
+ goto out;
+
+ event++;
if (flags & MNT_DETACH) {
if (!list_empty(&mnt->mnt_list))
umount_tree(mnt, UMOUNT_PROPAGATE);
retval = 0;
}
}
+out:
unlock_mount_hash();
namespace_unlock();
return retval;
goto dput_and_out;
if (!check_mnt(mnt))
goto dput_and_out;
- if (mnt->mnt.mnt_flags & MNT_LOCKED)
+ if (mnt->mnt.mnt_flags & MNT_LOCKED) /* Check optimistically */
goto dput_and_out;
retval = -EPERM;
if (flags & MNT_FORCE && !capable(CAP_SYS_ADMIN))
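The two MNT_LOCKED tests form the usual optimistic-then-locked idiom: the check above is a cheap, racy early-out taken without the mount hash lock, and the earlier hunk in do_umount() retests under lock_mount_hash() before committing, since the flag can be set in between. A generic sketch of the idiom; obj, OBJ_LOCKED and do_remove() are hypothetical names, not kernel API:

    #include <linux/spinlock.h>
    #include <linux/types.h>

    #define OBJ_LOCKED 0x1

    struct obj {
        spinlock_t lock;
        unsigned int flags;
    };

    static void do_remove(struct obj *o);   /* illustrative only */

    static bool try_remove(struct obj *o)
    {
        if (o->flags & OBJ_LOCKED)          /* cheap, racy early-out */
            return false;

        spin_lock(&o->lock);
        if (o->flags & OBJ_LOCKED) {        /* authoritative recheck under the lock */
            spin_unlock(&o->lock);
            return false;
        }
        do_remove(o);                       /* flag cannot be set while we hold the lock */
        spin_unlock(&o->lock);
        return true;
    }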
for (s = r; s; s = next_mnt(s, r)) {
if (!(flag & CL_COPY_UNBINDABLE) &&
IS_MNT_UNBINDABLE(s)) {
- s = skip_mnt_tree(s);
- continue;
+ if (s->mnt.mnt_flags & MNT_LOCKED) {
+ /* Both unbindable and locked. */
+ q = ERR_PTR(-EPERM);
+ goto out;
+ } else {
+ s = skip_mnt_tree(s);
+ continue;
+ }
}
if (!(flag & CL_COPY_MNT_NS_FILE) &&
is_mnt_ns_file(s->mnt.mnt_root)) {
{
namespace_lock();
lock_mount_hash();
- umount_tree(real_mount(mnt), UMOUNT_SYNC);
+ umount_tree(real_mount(mnt), 0);
unlock_mount_hash();
namespace_unlock();
}
const char __user *f = from;
char c;
- if (!access_ok(VERIFY_READ, from, n))
+ if (!access_ok(from, n))
return n;
current->kernel_uaccess_faults_ok++;
#endif
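These hunks track the interface change that dropped access_ok()'s first argument: the VERIFY_READ/VERIFY_WRITE type had long gone unused by the architecture implementations, leaving a pure range check. The rewrite at each call site is mechanical:

    /* old: the type argument carried no information */
    if (!access_ok(VERIFY_READ, from, n))
        return n;

    /* new: range check only */
    if (!access_ok(from, n))
        return n;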
struct mm_struct *mm;
unsigned long p; /* current top of mem */
+ unsigned long argmin; /* rlimit marker for copy_strings() */
unsigned int
/*
* True after the bprm_set_creds hook has been called once
extern int bprm_change_interp(const char *interp, struct linux_binprm *bprm);
extern int copy_strings_kernel(int argc, const char *const *argv,
struct linux_binprm *bprm);
-extern int prepare_bprm_creds(struct linux_binprm *bprm);
extern void install_exec_creds(struct linux_binprm *bprm);
extern void set_binfmt(struct linux_binfmt *new);
extern ssize_t read_code(struct file *, unsigned long, loff_t, size_t);
#include <linux/vmalloc.h>
#include <linux/splice.h>
#include <net/checksum.h>
+#include <linux/scatterlist.h>
#define PIPE_PARANOIA /* for now */
static int copyout(void __user *to, const void *from, size_t n)
{
- if (access_ok(VERIFY_WRITE, to, n)) {
+ if (access_ok(to, n)) {
kasan_check_read(from, n);
n = raw_copy_to_user(to, from, n);
}
static int copyin(void *to, const void __user *from, size_t n)
{
- if (access_ok(VERIFY_READ, from, n)) {
+ if (access_ok(from, n)) {
kasan_check_write(to, n);
n = raw_copy_from_user(to, from, n);
}
return bytes;
}
+static __wsum csum_and_memcpy(void *to, const void *from, size_t len,
+ __wsum sum, size_t off)
+{
+ __wsum next = csum_partial_copy_nocheck(from, to, len, 0);
+ return csum_block_add(sum, next, off);
+}
+
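csum_and_memcpy() merely folds the two-step sequence that every call site below used to spell out, so each conversion is mechanical:

    /* before: copy-and-checksum, then fold into the running sum by hand */
    next = csum_partial_copy_nocheck(from, to, len, 0);
    sum = csum_block_add(sum, next, off);

    /* after: one call, same semantics */
    sum = csum_and_memcpy(to, from, len, sum, off);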
static size_t csum_and_copy_to_pipe_iter(const void *addr, size_t bytes,
__wsum *csum, struct iov_iter *i)
{
struct pipe_inode_info *pipe = i->pipe;
size_t n, r;
size_t off = 0;
- __wsum sum = *csum, next;
+ __wsum sum = *csum;
int idx;
if (!sanity(i))
for ( ; n; idx = next_idx(idx, pipe), r = 0) {
size_t chunk = min_t(size_t, n, PAGE_SIZE - r);
char *p = kmap_atomic(pipe->bufs[idx].page);
- next = csum_partial_copy_nocheck(addr, p + r, chunk, 0);
- sum = csum_block_add(sum, next, off);
+ sum = csum_and_memcpy(p + r, addr, chunk, sum, off);
kunmap_atomic(p);
i->idx = idx;
i->iov_offset = r + chunk;
#ifdef CONFIG_ARCH_HAS_UACCESS_MCSAFE
static int copyout_mcsafe(void __user *to, const void *from, size_t n)
{
- if (access_ok(VERIFY_WRITE, to, n)) {
+ if (access_ok(to, n)) {
kasan_check_read(from, n);
n = copy_to_user_mcsafe((__force void *) to, from, n);
}
err ? v.iov_len : 0;
}), ({
char *p = kmap_atomic(v.bv_page);
- next = csum_partial_copy_nocheck(p + v.bv_offset,
- (to += v.bv_len) - v.bv_len,
- v.bv_len, 0);
+ sum = csum_and_memcpy((to += v.bv_len) - v.bv_len,
+ p + v.bv_offset, v.bv_len,
+ sum, off);
kunmap_atomic(p);
- sum = csum_block_add(sum, next, off);
off += v.bv_len;
}),({
- next = csum_partial_copy_nocheck(v.iov_base,
- (to += v.iov_len) - v.iov_len,
- v.iov_len, 0);
- sum = csum_block_add(sum, next, off);
+ sum = csum_and_memcpy((to += v.iov_len) - v.iov_len,
+ v.iov_base, v.iov_len,
+ sum, off);
off += v.iov_len;
})
)
0;
}), ({
char *p = kmap_atomic(v.bv_page);
- next = csum_partial_copy_nocheck(p + v.bv_offset,
- (to += v.bv_len) - v.bv_len,
- v.bv_len, 0);
+ sum = csum_and_memcpy((to += v.bv_len) - v.bv_len,
+ p + v.bv_offset, v.bv_len,
+ sum, off);
kunmap_atomic(p);
- sum = csum_block_add(sum, next, off);
off += v.bv_len;
}),({
- next = csum_partial_copy_nocheck(v.iov_base,
- (to += v.iov_len) - v.iov_len,
- v.iov_len, 0);
- sum = csum_block_add(sum, next, off);
+ sum = csum_and_memcpy((to += v.iov_len) - v.iov_len,
+ v.iov_base, v.iov_len,
+ sum, off);
off += v.iov_len;
})
)
}
EXPORT_SYMBOL(csum_and_copy_from_iter_full);
-size_t csum_and_copy_to_iter(const void *addr, size_t bytes, __wsum *csum,
+size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *csump,
struct iov_iter *i)
{
const char *from = addr;
+ __wsum *csum = csump;
__wsum sum, next;
size_t off = 0;
err ? v.iov_len : 0;
}), ({
char *p = kmap_atomic(v.bv_page);
- next = csum_partial_copy_nocheck((from += v.bv_len) - v.bv_len,
- p + v.bv_offset,
- v.bv_len, 0);
+ sum = csum_and_memcpy(p + v.bv_offset,
+ (from += v.bv_len) - v.bv_len,
+ v.bv_len, sum, off);
kunmap_atomic(p);
- sum = csum_block_add(sum, next, off);
off += v.bv_len;
}),({
- next = csum_partial_copy_nocheck((from += v.iov_len) - v.iov_len,
- v.iov_base,
- v.iov_len, 0);
- sum = csum_block_add(sum, next, off);
+ sum = csum_and_memcpy(v.iov_base,
+ (from += v.iov_len) - v.iov_len,
+ v.iov_len, sum, off);
off += v.iov_len;
})
)
}
EXPORT_SYMBOL(csum_and_copy_to_iter);
+size_t hash_and_copy_to_iter(const void *addr, size_t bytes, void *hashp,
+ struct iov_iter *i)
+{
+ struct ahash_request *hash = hashp;
+ struct scatterlist sg;
+ size_t copied;
+
+ copied = copy_to_iter(addr, bytes, i);
+ sg_init_one(&sg, addr, copied);
+ ahash_request_set_crypt(hash, &sg, NULL, copied);
+ crypto_ahash_update(hash);
+ return copied;
+}
+EXPORT_SYMBOL(hash_and_copy_to_iter);
+
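hash_and_copy_to_iter() expects the opaque hashp argument to be a prepared struct ahash_request. A hedged sketch of a caller, with allocation-failure and async-completion handling elided for brevity (copy_and_sha256 is a hypothetical name, not kernel API):

    #include <crypto/hash.h>
    #include <linux/gfp.h>
    #include <linux/uio.h>

    static size_t copy_and_sha256(const void *buf, size_t len, struct iov_iter *to)
    {
        struct crypto_ahash *tfm = crypto_alloc_ahash("sha256", 0, 0);
        struct ahash_request *req = ahash_request_alloc(tfm, GFP_KERNEL);
        size_t copied;

        ahash_request_set_callback(req, 0, NULL, NULL);
        crypto_ahash_init(req);
        copied = hash_and_copy_to_iter(buf, len, req, to);
        /* ... crypto_ahash_final() once the whole stream has been fed ... */
        ahash_request_free(req);
        crypto_free_ahash(tfm);
        return copied;
    }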
int iov_iter_npages(const struct iov_iter *i, int maxpages)
{
size_t size = i->count;
{
if (len > MAX_RW_COUNT)
len = MAX_RW_COUNT;
- if (unlikely(!access_ok(!rw, buf, len)))
+ if (unlikely(!access_ok(buf, len)))
return -EFAULT;
iov->iov_base = buf;