Git Repo - linux.git/commitdiff
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/ebiederm...
authorLinus Torvalds <[email protected]>
Tue, 2 Oct 2012 18:11:09 +0000 (11:11 -0700)
committerLinus Torvalds <[email protected]>
Tue, 2 Oct 2012 18:11:09 +0000 (11:11 -0700)
Pull user namespace changes from Eric Biederman:
 "This is a mostly modest set of changes to enable basic user namespace
  support.  This allows the code to compile with user namespaces
  enabled and removes the assumption there is only the initial user
  namespace.  Everything is converted except for the most complex of the
  filesystems: autofs4, 9p, afs, ceph, cifs, coda, fuse, gfs2, ncpfs,
  nfs, ocfs2 and xfs as those patches need a bit more review.

  The strategy is to push kuid_t and kgid_t values as far down into
  subsystems and filesystems as reasonable.  Leaving the make_kuid and
  from_kuid operations to happen at the edge of userspace, as the values
  come off the disk, and as the values come in from the network.
  Letting the type-incompatibility compile errors (present when user
  namespaces are enabled) guide me to find the issues.

  The most tricky areas have been the places where we had an implicit
  union of uid and gid values and were storing them in an unsigned int.
  Those places were converted into explicit unions.  I made certain to
  handle those places with simple trivial patches.

  Out of that work I discovered we have generic interfaces for storing
  quota by projid.  I had never heard of the project identifiers before.
  Adding full user namespace support for project identifiers accounts
  for most of the code size growth in my git tree.

  Ultimately there will be work to relax privilege checks from
  "capable(FOO)" to "ns_capable(user_ns, FOO)" where it is safe allowing
  root in a user namespace to do those things that today we only forbid to
  non-root users because it will confuse suid root applications.

  While I was pushing kuid_t and kgid_t changes deep into the audit code
  I made a few other cleanups.  I capitalized on the fact we process
  netlink messages in the context of the message sender.  I removed
  usage of NETLINK_CRED, and started directly using current->tty.

  Some of these patches have also made it into maintainer trees, with no
  problems from identical code from different trees showing up in
  linux-next.

  After reading through all of this code I feel like I might be able to
  win a game of kernel trivial pursuit."

Fix up some fairly trivial conflicts in netfilter uid/gid logging code.

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/ebiederm/user-namespace: (107 commits)
  userns: Convert the ufs filesystem to use kuid/kgid where appropriate
  userns: Convert the udf filesystem to use kuid/kgid where appropriate
  userns: Convert ubifs to use kuid/kgid
  userns: Convert squashfs to use kuid/kgid where appropriate
  userns: Convert reiserfs to use kuid and kgid where appropriate
  userns: Convert jfs to use kuid/kgid where appropriate
  userns: Convert jffs2 to use kuid and kgid where appropriate
  userns: Convert hpfs to use kuid and kgid where appropriate
  userns: Convert btrfs to use kuid/kgid where appropriate
  userns: Convert bfs to use kuid/kgid where appropriate
  userns: Convert affs to use kuid/kgid wherwe appropriate
  userns: On alpha modify linux_to_osf_stat to use convert from kuids and kgids
  userns: On ia64 deal with current_uid and current_gid being kuid and kgid
  userns: On ppc convert current_uid from a kuid before printing.
  userns: Convert s390 getting uid and gid system calls to use kuid and kgid
  userns: Convert s390 hypfs to use kuid and kgid where appropriate
  userns: Convert binder ipc to use kuids
  userns: Teach security_path_chown to take kuids and kgids
  userns: Add user namespace support to IMA
  userns: Convert EVM to deal with kuids and kgids in it's hmac computation
  ...

57 files changed:
1  2 
arch/alpha/kernel/osf_sys.c
arch/s390/kernel/compat_linux.c
drivers/net/tun.c
drivers/staging/android/binder.c
drivers/usb/gadget/f_fs.c
drivers/usb/gadget/inode.c
fs/affs/super.c
fs/btrfs/delayed-inode.c
fs/btrfs/inode.c
fs/btrfs/ioctl.c
fs/debugfs/inode.c
fs/ecryptfs/main.c
fs/exofs/inode.c
fs/ext3/super.c
fs/ext4/super.c
fs/gfs2/quota.c
fs/hfs/inode.c
fs/logfs/inode.c
fs/logfs/readwrite.c
fs/namei.c
fs/open.c
fs/quota/dquot.c
fs/reiserfs/inode.c
fs/ubifs/super.c
fs/udf/inode.c
fs/udf/super.c
fs/xattr.c
include/linux/sched.h
include/linux/security.h
include/linux/tty.h
include/net/netns/ipv4.h
include/net/sock.h
include/net/tcp.h
include/net/xfrm.h
init/Kconfig
kernel/pid_namespace.c
kernel/trace/trace.c
kernel/trace/trace.h
net/core/dev.c
net/core/scm.c
net/core/sock.c
net/ipv4/raw.c
net/ipv4/tcp_ipv4.c
net/ipv4/udp.c
net/ipv6/raw.c
net/ipv6/tcp_ipv6.c
net/ipv6/udp.c
net/netfilter/nfnetlink_log.c
net/netfilter/xt_LOG.c
net/netlink/af_netlink.c
net/packet/af_packet.c
net/sched/cls_cgroup.c
net/xfrm/xfrm_policy.c
net/xfrm/xfrm_state.c
net/xfrm/xfrm_user.c
security/keys/key.c
security/keys/keyctl.c

index bc1acdda7a5ed8ab3945b72559c09a9e763ec030,32c5f9d8555d2e78cc712ec0492995c7857de90f..9503a4be40f6887ef883963445d7cd01a3cd4f2c
@@@ -278,8 -278,8 +278,8 @@@ linux_to_osf_stat(struct kstat *lstat, 
        tmp.st_dev      = lstat->dev;
        tmp.st_mode     = lstat->mode;
        tmp.st_nlink    = lstat->nlink;
-       tmp.st_uid      = lstat->uid;
-       tmp.st_gid      = lstat->gid;
+       tmp.st_uid      = from_kuid_munged(current_user_ns(), lstat->uid);
+       tmp.st_gid      = from_kgid_munged(current_user_ns(), lstat->gid);
        tmp.st_rdev     = lstat->rdev;
        tmp.st_ldev     = lstat->rdev;
        tmp.st_size     = lstat->size;
@@@ -1404,52 -1404,3 +1404,52 @@@ SYSCALL_DEFINE3(osf_writev, unsigned lo
  }
  
  #endif
 +
 +SYSCALL_DEFINE2(osf_getpriority, int, which, int, who)
 +{
 +      int prio = sys_getpriority(which, who);
 +      if (prio >= 0) {
 +              /* Return value is the unbiased priority, i.e. 20 - prio.
 +                 This does result in negative return values, so signal
 +                 no error */
 +              force_successful_syscall_return();
 +              prio = 20 - prio;
 +      }
 +      return prio;
 +}
 +
 +SYSCALL_DEFINE0(getxuid)
 +{
 +      current_pt_regs()->r20 = sys_geteuid();
 +      return sys_getuid();
 +}
 +
 +SYSCALL_DEFINE0(getxgid)
 +{
 +      current_pt_regs()->r20 = sys_getegid();
 +      return sys_getgid();
 +}
 +
 +SYSCALL_DEFINE0(getxpid)
 +{
 +      current_pt_regs()->r20 = sys_getppid();
 +      return sys_getpid();
 +}
 +
 +SYSCALL_DEFINE0(alpha_pipe)
 +{
 +      int fd[2];
 +      int res = do_pipe_flags(fd, 0);
 +      if (!res) {
 +              /* The return values are in $0 and $20.  */
 +              current_pt_regs()->r20 = fd[1];
 +              res = fd[0];
 +      }
 +      return res;
 +}
 +
 +SYSCALL_DEFINE1(sethae, unsigned long, val)
 +{
 +      current_pt_regs()->hae = val;
 +      return 0;
 +}
index f606d935f4950dcbec6fca5f67b88ac70760dcda,73995a725dd1f455ea489b1eaea31ffa40776791..189963c90c6eb09deb498947f057ae6ad3450f12
@@@ -131,13 -131,19 +131,19 @@@ asmlinkage long sys32_setresuid16(u16 r
                low2highuid(suid));
  }
  
- asmlinkage long sys32_getresuid16(u16 __user *ruid, u16 __user *euid, u16 __user *suid)
+ asmlinkage long sys32_getresuid16(u16 __user *ruidp, u16 __user *euidp, u16 __user *suidp)
  {
+       const struct cred *cred = current_cred();
        int retval;
+       u16 ruid, euid, suid;
  
-       if (!(retval = put_user(high2lowuid(current->cred->uid), ruid)) &&
-           !(retval = put_user(high2lowuid(current->cred->euid), euid)))
-               retval = put_user(high2lowuid(current->cred->suid), suid);
+       ruid = high2lowuid(from_kuid_munged(cred->user_ns, cred->uid));
+       euid = high2lowuid(from_kuid_munged(cred->user_ns, cred->euid));
+       suid = high2lowuid(from_kuid_munged(cred->user_ns, cred->suid));
+       if (!(retval   = put_user(ruid, ruidp)) &&
+           !(retval   = put_user(euid, euidp)))
+               retval = put_user(suid, suidp);
  
        return retval;
  }
@@@ -148,13 -154,19 +154,19 @@@ asmlinkage long sys32_setresgid16(u16 r
                low2highgid(sgid));
  }
  
- asmlinkage long sys32_getresgid16(u16 __user *rgid, u16 __user *egid, u16 __user *sgid)
+ asmlinkage long sys32_getresgid16(u16 __user *rgidp, u16 __user *egidp, u16 __user *sgidp)
  {
+       const struct cred *cred = current_cred();
        int retval;
+       u16 rgid, egid, sgid;
+       rgid = high2lowgid(from_kgid_munged(cred->user_ns, cred->gid));
+       egid = high2lowgid(from_kgid_munged(cred->user_ns, cred->egid));
+       sgid = high2lowgid(from_kgid_munged(cred->user_ns, cred->sgid));
  
-       if (!(retval = put_user(high2lowgid(current->cred->gid), rgid)) &&
-           !(retval = put_user(high2lowgid(current->cred->egid), egid)))
-               retval = put_user(high2lowgid(current->cred->sgid), sgid);
+       if (!(retval   = put_user(rgid, rgidp)) &&
+           !(retval   = put_user(egid, egidp)))
+               retval = put_user(sgid, sgidp);
  
        return retval;
  }
@@@ -258,22 -270,22 +270,22 @@@ asmlinkage long sys32_setgroups16(int g
  
  asmlinkage long sys32_getuid16(void)
  {
-       return high2lowuid(current->cred->uid);
+       return high2lowuid(from_kuid_munged(current_user_ns(), current_uid()));
  }
  
  asmlinkage long sys32_geteuid16(void)
  {
-       return high2lowuid(current->cred->euid);
+       return high2lowuid(from_kuid_munged(current_user_ns(), current_euid()));
  }
  
  asmlinkage long sys32_getgid16(void)
  {
-       return high2lowgid(current->cred->gid);
+       return high2lowgid(from_kgid_munged(current_user_ns(), current_gid()));
  }
  
  asmlinkage long sys32_getegid16(void)
  {
-       return high2lowgid(current->cred->egid);
+       return high2lowgid(from_kgid_munged(current_user_ns(), current_egid()));
  }
  
  /*
@@@ -620,6 -632,7 +632,6 @@@ asmlinkage unsigned long old32_mmap(str
                return -EFAULT;
        if (a.offset & ~PAGE_MASK)
                return -EINVAL;
 -      a.addr = (unsigned long) compat_ptr(a.addr);
        return sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd,
                              a.offset >> PAGE_SHIFT);
  }
@@@ -630,6 -643,7 +642,6 @@@ asmlinkage long sys32_mmap2(struct mmap
  
        if (copy_from_user(&a, arg, sizeof(a)))
                return -EFAULT;
 -      a.addr = (unsigned long) compat_ptr(a.addr);
        return sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd, a.offset);
  }
  
diff --combined drivers/net/tun.c
index 9336b829cc81e266d78e82c3b6735686371af6be,a9bd9f384f5fbb3c2fb3faf19526d7b49924fabc..0873cdcf39bebb09d75b1cadbffce4c2d614f593
@@@ -68,7 -68,6 +68,7 @@@
  #include <net/netns/generic.h>
  #include <net/rtnetlink.h>
  #include <net/sock.h>
 +#include <net/cls_cgroup.h>
  
  #include <asm/uaccess.h>
  
@@@ -121,8 -120,8 +121,8 @@@ struct tun_sock
  struct tun_struct {
        struct tun_file         *tfile;
        unsigned int            flags;
-       uid_t                   owner;
-       gid_t                   group;
+       kuid_t                  owner;
+       kgid_t                  group;
  
        struct net_device       *dev;
        netdev_features_t       set_features;
@@@ -188,6 -187,7 +188,6 @@@ static void __tun_detach(struct tun_str
        netif_tx_lock_bh(tun->dev);
        netif_carrier_off(tun->dev);
        tun->tfile = NULL;
 -      tun->socket.file = NULL;
        netif_tx_unlock_bh(tun->dev);
  
        /* Drop read queue */
@@@ -1032,8 -1032,8 +1032,8 @@@ static void tun_setup(struct net_devic
  {
        struct tun_struct *tun = netdev_priv(dev);
  
-       tun->owner = -1;
-       tun->group = -1;
+       tun->owner = INVALID_UID;
+       tun->group = INVALID_GID;
  
        dev->ethtool_ops = &tun_ethtool_ops;
        dev->destructor = tun_free_netdev;
@@@ -1156,14 -1156,20 +1156,20 @@@ static ssize_t tun_show_owner(struct de
                              char *buf)
  {
        struct tun_struct *tun = netdev_priv(to_net_dev(dev));
-       return sprintf(buf, "%d\n", tun->owner);
+       return uid_valid(tun->owner)?
+               sprintf(buf, "%u\n",
+                       from_kuid_munged(current_user_ns(), tun->owner)):
+               sprintf(buf, "-1\n");
  }
  
  static ssize_t tun_show_group(struct device *dev, struct device_attribute *attr,
                              char *buf)
  {
        struct tun_struct *tun = netdev_priv(to_net_dev(dev));
-       return sprintf(buf, "%d\n", tun->group);
+       return gid_valid(tun->group) ?
+               sprintf(buf, "%u\n",
+                       from_kgid_munged(current_user_ns(), tun->group)):
+               sprintf(buf, "-1\n");
  }
  
  static DEVICE_ATTR(tun_flags, 0444, tun_show_flags, NULL);
@@@ -1190,8 -1196,8 +1196,8 @@@ static int tun_set_iff(struct net *net
                else
                        return -EINVAL;
  
-               if (((tun->owner != -1 && cred->euid != tun->owner) ||
-                    (tun->group != -1 && !in_egroup_p(tun->group))) &&
+               if (((uid_valid(tun->owner) && !uid_eq(cred->euid, tun->owner)) ||
+                    (gid_valid(tun->group) && !in_egroup_p(tun->group))) &&
                    !capable(CAP_NET_ADMIN))
                        return -EPERM;
                err = security_tun_dev_attach(tun->socket.sk);
@@@ -1375,6 -1381,8 +1381,8 @@@ static long __tun_chr_ioctl(struct fil
        void __user* argp = (void __user*)arg;
        struct sock_fprog fprog;
        struct ifreq ifr;
+       kuid_t owner;
+       kgid_t group;
        int sndbuf;
        int vnet_hdr_sz;
        int ret;
  
        case TUNSETOWNER:
                /* Set owner of the device */
-               tun->owner = (uid_t) arg;
-               tun_debug(KERN_INFO, tun, "owner set to %d\n", tun->owner);
+               owner = make_kuid(current_user_ns(), arg);
+               if (!uid_valid(owner)) {
+                       ret = -EINVAL;
+                       break;
+               }
+               tun->owner = owner;
+               tun_debug(KERN_INFO, tun, "owner set to %d\n",
+                         from_kuid(&init_user_ns, tun->owner));
                break;
  
        case TUNSETGROUP:
                /* Set group of the device */
-               tun->group= (gid_t) arg;
-               tun_debug(KERN_INFO, tun, "group set to %d\n", tun->group);
+               group = make_kgid(current_user_ns(), arg);
+               if (!gid_valid(group)) {
+                       ret = -EINVAL;
+                       break;
+               }
+               tun->group = group;
+               tun_debug(KERN_INFO, tun, "group set to %d\n",
+                         from_kgid(&init_user_ns, tun->group));
                break;
  
        case TUNSETLINK:
index a807129c7b5a93979e8dc52de5d167d98a327940,8e35d4b2524c686633e93c97fd3e74cc353f0506..b1937ca13575ac097c1b64041d44379737adc7f6
@@@ -47,7 -47,7 +47,7 @@@ static HLIST_HEAD(binder_dead_nodes)
  static struct dentry *binder_debugfs_dir_entry_root;
  static struct dentry *binder_debugfs_dir_entry_proc;
  static struct binder_node *binder_context_mgr_node;
- static uid_t binder_context_mgr_uid = -1;
+ static kuid_t binder_context_mgr_uid = INVALID_UID;
  static int binder_last_id;
  static struct workqueue_struct *binder_deferred_workqueue;
  
@@@ -356,7 -356,7 +356,7 @@@ struct binder_transaction 
        unsigned int    flags;
        long    priority;
        long    saved_priority;
-       uid_t   sender_euid;
+       kuid_t  sender_euid;
  };
  
  static void
@@@ -365,7 -365,7 +365,7 @@@ binder_defer_work(struct binder_proc *p
  /*
   * copied from get_unused_fd_flags
   */
 -int task_get_unused_fd_flags(struct binder_proc *proc, int flags)
 +static int task_get_unused_fd_flags(struct binder_proc *proc, int flags)
  {
        struct files_struct *files = proc->files;
        int fd, error;
@@@ -415,13 -415,13 +415,13 @@@ repeat
        else
                __clear_close_on_exec(fd, fdt);
        files->next_fd = fd + 1;
 -#if 1
 +
        /* Sanity check */
        if (fdt->fd[fd] != NULL) {
                pr_warn("get_unused_fd: slot %d not NULL!\n", fd);
                fdt->fd[fd] = NULL;
        }
 -#endif
 +
        error = fd;
  
  out:
@@@ -2427,7 -2427,7 +2427,7 @@@ retry
                }
                tr.code = t->code;
                tr.flags = t->flags;
-               tr.sender_euid = t->sender_euid;
+               tr.sender_euid = from_kuid(current_user_ns(), t->sender_euid);
  
                if (t->from) {
                        struct task_struct *sender = t->from->proc->tsk;
@@@ -2705,12 -2705,12 +2705,12 @@@ static long binder_ioctl(struct file *f
                        ret = -EBUSY;
                        goto err;
                }
-               if (binder_context_mgr_uid != -1) {
-                       if (binder_context_mgr_uid != current->cred->euid) {
+               if (uid_valid(binder_context_mgr_uid)) {
+                       if (!uid_eq(binder_context_mgr_uid, current->cred->euid)) {
                                pr_err("binder: BINDER_SET_"
                                       "CONTEXT_MGR bad uid %d != %d\n",
-                                      current->cred->euid,
-                                      binder_context_mgr_uid);
+                                      from_kuid(&init_user_ns, current->cred->euid),
+                                      from_kuid(&init_user_ns, binder_context_mgr_uid));
                                ret = -EPERM;
                                goto err;
                        }
index 829aba75a6dfef28f1ce79055f5b73d6df883c68,f9ee4e08a4a8c4a03562b94ccf694256a3e0e800..a26c43a151fdb2bd9890efe6a8d77ce85b1f9d90
  /* Debugging ****************************************************************/
  
  #ifdef VERBOSE_DEBUG
 +#ifndef pr_vdebug
  #  define pr_vdebug pr_debug
 +#endif /* pr_vdebug */
  #  define ffs_dump_mem(prefix, ptr, len) \
        print_hex_dump_bytes(pr_fmt(prefix ": "), DUMP_PREFIX_NONE, ptr, len)
  #else
 +#ifndef pr_vdebug
  #  define pr_vdebug(...)                 do { } while (0)
 +#endif /* pr_vdebug */
  #  define ffs_dump_mem(prefix, ptr, len) do { } while (0)
  #endif /* VERBOSE_DEBUG */
  
@@@ -224,8 -220,8 +224,8 @@@ struct ffs_data 
        /* File permissions, written once when fs is mounted */
        struct ffs_file_perms {
                umode_t                         mode;
-               uid_t                           uid;
-               gid_t                           gid;
+               kuid_t                          uid;
+               kgid_t                          gid;
        }                               file_perms;
  
        /*
@@@ -1147,10 -1143,19 +1147,19 @@@ static int ffs_fs_parse_opts(struct ffs
                        break;
  
                case 3:
-                       if (!memcmp(opts, "uid", 3))
-                               data->perms.uid = value;
+                       if (!memcmp(opts, "uid", 3)) {
+                               data->perms.uid = make_kuid(current_user_ns(), value);
+                               if (!uid_valid(data->perms.uid)) {
+                                       pr_err("%s: unmapped value: %lu\n", opts, value);
+                                       return -EINVAL;
+                               }
+                       }
-                       else if (!memcmp(opts, "gid", 3))
-                               data->perms.gid = value;
+                       else if (!memcmp(opts, "gid", 3)) {
+                               data->perms.gid = make_kgid(current_user_ns(), value);
+                               if (!gid_valid(data->perms.gid)) {
+                                       pr_err("%s: unmapped value: %lu\n", opts, value);
+                                       return -EINVAL;
+                               }
+                       }
                        else
                                goto invalid;
                        break;
@@@ -1179,8 -1184,8 +1188,8 @@@ ffs_fs_mount(struct file_system_type *t
        struct ffs_sb_fill_data data = {
                .perms = {
                        .mode = S_IFREG | 0600,
-                       .uid = 0,
-                       .gid = 0
+                       .uid = GLOBAL_ROOT_UID,
+                       .gid = GLOBAL_ROOT_GID,
                },
                .root_mode = S_IFDIR | 0500,
        };
index 4bb6d53f2de3ff1bc3b97c05a28a8705a133e7a0,7bd36c80be5f530e64c26921928fbda79e01125d..76494cabf4e46e28cb2771b1fdd3566b8e6cc5b2
@@@ -828,6 -828,7 +828,6 @@@ ep_config (struct file *fd, const char 
                if (value == 0)
                        data->state = STATE_EP_ENABLED;
                break;
 -#ifdef        CONFIG_USB_GADGET_DUALSPEED
        case USB_SPEED_HIGH:
                /* fails if caller didn't provide that descriptor... */
                ep->desc = &data->hs_desc;
                if (value == 0)
                        data->state = STATE_EP_ENABLED;
                break;
 -#endif
        default:
                DBG(data->dev, "unconnected, %s init abandoned\n",
                                data->name);
@@@ -1322,6 -1324,7 +1322,6 @@@ static const struct file_operations ep0
   * Unrecognized ep0 requests may be handled in user space.
   */
  
 -#ifdef        CONFIG_USB_GADGET_DUALSPEED
  static void make_qualifier (struct dev_data *dev)
  {
        struct usb_qualifier_descriptor         qual;
  
        memcpy (dev->rbuf, &qual, sizeof qual);
  }
 -#endif
  
  static int
  config_buf (struct dev_data *dev, u8 type, unsigned index)
@@@ -1423,6 -1427,7 +1423,6 @@@ gadgetfs_setup (struct usb_gadget *gadg
                        dev->dev->bMaxPacketSize0 = dev->gadget->ep0->maxpacket;
                        req->buf = dev->dev;
                        break;
 -#ifdef        CONFIG_USB_GADGET_DUALSPEED
                case USB_DT_DEVICE_QUALIFIER:
                        if (!dev->hs_config)
                                break;
                        break;
                case USB_DT_OTHER_SPEED_CONFIG:
                        // FALLTHROUGH
 -#endif
                case USB_DT_CONFIG:
                        value = config_buf (dev,
                                        w_value >> 8,
@@@ -1679,8 -1685,8 +1679,8 @@@ gadgetfs_unbind (struct usb_gadget *gad
  
  static struct dev_data                *the_device;
  
 -static int
 -gadgetfs_bind (struct usb_gadget *gadget)
 +static int gadgetfs_bind(struct usb_gadget *gadget,
 +              struct usb_gadget_driver *driver)
  {
        struct dev_data         *dev = the_device;
  
@@@ -1757,8 -1763,12 +1757,8 @@@ gadgetfs_suspend (struct usb_gadget *ga
  }
  
  static struct usb_gadget_driver gadgetfs_driver = {
 -#ifdef        CONFIG_USB_GADGET_DUALSPEED
 -      .max_speed      = USB_SPEED_HIGH,
 -#else
 -      .max_speed      = USB_SPEED_FULL,
 -#endif
        .function       = (char *) driver_desc,
 +      .bind           = gadgetfs_bind,
        .unbind         = gadgetfs_unbind,
        .setup          = gadgetfs_setup,
        .disconnect     = gadgetfs_disconnect,
  
  static void gadgetfs_nop(struct usb_gadget *arg) { }
  
 -static int gadgetfs_probe (struct usb_gadget *gadget)
 +static int gadgetfs_probe(struct usb_gadget *gadget,
 +              struct usb_gadget_driver *driver)
  {
        CHIP = gadget->name;
        return -EISNAM;
  
  static struct usb_gadget_driver probe_driver = {
        .max_speed      = USB_SPEED_HIGH,
 +      .bind           = gadgetfs_probe,
        .unbind         = gadgetfs_nop,
        .setup          = (void *)gadgetfs_nop,
        .disconnect     = gadgetfs_nop,
@@@ -1892,12 -1900,7 +1892,12 @@@ dev_config (struct file *fd, const cha
  
        /* triggers gadgetfs_bind(); then we can enumerate. */
        spin_unlock_irq (&dev->lock);
 -      value = usb_gadget_probe_driver(&gadgetfs_driver, gadgetfs_bind);
 +      if (dev->hs_config)
 +              gadgetfs_driver.max_speed = USB_SPEED_HIGH;
 +      else
 +              gadgetfs_driver.max_speed = USB_SPEED_FULL;
 +
 +      value = usb_gadget_probe_driver(&gadgetfs_driver);
        if (value != 0) {
                kfree (dev->buf);
                dev->buf = NULL;
@@@ -1985,8 -1988,8 +1985,8 @@@ gadgetfs_make_inode (struct super_bloc
        if (inode) {
                inode->i_ino = get_next_ino();
                inode->i_mode = mode;
-               inode->i_uid = default_uid;
-               inode->i_gid = default_gid;
+               inode->i_uid = make_kuid(&init_user_ns, default_uid);
+               inode->i_gid = make_kgid(&init_user_ns, default_gid);
                inode->i_atime = inode->i_mtime = inode->i_ctime
                                = CURRENT_TIME;
                inode->i_private = data;
@@@ -2036,7 -2039,7 +2036,7 @@@ gadgetfs_fill_super (struct super_bloc
                return -ESRCH;
  
        /* fake probe to determine $CHIP */
 -      (void) usb_gadget_probe_driver(&probe_driver, gadgetfs_probe);
 +      usb_gadget_probe_driver(&probe_driver);
        if (!CHIP)
                return -ENODEV;
  
diff --combined fs/affs/super.c
index 022cecb0757dd0986b0f86f634847bd98fa89a97,966c8c06b9b3a2653c7b7130607614a236ed457d..1f030825cd3a3f5cb77b5f938efb70a6bd6a018c
@@@ -188,7 -188,7 +188,7 @@@ static const match_table_t tokens = 
  };
  
  static int
- parse_options(char *options, uid_t *uid, gid_t *gid, int *mode, int *reserved, s32 *root,
+ parse_options(char *options, kuid_t *uid, kgid_t *gid, int *mode, int *reserved, s32 *root,
                int *blocksize, char **prefix, char *volume, unsigned long *mount_opts)
  {
        char *p;
                case Opt_setgid:
                        if (match_int(&args[0], &option))
                                return 0;
-                       *gid = option;
+                       *gid = make_kgid(current_user_ns(), option);
+                       if (!gid_valid(*gid))
+                               return 0;
                        *mount_opts |= SF_SETGID;
                        break;
                case Opt_setuid:
                        if (match_int(&args[0], &option))
                                return 0;
-                       *uid = option;
+                       *uid = make_kuid(current_user_ns(), option);
+                       if (!uid_valid(*uid))
+                               return 0;
                        *mount_opts |= SF_SETUID;
                        break;
                case Opt_verbose:
@@@ -301,8 -305,8 +305,8 @@@ static int affs_fill_super(struct super
        int                      num_bm;
        int                      i, j;
        s32                      key;
-       uid_t                    uid;
-       gid_t                    gid;
+       kuid_t                   uid;
+       kgid_t                   gid;
        int                      reserved;
        unsigned long            mount_flags;
        int                      tmp_flags;     /* fix remount prototype... */
@@@ -527,8 -531,8 +531,8 @@@ affs_remount(struct super_block *sb, in
  {
        struct affs_sb_info     *sbi = AFFS_SB(sb);
        int                      blocksize;
-       uid_t                    uid;
-       gid_t                    gid;
+       kuid_t                   uid;
+       kgid_t                   gid;
        int                      mode;
        int                      reserved;
        int                      root_block;
                return -EINVAL;
        }
  
 -      flush_delayed_work_sync(&sbi->sb_work);
 +      flush_delayed_work(&sbi->sb_work);
        replace_mount_options(sb, new_opts);
  
        sbi->s_flags = mount_flags;
diff --combined fs/btrfs/delayed-inode.c
index 07d5eeb1e6f1df1f8ae2ddf94218b1f45298aeda,f908c51807957d9ba210cc04bd4b930964084b30..52c85e2b95d0f7efa9cbd105bab093eca90e1b9c
@@@ -512,8 -512,8 +512,8 @@@ static void __btrfs_remove_delayed_item
  
        rb_erase(&delayed_item->rb_node, root);
        delayed_item->delayed_node->count--;
 -      atomic_dec(&delayed_root->items);
 -      if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND &&
 +      if (atomic_dec_return(&delayed_root->items) <
 +          BTRFS_DELAYED_BACKGROUND &&
            waitqueue_active(&delayed_root->wait))
                wake_up(&delayed_root->wait);
  }
@@@ -1028,10 -1028,9 +1028,10 @@@ do_again
                btrfs_release_delayed_item(prev);
                ret = 0;
                btrfs_release_path(path);
 -              if (curr)
 +              if (curr) {
 +                      mutex_unlock(&node->mutex);
                        goto do_again;
 -              else
 +              else
                        goto delete_fail;
        }
  
@@@ -1056,7 -1055,8 +1056,7 @@@ static void btrfs_release_delayed_inode
                delayed_node->count--;
  
                delayed_root = delayed_node->root->fs_info->delayed_root;
 -              atomic_dec(&delayed_root->items);
 -              if (atomic_read(&delayed_root->items) <
 +              if (atomic_dec_return(&delayed_root->items) <
                    BTRFS_DELAYED_BACKGROUND &&
                    waitqueue_active(&delayed_root->wait))
                        wake_up(&delayed_root->wait);
@@@ -1715,8 -1715,8 +1715,8 @@@ static void fill_stack_inode_item(struc
                                  struct btrfs_inode_item *inode_item,
                                  struct inode *inode)
  {
-       btrfs_set_stack_inode_uid(inode_item, inode->i_uid);
-       btrfs_set_stack_inode_gid(inode_item, inode->i_gid);
+       btrfs_set_stack_inode_uid(inode_item, i_uid_read(inode));
+       btrfs_set_stack_inode_gid(inode_item, i_gid_read(inode));
        btrfs_set_stack_inode_size(inode_item, BTRFS_I(inode)->disk_i_size);
        btrfs_set_stack_inode_mode(inode_item, inode->i_mode);
        btrfs_set_stack_inode_nlink(inode_item, inode->i_nlink);
@@@ -1764,8 -1764,8 +1764,8 @@@ int btrfs_fill_inode(struct inode *inod
  
        inode_item = &delayed_node->inode_item;
  
-       inode->i_uid = btrfs_stack_inode_uid(inode_item);
-       inode->i_gid = btrfs_stack_inode_gid(inode_item);
+       i_uid_write(inode, btrfs_stack_inode_uid(inode_item));
+       i_gid_write(inode, btrfs_stack_inode_gid(inode_item));
        btrfs_i_size_write(inode, btrfs_stack_inode_size(inode_item));
        inode->i_mode = btrfs_stack_inode_mode(inode_item);
        set_nlink(inode, btrfs_stack_inode_nlink(inode_item));
diff --combined fs/btrfs/inode.c
index 316b07a866d246fbd9ae1880112f3387d66bdef0,53687149c07796ea7a8bee1370cb9abda3a1fb13..2a028a58619cdaca85ff21b4b74bc3324bf6bfea
@@@ -324,8 -324,7 +324,8 @@@ static noinline int add_async_extent(st
   * If this code finds it can't get good compression, it puts an
   * entry onto the work queue to write the uncompressed bytes.  This
   * makes sure that both compressed inodes and uncompressed inodes
 - * are written in the same order that pdflush sent them down.
 + * are written in the same order that the flusher thread sent them
 + * down.
   */
  static noinline int compress_file_range(struct inode *inode,
                                        struct page *locked_page,
@@@ -1008,7 -1007,9 +1008,7 @@@ static noinline void async_cow_submit(s
        nr_pages = (async_cow->end - async_cow->start + PAGE_CACHE_SIZE) >>
                PAGE_CACHE_SHIFT;
  
 -      atomic_sub(nr_pages, &root->fs_info->async_delalloc_pages);
 -
 -      if (atomic_read(&root->fs_info->async_delalloc_pages) <
 +      if (atomic_sub_return(nr_pages, &root->fs_info->async_delalloc_pages) <
            5 * 1024 * 1024 &&
            waitqueue_active(&root->fs_info->async_submit_wait))
                wake_up(&root->fs_info->async_submit_wait);
@@@ -1883,11 -1884,8 +1883,11 @@@ static int btrfs_finish_ordered_io(stru
                                trans = btrfs_join_transaction_nolock(root);
                        else
                                trans = btrfs_join_transaction(root);
 -                      if (IS_ERR(trans))
 -                              return PTR_ERR(trans);
 +                      if (IS_ERR(trans)) {
 +                              ret = PTR_ERR(trans);
 +                              trans = NULL;
 +                              goto out;
 +                      }
                        trans->block_rsv = &root->fs_info->delalloc_block_rsv;
                        ret = btrfs_update_inode_fallback(trans, root, inode);
                        if (ret) /* -ENOMEM or corruption */
@@@ -1971,8 -1969,8 +1971,8 @@@ out
                                      ordered_extent->len - 1, NULL, GFP_NOFS);
  
        /*
 -       * This needs to be dont to make sure anybody waiting knows we are done
 -       * upating everything for this ordered extent.
 +       * This needs to be done to make sure anybody waiting knows we are done
 +       * updating everything for this ordered extent.
         */
        btrfs_remove_ordered_extent(inode, ordered_extent);
  
@@@ -2572,8 -2570,8 +2572,8 @@@ static void btrfs_read_locked_inode(str
                                    struct btrfs_inode_item);
        inode->i_mode = btrfs_inode_mode(leaf, inode_item);
        set_nlink(inode, btrfs_inode_nlink(leaf, inode_item));
-       inode->i_uid = btrfs_inode_uid(leaf, inode_item);
-       inode->i_gid = btrfs_inode_gid(leaf, inode_item);
+       i_uid_write(inode, btrfs_inode_uid(leaf, inode_item));
+       i_gid_write(inode, btrfs_inode_gid(leaf, inode_item));
        btrfs_i_size_write(inode, btrfs_inode_size(leaf, inode_item));
  
        tspec = btrfs_inode_atime(inode_item);
@@@ -2651,8 -2649,8 +2651,8 @@@ static void fill_inode_item(struct btrf
                            struct btrfs_inode_item *item,
                            struct inode *inode)
  {
-       btrfs_set_inode_uid(leaf, item, inode->i_uid);
-       btrfs_set_inode_gid(leaf, item, inode->i_gid);
+       btrfs_set_inode_uid(leaf, item, i_uid_read(inode));
+       btrfs_set_inode_gid(leaf, item, i_gid_read(inode));
        btrfs_set_inode_size(leaf, item, BTRFS_I(inode)->disk_i_size);
        btrfs_set_inode_mode(leaf, item, inode->i_mode);
        btrfs_set_inode_nlink(leaf, item, inode->i_nlink);
@@@ -3175,7 -3173,7 +3175,7 @@@ int btrfs_unlink_subvol(struct btrfs_tr
        btrfs_i_size_write(dir, dir->i_size - name_len * 2);
        inode_inc_iversion(dir);
        dir->i_mtime = dir->i_ctime = CURRENT_TIME;
 -      ret = btrfs_update_inode(trans, root, dir);
 +      ret = btrfs_update_inode_fallback(trans, root, dir);
        if (ret)
                btrfs_abort_transaction(trans, root, ret);
  out:
        return ret;
  }
  
 +static int lock_extent_direct(struct inode *inode, u64 lockstart, u64 lockend,
 +                            struct extent_state **cached_state, int writing)
 +{
 +      struct btrfs_ordered_extent *ordered;
 +      int ret = 0;
 +
 +      while (1) {
 +              lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend,
 +                               0, cached_state);
 +              /*
 +               * We're concerned with the entire range that we're going to be
 +               * doing DIO to, so we need to make sure theres no ordered
 +               * extents in this range.
 +               */
 +              ordered = btrfs_lookup_ordered_range(inode, lockstart,
 +                                                   lockend - lockstart + 1);
 +
 +              /*
 +               * We need to make sure there are no buffered pages in this
 +               * range either, we could have raced between the invalidate in
 +               * generic_file_direct_write and locking the extent.  The
 +               * invalidate needs to happen so that reads after a write do not
 +               * get stale data.
 +               */
 +              if (!ordered && (!writing ||
 +                  !test_range_bit(&BTRFS_I(inode)->io_tree,
 +                                  lockstart, lockend, EXTENT_UPTODATE, 0,
 +                                  *cached_state)))
 +                      break;
 +
 +              unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
 +                                   cached_state, GFP_NOFS);
 +
 +              if (ordered) {
 +                      btrfs_start_ordered_extent(inode, ordered, 1);
 +                      btrfs_put_ordered_extent(ordered);
 +              } else {
 +                      /* Screw you mmap */
 +                      ret = filemap_write_and_wait_range(inode->i_mapping,
 +                                                         lockstart,
 +                                                         lockend);
 +                      if (ret)
 +                              break;
 +
 +                      /*
 +                       * If we found a page that couldn't be invalidated just
 +                       * fall back to buffered.
 +                       */
 +                      ret = invalidate_inode_pages2_range(inode->i_mapping,
 +                                      lockstart >> PAGE_CACHE_SHIFT,
 +                                      lockend >> PAGE_CACHE_SHIFT);
 +                      if (ret)
 +                              break;
 +              }
 +
 +              cond_resched();
 +      }
 +
 +      return ret;
 +}
 +
  static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock,
                                   struct buffer_head *bh_result, int create)
  {
        struct extent_map *em;
        struct btrfs_root *root = BTRFS_I(inode)->root;
 +      struct extent_state *cached_state = NULL;
        u64 start = iblock << inode->i_blkbits;
 +      u64 lockstart, lockend;
        u64 len = bh_result->b_size;
        struct btrfs_trans_handle *trans;
 +      int unlock_bits = EXTENT_LOCKED;
 +      int ret;
 +
 +      if (create) {
 +              ret = btrfs_delalloc_reserve_space(inode, len);
 +              if (ret)
 +                      return ret;
 +              unlock_bits |= EXTENT_DELALLOC | EXTENT_DIRTY;
 +      } else {
 +              len = min_t(u64, len, root->sectorsize);
 +      }
 +
 +      lockstart = start;
 +      lockend = start + len - 1;
 +
 +      /*
 +       * If this errors out it's because we couldn't invalidate pagecache for
 +       * this range and we need to fallback to buffered.
 +       */
 +      if (lock_extent_direct(inode, lockstart, lockend, &cached_state, create))
 +              return -ENOTBLK;
 +
 +      if (create) {
 +              ret = set_extent_bit(&BTRFS_I(inode)->io_tree, lockstart,
 +                                   lockend, EXTENT_DELALLOC, NULL,
 +                                   &cached_state, GFP_NOFS);
 +              if (ret)
 +                      goto unlock_err;
 +      }
  
        em = btrfs_get_extent(inode, NULL, 0, start, len, 0);
 -      if (IS_ERR(em))
 -              return PTR_ERR(em);
 +      if (IS_ERR(em)) {
 +              ret = PTR_ERR(em);
 +              goto unlock_err;
 +      }
  
        /*
         * Ok for INLINE and COMPRESSED extents we need to fallback on buffered
        if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags) ||
            em->block_start == EXTENT_MAP_INLINE) {
                free_extent_map(em);
 -              return -ENOTBLK;
 +              ret = -ENOTBLK;
 +              goto unlock_err;
        }
  
        /* Just a good old fashioned hole, return */
        if (!create && (em->block_start == EXTENT_MAP_HOLE ||
                        test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) {
                free_extent_map(em);
 -              /* DIO will do one hole at a time, so just unlock a sector */
 -              unlock_extent(&BTRFS_I(inode)->io_tree, start,
 -                            start + root->sectorsize - 1);
 -              return 0;
 +              ret = 0;
 +              goto unlock_err;
        }
  
        /*
         *
         */
        if (!create) {
 -              len = em->len - (start - em->start);
 -              goto map;
 +              len = min(len, em->len - (start - em->start));
 +              lockstart = start + len;
 +              goto unlock;
        }
  
        if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags) ||
                        btrfs_end_transaction(trans, root);
                        if (ret) {
                                free_extent_map(em);
 -                              return ret;
 +                              goto unlock_err;
                        }
                        goto unlock;
                }
@@@ -5968,12 -5872,14 +5968,12 @@@ must_cow
         */
        len = bh_result->b_size;
        em = btrfs_new_extent_direct(inode, em, start, len);
 -      if (IS_ERR(em))
 -              return PTR_ERR(em);
 +      if (IS_ERR(em)) {
 +              ret = PTR_ERR(em);
 +              goto unlock_err;
 +      }
        len = min(len, em->len - (start - em->start));
  unlock:
 -      clear_extent_bit(&BTRFS_I(inode)->io_tree, start, start + len - 1,
 -                        EXTENT_LOCKED | EXTENT_DELALLOC | EXTENT_DIRTY, 1,
 -                        0, NULL, GFP_NOFS);
 -map:
        bh_result->b_blocknr = (em->block_start + (start - em->start)) >>
                inode->i_blkbits;
        bh_result->b_size = len;
                        i_size_write(inode, start + len);
        }
  
 +      /*
 +       * In the case of write we need to clear and unlock the entire range,
 +       * in the case of read we need to unlock only the end area that we
 +       * aren't using if there is any left over space.
 +       */
 +      if (lockstart < lockend) {
 +              if (create && len < lockend - lockstart) {
 +                      clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart,
 +                                       lockstart + len - 1, unlock_bits, 1, 0,
 +                                       &cached_state, GFP_NOFS);
 +                      /*
 +                       * Beside unlock, we also need to cleanup reserved space
 +                       * for the left range by attaching EXTENT_DO_ACCOUNTING.
 +                       */
 +                      clear_extent_bit(&BTRFS_I(inode)->io_tree,
 +                                       lockstart + len, lockend,
 +                                       unlock_bits | EXTENT_DO_ACCOUNTING,
 +                                       1, 0, NULL, GFP_NOFS);
 +              } else {
 +                      clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart,
 +                                       lockend, unlock_bits, 1, 0,
 +                                       &cached_state, GFP_NOFS);
 +              }
 +      } else {
 +              free_extent_state(cached_state);
 +      }
 +
        free_extent_map(em);
  
        return 0;
 +
 +unlock_err:
 +      if (create)
 +              unlock_bits |= EXTENT_DO_ACCOUNTING;
 +
 +      clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart, lockend,
 +                       unlock_bits, 1, 0, &cached_state, GFP_NOFS);
 +      return ret;
  }
  
  struct btrfs_dio_private {
        u64 logical_offset;
        u64 disk_bytenr;
        u64 bytes;
 -      u32 *csums;
        void *private;
  
        /* number of bios pending for this dio */
@@@ -6055,6 -5927,7 +6055,6 @@@ static void btrfs_endio_direct_read(str
        struct inode *inode = dip->inode;
        struct btrfs_root *root = BTRFS_I(inode)->root;
        u64 start;
 -      u32 *private = dip->csums;
  
        start = dip->logical_offset;
        do {
                        struct page *page = bvec->bv_page;
                        char *kaddr;
                        u32 csum = ~(u32)0;
 +                      u64 private = ~(u32)0;
                        unsigned long flags;
  
 +                      if (get_state_private(&BTRFS_I(inode)->io_tree,
 +                                            start, &private))
 +                              goto failed;
                        local_irq_save(flags);
                        kaddr = kmap_atomic(page);
                        csum = btrfs_csum_data(root, kaddr + bvec->bv_offset,
                        local_irq_restore(flags);
  
                        flush_dcache_page(bvec->bv_page);
 -                      if (csum != *private) {
 +                      if (csum != private) {
 +failed:
                                printk(KERN_ERR "btrfs csum failed ino %llu off"
                                      " %llu csum %u private %u\n",
                                      (unsigned long long)btrfs_ino(inode),
                                      (unsigned long long)start,
 -                                    csum, *private);
 +                                    csum, (unsigned)private);
                                err = -EIO;
                        }
                }
  
                start += bvec->bv_len;
 -              private++;
                bvec++;
        } while (bvec <= bvec_end);
  
                      dip->logical_offset + dip->bytes - 1);
        bio->bi_private = dip->private;
  
 -      kfree(dip->csums);
        kfree(dip);
  
        /* If we had a csum failure make sure to clear the uptodate flag */
@@@ -6201,7 -6071,7 +6201,7 @@@ static struct bio *btrfs_dio_bio_alloc(
  
  static inline int __btrfs_submit_dio_bio(struct bio *bio, struct inode *inode,
                                         int rw, u64 file_offset, int skip_sum,
 -                                       u32 *csums, int async_submit)
 +                                       int async_submit)
  {
        int write = rw & REQ_WRITE;
        struct btrfs_root *root = BTRFS_I(inode)->root;
                if (ret)
                        goto err;
        } else if (!skip_sum) {
 -              ret = btrfs_lookup_bio_sums_dio(root, inode, bio,
 -                                        file_offset, csums);
 +              ret = btrfs_lookup_bio_sums_dio(root, inode, bio, file_offset);
                if (ret)
                        goto err;
        }
@@@ -6260,8 -6131,10 +6260,8 @@@ static int btrfs_submit_direct_hook(in
        u64 submit_len = 0;
        u64 map_length;
        int nr_pages = 0;
 -      u32 *csums = dip->csums;
        int ret = 0;
        int async_submit = 0;
 -      int write = rw & REQ_WRITE;
  
        map_length = orig_bio->bi_size;
        ret = btrfs_map_block(map_tree, READ, start_sector << 9,
                        atomic_inc(&dip->pending_bios);
                        ret = __btrfs_submit_dio_bio(bio, inode, rw,
                                                     file_offset, skip_sum,
 -                                                   csums, async_submit);
 +                                                   async_submit);
                        if (ret) {
                                bio_put(bio);
                                atomic_dec(&dip->pending_bios);
                                goto out_err;
                        }
  
 -                      /* Write's use the ordered csums */
 -                      if (!write && !skip_sum)
 -                              csums = csums + nr_pages;
                        start_sector += submit_len >> 9;
                        file_offset += submit_len;
  
  
  submit:
        ret = __btrfs_submit_dio_bio(bio, inode, rw, file_offset, skip_sum,
 -                                   csums, async_submit);
 +                                   async_submit);
        if (!ret)
                return 0;
  
@@@ -6369,6 -6245,17 +6369,6 @@@ static void btrfs_submit_direct(int rw
                ret = -ENOMEM;
                goto free_ordered;
        }
 -      dip->csums = NULL;
 -
 -      /* Write's use the ordered csum stuff, so we don't need dip->csums */
 -      if (!write && !skip_sum) {
 -              dip->csums = kmalloc(sizeof(u32) * bio->bi_vcnt, GFP_NOFS);
 -              if (!dip->csums) {
 -                      kfree(dip);
 -                      ret = -ENOMEM;
 -                      goto free_ordered;
 -              }
 -      }
  
        dip->private = bio->bi_private;
        dip->inode = inode;
@@@ -6453,22 -6340,132 +6453,22 @@@ static ssize_t check_direct_IO(struct b
  out:
        return retval;
  }
 +
  static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb,
                        const struct iovec *iov, loff_t offset,
                        unsigned long nr_segs)
  {
        struct file *file = iocb->ki_filp;
        struct inode *inode = file->f_mapping->host;
 -      struct btrfs_ordered_extent *ordered;
 -      struct extent_state *cached_state = NULL;
 -      u64 lockstart, lockend;
 -      ssize_t ret;
 -      int writing = rw & WRITE;
 -      int write_bits = 0;
 -      size_t count = iov_length(iov, nr_segs);
  
        if (check_direct_IO(BTRFS_I(inode)->root, rw, iocb, iov,
 -                          offset, nr_segs)) {
 +                          offset, nr_segs))
                return 0;
 -      }
 -
 -      lockstart = offset;
 -      lockend = offset + count - 1;
  
 -      if (writing) {
 -              ret = btrfs_delalloc_reserve_space(inode, count);
 -              if (ret)
 -                      goto out;
 -      }
 -
 -      while (1) {
 -              lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend,
 -                               0, &cached_state);
 -              /*
 -               * We're concerned with the entire range that we're going to be
 -               * doing DIO to, so we need to make sure theres no ordered
 -               * extents in this range.
 -               */
 -              ordered = btrfs_lookup_ordered_range(inode, lockstart,
 -                                                   lockend - lockstart + 1);
 -
 -              /*
 -               * We need to make sure there are no buffered pages in this
 -               * range either, we could have raced between the invalidate in
 -               * generic_file_direct_write and locking the extent.  The
 -               * invalidate needs to happen so that reads after a write do not
 -               * get stale data.
 -               */
 -              if (!ordered && (!writing ||
 -                  !test_range_bit(&BTRFS_I(inode)->io_tree,
 -                                  lockstart, lockend, EXTENT_UPTODATE, 0,
 -                                  cached_state)))
 -                      break;
 -
 -              unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
 -                                   &cached_state, GFP_NOFS);
 -
 -              if (ordered) {
 -                      btrfs_start_ordered_extent(inode, ordered, 1);
 -                      btrfs_put_ordered_extent(ordered);
 -              } else {
 -                      /* Screw you mmap */
 -                      ret = filemap_write_and_wait_range(file->f_mapping,
 -                                                         lockstart,
 -                                                         lockend);
 -                      if (ret)
 -                              goto out;
 -
 -                      /*
 -                       * If we found a page that couldn't be invalidated just
 -                       * fall back to buffered.
 -                       */
 -                      ret = invalidate_inode_pages2_range(file->f_mapping,
 -                                      lockstart >> PAGE_CACHE_SHIFT,
 -                                      lockend >> PAGE_CACHE_SHIFT);
 -                      if (ret) {
 -                              if (ret == -EBUSY)
 -                                      ret = 0;
 -                              goto out;
 -                      }
 -              }
 -
 -              cond_resched();
 -      }
 -
 -      /*
 -       * we don't use btrfs_set_extent_delalloc because we don't want
 -       * the dirty or uptodate bits
 -       */
 -      if (writing) {
 -              write_bits = EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING;
 -              ret = set_extent_bit(&BTRFS_I(inode)->io_tree, lockstart, lockend,
 -                                   EXTENT_DELALLOC, NULL, &cached_state,
 -                                   GFP_NOFS);
 -              if (ret) {
 -                      clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart,
 -                                       lockend, EXTENT_LOCKED | write_bits,
 -                                       1, 0, &cached_state, GFP_NOFS);
 -                      goto out;
 -              }
 -      }
 -
 -      free_extent_state(cached_state);
 -      cached_state = NULL;
 -
 -      ret = __blockdev_direct_IO(rw, iocb, inode,
 +      return __blockdev_direct_IO(rw, iocb, inode,
                   BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev,
                   iov, offset, nr_segs, btrfs_get_blocks_direct, NULL,
                   btrfs_submit_direct, 0);
 -
 -      if (ret < 0 && ret != -EIOCBQUEUED) {
 -              clear_extent_bit(&BTRFS_I(inode)->io_tree, offset,
 -                            offset + iov_length(iov, nr_segs) - 1,
 -                            EXTENT_LOCKED | write_bits, 1, 0,
 -                            &cached_state, GFP_NOFS);
 -      } else if (ret >= 0 && ret < iov_length(iov, nr_segs)) {
 -              /*
 -               * We're falling back to buffered, unlock the section we didn't
 -               * do IO on.
 -               */
 -              clear_extent_bit(&BTRFS_I(inode)->io_tree, offset + ret,
 -                            offset + iov_length(iov, nr_segs) - 1,
 -                            EXTENT_LOCKED | write_bits, 1, 0,
 -                            &cached_state, GFP_NOFS);
 -      }
 -out:
 -      free_extent_state(cached_state);
 -      return ret;
  }
  
  static int btrfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
diff --combined fs/btrfs/ioctl.c
index 9df50fa8a0781ba387553297fbe442ed964e671e,1292682c537fcd1b48cd1e9e5938f857479769d2..27bfce58da3b84db20d4e402cb0434cfde4a920e
@@@ -424,7 -424,7 +424,7 @@@ static noinline int create_subvol(struc
        uuid_le_gen(&new_uuid);
        memcpy(root_item.uuid, new_uuid.b, BTRFS_UUID_SIZE);
        root_item.otime.sec = cpu_to_le64(cur_time.tv_sec);
 -      root_item.otime.nsec = cpu_to_le64(cur_time.tv_nsec);
 +      root_item.otime.nsec = cpu_to_le32(cur_time.tv_nsec);
        root_item.ctime = root_item.otime;
        btrfs_set_root_ctransid(&root_item, trans->transid);
        btrfs_set_root_otransid(&root_item, trans->transid);
@@@ -575,13 -575,13 +575,13 @@@ fail
  */
  static inline int btrfs_check_sticky(struct inode *dir, struct inode *inode)
  {
-       uid_t fsuid = current_fsuid();
+       kuid_t fsuid = current_fsuid();
  
        if (!(dir->i_mode & S_ISVTX))
                return 0;
-       if (inode->i_uid == fsuid)
+       if (uid_eq(inode->i_uid, fsuid))
                return 0;
-       if (dir->i_uid == fsuid)
+       if (uid_eq(dir->i_uid, fsuid))
                return 0;
        return !capable(CAP_FOWNER);
  }
@@@ -664,6 -664,10 +664,6 @@@ static noinline int btrfs_mksubvol(stru
        struct dentry *dentry;
        int error;
  
 -      error = mnt_want_write(parent->mnt);
 -      if (error)
 -              return error;
 -
        mutex_lock_nested(&dir->i_mutex, I_MUTEX_PARENT);
  
        dentry = lookup_one_len(name, parent->dentry, namelen);
@@@ -699,6 -703,7 +699,6 @@@ out_dput
        dput(dentry);
  out_unlock:
        mutex_unlock(&dir->i_mutex);
 -      mnt_drop_write(parent->mnt);
        return error;
  }
  
diff --combined fs/debugfs/inode.c
index 6393fd61d5c4dedc19574cdee6eb136d1340fddb,36e2b667e82280e62c108113cd2c02724b14a5ed..b607d92cdf2445ab8c1e364a70f939ce21e22c6a
@@@ -28,7 -28,7 +28,7 @@@
  #include <linux/magic.h>
  #include <linux/slab.h>
  
 -#define DEBUGFS_DEFAULT_MODE  0755
 +#define DEBUGFS_DEFAULT_MODE  0700
  
  static struct vfsmount *debugfs_mount;
  static int debugfs_mount_count;
@@@ -128,8 -128,8 +128,8 @@@ static inline int debugfs_positive(stru
  }
  
  struct debugfs_mount_opts {
-       uid_t uid;
-       gid_t gid;
+       kuid_t uid;
+       kgid_t gid;
        umode_t mode;
  };
  
@@@ -156,6 -156,8 +156,8 @@@ static int debugfs_parse_options(char *
        substring_t args[MAX_OPT_ARGS];
        int option;
        int token;
+       kuid_t uid;
+       kgid_t gid;
        char *p;
  
        opts->mode = DEBUGFS_DEFAULT_MODE;
                case Opt_uid:
                        if (match_int(&args[0], &option))
                                return -EINVAL;
-                       opts->uid = option;
+                       uid = make_kuid(current_user_ns(), option);
+                       if (!uid_valid(uid))
+                               return -EINVAL;
+                       opts->uid = uid;
                        break;
                case Opt_gid:
                        if (match_octal(&args[0], &option))
                                return -EINVAL;
-                       opts->gid = option;
+                       gid = make_kgid(current_user_ns(), option);
+                       if (!gid_valid(gid))
+                               return -EINVAL;
+                       opts->gid = gid;
                        break;
                case Opt_mode:
                        if (match_octal(&args[0], &option))
@@@ -226,10 -234,12 +234,12 @@@ static int debugfs_show_options(struct 
        struct debugfs_fs_info *fsi = root->d_sb->s_fs_info;
        struct debugfs_mount_opts *opts = &fsi->mount_opts;
  
-       if (opts->uid != 0)
-               seq_printf(m, ",uid=%u", opts->uid);
-       if (opts->gid != 0)
-               seq_printf(m, ",gid=%u", opts->gid);
+       if (!uid_eq(opts->uid, GLOBAL_ROOT_UID))
+               seq_printf(m, ",uid=%u",
+                          from_kuid_munged(&init_user_ns, opts->uid));
+       if (!gid_eq(opts->gid, GLOBAL_ROOT_GID))
+               seq_printf(m, ",gid=%u",
+                          from_kgid_munged(&init_user_ns, opts->gid));
        if (opts->mode != DEBUGFS_DEFAULT_MODE)
                seq_printf(m, ",mode=%o", opts->mode);
  
@@@ -291,9 -301,9 +301,9 @@@ static struct file_system_type debug_fs
        .kill_sb =      kill_litter_super,
  };
  
 -struct dentry *__create_file(const char *name, umode_t mode,
 -                                 struct dentry *parent, void *data,
 -                                 const struct file_operations *fops)
 +static struct dentry *__create_file(const char *name, umode_t mode,
 +                                  struct dentry *parent, void *data,
 +                                  const struct file_operations *fops)
  {
        struct dentry *dentry = NULL;
        int error;
diff --combined fs/ecryptfs/main.c
index 9b627c15010a3af35e1f2ec85ccafc2b18d97d44,1d6ce91b70612cf1fa0f2a076c2687ff3333520a..24bb043e50d9234aa997fbf2175c8b7594b43e3b
@@@ -162,7 -162,6 +162,7 @@@ void ecryptfs_put_lower_file(struct ino
        inode_info = ecryptfs_inode_to_private(inode);
        if (atomic_dec_and_mutex_lock(&inode_info->lower_file_count,
                                      &inode_info->lower_file_mutex)) {
 +              filemap_write_and_wait(inode->i_mapping);
                fput(inode_info->lower_file);
                inode_info->lower_file = NULL;
                mutex_unlock(&inode_info->lower_file_mutex);
@@@ -545,11 -544,12 +545,12 @@@ static struct dentry *ecryptfs_mount(st
                goto out_free;
        }
  
-       if (check_ruid && path.dentry->d_inode->i_uid != current_uid()) {
+       if (check_ruid && !uid_eq(path.dentry->d_inode->i_uid, current_uid())) {
                rc = -EPERM;
                printk(KERN_ERR "Mount of device (uid: %d) not owned by "
                       "requested user (uid: %d)\n",
-                      path.dentry->d_inode->i_uid, current_uid());
+                       i_uid_read(path.dentry->d_inode),
+                       from_kuid(&init_user_ns, current_uid()));
                goto out_free;
        }
  
diff --combined fs/exofs/inode.c
index 1562c27a2fab27f700825e0090e4d98492fa2fbd,190c3d69e569f511a3b38972fe7337c8c74e161d..b5618104775187787415b67c1d99658f45f8b3d7
  
  #define EXOFS_DBGMSG2(M...) do {} while (0)
  
 -enum {MAX_PAGES_KMALLOC = PAGE_SIZE / sizeof(struct page *), };
 -
  unsigned exofs_max_io_pages(struct ore_layout *layout,
                            unsigned expected_pages)
  {
 -      unsigned pages = min_t(unsigned, expected_pages, MAX_PAGES_KMALLOC);
 +      unsigned pages = min_t(unsigned, expected_pages,
 +                             layout->max_io_length / PAGE_SIZE);
  
 -      /* TODO: easily support bio chaining */
 -      pages =  min_t(unsigned, pages, layout->max_io_length / PAGE_SIZE);
        return pages;
  }
  
@@@ -98,8 -101,7 +98,8 @@@ static void _pcol_reset(struct page_col
         * it might not end here. don't be left with nothing
         */
        if (!pcol->expected_pages)
 -              pcol->expected_pages = MAX_PAGES_KMALLOC;
 +              pcol->expected_pages =
 +                              exofs_max_io_pages(&pcol->sbi->layout, ~0);
  }
  
  static int pcol_try_alloc(struct page_collect *pcol)
@@@ -387,8 -389,6 +387,8 @@@ static int readpage_strip(void *data, s
        size_t len;
        int ret;
  
 +      BUG_ON(!PageLocked(page));
 +
        /* FIXME: Just for debugging, will be removed */
        if (PageUptodate(page))
                EXOFS_ERR("PageUptodate(0x%lx, 0x%lx)\n", pcol->inode->i_ino,
@@@ -572,16 -572,8 +572,16 @@@ static struct page *__r4w_get_page(voi
  
        if (!pcol->that_locked_page ||
            (pcol->that_locked_page->index != index)) {
 -              struct page *page = find_get_page(pcol->inode->i_mapping, index);
 +              struct page *page;
 +              loff_t i_size = i_size_read(pcol->inode);
 +
 +              if (offset >= i_size) {
 +                      *uptodate = true;
 +                      EXOFS_DBGMSG("offset >= i_size index=0x%lx\n", index);
 +                      return ZERO_PAGE(0);
 +              }
  
 +              page =  find_get_page(pcol->inode->i_mapping, index);
                if (!page) {
                        page = find_or_create_page(pcol->inode->i_mapping,
                                                   index, GFP_NOFS);
@@@ -610,13 -602,12 +610,13 @@@ static void __r4w_put_page(void *priv, 
  {
        struct page_collect *pcol = priv;
  
 -      if (pcol->that_locked_page != page) {
 +      if ((pcol->that_locked_page != page) && (ZERO_PAGE(0) != page)) {
                EXOFS_DBGMSG("index=0x%lx\n", page->index);
                page_cache_release(page);
                return;
        }
 -      EXOFS_DBGMSG("that_locked_page index=0x%lx\n", page->index);
 +      EXOFS_DBGMSG("that_locked_page index=0x%lx\n",
 +                   ZERO_PAGE(0) == page ? -1 : page->index);
  }
  
  static const struct _ore_r4w_op _r4w_op = {
@@@ -1172,8 -1163,8 +1172,8 @@@ struct inode *exofs_iget(struct super_b
  
        /* copy stuff from on-disk struct to in-memory struct */
        inode->i_mode = le16_to_cpu(fcb.i_mode);
-       inode->i_uid = le32_to_cpu(fcb.i_uid);
-       inode->i_gid = le32_to_cpu(fcb.i_gid);
+       i_uid_write(inode, le32_to_cpu(fcb.i_uid));
+       i_gid_write(inode, le32_to_cpu(fcb.i_gid));
        set_nlink(inode, le16_to_cpu(fcb.i_links_count));
        inode->i_ctime.tv_sec = (signed)le32_to_cpu(fcb.i_ctime);
        inode->i_atime.tv_sec = (signed)le32_to_cpu(fcb.i_atime);
@@@ -1385,8 -1376,8 +1385,8 @@@ static int exofs_update_inode(struct in
        fcb = &args->fcb;
  
        fcb->i_mode = cpu_to_le16(inode->i_mode);
-       fcb->i_uid = cpu_to_le32(inode->i_uid);
-       fcb->i_gid = cpu_to_le32(inode->i_gid);
+       fcb->i_uid = cpu_to_le32(i_uid_read(inode));
+       fcb->i_gid = cpu_to_le32(i_gid_read(inode));
        fcb->i_links_count = cpu_to_le16(inode->i_nlink);
        fcb->i_ctime = cpu_to_le32(inode->i_ctime.tv_sec);
        fcb->i_atime = cpu_to_le32(inode->i_atime.tv_sec);
diff --combined fs/ext3/super.c
index 8c892e93d8e7b6f2ff727709619eedde0e880812,73e42f5c70094f4cf4e14dfb08b1da9f1dd24eef..09b8455bd7ebc289e0d120bbb1d698fcf35c7c02
@@@ -64,6 -64,11 +64,6 @@@ static int ext3_freeze(struct super_blo
  
  /*
   * Wrappers for journal_start/end.
 - *
 - * The only special thing we need to do here is to make sure that all
 - * journal_end calls result in the superblock being marked dirty, so
 - * that sync() will call the filesystem's write_super callback if
 - * appropriate.
   */
  handle_t *ext3_journal_start_sb(struct super_block *sb, int nblocks)
  {
        return journal_start(journal, nblocks);
  }
  
 -/*
 - * The only special thing we need to do here is to make sure that all
 - * journal_stop calls result in the superblock being marked dirty, so
 - * that sync() will call the filesystem's write_super callback if
 - * appropriate.
 - */
  int __ext3_journal_stop(const char *where, handle_t *handle)
  {
        struct super_block *sb;
@@@ -2803,7 -2814,7 +2803,7 @@@ static int ext3_statfs (struct dentry 
  
  static inline struct inode *dquot_to_inode(struct dquot *dquot)
  {
-       return sb_dqopt(dquot->dq_sb)->files[dquot->dq_type];
+       return sb_dqopt(dquot->dq_sb)->files[dquot->dq_id.type];
  }
  
  static int ext3_write_dquot(struct dquot *dquot)
diff --combined fs/ext4/super.c
index c6e0cb3d1f4a9e3730aea4904037eb0e9d4dd6d8,78e6036ff2442b82d3085d4dbc3af0f60f849f6f..1f15cc836fbd0d777a8a2c24ac3d934d8b0d36e1
@@@ -326,6 -326,11 +326,6 @@@ static void ext4_put_nojournal(handle_
  
  /*
   * Wrappers for jbd2_journal_start/end.
 - *
 - * The only special thing we need to do here is to make sure that all
 - * journal_end calls result in the superblock being marked dirty, so
 - * that sync() will call the filesystem's write_super callback if
 - * appropriate.
   */
  handle_t *ext4_journal_start_sb(struct super_block *sb, int nblocks)
  {
        return jbd2_journal_start(journal, nblocks);
  }
  
 -/*
 - * The only special thing we need to do here is to make sure that all
 - * jbd2_journal_stop calls result in the superblock being marked dirty, so
 - * that sync() will call the filesystem's write_super callback if
 - * appropriate.
 - */
  int __ext4_journal_stop(const char *where, unsigned int line, handle_t *handle)
  {
        struct super_block *sb;
@@@ -948,7 -959,6 +948,7 @@@ static struct inode *ext4_alloc_inode(s
        ei->i_reserved_meta_blocks = 0;
        ei->i_allocated_meta_blocks = 0;
        ei->i_da_metadata_calc_len = 0;
 +      ei->i_da_metadata_calc_last_lblock = 0;
        spin_lock_init(&(ei->i_block_reservation_lock));
  #ifdef CONFIG_QUOTA
        ei->i_reserved_quota = 0;
@@@ -3109,10 -3119,6 +3109,10 @@@ static int count_overhead(struct super_
        ext4_group_t            i, ngroups = ext4_get_groups_count(sb);
        int                     s, j, count = 0;
  
 +      if (!EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_BIGALLOC))
 +              return (ext4_bg_has_super(sb, grp) + ext4_bg_num_gdb(sb, grp) +
 +                      sbi->s_itb_per_group + 2);
 +
        first_block = le32_to_cpu(sbi->s_es->s_first_data_block) +
                (grp * EXT4_BLOCKS_PER_GROUP(sb));
        last_block = first_block + EXT4_BLOCKS_PER_GROUP(sb) - 1;
@@@ -4424,7 -4430,6 +4424,7 @@@ static void ext4_clear_journal_err(stru
                ext4_commit_super(sb, 1);
  
                jbd2_journal_clear_err(journal);
 +              jbd2_journal_update_sb_errno(journal);
        }
  }
  
@@@ -4791,7 -4796,7 +4791,7 @@@ static int ext4_statfs(struct dentry *d
  
  static inline struct inode *dquot_to_inode(struct dquot *dquot)
  {
-       return sb_dqopt(dquot->dq_sb)->files[dquot->dq_type];
+       return sb_dqopt(dquot->dq_sb)->files[dquot->dq_id.type];
  }
  
  static int ext4_write_dquot(struct dquot *dquot)
diff --combined fs/gfs2/quota.c
index 4021deca61ef3b14558d361823802e8c1ba64fb1,d554dfff58e3186a50ff7039aa8e795355924af6..40c4b0d42fa8fea10b73102b4fd7d155ebf8daa0
@@@ -765,7 -765,6 +765,7 @@@ static int do_sync(unsigned int num_qd
        struct gfs2_holder *ghs, i_gh;
        unsigned int qx, x;
        struct gfs2_quota_data *qd;
 +      unsigned reserved;
        loff_t offset;
        unsigned int nalloc = 0, blocks;
        int error;
                return -ENOMEM;
  
        sort(qda, num_qd, sizeof(struct gfs2_quota_data *), sort_qd, NULL);
 -      mutex_lock_nested(&ip->i_inode.i_mutex, I_MUTEX_QUOTA);
 +      mutex_lock(&ip->i_inode.i_mutex);
        for (qx = 0; qx < num_qd; qx++) {
                error = gfs2_glock_nq_init(qda[qx]->qd_gl, LM_ST_EXCLUSIVE,
                                           GL_NOCACHE, &ghs[qx]);
         * two blocks need to be updated instead of 1 */
        blocks = num_qd * data_blocks + RES_DINODE + num_qd + 3;
  
 -      error = gfs2_inplace_reserve(ip, 1 +
 -                                   (nalloc * (data_blocks + ind_blocks)));
 +      reserved = 1 + (nalloc * (data_blocks + ind_blocks));
 +      error = gfs2_inplace_reserve(ip, reserved);
        if (error)
                goto out_alloc;
  
        if (nalloc)
 -              blocks += gfs2_rg_blocks(ip) + nalloc * ind_blocks + RES_STATFS;
 +              blocks += gfs2_rg_blocks(ip, reserved) + nalloc * ind_blocks + RES_STATFS;
  
        error = gfs2_trans_begin(sdp, blocks, 0);
        if (error)
@@@ -1071,8 -1070,10 +1071,10 @@@ int gfs2_quota_check(struct gfs2_inode 
  
                if (be64_to_cpu(qd->qd_qb.qb_limit) && (s64)be64_to_cpu(qd->qd_qb.qb_limit) < value) {
                        print_message(qd, "exceeded");
-                       quota_send_warning(test_bit(QDF_USER, &qd->qd_flags) ?
-                                          USRQUOTA : GRPQUOTA, qd->qd_id,
+                       quota_send_warning(make_kqid(&init_user_ns,
+                                                    test_bit(QDF_USER, &qd->qd_flags) ?
+                                                    USRQUOTA : GRPQUOTA,
+                                                    qd->qd_id),
                                           sdp->sd_vfs->s_dev, QUOTA_NL_BHARDWARN);
  
                        error = -EDQUOT;
                           time_after_eq(jiffies, qd->qd_last_warn +
                                         gfs2_tune_get(sdp,
                                                gt_quota_warn_period) * HZ)) {
-                       quota_send_warning(test_bit(QDF_USER, &qd->qd_flags) ?
-                                          USRQUOTA : GRPQUOTA, qd->qd_id,
+                       quota_send_warning(make_kqid(&init_user_ns,
+                                                    test_bit(QDF_USER, &qd->qd_flags) ?
+                                                    USRQUOTA : GRPQUOTA,
+                                                    qd->qd_id),
                                           sdp->sd_vfs->s_dev, QUOTA_NL_BSOFTWARN);
                        error = print_message(qd, "warning");
                        qd->qd_last_warn = jiffies;
@@@ -1470,7 -1473,7 +1474,7 @@@ static int gfs2_quota_get_xstate(struc
        return 0;
  }
  
- static int gfs2_get_dqblk(struct super_block *sb, int type, qid_t id,
+ static int gfs2_get_dqblk(struct super_block *sb, struct kqid qid,
                          struct fs_disk_quota *fdq)
  {
        struct gfs2_sbd *sdp = sb->s_fs_info;
        struct gfs2_quota_data *qd;
        struct gfs2_holder q_gh;
        int error;
+       int type;
  
        memset(fdq, 0, sizeof(struct fs_disk_quota));
  
        if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
                return -ESRCH; /* Crazy XFS error code */
  
-       if (type == USRQUOTA)
+       if (qid.type == USRQUOTA)
                type = QUOTA_USER;
-       else if (type == GRPQUOTA)
+       else if (qid.type == GRPQUOTA)
                type = QUOTA_GROUP;
        else
                return -EINVAL;
  
-       error = qd_get(sdp, type, id, &qd);
+       error = qd_get(sdp, type, from_kqid(&init_user_ns, qid), &qd);
        if (error)
                return error;
        error = do_glock(qd, FORCE, &q_gh);
        qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lvb;
        fdq->d_version = FS_DQUOT_VERSION;
        fdq->d_flags = (type == QUOTA_USER) ? FS_USER_QUOTA : FS_GROUP_QUOTA;
-       fdq->d_id = id;
+       fdq->d_id = from_kqid(&init_user_ns, qid);
        fdq->d_blk_hardlimit = be64_to_cpu(qlvb->qb_limit) << sdp->sd_fsb2bb_shift;
        fdq->d_blk_softlimit = be64_to_cpu(qlvb->qb_warn) << sdp->sd_fsb2bb_shift;
        fdq->d_bcount = be64_to_cpu(qlvb->qb_value) << sdp->sd_fsb2bb_shift;
@@@ -1515,7 -1519,7 +1520,7 @@@ out
  /* GFS2 only supports a subset of the XFS fields */
  #define GFS2_FIELDMASK (FS_DQ_BSOFT|FS_DQ_BHARD|FS_DQ_BCOUNT)
  
- static int gfs2_set_dqblk(struct super_block *sb, int type, qid_t id,
+ static int gfs2_set_dqblk(struct super_block *sb, struct kqid qid,
                          struct fs_disk_quota *fdq)
  {
        struct gfs2_sbd *sdp = sb->s_fs_info;
        int alloc_required;
        loff_t offset;
        int error;
+       int type;
  
        if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
                return -ESRCH; /* Crazy XFS error code */
  
-       switch(type) {
+       switch(qid.type) {
        case USRQUOTA:
                type = QUOTA_USER;
                if (fdq->d_flags != FS_USER_QUOTA)
  
        if (fdq->d_fieldmask & ~GFS2_FIELDMASK)
                return -EINVAL;
-       if (fdq->d_id != id)
+       if (fdq->d_id != from_kqid(&init_user_ns, qid))
                return -EINVAL;
  
-       error = qd_get(sdp, type, id, &qd);
+       error = qd_get(sdp, type, from_kqid(&init_user_ns, qid), &qd);
        if (error)
                return error;
  
                error = gfs2_inplace_reserve(ip, blocks);
                if (error)
                        goto out_i;
 -              blocks += gfs2_rg_blocks(ip);
 +              blocks += gfs2_rg_blocks(ip, blocks);
        }
  
        /* Some quotas span block boundaries and can update two blocks,
diff --combined fs/hfs/inode.c
index 553909395270ebe44ed05856af0a67e1a508d8e2,5d5c22da1960f738717ca47b50e034896e2ccf53..0b35903219bc1056d4c1c3260ec75d7b68a1b269
@@@ -594,9 -594,9 +594,9 @@@ int hfs_inode_setattr(struct dentry *de
  
        /* no uig/gid changes and limit which mode bits can be set */
        if (((attr->ia_valid & ATTR_UID) &&
-            (attr->ia_uid != hsb->s_uid)) ||
+            (!uid_eq(attr->ia_uid, hsb->s_uid))) ||
            ((attr->ia_valid & ATTR_GID) &&
-            (attr->ia_gid != hsb->s_gid)) ||
+            (!gid_eq(attr->ia_gid, hsb->s_gid))) ||
            ((attr->ia_valid & ATTR_MODE) &&
             ((S_ISDIR(inode->i_mode) &&
               (attr->ia_mode != inode->i_mode)) ||
@@@ -644,7 -644,7 +644,7 @@@ static int hfs_file_fsync(struct file *
  
        /* sync the superblock to buffers */
        sb = inode->i_sb;
 -      flush_delayed_work_sync(&HFS_SB(sb)->mdb_work);
 +      flush_delayed_work(&HFS_SB(sb)->mdb_work);
        /* .. finally sync the buffers to disk */
        err = sync_blockdev(sb->s_bdev);
        if (!ret)
diff --combined fs/logfs/inode.c
index 6984562738d36bc4142a3e0556730ae9e3bf3a57,43f61c2013f9b10870783ecbf9d8eb3497f1611f..bda39085309fc0e5cac8ba97c1809a6826b0d3a9
@@@ -156,26 -156,10 +156,26 @@@ static void __logfs_destroy_inode(struc
        call_rcu(&inode->i_rcu, logfs_i_callback);
  }
  
 +static void __logfs_destroy_meta_inode(struct inode *inode)
 +{
 +      struct logfs_inode *li = logfs_inode(inode);
 +      BUG_ON(li->li_block);
 +      call_rcu(&inode->i_rcu, logfs_i_callback);
 +}
 +
  static void logfs_destroy_inode(struct inode *inode)
  {
        struct logfs_inode *li = logfs_inode(inode);
  
 +      if (inode->i_ino < LOGFS_RESERVED_INOS) {
 +              /*
 +               * The reserved inodes are never destroyed unless we are in
 +               * unmont path.
 +               */
 +              __logfs_destroy_meta_inode(inode);
 +              return;
 +      }
 +
        BUG_ON(list_empty(&li->li_freeing_list));
        spin_lock(&logfs_inode_lock);
        li->li_refcount--;
@@@ -208,8 -192,8 +208,8 @@@ static void logfs_init_inode(struct sup
        li->li_height   = 0;
        li->li_used_bytes = 0;
        li->li_block    = NULL;
-       inode->i_uid    = 0;
-       inode->i_gid    = 0;
+       i_uid_write(inode, 0);
+       i_gid_write(inode, 0);
        inode->i_size   = 0;
        inode->i_blocks = 0;
        inode->i_ctime  = CURRENT_TIME;
@@@ -389,8 -373,8 +389,8 @@@ static void logfs_put_super(struct supe
  {
        struct logfs_super *super = logfs_super(sb);
        /* kill the meta-inodes */
 -      iput(super->s_master_inode);
        iput(super->s_segfile_inode);
 +      iput(super->s_master_inode);
        iput(super->s_mapping_inode);
  }
  
diff --combined fs/logfs/readwrite.c
index 5be0abef603d4f82af9e59aaca639118e280476b,a8d492d69213eec0c41d51c1d8514d6438b4435e..e1a3b6bf63244237215824021d87f553bf4482a6
@@@ -119,8 -119,8 +119,8 @@@ static void logfs_disk_to_inode(struct 
        inode->i_mode   = be16_to_cpu(di->di_mode);
        li->li_height   = di->di_height;
        li->li_flags    = be32_to_cpu(di->di_flags);
-       inode->i_uid    = be32_to_cpu(di->di_uid);
-       inode->i_gid    = be32_to_cpu(di->di_gid);
+       i_uid_write(inode, be32_to_cpu(di->di_uid));
+       i_gid_write(inode, be32_to_cpu(di->di_gid));
        inode->i_size   = be64_to_cpu(di->di_size);
        logfs_set_blocks(inode, be64_to_cpu(di->di_used_bytes));
        inode->i_atime  = be64_to_timespec(di->di_atime);
@@@ -156,8 -156,8 +156,8 @@@ static void logfs_inode_to_disk(struct 
        di->di_height   = li->li_height;
        di->di_pad      = 0;
        di->di_flags    = cpu_to_be32(li->li_flags);
-       di->di_uid      = cpu_to_be32(inode->i_uid);
-       di->di_gid      = cpu_to_be32(inode->i_gid);
+       di->di_uid      = cpu_to_be32(i_uid_read(inode));
+       di->di_gid      = cpu_to_be32(i_gid_read(inode));
        di->di_size     = cpu_to_be64(i_size_read(inode));
        di->di_used_bytes = cpu_to_be64(li->li_used_bytes);
        di->di_atime    = timespec_to_be64(inode->i_atime);
@@@ -2189,6 -2189,7 +2189,6 @@@ void logfs_evict_inode(struct inode *in
                return;
        }
  
 -      BUG_ON(inode->i_ino < LOGFS_RESERVED_INOS);
        page = inode_to_page(inode);
        BUG_ON(!page); /* FIXME: Use emergency page */
        logfs_put_write_page(page);
diff --combined fs/namei.c
index dd1ed1b8e98efe048683e81bd1244c483160ad55,05480a64d7b7acec9446db567b9c3560ff0f5762..a856e7f7b6e32ea624327a96b2865f90099e2328
@@@ -352,7 -352,6 +352,7 @@@ int __inode_permission(struct inode *in
  /**
   * sb_permission - Check superblock-level permissions
   * @sb: Superblock of inode to check permission on
 + * @inode: Inode to check permission on
   * @mask: Right to check for (%MAY_READ, %MAY_WRITE, %MAY_EXEC)
   *
   * Separate out file-system wide checks from inode-specific permission checks.
@@@ -657,7 -656,6 +657,7 @@@ int sysctl_protected_hardlinks __read_m
  /**
   * may_follow_link - Check symlink following for unsafe situations
   * @link: The path of the symlink
 + * @nd: nameidata pathwalk data
   *
   * In the case of the sysctl_protected_symlinks sysctl being enabled,
   * CAP_DAC_OVERRIDE needs to be specifically ignored if the symlink is
@@@ -680,7 -678,7 +680,7 @@@ static inline int may_follow_link(struc
  
        /* Allowed if owner and follower match. */
        inode = link->dentry->d_inode;
-       if (current_cred()->fsuid == inode->i_uid)
+       if (uid_eq(current_cred()->fsuid, inode->i_uid))
                return 0;
  
        /* Allowed if parent directory not sticky and world-writable. */
                return 0;
  
        /* Allowed if parent directory and link owner match. */
-       if (parent->i_uid == inode->i_uid)
+       if (uid_eq(parent->i_uid, inode->i_uid))
                return 0;
  
        path_put_conditional(link, nd);
@@@ -759,7 -757,7 +759,7 @@@ static int may_linkat(struct path *link
        /* Source inode owner (or CAP_FOWNER) can hardlink all they like,
         * otherwise, it must be a safe source.
         */
-       if (cred->fsuid == inode->i_uid || safe_hardlink_source(inode) ||
+       if (uid_eq(cred->fsuid, inode->i_uid) || safe_hardlink_source(inode) ||
            capable(CAP_FOWNER))
                return 0;
  
@@@ -2416,7 -2414,7 +2416,7 @@@ static int atomic_open(struct nameidat
                goto out;
        }
  
 -      mode = op->mode & S_IALLUGO;
 +      mode = op->mode;
        if ((open_flag & O_CREAT) && !IS_POSIXACL(dir))
                mode &= ~current_umask();
  
        }
  
        if (open_flag & O_CREAT) {
 -              error = may_o_create(&nd->path, dentry, op->mode);
 +              error = may_o_create(&nd->path, dentry, mode);
                if (error) {
                        create_error = error;
                        if (open_flag & O_EXCL)
                        dput(dentry);
                        dentry = file->f_path.dentry;
                }
 +              if (create_error && dentry->d_inode == NULL) {
 +                      error = create_error;
 +                      goto out;
 +              }
                goto looked_up;
        }
  
diff --combined fs/open.c
index e1f2cdb91a4dc494473986f1b0c8b91f23614a43,2b2573980d0f0ad09b92386c4bc3f1a7eeea2c06..b0bae3a41825fd1236d8089ffdeda9067342a510
+++ b/fs/open.c
@@@ -534,7 -534,7 +534,7 @@@ static int chown_common(struct path *pa
                newattrs.ia_valid |=
                        ATTR_KILL_SUID | ATTR_KILL_SGID | ATTR_KILL_PRIV;
        mutex_lock(&inode->i_mutex);
-       error = security_path_chown(path, user, group);
+       error = security_path_chown(path, uid, gid);
        if (!error)
                error = notify_change(path->dentry, &newattrs);
        mutex_unlock(&inode->i_mutex);
@@@ -717,7 -717,7 +717,7 @@@ cleanup_all
                         * here, so just reset the state.
                         */
                        file_reset_write(f);
 -                      mnt_drop_write(f->f_path.mnt);
 +                      __mnt_drop_write(f->f_path.mnt);
                }
        }
  cleanup_file:
@@@ -852,10 -852,9 +852,10 @@@ static inline int build_open_flags(int 
        int lookup_flags = 0;
        int acc_mode;
  
 -      if (!(flags & O_CREAT))
 -              mode = 0;
 -      op->mode = mode;
 +      if (flags & O_CREAT)
 +              op->mode = (mode & S_IALLUGO) | S_IFREG;
 +      else
 +              op->mode = 0;
  
        /* Must never be set by userspace */
        flags &= ~FMODE_NONOTIFY;
diff --combined fs/quota/dquot.c
index c495a3055e2a3be9b5e471afebdf53ac00c6fe51,c4564d0a4a9bb604336401879d06138c3676fbfe..557a9c20a2154856c1e1595e2d0e457d9487aaad
@@@ -253,8 -253,10 +253,10 @@@ static qsize_t inode_get_rsv_space(stru
  static void __dquot_initialize(struct inode *inode, int type);
  
  static inline unsigned int
- hashfn(const struct super_block *sb, unsigned int id, int type)
+ hashfn(const struct super_block *sb, struct kqid qid)
  {
+       unsigned int id = from_kqid(&init_user_ns, qid);
+       int type = qid.type;
        unsigned long tmp;
  
        tmp = (((unsigned long)sb>>L1_CACHE_SHIFT) ^ id) * (MAXQUOTAS - type);
  static inline void insert_dquot_hash(struct dquot *dquot)
  {
        struct hlist_head *head;
-       head = dquot_hash + hashfn(dquot->dq_sb, dquot->dq_id, dquot->dq_type);
+       head = dquot_hash + hashfn(dquot->dq_sb, dquot->dq_id);
        hlist_add_head(&dquot->dq_hash, head);
  }
  
@@@ -277,15 -279,14 +279,14 @@@ static inline void remove_dquot_hash(st
  }
  
  static struct dquot *find_dquot(unsigned int hashent, struct super_block *sb,
-                               unsigned int id, int type)
+                               struct kqid qid)
  {
        struct hlist_node *node;
        struct dquot *dquot;
  
        hlist_for_each (node, dquot_hash+hashent) {
                dquot = hlist_entry(node, struct dquot, dq_hash);
-               if (dquot->dq_sb == sb && dquot->dq_id == id &&
-                   dquot->dq_type == type)
+               if (dquot->dq_sb == sb && qid_eq(dquot->dq_id, qid))
                        return dquot;
        }
        return NULL;
@@@ -351,7 -352,7 +352,7 @@@ int dquot_mark_dquot_dirty(struct dquo
        spin_lock(&dq_list_lock);
        if (!test_and_set_bit(DQ_MOD_B, &dquot->dq_flags)) {
                list_add(&dquot->dq_dirty, &sb_dqopt(dquot->dq_sb)->
-                               info[dquot->dq_type].dqi_dirty_list);
+                               info[dquot->dq_id.type].dqi_dirty_list);
                ret = 0;
        }
        spin_unlock(&dq_list_lock);
@@@ -410,17 -411,17 +411,17 @@@ int dquot_acquire(struct dquot *dquot
        mutex_lock(&dquot->dq_lock);
        mutex_lock(&dqopt->dqio_mutex);
        if (!test_bit(DQ_READ_B, &dquot->dq_flags))
-               ret = dqopt->ops[dquot->dq_type]->read_dqblk(dquot);
+               ret = dqopt->ops[dquot->dq_id.type]->read_dqblk(dquot);
        if (ret < 0)
                goto out_iolock;
        set_bit(DQ_READ_B, &dquot->dq_flags);
        /* Instantiate dquot if needed */
        if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags) && !dquot->dq_off) {
-               ret = dqopt->ops[dquot->dq_type]->commit_dqblk(dquot);
+               ret = dqopt->ops[dquot->dq_id.type]->commit_dqblk(dquot);
                /* Write the info if needed */
-               if (info_dirty(&dqopt->info[dquot->dq_type])) {
-                       ret2 = dqopt->ops[dquot->dq_type]->write_file_info(
-                                               dquot->dq_sb, dquot->dq_type);
+               if (info_dirty(&dqopt->info[dquot->dq_id.type])) {
+                       ret2 = dqopt->ops[dquot->dq_id.type]->write_file_info(
+                                       dquot->dq_sb, dquot->dq_id.type);
                }
                if (ret < 0)
                        goto out_iolock;
@@@ -455,7 -456,7 +456,7 @@@ int dquot_commit(struct dquot *dquot
        /* Inactive dquot can be only if there was error during read/init
         * => we have better not writing it */
        if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags))
-               ret = dqopt->ops[dquot->dq_type]->commit_dqblk(dquot);
+               ret = dqopt->ops[dquot->dq_id.type]->commit_dqblk(dquot);
        else
                ret = -EIO;
  out_sem:
@@@ -477,12 -478,12 +478,12 @@@ int dquot_release(struct dquot *dquot
        if (atomic_read(&dquot->dq_count) > 1)
                goto out_dqlock;
        mutex_lock(&dqopt->dqio_mutex);
-       if (dqopt->ops[dquot->dq_type]->release_dqblk) {
-               ret = dqopt->ops[dquot->dq_type]->release_dqblk(dquot);
+       if (dqopt->ops[dquot->dq_id.type]->release_dqblk) {
+               ret = dqopt->ops[dquot->dq_id.type]->release_dqblk(dquot);
                /* Write the info */
-               if (info_dirty(&dqopt->info[dquot->dq_type])) {
-                       ret2 = dqopt->ops[dquot->dq_type]->write_file_info(
-                                               dquot->dq_sb, dquot->dq_type);
+               if (info_dirty(&dqopt->info[dquot->dq_id.type])) {
+                       ret2 = dqopt->ops[dquot->dq_id.type]->write_file_info(
+                                               dquot->dq_sb, dquot->dq_id.type);
                }
                if (ret >= 0)
                        ret = ret2;
@@@ -521,7 -522,7 +522,7 @@@ restart
        list_for_each_entry_safe(dquot, tmp, &inuse_list, dq_inuse) {
                if (dquot->dq_sb != sb)
                        continue;
-               if (dquot->dq_type != type)
+               if (dquot->dq_id.type != type)
                        continue;
                /* Wait for dquot users */
                if (atomic_read(&dquot->dq_count)) {
@@@ -741,7 -742,8 +742,8 @@@ void dqput(struct dquot *dquot
  #ifdef CONFIG_QUOTA_DEBUG
        if (!atomic_read(&dquot->dq_count)) {
                quota_error(dquot->dq_sb, "trying to free free dquot of %s %d",
-                           quotatypes[dquot->dq_type], dquot->dq_id);
+                           quotatypes[dquot->dq_id.type],
+                           from_kqid(&init_user_ns, dquot->dq_id));
                BUG();
        }
  #endif
@@@ -752,7 -754,7 +754,7 @@@ we_slept
                /* We have more than one user... nothing to do */
                atomic_dec(&dquot->dq_count);
                /* Releasing dquot during quotaoff phase? */
-               if (!sb_has_quota_active(dquot->dq_sb, dquot->dq_type) &&
+               if (!sb_has_quota_active(dquot->dq_sb, dquot->dq_id.type) &&
                    atomic_read(&dquot->dq_count) == 1)
                        wake_up(&dquot->dq_wait_unused);
                spin_unlock(&dq_list_lock);
@@@ -815,7 -817,7 +817,7 @@@ static struct dquot *get_empty_dquot(st
        INIT_LIST_HEAD(&dquot->dq_dirty);
        init_waitqueue_head(&dquot->dq_wait_unused);
        dquot->dq_sb = sb;
-       dquot->dq_type = type;
+       dquot->dq_id = make_kqid_invalid(type);
        atomic_set(&dquot->dq_count, 1);
  
        return dquot;
   *   a) checking for quota flags under dq_list_lock and
   *   b) getting a reference to dquot before we release dq_list_lock
   */
- struct dquot *dqget(struct super_block *sb, unsigned int id, int type)
+ struct dquot *dqget(struct super_block *sb, struct kqid qid)
  {
-       unsigned int hashent = hashfn(sb, id, type);
+       unsigned int hashent = hashfn(sb, qid);
        struct dquot *dquot = NULL, *empty = NULL;
  
-         if (!sb_has_quota_active(sb, type))
+         if (!sb_has_quota_active(sb, qid.type))
                return NULL;
  we_slept:
        spin_lock(&dq_list_lock);
        spin_lock(&dq_state_lock);
-       if (!sb_has_quota_active(sb, type)) {
+       if (!sb_has_quota_active(sb, qid.type)) {
                spin_unlock(&dq_state_lock);
                spin_unlock(&dq_list_lock);
                goto out;
        }
        spin_unlock(&dq_state_lock);
  
-       dquot = find_dquot(hashent, sb, id, type);
+       dquot = find_dquot(hashent, sb, qid);
        if (!dquot) {
                if (!empty) {
                        spin_unlock(&dq_list_lock);
-                       empty = get_empty_dquot(sb, type);
+                       empty = get_empty_dquot(sb, qid.type);
                        if (!empty)
                                schedule();     /* Try to wait for a moment... */
                        goto we_slept;
                }
                dquot = empty;
                empty = NULL;
-               dquot->dq_id = id;
+               dquot->dq_id = qid;
                /* all dquots go on the inuse_list */
                put_inuse(dquot);
                /* hash it first so it can be found */
@@@ -1129,8 -1131,7 +1131,7 @@@ static void dquot_decr_space(struct dqu
  
  struct dquot_warn {
        struct super_block *w_sb;
-       qid_t w_dq_id;
-       short w_dq_type;
+       struct kqid w_dq_id;
        short w_type;
  };
  
@@@ -1154,11 -1155,11 +1155,11 @@@ static int need_print_warning(struct dq
        if (!flag_print_warnings)
                return 0;
  
-       switch (warn->w_dq_type) {
+       switch (warn->w_dq_id.type) {
                case USRQUOTA:
-                       return current_fsuid() == warn->w_dq_id;
+                       return uid_eq(current_fsuid(), warn->w_dq_id.uid);
                case GRPQUOTA:
-                       return in_group_p(warn->w_dq_id);
+                       return in_group_p(warn->w_dq_id.gid);
        }
        return 0;
  }
@@@ -1184,7 -1185,7 +1185,7 @@@ static void print_warning(struct dquot_
                tty_write_message(tty, ": warning, ");
        else
                tty_write_message(tty, ": write failed, ");
-       tty_write_message(tty, quotatypes[warn->w_dq_type]);
+       tty_write_message(tty, quotatypes[warn->w_dq_id.type]);
        switch (warntype) {
                case QUOTA_NL_IHARDWARN:
                        msg = " file limit reached.\r\n";
@@@ -1218,7 -1219,6 +1219,6 @@@ static void prepare_warning(struct dquo
        warn->w_type = warntype;
        warn->w_sb = dquot->dq_sb;
        warn->w_dq_id = dquot->dq_id;
-       warn->w_dq_type = dquot->dq_type;
  }
  
  /*
@@@ -1236,14 -1236,14 +1236,14 @@@ static void flush_warnings(struct dquot
  #ifdef CONFIG_PRINT_QUOTA_WARNING
                print_warning(&warn[i]);
  #endif
-               quota_send_warning(warn[i].w_dq_type, warn[i].w_dq_id,
+               quota_send_warning(warn[i].w_dq_id,
                                   warn[i].w_sb->s_dev, warn[i].w_type);
        }
  }
  
  static int ignore_hardlimit(struct dquot *dquot)
  {
-       struct mem_dqinfo *info = &sb_dqopt(dquot->dq_sb)->info[dquot->dq_type];
+       struct mem_dqinfo *info = &sb_dqopt(dquot->dq_sb)->info[dquot->dq_id.type];
  
        return capable(CAP_SYS_RESOURCE) &&
               (info->dqi_format->qf_fmt_id != QFMT_VFS_OLD ||
@@@ -1256,7 -1256,7 +1256,7 @@@ static int check_idq(struct dquot *dquo
  {
        qsize_t newinodes = dquot->dq_dqb.dqb_curinodes + inodes;
  
-       if (!sb_has_quota_limits_enabled(dquot->dq_sb, dquot->dq_type) ||
+       if (!sb_has_quota_limits_enabled(dquot->dq_sb, dquot->dq_id.type) ||
            test_bit(DQ_FAKE_B, &dquot->dq_flags))
                return 0;
  
            dquot->dq_dqb.dqb_itime == 0) {
                prepare_warning(warn, dquot, QUOTA_NL_ISOFTWARN);
                dquot->dq_dqb.dqb_itime = get_seconds() +
-                   sb_dqopt(dquot->dq_sb)->info[dquot->dq_type].dqi_igrace;
+                   sb_dqopt(dquot->dq_sb)->info[dquot->dq_id.type].dqi_igrace;
        }
  
        return 0;
@@@ -1294,7 -1294,7 +1294,7 @@@ static int check_bdq(struct dquot *dquo
        qsize_t tspace;
        struct super_block *sb = dquot->dq_sb;
  
-       if (!sb_has_quota_limits_enabled(sb, dquot->dq_type) ||
+       if (!sb_has_quota_limits_enabled(sb, dquot->dq_id.type) ||
            test_bit(DQ_FAKE_B, &dquot->dq_flags))
                return 0;
  
                if (!prealloc) {
                        prepare_warning(warn, dquot, QUOTA_NL_BSOFTWARN);
                        dquot->dq_dqb.dqb_btime = get_seconds() +
-                           sb_dqopt(sb)->info[dquot->dq_type].dqi_bgrace;
+                           sb_dqopt(sb)->info[dquot->dq_id.type].dqi_bgrace;
                }
                else
                        /*
@@@ -1344,7 -1344,7 +1344,7 @@@ static int info_idq_free(struct dquot *
  
        if (test_bit(DQ_FAKE_B, &dquot->dq_flags) ||
            dquot->dq_dqb.dqb_curinodes <= dquot->dq_dqb.dqb_isoftlimit ||
-           !sb_has_quota_limits_enabled(dquot->dq_sb, dquot->dq_type))
+           !sb_has_quota_limits_enabled(dquot->dq_sb, dquot->dq_id.type))
                return QUOTA_NL_NOWARN;
  
        newinodes = dquot->dq_dqb.dqb_curinodes - inodes;
@@@ -1390,7 -1390,6 +1390,6 @@@ static int dquot_active(const struct in
   */
  static void __dquot_initialize(struct inode *inode, int type)
  {
-       unsigned int id = 0;
        int cnt;
        struct dquot *got[MAXQUOTAS];
        struct super_block *sb = inode->i_sb;
  
        /* First get references to structures we might need. */
        for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
+               struct kqid qid;
                got[cnt] = NULL;
                if (type != -1 && cnt != type)
                        continue;
                switch (cnt) {
                case USRQUOTA:
-                       id = inode->i_uid;
+                       qid = make_kqid_uid(inode->i_uid);
                        break;
                case GRPQUOTA:
-                       id = inode->i_gid;
+                       qid = make_kqid_gid(inode->i_gid);
                        break;
                }
-               got[cnt] = dqget(sb, id, cnt);
+               got[cnt] = dqget(sb, qid);
        }
  
        down_write(&sb_dqopt(sb)->dqptr_sem);
@@@ -1589,10 -1589,10 +1589,10 @@@ int __dquot_alloc_space(struct inode *i
                goto out;
        }
  
 -      down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
        for (cnt = 0; cnt < MAXQUOTAS; cnt++)
                warn[cnt].w_type = QUOTA_NL_NOWARN;
  
 +      down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
        spin_lock(&dq_data_lock);
        for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
                if (!dquots[cnt])
@@@ -1897,10 -1897,10 +1897,10 @@@ int dquot_transfer(struct inode *inode
        if (!dquot_active(inode))
                return 0;
  
-       if (iattr->ia_valid & ATTR_UID && iattr->ia_uid != inode->i_uid)
-               transfer_to[USRQUOTA] = dqget(sb, iattr->ia_uid, USRQUOTA);
-       if (iattr->ia_valid & ATTR_GID && iattr->ia_gid != inode->i_gid)
-               transfer_to[GRPQUOTA] = dqget(sb, iattr->ia_gid, GRPQUOTA);
+       if (iattr->ia_valid & ATTR_UID && !uid_eq(iattr->ia_uid, inode->i_uid))
+               transfer_to[USRQUOTA] = dqget(sb, make_kqid_uid(iattr->ia_uid));
+       if (iattr->ia_valid & ATTR_GID && !gid_eq(iattr->ia_gid, inode->i_gid))
+               transfer_to[GRPQUOTA] = dqget(sb, make_kqid_gid(iattr->ia_gid));
  
        ret = __dquot_transfer(inode, transfer_to);
        dqput_all(transfer_to);
@@@ -2360,9 -2360,9 +2360,9 @@@ static void do_get_dqblk(struct dquot *
  
        memset(di, 0, sizeof(*di));
        di->d_version = FS_DQUOT_VERSION;
-       di->d_flags = dquot->dq_type == USRQUOTA ?
+       di->d_flags = dquot->dq_id.type == USRQUOTA ?
                        FS_USER_QUOTA : FS_GROUP_QUOTA;
-       di->d_id = dquot->dq_id;
+       di->d_id = from_kqid_munged(current_user_ns(), dquot->dq_id);
  
        spin_lock(&dq_data_lock);
        di->d_blk_hardlimit = stoqb(dm->dqb_bhardlimit);
        spin_unlock(&dq_data_lock);
  }
  
- int dquot_get_dqblk(struct super_block *sb, int type, qid_t id,
+ int dquot_get_dqblk(struct super_block *sb, struct kqid qid,
                    struct fs_disk_quota *di)
  {
        struct dquot *dquot;
  
-       dquot = dqget(sb, id, type);
+       dquot = dqget(sb, qid);
        if (!dquot)
                return -ESRCH;
        do_get_dqblk(dquot, di);
@@@ -2401,7 -2401,7 +2401,7 @@@ static int do_set_dqblk(struct dquot *d
  {
        struct mem_dqblk *dm = &dquot->dq_dqb;
        int check_blim = 0, check_ilim = 0;
-       struct mem_dqinfo *dqi = &sb_dqopt(dquot->dq_sb)->info[dquot->dq_type];
+       struct mem_dqinfo *dqi = &sb_dqopt(dquot->dq_sb)->info[dquot->dq_id.type];
  
        if (di->d_fieldmask & ~VFS_FS_DQ_MASK)
                return -EINVAL;
        return 0;
  }
  
- int dquot_set_dqblk(struct super_block *sb, int type, qid_t id,
+ int dquot_set_dqblk(struct super_block *sb, struct kqid qid,
                  struct fs_disk_quota *di)
  {
        struct dquot *dquot;
        int rc;
  
-       dquot = dqget(sb, id, type);
+       dquot = dqget(sb, qid);
        if (!dquot) {
                rc = -ESRCH;
                goto out;
diff --combined fs/reiserfs/inode.c
index 855da58db1456b94d43715bb4a28bbc5f982a8d7,7119f488d9cd02038cc2993b3d7b78e3e3421867..46485557cdc63b037994c05e82735605bbb4069a
@@@ -76,10 -76,10 +76,10 @@@ void reiserfs_evict_inode(struct inode 
                ;
        }
        out:
 +      reiserfs_write_unlock_once(inode->i_sb, depth);
        clear_inode(inode);     /* note this must go after the journal_end to prevent deadlock */
        dquot_drop(inode);
        inode->i_blocks = 0;
 -      reiserfs_write_unlock_once(inode->i_sb, depth);
        return;
  
  no_delete:
@@@ -1155,8 -1155,8 +1155,8 @@@ static void init_inode(struct inode *in
                set_inode_sd_version(inode, STAT_DATA_V1);
                inode->i_mode = sd_v1_mode(sd);
                set_nlink(inode, sd_v1_nlink(sd));
-               inode->i_uid = sd_v1_uid(sd);
-               inode->i_gid = sd_v1_gid(sd);
+               i_uid_write(inode, sd_v1_uid(sd));
+               i_gid_write(inode, sd_v1_gid(sd));
                inode->i_size = sd_v1_size(sd);
                inode->i_atime.tv_sec = sd_v1_atime(sd);
                inode->i_mtime.tv_sec = sd_v1_mtime(sd);
  
                inode->i_mode = sd_v2_mode(sd);
                set_nlink(inode, sd_v2_nlink(sd));
-               inode->i_uid = sd_v2_uid(sd);
+               i_uid_write(inode, sd_v2_uid(sd));
                inode->i_size = sd_v2_size(sd);
-               inode->i_gid = sd_v2_gid(sd);
+               i_gid_write(inode, sd_v2_gid(sd));
                inode->i_mtime.tv_sec = sd_v2_mtime(sd);
                inode->i_atime.tv_sec = sd_v2_atime(sd);
                inode->i_ctime.tv_sec = sd_v2_ctime(sd);
@@@ -1258,9 -1258,9 +1258,9 @@@ static void inode2sd(void *sd, struct i
  
        set_sd_v2_mode(sd_v2, inode->i_mode);
        set_sd_v2_nlink(sd_v2, inode->i_nlink);
-       set_sd_v2_uid(sd_v2, inode->i_uid);
+       set_sd_v2_uid(sd_v2, i_uid_read(inode));
        set_sd_v2_size(sd_v2, size);
-       set_sd_v2_gid(sd_v2, inode->i_gid);
+       set_sd_v2_gid(sd_v2, i_gid_read(inode));
        set_sd_v2_mtime(sd_v2, inode->i_mtime.tv_sec);
        set_sd_v2_atime(sd_v2, inode->i_atime.tv_sec);
        set_sd_v2_ctime(sd_v2, inode->i_ctime.tv_sec);
@@@ -1280,8 -1280,8 +1280,8 @@@ static void inode2sd_v1(void *sd, struc
        struct stat_data_v1 *sd_v1 = (struct stat_data_v1 *)sd;
  
        set_sd_v1_mode(sd_v1, inode->i_mode);
-       set_sd_v1_uid(sd_v1, inode->i_uid);
-       set_sd_v1_gid(sd_v1, inode->i_gid);
+       set_sd_v1_uid(sd_v1, i_uid_read(inode));
+       set_sd_v1_gid(sd_v1, i_gid_read(inode));
        set_sd_v1_nlink(sd_v1, inode->i_nlink);
        set_sd_v1_size(sd_v1, size);
        set_sd_v1_atime(sd_v1, inode->i_atime.tv_sec);
@@@ -1869,7 -1869,7 +1869,7 @@@ int reiserfs_new_inode(struct reiserfs_
                goto out_bad_inode;
        }
        if (old_format_only(sb)) {
-               if (inode->i_uid & ~0xffff || inode->i_gid & ~0xffff) {
+               if (i_uid_read(inode) & ~0xffff || i_gid_read(inode) & ~0xffff) {
                        pathrelse(&path_to_key);
                        /* i_uid or i_gid is too big to be stored in stat data v3.5 */
                        err = -EINVAL;
@@@ -3140,16 -3140,16 +3140,16 @@@ int reiserfs_setattr(struct dentry *den
                }
        }
  
-       if ((((attr->ia_valid & ATTR_UID) && (attr->ia_uid & ~0xffff)) ||
-            ((attr->ia_valid & ATTR_GID) && (attr->ia_gid & ~0xffff))) &&
+       if ((((attr->ia_valid & ATTR_UID) && (from_kuid(&init_user_ns, attr->ia_uid) & ~0xffff)) ||
+            ((attr->ia_valid & ATTR_GID) && (from_kgid(&init_user_ns, attr->ia_gid) & ~0xffff))) &&
            (get_inode_sd_version(inode) == STAT_DATA_V1)) {
                /* stat data of format v3.5 has 16 bit uid and gid */
                error = -EINVAL;
                goto out;
        }
  
-       if ((ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid) ||
-           (ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid)) {
+       if ((ia_valid & ATTR_UID && !uid_eq(attr->ia_uid, inode->i_uid)) ||
+           (ia_valid & ATTR_GID && !gid_eq(attr->ia_gid, inode->i_gid))) {
                struct reiserfs_transaction_handle th;
                int jbegin_count =
                    2 *
diff --combined fs/ubifs/super.c
index 71a197f0f93d24c0cc33527d4f2d69c8c3d51f7b,f39bad9db61cdacfd0acb4324924ab21cc347517..681f3a9424443e60325e4bde8f1a7bd46f6848b1
@@@ -130,8 -130,8 +130,8 @@@ struct inode *ubifs_iget(struct super_b
  
        inode->i_flags |= (S_NOCMTIME | S_NOATIME);
        set_nlink(inode, le32_to_cpu(ino->nlink));
-       inode->i_uid   = le32_to_cpu(ino->uid);
-       inode->i_gid   = le32_to_cpu(ino->gid);
+       i_uid_write(inode, le32_to_cpu(ino->uid));
+       i_gid_write(inode, le32_to_cpu(ino->gid));
        inode->i_atime.tv_sec  = (int64_t)le64_to_cpu(ino->atime_sec);
        inode->i_atime.tv_nsec = le32_to_cpu(ino->atime_nsec);
        inode->i_mtime.tv_sec  = (int64_t)le64_to_cpu(ino->mtime_sec);
@@@ -303,7 -303,7 +303,7 @@@ static int ubifs_write_inode(struct ino
        mutex_lock(&ui->ui_mutex);
        /*
         * Due to races between write-back forced by budgeting
 -       * (see 'sync_some_inodes()') and pdflush write-back, the inode may
 +       * (see 'sync_some_inodes()') and background write-back, the inode may
         * have already been synchronized, do not do this again. This might
         * also happen if it was synchronized in an VFS operation, e.g.
         * 'ubifs_link()'.
@@@ -1157,6 -1157,9 +1157,6 @@@ static int check_free_space(struct ubif
   *
   * This function mounts UBIFS file system. Returns zero in case of success and
   * a negative error code in case of failure.
 - *
 - * Note, the function does not de-allocate resources it it fails half way
 - * through, and the caller has to do this instead.
   */
  static int mount_ubifs(struct ubifs_info *c)
  {
diff --combined fs/udf/inode.c
index aa233469b3c1a0deb1e3ef60b8039f5dd2e1cd55,1825dc0af728ec2abcc166ef4c7c0a30b699e9f2..287ef9f587b7eac3f731ddc47995d51b74577d33
@@@ -1124,17 -1124,14 +1124,17 @@@ int udf_setsize(struct inode *inode, lo
                                if (err)
                                        return err;
                                down_write(&iinfo->i_data_sem);
 -                      } else
 +                      } else {
                                iinfo->i_lenAlloc = newsize;
 +                              goto set_size;
 +                      }
                }
                err = udf_extend_file(inode, newsize);
                if (err) {
                        up_write(&iinfo->i_data_sem);
                        return err;
                }
 +set_size:
                truncate_setsize(inode, newsize);
                up_write(&iinfo->i_data_sem);
        } else {
@@@ -1312,14 -1309,14 +1312,14 @@@ static void udf_fill_inode(struct inod
        }
  
        read_lock(&sbi->s_cred_lock);
-       inode->i_uid = le32_to_cpu(fe->uid);
-       if (inode->i_uid == -1 ||
+       i_uid_write(inode, le32_to_cpu(fe->uid));
+       if (!uid_valid(inode->i_uid) ||
            UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_UID_IGNORE) ||
            UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_UID_SET))
                inode->i_uid = UDF_SB(inode->i_sb)->s_uid;
  
-       inode->i_gid = le32_to_cpu(fe->gid);
-       if (inode->i_gid == -1 ||
+       i_gid_write(inode, le32_to_cpu(fe->gid));
+       if (!gid_valid(inode->i_gid) ||
            UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_GID_IGNORE) ||
            UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_GID_SET))
                inode->i_gid = UDF_SB(inode->i_sb)->s_gid;
@@@ -1542,12 -1539,12 +1542,12 @@@ static int udf_update_inode(struct inod
        if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_UID_FORGET))
                fe->uid = cpu_to_le32(-1);
        else
-               fe->uid = cpu_to_le32(inode->i_uid);
+               fe->uid = cpu_to_le32(i_uid_read(inode));
  
        if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_GID_FORGET))
                fe->gid = cpu_to_le32(-1);
        else
-               fe->gid = cpu_to_le32(inode->i_gid);
+               fe->gid = cpu_to_le32(i_gid_read(inode));
  
        udfperms = ((inode->i_mode & S_IRWXO)) |
                   ((inode->i_mode & S_IRWXG) << 2) |
diff --combined fs/udf/super.c
index 18fc038a438da4b6bbf58fa73c23c27ecd0cb721,38c705574b9239610119689d53aab87069c1ecad..862741dddf27b7c935a15aef40b6b057970b93ad
@@@ -199,8 -199,8 +199,8 @@@ struct udf_options 
        unsigned int rootdir;
        unsigned int flags;
        umode_t umask;
-       gid_t gid;
-       uid_t uid;
+       kgid_t gid;
+       kuid_t uid;
        umode_t fmode;
        umode_t dmode;
        struct nls_table *nls_map;
@@@ -335,9 -335,9 +335,9 @@@ static int udf_show_options(struct seq_
        if (UDF_QUERY_FLAG(sb, UDF_FLAG_GID_IGNORE))
                seq_puts(seq, ",gid=ignore");
        if (UDF_QUERY_FLAG(sb, UDF_FLAG_UID_SET))
-               seq_printf(seq, ",uid=%u", sbi->s_uid);
+               seq_printf(seq, ",uid=%u", from_kuid(&init_user_ns, sbi->s_uid));
        if (UDF_QUERY_FLAG(sb, UDF_FLAG_GID_SET))
-               seq_printf(seq, ",gid=%u", sbi->s_gid);
+               seq_printf(seq, ",gid=%u", from_kgid(&init_user_ns, sbi->s_gid));
        if (sbi->s_umask != 0)
                seq_printf(seq, ",umask=%ho", sbi->s_umask);
        if (sbi->s_fmode != UDF_INVALID_MODE)
@@@ -516,13 -516,17 +516,17 @@@ static int udf_parse_options(char *opti
                case Opt_gid:
                        if (match_int(args, &option))
                                return 0;
-                       uopt->gid = option;
+                       uopt->gid = make_kgid(current_user_ns(), option);
+                       if (!gid_valid(uopt->gid))
+                               return 0;
                        uopt->flags |= (1 << UDF_FLAG_GID_SET);
                        break;
                case Opt_uid:
                        if (match_int(args, &option))
                                return 0;
-                       uopt->uid = option;
+                       uopt->uid = make_kuid(current_user_ns(), option);
+                       if (!uid_valid(uopt->uid))
+                               return 0;
                        uopt->flags |= (1 << UDF_FLAG_UID_SET);
                        break;
                case Opt_umask:
@@@ -1344,7 -1348,6 +1348,7 @@@ static int udf_load_logicalvol(struct s
                udf_err(sb, "error loading logical volume descriptor: "
                        "Partition table too long (%u > %lu)\n", table_len,
                        sb->s_blocksize - sizeof(*lvd));
 +              ret = 1;
                goto out_bh;
        }
  
                                                UDF_ID_SPARABLE,
                                                strlen(UDF_ID_SPARABLE))) {
                                if (udf_load_sparable_map(sb, map,
 -                                  (struct sparablePartitionMap *)gpm) < 0)
 +                                  (struct sparablePartitionMap *)gpm) < 0) {
 +                                      ret = 1;
                                        goto out_bh;
 +                              }
                        } else if (!strncmp(upm2->partIdent.ident,
                                                UDF_ID_METADATA,
                                                strlen(UDF_ID_METADATA))) {
@@@ -1934,8 -1935,8 +1938,8 @@@ static int udf_fill_super(struct super_
        struct udf_sb_info *sbi;
  
        uopt.flags = (1 << UDF_FLAG_USE_AD_IN_ICB) | (1 << UDF_FLAG_STRICT);
-       uopt.uid = -1;
-       uopt.gid = -1;
+       uopt.uid = INVALID_UID;
+       uopt.gid = INVALID_GID;
        uopt.umask = 0;
        uopt.fmode = UDF_INVALID_MODE;
        uopt.dmode = UDF_INVALID_MODE;
                        if (!silent)
                                pr_notice("Rescanning with blocksize %d\n",
                                          UDF_DEFAULT_BLOCKSIZE);
 +                      brelse(sbi->s_lvid_bh);
 +                      sbi->s_lvid_bh = NULL;
                        uopt.blocksize = UDF_DEFAULT_BLOCKSIZE;
                        ret = udf_load_vrs(sb, &uopt, silent, &fileset);
                }
diff --combined fs/xattr.c
index 014f11321fd9921973818980c0e0024fe591d0ca,c111745c2da9ed449a4bb16be2a4293a646aaeb9..f7f7f09b0b4166978f567efc60c07f585462a789
@@@ -20,6 -20,7 +20,7 @@@
  #include <linux/fsnotify.h>
  #include <linux/audit.h>
  #include <linux/vmalloc.h>
+ #include <linux/posix_acl_xattr.h>
  
  #include <asm/uaccess.h>
  
@@@ -347,6 -348,9 +348,9 @@@ setxattr(struct dentry *d, const char _
                        error = -EFAULT;
                        goto out;
                }
+               if ((strcmp(kname, XATTR_NAME_POSIX_ACL_ACCESS) == 0) ||
+                   (strcmp(kname, XATTR_NAME_POSIX_ACL_DEFAULT) == 0))
+                       posix_acl_fix_xattr_from_user(kvalue, size);
        }
  
        error = vfs_setxattr(d, kname, kvalue, size, flags);
@@@ -450,6 -454,9 +454,9 @@@ getxattr(struct dentry *d, const char _
  
        error = vfs_getxattr(d, kname, kvalue, size);
        if (error > 0) {
+               if ((strcmp(kname, XATTR_NAME_POSIX_ACL_ACCESS) == 0) ||
+                   (strcmp(kname, XATTR_NAME_POSIX_ACL_DEFAULT) == 0))
+                       posix_acl_fix_xattr_to_user(kvalue, size);
                if (size && copy_to_user(value, kvalue, error))
                        error = -EFAULT;
        } else if (error == -ERANGE && size >= XATTR_SIZE_MAX) {
@@@ -791,183 -798,3 +798,183 @@@ EXPORT_SYMBOL(generic_getxattr)
  EXPORT_SYMBOL(generic_listxattr);
  EXPORT_SYMBOL(generic_setxattr);
  EXPORT_SYMBOL(generic_removexattr);
 +
 +/*
 + * Allocate new xattr and copy in the value; but leave the name to callers.
 + */
 +struct simple_xattr *simple_xattr_alloc(const void *value, size_t size)
 +{
 +      struct simple_xattr *new_xattr;
 +      size_t len;
 +
 +      /* wrap around? */
 +      len = sizeof(*new_xattr) + size;
 +      if (len <= sizeof(*new_xattr))
 +              return NULL;
 +
 +      new_xattr = kmalloc(len, GFP_KERNEL);
 +      if (!new_xattr)
 +              return NULL;
 +
 +      new_xattr->size = size;
 +      memcpy(new_xattr->value, value, size);
 +      return new_xattr;
 +}
 +
 +/*
 + * xattr GET operation for in-memory/pseudo filesystems
 + */
 +int simple_xattr_get(struct simple_xattrs *xattrs, const char *name,
 +                   void *buffer, size_t size)
 +{
 +      struct simple_xattr *xattr;
 +      int ret = -ENODATA;
 +
 +      spin_lock(&xattrs->lock);
 +      list_for_each_entry(xattr, &xattrs->head, list) {
 +              if (strcmp(name, xattr->name))
 +                      continue;
 +
 +              ret = xattr->size;
 +              if (buffer) {
 +                      if (size < xattr->size)
 +                              ret = -ERANGE;
 +                      else
 +                              memcpy(buffer, xattr->value, xattr->size);
 +              }
 +              break;
 +      }
 +      spin_unlock(&xattrs->lock);
 +      return ret;
 +}
 +
 +static int __simple_xattr_set(struct simple_xattrs *xattrs, const char *name,
 +                            const void *value, size_t size, int flags)
 +{
 +      struct simple_xattr *xattr;
 +      struct simple_xattr *uninitialized_var(new_xattr);
 +      int err = 0;
 +
 +      /* value == NULL means remove */
 +      if (value) {
 +              new_xattr = simple_xattr_alloc(value, size);
 +              if (!new_xattr)
 +                      return -ENOMEM;
 +
 +              new_xattr->name = kstrdup(name, GFP_KERNEL);
 +              if (!new_xattr->name) {
 +                      kfree(new_xattr);
 +                      return -ENOMEM;
 +              }
 +      }
 +
 +      spin_lock(&xattrs->lock);
 +      list_for_each_entry(xattr, &xattrs->head, list) {
 +              if (!strcmp(name, xattr->name)) {
 +                      if (flags & XATTR_CREATE) {
 +                              xattr = new_xattr;
 +                              err = -EEXIST;
 +                      } else if (new_xattr) {
 +                              list_replace(&xattr->list, &new_xattr->list);
 +                      } else {
 +                              list_del(&xattr->list);
 +                      }
 +                      goto out;
 +              }
 +      }
 +      if (flags & XATTR_REPLACE) {
 +              xattr = new_xattr;
 +              err = -ENODATA;
 +      } else {
 +              list_add(&new_xattr->list, &xattrs->head);
 +              xattr = NULL;
 +      }
 +out:
 +      spin_unlock(&xattrs->lock);
 +      if (xattr) {
 +              kfree(xattr->name);
 +              kfree(xattr);
 +      }
 +      return err;
 +
 +}
 +
 +/**
 + * simple_xattr_set - xattr SET operation for in-memory/pseudo filesystems
 + * @xattrs: target simple_xattr list
 + * @name: name of the new extended attribute
 + * @value: value of the new xattr. If %NULL, will remove the attribute
 + * @size: size of the new xattr
 + * @flags: %XATTR_{CREATE|REPLACE}
 + *
 + * %XATTR_CREATE is set, the xattr shouldn't exist already; otherwise fails
 + * with -EEXIST.  If %XATTR_REPLACE is set, the xattr should exist;
 + * otherwise, fails with -ENODATA.
 + *
 + * Returns 0 on success, -errno on failure.
 + */
 +int simple_xattr_set(struct simple_xattrs *xattrs, const char *name,
 +                   const void *value, size_t size, int flags)
 +{
 +      if (size == 0)
 +              value = ""; /* empty EA, do not remove */
 +      return __simple_xattr_set(xattrs, name, value, size, flags);
 +}
 +
 +/*
 + * xattr REMOVE operation for in-memory/pseudo filesystems
 + */
 +int simple_xattr_remove(struct simple_xattrs *xattrs, const char *name)
 +{
 +      return __simple_xattr_set(xattrs, name, NULL, 0, XATTR_REPLACE);
 +}
 +
 +static bool xattr_is_trusted(const char *name)
 +{
 +      return !strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN);
 +}
 +
 +/*
 + * xattr LIST operation for in-memory/pseudo filesystems
 + */
 +ssize_t simple_xattr_list(struct simple_xattrs *xattrs, char *buffer,
 +                        size_t size)
 +{
 +      bool trusted = capable(CAP_SYS_ADMIN);
 +      struct simple_xattr *xattr;
 +      size_t used = 0;
 +
 +      spin_lock(&xattrs->lock);
 +      list_for_each_entry(xattr, &xattrs->head, list) {
 +              size_t len;
 +
 +              /* skip "trusted." attributes for unprivileged callers */
 +              if (!trusted && xattr_is_trusted(xattr->name))
 +                      continue;
 +
 +              len = strlen(xattr->name) + 1;
 +              used += len;
 +              if (buffer) {
 +                      if (size < used) {
 +                              used = -ERANGE;
 +                              break;
 +                      }
 +                      memcpy(buffer, xattr->name, len);
 +                      buffer += len;
 +              }
 +      }
 +      spin_unlock(&xattrs->lock);
 +
 +      return used;
 +}
 +
 +/*
 + * Adds an extended attribute to the list
 + */
 +void simple_xattr_list_add(struct simple_xattrs *xattrs,
 +                         struct simple_xattr *new_xattr)
 +{
 +      spin_lock(&xattrs->lock);
 +      list_add(&new_xattr->list, &xattrs->head);
 +      spin_unlock(&xattrs->lock);
 +}
diff --combined include/linux/sched.h
index 765dffbb085ed2da1a70241658bc11daae5716e8,f64d092f2bedc686eb15d82a81e2199837639667..d23ca6245d54331466d4ad7c32e2533bf4cdcae8
@@@ -273,11 -273,11 +273,11 @@@ extern void init_idle_bootup_task(struc
  extern int runqueue_is_locked(int cpu);
  
  #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ)
 -extern void select_nohz_load_balancer(int stop_tick);
 +extern void nohz_balance_enter_idle(int cpu);
  extern void set_cpu_sd_state_idle(void);
  extern int get_nohz_timer_target(void);
  #else
 -static inline void select_nohz_load_balancer(int stop_tick) { }
 +static inline void nohz_balance_enter_idle(int cpu) { }
  static inline void set_cpu_sd_state_idle(void) { }
  #endif
  
@@@ -334,6 -334,14 +334,6 @@@ static inline void lockup_detector_init
  }
  #endif
  
 -#if defined(CONFIG_LOCKUP_DETECTOR) && defined(CONFIG_SUSPEND)
 -void lockup_detector_bootcpu_resume(void);
 -#else
 -static inline void lockup_detector_bootcpu_resume(void)
 -{
 -}
 -#endif
 -
  #ifdef CONFIG_DETECT_HUNG_TASK
  extern unsigned int  sysctl_hung_task_panic;
  extern unsigned long sysctl_hung_task_check_count;
@@@ -446,9 -454,6 +446,9 @@@ extern int get_dumpable(struct mm_struc
  #define MMF_VM_HUGEPAGE               17      /* set when VM_HUGEPAGE is set on vma */
  #define MMF_EXE_FILE_CHANGED  18      /* see prctl_set_mm_exe_file() */
  
 +#define MMF_HAS_UPROBES               19      /* has uprobes */
 +#define MMF_RECALC_UPROBES    20      /* MMF_HAS_UPROBES can be wrong */
 +
  #define MMF_INIT_MASK         (MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK)
  
  struct sighand_struct {
@@@ -681,6 -686,11 +681,6 @@@ struct signal_struct 
                                         * (notably. ptrace) */
  };
  
 -/* Context switch must be unlocked if interrupts are to be enabled */
 -#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
 -# define __ARCH_WANT_UNLOCKED_CTXSW
 -#endif
 -
  /*
   * Bits in flags field of signal_struct.
   */
@@@ -858,6 -868,7 +858,6 @@@ enum cpu_idle_type 
  #define SD_BALANCE_FORK               0x0008  /* Balance on fork, clone */
  #define SD_BALANCE_WAKE               0x0010  /* Balance on wakeup */
  #define SD_WAKE_AFFINE                0x0020  /* Wake task to waking CPU */
 -#define SD_PREFER_LOCAL               0x0040  /* Prefer to keep tasks local to this domain */
  #define SD_SHARE_CPUPOWER     0x0080  /* Domain members share cpu power */
  #define SD_SHARE_PKG_RESOURCES        0x0200  /* Domain members share cpu pkg resources */
  #define SD_SERIALIZE          0x0400  /* Only a single load balancing instance */
@@@ -951,6 -962,7 +951,6 @@@ struct sched_domain 
        unsigned int smt_gain;
        int flags;                      /* See SD_* */
        int level;
 -      int idle_buddy;                 /* cpu assigned to select_idle_sibling() */
  
        /* Runtime fields. */
        unsigned long last_balance;     /* init to jiffies. units in jiffies */
@@@ -1414,7 -1426,7 +1414,7 @@@ struct task_struct 
  
        struct audit_context *audit_context;
  #ifdef CONFIG_AUDITSYSCALL
-       uid_t loginuid;
+       kuid_t loginuid;
        unsigned int sessionid;
  #endif
        struct seccomp seccomp;
@@@ -1882,14 -1894,6 +1882,14 @@@ static inline void rcu_copy_process(str
  
  #endif
  
 +static inline void rcu_switch(struct task_struct *prev,
 +                            struct task_struct *next)
 +{
 +#ifdef CONFIG_RCU_USER_QS
 +      rcu_user_hooks_switch(prev, next);
 +#endif
 +}
 +
  static inline void tsk_restore_flags(struct task_struct *task,
                                unsigned long orig_flags, unsigned long flags)
  {
diff --combined include/linux/security.h
index d143b8e01954ab14ac224d9894bf65e111ee97fb,ebb92cb1fa286a3533b4c80220fdab0978a051d8..145accee9236206d8ae79d4eb7fe0bd80c40892f
@@@ -118,7 -118,6 +118,7 @@@ void reset_security_ops(void)
  extern unsigned long mmap_min_addr;
  extern unsigned long dac_mmap_min_addr;
  #else
 +#define mmap_min_addr         0UL
  #define dac_mmap_min_addr     0UL
  #endif
  
@@@ -1243,6 -1242,8 +1243,6 @@@ static inline void security_free_mnt_op
   *    Check that the @parent process has sufficient permission to trace the
   *    current process before allowing the current process to present itself
   *    to the @parent process for tracing.
 - *    The parent process will still have to undergo the ptrace_access_check
 - *    checks before it is allowed to trace this one.
   *    @parent contains the task_struct structure for debugger process.
   *    Return 0 if permission is granted.
   * @capget:
@@@ -1436,7 -1437,7 +1436,7 @@@ struct security_operations 
        int (*path_rename) (struct path *old_dir, struct dentry *old_dentry,
                            struct path *new_dir, struct dentry *new_dentry);
        int (*path_chmod) (struct path *path, umode_t mode);
-       int (*path_chown) (struct path *path, uid_t uid, gid_t gid);
+       int (*path_chown) (struct path *path, kuid_t uid, kgid_t gid);
        int (*path_chroot) (struct path *path);
  #endif
  
@@@ -2831,7 -2832,7 +2831,7 @@@ int security_path_link(struct dentry *o
  int security_path_rename(struct path *old_dir, struct dentry *old_dentry,
                         struct path *new_dir, struct dentry *new_dentry);
  int security_path_chmod(struct path *path, umode_t mode);
- int security_path_chown(struct path *path, uid_t uid, gid_t gid);
+ int security_path_chown(struct path *path, kuid_t uid, kgid_t gid);
  int security_path_chroot(struct path *path);
  #else /* CONFIG_SECURITY_PATH */
  static inline int security_path_unlink(struct path *dir, struct dentry *dentry)
@@@ -2887,7 -2888,7 +2887,7 @@@ static inline int security_path_chmod(s
        return 0;
  }
  
- static inline int security_path_chown(struct path *path, uid_t uid, gid_t gid)
+ static inline int security_path_chown(struct path *path, kuid_t uid, kgid_t gid)
  {
        return 0;
  }
diff --combined include/linux/tty.h
index 1509b86825d8ec710713405b47d9931f465ddb89,7298385815e60d67c5346e1bbae97d30f937225a..4f6c59a5fb7941996e8062dd41efe99ec3623555
@@@ -43,7 -43,6 +43,7 @@@
  #include <linux/tty_driver.h>
  #include <linux/tty_ldisc.h>
  #include <linux/mutex.h>
 +#include <linux/tty_flags.h>
  
  
  
@@@ -104,28 -103,28 +104,28 @@@ struct tty_bufhead 
  #define TTY_PARITY    3
  #define TTY_OVERRUN   4
  
 -#define INTR_CHAR(tty) ((tty)->termios->c_cc[VINTR])
 -#define QUIT_CHAR(tty) ((tty)->termios->c_cc[VQUIT])
 -#define ERASE_CHAR(tty) ((tty)->termios->c_cc[VERASE])
 -#define KILL_CHAR(tty) ((tty)->termios->c_cc[VKILL])
 -#define EOF_CHAR(tty) ((tty)->termios->c_cc[VEOF])
 -#define TIME_CHAR(tty) ((tty)->termios->c_cc[VTIME])
 -#define MIN_CHAR(tty) ((tty)->termios->c_cc[VMIN])
 -#define SWTC_CHAR(tty) ((tty)->termios->c_cc[VSWTC])
 -#define START_CHAR(tty) ((tty)->termios->c_cc[VSTART])
 -#define STOP_CHAR(tty) ((tty)->termios->c_cc[VSTOP])
 -#define SUSP_CHAR(tty) ((tty)->termios->c_cc[VSUSP])
 -#define EOL_CHAR(tty) ((tty)->termios->c_cc[VEOL])
 -#define REPRINT_CHAR(tty) ((tty)->termios->c_cc[VREPRINT])
 -#define DISCARD_CHAR(tty) ((tty)->termios->c_cc[VDISCARD])
 -#define WERASE_CHAR(tty) ((tty)->termios->c_cc[VWERASE])
 -#define LNEXT_CHAR(tty)       ((tty)->termios->c_cc[VLNEXT])
 -#define EOL2_CHAR(tty) ((tty)->termios->c_cc[VEOL2])
 -
 -#define _I_FLAG(tty, f)       ((tty)->termios->c_iflag & (f))
 -#define _O_FLAG(tty, f)       ((tty)->termios->c_oflag & (f))
 -#define _C_FLAG(tty, f)       ((tty)->termios->c_cflag & (f))
 -#define _L_FLAG(tty, f)       ((tty)->termios->c_lflag & (f))
 +#define INTR_CHAR(tty) ((tty)->termios.c_cc[VINTR])
 +#define QUIT_CHAR(tty) ((tty)->termios.c_cc[VQUIT])
 +#define ERASE_CHAR(tty) ((tty)->termios.c_cc[VERASE])
 +#define KILL_CHAR(tty) ((tty)->termios.c_cc[VKILL])
 +#define EOF_CHAR(tty) ((tty)->termios.c_cc[VEOF])
 +#define TIME_CHAR(tty) ((tty)->termios.c_cc[VTIME])
 +#define MIN_CHAR(tty) ((tty)->termios.c_cc[VMIN])
 +#define SWTC_CHAR(tty) ((tty)->termios.c_cc[VSWTC])
 +#define START_CHAR(tty) ((tty)->termios.c_cc[VSTART])
 +#define STOP_CHAR(tty) ((tty)->termios.c_cc[VSTOP])
 +#define SUSP_CHAR(tty) ((tty)->termios.c_cc[VSUSP])
 +#define EOL_CHAR(tty) ((tty)->termios.c_cc[VEOL])
 +#define REPRINT_CHAR(tty) ((tty)->termios.c_cc[VREPRINT])
 +#define DISCARD_CHAR(tty) ((tty)->termios.c_cc[VDISCARD])
 +#define WERASE_CHAR(tty) ((tty)->termios.c_cc[VWERASE])
 +#define LNEXT_CHAR(tty)       ((tty)->termios.c_cc[VLNEXT])
 +#define EOL2_CHAR(tty) ((tty)->termios.c_cc[VEOL2])
 +
 +#define _I_FLAG(tty, f)       ((tty)->termios.c_iflag & (f))
 +#define _O_FLAG(tty, f)       ((tty)->termios.c_oflag & (f))
 +#define _C_FLAG(tty, f)       ((tty)->termios.c_cflag & (f))
 +#define _L_FLAG(tty, f)       ((tty)->termios.c_lflag & (f))
  
  #define I_IGNBRK(tty) _I_FLAG((tty), IGNBRK)
  #define I_BRKINT(tty) _I_FLAG((tty), BRKINT)
@@@ -269,11 -268,10 +269,11 @@@ struct tty_struct 
        struct mutex ldisc_mutex;
        struct tty_ldisc *ldisc;
  
 +      struct mutex legacy_mutex;
        struct mutex termios_mutex;
        spinlock_t ctrl_lock;
        /* Termios values are protected by the termios mutex */
 -      struct ktermios *termios, *termios_locked;
 +      struct ktermios termios, termios_locked;
        struct termiox *termiox;        /* May be NULL for unsupported */
        char name[64];
        struct pid *pgrp;               /* Protected by ctrl lock */
@@@ -412,10 -410,6 +412,10 @@@ extern int tty_register_driver(struct t
  extern int tty_unregister_driver(struct tty_driver *driver);
  extern struct device *tty_register_device(struct tty_driver *driver,
                                          unsigned index, struct device *dev);
 +extern struct device *tty_register_device_attr(struct tty_driver *driver,
 +                              unsigned index, struct device *device,
 +                              void *drvdata,
 +                              const struct attribute_group **attr_grp);
  extern void tty_unregister_device(struct tty_driver *driver, unsigned index);
  extern int tty_read_raw_data(struct tty_struct *tty, unsigned char *bufp,
                             int buflen);
@@@ -429,6 -423,7 +429,6 @@@ extern void tty_unthrottle(struct tty_s
  extern int tty_do_resize(struct tty_struct *tty, struct winsize *ws);
  extern void tty_driver_remove_tty(struct tty_driver *driver,
                                  struct tty_struct *tty);
 -extern void tty_shutdown(struct tty_struct *tty);
  extern void tty_free_termios(struct tty_struct *tty);
  extern int is_current_pgrp_orphaned(void);
  extern struct pid *tty_get_pgrp(struct tty_struct *tty);
@@@ -502,15 -497,6 +502,15 @@@ extern int tty_write_lock(struct tty_st
  #define tty_is_writelocked(tty)  (mutex_is_locked(&tty->atomic_write_lock))
  
  extern void tty_port_init(struct tty_port *port);
 +extern void tty_port_link_device(struct tty_port *port,
 +              struct tty_driver *driver, unsigned index);
 +extern struct device *tty_port_register_device(struct tty_port *port,
 +              struct tty_driver *driver, unsigned index,
 +              struct device *device);
 +extern struct device *tty_port_register_device_attr(struct tty_port *port,
 +              struct tty_driver *driver, unsigned index,
 +              struct device *device, void *drvdata,
 +              const struct attribute_group **attr_grp);
  extern int tty_port_alloc_xmit_buf(struct tty_port *port);
  extern void tty_port_free_xmit_buf(struct tty_port *port);
  extern void tty_port_put(struct tty_port *port);
@@@ -522,12 -508,6 +522,12 @@@ static inline struct tty_port *tty_port
        return port;
  }
  
 +/* If the cts flow control is enabled, return true. */
 +static inline bool tty_port_cts_enabled(struct tty_port *port)
 +{
 +      return port->flags & ASYNC_CTS_FLOW;
 +}
 +
  extern struct tty_struct *tty_port_tty_get(struct tty_port *port);
  extern void tty_port_tty_set(struct tty_port *port, struct tty_struct *tty);
  extern int tty_port_carrier_raised(struct tty_port *port);
@@@ -541,8 -521,6 +541,8 @@@ extern int tty_port_close_start(struct 
  extern void tty_port_close_end(struct tty_port *port, struct tty_struct *tty);
  extern void tty_port_close(struct tty_port *port,
                                struct tty_struct *tty, struct file *filp);
 +extern int tty_port_install(struct tty_port *port, struct tty_driver *driver,
 +                              struct tty_struct *tty);
  extern int tty_port_open(struct tty_port *port,
                                struct tty_struct *tty, struct file *filp);
  static inline int tty_port_users(struct tty_port *port)
@@@ -575,7 -553,7 +575,7 @@@ extern void tty_audit_fork(struct signa
  extern void tty_audit_tiocsti(struct tty_struct *tty, char ch);
  extern void tty_audit_push(struct tty_struct *tty);
  extern int tty_audit_push_task(struct task_struct *tsk,
-                              uid_t loginuid, u32 sessionid);
+                              kuid_t loginuid, u32 sessionid);
  #else
  static inline void tty_audit_add_data(struct tty_struct *tty,
                                      unsigned char *data, size_t size)
@@@ -594,7 -572,7 +594,7 @@@ static inline void tty_audit_push(struc
  {
  }
  static inline int tty_audit_push_task(struct task_struct *tsk,
-                                     uid_t loginuid, u32 sessionid)
+                                     kuid_t loginuid, u32 sessionid)
  {
        return 0;
  }
@@@ -627,12 -605,8 +627,12 @@@ extern long vt_compat_ioctl(struct tty_
  
  /* tty_mutex.c */
  /* functions for preparation of BKL removal */
 -extern void __lockfunc tty_lock(void) __acquires(tty_lock);
 -extern void __lockfunc tty_unlock(void) __releases(tty_lock);
 +extern void __lockfunc tty_lock(struct tty_struct *tty);
 +extern void __lockfunc tty_unlock(struct tty_struct *tty);
 +extern void __lockfunc tty_lock_pair(struct tty_struct *tty,
 +                              struct tty_struct *tty2);
 +extern void __lockfunc tty_unlock_pair(struct tty_struct *tty,
 +                              struct tty_struct *tty2);
  
  /*
   * this shall be called only from where BTM is held (like close)
  static inline void tty_wait_until_sent_from_close(struct tty_struct *tty,
                long timeout)
  {
 -      tty_unlock(); /* tty->ops->close holds the BTM, drop it while waiting */
 +      tty_unlock(tty); /* tty->ops->close holds the BTM, drop it while waiting */
        tty_wait_until_sent(tty, timeout);
 -      tty_lock();
 +      tty_lock(tty);
  }
  
  /*
   *
   * Do not use in new code.
   */
 -#define wait_event_interruptible_tty(wq, condition)                   \
 +#define wait_event_interruptible_tty(tty, wq, condition)              \
  ({                                                                    \
        int __ret = 0;                                                  \
        if (!(condition)) {                                             \
 -              __wait_event_interruptible_tty(wq, condition, __ret);   \
 +              __wait_event_interruptible_tty(tty, wq, condition, __ret);      \
        }                                                               \
        __ret;                                                          \
  })
  
 -#define __wait_event_interruptible_tty(wq, condition, ret)            \
 +#define __wait_event_interruptible_tty(tty, wq, condition, ret)               \
  do {                                                                  \
        DEFINE_WAIT(__wait);                                            \
                                                                        \
                if (condition)                                          \
                        break;                                          \
                if (!signal_pending(current)) {                         \
 -                      tty_unlock();                                   \
 +                      tty_unlock(tty);                                        \
                        schedule();                                     \
 -                      tty_lock();                                     \
 +                      tty_lock(tty);                                  \
                        continue;                                       \
                }                                                       \
                ret = -ERESTARTSYS;                                     \
diff --combined include/net/netns/ipv4.h
index eb24dbccd81e81aff5fac76c8792e93df66e7bb6,3516dc0cc61548190ece8b1b9e8407779eeb20a8..69e50c789d9663ec46f9cc743f8720e2ec1e927a
@@@ -5,6 -5,7 +5,7 @@@
  #ifndef __NETNS_IPV4_H__
  #define __NETNS_IPV4_H__
  
+ #include <linux/uidgid.h>
  #include <net/inet_frag.h>
  
  struct tcpm_hash_bucket;
@@@ -62,9 -63,10 +63,9 @@@ struct netns_ipv4 
        int sysctl_icmp_ratemask;
        int sysctl_icmp_errors_use_inbound_ifaddr;
  
-       unsigned int sysctl_ping_group_range[2];
+       kgid_t sysctl_ping_group_range[2];
        long sysctl_tcp_mem[3];
  
 -      atomic_t rt_genid;
        atomic_t dev_addr_genid;
  
  #ifdef CONFIG_IP_MROUTE
diff --combined include/net/sock.h
index 6e6ec18fb6d0a0f1a8e616c58479b57b6652512e,9d43736a869d6fddd45fff0d12d57d898af7fd73..0d7e9834d9be78dad67707301866d9568deb7942
@@@ -218,7 -218,6 +218,7 @@@ struct cg_proto
    *   @sk_route_nocaps: forbidden route capabilities (e.g NETIF_F_GSO_MASK)
    *   @sk_gso_type: GSO type (e.g. %SKB_GSO_TCPV4)
    *   @sk_gso_max_size: Maximum GSO segment size to build
 +  *   @sk_gso_max_segs: Maximum number of GSO segments
    *   @sk_lingertime: %SO_LINGER l_linger setting
    *   @sk_backlog: always used with the per-socket spinlock held
    *   @sk_callback_lock: used with the callbacks in the end of this struct
@@@ -339,7 -338,6 +339,7 @@@ struct sock 
        netdev_features_t       sk_route_nocaps;
        int                     sk_gso_type;
        unsigned int            sk_gso_max_size;
 +      u16                     sk_gso_max_segs;
        int                     sk_rcvlowat;
        unsigned long           sk_lingertime;
        struct sk_buff_head     sk_error_queue;
@@@ -606,6 -604,15 +606,15 @@@ static inline void sk_add_bind_node(str
  #define sk_for_each_bound(__sk, node, list) \
        hlist_for_each_entry(__sk, node, list, sk_bind_node)
  
+ static inline struct user_namespace *sk_user_ns(struct sock *sk)
+ {
+       /* Careful only use this in a context where these parameters
+        * can not change and must all be valid, such as recvmsg from
+        * userspace.
+        */
+       return sk->sk_socket->file->f_cred->user_ns;
+ }
  /* Sock flags */
  enum sock_flags {
        SOCK_DEAD,
@@@ -1332,7 -1339,7 +1341,7 @@@ static inline bool sk_wmem_schedule(str
  }
  
  static inline bool
 -sk_rmem_schedule(struct sock *sk, struct sk_buff *skb, unsigned int size)
 +sk_rmem_schedule(struct sock *sk, struct sk_buff *skb, int size)
  {
        if (!sk_has_account(sk))
                return true;
@@@ -1486,6 -1493,14 +1495,6 @@@ extern void *sock_kmalloc(struct sock *
  extern void sock_kfree_s(struct sock *sk, void *mem, int size);
  extern void sk_send_sigurg(struct sock *sk);
  
 -#ifdef CONFIG_CGROUPS
 -extern void sock_update_classid(struct sock *sk);
 -#else
 -static inline void sock_update_classid(struct sock *sk)
 -{
 -}
 -#endif
 -
  /*
   * Functions to fill in entries in struct proto_ops when a protocol
   * does not implement a particular function.
@@@ -1662,7 -1677,7 +1671,7 @@@ static inline void sock_graft(struct so
        write_unlock_bh(&sk->sk_callback_lock);
  }
  
- extern int sock_i_uid(struct sock *sk);
+ extern kuid_t sock_i_uid(struct sock *sk);
  extern unsigned long sock_i_ino(struct sock *sk);
  
  static inline struct dst_entry *
diff --combined include/net/tcp.h
index 1f000ffe70758c0596b8c4d18230e781b88f495f,91e746736a8f4c4d52428cc48dc6abff93f89616..9a0021d16d919a46240d5f502207d95c1a22853c
@@@ -464,7 -464,6 +464,7 @@@ extern int tcp_disconnect(struct sock *
  void tcp_connect_init(struct sock *sk);
  void tcp_finish_connect(struct sock *sk, struct sk_buff *skb);
  int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size);
 +void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb);
  
  /* From syncookies.c */
  extern __u32 syncookie_secret[2][16-4+SHA_DIGEST_WORDS];
@@@ -1510,7 -1509,8 +1510,8 @@@ struct tcp_iter_state 
        sa_family_t             family;
        enum tcp_seq_states     state;
        struct sock             *syn_wait_sk;
-       int                     bucket, offset, sbucket, num, uid;
+       int                     bucket, offset, sbucket, num;
+       kuid_t                  uid;
        loff_t                  last_pos;
  };
  
diff --combined include/net/xfrm.h
index 639dd1316d375aeb2802c73032cc4cae6dcb8b0c,1f217e2c5d82e9559f3573db06e0f80bf4858b8b..411d83c9821d1854b1468b24359354563835a8f6
@@@ -213,9 -213,6 +213,9 @@@ struct xfrm_state 
        struct xfrm_lifetime_cur curlft;
        struct tasklet_hrtimer  mtimer;
  
 +      /* used to fix curlft->add_time when changing date */
 +      long            saved_tmo;
 +
        /* Last used time */
        unsigned long           lastused;
  
@@@ -241,7 -238,6 +241,7 @@@ static inline struct net *xs_net(struc
  
  /* xflags - make enum if more show up */
  #define XFRM_TIME_DEFER       1
 +#define XFRM_SOFT_EXPIRE 2
  
  enum {
        XFRM_STATE_VOID,
@@@ -273,9 -269,6 +273,9 @@@ struct xfrm_replay 
        int     (*check)(struct xfrm_state *x,
                         struct sk_buff *skb,
                         __be32 net_seq);
 +      int     (*recheck)(struct xfrm_state *x,
 +                         struct sk_buff *skb,
 +                         __be32 net_seq);
        void    (*notify)(struct xfrm_state *x, int event);
        int     (*overflow)(struct xfrm_state *x, struct sk_buff *skb);
  };
@@@ -295,8 -288,6 +295,8 @@@ struct xfrm_policy_afinfo 
                                                  struct flowi *fl,
                                                  int reverse);
        int                     (*get_tos)(const struct flowi *fl);
 +      void                    (*init_dst)(struct net *net,
 +                                          struct xfrm_dst *dst);
        int                     (*init_path)(struct xfrm_dst *path,
                                             struct dst_entry *dst,
                                             int nfheader_len);
@@@ -671,7 -662,7 +671,7 @@@ struct xfrm_spi_skb_cb 
  /* Audit Information */
  struct xfrm_audit {
        u32     secid;
-       uid_t   loginuid;
+       kuid_t  loginuid;
        u32     sessionid;
  };
  
@@@ -690,13 -681,14 +690,14 @@@ static inline struct audit_buffer *xfrm
        return audit_buf;
  }
  
- static inline void xfrm_audit_helper_usrinfo(uid_t auid, u32 ses, u32 secid,
+ static inline void xfrm_audit_helper_usrinfo(kuid_t auid, u32 ses, u32 secid,
                                             struct audit_buffer *audit_buf)
  {
        char *secctx;
        u32 secctx_len;
  
-       audit_log_format(audit_buf, " auid=%u ses=%u", auid, ses);
+       audit_log_format(audit_buf, " auid=%u ses=%u",
+                        from_kuid(&init_user_ns, auid), ses);
        if (secid != 0 &&
            security_secid_to_secctx(secid, &secctx, &secctx_len) == 0) {
                audit_log_format(audit_buf, " subj=%s", secctx);
  }
  
  extern void xfrm_audit_policy_add(struct xfrm_policy *xp, int result,
-                                 u32 auid, u32 ses, u32 secid);
+                                 kuid_t auid, u32 ses, u32 secid);
  extern void xfrm_audit_policy_delete(struct xfrm_policy *xp, int result,
-                                 u32 auid, u32 ses, u32 secid);
+                                 kuid_t auid, u32 ses, u32 secid);
  extern void xfrm_audit_state_add(struct xfrm_state *x, int result,
-                                u32 auid, u32 ses, u32 secid);
+                                kuid_t auid, u32 ses, u32 secid);
  extern void xfrm_audit_state_delete(struct xfrm_state *x, int result,
-                                   u32 auid, u32 ses, u32 secid);
+                                   kuid_t auid, u32 ses, u32 secid);
  extern void xfrm_audit_state_replay_overflow(struct xfrm_state *x,
                                             struct sk_buff *skb);
  extern void xfrm_audit_state_replay(struct xfrm_state *x,
@@@ -725,22 -717,22 +726,22 @@@ extern void xfrm_audit_state_icvfail(st
  #else
  
  static inline void xfrm_audit_policy_add(struct xfrm_policy *xp, int result,
-                                 u32 auid, u32 ses, u32 secid)
+                                 kuid_t auid, u32 ses, u32 secid)
  {
  }
  
  static inline void xfrm_audit_policy_delete(struct xfrm_policy *xp, int result,
-                                 u32 auid, u32 ses, u32 secid)
+                                 kuid_t auid, u32 ses, u32 secid)
  {
  }
  
  static inline void xfrm_audit_state_add(struct xfrm_state *x, int result,
-                                u32 auid, u32 ses, u32 secid)
+                                kuid_t auid, u32 ses, u32 secid)
  {
  }
  
  static inline void xfrm_audit_state_delete(struct xfrm_state *x, int result,
-                                   u32 auid, u32 ses, u32 secid)
+                                   kuid_t auid, u32 ses, u32 secid)
  {
  }
  
diff --combined init/Kconfig
index 73e4adfa91dca4783e53772c27a3b29e77ef704f,577916d8c9d86853e6314bdcd7e24738663f4548..cb003a3c9122e5be10ec0628049f954a4e3310a9
@@@ -267,106 -267,6 +267,106 @@@ config POSIX_MQUEUE_SYSCT
        depends on SYSCTL
        default y
  
 +config FHANDLE
 +      bool "open by fhandle syscalls"
 +      select EXPORTFS
 +      help
 +        If you say Y here, a user level program will be able to map
 +        file names to handle and then later use the handle for
 +        different file system operations. This is useful in implementing
 +        userspace file servers, which now track files using handles instead
 +        of names. The handle would remain the same even if file names
 +        get renamed. Enables open_by_handle_at(2) and name_to_handle_at(2)
 +        syscalls.
 +
 +config AUDIT
 +      bool "Auditing support"
 +      depends on NET
 +      help
 +        Enable auditing infrastructure that can be used with another
 +        kernel subsystem, such as SELinux (which requires this for
 +        logging of avc messages output).  Does not do system-call
 +        auditing without CONFIG_AUDITSYSCALL.
 +
 +config AUDITSYSCALL
 +      bool "Enable system-call auditing support"
 +      depends on AUDIT && (X86 || PPC || S390 || IA64 || UML || SPARC64 || SUPERH || (ARM && AEABI && !OABI_COMPAT))
 +      default y if SECURITY_SELINUX
 +      help
 +        Enable low-overhead system-call auditing infrastructure that
 +        can be used independently or with another kernel subsystem,
 +        such as SELinux.
 +
 +config AUDIT_WATCH
 +      def_bool y
 +      depends on AUDITSYSCALL
 +      select FSNOTIFY
 +
 +config AUDIT_TREE
 +      def_bool y
 +      depends on AUDITSYSCALL
 +      select FSNOTIFY
 +
 +config AUDIT_LOGINUID_IMMUTABLE
 +      bool "Make audit loginuid immutable"
 +      depends on AUDIT
 +      help
 +        The config option toggles if a task setting its loginuid requires
 +        CAP_SYS_AUDITCONTROL or if that task should require no special permissions
 +        but should instead only allow setting its loginuid if it was never
 +        previously set.  On systems which use systemd or a similar central
 +        process to restart login services this should be set to true.  On older
 +        systems in which an admin would typically have to directly stop and
 +        start processes this should be set to false.  Setting this to true allows
 +        one to drop potentially dangerous capabilites from the login tasks,
 +        but may not be backwards compatible with older init systems.
 +
 +source "kernel/irq/Kconfig"
 +source "kernel/time/Kconfig"
 +
 +menu "CPU/Task time and stats accounting"
 +
 +choice
 +      prompt "Cputime accounting"
 +      default TICK_CPU_ACCOUNTING if !PPC64
 +      default VIRT_CPU_ACCOUNTING if PPC64
 +
 +# Kind of a stub config for the pure tick based cputime accounting
 +config TICK_CPU_ACCOUNTING
 +      bool "Simple tick based cputime accounting"
 +      depends on !S390
 +      help
 +        This is the basic tick based cputime accounting that maintains
 +        statistics about user, system and idle time spent on per jiffies
 +        granularity.
 +
 +        If unsure, say Y.
 +
 +config VIRT_CPU_ACCOUNTING
 +      bool "Deterministic task and CPU time accounting"
 +      depends on HAVE_VIRT_CPU_ACCOUNTING
 +      help
 +        Select this option to enable more accurate task and CPU time
 +        accounting.  This is done by reading a CPU counter on each
 +        kernel entry and exit and on transitions within the kernel
 +        between system, softirq and hardirq state, so there is a
 +        small performance impact.  In the case of s390 or IBM POWER > 5,
 +        this also enables accounting of stolen time on logically-partitioned
 +        systems.
 +
 +config IRQ_TIME_ACCOUNTING
 +      bool "Fine granularity task level IRQ time accounting"
 +      depends on HAVE_IRQ_TIME_ACCOUNTING
 +      help
 +        Select this option to enable fine granularity task irq time
 +        accounting. This is done by reading a timestamp on each
 +        transitions between softirq and hardirq state, so there can be a
 +        small performance impact.
 +
 +        If in doubt, say N here.
 +
 +endchoice
 +
  config BSD_PROCESS_ACCT
        bool "BSD Process Accounting"
        help
@@@ -392,6 -292,18 +392,6 @@@ config BSD_PROCESS_ACCT_V
          for processing it. A preliminary version of these tools is available
          at <http://www.gnu.org/software/acct/>.
  
 -config FHANDLE
 -      bool "open by fhandle syscalls"
 -      select EXPORTFS
 -      help
 -        If you say Y here, a user level program will be able to map
 -        file names to handle and then later use the handle for
 -        different file system operations. This is useful in implementing
 -        userspace file servers, which now track files using handles instead
 -        of names. The handle would remain the same even if file names
 -        get renamed. Enables open_by_handle_at(2) and name_to_handle_at(2)
 -        syscalls.
 -
  config TASKSTATS
        bool "Export task/process statistics through netlink (EXPERIMENTAL)"
        depends on NET
@@@ -434,7 -346,50 +434,7 @@@ config TASK_IO_ACCOUNTIN
  
          Say N if unsure.
  
 -config AUDIT
 -      bool "Auditing support"
 -      depends on NET
 -      help
 -        Enable auditing infrastructure that can be used with another
 -        kernel subsystem, such as SELinux (which requires this for
 -        logging of avc messages output).  Does not do system-call
 -        auditing without CONFIG_AUDITSYSCALL.
 -
 -config AUDITSYSCALL
 -      bool "Enable system-call auditing support"
 -      depends on AUDIT && (X86 || PPC || S390 || IA64 || UML || SPARC64 || SUPERH || (ARM && AEABI && !OABI_COMPAT))
 -      default y if SECURITY_SELINUX
 -      help
 -        Enable low-overhead system-call auditing infrastructure that
 -        can be used independently or with another kernel subsystem,
 -        such as SELinux.
 -
 -config AUDIT_WATCH
 -      def_bool y
 -      depends on AUDITSYSCALL
 -      select FSNOTIFY
 -
 -config AUDIT_TREE
 -      def_bool y
 -      depends on AUDITSYSCALL
 -      select FSNOTIFY
 -
 -config AUDIT_LOGINUID_IMMUTABLE
 -      bool "Make audit loginuid immutable"
 -      depends on AUDIT
 -      help
 -        The config option toggles if a task setting its loginuid requires
 -        CAP_SYS_AUDITCONTROL or if that task should require no special permissions
 -        but should instead only allow setting its loginuid if it was never
 -        previously set.  On systems which use systemd or a similar central
 -        process to restart login services this should be set to true.  On older
 -        systems in which an admin would typically have to directly stop and
 -        start processes this should be set to false.  Setting this to true allows
 -        one to drop potentially dangerous capabilites from the login tasks,
 -        but may not be backwards compatible with older init systems.
 -
 -source "kernel/irq/Kconfig"
 -source "kernel/time/Kconfig"
 +endmenu # "CPU/Task time and stats accounting"
  
  menu "RCU Subsystem"
  
@@@ -486,24 -441,6 +486,24 @@@ config PREEMPT_RC
          This option enables preemptible-RCU code that is common between
          the TREE_PREEMPT_RCU and TINY_PREEMPT_RCU implementations.
  
 +config RCU_USER_QS
 +      bool "Consider userspace as in RCU extended quiescent state"
 +      depends on HAVE_RCU_USER_QS && SMP
 +      help
 +        This option sets hooks on kernel / userspace boundaries and
 +        puts RCU in extended quiescent state when the CPU runs in
 +        userspace. It means that when a CPU runs in userspace, it is
 +        excluded from the global RCU state machine and thus doesn't
 +        to keep the timer tick on for RCU.
 +
 +config RCU_USER_QS_FORCE
 +      bool "Force userspace extended QS by default"
 +      depends on RCU_USER_QS
 +      help
 +        Set the hooks in user/kernel boundaries by default in order to
 +        test this feature that treats userspace as an extended quiescent
 +        state until we have a real user like a full adaptive nohz option.
 +
  config RCU_FANOUT
        int "Tree-based hierarchical RCU fanout value"
        range 2 64 if 64BIT
@@@ -986,113 -923,24 +986,24 @@@ config UIDGID_CONVERTE
        bool
        default y
  
-       # List of kernel pieces that need user namespace work
-       # Features
-       depends on SYSVIPC = n
-       depends on IMA = n
-       depends on EVM = n
-       depends on KEYS = n
-       depends on AUDIT = n
-       depends on AUDITSYSCALL = n
-       depends on TASKSTATS = n
-       depends on TRACING = n
-       depends on FS_POSIX_ACL = n
-       depends on QUOTA = n
-       depends on QUOTACTL = n
-       depends on DEBUG_CREDENTIALS = n
-       depends on BSD_PROCESS_ACCT = n
-       depends on DRM = n
-       depends on PROC_EVENTS = n
        # Networking
-       depends on NET = n
        depends on NET_9P = n
-       depends on IPX = n
-       depends on PHONET = n
-       depends on NET_CLS_FLOW = n
-       depends on NETFILTER_XT_MATCH_OWNER = n
-       depends on NETFILTER_XT_MATCH_RECENT = n
-       depends on NETFILTER_XT_TARGET_LOG = n
-       depends on NETFILTER_NETLINK_LOG = n
-       depends on INET = n
-       depends on IPV6 = n
-       depends on IP_SCTP = n
-       depends on AF_RXRPC = n
-       depends on LLC2 = n
-       depends on NET_KEY = n
-       depends on INET_DIAG = n
-       depends on DNS_RESOLVER = n
-       depends on AX25 = n
-       depends on ATALK = n
  
        # Filesystems
-       depends on USB_DEVICEFS = n
-       depends on USB_GADGETFS = n
-       depends on USB_FUNCTIONFS = n
-       depends on DEVTMPFS = n
-       depends on XENFS = n
        depends on 9P_FS = n
-       depends on ADFS_FS = n
-       depends on AFFS_FS = n
        depends on AFS_FS = n
        depends on AUTOFS4_FS = n
-       depends on BEFS_FS = n
-       depends on BFS_FS = n
-       depends on BTRFS_FS = n
        depends on CEPH_FS = n
        depends on CIFS = n
        depends on CODA_FS = n
-       depends on CONFIGFS_FS = n
-       depends on CRAMFS = n
-       depends on DEBUG_FS = n
-       depends on ECRYPT_FS = n
-       depends on EFS_FS = n
-       depends on EXOFS_FS = n
-       depends on FAT_FS = n
        depends on FUSE_FS = n
        depends on GFS2_FS = n
-       depends on HFS_FS = n
-       depends on HFSPLUS_FS = n
-       depends on HPFS_FS = n
-       depends on HUGETLBFS = n
-       depends on ISO9660_FS = n
-       depends on JFFS2_FS = n
-       depends on JFS_FS = n
-       depends on LOGFS = n
-       depends on MINIX_FS = n
        depends on NCP_FS = n
        depends on NFSD = n
        depends on NFS_FS = n
-       depends on NILFS2_FS = n
-       depends on NTFS_FS = n
        depends on OCFS2_FS = n
-       depends on OMFS_FS = n
-       depends on QNX4FS_FS = n
-       depends on QNX6FS_FS = n
-       depends on REISERFS_FS = n
-       depends on SQUASHFS = n
-       depends on SYSV_FS = n
-       depends on UBIFS_FS = n
-       depends on UDF_FS = n
-       depends on UFS_FS = n
-       depends on VXFS_FS = n
        depends on XFS_FS = n
  
-       depends on !UML || HOSTFS = n
-       # The rare drivers that won't build
-       depends on AIRO = n
-       depends on AIRO_CS = n
-       depends on TUN = n
-       depends on INFINIBAND_QIB = n
-       depends on BLK_DEV_LOOP = n
-       depends on ANDROID_BINDER_IPC = n
-       # Security modules
-       depends on SECURITY_TOMOYO = n
-       depends on SECURITY_APPARMOR = n
  config UIDGID_STRICT_TYPE_CHECKS
        bool "Require conversions between uid/gids and their internal representation"
        depends on UIDGID_CONVERTED
@@@ -1216,8 -1064,7 +1127,8 @@@ menuconfig EXPER
  
  config UID16
        bool "Enable 16-bit UID system calls" if EXPERT
 -      depends on ARM || BLACKFIN || CRIS || FRV || H8300 || X86_32 || M68K || (S390 && !64BIT) || SUPERH || SPARC32 || (SPARC64 && COMPAT) || UML || (X86_64 && IA32_EMULATION)
 +      depends on ARM || BLACKFIN || CRIS || FRV || H8300 || X86_32 || M68K || (S390 && !64BIT) || SUPERH || SPARC32 || (SPARC64 && COMPAT) || UML || (X86_64 && IA32_EMULATION) \
 +              || AARCH32_EMULATION
        default y
        help
          This enables the legacy 16-bit UID syscall wrappers.
@@@ -1265,7 -1112,13 +1176,7 @@@ config KALLSYMS_AL
           Say N unless you really need all symbols.
  
  config HOTPLUG
 -      bool "Support for hot-pluggable devices" if EXPERT
 -      default y
 -      help
 -        This option is provided for the case where no hotplug or uevent
 -        capabilities is wanted by the kernel.  You should only consider
 -        disabling this option for embedded systems that do not use modules, a
 -        dynamic /dev tree, or dynamic device discovery.  Just say Y.
 +      def_bool y
  
  config PRINTK
        default y
diff --combined kernel/pid_namespace.c
index 6144bab8fd8eeed14327c116bea39ba0061cbdb5,baa528d7dfbde35908b84c830761e510cce8f5cf..478bad2745e3a8357fcc2f9a56059d167bf67e49
@@@ -16,6 -16,7 +16,7 @@@
  #include <linux/slab.h>
  #include <linux/proc_fs.h>
  #include <linux/reboot.h>
+ #include <linux/export.h>
  
  #define BITS_PER_PAGE         (PAGE_SIZE*8)
  
@@@ -144,6 -145,7 +145,7 @@@ void free_pid_ns(struct kref *kref
        if (parent != NULL)
                put_pid_ns(parent);
  }
+ EXPORT_SYMBOL_GPL(free_pid_ns);
  
  void zap_pid_ns_processes(struct pid_namespace *pid_ns)
  {
@@@ -232,19 -234,15 +234,19 @@@ static int pid_ns_ctl_handler(struct ct
         */
  
        tmp.data = &current->nsproxy->pid_ns->last_pid;
 -      return proc_dointvec(&tmp, write, buffer, lenp, ppos);
 +      return proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
  }
  
 +extern int pid_max;
 +static int zero = 0;
  static struct ctl_table pid_ns_ctl_table[] = {
        {
                .procname = "ns_last_pid",
                .maxlen = sizeof(int),
                .mode = 0666, /* permissions are checked in the handler */
                .proc_handler = pid_ns_ctl_handler,
 +              .extra1 = &zero,
 +              .extra2 = &pid_max,
        },
        { }
  };
diff --combined kernel/trace/trace.c
index 1ec5c1dab6295d921f7c7763f799fbca5e71bc60,c9ace838d5095c34c8b93cbd168cc5a556bfdf25..cdcb59450b491c2af7acf64bda54a82eaf0c16ab
@@@ -328,7 -328,7 +328,7 @@@ static DECLARE_WAIT_QUEUE_HEAD(trace_wa
  unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
        TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | TRACE_ITER_SLEEP_TIME |
        TRACE_ITER_GRAPH_TIME | TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE |
 -      TRACE_ITER_IRQ_INFO;
 +      TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS;
  
  static int trace_stop_count;
  static DEFINE_RAW_SPINLOCK(tracing_start_lock);
@@@ -426,15 -426,15 +426,15 @@@ __setup("trace_buf_size=", set_buf_size
  
  static int __init set_tracing_thresh(char *str)
  {
 -      unsigned long threshhold;
 +      unsigned long threshold;
        int ret;
  
        if (!str)
                return 0;
 -      ret = strict_strtoul(str, 0, &threshhold);
 +      ret = strict_strtoul(str, 0, &threshold);
        if (ret < 0)
                return 0;
 -      tracing_thresh = threshhold * 1000;
 +      tracing_thresh = threshold * 1000;
        return 1;
  }
  __setup("tracing_thresh=", set_tracing_thresh);
@@@ -470,7 -470,6 +470,7 @@@ static const char *trace_options[] = 
        "overwrite",
        "disable_on_free",
        "irq-info",
 +      "markers",
        NULL
  };
  
@@@ -2061,7 -2060,8 +2061,8 @@@ print_trace_header(struct seq_file *m, 
        seq_puts(m, "#    -----------------\n");
        seq_printf(m, "#    | task: %.16s-%d "
                   "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
-                  data->comm, data->pid, data->uid, data->nice,
+                  data->comm, data->pid,
+                  from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
                   data->policy, data->rt_priority);
        seq_puts(m, "#    -----------------\n");
  
@@@ -3887,9 -3887,6 +3888,9 @@@ tracing_mark_write(struct file *filp, c
        if (tracing_disabled)
                return -EINVAL;
  
 +      if (!(trace_flags & TRACE_ITER_MARKERS))
 +              return -EINVAL;
 +
        if (cnt > TRACE_BUF_SIZE)
                cnt = TRACE_BUF_SIZE;
  
diff --combined kernel/trace/trace.h
index 63a2da0b9a6edf0881cc6f536b014daf11b8ab70,40a6f30c985f2faa4321c8afc08c05ee286283de..c15f528c1af4e4ee44b308bddf1bb4b83195d4c2
@@@ -147,7 -147,7 +147,7 @@@ struct trace_array_cpu 
        unsigned long           skipped_entries;
        cycle_t                 preempt_timestamp;
        pid_t                   pid;
-       uid_t                   uid;
+       kuid_t                  uid;
        char                    comm[TASK_COMM_LEN];
  };
  
@@@ -472,11 -472,11 +472,11 @@@ extern void trace_find_cmdline(int pid
  
  #ifdef CONFIG_DYNAMIC_FTRACE
  extern unsigned long ftrace_update_tot_cnt;
 +#endif
  #define DYN_FTRACE_TEST_NAME trace_selftest_dynamic_test_func
  extern int DYN_FTRACE_TEST_NAME(void);
  #define DYN_FTRACE_TEST_NAME2 trace_selftest_dynamic_test_func2
  extern int DYN_FTRACE_TEST_NAME2(void);
 -#endif
  
  extern int ring_buffer_expanded;
  extern bool tracing_selftest_disabled;
@@@ -680,7 -680,6 +680,7 @@@ enum trace_iterator_flags 
        TRACE_ITER_OVERWRITE            = 0x200000,
        TRACE_ITER_STOP_ON_FREE         = 0x400000,
        TRACE_ITER_IRQ_INFO             = 0x800000,
 +      TRACE_ITER_MARKERS              = 0x1000000,
  };
  
  /*
diff --combined net/core/dev.c
index 36c4a0cdb6c128bb0174bbf4302e0e65e9309cbc,1c0d0823a5a42eb901a1e1f524a129a3c0f87e6a..17e912f9b71110fe18df0e8e236536c82de98e5c
@@@ -1055,8 -1055,6 +1055,8 @@@ rollback
   */
  int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
  {
 +      char *new_ifalias;
 +
        ASSERT_RTNL();
  
        if (len >= IFALIASZ)
                return 0;
        }
  
 -      dev->ifalias = krealloc(dev->ifalias, len + 1, GFP_KERNEL);
 -      if (!dev->ifalias)
 +      new_ifalias = krealloc(dev->ifalias, len + 1, GFP_KERNEL);
 +      if (!new_ifalias)
                return -ENOMEM;
 +      dev->ifalias = new_ifalias;
  
        strlcpy(dev->ifalias, alias, len+1);
        return len;
@@@ -1642,19 -1639,6 +1642,19 @@@ static inline int deliver_skb(struct sk
        return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
  }
  
 +static inline bool skb_loop_sk(struct packet_type *ptype, struct sk_buff *skb)
 +{
 +      if (ptype->af_packet_priv == NULL)
 +              return false;
 +
 +      if (ptype->id_match)
 +              return ptype->id_match(ptype, skb->sk);
 +      else if ((struct sock *)ptype->af_packet_priv == skb->sk)
 +              return true;
 +
 +      return false;
 +}
 +
  /*
   *    Support routine. Sends outgoing frames to any network
   *    taps currently in use.
@@@ -1672,7 -1656,8 +1672,7 @@@ static void dev_queue_xmit_nit(struct s
                 * they originated from - MvS ([email protected])
                 */
                if ((ptype->dev == dev || !ptype->dev) &&
 -                  (ptype->af_packet_priv == NULL ||
 -                   (struct sock *)ptype->af_packet_priv != skb->sk)) {
 +                  (!skb_loop_sk(ptype, skb))) {
                        if (pt_prev) {
                                deliver_skb(skb2, pt_prev, skb->dev);
                                pt_prev = ptype;
@@@ -2134,8 -2119,7 +2134,8 @@@ static bool can_checksum_protocol(netde
  static netdev_features_t harmonize_features(struct sk_buff *skb,
        __be16 protocol, netdev_features_t features)
  {
 -      if (!can_checksum_protocol(features, protocol)) {
 +      if (skb->ip_summed != CHECKSUM_NONE &&
 +          !can_checksum_protocol(features, protocol)) {
                features &= ~NETIF_F_ALL_CSUM;
                features &= ~NETIF_F_SG;
        } else if (illegal_highdma(skb->dev, skb)) {
@@@ -2150,9 -2134,6 +2150,9 @@@ netdev_features_t netif_skb_features(st
        __be16 protocol = skb->protocol;
        netdev_features_t features = skb->dev->features;
  
 +      if (skb_shinfo(skb)->gso_segs > skb->dev->gso_max_segs)
 +              features &= ~NETIF_F_GSO_MASK;
 +
        if (protocol == htons(ETH_P_8021Q)) {
                struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
                protocol = veh->h_vlan_encapsulated_proto;
@@@ -2648,16 -2629,15 +2648,16 @@@ void __skb_get_rxhash(struct sk_buff *s
        if (!skb_flow_dissect(skb, &keys))
                return;
  
 -      if (keys.ports) {
 -              if ((__force u16)keys.port16[1] < (__force u16)keys.port16[0])
 -                      swap(keys.port16[0], keys.port16[1]);
 +      if (keys.ports)
                skb->l4_rxhash = 1;
 -      }
  
        /* get a consistent hash (same value on both flow directions) */
 -      if ((__force u32)keys.dst < (__force u32)keys.src)
 +      if (((__force u32)keys.dst < (__force u32)keys.src) ||
 +          (((__force u32)keys.dst == (__force u32)keys.src) &&
 +           ((__force u16)keys.port16[1] < (__force u16)keys.port16[0]))) {
                swap(keys.dst, keys.src);
 +              swap(keys.port16[0], keys.port16[1]);
 +      }
  
        hash = jhash_3words((__force u32)keys.dst,
                            (__force u32)keys.src,
@@@ -3323,7 -3303,7 +3323,7 @@@ ncls
  
        if (pt_prev) {
                if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
 -                      ret = -ENOMEM;
 +                      goto drop;
                else
                        ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
        } else {
@@@ -4512,8 -4492,8 +4512,8 @@@ static void dev_change_rx_flags(struct 
  static int __dev_set_promiscuity(struct net_device *dev, int inc)
  {
        unsigned int old_flags = dev->flags;
-       uid_t uid;
-       gid_t gid;
+       kuid_t uid;
+       kgid_t gid;
  
        ASSERT_RTNL();
  
                                "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
                                dev->name, (dev->flags & IFF_PROMISC),
                                (old_flags & IFF_PROMISC),
-                               audit_get_loginuid(current),
-                               uid, gid,
+                               from_kuid(&init_user_ns, audit_get_loginuid(current)),
+                               from_kuid(&init_user_ns, uid),
+                               from_kgid(&init_user_ns, gid),
                                audit_get_sessionid(current));
                }
  
@@@ -5746,7 -5727,6 +5747,7 @@@ EXPORT_SYMBOL(netdev_refcnt_read)
  
  /**
   * netdev_wait_allrefs - wait until all references are gone.
 + * @dev: target net_device
   *
   * This is called when unregistering network devices.
   *
@@@ -6007,7 -5987,6 +6008,7 @@@ struct net_device *alloc_netdev_mqs(in
        dev_net_set(dev, &init_net);
  
        dev->gso_max_size = GSO_MAX_SIZE;
 +      dev->gso_max_segs = GSO_MAX_SEGS;
  
        INIT_LIST_HEAD(&dev->napi_list);
        INIT_LIST_HEAD(&dev->unreg_list);
@@@ -6424,26 -6403,22 +6425,26 @@@ const char *netdev_drivername(const str
        return empty;
  }
  
 -int __netdev_printk(const char *level, const struct net_device *dev,
 +static int __netdev_printk(const char *level, const struct net_device *dev,
                           struct va_format *vaf)
  {
        int r;
  
 -      if (dev && dev->dev.parent)
 -              r = dev_printk(level, dev->dev.parent, "%s: %pV",
 -                             netdev_name(dev), vaf);
 -      else if (dev)
 +      if (dev && dev->dev.parent) {
 +              r = dev_printk_emit(level[1] - '0',
 +                                  dev->dev.parent,
 +                                  "%s %s %s: %pV",
 +                                  dev_driver_string(dev->dev.parent),
 +                                  dev_name(dev->dev.parent),
 +                                  netdev_name(dev), vaf);
 +      } else if (dev) {
                r = printk("%s%s: %pV", level, netdev_name(dev), vaf);
 -      else
 +      } else {
                r = printk("%s(NULL net_device): %pV", level, vaf);
 +      }
  
        return r;
  }
 -EXPORT_SYMBOL(__netdev_printk);
  
  int netdev_printk(const char *level, const struct net_device *dev,
                  const char *format, ...)
        vaf.va = &args;
  
        r = __netdev_printk(level, dev, &vaf);
 +
        va_end(args);
  
        return r;
@@@ -6478,7 -6452,6 +6479,7 @@@ int func(const struct net_device *dev, 
        vaf.va = &args;                                         \
                                                                \
        r = __netdev_printk(level, dev, &vaf);                  \
 +                                                              \
        va_end(args);                                           \
                                                                \
        return r;                                               \
diff --combined net/core/scm.c
index 040cebeed45b810cf9dd7d85c6ac2cbade98ee77,5472ae7a0657569987eb6798cd955e8b4da85e00..6ab491d6c26f43d84da26e9e0fc90a477ab49cea
  static __inline__ int scm_check_creds(struct ucred *creds)
  {
        const struct cred *cred = current_cred();
+       kuid_t uid = make_kuid(cred->user_ns, creds->uid);
+       kgid_t gid = make_kgid(cred->user_ns, creds->gid);
+       if (!uid_valid(uid) || !gid_valid(gid))
+               return -EINVAL;
  
        if ((creds->pid == task_tgid_vnr(current) || capable(CAP_SYS_ADMIN)) &&
-           ((creds->uid == cred->uid   || creds->uid == cred->euid ||
-             creds->uid == cred->suid) || capable(CAP_SETUID)) &&
-           ((creds->gid == cred->gid   || creds->gid == cred->egid ||
-             creds->gid == cred->sgid) || capable(CAP_SETGID))) {
+           ((uid_eq(uid, cred->uid)   || uid_eq(uid, cred->euid) ||
+             uid_eq(uid, cred->suid)) || capable(CAP_SETUID)) &&
+           ((gid_eq(gid, cred->gid)   || gid_eq(gid, cred->egid) ||
+             gid_eq(gid, cred->sgid)) || capable(CAP_SETGID))) {
               return 0;
        }
        return -EPERM;
@@@ -149,6 -154,9 +154,9 @@@ int __scm_send(struct socket *sock, str
                                goto error;
                        break;
                case SCM_CREDENTIALS:
+               {
+                       kuid_t uid;
+                       kgid_t gid;
                        if (cmsg->cmsg_len != CMSG_LEN(sizeof(struct ucred)))
                                goto error;
                        memcpy(&p->creds, CMSG_DATA(cmsg), sizeof(struct ucred));
                                p->pid = pid;
                        }
  
+                       err = -EINVAL;
+                       uid = make_kuid(current_user_ns(), p->creds.uid);
+                       gid = make_kgid(current_user_ns(), p->creds.gid);
+                       if (!uid_valid(uid) || !gid_valid(gid))
+                               goto error;
                        if (!p->cred ||
-                           (p->cred->euid != p->creds.uid) ||
-                           (p->cred->egid != p->creds.gid)) {
+                           !uid_eq(p->cred->euid, uid) ||
+                           !gid_eq(p->cred->egid, gid)) {
                                struct cred *cred;
                                err = -ENOMEM;
                                cred = prepare_creds();
                                if (!cred)
                                        goto error;
  
-                               cred->uid = cred->euid = p->creds.uid;
-                               cred->gid = cred->egid = p->creds.gid;
+                               cred->uid = cred->euid = uid;
+                               cred->gid = cred->egid = gid;
                                if (p->cred)
                                        put_cred(p->cred);
                                p->cred = cred;
                        }
                        break;
+               }
                default:
                        goto error;
                }
@@@ -265,7 -280,6 +280,7 @@@ void scm_detach_fds(struct msghdr *msg
        for (i=0, cmfptr=(__force int __user *)CMSG_DATA(cm); i<fdmax;
             i++, cmfptr++)
        {
 +              struct socket *sock;
                int new_fd;
                err = security_file_receive(fp[i]);
                if (err)
                }
                /* Bump the usage count and install the file. */
                get_file(fp[i]);
 +              sock = sock_from_file(fp[i], &err);
 +              if (sock)
 +                      sock_update_netprioidx(sock->sk, current);
                fd_install(new_fd, fp[i]);
        }
  
diff --combined net/core/sock.c
index 341fa1c3bd69a4fb9bf3e2e804e489ccedc02491,5c6a435717e05c68d399ba2c5850f355e40845e7..12cddd037bce296b1b38d60b8eb073f77d050108
@@@ -326,6 -326,17 +326,6 @@@ int __sk_backlog_rcv(struct sock *sk, s
  }
  EXPORT_SYMBOL(__sk_backlog_rcv);
  
 -#if defined(CONFIG_CGROUPS)
 -#if !defined(CONFIG_NET_CLS_CGROUP)
 -int net_cls_subsys_id = -1;
 -EXPORT_SYMBOL_GPL(net_cls_subsys_id);
 -#endif
 -#if !defined(CONFIG_NETPRIO_CGROUP)
 -int net_prio_subsys_id = -1;
 -EXPORT_SYMBOL_GPL(net_prio_subsys_id);
 -#endif
 -#endif
 -
  static int sock_set_timeout(long *timeo_p, char __user *optval, int optlen)
  {
        struct timeval tv;
@@@ -680,8 -691,7 +680,8 @@@ set_rcvbuf
  
        case SO_KEEPALIVE:
  #ifdef CONFIG_INET
 -              if (sk->sk_protocol == IPPROTO_TCP)
 +              if (sk->sk_protocol == IPPROTO_TCP &&
 +                  sk->sk_type == SOCK_STREAM)
                        tcp_set_keepalive(sk, valbool);
  #endif
                sock_valbool_flag(sk, SOCK_KEEPOPEN, valbool);
@@@ -858,8 -868,8 +858,8 @@@ void cred_to_ucred(struct pid *pid, con
        if (cred) {
                struct user_namespace *current_ns = current_user_ns();
  
-               ucred->uid = from_kuid(current_ns, cred->euid);
-               ucred->gid = from_kgid(current_ns, cred->egid);
+               ucred->uid = from_kuid_munged(current_ns, cred->euid);
+               ucred->gid = from_kgid_munged(current_ns, cred->egid);
        }
  }
  EXPORT_SYMBOL_GPL(cred_to_ucred);
@@@ -1213,7 -1223,6 +1213,7 @@@ static void sk_prot_free(struct proto *
  }
  
  #ifdef CONFIG_CGROUPS
 +#if IS_ENABLED(CONFIG_NET_CLS_CGROUP)
  void sock_update_classid(struct sock *sk)
  {
        u32 classid;
                sk->sk_classid = classid;
  }
  EXPORT_SYMBOL(sock_update_classid);
 +#endif
  
 +#if IS_ENABLED(CONFIG_NETPRIO_CGROUP)
  void sock_update_netprioidx(struct sock *sk, struct task_struct *task)
  {
        if (in_interrupt())
  }
  EXPORT_SYMBOL_GPL(sock_update_netprioidx);
  #endif
 +#endif
  
  /**
   *    sk_alloc - All socket objects are allocated here
@@@ -1452,7 -1458,6 +1452,7 @@@ void sk_setup_caps(struct sock *sk, str
                } else {
                        sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM;
                        sk->sk_gso_max_size = dst->dev->gso_max_size;
 +                      sk->sk_gso_max_segs = dst->dev->gso_max_segs;
                }
        }
  }
@@@ -1517,23 -1522,16 +1517,23 @@@ EXPORT_SYMBOL(sock_rfree)
  
  void sock_edemux(struct sk_buff *skb)
  {
 -      sock_put(skb->sk);
 +      struct sock *sk = skb->sk;
 +
 +#ifdef CONFIG_INET
 +      if (sk->sk_state == TCP_TIME_WAIT)
 +              inet_twsk_put(inet_twsk(sk));
 +      else
 +#endif
 +              sock_put(sk);
  }
  EXPORT_SYMBOL(sock_edemux);
  
int sock_i_uid(struct sock *sk)
kuid_t sock_i_uid(struct sock *sk)
  {
-       int uid;
+       kuid_t uid;
  
        read_lock_bh(&sk->sk_callback_lock);
-       uid = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_uid : 0;
+       uid = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_uid : GLOBAL_ROOT_UID;
        read_unlock_bh(&sk->sk_callback_lock);
        return uid;
  }
diff --combined net/ipv4/raw.c
index d23c6571ba1c34525114af16f0818cfe6bbf1f14,f2425785d40a241583fe5944b2f72f87f5d94fdb..73d1e4df4bf630f176f385b96639b4e803469458
@@@ -131,20 -131,18 +131,20 @@@ found
   *    0 - deliver
   *    1 - block
   */
 -static __inline__ int icmp_filter(struct sock *sk, struct sk_buff *skb)
 +static int icmp_filter(const struct sock *sk, const struct sk_buff *skb)
  {
 -      int type;
 +      struct icmphdr _hdr;
 +      const struct icmphdr *hdr;
  
 -      if (!pskb_may_pull(skb, sizeof(struct icmphdr)))
 +      hdr = skb_header_pointer(skb, skb_transport_offset(skb),
 +                               sizeof(_hdr), &_hdr);
 +      if (!hdr)
                return 1;
  
 -      type = icmp_hdr(skb)->type;
 -      if (type < 32) {
 +      if (hdr->type < 32) {
                __u32 data = raw_sk(sk)->filter.data;
  
 -              return ((1 << type) & data) != 0;
 +              return ((1U << hdr->type) & data) != 0;
        }
  
        /* Do not block unknown ICMP types */
@@@ -994,7 -992,9 +994,9 @@@ static void raw_sock_seq_show(struct se
                i, src, srcp, dest, destp, sp->sk_state,
                sk_wmem_alloc_get(sp),
                sk_rmem_alloc_get(sp),
-               0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
+               0, 0L, 0,
+               from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
+               0, sock_i_ino(sp),
                atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
  }
  
diff --combined net/ipv4/tcp_ipv4.c
index 00a748d14062d5f0410568be79aa6dc609a201be,642be8a4c6a33d2c88fa1de5f68c2579eb7b9920..be23a0b7b89e17e23f7b3a92d424ff8159c03721
@@@ -417,12 -417,10 +417,12 @@@ void tcp_v4_err(struct sk_buff *icmp_sk
  
                if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
                        tp->mtu_info = info;
 -                      if (!sock_owned_by_user(sk))
 +                      if (!sock_owned_by_user(sk)) {
                                tcp_v4_mtu_reduced(sk);
 -                      else
 -                              set_bit(TCP_MTU_REDUCED_DEFERRED, &tp->tsq_flags);
 +                      } else {
 +                              if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED, &tp->tsq_flags))
 +                                      sock_hold(sk);
 +                      }
                        goto out;
                }
  
@@@ -1464,7 -1462,6 +1464,7 @@@ struct sock *tcp_v4_syn_recv_sock(struc
                goto exit_nonewsk;
  
        newsk->sk_gso_type = SKB_GSO_TCPV4;
 +      inet_sk_rx_dst_set(newsk, skb);
  
        newtp                 = tcp_sk(newsk);
        newinet               = inet_sk(newsk);
@@@ -1630,6 -1627,9 +1630,6 @@@ int tcp_v4_do_rcv(struct sock *sk, stru
                                sk->sk_rx_dst = NULL;
                        }
                }
 -              if (unlikely(sk->sk_rx_dst == NULL))
 -                      inet_sk_rx_dst_set(sk, skb);
 -
                if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len)) {
                        rsk = sk;
                        goto reset;
@@@ -1872,21 -1872,10 +1872,21 @@@ static struct timewait_sock_ops tcp_tim
        .twsk_destructor= tcp_twsk_destructor,
  };
  
 +void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
 +{
 +      struct dst_entry *dst = skb_dst(skb);
 +
 +      dst_hold(dst);
 +      sk->sk_rx_dst = dst;
 +      inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
 +}
 +EXPORT_SYMBOL(inet_sk_rx_dst_set);
 +
  const struct inet_connection_sock_af_ops ipv4_specific = {
        .queue_xmit        = ip_queue_xmit,
        .send_check        = tcp_v4_send_check,
        .rebuild_header    = inet_sk_rebuild_header,
 +      .sk_rx_dst_set     = inet_sk_rx_dst_set,
        .conn_request      = tcp_v4_conn_request,
        .syn_recv_sock     = tcp_v4_syn_recv_sock,
        .net_header_len    = sizeof(struct iphdr),
@@@ -2393,7 -2382,7 +2393,7 @@@ void tcp_proc_unregister(struct net *ne
  EXPORT_SYMBOL(tcp_proc_unregister);
  
  static void get_openreq4(const struct sock *sk, const struct request_sock *req,
-                        struct seq_file *f, int i, int uid, int *len)
+                        struct seq_file *f, int i, kuid_t uid, int *len)
  {
        const struct inet_request_sock *ireq = inet_rsk(req);
        int ttd = req->expires - jiffies;
                1,    /* timers active (only the expire timer) */
                jiffies_to_clock_t(ttd),
                req->retrans,
-               uid,
+               from_kuid_munged(seq_user_ns(f), uid),
                0,  /* non standard timer */
                0, /* open_requests have no inode */
                atomic_read(&sk->sk_refcnt),
@@@ -2461,7 -2450,7 +2461,7 @@@ static void get_tcp4_sock(struct sock *
                timer_active,
                jiffies_to_clock_t(timer_expires - jiffies),
                icsk->icsk_retransmits,
-               sock_i_uid(sk),
+               from_kuid_munged(seq_user_ns(f), sock_i_uid(sk)),
                icsk->icsk_probes_out,
                sock_i_ino(sk),
                atomic_read(&sk->sk_refcnt), sk,
diff --combined net/ipv4/udp.c
index 2814f66dac64cf5775806138c91903c7a02eeae3,53b89817c0088593104af0cfca3bb0d6c0b01995..79c8dbe59b5474bdc3e23ba8adc227e1bee016a4
@@@ -758,7 -758,7 +758,7 @@@ static int udp_send_skb(struct sk_buff 
                uh->check = CSUM_MANGLED_0;
  
  send:
 -      err = ip_send_skb(skb);
 +      err = ip_send_skb(sock_net(sk), skb);
        if (err) {
                if (err == -ENOBUFS && !inet->recverr) {
                        UDP_INC_STATS_USER(sock_net(sk),
@@@ -1226,11 -1226,6 +1226,11 @@@ try_again
  
        if (unlikely(err)) {
                trace_kfree_skb(skb, udp_recvmsg);
 +              if (!peeked) {
 +                      atomic_inc(&sk->sk_drops);
 +                      UDP_INC_STATS_USER(sock_net(sk),
 +                                         UDP_MIB_INERRORS, is_udplite);
 +              }
                goto out_free;
        }
  
@@@ -2115,7 -2110,9 +2115,9 @@@ static void udp4_format_sock(struct soc
                bucket, src, srcp, dest, destp, sp->sk_state,
                sk_wmem_alloc_get(sp),
                sk_rmem_alloc_get(sp),
-               0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
+               0, 0L, 0,
+               from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
+               0, sock_i_ino(sp),
                atomic_read(&sp->sk_refcnt), sp,
                atomic_read(&sp->sk_drops), len);
  }
diff --combined net/ipv6/raw.c
index 4a5f78b50495060470777aa41edd540bb699662f,7af88ef016572e33c0530e0802beda3a002faac0..d8e95c77db99e5bac5b7cab803c82a731e14a697
@@@ -107,20 -107,21 +107,20 @@@ found
   *    0 - deliver
   *    1 - block
   */
 -static __inline__ int icmpv6_filter(struct sock *sk, struct sk_buff *skb)
 +static int icmpv6_filter(const struct sock *sk, const struct sk_buff *skb)
  {
 -      struct icmp6hdr *icmph;
 -      struct raw6_sock *rp = raw6_sk(sk);
 +      struct icmp6hdr *_hdr;
 +      const struct icmp6hdr *hdr;
  
 -      if (pskb_may_pull(skb, sizeof(struct icmp6hdr))) {
 -              __u32 *data = &rp->filter.data[0];
 -              int bit_nr;
 +      hdr = skb_header_pointer(skb, skb_transport_offset(skb),
 +                               sizeof(_hdr), &_hdr);
 +      if (hdr) {
 +              const __u32 *data = &raw6_sk(sk)->filter.data[0];
 +              unsigned int type = hdr->icmp6_type;
  
 -              icmph = (struct icmp6hdr *) skb->data;
 -              bit_nr = icmph->icmp6_type;
 -
 -              return (data[bit_nr >> 5] & (1 << (bit_nr & 31))) != 0;
 +              return (data[type >> 5] & (1U << (type & 31))) != 0;
        }
 -      return 0;
 +      return 1;
  }
  
  #if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
@@@ -1250,7 -1251,8 +1250,8 @@@ static void raw6_sock_seq_show(struct s
                   sk_wmem_alloc_get(sp),
                   sk_rmem_alloc_get(sp),
                   0, 0L, 0,
-                  sock_i_uid(sp), 0,
+                  from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
+                  0,
                   sock_i_ino(sp),
                   atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
  }
diff --combined net/ipv6/tcp_ipv6.c
index acd32e3f1b68e7c11fd211383e05b50dfd07ee6a,4b5b335ebde132a09c7328e11e4e563fe8f44d11..342ec62cdbde9f7b3a3e47343d158e7cf9ba2564
@@@ -94,18 -94,6 +94,18 @@@ static struct tcp_md5sig_key *tcp_v6_md
  }
  #endif
  
 +static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
 +{
 +      struct dst_entry *dst = skb_dst(skb);
 +      const struct rt6_info *rt = (const struct rt6_info *)dst;
 +
 +      dst_hold(dst);
 +      sk->sk_rx_dst = dst;
 +      inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
 +      if (rt->rt6i_node)
 +              inet6_sk(sk)->rx_dst_cookie = rt->rt6i_node->fn_sernum;
 +}
 +
  static void tcp_v6_hash(struct sock *sk)
  {
        if (sk->sk_state != TCP_CLOSE) {
@@@ -403,9 -391,8 +403,9 @@@ static void tcp_v6_err(struct sk_buff *
                tp->mtu_info = ntohl(info);
                if (!sock_owned_by_user(sk))
                        tcp_v6_mtu_reduced(sk);
 -              else
 -                      set_bit(TCP_MTU_REDUCED_DEFERRED, &tp->tsq_flags);
 +              else if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED,
 +                                         &tp->tsq_flags))
 +                      sock_hold(sk);
                goto out;
        }
  
@@@ -1283,7 -1270,6 +1283,7 @@@ static struct sock * tcp_v6_syn_recv_so
  
        newsk->sk_gso_type = SKB_GSO_TCPV6;
        __ip6_dst_store(newsk, dst, NULL, NULL);
 +      inet6_sk_rx_dst_set(newsk, skb);
  
        newtcp6sk = (struct tcp6_sock *)newsk;
        inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
@@@ -1461,17 -1447,7 +1461,17 @@@ static int tcp_v6_do_rcv(struct sock *s
                opt_skb = skb_clone(skb, sk_gfp_atomic(sk, GFP_ATOMIC));
  
        if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
 +              struct dst_entry *dst = sk->sk_rx_dst;
 +
                sock_rps_save_rxhash(sk, skb);
 +              if (dst) {
 +                      if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
 +                          dst->ops->check(dst, np->rx_dst_cookie) == NULL) {
 +                              dst_release(dst);
 +                              sk->sk_rx_dst = NULL;
 +                      }
 +              }
 +
                if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len))
                        goto reset;
                if (opt_skb)
@@@ -1729,9 -1705,9 +1729,9 @@@ static void tcp_v6_early_demux(struct s
                        struct dst_entry *dst = sk->sk_rx_dst;
                        struct inet_sock *icsk = inet_sk(sk);
                        if (dst)
 -                              dst = dst_check(dst, 0);
 +                              dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie);
                        if (dst &&
 -                          icsk->rx_dst_ifindex == inet6_iif(skb))
 +                          icsk->rx_dst_ifindex == skb->skb_iif)
                                skb_dst_set_noref(skb, dst);
                }
        }
@@@ -1747,7 -1723,6 +1747,7 @@@ static const struct inet_connection_soc
        .queue_xmit        = inet6_csk_xmit,
        .send_check        = tcp_v6_send_check,
        .rebuild_header    = inet6_sk_rebuild_header,
 +      .sk_rx_dst_set     = inet6_sk_rx_dst_set,
        .conn_request      = tcp_v6_conn_request,
        .syn_recv_sock     = tcp_v6_syn_recv_sock,
        .net_header_len    = sizeof(struct ipv6hdr),
@@@ -1779,7 -1754,6 +1779,7 @@@ static const struct inet_connection_soc
        .queue_xmit        = ip_queue_xmit,
        .send_check        = tcp_v4_send_check,
        .rebuild_header    = inet_sk_rebuild_header,
 +      .sk_rx_dst_set     = inet_sk_rx_dst_set,
        .conn_request      = tcp_v6_conn_request,
        .syn_recv_sock     = tcp_v6_syn_recv_sock,
        .net_header_len    = sizeof(struct iphdr),
@@@ -1829,7 -1803,7 +1829,7 @@@ static void tcp_v6_destroy_sock(struct 
  #ifdef CONFIG_PROC_FS
  /* Proc filesystem TCPv6 sock list dumping. */
  static void get_openreq6(struct seq_file *seq,
-                        const struct sock *sk, struct request_sock *req, int i, int uid)
+                        const struct sock *sk, struct request_sock *req, int i, kuid_t uid)
  {
        int ttd = req->expires - jiffies;
        const struct in6_addr *src = &inet6_rsk(req)->loc_addr;
                   1,   /* timers active (only the expire timer) */
                   jiffies_to_clock_t(ttd),
                   req->retrans,
-                  uid,
+                  from_kuid_munged(seq_user_ns(seq), uid),
                   0,  /* non standard timer */
                   0, /* open_requests have no inode */
                   0, req);
@@@ -1903,7 -1877,7 +1903,7 @@@ static void get_tcp6_sock(struct seq_fi
                   timer_active,
                   jiffies_to_clock_t(timer_expires - jiffies),
                   icsk->icsk_retransmits,
-                  sock_i_uid(sp),
+                  from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
                   icsk->icsk_probes_out,
                   sock_i_ino(sp),
                   atomic_read(&sp->sk_refcnt), sp,
diff --combined net/ipv6/udp.c
index 07e2bfef6845429ee7e359a6c21141db0a0219de,bbdff07eebe177e9dbf06bd3306eb96ada0da093..fc9997260a6bc5b841aafc4f3b57cf11ff7ec404
@@@ -394,17 -394,6 +394,17 @@@ try_again
        }
        if (unlikely(err)) {
                trace_kfree_skb(skb, udpv6_recvmsg);
 +              if (!peeked) {
 +                      atomic_inc(&sk->sk_drops);
 +                      if (is_udp4)
 +                              UDP_INC_STATS_USER(sock_net(sk),
 +                                                 UDP_MIB_INERRORS,
 +                                                 is_udplite);
 +                      else
 +                              UDP6_INC_STATS_USER(sock_net(sk),
 +                                                  UDP_MIB_INERRORS,
 +                                                  is_udplite);
 +              }
                goto out_free;
        }
        if (!peeked) {
@@@ -1469,7 -1458,8 +1469,8 @@@ static void udp6_sock_seq_show(struct s
                   sk_wmem_alloc_get(sp),
                   sk_rmem_alloc_get(sp),
                   0, 0L, 0,
-                  sock_i_uid(sp), 0,
+                  from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
+                  0,
                   sock_i_ino(sp),
                   atomic_read(&sp->sk_refcnt), sp,
                   atomic_read(&sp->sk_drops));
index 5cfb5bedb2b8e8f2fa44ed936a7cab265b5878e6,4142aac17c3cee9c382b942f54d8fc411c3d0d69..8cfc401e197e83092b12d4c9b6395889e4752239
@@@ -55,6 -55,7 +55,7 @@@ struct nfulnl_instance 
        unsigned int qlen;              /* number of nlmsgs in skb */
        struct sk_buff *skb;            /* pre-allocatd skb */
        struct timer_list timer;
+       struct user_namespace *peer_user_ns;    /* User namespace of the peer process */
        int peer_pid;                   /* PID of the peer process */
  
        /* configurable parameters */
@@@ -132,7 -133,7 +133,7 @@@ instance_put(struct nfulnl_instance *in
  static void nfulnl_timer(unsigned long data);
  
  static struct nfulnl_instance *
- instance_create(u_int16_t group_num, int pid)
+ instance_create(u_int16_t group_num, int pid, struct user_namespace *user_ns)
  {
        struct nfulnl_instance *inst;
        int err;
  
        setup_timer(&inst->timer, nfulnl_timer, (unsigned long)inst);
  
+       inst->peer_user_ns = user_ns;
        inst->peer_pid = pid;
        inst->group_num = group_num;
  
@@@ -381,7 -383,6 +383,7 @@@ __build_packet_message(struct nfulnl_in
        struct nlmsghdr *nlh;
        struct nfgenmsg *nfmsg;
        sk_buff_data_t old_tail = inst->skb->tail;
 +      struct sock *sk;
  
        nlh = nlmsg_put(inst->skb, 0, 0,
                        NFNL_SUBSYS_ULOG << 8 | NFULNL_MSG_PACKET,
        }
  
        if (indev && skb_mac_header_was_set(skb)) {
 -              if (nla_put_be32(inst->skb, NFULA_HWTYPE, htons(skb->dev->type)) ||
 +              if (nla_put_be16(inst->skb, NFULA_HWTYPE, htons(skb->dev->type)) ||
                    nla_put_be16(inst->skb, NFULA_HWLEN,
                                 htons(skb->dev->hard_header_len)) ||
                    nla_put(inst->skb, NFULA_HWHEADER, skb->dev->hard_header_len,
        }
  
        /* UID */
 -      if (skb->sk) {
 -              read_lock_bh(&skb->sk->sk_callback_lock);
 -              if (skb->sk->sk_socket && skb->sk->sk_socket->file) {
 -                      struct file *file = skb->sk->sk_socket->file;
 -                      __be32 uid = htonl(from_kuid_munged(inst->peer_user_ns,
 -                                                          file->f_cred->fsuid));
 -                      __be32 gid = htonl(from_kgid_munged(inst->peer_user_ns,
 -                                                          file->f_cred->fsgid));
 -                      /* need to unlock here since NLA_PUT may goto */
 -                      read_unlock_bh(&skb->sk->sk_callback_lock);
 +      sk = skb->sk;
 +      if (sk && sk->sk_state != TCP_TIME_WAIT) {
 +              read_lock_bh(&sk->sk_callback_lock);
 +              if (sk->sk_socket && sk->sk_socket->file) {
 +                      struct file *file = sk->sk_socket->file;
-                       __be32 uid = htonl(file->f_cred->fsuid);
-                       __be32 gid = htonl(file->f_cred->fsgid);
++                      const struct cred *cred = file->f_cred;
++                      struct user_namespace *user_ns = inst->peer_user_ns;
++                      __be32 uid = htonl(from_kuid_munged(user_ns, cred->fsuid));
++                      __be32 gid = htonl(from_kgid_munged(user_ns, cred->fsgid));
 +                      read_unlock_bh(&sk->sk_callback_lock);
                        if (nla_put_be32(inst->skb, NFULA_UID, uid) ||
                            nla_put_be32(inst->skb, NFULA_GID, gid))
                                goto nla_put_failure;
                } else
 -                      read_unlock_bh(&skb->sk->sk_callback_lock);
 +                      read_unlock_bh(&sk->sk_callback_lock);
        }
  
        /* local sequence number */
@@@ -785,7 -788,8 +789,8 @@@ nfulnl_recv_config(struct sock *ctnl, s
                        }
  
                        inst = instance_create(group_num,
-                                              NETLINK_CB(skb).pid);
+                                              NETLINK_CB(skb).pid,
+                                              sk_user_ns(NETLINK_CB(skb).ssk));
                        if (IS_ERR(inst)) {
                                ret = PTR_ERR(inst);
                                goto out;
@@@ -998,10 -1002,8 +1003,10 @@@ static int __init nfnetlink_log_init(vo
  
  #ifdef CONFIG_PROC_FS
        if (!proc_create("nfnetlink_log", 0440,
 -                       proc_net_netfilter, &nful_file_ops))
 +                       proc_net_netfilter, &nful_file_ops)) {
 +              status = -ENOMEM;
                goto cleanup_logger;
 +      }
  #endif
        return status;
  
diff --combined net/netfilter/xt_LOG.c
index 91e9af4d1f42c3baef9af1261c9464c70cd1bac0,02a2bf49dcbd13d307d4f0128fe092ca51e68eef..fa40096940a1712be7ac0c517ea5f1d7af4dfd56
@@@ -145,19 -145,6 +145,21 @@@ static int dump_tcp_header(struct sbuf
        return 0;
  }
  
-       if (sk->sk_socket && sk->sk_socket->file)
 +static void dump_sk_uid_gid(struct sbuff *m, struct sock *sk)
 +{
 +      if (!sk || sk->sk_state == TCP_TIME_WAIT)
 +              return;
 +
 +      read_lock_bh(&sk->sk_callback_lock);
-                       sk->sk_socket->file->f_cred->fsuid,
-                       sk->sk_socket->file->f_cred->fsgid);
++      if (sk->sk_socket && sk->sk_socket->file) {
++              const struct cred *cred = sk->sk_socket->file->f_cred;
 +              sb_add(m, "UID=%u GID=%u ",
++                      from_kuid_munged(&init_user_ns, cred->fsuid),
++                      from_kgid_munged(&init_user_ns, cred->fsgid));
++      }
 +      read_unlock_bh(&sk->sk_callback_lock);
 +}
 +
  /* One level of recursion won't kill us */
  static void dump_ipv4_packet(struct sbuff *m,
                        const struct nf_loginfo *info,
        }
  
        /* Max length: 15 "UID=4294967295 " */
 -      if ((logflags & XT_LOG_UID) && !iphoff && skb->sk) {
 -              read_lock_bh(&skb->sk->sk_callback_lock);
 -              if (skb->sk->sk_socket && skb->sk->sk_socket->file) {
 -                      const struct cred *cred = skb->sk->sk_socket->file->f_cred;
 -                      sb_add(m, "UID=%u GID=%u ",
 -                              from_kuid_munged(&init_user_ns, cred->fsuid),
 -                              from_kgid_munged(&init_user_ns, cred->fsgid));
 -              }
 -              read_unlock_bh(&skb->sk->sk_callback_lock);
 -      }
 +      if ((logflags & XT_LOG_UID) && !iphoff)
 +              dump_sk_uid_gid(m, skb->sk);
  
        /* Max length: 16 "MARK=0xFFFFFFFF " */
        if (!iphoff && skb->mark)
@@@ -443,8 -438,8 +445,8 @@@ log_packet_common(struct sbuff *m
                  const struct nf_loginfo *loginfo,
                  const char *prefix)
  {
 -      sb_add(m, "<%d>%sIN=%s OUT=%s ", loginfo->u.log.level,
 -             prefix,
 +      sb_add(m, KERN_SOH "%c%sIN=%s OUT=%s ",
 +             '0' + loginfo->u.log.level, prefix,
               in ? in->name : "",
               out ? out->name : "");
  #ifdef CONFIG_BRIDGE_NETFILTER
@@@ -724,8 -719,16 +726,8 @@@ static void dump_ipv6_packet(struct sbu
        }
  
        /* Max length: 15 "UID=4294967295 " */
 -      if ((logflags & XT_LOG_UID) && recurse && skb->sk) {
 -              read_lock_bh(&skb->sk->sk_callback_lock);
 -              if (skb->sk->sk_socket && skb->sk->sk_socket->file) {
 -                      const struct cred *cred = skb->sk->sk_socket->file->f_cred;
 -                      sb_add(m, "UID=%u GID=%u ",
 -                              from_kuid_munged(&init_user_ns, cred->fsuid),
 -                              from_kgid_munged(&init_user_ns, cred->fsgid));
 -              }
 -              read_unlock_bh(&skb->sk->sk_callback_lock);
 -      }
 +      if ((logflags & XT_LOG_UID) && recurse)
 +              dump_sk_uid_gid(m, skb->sk);
  
        /* Max length: 16 "MARK=0xFFFFFFFF " */
        if (!recurse && skb->mark)
diff --combined net/netlink/af_netlink.c
index 527023823b5c5ea1a48c373b49e9f1688891a494,7cb7867cc369b445c3a78359feefe8a51dfda6b1..3821199171660f3a16a94cab75453aee52d9c89c
@@@ -912,7 -912,8 +912,8 @@@ static void netlink_rcv_wake(struct soc
                wake_up_interruptible(&nlk->wait);
  }
  
- static int netlink_unicast_kernel(struct sock *sk, struct sk_buff *skb)
+ static int netlink_unicast_kernel(struct sock *sk, struct sk_buff *skb,
+                                 struct sock *ssk)
  {
        int ret;
        struct netlink_sock *nlk = nlk_sk(sk);
        if (nlk->netlink_rcv != NULL) {
                ret = skb->len;
                skb_set_owner_r(skb, sk);
+               NETLINK_CB(skb).ssk = ssk;
                nlk->netlink_rcv(skb);
                consume_skb(skb);
        } else {
@@@ -947,7 -949,7 +949,7 @@@ retry
                return PTR_ERR(sk);
        }
        if (netlink_is_kernel(sk))
-               return netlink_unicast_kernel(sk, skb);
+               return netlink_unicast_kernel(sk, skb, ssk);
  
        if (sk_filter(sk, skb)) {
                err = skb->len;
@@@ -1362,7 -1364,7 +1364,7 @@@ static int netlink_sendmsg(struct kioc
        if (NULL == siocb->scm)
                siocb->scm = &scm;
  
 -      err = scm_send(sock, msg, siocb->scm);
 +      err = scm_send(sock, msg, siocb->scm, true);
        if (err < 0)
                return err;
  
                dst_pid = addr->nl_pid;
                dst_group = ffs(addr->nl_groups);
                err =  -EPERM;
 -              if (dst_group && !netlink_capable(sock, NL_NONROOT_SEND))
 +              if ((dst_group || dst_pid) &&
 +                  !netlink_capable(sock, NL_NONROOT_SEND))
                        goto out;
        } else {
                dst_pid = nlk->dst_pid;
@@@ -2148,7 -2149,6 +2150,7 @@@ static void __init netlink_add_usersock
        rcu_assign_pointer(nl_table[NETLINK_USERSOCK].listeners, listeners);
        nl_table[NETLINK_USERSOCK].module = THIS_MODULE;
        nl_table[NETLINK_USERSOCK].registered = 1;
 +      nl_table[NETLINK_USERSOCK].nl_nonroot = NL_NONROOT_SEND;
  
        netlink_table_ungrab();
  }
diff --combined net/packet/af_packet.c
index c5c9e2a54218207f0dba9b16920b2e17da84c353,d147317ce9eaa652779c629480d1e555c7820828..048fba476aa5dc6d3ef8e8ff89aa5bc728756130
@@@ -1079,7 -1079,7 +1079,7 @@@ static void *packet_current_rx_frame(st
        default:
                WARN(1, "TPACKET version not supported\n");
                BUG();
 -              return 0;
 +              return NULL;
        }
  }
  
@@@ -1273,14 -1273,6 +1273,14 @@@ static void __fanout_unlink(struct soc
        spin_unlock(&f->lock);
  }
  
 +static bool match_fanout_group(struct packet_type *ptype, struct sock * sk)
 +{
 +      if (ptype->af_packet_priv == (void*)((struct packet_sock *)sk)->fanout)
 +              return true;
 +
 +      return false;
 +}
 +
  static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
  {
        struct packet_sock *po = pkt_sk(sk);
                match->prot_hook.dev = po->prot_hook.dev;
                match->prot_hook.func = packet_rcv_fanout;
                match->prot_hook.af_packet_priv = match;
 +              match->prot_hook.id_match = match_fanout_group;
                dev_add_pack(&match->prot_hook);
                list_add(&match->list, &fanout_list);
        }
@@@ -1945,6 -1936,7 +1945,6 @@@ static void tpacket_destruct_skb(struc
  
        if (likely(po->tx_ring.pg_vec)) {
                ph = skb_shinfo(skb)->destructor_arg;
 -              BUG_ON(__packet_get_status(po, ph) != TP_STATUS_SENDING);
                BUG_ON(atomic_read(&po->tx_ring.pending) == 0);
                atomic_dec(&po->tx_ring.pending);
                __packet_set_status(po, ph, TP_STATUS_AVAILABLE);
@@@ -3854,7 -3846,7 +3854,7 @@@ static int packet_seq_show(struct seq_f
                           po->ifindex,
                           po->running,
                           atomic_read(&s->sk_rmem_alloc),
-                          sock_i_uid(s),
+                          from_kuid_munged(seq_user_ns(seq), sock_i_uid(s)),
                           sock_i_ino(s));
        }
  
diff --combined net/sched/cls_cgroup.c
index 4a23ccca6b708266a7c5eb9ee5381a7a40e4b4b3,91de66695b4a372581466fc0369f6c02c38af570..2ecde225ae609af970a39e5acc5c2b3ce797f6b2
@@@ -77,18 -77,11 +77,18 @@@ struct cgroup_subsys net_cls_subsys = 
        .name           = "net_cls",
        .create         = cgrp_create,
        .destroy        = cgrp_destroy,
 -#ifdef CONFIG_NET_CLS_CGROUP
        .subsys_id      = net_cls_subsys_id,
 -#endif
        .base_cftypes   = ss_files,
        .module         = THIS_MODULE,
 +
 +      /*
 +       * While net_cls cgroup has the rudimentary hierarchy support of
 +       * inheriting the parent's classid on cgroup creation, it doesn't
 +       * properly propagates config changes in ancestors to their
 +       * descendents.  A child should follow the parent's configuration
 +       * but be allowed to override it.  Fix it and remove the following.
 +       */
 +      .broken_hierarchy = true,
  };
  
  struct cls_cgroup_head {
@@@ -158,7 -151,8 +158,8 @@@ static const struct nla_policy cgroup_p
        [TCA_CGROUP_EMATCHES]   = { .type = NLA_NESTED },
  };
  
- static int cls_cgroup_change(struct tcf_proto *tp, unsigned long base,
+ static int cls_cgroup_change(struct sk_buff *in_skb,
+                            struct tcf_proto *tp, unsigned long base,
                             u32 handle, struct nlattr **tca,
                             unsigned long *arg)
  {
@@@ -290,6 -284,12 +291,6 @@@ static int __init init_cgroup_cls(void
        if (ret)
                goto out;
  
 -#ifndef CONFIG_NET_CLS_CGROUP
 -      /* We can't use rcu_assign_pointer because this is an int. */
 -      smp_wmb();
 -      net_cls_subsys_id = net_cls_subsys.subsys_id;
 -#endif
 -
        ret = register_tcf_proto_ops(&cls_cgroup_ops);
        if (ret)
                cgroup_unload_subsys(&net_cls_subsys);
@@@ -302,6 -302,11 +303,6 @@@ static void __exit exit_cgroup_cls(void
  {
        unregister_tcf_proto_ops(&cls_cgroup_ops);
  
 -#ifndef CONFIG_NET_CLS_CGROUP
 -      net_cls_subsys_id = -1;
 -      synchronize_rcu();
 -#endif
 -
        cgroup_unload_subsys(&net_cls_subsys);
  }
  
diff --combined net/xfrm/xfrm_policy.c
index 387848e900783f6e6862c6d2c0b52ab45bbcfe50,2f475151cea1037cc8bb2a939a4045381b7119c3..46550997548c2e2d9cce0db8024a7bd168fb09cd
@@@ -585,7 -585,6 +585,7 @@@ int xfrm_policy_insert(int dir, struct 
        xfrm_pol_hold(policy);
        net->xfrm.policy_count[dir]++;
        atomic_inc(&flow_cache_genid);
 +      rt_genid_bump(net);
        if (delpol)
                __xfrm_policy_unlink(delpol, dir);
        policy->index = delpol ? delpol->index : xfrm_gen_index(net, dir);
@@@ -1358,8 -1357,6 +1358,8 @@@ static inline struct xfrm_dst *xfrm_all
  
                memset(dst + 1, 0, sizeof(*xdst) - sizeof(*dst));
                xdst->flo.ops = &xfrm_bundle_fc_ops;
 +              if (afinfo->init_dst)
 +                      afinfo->init_dst(net, xdst);
        } else
                xdst = ERR_PTR(-ENOBUFS);
  
@@@ -1764,7 -1761,7 +1764,7 @@@ static struct dst_entry *make_blackhole
  
        if (!afinfo) {
                dst_release(dst_orig);
 -              ret = ERR_PTR(-EINVAL);
 +              return ERR_PTR(-EINVAL);
        } else {
                ret = afinfo->blackhole_route(net, dst_orig);
        }
@@@ -2633,12 -2630,12 +2633,12 @@@ static void xfrm_policy_fini(struct ne
  
        flush_work(&net->xfrm.policy_hash_work);
  #ifdef CONFIG_XFRM_SUB_POLICY
-       audit_info.loginuid = -1;
+       audit_info.loginuid = INVALID_UID;
        audit_info.sessionid = -1;
        audit_info.secid = 0;
        xfrm_policy_flush(net, XFRM_POLICY_TYPE_SUB, &audit_info);
  #endif
-       audit_info.loginuid = -1;
+       audit_info.loginuid = INVALID_UID;
        audit_info.sessionid = -1;
        audit_info.secid = 0;
        xfrm_policy_flush(net, XFRM_POLICY_TYPE_MAIN, &audit_info);
@@@ -2745,7 -2742,7 +2745,7 @@@ static void xfrm_audit_common_policyinf
  }
  
  void xfrm_audit_policy_add(struct xfrm_policy *xp, int result,
-                          uid_t auid, u32 sessionid, u32 secid)
+                          kuid_t auid, u32 sessionid, u32 secid)
  {
        struct audit_buffer *audit_buf;
  
  EXPORT_SYMBOL_GPL(xfrm_audit_policy_add);
  
  void xfrm_audit_policy_delete(struct xfrm_policy *xp, int result,
-                             uid_t auid, u32 sessionid, u32 secid)
+                             kuid_t auid, u32 sessionid, u32 secid)
  {
        struct audit_buffer *audit_buf;
  
diff --combined net/xfrm/xfrm_state.c
index 210be48d8ae3c295a3f9e9642c356acd9866c92c,fce6a49bc7c6057792934a0711649eabb3f32352..bd2d9841ad59becf39644e09bfd7c6f6dba98567
@@@ -415,17 -415,8 +415,17 @@@ static enum hrtimer_restart xfrm_timer_
        if (x->lft.hard_add_expires_seconds) {
                long tmo = x->lft.hard_add_expires_seconds +
                        x->curlft.add_time - now;
 -              if (tmo <= 0)
 -                      goto expired;
 +              if (tmo <= 0) {
 +                      if (x->xflags & XFRM_SOFT_EXPIRE) {
 +                              /* enter hard expire without soft expire first?!
 +                               * setting a new date could trigger this.
 +                               * workarbound: fix x->curflt.add_time by below:
 +                               */
 +                              x->curlft.add_time = now - x->saved_tmo - 1;
 +                              tmo = x->lft.hard_add_expires_seconds - x->saved_tmo;
 +                      } else
 +                              goto expired;
 +              }
                if (tmo < next)
                        next = tmo;
        }
        if (x->lft.soft_add_expires_seconds) {
                long tmo = x->lft.soft_add_expires_seconds +
                        x->curlft.add_time - now;
 -              if (tmo <= 0)
 +              if (tmo <= 0) {
                        warn = 1;
 -              else if (tmo < next)
 +                      x->xflags &= ~XFRM_SOFT_EXPIRE;
 +              } else if (tmo < next) {
                        next = tmo;
 +                      x->xflags |= XFRM_SOFT_EXPIRE;
 +                      x->saved_tmo = tmo;
 +              }
        }
        if (x->lft.soft_use_expires_seconds) {
                long tmo = x->lft.soft_use_expires_seconds +
@@@ -1994,10 -1981,8 +1994,10 @@@ int __xfrm_init_state(struct xfrm_stat
                goto error;
  
        x->outer_mode = xfrm_get_mode(x->props.mode, family);
 -      if (x->outer_mode == NULL)
 +      if (x->outer_mode == NULL) {
 +              err = -EPROTONOSUPPORT;
                goto error;
 +      }
  
        if (init_replay) {
                err = xfrm_init_replay(x);
@@@ -2060,7 -2045,7 +2060,7 @@@ void xfrm_state_fini(struct net *net
        unsigned int sz;
  
        flush_work(&net->xfrm.state_hash_work);
-       audit_info.loginuid = -1;
+       audit_info.loginuid = INVALID_UID;
        audit_info.sessionid = -1;
        audit_info.secid = 0;
        xfrm_state_flush(net, IPSEC_PROTO_ANY, &audit_info);
@@@ -2127,7 -2112,7 +2127,7 @@@ static void xfrm_audit_helper_pktinfo(s
  }
  
  void xfrm_audit_state_add(struct xfrm_state *x, int result,
-                         uid_t auid, u32 sessionid, u32 secid)
+                         kuid_t auid, u32 sessionid, u32 secid)
  {
        struct audit_buffer *audit_buf;
  
  EXPORT_SYMBOL_GPL(xfrm_audit_state_add);
  
  void xfrm_audit_state_delete(struct xfrm_state *x, int result,
-                            uid_t auid, u32 sessionid, u32 secid)
+                            kuid_t auid, u32 sessionid, u32 secid)
  {
        struct audit_buffer *audit_buf;
  
diff --combined net/xfrm/xfrm_user.c
index 289f4bf18ff05751c5938c8722b098f6a560ee00,9ea55db737b47fd14b93efe56a7d52655e871d40..bc542448307a8f6413788159ed6c73f069de3956
@@@ -123,21 -123,9 +123,21 @@@ static inline int verify_replay(struct 
                                struct nlattr **attrs)
  {
        struct nlattr *rt = attrs[XFRMA_REPLAY_ESN_VAL];
 +      struct xfrm_replay_state_esn *rs;
  
 -      if ((p->flags & XFRM_STATE_ESN) && !rt)
 -              return -EINVAL;
 +      if (p->flags & XFRM_STATE_ESN) {
 +              if (!rt)
 +                      return -EINVAL;
 +
 +              rs = nla_data(rt);
 +
 +              if (rs->bmp_len > XFRMA_REPLAY_ESN_MAX / sizeof(rs->bmp[0]) / 8)
 +                      return -EINVAL;
 +
 +              if (nla_len(rt) < xfrm_replay_state_esn_len(rs) &&
 +                  nla_len(rt) != sizeof(*rs))
 +                      return -EINVAL;
 +      }
  
        if (!rt)
                return 0;
@@@ -382,15 -370,14 +382,15 @@@ static inline int xfrm_replay_verify_le
                                         struct nlattr *rp)
  {
        struct xfrm_replay_state_esn *up;
 +      int ulen;
  
        if (!replay_esn || !rp)
                return 0;
  
        up = nla_data(rp);
 +      ulen = xfrm_replay_state_esn_len(up);
  
 -      if (xfrm_replay_state_esn_len(replay_esn) !=
 -                      xfrm_replay_state_esn_len(up))
 +      if (nla_len(rp) < ulen || xfrm_replay_state_esn_len(replay_esn) != ulen)
                return -EINVAL;
  
        return 0;
@@@ -401,28 -388,22 +401,28 @@@ static int xfrm_alloc_replay_state_esn(
                                       struct nlattr *rta)
  {
        struct xfrm_replay_state_esn *p, *pp, *up;
 +      int klen, ulen;
  
        if (!rta)
                return 0;
  
        up = nla_data(rta);
 +      klen = xfrm_replay_state_esn_len(up);
 +      ulen = nla_len(rta) >= klen ? klen : sizeof(*up);
  
 -      p = kmemdup(up, xfrm_replay_state_esn_len(up), GFP_KERNEL);
 +      p = kzalloc(klen, GFP_KERNEL);
        if (!p)
                return -ENOMEM;
  
 -      pp = kmemdup(up, xfrm_replay_state_esn_len(up), GFP_KERNEL);
 +      pp = kzalloc(klen, GFP_KERNEL);
        if (!pp) {
                kfree(p);
                return -ENOMEM;
        }
  
 +      memcpy(p, up, ulen);
 +      memcpy(pp, up, ulen);
 +
        *replay_esn = p;
        *preplay_esn = pp;
  
@@@ -461,11 -442,10 +461,11 @@@ static void copy_from_user_state(struc
   * somehow made shareable and move it to xfrm_state.c - JHS
   *
  */
 -static void xfrm_update_ae_params(struct xfrm_state *x, struct nlattr **attrs)
 +static void xfrm_update_ae_params(struct xfrm_state *x, struct nlattr **attrs,
 +                                int update_esn)
  {
        struct nlattr *rp = attrs[XFRMA_REPLAY_VAL];
 -      struct nlattr *re = attrs[XFRMA_REPLAY_ESN_VAL];
 +      struct nlattr *re = update_esn ? attrs[XFRMA_REPLAY_ESN_VAL] : NULL;
        struct nlattr *lt = attrs[XFRMA_LTIME_VAL];
        struct nlattr *et = attrs[XFRMA_ETIMER_THRESH];
        struct nlattr *rt = attrs[XFRMA_REPLAY_THRESH];
@@@ -575,7 -555,7 +575,7 @@@ static struct xfrm_state *xfrm_state_co
                goto error;
  
        /* override default values from above */
 -      xfrm_update_ae_params(x, attrs);
 +      xfrm_update_ae_params(x, attrs, 0);
  
        return x;
  
@@@ -595,7 -575,7 +595,7 @@@ static int xfrm_add_sa(struct sk_buff *
        struct xfrm_state *x;
        int err;
        struct km_event c;
-       uid_t loginuid = audit_get_loginuid(current);
+       kuid_t loginuid = audit_get_loginuid(current);
        u32 sessionid = audit_get_sessionid(current);
        u32 sid;
  
@@@ -674,7 -654,7 +674,7 @@@ static int xfrm_del_sa(struct sk_buff *
        int err = -ESRCH;
        struct km_event c;
        struct xfrm_usersa_id *p = nlmsg_data(nlh);
-       uid_t loginuid = audit_get_loginuid(current);
+       kuid_t loginuid = audit_get_loginuid(current);
        u32 sessionid = audit_get_sessionid(current);
        u32 sid;
  
@@@ -709,7 -689,6 +709,7 @@@ out
  
  static void copy_to_user_state(struct xfrm_state *x, struct xfrm_usersa_info *p)
  {
 +      memset(p, 0, sizeof(*p));
        memcpy(&p->id, &x->id, sizeof(p->id));
        memcpy(&p->sel, &x->sel, sizeof(p->sel));
        memcpy(&p->lft, &x->lft, sizeof(p->lft));
@@@ -763,7 -742,7 +763,7 @@@ static int copy_to_user_auth(struct xfr
                return -EMSGSIZE;
  
        algo = nla_data(nla);
 -      strcpy(algo->alg_name, auth->alg_name);
 +      strncpy(algo->alg_name, auth->alg_name, sizeof(algo->alg_name));
        memcpy(algo->alg_key, auth->alg_key, (auth->alg_key_len + 7) / 8);
        algo->alg_key_len = auth->alg_key_len;
  
@@@ -899,7 -878,6 +899,7 @@@ static struct sk_buff *xfrm_state_netli
  {
        struct xfrm_dump_info info;
        struct sk_buff *skb;
 +      int err;
  
        skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
        if (!skb)
        info.nlmsg_seq = seq;
        info.nlmsg_flags = 0;
  
 -      if (dump_one_state(x, 0, &info)) {
 +      err = dump_one_state(x, 0, &info);
 +      if (err) {
                kfree_skb(skb);
 -              return NULL;
 +              return ERR_PTR(err);
        }
  
        return skb;
@@@ -1340,7 -1317,6 +1340,7 @@@ static void copy_from_user_policy(struc
  
  static void copy_to_user_policy(struct xfrm_policy *xp, struct xfrm_userpolicy_info *p, int dir)
  {
 +      memset(p, 0, sizeof(*p));
        memcpy(&p->sel, &xp->selector, sizeof(p->sel));
        memcpy(&p->lft, &xp->lft, sizeof(p->lft));
        memcpy(&p->curlft, &xp->curlft, sizeof(p->curlft));
@@@ -1393,7 -1369,7 +1393,7 @@@ static int xfrm_add_policy(struct sk_bu
        struct km_event c;
        int err;
        int excl;
-       uid_t loginuid = audit_get_loginuid(current);
+       kuid_t loginuid = audit_get_loginuid(current);
        u32 sessionid = audit_get_sessionid(current);
        u32 sid;
  
@@@ -1445,7 -1421,6 +1445,7 @@@ static int copy_to_user_tmpl(struct xfr
                struct xfrm_user_tmpl *up = &vec[i];
                struct xfrm_tmpl *kp = &xp->xfrm_vec[i];
  
 +              memset(up, 0, sizeof(*up));
                memcpy(&up->id, &kp->id, sizeof(up->id));
                up->family = kp->encap_family;
                memcpy(&up->saddr, &kp->saddr, sizeof(up->saddr));
@@@ -1571,7 -1546,6 +1571,7 @@@ static struct sk_buff *xfrm_policy_netl
  {
        struct xfrm_dump_info info;
        struct sk_buff *skb;
 +      int err;
  
        skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
        if (!skb)
        info.nlmsg_seq = seq;
        info.nlmsg_flags = 0;
  
 -      if (dump_one_policy(xp, dir, 0, &info) < 0) {
 +      err = dump_one_policy(xp, dir, 0, &info);
 +      if (err) {
                kfree_skb(skb);
 -              return NULL;
 +              return ERR_PTR(err);
        }
  
        return skb;
@@@ -1651,7 -1624,7 +1651,7 @@@ static int xfrm_get_policy(struct sk_bu
                                            NETLINK_CB(skb).pid);
                }
        } else {
-               uid_t loginuid = audit_get_loginuid(current);
+               kuid_t loginuid = audit_get_loginuid(current);
                u32 sessionid = audit_get_sessionid(current);
                u32 sid;
  
@@@ -1849,7 -1822,7 +1849,7 @@@ static int xfrm_new_ae(struct sk_buff *
                goto out;
  
        spin_lock_bh(&x->lock);
 -      xfrm_update_ae_params(x, attrs);
 +      xfrm_update_ae_params(x, attrs, 1);
        spin_unlock_bh(&x->lock);
  
        c.event = nlh->nlmsg_type;
@@@ -1945,7 -1918,7 +1945,7 @@@ static int xfrm_add_pol_expire(struct s
  
        err = 0;
        if (up->hard) {
-               uid_t loginuid = audit_get_loginuid(current);
+               kuid_t loginuid = audit_get_loginuid(current);
                u32 sessionid = audit_get_sessionid(current);
                u32 sid;
  
@@@ -1988,7 -1961,7 +1988,7 @@@ static int xfrm_add_sa_expire(struct sk
        km_state_expired(x, ue->hard, current->pid);
  
        if (ue->hard) {
-               uid_t loginuid = audit_get_loginuid(current);
+               kuid_t loginuid = audit_get_loginuid(current);
                u32 sessionid = audit_get_sessionid(current);
                u32 sid;
  
diff --combined security/keys/key.c
index 3cbe3529c418d55f6bc5080bf65a1707c6a07dc4,4289c5ba2710e1543d036fbd5bbb41d61e590051..a30e927349051ce651e6f0872861cdda881e5bce
@@@ -18,7 -18,6 +18,6 @@@
  #include <linux/workqueue.h>
  #include <linux/random.h>
  #include <linux/err.h>
- #include <linux/user_namespace.h>
  #include "internal.h"
  
  struct kmem_cache *key_jar;
@@@ -52,7 -51,7 +51,7 @@@ void __key_check(const struct key *key
   * Get the key quota record for a user, allocating a new record if one doesn't
   * already exist.
   */
- struct key_user *key_user_lookup(uid_t uid, struct user_namespace *user_ns)
+ struct key_user *key_user_lookup(kuid_t uid)
  {
        struct key_user *candidate = NULL, *user;
        struct rb_node *parent = NULL;
@@@ -67,13 -66,9 +66,9 @@@ try_again
                parent = *p;
                user = rb_entry(parent, struct key_user, node);
  
-               if (uid < user->uid)
+               if (uid_lt(uid, user->uid))
                        p = &(*p)->rb_left;
-               else if (uid > user->uid)
-                       p = &(*p)->rb_right;
-               else if (user_ns < user->user_ns)
-                       p = &(*p)->rb_left;
-               else if (user_ns > user->user_ns)
+               else if (uid_gt(uid, user->uid))
                        p = &(*p)->rb_right;
                else
                        goto found;
        atomic_set(&candidate->nkeys, 0);
        atomic_set(&candidate->nikeys, 0);
        candidate->uid = uid;
-       candidate->user_ns = get_user_ns(user_ns);
        candidate->qnkeys = 0;
        candidate->qnbytes = 0;
        spin_lock_init(&candidate->lock);
@@@ -131,7 -125,6 +125,6 @@@ void key_user_put(struct key_user *user
        if (atomic_dec_and_lock(&user->usage, &key_user_lock)) {
                rb_erase(&user->node, &key_user_tree);
                spin_unlock(&key_user_lock);
-               put_user_ns(user->user_ns);
  
                kfree(user);
        }
@@@ -229,7 -222,7 +222,7 @@@ serial_exists
   * key_alloc() calls don't race with module unloading.
   */
  struct key *key_alloc(struct key_type *type, const char *desc,
-                     uid_t uid, gid_t gid, const struct cred *cred,
+                     kuid_t uid, kgid_t gid, const struct cred *cred,
                      key_perm_t perm, unsigned long flags)
  {
        struct key_user *user = NULL;
        quotalen = desclen + type->def_datalen;
  
        /* get hold of the key tracking for this user */
-       user = key_user_lookup(uid, cred->user_ns);
+       user = key_user_lookup(uid);
        if (!user)
                goto no_memory_1;
  
        /* check that the user's quota permits allocation of another key and
         * its description */
        if (!(flags & KEY_ALLOC_NOT_IN_QUOTA)) {
-               unsigned maxkeys = (uid == 0) ?
+               unsigned maxkeys = uid_eq(uid, GLOBAL_ROOT_UID) ?
                        key_quota_root_maxkeys : key_quota_maxkeys;
-               unsigned maxbytes = (uid == 0) ?
+               unsigned maxbytes = uid_eq(uid, GLOBAL_ROOT_UID) ?
                        key_quota_root_maxbytes : key_quota_maxbytes;
  
                spin_lock(&user->lock);
@@@ -380,7 -373,7 +373,7 @@@ int key_payload_reserve(struct key *key
  
        /* contemplate the quota adjustment */
        if (delta != 0 && test_bit(KEY_FLAG_IN_QUOTA, &key->flags)) {
-               unsigned maxbytes = (key->user->uid == 0) ?
+               unsigned maxbytes = uid_eq(key->user->uid, GLOBAL_ROOT_UID) ?
                        key_quota_root_maxbytes : key_quota_maxbytes;
  
                spin_lock(&key->user->lock);
@@@ -598,7 -591,7 +591,7 @@@ void key_put(struct key *key
                key_check(key);
  
                if (atomic_dec_and_test(&key->usage))
 -                      queue_work(system_nrt_wq, &key_gc_work);
 +                      schedule_work(&key_gc_work);
        }
  }
  EXPORT_SYMBOL(key_put);
diff --combined security/keys/keyctl.c
index 6cfc6478863efa8545ffe463687ed3a3a081167f,1ecc0f79906e63b7abd5f10eeaa38defb0d4aadc..305ecb76519c8a0aafa115d1847a8b24f3fb71bc
@@@ -569,8 -569,8 +569,8 @@@ okay
        ret = snprintf(tmpbuf, PAGE_SIZE - 1,
                       "%s;%d;%d;%08x;%s",
                       key->type->name,
-                      key->uid,
-                      key->gid,
+                      from_kuid_munged(current_user_ns(), key->uid),
+                      from_kgid_munged(current_user_ns(), key->gid),
                       key->perm,
                       key->description ?: "");
  
@@@ -766,15 -766,25 +766,25 @@@ error
   *
   * If successful, 0 will be returned.
   */
- long keyctl_chown_key(key_serial_t id, uid_t uid, gid_t gid)
+ long keyctl_chown_key(key_serial_t id, uid_t user, gid_t group)
  {
        struct key_user *newowner, *zapowner = NULL;
        struct key *key;
        key_ref_t key_ref;
        long ret;
+       kuid_t uid;
+       kgid_t gid;
+       uid = make_kuid(current_user_ns(), user);
+       gid = make_kgid(current_user_ns(), group);
+       ret = -EINVAL;
+       if ((user != (uid_t) -1) && !uid_valid(uid))
+               goto error;
+       if ((group != (gid_t) -1) && !gid_valid(gid))
+               goto error;
  
        ret = 0;
-       if (uid == (uid_t) -1 && gid == (gid_t) -1)
+       if (user == (uid_t) -1 && group == (gid_t) -1)
                goto error;
  
        key_ref = lookup_user_key(id, KEY_LOOKUP_CREATE | KEY_LOOKUP_PARTIAL,
  
        if (!capable(CAP_SYS_ADMIN)) {
                /* only the sysadmin can chown a key to some other UID */
-               if (uid != (uid_t) -1 && key->uid != uid)
+               if (user != (uid_t) -1 && !uid_eq(key->uid, uid))
                        goto error_put;
  
                /* only the sysadmin can set the key's GID to a group other
                 * than one of those that the current process subscribes to */
-               if (gid != (gid_t) -1 && gid != key->gid && !in_group_p(gid))
+               if (group != (gid_t) -1 && !gid_eq(gid, key->gid) && !in_group_p(gid))
                        goto error_put;
        }
  
        /* change the UID */
-       if (uid != (uid_t) -1 && uid != key->uid) {
+       if (user != (uid_t) -1 && !uid_eq(uid, key->uid)) {
                ret = -ENOMEM;
-               newowner = key_user_lookup(uid, current_user_ns());
+               newowner = key_user_lookup(uid);
                if (!newowner)
                        goto error_put;
  
                /* transfer the quota burden to the new user */
                if (test_bit(KEY_FLAG_IN_QUOTA, &key->flags)) {
-                       unsigned maxkeys = (uid == 0) ?
+                       unsigned maxkeys = uid_eq(uid, GLOBAL_ROOT_UID) ?
                                key_quota_root_maxkeys : key_quota_maxkeys;
-                       unsigned maxbytes = (uid == 0) ?
+                       unsigned maxbytes = uid_eq(uid, GLOBAL_ROOT_UID) ?
                                key_quota_root_maxbytes : key_quota_maxbytes;
  
                        spin_lock(&newowner->lock);
        }
  
        /* change the GID */
-       if (gid != (gid_t) -1)
+       if (group != (gid_t) -1)
                key->gid = gid;
  
        ret = 0;
@@@ -897,7 -907,7 +907,7 @@@ long keyctl_setperm_key(key_serial_t id
        down_write(&key->sem);
  
        /* if we're not the sysadmin, we can only change a key that we own */
-       if (capable(CAP_SYS_ADMIN) || key->uid == current_fsuid()) {
+       if (capable(CAP_SYS_ADMIN) || uid_eq(key->uid, current_fsuid())) {
                key->perm = perm;
                ret = 0;
        }
@@@ -1486,6 -1496,7 +1496,6 @@@ long keyctl_session_to_parent(void
        oldwork = NULL;
        parent = me->real_parent;
  
 -      task_lock(parent);
        /* the parent mustn't be init and mustn't be a kernel thread */
        if (parent->pid <= 1 || !parent->mm)
                goto unlock;
  
        /* the parent must have the same effective ownership and mustn't be
         * SUID/SGID */
-       if (pcred->uid  != mycred->euid ||
-           pcred->euid != mycred->euid ||
-           pcred->suid != mycred->euid ||
-           pcred->gid  != mycred->egid ||
-           pcred->egid != mycred->egid ||
-           pcred->sgid != mycred->egid)
+       if (!uid_eq(pcred->uid,  mycred->euid) ||
+           !uid_eq(pcred->euid, mycred->euid) ||
+           !uid_eq(pcred->suid, mycred->euid) ||
+           !gid_eq(pcred->gid,  mycred->egid) ||
+           !gid_eq(pcred->egid, mycred->egid) ||
+           !gid_eq(pcred->sgid, mycred->egid))
                goto unlock;
  
        /* the keyrings must have the same UID */
        if ((pcred->tgcred->session_keyring &&
-            pcred->tgcred->session_keyring->uid != mycred->euid) ||
-           mycred->tgcred->session_keyring->uid != mycred->euid)
+            !uid_eq(pcred->tgcred->session_keyring->uid, mycred->euid)) ||
+           !uid_eq(mycred->tgcred->session_keyring->uid, mycred->euid))
                goto unlock;
  
        /* cancel an already pending keyring replacement */
        if (!ret)
                newwork = NULL;
  unlock:
 -      task_unlock(parent);
        write_unlock_irq(&tasklist_lock);
        rcu_read_unlock();
        if (oldwork)
This page took 0.450935 seconds and 4 git commands to generate.