Git Repo - linux.git/commitdiff
Merge branch 'master' into for-next
author Jiri Kosina <[email protected]>
Thu, 20 Feb 2014 13:54:28 +0000 (14:54 +0100)
committer Jiri Kosina <[email protected]>
Thu, 20 Feb 2014 13:54:28 +0000 (14:54 +0100)
21 files changed:
CREDITS
arch/arc/Kconfig
block/blk-core.c
block/blk-map.c
drivers/ata/libata-core.c
drivers/base/bus.c
drivers/gpu/drm/drm_crtc_helper.c
drivers/regulator/core.c
drivers/scsi/scsi_transport_iscsi.c
drivers/staging/android/Kconfig
drivers/usb/core/message.c
drivers/usb/core/urb.c
fs/buffer.c
include/linux/gpio.h
include/linux/pipe_fs_i.h
include/linux/skbuff.h
include/linux/spi/spi.h
include/linux/usb/composite.h
include/net/mac80211.h
kernel/signal.c
net/core/dev.c

diff --combined CREDITS
index 200554b454509bde745cf2c0228aa9effe9b01eb,e371c5504a5053c32b50caf9d2251f793814bcb1..4f2f45deeb7047bee89204aba7d5bafafefdf399
+++ b/CREDITS
@@@ -823,8 -823,8 +823,8 @@@ S: D-69231 Rauenber
  S: Germany
  
  N: Jean Delvare
- W: http://khali.linux-fr.org/
+ W: http://jdelvare.nerim.net/
  D: Several hardware monitoring drivers
  S: France
  
  D: Kernel / timekeeping stuff
  S: Carlisle, MA 01741
  S: USA
 -  
 +
  N: Jan-Benedict Glaw
  D: SRM environment driver (for Alpha systems)
@@@ -2560,7 -2560,7 +2560,7 @@@ S: 22 Seaview S
  S: Fullarton 5063
  S: South Australia
  
 -N. Wolfgang Muees
 +N: Wolfgang Muees
  D: Auerswald USB driver
  
diff --combined arch/arc/Kconfig
index 3d7f1137a21b01620b3b36b0a52dadf5e8f4850f,9be30c8cb0c22d36438c11b50ce21e8d6ae76463..75de197a2fef8a39c61ff764bcbb0d5bef2f71ae
@@@ -128,8 -128,8 +128,8 @@@ config SM
        default n
        help
          This enables support for systems with more than one CPU. If you have
-         a system with only one CPU, like most personal computers, say N. If
-         you have a system with more than one CPU, say Y.
+         a system with only one CPU, say N. If you have a system with more
+         than one CPU, say Y.
  
  if SMP
  
@@@ -356,6 -356,7 +356,6 @@@ config ARC_CURR_IN_RE
  
  config ARC_MISALIGN_ACCESS
        bool "Emulate unaligned memory access (userspace only)"
 -      default N
        select SYSCTL_ARCH_UNALIGN_NO_WARN
        select SYSCTL_ARCH_UNALIGN_ALLOW
        help
@@@ -408,17 -409,6 +408,6 @@@ config ARC_DBG_TLB_MISS_COUN
          Counts number of I and D TLB Misses and exports them via Debugfs
          The counters can be cleared via Debugfs as well
  
- config CMDLINE_UBOOT
-       bool "Support U-boot kernel command line passing"
-       default n
-       help
-         If you are using U-boot (www.denx.de) and wish to pass the kernel
-         command line from the U-boot environment to the Linux kernel then
-         switch this option on.
-         ARC U-boot will setup the cmdline in RAM/flash and set r2 to point
-         to it. kernel startup code will append this to DeviceTree
-         /bootargs provided cmdline args.
  config ARC_BUILTIN_DTB_NAME
        string "Built in DTB"
        help
diff --combined block/blk-core.c
index cd0158163fe004a19d2a656c0eb5cb76559b864c,853f92749202cbfe5b252d557f667b7f970ac872..4db2b32b70e06182390b3396cfdafedf0f2ca4c9
@@@ -38,6 -38,7 +38,7 @@@
  
  #include "blk.h"
  #include "blk-cgroup.h"
+ #include "blk-mq.h"
  
  EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap);
  EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap);
@@@ -130,7 -131,7 +131,7 @@@ static void req_bio_endio(struct reques
        bio_advance(bio, nbytes);
  
        /* don't actually finish bio if it's part of flush sequence */
-       if (bio->bi_size == 0 && !(rq->cmd_flags & REQ_FLUSH_SEQ))
+       if (bio->bi_iter.bi_size == 0 && !(rq->cmd_flags & REQ_FLUSH_SEQ))
                bio_endio(bio, error);
  }
  
@@@ -245,7 -246,16 +246,16 @@@ EXPORT_SYMBOL(blk_stop_queue)
  void blk_sync_queue(struct request_queue *q)
  {
        del_timer_sync(&q->timeout);
-       cancel_delayed_work_sync(&q->delay_work);
+       if (q->mq_ops) {
+               struct blk_mq_hw_ctx *hctx;
+               int i;
+               queue_for_each_hw_ctx(q, hctx, i)
+                       cancel_delayed_work_sync(&hctx->delayed_work);
+       } else {
+               cancel_delayed_work_sync(&q->delay_work);
+       }
  }
  EXPORT_SYMBOL(blk_sync_queue);
  
@@@ -497,8 -507,13 +507,13 @@@ void blk_cleanup_queue(struct request_q
         * Drain all requests queued before DYING marking. Set DEAD flag to
         * prevent that q->request_fn() gets invoked after draining finished.
         */
-       spin_lock_irq(lock);
-       __blk_drain_queue(q, true);
+       if (q->mq_ops) {
+               blk_mq_drain_queue(q);
+               spin_lock_irq(lock);
+       } else {
+               spin_lock_irq(lock);
+               __blk_drain_queue(q, true);
+       }
        queue_flag_set(QUEUE_FLAG_DEAD, q);
        spin_unlock_irq(lock);
  
@@@ -678,11 -693,20 +693,20 @@@ blk_init_queue_node(request_fn_proc *rf
        if (!uninit_q)
                return NULL;
  
+       uninit_q->flush_rq = kzalloc(sizeof(struct request), GFP_KERNEL);
+       if (!uninit_q->flush_rq)
+               goto out_cleanup_queue;
        q = blk_init_allocated_queue(uninit_q, rfn, lock);
        if (!q)
-               blk_cleanup_queue(uninit_q);
+               goto out_free_flush_rq;
        return q;
+ out_free_flush_rq:
+       kfree(uninit_q->flush_rq);
+ out_cleanup_queue:
+       blk_cleanup_queue(uninit_q);
+       return NULL;
  }
  EXPORT_SYMBOL(blk_init_queue_node);
  
@@@ -1112,7 -1136,7 +1136,7 @@@ static struct request *blk_old_get_requ
  struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask)
  {
        if (q->mq_ops)
-               return blk_mq_alloc_request(q, rw, gfp_mask, false);
+               return blk_mq_alloc_request(q, rw, gfp_mask);
        else
                return blk_old_get_request(q, rw, gfp_mask);
  }
@@@ -1263,6 -1287,11 +1287,11 @@@ void __blk_put_request(struct request_q
        if (unlikely(!q))
                return;
  
+       if (q->mq_ops) {
+               blk_mq_free_request(req);
+               return;
+       }
        blk_pm_put_request(req);
  
        elv_completed_request(q, req);
@@@ -1326,7 -1355,7 +1355,7 @@@ void blk_add_request_payload(struct req
        bio->bi_io_vec->bv_offset = 0;
        bio->bi_io_vec->bv_len = len;
  
-       bio->bi_size = len;
+       bio->bi_iter.bi_size = len;
        bio->bi_vcnt = 1;
        bio->bi_phys_segments = 1;
  
@@@ -1351,7 -1380,7 +1380,7 @@@ bool bio_attempt_back_merge(struct requ
  
        req->biotail->bi_next = bio;
        req->biotail = bio;
-       req->__data_len += bio->bi_size;
+       req->__data_len += bio->bi_iter.bi_size;
        req->ioprio = ioprio_best(req->ioprio, bio_prio(bio));
  
        blk_account_io_start(req, false);
@@@ -1380,8 -1409,8 +1409,8 @@@ bool bio_attempt_front_merge(struct req
         * not touch req->buffer either...
         */
        req->buffer = bio_data(bio);
-       req->__sector = bio->bi_sector;
-       req->__data_len += bio->bi_size;
+       req->__sector = bio->bi_iter.bi_sector;
+       req->__data_len += bio->bi_iter.bi_size;
        req->ioprio = ioprio_best(req->ioprio, bio_prio(bio));
  
        blk_account_io_start(req, false);
@@@ -1459,7 -1488,7 +1488,7 @@@ void init_request_from_bio(struct reque
                req->cmd_flags |= REQ_FAILFAST_MASK;
  
        req->errors = 0;
-       req->__sector = bio->bi_sector;
+       req->__sector = bio->bi_iter.bi_sector;
        req->ioprio = bio_prio(bio);
        blk_rq_bio_prep(req->q, req, bio);
  }
@@@ -1583,12 -1612,12 +1612,12 @@@ static inline void blk_partition_remap(
        if (bio_sectors(bio) && bdev != bdev->bd_contains) {
                struct hd_struct *p = bdev->bd_part;
  
-               bio->bi_sector += p->start_sect;
+               bio->bi_iter.bi_sector += p->start_sect;
                bio->bi_bdev = bdev->bd_contains;
  
                trace_block_bio_remap(bdev_get_queue(bio->bi_bdev), bio,
                                      bdev->bd_dev,
-                                     bio->bi_sector - p->start_sect);
+                                     bio->bi_iter.bi_sector - p->start_sect);
        }
  }
  
@@@ -1654,7 -1683,7 +1683,7 @@@ static inline int bio_check_eod(struct 
        /* Test device or partition size, when known. */
        maxsector = i_size_read(bio->bi_bdev->bd_inode) >> 9;
        if (maxsector) {
-               sector_t sector = bio->bi_sector;
+               sector_t sector = bio->bi_iter.bi_sector;
  
                if (maxsector < nr_sectors || maxsector - nr_sectors < sector) {
                        /*
@@@ -1690,7 -1719,7 +1719,7 @@@ generic_make_request_checks(struct bio 
                       "generic_make_request: Trying to access "
                        "nonexistent block-device %s (%Lu)\n",
                        bdevname(bio->bi_bdev, b),
-                       (long long) bio->bi_sector);
+                       (long long) bio->bi_iter.bi_sector);
                goto end_io;
        }
  
        }
  
        part = bio->bi_bdev->bd_part;
-       if (should_fail_request(part, bio->bi_size) ||
+       if (should_fail_request(part, bio->bi_iter.bi_size) ||
            should_fail_request(&part_to_disk(part)->part0,
-                               bio->bi_size))
+                               bio->bi_iter.bi_size))
                goto end_io;
  
        /*
@@@ -1865,7 -1894,7 +1894,7 @@@ void submit_bio(int rw, struct bio *bio
                if (rw & WRITE) {
                        count_vm_events(PGPGOUT, count);
                } else {
-                       task_io_account_read(bio->bi_size);
+                       task_io_account_read(bio->bi_iter.bi_size);
                        count_vm_events(PGPGIN, count);
                }
  
                        printk(KERN_DEBUG "%s(%d): %s block %Lu on %s (%u sectors)\n",
                        current->comm, task_pid_nr(current),
                                (rw & WRITE) ? "WRITE" : "READ",
-                               (unsigned long long)bio->bi_sector,
+                               (unsigned long long)bio->bi_iter.bi_sector,
                                bdevname(bio->bi_bdev, b),
                                count);
                }
@@@ -1900,7 -1929,7 +1929,7 @@@ EXPORT_SYMBOL(submit_bio)
   *    in some cases below, so export this function.
   *    Request stacking drivers like request-based dm may change the queue
   *    limits while requests are in the queue (e.g. dm's table swapping).
 - *    Such request stacking drivers should check those requests agaist
 + *    Such request stacking drivers should check those requests against
   *    the new queue limits again when they dispatch those requests,
   *    although such checkings are also done against the old queue limits
   *    when submitting requests.
@@@ -2007,7 -2036,7 +2036,7 @@@ unsigned int blk_rq_err_bytes(const str
        for (bio = rq->bio; bio; bio = bio->bi_next) {
                if ((bio->bi_rw & ff) != ff)
                        break;
-               bytes += bio->bi_size;
+               bytes += bio->bi_iter.bi_size;
        }
  
        /* this could lead to infinite loop */
@@@ -2378,9 -2407,9 +2407,9 @@@ bool blk_update_request(struct request 
        total_bytes = 0;
        while (req->bio) {
                struct bio *bio = req->bio;
-               unsigned bio_bytes = min(bio->bi_size, nr_bytes);
+               unsigned bio_bytes = min(bio->bi_iter.bi_size, nr_bytes);
  
-               if (bio_bytes == bio->bi_size)
+               if (bio_bytes == bio->bi_iter.bi_size)
                        req->bio = bio->bi_next;
  
                req_bio_endio(req, bio, bio_bytes, error);
@@@ -2728,7 -2757,7 +2757,7 @@@ void blk_rq_bio_prep(struct request_que
                rq->nr_phys_segments = bio_phys_segments(q, bio);
                rq->buffer = bio_data(bio);
        }
-       rq->__data_len = bio->bi_size;
+       rq->__data_len = bio->bi_iter.bi_size;
        rq->bio = rq->biotail = bio;
  
        if (bio->bi_bdev)
  void rq_flush_dcache_pages(struct request *rq)
  {
        struct req_iterator iter;
-       struct bio_vec *bvec;
+       struct bio_vec bvec;
  
        rq_for_each_segment(bvec, rq, iter)
-               flush_dcache_page(bvec->bv_page);
+               flush_dcache_page(bvec.bv_page);
  }
  EXPORT_SYMBOL_GPL(rq_flush_dcache_pages);
  #endif
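
Most of the blk-core.c churn above is mechanical fallout from the immutable-biovec series that came in through master: a bio's current position moved into an embedded struct bvec_iter, so bi_sector/bi_size become bi_iter.bi_sector/bi_iter.bi_size, and rq_for_each_segment() now yields a struct bio_vec by value rather than by pointer. A stand-alone model of the relayout (simplified hypothetical types, not the kernel's full definitions):

    /* Stand-alone sketch, not kernel code: models the bio field relayout. */
    #include <stdio.h>

    typedef unsigned long long sector_t;

    struct bvec_iter {
            sector_t     bi_sector;   /* device address, in 512-byte sectors */
            unsigned int bi_size;     /* residual I/O count, in bytes */
    };

    struct bio {
            struct bvec_iter bi_iter; /* was: bare bi_sector/bi_size members */
    };

    int main(void)
    {
            struct bio bio = { .bi_iter = { .bi_sector = 2048, .bi_size = 4096 } };

            /* old: bio.bi_sector, bio.bi_size -- new: via the embedded iterator */
            printf("sector %llu, %u bytes left\n",
                   (unsigned long long)bio.bi_iter.bi_sector, bio.bi_iter.bi_size);
            return 0;
    }
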
diff --combined block/blk-map.c
index 62382ad5b010576822c30270692027170e810dbd,ae4ae1047fd99575473a8b251dff562a151f0719..cca6356d216d13977665e17a846aef31ab1e4a87
@@@ -20,7 -20,7 +20,7 @@@ int blk_rq_append_bio(struct request_qu
                rq->biotail->bi_next = bio;
                rq->biotail = bio;
  
-               rq->__data_len += bio->bi_size;
+               rq->__data_len += bio->bi_iter.bi_size;
        }
        return 0;
  }
@@@ -76,7 -76,7 +76,7 @@@ static int __blk_rq_map_user(struct req
  
        ret = blk_rq_append_bio(q, rq, bio);
        if (!ret)
-               return bio->bi_size;
+               return bio->bi_iter.bi_size;
  
        /* if it was boucned we must call the end io function */
        bio_endio(bio, 0);
@@@ -220,7 -220,7 +220,7 @@@ int blk_rq_map_user_iov(struct request_
        if (IS_ERR(bio))
                return PTR_ERR(bio);
  
-       if (bio->bi_size != len) {
+       if (bio->bi_iter.bi_size != len) {
                /*
                 * Grab an extra reference to this bio, as bio_unmap_user()
                 * expects to be able to drop it twice as it happens on the
@@@ -285,7 -285,7 +285,7 @@@ EXPORT_SYMBOL(blk_rq_unmap_user)
   *
   * Description:
   *    Data will be mapped directly if possible. Otherwise a bounce
 - *    buffer is used. Can be called multple times to append multple
 + *    buffer is used. Can be called multiple times to append multiple
   *    buffers.
   */
  int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
diff --combined drivers/ata/libata-core.c
index 1274720e6bb9ba549389b50ec9b3db67afafe6cb,1a3dbd1b196ecb121b1ee9d34388ecd00c307b71..a440958d34e4cf993d513b474452895ad977802a
@@@ -1524,7 -1524,7 +1524,7 @@@ static void ata_qc_complete_internal(st
   *    @dev: Device to which the command is sent
   *    @tf: Taskfile registers for the command and the result
   *    @cdb: CDB for packet command
 - *    @dma_dir: Data tranfer direction of the command
 + *    @dma_dir: Data transfer direction of the command
   *    @sgl: sg list for the data buffer of the command
   *    @n_elem: Number of sg entries
   *    @timeout: Timeout in msecs (0 for default)
@@@ -1712,7 -1712,7 +1712,7 @@@ unsigned ata_exec_internal_sg(struct at
   *    @dev: Device to which the command is sent
   *    @tf: Taskfile registers for the command and the result
   *    @cdb: CDB for packet command
 - *    @dma_dir: Data tranfer direction of the command
 + *    @dma_dir: Data transfer direction of the command
   *    @buf: Data buffer of the command
   *    @buflen: Length of data buffer
   *    @timeout: Timeout in msecs (0 for default)
@@@ -2149,9 -2149,16 +2149,16 @@@ static int ata_dev_config_ncq(struct at
                                    "failed to get NCQ Send/Recv Log Emask 0x%x\n",
                                    err_mask);
                } else {
+                       u8 *cmds = dev->ncq_send_recv_cmds;
                        dev->flags |= ATA_DFLAG_NCQ_SEND_RECV;
-                       memcpy(dev->ncq_send_recv_cmds, ap->sector_buf,
-                               ATA_LOG_NCQ_SEND_RECV_SIZE);
+                       memcpy(cmds, ap->sector_buf, ATA_LOG_NCQ_SEND_RECV_SIZE);
+                       if (dev->horkage & ATA_HORKAGE_NO_NCQ_TRIM) {
+                               ata_dev_dbg(dev, "disabling queued TRIM support\n");
+                               cmds[ATA_LOG_NCQ_SEND_RECV_DSM_OFFSET] &=
+                                       ~ATA_LOG_NCQ_SEND_RECV_DSM_TRIM;
+                       }
                }
        }
  
@@@ -2215,6 -2222,16 +2222,16 @@@ int ata_dev_configure(struct ata_devic
        if (rc)
                return rc;
  
+       /* some WD SATA-1 drives have issues with LPM, turn on NOLPM for them */
+       if ((dev->horkage & ATA_HORKAGE_WD_BROKEN_LPM) &&
+           (id[ATA_ID_SATA_CAPABILITY] & 0xe) == 0x2)
+               dev->horkage |= ATA_HORKAGE_NOLPM;
+       if (dev->horkage & ATA_HORKAGE_NOLPM) {
+               ata_dev_warn(dev, "LPM support broken, forcing max_power\n");
+               dev->link->ap->target_lpm_policy = ATA_LPM_MAX_POWER;
+       }
        /* let ACPI work its magic */
        rc = ata_acpi_on_devcfg(dev);
        if (rc)
@@@ -4156,6 -4173,9 +4173,9 @@@ static const struct ata_blacklist_entr
        { "ST3320[68]13AS",     "SD1[5-9]",     ATA_HORKAGE_NONCQ |
                                                ATA_HORKAGE_FIRMWARE_WARN },
  
+       /* Seagate Momentus SpinPoint M8 seem to have FPMDA_AA issues */
+       { "ST1000LM024 HN-M101MBB", "2AR10001", ATA_HORKAGE_BROKEN_FPDMA_AA },
        /* Blacklist entries taken from Silicon Image 3124/3132
           Windows driver .inf file - also several Linux problem reports */
        { "HTS541060G9SA00",    "MB3OC60D",     ATA_HORKAGE_NONCQ, },
        { "PIONEER DVD-RW  DVR-212D",   NULL,   ATA_HORKAGE_NOSETXFER },
        { "PIONEER DVD-RW  DVR-216D",   NULL,   ATA_HORKAGE_NOSETXFER },
  
+       /* devices that don't properly handle queued TRIM commands */
+       { "Micron_M500*",               NULL,   ATA_HORKAGE_NO_NCQ_TRIM, },
+       { "Crucial_CT???M500SSD1",      NULL,   ATA_HORKAGE_NO_NCQ_TRIM, },
+       /*
+        * Some WD SATA-I drives spin up and down erratically when the link
+        * is put into the slumber mode.  We don't have full list of the
+        * affected devices.  Disable LPM if the device matches one of the
+        * known prefixes and is SATA-1.  As a side effect LPM partial is
+        * lost too.
+        *
+        * https://bugzilla.kernel.org/show_bug.cgi?id=57211
+        */
+       { "WDC WD800JD-*",              NULL,   ATA_HORKAGE_WD_BROKEN_LPM },
+       { "WDC WD1200JD-*",             NULL,   ATA_HORKAGE_WD_BROKEN_LPM },
+       { "WDC WD1600JD-*",             NULL,   ATA_HORKAGE_WD_BROKEN_LPM },
+       { "WDC WD2000JD-*",             NULL,   ATA_HORKAGE_WD_BROKEN_LPM },
+       { "WDC WD2500JD-*",             NULL,   ATA_HORKAGE_WD_BROKEN_LPM },
+       { "WDC WD3000JD-*",             NULL,   ATA_HORKAGE_WD_BROKEN_LPM },
+       { "WDC WD3200JD-*",             NULL,   ATA_HORKAGE_WD_BROKEN_LPM },
        /* End Marker */
        { }
  };
@@@ -6519,6 -6560,7 +6560,7 @@@ static int __init ata_parse_force_one(c
                { "norst",      .lflags         = ATA_LFLAG_NO_HRST | ATA_LFLAG_NO_SRST },
                { "rstonce",    .lflags         = ATA_LFLAG_RST_ONCE },
                { "atapi_dmadir", .horkage_on   = ATA_HORKAGE_ATAPI_DMADIR },
+               { "disable",    .horkage_on     = ATA_HORKAGE_DISABLE },
        };
        char *start = *cur, *p = *cur;
        char *id, *val, *endp;
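
The blacklist entries added above are glob patterns matched against the drive's IDENTIFY model and firmware strings, so "WDC WD800JD-*" covers every capacity/firmware suffix of that family. Note also the two-stage gating visible in the ata_dev_configure() hunk: the table only sets ATA_HORKAGE_WD_BROKEN_LPM, which is promoted to ATA_HORKAGE_NOLPM only when the IDENTIFY SATA-capability word reports a Gen1-only link ((id[ATA_ID_SATA_CAPABILITY] & 0xe) == 0x2). A minimal illustration of the suffix-wildcard idea (illustrative only; libata's real matcher supports fuller glob syntax):

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    static bool model_matches(const char *pattern, const char *model)
    {
            size_t n = strlen(pattern);

            if (n && pattern[n - 1] == '*')         /* "WDC WD800JD-*" style */
                    return strncmp(pattern, model, n - 1) == 0;
            return strcmp(pattern, model) == 0;     /* exact-model entry */
    }

    int main(void)
    {
            printf("%d\n", model_matches("WDC WD800JD-*", "WDC WD800JD-55MSA1"));
            return 0;
    }
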
diff --combined drivers/base/bus.c
index 1db22d3c4036c514d29e38a85355182445ed83cb,59dc8086e4faa83b1dd6823754e33c9b77b37751..83e910a57563c021d5e803adc2ae4ee7f43447ec
@@@ -146,8 -146,19 +146,19 @@@ void bus_remove_file(struct bus_type *b
  }
  EXPORT_SYMBOL_GPL(bus_remove_file);
  
+ static void bus_release(struct kobject *kobj)
+ {
+       struct subsys_private *priv =
+               container_of(kobj, typeof(*priv), subsys.kobj);
+       struct bus_type *bus = priv->bus;
+       kfree(priv);
+       bus->p = NULL;
+ }
  static struct kobj_type bus_ktype = {
        .sysfs_ops      = &bus_sysfs_ops,
+       .release        = bus_release,
  };
  
  static int bus_uevent_filter(struct kset *kset, struct kobject *kobj)
@@@ -953,8 -964,6 +964,6 @@@ void bus_unregister(struct bus_type *bu
        kset_unregister(bus->p->devices_kset);
        bus_remove_file(bus, &bus_attr_uevent);
        kset_unregister(&bus->p->subsys);
-       kfree(bus->p);
-       bus->p = NULL;
  }
  EXPORT_SYMBOL_GPL(bus_unregister);
  
@@@ -1209,7 -1218,7 +1218,7 @@@ err_dev
   * with the name of the subsystem. The root device can carry subsystem-
   * wide attributes. All registered devices are below this single root
   * device and are named after the subsystem with a simple enumeration
 - * number appended. The registered devices are not explicitely named;
 + * number appended. The registered devices are not explicitly named;
   * only 'id' in the device needs to be set.
   *
   * Do not use this interface for anything new, it exists for compatibility
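
The bus.c hunks fix an object-lifetime bug: bus_unregister() used to kfree() the subsystem's private data directly, even though the embedded kobject may still be referenced elsewhere. Moving the kfree() into a ktype ->release() callback defers it until the kobject core drops the last reference. The general idiom, sketched (kernel-style fragment, not a complete driver):

    #include <linux/kobject.h>
    #include <linux/slab.h>

    struct wrapped {
            struct kobject kobj;
            /* ... state that must outlive every sysfs user ... */
    };

    static void wrapped_release(struct kobject *kobj)
    {
            /* Called only after the final kobject_put(); safe to free now. */
            kfree(container_of(kobj, struct wrapped, kobj));
    }

    static struct kobj_type wrapped_ktype = {
            .release = wrapped_release,
    };
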
diff --combined drivers/gpu/drm/drm_crtc_helper.c
index 0058fd74063eae6a2b6310058dd601ed4f38de3b,ea92b827e787e3543a745b119822085618c16988..f7a81209beb38b92291e616b0171eb9d3f94fa62
@@@ -324,35 -324,6 +324,6 @@@ void drm_helper_disable_unused_function
  }
  EXPORT_SYMBOL(drm_helper_disable_unused_functions);
  
- /**
-  * drm_encoder_crtc_ok - can a given crtc drive a given encoder?
-  * @encoder: encoder to test
-  * @crtc: crtc to test
-  *
-  * Return false if @encoder can't be driven by @crtc, true otherwise.
-  */
- static bool drm_encoder_crtc_ok(struct drm_encoder *encoder,
-                               struct drm_crtc *crtc)
- {
-       struct drm_device *dev;
-       struct drm_crtc *tmp;
-       int crtc_mask = 1;
-       WARN(!crtc, "checking null crtc?\n");
-       dev = crtc->dev;
-       list_for_each_entry(tmp, &dev->mode_config.crtc_list, head) {
-               if (tmp == crtc)
-                       break;
-               crtc_mask <<= 1;
-       }
-       if (encoder->possible_crtcs & crtc_mask)
-               return true;
-       return false;
- }
  /*
   * Check the CRTC we're going to map each output to vs. its current
   * CRTC.  If they don't match, we have to disable the output and the CRTC
@@@ -536,7 -507,7 +507,7 @@@ bool drm_crtc_helper_set_mode(struct dr
         * are later needed by vblank and swap-completion
         * timestamping. They are derived from true hwmode.
         */
-       drm_calc_timestamping_constants(crtc);
+       drm_calc_timestamping_constants(crtc, &crtc->hwmode);
  
        /* FIXME: add subpixel order */
  done:
@@@ -593,7 -564,7 +564,7 @@@ drm_crtc_helper_disable(struct drm_crt
   * Caller must hold mode config lock.
   *
   * Setup a new configuration, provided by the upper layers (either an ioctl call
 - * from userspace or internally e.g. from the fbdev suppport code) in @set, and
 + * from userspace or internally e.g. from the fbdev support code) in @set, and
   * enable it. This is the main helper functions for drivers that implement
   * kernel mode setting with the crtc helper functions and the assorted
   * ->prepare(), ->modeset() and ->commit() helper callbacks.
diff --combined drivers/regulator/core.c
index d59aa96a4dc48e4e6109a9d1fcdcac9bad82a0eb,16a309e5c024ed45b4276d13d1f8e14c2c0a131c..8bd5cbf3aafbc172aa94e8655b0fd4d1812e6945
@@@ -1272,6 -1272,8 +1272,8 @@@ static struct regulator_dev *regulator_
                                if (r->dev.parent &&
                                        node == r->dev.of_node)
                                        return r;
+                       *ret = -EPROBE_DEFER;
+                       return NULL;
                } else {
                        /*
                         * If we couldn't even get the node then it's
@@@ -1312,7 -1314,7 +1314,7 @@@ static struct regulator *_regulator_get
        struct regulator_dev *rdev;
        struct regulator *regulator = ERR_PTR(-EPROBE_DEFER);
        const char *devname = NULL;
-       int ret = -EPROBE_DEFER;
+       int ret;
  
        if (id == NULL) {
                pr_err("get() with no identifier\n");
        if (dev)
                devname = dev_name(dev);
  
+       if (have_full_constraints())
+               ret = -ENODEV;
+       else
+               ret = -EPROBE_DEFER;
        mutex_lock(&regulator_list_mutex);
  
        rdev = regulator_dev_lookup(dev, id, &ret);
         * If we have return value from dev_lookup fail, we do not expect to
         * succeed, so, quit with appropriate error value
         */
-       if (ret && ret != -ENODEV) {
+       if (ret && ret != -ENODEV)
                goto out;
-       }
  
        if (!devname)
                devname = "deviceless";
  
                rdev = dummy_regulator_rdev;
                goto found;
-       } else {
+       /* Don't log an error when called from regulator_get_optional() */
+       } else if (!have_full_constraints() || exclusive) {
                dev_err(dev, "dummy supplies not allowed\n");
        }
  
@@@ -2134,7 -2141,7 +2141,7 @@@ EXPORT_SYMBOL_GPL(regulator_is_enabled)
   * @regulator: regulator source
   *
   * Returns positive if the regulator driver backing the source/client
 - * can change its voltage, false otherwise. Usefull for detecting fixed
 + * can change its voltage, false otherwise. Useful for detecting fixed
   * or dummy regulators and disabling voltage change logic in the client
   * driver.
   */
@@@ -2244,7 -2251,7 +2251,7 @@@ int regulator_is_supported_voltage(stru
        if (!(rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_VOLTAGE)) {
                ret = regulator_get_voltage(regulator);
                if (ret >= 0)
-                       return (min_uV <= ret && ret <= max_uV);
+                       return min_uV <= ret && ret <= max_uV;
                else
                        return ret;
        }
@@@ -2416,7 -2423,7 +2423,7 @@@ int regulator_set_voltage(struct regula
        ret = regulator_check_voltage(rdev, &min_uV, &max_uV);
        if (ret < 0)
                goto out;
-       
        /* restore original values in case of error */
        old_min_uV = regulator->min_uV;
        old_max_uV = regulator->max_uV;
        ret = _regulator_do_set_voltage(rdev, min_uV, max_uV);
        if (ret < 0)
                goto out2;
-       
  out:
        mutex_unlock(&rdev->mutex);
        return ret;
@@@ -3835,9 -3842,8 +3842,8 @@@ static int __init regulator_init_comple
                         * goes wrong. */
                        rdev_info(rdev, "disabling\n");
                        ret = ops->disable(rdev);
-                       if (ret != 0) {
+                       if (ret != 0)
                                rdev_err(rdev, "couldn't disable: %d\n", ret);
-                       }
                } else {
                        /* The intention is that in future we will
                         * assume that full constraints are provided
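
The regulator hunks change what a failed supply lookup means: on systems declaring full constraints, a supply that cannot be found now yields -ENODEV (it genuinely does not exist) rather than -EPROBE_DEFER (it may register later), and the device-tree lookup path explicitly defers instead of falling through. From a consumer's side the distinction looks roughly like this (sketch, assuming the regulator_get_optional() API of this era):

    struct regulator *reg = regulator_get_optional(dev, "vdd");

    if (IS_ERR(reg)) {
            if (PTR_ERR(reg) == -EPROBE_DEFER)
                    return -EPROBE_DEFER;   /* supply may still be registered */
            reg = NULL;                     /* e.g. -ENODEV: genuinely absent */
    }
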
diff --combined drivers/scsi/scsi_transport_iscsi.c
index de5b4d9bb022b2ee1047de4cb2a20a894e8fc4be,fd8ffe6bcfdd98466a42f8cda23770dc248cf72b..0102a2d70dd85912db42f2beb1808cd6297f6bab
@@@ -305,20 -305,71 +305,71 @@@ show_##type##_##name(struct device *dev
        iscsi_iface_attr_show(type, name, ISCSI_NET_PARAM, param)       \
  static ISCSI_IFACE_ATTR(type, name, S_IRUGO, show_##type##_##name, NULL);
  
- /* generic read only ipvi4 attribute */
+ #define iscsi_iface_attr(type, name, param)                           \
+       iscsi_iface_attr_show(type, name, ISCSI_IFACE_PARAM, param)     \
+ static ISCSI_IFACE_ATTR(type, name, S_IRUGO, show_##type##_##name, NULL);
+ /* generic read only ipv4 attribute */
  iscsi_iface_net_attr(ipv4_iface, ipaddress, ISCSI_NET_PARAM_IPV4_ADDR);
  iscsi_iface_net_attr(ipv4_iface, gateway, ISCSI_NET_PARAM_IPV4_GW);
  iscsi_iface_net_attr(ipv4_iface, subnet, ISCSI_NET_PARAM_IPV4_SUBNET);
  iscsi_iface_net_attr(ipv4_iface, bootproto, ISCSI_NET_PARAM_IPV4_BOOTPROTO);
+ iscsi_iface_net_attr(ipv4_iface, dhcp_dns_address_en,
+                    ISCSI_NET_PARAM_IPV4_DHCP_DNS_ADDR_EN);
+ iscsi_iface_net_attr(ipv4_iface, dhcp_slp_da_info_en,
+                    ISCSI_NET_PARAM_IPV4_DHCP_SLP_DA_EN);
+ iscsi_iface_net_attr(ipv4_iface, tos_en, ISCSI_NET_PARAM_IPV4_TOS_EN);
+ iscsi_iface_net_attr(ipv4_iface, tos, ISCSI_NET_PARAM_IPV4_TOS);
+ iscsi_iface_net_attr(ipv4_iface, grat_arp_en,
+                    ISCSI_NET_PARAM_IPV4_GRAT_ARP_EN);
+ iscsi_iface_net_attr(ipv4_iface, dhcp_alt_client_id_en,
+                    ISCSI_NET_PARAM_IPV4_DHCP_ALT_CLIENT_ID_EN);
+ iscsi_iface_net_attr(ipv4_iface, dhcp_alt_client_id,
+                    ISCSI_NET_PARAM_IPV4_DHCP_ALT_CLIENT_ID);
+ iscsi_iface_net_attr(ipv4_iface, dhcp_req_vendor_id_en,
+                    ISCSI_NET_PARAM_IPV4_DHCP_REQ_VENDOR_ID_EN);
+ iscsi_iface_net_attr(ipv4_iface, dhcp_use_vendor_id_en,
+                    ISCSI_NET_PARAM_IPV4_DHCP_USE_VENDOR_ID_EN);
+ iscsi_iface_net_attr(ipv4_iface, dhcp_vendor_id,
+                    ISCSI_NET_PARAM_IPV4_DHCP_VENDOR_ID);
+ iscsi_iface_net_attr(ipv4_iface, dhcp_learn_iqn_en,
+                    ISCSI_NET_PARAM_IPV4_DHCP_LEARN_IQN_EN);
+ iscsi_iface_net_attr(ipv4_iface, fragment_disable,
+                    ISCSI_NET_PARAM_IPV4_FRAGMENT_DISABLE);
+ iscsi_iface_net_attr(ipv4_iface, incoming_forwarding_en,
+                    ISCSI_NET_PARAM_IPV4_IN_FORWARD_EN);
+ iscsi_iface_net_attr(ipv4_iface, ttl, ISCSI_NET_PARAM_IPV4_TTL);
  
  /* generic read only ipv6 attribute */
  iscsi_iface_net_attr(ipv6_iface, ipaddress, ISCSI_NET_PARAM_IPV6_ADDR);
- iscsi_iface_net_attr(ipv6_iface, link_local_addr, ISCSI_NET_PARAM_IPV6_LINKLOCAL);
+ iscsi_iface_net_attr(ipv6_iface, link_local_addr,
+                    ISCSI_NET_PARAM_IPV6_LINKLOCAL);
  iscsi_iface_net_attr(ipv6_iface, router_addr, ISCSI_NET_PARAM_IPV6_ROUTER);
  iscsi_iface_net_attr(ipv6_iface, ipaddr_autocfg,
                     ISCSI_NET_PARAM_IPV6_ADDR_AUTOCFG);
  iscsi_iface_net_attr(ipv6_iface, link_local_autocfg,
                     ISCSI_NET_PARAM_IPV6_LINKLOCAL_AUTOCFG);
+ iscsi_iface_net_attr(ipv6_iface, link_local_state,
+                    ISCSI_NET_PARAM_IPV6_LINKLOCAL_STATE);
+ iscsi_iface_net_attr(ipv6_iface, router_state,
+                    ISCSI_NET_PARAM_IPV6_ROUTER_STATE);
+ iscsi_iface_net_attr(ipv6_iface, grat_neighbor_adv_en,
+                    ISCSI_NET_PARAM_IPV6_GRAT_NEIGHBOR_ADV_EN);
+ iscsi_iface_net_attr(ipv6_iface, mld_en, ISCSI_NET_PARAM_IPV6_MLD_EN);
+ iscsi_iface_net_attr(ipv6_iface, flow_label, ISCSI_NET_PARAM_IPV6_FLOW_LABEL);
+ iscsi_iface_net_attr(ipv6_iface, traffic_class,
+                    ISCSI_NET_PARAM_IPV6_TRAFFIC_CLASS);
+ iscsi_iface_net_attr(ipv6_iface, hop_limit, ISCSI_NET_PARAM_IPV6_HOP_LIMIT);
+ iscsi_iface_net_attr(ipv6_iface, nd_reachable_tmo,
+                    ISCSI_NET_PARAM_IPV6_ND_REACHABLE_TMO);
+ iscsi_iface_net_attr(ipv6_iface, nd_rexmit_time,
+                    ISCSI_NET_PARAM_IPV6_ND_REXMIT_TIME);
+ iscsi_iface_net_attr(ipv6_iface, nd_stale_tmo,
+                    ISCSI_NET_PARAM_IPV6_ND_STALE_TMO);
+ iscsi_iface_net_attr(ipv6_iface, dup_addr_detect_cnt,
+                    ISCSI_NET_PARAM_IPV6_DUP_ADDR_DETECT_CNT);
+ iscsi_iface_net_attr(ipv6_iface, router_adv_link_mtu,
+                    ISCSI_NET_PARAM_IPV6_RTR_ADV_LINK_MTU);
  
  /* common read only iface attribute */
  iscsi_iface_net_attr(iface, enabled, ISCSI_NET_PARAM_IFACE_ENABLE);
@@@ -327,6 -378,40 +378,40 @@@ iscsi_iface_net_attr(iface, vlan_priori
  iscsi_iface_net_attr(iface, vlan_enabled, ISCSI_NET_PARAM_VLAN_ENABLED);
  iscsi_iface_net_attr(iface, mtu, ISCSI_NET_PARAM_MTU);
  iscsi_iface_net_attr(iface, port, ISCSI_NET_PARAM_PORT);
+ iscsi_iface_net_attr(iface, ipaddress_state, ISCSI_NET_PARAM_IPADDR_STATE);
+ iscsi_iface_net_attr(iface, delayed_ack_en, ISCSI_NET_PARAM_DELAYED_ACK_EN);
+ iscsi_iface_net_attr(iface, tcp_nagle_disable,
+                    ISCSI_NET_PARAM_TCP_NAGLE_DISABLE);
+ iscsi_iface_net_attr(iface, tcp_wsf_disable, ISCSI_NET_PARAM_TCP_WSF_DISABLE);
+ iscsi_iface_net_attr(iface, tcp_wsf, ISCSI_NET_PARAM_TCP_WSF);
+ iscsi_iface_net_attr(iface, tcp_timer_scale, ISCSI_NET_PARAM_TCP_TIMER_SCALE);
+ iscsi_iface_net_attr(iface, tcp_timestamp_en, ISCSI_NET_PARAM_TCP_TIMESTAMP_EN);
+ iscsi_iface_net_attr(iface, cache_id, ISCSI_NET_PARAM_CACHE_ID);
+ iscsi_iface_net_attr(iface, redirect_en, ISCSI_NET_PARAM_REDIRECT_EN);
+ /* common iscsi specific settings attributes */
+ iscsi_iface_attr(iface, def_taskmgmt_tmo, ISCSI_IFACE_PARAM_DEF_TASKMGMT_TMO);
+ iscsi_iface_attr(iface, header_digest, ISCSI_IFACE_PARAM_HDRDGST_EN);
+ iscsi_iface_attr(iface, data_digest, ISCSI_IFACE_PARAM_DATADGST_EN);
+ iscsi_iface_attr(iface, immediate_data, ISCSI_IFACE_PARAM_IMM_DATA_EN);
+ iscsi_iface_attr(iface, initial_r2t, ISCSI_IFACE_PARAM_INITIAL_R2T_EN);
+ iscsi_iface_attr(iface, data_seq_in_order,
+                ISCSI_IFACE_PARAM_DATASEQ_INORDER_EN);
+ iscsi_iface_attr(iface, data_pdu_in_order, ISCSI_IFACE_PARAM_PDU_INORDER_EN);
+ iscsi_iface_attr(iface, erl, ISCSI_IFACE_PARAM_ERL);
+ iscsi_iface_attr(iface, max_recv_dlength, ISCSI_IFACE_PARAM_MAX_RECV_DLENGTH);
+ iscsi_iface_attr(iface, first_burst_len, ISCSI_IFACE_PARAM_FIRST_BURST);
+ iscsi_iface_attr(iface, max_outstanding_r2t, ISCSI_IFACE_PARAM_MAX_R2T);
+ iscsi_iface_attr(iface, max_burst_len, ISCSI_IFACE_PARAM_MAX_BURST);
+ iscsi_iface_attr(iface, chap_auth, ISCSI_IFACE_PARAM_CHAP_AUTH_EN);
+ iscsi_iface_attr(iface, bidi_chap, ISCSI_IFACE_PARAM_BIDI_CHAP_EN);
+ iscsi_iface_attr(iface, discovery_auth_optional,
+                ISCSI_IFACE_PARAM_DISCOVERY_AUTH_OPTIONAL);
+ iscsi_iface_attr(iface, discovery_logout,
+                ISCSI_IFACE_PARAM_DISCOVERY_LOGOUT_EN);
+ iscsi_iface_attr(iface, strict_login_comp_en,
+                ISCSI_IFACE_PARAM_STRICT_LOGIN_COMP_EN);
+ iscsi_iface_attr(iface, initiator_name, ISCSI_IFACE_PARAM_INITIATOR_NAME);
  
  static umode_t iscsi_iface_attr_is_visible(struct kobject *kobj,
                                          struct attribute *attr, int i)
        struct iscsi_iface *iface = iscsi_dev_to_iface(dev);
        struct iscsi_transport *t = iface->transport;
        int param;
+       int param_type;
  
        if (attr == &dev_attr_iface_enabled.attr)
                param = ISCSI_NET_PARAM_IFACE_ENABLE;
                param = ISCSI_NET_PARAM_MTU;
        else if (attr == &dev_attr_iface_port.attr)
                param = ISCSI_NET_PARAM_PORT;
+       else if (attr == &dev_attr_iface_ipaddress_state.attr)
+               param = ISCSI_NET_PARAM_IPADDR_STATE;
+       else if (attr == &dev_attr_iface_delayed_ack_en.attr)
+               param = ISCSI_NET_PARAM_DELAYED_ACK_EN;
+       else if (attr == &dev_attr_iface_tcp_nagle_disable.attr)
+               param = ISCSI_NET_PARAM_TCP_NAGLE_DISABLE;
+       else if (attr == &dev_attr_iface_tcp_wsf_disable.attr)
+               param = ISCSI_NET_PARAM_TCP_WSF_DISABLE;
+       else if (attr == &dev_attr_iface_tcp_wsf.attr)
+               param = ISCSI_NET_PARAM_TCP_WSF;
+       else if (attr == &dev_attr_iface_tcp_timer_scale.attr)
+               param = ISCSI_NET_PARAM_TCP_TIMER_SCALE;
+       else if (attr == &dev_attr_iface_tcp_timestamp_en.attr)
+               param = ISCSI_NET_PARAM_TCP_TIMESTAMP_EN;
+       else if (attr == &dev_attr_iface_cache_id.attr)
+               param = ISCSI_NET_PARAM_CACHE_ID;
+       else if (attr == &dev_attr_iface_redirect_en.attr)
+               param = ISCSI_NET_PARAM_REDIRECT_EN;
+       else if (attr == &dev_attr_iface_def_taskmgmt_tmo.attr)
+               param = ISCSI_IFACE_PARAM_DEF_TASKMGMT_TMO;
+       else if (attr == &dev_attr_iface_header_digest.attr)
+               param = ISCSI_IFACE_PARAM_HDRDGST_EN;
+       else if (attr == &dev_attr_iface_data_digest.attr)
+               param = ISCSI_IFACE_PARAM_DATADGST_EN;
+       else if (attr == &dev_attr_iface_immediate_data.attr)
+               param = ISCSI_IFACE_PARAM_IMM_DATA_EN;
+       else if (attr == &dev_attr_iface_initial_r2t.attr)
+               param = ISCSI_IFACE_PARAM_INITIAL_R2T_EN;
+       else if (attr == &dev_attr_iface_data_seq_in_order.attr)
+               param = ISCSI_IFACE_PARAM_DATASEQ_INORDER_EN;
+       else if (attr == &dev_attr_iface_data_pdu_in_order.attr)
+               param = ISCSI_IFACE_PARAM_PDU_INORDER_EN;
+       else if (attr == &dev_attr_iface_erl.attr)
+               param = ISCSI_IFACE_PARAM_ERL;
+       else if (attr == &dev_attr_iface_max_recv_dlength.attr)
+               param = ISCSI_IFACE_PARAM_MAX_RECV_DLENGTH;
+       else if (attr == &dev_attr_iface_first_burst_len.attr)
+               param = ISCSI_IFACE_PARAM_FIRST_BURST;
+       else if (attr == &dev_attr_iface_max_outstanding_r2t.attr)
+               param = ISCSI_IFACE_PARAM_MAX_R2T;
+       else if (attr == &dev_attr_iface_max_burst_len.attr)
+               param = ISCSI_IFACE_PARAM_MAX_BURST;
+       else if (attr == &dev_attr_iface_chap_auth.attr)
+               param = ISCSI_IFACE_PARAM_CHAP_AUTH_EN;
+       else if (attr == &dev_attr_iface_bidi_chap.attr)
+               param = ISCSI_IFACE_PARAM_BIDI_CHAP_EN;
+       else if (attr == &dev_attr_iface_discovery_auth_optional.attr)
+               param = ISCSI_IFACE_PARAM_DISCOVERY_AUTH_OPTIONAL;
+       else if (attr == &dev_attr_iface_discovery_logout.attr)
+               param = ISCSI_IFACE_PARAM_DISCOVERY_LOGOUT_EN;
+       else if (attr == &dev_attr_iface_strict_login_comp_en.attr)
+               param = ISCSI_IFACE_PARAM_STRICT_LOGIN_COMP_EN;
+       else if (attr == &dev_attr_iface_initiator_name.attr)
+               param = ISCSI_IFACE_PARAM_INITIATOR_NAME;
        else if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) {
                if (attr == &dev_attr_ipv4_iface_ipaddress.attr)
                        param = ISCSI_NET_PARAM_IPV4_ADDR;
                        param = ISCSI_NET_PARAM_IPV4_SUBNET;
                else if (attr == &dev_attr_ipv4_iface_bootproto.attr)
                        param = ISCSI_NET_PARAM_IPV4_BOOTPROTO;
+               else if (attr ==
+                        &dev_attr_ipv4_iface_dhcp_dns_address_en.attr)
+                       param = ISCSI_NET_PARAM_IPV4_DHCP_DNS_ADDR_EN;
+               else if (attr ==
+                        &dev_attr_ipv4_iface_dhcp_slp_da_info_en.attr)
+                       param = ISCSI_NET_PARAM_IPV4_DHCP_SLP_DA_EN;
+               else if (attr == &dev_attr_ipv4_iface_tos_en.attr)
+                       param = ISCSI_NET_PARAM_IPV4_TOS_EN;
+               else if (attr == &dev_attr_ipv4_iface_tos.attr)
+                       param = ISCSI_NET_PARAM_IPV4_TOS;
+               else if (attr == &dev_attr_ipv4_iface_grat_arp_en.attr)
+                       param = ISCSI_NET_PARAM_IPV4_GRAT_ARP_EN;
+               else if (attr ==
+                        &dev_attr_ipv4_iface_dhcp_alt_client_id_en.attr)
+                       param = ISCSI_NET_PARAM_IPV4_DHCP_ALT_CLIENT_ID_EN;
+               else if (attr == &dev_attr_ipv4_iface_dhcp_alt_client_id.attr)
+                       param = ISCSI_NET_PARAM_IPV4_DHCP_ALT_CLIENT_ID;
+               else if (attr ==
+                        &dev_attr_ipv4_iface_dhcp_req_vendor_id_en.attr)
+                       param = ISCSI_NET_PARAM_IPV4_DHCP_REQ_VENDOR_ID_EN;
+               else if (attr ==
+                        &dev_attr_ipv4_iface_dhcp_use_vendor_id_en.attr)
+                       param = ISCSI_NET_PARAM_IPV4_DHCP_USE_VENDOR_ID_EN;
+               else if (attr == &dev_attr_ipv4_iface_dhcp_vendor_id.attr)
+                       param = ISCSI_NET_PARAM_IPV4_DHCP_VENDOR_ID;
+               else if (attr ==
+                        &dev_attr_ipv4_iface_dhcp_learn_iqn_en.attr)
+                       param = ISCSI_NET_PARAM_IPV4_DHCP_LEARN_IQN_EN;
+               else if (attr ==
+                        &dev_attr_ipv4_iface_fragment_disable.attr)
+                       param = ISCSI_NET_PARAM_IPV4_FRAGMENT_DISABLE;
+               else if (attr ==
+                        &dev_attr_ipv4_iface_incoming_forwarding_en.attr)
+                       param = ISCSI_NET_PARAM_IPV4_IN_FORWARD_EN;
+               else if (attr == &dev_attr_ipv4_iface_ttl.attr)
+                       param = ISCSI_NET_PARAM_IPV4_TTL;
                else
                        return 0;
        } else if (iface->iface_type == ISCSI_IFACE_TYPE_IPV6) {
                        param = ISCSI_NET_PARAM_IPV6_ADDR_AUTOCFG;
                else if (attr == &dev_attr_ipv6_iface_link_local_autocfg.attr)
                        param = ISCSI_NET_PARAM_IPV6_LINKLOCAL_AUTOCFG;
+               else if (attr == &dev_attr_ipv6_iface_link_local_state.attr)
+                       param = ISCSI_NET_PARAM_IPV6_LINKLOCAL_STATE;
+               else if (attr == &dev_attr_ipv6_iface_router_state.attr)
+                       param = ISCSI_NET_PARAM_IPV6_ROUTER_STATE;
+               else if (attr ==
+                        &dev_attr_ipv6_iface_grat_neighbor_adv_en.attr)
+                       param = ISCSI_NET_PARAM_IPV6_GRAT_NEIGHBOR_ADV_EN;
+               else if (attr == &dev_attr_ipv6_iface_mld_en.attr)
+                       param = ISCSI_NET_PARAM_IPV6_MLD_EN;
+               else if (attr == &dev_attr_ipv6_iface_flow_label.attr)
+                       param = ISCSI_NET_PARAM_IPV6_FLOW_LABEL;
+               else if (attr == &dev_attr_ipv6_iface_traffic_class.attr)
+                       param = ISCSI_NET_PARAM_IPV6_TRAFFIC_CLASS;
+               else if (attr == &dev_attr_ipv6_iface_hop_limit.attr)
+                       param = ISCSI_NET_PARAM_IPV6_HOP_LIMIT;
+               else if (attr == &dev_attr_ipv6_iface_nd_reachable_tmo.attr)
+                       param = ISCSI_NET_PARAM_IPV6_ND_REACHABLE_TMO;
+               else if (attr == &dev_attr_ipv6_iface_nd_rexmit_time.attr)
+                       param = ISCSI_NET_PARAM_IPV6_ND_REXMIT_TIME;
+               else if (attr == &dev_attr_ipv6_iface_nd_stale_tmo.attr)
+                       param = ISCSI_NET_PARAM_IPV6_ND_STALE_TMO;
+               else if (attr == &dev_attr_ipv6_iface_dup_addr_detect_cnt.attr)
+                       param = ISCSI_NET_PARAM_IPV6_DUP_ADDR_DETECT_CNT;
+               else if (attr == &dev_attr_ipv6_iface_router_adv_link_mtu.attr)
+                       param = ISCSI_NET_PARAM_IPV6_RTR_ADV_LINK_MTU;
                else
                        return 0;
        } else {
                return 0;
        }
  
-       return t->attr_is_visible(ISCSI_NET_PARAM, param);
+       switch (param) {
+       case ISCSI_IFACE_PARAM_DEF_TASKMGMT_TMO:
+       case ISCSI_IFACE_PARAM_HDRDGST_EN:
+       case ISCSI_IFACE_PARAM_DATADGST_EN:
+       case ISCSI_IFACE_PARAM_IMM_DATA_EN:
+       case ISCSI_IFACE_PARAM_INITIAL_R2T_EN:
+       case ISCSI_IFACE_PARAM_DATASEQ_INORDER_EN:
+       case ISCSI_IFACE_PARAM_PDU_INORDER_EN:
+       case ISCSI_IFACE_PARAM_ERL:
+       case ISCSI_IFACE_PARAM_MAX_RECV_DLENGTH:
+       case ISCSI_IFACE_PARAM_FIRST_BURST:
+       case ISCSI_IFACE_PARAM_MAX_R2T:
+       case ISCSI_IFACE_PARAM_MAX_BURST:
+       case ISCSI_IFACE_PARAM_CHAP_AUTH_EN:
+       case ISCSI_IFACE_PARAM_BIDI_CHAP_EN:
+       case ISCSI_IFACE_PARAM_DISCOVERY_AUTH_OPTIONAL:
+       case ISCSI_IFACE_PARAM_DISCOVERY_LOGOUT_EN:
+       case ISCSI_IFACE_PARAM_STRICT_LOGIN_COMP_EN:
+       case ISCSI_IFACE_PARAM_INITIATOR_NAME:
+               param_type = ISCSI_IFACE_PARAM;
+               break;
+       default:
+               param_type = ISCSI_NET_PARAM;
+       }
+       return t->attr_is_visible(param_type, param);
  }
  
  static struct attribute *iscsi_iface_attrs[] = {
        &dev_attr_ipv6_iface_link_local_autocfg.attr,
        &dev_attr_iface_mtu.attr,
        &dev_attr_iface_port.attr,
+       &dev_attr_iface_ipaddress_state.attr,
+       &dev_attr_iface_delayed_ack_en.attr,
+       &dev_attr_iface_tcp_nagle_disable.attr,
+       &dev_attr_iface_tcp_wsf_disable.attr,
+       &dev_attr_iface_tcp_wsf.attr,
+       &dev_attr_iface_tcp_timer_scale.attr,
+       &dev_attr_iface_tcp_timestamp_en.attr,
+       &dev_attr_iface_cache_id.attr,
+       &dev_attr_iface_redirect_en.attr,
+       &dev_attr_iface_def_taskmgmt_tmo.attr,
+       &dev_attr_iface_header_digest.attr,
+       &dev_attr_iface_data_digest.attr,
+       &dev_attr_iface_immediate_data.attr,
+       &dev_attr_iface_initial_r2t.attr,
+       &dev_attr_iface_data_seq_in_order.attr,
+       &dev_attr_iface_data_pdu_in_order.attr,
+       &dev_attr_iface_erl.attr,
+       &dev_attr_iface_max_recv_dlength.attr,
+       &dev_attr_iface_first_burst_len.attr,
+       &dev_attr_iface_max_outstanding_r2t.attr,
+       &dev_attr_iface_max_burst_len.attr,
+       &dev_attr_iface_chap_auth.attr,
+       &dev_attr_iface_bidi_chap.attr,
+       &dev_attr_iface_discovery_auth_optional.attr,
+       &dev_attr_iface_discovery_logout.attr,
+       &dev_attr_iface_strict_login_comp_en.attr,
+       &dev_attr_iface_initiator_name.attr,
+       &dev_attr_ipv4_iface_dhcp_dns_address_en.attr,
+       &dev_attr_ipv4_iface_dhcp_slp_da_info_en.attr,
+       &dev_attr_ipv4_iface_tos_en.attr,
+       &dev_attr_ipv4_iface_tos.attr,
+       &dev_attr_ipv4_iface_grat_arp_en.attr,
+       &dev_attr_ipv4_iface_dhcp_alt_client_id_en.attr,
+       &dev_attr_ipv4_iface_dhcp_alt_client_id.attr,
+       &dev_attr_ipv4_iface_dhcp_req_vendor_id_en.attr,
+       &dev_attr_ipv4_iface_dhcp_use_vendor_id_en.attr,
+       &dev_attr_ipv4_iface_dhcp_vendor_id.attr,
+       &dev_attr_ipv4_iface_dhcp_learn_iqn_en.attr,
+       &dev_attr_ipv4_iface_fragment_disable.attr,
+       &dev_attr_ipv4_iface_incoming_forwarding_en.attr,
+       &dev_attr_ipv4_iface_ttl.attr,
+       &dev_attr_ipv6_iface_link_local_state.attr,
+       &dev_attr_ipv6_iface_router_state.attr,
+       &dev_attr_ipv6_iface_grat_neighbor_adv_en.attr,
+       &dev_attr_ipv6_iface_mld_en.attr,
+       &dev_attr_ipv6_iface_flow_label.attr,
+       &dev_attr_ipv6_iface_traffic_class.attr,
+       &dev_attr_ipv6_iface_hop_limit.attr,
+       &dev_attr_ipv6_iface_nd_reachable_tmo.attr,
+       &dev_attr_ipv6_iface_nd_rexmit_time.attr,
+       &dev_attr_ipv6_iface_nd_stale_tmo.attr,
+       &dev_attr_ipv6_iface_dup_addr_detect_cnt.attr,
+       &dev_attr_ipv6_iface_router_adv_link_mtu.attr,
        NULL,
  };
  
@@@ -404,6 -683,61 +683,61 @@@ static struct attribute_group iscsi_ifa
        .is_visible = iscsi_iface_attr_is_visible,
  };
  
+ /* convert iscsi_ipaddress_state values to ascii string name */
+ static const struct {
+       enum iscsi_ipaddress_state      value;
+       char                            *name;
+ } iscsi_ipaddress_state_names[] = {
+       {ISCSI_IPDDRESS_STATE_UNCONFIGURED,     "Unconfigured" },
+       {ISCSI_IPDDRESS_STATE_ACQUIRING,        "Acquiring" },
+       {ISCSI_IPDDRESS_STATE_TENTATIVE,        "Tentative" },
+       {ISCSI_IPDDRESS_STATE_VALID,            "Valid" },
+       {ISCSI_IPDDRESS_STATE_DISABLING,        "Disabling" },
+       {ISCSI_IPDDRESS_STATE_INVALID,          "Invalid" },
+       {ISCSI_IPDDRESS_STATE_DEPRECATED,       "Deprecated" },
+ };
+ char *iscsi_get_ipaddress_state_name(enum iscsi_ipaddress_state port_state)
+ {
+       int i;
+       char *state = NULL;
+       for (i = 0; i < ARRAY_SIZE(iscsi_ipaddress_state_names); i++) {
+               if (iscsi_ipaddress_state_names[i].value == port_state) {
+                       state = iscsi_ipaddress_state_names[i].name;
+                       break;
+               }
+       }
+       return state;
+ }
+ EXPORT_SYMBOL_GPL(iscsi_get_ipaddress_state_name);
+ /* convert iscsi_router_state values to ascii string name */
+ static const struct {
+       enum iscsi_router_state value;
+       char                    *name;
+ } iscsi_router_state_names[] = {
+       {ISCSI_ROUTER_STATE_UNKNOWN,            "Unknown" },
+       {ISCSI_ROUTER_STATE_ADVERTISED,         "Advertised" },
+       {ISCSI_ROUTER_STATE_MANUAL,             "Manual" },
+       {ISCSI_ROUTER_STATE_STALE,              "Stale" },
+ };
+ char *iscsi_get_router_state_name(enum iscsi_router_state router_state)
+ {
+       int i;
+       char *state = NULL;
+       for (i = 0; i < ARRAY_SIZE(iscsi_router_state_names); i++) {
+               if (iscsi_router_state_names[i].value == router_state) {
+                       state = iscsi_router_state_names[i].name;
+                       break;
+               }
+       }
+       return state;
+ }
+ EXPORT_SYMBOL_GPL(iscsi_get_router_state_name);
  struct iscsi_iface *
  iscsi_create_iface(struct Scsi_Host *shost, struct iscsi_transport *transport,
                   uint32_t iface_type, uint32_t iface_num, int dd_size)
@@@ -891,7 -1225,7 +1225,7 @@@ struct bus_type iscsi_flashnode_bus = 
   * Adds a sysfs entry for the flashnode session attributes
   *
   * Returns:
 - *  pointer to allocated flashnode sess on sucess
 + *  pointer to allocated flashnode sess on success
   *  %NULL on failure
   */
  struct iscsi_bus_flash_session *
@@@ -1089,7 -1423,7 +1423,7 @@@ static int iscsi_iter_destroy_flashnode
  }
  
  /**
 - * iscsi_destroy_flashnode_sess - destory flashnode session entry
 + * iscsi_destroy_flashnode_sess - destroy flashnode session entry
   * @fnode_sess: pointer to flashnode session entry to be destroyed
   *
   * Deletes the flashnode session entry and all children flashnode connection
@@@ -1119,7 -1453,7 +1453,7 @@@ static int iscsi_iter_destroy_flashnode
  }
  
  /**
 - * iscsi_destroy_all_flashnode - destory all flashnode session entries
 + * iscsi_destroy_all_flashnode - destroy all flashnode session entries
   * @shost: pointer to host data
   *
   * Destroys all the flashnode session entries and all corresponding children
@@@ -3081,6 -3415,73 +3415,73 @@@ exit_logout_sid
        return err;
  }
  
+ static int
+ iscsi_get_host_stats(struct iscsi_transport *transport, struct nlmsghdr *nlh)
+ {
+       struct iscsi_uevent *ev = nlmsg_data(nlh);
+       struct Scsi_Host *shost = NULL;
+       struct iscsi_internal *priv;
+       struct sk_buff *skbhost_stats;
+       struct nlmsghdr *nlhhost_stats;
+       struct iscsi_uevent *evhost_stats;
+       int host_stats_size = 0;
+       int len, err = 0;
+       char *buf;
+       if (!transport->get_host_stats)
+               return -EINVAL;
+       priv = iscsi_if_transport_lookup(transport);
+       if (!priv)
+               return -EINVAL;
+       host_stats_size = sizeof(struct iscsi_offload_host_stats);
+       len = nlmsg_total_size(sizeof(*ev) + host_stats_size);
+       shost = scsi_host_lookup(ev->u.get_host_stats.host_no);
+       if (!shost) {
+               pr_err("%s: failed. Cound not find host no %u\n",
+                      __func__, ev->u.get_host_stats.host_no);
+               return -ENODEV;
+       }
+       do {
+               int actual_size;
+               skbhost_stats = alloc_skb(len, GFP_KERNEL);
+               if (!skbhost_stats) {
+                       pr_err("cannot deliver host stats: OOM\n");
+                       err = -ENOMEM;
+                       goto exit_host_stats;
+               }
+               nlhhost_stats = __nlmsg_put(skbhost_stats, 0, 0, 0,
+                                     (len - sizeof(*nlhhost_stats)), 0);
+               evhost_stats = nlmsg_data(nlhhost_stats);
+               memset(evhost_stats, 0, sizeof(*evhost_stats));
+               evhost_stats->transport_handle = iscsi_handle(transport);
+               evhost_stats->type = nlh->nlmsg_type;
+               evhost_stats->u.get_host_stats.host_no =
+                                       ev->u.get_host_stats.host_no;
+               buf = (char *)((char *)evhost_stats + sizeof(*evhost_stats));
+               memset(buf, 0, host_stats_size);
+               err = transport->get_host_stats(shost, buf, host_stats_size);
+               actual_size = nlmsg_total_size(sizeof(*ev) + host_stats_size);
+               skb_trim(skbhost_stats, NLMSG_ALIGN(actual_size));
+               nlhhost_stats->nlmsg_len = actual_size;
+               err = iscsi_multicast_skb(skbhost_stats, ISCSI_NL_GRP_ISCSID,
+                                         GFP_KERNEL);
+       } while (err < 0 && err != -ECONNREFUSED);
+ exit_host_stats:
+       scsi_host_put(shost);
+       return err;
+ }
  static int
  iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
  {
                err = iscsi_set_chap(transport, ev,
                                     nlmsg_attrlen(nlh, sizeof(*ev)));
                break;
+       case ISCSI_UEVENT_GET_HOST_STATS:
+               err = iscsi_get_host_stats(transport, nlh);
+               break;
        default:
                err = -ENOSYS;
                break;
@@@ -3368,6 -3772,7 +3772,7 @@@ iscsi_conn_attr(ipv6_flow_label, ISCSI_
  iscsi_conn_attr(is_fw_assigned_ipv6, ISCSI_PARAM_IS_FW_ASSIGNED_IPV6);
  iscsi_conn_attr(tcp_xmit_wsf, ISCSI_PARAM_TCP_XMIT_WSF);
  iscsi_conn_attr(tcp_recv_wsf, ISCSI_PARAM_TCP_RECV_WSF);
+ iscsi_conn_attr(local_ipaddr, ISCSI_PARAM_LOCAL_IPADDR);
  
  
  #define iscsi_conn_ep_attr_show(param)                                        \
@@@ -3437,6 -3842,7 +3842,7 @@@ static struct attribute *iscsi_conn_att
        &dev_attr_conn_is_fw_assigned_ipv6.attr,
        &dev_attr_conn_tcp_xmit_wsf.attr,
        &dev_attr_conn_tcp_recv_wsf.attr,
+       &dev_attr_conn_local_ipaddr.attr,
        NULL,
  };
  
@@@ -3506,6 -3912,8 +3912,8 @@@ static umode_t iscsi_conn_attr_is_visib
                param = ISCSI_PARAM_TCP_XMIT_WSF;
        else if (attr == &dev_attr_conn_tcp_recv_wsf.attr)
                param = ISCSI_PARAM_TCP_RECV_WSF;
+       else if (attr == &dev_attr_conn_local_ipaddr.attr)
+               param = ISCSI_PARAM_LOCAL_IPADDR;
        else {
                WARN_ONCE(1, "Invalid conn attr");
                return 0;
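
The long if/else chain above feeds sysfs's .is_visible hook: the attribute group registers every possible file once, and this callback is asked per attribute whether to actually create it. What the patch adds is classification -- iSCSI protocol parameters are mapped to ISCSI_IFACE_PARAM by the trailing switch, while network settings stay ISCSI_NET_PARAM, so a transport's attr_is_visible() can veto each class independently. The underlying contract, in sketch form (device_supports() is hypothetical):

    /* Return 0 to suppress creation of the sysfs file, or the mode bits
     * (e.g. attr->mode) to expose it. */
    static umode_t example_is_visible(struct kobject *kobj,
                                      struct attribute *attr, int idx)
    {
            struct device *dev = container_of(kobj, struct device, kobj);

            if (!device_supports(dev, attr))    /* hypothetical capability test */
                    return 0;
            return attr->mode;
    }
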
diff --combined drivers/staging/android/Kconfig
index 22cf17dcb7da6e4b17ff3e40ab487409e2ae75ec,b91c758883bf415162fcb9e67da5a5099ee69a58..b6b869261f32315a7061d7e99c48eabcdcc5065e
@@@ -2,6 -2,7 +2,6 @@@ menu "Android
  
  config ANDROID
        bool "Android Drivers"
 -      default N
        ---help---
          Enable support for various drivers needed on the Android platform
  
@@@ -59,6 -60,7 +59,6 @@@ config ANDROID_TIMED_GPI
  
  config ANDROID_LOW_MEMORY_KILLER
        bool "Android Low Memory Killer"
 -      default N
        ---help---
          Registers processes to be killed when memory is low
  
@@@ -98,6 -100,8 +98,8 @@@ config SW_SYNC_USE
          *WARNING* improper use of this can result in deadlocking kernel
          drivers from userspace.
  
+ source "drivers/staging/android/ion/Kconfig"
  endif # if ANDROID
  
  endmenu
diff --combined drivers/usb/core/message.c
index 874d1a406ebc3a7e06d2f8ad10448c6786d8265c,f829a1aad1c383e71d06ca26914ecff715fa598d..5239e51630749aa49b40ef877e211c19a3b503a0
@@@ -6,7 -6,6 +6,6 @@@
  #include <linux/usb.h>
  #include <linux/module.h>
  #include <linux/slab.h>
- #include <linux/init.h>
  #include <linux/mm.h>
  #include <linux/timer.h>
  #include <linux/ctype.h>
@@@ -179,7 -178,7 +178,7 @@@ EXPORT_SYMBOL_GPL(usb_control_msg)
   *
   * Return:
   * If successful, 0. Otherwise a negative error number. The number of actual
 - * bytes transferred will be stored in the @actual_length paramater.
 + * bytes transferred will be stored in the @actual_length parameter.
   */
  int usb_interrupt_msg(struct usb_device *usb_dev, unsigned int pipe,
                      void *data, int len, int *actual_length, int timeout)
@@@ -218,7 -217,7 +217,7 @@@ EXPORT_SYMBOL_GPL(usb_interrupt_msg)
   *
   * Return:
   * If successful, 0. Otherwise a negative error number. The number of actual
-  * bytes transferred will be stored in the @actual_length paramater.
+  * bytes transferred will be stored in the @actual_length parameter.
   *
   */
  int usb_bulk_msg(struct usb_device *usb_dev, unsigned int pipe,
@@@ -518,7 -517,7 +517,7 @@@ void usb_sg_wait(struct usb_sg_request 
                io->urbs[i]->dev = io->dev;
                retval = usb_submit_urb(io->urbs[i], GFP_ATOMIC);
  
-               /* after we submit, let completions or cancelations fire;
+               /* after we submit, let completions or cancellations fire;
                 * we handshake using io->status.
                 */
                spin_unlock_irq(&io->lock);
diff --combined drivers/usb/core/urb.c
index e726f5e804480bf62c52a04c051fd71cb4fa4708,9ff665f1322feb835bfc35e2034107da90d54dcd..991386ceb4ecdfdb3b2637e6e61fe8ced8b27800
@@@ -2,7 -2,6 +2,6 @@@
  #include <linux/string.h>
  #include <linux/bitops.h>
  #include <linux/slab.h>
- #include <linux/init.h>
  #include <linux/log2.h>
  #include <linux/usb.h>
  #include <linux/wait.h>
@@@ -53,7 -52,7 +52,7 @@@ EXPORT_SYMBOL_GPL(usb_init_urb)
   *    valid options for this.
   *
   * Creates an urb for the USB driver to use, initializes a few internal
-  * structures, incrementes the usage counter, and returns a pointer to it.
+  * structures, increments the usage counter, and returns a pointer to it.
   *
   * If the driver want to use this urb for interrupt, control, or bulk
   * endpoints, pass '0' as the number of iso packets.
@@@ -281,7 -280,7 +280,7 @@@ EXPORT_SYMBOL_GPL(usb_unanchor_urb)
   *
   * Device drivers must explicitly request that repetition, by ensuring that
   * some URB is always on the endpoint's queue (except possibly for short
-  * periods during completion callacks).  When there is no longer an urb
+  * periods during completion callbacks).  When there is no longer an urb
   * queued, the endpoint's bandwidth reservation is canceled.  This means
   * drivers can use their completion handlers to ensure they keep bandwidth
   * they need, by reinitializing and resubmitting the just-completed urb
   */
  int usb_submit_urb(struct urb *urb, gfp_t mem_flags)
  {
+       static int                      pipetypes[4] = {
+               PIPE_CONTROL, PIPE_ISOCHRONOUS, PIPE_BULK, PIPE_INTERRUPT
+       };
        int                             xfertype, max;
        struct usb_device               *dev;
        struct usb_host_endpoint        *ep;
        int                             is_out;
+       unsigned int                    allowed;
  
        if (!urb || !urb->complete)
                return -EINVAL;
        if (urb->transfer_buffer_length > INT_MAX)
                return -EMSGSIZE;
  
- #ifdef DEBUG
-       /* stuff that drivers shouldn't do, but which shouldn't
+       /*
+        * stuff that drivers shouldn't do, but which shouldn't
         * cause problems in HCDs if they get it wrong.
         */
-       {
-       unsigned int    allowed;
-       static int pipetypes[4] = {
-               PIPE_CONTROL, PIPE_ISOCHRONOUS, PIPE_BULK, PIPE_INTERRUPT
-       };
  
        /* Check that the pipe's type matches the endpoint's type */
        if (usb_pipetype(urb->pipe) != pipetypes[xfertype])
        if (allowed != urb->transfer_flags)
                dev_WARN(&dev->dev, "BOGUS urb flags, %x --> %x\n",
                        urb->transfer_flags, allowed);
-       }
- #endif
        /*
         * Force periodic transfer intervals to be legal values that are
         * a power of two (so HCDs don't need to).
                /* too small? */
                switch (dev->speed) {
                case USB_SPEED_WIRELESS:
-                       if (urb->interval < 6)
+                       if ((urb->interval < 6)
+                               && (xfertype == USB_ENDPOINT_XFER_INT))
                                return -EINVAL;
-                       break;
                default:
                        if (urb->interval <= 0)
                                return -EINVAL;
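
The pipe/endpoint consistency check introduced above means a driver must build its URB with a pipe whose type matches the endpoint it targets. A minimal sketch of a conforming interrupt-URB submission; the device, endpoint, buffer and completion names here are hypothetical, not part of this commit:

	#include <linux/usb.h>

	/* Hypothetical completion handler; urb->status is 0 on success,
	 * a negative errno otherwise. */
	static void example_int_complete(struct urb *urb)
	{
	}

	static int example_submit_int_urb(struct usb_device *udev,
					  struct usb_endpoint_descriptor *ep,
					  void *buf, int len)
	{
		struct urb *urb = usb_alloc_urb(0, GFP_KERNEL);

		if (!urb)
			return -ENOMEM;

		/* usb_rcvintpipe() yields a PIPE_INTERRUPT pipe, so the
		 * pipetypes[] sanity check in usb_submit_urb() passes. */
		usb_fill_int_urb(urb, udev,
				 usb_rcvintpipe(udev, ep->bEndpointAddress),
				 buf, len, example_int_complete, NULL,
				 ep->bInterval);

		return usb_submit_urb(urb, GFP_KERNEL);
	}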
@@@ -834,7 -831,7 +831,7 @@@ EXPORT_SYMBOL_GPL(usb_unpoison_anchored
   *
   * this allows all outstanding URBs to be unlinked starting
   * from the back of the queue. This function is asynchronous.
 - * The unlinking is just tiggered. It may happen after this
 + * The unlinking is just triggered. It may happen after this
   * function has returned.
   *
   * This routine should not be called by a driver after its disconnect
diff --combined fs/buffer.c
index a20f2eb107ed2b9345e6829b7feb0241fc2570a1,27265a8b43c1661f85d02b6fb931e8311ea7fe02..8c53a2b15ecbaffcc19ab5f45b53c19174998c91
@@@ -654,14 -654,16 +654,16 @@@ EXPORT_SYMBOL(mark_buffer_dirty_inode)
  static void __set_page_dirty(struct page *page,
                struct address_space *mapping, int warn)
  {
-       spin_lock_irq(&mapping->tree_lock);
+       unsigned long flags;
+       spin_lock_irqsave(&mapping->tree_lock, flags);
        if (page->mapping) {    /* Race with truncate? */
                WARN_ON_ONCE(warn && !PageUptodate(page));
                account_page_dirtied(page, mapping);
                radix_tree_tag_set(&mapping->page_tree,
                                page_index(page), PAGECACHE_TAG_DIRTY);
        }
-       spin_unlock_irq(&mapping->tree_lock);
+       spin_unlock_irqrestore(&mapping->tree_lock, flags);
        __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
  }
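
The hunk above switches __set_page_dirty() from spin_lock_irq() to spin_lock_irqsave(), so it no longer unconditionally re-enables interrupts on unlock. A generic sketch of the difference; the lock and function names are illustrative only:

	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(example_lock);	/* hypothetical lock */

	static void example_update(void)
	{
		unsigned long flags;

		/* The irqsave/irqrestore pair records the caller's interrupt
		 * state in 'flags' and restores exactly that state, so this is
		 * safe whether or not the caller already disabled IRQs;
		 * spin_lock_irq()/spin_unlock_irq() would blindly re-enable
		 * them on unlock. */
		spin_lock_irqsave(&example_lock, flags);
		/* ... critical section ... */
		spin_unlock_irqrestore(&example_lock, flags);
	}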
  
@@@ -1312,7 -1314,7 +1314,7 @@@ static void bh_lru_install(struct buffe
                }
                while (out < BH_LRU_SIZE)
                        bhs[out++] = NULL;
-               memcpy(__this_cpu_ptr(&bh_lrus.bhs), bhs, sizeof(bhs));
+               memcpy(this_cpu_ptr(&bh_lrus.bhs), bhs, sizeof(bhs));
        }
        bh_lru_unlock();
  
@@@ -2982,11 -2984,11 +2984,11 @@@ static void guard_bh_eod(int rw, struc
         * let it through, and the IO layer will turn it into
         * an EIO.
         */
-       if (unlikely(bio->bi_sector >= maxsector))
+       if (unlikely(bio->bi_iter.bi_sector >= maxsector))
                return;
  
-       maxsector -= bio->bi_sector;
-       bytes = bio->bi_size;
+       maxsector -= bio->bi_iter.bi_sector;
+       bytes = bio->bi_iter.bi_size;
        if (likely((bytes >> 9) <= maxsector))
                return;
  
        bytes = maxsector << 9;
  
        /* Truncate the bio.. */
-       bio->bi_size = bytes;
+       bio->bi_iter.bi_size = bytes;
        bio->bi_io_vec[0].bv_len = bytes;
  
        /* ..and clear the end of the buffer for reads */
@@@ -3029,14 -3031,14 +3031,14 @@@ int _submit_bh(int rw, struct buffer_he
         */
        bio = bio_alloc(GFP_NOIO, 1);
  
-       bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
+       bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
        bio->bi_bdev = bh->b_bdev;
        bio->bi_io_vec[0].bv_page = bh->b_page;
        bio->bi_io_vec[0].bv_len = bh->b_size;
        bio->bi_io_vec[0].bv_offset = bh_offset(bh);
  
        bio->bi_vcnt = 1;
-       bio->bi_size = bh->b_size;
+       bio->bi_iter.bi_size = bh->b_size;
  
        bio->bi_end_io = end_bio_bh_io_sync;
        bio->bi_private = bh;
@@@ -3086,7 -3088,7 +3088,7 @@@ EXPORT_SYMBOL(submit_bh)
   * until the buffer gets unlocked).
   *
   * ll_rw_block sets b_end_io to simple completion handler that marks
 - * the buffer up-to-date (if approriate), unlocks the buffer and wakes
 + * the buffer up-to-date (if appropriate), unlocks the buffer and wakes
   * any waiters. 
   *
   * All of the buffers must be for the same device, and must also be a
diff --combined include/linux/gpio.h
index c177c48f60a208132d514fb6558f46f4c5aa3716,b581b13d29d95aef275b1b878927f5d94e2572e4..85aa5d0b9357ae2b26d29e858d63329bfc6e9d7f
@@@ -3,7 -3,7 +3,7 @@@
  
  #include <linux/errno.h>
  
 -/* see Documentation/gpio.txt */
 +/* see Documentation/gpio/gpio-legacy.txt */
  
  /* make these flag values available regardless of GPIO kconfig options */
  #define GPIOF_DIR_OUT (0 << 0)
@@@ -90,7 -90,6 +90,6 @@@ void devm_gpio_free(struct device *dev
  
  #include <linux/kernel.h>
  #include <linux/types.h>
- #include <linux/errno.h>
  #include <linux/bug.h>
  #include <linux/pinctrl/pinctrl.h>
  
diff --combined include/linux/pipe_fs_i.h
index 11982d0ce11bafa710bbb321b50cef4f5448f9fa,ab575269211375c3e4d82bb14c5602191ae37484..4d9389c79e61b4abe20666aa9f9c89d676a8c6b1
@@@ -35,7 -35,7 +35,7 @@@ struct pipe_buffer 
   *    @tmp_page: cached released page
   *    @readers: number of current readers of this pipe
   *    @writers: number of current writers of this pipe
 - *    @files: number of struct file refering this pipe (protected by ->i_lock)
 + *    @files: number of struct file referring this pipe (protected by ->i_lock)
   *    @waiting_writers: number of writers blocked waiting for room
   *    @r_counter: reader counter
   *    @w_counter: writer counter
@@@ -157,6 -157,8 +157,8 @@@ int generic_pipe_buf_confirm(struct pip
  int generic_pipe_buf_steal(struct pipe_inode_info *, struct pipe_buffer *);
  void generic_pipe_buf_release(struct pipe_inode_info *, struct pipe_buffer *);
  
+ extern const struct pipe_buf_operations nosteal_pipe_buf_ops;
  /* for F_SETPIPE_SZ and F_GETPIPE_SZ */
  long pipe_fcntl(struct file *, unsigned int, unsigned long arg);
  struct pipe_inode_info *get_pipe_info(struct file *file);
diff --combined include/linux/skbuff.h
index cde842513df20f6e1e981a05259efa776f2b3dda,f589c9af8cbf1250da1945bac436f27d92987e80..fc71710fba452d3eaa552fa1d47e1d2309a9340e
  #include <linux/netdev_features.h>
  #include <net/flow_keys.h>
  
+ /* A. Checksumming of received packets by device.
+  *
+  * CHECKSUM_NONE:
+  *
+  *   Device failed to checksum this packet e.g. due to lack of capabilities.
+  *   The packet contains full (though not verified) checksum in packet but
+  *   not in skb->csum. Thus, skb->csum is undefined in this case.
+  *
+  * CHECKSUM_UNNECESSARY:
+  *
+  *   The hardware you're dealing with doesn't calculate the full checksum
+  *   (as in CHECKSUM_COMPLETE), but it does parse headers and verify checksums
+  *   for specific protocols e.g. TCP/UDP/SCTP, then, for such packets it will
+  *   set CHECKSUM_UNNECESSARY if their checksums are okay. skb->csum is still
+  *   undefined in this case though. It is a bad option, but, unfortunately,
+  *   nowadays most vendors do this. Apparently with the secret goal to sell
+  *   you new devices, when you will add new protocol to your host, f.e. IPv6 8)
+  *
+  * CHECKSUM_COMPLETE:
+  *
+  *   This is the most generic way. The device supplied checksum of the _whole_
+  *   packet as seen by netif_rx() and fills out in skb->csum. Meaning, the
+  *   hardware doesn't need to parse L3/L4 headers to implement this.
+  *
+  *   Note: Even if device supports only some protocols, but is able to produce
+  *   skb->csum, it MUST use CHECKSUM_COMPLETE, not CHECKSUM_UNNECESSARY.
+  *
+  * CHECKSUM_PARTIAL:
+  *
+  *   This is identical to the case for output below. This may occur on a packet
+  *   received directly from another Linux OS, e.g., a virtualized Linux kernel
+  *   on the same host. The packet can be treated in the same way as
+  *   CHECKSUM_UNNECESSARY, except that on output (i.e., forwarding) the
+  *   checksum must be filled in by the OS or the hardware.
+  *
+  * B. Checksumming on output.
+  *
+  * CHECKSUM_NONE:
+  *
+  *   The skb was already checksummed by the protocol, or a checksum is not
+  *   required.
+  *
+  * CHECKSUM_PARTIAL:
+  *
+  *   The device is required to checksum the packet as seen by hard_start_xmit()
+  *   from skb->csum_start up to the end, and to record/write the checksum at
+  *   offset skb->csum_start + skb->csum_offset.
+  *
+  *   The device must show its capabilities in dev->features, set up at device
+  *   setup time, e.g. netdev_features.h:
+  *
+  *    NETIF_F_HW_CSUM - It's a clever device, it's able to checksum everything.
+  *    NETIF_F_IP_CSUM - Device is dumb, it's able to checksum only TCP/UDP over
+  *                      IPv4. Sigh. Vendors like this way for an unknown reason.
+  *                      Though, see comment above about CHECKSUM_UNNECESSARY. 8)
+  *    NETIF_F_IPV6_CSUM - About as dumb as the last one but does IPv6 instead.
+  *    NETIF_F_...     - Well, you get the picture.
+  *
+  * CHECKSUM_UNNECESSARY:
+  *
+  *   Normally, the device will do per protocol specific checksumming. Protocol
+  *   implementations that do not want the NIC to perform the checksum
+  *   calculation should use this flag in their outgoing skbs.
+  *
+  *    NETIF_F_FCOE_CRC - This indicates that the device can do FCoE FC CRC
+  *                       offload. Correspondingly, the FCoE protocol driver
+  *                       stack should use CHECKSUM_UNNECESSARY.
+  *
+  * Any questions? No questions, good.         --ANK
+  */
  /* Don't change this without changing skb_csum_unnecessary! */
- #define CHECKSUM_NONE 0
- #define CHECKSUM_UNNECESSARY 1
- #define CHECKSUM_COMPLETE 2
- #define CHECKSUM_PARTIAL 3
+ #define CHECKSUM_NONE         0
+ #define CHECKSUM_UNNECESSARY  1
+ #define CHECKSUM_COMPLETE     2
+ #define CHECKSUM_PARTIAL      3
  
  #define SKB_DATA_ALIGN(X)     (((X) + (SMP_CACHE_BYTES - 1)) & \
                                 ~(SMP_CACHE_BYTES - 1))
                         SKB_DATA_ALIGN(sizeof(struct sk_buff)) +       \
                         SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
  
- /* A. Checksumming of received packets by device.
-  *
-  *    NONE: device failed to checksum this packet.
-  *            skb->csum is undefined.
-  *
-  *    UNNECESSARY: device parsed packet and wouldbe verified checksum.
-  *            skb->csum is undefined.
-  *          It is bad option, but, unfortunately, many of vendors do this.
-  *          Apparently with secret goal to sell you new device, when you
-  *          will add new protocol to your host. F.e. IPv6. 8)
-  *
-  *    COMPLETE: the most generic way. Device supplied checksum of _all_
-  *        the packet as seen by netif_rx in skb->csum.
-  *        NOTE: Even if device supports only some protocols, but
-  *        is able to produce some skb->csum, it MUST use COMPLETE,
-  *        not UNNECESSARY.
-  *
-  *    PARTIAL: identical to the case for output below.  This may occur
-  *        on a packet received directly from another Linux OS, e.g.,
-  *        a virtualised Linux kernel on the same host.  The packet can
-  *        be treated in the same way as UNNECESSARY except that on
-  *        output (i.e., forwarding) the checksum must be filled in
-  *        by the OS or the hardware.
-  *
-  * B. Checksumming on output.
-  *
-  *    NONE: skb is checksummed by protocol or csum is not required.
-  *
-  *    PARTIAL: device is required to csum packet as seen by hard_start_xmit
-  *    from skb->csum_start to the end and to record the checksum
-  *    at skb->csum_start + skb->csum_offset.
-  *
-  *    Device must show its capabilities in dev->features, set
-  *    at device setup time.
-  *    NETIF_F_HW_CSUM - it is clever device, it is able to checksum
-  *                      everything.
-  *    NETIF_F_IP_CSUM - device is dumb. It is able to csum only
-  *                      TCP/UDP over IPv4. Sigh. Vendors like this
-  *                      way by an unknown reason. Though, see comment above
-  *                      about CHECKSUM_UNNECESSARY. 8)
-  *    NETIF_F_IPV6_CSUM about as dumb as the last one but does IPv6 instead.
-  *
-  *    UNNECESSARY: device will do per protocol specific csum. Protocol drivers
-  *    that do not want net to perform the checksum calculation should use
-  *    this flag in their outgoing skbs.
-  *    NETIF_F_FCOE_CRC  this indicates the device can do FCoE FC CRC
-  *                      offload. Correspondingly, the FCoE protocol driver
-  *                      stack should use CHECKSUM_UNNECESSARY.
-  *
-  *    Any questions? No questions, good.              --ANK
-  */
  struct net_device;
  struct scatterlist;
  struct pipe_inode_info;
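
To make the rewritten CHECKSUM_* documentation concrete, here is a minimal sketch of a receive path choosing skb->ip_summed from hardware hints; the descriptor flags (csum_complete, csum_verified) and the function name are hypothetical:

	#include <linux/skbuff.h>
	#include <linux/netdevice.h>

	static void example_rx_checksum(struct sk_buff *skb,
					struct net_device *dev,
					bool csum_complete, __wsum hw_csum,
					bool csum_verified)
	{
		skb->ip_summed = CHECKSUM_NONE;	/* default: stack verifies */

		if (!(dev->features & NETIF_F_RXCSUM))
			return;

		if (csum_complete) {
			/* HW checksummed the whole packet: the most generic
			 * form, preferred over CHECKSUM_UNNECESSARY. */
			skb->csum = hw_csum;
			skb->ip_summed = CHECKSUM_COMPLETE;
		} else if (csum_verified) {
			/* HW only validated known protocols (e.g. TCP/UDP). */
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		}
	}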
@@@ -703,15 -722,78 +722,78 @@@ unsigned int skb_find_text(struct sk_bu
                           unsigned int to, struct ts_config *config,
                           struct ts_state *state);
  
- void __skb_get_rxhash(struct sk_buff *skb);
- static inline __u32 skb_get_rxhash(struct sk_buff *skb)
+ /*
+  * Packet hash types specify the type of hash in skb_set_hash.
+  *
+  * Hash types refer to the protocol layer addresses which are used to
+  * construct a packet's hash. The hashes are used to differentiate or identify
+  * flows of the protocol layer for the hash type. Hash types are either
+  * layer-2 (L2), layer-3 (L3), or layer-4 (L4).
+  *
+  * Properties of hashes:
+  *
+  * 1) Two packets in different flows have different hash values
+  * 2) Two packets in the same flow should have the same hash value
+  *
+  * A hash at a higher layer is considered to be more specific. A driver should
+  * set the most specific hash possible.
+  *
+  * A driver cannot indicate a more specific hash than the layer at which a hash
+  * was computed. For instance an L3 hash cannot be set as an L4 hash.
+  *
+  * A driver may indicate a hash level which is less specific than the
+  * actual layer the hash was computed on. For instance, a hash computed
+  * at L4 may be considered an L3 hash. This should only be done if the
+  * driver can't unambiguously determine that the HW computed the hash at
+  * the higher layer. Note that the "should" in the second property above
+  * permits this.
+  */
+ enum pkt_hash_types {
+       PKT_HASH_TYPE_NONE,     /* Undefined type */
+       PKT_HASH_TYPE_L2,       /* Input: src_MAC, dest_MAC */
+       PKT_HASH_TYPE_L3,       /* Input: src_IP, dst_IP */
+       PKT_HASH_TYPE_L4,       /* Input: src_IP, dst_IP, src_port, dst_port */
+ };
+ static inline void
+ skb_set_hash(struct sk_buff *skb, __u32 hash, enum pkt_hash_types type)
+ {
+       skb->l4_rxhash = (type == PKT_HASH_TYPE_L4);
+       skb->rxhash = hash;
+ }
+ void __skb_get_hash(struct sk_buff *skb);
+ static inline __u32 skb_get_hash(struct sk_buff *skb)
  {
        if (!skb->l4_rxhash)
-               __skb_get_rxhash(skb);
+               __skb_get_hash(skb);
  
        return skb->rxhash;
  }
  
+ static inline __u32 skb_get_hash_raw(const struct sk_buff *skb)
+ {
+       return skb->rxhash;
+ }
+ static inline void skb_clear_hash(struct sk_buff *skb)
+ {
+       skb->rxhash = 0;
+       skb->l4_rxhash = 0;
+ }
+ static inline void skb_clear_hash_if_not_l4(struct sk_buff *skb)
+ {
+       if (!skb->l4_rxhash)
+               skb_clear_hash(skb);
+ }
+ static inline void skb_copy_hash(struct sk_buff *to, const struct sk_buff *from)
+ {
+       to->rxhash = from->rxhash;
+       to->l4_rxhash = from->l4_rxhash;
+ };
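
A short sketch of how a driver would use the new helper on receive, reporting the most specific hash level it can guarantee; the descriptor layout here is hypothetical:

	#include <linux/skbuff.h>

	/* Hypothetical RSS fields from an RX descriptor. */
	struct example_rx_desc {
		u32 rss_hash;
		bool l4_valid;
	};

	static void example_report_hash(struct sk_buff *skb,
					const struct example_rx_desc *desc)
	{
		/* Claim an L4 (4-tuple) hash only when the HW says so;
		 * otherwise be conservative and report L3. */
		skb_set_hash(skb, desc->rss_hash,
			     desc->l4_valid ? PKT_HASH_TYPE_L4
					    : PKT_HASH_TYPE_L3);
	}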
  #ifdef NET_SKBUFF_DATA_USES_OFFSET
  static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
  {
@@@ -750,7 -832,7 +832,7 @@@ static inline struct skb_shared_hwtstam
   */
  static inline int skb_queue_empty(const struct sk_buff_head *list)
  {
-       return list->next == (struct sk_buff *)list;
+       return list->next == (const struct sk_buff *) list;
  }
  
  /**
  static inline bool skb_queue_is_last(const struct sk_buff_head *list,
                                     const struct sk_buff *skb)
  {
-       return skb->next == (struct sk_buff *)list;
+       return skb->next == (const struct sk_buff *) list;
  }
  
  /**
  static inline bool skb_queue_is_first(const struct sk_buff_head *list,
                                      const struct sk_buff *skb)
  {
-       return skb->prev == (struct sk_buff *)list;
+       return skb->prev == (const struct sk_buff *) list;
  }
  
  /**
@@@ -1638,6 -1720,11 +1720,11 @@@ static inline void skb_set_mac_header(s
        skb->mac_header += offset;
  }
  
+ static inline void skb_pop_mac_header(struct sk_buff *skb)
+ {
+       skb->mac_header = skb->network_header;
+ }
  static inline void skb_probe_transport_header(struct sk_buff *skb,
                                              const int offset_hint)
  {
@@@ -1951,7 -2038,7 +2038,7 @@@ static inline void skb_propagate_pfmema
  }
  
  /**
 - * skb_frag_page - retrieve the page refered to by a paged fragment
 + * skb_frag_page - retrieve the page referred to by a paged fragment
   * @frag: the paged fragment
   *
   * Returns the &struct page associated with @frag.
@@@ -2363,9 -2450,13 +2450,13 @@@ int skb_splice_bits(struct sk_buff *skb
                    struct pipe_inode_info *pipe, unsigned int len,
                    unsigned int flags);
  void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to);
+ unsigned int skb_zerocopy_headlen(const struct sk_buff *from);
+ void skb_zerocopy(struct sk_buff *to, const struct sk_buff *from,
+                 int len, int hlen);
  void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len);
  int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen);
  void skb_scrub_packet(struct sk_buff *skb, bool xnet);
+ unsigned int skb_gso_transport_seglen(const struct sk_buff *skb);
  struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features);
  
  struct skb_checksum_ops {
@@@ -2392,6 -2483,24 +2483,24 @@@ static inline void *skb_header_pointer(
        return buffer;
  }
  
+ /**
+  *    skb_needs_linearize - check if we need to linearize a given skb
+  *                          depending on the given device features.
+  *    @skb: socket buffer to check
+  *    @features: net device features
+  *
+  *    Returns true if either:
+  *    1. skb has frag_list and the device doesn't support FRAGLIST, or
+  *    2. skb is fragmented and the device does not support SG.
+  */
+ static inline bool skb_needs_linearize(struct sk_buff *skb,
+                                      netdev_features_t features)
+ {
+       return skb_is_nonlinear(skb) &&
+              ((skb_has_frag_list(skb) && !(features & NETIF_F_FRAGLIST)) ||
+               (skb_shinfo(skb)->nr_frags && !(features & NETIF_F_SG)));
+ }
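
Typical use of this helper, moved here from net/core/dev.c, in a driver transmit path; a sketch only, with the function name hypothetical:

	#include <linux/netdevice.h>

	static int example_xmit_prep(struct sk_buff *skb,
				     struct net_device *dev)
	{
		/* Fall back to a linear skb when the device cannot handle
		 * the fragment layout; skb_linearize() returns 0 on success
		 * or -ENOMEM on allocation failure. */
		if (skb_needs_linearize(skb, dev->features))
			return skb_linearize(skb);

		return 0;
	}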
  static inline void skb_copy_from_linear_data(const struct sk_buff *skb,
                                             void *to,
                                             const unsigned int len)
@@@ -2526,6 -2635,10 +2635,10 @@@ static inline void sw_tx_timestamp(stru
   * Ethernet MAC Drivers should call this function in their hard_xmit()
   * function immediately before giving the sk_buff to the MAC hardware.
   *
+  * Specifically, one should make absolutely sure that this function is
+  * called before TX completion of this packet can trigger.  Otherwise
+  * the packet could potentially already be freed.
+  *
   * @skb: A socket buffer.
   */
  static inline void skb_tx_timestamp(struct sk_buff *skb)
@@@ -2786,6 -2899,8 +2899,8 @@@ static inline void skb_checksum_none_as
  
  bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off);
  
+ int skb_checksum_setup(struct sk_buff *skb, bool recalculate);
  u32 __skb_get_poff(const struct sk_buff *skb);
  
  /**
diff --combined include/linux/spi/spi.h
index 8d3a37bc6110274b1d0d1b48f2ca213b5d1ee041,4203c66d88033269692fa38ce50aebe17fee9419..2c9cdef0598f33a2108b9b557c729a2c65bd1d9c
@@@ -75,6 -75,7 +75,7 @@@ struct spi_device 
        struct spi_master       *master;
        u32                     max_speed_hz;
        u8                      chip_select;
+       u8                      bits_per_word;
        u16                     mode;
  #define       SPI_CPHA        0x01                    /* clock phase */
  #define       SPI_CPOL        0x02                    /* clock polarity */
@@@ -92,7 -93,6 +93,6 @@@
  #define       SPI_TX_QUAD     0x200                   /* transmit with 4 wires */
  #define       SPI_RX_DUAL     0x400                   /* receive with 2 wires */
  #define       SPI_RX_QUAD     0x800                   /* receive with 4 wires */
-       u8                      bits_per_word;
        int                     irq;
        void                    *controller_state;
        void                    *controller_data;
@@@ -234,7 -234,7 +234,7 @@@ static inline void spi_unregister_drive
   * @mode_bits: flags understood by this controller driver
   * @bits_per_word_mask: A mask indicating which values of bits_per_word are
   *    supported by the driver. Bit n indicates that a bits_per_word n+1 is
 - *    suported. If set, the SPI core will reject any transfer with an
 + *    supported. If set, the SPI core will reject any transfer with an
   *    unsupported bits_per_word. If not set, this value is simply ignored,
   *    and it's up to the individual driver to perform any validation.
   * @min_speed_hz: Lowest supported transfer speed
   * @cur_msg: the currently in-flight message
   * @cur_msg_prepared: spi_prepare_message was called for the currently
   *                    in-flight message
 - * @xfer_completion: used by core tranfer_one_message()
 + * @xfer_completion: used by core transfer_one_message()
   * @busy: message pump is busy
   * @running: message pump is running
   * @rt: whether this queue is set to run as a realtime task
   *    message while queuing transfers that arrive in the meantime. When the
   *    driver is finished with this message, it must call
   *    spi_finalize_current_message() so the subsystem can issue the next
-  *    transfer
+  *    message
   * @unprepare_transfer_hardware: there are currently no more messages on the
   *    queue so the subsystem notifies the driver that it may relax the
   *    hardware by issuing this call
-  * @set_cs: assert or deassert chip select, true to assert.  May be called
+  * @set_cs: set the logic level of the chip select line.  May be called
   *          from interrupt context.
   * @prepare_message: set up the controller to transfer a single message,
   *                   for example doing DMA mapping.  Called from threaded
   *                   context.
-  * @transfer_one: transfer a single spi_transfer. When the
-  *              driver is finished with this transfer it must call
-  *              spi_finalize_current_transfer() so the subsystem can issue
-  *                the next transfer
+  * @transfer_one: transfer a single spi_transfer.
+  *                  - return 0 if the transfer is finished,
+  *                  - return 1 if the transfer is still in progress. When
+  *                    the driver is finished with this transfer it must
+  *                    call spi_finalize_current_transfer() so the subsystem
+  *                    can issue the next transfer. Note: transfer_one and
+  *                    transfer_one_message are mutually exclusive; when both
+  *                    are set, the generic subsystem does not call your
+  *                    transfer_one callback.
   * @unprepare_message: undo any work done by prepare_message().
   * @cs_gpios: Array of GPIOs to use as chip select lines; one per CS
   *    number. Any individual value may be -ENOENT for CS lines that
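
The transfer_one contract documented above (return 0 when done, 1 while still in flight, finishing via spi_finalize_current_transfer()) looks roughly like this in a controller driver; a sketch only, with hardware start and IRQ plumbing omitted:

	#include <linux/spi/spi.h>

	static int example_transfer_one(struct spi_master *master,
					struct spi_device *spi,
					struct spi_transfer *xfer)
	{
		/* ... program FIFOs/DMA from xfer->tx_buf / xfer->rx_buf ... */

		/* Returning 1 tells the core the transfer is still in
		 * progress; the completion IRQ must later call
		 * spi_finalize_current_transfer(master) so the core can
		 * issue the next transfer.  Returning 0 means "already
		 * finished". */
		return 1;
	}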
@@@ -493,7 -498,7 +498,7 @@@ extern struct spi_master *spi_busnum_to
   * @rx_buf: data to be read (dma-safe memory), or NULL
   * @tx_dma: DMA address of tx_buf, if @spi_message.is_dma_mapped
   * @rx_dma: DMA address of rx_buf, if @spi_message.is_dma_mapped
 - * @tx_nbits: number of bits used for writting. If 0 the default
 + * @tx_nbits: number of bits used for writing. If 0 the default
   *      (SPI_NBITS_SINGLE) is used.
   * @rx_nbits: number of bits used for reading. If 0 the default
   *      (SPI_NBITS_SINGLE) is used.
   * by the results of previous messages and where the whole transaction
   * ends when the chipselect goes inactive.
   *
 - * When SPI can transfer in 1x,2x or 4x. It can get this tranfer information
 + * When SPI can transfer in 1x,2x or 4x. It can get this transfer information
   * from the device through @tx_nbits and @rx_nbits. In bi-direction, these
   * two should both be set. The user can set the transfer mode with
   * SPI_NBITS_SINGLE(1x), SPI_NBITS_DUAL(2x) and SPI_NBITS_QUAD(4x) to
   * support these three transfer modes.
@@@ -576,8 -581,8 +581,8 @@@ struct spi_transfer 
        dma_addr_t      rx_dma;
  
        unsigned        cs_change:1;
-       u8              tx_nbits;
-       u8              rx_nbits;
+       unsigned        tx_nbits:3;
+       unsigned        rx_nbits:3;
  #define       SPI_NBITS_SINGLE        0x01 /* 1bit transfer */
  #define       SPI_NBITS_DUAL          0x02 /* 2bits transfer */
  #define       SPI_NBITS_QUAD          0x04 /* 4bits transfer */
@@@ -847,7 -852,7 +852,7 @@@ static inline ssize_t spi_w8r16(struct 
        ssize_t                 status;
        u16                     result;
  
-       status = spi_write_then_read(spi, &cmd, 1, (u8 *) &result, 2);
+       status = spi_write_then_read(spi, &cmd, 1, &result, 2);
  
        /* return negative errno or unsigned value */
        return (status < 0) ? status : result;
diff --combined include/linux/usb/composite.h
index 0e7a555cab1e4d871a796a92ec27bdf095c01c2d,dba63f53906c9ddb1b43ef36ef0822b554a8a3ff..d3ca3b53837c42c85ff2dd7ebcc11764014fe23d
@@@ -92,7 -92,7 +92,7 @@@ struct usb_configuration
   * @suspend: Notifies functions when the host stops sending USB traffic.
   * @resume: Notifies functions when the host restarts USB traffic.
   * @get_status: Returns function status as a reply to
 - *    GetStatus() request when the recepient is Interface.
 + *    GetStatus() request when the recipient is Interface.
   * @func_suspend: callback to be called when
  *    SetFeature(FUNCTION_SUSPEND) is received
   *
@@@ -468,6 -468,8 +468,8 @@@ struct usb_function_instance 
        struct config_group group;
        struct list_head cfs_list;
        struct usb_function_driver *fd;
+       int (*set_inst_name)(struct usb_function_instance *inst,
+                             const char *name);
        void (*free_func_inst)(struct usb_function_instance *inst);
  };
  
diff --combined include/net/mac80211.h
index 6b79bfc98175af175215eb50a51d93094bafe776,f4ab2fb4d50c445b980e1f6c507ae5a893c1ddfc..703b1f1456fcbe6ae684cea5bec25abb8ef1b9ec
@@@ -154,12 -154,14 +154,14 @@@ struct ieee80211_low_level_stats 
   * @IEEE80211_CHANCTX_CHANGE_RADAR: radar detection flag changed
   * @IEEE80211_CHANCTX_CHANGE_CHANNEL: switched to another operating channel,
   *    this is used only with channel switching with CSA
+  * @IEEE80211_CHANCTX_CHANGE_MIN_WIDTH: The min required channel width changed
   */
  enum ieee80211_chanctx_change {
        IEEE80211_CHANCTX_CHANGE_WIDTH          = BIT(0),
        IEEE80211_CHANCTX_CHANGE_RX_CHAINS      = BIT(1),
        IEEE80211_CHANCTX_CHANGE_RADAR          = BIT(2),
        IEEE80211_CHANCTX_CHANGE_CHANNEL        = BIT(3),
+       IEEE80211_CHANCTX_CHANGE_MIN_WIDTH      = BIT(4),
  };
  
  /**
   * that contains it is visible in mac80211 only.
   *
   * @def: the channel definition
+  * @min_def: the minimum channel definition currently required.
   * @rx_chains_static: The number of RX chains that must always be
   *    active on the channel to receive MIMO transmissions
   * @rx_chains_dynamic: The number of RX chains that must be enabled
   */
  struct ieee80211_chanctx_conf {
        struct cfg80211_chan_def def;
+       struct cfg80211_chan_def min_def;
  
        u8 rx_chains_static, rx_chains_dynamic;
  
@@@ -1158,6 -1162,19 +1162,19 @@@ static inline bool ieee80211_vif_is_mes
        return false;
  }
  
+ /**
+  * wdev_to_ieee80211_vif - return a vif struct from a wdev
+  * @wdev: the wdev to get the vif for
+  *
+  * This can be used by mac80211 drivers with direct cfg80211 APIs
+  * (like the vendor commands) that get a wdev.
+  *
+  * Note that this function may return %NULL if the given wdev isn't
+  * associated with a vif that the driver knows about (e.g. monitor
+  * or AP_VLAN interfaces.)
+  */
+ struct ieee80211_vif *wdev_to_ieee80211_vif(struct wireless_dev *wdev);
  /**
   * enum ieee80211_key_flags - key flags
   *
@@@ -1228,6 -1245,36 +1245,36 @@@ struct ieee80211_key_conf 
        u8 key[0];
  };
  
+ /**
+  * struct ieee80211_cipher_scheme - cipher scheme
+  *
+  * This structure contains cipher scheme information defining
+  * the secure packet crypto handling.
+  *
+  * @cipher: a cipher suite selector
+  * @iftype: a cipher iftype bit mask indicating an allowed cipher usage
+  * @hdr_len: a length of a security header used by the cipher
+  * @pn_len: a length of a packet number in the security header
+  * @pn_off: an offset of pn from the beginning of the security header
+  * @key_idx_off: an offset of key index byte in the security header
+  * @key_idx_mask: a bit mask of key_idx bits
+  * @key_idx_shift: a bit shift needed to get key_idx
+  *     key_idx value calculation:
+  *      (sec_header_base[key_idx_off] & key_idx_mask) >> key_idx_shift
+  * @mic_len: a mic length in bytes
+  */
+ struct ieee80211_cipher_scheme {
+       u32 cipher;
+       u16 iftype;
+       u8 hdr_len;
+       u8 pn_len;
+       u8 pn_off;
+       u8 key_idx_off;
+       u8 key_idx_mask;
+       u8 key_idx_shift;
+       u8 mic_len;
+ };
  /**
   * enum set_key_cmd - key command
   *
@@@ -1566,7 -1613,8 +1613,8 @@@ enum ieee80211_hw_flags 
   * @extra_tx_headroom: headroom to reserve in each transmit skb
   *    for use by the driver (e.g. for transmit headers.)
   *
-  * @channel_change_time: time (in microseconds) it takes to change channels.
+  * @extra_beacon_tailroom: tailroom to reserve in each beacon tx skb.
+  *    Can be used by drivers to add extra IEs.
   *
   * @max_signal: Maximum value for signal (rssi) in RX information, used
   *    only when @IEEE80211_HW_SIGNAL_UNSPEC or @IEEE80211_HW_SIGNAL_DB
   * @uapsd_max_sp_len: maximum number of total buffered frames the WMM AP may
   *    deliver to a WMM STA during any Service Period triggered by the WMM STA.
   *    Use IEEE80211_WMM_IE_STA_QOSINFO_SP_* for correct values.
+  *
+  * @n_cipher_schemes: a size of an array of cipher schemes definitions.
+  * @cipher_schemes: a pointer to an array of cipher scheme definitions
+  *    supported by HW.
   */
  struct ieee80211_hw {
        struct ieee80211_conf conf;
        void *priv;
        u32 flags;
        unsigned int extra_tx_headroom;
-       int channel_change_time;
+       unsigned int extra_beacon_tailroom;
        int vif_data_size;
        int sta_data_size;
        int chanctx_data_size;
        netdev_features_t netdev_features;
        u8 uapsd_queues;
        u8 uapsd_max_sp_len;
+       u8 n_cipher_schemes;
+       const struct ieee80211_cipher_scheme *cipher_schemes;
  };
  
  /**
@@@ -1841,7 -1895,7 +1895,7 @@@ void ieee80211_free_txskb(struct ieee80
   *
   * Driver informs U-APSD client support by enabling
   * %IEEE80211_HW_SUPPORTS_UAPSD flag. The mode is configured through the
 - * uapsd paramater in conf_tx() operation. Hardware needs to send the QoS
 + * uapsd parameter in conf_tx() operation. Hardware needs to send the QoS
   * Nullfunc frames and stay awake until the service period has ended. To
   * utilize U-APSD, dynamic powersave is disabled for voip AC and all frames
   * from that AC are transmitted with powersave enabled.
   * with the number of frames to be released and which TIDs they are
   * to come from. In this case, the driver is responsible for setting
   * the EOSP (for uAPSD) and MORE_DATA bits in the released frames,
 - * to help the @more_data paramter is passed to tell the driver if
 + * to help the @more_data parameter is passed to tell the driver if
   * there is more data on other TIDs -- the TIDs to release frames
   * from are ignored since mac80211 doesn't know how many frames the
   * buffers for those TIDs contain.
   * appropriately (only the last frame may have %IEEE80211_TX_STATUS_EOSP)
   * and also take care of the EOSP and MORE_DATA bits in the frame.
   * The driver may also use ieee80211_sta_eosp() in this case.
+  *
+  * Note that if the driver ever buffers frames other than QoS-data
+  * frames, it must take care to never send a non-QoS-data frame as
+  * the last frame in a service period, adding a QoS-nulldata frame
+  * after a non-QoS-data frame if needed.
   */
  
  /**
@@@ -2358,9 -2417,6 +2417,6 @@@ enum ieee80211_roc_type 
   *    See the section "Frame filtering" for more information.
   *    This callback must be implemented and can sleep.
   *
-  * @set_multicast_list: Configure the device's interface specific RX multicast
-  *    filter. This callback is optional. This callback must be atomic.
-  *
   * @set_tim: Set TIM bit. mac80211 calls this function when a TIM bit
   *    must be set or cleared for a given STA. Must be atomic.
   *
   *    AP, IBSS/WDS/mesh peer etc. This callback can sleep.
   *
   * @sta_remove: Notifies low level driver about removal of an associated
-  *    station, AP, IBSS/WDS/mesh peer etc. This callback can sleep.
+  *    station, AP, IBSS/WDS/mesh peer etc. Note that after the callback
+  *    returns it isn't safe to use the pointer, not even RCU protected;
+  *    no RCU grace period is guaranteed between returning here and freeing
+  *    the station. See @sta_pre_rcu_remove if needed.
+  *    This callback can sleep.
   *
   * @sta_add_debugfs: Drivers can use this callback to add debugfs files
   *    when a station is added to mac80211's station list. This callback
   *    station (which can be the AP, a client, IBSS/WDS/mesh peer etc.)
   *    This callback is mutually exclusive with @sta_add/@sta_remove.
   *    It must not fail for down transitions but may fail for transitions
-  *    up the list of states.
+  *    up the list of states. Also note that after the callback returns it
+  *    isn't safe to use the pointer, not even RCU protected - no RCU grace
+  *    period is guaranteed between returning here and freeing the station.
+  *    See @sta_pre_rcu_remove if needed.
+  *    The callback can sleep.
+  *
+  * @sta_pre_rcu_remove: Notify driver about station removal before RCU
+  *    synchronisation. This is useful if a driver needs to have station
+  *    pointers protected using RCU; it can then use this call to clear
+  *    the pointers instead of waiting for an RCU grace period to elapse
+  *    in @sta_state.
   *    The callback can sleep.
   *
   * @sta_rc_update: Notifies the driver of changes to the bitrates that can be
   *    parameters. In the case where the driver buffers some frames for
   *    sleeping stations mac80211 will use this callback to tell the driver
   *    to release some frames, either for PS-poll or uAPSD.
 - *    Note that if the @more_data paramter is %false the driver must check
 + *    Note that if the @more_data parameter is %false the driver must check
   *    if there are more frames on the given TIDs, and if there are more than
   *    the frames being released then it must still set the more-data bit in
   *    the frame. If the @more_data parameter is %true, then of course the
@@@ -2724,10 -2794,6 +2794,6 @@@ struct ieee80211_ops 
                                 unsigned int changed_flags,
                                 unsigned int *total_flags,
                                 u64 multicast);
-       void (*set_multicast_list)(struct ieee80211_hw *hw,
-                                  struct ieee80211_vif *vif, bool allmulti,
-                                  struct netdev_hw_addr_list *mc_list);
        int (*set_tim)(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
                       bool set);
        int (*set_key)(struct ieee80211_hw *hw, enum set_key_cmd cmd,
                         struct ieee80211_sta *sta,
                         enum ieee80211_sta_state old_state,
                         enum ieee80211_sta_state new_state);
+       void (*sta_pre_rcu_remove)(struct ieee80211_hw *hw,
+                                  struct ieee80211_vif *vif,
+                                  struct ieee80211_sta *sta);
        void (*sta_rc_update)(struct ieee80211_hw *hw,
                              struct ieee80211_vif *vif,
                              struct ieee80211_sta *sta,
@@@ -4585,4 -4654,51 +4654,51 @@@ bool ieee80211_tx_prepare_skb(struct ie
                              struct ieee80211_vif *vif, struct sk_buff *skb,
                              int band, struct ieee80211_sta **sta);
  
+ /**
+  * struct ieee80211_noa_data - holds temporary data for tracking P2P NoA state
+  *
+  * @next_tsf: TSF timestamp of the next absent state change
+  * @has_next_tsf: next absent state change event pending
+  *
+  * @absent: descriptor bitmask, set if GO is currently absent
+  *
+  * private:
+  *
+  * @count: count fields from the NoA descriptors
+  * @desc: adjusted data from the NoA
+  */
+ struct ieee80211_noa_data {
+       u32 next_tsf;
+       bool has_next_tsf;
+       u8 absent;
+       u8 count[IEEE80211_P2P_NOA_DESC_MAX];
+       struct {
+               u32 start;
+               u32 duration;
+               u32 interval;
+       } desc[IEEE80211_P2P_NOA_DESC_MAX];
+ };
+ /**
+  * ieee80211_parse_p2p_noa - initialize NoA tracking data from P2P IE
+  *
+  * @attr: P2P NoA IE
+  * @data: NoA tracking data
+  * @tsf: current TSF timestamp
+  *
+  * Return: number of successfully parsed descriptors
+  */
+ int ieee80211_parse_p2p_noa(const struct ieee80211_p2p_noa_attr *attr,
+                           struct ieee80211_noa_data *data, u32 tsf);
+ /**
+  * ieee80211_update_p2p_noa - get next pending P2P GO absent state change
+  *
+  * @data: NoA tracking data
+  * @tsf: current TSF timestamp
+  */
+ void ieee80211_update_p2p_noa(struct ieee80211_noa_data *data, u32 tsf);
  #endif /* MAC80211_H */
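
A sketch of how a driver might use the new NoA helpers declared above; where the attribute and TSF come from is device-specific, and the surrounding names are hypothetical:

	#include <net/mac80211.h>

	static void example_track_noa(const struct ieee80211_p2p_noa_attr *attr,
				      struct ieee80211_noa_data *noa, u32 tsf)
	{
		if (ieee80211_parse_p2p_noa(attr, noa, tsf) <= 0)
			return;		/* no valid NoA descriptors */

		/* Later, when noa->next_tsf is reached, advance the state: */
		ieee80211_update_p2p_noa(noa, tsf);

		if (noa->absent) {
			/* GO currently absent: e.g. pause transmission. */
		}
	}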
diff --combined kernel/signal.c
index f4812283c6e999a2da829d901d475fd2599014a5,52f881db1ca02a4190b46174e5d7ad458c7b5c33..5d4b05a229a66ee464fd8e22a64a458c4a30ca61
@@@ -2047,8 -2047,8 +2047,8 @@@ static bool do_signal_stop(int signr
                if (task_set_jobctl_pending(current, signr | gstop))
                        sig->group_stop_count++;
  
-               for (t = next_thread(current); t != current;
-                    t = next_thread(t)) {
 +              t = current;
+               while_each_thread(current, t) {
                        /*
                         * Setting state to TASK_STOPPED for a group
                         * stop is always done with the siglock held,
@@@ -2382,7 -2382,7 +2382,7 @@@ relock
   * @regs:             user register state
   * @stepping:         nonzero if debugger single-step or block-step in use
   *
 - * This function should be called when a signal has succesfully been
 + * This function should be called when a signal has successfully been
   * delivered. It updates the blocked signals accordingly (@ka->sa.sa_mask
   * is always blocked, and the signal itself is blocked unless %SA_NODEFER
   * is set in @ka->sa.sa_flags.  Tracing is notified.
@@@ -3125,8 -3125,7 +3125,7 @@@ int do_sigaction(int sig, struct k_siga
                        rm_from_queue_full(&mask, &t->signal->shared_pending);
                        do {
                                rm_from_queue_full(&mask, &t->pending);
-                               t = next_thread(t);
-                       } while (t != current);
+                       } while_each_thread(current, t);
                }
        }
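
Both hunks above convert open-coded next_thread() loops to while_each_thread(). The idiom, sketched in isolation (it must run under suitable locking, e.g. siglock or RCU; the function name is illustrative):

	#include <linux/sched.h>

	static void example_visit_other_threads(struct task_struct *task)
	{
		struct task_struct *t = task;

		/* Visits every thread in task's group except task itself,
		 * equivalent to the removed next_thread() loop. */
		while_each_thread(task, t) {
			/* ... operate on t ... */
		}
	}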
  
diff --combined net/core/dev.c
index 70d2da3bfb0d49cc2db9a4c2e7f785026488f375,4ad1b78c9c7790a84d4e697da1e025213e3a0eae..9971ea1adc103aeaa8705cd147b010621b2656b9
@@@ -147,6 -147,8 +147,8 @@@ struct list_head ptype_base[PTYPE_HASH_
  struct list_head ptype_all __read_mostly;     /* Taps */
  static struct list_head offload_base __read_mostly;
  
+ static int netif_rx_internal(struct sk_buff *skb);
  /*
   * The @dev_base_head list is protected by @dev_base_lock and the rtnl
   * semaphore.
@@@ -480,7 -482,7 +482,7 @@@ EXPORT_SYMBOL(dev_add_offload)
   *    and must not be freed until after all the CPUs have gone
   *    through a quiescent state.
   */
- void __dev_remove_offload(struct packet_offload *po)
+ static void __dev_remove_offload(struct packet_offload *po)
  {
        struct list_head *head = &offload_base;
        struct packet_offload *po1;
  out:
        spin_unlock(&offload_lock);
  }
- EXPORT_SYMBOL(__dev_remove_offload);
  
  /**
   *    dev_remove_offload       - remove packet offload handler
@@@ -1118,6 -1119,8 +1119,8 @@@ rollback
  
        write_seqcount_end(&devnet_rename_seq);
  
+       netdev_adjacent_rename_links(dev, oldname);
        write_lock_bh(&dev_base_lock);
        hlist_del_rcu(&dev->name_hlist);
        write_unlock_bh(&dev_base_lock);
                        err = ret;
                        write_seqcount_begin(&devnet_rename_seq);
                        memcpy(dev->name, oldname, IFNAMSIZ);
+                       memcpy(oldname, newname, IFNAMSIZ);
                        goto rollback;
                } else {
                        pr_err("%s: name change rollback failed: %d\n",
@@@ -1566,14 -1570,14 +1570,14 @@@ EXPORT_SYMBOL(unregister_netdevice_noti
   *    are as for raw_notifier_call_chain().
   */
  
- int call_netdevice_notifiers_info(unsigned long val, struct net_device *dev,
-                                 struct netdev_notifier_info *info)
+ static int call_netdevice_notifiers_info(unsigned long val,
+                                        struct net_device *dev,
+                                        struct netdev_notifier_info *info)
  {
        ASSERT_RTNL();
        netdev_notifier_info_init(info, dev);
        return raw_notifier_call_chain(&netdev_chain, val, info);
  }
- EXPORT_SYMBOL(call_netdevice_notifiers_info);
  
  /**
   *    call_netdevice_notifiers - call all network notifier blocks
@@@ -1699,7 -1703,7 +1703,7 @@@ int dev_forward_skb(struct net_device *
        skb_scrub_packet(skb, true);
        skb->protocol = eth_type_trans(skb, dev);
  
-       return netif_rx(skb);
+       return netif_rx_internal(skb);
  }
  EXPORT_SYMBOL_GPL(dev_forward_skb);
  
@@@ -2079,7 -2083,7 +2083,7 @@@ int netif_set_real_num_tx_queues(struc
  }
  EXPORT_SYMBOL(netif_set_real_num_tx_queues);
  
- #ifdef CONFIG_RPS
+ #ifdef CONFIG_SYSFS
  /**
   *    netif_set_real_num_rx_queues - set actual number of RX queues used
   *    @dev: Network device
@@@ -2145,30 -2149,42 +2149,42 @@@ void __netif_schedule(struct Qdisc *q
  }
  EXPORT_SYMBOL(__netif_schedule);
  
- void dev_kfree_skb_irq(struct sk_buff *skb)
+ struct dev_kfree_skb_cb {
+       enum skb_free_reason reason;
+ };
+ static struct dev_kfree_skb_cb *get_kfree_skb_cb(const struct sk_buff *skb)
  {
-       if (atomic_dec_and_test(&skb->users)) {
-               struct softnet_data *sd;
-               unsigned long flags;
+       return (struct dev_kfree_skb_cb *)skb->cb;
+ }
+ void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason)
+ {
+       unsigned long flags;
  
-               local_irq_save(flags);
-               sd = &__get_cpu_var(softnet_data);
-               skb->next = sd->completion_queue;
-               sd->completion_queue = skb;
-               raise_softirq_irqoff(NET_TX_SOFTIRQ);
-               local_irq_restore(flags);
+       if (likely(atomic_read(&skb->users) == 1)) {
+               smp_rmb();
+               atomic_set(&skb->users, 0);
+       } else if (likely(!atomic_dec_and_test(&skb->users))) {
+               return;
        }
+       get_kfree_skb_cb(skb)->reason = reason;
+       local_irq_save(flags);
+       skb->next = __this_cpu_read(softnet_data.completion_queue);
+       __this_cpu_write(softnet_data.completion_queue, skb);
+       raise_softirq_irqoff(NET_TX_SOFTIRQ);
+       local_irq_restore(flags);
  }
- EXPORT_SYMBOL(dev_kfree_skb_irq);
+ EXPORT_SYMBOL(__dev_kfree_skb_irq);
  
- void dev_kfree_skb_any(struct sk_buff *skb)
+ void __dev_kfree_skb_any(struct sk_buff *skb, enum skb_free_reason reason)
  {
        if (in_irq() || irqs_disabled())
-               dev_kfree_skb_irq(skb);
+               __dev_kfree_skb_irq(skb, reason);
        else
                dev_kfree_skb(skb);
  }
- EXPORT_SYMBOL(dev_kfree_skb_any);
+ EXPORT_SYMBOL(__dev_kfree_skb_any);
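
The new reason argument is what lets callers distinguish a successfully transmitted skb from a genuine drop. A hypothetical TX-completion handler using the dev_consume_skb_any()/dev_kfree_skb_any() wrappers, which in this series pass SKB_REASON_CONSUMED and SKB_REASON_DROPPED respectively (an assumption; the wrappers live in netdevice.h and are not shown in this hunk):

	#include <linux/netdevice.h>

	/* May run in hardirq context, hence the _any variants. */
	static void example_tx_complete(struct sk_buff *skb, bool tx_ok)
	{
		if (tx_ok)
			dev_consume_skb_any(skb);	/* sent: not a drop */
		else
			dev_kfree_skb_any(skb);		/* error: a real drop */
	}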
  
  
  /**
@@@ -2442,13 -2458,8 +2458,8 @@@ static void dev_gso_skb_destructor(stru
  {
        struct dev_gso_cb *cb;
  
-       do {
-               struct sk_buff *nskb = skb->next;
-               skb->next = nskb->next;
-               nskb->next = NULL;
-               kfree_skb(nskb);
-       } while (skb->next);
+       kfree_skb_list(skb->next);
+       skb->next = NULL;
  
        cb = DEV_GSO_CB(skb);
        if (cb->destructor)
@@@ -2523,23 -2534,8 +2534,8 @@@ netdev_features_t netif_skb_features(st
  }
  EXPORT_SYMBOL(netif_skb_features);
  
- /*
-  * Returns true if either:
-  *    1. skb has frag_list and the device doesn't support FRAGLIST, or
-  *    2. skb is fragmented and the device does not support SG.
-  */
- static inline int skb_needs_linearize(struct sk_buff *skb,
-                                     netdev_features_t features)
- {
-       return skb_is_nonlinear(skb) &&
-                       ((skb_has_frag_list(skb) &&
-                               !(features & NETIF_F_FRAGLIST)) ||
-                       (skb_shinfo(skb)->nr_frags &&
-                               !(features & NETIF_F_SG)));
- }
  int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
-                       struct netdev_queue *txq, void *accel_priv)
+                       struct netdev_queue *txq)
  {
        const struct net_device_ops *ops = dev->netdev_ops;
        int rc = NETDEV_TX_OK;
                        dev_queue_xmit_nit(skb, dev);
  
                skb_len = skb->len;
-               if (accel_priv)
-                       rc = ops->ndo_dfwd_start_xmit(skb, dev, accel_priv);
-               else
-                       rc = ops->ndo_start_xmit(skb, dev);
+               trace_net_dev_start_xmit(skb, dev);
+               rc = ops->ndo_start_xmit(skb, dev);
                trace_net_dev_xmit(skb, rc, dev, skb_len);
-               if (rc == NETDEV_TX_OK && txq)
+               if (rc == NETDEV_TX_OK)
                        txq_trans_update(txq);
                return rc;
        }
@@@ -2627,10 -2620,8 +2620,8 @@@ gso
                        dev_queue_xmit_nit(nskb, dev);
  
                skb_len = nskb->len;
-               if (accel_priv)
-                       rc = ops->ndo_dfwd_start_xmit(nskb, dev, accel_priv);
-               else
-                       rc = ops->ndo_start_xmit(nskb, dev);
+               trace_net_dev_start_xmit(nskb, dev);
+               rc = ops->ndo_start_xmit(nskb, dev);
                trace_net_dev_xmit(nskb, rc, dev, skb_len);
                if (unlikely(rc != NETDEV_TX_OK)) {
                        if (rc & ~NETDEV_TX_MASK)
@@@ -2750,7 -2741,7 +2741,7 @@@ static inline int __dev_xmit_skb(struc
        return rc;
  }
  
- #if IS_ENABLED(CONFIG_NETPRIO_CGROUP)
+ #if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
  static void skb_update_prio(struct sk_buff *skb)
  {
        struct netprio_map *map = rcu_dereference_bh(skb->dev->priomap);
@@@ -2787,8 -2778,9 +2778,9 @@@ int dev_loopback_xmit(struct sk_buff *s
  EXPORT_SYMBOL(dev_loopback_xmit);
  
  /**
-  *    dev_queue_xmit - transmit a buffer
+  *    __dev_queue_xmit - transmit a buffer
   *    @skb: buffer to transmit
+  *    @accel_priv: private data used for L2 forwarding offload
   *
   *    Queue a buffer for transmission to a network device. The caller must
   *    have set the device and priority and built the buffer before calling
   *      the BH enable code must have IRQs enabled so that it will not deadlock.
   *          --BLG
   */
- int dev_queue_xmit(struct sk_buff *skb)
+ static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
  {
        struct net_device *dev = skb->dev;
        struct netdev_queue *txq;
  
        skb_update_prio(skb);
  
-       txq = netdev_pick_tx(dev, skb);
+       txq = netdev_pick_tx(dev, skb, accel_priv);
        q = rcu_dereference_bh(txq->qdisc);
  
  #ifdef CONFIG_NET_CLS_ACT
  
                        if (!netif_xmit_stopped(txq)) {
                                __this_cpu_inc(xmit_recursion);
-                               rc = dev_hard_start_xmit(skb, dev, txq, NULL);
+                               rc = dev_hard_start_xmit(skb, dev, txq);
                                __this_cpu_dec(xmit_recursion);
                                if (dev_xmit_complete(rc)) {
                                        HARD_TX_UNLOCK(dev, txq);
        rcu_read_unlock_bh();
        return rc;
  }
+ int dev_queue_xmit(struct sk_buff *skb)
+ {
+       return __dev_queue_xmit(skb, NULL);
+ }
  EXPORT_SYMBOL(dev_queue_xmit);
  
+ int dev_queue_xmit_accel(struct sk_buff *skb, void *accel_priv)
+ {
+       return __dev_queue_xmit(skb, accel_priv);
+ }
+ EXPORT_SYMBOL(dev_queue_xmit_accel);
  
  /*=======================================================================
                        Receiver routines
@@@ -3009,7 -3012,7 +3012,7 @@@ static int get_rps_cpu(struct net_devic
        }
  
        skb_reset_network_header(skb);
-       if (!skb_get_rxhash(skb))
+       if (!skb_get_hash(skb))
                goto done;
  
        flow_table = rcu_dereference(rxqueue->rps_flow_table);
@@@ -3154,7 -3157,7 +3157,7 @@@ static bool skb_flow_limit(struct sk_bu
        rcu_read_lock();
        fl = rcu_dereference(sd->flow_limit);
        if (fl) {
-               new_flow = skb_get_rxhash(skb) & (fl->num_buckets - 1);
+               new_flow = skb_get_hash(skb) & (fl->num_buckets - 1);
                old_flow = fl->history[fl->history_head];
                fl->history[fl->history_head] = new_flow;
  
@@@ -3222,22 -3225,7 +3225,7 @@@ enqueue
        return NET_RX_DROP;
  }
  
- /**
-  *    netif_rx        -       post buffer to the network code
-  *    @skb: buffer to post
-  *
-  *    This function receives a packet from a device driver and queues it for
-  *    the upper (protocol) levels to process.  It always succeeds. The buffer
-  *    may be dropped during processing for congestion control or by the
-  *    protocol layers.
-  *
-  *    return values:
-  *    NET_RX_SUCCESS  (no congestion)
-  *    NET_RX_DROP     (packet was dropped)
-  *
-  */
- int netif_rx(struct sk_buff *skb)
+ static int netif_rx_internal(struct sk_buff *skb)
  {
        int ret;
  
        }
        return ret;
  }
+ /**
+  *    netif_rx        -       post buffer to the network code
+  *    @skb: buffer to post
+  *
+  *    This function receives a packet from a device driver and queues it for
+  *    the upper (protocol) levels to process.  It always succeeds. The buffer
+  *    may be dropped during processing for congestion control or by the
+  *    protocol layers.
+  *
+  *    return values:
+  *    NET_RX_SUCCESS  (no congestion)
+  *    NET_RX_DROP     (packet was dropped)
+  *
+  */
+ int netif_rx(struct sk_buff *skb)
+ {
+       trace_netif_rx_entry(skb);
+       return netif_rx_internal(skb);
+ }
  EXPORT_SYMBOL(netif_rx);
  
  int netif_rx_ni(struct sk_buff *skb)
  {
        int err;
  
+       trace_netif_rx_ni_entry(skb);
        preempt_disable();
-       err = netif_rx(skb);
+       err = netif_rx_internal(skb);
        if (local_softirq_pending())
                do_softirq();
        preempt_enable();
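
With the tracepoints hoisted into the exported wrappers, a driver's receive path is unchanged; for reference, a minimal sketch of handing a frame to the stack (the function name is hypothetical):

	#include <linux/netdevice.h>
	#include <linux/etherdevice.h>

	/* Safe from IRQ context; NAPI drivers running in softirq context
	 * would call netif_receive_skb() instead. */
	static void example_rx(struct net_device *dev, struct sk_buff *skb)
	{
		skb->protocol = eth_type_trans(skb, dev);
		netif_rx(skb);
	}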
@@@ -3306,7 -3318,10 +3318,10 @@@ static void net_tx_action(struct softir
                        clist = clist->next;
  
                        WARN_ON(atomic_read(&skb->users));
-                       trace_kfree_skb(skb, net_tx_action);
+                       if (likely(get_kfree_skb_cb(skb)->reason == SKB_REASON_CONSUMED))
+                               trace_consume_skb(skb);
+                       else
+                               trace_kfree_skb(skb, net_tx_action);
                        __kfree_skb(skb);
                }
        }
@@@ -3424,7 -3439,7 +3439,7 @@@ out
   *    @rx_handler: receive handler to register
   *    @rx_handler_data: data pointer that is used by rx handler
   *
 - *    Register a receive hander for a device. This handler will then be
 + *    Register a receive handler for a device. This handler will then be
   *    called from __netif_receive_skb. A negative errno code is returned
   *    on a failure.
   *
@@@ -3662,22 -3677,7 +3677,7 @@@ static int __netif_receive_skb(struct s
        return ret;
  }
  
- /**
-  *    netif_receive_skb - process receive buffer from network
-  *    @skb: buffer to process
-  *
-  *    netif_receive_skb() is the main receive data processing function.
-  *    It always succeeds. The buffer may be dropped during processing
-  *    for congestion control or by the protocol layers.
-  *
-  *    This function may only be called from softirq context and interrupts
-  *    should be enabled.
-  *
-  *    Return values (usually ignored):
-  *    NET_RX_SUCCESS: no congestion
-  *    NET_RX_DROP: packet was dropped
-  */
- int netif_receive_skb(struct sk_buff *skb)
+ static int netif_receive_skb_internal(struct sk_buff *skb)
  {
        net_timestamp_check(netdev_tstamp_prequeue, skb);
  
  #endif
        return __netif_receive_skb(skb);
  }
+ /**
+  *    netif_receive_skb - process receive buffer from network
+  *    @skb: buffer to process
+  *
+  *    netif_receive_skb() is the main receive data processing function.
+  *    It always succeeds. The buffer may be dropped during processing
+  *    for congestion control or by the protocol layers.
+  *
+  *    This function may only be called from softirq context and interrupts
+  *    should be enabled.
+  *
+  *    Return values (usually ignored):
+  *    NET_RX_SUCCESS: no congestion
+  *    NET_RX_DROP: packet was dropped
+  */
+ int netif_receive_skb(struct sk_buff *skb)
+ {
+       trace_netif_receive_skb_entry(skb);
+       return netif_receive_skb_internal(skb);
+ }
  EXPORT_SYMBOL(netif_receive_skb);
  
  /* Network device is going away, flush any packets still pending
@@@ -3752,7 -3774,7 +3774,7 @@@ static int napi_gro_complete(struct sk_
                if (ptype->type != type || !ptype->callbacks.gro_complete)
                        continue;
  
-               err = ptype->callbacks.gro_complete(skb);
+               err = ptype->callbacks.gro_complete(skb, 0);
                break;
        }
        rcu_read_unlock();
        }
  
  out:
-       return netif_receive_skb(skb);
+       return netif_receive_skb_internal(skb);
  }
  
  /* napi->gro_list contains packets ordered by age.
@@@ -3800,10 -3822,18 +3822,18 @@@ static void gro_list_prepare(struct nap
  {
        struct sk_buff *p;
        unsigned int maclen = skb->dev->hard_header_len;
+       u32 hash = skb_get_hash_raw(skb);
  
        for (p = napi->gro_list; p; p = p->next) {
                unsigned long diffs;
  
+               NAPI_GRO_CB(p)->flush = 0;
+               if (hash != skb_get_hash_raw(p)) {
+                       NAPI_GRO_CB(p)->same_flow = 0;
+                       continue;
+               }
+
                diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
                diffs |= p->vlan_tci ^ skb->vlan_tci;
                if (maclen == ETH_HLEN)
                                       skb_gro_mac_header(skb),
                                       maclen);
                NAPI_GRO_CB(p)->same_flow = !diffs;
-               NAPI_GRO_CB(p)->flush = 0;
+       }
+ }
+
+ static void skb_gro_reset_offset(struct sk_buff *skb)
+ {
+       const struct skb_shared_info *pinfo = skb_shinfo(skb);
+       const skb_frag_t *frag0 = &pinfo->frags[0];
+
+       NAPI_GRO_CB(skb)->data_offset = 0;
+       NAPI_GRO_CB(skb)->frag0 = NULL;
+       NAPI_GRO_CB(skb)->frag0_len = 0;
+
+       if (skb_mac_header(skb) == skb_tail_pointer(skb) &&
+           pinfo->nr_frags &&
+           !PageHighMem(skb_frag_page(frag0))) {
+               NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0);
+               NAPI_GRO_CB(skb)->frag0_len = skb_frag_size(frag0);
        }
  }
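
With skb_gro_reset_offset() now run before the protocol callbacks (see dev_gro_receive() below), a gro_receive handler can read headers through the frag0 fast path. The canonical access pattern, mirroring the code this merge removes from napi_frags_skb() further down, looks roughly like this; struct myhdr and the function name are placeholders:

    static struct sk_buff **myproto_gro_receive(struct sk_buff **head,
                                                struct sk_buff *skb)
    {
            unsigned int off = skb_gro_offset(skb);
            unsigned int hlen = off + sizeof(struct myhdr);
            struct myhdr *hdr = skb_gro_header_fast(skb, off);

            if (skb_gro_header_hard(skb, hlen)) {
                    hdr = skb_gro_header_slow(skb, hlen, off);
                    if (unlikely(!hdr))
                            return NULL;    /* header unavailable: no merge */
            }
            /* ... parse hdr, update NAPI_GRO_CB(skb), walk 'head' ... */
            return NULL;
    }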
  
@@@ -3833,7 -3879,9 +3879,9 @@@ static enum gro_result dev_gro_receive(
        if (skb_is_gso(skb) || skb_has_frag_list(skb))
                goto normal;
  
+       skb_gro_reset_offset(skb);
        gro_list_prepare(napi, skb);
+       NAPI_GRO_CB(skb)->csum = skb->csum; /* Needed for CHECKSUM_COMPLETE */
  
        rcu_read_lock();
        list_for_each_entry_rcu(ptype, head, list) {
                NAPI_GRO_CB(skb)->same_flow = 0;
                NAPI_GRO_CB(skb)->flush = 0;
                NAPI_GRO_CB(skb)->free = 0;
+               NAPI_GRO_CB(skb)->udp_mark = 0;
  
                pp = ptype->callbacks.gro_receive(&napi->gro_list, skb);
                break;
        if (same_flow)
                goto ok;
  
-       if (NAPI_GRO_CB(skb)->flush || napi->gro_count >= MAX_GRO_SKBS)
+       if (NAPI_GRO_CB(skb)->flush)
                goto normal;
  
-       napi->gro_count++;
+       if (unlikely(napi->gro_count >= MAX_GRO_SKBS)) {
+               struct sk_buff *nskb = napi->gro_list;
+
+               /* locate the end of the list to select the 'oldest' flow */
+               while (nskb->next) {
+                       pp = &nskb->next;
+                       nskb = *pp;
+               }
+               *pp = NULL;
+               nskb->next = NULL;
+               napi_gro_complete(nskb);
+       } else {
+               napi->gro_count++;
+       }
        NAPI_GRO_CB(skb)->count = 1;
        NAPI_GRO_CB(skb)->age = jiffies;
        skb_shinfo(skb)->gso_size = skb_gro_len(skb);
@@@ -3910,12 -3972,39 +3972,39 @@@ normal
        goto pull;
  }
  
+ struct packet_offload *gro_find_receive_by_type(__be16 type)
+ {
+       struct list_head *offload_head = &offload_base;
+       struct packet_offload *ptype;
+
+       list_for_each_entry_rcu(ptype, offload_head, list) {
+               if (ptype->type != type || !ptype->callbacks.gro_receive)
+                       continue;
+               return ptype;
+       }
+
+       return NULL;
+ }
+ EXPORT_SYMBOL(gro_find_receive_by_type);
+
+ struct packet_offload *gro_find_complete_by_type(__be16 type)
+ {
+       struct list_head *offload_head = &offload_base;
+       struct packet_offload *ptype;
+
+       list_for_each_entry_rcu(ptype, offload_head, list) {
+               if (ptype->type != type || !ptype->callbacks.gro_complete)
+                       continue;
+               return ptype;
+       }
+
+       return NULL;
+ }
+ EXPORT_SYMBOL(gro_find_complete_by_type);
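
These two lookups let encapsulation offloads chain into an inner protocol's GRO callbacks instead of rescanning offload_base by hand; callers must be in an RCU read-side section, which the GRO path already guarantees. A hedged sketch of a tunnel gro_receive delegating an inner Ethernet frame (ETH_P_TEB and the function name are illustrative):

    static struct sk_buff **mytun_gro_receive(struct sk_buff **head,
                                              struct sk_buff *skb)
    {
            struct packet_offload *ptype;

            ptype = gro_find_receive_by_type(htons(ETH_P_TEB));
            if (!ptype)
                    return NULL;            /* no inner offload: no merge */
            return ptype->callbacks.gro_receive(head, skb);
    }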
  
  static gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
  {
        switch (ret) {
        case GRO_NORMAL:
-               if (netif_receive_skb(skb))
+               if (netif_receive_skb_internal(skb))
                        ret = GRO_DROP;
                break;
  
        return ret;
  }
  
- static void skb_gro_reset_offset(struct sk_buff *skb)
- {
-       const struct skb_shared_info *pinfo = skb_shinfo(skb);
-       const skb_frag_t *frag0 = &pinfo->frags[0];
-       NAPI_GRO_CB(skb)->data_offset = 0;
-       NAPI_GRO_CB(skb)->frag0 = NULL;
-       NAPI_GRO_CB(skb)->frag0_len = 0;
-       if (skb_mac_header(skb) == skb_tail_pointer(skb) &&
-           pinfo->nr_frags &&
-           !PageHighMem(skb_frag_page(frag0))) {
-               NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0);
-               NAPI_GRO_CB(skb)->frag0_len = skb_frag_size(frag0);
-       }
- }
  gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
  {
-       skb_gro_reset_offset(skb);
+       trace_napi_gro_receive_entry(skb);
  
        return napi_skb_finish(dev_gro_receive(napi, skb), skb);
  }
@@@ -3981,8 -4053,7 +4053,7 @@@ struct sk_buff *napi_get_frags(struct n
  
        if (!skb) {
                skb = netdev_alloc_skb_ip_align(napi->dev, GRO_MAX_HEAD);
-               if (skb)
-                       napi->skb = skb;
+               napi->skb = skb;
        }
        return skb;
  }
@@@ -3993,12 -4064,7 +4064,7 @@@ static gro_result_t napi_frags_finish(s
  {
        switch (ret) {
        case GRO_NORMAL:
-       case GRO_HELD:
-               skb->protocol = eth_type_trans(skb, skb->dev);
-               if (ret == GRO_HELD)
-                       skb_gro_pull(skb, -ETH_HLEN);
-               else if (netif_receive_skb(skb))
+               if (netif_receive_skb_internal(skb))
                        ret = GRO_DROP;
                break;
  
                napi_reuse_skb(napi, skb);
                break;
  
+       case GRO_HELD:
        case GRO_MERGED:
                break;
        }
  static struct sk_buff *napi_frags_skb(struct napi_struct *napi)
  {
        struct sk_buff *skb = napi->skb;
-       struct ethhdr *eth;
-       unsigned int hlen;
-       unsigned int off;
  
        napi->skb = NULL;
  
-       skb_reset_mac_header(skb);
-       skb_gro_reset_offset(skb);
-       off = skb_gro_offset(skb);
-       hlen = off + sizeof(*eth);
-       eth = skb_gro_header_fast(skb, off);
-       if (skb_gro_header_hard(skb, hlen)) {
-               eth = skb_gro_header_slow(skb, hlen, off);
-               if (unlikely(!eth)) {
-                       napi_reuse_skb(napi, skb);
-                       skb = NULL;
-                       goto out;
-               }
+       if (unlikely(!pskb_may_pull(skb, sizeof(struct ethhdr)))) {
+               napi_reuse_skb(napi, skb);
+               return NULL;
        }
+       skb->protocol = eth_type_trans(skb, skb->dev);
  
-       skb_gro_pull(skb, sizeof(*eth));
-       /*
-        * This works because the only protocols we care about don't require
-        * special handling.  We'll fix it up properly at the end.
-        */
-       skb->protocol = eth->h_proto;
- out:
        return skb;
  }
  
@@@ -4057,12 -4103,14 +4103,14 @@@ gro_result_t napi_gro_frags(struct napi
        if (!skb)
                return GRO_DROP;
  
+       trace_napi_gro_frags_entry(skb);
        return napi_frags_finish(napi, skb, dev_gro_receive(napi, skb));
  }
  EXPORT_SYMBOL(napi_gro_frags);
  
  /*
-  * net_rps_action sends any pending IPI's for rps.
+  * net_rps_action_and_irq_enable sends any pending IPIs for RPS.
   * Note: called with local irq disabled, but exits with local irq enabled.
   */
  static void net_rps_action_and_irq_enable(struct softnet_data *sd)
@@@ -4267,17 -4315,10 +4315,10 @@@ EXPORT_SYMBOL(netif_napi_add)
  
  void netif_napi_del(struct napi_struct *napi)
  {
-       struct sk_buff *skb, *next;
        list_del_init(&napi->dev_list);
        napi_free_frags(napi);
  
-       for (skb = napi->gro_list; skb; skb = next) {
-               next = skb->next;
-               skb->next = NULL;
-               kfree_skb(skb);
-       }
+       kfree_skb_list(napi->gro_list);
        napi->gro_list = NULL;
        napi->gro_count = 0;
  }
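
kfree_skb_list() consumes a whole NULL-terminated ->next chain, exactly what the removed loop did by hand. The same helper suits any error path left holding a segment chain; a sketch, where 'features' and the failure predicate are illustrative:

    static int mydrv_xmit_gso(struct sk_buff *skb, netdev_features_t features)
    {
            struct sk_buff *segs = skb_gso_segment(skb, features);

            if (IS_ERR(segs))
                    return PTR_ERR(segs);
            if (mydrv_ring_full())                  /* hypothetical condition */
                    kfree_skb_list(segs);           /* frees segs, segs->next, ... */
            return 0;
    }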
@@@ -4394,19 -4435,6 +4435,6 @@@ struct netdev_adjacent 
        struct rcu_head rcu;
  };
  
- static struct netdev_adjacent *__netdev_find_adj_rcu(struct net_device *dev,
-                                                    struct net_device *adj_dev,
-                                                    struct list_head *adj_list)
- {
-       struct netdev_adjacent *adj;
-       list_for_each_entry_rcu(adj, adj_list, list) {
-               if (adj->dev == adj_dev)
-                       return adj;
-       }
-       return NULL;
- }
  static struct netdev_adjacent *__netdev_find_adj(struct net_device *dev,
                                                 struct net_device *adj_dev,
                                                 struct list_head *adj_list)
@@@ -4445,13 -4473,12 +4473,12 @@@ EXPORT_SYMBOL(netdev_has_upper_dev)
   * Find out if a device is linked to an upper device and return true in case
   * it is. The caller must hold the RTNL lock.
   */
- bool netdev_has_any_upper_dev(struct net_device *dev)
static bool netdev_has_any_upper_dev(struct net_device *dev)
  {
        ASSERT_RTNL();
  
        return !list_empty(&dev->all_adj_list.upper);
  }
- EXPORT_SYMBOL(netdev_has_any_upper_dev);
  
  /**
   * netdev_master_upper_dev_get - Get master upper device
@@@ -4500,7 -4527,7 +4527,7 @@@ struct net_device *netdev_all_upper_get
  {
        struct netdev_adjacent *upper;
  
-       WARN_ON_ONCE(!rcu_read_lock_held());
+       WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());
  
        upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
  
@@@ -4570,6 -4597,27 +4597,27 @@@ void *netdev_lower_get_next_private_rcu
  }
  EXPORT_SYMBOL(netdev_lower_get_next_private_rcu);
  
+ /**
+  * netdev_lower_get_first_private_rcu - Get the first ->private from the
+  *                                      lower neighbour list, RCU variant
+  * @dev: device
+  *
+  * Gets the first netdev_adjacent->private from the dev's lower neighbour
+  * list. The caller must hold RCU read lock.
+  */
+ void *netdev_lower_get_first_private_rcu(struct net_device *dev)
+ {
+       struct netdev_adjacent *lower;
+
+       lower = list_first_or_null_rcu(&dev->adj_list.lower,
+                       struct netdev_adjacent, list);
+       if (lower)
+               return lower->private;
+       return NULL;
+ }
+ EXPORT_SYMBOL(netdev_lower_get_first_private_rcu);
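
A plausible consumer is a stacked driver (team/bonding style) peeking at its first slave's private area from a fast path. A minimal sketch, assuming hypothetical my_port / my_port_do_stats names:

    static void my_master_poll(struct net_device *master)
    {
            struct my_port *port;

            rcu_read_lock();
            port = netdev_lower_get_first_private_rcu(master);
            if (port)
                    my_port_do_stats(port);         /* hypothetical consumer */
            rcu_read_unlock();
    }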
  /**
   * netdev_master_upper_dev_get_rcu - Get master upper device
   * @dev: device
@@@ -4589,13 -4637,36 +4637,36 @@@ struct net_device *netdev_master_upper_
  }
  EXPORT_SYMBOL(netdev_master_upper_dev_get_rcu);
  
+ static int netdev_adjacent_sysfs_add(struct net_device *dev,
+                             struct net_device *adj_dev,
+                             struct list_head *dev_list)
+ {
+       char linkname[IFNAMSIZ+7];
+
+       sprintf(linkname, dev_list == &dev->adj_list.upper ?
+               "upper_%s" : "lower_%s", adj_dev->name);
+       return sysfs_create_link(&(dev->dev.kobj), &(adj_dev->dev.kobj),
+                                linkname);
+ }
+
+ static void netdev_adjacent_sysfs_del(struct net_device *dev,
+                              char *name,
+                              struct list_head *dev_list)
+ {
+       char linkname[IFNAMSIZ+7];
+
+       sprintf(linkname, dev_list == &dev->adj_list.upper ?
+               "upper_%s" : "lower_%s", name);
+       sysfs_remove_link(&(dev->dev.kobj), linkname);
+ }
+
+ #define netdev_adjacent_is_neigh_list(dev, dev_list) \
+               (dev_list == &dev->adj_list.upper || \
+                dev_list == &dev->adj_list.lower)
+
  static int __netdev_adjacent_dev_insert(struct net_device *dev,
                                        struct net_device *adj_dev,
                                        struct list_head *dev_list,
                                        void *private, bool master)
  {
        struct netdev_adjacent *adj;
-       char linkname[IFNAMSIZ+7];
        int ret;
  
        adj = __netdev_find_adj(dev, adj_dev, dev_list);
        pr_debug("dev_hold for %s, because of link added from %s to %s\n",
                 adj_dev->name, dev->name, adj_dev->name);
  
-       if (dev_list == &dev->adj_list.lower) {
-               sprintf(linkname, "lower_%s", adj_dev->name);
-               ret = sysfs_create_link(&(dev->dev.kobj),
-                                       &(adj_dev->dev.kobj), linkname);
-               if (ret)
-                       goto free_adj;
-       } else if (dev_list == &dev->adj_list.upper) {
-               sprintf(linkname, "upper_%s", adj_dev->name);
-               ret = sysfs_create_link(&(dev->dev.kobj),
-                                       &(adj_dev->dev.kobj), linkname);
+       if (netdev_adjacent_is_neigh_list(dev, dev_list)) {
+               ret = netdev_adjacent_sysfs_add(dev, adj_dev, dev_list);
                if (ret)
                        goto free_adj;
        }
        return 0;
  
  remove_symlinks:
-       if (dev_list == &dev->adj_list.lower) {
-               sprintf(linkname, "lower_%s", adj_dev->name);
-               sysfs_remove_link(&(dev->dev.kobj), linkname);
-       } else if (dev_list == &dev->adj_list.upper) {
-               sprintf(linkname, "upper_%s", adj_dev->name);
-               sysfs_remove_link(&(dev->dev.kobj), linkname);
-       }
+       if (netdev_adjacent_is_neigh_list(dev, dev_list))
+               netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list);
  free_adj:
        kfree(adj);
        dev_put(adj_dev);
        return ret;
  }
  
- void __netdev_adjacent_dev_remove(struct net_device *dev,
-                                 struct net_device *adj_dev,
-                                 struct list_head *dev_list)
static void __netdev_adjacent_dev_remove(struct net_device *dev,
+                                        struct net_device *adj_dev,
+                                        struct list_head *dev_list)
  {
        struct netdev_adjacent *adj;
-       char linkname[IFNAMSIZ+7];
  
        adj = __netdev_find_adj(dev, adj_dev, dev_list);
  
        if (adj->master)
                sysfs_remove_link(&(dev->dev.kobj), "master");
  
-       if (dev_list == &dev->adj_list.lower) {
-               sprintf(linkname, "lower_%s", adj_dev->name);
-               sysfs_remove_link(&(dev->dev.kobj), linkname);
-       } else if (dev_list == &dev->adj_list.upper) {
-               sprintf(linkname, "upper_%s", adj_dev->name);
-               sysfs_remove_link(&(dev->dev.kobj), linkname);
-       }
+       if (netdev_adjacent_is_neigh_list(dev, dev_list))
+               netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list);
  
        list_del_rcu(&adj->list);
        pr_debug("dev_put for %s, because link removed from %s to %s\n",
        kfree_rcu(adj, rcu);
  }
  
- int __netdev_adjacent_dev_link_lists(struct net_device *dev,
-                                    struct net_device *upper_dev,
-                                    struct list_head *up_list,
-                                    struct list_head *down_list,
-                                    void *private, bool master)
static int __netdev_adjacent_dev_link_lists(struct net_device *dev,
+                                           struct net_device *upper_dev,
+                                           struct list_head *up_list,
+                                           struct list_head *down_list,
+                                           void *private, bool master)
  {
        int ret;
  
        return 0;
  }
  
- int __netdev_adjacent_dev_link(struct net_device *dev,
-                              struct net_device *upper_dev)
static int __netdev_adjacent_dev_link(struct net_device *dev,
+                                     struct net_device *upper_dev)
  {
        return __netdev_adjacent_dev_link_lists(dev, upper_dev,
                                                &dev->all_adj_list.upper,
                                                NULL, false);
  }
  
- void __netdev_adjacent_dev_unlink_lists(struct net_device *dev,
-                                       struct net_device *upper_dev,
-                                       struct list_head *up_list,
-                                       struct list_head *down_list)
static void __netdev_adjacent_dev_unlink_lists(struct net_device *dev,
+                                              struct net_device *upper_dev,
+                                              struct list_head *up_list,
+                                              struct list_head *down_list)
  {
        __netdev_adjacent_dev_remove(dev, upper_dev, up_list);
        __netdev_adjacent_dev_remove(upper_dev, dev, down_list);
  }
  
- void __netdev_adjacent_dev_unlink(struct net_device *dev,
-                                 struct net_device *upper_dev)
static void __netdev_adjacent_dev_unlink(struct net_device *dev,
+                                        struct net_device *upper_dev)
  {
        __netdev_adjacent_dev_unlink_lists(dev, upper_dev,
                                           &dev->all_adj_list.upper,
                                           &upper_dev->all_adj_list.lower);
  }
  
- int __netdev_adjacent_dev_link_neighbour(struct net_device *dev,
-                                        struct net_device *upper_dev,
-                                        void *private, bool master)
static int __netdev_adjacent_dev_link_neighbour(struct net_device *dev,
+                                               struct net_device *upper_dev,
+                                               void *private, bool master)
  {
        int ret = __netdev_adjacent_dev_link(dev, upper_dev);
  
        return 0;
  }
  
- void __netdev_adjacent_dev_unlink_neighbour(struct net_device *dev,
-                                           struct net_device *upper_dev)
static void __netdev_adjacent_dev_unlink_neighbour(struct net_device *dev,
+                                                  struct net_device *upper_dev)
  {
        __netdev_adjacent_dev_unlink(dev, upper_dev);
        __netdev_adjacent_dev_unlink_lists(dev, upper_dev,
@@@ -4962,20 -5013,24 +5013,24 @@@ void netdev_upper_dev_unlink(struct net
  }
  EXPORT_SYMBOL(netdev_upper_dev_unlink);
  
- void *netdev_lower_dev_get_private_rcu(struct net_device *dev,
-                                      struct net_device *lower_dev)
+ void netdev_adjacent_rename_links(struct net_device *dev, char *oldname)
  {
-       struct netdev_adjacent *lower;
+       struct netdev_adjacent *iter;
  
-       if (!lower_dev)
-               return NULL;
-       lower = __netdev_find_adj_rcu(dev, lower_dev, &dev->adj_list.lower);
-       if (!lower)
-               return NULL;
+       list_for_each_entry(iter, &dev->adj_list.upper, list) {
+               netdev_adjacent_sysfs_del(iter->dev, oldname,
+                                         &iter->dev->adj_list.lower);
+               netdev_adjacent_sysfs_add(iter->dev, dev,
+                                         &iter->dev->adj_list.lower);
+       }
  
-       return lower->private;
+       list_for_each_entry(iter, &dev->adj_list.lower, list) {
+               netdev_adjacent_sysfs_del(iter->dev, oldname,
+                                         &iter->dev->adj_list.upper);
+               netdev_adjacent_sysfs_add(iter->dev, dev,
+                                         &iter->dev->adj_list.upper);
+       }
  }
- EXPORT_SYMBOL(netdev_lower_dev_get_private_rcu);
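
netdev_adjacent_rename_links() keeps every neighbour's upper_<ifname>/lower_<ifname> sysfs symlink consistent across a rename. The expected wiring is the device rename path, roughly as follows (a sketch, assuming the caller buffers the old name before committing the new one, as dev_change_name() does):

    char oldname[IFNAMSIZ];

    memcpy(oldname, dev->name, IFNAMSIZ);
    strlcpy(dev->name, newname, IFNAMSIZ);          /* commit the new name */
    netdev_adjacent_rename_links(dev, oldname);     /* repoint neighbour links */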
  
  void *netdev_lower_dev_get_private(struct net_device *dev,
                                   struct net_device *lower_dev)
@@@ -5309,6 -5364,17 +5364,17 @@@ int dev_change_flags(struct net_device 
  }
  EXPORT_SYMBOL(dev_change_flags);
  
+ static int __dev_set_mtu(struct net_device *dev, int new_mtu)
+ {
+       const struct net_device_ops *ops = dev->netdev_ops;
+
+       if (ops->ndo_change_mtu)
+               return ops->ndo_change_mtu(dev, new_mtu);
+
+       dev->mtu = new_mtu;
+       return 0;
+ }
+
  /**
   *    dev_set_mtu - Change maximum transfer unit
   *    @dev: device
   */
  int dev_set_mtu(struct net_device *dev, int new_mtu)
  {
-       const struct net_device_ops *ops = dev->netdev_ops;
-       int err;
+       int err, orig_mtu;
  
        if (new_mtu == dev->mtu)
                return 0;
        if (!netif_device_present(dev))
                return -ENODEV;
  
-       err = 0;
-       if (ops->ndo_change_mtu)
-               err = ops->ndo_change_mtu(dev, new_mtu);
-       else
-               dev->mtu = new_mtu;
+       err = call_netdevice_notifiers(NETDEV_PRECHANGEMTU, dev);
+       err = notifier_to_errno(err);
+       if (err)
+               return err;
  
-       if (!err)
-               call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
+       orig_mtu = dev->mtu;
+       err = __dev_set_mtu(dev, new_mtu);
+       if (!err) {
+               err = call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
+               err = notifier_to_errno(err);
+               if (err) {
+                       /* Set the MTU back and notify everyone again,
+                        * so that they have a chance to revert changes.
+                        */
+                       __dev_set_mtu(dev, orig_mtu);
+                       call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
+               }
+       }
        return err;
  }
  EXPORT_SYMBOL(dev_set_mtu);
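
The NETDEV_PRECHANGEMTU round trip added above lets any netdevice notifier veto the change before dev->mtu is touched, while the CHANGEMTU replay after a failed post-notify returns everyone to the old value. A sketch of a vetoing listener; the busy predicate is made up, and note that at this stage the notifier info carries only the device, not the proposed MTU:

    static int my_netdev_event(struct notifier_block *nb,
                               unsigned long event, void *ptr)
    {
            struct net_device *dev = netdev_notifier_info_to_dev(ptr);

            if (event == NETDEV_PRECHANGEMTU && my_dev_busy(dev))
                    return notifier_from_errno(-EBUSY); /* aborts dev_set_mtu() */
            return NOTIFY_DONE;
    }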
@@@ -5692,7 -5768,7 +5768,7 @@@ void netif_stacked_transfer_operstate(c
  }
  EXPORT_SYMBOL(netif_stacked_transfer_operstate);
  
- #ifdef CONFIG_RPS
+ #ifdef CONFIG_SYSFS
  static int netif_alloc_rx_queues(struct net_device *dev)
  {
        unsigned int i, count = dev->num_rx_queues;
@@@ -5831,13 -5907,8 +5907,8 @@@ int register_netdevice(struct net_devic
        dev->features |= NETIF_F_SOFT_FEATURES;
        dev->wanted_features = dev->features & dev->hw_features;
  
-       /* Turn on no cache copy if HW is doing checksum */
        if (!(dev->flags & IFF_LOOPBACK)) {
                dev->hw_features |= NETIF_F_NOCACHE_COPY;
-               if (dev->features & NETIF_F_ALL_CSUM) {
-                       dev->wanted_features |= NETIF_F_NOCACHE_COPY;
-                       dev->features |= NETIF_F_NOCACHE_COPY;
-               }
        }
  
        /* Make NETIF_F_HIGHDMA inheritable to VLAN devices.
@@@ -6242,7 -6313,7 +6313,7 @@@ struct net_device *alloc_netdev_mqs(in
                return NULL;
        }
  
- #ifdef CONFIG_RPS
+ #ifdef CONFIG_SYSFS
        if (rxqs < 1) {
                pr_err("alloc_netdev: Unable to allocate device with zero RX queues\n");
                return NULL;
        if (netif_alloc_netdev_queues(dev))
                goto free_all;
  
- #ifdef CONFIG_RPS
+ #ifdef CONFIG_SYSFS
        dev->num_rx_queues = rxqs;
        dev->real_num_rx_queues = rxqs;
        if (netif_alloc_rx_queues(dev))
@@@ -6318,7 -6389,7 +6389,7 @@@ free_all
  free_pcpu:
        free_percpu(dev->pcpu_refcnt);
        netif_free_tx_queues(dev);
- #ifdef CONFIG_RPS
+ #ifdef CONFIG_SYSFS
        kfree(dev->_rx);
  #endif
  
@@@ -6343,7 -6414,7 +6414,7 @@@ void free_netdev(struct net_device *dev
        release_net(dev_net(dev));
  
        netif_free_tx_queues(dev);
- #ifdef CONFIG_RPS
+ #ifdef CONFIG_SYSFS
        kfree(dev->_rx);
  #endif
  
@@@ -6613,11 -6684,11 +6684,11 @@@ static int dev_cpu_callback(struct noti
  
        /* Process offline CPU's input_pkt_queue */
        while ((skb = __skb_dequeue(&oldsd->process_queue))) {
-               netif_rx(skb);
+               netif_rx_internal(skb);
                input_queue_head_incr(oldsd);
        }
        while ((skb = __skb_dequeue(&oldsd->input_pkt_queue))) {
-               netif_rx(skb);
+               netif_rx_internal(skb);
                input_queue_head_incr(oldsd);
        }
  
@@@ -6930,28 -7001,18 +7001,18 @@@ static int __init net_dev_init(void
        for_each_possible_cpu(i) {
                struct softnet_data *sd = &per_cpu(softnet_data, i);
  
-               memset(sd, 0, sizeof(*sd));
                skb_queue_head_init(&sd->input_pkt_queue);
                skb_queue_head_init(&sd->process_queue);
-               sd->completion_queue = NULL;
                INIT_LIST_HEAD(&sd->poll_list);
-               sd->output_queue = NULL;
                sd->output_queue_tailp = &sd->output_queue;
  #ifdef CONFIG_RPS
                sd->csd.func = rps_trigger_softirq;
                sd->csd.info = sd;
-               sd->csd.flags = 0;
                sd->cpu = i;
  #endif
  
                sd->backlog.poll = process_backlog;
                sd->backlog.weight = weight_p;
-               sd->backlog.gro_list = NULL;
-               sd->backlog.gro_count = 0;
- #ifdef CONFIG_NET_FLOW_LIMIT
-               sd->flow_limit = NULL;
- #endif
        }
  
        dev_boot_phase = 0;
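
The deleted memset and explicit NULL/0 stores were redundant because static per-cpu areas are zero-initialized at boot; softnet_data is declared earlier in this file (in the mainline source of this era) roughly as:

    DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);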