Merge tag 'for-linus-4.2-rc0-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip
author Linus Torvalds <torvalds@linux-foundation.org>
Wed, 1 Jul 2015 18:53:46 +0000 (11:53 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Wed, 1 Jul 2015 18:53:46 +0000 (11:53 -0700)
Pull xen updates from David Vrabel:
 "Xen features and cleanups for 4.2-rc0:

   - add "make xenconfig" to assist in generating configs for Xen guests

   - preparatory cleanups necessary for supporting 64 KiB pages in ARM
     guests

   - automatically use hvc0 as the default console in ARM guests"
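
A usage sketch of the new xenconfig helper (the target comes from the
"kconfig: add xenconfig defconfig helper" commit below; starting from
defconfig is an assumption, not something this merge mandates):

    $ make defconfig     # start from the architecture's default config
    $ make xenconfig     # merge in the Xen guest config fragment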

* tag 'for-linus-4.2-rc0-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip:
  block/xen-blkback: s/nr_pages/nr_segs/
  block/xen-blkfront: Remove invalid comment
  block/xen-blkfront: Remove unused macro MAXIMUM_OUTSTANDING_BLOCK_REQS
  arm/xen: Drop duplicate define mfn_to_virt
  xen/grant-table: Remove unused macro SPP
  xen/xenbus: client: Fix call of virt_to_mfn in xenbus_grant_ring
  xen: Include xen/page.h rather than asm/xen/page.h
  kconfig: add xenconfig defconfig helper
  kconfig: clarify kvmconfig is for kvm
  xen/pcifront: Remove usage of struct timeval
  xen/tmem: use BUILD_BUG_ON() in favor of BUG_ON()
  hvc_xen: avoid uninitialized variable warning
  xenbus: avoid uninitialized variable warning
  xen/arm: allow console=hvc0 to be omitted for guests
  arm,arm64/xen: move Xen initialization earlier
  arm/xen: Correctly check if the event channel interrupt is present

arch/arm/kernel/setup.c
arch/arm64/kernel/setup.c
drivers/block/xen-blkback/blkback.c
drivers/block/xen-blkback/common.h
drivers/block/xen-blkfront.c
drivers/net/xen-netback/netback.c
drivers/net/xen-netfront.c
drivers/pci/xen-pcifront.c
drivers/xen/tmem.c

diff --combined arch/arm/kernel/setup.c
index e6d8c7658ffda7d4e5a03d600d9d1ed20e6ac57f,aa9bfebe113f6dddc269d1c4909502483c38d0d2..36c18b73c1f4631f3882ba492dbeb1f99e9b7242
@@@ -46,6 -46,7 +46,7 @@@
  #include <asm/cacheflush.h>
  #include <asm/cachetype.h>
  #include <asm/tlbflush.h>
+ #include <asm/xen/hypervisor.h>
  
  #include <asm/prom.h>
  #include <asm/mach/arch.h>
@@@ -75,7 -76,8 +76,7 @@@ __setup("fpe=", fpe_setup)
  
  extern void init_default_cache_policy(unsigned long);
  extern void paging_init(const struct machine_desc *desc);
 -extern void early_paging_init(const struct machine_desc *,
 -                            struct proc_info_list *);
 +extern void early_paging_init(const struct machine_desc *);
  extern void sanity_check_meminfo(void);
  extern enum reboot_mode reboot_mode;
  extern void setup_dma_zone(const struct machine_desc *desc);
@@@ -92,9 -94,6 +93,9 @@@ unsigned int __atags_pointer __initdata
  unsigned int system_rev;
  EXPORT_SYMBOL(system_rev);
  
 +const char *system_serial;
 +EXPORT_SYMBOL(system_serial);
 +
  unsigned int system_serial_low;
  EXPORT_SYMBOL(system_serial_low);
  
@@@ -841,25 -840,8 +842,25 @@@ arch_initcall(customize_machine)
  
  static int __init init_machine_late(void)
  {
 +      struct device_node *root;
 +      int ret;
 +
        if (machine_desc->init_late)
                machine_desc->init_late();
 +
 +      root = of_find_node_by_path("/");
 +      if (root) {
 +              ret = of_property_read_string(root, "serial-number",
 +                                            &system_serial);
 +              if (ret)
 +                      system_serial = NULL;
 +      }
 +
 +      if (!system_serial)
 +              system_serial = kasprintf(GFP_KERNEL, "%08x%08x",
 +                                        system_serial_high,
 +                                        system_serial_low);
 +
        return 0;
  }
  late_initcall(init_machine_late);
@@@ -955,9 -937,7 +956,9 @@@ void __init setup_arch(char **cmdline_p
  
        parse_early_param();
  
 -      early_paging_init(mdesc, lookup_processor_type(read_cpuid_id()));
 +#ifdef CONFIG_MMU
 +      early_paging_init(mdesc);
 +#endif
        setup_dma_zone(mdesc);
        sanity_check_meminfo();
        arm_memblock_init(mdesc);
  
        arm_dt_init_cpu_maps();
        psci_init();
+       xen_early_init();
  #ifdef CONFIG_SMP
        if (is_smp()) {
                if (!mdesc->smp_init || !mdesc->smp_init()) {
@@@ -1130,7 -1111,8 +1132,7 @@@ static int c_show(struct seq_file *m, v
  
        seq_printf(m, "Hardware\t: %s\n", machine_name);
        seq_printf(m, "Revision\t: %04x\n", system_rev);
 -      seq_printf(m, "Serial\t\t: %08x%08x\n",
 -                 system_serial_high, system_serial_low);
 +      seq_printf(m, "Serial\t\t: %s\n", system_serial);
  
        return 0;
  }
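
The Serial field in /proc/cpuinfo above now prefers the root node's
"serial-number" device-tree property, falling back to the legacy
system_serial_high/low pair (property name from the diff; the value in
this hypothetical fragment is made up):

    / {
            serial-number = "0123456789abcdef";
    };
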
diff --combined arch/arm64/kernel/setup.c
index ffd3970721bf6497db2f622bcd3ed31fd3007de1,1b36ba9b73acaf340eed6f717523bd4a3ffc2006..f3067d4d4e35711680376372395667efdc45ad07
@@@ -64,6 -64,7 +64,7 @@@
  #include <asm/psci.h>
  #include <asm/efi.h>
  #include <asm/virt.h>
+ #include <asm/xen/hypervisor.h>
  
  unsigned long elf_hwcap __read_mostly;
  EXPORT_SYMBOL_GPL(elf_hwcap);
@@@ -105,6 -106,18 +106,6 @@@ static struct resource mem_res[] = 
  #define kernel_code mem_res[0]
  #define kernel_data mem_res[1]
  
 -void __init early_print(const char *str, ...)
 -{
 -      char buf[256];
 -      va_list ap;
 -
 -      va_start(ap, str);
 -      vsnprintf(buf, sizeof(buf), str, ap);
 -      va_end(ap);
 -
 -      printk("%s", buf);
 -}
 -
  /*
   * The recorded values of x0 .. x3 upon kernel entry.
   */
@@@ -314,14 -327,12 +315,14 @@@ static void __init setup_processor(void
  
  static void __init setup_machine_fdt(phys_addr_t dt_phys)
  {
 -      if (!dt_phys || !early_init_dt_scan(phys_to_virt(dt_phys))) {
 -              early_print("\n"
 -                      "Error: invalid device tree blob at physical address 0x%p (virtual address 0x%p)\n"
 -                      "The dtb must be 8-byte aligned and passed in the first 512MB of memory\n"
 -                      "\nPlease check your bootloader.\n",
 -                      dt_phys, phys_to_virt(dt_phys));
 +      void *dt_virt = fixmap_remap_fdt(dt_phys);
 +
 +      if (!dt_virt || !early_init_dt_scan(dt_virt)) {
 +              pr_crit("\n"
 +                      "Error: invalid device tree blob at physical address %pa (virtual address 0x%p)\n"
 +                      "The dtb must be 8-byte aligned and must not exceed 2 MB in size\n"
 +                      "\nPlease check your bootloader.",
 +                      &dt_phys, dt_virt);
  
                while (true)
                        cpu_relax();
@@@ -364,6 -375,8 +365,6 @@@ void __init setup_arch(char **cmdline_p
  {
        setup_processor();
  
 -      setup_machine_fdt(__fdt_pointer);
 -
        init_mm.start_code = (unsigned long) _text;
        init_mm.end_code   = (unsigned long) _etext;
        init_mm.end_data   = (unsigned long) _edata;
        early_fixmap_init();
        early_ioremap_init();
  
 +      setup_machine_fdt(__fdt_pointer);
 +
        parse_early_param();
  
        /*
        if (acpi_disabled) {
                unflatten_device_tree();
                psci_dt_init();
 -              cpu_read_bootcpu_ops();
 -#ifdef CONFIG_SMP
 -              of_smp_init_cpus();
 -#endif
        } else {
                psci_acpi_init();
 -              acpi_init_cpus();
        }
+       xen_early_init();
  
 +      cpu_read_bootcpu_ops();
  #ifdef CONFIG_SMP
 +      smp_init_cpus();
        smp_build_mpidr_hash();
  #endif
  
diff --combined drivers/block/xen-blkback/blkback.c
index 2126842fb6e8a862a36b733b8eb709cf785b0591,7049528b35091233216b183645b4393189f999e8..ced96777b677b9bcddd65bae004a7a51b5cf0dc3
@@@ -83,13 -83,6 +83,13 @@@ module_param_named(max_persistent_grant
  MODULE_PARM_DESC(max_persistent_grants,
                   "Maximum number of grants to map persistently");
  
 +/*
 + * Maximum order of pages to be used for the shared ring between front and
 + * backend, 4KB page granularity is used.
 + */
 +unsigned int xen_blkif_max_ring_order = XENBUS_MAX_RING_PAGE_ORDER;
 +module_param_named(max_ring_page_order, xen_blkif_max_ring_order, int, S_IRUGO);
 +MODULE_PARM_DESC(max_ring_page_order, "Maximum order of pages to be used for the shared ring");
  /*
   * The LRU mechanism to clean the lists of persistent grants needs to
   * be executed periodically. The time interval between consecutive executions
@@@ -736,7 -729,7 +736,7 @@@ static void xen_blkbk_unmap_and_respond
        struct grant_page **pages = req->segments;
        unsigned int invcount;
  
-       invcount = xen_blkbk_unmap_prepare(blkif, pages, req->nr_pages,
+       invcount = xen_blkbk_unmap_prepare(blkif, pages, req->nr_segs,
                                           req->unmap, req->unmap_pages);
  
        work->data = req;
@@@ -922,7 -915,7 +922,7 @@@ static int xen_blkbk_map_seg(struct pen
        int rc;
  
        rc = xen_blkbk_map(pending_req->blkif, pending_req->segments,
-                          pending_req->nr_pages,
+                          pending_req->nr_segs,
                           (pending_req->operation != BLKIF_OP_READ));
  
        return rc;
@@@ -938,7 -931,7 +938,7 @@@ static int xen_blkbk_parse_indirect(str
        int indirect_grefs, rc, n, nseg, i;
        struct blkif_request_segment *segments = NULL;
  
-       nseg = pending_req->nr_pages;
+       nseg = pending_req->nr_segs;
        indirect_grefs = INDIRECT_PAGES(nseg);
        BUG_ON(indirect_grefs > BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST);
  
@@@ -1258,7 -1251,7 +1258,7 @@@ static int dispatch_rw_block_io(struct 
        pending_req->id        = req->u.rw.id;
        pending_req->operation = req_operation;
        pending_req->status    = BLKIF_RSP_OKAY;
-       pending_req->nr_pages  = nseg;
+       pending_req->nr_segs   = nseg;
  
        if (req->operation != BLKIF_OP_INDIRECT) {
                preq.dev               = req->u.rw.handle;
  
   fail_flush:
        xen_blkbk_unmap(blkif, pending_req->segments,
-                       pending_req->nr_pages);
+                       pending_req->nr_segs);
   fail_response:
        /* Haven't submitted any bio's yet. */
        make_response(blkif, req->u.rw.id, req_operation, BLKIF_RSP_ERROR);
@@@ -1445,12 -1438,6 +1445,12 @@@ static int __init xen_blkif_init(void
        if (!xen_domain())
                return -ENODEV;
  
 +      if (xen_blkif_max_ring_order > XENBUS_MAX_RING_PAGE_ORDER) {
 +              pr_info("Invalid max_ring_order (%d), will use default max: %d.\n",
 +                      xen_blkif_max_ring_order, XENBUS_MAX_RING_PAGE_ORDER);
 +              xen_blkif_max_ring_order = XENBUS_MAX_RING_PAGE_ORDER;
 +      }
 +
        rc = xen_blkif_interface_init();
        if (rc)
                goto failed_init;
diff --combined drivers/block/xen-blkback/common.h
index 8ccc49d01c8eb7c5fd821df8909bf8fbeeaa35a7,7a03e07f52f3e34019cc0ea0c138387a2901f801..45a044a53d1e562db4e606623840d0c667aa56e3
@@@ -44,7 -44,6 +44,7 @@@
  #include <xen/interface/io/blkif.h>
  #include <xen/interface/io/protocols.h>
  
 +extern unsigned int xen_blkif_max_ring_order;
  /*
   * This is the maximum number of segments that would be allowed in indirect
   * requests. This value will also be passed to the frontend.
@@@ -249,7 -248,7 +249,7 @@@ struct backend_info
  #define PERSISTENT_GNT_WAS_ACTIVE     1
  
  /* Number of requests that we can fit in a ring */
 -#define XEN_BLKIF_REQS                        32
 +#define XEN_BLKIF_REQS_PER_PAGE               32
  
  struct persistent_gnt {
        struct page *page;
@@@ -321,7 -320,6 +321,7 @@@ struct xen_blkif 
        struct work_struct      free_work;
        /* Thread shutdown wait queue. */
        wait_queue_head_t       shutdown_wq;
 +      unsigned int nr_ring_pages;
  };
  
  struct seg_buf {
@@@ -345,7 -343,7 +345,7 @@@ struct grant_page 
  struct pending_req {
        struct xen_blkif        *blkif;
        u64                     id;
-       int                     nr_pages;
+       int                     nr_segs;
        atomic_t                pendcnt;
        unsigned short          operation;
        int                     status;
diff --combined drivers/block/xen-blkfront.c
index fc770b7d3beb1951e80c2f16956f3dd0897efe48,60cf1d627d232754de4faaccabc9921f039028ec..6d89ed35d80c0caaf8bf57ba82c7e9f3a9194bb9
@@@ -98,21 -98,7 +98,21 @@@ static unsigned int xen_blkif_max_segme
  module_param_named(max, xen_blkif_max_segments, int, S_IRUGO);
  MODULE_PARM_DESC(max, "Maximum amount of segments in indirect requests (default is 32)");
  
 -#define BLK_RING_SIZE __CONST_RING_SIZE(blkif, PAGE_SIZE)
 +/*
 + * Maximum order of pages to be used for the shared ring between front and
 + * backend, 4KB page granularity is used.
 + */
 +static unsigned int xen_blkif_max_ring_order;
 +module_param_named(max_ring_page_order, xen_blkif_max_ring_order, int, S_IRUGO);
 +MODULE_PARM_DESC(max_ring_page_order, "Maximum order of pages to be used for the shared ring");
 +
 +#define BLK_RING_SIZE(info) __CONST_RING_SIZE(blkif, PAGE_SIZE * (info)->nr_ring_pages)
 +#define BLK_MAX_RING_SIZE __CONST_RING_SIZE(blkif, PAGE_SIZE * XENBUS_MAX_RING_PAGES)
 +/*
 + * ring-ref%i i=(-1UL) would take 11 characters + 'ring-ref' is 8, so 19
 + * characters are enough. Define to 20 to stay consistent with the backend.
 + */
 +#define RINGREF_NAME_LEN (20)
  
  /*
   * We have one of these per vbd, whether ide, scsi or 'other'.  They
@@@ -128,14 -114,13 +128,14 @@@ struct blkfront_inf
        int vdevice;
        blkif_vdev_t handle;
        enum blkif_state connected;
 -      int ring_ref;
 +      int ring_ref[XENBUS_MAX_RING_PAGES];
 +      unsigned int nr_ring_pages;
        struct blkif_front_ring ring;
        unsigned int evtchn, irq;
        struct request_queue *rq;
        struct work_struct work;
        struct gnttab_free_callback callback;
 -      struct blk_shadow shadow[BLK_RING_SIZE];
 +      struct blk_shadow shadow[BLK_MAX_RING_SIZE];
        struct list_head grants;
        struct list_head indirect_pages;
        unsigned int persistent_gnts_c;
@@@ -183,7 -168,7 +183,7 @@@ static int blkfront_setup_indirect(stru
  static int get_id_from_freelist(struct blkfront_info *info)
  {
        unsigned long free = info->shadow_free;
 -      BUG_ON(free >= BLK_RING_SIZE);
 +      BUG_ON(free >= BLK_RING_SIZE(info));
        info->shadow_free = info->shadow[free].req.u.rw.id;
        info->shadow[free].req.u.rw.id = 0x0fffffee; /* debug */
        return free;
@@@ -996,7 -981,7 +996,7 @@@ static void blkif_free(struct blkfront_
                }
        }
  
 -      for (i = 0; i < BLK_RING_SIZE; i++) {
 +      for (i = 0; i < BLK_RING_SIZE(info); i++) {
                /*
                 * Clear persistent grants present in requests already
                 * on the shared ring
@@@ -1046,15 -1031,12 +1046,15 @@@ free_shadow
        flush_work(&info->work);
  
        /* Free resources associated with old device channel. */
 -      if (info->ring_ref != GRANT_INVALID_REF) {
 -              gnttab_end_foreign_access(info->ring_ref, 0,
 -                                        (unsigned long)info->ring.sring);
 -              info->ring_ref = GRANT_INVALID_REF;
 -              info->ring.sring = NULL;
 +      for (i = 0; i < info->nr_ring_pages; i++) {
 +              if (info->ring_ref[i] != GRANT_INVALID_REF) {
 +                      gnttab_end_foreign_access(info->ring_ref[i], 0, 0);
 +                      info->ring_ref[i] = GRANT_INVALID_REF;
 +              }
        }
 +      free_pages((unsigned long)info->ring.sring, get_order(info->nr_ring_pages * PAGE_SIZE));
 +      info->ring.sring = NULL;
 +
        if (info->irq)
                unbind_from_irqhandler(info->irq, info);
        info->evtchn = info->irq = 0;
@@@ -1074,12 -1056,6 +1074,6 @@@ static void blkif_completion(struct blk
                s->req.u.indirect.nr_segments : s->req.u.rw.nr_segments;
  
        if (bret->operation == BLKIF_OP_READ && info->feature_persistent) {
-               /*
-                * Copy the data received from the backend into the bvec.
-                * Since bv_offset can be different than 0, and bv_len different
-                * than PAGE_SIZE, we have to keep track of the current offset,
-                * to be sure we are copying the data from the right shared page.
-                */
                for_each_sg(s->sg, sg, nseg, i) {
                        BUG_ON(sg->offset + sg->length > PAGE_SIZE);
                        shared_data = kmap_atomic(
@@@ -1173,7 -1149,7 +1167,7 @@@ static irqreturn_t blkif_interrupt(int 
                 * never have given to it (we stamp it up to BLK_RING_SIZE -
                 * look in get_id_from_freelist.
                 */
 -              if (id >= BLK_RING_SIZE) {
 +              if (id >= BLK_RING_SIZE(info)) {
                        WARN(1, "%s: response to %s has incorrect id (%ld)\n",
                             info->gd->disk_name, op_name(bret->operation), id);
                        /* We can't safely get the 'struct request' as
@@@ -1261,30 -1237,26 +1255,30 @@@ static int setup_blkring(struct xenbus_
                         struct blkfront_info *info)
  {
        struct blkif_sring *sring;
 -      grant_ref_t gref;
 -      int err;
 +      int err, i;
 +      unsigned long ring_size = info->nr_ring_pages * PAGE_SIZE;
 +      grant_ref_t gref[XENBUS_MAX_RING_PAGES];
  
 -      info->ring_ref = GRANT_INVALID_REF;
 +      for (i = 0; i < info->nr_ring_pages; i++)
 +              info->ring_ref[i] = GRANT_INVALID_REF;
  
 -      sring = (struct blkif_sring *)__get_free_page(GFP_NOIO | __GFP_HIGH);
 +      sring = (struct blkif_sring *)__get_free_pages(GFP_NOIO | __GFP_HIGH,
 +                                                     get_order(ring_size));
        if (!sring) {
                xenbus_dev_fatal(dev, -ENOMEM, "allocating shared ring");
                return -ENOMEM;
        }
        SHARED_RING_INIT(sring);
 -      FRONT_RING_INIT(&info->ring, sring, PAGE_SIZE);
 +      FRONT_RING_INIT(&info->ring, sring, ring_size);
  
 -      err = xenbus_grant_ring(dev, info->ring.sring, 1, &gref);
 +      err = xenbus_grant_ring(dev, info->ring.sring, info->nr_ring_pages, gref);
        if (err < 0) {
 -              free_page((unsigned long)sring);
 +              free_pages((unsigned long)sring, get_order(ring_size));
                info->ring.sring = NULL;
                goto fail;
        }
 -      info->ring_ref = gref;
 +      for (i = 0; i < info->nr_ring_pages; i++)
 +              info->ring_ref[i] = gref[i];
  
        err = xenbus_alloc_evtchn(dev, &info->evtchn);
        if (err)
@@@ -1312,18 -1284,7 +1306,18 @@@ static int talk_to_blkback(struct xenbu
  {
        const char *message = NULL;
        struct xenbus_transaction xbt;
 -      int err;
 +      int err, i;
 +      unsigned int max_page_order = 0;
 +      unsigned int ring_page_order = 0;
 +
 +      err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
 +                         "max-ring-page-order", "%u", &max_page_order);
 +      if (err != 1)
 +              info->nr_ring_pages = 1;
 +      else {
 +              ring_page_order = min(xen_blkif_max_ring_order, max_page_order);
 +              info->nr_ring_pages = 1 << ring_page_order;
 +      }
  
        /* Create shared ring, alloc event channel. */
        err = setup_blkring(dev, info);
@@@ -1337,32 -1298,11 +1331,32 @@@ again
                goto destroy_blkring;
        }
  
 -      err = xenbus_printf(xbt, dev->nodename,
 -                          "ring-ref", "%u", info->ring_ref);
 -      if (err) {
 -              message = "writing ring-ref";
 -              goto abort_transaction;
 +      if (info->nr_ring_pages == 1) {
 +              err = xenbus_printf(xbt, dev->nodename,
 +                                  "ring-ref", "%u", info->ring_ref[0]);
 +              if (err) {
 +                      message = "writing ring-ref";
 +                      goto abort_transaction;
 +              }
 +      } else {
 +              err = xenbus_printf(xbt, dev->nodename,
 +                                  "ring-page-order", "%u", ring_page_order);
 +              if (err) {
 +                      message = "writing ring-page-order";
 +                      goto abort_transaction;
 +              }
 +
 +              for (i = 0; i < info->nr_ring_pages; i++) {
 +                      char ring_ref_name[RINGREF_NAME_LEN];
 +
 +                      snprintf(ring_ref_name, RINGREF_NAME_LEN, "ring-ref%u", i);
 +                      err = xenbus_printf(xbt, dev->nodename, ring_ref_name,
 +                                          "%u", info->ring_ref[i]);
 +                      if (err) {
 +                              message = "writing ring-ref";
 +                              goto abort_transaction;
 +                      }
 +              }
        }
        err = xenbus_printf(xbt, dev->nodename,
                            "event-channel", "%u", info->evtchn);
                goto destroy_blkring;
        }
  
 +      for (i = 0; i < BLK_RING_SIZE(info); i++)
 +              info->shadow[i].req.u.rw.id = i+1;
 +      info->shadow[BLK_RING_SIZE(info)-1].req.u.rw.id = 0x0fffffff;
        xenbus_switch_state(dev, XenbusStateInitialised);
  
        return 0;
  static int blkfront_probe(struct xenbus_device *dev,
                          const struct xenbus_device_id *id)
  {
 -      int err, vdevice, i;
 +      int err, vdevice;
        struct blkfront_info *info;
  
        /* FIXME: Use dynamic device id if this is not set. */
        info->connected = BLKIF_STATE_DISCONNECTED;
        INIT_WORK(&info->work, blkif_restart_queue);
  
 -      for (i = 0; i < BLK_RING_SIZE; i++)
 -              info->shadow[i].req.u.rw.id = i+1;
 -      info->shadow[BLK_RING_SIZE-1].req.u.rw.id = 0x0fffffff;
 -
        /* Front end dir is a number, which is used as the id. */
        info->handle = simple_strtoul(strrchr(dev->nodename, '/')+1, NULL, 0);
        dev_set_drvdata(&dev->dev, info);
  
 -      err = talk_to_blkback(dev, info);
 -      if (err) {
 -              kfree(info);
 -              dev_set_drvdata(&dev->dev, NULL);
 -              return err;
 -      }
 -
        return 0;
  }
  
@@@ -1520,10 -1468,10 +1514,10 @@@ static int blkif_recover(struct blkfron
  
        /* Stage 2: Set up free list. */
        memset(&info->shadow, 0, sizeof(info->shadow));
 -      for (i = 0; i < BLK_RING_SIZE; i++)
 +      for (i = 0; i < BLK_RING_SIZE(info); i++)
                info->shadow[i].req.u.rw.id = i+1;
        info->shadow_free = info->ring.req_prod_pvt;
 -      info->shadow[BLK_RING_SIZE-1].req.u.rw.id = 0x0fffffff;
 +      info->shadow[BLK_RING_SIZE(info)-1].req.u.rw.id = 0x0fffffff;
  
        rc = blkfront_setup_indirect(info);
        if (rc) {
        blk_queue_max_segments(info->rq, segs);
        bio_list_init(&bio_list);
        INIT_LIST_HEAD(&requests);
 -      for (i = 0; i < BLK_RING_SIZE; i++) {
 +      for (i = 0; i < BLK_RING_SIZE(info); i++) {
                /* Not in use? */
                if (!copy[i].request)
                        continue;
@@@ -1741,7 -1689,7 +1735,7 @@@ static int blkfront_setup_indirect(stru
                segs = info->max_indirect_segments;
        }
  
 -      err = fill_grant_buffer(info, (segs + INDIRECT_GREFS(segs)) * BLK_RING_SIZE);
 +      err = fill_grant_buffer(info, (segs + INDIRECT_GREFS(segs)) * BLK_RING_SIZE(info));
        if (err)
                goto out_of_memory;
  
                 * grants, we need to allocate a set of pages that can be
                 * used for mapping indirect grefs
                 */
 -              int num = INDIRECT_GREFS(segs) * BLK_RING_SIZE;
 +              int num = INDIRECT_GREFS(segs) * BLK_RING_SIZE(info);
  
                BUG_ON(!list_empty(&info->indirect_pages));
                for (i = 0; i < num; i++) {
                }
        }
  
 -      for (i = 0; i < BLK_RING_SIZE; i++) {
 +      for (i = 0; i < BLK_RING_SIZE(info); i++) {
                info->shadow[i].grants_used = kzalloc(
                        sizeof(info->shadow[i].grants_used[0]) * segs,
                        GFP_NOIO);
        return 0;
  
  out_of_memory:
 -      for (i = 0; i < BLK_RING_SIZE; i++) {
 +      for (i = 0; i < BLK_RING_SIZE(info); i++) {
                kfree(info->shadow[i].grants_used);
                info->shadow[i].grants_used = NULL;
                kfree(info->shadow[i].sg);
@@@ -1950,15 -1898,8 +1944,15 @@@ static void blkback_changed(struct xenb
        dev_dbg(&dev->dev, "blkfront:blkback_changed to state %d.\n", backend_state);
  
        switch (backend_state) {
 -      case XenbusStateInitialising:
        case XenbusStateInitWait:
 +              if (dev->state != XenbusStateInitialising)
 +                      break;
 +              if (talk_to_blkback(dev, info)) {
 +                      kfree(info);
 +                      dev_set_drvdata(&dev->dev, NULL);
 +                      break;
 +              }
 +      case XenbusStateInitialising:
        case XenbusStateInitialised:
        case XenbusStateReconfiguring:
        case XenbusStateReconfigured:
@@@ -2142,12 -2083,6 +2136,12 @@@ static int __init xlblk_init(void
        if (!xen_domain())
                return -ENODEV;
  
 +      if (xen_blkif_max_ring_order > XENBUS_MAX_RING_PAGE_ORDER) {
 +              pr_info("Invalid max_ring_order (%d), will use default max: %d.\n",
 +                      xen_blkif_max_ring_order, XENBUS_MAX_RING_PAGE_ORDER);
 +              xen_blkif_max_ring_order = 0;
 +      }
 +
        if (!xen_has_pv_disk_devices())
                return -ENODEV;
  
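The multi-page shared-ring support in the xen-blkfront/xen-blkback hunks
above is tuned through the new max_ring_page_order module parameter on
either side; a hypothetical load-time example (parameter name taken from
the diff, assuming the frontend is built as the xen-blkfront module):

    # modprobe xen-blkfront max_ring_page_order=2   # 2^2 = 4 ring pages
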
diff --combined drivers/net/xen-netback/netback.c
index 5485f91294e7182da2c31f85715570629cac62d9,9c6a5048ab824147652a518f69d331d7a6fe4afc..880d0d63e872e5725d76fe998db0282c749f45d6
  #include <xen/xen.h>
  #include <xen/events.h>
  #include <xen/interface/memory.h>
+ #include <xen/page.h>
  
  #include <asm/xen/hypercall.h>
- #include <asm/xen/page.h>
  
  /* Provide an option to disable split event channels at load time as
   * event channels are limited resource. Split event channels are
   * enabled by default.
   */
 -bool separate_tx_rx_irq = 1;
 +bool separate_tx_rx_irq = true;
  module_param(separate_tx_rx_irq, bool, 0644);
  
  /* The time that packets can stay on the guest Rx internal queue
@@@ -515,9 -515,14 +515,9 @@@ static void xenvif_rx_action(struct xen
  
        while (xenvif_rx_ring_slots_available(queue, XEN_NETBK_RX_SLOTS_MAX)
               && (skb = xenvif_rx_dequeue(queue)) != NULL) {
 -              RING_IDX old_req_cons;
 -              RING_IDX ring_slots_used;
 -
                queue->last_rx_time = jiffies;
  
 -              old_req_cons = queue->rx.req_cons;
                XENVIF_RX_CB(skb)->meta_slots_used = xenvif_gop_skb(skb, &npo, queue);
 -              ring_slots_used = queue->rx.req_cons - old_req_cons;
  
                __skb_queue_tail(&rxq, skb);
        }
@@@ -748,7 -753,7 +748,7 @@@ static int xenvif_count_requests(struc
                slots++;
  
                if (unlikely((txp->offset + txp->size) > PAGE_SIZE)) {
 -                      netdev_err(queue->vif->dev, "Cross page boundary, txp->offset: %x, size: %u\n",
 +                      netdev_err(queue->vif->dev, "Cross page boundary, txp->offset: %u, size: %u\n",
                                 txp->offset, txp->size);
                        xenvif_fatal_tx_err(queue->vif);
                        return -EINVAL;
@@@ -874,7 -879,7 +874,7 @@@ static inline void xenvif_grant_handle_
        if (unlikely(queue->grant_tx_handle[pending_idx] !=
                     NETBACK_INVALID_HANDLE)) {
                netdev_err(queue->vif->dev,
 -                         "Trying to overwrite active handle! pending_idx: %x\n",
 +                         "Trying to overwrite active handle! pending_idx: 0x%x\n",
                           pending_idx);
                BUG();
        }
@@@ -887,7 -892,7 +887,7 @@@ static inline void xenvif_grant_handle_
        if (unlikely(queue->grant_tx_handle[pending_idx] ==
                     NETBACK_INVALID_HANDLE)) {
                netdev_err(queue->vif->dev,
 -                         "Trying to unmap invalid handle! pending_idx: %x\n",
 +                         "Trying to unmap invalid handle! pending_idx: 0x%x\n",
                           pending_idx);
                BUG();
        }
@@@ -1243,9 -1248,9 +1243,9 @@@ static void xenvif_tx_build_gops(struc
                /* No crossing a page as the payload mustn't fragment. */
                if (unlikely((txreq.offset + txreq.size) > PAGE_SIZE)) {
                        netdev_err(queue->vif->dev,
 -                                 "txreq.offset: %x, size: %u, end: %lu\n",
 +                                 "txreq.offset: %u, size: %u, end: %lu\n",
                                   txreq.offset, txreq.size,
 -                                 (txreq.offset&~PAGE_MASK) + txreq.size);
 +                                 (unsigned long)(txreq.offset&~PAGE_MASK) + txreq.size);
                        xenvif_fatal_tx_err(queue->vif);
                        break;
                }
@@@ -1593,12 -1598,12 +1593,12 @@@ static inline void xenvif_tx_dealloc_ac
                                        queue->pages_to_unmap,
                                        gop - queue->tx_unmap_ops);
                if (ret) {
 -                      netdev_err(queue->vif->dev, "Unmap fail: nr_ops %tx ret %d\n",
 +                      netdev_err(queue->vif->dev, "Unmap fail: nr_ops %tu ret %d\n",
                                   gop - queue->tx_unmap_ops, ret);
                        for (i = 0; i < gop - queue->tx_unmap_ops; ++i) {
                                if (gop[i].status != GNTST_okay)
                                        netdev_err(queue->vif->dev,
 -                                                 " host_addr: %llx handle: %x status: %d\n",
 +                                                 " host_addr: 0x%llx handle: 0x%x status: %d\n",
                                                   gop[i].host_addr,
                                                   gop[i].handle,
                                                   gop[i].status);
@@@ -1731,7 -1736,7 +1731,7 @@@ void xenvif_idx_unmap(struct xenvif_que
                                &queue->mmap_pages[pending_idx], 1);
        if (ret) {
                netdev_err(queue->vif->dev,
 -                         "Unmap fail: ret: %d pending_idx: %d host_addr: %llx handle: %x status: %d\n",
 +                         "Unmap fail: ret: %d pending_idx: %d host_addr: %llx handle: 0x%x status: %d\n",
                           ret,
                           pending_idx,
                           tx_unmap_op.host_addr,
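
The separate_tx_rx_irq option noted in the xen-netback comment above is
likewise a module parameter; a hypothetical way to disable split event
channels at load time (assuming the backend is built as the xen-netback
module):

    # modprobe xen-netback separate_tx_rx_irq=0
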
diff --combined drivers/net/xen-netfront.c
index 56d8afd11077de5d1f7b475bba37d1164c92bf41,ff88f3169f425ed9a3e64c711d9ebd35afdbc19c..b3e9491bc8ec81ccf55fa55dc5c0690259317555
@@@ -45,7 -45,6 +45,6 @@@
  #include <linux/slab.h>
  #include <net/ip.h>
  
- #include <asm/xen/page.h>
  #include <xen/xen.h>
  #include <xen/xenbus.h>
  #include <xen/events.h>
@@@ -733,7 -732,7 +732,7 @@@ static int xennet_get_responses(struct 
                if (unlikely(rx->status < 0 ||
                             rx->offset + rx->status > PAGE_SIZE)) {
                        if (net_ratelimit())
 -                              dev_warn(dev, "rx->offset: %x, size: %u\n",
 +                              dev_warn(dev, "rx->offset: %u, size: %d\n",
                                         rx->offset, rx->status);
                        xennet_move_rx_slot(queue, skb, ref);
                        err = -EINVAL;
@@@ -1560,8 -1559,9 +1559,8 @@@ static int xennet_init_queue(struct net
        spin_lock_init(&queue->tx_lock);
        spin_lock_init(&queue->rx_lock);
  
 -      init_timer(&queue->rx_refill_timer);
 -      queue->rx_refill_timer.data = (unsigned long)queue;
 -      queue->rx_refill_timer.function = rx_refill_timeout;
 +      setup_timer(&queue->rx_refill_timer, rx_refill_timeout,
 +                  (unsigned long)queue);
  
        snprintf(queue->name, sizeof(queue->name), "%s-q%u",
                 queue->info->netdev->name, queue->id);
@@@ -1697,7 -1697,6 +1696,7 @@@ static void xennet_destroy_queues(struc
  
                if (netif_running(info->netdev))
                        napi_disable(&queue->napi);
 +              del_timer_sync(&queue->rx_refill_timer);
                netif_napi_del(&queue->napi);
        }
  
@@@ -2102,6 -2101,9 +2101,6 @@@ static const struct attribute_group xen
  static int xennet_remove(struct xenbus_device *dev)
  {
        struct netfront_info *info = dev_get_drvdata(&dev->dev);
 -      unsigned int num_queues = info->netdev->real_num_tx_queues;
 -      struct netfront_queue *queue = NULL;
 -      unsigned int i = 0;
  
        dev_dbg(&dev->dev, "%s\n", dev->nodename);
  
  
        unregister_netdev(info->netdev);
  
 -      for (i = 0; i < num_queues; ++i) {
 -              queue = &info->queues[i];
 -              del_timer_sync(&queue->rx_refill_timer);
 -      }
 -
 -      if (num_queues) {
 -              kfree(info->queues);
 -              info->queues = NULL;
 -      }
 -
 +      xennet_destroy_queues(info);
        xennet_free_netdev(info->netdev);
  
        return 0;
diff --combined drivers/pci/xen-pcifront.c
index 240f388720857f0c1e3df0635d35fa71e6f05787,c4796c81c4aff8702baed4bd2394177f65373f45..8b7a900cd28b25e5a8d52a10357768df1d6513aa
@@@ -20,6 -20,7 +20,7 @@@
  #include <linux/workqueue.h>
  #include <linux/bitops.h>
  #include <linux/time.h>
+ #include <linux/ktime.h>
  #include <xen/platform_pci.h>
  
  #include <asm/xen/swiotlb-xen.h>
@@@ -115,7 -116,6 +116,6 @@@ static int do_pci_op(struct pcifront_de
        evtchn_port_t port = pdev->evtchn;
        unsigned irq = pdev->irq;
        s64 ns, ns_timeout;
-       struct timeval tv;
  
        spin_lock_irqsave(&pdev->sh_info_lock, irq_flags);
  
         * (in the latter case we end up continually re-executing poll() with a
         * timeout in the past). 1s difference gives plenty of slack for error.
         */
-       do_gettimeofday(&tv);
-       ns_timeout = timeval_to_ns(&tv) + 2 * (s64)NSEC_PER_SEC;
+       ns_timeout = ktime_get_ns() + 2 * (s64)NSEC_PER_SEC;
  
        xen_clear_irq_pending(irq);
  
                        (unsigned long *)&pdev->sh_info->flags)) {
                xen_poll_irq_timeout(irq, jiffies + 3*HZ);
                xen_clear_irq_pending(irq);
-               do_gettimeofday(&tv);
-               ns = timeval_to_ns(&tv);
+               ns = ktime_get_ns();
                if (ns > ns_timeout) {
                        dev_err(&pdev->xdev->dev,
                                "pciback not responding!!!\n");
@@@ -446,15 -444,9 +444,15 @@@ static int pcifront_scan_root(struct pc
                                 unsigned int domain, unsigned int bus)
  {
        struct pci_bus *b;
 +      LIST_HEAD(resources);
        struct pcifront_sd *sd = NULL;
        struct pci_bus_entry *bus_entry = NULL;
        int err = 0;
 +      static struct resource busn_res = {
 +              .start = 0,
 +              .end = 255,
 +              .flags = IORESOURCE_BUS,
 +      };
  
  #ifndef CONFIG_PCI_DOMAINS
        if (domain != 0) {
                err = -ENOMEM;
                goto err_out;
        }
 +      pci_add_resource(&resources, &ioport_resource);
 +      pci_add_resource(&resources, &iomem_resource);
 +      pci_add_resource(&resources, &busn_res);
        pcifront_init_sd(sd, domain, bus, pdev);
  
        pci_lock_rescan_remove();
  
 -      b = pci_scan_bus_parented(&pdev->xdev->dev, bus,
 -                                &pcifront_bus_ops, sd);
 +      b = pci_scan_root_bus(&pdev->xdev->dev, bus,
 +                                &pcifront_bus_ops, sd, &resources);
        if (!b) {
                dev_err(&pdev->xdev->dev,
                        "Error creating PCI Frontend Bus!\n");
                err = -ENOMEM;
                pci_unlock_rescan_remove();
 +              pci_free_resource_list(&resources);
                goto err_out;
        }
  
  
        list_add(&bus_entry->list, &pdev->root_buses);
  
 -      /* pci_scan_bus_parented skips devices which do not have a have
 +      /* pci_scan_root_bus skips devices which do not have a
        * devfn==0. The pcifront_scan_bus enumerates all devfn. */
        err = pcifront_scan_bus(pdev, domain, bus, b);
  
diff --combined drivers/xen/tmem.c
index d88f36754bf7efcd67750cce72ec607564bc6fd4,fb31d64c260879aa2b359ed13e0521425a8fe9cf..239738f944badfa3f12f3d61581ef5cb4d6910d4
@@@ -17,8 -17,8 +17,8 @@@
  
  #include <xen/xen.h>
  #include <xen/interface/xen.h>
+ #include <xen/page.h>
  #include <asm/xen/hypercall.h>
- #include <asm/xen/page.h>
  #include <asm/xen/hypervisor.h>
  #include <xen/tmem.h>
  
@@@ -381,15 -381,21 +381,15 @@@ static int __init xen_tmem_init(void
  #ifdef CONFIG_FRONTSWAP
        if (tmem_enabled && frontswap) {
                char *s = "";
 -              struct frontswap_ops *old_ops;
  
                tmem_frontswap_poolid = -1;
 -              old_ops = frontswap_register_ops(&tmem_frontswap_ops);
 -              if (IS_ERR(old_ops) || old_ops) {
 -                      if (IS_ERR(old_ops))
 -                              return PTR_ERR(old_ops);
 -                      s = " (WARNING: frontswap_ops overridden)";
 -              }
 +              frontswap_register_ops(&tmem_frontswap_ops);
                pr_info("frontswap enabled, RAM provided by Xen Transcendent Memory%s\n",
                        s);
        }
  #endif
  #ifdef CONFIG_CLEANCACHE
-       BUG_ON(sizeof(struct cleancache_filekey) != sizeof(struct tmem_oid));
+       BUILD_BUG_ON(sizeof(struct cleancache_filekey) != sizeof(struct tmem_oid));
        if (tmem_enabled && cleancache) {
                int err;
  