Git Repo - linux.git/commitdiff
Merge tag 'hyperv-next-signed' of git://git.kernel.org/pub/scm/linux/kernel/git/hyper...
author: Linus Torvalds <[email protected]>
Mon, 3 Feb 2020 14:42:03 +0000 (14:42 +0000)
committer: Linus Torvalds <[email protected]>
Mon, 3 Feb 2020 14:42:03 +0000 (14:42 +0000)
Pull Hyper-V updates from Sasha Levin:

 - Most of the commits here are work to enable host-initiated
   hibernation support by Dexuan Cui.

 - Fix for a warning shown when host sends non-aligned balloon requests
   by Tianyu Lan.

* tag 'hyperv-next-signed' of git://git.kernel.org/pub/scm/linux/kernel/git/hyperv/linux:
  hv_utils: Add the support of hibernation
  hv_utils: Support host-initiated hibernation request
  hv_utils: Support host-initiated restart request
  Tools: hv: Reopen the devices if read() or write() returns errors
  video: hyperv: hyperv_fb: Use physical memory for fb on HyperV Gen 1 VMs.
  Drivers: hv: vmbus: Ignore CHANNELMSG_TL_CONNECT_RESULT(23)
  video: hyperv_fb: Fix hibernation for the deferred IO feature
  Input: hyperv-keyboard: Add the support of hibernation
  hv_balloon: Balloon up according to request page number

1  2 
drivers/hv/hv_util.c
drivers/video/fbdev/hyperv_fb.c

diff --combined drivers/hv/hv_util.c
index 296f9098c9e4624a38d0382c3266a94af26a02eb,900b8a8af57c730e027ee808146deab0227abae5..92ee0fe4c919ec7171c76e15d96e532334cf7396
  
  #define SD_MAJOR      3
  #define SD_MINOR      0
+ #define SD_MINOR_1    1
+ #define SD_MINOR_2    2
+ #define SD_VERSION_3_1        (SD_MAJOR << 16 | SD_MINOR_1)
+ #define SD_VERSION_3_2        (SD_MAJOR << 16 | SD_MINOR_2)
  #define SD_VERSION    (SD_MAJOR << 16 | SD_MINOR)
  
  #define SD_MAJOR_1    1
@@@ -50,8 -54,10 +54,10 @@@ static int sd_srv_version
  static int ts_srv_version;
  static int hb_srv_version;
  
- #define SD_VER_COUNT 2
+ #define SD_VER_COUNT 4
  static const int sd_versions[] = {
+       SD_VERSION_3_2,
+       SD_VERSION_3_1,
        SD_VERSION,
        SD_VERSION_1
  };
@@@ -75,18 -81,56 +81,56 @@@ static const int fw_versions[] = 
        UTIL_WS2K8_FW_VERSION
  };
  
+ /*
+  * Send the "hibernate" udev event in a thread context.
+  */
+ struct hibernate_work_context {
+       struct work_struct work;
+       struct hv_device *dev;
+ };
+ static struct hibernate_work_context hibernate_context;
+ static bool hibernation_supported;
+ static void send_hibernate_uevent(struct work_struct *work)
+ {
+       char *uevent_env[2] = { "EVENT=hibernate", NULL };
+       struct hibernate_work_context *ctx;
+       ctx = container_of(work, struct hibernate_work_context, work);
+       kobject_uevent_env(&ctx->dev->device.kobj, KOBJ_CHANGE, uevent_env);
+       pr_info("Sent hibernation uevent\n");
+ }
+ static int hv_shutdown_init(struct hv_util_service *srv)
+ {
+       struct vmbus_channel *channel = srv->channel;
+       INIT_WORK(&hibernate_context.work, send_hibernate_uevent);
+       hibernate_context.dev = channel->device_obj;
+       hibernation_supported = hv_is_hibernation_supported();
+       return 0;
+ }
  static void shutdown_onchannelcallback(void *context);
  static struct hv_util_service util_shutdown = {
        .util_cb = shutdown_onchannelcallback,
+       .util_init = hv_shutdown_init,
  };
  
  static int hv_timesync_init(struct hv_util_service *srv);
+ static int hv_timesync_pre_suspend(void);
  static void hv_timesync_deinit(void);
  
  static void timesync_onchannelcallback(void *context);
  static struct hv_util_service util_timesynch = {
        .util_cb = timesync_onchannelcallback,
        .util_init = hv_timesync_init,
+       .util_pre_suspend = hv_timesync_pre_suspend,
        .util_deinit = hv_timesync_deinit,
  };
  
@@@ -98,18 -142,24 +142,24 @@@ static struct hv_util_service util_hear
  static struct hv_util_service util_kvp = {
        .util_cb = hv_kvp_onchannelcallback,
        .util_init = hv_kvp_init,
+       .util_pre_suspend = hv_kvp_pre_suspend,
+       .util_pre_resume = hv_kvp_pre_resume,
        .util_deinit = hv_kvp_deinit,
  };
  
  static struct hv_util_service util_vss = {
        .util_cb = hv_vss_onchannelcallback,
        .util_init = hv_vss_init,
+       .util_pre_suspend = hv_vss_pre_suspend,
+       .util_pre_resume = hv_vss_pre_resume,
        .util_deinit = hv_vss_deinit,
  };
  
  static struct hv_util_service util_fcopy = {
        .util_cb = hv_fcopy_onchannelcallback,
        .util_init = hv_fcopy_init,
+       .util_pre_suspend = hv_fcopy_pre_suspend,
+       .util_pre_resume = hv_fcopy_pre_resume,
        .util_deinit = hv_fcopy_deinit,
  };
  
@@@ -118,17 -168,27 +168,27 @@@ static void perform_shutdown(struct wor
        orderly_poweroff(true);
  }
  
+ static void perform_restart(struct work_struct *dummy)
+ {
+       orderly_reboot();
+ }
  /*
   * Perform the shutdown operation in a thread context.
   */
  static DECLARE_WORK(shutdown_work, perform_shutdown);
  
+ /*
+  * Perform the restart operation in a thread context.
+  */
+ static DECLARE_WORK(restart_work, perform_restart);
  static void shutdown_onchannelcallback(void *context)
  {
        struct vmbus_channel *channel = context;
+       struct work_struct *work = NULL;
        u32 recvlen;
        u64 requestid;
-       bool execute_shutdown = false;
        u8  *shut_txf_buf = util_shutdown.recv_buffer;
  
        struct shutdown_msg_data *shutdown_msg;
                                        sizeof(struct vmbuspipe_hdr) +
                                        sizeof(struct icmsg_hdr)];
  
+                       /*
+                        * shutdown_msg->flags can be 0(shut down), 2(reboot),
+                        * or 4(hibernate). It may bitwise-OR 1, which means
+                        * performing the request by force. Linux always tries
+                        * to perform the request by force.
+                        */
                        switch (shutdown_msg->flags) {
                        case 0:
                        case 1:
                                icmsghdrp->status = HV_S_OK;
-                               execute_shutdown = true;
+                               work = &shutdown_work;
                                pr_info("Shutdown request received -"
                                            " graceful shutdown initiated\n");
                                break;
+                       case 2:
+                       case 3:
+                               icmsghdrp->status = HV_S_OK;
+                               work = &restart_work;
+                               pr_info("Restart request received -"
+                                           " graceful restart initiated\n");
+                               break;
+                       case 4:
+                       case 5:
+                               pr_info("Hibernation request received\n");
+                               icmsghdrp->status = hibernation_supported ?
+                                       HV_S_OK : HV_E_FAIL;
+                               if (hibernation_supported)
+                                       work = &hibernate_context.work;
+                               break;
                        default:
                                icmsghdrp->status = HV_E_FAIL;
-                               execute_shutdown = false;
                                pr_info("Shutdown request received -"
                                            " Invalid request\n");
                                break;
                                       VM_PKT_DATA_INBAND, 0);
        }
  
-       if (execute_shutdown == true)
-               schedule_work(&shutdown_work);
+       if (work)
+               schedule_work(work);
  }
  
  /*
@@@ -211,7 -289,7 +289,7 @@@ static struct timespec64 hv_get_adj_hos
        unsigned long flags;
  
        spin_lock_irqsave(&host_ts.lock, flags);
 -      reftime = hyperv_cs->read(hyperv_cs);
 +      reftime = hv_read_reference_counter();
        newtime = host_ts.host_time + (reftime - host_ts.ref_time);
        ts = ns_to_timespec64((newtime - WLTIMEDELTA) * 100);
        spin_unlock_irqrestore(&host_ts.lock, flags);
@@@ -250,7 -328,7 +328,7 @@@ static inline void adj_guesttime(u64 ho
         */
        spin_lock_irqsave(&host_ts.lock, flags);
  
 -      cur_reftime = hyperv_cs->read(hyperv_cs);
 +      cur_reftime = hv_read_reference_counter();
        host_ts.host_time = hosttime;
        host_ts.ref_time = cur_reftime;
  
@@@ -315,7 -393,7 +393,7 @@@ static void timesync_onchannelcallback(
                                        sizeof(struct vmbuspipe_hdr) +
                                        sizeof(struct icmsg_hdr)];
                                adj_guesttime(timedatap->parenttime,
 -                                            hyperv_cs->read(hyperv_cs),
 +                                            hv_read_reference_counter(),
                                              timedatap->flags);
                        }
                }
@@@ -441,6 -519,44 +519,44 @@@ static int util_remove(struct hv_devic
        return 0;
  }
  
+ /*
+  * When we're in util_suspend(), all the userspace processes have been frozen
+  * (refer to hibernate() -> freeze_processes()). The userspace is thawed only
+  * after the whole resume procedure, including util_resume(), finishes.
+  */
+ static int util_suspend(struct hv_device *dev)
+ {
+       struct hv_util_service *srv = hv_get_drvdata(dev);
+       int ret = 0;
+       if (srv->util_pre_suspend) {
+               ret = srv->util_pre_suspend();
+               if (ret)
+                       return ret;
+       }
+       vmbus_close(dev->channel);
+       return 0;
+ }
+ static int util_resume(struct hv_device *dev)
+ {
+       struct hv_util_service *srv = hv_get_drvdata(dev);
+       int ret = 0;
+       if (srv->util_pre_resume) {
+               ret = srv->util_pre_resume();
+               if (ret)
+                       return ret;
+       }
+       ret = vmbus_open(dev->channel, 4 * HV_HYP_PAGE_SIZE,
+                        4 * HV_HYP_PAGE_SIZE, NULL, 0, srv->util_cb,
+                        dev->channel);
+       return ret;
+ }
  static const struct hv_vmbus_device_id id_table[] = {
        /* Shutdown guid */
        { HV_SHUTDOWN_GUID,
@@@ -477,6 -593,8 +593,8 @@@ static  struct hv_driver util_drv = 
        .id_table = id_table,
        .probe =  util_probe,
        .remove =  util_remove,
+       .suspend = util_suspend,
+       .resume =  util_resume,
        .driver = {
                .probe_type = PROBE_PREFER_ASYNCHRONOUS,
        },
@@@ -524,7 -642,7 +642,7 @@@ static struct ptp_clock *hv_ptp_clock
  static int hv_timesync_init(struct hv_util_service *srv)
  {
        /* TimeSync requires Hyper-V clocksource. */
 -      if (!hyperv_cs)
 +      if (!hv_read_reference_counter)
                return -ENODEV;
  
        spin_lock_init(&host_ts.lock);
        return 0;
  }
  
+ static void hv_timesync_cancel_work(void)
+ {
+       cancel_work_sync(&adj_time_work);
+ }
+ static int hv_timesync_pre_suspend(void)
+ {
+       hv_timesync_cancel_work();
+       return 0;
+ }
  static void hv_timesync_deinit(void)
  {
        if (hv_ptp_clock)
                ptp_clock_unregister(hv_ptp_clock);
-       cancel_work_sync(&adj_time_work);
+       hv_timesync_cancel_work();
  }
  
  static int __init init_hyperv_utils(void)
index afe9fd751cd5b7e3875b914e4e1059c3d10345d4,e2443dbaf3e9815742692a2849c73f4992adc42b..f47d50e560c063d2aa02ab1e29772355aed24474
   * "set-vmvideo" command. For example
   *     set-vmvideo -vmname name -horizontalresolution:1920 \
   * -verticalresolution:1200 -resolutiontype single
+  *
+  * Gen 1 VMs also support direct using VM's physical memory for framebuffer.
+  * It could improve the efficiency and performance for framebuffer and VM.
+  * This requires to allocate contiguous physical memory from Linux kernel's
+  * CMA memory allocator. To enable this, supply a kernel parameter to give
+  * enough memory space to CMA allocator for framebuffer. For example:
+  *    cma=130m
+  * This gives 130MB memory to CMA allocator that can be allocated to
+  * framebuffer. For reference, 8K resolution (7680x4320) takes about
+  * 127MB memory.
   */
  
  #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@@ -228,7 -238,6 +238,6 @@@ struct synthvid_msg 
  } __packed;
  
  
  /* FB driver definitions and structures */
  #define HVFB_WIDTH 1152 /* default screen width */
  #define HVFB_HEIGHT 864 /* default screen height */
@@@ -258,12 -267,15 +267,15 @@@ struct hvfb_par 
        /* If true, the VSC notifies the VSP on every framebuffer change */
        bool synchronous_fb;
  
+       /* If true, need to copy from deferred IO mem to framebuffer mem */
+       bool need_docopy;
        struct notifier_block hvfb_panic_nb;
  
        /* Memory for deferred IO and frame buffer itself */
        unsigned char *dio_vp;
        unsigned char *mmio_vp;
-       unsigned long mmio_pp;
+       phys_addr_t mmio_pp;
  
        /* Dirty rectangle, protected by delayed_refresh_lock */
        int x1, y1, x2, y2;
@@@ -434,7 -446,7 +446,7 @@@ static void synthvid_deferred_io(struc
                maxy = max_t(int, maxy, y2);
  
                /* Copy from dio space to mmio address */
-               if (par->fb_ready)
+               if (par->fb_ready && par->need_docopy)
                        hvfb_docopy(par, start, PAGE_SIZE);
        }
  
@@@ -751,12 -763,12 +763,12 @@@ static void hvfb_update_work(struct wor
                return;
  
        /* Copy the dirty rectangle to frame buffer memory */
-       for (j = y1; j < y2; j++) {
-               hvfb_docopy(par,
-                           j * info->fix.line_length +
-                           (x1 * screen_depth / 8),
-                           (x2 - x1) * screen_depth / 8);
-       }
+       if (par->need_docopy)
+               for (j = y1; j < y2; j++)
+                       hvfb_docopy(par,
+                                   j * info->fix.line_length +
+                                   (x1 * screen_depth / 8),
+                                   (x2 - x1) * screen_depth / 8);
  
        /* Refresh */
        if (par->fb_ready && par->update)
@@@ -801,7 -813,8 +813,8 @@@ static int hvfb_on_panic(struct notifie
        par = container_of(nb, struct hvfb_par, hvfb_panic_nb);
        par->synchronous_fb = true;
        info = par->info;
-       hvfb_docopy(par, 0, dio_fb_size);
+       if (par->need_docopy)
+               hvfb_docopy(par, 0, dio_fb_size);
        synthvid_update(info, 0, 0, INT_MAX, INT_MAX);
  
        return NOTIFY_DONE;
@@@ -895,7 -908,7 +908,7 @@@ static void hvfb_cfb_imageblit(struct f
                                               image->width, image->height);
  }
  
 -static struct fb_ops hvfb_ops = {
 +static const struct fb_ops hvfb_ops = {
        .owner = THIS_MODULE,
        .fb_check_var = hvfb_check_var,
        .fb_set_par = hvfb_set_par,
@@@ -940,6 -953,62 +953,62 @@@ static void hvfb_get_option(struct fb_i
        return;
  }
  
+ /*
+  * Allocate enough contiguous physical memory.
+  * Return physical address if succeeded or -1 if failed.
+  */
+ static phys_addr_t hvfb_get_phymem(struct hv_device *hdev,
+                                  unsigned int request_size)
+ {
+       struct page *page = NULL;
+       dma_addr_t dma_handle;
+       void *vmem;
+       phys_addr_t paddr = 0;
+       unsigned int order = get_order(request_size);
+       if (request_size == 0)
+               return -1;
+       if (order < MAX_ORDER) {
+               /* Call alloc_pages if the size is less than 2^MAX_ORDER */
+               page = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
+               if (!page)
+                       return -1;
+               paddr = (page_to_pfn(page) << PAGE_SHIFT);
+       } else {
+               /* Allocate from CMA */
+               hdev->device.coherent_dma_mask = DMA_BIT_MASK(64);
+               vmem = dma_alloc_coherent(&hdev->device,
+                                         round_up(request_size, PAGE_SIZE),
+                                         &dma_handle,
+                                         GFP_KERNEL | __GFP_NOWARN);
+               if (!vmem)
+                       return -1;
+               paddr = virt_to_phys(vmem);
+       }
+       return paddr;
+ }
+ /* Release contiguous physical memory */
+ static void hvfb_release_phymem(struct hv_device *hdev,
+                               phys_addr_t paddr, unsigned int size)
+ {
+       unsigned int order = get_order(size);
+       if (order < MAX_ORDER)
+               __free_pages(pfn_to_page(paddr >> PAGE_SHIFT), order);
+       else
+               dma_free_coherent(&hdev->device,
+                                 round_up(size, PAGE_SIZE),
+                                 phys_to_virt(paddr),
+                                 paddr);
+ }
  
  /* Get framebuffer memory from Hyper-V video pci space */
  static int hvfb_getmem(struct hv_device *hdev, struct fb_info *info)
        void __iomem *fb_virt;
        int gen2vm = efi_enabled(EFI_BOOT);
        resource_size_t pot_start, pot_end;
+       phys_addr_t paddr;
        int ret;
  
-       dio_fb_size =
-               screen_width * screen_height * screen_depth / 8;
+       info->apertures = alloc_apertures(1);
+       if (!info->apertures)
+               return -ENOMEM;
  
-       if (gen2vm) {
-               pot_start = 0;
-               pot_end = -1;
-       } else {
+       if (!gen2vm) {
                pdev = pci_get_device(PCI_VENDOR_ID_MICROSOFT,
-                             PCI_DEVICE_ID_HYPERV_VIDEO, NULL);
+                       PCI_DEVICE_ID_HYPERV_VIDEO, NULL);
                if (!pdev) {
                        pr_err("Unable to find PCI Hyper-V video\n");
+                       kfree(info->apertures);
                        return -ENODEV;
                }
  
+               info->apertures->ranges[0].base = pci_resource_start(pdev, 0);
+               info->apertures->ranges[0].size = pci_resource_len(pdev, 0);
+               /*
+                * For Gen 1 VM, we can directly use the contiguous memory
+                * from VM. If we succeed, deferred IO happens directly
+                * on this allocated framebuffer memory, avoiding extra
+                * memory copy.
+                */
+               paddr = hvfb_get_phymem(hdev, screen_fb_size);
+               if (paddr != (phys_addr_t) -1) {
+                       par->mmio_pp = paddr;
+                       par->mmio_vp = par->dio_vp = __va(paddr);
+                       info->fix.smem_start = paddr;
+                       info->fix.smem_len = screen_fb_size;
+                       info->screen_base = par->mmio_vp;
+                       info->screen_size = screen_fb_size;
+                       par->need_docopy = false;
+                       goto getmem_done;
+               }
+               pr_info("Unable to allocate enough contiguous physical memory on Gen 1 VM. Using MMIO instead.\n");
+       } else {
+               info->apertures->ranges[0].base = screen_info.lfb_base;
+               info->apertures->ranges[0].size = screen_info.lfb_size;
+       }
+       /*
+        * Cannot use the contiguous physical memory.
+        * Allocate mmio space for framebuffer.
+        */
+       dio_fb_size =
+               screen_width * screen_height * screen_depth / 8;
+       if (gen2vm) {
+               pot_start = 0;
+               pot_end = -1;
+       } else {
                if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM) ||
                    pci_resource_len(pdev, 0) < screen_fb_size) {
                        pr_err("Resource not available or (0x%lx < 0x%lx)\n",
        if (par->dio_vp == NULL)
                goto err3;
  
-       info->apertures = alloc_apertures(1);
-       if (!info->apertures)
-               goto err4;
-       if (gen2vm) {
-               info->apertures->ranges[0].base = screen_info.lfb_base;
-               info->apertures->ranges[0].size = screen_info.lfb_size;
-               remove_conflicting_framebuffers(info->apertures,
-                                               KBUILD_MODNAME, false);
-       } else {
-               info->apertures->ranges[0].base = pci_resource_start(pdev, 0);
-               info->apertures->ranges[0].size = pci_resource_len(pdev, 0);
-       }
        /* Physical address of FB device */
        par->mmio_pp = par->mem->start;
        /* Virtual address of FB device */
        info->screen_base = par->dio_vp;
        info->screen_size = dio_fb_size;
  
+ getmem_done:
+       remove_conflicting_framebuffers(info->apertures,
+                                       KBUILD_MODNAME, false);
        if (!gen2vm)
                pci_dev_put(pdev);
+       kfree(info->apertures);
  
        return 0;
  
- err4:
-       vfree(par->dio_vp);
  err3:
        iounmap(fb_virt);
  err2:
  err1:
        if (!gen2vm)
                pci_dev_put(pdev);
+       kfree(info->apertures);
  
        return -ENOMEM;
  }
  
  /* Release the framebuffer */
- static void hvfb_putmem(struct fb_info *info)
+ static void hvfb_putmem(struct hv_device *hdev, struct fb_info *info)
  {
        struct hvfb_par *par = info->par;
  
-       vfree(par->dio_vp);
-       iounmap(info->screen_base);
-       vmbus_free_mmio(par->mem->start, screen_fb_size);
+       if (par->need_docopy) {
+               vfree(par->dio_vp);
+               iounmap(info->screen_base);
+               vmbus_free_mmio(par->mem->start, screen_fb_size);
+       } else {
+               hvfb_release_phymem(hdev, info->fix.smem_start,
+                                   screen_fb_size);
+       }
        par->mem = NULL;
  }
  
@@@ -1062,6 -1165,7 +1165,7 @@@ static int hvfb_probe(struct hv_device 
        par = info->par;
        par->info = info;
        par->fb_ready = false;
+       par->need_docopy = true;
        init_completion(&par->wait);
        INIT_DELAYED_WORK(&par->dwork, hvfb_update_work);
  
  
  error:
        fb_deferred_io_cleanup(info);
-       hvfb_putmem(info);
+       hvfb_putmem(hdev, info);
  error2:
        vmbus_close(hdev->channel);
  error1:
@@@ -1177,7 -1281,7 +1281,7 @@@ static int hvfb_remove(struct hv_devic
        vmbus_close(hdev->channel);
        hv_set_drvdata(hdev, NULL);
  
-       hvfb_putmem(info);
+       hvfb_putmem(hdev, info);
        framebuffer_release(info);
  
        return 0;
@@@ -1194,6 -1298,7 +1298,7 @@@ static int hvfb_suspend(struct hv_devic
        fb_set_suspend(info, 1);
  
        cancel_delayed_work_sync(&par->dwork);
+       cancel_delayed_work_sync(&info->deferred_work);
  
        par->update_saved = par->update;
        par->update = false;
@@@ -1227,6 -1332,7 +1332,7 @@@ static int hvfb_resume(struct hv_devic
        par->fb_ready = true;
        par->update = par->update_saved;
  
+       schedule_delayed_work(&info->deferred_work, info->fbdefio->delay);
        schedule_delayed_work(&par->dwork, HVFB_UPDATE_DELAY);
  
        /* 0 means do resume */
This page took 0.089817 seconds and 4 git commands to generate.