Git Repo - linux.git/commitdiff
Merge tag 'hyperv-fixes-signed' of git://git.kernel.org/pub/scm/linux/kernel/git...
author Linus Torvalds <[email protected]>
Tue, 15 Sep 2020 23:20:43 +0000 (16:20 -0700)
committer Linus Torvalds <[email protected]>
Tue, 15 Sep 2020 23:20:43 +0000 (16:20 -0700)
Pull hyperv fixes from Wei Liu:
 "Two patches from Michael and Dexuan to fix vmbus hanging issues"

* tag 'hyperv-fixes-signed' of git://git.kernel.org/pub/scm/linux/kernel/git/hyperv/linux:
  Drivers: hv: vmbus: Add timeout to vmbus_wait_for_unload
  Drivers: hv: vmbus: hibernation: do not hang forever in vmbus_bus_resume()
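Both fixes apply the same idea: a wait for a host response in the panic or hibernation path must be bounded, so an unresponsive host cannot hang the guest forever. A minimal sketch of that pattern, using the real completion API but an illustrative example_event name (nothing below is taken verbatim from the patches):

#include <linux/completion.h>
#include <linux/jiffies.h>
#include <linux/printk.h>

static DECLARE_COMPLETION(example_event);

static void example_wait_bounded(void)
{
	/* Give up after 10 seconds instead of blocking indefinitely. */
	if (wait_for_completion_timeout(&example_event, 10 * HZ) == 0)
		pr_err("timed out waiting for the host response\n");
}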

drivers/hv/channel_mgmt.c
drivers/hv/vmbus_drv.c

diff --combined drivers/hv/channel_mgmt.c
index 591106cf58fc086c5a683da02ab8bd4204fbb069,af7832e1316741bb821da9c5a127131e51e33fc0..1d44bb635bb8450eece2e5f655314475ce33f896
@@@ -317,6 -317,7 +317,6 @@@ static struct vmbus_channel *alloc_chan
                return NULL;
  
        spin_lock_init(&channel->sched_lock);
 -      spin_lock_init(&channel->lock);
        init_completion(&channel->rescind_event);
  
        INIT_LIST_HEAD(&channel->sc_list);
@@@ -399,6 -400,8 +399,6 @@@ static void vmbus_release_relid(u32 rel
  
  void hv_process_channel_removal(struct vmbus_channel *channel)
  {
 -      unsigned long flags;
 -
        lockdep_assert_held(&vmbus_connection.channel_mutex);
        BUG_ON(!channel->rescind);
  
        if (channel->offermsg.child_relid != INVALID_RELID)
                vmbus_channel_unmap_relid(channel);
  
 -      if (channel->primary_channel == NULL) {
 +      if (channel->primary_channel == NULL)
                list_del(&channel->listentry);
 -      } else {
 -              struct vmbus_channel *primary_channel = channel->primary_channel;
 -              spin_lock_irqsave(&primary_channel->lock, flags);
 +      else
                list_del(&channel->sc_list);
 -              spin_unlock_irqrestore(&primary_channel->lock, flags);
 -      }
  
        /*
         * If this is a "perf" channel, updates the hv_numa_map[] masks so that
@@@ -463,6 -470,7 +463,6 @@@ static void vmbus_add_channel_work(stru
        struct vmbus_channel *newchannel =
                container_of(work, struct vmbus_channel, add_channel_work);
        struct vmbus_channel *primary_channel = newchannel->primary_channel;
 -      unsigned long flags;
        int ret;
  
        /*
@@@ -523,10 -531,13 +523,10 @@@ err_deq_chan
         */
        newchannel->probe_done = true;
  
 -      if (primary_channel == NULL) {
 +      if (primary_channel == NULL)
                list_del(&newchannel->listentry);
 -      } else {
 -              spin_lock_irqsave(&primary_channel->lock, flags);
 +      else
                list_del(&newchannel->sc_list);
 -              spin_unlock_irqrestore(&primary_channel->lock, flags);
 -      }
  
        /* vmbus_process_offer() has mapped the channel. */
        vmbus_channel_unmap_relid(newchannel);
@@@ -546,6 -557,7 +546,6 @@@ static void vmbus_process_offer(struct 
  {
        struct vmbus_channel *channel;
        struct workqueue_struct *wq;
 -      unsigned long flags;
        bool fnew = true;
  
        /*
                }
        }
  
 -      if (fnew)
 +      if (fnew) {
                list_add_tail(&newchannel->listentry,
                              &vmbus_connection.chn_list);
 -      else {
 +      } else {
                /*
                 * Check to see if this is a valid sub-channel.
                 */
                 * Process the sub-channel.
                 */
                newchannel->primary_channel = channel;
 -              spin_lock_irqsave(&channel->lock, flags);
                list_add_tail(&newchannel->sc_list, &channel->sc_list);
 -              spin_unlock_irqrestore(&channel->lock, flags);
        }
  
        vmbus_channel_map_relid(newchannel);
@@@ -688,7 -702,10 +688,7 @@@ static void init_vp_index(struct vmbus_
                 * In case alloc_cpumask_var() fails, bind it to
                 * VMBUS_CONNECT_CPU.
                 */
 -              channel->numa_node = cpu_to_node(VMBUS_CONNECT_CPU);
                channel->target_cpu = VMBUS_CONNECT_CPU;
 -              channel->target_vp =
 -                      hv_cpu_number_to_vp_number(VMBUS_CONNECT_CPU);
                if (perf_chn)
                        hv_set_alloced_cpu(VMBUS_CONNECT_CPU);
                return;
                        continue;
                break;
        }
 -      channel->numa_node = numa_node;
        alloced_mask = &hv_context.hv_numa_map[numa_node];
  
        if (cpumask_weight(alloced_mask) ==
        cpumask_set_cpu(target_cpu, alloced_mask);
  
        channel->target_cpu = target_cpu;
 -      channel->target_vp = hv_cpu_number_to_vp_number(target_cpu);
  
        free_cpumask_var(available_mask);
  }
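In the merged code, numa_node and target_vp are no longer cached on the channel: both can be derived from target_cpu whenever they are needed, as numa_node_show() below does with cpu_to_node(). A tiny sketch of the derivation, where cpu_to_node() and hv_cpu_number_to_vp_number() are the real helpers and the surrounding function is purely illustrative:

#include <linux/topology.h>
#include <asm/mshyperv.h>

static void example_derive_from_target_cpu(unsigned int target_cpu)
{
	int node = cpu_to_node(target_cpu);			/* replaces channel->numa_node */
	int vp   = hv_cpu_number_to_vp_number(target_cpu);	/* replaces channel->target_vp */

	(void)node;
	(void)vp;
}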
@@@ -731,7 -750,7 +731,7 @@@ static void vmbus_wait_for_unload(void
        void *page_addr;
        struct hv_message *msg;
        struct vmbus_channel_message_header *hdr;
-       u32 message_type;
+       u32 message_type, i;
  
        /*
         * CHANNELMSG_UNLOAD_RESPONSE is always delivered to the CPU which was
         * functional and vmbus_unload_response() will complete
         * vmbus_connection.unload_event. If not, the last thing we can do is
         * read message pages for all CPUs directly.
+        *
+        * Wait no more than 10 seconds so that the panic path can't get
+        * hung forever in case the response message isn't seen.
         */
-       while (1) {
+       for (i = 0; i < 1000; i++) {
                if (completion_done(&vmbus_connection.unload_event))
                        break;
  
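The hunk above caps the unload wait: instead of spinning in while (1), the loop gives up after 1000 iterations. Combined with the mdelay(10) at the bottom of each pass (already in the loop, not visible in this hunk), that bounds the panic-path wait at roughly 10 seconds. A stripped-down sketch of the idiom, with the per-CPU message-page scan elided:

#include <linux/completion.h>
#include <linux/delay.h>

static void example_bounded_poll(struct completion *done)
{
	unsigned int i;

	/* 1000 passes x ~10 ms per pass => give up after about 10 seconds. */
	for (i = 0; i < 1000; i++) {
		if (completion_done(done))
			break;
		/* ... scan the per-CPU message pages here ... */
		mdelay(10);
	}
}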
diff --combined drivers/hv/vmbus_drv.c
index 910b6e90866cae242f5c26037341e91732a5fb6c,0f2d6a60f769ecd0a19aba2a44a2fe8e1eddf169..946d0aba101f4adb16fc7a3412e7456e9715443d
@@@ -23,6 -23,7 +23,6 @@@
  #include <linux/cpu.h>
  #include <linux/sched/task_stack.h>
  
 -#include <asm/mshyperv.h>
  #include <linux/delay.h>
  #include <linux/notifier.h>
  #include <linux/ptrace.h>
@@@ -230,7 -231,7 +230,7 @@@ static ssize_t numa_node_show(struct de
        if (!hv_dev->channel)
                return -ENODEV;
  
 -      return sprintf(buf, "%d\n", hv_dev->channel->numa_node);
 +      return sprintf(buf, "%d\n", cpu_to_node(hv_dev->channel->target_cpu));
  }
  static DEVICE_ATTR_RO(numa_node);
  #endif
@@@ -511,17 -512,18 +511,17 @@@ static ssize_t channel_vp_mapping_show(
  {
        struct hv_device *hv_dev = device_to_hv_device(dev);
        struct vmbus_channel *channel = hv_dev->channel, *cur_sc;
 -      unsigned long flags;
        int buf_size = PAGE_SIZE, n_written, tot_written;
        struct list_head *cur;
  
        if (!channel)
                return -ENODEV;
  
 +      mutex_lock(&vmbus_connection.channel_mutex);
 +
        tot_written = snprintf(buf, buf_size, "%u:%u\n",
                channel->offermsg.child_relid, channel->target_cpu);
  
 -      spin_lock_irqsave(&channel->lock, flags);
 -
        list_for_each(cur, &channel->sc_list) {
                if (tot_written >= buf_size - 1)
                        break;
                tot_written += n_written;
        }
  
 -      spin_unlock_irqrestore(&channel->lock, flags);
 +      mutex_unlock(&vmbus_connection.channel_mutex);
  
        return tot_written;
  }
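Because the per-channel spinlock is gone in the merged code, channel_vp_mapping_show() walks the sub-channel list under the connection-wide channel_mutex instead, which is fine in this sleepable sysfs path. A minimal sketch of the same pattern with illustrative names (only the sc_list field mirrors the real structure):

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/printk.h>

struct example_channel {
	struct list_head sc_list;	/* head of the sub-channel list */
};

static DEFINE_MUTEX(example_channel_mutex);

static void example_walk_subchannels(struct example_channel *chan)
{
	struct list_head *cur;
	unsigned int n = 0;

	mutex_lock(&example_channel_mutex);	/* sleeping is allowed in this sysfs path */
	list_for_each(cur, &chan->sc_list)
		n++;	/* the real code formats one "relid:cpu" line per sub-channel */
	mutex_unlock(&example_channel_mutex);

	pr_info("%u sub-channel(s)\n", n);
}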
@@@ -1719,7 -1721,7 +1719,7 @@@ static ssize_t target_cpu_store(struct 
        /* No CPUs should come up or down during this. */
        cpus_read_lock();
  
 -      if (!cpumask_test_cpu(target_cpu, cpu_online_mask)) {
 +      if (!cpu_online(target_cpu)) {
                cpus_read_unlock();
                return -EINVAL;
        }
         */
  
        channel->target_cpu = target_cpu;
 -      channel->target_vp = hv_cpu_number_to_vp_number(target_cpu);
 -      channel->numa_node = cpu_to_node(target_cpu);
  
        /* See init_vp_index(). */
        if (hv_is_perf_channel(channel))
@@@ -2347,6 -2351,7 +2347,6 @@@ acpi_walk_err
  static int vmbus_bus_suspend(struct device *dev)
  {
        struct vmbus_channel *channel, *sc;
 -      unsigned long flags;
  
        while (atomic_read(&vmbus_connection.offer_in_progress) != 0) {
                /*
        if (atomic_read(&vmbus_connection.nr_chan_close_on_suspend) > 0)
                wait_for_completion(&vmbus_connection.ready_for_suspend_event);
  
-       WARN_ON(atomic_read(&vmbus_connection.nr_chan_fixup_on_resume) != 0);
+       if (atomic_read(&vmbus_connection.nr_chan_fixup_on_resume) != 0) {
+               pr_err("Can not suspend due to a previous failed resuming\n");
+               return -EBUSY;
+       }
  
        mutex_lock(&vmbus_connection.channel_mutex);
  
                        continue;
                }
  
 -              spin_lock_irqsave(&channel->lock, flags);
                list_for_each_entry(sc, &channel->sc_list, sc_list) {
                        pr_err("Sub-channel not deleted!\n");
                        WARN_ON_ONCE(1);
                }
 -              spin_unlock_irqrestore(&channel->lock, flags);
  
                atomic_inc(&vmbus_connection.nr_chan_fixup_on_resume);
        }
@@@ -2456,7 -2466,9 +2459,9 @@@ static int vmbus_bus_resume(struct devi
  
        vmbus_request_offers();
  
-       wait_for_completion(&vmbus_connection.ready_for_resume_event);
+       if (wait_for_completion_timeout(
+               &vmbus_connection.ready_for_resume_event, 10 * HZ) == 0)
+               pr_err("Some vmbus device is missing after suspending?\n");
  
        /* Reset the event for the next suspend. */
        reinit_completion(&vmbus_connection.ready_for_suspend_event);
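The 10-second bound above pairs with the earlier vmbus_bus_suspend() hunk: if a timed-out resume left channels without new relids, the next suspend now refuses with -EBUSY instead of tripping a WARN_ON. A hypothetical sketch of that guard (the counter name is illustrative; only the -EBUSY return mirrors the patch):

#include <linux/atomic.h>
#include <linux/errno.h>

static atomic_t example_fixup_pending;	/* channels still waiting to be re-offered */

static int example_suspend(void)
{
	/* Refuse to suspend again while a previous resume is unfinished. */
	if (atomic_read(&example_fixup_pending) != 0)
		return -EBUSY;
	return 0;
}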