return NULL;
spin_lock_init(&channel->sched_lock);
- spin_lock_init(&channel->lock);
init_completion(&channel->rescind_event);
INIT_LIST_HEAD(&channel->sc_list);
void hv_process_channel_removal(struct vmbus_channel *channel)
{
- unsigned long flags;
-
lockdep_assert_held(&vmbus_connection.channel_mutex);
BUG_ON(!channel->rescind);
if (channel->offermsg.child_relid != INVALID_RELID)
vmbus_channel_unmap_relid(channel);
- if (channel->primary_channel == NULL) {
+ if (channel->primary_channel == NULL)
list_del(&channel->listentry);
- } else {
- struct vmbus_channel *primary_channel = channel->primary_channel;
- spin_lock_irqsave(&primary_channel->lock, flags);
+ else
list_del(&channel->sc_list);
- spin_unlock_irqrestore(&primary_channel->lock, flags);
- }
/*
* If this is a "perf" channel, updates the hv_numa_map[] masks so that
struct vmbus_channel *newchannel =
container_of(work, struct vmbus_channel, add_channel_work);
struct vmbus_channel *primary_channel = newchannel->primary_channel;
- unsigned long flags;
int ret;
/*
*/
newchannel->probe_done = true;
- if (primary_channel == NULL) {
+ if (primary_channel == NULL)
list_del(&newchannel->listentry);
- } else {
- spin_lock_irqsave(&primary_channel->lock, flags);
+ else
list_del(&newchannel->sc_list);
- spin_unlock_irqrestore(&primary_channel->lock, flags);
- }
/* vmbus_process_offer() has mapped the channel. */
vmbus_channel_unmap_relid(newchannel);
{
struct vmbus_channel *channel;
struct workqueue_struct *wq;
- unsigned long flags;
bool fnew = true;
/*
}
}
- if (fnew)
+ if (fnew) {
list_add_tail(&newchannel->listentry,
&vmbus_connection.chn_list);
- else {
+ } else {
/*
* Check to see if this is a valid sub-channel.
*/
* Process the sub-channel.
*/
newchannel->primary_channel = channel;
- spin_lock_irqsave(&channel->lock, flags);
list_add_tail(&newchannel->sc_list, &channel->sc_list);
- spin_unlock_irqrestore(&channel->lock, flags);
}
vmbus_channel_map_relid(newchannel);
* In case alloc_cpumask_var() fails, bind it to
* VMBUS_CONNECT_CPU.
*/
- channel->numa_node = cpu_to_node(VMBUS_CONNECT_CPU);
channel->target_cpu = VMBUS_CONNECT_CPU;
- channel->target_vp =
- hv_cpu_number_to_vp_number(VMBUS_CONNECT_CPU);
if (perf_chn)
hv_set_alloced_cpu(VMBUS_CONNECT_CPU);
return;
continue;
break;
}
- channel->numa_node = numa_node;
alloced_mask = &hv_context.hv_numa_map[numa_node];
if (cpumask_weight(alloced_mask) ==
cpumask_set_cpu(target_cpu, alloced_mask);
channel->target_cpu = target_cpu;
- channel->target_vp = hv_cpu_number_to_vp_number(target_cpu);
free_cpumask_var(available_mask);
}
void *page_addr;
struct hv_message *msg;
struct vmbus_channel_message_header *hdr;
- u32 message_type;
+ u32 message_type, i;
/*
* CHANNELMSG_UNLOAD_RESPONSE is always delivered to the CPU which was
* functional and vmbus_unload_response() will complete
* vmbus_connection.unload_event. If not, the last thing we can do is
* read message pages for all CPUs directly.
+ *
+ * Wait no more than 10 seconds so that the panic path can't get
+ * hung forever in case the response message isn't seen.
*/
- while (1) {
+ for (i = 0; i < 1000; i++) {
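+ /* Poll a bounded number of times so this path honors the 10 second limit noted above. */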
if (completion_done(&vmbus_connection.unload_event))
break;
#include <linux/cpu.h>
#include <linux/sched/task_stack.h>
-#include <asm/mshyperv.h>
#include <linux/delay.h>
#include <linux/notifier.h>
#include <linux/ptrace.h>
if (!hv_dev->channel)
return -ENODEV;
- return sprintf(buf, "%d\n", hv_dev->channel->numa_node);
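+ /*
+ * Derive the node from the channel's current target CPU rather than
+ * from a cached per-channel value.
+ */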
+ return sprintf(buf, "%d\n", cpu_to_node(hv_dev->channel->target_cpu));
}
static DEVICE_ATTR_RO(numa_node);
#endif
{
struct hv_device *hv_dev = device_to_hv_device(dev);
struct vmbus_channel *channel = hv_dev->channel, *cur_sc;
- unsigned long flags;
int buf_size = PAGE_SIZE, n_written, tot_written;
struct list_head *cur;
if (!channel)
return -ENODEV;
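+ /*
+ * channel_mutex replaces the removed per-channel spin lock here: it
+ * keeps the sc_list walk below stable against sub-channel add/remove.
+ */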
+ mutex_lock(&vmbus_connection.channel_mutex);
+
tot_written = snprintf(buf, buf_size, "%u:%u\n",
channel->offermsg.child_relid, channel->target_cpu);
- spin_lock_irqsave(&channel->lock, flags);
-
list_for_each(cur, &channel->sc_list) {
if (tot_written >= buf_size - 1)
break;
tot_written += n_written;
}
- spin_unlock_irqrestore(&channel->lock, flags);
+ mutex_unlock(&vmbus_connection.channel_mutex);
return tot_written;
}
/* No CPUs should come up or down during this. */
cpus_read_lock();
- if (!cpumask_test_cpu(target_cpu, cpu_online_mask)) {
+ if (!cpu_online(target_cpu)) {
cpus_read_unlock();
return -EINVAL;
}
*/
channel->target_cpu = target_cpu;
- channel->target_vp = hv_cpu_number_to_vp_number(target_cpu);
- channel->numa_node = cpu_to_node(target_cpu);
/* See init_vp_index(). */
if (hv_is_perf_channel(channel))
static int vmbus_bus_suspend(struct device *dev)
{
struct vmbus_channel *channel, *sc;
- unsigned long flags;
while (atomic_read(&vmbus_connection.offer_in_progress) != 0) {
/*
if (atomic_read(&vmbus_connection.nr_chan_close_on_suspend) > 0)
wait_for_completion(&vmbus_connection.ready_for_suspend_event);
- WARN_ON(atomic_read(&vmbus_connection.nr_chan_fixup_on_resume) != 0);
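+ /*
+ * A nonzero fixup count means the previous resume never completed;
+ * refuse to suspend again in that case.
+ */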
+ if (atomic_read(&vmbus_connection.nr_chan_fixup_on_resume) != 0) {
+ pr_err("Can not suspend due to a previous failed resuming\n");
+ return -EBUSY;
+ }
mutex_lock(&vmbus_connection.channel_mutex);
continue;
}
- spin_lock_irqsave(&channel->lock, flags);
list_for_each_entry(sc, &channel->sc_list, sc_list) {
pr_err("Sub-channel not deleted!\n");
WARN_ON_ONCE(1);
}
- spin_unlock_irqrestore(&channel->lock, flags);
atomic_inc(&vmbus_connection.nr_chan_fixup_on_resume);
}
vmbus_request_offers();
- wait_for_completion(&vmbus_connection.ready_for_resume_event);
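+ /*
+ * Bound the wait so resume cannot hang forever if some channel offer
+ * never shows up; report the problem and carry on.
+ */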
+ if (wait_for_completion_timeout(
+ &vmbus_connection.ready_for_resume_event, 10 * HZ) == 0)
+ pr_err("Some vmbus device is missing after suspending?\n");
/* Reset the event for the next suspend. */
reinit_completion(&vmbus_connection.ready_for_suspend_event);