// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/hyperv.h>
#include <linux/version.h>
#include <linux/random.h>
#include <linux/clockchips.h>
#include <clocksource/hyperv_timer.h>
#include <asm/mshyperv.h>
#include "hyperv_vmbus.h"

/* The one and only */
struct hv_context hv_context;

/*
 * hv_init - Main initialization routine.
 *
 * This routine must be called before any other routine in this file is called.
 */
int hv_init(void)
{
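        /*
         * Only the per-CPU context structures are allocated here; the
         * SynIC pages themselves are allocated later, in hv_synic_alloc().
         */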
        hv_context.cpu_context = alloc_percpu(struct hv_per_cpu_context);
        if (!hv_context.cpu_context)
                return -ENOMEM;
        return 0;
}

/*
 * hv_post_message - Post a message using the hypervisor message IPC.
 *
 * This involves a hypercall.
 */
int hv_post_message(union hv_connection_id connection_id,
                    enum hv_message_type message_type,
                    void *payload, size_t payload_size)
{
        struct hv_input_post_message *aligned_msg;
        struct hv_per_cpu_context *hv_cpu;
        u64 status;

        if (payload_size > HV_MESSAGE_PAYLOAD_BYTE_COUNT)
                return -EMSGSIZE;

        hv_cpu = get_cpu_ptr(hv_context.cpu_context);
        aligned_msg = hv_cpu->post_msg_page;
        aligned_msg->connectionid = connection_id;
        aligned_msg->reserved = 0;
        aligned_msg->message_type = message_type;
        aligned_msg->payload_size = payload_size;
        memcpy((void *)aligned_msg->payload, payload, payload_size);

        status = hv_do_hypercall(HVCALL_POST_MESSAGE, aligned_msg, NULL);

        /*
         * Preemption must remain disabled until after the hypercall
         * so that no other thread can be scheduled onto this CPU and
         * corrupt the per-cpu post_msg_page.
         */
        put_cpu_ptr(hv_cpu);
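
        /* Only the low 16 bits of the hypercall result carry the Hyper-V status code */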
        return status & 0xFFFF;
}

int hv_synic_alloc(void)
{
        int cpu;
        struct hv_per_cpu_context *hv_cpu;

        /*
         * First, zero all per-cpu memory areas so hv_synic_free() can
         * detect what memory has been allocated and clean up properly
         * after any failures.
         */
        for_each_present_cpu(cpu) {
                hv_cpu = per_cpu_ptr(hv_context.cpu_context, cpu);
                memset(hv_cpu, 0, sizeof(*hv_cpu));
        }
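
        /* One cpumask per NUMA node, used when assigning channel interrupts to CPUs */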
        hv_context.hv_numa_map = kcalloc(nr_node_ids, sizeof(struct cpumask),
                                         GFP_KERNEL);
        if (hv_context.hv_numa_map == NULL) {
                pr_err("Unable to allocate NUMA map\n");
                goto err;
        }

        for_each_present_cpu(cpu) {
                hv_cpu = per_cpu_ptr(hv_context.cpu_context, cpu);
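
                /* msg_dpc runs vmbus_on_msg_dpc() to handle messages queued on this CPU */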
                tasklet_init(&hv_cpu->msg_dpc,
                             vmbus_on_msg_dpc, (unsigned long)hv_cpu);

                hv_cpu->synic_message_page =
                        (void *)get_zeroed_page(GFP_ATOMIC);
                if (hv_cpu->synic_message_page == NULL) {
                        pr_err("Unable to allocate SYNIC message page\n");
                        goto err;
                }

                hv_cpu->synic_event_page = (void *)get_zeroed_page(GFP_ATOMIC);
                if (hv_cpu->synic_event_page == NULL) {
                        pr_err("Unable to allocate SYNIC event page\n");
                        goto err;
                }

                hv_cpu->post_msg_page = (void *)get_zeroed_page(GFP_ATOMIC);
                if (hv_cpu->post_msg_page == NULL) {
                        pr_err("Unable to allocate post msg page\n");
                        goto err;
                }
        }

        return 0;
err:
        /*
         * Any memory allocations that succeeded will be freed when
         * the caller cleans up by calling hv_synic_free().
         */
        return -ENOMEM;
}

void hv_synic_free(void)
{
        int cpu;

        for_each_present_cpu(cpu) {
                struct hv_per_cpu_context *hv_cpu
                        = per_cpu_ptr(hv_context.cpu_context, cpu);

                free_page((unsigned long)hv_cpu->synic_event_page);
                free_page((unsigned long)hv_cpu->synic_message_page);
                free_page((unsigned long)hv_cpu->post_msg_page);
        }

        kfree(hv_context.hv_numa_map);
}

/*
 * hv_synic_init - Initialize the Synthetic Interrupt Controller.
 *
 * If it is already initialized by another entity (i.e., the x2v shim), we
 * need to retrieve the initialized message and event pages. Otherwise, we
 * create and initialize the message and event pages.
 */
void hv_synic_enable_regs(unsigned int cpu)
{
        struct hv_per_cpu_context *hv_cpu
                = per_cpu_ptr(hv_context.cpu_context, cpu);
        union hv_synic_simp simp;
        union hv_synic_siefp siefp;
        union hv_synic_sint shared_sint;
        union hv_synic_scontrol sctrl;
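
        /*
         * The base address fields below hold guest page frame numbers,
         * hence the HV_HYP_PAGE_SHIFT conversions from virt_to_phys().
         */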
        /* Setup the Synic's message page */
        hv_get_simp(simp.as_uint64);
        simp.simp_enabled = 1;
        simp.base_simp_gpa = virt_to_phys(hv_cpu->synic_message_page)
                >> HV_HYP_PAGE_SHIFT;

        hv_set_simp(simp.as_uint64);

        /* Setup the Synic's event page */
        hv_get_siefp(siefp.as_uint64);
        siefp.siefp_enabled = 1;
        siefp.base_siefp_gpa = virt_to_phys(hv_cpu->synic_event_page)
                >> HV_HYP_PAGE_SHIFT;

        hv_set_siefp(siefp.as_uint64);

        /* Setup the shared SINT. */
        hv_get_synint_state(VMBUS_MESSAGE_SINT, shared_sint.as_uint64);

        shared_sint.vector = hv_get_vector();
        shared_sint.masked = false;
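        /* Honor the hypervisor's recommendation on whether to use auto-EOI */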
        shared_sint.auto_eoi = hv_recommend_using_aeoi();
        hv_set_synint_state(VMBUS_MESSAGE_SINT, shared_sint.as_uint64);

        /* Enable the global synic bit */
        hv_get_synic_state(sctrl.as_uint64);
        sctrl.enable = 1;

        hv_set_synic_state(sctrl.as_uint64);
}

int hv_synic_init(unsigned int cpu)
{
        hv_synic_enable_regs(cpu);

        hv_stimer_legacy_init(cpu, VMBUS_MESSAGE_SINT);

        return 0;
}

/*
 * hv_synic_cleanup - Cleanup routine for hv_synic_init().
 */
void hv_synic_disable_regs(unsigned int cpu)
{
        union hv_synic_sint shared_sint;
        union hv_synic_simp simp;
        union hv_synic_siefp siefp;
        union hv_synic_scontrol sctrl;
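
        /* Mask the VMBus SINT first, then disable the message/event pages and the SynIC itself */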
        hv_get_synint_state(VMBUS_MESSAGE_SINT, shared_sint.as_uint64);

        shared_sint.masked = 1;

        /* Need to clean up correctly in the SMP case!!! */
        /* Disable the interrupt */
        hv_set_synint_state(VMBUS_MESSAGE_SINT, shared_sint.as_uint64);

        hv_get_simp(simp.as_uint64);
        simp.simp_enabled = 0;
        simp.base_simp_gpa = 0;

        hv_set_simp(simp.as_uint64);

        hv_get_siefp(siefp.as_uint64);
        siefp.siefp_enabled = 0;
        siefp.base_siefp_gpa = 0;

        hv_set_siefp(siefp.as_uint64);

        /* Disable the global synic bit */
        hv_get_synic_state(sctrl.as_uint64);
        sctrl.enable = 0;
        hv_set_synic_state(sctrl.as_uint64);
}

int hv_synic_cleanup(unsigned int cpu)
{
        struct vmbus_channel *channel, *sc;
        bool channel_found = false;

        /*
         * Hyper-V does not provide a way to change the connect CPU once
         * it is set; we must prevent the connect CPU from going offline
         * while the VM is running normally. But in the panic or kexec()
         * path where the vmbus is already disconnected, the CPU must be
         * allowed to shut down.
         */
        if (cpu == VMBUS_CONNECT_CPU &&
            vmbus_connection.conn_state == CONNECTED)
                return -EBUSY;

        /*
         * Search for channels which are bound to the CPU we're about to
         * clean up. If we find one and vmbus is still connected, we fail;
         * this effectively prevents the CPU from going offline.
         *
         * TODO: Re-bind the channels to different CPUs.
         */
        mutex_lock(&vmbus_connection.channel_mutex);
        list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
                if (channel->target_cpu == cpu) {
                        channel_found = true;
                        break;
                }
                list_for_each_entry(sc, &channel->sc_list, sc_list) {
                        if (sc->target_cpu == cpu) {
                                channel_found = true;
                                break;
                        }
                }
                if (channel_found)
                        break;
        }
        mutex_unlock(&vmbus_connection.channel_mutex);

        if (channel_found && vmbus_connection.conn_state == CONNECTED)
                return -EBUSY;
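
        /* The legacy stimer delivers its expiry messages via the SynIC, so quiesce it first */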
        hv_stimer_legacy_cleanup(cpu);

        hv_synic_disable_regs(cpu);

        return 0;
}