// SPDX-License-Identifier: GPL-2.0

/*
 * Hyper-V specific APIC code.
 *
 * Copyright (C) 2018, Microsoft, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for more
 * details.
 */
#include <linux/types.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/clockchips.h>
#include <linux/hyperv.h>
#include <linux/slab.h>
#include <linux/cpuhotplug.h>
#include <asm/hypervisor.h>
#include <asm/mshyperv.h>
#include <asm/apic.h>

#include <asm/trace/hyperv.h>
/* Saved copy of the native APIC ops, used as the fallback path. */
static struct apic orig_apic;
static u64 hv_apic_icr_read(void)
{
	u64 reg_val;

	rdmsrl(HV_X64_MSR_ICR, reg_val);
	return reg_val;
}
static void hv_apic_icr_write(u32 low, u32 id)
{
	u64 reg_val;

	reg_val = SET_XAPIC_DEST_FIELD(id);
	reg_val = reg_val << 32;
	reg_val |= low;

	wrmsrl(HV_X64_MSR_ICR, reg_val);
}
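
/*
 * A worked illustration of the value hv_apic_icr_write() builds,
 * assuming the standard xAPIC encoding where SET_XAPIC_DEST_FIELD(id)
 * expands to (id << 24), with hypothetical inputs:
 *
 *	id = 3:   reg_val = ((3 << 24) << 32) | low
 *	                  = 0x0300000000000000 | low
 *
 * i.e. the destination APIC ID lands in bits 56-63 (the xAPIC ICR2
 * half of the 64-bit value) while the delivery mode and vector stay in
 * the low 32 bits, matching what the HV_X64_MSR_ICR synthetic MSR
 * expects for a single wrmsrl().
 */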
static u32 hv_apic_read(u32 reg)
{
	u32 reg_val, hi;

	switch (reg) {
	case APIC_EOI:
		rdmsr(HV_X64_MSR_EOI, reg_val, hi);
		(void)hi;	/* only the low half carries the value */
		return reg_val;
	case APIC_TASKPRI:
		rdmsr(HV_X64_MSR_TPR, reg_val, hi);
		(void)hi;
		return reg_val;
	default:
		return native_apic_mem_read(reg);
	}
}
static void hv_apic_write(u32 reg, u32 val)
{
	switch (reg) {
	case APIC_EOI:
		wrmsr(HV_X64_MSR_EOI, val, 0);
		break;
	case APIC_TASKPRI:
		wrmsr(HV_X64_MSR_TPR, val, 0);
		break;
	default:
		native_apic_mem_write(reg, val);
	}
}
static void hv_apic_eoi_write(u32 reg, u32 val)
{
	struct hv_vp_assist_page *hvp = hv_vp_assist_page[smp_processor_id()];

	/* Lazy EOI: skip the MSR write when the hypervisor doesn't need it. */
	if (hvp && (xchg(&hvp->apic_assist, 0) & 0x1))
		return;

	wrmsr(HV_X64_MSR_EOI, val, 0);
}
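
/*
 * A minimal sketch of the lazy-EOI handshake above, assuming the TLFS
 * "No EOI required" semantics for bit 0 of the VP assist page:
 *
 *	hypervisor:  hvp->apic_assist |= 0x1;  // no EOI exit needed
 *	guest:       old = xchg(&hvp->apic_assist, 0);
 *	             if (old & 0x1)
 *	                     return;           // skip the MSR write/exit
 *
 * The xchg() reads the flag and clears it in one atomic step, so a bit
 * set concurrently by the hypervisor cannot be lost between the read
 * and the clear.
 */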
static bool cpu_is_self(int cpu)
{
	return cpu == smp_processor_id();
}

/*
 * IPI implementation on Hyper-V.
 */
static bool __send_ipi_mask_ex(const struct cpumask *mask, int vector,
		bool exclude_self)
{
	struct hv_send_ipi_ex **arg;
	struct hv_send_ipi_ex *ipi_arg;
	unsigned long flags;
	int nr_bank = 0;
	u64 status = HV_STATUS_INVALID_PARAMETER;

	if (!(ms_hyperv.hints & HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED))
		return false;

	local_irq_save(flags);
	arg = (struct hv_send_ipi_ex **)this_cpu_ptr(hyperv_pcpu_input_arg);

	ipi_arg = *arg;
	if (unlikely(!ipi_arg))
		goto ipi_mask_ex_done;

	ipi_arg->vector = vector;
	ipi_arg->reserved = 0;
	ipi_arg->vp_set.valid_bank_mask = 0;

	/*
	 * Use HV_GENERIC_SET_ALL and avoid converting cpumask to VP_SET
	 * when the IPI is sent to all currently present CPUs.
	 */
	if (!cpumask_equal(mask, cpu_present_mask) || exclude_self) {
		ipi_arg->vp_set.format = HV_GENERIC_SET_SPARSE_4K;

		nr_bank = cpumask_to_vpset_skip(&(ipi_arg->vp_set), mask,
				exclude_self ? cpu_is_self : NULL);

		/*
		 * 'nr_bank <= 0' means some CPUs in cpumask can't be
		 * represented in VP_SET. Return an error and fall back to
		 * native (architectural) method of sending IPIs.
		 */
		if (nr_bank <= 0)
			goto ipi_mask_ex_done;
	} else {
		ipi_arg->vp_set.format = HV_GENERIC_SET_ALL;
	}

	status = hv_do_rep_hypercall(HVCALL_SEND_IPI_EX, 0, nr_bank,
			ipi_arg, NULL);

ipi_mask_ex_done:
	local_irq_restore(flags);
	return hv_result_success(status);
}
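
/*
 * For illustration, a sparse HV_GENERIC_SET_SPARSE_4K VP_SET as built
 * above groups VPs into 64-wide banks. Assuming the struct hv_vpset
 * layout with valid_bank_mask and bank_contents[], and hypothetical
 * target VPs 1 and 70:
 *
 *	vp_set.valid_bank_mask  = 0x3;                 // banks 0 and 1
 *	vp_set.bank_contents[0] = 1ULL << 1;           // VP 1
 *	vp_set.bank_contents[1] = 1ULL << (70 - 64);   // VP 70
 *
 * and nr_bank = 2 is what cpumask_to_vpset_skip() returns and what gets
 * passed through to hv_do_rep_hypercall().
 */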
static bool __send_ipi_mask(const struct cpumask *mask, int vector,
		bool exclude_self)
{
	int cur_cpu, vcpu, this_cpu = smp_processor_id();
	struct hv_send_ipi ipi_arg;
	u64 status;
	unsigned int weight;

	trace_hyperv_send_ipi_mask(mask, vector);

	weight = cpumask_weight(mask);

	/*
	 * Do nothing if
	 *  1. the mask is empty
	 *  2. the mask only contains self when exclude_self is true
	 */
	if (weight == 0 ||
	    (exclude_self && weight == 1 && cpumask_test_cpu(this_cpu, mask)))
		return true;

	if (!hv_hypercall_pg)
		return false;

	if ((vector < HV_IPI_LOW_VECTOR) || (vector > HV_IPI_HIGH_VECTOR))
		return false;

	/*
	 * From the supplied CPU set we need to figure out if we can get away
	 * with cheaper HVCALL_SEND_IPI hypercall. This is possible when the
	 * highest VP number in the set is < 64. As VP numbers are usually in
	 * ascending order and match Linux CPU ids, here is an optimization:
	 * we check the VP number for the highest bit in the supplied set first
	 * so we can quickly find out if using HVCALL_SEND_IPI_EX hypercall is
	 * a must. We will also check all VP numbers when walking the supplied
	 * CPU set to remain correct in all cases.
	 */
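	/*
	 * For example, assuming VP numbers equal Linux CPU numbers
	 * (hypothetical masks): mask = {0, 2, 5} keeps every VP below 64,
	 * so the whole set fits in the single 64-bit cpu_mask of
	 * HVCALL_SEND_IPI; mask = {0, 2, 70} fails the quick check below
	 * (cpumask_last() maps to VP 70) and goes straight to the EX
	 * hypercall.
	 */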
	if (hv_cpu_number_to_vp_number(cpumask_last(mask)) >= 64)
		goto do_ex_hypercall;

	ipi_arg.vector = vector;
	ipi_arg.cpu_mask = 0;

	for_each_cpu(cur_cpu, mask) {
		if (exclude_self && cur_cpu == this_cpu)
			continue;
		vcpu = hv_cpu_number_to_vp_number(cur_cpu);
		if (vcpu == VP_INVAL)
			return false;

		/*
		 * This particular version of the IPI hypercall can
		 * only target up to 64 CPUs.
		 */
		if (vcpu >= 64)
			goto do_ex_hypercall;

		__set_bit(vcpu, (unsigned long *)&ipi_arg.cpu_mask);
	}

	status = hv_do_fast_hypercall16(HVCALL_SEND_IPI, ipi_arg.vector,
			ipi_arg.cpu_mask);
	return hv_result_success(status);

do_ex_hypercall:
	return __send_ipi_mask_ex(mask, vector, exclude_self);
}
static bool __send_ipi_one(int cpu, int vector)
{
	int vp = hv_cpu_number_to_vp_number(cpu);
	u64 status;

	trace_hyperv_send_ipi_one(cpu, vector);

	if (!hv_hypercall_pg || (vp == VP_INVAL))
		return false;

	if ((vector < HV_IPI_LOW_VECTOR) || (vector > HV_IPI_HIGH_VECTOR))
		return false;

	if (vp >= 64)
		return __send_ipi_mask_ex(cpumask_of(cpu), vector, false);

	status = hv_do_fast_hypercall16(HVCALL_SEND_IPI, vector, BIT_ULL(vp));
	return hv_result_success(status);
}
/*
 * The hv_send_ipi_*() wrappers below mirror the native APIC callbacks
 * and fall back to the saved native implementation whenever the
 * hypercall-based fast path cannot be used.
 */
static void hv_send_ipi(int cpu, int vector)
{
	if (!__send_ipi_one(cpu, vector))
		orig_apic.send_IPI(cpu, vector);
}

static void hv_send_ipi_mask(const struct cpumask *mask, int vector)
{
	if (!__send_ipi_mask(mask, vector, false))
		orig_apic.send_IPI_mask(mask, vector);
}

static void hv_send_ipi_mask_allbutself(const struct cpumask *mask, int vector)
{
	if (!__send_ipi_mask(mask, vector, true))
		orig_apic.send_IPI_mask_allbutself(mask, vector);
}

static void hv_send_ipi_allbutself(int vector)
{
	hv_send_ipi_mask_allbutself(cpu_online_mask, vector);
}

static void hv_send_ipi_all(int vector)
{
	if (!__send_ipi_mask(cpu_online_mask, vector, false))
		orig_apic.send_IPI_all(vector);
}

static void hv_send_ipi_self(int vector)
{
	if (!__send_ipi_one(smp_processor_id(), vector))
		orig_apic.send_IPI_self(vector);
}

void __init hv_apic_init(void)
{
	if (ms_hyperv.hints & HV_X64_CLUSTER_IPI_RECOMMENDED) {
		pr_info("Hyper-V: Using IPI hypercalls\n");
		/*
		 * Set the IPI entry points, saving the native ones in
		 * orig_apic first so the wrappers can fall back to them.
		 */
		orig_apic = *apic;

		apic->send_IPI = hv_send_ipi;
		apic->send_IPI_mask = hv_send_ipi_mask;
		apic->send_IPI_mask_allbutself = hv_send_ipi_mask_allbutself;
		apic->send_IPI_allbutself = hv_send_ipi_allbutself;
		apic->send_IPI_all = hv_send_ipi_all;
		apic->send_IPI_self = hv_send_ipi_self;
	}

	if (ms_hyperv.hints & HV_X64_APIC_ACCESS_RECOMMENDED) {
		pr_info("Hyper-V: Using enlightened APIC (%s mode)\n",
			x2apic_enabled() ? "x2apic" : "xapic");

		/*
		 * When in x2apic mode, don't use the Hyper-V specific APIC
		 * accessors since the field layout in the ICR register is
		 * different in x2apic mode. Furthermore, the architectural
		 * x2apic MSRs function just as well as the Hyper-V
		 * synthetic APIC MSRs, so there's no benefit in having
		 * separate Hyper-V accessors for x2apic mode. The only
		 * exception is hv_apic_eoi_write, because it benefits from
		 * lazy EOI when available, but the same accessor works for
		 * both xapic and x2apic because the field layout is the same.
		 */
		apic_set_eoi_write(hv_apic_eoi_write);
		if (!x2apic_enabled()) {
			apic->read      = hv_apic_read;
			apic->write     = hv_apic_write;
			apic->icr_write = hv_apic_icr_write;
			apic->icr_read  = hv_apic_icr_read;
		}
	}
}