diff --git a/drivers/clocksource/hyperv_timer.c b/drivers/clocksource/hyperv_timer.c
index 18de1f439ffd5013ed7e1c09f7f4a70bdc8ad5a6..bcd9042a0c9f460a4eeece614d0b032176ccc620 100644
--- a/drivers/clocksource/hyperv_timer.c
+++ b/drivers/clocksource/hyperv_timer.c
@@ -49,7 +49,7 @@ static bool direct_mode_enabled;
 
 static int stimer0_irq = -1;
 static int stimer0_message_sint;
-static DEFINE_PER_CPU(long, stimer0_evt);
+static __maybe_unused DEFINE_PER_CPU(long, stimer0_evt);
 
 /*
  * Common code for stimer0 interrupts coming via Direct Mode or
@@ -68,7 +68,7 @@ EXPORT_SYMBOL_GPL(hv_stimer0_isr);
  * stimer0 interrupt handler for architectures that support
  * per-cpu interrupts, which also implies Direct Mode.
  */
-static irqreturn_t hv_stimer0_percpu_isr(int irq, void *dev_id)
+static irqreturn_t __maybe_unused hv_stimer0_percpu_isr(int irq, void *dev_id)
 {
        hv_stimer0_isr();
        return IRQ_HANDLED;
@@ -196,6 +196,7 @@ void __weak hv_remove_stimer0_handler(void)
 {
 };
 
+#ifdef CONFIG_ACPI
 /* Called only on architectures with per-cpu IRQs (i.e., not x86/x64) */
 static int hv_setup_stimer0_irq(void)
 {
@@ -230,6 +231,16 @@ static void hv_remove_stimer0_irq(void)
                stimer0_irq = -1;
        }
 }
+#else
+static int hv_setup_stimer0_irq(void)
+{
+       return 0;
+}
+
+static void hv_remove_stimer0_irq(void)
+{
+}
+#endif
 
 /* hv_stimer_alloc - Global initialization of the clockevent and stimer0 */
 int hv_stimer_alloc(bool have_percpu_irqs)
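For readers less familiar with the idiom, the hunk above wraps the real IRQ setup/teardown in CONFIG_ACPI and supplies empty stubs otherwise, so hv_stimer_alloc() can call them unconditionally. Below is a minimal standalone sketch of the same stub pattern; the config option and function names are hypothetical, not from the driver.

/* Illustrative stub pattern only; names are hypothetical. */
#ifdef CONFIG_EXAMPLE_IRQ
static int example_setup_irq(void)
{
	/* real work: look up and request the interrupt */
	return 0;
}

static void example_remove_irq(void)
{
	/* real work: free the interrupt */
}
#else
/* No-op stubs so callers need no #ifdef of their own. */
static int example_setup_irq(void)
{
	return 0;
}

static void example_remove_irq(void)
{
}
#endif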
@@ -367,9 +378,18 @@ static union {
        u8 reserved[PAGE_SIZE];
 } tsc_pg __aligned(PAGE_SIZE);
 
+static struct ms_hyperv_tsc_page *tsc_page = &tsc_pg.page;
+static unsigned long tsc_pfn;
+
+unsigned long hv_get_tsc_pfn(void)
+{
+       return tsc_pfn;
+}
+EXPORT_SYMBOL_GPL(hv_get_tsc_pfn);
+
 struct ms_hyperv_tsc_page *hv_get_tsc_page(void)
 {
-       return &tsc_pg.page;
+       return tsc_page;
 }
 EXPORT_SYMBOL_GPL(hv_get_tsc_page);
 
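The hunk above caches the page's PFN in tsc_pfn and adds an hv_get_tsc_pfn() accessor next to the existing hv_get_tsc_page(). The sketch below is a hedged illustration of how a caller elsewhere might consume the two accessors; the function is hypothetical and only demonstrates the API.

#include <linux/printk.h>
#include <clocksource/hyperv_timer.h>

/* Hypothetical caller: prints what the two accessors return. */
static void example_report_tsc_page(void)
{
	struct ms_hyperv_tsc_page *page = hv_get_tsc_page();
	unsigned long pfn = hv_get_tsc_pfn();

	/*
	 * The PFN is what gets programmed into HV_REGISTER_REFERENCE_TSC;
	 * the pointer is the kernel mapping the clocksource reads from.
	 */
	pr_info("Hyper-V TSC page: pfn=%#lx sequence=%u\n",
		pfn, page->tsc_sequence);
}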
@@ -407,13 +427,12 @@ static void suspend_hv_clock_tsc(struct clocksource *arg)
 
 static void resume_hv_clock_tsc(struct clocksource *arg)
 {
-       phys_addr_t phys_addr = virt_to_phys(&tsc_pg);
        union hv_reference_tsc_msr tsc_msr;
 
        /* Re-enable the TSC page */
        tsc_msr.as_uint64 = hv_get_register(HV_REGISTER_REFERENCE_TSC);
        tsc_msr.enable = 1;
-       tsc_msr.pfn = HVPFN_DOWN(phys_addr);
+       tsc_msr.pfn = tsc_pfn;
        hv_set_register(HV_REGISTER_REFERENCE_TSC, tsc_msr.as_uint64);
 }
 
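The resume path now reprograms the register from the PFN cached at init time instead of recomputing virt_to_phys(&tsc_pg). As a worked example of the register layout (the PFN sits above the 12-bit page offset and the enable flag is bit 0), here is an illustrative helper with a hypothetical PFN value; note the real code first reads the register so reserved bits 1..11 are preserved.

#include <linux/types.h>
#include <asm/mshyperv.h>	/* HV_HYP_PAGE_SHIFT */

/* Illustrative arithmetic only; the driver uses union hv_reference_tsc_msr. */
static u64 example_reference_tsc_value(u64 pfn)
{
	/*
	 * The PFN lands in bits 63:12 and the enable flag is bit 0; e.g. a
	 * hypothetical pfn of 0x12345 yields 0x12345001, the kind of value
	 * resume_hv_clock_tsc() writes to HV_REGISTER_REFERENCE_TSC.
	 */
	return (pfn << HV_HYP_PAGE_SHIFT) | 0x1;
}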
@@ -497,13 +516,6 @@ static __always_inline void hv_setup_sched_clock(void *sched_clock) {}
 static bool __init hv_init_tsc_clocksource(void)
 {
        union hv_reference_tsc_msr tsc_msr;
-       phys_addr_t     phys_addr;
-
-       if (!(ms_hyperv.features & HV_MSR_REFERENCE_TSC_AVAILABLE))
-               return false;
-
-       if (hv_root_partition)
-               return false;
 
        /*
         * If Hyper-V offers TSC_INVARIANT, then the virtualized TSC correctly
@@ -521,19 +533,34 @@ static bool __init hv_init_tsc_clocksource(void)
                hyperv_cs_msr.rating = 250;
        }
 
+       if (!(ms_hyperv.features & HV_MSR_REFERENCE_TSC_AVAILABLE))
+               return false;
+
        hv_read_reference_counter = read_hv_clock_tsc;
-       phys_addr = virt_to_phys(hv_get_tsc_page());
 
        /*
-        * The Hyper-V TLFS specifies to preserve the value of reserved
-        * bits in registers. So read the existing value, preserve the
-        * low order 12 bits, and add in the guest physical address
-        * (which already has at least the low 12 bits set to zero since
-        * it is page aligned). Also set the "enable" bit, which is bit 0.
+        * TSC page mapping works differently in root compared to guest.
+        * - In a guest partition, the guest PFN has to be passed to the
+        *   hypervisor.
+        * - In the root partition, it's the other way around: the root has
+        *   to map the PFN provided by the hypervisor.
+        *   But it can't be mapped right here as it's too early and the MMU
+        *   isn't ready yet. So, only the enable bit is set here; the page
+        *   is remapped later in hv_remap_tsc_clocksource().
+        *
+        * It is worth mentioning that the TSC clocksource read function
+        * (read_hv_clock_tsc) has an MSR-based fallback mechanism, used when
+        * the TSC page is zeroed (which is the case until the PFN is
+        * remapped), so the TSC clocksource works even without the real
+        * TSC page mapped.
         */
        tsc_msr.as_uint64 = hv_get_register(HV_REGISTER_REFERENCE_TSC);
+       if (hv_root_partition)
+               tsc_pfn = tsc_msr.pfn;
+       else
+               tsc_pfn = HVPFN_DOWN(virt_to_phys(tsc_page));
        tsc_msr.enable = 1;
-       tsc_msr.pfn = HVPFN_DOWN(phys_addr);
+       tsc_msr.pfn = tsc_pfn;
        hv_set_register(HV_REGISTER_REFERENCE_TSC, tsc_msr.as_uint64);
 
        clocksource_register_hz(&hyperv_cs_tsc, NSEC_PER_SEC/100);
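The comment above relies on read_hv_clock_tsc() falling back to the slower TIME_REF_COUNT MSR while the TSC page still reads as all zeroes. Below is a hedged sketch of that read sequence, following the TLFS reference-TSC algorithm; it is illustrative rather than the driver's exact code (see hv_read_tsc_page_tsc() for the real implementation), and rdtsc_ordered() stands in for the architecture's TSC read.

#include <linux/math64.h>
#include <asm/mshyperv.h>
#include <clocksource/hyperv_timer.h>

/* Illustrative reference-counter read with MSR fallback. */
static u64 example_read_ref_counter(const struct ms_hyperv_tsc_page *tp)
{
	u64 scale, tsc;
	s64 offset;
	u32 seq;

	do {
		seq = tp->tsc_sequence;
		if (!seq)
			/* Page not valid (e.g. still zeroed): use the MSR. */
			return hv_get_register(HV_REGISTER_TIME_REF_COUNT);

		scale  = tp->tsc_scale;
		offset = tp->tsc_offset;
		tsc    = rdtsc_ordered();

		/* Re-read the sequence to detect a concurrent update. */
	} while (tp->tsc_sequence != seq);

	/* reference time = (tsc * scale) >> 64, plus the signed offset */
	return mul_u64_u64_shr(tsc, scale, 64) + offset;
}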
@@ -566,3 +593,20 @@ void __init hv_init_clocksource(void)
        hv_sched_clock_offset = hv_read_reference_counter();
        hv_setup_sched_clock(read_hv_sched_clock_msr);
 }
+
+void __init hv_remap_tsc_clocksource(void)
+{
+       if (!(ms_hyperv.features & HV_MSR_REFERENCE_TSC_AVAILABLE))
+               return;
+
+       if (!hv_root_partition) {
+               WARN(1, "%s: attempt to remap TSC page in guest partition\n",
+                    __func__);
+               return;
+       }
+
+       tsc_page = memremap(tsc_pfn << HV_HYP_PAGE_SHIFT, sizeof(tsc_pg),
+                           MEMREMAP_WB);
+       if (!tsc_page)
+               pr_err("Failed to remap Hyper-V TSC page.\n");
+}
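hv_remap_tsc_clocksource() is meant to run in the root partition once the MMU is up, replacing the statically allocated page with a mapping of the hypervisor-provided PFN; after it runs, hv_get_tsc_page() returns the remapped pointer. The sketch below shows a hedged, hypothetical caller; the real call site lives outside this file.

#include <asm/mshyperv.h>
#include <clocksource/hyperv_timer.h>

/* Hypothetical late-init hook; illustrates intended usage only. */
static int __init example_root_tsc_late_init(void)
{
	if (!hv_root_partition)
		return 0;

	/* Safe now: the MMU is up, so the hypervisor-provided PFN
	 * recorded in hv_init_tsc_clocksource() can be memremap()ed. */
	hv_remap_tsc_clocksource();

	return 0;
}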