sparc/vdso: Add helper function for 64-bit right shift on 32-bit target
author    Koakuma <[email protected]>
          Thu, 8 Aug 2024 02:05:00 +0000 (09:05 +0700)
committer Andreas Larsson <[email protected]>
          Mon, 18 Nov 2024 08:59:20 +0000 (09:59 +0100)
Add a helper function for 64-bit right shifts on the 32-bit target so
that clang does not emit a runtime library call.

Signed-off-by: Koakuma <[email protected]>
Reviewed-by: Andreas Larsson <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Andreas Larsson <[email protected]>
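
For context: on the 32-bit target a 64-bit right shift by a variable amount
is not a single instruction, and clang may lower it to a call into the
compiler runtime (__lshrdi3 in libgcc/compiler-rt), a call the
self-contained vDSO cannot satisfy at runtime. A hypothetical standalone
reproduction of the pattern (names are illustrative, not from the patch):

#include <stdint.h>

/* Compiled with clang for 32-bit sparc, a variable-amount 64-bit shift
 * like this one can be emitted as "call __lshrdi3" rather than inline
 * instructions.
 */
uint64_t shr64_plain(uint64_t val, int amt)
{
	return val >> amt;
}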
arch/sparc/vdso/vclock_gettime.c

index e794edde675548230e20d7b14fd3ff6c83405d32..79607804ea1b0f321215a9c4b5ead1edeb912e64 100644 (file)
@@ -86,6 +86,11 @@ notrace static long vdso_fallback_gettimeofday(struct __kernel_old_timeval *tv,
 }
 
 #ifdef CONFIG_SPARC64
+notrace static __always_inline u64 __shr64(u64 val, int amt)
+{
+       return val >> amt;
+}
+
 notrace static __always_inline u64 vread_tick(void)
 {
        u64     ret;
@@ -102,6 +107,21 @@ notrace static __always_inline u64 vread_tick_stick(void)
        return ret;
 }
 #else
+notrace static __always_inline u64 __shr64(u64 val, int amt)
+{
+       u64 ret;
+
+       __asm__ __volatile__("sllx %H1, 32, %%g1\n\t"
+                            "srl %L1, 0, %L1\n\t"
+                            "or %%g1, %L1, %%g1\n\t"
+                            "srlx %%g1, %2, %L0\n\t"
+                            "srlx %L0, 32, %H0"
+                            : "=r" (ret)
+                            : "r" (val), "r" (amt)
+                            : "g1");
+       return ret;
+}
+
 notrace static __always_inline u64 vread_tick(void)
 {
        register unsigned long long ret asm("o4");
@@ -154,7 +174,7 @@ notrace static __always_inline int do_realtime(struct vvar_data *vvar,
                ts->tv_sec = vvar->wall_time_sec;
                ns = vvar->wall_time_snsec;
                ns += vgetsns(vvar);
-               ns >>= vvar->clock.shift;
+               ns = __shr64(ns, vvar->clock.shift);
        } while (unlikely(vvar_read_retry(vvar, seq)));
 
        ts->tv_sec += __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
@@ -174,7 +194,7 @@ notrace static __always_inline int do_realtime_stick(struct vvar_data *vvar,
                ts->tv_sec = vvar->wall_time_sec;
                ns = vvar->wall_time_snsec;
                ns += vgetsns_stick(vvar);
-               ns >>= vvar->clock.shift;
+               ns = __shr64(ns, vvar->clock.shift);
        } while (unlikely(vvar_read_retry(vvar, seq)));
 
        ts->tv_sec += __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
@@ -194,7 +214,7 @@ notrace static __always_inline int do_monotonic(struct vvar_data *vvar,
                ts->tv_sec = vvar->monotonic_time_sec;
                ns = vvar->monotonic_time_snsec;
                ns += vgetsns(vvar);
-               ns >>= vvar->clock.shift;
+               ns = __shr64(ns, vvar->clock.shift);
        } while (unlikely(vvar_read_retry(vvar, seq)));
 
        ts->tv_sec += __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
@@ -214,7 +234,7 @@ notrace static __always_inline int do_monotonic_stick(struct vvar_data *vvar,
                ts->tv_sec = vvar->monotonic_time_sec;
                ns = vvar->monotonic_time_snsec;
                ns += vgetsns_stick(vvar);
-               ns >>= vvar->clock.shift;
+               ns = __shr64(ns, vvar->clock.shift);
        } while (unlikely(vvar_read_retry(vvar, seq)));
 
        ts->tv_sec += __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
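
At each call site the plain shift ns >>= vvar->clock.shift becomes
ns = __shr64(ns, vvar->clock.shift), so both builds keep the
sequence-retry loop free of runtime library calls. For readers unfamiliar
with v8plus assembly, the following is a C model of what the 32-bit
__shr64 computes; the name is hypothetical and the model is illustration
only, since compiling it for the 32-bit target would itself produce the
libcall the asm avoids. The asm works because the 32-bit vDSO still
executes on 64-bit (v8plus) hardware, where sllx/srlx operate on full
64-bit registers.

#include <stdint.h>

/* Illustrative model (hypothetical name, not part of the patch). */
static uint64_t shr64_model(uint64_t val, int amt)
{
	uint32_t hi = (uint32_t)(val >> 32);	/* %H1: high word of val */
	uint32_t lo = (uint32_t)val;		/* %L1: low word of val  */

	/* sllx %H1, 32, %g1   place the high word in bits 63:32      */
	/* srl  %L1, 0, %L1    zero-extend the low word to 64 bits    */
	/* or   %g1, %L1, %g1  recombine into one 64-bit value in %g1 */
	uint64_t full = ((uint64_t)hi << 32) | lo;

	/* srlx %g1, %2, %L0   one 64-bit shift; the trailing
	 * srlx %L0, 32, %H0   splits the result into the %L0/%H0 pair */
	return full >> amt;
}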