/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Userland implementation of gettimeofday() for 32-bit processes in a
 * ppc64 kernel for use in the vDSO
 */
#include <asm/processor.h>
#include <asm/ppc_asm.h>
#include <asm/vdso.h>
#include <asm/asm-offsets.h>
#include <asm/unistd.h>
/* Offset for the low 32-bit part of a field of long type */
#ifdef CONFIG_PPC64
#define LOPART	4
#else
#define LOPART	0
#endif
	.text
/*
 * Exact prototype of gettimeofday
 *
 * int __kernel_gettimeofday(struct timeval *tv, struct timezone *tz);
 *
 */
V_FUNCTION_BEGIN(__kernel_gettimeofday)
	mflr	r12			/* r12 saves lr */
	mr	r10,r3			/* r10 saves tv */
	mr	r11,r4			/* r11 saves tz */
	bl	__get_datapage@local	/* get data page */
	mr	r9,r3			/* datapage ptr in r9 */
	cmplwi	r10,0			/* check if tv is NULL */
	beq	3f			/* if so, skip the timeval */
	lis	r7,1000000@ha		/* load up USEC_PER_SEC */
	addi	r7,r7,1000000@l		/* so we get microseconds in r4 */
	bl	__do_get_tspec@local	/* get sec/usec from tb & kernel */
	stw	r3,TVAL32_TV_SEC(r10)
	stw	r4,TVAL32_TV_USEC(r10)
3:	cmplwi	r11,0			/* check if tz is NULL */
	beq	1f			/* all done if so */
	lwz	r4,CFG_TZ_MINUTEWEST(r9)	/* fill tz */
	lwz	r5,CFG_TZ_DSTTIME(r9)
	stw	r4,TZONE_TZ_MINWEST(r11)
	stw	r5,TZONE_TZ_DSTTIME(r11)

1:	mtlr	r12			/* restore lr */
	crclr	cr0*4+so		/* clear the error indication */
	li	r3,0			/* return success */
	blr
V_FUNCTION_END(__kernel_gettimeofday)
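
/* A hedged C sketch of the routine above (illustrative only; the
 * pointer-based __do_get_tspec() and the vdso field names are
 * assumptions -- the real helper returns its results in registers):
 *
 *	int __kernel_gettimeofday(struct timeval *tv, struct timezone *tz)
 *	{
 *		if (tv) {
 *			u32 sec, usec;
 *			__do_get_tspec(&sec, &usec, 1000000);
 *			tv->tv_sec  = sec;
 *			tv->tv_usec = usec;
 *		}
 *		if (tz) {
 *			tz->tz_minuteswest = vdso->tz_minuteswest;
 *			tz->tz_dsttime     = vdso->tz_dsttime;
 *		}
 *		return 0;
 *	}
 */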
/*
 * Exact prototype of clock_gettime()
 *
 * int __kernel_clock_gettime(clockid_t clock_id, struct timespec *tp);
 *
 */
V_FUNCTION_BEGIN(__kernel_clock_gettime)
	/* Check for supported clock IDs */
	cmplwi	cr0,r3,CLOCK_REALTIME
	cmplwi	cr1,r3,CLOCK_MONOTONIC
	cror	cr0*4+eq,cr0*4+eq,cr1*4+eq	/* cr0.eq = realtime || monotonic */
	bne	cr0,99f			/* unsupported clock: syscall fallback */
	mflr	r12			/* r12 saves lr */
	mr	r11,r4			/* r11 saves tp */
	bl	__get_datapage@local	/* get data page */
	mr	r9,r3			/* datapage ptr in r9 */
	lis	r7,NSEC_PER_SEC@h	/* want nanoseconds */
	ori	r7,r7,NSEC_PER_SEC@l
50:	bl	__do_get_tspec@local	/* get sec/nsec from tb & kernel */
	bne	cr1,80f			/* not monotonic -> all done */
	/* Now we must fix up using wall-to-monotonic. We need to snapshot
	 * that value and do the counter trick again. Fortunately, we still
	 * have the counter value in r8 that was returned by __do_get_tspec.
	 * At this point, r3,r4 contain our sec/nsec values, r5 and r6
	 * can be used, and r7 contains NSEC_PER_SEC.
	 */
	lwz	r5,(WTOM_CLOCK_SEC+LOPART)(r9)
	lwz	r6,WTOM_CLOCK_NSEC(r9)
	/* We now have our offset in r5,r6. We create a fake dependency
	 * on that value and re-check the counter.
	 */
	or	r0,r6,r5
	xor	r0,r0,r0
	add	r9,r9,r0
	lwz	r0,(CFG_TB_UPDATE_COUNT+LOPART)(r9)
	cmplw	cr0,r8,r0		/* check if updated */
	bne-	50b			/* data changed mid-read: retry */
	/* Calculate and store result. Note that this mimics the C code,
	 * which may cause funny results if nsec goes negative... is that
	 * possible at all?
	 */
	add	r3,r3,r5		/* add wall-to-monotonic seconds */
	add	r4,r4,r6		/* add wall-to-monotonic nanoseconds */
	cmpw	cr0,r4,r7		/* nsec >= NSEC_PER_SEC ? */
	cmpwi	cr1,r4,0		/* nsec < 0 ? */
	blt	1f
	subf	r4,r7,r4		/* nsec -= NSEC_PER_SEC */
	addi	r3,r3,1
1:	bge	cr1,80f
	addi	r3,r3,-1		/* nsec went negative: borrow a second */
	add	r4,r4,r7

80:	stw	r3,TSPC32_TV_SEC(r11)
	stw	r4,TSPC32_TV_NSEC(r11)
	mtlr	r12			/* restore lr */
	crclr	cr0*4+so		/* clear the error indication */
	li	r3,0			/* return success */
	blr

	/*
	 * syscall fallback
	 */
99:
	li	r0,__NR_clock_gettime
	sc
	blr
V_FUNCTION_END(__kernel_clock_gettime)
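
/* A hedged C sketch of the CLOCK_MONOTONIC fixup above (illustrative
 * only; wtom_sec/wtom_nsec stand for the wall-to-monotonic offset):
 *
 *	sec  += wtom_sec;
 *	nsec += wtom_nsec;
 *	if (nsec >= NSEC_PER_SEC) {
 *		nsec -= NSEC_PER_SEC;
 *		sec++;
 *	} else if (nsec < 0) {
 *		nsec += NSEC_PER_SEC;
 *		sec--;
 *	}
 */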
/*
 * Exact prototype of clock_getres()
 *
 * int __kernel_clock_getres(clockid_t clock_id, struct timespec *res);
 *
 */
V_FUNCTION_BEGIN(__kernel_clock_getres)
	/* Check for supported clock IDs */
	cmpwi	cr0,r3,CLOCK_REALTIME
	cmpwi	cr1,r3,CLOCK_MONOTONIC
	cror	cr0*4+eq,cr0*4+eq,cr1*4+eq
	bne	cr0,99f			/* unsupported clock: syscall fallback */
	mflr	r12			/* r12 saves lr */
	bl	__get_datapage@local	/* get data page */
	lwz	r5,CLOCK_HRTIMER_RES(r3)	/* r3 holds the datapage ptr here */
	mtlr	r12			/* restore lr */
	li	r3,0			/* res->tv_sec = 0, also return value */
	cmplwi	cr0,r4,0		/* check if res is NULL */
	crclr	cr0*4+so		/* clear the error indication */
	beqlr				/* nothing to store if so */
	stw	r3,TSPC32_TV_SEC(r4)
	stw	r5,TSPC32_TV_NSEC(r4)
	blr

	/*
	 * syscall fallback
	 */
99:
	li	r0,__NR_clock_getres
	sc
	blr
V_FUNCTION_END(__kernel_clock_getres)
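
/* A hedged C sketch of the routine above (illustrative only; the
 * syscall fallback spelling and the field name are assumptions):
 *
 *	int __kernel_clock_getres(clockid_t clock_id, struct timespec *res)
 *	{
 *		if (clock_id != CLOCK_REALTIME && clock_id != CLOCK_MONOTONIC)
 *			return syscall(__NR_clock_getres, clock_id, res);
 *		if (res) {
 *			res->tv_sec  = 0;
 *			res->tv_nsec = vdso->hrtimer_res;
 *		}
 *		return 0;
 *	}
 */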
/*
 * Exact prototype of time()
 *
 * time_t time(time_t *t);
 *
 */
V_FUNCTION_BEGIN(__kernel_time)
	mflr	r12			/* r12 saves lr */
	mr	r11,r3			/* r11 holds t */
	bl	__get_datapage@local
	mr	r9,r3			/* datapage ptr in r9 */

	lwz	r3,STAMP_XTIME_SEC+LOPART(r9)

	cmplwi	r11,0			/* check if t is NULL */
	beq	2f
	stw	r3,0(r11)		/* store result at *t */
2:	mtlr	r12			/* restore lr */
	crclr	cr0*4+so		/* clear the error indication */
	blr
V_FUNCTION_END(__kernel_time)
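
/* A hedged C sketch of the routine above (illustrative only):
 *
 *	time_t __kernel_time(time_t *t)
 *	{
 *		time_t sec = vdso->stamp_xtime_sec;
 *		if (t)
 *			*t = sec;
 *		return sec;
 *	}
 */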
/*
 * This is the core of clock_gettime() and gettimeofday(),
 * it returns the current time in r3 (seconds) and r4.
 * On entry, r7 gives the resolution of r4, either USEC_PER_SEC
 * or NSEC_PER_SEC, giving r4 in microseconds or nanoseconds.
 * It expects the datapage ptr in r9 and doesn't clobber it.
 * It clobbers r0, r5 and r6.
 * On return, r8 contains the counter value that can be reused.
 * This clobbers cr0 but not any other cr field.
 */
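/* A hedged C sketch of the whole sequence (illustrative only; the
 * helper mul_hi32() and the field names are assumptions, not kernel
 * API; mul_hi32(a, b) stands for ((u128)a * b) >> 32):
 *
 *	do {
 *		seq   = vdso->tb_update_count;	 -- low word, must be even
 *		delta = (get_tb() - vdso->tb_orig_stamp) << 12;
 *		fixp  = mul_hi32(delta, vdso->tb_to_xs >> 32);
 *		fixp += ((u64)vdso->stamp_xtime_sec << 32)
 *			| vdso->stamp_sec_frac;	 -- 32.32 fixed-point seconds
 *	} while (seq != vdso->tb_update_count);
 *	sec  = fixp >> 32;
 *	frac = ((u64)(u32)fixp * resolution) >> 32;  -- usec or nsec
 */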
__do_get_tspec:
	/* Check for update count & load values. We use the low
	 * order 32 bits of the update count.
	 */
1:	lwz	r8,(CFG_TB_UPDATE_COUNT+LOPART)(r9)
	andi.	r0,r8,1			/* pending update ? loop */
	bne-	1b
	xor	r0,r8,r8		/* create dependency */
	add	r9,r9,r0
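	/* The xor of r8 with itself always yields zero, but it makes r0
	 * data-dependent on the update count, and adding it into r9 makes
	 * the loads below depend on the count having been read first: a
	 * dependency-based substitute for a read memory barrier.
	 */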
	/* Load orig stamp (offset to TB) */
	lwz	r5,CFG_TB_ORIG_STAMP(r9)
	lwz	r6,(CFG_TB_ORIG_STAMP+4)(r9)

	/* Get a stable TB value */
2:	mftbu	r3
	mftbl	r4
	mftbu	r0
	cmplw	cr0,r3,r0		/* upper word changed? re-read */
	bne-	2b
	/* Subtract tb orig stamp and shift left 12 bits.
	 */
	subfc	r4,r6,r4		/* 64-bit subtract: low word... */
	subfe	r0,r5,r3		/* ... then high word with borrow */
	slwi	r0,r0,12
	rlwimi.	r0,r4,12,20,31		/* cr0.eq set if the high word is 0 */
	slwi	r4,r4,12
	/*
	 * Load scale factor & do multiplication.
	 * We only use the high 32 bits of the tb_to_xs value.
	 * Even with a 1GHz timebase clock, the high 32 bits of
	 * tb_to_xs will be at least 4 million, so the error from
	 * ignoring the low 32 bits will be no more than 0.25ppm.
	 * The error will just make the clock run very very slightly
	 * slow until the next time the kernel updates the VDSO data,
	 * at which point the clock will catch up to the kernel's value,
	 * so there is no long-term error accumulation.
	 */
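	/* Worked numbers for the bound above: tb_to_xs_hi >= 4,000,000
	 * means dropping the low 32 bits changes the scale factor by a
	 * relative error of less than 1/tb_to_xs_hi, i.e. at most
	 * 1/4,000,000 = 0.25ppm.
	 */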
	lwz	r5,CFG_TB_TO_XS(r9)	/* load values */
	mulhwu	r4,r4,r5		/* multiply the low word of the delta */
	li	r3,0

	beq+	4f			/* skip high part computation if 0 */
	mullw	r3,r0,r5
	mulhwu	r5,r0,r5
	addc	r4,r4,r3
	addze	r3,r5
4:
	/* At this point, we have seconds since the xtime stamp
	 * as a 32.32 fixed-point number in r3 and r4.
	 * Load & add the xtime stamp.
	 */
	lwz	r5,STAMP_XTIME_SEC+LOPART(r9)
	lwz	r6,STAMP_SEC_FRAC(r9)
	addc	r4,r4,r6		/* add the fractional parts... */
	adde	r3,r3,r5		/* ... carrying into the seconds */
	/* We create a fake dependency on the result in r3/r4
	 * and re-check the counter.
	 */
	or	r6,r4,r3
	xor	r0,r6,r6
	add	r9,r9,r0
	lwz	r0,(CFG_TB_UPDATE_COUNT+LOPART)(r9)
	cmplw	cr0,r8,r0		/* check if updated */
	bne-	1b			/* data changed mid-read: retry */

	mulhwu	r4,r4,r7		/* convert to micro or nanoseconds */

	blr
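
/* The final mulhwu computes (frac * r7) >> 32, scaling the 0.32
 * fixed-point fraction of a second by the requested resolution. For
 * example, a fraction of 0x80000000 (half a second) with r7 set to
 * NSEC_PER_SEC yields (0x80000000 * 1000000000) >> 32 = 500000000.
 */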