Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* clear_page.S: UltraSparc optimized clear page. |
2 | * | |
3 | * Copyright (C) 1996, 1998, 1999, 2000, 2004 David S. Miller ([email protected]) | |
4 | * Copyright (C) 1997 Jakub Jelinek ([email protected]) | |
5 | */ | |
6 | ||
7 | #include <asm/visasm.h> | |
8 | #include <asm/thread_info.h> | |
9 | #include <asm/page.h> | |
10 | #include <asm/pgtable.h> | |
11 | #include <asm/spitfire.h> | |
4da808c3 | 12 | #include <asm/head.h> |
1da177e4 LT |
13 | |
14 | /* What we used to do was lock a TLB entry into a specific | |
15 | * TLB slot, clear the page with interrupts disabled, then | |
16 | * restore the original TLB entry. This was great for | |
17 | * disturbing the TLB as little as possible, but it meant | |
18 | * we had to keep interrupts disabled for a long time. | |
19 | * | |
20 | * Now, we simply use the normal TLB loading mechanism, | |
21 | * and this makes the cpu choose a slot all by itself. | |
22 | * Then we do a normal TLB flush on exit. We need only | |
23 | * disable preemption during the clear. | |
24 | */ | |
25 | ||
1da177e4 LT |
26 | .text |
27 | ||
28 | .globl _clear_page | |
/* _clear_page(%o0 = dest): clear one kernel page.
 * %o4 is cleared in the branch delay slot, which tells
 * clear_page_common to skip the TLB demap and the
 * preempt-count restore on the way out.
 */
29 | _clear_page: /* %o0=dest */ | |
30 | ba,pt %xcc, clear_page_common | |
31 | clr %o4 | |
32 | ||
33 | /* This thing is pretty important, it shows up | |
34 | * on the profiles via do_anonymous_page(). | |
35 | */ | |
36 | .align 32 | |
37 | .globl clear_user_page | |
38 | clear_user_page: /* %o0=dest, %o1=vaddr */ | |
/* Map the destination page at a TLBTEMP_BASE address whose low bits
 * carry the user vaddr's D-cache alias bit, then fall through into
 * clear_page_common with %o4 != 0 so it demaps the entry and restores
 * the preempt count afterwards.
 * Registers carried into the common path:
 *   %o2 = saved preempt count, %g1 = TTE data, %o0 = TTE vaddr.
 */
39 | lduw [%g6 + TI_PRE_COUNT], %o2 | |
b2d43834 | 40 | sethi %hi(PAGE_OFFSET), %g2 |
1da177e4 LT |
41 | sethi %hi(PAGE_SIZE), %o4 |
42 | ||
b2d43834 | 43 | ldx [%g2 + %lo(PAGE_OFFSET)], %g2 |
c4bce90e | 44 | sethi %hi(PAGE_KERNEL_LOCKED), %g3 |
1da177e4 | 45 | |
c4bce90e | 46 | ldx [%g3 + %lo(PAGE_KERNEL_LOCKED)], %g3 |
1da177e4 LT |
47 | sub %o0, %g2, %g1 ! paddr |
48 | ||
1da177e4 LT |
49 | and %o1, %o4, %o0 ! vaddr D-cache alias bit |
50 | ||
51 | or %g1, %g3, %g1 ! TTE data | |
52 | sethi %hi(TLBTEMP_BASE), %o3 | |
53 | ||
/* %o4 = preempt count + 1; stored below to disable preemption before
 * the hand-loaded TLB entry is used.
 */
54 | add %o2, 1, %o4 | |
55 | add %o0, %o3, %o0 ! TTE vaddr | |
56 | ||
57 | /* Disable preemption. */ | |
58 | mov TLB_TAG_ACCESS, %g3 | |
59 | stw %o4, [%g6 + TI_PRE_COUNT] | |
60 | ||
/* wrpr xors PSTATE_IE to mask interrupts while the TLB tag-access
 * register is written and the locked DTLB entry is loaded by hand;
 * the flush of a KERNBASE text address orders the stores before
 * interrupts are re-enabled (wrpr %o4, 0x0 restores the old pstate).
 */
61 | /* Load TLB entry. */ | |
62 | rdpr %pstate, %o4 | |
63 | wrpr %o4, PSTATE_IE, %pstate | |
64 | stxa %o0, [%g3] ASI_DMMU | |
65 | stxa %g1, [%g0] ASI_DTLB_DATA_IN | |
4da808c3 DM |
66 | sethi %hi(KERNBASE), %g1 |
67 | flush %g1 |
1da177e4 LT |
68 | wrpr %o4, 0x0, %pstate |
69 | ||
/* %o4 != 0: tell clear_page_common to demap and re-enable preemption. */
70 | mov 1, %o4 | |
71 | ||
/* clear_page_common: zero PAGE_SIZE bytes at %o0 using VIS 64-byte
 * block stores.
 * In:  %o0 = vaddr to clear
 *      %o4 = cleanup flag; non-zero (clear_user_page path) means demap
 *            the temporary DTLB entry and restore the preempt count.
 *      %g1/%o2 are only meaningful when %o4 != 0 (TTE vaddr, saved
 *      preempt count).
 */
72 | clear_page_common: | |
73 | VISEntryHalf | |
74 | membar #StoreLoad | #StoreStore | #LoadStore | |
/* Build an all-zero 64-byte source block in %f0-%f14: fzero zeroes
 * %f0/%f2, and the faddd/fmuld of two zero operands propagate zero
 * into %f4-%f14 (interleaved with the loop-count setup in %o1).
 */
75 | fzero %f0 | |
76 | sethi %hi(PAGE_SIZE/64), %o1 | |
77 | mov %o0, %g1 ! remember vaddr for tlbflush | |
78 | fzero %f2 | |
79 | or %o1, %lo(PAGE_SIZE/64), %o1 | |
80 | faddd %f0, %f2, %f4 | |
81 | fmuld %f0, %f2, %f6 | |
82 | faddd %f0, %f2, %f8 | |
83 | fmuld %f0, %f2, %f10 | |
84 | ||
85 | faddd %f0, %f2, %f12 | |
86 | fmuld %f0, %f2, %f14 | |
/* PAGE_SIZE/64 iterations of a 64-byte block store (ASI_BLK_P); the
 * add in the branch delay slot advances %o0 to the next block.
 */
87 | 1: stda %f0, [%o0 + %g0] ASI_BLK_P | |
88 | subcc %o1, 1, %o1 | |
89 | bne,pt %icc, 1b | |
90 | add %o0, 0x40, %o0 | |
91 | membar #Sync | |
92 | VISExitHalf | |
93 | ||
/* Fast path (_clear_page) entered with %o4 == 0: no cleanup needed. */
94 | brz,pn %o4, out | |
95 | nop | |
96 | ||
/* clear_user_page path: demap the temporary DTLB entry at %g1 and
 * restore the preempt count saved in %o2.
 */
97 | stxa %g0, [%g1] ASI_DMMU_DEMAP | |
98 | membar #Sync | |
99 | stw %o2, [%g6 + TI_PRE_COUNT] | |
100 | ||
101 | out: retl | |
102 | nop | |
103 |