/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Inline assembly cache operations.
 *
 * Copyright (C) 1996 David S. Miller ([email protected])
 * Copyright (C) 1997 - 2002 Ralf Baechle ([email protected])
 * Copyright (C) 2004 Ralf Baechle ([email protected])
 */
#ifndef _ASM_R4KCACHE_H
#define _ASM_R4KCACHE_H

#include <asm/asm.h>
#include <asm/cacheops.h>
#include <asm/cpu-features.h>
#include <asm/mipsmtregs.h>

/*
 * This macro returns a properly sign-extended address suitable as a base
 * address for indexed cache operations.  Two issues here:
 *
 *  - The MIPS32 and MIPS64 specs permit an implementation to directly derive
 *    the index bits from the virtual address.  This breaks with tradition
 *    set by the R4000.  To avoid unpleasant surprises we pick an address in
 *    KSEG0 / CKSEG0.
 *  - We need a properly sign-extended address for 64-bit code.  To get away
 *    without ifdefs we let the compiler do it by a type cast.
 */
#define INDEX_BASE	CKSEG0

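/*
 * For instance, on a 64-bit kernel a 32-bit value cast through a signed
 * type sign-extends automatically: (long)(int)0x80000000 yields
 * 0xffffffff80000000, the canonical CKSEG0 base, with no #ifdef needed.
 */
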
#define cache_op(op,addr)						\
	__asm__ __volatile__(						\
	"	.set	push					\n"	\
	"	.set	noreorder				\n"	\
	"	.set	mips3					\n"	\
	"	cache	%0, %1					\n"	\
	"	.set	pop					\n"	\
	:								\
	: "i" (op), "R" (*(unsigned char *)(addr)))

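/*
 * Example: write back and invalidate one indexed primary D-cache line,
 * with INDEX_BASE keeping the access inside (C)KSEG0 as explained above:
 *
 *	cache_op(Index_Writeback_Inv_D, INDEX_BASE + offset);
 *
 * "op" must be a compile-time constant to satisfy the "i" constraint,
 * while the "R" constraint lets the compiler pick an addressing mode
 * that a single cache instruction can encode.
 */
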
#ifdef CONFIG_MIPS_MT
/*
 * Temporary hacks for SMTC debug.  Optionally force single-threaded
 * execution during I-cache flushes.
 */

#define PROTECT_CACHE_FLUSHES 1

#ifdef PROTECT_CACHE_FLUSHES

extern int mt_protiflush;
extern int mt_protdflush;
extern void mt_cflush_lockdown(void);
extern void mt_cflush_release(void);

#define BEGIN_MT_IPROT \
	unsigned long flags = 0; \
	unsigned long mtflags = 0; \
	if (mt_protiflush) { \
		local_irq_save(flags); \
		ehb(); \
		mtflags = dvpe(); \
		mt_cflush_lockdown(); \
	}

#define END_MT_IPROT \
	if (mt_protiflush) { \
		mt_cflush_release(); \
		evpe(mtflags); \
		local_irq_restore(flags); \
	}

#define BEGIN_MT_DPROT \
	unsigned long flags = 0; \
	unsigned long mtflags = 0; \
	if (mt_protdflush) { \
		local_irq_save(flags); \
		ehb(); \
		mtflags = dvpe(); \
		mt_cflush_lockdown(); \
	}

#define END_MT_DPROT \
	if (mt_protdflush) { \
		mt_cflush_release(); \
		evpe(mtflags); \
		local_irq_restore(flags); \
	}

#else

#define BEGIN_MT_IPROT
#define BEGIN_MT_DPROT
#define END_MT_IPROT
#define END_MT_DPROT

#endif /* PROTECT_CACHE_FLUSHES */
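
/*
 * Note that BEGIN_MT_IPROT/BEGIN_MT_DPROT declare the locals they need
 * (flags, mtflags), so they are only usable in the declaration part of a
 * block; the *_prologue macros below rely on exactly that.
 */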

#define __iflush_prologue \
	unsigned long redundance; \
	extern int mt_n_iflushes; \
	BEGIN_MT_IPROT \
	for (redundance = 0; redundance < mt_n_iflushes; redundance++) {

#define __iflush_epilogue \
	END_MT_IPROT \
	}

#define __dflush_prologue \
	unsigned long redundance; \
	extern int mt_n_dflushes; \
	BEGIN_MT_DPROT \
	for (redundance = 0; redundance < mt_n_dflushes; redundance++) {

#define __dflush_epilogue \
	END_MT_DPROT \
	}

#define __inv_dflush_prologue __dflush_prologue
#define __inv_dflush_epilogue __dflush_epilogue
#define __sflush_prologue {
#define __sflush_epilogue }
#define __inv_sflush_prologue __sflush_prologue
#define __inv_sflush_epilogue __sflush_epilogue

#else /* CONFIG_MIPS_MT */

#define __iflush_prologue {
#define __iflush_epilogue }
#define __dflush_prologue {
#define __dflush_epilogue }
#define __inv_dflush_prologue {
#define __inv_dflush_epilogue }
#define __sflush_prologue {
#define __sflush_epilogue }
#define __inv_sflush_prologue {
#define __inv_sflush_epilogue }

#endif /* CONFIG_MIPS_MT */

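/*
 * Index_* operations select a cache line by the index and way bits of the
 * address alone (hence the INDEX_BASE trick above); Hit_* operations only
 * affect a line that actually contains the given address.  Flush bodies
 * are bracketed by the matching prologue/epilogue pair so that, under
 * SMTC debug, a flush can be repeated and serialized, e.g.:
 *
 *	__dflush_prologue
 *	cache_op(Hit_Writeback_Inv_D, addr);
 *	__dflush_epilogue
 */
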
static inline void flush_icache_line_indexed(unsigned long addr)
{
	__iflush_prologue
	cache_op(Index_Invalidate_I, addr);
	__iflush_epilogue
}

static inline void flush_dcache_line_indexed(unsigned long addr)
{
	__dflush_prologue
	cache_op(Index_Writeback_Inv_D, addr);
	__dflush_epilogue
}

static inline void flush_scache_line_indexed(unsigned long addr)
{
	cache_op(Index_Writeback_Inv_SD, addr);
}

static inline void flush_icache_line(unsigned long addr)
{
	__iflush_prologue
	cache_op(Hit_Invalidate_I, addr);
	__iflush_epilogue
}

static inline void flush_dcache_line(unsigned long addr)
{
	__dflush_prologue
	cache_op(Hit_Writeback_Inv_D, addr);
	__dflush_epilogue
}

static inline void invalidate_dcache_line(unsigned long addr)
{
	__dflush_prologue
	cache_op(Hit_Invalidate_D, addr);
	__dflush_epilogue
}

static inline void invalidate_scache_line(unsigned long addr)
{
	cache_op(Hit_Invalidate_SD, addr);
}

static inline void flush_scache_line(unsigned long addr)
{
	cache_op(Hit_Writeback_Inv_SD, addr);
}

#define protected_cache_op(op,addr)				\
	__asm__ __volatile__(					\
	"	.set	push			\n"		\
	"	.set	noreorder		\n"		\
	"	.set	mips3			\n"		\
	"1:	cache	%0, (%1)		\n"		\
	"2:	.set	pop			\n"		\
	"	.section __ex_table,\"a\"	\n"		\
	"	"STR(PTR)" 1b, 2b		\n"		\
	"	.previous"					\
	:							\
	: "i" (op), "r" (addr))

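/*
 * The __ex_table entry pairs the address of the cache op at label 1 with
 * the fixup at label 2: if the operation faults on a bad user address,
 * the exception handler resumes at label 2 and the flush quietly becomes
 * a no-op instead of taking the kernel down.
 */
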
/*
 * The next two are for badland addresses like signal trampolines.
 */
static inline void protected_flush_icache_line(unsigned long addr)
{
	protected_cache_op(Hit_Invalidate_I, addr);
}

/*
 * R10000 / R12000 hazard - these processors don't support the Hit_Writeback_D
 * cacheop so we use Hit_Writeback_Inv_D which is supported by all R4000-style
 * caches.  Only a single cache line gets needlessly invalidated, so the
 * penalty is modest.
 */
static inline void protected_writeback_dcache_line(unsigned long addr)
{
	protected_cache_op(Hit_Writeback_Inv_D, addr);
}

static inline void protected_writeback_scache_line(unsigned long addr)
{
	protected_cache_op(Hit_Writeback_Inv_SD, addr);
}

/*
 * This one is RM7000-specific
 */
static inline void invalidate_tcache_page(unsigned long addr)
{
	cache_op(Page_Invalidate_T, addr);
}

#define cache16_unroll32(base,op)					\
	__asm__ __volatile__(						\
	"	.set push					\n"	\
	"	.set noreorder					\n"	\
	"	.set mips3					\n"	\
	"	cache %1, 0x000(%0); cache %1, 0x010(%0)	\n"	\
	"	cache %1, 0x020(%0); cache %1, 0x030(%0)	\n"	\
	"	cache %1, 0x040(%0); cache %1, 0x050(%0)	\n"	\
	"	cache %1, 0x060(%0); cache %1, 0x070(%0)	\n"	\
	"	cache %1, 0x080(%0); cache %1, 0x090(%0)	\n"	\
	"	cache %1, 0x0a0(%0); cache %1, 0x0b0(%0)	\n"	\
	"	cache %1, 0x0c0(%0); cache %1, 0x0d0(%0)	\n"	\
	"	cache %1, 0x0e0(%0); cache %1, 0x0f0(%0)	\n"	\
	"	cache %1, 0x100(%0); cache %1, 0x110(%0)	\n"	\
	"	cache %1, 0x120(%0); cache %1, 0x130(%0)	\n"	\
	"	cache %1, 0x140(%0); cache %1, 0x150(%0)	\n"	\
	"	cache %1, 0x160(%0); cache %1, 0x170(%0)	\n"	\
	"	cache %1, 0x180(%0); cache %1, 0x190(%0)	\n"	\
	"	cache %1, 0x1a0(%0); cache %1, 0x1b0(%0)	\n"	\
	"	cache %1, 0x1c0(%0); cache %1, 0x1d0(%0)	\n"	\
	"	cache %1, 0x1e0(%0); cache %1, 0x1f0(%0)	\n"	\
	"	.set pop					\n"	\
		:							\
		: "r" (base),						\
		  "i" (op));

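/*
 * Each cacheN_unroll32 variant issues 32 cache ops at an N-byte stride,
 * touching N * 32 bytes per invocation (512 bytes for the macro above);
 * that is why the blast_* loops below advance their address by
 * lsize * 32.
 */
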
#define cache32_unroll32(base,op)					\
	__asm__ __volatile__(						\
	"	.set push					\n"	\
	"	.set noreorder					\n"	\
	"	.set mips3					\n"	\
	"	cache %1, 0x000(%0); cache %1, 0x020(%0)	\n"	\
	"	cache %1, 0x040(%0); cache %1, 0x060(%0)	\n"	\
	"	cache %1, 0x080(%0); cache %1, 0x0a0(%0)	\n"	\
	"	cache %1, 0x0c0(%0); cache %1, 0x0e0(%0)	\n"	\
	"	cache %1, 0x100(%0); cache %1, 0x120(%0)	\n"	\
	"	cache %1, 0x140(%0); cache %1, 0x160(%0)	\n"	\
	"	cache %1, 0x180(%0); cache %1, 0x1a0(%0)	\n"	\
	"	cache %1, 0x1c0(%0); cache %1, 0x1e0(%0)	\n"	\
	"	cache %1, 0x200(%0); cache %1, 0x220(%0)	\n"	\
	"	cache %1, 0x240(%0); cache %1, 0x260(%0)	\n"	\
	"	cache %1, 0x280(%0); cache %1, 0x2a0(%0)	\n"	\
	"	cache %1, 0x2c0(%0); cache %1, 0x2e0(%0)	\n"	\
	"	cache %1, 0x300(%0); cache %1, 0x320(%0)	\n"	\
	"	cache %1, 0x340(%0); cache %1, 0x360(%0)	\n"	\
	"	cache %1, 0x380(%0); cache %1, 0x3a0(%0)	\n"	\
	"	cache %1, 0x3c0(%0); cache %1, 0x3e0(%0)	\n"	\
	"	.set pop					\n"	\
		:							\
		: "r" (base),						\
		  "i" (op));

#define cache64_unroll32(base,op)					\
	__asm__ __volatile__(						\
	"	.set push					\n"	\
	"	.set noreorder					\n"	\
	"	.set mips3					\n"	\
	"	cache %1, 0x000(%0); cache %1, 0x040(%0)	\n"	\
	"	cache %1, 0x080(%0); cache %1, 0x0c0(%0)	\n"	\
	"	cache %1, 0x100(%0); cache %1, 0x140(%0)	\n"	\
	"	cache %1, 0x180(%0); cache %1, 0x1c0(%0)	\n"	\
	"	cache %1, 0x200(%0); cache %1, 0x240(%0)	\n"	\
	"	cache %1, 0x280(%0); cache %1, 0x2c0(%0)	\n"	\
	"	cache %1, 0x300(%0); cache %1, 0x340(%0)	\n"	\
	"	cache %1, 0x380(%0); cache %1, 0x3c0(%0)	\n"	\
	"	cache %1, 0x400(%0); cache %1, 0x440(%0)	\n"	\
	"	cache %1, 0x480(%0); cache %1, 0x4c0(%0)	\n"	\
	"	cache %1, 0x500(%0); cache %1, 0x540(%0)	\n"	\
	"	cache %1, 0x580(%0); cache %1, 0x5c0(%0)	\n"	\
	"	cache %1, 0x600(%0); cache %1, 0x640(%0)	\n"	\
	"	cache %1, 0x680(%0); cache %1, 0x6c0(%0)	\n"	\
	"	cache %1, 0x700(%0); cache %1, 0x740(%0)	\n"	\
	"	cache %1, 0x780(%0); cache %1, 0x7c0(%0)	\n"	\
	"	.set pop					\n"	\
		:							\
		: "r" (base),						\
		  "i" (op));

#define cache128_unroll32(base,op)					\
	__asm__ __volatile__(						\
	"	.set push					\n"	\
	"	.set noreorder					\n"	\
	"	.set mips3					\n"	\
	"	cache %1, 0x000(%0); cache %1, 0x080(%0)	\n"	\
	"	cache %1, 0x100(%0); cache %1, 0x180(%0)	\n"	\
	"	cache %1, 0x200(%0); cache %1, 0x280(%0)	\n"	\
	"	cache %1, 0x300(%0); cache %1, 0x380(%0)	\n"	\
	"	cache %1, 0x400(%0); cache %1, 0x480(%0)	\n"	\
	"	cache %1, 0x500(%0); cache %1, 0x580(%0)	\n"	\
	"	cache %1, 0x600(%0); cache %1, 0x680(%0)	\n"	\
	"	cache %1, 0x700(%0); cache %1, 0x780(%0)	\n"	\
	"	cache %1, 0x800(%0); cache %1, 0x880(%0)	\n"	\
	"	cache %1, 0x900(%0); cache %1, 0x980(%0)	\n"	\
	"	cache %1, 0xa00(%0); cache %1, 0xa80(%0)	\n"	\
	"	cache %1, 0xb00(%0); cache %1, 0xb80(%0)	\n"	\
	"	cache %1, 0xc00(%0); cache %1, 0xc80(%0)	\n"	\
	"	cache %1, 0xd00(%0); cache %1, 0xd80(%0)	\n"	\
	"	cache %1, 0xe00(%0); cache %1, 0xe80(%0)	\n"	\
	"	cache %1, 0xf00(%0); cache %1, 0xf80(%0)	\n"	\
	"	.set pop					\n"	\
		:							\
		: "r" (base),						\
		  "i" (op));

/* build blast_xxx, blast_xxx_page, blast_xxx_page_indexed */
#define __BUILD_BLAST_CACHE(pfx, desc, indexop, hitop, lsize) \
static inline void blast_##pfx##cache##lsize(void) \
{ \
	unsigned long start = INDEX_BASE; \
	unsigned long end = start + current_cpu_data.desc.waysize; \
	unsigned long ws_inc = 1UL << current_cpu_data.desc.waybit; \
	unsigned long ws_end = current_cpu_data.desc.ways << \
			       current_cpu_data.desc.waybit; \
	unsigned long ws, addr; \
\
	__##pfx##flush_prologue \
\
	for (ws = 0; ws < ws_end; ws += ws_inc) \
		for (addr = start; addr < end; addr += lsize * 32) \
			cache##lsize##_unroll32(addr|ws, indexop); \
\
	__##pfx##flush_epilogue \
} \
\
static inline void blast_##pfx##cache##lsize##_page(unsigned long page) \
{ \
	unsigned long start = page; \
	unsigned long end = page + PAGE_SIZE; \
\
	__##pfx##flush_prologue \
\
	do { \
		cache##lsize##_unroll32(start, hitop); \
		start += lsize * 32; \
	} while (start < end); \
\
	__##pfx##flush_epilogue \
} \
\
static inline void blast_##pfx##cache##lsize##_page_indexed(unsigned long page) \
{ \
	unsigned long indexmask = current_cpu_data.desc.waysize - 1; \
	unsigned long start = INDEX_BASE + (page & indexmask); \
	unsigned long end = start + PAGE_SIZE; \
	unsigned long ws_inc = 1UL << current_cpu_data.desc.waybit; \
	unsigned long ws_end = current_cpu_data.desc.ways << \
			       current_cpu_data.desc.waybit; \
	unsigned long ws, addr; \
\
	__##pfx##flush_prologue \
\
	for (ws = 0; ws < ws_end; ws += ws_inc) \
		for (addr = start; addr < end; addr += lsize * 32) \
			cache##lsize##_unroll32(addr|ws, indexop); \
\
	__##pfx##flush_epilogue \
}

__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 16)
__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 16)
__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 16)
__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 32)
__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 32)
__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 32)
__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 64)
__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 64)
__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 128)

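/*
 * The expansions above yield, among others, blast_dcache16(),
 * blast_icache32_page() and blast_scache128_page_indexed(); callers pick
 * the variant that matches the line size the CPU probe reported.
 */
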
/* build blast_xxx_range, protected_blast_xxx_range */
#define __BUILD_BLAST_CACHE_RANGE(pfx, desc, hitop, prot) \
static inline void prot##blast_##pfx##cache##_range(unsigned long start, \
						    unsigned long end) \
{ \
	unsigned long lsize = cpu_##desc##_line_size(); \
	unsigned long addr = start & ~(lsize - 1); \
	unsigned long aend = (end - 1) & ~(lsize - 1); \
\
	__##pfx##flush_prologue \
\
	while (1) { \
		prot##cache_op(hitop, addr); \
		if (addr == aend) \
			break; \
		addr += lsize; \
	} \
\
	__##pfx##flush_epilogue \
}

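/*
 * The instantiations below provide blast_dcache_range() and friends; the
 * protected_ variants use protected_cache_op(), so a faulting address is
 * skipped instead of being fatal, e.g.:
 *
 *	protected_blast_icache_range(start, end);
 */
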
__BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, protected_)
__BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, protected_)
__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I, protected_)
__BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, )
__BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, )
/* blast_inv_dcache_range */
__BUILD_BLAST_CACHE_RANGE(inv_d, dcache, Hit_Invalidate_D, )
__BUILD_BLAST_CACHE_RANGE(inv_s, scache, Hit_Invalidate_SD, )

#endif /* _ASM_R4KCACHE_H */