})
#endif
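+/*
+ * Widen a pointer to a register-sized value; the (signed long)
+ * step sign-extends, so the result stays well-defined when
+ * reg_size_t is wider than a pointer.
+ */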
+static inline reg_size_t register_align(void *val)
+{
+ return (unsigned long)(signed long)val;
+}
+
int handle_unaligned_access(insn_size_t instruction, struct pt_regs *regs,
- struct mem_access *ma);
+ struct mem_access *ma, int);
asmlinkage void do_address_error(struct pt_regs *regs,
unsigned long writeaccess,
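(A usage sketch, not part of the patch: register_align() lets fixup code
store a computed address back into the saved register image. The helper
name and call shape below are hypothetical.)

static void writeback_address(struct pt_regs *regs, int rn, void *addr)
{
	regs->regs[rn] = register_align(addr);
}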
--- /dev/null
+++ b/arch/sh/lib/__clear_user.S
+/*
+ * __clear_user_page, __clear_user, clear_page implementations for SuperH
+ *
+ * Copyright (C) 2001 Kaz Kojima
+ * Copyright (C) 2001, 2002 Niibe Yutaka
+ * Copyright (C) 2006 Paul Mundt
+ */
+#include <linux/linkage.h>
+#include <asm/page.h>
+
+ENTRY(__clear_user)
+ !
+ mov #0, r0
+ mov #0xffffffe0, r1 ! 32-byte alignment mask (~31)
+ !
+ ! r4..(r4+31)&~31 -------- not aligned [ Area 0 ]
+ ! (r4+31)&~31..(r4+r5)&~31 -------- aligned [ Area 1 ]
+ ! (r4+r5)&~31..r4+r5 -------- not aligned [ Area 2 ]
+ !
+ ! Clear area 0
+ mov r4, r2
+ !
+ tst r1, r5 ! length < 32
+ bt .Larea2 ! skip to remainder
+ !
+ add #31, r2
+ and r1, r2
+ cmp/eq r4, r2
+ bt .Larea1
+ mov r2, r3
+ sub r4, r3
+ mov r3, r7
+ mov r4, r2
+ !
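+ ! dt decrements r3 and sets T at zero; the add #1 in the bf/s
+ ! delay slot advances the write pointer each iteration.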
+.L0: dt r3
+0: mov.b r0, @r2
+ bf/s .L0
+ add #1, r2
+ !
+ sub r7, r5
+ mov r2, r4
+.Larea1:
+ mov r4, r3
+ add r5, r3
+ and r1, r3
+ cmp/hi r2, r3
+ bf .Larea2
+ !
+ ! Clear area 1
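+ ! movca.l allocates the operand-cache line on store, letting SH-4
+ ! zero each 32-byte line without first fetching it from memory.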
+#if defined(CONFIG_CPU_SH4)
+1: movca.l r0, @r2
+#else
+1: mov.l r0, @r2
+#endif
+ add #4, r2
+2: mov.l r0, @r2
+ add #4, r2
+3: mov.l r0, @r2
+ add #4, r2
+4: mov.l r0, @r2
+ add #4, r2
+5: mov.l r0, @r2
+ add #4, r2
+6: mov.l r0, @r2
+ add #4, r2
+7: mov.l r0, @r2
+ add #4, r2
+8: mov.l r0, @r2
+ add #4, r2
+ cmp/hi r2, r3
+ bt/s 1b
+ nop
+ !
+ ! Clear area 2
+.Larea2:
+ mov r4, r3
+ add r5, r3
+ cmp/hs r3, r2
+ bt/s .Ldone
+ sub r2, r3
+.L2: dt r3
+9: mov.b r0, @r2
+ bf/s .L2
+ add #1, r2
+ !
+.Ldone: rts
+ mov #0, r0 ! return 0 on success
+
+ ! fault path: return the number of bytes left uncleared
+.Lbad_clear_user:
+ mov r4, r0
+ add r5, r0
+ rts
+ sub r2, r0
+
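+! Fixup table: pairs each store that can fault on a bad user pointer
+! (labels 0-9 above) with the .Lbad_clear_user recovery path.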
+.section __ex_table,"a"
+ .align 2
+ .long 0b, .Lbad_clear_user
+ .long 1b, .Lbad_clear_user
+ .long 2b, .Lbad_clear_user
+ .long 3b, .Lbad_clear_user
+ .long 4b, .Lbad_clear_user
+ .long 5b, .Lbad_clear_user
+ .long 6b, .Lbad_clear_user
+ .long 7b, .Lbad_clear_user
+ .long 8b, .Lbad_clear_user
+ .long 9b, .Lbad_clear_user
+.previous
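(For reference, the three-area strategy in C. This is an illustrative
sketch only: the helper name is made up, and it omits the fault recovery
that the __ex_table entries give the real routine.)

/* Byte stores for the unaligned head, eight longword stores per
 * 32-byte line in the aligned middle, byte stores for the tail.
 * Unlike the assembly, there is no fault fixup here; a real
 * __clear_user must survive bad user pointers. */
static unsigned long clear_user_sketch(char *p, unsigned long n)
{
	char *end = p + n;
	char *head_end = (char *)(((unsigned long)p + 31) & ~31UL);
	char *mid_end = (char *)((unsigned long)end & ~31UL);
	int i;

	if (n < 32)			/* too short: no aligned middle */
		head_end = mid_end = p;

	while (p < head_end)		/* Area 0 */
		*p++ = 0;
	while (p < mid_end) {		/* Area 1 */
		for (i = 0; i < 8; i++)
			((unsigned long *)p)[i] = 0;
		p += 32;
	}
	while (p < end)			/* Area 2 */
		*p++ = 0;

	return 0;			/* 0 => fully cleared */
}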
a3 += linesz;
} while (a0 < a0e);
}
- switch (boot_cpu_data.dcache.ways) {
- case 1:
- __flush_dcache_segment_fn = __flush_dcache_segment_1way;
- break;
- case 2:
- __flush_dcache_segment_fn = __flush_dcache_segment_2way;
- break;
- case 4:
- __flush_dcache_segment_fn = __flush_dcache_segment_4way;
- break;
- default:
- panic("unknown number of cache ways\n");
- break;
+
+extern void __weak sh4__flush_region_init(void);
+
+/*
+ * SH-4 has virtually indexed and physically tagged cache.
+ */
+void __init sh4_cache_init(void)
+{
+ unsigned int wt_enabled = !!(__raw_readl(CCR) & CCR_CACHE_WT);
+
+ printk("PVR=%08x CVR=%08x PRR=%08x\n",
+ ctrl_inl(CCN_PVR),
+ ctrl_inl(CCN_CVR),
+ ctrl_inl(CCN_PRR));
+
+ if (wt_enabled)
+ __flush_dcache_segment_fn = __flush_dcache_segment_writethrough;
+ else {
+ switch (boot_cpu_data.dcache.ways) {
+ case 1:
+ __flush_dcache_segment_fn = __flush_dcache_segment_1way;
+ break;
+ case 2:
+ __flush_dcache_segment_fn = __flush_dcache_segment_2way;
+ break;
+ case 4:
+ __flush_dcache_segment_fn = __flush_dcache_segment_4way;
+ break;
+ default:
+ panic("unknown number of cache ways\n");
+ break;
+ }
+ }
+
+ local_flush_icache_range = sh4_flush_icache_range;
+ local_flush_dcache_page = sh4_flush_dcache_page;
+ local_flush_cache_all = sh4_flush_cache_all;
+ local_flush_cache_mm = sh4_flush_cache_mm;
+ local_flush_cache_dup_mm = sh4_flush_cache_mm;
+ local_flush_cache_page = sh4_flush_cache_page;
+ local_flush_cache_range = sh4_flush_cache_range;
+
+ sh4__flush_region_init();
+}
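(On the CCR test above: CCR_CACHE_WT is a single mode bit, and the double
negation collapses the masked result to 0 or 1. A standalone illustration;
the snapshot value is invented, and the bit position follows the SH-4 CCR
layout.)

#include <stdio.h>

#define CCR_CACHE_WT	0x00000002	/* SH-4 operand-cache write-through bit */

int main(void)
{
	unsigned int ccr = 0x0000010b;	/* hypothetical CCR snapshot: WT set */
	unsigned int wt_enabled = !!(ccr & CCR_CACHE_WT);

	printf("operand cache: write-%s\n", wt_enabled ? "through" : "back");
	return 0;
}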