#ifndef _M68K_SYSTEM_H
#define _M68K_SYSTEM_H

#include <linux/linkage.h>
#include <linux/kernel.h>
#include <asm/segment.h>
#include <asm/entry.h>

#ifdef __KERNEL__

/*
 * switch_to(n) should switch tasks to task ptr, first checking that
 * ptr isn't the current task, in which case it does nothing. This
 * also clears the TS-flag if the task we switched to used the math
 * co-processor most recently.
 */
/*
 * switch_to() saves the extra registers that are not saved
 * automatically by SAVE_SWITCH_STACK in resume(), i.e. d0-d5 and
 * a0-a1. Some of these are used by schedule() and its predecessors,
 * so we might see unexpected behavior when a task returns with
 * unexpected register values.
 *
 * syscall stores these registers itself and none of them are used
 * by syscall after the function in the syscall has been called.
 *
 * Beware that resume now expects *next to be in d1 and the offset of
 * tss to be in a1. This saves a few instructions as we no longer have
 * to push them onto the stack and read them back right after.
 *
 * 02/17/96 - Jes Sorensen ([email protected])
 *
 * Changed 96/09/19 by Andreas Schwab
 * pass prev in a0, next in a1
 */
asmlinkage void resume(void);
#define switch_to(prev,next,last) do { \
	register void *_prev __asm__ ("a0") = (prev); \
	register void *_next __asm__ ("a1") = (next); \
	register void *_last __asm__ ("d1"); \
	__asm__ __volatile__("jbsr resume" \
			     : "=a" (_prev), "=a" (_next), "=d" (_last) \
			     : "0" (_prev), "1" (_next) \
			     : "d0", "d2", "d3", "d4", "d5"); \
	(last) = _last; \
} while (0)

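#if 0	/* usage sketch, not part of the original header */
/*
 * Illustration only: the core scheduler invokes the macro roughly as
 * below. When switch_to() "returns" we are already running in next's
 * context, and last names whatever task was running just before us.
 * toy_context_switch() is a hypothetical caller, not a kernel function.
 */
static void toy_context_switch(struct task_struct *prev,
			       struct task_struct *next)
{
	struct task_struct *last;

	switch_to(prev, next, last);
	/* now on next's stack; last == the task that ran before us */
}
#endif
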
/*
 * Force strict CPU ordering.
 * Not really required on m68k...
 */
#define nop()		do { asm volatile ("nop"); barrier(); } while (0)
#define mb()		barrier()
#define rmb()		barrier()
#define wmb()		barrier()
#define read_barrier_depends()	((void)0)
#define set_mb(var, value)	({ (var) = (value); wmb(); })

#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#define smp_read_barrier_depends()	((void)0)

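#if 0	/* usage sketch, not part of the original header */
/*
 * Hypothetical producer side of a flag/payload handoff: wmb() keeps
 * the payload store ordered before the flag store. On m68k all of the
 * macros above reduce to barrier(), a pure compiler barrier, because
 * the CPU itself is strongly ordered.
 */
static int toy_data, toy_ready;

static inline void toy_publish(int v)
{
	toy_data = v;		/* payload first */
	wmb();			/* order payload before flag */
	toy_ready = 1;		/* consumer polls this flag */
}
#endif
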
/* interrupt control.. */
#if 0
#define local_irq_enable() asm volatile ("andiw %0,%%sr": : "i" (ALLOWINT) : "memory")
#else
#include <linux/hardirq.h>
#define local_irq_enable() ({							\
	if (MACH_IS_Q40 || !hardirq_count())					\
		asm volatile ("andiw %0,%%sr": : "i" (ALLOWINT) : "memory");	\
})
#endif
#define local_irq_disable() asm volatile ("oriw #0x0700,%%sr": : : "memory")
#define local_save_flags(x) asm volatile ("movew %%sr,%0":"=d" (x) : : "memory")
#define local_irq_restore(x) asm volatile ("movew %0,%%sr": :"d" (x) : "memory")

static inline int irqs_disabled(void)
{
	unsigned long flags;
	local_save_flags(flags);
	return flags & ~ALLOWINT;
}

/* For spinlocks etc */
#define local_irq_save(x)	({ local_save_flags(x); local_irq_disable(); })

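#if 0	/* usage sketch, not part of the original header */
/*
 * Hypothetical critical section: local_irq_save() records the status
 * register and raises the IPL, so the read-modify-write below cannot
 * be interrupted; local_irq_restore() then reinstates the previous
 * mask, which keeps nested use safe.
 */
static unsigned long toy_counter;

static inline void toy_counter_inc(void)
{
	unsigned long flags;

	local_irq_save(flags);
	toy_counter++;
	local_irq_restore(flags);
}
#endif
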
#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))

struct __xchg_dummy { unsigned long a[100]; };
#define __xg(x) ((volatile struct __xchg_dummy *)(x))

#ifndef CONFIG_RMW_INSNS
/* No cas instruction available: fake an atomic swap by masking interrupts. */
static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
{
	unsigned long flags, tmp;

	local_irq_save(flags);

	switch (size) {
	case 1:
		tmp = *(u8 *)ptr;
		*(u8 *)ptr = x;
		x = tmp;
		break;
	case 2:
		tmp = *(u16 *)ptr;
		*(u16 *)ptr = x;
		x = tmp;
		break;
	case 4:
		tmp = *(u32 *)ptr;
		*(u32 *)ptr = x;
		x = tmp;
		break;
	default:
		BUG();
	}

	local_irq_restore(flags);
	return x;
}
#else
/*
 * Load the old value, then cas the new one in, looping until the
 * store succeeds with no intervening update from elsewhere.
 */
static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
{
	switch (size) {
	case 1:
		__asm__ __volatile__
			("moveb %2,%0\n\t"
			 "1:\n\t"
			 "casb %0,%1,%2\n\t"
			 "jne 1b"
			 : "=&d" (x) : "d" (x), "m" (*__xg(ptr)) : "memory");
		break;
	case 2:
		__asm__ __volatile__
			("movew %2,%0\n\t"
			 "1:\n\t"
			 "casw %0,%1,%2\n\t"
			 "jne 1b"
			 : "=&d" (x) : "d" (x), "m" (*__xg(ptr)) : "memory");
		break;
	case 4:
		__asm__ __volatile__
			("movel %2,%0\n\t"
			 "1:\n\t"
			 "casl %0,%1,%2\n\t"
			 "jne 1b"
			 : "=&d" (x) : "d" (x), "m" (*__xg(ptr)) : "memory");
		break;
	}
	return x;
}
#endif

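#if 0	/* usage sketch, not part of the original header */
/*
 * Hypothetical test-and-set lock built on xchg(): the macro returns
 * the previous value, so reading back 0 means we took the lock.
 */
static volatile int toy_lock_var;

static inline void toy_lock(void)
{
	while (xchg(&toy_lock_var, 1) != 0)
		;	/* spin until the holder stores 0 */
}

static inline void toy_unlock(void)
{
	xchg(&toy_lock_var, 0);
}
#endif
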
#include <asm-generic/cmpxchg-local.h>

#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))

/*
 * Atomic compare and exchange. Compare OLD with MEM, if identical,
 * store NEW in MEM. Return the initial value in MEM. Success is
 * indicated by comparing RETURN with OLD.
 */
#ifdef CONFIG_RMW_INSNS
#define __HAVE_ARCH_CMPXCHG	1

static inline unsigned long __cmpxchg(volatile void *p, unsigned long old,
				      unsigned long new, int size)
{
	switch (size) {
	case 1:
		__asm__ __volatile__ ("casb %0,%2,%1"
				      : "=d" (old), "=m" (*(char *)p)
				      : "d" (new), "0" (old), "m" (*(char *)p));
		break;
	case 2:
		__asm__ __volatile__ ("casw %0,%2,%1"
				      : "=d" (old), "=m" (*(short *)p)
				      : "d" (new), "0" (old), "m" (*(short *)p));
		break;
	case 4:
		__asm__ __volatile__ ("casl %0,%2,%1"
				      : "=d" (old), "=m" (*(int *)p)
				      : "d" (new), "0" (old), "m" (*(int *)p));
		break;
	}
	return old;
}

#define cmpxchg(ptr, o, n)						    \
	((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(o),	    \
			(unsigned long)(n), sizeof(*(ptr))))
#define cmpxchg_local(ptr, o, n)					    \
	((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(o),	    \
			(unsigned long)(n), sizeof(*(ptr))))
#else

/*
 * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
 * them available.
 */
#define cmpxchg_local(ptr, o, n)					       \
	((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\
			(unsigned long)(n), sizeof(*(ptr))))

#ifndef CONFIG_SMP
#include <asm-generic/cmpxchg.h>
#endif

#endif

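#if 0	/* usage sketch, not part of the original header */
/*
 * Hypothetical lock-free add built on cmpxchg() (available here with
 * CONFIG_RMW_INSNS, or via asm-generic/cmpxchg.h on !SMP): retry until
 * cmpxchg() reports that memory still held the old value our update
 * was based on.
 */
static int toy_value;

static inline int toy_add(int delta)
{
	int old, new;

	do {
		old = toy_value;
		new = old + delta;
	} while (cmpxchg(&toy_value, old, new) != old);

	return new;
}
#endif
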
#define arch_align_stack(x) (x)

#endif /* __KERNEL__ */

#endif /* _M68K_SYSTEM_H */