#ifndef _ASM_POWERPC_IO_H
#define _ASM_POWERPC_IO_H
#ifdef __KERNEL__

#define ARCH_HAS_IOREMAP_WC
#ifdef CONFIG_PPC32
#define ARCH_HAS_IOREMAP_WT
#endif

/*
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

/* Check for existence of legacy devices */
extern int check_legacy_ioport(unsigned long base_port);
#define I8042_DATA_REG	0x60
#define FDC_BASE	0x3f0

#if defined(CONFIG_PPC64) && defined(CONFIG_PCI)
extern struct pci_dev *isa_bridge_pcidev;
/*
 * Do we have legacy ISA devices?
 */
#define arch_has_dev_port()	(isa_bridge_pcidev != NULL || isa_io_special)
#endif

#include <linux/device.h>
#include <linux/compiler.h>
#include <asm/page.h>
#include <asm/byteorder.h>
#include <asm/synch.h>
#include <asm/delay.h>
#include <asm/mmu.h>
#include <asm/ppc_asm.h>

#ifdef CONFIG_PPC64
#include <asm/paca.h>
#endif

#define SIO_CONFIG_RA	0x398
#define SIO_CONFIG_RD	0x399

#define SLOW_DOWN_IO

/* 32-bit uses slightly different variables for the various IO
 * bases. Most of this file only uses _IO_BASE, though, which we
 * define properly based on the platform.
 */
#ifndef CONFIG_PCI
#define _IO_BASE	0
#define _ISA_MEM_BASE	0
#define PCI_DRAM_OFFSET	0
#elif defined(CONFIG_PPC32)
#define _IO_BASE	isa_io_base
#define _ISA_MEM_BASE	isa_mem_base
#define PCI_DRAM_OFFSET	pci_dram_offset
#else
#define _IO_BASE	pci_io_base
#define _ISA_MEM_BASE	isa_mem_base
#define PCI_DRAM_OFFSET	0
#endif

extern unsigned long isa_io_base;
extern unsigned long pci_io_base;
extern unsigned long pci_dram_offset;

extern resource_size_t isa_mem_base;

/* Boolean set by platform if PIO accesses are supported while _IO_BASE
 * is not set or addresses cannot be translated to MMIO. This is typically
 * set when the platform supports "special" PIO accesses via a non memory
 * mapped mechanism, and allows things like the early udbg UART code to
 * function.
 */
extern bool isa_io_special;

#ifdef CONFIG_PPC32
#if defined(CONFIG_PPC_INDIRECT_PIO) || defined(CONFIG_PPC_INDIRECT_MMIO)
#error CONFIG_PPC_INDIRECT_{PIO,MMIO} are not yet supported on 32 bits
#endif
#endif

/*
 *
 * Low level MMIO accessors
 *
 * This provides the non-bus specific accessors to MMIO. Those are PowerPC
 * specific and thus shouldn't be used in generic code. The accessors
 * provided here are:
 *
 *	in_8, in_le16, in_be16, in_le32, in_be32, in_le64, in_be64
 *	out_8, out_le16, out_be16, out_le32, out_be32, out_le64, out_be64
 *	_insb, _insw_ns, _insl_ns, _outsb, _outsw_ns, _outsl_ns
 *
 * Those operate directly on a kernel virtual address. Note that the prototype
 * for the out_* accessors has the arguments in opposite order from the usual
 * linux PCI accessors. Unlike those, they take the address first and the value
 * next.
 *
 * Note: I might drop the _ns suffix on the stream operations soon as it is
 * simply normal for stream operations to not swap in the first place.
 *
 */
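/*
 * Illustrative usage only (not part of the original header): a driver
 * reading and then writing a hypothetical big-endian control register with
 * the accessors defined below. Note the address-first, value-second
 * argument order of the out_* routines, the opposite of writel(val, addr).
 * "base", CTRL_REG and CTRL_ENABLE are made-up names.
 *
 *	u32 __iomem *ctrl = base + CTRL_REG;
 *	u32 v;
 *
 *	v = in_be32(ctrl);			// sync + load + trap/isync
 *	out_be32(ctrl, v | CTRL_ENABLE);	// sync + store
 */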

#ifdef CONFIG_PPC64
#define IO_SET_SYNC_FLAG()	do { local_paca->io_sync = 1; } while(0)
#else
#define IO_SET_SYNC_FLAG()
#endif

#define DEF_MMIO_IN_X(name, size, insn)					\
static inline u##size name(const volatile u##size __iomem *addr)	\
{									\
	u##size ret;							\
	__asm__ __volatile__("sync;"#insn" %0,%y1;twi 0,%0,0;isync"	\
		: "=r" (ret) : "Z" (*addr) : "memory");			\
	return ret;							\
}

#define DEF_MMIO_OUT_X(name, size, insn)				\
static inline void name(volatile u##size __iomem *addr, u##size val)	\
{									\
	__asm__ __volatile__("sync;"#insn" %1,%y0"			\
		: "=Z" (*addr) : "r" (val) : "memory");			\
	IO_SET_SYNC_FLAG();						\
}

#define DEF_MMIO_IN_D(name, size, insn)					\
static inline u##size name(const volatile u##size __iomem *addr)	\
{									\
	u##size ret;							\
	__asm__ __volatile__("sync;"#insn"%U1%X1 %0,%1;twi 0,%0,0;isync"\
		: "=r" (ret) : "m" (*addr) : "memory");			\
	return ret;							\
}

#define DEF_MMIO_OUT_D(name, size, insn)				\
static inline void name(volatile u##size __iomem *addr, u##size val)	\
{									\
	__asm__ __volatile__("sync;"#insn"%U0%X0 %1,%0"			\
		: "=m" (*addr) : "r" (val) : "memory");			\
	IO_SET_SYNC_FLAG();						\
}

DEF_MMIO_IN_D(in_8,     8, lbz);
DEF_MMIO_OUT_D(out_8,   8, stb);

#ifdef __BIG_ENDIAN__
DEF_MMIO_IN_D(in_be16, 16, lhz);
DEF_MMIO_IN_D(in_be32, 32, lwz);
DEF_MMIO_IN_X(in_le16, 16, lhbrx);
DEF_MMIO_IN_X(in_le32, 32, lwbrx);

DEF_MMIO_OUT_D(out_be16, 16, sth);
DEF_MMIO_OUT_D(out_be32, 32, stw);
DEF_MMIO_OUT_X(out_le16, 16, sthbrx);
DEF_MMIO_OUT_X(out_le32, 32, stwbrx);
#else
DEF_MMIO_IN_X(in_be16, 16, lhbrx);
DEF_MMIO_IN_X(in_be32, 32, lwbrx);
DEF_MMIO_IN_D(in_le16, 16, lhz);
DEF_MMIO_IN_D(in_le32, 32, lwz);

DEF_MMIO_OUT_X(out_be16, 16, sthbrx);
DEF_MMIO_OUT_X(out_be32, 32, stwbrx);
DEF_MMIO_OUT_D(out_le16, 16, sth);
DEF_MMIO_OUT_D(out_le32, 32, stw);

#endif /* __BIG_ENDIAN */

#ifdef __powerpc64__

#ifdef __BIG_ENDIAN__
DEF_MMIO_OUT_D(out_be64, 64, std);
DEF_MMIO_IN_D(in_be64, 64, ld);

/* There are no asm instructions for 64-bit byte-reversed loads and stores */
static inline u64 in_le64(const volatile u64 __iomem *addr)
{
	return swab64(in_be64(addr));
}

static inline void out_le64(volatile u64 __iomem *addr, u64 val)
{
	out_be64(addr, swab64(val));
}
#else
DEF_MMIO_OUT_D(out_le64, 64, std);
DEF_MMIO_IN_D(in_le64, 64, ld);

/* There are no asm instructions for 64-bit byte-reversed loads and stores */
static inline u64 in_be64(const volatile u64 __iomem *addr)
{
	return swab64(in_le64(addr));
}

static inline void out_be64(volatile u64 __iomem *addr, u64 val)
{
	out_le64(addr, swab64(val));
}

#endif
#endif /* __powerpc64__ */

/*
 * Low level IO stream instructions are defined out of line for now
 */
extern void _insb(const volatile u8 __iomem *addr, void *buf, long count);
extern void _outsb(volatile u8 __iomem *addr, const void *buf, long count);
extern void _insw_ns(const volatile u16 __iomem *addr, void *buf, long count);
extern void _outsw_ns(volatile u16 __iomem *addr, const void *buf, long count);
extern void _insl_ns(const volatile u32 __iomem *addr, void *buf, long count);
extern void _outsl_ns(volatile u32 __iomem *addr, const void *buf, long count);

/* The _ns naming is historical and will be removed. For now, just #define
 * the non _ns equivalent names
 */
#define _insw	_insw_ns
#define _insl	_insl_ns
#define _outsw	_outsw_ns
#define _outsl	_outsl_ns


/*
 * memset_io, memcpy_toio, memcpy_fromio base implementations are out of line
 */

extern void _memset_io(volatile void __iomem *addr, int c, unsigned long n);
extern void _memcpy_fromio(void *dest, const volatile void __iomem *src,
			   unsigned long n);
extern void _memcpy_toio(volatile void __iomem *dest, const void *src,
			 unsigned long n);

/*
 *
 * PCI and standard ISA accessors
 *
 * Those are globally defined linux accessors for devices on PCI or ISA
 * busses. They follow the Linux defined semantics. The current implementation
 * for PowerPC is as close as possible to the x86 version of these, and thus
 * provides fairly heavyweight barriers for the non-raw versions
 *
 * In addition, they support a hook mechanism when CONFIG_PPC_INDIRECT_MMIO
 * or CONFIG_PPC_INDIRECT_PIO are set allowing the platform to provide its
 * own implementation of some or all of the accessors.
 */
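/*
 * A minimal sketch of what those heavy barriers buy a driver (illustrative,
 * not part of the original header; "ring", "desc" and DMA_KICK are
 * hypothetical). Because the non-raw writel() starts with a full sync, the
 * descriptor written to coherent memory is visible to the device before the
 * doorbell MMIO store reaches it, without an explicit wmb():
 *
 *	ring->desc[i].addr = cpu_to_le64(dma_addr);
 *	ring->desc[i].len  = cpu_to_le32(len);
 *	writel(i, ioaddr + DMA_KICK);	// ordering provided by the accessor
 */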

/*
 * Include the EEH definitions when EEH is enabled only so they don't get
 * in the way when building for 32 bits
 */
#ifdef CONFIG_EEH
#include <asm/eeh.h>
#endif

/* Shortcut to the MMIO argument pointer */
#define PCI_IO_ADDR	volatile void __iomem *

/* Indirect IO address tokens:
 *
 * When CONFIG_PPC_INDIRECT_MMIO is set, the platform can provide hooks
 * on all MMIOs. (Note that this is all 64 bits only for now)
 *
 * To help platforms that may need to differentiate MMIO addresses in
 * their hooks, a bitfield is reserved for use by the platform near the
 * top of MMIO addresses (not PIO, those have to cope the hard way).
 *
 * The highest addresses in the kernel virtual space are:
 *
 *  d0003fffffffffff	# with Hash MMU
 *  c00fffffffffffff	# with Radix MMU
 *
 * The top 4 bits are reserved as the region ID on hash, leaving us 8 bits
 * that can be used for the field.
 *
 * The direct IO mapping operations will then mask off those bits
 * before doing the actual access, though that only happens when
 * CONFIG_PPC_INDIRECT_MMIO is set, thus be careful when you use that
 * mechanism
 *
 * For PIO, there is a separate CONFIG_PPC_INDIRECT_PIO which makes
 * all PIO functions call through a hook.
 */

#ifdef CONFIG_PPC_INDIRECT_MMIO
#define PCI_IO_IND_TOKEN_SHIFT	52
#define PCI_IO_IND_TOKEN_MASK	(0xfful << PCI_IO_IND_TOKEN_SHIFT)
#define PCI_FIX_ADDR(addr)						\
	((PCI_IO_ADDR)(((unsigned long)(addr)) & ~PCI_IO_IND_TOKEN_MASK))
#define PCI_GET_ADDR_TOKEN(addr)					\
	(((unsigned long)(addr) & PCI_IO_IND_TOKEN_MASK) >>		\
		PCI_IO_IND_TOKEN_SHIFT)
#define PCI_SET_ADDR_TOKEN(addr, token)					\
do {									\
	unsigned long __a = (unsigned long)(addr);			\
	__a &= ~PCI_IO_IND_TOKEN_MASK;					\
	__a |= ((unsigned long)(token)) << PCI_IO_IND_TOKEN_SHIFT;	\
	(addr) = (void __iomem *)__a;					\
} while(0)
#else
#define PCI_FIX_ADDR(addr) (addr)
#endif
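/*
 * Illustrative sketch only (assumes CONFIG_PPC_INDIRECT_MMIO; the token
 * value and the surrounding code are hypothetical): a platform can stash a
 * bus identifier in the token bits when it creates a mapping, then recover
 * it in its MMIO hooks, while PCI_FIX_ADDR() strips the token before the
 * real access is made:
 *
 *	void __iomem *addr = ioremap(paddr, size);
 *	PCI_SET_ADDR_TOKEN(addr, 3);		// tag mapping as "bus 3"
 *	...
 *	unsigned long bus = PCI_GET_ADDR_TOKEN(addr);
 *	u8 val = __raw_readb(addr);		// PCI_FIX_ADDR() masks the token
 */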

/*
 * Non ordered and non-swapping "raw" accessors
 */

static inline unsigned char __raw_readb(const volatile void __iomem *addr)
{
	return *(volatile unsigned char __force *)PCI_FIX_ADDR(addr);
}
static inline unsigned short __raw_readw(const volatile void __iomem *addr)
{
	return *(volatile unsigned short __force *)PCI_FIX_ADDR(addr);
}
static inline unsigned int __raw_readl(const volatile void __iomem *addr)
{
	return *(volatile unsigned int __force *)PCI_FIX_ADDR(addr);
}
static inline void __raw_writeb(unsigned char v, volatile void __iomem *addr)
{
	*(volatile unsigned char __force *)PCI_FIX_ADDR(addr) = v;
}
static inline void __raw_writew(unsigned short v, volatile void __iomem *addr)
{
	*(volatile unsigned short __force *)PCI_FIX_ADDR(addr) = v;
}
static inline void __raw_writel(unsigned int v, volatile void __iomem *addr)
{
	*(volatile unsigned int __force *)PCI_FIX_ADDR(addr) = v;
}

#ifdef __powerpc64__
static inline unsigned long __raw_readq(const volatile void __iomem *addr)
{
	return *(volatile unsigned long __force *)PCI_FIX_ADDR(addr);
}
static inline void __raw_writeq(unsigned long v, volatile void __iomem *addr)
{
	*(volatile unsigned long __force *)PCI_FIX_ADDR(addr) = v;
}

static inline void __raw_writeq_be(unsigned long v, volatile void __iomem *addr)
{
	__raw_writeq((__force unsigned long)cpu_to_be64(v), addr);
}

/*
 * Real mode versions of the above. Those instructions are only supposed
 * to be used in hypervisor real mode as per the architecture spec.
 */
static inline void __raw_rm_writeb(u8 val, volatile void __iomem *paddr)
{
	__asm__ __volatile__("stbcix %0,0,%1"
		: : "r" (val), "r" (paddr) : "memory");
}

static inline void __raw_rm_writew(u16 val, volatile void __iomem *paddr)
{
	__asm__ __volatile__("sthcix %0,0,%1"
		: : "r" (val), "r" (paddr) : "memory");
}

static inline void __raw_rm_writel(u32 val, volatile void __iomem *paddr)
{
	__asm__ __volatile__("stwcix %0,0,%1"
		: : "r" (val), "r" (paddr) : "memory");
}

static inline void __raw_rm_writeq(u64 val, volatile void __iomem *paddr)
{
	__asm__ __volatile__("stdcix %0,0,%1"
		: : "r" (val), "r" (paddr) : "memory");
}

static inline void __raw_rm_writeq_be(u64 val, volatile void __iomem *paddr)
{
	__raw_rm_writeq((__force u64)cpu_to_be64(val), paddr);
}

static inline u8 __raw_rm_readb(volatile void __iomem *paddr)
{
	u8 ret;
	__asm__ __volatile__("lbzcix %0,0, %1"
			     : "=r" (ret) : "r" (paddr) : "memory");
	return ret;
}

static inline u16 __raw_rm_readw(volatile void __iomem *paddr)
{
	u16 ret;
	__asm__ __volatile__("lhzcix %0,0, %1"
			     : "=r" (ret) : "r" (paddr) : "memory");
	return ret;
}

static inline u32 __raw_rm_readl(volatile void __iomem *paddr)
{
	u32 ret;
	__asm__ __volatile__("lwzcix %0,0, %1"
			     : "=r" (ret) : "r" (paddr) : "memory");
	return ret;
}

static inline u64 __raw_rm_readq(volatile void __iomem *paddr)
{
	u64 ret;
	__asm__ __volatile__("ldcix %0,0, %1"
			     : "=r" (ret) : "r" (paddr) : "memory");
	return ret;
}
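/*
 * Illustrative example only (the device address and offset are
 * hypothetical): the real mode accessors take a physical (real) address and
 * use the cache-inhibited indexed load/store forms, so hypervisor code
 * running with the MMU off can still poke a device register:
 *
 *	void __iomem *regs = (void __iomem *)dev_phys_addr;	// real address
 *	__raw_rm_writeb(0xff, regs + DOORBELL_OFFSET);
 */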
#endif /* __powerpc64__ */

/*
 *
 * PCI PIO and MMIO accessors.
 *
 *
 * On 32 bits, PIO operations have a recovery mechanism in case they trigger
 * machine checks (which they occasionally do when probing non-existent
 * IO ports on some platforms, like PowerMac and 8xx).
 * I always found it to be of dubious reliability and I am tempted to get
 * rid of it one of these days. So if you think it's important to keep it,
 * please voice up asap. We never had it for 64 bits and I do not intend
 * to port it over
 */

#ifdef CONFIG_PPC32

#define __do_in_asm(name, op)				\
static inline unsigned int name(unsigned int port)	\
{							\
	unsigned int x;					\
	__asm__ __volatile__(				\
		"sync\n"				\
		"0:"	op "	%0,0,%1\n"		\
		"1:	twi	0,%0,0\n"		\
		"2:	isync\n"			\
		"3:	nop\n"				\
		"4:\n"					\
		".section .fixup,\"ax\"\n"		\
		"5:	li	%0,-1\n"		\
		"	b	4b\n"			\
		".previous\n"				\
		EX_TABLE(0b, 5b)			\
		EX_TABLE(1b, 5b)			\
		EX_TABLE(2b, 5b)			\
		EX_TABLE(3b, 5b)			\
		: "=&r" (x)				\
		: "r" (port + _IO_BASE)			\
		: "memory");				\
	return x;					\
}

#define __do_out_asm(name, op)				\
static inline void name(unsigned int val, unsigned int port) \
{							\
	__asm__ __volatile__(				\
		"sync\n"				\
		"0:" op " %0,0,%1\n"			\
		"1:	sync\n"				\
		"2:\n"					\
		EX_TABLE(0b, 2b)			\
		EX_TABLE(1b, 2b)			\
		: : "r" (val), "r" (port + _IO_BASE)	\
		: "memory");				\
}

__do_in_asm(_rec_inb, "lbzx")
__do_in_asm(_rec_inw, "lhbrx")
__do_in_asm(_rec_inl, "lwbrx")
__do_out_asm(_rec_outb, "stbx")
__do_out_asm(_rec_outw, "sthbrx")
__do_out_asm(_rec_outl, "stwbrx")

#endif /* CONFIG_PPC32 */

/* The "__do_*" operations below provide the actual "base" implementation
 * for each of the defined accessors. Some of them use the out_* functions
 * directly, some of them still use EEH, though we might change that in the
 * future. Those macros below provide the necessary argument swapping and
 * handling of the IO base for PIO.
 *
 * They are themselves used by the macros that define the actual accessors
 * and can be used by the hooks if any.
 *
 * Note that PIO operations are always defined in terms of their corresponding
 * MMIO operations. That allows platforms like iSeries that want to modify the
 * behaviour of both to only hook on the MMIO version and get both. It's also
 * possible to hook directly at the toplevel PIO operation if they have to
 * be handled differently
 */
#define __do_writeb(val, addr)	out_8(PCI_FIX_ADDR(addr), val)
#define __do_writew(val, addr)	out_le16(PCI_FIX_ADDR(addr), val)
#define __do_writel(val, addr)	out_le32(PCI_FIX_ADDR(addr), val)
#define __do_writeq(val, addr)	out_le64(PCI_FIX_ADDR(addr), val)
#define __do_writew_be(val, addr) out_be16(PCI_FIX_ADDR(addr), val)
#define __do_writel_be(val, addr) out_be32(PCI_FIX_ADDR(addr), val)
#define __do_writeq_be(val, addr) out_be64(PCI_FIX_ADDR(addr), val)

#ifdef CONFIG_EEH
#define __do_readb(addr)	eeh_readb(PCI_FIX_ADDR(addr))
#define __do_readw(addr)	eeh_readw(PCI_FIX_ADDR(addr))
#define __do_readl(addr)	eeh_readl(PCI_FIX_ADDR(addr))
#define __do_readq(addr)	eeh_readq(PCI_FIX_ADDR(addr))
#define __do_readw_be(addr)	eeh_readw_be(PCI_FIX_ADDR(addr))
#define __do_readl_be(addr)	eeh_readl_be(PCI_FIX_ADDR(addr))
#define __do_readq_be(addr)	eeh_readq_be(PCI_FIX_ADDR(addr))
#else /* CONFIG_EEH */
#define __do_readb(addr)	in_8(PCI_FIX_ADDR(addr))
#define __do_readw(addr)	in_le16(PCI_FIX_ADDR(addr))
#define __do_readl(addr)	in_le32(PCI_FIX_ADDR(addr))
#define __do_readq(addr)	in_le64(PCI_FIX_ADDR(addr))
#define __do_readw_be(addr)	in_be16(PCI_FIX_ADDR(addr))
#define __do_readl_be(addr)	in_be32(PCI_FIX_ADDR(addr))
#define __do_readq_be(addr)	in_be64(PCI_FIX_ADDR(addr))
#endif /* !defined(CONFIG_EEH) */

#ifdef CONFIG_PPC32
#define __do_outb(val, port)	_rec_outb(val, port)
#define __do_outw(val, port)	_rec_outw(val, port)
#define __do_outl(val, port)	_rec_outl(val, port)
#define __do_inb(port)		_rec_inb(port)
#define __do_inw(port)		_rec_inw(port)
#define __do_inl(port)		_rec_inl(port)
#else /* CONFIG_PPC32 */
#define __do_outb(val, port)	writeb(val,(PCI_IO_ADDR)_IO_BASE+port);
#define __do_outw(val, port)	writew(val,(PCI_IO_ADDR)_IO_BASE+port);
#define __do_outl(val, port)	writel(val,(PCI_IO_ADDR)_IO_BASE+port);
#define __do_inb(port)		readb((PCI_IO_ADDR)_IO_BASE + port);
#define __do_inw(port)		readw((PCI_IO_ADDR)_IO_BASE + port);
#define __do_inl(port)		readl((PCI_IO_ADDR)_IO_BASE + port);
#endif /* !CONFIG_PPC32 */

#ifdef CONFIG_EEH
#define __do_readsb(a, b, n)	eeh_readsb(PCI_FIX_ADDR(a), (b), (n))
#define __do_readsw(a, b, n)	eeh_readsw(PCI_FIX_ADDR(a), (b), (n))
#define __do_readsl(a, b, n)	eeh_readsl(PCI_FIX_ADDR(a), (b), (n))
#else /* CONFIG_EEH */
#define __do_readsb(a, b, n)	_insb(PCI_FIX_ADDR(a), (b), (n))
#define __do_readsw(a, b, n)	_insw(PCI_FIX_ADDR(a), (b), (n))
#define __do_readsl(a, b, n)	_insl(PCI_FIX_ADDR(a), (b), (n))
#endif /* !CONFIG_EEH */
#define __do_writesb(a, b, n)	_outsb(PCI_FIX_ADDR(a),(b),(n))
#define __do_writesw(a, b, n)	_outsw(PCI_FIX_ADDR(a),(b),(n))
#define __do_writesl(a, b, n)	_outsl(PCI_FIX_ADDR(a),(b),(n))

#define __do_insb(p, b, n)	readsb((PCI_IO_ADDR)_IO_BASE+(p), (b), (n))
#define __do_insw(p, b, n)	readsw((PCI_IO_ADDR)_IO_BASE+(p), (b), (n))
#define __do_insl(p, b, n)	readsl((PCI_IO_ADDR)_IO_BASE+(p), (b), (n))
#define __do_outsb(p, b, n)	writesb((PCI_IO_ADDR)_IO_BASE+(p),(b),(n))
#define __do_outsw(p, b, n)	writesw((PCI_IO_ADDR)_IO_BASE+(p),(b),(n))
#define __do_outsl(p, b, n)	writesl((PCI_IO_ADDR)_IO_BASE+(p),(b),(n))

#define __do_memset_io(addr, c, n)	\
				_memset_io(PCI_FIX_ADDR(addr), c, n)
#define __do_memcpy_toio(dst, src, n)	\
				_memcpy_toio(PCI_FIX_ADDR(dst), src, n)

#ifdef CONFIG_EEH
#define __do_memcpy_fromio(dst, src, n)	\
				eeh_memcpy_fromio(dst, PCI_FIX_ADDR(src), n)
#else /* CONFIG_EEH */
#define __do_memcpy_fromio(dst, src, n)	\
				_memcpy_fromio(dst,PCI_FIX_ADDR(src),n)
#endif /* !CONFIG_EEH */

#ifdef CONFIG_PPC_INDIRECT_PIO
#define DEF_PCI_HOOK_pio(x)	x
#else
#define DEF_PCI_HOOK_pio(x)	NULL
#endif

#ifdef CONFIG_PPC_INDIRECT_MMIO
#define DEF_PCI_HOOK_mem(x)	x
#else
#define DEF_PCI_HOOK_mem(x)	NULL
#endif

/* Structure containing all the hooks */
extern struct ppc_pci_io {

#define DEF_PCI_AC_RET(name, ret, at, al, space, aa)	ret (*name) at;
#define DEF_PCI_AC_NORET(name, at, al, space, aa)	void (*name) at;

#include <asm/io-defs.h>

#undef DEF_PCI_AC_RET
#undef DEF_PCI_AC_NORET

} ppc_pci_io;

/* The inline wrappers */
#define DEF_PCI_AC_RET(name, ret, at, al, space, aa)		\
static inline ret name at					\
{								\
	if (DEF_PCI_HOOK_##space(ppc_pci_io.name) != NULL)	\
		return ppc_pci_io.name al;			\
	return __do_##name al;					\
}

#define DEF_PCI_AC_NORET(name, at, al, space, aa)		\
static inline void name at					\
{								\
	if (DEF_PCI_HOOK_##space(ppc_pci_io.name) != NULL)	\
		ppc_pci_io.name al;				\
	else							\
		__do_##name al;					\
}

#include <asm/io-defs.h>

#undef DEF_PCI_AC_RET
#undef DEF_PCI_AC_NORET
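/*
 * How the machinery above fits together (illustrative, not part of the
 * original header). For each entry in <asm/io-defs.h>, DEF_PCI_AC_RET /
 * DEF_PCI_AC_NORET expand to a wrapper that first tries the matching
 * ppc_pci_io hook and otherwise falls back to the __do_* base version.
 * Roughly, readb() ends up looking like:
 *
 *	static inline u8 readb(const PCI_IO_ADDR addr)
 *	{
 *		if (DEF_PCI_HOOK_mem(ppc_pci_io.readb) != NULL)
 *			return ppc_pci_io.readb(addr);
 *		return __do_readb(addr);
 *	}
 *
 * A platform selecting CONFIG_PPC_INDIRECT_MMIO or CONFIG_PPC_INDIRECT_PIO
 * installs its handlers by filling in the corresponding ppc_pci_io members
 * at boot.
 */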

/* Some drivers check for the presence of readq & writeq with
 * a #ifdef, so we make them happy here.
 */
#ifdef __powerpc64__
#define readq	readq
#define writeq	writeq
#endif

/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access
 */
#define xlate_dev_mem_ptr(p)	__va(p)

/*
 * Convert a virtual cached pointer to an uncached pointer
 */
#define xlate_dev_kmem_ptr(p)	p

/*
 * We don't do relaxed operations yet, at least not with this semantic
 */
#define readb_relaxed(addr)	readb(addr)
#define readw_relaxed(addr)	readw(addr)
#define readl_relaxed(addr)	readl(addr)
#define readq_relaxed(addr)	readq(addr)
#define writeb_relaxed(v, addr)	writeb(v, addr)
#define writew_relaxed(v, addr)	writew(v, addr)
#define writel_relaxed(v, addr)	writel(v, addr)
#define writeq_relaxed(v, addr)	writeq(v, addr)

#include <asm-generic/iomap.h>

#ifdef CONFIG_PPC32
#define mmiowb()
#else
/*
 * Enforce synchronisation of stores vs. spin_unlock
 * (this does it explicitly, though our implementation of spin_unlock
 * does it implicitly too)
 */
static inline void mmiowb(void)
{
	unsigned long tmp;

	__asm__ __volatile__("sync; li %0,0; stb %0,%1(13)"
	: "=&r" (tmp) : "i" (offsetof(struct paca_struct, io_sync))
	: "memory");
}
#endif /* !CONFIG_PPC32 */
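/*
 * Illustrative use of mmiowb() (not from this header; the lock, register
 * offset and device pointer are hypothetical): it keeps MMIO writes issued
 * under a spinlock from being reordered past the unlock as seen by the
 * device when the lock migrates between CPUs:
 *
 *	spin_lock(&dev->hw_lock);
 *	writel(val, dev->regs + REG_DOORBELL);
 *	mmiowb();
 *	spin_unlock(&dev->hw_lock);
 */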

static inline void iosync(void)
{
	__asm__ __volatile__ ("sync" : : : "memory");
}

/* Enforce in-order execution of data I/O.
 * No distinction between read/write on PPC; use eieio for all three.
 * Those are fairly weak though. They don't provide a barrier between
 * MMIO and cacheable storage nor do they provide a barrier vs. locks,
 * they only provide barriers between 2 __raw MMIO operations and
 * possibly break write combining.
 */
#define iobarrier_rw() eieio()
#define iobarrier_r()  eieio()
#define iobarrier_w()  eieio()
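/*
 * Illustrative sketch (hypothetical registers): ordering two __raw_*
 * accesses with respect to each other using iobarrier_w(), while still
 * avoiding the full sync that out_le32()/writel() would insert on every
 * store:
 *
 *	__raw_writel(lo, regs + REG_ADDR_LO);
 *	iobarrier_w();
 *	__raw_writel(hi, regs + REG_ADDR_HI);
 */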


/*
 * output pause versions need a delay at least for the
 * w83c105 ide controller in a p610.
 */
#define inb_p(port)		inb(port)
#define outb_p(val, port)	(udelay(1), outb((val), (port)))
#define inw_p(port)		inw(port)
#define outw_p(val, port)	(udelay(1), outw((val), (port)))
#define inl_p(port)		inl(port)
#define outl_p(val, port)	(udelay(1), outl((val), (port)))


#define IO_SPACE_LIMIT ~(0UL)

703 | /** |
704 | * ioremap - map bus memory into CPU space | |
705 | * @address: bus address of the memory | |
706 | * @size: size of the resource to map | |
707 | * | |
708 | * ioremap performs a platform specific sequence of operations to | |
709 | * make bus memory CPU accessible via the readb/readw/readl/writeb/ | |
710 | * writew/writel functions and the other mmio helpers. The returned | |
711 | * address is not guaranteed to be usable directly as a virtual | |
712 | * address. | |
4cb3cee0 BH |
713 | * |
714 | * We provide a few variations of it: | |
715 | * | |
716 | * * ioremap is the standard one and provides non-cacheable guarded mappings | |
717 | * and can be hooked by the platform via ppc_md | |
718 | * | |
40f1ce7f AB |
719 | * * ioremap_prot allows to specify the page flags as an argument and can |
720 | * also be hooked by the platform via ppc_md. | |
4cb3cee0 BH |
721 | * |
722 | * * ioremap_nocache is identical to ioremap | |
723 | * | |
be135f40 AB |
724 | * * ioremap_wc enables write combining |
725 | * | |
86c391bd CL |
726 | * * ioremap_wt enables write through |
727 | * | |
728 | * * ioremap_coherent maps coherent cached memory | |
729 | * | |
4cb3cee0 BH |
730 | * * iounmap undoes such a mapping and can be hooked |
731 | * | |
3d5134ee BH |
732 | * * __ioremap_at (and the pending __iounmap_at) are low level functions to |
733 | * create hand-made mappings for use only by the PCI code and cannot | |
734 | * currently be hooked. Must be page aligned. | |
4cb3cee0 BH |
735 | * |
736 | * * __ioremap is the low level implementation used by ioremap and | |
40f1ce7f | 737 | * ioremap_prot and cannot be hooked (but can be used by a hook on one |
4cb3cee0 BH |
738 | * of the previous ones) |
739 | * | |
1cdab55d BH |
740 | * * __ioremap_caller is the same as above but takes an explicit caller |
741 | * reference rather than using __builtin_return_address(0) | |
742 | * | |
4cb3cee0 BH |
743 | * * __iounmap, is the low level implementation used by iounmap and cannot |
744 | * be hooked (but can be used by a hook on iounmap) | |
745 | * | |
1da177e4 | 746 | */ |
68a64357 | 747 | extern void __iomem *ioremap(phys_addr_t address, unsigned long size); |
40f1ce7f AB |
748 | extern void __iomem *ioremap_prot(phys_addr_t address, unsigned long size, |
749 | unsigned long flags); | |
be135f40 | 750 | extern void __iomem *ioremap_wc(phys_addr_t address, unsigned long size); |
86c391bd CL |
751 | void __iomem *ioremap_wt(phys_addr_t address, unsigned long size); |
752 | void __iomem *ioremap_coherent(phys_addr_t address, unsigned long size); | |
1da177e4 | 753 | #define ioremap_nocache(addr, size) ioremap((addr), (size)) |
4c73e892 | 754 | #define ioremap_uc(addr, size) ioremap((addr), (size)) |
f855b2f5 OH |
755 | #define ioremap_cache(addr, size) \ |
756 | ioremap_prot((addr), (size), pgprot_val(PAGE_KERNEL)) | |
a1f242ff | 757 | |
68a64357 | 758 | extern void iounmap(volatile void __iomem *addr); |
4cb3cee0 | 759 | |
68a64357 | 760 | extern void __iomem *__ioremap(phys_addr_t, unsigned long size, |
4cb3cee0 | 761 | unsigned long flags); |
1cdab55d | 762 | extern void __iomem *__ioremap_caller(phys_addr_t, unsigned long size, |
c766ee72 | 763 | pgprot_t prot, void *caller); |
1cdab55d | 764 | |
68a64357 | 765 | extern void __iounmap(volatile void __iomem *addr); |
4cb3cee0 | 766 | |
3d5134ee | 767 | extern void __iomem * __ioremap_at(phys_addr_t pa, void *ea, |
c766ee72 | 768 | unsigned long size, pgprot_t prot); |
3d5134ee | 769 | extern void __iounmap_at(void *ea, unsigned long size); |
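/*
 * Typical life cycle of a mapping (illustrative only; the BAR index, error
 * handling and register offset are hypothetical):
 *
 *	void __iomem *regs;
 *
 *	regs = ioremap(pci_resource_start(pdev, 0),
 *		       pci_resource_len(pdev, 0));
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(0x1, regs + REG_RESET);		// non-cacheable, guarded
 *	...
 *	iounmap(regs);
 */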

/*
 * When CONFIG_PPC_INDIRECT_PIO is set, we use the generic iomap implementation
 * which needs some additional definitions here. They basically allow PIO
 * space overall to be 1GB. This will work as long as we never try to use
 * iomap to map MMIO below 1GB which should be fine on ppc64
 */
#define HAVE_ARCH_PIO_SIZE		1
#define PIO_OFFSET			0x00000000UL
#define PIO_MASK			(FULL_IO_SIZE - 1)
#define PIO_RESERVED			(FULL_IO_SIZE)

#define mmio_read16be(addr)		readw_be(addr)
#define mmio_read32be(addr)		readl_be(addr)
#define mmio_write16be(val, addr)	writew_be(val, addr)
#define mmio_write32be(val, addr)	writel_be(val, addr)
#define mmio_insb(addr, dst, count)	readsb(addr, dst, count)
#define mmio_insw(addr, dst, count)	readsw(addr, dst, count)
#define mmio_insl(addr, dst, count)	readsl(addr, dst, count)
#define mmio_outsb(addr, src, count)	writesb(addr, src, count)
#define mmio_outsw(addr, src, count)	writesw(addr, src, count)
#define mmio_outsl(addr, src, count)	writesl(addr, src, count)

/**
 * virt_to_phys - map virtual addresses to physical
 * @address: address to remap
 *
 * The returned physical address is the physical (CPU) mapping for
 * the memory address given. It is only valid to use this function on
 * addresses directly mapped or allocated via kmalloc.
 *
 * This function does not give bus mappings for DMA transfers. In
 * almost all conceivable cases a device driver should not be using
 * this function
 */
static inline unsigned long virt_to_phys(volatile void * address)
{
	return __pa((unsigned long)address);
}

/**
 * phys_to_virt - map physical address to virtual
 * @address: address to remap
 *
 * The returned virtual address is a current CPU mapping for
 * the memory address given. It is only valid to use this function on
 * addresses that have a kernel mapping
 *
 * This function does not handle bus mappings for DMA transfers. In
 * almost all conceivable cases a device driver should not be using
 * this function
 */
static inline void * phys_to_virt(unsigned long address)
{
	return (void *)__va(address);
}

/*
 * Change "struct page" to physical address.
 */
#define page_to_phys(page)	((phys_addr_t)page_to_pfn(page) << PAGE_SHIFT)

/*
 * 32-bit still uses virt_to_bus() for its implementation of DMA
 * mappings so we have to keep it defined here. We also have some old
 * drivers (shame shame shame) that use bus_to_virt() and haven't been
 * fixed yet so I need to define it here.
 */
#ifdef CONFIG_PPC32

static inline unsigned long virt_to_bus(volatile void * address)
{
	if (address == NULL)
		return 0;
	return __pa(address) + PCI_DRAM_OFFSET;
}

static inline void * bus_to_virt(unsigned long address)
{
	if (address == 0)
		return NULL;
	return __va(address - PCI_DRAM_OFFSET);
}

#define page_to_bus(page)	(page_to_phys(page) + PCI_DRAM_OFFSET)

#endif /* CONFIG_PPC32 */

/* access ports */
#define setbits32(_addr, _v) out_be32((_addr), in_be32(_addr) |  (_v))
#define clrbits32(_addr, _v) out_be32((_addr), in_be32(_addr) & ~(_v))

#define setbits16(_addr, _v) out_be16((_addr), in_be16(_addr) |  (_v))
#define clrbits16(_addr, _v) out_be16((_addr), in_be16(_addr) & ~(_v))

#define setbits8(_addr, _v) out_8((_addr), in_8(_addr) |  (_v))
#define clrbits8(_addr, _v) out_8((_addr), in_8(_addr) & ~(_v))

/* Clear and set bits in one shot.  These macros can be used to clear and
 * set multiple bits in a register using a single read-modify-write.  These
 * macros can also be used to set a multiple-bit bit pattern using a mask,
 * by specifying the mask in the 'clear' parameter and the new bit pattern
 * in the 'set' parameter.
 */

#define clrsetbits(type, addr, clear, set) \
	out_##type((addr), (in_##type(addr) & ~(clear)) | (set))

#ifdef __powerpc64__
#define clrsetbits_be64(addr, clear, set) clrsetbits(be64, addr, clear, set)
#define clrsetbits_le64(addr, clear, set) clrsetbits(le64, addr, clear, set)
#endif

#define clrsetbits_be32(addr, clear, set) clrsetbits(be32, addr, clear, set)
#define clrsetbits_le32(addr, clear, set) clrsetbits(le32, addr, clear, set)

#define clrsetbits_be16(addr, clear, set) clrsetbits(be16, addr, clear, set)
#define clrsetbits_le16(addr, clear, set) clrsetbits(le16, addr, clear, set)

#define clrsetbits_8(addr, clear, set) clrsetbits(8, addr, clear, set)
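/*
 * Example (illustrative; the register pointer and bit masks are
 * hypothetical): update a 2-bit clock-select field in a single
 * read-modify-write instead of separate clrbits32()/setbits32() calls:
 *
 *	// clear CLKSEL_MASK (0x30) and set the new value CLKSEL_DIV4 (0x20)
 *	// while leaving the other bits of the register untouched
 *	clrsetbits_be32(&regs->ctrl, CLKSEL_MASK, CLKSEL_DIV4);
 */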

#endif /* __KERNEL__ */

#endif /* _ASM_POWERPC_IO_H */