/* MN10300 I/O port emulation and memory-mapped I/O
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells ([email protected])
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */
#ifndef _ASM_IO_H
#define _ASM_IO_H

#include <asm/page.h> /* I/O is all done through memory accesses */
#include <asm/cpu-regs.h>
#include <asm/cacheflush.h>

#define mmiowb() do {} while (0)

/*****************************************************************************/
/*
 * readX/writeX() are used to access memory-mapped devices. On some
 * architectures memory-mapped I/O has to be accessed specially; on the
 * MN10300 we just read/write the memory location directly.
 */
static inline u8 readb(const volatile void __iomem *addr)
{
	return *(const volatile u8 *) addr;
}

static inline u16 readw(const volatile void __iomem *addr)
{
	return *(const volatile u16 *) addr;
}

static inline u32 readl(const volatile void __iomem *addr)
{
	return *(const volatile u32 *) addr;
}

#define __raw_readb readb
#define __raw_readw readw
#define __raw_readl readl

#define readb_relaxed readb
#define readw_relaxed readw
#define readl_relaxed readl

static inline void writeb(u8 b, volatile void __iomem *addr)
{
	*(volatile u8 *) addr = b;
}

static inline void writew(u16 b, volatile void __iomem *addr)
{
	*(volatile u16 *) addr = b;
}

static inline void writel(u32 b, volatile void __iomem *addr)
{
	*(volatile u32 *) addr = b;
}

#define __raw_writeb writeb
#define __raw_writew writew
#define __raw_writel writel

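/*
 * Illustrative sketch, not part of the original header: how a driver might
 * use the accessors above on a mapped register block.  The register layout
 * (control at +0x00, status at +0x04, "ready" bit 0x01) is hypothetical and
 * chosen only to show the calling convention.
 */
static inline void example_start_device(void __iomem *base)
{
	writel(0x1, base + 0x00);		/* issue a "go" command */
	while (!(readl(base + 0x04) & 0x01))	/* poll a "ready" bit */
		;	/* a real driver would bound the wait or sleep */
}
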
/*****************************************************************************/
/*
 * traditional input/output functions
 */
static inline u8 inb_local(unsigned long addr)
{
	return readb((volatile void __iomem *) addr);
}

static inline void outb_local(u8 b, unsigned long addr)
{
	return writeb(b, (volatile void __iomem *) addr);
}

static inline u8 inb(unsigned long addr)
{
	return readb((volatile void __iomem *) addr);
}

static inline u16 inw(unsigned long addr)
{
	return readw((volatile void __iomem *) addr);
}

static inline u32 inl(unsigned long addr)
{
	return readl((volatile void __iomem *) addr);
}

static inline void outb(u8 b, unsigned long addr)
{
	return writeb(b, (volatile void __iomem *) addr);
}

static inline void outw(u16 b, unsigned long addr)
{
	return writew(b, (volatile void __iomem *) addr);
}

static inline void outl(u32 b, unsigned long addr)
{
	return writel(b, (volatile void __iomem *) addr);
}

#define inb_p(addr)	inb(addr)
#define inw_p(addr)	inw(addr)
#define inl_p(addr)	inl(addr)
#define outb_p(x, addr)	outb((x), (addr))
#define outw_p(x, addr)	outw((x), (addr))
#define outl_p(x, addr)	outl((x), (addr))

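/*
 * Illustrative sketch, not part of the original header: on the MN10300
 * "port" I/O is emulated with memory accesses, so inb()/outb() take what is
 * really a memory-mapped register address.  The 8250-style register layout
 * (LSR at +5, TX-empty bit 0x20, THR at +0) and uart_base are assumptions
 * used only for the example.
 */
static inline void example_uart_putc(unsigned long uart_base, char c)
{
	while (!(inb(uart_base + 5) & 0x20))	/* wait for TX empty */
		;
	outb(c, uart_base + 0);			/* write the character */
}
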
static inline void insb(unsigned long addr, void *buffer, int count)
{
	if (count) {
		u8 *buf = buffer;
		do {
			u8 x = inb(addr);
			*buf++ = x;
		} while (--count);
	}
}

static inline void insw(unsigned long addr, void *buffer, int count)
{
	if (count) {
		u16 *buf = buffer;
		do {
			u16 x = inw(addr);
			*buf++ = x;
		} while (--count);
	}
}

static inline void insl(unsigned long addr, void *buffer, int count)
{
	if (count) {
		u32 *buf = buffer;
		do {
			u32 x = inl(addr);
			*buf++ = x;
		} while (--count);
	}
}

static inline void outsb(unsigned long addr, const void *buffer, int count)
{
	if (count) {
		const u8 *buf = buffer;
		do {
			outb(*buf++, addr);
		} while (--count);
	}
}

static inline void outsw(unsigned long addr, const void *buffer, int count)
{
	if (count) {
		const u16 *buf = buffer;
		do {
			outw(*buf++, addr);
		} while (--count);
	}
}

extern void __outsl(unsigned long addr, const void *buffer, int count);
static inline void outsl(unsigned long addr, const void *buffer, int count)
{
	if ((unsigned long) buffer & 0x3)
		return __outsl(addr, buffer, count);

	if (count) {
		const u32 *buf = buffer;
		do {
			outl(*buf++, addr);
		} while (--count);
	}
}
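
/*
 * Illustrative sketch, not part of the original header: the string forms
 * repeatedly access a single (FIFO-style) register and step through the
 * memory buffer; note that outsl() punts to the out-of-line __outsl() when
 * the buffer is not 32-bit aligned.  The data register address and the
 * 512-byte sector size below are hypothetical.
 */
static inline void example_read_sector(unsigned long data_reg, void *buf)
{
	/* pull 512 bytes from a hypothetical 16-bit data FIFO */
	insw(data_reg, buf, 512 / 2);
}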

#define ioread8(addr)		readb(addr)
#define ioread16(addr)		readw(addr)
#define ioread32(addr)		readl(addr)

#define iowrite8(v, addr)	writeb((v), (addr))
#define iowrite16(v, addr)	writew((v), (addr))
#define iowrite32(v, addr)	writel((v), (addr))

#define ioread8_rep(p, dst, count) \
	insb((unsigned long) (p), (dst), (count))
#define ioread16_rep(p, dst, count) \
	insw((unsigned long) (p), (dst), (count))
#define ioread32_rep(p, dst, count) \
	insl((unsigned long) (p), (dst), (count))

#define iowrite8_rep(p, src, count) \
	outsb((unsigned long) (p), (src), (count))
#define iowrite16_rep(p, src, count) \
	outsw((unsigned long) (p), (src), (count))
#define iowrite32_rep(p, src, count) \
	outsl((unsigned long) (p), (src), (count))


#define IO_SPACE_LIMIT 0xffffffff

#ifdef __KERNEL__

#include <linux/vmalloc.h>
#define __io_virt(x) ((void *) (x))

/* Create a virtual mapping cookie for a PCI BAR (memory or IO) */
struct pci_dev;
extern void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max);
static inline void pci_iounmap(struct pci_dev *dev, void __iomem *p)
{
}

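/*
 * Illustrative sketch, not part of the original header: typical use of the
 * pci_iomap()/pci_iounmap() pair declared above.  BAR 0 and the length of 0
 * ("map the whole BAR") are example arguments only.
 */
static inline void __iomem *example_map_bar0(struct pci_dev *dev)
{
	/* returns NULL on failure, so callers must check the result */
	return pci_iomap(dev, 0, 0);
}
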
/*
 * Change virtual addresses to physical addresses and vice versa.
 * These are pretty trivial.
 */
static inline unsigned long virt_to_phys(volatile void *address)
{
	return __pa(address);
}

static inline void *phys_to_virt(unsigned long address)
{
	return __va(address);
}

/*
 * Map a physical memory range into the kernel's address space.  On the
 * MN10300 this is an identity mapping, so no page tables are touched.
 */
static inline void *__ioremap(unsigned long offset, unsigned long size,
			      unsigned long flags)
{
	return (void *) offset;
}

static inline void *ioremap(unsigned long offset, unsigned long size)
{
	return (void *) offset;
}

/*
 * This one maps high address device memory and turns off caching for that
 * area.  It's useful if some control registers are in such an area and write
 * combining or read caching is not desirable:
 */
static inline void *ioremap_nocache(unsigned long offset, unsigned long size)
{
	return (void *) (offset | 0x20000000);
}
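
/*
 * Illustrative sketch, not part of the original header: ioremap() here is an
 * identity mapping, and ioremap_nocache() just selects the uncacheable alias
 * of the same physical address by setting bit 0x20000000.  The physical
 * address and 4KiB size passed by the caller are hypothetical.
 */
static inline u32 example_read_ctrl(unsigned long phys)
{
	void *regs = ioremap_nocache(phys, 0x1000);	/* phys | 0x20000000 */
	u32 val = readl((volatile void __iomem *) regs);

	iounmap(regs);		/* a no-op on this platform */
	return val;
}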

static inline void iounmap(void *addr)
{
}

static inline void __iomem *ioport_map(unsigned long port, unsigned int nr)
{
	return (void __iomem *) port;
}

static inline void ioport_unmap(void __iomem *p)
{
}

#define xlate_dev_kmem_ptr(p)	((void *) (p))
#define xlate_dev_mem_ptr(p)	((void *) (p))

/*
 * PCI bus iomem addresses must be in the region 0x80000000-0x9fffffff
 */
static inline unsigned long virt_to_bus(volatile void *address)
{
	return ((unsigned long) address) & ~0x20000000;
}

static inline void *bus_to_virt(unsigned long address)
{
	return (void *) address;
}

#define page_to_bus page_to_phys
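
/*
 * Illustrative note, not part of the original header: virt_to_bus() strips
 * the "uncacheable" bit that ioremap_nocache() sets, so the cached and
 * uncached aliases of a buffer yield the same bus address, e.g. with
 * hypothetical addresses:
 *
 *	virt_to_bus((void *) 0x90001000) == 0x90001000
 *	virt_to_bus((void *) 0xb0001000) == 0x90001000
 */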

#define memset_io(a, b, c)	memset(__io_virt(a), (b), (c))
#define memcpy_fromio(a, b, c)	memcpy((a), __io_virt(b), (c))
#define memcpy_toio(a, b, c)	memcpy(__io_virt(a), (b), (c))

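/*
 * Illustrative sketch, not part of the original header: because device
 * memory is directly addressable here, the *_io() helpers above reduce to
 * plain memset()/memcpy().  The window pointer and length are hypothetical.
 */
static inline void example_fill_window(void __iomem *win, const void *src,
				       unsigned long len)
{
	memset_io(win, 0, len);		/* clear the device window */
	memcpy_toio(win, src, len);	/* then copy the payload into it */
}
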
#endif /* __KERNEL__ */

#endif /* _ASM_IO_H */