Diffstat (limited to 'include/asm-generic/io.h')
-rw-r--r--   include/asm-generic/io.h   716
1 file changed, 588 insertions(+), 128 deletions(-)
diff --git a/include/asm-generic/io.h b/include/asm-generic/io.h
index b8fdc57a7335..00483d769d86 100644
--- a/include/asm-generic/io.h
+++ b/include/asm-generic/io.h
@@ -12,6 +12,7 @@
12 | #define __ASM_GENERIC_IO_H | 12 | #define __ASM_GENERIC_IO_H |
13 | 13 | ||
14 | #include <asm/page.h> /* I/O is all done through memory accesses */ | 14 | #include <asm/page.h> /* I/O is all done through memory accesses */ |
15 | #include <linux/string.h> /* for memset() and memcpy() */ | ||
15 | #include <linux/types.h> | 16 | #include <linux/types.h> |
16 | 17 | ||
17 | #ifdef CONFIG_GENERIC_IOMAP | 18 | #ifdef CONFIG_GENERIC_IOMAP |
@@ -24,260 +25,654 @@
24 | #define mmiowb() do {} while (0) | 25 | #define mmiowb() do {} while (0) |
25 | #endif | 26 | #endif |
26 | 27 | ||
27 | /*****************************************************************************/ | ||
28 | /* | 28 | /* |
29 | * readX/writeX() are used to access memory mapped devices. On some | 29 | * __raw_{read,write}{b,w,l,q}() access memory in native endianness. |
30 | * architectures the memory mapped IO stuff needs to be accessed | 30 | * |
31 | * differently. On the simple architectures, we just read/write the | 31 | * On some architectures memory mapped IO needs to be accessed differently. |
32 | * memory location directly. | 32 | * On the simple architectures, we just read/write the memory location |
33 | * directly. | ||
33 | */ | 34 | */ |
35 | |||
34 | #ifndef __raw_readb | 36 | #ifndef __raw_readb |
37 | #define __raw_readb __raw_readb | ||
35 | static inline u8 __raw_readb(const volatile void __iomem *addr) | 38 | static inline u8 __raw_readb(const volatile void __iomem *addr) |
36 | { | 39 | { |
37 | return *(const volatile u8 __force *) addr; | 40 | return *(const volatile u8 __force *)addr; |
38 | } | 41 | } |
39 | #endif | 42 | #endif |
40 | 43 | ||
41 | #ifndef __raw_readw | 44 | #ifndef __raw_readw |
45 | #define __raw_readw __raw_readw | ||
42 | static inline u16 __raw_readw(const volatile void __iomem *addr) | 46 | static inline u16 __raw_readw(const volatile void __iomem *addr) |
43 | { | 47 | { |
44 | return *(const volatile u16 __force *) addr; | 48 | return *(const volatile u16 __force *)addr; |
45 | } | 49 | } |
46 | #endif | 50 | #endif |
47 | 51 | ||
48 | #ifndef __raw_readl | 52 | #ifndef __raw_readl |
53 | #define __raw_readl __raw_readl | ||
49 | static inline u32 __raw_readl(const volatile void __iomem *addr) | 54 | static inline u32 __raw_readl(const volatile void __iomem *addr) |
50 | { | 55 | { |
51 | return *(const volatile u32 __force *) addr; | 56 | return *(const volatile u32 __force *)addr; |
52 | } | 57 | } |
53 | #endif | 58 | #endif |
54 | 59 | ||
55 | #define readb __raw_readb | 60 | #ifdef CONFIG_64BIT |
56 | 61 | #ifndef __raw_readq | |
57 | #define readw readw | 62 | #define __raw_readq __raw_readq |
58 | static inline u16 readw(const volatile void __iomem *addr) | 63 | static inline u64 __raw_readq(const volatile void __iomem *addr) |
59 | { | ||
60 | return __le16_to_cpu(__raw_readw(addr)); | ||
61 | } | ||
62 | |||
63 | #define readl readl | ||
64 | static inline u32 readl(const volatile void __iomem *addr) | ||
65 | { | 64 | { |
66 | return __le32_to_cpu(__raw_readl(addr)); | 65 | return *(const volatile u64 __force *)addr; |
67 | } | 66 | } |
67 | #endif | ||
68 | #endif /* CONFIG_64BIT */ | ||
68 | 69 | ||
69 | #ifndef __raw_writeb | 70 | #ifndef __raw_writeb |
70 | static inline void __raw_writeb(u8 b, volatile void __iomem *addr) | 71 | #define __raw_writeb __raw_writeb |
72 | static inline void __raw_writeb(u8 value, volatile void __iomem *addr) | ||
71 | { | 73 | { |
72 | *(volatile u8 __force *) addr = b; | 74 | *(volatile u8 __force *)addr = value; |
73 | } | 75 | } |
74 | #endif | 76 | #endif |
75 | 77 | ||
76 | #ifndef __raw_writew | 78 | #ifndef __raw_writew |
77 | static inline void __raw_writew(u16 b, volatile void __iomem *addr) | 79 | #define __raw_writew __raw_writew |
80 | static inline void __raw_writew(u16 value, volatile void __iomem *addr) | ||
78 | { | 81 | { |
79 | *(volatile u16 __force *) addr = b; | 82 | *(volatile u16 __force *)addr = value; |
80 | } | 83 | } |
81 | #endif | 84 | #endif |
82 | 85 | ||
83 | #ifndef __raw_writel | 86 | #ifndef __raw_writel |
84 | static inline void __raw_writel(u32 b, volatile void __iomem *addr) | 87 | #define __raw_writel __raw_writel |
88 | static inline void __raw_writel(u32 value, volatile void __iomem *addr) | ||
85 | { | 89 | { |
86 | *(volatile u32 __force *) addr = b; | 90 | *(volatile u32 __force *)addr = value; |
87 | } | 91 | } |
88 | #endif | 92 | #endif |
89 | 93 | ||
90 | #define writeb __raw_writeb | ||
91 | #define writew(b,addr) __raw_writew(__cpu_to_le16(b),addr) | ||
92 | #define writel(b,addr) __raw_writel(__cpu_to_le32(b),addr) | ||
93 | |||
94 | #ifdef CONFIG_64BIT | 94 | #ifdef CONFIG_64BIT |
95 | #ifndef __raw_readq | 95 | #ifndef __raw_writeq |
96 | static inline u64 __raw_readq(const volatile void __iomem *addr) | 96 | #define __raw_writeq __raw_writeq |
97 | static inline void __raw_writeq(u64 value, volatile void __iomem *addr) | ||
97 | { | 98 | { |
98 | return *(const volatile u64 __force *) addr; | 99 | *(volatile u64 __force *)addr = value; |
99 | } | 100 | } |
100 | #endif | 101 | #endif |
102 | #endif /* CONFIG_64BIT */ | ||
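
A rough usage sketch (not part of the patch): the __raw_{read,write}{b,w,l,q}() helpers above move data exactly as it appears on the bus, with no byte swapping and no ordering guarantees, so they suit opaque word-at-a-time copies. FIFO_DATA and fifo_drain() below are hypothetical names chosen only for illustration.

#include <linux/io.h>
#include <linux/types.h>

#define FIFO_DATA 0x10 /* hypothetical data-port offset */

/* Copy raw 32-bit words out of a device FIFO without any endian conversion. */
static void fifo_drain(void __iomem *base, u32 *dst, unsigned int words)
{
        while (words--)
                *dst++ = __raw_readl(base + FIFO_DATA);
}
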
101 | 103 | ||
102 | #define readq readq | 104 | /* |
103 | static inline u64 readq(const volatile void __iomem *addr) | 105 | * {read,write}{b,w,l,q}() access little endian memory and return result in |
104 | { | 106 | * native endianness. |
105 | return __le64_to_cpu(__raw_readq(addr)); | 107 | */ |
106 | } | ||
107 | 108 | ||
108 | #ifndef __raw_writeq | 109 | #ifndef readb |
109 | static inline void __raw_writeq(u64 b, volatile void __iomem *addr) | 110 | #define readb readb |
111 | static inline u8 readb(const volatile void __iomem *addr) | ||
110 | { | 112 | { |
111 | *(volatile u64 __force *) addr = b; | 113 | return __raw_readb(addr); |
112 | } | 114 | } |
113 | #endif | 115 | #endif |
114 | 116 | ||
115 | #define writeq(b, addr) __raw_writeq(__cpu_to_le64(b), addr) | 117 | #ifndef readw |
116 | #endif /* CONFIG_64BIT */ | 118 | #define readw readw |
117 | 119 | static inline u16 readw(const volatile void __iomem *addr) | |
118 | #ifndef PCI_IOBASE | 120 | { |
119 | #define PCI_IOBASE ((void __iomem *) 0) | 121 | return __le16_to_cpu(__raw_readw(addr)); |
122 | } | ||
120 | #endif | 123 | #endif |
121 | 124 | ||
122 | /*****************************************************************************/ | 125 | #ifndef readl |
123 | /* | 126 | #define readl readl |
124 | * traditional input/output functions | 127 | static inline u32 readl(const volatile void __iomem *addr) |
125 | */ | ||
126 | |||
127 | static inline u8 inb(unsigned long addr) | ||
128 | { | 128 | { |
129 | return readb(addr + PCI_IOBASE); | 129 | return __le32_to_cpu(__raw_readl(addr)); |
130 | } | 130 | } |
131 | #endif | ||
131 | 132 | ||
132 | static inline u16 inw(unsigned long addr) | 133 | #ifdef CONFIG_64BIT |
134 | #ifndef readq | ||
135 | #define readq readq | ||
136 | static inline u64 readq(const volatile void __iomem *addr) | ||
133 | { | 137 | { |
134 | return readw(addr + PCI_IOBASE); | 138 | return __le64_to_cpu(__raw_readq(addr)); |
135 | } | 139 | } |
140 | #endif | ||
141 | #endif /* CONFIG_64BIT */ | ||
136 | 142 | ||
137 | static inline u32 inl(unsigned long addr) | 143 | #ifndef writeb |
144 | #define writeb writeb | ||
145 | static inline void writeb(u8 value, volatile void __iomem *addr) | ||
138 | { | 146 | { |
139 | return readl(addr + PCI_IOBASE); | 147 | __raw_writeb(value, addr); |
140 | } | 148 | } |
149 | #endif | ||
141 | 150 | ||
142 | static inline void outb(u8 b, unsigned long addr) | 151 | #ifndef writew |
152 | #define writew writew | ||
153 | static inline void writew(u16 value, volatile void __iomem *addr) | ||
143 | { | 154 | { |
144 | writeb(b, addr + PCI_IOBASE); | 155 | __raw_writew(cpu_to_le16(value), addr); |
145 | } | 156 | } |
157 | #endif | ||
146 | 158 | ||
147 | static inline void outw(u16 b, unsigned long addr) | 159 | #ifndef writel |
160 | #define writel writel | ||
161 | static inline void writel(u32 value, volatile void __iomem *addr) | ||
148 | { | 162 | { |
149 | writew(b, addr + PCI_IOBASE); | 163 | __raw_writel(__cpu_to_le32(value), addr); |
150 | } | 164 | } |
165 | #endif | ||
151 | 166 | ||
152 | static inline void outl(u32 b, unsigned long addr) | 167 | #ifdef CONFIG_64BIT |
168 | #ifndef writeq | ||
169 | #define writeq writeq | ||
170 | static inline void writeq(u64 value, volatile void __iomem *addr) | ||
153 | { | 171 | { |
154 | writel(b, addr + PCI_IOBASE); | 172 | __raw_writeq(__cpu_to_le64(value), addr); |
155 | } | 173 | } |
174 | #endif | ||
175 | #endif /* CONFIG_64BIT */ | ||
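
A rough usage sketch (not part of the patch): readl()/writel() add the little-endian conversion on top of the raw accessors, which is what most memory-mapped device registers expect. The register offsets and bit values below are hypothetical.

#include <linux/io.h>
#include <linux/types.h>

#define REG_CTRL     0x00 /* hypothetical control register */
#define REG_STATUS   0x04 /* hypothetical status register */
#define CTRL_ENABLE  0x01 /* hypothetical enable bit */
#define STATUS_READY 0x02 /* hypothetical ready bit */

static bool device_enable(void __iomem *base)
{
        /* writel() converts the value to little endian before it hits the bus */
        writel(CTRL_ENABLE, base + REG_CTRL);

        /* readl() converts the little-endian register back to CPU byte order */
        return readl(base + REG_STATUS) & STATUS_READY;
}
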
156 | 176 | ||
157 | #define inb_p(addr) inb(addr) | 177 | /* |
158 | #define inw_p(addr) inw(addr) | 178 | * {read,write}s{b,w,l,q}() repeatedly access the same memory address in |
159 | #define inl_p(addr) inl(addr) | 179 | * native endianness in 8-, 16-, 32- or 64-bit chunks (@count times). |
160 | #define outb_p(x, addr) outb((x), (addr)) | 180 | */ |
161 | #define outw_p(x, addr) outw((x), (addr)) | 181 | #ifndef readsb |
162 | #define outl_p(x, addr) outl((x), (addr)) | 182 | #define readsb readsb |
163 | 183 | static inline void readsb(const volatile void __iomem *addr, void *buffer, | |
164 | #ifndef insb | 184 | unsigned int count) |
165 | static inline void insb(unsigned long addr, void *buffer, int count) | ||
166 | { | 185 | { |
167 | if (count) { | 186 | if (count) { |
168 | u8 *buf = buffer; | 187 | u8 *buf = buffer; |
188 | |||
169 | do { | 189 | do { |
170 | u8 x = __raw_readb(addr + PCI_IOBASE); | 190 | u8 x = __raw_readb(addr); |
171 | *buf++ = x; | 191 | *buf++ = x; |
172 | } while (--count); | 192 | } while (--count); |
173 | } | 193 | } |
174 | } | 194 | } |
175 | #endif | 195 | #endif |
176 | 196 | ||
177 | #ifndef insw | 197 | #ifndef readsw |
178 | static inline void insw(unsigned long addr, void *buffer, int count) | 198 | #define readsw readsw |
199 | static inline void readsw(const volatile void __iomem *addr, void *buffer, | ||
200 | unsigned int count) | ||
179 | { | 201 | { |
180 | if (count) { | 202 | if (count) { |
181 | u16 *buf = buffer; | 203 | u16 *buf = buffer; |
204 | |||
182 | do { | 205 | do { |
183 | u16 x = __raw_readw(addr + PCI_IOBASE); | 206 | u16 x = __raw_readw(addr); |
184 | *buf++ = x; | 207 | *buf++ = x; |
185 | } while (--count); | 208 | } while (--count); |
186 | } | 209 | } |
187 | } | 210 | } |
188 | #endif | 211 | #endif |
189 | 212 | ||
190 | #ifndef insl | 213 | #ifndef readsl |
191 | static inline void insl(unsigned long addr, void *buffer, int count) | 214 | #define readsl readsl |
215 | static inline void readsl(const volatile void __iomem *addr, void *buffer, | ||
216 | unsigned int count) | ||
192 | { | 217 | { |
193 | if (count) { | 218 | if (count) { |
194 | u32 *buf = buffer; | 219 | u32 *buf = buffer; |
220 | |||
195 | do { | 221 | do { |
196 | u32 x = __raw_readl(addr + PCI_IOBASE); | 222 | u32 x = __raw_readl(addr); |
197 | *buf++ = x; | 223 | *buf++ = x; |
198 | } while (--count); | 224 | } while (--count); |
199 | } | 225 | } |
200 | } | 226 | } |
201 | #endif | 227 | #endif |
202 | 228 | ||
203 | #ifndef outsb | 229 | #ifdef CONFIG_64BIT |
204 | static inline void outsb(unsigned long addr, const void *buffer, int count) | 230 | #ifndef readsq |
231 | #define readsq readsq | ||
232 | static inline void readsq(const volatile void __iomem *addr, void *buffer, | ||
233 | unsigned int count) | ||
234 | { | ||
235 | if (count) { | ||
236 | u64 *buf = buffer; | ||
237 | |||
238 | do { | ||
239 | u64 x = __raw_readq(addr); | ||
240 | *buf++ = x; | ||
241 | } while (--count); | ||
242 | } | ||
243 | } | ||
244 | #endif | ||
245 | #endif /* CONFIG_64BIT */ | ||
246 | |||
247 | #ifndef writesb | ||
248 | #define writesb writesb | ||
249 | static inline void writesb(volatile void __iomem *addr, const void *buffer, | ||
250 | unsigned int count) | ||
205 | { | 251 | { |
206 | if (count) { | 252 | if (count) { |
207 | const u8 *buf = buffer; | 253 | const u8 *buf = buffer; |
254 | |||
208 | do { | 255 | do { |
209 | __raw_writeb(*buf++, addr + PCI_IOBASE); | 256 | __raw_writeb(*buf++, addr); |
210 | } while (--count); | 257 | } while (--count); |
211 | } | 258 | } |
212 | } | 259 | } |
213 | #endif | 260 | #endif |
214 | 261 | ||
215 | #ifndef outsw | 262 | #ifndef writesw |
216 | static inline void outsw(unsigned long addr, const void *buffer, int count) | 263 | #define writesw writesw |
264 | static inline void writesw(volatile void __iomem *addr, const void *buffer, | ||
265 | unsigned int count) | ||
217 | { | 266 | { |
218 | if (count) { | 267 | if (count) { |
219 | const u16 *buf = buffer; | 268 | const u16 *buf = buffer; |
269 | |||
220 | do { | 270 | do { |
221 | __raw_writew(*buf++, addr + PCI_IOBASE); | 271 | __raw_writew(*buf++, addr); |
222 | } while (--count); | 272 | } while (--count); |
223 | } | 273 | } |
224 | } | 274 | } |
225 | #endif | 275 | #endif |
226 | 276 | ||
227 | #ifndef outsl | 277 | #ifndef writesl |
228 | static inline void outsl(unsigned long addr, const void *buffer, int count) | 278 | #define writesl writesl |
279 | static inline void writesl(volatile void __iomem *addr, const void *buffer, | ||
280 | unsigned int count) | ||
229 | { | 281 | { |
230 | if (count) { | 282 | if (count) { |
231 | const u32 *buf = buffer; | 283 | const u32 *buf = buffer; |
284 | |||
232 | do { | 285 | do { |
233 | __raw_writel(*buf++, addr + PCI_IOBASE); | 286 | __raw_writel(*buf++, addr); |
234 | } while (--count); | 287 | } while (--count); |
235 | } | 288 | } |
236 | } | 289 | } |
237 | #endif | 290 | #endif |
238 | 291 | ||
239 | #ifndef CONFIG_GENERIC_IOMAP | 292 | #ifdef CONFIG_64BIT |
240 | #define ioread8(addr) readb(addr) | 293 | #ifndef writesq |
241 | #define ioread16(addr) readw(addr) | 294 | #define writesq writesq |
242 | #define ioread16be(addr) __be16_to_cpu(__raw_readw(addr)) | 295 | static inline void writesq(volatile void __iomem *addr, const void *buffer, |
243 | #define ioread32(addr) readl(addr) | 296 | unsigned int count) |
244 | #define ioread32be(addr) __be32_to_cpu(__raw_readl(addr)) | 297 | { |
245 | 298 | if (count) { | |
246 | #define iowrite8(v, addr) writeb((v), (addr)) | 299 | const u64 *buf = buffer; |
247 | #define iowrite16(v, addr) writew((v), (addr)) | 300 | |
248 | #define iowrite16be(v, addr) __raw_writew(__cpu_to_be16(v), addr) | 301 | do { |
249 | #define iowrite32(v, addr) writel((v), (addr)) | 302 | __raw_writeq(*buf++, addr); |
250 | #define iowrite32be(v, addr) __raw_writel(__cpu_to_be32(v), addr) | 303 | } while (--count); |
251 | 304 | } | |
252 | #define ioread8_rep(p, dst, count) \ | 305 | } |
253 | insb((unsigned long) (p), (dst), (count)) | 306 | #endif |
254 | #define ioread16_rep(p, dst, count) \ | 307 | #endif /* CONFIG_64BIT */ |
255 | insw((unsigned long) (p), (dst), (count)) | 308 | |
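
A rough usage sketch (not part of the patch): the {read,write}s*() string variants keep re-reading or re-writing the same address, which matches data-port style FIFOs. REG_DATA and the helpers below are hypothetical.

#include <linux/io.h>
#include <linux/types.h>

#define REG_DATA 0x20 /* hypothetical 32-bit FIFO data register */

/* Pull @count words out of the FIFO; readsl() never advances the address. */
static void fifo_read(void __iomem *base, u32 *buf, unsigned int count)
{
        readsl(base + REG_DATA, buf, count);
}

/* Push @count words into the FIFO. */
static void fifo_write(void __iomem *base, const u32 *buf, unsigned int count)
{
        writesl(base + REG_DATA, buf, count);
}
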
256 | #define ioread32_rep(p, dst, count) \ | 309 | #ifndef PCI_IOBASE |
257 | insl((unsigned long) (p), (dst), (count)) | 310 | #define PCI_IOBASE ((void __iomem *)0) |
258 | 311 | #endif | |
259 | #define iowrite8_rep(p, src, count) \ | ||
260 | outsb((unsigned long) (p), (src), (count)) | ||
261 | #define iowrite16_rep(p, src, count) \ | ||
262 | outsw((unsigned long) (p), (src), (count)) | ||
263 | #define iowrite32_rep(p, src, count) \ | ||
264 | outsl((unsigned long) (p), (src), (count)) | ||
265 | #endif /* CONFIG_GENERIC_IOMAP */ | ||
266 | 312 | ||
267 | #ifndef IO_SPACE_LIMIT | 313 | #ifndef IO_SPACE_LIMIT |
268 | #define IO_SPACE_LIMIT 0xffff | 314 | #define IO_SPACE_LIMIT 0xffff |
269 | #endif | 315 | #endif |
270 | 316 | ||
317 | /* | ||
318 | * {in,out}{b,w,l}() access little endian I/O. {in,out}{b,w,l}_p() can be | ||
319 | * implemented on hardware that needs an additional delay for I/O accesses to | ||
320 | * take effect. | ||
321 | */ | ||
322 | |||
323 | #ifndef inb | ||
324 | #define inb inb | ||
325 | static inline u8 inb(unsigned long addr) | ||
326 | { | ||
327 | return readb(PCI_IOBASE + addr); | ||
328 | } | ||
329 | #endif | ||
330 | |||
331 | #ifndef inw | ||
332 | #define inw inw | ||
333 | static inline u16 inw(unsigned long addr) | ||
334 | { | ||
335 | return readw(PCI_IOBASE + addr); | ||
336 | } | ||
337 | #endif | ||
338 | |||
339 | #ifndef inl | ||
340 | #define inl inl | ||
341 | static inline u32 inl(unsigned long addr) | ||
342 | { | ||
343 | return readl(PCI_IOBASE + addr); | ||
344 | } | ||
345 | #endif | ||
346 | |||
347 | #ifndef outb | ||
348 | #define outb outb | ||
349 | static inline void outb(u8 value, unsigned long addr) | ||
350 | { | ||
351 | writeb(value, PCI_IOBASE + addr); | ||
352 | } | ||
353 | #endif | ||
354 | |||
355 | #ifndef outw | ||
356 | #define outw outw | ||
357 | static inline void outw(u16 value, unsigned long addr) | ||
358 | { | ||
359 | writew(value, PCI_IOBASE + addr); | ||
360 | } | ||
361 | #endif | ||
362 | |||
363 | #ifndef outl | ||
364 | #define outl outl | ||
365 | static inline void outl(u32 value, unsigned long addr) | ||
366 | { | ||
367 | writel(value, PCI_IOBASE + addr); | ||
368 | } | ||
369 | #endif | ||
370 | |||
371 | #ifndef inb_p | ||
372 | #define inb_p inb_p | ||
373 | static inline u8 inb_p(unsigned long addr) | ||
374 | { | ||
375 | return inb(addr); | ||
376 | } | ||
377 | #endif | ||
378 | |||
379 | #ifndef inw_p | ||
380 | #define inw_p inw_p | ||
381 | static inline u16 inw_p(unsigned long addr) | ||
382 | { | ||
383 | return inw(addr); | ||
384 | } | ||
385 | #endif | ||
386 | |||
387 | #ifndef inl_p | ||
388 | #define inl_p inl_p | ||
389 | static inline u32 inl_p(unsigned long addr) | ||
390 | { | ||
391 | return inl(addr); | ||
392 | } | ||
393 | #endif | ||
394 | |||
395 | #ifndef outb_p | ||
396 | #define outb_p outb_p | ||
397 | static inline void outb_p(u8 value, unsigned long addr) | ||
398 | { | ||
399 | outb(value, addr); | ||
400 | } | ||
401 | #endif | ||
402 | |||
403 | #ifndef outw_p | ||
404 | #define outw_p outw_p | ||
405 | static inline void outw_p(u16 value, unsigned long addr) | ||
406 | { | ||
407 | outw(value, addr); | ||
408 | } | ||
409 | #endif | ||
410 | |||
411 | #ifndef outl_p | ||
412 | #define outl_p outl_p | ||
413 | static inline void outl_p(u32 value, unsigned long addr) | ||
414 | { | ||
415 | outl(value, addr); | ||
416 | } | ||
417 | #endif | ||
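
A rough usage sketch (not part of the patch): inb()/outb() take a legacy port number and let this header turn it into an MMIO access relative to PCI_IOBASE. The port and register numbers below are hypothetical.

#include <linux/io.h>
#include <linux/types.h>

#define UART_PORT    0x3f8 /* hypothetical legacy I/O-port base */
#define UART_SCRATCH 7     /* hypothetical scratch register offset */

static bool uart_scratch_test(void)
{
        /* outb()/inb() address I/O-port space; little endian on the wire */
        outb(0xa5, UART_PORT + UART_SCRATCH);
        return inb(UART_PORT + UART_SCRATCH) == 0xa5;
}
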
418 | |||
419 | /* | ||
420 | * {in,out}s{b,w,l}{,_p}() are variants of the above that repeatedly access a | ||
421 | * single I/O port multiple times. | ||
422 | */ | ||
423 | |||
424 | #ifndef insb | ||
425 | #define insb insb | ||
426 | static inline void insb(unsigned long addr, void *buffer, unsigned int count) | ||
427 | { | ||
428 | readsb(PCI_IOBASE + addr, buffer, count); | ||
429 | } | ||
430 | #endif | ||
431 | |||
432 | #ifndef insw | ||
433 | #define insw insw | ||
434 | static inline void insw(unsigned long addr, void *buffer, unsigned int count) | ||
435 | { | ||
436 | readsw(PCI_IOBASE + addr, buffer, count); | ||
437 | } | ||
438 | #endif | ||
439 | |||
440 | #ifndef insl | ||
441 | #define insl insl | ||
442 | static inline void insl(unsigned long addr, void *buffer, unsigned int count) | ||
443 | { | ||
444 | readsl(PCI_IOBASE + addr, buffer, count); | ||
445 | } | ||
446 | #endif | ||
447 | |||
448 | #ifndef outsb | ||
449 | #define outsb outsb | ||
450 | static inline void outsb(unsigned long addr, const void *buffer, | ||
451 | unsigned int count) | ||
452 | { | ||
453 | writesb(PCI_IOBASE + addr, buffer, count); | ||
454 | } | ||
455 | #endif | ||
456 | |||
457 | #ifndef outsw | ||
458 | #define outsw outsw | ||
459 | static inline void outsw(unsigned long addr, const void *buffer, | ||
460 | unsigned int count) | ||
461 | { | ||
462 | writesw(PCI_IOBASE + addr, buffer, count); | ||
463 | } | ||
464 | #endif | ||
465 | |||
466 | #ifndef outsl | ||
467 | #define outsl outsl | ||
468 | static inline void outsl(unsigned long addr, const void *buffer, | ||
469 | unsigned int count) | ||
470 | { | ||
471 | writesl(PCI_IOBASE + addr, buffer, count); | ||
472 | } | ||
473 | #endif | ||
474 | |||
475 | #ifndef insb_p | ||
476 | #define insb_p insb_p | ||
477 | static inline void insb_p(unsigned long addr, void *buffer, unsigned int count) | ||
478 | { | ||
479 | insb(addr, buffer, count); | ||
480 | } | ||
481 | #endif | ||
482 | |||
483 | #ifndef insw_p | ||
484 | #define insw_p insw_p | ||
485 | static inline void insw_p(unsigned long addr, void *buffer, unsigned int count) | ||
486 | { | ||
487 | insw(addr, buffer, count); | ||
488 | } | ||
489 | #endif | ||
490 | |||
491 | #ifndef insl_p | ||
492 | #define insl_p insl_p | ||
493 | static inline void insl_p(unsigned long addr, void *buffer, unsigned int count) | ||
494 | { | ||
495 | insl(addr, buffer, count); | ||
496 | } | ||
497 | #endif | ||
498 | |||
499 | #ifndef outsb_p | ||
500 | #define outsb_p outsb_p | ||
501 | static inline void outsb_p(unsigned long addr, const void *buffer, | ||
502 | unsigned int count) | ||
503 | { | ||
504 | outsb(addr, buffer, count); | ||
505 | } | ||
506 | #endif | ||
507 | |||
508 | #ifndef outsw_p | ||
509 | #define outsw_p outsw_p | ||
510 | static inline void outsw_p(unsigned long addr, const void *buffer, | ||
511 | unsigned int count) | ||
512 | { | ||
513 | outsw(addr, buffer, count); | ||
514 | } | ||
515 | #endif | ||
516 | |||
517 | #ifndef outsl_p | ||
518 | #define outsl_p outsl_p | ||
519 | static inline void outsl_p(unsigned long addr, const void *buffer, | ||
520 | unsigned int count) | ||
521 | { | ||
522 | outsl(addr, buffer, count); | ||
523 | } | ||
524 | #endif | ||
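
A rough usage sketch (not part of the patch): insw()/outsw() repeat a 16-bit port access @count times, the classic pattern for old ATA-style data ports. DATA_PORT is a hypothetical port number.

#include <linux/io.h>
#include <linux/types.h>

#define DATA_PORT 0x1f0 /* hypothetical 16-bit data port */

/* Read @count 16-bit words from one port into @buf. */
static void read_data_words(u16 *buf, unsigned int count)
{
        insw(DATA_PORT, buf, count);
}

/* Write @count 16-bit words from @buf to the same port. */
static void write_data_words(const u16 *buf, unsigned int count)
{
        outsw(DATA_PORT, buf, count);
}
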
525 | |||
526 | #ifndef CONFIG_GENERIC_IOMAP | ||
527 | #ifndef ioread8 | ||
528 | #define ioread8 ioread8 | ||
529 | static inline u8 ioread8(const volatile void __iomem *addr) | ||
530 | { | ||
531 | return readb(addr); | ||
532 | } | ||
533 | #endif | ||
534 | |||
535 | #ifndef ioread16 | ||
536 | #define ioread16 ioread16 | ||
537 | static inline u16 ioread16(const volatile void __iomem *addr) | ||
538 | { | ||
539 | return readw(addr); | ||
540 | } | ||
541 | #endif | ||
542 | |||
543 | #ifndef ioread32 | ||
544 | #define ioread32 ioread32 | ||
545 | static inline u32 ioread32(const volatile void __iomem *addr) | ||
546 | { | ||
547 | return readl(addr); | ||
548 | } | ||
549 | #endif | ||
550 | |||
551 | #ifndef iowrite8 | ||
552 | #define iowrite8 iowrite8 | ||
553 | static inline void iowrite8(u8 value, volatile void __iomem *addr) | ||
554 | { | ||
555 | writeb(value, addr); | ||
556 | } | ||
557 | #endif | ||
558 | |||
559 | #ifndef iowrite16 | ||
560 | #define iowrite16 iowrite16 | ||
561 | static inline void iowrite16(u16 value, volatile void __iomem *addr) | ||
562 | { | ||
563 | writew(value, addr); | ||
564 | } | ||
565 | #endif | ||
566 | |||
567 | #ifndef iowrite32 | ||
568 | #define iowrite32 iowrite32 | ||
569 | static inline void iowrite32(u32 value, volatile void __iomem *addr) | ||
570 | { | ||
571 | writel(value, addr); | ||
572 | } | ||
573 | #endif | ||
574 | |||
575 | #ifndef ioread16be | ||
576 | #define ioread16be ioread16be | ||
577 | static inline u16 ioread16be(const volatile void __iomem *addr) | ||
578 | { | ||
579 | return __be16_to_cpu(__raw_readw(addr)); | ||
580 | } | ||
581 | #endif | ||
582 | |||
583 | #ifndef ioread32be | ||
584 | #define ioread32be ioread32be | ||
585 | static inline u32 ioread32be(const volatile void __iomem *addr) | ||
586 | { | ||
587 | return __be32_to_cpu(__raw_readl(addr)); | ||
588 | } | ||
589 | #endif | ||
590 | |||
591 | #ifndef iowrite16be | ||
592 | #define iowrite16be iowrite16be | ||
593 | static inline void iowrite16be(u16 value, volatile void __iomem *addr) | ||
594 | { | ||
595 | __raw_writew(__cpu_to_be16(value), addr); | ||
596 | } | ||
597 | #endif | ||
598 | |||
599 | #ifndef iowrite32be | ||
600 | #define iowrite32be iowrite32be | ||
601 | static inline void iowrite32be(u32 value, volatile void __iomem *addr) | ||
602 | { | ||
603 | __raw_writel(__cpu_to_be32(value), addr); | ||
604 | } | ||
605 | #endif | ||
606 | |||
607 | #ifndef ioread8_rep | ||
608 | #define ioread8_rep ioread8_rep | ||
609 | static inline void ioread8_rep(const volatile void __iomem *addr, void *buffer, | ||
610 | unsigned int count) | ||
611 | { | ||
612 | readsb(addr, buffer, count); | ||
613 | } | ||
614 | #endif | ||
615 | |||
616 | #ifndef ioread16_rep | ||
617 | #define ioread16_rep ioread16_rep | ||
618 | static inline void ioread16_rep(const volatile void __iomem *addr, | ||
619 | void *buffer, unsigned int count) | ||
620 | { | ||
621 | readsw(addr, buffer, count); | ||
622 | } | ||
623 | #endif | ||
624 | |||
625 | #ifndef ioread32_rep | ||
626 | #define ioread32_rep ioread32_rep | ||
627 | static inline void ioread32_rep(const volatile void __iomem *addr, | ||
628 | void *buffer, unsigned int count) | ||
629 | { | ||
630 | readsl(addr, buffer, count); | ||
631 | } | ||
632 | #endif | ||
633 | |||
634 | #ifndef iowrite8_rep | ||
635 | #define iowrite8_rep iowrite8_rep | ||
636 | static inline void iowrite8_rep(volatile void __iomem *addr, | ||
637 | const void *buffer, | ||
638 | unsigned int count) | ||
639 | { | ||
640 | writesb(addr, buffer, count); | ||
641 | } | ||
642 | #endif | ||
643 | |||
644 | #ifndef iowrite16_rep | ||
645 | #define iowrite16_rep iowrite16_rep | ||
646 | static inline void iowrite16_rep(volatile void __iomem *addr, | ||
647 | const void *buffer, | ||
648 | unsigned int count) | ||
649 | { | ||
650 | writesw(addr, buffer, count); | ||
651 | } | ||
652 | #endif | ||
653 | |||
654 | #ifndef iowrite32_rep | ||
655 | #define iowrite32_rep iowrite32_rep | ||
656 | static inline void iowrite32_rep(volatile void __iomem *addr, | ||
657 | const void *buffer, | ||
658 | unsigned int count) | ||
659 | { | ||
660 | writesl(addr, buffer, count); | ||
661 | } | ||
662 | #endif | ||
663 | #endif /* CONFIG_GENERIC_IOMAP */ | ||
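
A rough usage sketch (not part of the patch): when CONFIG_GENERIC_IOMAP is off, the ioread/iowrite family above mirrors readX()/writeX(), while the *be variants byte-swap from big endian instead, which suits IP blocks with big-endian register files. REG_VERSION is a hypothetical offset.

#include <linux/io.h>
#include <linux/types.h>

#define REG_VERSION 0x00 /* hypothetical big-endian version register */

static u32 read_core_version(void __iomem *base)
{
        /* ioread32be() converts from big endian to CPU byte order */
        return ioread32be(base + REG_VERSION);
}
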
664 | |||
271 | #ifdef __KERNEL__ | 665 | #ifdef __KERNEL__ |
272 | 666 | ||
273 | #include <linux/vmalloc.h> | 667 | #include <linux/vmalloc.h> |
274 | #define __io_virt(x) ((void __force *) (x)) | 668 | #define __io_virt(x) ((void __force *)(x)) |
275 | 669 | ||
276 | #ifndef CONFIG_GENERIC_IOMAP | 670 | #ifndef CONFIG_GENERIC_IOMAP |
277 | struct pci_dev; | 671 | struct pci_dev; |
278 | extern void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max); | 672 | extern void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max); |
279 | 673 | ||
280 | #ifndef pci_iounmap | 674 | #ifndef pci_iounmap |
675 | #define pci_iounmap pci_iounmap | ||
281 | static inline void pci_iounmap(struct pci_dev *dev, void __iomem *p) | 676 | static inline void pci_iounmap(struct pci_dev *dev, void __iomem *p) |
282 | { | 677 | { |
283 | } | 678 | } |
@@ -289,11 +684,15 @@ static inline void pci_iounmap(struct pci_dev *dev, void __iomem *p)
289 | * These are pretty trivial | 684 | * These are pretty trivial |
290 | */ | 685 | */ |
291 | #ifndef virt_to_phys | 686 | #ifndef virt_to_phys |
687 | #define virt_to_phys virt_to_phys | ||
292 | static inline unsigned long virt_to_phys(volatile void *address) | 688 | static inline unsigned long virt_to_phys(volatile void *address) |
293 | { | 689 | { |
294 | return __pa((unsigned long)address); | 690 | return __pa((unsigned long)address); |
295 | } | 691 | } |
692 | #endif | ||
296 | 693 | ||
694 | #ifndef phys_to_virt | ||
695 | #define phys_to_virt phys_to_virt | ||
297 | static inline void *phys_to_virt(unsigned long address) | 696 | static inline void *phys_to_virt(unsigned long address) |
298 | { | 697 | { |
299 | return __va(address); | 698 | return __va(address); |
@@ -306,37 +705,65 @@ static inline void *phys_to_virt(unsigned long address)
306 | * This implementation is for the no-MMU case only... if you have an MMU | 705 | * This implementation is for the no-MMU case only... if you have an MMU |
307 | * you'll need to provide your own definitions. | 706 | * you'll need to provide your own definitions. |
308 | */ | 707 | */ |
708 | |||
309 | #ifndef CONFIG_MMU | 709 | #ifndef CONFIG_MMU |
310 | static inline void __iomem *ioremap(phys_addr_t offset, unsigned long size) | 710 | #ifndef ioremap |
711 | #define ioremap ioremap | ||
712 | static inline void __iomem *ioremap(phys_addr_t offset, size_t size) | ||
311 | { | 713 | { |
312 | return (void __iomem*) (unsigned long)offset; | 714 | return (void __iomem *)(unsigned long)offset; |
313 | } | 715 | } |
716 | #endif | ||
314 | 717 | ||
315 | #define __ioremap(offset, size, flags) ioremap(offset, size) | 718 | #ifndef __ioremap |
719 | #define __ioremap __ioremap | ||
720 | static inline void __iomem *__ioremap(phys_addr_t offset, size_t size, | ||
721 | unsigned long flags) | ||
722 | { | ||
723 | return ioremap(offset, size); | ||
724 | } | ||
725 | #endif | ||
316 | 726 | ||
317 | #ifndef ioremap_nocache | 727 | #ifndef ioremap_nocache |
318 | #define ioremap_nocache ioremap | 728 | #define ioremap_nocache ioremap_nocache |
729 | static inline void __iomem *ioremap_nocache(phys_addr_t offset, size_t size) | ||
730 | { | ||
731 | return ioremap(offset, size); | ||
732 | } | ||
319 | #endif | 733 | #endif |
320 | 734 | ||
321 | #ifndef ioremap_wc | 735 | #ifndef ioremap_wc |
322 | #define ioremap_wc ioremap_nocache | 736 | #define ioremap_wc ioremap_wc |
737 | static inline void __iomem *ioremap_wc(phys_addr_t offset, size_t size) | ||
738 | { | ||
739 | return ioremap_nocache(offset, size); | ||
740 | } | ||
323 | #endif | 741 | #endif |
324 | 742 | ||
743 | #ifndef iounmap | ||
744 | #define iounmap iounmap | ||
325 | static inline void iounmap(void __iomem *addr) | 745 | static inline void iounmap(void __iomem *addr) |
326 | { | 746 | { |
327 | } | 747 | } |
748 | #endif | ||
328 | #endif /* CONFIG_MMU */ | 749 | #endif /* CONFIG_MMU */ |
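
A rough usage sketch (not part of the patch): on !CONFIG_MMU the generic ioremap() above simply casts the physical address, but driver code follows the same map/access/unmap pattern either way. The physical base, window size and ID register below are hypothetical.

#include <linux/errno.h>
#include <linux/io.h>
#include <linux/types.h>

#define DEV_PHYS_BASE 0x40000000UL /* hypothetical device physical address */
#define DEV_REG_SIZE  0x1000       /* hypothetical register window size */

static int device_probe_regs(void)
{
        void __iomem *base;
        u32 id;

        base = ioremap(DEV_PHYS_BASE, DEV_REG_SIZE);
        if (!base)
                return -ENOMEM;

        id = readl(base); /* hypothetical ID register at offset 0 */
        iounmap(base);

        return id ? 0 : -ENODEV;
}
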
329 | 750 | ||
330 | #ifdef CONFIG_HAS_IOPORT_MAP | 751 | #ifdef CONFIG_HAS_IOPORT_MAP |
331 | #ifndef CONFIG_GENERIC_IOMAP | 752 | #ifndef CONFIG_GENERIC_IOMAP |
753 | #ifndef ioport_map | ||
754 | #define ioport_map ioport_map | ||
332 | static inline void __iomem *ioport_map(unsigned long port, unsigned int nr) | 755 | static inline void __iomem *ioport_map(unsigned long port, unsigned int nr) |
333 | { | 756 | { |
334 | return PCI_IOBASE + (port & IO_SPACE_LIMIT); | 757 | return PCI_IOBASE + (port & IO_SPACE_LIMIT); |
335 | } | 758 | } |
759 | #endif | ||
336 | 760 | ||
761 | #ifndef ioport_unmap | ||
762 | #define ioport_unmap ioport_unmap | ||
337 | static inline void ioport_unmap(void __iomem *p) | 763 | static inline void ioport_unmap(void __iomem *p) |
338 | { | 764 | { |
339 | } | 765 | } |
766 | #endif | ||
340 | #else /* CONFIG_GENERIC_IOMAP */ | 767 | #else /* CONFIG_GENERIC_IOMAP */ |
341 | extern void __iomem *ioport_map(unsigned long port, unsigned int nr); | 768 | extern void __iomem *ioport_map(unsigned long port, unsigned int nr); |
342 | extern void ioport_unmap(void __iomem *p); | 769 | extern void ioport_unmap(void __iomem *p); |
@@ -344,35 +771,68 @@ extern void ioport_unmap(void __iomem *p);
344 | #endif /* CONFIG_HAS_IOPORT_MAP */ | 771 | #endif /* CONFIG_HAS_IOPORT_MAP */ |
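
A rough usage sketch (not part of the patch): ioport_map() turns a legacy port number into an __iomem cookie so the same ioreadX()/iowriteX() calls work for both port and memory space. LEGACY_PORT and the status offset are hypothetical.

#include <linux/io.h>
#include <linux/types.h>

#define LEGACY_PORT   0x170 /* hypothetical legacy I/O-port base */
#define LEGACY_NPORTS 8     /* hypothetical number of ports to map */

static u8 read_legacy_status(void)
{
        void __iomem *p = ioport_map(LEGACY_PORT, LEGACY_NPORTS);
        u8 status = ioread8(p + 7); /* hypothetical status register */

        ioport_unmap(p);
        return status;
}
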
345 | 772 | ||
346 | #ifndef xlate_dev_kmem_ptr | 773 | #ifndef xlate_dev_kmem_ptr |
347 | #define xlate_dev_kmem_ptr(p) p | 774 | #define xlate_dev_kmem_ptr xlate_dev_kmem_ptr |
775 | static inline void *xlate_dev_kmem_ptr(void *addr) | ||
776 | { | ||
777 | return addr; | ||
778 | } | ||
348 | #endif | 779 | #endif |
780 | |||
349 | #ifndef xlate_dev_mem_ptr | 781 | #ifndef xlate_dev_mem_ptr |
350 | #define xlate_dev_mem_ptr(p) __va(p) | 782 | #define xlate_dev_mem_ptr xlate_dev_mem_ptr |
783 | static inline void *xlate_dev_mem_ptr(phys_addr_t addr) | ||
784 | { | ||
785 | return __va(addr); | ||
786 | } | ||
787 | #endif | ||
788 | |||
789 | #ifndef unxlate_dev_mem_ptr | ||
790 | #define unxlate_dev_mem_ptr unxlate_dev_mem_ptr | ||
791 | static inline void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr) | ||
792 | { | ||
793 | } | ||
351 | #endif | 794 | #endif |
352 | 795 | ||
353 | #ifdef CONFIG_VIRT_TO_BUS | 796 | #ifdef CONFIG_VIRT_TO_BUS |
354 | #ifndef virt_to_bus | 797 | #ifndef virt_to_bus |
355 | static inline unsigned long virt_to_bus(volatile void *address) | 798 | static inline unsigned long virt_to_bus(void *address) |
356 | { | 799 | { |
357 | return ((unsigned long) address); | 800 | return (unsigned long)address; |
358 | } | 801 | } |
359 | 802 | ||
360 | static inline void *bus_to_virt(unsigned long address) | 803 | static inline void *bus_to_virt(unsigned long address) |
361 | { | 804 | { |
362 | return (void *) address; | 805 | return (void *)address; |
363 | } | 806 | } |
364 | #endif | 807 | #endif |
365 | #endif | 808 | #endif |
366 | 809 | ||
367 | #ifndef memset_io | 810 | #ifndef memset_io |
368 | #define memset_io(a, b, c) memset(__io_virt(a), (b), (c)) | 811 | #define memset_io memset_io |
812 | static inline void memset_io(volatile void __iomem *addr, int value, | ||
813 | size_t size) | ||
814 | { | ||
815 | memset(__io_virt(addr), value, size); | ||
816 | } | ||
369 | #endif | 817 | #endif |
370 | 818 | ||
371 | #ifndef memcpy_fromio | 819 | #ifndef memcpy_fromio |
372 | #define memcpy_fromio(a, b, c) memcpy((a), __io_virt(b), (c)) | 820 | #define memcpy_fromio memcpy_fromio |
821 | static inline void memcpy_fromio(void *buffer, | ||
822 | const volatile void __iomem *addr, | ||
823 | size_t size) | ||
824 | { | ||
825 | memcpy(buffer, __io_virt(addr), size); | ||
826 | } | ||
373 | #endif | 827 | #endif |
828 | |||
374 | #ifndef memcpy_toio | 829 | #ifndef memcpy_toio |
375 | #define memcpy_toio(a, b, c) memcpy(__io_virt(a), (b), (c)) | 830 | #define memcpy_toio memcpy_toio |
831 | static inline void memcpy_toio(volatile void __iomem *addr, const void *buffer, | ||
832 | size_t size) | ||
833 | { | ||
834 | memcpy(__io_virt(addr), buffer, size); | ||
835 | } | ||
376 | #endif | 836 | #endif |
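
A rough usage sketch (not part of the patch): memset_io(), memcpy_fromio() and memcpy_toio() move whole buffers between RAM and MMIO (here simply memset()/memcpy() on __io_virt()). The descriptor layout and SRAM offset below are hypothetical.

#include <linux/io.h>
#include <linux/string.h>
#include <linux/types.h>

#define SRAM_DESC_OFF 0x100 /* hypothetical descriptor offset in device SRAM */

struct hypo_desc {           /* hypothetical descriptor layout */
        u32 addr;
        u32 len;
};

static void fetch_desc(void __iomem *sram, struct hypo_desc *desc)
{
        memcpy_fromio(desc, sram + SRAM_DESC_OFF, sizeof(*desc));
}

static void post_desc(void __iomem *sram, const struct hypo_desc *desc)
{
        memcpy_toio(sram + SRAM_DESC_OFF, desc, sizeof(*desc));
}
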
377 | 837 | ||
378 | #endif /* __KERNEL__ */ | 838 | #endif /* __KERNEL__ */ |