author		Linus Torvalds <torvalds@ppc970.osdl.org>	2005-04-16 18:20:36 -0400
committer	Linus Torvalds <torvalds@ppc970.osdl.org>	2005-04-16 18:20:36 -0400
commit		1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree		0bba044c4ce775e45a88a51686b5d9f90697ea9d /include/asm-ia64/io.h
Linux-2.6.12-rc2 (tag: v2.6.12-rc2)
Initial git repository build. I'm not bothering with the full history,
even though we have it. We can create a separate "historical" git
archive of that later if we want to, and in the meantime it's about
3.2GB when imported into git - space that would just make the early
git days unnecessarily complicated, when we don't have a lot of good
infrastructure for it.
Let it rip!
Diffstat (limited to 'include/asm-ia64/io.h')
-rw-r--r--	include/asm-ia64/io.h	484
1 file changed, 484 insertions, 0 deletions
diff --git a/include/asm-ia64/io.h b/include/asm-ia64/io.h
new file mode 100644
index 000000000000..491e9d1fc538
--- /dev/null
+++ b/include/asm-ia64/io.h
@@ -0,0 +1,484 @@
#ifndef _ASM_IA64_IO_H
#define _ASM_IA64_IO_H

/*
 * This file contains the definitions for the emulated IO instructions
 * inb/inw/inl/outb/outw/outl and the "string versions" of the same
 * (insb/insw/insl/outsb/outsw/outsl).  You can also use "pausing"
 * versions of the single-IO instructions (inb_p/inw_p/..).
 *
 * This file is not meant to be obfuscating: it's just complicated to
 * (a) handle it all in a way that makes gcc able to optimize it as
 * well as possible and (b) avoid writing the same thing over and over
 * again with slight variations and possibly making a mistake somewhere.
 *
 * Copyright (C) 1998-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 * Copyright (C) 1999 Asit Mallick <asit.k.mallick@intel.com>
 * Copyright (C) 1999 Don Dugger <don.dugger@intel.com>
 */

/* We don't use IO slowdowns on the ia64, but.. */
#define __SLOW_DOWN_IO	do { } while (0)
#define SLOW_DOWN_IO	do { } while (0)

#define __IA64_UNCACHED_OFFSET	0xc000000000000000UL	/* region 6 */

/*
 * The legacy I/O space defined by the ia64 architecture supports only 65536 ports, but
 * large machines may have multiple other I/O spaces, so we can't place any a priori limit
 * on IO_SPACE_LIMIT.  These additional spaces are described in ACPI.
 */
#define IO_SPACE_LIMIT		0xffffffffffffffffUL

#define MAX_IO_SPACES_BITS	4
#define MAX_IO_SPACES		(1UL << MAX_IO_SPACES_BITS)
#define IO_SPACE_BITS		24
#define IO_SPACE_SIZE		(1UL << IO_SPACE_BITS)

#define IO_SPACE_NR(port)	((port) >> IO_SPACE_BITS)
#define IO_SPACE_BASE(space)	((space) << IO_SPACE_BITS)
#define IO_SPACE_PORT(port)	((port) & (IO_SPACE_SIZE - 1))

#define IO_SPACE_SPARSE_ENCODING(p)	((((p) >> 2) << 12) | ((p) & 0xfff))

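/*
 * Worked example (illustrative, not part of the original header): the
 * port cookie 0x1004000 names space 1, port 0x4000:
 *
 *	IO_SPACE_NR(0x1004000)           == 1          (bits above bit 24)
 *	IO_SPACE_PORT(0x1004000)         == 0x4000     (low 24 bits)
 *	IO_SPACE_SPARSE_ENCODING(0x4000) == 0x1000000  (((0x4000 >> 2) << 12) | 0)
 */
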
struct io_space {
	unsigned long mmio_base;	/* base in MMIO space */
	int sparse;
};

extern struct io_space io_space[];
extern unsigned int num_io_spaces;

# ifdef __KERNEL__

/*
 * All MMIO iomem cookies are in region 6; anything less is a PIO cookie:
 *	0xCxxxxxxxxxxxxxxx	MMIO cookie (return from ioremap)
 *	0x000000001SPPPPPP	PIO cookie (S=space number, P..P=port)
 *
 * ioread/writeX() uses the leading 1 in PIO cookies (PIO_OFFSET) to catch
 * code that uses bare port numbers without the prerequisite pci_iomap().
 */
#define PIO_OFFSET		(1UL << (MAX_IO_SPACES_BITS + IO_SPACE_BITS))
#define PIO_MASK		(PIO_OFFSET - 1)
#define PIO_RESERVED		__IA64_UNCACHED_OFFSET
#define HAVE_ARCH_PIO_SIZE
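
/*
 * Sketch (illustrative, not part of the original header): how an
 * ioread8()-style helper can tell the two cookie flavors apart.  The
 * real dispatch lives in the generic iomap code; the helper name here
 * is hypothetical.
 */
#if 0	/* example only */
static inline unsigned int example_ioread8(void __iomem *addr)
{
	unsigned long cookie = (unsigned long) addr;

	if (cookie >= PIO_RESERVED)	/* region-6 address: MMIO cookie */
		return readb(addr);
	if (cookie & PIO_OFFSET)	/* leading 1: PIO cookie */
		return inb(cookie & PIO_MASK);
	/* neither: a bare port number that never went through pci_iomap() */
	return (unsigned int) -1;
}
#endif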

#include <asm/intrinsics.h>
#include <asm/machvec.h>
#include <asm/page.h>
#include <asm/system.h>
#include <asm-generic/iomap.h>

/*
 * Change virtual addresses to physical addresses and vice versa.
 */
static inline unsigned long
virt_to_phys (volatile void *address)
{
	return (unsigned long) address - PAGE_OFFSET;
}

static inline void*
phys_to_virt (unsigned long address)
{
	return (void *) (address + PAGE_OFFSET);
}
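
/*
 * Usage sketch (illustrative): both helpers are plain offsets within the
 * kernel's identity mapping, so they are exact inverses for any directly
 * mapped kernel address.
 */
#if 0	/* example only */
void *buf = kmalloc(64, GFP_KERNEL);
unsigned long phys = virt_to_phys(buf);	/* buf - PAGE_OFFSET */
BUG_ON(phys_to_virt(phys) != buf);	/* round-trips exactly */
#endif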

#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
extern int valid_phys_addr_range (unsigned long addr, size_t *count); /* efi.c */

/*
 * The following macros are deprecated and scheduled for removal.
 * Please use the PCI-DMA interface defined in <asm/pci.h> instead.
 */
#define bus_to_virt	phys_to_virt
#define virt_to_bus	virt_to_phys
#define page_to_bus	page_to_phys

# endif /* __KERNEL__ */

/*
 * Memory fence w/accept.  This should never be used in code that is
 * not IA-64 specific.
 */
#define __ia64_mf_a()	ia64_mfa()

/**
 * ___ia64_mmiowb - I/O write barrier
 *
 * Ensure ordering of I/O space writes.  This will make sure that writes
 * following the barrier will arrive after all previous writes.  For most
 * ia64 platforms, this is a simple 'mf.a' instruction.
 *
 * See Documentation/DocBook/deviceiobook.tmpl for more information.
 */
static inline void ___ia64_mmiowb(void)
{
	ia64_mfa();
}
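
/*
 * Usage sketch (illustrative, per the deviceio documentation): mmiowb()
 * keeps MMIO writes issued under a lock from being reordered past writes
 * done by the next CPU that takes the same lock.  The names below are
 * hypothetical.
 */
#if 0	/* example only */
spin_lock(&dev->lock);
writel(cmd, dev->regs + CMD_REG);
mmiowb();			/* order the write before the next lock holder's */
spin_unlock(&dev->lock);
#endif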

static inline unsigned long
__ia64_get_io_port_base (void)
{
	extern unsigned long ia64_iobase;

	return ia64_iobase;
}

static inline void*
__ia64_mk_io_addr (unsigned long port)
{
	struct io_space *space;
	unsigned long offset;

	space = &io_space[IO_SPACE_NR(port)];
	port = IO_SPACE_PORT(port);
	if (space->sparse)
		offset = IO_SPACE_SPARSE_ENCODING(port);
	else
		offset = port;

	return (void *) (space->mmio_base | offset);
}
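
/*
 * Worked example (illustrative values): with io_space[1].mmio_base ==
 * 0xc000000080000000 and io_space[1].sparse == 1, the cookie 0x1004000
 * resolves to
 *
 *	0xc000000080000000 | IO_SPACE_SPARSE_ENCODING(0x4000)
 *	  == 0xc000000080000000 | 0x1000000 == 0xc000000081000000
 */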

#define __ia64_inb	___ia64_inb
#define __ia64_inw	___ia64_inw
#define __ia64_inl	___ia64_inl
#define __ia64_outb	___ia64_outb
#define __ia64_outw	___ia64_outw
#define __ia64_outl	___ia64_outl
#define __ia64_readb	___ia64_readb
#define __ia64_readw	___ia64_readw
#define __ia64_readl	___ia64_readl
#define __ia64_readq	___ia64_readq
#define __ia64_readb_relaxed	___ia64_readb
#define __ia64_readw_relaxed	___ia64_readw
#define __ia64_readl_relaxed	___ia64_readl
#define __ia64_readq_relaxed	___ia64_readq
#define __ia64_writeb	___ia64_writeb
#define __ia64_writew	___ia64_writew
#define __ia64_writel	___ia64_writel
#define __ia64_writeq	___ia64_writeq
#define __ia64_mmiowb	___ia64_mmiowb

/*
 * For the in/out routines, we need to do "mf.a" _after_ doing the I/O access to ensure
 * that the access has completed before executing other I/O accesses.  Since we're doing
 * the accesses through an uncacheable (UC) translation, the CPU will execute them in
 * program order.  However, we still need to tell the compiler not to shuffle them around
 * during optimization, which is why we use "volatile" pointers.
 */

static inline unsigned int
___ia64_inb (unsigned long port)
{
	volatile unsigned char *addr = __ia64_mk_io_addr(port);
	unsigned char ret;

	ret = *addr;
	__ia64_mf_a();
	return ret;
}

static inline unsigned int
___ia64_inw (unsigned long port)
{
	volatile unsigned short *addr = __ia64_mk_io_addr(port);
	unsigned short ret;

	ret = *addr;
	__ia64_mf_a();
	return ret;
}

static inline unsigned int
___ia64_inl (unsigned long port)
{
	volatile unsigned int *addr = __ia64_mk_io_addr(port);
	unsigned int ret;

	ret = *addr;
	__ia64_mf_a();
	return ret;
}

static inline void
___ia64_outb (unsigned char val, unsigned long port)
{
	volatile unsigned char *addr = __ia64_mk_io_addr(port);

	*addr = val;
	__ia64_mf_a();
}

static inline void
___ia64_outw (unsigned short val, unsigned long port)
{
	volatile unsigned short *addr = __ia64_mk_io_addr(port);

	*addr = val;
	__ia64_mf_a();
}

static inline void
___ia64_outl (unsigned int val, unsigned long port)
{
	volatile unsigned int *addr = __ia64_mk_io_addr(port);

	*addr = val;
	__ia64_mf_a();
}

static inline void
__insb (unsigned long port, void *dst, unsigned long count)
{
	unsigned char *dp = dst;

	while (count--)
		*dp++ = platform_inb(port);
}

static inline void
__insw (unsigned long port, void *dst, unsigned long count)
{
	unsigned short *dp = dst;

	while (count--)
		*dp++ = platform_inw(port);
}

static inline void
__insl (unsigned long port, void *dst, unsigned long count)
{
	unsigned int *dp = dst;

	while (count--)
		*dp++ = platform_inl(port);
}

static inline void
__outsb (unsigned long port, const void *src, unsigned long count)
{
	const unsigned char *sp = src;

	while (count--)
		platform_outb(*sp++, port);
}

static inline void
__outsw (unsigned long port, const void *src, unsigned long count)
{
	const unsigned short *sp = src;

	while (count--)
		platform_outw(*sp++, port);
}

static inline void
__outsl (unsigned long port, const void *src, unsigned long count)
{
	const unsigned int *sp = src;

	while (count--)
		platform_outl(*sp++, port);
}
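
/*
 * Usage sketch (illustrative): drain a 16-bit data FIFO, e.g. one ATA
 * sector's worth of data from the legacy data port.
 */
#if 0	/* example only */
u16 buf[256];

__insw(0x1f0, buf, 256);	/* 256 halfwords == 512 bytes */
#endif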

/*
 * Unfortunately, some platforms are broken and do not follow the IA-64 architecture
 * specification regarding legacy I/O support.  Thus, we have to make these operations
 * platform dependent...
 */
#define __inb		platform_inb
#define __inw		platform_inw
#define __inl		platform_inl
#define __outb		platform_outb
#define __outw		platform_outw
#define __outl		platform_outl
#define __mmiowb	platform_mmiowb

#define inb(p)		__inb(p)
#define inw(p)		__inw(p)
#define inl(p)		__inl(p)
#define insb(p,d,c)	__insb(p,d,c)
#define insw(p,d,c)	__insw(p,d,c)
#define insl(p,d,c)	__insl(p,d,c)
#define outb(v,p)	__outb(v,p)
#define outw(v,p)	__outw(v,p)
#define outl(v,p)	__outl(v,p)
#define outsb(p,s,c)	__outsb(p,s,c)
#define outsw(p,s,c)	__outsw(p,s,c)
#define outsl(p,s,c)	__outsl(p,s,c)
#define mmiowb()	__mmiowb()
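
/*
 * Usage sketch (illustrative): a classic port-I/O register access, routed
 * through the platform vector.  The VGA-style index/data register pair is
 * just an example.
 */
#if 0	/* example only */
u8 val;

outb(0x0a, 0x3d4);		/* select an index register */
val = inb(0x3d5);		/* read the corresponding data register */
#endif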

/*
 * The addresses passed to these functions are ioremap()ed already.
 *
 * We need these to be machine vectors since some platforms don't provide
 * DMA coherence via PIO reads (PCI drivers and the spec imply that this is
 * a good idea).  Writes are ok though for all existing ia64 platforms (and
 * hopefully it'll stay that way).
 */
static inline unsigned char
___ia64_readb (const volatile void __iomem *addr)
{
	return *(volatile unsigned char __force *)addr;
}

static inline unsigned short
___ia64_readw (const volatile void __iomem *addr)
{
	return *(volatile unsigned short __force *)addr;
}

static inline unsigned int
___ia64_readl (const volatile void __iomem *addr)
{
	return *(volatile unsigned int __force *) addr;
}

static inline unsigned long
___ia64_readq (const volatile void __iomem *addr)
{
	return *(volatile unsigned long __force *) addr;
}

static inline void
__writeb (unsigned char val, volatile void __iomem *addr)
{
	*(volatile unsigned char __force *) addr = val;
}

static inline void
__writew (unsigned short val, volatile void __iomem *addr)
{
	*(volatile unsigned short __force *) addr = val;
}

static inline void
__writel (unsigned int val, volatile void __iomem *addr)
{
	*(volatile unsigned int __force *) addr = val;
}

static inline void
__writeq (unsigned long val, volatile void __iomem *addr)
{
	*(volatile unsigned long __force *) addr = val;
}

#define __readb		platform_readb
#define __readw		platform_readw
#define __readl		platform_readl
#define __readq		platform_readq
#define __readb_relaxed	platform_readb_relaxed
#define __readw_relaxed	platform_readw_relaxed
#define __readl_relaxed	platform_readl_relaxed
#define __readq_relaxed	platform_readq_relaxed

#define readb(a)	__readb((a))
#define readw(a)	__readw((a))
#define readl(a)	__readl((a))
#define readq(a)	__readq((a))
#define readb_relaxed(a)	__readb_relaxed((a))
#define readw_relaxed(a)	__readw_relaxed((a))
#define readl_relaxed(a)	__readl_relaxed((a))
#define readq_relaxed(a)	__readq_relaxed((a))
#define __raw_readb	readb
#define __raw_readw	readw
#define __raw_readl	readl
#define __raw_readq	readq
#define __raw_readb_relaxed	readb_relaxed
#define __raw_readw_relaxed	readw_relaxed
#define __raw_readl_relaxed	readl_relaxed
#define __raw_readq_relaxed	readq_relaxed
#define writeb(v,a)	__writeb((v), (a))
#define writew(v,a)	__writew((v), (a))
#define writel(v,a)	__writel((v), (a))
#define writeq(v,a)	__writeq((v), (a))
#define __raw_writeb	writeb
#define __raw_writew	writew
#define __raw_writel	writel
#define __raw_writeq	writeq

#ifndef inb_p
# define inb_p		inb
#endif
#ifndef inw_p
# define inw_p		inw
#endif
#ifndef inl_p
# define inl_p		inl
#endif

#ifndef outb_p
# define outb_p		outb
#endif
#ifndef outw_p
# define outw_p		outw
#endif
#ifndef outl_p
# define outl_p		outl
#endif

/*
 * An "address" in IO memory space is not clearly either an integer or a pointer.  We will
 * accept both, thus the casts.
 *
 * On IA-64, we access the physical I/O memory space through the uncached kernel region.
 */
static inline void __iomem *
ioremap (unsigned long offset, unsigned long size)
{
	return (void __iomem *) (__IA64_UNCACHED_OFFSET | (offset));
}

static inline void
iounmap (volatile void __iomem *addr)
{
}

#define ioremap_nocache(o,s)	ioremap(o,s)
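
/*
 * Usage sketch (illustrative, hypothetical addresses): map a device's MMIO
 * range and touch a register.  On ia64 ioremap() merely forms a region-6
 * uncached address, and iounmap() is correspondingly a no-op.
 */
#if 0	/* example only */
void __iomem *regs = ioremap(0xf8000000UL, 0x1000);
u32 status = readl(regs + 0x10);

writel(status | 0x1, regs + 0x10);
iounmap(regs);
#endif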

# ifdef __KERNEL__

/*
 * String version of IO memory access ops:
 */
extern void memcpy_fromio(void *dst, const volatile void __iomem *src, long n);
extern void memcpy_toio(volatile void __iomem *dst, const void *src, long n);
extern void memset_io(volatile void __iomem *s, int c, long n);
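
/*
 * Usage sketch (illustrative, hypothetical addresses): bulk-copy from a
 * mapped device region instead of looping over readb().
 */
#if 0	/* example only */
void __iomem *shadow = ioremap(0xc0000UL, 0x8000);
char *buf = kmalloc(0x8000, GFP_KERNEL);

if (buf)
	memcpy_fromio(buf, shadow, 0x8000);
#endif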

#define dma_cache_inv(_start,_size)		do { } while (0)
#define dma_cache_wback(_start,_size)		do { } while (0)
#define dma_cache_wback_inv(_start,_size)	do { } while (0)

# endif /* __KERNEL__ */

/*
 * Enabling BIO_VMERGE_BOUNDARY forces us to turn off I/O MMU bypassing.  It is said that
 * BIO-level virtual merging can give up to 4% performance boost (not verified for ia64).
 * On the other hand, we know that I/O MMU bypassing gives ~8% performance improvement on
 * SPECweb-like workloads on zx1-based machines.  Thus, for now we favor I/O MMU bypassing
 * over BIO-level virtual merging.
 */
extern unsigned long ia64_max_iommu_merge_mask;
#if 1
#define BIO_VMERGE_BOUNDARY	0
#else
/*
 * It makes no sense at all to have this BIO_VMERGE_BOUNDARY macro here.  Should be
 * replaced by dma_merge_mask() or something of that sort.  Note: the only way
 * BIO_VMERGE_BOUNDARY is used is to mask off bits.  Effectively, our definition gets
 * expanded into:
 *
 *	addr & ((ia64_max_iommu_merge_mask + 1) - 1) == (addr & ia64_max_iommu_merge_mask)
 *
 * which is precisely what we want.
 */
#define BIO_VMERGE_BOUNDARY	(ia64_max_iommu_merge_mask + 1)
#endif

#endif /* _ASM_IA64_IO_H */