Diffstat (limited to 'include/asm-x86/io_64.h')
-rw-r--r-- | include/asm-x86/io_64.h | 276
1 files changed, 276 insertions, 0 deletions
diff --git a/include/asm-x86/io_64.h b/include/asm-x86/io_64.h
new file mode 100644
index 000000000000..7475095c5061
--- /dev/null
+++ b/include/asm-x86/io_64.h
@@ -0,0 +1,276 @@
#ifndef _ASM_IO_H
#define _ASM_IO_H


/*
 * This file contains the definitions for the x86 IO instructions
 * inb/inw/inl/outb/outw/outl and the "string versions" of the same
 * (insb/insw/insl/outsb/outsw/outsl). You can also use "pausing"
 * versions of the single-IO instructions (inb_p/inw_p/..).
 *
 * This file is not meant to be obfuscating: it's just complicated
 * to (a) handle it all in a way that makes gcc able to optimize it
 * as well as possible and (b) trying to avoid writing the same thing
 * over and over again with slight variations and possibly making a
 * mistake somewhere.
 */

/*
 * Thanks to James van Artsdalen for a better timing-fix than
 * the two short jumps: using outb's to a nonexistent port seems
 * to guarantee better timings even on fast machines.
 *
 * On the other hand, I'd like to be sure of a non-existent port:
 * I feel a bit unsafe about using 0x80 (should be safe, though)
 *
 *		Linus
 */

/*
 * Bit simplified and optimized by Jan Hubicka
 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999.
 *
 * isa_memset_io, isa_memcpy_fromio, isa_memcpy_toio added,
 * isa_read[wl] and isa_write[wl] fixed
 * - Arnaldo Carvalho de Melo <acme@conectiva.com.br>
 */

#define __SLOW_DOWN_IO "\noutb %%al,$0x80"

#ifdef REALLY_SLOW_IO
#define __FULL_SLOW_DOWN_IO __SLOW_DOWN_IO __SLOW_DOWN_IO __SLOW_DOWN_IO __SLOW_DOWN_IO
#else
#define __FULL_SLOW_DOWN_IO __SLOW_DOWN_IO
#endif

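/*
 * Illustrative sketch only: with the default __FULL_SLOW_DOWN_IO, the
 * "pausing" variants generated further down append one dummy write to
 * port 0x80 after the real access, so outb_p() behaves roughly like
 *
 *	outb(value, port);
 *	asm volatile ("outb %%al,$0x80" : : "a" (value));
 *
 * With REALLY_SLOW_IO defined before including this file, four such
 * dummy writes are emitted instead of one.
 */
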
/*
 * Talk about misusing macros..
 */
#define __OUT1(s,x) \
static inline void out##s(unsigned x value, unsigned short port) {

#define __OUT2(s,s1,s2) \
__asm__ __volatile__ ("out" #s " %" s1 "0,%" s2 "1"

#define __OUT(s,s1,x) \
__OUT1(s,x) __OUT2(s,s1,"w") : : "a" (value), "Nd" (port)); } \
__OUT1(s##_p,x) __OUT2(s,s1,"w") __FULL_SLOW_DOWN_IO : : "a" (value), "Nd" (port));} \

#define __IN1(s) \
static inline RETURN_TYPE in##s(unsigned short port) { RETURN_TYPE _v;

#define __IN2(s,s1,s2) \
__asm__ __volatile__ ("in" #s " %" s2 "1,%" s1 "0"

#define __IN(s,s1,i...) \
__IN1(s) __IN2(s,s1,"w") : "=a" (_v) : "Nd" (port) ,##i ); return _v; } \
__IN1(s##_p) __IN2(s,s1,"w") __FULL_SLOW_DOWN_IO : "=a" (_v) : "Nd" (port) ,##i ); return _v; } \

#define __INS(s) \
static inline void ins##s(unsigned short port, void * addr, unsigned long count) \
{ __asm__ __volatile__ ("rep ; ins" #s \
	: "=D" (addr), "=c" (count) : "d" (port),"0" (addr),"1" (count)); }

#define __OUTS(s) \
static inline void outs##s(unsigned short port, const void * addr, unsigned long count) \
{ __asm__ __volatile__ ("rep ; outs" #s \
	: "=S" (addr), "=c" (count) : "d" (port),"0" (addr),"1" (count)); }

#define RETURN_TYPE unsigned char
__IN(b,"")
#undef RETURN_TYPE
#define RETURN_TYPE unsigned short
__IN(w,"")
#undef RETURN_TYPE
#define RETURN_TYPE unsigned int
__IN(l,"")
#undef RETURN_TYPE

__OUT(b,"b",char)
__OUT(w,"w",short)
__OUT(l,,int)

__INS(b)
__INS(w)
__INS(l)

__OUTS(b)
__OUTS(w)
__OUTS(l)
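
/*
 * For illustration only, the instantiations above boil down to inlines
 * of roughly these shapes (plus a _p "pausing" variant of each single
 * register access):
 *
 *	unsigned char  inb(unsigned short port);
 *	unsigned short inw(unsigned short port);
 *	unsigned int   inl(unsigned short port);
 *	void outb(unsigned char value, unsigned short port);
 *	void outw(unsigned short value, unsigned short port);
 *	void outl(unsigned int value, unsigned short port);
 *	void insb(unsigned short port, void *addr, unsigned long count);
 *	void outsb(unsigned short port, const void *addr, unsigned long count);
 *	(and likewise insw/insl, outsw/outsl)
 */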

#define IO_SPACE_LIMIT 0xffff
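
/*
 * Usage sketch (illustrative only): reading the seconds register of
 * the standard CMOS RTC via the accessors generated above.  Ports
 * 0x70/0x71 are the conventional CMOS index/data ports.
 *
 *	outb(0x00, 0x70);	select CMOS register 0 (seconds)
 *	seconds = inb(0x71);	read the selected register
 */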

#if defined(__KERNEL__) && defined(__x86_64__)

#include <linux/vmalloc.h>

#ifndef __i386__
/*
 * Change virtual addresses to physical addresses and vv.
 * These are pretty trivial
 */
static inline unsigned long virt_to_phys(volatile void * address)
{
	return __pa(address);
}

static inline void * phys_to_virt(unsigned long address)
{
	return __va(address);
}
#endif

/*
 * Change "struct page" to physical address.
 */
#define page_to_phys(page)    ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)

#include <asm-generic/iomap.h>

extern void __iomem *__ioremap(unsigned long offset, unsigned long size, unsigned long flags);

static inline void __iomem * ioremap (unsigned long offset, unsigned long size)
{
	return __ioremap(offset, size, 0);
}

extern void *early_ioremap(unsigned long addr, unsigned long size);
extern void early_iounmap(void *addr, unsigned long size);

/*
 * This one maps high address device memory and turns off caching for that area.
 * it's useful if some control registers are in such an area and write combining
 * or read caching is not desirable:
 */
extern void __iomem * ioremap_nocache (unsigned long offset, unsigned long size);
extern void iounmap(volatile void __iomem *addr);
extern void __iomem *fix_ioremap(unsigned idx, unsigned long phys);

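/*
 * Typical lifecycle, as an illustrative sketch (bar_phys/bar_len are
 * made-up names for a device's MMIO resource):
 *
 *	void __iomem *regs = ioremap_nocache(bar_phys, bar_len);
 *	if (!regs)
 *		return -ENOMEM;
 *	... access the region with the readX()/writeX() accessors below ...
 *	iounmap(regs);
 */
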
/*
 * ISA I/O bus memory addresses are 1:1 with the physical address.
 */
#define isa_virt_to_bus virt_to_phys
#define isa_page_to_bus page_to_phys
#define isa_bus_to_virt phys_to_virt

/*
 * However PCI ones are not necessarily 1:1 and therefore these interfaces
 * are forbidden in portable PCI drivers.
 *
 * Allow them on x86 for legacy drivers, though.
 */
#define virt_to_bus virt_to_phys
#define bus_to_virt phys_to_virt

/*
 * readX/writeX() are used to access memory mapped devices. On some
 * architectures the memory mapped IO stuff needs to be accessed
 * differently. On the x86 architecture, we just read/write the
 * memory location directly.
 */

static inline __u8 __readb(const volatile void __iomem *addr)
{
	return *(__force volatile __u8 *)addr;
}
static inline __u16 __readw(const volatile void __iomem *addr)
{
	return *(__force volatile __u16 *)addr;
}
static __always_inline __u32 __readl(const volatile void __iomem *addr)
{
	return *(__force volatile __u32 *)addr;
}
static inline __u64 __readq(const volatile void __iomem *addr)
{
	return *(__force volatile __u64 *)addr;
}
#define readb(x) __readb(x)
#define readw(x) __readw(x)
#define readl(x) __readl(x)
#define readq(x) __readq(x)
#define readb_relaxed(a) readb(a)
#define readw_relaxed(a) readw(a)
#define readl_relaxed(a) readl(a)
#define readq_relaxed(a) readq(a)
#define __raw_readb readb
#define __raw_readw readw
#define __raw_readl readl
#define __raw_readq readq

#define mmiowb()

static inline void __writel(__u32 b, volatile void __iomem *addr)
{
	*(__force volatile __u32 *)addr = b;
}
static inline void __writeq(__u64 b, volatile void __iomem *addr)
{
	*(__force volatile __u64 *)addr = b;
}
static inline void __writeb(__u8 b, volatile void __iomem *addr)
{
	*(__force volatile __u8 *)addr = b;
}
static inline void __writew(__u16 b, volatile void __iomem *addr)
{
	*(__force volatile __u16 *)addr = b;
}
#define writeq(val,addr) __writeq((val),(addr))
#define writel(val,addr) __writel((val),(addr))
#define writew(val,addr) __writew((val),(addr))
#define writeb(val,addr) __writeb((val),(addr))
#define __raw_writeb writeb
#define __raw_writew writew
#define __raw_writel writel
#define __raw_writeq writeq
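
/*
 * Illustrative use of the MMIO accessors above (CTRL_REG and CTRL_ENABLE
 * are made-up names; regs is an ioremap()ed cookie):
 *
 *	u32 ctrl = readl(regs + CTRL_REG);
 *	writel(ctrl | CTRL_ENABLE, regs + CTRL_REG);
 *
 * On x86-64 these compile to plain loads and stores; the _relaxed and
 * __raw_ variants defined above are simply aliases for the same accessors.
 */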

void __memcpy_fromio(void*,unsigned long,unsigned);
void __memcpy_toio(unsigned long,const void*,unsigned);

static inline void memcpy_fromio(void *to, const volatile void __iomem *from, unsigned len)
{
	__memcpy_fromio(to,(unsigned long)from,len);
}
static inline void memcpy_toio(volatile void __iomem *to, const void *from, unsigned len)
{
	__memcpy_toio((unsigned long)to,from,len);
}
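
/*
 * Sketch (illustrative; buf, len and DEV_BUF_OFF are made-up names):
 * pulling a block of device memory into a kernel buffer and pushing it
 * back out again:
 *
 *	memcpy_fromio(buf, regs + DEV_BUF_OFF, len);
 *	memcpy_toio(regs + DEV_BUF_OFF, buf, len);
 */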

void memset_io(volatile void __iomem *a, int b, size_t c);

/*
 * ISA space is 'always mapped' on a typical x86 system, no need to
 * explicitly ioremap() it. The fact that the ISA IO space is mapped
 * to PAGE_OFFSET is pure coincidence - it does not mean ISA values
 * are physical addresses. The following constant pointer can be
 * used as the IO-area pointer (it can be iounmapped as well, so the
 * analogy with PCI is quite large):
 */
#define __ISA_IO_base ((char __iomem *)(PAGE_OFFSET))
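
/*
 * For example (illustrative only), the first byte of the legacy VGA
 * text buffer at physical 0xb8000 can be reached through this pointer:
 *
 *	unsigned char c = readb(__ISA_IO_base + 0xb8000);
 */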

/* Nothing to do */

#define dma_cache_inv(_start,_size) do { } while (0)
#define dma_cache_wback(_start,_size) do { } while (0)
#define dma_cache_wback_inv(_start,_size) do { } while (0)

#define flush_write_buffers()

extern int iommu_bio_merge;
#define BIO_VMERGE_BOUNDARY iommu_bio_merge

/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access
 */
#define xlate_dev_mem_ptr(p)	__va(p)

/*
 * Convert a virtual cached pointer to an uncached pointer
 */
#define xlate_dev_kmem_ptr(p)	p

#endif /* __KERNEL__ */

#endif