Diffstat (limited to 'include/asm-sh64/io.h')
-rw-r--r-- | include/asm-sh64/io.h | 250
1 file changed, 250 insertions, 0 deletions
diff --git a/include/asm-sh64/io.h b/include/asm-sh64/io.h
new file mode 100644
index 000000000000..cfafaa73b2b0
--- /dev/null
+++ b/include/asm-sh64/io.h
@@ -0,0 +1,250 @@
#ifndef __ASM_SH64_IO_H
#define __ASM_SH64_IO_H

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * include/asm-sh64/io.h
 *
 * Copyright (C) 2000, 2001  Paolo Alberelli
 * Copyright (C) 2003  Paul Mundt
 *
 */

/*
 * Convention:
 *	read{b,w,l}/write{b,w,l} are for PCI,
 *	while in{b,w,l}/out{b,w,l} are for ISA.
 * These may (will) be platform-specific functions.
 *
 * In addition, we have
 *	ctrl_in{b,w,l}/ctrl_out{b,w,l} for SuperH-specific I/O,
 * which are processor-specific. The address should be the result of
 * onchip_remap().
 */
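
/*
 * Illustrative usage sketch (not part of the original file; all base
 * addresses, offsets, and the "demo" name below are hypothetical):
 *
 *	void *regs = ioremap_nocache(0xfc000000, 0x100);	// MMIO window
 *	unsigned int status = readl(regs + 0x04);		// PCI-style MMIO read
 *	writel(0x1, regs + 0x08);				// PCI-style MMIO write
 *
 *	unsigned char c = inb(0x3f8);				// ISA-style port read
 *
 *	unsigned long creg = onchip_remap(0x01fc0000, 64, "demo");
 *	unsigned int val = ctrl_inl(creg);			// SuperH on-chip register
 */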

#include <linux/compiler.h>
#include <asm/cache.h>
#include <asm/system.h>
#include <asm/page.h>
#include <asm-generic/iomap.h>

#define virt_to_bus virt_to_phys
#define bus_to_virt phys_to_virt
#define page_to_bus page_to_phys

/*
 * Nothing overly special here.. instead of doing the same thing
 * over and over again, we just define a set of sh64_in/out functions
 * with an implicit size. The traditional read{b,w,l}/write{b,w,l}
 * mess is wrapped around these, as are the SH-specific ctrl_in/out
 * routines.
 */
static inline unsigned char sh64_in8(const volatile void __iomem *addr)
{
	return *(volatile unsigned char __force *)addr;
}

static inline unsigned short sh64_in16(const volatile void __iomem *addr)
{
	return *(volatile unsigned short __force *)addr;
}

static inline unsigned int sh64_in32(const volatile void __iomem *addr)
{
	return *(volatile unsigned int __force *)addr;
}

static inline unsigned long long sh64_in64(const volatile void __iomem *addr)
{
	return *(volatile unsigned long long __force *)addr;
}

static inline void sh64_out8(unsigned char b, volatile void __iomem *addr)
{
	*(volatile unsigned char __force *)addr = b;
	wmb();
}

static inline void sh64_out16(unsigned short b, volatile void __iomem *addr)
{
	*(volatile unsigned short __force *)addr = b;
	wmb();
}

static inline void sh64_out32(unsigned int b, volatile void __iomem *addr)
{
	*(volatile unsigned int __force *)addr = b;
	wmb();
}

static inline void sh64_out64(unsigned long long b, volatile void __iomem *addr)
{
	*(volatile unsigned long long __force *)addr = b;
	wmb();
}

#define readb(addr)		sh64_in8(addr)
#define readw(addr)		sh64_in16(addr)
#define readl(addr)		sh64_in32(addr)
#define readb_relaxed(addr)	sh64_in8(addr)
#define readw_relaxed(addr)	sh64_in16(addr)
#define readl_relaxed(addr)	sh64_in32(addr)

#define writeb(b, addr)		sh64_out8(b, addr)
#define writew(b, addr)		sh64_out16(b, addr)
#define writel(b, addr)		sh64_out32(b, addr)

#define ctrl_inb(addr)		sh64_in8(ioport_map(addr, 1))
#define ctrl_inw(addr)		sh64_in16(ioport_map(addr, 2))
#define ctrl_inl(addr)		sh64_in32(ioport_map(addr, 4))

#define ctrl_outb(b, addr)	sh64_out8(b, ioport_map(addr, 1))
#define ctrl_outw(b, addr)	sh64_out16(b, ioport_map(addr, 2))
#define ctrl_outl(b, addr)	sh64_out32(b, ioport_map(addr, 4))

#define ioread8(addr)		sh64_in8(addr)
#define ioread16(addr)		sh64_in16(addr)
#define ioread32(addr)		sh64_in32(addr)
#define iowrite8(b, addr)	sh64_out8(b, addr)
#define iowrite16(b, addr)	sh64_out16(b, addr)
#define iowrite32(b, addr)	sh64_out32(b, addr)

#define inb(addr)	ctrl_inb(addr)
#define inw(addr)	ctrl_inw(addr)
#define inl(addr)	ctrl_inl(addr)
#define outb(b, addr)	ctrl_outb(b, addr)
#define outw(b, addr)	ctrl_outw(b, addr)
#define outl(b, addr)	ctrl_outl(b, addr)

void outsw(unsigned long port, const void *addr, unsigned long count);
void insw(unsigned long port, void *addr, unsigned long count);
void outsl(unsigned long port, const void *addr, unsigned long count);
void insl(unsigned long port, void *addr, unsigned long count);

void memcpy_toio(void __iomem *to, const void *from, long count);
void memcpy_fromio(void *to, void __iomem *from, long count);

#define mmiowb()

#ifdef __KERNEL__

#ifdef CONFIG_SH_CAYMAN
extern unsigned long smsc_superio_virt;
#endif
#ifdef CONFIG_PCI
extern unsigned long pciio_virt;
#endif

#define IO_SPACE_LIMIT 0xffffffff

/*
 * Change virtual addresses to physical addresses and vice versa.
 * These are trivial on the 1:1 Linux/SuperH mapping.
 */
extern __inline__ unsigned long virt_to_phys(volatile void *address)
{
	return __pa(address);
}

extern __inline__ void *phys_to_virt(unsigned long address)
{
	return __va(address);
}

extern void *__ioremap(unsigned long phys_addr, unsigned long size,
		       unsigned long flags);

extern __inline__ void *ioremap(unsigned long phys_addr, unsigned long size)
{
	return __ioremap(phys_addr, size, 1);
}

extern __inline__ void *ioremap_nocache(unsigned long phys_addr, unsigned long size)
{
	return __ioremap(phys_addr, size, 0);
}

extern void iounmap(void *addr);
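
/*
 * Illustrative sketch (not part of the original file): a typical
 * remap/unmap cycle for a device at a hypothetical physical address.
 * Note that ioremap() calls __ioremap() with flags == 1 and
 * ioremap_nocache() with flags == 0, per the definitions above.
 *
 *	void *mmio = ioremap_nocache(0xb0000000, PAGE_SIZE);
 *	if (mmio) {
 *		writeb(0xff, mmio);	// hypothetical device poke
 *		iounmap(mmio);
 *	}
 */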

unsigned long onchip_remap(unsigned long addr, unsigned long size, const char *name);
extern void onchip_unmap(unsigned long vaddr);

static __inline__ int check_signature(volatile void __iomem *io_addr,
				      const unsigned char *signature, int length)
{
	int retval = 0;
	do {
		if (readb(io_addr) != *signature)
			goto out;
		io_addr++;
		signature++;
		length--;
	} while (length);
	retval = 1;
out:
	return retval;
}
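
/*
 * Illustrative sketch (not part of the original file): probing a
 * hypothetical device for a known ID string before claiming it.
 *
 *	static const unsigned char sig[] = "DEMO";
 *	void *base = ioremap_nocache(0xb4000000, PAGE_SIZE);
 *
 *	if (base && check_signature(base, sig, 4))
 *		printk(KERN_INFO "demo device found\n");
 */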

/*
 * The caches on some architectures aren't dma-coherent, so this has to
 * be handled in software.  There are three types of operations that
 * can be applied to DMA buffers (an illustrative usage sketch follows
 * the three helpers below):
 *
 * - dma_cache_wback_inv(start, size) makes caches and RAM coherent by
 *   writing the content of the caches back to memory, if necessary.
 *   The function also invalidates the affected part of the caches as
 *   necessary before DMA transfers from outside to memory.
 * - dma_cache_inv(start, size) invalidates the affected parts of the
 *   caches.  Dirty lines of the caches may be written back or simply
 *   be discarded.  This operation is necessary before DMA operations
 *   to memory.
 * - dma_cache_wback(start, size) writes back any dirty lines but does
 *   not invalidate the cache.  This can be used before DMA reads from
 *   memory.
 */

static __inline__ void dma_cache_wback_inv(unsigned long start, unsigned long size)
{
	unsigned long s = start & L1_CACHE_ALIGN_MASK;
	unsigned long e = (start + size) & L1_CACHE_ALIGN_MASK;

	for (; s <= e; s += L1_CACHE_BYTES)
		asm volatile ("ocbp %0, 0" : : "r" (s));
}

static __inline__ void dma_cache_inv(unsigned long start, unsigned long size)
{
	/*
	 * Note that the caller has to be careful with overzealous
	 * invalidation should there be partial cache lines at the
	 * extremities of the specified range.
	 */
	unsigned long s = start & L1_CACHE_ALIGN_MASK;
	unsigned long e = (start + size) & L1_CACHE_ALIGN_MASK;

	for (; s <= e; s += L1_CACHE_BYTES)
		asm volatile ("ocbi %0, 0" : : "r" (s));
}

static __inline__ void dma_cache_wback(unsigned long start, unsigned long size)
{
	unsigned long s = start & L1_CACHE_ALIGN_MASK;
	unsigned long e = (start + size) & L1_CACHE_ALIGN_MASK;

	for (; s <= e; s += L1_CACHE_BYTES)
		asm volatile ("ocbwb %0, 0" : : "r" (s));
}
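
/*
 * Illustrative usage sketch (not part of the original file), following
 * the rules in the comment above, with a hypothetical 1 KiB buffer:
 *
 *	unsigned long buf = (unsigned long)kmalloc(1024, GFP_KERNEL);
 *
 *	dma_cache_wback(buf, 1024);	// before the device reads the buffer
 *	// ... DMA from memory to the device ...
 *
 *	dma_cache_inv(buf, 1024);	// before DMA from the device to memory
 *	// ... DMA into the buffer, then the CPU may read it ...
 */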

/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access.
 */
#define xlate_dev_mem_ptr(p)	__va(p)

/*
 * Convert a virtual cached pointer to an uncached pointer.
 */
#define xlate_dev_kmem_ptr(p)	p

#endif /* __KERNEL__ */
#endif /* __ASM_SH64_IO_H */