diff options
| author | Linus Torvalds <torvalds@linux-foundation.org> | 2014-12-09 20:25:00 -0500 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2014-12-09 20:25:00 -0500 |
| commit | a0e4467726cd26bacb16f13d207ffcfa82ffc07d (patch) | |
| tree | 98b5fcbda0cd787b07d09da90d25c87b3883c567 /include/asm-generic | |
| parent | ed8efd2de75479a175bd21df073d9e97df65a820 (diff) | |
| parent | cb61f6769b8836081940ba26249f1b756400c7df (diff) | |
Merge tag 'asm-generic-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/arnd/asm-generic
Pull asm-generic asm/io.h rewrite from Arnd Bergmann:
"While there normally is no reason to have a pull request for
asm-generic — all changes usually get merged through whichever tree
needs them — I do have a series for 3.19.
There are two sets of patches that change significant portions of
asm/io.h, and this branch contains both in order to resolve the
conflicts:
- Will Deacon has done a set of patches to ensure that all
architectures define {read,write}{b,w,l,q}_relaxed() functions or
get them by including asm-generic/io.h.
These functions are commonly used in ARM-specific drivers to avoid
expensive L2 cache synchronization implied by the normal
{read,write}{b,w,l,q}, but we need to define them on all
architectures in order to share the drivers across architectures
and to enable CONFIG_COMPILE_TEST configurations for them
- Thierry Reding has done an unrelated set of patches that extends
the asm-generic/io.h file to the degree necessary to make it useful
on ARM64 and potentially other architectures"
* tag 'asm-generic-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/arnd/asm-generic: (29 commits)
ARM64: use GENERIC_PCI_IOMAP
sparc: io: remove duplicate relaxed accessors on sparc32
ARM: sa11x0: Use void __iomem * in MMIO accessors
arm64: Use include/asm-generic/io.h
ARM: Use include/asm-generic/io.h
asm-generic/io.h: Implement generic {read,write}s*()
asm-generic/io.h: Reconcile I/O accessor overrides
/dev/mem: Use more consistent data types
Change xlate_dev_{kmem,mem}_ptr() prototypes
ARM: ixp4xx: Properly override I/O accessors
ARM: ixp4xx: Fix build with IXP4XX_INDIRECT_PCI
ARM: ebsa110: Properly override I/O accessors
ARC: Remove redundant PCI_IOBASE declaration
documentation: memory-barriers: clarify relaxed io accessor semantics
x86: io: implement dummy relaxed accessor macros for writes
tile: io: implement dummy relaxed accessor macros for writes
sparc: io: implement dummy relaxed accessor macros for writes
powerpc: io: implement dummy relaxed accessor macros for writes
parisc: io: implement dummy relaxed accessor macros for writes
mn10300: io: implement dummy relaxed accessor macros for writes
...
Diffstat (limited to 'include/asm-generic')
| -rw-r--r-- | include/asm-generic/io.h | 751 |
1 files changed, 624 insertions, 127 deletions
diff --git a/include/asm-generic/io.h b/include/asm-generic/io.h index b8fdc57a7335..9db042304df3 100644 --- a/include/asm-generic/io.h +++ b/include/asm-generic/io.h | |||
| @@ -12,6 +12,7 @@ | |||
| 12 | #define __ASM_GENERIC_IO_H | 12 | #define __ASM_GENERIC_IO_H |
| 13 | 13 | ||
| 14 | #include <asm/page.h> /* I/O is all done through memory accesses */ | 14 | #include <asm/page.h> /* I/O is all done through memory accesses */ |
| 15 | #include <linux/string.h> /* for memset() and memcpy() */ | ||
| 15 | #include <linux/types.h> | 16 | #include <linux/types.h> |
| 16 | 17 | ||
| 17 | #ifdef CONFIG_GENERIC_IOMAP | 18 | #ifdef CONFIG_GENERIC_IOMAP |
| @@ -24,260 +25,691 @@ | |||
| 24 | #define mmiowb() do {} while (0) | 25 | #define mmiowb() do {} while (0) |
| 25 | #endif | 26 | #endif |
| 26 | 27 | ||
| 27 | /*****************************************************************************/ | ||
| 28 | /* | 28 | /* |
| 29 | * readX/writeX() are used to access memory mapped devices. On some | 29 | * __raw_{read,write}{b,w,l,q}() access memory in native endianness. |
| 30 | * architectures the memory mapped IO stuff needs to be accessed | 30 | * |
| 31 | * differently. On the simple architectures, we just read/write the | 31 | * On some architectures memory mapped IO needs to be accessed differently. |
| 32 | * memory location directly. | 32 | * On the simple architectures, we just read/write the memory location |
| 33 | * directly. | ||
| 33 | */ | 34 | */ |
| 35 | |||
| 34 | #ifndef __raw_readb | 36 | #ifndef __raw_readb |
| 37 | #define __raw_readb __raw_readb | ||
| 35 | static inline u8 __raw_readb(const volatile void __iomem *addr) | 38 | static inline u8 __raw_readb(const volatile void __iomem *addr) |
| 36 | { | 39 | { |
| 37 | return *(const volatile u8 __force *) addr; | 40 | return *(const volatile u8 __force *)addr; |
| 38 | } | 41 | } |
| 39 | #endif | 42 | #endif |
| 40 | 43 | ||
| 41 | #ifndef __raw_readw | 44 | #ifndef __raw_readw |
| 45 | #define __raw_readw __raw_readw | ||
| 42 | static inline u16 __raw_readw(const volatile void __iomem *addr) | 46 | static inline u16 __raw_readw(const volatile void __iomem *addr) |
| 43 | { | 47 | { |
| 44 | return *(const volatile u16 __force *) addr; | 48 | return *(const volatile u16 __force *)addr; |
| 45 | } | 49 | } |
| 46 | #endif | 50 | #endif |
| 47 | 51 | ||
| 48 | #ifndef __raw_readl | 52 | #ifndef __raw_readl |
| 53 | #define __raw_readl __raw_readl | ||
| 49 | static inline u32 __raw_readl(const volatile void __iomem *addr) | 54 | static inline u32 __raw_readl(const volatile void __iomem *addr) |
| 50 | { | 55 | { |
| 51 | return *(const volatile u32 __force *) addr; | 56 | return *(const volatile u32 __force *)addr; |
| 52 | } | 57 | } |
| 53 | #endif | 58 | #endif |
| 54 | 59 | ||
| 55 | #define readb __raw_readb | 60 | #ifdef CONFIG_64BIT |
| 56 | 61 | #ifndef __raw_readq | |
| 57 | #define readw readw | 62 | #define __raw_readq __raw_readq |
| 58 | static inline u16 readw(const volatile void __iomem *addr) | 63 | static inline u64 __raw_readq(const volatile void __iomem *addr) |
| 59 | { | ||
| 60 | return __le16_to_cpu(__raw_readw(addr)); | ||
| 61 | } | ||
| 62 | |||
| 63 | #define readl readl | ||
| 64 | static inline u32 readl(const volatile void __iomem *addr) | ||
| 65 | { | 64 | { |
| 66 | return __le32_to_cpu(__raw_readl(addr)); | 65 | return *(const volatile u64 __force *)addr; |
| 67 | } | 66 | } |
| 67 | #endif | ||
| 68 | #endif /* CONFIG_64BIT */ | ||
| 68 | 69 | ||
| 69 | #ifndef __raw_writeb | 70 | #ifndef __raw_writeb |
| 70 | static inline void __raw_writeb(u8 b, volatile void __iomem *addr) | 71 | #define __raw_writeb __raw_writeb |
| 72 | static inline void __raw_writeb(u8 value, volatile void __iomem *addr) | ||
| 71 | { | 73 | { |
| 72 | *(volatile u8 __force *) addr = b; | 74 | *(volatile u8 __force *)addr = value; |
| 73 | } | 75 | } |
| 74 | #endif | 76 | #endif |
| 75 | 77 | ||
| 76 | #ifndef __raw_writew | 78 | #ifndef __raw_writew |
| 77 | static inline void __raw_writew(u16 b, volatile void __iomem *addr) | 79 | #define __raw_writew __raw_writew |
| 80 | static inline void __raw_writew(u16 value, volatile void __iomem *addr) | ||
| 78 | { | 81 | { |
| 79 | *(volatile u16 __force *) addr = b; | 82 | *(volatile u16 __force *)addr = value; |
| 80 | } | 83 | } |
| 81 | #endif | 84 | #endif |
| 82 | 85 | ||
| 83 | #ifndef __raw_writel | 86 | #ifndef __raw_writel |
| 84 | static inline void __raw_writel(u32 b, volatile void __iomem *addr) | 87 | #define __raw_writel __raw_writel |
| 88 | static inline void __raw_writel(u32 value, volatile void __iomem *addr) | ||
| 85 | { | 89 | { |
| 86 | *(volatile u32 __force *) addr = b; | 90 | *(volatile u32 __force *)addr = value; |
| 87 | } | 91 | } |
| 88 | #endif | 92 | #endif |
| 89 | 93 | ||
| 90 | #define writeb __raw_writeb | ||
| 91 | #define writew(b,addr) __raw_writew(__cpu_to_le16(b),addr) | ||
| 92 | #define writel(b,addr) __raw_writel(__cpu_to_le32(b),addr) | ||
| 93 | |||
| 94 | #ifdef CONFIG_64BIT | 94 | #ifdef CONFIG_64BIT |
| 95 | #ifndef __raw_readq | 95 | #ifndef __raw_writeq |
| 96 | static inline u64 __raw_readq(const volatile void __iomem *addr) | 96 | #define __raw_writeq __raw_writeq |
| 97 | static inline void __raw_writeq(u64 value, volatile void __iomem *addr) | ||
| 97 | { | 98 | { |
| 98 | return *(const volatile u64 __force *) addr; | 99 | *(volatile u64 __force *)addr = value; |
| 99 | } | 100 | } |
| 100 | #endif | 101 | #endif |
| 102 | #endif /* CONFIG_64BIT */ | ||
| 101 | 103 | ||
| 102 | #define readq readq | 104 | /* |
| 103 | static inline u64 readq(const volatile void __iomem *addr) | 105 | * {read,write}{b,w,l,q}() access little endian memory and return result in |
| 104 | { | 106 | * native endianness. |
| 105 | return __le64_to_cpu(__raw_readq(addr)); | 107 | */ |
| 106 | } | ||
| 107 | 108 | ||
| 108 | #ifndef __raw_writeq | 109 | #ifndef readb |
| 109 | static inline void __raw_writeq(u64 b, volatile void __iomem *addr) | 110 | #define readb readb |
| 111 | static inline u8 readb(const volatile void __iomem *addr) | ||
| 110 | { | 112 | { |
| 111 | *(volatile u64 __force *) addr = b; | 113 | return __raw_readb(addr); |
| 112 | } | 114 | } |
| 113 | #endif | 115 | #endif |
| 114 | 116 | ||
| 115 | #define writeq(b, addr) __raw_writeq(__cpu_to_le64(b), addr) | 117 | #ifndef readw |
| 116 | #endif /* CONFIG_64BIT */ | 118 | #define readw readw |
| 117 | 119 | static inline u16 readw(const volatile void __iomem *addr) | |
| 118 | #ifndef PCI_IOBASE | 120 | { |
| 119 | #define PCI_IOBASE ((void __iomem *) 0) | 121 | return __le16_to_cpu(__raw_readw(addr)); |
| 122 | } | ||
| 120 | #endif | 123 | #endif |
| 121 | 124 | ||
| 122 | /*****************************************************************************/ | 125 | #ifndef readl |
| 123 | /* | 126 | #define readl readl |
| 124 | * traditional input/output functions | 127 | static inline u32 readl(const volatile void __iomem *addr) |
| 125 | */ | ||
| 126 | |||
| 127 | static inline u8 inb(unsigned long addr) | ||
| 128 | { | 128 | { |
| 129 | return readb(addr + PCI_IOBASE); | 129 | return __le32_to_cpu(__raw_readl(addr)); |
| 130 | } | 130 | } |
| 131 | #endif | ||
| 131 | 132 | ||
| 132 | static inline u16 inw(unsigned long addr) | 133 | #ifdef CONFIG_64BIT |
| 134 | #ifndef readq | ||
| 135 | #define readq readq | ||
| 136 | static inline u64 readq(const volatile void __iomem *addr) | ||
| 133 | { | 137 | { |
| 134 | return readw(addr + PCI_IOBASE); | 138 | return __le64_to_cpu(__raw_readq(addr)); |
| 135 | } | 139 | } |
| 140 | #endif | ||
| 141 | #endif /* CONFIG_64BIT */ | ||
| 136 | 142 | ||
| 137 | static inline u32 inl(unsigned long addr) | 143 | #ifndef writeb |
| 144 | #define writeb writeb | ||
| 145 | static inline void writeb(u8 value, volatile void __iomem *addr) | ||
| 138 | { | 146 | { |
| 139 | return readl(addr + PCI_IOBASE); | 147 | __raw_writeb(value, addr); |
| 140 | } | 148 | } |
| 149 | #endif | ||
| 141 | 150 | ||
| 142 | static inline void outb(u8 b, unsigned long addr) | 151 | #ifndef writew |
| 152 | #define writew writew | ||
| 153 | static inline void writew(u16 value, volatile void __iomem *addr) | ||
| 143 | { | 154 | { |
| 144 | writeb(b, addr + PCI_IOBASE); | 155 | __raw_writew(cpu_to_le16(value), addr); |
| 145 | } | 156 | } |
| 157 | #endif | ||
| 146 | 158 | ||
| 147 | static inline void outw(u16 b, unsigned long addr) | 159 | #ifndef writel |
| 160 | #define writel writel | ||
| 161 | static inline void writel(u32 value, volatile void __iomem *addr) | ||
| 148 | { | 162 | { |
| 149 | writew(b, addr + PCI_IOBASE); | 163 | __raw_writel(__cpu_to_le32(value), addr); |
| 150 | } | 164 | } |
| 165 | #endif | ||
| 151 | 166 | ||
| 152 | static inline void outl(u32 b, unsigned long addr) | 167 | #ifdef CONFIG_64BIT |
| 168 | #ifndef writeq | ||
| 169 | #define writeq writeq | ||
| 170 | static inline void writeq(u64 value, volatile void __iomem *addr) | ||
| 153 | { | 171 | { |
| 154 | writel(b, addr + PCI_IOBASE); | 172 | __raw_writeq(__cpu_to_le64(value), addr); |
| 155 | } | 173 | } |
| 174 | #endif | ||
| 175 | #endif /* CONFIG_64BIT */ | ||
| 176 | |||
| 177 | /* | ||
| 178 | * {read,write}{b,w,l,q}_relaxed() are like the regular version, but | ||
| 179 | * are not guaranteed to provide ordering against spinlocks or memory | ||
| 180 | * accesses. | ||
| 181 | */ | ||
| 182 | #ifndef readb_relaxed | ||
| 183 | #define readb_relaxed readb | ||
| 184 | #endif | ||
| 156 | 185 | ||
| 157 | #define inb_p(addr) inb(addr) | 186 | #ifndef readw_relaxed |
| 158 | #define inw_p(addr) inw(addr) | 187 | #define readw_relaxed readw |
| 159 | #define inl_p(addr) inl(addr) | 188 | #endif |
| 160 | #define outb_p(x, addr) outb((x), (addr)) | ||
| 161 | #define outw_p(x, addr) outw((x), (addr)) | ||
| 162 | #define outl_p(x, addr) outl((x), (addr)) | ||
| 163 | 189 | ||
| 164 | #ifndef insb | 190 | #ifndef readl_relaxed |
| 165 | static inline void insb(unsigned long addr, void *buffer, int count) | 191 | #define readl_relaxed readl |
| 192 | #endif | ||
| 193 | |||
| 194 | #ifndef readq_relaxed | ||
| 195 | #define readq_relaxed readq | ||
| 196 | #endif | ||
| 197 | |||
| 198 | #ifndef writeb_relaxed | ||
| 199 | #define writeb_relaxed writeb | ||
| 200 | #endif | ||
| 201 | |||
| 202 | #ifndef writew_relaxed | ||
| 203 | #define writew_relaxed writew | ||
| 204 | #endif | ||
| 205 | |||
| 206 | #ifndef writel_relaxed | ||
| 207 | #define writel_relaxed writel | ||
| 208 | #endif | ||
| 209 | |||
| 210 | #ifndef writeq_relaxed | ||
| 211 | #define writeq_relaxed writeq | ||
| 212 | #endif | ||
| 213 | |||
| 214 | /* | ||
| 215 | * {read,write}s{b,w,l,q}() repeatedly access the same memory address in | ||
| 216 | * native endianness in 8-, 16-, 32- or 64-bit chunks (@count times). | ||
| 217 | */ | ||
| 218 | #ifndef readsb | ||
| 219 | #define readsb readsb | ||
| 220 | static inline void readsb(const volatile void __iomem *addr, void *buffer, | ||
| 221 | unsigned int count) | ||
| 166 | { | 222 | { |
| 167 | if (count) { | 223 | if (count) { |
| 168 | u8 *buf = buffer; | 224 | u8 *buf = buffer; |
| 225 | |||
| 169 | do { | 226 | do { |
| 170 | u8 x = __raw_readb(addr + PCI_IOBASE); | 227 | u8 x = __raw_readb(addr); |
| 171 | *buf++ = x; | 228 | *buf++ = x; |
| 172 | } while (--count); | 229 | } while (--count); |
| 173 | } | 230 | } |
| 174 | } | 231 | } |
| 175 | #endif | 232 | #endif |
| 176 | 233 | ||
| 177 | #ifndef insw | 234 | #ifndef readsw |
| 178 | static inline void insw(unsigned long addr, void *buffer, int count) | 235 | #define readsw readsw |
| 236 | static inline void readsw(const volatile void __iomem *addr, void *buffer, | ||
| 237 | unsigned int count) | ||
| 179 | { | 238 | { |
| 180 | if (count) { | 239 | if (count) { |
| 181 | u16 *buf = buffer; | 240 | u16 *buf = buffer; |
| 241 | |||
| 182 | do { | 242 | do { |
| 183 | u16 x = __raw_readw(addr + PCI_IOBASE); | 243 | u16 x = __raw_readw(addr); |
| 184 | *buf++ = x; | 244 | *buf++ = x; |
| 185 | } while (--count); | 245 | } while (--count); |
| 186 | } | 246 | } |
| 187 | } | 247 | } |
| 188 | #endif | 248 | #endif |
| 189 | 249 | ||
| 190 | #ifndef insl | 250 | #ifndef readsl |
| 191 | static inline void insl(unsigned long addr, void *buffer, int count) | 251 | #define readsl readsl |
| 252 | static inline void readsl(const volatile void __iomem *addr, void *buffer, | ||
| 253 | unsigned int count) | ||
| 192 | { | 254 | { |
| 193 | if (count) { | 255 | if (count) { |
| 194 | u32 *buf = buffer; | 256 | u32 *buf = buffer; |
| 257 | |||
| 195 | do { | 258 | do { |
| 196 | u32 x = __raw_readl(addr + PCI_IOBASE); | 259 | u32 x = __raw_readl(addr); |
| 197 | *buf++ = x; | 260 | *buf++ = x; |
| 198 | } while (--count); | 261 | } while (--count); |
| 199 | } | 262 | } |
| 200 | } | 263 | } |
| 201 | #endif | 264 | #endif |
| 202 | 265 | ||
| 203 | #ifndef outsb | 266 | #ifdef CONFIG_64BIT |
| 204 | static inline void outsb(unsigned long addr, const void *buffer, int count) | 267 | #ifndef readsq |
| 268 | #define readsq readsq | ||
| 269 | static inline void readsq(const volatile void __iomem *addr, void *buffer, | ||
| 270 | unsigned int count) | ||
| 271 | { | ||
| 272 | if (count) { | ||
| 273 | u64 *buf = buffer; | ||
| 274 | |||
| 275 | do { | ||
| 276 | u64 x = __raw_readq(addr); | ||
| 277 | *buf++ = x; | ||
| 278 | } while (--count); | ||
| 279 | } | ||
| 280 | } | ||
| 281 | #endif | ||
| 282 | #endif /* CONFIG_64BIT */ | ||
| 283 | |||
| 284 | #ifndef writesb | ||
| 285 | #define writesb writesb | ||
| 286 | static inline void writesb(volatile void __iomem *addr, const void *buffer, | ||
| 287 | unsigned int count) | ||
| 205 | { | 288 | { |
| 206 | if (count) { | 289 | if (count) { |
| 207 | const u8 *buf = buffer; | 290 | const u8 *buf = buffer; |
| 291 | |||
| 208 | do { | 292 | do { |
| 209 | __raw_writeb(*buf++, addr + PCI_IOBASE); | 293 | __raw_writeb(*buf++, addr); |
| 210 | } while (--count); | 294 | } while (--count); |
| 211 | } | 295 | } |
| 212 | } | 296 | } |
| 213 | #endif | 297 | #endif |
| 214 | 298 | ||
| 215 | #ifndef outsw | 299 | #ifndef writesw |
| 216 | static inline void outsw(unsigned long addr, const void *buffer, int count) | 300 | #define writesw writesw |
| 301 | static inline void writesw(volatile void __iomem *addr, const void *buffer, | ||
| 302 | unsigned int count) | ||
| 217 | { | 303 | { |
| 218 | if (count) { | 304 | if (count) { |
| 219 | const u16 *buf = buffer; | 305 | const u16 *buf = buffer; |
| 306 | |||
| 220 | do { | 307 | do { |
| 221 | __raw_writew(*buf++, addr + PCI_IOBASE); | 308 | __raw_writew(*buf++, addr); |
| 222 | } while (--count); | 309 | } while (--count); |
| 223 | } | 310 | } |
| 224 | } | 311 | } |
| 225 | #endif | 312 | #endif |
| 226 | 313 | ||
| 227 | #ifndef outsl | 314 | #ifndef writesl |
| 228 | static inline void outsl(unsigned long addr, const void *buffer, int count) | 315 | #define writesl writesl |
| 316 | static inline void writesl(volatile void __iomem *addr, const void *buffer, | ||
| 317 | unsigned int count) | ||
| 229 | { | 318 | { |
| 230 | if (count) { | 319 | if (count) { |
| 231 | const u32 *buf = buffer; | 320 | const u32 *buf = buffer; |
| 321 | |||
| 232 | do { | 322 | do { |
| 233 | __raw_writel(*buf++, addr + PCI_IOBASE); | 323 | __raw_writel(*buf++, addr); |
| 234 | } while (--count); | 324 | } while (--count); |
| 235 | } | 325 | } |
| 236 | } | 326 | } |
| 237 | #endif | 327 | #endif |
| 238 | 328 | ||
| 239 | #ifndef CONFIG_GENERIC_IOMAP | 329 | #ifdef CONFIG_64BIT |
| 240 | #define ioread8(addr) readb(addr) | 330 | #ifndef writesq |
| 241 | #define ioread16(addr) readw(addr) | 331 | #define writesq writesq |
| 242 | #define ioread16be(addr) __be16_to_cpu(__raw_readw(addr)) | 332 | static inline void writesq(volatile void __iomem *addr, const void *buffer, |
| 243 | #define ioread32(addr) readl(addr) | 333 | unsigned int count) |
| 244 | #define ioread32be(addr) __be32_to_cpu(__raw_readl(addr)) | 334 | { |
| 245 | 335 | if (count) { | |
| 246 | #define iowrite8(v, addr) writeb((v), (addr)) | 336 | const u64 *buf = buffer; |
| 247 | #define iowrite16(v, addr) writew((v), (addr)) | 337 | |
| 248 | #define iowrite16be(v, addr) __raw_writew(__cpu_to_be16(v), addr) | 338 | do { |
| 249 | #define iowrite32(v, addr) writel((v), (addr)) | 339 | __raw_writeq(*buf++, addr); |
| 250 | #define iowrite32be(v, addr) __raw_writel(__cpu_to_be32(v), addr) | 340 | } while (--count); |
| 251 | 341 | } | |
| 252 | #define ioread8_rep(p, dst, count) \ | 342 | } |
| 253 | insb((unsigned long) (p), (dst), (count)) | 343 | #endif |
| 254 | #define ioread16_rep(p, dst, count) \ | 344 | #endif /* CONFIG_64BIT */ |
| 255 | insw((unsigned long) (p), (dst), (count)) | 345 | |
| 256 | #define ioread32_rep(p, dst, count) \ | 346 | #ifndef PCI_IOBASE |
| 257 | insl((unsigned long) (p), (dst), (count)) | 347 | #define PCI_IOBASE ((void __iomem *)0) |
| 258 | 348 | #endif | |
| 259 | #define iowrite8_rep(p, src, count) \ | ||
| 260 | outsb((unsigned long) (p), (src), (count)) | ||
| 261 | #define iowrite16_rep(p, src, count) \ | ||
| 262 | outsw((unsigned long) (p), (src), (count)) | ||
| 263 | #define iowrite32_rep(p, src, count) \ | ||
| 264 | outsl((unsigned long) (p), (src), (count)) | ||
| 265 | #endif /* CONFIG_GENERIC_IOMAP */ | ||
| 266 | 349 | ||
| 267 | #ifndef IO_SPACE_LIMIT | 350 | #ifndef IO_SPACE_LIMIT |
| 268 | #define IO_SPACE_LIMIT 0xffff | 351 | #define IO_SPACE_LIMIT 0xffff |
| 269 | #endif | 352 | #endif |
| 270 | 353 | ||
| 354 | /* | ||
| 355 | * {in,out}{b,w,l}() access little endian I/O. {in,out}{b,w,l}_p() can be | ||
| 356 | * implemented on hardware that needs an additional delay for I/O accesses to | ||
| 357 | * take effect. | ||
| 358 | */ | ||
| 359 | |||
| 360 | #ifndef inb | ||
| 361 | #define inb inb | ||
| 362 | static inline u8 inb(unsigned long addr) | ||
| 363 | { | ||
| 364 | return readb(PCI_IOBASE + addr); | ||
| 365 | } | ||
| 366 | #endif | ||
| 367 | |||
| 368 | #ifndef inw | ||
| 369 | #define inw inw | ||
| 370 | static inline u16 inw(unsigned long addr) | ||
| 371 | { | ||
| 372 | return readw(PCI_IOBASE + addr); | ||
| 373 | } | ||
| 374 | #endif | ||
| 375 | |||
| 376 | #ifndef inl | ||
| 377 | #define inl inl | ||
| 378 | static inline u32 inl(unsigned long addr) | ||
| 379 | { | ||
| 380 | return readl(PCI_IOBASE + addr); | ||
| 381 | } | ||
| 382 | #endif | ||
| 383 | |||
| 384 | #ifndef outb | ||
| 385 | #define outb outb | ||
| 386 | static inline void outb(u8 value, unsigned long addr) | ||
| 387 | { | ||
| 388 | writeb(value, PCI_IOBASE + addr); | ||
| 389 | } | ||
| 390 | #endif | ||
| 391 | |||
| 392 | #ifndef outw | ||
| 393 | #define outw outw | ||
| 394 | static inline void outw(u16 value, unsigned long addr) | ||
| 395 | { | ||
| 396 | writew(value, PCI_IOBASE + addr); | ||
| 397 | } | ||
| 398 | #endif | ||
| 399 | |||
| 400 | #ifndef outl | ||
| 401 | #define outl outl | ||
| 402 | static inline void outl(u32 value, unsigned long addr) | ||
| 403 | { | ||
| 404 | writel(value, PCI_IOBASE + addr); | ||
| 405 | } | ||
| 406 | #endif | ||
| 407 | |||
| 408 | #ifndef inb_p | ||
| 409 | #define inb_p inb_p | ||
| 410 | static inline u8 inb_p(unsigned long addr) | ||
| 411 | { | ||
| 412 | return inb(addr); | ||
| 413 | } | ||
| 414 | #endif | ||
| 415 | |||
| 416 | #ifndef inw_p | ||
| 417 | #define inw_p inw_p | ||
| 418 | static inline u16 inw_p(unsigned long addr) | ||
| 419 | { | ||
| 420 | return inw(addr); | ||
| 421 | } | ||
| 422 | #endif | ||
| 423 | |||
| 424 | #ifndef inl_p | ||
| 425 | #define inl_p inl_p | ||
| 426 | static inline u32 inl_p(unsigned long addr) | ||
| 427 | { | ||
| 428 | return inl(addr); | ||
| 429 | } | ||
| 430 | #endif | ||
| 431 | |||
| 432 | #ifndef outb_p | ||
| 433 | #define outb_p outb_p | ||
| 434 | static inline void outb_p(u8 value, unsigned long addr) | ||
| 435 | { | ||
| 436 | outb(value, addr); | ||
| 437 | } | ||
| 438 | #endif | ||
| 439 | |||
| 440 | #ifndef outw_p | ||
| 441 | #define outw_p outw_p | ||
| 442 | static inline void outw_p(u16 value, unsigned long addr) | ||
| 443 | { | ||
| 444 | outw(value, addr); | ||
| 445 | } | ||
| 446 | #endif | ||
| 447 | |||
| 448 | #ifndef outl_p | ||
| 449 | #define outl_p outl_p | ||
| 450 | static inline void outl_p(u32 value, unsigned long addr) | ||
| 451 | { | ||
| 452 | outl(value, addr); | ||
| 453 | } | ||
| 454 | #endif | ||
| 455 | |||
| 456 | /* | ||
| 457 | * {in,out}s{b,w,l}{,_p}() are variants of the above that repeatedly access a | ||
| 458 | * single I/O port multiple times. | ||
| 459 | */ | ||
| 460 | |||
| 461 | #ifndef insb | ||
| 462 | #define insb insb | ||
| 463 | static inline void insb(unsigned long addr, void *buffer, unsigned int count) | ||
| 464 | { | ||
| 465 | readsb(PCI_IOBASE + addr, buffer, count); | ||
| 466 | } | ||
| 467 | #endif | ||
| 468 | |||
| 469 | #ifndef insw | ||
| 470 | #define insw insw | ||
| 471 | static inline void insw(unsigned long addr, void *buffer, unsigned int count) | ||
| 472 | { | ||
| 473 | readsw(PCI_IOBASE + addr, buffer, count); | ||
| 474 | } | ||
| 475 | #endif | ||
| 476 | |||
| 477 | #ifndef insl | ||
| 478 | #define insl insl | ||
| 479 | static inline void insl(unsigned long addr, void *buffer, unsigned int count) | ||
| 480 | { | ||
| 481 | readsl(PCI_IOBASE + addr, buffer, count); | ||
| 482 | } | ||
| 483 | #endif | ||
| 484 | |||
| 485 | #ifndef outsb | ||
| 486 | #define outsb outsb | ||
| 487 | static inline void outsb(unsigned long addr, const void *buffer, | ||
| 488 | unsigned int count) | ||
| 489 | { | ||
| 490 | writesb(PCI_IOBASE + addr, buffer, count); | ||
| 491 | } | ||
| 492 | #endif | ||
| 493 | |||
| 494 | #ifndef outsw | ||
| 495 | #define outsw outsw | ||
| 496 | static inline void outsw(unsigned long addr, const void *buffer, | ||
| 497 | unsigned int count) | ||
| 498 | { | ||
| 499 | writesw(PCI_IOBASE + addr, buffer, count); | ||
| 500 | } | ||
| 501 | #endif | ||
| 502 | |||
| 503 | #ifndef outsl | ||
| 504 | #define outsl outsl | ||
| 505 | static inline void outsl(unsigned long addr, const void *buffer, | ||
| 506 | unsigned int count) | ||
| 507 | { | ||
| 508 | writesl(PCI_IOBASE + addr, buffer, count); | ||
| 509 | } | ||
| 510 | #endif | ||
| 511 | |||
| 512 | #ifndef insb_p | ||
| 513 | #define insb_p insb_p | ||
| 514 | static inline void insb_p(unsigned long addr, void *buffer, unsigned int count) | ||
| 515 | { | ||
| 516 | insb(addr, buffer, count); | ||
| 517 | } | ||
| 518 | #endif | ||
| 519 | |||
| 520 | #ifndef insw_p | ||
| 521 | #define insw_p insw_p | ||
| 522 | static inline void insw_p(unsigned long addr, void *buffer, unsigned int count) | ||
| 523 | { | ||
| 524 | insw(addr, buffer, count); | ||
| 525 | } | ||
| 526 | #endif | ||
| 527 | |||
| 528 | #ifndef insl_p | ||
| 529 | #define insl_p insl_p | ||
| 530 | static inline void insl_p(unsigned long addr, void *buffer, unsigned int count) | ||
| 531 | { | ||
| 532 | insl(addr, buffer, count); | ||
| 533 | } | ||
| 534 | #endif | ||
| 535 | |||
| 536 | #ifndef outsb_p | ||
| 537 | #define outsb_p outsb_p | ||
| 538 | static inline void outsb_p(unsigned long addr, const void *buffer, | ||
| 539 | unsigned int count) | ||
| 540 | { | ||
| 541 | outsb(addr, buffer, count); | ||
| 542 | } | ||
| 543 | #endif | ||
| 544 | |||
| 545 | #ifndef outsw_p | ||
| 546 | #define outsw_p outsw_p | ||
| 547 | static inline void outsw_p(unsigned long addr, const void *buffer, | ||
| 548 | unsigned int count) | ||
| 549 | { | ||
| 550 | outsw(addr, buffer, count); | ||
| 551 | } | ||
| 552 | #endif | ||
| 553 | |||
| 554 | #ifndef outsl_p | ||
| 555 | #define outsl_p outsl_p | ||
| 556 | static inline void outsl_p(unsigned long addr, const void *buffer, | ||
| 557 | unsigned int count) | ||
| 558 | { | ||
| 559 | outsl(addr, buffer, count); | ||
| 560 | } | ||
| 561 | #endif | ||
| 562 | |||
| 563 | #ifndef CONFIG_GENERIC_IOMAP | ||
| 564 | #ifndef ioread8 | ||
| 565 | #define ioread8 ioread8 | ||
| 566 | static inline u8 ioread8(const volatile void __iomem *addr) | ||
| 567 | { | ||
| 568 | return readb(addr); | ||
| 569 | } | ||
| 570 | #endif | ||
| 571 | |||
| 572 | #ifndef ioread16 | ||
| 573 | #define ioread16 ioread16 | ||
| 574 | static inline u16 ioread16(const volatile void __iomem *addr) | ||
| 575 | { | ||
| 576 | return readw(addr); | ||
| 577 | } | ||
| 578 | #endif | ||
| 579 | |||
| 580 | #ifndef ioread32 | ||
| 581 | #define ioread32 ioread32 | ||
| 582 | static inline u32 ioread32(const volatile void __iomem *addr) | ||
| 583 | { | ||
| 584 | return readl(addr); | ||
| 585 | } | ||
| 586 | #endif | ||
| 587 | |||
| 588 | #ifndef iowrite8 | ||
| 589 | #define iowrite8 iowrite8 | ||
| 590 | static inline void iowrite8(u8 value, volatile void __iomem *addr) | ||
| 591 | { | ||
| 592 | writeb(value, addr); | ||
| 593 | } | ||
| 594 | #endif | ||
| 595 | |||
| 596 | #ifndef iowrite16 | ||
| 597 | #define iowrite16 iowrite16 | ||
| 598 | static inline void iowrite16(u16 value, volatile void __iomem *addr) | ||
| 599 | { | ||
| 600 | writew(value, addr); | ||
| 601 | } | ||
| 602 | #endif | ||
| 603 | |||
| 604 | #ifndef iowrite32 | ||
| 605 | #define iowrite32 iowrite32 | ||
| 606 | static inline void iowrite32(u32 value, volatile void __iomem *addr) | ||
| 607 | { | ||
| 608 | writel(value, addr); | ||
| 609 | } | ||
| 610 | #endif | ||
| 611 | |||
| 612 | #ifndef ioread16be | ||
| 613 | #define ioread16be ioread16be | ||
| 614 | static inline u16 ioread16be(const volatile void __iomem *addr) | ||
| 615 | { | ||
| 616 | return __be16_to_cpu(__raw_readw(addr)); | ||
| 617 | } | ||
| 618 | #endif | ||
| 619 | |||
| 620 | #ifndef ioread32be | ||
| 621 | #define ioread32be ioread32be | ||
| 622 | static inline u32 ioread32be(const volatile void __iomem *addr) | ||
| 623 | { | ||
| 624 | return __be32_to_cpu(__raw_readl(addr)); | ||
| 625 | } | ||
| 626 | #endif | ||
| 627 | |||
| 628 | #ifndef iowrite16be | ||
| 629 | #define iowrite16be iowrite16be | ||
| 630 | static inline void iowrite16be(u16 value, void volatile __iomem *addr) | ||
| 631 | { | ||
| 632 | __raw_writew(__cpu_to_be16(value), addr); | ||
| 633 | } | ||
| 634 | #endif | ||
| 635 | |||
| 636 | #ifndef iowrite32be | ||
| 637 | #define iowrite32be iowrite32be | ||
| 638 | static inline void iowrite32be(u32 value, volatile void __iomem *addr) | ||
| 639 | { | ||
| 640 | __raw_writel(__cpu_to_be32(value), addr); | ||
| 641 | } | ||
| 642 | #endif | ||
| 643 | |||
| 644 | #ifndef ioread8_rep | ||
| 645 | #define ioread8_rep ioread8_rep | ||
| 646 | static inline void ioread8_rep(const volatile void __iomem *addr, void *buffer, | ||
| 647 | unsigned int count) | ||
| 648 | { | ||
| 649 | readsb(addr, buffer, count); | ||
| 650 | } | ||
| 651 | #endif | ||
| 652 | |||
| 653 | #ifndef ioread16_rep | ||
| 654 | #define ioread16_rep ioread16_rep | ||
| 655 | static inline void ioread16_rep(const volatile void __iomem *addr, | ||
| 656 | void *buffer, unsigned int count) | ||
| 657 | { | ||
| 658 | readsw(addr, buffer, count); | ||
| 659 | } | ||
| 660 | #endif | ||
| 661 | |||
| 662 | #ifndef ioread32_rep | ||
| 663 | #define ioread32_rep ioread32_rep | ||
| 664 | static inline void ioread32_rep(const volatile void __iomem *addr, | ||
| 665 | void *buffer, unsigned int count) | ||
| 666 | { | ||
| 667 | readsl(addr, buffer, count); | ||
| 668 | } | ||
| 669 | #endif | ||
| 670 | |||
| 671 | #ifndef iowrite8_rep | ||
| 672 | #define iowrite8_rep iowrite8_rep | ||
| 673 | static inline void iowrite8_rep(volatile void __iomem *addr, | ||
| 674 | const void *buffer, | ||
| 675 | unsigned int count) | ||
| 676 | { | ||
| 677 | writesb(addr, buffer, count); | ||
| 678 | } | ||
| 679 | #endif | ||
| 680 | |||
| 681 | #ifndef iowrite16_rep | ||
| 682 | #define iowrite16_rep iowrite16_rep | ||
| 683 | static inline void iowrite16_rep(volatile void __iomem *addr, | ||
| 684 | const void *buffer, | ||
| 685 | unsigned int count) | ||
| 686 | { | ||
| 687 | writesw(addr, buffer, count); | ||
| 688 | } | ||
| 689 | #endif | ||
| 690 | |||
| 691 | #ifndef iowrite32_rep | ||
| 692 | #define iowrite32_rep iowrite32_rep | ||
| 693 | static inline void iowrite32_rep(volatile void __iomem *addr, | ||
| 694 | const void *buffer, | ||
| 695 | unsigned int count) | ||
| 696 | { | ||
| 697 | writesl(addr, buffer, count); | ||
| 698 | } | ||
| 699 | #endif | ||
| 700 | #endif /* CONFIG_GENERIC_IOMAP */ | ||
| 701 | |||
| 271 | #ifdef __KERNEL__ | 702 | #ifdef __KERNEL__ |
| 272 | 703 | ||
| 273 | #include <linux/vmalloc.h> | 704 | #include <linux/vmalloc.h> |
| 274 | #define __io_virt(x) ((void __force *) (x)) | 705 | #define __io_virt(x) ((void __force *)(x)) |
| 275 | 706 | ||
| 276 | #ifndef CONFIG_GENERIC_IOMAP | 707 | #ifndef CONFIG_GENERIC_IOMAP |
| 277 | struct pci_dev; | 708 | struct pci_dev; |
| 278 | extern void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max); | 709 | extern void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max); |
| 279 | 710 | ||
| 280 | #ifndef pci_iounmap | 711 | #ifndef pci_iounmap |
| 712 | #define pci_iounmap pci_iounmap | ||
| 281 | static inline void pci_iounmap(struct pci_dev *dev, void __iomem *p) | 713 | static inline void pci_iounmap(struct pci_dev *dev, void __iomem *p) |
| 282 | { | 714 | { |
| 283 | } | 715 | } |
| @@ -289,11 +721,15 @@ static inline void pci_iounmap(struct pci_dev *dev, void __iomem *p) | |||
| 289 | * These are pretty trivial | 721 | * These are pretty trivial |
| 290 | */ | 722 | */ |
#ifndef virt_to_phys
#define virt_to_phys virt_to_phys
/*
 * Translate a kernel virtual address into a physical address.
 * The generic version assumes a simple linear mapping (__pa).
 */
static inline unsigned long virt_to_phys(volatile void *address)
{
	return __pa((unsigned long)address);
}
#endif
| 296 | 730 | ||
| 731 | #ifndef phys_to_virt | ||
| 732 | #define phys_to_virt phys_to_virt | ||
| 297 | static inline void *phys_to_virt(unsigned long address) | 733 | static inline void *phys_to_virt(unsigned long address) |
| 298 | { | 734 | { |
| 299 | return __va(address); | 735 | return __va(address); |
| @@ -306,37 +742,65 @@ static inline void *phys_to_virt(unsigned long address) | |||
| 306 | * This implementation is for the no-MMU case only... if you have an MMU | 742 | * This implementation is for the no-MMU case only... if you have an MMU |
| 307 | * you'll need to provide your own definitions. | 743 | * you'll need to provide your own definitions. |
| 308 | */ | 744 | */ |
| 745 | |||
| 309 | #ifndef CONFIG_MMU | 746 | #ifndef CONFIG_MMU |
| 310 | static inline void __iomem *ioremap(phys_addr_t offset, unsigned long size) | 747 | #ifndef ioremap |
| 748 | #define ioremap ioremap | ||
| 749 | static inline void __iomem *ioremap(phys_addr_t offset, size_t size) | ||
| 311 | { | 750 | { |
| 312 | return (void __iomem*) (unsigned long)offset; | 751 | return (void __iomem *)(unsigned long)offset; |
| 313 | } | 752 | } |
| 753 | #endif | ||
| 314 | 754 | ||
| 315 | #define __ioremap(offset, size, flags) ioremap(offset, size) | 755 | #ifndef __ioremap |
| 756 | #define __ioremap __ioremap | ||
| 757 | static inline void __iomem *__ioremap(phys_addr_t offset, size_t size, | ||
| 758 | unsigned long flags) | ||
| 759 | { | ||
| 760 | return ioremap(offset, size); | ||
| 761 | } | ||
| 762 | #endif | ||
| 316 | 763 | ||
| 317 | #ifndef ioremap_nocache | 764 | #ifndef ioremap_nocache |
| 318 | #define ioremap_nocache ioremap | 765 | #define ioremap_nocache ioremap_nocache |
| 766 | static inline void __iomem *ioremap_nocache(phys_addr_t offset, size_t size) | ||
| 767 | { | ||
| 768 | return ioremap(offset, size); | ||
| 769 | } | ||
| 319 | #endif | 770 | #endif |
| 320 | 771 | ||
| 321 | #ifndef ioremap_wc | 772 | #ifndef ioremap_wc |
| 322 | #define ioremap_wc ioremap_nocache | 773 | #define ioremap_wc ioremap_wc |
| 774 | static inline void __iomem *ioremap_wc(phys_addr_t offset, size_t size) | ||
| 775 | { | ||
| 776 | return ioremap_nocache(offset, size); | ||
| 777 | } | ||
| 323 | #endif | 778 | #endif |
| 324 | 779 | ||
| 780 | #ifndef iounmap | ||
| 781 | #define iounmap iounmap | ||
| 325 | static inline void iounmap(void __iomem *addr) | 782 | static inline void iounmap(void __iomem *addr) |
| 326 | { | 783 | { |
| 327 | } | 784 | } |
| 785 | #endif | ||
| 328 | #endif /* CONFIG_MMU */ | 786 | #endif /* CONFIG_MMU */ |
| 329 | 787 | ||
| 330 | #ifdef CONFIG_HAS_IOPORT_MAP | 788 | #ifdef CONFIG_HAS_IOPORT_MAP |
| 331 | #ifndef CONFIG_GENERIC_IOMAP | 789 | #ifndef CONFIG_GENERIC_IOMAP |
| 790 | #ifndef ioport_map | ||
| 791 | #define ioport_map ioport_map | ||
| 332 | static inline void __iomem *ioport_map(unsigned long port, unsigned int nr) | 792 | static inline void __iomem *ioport_map(unsigned long port, unsigned int nr) |
| 333 | { | 793 | { |
| 334 | return PCI_IOBASE + (port & IO_SPACE_LIMIT); | 794 | return PCI_IOBASE + (port & IO_SPACE_LIMIT); |
| 335 | } | 795 | } |
| 796 | #endif | ||
| 336 | 797 | ||
| 798 | #ifndef ioport_unmap | ||
| 799 | #define ioport_unmap ioport_unmap | ||
| 337 | static inline void ioport_unmap(void __iomem *p) | 800 | static inline void ioport_unmap(void __iomem *p) |
| 338 | { | 801 | { |
| 339 | } | 802 | } |
| 803 | #endif | ||
| 340 | #else /* CONFIG_GENERIC_IOMAP */ | 804 | #else /* CONFIG_GENERIC_IOMAP */ |
| 341 | extern void __iomem *ioport_map(unsigned long port, unsigned int nr); | 805 | extern void __iomem *ioport_map(unsigned long port, unsigned int nr); |
| 342 | extern void ioport_unmap(void __iomem *p); | 806 | extern void ioport_unmap(void __iomem *p); |
| @@ -344,35 +808,68 @@ extern void ioport_unmap(void __iomem *p); | |||
| 344 | #endif /* CONFIG_HAS_IOPORT_MAP */ | 808 | #endif /* CONFIG_HAS_IOPORT_MAP */ |
| 345 | 809 | ||
#ifndef xlate_dev_kmem_ptr
#define xlate_dev_kmem_ptr xlate_dev_kmem_ptr
/* /dev/kmem pointer translation: the identity by default. */
static inline void *xlate_dev_kmem_ptr(void *addr)
{
	return addr;
}
#endif
| 817 | |||
| 349 | #ifndef xlate_dev_mem_ptr | 818 | #ifndef xlate_dev_mem_ptr |
| 350 | #define xlate_dev_mem_ptr(p) __va(p) | 819 | #define xlate_dev_mem_ptr xlate_dev_mem_ptr |
| 820 | static inline void *xlate_dev_mem_ptr(phys_addr_t addr) | ||
| 821 | { | ||
| 822 | return __va(addr); | ||
| 823 | } | ||
| 824 | #endif | ||
| 825 | |||
| 826 | #ifndef unxlate_dev_mem_ptr | ||
| 827 | #define unxlate_dev_mem_ptr unxlate_dev_mem_ptr | ||
| 828 | static inline void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr) | ||
| 829 | { | ||
| 830 | } | ||
| 351 | #endif | 831 | #endif |
| 352 | 832 | ||
| 353 | #ifdef CONFIG_VIRT_TO_BUS | 833 | #ifdef CONFIG_VIRT_TO_BUS |
#ifndef virt_to_bus
/*
 * Default virt<->bus conversions: assume the bus address space is
 * identical to the kernel virtual address space, i.e. plain casts.
 */
static inline unsigned long virt_to_bus(void *address)
{
	return (unsigned long)address;
}

static inline void *bus_to_virt(unsigned long address)
{
	return (void *)address;
}
#endif
| 365 | #endif | 845 | #endif |
| 366 | 846 | ||
| 367 | #ifndef memset_io | 847 | #ifndef memset_io |
| 368 | #define memset_io(a, b, c) memset(__io_virt(a), (b), (c)) | 848 | #define memset_io memset_io |
| 849 | static inline void memset_io(volatile void __iomem *addr, int value, | ||
| 850 | size_t size) | ||
| 851 | { | ||
| 852 | memset(__io_virt(addr), value, size); | ||
| 853 | } | ||
| 369 | #endif | 854 | #endif |
| 370 | 855 | ||
| 371 | #ifndef memcpy_fromio | 856 | #ifndef memcpy_fromio |
| 372 | #define memcpy_fromio(a, b, c) memcpy((a), __io_virt(b), (c)) | 857 | #define memcpy_fromio memcpy_fromio |
| 858 | static inline void memcpy_fromio(void *buffer, | ||
| 859 | const volatile void __iomem *addr, | ||
| 860 | size_t size) | ||
| 861 | { | ||
| 862 | memcpy(buffer, __io_virt(addr), size); | ||
| 863 | } | ||
| 373 | #endif | 864 | #endif |
| 865 | |||
| 374 | #ifndef memcpy_toio | 866 | #ifndef memcpy_toio |
| 375 | #define memcpy_toio(a, b, c) memcpy(__io_virt(a), (b), (c)) | 867 | #define memcpy_toio memcpy_toio |
| 868 | static inline void memcpy_toio(volatile void __iomem *addr, const void *buffer, | ||
| 869 | size_t size) | ||
| 870 | { | ||
| 871 | memcpy(__io_virt(addr), buffer, size); | ||
| 872 | } | ||
| 376 | #endif | 873 | #endif |
| 377 | 874 | ||
| 378 | #endif /* __KERNEL__ */ | 875 | #endif /* __KERNEL__ */ |
