Diffstat (limited to 'arch/alpha/include/asm/io.h')
-rw-r--r--  arch/alpha/include/asm/io.h | 577
1 file changed, 577 insertions(+), 0 deletions(-)
diff --git a/arch/alpha/include/asm/io.h b/arch/alpha/include/asm/io.h
new file mode 100644
index 000000000000..e971ab000f95
--- /dev/null
+++ b/arch/alpha/include/asm/io.h
@@ -0,0 +1,577 @@
#ifndef __ALPHA_IO_H
#define __ALPHA_IO_H

#ifdef __KERNEL__

#include <linux/kernel.h>
#include <linux/mm.h>
#include <asm/compiler.h>
#include <asm/system.h>
#include <asm/pgtable.h>
#include <asm/machvec.h>
#include <asm/hwrpb.h>

/* The generic header contains only prototypes.  Including it ensures that
   the implementation we have here matches that interface. */
#include <asm-generic/iomap.h>

/* We don't use IO slowdowns on the Alpha, but.. */
#define __SLOW_DOWN_IO do { } while (0)
#define SLOW_DOWN_IO do { } while (0)

/*
 * Virtual -> physical identity mapping starts at this offset
 */
#ifdef USE_48_BIT_KSEG
#define IDENT_ADDR 0xffff800000000000UL
#else
#define IDENT_ADDR 0xfffffc0000000000UL
#endif

/*
 * We try to avoid hae updates (thus the cache), but when we
 * do need to update the hae, we need to do it atomically, so
 * that an interrupt never sees the hae cache out of date with
 * respect to the actual hardware register value.
 */
extern inline void __set_hae(unsigned long new_hae)
{
        unsigned long flags;
        local_irq_save(flags);

        alpha_mv.hae_cache = new_hae;
        *alpha_mv.hae_register = new_hae;
        mb();
        /* Re-read to make sure it was written. */
        new_hae = *alpha_mv.hae_register;

        local_irq_restore(flags);
}

extern inline void set_hae(unsigned long new_hae)
{
        if (new_hae != alpha_mv.hae_cache)
                __set_hae(new_hae);
}

/*
 * Change virtual addresses to physical addresses and vice versa.
 */
#ifdef USE_48_BIT_KSEG
static inline unsigned long virt_to_phys(void *address)
{
        return (unsigned long)address - IDENT_ADDR;
}

static inline void * phys_to_virt(unsigned long address)
{
        return (void *) (address + IDENT_ADDR);
}
#else
static inline unsigned long virt_to_phys(void *address)
{
        unsigned long phys = (unsigned long)address;

        /* Sign-extend from bit 41. */
        phys <<= (64 - 41);
        phys = (long)phys >> (64 - 41);

        /* Crop to the physical address width of the processor. */
        phys &= (1ul << hwrpb->pa_bits) - 1;

        return phys;
}

static inline void * phys_to_virt(unsigned long address)
{
        return (void *)(IDENT_ADDR + (address & ((1ul << 41) - 1)));
}
#endif
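
/*
 * Worked example (illustrative, not part of the original interface):
 * with the 43-bit KSEG layout above, IDENT_ADDR is 0xfffffc0000000000UL,
 * so a direct-mapped kernel pointer round-trips like this:
 *
 *	void *kseg = (void *)(IDENT_ADDR + 0x1000);
 *	unsigned long pa = virt_to_phys(kseg);	(pa == 0x1000)
 *	void *back = phys_to_virt(pa);		(back == kseg)
 *
 * The shift pair sign-extends the low bits of the pointer and the mask
 * crops the result to the CPU's physical address width (hwrpb->pa_bits).
 */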

#define page_to_phys(page) page_to_pa(page)

static inline dma_addr_t __deprecated isa_page_to_bus(struct page *page)
{
        return page_to_phys(page);
}

/* This depends on working iommu. */
#define BIO_VMERGE_BOUNDARY (alpha_mv.mv_pci_tbi ? PAGE_SIZE : 0)

/* Maximum PIO space address supported? */
#define IO_SPACE_LIMIT 0xffff

/*
 * Change addresses as seen by the kernel (virtual) to addresses as
 * seen by a device (bus), and vice versa.
 *
 * Note that this only works for a limited range of kernel addresses,
 * and very well may not span all memory.  Consider this interface
 * deprecated in favour of the DMA-mapping API.
 */
extern unsigned long __direct_map_base;
extern unsigned long __direct_map_size;

static inline unsigned long __deprecated virt_to_bus(void *address)
{
        unsigned long phys = virt_to_phys(address);
        unsigned long bus = phys + __direct_map_base;
        return phys <= __direct_map_size ? bus : 0;
}
#define isa_virt_to_bus virt_to_bus

static inline void * __deprecated bus_to_virt(unsigned long address)
{
        void *virt;

        /* This check is a sanity check but also ensures that bus address 0
           maps to virtual address 0 which is useful to detect null pointers
           (the NCR driver is much simpler if NULL pointers are preserved). */
        address -= __direct_map_base;
        virt = phys_to_virt(address);
        return (long)address <= 0 ? NULL : virt;
}
#define isa_bus_to_virt bus_to_virt
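
/*
 * As noted above, the DMA-mapping API is the preferred replacement for
 * virt_to_bus()/bus_to_virt().  A minimal sketch of that path, for
 * illustration only (assumes a driver-supplied struct device *dev and
 * an already-allocated buffer "buf" of "size" bytes):
 *
 *	dma_addr_t handle;
 *
 *	handle = dma_map_single(dev, buf, size, DMA_TO_DEVICE);
 *	... hand "handle" to the device and run the transfer ...
 *	dma_unmap_single(dev, handle, size, DMA_TO_DEVICE);
 */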

/*
 * There are different chipsets to interface the Alpha CPUs to the world.
 */

#define IO_CONCAT(a,b)	_IO_CONCAT(a,b)
#define _IO_CONCAT(a,b)	a ## _ ## b
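
/*
 * IO_CONCAT() just pastes a prefix onto an accessor name.  For example,
 * with __IO_PREFIX defined to "generic" below, IO_CONCAT(__IO_PREFIX,readl)
 * expands to generic_readl; a chipset-specific kernel is expected to
 * supply its own __IO_PREFIX from the core_*.h header selected below.
 */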

#ifdef CONFIG_ALPHA_GENERIC

/* In a generic kernel, we always go through the machine vector. */

#define REMAP1(TYPE, NAME, QUAL)					\
static inline TYPE generic_##NAME(QUAL void __iomem *addr)		\
{									\
	return alpha_mv.mv_##NAME(addr);				\
}

#define REMAP2(TYPE, NAME, QUAL)					\
static inline void generic_##NAME(TYPE b, QUAL void __iomem *addr)	\
{									\
	alpha_mv.mv_##NAME(b, addr);					\
}

REMAP1(unsigned int, ioread8, /**/)
REMAP1(unsigned int, ioread16, /**/)
REMAP1(unsigned int, ioread32, /**/)
REMAP1(u8, readb, const volatile)
REMAP1(u16, readw, const volatile)
REMAP1(u32, readl, const volatile)
REMAP1(u64, readq, const volatile)

REMAP2(u8, iowrite8, /**/)
REMAP2(u16, iowrite16, /**/)
REMAP2(u32, iowrite32, /**/)
REMAP2(u8, writeb, volatile)
REMAP2(u16, writew, volatile)
REMAP2(u32, writel, volatile)
REMAP2(u64, writeq, volatile)

#undef REMAP1
#undef REMAP2
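
/*
 * For reference, REMAP1(u8, readb, const volatile) above expands to:
 *
 *	static inline u8 generic_readb(const volatile void __iomem *addr)
 *	{
 *		return alpha_mv.mv_readb(addr);
 *	}
 *
 * i.e. every generic_* accessor is simply an indirect call through the
 * machine vector.
 */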

extern inline void __iomem *generic_ioportmap(unsigned long a)
{
        return alpha_mv.mv_ioportmap(a);
}

static inline void __iomem *generic_ioremap(unsigned long a, unsigned long s)
{
        return alpha_mv.mv_ioremap(a, s);
}

static inline void generic_iounmap(volatile void __iomem *a)
{
        return alpha_mv.mv_iounmap(a);
}

static inline int generic_is_ioaddr(unsigned long a)
{
        return alpha_mv.mv_is_ioaddr(a);
}

static inline int generic_is_mmio(const volatile void __iomem *a)
{
        return alpha_mv.mv_is_mmio(a);
}

#define __IO_PREFIX generic
#define generic_trivial_rw_bw 0
#define generic_trivial_rw_lq 0
#define generic_trivial_io_bw 0
#define generic_trivial_io_lq 0
#define generic_trivial_iounmap 0

#else

#if defined(CONFIG_ALPHA_APECS)
# include <asm/core_apecs.h>
#elif defined(CONFIG_ALPHA_CIA)
# include <asm/core_cia.h>
#elif defined(CONFIG_ALPHA_IRONGATE)
# include <asm/core_irongate.h>
#elif defined(CONFIG_ALPHA_JENSEN)
# include <asm/jensen.h>
#elif defined(CONFIG_ALPHA_LCA)
# include <asm/core_lca.h>
#elif defined(CONFIG_ALPHA_MARVEL)
# include <asm/core_marvel.h>
#elif defined(CONFIG_ALPHA_MCPCIA)
# include <asm/core_mcpcia.h>
#elif defined(CONFIG_ALPHA_POLARIS)
# include <asm/core_polaris.h>
#elif defined(CONFIG_ALPHA_T2)
# include <asm/core_t2.h>
#elif defined(CONFIG_ALPHA_TSUNAMI)
# include <asm/core_tsunami.h>
#elif defined(CONFIG_ALPHA_TITAN)
# include <asm/core_titan.h>
#elif defined(CONFIG_ALPHA_WILDFIRE)
# include <asm/core_wildfire.h>
#else
#error "What system is this?"
#endif

#endif /* GENERIC */

/*
 * We always have external versions of these routines.
 */
extern u8 inb(unsigned long port);
extern u16 inw(unsigned long port);
extern u32 inl(unsigned long port);
extern void outb(u8 b, unsigned long port);
extern void outw(u16 b, unsigned long port);
extern void outl(u32 b, unsigned long port);

extern u8 readb(const volatile void __iomem *addr);
extern u16 readw(const volatile void __iomem *addr);
extern u32 readl(const volatile void __iomem *addr);
extern u64 readq(const volatile void __iomem *addr);
extern void writeb(u8 b, volatile void __iomem *addr);
extern void writew(u16 b, volatile void __iomem *addr);
extern void writel(u32 b, volatile void __iomem *addr);
extern void writeq(u64 b, volatile void __iomem *addr);

extern u8 __raw_readb(const volatile void __iomem *addr);
extern u16 __raw_readw(const volatile void __iomem *addr);
extern u32 __raw_readl(const volatile void __iomem *addr);
extern u64 __raw_readq(const volatile void __iomem *addr);
extern void __raw_writeb(u8 b, volatile void __iomem *addr);
extern void __raw_writew(u16 b, volatile void __iomem *addr);
extern void __raw_writel(u32 b, volatile void __iomem *addr);
extern void __raw_writeq(u64 b, volatile void __iomem *addr);

/*
 * Mapping from port numbers to __iomem space is pretty easy.
 */

/* These two have to be extern inline because of the extern prototype from
   <asm-generic/iomap.h>.  It is not legal to mix "extern" and "static" for
   the same declaration. */
extern inline void __iomem *ioport_map(unsigned long port, unsigned int size)
{
        return IO_CONCAT(__IO_PREFIX,ioportmap) (port);
}

extern inline void ioport_unmap(void __iomem *addr)
{
}

static inline void __iomem *ioremap(unsigned long port, unsigned long size)
{
        return IO_CONCAT(__IO_PREFIX,ioremap) (port, size);
}

static inline void __iomem *__ioremap(unsigned long port, unsigned long size,
                                      unsigned long flags)
{
        return ioremap(port, size);
}

static inline void __iomem * ioremap_nocache(unsigned long offset,
                                             unsigned long size)
{
        return ioremap(offset, size);
}

static inline void iounmap(volatile void __iomem *addr)
{
        IO_CONCAT(__IO_PREFIX,iounmap)(addr);
}

static inline int __is_ioaddr(unsigned long addr)
{
        return IO_CONCAT(__IO_PREFIX,is_ioaddr)(addr);
}
#define __is_ioaddr(a) __is_ioaddr((unsigned long)(a))

static inline int __is_mmio(const volatile void __iomem *addr)
{
        return IO_CONCAT(__IO_PREFIX,is_mmio)(addr);
}
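
/*
 * Typical MMIO usage from a driver, for illustration only (the BAR
 * address, size and register offsets are made-up placeholders):
 *
 *	void __iomem *regs = ioremap(bar_phys_addr, bar_size);
 *	if (regs) {
 *		u32 status = readl(regs + STATUS_REG_OFFSET);
 *		writel(status | ENABLE_BIT, regs + CONTROL_REG_OFFSET);
 *		iounmap(regs);
 *	}
 */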


/*
 * If the actual I/O bits are sufficiently trivial, then expand inline.
 */

#if IO_CONCAT(__IO_PREFIX,trivial_io_bw)
extern inline unsigned int ioread8(void __iomem *addr)
{
        unsigned int ret = IO_CONCAT(__IO_PREFIX,ioread8)(addr);
        mb();
        return ret;
}

extern inline unsigned int ioread16(void __iomem *addr)
{
        unsigned int ret = IO_CONCAT(__IO_PREFIX,ioread16)(addr);
        mb();
        return ret;
}

extern inline void iowrite8(u8 b, void __iomem *addr)
{
        IO_CONCAT(__IO_PREFIX,iowrite8)(b, addr);
        mb();
}

extern inline void iowrite16(u16 b, void __iomem *addr)
{
        IO_CONCAT(__IO_PREFIX,iowrite16)(b, addr);
        mb();
}

extern inline u8 inb(unsigned long port)
{
        return ioread8(ioport_map(port, 1));
}

extern inline u16 inw(unsigned long port)
{
        return ioread16(ioport_map(port, 2));
}

extern inline void outb(u8 b, unsigned long port)
{
        iowrite8(b, ioport_map(port, 1));
}

extern inline void outw(u16 b, unsigned long port)
{
        iowrite16(b, ioport_map(port, 2));
}
#endif

#if IO_CONCAT(__IO_PREFIX,trivial_io_lq)
extern inline unsigned int ioread32(void __iomem *addr)
{
        unsigned int ret = IO_CONCAT(__IO_PREFIX,ioread32)(addr);
        mb();
        return ret;
}

extern inline void iowrite32(u32 b, void __iomem *addr)
{
        IO_CONCAT(__IO_PREFIX,iowrite32)(b, addr);
        mb();
}

extern inline u32 inl(unsigned long port)
{
        return ioread32(ioport_map(port, 4));
}

extern inline void outl(u32 b, unsigned long port)
{
        iowrite32(b, ioport_map(port, 4));
}
#endif

#if IO_CONCAT(__IO_PREFIX,trivial_rw_bw) == 1
extern inline u8 __raw_readb(const volatile void __iomem *addr)
{
        return IO_CONCAT(__IO_PREFIX,readb)(addr);
}

extern inline u16 __raw_readw(const volatile void __iomem *addr)
{
        return IO_CONCAT(__IO_PREFIX,readw)(addr);
}

extern inline void __raw_writeb(u8 b, volatile void __iomem *addr)
{
        IO_CONCAT(__IO_PREFIX,writeb)(b, addr);
}

extern inline void __raw_writew(u16 b, volatile void __iomem *addr)
{
        IO_CONCAT(__IO_PREFIX,writew)(b, addr);
}

extern inline u8 readb(const volatile void __iomem *addr)
{
        u8 ret = __raw_readb(addr);
        mb();
        return ret;
}

extern inline u16 readw(const volatile void __iomem *addr)
{
        u16 ret = __raw_readw(addr);
        mb();
        return ret;
}

extern inline void writeb(u8 b, volatile void __iomem *addr)
{
        __raw_writeb(b, addr);
        mb();
}

extern inline void writew(u16 b, volatile void __iomem *addr)
{
        __raw_writew(b, addr);
        mb();
}
#endif

#if IO_CONCAT(__IO_PREFIX,trivial_rw_lq) == 1
extern inline u32 __raw_readl(const volatile void __iomem *addr)
{
        return IO_CONCAT(__IO_PREFIX,readl)(addr);
}

extern inline u64 __raw_readq(const volatile void __iomem *addr)
{
        return IO_CONCAT(__IO_PREFIX,readq)(addr);
}

extern inline void __raw_writel(u32 b, volatile void __iomem *addr)
{
        IO_CONCAT(__IO_PREFIX,writel)(b, addr);
}

extern inline void __raw_writeq(u64 b, volatile void __iomem *addr)
{
        IO_CONCAT(__IO_PREFIX,writeq)(b, addr);
}

extern inline u32 readl(const volatile void __iomem *addr)
{
        u32 ret = __raw_readl(addr);
        mb();
        return ret;
}

extern inline u64 readq(const volatile void __iomem *addr)
{
        u64 ret = __raw_readq(addr);
        mb();
        return ret;
}

extern inline void writel(u32 b, volatile void __iomem *addr)
{
        __raw_writel(b, addr);
        mb();
}

extern inline void writeq(u64 b, volatile void __iomem *addr)
{
        __raw_writeq(b, addr);
        mb();
}
#endif

#define inb_p inb
#define inw_p inw
#define inl_p inl
#define outb_p outb
#define outw_p outw
#define outl_p outl
#define readb_relaxed(addr) __raw_readb(addr)
#define readw_relaxed(addr) __raw_readw(addr)
#define readl_relaxed(addr) __raw_readl(addr)
#define readq_relaxed(addr) __raw_readq(addr)
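
/*
 * Note that the *_relaxed() variants above map straight to the __raw_*
 * accessors, i.e. they skip the mb() that the ordered readb()/readw()/
 * readl()/readq() versions issue after the access.
 */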

#define mmiowb()

/*
 * String version of IO memory access ops:
 */
extern void memcpy_fromio(void *, const volatile void __iomem *, long);
extern void memcpy_toio(volatile void __iomem *, const void *, long);
extern void _memset_c_io(volatile void __iomem *, unsigned long, long);

static inline void memset_io(volatile void __iomem *addr, u8 c, long len)
{
        _memset_c_io(addr, 0x0101010101010101UL * c, len);
}

#define __HAVE_ARCH_MEMSETW_IO
static inline void memsetw_io(volatile void __iomem *addr, u16 c, long len)
{
        _memset_c_io(addr, 0x0001000100010001UL * c, len);
}
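
/*
 * The multiplications above just replicate the fill value across a
 * 64-bit pattern: memset_io(addr, 0xab, len) hands _memset_c_io the
 * pattern 0xabababababababab, and memsetw_io(addr, 0x1234, len) hands
 * it 0x1234123412341234.
 */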

/*
 * String versions of in/out ops:
 */
extern void insb (unsigned long port, void *dst, unsigned long count);
extern void insw (unsigned long port, void *dst, unsigned long count);
extern void insl (unsigned long port, void *dst, unsigned long count);
extern void outsb (unsigned long port, const void *src, unsigned long count);
extern void outsw (unsigned long port, const void *src, unsigned long count);
extern void outsl (unsigned long port, const void *src, unsigned long count);
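
/*
 * These transfer "count" consecutive values to or from a single port.
 * For illustration only: an ISA-style driver could read a 512-byte
 * block as 256 16-bit words with
 *
 *	insw(data_port, buffer, 256);
 */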

/*
 * The Alpha Jensen hardware for some rather strange reason puts
 * the RTC clock at 0x170 instead of 0x70. Probably due to some
 * misguided idea about using 0x70 for NMI stuff.
 *
 * These defines will override the defaults when doing RTC queries
 */

#ifdef CONFIG_ALPHA_GENERIC
# define RTC_PORT(x) ((x) + alpha_mv.rtc_port)
#else
# ifdef CONFIG_ALPHA_JENSEN
#  define RTC_PORT(x) (0x170+(x))
# else
#  define RTC_PORT(x) (0x70 + (x))
# endif
#endif
#define RTC_ALWAYS_BCD 0

/*
 * Some mucking forons use if[n]def writeq to check if platform has it.
 * It's a bloody bad idea and we probably want ARCH_HAS_WRITEQ for them
 * to play with; for now just use cpp anti-recursion logics and make sure
 * that damn thing is defined and expands to itself.
 */

#define writeq writeq
#define readq readq

/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access
 */
#define xlate_dev_mem_ptr(p) __va(p)

/*
 * Convert a virtual cached pointer to an uncached pointer
 */
#define xlate_dev_kmem_ptr(p) p

#endif /* __KERNEL__ */

#endif /* __ALPHA_IO_H */