-rw-r--r--   arch/powerpc/Kconfig                    |  10
-rw-r--r--   arch/powerpc/kernel/Makefile            |   6
-rw-r--r--   arch/powerpc/kernel/io.c                |  18
-rw-r--r--   arch/powerpc/kernel/pci_64.c            |   2
-rw-r--r--   arch/powerpc/kernel/setup_64.c          |   7
-rw-r--r--   arch/powerpc/mm/pgtable_64.c            |  46
-rw-r--r--   arch/powerpc/platforms/iseries/pci.c    | 370
-rw-r--r--   arch/powerpc/platforms/iseries/setup.c  |  12
-rw-r--r--   include/asm-powerpc/eeh.h               | 118
-rw-r--r--   include/asm-powerpc/ide.h               |   7
-rw-r--r--   include/asm-powerpc/io-defs.h           |  56
-rw-r--r--   include/asm-powerpc/io.h                | 641
-rw-r--r--   include/asm-powerpc/machdep.h           |   4
-rw-r--r--   include/asm-ppc/io.h                    |  12
14 files changed, 701 insertions, 608 deletions
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 3d26ba7ad76d..3e89d9d34937 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -394,6 +394,7 @@ config PPC_PSERIES
 config PPC_ISERIES
 	bool "IBM Legacy iSeries"
 	depends on PPC_MULTIPLATFORM && PPC64
+	select PPC_INDIRECT_IO
 
 config PPC_CHRP
 	bool "Common Hardware Reference Platform (CHRP) based machines"
@@ -548,6 +549,15 @@ config PPC_970_NAP
 	bool
 	default n
 
+config PPC_INDIRECT_IO
+	bool
+	select GENERIC_IOMAP
+	default n
+
+config GENERIC_IOMAP
+	bool
+	default n
+
 source "drivers/cpufreq/Kconfig"
 
 config CPU_FREQ_PMAC
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index eba8d118e214..600954df07ae 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -62,7 +62,7 @@ obj-$(CONFIG_PPC_UDBG_16550) += legacy_serial.o udbg_16550.o
 module-$(CONFIG_PPC64)		+= module_64.o
 obj-$(CONFIG_MODULES)		+= $(module-y)
 
-pci64-$(CONFIG_PPC64)		+= pci_64.o pci_dn.o iomap.o
+pci64-$(CONFIG_PPC64)		+= pci_64.o pci_dn.o
 pci32-$(CONFIG_PPC32)		:= pci_32.o
 obj-$(CONFIG_PCI)		+= $(pci64-y) $(pci32-y)
 kexec-$(CONFIG_PPC64)		:= machine_kexec_64.o
@@ -71,6 +71,10 @@ obj-$(CONFIG_KEXEC) += machine_kexec.o crash.o $(kexec-y)
 obj-$(CONFIG_AUDIT)		+= audit.o
 obj64-$(CONFIG_AUDIT)		+= compat_audit.o
 
+ifneq ($(CONFIG_PPC_INDIRECT_IO),y)
+pci64-$(CONFIG_PPC64)		+= iomap.o
+endif
+
 ifeq ($(CONFIG_PPC_ISERIES),y)
 $(obj)/head_64.o: $(obj)/lparmap.s
 AFLAGS_head_64.o += -I$(obj)
diff --git a/arch/powerpc/kernel/io.c b/arch/powerpc/kernel/io.c
index e98180686b35..c1aa07524c26 100644
--- a/arch/powerpc/kernel/io.c
+++ b/arch/powerpc/kernel/io.c
@@ -25,13 +25,11 @@
 #include <asm/firmware.h>
 #include <asm/bug.h>
 
-void _insb(volatile u8 __iomem *port, void *buf, long count)
+void _insb(const volatile u8 __iomem *port, void *buf, long count)
 {
 	u8 *tbuf = buf;
 	u8 tmp;
 
-	BUG_ON(firmware_has_feature(FW_FEATURE_ISERIES));
-
 	if (unlikely(count <= 0))
 		return;
 	asm volatile("sync");
@@ -48,8 +46,6 @@ void _outsb(volatile u8 __iomem *port, const void *buf, long count)
 {
 	const u8 *tbuf = buf;
 
-	BUG_ON(firmware_has_feature(FW_FEATURE_ISERIES));
-
 	if (unlikely(count <= 0))
 		return;
 	asm volatile("sync");
@@ -60,13 +56,11 @@ void _outsb(volatile u8 __iomem *port, const void *buf, long count)
 }
 EXPORT_SYMBOL(_outsb);
 
-void _insw_ns(volatile u16 __iomem *port, void *buf, long count)
+void _insw_ns(const volatile u16 __iomem *port, void *buf, long count)
 {
 	u16 *tbuf = buf;
 	u16 tmp;
 
-	BUG_ON(firmware_has_feature(FW_FEATURE_ISERIES));
-
 	if (unlikely(count <= 0))
 		return;
 	asm volatile("sync");
@@ -83,8 +77,6 @@ void _outsw_ns(volatile u16 __iomem *port, const void *buf, long count)
 {
 	const u16 *tbuf = buf;
 
-	BUG_ON(firmware_has_feature(FW_FEATURE_ISERIES));
-
 	if (unlikely(count <= 0))
 		return;
 	asm volatile("sync");
@@ -95,13 +87,11 @@ void _outsw_ns(volatile u16 __iomem *port, const void *buf, long count)
 }
 EXPORT_SYMBOL(_outsw_ns);
 
-void _insl_ns(volatile u32 __iomem *port, void *buf, long count)
+void _insl_ns(const volatile u32 __iomem *port, void *buf, long count)
 {
 	u32 *tbuf = buf;
 	u32 tmp;
 
-	BUG_ON(firmware_has_feature(FW_FEATURE_ISERIES));
-
 	if (unlikely(count <= 0))
 		return;
 	asm volatile("sync");
@@ -118,8 +108,6 @@ void _outsl_ns(volatile u32 __iomem *port, const void *buf, long count)
 {
 	const u32 *tbuf = buf;
 
-	BUG_ON(firmware_has_feature(FW_FEATURE_ISERIES));
-
 	if (unlikely(count <= 0))
 		return;
 	asm volatile("sync");
diff --git a/arch/powerpc/kernel/pci_64.c b/arch/powerpc/kernel/pci_64.c
index d800e19ea564..afee470de924 100644
--- a/arch/powerpc/kernel/pci_64.c
+++ b/arch/powerpc/kernel/pci_64.c
@@ -1137,7 +1137,7 @@ int unmap_bus_range(struct pci_bus *bus)
 
 	if (get_bus_io_range(bus, &start_phys, &start_virt, &size))
 		return 1;
-	if (iounmap_explicit((void __iomem *) start_virt, size))
+	if (__iounmap_explicit((void __iomem *) start_virt, size))
 		return 1;
 
 	return 0;
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index f7ad64acf47e..f602a53b1b79 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -599,3 +599,10 @@ void __init setup_per_cpu_areas(void)
 	}
 }
 #endif
+
+
+#ifdef CONFIG_PPC_INDIRECT_IO
+struct ppc_pci_io ppc_pci_io;
+EXPORT_SYMBOL(ppc_pci_io);
+#endif /* CONFIG_PPC_INDIRECT_IO */
+
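The ppc_pci_io instance added above is the global hook table that a platform can fill in to intercept the PCI accessors on CONFIG_PPC_INDIRECT_IO kernels. Its real field list and the wrapper functions are generated from include/asm-powerpc/io-defs.h and io.h (only partly shown further down in this diff), so the following is just a sketch of the dispatch idea, with made-up names:

#include <linux/types.h>
#include <linux/compiler.h>

/* Sketch only: a hand-written subset of such a hook table and a wrapper
 * that prefers the platform hook when one has been installed.  The real
 * kernel generates both the struct members and the wrappers from
 * include/asm-powerpc/io-defs.h rather than writing them out by hand. */
struct pci_io_hooks_sketch {
	u8 (*readb)(const volatile void __iomem *addr);
	void (*writeb)(u8 val, volatile void __iomem *addr);
	/* ...one pointer per accessor listed in io-defs.h... */
};

extern struct pci_io_hooks_sketch pci_io_hooks_sketch;

static inline u8 sketch_readb(const volatile void __iomem *addr)
{
	if (pci_io_hooks_sketch.readb)			/* hook installed? */
		return pci_io_hooks_sketch.readb(addr);
	return *(const volatile u8 __force *)addr;	/* direct MMIO load */
}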
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
index ac64f4aaa509..e9b21846ccbd 100644
--- a/arch/powerpc/mm/pgtable_64.c
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -129,22 +129,12 @@ static void __iomem * __ioremap_com(unsigned long addr, unsigned long pa,
 	return (void __iomem *) (ea + (addr & ~PAGE_MASK));
 }
 
-
-void __iomem *
-ioremap(unsigned long addr, unsigned long size)
-{
-	return __ioremap(addr, size, _PAGE_NO_CACHE | _PAGE_GUARDED);
-}
-
 void __iomem * __ioremap(unsigned long addr, unsigned long size,
 			 unsigned long flags)
 {
 	unsigned long pa, ea;
 	void __iomem *ret;
 
-	if (firmware_has_feature(FW_FEATURE_ISERIES))
-		return (void __iomem *)addr;
-
 	/*
 	 * Choose an address to map it to.
 	 * Once the imalloc system is running, we use it.
@@ -178,6 +168,25 @@ void __iomem * __ioremap(unsigned long addr, unsigned long size,
 	return ret;
 }
 
+
+void __iomem * ioremap(unsigned long addr, unsigned long size)
+{
+	unsigned long flags = _PAGE_NO_CACHE | _PAGE_GUARDED;
+
+	if (ppc_md.ioremap)
+		return ppc_md.ioremap(addr, size, flags);
+	return __ioremap(addr, size, flags);
+}
+
+void __iomem * ioremap_flags(unsigned long addr, unsigned long size,
+			     unsigned long flags)
+{
+	if (ppc_md.ioremap)
+		return ppc_md.ioremap(addr, size, flags);
+	return __ioremap(addr, size, flags);
+}
+
+
 #define IS_PAGE_ALIGNED(_val) ((_val) == ((_val) & PAGE_MASK))
 
 int __ioremap_explicit(unsigned long pa, unsigned long ea,
@@ -235,13 +244,10 @@ int __ioremap_explicit(unsigned long pa, unsigned long ea,
  *
  * XXX what about calls before mem_init_done (ie python_countermeasures())
  */
-void iounmap(volatile void __iomem *token)
+void __iounmap(void __iomem *token)
 {
 	void *addr;
 
-	if (firmware_has_feature(FW_FEATURE_ISERIES))
-		return;
-
 	if (!mem_init_done)
 		return;
 
@@ -250,6 +256,14 @@ void iounmap(volatile void __iomem *token)
 	im_free(addr);
 }
 
+void iounmap(void __iomem *token)
+{
+	if (ppc_md.iounmap)
+		ppc_md.iounmap(token);
+	else
+		__iounmap(token);
+}
+
 static int iounmap_subset_regions(unsigned long addr, unsigned long size)
 {
 	struct vm_struct *area;
@@ -268,7 +282,7 @@ static int iounmap_subset_regions(unsigned long addr, unsigned long size)
 	return 0;
 }
 
-int iounmap_explicit(volatile void __iomem *start, unsigned long size)
+int __iounmap_explicit(void __iomem *start, unsigned long size)
 {
 	struct vm_struct *area;
 	unsigned long addr;
@@ -303,8 +317,10 @@ int iounmap_explicit(volatile void __iomem *start, unsigned long size)
 }
 
 EXPORT_SYMBOL(ioremap);
+EXPORT_SYMBOL(ioremap_flags);
 EXPORT_SYMBOL(__ioremap);
 EXPORT_SYMBOL(iounmap);
+EXPORT_SYMBOL(__iounmap);
 
 void __iomem * reserve_phb_iospace(unsigned long size)
 {
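The new ioremap_flags() export lets a caller choose its own page attributes while still being routed through a platform's ppc_md.ioremap hook when one exists. A hypothetical driver-side usage sketch (the addresses, sizes and the choice of dropping _PAGE_GUARDED for the data window are made up for illustration):

#include <linux/errno.h>
#include <asm/io.h>
#include <asm/pgtable.h>

static void __iomem *regs, *win;

/* Hypothetical device: 4K of control registers mapped the default way
 * (uncached + guarded, i.e. what plain ioremap() gives), plus a data
 * window mapped uncached but without _PAGE_GUARDED. */
static int example_map(void)
{
	regs = ioremap(0xf0000000ul, 0x1000);
	if (!regs)
		return -ENOMEM;
	win = ioremap_flags(0xf8000000ul, 0x100000, _PAGE_NO_CACHE);
	if (!win) {
		iounmap(regs);
		return -ENOMEM;
	}
	return 0;
}

static void example_unmap(void)
{
	iounmap(win);		/* goes through ppc_md.iounmap when set */
	iounmap(regs);
}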
diff --git a/arch/powerpc/platforms/iseries/pci.c b/arch/powerpc/platforms/iseries/pci.c
index a90ae42a7bc2..4a06d9c34986 100644
--- a/arch/powerpc/platforms/iseries/pci.c
+++ b/arch/powerpc/platforms/iseries/pci.c
@@ -156,53 +156,6 @@ static void pci_Log_Error(char *Error_Text, int Bus, int SubBus,
 }
 
 /*
- * iSeries_pcibios_init
- *
- * Description:
- * This function checks for all possible system PCI host bridges that connect
- * PCI buses. The system hypervisor is queried as to the guest partition
- * ownership status. A pci_controller is built for any bus which is partially
- * owned or fully owned by this guest partition.
- */
-void iSeries_pcibios_init(void)
-{
-	struct pci_controller *phb;
-	struct device_node *root = of_find_node_by_path("/");
-	struct device_node *node = NULL;
-
-	if (root == NULL) {
-		printk(KERN_CRIT "iSeries_pcibios_init: can't find root "
-				"of device tree\n");
-		return;
-	}
-	while ((node = of_get_next_child(root, node)) != NULL) {
-		HvBusNumber bus;
-		const u32 *busp;
-
-		if ((node->type == NULL) || (strcmp(node->type, "pci") != 0))
-			continue;
-
-		busp = get_property(node, "bus-range", NULL);
-		if (busp == NULL)
-			continue;
-		bus = *busp;
-		printk("bus %d appears to exist\n", bus);
-		phb = pcibios_alloc_controller(node);
-		if (phb == NULL)
-			continue;
-
-		phb->pci_mem_offset = phb->local_number = bus;
-		phb->first_busno = bus;
-		phb->last_busno = bus;
-		phb->ops = &iSeries_pci_ops;
-	}
-
-	of_node_put(root);
-
-	pci_devs_phb_init();
-}
-
-/*
  * iSeries_pci_final_fixup(void)
  */
 void __init iSeries_pci_final_fixup(void)
@@ -438,11 +391,7 @@ static inline struct device_node *xlate_iomm_address(
 /*
  * Read MM I/O Instructions for the iSeries
  * On MM I/O error, all ones are returned and iSeries_pci_IoError is cal
- * else, data is returned in big Endian format.
- *
- * iSeries_Read_Byte = Read Byte ( 8 bit)
- * iSeries_Read_Word = Read Word (16 bit)
- * iSeries_Read_Long = Read Long (32 bit)
+ * else, data is returned in Big Endian format.
  */
 static u8 iSeries_Read_Byte(const volatile void __iomem *IoAddress)
 {
@@ -462,14 +411,15 @@ static u8 iSeries_Read_Byte(const volatile void __iomem *IoAddress)
 			num_printed = 0;
 		}
 		if (num_printed++ < 10)
-			printk(KERN_ERR "iSeries_Read_Byte: invalid access at IO address %p\n", IoAddress);
+			printk(KERN_ERR "iSeries_Read_Byte: invalid access at IO address %p\n",
+			       IoAddress);
 		return 0xff;
 	}
 	do {
 		HvCall3Ret16(HvCallPciBarLoad8, &ret, dsa, BarOffset, 0);
 	} while (CheckReturnCode("RDB", DevNode, &retry, ret.rc) != 0);
 
-	return (u8)ret.value;
+	return ret.value;
 }
 
 static u16 iSeries_Read_Word(const volatile void __iomem *IoAddress)
@@ -490,7 +440,8 @@ static u16 iSeries_Read_Word(const volatile void __iomem *IoAddress)
 			num_printed = 0;
 		}
 		if (num_printed++ < 10)
-			printk(KERN_ERR "iSeries_Read_Word: invalid access at IO address %p\n", IoAddress);
+			printk(KERN_ERR "iSeries_Read_Word: invalid access at IO address %p\n",
+			       IoAddress);
 		return 0xffff;
 	}
 	do {
@@ -498,7 +449,7 @@ static u16 iSeries_Read_Word(const volatile void __iomem *IoAddress)
 				BarOffset, 0);
 	} while (CheckReturnCode("RDW", DevNode, &retry, ret.rc) != 0);
 
-	return swab16((u16)ret.value);
+	return ret.value;
 }
 
 static u32 iSeries_Read_Long(const volatile void __iomem *IoAddress)
@@ -519,7 +470,8 @@ static u32 iSeries_Read_Long(const volatile void __iomem *IoAddress)
 			num_printed = 0;
 		}
 		if (num_printed++ < 10)
-			printk(KERN_ERR "iSeries_Read_Long: invalid access at IO address %p\n", IoAddress);
+			printk(KERN_ERR "iSeries_Read_Long: invalid access at IO address %p\n",
+			       IoAddress);
 		return 0xffffffff;
 	}
 	do {
@@ -527,15 +479,12 @@ static u32 iSeries_Read_Long(const volatile void __iomem *IoAddress)
 				BarOffset, 0);
 	} while (CheckReturnCode("RDL", DevNode, &retry, ret.rc) != 0);
 
-	return swab32((u32)ret.value);
+	return ret.value;
 }
 
 /*
  * Write MM I/O Instructions for the iSeries
  *
- * iSeries_Write_Byte = Write Byte (8 bit)
- * iSeries_Write_Word = Write Word(16 bit)
- * iSeries_Write_Long = Write Long(32 bit)
  */
 static void iSeries_Write_Byte(u8 data, volatile void __iomem *IoAddress)
 {
@@ -581,11 +530,12 @@ static void iSeries_Write_Word(u16 data, volatile void __iomem *IoAddress)
 			num_printed = 0;
 		}
 		if (num_printed++ < 10)
-			printk(KERN_ERR "iSeries_Write_Word: invalid access at IO address %p\n", IoAddress);
+			printk(KERN_ERR "iSeries_Write_Word: invalid access at IO address %p\n",
+			       IoAddress);
 		return;
 	}
 	do {
-		rc = HvCall4(HvCallPciBarStore16, dsa, BarOffset, swab16(data), 0);
+		rc = HvCall4(HvCallPciBarStore16, dsa, BarOffset, data, 0);
 	} while (CheckReturnCode("WWW", DevNode, &retry, rc) != 0);
 }
 
@@ -607,231 +557,221 @@ static void iSeries_Write_Long(u32 data, volatile void __iomem *IoAddress)
 			num_printed = 0;
 		}
 		if (num_printed++ < 10)
-			printk(KERN_ERR "iSeries_Write_Long: invalid access at IO address %p\n", IoAddress);
+			printk(KERN_ERR "iSeries_Write_Long: invalid access at IO address %p\n",
+			       IoAddress);
 		return;
 	}
 	do {
-		rc = HvCall4(HvCallPciBarStore32, dsa, BarOffset, swab32(data), 0);
+		rc = HvCall4(HvCallPciBarStore32, dsa, BarOffset, data, 0);
 	} while (CheckReturnCode("WWL", DevNode, &retry, rc) != 0);
 }
 
-extern unsigned char __raw_readb(const volatile void __iomem *addr)
+static u8 iseries_readb(const volatile void __iomem *addr)
 {
-	BUG_ON(firmware_has_feature(FW_FEATURE_ISERIES));
-
-	return *(volatile unsigned char __force *)addr;
+	return iSeries_Read_Byte(addr);
 }
-EXPORT_SYMBOL(__raw_readb);
 
-extern unsigned short __raw_readw(const volatile void __iomem *addr)
+static u16 iseries_readw(const volatile void __iomem *addr)
 {
-	BUG_ON(firmware_has_feature(FW_FEATURE_ISERIES));
-
-	return *(volatile unsigned short __force *)addr;
+	return le16_to_cpu(iSeries_Read_Word(addr));
 }
-EXPORT_SYMBOL(__raw_readw);
 
-extern unsigned int __raw_readl(const volatile void __iomem *addr)
+static u32 iseries_readl(const volatile void __iomem *addr)
 {
-	BUG_ON(firmware_has_feature(FW_FEATURE_ISERIES));
-
-	return *(volatile unsigned int __force *)addr;
+	return le32_to_cpu(iSeries_Read_Long(addr));
 }
-EXPORT_SYMBOL(__raw_readl);
 
-extern unsigned long __raw_readq(const volatile void __iomem *addr)
+static u16 iseries_readw_be(const volatile void __iomem *addr)
 {
-	BUG_ON(firmware_has_feature(FW_FEATURE_ISERIES));
-
-	return *(volatile unsigned long __force *)addr;
+	return iSeries_Read_Word(addr);
 }
-EXPORT_SYMBOL(__raw_readq);
 
-extern void __raw_writeb(unsigned char v, volatile void __iomem *addr)
+static u32 iseries_readl_be(const volatile void __iomem *addr)
 {
-	BUG_ON(firmware_has_feature(FW_FEATURE_ISERIES));
-
-	*(volatile unsigned char __force *)addr = v;
+	return iSeries_Read_Long(addr);
 }
-EXPORT_SYMBOL(__raw_writeb);
 
-extern void __raw_writew(unsigned short v, volatile void __iomem *addr)
+static void iseries_writeb(u8 data, volatile void __iomem *addr)
 {
-	BUG_ON(firmware_has_feature(FW_FEATURE_ISERIES));
-
-	*(volatile unsigned short __force *)addr = v;
+	iSeries_Write_Byte(data, addr);
 }
-EXPORT_SYMBOL(__raw_writew);
 
-extern void __raw_writel(unsigned int v, volatile void __iomem *addr)
+static void iseries_writew(u16 data, volatile void __iomem *addr)
 {
-	BUG_ON(firmware_has_feature(FW_FEATURE_ISERIES));
-
-	*(volatile unsigned int __force *)addr = v;
+	iSeries_Write_Word(cpu_to_le16(data), addr);
 }
-EXPORT_SYMBOL(__raw_writel);
 
-extern void __raw_writeq(unsigned long v, volatile void __iomem *addr)
+static void iseries_writel(u32 data, volatile void __iomem *addr)
 {
-	BUG_ON(firmware_has_feature(FW_FEATURE_ISERIES));
-
-	*(volatile unsigned long __force *)addr = v;
+	iSeries_Write_Long(cpu_to_le32(data), addr);
 }
-EXPORT_SYMBOL(__raw_writeq);
 
-int in_8(const volatile unsigned char __iomem *addr)
+static void iseries_writew_be(u16 data, volatile void __iomem *addr)
 {
-	if (firmware_has_feature(FW_FEATURE_ISERIES))
-		return iSeries_Read_Byte(addr);
-	return __in_8(addr);
+	iSeries_Write_Word(data, addr);
 }
-EXPORT_SYMBOL(in_8);
 
-void out_8(volatile unsigned char __iomem *addr, int val)
+static void iseries_writel_be(u32 data, volatile void __iomem *addr)
 {
-	if (firmware_has_feature(FW_FEATURE_ISERIES))
-		iSeries_Write_Byte(val, addr);
-	else
-		__out_8(addr, val);
+	iSeries_Write_Long(data, addr);
 }
-EXPORT_SYMBOL(out_8);
 
-int in_le16(const volatile unsigned short __iomem *addr)
+static void iseries_readsb(const volatile void __iomem *addr, void *buf,
+			   unsigned long count)
 {
-	if (firmware_has_feature(FW_FEATURE_ISERIES))
-		return iSeries_Read_Word(addr);
-	return __in_le16(addr);
+	u8 *dst = buf;
+	while(count-- > 0)
+		*(dst++) = iSeries_Read_Byte(addr);
 }
-EXPORT_SYMBOL(in_le16);
 
-int in_be16(const volatile unsigned short __iomem *addr)
+static void iseries_readsw(const volatile void __iomem *addr, void *buf,
+			   unsigned long count)
 {
-	BUG_ON(firmware_has_feature(FW_FEATURE_ISERIES));
-
-	return __in_be16(addr);
+	u16 *dst = buf;
+	while(count-- > 0)
+		*(dst++) = iSeries_Read_Word(addr);
 }
-EXPORT_SYMBOL(in_be16);
 
-void out_le16(volatile unsigned short __iomem *addr, int val)
+static void iseries_readsl(const volatile void __iomem *addr, void *buf,
+			   unsigned long count)
 {
-	if (firmware_has_feature(FW_FEATURE_ISERIES))
-		iSeries_Write_Word(val, addr);
-	else
-		__out_le16(addr, val);
+	u32 *dst = buf;
+	while(count-- > 0)
+		*(dst++) = iSeries_Read_Long(addr);
 }
-EXPORT_SYMBOL(out_le16);
 
-void out_be16(volatile unsigned short __iomem *addr, int val)
+static void iseries_writesb(volatile void __iomem *addr, const void *buf,
+			    unsigned long count)
 {
-	BUG_ON(firmware_has_feature(FW_FEATURE_ISERIES));
-
-	__out_be16(addr, val);
+	const u8 *src = buf;
+	while(count-- > 0)
+		iSeries_Write_Byte(*(src++), addr);
 }
-EXPORT_SYMBOL(out_be16);
 
-unsigned in_le32(const volatile unsigned __iomem *addr)
+static void iseries_writesw(volatile void __iomem *addr, const void *buf,
+			    unsigned long count)
 {
-	if (firmware_has_feature(FW_FEATURE_ISERIES))
-		return iSeries_Read_Long(addr);
-	return __in_le32(addr);
+	const u16 *src = buf;
+	while(count-- > 0)
+		iSeries_Write_Word(*(src++), addr);
 }
-EXPORT_SYMBOL(in_le32);
 
-unsigned in_be32(const volatile unsigned __iomem *addr)
+static void iseries_writesl(volatile void __iomem *addr, const void *buf,
+			    unsigned long count)
 {
-	BUG_ON(firmware_has_feature(FW_FEATURE_ISERIES));
-
-	return __in_be32(addr);
+	const u32 *src = buf;
+	while(count-- > 0)
+		iSeries_Write_Long(*(src++), addr);
 }
-EXPORT_SYMBOL(in_be32);
 
-void out_le32(volatile unsigned __iomem *addr, int val)
+static void iseries_memset_io(volatile void __iomem *addr, int c,
+			      unsigned long n)
 {
-	if (firmware_has_feature(FW_FEATURE_ISERIES))
-		iSeries_Write_Long(val, addr);
-	else
-		__out_le32(addr, val);
-}
-EXPORT_SYMBOL(out_le32);
+	volatile char __iomem *d = addr;
 
-void out_be32(volatile unsigned __iomem *addr, int val)
-{
-	BUG_ON(firmware_has_feature(FW_FEATURE_ISERIES));
-
-	__out_be32(addr, val);
+	while (n-- > 0)
+		iSeries_Write_Byte(c, d++);
 }
-EXPORT_SYMBOL(out_be32);
 
-unsigned long in_le64(const volatile unsigned long __iomem *addr)
+static void iseries_memcpy_fromio(void *dest, const volatile void __iomem *src,
+				  unsigned long n)
 {
-	BUG_ON(firmware_has_feature(FW_FEATURE_ISERIES));
+	char *d = dest;
+	const volatile char __iomem *s = src;
 
-	return __in_le64(addr);
+	while (n-- > 0)
+		*d++ = iSeries_Read_Byte(s++);
 }
-EXPORT_SYMBOL(in_le64);
 
-unsigned long in_be64(const volatile unsigned long __iomem *addr)
+static void iseries_memcpy_toio(volatile void __iomem *dest, const void *src,
+				unsigned long n)
 {
-	BUG_ON(firmware_has_feature(FW_FEATURE_ISERIES));
+	const char *s = src;
+	volatile char __iomem *d = dest;
 
-	return __in_be64(addr);
+	while (n-- > 0)
+		iSeries_Write_Byte(*s++, d++);
 }
-EXPORT_SYMBOL(in_be64);
-
-void out_le64(volatile unsigned long __iomem *addr, unsigned long val)
-{
-	BUG_ON(firmware_has_feature(FW_FEATURE_ISERIES));
 
-	__out_le64(addr, val);
-}
-EXPORT_SYMBOL(out_le64);
+/* We only set MMIO ops. The default PIO ops will be default
+ * to the MMIO ops + pci_io_base which is 0 on iSeries as
+ * expected so both should work.
+ *
+ * Note that we don't implement the readq/writeq versions as
+ * I don't know of an HV call for doing so. Thus, the default
+ * operation will be used instead, which will fault a the value
+ * return by iSeries for MMIO addresses always hits a non mapped
+ * area. This is as good as the BUG() we used to have there.
+ */
+static struct ppc_pci_io __initdata iseries_pci_io = {
+	.readb = iseries_readb,
+	.readw = iseries_readw,
+	.readl = iseries_readl,
+	.readw_be = iseries_readw_be,
+	.readl_be = iseries_readl_be,
+	.writeb = iseries_writeb,
+	.writew = iseries_writew,
+	.writel = iseries_writel,
+	.writew_be = iseries_writew_be,
+	.writel_be = iseries_writel_be,
+	.readsb = iseries_readsb,
+	.readsw = iseries_readsw,
+	.readsl = iseries_readsl,
+	.writesb = iseries_writesb,
+	.writesw = iseries_writesw,
+	.writesl = iseries_writesl,
+	.memset_io = iseries_memset_io,
+	.memcpy_fromio = iseries_memcpy_fromio,
+	.memcpy_toio = iseries_memcpy_toio,
+};
 
-void out_be64(volatile unsigned long __iomem *addr, unsigned long val)
+/*
+ * iSeries_pcibios_init
+ *
+ * Description:
+ * This function checks for all possible system PCI host bridges that connect
+ * PCI buses. The system hypervisor is queried as to the guest partition
+ * ownership status. A pci_controller is built for any bus which is partially
+ * owned or fully owned by this guest partition.
+ */
+void __init iSeries_pcibios_init(void)
 {
-	BUG_ON(firmware_has_feature(FW_FEATURE_ISERIES));
+	struct pci_controller *phb;
+	struct device_node *root = of_find_node_by_path("/");
+	struct device_node *node = NULL;
 
-	__out_be64(addr, val);
-}
-EXPORT_SYMBOL(out_be64);
+	/* Install IO hooks */
+	ppc_pci_io = iseries_pci_io;
 
-void memset_io(volatile void __iomem *addr, int c, unsigned long n)
-{
-	if (firmware_has_feature(FW_FEATURE_ISERIES)) {
-		volatile char __iomem *d = addr;
+	if (root == NULL) {
+		printk(KERN_CRIT "iSeries_pcibios_init: can't find root "
+				"of device tree\n");
+		return;
+	}
+	while ((node = of_get_next_child(root, node)) != NULL) {
+		HvBusNumber bus;
+		const u32 *busp;
 
-		while (n-- > 0) {
-			iSeries_Write_Byte(c, d++);
-		}
-	} else
-		eeh_memset_io(addr, c, n);
-}
-EXPORT_SYMBOL(memset_io);
+		if ((node->type == NULL) || (strcmp(node->type, "pci") != 0))
+			continue;
 
-void memcpy_fromio(void *dest, const volatile void __iomem *src,
-		unsigned long n)
-{
-	if (firmware_has_feature(FW_FEATURE_ISERIES)) {
-		char *d = dest;
-		const volatile char __iomem *s = src;
+		busp = get_property(node, "bus-range", NULL);
+		if (busp == NULL)
+			continue;
+		bus = *busp;
+		printk("bus %d appears to exist\n", bus);
+		phb = pcibios_alloc_controller(node);
+		if (phb == NULL)
+			continue;
 
-		while (n-- > 0) {
-			*d++ = iSeries_Read_Byte(s++);
-		}
-	} else
-		eeh_memcpy_fromio(dest, src, n);
-}
-EXPORT_SYMBOL(memcpy_fromio);
+		phb->pci_mem_offset = phb->local_number = bus;
+		phb->first_busno = bus;
+		phb->last_busno = bus;
+		phb->ops = &iSeries_pci_ops;
+	}
 
-void memcpy_toio(volatile void __iomem *dest, const void *src, unsigned long n)
-{
-	if (firmware_has_feature(FW_FEATURE_ISERIES)) {
-		const char *s = src;
-		volatile char __iomem *d = dest;
+	of_node_put(root);
 
-		while (n-- > 0) {
-			iSeries_Write_Byte(*s++, d++);
-		}
-	} else
-		eeh_memcpy_toio(dest, src, n);
+	pci_devs_phb_init();
 }
-EXPORT_SYMBOL(memcpy_toio);
+
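One behavioural detail worth noting in the hunk above: the explicit swab16()/swab32() calls at the HvCall sites disappear, and the new iseries_readw()/iseries_writew() wrappers use le16_to_cpu()/cpu_to_le16() instead. On the big-endian ppc64 kernels that iSeries runs, those are the same byte swap, so the little-endian accessors keep their old behaviour. The snippet below is only an illustration of that equivalence, not code from the patch:

#include <linux/types.h>
#include <asm/byteorder.h>

/* On a big-endian kernel le16_to_cpu() is a byte swap, so for any 16-bit
 * value returned by the hypervisor these two helpers produce the same
 * result; the patch just moves the swap from the HvCall site into the
 * accessor wrapper. */
static inline u16 old_way(u16 hv_val)
{
	return swab16(hv_val);				/* what in_le16() used to do */
}

static inline u16 new_way(u16 hv_val)
{
	return le16_to_cpu((__force __le16)hv_val);	/* what iseries_readw() does now */
}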
diff --git a/arch/powerpc/platforms/iseries/setup.c b/arch/powerpc/platforms/iseries/setup.c
index cd8965ec9ddb..2f16d9330cfe 100644
--- a/arch/powerpc/platforms/iseries/setup.c
+++ b/arch/powerpc/platforms/iseries/setup.c
@@ -617,6 +617,16 @@ static void iseries_dedicated_idle(void)
 void __init iSeries_init_IRQ(void) { }
 #endif
 
+static void __iomem *iseries_ioremap(unsigned long address, unsigned long size,
+				     unsigned long flags)
+{
+	return (void __iomem *)address;
+}
+
+static void iseries_iounmap(void __iomem *token)
+{
+}
+
 /*
  * iSeries has no legacy IO, anything calling this function has to
  * fail or bad things will happen
@@ -655,6 +665,8 @@ define_machine(iseries) {
 	.progress	= iSeries_progress,
 	.probe		= iseries_probe,
 	.check_legacy_ioport	= iseries_check_legacy_ioport,
+	.ioremap	= iseries_ioremap,
+	.iounmap	= iseries_iounmap,
 	/* XXX Implement enable_pmcs for iSeries */
 };
 
diff --git a/include/asm-powerpc/eeh.h b/include/asm-powerpc/eeh.h
index 6a784396660b..66481bbf270a 100644
--- a/include/asm-powerpc/eeh.h
+++ b/include/asm-powerpc/eeh.h
@@ -120,10 +120,6 @@ static inline u8 eeh_readb(const volatile void __iomem *addr)
 		return eeh_check_failure(addr, val);
 	return val;
 }
-static inline void eeh_writeb(u8 val, volatile void __iomem *addr)
-{
-	out_8(addr, val);
-}
 
 static inline u16 eeh_readw(const volatile void __iomem *addr)
 {
@@ -132,21 +128,6 @@ static inline u16 eeh_readw(const volatile void __iomem *addr)
 		return eeh_check_failure(addr, val);
 	return val;
 }
-static inline void eeh_writew(u16 val, volatile void __iomem *addr)
-{
-	out_le16(addr, val);
-}
-static inline u16 eeh_raw_readw(const volatile void __iomem *addr)
-{
-	u16 val = in_be16(addr);
-	if (EEH_POSSIBLE_ERROR(val, u16))
-		return eeh_check_failure(addr, val);
-	return val;
-}
-static inline void eeh_raw_writew(u16 val, volatile void __iomem *addr) {
-	volatile u16 __iomem *vaddr = (volatile u16 __iomem *) addr;
-	out_be16(vaddr, val);
-}
 
 static inline u32 eeh_readl(const volatile void __iomem *addr)
 {
@@ -155,44 +136,38 @@ static inline u32 eeh_readl(const volatile void __iomem *addr)
 		return eeh_check_failure(addr, val);
 	return val;
 }
-static inline void eeh_writel(u32 val, volatile void __iomem *addr)
-{
-	out_le32(addr, val);
-}
-static inline u32 eeh_raw_readl(const volatile void __iomem *addr)
+
+static inline u64 eeh_readq(const volatile void __iomem *addr)
 {
-	u32 val = in_be32(addr);
-	if (EEH_POSSIBLE_ERROR(val, u32))
+	u64 val = in_le64(addr);
+	if (EEH_POSSIBLE_ERROR(val, u64))
 		return eeh_check_failure(addr, val);
 	return val;
 }
-static inline void eeh_raw_writel(u32 val, volatile void __iomem *addr)
-{
-	out_be32(addr, val);
-}
 
-static inline u64 eeh_readq(const volatile void __iomem *addr)
+static inline u16 eeh_readw_be(const volatile void __iomem *addr)
 {
-	u64 val = in_le64(addr);
-	if (EEH_POSSIBLE_ERROR(val, u64))
+	u16 val = in_be16(addr);
+	if (EEH_POSSIBLE_ERROR(val, u16))
 		return eeh_check_failure(addr, val);
 	return val;
 }
-static inline void eeh_writeq(u64 val, volatile void __iomem *addr)
+
+static inline u32 eeh_readl_be(const volatile void __iomem *addr)
 {
-	out_le64(addr, val);
+	u32 val = in_be32(addr);
+	if (EEH_POSSIBLE_ERROR(val, u32))
+		return eeh_check_failure(addr, val);
+	return val;
 }
-static inline u64 eeh_raw_readq(const volatile void __iomem *addr)
+
+static inline u64 eeh_readq_be(const volatile void __iomem *addr)
 {
 	u64 val = in_be64(addr);
 	if (EEH_POSSIBLE_ERROR(val, u64))
 		return eeh_check_failure(addr, val);
 	return val;
 }
-static inline void eeh_raw_writeq(u64 val, volatile void __iomem *addr)
-{
-	out_be64(addr, val);
-}
 
 #define EEH_CHECK_ALIGN(v,a) \
 	((((unsigned long)(v)) & ((a) - 1)) == 0)
@@ -292,68 +267,29 @@ static inline void eeh_memcpy_toio(volatile void __iomem *dest, const void *src,
 
 #undef EEH_CHECK_ALIGN
 
-static inline u8 eeh_inb(unsigned long port)
-{
-	u8 val;
-	val = in_8((u8 __iomem *)(port+pci_io_base));
-	if (EEH_POSSIBLE_ERROR(val, u8))
-		return eeh_check_failure((void __iomem *)(port), val);
-	return val;
-}
-
-static inline void eeh_outb(u8 val, unsigned long port)
-{
-	out_8((u8 __iomem *)(port+pci_io_base), val);
-}
-
-static inline u16 eeh_inw(unsigned long port)
-{
-	u16 val;
-	val = in_le16((u16 __iomem *)(port+pci_io_base));
-	if (EEH_POSSIBLE_ERROR(val, u16))
-		return eeh_check_failure((void __iomem *)(port), val);
-	return val;
-}
-
-static inline void eeh_outw(u16 val, unsigned long port)
-{
-	out_le16((u16 __iomem *)(port+pci_io_base), val);
-}
-
-static inline u32 eeh_inl(unsigned long port)
-{
-	u32 val;
-	val = in_le32((u32 __iomem *)(port+pci_io_base));
-	if (EEH_POSSIBLE_ERROR(val, u32))
-		return eeh_check_failure((void __iomem *)(port), val);
-	return val;
-}
-
-static inline void eeh_outl(u32 val, unsigned long port)
-{
-	out_le32((u32 __iomem *)(port+pci_io_base), val);
-}
-
 /* in-string eeh macros */
-static inline void eeh_insb(unsigned long port, void * buf, int ns)
+static inline void eeh_readsb(const volatile void __iomem *addr, void * buf,
+			      int ns)
 {
-	_insb((u8 __iomem *)(port+pci_io_base), buf, ns);
+	_insb(addr, buf, ns);
 	if (EEH_POSSIBLE_ERROR((*(((u8*)buf)+ns-1)), u8))
-		eeh_check_failure((void __iomem *)(port), *(u8*)buf);
+		eeh_check_failure(addr, *(u8*)buf);
 }
 
-static inline void eeh_insw_ns(unsigned long port, void * buf, int ns)
+static inline void eeh_readsw(const volatile void __iomem *addr, void * buf,
+			      int ns)
 {
-	_insw_ns((u16 __iomem *)(port+pci_io_base), buf, ns);
+	_insw(addr, buf, ns);
 	if (EEH_POSSIBLE_ERROR((*(((u16*)buf)+ns-1)), u16))
-		eeh_check_failure((void __iomem *)(port), *(u16*)buf);
+		eeh_check_failure(addr, *(u16*)buf);
 }
 
-static inline void eeh_insl_ns(unsigned long port, void * buf, int nl)
+static inline void eeh_readsl(const volatile void __iomem *addr, void * buf,
+			      int nl)
 {
-	_insl_ns((u32 __iomem *)(port+pci_io_base), buf, nl);
+	_insl(addr, buf, nl);
 	if (EEH_POSSIBLE_ERROR((*(((u32*)buf)+nl-1)), u32))
-		eeh_check_failure((void __iomem *)(port), *(u32*)buf);
+		eeh_check_failure(addr, *(u32*)buf);
 }
 
 #endif /* __KERNEL__ */
diff --git a/include/asm-powerpc/ide.h b/include/asm-powerpc/ide.h
index c8390f9485de..60a8fc429970 100644
--- a/include/asm-powerpc/ide.h
+++ b/include/asm-powerpc/ide.h
@@ -22,10 +22,17 @@
 #endif
 #endif
 
+#ifdef __powerpc64__
+#define __ide_mm_insw(p, a, c)	readsw((void __iomem *)(p), (a), (c))
+#define __ide_mm_insl(p, a, c)	readsl((void __iomem *)(p), (a), (c))
+#define __ide_mm_outsw(p, a, c)	writesw((void __iomem *)(p), (a), (c))
+#define __ide_mm_outsl(p, a, c)	writesl((void __iomem *)(p), (a), (c))
+#else
 #define __ide_mm_insw(p, a, c)	_insw_ns((volatile u16 __iomem *)(p), (a), (c))
 #define __ide_mm_insl(p, a, c)	_insl_ns((volatile u32 __iomem *)(p), (a), (c))
 #define __ide_mm_outsw(p, a, c)	_outsw_ns((volatile u16 __iomem *)(p), (a), (c))
 #define __ide_mm_outsl(p, a, c)	_outsl_ns((volatile u32 __iomem *)(p), (a), (c))
+#endif
 
 #ifndef __powerpc64__
 #include <linux/hdreg.h>
diff --git a/include/asm-powerpc/io-defs.h b/include/asm-powerpc/io-defs.h
new file mode 100644
index 000000000000..5a660f1130db
--- /dev/null
+++ b/include/asm-powerpc/io-defs.h
@@ -0,0 +1,56 @@
+/* This file is meant to be include multiple times by other headers */
+
+DEF_PCI_AC_RET(readb, u8, (const PCI_IO_ADDR addr), (addr))
+DEF_PCI_AC_RET(readw, u16, (const PCI_IO_ADDR addr), (addr))
+DEF_PCI_AC_RET(readl, u32, (const PCI_IO_ADDR addr), (addr))
+DEF_PCI_AC_RET(readq, u64, (const PCI_IO_ADDR addr), (addr))
+DEF_PCI_AC_RET(readw_be, u16, (const PCI_IO_ADDR addr), (addr))
+DEF_PCI_AC_RET(readl_be, u32, (const PCI_IO_ADDR addr), (addr))
+DEF_PCI_AC_RET(readq_be, u64, (const PCI_IO_ADDR addr), (addr))
+DEF_PCI_AC_NORET(writeb, (u8 val, PCI_IO_ADDR addr), (val, addr))
+DEF_PCI_AC_NORET(writew, (u16 val, PCI_IO_ADDR addr), (val, addr))
+DEF_PCI_AC_NORET(writel, (u32 val, PCI_IO_ADDR addr), (val, addr))
+DEF_PCI_AC_NORET(writeq, (u64 val, PCI_IO_ADDR addr), (val, addr))
+DEF_PCI_AC_NORET(writew_be, (u16 val, PCI_IO_ADDR addr), (val, addr))
+DEF_PCI_AC_NORET(writel_be, (u32 val, PCI_IO_ADDR addr), (val, addr))
+DEF_PCI_AC_NORET(writeq_be, (u64 val, PCI_IO_ADDR addr), (val, addr))
+
+DEF_PCI_AC_RET(inb, u8, (unsigned long port), (port))
+DEF_PCI_AC_RET(inw, u16, (unsigned long port), (port))
+DEF_PCI_AC_RET(inl, u32, (unsigned long port), (port))
+DEF_PCI_AC_NORET(outb, (u8 val, unsigned long port), (val, port))
+DEF_PCI_AC_NORET(outw, (u16 val, unsigned long port), (val, port))
+DEF_PCI_AC_NORET(outl, (u32 val, unsigned long port), (val, port))
+
+DEF_PCI_AC_NORET(readsb, (const PCI_IO_ADDR a, void *b, unsigned long c), \
+		 (a, b, c))
+DEF_PCI_AC_NORET(readsw, (const PCI_IO_ADDR a, void *b, unsigned long c), \
+		 (a, b, c))
+DEF_PCI_AC_NORET(readsl, (const PCI_IO_ADDR a, void *b, unsigned long c), \
+		 (a, b, c))
+DEF_PCI_AC_NORET(writesb, (PCI_IO_ADDR a, const void *b, unsigned long c), \
+		 (a, b, c))
+DEF_PCI_AC_NORET(writesw, (PCI_IO_ADDR a, const void *b, unsigned long c), \
+		 (a, b, c))
+DEF_PCI_AC_NORET(writesl, (PCI_IO_ADDR a, const void *b, unsigned long c), \
+		 (a, b, c))
+
+DEF_PCI_AC_NORET(insb, (unsigned long p, void *b, unsigned long c), \
+		 (p, b, c))
+DEF_PCI_AC_NORET(insw, (unsigned long p, void *b, unsigned long c), \
+		 (p, b, c))
+DEF_PCI_AC_NORET(insl, (unsigned long p, void *b, unsigned long c), \
+		 (p, b, c))
+DEF_PCI_AC_NORET(outsb, (unsigned long p, const void *b, unsigned long c), \
+		 (p, b, c))
+DEF_PCI_AC_NORET(outsw, (unsigned long p, const void *b, unsigned long c), \
+		 (p, b, c))
+DEF_PCI_AC_NORET(outsl, (unsigned long p, const void *b, unsigned long c), \
+		 (p, b, c))
+
+DEF_PCI_AC_NORET(memset_io, (PCI_IO_ADDR a, int c, unsigned long n), \
+		 (a, c, n))
+DEF_PCI_AC_NORET(memcpy_fromio,(void *d,const PCI_IO_ADDR s,unsigned long n), \
+		 (d, s, n))
+DEF_PCI_AC_NORET(memcpy_toio,(PCI_IO_ADDR d,const void *s,unsigned long n), \
+		 (d, s, n))
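io-defs.h carries no definitions of its own: it is an "X-macro" list meant to be included several times with different definitions of DEF_PCI_AC_RET()/DEF_PCI_AC_NORET(), for example once to declare the members of struct ppc_pci_io and once to emit the accessor wrappers. The io.h side of that expansion is not among the hunks shown here, so the following is only a sketch of the pattern under that assumption:

#include <linux/types.h>
#include <linux/compiler.h>

/* Sketch: one possible expansion of the list into a struct of hook
 * pointers.  PCI_IO_ADDR matches the shortcut defined in the io.h hunk
 * below; the struct name here is made up. */
#define PCI_IO_ADDR	volatile void __iomem *

#define DEF_PCI_AC_RET(name, ret, at, al)	ret (*name) at;
#define DEF_PCI_AC_NORET(name, at, al)		void (*name) at;

struct ppc_pci_io_sketch {
#include <asm/io-defs.h>
};

#undef DEF_PCI_AC_RET
#undef DEF_PCI_AC_NORET
#undef PCI_IO_ADDR

A second expansion with different macro bodies can then generate one wrapper function per entry, dispatching through the hook table the way the sketch after setup_64.c above illustrates.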
diff --git a/include/asm-powerpc/io.h b/include/asm-powerpc/io.h index c2c5f14b5f5f..03e843fc1437 100644 --- a/include/asm-powerpc/io.h +++ b/include/asm-powerpc/io.h | |||
@@ -31,57 +31,122 @@ extern int check_legacy_ioport(unsigned long base_port); | |||
31 | 31 | ||
32 | #define SLOW_DOWN_IO | 32 | #define SLOW_DOWN_IO |
33 | 33 | ||
34 | /* | ||
35 | * | ||
36 | * Low level MMIO accessors | ||
37 | * | ||
38 | * This provides the non-bus specific accessors to MMIO. Those are PowerPC | ||
39 | * specific and thus shouldn't be used in generic code. The accessors | ||
40 | * provided here are: | ||
41 | * | ||
42 | * in_8, in_le16, in_be16, in_le32, in_be32, in_le64, in_be64 | ||
43 | * out_8, out_le16, out_be16, out_le32, out_be32, out_le64, out_be64 | ||
44 | * _insb, _insw_ns, _insl_ns, _outsb, _outsw_ns, _outsl_ns | ||
45 | * | ||
46 | * Those operate directly on a kernel virtual address. Note that the prototype | ||
47 | * for the out_* accessors has the arguments in opposite order from the usual | ||
48 | * linux PCI accessors. Unlike those, they take the address first and the value | ||
49 | * next. | ||
50 | * | ||
51 | * Note: I might drop the _ns suffix on the stream operations soon as it is | ||
52 | * simply normal for stream operations to not swap in the first place. | ||
53 | * | ||
54 | */ | ||
55 | |||
56 | #define IO_SET_SYNC_FLAG() do { get_paca()->io_sync = 1; } while(0) | ||
57 | |||
58 | #define DEF_MMIO_IN(name, type, insn) \ | ||
59 | static inline type name(const volatile type __iomem *addr) \ | ||
60 | { \ | ||
61 | type ret; \ | ||
62 | __asm__ __volatile__("sync;" insn ";twi 0,%0,0;isync" \ | ||
63 | : "=r" (ret) : "r" (addr), "m" (*addr)); \ | ||
64 | return ret; \ | ||
65 | } | ||
66 | |||
67 | #define DEF_MMIO_OUT(name, type, insn) \ | ||
68 | static inline void name(volatile type __iomem *addr, type val) \ | ||
69 | { \ | ||
70 | __asm__ __volatile__("sync;" insn \ | ||
71 | : "=m" (*addr) : "r" (val), "r" (addr)); \ | ||
72 | IO_SET_SYNC_FLAG(); \ | ||
73 | } | ||
74 | |||
75 | |||
76 | #define DEF_MMIO_IN_BE(name, size, insn) \ | ||
77 | DEF_MMIO_IN(name, u##size, __stringify(insn)"%U2%X2 %0,%2") | ||
78 | #define DEF_MMIO_IN_LE(name, size, insn) \ | ||
79 | DEF_MMIO_IN(name, u##size, __stringify(insn)" %0,0,%1") | ||
80 | |||
81 | #define DEF_MMIO_OUT_BE(name, size, insn) \ | ||
82 | DEF_MMIO_OUT(name, u##size, __stringify(insn)"%U0%X0 %1,%0") | ||
83 | #define DEF_MMIO_OUT_LE(name, size, insn) \ | ||
84 | DEF_MMIO_OUT(name, u##size, __stringify(insn)" %1,0,%2") | ||
85 | |||
86 | DEF_MMIO_IN_BE(in_8, 8, lbz); | ||
87 | DEF_MMIO_IN_BE(in_be16, 16, lhz); | ||
88 | DEF_MMIO_IN_BE(in_be32, 32, lwz); | ||
89 | DEF_MMIO_IN_BE(in_be64, 64, ld); | ||
90 | DEF_MMIO_IN_LE(in_le16, 16, lhbrx); | ||
91 | DEF_MMIO_IN_LE(in_le32, 32, lwbrx); | ||
92 | |||
93 | DEF_MMIO_OUT_BE(out_8, 8, stb); | ||
94 | DEF_MMIO_OUT_BE(out_be16, 16, sth); | ||
95 | DEF_MMIO_OUT_BE(out_be32, 32, stw); | ||
96 | DEF_MMIO_OUT_BE(out_be64, 64, std); | ||
97 | DEF_MMIO_OUT_LE(out_le16, 16, sthbrx); | ||
98 | DEF_MMIO_OUT_LE(out_le32, 32, stwbrx); | ||
99 | |||
100 | /* There is no asm instructions for 64 bits reverse loads and stores */ | ||
101 | static inline u64 in_le64(const volatile u64 __iomem *addr) | ||
102 | { | ||
103 | return le64_to_cpu(in_be64(addr)); | ||
104 | } | ||
105 | |||
106 | static inline void out_le64(volatile u64 __iomem *addr, u64 val) | ||
107 | { | ||
108 | out_be64(addr, cpu_to_le64(val)); | ||
109 | } | ||
110 | |||
111 | /* | ||
112 | * Low level IO stream instructions are defined out of line for now | ||
113 | */ | ||
114 | extern void _insb(const volatile u8 __iomem *addr, void *buf, long count); | ||
115 | extern void _outsb(volatile u8 __iomem *addr,const void *buf,long count); | ||
116 | extern void _insw_ns(const volatile u16 __iomem *addr, void *buf, long count); | ||
117 | extern void _outsw_ns(volatile u16 __iomem *addr, const void *buf, long count); | ||
118 | extern void _insl_ns(const volatile u32 __iomem *addr, void *buf, long count); | ||
119 | extern void _outsl_ns(volatile u32 __iomem *addr, const void *buf, long count); | ||
120 | |||
121 | /* The _ns naming is historical and will be removed. For now, just #define | ||
122 | * the non _ns equivalent names | ||
123 | */ | ||
124 | #define _insw _insw_ns | ||
125 | #define _insl _insl_ns | ||
126 | #define _outsw _outsw_ns | ||
127 | #define _outsl _outsl_ns | ||
128 | |||
129 | /* | ||
130 | * | ||
131 | * PCI and standard ISA accessors | ||
132 | * | ||
133 | * Those are globally defined linux accessors for devices on PCI or ISA | ||
134 | * busses. They follow the Linux defined semantics. The current implementation | ||
135 | * for PowerPC is as close as possible to the x86 version of these, and thus | ||
136 | * provides fairly heavy weight barriers for the non-raw versions | ||
137 | * | ||
138 | * In addition, they support a hook mechanism when CONFIG_PPC_INDIRECT_IO | ||
139 | * allowing the platform to provide its own implementation of some or all | ||
140 | * of the accessors. | ||
141 | */ | ||
142 | |||
34 | extern unsigned long isa_io_base; | 143 | extern unsigned long isa_io_base; |
35 | extern unsigned long pci_io_base; | 144 | extern unsigned long pci_io_base; |
36 | 145 | ||
37 | #ifdef CONFIG_PPC_ISERIES | 146 | |
38 | 147 | /* | |
39 | extern int in_8(const volatile unsigned char __iomem *addr); | 148 | * Non ordered and non-swapping "raw" accessors |
40 | extern void out_8(volatile unsigned char __iomem *addr, int val); | 149 | */ |
41 | extern int in_le16(const volatile unsigned short __iomem *addr); | ||
42 | extern int in_be16(const volatile unsigned short __iomem *addr); | ||
43 | extern void out_le16(volatile unsigned short __iomem *addr, int val); | ||
44 | extern void out_be16(volatile unsigned short __iomem *addr, int val); | ||
45 | extern unsigned in_le32(const volatile unsigned __iomem *addr); | ||
46 | extern unsigned in_be32(const volatile unsigned __iomem *addr); | ||
47 | extern void out_le32(volatile unsigned __iomem *addr, int val); | ||
48 | extern void out_be32(volatile unsigned __iomem *addr, int val); | ||
49 | extern unsigned long in_le64(const volatile unsigned long __iomem *addr); | ||
50 | extern unsigned long in_be64(const volatile unsigned long __iomem *addr); | ||
51 | extern void out_le64(volatile unsigned long __iomem *addr, unsigned long val); | ||
52 | extern void out_be64(volatile unsigned long __iomem *addr, unsigned long val); | ||
53 | |||
54 | extern unsigned char __raw_readb(const volatile void __iomem *addr); | ||
55 | extern unsigned short __raw_readw(const volatile void __iomem *addr); | ||
56 | extern unsigned int __raw_readl(const volatile void __iomem *addr); | ||
57 | extern unsigned long __raw_readq(const volatile void __iomem *addr); | ||
58 | extern void __raw_writeb(unsigned char v, volatile void __iomem *addr); | ||
59 | extern void __raw_writew(unsigned short v, volatile void __iomem *addr); | ||
60 | extern void __raw_writel(unsigned int v, volatile void __iomem *addr); | ||
61 | extern void __raw_writeq(unsigned long v, volatile void __iomem *addr); | ||
62 | |||
63 | extern void memset_io(volatile void __iomem *addr, int c, unsigned long n); | ||
64 | extern void memcpy_fromio(void *dest, const volatile void __iomem *src, | ||
65 | unsigned long n); | ||
66 | extern void memcpy_toio(volatile void __iomem *dest, const void *src, | ||
67 | unsigned long n); | ||
68 | |||
69 | #else /* CONFIG_PPC_ISERIES */ | ||
70 | |||
71 | #define in_8(addr) __in_8((addr)) | ||
72 | #define out_8(addr, val) __out_8((addr), (val)) | ||
73 | #define in_le16(addr) __in_le16((addr)) | ||
74 | #define in_be16(addr) __in_be16((addr)) | ||
75 | #define out_le16(addr, val) __out_le16((addr), (val)) | ||
76 | #define out_be16(addr, val) __out_be16((addr), (val)) | ||
77 | #define in_le32(addr) __in_le32((addr)) | ||
78 | #define in_be32(addr) __in_be32((addr)) | ||
79 | #define out_le32(addr, val) __out_le32((addr), (val)) | ||
80 | #define out_be32(addr, val) __out_be32((addr), (val)) | ||
81 | #define in_le64(addr) __in_le64((addr)) | ||
82 | #define in_be64(addr) __in_be64((addr)) | ||
83 | #define out_le64(addr, val) __out_le64((addr), (val)) | ||
84 | #define out_be64(addr, val) __out_be64((addr), (val)) | ||
85 | 150 | ||
86 | static inline unsigned char __raw_readb(const volatile void __iomem *addr) | 151 | static inline unsigned char __raw_readb(const volatile void __iomem *addr) |
87 | { | 152 | { |
@@ -115,52 +180,203 @@ static inline void __raw_writeq(unsigned long v, volatile void __iomem *addr) | |||
115 | { | 180 | { |
116 | *(volatile unsigned long __force *)addr = v; | 181 | *(volatile unsigned long __force *)addr = v; |
117 | } | 182 | } |
118 | #define memset_io(a,b,c) eeh_memset_io((a),(b),(c)) | ||
119 | #define memcpy_fromio(a,b,c) eeh_memcpy_fromio((a),(b),(c)) | ||
120 | #define memcpy_toio(a,b,c) eeh_memcpy_toio((a),(b),(c)) | ||
121 | 183 | ||
122 | #endif /* CONFIG_PPC_ISERIES */ | ||
123 | 184 | ||
124 | /* | 185 | /* |
125 | * The insw/outsw/insl/outsl macros don't do byte-swapping. | 186 | * |
126 | * They are only used in practice for transferring buffers which | 187 | * PCI PIO and MMIO accessors. |
127 | * are arrays of bytes, and byte-swapping is not appropriate in | 188 | * |
128 | * that case. - paulus */ | 189 | */ |
129 | #define insb(port, buf, ns) eeh_insb((port), (buf), (ns)) | 190 | |
130 | #define insw(port, buf, ns) eeh_insw_ns((port), (buf), (ns)) | 191 | #include <asm/eeh.h> |
131 | #define insl(port, buf, nl) eeh_insl_ns((port), (buf), (nl)) | 192 | |
132 | 193 | /* Shortcut to the MMIO argument pointer */ | |
133 | #define outsb(port, buf, ns) _outsb((u8 __iomem *)((port)+pci_io_base), (buf), (ns)) | 194 | #define PCI_IO_ADDR volatile void __iomem * |
134 | #define outsw(port, buf, ns) _outsw_ns((u16 __iomem *)((port)+pci_io_base), (buf), (ns)) | 195 | |
135 | #define outsl(port, buf, nl) _outsl_ns((u32 __iomem *)((port)+pci_io_base), (buf), (nl)) | 196 | /* Indirect IO address tokens: |
136 | 197 | * | |
137 | #define readb(addr) eeh_readb(addr) | 198 | * When CONFIG_PPC_INDIRECT_IO is set, the platform can provide hooks |
138 | #define readw(addr) eeh_readw(addr) | 199 | * on all IOs. |
139 | #define readl(addr) eeh_readl(addr) | 200 | * |
140 | #define readq(addr) eeh_readq(addr) | 201 | * To help platforms that may need to differentiate MMIO addresses in |
141 | #define writeb(data, addr) eeh_writeb((data), (addr)) | 202 | * their hooks, a bitfield is reserved for use by the platform near the |
142 | #define writew(data, addr) eeh_writew((data), (addr)) | 203 | * top of MMIO addresses (not PIO, those have to cope the hard way). |
143 | #define writel(data, addr) eeh_writel((data), (addr)) | 204 | * |
144 | #define writeq(data, addr) eeh_writeq((data), (addr)) | 205 | * This bitfield is 12 bits wide and sits at the top of the IO virtual |
145 | #define inb(port) eeh_inb((unsigned long)port) | 206 | * addresses (PCI_IO_IND_TOKEN_MASK). |
146 | #define outb(val, port) eeh_outb(val, (unsigned long)port) | 207 | * |
147 | #define inw(port) eeh_inw((unsigned long)port) | 208 | * The kernel virtual space is thus: |
148 | #define outw(val, port) eeh_outw(val, (unsigned long)port) | 209 | * |
149 | #define inl(port) eeh_inl((unsigned long)port) | 210 | * 0xD000000000000000 : vmalloc |
150 | #define outl(val, port) eeh_outl(val, (unsigned long)port) | 211 | * 0xD000080000000000 : PCI PHB IO space |
212 | * 0xD000080080000000 : ioremap | ||
213 | * 0xD0000fffffffffff : end of ioremap region | ||
214 | * | ||
215 | * Since the top 4 bits are reserved as the region ID, we thus use | ||
216 | * the next 12 bits and keep 4 bits available for the future if the | ||
217 | * virtual address space is ever to be extended. | ||
218 | * | ||
219 | * The direct IO mapping operations will then mask off those bits | ||
220 | * before doing the actual access, though that only happens when | ||
221 | * CONFIG_PPC_INDIRECT_IO is set, so be careful when you use that | ||
222 | * mechanism. | ||
223 | */ | ||
224 | |||
225 | #ifdef CONFIG_PPC_INDIRECT_IO | ||
226 | #define PCI_IO_IND_TOKEN_MASK 0x0fff000000000000ul | ||
227 | #define PCI_IO_IND_TOKEN_SHIFT 48 | ||
228 | #define PCI_FIX_ADDR(addr) \ | ||
229 | ((PCI_IO_ADDR)(((unsigned long)(addr)) & ~PCI_IO_IND_TOKEN_MASK)) | ||
230 | #define PCI_GET_ADDR_TOKEN(addr) \ | ||
231 | (((unsigned long)(addr) & PCI_IO_IND_TOKEN_MASK) >> \ | ||
232 | PCI_IO_IND_TOKEN_SHIFT) | ||
233 | #define PCI_SET_ADDR_TOKEN(addr, token) \ | ||
234 | do { \ | ||
235 | unsigned long __a = (unsigned long)(addr); \ | ||
236 | __a &= ~PCI_IO_IND_TOKEN_MASK; \ | ||
237 | __a |= ((unsigned long)(token)) << PCI_IO_IND_TOKEN_SHIFT; \ | ||
238 | (addr) = (void __iomem *)__a; \ | ||
239 | } while(0) | ||
240 | #else | ||
241 | #define PCI_FIX_ADDR(addr) (addr) | ||
242 | #endif | ||
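
As an aside, here is a minimal sketch of how these token macros are meant to be used, assuming a hypothetical platform that tags each mapping with a bus number and recovers it in a readb hook. my_map_bar(), my_readb() and the bus-number token are illustrative only, not part of this patch, and the readb prototype is assumed to match the one generated from asm/io-defs.h:

/* Hypothetical: stash a bus number in the token bits of a new mapping,
 * then recover it in an MMIO hook before performing the real access.
 */
static void __iomem *my_map_bar(unsigned long paddr, unsigned long size,
				unsigned long bus_no)
{
	void __iomem *addr = ioremap(paddr, size);

	if (addr != NULL)
		PCI_SET_ADDR_TOKEN(addr, bus_no);	/* token in bits 48-59 */
	return addr;
}

static u8 my_readb(const PCI_IO_ADDR addr)
{
	unsigned long bus_no = PCI_GET_ADDR_TOKEN(addr);

	/* ... per-bus bookkeeping would go here ... */
	(void)bus_no;
	return __do_readb(addr);	/* PCI_FIX_ADDR() strips the token */
}
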
243 | |||
244 | /* The "__do_*" operations below provide the actual "base" implementation | ||
245 | * for each of the defined accessors. Some of them use the out_* functions | ||
246 | * directly, some of them still use EEH, though we might change that in the | ||
247 | * future. Those macros below provide the necessary argument swapping and | ||
248 | * handling of the IO base for PIO. | ||
249 | * | ||
250 | * They are themselves used by the macros that define the actual accessors | ||
251 | * and can be used by the hooks if any. | ||
252 | * | ||
253 | * Note that PIO operations are always defined in terms of their corresponding | ||
254 | * MMIO operations. That allows platforms like iSeries that want to modify the | ||
255 | * behaviour of both to hook only the MMIO version and get both. It's also | ||
256 | * possible to hook directly at the top-level PIO operations if they have to | ||
257 | * be handled differently. | ||
258 | */ | ||
259 | #define __do_writeb(val, addr) out_8(PCI_FIX_ADDR(addr), val) | ||
260 | #define __do_writew(val, addr) out_le16(PCI_FIX_ADDR(addr), val) | ||
261 | #define __do_writel(val, addr) out_le32(PCI_FIX_ADDR(addr), val) | ||
262 | #define __do_writeq(val, addr) out_le64(PCI_FIX_ADDR(addr), val) | ||
263 | #define __do_writew_be(val, addr) out_be16(PCI_FIX_ADDR(addr), val) | ||
264 | #define __do_writel_be(val, addr) out_be32(PCI_FIX_ADDR(addr), val) | ||
265 | #define __do_writeq_be(val, addr) out_be64(PCI_FIX_ADDR(addr), val) | ||
266 | #define __do_readb(addr) eeh_readb(PCI_FIX_ADDR(addr)) | ||
267 | #define __do_readw(addr) eeh_readw(PCI_FIX_ADDR(addr)) | ||
268 | #define __do_readl(addr) eeh_readl(PCI_FIX_ADDR(addr)) | ||
269 | #define __do_readq(addr) eeh_readq(PCI_FIX_ADDR(addr)) | ||
270 | #define __do_readw_be(addr) eeh_readw_be(PCI_FIX_ADDR(addr)) | ||
271 | #define __do_readl_be(addr) eeh_readl_be(PCI_FIX_ADDR(addr)) | ||
272 | #define __do_readq_be(addr) eeh_readq_be(PCI_FIX_ADDR(addr)) | ||
273 | |||
274 | #define __do_outb(val, port) writeb(val, (PCI_IO_ADDR)pci_io_base+port) | ||
275 | #define __do_outw(val, port) writew(val, (PCI_IO_ADDR)pci_io_base+port) | ||
276 | #define __do_outl(val, port) writel(val, (PCI_IO_ADDR)pci_io_base+port) | ||
277 | #define __do_inb(port) readb((PCI_IO_ADDR)pci_io_base + port) | ||
278 | #define __do_inw(port) readw((PCI_IO_ADDR)pci_io_base + port) | ||
279 | #define __do_inl(port) readl((PCI_IO_ADDR)pci_io_base + port) | ||
280 | |||
281 | #define __do_readsb(a, b, n) eeh_readsb(PCI_FIX_ADDR(a), (b), (n)) | ||
282 | #define __do_readsw(a, b, n) eeh_readsw(PCI_FIX_ADDR(a), (b), (n)) | ||
283 | #define __do_readsl(a, b, n) eeh_readsl(PCI_FIX_ADDR(a), (b), (n)) | ||
284 | #define __do_writesb(a, b, n) _outsb(PCI_FIX_ADDR(a),(b),(n)) | ||
285 | #define __do_writesw(a, b, n) _outsw(PCI_FIX_ADDR(a),(b),(n)) | ||
286 | #define __do_writesl(a, b, n) _outsl(PCI_FIX_ADDR(a),(b),(n)) | ||
287 | |||
288 | #define __do_insb(p, b, n) readsb((PCI_IO_ADDR)pci_io_base+(p), (b), (n)) | ||
289 | #define __do_insw(p, b, n) readsw((PCI_IO_ADDR)pci_io_base+(p), (b), (n)) | ||
290 | #define __do_insl(p, b, n) readsl((PCI_IO_ADDR)pci_io_base+(p), (b), (n)) | ||
291 | #define __do_outsb(p, b, n) writesb((PCI_IO_ADDR)pci_io_base+(p),(b),(n)) | ||
292 | #define __do_outsw(p, b, n) writesw((PCI_IO_ADDR)pci_io_base+(p),(b),(n)) | ||
293 | #define __do_outsl(p, b, n) writesl((PCI_IO_ADDR)pci_io_base+(p),(b),(n)) | ||
294 | |||
295 | #define __do_memset_io(addr, c, n) eeh_memset_io(PCI_FIX_ADDR(addr), c, n) | ||
296 | #define __do_memcpy_fromio(dst, src, n) eeh_memcpy_fromio(dst, \ | ||
297 | PCI_FIX_ADDR(src), n) | ||
298 | #define __do_memcpy_toio(dst, src, n) eeh_memcpy_toio(PCI_FIX_ADDR(dst), \ | ||
299 | src, n) | ||
300 | |||
301 | #ifdef CONFIG_PPC_INDIRECT_IO | ||
302 | #define DEF_PCI_HOOK(x) x | ||
303 | #else | ||
304 | #define DEF_PCI_HOOK(x) NULL | ||
305 | #endif | ||
306 | |||
307 | /* Structure containing all the hooks */ | ||
308 | extern struct ppc_pci_io { | ||
309 | |||
310 | #define DEF_PCI_AC_RET(name, ret, at, al) ret (*name) at; | ||
311 | #define DEF_PCI_AC_NORET(name, at, al) void (*name) at; | ||
312 | |||
313 | #include <asm/io-defs.h> | ||
314 | |||
315 | #undef DEF_PCI_AC_RET | ||
316 | #undef DEF_PCI_AC_NORET | ||
317 | |||
318 | } ppc_pci_io; | ||
319 | |||
320 | /* The inline wrappers */ | ||
321 | #define DEF_PCI_AC_RET(name, ret, at, al) \ | ||
322 | static inline ret name at \ | ||
323 | { \ | ||
324 | if (DEF_PCI_HOOK(ppc_pci_io.name) != NULL) \ | ||
325 | return ppc_pci_io.name al; \ | ||
326 | return __do_##name al; \ | ||
327 | } | ||
328 | |||
329 | #define DEF_PCI_AC_NORET(name, at, al) \ | ||
330 | static inline void name at \ | ||
331 | { \ | ||
332 | if (DEF_PCI_HOOK(ppc_pci_io.name) != NULL) \ | ||
333 | ppc_pci_io.name al; \ | ||
334 | else \ | ||
335 | __do_##name al; \ | ||
336 | } | ||
337 | |||
338 | #include <asm/io-defs.h> | ||
339 | |||
340 | #undef DEF_PCI_AC_RET | ||
341 | #undef DEF_PCI_AC_NORET | ||
342 | |||
343 | /* Some drivers check for the presence of readq & writeq with | ||
344 | * a #ifdef, so we make them happy here. | ||
345 | */ | ||
346 | #define readq readq | ||
347 | #define writeq writeq | ||
348 | |||
349 | /* Nothing to do for cache stuff */ | ||
350 | |||
351 | #define dma_cache_inv(_start,_size) do { } while (0) | ||
352 | #define dma_cache_wback(_start,_size) do { } while (0) | ||
353 | #define dma_cache_wback_inv(_start,_size) do { } while (0) | ||
354 | |||
355 | |||
356 | /* | ||
357 | * Convert a physical pointer to a virtual kernel pointer for /dev/mem | ||
358 | * access | ||
359 | */ | ||
360 | #define xlate_dev_mem_ptr(p) __va(p) | ||
361 | |||
362 | /* | ||
363 | * Convert a virtual cached pointer to an uncached pointer | ||
364 | */ | ||
365 | #define xlate_dev_kmem_ptr(p) p | ||
151 | 366 | ||
367 | /* | ||
368 | * We don't do relaxed operations yet, at least not with these semantics | ||
369 | */ | ||
152 | #define readb_relaxed(addr) readb(addr) | 370 | #define readb_relaxed(addr) readb(addr) |
153 | #define readw_relaxed(addr) readw(addr) | 371 | #define readw_relaxed(addr) readw(addr) |
154 | #define readl_relaxed(addr) readl(addr) | 372 | #define readl_relaxed(addr) readl(addr) |
155 | #define readq_relaxed(addr) readq(addr) | 373 | #define readq_relaxed(addr) readq(addr) |
156 | 374 | ||
157 | extern void _insb(volatile u8 __iomem *port, void *buf, long count); | 375 | /* |
158 | extern void _outsb(volatile u8 __iomem *port, const void *buf, long count); | 376 | * Enforce synchronisation of stores vs. spin_unlock |
159 | extern void _insw_ns(volatile u16 __iomem *port, void *buf, long count); | 377 | * (this does it explicitly, though our implementation of spin_unlock |
160 | extern void _outsw_ns(volatile u16 __iomem *port, const void *buf, long count); | 378 | * does it implicitly too) |
161 | extern void _insl_ns(volatile u32 __iomem *port, void *buf, long count); | 379 | */ |
162 | extern void _outsl_ns(volatile u32 __iomem *port, const void *buf, long count); | ||
163 | |||
164 | static inline void mmiowb(void) | 380 | static inline void mmiowb(void) |
165 | { | 381 | { |
166 | unsigned long tmp; | 382 | unsigned long tmp; |
@@ -170,6 +386,23 @@ static inline void mmiowb(void) | |||
170 | : "memory"); | 386 | : "memory"); |
171 | } | 387 | } |
172 | 388 | ||
389 | static inline void iosync(void) | ||
390 | { | ||
391 | __asm__ __volatile__ ("sync" : : : "memory"); | ||
392 | } | ||
393 | |||
394 | /* Enforce in-order execution of data I/O. | ||
395 | * No distinction between read/write on PPC; use eieio for all three. | ||
396 | * Those are fairly weak though. They don't provide a barrier between | ||
397 | * MMIO and cacheable storage, nor do they provide a barrier vs. locks; | ||
398 | * they only provide barriers between 2 __raw MMIO operations and | ||
399 | * possibly break write combining. | ||
400 | */ | ||
401 | #define iobarrier_rw() eieio() | ||
402 | #define iobarrier_r() eieio() | ||
403 | #define iobarrier_w() eieio() | ||
404 | |||
405 | |||
173 | /* | 406 | /* |
174 | * output pause versions need a delay at least for the | 407 | * output pause versions need a delay at least for the |
175 | * w83c105 ide controller in a p610. | 408 | * w83c105 ide controller in a p610. |
@@ -185,11 +418,6 @@ static inline void mmiowb(void) | |||
185 | #define IO_SPACE_LIMIT ~(0UL) | 418 | #define IO_SPACE_LIMIT ~(0UL) |
186 | 419 | ||
187 | 420 | ||
188 | extern int __ioremap_explicit(unsigned long p_addr, unsigned long v_addr, | ||
189 | unsigned long size, unsigned long flags); | ||
190 | extern void __iomem *__ioremap(unsigned long address, unsigned long size, | ||
191 | unsigned long flags); | ||
192 | |||
193 | /** | 421 | /** |
194 | * ioremap - map bus memory into CPU space | 422 | * ioremap - map bus memory into CPU space |
195 | * @address: bus address of the memory | 423 | * @address: bus address of the memory |
@@ -200,14 +428,70 @@ extern void __iomem *__ioremap(unsigned long address, unsigned long size, | |||
200 | * writew/writel functions and the other mmio helpers. The returned | 428 | * writew/writel functions and the other mmio helpers. The returned |
201 | * address is not guaranteed to be usable directly as a virtual | 429 | * address is not guaranteed to be usable directly as a virtual |
202 | * address. | 430 | * address. |
431 | * | ||
432 | * We provide a few variations of it: | ||
433 | * | ||
434 | * * ioremap is the standard one and provides non-cacheable guarded mappings | ||
435 | * and can be hooked by the platform via ppc_md | ||
436 | * | ||
437 | * * ioremap_flags allows the page flags to be specified as an argument and | ||
438 | * can also be hooked by the platform via ppc_md | ||
439 | * | ||
440 | * * ioremap_nocache is identical to ioremap | ||
441 | * | ||
442 | * * iounmap undoes such a mapping and can be hooked | ||
443 | * | ||
444 | * * __ioremap_explicit (and the pending __iounmap_explicit) are low level | ||
445 | * functions to create hand-made mappings for use only by the PCI code | ||
446 | * and cannot currently be hooked. | ||
447 | * | ||
448 | * * __ioremap is the low level implementation used by ioremap and | ||
449 | * ioremap_flags and cannot be hooked (but can be used by a hook on one | ||
450 | * of the previous ones) | ||
451 | * | ||
452 | * * __iounmap is the low level implementation used by iounmap and cannot | ||
453 | * be hooked (but can be used by a hook on iounmap) | ||
454 | * | ||
203 | */ | 455 | */ |
204 | extern void __iomem *ioremap(unsigned long address, unsigned long size); | 456 | extern void __iomem *ioremap(unsigned long address, unsigned long size); |
205 | 457 | extern void __iomem *ioremap_flags(unsigned long address, unsigned long size, | |
458 | unsigned long flags); | ||
206 | #define ioremap_nocache(addr, size) ioremap((addr), (size)) | 459 | #define ioremap_nocache(addr, size) ioremap((addr), (size)) |
207 | extern int iounmap_explicit(volatile void __iomem *addr, unsigned long size); | 460 | extern void iounmap(void __iomem *addr); |
208 | extern void iounmap(volatile void __iomem *addr); | 461 | |
462 | extern void __iomem *__ioremap(unsigned long address, unsigned long size, | ||
463 | unsigned long flags); | ||
464 | extern void __iounmap(void __iomem *addr); | ||
465 | |||
466 | extern int __ioremap_explicit(unsigned long p_addr, unsigned long v_addr, | ||
467 | unsigned long size, unsigned long flags); | ||
468 | extern int __iounmap_explicit(void __iomem *start, unsigned long size); | ||
469 | |||
209 | extern void __iomem * reserve_phb_iospace(unsigned long size); | 470 | extern void __iomem * reserve_phb_iospace(unsigned long size); |
210 | 471 | ||
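
A minimal, hypothetical driver fragment showing the intended pairing of ioremap()/iounmap() with the ordered accessors; the BAR size, register offset and value are assumptions, not taken from this patch:

/* Sketch: map a device BAR non-cacheable, poke one register, tear down. */
static int my_map_and_poke(unsigned long bar_phys)
{
	void __iomem *regs = ioremap(bar_phys, 0x1000);

	if (regs == NULL)
		return -ENOMEM;
	writel(0x1, regs);		/* little-endian store, full barriers */
	iounmap(regs);
	return 0;
}
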
472 | |||
473 | /* | ||
474 | * When CONFIG_PPC_INDIRECT_IO is set, we use the generic iomap implementation | ||
475 | * which needs some additional definitions here. They basically allow PIO | ||
476 | * space overall to be 1GB. This will work as long as we never try to use | ||
477 | * iomap to map MMIO below 1GB, which should be fine on ppc64 | ||
478 | */ | ||
479 | #define HAVE_ARCH_PIO_SIZE 1 | ||
480 | #define PIO_OFFSET 0x00000000UL | ||
481 | #define PIO_MASK 0x3fffffffUL | ||
482 | #define PIO_RESERVED 0x40000000UL | ||
483 | |||
484 | #define mmio_read16be(addr) readw_be(addr) | ||
485 | #define mmio_read32be(addr) readl_be(addr) | ||
486 | #define mmio_write16be(val, addr) writew_be(val, addr) | ||
487 | #define mmio_write32be(val, addr) writel_be(val, addr) | ||
488 | #define mmio_insb(addr, dst, count) readsb(addr, dst, count) | ||
489 | #define mmio_insw(addr, dst, count) readsw(addr, dst, count) | ||
490 | #define mmio_insl(addr, dst, count) readsl(addr, dst, count) | ||
491 | #define mmio_outsb(addr, src, count) writesb(addr, src, count) | ||
492 | #define mmio_outsw(addr, src, count) writesw(addr, src, count) | ||
493 | #define mmio_outsl(addr, src, count) writesl(addr, src, count) | ||
494 | |||
211 | /** | 495 | /** |
212 | * virt_to_phys - map virtual addresses to physical | 496 | * virt_to_phys - map virtual addresses to physical |
213 | * @address: address to remap | 497 | * @address: address to remap |
@@ -254,177 +538,6 @@ static inline void * phys_to_virt(unsigned long address) | |||
254 | */ | 538 | */ |
255 | #define BIO_VMERGE_BOUNDARY 0 | 539 | #define BIO_VMERGE_BOUNDARY 0 |
256 | 540 | ||
257 | static inline void iosync(void) | ||
258 | { | ||
259 | __asm__ __volatile__ ("sync" : : : "memory"); | ||
260 | } | ||
261 | |||
262 | /* Enforce in-order execution of data I/O. | ||
263 | * No distinction between read/write on PPC; use eieio for all three. | ||
264 | */ | ||
265 | #define iobarrier_rw() eieio() | ||
266 | #define iobarrier_r() eieio() | ||
267 | #define iobarrier_w() eieio() | ||
268 | |||
269 | /* | ||
270 | * 8, 16 and 32 bit, big and little endian I/O operations, with barrier. | ||
271 | * These routines do not perform EEH-related I/O address translation, | ||
272 | * and should not be used directly by device drivers. Use inb/readb | ||
273 | * instead. | ||
274 | */ | ||
275 | static inline int __in_8(const volatile unsigned char __iomem *addr) | ||
276 | { | ||
277 | int ret; | ||
278 | |||
279 | __asm__ __volatile__("sync; lbz%U1%X1 %0,%1; twi 0,%0,0; isync" | ||
280 | : "=r" (ret) : "m" (*addr)); | ||
281 | return ret; | ||
282 | } | ||
283 | |||
284 | static inline void __out_8(volatile unsigned char __iomem *addr, int val) | ||
285 | { | ||
286 | __asm__ __volatile__("sync; stb%U0%X0 %1,%0" | ||
287 | : "=m" (*addr) : "r" (val)); | ||
288 | get_paca()->io_sync = 1; | ||
289 | } | ||
290 | |||
291 | static inline int __in_le16(const volatile unsigned short __iomem *addr) | ||
292 | { | ||
293 | int ret; | ||
294 | |||
295 | __asm__ __volatile__("sync; lhbrx %0,0,%1; twi 0,%0,0; isync" | ||
296 | : "=r" (ret) : "r" (addr), "m" (*addr)); | ||
297 | return ret; | ||
298 | } | ||
299 | |||
300 | static inline int __in_be16(const volatile unsigned short __iomem *addr) | ||
301 | { | ||
302 | int ret; | ||
303 | |||
304 | __asm__ __volatile__("sync; lhz%U1%X1 %0,%1; twi 0,%0,0; isync" | ||
305 | : "=r" (ret) : "m" (*addr)); | ||
306 | return ret; | ||
307 | } | ||
308 | |||
309 | static inline void __out_le16(volatile unsigned short __iomem *addr, int val) | ||
310 | { | ||
311 | __asm__ __volatile__("sync; sthbrx %1,0,%2" | ||
312 | : "=m" (*addr) : "r" (val), "r" (addr)); | ||
313 | get_paca()->io_sync = 1; | ||
314 | } | ||
315 | |||
316 | static inline void __out_be16(volatile unsigned short __iomem *addr, int val) | ||
317 | { | ||
318 | __asm__ __volatile__("sync; sth%U0%X0 %1,%0" | ||
319 | : "=m" (*addr) : "r" (val)); | ||
320 | get_paca()->io_sync = 1; | ||
321 | } | ||
322 | |||
323 | static inline unsigned __in_le32(const volatile unsigned __iomem *addr) | ||
324 | { | ||
325 | unsigned ret; | ||
326 | |||
327 | __asm__ __volatile__("sync; lwbrx %0,0,%1; twi 0,%0,0; isync" | ||
328 | : "=r" (ret) : "r" (addr), "m" (*addr)); | ||
329 | return ret; | ||
330 | } | ||
331 | |||
332 | static inline unsigned __in_be32(const volatile unsigned __iomem *addr) | ||
333 | { | ||
334 | unsigned ret; | ||
335 | |||
336 | __asm__ __volatile__("sync; lwz%U1%X1 %0,%1; twi 0,%0,0; isync" | ||
337 | : "=r" (ret) : "m" (*addr)); | ||
338 | return ret; | ||
339 | } | ||
340 | |||
341 | static inline void __out_le32(volatile unsigned __iomem *addr, int val) | ||
342 | { | ||
343 | __asm__ __volatile__("sync; stwbrx %1,0,%2" : "=m" (*addr) | ||
344 | : "r" (val), "r" (addr)); | ||
345 | get_paca()->io_sync = 1; | ||
346 | } | ||
347 | |||
348 | static inline void __out_be32(volatile unsigned __iomem *addr, int val) | ||
349 | { | ||
350 | __asm__ __volatile__("sync; stw%U0%X0 %1,%0" | ||
351 | : "=m" (*addr) : "r" (val)); | ||
352 | get_paca()->io_sync = 1; | ||
353 | } | ||
354 | |||
355 | static inline unsigned long __in_le64(const volatile unsigned long __iomem *addr) | ||
356 | { | ||
357 | unsigned long tmp, ret; | ||
358 | |||
359 | __asm__ __volatile__( | ||
360 | "sync\n" | ||
361 | "ld %1,0(%2)\n" | ||
362 | "twi 0,%1,0\n" | ||
363 | "isync\n" | ||
364 | "rldimi %0,%1,5*8,1*8\n" | ||
365 | "rldimi %0,%1,3*8,2*8\n" | ||
366 | "rldimi %0,%1,1*8,3*8\n" | ||
367 | "rldimi %0,%1,7*8,4*8\n" | ||
368 | "rldicl %1,%1,32,0\n" | ||
369 | "rlwimi %0,%1,8,8,31\n" | ||
370 | "rlwimi %0,%1,24,16,23\n" | ||
371 | : "=r" (ret) , "=r" (tmp) : "b" (addr) , "m" (*addr)); | ||
372 | return ret; | ||
373 | } | ||
374 | |||
375 | static inline unsigned long __in_be64(const volatile unsigned long __iomem *addr) | ||
376 | { | ||
377 | unsigned long ret; | ||
378 | |||
379 | __asm__ __volatile__("sync; ld%U1%X1 %0,%1; twi 0,%0,0; isync" | ||
380 | : "=r" (ret) : "m" (*addr)); | ||
381 | return ret; | ||
382 | } | ||
383 | |||
384 | static inline void __out_le64(volatile unsigned long __iomem *addr, unsigned long val) | ||
385 | { | ||
386 | unsigned long tmp; | ||
387 | |||
388 | __asm__ __volatile__( | ||
389 | "rldimi %0,%1,5*8,1*8\n" | ||
390 | "rldimi %0,%1,3*8,2*8\n" | ||
391 | "rldimi %0,%1,1*8,3*8\n" | ||
392 | "rldimi %0,%1,7*8,4*8\n" | ||
393 | "rldicl %1,%1,32,0\n" | ||
394 | "rlwimi %0,%1,8,8,31\n" | ||
395 | "rlwimi %0,%1,24,16,23\n" | ||
396 | "sync\n" | ||
397 | "std %0,0(%3)" | ||
398 | : "=&r" (tmp) , "=&r" (val) : "1" (val) , "b" (addr) , "m" (*addr)); | ||
399 | get_paca()->io_sync = 1; | ||
400 | } | ||
401 | |||
402 | static inline void __out_be64(volatile unsigned long __iomem *addr, unsigned long val) | ||
403 | { | ||
404 | __asm__ __volatile__("sync; std%U0%X0 %1,%0" : "=m" (*addr) : "r" (val)); | ||
405 | get_paca()->io_sync = 1; | ||
406 | } | ||
407 | |||
408 | #include <asm/eeh.h> | ||
409 | |||
410 | /* Nothing to do */ | ||
411 | |||
412 | #define dma_cache_inv(_start,_size) do { } while (0) | ||
413 | #define dma_cache_wback(_start,_size) do { } while (0) | ||
414 | #define dma_cache_wback_inv(_start,_size) do { } while (0) | ||
415 | |||
416 | |||
417 | /* | ||
418 | * Convert a physical pointer to a virtual kernel pointer for /dev/mem | ||
419 | * access | ||
420 | */ | ||
421 | #define xlate_dev_mem_ptr(p) __va(p) | ||
422 | |||
423 | /* | ||
424 | * Convert a virtual cached pointer to an uncached pointer | ||
425 | */ | ||
426 | #define xlate_dev_kmem_ptr(p) p | ||
427 | |||
428 | #endif /* __KERNEL__ */ | 541 | #endif /* __KERNEL__ */ |
429 | 542 | ||
430 | #endif /* CONFIG_PPC64 */ | 543 | #endif /* CONFIG_PPC64 */ |
diff --git a/include/asm-powerpc/machdep.h b/include/asm-powerpc/machdep.h index 23580cf5504c..8c1a2dfe5cc7 100644 --- a/include/asm-powerpc/machdep.h +++ b/include/asm-powerpc/machdep.h | |||
@@ -87,6 +87,10 @@ struct machdep_calls { | |||
87 | void (*tce_flush)(struct iommu_table *tbl); | 87 | void (*tce_flush)(struct iommu_table *tbl); |
88 | void (*pci_dma_dev_setup)(struct pci_dev *dev); | 88 | void (*pci_dma_dev_setup)(struct pci_dev *dev); |
89 | void (*pci_dma_bus_setup)(struct pci_bus *bus); | 89 | void (*pci_dma_bus_setup)(struct pci_bus *bus); |
90 | |||
91 | void __iomem * (*ioremap)(unsigned long addr, unsigned long size, | ||
92 | unsigned long flags); | ||
93 | void (*iounmap)(void __iomem *token); | ||
90 | #endif /* CONFIG_PPC64 */ | 94 | #endif /* CONFIG_PPC64 */ |
91 | 95 | ||
92 | int (*probe)(void); | 96 | int (*probe)(void); |
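
A hypothetical example of a platform wiring up the two new hooks from its setup code; my_ioremap()/my_iounmap() are placeholder names, since the patch itself only adds the fields:

/* Sketch: route ioremap/iounmap through platform-specific versions. */
static void __init my_platform_init_io(void)
{
	ppc_md.ioremap = my_ioremap;
	ppc_md.iounmap = my_iounmap;
}
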
diff --git a/include/asm-ppc/io.h b/include/asm-ppc/io.h index b744baf9e206..bfb639f9e420 100644 --- a/include/asm-ppc/io.h +++ b/include/asm-ppc/io.h | |||
@@ -321,12 +321,12 @@ __do_out_asm(outl, "stwbrx") | |||
321 | #define inl_p(port) inl((port)) | 321 | #define inl_p(port) inl((port)) |
322 | #define outl_p(val, port) outl((val), (port)) | 322 | #define outl_p(val, port) outl((val), (port)) |
323 | 323 | ||
324 | extern void _insb(volatile u8 __iomem *port, void *buf, long count); | 324 | extern void _insb(const volatile u8 __iomem *addr, void *buf, long count); |
325 | extern void _outsb(volatile u8 __iomem *port, const void *buf, long count); | 325 | extern void _outsb(volatile u8 __iomem *addr, const void *buf, long count); |
326 | extern void _insw_ns(volatile u16 __iomem *port, void *buf, long count); | 326 | extern void _insw_ns(const volatile u16 __iomem *addr, void *buf, long count); |
327 | extern void _outsw_ns(volatile u16 __iomem *port, const void *buf, long count); | 327 | extern void _outsw_ns(volatile u16 __iomem *addr, const void *buf, long count); |
328 | extern void _insl_ns(volatile u32 __iomem *port, void *buf, long count); | 328 | extern void _insl_ns(const volatile u32 __iomem *addr, void *buf, long count); |
329 | extern void _outsl_ns(volatile u32 __iomem *port, const void *buf, long count); | 329 | extern void _outsl_ns(volatile u32 __iomem *addr, const void *buf, long count); |
330 | 330 | ||
331 | 331 | ||
332 | #define IO_SPACE_LIMIT ~0 | 332 | #define IO_SPACE_LIMIT ~0 |