Diffstat (limited to 'arch/powerpc/kernel')

-rw-r--r--  arch/powerpc/kernel/Makefile             |  15
-rw-r--r--  arch/powerpc/kernel/asm-offsets.c        |   3
-rw-r--r--  arch/powerpc/kernel/cpu_setup_ppc970.S   |  16
-rw-r--r--  arch/powerpc/kernel/cputable.c           |  62
-rw-r--r--  arch/powerpc/kernel/crash.c              |   4
-rw-r--r--  arch/powerpc/kernel/dma_64.c             | 249
-rw-r--r--  arch/powerpc/kernel/entry_64.S           |  51
-rw-r--r--  arch/powerpc/kernel/head_64.S            | 163
-rw-r--r--  arch/powerpc/kernel/ibmebus.c            |   9
-rw-r--r--  arch/powerpc/kernel/idle.c               |   7
-rw-r--r--  arch/powerpc/kernel/idle_power4.S        |   8
-rw-r--r--  arch/powerpc/kernel/io.c                 | 105
-rw-r--r--  arch/powerpc/kernel/iomap.c              |   2
-rw-r--r--  arch/powerpc/kernel/iommu.c              |   6
-rw-r--r--  arch/powerpc/kernel/irq.c                |  80
-rw-r--r--  arch/powerpc/kernel/of_device.c          | 173
-rw-r--r--  arch/powerpc/kernel/of_platform.c        | 489
-rw-r--r--  arch/powerpc/kernel/pci_32.c             |  96
-rw-r--r--  arch/powerpc/kernel/pci_64.c             |  70
-rw-r--r--  arch/powerpc/kernel/pci_direct_iommu.c   |  98
-rw-r--r--  arch/powerpc/kernel/pci_iommu.c          | 164
-rw-r--r--  arch/powerpc/kernel/ppc_ksyms.c          |   4
-rw-r--r--  arch/powerpc/kernel/prom.c               | 111
-rw-r--r--  arch/powerpc/kernel/prom_init.c          |  18
-rw-r--r--  arch/powerpc/kernel/prom_parse.c         | 290
-rw-r--r--  arch/powerpc/kernel/rtas.c               |   5
-rw-r--r--  arch/powerpc/kernel/rtas_flash.c         |   4
-rw-r--r--  arch/powerpc/kernel/rtas_pci.c           |  35
-rw-r--r--  arch/powerpc/kernel/setup_32.c           |   6
-rw-r--r--  arch/powerpc/kernel/setup_64.c           |  18
-rw-r--r--  arch/powerpc/kernel/smp-tbsync.c         |   5
-rw-r--r--  arch/powerpc/kernel/smp.c                |   1
-rw-r--r--  arch/powerpc/kernel/sys_ppc32.c          |   1
-rw-r--r--  arch/powerpc/kernel/sysfs.c              |  76
-rw-r--r--  arch/powerpc/kernel/time.c               |   7
-rw-r--r--  arch/powerpc/kernel/traps.c              |   8
-rw-r--r--  arch/powerpc/kernel/vio.c                |  94

37 files changed, 1505 insertions, 1048 deletions
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index 7af23c43fd4b..4fe53d08ab81 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -17,11 +17,11 @@ obj-y += vdso32/
17obj-$(CONFIG_PPC64) += setup_64.o binfmt_elf32.o sys_ppc32.o \ 17obj-$(CONFIG_PPC64) += setup_64.o binfmt_elf32.o sys_ppc32.o \
18 signal_64.o ptrace32.o \ 18 signal_64.o ptrace32.o \
19 paca.o cpu_setup_ppc970.o \ 19 paca.o cpu_setup_ppc970.o \
20 firmware.o sysfs.o 20 firmware.o sysfs.o nvram_64.o
21obj-$(CONFIG_PPC64) += vdso64/ 21obj-$(CONFIG_PPC64) += vdso64/
22obj-$(CONFIG_ALTIVEC) += vecemu.o vector.o 22obj-$(CONFIG_ALTIVEC) += vecemu.o vector.o
23obj-$(CONFIG_PPC_970_NAP) += idle_power4.o 23obj-$(CONFIG_PPC_970_NAP) += idle_power4.o
24obj-$(CONFIG_PPC_OF) += of_device.o prom_parse.o 24obj-$(CONFIG_PPC_OF) += of_device.o of_platform.o prom_parse.o
25procfs-$(CONFIG_PPC64) := proc_ppc64.o 25procfs-$(CONFIG_PPC64) := proc_ppc64.o
26obj-$(CONFIG_PROC_FS) += $(procfs-y) 26obj-$(CONFIG_PROC_FS) += $(procfs-y)
27rtaspci-$(CONFIG_PPC64) := rtas_pci.o 27rtaspci-$(CONFIG_PPC64) := rtas_pci.o
@@ -32,7 +32,6 @@ obj-$(CONFIG_LPARCFG) += lparcfg.o
32obj-$(CONFIG_IBMVIO) += vio.o 32obj-$(CONFIG_IBMVIO) += vio.o
33obj-$(CONFIG_IBMEBUS) += ibmebus.o 33obj-$(CONFIG_IBMEBUS) += ibmebus.o
34obj-$(CONFIG_GENERIC_TBSYNC) += smp-tbsync.o 34obj-$(CONFIG_GENERIC_TBSYNC) += smp-tbsync.o
35obj64-$(CONFIG_PPC_MULTIPLATFORM) += nvram_64.o
36obj-$(CONFIG_CRASH_DUMP) += crash_dump.o 35obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
37obj-$(CONFIG_6xx) += idle_6xx.o l2cr_6xx.o cpu_setup_6xx.o 36obj-$(CONFIG_6xx) += idle_6xx.o l2cr_6xx.o cpu_setup_6xx.o
38obj-$(CONFIG_TAU) += tau_6xx.o 37obj-$(CONFIG_TAU) += tau_6xx.o
@@ -59,11 +58,11 @@ obj-$(CONFIG_BOOTX_TEXT) += btext.o
59obj-$(CONFIG_SMP) += smp.o 58obj-$(CONFIG_SMP) += smp.o
60obj-$(CONFIG_KPROBES) += kprobes.o 59obj-$(CONFIG_KPROBES) += kprobes.o
61obj-$(CONFIG_PPC_UDBG_16550) += legacy_serial.o udbg_16550.o 60obj-$(CONFIG_PPC_UDBG_16550) += legacy_serial.o udbg_16550.o
61
62module-$(CONFIG_PPC64) += module_64.o 62module-$(CONFIG_PPC64) += module_64.o
63obj-$(CONFIG_MODULES) += $(module-y) 63obj-$(CONFIG_MODULES) += $(module-y)
64 64
65pci64-$(CONFIG_PPC64) += pci_64.o pci_dn.o pci_iommu.o \ 65pci64-$(CONFIG_PPC64) += pci_64.o pci_dn.o
66 pci_direct_iommu.o iomap.o
67pci32-$(CONFIG_PPC32) := pci_32.o 66pci32-$(CONFIG_PPC32) := pci_32.o
68obj-$(CONFIG_PCI) += $(pci64-y) $(pci32-y) 67obj-$(CONFIG_PCI) += $(pci64-y) $(pci32-y)
69kexec-$(CONFIG_PPC64) := machine_kexec_64.o 68kexec-$(CONFIG_PPC64) := machine_kexec_64.o
@@ -72,8 +71,12 @@ obj-$(CONFIG_KEXEC) += machine_kexec.o crash.o $(kexec-y)
72obj-$(CONFIG_AUDIT) += audit.o 71obj-$(CONFIG_AUDIT) += audit.o
73obj64-$(CONFIG_AUDIT) += compat_audit.o 72obj64-$(CONFIG_AUDIT) += compat_audit.o
74 73
74ifneq ($(CONFIG_PPC_INDIRECT_IO),y)
75obj-y += iomap.o
76endif
77
75ifeq ($(CONFIG_PPC_ISERIES),y) 78ifeq ($(CONFIG_PPC_ISERIES),y)
76$(obj)/head_64.o: $(obj)/lparmap.s 79extra-y += lparmap.s
77AFLAGS_head_64.o += -I$(obj) 80AFLAGS_head_64.o += -I$(obj)
78endif 81endif
79 82
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index d06f378597bb..e96521530d21 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -118,7 +118,8 @@ int main(void)
118 DEFINE(PACASTABRR, offsetof(struct paca_struct, stab_rr)); 118 DEFINE(PACASTABRR, offsetof(struct paca_struct, stab_rr));
119 DEFINE(PACAR1, offsetof(struct paca_struct, saved_r1)); 119 DEFINE(PACAR1, offsetof(struct paca_struct, saved_r1));
120 DEFINE(PACATOC, offsetof(struct paca_struct, kernel_toc)); 120 DEFINE(PACATOC, offsetof(struct paca_struct, kernel_toc));
121 DEFINE(PACAPROCENABLED, offsetof(struct paca_struct, proc_enabled)); 121 DEFINE(PACASOFTIRQEN, offsetof(struct paca_struct, soft_enabled));
122 DEFINE(PACAHARDIRQEN, offsetof(struct paca_struct, hard_enabled));
122 DEFINE(PACASLBCACHE, offsetof(struct paca_struct, slb_cache)); 123 DEFINE(PACASLBCACHE, offsetof(struct paca_struct, slb_cache));
123 DEFINE(PACASLBCACHEPTR, offsetof(struct paca_struct, slb_cache_ptr)); 124 DEFINE(PACASLBCACHEPTR, offsetof(struct paca_struct, slb_cache_ptr));
124 DEFINE(PACACONTEXTID, offsetof(struct paca_struct, context.id)); 125 DEFINE(PACACONTEXTID, offsetof(struct paca_struct, context.id));
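
Note on the asm-offsets.c hunk: the new PACASOFTIRQEN/PACAHARDIRQEN constants replace PACAPROCENABLED so that the assembly in head_64.S and entry_64.S can address the two new paca bytes directly (e.g. "lbz r10,PACASOFTIRQEN(r13)"). asm-offsets.c generates such constants from offsetof(); the following is only a rough userspace illustration of the idea, with a stand-in struct instead of the real paca_struct and plain printf instead of the DEFINE() machinery:

    #include <stddef.h>
    #include <stdio.h>

    /* Stand-in for paca_struct; field names mirror this patch. */
    struct paca_sketch {
    	unsigned long saved_r1;
    	unsigned char soft_enabled;
    	unsigned char hard_enabled;
    };

    int main(void)
    {
    	/* Emit the offsets assembly would use as displacements off r13. */
    	printf("#define PACASOFTIRQEN %zu\n",
    	       offsetof(struct paca_sketch, soft_enabled));
    	printf("#define PACAHARDIRQEN %zu\n",
    	       offsetof(struct paca_sketch, hard_enabled));
    	return 0;
    }
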
diff --git a/arch/powerpc/kernel/cpu_setup_ppc970.S b/arch/powerpc/kernel/cpu_setup_ppc970.S
index 652594891d58..bf118c385752 100644
--- a/arch/powerpc/kernel/cpu_setup_ppc970.S
+++ b/arch/powerpc/kernel/cpu_setup_ppc970.S
@@ -83,6 +83,22 @@ _GLOBAL(__setup_cpu_ppc970)
83 rldimi r0,r11,52,8 /* set NAP and DPM */ 83 rldimi r0,r11,52,8 /* set NAP and DPM */
84 li r11,0 84 li r11,0
85 rldimi r0,r11,32,31 /* clear EN_ATTN */ 85 rldimi r0,r11,32,31 /* clear EN_ATTN */
86 b load_hids /* Jump to shared code */
87
88
89_GLOBAL(__setup_cpu_ppc970MP)
90 /* Do nothing if not running in HV mode */
91 mfmsr r0
92 rldicl. r0,r0,4,63
93 beqlr
94
95 mfspr r0,SPRN_HID0
96 li r11,0x15 /* clear DOZE and SLEEP */
97 rldimi r0,r11,52,6 /* set DEEPNAP, NAP and DPM */
98 li r11,0
99 rldimi r0,r11,32,31 /* clear EN_ATTN */
100
101load_hids:
86 mtspr SPRN_HID0,r0 102 mtspr SPRN_HID0,r0
87 mfspr r0,SPRN_HID0 103 mfspr r0,SPRN_HID0
88 mfspr r0,SPRN_HID0 104 mfspr r0,SPRN_HID0
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index bfd499ee3753..9d1614c3ce67 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -42,6 +42,7 @@ extern void __setup_cpu_745x(unsigned long offset, struct cpu_spec* spec);
42#endif /* CONFIG_PPC32 */ 42#endif /* CONFIG_PPC32 */
43#ifdef CONFIG_PPC64 43#ifdef CONFIG_PPC64
44extern void __setup_cpu_ppc970(unsigned long offset, struct cpu_spec* spec); 44extern void __setup_cpu_ppc970(unsigned long offset, struct cpu_spec* spec);
45extern void __setup_cpu_ppc970MP(unsigned long offset, struct cpu_spec* spec);
45extern void __restore_cpu_ppc970(void); 46extern void __restore_cpu_ppc970(void);
46#endif /* CONFIG_PPC64 */ 47#endif /* CONFIG_PPC64 */
47 48
@@ -222,9 +223,9 @@ static struct cpu_spec cpu_specs[] = {
222 .icache_bsize = 128, 223 .icache_bsize = 128,
223 .dcache_bsize = 128, 224 .dcache_bsize = 128,
224 .num_pmcs = 8, 225 .num_pmcs = 8,
225 .cpu_setup = __setup_cpu_ppc970, 226 .cpu_setup = __setup_cpu_ppc970MP,
226 .cpu_restore = __restore_cpu_ppc970, 227 .cpu_restore = __restore_cpu_ppc970,
227 .oprofile_cpu_type = "ppc64/970", 228 .oprofile_cpu_type = "ppc64/970MP",
228 .oprofile_type = PPC_OPROFILE_POWER4, 229 .oprofile_type = PPC_OPROFILE_POWER4,
229 .platform = "ppc970", 230 .platform = "ppc970",
230 }, 231 },
@@ -276,10 +277,45 @@ static struct cpu_spec cpu_specs[] = {
276 .oprofile_mmcra_sipr = MMCRA_SIPR, 277 .oprofile_mmcra_sipr = MMCRA_SIPR,
277 .platform = "power5+", 278 .platform = "power5+",
278 }, 279 },
280 { /* POWER6 in P5+ mode; 2.04-compliant processor */
281 .pvr_mask = 0xffffffff,
282 .pvr_value = 0x0f000001,
283 .cpu_name = "POWER5+",
284 .cpu_features = CPU_FTRS_POWER5,
285 .cpu_user_features = COMMON_USER_POWER5_PLUS,
286 .icache_bsize = 128,
287 .dcache_bsize = 128,
288 .num_pmcs = 6,
289 .oprofile_cpu_type = "ppc64/power6",
290 .oprofile_type = PPC_OPROFILE_POWER4,
291 .oprofile_mmcra_sihv = POWER6_MMCRA_SIHV,
292 .oprofile_mmcra_sipr = POWER6_MMCRA_SIPR,
293 .oprofile_mmcra_clear = POWER6_MMCRA_THRM |
294 POWER6_MMCRA_OTHER,
295 .platform = "power5+",
296 },
279 { /* Power6 */ 297 { /* Power6 */
280 .pvr_mask = 0xffff0000, 298 .pvr_mask = 0xffff0000,
281 .pvr_value = 0x003e0000, 299 .pvr_value = 0x003e0000,
282 .cpu_name = "POWER6", 300 .cpu_name = "POWER6 (raw)",
301 .cpu_features = CPU_FTRS_POWER6,
302 .cpu_user_features = COMMON_USER_POWER6 |
303 PPC_FEATURE_POWER6_EXT,
304 .icache_bsize = 128,
305 .dcache_bsize = 128,
306 .num_pmcs = 6,
307 .oprofile_cpu_type = "ppc64/power6",
308 .oprofile_type = PPC_OPROFILE_POWER4,
309 .oprofile_mmcra_sihv = POWER6_MMCRA_SIHV,
310 .oprofile_mmcra_sipr = POWER6_MMCRA_SIPR,
311 .oprofile_mmcra_clear = POWER6_MMCRA_THRM |
312 POWER6_MMCRA_OTHER,
313 .platform = "power6x",
314 },
315 { /* 2.05-compliant processor, i.e. Power6 "architected" mode */
316 .pvr_mask = 0xffffffff,
317 .pvr_value = 0x0f000002,
318 .cpu_name = "POWER6 (architected)",
283 .cpu_features = CPU_FTRS_POWER6, 319 .cpu_features = CPU_FTRS_POWER6,
284 .cpu_user_features = COMMON_USER_POWER6, 320 .cpu_user_features = COMMON_USER_POWER6,
285 .icache_bsize = 128, 321 .icache_bsize = 128,
@@ -303,6 +339,9 @@ static struct cpu_spec cpu_specs[] = {
303 PPC_FEATURE_SMT, 339 PPC_FEATURE_SMT,
304 .icache_bsize = 128, 340 .icache_bsize = 128,
305 .dcache_bsize = 128, 341 .dcache_bsize = 128,
342 .num_pmcs = 4,
343 .oprofile_cpu_type = "ppc64/cell-be",
344 .oprofile_type = PPC_OPROFILE_CELL,
306 .platform = "ppc-cell-be", 345 .platform = "ppc-cell-be",
307 }, 346 },
308 { /* PA Semi PA6T */ 347 { /* PA Semi PA6T */
@@ -801,6 +840,17 @@ static struct cpu_spec cpu_specs[] = {
801 .cpu_setup = __setup_cpu_603, 840 .cpu_setup = __setup_cpu_603,
802 .platform = "ppc603", 841 .platform = "ppc603",
803 }, 842 },
843 { /* e300c3 on 83xx */
844 .pvr_mask = 0x7fff0000,
845 .pvr_value = 0x00850000,
846 .cpu_name = "e300c3",
847 .cpu_features = CPU_FTRS_E300,
848 .cpu_user_features = COMMON_USER,
849 .icache_bsize = 32,
850 .dcache_bsize = 32,
851 .cpu_setup = __setup_cpu_603,
852 .platform = "ppc603",
853 },
804 { /* default match, we assume split I/D cache & TB (non-601)... */ 854 { /* default match, we assume split I/D cache & TB (non-601)... */
805 .pvr_mask = 0x00000000, 855 .pvr_mask = 0x00000000,
806 .pvr_value = 0x00000000, 856 .pvr_value = 0x00000000,
@@ -1169,19 +1219,15 @@ static struct cpu_spec cpu_specs[] = {
1169#endif /* CONFIG_PPC32 */ 1219#endif /* CONFIG_PPC32 */
1170}; 1220};
1171 1221
1172struct cpu_spec *identify_cpu(unsigned long offset) 1222struct cpu_spec *identify_cpu(unsigned long offset, unsigned int pvr)
1173{ 1223{
1174 struct cpu_spec *s = cpu_specs; 1224 struct cpu_spec *s = cpu_specs;
1175 struct cpu_spec **cur = &cur_cpu_spec; 1225 struct cpu_spec **cur = &cur_cpu_spec;
1176 unsigned int pvr = mfspr(SPRN_PVR);
1177 int i; 1226 int i;
1178 1227
1179 s = PTRRELOC(s); 1228 s = PTRRELOC(s);
1180 cur = PTRRELOC(cur); 1229 cur = PTRRELOC(cur);
1181 1230
1182 if (*cur != NULL)
1183 return PTRRELOC(*cur);
1184
1185 for (i = 0; i < ARRAY_SIZE(cpu_specs); i++,s++) 1231 for (i = 0; i < ARRAY_SIZE(cpu_specs); i++,s++)
1186 if ((pvr & s->pvr_mask) == s->pvr_value) { 1232 if ((pvr & s->pvr_mask) == s->pvr_value) {
1187 *cur = cpu_specs + i; 1233 *cur = cpu_specs + i;
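
Note on the cputable.c hunk: identify_cpu() now takes the PVR as an explicit argument and always walks cpu_specs[] (the early return on a cached cur_cpu_spec is gone), which matters for the new exact-match "architected mode" entries. A minimal standalone sketch of that table-matching rule, using a made-up three-entry table rather than the real cpu_specs[]:

    #include <stdio.h>

    /*
     * First entry whose masked PVR equals pvr_value wins, so exact
     * matches (mask 0xffffffff) must precede the wider 0xffff0000
     * "raw" entries, and the all-zero default entry comes last.
     */
    struct spec_sketch {
    	unsigned int pvr_mask;
    	unsigned int pvr_value;
    	const char *cpu_name;
    };

    static const struct spec_sketch specs[] = {
    	{ 0xffffffff, 0x0f000002, "POWER6 (architected)" },
    	{ 0xffff0000, 0x003e0000, "POWER6 (raw)" },
    	{ 0x00000000, 0x00000000, "default" },	/* must be last */
    };

    static const char *match_pvr(unsigned int pvr)
    {
    	unsigned int i;

    	for (i = 0; i < sizeof(specs) / sizeof(specs[0]); i++)
    		if ((pvr & specs[i].pvr_mask) == specs[i].pvr_value)
    			return specs[i].cpu_name;
    	return "none";	/* unreachable: the default matches everything */
    }

    int main(void)
    {
    	printf("%s\n", match_pvr(0x003e0200));	/* POWER6 (raw) */
    	printf("%s\n", match_pvr(0x0f000002));	/* POWER6 (architected) */
    	return 0;
    }
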
diff --git a/arch/powerpc/kernel/crash.c b/arch/powerpc/kernel/crash.c
index 1af41f7616dc..89b03c8da9d2 100644
--- a/arch/powerpc/kernel/crash.c
+++ b/arch/powerpc/kernel/crash.c
@@ -111,7 +111,7 @@ void crash_ipi_callback(struct pt_regs *regs)
111 if (!cpu_online(cpu)) 111 if (!cpu_online(cpu))
112 return; 112 return;
113 113
114 local_irq_disable(); 114 hard_irq_disable();
115 if (!cpu_isset(cpu, cpus_in_crash)) 115 if (!cpu_isset(cpu, cpus_in_crash))
116 crash_save_this_cpu(regs, cpu); 116 crash_save_this_cpu(regs, cpu);
117 cpu_set(cpu, cpus_in_crash); 117 cpu_set(cpu, cpus_in_crash);
@@ -289,7 +289,7 @@ void default_machine_crash_shutdown(struct pt_regs *regs)
289 * an SMP system. 289 * an SMP system.
290 * The kernel is broken so disable interrupts. 290 * The kernel is broken so disable interrupts.
291 */ 291 */
292 local_irq_disable(); 292 hard_irq_disable();
293 293
294 for_each_irq(irq) { 294 for_each_irq(irq) {
295 struct irq_desc *desc = irq_desc + irq; 295 struct irq_desc *desc = irq_desc + irq;
diff --git a/arch/powerpc/kernel/dma_64.c b/arch/powerpc/kernel/dma_64.c
index 6c168f6ea142..7b0e754383cf 100644
--- a/arch/powerpc/kernel/dma_64.c
+++ b/arch/powerpc/kernel/dma_64.c
@@ -1,151 +1,194 @@
1/* 1/*
2 * Copyright (C) 2004 IBM Corporation 2 * Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corporation
3 * 3 *
4 * Implements the generic device dma API for ppc64. Handles 4 * Provide default implementations of the DMA mapping callbacks for
5 * the pci and vio busses 5 * directly mapped busses and busses using the iommu infrastructure
6 */ 6 */
7 7
8#include <linux/device.h> 8#include <linux/device.h>
9#include <linux/dma-mapping.h> 9#include <linux/dma-mapping.h>
10/* Include the busses we support */
11#include <linux/pci.h>
12#include <asm/vio.h>
13#include <asm/ibmebus.h>
14#include <asm/scatterlist.h>
15#include <asm/bug.h> 10#include <asm/bug.h>
11#include <asm/iommu.h>
12#include <asm/abs_addr.h>
16 13
17static struct dma_mapping_ops *get_dma_ops(struct device *dev) 14/*
18{ 15 * Generic iommu implementation
19#ifdef CONFIG_PCI 16 */
20 if (dev->bus == &pci_bus_type)
21 return &pci_dma_ops;
22#endif
23#ifdef CONFIG_IBMVIO
24 if (dev->bus == &vio_bus_type)
25 return &vio_dma_ops;
26#endif
27#ifdef CONFIG_IBMEBUS
28 if (dev->bus == &ibmebus_bus_type)
29 return &ibmebus_dma_ops;
30#endif
31 return NULL;
32}
33 17
34int dma_supported(struct device *dev, u64 mask) 18static inline unsigned long device_to_mask(struct device *dev)
35{ 19{
36 struct dma_mapping_ops *dma_ops = get_dma_ops(dev); 20 if (dev->dma_mask && *dev->dma_mask)
21 return *dev->dma_mask;
22 /* Assume devices without mask can take 32 bit addresses */
23 return 0xfffffffful;
24}
37 25
38 BUG_ON(!dma_ops);
39 26
40 return dma_ops->dma_supported(dev, mask); 27/* Allocates a contiguous real buffer and creates mappings over it.
28 * Returns the virtual address of the buffer and sets dma_handle
29 * to the dma address (mapping) of the first page.
30 */
31static void *dma_iommu_alloc_coherent(struct device *dev, size_t size,
32 dma_addr_t *dma_handle, gfp_t flag)
33{
34 return iommu_alloc_coherent(dev->archdata.dma_data, size, dma_handle,
35 device_to_mask(dev), flag,
36 dev->archdata.numa_node);
41} 37}
42EXPORT_SYMBOL(dma_supported);
43 38
44int dma_set_mask(struct device *dev, u64 dma_mask) 39static void dma_iommu_free_coherent(struct device *dev, size_t size,
40 void *vaddr, dma_addr_t dma_handle)
45{ 41{
46#ifdef CONFIG_PCI 42 iommu_free_coherent(dev->archdata.dma_data, size, vaddr, dma_handle);
47 if (dev->bus == &pci_bus_type)
48 return pci_set_dma_mask(to_pci_dev(dev), dma_mask);
49#endif
50#ifdef CONFIG_IBMVIO
51 if (dev->bus == &vio_bus_type)
52 return -EIO;
53#endif /* CONFIG_IBMVIO */
54#ifdef CONFIG_IBMEBUS
55 if (dev->bus == &ibmebus_bus_type)
56 return -EIO;
57#endif
58 BUG();
59 return 0;
60} 43}
61EXPORT_SYMBOL(dma_set_mask);
62 44
63void *dma_alloc_coherent(struct device *dev, size_t size, 45/* Creates TCEs for a user provided buffer. The user buffer must be
64 dma_addr_t *dma_handle, gfp_t flag) 46 * contiguous real kernel storage (not vmalloc). The address of the buffer
47 * passed here is the kernel (virtual) address of the buffer. The buffer
48 * need not be page aligned, the dma_addr_t returned will point to the same
49 * byte within the page as vaddr.
50 */
51static dma_addr_t dma_iommu_map_single(struct device *dev, void *vaddr,
52 size_t size,
53 enum dma_data_direction direction)
65{ 54{
66 struct dma_mapping_ops *dma_ops = get_dma_ops(dev); 55 return iommu_map_single(dev->archdata.dma_data, vaddr, size,
67 56 device_to_mask(dev), direction);
68 BUG_ON(!dma_ops);
69
70 return dma_ops->alloc_coherent(dev, size, dma_handle, flag);
71} 57}
72EXPORT_SYMBOL(dma_alloc_coherent);
73 58
74void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, 59
75 dma_addr_t dma_handle) 60static void dma_iommu_unmap_single(struct device *dev, dma_addr_t dma_handle,
61 size_t size,
62 enum dma_data_direction direction)
76{ 63{
77 struct dma_mapping_ops *dma_ops = get_dma_ops(dev); 64 iommu_unmap_single(dev->archdata.dma_data, dma_handle, size, direction);
65}
78 66
79 BUG_ON(!dma_ops);
80 67
81 dma_ops->free_coherent(dev, size, cpu_addr, dma_handle); 68static int dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist,
69 int nelems, enum dma_data_direction direction)
70{
71 return iommu_map_sg(dev->archdata.dma_data, sglist, nelems,
72 device_to_mask(dev), direction);
82} 73}
83EXPORT_SYMBOL(dma_free_coherent);
84 74
85dma_addr_t dma_map_single(struct device *dev, void *cpu_addr, size_t size, 75static void dma_iommu_unmap_sg(struct device *dev, struct scatterlist *sglist,
86 enum dma_data_direction direction) 76 int nelems, enum dma_data_direction direction)
87{ 77{
88 struct dma_mapping_ops *dma_ops = get_dma_ops(dev); 78 iommu_unmap_sg(dev->archdata.dma_data, sglist, nelems, direction);
89
90 BUG_ON(!dma_ops);
91
92 return dma_ops->map_single(dev, cpu_addr, size, direction);
93} 79}
94EXPORT_SYMBOL(dma_map_single);
95 80
96void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size, 81/* We support DMA to/from any memory page via the iommu */
97 enum dma_data_direction direction) 82static int dma_iommu_dma_supported(struct device *dev, u64 mask)
98{ 83{
99 struct dma_mapping_ops *dma_ops = get_dma_ops(dev); 84 struct iommu_table *tbl = dev->archdata.dma_data;
100 85
101 BUG_ON(!dma_ops); 86 if (!tbl || tbl->it_offset > mask) {
102 87 printk(KERN_INFO
103 dma_ops->unmap_single(dev, dma_addr, size, direction); 88 "Warning: IOMMU offset too big for device mask\n");
89 if (tbl)
90 printk(KERN_INFO
91 "mask: 0x%08lx, table offset: 0x%08lx\n",
92 mask, tbl->it_offset);
93 else
94 printk(KERN_INFO "mask: 0x%08lx, table unavailable\n",
95 mask);
96 return 0;
97 } else
98 return 1;
104} 99}
105EXPORT_SYMBOL(dma_unmap_single);
106 100
107dma_addr_t dma_map_page(struct device *dev, struct page *page, 101struct dma_mapping_ops dma_iommu_ops = {
108 unsigned long offset, size_t size, 102 .alloc_coherent = dma_iommu_alloc_coherent,
109 enum dma_data_direction direction) 103 .free_coherent = dma_iommu_free_coherent,
110{ 104 .map_single = dma_iommu_map_single,
111 struct dma_mapping_ops *dma_ops = get_dma_ops(dev); 105 .unmap_single = dma_iommu_unmap_single,
106 .map_sg = dma_iommu_map_sg,
107 .unmap_sg = dma_iommu_unmap_sg,
108 .dma_supported = dma_iommu_dma_supported,
109};
110EXPORT_SYMBOL(dma_iommu_ops);
112 111
113 BUG_ON(!dma_ops); 112/*
113 * Generic direct DMA implementation
114 *
115 * This implementation supports a global offset that can be applied if
116 * the address at which memory is visible to devices is not 0.
117 */
118unsigned long dma_direct_offset;
114 119
115 return dma_ops->map_single(dev, page_address(page) + offset, size, 120static void *dma_direct_alloc_coherent(struct device *dev, size_t size,
116 direction); 121 dma_addr_t *dma_handle, gfp_t flag)
122{
123 struct page *page;
124 void *ret;
125 int node = dev->archdata.numa_node;
126
127 /* TODO: Maybe use the numa node here too ? */
128 page = alloc_pages_node(node, flag, get_order(size));
129 if (page == NULL)
130 return NULL;
131 ret = page_address(page);
132 memset(ret, 0, size);
133 *dma_handle = virt_to_abs(ret) | dma_direct_offset;
134
135 return ret;
117} 136}
118EXPORT_SYMBOL(dma_map_page);
119 137
120void dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size, 138static void dma_direct_free_coherent(struct device *dev, size_t size,
121 enum dma_data_direction direction) 139 void *vaddr, dma_addr_t dma_handle)
122{ 140{
123 struct dma_mapping_ops *dma_ops = get_dma_ops(dev); 141 free_pages((unsigned long)vaddr, get_order(size));
142}
124 143
125 BUG_ON(!dma_ops); 144static dma_addr_t dma_direct_map_single(struct device *dev, void *ptr,
145 size_t size,
146 enum dma_data_direction direction)
147{
148 return virt_to_abs(ptr) | dma_direct_offset;
149}
126 150
127 dma_ops->unmap_single(dev, dma_address, size, direction); 151static void dma_direct_unmap_single(struct device *dev, dma_addr_t dma_addr,
152 size_t size,
153 enum dma_data_direction direction)
154{
128} 155}
129EXPORT_SYMBOL(dma_unmap_page);
130 156
131int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, 157static int dma_direct_map_sg(struct device *dev, struct scatterlist *sg,
132 enum dma_data_direction direction) 158 int nents, enum dma_data_direction direction)
133{ 159{
134 struct dma_mapping_ops *dma_ops = get_dma_ops(dev); 160 int i;
135 161
136 BUG_ON(!dma_ops); 162 for (i = 0; i < nents; i++, sg++) {
163 sg->dma_address = (page_to_phys(sg->page) + sg->offset) |
164 dma_direct_offset;
165 sg->dma_length = sg->length;
166 }
137 167
138 return dma_ops->map_sg(dev, sg, nents, direction); 168 return nents;
139} 169}
140EXPORT_SYMBOL(dma_map_sg);
141 170
142void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries, 171static void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sg,
143 enum dma_data_direction direction) 172 int nents, enum dma_data_direction direction)
144{ 173{
145 struct dma_mapping_ops *dma_ops = get_dma_ops(dev); 174}
146
147 BUG_ON(!dma_ops);
148 175
149 dma_ops->unmap_sg(dev, sg, nhwentries, direction); 176static int dma_direct_dma_supported(struct device *dev, u64 mask)
177{
178 /* Could be improved to check for memory though it better be
179 * done via some global so platforms can set the limit in case
180 * they have limited DMA windows
181 */
182 return mask >= DMA_32BIT_MASK;
150} 183}
151EXPORT_SYMBOL(dma_unmap_sg); 184
185struct dma_mapping_ops dma_direct_ops = {
186 .alloc_coherent = dma_direct_alloc_coherent,
187 .free_coherent = dma_direct_free_coherent,
188 .map_single = dma_direct_map_single,
189 .unmap_single = dma_direct_unmap_single,
190 .map_sg = dma_direct_map_sg,
191 .unmap_sg = dma_direct_unmap_sg,
192 .dma_supported = dma_direct_dma_supported,
193};
194EXPORT_SYMBOL(dma_direct_ops);
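
Note on the dma_64.c rewrite: the file no longer dispatches on dev->bus; it just provides two generic dma_mapping_ops tables (dma_iommu_ops and dma_direct_ops), and each bus points its devices at the right one through dev->archdata (the ibmebus hunk further down does exactly that for its private ops). A hedged sketch of how a bus or platform might wire a device up under this scheme — example_setup_dma() is a hypothetical helper, but the archdata fields and the two ops tables are the ones this patch introduces:

    #include <linux/device.h>
    #include <asm/iommu.h>

    /* Illustrative only: pick per-device DMA ops via archdata. */
    static void example_setup_dma(struct device *dev,
    			      struct iommu_table *tbl, int nid)
    {
    	dev->archdata.numa_node = nid;

    	if (tbl) {
    		/* Bus sits behind an iommu: translate through its table. */
    		dev->archdata.dma_data = tbl;
    		dev->archdata.dma_ops = &dma_iommu_ops;
    	} else {
    		/* Directly mapped bus: 1:1 plus dma_direct_offset. */
    		dev->archdata.dma_ops = &dma_direct_ops;
    	}
    }
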
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 748e74fcf541..1a3d4de197d2 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -87,15 +87,19 @@ system_call_common:
87 addi r9,r1,STACK_FRAME_OVERHEAD 87 addi r9,r1,STACK_FRAME_OVERHEAD
88 ld r11,exception_marker@toc(r2) 88 ld r11,exception_marker@toc(r2)
89 std r11,-16(r9) /* "regshere" marker */ 89 std r11,-16(r9) /* "regshere" marker */
90 li r10,1
91 stb r10,PACASOFTIRQEN(r13)
92 stb r10,PACAHARDIRQEN(r13)
93 std r10,SOFTE(r1)
90#ifdef CONFIG_PPC_ISERIES 94#ifdef CONFIG_PPC_ISERIES
91BEGIN_FW_FTR_SECTION 95BEGIN_FW_FTR_SECTION
92 /* Hack for handling interrupts when soft-enabling on iSeries */ 96 /* Hack for handling interrupts when soft-enabling on iSeries */
93 cmpdi cr1,r0,0x5555 /* syscall 0x5555 */ 97 cmpdi cr1,r0,0x5555 /* syscall 0x5555 */
94 andi. r10,r12,MSR_PR /* from kernel */ 98 andi. r10,r12,MSR_PR /* from kernel */
95 crand 4*cr0+eq,4*cr1+eq,4*cr0+eq 99 crand 4*cr0+eq,4*cr1+eq,4*cr0+eq
96 beq hardware_interrupt_entry 100 bne 2f
97 lbz r10,PACAPROCENABLED(r13) 101 b hardware_interrupt_entry
98 std r10,SOFTE(r1) 1022:
99END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES) 103END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
100#endif 104#endif
101 mfmsr r11 105 mfmsr r11
@@ -460,9 +464,9 @@ _GLOBAL(ret_from_except_lite)
460#endif 464#endif
461 465
462restore: 466restore:
467 ld r5,SOFTE(r1)
463#ifdef CONFIG_PPC_ISERIES 468#ifdef CONFIG_PPC_ISERIES
464BEGIN_FW_FTR_SECTION 469BEGIN_FW_FTR_SECTION
465 ld r5,SOFTE(r1)
466 cmpdi 0,r5,0 470 cmpdi 0,r5,0
467 beq 4f 471 beq 4f
468 /* Check for pending interrupts (iSeries) */ 472 /* Check for pending interrupts (iSeries) */
@@ -472,21 +476,25 @@ BEGIN_FW_FTR_SECTION
472 beq+ 4f /* skip do_IRQ if no interrupts */ 476 beq+ 4f /* skip do_IRQ if no interrupts */
473 477
474 li r3,0 478 li r3,0
475 stb r3,PACAPROCENABLED(r13) /* ensure we are soft-disabled */ 479 stb r3,PACASOFTIRQEN(r13) /* ensure we are soft-disabled */
476 ori r10,r10,MSR_EE 480 ori r10,r10,MSR_EE
477 mtmsrd r10 /* hard-enable again */ 481 mtmsrd r10 /* hard-enable again */
478 addi r3,r1,STACK_FRAME_OVERHEAD 482 addi r3,r1,STACK_FRAME_OVERHEAD
479 bl .do_IRQ 483 bl .do_IRQ
480 b .ret_from_except_lite /* loop back and handle more */ 484 b .ret_from_except_lite /* loop back and handle more */
481 4854:
4824: stb r5,PACAPROCENABLED(r13)
483END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES) 486END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
484#endif 487#endif
488 stb r5,PACASOFTIRQEN(r13)
485 489
486 ld r3,_MSR(r1) 490 ld r3,_MSR(r1)
487 andi. r0,r3,MSR_RI 491 andi. r0,r3,MSR_RI
488 beq- unrecov_restore 492 beq- unrecov_restore
489 493
494 /* extract EE bit and use it to restore paca->hard_enabled */
495 rldicl r4,r3,49,63 /* r0 = (r3 >> 15) & 1 */
496 stb r4,PACAHARDIRQEN(r13)
497
490 andi. r0,r3,MSR_PR 498 andi. r0,r3,MSR_PR
491 499
492 /* 500 /*
@@ -538,25 +546,15 @@ do_work:
538 /* Check that preempt_count() == 0 and interrupts are enabled */ 546 /* Check that preempt_count() == 0 and interrupts are enabled */
539 lwz r8,TI_PREEMPT(r9) 547 lwz r8,TI_PREEMPT(r9)
540 cmpwi cr1,r8,0 548 cmpwi cr1,r8,0
541#ifdef CONFIG_PPC_ISERIES
542BEGIN_FW_FTR_SECTION
543 ld r0,SOFTE(r1) 549 ld r0,SOFTE(r1)
544 cmpdi r0,0 550 cmpdi r0,0
545END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
546#endif
547BEGIN_FW_FTR_SECTION
548 andi. r0,r3,MSR_EE
549END_FW_FTR_SECTION_IFCLR(FW_FEATURE_ISERIES)
550 crandc eq,cr1*4+eq,eq 551 crandc eq,cr1*4+eq,eq
551 bne restore 552 bne restore
552 /* here we are preempting the current task */ 553 /* here we are preempting the current task */
5531: 5541:
554#ifdef CONFIG_PPC_ISERIES
555BEGIN_FW_FTR_SECTION
556 li r0,1 555 li r0,1
557 stb r0,PACAPROCENABLED(r13) 556 stb r0,PACASOFTIRQEN(r13)
558END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES) 557 stb r0,PACAHARDIRQEN(r13)
559#endif
560 ori r10,r10,MSR_EE 558 ori r10,r10,MSR_EE
561 mtmsrd r10,1 /* reenable interrupts */ 559 mtmsrd r10,1 /* reenable interrupts */
562 bl .preempt_schedule 560 bl .preempt_schedule
@@ -639,8 +637,7 @@ _GLOBAL(enter_rtas)
639 /* There is no way it is acceptable to get here with interrupts enabled, 637 /* There is no way it is acceptable to get here with interrupts enabled,
640 * check it with the asm equivalent of WARN_ON 638 * check it with the asm equivalent of WARN_ON
641 */ 639 */
642 mfmsr r6 640 lbz r0,PACASOFTIRQEN(r13)
643 andi. r0,r6,MSR_EE
6441: tdnei r0,0 6411: tdnei r0,0
645.section __bug_table,"a" 642.section __bug_table,"a"
646 .llong 1b,__LINE__ + 0x1000000, 1f, 2f 643 .llong 1b,__LINE__ + 0x1000000, 1f, 2f
@@ -649,7 +646,13 @@ _GLOBAL(enter_rtas)
6491: .asciz __FILE__ 6461: .asciz __FILE__
6502: .asciz "enter_rtas" 6472: .asciz "enter_rtas"
651.previous 648.previous
652 649
650 /* Hard-disable interrupts */
651 mfmsr r6
652 rldicl r7,r6,48,1
653 rotldi r7,r7,16
654 mtmsrd r7,1
655
653 /* Unfortunately, the stack pointer and the MSR are also clobbered, 656 /* Unfortunately, the stack pointer and the MSR are also clobbered,
654 * so they are saved in the PACA which allows us to restore 657 * so they are saved in the PACA which allows us to restore
655 * our original state after RTAS returns. 658 * our original state after RTAS returns.
@@ -735,8 +738,6 @@ _STATIC(rtas_restore_regs)
735 738
736#endif /* CONFIG_PPC_RTAS */ 739#endif /* CONFIG_PPC_RTAS */
737 740
738#ifdef CONFIG_PPC_MULTIPLATFORM
739
740_GLOBAL(enter_prom) 741_GLOBAL(enter_prom)
741 mflr r0 742 mflr r0
742 std r0,16(r1) 743 std r0,16(r1)
@@ -821,5 +822,3 @@ _GLOBAL(enter_prom)
821 ld r0,16(r1) 822 ld r0,16(r1)
822 mtlr r0 823 mtlr r0
823 blr 824 blr
824
825#endif /* CONFIG_PPC_MULTIPLATFORM */
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index e720729f3e55..71b1fe58e9e4 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -35,9 +35,7 @@
35#include <asm/thread_info.h> 35#include <asm/thread_info.h>
36#include <asm/firmware.h> 36#include <asm/firmware.h>
37 37
38#ifdef CONFIG_PPC_ISERIES
39#define DO_SOFT_DISABLE 38#define DO_SOFT_DISABLE
40#endif
41 39
42/* 40/*
43 * We layout physical memory as follows: 41 * We layout physical memory as follows:
@@ -74,13 +72,11 @@
74 .text 72 .text
75 .globl _stext 73 .globl _stext
76_stext: 74_stext:
77#ifdef CONFIG_PPC_MULTIPLATFORM
78_GLOBAL(__start) 75_GLOBAL(__start)
79 /* NOP this out unconditionally */ 76 /* NOP this out unconditionally */
80BEGIN_FTR_SECTION 77BEGIN_FTR_SECTION
81 b .__start_initialization_multiplatform 78 b .__start_initialization_multiplatform
82END_FTR_SECTION(0, 1) 79END_FTR_SECTION(0, 1)
83#endif /* CONFIG_PPC_MULTIPLATFORM */
84 80
85 /* Catch branch to 0 in real mode */ 81 /* Catch branch to 0 in real mode */
86 trap 82 trap
@@ -308,7 +304,9 @@ exception_marker:
308 std r9,_LINK(r1); \ 304 std r9,_LINK(r1); \
309 mfctr r10; /* save CTR in stackframe */ \ 305 mfctr r10; /* save CTR in stackframe */ \
310 std r10,_CTR(r1); \ 306 std r10,_CTR(r1); \
307 lbz r10,PACASOFTIRQEN(r13); \
311 mfspr r11,SPRN_XER; /* save XER in stackframe */ \ 308 mfspr r11,SPRN_XER; /* save XER in stackframe */ \
309 std r10,SOFTE(r1); \
312 std r11,_XER(r1); \ 310 std r11,_XER(r1); \
313 li r9,(n)+1; \ 311 li r9,(n)+1; \
314 std r9,_TRAP(r1); /* set trap number */ \ 312 std r9,_TRAP(r1); /* set trap number */ \
@@ -343,6 +341,34 @@ label##_pSeries: \
343 EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, label##_common) 341 EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, label##_common)
344 342
345 343
344#define MASKABLE_EXCEPTION_PSERIES(n, label) \
345 . = n; \
346 .globl label##_pSeries; \
347label##_pSeries: \
348 HMT_MEDIUM; \
349 mtspr SPRN_SPRG1,r13; /* save r13 */ \
350 mfspr r13,SPRN_SPRG3; /* get paca address into r13 */ \
351 std r9,PACA_EXGEN+EX_R9(r13); /* save r9, r10 */ \
352 std r10,PACA_EXGEN+EX_R10(r13); \
353 lbz r10,PACASOFTIRQEN(r13); \
354 mfcr r9; \
355 cmpwi r10,0; \
356 beq masked_interrupt; \
357 mfspr r10,SPRN_SPRG1; \
358 std r10,PACA_EXGEN+EX_R13(r13); \
359 std r11,PACA_EXGEN+EX_R11(r13); \
360 std r12,PACA_EXGEN+EX_R12(r13); \
361 clrrdi r12,r13,32; /* get high part of &label */ \
362 mfmsr r10; \
363 mfspr r11,SPRN_SRR0; /* save SRR0 */ \
364 LOAD_HANDLER(r12,label##_common) \
365 ori r10,r10,MSR_IR|MSR_DR|MSR_RI; \
366 mtspr SPRN_SRR0,r12; \
367 mfspr r12,SPRN_SRR1; /* and SRR1 */ \
368 mtspr SPRN_SRR1,r10; \
369 rfid; \
370 b . /* prevent speculative execution */
371
346#define STD_EXCEPTION_ISERIES(n, label, area) \ 372#define STD_EXCEPTION_ISERIES(n, label, area) \
347 .globl label##_iSeries; \ 373 .globl label##_iSeries; \
348label##_iSeries: \ 374label##_iSeries: \
@@ -358,40 +384,32 @@ label##_iSeries: \
358 HMT_MEDIUM; \ 384 HMT_MEDIUM; \
359 mtspr SPRN_SPRG1,r13; /* save r13 */ \ 385 mtspr SPRN_SPRG1,r13; /* save r13 */ \
360 EXCEPTION_PROLOG_ISERIES_1(PACA_EXGEN); \ 386 EXCEPTION_PROLOG_ISERIES_1(PACA_EXGEN); \
361 lbz r10,PACAPROCENABLED(r13); \ 387 lbz r10,PACASOFTIRQEN(r13); \
362 cmpwi 0,r10,0; \ 388 cmpwi 0,r10,0; \
363 beq- label##_iSeries_masked; \ 389 beq- label##_iSeries_masked; \
364 EXCEPTION_PROLOG_ISERIES_2; \ 390 EXCEPTION_PROLOG_ISERIES_2; \
365 b label##_common; \ 391 b label##_common; \
366 392
367#ifdef DO_SOFT_DISABLE 393#ifdef CONFIG_PPC_ISERIES
368#define DISABLE_INTS \ 394#define DISABLE_INTS \
369BEGIN_FW_FTR_SECTION; \
370 lbz r10,PACAPROCENABLED(r13); \
371 li r11,0; \ 395 li r11,0; \
372 std r10,SOFTE(r1); \ 396 stb r11,PACASOFTIRQEN(r13); \
397BEGIN_FW_FTR_SECTION; \
398 stb r11,PACAHARDIRQEN(r13); \
399END_FW_FTR_SECTION_IFCLR(FW_FEATURE_ISERIES); \
400BEGIN_FW_FTR_SECTION; \
373 mfmsr r10; \ 401 mfmsr r10; \
374 stb r11,PACAPROCENABLED(r13); \
375 ori r10,r10,MSR_EE; \ 402 ori r10,r10,MSR_EE; \
376 mtmsrd r10,1; \ 403 mtmsrd r10,1; \
377END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES) 404END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
378 405
379#define ENABLE_INTS \ 406#else
380BEGIN_FW_FTR_SECTION; \ 407#define DISABLE_INTS \
381 lbz r10,PACAPROCENABLED(r13); \ 408 li r11,0; \
382 mfmsr r11; \ 409 stb r11,PACASOFTIRQEN(r13); \
383 std r10,SOFTE(r1); \ 410 stb r11,PACAHARDIRQEN(r13)
384 ori r11,r11,MSR_EE; \
385END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES); \
386BEGIN_FW_FTR_SECTION; \
387 ld r12,_MSR(r1); \
388 mfmsr r11; \
389 rlwimi r11,r12,0,MSR_EE; \
390END_FW_FTR_SECTION_IFCLR(FW_FEATURE_ISERIES); \
391 mtmsrd r11,1
392 411
393#else /* hard enable/disable interrupts */ 412#endif /* CONFIG_PPC_ISERIES */
394#define DISABLE_INTS
395 413
396#define ENABLE_INTS \ 414#define ENABLE_INTS \
397 ld r12,_MSR(r1); \ 415 ld r12,_MSR(r1); \
@@ -399,8 +417,6 @@ END_FW_FTR_SECTION_IFCLR(FW_FEATURE_ISERIES); \
399 rlwimi r11,r12,0,MSR_EE; \ 417 rlwimi r11,r12,0,MSR_EE; \
400 mtmsrd r11,1 418 mtmsrd r11,1
401 419
402#endif
403
404#define STD_EXCEPTION_COMMON(trap, label, hdlr) \ 420#define STD_EXCEPTION_COMMON(trap, label, hdlr) \
405 .align 7; \ 421 .align 7; \
406 .globl label##_common; \ 422 .globl label##_common; \
@@ -541,11 +557,11 @@ instruction_access_slb_pSeries:
541 mfspr r12,SPRN_SRR1 /* and SRR1 */ 557 mfspr r12,SPRN_SRR1 /* and SRR1 */
542 b .slb_miss_realmode /* Rel. branch works in real mode */ 558 b .slb_miss_realmode /* Rel. branch works in real mode */
543 559
544 STD_EXCEPTION_PSERIES(0x500, hardware_interrupt) 560 MASKABLE_EXCEPTION_PSERIES(0x500, hardware_interrupt)
545 STD_EXCEPTION_PSERIES(0x600, alignment) 561 STD_EXCEPTION_PSERIES(0x600, alignment)
546 STD_EXCEPTION_PSERIES(0x700, program_check) 562 STD_EXCEPTION_PSERIES(0x700, program_check)
547 STD_EXCEPTION_PSERIES(0x800, fp_unavailable) 563 STD_EXCEPTION_PSERIES(0x800, fp_unavailable)
548 STD_EXCEPTION_PSERIES(0x900, decrementer) 564 MASKABLE_EXCEPTION_PSERIES(0x900, decrementer)
549 STD_EXCEPTION_PSERIES(0xa00, trap_0a) 565 STD_EXCEPTION_PSERIES(0xa00, trap_0a)
550 STD_EXCEPTION_PSERIES(0xb00, trap_0b) 566 STD_EXCEPTION_PSERIES(0xb00, trap_0b)
551 567
@@ -597,7 +613,24 @@ system_call_pSeries:
597/*** pSeries interrupt support ***/ 613/*** pSeries interrupt support ***/
598 614
599 /* moved from 0xf00 */ 615 /* moved from 0xf00 */
600 STD_EXCEPTION_PSERIES(., performance_monitor) 616 MASKABLE_EXCEPTION_PSERIES(., performance_monitor)
617
618/*
619 * An interrupt came in while soft-disabled; clear EE in SRR1,
620 * clear paca->hard_enabled and return.
621 */
622masked_interrupt:
623 stb r10,PACAHARDIRQEN(r13)
624 mtcrf 0x80,r9
625 ld r9,PACA_EXGEN+EX_R9(r13)
626 mfspr r10,SPRN_SRR1
627 rldicl r10,r10,48,1 /* clear MSR_EE */
628 rotldi r10,r10,16
629 mtspr SPRN_SRR1,r10
630 ld r10,PACA_EXGEN+EX_R10(r13)
631 mfspr r13,SPRN_SPRG1
632 rfid
633 b .
601 634
602 .align 7 635 .align 7
603do_stab_bolted_pSeries: 636do_stab_bolted_pSeries:
@@ -792,7 +825,7 @@ system_reset_iSeries:
792 825
793 cmpwi 0,r23,0 826 cmpwi 0,r23,0
794 beq iSeries_secondary_smp_loop /* Loop until told to go */ 827 beq iSeries_secondary_smp_loop /* Loop until told to go */
795 bne .__secondary_start /* Loop until told to go */ 828 bne __secondary_start /* Loop until told to go */
796iSeries_secondary_smp_loop: 829iSeries_secondary_smp_loop:
797 /* Let the Hypervisor know we are alive */ 830 /* Let the Hypervisor know we are alive */
798 /* 8002 is a call to HvCallCfg::getLps, a harmless Hypervisor function */ 831 /* 8002 is a call to HvCallCfg::getLps, a harmless Hypervisor function */
@@ -813,7 +846,6 @@ iSeries_secondary_smp_loop:
813 b 1b /* If SMP not configured, secondaries 846 b 1b /* If SMP not configured, secondaries
814 * loop forever */ 847 * loop forever */
815 848
816 .globl decrementer_iSeries_masked
817decrementer_iSeries_masked: 849decrementer_iSeries_masked:
818 /* We may not have a valid TOC pointer in here. */ 850 /* We may not have a valid TOC pointer in here. */
819 li r11,1 851 li r11,1
@@ -824,7 +856,6 @@ decrementer_iSeries_masked:
824 mtspr SPRN_DEC,r12 856 mtspr SPRN_DEC,r12
825 /* fall through */ 857 /* fall through */
826 858
827 .globl hardware_interrupt_iSeries_masked
828hardware_interrupt_iSeries_masked: 859hardware_interrupt_iSeries_masked:
829 mtcrf 0x80,r9 /* Restore regs */ 860 mtcrf 0x80,r9 /* Restore regs */
830 ld r12,PACALPPACAPTR(r13) 861 ld r12,PACALPPACAPTR(r13)
@@ -926,10 +957,18 @@ bad_stack:
926 * any task or sent any task a signal, you should use 957 * any task or sent any task a signal, you should use
927 * ret_from_except or ret_from_except_lite instead of this. 958 * ret_from_except or ret_from_except_lite instead of this.
928 */ 959 */
960fast_exc_return_irq: /* restores irq state too */
961 ld r3,SOFTE(r1)
962 ld r12,_MSR(r1)
963 stb r3,PACASOFTIRQEN(r13) /* restore paca->soft_enabled */
964 rldicl r4,r12,49,63 /* get MSR_EE to LSB */
965 stb r4,PACAHARDIRQEN(r13) /* restore paca->hard_enabled */
966 b 1f
967
929 .globl fast_exception_return 968 .globl fast_exception_return
930fast_exception_return: 969fast_exception_return:
931 ld r12,_MSR(r1) 970 ld r12,_MSR(r1)
932 ld r11,_NIP(r1) 9711: ld r11,_NIP(r1)
933 andi. r3,r12,MSR_RI /* check if RI is set */ 972 andi. r3,r12,MSR_RI /* check if RI is set */
934 beq- unrecov_fer 973 beq- unrecov_fer
935 974
@@ -952,7 +991,8 @@ fast_exception_return:
952 REST_8GPRS(2, r1) 991 REST_8GPRS(2, r1)
953 992
954 mfmsr r10 993 mfmsr r10
955 clrrdi r10,r10,2 /* clear RI (LE is 0 already) */ 994 rldicl r10,r10,48,1 /* clear EE */
995 rldicr r10,r10,16,61 /* clear RI (LE is 0 already) */
956 mtmsrd r10,1 996 mtmsrd r10,1
957 997
958 mtspr SPRN_SRR1,r12 998 mtspr SPRN_SRR1,r12
@@ -1326,6 +1366,16 @@ BEGIN_FW_FTR_SECTION
1326 * interrupts if necessary. 1366 * interrupts if necessary.
1327 */ 1367 */
1328 beq 13f 1368 beq 13f
1369END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
1370#endif
1371BEGIN_FW_FTR_SECTION
1372 /*
1373 * Here we have interrupts hard-disabled, so it is sufficient
1374 * to restore paca->{soft,hard}_enable and get out.
1375 */
1376 beq fast_exc_return_irq /* Return from exception on success */
1377END_FW_FTR_SECTION_IFCLR(FW_FEATURE_ISERIES)
1378
1329 /* For a hash failure, we don't bother re-enabling interrupts */ 1379 /* For a hash failure, we don't bother re-enabling interrupts */
1330 ble- 12f 1380 ble- 12f
1331 1381
@@ -1337,14 +1387,6 @@ BEGIN_FW_FTR_SECTION
1337 ld r3,SOFTE(r1) 1387 ld r3,SOFTE(r1)
1338 bl .local_irq_restore 1388 bl .local_irq_restore
1339 b 11f 1389 b 11f
1340END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
1341#endif
1342BEGIN_FW_FTR_SECTION
1343 beq fast_exception_return /* Return from exception on success */
1344 ble- 12f /* Failure return from hash_page */
1345
1346 /* fall through */
1347END_FW_FTR_SECTION_IFCLR(FW_FEATURE_ISERIES)
1348 1390
1349/* Here we have a page fault that hash_page can't handle. */ 1391/* Here we have a page fault that hash_page can't handle. */
1350handle_page_fault: 1392handle_page_fault:
@@ -1362,6 +1404,8 @@ handle_page_fault:
1362 bl .bad_page_fault 1404 bl .bad_page_fault
1363 b .ret_from_except 1405 b .ret_from_except
1364 1406
140713: b .ret_from_except_lite
1408
1365/* We have a page fault that hash_page could handle but HV refused 1409/* We have a page fault that hash_page could handle but HV refused
1366 * the PTE insertion 1410 * the PTE insertion
1367 */ 1411 */
@@ -1371,8 +1415,6 @@ handle_page_fault:
1371 bl .low_hash_fault 1415 bl .low_hash_fault
1372 b .ret_from_except 1416 b .ret_from_except
1373 1417
137413: b .ret_from_except_lite
1375
1376 /* here we have a segment miss */ 1418 /* here we have a segment miss */
1377do_ste_alloc: 1419do_ste_alloc:
1378 bl .ste_allocate /* try to insert stab entry */ 1420 bl .ste_allocate /* try to insert stab entry */
@@ -1560,7 +1602,7 @@ _GLOBAL(generic_secondary_smp_init)
1560 ld r1,PACAEMERGSP(r13) 1602 ld r1,PACAEMERGSP(r13)
1561 subi r1,r1,STACK_FRAME_OVERHEAD 1603 subi r1,r1,STACK_FRAME_OVERHEAD
1562 1604
1563 b .__secondary_start 1605 b __secondary_start
1564#endif 1606#endif
1565 1607
1566#ifdef CONFIG_PPC_ISERIES 1608#ifdef CONFIG_PPC_ISERIES
@@ -1595,7 +1637,6 @@ _STATIC(__start_initialization_iSeries)
1595 b .start_here_common 1637 b .start_here_common
1596#endif /* CONFIG_PPC_ISERIES */ 1638#endif /* CONFIG_PPC_ISERIES */
1597 1639
1598#ifdef CONFIG_PPC_MULTIPLATFORM
1599 1640
1600_STATIC(__mmu_off) 1641_STATIC(__mmu_off)
1601 mfmsr r3 1642 mfmsr r3
@@ -1621,13 +1662,11 @@ _STATIC(__mmu_off)
1621 * 1662 *
1622 */ 1663 */
1623_GLOBAL(__start_initialization_multiplatform) 1664_GLOBAL(__start_initialization_multiplatform)
1624#ifdef CONFIG_PPC_MULTIPLATFORM
1625 /* 1665 /*
1626 * Are we booted from a PROM Of-type client-interface ? 1666 * Are we booted from a PROM Of-type client-interface ?
1627 */ 1667 */
1628 cmpldi cr0,r5,0 1668 cmpldi cr0,r5,0
1629 bne .__boot_from_prom /* yes -> prom */ 1669 bne .__boot_from_prom /* yes -> prom */
1630#endif
1631 1670
1632 /* Save parameters */ 1671 /* Save parameters */
1633 mr r31,r3 1672 mr r31,r3
@@ -1656,7 +1695,6 @@ _GLOBAL(__start_initialization_multiplatform)
1656 bl .__mmu_off 1695 bl .__mmu_off
1657 b .__after_prom_start 1696 b .__after_prom_start
1658 1697
1659#ifdef CONFIG_PPC_MULTIPLATFORM
1660_STATIC(__boot_from_prom) 1698_STATIC(__boot_from_prom)
1661 /* Save parameters */ 1699 /* Save parameters */
1662 mr r31,r3 1700 mr r31,r3
@@ -1696,7 +1734,6 @@ _STATIC(__boot_from_prom)
1696 bl .prom_init 1734 bl .prom_init
1697 /* We never return */ 1735 /* We never return */
1698 trap 1736 trap
1699#endif
1700 1737
1701/* 1738/*
1702 * At this point, r3 contains the physical address we are running at, 1739 * At this point, r3 contains the physical address we are running at,
@@ -1752,8 +1789,6 @@ _STATIC(__after_prom_start)
1752 bl .copy_and_flush /* copy the rest */ 1789 bl .copy_and_flush /* copy the rest */
1753 b .start_here_multiplatform 1790 b .start_here_multiplatform
1754 1791
1755#endif /* CONFIG_PPC_MULTIPLATFORM */
1756
1757/* 1792/*
1758 * Copy routine used to copy the kernel to start at physical address 0 1793 * Copy routine used to copy the kernel to start at physical address 0
1759 * and flush and invalidate the caches as needed. 1794 * and flush and invalidate the caches as needed.
@@ -1836,7 +1871,7 @@ _GLOBAL(pmac_secondary_start)
1836 ld r1,PACAEMERGSP(r13) 1871 ld r1,PACAEMERGSP(r13)
1837 subi r1,r1,STACK_FRAME_OVERHEAD 1872 subi r1,r1,STACK_FRAME_OVERHEAD
1838 1873
1839 b .__secondary_start 1874 b __secondary_start
1840 1875
1841#endif /* CONFIG_PPC_PMAC */ 1876#endif /* CONFIG_PPC_PMAC */
1842 1877
@@ -1853,7 +1888,7 @@ _GLOBAL(pmac_secondary_start)
1853 * r13 = paca virtual address 1888 * r13 = paca virtual address
1854 * SPRG3 = paca virtual address 1889 * SPRG3 = paca virtual address
1855 */ 1890 */
1856_GLOBAL(__secondary_start) 1891__secondary_start:
1857 /* Set thread priority to MEDIUM */ 1892 /* Set thread priority to MEDIUM */
1858 HMT_MEDIUM 1893 HMT_MEDIUM
1859 1894
@@ -1877,11 +1912,16 @@ _GLOBAL(__secondary_start)
1877 /* enable MMU and jump to start_secondary */ 1912 /* enable MMU and jump to start_secondary */
1878 LOAD_REG_ADDR(r3, .start_secondary_prolog) 1913 LOAD_REG_ADDR(r3, .start_secondary_prolog)
1879 LOAD_REG_IMMEDIATE(r4, MSR_KERNEL) 1914 LOAD_REG_IMMEDIATE(r4, MSR_KERNEL)
1880#ifdef DO_SOFT_DISABLE 1915#ifdef CONFIG_PPC_ISERIES
1881BEGIN_FW_FTR_SECTION 1916BEGIN_FW_FTR_SECTION
1882 ori r4,r4,MSR_EE 1917 ori r4,r4,MSR_EE
1883END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES) 1918END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
1884#endif 1919#endif
1920BEGIN_FW_FTR_SECTION
1921 stb r7,PACASOFTIRQEN(r13)
1922 stb r7,PACAHARDIRQEN(r13)
1923END_FW_FTR_SECTION_IFCLR(FW_FEATURE_ISERIES)
1924
1885 mtspr SPRN_SRR0,r3 1925 mtspr SPRN_SRR0,r3
1886 mtspr SPRN_SRR1,r4 1926 mtspr SPRN_SRR1,r4
1887 rfid 1927 rfid
@@ -1913,7 +1953,6 @@ _GLOBAL(enable_64b_mode)
1913 isync 1953 isync
1914 blr 1954 blr
1915 1955
1916#ifdef CONFIG_PPC_MULTIPLATFORM
1917/* 1956/*
1918 * This is where the main kernel code starts. 1957 * This is where the main kernel code starts.
1919 */ 1958 */
@@ -1977,7 +2016,6 @@ _STATIC(start_here_multiplatform)
1977 mtspr SPRN_SRR1,r4 2016 mtspr SPRN_SRR1,r4
1978 rfid 2017 rfid
1979 b . /* prevent speculative execution */ 2018 b . /* prevent speculative execution */
1980#endif /* CONFIG_PPC_MULTIPLATFORM */
1981 2019
1982 /* This is where all platforms converge execution */ 2020 /* This is where all platforms converge execution */
1983_STATIC(start_here_common) 2021_STATIC(start_here_common)
@@ -2005,15 +2043,18 @@ _STATIC(start_here_common)
2005 2043
2006 /* Load up the kernel context */ 2044 /* Load up the kernel context */
20075: 20455:
2008#ifdef DO_SOFT_DISABLE
2009BEGIN_FW_FTR_SECTION
2010 li r5,0 2046 li r5,0
2011 stb r5,PACAPROCENABLED(r13) /* Soft Disabled */ 2047 stb r5,PACASOFTIRQEN(r13) /* Soft Disabled */
2048#ifdef CONFIG_PPC_ISERIES
2049BEGIN_FW_FTR_SECTION
2012 mfmsr r5 2050 mfmsr r5
2013 ori r5,r5,MSR_EE /* Hard Enabled */ 2051 ori r5,r5,MSR_EE /* Hard Enabled */
2014 mtmsrd r5 2052 mtmsrd r5
2015END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES) 2053END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
2016#endif 2054#endif
2055BEGIN_FW_FTR_SECTION
2056 stb r5,PACAHARDIRQEN(r13)
2057END_FW_FTR_SECTION_IFCLR(FW_FEATURE_ISERIES)
2017 2058
2018 bl .start_kernel 2059 bl .start_kernel
2019 2060
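
Note on the head_64.S hunk: the new MASKABLE_EXCEPTION_PSERIES/masked_interrupt path is the heart of the lazy-disable scheme — with soft_enabled clear, an external or decrementer interrupt just clears paca->hard_enabled and MSR:EE and returns, and local_irq_restore() (added in the irq.c hunk further down) replays it later. A toy userspace model of that control flow, with hypothetical names and a bool standing in for each paca byte; purely illustrative, not the kernel logic verbatim:

    #include <stdio.h>
    #include <stdbool.h>

    struct fake_paca {
    	bool soft_enabled;
    	bool hard_enabled;
    };

    static void interrupt_arrives(struct fake_paca *p)
    {
    	if (!p->soft_enabled) {
    		/* like masked_interrupt: mask at the CPU and remember */
    		p->hard_enabled = false;
    		return;
    	}
    	printf("handled immediately\n");
    }

    static void local_irq_restore_model(struct fake_paca *p)
    {
    	p->soft_enabled = true;
    	if (!p->hard_enabled) {
    		/* like local_irq_restore(): re-enable, deferred irq fires */
    		p->hard_enabled = true;
    		printf("re-enabled; deferred interrupt fires now\n");
    	}
    }

    int main(void)
    {
    	struct fake_paca p = { .soft_enabled = false, .hard_enabled = true };

    	interrupt_arrives(&p);		/* latched, not handled */
    	local_irq_restore_model(&p);	/* replayed here */
    	return 0;
    }
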
diff --git a/arch/powerpc/kernel/ibmebus.c b/arch/powerpc/kernel/ibmebus.c
index 39db7a3affe1..82bd2f10770f 100644
--- a/arch/powerpc/kernel/ibmebus.c
+++ b/arch/powerpc/kernel/ibmebus.c
@@ -112,7 +112,7 @@ static int ibmebus_dma_supported(struct device *dev, u64 mask)
112 return 1; 112 return 1;
113} 113}
114 114
115struct dma_mapping_ops ibmebus_dma_ops = { 115static struct dma_mapping_ops ibmebus_dma_ops = {
116 .alloc_coherent = ibmebus_alloc_coherent, 116 .alloc_coherent = ibmebus_alloc_coherent,
117 .free_coherent = ibmebus_free_coherent, 117 .free_coherent = ibmebus_free_coherent,
118 .map_single = ibmebus_map_single, 118 .map_single = ibmebus_map_single,
@@ -176,6 +176,10 @@ static struct ibmebus_dev* __devinit ibmebus_register_device_common(
176 dev->ofdev.dev.bus = &ibmebus_bus_type; 176 dev->ofdev.dev.bus = &ibmebus_bus_type;
177 dev->ofdev.dev.release = ibmebus_dev_release; 177 dev->ofdev.dev.release = ibmebus_dev_release;
178 178
179 dev->ofdev.dev.archdata.of_node = dev->ofdev.node;
180 dev->ofdev.dev.archdata.dma_ops = &ibmebus_dma_ops;
181 dev->ofdev.dev.archdata.numa_node = of_node_to_nid(dev->ofdev.node);
182
179 /* An ibmebusdev is based on a of_device. We have to change the 183 /* An ibmebusdev is based on a of_device. We have to change the
180 * bus type to use our own DMA mapping operations. 184 * bus type to use our own DMA mapping operations.
181 */ 185 */
@@ -210,11 +214,10 @@ static struct ibmebus_dev* __devinit ibmebus_register_device_node(
210 return NULL; 214 return NULL;
211 } 215 }
212 216
213 dev = kmalloc(sizeof(struct ibmebus_dev), GFP_KERNEL); 217 dev = kzalloc(sizeof(struct ibmebus_dev), GFP_KERNEL);
214 if (!dev) { 218 if (!dev) {
215 return NULL; 219 return NULL;
216 } 220 }
217 memset(dev, 0, sizeof(struct ibmebus_dev));
218 221
219 dev->ofdev.node = of_node_get(dn); 222 dev->ofdev.node = of_node_get(dn);
220 223
diff --git a/arch/powerpc/kernel/idle.c b/arch/powerpc/kernel/idle.c
index 4180c3998b39..8994af327b47 100644
--- a/arch/powerpc/kernel/idle.c
+++ b/arch/powerpc/kernel/idle.c
@@ -39,6 +39,13 @@
39#define cpu_should_die() 0 39#define cpu_should_die() 0
40#endif 40#endif
41 41
42static int __init powersave_off(char *arg)
43{
44 ppc_md.power_save = NULL;
45 return 0;
46}
47__setup("powersave=off", powersave_off);
48
42/* 49/*
43 * The body of the idle task. 50 * The body of the idle task.
44 */ 51 */
diff --git a/arch/powerpc/kernel/idle_power4.S b/arch/powerpc/kernel/idle_power4.S
index 30de81da7b40..ba3195478600 100644
--- a/arch/powerpc/kernel/idle_power4.S
+++ b/arch/powerpc/kernel/idle_power4.S
@@ -30,6 +30,13 @@ END_FTR_SECTION_IFCLR(CPU_FTR_CAN_NAP)
30 beqlr 30 beqlr
31 31
32 /* Go to NAP now */ 32 /* Go to NAP now */
33 mfmsr r7
34 rldicl r0,r7,48,1
35 rotldi r0,r0,16
36 mtmsrd r0,1 /* hard-disable interrupts */
37 li r0,1
38 stb r0,PACASOFTIRQEN(r13) /* we'll hard-enable shortly */
39 stb r0,PACAHARDIRQEN(r13)
33BEGIN_FTR_SECTION 40BEGIN_FTR_SECTION
34 DSSALL 41 DSSALL
35 sync 42 sync
@@ -38,7 +45,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
38 ld r8,TI_LOCAL_FLAGS(r9) /* set napping bit */ 45 ld r8,TI_LOCAL_FLAGS(r9) /* set napping bit */
39 ori r8,r8,_TLF_NAPPING /* so when we take an exception */ 46 ori r8,r8,_TLF_NAPPING /* so when we take an exception */
40 std r8,TI_LOCAL_FLAGS(r9) /* it will return to our caller */ 47 std r8,TI_LOCAL_FLAGS(r9) /* it will return to our caller */
41 mfmsr r7
42 ori r7,r7,MSR_EE 48 ori r7,r7,MSR_EE
43 oris r7,r7,MSR_POW@h 49 oris r7,r7,MSR_POW@h
441: sync 501: sync
diff --git a/arch/powerpc/kernel/io.c b/arch/powerpc/kernel/io.c
index e98180686b35..34ae11494ddc 100644
--- a/arch/powerpc/kernel/io.c
+++ b/arch/powerpc/kernel/io.c
@@ -25,13 +25,11 @@
25#include <asm/firmware.h> 25#include <asm/firmware.h>
26#include <asm/bug.h> 26#include <asm/bug.h>
27 27
28void _insb(volatile u8 __iomem *port, void *buf, long count) 28void _insb(const volatile u8 __iomem *port, void *buf, long count)
29{ 29{
30 u8 *tbuf = buf; 30 u8 *tbuf = buf;
31 u8 tmp; 31 u8 tmp;
32 32
33 BUG_ON(firmware_has_feature(FW_FEATURE_ISERIES));
34
35 if (unlikely(count <= 0)) 33 if (unlikely(count <= 0))
36 return; 34 return;
37 asm volatile("sync"); 35 asm volatile("sync");
@@ -48,8 +46,6 @@ void _outsb(volatile u8 __iomem *port, const void *buf, long count)
48{ 46{
49 const u8 *tbuf = buf; 47 const u8 *tbuf = buf;
50 48
51 BUG_ON(firmware_has_feature(FW_FEATURE_ISERIES));
52
53 if (unlikely(count <= 0)) 49 if (unlikely(count <= 0))
54 return; 50 return;
55 asm volatile("sync"); 51 asm volatile("sync");
@@ -60,13 +56,11 @@ void _outsb(volatile u8 __iomem *port, const void *buf, long count)
60} 56}
61EXPORT_SYMBOL(_outsb); 57EXPORT_SYMBOL(_outsb);
62 58
63void _insw_ns(volatile u16 __iomem *port, void *buf, long count) 59void _insw_ns(const volatile u16 __iomem *port, void *buf, long count)
64{ 60{
65 u16 *tbuf = buf; 61 u16 *tbuf = buf;
66 u16 tmp; 62 u16 tmp;
67 63
68 BUG_ON(firmware_has_feature(FW_FEATURE_ISERIES));
69
70 if (unlikely(count <= 0)) 64 if (unlikely(count <= 0))
71 return; 65 return;
72 asm volatile("sync"); 66 asm volatile("sync");
@@ -83,8 +77,6 @@ void _outsw_ns(volatile u16 __iomem *port, const void *buf, long count)
83{ 77{
84 const u16 *tbuf = buf; 78 const u16 *tbuf = buf;
85 79
86 BUG_ON(firmware_has_feature(FW_FEATURE_ISERIES));
87
88 if (unlikely(count <= 0)) 80 if (unlikely(count <= 0))
89 return; 81 return;
90 asm volatile("sync"); 82 asm volatile("sync");
@@ -95,13 +87,11 @@ void _outsw_ns(volatile u16 __iomem *port, const void *buf, long count)
95} 87}
96EXPORT_SYMBOL(_outsw_ns); 88EXPORT_SYMBOL(_outsw_ns);
97 89
98void _insl_ns(volatile u32 __iomem *port, void *buf, long count) 90void _insl_ns(const volatile u32 __iomem *port, void *buf, long count)
99{ 91{
100 u32 *tbuf = buf; 92 u32 *tbuf = buf;
101 u32 tmp; 93 u32 tmp;
102 94
103 BUG_ON(firmware_has_feature(FW_FEATURE_ISERIES));
104
105 if (unlikely(count <= 0)) 95 if (unlikely(count <= 0))
106 return; 96 return;
107 asm volatile("sync"); 97 asm volatile("sync");
@@ -118,8 +108,6 @@ void _outsl_ns(volatile u32 __iomem *port, const void *buf, long count)
118{ 108{
119 const u32 *tbuf = buf; 109 const u32 *tbuf = buf;
120 110
121 BUG_ON(firmware_has_feature(FW_FEATURE_ISERIES));
122
123 if (unlikely(count <= 0)) 111 if (unlikely(count <= 0))
124 return; 112 return;
125 asm volatile("sync"); 113 asm volatile("sync");
@@ -129,3 +117,90 @@ void _outsl_ns(volatile u32 __iomem *port, const void *buf, long count)
129 asm volatile("sync"); 117 asm volatile("sync");
130} 118}
131EXPORT_SYMBOL(_outsl_ns); 119EXPORT_SYMBOL(_outsl_ns);
120
121#define IO_CHECK_ALIGN(v,a) ((((unsigned long)(v)) & ((a) - 1)) == 0)
122
123void _memset_io(volatile void __iomem *addr, int c, unsigned long n)
124{
125 void *p = (void __force *)addr;
126 u32 lc = c;
127 lc |= lc << 8;
128 lc |= lc << 16;
129
130 __asm__ __volatile__ ("sync" : : : "memory");
131 while(n && !IO_CHECK_ALIGN(p, 4)) {
132 *((volatile u8 *)p) = c;
133 p++;
134 n--;
135 }
136 while(n >= 4) {
137 *((volatile u32 *)p) = lc;
138 p += 4;
139 n -= 4;
140 }
141 while(n) {
142 *((volatile u8 *)p) = c;
143 p++;
144 n--;
145 }
146 __asm__ __volatile__ ("sync" : : : "memory");
147}
148EXPORT_SYMBOL(_memset_io);
149
150void _memcpy_fromio(void *dest, const volatile void __iomem *src,
151 unsigned long n)
152{
153 void *vsrc = (void __force *) src;
154
155 __asm__ __volatile__ ("sync" : : : "memory");
156 while(n && (!IO_CHECK_ALIGN(vsrc, 4) || !IO_CHECK_ALIGN(dest, 4))) {
157 *((u8 *)dest) = *((volatile u8 *)vsrc);
158 __asm__ __volatile__ ("eieio" : : : "memory");
159 vsrc++;
160 dest++;
161 n--;
162 }
163 while(n > 4) {
164 *((u32 *)dest) = *((volatile u32 *)vsrc);
165 __asm__ __volatile__ ("eieio" : : : "memory");
166 vsrc += 4;
167 dest += 4;
168 n -= 4;
169 }
170 while(n) {
171 *((u8 *)dest) = *((volatile u8 *)vsrc);
172 __asm__ __volatile__ ("eieio" : : : "memory");
173 vsrc++;
174 dest++;
175 n--;
176 }
177 __asm__ __volatile__ ("sync" : : : "memory");
178}
179EXPORT_SYMBOL(_memcpy_fromio);
180
181void _memcpy_toio(volatile void __iomem *dest, const void *src, unsigned long n)
182{
183 void *vdest = (void __force *) dest;
184
185 __asm__ __volatile__ ("sync" : : : "memory");
186 while(n && (!IO_CHECK_ALIGN(vdest, 4) || !IO_CHECK_ALIGN(src, 4))) {
187 *((volatile u8 *)vdest) = *((u8 *)src);
188 src++;
189 vdest++;
190 n--;
191 }
192 while(n > 4) {
193 *((volatile u32 *)vdest) = *((volatile u32 *)src);
194 src += 4;
195 vdest += 4;
196 n-=4;
197 }
198 while(n) {
199 *((volatile u8 *)vdest) = *((u8 *)src);
200 src++;
201 vdest++;
202 n--;
203 }
204 __asm__ __volatile__ ("sync" : : : "memory");
205}
206EXPORT_SYMBOL(_memcpy_toio);
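
Note on the io.c hunk: the new out-of-line _memset_io/_memcpy_fromio/_memcpy_toio helpers bracket the access with sync and use 32-bit transfers on aligned middles. A short usage sketch from a driver's point of view, assuming an already ioremap()ed region; example_load_firmware() and its arguments are made up for illustration, and drivers would normally reach these routines through the usual memset_io()/memcpy_toio() wrappers:

    #include <linux/errno.h>
    #include <asm/io.h>

    /* Illustrative only: clear a device window, then copy an image out. */
    static int example_load_firmware(unsigned long bar_phys,
    				 const void *fw, unsigned long len)
    {
    	void __iomem *regs = ioremap(bar_phys, len);

    	if (!regs)
    		return -ENOMEM;

    	_memset_io(regs, 0, len);	/* zero the window first */
    	_memcpy_toio(regs, fw, len);	/* then copy the image to MMIO */

    	iounmap(regs);
    	return 0;
    }
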
diff --git a/arch/powerpc/kernel/iomap.c b/arch/powerpc/kernel/iomap.c
index a13a93dfc655..c68113371050 100644
--- a/arch/powerpc/kernel/iomap.c
+++ b/arch/powerpc/kernel/iomap.c
@@ -106,7 +106,7 @@ EXPORT_SYMBOL(iowrite32_rep);
106 106
107void __iomem *ioport_map(unsigned long port, unsigned int len) 107void __iomem *ioport_map(unsigned long port, unsigned int len)
108{ 108{
109 return (void __iomem *) (port+pci_io_base); 109 return (void __iomem *) (port + _IO_BASE);
110} 110}
111 111
112void ioport_unmap(void __iomem *addr) 112void ioport_unmap(void __iomem *addr)
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
index ba6b7256084b..95edad4faf26 100644
--- a/arch/powerpc/kernel/iommu.c
+++ b/arch/powerpc/kernel/iommu.c
@@ -258,9 +258,9 @@ static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
258 spin_unlock_irqrestore(&(tbl->it_lock), flags); 258 spin_unlock_irqrestore(&(tbl->it_lock), flags);
259} 259}
260 260
261int iommu_map_sg(struct device *dev, struct iommu_table *tbl, 261int iommu_map_sg(struct iommu_table *tbl, struct scatterlist *sglist,
262 struct scatterlist *sglist, int nelems, 262 int nelems, unsigned long mask,
263 unsigned long mask, enum dma_data_direction direction) 263 enum dma_data_direction direction)
264{ 264{
265 dma_addr_t dma_next = 0, dma_addr; 265 dma_addr_t dma_next = 0, dma_addr;
266 unsigned long flags; 266 unsigned long flags;
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 5e37bf14ef2d..0bd8c7665834 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -64,8 +64,9 @@
64#include <asm/ptrace.h> 64#include <asm/ptrace.h>
65#include <asm/machdep.h> 65#include <asm/machdep.h>
66#include <asm/udbg.h> 66#include <asm/udbg.h>
67#ifdef CONFIG_PPC_ISERIES 67#ifdef CONFIG_PPC64
68#include <asm/paca.h> 68#include <asm/paca.h>
69#include <asm/firmware.h>
69#endif 70#endif
70 71
71int __irq_offset_value; 72int __irq_offset_value;
@@ -95,6 +96,74 @@ extern atomic_t ipi_sent;
95EXPORT_SYMBOL(irq_desc); 96EXPORT_SYMBOL(irq_desc);
96 97
97int distribute_irqs = 1; 98int distribute_irqs = 1;
99
100static inline unsigned long get_hard_enabled(void)
101{
102 unsigned long enabled;
103
104 __asm__ __volatile__("lbz %0,%1(13)"
105 : "=r" (enabled) : "i" (offsetof(struct paca_struct, hard_enabled)));
106
107 return enabled;
108}
109
110static inline void set_soft_enabled(unsigned long enable)
111{
112 __asm__ __volatile__("stb %0,%1(13)"
113 : : "r" (enable), "i" (offsetof(struct paca_struct, soft_enabled)));
114}
115
116void local_irq_restore(unsigned long en)
117{
118 /*
119 * get_paca()->soft_enabled = en;
120 * Is it ever valid to use local_irq_restore(0) when soft_enabled is 1?
121 * That was allowed before, and in such a case we do need to take care
122 * that gcc will set soft_enabled directly via r13, not choose to use
123 * an intermediate register, lest we're preempted to a different cpu.
124 */
125 set_soft_enabled(en);
126 if (!en)
127 return;
128
129 if (firmware_has_feature(FW_FEATURE_ISERIES)) {
130 /*
131 * Do we need to disable preemption here? Not really: in the
132 * unlikely event that we're preempted to a different cpu in
133 * between getting r13, loading its lppaca_ptr, and loading
134 * its any_int, we might call iseries_handle_interrupts without
135 * an interrupt pending on the new cpu, but that's no disaster,
136 * is it? And the business of preempting us off the old cpu
137 * would itself involve a local_irq_restore which handles the
138 * interrupt to that cpu.
139 *
140 * But use "local_paca->lppaca_ptr" instead of "get_lppaca()"
141 * to avoid any preemption checking added into get_paca().
142 */
143 if (local_paca->lppaca_ptr->int_dword.any_int)
144 iseries_handle_interrupts();
145 return;
146 }
147
148 /*
149 * if (get_paca()->hard_enabled) return;
150 * But again we need to take care that gcc gets hard_enabled directly
151 * via r13, not choose to use an intermediate register, lest we're
152 * preempted to a different cpu in between the two instructions.
153 */
154 if (get_hard_enabled())
155 return;
156
157 /*
158 * Need to hard-enable interrupts here. Since currently disabled,
159 * no need to take further asm precautions against preemption; but
160 * use local_paca instead of get_paca() to avoid preemption checking.
161 */
162 local_paca->hard_enabled = en;
163 if ((int)mfspr(SPRN_DEC) < 0)
164 mtspr(SPRN_DEC, 1);
165 hard_irq_enable();
166}
98#endif /* CONFIG_PPC64 */ 167#endif /* CONFIG_PPC64 */
99 168
100int show_interrupts(struct seq_file *p, void *v) 169int show_interrupts(struct seq_file *p, void *v)
@@ -246,7 +315,8 @@ void do_IRQ(struct pt_regs *regs)
246 set_irq_regs(old_regs); 315 set_irq_regs(old_regs);
247 316
248#ifdef CONFIG_PPC_ISERIES 317#ifdef CONFIG_PPC_ISERIES
249 if (get_lppaca()->int_dword.fields.decr_int) { 318 if (firmware_has_feature(FW_FEATURE_ISERIES) &&
319 get_lppaca()->int_dword.fields.decr_int) {
250 get_lppaca()->int_dword.fields.decr_int = 0; 320 get_lppaca()->int_dword.fields.decr_int = 0;
251 /* Signal a fake decrementer interrupt */ 321 /* Signal a fake decrementer interrupt */
252 timer_interrupt(regs); 322 timer_interrupt(regs);
@@ -626,10 +696,14 @@ EXPORT_SYMBOL_GPL(irq_of_parse_and_map);
626 696
627void irq_dispose_mapping(unsigned int virq) 697void irq_dispose_mapping(unsigned int virq)
628{ 698{
629 struct irq_host *host = irq_map[virq].host; 699 struct irq_host *host;
630 irq_hw_number_t hwirq; 700 irq_hw_number_t hwirq;
631 unsigned long flags; 701 unsigned long flags;
632 702
703 if (virq == NO_IRQ)
704 return;
705
706 host = irq_map[virq].host;
633 WARN_ON (host == NULL); 707 WARN_ON (host == NULL);
634 if (host == NULL) 708 if (host == NULL)
635 return; 709 return;
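
local_irq_restore() is now the C side of the ppc64 lazy-disable scheme: soft_enabled is written through r13 first, and the hard EE bit is only re-enabled (with a pending decrementer replayed by forcing DEC to 1) if an interrupt arrived while soft-disabled. Callers are unchanged; a minimal sketch, with the protected counter purely illustrative:

#include <linux/irqflags.h>

static unsigned long counter;		/* made-up data to protect */

static void bump_counter(void)
{
	unsigned long flags;

	local_irq_save(flags);		/* soft-disables on ppc64 */
	counter++;
	local_irq_restore(flags);	/* may hard-enable and replay DEC */
}
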
diff --git a/arch/powerpc/kernel/of_device.c b/arch/powerpc/kernel/of_device.c
index 397c83eda20e..8a06724e029e 100644
--- a/arch/powerpc/kernel/of_device.c
+++ b/arch/powerpc/kernel/of_device.c
@@ -9,30 +9,26 @@
9#include <asm/of_device.h> 9#include <asm/of_device.h>
10 10
11/** 11/**
12 * of_match_device - Tell if an of_device structure has a matching 12 * of_match_node - Tell if a device_node has a matching of_match structure
13 * of_match structure
14 * @ids: array of of device match structures to search in 13 * @ids: array of of device match structures to search in
15 * @dev: the of device structure to match against 14 * @node: the device_node to match against
16 * 15 *
17 * Used by a driver to check whether an of_device present in the 16 * Low level utility function used by device matching.
18 * system is in its list of supported devices.
19 */ 17 */
20const struct of_device_id *of_match_device(const struct of_device_id *matches, 18const struct of_device_id *of_match_node(const struct of_device_id *matches,
21 const struct of_device *dev) 19 const struct device_node *node)
22{ 20{
23 if (!dev->node)
24 return NULL;
25 while (matches->name[0] || matches->type[0] || matches->compatible[0]) { 21 while (matches->name[0] || matches->type[0] || matches->compatible[0]) {
26 int match = 1; 22 int match = 1;
27 if (matches->name[0]) 23 if (matches->name[0])
28 match &= dev->node->name 24 match &= node->name
29 && !strcmp(matches->name, dev->node->name); 25 && !strcmp(matches->name, node->name);
30 if (matches->type[0]) 26 if (matches->type[0])
31 match &= dev->node->type 27 match &= node->type
32 && !strcmp(matches->type, dev->node->type); 28 && !strcmp(matches->type, node->type);
33 if (matches->compatible[0]) 29 if (matches->compatible[0])
34 match &= device_is_compatible(dev->node, 30 match &= device_is_compatible(node,
35 matches->compatible); 31 matches->compatible);
36 if (match) 32 if (match)
37 return matches; 33 return matches;
38 matches++; 34 matches++;
@@ -40,16 +36,21 @@ const struct of_device_id *of_match_device(const struct of_device_id *matches,
40 return NULL; 36 return NULL;
41} 37}
42 38
43static int of_platform_bus_match(struct device *dev, struct device_driver *drv) 39/**
40 * of_match_device - Tell if an of_device structure has a matching
41 * of_match structure
42 * @ids: array of of device match structures to search in
43 * @dev: the of device structure to match against
44 *
45 * Used by a driver to check whether an of_device present in the
46 * system is in its list of supported devices.
47 */
48const struct of_device_id *of_match_device(const struct of_device_id *matches,
49 const struct of_device *dev)
44{ 50{
45 struct of_device * of_dev = to_of_device(dev); 51 if (!dev->node)
46 struct of_platform_driver * of_drv = to_of_platform_driver(drv); 52 return NULL;
47 const struct of_device_id * matches = of_drv->match_table; 53 return of_match_node(matches, dev->node);
48
49 if (!matches)
50 return 0;
51
52 return of_match_device(matches, of_dev) != NULL;
53} 54}
54 55
55struct of_device *of_dev_get(struct of_device *dev) 56struct of_device *of_dev_get(struct of_device *dev)
@@ -71,96 +72,8 @@ void of_dev_put(struct of_device *dev)
71 put_device(&dev->dev); 72 put_device(&dev->dev);
72} 73}
73 74
74 75static ssize_t dev_show_devspec(struct device *dev,
75static int of_device_probe(struct device *dev) 76 struct device_attribute *attr, char *buf)
76{
77 int error = -ENODEV;
78 struct of_platform_driver *drv;
79 struct of_device *of_dev;
80 const struct of_device_id *match;
81
82 drv = to_of_platform_driver(dev->driver);
83 of_dev = to_of_device(dev);
84
85 if (!drv->probe)
86 return error;
87
88 of_dev_get(of_dev);
89
90 match = of_match_device(drv->match_table, of_dev);
91 if (match)
92 error = drv->probe(of_dev, match);
93 if (error)
94 of_dev_put(of_dev);
95
96 return error;
97}
98
99static int of_device_remove(struct device *dev)
100{
101 struct of_device * of_dev = to_of_device(dev);
102 struct of_platform_driver * drv = to_of_platform_driver(dev->driver);
103
104 if (dev->driver && drv->remove)
105 drv->remove(of_dev);
106 return 0;
107}
108
109static int of_device_suspend(struct device *dev, pm_message_t state)
110{
111 struct of_device * of_dev = to_of_device(dev);
112 struct of_platform_driver * drv = to_of_platform_driver(dev->driver);
113 int error = 0;
114
115 if (dev->driver && drv->suspend)
116 error = drv->suspend(of_dev, state);
117 return error;
118}
119
120static int of_device_resume(struct device * dev)
121{
122 struct of_device * of_dev = to_of_device(dev);
123 struct of_platform_driver * drv = to_of_platform_driver(dev->driver);
124 int error = 0;
125
126 if (dev->driver && drv->resume)
127 error = drv->resume(of_dev);
128 return error;
129}
130
131struct bus_type of_platform_bus_type = {
132 .name = "of_platform",
133 .match = of_platform_bus_match,
134 .probe = of_device_probe,
135 .remove = of_device_remove,
136 .suspend = of_device_suspend,
137 .resume = of_device_resume,
138};
139
140static int __init of_bus_driver_init(void)
141{
142 return bus_register(&of_platform_bus_type);
143}
144
145postcore_initcall(of_bus_driver_init);
146
147int of_register_driver(struct of_platform_driver *drv)
148{
149 /* initialize common driver fields */
150 drv->driver.name = drv->name;
151 drv->driver.bus = &of_platform_bus_type;
152
153 /* register with core */
154 return driver_register(&drv->driver);
155}
156
157void of_unregister_driver(struct of_platform_driver *drv)
158{
159 driver_unregister(&drv->driver);
160}
161
162
163static ssize_t dev_show_devspec(struct device *dev, struct device_attribute *attr, char *buf)
164{ 77{
165 struct of_device *ofdev; 78 struct of_device *ofdev;
166 79
@@ -208,41 +121,11 @@ void of_device_unregister(struct of_device *ofdev)
208 device_unregister(&ofdev->dev); 121 device_unregister(&ofdev->dev);
209} 122}
210 123
211struct of_device* of_platform_device_create(struct device_node *np,
212 const char *bus_id,
213 struct device *parent)
214{
215 struct of_device *dev;
216
217 dev = kmalloc(sizeof(*dev), GFP_KERNEL);
218 if (!dev)
219 return NULL;
220 memset(dev, 0, sizeof(*dev));
221
222 dev->node = of_node_get(np);
223 dev->dma_mask = 0xffffffffUL;
224 dev->dev.dma_mask = &dev->dma_mask;
225 dev->dev.parent = parent;
226 dev->dev.bus = &of_platform_bus_type;
227 dev->dev.release = of_release_dev;
228
229 strlcpy(dev->dev.bus_id, bus_id, BUS_ID_SIZE);
230
231 if (of_device_register(dev) != 0) {
232 kfree(dev);
233 return NULL;
234 }
235
236 return dev;
237}
238 124
125EXPORT_SYMBOL(of_match_node);
239EXPORT_SYMBOL(of_match_device); 126EXPORT_SYMBOL(of_match_device);
240EXPORT_SYMBOL(of_platform_bus_type);
241EXPORT_SYMBOL(of_register_driver);
242EXPORT_SYMBOL(of_unregister_driver);
243EXPORT_SYMBOL(of_device_register); 127EXPORT_SYMBOL(of_device_register);
244EXPORT_SYMBOL(of_device_unregister); 128EXPORT_SYMBOL(of_device_unregister);
245EXPORT_SYMBOL(of_dev_get); 129EXPORT_SYMBOL(of_dev_get);
246EXPORT_SYMBOL(of_dev_put); 130EXPORT_SYMBOL(of_dev_put);
247EXPORT_SYMBOL(of_platform_device_create);
248EXPORT_SYMBOL(of_release_dev); 131EXPORT_SYMBOL(of_release_dev);
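
of_match_node() is the new low-level matcher and of_match_device() now just forwards dev->node to it. A short sketch of matching a bare device_node, assuming the header layout of this series; the "acme,widget" compatible string is invented.

#include <asm/prom.h>
#include <asm/of_device.h>

static const struct of_device_id widget_ids[] = {
	{ .compatible = "acme,widget", },	/* illustrative binding */
	{},
};

static int widget_node_is_supported(struct device_node *np)
{
	return of_match_node(widget_ids, np) != NULL;
}
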
diff --git a/arch/powerpc/kernel/of_platform.c b/arch/powerpc/kernel/of_platform.c
new file mode 100644
index 000000000000..b3189d0161b8
--- /dev/null
+++ b/arch/powerpc/kernel/of_platform.c
@@ -0,0 +1,489 @@
1/*
2 * Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corp.
3 * <benh@kernel.crashing.org>
4 * and Arnd Bergmann, IBM Corp.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 *
11 */
12
13#undef DEBUG
14
15#include <linux/string.h>
16#include <linux/kernel.h>
17#include <linux/init.h>
18#include <linux/module.h>
19#include <linux/mod_devicetable.h>
20#include <linux/slab.h>
21#include <linux/pci.h>
22
23#include <asm/errno.h>
24#include <asm/dcr.h>
25#include <asm/of_device.h>
26#include <asm/of_platform.h>
27#include <asm/topology.h>
28#include <asm/pci-bridge.h>
29#include <asm/ppc-pci.h>
30#include <asm/atomic.h>
31
32
33/*
34 * The list of OF IDs below is used for matching bus types in the
35 * system whose devices are to be exposed as of_platform_devices.
36 *
37 * This is the default list valid for most platforms. This file provides
38 * functions which can take an explicit list if necessary, though
39 *
40 * The search is always performed recursively, looking for children of
41 * the provided device_node and recursing further if such a child matches
42 * a bus type in the list.
43 */
44
45static struct of_device_id of_default_bus_ids[] = {
46 { .type = "soc", },
47 { .compatible = "soc", },
48 { .type = "spider", },
49 { .type = "axon", },
50 { .type = "plb5", },
51 { .type = "plb4", },
52 { .type = "opb", },
53 {},
54};
55
56static atomic_t bus_no_reg_magic;
57
58/*
59 *
60 * OF platform device type definition & base infrastructure
61 *
62 */
63
64static int of_platform_bus_match(struct device *dev, struct device_driver *drv)
65{
66 struct of_device * of_dev = to_of_device(dev);
67 struct of_platform_driver * of_drv = to_of_platform_driver(drv);
68 const struct of_device_id * matches = of_drv->match_table;
69
70 if (!matches)
71 return 0;
72
73 return of_match_device(matches, of_dev) != NULL;
74}
75
76static int of_platform_device_probe(struct device *dev)
77{
78 int error = -ENODEV;
79 struct of_platform_driver *drv;
80 struct of_device *of_dev;
81 const struct of_device_id *match;
82
83 drv = to_of_platform_driver(dev->driver);
84 of_dev = to_of_device(dev);
85
86 if (!drv->probe)
87 return error;
88
89 of_dev_get(of_dev);
90
91 match = of_match_device(drv->match_table, of_dev);
92 if (match)
93 error = drv->probe(of_dev, match);
94 if (error)
95 of_dev_put(of_dev);
96
97 return error;
98}
99
100static int of_platform_device_remove(struct device *dev)
101{
102 struct of_device * of_dev = to_of_device(dev);
103 struct of_platform_driver * drv = to_of_platform_driver(dev->driver);
104
105 if (dev->driver && drv->remove)
106 drv->remove(of_dev);
107 return 0;
108}
109
110static int of_platform_device_suspend(struct device *dev, pm_message_t state)
111{
112 struct of_device * of_dev = to_of_device(dev);
113 struct of_platform_driver * drv = to_of_platform_driver(dev->driver);
114 int error = 0;
115
116 if (dev->driver && drv->suspend)
117 error = drv->suspend(of_dev, state);
118 return error;
119}
120
121static int of_platform_device_resume(struct device * dev)
122{
123 struct of_device * of_dev = to_of_device(dev);
124 struct of_platform_driver * drv = to_of_platform_driver(dev->driver);
125 int error = 0;
126
127 if (dev->driver && drv->resume)
128 error = drv->resume(of_dev);
129 return error;
130}
131
132struct bus_type of_platform_bus_type = {
133 .name = "of_platform",
134 .match = of_platform_bus_match,
135 .probe = of_platform_device_probe,
136 .remove = of_platform_device_remove,
137 .suspend = of_platform_device_suspend,
138 .resume = of_platform_device_resume,
139};
140EXPORT_SYMBOL(of_platform_bus_type);
141
142static int __init of_bus_driver_init(void)
143{
144 return bus_register(&of_platform_bus_type);
145}
146
147postcore_initcall(of_bus_driver_init);
148
149int of_register_platform_driver(struct of_platform_driver *drv)
150{
151 /* initialize common driver fields */
152 drv->driver.name = drv->name;
153 drv->driver.bus = &of_platform_bus_type;
154
155 /* register with core */
156 return driver_register(&drv->driver);
157}
158EXPORT_SYMBOL(of_register_platform_driver);
159
160void of_unregister_platform_driver(struct of_platform_driver *drv)
161{
162 driver_unregister(&drv->driver);
163}
164EXPORT_SYMBOL(of_unregister_platform_driver);
165
166static void of_platform_make_bus_id(struct of_device *dev)
167{
168 struct device_node *node = dev->node;
169 char *name = dev->dev.bus_id;
170 const u32 *reg;
171 u64 addr;
172 long magic;
173
174 /*
175 * If it's a DCR based device, use 'd' for native DCRs
176 * and 'D' for MMIO DCRs.
177 */
178#ifdef CONFIG_PPC_DCR
179 reg = get_property(node, "dcr-reg", NULL);
180 if (reg) {
181#ifdef CONFIG_PPC_DCR_NATIVE
182 snprintf(name, BUS_ID_SIZE, "d%x.%s",
183 *reg, node->name);
184#else /* CONFIG_PPC_DCR_NATIVE */
185 addr = of_translate_dcr_address(node, *reg, NULL);
186 if (addr != OF_BAD_ADDR) {
187 snprintf(name, BUS_ID_SIZE,
188 "D%llx.%s", (unsigned long long)addr,
189 node->name);
190 return;
191 }
192#endif /* !CONFIG_PPC_DCR_NATIVE */
193 }
194#endif /* CONFIG_PPC_DCR */
195
196 /*
197 * For MMIO, get the physical address
198 */
199 reg = get_property(node, "reg", NULL);
200 if (reg) {
201 addr = of_translate_address(node, reg);
202 if (addr != OF_BAD_ADDR) {
203 snprintf(name, BUS_ID_SIZE,
204 "%llx.%s", (unsigned long long)addr,
205 node->name);
206 return;
207 }
208 }
209
210 /*
211 * No BusID, use the node name and add a globally incremented
212 * counter (and pray...)
213 */
214 magic = atomic_add_return(1, &bus_no_reg_magic);
215 snprintf(name, BUS_ID_SIZE, "%s.%d", node->name, magic - 1);
216}
217
218struct of_device* of_platform_device_create(struct device_node *np,
219 const char *bus_id,
220 struct device *parent)
221{
222 struct of_device *dev;
223
224 dev = kmalloc(sizeof(*dev), GFP_KERNEL);
225 if (!dev)
226 return NULL;
227 memset(dev, 0, sizeof(*dev));
228
229 dev->node = of_node_get(np);
230 dev->dma_mask = 0xffffffffUL;
231 dev->dev.dma_mask = &dev->dma_mask;
232 dev->dev.parent = parent;
233 dev->dev.bus = &of_platform_bus_type;
234 dev->dev.release = of_release_dev;
235 dev->dev.archdata.of_node = np;
236 dev->dev.archdata.numa_node = of_node_to_nid(np);
237
238 /* We do not fill the DMA ops for platform devices by default.
239 * It is currently the responsibility of the platform code
240 * to do so, possibly using a device notifier.
241 */
242
243 if (bus_id)
244 strlcpy(dev->dev.bus_id, bus_id, BUS_ID_SIZE);
245 else
246 of_platform_make_bus_id(dev);
247
248 if (of_device_register(dev) != 0) {
249 kfree(dev);
250 return NULL;
251 }
252
253 return dev;
254}
255EXPORT_SYMBOL(of_platform_device_create);
256
257
258
259/**
260 * of_platform_bus_create - Create an OF device for a bus node and all its
261 * children. Optionally recursively instantiate matching busses.
262 * @bus: device node of the bus to instantiate
263 * @matches: match table, NULL to use the default, OF_NO_DEEP_PROBE to
264 * disallow recursive creation of child busses
265 */
266static int of_platform_bus_create(struct device_node *bus,
267 struct of_device_id *matches,
268 struct device *parent)
269{
270 struct device_node *child;
271 struct of_device *dev;
272 int rc = 0;
273
274 for (child = NULL; (child = of_get_next_child(bus, child)); ) {
275 pr_debug(" create child: %s\n", child->full_name);
276 dev = of_platform_device_create(child, NULL, parent);
277 if (dev == NULL)
278 rc = -ENOMEM;
279 else if (!of_match_node(matches, child))
280 continue;
281 if (rc == 0) {
282 pr_debug(" and sub busses\n");
283 rc = of_platform_bus_create(child, matches, &dev->dev);
284 } if (rc) {
285 of_node_put(child);
286 break;
287 }
288 }
289 return rc;
290}
291
292/**
293 * of_platform_bus_probe - Probe the device-tree for platform busses
294 * @root: parent of the first level to probe or NULL for the root of the tree
295 * @matches: match table, NULL to use the default
296 * @parent: parent to hook devices from, NULL for toplevel
297 *
298 * Note that children of the provided root are not instantiated as devices
299 * unless the specified root itself matches the bus list and is not NULL.
300 */
301
302int of_platform_bus_probe(struct device_node *root,
303 struct of_device_id *matches,
304 struct device *parent)
305{
306 struct device_node *child;
307 struct of_device *dev;
308 int rc = 0;
309
310 if (matches == NULL)
311 matches = of_default_bus_ids;
312 if (matches == OF_NO_DEEP_PROBE)
313 return -EINVAL;
314 if (root == NULL)
315 root = of_find_node_by_path("/");
316 else
317 of_node_get(root);
318
319 pr_debug("of_platform_bus_probe()\n");
320 pr_debug(" starting at: %s\n", root->full_name);
321
322 /* Do a self-check of the bus type; if there's a match, create
323 * children
324 */
325 if (of_match_node(matches, root)) {
326 pr_debug(" root match, create all sub devices\n");
327 dev = of_platform_device_create(root, NULL, parent);
328 if (dev == NULL) {
329 rc = -ENOMEM;
330 goto bail;
331 }
332 pr_debug(" create all sub busses\n");
333 rc = of_platform_bus_create(root, matches, &dev->dev);
334 goto bail;
335 }
336 for (child = NULL; (child = of_get_next_child(root, child)); ) {
337 if (!of_match_node(matches, child))
338 continue;
339
340 pr_debug(" match: %s\n", child->full_name);
341 dev = of_platform_device_create(child, NULL, parent);
342 if (dev == NULL)
343 rc = -ENOMEM;
344 else
345 rc = of_platform_bus_create(child, matches, &dev->dev);
346 if (rc) {
347 of_node_put(child);
348 break;
349 }
350 }
351 bail:
352 of_node_put(root);
353 return rc;
354}
355EXPORT_SYMBOL(of_platform_bus_probe);
356
357static int of_dev_node_match(struct device *dev, void *data)
358{
359 return to_of_device(dev)->node == data;
360}
361
362struct of_device *of_find_device_by_node(struct device_node *np)
363{
364 struct device *dev;
365
366 dev = bus_find_device(&of_platform_bus_type,
367 NULL, np, of_dev_node_match);
368 if (dev)
369 return to_of_device(dev);
370 return NULL;
371}
372EXPORT_SYMBOL(of_find_device_by_node);
373
374static int of_dev_phandle_match(struct device *dev, void *data)
375{
376 phandle *ph = data;
377 return to_of_device(dev)->node->linux_phandle == *ph;
378}
379
380struct of_device *of_find_device_by_phandle(phandle ph)
381{
382 struct device *dev;
383
384 dev = bus_find_device(&of_platform_bus_type,
385 NULL, &ph, of_dev_phandle_match);
386 if (dev)
387 return to_of_device(dev);
388 return NULL;
389}
390EXPORT_SYMBOL(of_find_device_by_phandle);
391
392
393#ifdef CONFIG_PPC_OF_PLATFORM_PCI
394
395/* The probing of PCI controllers from of_platform is currently
396 * 64-bit only, mostly due to gratuitous differences between
397 * the 32-bit and 64-bit PCI code on PowerPC and the 32-bit code
398 * lacking some bits needed here.
399 */
400
401static int __devinit of_pci_phb_probe(struct of_device *dev,
402 const struct of_device_id *match)
403{
404 struct pci_controller *phb;
405
406 /* Check if we can do that ... */
407 if (ppc_md.pci_setup_phb == NULL)
408 return -ENODEV;
409
410 printk(KERN_INFO "Setting up PCI bus %s\n", dev->node->full_name);
411
412 /* Alloc and setup PHB data structure */
413 phb = pcibios_alloc_controller(dev->node);
414 if (!phb)
415 return -ENODEV;
416
417 /* Setup parent in sysfs */
418 phb->parent = &dev->dev;
419
420 /* Setup the PHB using arch provided callback */
421 if (ppc_md.pci_setup_phb(phb)) {
422 pcibios_free_controller(phb);
423 return -ENODEV;
424 }
425
426 /* Process "ranges" property */
427 pci_process_bridge_OF_ranges(phb, dev->node, 0);
428
429 /* Setup IO space.
430 * This will not work properly for ISA IOs; something needs to be done
431 * about it if we ever generalize that way of probing PCI bridges
432 */
433 pci_setup_phb_io_dynamic(phb, 0);
434
435 /* Init pci_dn data structures */
436 pci_devs_phb_init_dynamic(phb);
437
438 /* Register devices with EEH */
439#ifdef CONFIG_EEH
440 if (dev->node->child)
441 eeh_add_device_tree_early(dev->node);
442#endif /* CONFIG_EEH */
443
444 /* Scan the bus */
445 scan_phb(phb);
446
447 /* Claim resources. This might need some rework as well depending
448 * whether we are doing probe-only or not, like assigning unassigned
449 * resources etc...
450 */
451 pcibios_claim_one_bus(phb->bus);
452
453 /* Finish EEH setup */
454#ifdef CONFIG_EEH
455 eeh_add_device_tree_late(phb->bus);
456#endif
457
458 /* Add probed PCI devices to the device model */
459 pci_bus_add_devices(phb->bus);
460
461 return 0;
462}
463
464static struct of_device_id of_pci_phb_ids[] = {
465 { .type = "pci", },
466 { .type = "pcix", },
467 { .type = "pcie", },
468 { .type = "pciex", },
469 { .type = "ht", },
470 {}
471};
472
473static struct of_platform_driver of_pci_phb_driver = {
474 .name = "of-pci",
475 .match_table = of_pci_phb_ids,
476 .probe = of_pci_phb_probe,
477 .driver = {
478 .multithread_probe = 1,
479 },
480};
481
482static __init int of_pci_phb_init(void)
483{
484 return of_register_platform_driver(&of_pci_phb_driver);
485}
486
487device_initcall(of_pci_phb_init);
488
489#endif /* CONFIG_PPC_OF_PLATFORM_PCI */
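
With the bus glue moved here, drivers register through of_register_platform_driver() and platform code instantiates devices with of_platform_bus_probe(); of_pci_phb_driver above is the in-tree user. A hedged skeleton following the same pattern, where everything named "widget" or "acme" is invented:

#include <linux/kernel.h>
#include <linux/init.h>
#include <asm/of_device.h>
#include <asm/of_platform.h>

static int widget_probe(struct of_device *dev,
			const struct of_device_id *match)
{
	printk(KERN_INFO "widget at %s\n", dev->node->full_name);
	return 0;
}

static int widget_remove(struct of_device *dev)
{
	return 0;
}

static struct of_device_id widget_match[] = {
	{ .compatible = "acme,widget", },
	{},
};

static struct of_platform_driver widget_driver = {
	.name		= "widget",
	.match_table	= widget_match,
	.probe		= widget_probe,
	.remove		= widget_remove,
};

static int __init widget_init(void)
{
	return of_register_platform_driver(&widget_driver);
}
device_initcall(widget_init);

Platform code that needs a non-default bus list can pass its own table, for example of_platform_bus_probe(NULL, my_bus_ids, NULL), instead of relying on of_default_bus_ids.
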
diff --git a/arch/powerpc/kernel/pci_32.c b/arch/powerpc/kernel/pci_32.c
index 0d9ff72e2852..2f54cd81dea5 100644
--- a/arch/powerpc/kernel/pci_32.c
+++ b/arch/powerpc/kernel/pci_32.c
@@ -12,6 +12,7 @@
12#include <linux/errno.h> 12#include <linux/errno.h>
13#include <linux/bootmem.h> 13#include <linux/bootmem.h>
14#include <linux/irq.h> 14#include <linux/irq.h>
15#include <linux/list.h>
15 16
16#include <asm/processor.h> 17#include <asm/processor.h>
17#include <asm/io.h> 18#include <asm/io.h>
@@ -99,7 +100,7 @@ pcibios_fixup_resources(struct pci_dev *dev)
99 continue; 100 continue;
100 if (res->end == 0xffffffff) { 101 if (res->end == 0xffffffff) {
101 DBG("PCI:%s Resource %d [%016llx-%016llx] is unassigned\n", 102 DBG("PCI:%s Resource %d [%016llx-%016llx] is unassigned\n",
102 pci_name(dev), i, res->start, res->end); 103 pci_name(dev), i, (u64)res->start, (u64)res->end);
103 res->end -= res->start; 104 res->end -= res->start;
104 res->start = 0; 105 res->start = 0;
105 res->flags |= IORESOURCE_UNSET; 106 res->flags |= IORESOURCE_UNSET;
@@ -115,11 +116,9 @@ pcibios_fixup_resources(struct pci_dev *dev)
115 if (offset != 0) { 116 if (offset != 0) {
116 res->start += offset; 117 res->start += offset;
117 res->end += offset; 118 res->end += offset;
118#ifdef DEBUG 119 DBG("Fixup res %d (%lx) of dev %s: %llx -> %llx\n",
119 printk("Fixup res %d (%lx) of dev %s: %llx -> %llx\n", 120 i, res->flags, pci_name(dev),
120 i, res->flags, pci_name(dev), 121 (u64)res->start - offset, (u64)res->start);
121 res->start - offset, res->start);
122#endif
123 } 122 }
124 } 123 }
125 124
@@ -255,7 +254,7 @@ pcibios_allocate_bus_resources(struct list_head *bus_list)
255 } 254 }
256 255
257 DBG("PCI: bridge rsrc %llx..%llx (%lx), parent %p\n", 256 DBG("PCI: bridge rsrc %llx..%llx (%lx), parent %p\n",
258 res->start, res->end, res->flags, pr); 257 (u64)res->start, (u64)res->end, res->flags, pr);
259 if (pr) { 258 if (pr) {
260 if (request_resource(pr, res) == 0) 259 if (request_resource(pr, res) == 0)
261 continue; 260 continue;
@@ -306,7 +305,7 @@ reparent_resources(struct resource *parent, struct resource *res)
306 for (p = res->child; p != NULL; p = p->sibling) { 305 for (p = res->child; p != NULL; p = p->sibling) {
307 p->parent = res; 306 p->parent = res;
308 DBG(KERN_INFO "PCI: reparented %s [%llx..%llx] under %s\n", 307 DBG(KERN_INFO "PCI: reparented %s [%llx..%llx] under %s\n",
309 p->name, p->start, p->end, res->name); 308 p->name, (u64)p->start, (u64)p->end, res->name);
310 } 309 }
311 return 0; 310 return 0;
312} 311}
@@ -362,7 +361,7 @@ pci_relocate_bridge_resource(struct pci_bus *bus, int i)
362 } 361 }
363 if (request_resource(pr, res)) { 362 if (request_resource(pr, res)) {
364 DBG(KERN_ERR "PCI: huh? couldn't move to %llx..%llx\n", 363 DBG(KERN_ERR "PCI: huh? couldn't move to %llx..%llx\n",
365 res->start, res->end); 364 (u64)res->start, (u64)res->end);
366 return -1; /* "can't happen" */ 365 return -1; /* "can't happen" */
367 } 366 }
368 update_bridge_base(bus, i); 367 update_bridge_base(bus, i);
@@ -480,14 +479,14 @@ static inline void alloc_resource(struct pci_dev *dev, int idx)
480 struct resource *pr, *r = &dev->resource[idx]; 479 struct resource *pr, *r = &dev->resource[idx];
481 480
482 DBG("PCI:%s: Resource %d: %016llx-%016llx (f=%lx)\n", 481 DBG("PCI:%s: Resource %d: %016llx-%016llx (f=%lx)\n",
483 pci_name(dev), idx, r->start, r->end, r->flags); 482 pci_name(dev), idx, (u64)r->start, (u64)r->end, r->flags);
484 pr = pci_find_parent_resource(dev, r); 483 pr = pci_find_parent_resource(dev, r);
485 if (!pr || request_resource(pr, r) < 0) { 484 if (!pr || request_resource(pr, r) < 0) {
486 printk(KERN_ERR "PCI: Cannot allocate resource region %d" 485 printk(KERN_ERR "PCI: Cannot allocate resource region %d"
487 " of device %s\n", idx, pci_name(dev)); 486 " of device %s\n", idx, pci_name(dev));
488 if (pr) 487 if (pr)
489 DBG("PCI: parent is %p: %016llx-%016llx (f=%lx)\n", 488 DBG("PCI: parent is %p: %016llx-%016llx (f=%lx)\n",
490 pr, pr->start, pr->end, pr->flags); 489 pr, (u64)pr->start, (u64)pr->end, pr->flags);
491 /* We'll assign a new address later */ 490 /* We'll assign a new address later */
492 r->flags |= IORESOURCE_UNSET; 491 r->flags |= IORESOURCE_UNSET;
493 r->end -= r->start; 492 r->end -= r->start;
@@ -960,7 +959,7 @@ pci_process_bridge_OF_ranges(struct pci_controller *hose,
960 res->flags = IORESOURCE_IO; 959 res->flags = IORESOURCE_IO;
961 res->start = ranges[2]; 960 res->start = ranges[2];
962 DBG("PCI: IO 0x%llx -> 0x%llx\n", 961 DBG("PCI: IO 0x%llx -> 0x%llx\n",
963 res->start, res->start + size - 1); 962 (u64)res->start, (u64)res->start + size - 1);
964 break; 963 break;
965 case 2: /* memory space */ 964 case 2: /* memory space */
966 memno = 0; 965 memno = 0;
@@ -982,7 +981,7 @@ pci_process_bridge_OF_ranges(struct pci_controller *hose,
982 res->flags |= IORESOURCE_PREFETCH; 981 res->flags |= IORESOURCE_PREFETCH;
983 res->start = ranges[na+2]; 982 res->start = ranges[na+2];
984 DBG("PCI: MEM[%d] 0x%llx -> 0x%llx\n", memno, 983 DBG("PCI: MEM[%d] 0x%llx -> 0x%llx\n", memno,
985 res->start, res->start + size - 1); 984 (u64)res->start, (u64)res->start + size - 1);
986 } 985 }
987 break; 986 break;
988 } 987 }
@@ -1268,7 +1267,10 @@ pcibios_init(void)
1268 if (pci_assign_all_buses) 1267 if (pci_assign_all_buses)
1269 hose->first_busno = next_busno; 1268 hose->first_busno = next_busno;
1270 hose->last_busno = 0xff; 1269 hose->last_busno = 0xff;
1271 bus = pci_scan_bus(hose->first_busno, hose->ops, hose); 1270 bus = pci_scan_bus_parented(hose->parent, hose->first_busno,
1271 hose->ops, hose);
1272 if (bus)
1273 pci_bus_add_devices(bus);
1272 hose->last_busno = bus->subordinate; 1274 hose->last_busno = bus->subordinate;
1273 if (pci_assign_all_buses || next_busno <= hose->last_busno) 1275 if (pci_assign_all_buses || next_busno <= hose->last_busno)
1274 next_busno = hose->last_busno + pcibios_assign_bus_offset; 1276 next_busno = hose->last_busno + pcibios_assign_bus_offset;
@@ -1282,10 +1284,6 @@ pcibios_init(void)
1282 if (pci_assign_all_buses && have_of) 1284 if (pci_assign_all_buses && have_of)
1283 pcibios_make_OF_bus_map(); 1285 pcibios_make_OF_bus_map();
1284 1286
1285 /* Do machine dependent PCI interrupt routing */
1286 if (ppc_md.pci_swizzle && ppc_md.pci_map_irq)
1287 pci_fixup_irqs(ppc_md.pci_swizzle, ppc_md.pci_map_irq);
1288
1289 /* Call machine dependent fixup */ 1287 /* Call machine dependent fixup */
1290 if (ppc_md.pcibios_fixup) 1288 if (ppc_md.pcibios_fixup)
1291 ppc_md.pcibios_fixup(); 1289 ppc_md.pcibios_fixup();
@@ -1308,25 +1306,6 @@ pcibios_init(void)
1308 1306
1309subsys_initcall(pcibios_init); 1307subsys_initcall(pcibios_init);
1310 1308
1311unsigned char __init
1312common_swizzle(struct pci_dev *dev, unsigned char *pinp)
1313{
1314 struct pci_controller *hose = dev->sysdata;
1315
1316 if (dev->bus->number != hose->first_busno) {
1317 u8 pin = *pinp;
1318 do {
1319 pin = bridge_swizzle(pin, PCI_SLOT(dev->devfn));
1320 /* Move up the chain of bridges. */
1321 dev = dev->bus->self;
1322 } while (dev->bus->self);
1323 *pinp = pin;
1324
1325 /* The slot is the idsel of the last bridge. */
1326 }
1327 return PCI_SLOT(dev->devfn);
1328}
1329
1330unsigned long resource_fixup(struct pci_dev * dev, struct resource * res, 1309unsigned long resource_fixup(struct pci_dev * dev, struct resource * res,
1331 unsigned long start, unsigned long size) 1310 unsigned long start, unsigned long size)
1332{ 1311{
@@ -1338,6 +1317,7 @@ void __init pcibios_fixup_bus(struct pci_bus *bus)
1338 struct pci_controller *hose = (struct pci_controller *) bus->sysdata; 1317 struct pci_controller *hose = (struct pci_controller *) bus->sysdata;
1339 unsigned long io_offset; 1318 unsigned long io_offset;
1340 struct resource *res; 1319 struct resource *res;
1320 struct pci_dev *dev;
1341 int i; 1321 int i;
1342 1322
1343 io_offset = (unsigned long)hose->io_base_virt - isa_io_base; 1323 io_offset = (unsigned long)hose->io_base_virt - isa_io_base;
@@ -1390,8 +1370,16 @@ void __init pcibios_fixup_bus(struct pci_bus *bus)
1390 } 1370 }
1391 } 1371 }
1392 1372
1373 /* Platform specific bus fixups */
1393 if (ppc_md.pcibios_fixup_bus) 1374 if (ppc_md.pcibios_fixup_bus)
1394 ppc_md.pcibios_fixup_bus(bus); 1375 ppc_md.pcibios_fixup_bus(bus);
1376
1377 /* Read default IRQs and fixup if necessary */
1378 list_for_each_entry(dev, &bus->devices, bus_list) {
1379 pci_read_irq_line(dev);
1380 if (ppc_md.pci_irq_fixup)
1381 ppc_md.pci_irq_fixup(dev);
1382 }
1395} 1383}
1396 1384
1397char __init *pcibios_setup(char *str) 1385char __init *pcibios_setup(char *str)
@@ -1571,7 +1559,7 @@ static struct resource *__pci_mmap_make_offset(struct pci_dev *dev,
1571 *offset += hose->pci_mem_offset; 1559 *offset += hose->pci_mem_offset;
1572 res_bit = IORESOURCE_MEM; 1560 res_bit = IORESOURCE_MEM;
1573 } else { 1561 } else {
1574 io_offset = hose->io_base_virt - ___IO_BASE; 1562 io_offset = hose->io_base_virt - (void __iomem *)_IO_BASE;
1575 *offset += io_offset; 1563 *offset += io_offset;
1576 res_bit = IORESOURCE_IO; 1564 res_bit = IORESOURCE_IO;
1577 } 1565 }
@@ -1826,7 +1814,8 @@ void pci_resource_to_user(const struct pci_dev *dev, int bar,
1826 return; 1814 return;
1827 1815
1828 if (rsrc->flags & IORESOURCE_IO) 1816 if (rsrc->flags & IORESOURCE_IO)
1829 offset = ___IO_BASE - hose->io_base_virt + hose->io_base_phys; 1817 offset = (void __iomem *)_IO_BASE - hose->io_base_virt
1818 + hose->io_base_phys;
1830 1819
1831 *start = rsrc->start + offset; 1820 *start = rsrc->start + offset;
1832 *end = rsrc->end + offset; 1821 *end = rsrc->end + offset;
@@ -1845,35 +1834,6 @@ pci_init_resource(struct resource *res, unsigned long start, unsigned long end,
1845 res->child = NULL; 1834 res->child = NULL;
1846} 1835}
1847 1836
1848void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max)
1849{
1850 unsigned long start = pci_resource_start(dev, bar);
1851 unsigned long len = pci_resource_len(dev, bar);
1852 unsigned long flags = pci_resource_flags(dev, bar);
1853
1854 if (!len)
1855 return NULL;
1856 if (max && len > max)
1857 len = max;
1858 if (flags & IORESOURCE_IO)
1859 return ioport_map(start, len);
1860 if (flags & IORESOURCE_MEM)
1861 /* Not checking IORESOURCE_CACHEABLE because PPC does
1862 * not currently distinguish between ioremap and
1863 * ioremap_nocache.
1864 */
1865 return ioremap(start, len);
1866 /* What? */
1867 return NULL;
1868}
1869
1870void pci_iounmap(struct pci_dev *dev, void __iomem *addr)
1871{
1872 /* Nothing to do */
1873}
1874EXPORT_SYMBOL(pci_iomap);
1875EXPORT_SYMBOL(pci_iounmap);
1876
1877unsigned long pci_address_to_pio(phys_addr_t address) 1837unsigned long pci_address_to_pio(phys_addr_t address)
1878{ 1838{
1879 struct pci_controller* hose = hose_head; 1839 struct pci_controller* hose = hose_head;
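
On 32-bit, root buses are now created with pci_scan_bus_parented() plus pci_bus_add_devices(), and interrupt routing moves from the old swizzle/map_irq pass into pcibios_fixup_bus(), which calls pci_read_irq_line() per device and then the new ppc_md.pci_irq_fixup hook. A hedged sketch of such a board hook; the vendor/device IDs and the IRQ number are invented.

#include <linux/pci.h>

static void myboard_pci_irq_fixup(struct pci_dev *dev)
{
	/* example: a device whose firmware leaves the IRQ line unset */
	if (dev->vendor == 0x1234 && dev->device == 0x5678 && dev->irq == 0)
		dev->irq = 25;
}

/* hooked up from board setup code, e.g.:
 *	ppc_md.pci_irq_fixup = myboard_pci_irq_fixup;
 */
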
diff --git a/arch/powerpc/kernel/pci_64.c b/arch/powerpc/kernel/pci_64.c
index 9bae8a5bf671..6fa9a0a5c8db 100644
--- a/arch/powerpc/kernel/pci_64.c
+++ b/arch/powerpc/kernel/pci_64.c
@@ -42,11 +42,9 @@
42unsigned long pci_probe_only = 1; 42unsigned long pci_probe_only = 1;
43int pci_assign_all_buses = 0; 43int pci_assign_all_buses = 0;
44 44
45#ifdef CONFIG_PPC_MULTIPLATFORM
46static void fixup_resource(struct resource *res, struct pci_dev *dev); 45static void fixup_resource(struct resource *res, struct pci_dev *dev);
47static void do_bus_setup(struct pci_bus *bus); 46static void do_bus_setup(struct pci_bus *bus);
48static void phbs_remap_io(void); 47static void phbs_remap_io(void);
49#endif
50 48
51/* pci_io_base -- the base address from which io bars are offsets. 49/* pci_io_base -- the base address from which io bars are offsets.
52 * This is the lowest I/O base address (so bar values are always positive), 50 * This is the lowest I/O base address (so bar values are always positive),
@@ -63,7 +61,7 @@ void iSeries_pcibios_init(void);
63 61
64LIST_HEAD(hose_list); 62LIST_HEAD(hose_list);
65 63
66struct dma_mapping_ops pci_dma_ops; 64struct dma_mapping_ops *pci_dma_ops;
67EXPORT_SYMBOL(pci_dma_ops); 65EXPORT_SYMBOL(pci_dma_ops);
68 66
69int global_phb_number; /* Global phb counter */ 67int global_phb_number; /* Global phb counter */
@@ -212,6 +210,10 @@ struct pci_controller * pcibios_alloc_controller(struct device_node *dev)
212 210
213void pcibios_free_controller(struct pci_controller *phb) 211void pcibios_free_controller(struct pci_controller *phb)
214{ 212{
213 spin_lock(&hose_spinlock);
214 list_del(&phb->list_node);
215 spin_unlock(&hose_spinlock);
216
215 if (phb->is_dynamic) 217 if (phb->is_dynamic)
216 kfree(phb); 218 kfree(phb);
217} 219}
@@ -251,7 +253,6 @@ static void __init pcibios_claim_of_setup(void)
251 pcibios_claim_one_bus(b); 253 pcibios_claim_one_bus(b);
252} 254}
253 255
254#ifdef CONFIG_PPC_MULTIPLATFORM
255static u32 get_int_prop(struct device_node *np, const char *name, u32 def) 256static u32 get_int_prop(struct device_node *np, const char *name, u32 def)
256{ 257{
257 const u32 *prop; 258 const u32 *prop;
@@ -329,7 +330,7 @@ struct pci_dev *of_create_pci_dev(struct device_node *node,
329 struct pci_dev *dev; 330 struct pci_dev *dev;
330 const char *type; 331 const char *type;
331 332
332 dev = kmalloc(sizeof(struct pci_dev), GFP_KERNEL); 333 dev = kzalloc(sizeof(struct pci_dev), GFP_KERNEL);
333 if (!dev) 334 if (!dev)
334 return NULL; 335 return NULL;
335 type = get_property(node, "device_type", NULL); 336 type = get_property(node, "device_type", NULL);
@@ -338,7 +339,6 @@ struct pci_dev *of_create_pci_dev(struct device_node *node,
338 339
339 DBG(" create device, devfn: %x, type: %s\n", devfn, type); 340 DBG(" create device, devfn: %x, type: %s\n", devfn, type);
340 341
341 memset(dev, 0, sizeof(struct pci_dev));
342 dev->bus = bus; 342 dev->bus = bus;
343 dev->sysdata = node; 343 dev->sysdata = node;
344 dev->dev.parent = bus->bridge; 344 dev->dev.parent = bus->bridge;
@@ -506,7 +506,6 @@ void __devinit of_scan_pci_bridge(struct device_node *node,
506 pci_scan_child_bus(bus); 506 pci_scan_child_bus(bus);
507} 507}
508EXPORT_SYMBOL(of_scan_pci_bridge); 508EXPORT_SYMBOL(of_scan_pci_bridge);
509#endif /* CONFIG_PPC_MULTIPLATFORM */
510 509
511void __devinit scan_phb(struct pci_controller *hose) 510void __devinit scan_phb(struct pci_controller *hose)
512{ 511{
@@ -517,7 +516,7 @@ void __devinit scan_phb(struct pci_controller *hose)
517 516
518 DBG("Scanning PHB %s\n", node ? node->full_name : "<NO NAME>"); 517 DBG("Scanning PHB %s\n", node ? node->full_name : "<NO NAME>");
519 518
520 bus = pci_create_bus(NULL, hose->first_busno, hose->ops, node); 519 bus = pci_create_bus(hose->parent, hose->first_busno, hose->ops, node);
521 if (bus == NULL) { 520 if (bus == NULL) {
522 printk(KERN_ERR "Failed to create bus for PCI domain %04x\n", 521 printk(KERN_ERR "Failed to create bus for PCI domain %04x\n",
523 hose->global_number); 522 hose->global_number);
@@ -540,7 +539,7 @@ void __devinit scan_phb(struct pci_controller *hose)
540 } 539 }
541 540
542 mode = PCI_PROBE_NORMAL; 541 mode = PCI_PROBE_NORMAL;
543#ifdef CONFIG_PPC_MULTIPLATFORM 542
544 if (node && ppc_md.pci_probe_mode) 543 if (node && ppc_md.pci_probe_mode)
545 mode = ppc_md.pci_probe_mode(bus); 544 mode = ppc_md.pci_probe_mode(bus);
546 DBG(" probe mode: %d\n", mode); 545 DBG(" probe mode: %d\n", mode);
@@ -548,7 +547,7 @@ void __devinit scan_phb(struct pci_controller *hose)
548 bus->subordinate = hose->last_busno; 547 bus->subordinate = hose->last_busno;
549 of_scan_bus(node, bus); 548 of_scan_bus(node, bus);
550 } 549 }
551#endif /* CONFIG_PPC_MULTIPLATFORM */ 550
552 if (mode == PCI_PROBE_NORMAL) 551 if (mode == PCI_PROBE_NORMAL)
553 hose->last_busno = bus->subordinate = pci_scan_child_bus(bus); 552 hose->last_busno = bus->subordinate = pci_scan_child_bus(bus);
554} 553}
@@ -592,11 +591,9 @@ static int __init pcibios_init(void)
592 if (ppc64_isabridge_dev != NULL) 591 if (ppc64_isabridge_dev != NULL)
593 printk(KERN_DEBUG "ISA bridge at %s\n", pci_name(ppc64_isabridge_dev)); 592 printk(KERN_DEBUG "ISA bridge at %s\n", pci_name(ppc64_isabridge_dev));
594 593
595#ifdef CONFIG_PPC_MULTIPLATFORM
596 if (!firmware_has_feature(FW_FEATURE_ISERIES)) 594 if (!firmware_has_feature(FW_FEATURE_ISERIES))
597 /* map in PCI I/O space */ 595 /* map in PCI I/O space */
598 phbs_remap_io(); 596 phbs_remap_io();
599#endif
600 597
601 printk(KERN_DEBUG "PCI: Probing PCI hardware done\n"); 598 printk(KERN_DEBUG "PCI: Probing PCI hardware done\n");
602 599
@@ -873,8 +870,6 @@ void pcibios_add_platform_entries(struct pci_dev *pdev)
873 device_create_file(&pdev->dev, &dev_attr_devspec); 870 device_create_file(&pdev->dev, &dev_attr_devspec);
874} 871}
875 872
876#ifdef CONFIG_PPC_MULTIPLATFORM
877
878#define ISA_SPACE_MASK 0x1 873#define ISA_SPACE_MASK 0x1
879#define ISA_SPACE_IO 0x1 874#define ISA_SPACE_IO 0x1
880 875
@@ -975,11 +970,7 @@ void __devinit pci_process_bridge_OF_ranges(struct pci_controller *hose,
975 res = NULL; 970 res = NULL;
976 pci_space = ranges[0]; 971 pci_space = ranges[0];
977 pci_addr = ((unsigned long)ranges[1] << 32) | ranges[2]; 972 pci_addr = ((unsigned long)ranges[1] << 32) | ranges[2];
978 973 cpu_phys_addr = of_translate_address(dev, &ranges[3]);
979 cpu_phys_addr = ranges[3];
980 if (na >= 2)
981 cpu_phys_addr = (cpu_phys_addr << 32) | ranges[4];
982
983 size = ((unsigned long)ranges[na+3] << 32) | ranges[na+4]; 974 size = ((unsigned long)ranges[na+3] << 32) | ranges[na+4];
984 ranges += np; 975 ranges += np;
985 if (size == 0) 976 if (size == 0)
@@ -1145,7 +1136,7 @@ int unmap_bus_range(struct pci_bus *bus)
1145 1136
1146 if (get_bus_io_range(bus, &start_phys, &start_virt, &size)) 1137 if (get_bus_io_range(bus, &start_phys, &start_virt, &size))
1147 return 1; 1138 return 1;
1148 if (iounmap_explicit((void __iomem *) start_virt, size)) 1139 if (__iounmap_explicit((void __iomem *) start_virt, size))
1149 return 1; 1140 return 1;
1150 1141
1151 return 0; 1142 return 0;
@@ -1213,23 +1204,52 @@ void __devinit pcibios_fixup_device_resources(struct pci_dev *dev,
1213} 1204}
1214EXPORT_SYMBOL(pcibios_fixup_device_resources); 1205EXPORT_SYMBOL(pcibios_fixup_device_resources);
1215 1206
1207void __devinit pcibios_setup_new_device(struct pci_dev *dev)
1208{
1209 struct dev_archdata *sd = &dev->dev.archdata;
1210
1211 sd->of_node = pci_device_to_OF_node(dev);
1212
1213 DBG("PCI device %s OF node: %s\n", pci_name(dev),
1214 sd->of_node ? sd->of_node->full_name : "<none>");
1215
1216 sd->dma_ops = pci_dma_ops;
1217#ifdef CONFIG_NUMA
1218 sd->numa_node = pcibus_to_node(dev->bus);
1219#else
1220 sd->numa_node = -1;
1221#endif
1222 if (ppc_md.pci_dma_dev_setup)
1223 ppc_md.pci_dma_dev_setup(dev);
1224}
1225EXPORT_SYMBOL(pcibios_setup_new_device);
1216 1226
1217static void __devinit do_bus_setup(struct pci_bus *bus) 1227static void __devinit do_bus_setup(struct pci_bus *bus)
1218{ 1228{
1219 struct pci_dev *dev; 1229 struct pci_dev *dev;
1220 1230
1221 ppc_md.iommu_bus_setup(bus); 1231 if (ppc_md.pci_dma_bus_setup)
1232 ppc_md.pci_dma_bus_setup(bus);
1222 1233
1223 list_for_each_entry(dev, &bus->devices, bus_list) 1234 list_for_each_entry(dev, &bus->devices, bus_list)
1224 ppc_md.iommu_dev_setup(dev); 1235 pcibios_setup_new_device(dev);
1225 1236
1226 if (ppc_md.irq_bus_setup) 1237 /* Read default IRQs and fixup if necessary */
1227 ppc_md.irq_bus_setup(bus); 1238 list_for_each_entry(dev, &bus->devices, bus_list) {
1239 pci_read_irq_line(dev);
1240 if (ppc_md.pci_irq_fixup)
1241 ppc_md.pci_irq_fixup(dev);
1242 }
1228} 1243}
1229 1244
1230void __devinit pcibios_fixup_bus(struct pci_bus *bus) 1245void __devinit pcibios_fixup_bus(struct pci_bus *bus)
1231{ 1246{
1232 struct pci_dev *dev = bus->self; 1247 struct pci_dev *dev = bus->self;
1248 struct device_node *np;
1249
1250 np = pci_bus_to_OF_node(bus);
1251
1252 DBG("pcibios_fixup_bus(%s)\n", np ? np->full_name : "<???>");
1233 1253
1234 if (dev && pci_probe_only && 1254 if (dev && pci_probe_only &&
1235 (dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) { 1255 (dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
@@ -1343,8 +1363,6 @@ struct pci_controller* pci_find_hose_for_OF_device(struct device_node* node)
1343 return NULL; 1363 return NULL;
1344} 1364}
1345 1365
1346#endif /* CONFIG_PPC_MULTIPLATFORM */
1347
1348unsigned long pci_address_to_pio(phys_addr_t address) 1366unsigned long pci_address_to_pio(phys_addr_t address)
1349{ 1367{
1350 struct pci_controller *hose, *tmp; 1368 struct pci_controller *hose, *tmp;
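
pcibios_setup_new_device() now centralises per-device archdata setup (OF node, DMA ops pointer, NUMA node, optional ppc_md.pci_dma_dev_setup) and is exported, so runtime paths can give late-appearing devices the same treatment as boot-time ones. A hedged sketch of such a caller; the function name is invented and the prototype is assumed to be visible via the arch PCI headers touched by this series.

#include <linux/pci.h>

static void myboard_add_hotplug_dev(struct pci_dev *dev)
{
	pcibios_setup_new_device(dev);	/* of_node, dma_ops, numa_node */
	pci_bus_add_device(dev);	/* make it visible to drivers */
}
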
diff --git a/arch/powerpc/kernel/pci_direct_iommu.c b/arch/powerpc/kernel/pci_direct_iommu.c
deleted file mode 100644
index 72ce082ce738..000000000000
--- a/arch/powerpc/kernel/pci_direct_iommu.c
+++ /dev/null
@@ -1,98 +0,0 @@
1/*
2 * Support for DMA from PCI devices to main memory on
3 * machines without an iommu or with directly addressable
4 * RAM (typically a pmac with 2Gb of RAM or less)
5 *
6 * Copyright (C) 2003 Benjamin Herrenschmidt (benh@kernel.crashing.org)
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version
11 * 2 of the License, or (at your option) any later version.
12 */
13
14#include <linux/kernel.h>
15#include <linux/pci.h>
16#include <linux/delay.h>
17#include <linux/string.h>
18#include <linux/init.h>
19#include <linux/bootmem.h>
20#include <linux/mm.h>
21#include <linux/dma-mapping.h>
22
23#include <asm/sections.h>
24#include <asm/io.h>
25#include <asm/prom.h>
26#include <asm/pci-bridge.h>
27#include <asm/machdep.h>
28#include <asm/pmac_feature.h>
29#include <asm/abs_addr.h>
30#include <asm/ppc-pci.h>
31
32static void *pci_direct_alloc_coherent(struct device *hwdev, size_t size,
33 dma_addr_t *dma_handle, gfp_t flag)
34{
35 void *ret;
36
37 ret = (void *)__get_free_pages(flag, get_order(size));
38 if (ret != NULL) {
39 memset(ret, 0, size);
40 *dma_handle = virt_to_abs(ret);
41 }
42 return ret;
43}
44
45static void pci_direct_free_coherent(struct device *hwdev, size_t size,
46 void *vaddr, dma_addr_t dma_handle)
47{
48 free_pages((unsigned long)vaddr, get_order(size));
49}
50
51static dma_addr_t pci_direct_map_single(struct device *hwdev, void *ptr,
52 size_t size, enum dma_data_direction direction)
53{
54 return virt_to_abs(ptr);
55}
56
57static void pci_direct_unmap_single(struct device *hwdev, dma_addr_t dma_addr,
58 size_t size, enum dma_data_direction direction)
59{
60}
61
62static int pci_direct_map_sg(struct device *hwdev, struct scatterlist *sg,
63 int nents, enum dma_data_direction direction)
64{
65 int i;
66
67 for (i = 0; i < nents; i++, sg++) {
68 sg->dma_address = page_to_phys(sg->page) + sg->offset;
69 sg->dma_length = sg->length;
70 }
71
72 return nents;
73}
74
75static void pci_direct_unmap_sg(struct device *hwdev, struct scatterlist *sg,
76 int nents, enum dma_data_direction direction)
77{
78}
79
80static int pci_direct_dma_supported(struct device *dev, u64 mask)
81{
82 return mask < 0x100000000ull;
83}
84
85static struct dma_mapping_ops pci_direct_ops = {
86 .alloc_coherent = pci_direct_alloc_coherent,
87 .free_coherent = pci_direct_free_coherent,
88 .map_single = pci_direct_map_single,
89 .unmap_single = pci_direct_unmap_single,
90 .map_sg = pci_direct_map_sg,
91 .unmap_sg = pci_direct_unmap_sg,
92 .dma_supported = pci_direct_dma_supported,
93};
94
95void __init pci_direct_iommu_init(void)
96{
97 pci_dma_ops = pci_direct_ops;
98}
diff --git a/arch/powerpc/kernel/pci_iommu.c b/arch/powerpc/kernel/pci_iommu.c
deleted file mode 100644
index 0688b2534acb..000000000000
--- a/arch/powerpc/kernel/pci_iommu.c
+++ /dev/null
@@ -1,164 +0,0 @@
1/*
2 * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation
3 *
4 * Rewrite, cleanup, new allocation schemes:
5 * Copyright (C) 2004 Olof Johansson, IBM Corporation
6 *
7 * Dynamic DMA mapping support, platform-independent parts.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
22 */
23
24
25#include <linux/init.h>
26#include <linux/types.h>
27#include <linux/slab.h>
28#include <linux/mm.h>
29#include <linux/spinlock.h>
30#include <linux/string.h>
31#include <linux/pci.h>
32#include <linux/dma-mapping.h>
33#include <asm/io.h>
34#include <asm/prom.h>
35#include <asm/iommu.h>
36#include <asm/pci-bridge.h>
37#include <asm/machdep.h>
38#include <asm/ppc-pci.h>
39
40/*
41 * We can use ->sysdata directly and avoid the extra work in
42 * pci_device_to_OF_node since ->sysdata will have been initialised
43 * in the iommu init code for all devices.
44 */
45#define PCI_GET_DN(dev) ((struct device_node *)((dev)->sysdata))
46
47static inline struct iommu_table *device_to_table(struct device *hwdev)
48{
49 struct pci_dev *pdev;
50
51 if (!hwdev) {
52 pdev = ppc64_isabridge_dev;
53 if (!pdev)
54 return NULL;
55 } else
56 pdev = to_pci_dev(hwdev);
57
58 return PCI_DN(PCI_GET_DN(pdev))->iommu_table;
59}
60
61
62static inline unsigned long device_to_mask(struct device *hwdev)
63{
64 struct pci_dev *pdev;
65
66 if (!hwdev) {
67 pdev = ppc64_isabridge_dev;
68 if (!pdev) /* This is the best guess we can do */
69 return 0xfffffffful;
70 } else
71 pdev = to_pci_dev(hwdev);
72
73 if (pdev->dma_mask)
74 return pdev->dma_mask;
75
76 /* Assume devices without mask can take 32 bit addresses */
77 return 0xfffffffful;
78}
79
80
81/* Allocates a contiguous real buffer and creates mappings over it.
82 * Returns the virtual address of the buffer and sets dma_handle
83 * to the dma address (mapping) of the first page.
84 */
85static void *pci_iommu_alloc_coherent(struct device *hwdev, size_t size,
86 dma_addr_t *dma_handle, gfp_t flag)
87{
88 return iommu_alloc_coherent(device_to_table(hwdev), size, dma_handle,
89 device_to_mask(hwdev), flag,
90 pcibus_to_node(to_pci_dev(hwdev)->bus));
91}
92
93static void pci_iommu_free_coherent(struct device *hwdev, size_t size,
94 void *vaddr, dma_addr_t dma_handle)
95{
96 iommu_free_coherent(device_to_table(hwdev), size, vaddr, dma_handle);
97}
98
99/* Creates TCEs for a user provided buffer. The user buffer must be
100 * contiguous real kernel storage (not vmalloc). The address of the buffer
101 * passed here is the kernel (virtual) address of the buffer. The buffer
102 * need not be page aligned, the dma_addr_t returned will point to the same
103 * byte within the page as vaddr.
104 */
105static dma_addr_t pci_iommu_map_single(struct device *hwdev, void *vaddr,
106 size_t size, enum dma_data_direction direction)
107{
108 return iommu_map_single(device_to_table(hwdev), vaddr, size,
109 device_to_mask(hwdev), direction);
110}
111
112
113static void pci_iommu_unmap_single(struct device *hwdev, dma_addr_t dma_handle,
114 size_t size, enum dma_data_direction direction)
115{
116 iommu_unmap_single(device_to_table(hwdev), dma_handle, size, direction);
117}
118
119
120static int pci_iommu_map_sg(struct device *pdev, struct scatterlist *sglist,
121 int nelems, enum dma_data_direction direction)
122{
123 return iommu_map_sg(pdev, device_to_table(pdev), sglist,
124 nelems, device_to_mask(pdev), direction);
125}
126
127static void pci_iommu_unmap_sg(struct device *pdev, struct scatterlist *sglist,
128 int nelems, enum dma_data_direction direction)
129{
130 iommu_unmap_sg(device_to_table(pdev), sglist, nelems, direction);
131}
132
133/* We support DMA to/from any memory page via the iommu */
134static int pci_iommu_dma_supported(struct device *dev, u64 mask)
135{
136 struct iommu_table *tbl = device_to_table(dev);
137
138 if (!tbl || tbl->it_offset > mask) {
139 printk(KERN_INFO "Warning: IOMMU table offset too big for device mask\n");
140 if (tbl)
141 printk(KERN_INFO "mask: 0x%08lx, table offset: 0x%08lx\n",
142 mask, tbl->it_offset);
143 else
144 printk(KERN_INFO "mask: 0x%08lx, table unavailable\n",
145 mask);
146 return 0;
147 } else
148 return 1;
149}
150
151struct dma_mapping_ops pci_iommu_ops = {
152 .alloc_coherent = pci_iommu_alloc_coherent,
153 .free_coherent = pci_iommu_free_coherent,
154 .map_single = pci_iommu_map_single,
155 .unmap_single = pci_iommu_unmap_single,
156 .map_sg = pci_iommu_map_sg,
157 .unmap_sg = pci_iommu_unmap_sg,
158 .dma_supported = pci_iommu_dma_supported,
159};
160
161void pci_iommu_init(void)
162{
163 pci_dma_ops = pci_iommu_ops;
164}
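
Both pci_direct_iommu.c and pci_iommu.c go away; per the diffstat their role moves into the generic 64-bit DMA code, and pci_dma_ops becomes a pointer (see the pci_64.c hunk above). The sketch below only illustrates the new pointer-assignment style; the ops table and its contents are placeholders, not the actual replacement code.

#include <linux/init.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>

static int my_dma_supported(struct device *dev, u64 mask)
{
	return 1;			/* placeholder policy */
}

static struct dma_mapping_ops my_platform_dma_ops = {
	.dma_supported	= my_dma_supported,
	/* .alloc_coherent, .map_single, ... supplied by the platform */
};

static void __init my_platform_pci_dma_init(void)
{
	/* pci_dma_ops is now "struct dma_mapping_ops *": assign a pointer */
	pci_dma_ops = &my_platform_dma_ops;
}
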
diff --git a/arch/powerpc/kernel/ppc_ksyms.c b/arch/powerpc/kernel/ppc_ksyms.c
index 807193a3c784..9179f0739ea2 100644
--- a/arch/powerpc/kernel/ppc_ksyms.c
+++ b/arch/powerpc/kernel/ppc_ksyms.c
@@ -49,6 +49,10 @@
49#include <asm/commproc.h> 49#include <asm/commproc.h>
50#endif 50#endif
51 51
52#ifdef CONFIG_PPC64
53EXPORT_SYMBOL(local_irq_restore);
54#endif
55
52#ifdef CONFIG_PPC32 56#ifdef CONFIG_PPC32
53extern void transfer_to_handler(void); 57extern void transfer_to_handler(void);
54extern void do_IRQ(struct pt_regs *regs); 58extern void do_IRQ(struct pt_regs *regs);
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index bdb412d4b748..c18dbe77fdc2 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -538,35 +538,31 @@ static struct ibm_pa_feature {
538 {CPU_FTR_REAL_LE, PPC_FEATURE_TRUE_LE, 5, 0, 0}, 538 {CPU_FTR_REAL_LE, PPC_FEATURE_TRUE_LE, 5, 0, 0},
539}; 539};
540 540
541static void __init check_cpu_pa_features(unsigned long node) 541static void __init scan_features(unsigned long node, unsigned char *ftrs,
542 unsigned long tablelen,
543 struct ibm_pa_feature *fp,
544 unsigned long ft_size)
542{ 545{
543 unsigned char *pa_ftrs; 546 unsigned long i, len, bit;
544 unsigned long len, tablelen, i, bit;
545
546 pa_ftrs = of_get_flat_dt_prop(node, "ibm,pa-features", &tablelen);
547 if (pa_ftrs == NULL)
548 return;
549 547
550 /* find descriptor with type == 0 */ 548 /* find descriptor with type == 0 */
551 for (;;) { 549 for (;;) {
552 if (tablelen < 3) 550 if (tablelen < 3)
553 return; 551 return;
554 len = 2 + pa_ftrs[0]; 552 len = 2 + ftrs[0];
555 if (tablelen < len) 553 if (tablelen < len)
556 return; /* descriptor 0 not found */ 554 return; /* descriptor 0 not found */
557 if (pa_ftrs[1] == 0) 555 if (ftrs[1] == 0)
558 break; 556 break;
559 tablelen -= len; 557 tablelen -= len;
560 pa_ftrs += len; 558 ftrs += len;
561 } 559 }
562 560
563 /* loop over bits we know about */ 561 /* loop over bits we know about */
564 for (i = 0; i < ARRAY_SIZE(ibm_pa_features); ++i) { 562 for (i = 0; i < ft_size; ++i, ++fp) {
565 struct ibm_pa_feature *fp = &ibm_pa_features[i]; 563 if (fp->pabyte >= ftrs[0])
566
567 if (fp->pabyte >= pa_ftrs[0])
568 continue; 564 continue;
569 bit = (pa_ftrs[2 + fp->pabyte] >> (7 - fp->pabit)) & 1; 565 bit = (ftrs[2 + fp->pabyte] >> (7 - fp->pabit)) & 1;
570 if (bit ^ fp->invert) { 566 if (bit ^ fp->invert) {
571 cur_cpu_spec->cpu_features |= fp->cpu_features; 567 cur_cpu_spec->cpu_features |= fp->cpu_features;
572 cur_cpu_spec->cpu_user_features |= fp->cpu_user_ftrs; 568 cur_cpu_spec->cpu_user_features |= fp->cpu_user_ftrs;
@@ -577,16 +573,59 @@ static void __init check_cpu_pa_features(unsigned long node)
577 } 573 }
578} 574}
579 575
576static void __init check_cpu_pa_features(unsigned long node)
577{
578 unsigned char *pa_ftrs;
579 unsigned long tablelen;
580
581 pa_ftrs = of_get_flat_dt_prop(node, "ibm,pa-features", &tablelen);
582 if (pa_ftrs == NULL)
583 return;
584
585 scan_features(node, pa_ftrs, tablelen,
586 ibm_pa_features, ARRAY_SIZE(ibm_pa_features));
587}
588
589static struct feature_property {
590 const char *name;
591 u32 min_value;
592 unsigned long cpu_feature;
593 unsigned long cpu_user_ftr;
594} feature_properties[] __initdata = {
595#ifdef CONFIG_ALTIVEC
596 {"altivec", 0, CPU_FTR_ALTIVEC, PPC_FEATURE_HAS_ALTIVEC},
597 {"ibm,vmx", 1, CPU_FTR_ALTIVEC, PPC_FEATURE_HAS_ALTIVEC},
598#endif /* CONFIG_ALTIVEC */
599#ifdef CONFIG_PPC64
600 {"ibm,dfp", 1, 0, PPC_FEATURE_HAS_DFP},
601 {"ibm,purr", 1, CPU_FTR_PURR, 0},
602 {"ibm,spurr", 1, CPU_FTR_SPURR, 0},
603#endif /* CONFIG_PPC64 */
604};
605
606static void __init check_cpu_feature_properties(unsigned long node)
607{
608 unsigned long i;
609 struct feature_property *fp = feature_properties;
610 const u32 *prop;
611
612 for (i = 0; i < ARRAY_SIZE(feature_properties); ++i, ++fp) {
613 prop = of_get_flat_dt_prop(node, fp->name, NULL);
614 if (prop && *prop >= fp->min_value) {
615 cur_cpu_spec->cpu_features |= fp->cpu_feature;
616 cur_cpu_spec->cpu_user_features |= fp->cpu_user_ftr;
617 }
618 }
619}
620
580static int __init early_init_dt_scan_cpus(unsigned long node, 621static int __init early_init_dt_scan_cpus(unsigned long node,
581 const char *uname, int depth, 622 const char *uname, int depth,
582 void *data) 623 void *data)
583{ 624{
584 static int logical_cpuid = 0; 625 static int logical_cpuid = 0;
585 char *type = of_get_flat_dt_prop(node, "device_type", NULL); 626 char *type = of_get_flat_dt_prop(node, "device_type", NULL);
586#ifdef CONFIG_ALTIVEC 627 const u32 *prop;
587 u32 *prop; 628 const u32 *intserv;
588#endif
589 u32 *intserv;
590 int i, nthreads; 629 int i, nthreads;
591 unsigned long len; 630 unsigned long len;
592 int found = 0; 631 int found = 0;
@@ -643,24 +682,27 @@ static int __init early_init_dt_scan_cpus(unsigned long node,
643 intserv[i]); 682 intserv[i]);
644 boot_cpuid = logical_cpuid; 683 boot_cpuid = logical_cpuid;
645 set_hard_smp_processor_id(boot_cpuid, intserv[i]); 684 set_hard_smp_processor_id(boot_cpuid, intserv[i]);
646 }
647 685
648#ifdef CONFIG_ALTIVEC 686 /*
649 /* Check if we have a VMX and eventually update CPU features */ 687 * PAPR defines "logical" PVR values for cpus that
650 prop = (u32 *)of_get_flat_dt_prop(node, "ibm,vmx", NULL); 688 * meet various levels of the architecture:
651 if (prop && (*prop) > 0) { 689 * 0x0f000001 Architecture version 2.04
652 cur_cpu_spec->cpu_features |= CPU_FTR_ALTIVEC; 690 * 0x0f000002 Architecture version 2.05
653 cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_ALTIVEC; 691 * If the cpu-version property in the cpu node contains
654 } 692 * such a value, we call identify_cpu again with the
655 693 * logical PVR value in order to use the cpu feature
656 /* Same goes for Apple's "altivec" property */ 694 * bits appropriate for the architecture level.
657 prop = (u32 *)of_get_flat_dt_prop(node, "altivec", NULL); 695 *
658 if (prop) { 696 * A POWER6 partition in "POWER6 architected" mode
659 cur_cpu_spec->cpu_features |= CPU_FTR_ALTIVEC; 697 * uses the 0x0f000002 PVR value; in POWER5+ mode
660 cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_ALTIVEC; 698 * it uses 0x0f000001.
699 */
700 prop = of_get_flat_dt_prop(node, "cpu-version", NULL);
701 if (prop && (*prop & 0xff000000) == 0x0f000000)
702 identify_cpu(0, *prop);
661 } 703 }
662#endif /* CONFIG_ALTIVEC */
663 704
705 check_cpu_feature_properties(node);
664 check_cpu_pa_features(node); 706 check_cpu_pa_features(node);
665 707
666#ifdef CONFIG_PPC_PSERIES 708#ifdef CONFIG_PPC_PSERIES
@@ -1674,6 +1716,7 @@ struct device_node *of_get_cpu_node(int cpu, unsigned int *thread)
1674 } 1716 }
1675 return NULL; 1717 return NULL;
1676} 1718}
1719EXPORT_SYMBOL(of_get_cpu_node);
1677 1720
1678#ifdef DEBUG 1721#ifdef DEBUG
1679static struct debugfs_blob_wrapper flat_dt_blob; 1722static struct debugfs_blob_wrapper flat_dt_blob;
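The factored-out scan_features() above is a small, self-contained table walk. Below is a standalone sketch of the same logic, with a made-up "ibm,pa-features" value and made-up feature names purely for illustration; it mirrors the descriptor search and the (7 - pabit) bit test that the hunk pulls out of check_cpu_pa_features():

/* Standalone sketch of the descriptor walk used by scan_features() above.
 * Table layout: each descriptor is { length, type, <length> bytes }, and
 * only the descriptor with type == 0 is consulted.  A feature is present
 * when bit (7 - pabit) of byte pabyte is set (optionally inverted).
 * The sample table and feature names are invented for illustration.
 */
#include <stdio.h>
#include <stddef.h>

struct pa_feature {
	unsigned char pabyte;	/* byte index within descriptor 0 */
	unsigned char pabit;	/* bit number, 0 == most significant */
	unsigned char invert;	/* feature present when the bit is clear */
	const char *name;
};

static const struct pa_feature known[] = {
	{ 0, 1, 0, "EXAMPLE_FPU" },
	{ 0, 2, 0, "EXAMPLE_MMU" },
	{ 5, 0, 0, "EXAMPLE_TRUE_LE" },
};

int main(void)
{
	/* hypothetical ibm,pa-features value: one descriptor, type 0 */
	unsigned char ftrs[] = { 6, 0, 0x60, 0x00, 0x00, 0x00, 0x00, 0x80 };
	size_t tablelen = sizeof(ftrs), len, i;
	unsigned char *p = ftrs;

	/* find the descriptor with type == 0, exactly as scan_features() does */
	for (;;) {
		if (tablelen < 3)
			return 1;
		len = 2 + p[0];
		if (tablelen < len)
			return 1;		/* descriptor 0 not found */
		if (p[1] == 0)
			break;
		tablelen -= len;
		p += len;
	}

	/* loop over the bits we know about */
	for (i = 0; i < sizeof(known) / sizeof(known[0]); i++) {
		unsigned int bit;

		if (known[i].pabyte >= p[0])
			continue;		/* descriptor too short for this entry */
		bit = (p[2 + known[i].pabyte] >> (7 - known[i].pabit)) & 1;
		if (bit ^ known[i].invert)
			printf("feature %s present\n", known[i].name);
	}
	return 0;
}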
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index b91761639d96..46cf32670ddb 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -173,8 +173,8 @@ static unsigned long __initdata dt_string_start, dt_string_end;
173static unsigned long __initdata prom_initrd_start, prom_initrd_end; 173static unsigned long __initdata prom_initrd_start, prom_initrd_end;
174 174
175#ifdef CONFIG_PPC64 175#ifdef CONFIG_PPC64
176static int __initdata iommu_force_on; 176static int __initdata prom_iommu_force_on;
177static int __initdata ppc64_iommu_off; 177static int __initdata prom_iommu_off;
178static unsigned long __initdata prom_tce_alloc_start; 178static unsigned long __initdata prom_tce_alloc_start;
179static unsigned long __initdata prom_tce_alloc_end; 179static unsigned long __initdata prom_tce_alloc_end;
180#endif 180#endif
@@ -582,9 +582,9 @@ static void __init early_cmdline_parse(void)
582 while (*opt && *opt == ' ') 582 while (*opt && *opt == ' ')
583 opt++; 583 opt++;
584 if (!strncmp(opt, RELOC("off"), 3)) 584 if (!strncmp(opt, RELOC("off"), 3))
585 RELOC(ppc64_iommu_off) = 1; 585 RELOC(prom_iommu_off) = 1;
586 else if (!strncmp(opt, RELOC("force"), 5)) 586 else if (!strncmp(opt, RELOC("force"), 5))
587 RELOC(iommu_force_on) = 1; 587 RELOC(prom_iommu_force_on) = 1;
588 } 588 }
589#endif 589#endif
590} 590}
@@ -627,6 +627,7 @@ static void __init early_cmdline_parse(void)
627/* Option vector 3: processor options supported */ 627/* Option vector 3: processor options supported */
628#define OV3_FP 0x80 /* floating point */ 628#define OV3_FP 0x80 /* floating point */
629#define OV3_VMX 0x40 /* VMX/Altivec */ 629#define OV3_VMX 0x40 /* VMX/Altivec */
630#define OV3_DFP 0x20 /* decimal FP */
630 631
631/* Option vector 5: PAPR/OF options supported */ 632/* Option vector 5: PAPR/OF options supported */
632#define OV5_LPAR 0x80 /* logical partitioning supported */ 633#define OV5_LPAR 0x80 /* logical partitioning supported */
@@ -642,6 +643,7 @@ static void __init early_cmdline_parse(void)
642static unsigned char ibm_architecture_vec[] = { 643static unsigned char ibm_architecture_vec[] = {
643 W(0xfffe0000), W(0x003a0000), /* POWER5/POWER5+ */ 644 W(0xfffe0000), W(0x003a0000), /* POWER5/POWER5+ */
644 W(0xffff0000), W(0x003e0000), /* POWER6 */ 645 W(0xffff0000), W(0x003e0000), /* POWER6 */
646 W(0xffffffff), W(0x0f000002), /* all 2.05-compliant */
645 W(0xfffffffe), W(0x0f000001), /* all 2.04-compliant and earlier */ 647 W(0xfffffffe), W(0x0f000001), /* all 2.04-compliant and earlier */
646 5 - 1, /* 5 option vectors */ 648 5 - 1, /* 5 option vectors */
647 649
@@ -668,7 +670,7 @@ static unsigned char ibm_architecture_vec[] = {
668 /* option vector 3: processor options supported */ 670 /* option vector 3: processor options supported */
669 3 - 2, /* length */ 671 3 - 2, /* length */
670 0, /* don't ignore, don't halt */ 672 0, /* don't ignore, don't halt */
671 OV3_FP | OV3_VMX, 673 OV3_FP | OV3_VMX | OV3_DFP,
672 674
673 /* option vector 4: IBM PAPR implementation */ 675 /* option vector 4: IBM PAPR implementation */
674 2 - 2, /* length */ 676 2 - 2, /* length */
@@ -1167,7 +1169,7 @@ static void __init prom_initialize_tce_table(void)
1167 u64 local_alloc_top, local_alloc_bottom; 1169 u64 local_alloc_top, local_alloc_bottom;
1168 u64 i; 1170 u64 i;
1169 1171
1170 if (RELOC(ppc64_iommu_off)) 1172 if (RELOC(prom_iommu_off))
1171 return; 1173 return;
1172 1174
1173 prom_debug("starting prom_initialize_tce_table\n"); 1175 prom_debug("starting prom_initialize_tce_table\n");
@@ -2283,11 +2285,11 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4,
2283 * Fill in some infos for use by the kernel later on 2285 * Fill in some infos for use by the kernel later on
2284 */ 2286 */
2285#ifdef CONFIG_PPC64 2287#ifdef CONFIG_PPC64
2286 if (RELOC(ppc64_iommu_off)) 2288 if (RELOC(prom_iommu_off))
2287 prom_setprop(_prom->chosen, "/chosen", "linux,iommu-off", 2289 prom_setprop(_prom->chosen, "/chosen", "linux,iommu-off",
2288 NULL, 0); 2290 NULL, 0);
2289 2291
2290 if (RELOC(iommu_force_on)) 2292 if (RELOC(prom_iommu_force_on))
2291 prom_setprop(_prom->chosen, "/chosen", "linux,iommu-force-on", 2293 prom_setprop(_prom->chosen, "/chosen", "linux,iommu-force-on",
2292 NULL, 0); 2294 NULL, 0);
2293 2295
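The head of ibm_architecture_vec is a list of (mask, value) PVR pairs telling firmware which processor levels this kernel supports, now including the 2.05 logical PVR 0x0f000002. The sketch below assumes the usual masked comparison when matching a PVR against such a pair; that interpretation, and the sample PVRs, are illustrative rather than quoted from PAPR:

/* Illustrative matching of a PVR against the mask/value pairs kept at the
 * start of ibm_architecture_vec.  Treating a match as
 * (pvr & mask) == (value & mask) is an assumption made for this sketch.
 */
#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

struct pvr_entry {
	uint32_t mask;
	uint32_t value;
	const char *desc;
};

static const struct pvr_entry vec[] = {
	{ 0xfffe0000, 0x003a0000, "POWER5/POWER5+"                 },
	{ 0xffff0000, 0x003e0000, "POWER6"                         },
	{ 0xffffffff, 0x0f000002, "all 2.05-compliant (logical)"   },
	{ 0xfffffffe, 0x0f000001, "all 2.04-compliant and earlier" },
};

static const char *classify(uint32_t pvr)
{
	size_t i;

	for (i = 0; i < sizeof(vec) / sizeof(vec[0]); i++)
		if ((pvr & vec[i].mask) == (vec[i].value & vec[i].mask))
			return vec[i].desc;
	return "no match";
}

int main(void)
{
	printf("0x003a0201 -> %s\n", classify(0x003a0201));	/* sample PVR */
	printf("0x0f000002 -> %s\n", classify(0x0f000002));	/* logical 2.05 */
	printf("0x0f000001 -> %s\n", classify(0x0f000001));	/* logical 2.04 */
	return 0;
}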
diff --git a/arch/powerpc/kernel/prom_parse.c b/arch/powerpc/kernel/prom_parse.c
index 603dff3ad62a..0dfbe1cd28eb 100644
--- a/arch/powerpc/kernel/prom_parse.c
+++ b/arch/powerpc/kernel/prom_parse.c
@@ -25,6 +25,12 @@
25#define OF_CHECK_COUNTS(na, ns) ((na) > 0 && (na) <= OF_MAX_ADDR_CELLS && \ 25#define OF_CHECK_COUNTS(na, ns) ((na) > 0 && (na) <= OF_MAX_ADDR_CELLS && \
26 (ns) > 0) 26 (ns) > 0)
27 27
28static struct of_bus *of_match_bus(struct device_node *np);
29static int __of_address_to_resource(struct device_node *dev,
30 const u32 *addrp, u64 size, unsigned int flags,
31 struct resource *r);
32
33
28/* Debug utility */ 34/* Debug utility */
29#ifdef DEBUG 35#ifdef DEBUG
30static void of_dump_addr(const char *s, const u32 *addr, int na) 36static void of_dump_addr(const char *s, const u32 *addr, int na)
@@ -101,6 +107,7 @@ static unsigned int of_bus_default_get_flags(const u32 *addr)
101} 107}
102 108
103 109
110#ifdef CONFIG_PCI
104/* 111/*
105 * PCI bus specific translator 112 * PCI bus specific translator
106 */ 113 */
@@ -153,15 +160,156 @@ static unsigned int of_bus_pci_get_flags(const u32 *addr)
153 switch((w >> 24) & 0x03) { 160 switch((w >> 24) & 0x03) {
154 case 0x01: 161 case 0x01:
155 flags |= IORESOURCE_IO; 162 flags |= IORESOURCE_IO;
163 break;
156 case 0x02: /* 32 bits */ 164 case 0x02: /* 32 bits */
157 case 0x03: /* 64 bits */ 165 case 0x03: /* 64 bits */
158 flags |= IORESOURCE_MEM; 166 flags |= IORESOURCE_MEM;
167 break;
159 } 168 }
160 if (w & 0x40000000) 169 if (w & 0x40000000)
161 flags |= IORESOURCE_PREFETCH; 170 flags |= IORESOURCE_PREFETCH;
162 return flags; 171 return flags;
163} 172}
164 173
174const u32 *of_get_pci_address(struct device_node *dev, int bar_no, u64 *size,
175 unsigned int *flags)
176{
177 const u32 *prop;
178 unsigned int psize;
179 struct device_node *parent;
180 struct of_bus *bus;
181 int onesize, i, na, ns;
182
183 /* Get parent & match bus type */
184 parent = of_get_parent(dev);
185 if (parent == NULL)
186 return NULL;
187 bus = of_match_bus(parent);
188 if (strcmp(bus->name, "pci")) {
189 of_node_put(parent);
190 return NULL;
191 }
192 bus->count_cells(dev, &na, &ns);
193 of_node_put(parent);
194 if (!OF_CHECK_COUNTS(na, ns))
195 return NULL;
196
197 /* Get "reg" or "assigned-addresses" property */
198 prop = get_property(dev, bus->addresses, &psize);
199 if (prop == NULL)
200 return NULL;
201 psize /= 4;
202
203 onesize = na + ns;
204 for (i = 0; psize >= onesize; psize -= onesize, prop += onesize, i++)
205 if ((prop[0] & 0xff) == ((bar_no * 4) + PCI_BASE_ADDRESS_0)) {
206 if (size)
207 *size = of_read_number(prop + na, ns);
208 if (flags)
209 *flags = bus->get_flags(prop);
210 return prop;
211 }
212 return NULL;
213}
214EXPORT_SYMBOL(of_get_pci_address);
215
216int of_pci_address_to_resource(struct device_node *dev, int bar,
217 struct resource *r)
218{
219 const u32 *addrp;
220 u64 size;
221 unsigned int flags;
222
223 addrp = of_get_pci_address(dev, bar, &size, &flags);
224 if (addrp == NULL)
225 return -EINVAL;
226 return __of_address_to_resource(dev, addrp, size, flags, r);
227}
228EXPORT_SYMBOL_GPL(of_pci_address_to_resource);
229
230static u8 of_irq_pci_swizzle(u8 slot, u8 pin)
231{
232 return (((pin - 1) + slot) % 4) + 1;
233}
234
235int of_irq_map_pci(struct pci_dev *pdev, struct of_irq *out_irq)
236{
237 struct device_node *dn, *ppnode;
238 struct pci_dev *ppdev;
239 u32 lspec;
240 u32 laddr[3];
241 u8 pin;
242 int rc;
243
244 /* Check if we have a device node, if yes, fallback to standard OF
245 * parsing
246 */
247 dn = pci_device_to_OF_node(pdev);
248 if (dn)
249 return of_irq_map_one(dn, 0, out_irq);
250
251 /* Ok, we don't, time to have fun. Let's start by building up an
252 * interrupt spec. we assume #interrupt-cells is 1, which is standard
253 * for PCI. If you do different, then don't use that routine.
254 */
255 rc = pci_read_config_byte(pdev, PCI_INTERRUPT_PIN, &pin);
256 if (rc != 0)
257 return rc;
258 /* No pin, exit */
259 if (pin == 0)
260 return -ENODEV;
261
262 /* Now we walk up the PCI tree */
263 lspec = pin;
264 for (;;) {
265 /* Get the pci_dev of our parent */
266 ppdev = pdev->bus->self;
267
268 /* Ouch, it's a host bridge... */
269 if (ppdev == NULL) {
270#ifdef CONFIG_PPC64
271 ppnode = pci_bus_to_OF_node(pdev->bus);
272#else
273 struct pci_controller *host;
274 host = pci_bus_to_host(pdev->bus);
275 ppnode = host ? host->arch_data : NULL;
276#endif
277 /* No node for host bridge ? give up */
278 if (ppnode == NULL)
279 return -EINVAL;
280 } else
281 /* We found a P2P bridge, check if it has a node */
282 ppnode = pci_device_to_OF_node(ppdev);
283
284 /* Ok, we have found a parent with a device-node, hand over to
285 * the OF parsing code.
286 * We build a unit address from the linux device to be used for
287 * resolution. Note that we use the linux bus number which may
288 * not match your firmware bus numbering.
289 * Fortunately, in most cases, interrupt-map-mask doesn't include
290 * the bus number as part of the matching.
291 * You should still be careful about that though if you intend
292 * to rely on this function (you ship a firmware that doesn't
293 * create device nodes for all PCI devices).
294 */
295 if (ppnode)
296 break;
297
298 /* We can only get here if we hit a P2P bridge with no node,
299 * let's do standard swizzling and try again
300 */
301 lspec = of_irq_pci_swizzle(PCI_SLOT(pdev->devfn), lspec);
302 pdev = ppdev;
303 }
304
305 laddr[0] = (pdev->bus->number << 16)
306 | (pdev->devfn << 8);
307 laddr[1] = laddr[2] = 0;
308 return of_irq_map_raw(ppnode, &lspec, 1, laddr, out_irq);
309}
310EXPORT_SYMBOL_GPL(of_irq_map_pci);
311#endif /* CONFIG_PCI */
312
165/* 313/*
166 * ISA bus specific translator 314 * ISA bus specific translator
167 */ 315 */
@@ -223,6 +371,7 @@ static unsigned int of_bus_isa_get_flags(const u32 *addr)
223 */ 371 */
224 372
225static struct of_bus of_busses[] = { 373static struct of_bus of_busses[] = {
374#ifdef CONFIG_PCI
226 /* PCI */ 375 /* PCI */
227 { 376 {
228 .name = "pci", 377 .name = "pci",
@@ -233,6 +382,7 @@ static struct of_bus of_busses[] = {
233 .translate = of_bus_pci_translate, 382 .translate = of_bus_pci_translate,
234 .get_flags = of_bus_pci_get_flags, 383 .get_flags = of_bus_pci_get_flags,
235 }, 384 },
385#endif /* CONFIG_PCI */
236 /* ISA */ 386 /* ISA */
237 { 387 {
238 .name = "isa", 388 .name = "isa",
@@ -445,48 +595,6 @@ const u32 *of_get_address(struct device_node *dev, int index, u64 *size,
445} 595}
446EXPORT_SYMBOL(of_get_address); 596EXPORT_SYMBOL(of_get_address);
447 597
448const u32 *of_get_pci_address(struct device_node *dev, int bar_no, u64 *size,
449 unsigned int *flags)
450{
451 const u32 *prop;
452 unsigned int psize;
453 struct device_node *parent;
454 struct of_bus *bus;
455 int onesize, i, na, ns;
456
457 /* Get parent & match bus type */
458 parent = of_get_parent(dev);
459 if (parent == NULL)
460 return NULL;
461 bus = of_match_bus(parent);
462 if (strcmp(bus->name, "pci")) {
463 of_node_put(parent);
464 return NULL;
465 }
466 bus->count_cells(dev, &na, &ns);
467 of_node_put(parent);
468 if (!OF_CHECK_COUNTS(na, ns))
469 return NULL;
470
471 /* Get "reg" or "assigned-addresses" property */
472 prop = get_property(dev, bus->addresses, &psize);
473 if (prop == NULL)
474 return NULL;
475 psize /= 4;
476
477 onesize = na + ns;
478 for (i = 0; psize >= onesize; psize -= onesize, prop += onesize, i++)
479 if ((prop[0] & 0xff) == ((bar_no * 4) + PCI_BASE_ADDRESS_0)) {
480 if (size)
481 *size = of_read_number(prop + na, ns);
482 if (flags)
483 *flags = bus->get_flags(prop);
484 return prop;
485 }
486 return NULL;
487}
488EXPORT_SYMBOL(of_get_pci_address);
489
490static int __of_address_to_resource(struct device_node *dev, const u32 *addrp, 598static int __of_address_to_resource(struct device_node *dev, const u32 *addrp,
491 u64 size, unsigned int flags, 599 u64 size, unsigned int flags,
492 struct resource *r) 600 struct resource *r)
@@ -529,20 +637,6 @@ int of_address_to_resource(struct device_node *dev, int index,
529} 637}
530EXPORT_SYMBOL_GPL(of_address_to_resource); 638EXPORT_SYMBOL_GPL(of_address_to_resource);
531 639
532int of_pci_address_to_resource(struct device_node *dev, int bar,
533 struct resource *r)
534{
535 const u32 *addrp;
536 u64 size;
537 unsigned int flags;
538
539 addrp = of_get_pci_address(dev, bar, &size, &flags);
540 if (addrp == NULL)
541 return -EINVAL;
542 return __of_address_to_resource(dev, addrp, size, flags, r);
543}
544EXPORT_SYMBOL_GPL(of_pci_address_to_resource);
545
546void of_parse_dma_window(struct device_node *dn, const void *dma_window_prop, 640void of_parse_dma_window(struct device_node *dn, const void *dma_window_prop,
547 unsigned long *busno, unsigned long *phys, unsigned long *size) 641 unsigned long *busno, unsigned long *phys, unsigned long *size)
548{ 642{
@@ -898,87 +992,3 @@ int of_irq_map_one(struct device_node *device, int index, struct of_irq *out_irq
898 return res; 992 return res;
899} 993}
900EXPORT_SYMBOL_GPL(of_irq_map_one); 994EXPORT_SYMBOL_GPL(of_irq_map_one);
901
902#ifdef CONFIG_PCI
903static u8 of_irq_pci_swizzle(u8 slot, u8 pin)
904{
905 return (((pin - 1) + slot) % 4) + 1;
906}
907
908int of_irq_map_pci(struct pci_dev *pdev, struct of_irq *out_irq)
909{
910 struct device_node *dn, *ppnode;
911 struct pci_dev *ppdev;
912 u32 lspec;
913 u32 laddr[3];
914 u8 pin;
915 int rc;
916
917 /* Check if we have a device node, if yes, fallback to standard OF
918 * parsing
919 */
920 dn = pci_device_to_OF_node(pdev);
921 if (dn)
922 return of_irq_map_one(dn, 0, out_irq);
923
924 /* Ok, we don't, time to have fun. Let's start by building up an
925 * interrupt spec. we assume #interrupt-cells is 1, which is standard
926 * for PCI. If you do different, then don't use that routine.
927 */
928 rc = pci_read_config_byte(pdev, PCI_INTERRUPT_PIN, &pin);
929 if (rc != 0)
930 return rc;
931 /* No pin, exit */
932 if (pin == 0)
933 return -ENODEV;
934
935 /* Now we walk up the PCI tree */
936 lspec = pin;
937 for (;;) {
938 /* Get the pci_dev of our parent */
939 ppdev = pdev->bus->self;
940
941 /* Ouch, it's a host bridge... */
942 if (ppdev == NULL) {
943#ifdef CONFIG_PPC64
944 ppnode = pci_bus_to_OF_node(pdev->bus);
945#else
946 struct pci_controller *host;
947 host = pci_bus_to_host(pdev->bus);
948 ppnode = host ? host->arch_data : NULL;
949#endif
950 /* No node for host bridge ? give up */
951 if (ppnode == NULL)
952 return -EINVAL;
953 } else
954 /* We found a P2P bridge, check if it has a node */
955 ppnode = pci_device_to_OF_node(ppdev);
956
957 /* Ok, we have found a parent with a device-node, hand over to
958 * the OF parsing code.
959 * We build a unit address from the linux device to be used for
960 * resolution. Note that we use the linux bus number which may
961 * not match your firmware bus numbering.
962 * Fortunately, in most cases, interrupt-map-mask doesn't include
963 * the bus number as part of the matching.
964 * You should still be careful about that though if you intend
965 * to rely on this function (you ship a firmware that doesn't
966 * create device nodes for all PCI devices).
967 */
968 if (ppnode)
969 break;
970
971 /* We can only get here if we hit a P2P bridge with no node,
972 * let's do standard swizzling and try again
973 */
974 lspec = of_irq_pci_swizzle(PCI_SLOT(pdev->devfn), lspec);
975 pdev = ppdev;
976 }
977
978 laddr[0] = (pdev->bus->number << 16)
979 | (pdev->devfn << 8);
980 laddr[1] = laddr[2] = 0;
981 return of_irq_map_raw(ppnode, &lspec, 1, laddr, out_irq);
982}
983EXPORT_SYMBOL_GPL(of_irq_map_pci);
984#endif /* CONFIG_PCI */
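Two small pieces of arithmetic carry most of the weight in the relocated PCI helpers: the config-space offset of BAR n that of_get_pci_address() matches against, and the standard interrupt swizzle applied when of_irq_map_pci() crosses a P2P bridge that has no device node. A standalone illustration of both, with example values only:

/* BAR n lives at PCI_BASE_ADDRESS_0 + 4 * n in config space, which is what
 * of_get_pci_address() compares against the low byte of the first address
 * cell.  The swizzle is the one used by of_irq_pci_swizzle() above.
 */
#include <stdio.h>

#define PCI_BASE_ADDRESS_0 0x10

static unsigned int bar_cfg_offset(int bar_no)
{
	return PCI_BASE_ADDRESS_0 + 4 * bar_no;
}

static unsigned char pci_swizzle(unsigned char slot, unsigned char pin)
{
	/* pins are 1..4 (INTA..INTD) */
	return (((pin - 1) + slot) % 4) + 1;
}

int main(void)
{
	int bar;
	unsigned char slot, pin;

	for (bar = 0; bar < 6; bar++)
		printf("BAR%d lives at config offset 0x%02x\n",
		       bar, bar_cfg_offset(bar));

	/* INTA of a device in slot 2 behind a node-less bridge becomes INTC upstream */
	slot = 2;
	pin = 1;
	printf("slot %d pin INT%c -> upstream INT%c\n",
	       slot, 'A' + pin - 1, 'A' + pci_swizzle(slot, pin) - 1);
	return 0;
}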
diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
index 6ef80d4e38d3..387ed0d9ad61 100644
--- a/arch/powerpc/kernel/rtas.c
+++ b/arch/powerpc/kernel/rtas.c
@@ -810,9 +810,9 @@ asmlinkage int ppc_rtas(struct rtas_args __user *uargs)
810 return 0; 810 return 0;
811} 811}
812 812
813#ifdef CONFIG_HOTPLUG_CPU
813/* This version can't take the spinlock, because it never returns */ 814/* This version can't take the spinlock, because it never returns */
814 815static struct rtas_args rtas_stop_self_args = {
815struct rtas_args rtas_stop_self_args = {
816 /* The token is initialized for real in setup_system() */ 816 /* The token is initialized for real in setup_system() */
817 .token = RTAS_UNKNOWN_SERVICE, 817 .token = RTAS_UNKNOWN_SERVICE,
818 .nargs = 0, 818 .nargs = 0,
@@ -834,6 +834,7 @@ void rtas_stop_self(void)
834 834
835 panic("Alas, I survived.\n"); 835 panic("Alas, I survived.\n");
836} 836}
837#endif
837 838
838/* 839/*
839 * Call early during boot, before mem init or bootmem, to retrieve the RTAS 840 * Call early during boot, before mem init or bootmem, to retrieve the RTAS
diff --git a/arch/powerpc/kernel/rtas_flash.c b/arch/powerpc/kernel/rtas_flash.c
index 6f6fc977cb39..b9561d300516 100644
--- a/arch/powerpc/kernel/rtas_flash.c
+++ b/arch/powerpc/kernel/rtas_flash.c
@@ -681,14 +681,12 @@ static int initialize_flash_pde_data(const char *rtas_call_name,
681 int *status; 681 int *status;
682 int token; 682 int token;
683 683
684 dp->data = kmalloc(buf_size, GFP_KERNEL); 684 dp->data = kzalloc(buf_size, GFP_KERNEL);
685 if (dp->data == NULL) { 685 if (dp->data == NULL) {
686 remove_flash_pde(dp); 686 remove_flash_pde(dp);
687 return -ENOMEM; 687 return -ENOMEM;
688 } 688 }
689 689
690 memset(dp->data, 0, buf_size);
691
692 /* 690 /*
693 * This code assumes that the status int is the first member of the 691 * This code assumes that the status int is the first member of the
694 * struct 692 * struct
diff --git a/arch/powerpc/kernel/rtas_pci.c b/arch/powerpc/kernel/rtas_pci.c
index b4a0de79c060..ace9f4c86e67 100644
--- a/arch/powerpc/kernel/rtas_pci.c
+++ b/arch/powerpc/kernel/rtas_pci.c
@@ -38,6 +38,7 @@
38#include <asm/rtas.h> 38#include <asm/rtas.h>
39#include <asm/mpic.h> 39#include <asm/mpic.h>
40#include <asm/ppc-pci.h> 40#include <asm/ppc-pci.h>
41#include <asm/eeh.h>
41 42
42/* RTAS tokens */ 43/* RTAS tokens */
43static int read_pci_config; 44static int read_pci_config;
@@ -231,32 +232,13 @@ void __init init_pci_config_tokens (void)
231 232
232unsigned long __devinit get_phb_buid (struct device_node *phb) 233unsigned long __devinit get_phb_buid (struct device_node *phb)
233{ 234{
234 int addr_cells; 235 struct resource r;
235 const unsigned int *buid_vals;
236 unsigned int len;
237 unsigned long buid;
238
239 if (ibm_read_pci_config == -1) return 0;
240 236
241 /* PHB's will always be children of the root node, 237 if (ibm_read_pci_config == -1)
242 * or so it is promised by the current firmware. */
243 if (phb->parent == NULL)
244 return 0; 238 return 0;
245 if (phb->parent->parent) 239 if (of_address_to_resource(phb, 0, &r))
246 return 0;
247
248 buid_vals = get_property(phb, "reg", &len);
249 if (buid_vals == NULL)
250 return 0; 240 return 0;
251 241 return r.start;
252 addr_cells = prom_n_addr_cells(phb);
253 if (addr_cells == 1) {
254 buid = (unsigned long) buid_vals[0];
255 } else {
256 buid = (((unsigned long)buid_vals[0]) << 32UL) |
257 (((unsigned long)buid_vals[1]) & 0xffffffff);
258 }
259 return buid;
260} 242}
261 243
262static int phb_set_bus_ranges(struct device_node *dev, 244static int phb_set_bus_ranges(struct device_node *dev,
@@ -276,8 +258,10 @@ static int phb_set_bus_ranges(struct device_node *dev,
276 return 0; 258 return 0;
277} 259}
278 260
279int __devinit setup_phb(struct device_node *dev, struct pci_controller *phb) 261int __devinit rtas_setup_phb(struct pci_controller *phb)
280{ 262{
263 struct device_node *dev = phb->arch_data;
264
281 if (is_python(dev)) 265 if (is_python(dev))
282 python_countermeasures(dev); 266 python_countermeasures(dev);
283 267
@@ -309,7 +293,7 @@ unsigned long __init find_and_init_phbs(void)
309 phb = pcibios_alloc_controller(node); 293 phb = pcibios_alloc_controller(node);
310 if (!phb) 294 if (!phb)
311 continue; 295 continue;
312 setup_phb(node, phb); 296 rtas_setup_phb(phb);
313 pci_process_bridge_OF_ranges(phb, node, 0); 297 pci_process_bridge_OF_ranges(phb, node, 0);
314 pci_setup_phb_io(phb, index == 0); 298 pci_setup_phb_io(phb, index == 0);
315 index++; 299 index++;
@@ -381,7 +365,6 @@ int pcibios_remove_root_bus(struct pci_controller *phb)
381 } 365 }
382 } 366 }
383 367
384 list_del(&phb->list_node);
385 pcibios_free_controller(phb); 368 pcibios_free_controller(phb);
386 369
387 return 0; 370 return 0;
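For reference, the BUID assembly that the simplified get_phb_buid() no longer does by hand looks like this when pulled out on its own; the new code lets of_address_to_resource() parse the "reg" property and just returns the resource start. The sample cells are made up:

/* One- or two-cell BUID assembly, as in the removed code: the high word
 * comes first when there are two address cells.
 */
#include <stdio.h>
#include <stdint.h>

static uint64_t buid_from_cells(const uint32_t *cells, int addr_cells)
{
	if (addr_cells == 1)
		return cells[0];
	return ((uint64_t)cells[0] << 32) | (cells[1] & 0xffffffffULL);
}

int main(void)
{
	uint32_t reg[2] = { 0x00000080, 0x00c00000 };	/* example cells only */

	printf("BUID = 0x%llx\n",
	       (unsigned long long)buid_from_cells(reg, 2));
	return 0;
}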
diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c
index a4c2964a3ca6..61c65d19ef06 100644
--- a/arch/powerpc/kernel/setup_32.c
+++ b/arch/powerpc/kernel/setup_32.c
@@ -63,10 +63,6 @@ unsigned int DMA_MODE_WRITE;
63 63
64int have_of = 1; 64int have_of = 1;
65 65
66#ifdef CONFIG_PPC_MULTIPLATFORM
67dev_t boot_dev;
68#endif /* CONFIG_PPC_MULTIPLATFORM */
69
70#ifdef CONFIG_VGA_CONSOLE 66#ifdef CONFIG_VGA_CONSOLE
71unsigned long vgacon_remap_base; 67unsigned long vgacon_remap_base;
72#endif 68#endif
@@ -101,7 +97,7 @@ unsigned long __init early_init(unsigned long dt_ptr)
101 * Identify the CPU type and fix up code sections 97 * Identify the CPU type and fix up code sections
102 * that depend on which cpu we have. 98 * that depend on which cpu we have.
103 */ 99 */
104 spec = identify_cpu(offset); 100 spec = identify_cpu(offset, mfspr(SPRN_PVR));
105 101
106 do_feature_fixups(spec->cpu_features, 102 do_feature_fixups(spec->cpu_features,
107 PTRRELOC(&__start___ftr_fixup), 103 PTRRELOC(&__start___ftr_fixup),
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 16278968dab6..3733de30e84d 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -33,6 +33,7 @@
33#include <linux/serial.h> 33#include <linux/serial.h>
34#include <linux/serial_8250.h> 34#include <linux/serial_8250.h>
35#include <linux/bootmem.h> 35#include <linux/bootmem.h>
36#include <linux/pci.h>
36#include <asm/io.h> 37#include <asm/io.h>
37#include <asm/kdump.h> 38#include <asm/kdump.h>
38#include <asm/prom.h> 39#include <asm/prom.h>
@@ -71,7 +72,6 @@
71 72
72int have_of = 1; 73int have_of = 1;
73int boot_cpuid = 0; 74int boot_cpuid = 0;
74dev_t boot_dev;
75u64 ppc64_pft_size; 75u64 ppc64_pft_size;
76 76
77/* Pick defaults since we might want to patch instructions 77/* Pick defaults since we might want to patch instructions
@@ -171,7 +171,7 @@ void __init setup_paca(int cpu)
171void __init early_setup(unsigned long dt_ptr) 171void __init early_setup(unsigned long dt_ptr)
172{ 172{
173 /* Identify CPU type */ 173 /* Identify CPU type */
174 identify_cpu(0); 174 identify_cpu(0, mfspr(SPRN_PVR));
175 175
176 /* Assume we're on cpu 0 for now. Don't write to the paca yet! */ 176 /* Assume we're on cpu 0 for now. Don't write to the paca yet! */
177 setup_paca(0); 177 setup_paca(0);
@@ -226,8 +226,8 @@ void early_setup_secondary(void)
226{ 226{
227 struct paca_struct *lpaca = get_paca(); 227 struct paca_struct *lpaca = get_paca();
228 228
229 /* Mark enabled in PACA */ 229 /* Mark interrupts enabled in PACA */
230 lpaca->proc_enabled = 0; 230 lpaca->soft_enabled = 0;
231 231
232 /* Initialize hash table for that CPU */ 232 /* Initialize hash table for that CPU */
233 htab_initialize_secondary(); 233 htab_initialize_secondary();
@@ -392,7 +392,8 @@ void __init setup_system(void)
392 * setting up the hash table pointers. It also sets up some interrupt-mapping 392 * setting up the hash table pointers. It also sets up some interrupt-mapping
393 * related options that will be used by finish_device_tree() 393 * related options that will be used by finish_device_tree()
394 */ 394 */
395 ppc_md.init_early(); 395 if (ppc_md.init_early)
396 ppc_md.init_early();
396 397
397 /* 398 /*
398 * We can discover serial ports now since the above did setup the 399 * We can discover serial ports now since the above did setup the
@@ -598,3 +599,10 @@ void __init setup_per_cpu_areas(void)
598 } 599 }
599} 600}
600#endif 601#endif
602
603
604#ifdef CONFIG_PPC_INDIRECT_IO
605struct ppc_pci_io ppc_pci_io;
606EXPORT_SYMBOL(ppc_pci_io);
607#endif /* CONFIG_PPC_INDIRECT_IO */
608
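setup_system() now treats ppc_md.init_early as optional rather than calling it unconditionally. A minimal standalone sketch of that call-the-hook-only-if-set pattern; the structure and hook names below are illustrative, not the kernel's:

/* Optional machine-description hook: call it only when the platform
 * actually filled it in, as the guard added in the hunk above does.
 */
#include <stdio.h>

struct machine_desc {
	const char *name;
	void (*init_early)(void);	/* may legitimately be NULL */
};

static void example_init_early(void)
{
	printf("platform early init\n");
}

int main(void)
{
	struct machine_desc with_hook = { "with-hook", example_init_early };
	struct machine_desc without_hook = { "without-hook", NULL };
	struct machine_desc *mds[] = { &with_hook, &without_hook };

	for (int i = 0; i < 2; i++) {
		printf("%s: ", mds[i]->name);
		if (mds[i]->init_early)		/* the guard */
			mds[i]->init_early();
		else
			printf("no early init hook\n");
	}
	return 0;
}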
diff --git a/arch/powerpc/kernel/smp-tbsync.c b/arch/powerpc/kernel/smp-tbsync.c
index de59c6c31a5b..bc892e69b4f7 100644
--- a/arch/powerpc/kernel/smp-tbsync.c
+++ b/arch/powerpc/kernel/smp-tbsync.c
@@ -78,7 +78,7 @@ static int __devinit start_contest(int cmd, long offset, int num)
78{ 78{
79 int i, score=0; 79 int i, score=0;
80 u64 tb; 80 u64 tb;
81 long mark; 81 u64 mark;
82 82
83 tbsync->cmd = cmd; 83 tbsync->cmd = cmd;
84 84
@@ -116,8 +116,7 @@ void __devinit smp_generic_give_timebase(void)
116 printk("Synchronizing timebase\n"); 116 printk("Synchronizing timebase\n");
117 117
118 /* if this fails then this kernel won't work anyway... */ 118 /* if this fails then this kernel won't work anyway... */
119 tbsync = kmalloc( sizeof(*tbsync), GFP_KERNEL ); 119 tbsync = kzalloc( sizeof(*tbsync), GFP_KERNEL );
120 memset( tbsync, 0, sizeof(*tbsync) );
121 mb(); 120 mb();
122 running = 1; 121 running = 1;
123 122
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 35c6309bdb76..9b28c238b6c0 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -65,6 +65,7 @@ cpumask_t cpu_sibling_map[NR_CPUS] = { [0 ... NR_CPUS-1] = CPU_MASK_NONE };
65 65
66EXPORT_SYMBOL(cpu_online_map); 66EXPORT_SYMBOL(cpu_online_map);
67EXPORT_SYMBOL(cpu_possible_map); 67EXPORT_SYMBOL(cpu_possible_map);
68EXPORT_SYMBOL(cpu_sibling_map);
68 69
69/* SMP operations for this machine */ 70/* SMP operations for this machine */
70struct smp_ops_t *smp_ops; 71struct smp_ops_t *smp_ops;
diff --git a/arch/powerpc/kernel/sys_ppc32.c b/arch/powerpc/kernel/sys_ppc32.c
index d15c33e95959..03a2a2f30d66 100644
--- a/arch/powerpc/kernel/sys_ppc32.c
+++ b/arch/powerpc/kernel/sys_ppc32.c
@@ -51,6 +51,7 @@
51#include <asm/time.h> 51#include <asm/time.h>
52#include <asm/mmu_context.h> 52#include <asm/mmu_context.h>
53#include <asm/ppc-pci.h> 53#include <asm/ppc-pci.h>
54#include <asm/syscalls.h>
54 55
55/* readdir & getdents */ 56/* readdir & getdents */
56#define NAME_OFFSET(de) ((int) ((de)->d_name - (char __user *) (de))) 57#define NAME_OFFSET(de) ((int) ((de)->d_name - (char __user *) (de)))
diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c
index d45a168bdaca..22123a0d5416 100644
--- a/arch/powerpc/kernel/sysfs.c
+++ b/arch/powerpc/kernel/sysfs.c
@@ -200,10 +200,9 @@ static void register_cpu_online(unsigned int cpu)
200 struct cpu *c = &per_cpu(cpu_devices, cpu); 200 struct cpu *c = &per_cpu(cpu_devices, cpu);
201 struct sys_device *s = &c->sysdev; 201 struct sys_device *s = &c->sysdev;
202 202
203#ifndef CONFIG_PPC_ISERIES 203 if (!firmware_has_feature(FW_FEATURE_ISERIES) &&
204 if (cpu_has_feature(CPU_FTR_SMT)) 204 cpu_has_feature(CPU_FTR_SMT))
205 sysdev_create_file(s, &attr_smt_snooze_delay); 205 sysdev_create_file(s, &attr_smt_snooze_delay);
206#endif
207 206
208 /* PMC stuff */ 207 /* PMC stuff */
209 208
@@ -242,10 +241,9 @@ static void unregister_cpu_online(unsigned int cpu)
242 241
243 BUG_ON(c->no_control); 242 BUG_ON(c->no_control);
244 243
245#ifndef CONFIG_PPC_ISERIES 244 if (!firmware_has_feature(FW_FEATURE_ISERIES) &&
246 if (cpu_has_feature(CPU_FTR_SMT)) 245 cpu_has_feature(CPU_FTR_SMT))
247 sysdev_remove_file(s, &attr_smt_snooze_delay); 246 sysdev_remove_file(s, &attr_smt_snooze_delay);
248#endif
249 247
250 /* PMC stuff */ 248 /* PMC stuff */
251 249
@@ -299,6 +297,72 @@ static struct notifier_block __cpuinitdata sysfs_cpu_nb = {
299 .notifier_call = sysfs_cpu_notify, 297 .notifier_call = sysfs_cpu_notify,
300}; 298};
301 299
300static DEFINE_MUTEX(cpu_mutex);
301
302int cpu_add_sysdev_attr(struct sysdev_attribute *attr)
303{
304 int cpu;
305
306 mutex_lock(&cpu_mutex);
307
308 for_each_possible_cpu(cpu) {
309 sysdev_create_file(get_cpu_sysdev(cpu), attr);
310 }
311
312 mutex_unlock(&cpu_mutex);
313 return 0;
314}
315EXPORT_SYMBOL_GPL(cpu_add_sysdev_attr);
316
317int cpu_add_sysdev_attr_group(struct attribute_group *attrs)
318{
319 int cpu;
320 struct sys_device *sysdev;
321
322 mutex_lock(&cpu_mutex);
323
324 for_each_possible_cpu(cpu) {
325 sysdev = get_cpu_sysdev(cpu);
326 sysfs_create_group(&sysdev->kobj, attrs);
327 }
328
329 mutex_unlock(&cpu_mutex);
330 return 0;
331}
332EXPORT_SYMBOL_GPL(cpu_add_sysdev_attr_group);
333
334
335void cpu_remove_sysdev_attr(struct sysdev_attribute *attr)
336{
337 int cpu;
338
339 mutex_lock(&cpu_mutex);
340
341 for_each_possible_cpu(cpu) {
342 sysdev_remove_file(get_cpu_sysdev(cpu), attr);
343 }
344
345 mutex_unlock(&cpu_mutex);
346}
347EXPORT_SYMBOL_GPL(cpu_remove_sysdev_attr);
348
349void cpu_remove_sysdev_attr_group(struct attribute_group *attrs)
350{
351 int cpu;
352 struct sys_device *sysdev;
353
354 mutex_lock(&cpu_mutex);
355
356 for_each_possible_cpu(cpu) {
357 sysdev = get_cpu_sysdev(cpu);
358 sysfs_remove_group(&sysdev->kobj, attrs);
359 }
360
361 mutex_unlock(&cpu_mutex);
362}
363EXPORT_SYMBOL_GPL(cpu_remove_sysdev_attr_group);
364
365
302/* NUMA stuff */ 366/* NUMA stuff */
303 367
304#ifdef CONFIG_NUMA 368#ifdef CONFIG_NUMA
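The new cpu_add_sysdev_attr()/cpu_remove_sysdev_attr() helpers attach or remove one attribute on every possible CPU while holding cpu_mutex. A rough userspace analog of that iterate-under-a-lock pattern, with an array standing in for the per-CPU sysdev objects (everything below is illustrative):

/* Register one "attribute" on every fake CPU while holding a mutex,
 * mirroring the for_each_possible_cpu() loop under cpu_mutex above.
 */
#include <stdio.h>
#include <pthread.h>

#define NR_FAKE_CPUS 4

static pthread_mutex_t cpu_mutex = PTHREAD_MUTEX_INITIALIZER;
static const char *cpu_attr[NR_FAKE_CPUS];	/* one attribute slot per cpu */

static int add_attr_all_cpus(const char *attr_name)
{
	pthread_mutex_lock(&cpu_mutex);
	for (int cpu = 0; cpu < NR_FAKE_CPUS; cpu++)
		cpu_attr[cpu] = attr_name;	/* stand-in for sysdev_create_file() */
	pthread_mutex_unlock(&cpu_mutex);
	return 0;
}

int main(void)
{
	add_attr_all_cpus("smt_snooze_delay");
	for (int cpu = 0; cpu < NR_FAKE_CPUS; cpu++)
		printf("cpu%d: %s\n", cpu, cpu_attr[cpu]);
	return 0;
}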
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 46a24de36fec..f6f0c6b07c4c 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -631,7 +631,8 @@ void timer_interrupt(struct pt_regs * regs)
631 calculate_steal_time(); 631 calculate_steal_time();
632 632
633#ifdef CONFIG_PPC_ISERIES 633#ifdef CONFIG_PPC_ISERIES
634 get_lppaca()->int_dword.fields.decr_int = 0; 634 if (firmware_has_feature(FW_FEATURE_ISERIES))
635 get_lppaca()->int_dword.fields.decr_int = 0;
635#endif 636#endif
636 637
637 while ((ticks = tb_ticks_since(per_cpu(last_jiffy, cpu))) 638 while ((ticks = tb_ticks_since(per_cpu(last_jiffy, cpu)))
@@ -674,7 +675,7 @@ void timer_interrupt(struct pt_regs * regs)
674 set_dec(next_dec); 675 set_dec(next_dec);
675 676
676#ifdef CONFIG_PPC_ISERIES 677#ifdef CONFIG_PPC_ISERIES
677 if (hvlpevent_is_pending()) 678 if (firmware_has_feature(FW_FEATURE_ISERIES) && hvlpevent_is_pending())
678 process_hvlpevents(); 679 process_hvlpevents();
679#endif 680#endif
680 681
@@ -774,7 +775,7 @@ int do_settimeofday(struct timespec *tv)
774 * settimeofday to perform this operation. 775 * settimeofday to perform this operation.
775 */ 776 */
776#ifdef CONFIG_PPC_ISERIES 777#ifdef CONFIG_PPC_ISERIES
777 if (first_settimeofday) { 778 if (firmware_has_feature(FW_FEATURE_ISERIES) && first_settimeofday) {
778 iSeries_tb_recal(); 779 iSeries_tb_recal();
779 first_settimeofday = 0; 780 first_settimeofday = 0;
780 } 781 }
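The time.c hunks replace bare #ifdef-only iSeries paths with runtime firmware_has_feature(FW_FEATURE_ISERIES) tests, so a kernel built with iSeries support can still skip those paths on other machines. A standalone analog of that run-time feature-bit test; the feature bits and helper below are stand-ins, not the kernel's:

/* Firmware features as a bitmask set once at boot and tested at run time,
 * instead of relying on compile-time #ifdefs alone.
 */
#include <stdio.h>

#define FW_FEATURE_EXAMPLE_ISERIES	(1UL << 0)
#define FW_FEATURE_EXAMPLE_LPAR		(1UL << 1)

static unsigned long firmware_features;	/* filled in once during boot */

static int has_feature(unsigned long feature)
{
	return (firmware_features & feature) != 0;
}

int main(void)
{
	firmware_features = FW_FEATURE_EXAMPLE_LPAR;	/* pretend: not iSeries */

	if (has_feature(FW_FEATURE_EXAMPLE_ISERIES))
		printf("would run the iSeries-only decrementer path\n");
	else
		printf("iSeries path skipped at run time\n");
	return 0;
}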
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index c66b4771ef44..0d4e203fa7a0 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -53,10 +53,6 @@
53#endif 53#endif
54#include <asm/kexec.h> 54#include <asm/kexec.h>
55 55
56#ifdef CONFIG_PPC64 /* XXX */
57#define _IO_BASE pci_io_base
58#endif
59
60#ifdef CONFIG_DEBUGGER 56#ifdef CONFIG_DEBUGGER
61int (*__debugger)(struct pt_regs *regs); 57int (*__debugger)(struct pt_regs *regs);
62int (*__debugger_ipi)(struct pt_regs *regs); 58int (*__debugger_ipi)(struct pt_regs *regs);
@@ -241,7 +237,7 @@ void system_reset_exception(struct pt_regs *regs)
241 */ 237 */
242static inline int check_io_access(struct pt_regs *regs) 238static inline int check_io_access(struct pt_regs *regs)
243{ 239{
244#if defined(CONFIG_PPC_PMAC) && defined(CONFIG_PPC32) 240#ifdef CONFIG_PPC32
245 unsigned long msr = regs->msr; 241 unsigned long msr = regs->msr;
246 const struct exception_table_entry *entry; 242 const struct exception_table_entry *entry;
247 unsigned int *nip = (unsigned int *)regs->nip; 243 unsigned int *nip = (unsigned int *)regs->nip;
@@ -274,7 +270,7 @@ static inline int check_io_access(struct pt_regs *regs)
274 return 1; 270 return 1;
275 } 271 }
276 } 272 }
277#endif /* CONFIG_PPC_PMAC && CONFIG_PPC32 */ 273#endif /* CONFIG_PPC32 */
278 return 0; 274 return 0;
279} 275}
280 276
diff --git a/arch/powerpc/kernel/vio.c b/arch/powerpc/kernel/vio.c
index ed007878d1bf..a80f8f1d2e5d 100644
--- a/arch/powerpc/kernel/vio.c
+++ b/arch/powerpc/kernel/vio.c
@@ -81,15 +81,15 @@ static struct iommu_table *vio_build_iommu_table(struct vio_dev *dev)
81 struct iommu_table *tbl; 81 struct iommu_table *tbl;
82 unsigned long offset, size; 82 unsigned long offset, size;
83 83
84 dma_window = get_property(dev->dev.platform_data, 84 dma_window = get_property(dev->dev.archdata.of_node,
85 "ibm,my-dma-window", NULL); 85 "ibm,my-dma-window", NULL);
86 if (!dma_window) 86 if (!dma_window)
87 return NULL; 87 return NULL;
88 88
89 tbl = kmalloc(sizeof(*tbl), GFP_KERNEL); 89 tbl = kmalloc(sizeof(*tbl), GFP_KERNEL);
90 90
91 of_parse_dma_window(dev->dev.platform_data, dma_window, 91 of_parse_dma_window(dev->dev.archdata.of_node, dma_window,
92 &tbl->it_index, &offset, &size); 92 &tbl->it_index, &offset, &size);
93 93
94 /* TCE table size - measured in tce entries */ 94 /* TCE table size - measured in tce entries */
95 tbl->it_size = size >> IOMMU_PAGE_SHIFT; 95 tbl->it_size = size >> IOMMU_PAGE_SHIFT;
@@ -117,7 +117,8 @@ static const struct vio_device_id *vio_match_device(
117{ 117{
118 while (ids->type[0] != '\0') { 118 while (ids->type[0] != '\0') {
119 if ((strncmp(dev->type, ids->type, strlen(ids->type)) == 0) && 119 if ((strncmp(dev->type, ids->type, strlen(ids->type)) == 0) &&
120 device_is_compatible(dev->dev.platform_data, ids->compat)) 120 device_is_compatible(dev->dev.archdata.of_node,
121 ids->compat))
121 return ids; 122 return ids;
122 ids++; 123 ids++;
123 } 124 }
@@ -198,9 +199,9 @@ EXPORT_SYMBOL(vio_unregister_driver);
198/* vio_dev refcount hit 0 */ 199/* vio_dev refcount hit 0 */
199static void __devinit vio_dev_release(struct device *dev) 200static void __devinit vio_dev_release(struct device *dev)
200{ 201{
201 if (dev->platform_data) { 202 if (dev->archdata.of_node) {
202 /* XXX free TCE table */ 203 /* XXX should free TCE table */
203 of_node_put(dev->platform_data); 204 of_node_put(dev->archdata.of_node);
204 } 205 }
205 kfree(to_vio_dev(dev)); 206 kfree(to_vio_dev(dev));
206} 207}
@@ -210,7 +211,7 @@ static void __devinit vio_dev_release(struct device *dev)
210 * @of_node: The OF node for this device. 211 * @of_node: The OF node for this device.
211 * 212 *
212 * Creates and initializes a vio_dev structure from the data in 213 * Creates and initializes a vio_dev structure from the data in
213 * of_node (dev.platform_data) and adds it to the list of virtual devices. 214 * of_node and adds it to the list of virtual devices.
214 * Returns a pointer to the created vio_dev or NULL if node has 215 * Returns a pointer to the created vio_dev or NULL if node has
215 * NULL device_type or compatible fields. 216 * NULL device_type or compatible fields.
216 */ 217 */
@@ -240,8 +241,6 @@ struct vio_dev * __devinit vio_register_device_node(struct device_node *of_node)
240 if (viodev == NULL) 241 if (viodev == NULL)
241 return NULL; 242 return NULL;
242 243
243 viodev->dev.platform_data = of_node_get(of_node);
244
245 viodev->irq = irq_of_parse_and_map(of_node, 0); 244 viodev->irq = irq_of_parse_and_map(of_node, 0);
246 245
247 snprintf(viodev->dev.bus_id, BUS_ID_SIZE, "%x", *unit_address); 246 snprintf(viodev->dev.bus_id, BUS_ID_SIZE, "%x", *unit_address);
@@ -254,7 +253,10 @@ struct vio_dev * __devinit vio_register_device_node(struct device_node *of_node)
254 if (unit_address != NULL) 253 if (unit_address != NULL)
255 viodev->unit_address = *unit_address; 254 viodev->unit_address = *unit_address;
256 } 255 }
257 viodev->iommu_table = vio_build_iommu_table(viodev); 256 viodev->dev.archdata.of_node = of_node_get(of_node);
257 viodev->dev.archdata.dma_ops = &dma_iommu_ops;
258 viodev->dev.archdata.dma_data = vio_build_iommu_table(viodev);
259 viodev->dev.archdata.numa_node = of_node_to_nid(of_node);
258 260
259 /* init generic 'struct device' fields: */ 261 /* init generic 'struct device' fields: */
260 viodev->dev.parent = &vio_bus_device.dev; 262 viodev->dev.parent = &vio_bus_device.dev;
@@ -285,10 +287,11 @@ static int __init vio_bus_init(void)
285#ifdef CONFIG_PPC_ISERIES 287#ifdef CONFIG_PPC_ISERIES
286 if (firmware_has_feature(FW_FEATURE_ISERIES)) { 288 if (firmware_has_feature(FW_FEATURE_ISERIES)) {
287 iommu_vio_init(); 289 iommu_vio_init();
288 vio_bus_device.iommu_table = &vio_iommu_table; 290 vio_bus_device.dev.archdata.dma_ops = &dma_iommu_ops;
291 vio_bus_device.dev.archdata.dma_data = &vio_iommu_table;
289 iSeries_vio_dev = &vio_bus_device.dev; 292 iSeries_vio_dev = &vio_bus_device.dev;
290 } 293 }
291#endif 294#endif /* CONFIG_PPC_ISERIES */
292 295
293 err = bus_register(&vio_bus_type); 296 err = bus_register(&vio_bus_type);
294 if (err) { 297 if (err) {
@@ -336,7 +339,7 @@ static ssize_t name_show(struct device *dev,
336static ssize_t devspec_show(struct device *dev, 339static ssize_t devspec_show(struct device *dev,
337 struct device_attribute *attr, char *buf) 340 struct device_attribute *attr, char *buf)
338{ 341{
339 struct device_node *of_node = dev->platform_data; 342 struct device_node *of_node = dev->archdata.of_node;
340 343
341 return sprintf(buf, "%s\n", of_node ? of_node->full_name : "none"); 344 return sprintf(buf, "%s\n", of_node ? of_node->full_name : "none");
342} 345}
@@ -353,62 +356,6 @@ void __devinit vio_unregister_device(struct vio_dev *viodev)
353} 356}
354EXPORT_SYMBOL(vio_unregister_device); 357EXPORT_SYMBOL(vio_unregister_device);
355 358
356static dma_addr_t vio_map_single(struct device *dev, void *vaddr,
357 size_t size, enum dma_data_direction direction)
358{
359 return iommu_map_single(to_vio_dev(dev)->iommu_table, vaddr, size,
360 ~0ul, direction);
361}
362
363static void vio_unmap_single(struct device *dev, dma_addr_t dma_handle,
364 size_t size, enum dma_data_direction direction)
365{
366 iommu_unmap_single(to_vio_dev(dev)->iommu_table, dma_handle, size,
367 direction);
368}
369
370static int vio_map_sg(struct device *dev, struct scatterlist *sglist,
371 int nelems, enum dma_data_direction direction)
372{
373 return iommu_map_sg(dev, to_vio_dev(dev)->iommu_table, sglist,
374 nelems, ~0ul, direction);
375}
376
377static void vio_unmap_sg(struct device *dev, struct scatterlist *sglist,
378 int nelems, enum dma_data_direction direction)
379{
380 iommu_unmap_sg(to_vio_dev(dev)->iommu_table, sglist, nelems, direction);
381}
382
383static void *vio_alloc_coherent(struct device *dev, size_t size,
384 dma_addr_t *dma_handle, gfp_t flag)
385{
386 return iommu_alloc_coherent(to_vio_dev(dev)->iommu_table, size,
387 dma_handle, ~0ul, flag, -1);
388}
389
390static void vio_free_coherent(struct device *dev, size_t size,
391 void *vaddr, dma_addr_t dma_handle)
392{
393 iommu_free_coherent(to_vio_dev(dev)->iommu_table, size, vaddr,
394 dma_handle);
395}
396
397static int vio_dma_supported(struct device *dev, u64 mask)
398{
399 return 1;
400}
401
402struct dma_mapping_ops vio_dma_ops = {
403 .alloc_coherent = vio_alloc_coherent,
404 .free_coherent = vio_free_coherent,
405 .map_single = vio_map_single,
406 .unmap_single = vio_unmap_single,
407 .map_sg = vio_map_sg,
408 .unmap_sg = vio_unmap_sg,
409 .dma_supported = vio_dma_supported,
410};
411
412static int vio_bus_match(struct device *dev, struct device_driver *drv) 359static int vio_bus_match(struct device *dev, struct device_driver *drv)
413{ 360{
414 const struct vio_dev *vio_dev = to_vio_dev(dev); 361 const struct vio_dev *vio_dev = to_vio_dev(dev);
@@ -422,13 +369,14 @@ static int vio_hotplug(struct device *dev, char **envp, int num_envp,
422 char *buffer, int buffer_size) 369 char *buffer, int buffer_size)
423{ 370{
424 const struct vio_dev *vio_dev = to_vio_dev(dev); 371 const struct vio_dev *vio_dev = to_vio_dev(dev);
425 struct device_node *dn = dev->platform_data; 372 struct device_node *dn;
426 const char *cp; 373 const char *cp;
427 int length; 374 int length;
428 375
429 if (!num_envp) 376 if (!num_envp)
430 return -ENOMEM; 377 return -ENOMEM;
431 378
379 dn = dev->archdata.of_node;
432 if (!dn) 380 if (!dn)
433 return -ENODEV; 381 return -ENODEV;
434 cp = get_property(dn, "compatible", &length); 382 cp = get_property(dn, "compatible", &length);
@@ -465,7 +413,7 @@ struct bus_type vio_bus_type = {
465*/ 413*/
466const void *vio_get_attribute(struct vio_dev *vdev, char *which, int *length) 414const void *vio_get_attribute(struct vio_dev *vdev, char *which, int *length)
467{ 415{
468 return get_property(vdev->dev.platform_data, which, length); 416 return get_property(vdev->dev.archdata.of_node, which, length);
469} 417}
470EXPORT_SYMBOL(vio_get_attribute); 418EXPORT_SYMBOL(vio_get_attribute);
471 419
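With vio_dma_ops gone, vio devices share the generic dma_iommu_ops and carry only their TCE table in dev.archdata.dma_data. A minimal standalone sketch of that one-shared-ops-table, per-device-data dispatch; all names below are illustrative, not the kernel's:

/* One shared ops structure for every device; only the per-device data
 * pointer (here standing in for the IOMMU/TCE table) differs.
 */
#include <stdio.h>

struct fake_dma_ops;

struct fake_iommu_table {
	const char *name;
	unsigned long entries;
};

struct fake_device {
	const char *name;
	const struct fake_dma_ops *dma_ops;	/* shared by all devices */
	void *dma_data;				/* per-device: the table */
};

struct fake_dma_ops {
	void (*map_single)(struct fake_device *dev, void *vaddr);
};

static void iommu_style_map_single(struct fake_device *dev, void *vaddr)
{
	struct fake_iommu_table *tbl = dev->dma_data;

	printf("%s: mapping %p through table '%s' (%lu entries)\n",
	       dev->name, vaddr, tbl->name, tbl->entries);
}

static const struct fake_dma_ops generic_iommu_ops = {
	.map_single = iommu_style_map_single,
};

int main(void)
{
	struct fake_iommu_table vio_tbl = { "vio-tce", 1024 };
	struct fake_device vdev = { "vdev0", &generic_iommu_ops, &vio_tbl };
	int buf;

	/* every device points at generic_iommu_ops; only dma_data varies */
	vdev.dma_ops->map_single(&vdev, &buf);
	return 0;
}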