Diffstat (limited to 'arch')
-rw-r--r--	arch/powerpc/Kconfig	6
-rw-r--r--	arch/powerpc/Makefile	11
-rw-r--r--	arch/powerpc/include/asm/device.h	11
-rw-r--r--	arch/powerpc/include/asm/dma-mapping.h	27
-rw-r--r--	arch/powerpc/include/asm/iommu.h	10
-rw-r--r--	arch/powerpc/include/asm/pmc.h	2
-rw-r--r--	arch/powerpc/include/asm/pte-40x.h	1
-rw-r--r--	arch/powerpc/include/asm/pte-8xx.h	1
-rw-r--r--	arch/powerpc/include/asm/pte-common.h	5
-rw-r--r--	arch/powerpc/kernel/dma-iommu.c	16
-rw-r--r--	arch/powerpc/kernel/dma.c	15
-rw-r--r--	arch/powerpc/kernel/exceptions-64e.S	1
-rw-r--r--	arch/powerpc/kernel/pci-common.c	2
-rw-r--r--	arch/powerpc/kernel/process.c	17
-rw-r--r--	arch/powerpc/kernel/prom_init.c	3
-rw-r--r--	arch/powerpc/kernel/vdso.c	14
-rw-r--r--	arch/powerpc/kernel/vio.c	4
-rw-r--r--	arch/powerpc/mm/pgtable.c	19
-rw-r--r--	arch/powerpc/mm/tlb_low_64e.S	1
-rw-r--r--	arch/powerpc/platforms/cell/beat_iommu.c	2
-rw-r--r--	arch/powerpc/platforms/cell/iommu.c	9
-rw-r--r--	arch/powerpc/platforms/iseries/iommu.c	2
-rw-r--r--	arch/powerpc/platforms/pasemi/iommu.c	2
-rw-r--r--	arch/powerpc/platforms/pseries/iommu.c	8
-rwxr-xr-x	arch/powerpc/relocs_check.pl	56
-rw-r--r--	arch/powerpc/sysdev/dart_iommu.c	2
-rw-r--r--	arch/powerpc/xmon/xmon.c	16
27 files changed, 204 insertions, 59 deletions
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 4fd479059d65..10a0a5488a44 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -385,9 +385,15 @@ config NUMA
 
 config NODES_SHIFT
 	int
+	default "8" if PPC64
 	default "4"
 	depends on NEED_MULTIPLE_NODES
 
+config MAX_ACTIVE_REGIONS
+	int
+	default "256" if PPC64
+	default "32"
+
 config ARCH_SELECT_MEMORY_MODEL
 	def_bool y
 	depends on PPC64
diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
index aacf629c1a9f..1a54a3b3a3fa 100644
--- a/arch/powerpc/Makefile
+++ b/arch/powerpc/Makefile
@@ -164,6 +164,17 @@ PHONY += $(BOOT_TARGETS)
 
 boot := arch/$(ARCH)/boot
 
+ifeq ($(CONFIG_RELOCATABLE),y)
+quiet_cmd_relocs_check = CALL $<
+      cmd_relocs_check = perl $< "$(OBJDUMP)" "$(obj)/vmlinux"
+
+PHONY += relocs_check
+relocs_check: arch/powerpc/relocs_check.pl vmlinux
+	$(call cmd,relocs_check)
+
+zImage: relocs_check
+endif
+
 $(BOOT_TARGETS): vmlinux
 	$(Q)$(MAKE) ARCH=ppc64 $(build)=$(boot) $(patsubst %,$(boot)/%,$@)
 
diff --git a/arch/powerpc/include/asm/device.h b/arch/powerpc/include/asm/device.h
index 9dade15d1ab4..6d94d27ed850 100644
--- a/arch/powerpc/include/asm/device.h
+++ b/arch/powerpc/include/asm/device.h
@@ -15,7 +15,16 @@ struct dev_archdata {
 
 	/* DMA operations on that device */
 	struct dma_map_ops *dma_ops;
-	void *dma_data;
+
+	/*
+	 * When an iommu is in use, dma_data is used as a ptr to the base of the
+	 * iommu_table. Otherwise, it is a simple numerical offset.
+	 */
+	union {
+		dma_addr_t dma_offset;
+		void *iommu_table_base;
+	} dma_data;
+
 #ifdef CONFIG_SWIOTLB
 	dma_addr_t max_direct_dma_addr;
 #endif
diff --git a/arch/powerpc/include/asm/dma-mapping.h b/arch/powerpc/include/asm/dma-mapping.h
index cb2ca41dd526..e281daebddca 100644
--- a/arch/powerpc/include/asm/dma-mapping.h
+++ b/arch/powerpc/include/asm/dma-mapping.h
@@ -26,7 +26,6 @@ extern void *dma_direct_alloc_coherent(struct device *dev, size_t size,
 extern void dma_direct_free_coherent(struct device *dev, size_t size,
 				     void *vaddr, dma_addr_t dma_handle);
 
-extern unsigned long get_dma_direct_offset(struct device *dev);
 
 #ifdef CONFIG_NOT_COHERENT_CACHE
 /*
@@ -90,6 +89,28 @@ static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
 	dev->archdata.dma_ops = ops;
 }
 
+/*
+ * get_dma_offset()
+ *
+ * Get the dma offset on configurations where the dma address can be determined
+ * from the physical address by looking at a simple offset. Direct dma and
+ * swiotlb use this function, but it is typically not used by implementations
+ * with an iommu.
+ */
+static inline dma_addr_t get_dma_offset(struct device *dev)
+{
+	if (dev)
+		return dev->archdata.dma_data.dma_offset;
+
+	return PCI_DRAM_OFFSET;
+}
+
+static inline void set_dma_offset(struct device *dev, dma_addr_t off)
+{
+	if (dev)
+		dev->archdata.dma_data.dma_offset = off;
+}
+
 /* this will be removed soon */
 #define flush_write_buffers()
 
@@ -181,12 +202,12 @@ static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
 
 static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
 {
-	return paddr + get_dma_direct_offset(dev);
+	return paddr + get_dma_offset(dev);
 }
 
 static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
 {
-	return daddr - get_dma_direct_offset(dev);
+	return daddr - get_dma_offset(dev);
 }
 
 #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
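
The get_dma_offset()/set_dma_offset() pair added above replaces the old get_dma_direct_offset() and reads the per-device offset out of the dma_data union introduced in asm/device.h. The sketch below is a minimal, standalone illustration in plain C (not kernel code: the structures are cut down to the fields this series touches, and the 0x80000000 offset plus the PCI_DRAM_OFFSET placeholder are purely illustrative) of how phys_to_dma()/dma_to_phys() round-trip an address through that offset.

#include <stdio.h>
#include <stdint.h>

typedef uint64_t dma_addr_t;
typedef uint64_t phys_addr_t;

/* Cut-down mock of the powerpc dev_archdata from this series */
struct device {
	struct {
		union {
			dma_addr_t dma_offset;     /* direct / swiotlb case */
			void *iommu_table_base;    /* iommu case */
		} dma_data;
	} archdata;
};

#define PCI_DRAM_OFFSET 0	/* placeholder value for this mock only */

static dma_addr_t get_dma_offset(struct device *dev)
{
	if (dev)
		return dev->archdata.dma_data.dma_offset;

	return PCI_DRAM_OFFSET;
}

static void set_dma_offset(struct device *dev, dma_addr_t off)
{
	if (dev)
		dev->archdata.dma_data.dma_offset = off;
}

/* Same shape as the phys_to_dma()/dma_to_phys() hunk above */
static dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
{
	return paddr + get_dma_offset(dev);
}

static phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
{
	return daddr - get_dma_offset(dev);
}

int main(void)
{
	struct device dev;
	phys_addr_t phys = 0x1000;

	set_dma_offset(&dev, 0x80000000ull);	/* illustrative offset */
	dma_addr_t bus = phys_to_dma(&dev, phys);
	printf("phys 0x%llx -> bus 0x%llx -> phys 0x%llx\n",
	       (unsigned long long)phys,
	       (unsigned long long)bus,
	       (unsigned long long)dma_to_phys(&dev, bus));
	return 0;
}

Platform code is expected to go through the setter only: the pci-common.c, beat_iommu.c and cell/iommu.c hunks later in this diff all switch from poking archdata.dma_data directly to calling set_dma_offset().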
diff --git a/arch/powerpc/include/asm/iommu.h b/arch/powerpc/include/asm/iommu.h
index 7464c0daddd1..edfc9803ec91 100644
--- a/arch/powerpc/include/asm/iommu.h
+++ b/arch/powerpc/include/asm/iommu.h
@@ -70,6 +70,16 @@ struct iommu_table {
 
 struct scatterlist;
 
+static inline void set_iommu_table_base(struct device *dev, void *base)
+{
+	dev->archdata.dma_data.iommu_table_base = base;
+}
+
+static inline void *get_iommu_table_base(struct device *dev)
+{
+	return dev->archdata.dma_data.iommu_table_base;
+}
+
 /* Frees table for an individual device node */
 extern void iommu_free_table(struct iommu_table *tbl, const char *node_name);
 
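
set_iommu_table_base()/get_iommu_table_base() are the other face of the same dma_data union: platform setup code stores the iommu_table pointer once, and dma-iommu.c reads it back instead of dereferencing archdata.dma_data itself (see the dma-iommu.c and pseries/iommu.c hunks later in this diff). Below is a hedged, standalone sketch in plain C, not kernel code: iommu_table is reduced to the one field the dma_supported check looks at, and mock_dma_supported() only mirrors the shape of that check.

#include <stdio.h>

/* Cut-down mocks of the kernel structures this series touches */
struct iommu_table {
	unsigned long it_offset;	/* first usable entry, as in the real table */
};

struct device {
	struct {
		union {
			unsigned long long dma_offset;
			void *iommu_table_base;
		} dma_data;
	} archdata;
};

/* Same shape as the helpers added to asm/iommu.h above */
static void set_iommu_table_base(struct device *dev, void *base)
{
	dev->archdata.dma_data.iommu_table_base = base;
}

static void *get_iommu_table_base(struct device *dev)
{
	return dev->archdata.dma_data.iommu_table_base;
}

/* Rough shape of dma_iommu_dma_supported() after the conversion */
static int mock_dma_supported(struct device *dev, unsigned long long mask)
{
	struct iommu_table *tbl = get_iommu_table_base(dev);

	return tbl && tbl->it_offset <= mask;
}

int main(void)
{
	struct iommu_table tbl = { .it_offset = 0 };
	struct device dev;

	set_iommu_table_base(&dev, &tbl);	/* what platform setup now does */
	printf("32-bit DMA mask usable: %d\n",
	       mock_dma_supported(&dev, 0xffffffffull));
	return 0;
}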
diff --git a/arch/powerpc/include/asm/pmc.h b/arch/powerpc/include/asm/pmc.h
index ccc68b50d05d..5a9ede4962cb 100644
--- a/arch/powerpc/include/asm/pmc.h
+++ b/arch/powerpc/include/asm/pmc.h
@@ -29,7 +29,7 @@ int reserve_pmc_hardware(perf_irq_t new_perf_irq);
 void release_pmc_hardware(void);
 void ppc_enable_pmcs(void);
 
-#ifdef CONFIG_PPC64
+#ifdef CONFIG_PPC_BOOK3S_64
 #include <asm/lppaca.h>
 
 static inline void ppc_set_pmu_inuse(int inuse)
diff --git a/arch/powerpc/include/asm/pte-40x.h b/arch/powerpc/include/asm/pte-40x.h
index 6c3e1f4378d4..ec0b0b0d1df9 100644
--- a/arch/powerpc/include/asm/pte-40x.h
+++ b/arch/powerpc/include/asm/pte-40x.h
@@ -43,6 +43,7 @@
 #define _PAGE_NO_CACHE	0x004	/* I: caching is inhibited */
 #define _PAGE_WRITETHRU	0x008	/* W: caching is write-through */
 #define _PAGE_USER	0x010	/* matches one of the zone permission bits */
+#define _PAGE_SPECIAL	0x020	/* software: Special page */
 #define _PAGE_RW	0x040	/* software: Writes permitted */
 #define _PAGE_DIRTY	0x080	/* software: dirty page */
 #define _PAGE_HWWRITE	0x100	/* hardware: Dirty & RW, set in exception */
diff --git a/arch/powerpc/include/asm/pte-8xx.h b/arch/powerpc/include/asm/pte-8xx.h
index 94e979718dcf..dd5ea95fe61e 100644
--- a/arch/powerpc/include/asm/pte-8xx.h
+++ b/arch/powerpc/include/asm/pte-8xx.h
@@ -32,6 +32,7 @@
 #define _PAGE_FILE	0x0002	/* when !present: nonlinear file mapping */
 #define _PAGE_NO_CACHE	0x0002	/* I: cache inhibit */
 #define _PAGE_SHARED	0x0004	/* No ASID (context) compare */
+#define _PAGE_SPECIAL	0x0008	/* SW entry, forced to 0 by the TLB miss */
 
 /* These five software bits must be masked out when the entry is loaded
  * into the TLB.
diff --git a/arch/powerpc/include/asm/pte-common.h b/arch/powerpc/include/asm/pte-common.h
index c3b65076a263..f2b370180a09 100644
--- a/arch/powerpc/include/asm/pte-common.h
+++ b/arch/powerpc/include/asm/pte-common.h
@@ -25,9 +25,6 @@
 #ifndef _PAGE_WRITETHRU
 #define _PAGE_WRITETHRU	0
 #endif
-#ifndef _PAGE_SPECIAL
-#define _PAGE_SPECIAL	0
-#endif
 #ifndef _PAGE_4K_PFN
 #define _PAGE_4K_PFN	0
 #endif
@@ -179,7 +176,5 @@ extern unsigned long bad_call_to_PMD_PAGE_SIZE(void);
 #define HAVE_PAGE_AGP
 
 /* Advertise support for _PAGE_SPECIAL */
-#ifdef _PAGE_SPECIAL
 #define __HAVE_ARCH_PTE_SPECIAL
-#endif
 
diff --git a/arch/powerpc/kernel/dma-iommu.c b/arch/powerpc/kernel/dma-iommu.c
index 87ddb3fb948c..37771a518119 100644
--- a/arch/powerpc/kernel/dma-iommu.c
+++ b/arch/powerpc/kernel/dma-iommu.c
@@ -18,7 +18,7 @@
 static void *dma_iommu_alloc_coherent(struct device *dev, size_t size,
 				      dma_addr_t *dma_handle, gfp_t flag)
 {
-	return iommu_alloc_coherent(dev, dev->archdata.dma_data, size,
+	return iommu_alloc_coherent(dev, get_iommu_table_base(dev), size,
 				    dma_handle, device_to_mask(dev), flag,
 				    dev_to_node(dev));
 }
@@ -26,7 +26,7 @@ static void *dma_iommu_alloc_coherent(struct device *dev, size_t size,
 static void dma_iommu_free_coherent(struct device *dev, size_t size,
 				    void *vaddr, dma_addr_t dma_handle)
 {
-	iommu_free_coherent(dev->archdata.dma_data, size, vaddr, dma_handle);
+	iommu_free_coherent(get_iommu_table_base(dev), size, vaddr, dma_handle);
 }
 
 /* Creates TCEs for a user provided buffer. The user buffer must be
@@ -39,8 +39,8 @@ static dma_addr_t dma_iommu_map_page(struct device *dev, struct page *page,
 			     enum dma_data_direction direction,
 			     struct dma_attrs *attrs)
 {
-	return iommu_map_page(dev, dev->archdata.dma_data, page, offset, size,
-			      device_to_mask(dev), direction, attrs);
+	return iommu_map_page(dev, get_iommu_table_base(dev), page, offset,
+			      size, device_to_mask(dev), direction, attrs);
 }
 
 
@@ -48,7 +48,7 @@ static void dma_iommu_unmap_page(struct device *dev, dma_addr_t dma_handle,
 			 size_t size, enum dma_data_direction direction,
 			 struct dma_attrs *attrs)
 {
-	iommu_unmap_page(dev->archdata.dma_data, dma_handle, size, direction,
+	iommu_unmap_page(get_iommu_table_base(dev), dma_handle, size, direction,
 			 attrs);
 }
 
@@ -57,7 +57,7 @@ static int dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist,
 			    int nelems, enum dma_data_direction direction,
 			    struct dma_attrs *attrs)
 {
-	return iommu_map_sg(dev, dev->archdata.dma_data, sglist, nelems,
+	return iommu_map_sg(dev, get_iommu_table_base(dev), sglist, nelems,
 			    device_to_mask(dev), direction, attrs);
 }
 
@@ -65,14 +65,14 @@ static void dma_iommu_unmap_sg(struct device *dev, struct scatterlist *sglist,
 		int nelems, enum dma_data_direction direction,
 		struct dma_attrs *attrs)
 {
-	iommu_unmap_sg(dev->archdata.dma_data, sglist, nelems, direction,
+	iommu_unmap_sg(get_iommu_table_base(dev), sglist, nelems, direction,
 		       attrs);
 }
 
 /* We support DMA to/from any memory page via the iommu */
 static int dma_iommu_dma_supported(struct device *dev, u64 mask)
 {
-	struct iommu_table *tbl = dev->archdata.dma_data;
+	struct iommu_table *tbl = get_iommu_table_base(dev);
 
 	if (!tbl || tbl->it_offset > mask) {
 		printk(KERN_INFO
diff --git a/arch/powerpc/kernel/dma.c b/arch/powerpc/kernel/dma.c
index 21b784d7e7d0..6215062caf8c 100644
--- a/arch/powerpc/kernel/dma.c
+++ b/arch/powerpc/kernel/dma.c
@@ -21,13 +21,6 @@
  * default the offset is PCI_DRAM_OFFSET.
  */
 
-unsigned long get_dma_direct_offset(struct device *dev)
-{
-	if (dev)
-		return (unsigned long)dev->archdata.dma_data;
-
-	return PCI_DRAM_OFFSET;
-}
 
 void *dma_direct_alloc_coherent(struct device *dev, size_t size,
 				dma_addr_t *dma_handle, gfp_t flag)
@@ -37,7 +30,7 @@ void *dma_direct_alloc_coherent(struct device *dev, size_t size,
 	ret = __dma_alloc_coherent(dev, size, dma_handle, flag);
 	if (ret == NULL)
 		return NULL;
-	*dma_handle += get_dma_direct_offset(dev);
+	*dma_handle += get_dma_offset(dev);
 	return ret;
 #else
 	struct page *page;
@@ -51,7 +44,7 @@ void *dma_direct_alloc_coherent(struct device *dev, size_t size,
 		return NULL;
 	ret = page_address(page);
 	memset(ret, 0, size);
-	*dma_handle = virt_to_abs(ret) + get_dma_direct_offset(dev);
+	*dma_handle = virt_to_abs(ret) + get_dma_offset(dev);
 
 	return ret;
 #endif
@@ -75,7 +68,7 @@ static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
 	int i;
 
 	for_each_sg(sgl, sg, nents, i) {
-		sg->dma_address = sg_phys(sg) + get_dma_direct_offset(dev);
+		sg->dma_address = sg_phys(sg) + get_dma_offset(dev);
 		sg->dma_length = sg->length;
 		__dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
 	}
@@ -110,7 +103,7 @@ static inline dma_addr_t dma_direct_map_page(struct device *dev,
 {
 	BUG_ON(dir == DMA_NONE);
 	__dma_sync_page(page, offset, size, dir);
-	return page_to_phys(page) + offset + get_dma_direct_offset(dev);
+	return page_to_phys(page) + offset + get_dma_offset(dev);
 }
 
 static inline void dma_direct_unmap_page(struct device *dev,
diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
index 9048f96237f6..24dcc0ecf246 100644
--- a/arch/powerpc/kernel/exceptions-64e.S
+++ b/arch/powerpc/kernel/exceptions-64e.S
@@ -17,7 +17,6 @@
 #include <asm/cputable.h>
 #include <asm/setup.h>
 #include <asm/thread_info.h>
-#include <asm/reg.h>
 #include <asm/exception-64e.h>
 #include <asm/bug.h>
 #include <asm/irqflags.h>
diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
index e9f4840096b3..bb8209e34931 100644
--- a/arch/powerpc/kernel/pci-common.c
+++ b/arch/powerpc/kernel/pci-common.c
@@ -1117,7 +1117,7 @@ void __devinit pcibios_setup_bus_devices(struct pci_bus *bus)
 
 		/* Hook up default DMA ops */
 		sd->dma_ops = pci_dma_ops;
-		sd->dma_data = (void *)PCI_DRAM_OFFSET;
+		set_dma_offset(&dev->dev, PCI_DRAM_OFFSET);
 
 		/* Additional platform DMA/iommu setup */
 		if (ppc_md.pci_dma_dev_setup)
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 0a3216433051..1168c5f440ab 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -1165,7 +1165,22 @@ static inline unsigned long brk_rnd(void)
 
 unsigned long arch_randomize_brk(struct mm_struct *mm)
 {
-	unsigned long ret = PAGE_ALIGN(mm->brk + brk_rnd());
+	unsigned long base = mm->brk;
+	unsigned long ret;
+
+#ifdef CONFIG_PPC64
+	/*
+	 * If we are using 1TB segments and we are allowed to randomise
+	 * the heap, we can put it above 1TB so it is backed by a 1TB
+	 * segment. Otherwise the heap will be in the bottom 1TB
+	 * which always uses 256MB segments and this may result in a
+	 * performance penalty.
+	 */
+	if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
+		base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
+#endif
+
+	ret = PAGE_ALIGN(base + brk_rnd());
 
 	if (ret < mm->brk)
 		return mm->brk;
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index 864334b337a3..bafac2e41ae1 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -800,7 +800,7 @@ static void __init prom_send_capabilities(void)
 	root = call_prom("open", 1, 1, ADDR("/"));
 	if (root != 0) {
 		/* try calling the ibm,client-architecture-support method */
-		prom_printf("Calling ibm,client-architecture...");
+		prom_printf("Calling ibm,client-architecture-support...");
 		if (call_prom_ret("call-method", 3, 2, &ret,
 				  ADDR("ibm,client-architecture-support"),
 				  root,
@@ -814,6 +814,7 @@ static void __init prom_send_capabilities(void)
 			return;
 		}
 		call_prom("close", 1, 0, root);
+		prom_printf(" not implemented\n");
 	}
 
 	/* no ibm,client-architecture-support call, try the old way */
diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
index 3faaf29bdb29..94e2df3cae07 100644
--- a/arch/powerpc/kernel/vdso.c
+++ b/arch/powerpc/kernel/vdso.c
@@ -241,6 +241,13 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
 	}
 
 	/*
+	 * Put vDSO base into mm struct. We need to do this before calling
+	 * install_special_mapping or the perf counter mmap tracking code
+	 * will fail to recognise it as a vDSO (since arch_vma_name fails).
+	 */
+	current->mm->context.vdso_base = vdso_base;
+
+	/*
 	 * our vma flags don't have VM_WRITE so by default, the process isn't
 	 * allowed to write those pages.
 	 * gdb can break that with ptrace interface, and thus trigger COW on
@@ -260,11 +267,10 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
 				     VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC|
 				     VM_ALWAYSDUMP,
 				     vdso_pagelist);
-	if (rc)
+	if (rc) {
+		current->mm->context.vdso_base = 0;
 		goto fail_mmapsem;
-
-	/* Put vDSO base into mm struct */
-	current->mm->context.vdso_base = vdso_base;
+	}
 
 	up_write(&mm->mmap_sem);
 	return 0;
diff --git a/arch/powerpc/kernel/vio.c b/arch/powerpc/kernel/vio.c
index bc7b41edbdfc..77f64218abf3 100644
--- a/arch/powerpc/kernel/vio.c
+++ b/arch/powerpc/kernel/vio.c
@@ -1054,6 +1054,8 @@ static struct iommu_table *vio_build_iommu_table(struct vio_dev *dev)
 		return NULL;
 
 	tbl = kmalloc(sizeof(*tbl), GFP_KERNEL);
+	if (tbl == NULL)
+		return NULL;
 
 	of_parse_dma_window(dev->dev.archdata.of_node, dma_window,
 			    &tbl->it_index, &offset, &size);
@@ -1233,7 +1235,7 @@ struct vio_dev *vio_register_device_node(struct device_node *of_node)
 		vio_cmo_set_dma_ops(viodev);
 	else
 		viodev->dev.archdata.dma_ops = &dma_iommu_ops;
-	viodev->dev.archdata.dma_data = vio_build_iommu_table(viodev);
+	set_iommu_table_base(&viodev->dev, vio_build_iommu_table(viodev));
 	set_dev_node(&viodev->dev, of_node_to_nid(of_node));
 
 	/* init generic 'struct device' fields: */
diff --git a/arch/powerpc/mm/pgtable.c b/arch/powerpc/mm/pgtable.c
index 83f1551ec2c9..53040931de32 100644
--- a/arch/powerpc/mm/pgtable.c
+++ b/arch/powerpc/mm/pgtable.c
@@ -30,6 +30,8 @@
 #include <asm/tlbflush.h>
 #include <asm/tlb.h>
 
+#include "mmu_decl.h"
+
 DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
 
 #ifdef CONFIG_SMP
@@ -166,7 +168,7 @@ struct page * maybe_pte_to_page(pte_t pte)
  * support falls into the same category.
  */
 
-static pte_t set_pte_filter(pte_t pte)
+static pte_t set_pte_filter(pte_t pte, unsigned long addr)
 {
 	pte = __pte(pte_val(pte) & ~_PAGE_HPTEFLAGS);
 	if (pte_looks_normal(pte) && !(cpu_has_feature(CPU_FTR_COHERENT_ICACHE) ||
@@ -175,6 +177,17 @@
 		if (!pg)
 			return pte;
 		if (!test_bit(PG_arch_1, &pg->flags)) {
+#ifdef CONFIG_8xx
+			/* On 8xx, cache control instructions (particularly
+			 * "dcbst" from flush_dcache_icache) fault as write
+			 * operation if there is an unpopulated TLB entry
+			 * for the address in question. To workaround that,
+			 * we invalidate the TLB here, thus avoiding dcbst
+			 * misbehaviour.
+			 */
+			/* 8xx doesn't care about PID, size or ind args */
+			_tlbil_va(addr, 0, 0, 0);
+#endif /* CONFIG_8xx */
 			flush_dcache_icache_page(pg);
 			set_bit(PG_arch_1, &pg->flags);
 		}
@@ -194,7 +207,7 @@ static pte_t set_access_flags_filter(pte_t pte, struct vm_area_struct *vma,
  * as we don't have two bits to spare for _PAGE_EXEC and _PAGE_HWEXEC so
  * instead we "filter out" the exec permission for non clean pages.
  */
-static pte_t set_pte_filter(pte_t pte)
+static pte_t set_pte_filter(pte_t pte, unsigned long addr)
 {
 	struct page *pg;
 
@@ -276,7 +289,7 @@ void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
 	 * this context might not have been activated yet when this
 	 * is called.
 	 */
-	pte = set_pte_filter(pte);
+	pte = set_pte_filter(pte, addr);
 
 	/* Perform the setting of the PTE */
 	__set_pte_at(mm, addr, ptep, pte, 0);
diff --git a/arch/powerpc/mm/tlb_low_64e.S b/arch/powerpc/mm/tlb_low_64e.S
index ef1cccf71173..f288279e679d 100644
--- a/arch/powerpc/mm/tlb_low_64e.S
+++ b/arch/powerpc/mm/tlb_low_64e.S
@@ -18,7 +18,6 @@
 #include <asm/asm-offsets.h>
 #include <asm/cputable.h>
 #include <asm/pgtable.h>
-#include <asm/reg.h>
 #include <asm/exception-64e.h>
 #include <asm/ppc-opcode.h>
 
diff --git a/arch/powerpc/platforms/cell/beat_iommu.c b/arch/powerpc/platforms/cell/beat_iommu.c
index 93b0efddd658..39d361c5c6d2 100644
--- a/arch/powerpc/platforms/cell/beat_iommu.c
+++ b/arch/powerpc/platforms/cell/beat_iommu.c
@@ -77,7 +77,7 @@ static void __init celleb_init_direct_mapping(void)
 static void celleb_dma_dev_setup(struct device *dev)
 {
 	dev->archdata.dma_ops = get_pci_dma_ops();
-	dev->archdata.dma_data = (void *)celleb_dma_direct_offset;
+	set_dma_offset(dev, celleb_dma_direct_offset);
 }
 
 static void celleb_pci_dma_dev_setup(struct pci_dev *pdev)
diff --git a/arch/powerpc/platforms/cell/iommu.c b/arch/powerpc/platforms/cell/iommu.c
index 416db17eb18f..ca5bfdfe47f2 100644
--- a/arch/powerpc/platforms/cell/iommu.c
+++ b/arch/powerpc/platforms/cell/iommu.c
@@ -657,15 +657,13 @@ static void cell_dma_dev_setup_fixed(struct device *dev);
 
 static void cell_dma_dev_setup(struct device *dev)
 {
-	struct dev_archdata *archdata = &dev->archdata;
-
 	/* Order is important here, these are not mutually exclusive */
 	if (get_dma_ops(dev) == &dma_iommu_fixed_ops)
 		cell_dma_dev_setup_fixed(dev);
 	else if (get_pci_dma_ops() == &dma_iommu_ops)
-		archdata->dma_data = cell_get_iommu_table(dev);
+		set_iommu_table_base(dev, cell_get_iommu_table(dev));
 	else if (get_pci_dma_ops() == &dma_direct_ops)
-		archdata->dma_data = (void *)cell_dma_direct_offset;
+		set_dma_offset(dev, cell_dma_direct_offset);
 	else
 		BUG();
 }
@@ -973,11 +971,10 @@ static int dma_set_mask_and_switch(struct device *dev, u64 dma_mask)
 
 static void cell_dma_dev_setup_fixed(struct device *dev)
 {
-	struct dev_archdata *archdata = &dev->archdata;
 	u64 addr;
 
 	addr = cell_iommu_get_fixed_address(dev) + dma_iommu_fixed_base;
-	archdata->dma_data = (void *)addr;
+	set_dma_offset(dev, addr);
 
 	dev_dbg(dev, "iommu: fixed addr = %llx\n", addr);
 }
diff --git a/arch/powerpc/platforms/iseries/iommu.c b/arch/powerpc/platforms/iseries/iommu.c
index 6c1e1011959e..9d53cb481a7c 100644
--- a/arch/powerpc/platforms/iseries/iommu.c
+++ b/arch/powerpc/platforms/iseries/iommu.c
@@ -193,7 +193,7 @@ static void pci_dma_dev_setup_iseries(struct pci_dev *pdev)
 		pdn->iommu_table = iommu_init_table(tbl, -1);
 	else
 		kfree(tbl);
-	pdev->dev.archdata.dma_data = pdn->iommu_table;
+	set_iommu_table_base(&pdev->dev, pdn->iommu_table);
 }
 #else
 #define pci_dma_dev_setup_iseries	NULL
diff --git a/arch/powerpc/platforms/pasemi/iommu.c b/arch/powerpc/platforms/pasemi/iommu.c
index a0ff03a3d8da..7b1d608ea3c8 100644
--- a/arch/powerpc/platforms/pasemi/iommu.c
+++ b/arch/powerpc/platforms/pasemi/iommu.c
@@ -189,7 +189,7 @@ static void pci_dma_dev_setup_pasemi(struct pci_dev *dev)
 	}
 #endif
 
-	dev->dev.archdata.dma_data = &iommu_table_iobmap;
+	set_iommu_table_base(&dev->dev, &iommu_table_iobmap);
 }
 
 static void pci_dma_bus_setup_null(struct pci_bus *b) { }
diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
index 661c8e02bcba..1a0000a4b6d6 100644
--- a/arch/powerpc/platforms/pseries/iommu.c
+++ b/arch/powerpc/platforms/pseries/iommu.c
@@ -482,7 +482,7 @@ static void pci_dma_dev_setup_pSeries(struct pci_dev *dev)
 					   phb->node);
 		iommu_table_setparms(phb, dn, tbl);
 		PCI_DN(dn)->iommu_table = iommu_init_table(tbl, phb->node);
-		dev->dev.archdata.dma_data = PCI_DN(dn)->iommu_table;
+		set_iommu_table_base(&dev->dev, PCI_DN(dn)->iommu_table);
 		return;
 	}
 
@@ -494,7 +494,7 @@ static void pci_dma_dev_setup_pSeries(struct pci_dev *dev)
 		dn = dn->parent;
 
 	if (dn && PCI_DN(dn))
-		dev->dev.archdata.dma_data = PCI_DN(dn)->iommu_table;
+		set_iommu_table_base(&dev->dev, PCI_DN(dn)->iommu_table);
 	else
 		printk(KERN_WARNING "iommu: Device %s has no iommu table\n",
 		       pci_name(dev));
@@ -538,7 +538,7 @@ static void pci_dma_dev_setup_pSeriesLP(struct pci_dev *dev)
 	 */
 	if (dma_window == NULL || pdn->parent == NULL) {
 		pr_debug(" no dma window for device, linking to parent\n");
-		dev->dev.archdata.dma_data = PCI_DN(pdn)->iommu_table;
+		set_iommu_table_base(&dev->dev, PCI_DN(pdn)->iommu_table);
 		return;
 	}
 
@@ -554,7 +554,7 @@ static void pci_dma_dev_setup_pSeriesLP(struct pci_dev *dev)
 		pr_debug(" found DMA window, table: %p\n", pci->iommu_table);
 	}
 
-	dev->dev.archdata.dma_data = pci->iommu_table;
+	set_iommu_table_base(&dev->dev, pci->iommu_table);
 }
 #else /* CONFIG_PCI */
 #define pci_dma_bus_setup_pSeries	NULL
diff --git a/arch/powerpc/relocs_check.pl b/arch/powerpc/relocs_check.pl
new file mode 100755
index 000000000000..d2571096c3e9
--- /dev/null
+++ b/arch/powerpc/relocs_check.pl
@@ -0,0 +1,56 @@
+#!/usr/bin/perl
+
+# Copyright © 2009 IBM Corporation
+
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version
+# 2 of the License, or (at your option) any later version.
+
+# This script checks the relocations of a vmlinux for "suspicious"
+# relocations.
+
+use strict;
+use warnings;
+
+if ($#ARGV != 1) {
+	die "$0 [path to objdump] [path to vmlinux]\n";
+}
+
+# Have Kbuild supply the path to objdump so we handle cross compilation.
+my $objdump = shift;
+my $vmlinux = shift;
+my $bad_relocs_count = 0;
+my $bad_relocs = "";
+my $old_binutils = 0;
+
+open(FD, "$objdump -R $vmlinux|") or die;
+while (<FD>) {
+	study $_;
+
+	# Only look at relocation lines.
+	next if (!/\s+R_/);
+
+	# These relocations are okay
+	next if (/R_PPC64_RELATIVE/ or /R_PPC64_NONE/ or
+		 /R_PPC64_ADDR64\s+mach_/);
+
+	# If we see this type of relocation it's an indication that
+	# we /may/ be using an old version of binutils.
+	if (/R_PPC64_UADDR64/) {
+		$old_binutils++;
+	}
+
+	$bad_relocs_count++;
+	$bad_relocs .= $_;
+}
+
+if ($bad_relocs_count) {
+	print "WARNING: $bad_relocs_count bad relocations\n";
+	print $bad_relocs;
+}
+
+if ($old_binutils) {
+	print "WARNING: You need binutils >= 2.19 to build a ".
+	      "CONFIG_RELOCATABLE kernel\n";
+}
diff --git a/arch/powerpc/sysdev/dart_iommu.c b/arch/powerpc/sysdev/dart_iommu.c
index 89639ecbf381..ae3c4db86fe8 100644
--- a/arch/powerpc/sysdev/dart_iommu.c
+++ b/arch/powerpc/sysdev/dart_iommu.c
@@ -297,7 +297,7 @@ static void pci_dma_dev_setup_dart(struct pci_dev *dev)
 	/* We only have one iommu table on the mac for now, which makes
 	 * things simple. Setup all PCI devices to point to this table
 	 */
-	dev->dev.archdata.dma_data = &iommu_table_dart;
+	set_iommu_table_base(&dev->dev, &iommu_table_dart);
 }
 
 static void pci_dma_bus_setup_dart(struct pci_bus *bus)
diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
index 0e09a45ac79a..c6f0a71b405e 100644
--- a/arch/powerpc/xmon/xmon.c
+++ b/arch/powerpc/xmon/xmon.c
@@ -335,6 +335,16 @@ int cpus_are_in_xmon(void)
 }
 #endif
 
+static inline int unrecoverable_excp(struct pt_regs *regs)
+{
+#ifdef CONFIG_4xx
+	/* We have no MSR_RI bit on 4xx, so we simply return false */
+	return 0;
+#else
+	return ((regs->msr & MSR_RI) == 0);
+#endif
+}
+
 static int xmon_core(struct pt_regs *regs, int fromipi)
 {
 	int cmd = 0;
@@ -388,7 +398,7 @@ static int xmon_core(struct pt_regs *regs, int fromipi)
 	bp = NULL;
 	if ((regs->msr & (MSR_IR|MSR_PR|MSR_SF)) == (MSR_IR|MSR_SF))
 		bp = at_breakpoint(regs->nip);
-	if (bp || (regs->msr & MSR_RI) == 0)
+	if (bp || unrecoverable_excp(regs))
 		fromipi = 0;
 
 	if (!fromipi) {
@@ -399,7 +409,7 @@ static int xmon_core(struct pt_regs *regs, int fromipi)
 			       cpu, BP_NUM(bp));
 			xmon_print_symbol(regs->nip, " ", ")\n");
 		}
-		if ((regs->msr & MSR_RI) == 0)
+		if (unrecoverable_excp(regs))
 			printf("WARNING: exception is not recoverable, "
 			       "can't continue\n");
 		release_output_lock();
@@ -490,7 +500,7 @@ static int xmon_core(struct pt_regs *regs, int fromipi)
 		printf("Stopped at breakpoint %x (", BP_NUM(bp));
 		xmon_print_symbol(regs->nip, " ", ")\n");
 	}
-	if ((regs->msr & MSR_RI) == 0)
+	if (unrecoverable_excp(regs))
 		printf("WARNING: exception is not recoverable, "
 		       "can't continue\n");
 	remove_bpts();