Diffstat (limited to 'arch/powerpc/kernel')
-rw-r--r--  arch/powerpc/kernel/dma-iommu.c      | 16
-rw-r--r--  arch/powerpc/kernel/dma.c            | 15
-rw-r--r--  arch/powerpc/kernel/exceptions-64e.S |  1
-rw-r--r--  arch/powerpc/kernel/pci-common.c     |  2
-rw-r--r--  arch/powerpc/kernel/process.c        | 17
-rw-r--r--  arch/powerpc/kernel/prom_init.c      |  3
-rw-r--r--  arch/powerpc/kernel/vdso.c           | 14
-rw-r--r--  arch/powerpc/kernel/vio.c            |  4
-rw-r--r--  arch/powerpc/kernel/vmlinux.lds.S    | 69
9 files changed, 59 insertions(+), 82 deletions(-)
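
Most of the churn below comes from hiding the raw dev->archdata.dma_data pointer behind accessor helpers. As a rough sketch of what the new call sites assume (the real definitions live in arch/powerpc/include/asm/dma-mapping.h; treat the union layout here as an assumption, not the exact upstream code), dma_data becomes a union that holds either an iommu table pointer or a direct-DMA offset:

/* Sketch only -- assumed shape of the accessors used by this diff,
 * where archdata.dma_data is a union { dma_addr_t dma_offset;
 * void *iommu_table_base; }.
 */
static inline void *get_iommu_table_base(struct device *dev)
{
	return dev->archdata.dma_data.iommu_table_base;
}

static inline void set_iommu_table_base(struct device *dev, void *base)
{
	dev->archdata.dma_data.iommu_table_base = base;
}

static inline dma_addr_t get_dma_offset(struct device *dev)
{
	if (dev)
		return dev->archdata.dma_data.dma_offset;
	return PCI_DRAM_OFFSET;	/* historical default, see dma.c below */
}

static inline void set_dma_offset(struct device *dev, dma_addr_t off)
{
	if (dev)
		dev->archdata.dma_data.dma_offset = off;
}

With these in place, the open-coded get_dma_direct_offset() in dma.c becomes redundant, which is why the first hunk of that file simply deletes it.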
diff --git a/arch/powerpc/kernel/dma-iommu.c b/arch/powerpc/kernel/dma-iommu.c
index 87ddb3fb948c..37771a518119 100644
--- a/arch/powerpc/kernel/dma-iommu.c
+++ b/arch/powerpc/kernel/dma-iommu.c
@@ -18,7 +18,7 @@
 static void *dma_iommu_alloc_coherent(struct device *dev, size_t size,
 				      dma_addr_t *dma_handle, gfp_t flag)
 {
-	return iommu_alloc_coherent(dev, dev->archdata.dma_data, size,
+	return iommu_alloc_coherent(dev, get_iommu_table_base(dev), size,
 				    dma_handle, device_to_mask(dev), flag,
 				    dev_to_node(dev));
 }
@@ -26,7 +26,7 @@ static void *dma_iommu_alloc_coherent(struct device *dev, size_t size,
 static void dma_iommu_free_coherent(struct device *dev, size_t size,
 				    void *vaddr, dma_addr_t dma_handle)
 {
-	iommu_free_coherent(dev->archdata.dma_data, size, vaddr, dma_handle);
+	iommu_free_coherent(get_iommu_table_base(dev), size, vaddr, dma_handle);
 }
 
 /* Creates TCEs for a user provided buffer. The user buffer must be
@@ -39,8 +39,8 @@ static dma_addr_t dma_iommu_map_page(struct device *dev, struct page *page,
 			     enum dma_data_direction direction,
 			     struct dma_attrs *attrs)
 {
-	return iommu_map_page(dev, dev->archdata.dma_data, page, offset, size,
-			      device_to_mask(dev), direction, attrs);
+	return iommu_map_page(dev, get_iommu_table_base(dev), page, offset,
+			      size, device_to_mask(dev), direction, attrs);
 }
 
 
@@ -48,7 +48,7 @@ static void dma_iommu_unmap_page(struct device *dev, dma_addr_t dma_handle,
 				 size_t size, enum dma_data_direction direction,
 				 struct dma_attrs *attrs)
 {
-	iommu_unmap_page(dev->archdata.dma_data, dma_handle, size, direction,
+	iommu_unmap_page(get_iommu_table_base(dev), dma_handle, size, direction,
 			 attrs);
 }
 
@@ -57,7 +57,7 @@ static int dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist,
 			    int nelems, enum dma_data_direction direction,
 			    struct dma_attrs *attrs)
 {
-	return iommu_map_sg(dev, dev->archdata.dma_data, sglist, nelems,
+	return iommu_map_sg(dev, get_iommu_table_base(dev), sglist, nelems,
 			    device_to_mask(dev), direction, attrs);
 }
 
@@ -65,14 +65,14 @@ static void dma_iommu_unmap_sg(struct device *dev, struct scatterlist *sglist,
 			   int nelems, enum dma_data_direction direction,
 			   struct dma_attrs *attrs)
 {
-	iommu_unmap_sg(dev->archdata.dma_data, sglist, nelems, direction,
+	iommu_unmap_sg(get_iommu_table_base(dev), sglist, nelems, direction,
 		       attrs);
 }
 
 /* We support DMA to/from any memory page via the iommu */
 static int dma_iommu_dma_supported(struct device *dev, u64 mask)
 {
-	struct iommu_table *tbl = dev->archdata.dma_data;
+	struct iommu_table *tbl = get_iommu_table_base(dev);
 
 	if (!tbl || tbl->it_offset > mask) {
 		printk(KERN_INFO
diff --git a/arch/powerpc/kernel/dma.c b/arch/powerpc/kernel/dma.c
index 21b784d7e7d0..6215062caf8c 100644
--- a/arch/powerpc/kernel/dma.c
+++ b/arch/powerpc/kernel/dma.c
@@ -21,13 +21,6 @@
  * default the offset is PCI_DRAM_OFFSET.
  */
 
-unsigned long get_dma_direct_offset(struct device *dev)
-{
-	if (dev)
-		return (unsigned long)dev->archdata.dma_data;
-
-	return PCI_DRAM_OFFSET;
-}
 
 void *dma_direct_alloc_coherent(struct device *dev, size_t size,
 				dma_addr_t *dma_handle, gfp_t flag)
@@ -37,7 +30,7 @@ void *dma_direct_alloc_coherent(struct device *dev, size_t size,
 	ret = __dma_alloc_coherent(dev, size, dma_handle, flag);
 	if (ret == NULL)
 		return NULL;
-	*dma_handle += get_dma_direct_offset(dev);
+	*dma_handle += get_dma_offset(dev);
 	return ret;
 #else
 	struct page *page;
@@ -51,7 +44,7 @@ void *dma_direct_alloc_coherent(struct device *dev, size_t size,
 		return NULL;
 	ret = page_address(page);
 	memset(ret, 0, size);
-	*dma_handle = virt_to_abs(ret) + get_dma_direct_offset(dev);
+	*dma_handle = virt_to_abs(ret) + get_dma_offset(dev);
 
 	return ret;
 #endif
@@ -75,7 +68,7 @@ static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
 	int i;
 
 	for_each_sg(sgl, sg, nents, i) {
-		sg->dma_address = sg_phys(sg) + get_dma_direct_offset(dev);
+		sg->dma_address = sg_phys(sg) + get_dma_offset(dev);
 		sg->dma_length = sg->length;
 		__dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
 	}
@@ -110,7 +103,7 @@ static inline dma_addr_t dma_direct_map_page(struct device *dev,
 {
 	BUG_ON(dir == DMA_NONE);
 	__dma_sync_page(page, offset, size, dir);
-	return page_to_phys(page) + offset + get_dma_direct_offset(dev);
+	return page_to_phys(page) + offset + get_dma_offset(dev);
 }
 
 static inline void dma_direct_unmap_page(struct device *dev,
diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
index 9048f96237f6..24dcc0ecf246 100644
--- a/arch/powerpc/kernel/exceptions-64e.S
+++ b/arch/powerpc/kernel/exceptions-64e.S
@@ -17,7 +17,6 @@
 #include <asm/cputable.h>
 #include <asm/setup.h>
 #include <asm/thread_info.h>
-#include <asm/reg.h>
 #include <asm/exception-64e.h>
 #include <asm/bug.h>
 #include <asm/irqflags.h>
diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
index e9f4840096b3..bb8209e34931 100644
--- a/arch/powerpc/kernel/pci-common.c
+++ b/arch/powerpc/kernel/pci-common.c
@@ -1117,7 +1117,7 @@ void __devinit pcibios_setup_bus_devices(struct pci_bus *bus)
 
 		/* Hook up default DMA ops */
 		sd->dma_ops = pci_dma_ops;
-		sd->dma_data = (void *)PCI_DRAM_OFFSET;
+		set_dma_offset(&dev->dev, PCI_DRAM_OFFSET);
 
 		/* Additional platform DMA/iommu setup */
 		if (ppc_md.pci_dma_dev_setup)
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 0a3216433051..1168c5f440ab 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -1165,7 +1165,22 @@ static inline unsigned long brk_rnd(void)
 
 unsigned long arch_randomize_brk(struct mm_struct *mm)
 {
-	unsigned long ret = PAGE_ALIGN(mm->brk + brk_rnd());
+	unsigned long base = mm->brk;
+	unsigned long ret;
+
+#ifdef CONFIG_PPC64
+	/*
+	 * If we are using 1TB segments and we are allowed to randomise
+	 * the heap, we can put it above 1TB so it is backed by a 1TB
+	 * segment. Otherwise the heap will be in the bottom 1TB
+	 * which always uses 256MB segments and this may result in a
+	 * performance penalty.
+	 */
+	if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
+		base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
+#endif
+
+	ret = PAGE_ALIGN(base + brk_rnd());
 
 	if (ret < mm->brk)
 		return mm->brk;
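
For reference on the constant used above: on 64-bit hash-MMU kernels SID_SHIFT_1T is 40, so 1UL << SID_SHIFT_1T is the 1TB boundary (0x10000000000). A minimal standalone sketch of the placement rule, with hypothetical helper and parameter names:

/* Sketch only: mirrors the heap-placement logic added above, assuming
 * SID_SHIFT_1T == 40. A 64-bit task on a 1TB-segment MMU gets its heap
 * base lifted to at least 1TB, so brk is backed by a single 1TB segment
 * rather than a series of 256MB ones.
 */
static unsigned long pick_heap_base(unsigned long brk,
				    int is_32bit, int has_1t_segments)
{
	unsigned long base = brk;

	if (!is_32bit && has_1t_segments && base < (1UL << 40))
		base = 1UL << 40;	/* 1TB */
	return base;
}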
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index 864334b337a3..bafac2e41ae1 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -800,7 +800,7 @@ static void __init prom_send_capabilities(void)
 	root = call_prom("open", 1, 1, ADDR("/"));
 	if (root != 0) {
 		/* try calling the ibm,client-architecture-support method */
-		prom_printf("Calling ibm,client-architecture...");
+		prom_printf("Calling ibm,client-architecture-support...");
 		if (call_prom_ret("call-method", 3, 2, &ret,
 				  ADDR("ibm,client-architecture-support"),
 				  root,
@@ -814,6 +814,7 @@ static void __init prom_send_capabilities(void)
 			return;
 		}
 		call_prom("close", 1, 0, root);
+		prom_printf(" not implemented\n");
 	}
 
 	/* no ibm,client-architecture-support call, try the old way */
diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
index 3faaf29bdb29..94e2df3cae07 100644
--- a/arch/powerpc/kernel/vdso.c
+++ b/arch/powerpc/kernel/vdso.c
@@ -241,6 +241,13 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
 	}
 
 	/*
+	 * Put vDSO base into mm struct. We need to do this before calling
+	 * install_special_mapping or the perf counter mmap tracking code
+	 * will fail to recognise it as a vDSO (since arch_vma_name fails).
+	 */
+	current->mm->context.vdso_base = vdso_base;
+
+	/*
 	 * our vma flags don't have VM_WRITE so by default, the process isn't
 	 * allowed to write those pages.
 	 * gdb can break that with ptrace interface, and thus trigger COW on
@@ -260,11 +267,10 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
 				     VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC|
 				     VM_ALWAYSDUMP,
 				     vdso_pagelist);
-	if (rc)
+	if (rc) {
+		current->mm->context.vdso_base = 0;
 		goto fail_mmapsem;
-
-	/* Put vDSO base into mm struct */
-	current->mm->context.vdso_base = vdso_base;
+	}
 
 	up_write(&mm->mmap_sem);
 	return 0;
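
The reordering in vdso.c matters because perf's mmap tracking names VMAs via arch_vma_name(), and the powerpc implementation recognises the vDSO by comparing against context.vdso_base; roughly (a sketch of the function in arch/powerpc/kernel/vdso.c, which may differ slightly upstream):

const char *arch_vma_name(struct vm_area_struct *vma)
{
	/* The vDSO mapping is identified purely by its start address,
	 * so vdso_base must already be set when the mapping is created.
	 */
	if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso_base)
		return "[vdso]";
	return NULL;
}

This is also why the error path now clears vdso_base back to 0: otherwise a failed install_special_mapping() would leave a stale address behind.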
diff --git a/arch/powerpc/kernel/vio.c b/arch/powerpc/kernel/vio.c
index bc7b41edbdfc..77f64218abf3 100644
--- a/arch/powerpc/kernel/vio.c
+++ b/arch/powerpc/kernel/vio.c
@@ -1054,6 +1054,8 @@ static struct iommu_table *vio_build_iommu_table(struct vio_dev *dev)
 		return NULL;
 
 	tbl = kmalloc(sizeof(*tbl), GFP_KERNEL);
+	if (tbl == NULL)
+		return NULL;
 
 	of_parse_dma_window(dev->dev.archdata.of_node, dma_window,
 			    &tbl->it_index, &offset, &size);
@@ -1233,7 +1235,7 @@ struct vio_dev *vio_register_device_node(struct device_node *of_node)
 		vio_cmo_set_dma_ops(viodev);
 	else
 		viodev->dev.archdata.dma_ops = &dma_iommu_ops;
-	viodev->dev.archdata.dma_data = vio_build_iommu_table(viodev);
+	set_iommu_table_base(&viodev->dev, vio_build_iommu_table(viodev));
 	set_dev_node(&viodev->dev, of_node_to_nid(of_node));
 
 	/* init generic 'struct device' fields: */
diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
index 58da4070723d..f56429362a12 100644
--- a/arch/powerpc/kernel/vmlinux.lds.S
+++ b/arch/powerpc/kernel/vmlinux.lds.S
@@ -6,6 +6,7 @@
 #include <asm/page.h>
 #include <asm-generic/vmlinux.lds.h>
 #include <asm/cache.h>
+#include <asm/thread_info.h>
 
 ENTRY(_stext)
 
@@ -71,12 +72,7 @@ SECTIONS
 	/* Read-only data */
 	RODATA
 
-	/* Exception & bug tables */
-	__ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) {
-		__start___ex_table = .;
-		*(__ex_table)
-		__stop___ex_table = .;
-	}
+	EXCEPTION_TABLE(0)
 
 	NOTES :kernel :notes
 
@@ -93,12 +89,7 @@ SECTIONS
 	 */
 	. = ALIGN(PAGE_SIZE);
 	__init_begin = .;
-
-	.init.text : AT(ADDR(.init.text) - LOAD_OFFSET) {
-		_sinittext = .;
-		INIT_TEXT
-		_einittext = .;
-	} :kernel
+	INIT_TEXT_SECTION(PAGE_SIZE) :kernel
 
 	/* .exit.text is discarded at runtime, not link time,
 	 * to deal with references from __bug_table
@@ -122,23 +113,16 @@ SECTIONS
 #endif
 	}
 
-	. = ALIGN(16);
 	.init.setup : AT(ADDR(.init.setup) - LOAD_OFFSET) {
-		__setup_start = .;
-		*(.init.setup)
-		__setup_end = .;
+		INIT_SETUP(16)
 	}
 
 	.initcall.init : AT(ADDR(.initcall.init) - LOAD_OFFSET) {
-		__initcall_start = .;
-		INITCALLS
-		__initcall_end = .;
+		INIT_CALLS
 	}
 
 	.con_initcall.init : AT(ADDR(.con_initcall.init) - LOAD_OFFSET) {
-		__con_initcall_start = .;
-		*(.con_initcall.init)
-		__con_initcall_end = .;
+		CON_INITCALL
 	}
 
 	SECURITY_INIT
@@ -169,14 +153,10 @@ SECTIONS
 		__stop___fw_ftr_fixup = .;
 	}
 #endif
-#ifdef CONFIG_BLK_DEV_INITRD
-	. = ALIGN(PAGE_SIZE);
 	.init.ramfs : AT(ADDR(.init.ramfs) - LOAD_OFFSET) {
-		__initramfs_start = .;
-		*(.init.ramfs)
-		__initramfs_end = .;
+		INIT_RAM_FS
 	}
-#endif
+
 	PERCPU(PAGE_SIZE)
 
 	. = ALIGN(8);
@@ -240,36 +220,24 @@ SECTIONS
 #endif
 
 	/* The initial task and kernel stack */
-#ifdef CONFIG_PPC32
-	. = ALIGN(8192);
-#else
-	. = ALIGN(16384);
-#endif
 	.data.init_task : AT(ADDR(.data.init_task) - LOAD_OFFSET) {
-		*(.data.init_task)
+		INIT_TASK_DATA(THREAD_SIZE)
 	}
 
-	. = ALIGN(PAGE_SIZE);
 	.data.page_aligned : AT(ADDR(.data.page_aligned) - LOAD_OFFSET) {
-		*(.data.page_aligned)
+		PAGE_ALIGNED_DATA(PAGE_SIZE)
 	}
 
-	. = ALIGN(L1_CACHE_BYTES);
 	.data.cacheline_aligned : AT(ADDR(.data.cacheline_aligned) - LOAD_OFFSET) {
-		*(.data.cacheline_aligned)
+		CACHELINE_ALIGNED_DATA(L1_CACHE_BYTES)
 	}
 
-	. = ALIGN(L1_CACHE_BYTES);
 	.data.read_mostly : AT(ADDR(.data.read_mostly) - LOAD_OFFSET) {
-		*(.data.read_mostly)
+		READ_MOSTLY_DATA(L1_CACHE_BYTES)
 	}
 
-	. = ALIGN(PAGE_SIZE);
 	.data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
-		__nosave_begin = .;
-		*(.data.nosave)
-		. = ALIGN(PAGE_SIZE);
-		__nosave_end = .;
+		NOSAVE_DATA
 	}
 
 	. = ALIGN(PAGE_SIZE);
@@ -280,14 +248,7 @@ SECTIONS
 	 * And finally the bss
 	 */
 
-	.bss : AT(ADDR(.bss) - LOAD_OFFSET) {
-		__bss_start = .;
-		*(.sbss) *(.scommon)
-		*(.dynbss)
-		*(.bss)
-		*(COMMON)
-		__bss_stop = .;
-	}
+	BSS_SECTION(0, 0, 0)
 
 	. = ALIGN(PAGE_SIZE);
 	_end = . ;