author	Linus Torvalds <torvalds@linux-foundation.org>	2013-12-13 19:16:03 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2013-12-13 19:16:03 -0500
commit	b2077ebc19a4792d1b4749ab9fe013c893588989 (patch)
tree	dcf2cec138264c79e1ac0d85fca320145ab987a6 /arch
parent	2430cdd0fe14884cb533b13020a0ff000144af6d (diff)
parent	b713aa0b15015a65ad5421543b80df86de043d62 (diff)
Merge branch 'fixes' of git://ftp.arm.linux.org.uk/~rmk/linux-arm
Pull ARM fixes from Russell King:
 "This resolves some further issues with the dma mask changes on ARM
  which have been found by TI and others, and also some corner cases
  with the updates to the virtual to physical address translations.

  Konstantin also found some problems with the unwinder, which now
  performs tighter verification that the stack is valid while
  unwinding"

* 'fixes' of git://ftp.arm.linux.org.uk/~rmk/linux-arm:
  ARM: fix asm/memory.h build error
  ARM: 7917/1: cacheflush: correctly limit range of memory region being flushed
  ARM: 7913/1: fix framepointer check in unwind_frame
  ARM: 7912/1: check stack pointer in get_wchan
  ARM: 7909/1: mm: Call setup_dma_zone() post early_paging_init()
  ARM: 7908/1: mm: Fix the arm_dma_limit calculation
  ARM: another fix for the DMA mapping checks
Diffstat (limited to 'arch')
-rw-r--r--	arch/arm/include/asm/memory.h	31
-rw-r--r--	arch/arm/kernel/head-nommu.S	4
-rw-r--r--	arch/arm/kernel/head.S	2
-rw-r--r--	arch/arm/kernel/process.c	7
-rw-r--r--	arch/arm/kernel/setup.c	3
-rw-r--r--	arch/arm/kernel/stacktrace.c	2
-rw-r--r--	arch/arm/kernel/traps.c	3
-rw-r--r--	arch/arm/mm/dma-mapping.c	91
-rw-r--r--	arch/arm/mm/init.c	2
9 files changed, 67 insertions(+), 78 deletions(-)
diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h
index 9ecccc865046..6976b03e5213 100644
--- a/arch/arm/include/asm/memory.h
+++ b/arch/arm/include/asm/memory.h
@@ -100,23 +100,19 @@
 #define TASK_UNMAPPED_BASE	UL(0x00000000)
 #endif
 
-#ifndef PHYS_OFFSET
-#define PHYS_OFFSET		UL(CONFIG_DRAM_BASE)
-#endif
-
 #ifndef END_MEM
 #define END_MEM		(UL(CONFIG_DRAM_BASE) + CONFIG_DRAM_SIZE)
 #endif
 
 #ifndef PAGE_OFFSET
-#define PAGE_OFFSET		(PHYS_OFFSET)
+#define PAGE_OFFSET		PLAT_PHYS_OFFSET
 #endif
 
 /*
  * The module can be at any place in ram in nommu mode.
  */
 #define MODULES_END		(END_MEM)
-#define MODULES_VADDR		(PHYS_OFFSET)
+#define MODULES_VADDR		PAGE_OFFSET
 
 #define XIP_VIRT_ADDR(physaddr)  (physaddr)
 
@@ -157,6 +153,16 @@
 #endif
 #define ARCH_PGD_MASK		((1 << ARCH_PGD_SHIFT) - 1)
 
+/*
+ * PLAT_PHYS_OFFSET is the offset (from zero) of the start of physical
+ * memory. This is used for XIP and NoMMU kernels, or by kernels which
+ * have their own mach/memory.h. Assembly code must always use
+ * PLAT_PHYS_OFFSET and not PHYS_OFFSET.
+ */
+#ifndef PLAT_PHYS_OFFSET
+#define PLAT_PHYS_OFFSET	UL(CONFIG_PHYS_OFFSET)
+#endif
+
 #ifndef __ASSEMBLY__
 
 /*
@@ -239,6 +245,8 @@ static inline unsigned long __phys_to_virt(phys_addr_t x)
 
 #else
 
+#define PHYS_OFFSET	PLAT_PHYS_OFFSET
+
 static inline phys_addr_t __virt_to_phys(unsigned long x)
 {
 	return (phys_addr_t)x - PAGE_OFFSET + PHYS_OFFSET;
@@ -251,17 +259,6 @@ static inline unsigned long __phys_to_virt(phys_addr_t x)
 
 #endif
 #endif
-#endif /* __ASSEMBLY__ */
-
-#ifndef PHYS_OFFSET
-#ifdef PLAT_PHYS_OFFSET
-#define PHYS_OFFSET	PLAT_PHYS_OFFSET
-#else
-#define PHYS_OFFSET	UL(CONFIG_PHYS_OFFSET)
-#endif
-#endif
-
-#ifndef __ASSEMBLY__
 
 /*
  * PFNs are used to describe any physical page; this means
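The memory.h change above splits the physical-offset macros: assembly and XIP/NoMMU code now use the compile-time PLAT_PHYS_OFFSET, while C code keeps PHYS_OFFSET, which in the non-patched branch shown here is simply an alias for PLAT_PHYS_OFFSET. A minimal user-space sketch of the constant-offset translation these macros boil down to; the base addresses are illustrative assumptions, not values from the patch:

/*
 * Sketch of the constant-offset virt<->phys translation, assuming
 * CONFIG_ARM_PATCH_PHYS_VIRT is disabled so PHYS_OFFSET is the
 * compile-time PLAT_PHYS_OFFSET.  Base values are assumptions.
 */
#include <stdint.h>
#include <stdio.h>

#define PAGE_OFFSET	0xC0000000UL	/* kernel virtual base, 3G/1G split */
#define PHYS_OFFSET	0x80000000UL	/* assumed start of RAM */

static uint32_t virt_to_phys(unsigned long x)
{
	return (uint32_t)(x - PAGE_OFFSET + PHYS_OFFSET);
}

static unsigned long phys_to_virt(uint32_t x)
{
	return x - PHYS_OFFSET + PAGE_OFFSET;
}

int main(void)
{
	printf("%#x\n", virt_to_phys(0xC0008000UL));	/* 0x80008000 */
	printf("%#lx\n", phys_to_virt(0x80008000U));	/* 0xc0008000 */
	return 0;
}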
diff --git a/arch/arm/kernel/head-nommu.S b/arch/arm/kernel/head-nommu.S
index 14235ba64a90..716249cc2ee1 100644
--- a/arch/arm/kernel/head-nommu.S
+++ b/arch/arm/kernel/head-nommu.S
@@ -68,7 +68,7 @@ ENTRY(stext)
 
 #ifdef CONFIG_ARM_MPU
 	/* Calculate the size of a region covering just the kernel */
-	ldr	r5, =PHYS_OFFSET		@ Region start: PHYS_OFFSET
+	ldr	r5, =PLAT_PHYS_OFFSET		@ Region start: PHYS_OFFSET
 	ldr	r6, =(_end)			@ Cover whole kernel
 	sub	r6, r6, r5			@ Minimum size of region to map
 	clz	r6, r6				@ Region size must be 2^N...
@@ -213,7 +213,7 @@ ENTRY(__setup_mpu)
 	set_region_nr r0, #MPU_RAM_REGION
 	isb
 	/* Full access from PL0, PL1, shared for CONFIG_SMP, cacheable */
-	ldr	r0, =PHYS_OFFSET		@ RAM starts at PHYS_OFFSET
+	ldr	r0, =PLAT_PHYS_OFFSET		@ RAM starts at PHYS_OFFSET
 	ldr	r5,=(MPU_AP_PL1RW_PL0RW | MPU_RGN_NORMAL)
 
 	setup_region r0, r5, r6, MPU_DATA_SIDE	@ PHYS_OFFSET, shared, enabled
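Both head-nommu.S hunks swap PHYS_OFFSET for the compile-time PLAT_PHYS_OFFSET, since assembly cannot use the runtime-patched value. The surrounding code sizes the MPU region with clz because MPU regions must be a power of two; a rough C analogue of that rounding, illustrative only and not kernel code:

/*
 * Rough C analogue of the clz-based region sizing in head-nommu.S:
 * find the smallest power-of-two region covering the kernel image.
 * __builtin_clzl stands in for the ARM clz instruction.
 */
static unsigned long mpu_region_size(unsigned long start, unsigned long end)
{
	unsigned long len = end - start;	/* assumed nonzero */
	unsigned int top = 8 * sizeof(long) - 1 - __builtin_clzl(len);
	unsigned long size = 1UL << top;	/* 2^floor(log2(len)) */

	return size < len ? size << 1 : size;	/* round up to 2^N >= len */
}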
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index 11d59b32fb8d..32f317e5828a 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -110,7 +110,7 @@ ENTRY(stext)
 	sub	r4, r3, r4			@ (PHYS_OFFSET - PAGE_OFFSET)
 	add	r8, r8, r4			@ PHYS_OFFSET
 #else
-	ldr	r8, =PHYS_OFFSET		@ always constant in this case
+	ldr	r8, =PLAT_PHYS_OFFSET		@ always constant in this case
 #endif
 
 	/*
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index 94f6b05f9e24..92f7b15dd221 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -404,6 +404,7 @@ EXPORT_SYMBOL(dump_fpu);
 unsigned long get_wchan(struct task_struct *p)
 {
 	struct stackframe frame;
+	unsigned long stack_page;
 	int count = 0;
 	if (!p || p == current || p->state == TASK_RUNNING)
 		return 0;
@@ -412,9 +413,11 @@ unsigned long get_wchan(struct task_struct *p)
 	frame.sp = thread_saved_sp(p);
 	frame.lr = 0;			/* recovered from the stack */
 	frame.pc = thread_saved_pc(p);
+	stack_page = (unsigned long)task_stack_page(p);
 	do {
-		int ret = unwind_frame(&frame);
-		if (ret < 0)
+		if (frame.sp < stack_page ||
+		    frame.sp >= stack_page + THREAD_SIZE ||
+		    unwind_frame(&frame) < 0)
 			return 0;
 		if (!in_sched_functions(frame.pc))
 			return frame.pc;
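The get_wchan() fix (7912/1 in the shortlog) validates frame.sp against the target task's stack page on every iteration, so a bogus saved stack pointer can no longer send the unwinder outside the stack. A self-contained sketch of the pattern, with stand-ins for the kernel's stackframe, unwind_frame() and THREAD_SIZE:

/*
 * Sketch of the bounds-checked unwind loop added above.  THREAD_SIZE
 * and the types are stand-ins; unwind_frame() is assumed provided
 * elsewhere, as in the kernel.
 */
#define THREAD_SIZE	8192UL		/* assumed; varies by config */

struct stackframe { unsigned long fp, sp, lr, pc; };

int unwind_frame(struct stackframe *frame);	/* provided elsewhere */

unsigned long walk_stack(struct stackframe *frame, unsigned long stack_page)
{
	int count = 0;

	do {
		/* refuse to follow an sp that has left the task's stack */
		if (frame->sp < stack_page ||
		    frame->sp >= stack_page + THREAD_SIZE ||
		    unwind_frame(frame) < 0)
			return 0;
		/* the kernel would test frame->pc here */
	} while (count++ < 16);

	return 0;
}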
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 6a1b8a81b1ae..987a7f5bce5f 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -873,8 +873,6 @@ void __init setup_arch(char **cmdline_p)
 	machine_desc = mdesc;
 	machine_name = mdesc->name;
 
-	setup_dma_zone(mdesc);
-
 	if (mdesc->reboot_mode != REBOOT_HARD)
 		reboot_mode = mdesc->reboot_mode;
 
@@ -892,6 +890,7 @@ void __init setup_arch(char **cmdline_p)
 	sort(&meminfo.bank, meminfo.nr_banks, sizeof(meminfo.bank[0]), meminfo_cmp, NULL);
 
 	early_paging_init(mdesc, lookup_processor_type(read_cpuid_id()));
+	setup_dma_zone(mdesc);
 	sanity_check_meminfo();
 	arm_memblock_init(&meminfo, mdesc);
 
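This reordering (7909/1) works together with the init.c hunk below: early_paging_init() may rewrite the kernel's physical offset, and setup_dma_zone() now derives the DMA limit from that offset, so it must run afterwards. A minimal sketch of the dependency, with illustrative stubs in place of the real functions:

/*
 * Ordering sketch: the DMA limit must be derived from the physical
 * offset *after* any early fixup.  Function names mirror the kernel's;
 * the bodies and values are illustrative assumptions.
 */
static unsigned long phys_offset = 0x80000000UL;	/* assumed RAM base */
static unsigned long dma_limit;

static void early_paging_init(void)
{
	phys_offset += 0x00100000UL;	/* pretend the platform moves RAM */
}

static void setup_dma_zone(unsigned long zone_size)
{
	dma_limit = phys_offset + zone_size - 1;	/* needs final offset */
}

void setup(void)
{
	early_paging_init();		/* must come first... */
	setup_dma_zone(64UL << 20);	/* ...so this sees the fixup */
}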
diff --git a/arch/arm/kernel/stacktrace.c b/arch/arm/kernel/stacktrace.c
index 00f79e59985b..af4e8c8a5422 100644
--- a/arch/arm/kernel/stacktrace.c
+++ b/arch/arm/kernel/stacktrace.c
@@ -31,7 +31,7 @@ int notrace unwind_frame(struct stackframe *frame)
 	high = ALIGN(low, THREAD_SIZE);
 
 	/* check current frame pointer is within bounds */
-	if (fp < (low + 12) || fp + 4 >= high)
+	if (fp < low + 12 || fp > high - 4)
 		return -EINVAL;
 
 	/* restore the registers from the stack frame */
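The unwinder reads the four words at fp-12, fp-8, fp-4 and fp, so the highest legal frame pointer is high - 4. The old test fp + 4 >= high rejected exactly that topmost frame (7913/1); the rewritten test accepts it. A worked check of the boundary, with illustrative addresses:

/*
 * Worked example of the boundary fixed above; addresses are
 * illustrative.  The frame record spans fp-12 .. fp inclusive.
 */
#include <assert.h>

int main(void)
{
	unsigned long low  = 0xC0000000UL;
	unsigned long high = low + 8192UL;	/* THREAD_SIZE-aligned top */
	unsigned long fp   = high - 4;		/* topmost valid frame pointer */

	assert(!(fp < low + 12 || fp > high - 4));	/* new check accepts */
	assert(fp + 4 >= high);				/* old check rejected */
	return 0;
}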
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index dbf0923e8d76..7940241f0576 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -509,9 +509,10 @@ static inline int
 __do_cache_op(unsigned long start, unsigned long end)
 {
 	int ret;
-	unsigned long chunk = PAGE_SIZE;
 
 	do {
+		unsigned long chunk = min(PAGE_SIZE, end - start);
+
 		if (signal_pending(current)) {
 			struct thread_info *ti = current_thread_info();
 
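Recomputing chunk inside the loop clamps the final iteration to end - start, so the cache operation no longer touches memory past the requested region (7917/1). A sketch of the clamped-chunk pattern, with a stand-in for the per-chunk cache maintenance:

/*
 * Sketch of the clamped-chunk loop introduced above: operate on at
 * most one page per iteration, never past 'end'.  do_op() is a
 * stand-in for the kernel's per-chunk cache maintenance.
 */
#define PAGE_SIZE	4096UL

int do_op(unsigned long start, unsigned long len);	/* provided elsewhere */

int op_range(unsigned long start, unsigned long end)
{
	int ret;

	do {
		/* clamp the last chunk so [end, ...) is never touched */
		unsigned long chunk = end - start < PAGE_SIZE ?
				      end - start : PAGE_SIZE;

		ret = do_op(start, chunk);
		start += chunk;
	} while (ret == 0 && start < end);

	return ret;
}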
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index f6b6bfa88ecf..f61a5707823a 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -158,13 +158,49 @@ struct dma_map_ops arm_coherent_dma_ops = {
 };
 EXPORT_SYMBOL(arm_coherent_dma_ops);
 
+static int __dma_supported(struct device *dev, u64 mask, bool warn)
+{
+	unsigned long max_dma_pfn;
+
+	/*
+	 * If the mask allows for more memory than we can address,
+	 * and we actually have that much memory, then we must
+	 * indicate that DMA to this device is not supported.
+	 */
+	if (sizeof(mask) != sizeof(dma_addr_t) &&
+	    mask > (dma_addr_t)~0 &&
+	    dma_to_pfn(dev, ~0) < max_pfn) {
+		if (warn) {
+			dev_warn(dev, "Coherent DMA mask %#llx is larger than dma_addr_t allows\n",
+				 mask);
+			dev_warn(dev, "Driver did not use or check the return value from dma_set_coherent_mask()?\n");
+		}
+		return 0;
+	}
+
+	max_dma_pfn = min(max_pfn, arm_dma_pfn_limit);
+
+	/*
+	 * Translate the device's DMA mask to a PFN limit.  This
+	 * PFN number includes the page which we can DMA to.
+	 */
+	if (dma_to_pfn(dev, mask) < max_dma_pfn) {
+		if (warn)
+			dev_warn(dev, "Coherent DMA mask %#llx (pfn %#lx-%#lx) covers a smaller range of system memory than the DMA zone pfn 0x0-%#lx\n",
+				 mask,
+				 dma_to_pfn(dev, 0), dma_to_pfn(dev, mask) + 1,
+				 max_dma_pfn + 1);
+		return 0;
+	}
+
+	return 1;
+}
+
 static u64 get_coherent_dma_mask(struct device *dev)
 {
 	u64 mask = (u64)DMA_BIT_MASK(32);
 
 	if (dev) {
-		unsigned long max_dma_pfn;
-
 		mask = dev->coherent_dma_mask;
 
 		/*
@@ -176,34 +212,8 @@ static u64 get_coherent_dma_mask(struct device *dev)
 			return 0;
 		}
 
-		max_dma_pfn = min(max_pfn, arm_dma_pfn_limit);
-
-		/*
-		 * If the mask allows for more memory than we can address,
-		 * and we actually have that much memory, then fail the
-		 * allocation.
-		 */
-		if (sizeof(mask) != sizeof(dma_addr_t) &&
-		    mask > (dma_addr_t)~0 &&
-		    dma_to_pfn(dev, ~0) > max_dma_pfn) {
-			dev_warn(dev, "Coherent DMA mask %#llx is larger than dma_addr_t allows\n",
-				 mask);
-			dev_warn(dev, "Driver did not use or check the return value from dma_set_coherent_mask()?\n");
-			return 0;
-		}
-
-		/*
-		 * Now check that the mask, when translated to a PFN,
-		 * fits within the allowable addresses which we can
-		 * allocate.
-		 */
-		if (dma_to_pfn(dev, mask) < max_dma_pfn) {
-			dev_warn(dev, "Coherent DMA mask %#llx (pfn %#lx-%#lx) covers a smaller range of system memory than the DMA zone pfn 0x0-%#lx\n",
-				 mask,
-				 dma_to_pfn(dev, 0), dma_to_pfn(dev, mask) + 1,
-				 arm_dma_pfn_limit + 1);
+		if (!__dma_supported(dev, mask, true))
 			return 0;
-		}
 	}
 
 	return mask;
@@ -1032,28 +1042,7 @@ void arm_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
  */
 int dma_supported(struct device *dev, u64 mask)
 {
-	unsigned long limit;
-
-	/*
-	 * If the mask allows for more memory than we can address,
-	 * and we actually have that much memory, then we must
-	 * indicate that DMA to this device is not supported.
-	 */
-	if (sizeof(mask) != sizeof(dma_addr_t) &&
-	    mask > (dma_addr_t)~0 &&
-	    dma_to_pfn(dev, ~0) > arm_dma_pfn_limit)
-		return 0;
-
-	/*
-	 * Translate the device's DMA mask to a PFN limit.  This
-	 * PFN number includes the page which we can DMA to.
-	 */
-	limit = dma_to_pfn(dev, mask);
-
-	if (limit < arm_dma_pfn_limit)
-		return 0;
-
-	return 1;
+	return __dma_supported(dev, mask, false);
 }
 EXPORT_SYMBOL(dma_supported);
 
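Folding both call sites into __dma_supported() leaves two checks: a mask wider than dma_addr_t on a machine that really has that much memory, and a mask whose PFN limit falls short of the DMA zone. A hedged sketch of the mask-to-PFN reasoning, assuming a flat bus-to-physical mapping (real ARM platforms may apply a translation in dma_to_pfn()):

/*
 * Sketch of the mask-to-PFN check at the heart of __dma_supported().
 * dma_to_pfn() here assumes bus address == physical address, which is
 * an assumption, not the general case.
 */
#include <stdbool.h>
#include <stdint.h>

#define PAGE_SHIFT	12

static unsigned long dma_to_pfn(uint64_t addr)
{
	return (unsigned long)(addr >> PAGE_SHIFT);
}

static bool dma_mask_ok(uint64_t mask, unsigned long max_pfn,
			unsigned long dma_pfn_limit)
{
	/* the usable DMA zone ends at the lower of the two limits */
	unsigned long max_dma_pfn = max_pfn < dma_pfn_limit ?
				    max_pfn : dma_pfn_limit;

	/* the device's mask must reach the top of that zone */
	return dma_to_pfn(mask) >= max_dma_pfn;
}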
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 3e8f106ee5fe..1f7b19a47060 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -229,7 +229,7 @@ void __init setup_dma_zone(const struct machine_desc *mdesc)
 #ifdef CONFIG_ZONE_DMA
 	if (mdesc->dma_zone_size) {
 		arm_dma_zone_size = mdesc->dma_zone_size;
-		arm_dma_limit = PHYS_OFFSET + arm_dma_zone_size - 1;
+		arm_dma_limit = __pv_phys_offset + arm_dma_zone_size - 1;
 	} else
 		arm_dma_limit = 0xffffffff;
 	arm_dma_pfn_limit = arm_dma_limit >> PAGE_SHIFT;
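With this fix (7908/1), the limit comes from the runtime __pv_phys_offset rather than the compile-time PHYS_OFFSET macro, which matters once early_paging_init() has adjusted the offset. A worked example with assumed values: RAM at 0x80000000 and a 64 MiB DMA zone give arm_dma_limit = 0x83ffffff and arm_dma_pfn_limit = 0x83fff:

/*
 * Worked example of the arm_dma_limit calculation above, using
 * assumed values: RAM base 0x80000000, 64 MiB DMA zone, 4 KiB pages.
 */
#include <stdio.h>

int main(void)
{
	unsigned long pv_phys_offset = 0x80000000UL;	/* runtime RAM base */
	unsigned long dma_zone_size  = 64UL << 20;	/* 64 MiB */
	unsigned long dma_limit      = pv_phys_offset + dma_zone_size - 1;

	printf("arm_dma_limit     = %#lx\n", dma_limit);	/* 0x83ffffff */
	printf("arm_dma_pfn_limit = %#lx\n", dma_limit >> 12);	/* 0x83fff */
	return 0;
}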