Diffstat:
 -rw-r--r--  arch/arm/Kconfig                       |  6
 -rw-r--r--  arch/arm/Kconfig.debug                 |  2
 -rw-r--r--  arch/arm/include/asm/io.h              | 75
 -rw-r--r--  arch/arm/include/asm/memory.h          |  2
 -rw-r--r--  arch/arm/include/asm/pgtable-2level.h  | 31
 -rw-r--r--  arch/arm/kernel/armksyms.c             |  6
 -rw-r--r--  arch/arm/kernel/entry-armv.S           |  2
 -rw-r--r--  arch/arm/kernel/smp.c                  |  4
 -rw-r--r--  arch/arm/lib/memcpy.S                  |  2
 -rw-r--r--  arch/arm/lib/memset.S                  |  2
 -rw-r--r--  arch/arm/mm/ioremap.c                  | 33
 -rw-r--r--  arch/arm/mm/mmu.c                      |  7
 -rw-r--r--  arch/arm/mm/nommu.c                    | 39
 -rw-r--r--  arch/arm/vdso/vdsomunge.c              | 56
 14 files changed, 198 insertions(+), 69 deletions(-)
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index a750c1425c3a..1c5021002fe4 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -1693,6 +1693,12 @@ config HIGHMEM
 config HIGHPTE
         bool "Allocate 2nd-level pagetables from highmem"
         depends on HIGHMEM
+        help
+          The VM uses one page of physical memory for each page table.
+          For systems with a lot of processes, this can use a lot of
+          precious low memory, eventually leading to low memory being
+          consumed by page tables.  Setting this option will allow
+          user-space 2nd level page tables to reside in high memory.
 
 config HW_PERF_EVENTS
         bool "Enable hardware performance counter support for perf events"
diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug
index f1b157971366..a2e16f940394 100644
--- a/arch/arm/Kconfig.debug
+++ b/arch/arm/Kconfig.debug
@@ -1635,7 +1635,7 @@ config PID_IN_CONTEXTIDR
 
 config DEBUG_SET_MODULE_RONX
         bool "Set loadable kernel module data as NX and text as RO"
-        depends on MODULES
+        depends on MODULES && MMU
         ---help---
           This option helps catch unintended modifications to loadable
           kernel module's text and read-only data. It also prevents execution
diff --git a/arch/arm/include/asm/io.h b/arch/arm/include/asm/io.h
index 1c3938f26beb..485982084fe9 100644
--- a/arch/arm/include/asm/io.h
+++ b/arch/arm/include/asm/io.h
@@ -140,16 +140,11 @@ static inline u32 __raw_readl(const volatile void __iomem *addr)
  * The _caller variety takes a __builtin_return_address(0) value for
  * /proc/vmalloc to use - and should only be used in non-inline functions.
  */
-extern void __iomem *__arm_ioremap_pfn_caller(unsigned long, unsigned long,
-        size_t, unsigned int, void *);
 extern void __iomem *__arm_ioremap_caller(phys_addr_t, size_t, unsigned int,
         void *);
-
 extern void __iomem *__arm_ioremap_pfn(unsigned long, unsigned long, size_t, unsigned int);
-extern void __iomem *__arm_ioremap(phys_addr_t, size_t, unsigned int);
 extern void __iomem *__arm_ioremap_exec(phys_addr_t, size_t, bool cached);
 extern void __iounmap(volatile void __iomem *addr);
-extern void __arm_iounmap(volatile void __iomem *addr);
 
 extern void __iomem * (*arch_ioremap_caller)(phys_addr_t, size_t,
         unsigned int, void *);
@@ -321,21 +316,24 @@ extern void _memset_io(volatile void __iomem *, int, size_t);
 static inline void memset_io(volatile void __iomem *dst, unsigned c,
         size_t count)
 {
-        memset((void __force *)dst, c, count);
+        extern void mmioset(void *, unsigned int, size_t);
+        mmioset((void __force *)dst, c, count);
 }
 #define memset_io(dst,c,count) memset_io(dst,c,count)
 
 static inline void memcpy_fromio(void *to, const volatile void __iomem *from,
         size_t count)
 {
-        memcpy(to, (const void __force *)from, count);
+        extern void mmiocpy(void *, const void *, size_t);
+        mmiocpy(to, (const void __force *)from, count);
 }
 #define memcpy_fromio(to,from,count) memcpy_fromio(to,from,count)
 
 static inline void memcpy_toio(volatile void __iomem *to, const void *from,
         size_t count)
 {
-        memcpy((void __force *)to, from, count);
+        extern void mmiocpy(void *, const void *, size_t);
+        mmiocpy((void __force *)to, from, count);
 }
 #define memcpy_toio(to,from,count) memcpy_toio(to,from,count)
 
@@ -348,18 +346,61 @@ static inline void memcpy_toio(volatile void __iomem *to, const void *from,
 #endif  /* readl */
 
 /*
- * ioremap and friends.
+ * ioremap() and friends.
+ *
+ * ioremap() takes a resource address, and size.  Due to the ARM memory
+ * types, it is important to use the correct ioremap() function as each
+ * mapping has specific properties.
+ *
+ * Function            Memory type     Cacheability    Cache hint
+ * ioremap()           Device          n/a             n/a
+ * ioremap_nocache()   Device          n/a             n/a
+ * ioremap_cache()     Normal          Writeback       Read allocate
+ * ioremap_wc()        Normal          Non-cacheable   n/a
+ * ioremap_wt()        Normal          Non-cacheable   n/a
+ *
+ * All device mappings have the following properties:
+ * - no access speculation
+ * - no repetition (eg, on return from an exception)
+ * - number, order and size of accesses are maintained
+ * - unaligned accesses are "unpredictable"
+ * - writes may be delayed before they hit the endpoint device
  *
- * ioremap takes a PCI memory address, as specified in
- * Documentation/io-mapping.txt.
+ * ioremap_nocache() is the same as ioremap() as there are too many device
+ * drivers using this for device registers, and documentation which tells
+ * people to use it for such for this to be any different.  This is not a
+ * safe fallback for memory-like mappings, or memory regions where the
+ * compiler may generate unaligned accesses - eg, via inlining its own
+ * memcpy.
  *
+ * All normal memory mappings have the following properties:
+ * - reads can be repeated with no side effects
+ * - repeated reads return the last value written
+ * - reads can fetch additional locations without side effects
+ * - writes can be repeated (in certain cases) with no side effects
+ * - writes can be merged before accessing the target
+ * - unaligned accesses can be supported
+ * - ordering is not guaranteed without explicit dependencies or barrier
+ *   instructions
+ * - writes may be delayed before they hit the endpoint memory
+ *
+ * The cache hint is only a performance hint: CPUs may alias these hints.
+ * Eg, a CPU not implementing read allocate but implementing write allocate
+ * will provide a write allocate mapping instead.
  */
-#define ioremap(cookie,size)            __arm_ioremap((cookie), (size), MT_DEVICE)
-#define ioremap_nocache(cookie,size)    __arm_ioremap((cookie), (size), MT_DEVICE)
-#define ioremap_cache(cookie,size)      __arm_ioremap((cookie), (size), MT_DEVICE_CACHED)
-#define ioremap_wc(cookie,size)         __arm_ioremap((cookie), (size), MT_DEVICE_WC)
-#define ioremap_wt(cookie,size)         __arm_ioremap((cookie), (size), MT_DEVICE)
-#define iounmap                         __arm_iounmap
+void __iomem *ioremap(resource_size_t res_cookie, size_t size);
+#define ioremap ioremap
+#define ioremap_nocache ioremap
+
+void __iomem *ioremap_cache(resource_size_t res_cookie, size_t size);
+#define ioremap_cache ioremap_cache
+
+void __iomem *ioremap_wc(resource_size_t res_cookie, size_t size);
+#define ioremap_wc ioremap_wc
+#define ioremap_wt ioremap_wc
+
+void iounmap(volatile void __iomem *iomem_cookie);
+#define iounmap iounmap
 
 /*
  * io{read,write}{16,32}be() macros
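
A minimal sketch of a driver using the resulting API follows; the device base address, size and register offsets are hypothetical, chosen only to illustrate a Device-type mapping together with the out-of-line mmiocpy path described above:

    #include <linux/io.h>
    #include <linux/errno.h>
    #include <linux/printk.h>

    #define HYP_DEV_BASE   0x40000000      /* hypothetical device, physical */
    #define HYP_DEV_SIZE   0x1000
    #define HYP_REG_STATUS 0x00            /* hypothetical register offsets */
    #define HYP_REG_FIFO   0x100

    static int hyp_read_fifo(void *buf, size_t len)
    {
            void __iomem *regs;

            /* Device-type mapping: no speculation, accesses not merged. */
            regs = ioremap(HYP_DEV_BASE, HYP_DEV_SIZE);
            if (!regs)
                    return -ENOMEM;

            pr_info("status %08x\n", readl(regs + HYP_REG_STATUS));

            /* Routed through the out-of-line mmiocpy, so the compiler
             * cannot inline a memcpy that emits unaligned accesses. */
            memcpy_fromio(buf, regs + HYP_REG_FIFO, len);

            iounmap(regs);
            return 0;
    }
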
diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h
index 3a72d69b3255..6f225acc07c5 100644
--- a/arch/arm/include/asm/memory.h
+++ b/arch/arm/include/asm/memory.h
@@ -275,7 +275,7 @@ static inline void *phys_to_virt(phys_addr_t x)
  */
 #define __pa(x)                 __virt_to_phys((unsigned long)(x))
 #define __va(x)                 ((void *)__phys_to_virt((phys_addr_t)(x)))
-#define pfn_to_kaddr(pfn)       __va((pfn) << PAGE_SHIFT)
+#define pfn_to_kaddr(pfn)       __va((phys_addr_t)(pfn) << PAGE_SHIFT)
 
 extern phys_addr_t (*arch_virt_to_idmap)(unsigned long x);
 
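
The new cast matters on LPAE systems, where phys_addr_t is 64-bit but unsigned long is 32-bit: shifting a bare pfn for RAM located above 4GiB truncates before the value is widened. A sketch with a hypothetical pfn:

    unsigned long pfn = 0x880000;   /* hypothetical: RAM at 0x8_8000_0000 */

    /* old: shift performed in 32 bits, truncates to 0x8000_0000 */
    phys_addr_t wrong = pfn << PAGE_SHIFT;
    /* new: shift performed in 64 bits, high bits preserved */
    phys_addr_t right = (phys_addr_t)pfn << PAGE_SHIFT;
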
diff --git a/arch/arm/include/asm/pgtable-2level.h b/arch/arm/include/asm/pgtable-2level.h
index bfd662e49a25..aeddd28b3595 100644
--- a/arch/arm/include/asm/pgtable-2level.h
+++ b/arch/arm/include/asm/pgtable-2level.h
@@ -129,7 +129,36 @@
 
 /*
  * These are the memory types, defined to be compatible with
- * pre-ARMv6 CPUs cacheable and bufferable bits:   XXCB
+ * pre-ARMv6 CPUs cacheable and bufferable bits: n/a,n/a,C,B
+ * ARMv6+ without TEX remapping, they are a table index.
+ * ARMv6+ with TEX remapping, they correspond to n/a,TEX(0),C,B
+ *
+ * MT type              Pre-ARMv6       ARMv6+ type / cacheable status
+ * UNCACHED             Uncached        Strongly ordered
+ * BUFFERABLE           Bufferable      Normal memory / non-cacheable
+ * WRITETHROUGH         Writethrough    Normal memory / write through
+ * WRITEBACK            Writeback       Normal memory / write back, read alloc
+ * MINICACHE            Minicache       N/A
+ * WRITEALLOC           Writeback       Normal memory / write back, write alloc
+ * DEV_SHARED           Uncached        Device memory (shared)
+ * DEV_NONSHARED        Uncached        Device memory (non-shared)
+ * DEV_WC               Bufferable      Normal memory / non-cacheable
+ * DEV_CACHED           Writeback       Normal memory / write back, read alloc
+ * VECTORS              Variable        Normal memory / variable
+ *
+ * All normal memory mappings have the following properties:
+ * - reads can be repeated with no side effects
+ * - repeated reads return the last value written
+ * - reads can fetch additional locations without side effects
+ * - writes can be repeated (in certain cases) with no side effects
+ * - writes can be merged before accessing the target
+ * - unaligned accesses can be supported
+ *
+ * All device mappings have the following properties:
+ * - no access speculation
+ * - no repetition (eg, on return from an exception)
+ * - number, order and size of accesses are maintained
+ * - unaligned accesses are "unpredictable"
  */
 #define L_PTE_MT_UNCACHED       (_AT(pteval_t, 0x00) << 2)      /* 0000 */
 #define L_PTE_MT_BUFFERABLE     (_AT(pteval_t, 0x01) << 2)      /* 0001 */
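
To see how one of these types reaches a PTE, ARM's pgprot helpers replace the memory-type field of an existing protection value. A sketch along the lines of the helpers in arch/arm/include/asm/pgtable.h (the exact definitions there may differ):

    /* Replace the L_PTE_MT field of a protection value: */
    #define __pgprot_modify(prot, mask, bits) \
            __pgprot((pgprot_val(prot) & ~(mask)) | (bits))

    /* Eg, write-combining selects the BUFFERABLE type from the table: */
    #define pgprot_writecombine(prot) \
            __pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_BUFFERABLE)
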
diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
index a88671cfe1ff..5e5a51a99e68 100644
--- a/arch/arm/kernel/armksyms.c
+++ b/arch/arm/kernel/armksyms.c
@@ -50,6 +50,9 @@ extern void __aeabi_ulcmp(void);
 
 extern void fpundefinstr(void);
 
+void mmioset(void *, unsigned int, size_t);
+void mmiocpy(void *, const void *, size_t);
+
         /* platform dependent support */
 EXPORT_SYMBOL(arm_delay_ops);
 
@@ -88,6 +91,9 @@ EXPORT_SYMBOL(memmove);
 EXPORT_SYMBOL(memchr);
 EXPORT_SYMBOL(__memzero);
 
+EXPORT_SYMBOL(mmioset);
+EXPORT_SYMBOL(mmiocpy);
+
 #ifdef CONFIG_MMU
 EXPORT_SYMBOL(copy_page);
 
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index 7dac3086e361..cb4fb1e69778 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -410,7 +410,7 @@ ENDPROC(__fiq_abt)
         zero_fp
 
         .if     \trace
-#ifdef CONFIG_IRQSOFF_TRACER
+#ifdef CONFIG_TRACE_IRQFLAGS
         bl      trace_hardirqs_off
 #endif
         ct_user_exit save = 0
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 90dfbedfbfb8..3d6b7821cff8 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -578,7 +578,7 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
         struct pt_regs *old_regs = set_irq_regs(regs);
 
         if ((unsigned)ipinr < NR_IPI) {
-                trace_ipi_entry(ipi_types[ipinr]);
+                trace_ipi_entry_rcuidle(ipi_types[ipinr]);
                 __inc_irq_stat(cpu, ipi_irqs[ipinr]);
         }
 
@@ -637,7 +637,7 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
         }
 
         if ((unsigned)ipinr < NR_IPI)
-                trace_ipi_exit(ipi_types[ipinr]);
+                trace_ipi_exit_rcuidle(ipi_types[ipinr]);
         set_irq_regs(old_regs);
 }
 
diff --git a/arch/arm/lib/memcpy.S b/arch/arm/lib/memcpy.S
index 7797e81e40e0..64111bd4440b 100644
--- a/arch/arm/lib/memcpy.S
+++ b/arch/arm/lib/memcpy.S
@@ -61,8 +61,10 @@
 
 /* Prototype: void *memcpy(void *dest, const void *src, size_t n); */
 
+ENTRY(mmiocpy)
 ENTRY(memcpy)
 
 #include "copy_template.S"
 
 ENDPROC(memcpy)
+ENDPROC(mmiocpy)
diff --git a/arch/arm/lib/memset.S b/arch/arm/lib/memset.S
index a4ee97b5a2bf..3c65e3bd790f 100644
--- a/arch/arm/lib/memset.S
+++ b/arch/arm/lib/memset.S
@@ -16,6 +16,7 @@
         .text
         .align  5
 
+ENTRY(mmioset)
 ENTRY(memset)
 UNWIND( .fnstart )
         ands    r3, r0, #3              @ 1 unaligned?
@@ -133,3 +134,4 @@ UNWIND( .fnstart )
         b       1b
 UNWIND( .fnend )
 ENDPROC(memset)
+ENDPROC(mmioset)
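
Stacking ENTRY(mmioset)/ENTRY(memset) here, and ENTRY(mmiocpy)/ENTRY(memcpy) above, gives one implementation two linker-visible names, so memset_io() and friends can call a symbol the optimiser treats as opaque. The C-level analogue, sketched with GCC's alias attribute purely for illustration (the kernel does this directly in assembly, and the *_demo names are hypothetical):

    #include <stddef.h>

    /* One body... */
    static void *bytewise_set(void *p, unsigned int c, size_t n)
    {
            unsigned char *q = p;

            while (n--)
                    *q++ = (unsigned char)c;
            return p;
    }

    /* ...two linker-visible names for it: */
    void *memset_demo(void *, unsigned int, size_t)
            __attribute__((alias("bytewise_set")));
    void *mmioset_demo(void *, unsigned int, size_t)
            __attribute__((alias("bytewise_set")));
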
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
index d1e5ad7ab3bc..0c81056c1dd7 100644
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -255,7 +255,7 @@ remap_area_supersections(unsigned long virt, unsigned long pfn,
 }
 #endif
 
-void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
+static void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
         unsigned long offset, size_t size, unsigned int mtype, void *caller)
 {
         const struct mem_type *type;
@@ -363,7 +363,7 @@ __arm_ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size,
                   unsigned int mtype)
 {
         return __arm_ioremap_pfn_caller(pfn, offset, size, mtype,
-                        __builtin_return_address(0));
+                                        __builtin_return_address(0));
 }
 EXPORT_SYMBOL(__arm_ioremap_pfn);
 
@@ -371,13 +371,26 @@ void __iomem * (*arch_ioremap_caller)(phys_addr_t, size_t,
                 unsigned int, void *) =
         __arm_ioremap_caller;
 
-void __iomem *
-__arm_ioremap(phys_addr_t phys_addr, size_t size, unsigned int mtype)
+void __iomem *ioremap(resource_size_t res_cookie, size_t size)
+{
+        return arch_ioremap_caller(res_cookie, size, MT_DEVICE,
+                                   __builtin_return_address(0));
+}
+EXPORT_SYMBOL(ioremap);
+
+void __iomem *ioremap_cache(resource_size_t res_cookie, size_t size)
+{
+        return arch_ioremap_caller(res_cookie, size, MT_DEVICE_CACHED,
+                                   __builtin_return_address(0));
+}
+EXPORT_SYMBOL(ioremap_cache);
+
+void __iomem *ioremap_wc(resource_size_t res_cookie, size_t size)
 {
-        return arch_ioremap_caller(phys_addr, size, mtype,
-                __builtin_return_address(0));
+        return arch_ioremap_caller(res_cookie, size, MT_DEVICE_WC,
+                                   __builtin_return_address(0));
 }
-EXPORT_SYMBOL(__arm_ioremap);
+EXPORT_SYMBOL(ioremap_wc);
 
 /*
  * Remap an arbitrary physical address space into the kernel virtual
@@ -431,11 +444,11 @@ void __iounmap(volatile void __iomem *io_addr)
 
 void (*arch_iounmap)(volatile void __iomem *) = __iounmap;
 
-void __arm_iounmap(volatile void __iomem *io_addr)
+void iounmap(volatile void __iomem *cookie)
 {
-        arch_iounmap(io_addr);
+        arch_iounmap(cookie);
 }
-EXPORT_SYMBOL(__arm_iounmap);
+EXPORT_SYMBOL(iounmap);
 
 #ifdef CONFIG_PCI
 static int pci_ioremap_mem_type = MT_DEVICE;
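
The new entry points all funnel through the arch_ioremap_caller and arch_iounmap hooks, which a platform may interpose during early init. A hedged sketch of such an override (the hyp_* names are hypothetical, and the memory-type swap is only an example of what a quirky bus might need):

    #include <linux/init.h>
    #include <linux/io.h>
    #include <asm/mach/map.h>

    static void __iomem *hyp_ioremap_caller(phys_addr_t phys, size_t size,
                                            unsigned int mtype, void *caller)
    {
            /* eg, force non-shared device mappings on this platform */
            if (mtype == MT_DEVICE)
                    mtype = MT_DEVICE_NONSHARED;
            return __arm_ioremap_caller(phys, size, mtype, caller);
    }

    static void __init hyp_map_io(void)
    {
            arch_ioremap_caller = hyp_ioremap_caller;
    }
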
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 6ca7d9aa896f..870838a46d52 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -1072,6 +1072,7 @@ void __init sanity_check_meminfo(void)
         int highmem = 0;
         phys_addr_t vmalloc_limit = __pa(vmalloc_min - 1) + 1;
         struct memblock_region *reg;
+        bool should_use_highmem = false;
 
         for_each_memblock(memory, reg) {
                 phys_addr_t block_start = reg->base;
@@ -1090,6 +1091,7 @@ void __init sanity_check_meminfo(void)
                         pr_notice("Ignoring RAM at %pa-%pa (!CONFIG_HIGHMEM)\n",
                                   &block_start, &block_end);
                         memblock_remove(reg->base, reg->size);
+                        should_use_highmem = true;
                         continue;
                 }
 
@@ -1100,6 +1102,7 @@ void __init sanity_check_meminfo(void)
                                   &block_start, &block_end, &vmalloc_limit);
                         memblock_remove(vmalloc_limit, overlap_size);
                         block_end = vmalloc_limit;
+                        should_use_highmem = true;
                 }
         }
 
@@ -1134,6 +1137,9 @@ void __init sanity_check_meminfo(void)
                 }
         }
 
+        if (should_use_highmem)
+                pr_notice("Consider using a HIGHMEM enabled kernel.\n");
+
         high_memory = __va(arm_lowmem_limit - 1) + 1;
 
         /*
@@ -1494,6 +1500,7 @@ void __init paging_init(const struct machine_desc *mdesc)
         build_mem_type_table();
         prepare_page_table();
         map_lowmem();
+        memblock_set_current_limit(arm_lowmem_limit);
         dma_contiguous_remap();
         devicemaps_init(mdesc);
         kmap_init();
diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c
index afd7e05d95f1..1dd10936d68d 100644
--- a/arch/arm/mm/nommu.c
+++ b/arch/arm/mm/nommu.c
@@ -351,30 +351,43 @@ void __iomem *__arm_ioremap_pfn(unsigned long pfn, unsigned long offset,
 }
 EXPORT_SYMBOL(__arm_ioremap_pfn);
 
-void __iomem *__arm_ioremap_pfn_caller(unsigned long pfn, unsigned long offset,
-        size_t size, unsigned int mtype, void *caller)
+void __iomem *__arm_ioremap_caller(phys_addr_t phys_addr, size_t size,
+                                   unsigned int mtype, void *caller)
 {
-        return __arm_ioremap_pfn(pfn, offset, size, mtype);
+        return (void __iomem *)phys_addr;
 }
 
-void __iomem *__arm_ioremap(phys_addr_t phys_addr, size_t size,
-        unsigned int mtype)
+void __iomem * (*arch_ioremap_caller)(phys_addr_t, size_t, unsigned int, void *);
+
+void __iomem *ioremap(resource_size_t res_cookie, size_t size)
 {
-        return (void __iomem *)phys_addr;
+        return __arm_ioremap_caller(res_cookie, size, MT_DEVICE,
+                                    __builtin_return_address(0));
 }
-EXPORT_SYMBOL(__arm_ioremap);
+EXPORT_SYMBOL(ioremap);
 
-void __iomem * (*arch_ioremap_caller)(phys_addr_t, size_t, unsigned int, void *);
+void __iomem *ioremap_cache(resource_size_t res_cookie, size_t size)
+{
+        return __arm_ioremap_caller(res_cookie, size, MT_DEVICE_CACHED,
+                                    __builtin_return_address(0));
+}
+EXPORT_SYMBOL(ioremap_cache);
 
-void __iomem *__arm_ioremap_caller(phys_addr_t phys_addr, size_t size,
-        unsigned int mtype, void *caller)
+void __iomem *ioremap_wc(resource_size_t res_cookie, size_t size)
+{
+        return __arm_ioremap_caller(res_cookie, size, MT_DEVICE_WC,
+                                    __builtin_return_address(0));
+}
+EXPORT_SYMBOL(ioremap_wc);
+
+void __iounmap(volatile void __iomem *addr)
 {
-        return __arm_ioremap(phys_addr, size, mtype);
 }
+EXPORT_SYMBOL(__iounmap);
 
 void (*arch_iounmap)(volatile void __iomem *);
 
-void __arm_iounmap(volatile void __iomem *addr)
+void iounmap(volatile void __iomem *addr)
 {
 }
-EXPORT_SYMBOL(__arm_iounmap);
+EXPORT_SYMBOL(iounmap);
diff --git a/arch/arm/vdso/vdsomunge.c b/arch/arm/vdso/vdsomunge.c
index 9005b07296c8..aedec81d1198 100644
--- a/arch/arm/vdso/vdsomunge.c
+++ b/arch/arm/vdso/vdsomunge.c
@@ -45,13 +45,11 @@
  * it does.
  */
 
-#define _GNU_SOURCE
-
 #include <byteswap.h>
 #include <elf.h>
 #include <errno.h>
-#include <error.h>
 #include <fcntl.h>
+#include <stdarg.h>
 #include <stdbool.h>
 #include <stdio.h>
 #include <stdlib.h>
@@ -82,11 +80,25 @@
 #define EF_ARM_ABI_FLOAT_HARD 0x400
 #endif
 
+static int failed;
+static const char *argv0;
 static const char *outfile;
 
+static void fail(const char *fmt, ...)
+{
+        va_list ap;
+
+        failed = 1;
+        fprintf(stderr, "%s: ", argv0);
+        va_start(ap, fmt);
+        vfprintf(stderr, fmt, ap);
+        va_end(ap);
+        exit(EXIT_FAILURE);
+}
+
 static void cleanup(void)
 {
-        if (error_message_count > 0 && outfile != NULL)
+        if (failed && outfile != NULL)
                 unlink(outfile);
 }
 
@@ -119,68 +131,66 @@ int main(int argc, char **argv)
         int infd;
 
         atexit(cleanup);
+        argv0 = argv[0];
 
         if (argc != 3)
-                error(EXIT_FAILURE, 0, "Usage: %s [infile] [outfile]", argv[0]);
+                fail("Usage: %s [infile] [outfile]\n", argv[0]);
 
         infile = argv[1];
         outfile = argv[2];
 
         infd = open(infile, O_RDONLY);
         if (infd < 0)
-                error(EXIT_FAILURE, errno, "Cannot open %s", infile);
+                fail("Cannot open %s: %s\n", infile, strerror(errno));
 
         if (fstat(infd, &stat) != 0)
-                error(EXIT_FAILURE, errno, "Failed stat for %s", infile);
+                fail("Failed stat for %s: %s\n", infile, strerror(errno));
 
         inbuf = mmap(NULL, stat.st_size, PROT_READ, MAP_PRIVATE, infd, 0);
         if (inbuf == MAP_FAILED)
-                error(EXIT_FAILURE, errno, "Failed to map %s", infile);
+                fail("Failed to map %s: %s\n", infile, strerror(errno));
 
         close(infd);
 
         inhdr = inbuf;
 
         if (memcmp(&inhdr->e_ident, ELFMAG, SELFMAG) != 0)
-                error(EXIT_FAILURE, 0, "Not an ELF file");
+                fail("Not an ELF file\n");
 
         if (inhdr->e_ident[EI_CLASS] != ELFCLASS32)
-                error(EXIT_FAILURE, 0, "Unsupported ELF class");
+                fail("Unsupported ELF class\n");
 
         swap = inhdr->e_ident[EI_DATA] != HOST_ORDER;
 
         if (read_elf_half(inhdr->e_type, swap) != ET_DYN)
-                error(EXIT_FAILURE, 0, "Not a shared object");
+                fail("Not a shared object\n");
 
-        if (read_elf_half(inhdr->e_machine, swap) != EM_ARM) {
-                error(EXIT_FAILURE, 0, "Unsupported architecture %#x",
-                      inhdr->e_machine);
-        }
+        if (read_elf_half(inhdr->e_machine, swap) != EM_ARM)
+                fail("Unsupported architecture %#x\n", inhdr->e_machine);
 
         e_flags = read_elf_word(inhdr->e_flags, swap);
 
         if (EF_ARM_EABI_VERSION(e_flags) != EF_ARM_EABI_VER5) {
-                error(EXIT_FAILURE, 0, "Unsupported EABI version %#x",
-                      EF_ARM_EABI_VERSION(e_flags));
+                fail("Unsupported EABI version %#x\n",
+                     EF_ARM_EABI_VERSION(e_flags));
         }
 
         if (e_flags & EF_ARM_ABI_FLOAT_HARD)
-                error(EXIT_FAILURE, 0,
-                      "Unexpected hard-float flag set in e_flags");
+                fail("Unexpected hard-float flag set in e_flags\n");
 
         clear_soft_float = !!(e_flags & EF_ARM_ABI_FLOAT_SOFT);
 
         outfd = open(outfile, O_RDWR | O_CREAT | O_TRUNC, S_IRUSR | S_IWUSR);
         if (outfd < 0)
-                error(EXIT_FAILURE, errno, "Cannot open %s", outfile);
+                fail("Cannot open %s: %s\n", outfile, strerror(errno));
 
         if (ftruncate(outfd, stat.st_size) != 0)
-                error(EXIT_FAILURE, errno, "Cannot truncate %s", outfile);
+                fail("Cannot truncate %s: %s\n", outfile, strerror(errno));
 
         outbuf = mmap(NULL, stat.st_size, PROT_READ | PROT_WRITE, MAP_SHARED,
                       outfd, 0);
         if (outbuf == MAP_FAILED)
-                error(EXIT_FAILURE, errno, "Failed to map %s", outfile);
+                fail("Failed to map %s: %s\n", outfile, strerror(errno));
 
         close(outfd);
 
@@ -195,7 +205,7 @@ int main(int argc, char **argv)
         }
 
         if (msync(outbuf, stat.st_size, MS_SYNC) != 0)
-                error(EXIT_FAILURE, errno, "Failed to sync %s", outfile);
+                fail("Failed to sync %s: %s\n", outfile, strerror(errno));
 
         return EXIT_SUCCESS;
 }
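
error(3) and error_message_count are glibc extensions, so carrying a local fail() lets this host tool build against other C libraries; exit() still runs the atexit(cleanup) handler, which unlinks the partial output only when the failure flag was raised. A standalone sketch of that pattern (the file name is hypothetical, not part of the tool):

    #include <stdarg.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <unistd.h>

    static int failed;
    static const char *outfile;

    static void cleanup(void)
    {
            if (failed && outfile != NULL)
                    unlink(outfile);        /* drop half-written output */
    }

    static void fail(const char *fmt, ...)
    {
            va_list ap;

            failed = 1;
            va_start(ap, fmt);
            vfprintf(stderr, fmt, ap);
            va_end(ap);
            exit(EXIT_FAILURE);             /* runs cleanup() via atexit() */
    }

    int main(void)
    {
            FILE *f;

            atexit(cleanup);
            outfile = "demo.out";           /* hypothetical output */

            f = fopen(outfile, "w");
            if (f == NULL)
                    fail("cannot open %s\n", outfile);
            /* ...any later fail() unlinks the partial demo.out... */
            fclose(f);
            return EXIT_SUCCESS;
    }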