path: root/arch/arm/mm
author	Linus Torvalds <torvalds@linux-foundation.org>	2015-04-15 00:03:26 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2015-04-15 00:03:26 -0400
commit	bb0fd7ab0986105765d11baa82e619c618a235aa (patch)
tree	6a0585ece827e1025aa48819959d02155a871be9 /arch/arm/mm
parent	bdfa54dfd9eea001274dbcd622657a904fe43b81 (diff)
parent	4b2f8838479eb2abe042e094f7d2cced6d5ea772 (diff)
Merge branch 'for-linus' of git://ftp.arm.linux.org.uk/~rmk/linux-arm
Pull ARM updates from Russell King:
 "Included in this update are both some long term fixes and some new
  features.

  Fixes:

   - An integer overflow in the calculation of ELF_ET_DYN_BASE.

   - Avoiding OOMs for high-order IOMMU allocations

   - SMP requires the data cache to be enabled for synchronisation
     primitives to work, so prevent the CPU_DCACHE_DISABLE option being
     visible on SMP builds.

   - A bug going back 10+ years in the noMMU ARM94* CPU support code,
     where it corrupts registers.  Found by folk getting Linux running
     on their cameras.

   - Versatile Express needs an errata workaround enabled for CPU
     hot-unplug to work.

  Features:

   - Clean up module linker by handling out of range relocations
     separately from relocation cases we don't handle.

   - Fix a long term bug in the pci_mmap_page_range() code, which we
     hope won't impact userspace (we hope there's no users of the
     existing broken interface).

   - Don't map DMA coherent allocations when we don't have a MMU.

   - Drop experimental status for SMP_ON_UP.

   - Warn when DT doesn't specify ePAPR mandatory cache properties.

   - Add documentation concerning how we find the start of physical
     memory for AUTO_ZRELADDR kernels, detailing why we have chosen the
     mask and the implications of changing it.

   - Updates from Ard Biesheuvel to address some issues with large
     kernels (such as allyesconfig) failing to link.

   - Allow hibernation to work on modern (ARMv7) CPUs - this appears to
     have never worked in the past on these CPUs.

   - Enable IRQ_SHOW_LEVEL, which changes the /proc/interrupts output
     format (hopefully without userspace breaking... let's hope that if
     it causes someone a problem, they tell us.)

   - Fix tegra-ahb DT offsets.

   - Rework ARM errata 643719 code (and ARMv7 flush_cache_louis()/
     flush_dcache_all()) code to be more efficient, and enable this
     errata workaround by default for ARMv7+SMP CPUs.  This complements
     the Versatile Express fix above.

   - Rework ARMv7 context code for errata 430973, so that only Cortex A8
     CPUs are impacted by the branch target buffer flush when this
     errata is enabled.  Also update the help text to indicate that all
     r1p* A8 CPUs are impacted.

   - Switch ARM to the generic show_mem() implementation, it conveys all
     the information which we were already reporting.

   - Prevent slow timer sources being used for udelay() - timers running
     at less than 1MHz are not useful for this, and can cause udelay()
     to return immediately, without any wait.  Using such a slow timer
     is silly.

   - VDSO support for 32-bit ARM, mainly for gettimeofday() using the
     ARM architected timer.

   - Perf support for Scorpion performance monitoring units"

vdso semantic conflict fixed up as per linux-next.

* 'for-linus' of git://ftp.arm.linux.org.uk/~rmk/linux-arm: (52 commits)
  ARM: update errata 430973 documentation to cover Cortex A8 r1p*
  ARM: ensure delay timer has sufficient accuracy for delays
  ARM: switch to use the generic show_mem() implementation
  ARM: proc-v7: avoid errata 430973 workaround for non-Cortex A8 CPUs
  ARM: enable ARM errata 643719 workaround by default
  ARM: cache-v7: optimise test for Cortex A9 r0pX devices
  ARM: cache-v7: optimise branches in v7_flush_cache_louis
  ARM: cache-v7: consolidate initialisation of cache level index
  ARM: cache-v7: shift CLIDR to extract appropriate field before masking
  ARM: cache-v7: use movw/movt instructions
  ARM: allow 16-bit instructions in ALT_UP()
  ARM: proc-arm94*.S: fix setup function
  ARM: vexpress: fix CPU hotplug with CT9x4 tile.
  ARM: 8276/1: Make CPU_DCACHE_DISABLE depend on !SMP
  ARM: 8335/1: Documentation: DT bindings: Tegra AHB: document the legacy base address
  ARM: 8334/1: amba: tegra-ahb: detect and correct bogus base address
  ARM: 8333/1: amba: tegra-ahb: fix register offsets in the macros
  ARM: 8339/1: Enable CONFIG_GENERIC_IRQ_SHOW_LEVEL
  ARM: 8338/1: kexec: Relax SMP validation to improve DT compatibility
  ARM: 8337/1: mm: Do not invoke OOM for higher order IOMMU DMA allocations
  ...
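One of the fixes above is easy to quantify: a timer-backed udelay() converts microseconds into timer ticks, and with a clock slower than 1MHz that conversion can truncate to zero ticks, so the "delay" returns immediately. A hedged back-of-the-envelope sketch (plain userspace arithmetic, not kernel code):

#include <stdio.h>

/* ticks = us * freq / 1e6, as a timer-based delay loop would compute */
static unsigned long us_to_ticks(unsigned long us, unsigned long freq_hz)
{
	return (unsigned long)((unsigned long long)us * freq_hz / 1000000);
}

int main(void)
{
	/* A 32.768 kHz timer cannot resolve a 10us delay at all ... */
	printf("10us @ 32kHz -> %lu ticks\n", us_to_ticks(10, 32768));
	/* ... while a 1 MHz (or faster) timer can. */
	printf("10us @ 1MHz  -> %lu ticks\n", us_to_ticks(10, 1000000));
	return 0;
}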
Diffstat (limited to 'arch/arm/mm')
-rw-r--r--  arch/arm/mm/Kconfig            |  16
-rw-r--r--  arch/arm/mm/alignment.c        |   6
-rw-r--r--  arch/arm/mm/cache-l2x0.c       |   7
-rw-r--r--  arch/arm/mm/cache-v7.S         |  38
-rw-r--r--  arch/arm/mm/dma-mapping.c      | 116
-rw-r--r--  arch/arm/mm/init.c             |  49
-rw-r--r--  arch/arm/mm/proc-arm1020.S     |   4
-rw-r--r--  arch/arm/mm/proc-arm1020e.S    |   4
-rw-r--r--  arch/arm/mm/proc-arm1022.S     |   4
-rw-r--r--  arch/arm/mm/proc-arm1026.S     |   4
-rw-r--r--  arch/arm/mm/proc-arm720.S      |   4
-rw-r--r--  arch/arm/mm/proc-arm740.S      |   4
-rw-r--r--  arch/arm/mm/proc-arm7tdmi.S    |   4
-rw-r--r--  arch/arm/mm/proc-arm920.S      |   4
-rw-r--r--  arch/arm/mm/proc-arm922.S      |   4
-rw-r--r--  arch/arm/mm/proc-arm925.S      |   4
-rw-r--r--  arch/arm/mm/proc-arm926.S      |   4
-rw-r--r--  arch/arm/mm/proc-arm940.S      |  30
-rw-r--r--  arch/arm/mm/proc-arm946.S      |  26
-rw-r--r--  arch/arm/mm/proc-arm9tdmi.S    |   4
-rw-r--r--  arch/arm/mm/proc-fa526.S       |   4
-rw-r--r--  arch/arm/mm/proc-feroceon.S    |   5
-rw-r--r--  arch/arm/mm/proc-macros.S      |  28
-rw-r--r--  arch/arm/mm/proc-mohawk.S      |   4
-rw-r--r--  arch/arm/mm/proc-sa110.S       |   4
-rw-r--r--  arch/arm/mm/proc-sa1100.S      |   4
-rw-r--r--  arch/arm/mm/proc-v6.S          |   4
-rw-r--r--  arch/arm/mm/proc-v7-2level.S   |  12
-rw-r--r--  arch/arm/mm/proc-v7.S          |  56
-rw-r--r--  arch/arm/mm/proc-v7m.S         |   4
-rw-r--r--  arch/arm/mm/proc-xsc3.S        |   4
-rw-r--r--  arch/arm/mm/proc-xscale.S      |   4
32 files changed, 256 insertions, 213 deletions
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index 9b4f29e595a4..b7644310236b 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -738,7 +738,7 @@ config CPU_ICACHE_DISABLE
 
 config CPU_DCACHE_DISABLE
 	bool "Disable D-Cache (C-bit)"
-	depends on CPU_CP15
+	depends on CPU_CP15 && !SMP
 	help
 	  Say Y here to disable the processor data cache. Unless
 	  you have a reason not to or are unsure, say N.
@@ -825,6 +825,20 @@ config KUSER_HELPERS
 	  Say N here only if you are absolutely certain that you do not
 	  need these helpers; otherwise, the safe option is to say Y.
 
+config VDSO
+	bool "Enable VDSO for acceleration of some system calls"
+	depends on AEABI && MMU
+	default y if ARM_ARCH_TIMER
+	select GENERIC_TIME_VSYSCALL
+	help
+	  Place in the process address space an ELF shared object
+	  providing fast implementations of gettimeofday and
+	  clock_gettime.  Systems that implement the ARM architected
+	  timer will receive maximum benefit.
+
+	  You must have glibc 2.22 or later for programs to seamlessly
+	  take advantage of this.
+
 config DMA_CACHE_RWFO
 	bool "Enable read/write for ownership DMA cache maintenance"
 	depends on CPU_V6K && SMP
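The new CONFIG_VDSO option only supplies the kernel half; userspace keeps calling clock_gettime()/gettimeofday() as usual and a new enough C library (glibc 2.22+, per the help text above) routes the call through the vDSO rather than a syscall. A minimal userspace sketch, purely illustrative:

#include <stdio.h>
#include <time.h>

int main(void)
{
	struct timespec ts;

	/* Expected to be satisfied by the vDSO on ARM_ARCH_TIMER systems */
	clock_gettime(CLOCK_MONOTONIC, &ts);
	printf("%lld.%09ld\n", (long long)ts.tv_sec, ts.tv_nsec);
	return 0;
}

On systems with the ARM architected timer the whole call can then complete without ever entering the kernel.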
diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
index 2c0c541c60ca..9769f1eefe3b 100644
--- a/arch/arm/mm/alignment.c
+++ b/arch/arm/mm/alignment.c
@@ -201,7 +201,7 @@ union offset_union {
 	THUMB(	"1:	"ins"	%1, [%2]\n"	)		\
 	THUMB(	"	add	%2, %2, #1\n"	)		\
 	"2:\n"							\
-	"	.pushsection .fixup,\"ax\"\n"			\
+	"	.pushsection .text.fixup,\"ax\"\n"		\
 	"	.align	2\n"					\
 	"3:	mov	%0, #1\n"				\
 	"	b	2b\n"					\
@@ -261,7 +261,7 @@ union offset_union {
 	"	mov	%1, %1, "NEXT_BYTE"\n"			\
 	"2:	"ins"	%1, [%2]\n"				\
 	"3:\n"							\
-	"	.pushsection .fixup,\"ax\"\n"			\
+	"	.pushsection .text.fixup,\"ax\"\n"		\
 	"	.align	2\n"					\
 	"4:	mov	%0, #1\n"				\
 	"	b	3b\n"					\
@@ -301,7 +301,7 @@ union offset_union {
 	"	mov	%1, %1, "NEXT_BYTE"\n"			\
 	"4:	"ins"	%1, [%2]\n"				\
 	"5:\n"							\
-	"	.pushsection .fixup,\"ax\"\n"			\
+	"	.pushsection .text.fixup,\"ax\"\n"		\
 	"	.align	2\n"					\
 	"6:	mov	%0, #1\n"				\
 	"	b	5b\n"					\
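The three hunks above only rename the section that holds the recovery code for faulting user accesses: that code now lives in .text.fixup, which is kept adjacent to .text so the short back-branches stay in range even on very large kernels (one of the Ard Biesheuvel link fixes mentioned in the pull message). As a hedged, self-contained sketch of the general pattern these macros use (illustrative only, not code from this patch):

/* Probe one byte of possibly-faulting memory.  "1:" is the access that may
 * fault, "3:" is the recovery code placed in .text.fixup, and the __ex_table
 * entry ties the two together for the fault handler.
 */
static inline int probe_byte(const unsigned char *addr, unsigned int *val)
{
	unsigned int err = 0, v = 0;

	asm volatile(
	"1:	ldrb	%1, [%2]\n"
	"2:\n"
	"	.pushsection .text.fixup,\"ax\"\n"
	"	.align	2\n"
	"3:	mov	%0, #1\n"
	"	b	2b\n"
	"	.popsection\n"
	"	.pushsection __ex_table,\"a\"\n"
	"	.align	3\n"
	"	.long	1b, 3b\n"
	"	.popsection\n"
	: "=r" (err), "=&r" (v)
	: "r" (addr), "0" (err));

	*val = v;
	return err;
}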
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
index 8f15f70622a6..e309c8f35af5 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -1647,6 +1647,7 @@ int __init l2x0_of_init(u32 aux_val, u32 aux_mask)
 	struct device_node *np;
 	struct resource res;
 	u32 cache_id, old_aux;
+	u32 cache_level = 2;
 
 	np = of_find_matching_node(NULL, l2x0_ids);
 	if (!np)
@@ -1679,6 +1680,12 @@ int __init l2x0_of_init(u32 aux_val, u32 aux_mask)
 	if (!of_property_read_bool(np, "cache-unified"))
 		pr_err("L2C: device tree omits to specify unified cache\n");
 
+	if (of_property_read_u32(np, "cache-level", &cache_level))
+		pr_err("L2C: device tree omits to specify cache-level\n");
+
+	if (cache_level != 2)
+		pr_err("L2C: device tree specifies invalid cache level\n");
+
 	/* Read back current (default) hardware configuration */
 	if (data->save)
 		data->save(l2x0_base);
diff --git a/arch/arm/mm/cache-v7.S b/arch/arm/mm/cache-v7.S
index b966656d2c2d..a134d8a13d00 100644
--- a/arch/arm/mm/cache-v7.S
+++ b/arch/arm/mm/cache-v7.S
@@ -36,10 +36,10 @@ ENTRY(v7_invalidate_l1)
 	mcr	p15, 2, r0, c0, c0, 0
 	mrc	p15, 1, r0, c0, c0, 0
 
-	ldr	r1, =0x7fff
+	movw	r1, #0x7fff
 	and	r2, r1, r0, lsr #13
 
-	ldr	r1, =0x3ff
+	movw	r1, #0x3ff
 
 	and	r3, r1, r0, lsr #3	@ NumWays - 1
 	add	r2, r2, #1		@ NumSets
@@ -90,21 +90,20 @@ ENDPROC(v7_flush_icache_all)
 ENTRY(v7_flush_dcache_louis)
 	dmb					@ ensure ordering with previous memory accesses
 	mrc	p15, 1, r0, c0, c0, 1		@ read clidr, r0 = clidr
-	ALT_SMP(ands	r3, r0, #(7 << 21))	@ extract LoUIS from clidr
-	ALT_UP(ands	r3, r0, #(7 << 27))	@ extract LoUU from clidr
+ALT_SMP(mov	r3, r0, lsr #20)		@ move LoUIS into position
+ALT_UP(	mov	r3, r0, lsr #26)		@ move LoUU into position
+	ands	r3, r3, #7 << 1			@ extract LoU*2 field from clidr
+	bne	start_flush_levels		@ LoU != 0, start flushing
 #ifdef CONFIG_ARM_ERRATA_643719
-	ALT_SMP(mrceq	p15, 0, r2, c0, c0, 0)	@ read main ID register
-	ALT_UP(reteq	lr)			@ LoUU is zero, so nothing to do
-	ldreq	r1, =0x410fc090			@ ID of ARM Cortex A9 r0p?
-	biceq	r2, r2, #0x0000000f		@ clear minor revision number
-	teqeq	r2, r1				@ test for errata affected core and if so...
-	orreqs	r3, #(1 << 21)			@   fix LoUIS value (and set flags state to 'ne')
+ALT_SMP(mrc	p15, 0, r2, c0, c0, 0)		@ read main ID register
+ALT_UP(	ret	lr)				@ LoUU is zero, so nothing to do
+	movw	r1, #:lower16:(0x410fc090 >> 4)	@ ID of ARM Cortex A9 r0p?
+	movt	r1, #:upper16:(0x410fc090 >> 4)
+	teq	r1, r2, lsr #4			@ test for errata affected core and if so...
+	moveq	r3, #1 << 1			@   fix LoUIS value
+	beq	start_flush_levels		@   start flushing cache levels
 #endif
-	ALT_SMP(mov	r3, r3, lsr #20)	@ r3 = LoUIS * 2
-	ALT_UP(mov	r3, r3, lsr #26)	@ r3 = LoUU * 2
-	reteq	lr				@ return if level == 0
-	mov	r10, #0				@ r10 (starting level) = 0
-	b	flush_levels			@ start flushing cache levels
+	ret	lr
 ENDPROC(v7_flush_dcache_louis)
 
 /*
@@ -119,9 +118,10 @@ ENDPROC(v7_flush_dcache_louis)
 ENTRY(v7_flush_dcache_all)
 	dmb					@ ensure ordering with previous memory accesses
 	mrc	p15, 1, r0, c0, c0, 1		@ read clidr
-	ands	r3, r0, #0x7000000		@ extract loc from clidr
-	mov	r3, r3, lsr #23			@ left align loc bit field
+	mov	r3, r0, lsr #23			@ move LoC into position
+	ands	r3, r3, #7 << 1			@ extract LoC*2 from clidr
 	beq	finished			@ if loc is 0, then no need to clean
+start_flush_levels:
 	mov	r10, #0				@ start clean at cache level 0
 flush_levels:
 	add	r2, r10, r10, lsr #1		@ work out 3x current cache level
@@ -140,10 +140,10 @@ flush_levels:
 #endif
 	and	r2, r1, #7			@ extract the length of the cache lines
 	add	r2, r2, #4			@ add 4 (line length offset)
-	ldr	r4, =0x3ff
+	movw	r4, #0x3ff
 	ands	r4, r4, r1, lsr #3		@ find maximum number on the way size
 	clz	r5, r4				@ find bit position of way size increment
-	ldr	r7, =0x7fff
+	movw	r7, #0x7fff
 	ands	r7, r7, r1, lsr #13		@ extract max number of the index size
 loop1:
 	mov	r9, r7				@ create working copy of max index
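The reworked routines read the same CLIDR fields as before, just pre-shifted into the "level * 2" form that the set/way maintenance operations expect, while the movw/movt forms load the 16-bit constants directly instead of via a literal pool. Restating the field positions as a hedged C sketch (per the masks used in the code above, illustrative only):

#include <stdint.h>

/* CLIDR bit fields used above: LoUIS = [23:21], LoC = [26:24], LoUU = [29:27] */
static inline unsigned int clidr_louis(uint32_t clidr) { return (clidr >> 21) & 7; }
static inline unsigned int clidr_loc(uint32_t clidr)   { return (clidr >> 24) & 7; }
static inline unsigned int clidr_louu(uint32_t clidr)  { return (clidr >> 27) & 7; }

The assembly keeps the field multiplied by two (ands r3, r3, #7 << 1) because the cache level is encoded in bits [3:1] of the set/way operand.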
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index e315dfe3af1b..09c5fe3d30c2 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -289,11 +289,11 @@ static void __dma_free_buffer(struct page *page, size_t size)
289 289
290static void *__alloc_from_contiguous(struct device *dev, size_t size, 290static void *__alloc_from_contiguous(struct device *dev, size_t size,
291 pgprot_t prot, struct page **ret_page, 291 pgprot_t prot, struct page **ret_page,
292 const void *caller); 292 const void *caller, bool want_vaddr);
293 293
294static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp, 294static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
295 pgprot_t prot, struct page **ret_page, 295 pgprot_t prot, struct page **ret_page,
296 const void *caller); 296 const void *caller, bool want_vaddr);
297 297
298static void * 298static void *
299__dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot, 299__dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot,
@@ -357,10 +357,10 @@ static int __init atomic_pool_init(void)
357 357
358 if (dev_get_cma_area(NULL)) 358 if (dev_get_cma_area(NULL))
359 ptr = __alloc_from_contiguous(NULL, atomic_pool_size, prot, 359 ptr = __alloc_from_contiguous(NULL, atomic_pool_size, prot,
360 &page, atomic_pool_init); 360 &page, atomic_pool_init, true);
361 else 361 else
362 ptr = __alloc_remap_buffer(NULL, atomic_pool_size, gfp, prot, 362 ptr = __alloc_remap_buffer(NULL, atomic_pool_size, gfp, prot,
363 &page, atomic_pool_init); 363 &page, atomic_pool_init, true);
364 if (ptr) { 364 if (ptr) {
365 int ret; 365 int ret;
366 366
@@ -467,13 +467,15 @@ static void __dma_remap(struct page *page, size_t size, pgprot_t prot)
 
 static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
 				 pgprot_t prot, struct page **ret_page,
-				 const void *caller)
+				 const void *caller, bool want_vaddr)
 {
 	struct page *page;
-	void *ptr;
+	void *ptr = NULL;
 	page = __dma_alloc_buffer(dev, size, gfp);
 	if (!page)
 		return NULL;
+	if (!want_vaddr)
+		goto out;
 
 	ptr = __dma_alloc_remap(page, size, gfp, prot, caller);
 	if (!ptr) {
@@ -481,6 +483,7 @@ static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
 		return NULL;
 	}
 
+ out:
 	*ret_page = page;
 	return ptr;
 }
@@ -523,12 +526,12 @@ static int __free_from_pool(void *start, size_t size)
 
 static void *__alloc_from_contiguous(struct device *dev, size_t size,
 				     pgprot_t prot, struct page **ret_page,
-				     const void *caller)
+				     const void *caller, bool want_vaddr)
 {
 	unsigned long order = get_order(size);
 	size_t count = size >> PAGE_SHIFT;
 	struct page *page;
-	void *ptr;
+	void *ptr = NULL;
 
 	page = dma_alloc_from_contiguous(dev, count, order);
 	if (!page)
@@ -536,6 +539,9 @@ static void *__alloc_from_contiguous(struct device *dev, size_t size,
 
 	__dma_clear_buffer(page, size);
 
+	if (!want_vaddr)
+		goto out;
+
 	if (PageHighMem(page)) {
 		ptr = __dma_alloc_remap(page, size, GFP_KERNEL, prot, caller);
 		if (!ptr) {
@@ -546,17 +552,21 @@ static void *__alloc_from_contiguous(struct device *dev, size_t size,
 		__dma_remap(page, size, prot);
 		ptr = page_address(page);
 	}
+
+ out:
 	*ret_page = page;
 	return ptr;
 }
 
 static void __free_from_contiguous(struct device *dev, struct page *page,
-				   void *cpu_addr, size_t size)
+				   void *cpu_addr, size_t size, bool want_vaddr)
 {
-	if (PageHighMem(page))
-		__dma_free_remap(cpu_addr, size);
-	else
-		__dma_remap(page, size, PAGE_KERNEL);
+	if (want_vaddr) {
+		if (PageHighMem(page))
+			__dma_free_remap(cpu_addr, size);
+		else
+			__dma_remap(page, size, PAGE_KERNEL);
+	}
 	dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT);
 }
 
@@ -574,12 +584,12 @@ static inline pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot)
 
 #define nommu()				1
 
 #define __get_dma_pgprot(attrs, prot)				__pgprot(0)
-#define __alloc_remap_buffer(dev, size, gfp, prot, ret, c)	NULL
+#define __alloc_remap_buffer(dev, size, gfp, prot, ret, c, wv)	NULL
 #define __alloc_from_pool(size, ret_page)			NULL
-#define __alloc_from_contiguous(dev, size, prot, ret, c)	NULL
+#define __alloc_from_contiguous(dev, size, prot, ret, c, wv)	NULL
 #define __free_from_pool(cpu_addr, size)			0
-#define __free_from_contiguous(dev, page, cpu_addr, size)	do { } while (0)
+#define __free_from_contiguous(dev, page, cpu_addr, size, wv)	do { } while (0)
 #define __dma_free_remap(cpu_addr, size)			do { } while (0)
 
 #endif	/* CONFIG_MMU */
@@ -599,11 +609,13 @@ static void *__alloc_simple_buffer(struct device *dev, size_t size, gfp_t gfp,
 
 
 static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
-			 gfp_t gfp, pgprot_t prot, bool is_coherent, const void *caller)
+			 gfp_t gfp, pgprot_t prot, bool is_coherent,
+			 struct dma_attrs *attrs, const void *caller)
 {
 	u64 mask = get_coherent_dma_mask(dev);
 	struct page *page = NULL;
 	void *addr;
+	bool want_vaddr;
 
 #ifdef CONFIG_DMA_API_DEBUG
 	u64 limit = (mask + 1) & ~mask;
@@ -631,20 +643,21 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
 
 	*handle = DMA_ERROR_CODE;
 	size = PAGE_ALIGN(size);
+	want_vaddr = !dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs);
 
 	if (is_coherent || nommu())
 		addr = __alloc_simple_buffer(dev, size, gfp, &page);
 	else if (!(gfp & __GFP_WAIT))
 		addr = __alloc_from_pool(size, &page);
 	else if (!dev_get_cma_area(dev))
-		addr = __alloc_remap_buffer(dev, size, gfp, prot, &page, caller);
+		addr = __alloc_remap_buffer(dev, size, gfp, prot, &page, caller, want_vaddr);
 	else
-		addr = __alloc_from_contiguous(dev, size, prot, &page, caller);
+		addr = __alloc_from_contiguous(dev, size, prot, &page, caller, want_vaddr);
 
-	if (addr)
+	if (page)
 		*handle = pfn_to_dma(dev, page_to_pfn(page));
 
-	return addr;
+	return want_vaddr ? addr : page;
 }
 
 /*
@@ -661,7 +674,7 @@ void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
 		return memory;
 
 	return __dma_alloc(dev, size, handle, gfp, prot, false,
-			   __builtin_return_address(0));
+			   attrs, __builtin_return_address(0));
 }
 
 static void *arm_coherent_dma_alloc(struct device *dev, size_t size,
@@ -674,7 +687,7 @@ static void *arm_coherent_dma_alloc(struct device *dev, size_t size,
 		return memory;
 
 	return __dma_alloc(dev, size, handle, gfp, prot, true,
-			   __builtin_return_address(0));
+			   attrs, __builtin_return_address(0));
 }
 
 /*
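The attrs argument threaded through __dma_alloc() is what lets DMA_ATTR_NO_KERNEL_MAPPING allocations skip the kernel remap entirely (and, on free, skip the unmap). A hedged driver-side sketch of requesting such a buffer with the dma_attrs API of this era; the helper name is illustrative, not from this patch:

#include <linux/dma-mapping.h>

/* Hypothetical helper: allocate a large buffer the CPU never touches
 * directly, e.g. a framebuffer scanned out by a device.  The returned
 * value is an opaque cookie, not a kernel virtual address - it must only
 * be handed back to dma_free_attrs()/dma_mmap_attrs().
 */
static void *alloc_device_only_buffer(struct device *dev, size_t size,
				      dma_addr_t *bus_addr)
{
	DEFINE_DMA_ATTRS(attrs);

	dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);
	return dma_alloc_attrs(dev, size, bus_addr, GFP_KERNEL, &attrs);
}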
@@ -715,6 +728,7 @@ static void __arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
 			   bool is_coherent)
 {
 	struct page *page = pfn_to_page(dma_to_pfn(dev, handle));
+	bool want_vaddr = !dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs);
 
 	if (dma_release_from_coherent(dev, get_order(size), cpu_addr))
 		return;
@@ -726,14 +740,15 @@ static void __arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
 	} else if (__free_from_pool(cpu_addr, size)) {
 		return;
 	} else if (!dev_get_cma_area(dev)) {
-		__dma_free_remap(cpu_addr, size);
+		if (want_vaddr)
+			__dma_free_remap(cpu_addr, size);
 		__dma_free_buffer(page, size);
 	} else {
 		/*
 		 * Non-atomic allocations cannot be freed with IRQs disabled
 		 */
 		WARN_ON(irqs_disabled());
-		__free_from_contiguous(dev, page, cpu_addr, size);
+		__free_from_contiguous(dev, page, cpu_addr, size, want_vaddr);
 	}
 }
 
@@ -1135,13 +1150,28 @@ static struct page **__iommu_alloc_buffer(struct device *dev, size_t size,
 	gfp |= __GFP_NOWARN | __GFP_HIGHMEM;
 
 	while (count) {
-		int j, order = __fls(count);
+		int j, order;
+
+		for (order = __fls(count); order > 0; --order) {
+			/*
+			 * We do not want OOM killer to be invoked as long
+			 * as we can fall back to single pages, so we force
+			 * __GFP_NORETRY for orders higher than zero.
+			 */
+			pages[i] = alloc_pages(gfp | __GFP_NORETRY, order);
+			if (pages[i])
+				break;
+		}
 
-		pages[i] = alloc_pages(gfp, order);
-		while (!pages[i] && order)
-			pages[i] = alloc_pages(gfp, --order);
-		if (!pages[i])
-			goto error;
+		if (!pages[i]) {
+			/*
+			 * Fall back to single page allocation.
+			 * Might invoke OOM killer as last resort.
+			 */
+			pages[i] = alloc_pages(gfp, 0);
+			if (!pages[i])
+				goto error;
+		}
 
 		if (order) {
 			split_page(pages[i], order);
@@ -1206,7 +1236,7 @@ __iommu_alloc_remap(struct page **pages, size_t size, gfp_t gfp, pgprot_t prot,
1206static dma_addr_t 1236static dma_addr_t
1207__iommu_create_mapping(struct device *dev, struct page **pages, size_t size) 1237__iommu_create_mapping(struct device *dev, struct page **pages, size_t size)
1208{ 1238{
1209 struct dma_iommu_mapping *mapping = dev->archdata.mapping; 1239 struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
1210 unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT; 1240 unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
1211 dma_addr_t dma_addr, iova; 1241 dma_addr_t dma_addr, iova;
1212 int i, ret = DMA_ERROR_CODE; 1242 int i, ret = DMA_ERROR_CODE;
@@ -1242,7 +1272,7 @@ fail:
1242 1272
1243static int __iommu_remove_mapping(struct device *dev, dma_addr_t iova, size_t size) 1273static int __iommu_remove_mapping(struct device *dev, dma_addr_t iova, size_t size)
1244{ 1274{
1245 struct dma_iommu_mapping *mapping = dev->archdata.mapping; 1275 struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
1246 1276
1247 /* 1277 /*
1248 * add optional in-page offset from iova to size and align 1278 * add optional in-page offset from iova to size and align
@@ -1457,7 +1487,7 @@ static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
1457 enum dma_data_direction dir, struct dma_attrs *attrs, 1487 enum dma_data_direction dir, struct dma_attrs *attrs,
1458 bool is_coherent) 1488 bool is_coherent)
1459{ 1489{
1460 struct dma_iommu_mapping *mapping = dev->archdata.mapping; 1490 struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
1461 dma_addr_t iova, iova_base; 1491 dma_addr_t iova, iova_base;
1462 int ret = 0; 1492 int ret = 0;
1463 unsigned int count; 1493 unsigned int count;
@@ -1678,7 +1708,7 @@ static dma_addr_t arm_coherent_iommu_map_page(struct device *dev, struct page *p
1678 unsigned long offset, size_t size, enum dma_data_direction dir, 1708 unsigned long offset, size_t size, enum dma_data_direction dir,
1679 struct dma_attrs *attrs) 1709 struct dma_attrs *attrs)
1680{ 1710{
1681 struct dma_iommu_mapping *mapping = dev->archdata.mapping; 1711 struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
1682 dma_addr_t dma_addr; 1712 dma_addr_t dma_addr;
1683 int ret, prot, len = PAGE_ALIGN(size + offset); 1713 int ret, prot, len = PAGE_ALIGN(size + offset);
1684 1714
@@ -1731,7 +1761,7 @@ static void arm_coherent_iommu_unmap_page(struct device *dev, dma_addr_t handle,
1731 size_t size, enum dma_data_direction dir, 1761 size_t size, enum dma_data_direction dir,
1732 struct dma_attrs *attrs) 1762 struct dma_attrs *attrs)
1733{ 1763{
1734 struct dma_iommu_mapping *mapping = dev->archdata.mapping; 1764 struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
1735 dma_addr_t iova = handle & PAGE_MASK; 1765 dma_addr_t iova = handle & PAGE_MASK;
1736 int offset = handle & ~PAGE_MASK; 1766 int offset = handle & ~PAGE_MASK;
1737 int len = PAGE_ALIGN(size + offset); 1767 int len = PAGE_ALIGN(size + offset);
@@ -1756,7 +1786,7 @@ static void arm_iommu_unmap_page(struct device *dev, dma_addr_t handle,
1756 size_t size, enum dma_data_direction dir, 1786 size_t size, enum dma_data_direction dir,
1757 struct dma_attrs *attrs) 1787 struct dma_attrs *attrs)
1758{ 1788{
1759 struct dma_iommu_mapping *mapping = dev->archdata.mapping; 1789 struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
1760 dma_addr_t iova = handle & PAGE_MASK; 1790 dma_addr_t iova = handle & PAGE_MASK;
1761 struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova)); 1791 struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
1762 int offset = handle & ~PAGE_MASK; 1792 int offset = handle & ~PAGE_MASK;
@@ -1775,7 +1805,7 @@ static void arm_iommu_unmap_page(struct device *dev, dma_addr_t handle,
1775static void arm_iommu_sync_single_for_cpu(struct device *dev, 1805static void arm_iommu_sync_single_for_cpu(struct device *dev,
1776 dma_addr_t handle, size_t size, enum dma_data_direction dir) 1806 dma_addr_t handle, size_t size, enum dma_data_direction dir)
1777{ 1807{
1778 struct dma_iommu_mapping *mapping = dev->archdata.mapping; 1808 struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
1779 dma_addr_t iova = handle & PAGE_MASK; 1809 dma_addr_t iova = handle & PAGE_MASK;
1780 struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova)); 1810 struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
1781 unsigned int offset = handle & ~PAGE_MASK; 1811 unsigned int offset = handle & ~PAGE_MASK;
@@ -1789,7 +1819,7 @@ static void arm_iommu_sync_single_for_cpu(struct device *dev,
1789static void arm_iommu_sync_single_for_device(struct device *dev, 1819static void arm_iommu_sync_single_for_device(struct device *dev,
1790 dma_addr_t handle, size_t size, enum dma_data_direction dir) 1820 dma_addr_t handle, size_t size, enum dma_data_direction dir)
1791{ 1821{
1792 struct dma_iommu_mapping *mapping = dev->archdata.mapping; 1822 struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
1793 dma_addr_t iova = handle & PAGE_MASK; 1823 dma_addr_t iova = handle & PAGE_MASK;
1794 struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova)); 1824 struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
1795 unsigned int offset = handle & ~PAGE_MASK; 1825 unsigned int offset = handle & ~PAGE_MASK;
@@ -1950,7 +1980,7 @@ static int __arm_iommu_attach_device(struct device *dev,
1950 return err; 1980 return err;
1951 1981
1952 kref_get(&mapping->kref); 1982 kref_get(&mapping->kref);
1953 dev->archdata.mapping = mapping; 1983 to_dma_iommu_mapping(dev) = mapping;
1954 1984
1955 pr_debug("Attached IOMMU controller to %s device.\n", dev_name(dev)); 1985 pr_debug("Attached IOMMU controller to %s device.\n", dev_name(dev));
1956 return 0; 1986 return 0;
@@ -1995,7 +2025,7 @@ static void __arm_iommu_detach_device(struct device *dev)
1995 2025
1996 iommu_detach_device(mapping->domain, dev); 2026 iommu_detach_device(mapping->domain, dev);
1997 kref_put(&mapping->kref, release_iommu_mapping); 2027 kref_put(&mapping->kref, release_iommu_mapping);
1998 dev->archdata.mapping = NULL; 2028 to_dma_iommu_mapping(dev) = NULL;
1999 2029
2000 pr_debug("Detached IOMMU controller from %s device.\n", dev_name(dev)); 2030 pr_debug("Detached IOMMU controller from %s device.\n", dev_name(dev));
2001} 2031}
@@ -2053,7 +2083,7 @@ static bool arm_setup_iommu_dma_ops(struct device *dev, u64 dma_base, u64 size,
2053 2083
2054static void arm_teardown_iommu_dma_ops(struct device *dev) 2084static void arm_teardown_iommu_dma_ops(struct device *dev)
2055{ 2085{
2056 struct dma_iommu_mapping *mapping = dev->archdata.mapping; 2086 struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
2057 2087
2058 if (!mapping) 2088 if (!mapping)
2059 return; 2089 return;
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 3d0e9aed4b40..be92fa0f2f35 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -86,55 +86,6 @@ static int __init parse_tag_initrd2(const struct tag *tag)
86 86
87__tagtable(ATAG_INITRD2, parse_tag_initrd2); 87__tagtable(ATAG_INITRD2, parse_tag_initrd2);
88 88
89/*
90 * This keeps memory configuration data used by a couple memory
91 * initialization functions, as well as show_mem() for the skipping
92 * of holes in the memory map. It is populated by arm_add_memory().
93 */
94void show_mem(unsigned int filter)
95{
96 int free = 0, total = 0, reserved = 0;
97 int shared = 0, cached = 0, slab = 0;
98 struct memblock_region *reg;
99
100 printk("Mem-info:\n");
101 show_free_areas(filter);
102
103 for_each_memblock (memory, reg) {
104 unsigned int pfn1, pfn2;
105 struct page *page, *end;
106
107 pfn1 = memblock_region_memory_base_pfn(reg);
108 pfn2 = memblock_region_memory_end_pfn(reg);
109
110 page = pfn_to_page(pfn1);
111 end = pfn_to_page(pfn2 - 1) + 1;
112
113 do {
114 total++;
115 if (PageReserved(page))
116 reserved++;
117 else if (PageSwapCache(page))
118 cached++;
119 else if (PageSlab(page))
120 slab++;
121 else if (!page_count(page))
122 free++;
123 else
124 shared += page_count(page) - 1;
125 pfn1++;
126 page = pfn_to_page(pfn1);
127 } while (pfn1 < pfn2);
128 }
129
130 printk("%d pages of RAM\n", total);
131 printk("%d free pages\n", free);
132 printk("%d reserved pages\n", reserved);
133 printk("%d slab pages\n", slab);
134 printk("%d pages shared\n", shared);
135 printk("%d pages swap cached\n", cached);
136}
137
138static void __init find_limits(unsigned long *min, unsigned long *max_low, 89static void __init find_limits(unsigned long *min, unsigned long *max_low,
139 unsigned long *max_high) 90 unsigned long *max_high)
140{ 91{
diff --git a/arch/arm/mm/proc-arm1020.S b/arch/arm/mm/proc-arm1020.S
index 86ee5d47ce3c..aa0519eed698 100644
--- a/arch/arm/mm/proc-arm1020.S
+++ b/arch/arm/mm/proc-arm1020.S
@@ -507,7 +507,7 @@ cpu_arm1020_name:
507 507
508 .align 508 .align
509 509
510 .section ".proc.info.init", #alloc, #execinstr 510 .section ".proc.info.init", #alloc
511 511
512 .type __arm1020_proc_info,#object 512 .type __arm1020_proc_info,#object
513__arm1020_proc_info: 513__arm1020_proc_info:
@@ -519,7 +519,7 @@ __arm1020_proc_info:
519 .long PMD_TYPE_SECT | \ 519 .long PMD_TYPE_SECT | \
520 PMD_SECT_AP_WRITE | \ 520 PMD_SECT_AP_WRITE | \
521 PMD_SECT_AP_READ 521 PMD_SECT_AP_READ
522 b __arm1020_setup 522 initfn __arm1020_setup, __arm1020_proc_info
523 .long cpu_arch_name 523 .long cpu_arch_name
524 .long cpu_elf_name 524 .long cpu_elf_name
525 .long HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB 525 .long HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB
diff --git a/arch/arm/mm/proc-arm1020e.S b/arch/arm/mm/proc-arm1020e.S
index a6331d78601f..bff4c7f70fd6 100644
--- a/arch/arm/mm/proc-arm1020e.S
+++ b/arch/arm/mm/proc-arm1020e.S
@@ -465,7 +465,7 @@ arm1020e_crval:
465 465
466 .align 466 .align
467 467
468 .section ".proc.info.init", #alloc, #execinstr 468 .section ".proc.info.init", #alloc
469 469
470 .type __arm1020e_proc_info,#object 470 .type __arm1020e_proc_info,#object
471__arm1020e_proc_info: 471__arm1020e_proc_info:
@@ -479,7 +479,7 @@ __arm1020e_proc_info:
479 PMD_BIT4 | \ 479 PMD_BIT4 | \
480 PMD_SECT_AP_WRITE | \ 480 PMD_SECT_AP_WRITE | \
481 PMD_SECT_AP_READ 481 PMD_SECT_AP_READ
482 b __arm1020e_setup 482 initfn __arm1020e_setup, __arm1020e_proc_info
483 .long cpu_arch_name 483 .long cpu_arch_name
484 .long cpu_elf_name 484 .long cpu_elf_name
485 .long HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB | HWCAP_EDSP 485 .long HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB | HWCAP_EDSP
diff --git a/arch/arm/mm/proc-arm1022.S b/arch/arm/mm/proc-arm1022.S
index a126b7a59928..dbb2413fe04d 100644
--- a/arch/arm/mm/proc-arm1022.S
+++ b/arch/arm/mm/proc-arm1022.S
@@ -448,7 +448,7 @@ arm1022_crval:
448 448
449 .align 449 .align
450 450
451 .section ".proc.info.init", #alloc, #execinstr 451 .section ".proc.info.init", #alloc
452 452
453 .type __arm1022_proc_info,#object 453 .type __arm1022_proc_info,#object
454__arm1022_proc_info: 454__arm1022_proc_info:
@@ -462,7 +462,7 @@ __arm1022_proc_info:
462 PMD_BIT4 | \ 462 PMD_BIT4 | \
463 PMD_SECT_AP_WRITE | \ 463 PMD_SECT_AP_WRITE | \
464 PMD_SECT_AP_READ 464 PMD_SECT_AP_READ
465 b __arm1022_setup 465 initfn __arm1022_setup, __arm1022_proc_info
466 .long cpu_arch_name 466 .long cpu_arch_name
467 .long cpu_elf_name 467 .long cpu_elf_name
468 .long HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB | HWCAP_EDSP 468 .long HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB | HWCAP_EDSP
diff --git a/arch/arm/mm/proc-arm1026.S b/arch/arm/mm/proc-arm1026.S
index fc294067e977..0b37b2cef9d3 100644
--- a/arch/arm/mm/proc-arm1026.S
+++ b/arch/arm/mm/proc-arm1026.S
@@ -442,7 +442,7 @@ arm1026_crval:
442 string cpu_arm1026_name, "ARM1026EJ-S" 442 string cpu_arm1026_name, "ARM1026EJ-S"
443 .align 443 .align
444 444
445 .section ".proc.info.init", #alloc, #execinstr 445 .section ".proc.info.init", #alloc
446 446
447 .type __arm1026_proc_info,#object 447 .type __arm1026_proc_info,#object
448__arm1026_proc_info: 448__arm1026_proc_info:
@@ -456,7 +456,7 @@ __arm1026_proc_info:
456 PMD_BIT4 | \ 456 PMD_BIT4 | \
457 PMD_SECT_AP_WRITE | \ 457 PMD_SECT_AP_WRITE | \
458 PMD_SECT_AP_READ 458 PMD_SECT_AP_READ
459 b __arm1026_setup 459 initfn __arm1026_setup, __arm1026_proc_info
460 .long cpu_arch_name 460 .long cpu_arch_name
461 .long cpu_elf_name 461 .long cpu_elf_name
462 .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP|HWCAP_JAVA 462 .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP|HWCAP_JAVA
diff --git a/arch/arm/mm/proc-arm720.S b/arch/arm/mm/proc-arm720.S
index 2baa66b3ac9b..3651cd70e418 100644
--- a/arch/arm/mm/proc-arm720.S
+++ b/arch/arm/mm/proc-arm720.S
@@ -186,7 +186,7 @@ arm720_crval:
186 * See <asm/procinfo.h> for a definition of this structure. 186 * See <asm/procinfo.h> for a definition of this structure.
187 */ 187 */
188 188
189 .section ".proc.info.init", #alloc, #execinstr 189 .section ".proc.info.init", #alloc
190 190
191.macro arm720_proc_info name:req, cpu_val:req, cpu_mask:req, cpu_name:req, cpu_flush:req 191.macro arm720_proc_info name:req, cpu_val:req, cpu_mask:req, cpu_name:req, cpu_flush:req
192 .type __\name\()_proc_info,#object 192 .type __\name\()_proc_info,#object
@@ -203,7 +203,7 @@ __\name\()_proc_info:
203 PMD_BIT4 | \ 203 PMD_BIT4 | \
204 PMD_SECT_AP_WRITE | \ 204 PMD_SECT_AP_WRITE | \
205 PMD_SECT_AP_READ 205 PMD_SECT_AP_READ
206 b \cpu_flush @ cpu_flush 206 initfn \cpu_flush, __\name\()_proc_info @ cpu_flush
207 .long cpu_arch_name @ arch_name 207 .long cpu_arch_name @ arch_name
208 .long cpu_elf_name @ elf_name 208 .long cpu_elf_name @ elf_name
209 .long HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB @ elf_hwcap 209 .long HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB @ elf_hwcap
diff --git a/arch/arm/mm/proc-arm740.S b/arch/arm/mm/proc-arm740.S
index ac1ea6b3bce4..024fb7732407 100644
--- a/arch/arm/mm/proc-arm740.S
+++ b/arch/arm/mm/proc-arm740.S
@@ -132,14 +132,14 @@ __arm740_setup:
132 132
133 .align 133 .align
134 134
135 .section ".proc.info.init", #alloc, #execinstr 135 .section ".proc.info.init", #alloc
136 .type __arm740_proc_info,#object 136 .type __arm740_proc_info,#object
137__arm740_proc_info: 137__arm740_proc_info:
138 .long 0x41807400 138 .long 0x41807400
139 .long 0xfffffff0 139 .long 0xfffffff0
140 .long 0 140 .long 0
141 .long 0 141 .long 0
142 b __arm740_setup 142 initfn __arm740_setup, __arm740_proc_info
143 .long cpu_arch_name 143 .long cpu_arch_name
144 .long cpu_elf_name 144 .long cpu_elf_name
145 .long HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB | HWCAP_26BIT 145 .long HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB | HWCAP_26BIT
diff --git a/arch/arm/mm/proc-arm7tdmi.S b/arch/arm/mm/proc-arm7tdmi.S
index bf6ba4bc30ff..25472d94426d 100644
--- a/arch/arm/mm/proc-arm7tdmi.S
+++ b/arch/arm/mm/proc-arm7tdmi.S
@@ -76,7 +76,7 @@ __arm7tdmi_setup:
76 76
77 .align 77 .align
78 78
79 .section ".proc.info.init", #alloc, #execinstr 79 .section ".proc.info.init", #alloc
80 80
81.macro arm7tdmi_proc_info name:req, cpu_val:req, cpu_mask:req, cpu_name:req, \ 81.macro arm7tdmi_proc_info name:req, cpu_val:req, cpu_mask:req, cpu_name:req, \
82 extra_hwcaps=0 82 extra_hwcaps=0
@@ -86,7 +86,7 @@ __\name\()_proc_info:
86 .long \cpu_mask 86 .long \cpu_mask
87 .long 0 87 .long 0
88 .long 0 88 .long 0
89 b __arm7tdmi_setup 89 initfn __arm7tdmi_setup, __\name\()_proc_info
90 .long cpu_arch_name 90 .long cpu_arch_name
91 .long cpu_elf_name 91 .long cpu_elf_name
92 .long HWCAP_SWP | HWCAP_26BIT | ( \extra_hwcaps ) 92 .long HWCAP_SWP | HWCAP_26BIT | ( \extra_hwcaps )
diff --git a/arch/arm/mm/proc-arm920.S b/arch/arm/mm/proc-arm920.S
index 22bf8dde4f84..7a14bd4414c9 100644
--- a/arch/arm/mm/proc-arm920.S
+++ b/arch/arm/mm/proc-arm920.S
@@ -448,7 +448,7 @@ arm920_crval:
448 448
449 .align 449 .align
450 450
451 .section ".proc.info.init", #alloc, #execinstr 451 .section ".proc.info.init", #alloc
452 452
453 .type __arm920_proc_info,#object 453 .type __arm920_proc_info,#object
454__arm920_proc_info: 454__arm920_proc_info:
@@ -464,7 +464,7 @@ __arm920_proc_info:
464 PMD_BIT4 | \ 464 PMD_BIT4 | \
465 PMD_SECT_AP_WRITE | \ 465 PMD_SECT_AP_WRITE | \
466 PMD_SECT_AP_READ 466 PMD_SECT_AP_READ
467 b __arm920_setup 467 initfn __arm920_setup, __arm920_proc_info
468 .long cpu_arch_name 468 .long cpu_arch_name
469 .long cpu_elf_name 469 .long cpu_elf_name
470 .long HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB 470 .long HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB
diff --git a/arch/arm/mm/proc-arm922.S b/arch/arm/mm/proc-arm922.S
index 0c6d5ac5a6d4..edccfcdcd551 100644
--- a/arch/arm/mm/proc-arm922.S
+++ b/arch/arm/mm/proc-arm922.S
@@ -426,7 +426,7 @@ arm922_crval:
426 426
427 .align 427 .align
428 428
429 .section ".proc.info.init", #alloc, #execinstr 429 .section ".proc.info.init", #alloc
430 430
431 .type __arm922_proc_info,#object 431 .type __arm922_proc_info,#object
432__arm922_proc_info: 432__arm922_proc_info:
@@ -442,7 +442,7 @@ __arm922_proc_info:
442 PMD_BIT4 | \ 442 PMD_BIT4 | \
443 PMD_SECT_AP_WRITE | \ 443 PMD_SECT_AP_WRITE | \
444 PMD_SECT_AP_READ 444 PMD_SECT_AP_READ
445 b __arm922_setup 445 initfn __arm922_setup, __arm922_proc_info
446 .long cpu_arch_name 446 .long cpu_arch_name
447 .long cpu_elf_name 447 .long cpu_elf_name
448 .long HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB 448 .long HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB
diff --git a/arch/arm/mm/proc-arm925.S b/arch/arm/mm/proc-arm925.S
index c32d073282ea..ede8c54ab4aa 100644
--- a/arch/arm/mm/proc-arm925.S
+++ b/arch/arm/mm/proc-arm925.S
@@ -494,7 +494,7 @@ arm925_crval:
494 494
495 .align 495 .align
496 496
497 .section ".proc.info.init", #alloc, #execinstr 497 .section ".proc.info.init", #alloc
498 498
499.macro arm925_proc_info name:req, cpu_val:req, cpu_mask:req, cpu_name:req, cache 499.macro arm925_proc_info name:req, cpu_val:req, cpu_mask:req, cpu_name:req, cache
500 .type __\name\()_proc_info,#object 500 .type __\name\()_proc_info,#object
@@ -510,7 +510,7 @@ __\name\()_proc_info:
510 PMD_BIT4 | \ 510 PMD_BIT4 | \
511 PMD_SECT_AP_WRITE | \ 511 PMD_SECT_AP_WRITE | \
512 PMD_SECT_AP_READ 512 PMD_SECT_AP_READ
513 b __arm925_setup 513 initfn __arm925_setup, __\name\()_proc_info
514 .long cpu_arch_name 514 .long cpu_arch_name
515 .long cpu_elf_name 515 .long cpu_elf_name
516 .long HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB 516 .long HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB
diff --git a/arch/arm/mm/proc-arm926.S b/arch/arm/mm/proc-arm926.S
index 252b2503038d..fb827c633693 100644
--- a/arch/arm/mm/proc-arm926.S
+++ b/arch/arm/mm/proc-arm926.S
@@ -474,7 +474,7 @@ arm926_crval:
474 474
475 .align 475 .align
476 476
477 .section ".proc.info.init", #alloc, #execinstr 477 .section ".proc.info.init", #alloc
478 478
479 .type __arm926_proc_info,#object 479 .type __arm926_proc_info,#object
480__arm926_proc_info: 480__arm926_proc_info:
@@ -490,7 +490,7 @@ __arm926_proc_info:
490 PMD_BIT4 | \ 490 PMD_BIT4 | \
491 PMD_SECT_AP_WRITE | \ 491 PMD_SECT_AP_WRITE | \
492 PMD_SECT_AP_READ 492 PMD_SECT_AP_READ
493 b __arm926_setup 493 initfn __arm926_setup, __arm926_proc_info
494 .long cpu_arch_name 494 .long cpu_arch_name
495 .long cpu_elf_name 495 .long cpu_elf_name
496 .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP|HWCAP_JAVA 496 .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP|HWCAP_JAVA
diff --git a/arch/arm/mm/proc-arm940.S b/arch/arm/mm/proc-arm940.S
index e5212d489377..ee5b66f847c4 100644
--- a/arch/arm/mm/proc-arm940.S
+++ b/arch/arm/mm/proc-arm940.S
@@ -297,26 +297,16 @@ __arm940_setup:
 	mcr	p15, 0, r0, c6, c0, 1
 
 	ldr	r0, =(CONFIG_DRAM_BASE & 0xFFFFF000) @ base[31:12] of RAM
-	ldr	r1, =(CONFIG_DRAM_SIZE >> 12)	@ size of RAM (must be >= 4KB)
-	mov	r2, #10				@ 11 is the minimum (4KB)
-1:	add	r2, r2, #1			@ area size *= 2
-	mov	r1, r1, lsr #1
-	bne	1b				@ count not zero r-shift
-	orr	r0, r0, r2, lsl #1		@ the area register value
-	orr	r0, r0, #1			@ set enable bit
-	mcr	p15, 0, r0, c6, c1, 0		@ set area 1, RAM
-	mcr	p15, 0, r0, c6, c1, 1
+	ldr	r7, =CONFIG_DRAM_SIZE >> 12	@ size of RAM (must be >= 4KB)
+	pr_val	r3, r0, r7, #1
+	mcr	p15, 0, r3, c6, c1, 0		@ set area 1, RAM
+	mcr	p15, 0, r3, c6, c1, 1
 
 	ldr	r0, =(CONFIG_FLASH_MEM_BASE & 0xFFFFF000) @ base[31:12] of FLASH
-	ldr	r1, =(CONFIG_FLASH_SIZE >> 12)	@ size of FLASH (must be >= 4KB)
-	mov	r2, #10				@ 11 is the minimum (4KB)
-1:	add	r2, r2, #1			@ area size *= 2
-	mov	r1, r1, lsr #1
-	bne	1b				@ count not zero r-shift
-	orr	r0, r0, r2, lsl #1		@ the area register value
-	orr	r0, r0, #1			@ set enable bit
-	mcr	p15, 0, r0, c6, c2, 0		@ set area 2, ROM/FLASH
-	mcr	p15, 0, r0, c6, c2, 1
+	ldr	r7, =CONFIG_FLASH_SIZE		@ size of FLASH (must be >= 4KB)
+	pr_val	r3, r0, r6, #1
+	mcr	p15, 0, r3, c6, c2, 0		@ set area 2, ROM/FLASH
+	mcr	p15, 0, r3, c6, c2, 1
 
 	mov	r0, #0x06
 	mcr	p15, 0, r0, c2, c0, 0		@ Region 1&2 cacheable
@@ -354,14 +344,14 @@ __arm940_setup:
354 344
355 .align 345 .align
356 346
357 .section ".proc.info.init", #alloc, #execinstr 347 .section ".proc.info.init", #alloc
358 348
359 .type __arm940_proc_info,#object 349 .type __arm940_proc_info,#object
360__arm940_proc_info: 350__arm940_proc_info:
361 .long 0x41009400 351 .long 0x41009400
362 .long 0xff00fff0 352 .long 0xff00fff0
363 .long 0 353 .long 0
364 b __arm940_setup 354 initfn __arm940_setup, __arm940_proc_info
365 .long cpu_arch_name 355 .long cpu_arch_name
366 .long cpu_elf_name 356 .long cpu_elf_name
367 .long HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB 357 .long HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB
diff --git a/arch/arm/mm/proc-arm946.S b/arch/arm/mm/proc-arm946.S
index b3dd9b2d0b8e..7361837edc31 100644
--- a/arch/arm/mm/proc-arm946.S
+++ b/arch/arm/mm/proc-arm946.S
@@ -343,24 +343,14 @@ __arm946_setup:
343 mcr p15, 0, r0, c6, c0, 0 @ set region 0, default 343 mcr p15, 0, r0, c6, c0, 0 @ set region 0, default
344 344
345 ldr r0, =(CONFIG_DRAM_BASE & 0xFFFFF000) @ base[31:12] of RAM 345 ldr r0, =(CONFIG_DRAM_BASE & 0xFFFFF000) @ base[31:12] of RAM
346 ldr r1, =(CONFIG_DRAM_SIZE >> 12) @ size of RAM (must be >= 4KB) 346 ldr r7, =CONFIG_DRAM_SIZE @ size of RAM (must be >= 4KB)
347 mov r2, #10 @ 11 is the minimum (4KB) 347 pr_val r3, r0, r7, #1
3481: add r2, r2, #1 @ area size *= 2 348 mcr p15, 0, r3, c6, c1, 0
349 mov r1, r1, lsr #1
350 bne 1b @ count not zero r-shift
351 orr r0, r0, r2, lsl #1 @ the region register value
352 orr r0, r0, #1 @ set enable bit
353 mcr p15, 0, r0, c6, c1, 0 @ set region 1, RAM
354 349
355 ldr r0, =(CONFIG_FLASH_MEM_BASE & 0xFFFFF000) @ base[31:12] of FLASH 350 ldr r0, =(CONFIG_FLASH_MEM_BASE & 0xFFFFF000) @ base[31:12] of FLASH
356 ldr r1, =(CONFIG_FLASH_SIZE >> 12) @ size of FLASH (must be >= 4KB) 351 ldr r7, =CONFIG_FLASH_SIZE @ size of FLASH (must be >= 4KB)
357 mov r2, #10 @ 11 is the minimum (4KB) 352 pr_val r3, r0, r7, #1
3581: add r2, r2, #1 @ area size *= 2 353 mcr p15, 0, r3, c6, c2, 0
359 mov r1, r1, lsr #1
360 bne 1b @ count not zero r-shift
361 orr r0, r0, r2, lsl #1 @ the region register value
362 orr r0, r0, #1 @ set enable bit
363 mcr p15, 0, r0, c6, c2, 0 @ set region 2, ROM/FLASH
364 354
365 mov r0, #0x06 355 mov r0, #0x06
366 mcr p15, 0, r0, c2, c0, 0 @ region 1,2 d-cacheable 356 mcr p15, 0, r0, c2, c0, 0 @ region 1,2 d-cacheable
@@ -409,14 +399,14 @@ __arm946_setup:
409 399
410 .align 400 .align
411 401
412 .section ".proc.info.init", #alloc, #execinstr 402 .section ".proc.info.init", #alloc
413 .type __arm946_proc_info,#object 403 .type __arm946_proc_info,#object
414__arm946_proc_info: 404__arm946_proc_info:
415 .long 0x41009460 405 .long 0x41009460
416 .long 0xff00fff0 406 .long 0xff00fff0
417 .long 0 407 .long 0
418 .long 0 408 .long 0
419 b __arm946_setup 409 initfn __arm946_setup, __arm946_proc_info
420 .long cpu_arch_name 410 .long cpu_arch_name
421 .long cpu_elf_name 411 .long cpu_elf_name
422 .long HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB 412 .long HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB
diff --git a/arch/arm/mm/proc-arm9tdmi.S b/arch/arm/mm/proc-arm9tdmi.S
index 8227322bbb8f..7fac8c612134 100644
--- a/arch/arm/mm/proc-arm9tdmi.S
+++ b/arch/arm/mm/proc-arm9tdmi.S
@@ -70,7 +70,7 @@ __arm9tdmi_setup:
70 70
71 .align 71 .align
72 72
73 .section ".proc.info.init", #alloc, #execinstr 73 .section ".proc.info.init", #alloc
74 74
75.macro arm9tdmi_proc_info name:req, cpu_val:req, cpu_mask:req, cpu_name:req 75.macro arm9tdmi_proc_info name:req, cpu_val:req, cpu_mask:req, cpu_name:req
76 .type __\name\()_proc_info, #object 76 .type __\name\()_proc_info, #object
@@ -79,7 +79,7 @@ __\name\()_proc_info:
79 .long \cpu_mask 79 .long \cpu_mask
80 .long 0 80 .long 0
81 .long 0 81 .long 0
82 b __arm9tdmi_setup 82 initfn __arm9tdmi_setup, __\name\()_proc_info
83 .long cpu_arch_name 83 .long cpu_arch_name
84 .long cpu_elf_name 84 .long cpu_elf_name
85 .long HWCAP_SWP | HWCAP_THUMB | HWCAP_26BIT 85 .long HWCAP_SWP | HWCAP_THUMB | HWCAP_26BIT
diff --git a/arch/arm/mm/proc-fa526.S b/arch/arm/mm/proc-fa526.S
index c494886892ba..4001b73af4ee 100644
--- a/arch/arm/mm/proc-fa526.S
+++ b/arch/arm/mm/proc-fa526.S
@@ -190,7 +190,7 @@ fa526_cr1_set:
190 190
191 .align 191 .align
192 192
193 .section ".proc.info.init", #alloc, #execinstr 193 .section ".proc.info.init", #alloc
194 194
195 .type __fa526_proc_info,#object 195 .type __fa526_proc_info,#object
196__fa526_proc_info: 196__fa526_proc_info:
@@ -206,7 +206,7 @@ __fa526_proc_info:
206 PMD_BIT4 | \ 206 PMD_BIT4 | \
207 PMD_SECT_AP_WRITE | \ 207 PMD_SECT_AP_WRITE | \
208 PMD_SECT_AP_READ 208 PMD_SECT_AP_READ
209 b __fa526_setup 209 initfn __fa526_setup, __fa526_proc_info
210 .long cpu_arch_name 210 .long cpu_arch_name
211 .long cpu_elf_name 211 .long cpu_elf_name
212 .long HWCAP_SWP | HWCAP_HALF 212 .long HWCAP_SWP | HWCAP_HALF
diff --git a/arch/arm/mm/proc-feroceon.S b/arch/arm/mm/proc-feroceon.S
index 03a1b75f2e16..e494d6d6acbe 100644
--- a/arch/arm/mm/proc-feroceon.S
+++ b/arch/arm/mm/proc-feroceon.S
@@ -584,7 +584,7 @@ feroceon_crval:
584 584
585 .align 585 .align
586 586
587 .section ".proc.info.init", #alloc, #execinstr 587 .section ".proc.info.init", #alloc
588 588
589.macro feroceon_proc_info name:req, cpu_val:req, cpu_mask:req, cpu_name:req, cache:req 589.macro feroceon_proc_info name:req, cpu_val:req, cpu_mask:req, cpu_name:req, cache:req
590 .type __\name\()_proc_info,#object 590 .type __\name\()_proc_info,#object
@@ -601,7 +601,8 @@ __\name\()_proc_info:
601 PMD_BIT4 | \ 601 PMD_BIT4 | \
602 PMD_SECT_AP_WRITE | \ 602 PMD_SECT_AP_WRITE | \
603 PMD_SECT_AP_READ 603 PMD_SECT_AP_READ
604 b __feroceon_setup 604 initfn __feroceon_setup, __\name\()_proc_info
605 .long __feroceon_setup
605 .long cpu_arch_name 606 .long cpu_arch_name
606 .long cpu_elf_name 607 .long cpu_elf_name
607 .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP 608 .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
diff --git a/arch/arm/mm/proc-macros.S b/arch/arm/mm/proc-macros.S
index 082b9f2f7e90..c671f345266a 100644
--- a/arch/arm/mm/proc-macros.S
+++ b/arch/arm/mm/proc-macros.S
@@ -331,3 +331,31 @@ ENTRY(\name\()_tlb_fns)
331 .globl \x 331 .globl \x
332 .equ \x, \y 332 .equ \x, \y
333.endm 333.endm
334
335.macro initfn, func, base
336 .long \func - \base
337.endm
338
339 /*
340 * Macro to calculate the log2 size for the protection region
341 * registers. This calculates rd = log2(size) - 1. tmp must
342 * not be the same register as rd.
343 */
344.macro pr_sz, rd, size, tmp
345 mov \tmp, \size, lsr #12
346 mov \rd, #11
3471: movs \tmp, \tmp, lsr #1
348 addne \rd, \rd, #1
349 bne 1b
350.endm
351
352 /*
353 * Macro to generate a protection region register value
354 * given a pre-masked address, size, and enable bit.
355 * Corrupts size.
356 */
357.macro pr_val, dest, addr, size, enable
358 pr_sz \dest, \size, \size @ calculate log2(size) - 1
359 orr \dest, \addr, \dest, lsl #1 @ mask in the region size
360 orr \dest, \dest, \enable
361.endm
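pr_sz computes log2(size) - 1 (11 encodes the 4KB minimum) and pr_val merges that value, shifted left by one, into the pre-masked base address together with the enable bit, matching the protection-region register layout used by the ARM940/ARM946 setup code above. The same computation as a hedged C sketch, handy for checking an encoding by hand (not kernel API):

#include <stdint.h>

/* Illustrative re-statement of pr_sz/pr_val */
static uint32_t pr_val(uint32_t base, uint32_t size, uint32_t enable)
{
	uint32_t sz = 11;	/* 11 encodes the 4KB minimum */

	size >>= 12;		/* size in 4KB units */
	while (size >>= 1)	/* log2(size) - 1, as pr_sz computes */
		sz++;

	return base | (sz << 1) | enable;
}

For example, a 64MB RAM region at base 0x00000000 with the enable bit set encodes to 0x33 (size field 25, i.e. log2(64MB) - 1).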
diff --git a/arch/arm/mm/proc-mohawk.S b/arch/arm/mm/proc-mohawk.S
index 53d393455f13..d65edf717bf7 100644
--- a/arch/arm/mm/proc-mohawk.S
+++ b/arch/arm/mm/proc-mohawk.S
@@ -427,7 +427,7 @@ mohawk_crval:
427 427
428 .align 428 .align
429 429
430 .section ".proc.info.init", #alloc, #execinstr 430 .section ".proc.info.init", #alloc
431 431
432 .type __88sv331x_proc_info,#object 432 .type __88sv331x_proc_info,#object
433__88sv331x_proc_info: 433__88sv331x_proc_info:
@@ -443,7 +443,7 @@ __88sv331x_proc_info:
443 PMD_BIT4 | \ 443 PMD_BIT4 | \
444 PMD_SECT_AP_WRITE | \ 444 PMD_SECT_AP_WRITE | \
445 PMD_SECT_AP_READ 445 PMD_SECT_AP_READ
446 b __mohawk_setup 446 initfn __mohawk_setup, __88sv331x_proc_info
447 .long cpu_arch_name 447 .long cpu_arch_name
448 .long cpu_elf_name 448 .long cpu_elf_name
449 .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP 449 .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
diff --git a/arch/arm/mm/proc-sa110.S b/arch/arm/mm/proc-sa110.S
index 8008a0461cf5..ee2ce496239f 100644
--- a/arch/arm/mm/proc-sa110.S
+++ b/arch/arm/mm/proc-sa110.S
@@ -199,7 +199,7 @@ sa110_crval:
199 199
200 .align 200 .align
201 201
202 .section ".proc.info.init", #alloc, #execinstr 202 .section ".proc.info.init", #alloc
203 203
204 .type __sa110_proc_info,#object 204 .type __sa110_proc_info,#object
205__sa110_proc_info: 205__sa110_proc_info:
@@ -213,7 +213,7 @@ __sa110_proc_info:
213 .long PMD_TYPE_SECT | \ 213 .long PMD_TYPE_SECT | \
214 PMD_SECT_AP_WRITE | \ 214 PMD_SECT_AP_WRITE | \
215 PMD_SECT_AP_READ 215 PMD_SECT_AP_READ
216 b __sa110_setup 216 initfn __sa110_setup, __sa110_proc_info
217 .long cpu_arch_name 217 .long cpu_arch_name
218 .long cpu_elf_name 218 .long cpu_elf_name
219 .long HWCAP_SWP | HWCAP_HALF | HWCAP_26BIT | HWCAP_FAST_MULT 219 .long HWCAP_SWP | HWCAP_HALF | HWCAP_26BIT | HWCAP_FAST_MULT
diff --git a/arch/arm/mm/proc-sa1100.S b/arch/arm/mm/proc-sa1100.S
index 89f97ac648a9..222d5836f666 100644
--- a/arch/arm/mm/proc-sa1100.S
+++ b/arch/arm/mm/proc-sa1100.S
@@ -242,7 +242,7 @@ sa1100_crval:
242 242
243 .align 243 .align
244 244
245 .section ".proc.info.init", #alloc, #execinstr 245 .section ".proc.info.init", #alloc
246 246
247.macro sa1100_proc_info name:req, cpu_val:req, cpu_mask:req, cpu_name:req 247.macro sa1100_proc_info name:req, cpu_val:req, cpu_mask:req, cpu_name:req
248 .type __\name\()_proc_info,#object 248 .type __\name\()_proc_info,#object
@@ -257,7 +257,7 @@ __\name\()_proc_info:
257 .long PMD_TYPE_SECT | \ 257 .long PMD_TYPE_SECT | \
258 PMD_SECT_AP_WRITE | \ 258 PMD_SECT_AP_WRITE | \
259 PMD_SECT_AP_READ 259 PMD_SECT_AP_READ
260 b __sa1100_setup 260 initfn __sa1100_setup, __\name\()_proc_info
261 .long cpu_arch_name 261 .long cpu_arch_name
262 .long cpu_elf_name 262 .long cpu_elf_name
263 .long HWCAP_SWP | HWCAP_HALF | HWCAP_26BIT | HWCAP_FAST_MULT 263 .long HWCAP_SWP | HWCAP_HALF | HWCAP_26BIT | HWCAP_FAST_MULT
diff --git a/arch/arm/mm/proc-v6.S b/arch/arm/mm/proc-v6.S
index d0390f4b3f18..06d890a2342b 100644
--- a/arch/arm/mm/proc-v6.S
+++ b/arch/arm/mm/proc-v6.S
@@ -264,7 +264,7 @@ v6_crval:
 	string	cpu_elf_name, "v6"
 	.align
 
-	.section ".proc.info.init", #alloc, #execinstr
+	.section ".proc.info.init", #alloc
 
 	/*
 	 * Match any ARMv6 processor core.
@@ -287,7 +287,7 @@ __v6_proc_info:
 		PMD_SECT_XN | \
 		PMD_SECT_AP_WRITE | \
 		PMD_SECT_AP_READ
-	b	__v6_setup
+	initfn	__v6_setup, __v6_proc_info
 	.long	cpu_arch_name
 	.long	cpu_elf_name
 	/* See also feat_v6_fixup() for HWCAP_TLS */
diff --git a/arch/arm/mm/proc-v7-2level.S b/arch/arm/mm/proc-v7-2level.S
index ed448d8a596b..10405b8d31af 100644
--- a/arch/arm/mm/proc-v7-2level.S
+++ b/arch/arm/mm/proc-v7-2level.S
@@ -37,15 +37,18 @@
  * It is assumed that:
  * - we are not using split page tables
  */
-ENTRY(cpu_v7_switch_mm)
+ENTRY(cpu_ca8_switch_mm)
 #ifdef CONFIG_MMU
 	mov	r2, #0
-	mmid	r1, r1				@ get mm->context.id
-	ALT_SMP(orr	r0, r0, #TTB_FLAGS_SMP)
-	ALT_UP(orr	r0, r0, #TTB_FLAGS_UP)
 #ifdef CONFIG_ARM_ERRATA_430973
 	mcr	p15, 0, r2, c7, c5, 6		@ flush BTAC/BTB
 #endif
+#endif
+ENTRY(cpu_v7_switch_mm)
+#ifdef CONFIG_MMU
+	mmid	r1, r1				@ get mm->context.id
+	ALT_SMP(orr	r0, r0, #TTB_FLAGS_SMP)
+	ALT_UP(orr	r0, r0, #TTB_FLAGS_UP)
 #ifdef CONFIG_PID_IN_CONTEXTIDR
 	mrc	p15, 0, r2, c13, c0, 1		@ read current context ID
 	lsr	r2, r2, #8			@ extract the PID
@@ -61,6 +64,7 @@ ENTRY(cpu_v7_switch_mm)
 #endif
 	bx	lr
 ENDPROC(cpu_v7_switch_mm)
+ENDPROC(cpu_ca8_switch_mm)
 
 /*
  * cpu_v7_set_pte_ext(ptep, pte)
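
The two hunks above split the errata 430973 BTB/BTAC flush out of the common path: Cortex-A8 parts get their own cpu_ca8_switch_mm entry point, which performs the flush and then falls through into cpu_v7_switch_mm, while every other ARMv7 CPU enters the common body directly and never executes the flush. A rough C sketch of that control flow, with hypothetical names and signatures rather than the kernel's actual C API:

static void flush_btac_btb(void)
{
	/* corresponds to the "mcr p15, 0, r2, c7, c5, 6" in the assembly above */
}

static void v7_switch_mm(unsigned long pgd_phys, unsigned int asid)
{
	/* common body: program the new ASID and translation table base */
}

static void ca8_switch_mm(unsigned long pgd_phys, unsigned int asid)
{
	flush_btac_btb();		/* errata 430973 workaround, Cortex-A8 only */
	v7_switch_mm(pgd_phys, asid);	/* then the shared ARMv7 path */
}
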
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index 8b4ee5e81c14..3d1054f11a8a 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -153,6 +153,21 @@ ENDPROC(cpu_v7_do_resume)
 #endif
 
 /*
+ * Cortex-A8
+ */
+	globl_equ	cpu_ca8_proc_init,	cpu_v7_proc_init
+	globl_equ	cpu_ca8_proc_fin,	cpu_v7_proc_fin
+	globl_equ	cpu_ca8_reset,		cpu_v7_reset
+	globl_equ	cpu_ca8_do_idle,	cpu_v7_do_idle
+	globl_equ	cpu_ca8_dcache_clean_area, cpu_v7_dcache_clean_area
+	globl_equ	cpu_ca8_set_pte_ext,	cpu_v7_set_pte_ext
+	globl_equ	cpu_ca8_suspend_size,	cpu_v7_suspend_size
+#ifdef CONFIG_ARM_CPU_SUSPEND
+	globl_equ	cpu_ca8_do_suspend,	cpu_v7_do_suspend
+	globl_equ	cpu_ca8_do_resume,	cpu_v7_do_resume
+#endif
+
+/*
  * Cortex-A9 processor functions
  */
 	globl_equ	cpu_ca9mp_proc_init,	cpu_v7_proc_init
@@ -451,7 +466,10 @@ __v7_setup_stack:
 
 	@ define struct processor (see <asm/proc-fns.h> and proc-macros.S)
 	define_processor_functions	v7, dabort=v7_early_abort, pabort=v7_pabort, suspend=1
+#ifndef CONFIG_ARM_LPAE
+	define_processor_functions	ca8, dabort=v7_early_abort, pabort=v7_pabort, suspend=1
 	define_processor_functions	ca9mp, dabort=v7_early_abort, pabort=v7_pabort, suspend=1
+#endif
 #ifdef CONFIG_CPU_PJ4B
 	define_processor_functions	pj4b, dabort=v7_early_abort, pabort=v7_pabort, suspend=1
 #endif
@@ -462,19 +480,19 @@ __v7_setup_stack:
 	string	cpu_elf_name, "v7"
 	.align
 
-	.section ".proc.info.init", #alloc, #execinstr
+	.section ".proc.info.init", #alloc
 
 	/*
 	 * Standard v7 proc info content
 	 */
-.macro __v7_proc initfunc, mm_mmuflags = 0, io_mmuflags = 0, hwcaps = 0, proc_fns = v7_processor_functions
+.macro __v7_proc name, initfunc, mm_mmuflags = 0, io_mmuflags = 0, hwcaps = 0, proc_fns = v7_processor_functions
 	ALT_SMP(.long	PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_AP_READ | \
 			PMD_SECT_AF | PMD_FLAGS_SMP | \mm_mmuflags)
 	ALT_UP(.long	PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_AP_READ | \
 			PMD_SECT_AF | PMD_FLAGS_UP | \mm_mmuflags)
 	.long	PMD_TYPE_SECT | PMD_SECT_AP_WRITE | \
 		PMD_SECT_AP_READ | PMD_SECT_AF | \io_mmuflags
-	W(b)	\initfunc
+	initfn	\initfunc, \name
 	.long	cpu_arch_name
 	.long	cpu_elf_name
 	.long	HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB | HWCAP_FAST_MULT | \
@@ -494,7 +512,7 @@ __v7_setup_stack:
 __v7_ca5mp_proc_info:
 	.long	0x410fc050
 	.long	0xff0ffff0
-	__v7_proc __v7_ca5mp_setup
+	__v7_proc __v7_ca5mp_proc_info, __v7_ca5mp_setup
 	.size	__v7_ca5mp_proc_info, . - __v7_ca5mp_proc_info
 
 	/*
@@ -504,9 +522,19 @@ __v7_ca5mp_proc_info:
 __v7_ca9mp_proc_info:
 	.long	0x410fc090
 	.long	0xff0ffff0
-	__v7_proc __v7_ca9mp_setup, proc_fns = ca9mp_processor_functions
+	__v7_proc __v7_ca9mp_proc_info, __v7_ca9mp_setup, proc_fns = ca9mp_processor_functions
 	.size	__v7_ca9mp_proc_info, . - __v7_ca9mp_proc_info
 
+	/*
+	 * ARM Ltd. Cortex A8 processor.
+	 */
+	.type	__v7_ca8_proc_info, #object
+__v7_ca8_proc_info:
+	.long	0x410fc080
+	.long	0xff0ffff0
+	__v7_proc __v7_ca8_proc_info, __v7_setup, proc_fns = ca8_processor_functions
+	.size	__v7_ca8_proc_info, . - __v7_ca8_proc_info
+
 #endif	/* CONFIG_ARM_LPAE */
 
 	/*
@@ -517,7 +545,7 @@ __v7_ca9mp_proc_info:
 __v7_pj4b_proc_info:
 	.long	0x560f5800
 	.long	0xff0fff00
-	__v7_proc __v7_pj4b_setup, proc_fns = pj4b_processor_functions
+	__v7_proc __v7_pj4b_proc_info, __v7_pj4b_setup, proc_fns = pj4b_processor_functions
 	.size	__v7_pj4b_proc_info, . - __v7_pj4b_proc_info
 #endif
 
@@ -528,7 +556,7 @@ __v7_pj4b_proc_info:
 __v7_cr7mp_proc_info:
 	.long	0x410fc170
 	.long	0xff0ffff0
-	__v7_proc __v7_cr7mp_setup
+	__v7_proc __v7_cr7mp_proc_info, __v7_cr7mp_setup
 	.size	__v7_cr7mp_proc_info, . - __v7_cr7mp_proc_info
 
 	/*
@@ -538,7 +566,7 @@ __v7_cr7mp_proc_info:
 __v7_ca7mp_proc_info:
 	.long	0x410fc070
 	.long	0xff0ffff0
-	__v7_proc __v7_ca7mp_setup
+	__v7_proc __v7_ca7mp_proc_info, __v7_ca7mp_setup
 	.size	__v7_ca7mp_proc_info, . - __v7_ca7mp_proc_info
 
 	/*
@@ -548,7 +576,7 @@ __v7_ca7mp_proc_info:
 __v7_ca12mp_proc_info:
 	.long	0x410fc0d0
 	.long	0xff0ffff0
-	__v7_proc __v7_ca12mp_setup
+	__v7_proc __v7_ca12mp_proc_info, __v7_ca12mp_setup
 	.size	__v7_ca12mp_proc_info, . - __v7_ca12mp_proc_info
 
 	/*
@@ -558,7 +586,7 @@ __v7_ca12mp_proc_info:
 __v7_ca15mp_proc_info:
 	.long	0x410fc0f0
 	.long	0xff0ffff0
-	__v7_proc __v7_ca15mp_setup
+	__v7_proc __v7_ca15mp_proc_info, __v7_ca15mp_setup
 	.size	__v7_ca15mp_proc_info, . - __v7_ca15mp_proc_info
 
 	/*
@@ -568,7 +596,7 @@ __v7_ca15mp_proc_info:
 __v7_b15mp_proc_info:
 	.long	0x420f00f0
 	.long	0xff0ffff0
-	__v7_proc __v7_b15mp_setup
+	__v7_proc __v7_b15mp_proc_info, __v7_b15mp_setup
 	.size	__v7_b15mp_proc_info, . - __v7_b15mp_proc_info
 
 	/*
@@ -578,7 +606,7 @@ __v7_b15mp_proc_info:
 __v7_ca17mp_proc_info:
 	.long	0x410fc0e0
 	.long	0xff0ffff0
-	__v7_proc __v7_ca17mp_setup
+	__v7_proc __v7_ca17mp_proc_info, __v7_ca17mp_setup
 	.size	__v7_ca17mp_proc_info, . - __v7_ca17mp_proc_info
 
 	/*
@@ -594,7 +622,7 @@ __krait_proc_info:
 	 * do support them. They also don't indicate support for fused multiply
 	 * instructions even though they actually do support them.
 	 */
-	__v7_proc __v7_setup, hwcaps = HWCAP_IDIV | HWCAP_VFPv4
+	__v7_proc __krait_proc_info, __v7_setup, hwcaps = HWCAP_IDIV | HWCAP_VFPv4
 	.size	__krait_proc_info, . - __krait_proc_info
 
 	/*
@@ -604,5 +632,5 @@ __krait_proc_info:
 __v7_proc_info:
 	.long	0x000f0000		@ Required ID value
 	.long	0x000f0000		@ Mask for ID
-	__v7_proc __v7_setup
+	__v7_proc __v7_proc_info, __v7_setup
 	.size	__v7_proc_info, . - __v7_proc_info
diff --git a/arch/arm/mm/proc-v7m.S b/arch/arm/mm/proc-v7m.S
index d1e68b553d3b..e08e1f2bab76 100644
--- a/arch/arm/mm/proc-v7m.S
+++ b/arch/arm/mm/proc-v7m.S
@@ -135,7 +135,7 @@ __v7m_setup_stack_top:
 	string cpu_elf_name "v7m"
 	string cpu_v7m_name "ARMv7-M"
 
-	.section ".proc.info.init", #alloc, #execinstr
+	.section ".proc.info.init", #alloc
 
 	/*
 	 * Match any ARMv7-M processor core.
@@ -146,7 +146,7 @@ __v7m_proc_info:
 	.long	0x000f0000		@ Mask for ID
 	.long	0			@ proc_info_list.__cpu_mm_mmu_flags
 	.long	0			@ proc_info_list.__cpu_io_mmu_flags
-	b	__v7m_setup		@ proc_info_list.__cpu_flush
+	initfn	__v7m_setup, __v7m_proc_info	@ proc_info_list.__cpu_flush
 	.long	cpu_arch_name
 	.long	cpu_elf_name
 	.long	HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT
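
The "@ proc_info_list.*" comments in the record above show how each entry in ".proc.info.init" mirrors the C-side proc_info_list descriptor (declared in arch/arm/include/asm/procinfo.h) that the early boot code matches against the CPU ID. A partial sketch of the fields referenced here; the layout and types are abridged and illustrative rather than authoritative:

struct proc_info_list {
	unsigned int	cpu_val;		/* required ID value */
	unsigned int	cpu_mask;		/* mask applied to the CPU ID register */
	unsigned long	__cpu_mm_mmu_flags;	/* section flags for kernel memory */
	unsigned long	__cpu_io_mmu_flags;	/* section flags for device mappings */
	unsigned long	__cpu_flush;		/* setup entry, now emitted via initfn */
	/* ... architecture name, ELF name, hwcaps, processor function table ... */
};
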
diff --git a/arch/arm/mm/proc-xsc3.S b/arch/arm/mm/proc-xsc3.S
index f8acdfece036..293dcc2c441f 100644
--- a/arch/arm/mm/proc-xsc3.S
+++ b/arch/arm/mm/proc-xsc3.S
@@ -499,7 +499,7 @@ xsc3_crval:
 
 	.align
 
-	.section ".proc.info.init", #alloc, #execinstr
+	.section ".proc.info.init", #alloc
 
 .macro xsc3_proc_info name:req, cpu_val:req, cpu_mask:req
 	.type	__\name\()_proc_info,#object
@@ -514,7 +514,7 @@ __\name\()_proc_info:
 	.long	PMD_TYPE_SECT | \
 		PMD_SECT_AP_WRITE | \
 		PMD_SECT_AP_READ
-	b	__xsc3_setup
+	initfn	__xsc3_setup, __\name\()_proc_info
 	.long	cpu_arch_name
 	.long	cpu_elf_name
 	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
diff --git a/arch/arm/mm/proc-xscale.S b/arch/arm/mm/proc-xscale.S
index afa2b3c4df4a..b6bbfdb6dfdc 100644
--- a/arch/arm/mm/proc-xscale.S
+++ b/arch/arm/mm/proc-xscale.S
@@ -612,7 +612,7 @@ xscale_crval:
 
 	.align
 
-	.section ".proc.info.init", #alloc, #execinstr
+	.section ".proc.info.init", #alloc
 
 .macro xscale_proc_info name:req, cpu_val:req, cpu_mask:req, cpu_name:req, cache
 	.type	__\name\()_proc_info,#object
@@ -627,7 +627,7 @@ __\name\()_proc_info:
 	.long	PMD_TYPE_SECT | \
 		PMD_SECT_AP_WRITE | \
 		PMD_SECT_AP_READ
-	b	__xscale_setup
+	initfn	__xscale_setup, __\name\()_proc_info
 	.long	cpu_arch_name
 	.long	cpu_elf_name
 	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP