author		Russell King <rmk+kernel@arm.linux.org.uk>	2011-01-06 17:32:52 -0500
committer	Russell King <rmk+kernel@arm.linux.org.uk>	2011-01-06 17:32:52 -0500
commit		4073723acb9cdcdbe4df9c0e0c376c65d1697e43
tree		f41c17eac157b1223ce104845cf9b1e5a9e6a83d	/arch/arm/mm
parent		58daf18cdcab550262a5f4681e1f1e073e21965a
parent		4ec3eb13634529c0bc7466658d84d0bbe3244aea
Merge branch 'misc' into devel
Conflicts:
	arch/arm/Kconfig
	arch/arm/common/Makefile
	arch/arm/kernel/Makefile
	arch/arm/kernel/smp.c
Diffstat (limited to 'arch/arm/mm')
-rw-r--r--	arch/arm/mm/Kconfig		| 35
-rw-r--r--	arch/arm/mm/dma-mapping.c	| 28
-rw-r--r--	arch/arm/mm/ioremap.c		|  8
-rw-r--r--	arch/arm/mm/mmu.c		|  6
-rw-r--r--	arch/arm/mm/proc-macros.S	|  7
-rw-r--r--	arch/arm/mm/proc-v7.S		|  9
6 files changed, 77 insertions, 16 deletions
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index 4414a01e1e8a..8493ed04797a 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -599,6 +599,14 @@ config CPU_CP15_MPU
 	help
 	  Processor has the CP15 register, which has MPU related registers.
 
+config CPU_USE_DOMAINS
+	bool
+	depends on MMU
+	default y if !CPU_32v6K
+	help
+	  This option enables or disables the use of domain switching
+	  via the set_fs() function.
+
 #
 # CPU supports 36-bit I/O
 #
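
For context (illustrative only, not part of this diff): the CPU_USE_DOMAINS help text above ties domain switching to set_fs(). The classic set_fs() bracket it refers to looks roughly like the sketch below; the function name and the choice of vfs_read() are assumptions made purely to show the pattern.

	#include <linux/fs.h>
	#include <linux/uaccess.h>

	/* Sketch only: pass a kernel buffer through an interface that
	 * normally expects a user pointer, by temporarily widening the
	 * address limit.  set_fs() is where, per the help text above,
	 * domain switching may or may not be used on ARM.
	 */
	static ssize_t read_into_kernel_buffer(struct file *filp, void *kbuf,
					       size_t len, loff_t *pos)
	{
		mm_segment_t old_fs = get_fs();
		ssize_t ret;

		set_fs(KERNEL_DS);	/* treat kernel addresses as valid "user" addresses */
		ret = vfs_read(filp, (char __user *)kbuf, len, pos);
		set_fs(old_fs);		/* always restore the previous limit */

		return ret;
	}
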
@@ -628,6 +636,33 @@ config ARM_THUMBEE
 	  Say Y here if you have a CPU with the ThumbEE extension and code to
 	  make use of it. Say N for code that can run on CPUs without ThumbEE.
 
+config SWP_EMULATE
+	bool "Emulate SWP/SWPB instructions"
+	depends on CPU_V7
+	select HAVE_PROC_CPU if PROC_FS
+	default y if SMP
+	help
+	  ARMv6 architecture deprecates use of the SWP/SWPB instructions.
+	  ARMv7 multiprocessing extensions introduce the ability to disable
+	  these instructions, triggering an undefined instruction exception
+	  when executed. Say Y here to enable software emulation of these
+	  instructions for userspace (not kernel) using LDREX/STREX.
+	  Also creates /proc/cpu/swp_emulation for statistics.
+
+	  In some older versions of glibc [<=2.8] SWP is used during futex
+	  trylock() operations with the assumption that the code will not
+	  be preempted. This invalid assumption may be more likely to fail
+	  with SWP emulation enabled, leading to deadlock of the user
+	  application.
+
+	  NOTE: when accessing uncached shared regions, LDREX/STREX rely
+	  on an external transaction monitoring block called a global
+	  monitor to maintain update atomicity. If your system does not
+	  implement a global monitor, this option can cause programs that
+	  perform SWP operations to uncached memory to deadlock.
+
+	  If unsure, say Y.
+
 config CPU_BIG_ENDIAN
 	bool "Build big-endian kernel"
 	depends on ARCH_SUPPORTS_BIG_ENDIAN
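
For context (illustrative only; not this series' emulation handler): the SWP_EMULATE help text describes providing SWP/SWPB's atomic-swap semantics with LDREX/STREX. A minimal retry loop with that effect, assuming a GCC-style ARM inline-asm build on ARMv6+ and hypothetical names, could look like:

	/* Sketch of an LDREX/STREX swap: load-exclusive the old value and
	 * try to store-exclusive the new one, retrying until no other
	 * observer has intervened between the two accesses.
	 */
	static inline unsigned int ldrex_strex_swap(volatile unsigned int *addr,
						    unsigned int newval)
	{
		unsigned int oldval, failed;

		do {
			__asm__ __volatile__(
			"	ldrex	%0, [%2]\n"
			"	strex	%1, %3, [%2]\n"	/* %1 == 0 on success */
			: "=&r" (oldval), "=&r" (failed)
			: "r" (addr), "r" (newval)
			: "cc", "memory");
		} while (failed);

		return oldval;
	}
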
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 809f1bf9fa29..6b48e0a3d7aa 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -312,7 +312,7 @@ __dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
 	addr = page_address(page);
 
 	if (addr)
-		*handle = page_to_dma(dev, page);
+		*handle = pfn_to_dma(dev, page_to_pfn(page));
 
 	return addr;
 }
@@ -407,7 +407,7 @@ void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr
 	if (!arch_is_coherent())
 		__dma_free_remap(cpu_addr, size);
 
-	__dma_free_buffer(dma_to_page(dev, handle), size);
+	__dma_free_buffer(pfn_to_page(dma_to_pfn(dev, handle)), size);
 }
 EXPORT_SYMBOL(dma_free_coherent);
 
@@ -555,17 +555,20 @@ int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
 	struct scatterlist *s;
 	int i, j;
 
+	BUG_ON(!valid_dma_direction(dir));
+
 	for_each_sg(sg, s, nents, i) {
-		s->dma_address = dma_map_page(dev, sg_page(s), s->offset,
+		s->dma_address = __dma_map_page(dev, sg_page(s), s->offset,
 						s->length, dir);
 		if (dma_mapping_error(dev, s->dma_address))
 			goto bad_mapping;
 	}
+	debug_dma_map_sg(dev, sg, nents, nents, dir);
 	return nents;
 
  bad_mapping:
 	for_each_sg(sg, s, i, j)
-		dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir);
+		__dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir);
 	return 0;
 }
 EXPORT_SYMBOL(dma_map_sg);
@@ -586,8 +589,10 @@ void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
 	struct scatterlist *s;
 	int i;
 
+	debug_dma_unmap_sg(dev, sg, nents, dir);
+
 	for_each_sg(sg, s, nents, i)
-		dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir);
+		__dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir);
 }
 EXPORT_SYMBOL(dma_unmap_sg);
 
@@ -612,6 +617,8 @@ void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
 		__dma_page_dev_to_cpu(sg_page(s), s->offset,
 				      s->length, dir);
 	}
+
+	debug_dma_sync_sg_for_cpu(dev, sg, nents, dir);
 }
 EXPORT_SYMBOL(dma_sync_sg_for_cpu);
 
@@ -636,5 +643,16 @@ void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
 		__dma_page_cpu_to_dev(sg_page(s), s->offset,
 				      s->length, dir);
 	}
+
+	debug_dma_sync_sg_for_device(dev, sg, nents, dir);
 }
 EXPORT_SYMBOL(dma_sync_sg_for_device);
+
+#define PREALLOC_DMA_DEBUG_ENTRIES	4096
+
+static int __init dma_debug_do_init(void)
+{
+	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
+	return 0;
+}
+fs_initcall(dma_debug_do_init);
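
For context (illustrative driver-side usage, not part of this diff): the hunks above route dma_map_sg()/dma_unmap_sg() and the scatterlist sync calls through the DMA-debug hooks and add a BUG_ON() for an invalid direction argument. A typical caller that these checks now cover might look like the sketch below; the function name and error handling are assumptions.

	#include <linux/device.h>
	#include <linux/dma-mapping.h>
	#include <linux/scatterlist.h>

	static int map_and_run_transfer(struct device *dev,
					struct scatterlist *sgl, int nents)
	{
		int mapped;

		mapped = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
		if (!mapped)
			return -ENOMEM;		/* nothing was mapped */

		/* ... program the controller from sg_dma_address()/sg_dma_len()
		 * of each mapped entry and wait for the transfer ... */

		/* unmap with the original nents, as the DMA API requires */
		dma_unmap_sg(dev, sgl, nents, DMA_TO_DEVICE);
		return 0;
	}
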
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
index 55c17a6fb22f..ab506272b2d3 100644
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -204,12 +204,8 @@ void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
 	/*
 	 * Don't allow RAM to be mapped - this causes problems with ARMv6+
 	 */
-	if (pfn_valid(pfn)) {
-		printk(KERN_WARNING "BUG: Your driver calls ioremap() on system memory. This leads\n"
-		       "to architecturally unpredictable behaviour on ARMv6+, and ioremap()\n"
-		       "will fail in the next kernel release. Please fix your driver.\n");
-		WARN_ON(1);
-	}
+	if (WARN_ON(pfn_valid(pfn)))
+		return NULL;
 
 	type = get_mem_type(mtype);
 	if (!type)
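
For context (illustrative only): with the change above, ioremap() of a pfn that belongs to system memory now fails and returns NULL instead of merely printing a warning, so callers must check the return value. A hypothetical platform driver mapping its register window:

	#include <linux/io.h>
	#include <linux/ioport.h>
	#include <linux/platform_device.h>

	static void __iomem *map_device_registers(struct platform_device *pdev)
	{
		struct resource *res;

		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		if (!res)
			return NULL;

		/* Returns NULL (with a WARN) if 'res' actually covers system RAM. */
		return ioremap(res->start, resource_size(res));
	}
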
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 72ad3e1f56cf..79c01f540cbe 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -24,6 +24,7 @@
 #include <asm/smp_plat.h>
 #include <asm/tlb.h>
 #include <asm/highmem.h>
+#include <asm/traps.h>
 
 #include <asm/mach/arch.h>
 #include <asm/mach/map.h>
@@ -914,12 +915,11 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
 {
 	struct map_desc map;
 	unsigned long addr;
-	void *vectors;
 
 	/*
 	 * Allocate the vector page early.
 	 */
-	vectors = early_alloc(PAGE_SIZE);
+	vectors_page = early_alloc(PAGE_SIZE);
 
 	for (addr = VMALLOC_END; addr; addr += PGDIR_SIZE)
 		pmd_clear(pmd_off_k(addr));
@@ -959,7 +959,7 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
 	 * location (0xffff0000). If we aren't using high-vectors, also
 	 * create a mapping at the low-vectors virtual address.
 	 */
-	map.pfn = __phys_to_pfn(virt_to_phys(vectors));
+	map.pfn = __phys_to_pfn(virt_to_phys(vectors_page));
 	map.virtual = 0xffff0000;
 	map.length = PAGE_SIZE;
 	map.type = MT_HIGH_VECTORS;
diff --git a/arch/arm/mm/proc-macros.S b/arch/arm/mm/proc-macros.S
index b795afd0a2c6..f8f777df8d72 100644
--- a/arch/arm/mm/proc-macros.S
+++ b/arch/arm/mm/proc-macros.S
@@ -109,6 +109,10 @@
  *	110x   0	1    0		r/w	r/o
  *	11x0   0	1    0		r/w	r/o
  *	1111   0	1    1		r/w	r/w
+ *
+ * If !CONFIG_CPU_USE_DOMAINS, the following permissions are changed:
+ *	110x   1	1    1		r/o	r/o
+ *	11x0   1	1    1		r/o	r/o
  */
 	.macro	armv6_mt_table pfx
 \pfx\()_mt_table:
@@ -148,8 +152,11 @@
 
 	tst	r1, #L_PTE_USER
 	orrne	r3, r3, #PTE_EXT_AP1
+#ifdef CONFIG_CPU_USE_DOMAINS
+	@ allow kernel read/write access to read-only user pages
 	tstne	r3, #PTE_EXT_APX
 	bicne	r3, r3, #PTE_EXT_APX | PTE_EXT_AP0
+#endif
 
 	tst	r1, #L_PTE_EXEC
 	orreq	r3, r3, #PTE_EXT_XN
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index 9b9ff5d949fd..7401f4d7e676 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -148,8 +148,11 @@ ENTRY(cpu_v7_set_pte_ext)
 
 	tst	r1, #L_PTE_USER
 	orrne	r3, r3, #PTE_EXT_AP1
+#ifdef CONFIG_CPU_USE_DOMAINS
+	@ allow kernel read/write access to read-only user pages
 	tstne	r3, #PTE_EXT_APX
 	bicne	r3, r3, #PTE_EXT_APX | PTE_EXT_AP0
+#endif
 
 	tst	r1, #L_PTE_EXEC
 	orreq	r3, r3, #PTE_EXT_XN
@@ -273,8 +276,6 @@ __v7_setup:
 	ALT_SMP(orr	r4, r4, #TTB_FLAGS_SMP)
 	ALT_UP(orr	r4, r4, #TTB_FLAGS_UP)
 	mcr	p15, 0, r4, c2, c0, 1		@ load TTB1
-	mov	r10, #0x1f			@ domains 0, 1 = manager
-	mcr	p15, 0, r10, c3, c0, 0		@ load domain access register
 	/*
 	 * Memory region attributes with SCTLR.TRE=1
 	 *
@@ -313,6 +314,10 @@ __v7_setup:
 #ifdef CONFIG_CPU_ENDIAN_BE8
 	orr	r6, r6, #1 << 25		@ big-endian page tables
 #endif
+#ifdef CONFIG_SWP_EMULATE
+	orr	r5, r5, #(1 << 10)		@ set SW bit in "clear"
+	bic	r6, r6, #(1 << 10)		@ clear it in "mmuset"
+#endif
 	mrc	p15, 0, r0, c1, c0, 0		@ read control register
 	bic	r0, r0, r5			@ clear bits them
 	orr	r0, r0, r6			@ set them
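
For context (an assumed C rendering of the asm above, not kernel code): __v7_setup builds a "clear" mask in r5 and a "set" mask in r6 and applies both to the control register. The SWP_EMULATE hunk puts SCTLR bit 10 (the SW bit) into the clear mask, so hardware SWP/SWPB is disabled and the instructions trap to the emulation described in the Kconfig help text.

	#define SCTLR_SW	(1u << 10)	/* SWP/SWPB enable bit */

	/* Mirrors "bic r0, r0, r5" / "orr r0, r0, r6" from __v7_setup. */
	static unsigned int apply_sctlr_masks(unsigned int sctlr,
					      unsigned int clear,
					      unsigned int set)
	{
		sctlr &= ~clear;
		sctlr |= set;
		return sctlr;
	}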