commit    69dbdd819599e2f3b77c172e83af512845bca5ad
tree      49939d8b80ec2115a801eae2aebc21f23867c876 /arch/arm/mm/mmu.c
parent    87232dd49aeb6b7d1af291edca8bd129a82ef4b5
parent    75e3f3137cb570661c2ad3035a139dda671fbb63
author    Takashi Iwai <tiwai@suse.de>  2010-11-03 10:51:26 -0400
committer Takashi Iwai <tiwai@suse.de>  2010-11-03 10:51:26 -0400

    Merge branch 'fix/asoc' into for-linus
Diffstat (limited to 'arch/arm/mm/mmu.c')

 arch/arm/mm/mmu.c | 119
 1 file changed, 56 insertions(+), 63 deletions(-)
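Within arch/arm/mm/mmu.c, this merge carries two kinds of change: compile-time #ifdef CONFIG_SMP blocks become runtime is_smp() checks, and map_lowmem() iterates memblock regions instead of meminfo banks. As a rough user-space sketch of the first pattern (is_smp() below is a hypothetical stand-in for the kernel helper, and the cache-policy constants are reduced to plain enum values):

    #include <stdbool.h>
    #include <stdio.h>

    enum { CPOLICY_WRITEBACK = 3, CPOLICY_WRITEALLOC = 4 };  /* illustrative values */

    /* Stand-in for the kernel's is_smp(); the real helper decides at
     * boot whether this image is running on an SMP system. */
    static bool is_smp(void) { return true; }

    int main(void)
    {
            unsigned int cachepolicy = CPOLICY_WRITEBACK;

            /* Before: chosen at build time under #ifdef CONFIG_SMP.
             * After: the same kernel image chooses at run time. */
            if (is_smp())
                    cachepolicy = CPOLICY_WRITEALLOC;

            printf("cachepolicy = %u\n", cachepolicy);
            return 0;
    }

The same substitution appears in build_mem_type_table() and sanity_check_meminfo() in the hunks below.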
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index e8ed9dc461fe..72ad3e1f56cf 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -14,7 +14,6 @@
 #include <linux/mman.h>
 #include <linux/nodemask.h>
 #include <linux/memblock.h>
-#include <linux/sort.h>
 #include <linux/fs.h>
 
 #include <asm/cputype.h>
@@ -265,17 +264,17 @@ static struct mem_type mem_types[] = {
 		.domain    = DOMAIN_KERNEL,
 	},
 	[MT_MEMORY_DTCM] = {
-		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG |
-			     L_PTE_DIRTY | L_PTE_WRITE,
+		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
+			     L_PTE_WRITE,
 		.prot_l1   = PMD_TYPE_TABLE,
 		.prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
 		.domain    = DOMAIN_KERNEL,
 	},
 	[MT_MEMORY_ITCM] = {
 		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
-			     L_PTE_USER | L_PTE_EXEC,
+			     L_PTE_WRITE | L_PTE_EXEC,
 		.prot_l1   = PMD_TYPE_TABLE,
-		.domain    = DOMAIN_IO,
+		.domain    = DOMAIN_KERNEL,
 	},
 };
 
@@ -310,9 +309,8 @@ static void __init build_mem_type_table(void)
 		cachepolicy = CPOLICY_WRITEBACK;
 		ecc_mask = 0;
 	}
-#ifdef CONFIG_SMP
-	cachepolicy = CPOLICY_WRITEALLOC;
-#endif
+	if (is_smp())
+		cachepolicy = CPOLICY_WRITEALLOC;
 
 	/*
 	 * Strip out features not present on earlier architectures.
@@ -406,13 +404,11 @@ static void __init build_mem_type_table(void)
 	cp = &cache_policies[cachepolicy];
 	vecs_pgprot = kern_pgprot = user_pgprot = cp->pte;
 
-#ifndef CONFIG_SMP
 	/*
 	 * Only use write-through for non-SMP systems
 	 */
-	if (cpu_arch >= CPU_ARCH_ARMv5 && cachepolicy > CPOLICY_WRITETHROUGH)
+	if (!is_smp() && cpu_arch >= CPU_ARCH_ARMv5 && cachepolicy > CPOLICY_WRITETHROUGH)
 		vecs_pgprot = cache_policies[CPOLICY_WRITETHROUGH].pte;
-#endif
 
 	/*
 	 * Enable CPU-specific coherency if supported.
@@ -436,22 +432,23 @@ static void __init build_mem_type_table(void)
 		mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
 		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
 
-#ifdef CONFIG_SMP
-		/*
-		 * Mark memory with the "shared" attribute for SMP systems
-		 */
-		user_pgprot |= L_PTE_SHARED;
-		kern_pgprot |= L_PTE_SHARED;
-		vecs_pgprot |= L_PTE_SHARED;
-		mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_S;
-		mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED;
-		mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S;
-		mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED;
-		mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
-		mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
-		mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
-		mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED;
-#endif
+		if (is_smp()) {
+			/*
+			 * Mark memory with the "shared" attribute
+			 * for SMP systems
+			 */
+			user_pgprot |= L_PTE_SHARED;
+			kern_pgprot |= L_PTE_SHARED;
+			vecs_pgprot |= L_PTE_SHARED;
+			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_S;
+			mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED;
+			mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S;
+			mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED;
+			mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
+			mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
+			mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
+			mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED;
+		}
 	}
 
 	/*
@@ -747,13 +744,14 @@ static int __init early_vmalloc(char *arg)
 }
 early_param("vmalloc", early_vmalloc);
 
-phys_addr_t lowmem_end_addr;
+static phys_addr_t lowmem_limit __initdata = 0;
 
 static void __init sanity_check_meminfo(void)
 {
 	int i, j, highmem = 0;
 
-	lowmem_end_addr = __pa(vmalloc_min - 1) + 1;
+	lowmem_limit = __pa(vmalloc_min - 1) + 1;
+	memblock_set_current_limit(lowmem_limit);
 
 	for (i = 0, j = 0; i < meminfo.nr_banks; i++) {
 		struct membank *bank = &meminfo.bank[j];
@@ -829,8 +827,7 @@ static void __init sanity_check_meminfo(void)
 		 * rather difficult.
 		 */
 		reason = "with VIPT aliasing cache";
-#ifdef CONFIG_SMP
-	} else if (tlb_ops_need_broadcast()) {
+	} else if (is_smp() && tlb_ops_need_broadcast()) {
 		/*
 		 * kmap_high needs to occasionally flush TLB entries,
 		 * however, if the TLB entries need to be broadcast
@@ -840,7 +837,6 @@ static void __init sanity_check_meminfo(void)
 		 * (must not be called with irqs off)
 		 */
 		reason = "without hardware TLB ops broadcasting";
-#endif
 	}
 	if (reason) {
 		printk(KERN_CRIT "HIGHMEM is not supported %s, ignoring high memory\n",
@@ -856,6 +852,7 @@ static void __init sanity_check_meminfo(void)
 static inline void prepare_page_table(void)
 {
 	unsigned long addr;
+	phys_addr_t end;
 
 	/*
 	 * Clear out all the mappings below the kernel image.
@@ -871,10 +868,17 @@ static inline void prepare_page_table(void)
 		pmd_clear(pmd_off_k(addr));
 
 	/*
+	 * Find the end of the first block of lowmem.
+	 */
+	end = memblock.memory.regions[0].base + memblock.memory.regions[0].size;
+	if (end >= lowmem_limit)
+		end = lowmem_limit;
+
+	/*
 	 * Clear out all the kernel space mappings, except for the first
 	 * memory bank, up to the end of the vmalloc region.
 	 */
-	for (addr = __phys_to_virt(bank_phys_end(&meminfo.bank[0]));
+	for (addr = __phys_to_virt(end);
 	     addr < VMALLOC_END; addr += PGDIR_SIZE)
 		pmd_clear(pmd_off_k(addr));
 }
@@ -991,37 +995,28 @@ static void __init kmap_init(void)
 #endif
 }
 
-static inline void map_memory_bank(struct membank *bank)
-{
-	struct map_desc map;
-
-	map.pfn = bank_pfn_start(bank);
-	map.virtual = __phys_to_virt(bank_phys_start(bank));
-	map.length = bank_phys_size(bank);
-	map.type = MT_MEMORY;
-
-	create_mapping(&map);
-}
-
 static void __init map_lowmem(void)
 {
-	struct meminfo *mi = &meminfo;
-	int i;
+	struct memblock_region *reg;
 
 	/* Map all the lowmem memory banks. */
-	for (i = 0; i < mi->nr_banks; i++) {
-		struct membank *bank = &mi->bank[i];
+	for_each_memblock(memory, reg) {
+		phys_addr_t start = reg->base;
+		phys_addr_t end = start + reg->size;
+		struct map_desc map;
+
+		if (end > lowmem_limit)
+			end = lowmem_limit;
+		if (start >= end)
+			break;
 
-		if (!bank->highmem)
-			map_memory_bank(bank);
-	}
-}
+		map.pfn = __phys_to_pfn(start);
+		map.virtual = __phys_to_virt(start);
+		map.length = end - start;
+		map.type = MT_MEMORY;
 
-static int __init meminfo_cmp(const void *_a, const void *_b)
-{
-	const struct membank *a = _a, *b = _b;
-	long cmp = bank_pfn_start(a) - bank_pfn_start(b);
-	return cmp < 0 ? -1 : cmp > 0 ? 1 : 0;
-}
+		create_mapping(&map);
+	}
 }
 
 /*
@@ -1032,8 +1027,6 @@ void __init paging_init(struct machine_desc *mdesc)
 {
 	void *zero_page;
 
-	sort(&meminfo.bank, meminfo.nr_banks, sizeof(meminfo.bank[0]), meminfo_cmp, NULL);
-
 	build_mem_type_table();
 	sanity_check_meminfo();
 	prepare_page_table();
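The new map_lowmem() clamps each memblock region to lowmem_limit before creating its mapping and stops at the first region that lies entirely above the limit. A toy user-space model of that loop, assuming a made-up region layout and limit, with printf standing in for create_mapping():

    #include <stdio.h>

    typedef unsigned long long phys_addr_t;

    struct region { phys_addr_t base, size; };

    int main(void)
    {
            const phys_addr_t lowmem_limit = 0x30000000ULL;  /* assumed 768 MiB */
            const struct region regions[] = {
                    { 0x00000000ULL, 0x20000000ULL },  /* entirely lowmem */
                    { 0x20000000ULL, 0x20000000ULL },  /* straddles the limit */
                    { 0x40000000ULL, 0x10000000ULL },  /* entirely highmem */
            };
            unsigned int i;

            for (i = 0; i < sizeof(regions) / sizeof(regions[0]); i++) {
                    phys_addr_t start = regions[i].base;
                    phys_addr_t end = start + regions[i].size;

                    if (end > lowmem_limit)
                            end = lowmem_limit;  /* clamp to lowmem */
                    if (start >= end)
                            break;  /* nothing below the limit remains */

                    printf("map [%#llx - %#llx)\n", start, end);
            }
            return 0;
    }

Because memblock keeps its regions sorted by base address, the early break is safe: once one region starts at or above the limit, every later one does too.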