Diffstat (limited to 'arch/arm/mm/mmu.c')
-rw-r--r--   arch/arm/mm/mmu.c   12
1 file changed, 3 insertions(+), 9 deletions(-)

diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 714a7fd99ca3..a7a9e41fa2c2 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -16,13 +16,13 @@
 #include <linux/memblock.h>
 #include <linux/fs.h>
 #include <linux/vmalloc.h>
+#include <linux/sizes.h>
 
 #include <asm/cp15.h>
 #include <asm/cputype.h>
 #include <asm/sections.h>
 #include <asm/cachetype.h>
 #include <asm/setup.h>
-#include <asm/sizes.h>
 #include <asm/smp_plat.h>
 #include <asm/tlb.h>
 #include <asm/highmem.h>
@@ -423,12 +423,6 @@ static void __init build_mem_type_table(void)
 	vecs_pgprot = kern_pgprot = user_pgprot = cp->pte;
 
 	/*
-	 * Only use write-through for non-SMP systems
-	 */
-	if (!is_smp() && cpu_arch >= CPU_ARCH_ARMv5 && cachepolicy > CPOLICY_WRITETHROUGH)
-		vecs_pgprot = cache_policies[CPOLICY_WRITETHROUGH].pte;
-
-	/*
 	 * Enable CPU-specific coherency if supported.
 	 * (Only available on XSC3 at the moment.)
 	 */
@@ -800,7 +794,7 @@ void __init vm_reserve_area_early(unsigned long addr, unsigned long size,
 	vm = early_alloc_aligned(sizeof(*vm), __alignof__(*vm));
 	vm->addr = (void *)addr;
 	vm->size = size;
-	vm->flags = VM_IOREMAP | VM_ARM_STATIC_MAPPING;
+	vm->flags = VM_IOREMAP | VM_ARM_EMPTY_MAPPING;
 	vm->caller = caller;
 	vm_area_add_early(vm);
 }
@@ -833,7 +827,7 @@ static void __init fill_pmd_gaps(void)
 
 	/* we're still single threaded hence no lock needed here */
 	for (vm = vmlist; vm; vm = vm->next) {
-		if (!(vm->flags & VM_ARM_STATIC_MAPPING))
+		if (!(vm->flags & (VM_ARM_STATIC_MAPPING | VM_ARM_EMPTY_MAPPING)))
 			continue;
 		addr = (unsigned long)vm->addr;
 		if (addr < next)
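
For readers skimming the hunks above: areas reserved through vm_reserve_area_early() are now tagged VM_ARM_EMPTY_MAPPING instead of VM_ARM_STATIC_MAPPING, and fill_pmd_gaps() widens its test so both kinds of mapping are scanned. The fragment below is a minimal standalone sketch of that widened flag test, not kernel code: the flag values and the helper names are hypothetical placeholders for illustration only (the real VM_ARM_* definitions live elsewhere in arch/arm/mm and are not part of this diff).

/* Sketch only: placeholder flag values, not the kernel's real definitions. */
#include <stdio.h>

#define VM_IOREMAP              0x00000001UL    /* placeholder value */
#define VM_ARM_STATIC_MAPPING   0x40000000UL    /* placeholder value */
#define VM_ARM_EMPTY_MAPPING    0x20000000UL    /* placeholder value */

/* Old test: only static mappings were considered when filling PMD gaps. */
static int scanned_before_patch(unsigned long flags)
{
	return !!(flags & VM_ARM_STATIC_MAPPING);
}

/* New test: empty (reserved) mappings are scanned as well. */
static int scanned_after_patch(unsigned long flags)
{
	return !!(flags & (VM_ARM_STATIC_MAPPING | VM_ARM_EMPTY_MAPPING));
}

int main(void)
{
	/* A region reserved the way vm_reserve_area_early() now does it. */
	unsigned long reserved = VM_IOREMAP | VM_ARM_EMPTY_MAPPING;

	printf("before: %d, after: %d\n",
	       scanned_before_patch(reserved),  /* 0: skipped by the old check */
	       scanned_after_patch(reserved));  /* 1: picked up by the new check */
	return 0;
}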