Diffstat (limited to 'arch/arm/mm')
-rw-r--r--	arch/arm/mm/init.c	| 40
-rw-r--r--	arch/arm/mm/ioremap.c	| 82
-rw-r--r--	arch/arm/mm/mm.h	| 14
-rw-r--r--	arch/arm/mm/mmu.c	| 51
-rw-r--r--	arch/arm/mm/nommu.c	|  2
5 files changed, 113 insertions(+), 76 deletions(-)
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index fbdd12ea3a5..786adddf1a8 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -20,7 +20,6 @@
 #include <linux/highmem.h>
 #include <linux/gfp.h>
 #include <linux/memblock.h>
-#include <linux/sort.h>
 
 #include <asm/mach-types.h>
 #include <asm/prom.h>
@@ -134,30 +133,18 @@ void show_mem(unsigned int filter)
 }
 
 static void __init find_limits(unsigned long *min, unsigned long *max_low,
 	unsigned long *max_high)
 {
 	struct meminfo *mi = &meminfo;
 	int i;
 
-	*min = -1UL;
-	*max_low = *max_high = 0;
-
-	for_each_bank (i, mi) {
-		struct membank *bank = &mi->bank[i];
-		unsigned long start, end;
-
-		start = bank_pfn_start(bank);
-		end = bank_pfn_end(bank);
-
-		if (*min > start)
-			*min = start;
-		if (*max_high < end)
-			*max_high = end;
-		if (bank->highmem)
-			continue;
-		if (*max_low < end)
-			*max_low = end;
-	}
+	/* This assumes the meminfo array is properly sorted */
+	*min = bank_pfn_start(&mi->bank[0]);
+	for_each_bank (i, mi)
+		if (mi->bank[i].highmem)
+			break;
+	*max_low = bank_pfn_end(&mi->bank[i - 1]);
+	*max_high = bank_pfn_end(&mi->bank[mi->nr_banks - 1]);
 }
 
 static void __init arm_bootmem_init(unsigned long start_pfn,
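
The simplified find_limits() above leans on the meminfo array already being sorted by start address with any highmem banks at the tail, so the three limits fall straight out of array positions. A minimal standalone sketch of that logic, with stand-in types and names rather than the kernel's:

#include <assert.h>

struct bank { unsigned long start_pfn, end_pfn; int highmem; };

static void find_limits(const struct bank *b, int nr,
			unsigned long *min, unsigned long *max_low,
			unsigned long *max_high)
{
	int i;

	*min = b[0].start_pfn;
	for (i = 0; i < nr; i++)
		if (b[i].highmem)
			break;
	*max_low = b[i - 1].end_pfn;	/* last lowmem bank */
	*max_high = b[nr - 1].end_pfn;	/* last bank overall */
}

int main(void)
{
	/* two lowmem banks followed by one highmem bank, sorted */
	struct bank banks[] = {
		{ 0x60000, 0x70000, 0 },
		{ 0x70000, 0x80000, 0 },
		{ 0x80000, 0x90000, 1 },
	};
	unsigned long min, max_low, max_high;

	find_limits(banks, 3, &min, &max_low, &max_high);
	assert(min == 0x60000 && max_low == 0x80000 && max_high == 0x90000);
	return 0;
}

If no bank is marked highmem, the loop runs off the end and *max_low ends up equal to *max_high, matching the kernel code's behaviour.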
@@ -319,19 +306,10 @@ static void arm_memory_present(void)
 }
 #endif
 
-static int __init meminfo_cmp(const void *_a, const void *_b)
-{
-	const struct membank *a = _a, *b = _b;
-	long cmp = bank_pfn_start(a) - bank_pfn_start(b);
-	return cmp < 0 ? -1 : cmp > 0 ? 1 : 0;
-}
-
 void __init arm_memblock_init(struct meminfo *mi, struct machine_desc *mdesc)
 {
 	int i;
 
-	sort(&meminfo.bank, meminfo.nr_banks, sizeof(meminfo.bank[0]), meminfo_cmp, NULL);
-
 	memblock_init();
 	for (i = 0; i < mi->nr_banks; i++)
 		memblock_add(mi->bank[i].start, mi->bank[i].size);
@@ -403,8 +381,6 @@ void __init bootmem_init(void)
 	 */
 	arm_bootmem_free(min, max_low, max_high);
 
-	high_memory = __va(((phys_addr_t)max_low << PAGE_SHIFT) - 1) + 1;
-
 	/*
 	 * This doesn't seem to be used by the Linux memory manager any
 	 * more, but is used by ll_rw_block.  If we can get rid of it, we
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
index bdb248c4f55..12c7ad215ce 100644
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -36,12 +36,6 @@
 #include <asm/mach/map.h>
 #include "mm.h"
 
-/*
- * Used by ioremap() and iounmap() code to mark (super)section-mapped
- * I/O regions in vm_struct->flags field.
- */
-#define VM_ARM_SECTION_MAPPING	0x80000000
-
 int ioremap_page(unsigned long virt, unsigned long phys,
 		 const struct mem_type *mtype)
 {
@@ -201,12 +195,6 @@ void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
 	if (pfn >= 0x100000 && (__pfn_to_phys(pfn) & ~SUPERSECTION_MASK))
 		return NULL;
 
-	/*
-	 * Don't allow RAM to be mapped - this causes problems with ARMv6+
-	 */
-	if (WARN_ON(pfn_valid(pfn)))
-		return NULL;
-
 	type = get_mem_type(mtype);
 	if (!type)
 		return NULL;
@@ -216,6 +204,34 @@ void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
 	 */
 	size = PAGE_ALIGN(offset + size);
 
+	/*
+	 * Try to reuse one of the static mapping whenever possible.
+	 */
+	read_lock(&vmlist_lock);
+	for (area = vmlist; area; area = area->next) {
+		if (!size || (sizeof(phys_addr_t) == 4 && pfn >= 0x100000))
+			break;
+		if (!(area->flags & VM_ARM_STATIC_MAPPING))
+			continue;
+		if ((area->flags & VM_ARM_MTYPE_MASK) != VM_ARM_MTYPE(mtype))
+			continue;
+		if (__phys_to_pfn(area->phys_addr) > pfn ||
+		    __pfn_to_phys(pfn) + size-1 > area->phys_addr + area->size-1)
+			continue;
+		/* we can drop the lock here as we know *area is static */
+		read_unlock(&vmlist_lock);
+		addr = (unsigned long)area->addr;
+		addr += __pfn_to_phys(pfn) - area->phys_addr;
+		return (void __iomem *) (offset + addr);
+	}
+	read_unlock(&vmlist_lock);
+
+	/*
+	 * Don't allow RAM to be mapped - this causes problems with ARMv6+
+	 */
+	if (WARN_ON(pfn_valid(pfn)))
+		return NULL;
+
 	area = get_vm_area_caller(size, VM_IOREMAP, caller);
 	if (!area)
 		return NULL;
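
The new loop above lets __arm_ioremap_pfn_caller() serve a request straight from an existing static mapping when the requested physical range is fully contained in one with a matching memory type. A standalone sketch of the containment-and-offset arithmetic; struct and function names here are illustrative, not kernel API:

#include <assert.h>
#include <stdint.h>

/* illustrative stand-ins for the kernel's vm_struct fields */
struct static_map {
	uintptr_t virt;		/* area->addr */
	uint64_t  phys;		/* area->phys_addr */
	uint64_t  size;		/* area->size */
};

/* Return the virtual address serving [phys, phys + size), or 0 if the
 * request is not fully contained in the static mapping. */
static uintptr_t reuse_static(const struct static_map *m,
			      uint64_t phys, uint64_t size)
{
	if (phys < m->phys || phys + size - 1 > m->phys + m->size - 1)
		return 0;
	return m->virt + (uintptr_t)(phys - m->phys);
}

int main(void)
{
	/* 1 MB device window statically mapped at 0xf8000000 */
	struct static_map m = { 0xf8000000, 0x10000000, 0x100000 };

	/* contained request: reuse the static mapping at an offset */
	assert(reuse_static(&m, 0x10020000, 0x1000) == 0xf8020000);
	/* request crossing the end of the mapping: fall back to ioremap */
	assert(reuse_static(&m, 0x100ff000, 0x2000) == 0);
	return 0;
}

The size-1 comparisons mirror the patch, which compares last bytes rather than one-past-the-end addresses so a mapping ending at the top of the address space cannot overflow the check.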
@@ -313,28 +329,34 @@ __arm_ioremap_exec(unsigned long phys_addr, size_t size, bool cached)
 void __iounmap(volatile void __iomem *io_addr)
 {
 	void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr);
-#ifndef CONFIG_SMP
-	struct vm_struct **p, *tmp;
+	struct vm_struct *vm;
 
-	/*
-	 * If this is a section based mapping we need to handle it
-	 * specially as the VM subsystem does not know how to handle
-	 * such a beast. We need the lock here b/c we need to clear
-	 * all the mappings before the area can be reclaimed
-	 * by someone else.
-	 */
-	write_lock(&vmlist_lock);
-	for (p = &vmlist ; (tmp = *p) ; p = &tmp->next) {
-		if ((tmp->flags & VM_IOREMAP) && (tmp->addr == addr)) {
-			if (tmp->flags & VM_ARM_SECTION_MAPPING) {
-				unmap_area_sections((unsigned long)tmp->addr,
-						    tmp->size);
-			}
+	read_lock(&vmlist_lock);
+	for (vm = vmlist; vm; vm = vm->next) {
+		if (vm->addr > addr)
+			break;
+		if (!(vm->flags & VM_IOREMAP))
+			continue;
+		/* If this is a static mapping we must leave it alone */
+		if ((vm->flags & VM_ARM_STATIC_MAPPING) &&
+		    (vm->addr <= addr) && (vm->addr + vm->size > addr)) {
+			read_unlock(&vmlist_lock);
+			return;
+		}
+#ifndef CONFIG_SMP
+		/*
+		 * If this is a section based mapping we need to handle it
+		 * specially as the VM subsystem does not know how to handle
+		 * such a beast.
+		 */
+		if ((vm->addr == addr) &&
+		    (vm->flags & VM_ARM_SECTION_MAPPING)) {
+			unmap_area_sections((unsigned long)vm->addr, vm->size);
 			break;
 		}
-	}
-	write_unlock(&vmlist_lock);
 #endif
+	}
+	read_unlock(&vmlist_lock);
 
 	vunmap(addr);
 }
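
With static mappings now on vmlist, __iounmap() must recognize them and return without calling vunmap(), and it can stop scanning early because vmlist is kept sorted by address. A compact model of that policy; all names here are stand-ins for this sketch, not the kernel's:

#include <assert.h>
#include <stddef.h>

#define VM_IOREMAP		0x1
#define VM_ARM_STATIC_MAPPING	0x2

struct vm_area {
	unsigned long addr, size, flags;
	struct vm_area *next;	/* kept sorted by addr, like vmlist */
};

/* return 1 if addr belongs to a static mapping and must be left alone */
static int is_static_mapping(const struct vm_area *list, unsigned long addr)
{
	const struct vm_area *vm;

	for (vm = list; vm; vm = vm->next) {
		if (vm->addr > addr)
			break;	/* sorted list: nothing further can match */
		if ((vm->flags & VM_ARM_STATIC_MAPPING) &&
		    addr >= vm->addr && addr < vm->addr + vm->size)
			return 1;
	}
	return 0;
}

int main(void)
{
	struct vm_area fixed = { 0xf8000000, 0x100000,
				 VM_IOREMAP | VM_ARM_STATIC_MAPPING, NULL };
	struct vm_area dyn   = { 0xf0000000, 0x1000, VM_IOREMAP, &fixed };

	assert(is_static_mapping(&dyn, 0xf8001000));	/* leave it alone */
	assert(!is_static_mapping(&dyn, 0xf0000000));	/* vunmap() it */
	return 0;
}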
diff --git a/arch/arm/mm/mm.h b/arch/arm/mm/mm.h
index ad7cce3bc43..70f6d3ea483 100644
--- a/arch/arm/mm/mm.h
+++ b/arch/arm/mm/mm.h
@@ -21,6 +21,20 @@ const struct mem_type *get_mem_type(unsigned int type);
 
 extern void __flush_dcache_page(struct address_space *mapping, struct page *page);
 
+/*
+ * ARM specific vm_struct->flags bits.
+ */
+
+/* (super)section-mapped I/O regions used by ioremap()/iounmap() */
+#define VM_ARM_SECTION_MAPPING	0x80000000
+
+/* permanent static mappings from iotable_init() */
+#define VM_ARM_STATIC_MAPPING	0x40000000
+
+/* mapping type (attributes) for permanent static mappings */
+#define VM_ARM_MTYPE(mt)	((mt) << 20)
+#define VM_ARM_MTYPE_MASK	(0x1f << 20)
+
 #endif
 
 #ifdef CONFIG_ZONE_DMA
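
The three flag groups above coexist in the high bits of vm_struct->flags: bit 31 marks (super)section mappings, bit 30 marks static mappings, and bits 20-24 carry the memory type. A standalone check of the encoding; MT_EXAMPLE is an arbitrary stand-in for a real mem_types index:

#include <assert.h>

#define VM_ARM_SECTION_MAPPING	0x80000000u
#define VM_ARM_STATIC_MAPPING	0x40000000u
#define VM_ARM_MTYPE(mt)	((mt) << 20)
#define VM_ARM_MTYPE_MASK	(0x1fu << 20)

#define MT_EXAMPLE		9u	/* arbitrary stand-in value */

int main(void)
{
	unsigned int flags = VM_ARM_STATIC_MAPPING | VM_ARM_MTYPE(MT_EXAMPLE);

	/* the memory type round-trips through the flags word... */
	assert((flags & VM_ARM_MTYPE_MASK) == VM_ARM_MTYPE(MT_EXAMPLE));
	/* ...without disturbing the dedicated flag bits */
	assert(flags & VM_ARM_STATIC_MAPPING);
	assert(!(flags & VM_ARM_SECTION_MAPPING));
	return 0;
}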
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index dc8c550e6cb..27e366af67f 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -15,6 +15,7 @@
 #include <linux/nodemask.h>
 #include <linux/memblock.h>
 #include <linux/fs.h>
+#include <linux/vmalloc.h>
 
 #include <asm/cputype.h>
 #include <asm/sections.h>
@@ -529,13 +530,18 @@ EXPORT_SYMBOL(phys_mem_access_prot);
 
 #define vectors_base()	(vectors_high() ? 0xffff0000 : 0)
 
-static void __init *early_alloc(unsigned long sz)
+static void __init *early_alloc_aligned(unsigned long sz, unsigned long align)
 {
-	void *ptr = __va(memblock_alloc(sz, sz));
+	void *ptr = __va(memblock_alloc(sz, align));
 	memset(ptr, 0, sz);
 	return ptr;
 }
 
+static void __init *early_alloc(unsigned long sz)
+{
+	return early_alloc_aligned(sz, sz);
+}
+
 static pte_t * __init early_pte_alloc(pmd_t *pmd, unsigned long addr, unsigned long prot)
 {
 	if (pmd_none(*pmd)) {
@@ -685,9 +691,10 @@ static void __init create_mapping(struct map_desc *md)
 	}
 
 	if ((md->type == MT_DEVICE || md->type == MT_ROM) &&
-	    md->virtual >= PAGE_OFFSET && md->virtual < VMALLOC_END) {
+	    md->virtual >= PAGE_OFFSET &&
+	    (md->virtual < VMALLOC_START || md->virtual >= VMALLOC_END)) {
 		printk(KERN_WARNING "BUG: mapping for 0x%08llx"
-		       " at 0x%08lx overlaps vmalloc space\n",
+		       " at 0x%08lx out of vmalloc space\n",
 		       (long long)__pfn_to_phys((u64)md->pfn), md->virtual);
 	}
 
@@ -729,18 +736,33 @@ static void __init create_mapping(struct map_desc *md)
  */
 void __init iotable_init(struct map_desc *io_desc, int nr)
 {
-	int i;
+	struct map_desc *md;
+	struct vm_struct *vm;
+
+	if (!nr)
+		return;
 
-	for (i = 0; i < nr; i++)
-		create_mapping(io_desc + i);
+	vm = early_alloc_aligned(sizeof(*vm) * nr, __alignof__(*vm));
+
+	for (md = io_desc; nr; md++, nr--) {
+		create_mapping(md);
+		vm->addr = (void *)(md->virtual & PAGE_MASK);
+		vm->size = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK));
+		vm->phys_addr = __pfn_to_phys(md->pfn);
+		vm->flags = VM_IOREMAP | VM_ARM_STATIC_MAPPING;
+		vm->flags |= VM_ARM_MTYPE(md->type);
+		vm->caller = iotable_init;
+		vm_area_add_early(vm++);
+	}
 }
 
-static void * __initdata vmalloc_min = (void *)(VMALLOC_END - SZ_128M);
+static void * __initdata vmalloc_min =
+	(void *)(VMALLOC_END - (240 << 20) - VMALLOC_OFFSET);
 
 /*
  * vmalloc=size forces the vmalloc area to be exactly 'size'
  * bytes. This can be used to increase (or decrease) the vmalloc
- * area - the default is 128m.
+ * area - the default is 240m.
  */
 static int __init early_vmalloc(char *arg)
 {
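
For context, iotable_init() is normally called from a machine's map_io hook with a static map_desc array; with this patch each entry also becomes a vm_struct that later ioremap() calls can reuse. A hypothetical board-file fragment (the device name and both addresses are invented for illustration):

#include <linux/kernel.h>
#include <linux/init.h>
#include <asm/mach/map.h>

static struct map_desc foo_io_desc[] __initdata = {
	{
		.virtual	= 0xfe000000,			/* invented VA */
		.pfn		= __phys_to_pfn(0x48000000),	/* invented PA */
		.length		= 0x00100000,			/* 1 MB window */
		.type		= MT_DEVICE,
	},
};

static void __init foo_map_io(void)
{
	iotable_init(foo_io_desc, ARRAY_SIZE(foo_io_desc));
}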
@@ -860,6 +882,7 @@ void __init sanity_check_meminfo(void)
 	}
 #endif
 	meminfo.nr_banks = j;
+	high_memory = __va(lowmem_limit - 1) + 1;
 	memblock_set_current_limit(lowmem_limit);
 }
 
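
Setting high_memory from lowmem_limit here, rather than in bootmem_init() (see the init.c hunk above), makes it valid earlier in boot. The __va(limit - 1) + 1 form converts only the last valid lowmem byte, so the one-past-the-end physical address at the limit itself is never converted; a userspace model of the arithmetic, where PAGE_OFFSET and the __va() stand-in are illustrative:

#include <assert.h>
#include <stdint.h>

#define PAGE_OFFSET	0xc0000000u
/* stand-in for the kernel's __va() with RAM starting at physical 0 */
#define __va(p)		((uint32_t)((p) + PAGE_OFFSET))

int main(void)
{
	uint64_t lowmem_limit = 0x30000000;	/* 768 MB of lowmem */

	/* one byte past the last lowmem byte, computed without ever
	 * converting the address at the limit itself */
	assert(__va(lowmem_limit - 1) + 1 == 0xf0000000u);
	return 0;
}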
@@ -890,10 +913,10 @@ static inline void prepare_page_table(void)
 
 	/*
 	 * Clear out all the kernel space mappings, except for the first
-	 * memory bank, up to the end of the vmalloc region.
+	 * memory bank, up to the vmalloc region.
 	 */
 	for (addr = __phys_to_virt(end);
-	     addr < VMALLOC_END; addr += PMD_SIZE)
+	     addr < VMALLOC_START; addr += PMD_SIZE)
 		pmd_clear(pmd_off_k(addr));
 }
 
@@ -920,8 +943,8 @@ void __init arm_mm_memblock_reserve(void)
 }
 
 /*
- * Set up device the mappings.  Since we clear out the page tables for all
- * mappings above VMALLOC_END, we will remove any debug device mappings.
+ * Set up the device mappings.  Since we clear out the page tables for all
+ * mappings above VMALLOC_START, we will remove any debug device mappings.
  * This means you have to be careful how you debug this function, or any
  * called function.  This means you can't use any function or debugging
  * method which may touch any device, otherwise the kernel _will_ crash.
@@ -936,7 +959,7 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
 	 */
 	vectors_page = early_alloc(PAGE_SIZE);
 
-	for (addr = VMALLOC_END; addr; addr += PMD_SIZE)
+	for (addr = VMALLOC_START; addr; addr += PMD_SIZE)
 		pmd_clear(pmd_off_k(addr));
 
 	/*
diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c
index 88417514b2c..4fc6794cca4 100644
--- a/arch/arm/mm/nommu.c
+++ b/arch/arm/mm/nommu.c
@@ -29,6 +29,8 @@ void __init arm_mm_memblock_reserve(void)
 
 void __init sanity_check_meminfo(void)
 {
+	phys_addr_t end = bank_phys_end(&meminfo.bank[meminfo.nr_banks - 1]);
+	high_memory = __va(end - 1) + 1;
 }
 
 /*