author     Russell King <rmk@dyn-67.arm.linux.org.uk>    2009-09-12 07:04:37 -0400
committer  Russell King <rmk+kernel@arm.linux.org.uk>    2009-09-12 07:04:37 -0400
commit     87d721ad7a37b7650dd710c88dd5c6a5bf9fe996 (patch)
tree       869d633803eb7c429624d3bd16a6117816849763 /arch/arm/mm
parent     ddd559b13f6d2fe3ad68c4b3f5235fd3c2eae4e3 (diff)
parent     b7cfda9fc3d7aa60cffab5367f2a72a4a70060cd (diff)
Merge branch 'master' into devel
Diffstat (limited to 'arch/arm/mm')
-rw-r--r--  arch/arm/mm/flush.c    |   9
-rw-r--r--  arch/arm/mm/highmem.c  |   8
-rw-r--r--  arch/arm/mm/init.c     | 150
-rw-r--r--  arch/arm/mm/mmu.c      |   9
4 files changed, 129 insertions(+), 47 deletions(-)
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
index c07222eb5ce0..575f3ad722e7 100644
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
@@ -144,7 +144,14 @@ void __flush_dcache_page(struct address_space *mapping, struct page *page)
 	 * page.  This ensures that data in the physical page is mutually
 	 * coherent with the kernels mapping.
 	 */
-	__cpuc_flush_dcache_page(page_address(page));
+#ifdef CONFIG_HIGHMEM
+	/*
+	 * kmap_atomic() doesn't set the page virtual address, and
+	 * kunmap_atomic() takes care of cache flushing already.
+	 */
+	if (page_address(page))
+#endif
+		__cpuc_flush_dcache_page(page_address(page));
 
 	/*
 	 * If this is a page cache page, and we have an aliasing VIPT cache,
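
The flush.c hunk above relies on a small preprocessor pattern: with CONFIG_HIGHMEM enabled, the flush is guarded by a NULL check on page_address() (a highmem page without a permanent kernel mapping has no address to flush, and kunmap_atomic() has already flushed it), while without CONFIG_HIGHMEM the very same statement compiles unguarded. Below is a minimal userspace sketch of that shape; page_addr_of() and flush_dcache() are illustrative stand-ins, not kernel APIs.

#include <stdio.h>

#define CONFIG_HIGHMEM 1	/* toggle to compare the two generated shapes */

/* stand-in for page_address(): pretend page 2 is an unmapped highmem page */
static void *page_addr_of(int page)
{
	static char lowmem[4][64];
	return page == 2 ? NULL : lowmem[page];
}

/* stand-in for __cpuc_flush_dcache_page() */
static void flush_dcache(void *addr)
{
	printf("flush %p\n", addr);
}

static void flush_page(int page)
{
#ifdef CONFIG_HIGHMEM
	/* no permanent mapping -> nothing to flush here */
	if (page_addr_of(page))
#endif
		flush_dcache(page_addr_of(page));
}

int main(void)
{
	flush_page(1);	/* flushed */
	flush_page(2);	/* skipped when CONFIG_HIGHMEM is defined */
	return 0;
}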
diff --git a/arch/arm/mm/highmem.c b/arch/arm/mm/highmem.c
index a34954d9df7d..73cae57fa707 100644
--- a/arch/arm/mm/highmem.c
+++ b/arch/arm/mm/highmem.c
@@ -40,11 +40,16 @@ void *kmap_atomic(struct page *page, enum km_type type)
 {
 	unsigned int idx;
 	unsigned long vaddr;
+	void *kmap;
 
 	pagefault_disable();
 	if (!PageHighMem(page))
 		return page_address(page);
 
+	kmap = kmap_high_get(page);
+	if (kmap)
+		return kmap;
+
 	idx = type + KM_TYPE_NR * smp_processor_id();
 	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
 #ifdef CONFIG_DEBUG_HIGHMEM
@@ -80,6 +85,9 @@ void kunmap_atomic(void *kvaddr, enum km_type type)
 #else
 	(void) idx;  /* to kill a warning */
 #endif
+	} else if (vaddr >= PKMAP_ADDR(0) && vaddr < PKMAP_ADDR(LAST_PKMAP)) {
+		/* this address was obtained through kmap_high_get() */
+		kunmap_high(pte_page(pkmap_page_table[PKMAP_NR(vaddr)]));
 	}
 	pagefault_enable();
 }
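
Both highmem.c hunks implement the same policy: kmap_atomic() should reuse an existing permanent (kmap_high) mapping when one exists, and kunmap_atomic() must then recognise whether the address it is handed lies in a fixmap slot or in the PKMAP window. A rough userspace sketch of that address classification follows; the constants (FIXMAP_BASE, PKMAP_BASE and friends) are invented for illustration, not the real ARM layout.

#include <stdio.h>

/* illustrative layout, not the real ARM virtual memory map */
#define PAGE_SIZE    0x1000UL
#define PKMAP_BASE   0xbfe00000UL
#define LAST_PKMAP   512
#define FIXMAP_BASE  0xfff00000UL
#define KM_SLOTS     16

static void kunmap_atomic_sketch(unsigned long vaddr)
{
	if (vaddr >= FIXMAP_BASE && vaddr < FIXMAP_BASE + KM_SLOTS * PAGE_SIZE) {
		/* fixmap slot: tear down the temporary PTE (and flush on ARM) */
		printf("%#lx: fixmap slot %lu, unmap + flush\n",
		       vaddr, (vaddr - FIXMAP_BASE) / PAGE_SIZE);
	} else if (vaddr >= PKMAP_BASE && vaddr < PKMAP_BASE + LAST_PKMAP * PAGE_SIZE) {
		/* address came from kmap_high_get(): drop the pkmap reference */
		printf("%#lx: pkmap entry %lu, kunmap_high()\n",
		       vaddr, (vaddr - PKMAP_BASE) / PAGE_SIZE);
	} else {
		/* ordinary lowmem address: nothing to undo */
		printf("%#lx: lowmem, no-op\n", vaddr);
	}
}

int main(void)
{
	kunmap_atomic_sketch(FIXMAP_BASE + 3 * PAGE_SIZE);
	kunmap_atomic_sketch(PKMAP_BASE + 7 * PAGE_SIZE);
	kunmap_atomic_sketch(0xc0100000UL);
	return 0;
}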
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 8277802ec859..ea36186f32c3 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -15,6 +15,7 @@
 #include <linux/mman.h>
 #include <linux/nodemask.h>
 #include <linux/initrd.h>
+#include <linux/sort.h>
 #include <linux/highmem.h>
 
 #include <asm/mach-types.h>
@@ -120,6 +121,32 @@ void show_mem(void)
 	printk("%d pages swap cached\n", cached);
 }
 
+static void __init find_node_limits(int node, struct meminfo *mi,
+	unsigned long *min, unsigned long *max_low, unsigned long *max_high)
+{
+	int i;
+
+	*min = -1UL;
+	*max_low = *max_high = 0;
+
+	for_each_nodebank(i, mi, node) {
+		struct membank *bank = &mi->bank[i];
+		unsigned long start, end;
+
+		start = bank_pfn_start(bank);
+		end = bank_pfn_end(bank);
+
+		if (*min > start)
+			*min = start;
+		if (*max_high < end)
+			*max_high = end;
+		if (bank->highmem)
+			continue;
+		if (*max_low < end)
+			*max_low = end;
+	}
+}
+
 /*
  * FIXME: We really want to avoid allocating the bootmap bitmap
  * over the top of the initrd.  Hopefully, this is located towards
@@ -210,41 +237,25 @@ static inline void map_memory_bank(struct membank *bank)
 #endif
 }
 
-static unsigned long __init bootmem_init_node(int node, struct meminfo *mi)
+static void __init bootmem_init_node(int node, struct meminfo *mi,
+	unsigned long start_pfn, unsigned long end_pfn)
 {
-	unsigned long start_pfn, end_pfn, boot_pfn;
+	unsigned long boot_pfn;
 	unsigned int boot_pages;
 	pg_data_t *pgdat;
 	int i;
 
-	start_pfn = -1UL;
-	end_pfn = 0;
-
 	/*
-	 * Calculate the pfn range, and map the memory banks for this node.
+	 * Map the memory banks for this node.
 	 */
 	for_each_nodebank(i, mi, node) {
 		struct membank *bank = &mi->bank[i];
-		unsigned long start, end;
-
-		start = bank_pfn_start(bank);
-		end = bank_pfn_end(bank);
-
-		if (start_pfn > start)
-			start_pfn = start;
-		if (end_pfn < end)
-			end_pfn = end;
 
-		map_memory_bank(bank);
+		if (!bank->highmem)
+			map_memory_bank(bank);
 	}
 
 	/*
-	 * If there is no memory in this node, ignore it.
-	 */
-	if (end_pfn == 0)
-		return end_pfn;
-
-	/*
 	 * Allocate the bootmem bitmap page.
 	 */
 	boot_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
@@ -260,7 +271,8 @@ static unsigned long __init bootmem_init_node(int node, struct meminfo *mi)
 
 	for_each_nodebank(i, mi, node) {
 		struct membank *bank = &mi->bank[i];
-		free_bootmem_node(pgdat, bank_phys_start(bank), bank_phys_size(bank));
+		if (!bank->highmem)
+			free_bootmem_node(pgdat, bank_phys_start(bank), bank_phys_size(bank));
 		memory_present(node, bank_pfn_start(bank), bank_pfn_end(bank));
 	}
 
@@ -269,8 +281,6 @@ static unsigned long __init bootmem_init_node(int node, struct meminfo *mi)
 	 */
 	reserve_bootmem_node(pgdat, boot_pfn << PAGE_SHIFT,
 			boot_pages << PAGE_SHIFT, BOOTMEM_DEFAULT);
-
-	return end_pfn;
 }
 
 static void __init bootmem_reserve_initrd(int node)
@@ -297,33 +307,39 @@ static void __init bootmem_reserve_initrd(int node)
 static void __init bootmem_free_node(int node, struct meminfo *mi)
 {
 	unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES];
-	unsigned long start_pfn, end_pfn;
-	pg_data_t *pgdat = NODE_DATA(node);
+	unsigned long min, max_low, max_high;
 	int i;
 
-	start_pfn = pgdat->bdata->node_min_pfn;
-	end_pfn = pgdat->bdata->node_low_pfn;
+	find_node_limits(node, mi, &min, &max_low, &max_high);
 
 	/*
 	 * initialise the zones within this node.
 	 */
 	memset(zone_size, 0, sizeof(zone_size));
-	memset(zhole_size, 0, sizeof(zhole_size));
 
 	/*
 	 * The size of this node has already been determined.  If we need
 	 * to do anything fancy with the allocation of this memory to the
 	 * zones, now is the time to do it.
 	 */
-	zone_size[0] = end_pfn - start_pfn;
+	zone_size[0] = max_low - min;
+#ifdef CONFIG_HIGHMEM
+	zone_size[ZONE_HIGHMEM] = max_high - max_low;
+#endif
 
 	/*
 	 * For each bank in this node, calculate the size of the holes.
 	 *  holes = node_size - sum(bank_sizes_in_node)
 	 */
-	zhole_size[0] = zone_size[0];
-	for_each_nodebank(i, mi, node)
-		zhole_size[0] -= bank_pfn_size(&mi->bank[i]);
+	memcpy(zhole_size, zone_size, sizeof(zhole_size));
+	for_each_nodebank(i, mi, node) {
+		int idx = 0;
+#ifdef CONFIG_HIGHMEM
+		if (mi->bank[i].highmem)
+			idx = ZONE_HIGHMEM;
+#endif
+		zhole_size[idx] -= bank_pfn_size(&mi->bank[i]);
+	}
 
 	/*
 	 * Adjust the sizes according to any special requirements for
@@ -331,25 +347,74 @@ static void __init bootmem_free_node(int node, struct meminfo *mi)
 	 */
 	arch_adjust_zones(node, zone_size, zhole_size);
 
-	free_area_init_node(node, zone_size, start_pfn, zhole_size);
+	free_area_init_node(node, zone_size, min, zhole_size);
+}
+
+#ifndef CONFIG_SPARSEMEM
+int pfn_valid(unsigned long pfn)
+{
+	struct meminfo *mi = &meminfo;
+	unsigned int left = 0, right = mi->nr_banks;
+
+	do {
+		unsigned int mid = (right + left) / 2;
+		struct membank *bank = &mi->bank[mid];
+
+		if (pfn < bank_pfn_start(bank))
+			right = mid;
+		else if (pfn >= bank_pfn_end(bank))
+			left = mid + 1;
+		else
+			return 1;
+	} while (left < right);
+	return 0;
+}
+EXPORT_SYMBOL(pfn_valid);
+#endif
+
+static int __init meminfo_cmp(const void *_a, const void *_b)
+{
+	const struct membank *a = _a, *b = _b;
+	long cmp = bank_pfn_start(a) - bank_pfn_start(b);
+	return cmp < 0 ? -1 : cmp > 0 ? 1 : 0;
 }
 
 void __init bootmem_init(void)
 {
 	struct meminfo *mi = &meminfo;
-	unsigned long memend_pfn = 0;
+	unsigned long min, max_low, max_high;
 	int node, initrd_node;
 
+	sort(&mi->bank, mi->nr_banks, sizeof(mi->bank[0]), meminfo_cmp, NULL);
+
 	/*
 	 * Locate which node contains the ramdisk image, if any.
 	 */
 	initrd_node = check_initrd(mi);
 
+	max_low = max_high = 0;
+
 	/*
 	 * Run through each node initialising the bootmem allocator.
 	 */
 	for_each_node(node) {
-		unsigned long end_pfn = bootmem_init_node(node, mi);
+		unsigned long node_low, node_high;
+
+		find_node_limits(node, mi, &min, &node_low, &node_high);
+
+		if (node_low > max_low)
+			max_low = node_low;
+		if (node_high > max_high)
+			max_high = node_high;
+
+		/*
+		 * If there is no memory in this node, ignore it.
+		 * (We can't have nodes which have no lowmem)
+		 */
+		if (node_low == 0)
+			continue;
+
+		bootmem_init_node(node, mi, min, node_low);
 
 		/*
 		 * Reserve any special node zero regions.
@@ -362,12 +427,6 @@ void __init bootmem_init(void)
 		 */
 		if (node == initrd_node)
 			bootmem_reserve_initrd(node);
-
-		/*
-		 * Remember the highest memory PFN.
-		 */
-		if (end_pfn > memend_pfn)
-			memend_pfn = end_pfn;
 	}
 
 	/*
@@ -383,7 +442,7 @@ void __init bootmem_init(void)
 	for_each_node(node)
 		bootmem_free_node(node, mi);
 
-	high_memory = __va((memend_pfn << PAGE_SHIFT) - 1) + 1;
+	high_memory = __va((max_low << PAGE_SHIFT) - 1) + 1;
 
 	/*
 	 * This doesn't seem to be used by the Linux memory manager any
@@ -393,7 +452,8 @@ void __init bootmem_init(void)
 	 * Note: max_low_pfn and max_pfn reflect the number of _pages_ in
 	 * the system, not the maximum PFN.
 	 */
-	max_pfn = max_low_pfn = memend_pfn - PHYS_PFN_OFFSET;
+	max_low_pfn = max_low - PHYS_PFN_OFFSET;
+	max_pfn = max_high - PHYS_PFN_OFFSET;
 }
 
 static inline int free_area(unsigned long pfn, unsigned long end, char *s)
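
Two details of the init.c change deserve a note: pfn_valid() can binary-search meminfo only because bootmem_init() now sort()s the banks by start pfn, with meminfo_cmp() supplying the comparison. The standalone sketch below mirrors that search over a sorted array of [start, end) pfn ranges; struct bank and the sample values are illustrative, not the kernel's meminfo.

#include <stdio.h>
#include <stdlib.h>

struct bank {
	unsigned long start;	/* first pfn of the bank */
	unsigned long end;	/* one past the last pfn */
};

static int bank_cmp(const void *_a, const void *_b)
{
	const struct bank *a = _a, *b = _b;
	/* same shape as meminfo_cmp(): normalise the difference to -1/0/1 */
	long cmp = (long)a->start - (long)b->start;
	return cmp < 0 ? -1 : cmp > 0 ? 1 : 0;
}

/* binary search over banks sorted by start pfn, as in the new pfn_valid() */
static int pfn_valid_sketch(const struct bank *banks, unsigned int nr,
			    unsigned long pfn)
{
	unsigned int left = 0, right = nr;

	do {
		unsigned int mid = (left + right) / 2;

		if (pfn < banks[mid].start)
			right = mid;		/* pfn is below this bank */
		else if (pfn >= banks[mid].end)
			left = mid + 1;		/* pfn is above this bank */
		else
			return 1;		/* pfn falls inside the bank */
	} while (left < right);
	return 0;
}

int main(void)
{
	struct bank banks[] = {
		{ 0x60000, 0x68000 },	/* deliberately unsorted */
		{ 0x20000, 0x28000 },
	};

	qsort(banks, 2, sizeof(banks[0]), bank_cmp);

	printf("%d %d %d\n",
	       pfn_valid_sketch(banks, 2, 0x21000),	/* 1: in first bank  */
	       pfn_valid_sketch(banks, 2, 0x30000),	/* 0: in the hole    */
	       pfn_valid_sketch(banks, 2, 0x67fff));	/* 1: in second bank */
	return 0;
}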
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 4722582b17b8..4426ee67ceca 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -687,13 +687,19 @@ __early_param("vmalloc=", early_vmalloc);
 
 static void __init sanity_check_meminfo(void)
 {
-	int i, j;
+	int i, j, highmem = 0;
 
 	for (i = 0, j = 0; i < meminfo.nr_banks; i++) {
 		struct membank *bank = &meminfo.bank[j];
 		*bank = meminfo.bank[i];
 
 #ifdef CONFIG_HIGHMEM
+		if (__va(bank->start) > VMALLOC_MIN ||
+		    __va(bank->start) < (void *)PAGE_OFFSET)
+			highmem = 1;
+
+		bank->highmem = highmem;
+
 		/*
 		 * Split those memory banks which are partially overlapping
 		 * the vmalloc area greatly simplifying things later.
@@ -714,6 +720,7 @@ static void __init sanity_check_meminfo(void)
 			i++;
 			bank[1].size -= VMALLOC_MIN - __va(bank->start);
 			bank[1].start = __pa(VMALLOC_MIN - 1) + 1;
+			bank[1].highmem = highmem = 1;
 			j++;
 		}
 		bank->size = VMALLOC_MIN - __va(bank->start);
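
In sanity_check_meminfo() the highmem flag is sticky: because the banks are sorted by start address, once one bank begins above the lowmem limit every later bank is highmem too, and a bank that straddles the limit is split into a lowmem part and a highmem remainder. Below is a hedged sketch of that split, using a made-up byte-address limit instead of the real VMALLOC_MIN/__va() arithmetic.

#include <stdio.h>

struct bank {
	unsigned long start;	/* physical start address */
	unsigned long size;	/* size in bytes */
	int highmem;
};

/* split banks that straddle `limit`, latching `highmem` once it is crossed */
static unsigned int classify(struct bank *out, const struct bank *in,
			     unsigned int nr, unsigned long limit)
{
	unsigned int i, j = 0;
	int highmem = 0;

	for (i = 0; i < nr; i++) {
		struct bank b = in[i];

		if (b.start >= limit)
			highmem = 1;	/* sticky: all later banks are highmem */

		if (!highmem && b.start + b.size > limit) {
			/* lowmem part up to the limit ... */
			out[j].start = b.start;
			out[j].size = limit - b.start;
			out[j].highmem = 0;
			j++;
			/* ... and a highmem remainder above it */
			out[j].start = limit;
			out[j].size = b.start + b.size - limit;
			out[j].highmem = highmem = 1;
			j++;
			continue;
		}

		b.highmem = highmem;
		out[j++] = b;
	}
	return j;
}

int main(void)
{
	const struct bank in[] = { { 0x00000000, 0x20000000, 0 },
				   { 0x20000000, 0x20000000, 0 } };
	struct bank out[4];
	unsigned int i, n = classify(out, in, 2, 0x30000000);	/* 768MB limit */

	for (i = 0; i < n; i++)
		printf("bank %u: %#010lx +%#lx %s\n", i, out[i].start,
		       out[i].size, out[i].highmem ? "highmem" : "lowmem");
	return 0;
}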