Diffstat (limited to 'arch/arm/mm')

 arch/arm/mm/init.c    | 71 ++++++++++++++++++++--
 arch/arm/mm/mm-armv.c | 87 ++------------------------
 2 files changed, 72 insertions(+), 86 deletions(-)
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index c08710b1ff02..6dcb23d64bf5 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -522,6 +522,69 @@ static inline void free_area(unsigned long addr, unsigned long end, char *s)
 	printk(KERN_INFO "Freeing %s memory: %dK\n", s, size);
 }
 
+static inline void
+free_memmap(int node, unsigned long start_pfn, unsigned long end_pfn)
+{
+	struct page *start_pg, *end_pg;
+	unsigned long pg, pgend;
+
+	/*
+	 * Convert start_pfn/end_pfn to a struct page pointer.
+	 */
+	start_pg = pfn_to_page(start_pfn);
+	end_pg = pfn_to_page(end_pfn);
+
+	/*
+	 * Convert to physical addresses, and
+	 * round start upwards and end downwards.
+	 */
+	pg = PAGE_ALIGN(__pa(start_pg));
+	pgend = __pa(end_pg) & PAGE_MASK;
+
+	/*
+	 * If there are free pages between these,
+	 * free the section of the memmap array.
+	 */
+	if (pg < pgend)
+		free_bootmem_node(NODE_DATA(node), pg, pgend - pg);
+}
+
+/*
+ * The mem_map array can get very big.  Free the unused area of the memory map.
+ */
+static void __init free_unused_memmap_node(int node, struct meminfo *mi)
+{
+	unsigned long bank_start, prev_bank_end = 0;
+	unsigned int i;
+
+	/*
+	 * [FIXME] This relies on each bank being in address order.  This
+	 * may not be the case, especially if the user has provided the
+	 * information on the command line.
+	 */
+	for (i = 0; i < mi->nr_banks; i++) {
+		if (mi->bank[i].size == 0 || mi->bank[i].node != node)
+			continue;
+
+		bank_start = mi->bank[i].start >> PAGE_SHIFT;
+		if (bank_start < prev_bank_end) {
+			printk(KERN_ERR "MEM: unordered memory banks.  "
+				"Not freeing memmap.\n");
+			break;
+		}
+
+		/*
+		 * If we had a previous bank, and there is a space
+		 * between the current bank and the previous, free it.
+		 */
+		if (prev_bank_end && prev_bank_end != bank_start)
+			free_memmap(node, prev_bank_end, bank_start);
+
+		prev_bank_end = (mi->bank[i].start +
+				 mi->bank[i].size) >> PAGE_SHIFT;
+	}
+}
+
 /*
  * mem_init() marks the free areas in the mem_map and tells us how much
  * memory is free.  This is done after various parts of the system have
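
[The rounding in free_memmap() above is the subtle part: only memmap pages lying wholly inside the unused gap may be returned to the boot allocator, so the start address is rounded up and the end rounded down. Below is a minimal userspace sketch of that arithmetic, not kernel code; the 4K page size and the 32-byte struct page size are illustrative assumptions.]

#include <stdio.h>

#define PAGE_SIZE	4096UL
#define PAGE_MASK	(~(PAGE_SIZE - 1))
#define PAGE_ALIGN(a)	(((a) + PAGE_SIZE - 1) & PAGE_MASK)
#define SIZEOF_PAGE	32UL	/* assumed sizeof(struct page) */

int main(void)
{
	/* hypothetical byte addresses of mem_map[start_pfn]/mem_map[end_pfn] */
	unsigned long start_pg = 0x100000 + 100 * SIZEOF_PAGE;
	unsigned long end_pg   = 0x100000 + 9000 * SIZEOF_PAGE;

	unsigned long pg    = PAGE_ALIGN(start_pg);	/* round start up  */
	unsigned long pgend = end_pg & PAGE_MASK;	/* round end down  */

	/* only whole pages of the memmap between the two may be freed */
	if (pg < pgend)
		printf("would free %lu bytes of memmap\n", pgend - pg);
	return 0;
}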
@@ -540,16 +603,12 @@ void __init mem_init(void)
 	max_mapnr   = virt_to_page(high_memory) - mem_map;
 #endif
 
-	/*
-	 * We may have non-contiguous memory.
-	 */
-	if (meminfo.nr_banks != 1)
-		create_memmap_holes(&meminfo);
-
 	/* this will put all unused low memory onto the freelists */
 	for_each_online_node(node) {
 		pg_data_t *pgdat = NODE_DATA(node);
 
+		free_unused_memmap_node(node, &meminfo);
+
 		if (pgdat->node_spanned_pages != 0)
 			totalram_pages += free_all_bootmem_node(pgdat);
 	}
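
[To see which pfn ranges free_unused_memmap_node() hands to free_memmap(), here is a standalone sketch of the bank walk with a hypothetical two-bank layout, 64MB at 0 and 64MB at 128MB; the struct bank type and the addresses are illustrative, not taken from any real board.]

#include <stdio.h>

#define PAGE_SHIFT 12

struct bank { unsigned long start, size; };

int main(void)
{
	struct bank bank[2] = {
		{ 0x00000000, 0x04000000 },	/* 64MB at 0MB   */
		{ 0x08000000, 0x04000000 },	/* 64MB at 128MB */
	};
	unsigned long bank_start, prev_bank_end = 0;
	unsigned int i;

	for (i = 0; i < 2; i++) {
		bank_start = bank[i].start >> PAGE_SHIFT;
		/* a gap between banks is memmap that can be freed */
		if (prev_bank_end && prev_bank_end != bank_start)
			printf("free memmap for pfns %#lx-%#lx\n",
			       prev_bank_end, bank_start);
		prev_bank_end = (bank[i].start + bank[i].size) >> PAGE_SHIFT;
	}
	return 0;
}

[With these values the walk reports the single hole between pfn 0x4000 and pfn 0x8000.]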
diff --git a/arch/arm/mm/mm-armv.c b/arch/arm/mm/mm-armv.c
index 2c2b93d77d43..052ab443ec4e 100644
--- a/arch/arm/mm/mm-armv.c
+++ b/arch/arm/mm/mm-armv.c
@@ -169,7 +169,14 @@ pgd_t *get_pgd_slow(struct mm_struct *mm)
 
 	memzero(new_pgd, FIRST_KERNEL_PGD_NR * sizeof(pgd_t));
 
+	/*
+	 * Copy over the kernel and IO PGD entries
+	 */
 	init_pgd = pgd_offset_k(0);
+	memcpy(new_pgd + FIRST_KERNEL_PGD_NR, init_pgd + FIRST_KERNEL_PGD_NR,
+	       (PTRS_PER_PGD - FIRST_KERNEL_PGD_NR) * sizeof(pgd_t));
+
+	clean_dcache_area(new_pgd, PTRS_PER_PGD * sizeof(pgd_t));
 
 	if (!vectors_high()) {
 		/*
@@ -198,14 +205,6 @@ pgd_t *get_pgd_slow(struct mm_struct *mm)
 		spin_unlock(&mm->page_table_lock);
 	}
 
-	/*
-	 * Copy over the kernel and IO PGD entries
-	 */
-	memcpy(new_pgd + FIRST_KERNEL_PGD_NR, init_pgd + FIRST_KERNEL_PGD_NR,
-	       (PTRS_PER_PGD - FIRST_KERNEL_PGD_NR) * sizeof(pgd_t));
-
-	clean_dcache_area(new_pgd, PTRS_PER_PGD * sizeof(pgd_t));
-
 	return new_pgd;
 
 no_pte:
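
[The two hunks above move the copy of the kernel and IO PGD entries, and the D-cache clean that follows it, from the end of get_pgd_slow() to just after the user entries are zeroed. A userspace model of the resulting set-up order is sketched below; the values of PTRS_PER_PGD and FIRST_KERNEL_PGD_NR are assumed for illustration, since the real split depends on the user/kernel address layout.]

#include <stdio.h>
#include <string.h>

#define PTRS_PER_PGD		2048
#define FIRST_KERNEL_PGD_NR	(PTRS_PER_PGD / 2)	/* assumed split */

typedef unsigned long pgd_t;

static pgd_t init_pgd[PTRS_PER_PGD];	/* stands in for pgd_offset_k(0) */

int main(void)
{
	static pgd_t new_pgd[PTRS_PER_PGD];

	/* clear the user slots, as memzero() does in the kernel */
	memset(new_pgd, 0, FIRST_KERNEL_PGD_NR * sizeof(pgd_t));

	/* then copy the kernel/IO slots from the reference table */
	memcpy(new_pgd + FIRST_KERNEL_PGD_NR, init_pgd + FIRST_KERNEL_PGD_NR,
	       (PTRS_PER_PGD - FIRST_KERNEL_PGD_NR) * sizeof(pgd_t));

	/* in the kernel, clean_dcache_area() would follow here so the
	 * hardware table walker sees the copied entries */
	printf("user slots cleared: %d, kernel slots copied: %d\n",
	       FIRST_KERNEL_PGD_NR, PTRS_PER_PGD - FIRST_KERNEL_PGD_NR);
	return 0;
}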
@@ -698,75 +697,3 @@ void __init iotable_init(struct map_desc *io_desc, int nr)
 	for (i = 0; i < nr; i++)
 		create_mapping(io_desc + i);
 }
-
-static inline void
-free_memmap(int node, unsigned long start_pfn, unsigned long end_pfn)
-{
-	struct page *start_pg, *end_pg;
-	unsigned long pg, pgend;
-
-	/*
-	 * Convert start_pfn/end_pfn to a struct page pointer.
-	 */
-	start_pg = pfn_to_page(start_pfn);
-	end_pg = pfn_to_page(end_pfn);
-
-	/*
-	 * Convert to physical addresses, and
-	 * round start upwards and end downwards.
-	 */
-	pg = PAGE_ALIGN(__pa(start_pg));
-	pgend = __pa(end_pg) & PAGE_MASK;
-
-	/*
-	 * If there are free pages between these,
-	 * free the section of the memmap array.
-	 */
-	if (pg < pgend)
-		free_bootmem_node(NODE_DATA(node), pg, pgend - pg);
-}
-
-static inline void free_unused_memmap_node(int node, struct meminfo *mi)
-{
-	unsigned long bank_start, prev_bank_end = 0;
-	unsigned int i;
-
-	/*
-	 * [FIXME] This relies on each bank being in address order.  This
-	 * may not be the case, especially if the user has provided the
-	 * information on the command line.
-	 */
-	for (i = 0; i < mi->nr_banks; i++) {
-		if (mi->bank[i].size == 0 || mi->bank[i].node != node)
-			continue;
-
-		bank_start = mi->bank[i].start >> PAGE_SHIFT;
-		if (bank_start < prev_bank_end) {
-			printk(KERN_ERR "MEM: unordered memory banks.  "
-				"Not freeing memmap.\n");
-			break;
-		}
-
-		/*
-		 * If we had a previous bank, and there is a space
-		 * between the current bank and the previous, free it.
-		 */
-		if (prev_bank_end && prev_bank_end != bank_start)
-			free_memmap(node, prev_bank_end, bank_start);
-
-		prev_bank_end = PAGE_ALIGN(mi->bank[i].start +
-					   mi->bank[i].size) >> PAGE_SHIFT;
-	}
-}
-
-/*
- * The mem_map array can get very big.  Free
- * the unused area of the memory map.
- */
-void __init create_memmap_holes(struct meminfo *mi)
-{
-	int node;
-
-	for_each_online_node(node)
-		free_unused_memmap_node(node, mi);
-}
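
[One detail worth noting: the copy of free_unused_memmap_node() added to init.c computes prev_bank_end without the PAGE_ALIGN() that the removed version above applied. The two expressions only differ when a bank's end address is not page-aligned, as this standalone check shows; the 4K page size and the sample address are assumptions for illustration.]

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_ALIGN(a)	(((a) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

int main(void)
{
	unsigned long bank_end = 0x08000000 + 0x123;	/* unaligned end */

	printf("old: %#lx  new: %#lx\n",
	       PAGE_ALIGN(bank_end) >> PAGE_SHIFT,	/* rounds up  */
	       bank_end >> PAGE_SHIFT);			/* truncates  */
	return 0;
}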