Diffstat (limited to 'arch/arm/mm/mm-armv.c')
 arch/arm/mm/mm-armv.c | 87 +++-------------------------------------
 1 file changed, 7 insertions(+), 80 deletions(-)
diff --git a/arch/arm/mm/mm-armv.c b/arch/arm/mm/mm-armv.c
index 2c2b93d77d43..052ab443ec4e 100644
--- a/arch/arm/mm/mm-armv.c
+++ b/arch/arm/mm/mm-armv.c
@@ -169,7 +169,14 @@ pgd_t *get_pgd_slow(struct mm_struct *mm)
 
 	memzero(new_pgd, FIRST_KERNEL_PGD_NR * sizeof(pgd_t));
 
+	/*
+	 * Copy over the kernel and IO PGD entries
+	 */
 	init_pgd = pgd_offset_k(0);
+	memcpy(new_pgd + FIRST_KERNEL_PGD_NR, init_pgd + FIRST_KERNEL_PGD_NR,
+	       (PTRS_PER_PGD - FIRST_KERNEL_PGD_NR) * sizeof(pgd_t));
+
+	clean_dcache_area(new_pgd, PTRS_PER_PGD * sizeof(pgd_t));
 
 	if (!vectors_high()) {
 		/*
@@ -198,14 +205,6 @@ pgd_t *get_pgd_slow(struct mm_struct *mm)
 		spin_unlock(&mm->page_table_lock);
 	}
 
-	/*
-	 * Copy over the kernel and IO PGD entries
-	 */
-	memcpy(new_pgd + FIRST_KERNEL_PGD_NR, init_pgd + FIRST_KERNEL_PGD_NR,
-	       (PTRS_PER_PGD - FIRST_KERNEL_PGD_NR) * sizeof(pgd_t));
-
-	clean_dcache_area(new_pgd, PTRS_PER_PGD * sizeof(pgd_t));
-
 	return new_pgd;
 
 no_pte:
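Note on the two hunks above: together they move the copy of the kernel and IO PGD entries, plus the clean_dcache_area() call that follows it, from the end of get_pgd_slow() up to just after the new table's user entries are zeroed, so the kernel half of the table is populated before the !vectors_high() path runs. The diff carries no changelog text, so that reading of the motivation is an inference from the ordering alone. Below is a minimal stand-alone C sketch of the zero-then-copy idiom the hunks implement. It is a toy model, not kernel code: pgd_t is reduced to a plain integer, and the table geometry (PTRS_PER_PGD, FIRST_KERNEL_PGD_NR) uses assumed illustrative values rather than the kernel's.

/* Toy model of the zero-then-copy PGD set-up in the hunks above.
 * pgd_t and the table geometry are assumptions, not the kernel's. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define PTRS_PER_PGD        2048  /* assumed number of PGD entries */
#define FIRST_KERNEL_PGD_NR 1536  /* assumed first kernel-space entry */

typedef uint32_t pgd_t;           /* stand-in for the real pgd_t */

static pgd_t init_pgd[PTRS_PER_PGD];  /* stand-in for the master kernel table */

static void setup_new_pgd(pgd_t *new_pgd)
{
	/* Zero the user entries (the kernel uses memzero() here)... */
	memset(new_pgd, 0, FIRST_KERNEL_PGD_NR * sizeof(pgd_t));

	/* ...then immediately copy the kernel and IO entries from the
	 * master table -- the memcpy the patch moves up the function. */
	memcpy(new_pgd + FIRST_KERNEL_PGD_NR, init_pgd + FIRST_KERNEL_PGD_NR,
	       (PTRS_PER_PGD - FIRST_KERNEL_PGD_NR) * sizeof(pgd_t));

	/* The real code follows with clean_dcache_area() so the table
	 * walker sees the entries; a userspace model needs no equivalent. */
}

int main(void)
{
	static pgd_t new_pgd[PTRS_PER_PGD];
	int i;

	for (i = 0; i < PTRS_PER_PGD; i++)
		init_pgd[i] = (pgd_t)(i + 1);   /* fake entries */

	setup_new_pgd(new_pgd);

	printf("user entry 0    = %u (zeroed)\n", new_pgd[0]);
	printf("kernel entry %d = %u (copied)\n",
	       FIRST_KERNEL_PGD_NR, new_pgd[FIRST_KERNEL_PGD_NR]);
	return 0;
}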
@@ -698,75 +697,3 @@ void __init iotable_init(struct map_desc *io_desc, int nr)
 	for (i = 0; i < nr; i++)
 		create_mapping(io_desc + i);
 }
-
-static inline void
-free_memmap(int node, unsigned long start_pfn, unsigned long end_pfn)
-{
-	struct page *start_pg, *end_pg;
-	unsigned long pg, pgend;
-
-	/*
-	 * Convert start_pfn/end_pfn to a struct page pointer.
-	 */
-	start_pg = pfn_to_page(start_pfn);
-	end_pg = pfn_to_page(end_pfn);
-
-	/*
-	 * Convert to physical addresses, and
-	 * round start upwards and end downwards.
-	 */
-	pg = PAGE_ALIGN(__pa(start_pg));
-	pgend = __pa(end_pg) & PAGE_MASK;
-
-	/*
-	 * If there are free pages between these,
-	 * free the section of the memmap array.
-	 */
-	if (pg < pgend)
-		free_bootmem_node(NODE_DATA(node), pg, pgend - pg);
-}
-
-static inline void free_unused_memmap_node(int node, struct meminfo *mi)
-{
-	unsigned long bank_start, prev_bank_end = 0;
-	unsigned int i;
-
-	/*
-	 * [FIXME] This relies on each bank being in address order. This
-	 * may not be the case, especially if the user has provided the
-	 * information on the command line.
-	 */
-	for (i = 0; i < mi->nr_banks; i++) {
-		if (mi->bank[i].size == 0 || mi->bank[i].node != node)
-			continue;
-
-		bank_start = mi->bank[i].start >> PAGE_SHIFT;
-		if (bank_start < prev_bank_end) {
-			printk(KERN_ERR "MEM: unordered memory banks. "
-				"Not freeing memmap.\n");
-			break;
-		}
-
-		/*
-		 * If we had a previous bank, and there is a space
-		 * between the current bank and the previous, free it.
-		 */
-		if (prev_bank_end && prev_bank_end != bank_start)
-			free_memmap(node, prev_bank_end, bank_start);
-
-		prev_bank_end = PAGE_ALIGN(mi->bank[i].start +
-					   mi->bank[i].size) >> PAGE_SHIFT;
-	}
-}
-
-/*
- * The mem_map array can get very big. Free
- * the unused area of the memory map.
- */
-void __init create_memmap_holes(struct meminfo *mi)
-{
-	int node;
-
-	for_each_online_node(node)
-		free_unused_memmap_node(node, mi);
-}
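Note on the final hunk: the deleted lines remove the create_memmap_holes() machinery from this file. What it did: for each node, walk the banks in struct meminfo in address order and, wherever a gap separated the end of one bank from the start of the next, hand the mem_map entries covering that gap back to the boot-time allocator via free_bootmem_node(). The hunk alone does not say where (or whether) the logic reappears elsewhere in the tree; it only shows the removal from mm-armv.c. The subtle part is the rounding in free_memmap(): the physical byte range of the unused struct page entries is trimmed inward, start rounded up to a page boundary and end rounded down, so only pages of the array that are entirely unused get freed. The stand-alone sketch below reproduces just that arithmetic; MEM_MAP_BASE and the 32-byte struct page size are assumptions for illustration, standing in for what pfn_to_page()/__pa() and the real struct page layout provide in the kernel.

/* Stand-alone model of the rounding done by the removed free_memmap(). */
#include <stdio.h>

#define PAGE_SIZE          4096UL
#define PAGE_MASK          (~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x)      (((x) + PAGE_SIZE - 1) & PAGE_MASK)

#define MEM_MAP_BASE       0xc1000000UL  /* assumed address of mem_map[] */
#define SIZEOF_STRUCT_PAGE 32UL          /* assumed sizeof(struct page) */

/* Byte address of the mem_map entry for a given page frame number. */
static unsigned long memmap_addr(unsigned long pfn)
{
	return MEM_MAP_BASE + pfn * SIZEOF_STRUCT_PAGE;
}

int main(void)
{
	/* A gap between two memory banks: PFNs [0x1234, 0x8000) have no
	 * backing RAM, so their struct page entries are never used. */
	unsigned long start_pfn = 0x1234, end_pfn = 0x8000;

	/* Round the start up and the end down, as free_memmap() did, so
	 * only whole, entirely-unused pages of the array are freed. */
	unsigned long pg    = PAGE_ALIGN(memmap_addr(start_pfn));
	unsigned long pgend = memmap_addr(end_pfn) & PAGE_MASK;

	if (pg < pgend)
		printf("free %lu bytes of mem_map at 0x%lx\n", pgend - pg, pg);
	else
		printf("gap too small: no whole page to free\n");
	return 0;
}

The same rounding explains why small inter-bank gaps free nothing: if the gap's mem_map span does not cover at least one whole page after trimming, pg >= pgend and free_bootmem_node() is never called.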