author    Russell King <rmk@dyn-67.arm.linux.org.uk>    2005-06-27 09:16:47 -0400
committer Russell King <rmk+kernel@arm.linux.org.uk>    2005-06-27 09:16:47 -0400
commit    a013053d4965d9a45300938e713a4b512e0257d8 (patch)
tree      d0a03ece81d34de8df497f23376918ec2472bd1e /arch/arm/mm
parent    a343e6075a396e07eeff52c0da5629c8fd396be2 (diff)
[PATCH] ARM: Move memmap freeing into init.c
It doesn't make sense for this to be in mm-armv.c now that 26-bit ARM
support is no longer integrated into arch/arm.

Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
Diffstat (limited to 'arch/arm/mm')
-rw-r--r--  arch/arm/mm/init.c     | 71
-rw-r--r--  arch/arm/mm/mm-armv.c  | 72
2 files changed, 65 insertions(+), 78 deletions(-)
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index c08710b1ff02..6dcb23d64bf5 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -522,6 +522,69 @@ static inline void free_area(unsigned long addr, unsigned long end, char *s)
 	printk(KERN_INFO "Freeing %s memory: %dK\n", s, size);
 }
 
+static inline void
+free_memmap(int node, unsigned long start_pfn, unsigned long end_pfn)
+{
+	struct page *start_pg, *end_pg;
+	unsigned long pg, pgend;
+
+	/*
+	 * Convert start_pfn/end_pfn to a struct page pointer.
+	 */
+	start_pg = pfn_to_page(start_pfn);
+	end_pg = pfn_to_page(end_pfn);
+
+	/*
+	 * Convert to physical addresses, and
+	 * round start upwards and end downwards.
+	 */
+	pg = PAGE_ALIGN(__pa(start_pg));
+	pgend = __pa(end_pg) & PAGE_MASK;
+
+	/*
+	 * If there are free pages between these,
+	 * free the section of the memmap array.
+	 */
+	if (pg < pgend)
+		free_bootmem_node(NODE_DATA(node), pg, pgend - pg);
+}
+
+/*
+ * The mem_map array can get very big.  Free the unused area of the memory map.
+ */
+static void __init free_unused_memmap_node(int node, struct meminfo *mi)
+{
+	unsigned long bank_start, prev_bank_end = 0;
+	unsigned int i;
+
+	/*
+	 * [FIXME] This relies on each bank being in address order.  This
+	 * may not be the case, especially if the user has provided the
+	 * information on the command line.
+	 */
+	for (i = 0; i < mi->nr_banks; i++) {
+		if (mi->bank[i].size == 0 || mi->bank[i].node != node)
+			continue;
+
+		bank_start = mi->bank[i].start >> PAGE_SHIFT;
+		if (bank_start < prev_bank_end) {
+			printk(KERN_ERR "MEM: unordered memory banks.  "
+				"Not freeing memmap.\n");
+			break;
+		}
+
+		/*
+		 * If we had a previous bank, and there is a space
+		 * between the current bank and the previous, free it.
+		 */
+		if (prev_bank_end && prev_bank_end != bank_start)
+			free_memmap(node, prev_bank_end, bank_start);
+
+		prev_bank_end = (mi->bank[i].start +
+				 mi->bank[i].size) >> PAGE_SHIFT;
+	}
+}
+
 /*
  * mem_init() marks the free areas in the mem_map and tells us how much
  * memory is free.  This is done after various parts of the system have
@@ -540,16 +603,12 @@ void __init mem_init(void)
 	max_mapnr   = virt_to_page(high_memory) - mem_map;
 #endif
 
-	/*
-	 * We may have non-contiguous memory.
-	 */
-	if (meminfo.nr_banks != 1)
-		create_memmap_holes(&meminfo);
-
 	/* this will put all unused low memory onto the freelists */
 	for_each_online_node(node) {
 		pg_data_t *pgdat = NODE_DATA(node);
 
+		free_unused_memmap_node(node, &meminfo);
+
 		if (pgdat->node_spanned_pages != 0)
 			totalram_pages += free_all_bootmem_node(pgdat);
 	}
diff --git a/arch/arm/mm/mm-armv.c b/arch/arm/mm/mm-armv.c
index d79864a0dfa6..052ab443ec4e 100644
--- a/arch/arm/mm/mm-armv.c
+++ b/arch/arm/mm/mm-armv.c
@@ -697,75 +697,3 @@ void __init iotable_init(struct map_desc *io_desc, int nr)
 	for (i = 0; i < nr; i++)
 		create_mapping(io_desc + i);
 }
-
-static inline void
-free_memmap(int node, unsigned long start_pfn, unsigned long end_pfn)
-{
-	struct page *start_pg, *end_pg;
-	unsigned long pg, pgend;
-
-	/*
-	 * Convert start_pfn/end_pfn to a struct page pointer.
-	 */
-	start_pg = pfn_to_page(start_pfn);
-	end_pg = pfn_to_page(end_pfn);
-
-	/*
-	 * Convert to physical addresses, and
-	 * round start upwards and end downwards.
-	 */
-	pg = PAGE_ALIGN(__pa(start_pg));
-	pgend = __pa(end_pg) & PAGE_MASK;
-
-	/*
-	 * If there are free pages between these,
-	 * free the section of the memmap array.
-	 */
-	if (pg < pgend)
-		free_bootmem_node(NODE_DATA(node), pg, pgend - pg);
-}
-
-static inline void free_unused_memmap_node(int node, struct meminfo *mi)
-{
-	unsigned long bank_start, prev_bank_end = 0;
-	unsigned int i;
-
-	/*
-	 * [FIXME] This relies on each bank being in address order.  This
-	 * may not be the case, especially if the user has provided the
-	 * information on the command line.
-	 */
-	for (i = 0; i < mi->nr_banks; i++) {
-		if (mi->bank[i].size == 0 || mi->bank[i].node != node)
-			continue;
-
-		bank_start = mi->bank[i].start >> PAGE_SHIFT;
-		if (bank_start < prev_bank_end) {
-			printk(KERN_ERR "MEM: unordered memory banks.  "
-				"Not freeing memmap.\n");
-			break;
-		}
-
-		/*
-		 * If we had a previous bank, and there is a space
-		 * between the current bank and the previous, free it.
-		 */
-		if (prev_bank_end && prev_bank_end != bank_start)
-			free_memmap(node, prev_bank_end, bank_start);
-
-		prev_bank_end = PAGE_ALIGN(mi->bank[i].start +
-					   mi->bank[i].size) >> PAGE_SHIFT;
-	}
-}
-
-/*
- * The mem_map array can get very big.  Free
- * the unused area of the memory map.
- */
-void __init create_memmap_holes(struct meminfo *mi)
-{
-	int node;
-
-	for_each_online_node(node)
-		free_unused_memmap_node(node, mi);
-}
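
Aside: the inward rounding in free_memmap() above can be illustrated with a small stand-alone sketch. This is not kernel code; it is a user-space approximation that assumes a 4 KiB page size and uses made-up physical addresses for the struct page entries that bound a hole between two banks. The point it demonstrates is that only whole pages of the mem_map lying entirely inside the hole may be handed back to the boot memory allocator, which is why the start is rounded up and the end rounded down before calling free_bootmem_node().

/*
 * User-space sketch of the rounding done in free_memmap().
 * Page size and addresses are assumptions for illustration only.
 */
#include <stdio.h>

#define PAGE_SIZE     4096UL
#define PAGE_MASK     (~(PAGE_SIZE - 1))
#define PAGE_ALIGN(a) (((a) + PAGE_SIZE - 1) & PAGE_MASK)

int main(void)
{
	/* hypothetical addresses of the memmap entries bounding a hole */
	unsigned long start_pg = 0x40201234UL;  /* entry just past the end of bank 0 */
	unsigned long end_pg   = 0x40305678UL;  /* entry at the start of bank 1 */

	unsigned long pg    = PAGE_ALIGN(start_pg);  /* round start upwards */
	unsigned long pgend = end_pg & PAGE_MASK;    /* round end downwards */

	if (pg < pgend)
		printf("would free %lu bytes of mem_map at 0x%lx\n", pgend - pg, pg);
	else
		printf("hole too small: no whole memmap page to free\n");

	return 0;
}

With these example addresses the sketch reports a little over 1 MB of mem_map that could be freed; if the two addresses fell within the same page, nothing would be freed at all.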