author		Tejun Heo <tj@kernel.org>		2011-07-12 04:46:30 -0400
committer	H. Peter Anvin <hpa@linux.intel.com>	2011-07-14 14:45:31 -0400
commit		c13291a536b835b2ab278ab201f2cb1ce22f2785 (patch)
tree		6bb3a2fd47e22d75308314b14f3a0f0a4d338141
parent		96e907d1360240d1958fe8ce3a3ac640733330d4 (diff)
bootmem: Use for_each_mem_pfn_range() in page_alloc.c
The previous patch added for_each_mem_pfn_range(), which is more versatile
than for_each_active_range_index_in_nid().  This patch replaces
for_each_active_range_index_in_nid() and open-coded early_node_map[] walks
with for_each_mem_pfn_range().

All conversions in this patch are straightforward and shouldn't cause any
functional difference.  After the conversions,
for_each_active_range_index_in_nid() no longer has any users and is removed.

Signed-off-by: Tejun Heo <tj@kernel.org>
Link: http://lkml.kernel.org/r/1310460395-30913-4-git-send-email-tj@kernel.org
Cc: Yinghai Lu <yinghai@kernel.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
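[ For illustration only; this sketch is not part of the commit.  Every
  conversion in the hunks below follows the same shape.  A walk that
  indexed early_node_map[] directly:

	int i;

	for_each_active_range_index_in_nid(i, nid) {
		unsigned long start_pfn = early_node_map[i].start_pfn;
		unsigned long end_pfn = early_node_map[i].end_pfn;
		/* ... consume [start_pfn, end_pfn) of early_node_map[i].nid ... */
	}

  becomes a walk that receives the PFN range (and, if wanted, the node id)
  from the iterator itself, leaving early_node_map[] an implementation
  detail of the iterator:

	unsigned long start_pfn, end_pfn;
	int i, this_nid;

	for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, &this_nid) {
		/* ... consume [start_pfn, end_pfn) of node this_nid ... */
	} ]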
-rw-r--r--	mm/page_alloc.c	165
1 file changed, 56 insertions(+), 109 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 3092a97268d1..902f03a4fd6b 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -3711,34 +3711,6 @@ __meminit int init_currently_empty_zone(struct zone *zone,
 }
 
 #ifdef CONFIG_ARCH_POPULATES_NODE_MAP
-/*
- * Basic iterator support. Return the first range of PFNs for a node
- * Note: nid == MAX_NUMNODES returns first region regardless of node
- */
-static int __meminit first_active_region_index_in_nid(int nid)
-{
-	int i;
-
-	for (i = 0; i < nr_nodemap_entries; i++)
-		if (nid == MAX_NUMNODES || early_node_map[i].nid == nid)
-			return i;
-
-	return -1;
-}
-
-/*
- * Basic iterator support. Return the next active range of PFNs for a node
- * Note: nid == MAX_NUMNODES returns next region regardless of node
- */
-static int __meminit next_active_region_index_in_nid(int index, int nid)
-{
-	for (index = index + 1; index < nr_nodemap_entries; index++)
-		if (nid == MAX_NUMNODES || early_node_map[index].nid == nid)
-			return index;
-
-	return -1;
-}
-
 #ifndef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID
 /*
  * Required by SPARSEMEM. Given a PFN, return what node the PFN is on.
@@ -3748,15 +3720,12 @@ static int __meminit next_active_region_index_in_nid(int index, int nid)
  */
 int __meminit __early_pfn_to_nid(unsigned long pfn)
 {
-	int i;
-
-	for (i = 0; i < nr_nodemap_entries; i++) {
-		unsigned long start_pfn = early_node_map[i].start_pfn;
-		unsigned long end_pfn = early_node_map[i].end_pfn;
+	unsigned long start_pfn, end_pfn;
+	int i, nid;
 
+	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid)
 		if (start_pfn <= pfn && pfn < end_pfn)
-			return early_node_map[i].nid;
-	}
+			return nid;
 	/* This is a memory hole */
 	return -1;
 }
@@ -3785,11 +3754,6 @@ bool __meminit early_pfn_in_nid(unsigned long pfn, int node)
 }
 #endif
 
-/* Basic iterator support to walk early_node_map[] */
-#define for_each_active_range_index_in_nid(i, nid) \
-	for (i = first_active_region_index_in_nid(nid); i != -1; \
-	     i = next_active_region_index_in_nid(i, nid))
-
 /**
  * free_bootmem_with_active_regions - Call free_bootmem_node for each active range
  * @nid: The node to free memory on. If MAX_NUMNODES, all nodes are freed.
@@ -3799,25 +3763,19 @@ bool __meminit early_pfn_in_nid(unsigned long pfn, int node)
  * add_active_ranges() contain no holes and may be freed, this
  * this function may be used instead of calling free_bootmem() manually.
  */
-void __init free_bootmem_with_active_regions(int nid,
-						unsigned long max_low_pfn)
+void __init free_bootmem_with_active_regions(int nid, unsigned long max_low_pfn)
 {
-	int i;
-
-	for_each_active_range_index_in_nid(i, nid) {
-		unsigned long size_pages = 0;
-		unsigned long end_pfn = early_node_map[i].end_pfn;
-
-		if (early_node_map[i].start_pfn >= max_low_pfn)
-			continue;
+	unsigned long start_pfn, end_pfn;
+	int i, this_nid;
 
-		if (end_pfn > max_low_pfn)
-			end_pfn = max_low_pfn;
+	for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, &this_nid) {
+		start_pfn = min(start_pfn, max_low_pfn);
+		end_pfn = min(end_pfn, max_low_pfn);
 
-		size_pages = end_pfn - early_node_map[i].start_pfn;
-		free_bootmem_node(NODE_DATA(early_node_map[i].nid),
-				PFN_PHYS(early_node_map[i].start_pfn),
-				size_pages << PAGE_SHIFT);
+		if (start_pfn < end_pfn)
+			free_bootmem_node(NODE_DATA(this_nid),
+					  PFN_PHYS(start_pfn),
+					  (end_pfn - start_pfn) << PAGE_SHIFT);
 	}
 }
 
@@ -3891,15 +3849,12 @@ u64 __init find_memory_core_early(int nid, u64 size, u64 align,
 int __init add_from_early_node_map(struct range *range, int az,
 				   int nr_range, int nid)
 {
+	unsigned long start_pfn, end_pfn;
 	int i;
-	u64 start, end;
 
 	/* need to go over early_node_map to find out good range for node */
-	for_each_active_range_index_in_nid(i, nid) {
-		start = early_node_map[i].start_pfn;
-		end = early_node_map[i].end_pfn;
-		nr_range = add_range(range, az, nr_range, start, end);
-	}
+	for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL)
+		nr_range = add_range(range, az, nr_range, start_pfn, end_pfn);
 	return nr_range;
 }
 
@@ -3913,12 +3868,11 @@ int __init add_from_early_node_map(struct range *range, int az,
  */
 void __init sparse_memory_present_with_active_regions(int nid)
 {
-	int i;
+	unsigned long start_pfn, end_pfn;
+	int i, this_nid;
 
-	for_each_active_range_index_in_nid(i, nid)
-		memory_present(early_node_map[i].nid,
-			       early_node_map[i].start_pfn,
-			       early_node_map[i].end_pfn);
+	for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, &this_nid)
+		memory_present(this_nid, start_pfn, end_pfn);
 }
 
 /**
@@ -3935,13 +3889,15 @@ void __init sparse_memory_present_with_active_regions(int nid)
 void __meminit get_pfn_range_for_nid(unsigned int nid,
 			unsigned long *start_pfn, unsigned long *end_pfn)
 {
+	unsigned long this_start_pfn, this_end_pfn;
 	int i;
+
 	*start_pfn = -1UL;
 	*end_pfn = 0;
 
-	for_each_active_range_index_in_nid(i, nid) {
-		*start_pfn = min(*start_pfn, early_node_map[i].start_pfn);
-		*end_pfn = max(*end_pfn, early_node_map[i].end_pfn);
+	for_each_mem_pfn_range(i, nid, &this_start_pfn, &this_end_pfn, NULL) {
+		*start_pfn = min(*start_pfn, this_start_pfn);
+		*end_pfn = max(*end_pfn, this_end_pfn);
 	}
 
 	if (*start_pfn == -1UL)
@@ -4484,6 +4440,7 @@ void __init add_active_range(unsigned int nid, unsigned long start_pfn,
 void __init remove_active_range(unsigned int nid, unsigned long start_pfn,
 				unsigned long end_pfn)
 {
+	unsigned long this_start_pfn, this_end_pfn;
 	int i, j;
 	int removed = 0;
 
@@ -4491,26 +4448,22 @@ void __init remove_active_range(unsigned int nid, unsigned long start_pfn,
 		nid, start_pfn, end_pfn);
 
 	/* Find the old active region end and shrink */
-	for_each_active_range_index_in_nid(i, nid) {
-		if (early_node_map[i].start_pfn >= start_pfn &&
-		    early_node_map[i].end_pfn <= end_pfn) {
+	for_each_mem_pfn_range(i, nid, &this_start_pfn, &this_end_pfn, NULL) {
+		if (this_start_pfn >= start_pfn && this_end_pfn <= end_pfn) {
 			/* clear it */
 			early_node_map[i].start_pfn = 0;
 			early_node_map[i].end_pfn = 0;
 			removed = 1;
 			continue;
 		}
-		if (early_node_map[i].start_pfn < start_pfn &&
-		    early_node_map[i].end_pfn > start_pfn) {
-			unsigned long temp_end_pfn = early_node_map[i].end_pfn;
+		if (this_start_pfn < start_pfn && this_end_pfn > start_pfn) {
 			early_node_map[i].end_pfn = start_pfn;
-			if (temp_end_pfn > end_pfn)
-				add_active_range(nid, end_pfn, temp_end_pfn);
+			if (this_end_pfn > end_pfn)
+				add_active_range(nid, end_pfn, this_end_pfn);
 			continue;
 		}
-		if (early_node_map[i].start_pfn >= start_pfn &&
-		    early_node_map[i].end_pfn > end_pfn &&
-		    early_node_map[i].start_pfn < end_pfn) {
+		if (this_start_pfn >= start_pfn && this_end_pfn > end_pfn &&
+		    this_start_pfn < end_pfn) {
 			early_node_map[i].start_pfn = end_pfn;
 			continue;
 		}
@@ -4593,15 +4546,11 @@ void __init sort_node_map(void)
 unsigned long __init node_map_pfn_alignment(void)
 {
 	unsigned long accl_mask = 0, last_end = 0;
+	unsigned long start, end, mask;
 	int last_nid = -1;
-	int i;
-
-	for_each_active_range_index_in_nid(i, MAX_NUMNODES) {
-		int nid = early_node_map[i].nid;
-		unsigned long start = early_node_map[i].start_pfn;
-		unsigned long end = early_node_map[i].end_pfn;
-		unsigned long mask;
+	int i, nid;
 
+	for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, &nid) {
 		if (!start || last_nid < 0 || last_nid == nid) {
 			last_nid = nid;
 			last_end = end;
@@ -4628,12 +4577,12 @@ unsigned long __init node_map_pfn_alignment(void)
 /* Find the lowest pfn for a node */
 static unsigned long __init find_min_pfn_for_node(int nid)
 {
-	int i;
 	unsigned long min_pfn = ULONG_MAX;
+	unsigned long start_pfn;
+	int i;
 
-	/* Assuming a sorted map, the first range found has the starting pfn */
-	for_each_active_range_index_in_nid(i, nid)
-		min_pfn = min(min_pfn, early_node_map[i].start_pfn);
+	for_each_mem_pfn_range(i, nid, &start_pfn, NULL, NULL)
+		min_pfn = min(min_pfn, start_pfn);
 
 	if (min_pfn == ULONG_MAX) {
 		printk(KERN_WARNING
@@ -4662,15 +4611,16 @@ unsigned long __init find_min_pfn_with_active_regions(void)
  */
 static unsigned long __init early_calculate_totalpages(void)
 {
-	int i;
 	unsigned long totalpages = 0;
+	unsigned long start_pfn, end_pfn;
+	int i, nid;
+
+	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
+		unsigned long pages = end_pfn - start_pfn;
 
-	for (i = 0; i < nr_nodemap_entries; i++) {
-		unsigned long pages = early_node_map[i].end_pfn -
-						early_node_map[i].start_pfn;
 		totalpages += pages;
 		if (pages)
-			node_set_state(early_node_map[i].nid, N_HIGH_MEMORY);
+			node_set_state(nid, N_HIGH_MEMORY);
 	}
 	return totalpages;
 }
@@ -4725,6 +4675,8 @@ restart:
 	/* Spread kernelcore memory as evenly as possible throughout nodes */
 	kernelcore_node = required_kernelcore / usable_nodes;
 	for_each_node_state(nid, N_HIGH_MEMORY) {
+		unsigned long start_pfn, end_pfn;
+
 		/*
 		 * Recalculate kernelcore_node if the division per node
 		 * now exceeds what is necessary to satisfy the requested
@@ -4741,13 +4693,10 @@ restart:
 		kernelcore_remaining = kernelcore_node;
 
 		/* Go through each range of PFNs within this node */
-		for_each_active_range_index_in_nid(i, nid) {
-			unsigned long start_pfn, end_pfn;
+		for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
 			unsigned long size_pages;
 
-			start_pfn = max(early_node_map[i].start_pfn,
-					zone_movable_pfn[nid]);
-			end_pfn = early_node_map[i].end_pfn;
+			start_pfn = max(start_pfn, zone_movable_pfn[nid]);
 			if (start_pfn >= end_pfn)
 				continue;
 
@@ -4849,8 +4798,8 @@ static void check_for_regular_memory(pg_data_t *pgdat)
  */
 void __init free_area_init_nodes(unsigned long *max_zone_pfn)
 {
-	unsigned long nid;
-	int i;
+	unsigned long start_pfn, end_pfn;
+	int i, nid;
 
 	/* Sort early_node_map as initialisation assumes it is sorted */
 	sort_node_map();
@@ -4900,11 +4849,9 @@ void __init free_area_init_nodes(unsigned long *max_zone_pfn)
 	}
 
 	/* Print out the early_node_map[] */
-	printk("early_node_map[%d] active PFN ranges\n", nr_nodemap_entries);
-	for (i = 0; i < nr_nodemap_entries; i++)
-		printk("  %3d: %0#10lx -> %0#10lx\n", early_node_map[i].nid,
-						early_node_map[i].start_pfn,
-						early_node_map[i].end_pfn);
+	printk("Early memory PFN ranges\n");
+	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid)
+		printk("  %3d: %0#10lx -> %0#10lx\n", nid, start_pfn, end_pfn);
 
 	/* Initialise every node */
 	mminit_verify_pageflags_layout();
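
[ For reference: the iterator used above comes from the parent commit
  (96e907d1360240d1958fe8ce3a3ac640733330d4).  Its definition there is
  roughly the following -- a sketch from memory, not quoted patch text:

	/* Finds the next early_node_map[] range; any out pointer may be NULL. */
	void __next_mem_pfn_range(int *idx, int nid, unsigned long *out_start_pfn,
				  unsigned long *out_end_pfn, int *out_nid);

	#define for_each_mem_pfn_range(i, nid, p_start, p_end, p_nid)		\
		for (i = -1, __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid); \
		     i >= 0; __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid))

  As with the removed first/next_active_region_index_in_nid() helpers,
  passing MAX_NUMNODES as @nid walks every registered range regardless of
  node, which is why node-agnostic call sites above such as
  __early_pfn_to_nid() and node_map_pfn_alignment() use it. ]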