-rw-r--r--  arch/powerpc/mm/numa.c      50
-rw-r--r--  arch/x86/mm/memblock.c      23
-rw-r--r--  drivers/pci/intel-iommu.c   24
-rw-r--r--  include/linux/mm.h          22
-rw-r--r--  mm/page_alloc.c             40
5 files changed, 76 insertions(+), 83 deletions(-)
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index 2164006fe170..6f06ea53bca2 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -127,45 +127,25 @@ static int __cpuinit fake_numa_create_new_node(unsigned long end_pfn,
 }
 
 /*
- * get_active_region_work_fn - A helper function for get_node_active_region
- *	Returns datax set to the start_pfn and end_pfn if they contain
- *	the initial value of datax->start_pfn between them
- * @start_pfn: start page(inclusive) of region to check
- * @end_pfn: end page(exclusive) of region to check
- * @datax: comes in with ->start_pfn set to value to search for and
- *	goes out with active range if it contains it
- * Returns 1 if search value is in range else 0
- */
-static int __init get_active_region_work_fn(unsigned long start_pfn,
-					unsigned long end_pfn, void *datax)
-{
-	struct node_active_region *data;
-	data = (struct node_active_region *)datax;
-
-	if (start_pfn <= data->start_pfn && end_pfn > data->start_pfn) {
-		data->start_pfn = start_pfn;
-		data->end_pfn = end_pfn;
-		return 1;
-	}
-	return 0;
-
-}
-
-/*
- * get_node_active_region - Return active region containing start_pfn
+ * get_node_active_region - Return active region containing pfn
  * Active range returned is empty if none found.
- * @start_pfn: The page to return the region for.
- * @node_ar: Returned set to the active region containing start_pfn
+ * @pfn: The page to return the region for
+ * @node_ar: Returned set to the active region containing @pfn
  */
-static void __init get_node_active_region(unsigned long start_pfn,
-			struct node_active_region *node_ar)
+static void __init get_node_active_region(unsigned long pfn,
+					  struct node_active_region *node_ar)
 {
-	int nid = early_pfn_to_nid(start_pfn);
+	unsigned long start_pfn, end_pfn;
+	int i, nid;
 
-	node_ar->nid = nid;
-	node_ar->start_pfn = start_pfn;
-	node_ar->end_pfn = start_pfn;
-	work_with_active_regions(nid, get_active_region_work_fn, node_ar);
+	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
+		if (pfn >= start_pfn && pfn < end_pfn) {
+			node_ar->nid = nid;
+			node_ar->start_pfn = start_pfn;
+			node_ar->end_pfn = end_pfn;
+			break;
+		}
+	}
 }
 
 static void map_cpu_to_node(int cpu, int node)
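
The powerpc conversion above replaces a search callback (get_active_region_work_fn) plus a pre-seeded context struct with a direct scan for the range containing the pfn. The following is a minimal, self-contained userspace sketch of the resulting lookup logic; the range table, its values, and main() are invented for illustration and are not the kernel code itself:

#include <stdio.h>

/* Invented stand-in for the early memory map: half-open pfn ranges. */
struct pfn_range { unsigned long start_pfn, end_pfn; int nid; };

static const struct pfn_range ranges[] = {
	{ 0x000, 0x100, 0 },
	{ 0x100, 0x180, 1 },
	{ 0x200, 0x300, 1 },
};

/* Mirrors the converted get_node_active_region(): scan for the range with
 * start_pfn <= pfn < end_pfn; this sketch returns a sentinel empty range
 * on a miss, matching the "empty if none found" contract. */
static struct pfn_range lookup_active_region(unsigned long pfn)
{
	struct pfn_range r = { 0, 0, -1 };
	size_t i;

	for (i = 0; i < sizeof(ranges) / sizeof(ranges[0]); i++) {
		if (pfn >= ranges[i].start_pfn && pfn < ranges[i].end_pfn) {
			r = ranges[i];
			break;
		}
	}
	return r;
}

int main(void)
{
	struct pfn_range r = lookup_active_region(0x140);

	printf("pfn 0x140 -> [%#lx, %#lx) node %d\n",
	       r.start_pfn, r.end_pfn, r.nid);
	return 0;
}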
diff --git a/arch/x86/mm/memblock.c b/arch/x86/mm/memblock.c
index e126117d1b03..da0d5c84586e 100644
--- a/arch/x86/mm/memblock.c
+++ b/arch/x86/mm/memblock.c
@@ -115,28 +115,13 @@ static void __init memblock_x86_subtract_reserved(struct range *range, int az)
 	memblock_reserve_reserved_regions();
 }
 
-struct count_data {
-	int nr;
-};
-
-static int __init count_work_fn(unsigned long start_pfn,
-				unsigned long end_pfn, void *datax)
-{
-	struct count_data *data = datax;
-
-	data->nr++;
-
-	return 0;
-}
-
 static int __init count_early_node_map(int nodeid)
 {
-	struct count_data data;
+	int i, cnt = 0;
 
-	data.nr = 0;
-	work_with_active_regions(nodeid, count_work_fn, &data);
-
-	return data.nr;
+	for_each_mem_pfn_range(i, nodeid, NULL, NULL, NULL)
+		cnt++;
+	return cnt;
 }
 
 int __init __get_free_all_memory_range(struct range **rangep, int nodeid,
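
The x86 memblock hunk is the clearest before/after picture of why the callback interface was retired: counting ranges used to need a context struct (count_data) and a work function, while the iterator reduces it to a two-line loop. Note also that count_early_node_map() passes NULL for all three output parameters, which the iterator explicitly permits. A self-contained sketch of the two shapes, with an invented range table:

#include <stdio.h>

/* Invented table of half-open pfn ranges, standing in for early_node_map. */
static const unsigned long table[][2] = { { 0, 16 }, { 32, 64 }, { 80, 96 } };
#define NR_RANGES (sizeof(table) / sizeof(table[0]))

/* Old shape: a context struct and a callback just to carry a counter. */
struct count_data {
	int nr;
};

static int count_work_fn(unsigned long start, unsigned long end, void *datax)
{
	struct count_data *data = datax;

	data->nr++;
	return 0;		/* nonzero would abort the walk */
}

static void walk(int (*fn)(unsigned long, unsigned long, void *), void *data)
{
	size_t i;

	for (i = 0; i < NR_RANGES; i++)
		if (fn(table[i][0], table[i][1], data))
			break;
}

int main(void)
{
	struct count_data data = { 0 };
	size_t i;
	int cnt = 0;

	walk(count_work_fn, &data);	/* old style */
	for (i = 0; i < NR_RANGES; i++)	/* new style: a plain loop */
		cnt++;
	printf("callback: %d, loop: %d\n", data.nr, cnt);
	return 0;
}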
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index f02c34d26d1b..8ec352077e1a 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -2178,18 +2178,6 @@ static inline void iommu_prepare_isa(void)
 
 static int md_domain_init(struct dmar_domain *domain, int guest_width);
 
-static int __init si_domain_work_fn(unsigned long start_pfn,
-				    unsigned long end_pfn, void *datax)
-{
-	int *ret = datax;
-
-	*ret = iommu_domain_identity_map(si_domain,
-					 (uint64_t)start_pfn << PAGE_SHIFT,
-					 (uint64_t)end_pfn << PAGE_SHIFT);
-	return *ret;
-
-}
-
 static int __init si_domain_init(int hw)
 {
 	struct dmar_drhd_unit *drhd;
@@ -2221,9 +2209,15 @@ static int __init si_domain_init(int hw)
 		return 0;
 
 	for_each_online_node(nid) {
-		work_with_active_regions(nid, si_domain_work_fn, &ret);
-		if (ret)
-			return ret;
+		unsigned long start_pfn, end_pfn;
+		int i;
+
+		for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
+			ret = iommu_domain_identity_map(si_domain,
+					PFN_PHYS(start_pfn), PFN_PHYS(end_pfn));
+			if (ret)
+				return ret;
+		}
 	}
 
 	return 0;
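
Besides dropping si_domain_work_fn(), the intel-iommu hunk replaces the open-coded (uint64_t)pfn << PAGE_SHIFT conversions with the kernel's PFN_PHYS() helper, which performs the same widen-then-shift. The sketch below is a userspace model, with a PAGE_SHIFT of 12 assumed (4 KiB pages), of why the widening cast matters for pfns addressing memory above 4 GiB:

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

#define PAGE_SHIFT 12	/* assumed: 4 KiB pages */

/* Model of the PFN_PHYS() idea: widen before shifting so a pfn that maps
 * above 4 GiB does not overflow 32-bit arithmetic. */
#define PFN_PHYS(pfn)	((uint64_t)(pfn) << PAGE_SHIFT)

int main(void)
{
	uint32_t pfn = 0x100000;	/* pfn of the 4 GiB boundary */

	/* Shifting in 32 bits wraps to 0 ... */
	printf("narrow: %#" PRIx64 "\n", (uint64_t)(pfn << PAGE_SHIFT));
	/* ... widening first keeps the full physical address. */
	printf("wide:   %#" PRIx64 "\n", PFN_PHYS(pfn));
	return 0;
}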
diff --git a/include/linux/mm.h b/include/linux/mm.h
index c70a326b8f26..57e4c9ffdff8 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1327,9 +1327,27 @@ int add_from_early_node_map(struct range *range, int az,
 				   int nr_range, int nid);
 u64 __init find_memory_core_early(int nid, u64 size, u64 align,
 				u64 goal, u64 limit);
-typedef int (*work_fn_t)(unsigned long, unsigned long, void *);
-extern void work_with_active_regions(int nid, work_fn_t work_fn, void *data);
 extern void sparse_memory_present_with_active_regions(int nid);
+
+extern void __next_mem_pfn_range(int *idx, int nid,
+				 unsigned long *out_start_pfn,
+				 unsigned long *out_end_pfn, int *out_nid);
+
+/**
+ * for_each_mem_pfn_range - early memory pfn range iterator
+ * @i: an integer used as loop variable
+ * @nid: node selector, %MAX_NUMNODES for all nodes
+ * @p_start: ptr to ulong for start pfn of the range, can be %NULL
+ * @p_end: ptr to ulong for end pfn of the range, can be %NULL
+ * @p_nid: ptr to int for nid of the range, can be %NULL
+ *
+ * Walks over configured memory ranges. Available after early_node_map is
+ * populated.
+ */
+#define for_each_mem_pfn_range(i, nid, p_start, p_end, p_nid)		\
+	for (i = -1, __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid); \
+	     i >= 0; __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid))
+
 #endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
 
 #if !defined(CONFIG_ARCH_POPULATES_NODE_MAP) && \
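
To make the new header's contract concrete, here is a compilable userspace model of the macro and its callee; the range table, the MAX_NUMNODES value, and main() are invented stand-ins for the kernel's early_node_map under CONFIG_ARCH_POPULATES_NODE_MAP. The protocol: the loop variable starts at -1, each __next_mem_pfn_range() call advances it to the next entry matching @nid, and the callee resets it to -1 when the table is exhausted, which fails the i >= 0 test and ends the loop:

#include <stdio.h>

#define MAX_NUMNODES 4	/* invented stand-in for the kernel constant */

/* Invented stand-in for early_node_map[]: half-open pfn ranges per node. */
static struct node_active_region {
	unsigned long start_pfn, end_pfn;
	int nid;
} early_node_map[] = {
	{ 0x000, 0x100, 0 },
	{ 0x100, 0x180, 1 },
	{ 0x200, 0x300, 1 },
};
static const int nr_nodemap_entries = 3;

/* Model of __next_mem_pfn_range(): advance *idx to the next entry on @nid
 * (any node if @nid == MAX_NUMNODES); reset *idx to -1 when exhausted. */
static void __next_mem_pfn_range(int *idx, int nid,
				 unsigned long *out_start_pfn,
				 unsigned long *out_end_pfn, int *out_nid)
{
	while (++*idx < nr_nodemap_entries) {
		struct node_active_region *r = &early_node_map[*idx];

		if (nid == MAX_NUMNODES || nid == r->nid) {
			if (out_start_pfn)
				*out_start_pfn = r->start_pfn;
			if (out_end_pfn)
				*out_end_pfn = r->end_pfn;
			if (out_nid)
				*out_nid = r->nid;
			return;
		}
	}
	*idx = -1;	/* table exhausted: terminates the for loop below */
}

#define for_each_mem_pfn_range(i, nid, p_start, p_end, p_nid)		\
	for (i = -1, __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid); \
	     i >= 0; __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid))

int main(void)
{
	unsigned long start, end;
	int i, nid;

	for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, &nid)
		printf("all:    [%#lx, %#lx) node %d\n", start, end, nid);
	for_each_mem_pfn_range(i, 1, &start, &end, NULL)
		printf("node 1: [%#lx, %#lx)\n", start, end);
	return 0;
}

Because all iteration state lives in the caller's index variable, the walk can be nested, restarted, or abandoned with break at any point, none of which the callback API allowed without extra plumbing.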
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index c7f0e5be4a31..69fffabf61b7 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -3903,18 +3903,6 @@ int __init add_from_early_node_map(struct range *range, int az,
 	return nr_range;
 }
 
-void __init work_with_active_regions(int nid, work_fn_t work_fn, void *data)
-{
-	int i;
-	int ret;
-
-	for_each_active_range_index_in_nid(i, nid) {
-		ret = work_fn(early_node_map[i].start_pfn,
-			      early_node_map[i].end_pfn, data);
-		if (ret)
-			break;
-	}
-}
 /**
  * sparse_memory_present_with_active_regions - Call memory_present for each active range
  * @nid: The node to call memory_present for. If MAX_NUMNODES, all nodes will be used.
@@ -4421,6 +4409,34 @@ static inline void setup_nr_node_ids(void)
 }
 #endif
 
+/*
+ * Common iterator interface used to define for_each_mem_pfn_range().
+ */
+void __meminit __next_mem_pfn_range(int *idx, int nid,
+				    unsigned long *out_start_pfn,
+				    unsigned long *out_end_pfn, int *out_nid)
+{
+	struct node_active_region *r = NULL;
+
+	while (++*idx < nr_nodemap_entries) {
+		if (nid == MAX_NUMNODES || nid == early_node_map[*idx].nid) {
+			r = &early_node_map[*idx];
+			break;
+		}
+	}
+	if (!r) {
+		*idx = -1;
+		return;
+	}
+
+	if (out_start_pfn)
+		*out_start_pfn = r->start_pfn;
+	if (out_end_pfn)
+		*out_end_pfn = r->end_pfn;
+	if (out_nid)
+		*out_nid = r->nid;
+}
+
 /**
  * add_active_range - Register a range of PFNs backed by physical memory
  * @nid: The node ID the range resides on
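
The page_alloc.c side completes the picture: work_with_active_regions() forced callers to smuggle state and error codes through a void * and abort the walk via the callback's return value (as si_domain_work_fn() did above), whereas the open-coded iterator lets callers use ordinary control flow. A small sketch of the error-propagation difference, with an invented range table and a hypothetical map_one() operation:

#include <stdio.h>

/* Invented half-open pfn ranges standing in for the early memory map. */
static const unsigned long ranges[][2] = { { 0, 16 }, { 32, 64 }, { 80, 96 } };
#define NR_RANGES (sizeof(ranges) / sizeof(ranges[0]))

/* Hypothetical per-range operation; fails on the second range so the
 * early-return path below is exercised. */
static int map_one(unsigned long start, unsigned long end)
{
	printf("mapping [%lu, %lu)\n", start, end);
	return start == 32 ? -1 : 0;
}

static int map_all(void)
{
	size_t i;
	int ret;

	/*
	 * With an open-coded loop, propagating an error is just
	 * "return ret;".  The callback API instead had to carry the
	 * error through the void *data argument and stop the walk by
	 * returning nonzero from the callback.
	 */
	for (i = 0; i < NR_RANGES; i++) {
		ret = map_one(ranges[i][0], ranges[i][1]);
		if (ret)
			return ret;
	}
	return 0;
}

int main(void)
{
	printf("map_all() = %d\n", map_all());
	return 0;
}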