 arch/powerpc/mm/mem.c | 30 +++++++++++++++++++++++-------
 include/linux/lmb.h   |  1 +
 lib/lmb.c             | 33 +++++++++++++++++++++++++++++++++
 3 files changed, 57 insertions(+), 7 deletions(-)
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index d9e37f365b54..f67e118116fa 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -154,19 +154,35 @@ out:
 
 /*
  * walk_memory_resource() needs to make sure there is no holes in a given
- * memory range. On PPC64, since this range comes from /sysfs, the range
- * is guaranteed to be valid, non-overlapping and can not contain any
- * holes. By the time we get here (memory add or remove), /proc/device-tree
- * is updated and correct. Only reason we need to check against device-tree
- * would be if we allow user-land to specify a memory range through a
- * system call/ioctl etc. instead of doing offline/online through /sysfs.
+ * memory range. PPC64 does not maintain the memory layout in /proc/iomem.
+ * Instead it maintains it in lmb.memory structures. Walk through the
+ * memory regions, find holes and callback for contiguous regions.
  */
 int
 walk_memory_resource(unsigned long start_pfn, unsigned long nr_pages, void *arg,
 			int (*func)(unsigned long, unsigned long, void *))
 {
-	return (*func)(start_pfn, nr_pages, arg);
+	struct lmb_property res;
+	unsigned long pfn, len;
+	u64 end;
+	int ret = -1;
+
+	res.base = (u64) start_pfn << PAGE_SHIFT;
+	res.size = (u64) nr_pages << PAGE_SHIFT;
+
+	end = res.base + res.size - 1;
+	while ((res.base < end) && (lmb_find(&res) >= 0)) {
+		pfn = (unsigned long)(res.base >> PAGE_SHIFT);
+		len = (unsigned long)(res.size >> PAGE_SHIFT);
+		ret = (*func)(pfn, len, arg);
+		if (ret)
+			break;
+		res.base += (res.size + 1);
+		res.size = (end - res.base + 1);
+	}
+	return ret;
 }
+EXPORT_SYMBOL_GPL(walk_memory_resource);
 
 #endif /* CONFIG_MEMORY_HOTPLUG */
 
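The rewritten walk_memory_resource() no longer assumes the <start_pfn, nr_pages> range is backed everywhere: it converts the request to a byte range, repeatedly clips it against lmb.memory via lmb_find(), and invokes the callback once per contiguous chunk, skipping the holes in between. Below is a minimal userspace sketch of that walking scheme; the region table, the request, and the helper names (find_chunk, print_chunk) are invented for illustration, and the advance step is simplified to base += size rather than mirroring the kernel code line for line.

/*
 * Illustrative userspace sketch of the hole-skipping walk (not kernel code).
 * The region table below is made up: two regions with a hole from 256MB to 512MB.
 */
#include <stdio.h>
#include <stdint.h>

struct chunk { uint64_t base, size; };

static const struct chunk memory[] = {
	{ 0x00000000ULL, 0x10000000ULL },	/* 0   - 256MB */
	{ 0x20000000ULL, 0x10000000ULL },	/* 512 - 768MB */
};

/* Clip *res to the first region it overlaps; -1 if it hits no region. */
static int find_chunk(struct chunk *res)
{
	uint64_t rstart = res->base, rend = rstart + res->size - 1;
	unsigned int i;

	for (i = 0; i < sizeof(memory) / sizeof(memory[0]); i++) {
		uint64_t start = memory[i].base;
		uint64_t end = start + memory[i].size - 1;

		if (start > rend)
			return -1;
		if (end >= rstart && start < rend) {
			res->base = rstart > start ? rstart : start;
			res->size = (rend < end ? rend : end) - res->base + 1;
			return 0;
		}
	}
	return -1;
}

static int print_chunk(uint64_t base, uint64_t size, void *arg)
{
	(void)arg;
	printf("contiguous chunk: base=0x%llx size=0x%llx\n",
	       (unsigned long long)base, (unsigned long long)size);
	return 0;		/* non-zero would abort the walk */
}

int main(void)
{
	/* Request 128MB-640MB: it spans the hole, so two chunks come back. */
	struct chunk res = { 0x08000000ULL, 0x20000000ULL };
	uint64_t end = res.base + res.size - 1;
	int ret = -1;

	while (res.base < end && find_chunk(&res) == 0) {
		ret = print_chunk(res.base, res.size, NULL);
		if (ret)
			break;
		res.base += res.size;		/* step past this chunk */
		res.size = end - res.base + 1;	/* remaining request */
	}
	return ret;
}

Run against the two-region table this prints one chunk for 128MB-256MB and one for 512MB-640MB; the 256MB-512MB hole is never handed to the callback, which is the behaviour the new comment in mem.c describes.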
diff --git a/include/linux/lmb.h b/include/linux/lmb.h
index 55d4b261a9e8..c46c89505dac 100644
--- a/include/linux/lmb.h
+++ b/include/linux/lmb.h
@@ -54,6 +54,7 @@ extern u64 __init lmb_phys_mem_size(void);
 extern u64 __init lmb_end_of_DRAM(void);
 extern void __init lmb_enforce_memory_limit(u64 memory_limit);
 extern int __init lmb_is_reserved(u64 addr);
+extern int lmb_find(struct lmb_property *res);
 
 extern void lmb_dump_all(void);
 
diff --git a/lib/lmb.c b/lib/lmb.c
index 5b2a739bc3d5..83287d3869a3 100644
--- a/lib/lmb.c
+++ b/lib/lmb.c
@@ -474,3 +474,36 @@ int __init lmb_is_reserved(u64 addr)
 	}
 	return 0;
 }
+
+/*
+ * Given a <base, len>, find which memory regions belong to this range.
+ * Adjust the request and return a contiguous chunk.
+ */
+int lmb_find(struct lmb_property *res)
+{
+	int i;
+	u64 rstart, rend;
+
+	rstart = res->base;
+	rend = rstart + res->size - 1;
+
+	for (i = 0; i < lmb.memory.cnt; i++) {
+		u64 start = lmb.memory.region[i].base;
+		u64 end = start + lmb.memory.region[i].size - 1;
+
+		if (start > rend)
+			return -1;
+
+		if ((end >= rstart) && (start < rend)) {
+			/* adjust the request */
+			if (rstart < start)
+				rstart = start;
+			if (rend > end)
+				rend = end;
+			res->base = rstart;
+			res->size = rend - rstart + 1;
+			return 0;
+		}
+	}
+	return -1;
+}
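lmb_find() treats <base, size> as the inclusive byte interval [base, base + size - 1], relies on lmb.memory.region[] being sorted by base (which is why it can return -1 as soon as a region starts past the request), and shrinks the request to the first overlapping region. A small self-contained check of those clipping semantics, with an invented region table and plain userspace types rather than the kernel's struct lmb_property, might look like this:

/*
 * Hypothetical userspace check of the lmb_find() clipping semantics;
 * the region table and requests are invented for illustration.
 */
#include <assert.h>
#include <stdint.h>

struct prop { uint64_t base, size; };

static const struct prop region[] = {
	{ 0x00000000ULL, 0x08000000ULL },	/* 0   - 128MB */
	{ 0x10000000ULL, 0x08000000ULL },	/* 256 - 384MB */
};

static int find(struct prop *res)
{
	uint64_t rstart = res->base, rend = rstart + res->size - 1;
	unsigned int i;

	for (i = 0; i < sizeof(region) / sizeof(region[0]); i++) {
		uint64_t start = region[i].base;
		uint64_t end = start + region[i].size - 1;

		if (start > rend)
			return -1;	/* sorted regions: nothing further can match */
		if (end >= rstart && start < rend) {
			res->base = rstart > start ? rstart : start;
			res->size = (rend < end ? rend : end) - res->base + 1;
			return 0;
		}
	}
	return -1;
}

int main(void)
{
	/* Request 64MB-320MB: clipped to the tail of the first region. */
	struct prop res = { 0x04000000ULL, 0x10000000ULL };
	assert(find(&res) == 0);
	assert(res.base == 0x04000000ULL && res.size == 0x04000000ULL);

	/* Request entirely inside the 128MB-256MB hole: early -1, because
	 * the next region already starts past the request's end. */
	res.base = 0x08000000ULL;
	res.size = 0x01000000ULL;
	assert(find(&res) == -1);

	/* Request starting past every region: no overlap, also -1. */
	res.base = 0x20000000ULL;
	res.size = 0x01000000ULL;
	assert(find(&res) == -1);
	return 0;
}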