path: root/mm
author		Yinghai Lu <yinghai@kernel.org>	2012-07-11 17:02:56 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-07-11 19:04:50 -0400
commit		29f6738609e40227dabcc63bfb3b84b3726a75bd (patch)
tree		2a505191fa668771cacfd3cc7f6e2446a0cc48dd /mm
parent		99ab7b19440a72ebdf225f99b20f8ef40decee86 (diff)
memblock: free allocated memblock_reserved_regions later
memblock_free_reserved_regions() calls memblock_free(), but
memblock_free() can itself double reserved.regions, so we may end up
freeing the old range that backs reserved.regions while it is still in
use.

Also, tj pointed out another problem that may be related to this:

| I don't think we're saving any noticeable
| amount by doing this "free - give it to page allocator - reserve
| again" dancing.  We should just allocate regions aligned to page
| boundaries and free them later when memblock is no longer in use.

In that case, with DEBUG_PAGEALLOC enabled, we get a panic:

  memblock_free: [0x0000102febc080-0x0000102febf080] memblock_free_reserved_regions+0x37/0x39
  BUG: unable to handle kernel paging request at ffff88102febd948
  IP: [<ffffffff836a5774>] __next_free_mem_range+0x9b/0x155
  PGD 4826063 PUD cf67a067 PMD cf7fa067 PTE 800000102febd160
  Oops: 0000 [#1] PREEMPT SMP DEBUG_PAGEALLOC
  CPU 0
  Pid: 0, comm: swapper Not tainted 3.5.0-rc2-next-20120614-sasha #447
  RIP: 0010:[<ffffffff836a5774>]  [<ffffffff836a5774>] __next_free_mem_range+0x9b/0x155

See the discussion at https://lkml.org/lkml/2012/6/13/469

So allocate the regions array with PAGE_SIZE alignment and free it
later, once memblock is no longer in use.

Reported-by: Sasha Levin <levinsasha928@gmail.com>
Acked-by: Tejun Heo <tj@kernel.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Yinghai Lu <yinghai@kernel.org>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
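To see what the PAGE_SIZE-aligned sizing buys, here is a minimal
userspace sketch (not kernel code; a 4 KiB page size and a local
reimplementation of PAGE_ALIGN() are assumed, and struct region is a
stand-in for struct memblock_region):

  #include <stdio.h>

  #define PAGE_SIZE 4096UL
  /* Round up to the next page boundary, as the kernel's PAGE_ALIGN() does. */
  #define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

  /* Stand-in for struct memblock_region; the exact layout is irrelevant. */
  struct region { unsigned long base, size; };

  int main(void)
  {
  	unsigned long max = 128;                /* hypothetical array capacity */
  	unsigned long old_size = max * sizeof(struct region);
  	unsigned long new_size = old_size << 1; /* doubled, as in memblock_double_array() */

  	/* Sizing both allocations in whole pages means the old array can
  	 * later be handed back to the page allocator in full pages, with
  	 * no partially used page left behind. */
  	printf("old %lu -> %lu bytes, new %lu -> %lu bytes\n",
  	       old_size, PAGE_ALIGN(old_size),
  	       new_size, PAGE_ALIGN(new_size));
  	return 0;
  }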
Diffstat (limited to 'mm')
-rw-r--r--	mm/memblock.c	51
-rw-r--r--	mm/nobootmem.c	38
2 files changed, 46 insertions(+), 43 deletions(-)
diff --git a/mm/memblock.c b/mm/memblock.c
index d4382095f8bd..5cc6731b00cc 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -143,30 +143,6 @@ phys_addr_t __init_memblock memblock_find_in_range(phys_addr_t start,
 					   MAX_NUMNODES);
 }
 
-/*
- * Free memblock.reserved.regions
- */
-int __init_memblock memblock_free_reserved_regions(void)
-{
-	if (memblock.reserved.regions == memblock_reserved_init_regions)
-		return 0;
-
-	return memblock_free(__pa(memblock.reserved.regions),
-		 sizeof(struct memblock_region) * memblock.reserved.max);
-}
-
-/*
- * Reserve memblock.reserved.regions
- */
-int __init_memblock memblock_reserve_reserved_regions(void)
-{
-	if (memblock.reserved.regions == memblock_reserved_init_regions)
-		return 0;
-
-	return memblock_reserve(__pa(memblock.reserved.regions),
-		 sizeof(struct memblock_region) * memblock.reserved.max);
-}
-
 static void __init_memblock memblock_remove_region(struct memblock_type *type, unsigned long r)
 {
 	type->total_size -= type->regions[r].size;
@@ -184,6 +160,18 @@ static void __init_memblock memblock_remove_region(struct memblock_type *type, u
 	}
 }
 
+phys_addr_t __init_memblock get_allocated_memblock_reserved_regions_info(
+					phys_addr_t *addr)
+{
+	if (memblock.reserved.regions == memblock_reserved_init_regions)
+		return 0;
+
+	*addr = __pa(memblock.reserved.regions);
+
+	return PAGE_ALIGN(sizeof(struct memblock_region) *
+			  memblock.reserved.max);
+}
+
 /**
  * memblock_double_array - double the size of the memblock regions array
  * @type: memblock type of the regions array being doubled
@@ -204,6 +192,7 @@ static int __init_memblock memblock_double_array(struct memblock_type *type,
 						phys_addr_t new_area_size)
 {
 	struct memblock_region *new_array, *old_array;
+	phys_addr_t old_alloc_size, new_alloc_size;
 	phys_addr_t old_size, new_size, addr;
 	int use_slab = slab_is_available();
 	int *in_slab;
@@ -217,6 +206,12 @@ static int __init_memblock memblock_double_array(struct memblock_type *type,
 	/* Calculate new doubled size */
 	old_size = type->max * sizeof(struct memblock_region);
 	new_size = old_size << 1;
+	/*
+	 * We need to allocate the new array aligned to PAGE_SIZE,
+	 * so we can free it completely later.
+	 */
+	old_alloc_size = PAGE_ALIGN(old_size);
+	new_alloc_size = PAGE_ALIGN(new_size);
 
 	/* Retrieve the slab flag */
 	if (type == &memblock.memory)
@@ -245,11 +240,11 @@ static int __init_memblock memblock_double_array(struct memblock_type *type,
 
 		addr = memblock_find_in_range(new_area_start + new_area_size,
 						memblock.current_limit,
-						new_size, sizeof(phys_addr_t));
+						new_alloc_size, PAGE_SIZE);
 		if (!addr && new_area_size)
 			addr = memblock_find_in_range(0,
 					min(new_area_start, memblock.current_limit),
-					new_size, sizeof(phys_addr_t));
+					new_alloc_size, PAGE_SIZE);
 
 		new_array = addr ? __va(addr) : 0;
 	}
@@ -279,13 +274,13 @@ static int __init_memblock memblock_double_array(struct memblock_type *type,
 		kfree(old_array);
 	else if (old_array != memblock_memory_init_regions &&
 		 old_array != memblock_reserved_init_regions)
-		memblock_free(__pa(old_array), old_size);
+		memblock_free(__pa(old_array), old_alloc_size);
 
 	/* Reserve the new array if that comes from the memblock.
 	 * Otherwise, we needn't do it
 	 */
 	if (!use_slab)
-		BUG_ON(memblock_reserve(addr, new_size));
+		BUG_ON(memblock_reserve(addr, new_alloc_size));
 
 	/* Update slab flag */
 	*in_slab = use_slab;
diff --git a/mm/nobootmem.c b/mm/nobootmem.c
index 0900b3910cda..405573010f99 100644
--- a/mm/nobootmem.c
+++ b/mm/nobootmem.c
@@ -105,27 +105,35 @@ static void __init __free_pages_memory(unsigned long start, unsigned long end)
 		__free_pages_bootmem(pfn_to_page(i), 0);
 }
 
+static unsigned long __init __free_memory_core(phys_addr_t start,
+				 phys_addr_t end)
+{
+	unsigned long start_pfn = PFN_UP(start);
+	unsigned long end_pfn = min_t(unsigned long,
+				      PFN_DOWN(end), max_low_pfn);
+
+	if (start_pfn > end_pfn)
+		return 0;
+
+	__free_pages_memory(start_pfn, end_pfn);
+
+	return end_pfn - start_pfn;
+}
+
 unsigned long __init free_low_memory_core_early(int nodeid)
 {
 	unsigned long count = 0;
-	phys_addr_t start, end;
+	phys_addr_t start, end, size;
 	u64 i;
 
-	/* free reserved array temporarily so that it's treated as free area */
-	memblock_free_reserved_regions();
-
-	for_each_free_mem_range(i, MAX_NUMNODES, &start, &end, NULL) {
-		unsigned long start_pfn = PFN_UP(start);
-		unsigned long end_pfn = min_t(unsigned long,
-					      PFN_DOWN(end), max_low_pfn);
-		if (start_pfn < end_pfn) {
-			__free_pages_memory(start_pfn, end_pfn);
-			count += end_pfn - start_pfn;
-		}
-	}
+	for_each_free_mem_range(i, MAX_NUMNODES, &start, &end, NULL)
+		count += __free_memory_core(start, end);
+
+	/* free range that is used for reserved array if we allocate it */
+	size = get_allocated_memblock_reserved_regions_info(&start);
+	if (size)
+		count += __free_memory_core(start, start + size);
 
-	/* put region array back? */
-	memblock_reserve_reserved_regions();
 	return count;
 }
 
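For reference, the PFN arithmetic introduced in __free_memory_core()
can be exercised standalone. This sketch assumes 4 KiB pages and
reimplements PFN_UP()/PFN_DOWN() with their usual kernel definitions;
the max_low_pfn clamp is omitted for brevity:

  #include <stdio.h>

  #define PAGE_SHIFT 12			/* assumed 4 KiB pages */
  #define PAGE_SIZE  (1UL << PAGE_SHIFT)
  #define PFN_UP(x)   (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)
  #define PFN_DOWN(x) ((x) >> PAGE_SHIFT)

  /* Mirrors __free_memory_core(): count only the whole pages that lie
   * entirely inside [start, end); partial pages at either edge are
   * skipped, since only full pages can go to the page allocator. */
  static unsigned long pages_in_range(unsigned long start, unsigned long end)
  {
  	unsigned long start_pfn = PFN_UP(start);
  	unsigned long end_pfn = PFN_DOWN(end);

  	if (start_pfn > end_pfn)
  		return 0;
  	return end_pfn - start_pfn;
  }

  int main(void)
  {
  	/* Range starts and ends mid-page: pfns 2..4 are full -> prints 3. */
  	printf("%lu\n", pages_in_range(0x1080, 0x5040));
  	return 0;
  }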