Diffstat (limited to 'mm')
-rw-r--r--  mm/memory_hotplug.c  48
-rw-r--r--  mm/sparse.c          74
2 files changed, 72 insertions(+), 50 deletions(-)
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 855e0fc928b3..2e916c308ae6 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -24,28 +24,6 @@
 
 #include <asm/tlbflush.h>
 
-static struct page *__kmalloc_section_memmap(unsigned long nr_pages)
-{
-        struct page *page, *ret;
-        unsigned long memmap_size = sizeof(struct page) * nr_pages;
-
-        page = alloc_pages(GFP_KERNEL, get_order(memmap_size));
-        if (page)
-                goto got_map_page;
-
-        ret = vmalloc(memmap_size);
-        if (ret)
-                goto got_map_ptr;
-
-        return NULL;
-got_map_page:
-        ret = (struct page *)pfn_to_kaddr(page_to_pfn(page));
-got_map_ptr:
-        memset(ret, 0, memmap_size);
-
-        return ret;
-}
-
 extern void zonetable_add(struct zone *zone, int nid, int zid, unsigned long pfn,
                           unsigned long size);
 static void __add_zone(struct zone *zone, unsigned long phys_start_pfn)
@@ -60,35 +38,15 @@ static void __add_zone(struct zone *zone, unsigned long phys_start_pfn)
         zonetable_add(zone, nid, zone_type, phys_start_pfn, nr_pages);
 }
 
-extern int sparse_add_one_section(struct zone *, unsigned long,
-                                  struct page *mem_map);
+extern int sparse_add_one_section(struct zone *zone, unsigned long start_pfn,
+                                  int nr_pages);
 static int __add_section(struct zone *zone, unsigned long phys_start_pfn)
 {
         struct pglist_data *pgdat = zone->zone_pgdat;
         int nr_pages = PAGES_PER_SECTION;
-        struct page *memmap;
         int ret;
 
-        /*
-         * This can potentially allocate memory, and does its own
-         * internal locking.
-         */
-        sparse_index_init(pfn_to_section_nr(phys_start_pfn), pgdat->node_id);
-
-        pgdat_resize_lock(pgdat, &flags);
-        memmap = __kmalloc_section_memmap(nr_pages);
-        ret = sparse_add_one_section(zone, phys_start_pfn, memmap);
-        pgdat_resize_unlock(pgdat, &flags);
-
-        if (ret <= 0) {
-                /* the mem_map didn't get used */
-                if (memmap >= (struct page *)VMALLOC_START &&
-                    memmap < (struct page *)VMALLOC_END)
-                        vfree(memmap);
-                else
-                        free_pages((unsigned long)memmap,
-                                   get_order(sizeof(struct page) * nr_pages));
-        }
+        ret = sparse_add_one_section(zone, phys_start_pfn, nr_pages);
 
         if (ret < 0)
                 return ret;
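
The helper being moved out of memory_hotplug.c tries a physically contiguous allocation first and only falls back to vmalloc(), which is why its free-side counterpart must work out which allocator produced the pointer. Below is a minimal userspace sketch of that try-then-fall-back pattern, assuming hypothetical alloc_buf()/free_buf() helpers and using malloc()/mmap() as stand-ins for alloc_pages()/vmalloc(); unlike the kernel code, which infers the allocator from the VMALLOC_START..VMALLOC_END address range, the sketch records it in a flag:

#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>

struct buf {
        void *mem;
        size_t size;
        int mmapped;    /* which allocator produced the pointer */
};

/* Hypothetical analogue of __kmalloc_section_memmap(): primary
 * allocator first, fallback second, zeroed either way. */
static int alloc_buf(struct buf *b, size_t size)
{
        b->size = size;
        b->mmapped = 0;
        b->mem = malloc(size);          /* stands in for alloc_pages() */
        if (!b->mem) {
                b->mem = mmap(NULL, size, PROT_READ | PROT_WRITE,
                              MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
                if (b->mem == MAP_FAILED) {
                        b->mem = NULL;  /* stands in for vmalloc() failing too */
                        return -1;
                }
                b->mmapped = 1;
        }
        memset(b->mem, 0, size);        /* mirrors the memset() in the patch */
        return 0;
}

/* Hypothetical analogue of __kfree_section_memmap(): release with
 * the routine that matches the allocator actually used. */
static void free_buf(struct buf *b)
{
        if (!b->mem)
                return;
        if (b->mmapped)
                munmap(b->mem, b->size);        /* vfree() in the kernel */
        else
                free(b->mem);                   /* free_pages() in the kernel */
        b->mem = NULL;
}

Tracking the originating allocator in an explicit flag avoids the kernel's address-range test at the cost of one extra struct field; the kernel cannot do this because it hands out a bare struct page pointer.
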
diff --git a/mm/sparse.c b/mm/sparse.c
index 0d3bd4bf3aaa..72079b538e2d 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -5,8 +5,10 @@
 #include <linux/mm.h>
 #include <linux/mmzone.h>
 #include <linux/bootmem.h>
+#include <linux/highmem.h>
 #include <linux/module.h>
 #include <linux/spinlock.h>
+#include <linux/vmalloc.h>
 #include <asm/dma.h>
 
 /*
@@ -187,6 +189,45 @@ static struct page *sparse_early_mem_map_alloc(unsigned long pnum)
         return NULL;
 }
 
+static struct page *__kmalloc_section_memmap(unsigned long nr_pages)
+{
+        struct page *page, *ret;
+        unsigned long memmap_size = sizeof(struct page) * nr_pages;
+
+        page = alloc_pages(GFP_KERNEL, get_order(memmap_size));
+        if (page)
+                goto got_map_page;
+
+        ret = vmalloc(memmap_size);
+        if (ret)
+                goto got_map_ptr;
+
+        return NULL;
+got_map_page:
+        ret = (struct page *)pfn_to_kaddr(page_to_pfn(page));
+got_map_ptr:
+        memset(ret, 0, memmap_size);
+
+        return ret;
+}
+
+static int vaddr_in_vmalloc_area(void *addr)
+{
+        if (addr >= (void *)VMALLOC_START &&
+            addr < (void *)VMALLOC_END)
+                return 1;
+        return 0;
+}
+
+static void __kfree_section_memmap(struct page *memmap, unsigned long nr_pages)
+{
+        if (vaddr_in_vmalloc_area(memmap))
+                vfree(memmap);
+        else
+                free_pages((unsigned long)memmap,
+                           get_order(sizeof(struct page) * nr_pages));
+}
+
 /*
  * Allocate the accumulated non-linear sections, allocate a mem_map
  * for each and record the physical to section mapping.
@@ -212,14 +253,37 @@ void sparse_init(void)
  * set. If this is <=0, then that means that the passed-in
  * map was not consumed and must be freed.
  */
-int sparse_add_one_section(unsigned long start_pfn, int nr_pages, struct page *map)
+int sparse_add_one_section(struct zone *zone, unsigned long start_pfn,
+                           int nr_pages)
 {
-        struct mem_section *ms = __pfn_to_section(start_pfn);
+        unsigned long section_nr = pfn_to_section_nr(start_pfn);
+        struct pglist_data *pgdat = zone->zone_pgdat;
+        struct mem_section *ms;
+        struct page *memmap;
+        unsigned long flags;
+        int ret;
 
-        if (ms->section_mem_map & SECTION_MARKED_PRESENT)
-                return -EEXIST;
+        /*
+         * no locking for this, because it does its own
+         * plus, it does a kmalloc
+         */
+        sparse_index_init(section_nr, pgdat->node_id);
+        memmap = __kmalloc_section_memmap(nr_pages);
+
+        pgdat_resize_lock(pgdat, &flags);
 
+        ms = __pfn_to_section(start_pfn);
+        if (ms->section_mem_map & SECTION_MARKED_PRESENT) {
+                ret = -EEXIST;
+                goto out;
+        }
         ms->section_mem_map |= SECTION_MARKED_PRESENT;
 
-        return sparse_init_one_section(ms, pfn_to_section_nr(start_pfn), map);
+        ret = sparse_init_one_section(ms, section_nr, memmap);
+
+        if (ret <= 0)
+                __kfree_section_memmap(memmap, nr_pages);
+out:
+        pgdat_resize_unlock(pgdat, &flags);
+        return ret;
 }
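
The rewritten sparse_add_one_section() is the heart of the change: the possibly-sleeping allocations (sparse_index_init() and __kmalloc_section_memmap()) now run before pgdat_resize_lock() is taken, the SECTION_MARKED_PRESENT check is redone under the lock to catch a racing insertion, and a memmap that was not consumed is freed on the way out. Below is a minimal pthread-based sketch of the same allocate-outside, check-and-publish-inside discipline; the names (table_add, slots) are hypothetical and not from the patch:

#include <errno.h>
#include <pthread.h>
#include <stdlib.h>

#define NR_SLOTS 128

static void *slots[NR_SLOTS];
static pthread_mutex_t slots_lock = PTHREAD_MUTEX_INITIALIZER;

/* Hypothetical analogue of the patched sparse_add_one_section(). */
static int table_add(int idx, size_t size)
{
        void *entry;
        int ret = 0;

        /*
         * Allocate before taking the lock, as the patch moves
         * __kmalloc_section_memmap() ahead of pgdat_resize_lock():
         * a GFP_KERNEL allocation may sleep, so in the kernel it
         * must not happen under that spinlock.
         */
        entry = calloc(1, size);
        if (!entry)
                return -ENOMEM;

        pthread_mutex_lock(&slots_lock);

        /* Re-check under the lock, mirroring SECTION_MARKED_PRESENT. */
        if (slots[idx]) {
                ret = -EEXIST;
                goto out;
        }
        slots[idx] = entry;     /* publish the entry */
        entry = NULL;           /* ownership transferred */
out:
        pthread_mutex_unlock(&slots_lock);
        free(entry);            /* frees only if the entry was not consumed */
        return ret;
}

Clearing entry once it is published lets a single free() on the common exit path cover both outcomes, just as the patch routes the -EEXIST and "map not consumed" cases through __kfree_section_memmap() before unlocking.
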