| author | Dave Hansen <haveblue@us.ibm.com> | 2005-10-29 21:16:55 -0400 |
|---|---|---|
| committer | Linus Torvalds <torvalds@g5.osdl.org> | 2005-10-30 00:40:44 -0400 |
| commit | 0b0acbec1bed75ec1e1daa7f7006323a2a2b2844 | |
| tree | e0d54fbaa6b8b0955ed881af8956b4085039b2d1 | |
| parent | 3947be1969a9ce455ec30f60ef51efb10e4323d1 | |
[PATCH] memory hotplug: move section_mem_map alloc to sparse.c
This basically keeps us from having to extern __kmalloc_section_memmap().
The vaddr_in_vmalloc_area() helper could go in a vmalloc header, but that
header gets hard to work with, because it needs some arch-specific macros.
Just stick it in here for now, instead of creating another header.
Signed-off-by: Dave Hansen <haveblue@us.ibm.com>
Signed-off-by: Lion Vollnhals <webmaster@schiggl.de>
Signed-off-by: Jiri Slaby <xslaby@fi.muni.cz>
Signed-off-by: Yasunori Goto <y-goto@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
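
For quick reference, the helper trio that this patch moves into mm/sparse.c works as follows: try the page allocator first for a physically contiguous memmap, fall back to vmalloc(), and route the free through whichever allocator the address came from. The code below is collected from the mm/sparse.c hunk further down, with explanatory comments added:

```c
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/vmalloc.h>

static struct page *__kmalloc_section_memmap(unsigned long nr_pages)
{
	struct page *page, *ret;
	unsigned long memmap_size = sizeof(struct page) * nr_pages;

	/* Prefer physically contiguous pages from the buddy allocator. */
	page = alloc_pages(GFP_KERNEL, get_order(memmap_size));
	if (page)
		goto got_map_page;

	/* Fall back to a virtually contiguous vmalloc() mapping. */
	ret = vmalloc(memmap_size);
	if (ret)
		goto got_map_ptr;

	return NULL;
got_map_page:
	ret = (struct page *)pfn_to_kaddr(page_to_pfn(page));
got_map_ptr:
	memset(ret, 0, memmap_size);

	return ret;
}

/* The helper the commit message mentions: which allocator owns addr? */
static int vaddr_in_vmalloc_area(void *addr)
{
	if (addr >= (void *)VMALLOC_START &&
	    addr < (void *)VMALLOC_END)
		return 1;
	return 0;
}

/* The free path must match whichever allocator succeeded above. */
static void __kfree_section_memmap(struct page *memmap, unsigned long nr_pages)
{
	if (vaddr_in_vmalloc_area(memmap))
		vfree(memmap);
	else
		free_pages((unsigned long)memmap,
			   get_order(sizeof(struct page) * nr_pages));
}
```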
| drivers/acpi/acpi_memhotplug.c | 5 |
|---|---|
| drivers/base/memory.c | 5 |
| mm/memory_hotplug.c | 48 |
| mm/sparse.c | 74 |

4 files changed, 75 insertions(+), 57 deletions(-)
diff --git a/drivers/acpi/acpi_memhotplug.c b/drivers/acpi/acpi_memhotplug.c
index 01a1bd239263..2143609d2936 100644
--- a/drivers/acpi/acpi_memhotplug.c
+++ b/drivers/acpi/acpi_memhotplug.c
@@ -200,8 +200,7 @@ static int acpi_memory_enable_device(struct acpi_memory_device *mem_device)
 	 * Note: Assume that this function returns zero on success
 	 */
 	result = add_memory(mem_device->start_addr,
-			    (mem_device->end_addr - mem_device->start_addr) + 1,
-			    mem_device->read_write_attribute);
+			    (mem_device->end_addr - mem_device->start_addr) + 1);
 	if (result) {
 		ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "\nadd_memory failed\n"));
 		mem_device->state = MEMORY_INVALID_STATE;
@@ -259,7 +258,7 @@ static int acpi_memory_disable_device(struct acpi_memory_device *mem_device)
 	 * Ask the VM to offline this memory range.
 	 * Note: Assume that this function returns zero on success
 	 */
-	result = remove_memory(start, len, attr);
+	result = remove_memory(start, len);
 	if (result) {
 		ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Hot-Remove failed.\n"));
 		return_VALUE(result);
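
The two hunks above track an API change made earlier in this series: add_memory() and remove_memory() no longer take the read/write attribute argument. For reference, the declarations the ACPI driver now calls should look like this (quoted from memory for the 2.6.15-era tree, so treat the exact prototypes as an assumption):

```c
/* include/linux/memory_hotplug.h (assumed declarations for this series) */
extern int add_memory(u64 start, u64 size);
extern int remove_memory(u64 start, u64 size);
```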
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index 785cb6e6b91c..b7ddd651d664 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -340,15 +340,12 @@ static int memory_probe_init(void)
 static int add_memory_block(unsigned long node_id, struct mem_section *section,
 			unsigned long state, int phys_device)
 {
-	size_t size = sizeof(struct memory_block);
-	struct memory_block *mem = kmalloc(size, GFP_KERNEL);
+	struct memory_block *mem = kzalloc(sizeof(*mem), GFP_KERNEL);
 	int ret = 0;
 
 	if (!mem)
 		return -ENOMEM;
 
-	memset(mem, 0, size);
-
 	mem->phys_index = __section_nr(section);
 	mem->state = state;
 	init_MUTEX(&mem->state_sem);
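
The drivers/base/memory.c hunk is the standard kzalloc() conversion: a single call allocates and zeroes the structure, replacing the kmalloc()-plus-memset() pair and the now-unneeded size local. A minimal sketch of the idiom (not additional patch content):

```c
struct memory_block *mem;

/* kzalloc() == kmalloc() followed by memset(mem, 0, size) */
mem = kzalloc(sizeof(*mem), GFP_KERNEL);
if (!mem)
	return -ENOMEM;
```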
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 855e0fc928b3..2e916c308ae6 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -24,28 +24,6 @@
 
 #include <asm/tlbflush.h>
 
-static struct page *__kmalloc_section_memmap(unsigned long nr_pages)
-{
-	struct page *page, *ret;
-	unsigned long memmap_size = sizeof(struct page) * nr_pages;
-
-	page = alloc_pages(GFP_KERNEL, get_order(memmap_size));
-	if (page)
-		goto got_map_page;
-
-	ret = vmalloc(memmap_size);
-	if (ret)
-		goto got_map_ptr;
-
-	return NULL;
-got_map_page:
-	ret = (struct page *)pfn_to_kaddr(page_to_pfn(page));
-got_map_ptr:
-	memset(ret, 0, memmap_size);
-
-	return ret;
-}
-
 extern void zonetable_add(struct zone *zone, int nid, int zid, unsigned long pfn,
 			  unsigned long size);
 static void __add_zone(struct zone *zone, unsigned long phys_start_pfn)
@@ -60,35 +38,15 @@ static void __add_zone(struct zone *zone, unsigned long phys_start_pfn)
 	zonetable_add(zone, nid, zone_type, phys_start_pfn, nr_pages);
 }
 
-extern int sparse_add_one_section(struct zone *, unsigned long,
-				  struct page *mem_map);
+extern int sparse_add_one_section(struct zone *zone, unsigned long start_pfn,
+				  int nr_pages);
 static int __add_section(struct zone *zone, unsigned long phys_start_pfn)
 {
 	struct pglist_data *pgdat = zone->zone_pgdat;
 	int nr_pages = PAGES_PER_SECTION;
-	struct page *memmap;
 	int ret;
 
-	/*
-	 * This can potentially allocate memory, and does its own
-	 * internal locking.
-	 */
-	sparse_index_init(pfn_to_section_nr(phys_start_pfn), pgdat->node_id);
-
-	pgdat_resize_lock(pgdat, &flags);
-	memmap = __kmalloc_section_memmap(nr_pages);
-	ret = sparse_add_one_section(zone, phys_start_pfn, memmap);
-	pgdat_resize_unlock(pgdat, &flags);
-
-	if (ret <= 0) {
-		/* the mem_map didn't get used */
-		if (memmap >= (struct page *)VMALLOC_START &&
-		    memmap < (struct page *)VMALLOC_END)
-			vfree(memmap);
-		else
-			free_pages((unsigned long)memmap,
-				   get_order(sizeof(struct page) * nr_pages));
-	}
+	ret = sparse_add_one_section(zone, phys_start_pfn, nr_pages);
 
 	if (ret < 0)
 		return ret;
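
With the helpers gone from this file, __add_section() no longer open-codes allocation, locking, or the free-on-failure dance. A minimal sketch of the caller's view of the new contract (hypothetical example_ function; the return-value convention comes from the comment above sparse_add_one_section() below):

```c
/* Hypothetical caller sketch, not part of the patch. */
static int example_add_section(struct zone *zone, unsigned long start_pfn)
{
	int ret;

	/*
	 * sparse_add_one_section() allocates the memmap, takes
	 * pgdat_resize_lock() internally, and frees the memmap
	 * itself when it was not consumed (ret <= 0).
	 */
	ret = sparse_add_one_section(zone, start_pfn, PAGES_PER_SECTION);
	if (ret < 0)
		return ret;	/* e.g. -EEXIST: section already present */

	return 0;
}
```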
diff --git a/mm/sparse.c b/mm/sparse.c
index 0d3bd4bf3aaa..72079b538e2d 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -5,8 +5,10 @@
 #include <linux/mm.h>
 #include <linux/mmzone.h>
 #include <linux/bootmem.h>
+#include <linux/highmem.h>
 #include <linux/module.h>
 #include <linux/spinlock.h>
+#include <linux/vmalloc.h>
 #include <asm/dma.h>
 
 /*
@@ -187,6 +189,45 @@ static struct page *sparse_early_mem_map_alloc(unsigned long pnum)
 	return NULL;
 }
 
+static struct page *__kmalloc_section_memmap(unsigned long nr_pages)
+{
+	struct page *page, *ret;
+	unsigned long memmap_size = sizeof(struct page) * nr_pages;
+
+	page = alloc_pages(GFP_KERNEL, get_order(memmap_size));
+	if (page)
+		goto got_map_page;
+
+	ret = vmalloc(memmap_size);
+	if (ret)
+		goto got_map_ptr;
+
+	return NULL;
+got_map_page:
+	ret = (struct page *)pfn_to_kaddr(page_to_pfn(page));
+got_map_ptr:
+	memset(ret, 0, memmap_size);
+
+	return ret;
+}
+
+static int vaddr_in_vmalloc_area(void *addr)
+{
+	if (addr >= (void *)VMALLOC_START &&
+	    addr < (void *)VMALLOC_END)
+		return 1;
+	return 0;
+}
+
+static void __kfree_section_memmap(struct page *memmap, unsigned long nr_pages)
+{
+	if (vaddr_in_vmalloc_area(memmap))
+		vfree(memmap);
+	else
+		free_pages((unsigned long)memmap,
+			   get_order(sizeof(struct page) * nr_pages));
+}
+
 /*
  * Allocate the accumulated non-linear sections, allocate a mem_map
  * for each and record the physical to section mapping.
@@ -212,14 +253,37 @@ void sparse_init(void)
  * set.  If this is <=0, then that means that the passed-in
  * map was not consumed and must be freed.
  */
-int sparse_add_one_section(unsigned long start_pfn, int nr_pages, struct page *map)
+int sparse_add_one_section(struct zone *zone, unsigned long start_pfn,
+			   int nr_pages)
 {
-	struct mem_section *ms = __pfn_to_section(start_pfn);
+	unsigned long section_nr = pfn_to_section_nr(start_pfn);
+	struct pglist_data *pgdat = zone->zone_pgdat;
+	struct mem_section *ms;
+	struct page *memmap;
+	unsigned long flags;
+	int ret;
 
-	if (ms->section_mem_map & SECTION_MARKED_PRESENT)
-		return -EEXIST;
+	/*
+	 * no locking for this, because it does its own
+	 * plus, it does a kmalloc
+	 */
+	sparse_index_init(section_nr, pgdat->node_id);
+	memmap = __kmalloc_section_memmap(nr_pages);
+
+	pgdat_resize_lock(pgdat, &flags);
 
+	ms = __pfn_to_section(start_pfn);
+	if (ms->section_mem_map & SECTION_MARKED_PRESENT) {
+		ret = -EEXIST;
+		goto out;
+	}
 	ms->section_mem_map |= SECTION_MARKED_PRESENT;
 
-	return sparse_init_one_section(ms, pfn_to_section_nr(start_pfn), map);
+	ret = sparse_init_one_section(ms, section_nr, memmap);
+
+	if (ret <= 0)
+		__kfree_section_memmap(memmap, nr_pages);
+out:
+	pgdat_resize_unlock(pgdat, &flags);
+	return ret;
 }
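
Note the ordering inside the new sparse_add_one_section(): pgdat_resize_lock(), added earlier in this series, takes pgdat->node_size_lock with spin_lock_irqsave(), so the operations that can sleep, namely sparse_index_init() (which may allocate) and __kmalloc_section_memmap() (which may fall back to vmalloc()), run before the lock is taken. Only the SECTION_MARKED_PRESENT check and the memmap installation happen under it. Condensed from the hunk above:

```c
	/* May sleep: do all allocation before taking the spinlock. */
	sparse_index_init(section_nr, pgdat->node_id);
	memmap = __kmalloc_section_memmap(nr_pages);

	pgdat_resize_lock(pgdat, &flags);	/* spin_lock_irqsave() */
	/* ... test SECTION_MARKED_PRESENT, install or free the memmap ... */
	pgdat_resize_unlock(pgdat, &flags);	/* spin_unlock_irqrestore() */
```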
