author		Linus Torvalds <torvalds@linux-foundation.org>	2013-09-03 14:37:15 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2013-09-03 14:37:15 -0400
commit		542a086ac72fb193cbc1b996963a572269e57743 (patch)
tree		b137c08037cca4ffc8a156a891a01113b3b8edce /drivers/base/memory.c
parent		1d1fdd95df681f0c065d90ffaafa215a0e8825e2 (diff)
parent		1eeeef153c02f5856ec109fa532eb5f31c39f85c (diff)
Merge tag 'driver-core-3.12-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/driver-core
Pull driver core patches from Greg KH:
"Here's the big driver core pull request for 3.12-rc1.
Lots of tiny changes here fixing up the way sysfs attributes are
created, to try to make drivers simpler, and fix a whole class of race
conditions with the creation of device attributes after the device was
announced to userspace.
All the various pieces are acked by the different subsystem
maintainers"
* tag 'driver-core-3.12-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/driver-core: (119 commits)
firmware loader: fix pending_fw_head list corruption
drivers/base/memory.c: introduce help macro to_memory_block
dynamic debug: line queries failing due to uninitialized local variable
sysfs: sysfs_create_groups returns a value.
debugfs: provide debugfs_create_x64() when disabled
rbd: convert bus code to use bus_groups
firmware: dcdbas: use binary attribute groups
sysfs: add sysfs_create/remove_groups for when SYSFS is not enabled
driver core: add #include <linux/sysfs.h> to core files.
HID: convert bus code to use dev_groups
Input: serio: convert bus code to use drv_groups
Input: gameport: convert bus code to use drv_groups
driver core: firmware: use __ATTR_RW()
driver core: core: use DEVICE_ATTR_RO
driver core: bus: use DRIVER_ATTR_WO()
driver core: create write-only attribute macros for devices and drivers
sysfs: create __ATTR_WO()
driver-core: platform: convert bus code to use dev_groups
workqueue: convert bus code to use dev_groups
MEI: convert bus code to use dev_groups
...
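Many hunks in the diff below replace open-coded
container_of(dev, struct memory_block, dev) with the new to_memory_block()
helper added at the top of the file. For reference, a standalone userspace
sketch of the idiom (the macro here is a simplified container_of; the kernel's
version adds type checking):

#include <stddef.h>
#include <stdio.h>

/* simplified container_of: step back from an embedded member to its parent */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct device { int id; };

struct memory_block {
	unsigned long start_section_nr;
	struct device dev;	/* embedded, as in drivers/base/memory.c */
};

#define to_memory_block(d) container_of(d, struct memory_block, dev)

int main(void)
{
	struct memory_block blk = { .start_section_nr = 512 };
	struct device *dev = &blk.dev;	/* what a sysfs callback receives */

	/* recovers &blk and prints 512 */
	printf("%lu\n", to_memory_block(dev)->start_section_nr);
	return 0;
}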
Diffstat (limited to 'drivers/base/memory.c')
-rw-r--r--	drivers/base/memory.c	258
1 file changed, 115 insertions(+), 143 deletions(-)
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index ec386ee9cb22..1c617623c8ae 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -16,7 +16,6 @@
 #include <linux/capability.h>
 #include <linux/device.h>
 #include <linux/memory.h>
-#include <linux/kobject.h>
 #include <linux/memory_hotplug.h>
 #include <linux/mm.h>
 #include <linux/mutex.h>
@@ -30,6 +29,8 @@ static DEFINE_MUTEX(mem_sysfs_mutex);
 
 #define MEMORY_CLASS_NAME	"memory"
 
+#define to_memory_block(dev) container_of(dev, struct memory_block, dev)
+
 static int sections_per_block;
 
 static inline int base_memory_block_id(int section_nr)
@@ -77,7 +78,7 @@ EXPORT_SYMBOL(unregister_memory_isolate_notifier);
 
 static void memory_block_release(struct device *dev)
 {
-	struct memory_block *mem = container_of(dev, struct memory_block, dev);
+	struct memory_block *mem = to_memory_block(dev);
 
 	kfree(mem);
 }
@@ -110,8 +111,7 @@ static unsigned long get_memory_block_size(void)
 static ssize_t show_mem_start_phys_index(struct device *dev,
 			struct device_attribute *attr, char *buf)
 {
-	struct memory_block *mem =
-		container_of(dev, struct memory_block, dev);
+	struct memory_block *mem = to_memory_block(dev);
 	unsigned long phys_index;
 
 	phys_index = mem->start_section_nr / sections_per_block;
@@ -121,8 +121,7 @@ static ssize_t show_mem_start_phys_index(struct device *dev,
 static ssize_t show_mem_end_phys_index(struct device *dev,
 			struct device_attribute *attr, char *buf)
 {
-	struct memory_block *mem =
-		container_of(dev, struct memory_block, dev);
+	struct memory_block *mem = to_memory_block(dev);
 	unsigned long phys_index;
 
 	phys_index = mem->end_section_nr / sections_per_block;
@@ -137,8 +136,7 @@ static ssize_t show_mem_removable(struct device *dev,
 {
 	unsigned long i, pfn;
 	int ret = 1;
-	struct memory_block *mem =
-		container_of(dev, struct memory_block, dev);
+	struct memory_block *mem = to_memory_block(dev);
 
 	for (i = 0; i < sections_per_block; i++) {
 		if (!present_section_nr(mem->start_section_nr + i))
@@ -156,8 +154,7 @@ static ssize_t show_mem_removable(struct device *dev,
 static ssize_t show_mem_state(struct device *dev,
 			struct device_attribute *attr, char *buf)
 {
-	struct memory_block *mem =
-		container_of(dev, struct memory_block, dev);
+	struct memory_block *mem = to_memory_block(dev);
 	ssize_t len = 0;
 
 	/*
@@ -263,9 +260,8 @@ memory_block_action(unsigned long phys_index, unsigned long action, int online_t
 	return ret;
 }
 
-static int __memory_block_change_state(struct memory_block *mem,
-		unsigned long to_state, unsigned long from_state_req,
-		int online_type)
+static int memory_block_change_state(struct memory_block *mem,
+		unsigned long to_state, unsigned long from_state_req)
 {
 	int ret = 0;
 
@@ -275,105 +271,89 @@ static int __memory_block_change_state(struct memory_block *mem,
 	if (to_state == MEM_OFFLINE)
 		mem->state = MEM_GOING_OFFLINE;
 
-	ret = memory_block_action(mem->start_section_nr, to_state, online_type);
+	ret = memory_block_action(mem->start_section_nr, to_state,
+				  mem->online_type);
+
 	mem->state = ret ? from_state_req : to_state;
+
 	return ret;
 }
 
+/* The device lock serializes operations on memory_subsys_[online|offline] */
 static int memory_subsys_online(struct device *dev)
 {
-	struct memory_block *mem = container_of(dev, struct memory_block, dev);
+	struct memory_block *mem = to_memory_block(dev);
 	int ret;
 
-	mutex_lock(&mem->state_mutex);
-
-	ret = mem->state == MEM_ONLINE ? 0 :
-		__memory_block_change_state(mem, MEM_ONLINE, MEM_OFFLINE,
-					    ONLINE_KEEP);
+	if (mem->state == MEM_ONLINE)
+		return 0;
 
-	mutex_unlock(&mem->state_mutex);
-	return ret;
-}
-
-static int memory_subsys_offline(struct device *dev)
-{
-	struct memory_block *mem = container_of(dev, struct memory_block, dev);
-	int ret;
+	/*
+	 * If we are called from store_mem_state(), online_type will be
+	 * set >= 0 Otherwise we were called from the device online
+	 * attribute and need to set the online_type.
+	 */
+	if (mem->online_type < 0)
+		mem->online_type = ONLINE_KEEP;
 
-	mutex_lock(&mem->state_mutex);
+	ret = memory_block_change_state(mem, MEM_ONLINE, MEM_OFFLINE);
 
-	ret = mem->state == MEM_OFFLINE ? 0 :
-		__memory_block_change_state(mem, MEM_OFFLINE, MEM_ONLINE, -1);
+	/* clear online_type */
+	mem->online_type = -1;
 
-	mutex_unlock(&mem->state_mutex);
 	return ret;
 }
 
-static int __memory_block_change_state_uevent(struct memory_block *mem,
-		unsigned long to_state, unsigned long from_state_req,
-		int online_type)
-{
-	int ret = __memory_block_change_state(mem, to_state, from_state_req,
-					      online_type);
-	if (!ret) {
-		switch (mem->state) {
-		case MEM_OFFLINE:
-			kobject_uevent(&mem->dev.kobj, KOBJ_OFFLINE);
-			break;
-		case MEM_ONLINE:
-			kobject_uevent(&mem->dev.kobj, KOBJ_ONLINE);
-			break;
-		default:
-			break;
-		}
-	}
-	return ret;
-}
-
-static int memory_block_change_state(struct memory_block *mem,
-		unsigned long to_state, unsigned long from_state_req,
-		int online_type)
+static int memory_subsys_offline(struct device *dev)
 {
-	int ret;
+	struct memory_block *mem = to_memory_block(dev);
 
-	mutex_lock(&mem->state_mutex);
-	ret = __memory_block_change_state_uevent(mem, to_state, from_state_req,
-						 online_type);
-	mutex_unlock(&mem->state_mutex);
+	if (mem->state == MEM_OFFLINE)
+		return 0;
 
-	return ret;
+	return memory_block_change_state(mem, MEM_OFFLINE, MEM_ONLINE);
 }
+
 static ssize_t
 store_mem_state(struct device *dev,
 		struct device_attribute *attr, const char *buf, size_t count)
 {
-	struct memory_block *mem;
-	bool offline;
-	int ret = -EINVAL;
-
-	mem = container_of(dev, struct memory_block, dev);
+	struct memory_block *mem = to_memory_block(dev);
+	int ret, online_type;
 
 	lock_device_hotplug();
 
-	if (!strncmp(buf, "online_kernel", min_t(int, count, 13))) {
-		offline = false;
-		ret = memory_block_change_state(mem, MEM_ONLINE,
-						MEM_OFFLINE, ONLINE_KERNEL);
-	} else if (!strncmp(buf, "online_movable", min_t(int, count, 14))) {
-		offline = false;
-		ret = memory_block_change_state(mem, MEM_ONLINE,
-						MEM_OFFLINE, ONLINE_MOVABLE);
-	} else if (!strncmp(buf, "online", min_t(int, count, 6))) {
-		offline = false;
-		ret = memory_block_change_state(mem, MEM_ONLINE,
-						MEM_OFFLINE, ONLINE_KEEP);
-	} else if(!strncmp(buf, "offline", min_t(int, count, 7))) {
-		offline = true;
-		ret = memory_block_change_state(mem, MEM_OFFLINE,
-						MEM_ONLINE, -1);
+	if (!strncmp(buf, "online_kernel", min_t(int, count, 13)))
+		online_type = ONLINE_KERNEL;
+	else if (!strncmp(buf, "online_movable", min_t(int, count, 14)))
+		online_type = ONLINE_MOVABLE;
+	else if (!strncmp(buf, "online", min_t(int, count, 6)))
+		online_type = ONLINE_KEEP;
+	else if (!strncmp(buf, "offline", min_t(int, count, 7)))
+		online_type = -1;
+	else
+		return -EINVAL;
+
+	switch (online_type) {
+	case ONLINE_KERNEL:
+	case ONLINE_MOVABLE:
+	case ONLINE_KEEP:
+		/*
+		 * mem->online_type is not protected so there can be a
+		 * race here.  However, when racing online, the first
+		 * will succeed and the second will just return as the
+		 * block will already be online.  The online type
+		 * could be either one, but that is expected.
+		 */
+		mem->online_type = online_type;
+		ret = device_online(&mem->dev);
+		break;
+	case -1:
+		ret = device_offline(&mem->dev);
+		break;
+	default:
+		ret = -EINVAL; /* should never happen */
 	}
-	if (!ret)
-		dev->offline = offline;
 
 	unlock_device_hotplug();
 
@@ -394,8 +374,7 @@ store_mem_state(struct device *dev,
 static ssize_t show_phys_device(struct device *dev,
 				struct device_attribute *attr, char *buf)
 {
-	struct memory_block *mem =
-		container_of(dev, struct memory_block, dev);
+	struct memory_block *mem = to_memory_block(dev);
 	return sprintf(buf, "%d\n", mem->phys_device);
 }
 
@@ -471,7 +450,7 @@ store_soft_offline_page(struct device *dev,
 	u64 pfn;
 	if (!capable(CAP_SYS_ADMIN))
 		return -EPERM;
-	if (strict_strtoull(buf, 0, &pfn) < 0)
+	if (kstrtoull(buf, 0, &pfn) < 0)
 		return -EINVAL;
 	pfn >>= PAGE_SHIFT;
 	if (!pfn_valid(pfn))
@@ -490,7 +469,7 @@ store_hard_offline_page(struct device *dev,
 	u64 pfn;
 	if (!capable(CAP_SYS_ADMIN))
 		return -EPERM;
-	if (strict_strtoull(buf, 0, &pfn) < 0)
+	if (kstrtoull(buf, 0, &pfn) < 0)
 		return -EINVAL;
 	pfn >>= PAGE_SHIFT;
 	ret = memory_failure(pfn, 0, 0);
@@ -527,7 +506,7 @@ struct memory_block *find_memory_block_hinted(struct mem_section *section,
 		put_device(&hint->dev);
 	if (!dev)
 		return NULL;
-	return container_of(dev, struct memory_block, dev);
+	return to_memory_block(dev);
 }
 
 /*
@@ -567,16 +546,13 @@ static const struct attribute_group *memory_memblk_attr_groups[] = {
 static
 int register_memory(struct memory_block *memory)
 {
-	int error;
-
 	memory->dev.bus = &memory_subsys;
 	memory->dev.id = memory->start_section_nr / sections_per_block;
 	memory->dev.release = memory_block_release;
 	memory->dev.groups = memory_memblk_attr_groups;
 	memory->dev.offline = memory->state == MEM_OFFLINE;
 
-	error = device_register(&memory->dev);
-	return error;
+	return device_register(&memory->dev);
 }
 
 static int init_memory_block(struct memory_block **memory,
@@ -597,7 +573,6 @@ static int init_memory_block(struct memory_block **memory,
 	mem->end_section_nr = mem->start_section_nr + sections_per_block - 1;
 	mem->state = state;
 	mem->section_count++;
-	mutex_init(&mem->state_mutex);
 	start_pfn = section_nr_to_pfn(mem->start_section_nr);
 	mem->phys_device = arch_get_memory_phys_device(start_pfn);
 
@@ -607,55 +582,57 @@ static int init_memory_block(struct memory_block **memory,
 	return ret;
 }
 
-static int add_memory_section(int nid, struct mem_section *section,
-			struct memory_block **mem_p,
-			unsigned long state, enum mem_add_context context)
+static int add_memory_block(int base_section_nr)
 {
-	struct memory_block *mem = NULL;
-	int scn_nr = __section_nr(section);
-	int ret = 0;
-
-	mutex_lock(&mem_sysfs_mutex);
-
-	if (context == BOOT) {
-		/* same memory block ? */
-		if (mem_p && *mem_p)
-			if (scn_nr >= (*mem_p)->start_section_nr &&
-			    scn_nr <= (*mem_p)->end_section_nr) {
-				mem = *mem_p;
-				kobject_get(&mem->dev.kobj);
-			}
-	} else
-		mem = find_memory_block(section);
-
-	if (mem) {
-		mem->section_count++;
-		kobject_put(&mem->dev.kobj);
-	} else {
-		ret = init_memory_block(&mem, section, state);
-		/* store memory_block pointer for next loop */
-		if (!ret && context == BOOT)
-			if (mem_p)
-				*mem_p = mem;
-	}
+	struct memory_block *mem;
+	int i, ret, section_count = 0, section_nr;
 
-	if (!ret) {
-		if (context == HOTPLUG &&
-		    mem->section_count == sections_per_block)
-			ret = register_mem_sect_under_node(mem, nid);
+	for (i = base_section_nr;
+	     (i < base_section_nr + sections_per_block) && i < NR_MEM_SECTIONS;
+	     i++) {
+		if (!present_section_nr(i))
+			continue;
+		if (section_count == 0)
+			section_nr = i;
+		section_count++;
 	}
 
-	mutex_unlock(&mem_sysfs_mutex);
-	return ret;
+	if (section_count == 0)
+		return 0;
+	ret = init_memory_block(&mem, __nr_to_section(section_nr), MEM_ONLINE);
+	if (ret)
+		return ret;
+	mem->section_count = section_count;
+	return 0;
 }
 
+
 /*
  * need an interface for the VM to add new memory regions,
  * but without onlining it.
  */
 int register_new_memory(int nid, struct mem_section *section)
 {
-	return add_memory_section(nid, section, NULL, MEM_OFFLINE, HOTPLUG);
+	int ret = 0;
+	struct memory_block *mem;
+
+	mutex_lock(&mem_sysfs_mutex);
+
+	mem = find_memory_block(section);
+	if (mem) {
+		mem->section_count++;
+		put_device(&mem->dev);
+	} else {
+		ret = init_memory_block(&mem, section, MEM_OFFLINE);
+		if (ret)
+			goto out;
+	}
+
+	if (mem->section_count == sections_per_block)
+		ret = register_mem_sect_under_node(mem, nid);
+out:
+	mutex_unlock(&mem_sysfs_mutex);
+	return ret;
 }
 
 #ifdef CONFIG_MEMORY_HOTREMOVE
@@ -665,7 +642,7 @@ unregister_memory(struct memory_block *memory)
 	BUG_ON(memory->dev.bus != &memory_subsys);
 
 	/* drop the ref. we got in remove_memory_block() */
-	kobject_put(&memory->dev.kobj);
+	put_device(&memory->dev);
 	device_unregister(&memory->dev);
 }
 
@@ -682,7 +659,7 @@ static int remove_memory_block(unsigned long node_id,
 	if (mem->section_count == 0)
 		unregister_memory(mem);
 	else
-		kobject_put(&mem->dev.kobj);
+		put_device(&mem->dev);
 
 	mutex_unlock(&mem_sysfs_mutex);
 	return 0;
@@ -735,7 +712,6 @@ int __init memory_dev_init(void)
 	int ret;
 	int err;
 	unsigned long block_sz;
-	struct memory_block *mem = NULL;
 
 	ret = subsys_system_register(&memory_subsys, memory_root_attr_groups);
 	if (ret)
@@ -748,17 +724,13 @@ int __init memory_dev_init(void)
 	 * Create entries for memory sections that were found
 	 * during boot and have been initialized
 	 */
-	for (i = 0; i < NR_MEM_SECTIONS; i++) {
-		if (!present_section_nr(i))
-			continue;
-		/* don't need to reuse memory_block if only one per block */
-		err = add_memory_section(0, __nr_to_section(i),
-				(sections_per_block == 1) ? NULL : &mem,
-				MEM_ONLINE,
-				BOOT);
+	mutex_lock(&mem_sysfs_mutex);
+	for (i = 0; i < NR_MEM_SECTIONS; i += sections_per_block) {
+		err = add_memory_block(i);
 		if (!ret)
 			ret = err;
 	}
+	mutex_unlock(&mem_sysfs_mutex);
 
 out:
 	if (ret)
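One consequence of the store_mem_state() rewrite above: "online"/"offline"
writes now go through device_online()/device_offline(), which take the device
lock and dispatch to the bus's online/offline callbacks, keeping dev->offline
coherent with mem->state. A rough sketch of how those callbacks hang off the
bus (the memory_subsys definition lives elsewhere in this file and is not part
of the hunks shown; field names per the 3.11+ struct bus_type):

static struct bus_type memory_subsys = {
	.name = MEMORY_CLASS_NAME,
	.dev_name = MEMORY_CLASS_NAME,
	.online = memory_subsys_online,		/* invoked by device_online() */
	.offline = memory_subsys_offline,	/* invoked by device_offline() */
};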