-rw-r--r--  drivers/virtio/virtio_pci.c |  2
-rw-r--r--  include/linux/module.h      | 25
-rw-r--r--  kernel/module.c             | 35
-rw-r--r--  mm/mlock.c                  | 47
4 files changed, 44 insertions(+), 65 deletions(-)
diff --git a/drivers/virtio/virtio_pci.c b/drivers/virtio/virtio_pci.c
index bef6b45e8a5c..330aacbdec1f 100644
--- a/drivers/virtio/virtio_pci.c
+++ b/drivers/virtio/virtio_pci.c
@@ -192,7 +192,7 @@ static irqreturn_t vp_interrupt(int irq, void *opaque)
 		drv = container_of(vp_dev->vdev.dev.driver,
 				   struct virtio_driver, driver);
 
-		if (drv->config_changed)
+		if (drv && drv->config_changed)
 			drv->config_changed(&vp_dev->vdev);
 	}
 
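The extra NULL test matters because the interrupt can arrive while no driver is bound, leaving vdev.dev.driver NULL. container_of() then yields NULL only because `driver` sits at offset 0 of struct virtio_driver. A minimal userspace sketch of that property, with invented struct names:

#include <assert.h>
#include <stddef.h>

/* container_of as in the kernel, reduced to plain C */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct inner { int x; };
struct outer { struct inner first; int y; };	/* 'first' at offset 0 */

int main(void)
{
	struct inner *p = NULL;
	/* Only because offsetof(struct outer, first) == 0 does a NULL
	 * inner pointer map back to a NULL outer pointer; for any other
	 * member the result would be a bogus non-NULL address, and the
	 * 'drv &&' test above would not catch it. */
	assert(container_of(p, struct outer, first) == NULL);
	return 0;
}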
diff --git a/include/linux/module.h b/include/linux/module.h
index 4f7ea12463d3..f3b8329eb5b8 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -219,11 +219,6 @@ void *__symbol_get_gpl(const char *symbol);
 
 #endif
 
-struct module_ref
-{
-	local_t count;
-} ____cacheline_aligned;
-
 enum module_state
 {
 	MODULE_STATE_LIVE,
@@ -344,8 +339,11 @@ struct module
 	/* Destruction function. */
 	void (*exit)(void);
 
-	/* Reference counts */
-	struct module_ref ref[NR_CPUS];
+#ifdef CONFIG_SMP
+	char *refptr;
+#else
+	local_t ref;
+#endif
 #endif
 };
 #ifndef MODULE_ARCH_INIT
@@ -395,13 +393,22 @@ void __symbol_put(const char *symbol);
 #define symbol_put(x) __symbol_put(MODULE_SYMBOL_PREFIX #x)
 void symbol_put_addr(void *addr);
 
+static inline local_t *__module_ref_addr(struct module *mod, int cpu)
+{
+#ifdef CONFIG_SMP
+	return (local_t *) (mod->refptr + per_cpu_offset(cpu));
+#else
+	return &mod->ref;
+#endif
+}
+
 /* Sometimes we know we already have a refcount, and it's easier not
    to handle the error case (which only happens with rmmod --wait). */
 static inline void __module_get(struct module *module)
 {
 	if (module) {
 		BUG_ON(module_refcount(module) == 0);
-		local_inc(&module->ref[get_cpu()].count);
+		local_inc(__module_ref_addr(module, get_cpu()));
 		put_cpu();
 	}
 }
@@ -413,7 +420,7 @@ static inline int try_module_get(struct module *module)
 	if (module) {
 		unsigned int cpu = get_cpu();
 		if (likely(module_is_live(module)))
-			local_inc(&module->ref[cpu].count);
+			local_inc(__module_ref_addr(module, cpu));
 		else
 			ret = 0;
 		put_cpu();
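On SMP the counter now lives in the module's per-cpu area: refptr is the per-cpu base pointer and per_cpu_offset(cpu) displaces it into each CPU's private copy. A rough userspace sketch of that addressing scheme, assuming a flat array of per-CPU copies (cpu_offsets and module_ref_addr are invented stand-ins, not kernel APIs):

#include <stdio.h>
#include <stdlib.h>

#define NCPUS 4

typedef struct { long counter; } local_t;	/* stand-in for the kernel type */

/* Invented stand-in for per_cpu_offset(): distance from the per-cpu
 * base to CPU i's private copy. */
static long cpu_offsets[NCPUS];
static char *percpu_area;			/* NCPUS copies, back to back */

static local_t *module_ref_addr(char *refptr, int cpu)
{
	/* Mirrors __module_ref_addr() on CONFIG_SMP: one base pointer
	 * plus a per-CPU displacement. */
	return (local_t *)(refptr + cpu_offsets[cpu]);
}

int main(void)
{
	percpu_area = calloc(NCPUS, sizeof(local_t));
	if (!percpu_area)
		return 1;
	for (int i = 0; i < NCPUS; i++)
		cpu_offsets[i] = (long)(i * sizeof(local_t));

	char *refptr = percpu_area;		/* what percpu_modalloc() hands back */
	module_ref_addr(refptr, 2)->counter++;	/* CPU 2 takes a reference */

	long total = 0;				/* module_refcount(): sum all CPUs */
	for (int i = 0; i < NCPUS; i++)
		total += module_ref_addr(refptr, i)->counter;
	printf("refcount = %ld\n", total);	/* prints 1 */
	free(percpu_area);
	return 0;
}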
diff --git a/kernel/module.c b/kernel/module.c
index e8b51d41dd72..ba22484a987e 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -573,13 +573,13 @@ static char last_unloaded_module[MODULE_NAME_LEN+1];
 /* Init the unload section of the module. */
 static void module_unload_init(struct module *mod)
 {
-	unsigned int i;
+	int cpu;
 
 	INIT_LIST_HEAD(&mod->modules_which_use_me);
-	for (i = 0; i < NR_CPUS; i++)
-		local_set(&mod->ref[i].count, 0);
+	for_each_possible_cpu(cpu)
+		local_set(__module_ref_addr(mod, cpu), 0);
 	/* Hold reference count during initialization. */
-	local_set(&mod->ref[raw_smp_processor_id()].count, 1);
+	local_set(__module_ref_addr(mod, raw_smp_processor_id()), 1);
 	/* Backwards compatibility macros put refcount during init. */
 	mod->waiter = current;
 }
@@ -717,10 +717,11 @@ static int try_stop_module(struct module *mod, int flags, int *forced)
 
 unsigned int module_refcount(struct module *mod)
 {
-	unsigned int i, total = 0;
+	unsigned int total = 0;
+	int cpu;
 
-	for (i = 0; i < NR_CPUS; i++)
-		total += local_read(&mod->ref[i].count);
+	for_each_possible_cpu(cpu)
+		total += local_read(__module_ref_addr(mod, cpu));
 	return total;
 }
 EXPORT_SYMBOL(module_refcount);
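Note that an individual per-CPU count can go negative when a reference taken on one CPU is dropped on another; only the sum is meaningful, and summing into an unsigned total still comes out right thanks to modular wrap-around. A tiny sketch of that arithmetic:

#include <stdio.h>

int main(void)
{
	/* e.g. 3 gets on CPU0, 2 puts that landed on CPU1 */
	long per_cpu[2] = { +3, -2 };
	unsigned int total = 0;
	for (int i = 0; i < 2; i++)
		total += (unsigned int)per_cpu[i];	/* wraps mod 2^32 */
	printf("%u\n", total);				/* prints 1 */
	return 0;
}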
@@ -894,7 +895,7 @@ void module_put(struct module *module)
 {
 	if (module) {
 		unsigned int cpu = get_cpu();
-		local_dec(&module->ref[cpu].count);
+		local_dec(__module_ref_addr(module, cpu));
 		/* Maybe they're waiting for us to drop reference? */
 		if (unlikely(!module_is_live(module)))
 			wake_up_process(module->waiter);
@@ -1464,7 +1465,10 @@ static void free_module(struct module *mod)
 	kfree(mod->args);
 	if (mod->percpu)
 		percpu_modfree(mod->percpu);
-
+#if defined(CONFIG_MODULE_UNLOAD) && defined(CONFIG_SMP)
+	if (mod->refptr)
+		percpu_modfree(mod->refptr);
+#endif
 	/* Free lock-classes: */
 	lockdep_free_key_range(mod->module_core, mod->core_size);
 
@@ -2011,6 +2015,14 @@ static noinline struct module *load_module(void __user *umod,
 	if (err < 0)
 		goto free_mod;
 
+#if defined(CONFIG_MODULE_UNLOAD) && defined(CONFIG_SMP)
+	mod->refptr = percpu_modalloc(sizeof(local_t), __alignof__(local_t),
+				      mod->name);
+	if (!mod->refptr) {
+		err = -ENOMEM;
+		goto free_mod;
+	}
+#endif
 	if (pcpuindex) {
 		/* We have a special allocation for this section. */
 		percpu = percpu_modalloc(sechdrs[pcpuindex].sh_size,
@@ -2018,7 +2030,7 @@ static noinline struct module *load_module(void __user *umod,
 					 mod->name);
 	if (!percpu) {
 		err = -ENOMEM;
-		goto free_mod;
+		goto free_percpu;
 	}
 	sechdrs[pcpuindex].sh_flags &= ~(unsigned long)SHF_ALLOC;
 	mod->percpu = percpu;
@@ -2282,6 +2294,9 @@ static noinline struct module *load_module(void __user *umod,
  free_percpu:
 	if (percpu)
 		percpu_modfree(percpu);
+#if defined(CONFIG_MODULE_UNLOAD) && defined(CONFIG_SMP)
+	percpu_modfree(mod->refptr);
+#endif
  free_mod:
 	kfree(args);
  free_hdr:
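The new free_percpu hunk keeps the goto-unwind ordering consistent: once refptr is allocated, any later failure must jump to a label that releases it, which is why the percpu allocation failure above now takes free_percpu instead of free_mod. A hedged sketch of the pattern, with malloc standing in for percpu_modalloc:

#include <stdlib.h>

/* Sketch only: each allocation that succeeds is released by the labels
 * that follow its own failure target, so a later failure unwinds every
 * earlier allocation in reverse order. */
static int fake_load(void)
{
	void *refptr, *percpu;

	refptr = malloc(8);		/* stands in for the refptr alloc */
	if (!refptr)
		goto fail;

	percpu = malloc(64);		/* stands in for the section's percpu alloc */
	if (!percpu)
		goto free_refptr;	/* must unwind refptr, hence the new label */

	/* ... rest of loading ... */
	free(percpu);
	free(refptr);
	return 0;

free_refptr:
	free(refptr);
fail:
	return -1;
}

int main(void) { return fake_load() ? 1 : 0; }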
diff --git a/mm/mlock.c b/mm/mlock.c
index 2904a347e476..028ec482fdd4 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -294,14 +294,10 @@ static inline int __mlock_posix_error_return(long retval)
  *
  * return number of pages [> 0] to be removed from locked_vm on success
  * of "special" vmas.
- *
- * return negative error if vma spanning @start-@range disappears while
- * mmap semaphore is dropped.  Unlikely?
  */
 long mlock_vma_pages_range(struct vm_area_struct *vma,
 			unsigned long start, unsigned long end)
 {
-	struct mm_struct *mm = vma->vm_mm;
 	int nr_pages = (end - start) / PAGE_SIZE;
 	BUG_ON(!(vma->vm_flags & VM_LOCKED));
 
@@ -314,20 +310,8 @@ long mlock_vma_pages_range(struct vm_area_struct *vma,
 	if (!((vma->vm_flags & (VM_DONTEXPAND | VM_RESERVED)) ||
 			is_vm_hugetlb_page(vma) ||
 			vma == get_gate_vma(current))) {
-		long error;
-		downgrade_write(&mm->mmap_sem);
-
-		error = __mlock_vma_pages_range(vma, start, end, 1);
 
-		up_read(&mm->mmap_sem);
-		/* vma can change or disappear */
-		down_write(&mm->mmap_sem);
-		vma = find_vma(mm, start);
-		/* non-NULL vma must contain @start, but need to check @end */
-		if (!vma || end > vma->vm_end)
-			return -ENOMEM;
-
-		return 0;	/* hide other errors from mmap(), et al */
+		return __mlock_vma_pages_range(vma, start, end, 1);
 	}
 
 	/*
@@ -438,41 +422,14 @@ success:
 	vma->vm_flags = newflags;
 
 	if (lock) {
-		/*
-		 * mmap_sem is currently held for write.  Downgrade the write
-		 * lock to a read lock so that other faults, mmap scans, ...
-		 * while we fault in all pages.
-		 */
-		downgrade_write(&mm->mmap_sem);
-
 		ret = __mlock_vma_pages_range(vma, start, end, 1);
 
-		/*
-		 * Need to reacquire mmap sem in write mode, as our callers
-		 * expect this.  We have no support for atomically upgrading
-		 * a sem to write, so we need to check for ranges while sem
-		 * is unlocked.
-		 */
-		up_read(&mm->mmap_sem);
-		/* vma can change or disappear */
-		down_write(&mm->mmap_sem);
-		*prev = find_vma(mm, start);
-		/* non-NULL *prev must contain @start, but need to check @end */
-		if (!(*prev) || end > (*prev)->vm_end)
-			ret = -ENOMEM;
-		else if (ret > 0) {
+		if (ret > 0) {
 			mm->locked_vm -= ret;
 			ret = 0;
 		} else
 			ret = __mlock_posix_error_return(ret);	/* translate if needed */
 	} else {
-		/*
-		 * TODO: for unlocking, pages will already be resident, so
-		 * we don't need to wait for allocations/reclaim/pagein, ...
-		 * However, unlocking a very large region can still take a
-		 * while.  Should we downgrade the semaphore for both lock
-		 * AND unlock ?
-		 */
 		__mlock_vma_pages_range(vma, start, end, 0);
 	}
 
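With the downgrade_write()/up_read()/down_write() dance removed, mmap_sem stays held for write across the whole population, so the vma and *prev cannot change underneath and the find_vma() revalidation becomes unnecessary. A rough pthread sketch of the restored invariant (range, populate and lock_range are invented for illustration, not mm APIs):

#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t map_lock = PTHREAD_RWLOCK_INITIALIZER;

struct range { unsigned long start, end; };

/* stand-in for __mlock_vma_pages_range(): touch every page */
static long populate(struct range *r)
{
	return (long)((r->end - r->start) / 4096);
}

static long lock_range(struct range *r)
{
	long ret;

	pthread_rwlock_wrlock(&map_lock);
	/* r cannot be freed or resized here: the writer lock is never
	 * dropped, so no re-lookup is needed afterwards.  Dropping and
	 * retaking the lock (the removed code) would force one. */
	ret = populate(r);
	pthread_rwlock_unlock(&map_lock);
	return ret;
}

int main(void)
{
	struct range r = { 0, 16 * 4096 };
	printf("%ld pages\n", lock_range(&r));
	return 0;
}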