Diffstat (limited to 'mm/memcontrol.c')
-rw-r--r--  mm/memcontrol.c  156
1 file changed, 15 insertions, 141 deletions
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index baf7eb27e3ae..f3f8a4f52a0c 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -343,9 +343,6 @@ struct mem_cgroup {
 	struct cg_proto tcp_mem;
 #endif
 #if defined(CONFIG_MEMCG_KMEM)
-	/* analogous to slab_common's slab_caches list, but per-memcg;
-	 * protected by memcg_slab_mutex */
-	struct list_head memcg_slab_caches;
 	/* Index in the kmem_cache->memcg_params->memcg_caches array */
 	int kmemcg_id;
 #endif
@@ -2476,25 +2473,6 @@ static void commit_charge(struct page *page, struct mem_cgroup *memcg,
 }
 
 #ifdef CONFIG_MEMCG_KMEM
-/*
- * The memcg_slab_mutex is held whenever a per memcg kmem cache is created or
- * destroyed. It protects memcg_caches arrays and memcg_slab_caches lists.
- */
-static DEFINE_MUTEX(memcg_slab_mutex);
-
-/*
- * This is a bit cumbersome, but it is rarely used and avoids a backpointer
- * in the memcg_cache_params struct.
- */
-static struct kmem_cache *memcg_params_to_cache(struct memcg_cache_params *p)
-{
-	struct kmem_cache *cachep;
-
-	VM_BUG_ON(p->is_root_cache);
-	cachep = p->root_cache;
-	return cache_from_memcg_idx(cachep, memcg_cache_id(p->memcg));
-}
-
 int memcg_charge_kmem(struct mem_cgroup *memcg, gfp_t gfp,
 		      unsigned long nr_pages)
 {
@@ -2578,10 +2556,7 @@ static int memcg_alloc_cache_id(void)
 	else if (size > MEMCG_CACHES_MAX_SIZE)
 		size = MEMCG_CACHES_MAX_SIZE;
 
-	mutex_lock(&memcg_slab_mutex);
 	err = memcg_update_all_caches(size);
-	mutex_unlock(&memcg_slab_mutex);
-
 	if (err) {
 		ida_simple_remove(&kmem_limited_groups, id);
 		return err;
@@ -2604,120 +2579,20 @@ void memcg_update_array_size(int num)
 	memcg_limited_groups_array_size = num;
 }
 
-static void memcg_register_cache(struct mem_cgroup *memcg,
-				 struct kmem_cache *root_cache)
-{
-	struct kmem_cache *cachep;
-	int id;
-
-	lockdep_assert_held(&memcg_slab_mutex);
-
-	id = memcg_cache_id(memcg);
-
-	/*
-	 * Since per-memcg caches are created asynchronously on first
-	 * allocation (see memcg_kmem_get_cache()), several threads can try to
-	 * create the same cache, but only one of them may succeed.
-	 */
-	if (cache_from_memcg_idx(root_cache, id))
-		return;
-
-	cachep = memcg_create_kmem_cache(memcg, root_cache);
-	/*
-	 * If we could not create a memcg cache, do not complain, because
-	 * that's not critical at all as we can always proceed with the root
-	 * cache.
-	 */
-	if (!cachep)
-		return;
-
-	list_add(&cachep->memcg_params->list, &memcg->memcg_slab_caches);
-
-	/*
-	 * Since readers won't lock (see cache_from_memcg_idx()), we need a
-	 * barrier here to ensure nobody will see the kmem_cache partially
-	 * initialized.
-	 */
-	smp_wmb();
-
-	BUG_ON(root_cache->memcg_params->memcg_caches[id]);
-	root_cache->memcg_params->memcg_caches[id] = cachep;
-}
-
-static void memcg_unregister_cache(struct kmem_cache *cachep)
-{
-	struct kmem_cache *root_cache;
-	struct mem_cgroup *memcg;
-	int id;
-
-	lockdep_assert_held(&memcg_slab_mutex);
-
-	BUG_ON(is_root_cache(cachep));
-
-	root_cache = cachep->memcg_params->root_cache;
-	memcg = cachep->memcg_params->memcg;
-	id = memcg_cache_id(memcg);
-
-	BUG_ON(root_cache->memcg_params->memcg_caches[id] != cachep);
-	root_cache->memcg_params->memcg_caches[id] = NULL;
-
-	list_del(&cachep->memcg_params->list);
-
-	kmem_cache_destroy(cachep);
-}
-
-int __memcg_cleanup_cache_params(struct kmem_cache *s)
-{
-	struct kmem_cache *c;
-	int i, failed = 0;
-
-	mutex_lock(&memcg_slab_mutex);
-	for_each_memcg_cache_index(i) {
-		c = cache_from_memcg_idx(s, i);
-		if (!c)
-			continue;
-
-		memcg_unregister_cache(c);
-
-		if (cache_from_memcg_idx(s, i))
-			failed++;
-	}
-	mutex_unlock(&memcg_slab_mutex);
-	return failed;
-}
-
-static void memcg_unregister_all_caches(struct mem_cgroup *memcg)
-{
-	struct kmem_cache *cachep;
-	struct memcg_cache_params *params, *tmp;
-
-	if (!memcg_kmem_is_active(memcg))
-		return;
-
-	mutex_lock(&memcg_slab_mutex);
-	list_for_each_entry_safe(params, tmp, &memcg->memcg_slab_caches, list) {
-		cachep = memcg_params_to_cache(params);
-		memcg_unregister_cache(cachep);
-	}
-	mutex_unlock(&memcg_slab_mutex);
-}
-
-struct memcg_register_cache_work {
+struct memcg_kmem_cache_create_work {
 	struct mem_cgroup *memcg;
 	struct kmem_cache *cachep;
 	struct work_struct work;
 };
 
-static void memcg_register_cache_func(struct work_struct *w)
+static void memcg_kmem_cache_create_func(struct work_struct *w)
 {
-	struct memcg_register_cache_work *cw =
-		container_of(w, struct memcg_register_cache_work, work);
+	struct memcg_kmem_cache_create_work *cw =
+		container_of(w, struct memcg_kmem_cache_create_work, work);
 	struct mem_cgroup *memcg = cw->memcg;
 	struct kmem_cache *cachep = cw->cachep;
 
-	mutex_lock(&memcg_slab_mutex);
-	memcg_register_cache(memcg, cachep);
-	mutex_unlock(&memcg_slab_mutex);
+	memcg_create_kmem_cache(memcg, cachep);
 
 	css_put(&memcg->css);
 	kfree(cw);
@@ -2726,10 +2601,10 @@ static void memcg_register_cache_func(struct work_struct *w)
 /*
  * Enqueue the creation of a per-memcg kmem_cache.
  */
-static void __memcg_schedule_register_cache(struct mem_cgroup *memcg,
-					    struct kmem_cache *cachep)
+static void __memcg_schedule_kmem_cache_create(struct mem_cgroup *memcg,
+					       struct kmem_cache *cachep)
 {
-	struct memcg_register_cache_work *cw;
+	struct memcg_kmem_cache_create_work *cw;
 
 	cw = kmalloc(sizeof(*cw), GFP_NOWAIT);
 	if (!cw)
@@ -2739,18 +2614,18 @@ static void __memcg_schedule_register_cache(struct mem_cgroup *memcg,
 
 	cw->memcg = memcg;
 	cw->cachep = cachep;
+	INIT_WORK(&cw->work, memcg_kmem_cache_create_func);
 
-	INIT_WORK(&cw->work, memcg_register_cache_func);
 	schedule_work(&cw->work);
 }
 
-static void memcg_schedule_register_cache(struct mem_cgroup *memcg,
-					  struct kmem_cache *cachep)
+static void memcg_schedule_kmem_cache_create(struct mem_cgroup *memcg,
+					     struct kmem_cache *cachep)
 {
 	/*
 	 * We need to stop accounting when we kmalloc, because if the
 	 * corresponding kmalloc cache is not yet created, the first allocation
-	 * in __memcg_schedule_register_cache will recurse.
+	 * in __memcg_schedule_kmem_cache_create will recurse.
 	 *
 	 * However, it is better to enclose the whole function. Depending on
 	 * the debugging options enabled, INIT_WORK(), for instance, can
@@ -2759,7 +2634,7 @@ static void memcg_schedule_register_cache(struct mem_cgroup *memcg,
 	 * the safest choice is to do it like this, wrapping the whole function.
 	 */
 	current->memcg_kmem_skip_account = 1;
-	__memcg_schedule_register_cache(memcg, cachep);
+	__memcg_schedule_kmem_cache_create(memcg, cachep);
 	current->memcg_kmem_skip_account = 0;
 }
 
@@ -2807,7 +2682,7 @@ struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep)
 	 * could happen with the slab_mutex held. So it's better to
 	 * defer everything.
 	 */
-	memcg_schedule_register_cache(memcg, cachep);
+	memcg_schedule_kmem_cache_create(memcg, cachep);
 out:
 	css_put(&memcg->css);
 	return cachep;
@@ -4136,7 +4011,7 @@ static int memcg_init_kmem(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
 
 static void memcg_destroy_kmem(struct mem_cgroup *memcg)
 {
-	memcg_unregister_all_caches(memcg);
+	memcg_destroy_kmem_caches(memcg);
 	mem_cgroup_sockets_destroy(memcg);
 }
 #else
@@ -4664,7 +4539,6 @@ mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
 	spin_lock_init(&memcg->event_list_lock);
 #ifdef CONFIG_MEMCG_KMEM
 	memcg->kmemcg_id = -1;
-	INIT_LIST_HEAD(&memcg->memcg_slab_caches);
 #endif
 
 	return &memcg->css;