author	Tobin C. Harding <tobin@kernel.org>	2019-05-13 20:16:15 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2019-05-14 12:47:45 -0400
commit	16cb0ec75b346ec4fce11c5ce40d68b173f4e2f4 (patch)
tree	fcd09dc64062bf70b6acbca2d2f22d4d4e8f7867 /mm
parent	916ac0527837aa0be46d82804f93dd46f03aaedc (diff)
slab: use slab_list instead of lru
Currently we use the page->lru list for maintaining lists of slabs.  We have
a list in the page structure (slab_list) that can be used for this purpose.
Doing so makes the code cleaner since we are not overloading the lru list.

Use the slab_list instead of the lru list for maintaining lists of slabs.

Link: http://lkml.kernel.org/r/20190402230545.2929-7-tobin@kernel.org
Signed-off-by: Tobin C. Harding <tobin@kernel.org>
Acked-by: Christoph Lameter <cl@linux.com>
Reviewed-by: Roman Gushchin <guro@fb.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Pekka Enberg <penberg@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
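Note: slab_list is not a new field in memory terms; it sits in the slab arm of the big union in struct page and occupies the same words that lru occupies in the page-cache arm, so this patch changes naming and readability only, not the struct layout. Below is a condensed, illustrative C sketch of the relevant part of struct page from around this kernel version (most fields elided; the minimal list_head definition is repeated only to keep the sketch self-contained; include/linux/mm_types.h has the authoritative definition):

/* Condensed sketch of struct page, roughly the v5.1/v5.2 layout; illustrative only. */
struct list_head { struct list_head *next, *prev; };	/* mirrors include/linux/types.h */
struct kmem_cache;

struct page {
	unsigned long flags;
	union {
		struct {	/* Page cache and anonymous pages */
			struct list_head lru;	/* pageout/reclaim list */
			/* ... mapping, index, private ... */
		};
		struct {	/* slab, slob and slub */
			union {
				struct list_head slab_list;	/* same words as lru above */
				struct {	/* SLUB: per-cpu partial pages */
					struct page *next;
					int pages;
					int pobjects;
				};
			};
			struct kmem_cache *slab_cache;	/* not slob */
			void *freelist;			/* first free object */
			/* ... s_mem / counters ... */
		};
		/* ... tail pages, page table pages, ZONE_DEVICE, rcu_head ... */
	};
	/* ... _refcount, _mapcount, memcg pointer, etc. ... */
};

Because the generic list helpers (list_entry(), list_for_each_entry(), ...) take the member name and recover the containing struct page via container_of(), switching from lru to slab_list only requires renaming the member at every slab call site, which is exactly what the hunks below do.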
Diffstat (limited to 'mm')
-rw-r--r--	mm/slab.c	49
1 file changed, 25 insertions, 24 deletions
diff --git a/mm/slab.c b/mm/slab.c
index 284ab737faee..e9eaa8fce231 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1674,8 +1674,8 @@ static void slabs_destroy(struct kmem_cache *cachep, struct list_head *list)
 {
 	struct page *page, *n;
 
-	list_for_each_entry_safe(page, n, list, lru) {
-		list_del(&page->lru);
+	list_for_each_entry_safe(page, n, list, slab_list) {
+		list_del(&page->slab_list);
 		slab_destroy(cachep, page);
 	}
 }
@@ -2231,8 +2231,8 @@ static int drain_freelist(struct kmem_cache *cache,
 			goto out;
 		}
 
-		page = list_entry(p, struct page, lru);
-		list_del(&page->lru);
+		page = list_entry(p, struct page, slab_list);
+		list_del(&page->slab_list);
 		n->free_slabs--;
 		n->total_slabs--;
 		/*
@@ -2691,13 +2691,13 @@ static void cache_grow_end(struct kmem_cache *cachep, struct page *page)
 	if (!page)
 		return;
 
-	INIT_LIST_HEAD(&page->lru);
+	INIT_LIST_HEAD(&page->slab_list);
 	n = get_node(cachep, page_to_nid(page));
 
 	spin_lock(&n->list_lock);
 	n->total_slabs++;
 	if (!page->active) {
-		list_add_tail(&page->lru, &(n->slabs_free));
+		list_add_tail(&page->slab_list, &n->slabs_free);
 		n->free_slabs++;
 	} else
 		fixup_slab_list(cachep, n, page, &list);
@@ -2806,9 +2806,9 @@ static inline void fixup_slab_list(struct kmem_cache *cachep,
 				void **list)
 {
 	/* move slabp to correct slabp list: */
-	list_del(&page->lru);
+	list_del(&page->slab_list);
 	if (page->active == cachep->num) {
-		list_add(&page->lru, &n->slabs_full);
+		list_add(&page->slab_list, &n->slabs_full);
 		if (OBJFREELIST_SLAB(cachep)) {
 #if DEBUG
 			/* Poisoning will be done without holding the lock */
@@ -2822,7 +2822,7 @@ static inline void fixup_slab_list(struct kmem_cache *cachep,
 			page->freelist = NULL;
 		}
 	} else
-		list_add(&page->lru, &n->slabs_partial);
+		list_add(&page->slab_list, &n->slabs_partial);
 }
 
 /* Try to find non-pfmemalloc slab if needed */
@@ -2845,20 +2845,20 @@ static noinline struct page *get_valid_first_slab(struct kmem_cache_node *n,
 	}
 
 	/* Move pfmemalloc slab to the end of list to speed up next search */
-	list_del(&page->lru);
+	list_del(&page->slab_list);
 	if (!page->active) {
-		list_add_tail(&page->lru, &n->slabs_free);
+		list_add_tail(&page->slab_list, &n->slabs_free);
 		n->free_slabs++;
 	} else
-		list_add_tail(&page->lru, &n->slabs_partial);
+		list_add_tail(&page->slab_list, &n->slabs_partial);
 
-	list_for_each_entry(page, &n->slabs_partial, lru) {
+	list_for_each_entry(page, &n->slabs_partial, slab_list) {
 		if (!PageSlabPfmemalloc(page))
 			return page;
 	}
 
 	n->free_touched = 1;
-	list_for_each_entry(page, &n->slabs_free, lru) {
+	list_for_each_entry(page, &n->slabs_free, slab_list) {
 		if (!PageSlabPfmemalloc(page)) {
 			n->free_slabs--;
 			return page;
@@ -2873,11 +2873,12 @@ static struct page *get_first_slab(struct kmem_cache_node *n, bool pfmemalloc)
 	struct page *page;
 
 	assert_spin_locked(&n->list_lock);
-	page = list_first_entry_or_null(&n->slabs_partial, struct page, lru);
+	page = list_first_entry_or_null(&n->slabs_partial, struct page,
+					slab_list);
 	if (!page) {
 		n->free_touched = 1;
 		page = list_first_entry_or_null(&n->slabs_free, struct page,
-						lru);
+						slab_list);
 		if (page)
 			n->free_slabs--;
 	}
@@ -3378,29 +3379,29 @@ static void free_block(struct kmem_cache *cachep, void **objpp,
 		objp = objpp[i];
 
 		page = virt_to_head_page(objp);
-		list_del(&page->lru);
+		list_del(&page->slab_list);
 		check_spinlock_acquired_node(cachep, node);
 		slab_put_obj(cachep, page, objp);
 		STATS_DEC_ACTIVE(cachep);
 
 		/* fixup slab chains */
 		if (page->active == 0) {
-			list_add(&page->lru, &n->slabs_free);
+			list_add(&page->slab_list, &n->slabs_free);
 			n->free_slabs++;
 		} else {
 			/* Unconditionally move a slab to the end of the
 			 * partial list on free - maximum time for the
 			 * other objects to be freed, too.
 			 */
-			list_add_tail(&page->lru, &n->slabs_partial);
+			list_add_tail(&page->slab_list, &n->slabs_partial);
 		}
 	}
 
 	while (n->free_objects > n->free_limit && !list_empty(&n->slabs_free)) {
 		n->free_objects -= cachep->num;
 
-		page = list_last_entry(&n->slabs_free, struct page, lru);
-		list_move(&page->lru, list);
+		page = list_last_entry(&n->slabs_free, struct page, slab_list);
+		list_move(&page->slab_list, list);
 		n->free_slabs--;
 		n->total_slabs--;
 	}
@@ -3438,7 +3439,7 @@ free_done:
 		int i = 0;
 		struct page *page;
 
-		list_for_each_entry(page, &n->slabs_free, lru) {
+		list_for_each_entry(page, &n->slabs_free, slab_list) {
 			BUG_ON(page->active);
 
 			i++;
@@ -4302,9 +4303,9 @@ static int leaks_show(struct seq_file *m, void *p)
 		check_irq_on();
 		spin_lock_irq(&n->list_lock);
 
-		list_for_each_entry(page, &n->slabs_full, lru)
+		list_for_each_entry(page, &n->slabs_full, slab_list)
 			handle_slab(x, cachep, page);
-		list_for_each_entry(page, &n->slabs_partial, lru)
+		list_for_each_entry(page, &n->slabs_partial, slab_list)
 			handle_slab(x, cachep, page);
 		spin_unlock_irq(&n->list_lock);
 	}