aboutsummaryrefslogtreecommitdiffstats
path: root/mm
diff options
context:
space:
mode:
authorGlauber Costa <glommer@parallels.com>2012-10-22 10:05:36 -0400
committerPekka Enberg <penberg@kernel.org>2012-10-24 04:58:03 -0400
commit1b4f59e356cc94929305bd107b7f38eec62715ad (patch)
treede809be913f6491a61dcac6dabbf2cb0a1012b45 /mm
parentb4f591c45f1de0f5b8ad8da508a892b571a53202 (diff)
slub: Commonize slab_cache field in struct page
Right now, slab and slub have fields in struct page to derive which cache a page belongs to, but they do it slightly differently.

slab uses a field called slab_cache, that lives in the third double word. slub uses a field called "slab", living outside of the doublewords area.

Ideally, we could use the same field for this. Since slub heavily makes use of the doubleword region, there isn't really much room to move slub's slab_cache field around. Since slab does not have such strict placement restrictions, we can move it outside the doubleword area.

The naming used by slab, "slab_cache", is less confusing, and it is preferred over slub's generic "slab".

Signed-off-by: Glauber Costa <glommer@parallels.com>
Acked-by: Christoph Lameter <cl@linux.com>
CC: David Rientjes <rientjes@google.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
Diffstat (limited to 'mm')
-rw-r--r--mm/slub.c24
1 files changed, 12 insertions, 12 deletions
diff --git a/mm/slub.c b/mm/slub.c
index 16274b273c61..35483e0ab6bc 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1092,11 +1092,11 @@ static noinline struct kmem_cache_node *free_debug_processing(
1092 if (!check_object(s, page, object, SLUB_RED_ACTIVE)) 1092 if (!check_object(s, page, object, SLUB_RED_ACTIVE))
1093 goto out; 1093 goto out;
1094 1094
1095 if (unlikely(s != page->slab)) { 1095 if (unlikely(s != page->slab_cache)) {
1096 if (!PageSlab(page)) { 1096 if (!PageSlab(page)) {
1097 slab_err(s, page, "Attempt to free object(0x%p) " 1097 slab_err(s, page, "Attempt to free object(0x%p) "
1098 "outside of slab", object); 1098 "outside of slab", object);
1099 } else if (!page->slab) { 1099 } else if (!page->slab_cache) {
1100 printk(KERN_ERR 1100 printk(KERN_ERR
1101 "SLUB <none>: no slab for object 0x%p.\n", 1101 "SLUB <none>: no slab for object 0x%p.\n",
1102 object); 1102 object);
@@ -1357,7 +1357,7 @@ static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
1357 goto out; 1357 goto out;
1358 1358
1359 inc_slabs_node(s, page_to_nid(page), page->objects); 1359 inc_slabs_node(s, page_to_nid(page), page->objects);
1360 page->slab = s; 1360 page->slab_cache = s;
1361 __SetPageSlab(page); 1361 __SetPageSlab(page);
1362 if (page->pfmemalloc) 1362 if (page->pfmemalloc)
1363 SetPageSlabPfmemalloc(page); 1363 SetPageSlabPfmemalloc(page);
@@ -1424,7 +1424,7 @@ static void rcu_free_slab(struct rcu_head *h)
1424 else 1424 else
1425 page = container_of((struct list_head *)h, struct page, lru); 1425 page = container_of((struct list_head *)h, struct page, lru);
1426 1426
1427 __free_slab(page->slab, page); 1427 __free_slab(page->slab_cache, page);
1428} 1428}
1429 1429
1430static void free_slab(struct kmem_cache *s, struct page *page) 1430static void free_slab(struct kmem_cache *s, struct page *page)
@@ -2617,9 +2617,9 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
2617 2617
2618 page = virt_to_head_page(x); 2618 page = virt_to_head_page(x);
2619 2619
2620 if (kmem_cache_debug(s) && page->slab != s) { 2620 if (kmem_cache_debug(s) && page->slab_cache != s) {
2621 pr_err("kmem_cache_free: Wrong slab cache. %s but object" 2621 pr_err("kmem_cache_free: Wrong slab cache. %s but object"
2622 " is from %s\n", page->slab->name, s->name); 2622 " is from %s\n", page->slab_cache->name, s->name);
2623 WARN_ON_ONCE(1); 2623 WARN_ON_ONCE(1);
2624 return; 2624 return;
2625 } 2625 }
@@ -3418,7 +3418,7 @@ size_t ksize(const void *object)
3418 return PAGE_SIZE << compound_order(page); 3418 return PAGE_SIZE << compound_order(page);
3419 } 3419 }
3420 3420
3421 return slab_ksize(page->slab); 3421 return slab_ksize(page->slab_cache);
3422} 3422}
3423EXPORT_SYMBOL(ksize); 3423EXPORT_SYMBOL(ksize);
3424 3424
@@ -3443,8 +3443,8 @@ bool verify_mem_not_deleted(const void *x)
3443 } 3443 }
3444 3444
3445 slab_lock(page); 3445 slab_lock(page);
3446 if (on_freelist(page->slab, page, object)) { 3446 if (on_freelist(page->slab_cache, page, object)) {
3447 object_err(page->slab, page, object, "Object is on free-list"); 3447 object_err(page->slab_cache, page, object, "Object is on free-list");
3448 rv = false; 3448 rv = false;
3449 } else { 3449 } else {
3450 rv = true; 3450 rv = true;
@@ -3475,7 +3475,7 @@ void kfree(const void *x)
3475 __free_pages(page, compound_order(page)); 3475 __free_pages(page, compound_order(page));
3476 return; 3476 return;
3477 } 3477 }
3478 slab_free(page->slab, page, object, _RET_IP_); 3478 slab_free(page->slab_cache, page, object, _RET_IP_);
3479} 3479}
3480EXPORT_SYMBOL(kfree); 3480EXPORT_SYMBOL(kfree);
3481 3481
@@ -3686,11 +3686,11 @@ static void __init kmem_cache_bootstrap_fixup(struct kmem_cache *s)
3686 3686
3687 if (n) { 3687 if (n) {
3688 list_for_each_entry(p, &n->partial, lru) 3688 list_for_each_entry(p, &n->partial, lru)
3689 p->slab = s; 3689 p->slab_cache = s;
3690 3690
3691#ifdef CONFIG_SLUB_DEBUG 3691#ifdef CONFIG_SLUB_DEBUG
3692 list_for_each_entry(p, &n->full, lru) 3692 list_for_each_entry(p, &n->full, lru)
3693 p->slab = s; 3693 p->slab_cache = s;
3694#endif 3694#endif
3695 } 3695 }
3696 } 3696 }