author	Lai Jiangshan <laijs@cn.fujitsu.com>	2011-03-10 02:22:00 -0500
committer	Pekka Enberg <penberg@kernel.org>	2011-03-11 11:06:34 -0500
commit	da9a638c6f8fc0633fa94a334f1c053f5e307177 (patch)
tree	786966087ccd4fd2c97757ce7b722f728c17ca2b /mm/slub.c
parent	ab9a0f196f2f4f080df54402493ea3dc31b5243e (diff)
slub,rcu: don't assume the size of struct rcu_head
The size of struct rcu_head may change. When it becomes larger, it will
pollute the page array, so in that situation we reserve space for
struct rcu_head at the end of the slab when the slab is allocated.

Changes from V1: use VM_BUG_ON instead of BUG_ON.

Acked-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Lai Jiangshan <laijs@cn.fujitsu.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
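To make the arithmetic concrete, here is a minimal userspace sketch of
the reservation logic. The struct definitions, PAGE_SIZE value, and
slab order below are illustrative stand-ins, not the kernel's types;
the extra debug pointer in the stand-in rcu_head is there only to make
it larger than list_head so the reserve path triggers:

	#include <stdio.h>
	#include <stddef.h>

	#define PAGE_SIZE 4096UL

	/* Stand-ins for the kernel types (sizes are hypothetical). */
	struct list_head { void *next, *prev; };
	struct rcu_head  { void *next; void (*func)(struct rcu_head *); void *debug; };

	/* Mirrors need_reserve_slab_rcu: reserve only when rcu_head no
	 * longer fits in the space occupied by page->lru. */
	#define need_reserve_slab_rcu \
		(sizeof(struct list_head) < sizeof(struct rcu_head))

	int main(void)
	{
		unsigned int order = 1;	/* example slab order: two pages */
		size_t reserved = need_reserve_slab_rcu ? sizeof(struct rcu_head) : 0;

		/* The rcu_head lives in the last 'reserved' bytes of the
		 * slab; objects are packed from the start, so they can
		 * never overlap it. */
		size_t offset = (PAGE_SIZE << order) - reserved;

		printf("need_reserve=%d reserved=%zu head_offset=%zu\n",
		       need_reserve_slab_rcu, reserved, offset);
		return 0;
	}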
Diffstat (limited to 'mm/slub.c')
-rw-r--r--	mm/slub.c	30
1 file changed, 25 insertions(+), 5 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index d3d17677bab5..ebba3eb19369 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1254,21 +1254,38 @@ static void __free_slab(struct kmem_cache *s, struct page *page)
 	__free_pages(page, order);
 }
 
+#define need_reserve_slab_rcu						\
+	(sizeof(((struct page *)NULL)->lru) < sizeof(struct rcu_head))
+
 static void rcu_free_slab(struct rcu_head *h)
 {
 	struct page *page;
 
-	page = container_of((struct list_head *)h, struct page, lru);
+	if (need_reserve_slab_rcu)
+		page = virt_to_head_page(h);
+	else
+		page = container_of((struct list_head *)h, struct page, lru);
+
 	__free_slab(page->slab, page);
 }
 
 static void free_slab(struct kmem_cache *s, struct page *page)
 {
 	if (unlikely(s->flags & SLAB_DESTROY_BY_RCU)) {
-		/*
-		 * RCU free overloads the RCU head over the LRU
-		 */
-		struct rcu_head *head = (void *)&page->lru;
+		struct rcu_head *head;
+
+		if (need_reserve_slab_rcu) {
+			int order = compound_order(page);
+			int offset = (PAGE_SIZE << order) - s->reserved;
+
+			VM_BUG_ON(s->reserved != sizeof(*head));
+			head = page_address(page) + offset;
+		} else {
+			/*
+			 * RCU free overloads the RCU head over the LRU
+			 */
+			head = (void *)&page->lru;
+		}
 
 		call_rcu(head, rcu_free_slab);
 	} else
@@ -2356,6 +2373,9 @@ static int kmem_cache_open(struct kmem_cache *s,
 	s->flags = kmem_cache_flags(size, flags, name, ctor);
 	s->reserved = 0;
 
+	if (need_reserve_slab_rcu && (s->flags & SLAB_DESTROY_BY_RCU))
+		s->reserved = sizeof(struct rcu_head);
+
 	if (!calculate_sizes(s, -1))
 		goto error;
 	if (disable_higher_order_debug) {
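Read together, the hunks pick the rcu_head location once at free time
and invert that choice in the RCU callback. The following standalone
sketch shows the selection; slab_rcu_head, slab_base, and slab_bytes
are hypothetical names standing in for the open-coded logic in
free_slab, page_address(page), and PAGE_SIZE << compound_order(page):

	#include <assert.h>
	#include <stddef.h>

	struct list_head { void *next, *prev; };
	struct rcu_head  { void *next; void (*func)(struct rcu_head *); };

	#define need_reserve_slab_rcu \
		(sizeof(struct list_head) < sizeof(struct rcu_head))

	/* Hypothetical helper: where free_slab places the rcu_head.
	 * With the tail reservation, rcu_free_slab can recover the page
	 * from the head pointer alone (virt_to_head_page); without it,
	 * the head sits on lru and container_of() walks back to the
	 * struct page. */
	static struct rcu_head *slab_rcu_head(void *slab_base, size_t slab_bytes,
					      struct list_head *lru, size_t reserved)
	{
		if (need_reserve_slab_rcu) {
			/* Corresponds to the VM_BUG_ON in the patch. */
			assert(reserved == sizeof(struct rcu_head));
			return (struct rcu_head *)((char *)slab_base + slab_bytes - reserved);
		}
		return (struct rcu_head *)lru;
	}

	int main(void)
	{
		char slab[4096];
		struct list_head lru = { 0, 0 };
		size_t reserved = need_reserve_slab_rcu ? sizeof(struct rcu_head) : 0;
		struct rcu_head *head = slab_rcu_head(slab, sizeof(slab), &lru, reserved);

		(void)head;
		return 0;
	}

The design choice this encodes: reserving the tail costs a few bytes
per slab, but only in kernels where rcu_head has actually outgrown
page->lru; on configurations where it still fits, the old zero-cost
overloading of lru is kept unchanged.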