 mm/slub.c | 31 +++++++++++++++----------------
 1 file changed, 15 insertions(+), 16 deletions(-)

diff --git a/mm/slub.c b/mm/slub.c
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1198,19 +1198,15 @@ static __always_inline int slab_trylock(struct page *page)
 /*
  * Management of partially allocated slabs
  */
-static void add_partial_tail(struct kmem_cache_node *n, struct page *page)
-{
-	spin_lock(&n->list_lock);
-	n->nr_partial++;
-	list_add_tail(&page->lru, &n->partial);
-	spin_unlock(&n->list_lock);
-}
-
-static void add_partial(struct kmem_cache_node *n, struct page *page)
+static void add_partial(struct kmem_cache_node *n,
+				struct page *page, int tail)
 {
 	spin_lock(&n->list_lock);
 	n->nr_partial++;
-	list_add(&page->lru, &n->partial);
+	if (tail)
+		list_add_tail(&page->lru, &n->partial);
+	else
+		list_add(&page->lru, &n->partial);
 	spin_unlock(&n->list_lock);
 }
 
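The two list helpers above collapse into a single add_partial() that takes a tail flag: nonzero queues the page with list_add_tail(), zero with list_add(). As a reading aid only, here is a minimal user-space model of that pattern; the node type, the omitted list_lock/nr_partial bookkeeping, and main() are illustrative stand-ins, not the kernel code.

	/* Sketch: one helper, an explicit "tail" flag picking the list end. */
	#include <stdio.h>

	struct node {
		int id;
		struct node *prev, *next;
	};

	/* Circular doubly-linked list with a dedicated head, as in <linux/list.h>. */
	static struct node partial = { .id = -1, .prev = &partial, .next = &partial };

	static void list_insert(struct node *new, struct node *prev, struct node *next)
	{
		next->prev = new;
		new->next = next;
		new->prev = prev;
		prev->next = new;
	}

	/* tail != 0: queue last (cold slabs); tail == 0: queue first (hot slabs). */
	static void add_partial(struct node *head, struct node *page, int tail)
	{
		if (tail)
			list_insert(page, head->prev, head);	/* list_add_tail() */
		else
			list_insert(page, head, head->next);	/* list_add() */
	}

	int main(void)
	{
		struct node a = { .id = 1 }, b = { .id = 2 }, c = { .id = 3 };

		add_partial(&partial, &a, 0);	/* hot: next slab to be picked */
		add_partial(&partial, &b, 1);	/* cold: parked at the end */
		add_partial(&partial, &c, 0);	/* hot: jumps ahead of everything */

		for (struct node *p = partial.next; p != &partial; p = p->next)
			printf("%d ", p->id);	/* prints: 3 1 2 */
		printf("\n");
		return 0;
	}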
@@ -1339,7 +1335,7 @@ static struct page *get_partial(struct kmem_cache *s, gfp_t flags, int node)
  *
  * On exit the slab lock will have been dropped.
  */
-static void unfreeze_slab(struct kmem_cache *s, struct page *page)
+static void unfreeze_slab(struct kmem_cache *s, struct page *page, int tail)
 {
 	struct kmem_cache_node *n = get_node(s, page_to_nid(page));
 
@@ -1347,7 +1343,7 @@ static void unfreeze_slab(struct kmem_cache *s, struct page *page)
 	if (page->inuse) {
 
 		if (page->freelist)
-			add_partial(n, page);
+			add_partial(n, page, tail);
 		else if (SlabDebug(page) && (s->flags & SLAB_STORE_USER))
 			add_full(n, page);
 		slab_unlock(page);
@@ -1362,7 +1358,7 @@ static void unfreeze_slab(struct kmem_cache *s, struct page *page)
 		 * partial list stays small. kmem_cache_shrink can
 		 * reclaim empty slabs from the partial list.
 		 */
-		add_partial_tail(n, page);
+		add_partial(n, page, 1);
 		slab_unlock(page);
 	} else {
 		slab_unlock(page);
@@ -1377,6 +1373,7 @@ static void unfreeze_slab(struct kmem_cache *s, struct page *page)
 static void deactivate_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
 {
 	struct page *page = c->page;
+	int tail = 1;
 	/*
 	 * Merge cpu freelist into freelist. Typically we get here
 	 * because both freelists are empty. So this is unlikely
@@ -1385,6 +1382,8 @@ static void deactivate_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
 	while (unlikely(c->freelist)) {
 		void **object;
 
+		tail = 0;	/* Hot objects. Put the slab first */
+
 		/* Retrieve object from cpu_freelist */
 		object = c->freelist;
 		c->freelist = c->freelist[c->offset];
@@ -1395,7 +1394,7 @@ static void deactivate_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
 		page->inuse--;
 	}
 	c->page = NULL;
-	unfreeze_slab(s, page);
+	unfreeze_slab(s, page, tail);
 }
 
 static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
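Read together, the three deactivate_slab() hunks implement the placement heuristic: tail starts at 1, and draining even one object off the cpu freelist flips it to 0, so a cache-hot slab is handed to unfreeze_slab() for head insertion and gets reused first. A condensed view, assembled only from the lines shown above (elided code marked, kernel context assumed):

	int tail = 1;			/* default: park the slab at the tail */

	while (unlikely(c->freelist)) {
		void **object;

		tail = 0;	/* Hot objects. Put the slab first */

		/* Retrieve object from cpu_freelist */
		object = c->freelist;
		c->freelist = c->freelist[c->offset];

		/* ... object is returned to page->freelist ... */
		page->inuse--;
	}
	c->page = NULL;
	unfreeze_slab(s, page, tail);	/* may end in add_partial(n, page, tail) */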
@@ -1617,7 +1616,7 @@ checks_ok:
 	 * then add it.
 	 */
 	if (unlikely(!prior))
-		add_partial_tail(get_node(s, page_to_nid(page)), page);
+		add_partial(get_node(s, page_to_nid(page)), page, 1);
 
 out_unlock:
 	slab_unlock(page);
@@ -2025,7 +2024,7 @@ static struct kmem_cache_node *early_kmem_cache_node_alloc(gfp_t gfpflags,
 #endif
 	init_kmem_cache_node(n);
 	atomic_long_inc(&n->nr_slabs);
-	add_partial(n, page);
+	add_partial(n, page, 0);
 	return n;
 }
 
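Across the whole patch the tail argument encodes one convention, summarized below as annotation only (not compilable on its own, not part of the patch):

	add_partial(n, page, 1);	/* unfreeze_slab()'s keep-an-empty-slab path and the
					 * free slowpath (checks_ok:): queue at the tail so
					 * the hot head of ->partial is used first and
					 * kmem_cache_shrink() can still reclaim empty slabs */
	add_partial(n, page, 0);	/* early_kmem_cache_node_alloc() and slabs deactivated
					 * with hot objects: queue at the head, the same
					 * placement the old add_partial() gave */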