Diffstat (limited to 'mm/slub.c')

 mm/slub.c | 55 ++++++++++++++++++++++++++++++++++++-------------------
 1 file changed, 36 insertions(+), 19 deletions(-)
@@ -133,6 +133,9 @@
  */
 #define SLUB_UNIMPLEMENTED (SLAB_DEBUG_INITIAL)
 
+/* Minimum number of partial slabs */
+#define MIN_PARTIAL 2
+
 #define DEBUG_DEFAULT_FLAGS (SLAB_DEBUG_FREE | SLAB_RED_ZONE | \
 		SLAB_POISON | SLAB_STORE_USER)
 /*
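
For orientation (this is not kernel code): a small user-space sketch of the policy that MIN_PARTIAL enables. Names such as node_state and keep_empty_slab are hypothetical; only the constant and the threshold check come from the patch.

    #include <stdbool.h>
    #include <stdio.h>

    #define MIN_PARTIAL 2   /* per-node floor of partial slabs, as in the patch */

    /* hypothetical stand-in for the per-node bookkeeping */
    struct node_state {
            int nr_partial;         /* slabs currently on the partial list */
    };

    /* An empty slab is worth caching while the node holds fewer than
     * MIN_PARTIAL partial slabs; beyond that it goes back to the page
     * allocator. */
    static bool keep_empty_slab(const struct node_state *n)
    {
            return n->nr_partial < MIN_PARTIAL;
    }

    int main(void)
    {
            struct node_state n = { .nr_partial = 0 };

            for (int i = 0; i < 4; i++) {
                    printf("nr_partial=%d -> %s\n", n.nr_partial,
                           keep_empty_slab(&n) ? "keep on partial list"
                                               : "discard slab");
                    n.nr_partial++;
            }
            return 0;
    }
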
@@ -664,16 +667,8 @@ static int on_freelist(struct kmem_cache *s, struct page *page, void *search)
 /*
  * Tracking of fully allocated slabs for debugging
  */
-static void add_full(struct kmem_cache *s, struct page *page)
+static void add_full(struct kmem_cache_node *n, struct page *page)
 {
-        struct kmem_cache_node *n;
-
-        VM_BUG_ON(!irqs_disabled());
-
-        if (!(s->flags & SLAB_STORE_USER))
-                return;
-
-        n = get_node(s, page_to_nid(page));
         spin_lock(&n->list_lock);
         list_add(&page->lru, &n->full);
         spin_unlock(&n->list_lock);
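
The hunk above moves the SLAB_STORE_USER test and the node lookup out of add_full() and into its caller (see the putback_slab() hunk further down), leaving add_full() with only the locked list insertion. A hedged, stand-alone sketch of that division of labor; struct node, track_full_slab and the flag value are assumptions made for illustration, not kernel definitions.

    #include <pthread.h>
    #include <stdio.h>

    #define SLAB_STORE_USER 0x1     /* illustrative flag value only */

    /* stand-in for struct kmem_cache_node: a lock plus, for brevity,
     * a counter instead of the real full-slab list */
    struct node {
            pthread_mutex_t list_lock;
            int nr_full;
    };

    /* Post-patch shape: add_full() receives the node and only does the
     * locked insertion (list_add(&page->lru, &n->full) in the kernel). */
    static void add_full(struct node *n)
    {
            pthread_mutex_lock(&n->list_lock);
            n->nr_full++;
            pthread_mutex_unlock(&n->list_lock);
    }

    /* The caller now carries the checks add_full() used to do itself:
     * test the debug flag, then hand over the already-resolved node. */
    static void track_full_slab(unsigned int flags, struct node *n)
    {
            if (!(flags & SLAB_STORE_USER))
                    return;
            add_full(n);
    }

    int main(void)
    {
            struct node n = { PTHREAD_MUTEX_INITIALIZER, 0 };

            track_full_slab(SLAB_STORE_USER, &n);
            printf("full slabs tracked: %d\n", n.nr_full);
            return 0;
    }
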
@@ -982,10 +977,16 @@ static __always_inline int slab_trylock(struct page *page)
 /*
  * Management of partially allocated slabs
  */
-static void add_partial(struct kmem_cache *s, struct page *page)
+static void add_partial_tail(struct kmem_cache_node *n, struct page *page)
 {
-        struct kmem_cache_node *n = get_node(s, page_to_nid(page));
+        spin_lock(&n->list_lock);
+        n->nr_partial++;
+        list_add_tail(&page->lru, &n->partial);
+        spin_unlock(&n->list_lock);
+}
 
+static void add_partial(struct kmem_cache_node *n, struct page *page)
+{
         spin_lock(&n->list_lock);
         n->nr_partial++;
         list_add(&page->lru, &n->partial);
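
add_partial_tail() differs from add_partial() only in using list_add_tail() instead of list_add(), so slabs queued with it end up behind the partially used ones. A minimal user-space sketch of that ordering, with a hand-rolled intrusive list standing in for the kernel's struct list_head (all names here are illustrative):

    #include <stdio.h>

    /* minimal intrusive doubly linked list, standing in for the
     * kernel's struct list_head */
    struct list {
            struct list *prev, *next;
    };

    static void list_init(struct list *h) { h->prev = h->next = h; }

    static void list_add_head(struct list *e, struct list *h)
    {
            e->next = h->next;
            e->prev = h;
            h->next->prev = e;
            h->next = e;
    }

    static void list_add_tail(struct list *e, struct list *h)
    {
            e->prev = h->prev;
            e->next = h;
            h->prev->next = e;
            h->prev = e;
    }

    struct slab {
            struct list lru;        /* first member, so the cast below is valid */
            const char *label;
    };

    int main(void)
    {
            struct list partial;
            struct slab half  = { .label = "half-used slab" };
            struct slab empty = { .label = "empty slab" };

            list_init(&partial);
            /* add_partial(): partially used slabs go to the head and are
             * consumed first */
            list_add_head(&half.lru, &partial);
            /* add_partial_tail(): empty slabs go to the tail so the fuller
             * slabs are drained before them (better defragmentation) */
            list_add_tail(&empty.lru, &partial);

            for (struct list *p = partial.next; p != &partial; p = p->next)
                    printf("%s\n", ((struct slab *)p)->label);
            return 0;
    }
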
@@ -1085,7 +1086,7 @@ static struct page *get_any_partial(struct kmem_cache *s, gfp_t flags)
                 n = get_node(s, zone_to_nid(*z));
 
                 if (n && cpuset_zone_allowed_hardwall(*z, flags) &&
-                                n->nr_partial > 2) {
+                                n->nr_partial > MIN_PARTIAL) {
                         page = get_partial_node(n);
                         if (page)
                                 return page;
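
With the magic number 2 replaced by MIN_PARTIAL, get_any_partial() only takes a slab from a remote node that holds more than MIN_PARTIAL partial slabs, so every node keeps a small reserve. A rough sketch of that check; struct node_view is a hypothetical stand-in for the real per-node data.

    #include <stdbool.h>
    #include <stdio.h>

    #define MIN_PARTIAL 2

    /* hypothetical view of a remote node, for illustration only */
    struct node_view {
            int id;
            int nr_partial;
            bool allowed;           /* stands in for the cpuset hardwall check */
    };

    /* Mirrors the condition in get_any_partial(): a remote node is only a
     * candidate if it has more than MIN_PARTIAL partial slabs, so taking
     * one still leaves it a reserve. */
    static bool can_steal_from(const struct node_view *n)
    {
            return n->allowed && n->nr_partial > MIN_PARTIAL;
    }

    int main(void)
    {
            struct node_view nodes[] = {
                    { 0, 1, true }, { 1, 3, true }, { 2, 5, false },
            };

            for (unsigned int i = 0; i < sizeof(nodes) / sizeof(nodes[0]); i++)
                    printf("node %d: %s\n", nodes[i].id,
                           can_steal_from(&nodes[i]) ? "take a partial slab"
                                                     : "leave alone");
            return 0;
    }
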
@@ -1119,15 +1120,31 @@ static struct page *get_partial(struct kmem_cache *s, gfp_t flags, int node)
  */
 static void putback_slab(struct kmem_cache *s, struct page *page)
 {
+        struct kmem_cache_node *n = get_node(s, page_to_nid(page));
+
         if (page->inuse) {
+
                 if (page->freelist)
-                        add_partial(s, page);
-                else if (PageError(page))
-                        add_full(s, page);
+                        add_partial(n, page);
+                else if (PageError(page) && (s->flags & SLAB_STORE_USER))
+                        add_full(n, page);
                 slab_unlock(page);
+
         } else {
-                slab_unlock(page);
-                discard_slab(s, page);
+                if (n->nr_partial < MIN_PARTIAL) {
+                        /*
+                         * Adding an empty page to the partial slabs in order
+                         * to avoid page allocator overhead. This page needs to
+                         * come after all the others that are not fully empty
+                         * in order to make sure that we do maximum
+                         * defragmentation.
+                         */
+                        add_partial_tail(n, page);
+                        slab_unlock(page);
+                } else {
+                        slab_unlock(page);
+                        discard_slab(s, page);
+                }
         }
 }
 
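
Putting the pieces together, putback_slab() now chooses among four outcomes. The sketch below approximates that decision logic in plain C; it is not the kernel function itself, and struct slab_state, struct node_state and the flag value are assumptions made for illustration.

    #include <stdbool.h>
    #include <stdio.h>

    #define MIN_PARTIAL     2
    #define SLAB_STORE_USER 0x1     /* illustrative flag value only */

    /* assumed, simplified stand-ins for the slab and per-node state */
    struct slab_state {
            int  inuse;             /* objects currently allocated */
            bool has_freelist;      /* at least one free object left */
            bool debug;             /* the PageError() marker in the patch */
    };

    struct node_state {
            int nr_partial;
    };

    /* Rough shape of putback_slab() after the patch: partially used slabs
     * go to the partial list, fully used debug slabs to the full list, and
     * empty slabs are cached at the tail of the partial list while the node
     * is below MIN_PARTIAL, otherwise returned to the page allocator. */
    static const char *putback_decision(const struct slab_state *s,
                                        const struct node_state *n,
                                        unsigned int cache_flags)
    {
            if (s->inuse) {
                    if (s->has_freelist)
                            return "add_partial";
                    if (s->debug && (cache_flags & SLAB_STORE_USER))
                            return "add_full";
                    return "just unlock";
            }
            if (n->nr_partial < MIN_PARTIAL)
                    return "add_partial_tail (cache the empty slab)";
            return "discard_slab (back to the page allocator)";
    }

    int main(void)
    {
            struct node_state n = { .nr_partial = 1 };
            struct slab_state part  = { .inuse = 3, .has_freelist = true };
            struct slab_state empty = { .inuse = 0, .has_freelist = true };

            printf("partially used slab:    %s\n", putback_decision(&part, &n, 0));
            printf("empty slab, small node: %s\n", putback_decision(&empty, &n, 0));
            n.nr_partial = 4;
            printf("empty slab, full node:  %s\n", putback_decision(&empty, &n, 0));
            return 0;
    }
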
@@ -1326,7 +1343,7 @@ checks_ok:
          * then add it.
          */
         if (unlikely(!prior))
-                add_partial(s, page);
+                add_partial(get_node(s, page_to_nid(page)), page);
 
 out_unlock:
         slab_unlock(page);
@@ -1535,7 +1552,7 @@ static struct kmem_cache_node * __init early_kmem_cache_node_alloc(gfp_t gfpflag
         init_object(kmalloc_caches, n, 1);
         init_kmem_cache_node(n);
         atomic_long_inc(&n->nr_slabs);
-        add_partial(kmalloc_caches, page);
+        add_partial(n, page);
         return n;
 }
 