path: root/mm/slub.c
author     Linus Torvalds <torvalds@linux-foundation.org>  2014-02-02 14:30:08 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>  2014-02-02 14:30:08 -0500
commit     7b383bef25e493cc4f047e44ebd6c3ccfd6d1cc5 (patch)
tree       4a8379bb6d5929cf72c916da8e5bc7532aa43841 /mm/slub.c
parent     87af5e5c22568201dfbda5cac9c76e96982adc9c (diff)
parent     cb8ee1a3d429f8898972c869dd4792afb04e961a (diff)
Merge branch 'slab/next' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/linux
Pull SLAB changes from Pekka Enberg:
 "Random bug fixes that have accumulated in my inbox over the past few
  months"

* 'slab/next' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/linux:
  mm: Fix warning on make htmldocs caused by slab.c
  mm: slub: work around unneeded lockdep warning
  mm: sl[uo]b: fix misleading comments
  slub: Fix possible format string bug.
  slub: use lockdep_assert_held
  slub: Fix calculation of cpu slabs
  slab.h: remove duplicate kmalloc declaration and fix kernel-doc warnings
Diffstat (limited to 'mm/slub.c')
-rw-r--r--   mm/slub.c   56
1 file changed, 34 insertions, 22 deletions
diff --git a/mm/slub.c b/mm/slub.c
index 2b1a6970e46f..7e3e0458bce4 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1000,23 +1000,22 @@ static inline void slab_free_hook(struct kmem_cache *s, void *x)
 
 /*
  * Tracking of fully allocated slabs for debugging purposes.
- *
- * list_lock must be held.
  */
 static void add_full(struct kmem_cache *s,
        struct kmem_cache_node *n, struct page *page)
 {
+       lockdep_assert_held(&n->list_lock);
+
        if (!(s->flags & SLAB_STORE_USER))
                return;
 
        list_add(&page->lru, &n->full);
 }
 
-/*
- * list_lock must be held.
- */
-static void remove_full(struct kmem_cache *s, struct page *page)
+static void remove_full(struct kmem_cache *s, struct kmem_cache_node *n, struct page *page)
 {
+       lockdep_assert_held(&n->list_lock);
+
        if (!(s->flags & SLAB_STORE_USER))
                return;
 
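The hunk above is the heart of the "slub: use lockdep_assert_held" change: free-text "list_lock must be held" comments are replaced with lockdep_assert_held(), which records the locking rule in code and, with CONFIG_PROVE_LOCKING enabled, warns at runtime if a caller reaches the function without the lock (it compiles away otherwise). A minimal sketch of the pattern, using a made-up demo_node type rather than the real SLUB structures:

    #include <linux/lockdep.h>
    #include <linux/list.h>
    #include <linux/spinlock.h>

    struct demo_node {
            spinlock_t list_lock;
            struct list_head full;
    };

    /* Replaces a "caller must hold list_lock" comment with a checkable rule. */
    static void demo_add_full(struct demo_node *n, struct list_head *entry)
    {
            lockdep_assert_held(&n->list_lock);
            list_add(entry, &n->full);
    }

The same assertion is added to add_partial(), remove_partial() and acquire_slab() in the hunks below, and remove_full() gains a struct kmem_cache_node * parameter so it has a node whose list_lock it can assert.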
@@ -1265,7 +1264,8 @@ static inline int check_object(struct kmem_cache *s, struct page *page,
                        void *object, u8 val) { return 1; }
 static inline void add_full(struct kmem_cache *s, struct kmem_cache_node *n,
                                        struct page *page) {}
-static inline void remove_full(struct kmem_cache *s, struct page *page) {}
+static inline void remove_full(struct kmem_cache *s, struct kmem_cache_node *n,
+                                       struct page *page) {}
 static inline unsigned long kmem_cache_flags(unsigned long object_size,
        unsigned long flags, const char *name,
        void (*ctor)(void *))
@@ -1519,12 +1519,12 @@ static void discard_slab(struct kmem_cache *s, struct page *page)
 
 /*
  * Management of partially allocated slabs.
- *
- * list_lock must be held.
  */
 static inline void add_partial(struct kmem_cache_node *n,
                                struct page *page, int tail)
 {
+       lockdep_assert_held(&n->list_lock);
+
        n->nr_partial++;
        if (tail == DEACTIVATE_TO_TAIL)
                list_add_tail(&page->lru, &n->partial);
@@ -1532,12 +1532,11 @@ static inline void add_partial(struct kmem_cache_node *n,
                list_add(&page->lru, &n->partial);
 }
 
-/*
- * list_lock must be held.
- */
 static inline void remove_partial(struct kmem_cache_node *n,
                                        struct page *page)
 {
+       lockdep_assert_held(&n->list_lock);
+
        list_del(&page->lru);
        n->nr_partial--;
 }
@@ -1547,8 +1546,6 @@ static inline void remove_partial(struct kmem_cache_node *n,
  * return the pointer to the freelist.
  *
  * Returns a list of objects or NULL if it fails.
- *
- * Must hold list_lock since we modify the partial list.
  */
 static inline void *acquire_slab(struct kmem_cache *s,
                struct kmem_cache_node *n, struct page *page,
@@ -1558,6 +1555,8 @@ static inline void *acquire_slab(struct kmem_cache *s,
        unsigned long counters;
        struct page new;
 
+       lockdep_assert_held(&n->list_lock);
+
        /*
         * Zap the freelist and set the frozen bit.
         * The old freelist is the list of objects for the
@@ -1902,7 +1901,7 @@ redo:
 
        else if (l == M_FULL)
 
-               remove_full(s, page);
+               remove_full(s, n, page);
 
        if (m == M_PARTIAL) {
 
@@ -2556,7 +2555,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
                new.inuse--;
                if ((!new.inuse || !prior) && !was_frozen) {
 
-                       if (kmem_cache_has_cpu_partial(s) && !prior)
+                       if (kmem_cache_has_cpu_partial(s) && !prior) {
 
                                /*
                                 * Slab was on no list before and will be
@@ -2566,7 +2565,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
                                 */
                                new.frozen = 1;
 
-                       else { /* Needs to be taken off a list */
+                       } else { /* Needs to be taken off a list */
 
                                n = get_node(s, page_to_nid(page));
                                /*
@@ -2615,7 +2614,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
                 */
                if (!kmem_cache_has_cpu_partial(s) && unlikely(!prior)) {
                        if (kmem_cache_debug(s))
-                               remove_full(s, page);
+                               remove_full(s, n, page);
                        add_partial(n, page, DEACTIVATE_TO_TAIL);
                        stat(s, FREE_ADD_PARTIAL);
                }
@@ -2629,9 +2628,10 @@ slab_empty:
                 */
                remove_partial(n, page);
                stat(s, FREE_REMOVE_PARTIAL);
-       } else
+       } else {
                /* Slab must be on the full list */
-               remove_full(s, page);
+               remove_full(s, n, page);
+       }
 
        spin_unlock_irqrestore(&n->list_lock, flags);
        stat(s, FREE_SLAB);
@@ -2905,7 +2905,13 @@ static void early_kmem_cache_node_alloc(int node)
        init_kmem_cache_node(n);
        inc_slabs_node(kmem_cache_node, node, page->objects);
 
+       /*
+        * the lock is for lockdep's sake, not for any actual
+        * race protection
+        */
+       spin_lock(&n->list_lock);
        add_partial(n, page, DEACTIVATE_TO_HEAD);
+       spin_unlock(&n->list_lock);
 }
 
 static void free_kmem_cache_nodes(struct kmem_cache *s)
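The hunk above is the "mm: slub: work around unneeded lockdep warning" part of the series: early_kmem_cache_node_alloc() runs while boot is still single-threaded, so nothing can race on the partial list, but add_partial() now asserts list_lock. Taking the lock here only silences that assertion, as the added comment says. Sketched with the hypothetical demo_node helpers from the earlier note:

    /* Boot-time setup: no other CPU can see n yet, so the lock protects
     * nothing; it only satisfies the assertion inside the helper. */
    static void demo_early_init(struct demo_node *n, struct list_head *entry)
    {
            spin_lock(&n->list_lock);
            demo_add_full(n, entry);
            spin_unlock(&n->list_lock);
    }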
@@ -4314,7 +4320,13 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
 
                page = ACCESS_ONCE(c->partial);
                if (page) {
-                       x = page->pobjects;
+                       node = page_to_nid(page);
+                       if (flags & SO_TOTAL)
+                               WARN_ON_ONCE(1);
+                       else if (flags & SO_OBJECTS)
+                               WARN_ON_ONCE(1);
+                       else
+                               x = page->pages;
                        total += x;
                        nodes[node] += x;
                }
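The hunk above is "slub: Fix calculation of cpu slabs". When show_slab_objects() walks each CPU's percpu partial list it previously added page->pobjects (an approximate count of free objects) even when only the number of slab pages was requested, which could make cpu_slabs report more slabs than the cache actually has; it also reused the node of the previously examined page instead of the partial page's own node. The head page of a percpu partial list caches two running totals, and the fix picks the right one; exact object counts are not tracked for this list, hence the WARN_ON_ONCE() for the SO_TOTAL/SO_OBJECTS cases. A simplified sketch of the distinction (the field names follow this era of SLUB, the struct itself is invented):

    struct demo_partial_head {
            int pages;      /* slab pages on the percpu partial list */
            int pobjects;   /* approximate free objects on that list */
    };

    /* Counting "slabs" must use ->pages; using ->pobjects, as the old code
     * did, inflates the count by roughly the objects-per-slab factor. */
    static int demo_count_cpu_partial_slabs(const struct demo_partial_head *h)
    {
            return h->pages;
    }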
@@ -5178,7 +5190,7 @@ static int sysfs_slab_add(struct kmem_cache *s)
        }
 
        s->kobj.kset = slab_kset;
-       err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, name);
+       err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, "%s", name);
        if (err) {
                kobject_put(&s->kobj);
                return err;
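The final hunk is "slub: Fix possible format string bug." The trailing arguments of kobject_init_and_add() are a printf-style format string plus its arguments, and the cache name was being passed as the format itself, so a name containing '%' would be parsed for conversion specifiers; passing a literal "%s" with the name as its argument prints the name verbatim. The bug class in a plain userspace sketch (ordinary printf standing in for the kobject API):

    #include <stdio.h>

    static void demo(const char *untrusted_name)
    {
            printf(untrusted_name);       /* bad: '%' in the name is interpreted */
            printf("%s", untrusted_name); /* good: the name is printed verbatim  */
    }

gcc's -Wformat-security flags the first call for exactly this reason.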