-rw-r--r--	include/linux/slab.h	 8
-rw-r--r--	mm/slab.c	 2
-rw-r--r--	mm/slub.c	56
3 files changed, 39 insertions, 27 deletions
diff --git a/include/linux/slab.h b/include/linux/slab.h
index a060142aa5f5..9260abdd67df 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -205,8 +205,8 @@ struct kmem_cache {
 
 #ifdef CONFIG_SLUB
 /*
- * SLUB allocates up to order 2 pages directly and otherwise
- * passes the request to the page allocator.
+ * SLUB directly allocates requests fitting in to an order-1 page
+ * (PAGE_SIZE*2). Larger requests are passed to the page allocator.
  */
 #define KMALLOC_SHIFT_HIGH	(PAGE_SHIFT + 1)
 #define KMALLOC_SHIFT_MAX	(MAX_ORDER + PAGE_SHIFT)
@@ -217,12 +217,12 @@ struct kmem_cache {
 
 #ifdef CONFIG_SLOB
 /*
- * SLOB passes all page size and larger requests to the page allocator.
+ * SLOB passes all requests larger than one page to the page allocator.
  * No kmalloc array is necessary since objects of different sizes can
  * be allocated from the same page.
  */
-#define KMALLOC_SHIFT_MAX	30
 #define KMALLOC_SHIFT_HIGH	PAGE_SHIFT
+#define KMALLOC_SHIFT_MAX	30
 #ifndef KMALLOC_SHIFT_LOW
 #define KMALLOC_SHIFT_LOW	3
 #endif
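The comment rewrite above only documents existing behavior: with SLUB, kmalloc() serves requests up to KMALLOC_SHIFT_HIGH (an order-1 page, i.e. 2*PAGE_SIZE) from kmalloc caches and hands anything larger to the page allocator, bounded by KMALLOC_SHIFT_MAX. A minimal userspace sketch of the resulting size bounds, assuming a 4 KiB page (PAGE_SHIFT == 12) and MAX_ORDER == 11; the macros are copied here purely for illustration:

#include <stdio.h>

#define PAGE_SHIFT	12			/* assumed: 4 KiB pages */
#define MAX_ORDER	11			/* assumed: buddy allocator limit */

#define KMALLOC_SHIFT_HIGH	(PAGE_SHIFT + 1)		/* served by kmalloc caches */
#define KMALLOC_SHIFT_MAX	(MAX_ORDER + PAGE_SHIFT)	/* absolute kmalloc ceiling */

int main(void)
{
	printf("largest kmalloc cache object: %lu bytes\n",
	       1UL << KMALLOC_SHIFT_HIGH);	/* 8192: one order-1 page */
	printf("largest kmalloc request:      %lu bytes\n",
	       1UL << KMALLOC_SHIFT_MAX);	/* 8 MiB, via the page allocator */
	return 0;
}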
diff --git a/mm/slab.c b/mm/slab.c
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1946,7 +1946,7 @@ static void slab_destroy_debugcheck(struct kmem_cache *cachep,
 /**
  * slab_destroy - destroy and release all objects in a slab
  * @cachep: cache pointer being destroyed
- * @slabp: slab pointer being destroyed
+ * @page: page pointer being destroyed
  *
  * Destroy all the objs in a slab, and release the mem back to the system.
  * Before calling the slab must have been unlinked from the cache. The
diff --git a/mm/slub.c b/mm/slub.c
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1000,23 +1000,22 @@ static inline void slab_free_hook(struct kmem_cache *s, void *x)
 
 /*
  * Tracking of fully allocated slabs for debugging purposes.
- *
- * list_lock must be held.
  */
 static void add_full(struct kmem_cache *s,
 	struct kmem_cache_node *n, struct page *page)
 {
+	lockdep_assert_held(&n->list_lock);
+
 	if (!(s->flags & SLAB_STORE_USER))
 		return;
 
 	list_add(&page->lru, &n->full);
 }
 
-/*
- * list_lock must be held.
- */
-static void remove_full(struct kmem_cache *s, struct page *page)
+static void remove_full(struct kmem_cache *s, struct kmem_cache_node *n, struct page *page)
 {
+	lockdep_assert_held(&n->list_lock);
+
 	if (!(s->flags & SLAB_STORE_USER))
 		return;
 
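The pattern repeated throughout this series replaces a "list_lock must be held" comment with lockdep_assert_held(), so the locking rule is checked at runtime under CONFIG_LOCKDEP instead of relying on reviewers; passing kmem_cache_node into remove_full() exists only so the assertion has a lock to check. A minimal kernel-style sketch of the idiom; struct demo_node and demo_add_full() are hypothetical names, not part of the patch:

#include <linux/list.h>
#include <linux/lockdep.h>
#include <linux/spinlock.h>

struct demo_node {
	spinlock_t		list_lock;	/* protects @full */
	struct list_head	full;
};

static void demo_add_full(struct demo_node *n, struct list_head *entry)
{
	/* Splats under CONFIG_LOCKDEP if the caller forgot the lock. */
	lockdep_assert_held(&n->list_lock);

	list_add(entry, &n->full);
}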
@@ -1265,7 +1264,8 @@ static inline int check_object(struct kmem_cache *s, struct page *page,
 			void *object, u8 val) { return 1; }
 static inline void add_full(struct kmem_cache *s, struct kmem_cache_node *n,
 					struct page *page) {}
-static inline void remove_full(struct kmem_cache *s, struct page *page) {}
+static inline void remove_full(struct kmem_cache *s, struct kmem_cache_node *n,
+					struct page *page) {}
 static inline unsigned long kmem_cache_flags(unsigned long object_size,
 	unsigned long flags, const char *name,
 	void (*ctor)(void *))
@@ -1519,12 +1519,12 @@ static void discard_slab(struct kmem_cache *s, struct page *page)
 
 /*
  * Management of partially allocated slabs.
- *
- * list_lock must be held.
  */
 static inline void add_partial(struct kmem_cache_node *n,
 				struct page *page, int tail)
 {
+	lockdep_assert_held(&n->list_lock);
+
 	n->nr_partial++;
 	if (tail == DEACTIVATE_TO_TAIL)
 		list_add_tail(&page->lru, &n->partial);
@@ -1532,12 +1532,11 @@ static inline void add_partial(struct kmem_cache_node *n,
 		list_add(&page->lru, &n->partial);
 }
 
-/*
- * list_lock must be held.
- */
 static inline void remove_partial(struct kmem_cache_node *n,
 					struct page *page)
 {
+	lockdep_assert_held(&n->list_lock);
+
 	list_del(&page->lru);
 	n->nr_partial--;
 }
@@ -1547,8 +1546,6 @@ static inline void remove_partial(struct kmem_cache_node *n,
  * return the pointer to the freelist.
  *
  * Returns a list of objects or NULL if it fails.
- *
- * Must hold list_lock since we modify the partial list.
  */
 static inline void *acquire_slab(struct kmem_cache *s,
 		struct kmem_cache_node *n, struct page *page,
@@ -1558,6 +1555,8 @@ static inline void *acquire_slab(struct kmem_cache *s,
 	unsigned long counters;
 	struct page new;
 
+	lockdep_assert_held(&n->list_lock);
+
 	/*
 	 * Zap the freelist and set the frozen bit.
 	 * The old freelist is the list of objects for the
@@ -1902,7 +1901,7 @@ redo:
 
 	else if (l == M_FULL)
 
-		remove_full(s, page);
+		remove_full(s, n, page);
 
 	if (m == M_PARTIAL) {
 
@@ -2556,7 +2555,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
 		new.inuse--;
 		if ((!new.inuse || !prior) && !was_frozen) {
 
-			if (kmem_cache_has_cpu_partial(s) && !prior)
+			if (kmem_cache_has_cpu_partial(s) && !prior) {
 
 				/*
 				 * Slab was on no list before and will be
@@ -2566,7 +2565,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
 				 */
 				new.frozen = 1;
 
-			else { /* Needs to be taken off a list */
+			} else { /* Needs to be taken off a list */
 
 				n = get_node(s, page_to_nid(page));
 				/*
@@ -2615,7 +2614,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
 	 */
 	if (!kmem_cache_has_cpu_partial(s) && unlikely(!prior)) {
 		if (kmem_cache_debug(s))
-			remove_full(s, page);
+			remove_full(s, n, page);
 		add_partial(n, page, DEACTIVATE_TO_TAIL);
 		stat(s, FREE_ADD_PARTIAL);
 	}
@@ -2629,9 +2628,10 @@ slab_empty:
 		 */
 		remove_partial(n, page);
 		stat(s, FREE_REMOVE_PARTIAL);
-	} else
+	} else {
 		/* Slab must be on the full list */
-		remove_full(s, page);
+		remove_full(s, n, page);
+	}
 
 	spin_unlock_irqrestore(&n->list_lock, flags);
 	stat(s, FREE_SLAB);
@@ -2905,7 +2905,13 @@ static void early_kmem_cache_node_alloc(int node)
 	init_kmem_cache_node(n);
 	inc_slabs_node(kmem_cache_node, node, page->objects);
 
+	/*
+	 * the lock is for lockdep's sake, not for any actual
+	 * race protection
+	 */
+	spin_lock(&n->list_lock);
 	add_partial(n, page, DEACTIVATE_TO_HEAD);
+	spin_unlock(&n->list_lock);
 }
 
 static void free_kmem_cache_nodes(struct kmem_cache *s)
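Because add_partial() now asserts list_lock, the early boot path has to take the lock even though nothing can race at that point; the spin_lock()/spin_unlock() pair exists purely to satisfy the assertion, as the added comment says. Continuing the hypothetical demo_node sketch from earlier, the same idiom looks like this:

static void demo_early_init(struct demo_node *n, struct list_head *entry)
{
	/*
	 * Single-threaded init path: the lock is taken only so the
	 * lockdep_assert_held() in demo_add_full() is satisfied, not
	 * because there is any concurrent access to protect against.
	 */
	spin_lock(&n->list_lock);
	demo_add_full(n, entry);
	spin_unlock(&n->list_lock);
}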
@@ -4314,7 +4320,13 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
 
 			page = ACCESS_ONCE(c->partial);
 			if (page) {
-				x = page->pobjects;
+				node = page_to_nid(page);
+				if (flags & SO_TOTAL)
+					WARN_ON_ONCE(1);
+				else if (flags & SO_OBJECTS)
+					WARN_ON_ONCE(1);
+				else
+					x = page->pages;
 				total += x;
 				nodes[node] += x;
 			}
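For context on the per-cpu partial hunk above: the first page on a per-cpu partial list carries bookkeeping for the whole list, where pages is an exact count of slabs and pobjects only an estimate of free objects, so a slab count must come from pages and object counts cannot be derived here (hence the WARN_ON_ONCE). A simplified sketch of that bookkeeping, with hypothetical names standing in for the struct page fields of this kernel era:

struct demo_partial_head {
	struct demo_partial_head *next;	/* next slab on the per-cpu list */
	int pages;			/* exact number of slabs on the list */
	int pobjects;			/* approximate free objects, not a slab count */
};

/* Mirrors the corrected x = page->pages above. */
static int demo_count_partial_slabs(const struct demo_partial_head *head)
{
	return head ? head->pages : 0;
}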
@@ -5178,7 +5190,7 @@ static int sysfs_slab_add(struct kmem_cache *s)
 	}
 
 	s->kobj.kset = slab_kset;
-	err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, name);
+	err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, "%s", name);
 	if (err) {
 		kobject_put(&s->kobj);
 		return err;
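The final hunk is a format-string hardening fix: kobject_init_and_add() takes a printf-style format, so passing the cache name directly would let any '%' in the name be parsed as a conversion specifier, while "%s" makes the name a plain argument. A small userspace illustration of the same hazard, using a hypothetical cache name:

#include <stdio.h>

int main(void)
{
	const char *name = "mycache-50%full";	/* hypothetical cache name */

	/* printf(name) would misparse the "%f"; passing it as data is safe. */
	printf("%s\n", name);
	return 0;
}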
