 Documentation/vm/slub.txt |  2 +-
 mm/slub.c                 | 23 +++++++++++++----------
 2 files changed, 14 insertions(+), 11 deletions(-)
diff --git a/Documentation/vm/slub.txt b/Documentation/vm/slub.txt
index 6752870c4970..b0c6d1bbb434 100644
--- a/Documentation/vm/slub.txt
+++ b/Documentation/vm/slub.txt
@@ -17,7 +17,7 @@ data and perform operation on the slabs. By default slabinfo only lists
 slabs that have data in them. See "slabinfo -h" for more options when
 running the command. slabinfo can be compiled with
 
-gcc -o slabinfo tools/slub/slabinfo.c
+gcc -o slabinfo tools/vm/slabinfo.c
 
 Some of the modes of operation of slabinfo require that slub debugging
 be enabled on the command line. F.e. no tracking information will be
diff --git a/mm/slub.c b/mm/slub.c
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1369,7 +1369,7 @@ static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
 
 	inc_slabs_node(s, page_to_nid(page), page->objects);
 	page->slab = s;
-	page->flags |= 1 << PG_slab;
+	__SetPageSlab(page);
 
 	start = page_address(page);
 
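
Note: __SetPageSlab() is the non-atomic PG_slab setter generated by the
page-flags macros, and new_slab() may use it because the freshly
allocated page is not yet visible to any other CPU. A sketch of what the
accessor boils down to (simplified from the page-flags macro machinery;
the expansion shown is an assumption, not part of this diff):

	/* Non-atomic variant: the caller must guarantee exclusive
	 * access to the page, which new_slab() does. */
	static inline void __SetPageSlab(struct page *page)
	{
		__set_bit(PG_slab, &page->flags);
	}

Functionally this matches the open-coded "page->flags |= 1 << PG_slab",
but it goes through the one documented interface for page-flag updates.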
@@ -1514,15 +1514,19 @@ static inline void *acquire_slab(struct kmem_cache *s,
 		freelist = page->freelist;
 		counters = page->counters;
 		new.counters = counters;
-		if (mode)
+		if (mode) {
 			new.inuse = page->objects;
+			new.freelist = NULL;
+		} else {
+			new.freelist = freelist;
+		}
 
 		VM_BUG_ON(new.frozen);
 		new.frozen = 1;
 
 	} while (!__cmpxchg_double_slab(s, page,
 			freelist, counters,
-			NULL, new.counters,
+			new.freelist, new.counters,
 			"lock and freeze"));
 
 	remove_partial(n, page);
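
Note: acquire_slab() now computes new.freelist on both paths and lets
the double-word cmpxchg publish it, instead of unconditionally
installing NULL: with mode set it takes the whole freelist (the slab is
marked fully in use), otherwise it freezes the slab but leaves the
freelist in place. A minimal userspace model of this "lock and freeze"
retry loop, with all names illustrative rather than SLUB's and the
packed counters word reduced to a bare inuse count (C11 atomics; may
need -latomic):

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stddef.h>
	#include <stdio.h>

	struct slab_word {
		void *freelist;          /* stands in for page->freelist */
		unsigned long inuse;     /* stands in for page->counters */
	};

	static _Atomic struct slab_word word;

	static void *acquire(bool mode, unsigned long objects)
	{
		struct slab_word old, new;

		do {
			old = atomic_load(&word);
			new = old;
			if (mode) {
				/* take everything: slab becomes fully in use */
				new.inuse = objects;
				new.freelist = NULL;
			} else {
				/* freeze only: freelist stays on the slab */
				new.freelist = old.freelist;
			}
		} while (!atomic_compare_exchange_weak(&word, &old, new));

		return old.freelist;     /* caller now owns these objects */
	}

	int main(void)
	{
		int obj;

		atomic_store(&word,
			(struct slab_word){ .freelist = &obj, .inuse = 0 });
		printf("took %p, freelist now %s\n", acquire(true, 16),
			atomic_load(&word).freelist ? "non-empty" : "empty");
		return 0;
	}

Because the correct freelist is published atomically here, callers no
longer have to write page->freelist back afterwards, which is exactly
the line the next get_partial_node() hunk deletes.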
@@ -1564,7 +1568,6 @@ static void *get_partial_node(struct kmem_cache *s,
 			object = t;
 			available = page->objects - page->inuse;
 		} else {
-			page->freelist = t;
 			available = put_cpu_partial(s, page, 0);
 			stat(s, CPU_PARTIAL_NODE);
 		}
@@ -1579,7 +1582,7 @@ static void *get_partial_node(struct kmem_cache *s,
 /*
  * Get a page from somewhere. Search in increasing NUMA distances.
  */
-static struct page *get_any_partial(struct kmem_cache *s, gfp_t flags,
+static void *get_any_partial(struct kmem_cache *s, gfp_t flags,
 		struct kmem_cache_cpu *c)
 {
 #ifdef CONFIG_NUMA
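
Note: with this change both get_partial_node() and get_any_partial()
return the freelist/object pointer rather than the page, so the declared
type finally matches what the callers consume. Plausibly the caller side
now has this shape (a sketch reconstructed from the surrounding kernel
version, not shown in this diff):

	static void *get_partial(struct kmem_cache *s, gfp_t flags,
			int node, struct kmem_cache_cpu *c)
	{
		void *object;
		int searchnode = (node == NUMA_NO_NODE) ? numa_node_id() : node;

		object = get_partial_node(s, get_node(s, searchnode), c);
		if (object || node != NUMA_NO_NODE)
			return object;   /* local hit, or caller pinned a node */

		return get_any_partial(s, flags, c);
	}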
@@ -2766,7 +2769,7 @@ static unsigned long calculate_alignment(unsigned long flags,
 }
 
 static void
-init_kmem_cache_node(struct kmem_cache_node *n, struct kmem_cache *s)
+init_kmem_cache_node(struct kmem_cache_node *n)
 {
 	n->nr_partial = 0;
 	spin_lock_init(&n->list_lock);
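
Note: every field this helper initializes lives in the kmem_cache_node
itself, so the kmem_cache argument carried no information and the three
call sites below shrink accordingly. For context, the full helper after
the change plausibly reads as follows (the CONFIG_SLUB_DEBUG tail is
recalled from the surrounding kernel version, not shown in this hunk):

	static void
	init_kmem_cache_node(struct kmem_cache_node *n)
	{
		n->nr_partial = 0;
		spin_lock_init(&n->list_lock);
		INIT_LIST_HEAD(&n->partial);
	#ifdef CONFIG_SLUB_DEBUG
		atomic_long_set(&n->nr_slabs, 0);
		atomic_long_set(&n->total_objects, 0);
		INIT_LIST_HEAD(&n->full);
	#endif
	}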
@@ -2836,7 +2839,7 @@ static void early_kmem_cache_node_alloc(int node)
 	init_object(kmem_cache_node, n, SLUB_RED_ACTIVE);
 	init_tracking(kmem_cache_node, n);
 #endif
-	init_kmem_cache_node(n, kmem_cache_node);
+	init_kmem_cache_node(n);
 	inc_slabs_node(kmem_cache_node, node, page->objects);
 
 	add_partial(n, page, DEACTIVATE_TO_HEAD);
@@ -2876,7 +2879,7 @@ static int init_kmem_cache_nodes(struct kmem_cache *s)
 		}
 
 		s->node[node] = n;
-		init_kmem_cache_node(n, s);
+		init_kmem_cache_node(n);
 	}
 	return 1;
 }
@@ -3625,7 +3628,7 @@ static int slab_mem_going_online_callback(void *arg)
 			ret = -ENOMEM;
 			goto out;
 		}
-		init_kmem_cache_node(n, s);
+		init_kmem_cache_node(n);
 		s->node[nid] = n;
 	}
 out:
@@ -3968,9 +3971,9 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
 			}
 			return s;
 		}
-		kfree(n);
 		kfree(s);
 	}
+	kfree(n);
 err:
 	up_write(&slub_lock);
 
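
Note: this last hunk plugs a leak on the error path. "n" (the duplicated
cache name) was freed only when the kmem_cache allocation succeeded but
opening it failed; when the allocation of "s" itself failed, "n" leaked.
Moving kfree(n) below the if (s) block covers both failure paths, and
the success path has already returned by then. A standalone model of the
fixed control flow (names illustrative, not SLUB's):

	#include <stdlib.h>
	#include <string.h>

	struct cache { char *name; };

	/* stand-in for kmem_cache_open(); takes ownership of n on success */
	static int cache_open(struct cache *s, char *n)
	{
		s->name = n;
		return 1;
	}

	struct cache *cache_create(const char *name)
	{
		struct cache *s;
		char *n = strdup(name);

		if (!n)
			goto err;
		s = malloc(sizeof(*s));
		if (s) {
			if (cache_open(s, n))
				return s;        /* s owns n now */
			free(s);
		}
		free(n);  /* runs when malloc() or cache_open() failed */
	err:
		return NULL;
	}

	int main(void)
	{
		struct cache *c = cache_create("demo");

		if (c) {
			free(c->name);
			free(c);
		}
		return 0;
	}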
