author     Linus Torvalds <torvalds@linux-foundation.org>   2012-01-11 21:52:23 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>   2012-01-11 21:52:23 -0500
commit     6296e5d3c067df41980a5fd09ad4cc6765f79bb9 (patch)
tree       ac10bc5321ac1d750612c0e0ae53d6c4097c5734 /mm/slub.c
parent     c086ae4ed94f9a1d283318e006813268c2dbf9fc (diff)
parent     5878cf431ca7233a56819ca6970153ac0b129599 (diff)
Merge branch 'slab/for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/linux
* 'slab/for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/linux:
slub: disallow changing cpu_partial from userspace for debug caches
slub: add missed accounting
slub: Extract get_freelist from __slab_alloc
slub: Switch per cpu partial page support off for debugging
slub: fix a possible memleak in __slab_alloc()
slub: fix slub_max_order Documentation
slub: add missed accounting
slab: add taint flag outputting to debug paths.
slub: add taint flag outputting to debug paths
slab: introduce slab_max_order kernel parameter
slab: rename slab_break_gfp_order to slab_max_order
Diffstat (limited to 'mm/slub.c')
-rw-r--r--   mm/slub.c | 77
1 files changed, 48 insertions, 29 deletions
@@ -570,7 +570,7 @@ static void slab_bug(struct kmem_cache *s, char *fmt, ...)
 	va_end(args);
 	printk(KERN_ERR "========================================"
 			"=====================================\n");
-	printk(KERN_ERR "BUG %s: %s\n", s->name, buf);
+	printk(KERN_ERR "BUG %s (%s): %s\n", s->name, print_tainted(), buf);
 	printk(KERN_ERR "----------------------------------------"
 			"-------------------------------------\n\n");
 }
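The only change in this hunk is that slab_bug() now folds the kernel taint string into the report header via print_tainted(). As a hedged illustration only (the cache name, taint flags, and message below are invented for the example, not taken from the patch), the new format string would render a header roughly like:

    BUG kmalloc-128 (Tainted: G    B): Redzone overwritten

On an untainted kernel, print_tainted() yields "Not tainted", so the same position reads "(Not tainted)".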
@@ -1901,11 +1901,14 @@ static void unfreeze_partials(struct kmem_cache *s)
 		}
 
 		if (l != m) {
-			if (l == M_PARTIAL)
+			if (l == M_PARTIAL) {
 				remove_partial(n, page);
-			else
+				stat(s, FREE_REMOVE_PARTIAL);
+			} else {
 				add_partial(n, page,
 					DEACTIVATE_TO_TAIL);
+				stat(s, FREE_ADD_PARTIAL);
+			}
 
 			l = m;
 		}
@@ -2124,6 +2127,37 @@ static inline void *new_slab_objects(struct kmem_cache *s, gfp_t flags,
 }
 
 /*
+ * Check the page->freelist of a page and either transfer the freelist to the per cpu freelist
+ * or deactivate the page.
+ *
+ * The page is still frozen if the return value is not NULL.
+ *
+ * If this function returns NULL then the page has been unfrozen.
+ */
+static inline void *get_freelist(struct kmem_cache *s, struct page *page)
+{
+	struct page new;
+	unsigned long counters;
+	void *freelist;
+
+	do {
+		freelist = page->freelist;
+		counters = page->counters;
+		new.counters = counters;
+		VM_BUG_ON(!new.frozen);
+
+		new.inuse = page->objects;
+		new.frozen = freelist != NULL;
+
+	} while (!cmpxchg_double_slab(s, page,
+		freelist, counters,
+		NULL, new.counters,
+		"get_freelist"));
+
+	return freelist;
+}
+
+/*
  * Slow path. The lockless freelist is empty or we need to perform
  * debugging duties.
  *
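The retry loop in get_freelist() above is the core trick: the whole page freelist is claimed in a single cmpxchg_double_slab(), which also updates the packed inuse/frozen counters, so no list lock is needed. As a rough, hedged userspace analogue only (plain C11 atomics, no counters or freezing, all names invented for the sketch), the same "detach the entire list or retry" pattern looks like this:

#include <stdatomic.h>
#include <stdio.h>

struct object {
	struct object *next;
};

/* head of a toy lock-free freelist */
static _Atomic(struct object *) freelist_head;

/* Atomically take ownership of every object currently on the list. */
static struct object *detach_freelist(void)
{
	struct object *old = atomic_load(&freelist_head);

	/*
	 * Retry until we swap the head we observed for NULL in one step;
	 * on failure the compare-exchange reloads 'old' for the next try.
	 */
	while (!atomic_compare_exchange_weak(&freelist_head, &old, NULL))
		;

	return old;	/* NULL means the list was already empty */
}

int main(void)
{
	struct object a = { .next = NULL };
	struct object b = { .next = &a };

	atomic_store(&freelist_head, &b);

	for (struct object *p = detach_freelist(); p; p = p->next)
		printf("claimed object at %p\n", (void *)p);

	return 0;
}

The kernel version differs in that the compare-exchange covers two words at once (the freelist pointer plus the counters word), which is how it can mark the slab unfrozen in the same atomic step when the freelist it takes is empty.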
@@ -2144,8 +2178,6 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 {
 	void **object;
 	unsigned long flags;
-	struct page new;
-	unsigned long counters;
 
 	local_irq_save(flags);
 #ifdef CONFIG_PREEMPT
@@ -2166,31 +2198,14 @@ redo:
 		goto new_slab;
 	}
 
-	stat(s, ALLOC_SLOWPATH);
-
-	do {
-		object = c->page->freelist;
-		counters = c->page->counters;
-		new.counters = counters;
-		VM_BUG_ON(!new.frozen);
-
-		/*
-		 * If there is no object left then we use this loop to
-		 * deactivate the slab which is simple since no objects
-		 * are left in the slab and therefore we do not need to
-		 * put the page back onto the partial list.
-		 *
-		 * If there are objects left then we retrieve them
-		 * and use them to refill the per cpu queue.
-		 */
+	/* must check again c->freelist in case of cpu migration or IRQ */
+	object = c->freelist;
+	if (object)
+		goto load_freelist;
 
-		new.inuse = c->page->objects;
-		new.frozen = object != NULL;
+	stat(s, ALLOC_SLOWPATH);
 
-	} while (!__cmpxchg_double_slab(s, c->page,
-			object, counters,
-			NULL, new.counters,
-			"__slab_alloc"));
+	object = get_freelist(s, c->page);
 
 	if (!object) {
 		c->page = NULL;
@@ -3028,7 +3043,9 @@ static int kmem_cache_open(struct kmem_cache *s,
 	 * per node list when we run out of per cpu objects. We only fetch 50%
 	 * to keep some capacity around for frees.
 	 */
-	if (s->size >= PAGE_SIZE)
+	if (kmem_cache_debug(s))
+		s->cpu_partial = 0;
+	else if (s->size >= PAGE_SIZE)
 		s->cpu_partial = 2;
 	else if (s->size >= 1024)
 		s->cpu_partial = 6;
@@ -4637,6 +4654,8 @@ static ssize_t cpu_partial_store(struct kmem_cache *s, const char *buf,
 	err = strict_strtoul(buf, 10, &objects);
 	if (err)
 		return err;
+	if (objects && kmem_cache_debug(s))
+		return -EINVAL;
 
 	s->cpu_partial = objects;
 	flush_all(s);
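The new check means a cache with debugging enabled (which kmem_cache_open() above now initializes with cpu_partial = 0) rejects any attempt to re-enable per-cpu partial pages from userspace. As a hedged illustration only, assuming a SLUB kernel with sysfs support and using "kmalloc-64" purely as an example name for a cache that actually has debugging enabled (e.g. via the slub_debug boot option), a small userspace probe would see the write fail with EINVAL:

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* example path only; substitute a cache that has debugging enabled */
	const char *path = "/sys/kernel/slab/kmalloc-64/cpu_partial";
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* with this patch, a non-zero value is rejected for debug caches */
	if (write(fd, "8", 1) < 0)
		printf("write rejected: %s\n", strerror(errno));
	else
		printf("cpu_partial updated\n");

	close(fd);
	return 0;
}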