author		Christoph Lameter <cl@linux.com>	2011-11-11 15:07:14 -0500
committer	Pekka Enberg <penberg@kernel.org>	2011-12-13 15:17:10 -0500
commit		213eeb9fd9d66c33109e2ace242df214dc3a653d
tree		caaecaf19dd13f10a1704aa6b2a20fa900fb3d85 /mm
parent		73736e0387ba0e6d2b703407b4d26168d31516a7
slub: Extract get_freelist from __slab_alloc
get_freelist retrieves free objects from the page freelist (put there by
remote frees) or deactivates a slab page if no more objects are available.

Acked-by: David Rientjes <rientjes@google.com>
Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
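The core trick is detaching the entire per-page freelist in one atomic step.
A minimal standalone C11 sketch of that pattern (a userspace analogue, not
kernel code; take_freelist and struct object are illustrative names):

	#include <stdatomic.h>
	#include <stddef.h>

	struct object {
		struct object *next;	/* free objects chain through their first word */
	};

	/*
	 * Userspace analogue of get_freelist(): atomically take the whole
	 * freelist, leaving NULL behind. Remote frees push onto *head; the
	 * owning CPU drains every pending object with a single exchange.
	 */
	static struct object *take_freelist(_Atomic(struct object *) *head)
	{
		return atomic_exchange(head, NULL);
	}

The kernel cannot use a plain exchange here because page->freelist and
page->counters (which pack inuse and frozen) must change together, hence
the cmpxchg_double_slab() retry loop in the patch below.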
Diffstat (limited to 'mm')
 mm/slub.c | 57 ++++++++++++++++++++++++++++++++-------------------------
 1 file changed, 32 insertions(+), 25 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index 5e410a95abaf..6dc79f8e6ce9 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2127,6 +2127,37 @@ static inline void *new_slab_objects(struct kmem_cache *s, gfp_t flags,
 }
 
 /*
+ * Check the page->freelist of a page and either transfer the freelist to the per cpu freelist
+ * or deactivate the page.
+ *
+ * The page is still frozen if the return value is not NULL.
+ *
+ * If this function returns NULL then the page has been unfrozen.
+ */
+static inline void *get_freelist(struct kmem_cache *s, struct page *page)
+{
+	struct page new;
+	unsigned long counters;
+	void *freelist;
+
+	do {
+		freelist = page->freelist;
+		counters = page->counters;
+		new.counters = counters;
+		VM_BUG_ON(!new.frozen);
+
+		new.inuse = page->objects;
+		new.frozen = freelist != NULL;
+
+	} while (!cmpxchg_double_slab(s, page,
+		freelist, counters,
+		NULL, new.counters,
+		"get_freelist"));
+
+	return freelist;
+}
+
+/*
  * Slow path. The lockless freelist is empty or we need to perform
  * debugging duties.
  *
@@ -2147,8 +2178,6 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 {
 	void **object;
 	unsigned long flags;
-	struct page new;
-	unsigned long counters;
 
 	local_irq_save(flags);
 #ifdef CONFIG_PREEMPT
@@ -2176,29 +2205,7 @@ redo:
 
 	stat(s, ALLOC_SLOWPATH);
 
-	do {
-		object = c->page->freelist;
-		counters = c->page->counters;
-		new.counters = counters;
-		VM_BUG_ON(!new.frozen);
-
-		/*
-		 * If there is no object left then we use this loop to
-		 * deactivate the slab which is simple since no objects
-		 * are left in the slab and therefore we do not need to
-		 * put the page back onto the partial list.
-		 *
-		 * If there are objects left then we retrieve them
-		 * and use them to refill the per cpu queue.
-		 */
-
-		new.inuse = c->page->objects;
-		new.frozen = object != NULL;
-
-	} while (!__cmpxchg_double_slab(s, c->page,
-		object, counters,
-		NULL, new.counters,
-		"__slab_alloc"));
+	object = get_freelist(s, c->page);
 
 	if (!object) {
 		c->page = NULL;
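The retry loop above updates freelist and counters as one unit. A
self-contained C11 sketch of that double-word compare-and-swap pattern
(illustrative types and names; the real cmpxchg_double_slab() also carries
a slab-lock fallback for machines without a double-word cmpxchg):

	#include <stdatomic.h>
	#include <stdint.h>

	/* Two words that must change together, like page->freelist and
	 * page->counters in the patch above. */
	struct slab_words {
		void *freelist;
		uintptr_t counters;	/* packed inuse/objects/frozen bits */
	};

	/* Retry until both words swap atomically; on a failed compare,
	 * C11 refreshes 'old', so each retry recomputes the new pair
	 * from freshly read values, mirroring the kernel loop. */
	static struct slab_words detach_freelist(_Atomic(struct slab_words) *s)
	{
		struct slab_words old = atomic_load(s);
		struct slab_words new;

		do {
			new.freelist = NULL;		/* take every object at once */
			new.counters = old.counters;	/* kernel re-derives inuse/frozen here */
		} while (!atomic_compare_exchange_weak(s, &old, new));

		return old;	/* old.freelist is the detached chain */
	}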