author    Christoph Lameter <cl@linux.com>  2011-06-01 13:25:49 -0400
committer Pekka Enberg <penberg@kernel.org>  2011-07-02 06:26:53 -0400
commit    b789ef518b2a7231b0668c813f677cee528a9d3f
tree      08c300c4dde3e1386670962f233612a366bc59af  /mm/slub.c
parent    fc9bb8c768abe7ae10861c3510e01a95f98d5933
slub: Add cmpxchg_double_slab()
Add a function that operates on the second doubleword in the page struct
and manipulates the object counters, the freelist and the frozen attribute.

Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
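The pattern this helper enables is an optimistic read-modify-write retry
loop over the (freelist, counters) pair. The sketch below is not part of
the commit: pop_object_sketch() is a hypothetical caller (it assumes the
freelist is non-empty), while get_freepointer() and the inuse/objects/frozen
bitfields it leans on do exist in slub.c at this point in the series.

	/*
	 * Hypothetical usage sketch: atomically take the first object off
	 * a slab's freelist and bump the in-use count, retrying whenever
	 * another CPU changed either word between the read and the update.
	 */
	static void *pop_object_sketch(struct kmem_cache *s, struct page *page)
	{
		void *object;
		unsigned long counters;
		struct page new;

		do {
			object = page->freelist;	/* word 1: head of the free list */
			counters = page->counters;	/* word 2: inuse/objects/frozen */
			new.counters = counters;
			new.inuse++;			/* one more object handed out */
		} while (!cmpxchg_double_slab(s, page,
				object, counters,
				get_freepointer(s, object), new.counters,
				"pop_object_sketch"));

		return object;
	}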
Diffstat (limited to 'mm/slub.c')
 mm/slub.c | 65 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++-----
 1 file changed, 60 insertions(+), 5 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index 5a2d3d8e0558..be6715dd0ee8 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -131,6 +131,9 @@ static inline int kmem_cache_debug(struct kmem_cache *s)
 /* Enable to test recovery from slab corruption on boot */
 #undef SLUB_RESILIENCY_TEST
 
+/* Enable to log cmpxchg failures */
+#undef SLUB_DEBUG_CMPXCHG
+
 /*
  * Mininum number of partial slabs. These will be left on the partial
  * lists even if they are empty. kmem_cache_shrink may reclaim them.
@@ -170,6 +173,7 @@ static inline int kmem_cache_debug(struct kmem_cache *s)
 
 /* Internal SLUB flags */
 #define __OBJECT_POISON		0x80000000UL /* Poison object */
+#define __CMPXCHG_DOUBLE	0x40000000UL /* Use cmpxchg_double */
 
 static int kmem_size = sizeof(struct kmem_cache);
 
@@ -338,6 +342,37 @@ static inline int oo_objects(struct kmem_cache_order_objects x)
 	return x.x & OO_MASK;
 }
 
+static inline bool cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
+		void *freelist_old, unsigned long counters_old,
+		void *freelist_new, unsigned long counters_new,
+		const char *n)
+{
+#ifdef CONFIG_CMPXCHG_DOUBLE
+	if (s->flags & __CMPXCHG_DOUBLE) {
+		if (cmpxchg_double(&page->freelist,
+			freelist_old, counters_old,
+			freelist_new, counters_new))
+		return 1;
+	} else
+#endif
+	{
+		if (page->freelist == freelist_old && page->counters == counters_old) {
+			page->freelist = freelist_new;
+			page->counters = counters_new;
+			return 1;
+		}
+	}
+
+	cpu_relax();
+	stat(s, CMPXCHG_DOUBLE_FAIL);
+
+#ifdef SLUB_DEBUG_CMPXCHG
+	printk(KERN_INFO "%s %s: cmpxchg double redo ", n, s->name);
+#endif
+
+	return 0;
+}
+
 #ifdef CONFIG_SLUB_DEBUG
 /*
  * Determine a map of object in use on a page.
@@ -2596,6 +2631,12 @@ static int kmem_cache_open(struct kmem_cache *s,
 		}
 	}
 
+#ifdef CONFIG_CMPXCHG_DOUBLE
+	if (system_has_cmpxchg_double() && (s->flags & SLAB_DEBUG_FLAGS) == 0)
+		/* Enable fast mode */
+		s->flags |= __CMPXCHG_DOUBLE;
+#endif
+
 	/*
 	 * The larger the object size is, the more pages we want on the partial
 	 * list to avoid pounding the page allocator excessively.
@@ -4248,8 +4289,10 @@ static ssize_t sanity_checks_store(struct kmem_cache *s,
 				const char *buf, size_t length)
 {
 	s->flags &= ~SLAB_DEBUG_FREE;
-	if (buf[0] == '1')
+	if (buf[0] == '1') {
+		s->flags &= ~__CMPXCHG_DOUBLE;
 		s->flags |= SLAB_DEBUG_FREE;
+	}
 	return length;
 }
 SLAB_ATTR(sanity_checks);
@@ -4263,8 +4306,10 @@ static ssize_t trace_store(struct kmem_cache *s, const char *buf,
 							size_t length)
 {
 	s->flags &= ~SLAB_TRACE;
-	if (buf[0] == '1')
+	if (buf[0] == '1') {
+		s->flags &= ~__CMPXCHG_DOUBLE;
 		s->flags |= SLAB_TRACE;
+	}
 	return length;
 }
 SLAB_ATTR(trace);
@@ -4281,8 +4326,10 @@ static ssize_t red_zone_store(struct kmem_cache *s,
 		return -EBUSY;
 
 	s->flags &= ~SLAB_RED_ZONE;
-	if (buf[0] == '1')
+	if (buf[0] == '1') {
+		s->flags &= ~__CMPXCHG_DOUBLE;
 		s->flags |= SLAB_RED_ZONE;
+	}
 	calculate_sizes(s, -1);
 	return length;
 }
@@ -4300,8 +4347,10 @@ static ssize_t poison_store(struct kmem_cache *s,
 		return -EBUSY;
 
 	s->flags &= ~SLAB_POISON;
-	if (buf[0] == '1')
+	if (buf[0] == '1') {
+		s->flags &= ~__CMPXCHG_DOUBLE;
 		s->flags |= SLAB_POISON;
+	}
 	calculate_sizes(s, -1);
 	return length;
 }
@@ -4319,8 +4368,10 @@ static ssize_t store_user_store(struct kmem_cache *s,
 		return -EBUSY;
 
 	s->flags &= ~SLAB_STORE_USER;
-	if (buf[0] == '1')
+	if (buf[0] == '1') {
+		s->flags &= ~__CMPXCHG_DOUBLE;
 		s->flags |= SLAB_STORE_USER;
+	}
 	calculate_sizes(s, -1);
 	return length;
 }
@@ -4493,6 +4544,8 @@ STAT_ATTR(DEACTIVATE_TO_HEAD, deactivate_to_head);
 STAT_ATTR(DEACTIVATE_TO_TAIL, deactivate_to_tail);
 STAT_ATTR(DEACTIVATE_REMOTE_FREES, deactivate_remote_frees);
 STAT_ATTR(ORDER_FALLBACK, order_fallback);
+STAT_ATTR(CMPXCHG_DOUBLE_CPU_FAIL, cmpxchg_double_cpu_fail);
+STAT_ATTR(CMPXCHG_DOUBLE_FAIL, cmpxchg_double_fail);
 #endif
 
 static struct attribute *slab_attrs[] = {
@@ -4550,6 +4603,8 @@ static struct attribute *slab_attrs[] = {
 	&deactivate_to_tail_attr.attr,
 	&deactivate_remote_frees_attr.attr,
 	&order_fallback_attr.attr,
+	&cmpxchg_double_fail_attr.attr,
+	&cmpxchg_double_cpu_fail_attr.attr,
 #endif
 #ifdef CONFIG_FAILSLAB
 	&failslab_attr.attr,
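What makes the single cmpxchg_double() on &page->freelist sound is the
layout introduced by the parent commit listed above (fc9bb8c768ab):
freelist and counters sit together in one naturally aligned double word,
and counters overlays the three bitfields the commit message names. A
simplified view of that second doubleword, with surrounding members
elided (an assumed sketch; see the parent commit for the real definition):

	struct page {
		/* ... flags, mapping, ... */
		struct {			/* the "second doubleword" */
			void *freelist;		/* first free object in the slab */
			union {
				unsigned long counters;	/* all three fields as one word */
				struct {
					unsigned inuse:16;	/* objects in use */
					unsigned objects:15;	/* total objects in the slab */
					unsigned frozen:1;	/* slab is bound to a cpu */
				};
			};
		};
		/* ... rest of struct page ... */
	};

This also explains the sysfs hooks above: kmem_cache_open() only sets
__CMPXCHG_DOUBLE when no SLAB_DEBUG_FLAGS are active, and each debug
*_store handler clears it again, because debug processing relies on the
slower, comparison-based fallback rather than the lock-free fast path.
system_has_cmpxchg_double() additionally gates the fast mode on CPU
support for a double-word compare-and-exchange (e.g. cmpxchg16b on x86-64).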