author	Chen Gang <gang.chen@asianux.com>	2013-07-14 21:05:29 -0400
committer	Pekka Enberg <penberg@kernel.org>	2013-07-17 03:11:57 -0400
commit	d0e0ac9772f8ec520c96ebdd60f00eedf54a46ae (patch)
tree	3ef143e2fe4711bcb24de55da4ee103638fc0a27 /mm/slub.c
parent	e35e1a9744bfc267bf511c2f37266103994466c8 (diff)
mm/slub: beautify code for 80 column limitation and tab alignment
Be sure of 80 column limitation for both code and comments.

Correct tab alignment for 'if-else' statement.

Acked-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Chen Gang <gang.chen@asianux.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
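The change is purely cosmetic: long declarations, calls, and comments are wrapped so that no line exceeds 80 columns, with continuation lines tab-aligned. As a sketch of the pattern (mirroring the slab_err() prototype that is rewrapped in the diff below):

    /* before: the declaration runs past 80 columns */
    static void slab_err(struct kmem_cache *s, struct page *page, const char *fmt, ...);

    /* after: the argument list is wrapped and the continuation tab-aligned */
    static void slab_err(struct kmem_cache *s, struct page *page,
    			const char *fmt, ...);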
Diffstat (limited to 'mm/slub.c')
-rw-r--r--	mm/slub.c	92
1 file changed, 56 insertions, 36 deletions
diff --git a/mm/slub.c b/mm/slub.c
index 4636c8810b8d..d51f75d565c8 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -373,7 +373,8 @@ static inline bool __cmpxchg_double_slab(struct kmem_cache *s, struct page *page
 #endif
 	{
 		slab_lock(page);
-		if (page->freelist == freelist_old && page->counters == counters_old) {
+		if (page->freelist == freelist_old &&
+					page->counters == counters_old) {
 			page->freelist = freelist_new;
 			page->counters = counters_new;
 			slab_unlock(page);
@@ -411,7 +412,8 @@ static inline bool cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
 
 	local_irq_save(flags);
 	slab_lock(page);
-	if (page->freelist == freelist_old && page->counters == counters_old) {
+	if (page->freelist == freelist_old &&
+				page->counters == counters_old) {
 		page->freelist = freelist_new;
 		page->counters = counters_new;
 		slab_unlock(page);
@@ -553,8 +555,9 @@ static void print_tracking(struct kmem_cache *s, void *object)
 
 static void print_page_info(struct page *page)
 {
-	printk(KERN_ERR "INFO: Slab 0x%p objects=%u used=%u fp=0x%p flags=0x%04lx\n",
-		page, page->objects, page->inuse, page->freelist, page->flags);
+	printk(KERN_ERR
+	       "INFO: Slab 0x%p objects=%u used=%u fp=0x%p flags=0x%04lx\n",
+	       page, page->objects, page->inuse, page->freelist, page->flags);
 
 }
 
@@ -629,7 +632,8 @@ static void object_err(struct kmem_cache *s, struct page *page,
 	print_trailer(s, page, object);
 }
 
-static void slab_err(struct kmem_cache *s, struct page *page, const char *fmt, ...)
+static void slab_err(struct kmem_cache *s, struct page *page,
+			const char *fmt, ...)
 {
 	va_list args;
 	char buf[100];
@@ -788,7 +792,8 @@ static int check_object(struct kmem_cache *s, struct page *page,
 	} else {
 		if ((s->flags & SLAB_POISON) && s->object_size < s->inuse) {
 			check_bytes_and_report(s, page, p, "Alignment padding",
-				endobject, POISON_INUSE, s->inuse - s->object_size);
+				endobject, POISON_INUSE,
+				s->inuse - s->object_size);
 		}
 	}
 
@@ -918,7 +923,8 @@ static void trace(struct kmem_cache *s, struct page *page, void *object,
 			page->freelist);
 
 		if (!alloc)
-			print_section("Object ", (void *)object, s->object_size);
+			print_section("Object ", (void *)object,
+					s->object_size);
 
 		dump_stack();
 	}
@@ -937,7 +943,8 @@ static inline int slab_pre_alloc_hook(struct kmem_cache *s, gfp_t flags)
 	return should_failslab(s->object_size, flags, s->flags);
 }
 
-static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags, void *object)
+static inline void slab_post_alloc_hook(struct kmem_cache *s,
+					gfp_t flags, void *object)
 {
 	flags &= gfp_allowed_mask;
 	kmemcheck_slab_alloc(s, flags, object, slab_ksize(s));
@@ -1039,7 +1046,8 @@ static void setup_object_debug(struct kmem_cache *s, struct page *page,
 	init_tracking(s, object);
 }
 
-static noinline int alloc_debug_processing(struct kmem_cache *s, struct page *page,
+static noinline int alloc_debug_processing(struct kmem_cache *s,
+					struct page *page,
 					void *object, unsigned long addr)
 {
 	if (!check_slab(s, page))
@@ -1743,7 +1751,8 @@ static void init_kmem_cache_cpus(struct kmem_cache *s)
 /*
  * Remove the cpu slab
  */
-static void deactivate_slab(struct kmem_cache *s, struct page *page, void *freelist)
+static void deactivate_slab(struct kmem_cache *s, struct page *page,
+				void *freelist)
 {
 	enum slab_modes { M_NONE, M_PARTIAL, M_FULL, M_FREE };
 	struct kmem_cache_node *n = get_node(s, page_to_nid(page));
@@ -2002,7 +2011,8 @@ static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
 		page->pobjects = pobjects;
 		page->next = oldpage;
 
-	} while (this_cpu_cmpxchg(s->cpu_slab->partial, oldpage, page) != oldpage);
+	} while (this_cpu_cmpxchg(s->cpu_slab->partial, oldpage, page)
+								!= oldpage);
 #endif
 }
 
@@ -2172,8 +2182,8 @@ static inline bool pfmemalloc_match(struct page *page, gfp_t gfpflags)
 }
 
 /*
- * Check the page->freelist of a page and either transfer the freelist to the per cpu freelist
- * or deactivate the page.
+ * Check the page->freelist of a page and either transfer the freelist to the
+ * per cpu freelist or deactivate the page.
  *
  * The page is still frozen if the return value is not NULL.
  *
@@ -2317,7 +2327,8 @@ new_slab:
 		goto load_freelist;
 
 	/* Only entered in the debug case */
-	if (kmem_cache_debug(s) && !alloc_debug_processing(s, page, freelist, addr))
+	if (kmem_cache_debug(s) &&
+			!alloc_debug_processing(s, page, freelist, addr))
 		goto new_slab;	/* Slab failed checks. Next slab needed */
 
 	deactivate_slab(s, page, get_freepointer(s, freelist));
@@ -2385,13 +2396,15 @@ redo:
 		 * The cmpxchg will only match if there was no additional
 		 * operation and if we are on the right processor.
 		 *
-		 * The cmpxchg does the following atomically (without lock semantics!)
+		 * The cmpxchg does the following atomically (without lock
+		 * semantics!)
 		 * 1. Relocate first pointer to the current per cpu area.
 		 * 2. Verify that tid and freelist have not been changed
 		 * 3. If they were not changed replace tid and freelist
 		 *
-		 * Since this is without lock semantics the protection is only against
-		 * code executing on this cpu *not* from access by other cpus.
+		 * Since this is without lock semantics the protection is only
+		 * against code executing on this cpu *not* from access by
+		 * other cpus.
 		 */
 		if (unlikely(!this_cpu_cmpxchg_double(
 				s->cpu_slab->freelist, s->cpu_slab->tid,
@@ -2423,7 +2436,8 @@ void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
 {
 	void *ret = slab_alloc(s, gfpflags, _RET_IP_);
 
-	trace_kmem_cache_alloc(_RET_IP_, ret, s->object_size, s->size, gfpflags);
+	trace_kmem_cache_alloc(_RET_IP_, ret, s->object_size,
+				s->size, gfpflags);
 
 	return ret;
 }
@@ -2515,8 +2529,10 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
 		if (kmem_cache_has_cpu_partial(s) && !prior)
 
 			/*
-			 * Slab was on no list before and will be partially empty
-			 * We can defer the list move and instead freeze it.
+			 * Slab was on no list before and will be
+			 * partially empty
+			 * We can defer the list move and instead
+			 * freeze it.
 			 */
 			new.frozen = 1;
 
@@ -3074,8 +3090,8 @@ static int kmem_cache_open(struct kmem_cache *s, unsigned long flags)
 	 * A) The number of objects from per cpu partial slabs dumped to the
 	 *    per node list when we reach the limit.
 	 * B) The number of objects in cpu partial slabs to extract from the
-	 *    per node list when we run out of per cpu objects. We only fetch 50%
-	 *    to keep some capacity around for frees.
+	 *    per node list when we run out of per cpu objects. We only fetch
+	 *    50% to keep some capacity around for frees.
 	 */
 	if (!kmem_cache_has_cpu_partial(s))
 		s->cpu_partial = 0;
@@ -3102,8 +3118,8 @@ error:
 	if (flags & SLAB_PANIC)
 		panic("Cannot create slab %s size=%lu realsize=%u "
 			"order=%u offset=%u flags=%lx\n",
-			s->name, (unsigned long)s->size, s->size, oo_order(s->oo),
-			s->offset, flags);
+			s->name, (unsigned long)s->size, s->size,
+			oo_order(s->oo), s->offset, flags);
 	return -EINVAL;
 }
 
@@ -3341,7 +3357,8 @@ bool verify_mem_not_deleted(const void *x)
 
 	slab_lock(page);
 	if (on_freelist(page->slab_cache, page, object)) {
-		object_err(page->slab_cache, page, object, "Object is on free-list");
+		object_err(page->slab_cache, page, object,
+				"Object is on free-list");
 		rv = false;
 	} else {
 		rv = true;
@@ -4165,15 +4182,17 @@ static int list_locations(struct kmem_cache *s, char *buf,
 				!cpumask_empty(to_cpumask(l->cpus)) &&
 				len < PAGE_SIZE - 60) {
 			len += sprintf(buf + len, " cpus=");
-			len += cpulist_scnprintf(buf + len, PAGE_SIZE - len - 50,
+			len += cpulist_scnprintf(buf + len,
+						 PAGE_SIZE - len - 50,
 						 to_cpumask(l->cpus));
 		}
 
 		if (nr_online_nodes > 1 && !nodes_empty(l->nodes) &&
 				len < PAGE_SIZE - 60) {
 			len += sprintf(buf + len, " nodes=");
-			len += nodelist_scnprintf(buf + len, PAGE_SIZE - len - 50,
-					l->nodes);
+			len += nodelist_scnprintf(buf + len,
+						  PAGE_SIZE - len - 50,
+						  l->nodes);
 		}
 
 		len += sprintf(buf + len, "\n");
@@ -4280,7 +4299,8 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
 		int cpu;
 
 		for_each_possible_cpu(cpu) {
-			struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
+			struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab,
+							       cpu);
 			int node;
 			struct page *page;
 
@@ -4314,12 +4334,11 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
 		for_each_node_state(node, N_NORMAL_MEMORY) {
 			struct kmem_cache_node *n = get_node(s, node);
 
-		if (flags & SO_TOTAL)
-			x = atomic_long_read(&n->total_objects);
-		else if (flags & SO_OBJECTS)
-			x = atomic_long_read(&n->total_objects) -
-				count_partial(n, count_free);
-
+			if (flags & SO_TOTAL)
+				x = atomic_long_read(&n->total_objects);
+			else if (flags & SO_OBJECTS)
+				x = atomic_long_read(&n->total_objects) -
+					count_partial(n, count_free);
 			else
 				x = atomic_long_read(&n->nr_slabs);
 			total += x;
@@ -5135,7 +5154,8 @@ static char *create_unique_id(struct kmem_cache *s)
 
 #ifdef CONFIG_MEMCG_KMEM
 	if (!is_root_cache(s))
-		p += sprintf(p, "-%08d", memcg_cache_id(s->memcg_params->memcg));
+		p += sprintf(p, "-%08d",
+				memcg_cache_id(s->memcg_params->memcg));
 #endif
 
 	BUG_ON(p > name + ID_STR_LENGTH - 1);