author    Pekka Enberg <penberg@cs.helsinki.fi>    2008-01-08 02:20:27 -0500
committer Christoph Lameter <clameter@sgi.com>    2008-02-04 13:56:02 -0500
commit    064287807c9dd64688084d34c6748a326b5f3ec8 (patch)
tree      49bbcae6aa117cb917a14a1aa30618f533b18262
parent    7c2e132c54c79af4e524154074b9a02c3c0d6072 (diff)

SLUB: Fix coding style violations

This fixes most of the obvious coding style violations in mm/slub.c
as reported by checkpatch.

Acked-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Christoph Lameter <clameter@sgi.com>
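For context, the report that motivated this cleanup comes from the kernel's
own style checker. A plausible invocation on a tree of this era (checkpatch's
--file mode checks a whole source file rather than a patch) would be:

	$ scripts/checkpatch.pl --file mm/slub.c

The hunks below address the usual complaints it raises: printk continuation
lines without KERN_CONT, missing spaces around binary operators, "} else"
placement, pointer-declaration spacing ("type * name"), and space before a
function-call parenthesis.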
 mm/slub.c | 46 +++++++++++++++++++++++-----------------------
 1 file changed, 23 insertions(+), 23 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index e160f28ab051..a660834416ac 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -357,22 +357,22 @@ static void print_section(char *text, u8 *addr, unsigned int length)
 			printk(KERN_ERR "%8s 0x%p: ", text, addr + i);
 			newline = 0;
 		}
-		printk(" %02x", addr[i]);
+		printk(KERN_CONT " %02x", addr[i]);
 		offset = i % 16;
 		ascii[offset] = isgraph(addr[i]) ? addr[i] : '.';
 		if (offset == 15) {
-			printk(" %s\n",ascii);
+			printk(KERN_CONT " %s\n", ascii);
 			newline = 1;
 		}
 	}
 	if (!newline) {
 		i %= 16;
 		while (i < 16) {
-			printk("   ");
+			printk(KERN_CONT "   ");
 			ascii[i] = ' ';
 			i++;
 		}
-		printk(" %s\n", ascii);
+		printk(KERN_CONT " %s\n", ascii);
 	}
 }
 
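The KERN_CONT changes in this hunk follow the printk continuation convention:
only the fragment that starts a line carries a log level, and every later
fragment is marked as a continuation so it is not treated as a new message.
A minimal sketch of the pattern, using the names from the hunk above:

	/* first fragment sets the level and opens the line */
	printk(KERN_ERR "%8s 0x%p: ", text, addr + i);
	/* later fragments explicitly continue that line */
	printk(KERN_CONT " %02x", addr[i]);
	/* the final fragment terminates it with a newline */
	printk(KERN_CONT " %s\n", ascii);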
@@ -532,7 +532,7 @@ static void init_object(struct kmem_cache *s, void *object, int active)
 
 	if (s->flags & __OBJECT_POISON) {
 		memset(p, POISON_FREE, s->objsize - 1);
-		p[s->objsize -1] = POISON_END;
+		p[s->objsize - 1] = POISON_END;
 	}
 
 	if (s->flags & SLAB_RED_ZONE)
@@ -561,7 +561,7 @@ static void restore_bytes(struct kmem_cache *s, char *message, u8 data,
 
 static int check_bytes_and_report(struct kmem_cache *s, struct page *page,
 			u8 *object, char *what,
-			u8* start, unsigned int value, unsigned int bytes)
+			u8 *start, unsigned int value, unsigned int bytes)
 {
 	u8 *fault;
 	u8 *end;
@@ -695,7 +695,7 @@ static int check_object(struct kmem_cache *s, struct page *page,
 		    (!check_bytes_and_report(s, page, p, "Poison", p,
 				POISON_FREE, s->objsize - 1) ||
 		     !check_bytes_and_report(s, page, p, "Poison",
-			p + s->objsize -1, POISON_END, 1)))
+			p + s->objsize - 1, POISON_END, 1)))
 			return 0;
 		/*
 		 * check_pad_bytes cleans up on its own.
@@ -903,8 +903,7 @@ static int free_debug_processing(struct kmem_cache *s, struct page *page,
903 "SLUB <none>: no slab for object 0x%p.\n", 903 "SLUB <none>: no slab for object 0x%p.\n",
904 object); 904 object);
905 dump_stack(); 905 dump_stack();
906 } 906 } else
907 else
908 object_err(s, page, object, 907 object_err(s, page, object,
909 "page slab pointer corrupt."); 908 "page slab pointer corrupt.");
910 goto fail; 909 goto fail;
@@ -950,7 +949,7 @@ static int __init setup_slub_debug(char *str)
 	/*
 	 * Determine which debug features should be switched on
 	 */
-	for ( ;*str && *str != ','; str++) {
+	for (; *str && *str != ','; str++) {
 		switch (tolower(*str)) {
 		case 'f':
 			slub_debug |= SLAB_DEBUG_FREE;
@@ -969,7 +968,7 @@ static int __init setup_slub_debug(char *str)
 			break;
 		default:
 			printk(KERN_ERR "slub_debug option '%c' "
-				"unknown. skipped\n",*str);
+				"unknown. skipped\n", *str);
 		}
 	}
 
@@ -1042,7 +1041,7 @@ static inline unsigned long kmem_cache_flags(unsigned long objsize,
  */
 static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
 {
-	struct page * page;
+	struct page *page;
 	int pages = 1 << s->order;
 
 	if (s->order)
@@ -1138,7 +1137,7 @@ static void __free_slab(struct kmem_cache *s, struct page *page)
 	mod_zone_page_state(page_zone(page),
 		(s->flags & SLAB_RECLAIM_ACCOUNT) ?
 		NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
-		- pages);
+		-pages);
 
 	__free_pages(page, s->order);
 }
@@ -1542,7 +1541,7 @@ debug:
  *
  * Otherwise we can simply pick the next object from the lockless free list.
  */
-static void __always_inline *slab_alloc(struct kmem_cache *s,
+static __always_inline void *slab_alloc(struct kmem_cache *s,
 		gfp_t gfpflags, int node, void *addr)
 {
 	void **object;
@@ -1650,7 +1649,7 @@ debug:
  * If fastpath is not possible then fall back to __slab_free where we deal
  * with all sorts of special processing.
  */
-static void __always_inline slab_free(struct kmem_cache *s,
+static __always_inline void slab_free(struct kmem_cache *s,
 			struct page *page, void *x, void *addr)
 {
 	void **object = (void *)x;
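These two hunks fix the same declaration-order problem: a function attribute
such as __always_inline belongs with the storage class, ahead of the return
type. In the old form the attribute splits the return type, leaving "void"
and a dangling "*"; the new order keeps "void *" intact. In sketch form:

	/* before: attribute wedged inside the return type */
	static void __always_inline *slab_alloc(struct kmem_cache *s,
			gfp_t gfpflags, int node, void *addr);
	/* after: storage class, attribute, then the whole return type */
	static __always_inline void *slab_alloc(struct kmem_cache *s,
			gfp_t gfpflags, int node, void *addr);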
@@ -2231,7 +2230,7 @@ error:
  */
 int kmem_ptr_validate(struct kmem_cache *s, const void *object)
 {
-	struct page * page;
+	struct page *page;
 
 	page = get_object_page(object);
 
@@ -2343,7 +2342,7 @@ static struct kmem_cache *kmalloc_caches_dma[PAGE_SHIFT];
 
 static int __init setup_slub_min_order(char *str)
 {
-	get_option (&str, &slub_min_order);
+	get_option(&str, &slub_min_order);
 
 	return 1;
 }
@@ -2352,7 +2351,7 @@ __setup("slub_min_order=", setup_slub_min_order);
 
 static int __init setup_slub_max_order(char *str)
 {
-	get_option (&str, &slub_max_order);
+	get_option(&str, &slub_max_order);
 
 	return 1;
 }
@@ -2361,7 +2360,7 @@ __setup("slub_max_order=", setup_slub_max_order);
 
 static int __init setup_slub_min_objects(char *str)
 {
-	get_option (&str, &slub_min_objects);
+	get_option(&str, &slub_min_objects);
 
 	return 1;
 }
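All three setup hunks remove the same space before a call parenthesis. For
reference, these parsers are hooked to kernel command-line options through
__setup() (visible in the hunk headers), and get_option() parses a single
integer out of the option string. A minimal sketch of the pattern, mirroring
the code above:

	static int __init setup_slub_min_order(char *str)
	{
		get_option(&str, &slub_min_order);	/* parse "slub_min_order=N" */
		return 1;				/* 1: option handled */
	}
	__setup("slub_min_order=", setup_slub_min_order);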
@@ -2946,7 +2945,7 @@ static struct kmem_cache *find_mergeable(size_t size,
 		 * Check if alignment is compatible.
 		 * Courtesy of Adrian Drzewiecki
 		 */
-		if ((s->size & ~(align -1)) != s->size)
+		if ((s->size & ~(align - 1)) != s->size)
 			continue;
 
 		if (s->size - size >= sizeof(void *))
@@ -3055,8 +3054,9 @@ static int __cpuinit slab_cpuup_callback(struct notifier_block *nfb,
 	return NOTIFY_OK;
 }
 
-static struct notifier_block __cpuinitdata slab_notifier =
-	{ &slab_cpuup_callback, NULL, 0 };
+static struct notifier_block __cpuinitdata slab_notifier = {
+	&slab_cpuup_callback, NULL, 0
+};
 
 #endif
 
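The notifier hunk only rewraps the braces; the initializer stays positional.
As an aside (not part of the patch), struct notifier_block's first three
members are notifier_call, next, and priority, so the equivalent designated
form would be:

	static struct notifier_block __cpuinitdata slab_notifier = {
		.notifier_call	= &slab_cpuup_callback,
		.next		= NULL,
		.priority	= 0,
	};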
@@ -3864,7 +3864,7 @@ static ssize_t remote_node_defrag_ratio_store(struct kmem_cache *s,
 SLAB_ATTR(remote_node_defrag_ratio);
 #endif
 
-static struct attribute * slab_attrs[] = {
+static struct attribute *slab_attrs[] = {
 	&slab_size_attr.attr,
 	&object_size_attr.attr,
 	&objs_per_slab_attr.attr,