author     Ingo Molnar <mingo@elte.hu>                        2008-02-05 20:57:39 -0500
committer  Christoph Lameter <christoph@stapp.engr.sgi.com>   2008-02-07 20:52:39 -0500
commit     3adbefee6fd58a061b2bf1df4f3769701860fc62 (patch)
tree       846b65d2aedbdeabece8e661761960e54bc11f6c /mm
parent     a76d354629ea46c449705970a2c0b9e9090d6f03 (diff)
SLUB: fix checkpatch warnings
fix checkpatch --file mm/slub.c errors and warnings.
$ q-code-quality-compare
                        errors   lines of code   errors/KLOC
  mm/slub.c [before]        22            4204           5.2
  mm/slub.c  [after]         0            4210             0
no code changed:
text data bss dec hex filename
22195 8634 136 30965 78f5 slub.o.before
22195 8634 136 30965 78f5 slub.o.after
md5:
93cdfbec2d6450622163c590e1064358 slub.o.before.asm
93cdfbec2d6450622163c590e1064358 slub.o.after.asm
[clameter: rediffed against Pekka's cleanup patch, omitted
moves of the name of a function to the start of line]
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Diffstat (limited to 'mm')
-rw-r--r--   mm/slub.c   37
1 file changed, 21 insertions, 16 deletions
@@ -719,9 +719,10 @@ static int check_object(struct kmem_cache *s, struct page *page,
                         endobject, red, s->inuse - s->objsize))
                         return 0;
         } else {
-                if ((s->flags & SLAB_POISON) && s->objsize < s->inuse)
-                        check_bytes_and_report(s, page, p, "Alignment padding", endobject,
-                                POISON_INUSE, s->inuse - s->objsize);
+                if ((s->flags & SLAB_POISON) && s->objsize < s->inuse) {
+                        check_bytes_and_report(s, page, p, "Alignment padding",
+                                endobject, POISON_INUSE, s->inuse - s->objsize);
+                }
         }
 
         if (s->flags & SLAB_POISON) {
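
The hunk above only adds braces: checkpatch warns when the body of a conditional is a single statement that wraps onto a second line, since the statement boundary is easy to misread. A minimal sketch of the style rule, with a hypothetical report() helper standing in for check_bytes_and_report():

#include <stdio.h>

/*
 * report() is a made-up stand-in, not a kernel function; the point is purely
 * stylistic: once a branch body wraps onto a second line, checkpatch wants
 * the branch enclosed in braces.
 */
static void report(const char *what, int from, int len)
{
        printf("%s: bytes %d..%d\n", what, from, from + len);
}

int main(void)
{
        int objsize = 32, inuse = 48, poisoned = 1;

        /* Preferred form after the patch: braces around the wrapped call. */
        if (poisoned && objsize < inuse) {
                report("Alignment padding",
                       objsize, inuse - objsize);
        }
        return 0;
}
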
@@ -928,11 +929,10 @@ static int free_debug_processing(struct kmem_cache *s, struct page *page,
                 return 0;
 
         if (unlikely(s != page->slab)) {
-                if (!PageSlab(page))
+                if (!PageSlab(page)) {
                         slab_err(s, page, "Attempt to free object(0x%p) "
                                 "outside of slab", object);
-                else
-                if (!page->slab) {
+                } else if (!page->slab) {
                         printk(KERN_ERR
                                 "SLUB <none>: no slab for object 0x%p.\n",
                                 object);
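
This hunk flattens an "else { if (...) }" ladder into "else if (...)"; the control flow is identical, only the extra nesting level that checkpatch objects to goes away. A small standalone sketch of the same transformation, where page_is_slab() and page_has_cache() are made-up predicates mirroring the shape of the PageSlab()/page->slab checks, not kernel APIs:

#include <stdio.h>

static int page_is_slab(int p)   { return p & 1; }
static int page_has_cache(int p) { return p & 2; }

static void classify(int page)
{
        /*
         * An "else if" chain has exactly the same control flow as
         * "else { if (...) { ... } }", but without the extra indentation.
         */
        if (!page_is_slab(page)) {
                printf("page %d: object outside of slab\n", page);
        } else if (!page_has_cache(page)) {
                printf("page %d: no slab cache for object\n", page);
        } else {
                printf("page %d: object belongs to some cache\n", page);
        }
}

int main(void)
{
        for (int p = 0; p < 4; p++)
                classify(p);
        return 0;
}
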
@@ -1041,7 +1041,7 @@ static unsigned long kmem_cache_flags(unsigned long objsize,
          */
         if (slub_debug && (!slub_debug_slabs ||
             strncmp(slub_debug_slabs, name,
-            strlen(slub_debug_slabs)) == 0))
+                        strlen(slub_debug_slabs)) == 0))
                         flags |= slub_debug;
         }
 
@@ -1330,8 +1330,8 @@ static struct page *get_any_partial(struct kmem_cache *s, gfp_t flags)
                         get_cycles() % 1024 > s->remote_node_defrag_ratio)
                 return NULL;
 
-        zonelist = &NODE_DATA(slab_node(current->mempolicy))
-                                        ->node_zonelists[gfp_zone(flags)];
+        zonelist = &NODE_DATA(
+                slab_node(current->mempolicy))->node_zonelists[gfp_zone(flags)];
         for (z = zonelist->zones; *z; z++) {
                 struct kmem_cache_node *n;
 
@@ -2589,7 +2589,8 @@ static noinline struct kmem_cache *dma_kmalloc_cache(int index, gfp_t flags)
                 goto unlock_out;
 
         realsize = kmalloc_caches[index].objsize;
-        text = kasprintf(flags & ~SLUB_DMA, "kmalloc_dma-%d", (unsigned int)realsize),
+        text = kasprintf(flags & ~SLUB_DMA, "kmalloc_dma-%d",
+                 (unsigned int)realsize);
         s = kmalloc(kmem_size, flags & ~SLUB_DMA);
 
         if (!s || !text || !kmem_cache_open(s, flags, text,
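
Note that the original kasprintf() line ended in a comma rather than a semicolon, so the following assignment to s was joined to it by the comma operator; the rewrite splits them into two statements. Either way the calls run in the same order and the generated code is the same, which is consistent with the unchanged object-file md5 in the commit message. A standalone sketch of that equivalence, with trivial f()/g() stand-ins rather than kernel functions:

#include <stdio.h>

static int f(void) { return 1; }
static int g(void) { return 2; }

int main(void)
{
        int a, b, c, d;

        /*
         * One expression statement using the comma operator: f() is called,
         * then g(), with a sequence point between the two assignments...
         */
        a = f(), b = g();

        /* ...which is equivalent to writing two separate statements. */
        c = f();
        d = g();

        printf("%d %d %d %d\n", a, b, c, d);
        return 0;
}
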
@@ -3040,7 +3041,8 @@ void __init kmem_cache_init(void)
 #endif
 
 
-        printk(KERN_INFO "SLUB: Genslabs=%d, HWalign=%d, Order=%d-%d, MinObjects=%d,"
+        printk(KERN_INFO
+                "SLUB: Genslabs=%d, HWalign=%d, Order=%d-%d, MinObjects=%d,"
                 " CPUs=%d, Nodes=%d\n",
                 caches, cache_line_size(),
                 slub_min_order, slub_max_order, slub_min_objects,
@@ -3207,7 +3209,7 @@ static int __cpuinit slab_cpuup_callback(struct notifier_block *nfb,
 }
 
 static struct notifier_block __cpuinitdata slab_notifier = {
-        &slab_cpuup_callback, NULL, 0
+        .notifier_call = slab_cpuup_callback
 };
 
 #endif
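
Here a positional struct initializer is replaced with a designated one: it names the single field being set, no longer depends on the field order of struct notifier_block, and zero-initializes everything left out. A cut-down sketch with a toy struct (not the real kernel type) to contrast the two forms:

#include <stdio.h>

/* toy_notifier_block only imitates the shape of struct notifier_block. */
struct toy_notifier_block {
        int (*notifier_call)(void *data);
        struct toy_notifier_block *next;
        int priority;
};

static int toy_callback(void *data)
{
        printf("notified: %s\n", (const char *)data);
        return 0;
}

/*
 * Old positional form would be { toy_callback, NULL, 0 } and relies on the
 * field order. The designated form names the one field that matters; the
 * omitted members (next, priority) are zero-initialized.
 */
static struct toy_notifier_block toy_notifier = {
        .notifier_call = toy_callback,
};

int main(void)
{
        char event[] = "cpu up";

        return toy_notifier.notifier_call(event);
}
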
@@ -3365,8 +3367,9 @@ static void resiliency_test(void)
         p = kzalloc(32, GFP_KERNEL);
         p[32 + sizeof(void *)] = 0x34;
         printk(KERN_ERR "\n2. kmalloc-32: Clobber next pointer/next slab"
-                         " 0x34 -> -0x%p\n", p);
-        printk(KERN_ERR "If allocated object is overwritten then not detectable\n\n");
+                        " 0x34 -> -0x%p\n", p);
+        printk(KERN_ERR
+                "If allocated object is overwritten then not detectable\n\n");
 
         validate_slab_cache(kmalloc_caches + 5);
         p = kzalloc(64, GFP_KERNEL);
@@ -3374,7 +3377,8 @@ static void resiliency_test(void)
         *p = 0x56;
         printk(KERN_ERR "\n3. kmalloc-64: corrupting random byte 0x56->0x%p\n",
                                                                         p);
-        printk(KERN_ERR "If allocated object is overwritten then not detectable\n\n");
+        printk(KERN_ERR
+                "If allocated object is overwritten then not detectable\n\n");
         validate_slab_cache(kmalloc_caches + 6);
 
         printk(KERN_ERR "\nB. Corruption after free\n");
@@ -3387,7 +3391,8 @@ static void resiliency_test(void)
         p = kzalloc(256, GFP_KERNEL);
         kfree(p);
         p[50] = 0x9a;
-        printk(KERN_ERR "\n2. kmalloc-256: Clobber 50th byte 0x9a->0x%p\n\n", p);
+        printk(KERN_ERR "\n2. kmalloc-256: Clobber 50th byte 0x9a->0x%p\n\n",
+                        p);
         validate_slab_cache(kmalloc_caches + 8);
 
         p = kzalloc(512, GFP_KERNEL);