diff options
author | Joe Perches <joe@perches.com> | 2015-04-14 18:44:31 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2015-04-14 19:48:59 -0400 |
commit | 6f6528a1632cb9661a2ff46e217b07d84a80eff6 (patch) | |
tree | 75d228d6e6529d570b8357f02440c7add33dc174 /mm/slub.c | |
parent | 124dee09f0669b92cc0073b00984d53541ca0884 (diff) |
slub: use bool function return values of true/false not 1/0
Use the normal return values for bool functions
Signed-off-by: Joe Perches <joe@perches.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Acked-by: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/slub.c')
-rw-r--r-- | mm/slub.c | 12 |
1 file changed, 6 insertions, 6 deletions
@@ -374,7 +374,7 @@ static inline bool __cmpxchg_double_slab(struct kmem_cache *s, struct page *page | |||
374 | if (cmpxchg_double(&page->freelist, &page->counters, | 374 | if (cmpxchg_double(&page->freelist, &page->counters, |
375 | freelist_old, counters_old, | 375 | freelist_old, counters_old, |
376 | freelist_new, counters_new)) | 376 | freelist_new, counters_new)) |
377 | return 1; | 377 | return true; |
378 | } else | 378 | } else |
379 | #endif | 379 | #endif |
380 | { | 380 | { |
@@ -384,7 +384,7 @@ static inline bool __cmpxchg_double_slab(struct kmem_cache *s, struct page *page | |||
384 | page->freelist = freelist_new; | 384 | page->freelist = freelist_new; |
385 | set_page_slub_counters(page, counters_new); | 385 | set_page_slub_counters(page, counters_new); |
386 | slab_unlock(page); | 386 | slab_unlock(page); |
387 | return 1; | 387 | return true; |
388 | } | 388 | } |
389 | slab_unlock(page); | 389 | slab_unlock(page); |
390 | } | 390 | } |
@@ -396,7 +396,7 @@ static inline bool __cmpxchg_double_slab(struct kmem_cache *s, struct page *page | |||
396 | pr_info("%s %s: cmpxchg double redo ", n, s->name); | 396 | pr_info("%s %s: cmpxchg double redo ", n, s->name); |
397 | #endif | 397 | #endif |
398 | 398 | ||
399 | return 0; | 399 | return false; |
400 | } | 400 | } |
401 | 401 | ||
402 | static inline bool cmpxchg_double_slab(struct kmem_cache *s, struct page *page, | 402 | static inline bool cmpxchg_double_slab(struct kmem_cache *s, struct page *page, |
@@ -410,7 +410,7 @@ static inline bool cmpxchg_double_slab(struct kmem_cache *s, struct page *page, | |||
410 | if (cmpxchg_double(&page->freelist, &page->counters, | 410 | if (cmpxchg_double(&page->freelist, &page->counters, |
411 | freelist_old, counters_old, | 411 | freelist_old, counters_old, |
412 | freelist_new, counters_new)) | 412 | freelist_new, counters_new)) |
413 | return 1; | 413 | return true; |
414 | } else | 414 | } else |
415 | #endif | 415 | #endif |
416 | { | 416 | { |
@@ -424,7 +424,7 @@ static inline bool cmpxchg_double_slab(struct kmem_cache *s, struct page *page, | |||
424 | set_page_slub_counters(page, counters_new); | 424 | set_page_slub_counters(page, counters_new); |
425 | slab_unlock(page); | 425 | slab_unlock(page); |
426 | local_irq_restore(flags); | 426 | local_irq_restore(flags); |
427 | return 1; | 427 | return true; |
428 | } | 428 | } |
429 | slab_unlock(page); | 429 | slab_unlock(page); |
430 | local_irq_restore(flags); | 430 | local_irq_restore(flags); |
@@ -437,7 +437,7 @@ static inline bool cmpxchg_double_slab(struct kmem_cache *s, struct page *page, | |||
437 | pr_info("%s %s: cmpxchg double redo ", n, s->name); | 437 | pr_info("%s %s: cmpxchg double redo ", n, s->name); |
438 | #endif | 438 | #endif |
439 | 439 | ||
440 | return 0; | 440 | return false; |
441 | } | 441 | } |
442 | 442 | ||
443 | #ifdef CONFIG_SLUB_DEBUG | 443 | #ifdef CONFIG_SLUB_DEBUG |