author	LQYMGT <lqymgt@gmail.com>	2014-12-10 18:42:13 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-12-10 20:41:04 -0500
commit	b455def28d8a22aee4a13d065b3fd1d296833606 (patch)
tree	dc8e11d7ee505595fd5e23ffd106dfd2094e756c
parent	e2ab879e96b5e65bf8ce1123f3b7f01ebba27204 (diff)
mm: slab/slub: coding style: whitespaces and tabs mixture
Some code in mm/slab.c and mm/slub.c uses whitespace for indentation.
Clean it up.

Signed-off-by: LQYMGT <lqymgt@gmail.com>
Acked-by: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
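The change is whitespace-only: space-indented lines are re-indented with tabs, as the kernel coding style requires. Since tabs and spaces render alike in the hunks below, a minimal illustration of the kind of change (lines taken from the patch; indentation spelled out in comments):

	/* before: indented with spaces */
                        if (!new_alien)		/* 24 spaces */
                                goto fail;	/* 32 spaces */

	/* after: indented with tabs */
			if (!new_alien)		/* 3 tabs */
				goto fail;	/* 4 tabs */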
-rw-r--r--	mm/slab.c	10
-rw-r--r--	mm/slub.c	10
2 files changed, 10 insertions(+), 10 deletions(-)
diff --git a/mm/slab.c b/mm/slab.c
index f34e053ec46e..eae2d21cc14f 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3580,11 +3580,11 @@ static int alloc_kmem_cache_node(struct kmem_cache *cachep, gfp_t gfp)
 
 	for_each_online_node(node) {
 
-                if (use_alien_caches) {
-                        new_alien = alloc_alien_cache(node, cachep->limit, gfp);
-                        if (!new_alien)
-                                goto fail;
-                }
+		if (use_alien_caches) {
+			new_alien = alloc_alien_cache(node, cachep->limit, gfp);
+			if (!new_alien)
+				goto fail;
+		}
 
 		new_shared = NULL;
 		if (cachep->shared) {
diff --git a/mm/slub.c b/mm/slub.c
index ae7b9f1ad394..761789ea1d09 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2554,7 +2554,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
 
 	} else { /* Needs to be taken off a list */
 
-	        n = get_node(s, page_to_nid(page));
+		n = get_node(s, page_to_nid(page));
 		/*
 		 * Speculatively acquire the list_lock.
 		 * If the cmpxchg does not succeed then we may
@@ -2587,10 +2587,10 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
 		 * The list lock was not taken therefore no list
 		 * activity can be necessary.
 		 */
-                if (was_frozen)
-                        stat(s, FREE_FROZEN);
-                return;
-        }
+		if (was_frozen)
+			stat(s, FREE_FROZEN);
+		return;
+	}
 
 	if (unlikely(!new.inuse && n->nr_partial >= s->min_partial))
 		goto slab_empty;
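For context, the comment touched in the first mm/slub.c hunk describes a speculative-locking pattern: __slab_free() takes n->list_lock before a cmpxchg that usually succeeds, and drops it again if the cmpxchg fails or the lock turns out to be unneeded. A minimal user-space sketch of that pattern, assuming C11 atomics and pthreads; all names here (speculative_update, needs_list_work, struct node's fields) are hypothetical, not the kernel's:

	#include <pthread.h>
	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdio.h>

	struct node {
		pthread_mutex_t list_lock;
		atomic_int state;
	};

	/* Hypothetical policy: does old_state -> new_state require list work? */
	static bool needs_list_work(int old_state, int new_state)
	{
		return old_state != new_state;	/* placeholder condition */
	}

	static bool speculative_update(struct node *n, int expected, int new_state)
	{
		bool locked = false;

		if (needs_list_work(expected, new_state)) {
			/*
			 * Speculatively acquire the list_lock: the cmpxchg
			 * below will probably succeed, and then the lock is
			 * already held for the list manipulation that follows.
			 */
			pthread_mutex_lock(&n->list_lock);
			locked = true;
		}

		if (!atomic_compare_exchange_strong(&n->state, &expected, new_state)) {
			/* cmpxchg lost a race: drop the speculatively taken lock. */
			if (locked)
				pthread_mutex_unlock(&n->list_lock);
			return false;
		}

		if (locked) {
			/* ... list manipulation would happen here ... */
			pthread_mutex_unlock(&n->list_lock);
		}
		return true;
	}

	int main(void)
	{
		struct node n = { PTHREAD_MUTEX_INITIALIZER, 1 };

		/* Retry on failure, like the cmpxchg loop in __slab_free(). */
		while (!speculative_update(&n, 1, 0))
			;
		printf("state=%d\n", atomic_load(&n.state));
		return 0;
	}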