author	Raghavendra K T <raghavendra.kt@linux.vnet.ibm.com>	2015-11-05 21:46:26 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2015-11-05 22:34:48 -0500
commit	145949a1387ba7a4fd0df15181e09345ec7b0492 (patch)
tree	cb9ad2f476f95ced9228dd79ce30ddd69653f8eb
parent	61f9ec1d8e97131ce55159647fcdfeccc0f40647 (diff)
mm/list_lru.c: replace nr_node_ids for loop with for_each_node()
The functions touched by this patch are on the slow path, which is called whenever alloc_super() is invoked during a mount. Although this should not make a difference on architectures with sequential NUMA node ids, on powerpc, which can have sparse node ids (e.g., a 4-node system with node ids 0, 1, 16, 17 is common), this patch avoids unnecessary allocations for non-existent NUMA nodes. Even without that saving, the patch arguably makes the code more readable.

[vdavydov@parallels.com: take memcg_aware check outside for_each loop]
Signed-off-by: Raghavendra K T <raghavendra.kt@linux.vnet.ibm.com>
Reviewed-by: Vladimir Davydov <vdavydov@parallels.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Anton Blanchard <anton@samba.org>
Cc: Nishanth Aravamudan <nacc@linux.vnet.ibm.com>
Cc: Greg Kurz <gkurz@linux.vnet.ibm.com>
Cc: Grant Likely <grant.likely@linaro.org>
Cc: Nikunj A Dadhania <nikunj@linux.vnet.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--	mm/list_lru.c	34
1 file changed, 23 insertions(+), 11 deletions(-)
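For readers unfamiliar with sparse node ids, the sketch below (plain userspace C, not kernel code) illustrates the difference the commit message describes: a dense for (i = 0; i < nr_node_ids; i++) loop walks every id up to the highest possible node, while a for_each_node()-style walk over a possible-node mask visits only ids that actually have a node behind them. The node_possible array, MAX_NUMNODES value and printed counts are assumptions chosen to mirror the 0, 1, 16, 17 example; the real kernel iterates its possible-node mask via for_each_node().

/*
 * Userspace sketch (not kernel code): why walking 0..nr_node_ids-1 differs
 * from a for_each_node()-style walk when NUMA node ids are sparse.
 * node_possible[], MAX_NUMNODES and the counts below are illustrative
 * assumptions modelled on the 0, 1, 16, 17 layout from the commit message,
 * not values taken from the kernel.
 */
#include <stdbool.h>
#include <stdio.h>

#define MAX_NUMNODES 32

/* Hypothetical possible-node mask: only ids 0, 1, 16 and 17 exist. */
static const bool node_possible[MAX_NUMNODES] = {
	[0] = true, [1] = true, [16] = true, [17] = true,
};

/* nr_node_ids is (highest possible node id + 1), i.e. 18 for this layout. */
static const int nr_node_ids = 18;

int main(void)
{
	int dense = 0, sparse = 0;
	int i;

	/* Old pattern: visits every id below nr_node_ids, existing or not. */
	for (i = 0; i < nr_node_ids; i++)
		dense++;

	/* for_each_node()-style pattern: skips ids with no backing node. */
	for (i = 0; i < MAX_NUMNODES; i++) {
		if (!node_possible[i])
			continue;
		sparse++;
	}

	printf("dense loop iterations:  %d\n", dense);	/* 18 */
	printf("sparse loop iterations: %d\n", sparse);	/* 4 */
	return 0;
}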
diff --git a/mm/list_lru.c b/mm/list_lru.c
index e1da19fac1b3..28237476b055 100644
--- a/mm/list_lru.c
+++ b/mm/list_lru.c
@@ -42,6 +42,10 @@ static void list_lru_unregister(struct list_lru *lru)
 #ifdef CONFIG_MEMCG_KMEM
 static inline bool list_lru_memcg_aware(struct list_lru *lru)
 {
+	/*
+	 * This needs node 0 to be always present, even
+	 * in the systems supporting sparse numa ids.
+	 */
 	return !!lru->node[0].memcg_lrus;
 }
 
@@ -377,16 +381,20 @@ static int memcg_init_list_lru(struct list_lru *lru, bool memcg_aware)
 {
 	int i;
 
-	for (i = 0; i < nr_node_ids; i++) {
-		if (!memcg_aware)
-			lru->node[i].memcg_lrus = NULL;
-		else if (memcg_init_list_lru_node(&lru->node[i]))
+	if (!memcg_aware)
+		return 0;
+
+	for_each_node(i) {
+		if (memcg_init_list_lru_node(&lru->node[i]))
 			goto fail;
 	}
 	return 0;
 fail:
-	for (i = i - 1; i >= 0; i--)
+	for (i = i - 1; i >= 0; i--) {
+		if (!lru->node[i].memcg_lrus)
+			continue;
 		memcg_destroy_list_lru_node(&lru->node[i]);
+	}
 	return -ENOMEM;
 }
 
@@ -397,7 +405,7 @@ static void memcg_destroy_list_lru(struct list_lru *lru)
 	if (!list_lru_memcg_aware(lru))
 		return;
 
-	for (i = 0; i < nr_node_ids; i++)
+	for_each_node(i)
 		memcg_destroy_list_lru_node(&lru->node[i]);
 }
 
@@ -409,16 +417,20 @@ static int memcg_update_list_lru(struct list_lru *lru,
 	if (!list_lru_memcg_aware(lru))
 		return 0;
 
-	for (i = 0; i < nr_node_ids; i++) {
+	for_each_node(i) {
 		if (memcg_update_list_lru_node(&lru->node[i],
 					       old_size, new_size))
 			goto fail;
 	}
 	return 0;
 fail:
-	for (i = i - 1; i >= 0; i--)
+	for (i = i - 1; i >= 0; i--) {
+		if (!lru->node[i].memcg_lrus)
+			continue;
+
 		memcg_cancel_update_list_lru_node(&lru->node[i],
 						  old_size, new_size);
+	}
 	return -ENOMEM;
 }
 
@@ -430,7 +442,7 @@ static void memcg_cancel_update_list_lru(struct list_lru *lru,
 	if (!list_lru_memcg_aware(lru))
 		return;
 
-	for (i = 0; i < nr_node_ids; i++)
+	for_each_node(i)
 		memcg_cancel_update_list_lru_node(&lru->node[i],
 						  old_size, new_size);
 }
@@ -485,7 +497,7 @@ static void memcg_drain_list_lru(struct list_lru *lru,
 	if (!list_lru_memcg_aware(lru))
 		return;
 
-	for (i = 0; i < nr_node_ids; i++)
+	for_each_node(i)
 		memcg_drain_list_lru_node(&lru->node[i], src_idx, dst_idx);
 }
 
@@ -522,7 +534,7 @@ int __list_lru_init(struct list_lru *lru, bool memcg_aware,
 	if (!lru->node)
 		goto out;
 
-	for (i = 0; i < nr_node_ids; i++) {
+	for_each_node(i) {
 		spin_lock_init(&lru->node[i].lock);
 		if (key)
 			lockdep_set_class(&lru->node[i].lock, key);