Diffstat (limited to 'mm/memcontrol.c')
-rw-r--r--   mm/memcontrol.c   67
1 file changed, 50 insertions, 17 deletions
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 7acf43bf04a2..dd39ba000b31 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1055,12 +1055,24 @@ struct lruvec *mem_cgroup_zone_lruvec(struct zone *zone,
                                       struct mem_cgroup *memcg)
 {
         struct mem_cgroup_per_zone *mz;
+        struct lruvec *lruvec;
 
-        if (mem_cgroup_disabled())
-                return &zone->lruvec;
+        if (mem_cgroup_disabled()) {
+                lruvec = &zone->lruvec;
+                goto out;
+        }
 
         mz = mem_cgroup_zoneinfo(memcg, zone_to_nid(zone), zone_idx(zone));
-        return &mz->lruvec;
+        lruvec = &mz->lruvec;
+out:
+        /*
+         * Since a node can be onlined after the mem_cgroup was created,
+         * we have to be prepared to initialize lruvec->zone here;
+         * and if offlined then reonlined, we need to reinitialize it.
+         */
+        if (unlikely(lruvec->zone != zone))
+                lruvec->zone = zone;
+        return lruvec;
 }
 
 /*
@@ -1087,9 +1099,12 @@ struct lruvec *mem_cgroup_page_lruvec(struct page *page, struct zone *zone)
         struct mem_cgroup_per_zone *mz;
         struct mem_cgroup *memcg;
         struct page_cgroup *pc;
+        struct lruvec *lruvec;
 
-        if (mem_cgroup_disabled())
-                return &zone->lruvec;
+        if (mem_cgroup_disabled()) {
+                lruvec = &zone->lruvec;
+                goto out;
+        }
 
         pc = lookup_page_cgroup(page);
         memcg = pc->mem_cgroup;
@@ -1107,7 +1122,16 @@ struct lruvec *mem_cgroup_page_lruvec(struct page *page, struct zone *zone)
                 pc->mem_cgroup = memcg = root_mem_cgroup;
 
         mz = page_cgroup_zoneinfo(memcg, page);
-        return &mz->lruvec;
+        lruvec = &mz->lruvec;
+out:
+        /*
+         * Since a node can be onlined after the mem_cgroup was created,
+         * we have to be prepared to initialize lruvec->zone here;
+         * and if offlined then reonlined, we need to reinitialize it.
+         */
+        if (unlikely(lruvec->zone != zone))
+                lruvec->zone = zone;
+        return lruvec;
 }
 
 /**
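The two hunks above convert mem_cgroup_zone_lruvec() and mem_cgroup_page_lruvec() to a shared out: path that re-checks lruvec->zone on every lookup, because a node can be onlined (or offlined and then reonlined) after the memcg's per-zone lruvecs were allocated. Below is a minimal userspace sketch of that lazy re-binding pattern; the zone_stub/lruvec_stub types and the lookup_lruvec() helper are simplified stand-ins invented for illustration, not the kernel's structures.

/*
 * Minimal userspace sketch (not kernel code): the lookup helper repairs
 * the lruvec->zone back-pointer on every call, so a zone that changed
 * after the per-memcg lruvec was allocated is picked up lazily.
 */
#include <stdio.h>

struct zone_stub { int nid; };

struct lruvec_stub {
        struct zone_stub *zone;         /* may be stale after hotplug */
};

static struct lruvec_stub *lookup_lruvec(struct lruvec_stub *lruvec,
                                         struct zone_stub *zone)
{
        /* Lazy (re)initialization, mirroring the unlikely() check above. */
        if (lruvec->zone != zone)
                lruvec->zone = zone;
        return lruvec;
}

int main(void)
{
        struct zone_stub old_zone = { .nid = 0 }, new_zone = { .nid = 1 };
        struct lruvec_stub lruvec = { .zone = &old_zone };

        /* Simulate a lookup after the zone backing this lruvec changed. */
        lookup_lruvec(&lruvec, &new_zone);
        printf("lruvec now points at node %d\n", lruvec.zone->nid);
        return 0;
}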
@@ -1452,17 +1476,26 @@ static int mem_cgroup_count_children(struct mem_cgroup *memcg)
 static u64 mem_cgroup_get_limit(struct mem_cgroup *memcg)
 {
         u64 limit;
-        u64 memsw;
 
         limit = res_counter_read_u64(&memcg->res, RES_LIMIT);
-        limit += total_swap_pages << PAGE_SHIFT;
 
-        memsw = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
         /*
-         * If memsw is finite and limits the amount of swap space available
-         * to this memcg, return that limit.
+         * Do not consider swap space if we cannot swap due to swappiness
          */
-        return min(limit, memsw);
+        if (mem_cgroup_swappiness(memcg)) {
+                u64 memsw;
+
+                limit += total_swap_pages << PAGE_SHIFT;
+                memsw = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
+
+                /*
+                 * If memsw is finite and limits the amount of swap space
+                 * available to this memcg, return that limit.
+                 */
+                limit = min(limit, memsw);
+        }
+
+        return limit;
 }
 
 void mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
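The mem_cgroup_get_limit() hunk above stops counting swap toward the OOM limit when the group's swappiness is 0, since such a group cannot swap at all. A rough standalone model of the new computation follows; every name in it is an illustrative stand-in, not a kernel symbol.

/*
 * Userspace model (not kernel code): with swappiness 0 the limit is the
 * memory limit alone; otherwise swap is added and capped by memsw.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t min_u64(uint64_t a, uint64_t b) { return a < b ? a : b; }

static uint64_t get_oom_limit(uint64_t mem_limit, uint64_t memsw_limit,
                              uint64_t total_swap_bytes, int swappiness)
{
        uint64_t limit = mem_limit;

        /* Do not consider swap space if we cannot swap due to swappiness. */
        if (swappiness) {
                limit += total_swap_bytes;
                limit = min_u64(limit, memsw_limit);
        }
        return limit;
}

int main(void)
{
        /* swappiness == 0: swap is ignored even though it exists. */
        printf("%llu\n", (unsigned long long)
               get_oom_limit(1ULL << 30, UINT64_MAX, 4ULL << 30, 0));
        /* swappiness > 0: swap counted, capped by the memsw limit. */
        printf("%llu\n", (unsigned long long)
               get_oom_limit(1ULL << 30, 2ULL << 30, 4ULL << 30, 60));
        return 0;
}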
@@ -3688,17 +3721,17 @@ unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
 static bool mem_cgroup_force_empty_list(struct mem_cgroup *memcg,
                                 int node, int zid, enum lru_list lru)
 {
-        struct mem_cgroup_per_zone *mz;
+        struct lruvec *lruvec;
         unsigned long flags, loop;
         struct list_head *list;
         struct page *busy;
         struct zone *zone;
 
         zone = &NODE_DATA(node)->node_zones[zid];
-        mz = mem_cgroup_zoneinfo(memcg, node, zid);
-        list = &mz->lruvec.lists[lru];
+        lruvec = mem_cgroup_zone_lruvec(zone, memcg);
+        list = &lruvec->lists[lru];
 
-        loop = mz->lru_size[lru];
+        loop = mem_cgroup_get_lru_size(lruvec, lru);
         /* give some margin against EBUSY etc...*/
         loop += 256;
         busy = NULL;
@@ -4736,7 +4769,7 @@ static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *memcg, int node)
 
         for (zone = 0; zone < MAX_NR_ZONES; zone++) {
                 mz = &pn->zoneinfo[zone];
-                lruvec_init(&mz->lruvec, &NODE_DATA(node)->node_zones[zone]);
+                lruvec_init(&mz->lruvec);
                 mz->usage_in_excess = 0;
                 mz->on_tree = false;
                 mz->memcg = memcg;
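The last hunk drops the zone argument from lruvec_init(), which pairs with the lazy lruvec->zone fix-up earlier in this diff: initialization no longer binds a zone at all. A hedged, simplified sketch of what that implies; the *_sketch types below are assumptions for illustration, not the kernel definitions.

/*
 * Sketch only: the initializer just sets up empty LRU lists, and the
 * zone back-pointer is left to the lookup-time fix-up shown above.
 */
#include <stdio.h>

enum lru_sketch { LRU_INACTIVE, LRU_ACTIVE, NR_LRU_SKETCH };

struct list_head_sketch { struct list_head_sketch *next, *prev; };

struct lruvec_sketch {
        struct list_head_sketch lists[NR_LRU_SKETCH];
        void *zone;                     /* bound later, at lookup time */
};

static void lruvec_init_sketch(struct lruvec_sketch *lruvec)
{
        int lru;

        lruvec->zone = NULL;            /* no zone argument any more */
        for (lru = 0; lru < NR_LRU_SKETCH; lru++) {
                lruvec->lists[lru].next = &lruvec->lists[lru];
                lruvec->lists[lru].prev = &lruvec->lists[lru];
        }
}

int main(void)
{
        struct lruvec_sketch lruvec;

        lruvec_init_sketch(&lruvec);
        printf("zone bound yet? %s\n", lruvec.zone ? "yes" : "no");
        return 0;
}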
