diff options
Diffstat (limited to 'mm/vmscan.c')
-rw-r--r-- | mm/vmscan.c | 41 |
1 files changed, 41 insertions, 0 deletions
diff --git a/mm/vmscan.c b/mm/vmscan.c index cb8ad3c6e483..bbd194630c5b 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c | |||
@@ -932,6 +932,7 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone, | |||
932 | long mapped_ratio; | 932 | long mapped_ratio; |
933 | long distress; | 933 | long distress; |
934 | long swap_tendency; | 934 | long swap_tendency; |
935 | long imbalance; | ||
935 | 936 | ||
936 | if (zone_is_near_oom(zone)) | 937 | if (zone_is_near_oom(zone)) |
937 | goto force_reclaim_mapped; | 938 | goto force_reclaim_mapped; |
@@ -967,6 +968,46 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone, | |||
967 | swap_tendency = mapped_ratio / 2 + distress + sc->swappiness; | 968 | swap_tendency = mapped_ratio / 2 + distress + sc->swappiness; |
968 | 969 | ||
969 | /* | 970 | /* |
971 | * If there's huge imbalance between active and inactive | ||
972 | * (think active 100 times larger than inactive) we should | ||
973 | * become more permissive, or the system will take too much | ||
974 | cpu before it starts swapping during memory pressure. | ||
975 | * Distress is about avoiding early-oom, this is about | ||
976 | * making swappiness graceful despite setting it to low | ||
977 | * values. | ||
978 | * | ||
979 | * Avoid div by zero with nr_inactive+1, and max resulting | ||
980 | * value is vm_total_pages. | ||
981 | */ | ||
982 | imbalance = zone_page_state(zone, NR_ACTIVE); | ||
983 | imbalance /= zone_page_state(zone, NR_INACTIVE) + 1; | ||
984 | |||
985 | /* | ||
986 | * Reduce the effect of imbalance if swappiness is low, | ||
987 | * this means for a swappiness very low, the imbalance | ||
988 | * must be much higher than 100 for this logic to make | ||
989 | a difference. | ||
990 | * | ||
991 | * Max temporary value is vm_total_pages*100. | ||
992 | */ | ||
993 | imbalance *= (vm_swappiness + 1); | ||
994 | imbalance /= 100; | ||
995 | |||
996 | /* | ||
997 | * If not much of the ram is mapped, make the imbalance | ||
998 | * less relevant; it's only a high priority to refill the | ||
999 | * inactive list with mapped pages in the presence of a | ||
1000 | * high ratio of mapped pages. | ||
1001 | * | ||
1002 | * Max temporary value is vm_total_pages*100. | ||
1003 | */ | ||
1004 | imbalance *= mapped_ratio; | ||
1005 | imbalance /= 100; | ||
1006 | |||
1007 | /* apply imbalance feedback to swap_tendency */ | ||
1008 | swap_tendency += imbalance; | ||
1009 | |||
1010 | /* | ||
970 | * Now use this metric to decide whether to start moving mapped | 1011 | * Now use this metric to decide whether to start moving mapped |
971 | * memory onto the inactive list. | 1012 | * memory onto the inactive list. |
972 | */ | 1013 | */ |