diff options
Diffstat (limited to 'mm/page-writeback.c')
| -rw-r--r-- | mm/page-writeback.c | 36 |
1 file changed, 25 insertions, 11 deletions
diff --git a/mm/page-writeback.c b/mm/page-writeback.c index 830893b2b3c7..0713bfbf0954 100644 --- a/mm/page-writeback.c +++ b/mm/page-writeback.c | |||
| @@ -201,6 +201,18 @@ static unsigned long highmem_dirtyable_memory(unsigned long total) | |||
| 201 | zone_reclaimable_pages(z) - z->dirty_balance_reserve; | 201 | zone_reclaimable_pages(z) - z->dirty_balance_reserve; |
| 202 | } | 202 | } |
| 203 | /* | 203 | /* |
| 204 | * Unreclaimable memory (kernel memory or anonymous memory | ||
| 205 | * without swap) can bring down the dirtyable pages below | ||
| 206 | * the zone's dirty balance reserve and the above calculation | ||
| 207 | * will underflow. However we still want to add in nodes | ||
| 208 | * which are below threshold (negative values) to get a more | ||
| 209 | * accurate calculation but make sure that the total never | ||
| 210 | * underflows. | ||
| 211 | */ | ||
| 212 | if ((long)x < 0) | ||
| 213 | x = 0; | ||
| 214 | |||
| 215 | /* | ||
| 204 | * Make sure that the number of highmem pages is never larger | 216 | * Make sure that the number of highmem pages is never larger |
| 205 | * than the number of the total dirtyable memory. This can only | 217 | * than the number of the total dirtyable memory. This can only |
| 206 | * occur in very strange VM situations but we want to make sure | 218 | * occur in very strange VM situations but we want to make sure |
| @@ -222,8 +234,8 @@ static unsigned long global_dirtyable_memory(void) | |||
| 222 | { | 234 | { |
| 223 | unsigned long x; | 235 | unsigned long x; |
| 224 | 236 | ||
| 225 | x = global_page_state(NR_FREE_PAGES) + global_reclaimable_pages() - | 237 | x = global_page_state(NR_FREE_PAGES) + global_reclaimable_pages(); |
| 226 | dirty_balance_reserve; | 238 | x -= min(x, dirty_balance_reserve); |
| 227 | 239 | ||
| 228 | if (!vm_highmem_is_dirtyable) | 240 | if (!vm_highmem_is_dirtyable) |
| 229 | x -= highmem_dirtyable_memory(x); | 241 | x -= highmem_dirtyable_memory(x); |
| @@ -290,9 +302,12 @@ static unsigned long zone_dirtyable_memory(struct zone *zone) | |||
| 290 | * highmem zone can hold its share of dirty pages, so we don't | 302 | * highmem zone can hold its share of dirty pages, so we don't |
| 291 | * care about vm_highmem_is_dirtyable here. | 303 | * care about vm_highmem_is_dirtyable here. |
| 292 | */ | 304 | */ |
| 293 | return zone_page_state(zone, NR_FREE_PAGES) + | 305 | unsigned long nr_pages = zone_page_state(zone, NR_FREE_PAGES) + |
| 294 | zone_reclaimable_pages(zone) - | 306 | zone_reclaimable_pages(zone); |
| 295 | zone->dirty_balance_reserve; | 307 | |
| 308 | /* don't allow this to underflow */ | ||
| 309 | nr_pages -= min(nr_pages, zone->dirty_balance_reserve); | ||
| 310 | return nr_pages; | ||
| 296 | } | 311 | } |
| 297 | 312 | ||
| 298 | /** | 313 | /** |
| @@ -1069,7 +1084,7 @@ static void bdi_update_bandwidth(struct backing_dev_info *bdi, | |||
| 1069 | } | 1084 | } |
| 1070 | 1085 | ||
| 1071 | /* | 1086 | /* |
| 1072 | * After a task dirtied this many pages, balance_dirty_pages_ratelimited_nr() | 1087 | * After a task dirtied this many pages, balance_dirty_pages_ratelimited() |
| 1073 | * will look to see if it needs to start dirty throttling. | 1088 | * will look to see if it needs to start dirty throttling. |
| 1074 | * | 1089 | * |
| 1075 | * If dirty_poll_interval is too low, big NUMA machines will call the expensive | 1090 | * If dirty_poll_interval is too low, big NUMA machines will call the expensive |
| @@ -1436,9 +1451,8 @@ static DEFINE_PER_CPU(int, bdp_ratelimits); | |||
| 1436 | DEFINE_PER_CPU(int, dirty_throttle_leaks) = 0; | 1451 | DEFINE_PER_CPU(int, dirty_throttle_leaks) = 0; |
| 1437 | 1452 | ||
| 1438 | /** | 1453 | /** |
| 1439 | * balance_dirty_pages_ratelimited_nr - balance dirty memory state | 1454 | * balance_dirty_pages_ratelimited - balance dirty memory state |
| 1440 | * @mapping: address_space which was dirtied | 1455 | * @mapping: address_space which was dirtied |
| 1441 | * @nr_pages_dirtied: number of pages which the caller has just dirtied | ||
| 1442 | * | 1456 | * |
| 1443 | * Processes which are dirtying memory should call in here once for each page | 1457 | * Processes which are dirtying memory should call in here once for each page |
| 1444 | * which was newly dirtied. The function will periodically check the system's | 1458 | * which was newly dirtied. The function will periodically check the system's |
| @@ -1449,8 +1463,7 @@ DEFINE_PER_CPU(int, dirty_throttle_leaks) = 0; | |||
| 1449 | * limit we decrease the ratelimiting by a lot, to prevent individual processes | 1463 | * limit we decrease the ratelimiting by a lot, to prevent individual processes |
| 1450 | * from overshooting the limit by (ratelimit_pages) each. | 1464 | * from overshooting the limit by (ratelimit_pages) each. |
| 1451 | */ | 1465 | */ |
| 1452 | void balance_dirty_pages_ratelimited_nr(struct address_space *mapping, | 1466 | void balance_dirty_pages_ratelimited(struct address_space *mapping) |
| 1453 | unsigned long nr_pages_dirtied) | ||
| 1454 | { | 1467 | { |
| 1455 | struct backing_dev_info *bdi = mapping->backing_dev_info; | 1468 | struct backing_dev_info *bdi = mapping->backing_dev_info; |
| 1456 | int ratelimit; | 1469 | int ratelimit; |
| @@ -1484,6 +1497,7 @@ void balance_dirty_pages_ratelimited_nr(struct address_space *mapping, | |||
| 1484 | */ | 1497 | */ |
| 1485 | p = &__get_cpu_var(dirty_throttle_leaks); | 1498 | p = &__get_cpu_var(dirty_throttle_leaks); |
| 1486 | if (*p > 0 && current->nr_dirtied < ratelimit) { | 1499 | if (*p > 0 && current->nr_dirtied < ratelimit) { |
| 1500 | unsigned long nr_pages_dirtied; | ||
| 1487 | nr_pages_dirtied = min(*p, ratelimit - current->nr_dirtied); | 1501 | nr_pages_dirtied = min(*p, ratelimit - current->nr_dirtied); |
| 1488 | *p -= nr_pages_dirtied; | 1502 | *p -= nr_pages_dirtied; |
| 1489 | current->nr_dirtied += nr_pages_dirtied; | 1503 | current->nr_dirtied += nr_pages_dirtied; |
| @@ -1493,7 +1507,7 @@ void balance_dirty_pages_ratelimited_nr(struct address_space *mapping, | |||
| 1493 | if (unlikely(current->nr_dirtied >= ratelimit)) | 1507 | if (unlikely(current->nr_dirtied >= ratelimit)) |
| 1494 | balance_dirty_pages(mapping, current->nr_dirtied); | 1508 | balance_dirty_pages(mapping, current->nr_dirtied); |
| 1495 | } | 1509 | } |
| 1496 | EXPORT_SYMBOL(balance_dirty_pages_ratelimited_nr); | 1510 | EXPORT_SYMBOL(balance_dirty_pages_ratelimited); |
| 1497 | 1511 | ||
| 1498 | void throttle_vm_writeout(gfp_t gfp_mask) | 1512 | void throttle_vm_writeout(gfp_t gfp_mask) |
| 1499 | { | 1513 | { |
