Diffstat (limited to 'mm/page-writeback.c'):
 mm/page-writeback.c | 91 ++++++++++++++++++++++++++++++++----------------
 1 file changed, 61 insertions(+), 30 deletions(-)

diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 0ada2b2954b0..f7c0fb993fb9 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -267,26 +267,35 @@ static void wb_min_max_ratio(struct bdi_writeback *wb,
  */
 
 /**
- * zone_dirtyable_memory - number of dirtyable pages in a zone
- * @zone: the zone
+ * node_dirtyable_memory - number of dirtyable pages in a node
+ * @pgdat: the node
  *
- * Returns the zone's number of pages potentially available for dirty
- * page cache. This is the base value for the per-zone dirty limits.
+ * Returns the node's number of pages potentially available for dirty
+ * page cache. This is the base value for the per-node dirty limits.
  */
-static unsigned long zone_dirtyable_memory(struct zone *zone)
+static unsigned long node_dirtyable_memory(struct pglist_data *pgdat)
 {
-	unsigned long nr_pages;
+	unsigned long nr_pages = 0;
+	int z;
+
+	for (z = 0; z < MAX_NR_ZONES; z++) {
+		struct zone *zone = pgdat->node_zones + z;
+
+		if (!populated_zone(zone))
+			continue;
+
+		nr_pages += zone_page_state(zone, NR_FREE_PAGES);
+	}
 
-	nr_pages = zone_page_state(zone, NR_FREE_PAGES);
 	/*
 	 * Pages reserved for the kernel should not be considered
 	 * dirtyable, to prevent a situation where reclaim has to
 	 * clean pages in order to balance the zones.
 	 */
-	nr_pages -= min(nr_pages, zone->totalreserve_pages);
+	nr_pages -= min(nr_pages, pgdat->totalreserve_pages);
 
-	nr_pages += node_page_state(zone->zone_pgdat, NR_INACTIVE_FILE);
-	nr_pages += node_page_state(zone->zone_pgdat, NR_ACTIVE_FILE);
+	nr_pages += node_page_state(pgdat, NR_INACTIVE_FILE);
+	nr_pages += node_page_state(pgdat, NR_ACTIVE_FILE);
 
 	return nr_pages;
 }
@@ -299,13 +308,24 @@ static unsigned long highmem_dirtyable_memory(unsigned long total)
 	int i;
 
 	for_each_node_state(node, N_HIGH_MEMORY) {
-		for (i = 0; i < MAX_NR_ZONES; i++) {
-			struct zone *z = &NODE_DATA(node)->node_zones[i];
+		for (i = ZONE_NORMAL + 1; i < MAX_NR_ZONES; i++) {
+			struct zone *z;
+			unsigned long dirtyable;
+
+			if (!is_highmem_idx(i))
+				continue;
+
+			z = &NODE_DATA(node)->node_zones[i];
+			dirtyable = zone_page_state(z, NR_FREE_PAGES) +
+				zone_page_state(z, NR_ZONE_LRU_FILE);
 
-			if (is_highmem(z))
-				x += zone_dirtyable_memory(z);
+			/* watch for underflows */
+			dirtyable -= min(dirtyable, high_wmark_pages(z));
+
+			x += dirtyable;
 		}
 	}
+
 	/*
 	 * Unreclaimable memory (kernel memory or anonymous memory
 	 * without swap) can bring down the dirtyable pages below
@@ -445,23 +465,23 @@ void global_dirty_limits(unsigned long *pbackground, unsigned long *pdirty)
 }
 
 /**
- * zone_dirty_limit - maximum number of dirty pages allowed in a zone
- * @zone: the zone
+ * node_dirty_limit - maximum number of dirty pages allowed in a node
+ * @pgdat: the node
  *
- * Returns the maximum number of dirty pages allowed in a zone, based
- * on the zone's dirtyable memory.
+ * Returns the maximum number of dirty pages allowed in a node, based
+ * on the node's dirtyable memory.
  */
-static unsigned long zone_dirty_limit(struct zone *zone)
+static unsigned long node_dirty_limit(struct pglist_data *pgdat)
 {
-	unsigned long zone_memory = zone_dirtyable_memory(zone);
+	unsigned long node_memory = node_dirtyable_memory(pgdat);
 	struct task_struct *tsk = current;
 	unsigned long dirty;
 
 	if (vm_dirty_bytes)
 		dirty = DIV_ROUND_UP(vm_dirty_bytes, PAGE_SIZE) *
-			zone_memory / global_dirtyable_memory();
+			node_memory / global_dirtyable_memory();
 	else
-		dirty = vm_dirty_ratio * zone_memory / 100;
+		dirty = vm_dirty_ratio * node_memory / 100;
 
 	if (tsk->flags & PF_LESS_THROTTLE || rt_task(tsk))
 		dirty += dirty / 4;
@@ -470,19 +490,30 @@ static unsigned long zone_dirty_limit(struct zone *zone)
 }
 
 /**
- * zone_dirty_ok - tells whether a zone is within its dirty limits
- * @zone: the zone to check
+ * node_dirty_ok - tells whether a node is within its dirty limits
+ * @pgdat: the node to check
  *
- * Returns %true when the dirty pages in @zone are within the zone's
+ * Returns %true when the dirty pages in @pgdat are within the node's
  * dirty limit, %false if the limit is exceeded.
  */
-bool zone_dirty_ok(struct zone *zone)
+bool node_dirty_ok(struct pglist_data *pgdat)
 {
-	unsigned long limit = zone_dirty_limit(zone);
+	int z;
+	unsigned long limit = node_dirty_limit(pgdat);
+	unsigned long nr_pages = 0;
+
+	for (z = 0; z < MAX_NR_ZONES; z++) {
+		struct zone *zone = pgdat->node_zones + z;
+
+		if (!populated_zone(zone))
+			continue;
+
+		nr_pages += zone_page_state(zone, NR_FILE_DIRTY);
+		nr_pages += zone_page_state(zone, NR_UNSTABLE_NFS);
+		nr_pages += zone_page_state(zone, NR_WRITEBACK);
+	}
 
-	return zone_page_state(zone, NR_FILE_DIRTY) +
-	       zone_page_state(zone, NR_UNSTABLE_NFS) +
-	       zone_page_state(zone, NR_WRITEBACK) <= limit;
+	return nr_pages <= limit;
 }
 
 int dirty_background_ratio_handler(struct ctl_table *table, int write,