Diffstat (limited to 'mm/vmstat.c')
-rw-r--r--	mm/vmstat.c	95
1 file changed, 67 insertions(+), 28 deletions(-)

diff --git a/mm/vmstat.c b/mm/vmstat.c
index 20c2ef4458fa..9bb314577911 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -19,6 +19,9 @@
 #include <linux/math64.h>
 #include <linux/writeback.h>
 #include <linux/compaction.h>
+#include <linux/mm_inline.h>
+
+#include "internal.h"
 
 #ifdef CONFIG_VM_EVENT_COUNTERS
 DEFINE_PER_CPU(struct vm_event_state, vm_event_states) = {{0}};
@@ -414,12 +417,17 @@ void dec_zone_page_state(struct page *page, enum zone_stat_item item)
 EXPORT_SYMBOL(dec_zone_page_state);
 #endif
 
+static inline void fold_diff(int *diff)
+{
+	int i;
+
+	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
+		if (diff[i])
+			atomic_long_add(diff[i], &vm_stat[i]);
+}
+
 /*
- * Update the zone counters for one cpu.
- *
- * The cpu specified must be either the current cpu or a processor that
- * is not online. If it is the current cpu then the execution thread must
- * be pinned to the current cpu.
+ * Update the zone counters for the current cpu.
  *
  * Note that refresh_cpu_vm_stats strives to only access
  * node local memory. The per cpu pagesets on remote zones are placed
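The new fold_diff() helper factors out the final fold of a locally accumulated diff array into the global vm_stat[] counters; both refresh_cpu_vm_stats() and the new cpu_vm_stats_fold() added further down call it. A rough userspace analogue of the pattern, with invented names (only the kernel code above is authoritative):

    #include <stdatomic.h>

    #define NR_ITEMS 4

    static atomic_long global_stat[NR_ITEMS];

    /* Fold an array of locally accumulated deltas into the globals. */
    static void fold_diff_example(const long *diff)
    {
        int i;

        for (i = 0; i < NR_ITEMS; i++)
            if (diff[i]) /* skip no-op adds: avoids dirtying shared cache lines */
                atomic_fetch_add(&global_stat[i], diff[i]);
    }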
@@ -432,33 +440,29 @@ EXPORT_SYMBOL(dec_zone_page_state);
  * with the global counters. These could cause remote node cache line
  * bouncing and will have to be only done when necessary.
  */
-void refresh_cpu_vm_stats(int cpu)
+static void refresh_cpu_vm_stats(void)
 {
 	struct zone *zone;
 	int i;
 	int global_diff[NR_VM_ZONE_STAT_ITEMS] = { 0, };
 
 	for_each_populated_zone(zone) {
-		struct per_cpu_pageset *p;
+		struct per_cpu_pageset __percpu *p = zone->pageset;
 
-		p = per_cpu_ptr(zone->pageset, cpu);
+		for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++) {
+			int v;
 
-		for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
-			if (p->vm_stat_diff[i]) {
-				unsigned long flags;
-				int v;
+			v = this_cpu_xchg(p->vm_stat_diff[i], 0);
+			if (v) {
 
-				local_irq_save(flags);
-				v = p->vm_stat_diff[i];
-				p->vm_stat_diff[i] = 0;
-				local_irq_restore(flags);
 				atomic_long_add(v, &zone->vm_stat[i]);
 				global_diff[i] += v;
 #ifdef CONFIG_NUMA
 				/* 3 seconds idle till flush */
-				p->expire = 3;
+				__this_cpu_write(p->expire, 3);
 #endif
 			}
+		}
 		cond_resched();
 #ifdef CONFIG_NUMA
 		/*
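This hunk carries the core change of the patch. The old code had to disable interrupts around the read-and-zero of vm_stat_diff[], because an interrupt on the same cpu could bump the counter between the read and the clear. this_cpu_xchg() performs the read and the clear as one per-cpu operation, safe against local interrupts and cheaper than the irq-off/irq-on pair. A userspace sketch of the same read-and-clear idea, using per-thread counters and C11 atomics (illustrative only; the kernel primitive is a per-cpu operation, not an SMP atomic, so atomic_exchange here is stronger than what the kernel needs):

    #include <stdatomic.h>

    #define NR_ITEMS 4

    static _Thread_local atomic_long thread_diff[NR_ITEMS];
    static atomic_long global_stat[NR_ITEMS];

    static void refresh_thread_stats(void)
    {
        long global_diff[NR_ITEMS] = { 0, };
        int i;

        for (i = 0; i < NR_ITEMS; i++) {
            /* one operation replaces "read, then zero" done with irqs off */
            long v = atomic_exchange(&thread_diff[i], 0);

            if (v)
                global_diff[i] += v;
        }

        /* fold the nonzero deltas into the shared totals */
        for (i = 0; i < NR_ITEMS; i++)
            if (global_diff[i])
                atomic_fetch_add(&global_stat[i], global_diff[i]);
    }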
@@ -468,29 +472,57 @@ void refresh_cpu_vm_stats(int cpu)
 		 * Check if there are pages remaining in this pageset
 		 * if not then there is nothing to expire.
 		 */
-		if (!p->expire || !p->pcp.count)
+		if (!__this_cpu_read(p->expire) ||
+			       !__this_cpu_read(p->pcp.count))
 			continue;
 
 		/*
 		 * We never drain zones local to this processor.
 		 */
 		if (zone_to_nid(zone) == numa_node_id()) {
-			p->expire = 0;
+			__this_cpu_write(p->expire, 0);
 			continue;
 		}
 
-		p->expire--;
-		if (p->expire)
+
+		if (__this_cpu_dec_return(p->expire))
 			continue;
 
-		if (p->pcp.count)
-			drain_zone_pages(zone, &p->pcp);
+		if (__this_cpu_read(p->pcp.count))
+			drain_zone_pages(zone, __this_cpu_ptr(&p->pcp));
 #endif
 	}
+	fold_diff(global_diff);
+}
 
-	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
-		if (global_diff[i])
-			atomic_long_add(global_diff[i], &vm_stat[i]);
+/*
+ * Fold the data for an offline cpu into the global array.
+ * There cannot be any access by the offline cpu and therefore
+ * synchronization is simplified.
+ */
+void cpu_vm_stats_fold(int cpu)
+{
+	struct zone *zone;
+	int i;
+	int global_diff[NR_VM_ZONE_STAT_ITEMS] = { 0, };
+
+	for_each_populated_zone(zone) {
+		struct per_cpu_pageset *p;
+
+		p = per_cpu_ptr(zone->pageset, cpu);
+
+		for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
+			if (p->vm_stat_diff[i]) {
+				int v;
+
+				v = p->vm_stat_diff[i];
+				p->vm_stat_diff[i] = 0;
+				atomic_long_add(v, &zone->vm_stat[i]);
+				global_diff[i] += v;
+			}
+	}
+
+	fold_diff(global_diff);
 }
 
 /*
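The NUMA expiry path gets the same conversion: the two-step "p->expire--; if (p->expire)" becomes a single __this_cpu_dec_return(). The new cpu_vm_stats_fold() is the counterpart for a cpu that is already offline: since nothing can run on that cpu, its vm_stat_diff[] entries can be read and cleared with plain loads and stores, and only the shared totals need atomic adds, which is why the comment says synchronization is simplified. A userspace analogue of that idea, with all names invented (a worker's private counters folded after the worker has exited):

    #include <stdatomic.h>

    #define NR_ITEMS 4

    struct worker_stats {
        long diff[NR_ITEMS]; /* private while the worker runs */
    };

    static atomic_long global_stat[NR_ITEMS];

    /* Caller guarantees the worker is dead: no concurrent writers remain. */
    static void worker_stats_fold(struct worker_stats *w)
    {
        int i;

        for (i = 0; i < NR_ITEMS; i++)
            if (w->diff[i]) {
                long v = w->diff[i];

                w->diff[i] = 0; /* plain store is safe: no one else can race */
                atomic_fetch_add(&global_stat[i], v);
            }
    }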
@@ -703,6 +735,7 @@ static void walk_zones_in_node(struct seq_file *m, pg_data_t *pgdat,
 const char * const vmstat_text[] = {
 	/* Zoned VM counters */
 	"nr_free_pages",
+	"nr_alloc_batch",
 	"nr_inactive_anon",
 	"nr_active_anon",
 	"nr_inactive_file",
@@ -817,6 +850,12 @@ const char * const vmstat_text[] = {
 	"thp_zero_page_alloc",
 	"thp_zero_page_alloc_failed",
 #endif
+#ifdef CONFIG_SMP
+	"nr_tlb_remote_flush",
+	"nr_tlb_remote_flush_received",
+#endif
+	"nr_tlb_local_flush_all",
+	"nr_tlb_local_flush_one",
 
 #endif /* CONFIG_VM_EVENTS_COUNTERS */
 };
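Each string added to vmstat_text[] becomes a new line in /proc/vmstat: "nr_alloc_batch" among the zoned counters, and the nr_tlb_* entries among the event counters (only present with CONFIG_VM_EVENT_COUNTERS). A minimal reader for the new TLB flush counters, assuming a kernel with this patch applied:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        char name[64];
        unsigned long long val;
        FILE *f = fopen("/proc/vmstat", "r");

        if (!f)
            return 1;
        /* each /proc/vmstat line is "<counter_name> <value>" */
        while (fscanf(f, "%63s %llu", name, &val) == 2)
            if (!strncmp(name, "nr_tlb_", 7))
                printf("%s = %llu\n", name, val);
        fclose(f);
        return 0;
    }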
@@ -1052,7 +1091,7 @@ static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat,
 		   "\n  all_unreclaimable: %u"
 		   "\n  start_pfn:         %lu"
 		   "\n  inactive_ratio:    %u",
-		   zone->all_unreclaimable,
+		   !zone_reclaimable(zone),
 		   zone->zone_start_pfn,
 		   zone->inactive_ratio);
 	seq_putc(m, '\n');
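The /proc/zoneinfo field keeps its name but is now computed from zone_reclaimable() instead of the removed zone->all_unreclaimable flag; the helper's declaration lives in mm/internal.h, which is presumably why this patch adds that include at the top of the file. At this point in the tree the helper amounts to roughly the following (paraphrased from mm/vmscan.c from memory; check the actual source):

    /*
     * A zone is considered reclaimable until we have scanned roughly
     * six times its reclaimable pages without making progress.
     */
    bool zone_reclaimable(struct zone *zone)
    {
        return zone->pages_scanned < zone_reclaimable_pages(zone) * 6;
    }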
@@ -1177,7 +1216,7 @@ int sysctl_stat_interval __read_mostly = HZ;
 
 static void vmstat_update(struct work_struct *w)
 {
-	refresh_cpu_vm_stats(smp_processor_id());
+	refresh_cpu_vm_stats();
 	schedule_delayed_work(&__get_cpu_var(vmstat_work),
 		round_jiffies_relative(sysctl_stat_interval));
 }
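With the cpu argument gone, refresh_cpu_vm_stats() implicitly operates on whichever cpu executes the work item, so vmstat_update() no longer needs smp_processor_id(); it keeps the existing pattern of a per-cpu delayed work that re-arms itself every sysctl_stat_interval. A userspace analogue of that self-rescheduling loop (illustrative only; the kernel uses per-cpu delayed workqueue items, not a dedicated thread):

    #include <unistd.h>

    void refresh_thread_stats(void); /* the fold step, e.g. the sketch further up */

    static unsigned int stat_interval_secs = 1; /* stand-in for sysctl_stat_interval */

    static void *vmstat_update_loop(void *arg)
    {
        for (;;) {
            refresh_thread_stats();    /* fold local deltas into the globals */
            sleep(stat_interval_secs); /* stand-in for schedule_delayed_work() */
        }
        return arg; /* not reached */
    }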