author     Michal Hocko <mhocko@suse.com>                       2017-09-06 19:23:36 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>       2017-09-06 20:27:29 -0400
commit     c41f012ade0b95b0a6e25c7150673e0554736165 (patch)
tree       c3270264283dff1de402a6609b155c804c097320 /mm
parent     4da243ac1cf6aeb30b7c555d56208982d66d6d33 (diff)
mm: rename global_page_state to global_zone_page_state
global_page_state is error prone, as a recent bug report pointed out [1].
It only returns proper values for zone-based counters, as the enum it
takes suggests. We already have global_node_page_state, so let's rename
global_page_state to global_zone_page_state to be more explicit here.
All existing users seem to be correct:
$ git grep "global_page_state(NR_" | sed 's@.*(\(NR_[A-Z_]*\)).*@\1@' | sort | uniq -c
2 NR_BOUNCE
2 NR_FREE_CMA_PAGES
11 NR_FREE_PAGES
1 NR_KERNEL_STACK_KB
1 NR_MLOCK
2 NR_PAGETABLE
This patch shouldn't introduce any functional change.
[1] http://lkml.kernel.org/r/201707260628.v6Q6SmaS030814@www262.sakura.ne.jp
Link: http://lkml.kernel.org/r/20170801134256.5400-2-hannes@cmpxchg.org
Signed-off-by: Michal Hocko <mhocko@suse.com>
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Tetsuo Handa <penguin-kernel@i-love.sakura.ne.jp>
Cc: Josef Bacik <josef@toxicpanda.com>
Cc: Vladimir Davydov <vdavydov.dev@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
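The rename only changes which name reads the zone-wide vmstat counters; node-wide counters keep going through global_node_page_state(). The sketch below illustrates the split with a made-up helper (guess_reclaimable_pages() is hypothetical, not from this patch); it mirrors the __vm_enough_memory() hunk in mm/util.c further down.

/*
 * Illustration only -- not part of this patch.  guess_reclaimable_pages()
 * is a hypothetical caller; the accessors and counters are real.
 * Zone counters (enum zone_stat_item, e.g. NR_FREE_PAGES) go through
 * global_zone_page_state(); node counters (enum node_stat_item, e.g.
 * NR_FILE_PAGES) go through global_node_page_state().  Feeding a node
 * counter to the zone accessor indexes the wrong vmstat array -- the
 * class of bug the rename makes harder to write by accident.
 */
#include <linux/vmstat.h>

static unsigned long guess_reclaimable_pages(void)
{
	unsigned long free = global_zone_page_state(NR_FREE_PAGES);	/* zone counter */
	unsigned long file = global_node_page_state(NR_FILE_PAGES);	/* node counter */

	return free + file;
}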
Diffstat (limited to 'mm')
-rw-r--r--   mm/mmap.c            |  6 +++---
-rw-r--r--   mm/nommu.c           |  4 ++--
-rw-r--r--   mm/page-writeback.c  |  4 ++--
-rw-r--r--   mm/page_alloc.c      | 12 ++++++------
-rw-r--r--   mm/util.c            |  2 +-
-rw-r--r--   mm/vmstat.c          |  4 ++--
6 files changed, 16 insertions(+), 16 deletions(-)
diff --git a/mm/mmap.c b/mm/mmap.c
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -3514,7 +3514,7 @@ static int init_user_reserve(void)
 {
 	unsigned long free_kbytes;
 
-	free_kbytes = global_page_state(NR_FREE_PAGES) << (PAGE_SHIFT - 10);
+	free_kbytes = global_zone_page_state(NR_FREE_PAGES) << (PAGE_SHIFT - 10);
 
 	sysctl_user_reserve_kbytes = min(free_kbytes / 32, 1UL << 17);
 	return 0;
@@ -3535,7 +3535,7 @@ static int init_admin_reserve(void)
 {
 	unsigned long free_kbytes;
 
-	free_kbytes = global_page_state(NR_FREE_PAGES) << (PAGE_SHIFT - 10);
+	free_kbytes = global_zone_page_state(NR_FREE_PAGES) << (PAGE_SHIFT - 10);
 
 	sysctl_admin_reserve_kbytes = min(free_kbytes / 32, 1UL << 13);
 	return 0;
@@ -3579,7 +3579,7 @@ static int reserve_mem_notifier(struct notifier_block *nb,
 
 		break;
 	case MEM_OFFLINE:
-		free_kbytes = global_page_state(NR_FREE_PAGES) << (PAGE_SHIFT - 10);
+		free_kbytes = global_zone_page_state(NR_FREE_PAGES) << (PAGE_SHIFT - 10);
 
 		if (sysctl_user_reserve_kbytes > free_kbytes) {
 			init_user_reserve();
diff --git a/mm/nommu.c b/mm/nommu.c
index fc184f597d59..53d5175a5c14 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -1962,7 +1962,7 @@ static int __meminit init_user_reserve(void)
 {
 	unsigned long free_kbytes;
 
-	free_kbytes = global_page_state(NR_FREE_PAGES) << (PAGE_SHIFT - 10);
+	free_kbytes = global_zone_page_state(NR_FREE_PAGES) << (PAGE_SHIFT - 10);
 
 	sysctl_user_reserve_kbytes = min(free_kbytes / 32, 1UL << 17);
 	return 0;
@@ -1983,7 +1983,7 @@ static int __meminit init_admin_reserve(void)
 {
 	unsigned long free_kbytes;
 
-	free_kbytes = global_page_state(NR_FREE_PAGES) << (PAGE_SHIFT - 10);
+	free_kbytes = global_zone_page_state(NR_FREE_PAGES) << (PAGE_SHIFT - 10);
 
 	sysctl_admin_reserve_kbytes = min(free_kbytes / 32, 1UL << 13);
 	return 0;
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index bf050ab025b7..0b9c5cbe8eba 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -363,7 +363,7 @@ static unsigned long global_dirtyable_memory(void)
 {
 	unsigned long x;
 
-	x = global_page_state(NR_FREE_PAGES);
+	x = global_zone_page_state(NR_FREE_PAGES);
 	/*
 	 * Pages reserved for the kernel should not be considered
 	 * dirtyable, to prevent a situation where reclaim has to
@@ -1405,7 +1405,7 @@ void wb_update_bandwidth(struct bdi_writeback *wb, unsigned long start_time)
  * will look to see if it needs to start dirty throttling.
  *
  * If dirty_poll_interval is too low, big NUMA machines will call the expensive
- * global_page_state() too often. So scale it near-sqrt to the safety margin
+ * global_zone_page_state() too often. So scale it near-sqrt to the safety margin
  * (the number of pages we may dirty without exceeding the dirty limits).
  */
 static unsigned long dirty_poll_interval(unsigned long dirty,
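For context on the comment above: dirty_poll_interval() turns the remaining dirty margin into a poll interval that grows roughly like its square root. A sketch of that scaling, reconstructed from the mainline mm/page-writeback.c of this era (not part of this patch, so treat the exact form as an approximation):

/* Sketch only -- reconstructed, not quoted from this diff. */
#include <linux/log2.h>

static unsigned long dirty_poll_interval(unsigned long dirty,
					 unsigned long thresh)
{
	if (thresh > dirty)
		/* a power of two near sqrt(thresh - dirty) */
		return 1UL << (ilog2(thresh - dirty) >> 1);

	return 1;
}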
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 0bea94af0423..a4562c058ec4 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -4509,7 +4509,7 @@ long si_mem_available(void)
 	 * Estimate the amount of memory available for userspace allocations,
 	 * without causing swapping.
 	 */
-	available = global_page_state(NR_FREE_PAGES) - totalreserve_pages;
+	available = global_zone_page_state(NR_FREE_PAGES) - totalreserve_pages;
 
 	/*
 	 * Not all the page cache can be freed, otherwise the system will
@@ -4538,7 +4538,7 @@ void si_meminfo(struct sysinfo *val)
 {
 	val->totalram = totalram_pages;
 	val->sharedram = global_node_page_state(NR_SHMEM);
-	val->freeram = global_page_state(NR_FREE_PAGES);
+	val->freeram = global_zone_page_state(NR_FREE_PAGES);
 	val->bufferram = nr_blockdev_pages();
 	val->totalhigh = totalhigh_pages;
 	val->freehigh = nr_free_highpages();
@@ -4673,11 +4673,11 @@ void show_free_areas(unsigned int filter, nodemask_t *nodemask)
 		global_node_page_state(NR_SLAB_UNRECLAIMABLE),
 		global_node_page_state(NR_FILE_MAPPED),
 		global_node_page_state(NR_SHMEM),
-		global_page_state(NR_PAGETABLE),
-		global_page_state(NR_BOUNCE),
-		global_page_state(NR_FREE_PAGES),
+		global_zone_page_state(NR_PAGETABLE),
+		global_zone_page_state(NR_BOUNCE),
+		global_zone_page_state(NR_FREE_PAGES),
 		free_pcp,
-		global_page_state(NR_FREE_CMA_PAGES));
+		global_zone_page_state(NR_FREE_CMA_PAGES));
 
 	for_each_online_pgdat(pgdat) {
 		if (show_mem_node_skip(filter, pgdat->node_id, nodemask))
diff --git a/mm/util.c b/mm/util.c
--- a/mm/util.c
+++ b/mm/util.c
@@ -614,7 +614,7 @@ int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
 		return 0;
 
 	if (sysctl_overcommit_memory == OVERCOMMIT_GUESS) {
-		free = global_page_state(NR_FREE_PAGES);
+		free = global_zone_page_state(NR_FREE_PAGES);
 		free += global_node_page_state(NR_FILE_PAGES);
 
 		/*
diff --git a/mm/vmstat.c b/mm/vmstat.c
index e131b51654c7..ba9b202e8500 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -1502,7 +1502,7 @@ static void *vmstat_start(struct seq_file *m, loff_t *pos)
 	if (!v)
 		return ERR_PTR(-ENOMEM);
 	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
-		v[i] = global_page_state(i);
+		v[i] = global_zone_page_state(i);
 	v += NR_VM_ZONE_STAT_ITEMS;
 
 	for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
@@ -1591,7 +1591,7 @@ int vmstat_refresh(struct ctl_table *table, int write,
 	 * which can equally be echo'ed to or cat'ted from (by root),
 	 * can be used to update the stats just before reading them.
 	 *
-	 * Oh, and since global_page_state() etc. are so careful to hide
+	 * Oh, and since global_zone_page_state() etc. are so careful to hide
 	 * transiently negative values, report an error here if any of
 	 * the stats is negative, so we know to go looking for imbalance.
 	 */
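The "careful to hide transiently negative values" behaviour mentioned above comes from the read-side accessor: per-CPU deltas that have not been folded back yet can make the global counter dip below zero for a moment, and the accessor clamps the result. A sketch of the post-rename accessor, assuming the include/linux/vmstat.h layout of this era (not part of this diff):

/* Sketch only -- reconstructed, not quoted from this diff. */
static inline unsigned long global_zone_page_state(enum zone_stat_item item)
{
	long x = atomic_long_read(&vm_zone_stat[item]);
#ifdef CONFIG_SMP
	/* hide transiently negative values caused by unfolded per-CPU drift */
	if (x < 0)
		x = 0;
#endif
	return x;
}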