diff options
author | Arun KS <arunks@codeaurora.org> | 2018-12-28 03:34:32 -0500 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2018-12-28 15:11:47 -0500 |
commit | 476567e8735a0d06225f3873a86dfa0efd95f3a5 (patch) | |
tree | 0e6724684a8ac631d8ae0fbbf53b123342279fcc /mm/page_alloc.c | |
parent | ca79b0c211af63fa3276f0e3fd7dd9ada2439839 (diff) |
mm: remove managed_page_count_lock spinlock
Now that totalram_pages and managed_pages are atomic variables, there is no need for
the managed_page_count spinlock. The lock really provided only a weak consistency
guarantee: it was never used for anything but the update, and no reader
actually cares about all the values being updated in sync.
Link: http://lkml.kernel.org/r/1542090790-21750-5-git-send-email-arunks@codeaurora.org
Signed-off-by: Arun KS <arunks@codeaurora.org>
Reviewed-by: Konstantin Khlebnikov <khlebnikov@yandex-team.ru>
Acked-by: Michal Hocko <mhocko@suse.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: David Hildenbrand <david@redhat.com>
Reviewed-by: Pavel Tatashin <pasha.tatashin@soleen.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r-- | mm/page_alloc.c | 5 |
1 files changed, 0 insertions, 5 deletions
diff --git a/mm/page_alloc.c b/mm/page_alloc.c index eb2027892ef9..6f3d2c7af84b 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c | |||
@@ -122,9 +122,6 @@ nodemask_t node_states[NR_NODE_STATES] __read_mostly = { | |||
122 | }; | 122 | }; |
123 | EXPORT_SYMBOL(node_states); | 123 | EXPORT_SYMBOL(node_states); |
124 | 124 | ||
125 | /* Protect totalram_pages and zone->managed_pages */ | ||
126 | static DEFINE_SPINLOCK(managed_page_count_lock); | ||
127 | |||
128 | atomic_long_t _totalram_pages __read_mostly; | 125 | atomic_long_t _totalram_pages __read_mostly; |
129 | EXPORT_SYMBOL(_totalram_pages); | 126 | EXPORT_SYMBOL(_totalram_pages); |
130 | unsigned long totalreserve_pages __read_mostly; | 127 | unsigned long totalreserve_pages __read_mostly; |
@@ -7077,14 +7074,12 @@ early_param("movablecore", cmdline_parse_movablecore); | |||
7077 | 7074 | ||
7078 | void adjust_managed_page_count(struct page *page, long count) | 7075 | void adjust_managed_page_count(struct page *page, long count) |
7079 | { | 7076 | { |
7080 | spin_lock(&managed_page_count_lock); | ||
7081 | atomic_long_add(count, &page_zone(page)->managed_pages); | 7077 | atomic_long_add(count, &page_zone(page)->managed_pages); |
7082 | totalram_pages_add(count); | 7078 | totalram_pages_add(count); |
7083 | #ifdef CONFIG_HIGHMEM | 7079 | #ifdef CONFIG_HIGHMEM |
7084 | if (PageHighMem(page)) | 7080 | if (PageHighMem(page)) |
7085 | totalhigh_pages_add(count); | 7081 | totalhigh_pages_add(count); |
7086 | #endif | 7082 | #endif |
7087 | spin_unlock(&managed_page_count_lock); | ||
7088 | } | 7083 | } |
7089 | EXPORT_SYMBOL(adjust_managed_page_count); | 7084 | EXPORT_SYMBOL(adjust_managed_page_count); |
7090 | 7085 | ||