aboutsummaryrefslogtreecommitdiffstats
path: root/mm/hugetlb.c
diff options
context:
space:
mode:
authorEric Paris <eparis@redhat.com>2005-11-22 00:32:28 -0500
committerLinus Torvalds <torvalds@g5.osdl.org>2005-11-22 12:13:43 -0500
commit0bd0f9fb190a0fc0fb25b764c8b04869711f7657 (patch)
tree0bd7b58c037b5905cf834f27625bd4e0cbb2f3f2 /mm/hugetlb.c
parent5ef897c71a8985b62b7ec320a37376daaad364d0 (diff)
[PATCH] hugetlb: fix race in set_max_huge_pages for multiple updaters of nr_huge_pages
If there are multiple updaters to /proc/sys/vm/nr_hugepages simultaneously it is possible for the nr_huge_pages variable to become incorrect. There is no locking in the set_max_huge_pages function around alloc_fresh_huge_page, which is able to update nr_huge_pages. Two callers to alloc_fresh_huge_page could race against each other, as could a call to alloc_fresh_huge_page and a call to update_and_free_page. This patch just expands the area covered by the hugetlb_lock to cover the call into alloc_fresh_huge_page. I'm not sure how we could say that a sysctl section is performance critical where more specific locking would be needed.

My reproducer was to run a couple of copies of the following script simultaneously:

	while [ true ]; do
		echo 1000 > /proc/sys/vm/nr_hugepages
		echo 500 > /proc/sys/vm/nr_hugepages
		echo 750 > /proc/sys/vm/nr_hugepages
		echo 100 > /proc/sys/vm/nr_hugepages
		echo 0 > /proc/sys/vm/nr_hugepages
	done

and then watch /proc/meminfo; eventually you will see things like:

	HugePages_Total: 100
	HugePages_Free:  109

After applying the patch all seemed well.

Signed-off-by: Eric Paris <eparis@redhat.com>
Acked-by: William Irwin <wli@holomorphy.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'mm/hugetlb.c')
-rw-r--r--	mm/hugetlb.c	6
1 file changed, 6 insertions(+), 0 deletions(-)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 728e9bda12e..3e52df7c471 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -22,6 +22,10 @@ unsigned long max_huge_pages;
 static struct list_head hugepage_freelists[MAX_NUMNODES];
 static unsigned int nr_huge_pages_node[MAX_NUMNODES];
 static unsigned int free_huge_pages_node[MAX_NUMNODES];
+
+/*
+ * Protects updates to hugepage_freelists, nr_huge_pages, and free_huge_pages
+ */
 static DEFINE_SPINLOCK(hugetlb_lock);
 
 static void enqueue_huge_page(struct page *page)
@@ -61,8 +65,10 @@ static struct page *alloc_fresh_huge_page(void)
 					HUGETLB_PAGE_ORDER);
 	nid = (nid + 1) % num_online_nodes();
 	if (page) {
+		spin_lock(&hugetlb_lock);
 		nr_huge_pages++;
 		nr_huge_pages_node[page_to_nid(page)]++;
+		spin_unlock(&hugetlb_lock);
 	}
 	return page;
 }