author	Andy Whitcroft <apw@shadowen.org>	2008-07-24 00:27:26 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2008-07-24 13:47:16 -0400
commit	e7c4b0bfd025f71cf7624b7c1be174f63caade33
tree	ee81ef1fafe34ca1a4a914d0db7e5decbc122379
parent	04f2cbe35699d22dbf428373682ead85ca1240f5
huge page private reservation review cleanups
Create some new accessors for vma private data to cut down on and contain
the casts.  Encapsulates the huge and small page offset calculations.
Also adds a couple of VM_BUG_ONs for consistency.

[akpm@linux-foundation.org: Make things static]
Signed-off-by: Andy Whitcroft <apw@shadowen.org>
Acked-by: Mel Gorman <mel@csn.ul.ie>
Cc: Adam Litke <agl@us.ibm.com>
Cc: Johannes Weiner <hannes@saeurebad.de>
Cc: Andy Whitcroft <apw@shadowen.org>
Cc: William Lee Irwin III <wli@holomorphy.com>
Cc: Hugh Dickins <hugh@veritas.com>
Cc: Michael Kerrisk <mtk.manpages@googlemail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
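The pagecache offset arithmetic the patch encapsulates is easiest to see with
concrete numbers.  Below is a minimal userspace sketch (not part of the patch)
mirroring vma_pagecache_offset(); the 4 KiB base page / 2 MiB huge page sizes,
the toy_vma struct, and the sample values are illustrative assumptions, not
kernel code:

/*
 * Userspace sketch (illustrative only) of the offset conversion that
 * vma_pagecache_offset() performs: translate an address in a vma into
 * a huge-page index within the backing file.
 */
#include <stdio.h>

#define PAGE_SHIFT	12	/* 4 KiB base pages (assumed) */
#define HPAGE_SHIFT	21	/* 2 MiB huge pages (assumed) */

struct toy_vma {
	unsigned long vm_start;	/* first address of the mapping */
	unsigned long vm_pgoff;	/* file offset of vm_start, in base pages */
};

/* Huge-page index into the mapping, mirroring vma_pagecache_offset(). */
static unsigned long pagecache_offset(const struct toy_vma *vma,
				      unsigned long address)
{
	return ((address - vma->vm_start) >> HPAGE_SHIFT) +
		(vma->vm_pgoff >> (HPAGE_SHIFT - PAGE_SHIFT));
}

int main(void)
{
	/* Mapping begins 4 MiB (1024 base pages == 2 huge pages) into the file. */
	struct toy_vma vma = { .vm_start = 0x40000000UL, .vm_pgoff = 1024 };

	/*
	 * An address 3 MiB past vm_start lies in huge page 1 of the vma,
	 * hence huge page 2 + 1 = 3 of the file; this prints "idx = 3".
	 */
	printf("idx = %lu\n",
	       pagecache_offset(&vma, 0x40000000UL + 0x300000UL));
	return 0;
}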
-rw-r--r--	mm/hugetlb.c	58
1 file changed, 45 insertions(+), 13 deletions(-)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index a2d29b84501f..3e873f0101fb 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -40,6 +40,28 @@ static int hugetlb_next_nid;
  */
 static DEFINE_SPINLOCK(hugetlb_lock);
 
+/*
+ * Convert the address within this vma to the page offset within
+ * the mapping, in base page units.
+ */
+static pgoff_t vma_page_offset(struct vm_area_struct *vma,
+				unsigned long address)
+{
+	return ((address - vma->vm_start) >> PAGE_SHIFT) +
+		(vma->vm_pgoff >> PAGE_SHIFT);
+}
+
+/*
+ * Convert the address within this vma to the page offset within
+ * the mapping, in pagecache page units; huge pages here.
+ */
+static pgoff_t vma_pagecache_offset(struct vm_area_struct *vma,
+					unsigned long address)
+{
+	return ((address - vma->vm_start) >> HPAGE_SHIFT) +
+		(vma->vm_pgoff >> (HPAGE_SHIFT - PAGE_SHIFT));
+}
+
 #define HPAGE_RESV_OWNER (1UL << (BITS_PER_LONG - 1))
 #define HPAGE_RESV_UNMAPPED (1UL << (BITS_PER_LONG - 2))
 #define HPAGE_RESV_MASK (HPAGE_RESV_OWNER | HPAGE_RESV_UNMAPPED)
@@ -53,36 +75,48 @@ static DEFINE_SPINLOCK(hugetlb_lock);
  * to reset the VMA at fork() time as it is not in use yet and there is no
  * chance of the global counters getting corrupted as a result of the values.
  */
+static unsigned long get_vma_private_data(struct vm_area_struct *vma)
+{
+	return (unsigned long)vma->vm_private_data;
+}
+
+static void set_vma_private_data(struct vm_area_struct *vma,
+					unsigned long value)
+{
+	vma->vm_private_data = (void *)value;
+}
+
 static unsigned long vma_resv_huge_pages(struct vm_area_struct *vma)
 {
 	VM_BUG_ON(!is_vm_hugetlb_page(vma));
 	if (!(vma->vm_flags & VM_SHARED))
-		return (unsigned long)vma->vm_private_data & ~HPAGE_RESV_MASK;
+		return get_vma_private_data(vma) & ~HPAGE_RESV_MASK;
 	return 0;
 }
 
 static void set_vma_resv_huge_pages(struct vm_area_struct *vma,
 	unsigned long reserve)
 {
-	unsigned long flags;
 	VM_BUG_ON(!is_vm_hugetlb_page(vma));
 	VM_BUG_ON(vma->vm_flags & VM_SHARED);
 
-	flags = (unsigned long)vma->vm_private_data & HPAGE_RESV_MASK;
-	vma->vm_private_data = (void *)(reserve | flags);
+	set_vma_private_data(vma,
+		(get_vma_private_data(vma) & HPAGE_RESV_MASK) | reserve);
 }
 
 static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags)
 {
-	unsigned long reserveflags = (unsigned long)vma->vm_private_data;
 	VM_BUG_ON(!is_vm_hugetlb_page(vma));
-	vma->vm_private_data = (void *)(reserveflags | flags);
+	VM_BUG_ON(vma->vm_flags & VM_SHARED);
+
+	set_vma_private_data(vma, get_vma_private_data(vma) | flags);
 }
 
 static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag)
 {
 	VM_BUG_ON(!is_vm_hugetlb_page(vma));
-	return ((unsigned long)vma->vm_private_data & flag) != 0;
+
+	return (get_vma_private_data(vma) & flag) != 0;
 }
 
 /* Decrement the reserved pages in the hugepage pool by one */
@@ -1151,11 +1185,10 @@ static struct page *hugetlbfs_pagecache_page(struct vm_area_struct *vma,
 	unsigned long address)
 {
 	struct address_space *mapping;
-	unsigned long idx;
+	pgoff_t idx;
 
 	mapping = vma->vm_file->f_mapping;
-	idx = ((address - vma->vm_start) >> HPAGE_SHIFT)
-		+ (vma->vm_pgoff >> (HPAGE_SHIFT - PAGE_SHIFT));
+	idx = vma_pagecache_offset(vma, address);
 
 	return find_lock_page(mapping, idx);
 }
@@ -1164,7 +1197,7 @@ static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	unsigned long address, pte_t *ptep, int write_access)
 {
 	int ret = VM_FAULT_SIGBUS;
-	unsigned long idx;
+	pgoff_t idx;
 	unsigned long size;
 	struct page *page;
 	struct address_space *mapping;
@@ -1183,8 +1216,7 @@ static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	}
 
 	mapping = vma->vm_file->f_mapping;
-	idx = ((address - vma->vm_start) >> HPAGE_SHIFT)
-		+ (vma->vm_pgoff >> (HPAGE_SHIFT - PAGE_SHIFT));
+	idx = vma_pagecache_offset(vma, address);
 
 	/*
 	 * Use page lock to guard against racing truncation
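
For reference, the packing scheme the new accessors contain: the top two bits
of vm_private_data carry the reservation flags, the remaining bits the private
reserve count.  A minimal userspace sketch of that scheme (again illustrative;
BITS_PER_LONG is derived locally here rather than taken from kernel headers,
and the count of 100 is an arbitrary sample value):

/*
 * Userspace sketch (illustrative only) of packing flags and a counter
 * into one pointer-sized word, as the patch's accessors do.
 */
#include <assert.h>
#include <limits.h>
#include <stddef.h>

#define BITS_PER_LONG		(sizeof(unsigned long) * CHAR_BIT)
#define HPAGE_RESV_OWNER	(1UL << (BITS_PER_LONG - 1))
#define HPAGE_RESV_UNMAPPED	(1UL << (BITS_PER_LONG - 2))
#define HPAGE_RESV_MASK		(HPAGE_RESV_OWNER | HPAGE_RESV_UNMAPPED)

int main(void)
{
	void *vm_private_data = NULL;	/* stands in for the vma field */
	unsigned long v;

	/* set_vma_resv_huge_pages(): keep the flag bits, replace the count. */
	v = (unsigned long)vm_private_data;
	vm_private_data = (void *)((v & HPAGE_RESV_MASK) | 100);

	/* set_vma_resv_flags(): OR a flag into the existing word. */
	v = (unsigned long)vm_private_data;
	vm_private_data = (void *)(v | HPAGE_RESV_OWNER);

	/* vma_resv_huge_pages() / is_vma_resv_set(): mask back out. */
	v = (unsigned long)vm_private_data;
	assert((v & ~HPAGE_RESV_MASK) == 100);
	assert((v & HPAGE_RESV_OWNER) != 0);
	return 0;
}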