-rw-r--r--   mm/hugetlb.c   58
1 file changed, 45 insertions, 13 deletions
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index a2d29b84501f..3e873f0101fb 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -40,6 +40,28 @@ static int hugetlb_next_nid;
  */
 static DEFINE_SPINLOCK(hugetlb_lock);
 
+/*
+ * Convert the address within this vma to the page offset within
+ * the mapping, in base page units.
+ */
+static pgoff_t vma_page_offset(struct vm_area_struct *vma,
+                               unsigned long address)
+{
+        return ((address - vma->vm_start) >> PAGE_SHIFT) +
+                (vma->vm_pgoff >> PAGE_SHIFT);
+}
+
+/*
+ * Convert the address within this vma to the page offset within
+ * the mapping, in pagecache page units; huge pages here.
+ */
+static pgoff_t vma_pagecache_offset(struct vm_area_struct *vma,
+                                    unsigned long address)
+{
+        return ((address - vma->vm_start) >> HPAGE_SHIFT) +
+                (vma->vm_pgoff >> (HPAGE_SHIFT - PAGE_SHIFT));
+}
+
 #define HPAGE_RESV_OWNER    (1UL << (BITS_PER_LONG - 1))
 #define HPAGE_RESV_UNMAPPED (1UL << (BITS_PER_LONG - 2))
 #define HPAGE_RESV_MASK (HPAGE_RESV_OWNER | HPAGE_RESV_UNMAPPED)
@@ -53,36 +75,48 @@ static DEFINE_SPINLOCK(hugetlb_lock);
  * to reset the VMA at fork() time as it is not in use yet and there is no
  * chance of the global counters getting corrupted as a result of the values.
  */
+static unsigned long get_vma_private_data(struct vm_area_struct *vma)
+{
+        return (unsigned long)vma->vm_private_data;
+}
+
+static void set_vma_private_data(struct vm_area_struct *vma,
+                                 unsigned long value)
+{
+        vma->vm_private_data = (void *)value;
+}
+
 static unsigned long vma_resv_huge_pages(struct vm_area_struct *vma)
 {
         VM_BUG_ON(!is_vm_hugetlb_page(vma));
         if (!(vma->vm_flags & VM_SHARED))
-                return (unsigned long)vma->vm_private_data & ~HPAGE_RESV_MASK;
+                return get_vma_private_data(vma) & ~HPAGE_RESV_MASK;
         return 0;
 }
 
 static void set_vma_resv_huge_pages(struct vm_area_struct *vma,
                                     unsigned long reserve)
 {
-        unsigned long flags;
         VM_BUG_ON(!is_vm_hugetlb_page(vma));
         VM_BUG_ON(vma->vm_flags & VM_SHARED);
 
-        flags = (unsigned long)vma->vm_private_data & HPAGE_RESV_MASK;
-        vma->vm_private_data = (void *)(reserve | flags);
+        set_vma_private_data(vma,
+                (get_vma_private_data(vma) & HPAGE_RESV_MASK) | reserve);
 }
 
 static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags)
 {
-        unsigned long reserveflags = (unsigned long)vma->vm_private_data;
         VM_BUG_ON(!is_vm_hugetlb_page(vma));
-        vma->vm_private_data = (void *)(reserveflags | flags);
+        VM_BUG_ON(vma->vm_flags & VM_SHARED);
+
+        set_vma_private_data(vma, get_vma_private_data(vma) | flags);
 }
 
 static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag)
 {
         VM_BUG_ON(!is_vm_hugetlb_page(vma));
-        return ((unsigned long)vma->vm_private_data & flag) != 0;
+
+        return (get_vma_private_data(vma) & flag) != 0;
 }
 
 /* Decrement the reserved pages in the hugepage pool by one */
@@ -1151,11 +1185,10 @@ static struct page *hugetlbfs_pagecache_page(struct vm_area_struct *vma,
                         unsigned long address)
 {
         struct address_space *mapping;
-        unsigned long idx;
+        pgoff_t idx;
 
         mapping = vma->vm_file->f_mapping;
-        idx = ((address - vma->vm_start) >> HPAGE_SHIFT)
-                + (vma->vm_pgoff >> (HPAGE_SHIFT - PAGE_SHIFT));
+        idx = vma_pagecache_offset(vma, address);
 
         return find_lock_page(mapping, idx);
 }
@@ -1164,7 +1197,7 @@ static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
                         unsigned long address, pte_t *ptep, int write_access)
 {
         int ret = VM_FAULT_SIGBUS;
-        unsigned long idx;
+        pgoff_t idx;
         unsigned long size;
         struct page *page;
         struct address_space *mapping;
@@ -1183,8 +1216,7 @@ static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
         }
 
         mapping = vma->vm_file->f_mapping;
-        idx = ((address - vma->vm_start) >> HPAGE_SHIFT)
-                + (vma->vm_pgoff >> (HPAGE_SHIFT - PAGE_SHIFT));
+        idx = vma_pagecache_offset(vma, address);
 
         /*
          * Use page lock to guard against racing truncation
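For reference, the arithmetic that the new vma_pagecache_offset() helper encapsulates can be exercised outside the kernel. The sketch below is illustrative only: it assumes x86-style 4 KiB base pages (PAGE_SHIFT = 12) and 2 MiB huge pages (HPAGE_SHIFT = 21), which vary by architecture, and it models the vma fields as plain parameters rather than a real struct vm_area_struct.

/* Illustrative userspace sketch, not kernel code. */
#include <stdio.h>

#define PAGE_SHIFT  12UL   /* assumed 4 KiB base pages */
#define HPAGE_SHIFT 21UL   /* assumed 2 MiB huge pages */

/* Mirrors the helper: index into the mapping in huge-page units. */
static unsigned long pagecache_offset(unsigned long vm_start,
                                      unsigned long vm_pgoff,
                                      unsigned long address)
{
        return ((address - vm_start) >> HPAGE_SHIFT) +
                (vm_pgoff >> (HPAGE_SHIFT - PAGE_SHIFT));
}

int main(void)
{
        /* Hypothetical vma starting at 0x40000000, mapped at file offset 0. */
        unsigned long vm_start = 0x40000000UL;
        unsigned long vm_pgoff = 0;

        /* An address three huge pages into the mapping. */
        unsigned long address = vm_start + 3 * (1UL << HPAGE_SHIFT);

        printf("pagecache index: %lu\n",
               pagecache_offset(vm_start, vm_pgoff, address));
        return 0;
}

With vm_pgoff = 0 and an address three huge pages past vm_start, the computed pagecache index is 3, i.e. the third huge-page-sized entry in the file's mapping, which is the index hugetlbfs_pagecache_page() passes to find_lock_page().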