author    Joonsoo Kim <iamjoonsoo.kim@lge.com>    2014-04-03 17:47:30 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>    2014-04-03 19:20:59 -0400
commit    4e35f483850ba46b838adfd312b3052416e15204
tree      1d8c9724ae0e09cfae9fc88e0002921802393041 /mm/hugetlb.c
parent    f031dd274ccb7069012ede73f537cc81c42fc80b
mm, hugetlb: use vma_resv_map() map types
Until now, we have obtained a resv_map in two different ways, depending on the mapping type. This makes the code messy and unreadable. Unify it.

[davidlohr@hp.com: code cleanups]
Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Signed-off-by: Davidlohr Bueso <davidlohr@hp.com>
Reviewed-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Reviewed-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: David Gibson <david@gibson.dropbear.id.au>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
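For readers who want the before/after shape without wading through the diff below, here is a minimal userspace model of the unified lookup. The struct layouts, flag values, and names are illustrative stand-ins, not the kernel's; only the control flow mirrors the patch: shared (VM_MAYSHARE) mappings resolve their resv_map through the inode, while private mappings decode it from the VMA's tagged private data.

#include <stdio.h>

/* Illustrative stand-ins for the kernel structures; layouts are invented. */
struct resv_map { long regions; };

struct inode_model {
	struct resv_map *resv;          /* models inode->i_mapping->private_data */
};

struct vma_model {
	unsigned long flags;            /* models vma->vm_flags */
	struct inode_model *inode;      /* models vma->vm_file->f_mapping->host */
	unsigned long private_data;     /* models the VMA's tagged private pointer */
};

#define VM_MAYSHARE_MODEL	0x1UL
#define HPAGE_RESV_MASK_MODEL	0x3UL   /* low bits carry ownership flags */

/* After the patch: one accessor serves both mapping types. */
static struct resv_map *vma_resv_map_model(struct vma_model *vma)
{
	if (vma->flags & VM_MAYSHARE_MODEL)
		return vma->inode->resv;  /* shared: the map lives on the inode */

	/* private: the pointer is stashed in the VMA with flag bits masked off */
	return (struct resv_map *)(vma->private_data & ~HPAGE_RESV_MASK_MODEL);
}

int main(void)
{
	struct resv_map shared_map = {0}, private_map = {0};
	struct inode_model inode = { .resv = &shared_map };
	struct vma_model shared  = { .flags = VM_MAYSHARE_MODEL, .inode = &inode };
	struct vma_model priv    = { .private_data = (unsigned long)&private_map | 0x1UL };

	printf("shared  -> inode map: %d\n", vma_resv_map_model(&shared) == &shared_map);
	printf("private -> vma map:   %d\n", vma_resv_map_model(&priv) == &private_map);
	return 0;
}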
Diffstat (limited to 'mm/hugetlb.c')
-rw-r--r--	mm/hugetlb.c	95
1 file changed, 45 insertions(+), 50 deletions(-)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index c7918cd3a153..1c7baff65f9d 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -419,13 +419,24 @@ void resv_map_release(struct kref *ref)
 	kfree(resv_map);
 }
 
+static inline struct resv_map *inode_resv_map(struct inode *inode)
+{
+	return inode->i_mapping->private_data;
+}
+
 static struct resv_map *vma_resv_map(struct vm_area_struct *vma)
 {
 	VM_BUG_ON(!is_vm_hugetlb_page(vma));
-	if (!(vma->vm_flags & VM_MAYSHARE))
+	if (vma->vm_flags & VM_MAYSHARE) {
+		struct address_space *mapping = vma->vm_file->f_mapping;
+		struct inode *inode = mapping->host;
+
+		return inode_resv_map(inode);
+
+	} else {
 		return (struct resv_map *)(get_vma_private_data(vma) &
 							~HPAGE_RESV_MASK);
-	return NULL;
+	}
 }
 
 static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)
@@ -1167,48 +1178,34 @@ static void return_unused_surplus_pages(struct hstate *h,
 static long vma_needs_reservation(struct hstate *h,
 			struct vm_area_struct *vma, unsigned long addr)
 {
-	struct address_space *mapping = vma->vm_file->f_mapping;
-	struct inode *inode = mapping->host;
-
-	if (vma->vm_flags & VM_MAYSHARE) {
-		pgoff_t idx = vma_hugecache_offset(h, vma, addr);
-		struct resv_map *resv = inode->i_mapping->private_data;
-
-		return region_chg(resv, idx, idx + 1);
+	struct resv_map *resv;
+	pgoff_t idx;
+	long chg;
 
-	} else if (!is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
+	resv = vma_resv_map(vma);
+	if (!resv)
 		return 1;
 
-	} else {
-		long err;
-		pgoff_t idx = vma_hugecache_offset(h, vma, addr);
-		struct resv_map *resv = vma_resv_map(vma);
+	idx = vma_hugecache_offset(h, vma, addr);
+	chg = region_chg(resv, idx, idx + 1);
 
-		err = region_chg(resv, idx, idx + 1);
-		if (err < 0)
-			return err;
-		return 0;
-	}
+	if (vma->vm_flags & VM_MAYSHARE)
+		return chg;
+	else
+		return chg < 0 ? chg : 0;
 }
 static void vma_commit_reservation(struct hstate *h,
 			struct vm_area_struct *vma, unsigned long addr)
 {
-	struct address_space *mapping = vma->vm_file->f_mapping;
-	struct inode *inode = mapping->host;
-
-	if (vma->vm_flags & VM_MAYSHARE) {
-		pgoff_t idx = vma_hugecache_offset(h, vma, addr);
-		struct resv_map *resv = inode->i_mapping->private_data;
-
-		region_add(resv, idx, idx + 1);
+	struct resv_map *resv;
+	pgoff_t idx;
 
-	} else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
-		pgoff_t idx = vma_hugecache_offset(h, vma, addr);
-		struct resv_map *resv = vma_resv_map(vma);
+	resv = vma_resv_map(vma);
+	if (!resv)
+		return;
 
-		/* Mark this page used in the map. */
-		region_add(resv, idx, idx + 1);
-	}
+	idx = vma_hugecache_offset(h, vma, addr);
+	region_add(resv, idx, idx + 1);
 }
 
 static struct page *alloc_huge_page(struct vm_area_struct *vma,
@@ -2271,7 +2268,7 @@ static void hugetlb_vm_op_open(struct vm_area_struct *vma)
 	 * after this open call completes. It is therefore safe to take a
 	 * new reference here without additional locking.
 	 */
-	if (resv)
+	if (resv && is_vma_resv_set(vma, HPAGE_RESV_OWNER))
 		kref_get(&resv->refs);
 }
 
@@ -2280,23 +2277,21 @@ static void hugetlb_vm_op_close(struct vm_area_struct *vma)
 	struct hstate *h = hstate_vma(vma);
 	struct resv_map *resv = vma_resv_map(vma);
 	struct hugepage_subpool *spool = subpool_vma(vma);
-	unsigned long reserve;
-	unsigned long start;
-	unsigned long end;
+	unsigned long reserve, start, end;
 
-	if (resv) {
-		start = vma_hugecache_offset(h, vma, vma->vm_start);
-		end = vma_hugecache_offset(h, vma, vma->vm_end);
+	if (!resv || !is_vma_resv_set(vma, HPAGE_RESV_OWNER))
+		return;
 
-		reserve = (end - start) -
-			region_count(resv, start, end);
+	start = vma_hugecache_offset(h, vma, vma->vm_start);
+	end = vma_hugecache_offset(h, vma, vma->vm_end);
 
-		kref_put(&resv->refs, resv_map_release);
+	reserve = (end - start) - region_count(resv, start, end);
 
-		if (reserve) {
-			hugetlb_acct_memory(h, -reserve);
-			hugepage_subpool_put_pages(spool, reserve);
-		}
+	kref_put(&resv->refs, resv_map_release);
+
+	if (reserve) {
+		hugetlb_acct_memory(h, -reserve);
+		hugepage_subpool_put_pages(spool, reserve);
 	}
 }
 
@@ -3189,7 +3184,7 @@ int hugetlb_reserve_pages(struct inode *inode,
 	 * called to make the mapping read-write. Assume !vma is a shm mapping
 	 */
 	if (!vma || vma->vm_flags & VM_MAYSHARE) {
-		resv_map = inode->i_mapping->private_data;
+		resv_map = inode_resv_map(inode);
 
 		chg = region_chg(resv_map, from, to);
 
@@ -3248,7 +3243,7 @@ out_err:
 void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed)
 {
 	struct hstate *h = hstate_inode(inode);
-	struct resv_map *resv_map = inode->i_mapping->private_data;
+	struct resv_map *resv_map = inode_resv_map(inode);
 	long chg = 0;
 	struct hugepage_subpool *spool = subpool_inode(inode);
 
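One consequence worth spelling out: after this patch, vma_needs_reservation() no longer branches on mapping type to locate its map; only the interpretation of the region_chg() result differs between shared and private mappings. A compilable sketch of that return convention follows; the function name and parameters are stand-ins, not the kernel's, and resv_map is left opaque since only its NULL-ness matters here.

struct resv_map;	/* opaque: only NULL vs. non-NULL is used below */

/*
 * Sketch of the unified return convention, where chg stands for the
 * value region_chg() would have produced for this map and offset:
 *   - no resv_map at all -> 1, the caller must reserve one page now
 *   - shared mapping     -> chg as-is (pages needed, or a negative error)
 *   - private owner      -> negative errors pass through, success is 0
 */
static long needs_reservation_model(const struct resv_map *resv,
				    int shared, long chg)
{
	if (!resv)
		return 1;
	if (shared)
		return chg;
	return chg < 0 ? chg : 0;
}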