Diffstat (limited to 'mm/hugetlb.c')
 mm/hugetlb.c | 26 +++++++++++++-------------
 1 file changed, 13 insertions(+), 13 deletions(-)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 28c655ba935..e83ad2c9228 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -316,7 +316,7 @@ static void resv_map_release(struct kref *ref)
 static struct resv_map *vma_resv_map(struct vm_area_struct *vma)
 {
 	VM_BUG_ON(!is_vm_hugetlb_page(vma));
-	if (!(vma->vm_flags & VM_SHARED))
+	if (!(vma->vm_flags & VM_MAYSHARE))
 		return (struct resv_map *)(get_vma_private_data(vma) &
 					~HPAGE_RESV_MASK);
 	return NULL;
@@ -325,7 +325,7 @@ static struct resv_map *vma_resv_map(struct vm_area_struct *vma)
 static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)
 {
 	VM_BUG_ON(!is_vm_hugetlb_page(vma));
-	VM_BUG_ON(vma->vm_flags & VM_SHARED);
+	VM_BUG_ON(vma->vm_flags & VM_MAYSHARE);
 
 	set_vma_private_data(vma, (get_vma_private_data(vma) &
 				HPAGE_RESV_MASK) | (unsigned long)map);
@@ -334,7 +334,7 @@ static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)
 static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags)
 {
 	VM_BUG_ON(!is_vm_hugetlb_page(vma));
-	VM_BUG_ON(vma->vm_flags & VM_SHARED);
+	VM_BUG_ON(vma->vm_flags & VM_MAYSHARE);
 
 	set_vma_private_data(vma, get_vma_private_data(vma) | flags);
 }
@@ -353,7 +353,7 @@ static void decrement_hugepage_resv_vma(struct hstate *h,
 	if (vma->vm_flags & VM_NORESERVE)
 		return;
 
-	if (vma->vm_flags & VM_SHARED) {
+	if (vma->vm_flags & VM_MAYSHARE) {
 		/* Shared mappings always use reserves */
 		h->resv_huge_pages--;
 	} else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
@@ -369,14 +369,14 @@ static void decrement_hugepage_resv_vma(struct hstate *h,
 void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
 {
 	VM_BUG_ON(!is_vm_hugetlb_page(vma));
-	if (!(vma->vm_flags & VM_SHARED))
+	if (!(vma->vm_flags & VM_MAYSHARE))
 		vma->vm_private_data = (void *)0;
 }
 
 /* Returns true if the VMA has associated reserve pages */
 static int vma_has_reserves(struct vm_area_struct *vma)
 {
-	if (vma->vm_flags & VM_SHARED)
+	if (vma->vm_flags & VM_MAYSHARE)
 		return 1;
 	if (is_vma_resv_set(vma, HPAGE_RESV_OWNER))
 		return 1;
@@ -924,7 +924,7 @@ static long vma_needs_reservation(struct hstate *h,
 	struct address_space *mapping = vma->vm_file->f_mapping;
 	struct inode *inode = mapping->host;
 
-	if (vma->vm_flags & VM_SHARED) {
+	if (vma->vm_flags & VM_MAYSHARE) {
 		pgoff_t idx = vma_hugecache_offset(h, vma, addr);
 		return region_chg(&inode->i_mapping->private_list,
 							idx, idx + 1);
@@ -949,7 +949,7 @@ static void vma_commit_reservation(struct hstate *h,
 	struct address_space *mapping = vma->vm_file->f_mapping;
 	struct inode *inode = mapping->host;
 
-	if (vma->vm_flags & VM_SHARED) {
+	if (vma->vm_flags & VM_MAYSHARE) {
 		pgoff_t idx = vma_hugecache_offset(h, vma, addr);
 		region_add(&inode->i_mapping->private_list, idx, idx + 1);
 
@@ -1893,7 +1893,7 @@ retry_avoidcopy:
 	 * at the time of fork() could consume its reserves on COW instead
 	 * of the full address range.
 	 */
-	if (!(vma->vm_flags & VM_SHARED) &&
+	if (!(vma->vm_flags & VM_MAYSHARE) &&
 			is_vma_resv_set(vma, HPAGE_RESV_OWNER) &&
 			old_page != pagecache_page)
 		outside_reserve = 1;
@@ -2000,7 +2000,7 @@ retry:
 		clear_huge_page(page, address, huge_page_size(h));
 		__SetPageUptodate(page);
 
-		if (vma->vm_flags & VM_SHARED) {
+		if (vma->vm_flags & VM_MAYSHARE) {
 			int err;
 			struct inode *inode = mapping->host;
 
@@ -2104,7 +2104,7 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 			goto out_mutex;
 		}
 
-		if (!(vma->vm_flags & VM_SHARED))
+		if (!(vma->vm_flags & VM_MAYSHARE))
 			pagecache_page = hugetlbfs_pagecache_page(h,
 								vma, address);
 	}
@@ -2289,7 +2289,7 @@ int hugetlb_reserve_pages(struct inode *inode,
 	 * to reserve the full area even if read-only as mprotect() may be
 	 * called to make the mapping read-write. Assume !vma is a shm mapping
 	 */
-	if (!vma || vma->vm_flags & VM_SHARED)
+	if (!vma || vma->vm_flags & VM_MAYSHARE)
 		chg = region_chg(&inode->i_mapping->private_list, from, to);
 	else {
 		struct resv_map *resv_map = resv_map_alloc();
@@ -2330,7 +2330,7 @@ int hugetlb_reserve_pages(struct inode *inode,
 	 * consumed reservations are stored in the map. Hence, nothing
 	 * else has to be done for private mappings here
 	 */
-	if (!vma || vma->vm_flags & VM_SHARED)
+	if (!vma || vma->vm_flags & VM_MAYSHARE)
 		region_add(&inode->i_mapping->private_list, from, to);
 	return 0;
 }