aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorJohannes Weiner <hannes@saeurebad.de>2008-07-24 00:27:33 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2008-07-24 13:47:16 -0400
commita858f7b2e9bb4eb665176dde5cf32eeaaf90f153 (patch)
tree54f79db047c42d476502e9023d3cbdb0a4c2c767
parent84afd99b8398c9d73af8238aa3cd835858e3097a (diff)
vma_page_offset() has no callers: drop it
Hugh adds: vma_pagecache_offset() has a dangerously misleading name, since
it's using hugepage units: rename it to vma_hugecache_offset().

[apw@shadowen.org: restack onto fixed MAP_PRIVATE reservations]
[akpm@linux-foundation.org: vma_split conversion]
Signed-off-by: Johannes Weiner <hannes@saeurebad.de>
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Cc: Adam Litke <agl@us.ibm.com>
Cc: Nishanth Aravamudan <nacc@us.ibm.com>
Cc: Andi Kleen <ak@suse.de>
Cc: Nick Piggin <npiggin@suse.de>
Signed-off-by: Andy Whitcroft <apw@shadowen.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--  mm/hugetlb.c | 29 +++++++++--------------------
1 file changed, 9 insertions(+), 20 deletions(-)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 65616941a383..eda9642254a0 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -201,20 +201,9 @@ static long region_count(struct list_head *head, long f, long t)
 
 /*
  * Convert the address within this vma to the page offset within
- * the mapping, in base page units.
- */
-static pgoff_t vma_page_offset(struct vm_area_struct *vma,
-				unsigned long address)
-{
-	return ((address - vma->vm_start) >> PAGE_SHIFT) +
-		(vma->vm_pgoff >> PAGE_SHIFT);
-}
-
-/*
- * Convert the address within this vma to the page offset within
  * the mapping, in pagecache page units; huge pages here.
  */
-static pgoff_t vma_pagecache_offset(struct vm_area_struct *vma,
+static pgoff_t vma_hugecache_offset(struct vm_area_struct *vma,
 			unsigned long address)
 {
 	return ((address - vma->vm_start) >> HPAGE_SHIFT) +
@@ -806,7 +795,7 @@ static int vma_needs_reservation(struct vm_area_struct *vma, unsigned long addr)
 	struct inode *inode = mapping->host;
 
 	if (vma->vm_flags & VM_SHARED) {
-		pgoff_t idx = vma_pagecache_offset(vma, addr);
+		pgoff_t idx = vma_hugecache_offset(vma, addr);
 		return region_chg(&inode->i_mapping->private_list,
 							idx, idx + 1);
 
@@ -815,7 +804,7 @@ static int vma_needs_reservation(struct vm_area_struct *vma, unsigned long addr)
 
 	} else {
 		int err;
-		pgoff_t idx = vma_pagecache_offset(vma, addr);
+		pgoff_t idx = vma_hugecache_offset(vma, addr);
 		struct resv_map *reservations = vma_resv_map(vma);
 
 		err = region_chg(&reservations->regions, idx, idx + 1);
@@ -831,11 +820,11 @@ static void vma_commit_reservation(struct vm_area_struct *vma,
 	struct inode *inode = mapping->host;
 
 	if (vma->vm_flags & VM_SHARED) {
-		pgoff_t idx = vma_pagecache_offset(vma, addr);
+		pgoff_t idx = vma_hugecache_offset(vma, addr);
 		region_add(&inode->i_mapping->private_list, idx, idx + 1);
 
 	} else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
-		pgoff_t idx = vma_pagecache_offset(vma, addr);
+		pgoff_t idx = vma_hugecache_offset(vma, addr);
 		struct resv_map *reservations = vma_resv_map(vma);
 
 		/* Mark this page used in the map. */
@@ -1153,8 +1142,8 @@ static void hugetlb_vm_op_close(struct vm_area_struct *vma)
 	unsigned long end;
 
 	if (reservations) {
-		start = vma_pagecache_offset(vma, vma->vm_start);
-		end = vma_pagecache_offset(vma, vma->vm_end);
+		start = vma_hugecache_offset(vma, vma->vm_start);
+		end = vma_hugecache_offset(vma, vma->vm_end);
 
 		reserve = (end - start) -
 			region_count(&reservations->regions, start, end);
@@ -1471,7 +1460,7 @@ static struct page *hugetlbfs_pagecache_page(struct vm_area_struct *vma,
 	pgoff_t idx;
 
 	mapping = vma->vm_file->f_mapping;
-	idx = vma_pagecache_offset(vma, address);
+	idx = vma_hugecache_offset(vma, address);
 
 	return find_lock_page(mapping, idx);
 }
@@ -1499,7 +1488,7 @@ static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	}
 
 	mapping = vma->vm_file->f_mapping;
-	idx = vma_pagecache_offset(vma, address);
+	idx = vma_hugecache_offset(vma, address);
 
 	/*
 	 * Use page lock to guard against racing truncation