author	OGAWA Hirofumi <hirofumi@mail.parknet.co.jp>	2005-10-29 21:15:50 -0400
committer	Linus Torvalds <torvalds@g5.osdl.org>	2005-10-30 00:40:36 -0400
commit	b57b98d147ef98758886a39efb94f3254542c39b (patch)
tree	1aab53a2bb1add92d55fbce963e371527c05d281 /mm
parent	662f3a0b94cc92bd708c27b80f8207cd7db93204 (diff)
[PATCH] mm/msync.c cleanup
This is not actually a problem, but sync_page_range() is already used as the name of a function exported to filesystems. The msync_xxx names are more readable, at least to me.

Signed-off-by: OGAWA Hirofumi <hirofumi@mail.parknet.co.jp>
Acked-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
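For context on the clash the rename avoids: filesystems already call an exported writeback helper of the same name. A minimal sketch of the two colliding declarations (the fs-side prototype is quoted from memory of the 2.6.14-era tree and is shown for illustration only, not as part of this patch):

/* Exported writeback entry point in fs/fs-writeback.c, called by
 * filesystems (prototype hedged from the 2.6.14-era tree): */
int sync_page_range(struct inode *inode, struct address_space *mapping,
		loff_t pos, loff_t count);

/* The unrelated static walker in mm/msync.c shared that name; after
 * this patch it becomes: */
static void msync_page_range(struct vm_area_struct *vma,
		unsigned long addr, unsigned long end);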
Diffstat (limited to 'mm')
-rw-r--r--	mm/msync.c	28
1 file changed, 14 insertions(+), 14 deletions(-)
diff --git a/mm/msync.c b/mm/msync.c
index d0f5a1bce7cb..9cab3f2d5863 100644
--- a/mm/msync.c
+++ b/mm/msync.c
@@ -22,7 +22,7 @@
  * threads/the swapper from ripping pte's out from under us.
  */
 
-static void sync_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
+static void msync_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
 			unsigned long addr, unsigned long end)
 {
 	pte_t *pte;
@@ -50,7 +50,7 @@ static void sync_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
 	pte_unmap(pte - 1);
 }
 
-static inline void sync_pmd_range(struct vm_area_struct *vma, pud_t *pud,
+static inline void msync_pmd_range(struct vm_area_struct *vma, pud_t *pud,
 			unsigned long addr, unsigned long end)
 {
 	pmd_t *pmd;
@@ -61,11 +61,11 @@ static inline void sync_pmd_range(struct vm_area_struct *vma, pud_t *pud,
 		next = pmd_addr_end(addr, end);
 		if (pmd_none_or_clear_bad(pmd))
 			continue;
-		sync_pte_range(vma, pmd, addr, next);
+		msync_pte_range(vma, pmd, addr, next);
 	} while (pmd++, addr = next, addr != end);
 }
 
-static inline void sync_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
+static inline void msync_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
 			unsigned long addr, unsigned long end)
 {
 	pud_t *pud;
@@ -76,11 +76,11 @@ static inline void sync_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
 		next = pud_addr_end(addr, end);
 		if (pud_none_or_clear_bad(pud))
 			continue;
-		sync_pmd_range(vma, pud, addr, next);
+		msync_pmd_range(vma, pud, addr, next);
 	} while (pud++, addr = next, addr != end);
 }
 
-static void sync_page_range(struct vm_area_struct *vma,
+static void msync_page_range(struct vm_area_struct *vma,
 			unsigned long addr, unsigned long end)
 {
 	struct mm_struct *mm = vma->vm_mm;
@@ -101,14 +101,14 @@ static void sync_page_range(struct vm_area_struct *vma,
 		next = pgd_addr_end(addr, end);
 		if (pgd_none_or_clear_bad(pgd))
 			continue;
-		sync_pud_range(vma, pgd, addr, next);
+		msync_pud_range(vma, pgd, addr, next);
 	} while (pgd++, addr = next, addr != end);
 	spin_unlock(&mm->page_table_lock);
 }
 
 #ifdef CONFIG_PREEMPT
-static inline void filemap_sync(struct vm_area_struct *vma,
+static inline void filemap_msync(struct vm_area_struct *vma,
 			unsigned long addr, unsigned long end)
 {
 	const size_t chunk = 64 * 1024;	/* bytes */
 	unsigned long next;
@@ -117,15 +117,15 @@ static inline void filemap_sync(struct vm_area_struct *vma,
 		next = addr + chunk;
 		if (next > end || next < addr)
 			next = end;
-		sync_page_range(vma, addr, next);
+		msync_page_range(vma, addr, next);
 		cond_resched();
 	} while (addr = next, addr != end);
 }
 #else
-static inline void filemap_sync(struct vm_area_struct *vma,
+static inline void filemap_msync(struct vm_area_struct *vma,
 			unsigned long addr, unsigned long end)
 {
-	sync_page_range(vma, addr, end);
+	msync_page_range(vma, addr, end);
 }
 #endif
 
@@ -150,7 +150,7 @@ static int msync_interval(struct vm_area_struct *vma,
 		return -EBUSY;
 
 	if (file && (vma->vm_flags & VM_SHARED)) {
-		filemap_sync(vma, addr, end);
+		filemap_msync(vma, addr, end);
 
 		if (flags & MS_SYNC) {
 			struct address_space *mapping = file->f_mapping;
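As a side note on the code being renamed: under CONFIG_PREEMPT, filemap_msync() bounds the work done under mm->page_table_lock by splitting the range into 64 KB chunks and calling cond_resched() between them, clamping the final chunk and guarding against address wraparound. A self-contained userspace sketch of that chunking pattern (illustrative only; chunked_walk() is a hypothetical stand-in, not kernel code):

#include <stdio.h>

/* Walk [addr, end) in fixed-size chunks, clamping the last chunk and
 * catching address overflow, mirroring the loop in filemap_msync(). */
static void chunked_walk(unsigned long addr, unsigned long end)
{
	const unsigned long chunk = 64 * 1024;	/* bytes */
	unsigned long next;

	do {
		next = addr + chunk;
		if (next > end || next < addr)	/* clamp; next < addr means wraparound */
			next = end;
		printf("process [%#lx, %#lx)\n", addr, next);
		/* the kernel version calls cond_resched() here */
	} while (addr = next, addr != end);
}

int main(void)
{
	chunked_walk(0x10000, 0x38000);	/* two full 64 KB chunks + remainder */
	return 0;
}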