author    Prasanna Meda <pmeda@akamai.com>            2005-06-21 20:14:37 -0400
committer Linus Torvalds <torvalds@ppc970.osdl.org>  2005-06-21 21:46:13 -0400
commit    05b7438475ddbac47e75506913d44550f0e75938 (patch)
tree      3871b9c9c0cfa3699de9581a9283dc9fee608f2c
parent    e798c6e87b64d9fdbd5e9f757b1c033223763d9f (diff)
[PATCH] madvise: merge the maps
This attempts to merge back the split maps.  This code is mostly copied
from Chrisw's mlock merging from post-2.6.11 trees.  The only difference
is in the unmapped_error handling.  Also passed prev to willneed/dontneed,
even though they do not handle it now, since I felt it would be cleaner
to do so than to handle prev in madvise_vma in some cases and in the
subfunctions in others.

Signed-off-by: Prasanna Meda <pmeda@akamai.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
-rw-r--r--  mm/madvise.c | 80
1 file changed, 51 insertions(+), 29 deletions(-)
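To see what the merge-back means from userspace, here is a hypothetical
demonstration (not part of the commit): giving the middle page of a mapping a
different read hint splits one VMA into three, and restoring MADV_NORMAL makes
the flags equal again so vma_merge() can fuse the pieces back.  The
count_vmas() helper is invented for this sketch; it counts /proc/self/maps
entries overlapping the mapping.

/* Hypothetical userspace sketch: watch madvise() split and re-merge VMAs. */
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

/* Count /proc/self/maps entries overlapping [base, base + len). */
static int count_vmas(char *base, size_t len)
{
	FILE *f = fopen("/proc/self/maps", "r");
	unsigned long lo, hi, b = (unsigned long)base;
	char line[256];
	int n = 0;

	if (!f)
		return -1;
	while (fgets(line, sizeof(line), f))
		if (sscanf(line, "%lx-%lx", &lo, &hi) == 2 &&
		    lo < b + len && hi > b)
			n++;
	fclose(f);
	return n;
}

int main(void)
{
	size_t pg = sysconf(_SC_PAGESIZE);
	char *p = mmap(NULL, 3 * pg, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED)
		return 1;

	printf("before: %d\n", count_vmas(p, 3 * pg));

	/* A different read hint on the middle page splits the VMA. */
	madvise(p + pg, pg, MADV_SEQUENTIAL);
	printf("split:  %d\n", count_vmas(p, 3 * pg));

	/* Restoring the hint makes new_flags == vm_flags across all
	 * three pieces; with this patch they merge back into one. */
	madvise(p + pg, pg, MADV_NORMAL);
	printf("merged: %d\n", count_vmas(p, 3 * pg));
	return 0;
}

On a kernel with this patch the last count should drop back to the first;
without it the mapping stays split, which is the proliferation of maps the
merge is meant to avoid.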
diff --git a/mm/madvise.c b/mm/madvise.c
index 75b81ad1f98c..e3108054733c 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -8,17 +8,20 @@
 #include <linux/mman.h>
 #include <linux/pagemap.h>
 #include <linux/syscalls.h>
+#include <linux/mempolicy.h>
 #include <linux/hugetlb.h>
 
 /*
  * We can potentially split a vm area into separate
  * areas, each area with its own behavior.
  */
-static long madvise_behavior(struct vm_area_struct * vma, unsigned long start,
-		unsigned long end, int behavior)
+static long madvise_behavior(struct vm_area_struct * vma,
+		struct vm_area_struct **prev,
+		unsigned long start, unsigned long end, int behavior)
 {
 	struct mm_struct * mm = vma->vm_mm;
 	int error = 0;
+	pgoff_t pgoff;
 	int new_flags = vma->vm_flags & ~VM_READHINTMASK;
 
 	switch (behavior) {
@@ -32,8 +35,20 @@ static long madvise_behavior(struct vm_area_struct * vma, unsigned long start,
 		break;
 	}
 
-	if (new_flags == vma->vm_flags)
-		goto out;
+	if (new_flags == vma->vm_flags) {
+		*prev = vma;
+		goto success;
+	}
+
+	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
+	*prev = vma_merge(mm, *prev, start, end, new_flags, vma->anon_vma,
+				vma->vm_file, pgoff, vma_policy(vma));
+	if (*prev) {
+		vma = *prev;
+		goto success;
+	}
+
+	*prev = vma;
 
 	if (start != vma->vm_start) {
 		error = split_vma(mm, vma, start, 1);
@@ -56,6 +71,7 @@ static long madvise_behavior(struct vm_area_struct * vma, unsigned long start,
 out:
 	if (error == -ENOMEM)
 		error = -EAGAIN;
+success:
 	return error;
 }
 
@@ -63,6 +79,7 @@ out:
  * Schedule all required I/O operations. Do not wait for completion.
  */
 static long madvise_willneed(struct vm_area_struct * vma,
+			     struct vm_area_struct ** prev,
 			     unsigned long start, unsigned long end)
 {
 	struct file *file = vma->vm_file;
@@ -70,6 +87,7 @@ static long madvise_willneed(struct vm_area_struct * vma,
 	if (!file)
 		return -EBADF;
 
+	*prev = vma;
 	start = ((start - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
 	if (end > vma->vm_end)
 		end = vma->vm_end;
@@ -100,8 +118,10 @@ static long madvise_willneed(struct vm_area_struct * vma,
  * dirty pages is already available as msync(MS_INVALIDATE).
  */
 static long madvise_dontneed(struct vm_area_struct * vma,
+			     struct vm_area_struct ** prev,
 			     unsigned long start, unsigned long end)
 {
+	*prev = vma;
 	if ((vma->vm_flags & VM_LOCKED) || is_vm_hugetlb_page(vma))
 		return -EINVAL;
 
@@ -116,8 +136,8 @@ static long madvise_dontneed(struct vm_area_struct * vma,
 	return 0;
 }
 
-static long madvise_vma(struct vm_area_struct * vma, unsigned long start,
-		unsigned long end, int behavior)
+static long madvise_vma(struct vm_area_struct *vma, struct vm_area_struct **prev,
+			unsigned long start, unsigned long end, int behavior)
 {
 	long error = -EBADF;
 
@@ -125,15 +145,15 @@ static long madvise_vma(struct vm_area_struct * vma, unsigned long start,
 	case MADV_NORMAL:
 	case MADV_SEQUENTIAL:
 	case MADV_RANDOM:
-		error = madvise_behavior(vma, start, end, behavior);
+		error = madvise_behavior(vma, prev, start, end, behavior);
 		break;
 
 	case MADV_WILLNEED:
-		error = madvise_willneed(vma, start, end);
+		error = madvise_willneed(vma, prev, start, end);
 		break;
 
 	case MADV_DONTNEED:
-		error = madvise_dontneed(vma, start, end);
+		error = madvise_dontneed(vma, prev, start, end);
 		break;
 
 	default:
@@ -180,8 +200,8 @@ static long madvise_vma(struct vm_area_struct * vma, unsigned long start,
  */
 asmlinkage long sys_madvise(unsigned long start, size_t len_in, int behavior)
 {
-	unsigned long end;
-	struct vm_area_struct * vma;
+	unsigned long end, tmp;
+	struct vm_area_struct * vma, *prev;
 	int unmapped_error = 0;
 	int error = -EINVAL;
 	size_t len;
@@ -207,40 +227,42 @@ asmlinkage long sys_madvise(unsigned long start, size_t len_in, int behavior)
 	/*
 	 * If the interval [start,end) covers some unmapped address
 	 * ranges, just ignore them, but return -ENOMEM at the end.
+	 * - different from the way of handling in mlock etc.
 	 */
-	vma = find_vma(current->mm, start);
+	vma = find_vma_prev(current->mm, start, &prev);
+	if (!vma && prev)
+		vma = prev->vm_next;
 	for (;;) {
 		/* Still start < end. */
 		error = -ENOMEM;
 		if (!vma)
 			goto out;
 
-		/* Here start < vma->vm_end. */
+		/* Here start < (end|vma->vm_end). */
 		if (start < vma->vm_start) {
 			unmapped_error = -ENOMEM;
 			start = vma->vm_start;
+			if (start >= end)
+				goto out;
 		}
 
-		/* Here vma->vm_start <= start < vma->vm_end. */
-		if (end <= vma->vm_end) {
-			if (start < end) {
-				error = madvise_vma(vma, start, end,
-							behavior);
-				if (error)
-					goto out;
-			}
-			error = unmapped_error;
-			goto out;
-		}
+		/* Here vma->vm_start <= start < (end|vma->vm_end) */
+		tmp = vma->vm_end;
+		if (end < tmp)
+			tmp = end;
 
-		/* Here vma->vm_start <= start < vma->vm_end < end. */
-		error = madvise_vma(vma, start, vma->vm_end, behavior);
+		/* Here vma->vm_start <= start < tmp <= (end|vma->vm_end). */
+		error = madvise_vma(vma, &prev, start, tmp, behavior);
 		if (error)
 			goto out;
-		start = vma->vm_end;
-		vma = vma->vm_next;
+		start = tmp;
+		if (start < prev->vm_end)
+			start = prev->vm_end;
+		error = unmapped_error;
+		if (start >= end)
+			goto out;
+		vma = prev->vm_next;
 	}
-
 out:
 	up_write(&current->mm->mmap_sem);
 	return error;
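The rewritten loop also preserves madvise()'s gap semantics: each vma gets the
advice for the clamped range [start, tmp), holes only set unmapped_error, and
-ENOMEM is reported once at the end of the walk.  A minimal userspace sketch
of that visible behavior (hypothetical, assuming a hole punched into an
anonymous mapping):

/* Hypothetical sketch: madvise() over a range with a hole applies the
 * advice to the mapped pieces and still returns -ENOMEM for the gap. */
#include <errno.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	size_t pg = sysconf(_SC_PAGESIZE);
	char *p = mmap(NULL, 3 * pg, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED)
		return 1;

	/* Unmap the middle page, leaving two mapped pieces and a gap. */
	munmap(p + pg, pg);

	/* The loop advises both pieces, notes the gap in unmapped_error,
	 * and the syscall fails with ENOMEM only after the full walk. */
	if (madvise(p, 3 * pg, MADV_DONTNEED) == -1 && errno == ENOMEM)
		printf("advice applied to mapped parts, ENOMEM for the hole\n");
	return 0;
}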