Diffstat (limited to 'mm')
-rw-r--r--  mm/Makefile        2
-rw-r--r--  mm/cma.c          12
-rw-r--r--  mm/huge_memory.c  11
-rw-r--r--  mm/hugetlb.c       4
-rw-r--r--  mm/iov_iter.c    753
-rw-r--r--  mm/kasan/kasan.c  14
-rw-r--r--  mm/memcontrol.c    4
-rw-r--r--  mm/memory.c        7
-rw-r--r--  mm/mlock.c         4
-rw-r--r--  mm/nommu.c         1
-rw-r--r--  mm/page_alloc.c    3
-rw-r--r--  mm/vmalloc.c       1
12 files changed, 45 insertions, 771 deletions
diff --git a/mm/Makefile b/mm/Makefile
index 3c1caa2693bd..15dbe9903c27 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -21,7 +21,7 @@ obj-y := filemap.o mempool.o oom_kill.o \
21 | mm_init.o mmu_context.o percpu.o slab_common.o \ | 21 | mm_init.o mmu_context.o percpu.o slab_common.o \ |
22 | compaction.o vmacache.o \ | 22 | compaction.o vmacache.o \ |
23 | interval_tree.o list_lru.o workingset.o \ | 23 | interval_tree.o list_lru.o workingset.o \ |
24 | iov_iter.o debug.o $(mmu-y) | 24 | debug.o $(mmu-y) |
25 | 25 | ||
26 | obj-y += init-mm.o | 26 | obj-y += init-mm.o |
27 | 27 | ||
diff --git a/mm/cma.c b/mm/cma.c
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -64,15 +64,17 @@ static unsigned long cma_bitmap_aligned_mask(struct cma *cma, int align_order)
64 | return (1UL << (align_order - cma->order_per_bit)) - 1; | 64 | return (1UL << (align_order - cma->order_per_bit)) - 1; |
65 | } | 65 | } |
66 | 66 | ||
67 | /* | ||
68 | * Find a PFN aligned to the specified order and return an offset represented in | ||
69 | * order_per_bits. | ||
70 | */ | ||
67 | static unsigned long cma_bitmap_aligned_offset(struct cma *cma, int align_order) | 71 | static unsigned long cma_bitmap_aligned_offset(struct cma *cma, int align_order) |
68 | { | 72 | { |
69 | unsigned int alignment; | ||
70 | |||
71 | if (align_order <= cma->order_per_bit) | 73 | if (align_order <= cma->order_per_bit) |
72 | return 0; | 74 | return 0; |
73 | alignment = 1UL << (align_order - cma->order_per_bit); | 75 | |
74 | return ALIGN(cma->base_pfn, alignment) - | 76 | return (ALIGN(cma->base_pfn, (1UL << align_order)) |
75 | (cma->base_pfn >> cma->order_per_bit); | 77 | - cma->base_pfn) >> cma->order_per_bit; |
76 | } | 78 | } |
77 | 79 | ||
78 | static unsigned long cma_bitmap_maxno(struct cma *cma) | 80 | static unsigned long cma_bitmap_maxno(struct cma *cma) |
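For context (not part of this commit): a minimal user-space sketch of the offset arithmetic the hunk above fixes. The input values and the locally re-declared ALIGN() macro are hypothetical; the point is that the old expression aligned in bitmap-bit units while subtracting a mixed-unit term, whereas the new one aligns the PFN first and only then converts the delta into bitmap bits.

#include <stdio.h>

/* Same rounding-up behaviour as the kernel's ALIGN() for power-of-two 'a'. */
#define ALIGN(x, a) (((x) + (a) - 1) & ~((unsigned long)(a) - 1))

int main(void)
{
	unsigned long base_pfn = 1030;	/* hypothetical CMA region start (PFN) */
	unsigned int order_per_bit = 1;	/* one bitmap bit covers 2 pages */
	int align_order = 3;		/* caller asks for 8-page alignment */

	/* Old expression: ALIGN() applied in bitmap units, minus a PFN-derived term. */
	unsigned long old_off = ALIGN(base_pfn, 1UL << (align_order - order_per_bit))
				- (base_pfn >> order_per_bit);

	/* New expression: align the PFN itself, then express the delta in bitmap bits. */
	unsigned long new_off = (ALIGN(base_pfn, 1UL << align_order) - base_pfn)
				>> order_per_bit;

	printf("old offset = %lu, new offset = %lu\n", old_off, new_off);	/* 517 vs. 1 */
	return 0;
}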
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index fc00c8cb5a82..626e93db28ba 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1295,8 +1295,13 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
1295 | * Avoid grouping on DSO/COW pages in specific and RO pages | 1295 | * Avoid grouping on DSO/COW pages in specific and RO pages |
1296 | * in general, RO pages shouldn't hurt as much anyway since | 1296 | * in general, RO pages shouldn't hurt as much anyway since |
1297 | * they can be in shared cache state. | 1297 | * they can be in shared cache state. |
1298 | * | ||
1299 | * FIXME! This checks "pmd_dirty()" as an approximation of | ||
1300 | * "is this a read-only page", since checking "pmd_write()" | ||
1301 | * is even more broken. We haven't actually turned this into | ||
1302 | * a writable page, so pmd_write() will always be false. | ||
1298 | */ | 1303 | */ |
1299 | if (!pmd_write(pmd)) | 1304 | if (!pmd_dirty(pmd)) |
1300 | flags |= TNF_NO_GROUP; | 1305 | flags |= TNF_NO_GROUP; |
1301 | 1306 | ||
1302 | /* | 1307 | /* |
@@ -1482,6 +1487,7 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
1482 | 1487 | ||
1483 | if (__pmd_trans_huge_lock(pmd, vma, &ptl) == 1) { | 1488 | if (__pmd_trans_huge_lock(pmd, vma, &ptl) == 1) { |
1484 | pmd_t entry; | 1489 | pmd_t entry; |
1490 | ret = 1; | ||
1485 | 1491 | ||
1486 | /* | 1492 | /* |
1487 | * Avoid trapping faults against the zero page. The read-only | 1493 | * Avoid trapping faults against the zero page. The read-only |
@@ -1490,11 +1496,10 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
1490 | */ | 1496 | */ |
1491 | if (prot_numa && is_huge_zero_pmd(*pmd)) { | 1497 | if (prot_numa && is_huge_zero_pmd(*pmd)) { |
1492 | spin_unlock(ptl); | 1498 | spin_unlock(ptl); |
1493 | return 0; | 1499 | return ret; |
1494 | } | 1500 | } |
1495 | 1501 | ||
1496 | if (!prot_numa || !pmd_protnone(*pmd)) { | 1502 | if (!prot_numa || !pmd_protnone(*pmd)) { |
1497 | ret = 1; | ||
1498 | entry = pmdp_get_and_clear_notify(mm, addr, pmd); | 1503 | entry = pmdp_get_and_clear_notify(mm, addr, pmd); |
1499 | entry = pmd_modify(entry, newprot); | 1504 | entry = pmd_modify(entry, newprot); |
1500 | ret = HPAGE_PMD_NR; | 1505 | ret = HPAGE_PMD_NR; |
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 0a9ac6c26832..c41b2a0ee273 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -917,7 +917,6 @@ static void prep_compound_gigantic_page(struct page *page, unsigned long order)
917 | __SetPageHead(page); | 917 | __SetPageHead(page); |
918 | __ClearPageReserved(page); | 918 | __ClearPageReserved(page); |
919 | for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) { | 919 | for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) { |
920 | __SetPageTail(p); | ||
921 | /* | 920 | /* |
922 | * For gigantic hugepages allocated through bootmem at | 921 | * For gigantic hugepages allocated through bootmem at |
923 | * boot, it's safer to be consistent with the not-gigantic | 922 | * boot, it's safer to be consistent with the not-gigantic |
@@ -933,6 +932,9 @@ static void prep_compound_gigantic_page(struct page *page, unsigned long order)
933 | __ClearPageReserved(p); | 932 | __ClearPageReserved(p); |
934 | set_page_count(p, 0); | 933 | set_page_count(p, 0); |
935 | p->first_page = page; | 934 | p->first_page = page; |
935 | /* Make sure p->first_page is always valid for PageTail() */ | ||
936 | smp_wmb(); | ||
937 | __SetPageTail(p); | ||
936 | } | 938 | } |
937 | } | 939 | } |
938 | 940 | ||
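A rough user-space analogy (hypothetical struct and function names, with C11 release/acquire standing in for the kernel's smp_wmb() and its read-side pairing) of the publication ordering the hunk above enforces: the payload, p->first_page, must become visible before the PageTail flag that tells readers the payload is safe to follow.

#include <stdatomic.h>
#include <stddef.h>

struct tail_page {
	void *first_page;	/* payload: pointer back to the head page */
	atomic_bool page_tail;	/* flag a reader tests before using the payload */
};

static void publish_tail(struct tail_page *p, void *head)
{
	p->first_page = head;				/* 1: store the payload */
	atomic_store_explicit(&p->page_tail, 1,		/* 2: release = barrier, then flag */
			      memory_order_release);
}

static void *lookup_head(struct tail_page *p)
{
	if (!atomic_load_explicit(&p->page_tail, memory_order_acquire))
		return NULL;		/* not (yet) published as a tail page */
	return p->first_page;		/* acquire pairs with release: payload is valid */
}

int main(void)
{
	static struct tail_page p;
	int head;			/* stand-in for a head page */

	publish_tail(&p, &head);
	return lookup_head(&p) == &head ? 0 : 1;
}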
diff --git a/mm/iov_iter.c b/mm/iov_iter.c
deleted file mode 100644
index 827732047da1..000000000000
--- a/mm/iov_iter.c
+++ /dev/null
@@ -1,753 +0,0 @@
1 | #include <linux/export.h> | ||
2 | #include <linux/uio.h> | ||
3 | #include <linux/pagemap.h> | ||
4 | #include <linux/slab.h> | ||
5 | #include <linux/vmalloc.h> | ||
6 | #include <net/checksum.h> | ||
7 | |||
8 | #define iterate_iovec(i, n, __v, __p, skip, STEP) { \ | ||
9 | size_t left; \ | ||
10 | size_t wanted = n; \ | ||
11 | __p = i->iov; \ | ||
12 | __v.iov_len = min(n, __p->iov_len - skip); \ | ||
13 | if (likely(__v.iov_len)) { \ | ||
14 | __v.iov_base = __p->iov_base + skip; \ | ||
15 | left = (STEP); \ | ||
16 | __v.iov_len -= left; \ | ||
17 | skip += __v.iov_len; \ | ||
18 | n -= __v.iov_len; \ | ||
19 | } else { \ | ||
20 | left = 0; \ | ||
21 | } \ | ||
22 | while (unlikely(!left && n)) { \ | ||
23 | __p++; \ | ||
24 | __v.iov_len = min(n, __p->iov_len); \ | ||
25 | if (unlikely(!__v.iov_len)) \ | ||
26 | continue; \ | ||
27 | __v.iov_base = __p->iov_base; \ | ||
28 | left = (STEP); \ | ||
29 | __v.iov_len -= left; \ | ||
30 | skip = __v.iov_len; \ | ||
31 | n -= __v.iov_len; \ | ||
32 | } \ | ||
33 | n = wanted - n; \ | ||
34 | } | ||
35 | |||
36 | #define iterate_kvec(i, n, __v, __p, skip, STEP) { \ | ||
37 | size_t wanted = n; \ | ||
38 | __p = i->kvec; \ | ||
39 | __v.iov_len = min(n, __p->iov_len - skip); \ | ||
40 | if (likely(__v.iov_len)) { \ | ||
41 | __v.iov_base = __p->iov_base + skip; \ | ||
42 | (void)(STEP); \ | ||
43 | skip += __v.iov_len; \ | ||
44 | n -= __v.iov_len; \ | ||
45 | } \ | ||
46 | while (unlikely(n)) { \ | ||
47 | __p++; \ | ||
48 | __v.iov_len = min(n, __p->iov_len); \ | ||
49 | if (unlikely(!__v.iov_len)) \ | ||
50 | continue; \ | ||
51 | __v.iov_base = __p->iov_base; \ | ||
52 | (void)(STEP); \ | ||
53 | skip = __v.iov_len; \ | ||
54 | n -= __v.iov_len; \ | ||
55 | } \ | ||
56 | n = wanted; \ | ||
57 | } | ||
58 | |||
59 | #define iterate_bvec(i, n, __v, __p, skip, STEP) { \ | ||
60 | size_t wanted = n; \ | ||
61 | __p = i->bvec; \ | ||
62 | __v.bv_len = min_t(size_t, n, __p->bv_len - skip); \ | ||
63 | if (likely(__v.bv_len)) { \ | ||
64 | __v.bv_page = __p->bv_page; \ | ||
65 | __v.bv_offset = __p->bv_offset + skip; \ | ||
66 | (void)(STEP); \ | ||
67 | skip += __v.bv_len; \ | ||
68 | n -= __v.bv_len; \ | ||
69 | } \ | ||
70 | while (unlikely(n)) { \ | ||
71 | __p++; \ | ||
72 | __v.bv_len = min_t(size_t, n, __p->bv_len); \ | ||
73 | if (unlikely(!__v.bv_len)) \ | ||
74 | continue; \ | ||
75 | __v.bv_page = __p->bv_page; \ | ||
76 | __v.bv_offset = __p->bv_offset; \ | ||
77 | (void)(STEP); \ | ||
78 | skip = __v.bv_len; \ | ||
79 | n -= __v.bv_len; \ | ||
80 | } \ | ||
81 | n = wanted; \ | ||
82 | } | ||
83 | |||
84 | #define iterate_all_kinds(i, n, v, I, B, K) { \ | ||
85 | size_t skip = i->iov_offset; \ | ||
86 | if (unlikely(i->type & ITER_BVEC)) { \ | ||
87 | const struct bio_vec *bvec; \ | ||
88 | struct bio_vec v; \ | ||
89 | iterate_bvec(i, n, v, bvec, skip, (B)) \ | ||
90 | } else if (unlikely(i->type & ITER_KVEC)) { \ | ||
91 | const struct kvec *kvec; \ | ||
92 | struct kvec v; \ | ||
93 | iterate_kvec(i, n, v, kvec, skip, (K)) \ | ||
94 | } else { \ | ||
95 | const struct iovec *iov; \ | ||
96 | struct iovec v; \ | ||
97 | iterate_iovec(i, n, v, iov, skip, (I)) \ | ||
98 | } \ | ||
99 | } | ||
100 | |||
101 | #define iterate_and_advance(i, n, v, I, B, K) { \ | ||
102 | size_t skip = i->iov_offset; \ | ||
103 | if (unlikely(i->type & ITER_BVEC)) { \ | ||
104 | const struct bio_vec *bvec; \ | ||
105 | struct bio_vec v; \ | ||
106 | iterate_bvec(i, n, v, bvec, skip, (B)) \ | ||
107 | if (skip == bvec->bv_len) { \ | ||
108 | bvec++; \ | ||
109 | skip = 0; \ | ||
110 | } \ | ||
111 | i->nr_segs -= bvec - i->bvec; \ | ||
112 | i->bvec = bvec; \ | ||
113 | } else if (unlikely(i->type & ITER_KVEC)) { \ | ||
114 | const struct kvec *kvec; \ | ||
115 | struct kvec v; \ | ||
116 | iterate_kvec(i, n, v, kvec, skip, (K)) \ | ||
117 | if (skip == kvec->iov_len) { \ | ||
118 | kvec++; \ | ||
119 | skip = 0; \ | ||
120 | } \ | ||
121 | i->nr_segs -= kvec - i->kvec; \ | ||
122 | i->kvec = kvec; \ | ||
123 | } else { \ | ||
124 | const struct iovec *iov; \ | ||
125 | struct iovec v; \ | ||
126 | iterate_iovec(i, n, v, iov, skip, (I)) \ | ||
127 | if (skip == iov->iov_len) { \ | ||
128 | iov++; \ | ||
129 | skip = 0; \ | ||
130 | } \ | ||
131 | i->nr_segs -= iov - i->iov; \ | ||
132 | i->iov = iov; \ | ||
133 | } \ | ||
134 | i->count -= n; \ | ||
135 | i->iov_offset = skip; \ | ||
136 | } | ||
137 | |||
138 | static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t bytes, | ||
139 | struct iov_iter *i) | ||
140 | { | ||
141 | size_t skip, copy, left, wanted; | ||
142 | const struct iovec *iov; | ||
143 | char __user *buf; | ||
144 | void *kaddr, *from; | ||
145 | |||
146 | if (unlikely(bytes > i->count)) | ||
147 | bytes = i->count; | ||
148 | |||
149 | if (unlikely(!bytes)) | ||
150 | return 0; | ||
151 | |||
152 | wanted = bytes; | ||
153 | iov = i->iov; | ||
154 | skip = i->iov_offset; | ||
155 | buf = iov->iov_base + skip; | ||
156 | copy = min(bytes, iov->iov_len - skip); | ||
157 | |||
158 | if (!fault_in_pages_writeable(buf, copy)) { | ||
159 | kaddr = kmap_atomic(page); | ||
160 | from = kaddr + offset; | ||
161 | |||
162 | /* first chunk, usually the only one */ | ||
163 | left = __copy_to_user_inatomic(buf, from, copy); | ||
164 | copy -= left; | ||
165 | skip += copy; | ||
166 | from += copy; | ||
167 | bytes -= copy; | ||
168 | |||
169 | while (unlikely(!left && bytes)) { | ||
170 | iov++; | ||
171 | buf = iov->iov_base; | ||
172 | copy = min(bytes, iov->iov_len); | ||
173 | left = __copy_to_user_inatomic(buf, from, copy); | ||
174 | copy -= left; | ||
175 | skip = copy; | ||
176 | from += copy; | ||
177 | bytes -= copy; | ||
178 | } | ||
179 | if (likely(!bytes)) { | ||
180 | kunmap_atomic(kaddr); | ||
181 | goto done; | ||
182 | } | ||
183 | offset = from - kaddr; | ||
184 | buf += copy; | ||
185 | kunmap_atomic(kaddr); | ||
186 | copy = min(bytes, iov->iov_len - skip); | ||
187 | } | ||
188 | /* Too bad - revert to non-atomic kmap */ | ||
189 | kaddr = kmap(page); | ||
190 | from = kaddr + offset; | ||
191 | left = __copy_to_user(buf, from, copy); | ||
192 | copy -= left; | ||
193 | skip += copy; | ||
194 | from += copy; | ||
195 | bytes -= copy; | ||
196 | while (unlikely(!left && bytes)) { | ||
197 | iov++; | ||
198 | buf = iov->iov_base; | ||
199 | copy = min(bytes, iov->iov_len); | ||
200 | left = __copy_to_user(buf, from, copy); | ||
201 | copy -= left; | ||
202 | skip = copy; | ||
203 | from += copy; | ||
204 | bytes -= copy; | ||
205 | } | ||
206 | kunmap(page); | ||
207 | done: | ||
208 | if (skip == iov->iov_len) { | ||
209 | iov++; | ||
210 | skip = 0; | ||
211 | } | ||
212 | i->count -= wanted - bytes; | ||
213 | i->nr_segs -= iov - i->iov; | ||
214 | i->iov = iov; | ||
215 | i->iov_offset = skip; | ||
216 | return wanted - bytes; | ||
217 | } | ||
218 | |||
219 | static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t bytes, | ||
220 | struct iov_iter *i) | ||
221 | { | ||
222 | size_t skip, copy, left, wanted; | ||
223 | const struct iovec *iov; | ||
224 | char __user *buf; | ||
225 | void *kaddr, *to; | ||
226 | |||
227 | if (unlikely(bytes > i->count)) | ||
228 | bytes = i->count; | ||
229 | |||
230 | if (unlikely(!bytes)) | ||
231 | return 0; | ||
232 | |||
233 | wanted = bytes; | ||
234 | iov = i->iov; | ||
235 | skip = i->iov_offset; | ||
236 | buf = iov->iov_base + skip; | ||
237 | copy = min(bytes, iov->iov_len - skip); | ||
238 | |||
239 | if (!fault_in_pages_readable(buf, copy)) { | ||
240 | kaddr = kmap_atomic(page); | ||
241 | to = kaddr + offset; | ||
242 | |||
243 | /* first chunk, usually the only one */ | ||
244 | left = __copy_from_user_inatomic(to, buf, copy); | ||
245 | copy -= left; | ||
246 | skip += copy; | ||
247 | to += copy; | ||
248 | bytes -= copy; | ||
249 | |||
250 | while (unlikely(!left && bytes)) { | ||
251 | iov++; | ||
252 | buf = iov->iov_base; | ||
253 | copy = min(bytes, iov->iov_len); | ||
254 | left = __copy_from_user_inatomic(to, buf, copy); | ||
255 | copy -= left; | ||
256 | skip = copy; | ||
257 | to += copy; | ||
258 | bytes -= copy; | ||
259 | } | ||
260 | if (likely(!bytes)) { | ||
261 | kunmap_atomic(kaddr); | ||
262 | goto done; | ||
263 | } | ||
264 | offset = to - kaddr; | ||
265 | buf += copy; | ||
266 | kunmap_atomic(kaddr); | ||
267 | copy = min(bytes, iov->iov_len - skip); | ||
268 | } | ||
269 | /* Too bad - revert to non-atomic kmap */ | ||
270 | kaddr = kmap(page); | ||
271 | to = kaddr + offset; | ||
272 | left = __copy_from_user(to, buf, copy); | ||
273 | copy -= left; | ||
274 | skip += copy; | ||
275 | to += copy; | ||
276 | bytes -= copy; | ||
277 | while (unlikely(!left && bytes)) { | ||
278 | iov++; | ||
279 | buf = iov->iov_base; | ||
280 | copy = min(bytes, iov->iov_len); | ||
281 | left = __copy_from_user(to, buf, copy); | ||
282 | copy -= left; | ||
283 | skip = copy; | ||
284 | to += copy; | ||
285 | bytes -= copy; | ||
286 | } | ||
287 | kunmap(page); | ||
288 | done: | ||
289 | if (skip == iov->iov_len) { | ||
290 | iov++; | ||
291 | skip = 0; | ||
292 | } | ||
293 | i->count -= wanted - bytes; | ||
294 | i->nr_segs -= iov - i->iov; | ||
295 | i->iov = iov; | ||
296 | i->iov_offset = skip; | ||
297 | return wanted - bytes; | ||
298 | } | ||
299 | |||
300 | /* | ||
301 | * Fault in the first iovec of the given iov_iter, to a maximum length | ||
302 | * of bytes. Returns 0 on success, or non-zero if the memory could not be | ||
303 | * accessed (ie. because it is an invalid address). | ||
304 | * | ||
305 | * writev-intensive code may want this to prefault several iovecs -- that | ||
306 | * would be possible (callers must not rely on the fact that _only_ the | ||
307 | * first iovec will be faulted with the current implementation). | ||
308 | */ | ||
309 | int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes) | ||
310 | { | ||
311 | if (!(i->type & (ITER_BVEC|ITER_KVEC))) { | ||
312 | char __user *buf = i->iov->iov_base + i->iov_offset; | ||
313 | bytes = min(bytes, i->iov->iov_len - i->iov_offset); | ||
314 | return fault_in_pages_readable(buf, bytes); | ||
315 | } | ||
316 | return 0; | ||
317 | } | ||
318 | EXPORT_SYMBOL(iov_iter_fault_in_readable); | ||
319 | |||
320 | void iov_iter_init(struct iov_iter *i, int direction, | ||
321 | const struct iovec *iov, unsigned long nr_segs, | ||
322 | size_t count) | ||
323 | { | ||
324 | /* It will get better. Eventually... */ | ||
325 | if (segment_eq(get_fs(), KERNEL_DS)) { | ||
326 | direction |= ITER_KVEC; | ||
327 | i->type = direction; | ||
328 | i->kvec = (struct kvec *)iov; | ||
329 | } else { | ||
330 | i->type = direction; | ||
331 | i->iov = iov; | ||
332 | } | ||
333 | i->nr_segs = nr_segs; | ||
334 | i->iov_offset = 0; | ||
335 | i->count = count; | ||
336 | } | ||
337 | EXPORT_SYMBOL(iov_iter_init); | ||
338 | |||
339 | static void memcpy_from_page(char *to, struct page *page, size_t offset, size_t len) | ||
340 | { | ||
341 | char *from = kmap_atomic(page); | ||
342 | memcpy(to, from + offset, len); | ||
343 | kunmap_atomic(from); | ||
344 | } | ||
345 | |||
346 | static void memcpy_to_page(struct page *page, size_t offset, char *from, size_t len) | ||
347 | { | ||
348 | char *to = kmap_atomic(page); | ||
349 | memcpy(to + offset, from, len); | ||
350 | kunmap_atomic(to); | ||
351 | } | ||
352 | |||
353 | static void memzero_page(struct page *page, size_t offset, size_t len) | ||
354 | { | ||
355 | char *addr = kmap_atomic(page); | ||
356 | memset(addr + offset, 0, len); | ||
357 | kunmap_atomic(addr); | ||
358 | } | ||
359 | |||
360 | size_t copy_to_iter(void *addr, size_t bytes, struct iov_iter *i) | ||
361 | { | ||
362 | char *from = addr; | ||
363 | if (unlikely(bytes > i->count)) | ||
364 | bytes = i->count; | ||
365 | |||
366 | if (unlikely(!bytes)) | ||
367 | return 0; | ||
368 | |||
369 | iterate_and_advance(i, bytes, v, | ||
370 | __copy_to_user(v.iov_base, (from += v.iov_len) - v.iov_len, | ||
371 | v.iov_len), | ||
372 | memcpy_to_page(v.bv_page, v.bv_offset, | ||
373 | (from += v.bv_len) - v.bv_len, v.bv_len), | ||
374 | memcpy(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len) | ||
375 | ) | ||
376 | |||
377 | return bytes; | ||
378 | } | ||
379 | EXPORT_SYMBOL(copy_to_iter); | ||
380 | |||
381 | size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i) | ||
382 | { | ||
383 | char *to = addr; | ||
384 | if (unlikely(bytes > i->count)) | ||
385 | bytes = i->count; | ||
386 | |||
387 | if (unlikely(!bytes)) | ||
388 | return 0; | ||
389 | |||
390 | iterate_and_advance(i, bytes, v, | ||
391 | __copy_from_user((to += v.iov_len) - v.iov_len, v.iov_base, | ||
392 | v.iov_len), | ||
393 | memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page, | ||
394 | v.bv_offset, v.bv_len), | ||
395 | memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len) | ||
396 | ) | ||
397 | |||
398 | return bytes; | ||
399 | } | ||
400 | EXPORT_SYMBOL(copy_from_iter); | ||
401 | |||
402 | size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i) | ||
403 | { | ||
404 | char *to = addr; | ||
405 | if (unlikely(bytes > i->count)) | ||
406 | bytes = i->count; | ||
407 | |||
408 | if (unlikely(!bytes)) | ||
409 | return 0; | ||
410 | |||
411 | iterate_and_advance(i, bytes, v, | ||
412 | __copy_from_user_nocache((to += v.iov_len) - v.iov_len, | ||
413 | v.iov_base, v.iov_len), | ||
414 | memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page, | ||
415 | v.bv_offset, v.bv_len), | ||
416 | memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len) | ||
417 | ) | ||
418 | |||
419 | return bytes; | ||
420 | } | ||
421 | EXPORT_SYMBOL(copy_from_iter_nocache); | ||
422 | |||
423 | size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes, | ||
424 | struct iov_iter *i) | ||
425 | { | ||
426 | if (i->type & (ITER_BVEC|ITER_KVEC)) { | ||
427 | void *kaddr = kmap_atomic(page); | ||
428 | size_t wanted = copy_to_iter(kaddr + offset, bytes, i); | ||
429 | kunmap_atomic(kaddr); | ||
430 | return wanted; | ||
431 | } else | ||
432 | return copy_page_to_iter_iovec(page, offset, bytes, i); | ||
433 | } | ||
434 | EXPORT_SYMBOL(copy_page_to_iter); | ||
435 | |||
436 | size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes, | ||
437 | struct iov_iter *i) | ||
438 | { | ||
439 | if (i->type & (ITER_BVEC|ITER_KVEC)) { | ||
440 | void *kaddr = kmap_atomic(page); | ||
441 | size_t wanted = copy_from_iter(kaddr + offset, bytes, i); | ||
442 | kunmap_atomic(kaddr); | ||
443 | return wanted; | ||
444 | } else | ||
445 | return copy_page_from_iter_iovec(page, offset, bytes, i); | ||
446 | } | ||
447 | EXPORT_SYMBOL(copy_page_from_iter); | ||
448 | |||
449 | size_t iov_iter_zero(size_t bytes, struct iov_iter *i) | ||
450 | { | ||
451 | if (unlikely(bytes > i->count)) | ||
452 | bytes = i->count; | ||
453 | |||
454 | if (unlikely(!bytes)) | ||
455 | return 0; | ||
456 | |||
457 | iterate_and_advance(i, bytes, v, | ||
458 | __clear_user(v.iov_base, v.iov_len), | ||
459 | memzero_page(v.bv_page, v.bv_offset, v.bv_len), | ||
460 | memset(v.iov_base, 0, v.iov_len) | ||
461 | ) | ||
462 | |||
463 | return bytes; | ||
464 | } | ||
465 | EXPORT_SYMBOL(iov_iter_zero); | ||
466 | |||
467 | size_t iov_iter_copy_from_user_atomic(struct page *page, | ||
468 | struct iov_iter *i, unsigned long offset, size_t bytes) | ||
469 | { | ||
470 | char *kaddr = kmap_atomic(page), *p = kaddr + offset; | ||
471 | iterate_all_kinds(i, bytes, v, | ||
472 | __copy_from_user_inatomic((p += v.iov_len) - v.iov_len, | ||
473 | v.iov_base, v.iov_len), | ||
474 | memcpy_from_page((p += v.bv_len) - v.bv_len, v.bv_page, | ||
475 | v.bv_offset, v.bv_len), | ||
476 | memcpy((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len) | ||
477 | ) | ||
478 | kunmap_atomic(kaddr); | ||
479 | return bytes; | ||
480 | } | ||
481 | EXPORT_SYMBOL(iov_iter_copy_from_user_atomic); | ||
482 | |||
483 | void iov_iter_advance(struct iov_iter *i, size_t size) | ||
484 | { | ||
485 | iterate_and_advance(i, size, v, 0, 0, 0) | ||
486 | } | ||
487 | EXPORT_SYMBOL(iov_iter_advance); | ||
488 | |||
489 | /* | ||
490 | * Return the count of just the current iov_iter segment. | ||
491 | */ | ||
492 | size_t iov_iter_single_seg_count(const struct iov_iter *i) | ||
493 | { | ||
494 | if (i->nr_segs == 1) | ||
495 | return i->count; | ||
496 | else if (i->type & ITER_BVEC) | ||
497 | return min(i->count, i->bvec->bv_len - i->iov_offset); | ||
498 | else | ||
499 | return min(i->count, i->iov->iov_len - i->iov_offset); | ||
500 | } | ||
501 | EXPORT_SYMBOL(iov_iter_single_seg_count); | ||
502 | |||
503 | void iov_iter_kvec(struct iov_iter *i, int direction, | ||
504 | const struct kvec *kvec, unsigned long nr_segs, | ||
505 | size_t count) | ||
506 | { | ||
507 | BUG_ON(!(direction & ITER_KVEC)); | ||
508 | i->type = direction; | ||
509 | i->kvec = kvec; | ||
510 | i->nr_segs = nr_segs; | ||
511 | i->iov_offset = 0; | ||
512 | i->count = count; | ||
513 | } | ||
514 | EXPORT_SYMBOL(iov_iter_kvec); | ||
515 | |||
516 | void iov_iter_bvec(struct iov_iter *i, int direction, | ||
517 | const struct bio_vec *bvec, unsigned long nr_segs, | ||
518 | size_t count) | ||
519 | { | ||
520 | BUG_ON(!(direction & ITER_BVEC)); | ||
521 | i->type = direction; | ||
522 | i->bvec = bvec; | ||
523 | i->nr_segs = nr_segs; | ||
524 | i->iov_offset = 0; | ||
525 | i->count = count; | ||
526 | } | ||
527 | EXPORT_SYMBOL(iov_iter_bvec); | ||
528 | |||
529 | unsigned long iov_iter_alignment(const struct iov_iter *i) | ||
530 | { | ||
531 | unsigned long res = 0; | ||
532 | size_t size = i->count; | ||
533 | |||
534 | if (!size) | ||
535 | return 0; | ||
536 | |||
537 | iterate_all_kinds(i, size, v, | ||
538 | (res |= (unsigned long)v.iov_base | v.iov_len, 0), | ||
539 | res |= v.bv_offset | v.bv_len, | ||
540 | res |= (unsigned long)v.iov_base | v.iov_len | ||
541 | ) | ||
542 | return res; | ||
543 | } | ||
544 | EXPORT_SYMBOL(iov_iter_alignment); | ||
545 | |||
546 | ssize_t iov_iter_get_pages(struct iov_iter *i, | ||
547 | struct page **pages, size_t maxsize, unsigned maxpages, | ||
548 | size_t *start) | ||
549 | { | ||
550 | if (maxsize > i->count) | ||
551 | maxsize = i->count; | ||
552 | |||
553 | if (!maxsize) | ||
554 | return 0; | ||
555 | |||
556 | iterate_all_kinds(i, maxsize, v, ({ | ||
557 | unsigned long addr = (unsigned long)v.iov_base; | ||
558 | size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1)); | ||
559 | int n; | ||
560 | int res; | ||
561 | |||
562 | if (len > maxpages * PAGE_SIZE) | ||
563 | len = maxpages * PAGE_SIZE; | ||
564 | addr &= ~(PAGE_SIZE - 1); | ||
565 | n = DIV_ROUND_UP(len, PAGE_SIZE); | ||
566 | res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, pages); | ||
567 | if (unlikely(res < 0)) | ||
568 | return res; | ||
569 | return (res == n ? len : res * PAGE_SIZE) - *start; | ||
570 | 0;}),({ | ||
571 | /* can't be more than PAGE_SIZE */ | ||
572 | *start = v.bv_offset; | ||
573 | get_page(*pages = v.bv_page); | ||
574 | return v.bv_len; | ||
575 | }),({ | ||
576 | return -EFAULT; | ||
577 | }) | ||
578 | ) | ||
579 | return 0; | ||
580 | } | ||
581 | EXPORT_SYMBOL(iov_iter_get_pages); | ||
582 | |||
583 | static struct page **get_pages_array(size_t n) | ||
584 | { | ||
585 | struct page **p = kmalloc(n * sizeof(struct page *), GFP_KERNEL); | ||
586 | if (!p) | ||
587 | p = vmalloc(n * sizeof(struct page *)); | ||
588 | return p; | ||
589 | } | ||
590 | |||
591 | ssize_t iov_iter_get_pages_alloc(struct iov_iter *i, | ||
592 | struct page ***pages, size_t maxsize, | ||
593 | size_t *start) | ||
594 | { | ||
595 | struct page **p; | ||
596 | |||
597 | if (maxsize > i->count) | ||
598 | maxsize = i->count; | ||
599 | |||
600 | if (!maxsize) | ||
601 | return 0; | ||
602 | |||
603 | iterate_all_kinds(i, maxsize, v, ({ | ||
604 | unsigned long addr = (unsigned long)v.iov_base; | ||
605 | size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1)); | ||
606 | int n; | ||
607 | int res; | ||
608 | |||
609 | addr &= ~(PAGE_SIZE - 1); | ||
610 | n = DIV_ROUND_UP(len, PAGE_SIZE); | ||
611 | p = get_pages_array(n); | ||
612 | if (!p) | ||
613 | return -ENOMEM; | ||
614 | res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, p); | ||
615 | if (unlikely(res < 0)) { | ||
616 | kvfree(p); | ||
617 | return res; | ||
618 | } | ||
619 | *pages = p; | ||
620 | return (res == n ? len : res * PAGE_SIZE) - *start; | ||
621 | 0;}),({ | ||
622 | /* can't be more than PAGE_SIZE */ | ||
623 | *start = v.bv_offset; | ||
624 | *pages = p = get_pages_array(1); | ||
625 | if (!p) | ||
626 | return -ENOMEM; | ||
627 | get_page(*p = v.bv_page); | ||
628 | return v.bv_len; | ||
629 | }),({ | ||
630 | return -EFAULT; | ||
631 | }) | ||
632 | ) | ||
633 | return 0; | ||
634 | } | ||
635 | EXPORT_SYMBOL(iov_iter_get_pages_alloc); | ||
636 | |||
637 | size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum, | ||
638 | struct iov_iter *i) | ||
639 | { | ||
640 | char *to = addr; | ||
641 | __wsum sum, next; | ||
642 | size_t off = 0; | ||
643 | if (unlikely(bytes > i->count)) | ||
644 | bytes = i->count; | ||
645 | |||
646 | if (unlikely(!bytes)) | ||
647 | return 0; | ||
648 | |||
649 | sum = *csum; | ||
650 | iterate_and_advance(i, bytes, v, ({ | ||
651 | int err = 0; | ||
652 | next = csum_and_copy_from_user(v.iov_base, | ||
653 | (to += v.iov_len) - v.iov_len, | ||
654 | v.iov_len, 0, &err); | ||
655 | if (!err) { | ||
656 | sum = csum_block_add(sum, next, off); | ||
657 | off += v.iov_len; | ||
658 | } | ||
659 | err ? v.iov_len : 0; | ||
660 | }), ({ | ||
661 | char *p = kmap_atomic(v.bv_page); | ||
662 | next = csum_partial_copy_nocheck(p + v.bv_offset, | ||
663 | (to += v.bv_len) - v.bv_len, | ||
664 | v.bv_len, 0); | ||
665 | kunmap_atomic(p); | ||
666 | sum = csum_block_add(sum, next, off); | ||
667 | off += v.bv_len; | ||
668 | }),({ | ||
669 | next = csum_partial_copy_nocheck(v.iov_base, | ||
670 | (to += v.iov_len) - v.iov_len, | ||
671 | v.iov_len, 0); | ||
672 | sum = csum_block_add(sum, next, off); | ||
673 | off += v.iov_len; | ||
674 | }) | ||
675 | ) | ||
676 | *csum = sum; | ||
677 | return bytes; | ||
678 | } | ||
679 | EXPORT_SYMBOL(csum_and_copy_from_iter); | ||
680 | |||
681 | size_t csum_and_copy_to_iter(void *addr, size_t bytes, __wsum *csum, | ||
682 | struct iov_iter *i) | ||
683 | { | ||
684 | char *from = addr; | ||
685 | __wsum sum, next; | ||
686 | size_t off = 0; | ||
687 | if (unlikely(bytes > i->count)) | ||
688 | bytes = i->count; | ||
689 | |||
690 | if (unlikely(!bytes)) | ||
691 | return 0; | ||
692 | |||
693 | sum = *csum; | ||
694 | iterate_and_advance(i, bytes, v, ({ | ||
695 | int err = 0; | ||
696 | next = csum_and_copy_to_user((from += v.iov_len) - v.iov_len, | ||
697 | v.iov_base, | ||
698 | v.iov_len, 0, &err); | ||
699 | if (!err) { | ||
700 | sum = csum_block_add(sum, next, off); | ||
701 | off += v.iov_len; | ||
702 | } | ||
703 | err ? v.iov_len : 0; | ||
704 | }), ({ | ||
705 | char *p = kmap_atomic(v.bv_page); | ||
706 | next = csum_partial_copy_nocheck((from += v.bv_len) - v.bv_len, | ||
707 | p + v.bv_offset, | ||
708 | v.bv_len, 0); | ||
709 | kunmap_atomic(p); | ||
710 | sum = csum_block_add(sum, next, off); | ||
711 | off += v.bv_len; | ||
712 | }),({ | ||
713 | next = csum_partial_copy_nocheck((from += v.iov_len) - v.iov_len, | ||
714 | v.iov_base, | ||
715 | v.iov_len, 0); | ||
716 | sum = csum_block_add(sum, next, off); | ||
717 | off += v.iov_len; | ||
718 | }) | ||
719 | ) | ||
720 | *csum = sum; | ||
721 | return bytes; | ||
722 | } | ||
723 | EXPORT_SYMBOL(csum_and_copy_to_iter); | ||
724 | |||
725 | int iov_iter_npages(const struct iov_iter *i, int maxpages) | ||
726 | { | ||
727 | size_t size = i->count; | ||
728 | int npages = 0; | ||
729 | |||
730 | if (!size) | ||
731 | return 0; | ||
732 | |||
733 | iterate_all_kinds(i, size, v, ({ | ||
734 | unsigned long p = (unsigned long)v.iov_base; | ||
735 | npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE) | ||
736 | - p / PAGE_SIZE; | ||
737 | if (npages >= maxpages) | ||
738 | return maxpages; | ||
739 | 0;}),({ | ||
740 | npages++; | ||
741 | if (npages >= maxpages) | ||
742 | return maxpages; | ||
743 | }),({ | ||
744 | unsigned long p = (unsigned long)v.iov_base; | ||
745 | npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE) | ||
746 | - p / PAGE_SIZE; | ||
747 | if (npages >= maxpages) | ||
748 | return maxpages; | ||
749 | }) | ||
750 | ) | ||
751 | return npages; | ||
752 | } | ||
753 | EXPORT_SYMBOL(iov_iter_npages); | ||
diff --git a/mm/kasan/kasan.c b/mm/kasan/kasan.c
index 78fee632a7ee..936d81661c47 100644
--- a/mm/kasan/kasan.c
+++ b/mm/kasan/kasan.c
@@ -29,6 +29,7 @@
29 | #include <linux/stacktrace.h> | 29 | #include <linux/stacktrace.h> |
30 | #include <linux/string.h> | 30 | #include <linux/string.h> |
31 | #include <linux/types.h> | 31 | #include <linux/types.h> |
32 | #include <linux/vmalloc.h> | ||
32 | #include <linux/kasan.h> | 33 | #include <linux/kasan.h> |
33 | 34 | ||
34 | #include "kasan.h" | 35 | #include "kasan.h" |
@@ -414,12 +415,19 @@ int kasan_module_alloc(void *addr, size_t size)
414 | GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, | 415 | GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, |
415 | PAGE_KERNEL, VM_NO_GUARD, NUMA_NO_NODE, | 416 | PAGE_KERNEL, VM_NO_GUARD, NUMA_NO_NODE, |
416 | __builtin_return_address(0)); | 417 | __builtin_return_address(0)); |
417 | return ret ? 0 : -ENOMEM; | 418 | |
419 | if (ret) { | ||
420 | find_vm_area(addr)->flags |= VM_KASAN; | ||
421 | return 0; | ||
422 | } | ||
423 | |||
424 | return -ENOMEM; | ||
418 | } | 425 | } |
419 | 426 | ||
420 | void kasan_module_free(void *addr) | 427 | void kasan_free_shadow(const struct vm_struct *vm) |
421 | { | 428 | { |
422 | vfree(kasan_mem_to_shadow(addr)); | 429 | if (vm->flags & VM_KASAN) |
430 | vfree(kasan_mem_to_shadow(vm->addr)); | ||
423 | } | 431 | } |
424 | 432 | ||
425 | static void register_global(struct kasan_global *global) | 433 | static void register_global(struct kasan_global *global) |
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 9fe07692eaad..b34ef4a32a3b 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -5232,7 +5232,9 @@ static void mem_cgroup_bind(struct cgroup_subsys_state *root_css)
5232 | * on for the root memcg is enough. | 5232 | * on for the root memcg is enough. |
5233 | */ | 5233 | */ |
5234 | if (cgroup_on_dfl(root_css->cgroup)) | 5234 | if (cgroup_on_dfl(root_css->cgroup)) |
5235 | mem_cgroup_from_css(root_css)->use_hierarchy = true; | 5235 | root_mem_cgroup->use_hierarchy = true; |
5236 | else | ||
5237 | root_mem_cgroup->use_hierarchy = false; | ||
5236 | } | 5238 | } |
5237 | 5239 | ||
5238 | static u64 memory_current_read(struct cgroup_subsys_state *css, | 5240 | static u64 memory_current_read(struct cgroup_subsys_state *css, |
diff --git a/mm/memory.c b/mm/memory.c
index 8068893697bb..411144f977b1 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3072,8 +3072,13 @@ static int do_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
3072 | * Avoid grouping on DSO/COW pages in specific and RO pages | 3072 | * Avoid grouping on DSO/COW pages in specific and RO pages |
3073 | * in general, RO pages shouldn't hurt as much anyway since | 3073 | * in general, RO pages shouldn't hurt as much anyway since |
3074 | * they can be in shared cache state. | 3074 | * they can be in shared cache state. |
3075 | * | ||
3076 | * FIXME! This checks "pmd_dirty()" as an approximation of | ||
3077 | * "is this a read-only page", since checking "pmd_write()" | ||
3078 | * is even more broken. We haven't actually turned this into | ||
3079 | * a writable page, so pmd_write() will always be false. | ||
3075 | */ | 3080 | */ |
3076 | if (!pte_write(pte)) | 3081 | if (!pte_dirty(pte)) |
3077 | flags |= TNF_NO_GROUP; | 3082 | flags |= TNF_NO_GROUP; |
3078 | 3083 | ||
3079 | /* | 3084 | /* |
diff --git a/mm/mlock.c b/mm/mlock.c
index 73cf0987088c..8a54cd214925 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -26,10 +26,10 @@
26 | 26 | ||
27 | int can_do_mlock(void) | 27 | int can_do_mlock(void) |
28 | { | 28 | { |
29 | if (capable(CAP_IPC_LOCK)) | ||
30 | return 1; | ||
31 | if (rlimit(RLIMIT_MEMLOCK) != 0) | 29 | if (rlimit(RLIMIT_MEMLOCK) != 0) |
32 | return 1; | 30 | return 1; |
31 | if (capable(CAP_IPC_LOCK)) | ||
32 | return 1; | ||
33 | return 0; | 33 | return 0; |
34 | } | 34 | } |
35 | EXPORT_SYMBOL(can_do_mlock); | 35 | EXPORT_SYMBOL(can_do_mlock); |
diff --git a/mm/nommu.c b/mm/nommu.c
index 3e67e7538ecf..3fba2dc97c44 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -62,6 +62,7 @@ void *high_memory;
62 | EXPORT_SYMBOL(high_memory); | 62 | EXPORT_SYMBOL(high_memory); |
63 | struct page *mem_map; | 63 | struct page *mem_map; |
64 | unsigned long max_mapnr; | 64 | unsigned long max_mapnr; |
65 | EXPORT_SYMBOL(max_mapnr); | ||
65 | unsigned long highest_memmap_pfn; | 66 | unsigned long highest_memmap_pfn; |
66 | struct percpu_counter vm_committed_as; | 67 | struct percpu_counter vm_committed_as; |
67 | int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */ | 68 | int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */ |
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 7abfa70cdc1a..40e29429e7b0 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2373,7 +2373,8 @@ __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
2373 | goto out; | 2373 | goto out; |
2374 | } | 2374 | } |
2375 | /* Exhausted what can be done so it's blamo time */ | 2375 | /* Exhausted what can be done so it's blamo time */ |
2376 | if (out_of_memory(ac->zonelist, gfp_mask, order, ac->nodemask, false)) | 2376 | if (out_of_memory(ac->zonelist, gfp_mask, order, ac->nodemask, false) |
2377 | || WARN_ON_ONCE(gfp_mask & __GFP_NOFAIL)) | ||
2377 | *did_some_progress = 1; | 2378 | *did_some_progress = 1; |
2378 | out: | 2379 | out: |
2379 | oom_zonelist_unlock(ac->zonelist, gfp_mask); | 2380 | oom_zonelist_unlock(ac->zonelist, gfp_mask); |
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 35b25e1340ca..49abccf29a29 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -1418,6 +1418,7 @@ struct vm_struct *remove_vm_area(const void *addr)
1418 | spin_unlock(&vmap_area_lock); | 1418 | spin_unlock(&vmap_area_lock); |
1419 | 1419 | ||
1420 | vmap_debug_free_range(va->va_start, va->va_end); | 1420 | vmap_debug_free_range(va->va_start, va->va_end); |
1421 | kasan_free_shadow(vm); | ||
1421 | free_unmap_vmap_area(va); | 1422 | free_unmap_vmap_area(va); |
1422 | vm->size -= PAGE_SIZE; | 1423 | vm->size -= PAGE_SIZE; |
1423 | 1424 | ||