Diffstat (limited to 'mm')
-rw-r--r--   mm/internal.h |  6
-rw-r--r--   mm/memory.c   | 44
-rw-r--r--   mm/mlock.c    |  4
-rw-r--r--   mm/nommu.c    | 16
4 files changed, 30 insertions(+), 40 deletions(-)
diff --git a/mm/internal.h b/mm/internal.h
index d41475078b20..75596574911e 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -250,12 +250,8 @@ static inline void mminit_validate_memmodel_limits(unsigned long *start_pfn,
 }
 #endif /* CONFIG_SPARSEMEM */
 
-#define GUP_FLAGS_WRITE	0x01
-#define GUP_FLAGS_FORCE	0x02
-#define GUP_FLAGS_DUMP	0x04
-
 int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
-		     unsigned long start, int len, int flags,
+		     unsigned long start, int len, unsigned int foll_flags,
		     struct page **pages, struct vm_area_struct **vmas);
 
 #define ZONE_RECLAIM_NOSCAN	-2
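With the mm-private GUP_FLAGS_* bits removed, __get_user_pages() callers use the FOLL_* flags that <linux/mm.h> already defines for follow_page(). A rough map of old to new (a sketch for orientation only; the authoritative definitions and comments live in include/linux/mm.h):

	GUP_FLAGS_WRITE    ->  FOLL_WRITE  /* pte must be writable */
	GUP_FLAGS_FORCE    ->  FOLL_FORCE  /* require only the VM_MAY* bits */
	GUP_FLAGS_DUMP     ->  FOLL_DUMP   /* report holes instead of the zero page */
	pages != NULL      ->  FOLL_GET    /* take a reference on each page */
	(implicit, always) ->  FOLL_TOUCH  /* mark pages accessed */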
diff --git a/mm/memory.c b/mm/memory.c
index c8b5b9435a92..5c694f2b9c12 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1209,27 +1209,29 @@ no_page_table:
 }
 
 int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
-		unsigned long start, int nr_pages, int flags,
+		unsigned long start, int nr_pages, unsigned int gup_flags,
		struct page **pages, struct vm_area_struct **vmas)
 {
	int i;
-	unsigned int vm_flags = 0;
-	int write = !!(flags & GUP_FLAGS_WRITE);
-	int force = !!(flags & GUP_FLAGS_FORCE);
+	unsigned long vm_flags;
 
	if (nr_pages <= 0)
		return 0;
+
+	VM_BUG_ON(!!pages != !!(gup_flags & FOLL_GET));
+
	/*
	 * Require read or write permissions.
-	 * If 'force' is set, we only require the "MAY" flags.
+	 * If FOLL_FORCE is set, we only require the "MAY" flags.
	 */
-	vm_flags  = write ? (VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
-	vm_flags &= force ? (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
+	vm_flags  = (gup_flags & FOLL_WRITE) ?
+			(VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
+	vm_flags &= (gup_flags & FOLL_FORCE) ?
+			(VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
	i = 0;
 
	do {
		struct vm_area_struct *vma;
-		unsigned int foll_flags;
 
		vma = find_extend_vma(mm, start);
		if (!vma && in_gate_area(tsk, start)) {
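Worked out, the two-step mask computation above reduces to a single required vma->vm_flags bit per case. A minimal sketch of the same logic as a standalone helper (illustrative; gup_vm_flags is not a kernel function, and the VM_*/FOLL_* names are assumed from <linux/mm.h>):

	/*
	 * gup_flags                required bit in vma->vm_flags
	 * -----------------------  -----------------------------
	 * (read, no force)         VM_READ
	 * FOLL_WRITE               VM_WRITE
	 * FOLL_FORCE               VM_MAYREAD
	 * FOLL_WRITE | FOLL_FORCE  VM_MAYWRITE
	 */
	static unsigned long gup_vm_flags(unsigned int gup_flags)
	{
		unsigned long vm_flags;

		vm_flags = (gup_flags & FOLL_WRITE) ?
				(VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
		/* FOLL_FORCE keeps only the "MAY" bits, so ptrace-style
		 * access works on mappings whose current protection would
		 * otherwise forbid it. */
		vm_flags &= (gup_flags & FOLL_FORCE) ?
				(VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
		return vm_flags;
	}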
@@ -1241,7 +1243,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
			pte_t *pte;
 
			/* user gate pages are read-only */
-			if (write)
+			if (gup_flags & FOLL_WRITE)
				return i ? : -EFAULT;
			if (pg > TASK_SIZE)
				pgd = pgd_offset_k(pg);
@@ -1278,22 +1280,15 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
		    !(vm_flags & vma->vm_flags))
			return i ? : -EFAULT;
 
-		foll_flags = FOLL_TOUCH;
-		if (pages)
-			foll_flags |= FOLL_GET;
-		if (flags & GUP_FLAGS_DUMP)
-			foll_flags |= FOLL_DUMP;
-		if (write)
-			foll_flags |= FOLL_WRITE;
-
		if (is_vm_hugetlb_page(vma)) {
			i = follow_hugetlb_page(mm, vma, pages, vmas,
-					&start, &nr_pages, i, foll_flags);
+					&start, &nr_pages, i, gup_flags);
			continue;
		}
 
		do {
			struct page *page;
+			unsigned int foll_flags = gup_flags;
 
			/*
			 * If we have a pending SIGKILL, don't keep faulting
@@ -1302,9 +1297,6 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
			if (unlikely(fatal_signal_pending(current)))
				return i ? i : -ERESTARTSYS;
 
-			if (write)
-				foll_flags |= FOLL_WRITE;
-
			cond_resched();
			while (!(page = follow_page(vma, start, foll_flags))) {
				int ret;
@@ -1415,12 +1407,14 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
		unsigned long start, int nr_pages, int write, int force,
		struct page **pages, struct vm_area_struct **vmas)
 {
-	int flags = 0;
+	int flags = FOLL_TOUCH;
 
+	if (pages)
+		flags |= FOLL_GET;
	if (write)
-		flags |= GUP_FLAGS_WRITE;
+		flags |= FOLL_WRITE;
	if (force)
-		flags |= GUP_FLAGS_FORCE;
+		flags |= FOLL_FORCE;
 
	return __get_user_pages(tsk, mm, start, nr_pages, flags, pages, vmas);
 }
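For outside callers nothing changes: get_user_pages() keeps its (write, force) signature and now simply expands it to FOLL_TOUCH | FOLL_GET plus the conditional bits. A caller-side sketch against this kernel's API (illustrative only; addr is an assumed user address, and the caller must hold mmap_sem):

	struct page *page;
	int ret;

	down_read(&current->mm->mmap_sem);
	/* Pin one page for writing; internally this becomes
	 * FOLL_TOUCH | FOLL_GET | FOLL_WRITE. */
	ret = get_user_pages(current, current->mm, addr, 1,
			     1 /* write */, 0 /* force */, &page, NULL);
	up_read(&current->mm->mmap_sem);

	if (ret == 1) {
		/* ... use the page ... */
		page_cache_release(page);	/* drop the FOLL_GET reference */
	}

Note that passing a pages array without FOLL_GET now trips the VM_BUG_ON added above, which is why get_dump_page() below spells out FOLL_GET explicitly.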
@@ -1447,7 +1441,7 @@ struct page *get_dump_page(unsigned long addr)
	struct page *page;
 
	if (__get_user_pages(current, current->mm, addr, 1,
-			GUP_FLAGS_FORCE | GUP_FLAGS_DUMP, &page, &vma) < 1)
+			FOLL_FORCE | FOLL_DUMP | FOLL_GET, &page, &vma) < 1)
		return NULL;
	if (page == ZERO_PAGE(0)) {
		page_cache_release(page);
diff --git a/mm/mlock.c b/mm/mlock.c
index e13918d4fc4f..22041aa9f5c1 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -166,9 +166,9 @@ static long __mlock_vma_pages_range(struct vm_area_struct *vma,
	VM_BUG_ON(end   > vma->vm_end);
	VM_BUG_ON(!rwsem_is_locked(&mm->mmap_sem));
 
-	gup_flags = 0;
+	gup_flags = FOLL_TOUCH | FOLL_GET;
	if (vma->vm_flags & VM_WRITE)
-		gup_flags = GUP_FLAGS_WRITE;
+		gup_flags |= FOLL_WRITE;
 
	while (nr_pages > 0) {
		int i;
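The mlock path builds its flags directly: every fault-in wants the accessed bit set and a page reference (which __mlock_vma_pages_range() drops again after marking each page mlocked), and it faults writably only when the VMA itself is writable, so read-only mappings are not needlessly COW-broken. Condensed (a sketch restating the hunk, not new code):

	unsigned int gup_flags = FOLL_TOUCH | FOLL_GET;

	if (vma->vm_flags & VM_WRITE)
		gup_flags |= FOLL_WRITE;	/* pre-COW writable mappings */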
diff --git a/mm/nommu.c b/mm/nommu.c
index 386443e9d2c6..2d02ca17ce18 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -168,20 +168,20 @@ unsigned int kobjsize(const void *objp)
 }
 
 int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
-		     unsigned long start, int nr_pages, int flags,
+		     unsigned long start, int nr_pages, int foll_flags,
		     struct page **pages, struct vm_area_struct **vmas)
 {
	struct vm_area_struct *vma;
	unsigned long vm_flags;
	int i;
-	int write = !!(flags & GUP_FLAGS_WRITE);
-	int force = !!(flags & GUP_FLAGS_FORCE);
 
	/* calculate required read or write permissions.
-	 * - if 'force' is set, we only require the "MAY" flags.
+	 * If FOLL_FORCE is set, we only require the "MAY" flags.
	 */
-	vm_flags = write ? (VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
-	vm_flags &= force ? (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
+	vm_flags = (foll_flags & FOLL_WRITE) ?
+			(VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
+	vm_flags &= (foll_flags & FOLL_FORCE) ?
+			(VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
 
	for (i = 0; i < nr_pages; i++) {
		vma = find_vma(mm, start);
@@ -223,9 +223,9 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
	int flags = 0;
 
	if (write)
-		flags |= GUP_FLAGS_WRITE;
+		flags |= FOLL_WRITE;
	if (force)
-		flags |= GUP_FLAGS_FORCE;
+		flags |= FOLL_FORCE;
 
	return __get_user_pages(tsk, mm, start, nr_pages, flags, pages, vmas);
 }
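One asymmetry to note: unlike the mm/memory.c wrapper, the nommu get_user_pages() never adds FOLL_TOUCH or FOLL_GET; the nommu __get_user_pages() handles page references internally based on the pages argument alone. The force bit still matters on both builds. For example, the forced write that ptrace-style accessors rely on looks the same from the caller's side (schematic, not code from this patch):

	/* A forced write maps to FOLL_WRITE | FOLL_FORCE, so only
	 * VM_MAYWRITE is required of the target VMA, letting a
	 * debugger poke a read-only text mapping. */
	ret = get_user_pages(tsk, tsk->mm, addr, 1,
			     1 /* write */, 1 /* force */, &page, &vma);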