aboutsummaryrefslogtreecommitdiffstats
path: root/mm/memory.c
diff options
context:
space:
mode:
Diffstat (limited to 'mm/memory.c')
-rw-r--r--mm/memory.c37
1 files changed, 18 insertions, 19 deletions
diff --git a/mm/memory.c b/mm/memory.c
index f46ac18ba231..aede2ce3aba4 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -135,11 +135,12 @@ void pmd_clear_bad(pmd_t *pmd)
  * Note: this doesn't free the actual pages themselves. That
  * has been handled earlier when unmapping all the memory regions.
  */
-static void free_pte_range(struct mmu_gather *tlb, pmd_t *pmd)
+static void free_pte_range(struct mmu_gather *tlb, pmd_t *pmd,
+                           unsigned long addr)
 {
         pgtable_t token = pmd_pgtable(*pmd);
         pmd_clear(pmd);
-        pte_free_tlb(tlb, token);
+        pte_free_tlb(tlb, token, addr);
         tlb->mm->nr_ptes--;
 }
 
@@ -157,7 +158,7 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
                 next = pmd_addr_end(addr, end);
                 if (pmd_none_or_clear_bad(pmd))
                         continue;
-                free_pte_range(tlb, pmd);
+                free_pte_range(tlb, pmd, addr);
         } while (pmd++, addr = next, addr != end);
 
         start &= PUD_MASK;
@@ -173,7 +174,7 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
 
         pmd = pmd_offset(pud, start);
         pud_clear(pud);
-        pmd_free_tlb(tlb, pmd);
+        pmd_free_tlb(tlb, pmd, start);
 }
 
 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
@@ -206,7 +207,7 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
 
         pud = pud_offset(pgd, start);
         pgd_clear(pgd);
-        pud_free_tlb(tlb, pud);
+        pud_free_tlb(tlb, pud, start);
 }
 
 /*
@@ -1207,8 +1208,8 @@ static inline int use_zero_page(struct vm_area_struct *vma)
 
 
 int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
-                     unsigned long start, int len, int flags,
+                     unsigned long start, int nr_pages, int flags,
                      struct page **pages, struct vm_area_struct **vmas)
 {
         int i;
         unsigned int vm_flags = 0;
@@ -1217,7 +1218,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
         int ignore = !!(flags & GUP_FLAGS_IGNORE_VMA_PERMISSIONS);
         int ignore_sigkill = !!(flags & GUP_FLAGS_IGNORE_SIGKILL);
 
-        if (len <= 0)
+        if (nr_pages <= 0)
                 return 0;
         /*
          * Require read or write permissions.
@@ -1269,7 +1270,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
                                 vmas[i] = gate_vma;
                         i++;
                         start += PAGE_SIZE;
-                        len--;
+                        nr_pages--;
                         continue;
                 }
 
@@ -1280,7 +1281,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 
                 if (is_vm_hugetlb_page(vma)) {
                         i = follow_hugetlb_page(mm, vma, pages, vmas,
-                                        &start, &len, i, write);
+                                        &start, &nr_pages, i, write);
                         continue;
                 }
 
@@ -1357,9 +1358,9 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
                                 vmas[i] = vma;
                         i++;
                         start += PAGE_SIZE;
-                        len--;
-                } while (len && start < vma->vm_end);
-        } while (len);
+                        nr_pages--;
+                } while (nr_pages && start < vma->vm_end);
+        } while (nr_pages);
         return i;
 }
 
@@ -1368,7 +1369,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
  * @tsk:      task_struct of target task
  * @mm:       mm_struct of target mm
  * @start:    starting user address
- * @len:      number of pages from start to pin
+ * @nr_pages: number of pages from start to pin
  * @write:    whether pages will be written to by the caller
  * @force:    whether to force write access even if user mapping is
  *            readonly. This will result in the page being COWed even
@@ -1380,7 +1381,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
  *            Or NULL if the caller does not require them.
  *
  * Returns number of pages pinned. This may be fewer than the number
- * requested. If len is 0 or negative, returns 0. If no pages
+ * requested. If nr_pages is 0 or negative, returns 0. If no pages
  * were pinned, returns -errno. Each page returned must be released
  * with a put_page() call when it is finished with. vmas will only
  * remain valid while mmap_sem is held.
@@ -1414,7 +1415,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
  * See also get_user_pages_fast, for performance critical applications.
  */
 int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
-                unsigned long start, int len, int write, int force,
+                unsigned long start, int nr_pages, int write, int force,
                 struct page **pages, struct vm_area_struct **vmas)
 {
         int flags = 0;
@@ -1424,9 +1425,7 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
         if (force)
                 flags |= GUP_FLAGS_FORCE;
 
-        return __get_user_pages(tsk, mm,
-                                start, len, flags,
-                                pages, vmas);
+        return __get_user_pages(tsk, mm, start, nr_pages, flags, pages, vmas);
 }
 
 EXPORT_SYMBOL(get_user_pages);