Diffstat (limited to 'mm/memory.c')
 mm/memory.c | 21 ++++++++-------------
 1 file changed, 8 insertions(+), 13 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index 85e80a57db29..8d8f52569f32 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -277,7 +277,7 @@ void free_pgtables(struct mmu_gather **tlb, struct vm_area_struct *vma,
 		anon_vma_unlink(vma);
 		unlink_file_vma(vma);
 
-		if (is_hugepage_only_range(vma->vm_mm, addr, HPAGE_SIZE)) {
+		if (is_vm_hugetlb_page(vma)) {
 			hugetlb_free_pgd_range(tlb, addr, vma->vm_end,
 				floor, next? next->vm_start: ceiling);
 		} else {
@@ -285,8 +285,7 @@ void free_pgtables(struct mmu_gather **tlb, struct vm_area_struct *vma,
 			 * Optimization: gather nearby vmas into one call down
 			 */
 			while (next && next->vm_start <= vma->vm_end + PMD_SIZE
-			  && !is_hugepage_only_range(vma->vm_mm, next->vm_start,
-							HPAGE_SIZE)) {
+			       && !is_vm_hugetlb_page(next)) {
 				vma = next;
 				next = vma->vm_next;
 				anon_vma_unlink(vma);
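
Both hunks above swap an architecture callback, is_hugepage_only_range(), for a plain per-vma flag test. For context, a minimal sketch of the replacement as include/linux/hugetlb.h defined it around this time (the exact !CONFIG_HUGETLB_PAGE fallback shown is an assumption about the era, not part of this diff):

	#ifdef CONFIG_HUGETLB_PAGE
	static inline int is_vm_hugetlb_page(struct vm_area_struct *vma)
	{
		return vma->vm_flags & VM_HUGETLB;	/* O(1) flag test on the vma */
	}
	#else
	#define is_vm_hugetlb_page(vma)	0	/* no hugetlb support: never true */
	#endif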
@@ -388,7 +387,7 @@ struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr, pte_
 {
 	unsigned long pfn = pte_pfn(pte);
 
-	if (vma->vm_flags & VM_PFNMAP) {
+	if (unlikely(vma->vm_flags & VM_PFNMAP)) {
 		unsigned long off = (addr - vma->vm_start) >> PAGE_SHIFT;
 		if (pfn == vma->vm_pgoff + off)
 			return NULL;
@@ -401,8 +400,6 @@ struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr, pte_
 	 * we should just do "return pfn_to_page(pfn)", but
 	 * in the meantime we check that we get a valid pfn,
 	 * and that the resulting page looks ok.
-	 *
-	 * Remove this test eventually!
 	 */
 	if (unlikely(!pfn_valid(pfn))) {
 		print_bad_pte(vma, pte, addr);
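
The VM_PFNMAP test in vm_normal_page() gains an unlikely() annotation, a branch-prediction hint telling the compiler the raw-pfn mapping case is rare. For reference, the standard include/linux/compiler.h definitions (shown for context, not part of this diff):

	#define likely(x)	__builtin_expect(!!(x), 1)	/* branch usually taken */
	#define unlikely(x)	__builtin_expect(!!(x), 0)	/* branch rarely taken */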
@@ -1074,6 +1071,8 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 			}
 			if (pages) {
 				pages[i] = page;
+
+				flush_anon_page(page, start);
 				flush_dcache_page(page);
 			}
 			if (vmas)
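
get_user_pages() now flushes anonymous pages as well as page-cache pages before handing them to the caller. On most architectures this is a no-op; architectures with virtually-indexed caches (notably ARM) override it to flush user-space aliases of the page. A sketch of the generic fallback, assuming the include/linux/highmem.h convention of this era:

	#ifndef ARCH_HAS_FLUSH_ANON_PAGE
	/* Default: nothing to do unless the architecture has cache aliases. */
	static inline void flush_anon_page(struct page *page, unsigned long vmaddr)
	{
	}
	#endif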
@@ -1221,9 +1220,7 @@ out:
 * The page has to be a nice clean _individual_ kernel allocation.
 * If you allocate a compound page, you need to have marked it as
 * such (__GFP_COMP), or manually just split the page up yourself
-* (which is mainly an issue of doing "set_page_count(page, 1)" for
-* each sub-page, and then freeing them one by one when you free
-* them rather than freeing it as a compound page).
+* (see split_page()).
 *
 * NOTE! Traditionally this was done with "remap_pfn_range()" which
 * took an arbitrary page protection parameter. This doesn't allow
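
The rewritten comment points callers of vm_insert_page() at split_page() rather than spelling out the manual set_page_count() recipe. A hedged sketch of the intended usage (the caller code below is illustrative, not from this commit): allocate a higher-order block, split it into independently refcounted 0-order pages, and insert each one:

	/* Illustrative only: an order-2 allocation split into four pages. */
	struct page *block = alloc_pages(GFP_KERNEL, 2);
	if (block) {
		int i;

		split_page(block, 2);		/* four separate 0-order pages */
		for (i = 0; i < 4; i++)		/* error handling elided */
			vm_insert_page(vma, vma->vm_start + i * PAGE_SIZE,
				       block + i);
	}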
@@ -2357,10 +2354,8 @@ int make_pages_present(unsigned long addr, unsigned long end)
 	if (!vma)
 		return -1;
 	write = (vma->vm_flags & VM_WRITE) != 0;
-	if (addr >= end)
-		BUG();
-	if (end > vma->vm_end)
-		BUG();
+	BUG_ON(addr >= end);
+	BUG_ON(end > vma->vm_end);
 	len = (end+PAGE_SIZE-1)/PAGE_SIZE-addr/PAGE_SIZE;
 	ret = get_user_pages(current, current->mm, addr,
 			len, write, 0, NULL, NULL);
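
Finally, the two open-coded if/BUG() pairs in make_pages_present() collapse into BUG_ON(), which is behaviorally identical. The generic definition, roughly as in include/asm-generic/bug.h:

	#define BUG_ON(condition) do { if (unlikely(condition)) BUG(); } while (0)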