Diffstat (limited to 'mm/memory.c')

-rw-r--r--   mm/memory.c   21

1 file changed, 6 insertions(+), 15 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index 85e80a57db29..80c3fb370f91 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -277,7 +277,7 @@ void free_pgtables(struct mmu_gather **tlb, struct vm_area_struct *vma,
 	anon_vma_unlink(vma);
 	unlink_file_vma(vma);
 
-	if (is_hugepage_only_range(vma->vm_mm, addr, HPAGE_SIZE)) {
+	if (is_vm_hugetlb_page(vma)) {
 		hugetlb_free_pgd_range(tlb, addr, vma->vm_end,
 			floor, next? next->vm_start: ceiling);
 	} else {
@@ -285,8 +285,7 @@ void free_pgtables(struct mmu_gather **tlb, struct vm_area_struct *vma,
 		 * Optimization: gather nearby vmas into one call down
 		 */
 		while (next && next->vm_start <= vma->vm_end + PMD_SIZE
-		       && !is_hugepage_only_range(vma->vm_mm, next->vm_start,
-						  HPAGE_SIZE)) {
+		       && !is_vm_hugetlb_page(next)) {
			vma = next;
			next = vma->vm_next;
			anon_vma_unlink(vma);
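Note: the two hunks above replace the range probe is_hugepage_only_range(mm, addr, HPAGE_SIZE) with the per-VMA predicate, so free_pgtables() asks "is this vma a hugetlb mapping?" directly. For reference, the predicate is essentially a flag test; a sketch paraphrasing include/linux/hugetlb.h of this era (the real header guards it with CONFIG_HUGETLB_PAGE and defines it to 0 otherwise):

	static inline int is_vm_hugetlb_page(struct vm_area_struct *vma)
	{
		return vma->vm_flags & VM_HUGETLB;
	}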
@@ -388,7 +387,7 @@ struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr, pte_
 {
 	unsigned long pfn = pte_pfn(pte);
 
-	if (vma->vm_flags & VM_PFNMAP) {
+	if (unlikely(vma->vm_flags & VM_PFNMAP)) {
 		unsigned long off = (addr - vma->vm_start) >> PAGE_SHIFT;
 		if (pfn == vma->vm_pgoff + off)
 			return NULL;
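Note: the only change in this hunk is the unlikely() annotation on the VM_PFNMAP test; most VMAs are ordinary page-backed mappings, so the hint keeps the raw-pfn case off the hot path. The macro is the standard wrapper around GCC's __builtin_expect from include/linux/compiler.h:

	#define likely(x)	__builtin_expect(!!(x), 1)
	#define unlikely(x)	__builtin_expect(!!(x), 0)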
@@ -396,18 +395,12 @@ struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr, pte_
 		return NULL;
 	}
 
-	/*
-	 * Add some anal sanity checks for now. Eventually,
-	 * we should just do "return pfn_to_page(pfn)", but
-	 * in the meantime we check that we get a valid pfn,
-	 * and that the resulting page looks ok.
-	 *
-	 * Remove this test eventually!
-	 */
+#ifdef CONFIG_DEBUG_VM
 	if (unlikely(!pfn_valid(pfn))) {
 		print_bad_pte(vma, pte, addr);
 		return NULL;
 	}
+#endif
 
 	/*
 	 * NOTE! We still have PageReserved() pages in the page
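Note: the hunk above compiles the pfn_valid() sanity check (and its print_bad_pte() report) only into CONFIG_DEBUG_VM=y kernels; production builds trust the pte and go straight to the pfn_to_page() translation. On a flat-memory configuration the dropped check is a simple bounds test anyway; a sketch paraphrasing the i386 headers of this era (sparse/discontig configs are more involved):

	#define pfn_valid(pfn)	((pfn) < max_mapnr)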
@@ -1221,9 +1214,7 @@ out:
  * The page has to be a nice clean _individual_ kernel allocation.
  * If you allocate a compound page, you need to have marked it as
  * such (__GFP_COMP), or manually just split the page up yourself
- * (which is mainly an issue of doing "set_page_count(page, 1)" for
- * each sub-page, and then freeing them one by one when you free
- * them rather than freeing it as a compound page).
+ * (see split_page()).
  *
  * NOTE! Traditionally this was done with "remap_pfn_range()" which
  * took an arbitrary page protection parameter. This doesn't allow
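Note: the long parenthetical is replaced by a pointer to split_page() (mm/page_alloc.c), which now does the set_page_count() dance the old comment spelled out. A minimal sketch of the intended pattern for a vm_insert_page() caller, using a hypothetical driver helper:

	/* Allocate a contiguous 2^order-page buffer whose sub-pages can be
	 * inserted with vm_insert_page() and freed individually. */
	static struct page *mydrv_alloc_buf(unsigned int order)
	{
		struct page *page = alloc_pages(GFP_KERNEL, order);

		if (!page)
			return NULL;
		split_page(page, order);  /* give each sub-page its own refcount */
		return page;              /* later: __free_page(page + i) per sub-page */
	}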