Diffstat (limited to 'mm/rmap.c')
-rw-r--r--	mm/rmap.c	42
1 files changed, 13 insertions, 29 deletions
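Note: taken together, the hunks below make two independent simplifications. First, the ignore_token argument is dropped from the whole page_referenced() call chain (page_referenced_one, page_referenced_anon, page_referenced_file); second, try_to_unmap_cluster() switches from open-coded pfn_valid()/pfn_to_page() checks to vm_normal_page(). A minimal sketch of the caller-visible signature change follows; the wrapper function is hypothetical and only illustrates the new two-argument form, it is not part of this diff:

	/* Sketch only: a hypothetical caller, not code from this patch. */
	static int page_was_referenced(struct page *page, int page_is_locked)
	{
		/*
		 * Before: page_referenced(page, is_locked, ignore_token);
		 * After:  the swap-token decision lives entirely inside
		 *         page_referenced_one(), so callers pass only the
		 *         page and whether they already hold the page lock.
		 */
		return page_referenced(page, page_is_locked);
	}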
@@ -226,8 +226,6 @@ vma_address(struct page *page, struct vm_area_struct *vma)
 /*
  * At what user virtual address is page expected in vma? checking that the
  * page matches the vma: currently only used on anon pages, by unuse_vma;
- * and by extraordinary checks on anon pages in VM_UNPAGED vmas, taking
- * care that an mmap of /dev/mem might window free and foreign pages.
  */
 unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
 {
@@ -292,7 +290,7 @@ pte_t *page_check_address(struct page *page, struct mm_struct *mm,
  * repeatedly from either page_referenced_anon or page_referenced_file.
  */
 static int page_referenced_one(struct page *page,
-	struct vm_area_struct *vma, unsigned int *mapcount, int ignore_token)
+	struct vm_area_struct *vma, unsigned int *mapcount)
 {
 	struct mm_struct *mm = vma->vm_mm;
 	unsigned long address;
@@ -313,7 +311,7 @@ static int page_referenced_one(struct page *page,
 
 	/* Pretend the page is referenced if the task has the
 	   swap token and is in the middle of a page fault. */
-	if (mm != current->mm && !ignore_token && has_swap_token(mm) &&
+	if (mm != current->mm && has_swap_token(mm) &&
 			rwsem_is_locked(&mm->mmap_sem))
 		referenced++;
 
@@ -323,7 +321,7 @@ out:
 	return referenced;
 }
 
-static int page_referenced_anon(struct page *page, int ignore_token)
+static int page_referenced_anon(struct page *page)
 {
 	unsigned int mapcount;
 	struct anon_vma *anon_vma;
@@ -336,8 +334,7 @@ static int page_referenced_anon(struct page *page, int ignore_token)
 
 	mapcount = page_mapcount(page);
 	list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
-		referenced += page_referenced_one(page, vma, &mapcount,
-						  ignore_token);
+		referenced += page_referenced_one(page, vma, &mapcount);
 		if (!mapcount)
 			break;
 	}
@@ -356,7 +353,7 @@ static int page_referenced_anon(struct page *page, int ignore_token)
  *
  * This function is only called from page_referenced for object-based pages.
  */
-static int page_referenced_file(struct page *page, int ignore_token)
+static int page_referenced_file(struct page *page)
 {
 	unsigned int mapcount;
 	struct address_space *mapping = page->mapping;
@@ -394,8 +391,7 @@ static int page_referenced_file(struct page *page, int ignore_token)
 			referenced++;
 			break;
 		}
-		referenced += page_referenced_one(page, vma, &mapcount,
-						  ignore_token);
+		referenced += page_referenced_one(page, vma, &mapcount);
 		if (!mapcount)
 			break;
 	}
@@ -412,13 +408,10 @@ static int page_referenced_file(struct page *page, int ignore_token)
  * Quick test_and_clear_referenced for all mappings to a page,
  * returns the number of ptes which referenced the page.
  */
-int page_referenced(struct page *page, int is_locked, int ignore_token)
+int page_referenced(struct page *page, int is_locked)
 {
 	int referenced = 0;
 
-	if (!swap_token_default_timeout)
-		ignore_token = 1;
-
 	if (page_test_and_clear_young(page))
 		referenced++;
 
@@ -427,15 +420,14 @@ int page_referenced(struct page *page, int is_locked, int ignore_token)
 
 	if (page_mapped(page) && page->mapping) {
 		if (PageAnon(page))
-			referenced += page_referenced_anon(page, ignore_token);
+			referenced += page_referenced_anon(page);
 		else if (is_locked)
-			referenced += page_referenced_file(page, ignore_token);
+			referenced += page_referenced_file(page);
 		else if (TestSetPageLocked(page))
 			referenced++;
 		else {
 			if (page->mapping)
-				referenced += page_referenced_file(page,
-								ignore_token);
+				referenced += page_referenced_file(page);
 			unlock_page(page);
 		}
 	}
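With ignore_token and the swap_token_default_timeout shortcut removed above, the only swap-token special case left on this path is the one kept inside page_referenced_one(). A restatement of that retained predicate as a helper, purely for illustration; the helper name is invented here and does not appear in the patch:

	/*
	 * Illustrative helper (not in the patch): a page counts as
	 * referenced when some other mm owns the swap token and looks
	 * to be in the middle of a fault, i.e. its mmap_sem is held.
	 */
	static inline int swap_token_pretends_referenced(struct mm_struct *mm)
	{
		return mm != current->mm && has_swap_token(mm) &&
				rwsem_is_locked(&mm->mmap_sem);
	}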
@@ -614,7 +606,6 @@ static void try_to_unmap_cluster(unsigned long cursor,
 	struct page *page;
 	unsigned long address;
 	unsigned long end;
-	unsigned long pfn;
 
 	address = (vma->vm_start + cursor) & CLUSTER_MASK;
 	end = address + CLUSTER_SIZE;
@@ -643,21 +634,14 @@ static void try_to_unmap_cluster(unsigned long cursor,
 	for (; address < end; pte++, address += PAGE_SIZE) {
 		if (!pte_present(*pte))
 			continue;
-
-		pfn = pte_pfn(*pte);
-		if (unlikely(!pfn_valid(pfn))) {
-			print_bad_pte(vma, *pte, address);
-			continue;
-		}
-
-		page = pfn_to_page(pfn);
-		BUG_ON(PageAnon(page));
+		page = vm_normal_page(vma, address, *pte);
+		BUG_ON(!page || PageAnon(page));
 
 		if (ptep_clear_flush_young(vma, address, pte))
 			continue;
 
 		/* Nuke the page table entry. */
-		flush_cache_page(vma, address, pfn);
+		flush_cache_page(vma, address, pte_pfn(*pte));
 		pteval = ptep_clear_flush(vma, address, pte);
 
 		/* If nonlinear, store the file page offset in the pte. */
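For reference, the last hunk replaces the manual pfn checks with vm_normal_page(), which returns the struct page backing a pte, or NULL when the pte does not map an ordinary page. A sketch contrasting the removed and the new lookup; the wrapper function is hypothetical and exists only to show the two patterns side by side:

	/* Sketch only: contrasts the removed and the new pte-to-page lookup. */
	static struct page *pte_page_sketch(struct vm_area_struct *vma,
					    unsigned long address, pte_t pte)
	{
		/*
		 * Removed pattern:
		 *	unsigned long pfn = pte_pfn(pte);
		 *	if (unlikely(!pfn_valid(pfn))) {
		 *		print_bad_pte(vma, pte, address);
		 *		return NULL;
		 *	}
		 *	return pfn_to_page(pfn);
		 */

		/* New pattern: vm_normal_page() does the validation and
		 * returns NULL for special pfn mappings that have no
		 * backing struct page. */
		return vm_normal_page(vma, address, pte);
	}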