author | Jérôme Glisse <jglisse@redhat.com> | 2018-04-10 19:28:46 -0400
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2018-04-11 13:28:30 -0400
commit | 5504ed29692faad06ea74c4275e96a8ffc83a1e1 (patch)
tree | fc5b15aa9076aeedd248c339b442ca7ad5c372e7 /mm
parent | 855ce7d2525c97cf706ad82a419f0c2d632b9481 (diff)
mm/hmm: do not differentiate between empty entry or missing directory
There is no point in differentiating between a range for which there is
not even a page directory (and thus no entries at all) and a range whose
entries are merely empty (pte_none() or pmd_none() returns true).
Simply drop the distinction, i.e. remove the HMM_PFN_EMPTY flag and merge
the now-duplicate hmm_vma_walk_hole() and hmm_vma_walk_clear() functions.
Link: http://lkml.kernel.org/r/20180323005527.758-11-jglisse@redhat.com
Signed-off-by: Jérôme Glisse <jglisse@redhat.com>
Reviewed-by: John Hubbard <jhubbard@nvidia.com>
Cc: Evgeny Baskakov <ebaskakov@nvidia.com>
Cc: Ralph Campbell <rcampbell@nvidia.com>
Cc: Mark Hairgrove <mhairgrove@nvidia.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')

-rw-r--r-- | mm/hmm.c | 45
1 file changed, 15 insertions(+), 30 deletions(-)
```diff
@@ -348,6 +348,16 @@ static void hmm_pfns_clear(uint64_t *pfns,
 	*pfns = 0;
 }
 
+/*
+ * hmm_vma_walk_hole() - handle a range lacking valid pmd or pte(s)
+ * @start: range virtual start address (inclusive)
+ * @end: range virtual end address (exclusive)
+ * @walk: mm_walk structure
+ * Returns: 0 on success, -EAGAIN after page fault, or page fault error
+ *
+ * This function will be called whenever pmd_none() or pte_none() returns true,
+ * or whenever there is no page directory covering the virtual address range.
+ */
 static int hmm_vma_walk_hole(unsigned long addr,
 			     unsigned long end,
 			     struct mm_walk *walk)
@@ -360,31 +370,6 @@ static int hmm_vma_walk_hole(unsigned long addr,
 	hmm_vma_walk->last = addr;
 	i = (addr - range->start) >> PAGE_SHIFT;
 	for (; addr < end; addr += PAGE_SIZE, i++) {
-		pfns[i] = HMM_PFN_EMPTY;
-		if (hmm_vma_walk->fault) {
-			int ret;
-
-			ret = hmm_vma_do_fault(walk, addr, &pfns[i]);
-			if (ret != -EAGAIN)
-				return ret;
-		}
-	}
-
-	return hmm_vma_walk->fault ? -EAGAIN : 0;
-}
-
-static int hmm_vma_walk_clear(unsigned long addr,
-			      unsigned long end,
-			      struct mm_walk *walk)
-{
-	struct hmm_vma_walk *hmm_vma_walk = walk->private;
-	struct hmm_range *range = hmm_vma_walk->range;
-	uint64_t *pfns = range->pfns;
-	unsigned long i;
-
-	hmm_vma_walk->last = addr;
-	i = (addr - range->start) >> PAGE_SHIFT;
-	for (; addr < end; addr += PAGE_SIZE, i++) {
 		pfns[i] = 0;
 		if (hmm_vma_walk->fault) {
 			int ret;
@@ -440,10 +425,10 @@ again:
 		if (!pmd_devmap(pmd) && !pmd_trans_huge(pmd))
 			goto again;
 		if (pmd_protnone(pmd))
-			return hmm_vma_walk_clear(start, end, walk);
+			return hmm_vma_walk_hole(start, end, walk);
 
 		if (write_fault && !pmd_write(pmd))
-			return hmm_vma_walk_clear(start, end, walk);
+			return hmm_vma_walk_hole(start, end, walk);
 
 		pfn = pmd_pfn(pmd) + pte_index(addr);
 		flag |= pmd_write(pmd) ? HMM_PFN_WRITE : 0;
@@ -462,7 +447,7 @@ again:
 		pfns[i] = 0;
 
 		if (pte_none(pte)) {
-			pfns[i] = HMM_PFN_EMPTY;
+			pfns[i] = 0;
 			if (hmm_vma_walk->fault)
 				goto fault;
 			continue;
@@ -513,8 +498,8 @@ again:
 
 fault:
 		pte_unmap(ptep);
-		/* Fault all pages in range */
-		return hmm_vma_walk_clear(start, end, walk);
+		/* Fault any virtual address we were asked to fault */
+		return hmm_vma_walk_hole(start, end, walk);
 	}
 	pte_unmap(ptep - 1);
 
```