summaryrefslogtreecommitdiffstats
path: root/mm
diff options
context:
space:
mode:
authorJérôme Glisse <jglisse@redhat.com>2018-04-10 19:28:38 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2018-04-11 13:28:30 -0400
commitff05c0c6bbe5043af6a1686522ed845f40ba49ee (patch)
tree9799943dde9991379fc2569182f5ba5cf7accad7 /mm
parent86586a41b8fe655e28be418a40e9bb2bb478cdd5 (diff)
mm/hmm: use uint64_t for HMM pfn instead of defining hmm_pfn_t to ulong
All device drivers we care about use 64-bit page table entries. In order to match this and to avoid a useless define, convert all HMM pfns to directly use uint64_t. It is a first step on the road to allowing drivers to directly use the pfn values returned by HMM (saving memory and the CPU cycles used for conversion between the two). Link: http://lkml.kernel.org/r/20180323005527.758-9-jglisse@redhat.com Signed-off-by: Jérôme Glisse <jglisse@redhat.com> Reviewed-by: John Hubbard <jhubbard@nvidia.com> Cc: Evgeny Baskakov <ebaskakov@nvidia.com> Cc: Ralph Campbell <rcampbell@nvidia.com> Cc: Mark Hairgrove <mhairgrove@nvidia.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r--mm/hmm.c26
1 file changed, 13 insertions, 13 deletions
diff --git a/mm/hmm.c b/mm/hmm.c
index 5da0f852a7aa..b69f30fc064b 100644
--- a/mm/hmm.c
+++ b/mm/hmm.c
@@ -304,7 +304,7 @@ struct hmm_vma_walk {
304 304
305static int hmm_vma_do_fault(struct mm_walk *walk, 305static int hmm_vma_do_fault(struct mm_walk *walk,
306 unsigned long addr, 306 unsigned long addr,
307 hmm_pfn_t *pfn) 307 uint64_t *pfn)
308{ 308{
309 unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_REMOTE; 309 unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_REMOTE;
310 struct hmm_vma_walk *hmm_vma_walk = walk->private; 310 struct hmm_vma_walk *hmm_vma_walk = walk->private;
@@ -324,7 +324,7 @@ static int hmm_vma_do_fault(struct mm_walk *walk,
324 return -EAGAIN; 324 return -EAGAIN;
325} 325}
326 326
327static void hmm_pfns_special(hmm_pfn_t *pfns, 327static void hmm_pfns_special(uint64_t *pfns,
328 unsigned long addr, 328 unsigned long addr,
329 unsigned long end) 329 unsigned long end)
330{ 330{
@@ -338,7 +338,7 @@ static int hmm_pfns_bad(unsigned long addr,
338{ 338{
339 struct hmm_vma_walk *hmm_vma_walk = walk->private; 339 struct hmm_vma_walk *hmm_vma_walk = walk->private;
340 struct hmm_range *range = hmm_vma_walk->range; 340 struct hmm_range *range = hmm_vma_walk->range;
341 hmm_pfn_t *pfns = range->pfns; 341 uint64_t *pfns = range->pfns;
342 unsigned long i; 342 unsigned long i;
343 343
344 i = (addr - range->start) >> PAGE_SHIFT; 344 i = (addr - range->start) >> PAGE_SHIFT;
@@ -348,7 +348,7 @@ static int hmm_pfns_bad(unsigned long addr,
348 return 0; 348 return 0;
349} 349}
350 350
351static void hmm_pfns_clear(hmm_pfn_t *pfns, 351static void hmm_pfns_clear(uint64_t *pfns,
352 unsigned long addr, 352 unsigned long addr,
353 unsigned long end) 353 unsigned long end)
354{ 354{
@@ -362,7 +362,7 @@ static int hmm_vma_walk_hole(unsigned long addr,
362{ 362{
363 struct hmm_vma_walk *hmm_vma_walk = walk->private; 363 struct hmm_vma_walk *hmm_vma_walk = walk->private;
364 struct hmm_range *range = hmm_vma_walk->range; 364 struct hmm_range *range = hmm_vma_walk->range;
365 hmm_pfn_t *pfns = range->pfns; 365 uint64_t *pfns = range->pfns;
366 unsigned long i; 366 unsigned long i;
367 367
368 hmm_vma_walk->last = addr; 368 hmm_vma_walk->last = addr;
@@ -387,7 +387,7 @@ static int hmm_vma_walk_clear(unsigned long addr,
387{ 387{
388 struct hmm_vma_walk *hmm_vma_walk = walk->private; 388 struct hmm_vma_walk *hmm_vma_walk = walk->private;
389 struct hmm_range *range = hmm_vma_walk->range; 389 struct hmm_range *range = hmm_vma_walk->range;
390 hmm_pfn_t *pfns = range->pfns; 390 uint64_t *pfns = range->pfns;
391 unsigned long i; 391 unsigned long i;
392 392
393 hmm_vma_walk->last = addr; 393 hmm_vma_walk->last = addr;
@@ -414,7 +414,7 @@ static int hmm_vma_walk_pmd(pmd_t *pmdp,
414 struct hmm_vma_walk *hmm_vma_walk = walk->private; 414 struct hmm_vma_walk *hmm_vma_walk = walk->private;
415 struct hmm_range *range = hmm_vma_walk->range; 415 struct hmm_range *range = hmm_vma_walk->range;
416 struct vm_area_struct *vma = walk->vma; 416 struct vm_area_struct *vma = walk->vma;
417 hmm_pfn_t *pfns = range->pfns; 417 uint64_t *pfns = range->pfns;
418 unsigned long addr = start, i; 418 unsigned long addr = start, i;
419 bool write_fault; 419 bool write_fault;
420 pte_t *ptep; 420 pte_t *ptep;
@@ -431,7 +431,7 @@ again:
431 431
432 if (pmd_devmap(*pmdp) || pmd_trans_huge(*pmdp)) { 432 if (pmd_devmap(*pmdp) || pmd_trans_huge(*pmdp)) {
433 unsigned long pfn; 433 unsigned long pfn;
434 hmm_pfn_t flag = 0; 434 uint64_t flag = 0;
435 pmd_t pmd; 435 pmd_t pmd;
436 436
437 /* 437 /*
@@ -456,7 +456,7 @@ again:
456 pfn = pmd_pfn(pmd) + pte_index(addr); 456 pfn = pmd_pfn(pmd) + pte_index(addr);
457 flag |= pmd_write(pmd) ? HMM_PFN_WRITE : 0; 457 flag |= pmd_write(pmd) ? HMM_PFN_WRITE : 0;
458 for (; addr < end; addr += PAGE_SIZE, i++, pfn++) 458 for (; addr < end; addr += PAGE_SIZE, i++, pfn++)
459 pfns[i] = hmm_pfn_t_from_pfn(pfn) | flag; 459 pfns[i] = hmm_pfn_from_pfn(pfn) | flag;
460 return 0; 460 return 0;
461 } 461 }
462 462
@@ -490,7 +490,7 @@ again:
490 * device and report anything else as error. 490 * device and report anything else as error.
491 */ 491 */
492 if (is_device_private_entry(entry)) { 492 if (is_device_private_entry(entry)) {
493 pfns[i] = hmm_pfn_t_from_pfn(swp_offset(entry)); 493 pfns[i] = hmm_pfn_from_pfn(swp_offset(entry));
494 if (is_write_device_private_entry(entry)) { 494 if (is_write_device_private_entry(entry)) {
495 pfns[i] |= HMM_PFN_WRITE; 495 pfns[i] |= HMM_PFN_WRITE;
496 } else if (write_fault) 496 } else if (write_fault)
@@ -515,7 +515,7 @@ again:
515 if (write_fault && !pte_write(pte)) 515 if (write_fault && !pte_write(pte))
516 goto fault; 516 goto fault;
517 517
518 pfns[i] = hmm_pfn_t_from_pfn(pte_pfn(pte)); 518 pfns[i] = hmm_pfn_from_pfn(pte_pfn(pte));
519 pfns[i] |= pte_write(pte) ? HMM_PFN_WRITE : 0; 519 pfns[i] |= pte_write(pte) ? HMM_PFN_WRITE : 0;
520 continue; 520 continue;
521 521
@@ -678,8 +678,8 @@ EXPORT_SYMBOL(hmm_vma_range_done);
678 * This is similar to a regular CPU page fault except that it will not trigger 678 * This is similar to a regular CPU page fault except that it will not trigger
679 * any memory migration if the memory being faulted is not accessible by CPUs. 679 * any memory migration if the memory being faulted is not accessible by CPUs.
680 * 680 *
681 * On error, for one virtual address in the range, the function will set the 681 * On error, for one virtual address in the range, the function will mark the
682 * hmm_pfn_t error flag for the corresponding pfn entry. 682 * corresponding HMM pfn entry with an error flag.
683 * 683 *
684 * Expected use pattern: 684 * Expected use pattern:
685 * retry: 685 * retry: