author		Martin Schwidefsky <schwidefsky@de.ibm.com>	2014-04-29 03:34:41 -0400
committer	Christian Borntraeger <borntraeger@de.ibm.com>	2014-08-25 08:35:58 -0400
commit		6e0a0431bf7d90ed0b8a0a974ad219617a70cc22 (patch)
tree		964d7a595bba5c8cd84ae6a0d363fe9edaabc14e
parent		9da4e3807657f3bcd12cfbb5671d80794303dde2 (diff)
KVM: s390/mm: cleanup gmap function arguments, variable names
Make the order of arguments for the gmap calls more consistent: if the
gmap pointer is passed, it is always the first argument. In addition,
distinguish between guest address and user address by naming the
variables gaddr for a guest address and vmaddr for a user address.

Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Reviewed-by: Cornelia Huck <cornelia.huck@de.ibm.com>
Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
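To illustrate the convention this patch establishes (gmap pointer first, guest addresses named gaddr), here is a minimal sketch built only from the prototypes and call sites changed below; the release_range() wrapper is hypothetical and not part of the patch.

struct gmap;	/* opaque here; fully defined in arch/s390/include/asm/pgtable.h */

/* old order: guest address first, gmap pointer last
 *	unsigned long gmap_fault(unsigned long address, struct gmap *);
 *	void gmap_discard(unsigned long from, unsigned long to, struct gmap *);
 */

/* new order: gmap pointer first, guest addresses named gaddr */
unsigned long gmap_fault(struct gmap *gmap, unsigned long gaddr);
void gmap_discard(struct gmap *gmap, unsigned long from, unsigned long to);

/* hypothetical caller, mirroring the updated call sites in arch/s390/kvm/diag.c */
static inline void release_range(struct gmap *gmap, unsigned long start,
				 unsigned long end)
{
	gmap_discard(gmap, start, end);	/* was: gmap_discard(start, end, gmap); */
}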
-rw-r--r--	arch/s390/include/asm/pgtable.h	14
-rw-r--r--	arch/s390/kvm/diag.c	8
-rw-r--r--	arch/s390/kvm/interrupt.c	2
-rw-r--r--	arch/s390/kvm/kvm-s390.c	4
-rw-r--r--	arch/s390/kvm/priv.c	2
-rw-r--r--	arch/s390/mm/fault.c	2
-rw-r--r--	arch/s390/mm/pgtable.c	110
7 files changed, 72 insertions, 70 deletions
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 4cd91ac42f46..d95012f9e77f 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -834,7 +834,7 @@ struct gmap_pgtable {
  */
 struct gmap_notifier {
 	struct list_head list;
-	void (*notifier_call)(struct gmap *gmap, unsigned long address);
+	void (*notifier_call)(struct gmap *gmap, unsigned long gaddr);
 };
 
 struct gmap *gmap_alloc(struct mm_struct *mm);
@@ -844,12 +844,12 @@ void gmap_disable(struct gmap *gmap);
 int gmap_map_segment(struct gmap *gmap, unsigned long from,
 		     unsigned long to, unsigned long len);
 int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len);
-unsigned long __gmap_translate(unsigned long address, struct gmap *);
-unsigned long gmap_translate(unsigned long address, struct gmap *);
-unsigned long __gmap_fault(unsigned long address, struct gmap *);
-unsigned long gmap_fault(unsigned long address, struct gmap *);
-void gmap_discard(unsigned long from, unsigned long to, struct gmap *);
-void __gmap_zap(unsigned long address, struct gmap *);
+unsigned long __gmap_translate(struct gmap *, unsigned long gaddr);
+unsigned long gmap_translate(struct gmap *, unsigned long gaddr);
+unsigned long __gmap_fault(struct gmap *, unsigned long gaddr);
+unsigned long gmap_fault(struct gmap *, unsigned long gaddr);
+void gmap_discard(struct gmap *, unsigned long from, unsigned long to);
+void __gmap_zap(struct gmap *, unsigned long gaddr);
 bool gmap_test_and_clear_dirty(unsigned long address, struct gmap *);
 
 
diff --git a/arch/s390/kvm/diag.c b/arch/s390/kvm/diag.c
index 59bd8f991b98..b374b6cb7785 100644
--- a/arch/s390/kvm/diag.c
+++ b/arch/s390/kvm/diag.c
@@ -37,13 +37,13 @@ static int diag_release_pages(struct kvm_vcpu *vcpu)
 
 	/* we checked for start > end above */
 	if (end < prefix || start >= prefix + 2 * PAGE_SIZE) {
-		gmap_discard(start, end, vcpu->arch.gmap);
+		gmap_discard(vcpu->arch.gmap, start, end);
 	} else {
 		if (start < prefix)
-			gmap_discard(start, prefix, vcpu->arch.gmap);
+			gmap_discard(vcpu->arch.gmap, start, prefix);
 		if (end >= prefix)
-			gmap_discard(prefix + 2 * PAGE_SIZE,
-				     end, vcpu->arch.gmap);
+			gmap_discard(vcpu->arch.gmap,
+				     prefix + 2 * PAGE_SIZE, end);
 	}
 	return 0;
 }
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index ba89bbbd2ed5..60a5cf40d49a 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -1241,7 +1241,7 @@ static int kvm_s390_adapter_map(struct kvm *kvm, unsigned int id, __u64 addr)
 	}
 	INIT_LIST_HEAD(&map->list);
 	map->guest_addr = addr;
-	map->addr = gmap_translate(addr, kvm->arch.gmap);
+	map->addr = gmap_translate(kvm->arch.gmap, addr);
 	if (map->addr == -EFAULT) {
 		ret = -EFAULT;
 		goto out;
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index c2caa175320c..5c877c8e4494 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -1096,7 +1096,7 @@ long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable)
 	hva_t hva;
 	long rc;
 
-	hva = gmap_fault(gpa, vcpu->arch.gmap);
+	hva = gmap_fault(vcpu->arch.gmap, gpa);
 	if (IS_ERR_VALUE(hva))
 		return (long)hva;
 	down_read(&mm->mmap_sem);
@@ -1683,7 +1683,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
 	}
 #endif
 	case KVM_S390_VCPU_FAULT: {
-		r = gmap_fault(arg, vcpu->arch.gmap);
+		r = gmap_fault(vcpu->arch.gmap, arg);
 		if (!IS_ERR_VALUE(r))
 			r = 0;
 		break;
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
index d806f2cfde16..72bb2dd8b9cd 100644
--- a/arch/s390/kvm/priv.c
+++ b/arch/s390/kvm/priv.c
@@ -729,7 +729,7 @@ static int handle_essa(struct kvm_vcpu *vcpu)
 			/* invalid entry */
 			break;
 		/* try to free backing */
-		__gmap_zap(cbrle, gmap);
+		__gmap_zap(gmap, cbrle);
 	}
 	up_read(&gmap->mm->mmap_sem);
 	if (i < entries)
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index 3f3b35403d0a..4880399d040e 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -445,7 +445,7 @@ static inline int do_exception(struct pt_regs *regs, int access)
 	gmap = (struct gmap *)
 		((current->flags & PF_VCPU) ? S390_lowcore.gmap : 0);
 	if (gmap) {
-		address = __gmap_fault(address, gmap);
+		address = __gmap_fault(gmap, address);
 		if (address == -EFAULT) {
 			fault = VM_FAULT_BADMAP;
 			goto out_up;
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index c09820dce81c..16ca8617f2e1 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -295,7 +295,7 @@ static int gmap_alloc_table(struct gmap *gmap,
 /**
  * gmap_unmap_segment - unmap segment from the guest address space
  * @gmap: pointer to the guest address space structure
- * @addr: address in the guest address space
+ * @to: address in the guest address space
  * @len: length of the memory area to unmap
  *
  * Returns 0 if the unmap succeeded, -EINVAL if not.
@@ -348,6 +348,7 @@ EXPORT_SYMBOL_GPL(gmap_unmap_segment);
  * @gmap: pointer to the guest address space structure
  * @from: source address in the parent address space
  * @to: target address in the guest address space
+ * @len: length of the memory area to map
  *
  * Returns 0 if the mmap succeeded, -EINVAL or -ENOMEM if not.
  */
@@ -405,30 +406,30 @@ out_unmap:
 }
 EXPORT_SYMBOL_GPL(gmap_map_segment);
 
-static unsigned long *gmap_table_walk(unsigned long address, struct gmap *gmap)
+static unsigned long *gmap_table_walk(struct gmap *gmap, unsigned long gaddr)
 {
 	unsigned long *table;
 
-	table = gmap->table + ((address >> 53) & 0x7ff);
+	table = gmap->table + ((gaddr >> 53) & 0x7ff);
 	if (unlikely(*table & _REGION_ENTRY_INVALID))
 		return ERR_PTR(-EFAULT);
 	table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
-	table = table + ((address >> 42) & 0x7ff);
+	table = table + ((gaddr >> 42) & 0x7ff);
 	if (unlikely(*table & _REGION_ENTRY_INVALID))
 		return ERR_PTR(-EFAULT);
 	table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
-	table = table + ((address >> 31) & 0x7ff);
+	table = table + ((gaddr >> 31) & 0x7ff);
 	if (unlikely(*table & _REGION_ENTRY_INVALID))
 		return ERR_PTR(-EFAULT);
 	table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
-	table = table + ((address >> 20) & 0x7ff);
+	table = table + ((gaddr >> 20) & 0x7ff);
 	return table;
 }
 
 /**
  * __gmap_translate - translate a guest address to a user space address
- * @address: guest address
  * @gmap: pointer to guest mapping meta data structure
+ * @gaddr: guest address
  *
  * Returns user space address which corresponds to the guest address or
  * -EFAULT if no such mapping exists.
@@ -436,14 +437,14 @@ static unsigned long *gmap_table_walk(unsigned long address, struct gmap *gmap)
  * The mmap_sem of the mm that belongs to the address space must be held
  * when this function gets called.
  */
-unsigned long __gmap_translate(unsigned long address, struct gmap *gmap)
+unsigned long __gmap_translate(struct gmap *gmap, unsigned long gaddr)
 {
 	unsigned long *segment_ptr, vmaddr, segment;
 	struct gmap_pgtable *mp;
 	struct page *page;
 
-	current->thread.gmap_addr = address;
-	segment_ptr = gmap_table_walk(address, gmap);
+	current->thread.gmap_addr = gaddr;
+	segment_ptr = gmap_table_walk(gmap, gaddr);
 	if (IS_ERR(segment_ptr))
 		return PTR_ERR(segment_ptr);
 	/* Convert the gmap address to an mm address. */
@@ -451,10 +452,10 @@ unsigned long __gmap_translate(unsigned long address, struct gmap *gmap)
 	if (!(segment & _SEGMENT_ENTRY_INVALID)) {
 		page = pfn_to_page(segment >> PAGE_SHIFT);
 		mp = (struct gmap_pgtable *) page->index;
-		return mp->vmaddr | (address & ~PMD_MASK);
+		return mp->vmaddr | (gaddr & ~PMD_MASK);
 	} else if (segment & _SEGMENT_ENTRY_PROTECT) {
 		vmaddr = segment & _SEGMENT_ENTRY_ORIGIN;
-		return vmaddr | (address & ~PMD_MASK);
+		return vmaddr | (gaddr & ~PMD_MASK);
 	}
 	return -EFAULT;
 }
@@ -462,26 +463,27 @@ EXPORT_SYMBOL_GPL(__gmap_translate);
 
 /**
  * gmap_translate - translate a guest address to a user space address
- * @address: guest address
  * @gmap: pointer to guest mapping meta data structure
+ * @gaddr: guest address
  *
  * Returns user space address which corresponds to the guest address or
  * -EFAULT if no such mapping exists.
  * This function does not establish potentially missing page table entries.
  */
-unsigned long gmap_translate(unsigned long address, struct gmap *gmap)
+unsigned long gmap_translate(struct gmap *gmap, unsigned long gaddr)
 {
 	unsigned long rc;
 
 	down_read(&gmap->mm->mmap_sem);
-	rc = __gmap_translate(address, gmap);
+	rc = __gmap_translate(gmap, gaddr);
 	up_read(&gmap->mm->mmap_sem);
 	return rc;
 }
 EXPORT_SYMBOL_GPL(gmap_translate);
 
-static int gmap_connect_pgtable(unsigned long address, unsigned long segment,
-				unsigned long *segment_ptr, struct gmap *gmap)
+static int gmap_connect_pgtable(struct gmap *gmap, unsigned long gaddr,
+				unsigned long segment,
+				unsigned long *segment_ptr)
 {
 	unsigned long vmaddr;
 	struct vm_area_struct *vma;
@@ -521,7 +523,7 @@ static int gmap_connect_pgtable(unsigned long address, unsigned long segment,
 	mp = (struct gmap_pgtable *) page->index;
 	rmap->gmap = gmap;
 	rmap->entry = segment_ptr;
-	rmap->vmaddr = address & PMD_MASK;
+	rmap->vmaddr = gaddr & PMD_MASK;
 	spin_lock(&mm->page_table_lock);
 	if (*segment_ptr == segment) {
 		list_add(&rmap->list, &mp->mapper);
@@ -560,15 +562,15 @@ static void gmap_disconnect_pgtable(struct mm_struct *mm, unsigned long *table)
 /*
  * this function is assumed to be called with mmap_sem held
  */
-unsigned long __gmap_fault(unsigned long address, struct gmap *gmap)
+unsigned long __gmap_fault(struct gmap *gmap, unsigned long gaddr)
 {
 	unsigned long *segment_ptr, segment;
 	struct gmap_pgtable *mp;
 	struct page *page;
 	int rc;
 
-	current->thread.gmap_addr = address;
-	segment_ptr = gmap_table_walk(address, gmap);
+	current->thread.gmap_addr = gaddr;
+	segment_ptr = gmap_table_walk(gmap, gaddr);
 	if (IS_ERR(segment_ptr))
 		return -EFAULT;
 	/* Convert the gmap address to an mm address. */
@@ -578,24 +580,24 @@ unsigned long __gmap_fault(unsigned long address, struct gmap *gmap)
 			/* Page table is present */
 			page = pfn_to_page(segment >> PAGE_SHIFT);
 			mp = (struct gmap_pgtable *) page->index;
-			return mp->vmaddr | (address & ~PMD_MASK);
+			return mp->vmaddr | (gaddr & ~PMD_MASK);
 		}
 		if (!(segment & _SEGMENT_ENTRY_PROTECT))
 			/* Nothing mapped in the gmap address space. */
 			break;
-		rc = gmap_connect_pgtable(address, segment, segment_ptr, gmap);
+		rc = gmap_connect_pgtable(gmap, gaddr, segment, segment_ptr);
 		if (rc)
 			return rc;
 	}
 	return -EFAULT;
 }
 
-unsigned long gmap_fault(unsigned long address, struct gmap *gmap)
+unsigned long gmap_fault(struct gmap *gmap, unsigned long gaddr)
 {
 	unsigned long rc;
 
 	down_read(&gmap->mm->mmap_sem);
-	rc = __gmap_fault(address, gmap);
+	rc = __gmap_fault(gmap, gaddr);
 	up_read(&gmap->mm->mmap_sem);
 
 	return rc;
@@ -620,14 +622,14 @@ static void gmap_zap_swap_entry(swp_entry_t entry, struct mm_struct *mm)
 /**
  * The mm->mmap_sem lock must be held
  */
-static void gmap_zap_unused(struct mm_struct *mm, unsigned long address)
+static void gmap_zap_unused(struct mm_struct *mm, unsigned long vmaddr)
 {
 	unsigned long ptev, pgstev;
 	spinlock_t *ptl;
 	pgste_t pgste;
 	pte_t *ptep, pte;
 
-	ptep = get_locked_pte(mm, address, &ptl);
+	ptep = get_locked_pte(mm, vmaddr, &ptl);
 	if (unlikely(!ptep))
 		return;
 	pte = *ptep;
@@ -640,7 +642,7 @@ static void gmap_zap_unused(struct mm_struct *mm, unsigned long address)
 	if (((pgstev & _PGSTE_GPS_USAGE_MASK) == _PGSTE_GPS_USAGE_UNUSED) ||
 	    ((pgstev & _PGSTE_GPS_ZERO) && (ptev & _PAGE_INVALID))) {
 		gmap_zap_swap_entry(pte_to_swp_entry(pte), mm);
-		pte_clear(mm, address, ptep);
+		pte_clear(mm, vmaddr, ptep);
 	}
 	pgste_set_unlock(ptep, pgste);
 out_pte:
@@ -650,14 +652,14 @@ out_pte:
 /*
  * this function is assumed to be called with mmap_sem held
  */
-void __gmap_zap(unsigned long address, struct gmap *gmap)
+void __gmap_zap(struct gmap *gmap, unsigned long gaddr)
 {
 	unsigned long *table, *segment_ptr;
-	unsigned long segment, pgstev, ptev;
+	unsigned long segment, vmaddr, pgstev, ptev;
 	struct gmap_pgtable *mp;
 	struct page *page;
 
-	segment_ptr = gmap_table_walk(address, gmap);
+	segment_ptr = gmap_table_walk(gmap, gaddr);
 	if (IS_ERR(segment_ptr))
 		return;
 	segment = *segment_ptr;
@@ -665,61 +667,61 @@ void __gmap_zap(unsigned long address, struct gmap *gmap)
 		return;
 	page = pfn_to_page(segment >> PAGE_SHIFT);
 	mp = (struct gmap_pgtable *) page->index;
-	address = mp->vmaddr | (address & ~PMD_MASK);
+	vmaddr = mp->vmaddr | (gaddr & ~PMD_MASK);
 	/* Page table is present */
 	table = (unsigned long *)(segment & _SEGMENT_ENTRY_ORIGIN);
-	table = table + ((address >> 12) & 0xff);
+	table = table + ((vmaddr >> 12) & 0xff);
 	pgstev = table[PTRS_PER_PTE];
 	ptev = table[0];
 	/* quick check, checked again with locks held */
 	if (((pgstev & _PGSTE_GPS_USAGE_MASK) == _PGSTE_GPS_USAGE_UNUSED) ||
 	    ((pgstev & _PGSTE_GPS_ZERO) && (ptev & _PAGE_INVALID)))
-		gmap_zap_unused(gmap->mm, address);
+		gmap_zap_unused(gmap->mm, vmaddr);
 }
 EXPORT_SYMBOL_GPL(__gmap_zap);
 
-void gmap_discard(unsigned long from, unsigned long to, struct gmap *gmap)
+void gmap_discard(struct gmap *gmap, unsigned long from, unsigned long to)
 {
 
-	unsigned long *table, address, size;
+	unsigned long *table, gaddr, size;
 	struct vm_area_struct *vma;
 	struct gmap_pgtable *mp;
 	struct page *page;
 
 	down_read(&gmap->mm->mmap_sem);
-	address = from;
-	while (address < to) {
+	gaddr = from;
+	while (gaddr < to) {
 		/* Walk the gmap address space page table */
-		table = gmap->table + ((address >> 53) & 0x7ff);
+		table = gmap->table + ((gaddr >> 53) & 0x7ff);
 		if (unlikely(*table & _REGION_ENTRY_INVALID)) {
-			address = (address + PMD_SIZE) & PMD_MASK;
+			gaddr = (gaddr + PMD_SIZE) & PMD_MASK;
 			continue;
 		}
 		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
-		table = table + ((address >> 42) & 0x7ff);
+		table = table + ((gaddr >> 42) & 0x7ff);
 		if (unlikely(*table & _REGION_ENTRY_INVALID)) {
-			address = (address + PMD_SIZE) & PMD_MASK;
+			gaddr = (gaddr + PMD_SIZE) & PMD_MASK;
 			continue;
 		}
 		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
-		table = table + ((address >> 31) & 0x7ff);
+		table = table + ((gaddr >> 31) & 0x7ff);
 		if (unlikely(*table & _REGION_ENTRY_INVALID)) {
-			address = (address + PMD_SIZE) & PMD_MASK;
+			gaddr = (gaddr + PMD_SIZE) & PMD_MASK;
 			continue;
 		}
 		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
-		table = table + ((address >> 20) & 0x7ff);
+		table = table + ((gaddr >> 20) & 0x7ff);
 		if (unlikely(*table & _SEGMENT_ENTRY_INVALID)) {
-			address = (address + PMD_SIZE) & PMD_MASK;
+			gaddr = (gaddr + PMD_SIZE) & PMD_MASK;
 			continue;
 		}
 		page = pfn_to_page(*table >> PAGE_SHIFT);
 		mp = (struct gmap_pgtable *) page->index;
 		vma = find_vma(gmap->mm, mp->vmaddr);
-		size = min(to - address, PMD_SIZE - (address & ~PMD_MASK));
-		zap_page_range(vma, mp->vmaddr | (address & ~PMD_MASK),
+		size = min(to - gaddr, PMD_SIZE - (gaddr & ~PMD_MASK));
+		zap_page_range(vma, mp->vmaddr | (gaddr & ~PMD_MASK),
 			       size, NULL);
-		address = (address + PMD_SIZE) & PMD_MASK;
+		gaddr = (gaddr + PMD_SIZE) & PMD_MASK;
 	}
 	up_read(&gmap->mm->mmap_sem);
 }
@@ -755,7 +757,7 @@ EXPORT_SYMBOL_GPL(gmap_unregister_ipte_notifier);
 /**
  * gmap_ipte_notify - mark a range of ptes for invalidation notification
  * @gmap: pointer to guest mapping meta data structure
- * @start: virtual address in the guest address space
+ * @gaddr: virtual address in the guest address space
  * @len: size of area
  *
  * Returns 0 if for each page in the given range a gmap mapping exists and
@@ -763,7 +765,7 @@ EXPORT_SYMBOL_GPL(gmap_unregister_ipte_notifier);
  * for one or more pages -EFAULT is returned. If no memory could be allocated
  * -ENOMEM is returned. This function establishes missing page table entries.
  */
-int gmap_ipte_notify(struct gmap *gmap, unsigned long start, unsigned long len)
+int gmap_ipte_notify(struct gmap *gmap, unsigned long gaddr, unsigned long len)
 {
 	unsigned long addr;
 	spinlock_t *ptl;
@@ -771,12 +773,12 @@ int gmap_ipte_notify(struct gmap *gmap, unsigned long start, unsigned long len)
 	pgste_t pgste;
 	int rc = 0;
 
-	if ((start & ~PAGE_MASK) || (len & ~PAGE_MASK))
+	if ((gaddr & ~PAGE_MASK) || (len & ~PAGE_MASK))
 		return -EINVAL;
 	down_read(&gmap->mm->mmap_sem);
 	while (len) {
 		/* Convert gmap address and connect the page tables */
-		addr = __gmap_fault(start, gmap);
+		addr = __gmap_fault(gmap, gaddr);
 		if (IS_ERR_VALUE(addr)) {
 			rc = addr;
 			break;
@@ -796,7 +798,7 @@ int gmap_ipte_notify(struct gmap *gmap, unsigned long start, unsigned long len)
 			pgste = pgste_get_lock(ptep);
 			pgste_val(pgste) |= PGSTE_IN_BIT;
 			pgste_set_unlock(ptep, pgste);
-			start += PAGE_SIZE;
+			gaddr += PAGE_SIZE;
 			len -= PAGE_SIZE;
 		}
 		spin_unlock(ptl);