-rw-r--r--   arch/x86/include/asm/pgtable.h    5
-rw-r--r--   arch/x86/mm/pat.c                30
-rw-r--r--   include/linux/mm.h                3
-rw-r--r--   mm/memory.c                      43
4 files changed, 11 insertions(+), 70 deletions(-)
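
Taken together, the hunks below drop the open-coded page-table walk (follow_pfnmap_pte() in mm/memory.c, plus the pte_pa() helper in pgtable.h) and switch the x86 PAT tracking code to follow_phys(), which returns the physical address and protection in a single call. A minimal sketch of the resulting calling pattern; the helper name reserve_one_page() is hypothetical, and vma/addr are assumed to come from the caller, as in track_pfn_vma_copy():

    /*
     * Illustrative sketch only -- reserve_one_page() is not part of the
     * patch. It mirrors how the converted code in arch/x86/mm/pat.c
     * obtains the physical address and protection for one page of a
     * pfn-mapped vma after this change.
     */
    static int reserve_one_page(struct vm_area_struct *vma, unsigned long addr)
    {
            unsigned long prot;
            u64 paddr;

            /* follow_phys() replaces follow_pfnmap_pte() + pte_pa()/pte_pgprot() */
            if (follow_phys(vma, addr, 0, &prot, &paddr))
                    return -EINVAL;

            return reserve_pfn_range(paddr, PAGE_SIZE, __pgprot(prot));
    }
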
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index 579f8ceee948..2aa792bbd7e0 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -230,11 +230,6 @@ static inline unsigned long pte_pfn(pte_t pte)
 	return (pte_val(pte) & PTE_PFN_MASK) >> PAGE_SHIFT;
 }
 
-static inline u64 pte_pa(pte_t pte)
-{
-	return pte_val(pte) & PTE_PFN_MASK;
-}
-
 #define pte_page(pte)	pfn_to_page(pte_pfn(pte))
 
 static inline int pmd_large(pmd_t pte)
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index d5254bae84f4..541bcc944a5b 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -685,8 +685,7 @@ int track_pfn_vma_copy(struct vm_area_struct *vma)
 	int retval = 0;
 	unsigned long i, j;
 	u64 paddr;
-	pgprot_t prot;
-	pte_t pte;
+	unsigned long prot;
 	unsigned long vma_start = vma->vm_start;
 	unsigned long vma_end = vma->vm_end;
 	unsigned long vma_size = vma_end - vma_start;
@@ -696,26 +695,22 @@ int track_pfn_vma_copy(struct vm_area_struct *vma)
 
 	if (is_linear_pfn_mapping(vma)) {
 		/*
-		 * reserve the whole chunk starting from vm_pgoff,
-		 * But, we have to get the protection from pte.
+		 * reserve the whole chunk covered by vma. We need the
+		 * starting address and protection from pte.
 		 */
-		if (follow_pfnmap_pte(vma, vma_start, &pte)) {
+		if (follow_phys(vma, vma_start, 0, &prot, &paddr)) {
 			WARN_ON_ONCE(1);
-			return -1;
+			return -EINVAL;
 		}
-		prot = pte_pgprot(pte);
-		paddr = (u64)vma->vm_pgoff << PAGE_SHIFT;
-		return reserve_pfn_range(paddr, vma_size, prot);
+		return reserve_pfn_range(paddr, vma_size, __pgprot(prot));
 	}
 
 	/* reserve entire vma page by page, using pfn and prot from pte */
 	for (i = 0; i < vma_size; i += PAGE_SIZE) {
-		if (follow_pfnmap_pte(vma, vma_start + i, &pte))
+		if (follow_phys(vma, vma_start + i, 0, &prot, &paddr))
 			continue;
 
-		paddr = pte_pa(pte);
-		prot = pte_pgprot(pte);
-		retval = reserve_pfn_range(paddr, PAGE_SIZE, prot);
+		retval = reserve_pfn_range(paddr, PAGE_SIZE, __pgprot(prot));
 		if (retval)
 			goto cleanup_ret;
 	}
@@ -724,10 +719,9 @@ int track_pfn_vma_copy(struct vm_area_struct *vma)
 cleanup_ret:
 	/* Reserve error: Cleanup partial reservation and return error */
 	for (j = 0; j < i; j += PAGE_SIZE) {
-		if (follow_pfnmap_pte(vma, vma_start + j, &pte))
+		if (follow_phys(vma, vma_start + j, 0, &prot, &paddr))
 			continue;
 
-		paddr = pte_pa(pte);
 		free_pfn_range(paddr, PAGE_SIZE);
 	}
 
@@ -797,6 +791,7 @@ void untrack_pfn_vma(struct vm_area_struct *vma, unsigned long pfn,
 {
 	unsigned long i;
 	u64 paddr;
+	unsigned long prot;
 	unsigned long vma_start = vma->vm_start;
 	unsigned long vma_end = vma->vm_end;
 	unsigned long vma_size = vma_end - vma_start;
@@ -821,12 +816,9 @@ void untrack_pfn_vma(struct vm_area_struct *vma, unsigned long pfn,
 	} else {
 		/* free entire vma, page by page, using the pfn from pte */
 		for (i = 0; i < vma_size; i += PAGE_SIZE) {
-			pte_t pte;
-
-			if (follow_pfnmap_pte(vma, vma_start + i, &pte))
+			if (follow_phys(vma, vma_start + i, 0, &prot, &paddr))
 				continue;
 
-			paddr = pte_pa(pte);
 			free_pfn_range(paddr, PAGE_SIZE);
 		}
 	}
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 2f6e2f886d4b..36f9b3fa5e15 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1239,9 +1239,6 @@ struct page *follow_page(struct vm_area_struct *, unsigned long address,
 #define FOLL_GET	0x04	/* do get_page on page */
 #define FOLL_ANON	0x08	/* give ZERO_PAGE if no pgtable */
 
-int follow_pfnmap_pte(struct vm_area_struct *vma,
-			unsigned long address, pte_t *ret_ptep);
-
 typedef int (*pte_fn_t)(pte_t *pte, pgtable_t token, unsigned long addr,
 			void *data);
 extern int apply_to_page_range(struct mm_struct *mm, unsigned long address,
diff --git a/mm/memory.c b/mm/memory.c
index 79f28e35d4fc..6b29f39a5a3e 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1168,49 +1168,6 @@ no_page_table:
 	return page;
 }
 
-int follow_pfnmap_pte(struct vm_area_struct *vma, unsigned long address,
-			pte_t *ret_ptep)
-{
-	pgd_t *pgd;
-	pud_t *pud;
-	pmd_t *pmd;
-	pte_t *ptep, pte;
-	spinlock_t *ptl;
-	struct page *page;
-	struct mm_struct *mm = vma->vm_mm;
-
-	if (!is_pfn_mapping(vma))
-		goto err;
-
-	page = NULL;
-	pgd = pgd_offset(mm, address);
-	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
-		goto err;
-
-	pud = pud_offset(pgd, address);
-	if (pud_none(*pud) || unlikely(pud_bad(*pud)))
-		goto err;
-
-	pmd = pmd_offset(pud, address);
-	if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
-		goto err;
-
-	ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
-
-	pte = *ptep;
-	if (!pte_present(pte))
-		goto err_unlock;
-
-	*ret_ptep = pte;
-	pte_unmap_unlock(ptep, ptl);
-	return 0;
-
-err_unlock:
-	pte_unmap_unlock(ptep, ptl);
-err:
-	return -EINVAL;
-}
-
 /* Can we do the FOLL_ANON optimization? */
 static inline int use_zero_page(struct vm_area_struct *vma)
 {