aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorvenkatesh.pallipadi@intel.com <venkatesh.pallipadi@intel.com>2009-01-09 19:13:11 -0500
committerIngo Molnar <mingo@elte.hu>2009-01-13 13:13:01 -0500
commite4b866ed197cef9989348e0479fed8d864ea465b (patch)
tree0420e59a2312f6d1156ec85e6895cf6f322e0c6f
parentafc7d20c8429f32f19d47367fdc36eeed2334ec3 (diff)
x86 PAT: change track_pfn_vma_new to take pgprot_t pointer param
Impact: cleanup Change the protection parameter for track_pfn_vma_new() into a pgprot_t pointer. Subsequent patch changes the x86 PAT handling to return a compatible memtype in pgprot_t, if what was requested cannot be allowed due to conflicts. No functionality change in this patch. Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com> Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com> Signed-off-by: Ingo Molnar <mingo@elte.hu>
-rw-r--r--arch/x86/mm/pat.c6
-rw-r--r--include/asm-generic/pgtable.h4
-rw-r--r--mm/memory.c7
3 files changed, 9 insertions, 8 deletions
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index 85cbd3cd3723..f88ac80530c0 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -741,7 +741,7 @@ cleanup_ret:
741 * Note that this function can be called with caller trying to map only a 741 * Note that this function can be called with caller trying to map only a
742 * subrange/page inside the vma. 742 * subrange/page inside the vma.
743 */ 743 */
744int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t prot, 744int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t *prot,
745 unsigned long pfn, unsigned long size) 745 unsigned long pfn, unsigned long size)
746{ 746{
747 int retval = 0; 747 int retval = 0;
@@ -758,14 +758,14 @@ int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t prot,
758 if (is_linear_pfn_mapping(vma)) { 758 if (is_linear_pfn_mapping(vma)) {
759 /* reserve the whole chunk starting from vm_pgoff */ 759 /* reserve the whole chunk starting from vm_pgoff */
760 paddr = (resource_size_t)vma->vm_pgoff << PAGE_SHIFT; 760 paddr = (resource_size_t)vma->vm_pgoff << PAGE_SHIFT;
761 return reserve_pfn_range(paddr, vma_size, prot); 761 return reserve_pfn_range(paddr, vma_size, *prot);
762 } 762 }
763 763
764 /* reserve page by page using pfn and size */ 764 /* reserve page by page using pfn and size */
765 base_paddr = (resource_size_t)pfn << PAGE_SHIFT; 765 base_paddr = (resource_size_t)pfn << PAGE_SHIFT;
766 for (i = 0; i < size; i += PAGE_SIZE) { 766 for (i = 0; i < size; i += PAGE_SIZE) {
767 paddr = base_paddr + i; 767 paddr = base_paddr + i;
768 retval = reserve_pfn_range(paddr, PAGE_SIZE, prot); 768 retval = reserve_pfn_range(paddr, PAGE_SIZE, *prot);
769 if (retval) 769 if (retval)
770 goto cleanup_ret; 770 goto cleanup_ret;
771 } 771 }
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index 72ebe91005a8..8e6d0ca70aba 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -301,7 +301,7 @@ static inline void ptep_modify_prot_commit(struct mm_struct *mm,
301 * track_pfn_vma_new is called when a _new_ pfn mapping is being established 301 * track_pfn_vma_new is called when a _new_ pfn mapping is being established
302 * for physical range indicated by pfn and size. 302 * for physical range indicated by pfn and size.
303 */ 303 */
304static inline int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t prot, 304static inline int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t *prot,
305 unsigned long pfn, unsigned long size) 305 unsigned long pfn, unsigned long size)
306{ 306{
307 return 0; 307 return 0;
@@ -332,7 +332,7 @@ static inline void untrack_pfn_vma(struct vm_area_struct *vma,
332{ 332{
333} 333}
334#else 334#else
335extern int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t prot, 335extern int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t *prot,
336 unsigned long pfn, unsigned long size); 336 unsigned long pfn, unsigned long size);
337extern int track_pfn_vma_copy(struct vm_area_struct *vma); 337extern int track_pfn_vma_copy(struct vm_area_struct *vma);
338extern void untrack_pfn_vma(struct vm_area_struct *vma, unsigned long pfn, 338extern void untrack_pfn_vma(struct vm_area_struct *vma, unsigned long pfn,
diff --git a/mm/memory.c b/mm/memory.c
index d3ee2ea5615c..22bfa7a47a0b 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1511,6 +1511,7 @@ int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
1511 unsigned long pfn) 1511 unsigned long pfn)
1512{ 1512{
1513 int ret; 1513 int ret;
1514 pgprot_t pgprot = vma->vm_page_prot;
1514 /* 1515 /*
1515 * Technically, architectures with pte_special can avoid all these 1516 * Technically, architectures with pte_special can avoid all these
1516 * restrictions (same for remap_pfn_range). However we would like 1517 * restrictions (same for remap_pfn_range). However we would like
@@ -1525,10 +1526,10 @@ int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
1525 1526
1526 if (addr < vma->vm_start || addr >= vma->vm_end) 1527 if (addr < vma->vm_start || addr >= vma->vm_end)
1527 return -EFAULT; 1528 return -EFAULT;
1528 if (track_pfn_vma_new(vma, vma->vm_page_prot, pfn, PAGE_SIZE)) 1529 if (track_pfn_vma_new(vma, &pgprot, pfn, PAGE_SIZE))
1529 return -EINVAL; 1530 return -EINVAL;
1530 1531
1531 ret = insert_pfn(vma, addr, pfn, vma->vm_page_prot); 1532 ret = insert_pfn(vma, addr, pfn, pgprot);
1532 1533
1533 if (ret) 1534 if (ret)
1534 untrack_pfn_vma(vma, pfn, PAGE_SIZE); 1535 untrack_pfn_vma(vma, pfn, PAGE_SIZE);
@@ -1671,7 +1672,7 @@ int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
1671 1672
1672 vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP; 1673 vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP;
1673 1674
1674 err = track_pfn_vma_new(vma, prot, pfn, PAGE_ALIGN(size)); 1675 err = track_pfn_vma_new(vma, &prot, pfn, PAGE_ALIGN(size));
1675 if (err) { 1676 if (err) {
1676 /* 1677 /*
1677 * To indicate that track_pfn related cleanup is not 1678 * To indicate that track_pfn related cleanup is not