author		venkatesh.pallipadi@intel.com <venkatesh.pallipadi@intel.com>	2008-12-18 14:41:29 -0500
committer	H. Peter Anvin <hpa@zytor.com>	2008-12-18 16:30:15 -0500
commit		2ab640379a0ab4cef746ced1d7e04a0941774bcb
tree		53837aaa257f463a0b40c4d00a80fec936558f49 /mm
parent		e121e418441525b5636321fe03d16f0193ad218e
x86: PAT: hooks in generic vm code to help archs to track pfnmap regions - v3
Impact: Introduces new hooks, which are currently null.

Introduce generic hooks in remap_pfn_range and vm_insert_pfn and
corresponding copy and free routines with reserve and free tracking.

Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com>
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
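Each generic stub in the patch is wrapped in #ifndef on its own name, so an architecture can override it by supplying a real implementation plus a same-named macro in its headers; the macro makes the null versions in mm/memory.c compile out. A minimal sketch of that override plumbing, assuming the declarations live in the arch's asm/pgtable.h (the location and extern block are illustrative, not part of this patch):

	/* Illustrative arch header: opt in to pfnmap tracking. */
	extern int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t prot,
				     unsigned long pfn, unsigned long size);
	extern int track_pfn_vma_copy(struct vm_area_struct *vma);
	extern void untrack_pfn_vma(struct vm_area_struct *vma, unsigned long pfn,
				    unsigned long size);

	/* Defining the macros makes the #ifndef stubs in mm/memory.c drop out. */
	#define track_pfn_vma_new	track_pfn_vma_new
	#define track_pfn_vma_copy	track_pfn_vma_copy
	#define untrack_pfn_vma		untrack_pfn_vma

The arch implementations would then reserve a memory type for the physical range in the track hooks and release it in untrack_pfn_vma, returning nonzero from the track hooks to make the mapping fail.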
Diffstat (limited to 'mm')
-rw-r--r--	mm/memory.c	76
1 file changed, 75 insertions(+), 1 deletion(-)
diff --git a/mm/memory.c b/mm/memory.c
index 8ca6bbf34ad6..1e8f0d347c0e 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -99,6 +99,50 @@ int randomize_va_space __read_mostly =
 					2;
 #endif
 
+#ifndef track_pfn_vma_new
+/*
+ * Interface that can be used by architecture code to keep track of
+ * memory type of pfn mappings (remap_pfn_range, vm_insert_pfn)
+ *
+ * track_pfn_vma_new is called when a _new_ pfn mapping is being established
+ * for physical range indicated by pfn and size.
+ */
+int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t prot,
+			unsigned long pfn, unsigned long size)
+{
+	return 0;
+}
+#endif
+
+#ifndef track_pfn_vma_copy
+/*
+ * Interface that can be used by architecture code to keep track of
+ * memory type of pfn mappings (remap_pfn_range, vm_insert_pfn)
+ *
+ * track_pfn_vma_copy is called when vma that is covering the pfnmap gets
+ * copied through copy_page_range().
+ */
+int track_pfn_vma_copy(struct vm_area_struct *vma)
+{
+	return 0;
+}
+#endif
+
+#ifndef untrack_pfn_vma
+/*
+ * Interface that can be used by architecture code to keep track of
+ * memory type of pfn mappings (remap_pfn_range, vm_insert_pfn)
+ *
+ * untrack_pfn_vma is called while unmapping a pfnmap for a region.
+ * untrack can be called for a specific region indicated by pfn and size or
+ * can be for the entire vma (in which case size can be zero).
+ */
+void untrack_pfn_vma(struct vm_area_struct *vma, unsigned long pfn,
+			unsigned long size)
+{
+}
+#endif
+
 static int __init disable_randmaps(char *s)
 {
 	randomize_va_space = 0;
@@ -669,6 +713,16 @@ int copy_page_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 	if (is_vm_hugetlb_page(vma))
 		return copy_hugetlb_page_range(dst_mm, src_mm, vma);
 
+	if (is_pfn_mapping(vma)) {
+		/*
+		 * We do not free on error cases below as remove_vma
+		 * gets called on error from higher level routine
+		 */
+		ret = track_pfn_vma_copy(vma);
+		if (ret)
+			return ret;
+	}
+
 	/*
 	 * We need to invalidate the secondary MMU mappings only when
 	 * there could be a permission downgrade on the ptes of the
@@ -915,6 +969,9 @@ unsigned long unmap_vmas(struct mmu_gather **tlbp,
 		if (vma->vm_flags & VM_ACCOUNT)
 			*nr_accounted += (end - start) >> PAGE_SHIFT;
 
+		if (is_pfn_mapping(vma))
+			untrack_pfn_vma(vma, 0, 0);
+
 		while (start != end) {
 			if (!tlb_start_valid) {
 				tlb_start = start;
@@ -1473,6 +1530,7 @@ out:
 int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
 			unsigned long pfn)
 {
+	int ret;
 	/*
 	 * Technically, architectures with pte_special can avoid all these
 	 * restrictions (same for remap_pfn_range). However we would like
@@ -1487,7 +1545,15 @@ int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
 
 	if (addr < vma->vm_start || addr >= vma->vm_end)
 		return -EFAULT;
-	return insert_pfn(vma, addr, pfn, vma->vm_page_prot);
+	if (track_pfn_vma_new(vma, vma->vm_page_prot, pfn, PAGE_SIZE))
+		return -EINVAL;
+
+	ret = insert_pfn(vma, addr, pfn, vma->vm_page_prot);
+
+	if (ret)
+		untrack_pfn_vma(vma, pfn, PAGE_SIZE);
+
+	return ret;
 }
 EXPORT_SYMBOL(vm_insert_pfn);
 
@@ -1625,6 +1691,10 @@ int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
 
 	vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP;
 
+	err = track_pfn_vma_new(vma, prot, pfn, PAGE_ALIGN(size));
+	if (err)
+		return -EINVAL;
+
 	BUG_ON(addr >= end);
 	pfn -= addr >> PAGE_SHIFT;
 	pgd = pgd_offset(mm, addr);
@@ -1636,6 +1706,10 @@ int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
 		if (err)
 			break;
 	} while (pgd++, addr = next, addr != end);
+
+	if (err)
+		untrack_pfn_vma(vma, pfn, PAGE_ALIGN(size));
+
 	return err;
 }
 EXPORT_SYMBOL(remap_pfn_range);
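Callers need no changes to pick this up: any driver that maps device memory with remap_pfn_range now goes through track_pfn_vma_new on setup and untrack_pfn_vma on teardown (via unmap_vmas). A hypothetical mmap handler, to show where the hooks fire; mydev_mmap and mydev_phys_base are made-up names, not from this patch:

	/* Hypothetical driver mmap handler. */
	static int mydev_mmap(struct file *file, struct vm_area_struct *vma)
	{
		unsigned long size = vma->vm_end - vma->vm_start;

		/*
		 * remap_pfn_range() now calls track_pfn_vma_new() before
		 * building the page tables and returns -EINVAL if the
		 * architecture rejects the memory type for this range.
		 */
		return remap_pfn_range(vma, vma->vm_start,
				       mydev_phys_base >> PAGE_SHIFT,
				       size, vma->vm_page_prot);
	}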