author     Jonathan Herman <hermanjl@cs.unc.edu>   2013-01-17 16:15:55 -0500
committer  Jonathan Herman <hermanjl@cs.unc.edu>   2013-01-17 16:15:55 -0500
commit     8dea78da5cee153b8af9c07a2745f6c55057fe12 (patch)
tree       a8f4d49d63b1ecc92f2fddceba0655b2472c5bd9 /mm/pgtable-generic.c
parent     406089d01562f1e2bf9f089fd7637009ebaad589 (diff)
Patched in Tegra support.
Diffstat (limited to 'mm/pgtable-generic.c')
-rw-r--r--  mm/pgtable-generic.c  68
1 file changed, 8 insertions(+), 60 deletions(-)
diff --git a/mm/pgtable-generic.c b/mm/pgtable-generic.c
index 0c8323fe6c8..eb663fb533e 100644
--- a/mm/pgtable-generic.c
+++ b/mm/pgtable-generic.c
@@ -12,8 +12,8 @@
 
 #ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
 /*
- * Only sets the access flags (dirty, accessed), as well as write
- * permission. Furthermore, we know it always gets set to a "more
+ * Only sets the access flags (dirty, accessed, and
+ * writable). Furthermore, we know it always gets set to a "more
  * permissive" setting, which allows most architectures to optimize
  * this. We return whether the PTE actually changed, which in turn
  * instructs the caller to do things like update__mmu_cache. This
@@ -27,7 +27,7 @@ int ptep_set_access_flags(struct vm_area_struct *vma,
         int changed = !pte_same(*ptep, entry);
         if (changed) {
                 set_pte_at(vma->vm_mm, address, ptep, entry);
-                flush_tlb_fix_spurious_fault(vma, address);
+                flush_tlb_page(vma, address);
         }
         return changed;
 }
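For context on the two hunks above: the comment documents the contract of the generic ptep_set_access_flags() helper, which returns whether the PTE actually changed so the caller can decide whether an MMU-cache update is needed. Below is a minimal sketch of a caller that follows this contract; the example_fixup_access() wrapper is hypothetical and not part of this patch, and it assumes only the generic helpers made available through <linux/mm.h> and <asm/pgtable.h>.

#include <linux/mm.h>
#include <asm/pgtable.h>

/* Hypothetical fault-fixup helper, for illustration only. */
static void example_fixup_access(struct vm_area_struct *vma,
                                 unsigned long address, pte_t *ptep,
                                 pte_t entry, int dirty)
{
        /* Nonzero return means the PTE was really rewritten. */
        if (ptep_set_access_flags(vma, address, ptep, entry, dirty))
                update_mmu_cache(vma, address, ptep);
        /* Otherwise the fault was spurious and nothing needs refreshing. */
}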
@@ -70,11 +70,10 @@ int pmdp_clear_flush_young(struct vm_area_struct *vma,
                            unsigned long address, pmd_t *pmdp)
 {
         int young;
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-        VM_BUG_ON(address & ~HPAGE_PMD_MASK);
-#else
+#ifndef CONFIG_TRANSPARENT_HUGEPAGE
         BUG();
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+        VM_BUG_ON(address & ~HPAGE_PMD_MASK);
         young = pmdp_test_and_clear_young(vma, address, pmdp);
         if (young)
                 flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
@@ -88,8 +87,7 @@ pte_t ptep_clear_flush(struct vm_area_struct *vma, unsigned long address,
 {
         pte_t pte;
         pte = ptep_get_and_clear((vma)->vm_mm, address, ptep);
-        if (pte_accessible(pte))
-                flush_tlb_page(vma, address);
+        flush_tlb_page(vma, address);
         return pte;
 }
 #endif
@@ -110,8 +108,8 @@ pmd_t pmdp_clear_flush(struct vm_area_struct *vma, unsigned long address,
 
 #ifndef __HAVE_ARCH_PMDP_SPLITTING_FLUSH
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
-void pmdp_splitting_flush(struct vm_area_struct *vma, unsigned long address,
+pmd_t pmdp_splitting_flush(struct vm_area_struct *vma, unsigned long address,
                           pmd_t *pmdp)
 {
         pmd_t pmd = pmd_mksplitting(*pmdp);
         VM_BUG_ON(address & ~HPAGE_PMD_MASK);
@@ -121,53 +119,3 @@ void pmdp_splitting_flush(struct vm_area_struct *vma, unsigned long address,
 }
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 #endif
-
-#ifndef __HAVE_ARCH_PGTABLE_DEPOSIT
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-void pgtable_trans_huge_deposit(struct mm_struct *mm, pgtable_t pgtable)
-{
-        assert_spin_locked(&mm->page_table_lock);
-
-        /* FIFO */
-        if (!mm->pmd_huge_pte)
-                INIT_LIST_HEAD(&pgtable->lru);
-        else
-                list_add(&pgtable->lru, &mm->pmd_huge_pte->lru);
-        mm->pmd_huge_pte = pgtable;
-}
-#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
-#endif
-
-#ifndef __HAVE_ARCH_PGTABLE_WITHDRAW
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-/* no "address" argument so destroys page coloring of some arch */
-pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm)
-{
-        pgtable_t pgtable;
-
-        assert_spin_locked(&mm->page_table_lock);
-
-        /* FIFO */
-        pgtable = mm->pmd_huge_pte;
-        if (list_empty(&pgtable->lru))
-                mm->pmd_huge_pte = NULL;
-        else {
-                mm->pmd_huge_pte = list_entry(pgtable->lru.next,
-                                              struct page, lru);
-                list_del(&pgtable->lru);
-        }
-        return pgtable;
-}
-#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
-#endif
-
-#ifndef __HAVE_ARCH_PMDP_INVALIDATE
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
-                     pmd_t *pmdp)
-{
-        set_pmd_at(vma->vm_mm, address, pmdp, pmd_mknotpresent(*pmdp));
-        flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
-}
-#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
-#endif
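For context on the last hunk: the removed pgtable_trans_huge_deposit()/pgtable_trans_huge_withdraw() pair stashes a preallocated PTE page on mm->pmd_huge_pte, under mm->page_table_lock, so that it can be handed back when the corresponding huge PMD is later split. The sketch below illustrates that intended pairing; the two example_* callers are hypothetical simplifications rather than code from this tree, and they assume only the helpers and the locking visible in the removed lines.

#include <linux/mm.h>
#include <asm/pgtable.h>

/* Hypothetical caller on the collapse side: park the spare PTE page. */
static void example_deposit(struct mm_struct *mm, pgtable_t pgtable)
{
        spin_lock(&mm->page_table_lock);
        pgtable_trans_huge_deposit(mm, pgtable);  /* FIFO list on mm->pmd_huge_pte */
        spin_unlock(&mm->page_table_lock);
}

/* Hypothetical caller on the split side: take a deposited page back. */
static pgtable_t example_withdraw(struct mm_struct *mm)
{
        pgtable_t pgtable;

        spin_lock(&mm->page_table_lock);
        pgtable = pgtable_trans_huge_withdraw(mm);
        spin_unlock(&mm->page_table_lock);
        return pgtable;
}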