author    Ingo Molnar <mingo@elte.hu>    2008-01-30 07:34:03 -0500
committer Ingo Molnar <mingo@elte.hu>    2008-01-30 07:34:03 -0500
commit    44af6c41e6a055a0b9bd0d2067cfbc8e9f6a24df (patch)
tree      34cea33794739a8bb7505ea76891b6b2a923a5ee /arch
parent    31a0717cbc6191fc56326fdf95548d87055686e3 (diff)
x86: backmerge 64-bit details into 32-bit pageattr.c
backmerge 64-bit details into 32-bit pageattr.c. The pageattr_32.c and
pageattr_64.c files are now identical.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
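The interface being unified here is the page-attribute API that drivers use when they need a non write-back view of kernel pages. A minimal usage sketch, not part of this patch and only an illustration: it assumes the 2.6.24-era declarations of change_page_attr() and global_flush_tlb() from <asm/cacheflush.h>, PAGE_KERNEL_NOCACHE from <asm/pgtable.h>, and a hypothetical helper name make_pages_uncached().

#include <linux/mm.h>
#include <asm/cacheflush.h>
#include <asm/pgtable.h>

/* Hypothetical helper: switch a run of lowmem pages to an uncached
 * mapping in the kernel linear map, then make the change visible. */
static int make_pages_uncached(struct page *page, int numpages)
{
	int err;

	/* Rewrite the page attributes in the kernel linear mapping. */
	err = change_page_attr(page, numpages, PAGE_KERNEL_NOCACHE);
	if (err)
		return err;

	/* The new attributes only take effect after the global TLB flush. */
	global_flush_tlb();
	return 0;
}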
Diffstat (limited to 'arch')
-rw-r--r--  arch/x86/mm/pageattr_32.c | 134
1 file changed, 87 insertions(+), 47 deletions(-)
diff --git a/arch/x86/mm/pageattr_32.c b/arch/x86/mm/pageattr_32.c
index 1c7bd81a4194..251613449dd6 100644
--- a/arch/x86/mm/pageattr_32.c
+++ b/arch/x86/mm/pageattr_32.c
@@ -39,23 +39,26 @@ pte_t *lookup_address(unsigned long address, int *level)
 
 static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
 {
-	struct page *page;
-
 	/* change init_mm */
 	set_pte_atomic(kpte, pte);
+#ifdef CONFIG_X86_32
 	if (SHARED_KERNEL_PMD)
 		return;
+	{
+		struct page *page;
 
-	for (page = pgd_list; page; page = (struct page *)page->index) {
-		pgd_t *pgd;
-		pud_t *pud;
-		pmd_t *pmd;
+		for (page = pgd_list; page; page = (struct page *)page->index) {
+			pgd_t *pgd;
+			pud_t *pud;
+			pmd_t *pmd;
 
-		pgd = (pgd_t *)page_address(page) + pgd_index(address);
-		pud = pud_offset(pgd, address);
-		pmd = pmd_offset(pud, address);
-		set_pte_atomic((pte_t *)pmd, pte);
+			pgd = (pgd_t *)page_address(page) + pgd_index(address);
+			pud = pud_offset(pgd, address);
+			pmd = pmd_offset(pud, address);
+			set_pte_atomic((pte_t *)pmd, pte);
+		}
 	}
+#endif
 }
 
 static int split_large_page(pte_t *kpte, unsigned long address)
@@ -89,7 +92,9 @@ static int split_large_page(pte_t *kpte, unsigned long address)
 	address = __pa(address);
 	addr = address & LARGE_PAGE_MASK;
 	pbase = (pte_t *)page_address(base);
+#ifdef CONFIG_X86_32
 	paravirt_alloc_pt(&init_mm, page_to_pfn(base));
+#endif
 
 	for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE)
 		set_pte(&pbase[i], pfn_pte(addr >> PAGE_SHIFT, ref_prot));
@@ -109,15 +114,14 @@ out_unlock:
 	return 0;
 }
 
-static int __change_page_attr(struct page *page, pgprot_t prot)
+static int
+__change_page_attr(unsigned long address, struct page *page, pgprot_t prot)
 {
 	struct page *kpte_page;
-	unsigned long address;
 	int level, err = 0;
 	pte_t *kpte;
 
 	BUG_ON(PageHighMem(page));
-	address = (unsigned long)page_address(page);
 
 repeat:
 	kpte = lookup_address(address, &level);
@@ -146,51 +150,87 @@ repeat:
 	return err;
 }
 
-/*
- * Change the page attributes of an page in the linear mapping.
+/**
+ * change_page_attr_addr - Change page table attributes in linear mapping
+ * @address: Virtual address in linear mapping.
+ * @numpages: Number of pages to change
+ * @prot: New page table attribute (PAGE_*)
  *
- * This should be used when a page is mapped with a different caching policy
- * than write-back somewhere - some CPUs do not like it when mappings with
- * different caching policies exist. This changes the page attributes of the
- * in kernel linear mapping too.
- *
- * The caller needs to ensure that there are no conflicting mappings elsewhere.
- * This function only deals with the kernel linear map.
+ * Change page attributes of a page in the direct mapping. This is a variant
+ * of change_page_attr() that also works on memory holes that do not have
+ * mem_map entry (pfn_valid() is false).
  *
- * Caller must call global_flush_tlb() after this.
+ * See change_page_attr() documentation for more details.
  */
-int change_page_attr(struct page *page, int numpages, pgprot_t prot)
+
+int change_page_attr_addr(unsigned long address, int numpages, pgprot_t prot)
 {
-	int err = 0, i;
+	int err = 0, kernel_map = 0, i;
+
+#ifdef CONFIG_X86_64
+	if (address >= __START_KERNEL_map &&
+			address < __START_KERNEL_map + KERNEL_TEXT_SIZE) {
 
-	for (i = 0; i < numpages; i++, page++) {
-		err = __change_page_attr(page, prot);
-		if (err)
-			break;
+		address = (unsigned long)__va(__pa(address));
+		kernel_map = 1;
+	}
+#endif
+
+	for (i = 0; i < numpages; i++, address += PAGE_SIZE) {
+		unsigned long pfn = __pa(address) >> PAGE_SHIFT;
+
+		if (!kernel_map || pte_present(pfn_pte(0, prot))) {
+			err = __change_page_attr(address, pfn_to_page(pfn), prot);
+			if (err)
+				break;
+		}
+#ifdef CONFIG_X86_64
+		/*
+		 * Handle kernel mapping too which aliases part of
+		 * lowmem:
+		 */
+		if (__pa(address) < KERNEL_TEXT_SIZE) {
+			unsigned long addr2;
+			pgprot_t prot2;
+
+			addr2 = __START_KERNEL_map + __pa(address);
+			/* Make sure the kernel mappings stay executable */
+			prot2 = pte_pgprot(pte_mkexec(pfn_pte(0, prot)));
+			err = __change_page_attr(addr2, pfn_to_page(pfn), prot2);
+		}
+#endif
 	}
 
 	return err;
 }
-EXPORT_SYMBOL(change_page_attr);
 
-int change_page_attr_addr(unsigned long addr, int numpages, pgprot_t prot)
+/**
+ * change_page_attr - Change page table attributes in the linear mapping.
+ * @page: First page to change
+ * @numpages: Number of pages to change
+ * @prot: New protection/caching type (PAGE_*)
+ *
+ * Returns 0 on success, otherwise a negated errno.
+ *
+ * This should be used when a page is mapped with a different caching policy
+ * than write-back somewhere - some CPUs do not like it when mappings with
+ * different caching policies exist. This changes the page attributes of the
+ * in kernel linear mapping too.
+ *
+ * Caller must call global_flush_tlb() later to make the changes active.
+ *
+ * The caller needs to ensure that there are no conflicting mappings elsewhere
+ * (e.g. in user space) * This function only deals with the kernel linear map.
+ *
+ * For MMIO areas without mem_map use change_page_attr_addr() instead.
+ */
+int change_page_attr(struct page *page, int numpages, pgprot_t prot)
 {
-	int i;
-	unsigned long pfn = (__pa(addr) >> PAGE_SHIFT);
-
-	for (i = 0; i < numpages; i++) {
-		if (!pfn_valid(pfn + i)) {
-			WARN_ON_ONCE(1);
-			break;
-		} else {
-			int level;
-			pte_t *pte = lookup_address(addr + i*PAGE_SIZE, &level);
-			BUG_ON(pte && pte_none(*pte));
-		}
-	}
+	unsigned long addr = (unsigned long)page_address(page);
 
-	return change_page_attr(virt_to_page(addr), i, prot);
+	return change_page_attr_addr(addr, numpages, prot);
 }
+EXPORT_SYMBOL(change_page_attr);
 
 static void flush_kernel_map(void *arg)
 {
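The kernel-doc added in this hunk points MMIO and mem_map-less ranges at change_page_attr_addr() rather than change_page_attr(). A rough sketch of that call path, an illustration only: it assumes change_page_attr_addr() is declared in <asm/cacheflush.h> as on 64-bit at this point in time, and uses a hypothetical helper name uncache_linear_range().

#include <asm/cacheflush.h>
#include <asm/pgtable.h>

/* Hypothetical helper: uncache part of the direct mapping given only a
 * virtual address, e.g. a reserved hole with no struct page behind it. */
static int uncache_linear_range(unsigned long virt, int numpages)
{
	int err;

	err = change_page_attr_addr(virt, numpages, PAGE_KERNEL_NOCACHE);
	if (!err)
		global_flush_tlb();	/* changes become active only now */
	return err;
}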