author		Thomas Gleixner <tglx@linutronix.de>	2007-10-11 05:17:18 -0400
committer	Thomas Gleixner <tglx@linutronix.de>	2007-10-11 05:17:18 -0400
commit	95119fbd87aabc263746731462062af5a38c0222 (patch)
tree	4a424a187fa1ddcb545b856d81b5b4880da21219 /arch/x86/mm/pageattr_64.c
parent	2a6c8d5a7e92a3e8db30fa0c5956a29c79d7e5a3 (diff)
x86_64: move mm
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/x86/mm/pageattr_64.c')
-rw-r--r--	arch/x86/mm/pageattr_64.c	249
1 file changed, 249 insertions(+), 0 deletions(-)
diff --git a/arch/x86/mm/pageattr_64.c b/arch/x86/mm/pageattr_64.c
new file mode 100644
index 000000000000..10b9809ce821
--- /dev/null
+++ b/arch/x86/mm/pageattr_64.c
@@ -0,0 +1,249 @@
/*
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Thanks to Ben LaHaise for precious feedback.
 */

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/io.h>

pte_t *lookup_address(unsigned long address)
{
	pgd_t *pgd = pgd_offset_k(address);
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	if (pgd_none(*pgd))
		return NULL;
	pud = pud_offset(pgd, address);
	if (!pud_present(*pud))
		return NULL;
	pmd = pmd_offset(pud, address);
	if (!pmd_present(*pmd))
		return NULL;
	if (pmd_large(*pmd))
		return (pte_t *)pmd;
	pte = pte_offset_kernel(pmd, address);
	if (pte && !pte_present(*pte))
		pte = NULL;
	return pte;
}
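
/*
 * Usage sketch (illustrative, not part of the original commit):
 * lookup_address() returns the PTE mapping a kernel virtual address,
 * or the PMD cast to a pte_t * when the address is covered by a 2MB
 * large page, so callers can distinguish the two cases with pte_huge():
 *
 *	pte_t *pte = lookup_address(address);
 *	if (pte && pte_huge(*pte))
 *		printk("2MB mapping; returned pointer is really the pmd\n");
 *	else if (pte)
 *		printk("4k mapping at pfn %lx\n", pte_pfn(*pte));
 */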

static struct page *split_large_page(unsigned long address, pgprot_t prot,
				     pgprot_t ref_prot)
{
	int i;
	unsigned long addr;
	struct page *base = alloc_pages(GFP_KERNEL, 0);
	pte_t *pbase;
	if (!base)
		return NULL;
	/*
	 * page_private is used to track how many entries in
	 * the page table page have non-standard attributes.
	 */
	SetPagePrivate(base);
	page_private(base) = 0;

	address = __pa(address);
	addr = address & LARGE_PAGE_MASK;
	pbase = (pte_t *)page_address(base);
	for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE) {
		pbase[i] = pfn_pte(addr >> PAGE_SHIFT,
				   addr == address ? prot : ref_prot);
	}
	return base;
}
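
/*
 * Worked example (illustrative, not part of the original commit): with
 * 2MB large pages LARGE_PAGE_MASK clears the low 21 bits, so for a
 * physical address of 0x1234567000:
 *
 *	addr     = 0x1234567000 & LARGE_PAGE_MASK = 0x1234400000
 *	pbase[i] maps addr + i * PAGE_SIZE for i = 0 .. PTRS_PER_PTE - 1
 *
 * All 512 entries together cover the original 2MB region; only entry
 * i = 0x167, where addr == address, receives 'prot', while the rest keep
 * ref_prot. The page_private() count set up above later records how many
 * entries still carry non-standard attributes.
 */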

static void cache_flush_page(void *adr)
{
	int i;
	for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
		asm volatile("clflush (%0)" :: "r" (adr + i));
}

static void flush_kernel_map(void *arg)
{
	struct list_head *l = (struct list_head *)arg;
	struct page *pg;

	/*
	 * When clflush is available, always use it because it is
	 * much cheaper than WBINVD.
	 */
	/* clflush is still broken. Disable for now. */
	if (1 || !cpu_has_clflush) {
		asm volatile("wbinvd" ::: "memory");
	} else {
		list_for_each_entry(pg, l, lru) {
			void *adr = page_address(pg);
			cache_flush_page(adr);
		}
	}
	__flush_tlb_all();
}
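
/*
 * Note (illustrative, not part of the original commit):
 * boot_cpu_data.x86_clflush_size is the cache line size, typically 64
 * bytes on x86-64, so cache_flush_page() issues 4096 / 64 = 64 targeted
 * clflush instructions per page. WBINVD, by contrast, writes back and
 * invalidates the entire cache on every CPU, which is why the clflush
 * path is preferred once the hardware problems are resolved.
 */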

static inline void flush_map(struct list_head *l)
{
	on_each_cpu(flush_kernel_map, l, 1, 1);
}

static LIST_HEAD(deferred_pages); /* protected by init_mm.mmap_sem */

static inline void save_page(struct page *fpage)
{
	if (!test_and_set_bit(PG_arch_1, &fpage->flags))
		list_add(&fpage->lru, &deferred_pages);
}

/*
 * No more special protections in this 2/4MB area - revert to a
 * large page again.
 */
static void revert_page(unsigned long address, pgprot_t ref_prot)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t large_pte;
	unsigned long pfn;

	pgd = pgd_offset_k(address);
	BUG_ON(pgd_none(*pgd));
	pud = pud_offset(pgd, address);
	BUG_ON(pud_none(*pud));
	pmd = pmd_offset(pud, address);
	BUG_ON(pmd_val(*pmd) & _PAGE_PSE);
	pfn = (__pa(address) & LARGE_PAGE_MASK) >> PAGE_SHIFT;
	large_pte = pfn_pte(pfn, ref_prot);
	large_pte = pte_mkhuge(large_pte);
	set_pte((pte_t *)pmd, large_pte);
}
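
/*
 * Note (illustrative, not part of the original commit): at this point
 * the PMD still points to the split 4k page-table page (hence the
 * BUG_ON against _PAGE_PSE above); overwriting the PMD slot with the
 * huge PTE makes the whole 2MB region use a single TLB entry again.
 * The orphaned page-table page is freed later from global_flush_tlb()
 * via the deferred_pages list once page_private() has dropped to zero.
 */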

static int
__change_page_attr(unsigned long address, unsigned long pfn, pgprot_t prot,
		   pgprot_t ref_prot)
{
	pte_t *kpte;
	struct page *kpte_page;
	pgprot_t ref_prot2;

	kpte = lookup_address(address);
	if (!kpte)
		return 0;
	kpte_page = virt_to_page(((unsigned long)kpte) & PAGE_MASK);
	BUG_ON(PageLRU(kpte_page));
	BUG_ON(PageCompound(kpte_page));
	if (pgprot_val(prot) != pgprot_val(ref_prot)) {
		if (!pte_huge(*kpte)) {
			set_pte(kpte, pfn_pte(pfn, prot));
		} else {
			/*
			 * split_large_page will take the reference for this
			 * change_page_attr on the split page.
			 */
			struct page *split;
			ref_prot2 = pte_pgprot(pte_clrhuge(*kpte));
			split = split_large_page(address, prot, ref_prot2);
			if (!split)
				return -ENOMEM;
			set_pte(kpte, mk_pte(split, ref_prot2));
			kpte_page = split;
		}
		page_private(kpte_page)++;
	} else if (!pte_huge(*kpte)) {
		set_pte(kpte, pfn_pte(pfn, ref_prot));
		BUG_ON(page_private(kpte_page) == 0);
		page_private(kpte_page)--;
	} else
		BUG();

	/* On x86-64 the direct mapping set up at boot does not use 4k pages. */
	BUG_ON(PageReserved(kpte_page));

	save_page(kpte_page);
	if (page_private(kpte_page) == 0)
		revert_page(address, ref_prot);
	return 0;
}
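
/*
 * Summary (illustrative, not part of the original commit) of the
 * bookkeeping in __change_page_attr(), per page-table page:
 *
 *	prot != ref_prot:  split the large page if needed, then
 *	                   page_private(kpte_page)++
 *	prot == ref_prot:  restore the default PTE and
 *	                   page_private(kpte_page)--
 *	count hits zero:   revert_page() merges the 2MB region back
 *	                   into a single large page
 *
 * so page_private(kpte_page) counts the PTEs in that 2MB region that
 * still differ from ref_prot.
 */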

/*
 * Change the page attributes of a page in the linear mapping.
 *
 * This should be used when a page is mapped with a different caching policy
 * than write-back somewhere - some CPUs do not like it when mappings with
 * different caching policies exist. This changes the page attributes of the
 * in-kernel linear mapping too.
 *
 * The caller needs to ensure that there are no conflicting mappings elsewhere.
 * This function only deals with the kernel linear map.
 *
 * Caller must call global_flush_tlb() after this.
 */
int change_page_attr_addr(unsigned long address, int numpages, pgprot_t prot)
{
	int err = 0, kernel_map = 0;
	int i;

	if (address >= __START_KERNEL_map
	    && address < __START_KERNEL_map + KERNEL_TEXT_SIZE) {
		address = (unsigned long)__va(__pa(address));
		kernel_map = 1;
	}

	down_write(&init_mm.mmap_sem);
	for (i = 0; i < numpages; i++, address += PAGE_SIZE) {
		unsigned long pfn = __pa(address) >> PAGE_SHIFT;

		if (!kernel_map || pte_present(pfn_pte(0, prot))) {
			err = __change_page_attr(address, pfn, prot, PAGE_KERNEL);
			if (err)
				break;
		}
		/*
		 * Handle the kernel mapping too, which aliases part of
		 * the lowmem.
		 */
		if (__pa(address) < KERNEL_TEXT_SIZE) {
			unsigned long addr2;
			pgprot_t prot2;
			addr2 = __START_KERNEL_map + __pa(address);
			/* Make sure the kernel mappings stay executable */
			prot2 = pte_pgprot(pte_mkexec(pfn_pte(0, prot)));
			err = __change_page_attr(addr2, pfn, prot2,
						 PAGE_KERNEL_EXEC);
		}
	}
	up_write(&init_mm.mmap_sem);
	return err;
}
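
/*
 * Usage sketch (illustrative, not part of the original commit): map one
 * page of the kernel linear mapping uncached and later restore it. As
 * the comment above requires, every attribute change is followed by
 * global_flush_tlb():
 *
 *	err = change_page_attr_addr(addr, 1, PAGE_KERNEL_NOCACHE);
 *	if (!err)
 *		global_flush_tlb();
 *	...
 *	change_page_attr_addr(addr, 1, PAGE_KERNEL);
 *	global_flush_tlb();
 *
 * where 'addr' is assumed to be a linear-map address backed by RAM.
 */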

/* Don't call this for MMIO areas that may not have a mem_map entry */
int change_page_attr(struct page *page, int numpages, pgprot_t prot)
{
	unsigned long addr = (unsigned long)page_address(page);
	return change_page_attr_addr(addr, numpages, prot);
}
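
/*
 * Sketch (illustrative, not part of the original commit): the struct
 * page based wrapper can write-protect an ordinary RAM page, where
 * 'buf' is assumed to point into kernel RAM with a mem_map entry:
 *
 *	struct page *pg = virt_to_page(buf);
 *	change_page_attr(pg, 1, PAGE_KERNEL_RO);
 *	global_flush_tlb();
 */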

void global_flush_tlb(void)
{
	struct page *pg, *next;
	struct list_head l;

	down_read(&init_mm.mmap_sem);
	list_replace_init(&deferred_pages, &l);
	up_read(&init_mm.mmap_sem);

	flush_map(&l);

	list_for_each_entry_safe(pg, next, &l, lru) {
		list_del(&pg->lru);
		clear_bit(PG_arch_1, &pg->flags);
		if (page_private(pg) != 0)
			continue;
		ClearPagePrivate(pg);
		__free_page(pg);
	}
}

EXPORT_SYMBOL(change_page_attr);
EXPORT_SYMBOL(global_flush_tlb);