author		Ingo Molnar <mingo@elte.hu>	2008-01-30 07:33:59 -0500
committer	Ingo Molnar <mingo@elte.hu>	2008-01-30 07:33:59 -0500
commit		d6ee09a2a0865f5640592e73ca5da4b06404b200 (patch)
tree		c0a228ce6ef6bfe737a2b4e477523f8ede9e3e39 /arch
parent		a5f55035f63cf040b4a7161226ec35520cc10daa (diff)
x86: simplify pageattr_64.c
simplify pageattr_64.c.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'arch')
-rw-r--r--	arch/x86/mm/pageattr_64.c	168
1 file changed, 55 insertions(+), 113 deletions(-)
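For orientation, the calling convention of this API is unchanged by the patch: callers modify attributes, then flush. A minimal, hypothetical caller might look like the sketch below; the function name example_set_uncached, the choice of PAGE_KERNEL_NOCACHE, and the minimal error handling are illustrative assumptions, not part of this patch.

	#include <linux/mm.h>
	#include <asm/cacheflush.h>

	/*
	 * Hypothetical usage sketch: remap one kernel page as uncacheable,
	 * e.g. before handing it to a device that does not snoop caches.
	 * Real callers also need an error path that restores PAGE_KERNEL.
	 */
	static int example_set_uncached(struct page *page)
	{
		int err = change_page_attr(page, 1, PAGE_KERNEL_NOCACHE);

		if (err)
			return err;

		/* The new attribute only becomes visible after a global flush. */
		global_flush_tlb();
		return 0;
	}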
diff --git a/arch/x86/mm/pageattr_64.c b/arch/x86/mm/pageattr_64.c
index 7b734ee5b7fd..8493c469f877 100644
--- a/arch/x86/mm/pageattr_64.c
+++ b/arch/x86/mm/pageattr_64.c
@@ -9,6 +9,14 @@
 #include <linux/slab.h>
 #include <linux/mm.h>
 
+void clflush_cache_range(void *addr, int size)
+{
+	int i;
+
+	for (i = 0; i < size; i += boot_cpu_data.x86_clflush_size)
+		clflush(addr+i);
+}
+
 #include <asm/processor.h>
 #include <asm/tlbflush.h>
 #include <asm/uaccess.h>
@@ -47,12 +55,6 @@ split_large_page(unsigned long address, pgprot_t prot, pgprot_t ref_prot)
 	base = alloc_pages(GFP_KERNEL, 0);
 	if (!base)
 		return NULL;
-	/*
-	 * page_private is used to track the number of entries in
-	 * the page table page have non standard attributes.
-	 */
-	SetPagePrivate(base);
-	page_private(base) = 0;
 
 	address = __pa(address);
 	addr = address & LARGE_PAGE_MASK;
@@ -64,80 +66,13 @@ split_large_page(unsigned long address, pgprot_t prot, pgprot_t ref_prot)
 	return base;
 }
 
-void clflush_cache_range(void *addr, int size)
-{
-	int i;
-
-	for (i = 0; i < size; i += boot_cpu_data.x86_clflush_size)
-		clflush(addr+i);
-}
-
-static void flush_kernel_map(void *arg)
-{
-	struct list_head *l = (struct list_head *)arg;
-	struct page *pg;
-
-	__flush_tlb_all();
-
-	/* When clflush is available always use it because it is
-	   much cheaper than WBINVD. */
-	/* clflush is still broken. Disable for now. */
-	if (1 || !cpu_has_clflush) {
-		wbinvd();
-	} else {
-		list_for_each_entry(pg, l, lru) {
-			void *addr = page_address(pg);
-
-			clflush_cache_range(addr, PAGE_SIZE);
-		}
-	}
-}
-
-static inline void flush_map(struct list_head *l)
-{
-	on_each_cpu(flush_kernel_map, l, 1, 1);
-}
-
-static LIST_HEAD(deferred_pages); /* protected by init_mm.mmap_sem */
-
-static inline void save_page(struct page *fpage)
-{
-	if (!test_and_set_bit(PG_arch_1, &fpage->flags))
-		list_add(&fpage->lru, &deferred_pages);
-}
-
-/*
- * No more special protections in this 2/4MB area - revert to a
- * large page again.
- */
-static void revert_page(unsigned long address, pgprot_t ref_prot)
-{
-	unsigned long pfn;
-	pgd_t *pgd;
-	pud_t *pud;
-	pmd_t *pmd;
-	pte_t large_pte;
-
-	pgd = pgd_offset_k(address);
-	BUG_ON(pgd_none(*pgd));
-	pud = pud_offset(pgd, address);
-	BUG_ON(pud_none(*pud));
-	pmd = pmd_offset(pud, address);
-	BUG_ON(pmd_val(*pmd) & _PAGE_PSE);
-	pfn = (__pa(address) & LARGE_PAGE_MASK) >> PAGE_SHIFT;
-	large_pte = pfn_pte(pfn, ref_prot);
-	large_pte = pte_mkhuge(large_pte);
-
-	set_pte((pte_t *)pmd, large_pte);
-}
-
 static int
 __change_page_attr(unsigned long address, unsigned long pfn, pgprot_t prot,
 		   pgprot_t ref_prot)
 {
 	struct page *kpte_page;
-	pgprot_t ref_prot2;
 	pte_t *kpte;
+	pgprot_t ref_prot2, oldprot;
 	int level;
 
 	kpte = lookup_address(address, &level);
@@ -145,8 +80,12 @@ __change_page_attr(unsigned long address, unsigned long pfn, pgprot_t prot,
 		return 0;
 
 	kpte_page = virt_to_page(kpte);
+	oldprot = pte_pgprot(*kpte);
 	BUG_ON(PageLRU(kpte_page));
 	BUG_ON(PageCompound(kpte_page));
+	ref_prot = canon_pgprot(ref_prot);
+	prot = canon_pgprot(prot);
+
 	if (pgprot_val(prot) != pgprot_val(ref_prot)) {
 		if (level == 4) {
 			set_pte(kpte, pfn_pte(pfn, prot));
@@ -165,38 +104,29 @@ __change_page_attr(unsigned long address, unsigned long pfn, pgprot_t prot,
 			set_pte(kpte, mk_pte(split, ref_prot2));
 			kpte_page = split;
 		}
-		page_private(kpte_page)++;
 	} else {
 		if (level == 4) {
 			set_pte(kpte, pfn_pte(pfn, ref_prot));
-			BUG_ON(page_private(kpte_page) == 0);
-			page_private(kpte_page)--;
 		} else
 			BUG();
 	}
 
-	/* on x86-64 the direct mapping set at boot is not using 4k pages */
-	BUG_ON(PageReserved(kpte_page));
-
-	save_page(kpte_page);
-	if (page_private(kpte_page) == 0)
-		revert_page(address, ref_prot);
 	return 0;
 }
 
-/*
- * Change the page attributes of an page in the linear mapping.
- *
- * This should be used when a page is mapped with a different caching policy
- * than write-back somewhere - some CPUs do not like it when mappings with
- * different caching policies exist. This changes the page attributes of the
- * in kernel linear mapping too.
+/**
+ * change_page_attr_addr - Change page table attributes in linear mapping
+ * @address: Virtual address in linear mapping.
+ * @numpages: Number of pages to change
+ * @prot: New page table attribute (PAGE_*)
  *
- * The caller needs to ensure that there are no conflicting mappings elsewhere.
- * This function only deals with the kernel linear map.
+ * Change page attributes of a page in the direct mapping. This is a variant
+ * of change_page_attr() that also works on memory holes that do not have
+ * mem_map entry (pfn_valid() is false).
  *
- * Caller must call global_flush_tlb() after this.
+ * See change_page_attr() documentation for more details.
  */
+
 int change_page_attr_addr(unsigned long address, int numpages, pgprot_t prot)
 {
 	int err = 0, kernel_map = 0, i;
@@ -236,7 +166,26 @@ int change_page_attr_addr(unsigned long address, int numpages, pgprot_t prot)
 	return err;
 }
 
-/* Don't call this for MMIO areas that may not have a mem_map entry */
+/**
+ * change_page_attr - Change page table attributes in the linear mapping.
+ * @page: First page to change
+ * @numpages: Number of pages to change
+ * @prot: New protection/caching type (PAGE_*)
+ *
+ * Returns 0 on success, otherwise a negated errno.
+ *
+ * This should be used when a page is mapped with a different caching policy
+ * than write-back somewhere - some CPUs do not like it when mappings with
+ * different caching policies exist. This changes the page attributes of the
+ * in kernel linear mapping too.
+ *
+ * Caller must call global_flush_tlb() later to make the changes active.
+ *
+ * The caller needs to ensure that there are no conflicting mappings elsewhere
+ * (e.g. in user space). This function only deals with the kernel linear map.
+ *
+ * For MMIO areas without mem_map use change_page_attr_addr() instead.
+ */
 int change_page_attr(struct page *page, int numpages, pgprot_t prot)
 {
 	unsigned long addr = (unsigned long)page_address(page);
@@ -245,29 +194,22 @@ int change_page_attr(struct page *page, int numpages, pgprot_t prot)
245} 194}
246EXPORT_SYMBOL(change_page_attr); 195EXPORT_SYMBOL(change_page_attr);
247 196
248void global_flush_tlb(void) 197static void flush_kernel_map(void *arg)
249{ 198{
250 struct page *pg, *next;
251 struct list_head l;
252
253 /* 199 /*
254 * Write-protect the semaphore, to exclude two contexts 200 * Flush all to work around Errata in early athlons regarding
255 * doing a list_replace_init() call in parallel and to 201 * large page flushing.
256 * exclude new additions to the deferred_pages list:
257 */ 202 */
258 down_write(&init_mm.mmap_sem); 203 __flush_tlb_all();
259 list_replace_init(&deferred_pages, &l);
260 up_write(&init_mm.mmap_sem);
261 204
262 flush_map(&l); 205 if (boot_cpu_data.x86_model >= 4)
206 wbinvd();
207}
263 208
264 list_for_each_entry_safe(pg, next, &l, lru) { 209void global_flush_tlb(void)
265 list_del(&pg->lru); 210{
266 clear_bit(PG_arch_1, &pg->flags); 211 BUG_ON(irqs_disabled());
267 if (page_private(pg) != 0) 212
268 continue; 213 on_each_cpu(flush_kernel_map, NULL, 1, 1);
269 ClearPagePrivate(pg);
270 __free_page(pg);
271 }
272} 214}
273EXPORT_SYMBOL(global_flush_tlb); 215EXPORT_SYMBOL(global_flush_tlb);
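As a usage note on clflush_cache_range(), which this patch moves to the top of the file: it walks an address range in x86_clflush_size steps and executes clflush on each cache line. A hedged sketch of an in-kernel caller follows; the function name example_flush_for_device and the barrier placement are assumptions for illustration, since clflush is only ordered relative to other stores by a fence.

	#include <asm/cacheflush.h>
	#include <asm/system.h>

	/*
	 * Hypothetical sketch: write back the cache lines covering buf
	 * before a non-coherent device reads it.
	 */
	static void example_flush_for_device(void *buf, int len)
	{
		mb();				/* order prior stores before the flushes */
		clflush_cache_range(buf, len);
		mb();				/* assumed: ensure flushes complete before DMA */
	}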