author		Thomas Gleixner <tglx@linutronix.de>	2008-02-04 10:48:07 -0500
committer	Ingo Molnar <mingo@elte.hu>		2008-02-04 10:48:07 -0500
commit		72e458dfa63b3db7a46f66b0eb19e9ff4e17fc0e (patch)
tree		701676e3de89589b8eb2cde50e8a8a903861873b /arch/x86
parent		6bb8383bebc02dae08a17f561401f58005f75c03 (diff)
x86: introduce struct cpa_data
The number of arguments which need to be transported is increasing and we
want to add flush optimizations and large page preserving. Create struct
cpa_data and pass a pointer instead of increasing the number of arguments
further.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
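The pattern the patch introduces is easy to see in isolation: instead of threading vaddr, numpages, mask_set and mask_clr through every helper, the top-level caller fills one struct and hands a single pointer down the call chain. Below is a minimal user-space sketch of that idea, not kernel code: the struct here uses plain unsigned long masks instead of pgprot_t, and apply_one_page() / change_attr_set_clr() are hypothetical stand-ins for __change_page_attr() and __change_page_attr_set_clr(), as are the example address and mask values.

#include <stdio.h>

/* Simplified stand-in for the kernel's struct cpa_data. */
struct cpa_data {
	unsigned long	vaddr;		/* virtual address to start at */
	int		numpages;	/* number of pages to touch */
	unsigned long	mask_set;	/* attribute bits to set */
	unsigned long	mask_clr;	/* attribute bits to clear */
};

#define PAGE_SIZE 4096UL

/* Dummy per-page worker; the kernel's __change_page_attr() plays this role. */
static int apply_one_page(unsigned long address, struct cpa_data *cpa)
{
	printf("page at %#lx: set %#lx, clear %#lx\n",
	       address, cpa->mask_set, cpa->mask_clr);
	return 0;
}

/* Mirrors __change_page_attr_set_clr(): one pointer instead of four args. */
static int change_attr_set_clr(struct cpa_data *cpa)
{
	int i, ret;

	for (i = 0; i < cpa->numpages; i++, cpa->vaddr += PAGE_SIZE) {
		ret = apply_one_page(cpa->vaddr, cpa);
		if (ret)
			return ret;
	}
	return 0;
}

int main(void)
{
	struct cpa_data cpa = {
		.vaddr	  = 0x100000UL,		/* arbitrary example address */
		.numpages = 2,
		.mask_set = 0x2,		/* e.g. an "RW" bit */
		.mask_clr = 0x0,
	};

	return change_attr_set_clr(&cpa);
}

As in the patch, adding a future field (for example a flush-optimization flag) then only touches the struct and the code that reads it, not every function signature in between.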
Diffstat (limited to 'arch/x86')
-rw-r--r--	arch/x86/mm/pageattr.c	75
1 file changed, 38 insertions(+), 37 deletions(-)
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 456ad0ab9c7e..d1c08308ecbb 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -16,6 +16,13 @@
 #include <asm/uaccess.h>
 #include <asm/pgalloc.h>
 
+struct cpa_data {
+	unsigned long	vaddr;
+	int		numpages;
+	pgprot_t	mask_set;
+	pgprot_t	mask_clr;
+};
+
 static inline int
 within(unsigned long addr, unsigned long start, unsigned long end)
 {
@@ -284,8 +291,7 @@ out_unlock:
 	return 0;
 }
 
-static int
-__change_page_attr(unsigned long address, pgprot_t mask_set, pgprot_t mask_clr)
+static int __change_page_attr(unsigned long address, struct cpa_data *cpa)
 {
 	struct page *kpte_page;
 	int level, err = 0;
@@ -305,12 +311,15 @@ repeat:
 		pgprot_t new_prot = pte_pgprot(old_pte);
 
 		if(!pte_val(old_pte)) {
-			WARN_ON_ONCE(1);
+			printk(KERN_WARNING "CPA: called for zero pte. "
+			       "vaddr = %lx cpa->vaddr = %lx\n", address,
+			       cpa->vaddr);
+			WARN_ON(1);
 			return -EINVAL;
 		}
 
-		pgprot_val(new_prot) &= ~pgprot_val(mask_clr);
-		pgprot_val(new_prot) |= pgprot_val(mask_set);
+		pgprot_val(new_prot) &= ~pgprot_val(cpa->mask_clr);
+		pgprot_val(new_prot) |= pgprot_val(cpa->mask_set);
 
 		new_prot = static_protections(new_prot, address);
 
@@ -343,12 +352,10 @@ repeat:
  * Modules and drivers should use the set_memory_* APIs instead.
  */
 
-
-static int
-change_page_attr_addr(unsigned long address, pgprot_t mask_set,
-		      pgprot_t mask_clr)
+static int change_page_attr_addr(struct cpa_data *cpa)
 {
 	int err;
+	unsigned long address = cpa->vaddr;
 
 #ifdef CONFIG_X86_64
 	unsigned long phys_addr = __pa(address);
@@ -362,7 +369,7 @@ change_page_attr_addr(unsigned long address, pgprot_t mask_set,
 	address = (unsigned long) __va(phys_addr);
 #endif
 
-	err = __change_page_attr(address, mask_set, mask_clr);
+	err = __change_page_attr(address, cpa);
 	if (err)
 		return err;
 
@@ -386,20 +393,19 @@ change_page_attr_addr(unsigned long address, pgprot_t mask_set,
 		 * everything between 0 and KERNEL_TEXT_SIZE, so do
 		 * not propagate lookup failures back to users:
 		 */
-		__change_page_attr(address, mask_set, mask_clr);
+		__change_page_attr(address, cpa);
 	}
 #endif
 	return err;
 }
 
-static int __change_page_attr_set_clr(unsigned long addr, int numpages,
-				       pgprot_t mask_set, pgprot_t mask_clr)
+static int __change_page_attr_set_clr(struct cpa_data *cpa)
 {
 	unsigned int i;
 	int ret;
 
-	for (i = 0; i < numpages ; i++, addr += PAGE_SIZE) {
-		ret = change_page_attr_addr(addr, mask_set, mask_clr);
+	for (i = 0; i < cpa->numpages ; i++, cpa->vaddr += PAGE_SIZE) {
+		ret = change_page_attr_addr(cpa);
 		if (ret)
 			return ret;
 	}
@@ -416,6 +422,7 @@ static inline int cache_attr(pgprot_t attr)
 static int change_page_attr_set_clr(unsigned long addr, int numpages,
 				    pgprot_t mask_set, pgprot_t mask_clr)
 {
+	struct cpa_data cpa;
 	int ret, cache;
 
 	/*
@@ -427,7 +434,12 @@ static int change_page_attr_set_clr(unsigned long addr, int numpages,
 	if (!pgprot_val(mask_set) && !pgprot_val(mask_clr))
 		return 0;
 
-	ret = __change_page_attr_set_clr(addr, numpages, mask_set, mask_clr);
+	cpa.vaddr = addr;
+	cpa.numpages = numpages;
+	cpa.mask_set = mask_set;
+	cpa.mask_clr = mask_clr;
+
+	ret = __change_page_attr_set_clr(&cpa);
 
 	/*
 	 * No need to flush, when we did not set any of the caching
@@ -548,37 +560,26 @@ int set_pages_rw(struct page *page, int numpages)
 	return set_memory_rw(addr, numpages);
 }
 
-
-#if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_CPA_DEBUG)
-static inline int __change_page_attr_set(unsigned long addr, int numpages,
-					 pgprot_t mask)
-{
-	return __change_page_attr_set_clr(addr, numpages, mask, __pgprot(0));
-}
-
-static inline int __change_page_attr_clear(unsigned long addr, int numpages,
-					   pgprot_t mask)
-{
-	return __change_page_attr_set_clr(addr, numpages, __pgprot(0), mask);
-}
-#endif
-
 #ifdef CONFIG_DEBUG_PAGEALLOC
 
 static int __set_pages_p(struct page *page, int numpages)
 {
-	unsigned long addr = (unsigned long)page_address(page);
+	struct cpa_data cpa = { .vaddr = (unsigned long) page_address(page),
+				.numpages = numpages,
+				.mask_set = __pgprot(_PAGE_PRESENT | _PAGE_RW),
+				.mask_clr = __pgprot(0)};
 
-	return __change_page_attr_set(addr, numpages,
-				      __pgprot(_PAGE_PRESENT | _PAGE_RW));
+	return __change_page_attr_set_clr(&cpa);
 }
 
 static int __set_pages_np(struct page *page, int numpages)
 {
-	unsigned long addr = (unsigned long)page_address(page);
+	struct cpa_data cpa = { .vaddr = (unsigned long) page_address(page),
+				.numpages = numpages,
+				.mask_set = __pgprot(0),
+				.mask_clr = __pgprot(_PAGE_PRESENT | _PAGE_RW)};
 
-	return __change_page_attr_clear(addr, numpages,
-					__pgprot(_PAGE_PRESENT));
+	return __change_page_attr_set_clr(&cpa);
 }
 
 void kernel_map_pages(struct page *page, int numpages, int enable)