author	Steve French <sfrench@us.ibm.com>	2008-03-01 13:29:55 -0500
committer	Steve French <sfrench@us.ibm.com>	2008-03-01 13:29:55 -0500
commit	0dbd888936a23514716b8d944775bc56f731363a (patch)
tree	a2c60cdc45bdcbed47680731fa8188bffe58c098 /arch/x86/mm/pageattr.c
parent	0b442d2c28479332610c46e1a74e5638ab63a97d (diff)
parent	d395991c117d43bfca97101a931a41d062a93852 (diff)
Merge branch 'master' of /pub/scm/linux/kernel/git/torvalds/linux-2.6
Diffstat (limited to 'arch/x86/mm/pageattr.c')
-rw-r--r--	arch/x86/mm/pageattr.c	335
1 file changed, 188 insertions(+), 147 deletions(-)
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 4119379f80ff..7049294fb469 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -16,6 +16,7 @@
 #include <asm/sections.h>
 #include <asm/uaccess.h>
 #include <asm/pgalloc.h>
+#include <asm/proto.h>
 
 /*
  * The current flushing context - we pass it instead of 5 arguments:
@@ -25,9 +26,31 @@ struct cpa_data {
 	pgprot_t	mask_set;
 	pgprot_t	mask_clr;
 	int		numpages;
+	int		processed;
 	int		flushtlb;
+	unsigned long	pfn;
 };
 
+#ifdef CONFIG_X86_64
+
+static inline unsigned long highmap_start_pfn(void)
+{
+	return __pa(_text) >> PAGE_SHIFT;
+}
+
+static inline unsigned long highmap_end_pfn(void)
+{
+	return __pa(round_up((unsigned long)_end, PMD_SIZE)) >> PAGE_SHIFT;
+}
+
+#endif
+
+#ifdef CONFIG_DEBUG_PAGEALLOC
+# define debug_pagealloc 1
+#else
+# define debug_pagealloc 0
+#endif
+
 static inline int
 within(unsigned long addr, unsigned long start, unsigned long end)
 {
@@ -123,29 +146,14 @@ static void cpa_flush_range(unsigned long start, int numpages, int cache)
 	}
 }
 
-#define HIGH_MAP_START		__START_KERNEL_map
-#define HIGH_MAP_END		(__START_KERNEL_map + KERNEL_TEXT_SIZE)
-
-
-/*
- * Converts a virtual address to a X86-64 highmap address
- */
-static unsigned long virt_to_highmap(void *address)
-{
-#ifdef CONFIG_X86_64
-	return __pa((unsigned long)address) + HIGH_MAP_START - phys_base;
-#else
-	return (unsigned long)address;
-#endif
-}
-
 /*
  * Certain areas of memory on x86 require very specific protection flags,
  * for example the BIOS area or kernel text. Callers don't always get this
  * right (again, ioremap() on BIOS memory is not uncommon) so this function
  * checks and fixes these known static required protection bits.
  */
-static inline pgprot_t static_protections(pgprot_t prot, unsigned long address)
+static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
+					  unsigned long pfn)
 {
 	pgprot_t forbidden = __pgprot(0);
 
@@ -153,30 +161,23 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address)
 	 * The BIOS area between 640k and 1Mb needs to be executable for
 	 * PCI BIOS based config access (CONFIG_PCI_GOBIOS) support.
 	 */
-	if (within(__pa(address), BIOS_BEGIN, BIOS_END))
+	if (within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
 		pgprot_val(forbidden) |= _PAGE_NX;
 
 	/*
 	 * The kernel text needs to be executable for obvious reasons
-	 * Does not cover __inittext since that is gone later on
+	 * Does not cover __inittext since that is gone later on. On
+	 * 64bit we do not enforce !NX on the low mapping
 	 */
 	if (within(address, (unsigned long)_text, (unsigned long)_etext))
 		pgprot_val(forbidden) |= _PAGE_NX;
-	/*
-	 * Do the same for the x86-64 high kernel mapping
-	 */
-	if (within(address, virt_to_highmap(_text), virt_to_highmap(_etext)))
-		pgprot_val(forbidden) |= _PAGE_NX;
 
-	/* The .rodata section needs to be read-only */
-	if (within(address, (unsigned long)__start_rodata,
-		   (unsigned long)__end_rodata))
-		pgprot_val(forbidden) |= _PAGE_RW;
 	/*
-	 * Do the same for the x86-64 high kernel mapping
+	 * The .rodata section needs to be read-only. Using the pfn
+	 * catches all aliases.
 	 */
-	if (within(address, virt_to_highmap(__start_rodata),
-		   virt_to_highmap(__end_rodata)))
+	if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
+		   __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
 		pgprot_val(forbidden) |= _PAGE_RW;
 
 	prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
@@ -253,7 +254,7 @@ static int
 try_preserve_large_page(pte_t *kpte, unsigned long address,
 			struct cpa_data *cpa)
 {
-	unsigned long nextpage_addr, numpages, pmask, psize, flags, addr;
+	unsigned long nextpage_addr, numpages, pmask, psize, flags, addr, pfn;
 	pte_t new_pte, old_pte, *tmp;
 	pgprot_t old_prot, new_prot;
 	int i, do_split = 1;
@@ -290,8 +291,8 @@ try_preserve_large_page(pte_t *kpte, unsigned long address,
 	 */
 	nextpage_addr = (address + psize) & pmask;
 	numpages = (nextpage_addr - address) >> PAGE_SHIFT;
-	if (numpages < cpa->numpages)
-		cpa->numpages = numpages;
+	if (numpages < cpa->processed)
+		cpa->processed = numpages;
 
 	/*
 	 * We are safe now. Check whether the new pgprot is the same:
@@ -301,7 +302,15 @@ try_preserve_large_page(pte_t *kpte, unsigned long address,
 
 	pgprot_val(new_prot) &= ~pgprot_val(cpa->mask_clr);
 	pgprot_val(new_prot) |= pgprot_val(cpa->mask_set);
-	new_prot = static_protections(new_prot, address);
+
+	/*
+	 * old_pte points to the large page base address. So we need
+	 * to add the offset of the virtual address:
+	 */
+	pfn = pte_pfn(old_pte) + ((address & (psize - 1)) >> PAGE_SHIFT);
+	cpa->pfn = pfn;
+
+	new_prot = static_protections(new_prot, address, pfn);
 
 	/*
 	 * We need to check the full range, whether
@@ -309,8 +318,9 @@ try_preserve_large_page(pte_t *kpte, unsigned long address,
 	 * the pages in the range we try to preserve:
 	 */
 	addr = address + PAGE_SIZE;
-	for (i = 1; i < cpa->numpages; i++, addr += PAGE_SIZE) {
-		pgprot_t chk_prot = static_protections(new_prot, addr);
+	pfn++;
+	for (i = 1; i < cpa->processed; i++, addr += PAGE_SIZE, pfn++) {
+		pgprot_t chk_prot = static_protections(new_prot, addr, pfn);
 
 		if (pgprot_val(chk_prot) != pgprot_val(new_prot))
 			goto out_unlock;
@@ -333,7 +343,7 @@ try_preserve_large_page(pte_t *kpte, unsigned long address,
 	 * that we limited the number of possible pages already to
 	 * the number of pages in the large page.
 	 */
-	if (address == (nextpage_addr - psize) && cpa->numpages == numpages) {
+	if (address == (nextpage_addr - psize) && cpa->processed == numpages) {
 		/*
 		 * The address is aligned and the number of pages
 		 * covers the full page.
@@ -352,45 +362,48 @@ out_unlock:
 
 static LIST_HEAD(page_pool);
 static unsigned long pool_size, pool_pages, pool_low;
-static unsigned long pool_used, pool_failed, pool_refill;
+static unsigned long pool_used, pool_failed;
 
-static void cpa_fill_pool(void)
+static void cpa_fill_pool(struct page **ret)
 {
-	struct page *p;
 	gfp_t gfp = GFP_KERNEL;
+	unsigned long flags;
+	struct page *p;
 
-	/* Do not allocate from interrupt context */
-	if (in_irq() || irqs_disabled())
-		return;
 	/*
-	 * Check unlocked. I does not matter when we have one more
-	 * page in the pool. The bit lock avoids recursive pool
-	 * allocations:
+	 * Avoid recursion (on debug-pagealloc) and also signal
+	 * our priority to get to these pagetables:
 	 */
-	if (pool_pages >= pool_size || test_and_set_bit_lock(0, &pool_refill))
+	if (current->flags & PF_MEMALLOC)
 		return;
+	current->flags |= PF_MEMALLOC;
 
-#ifdef CONFIG_DEBUG_PAGEALLOC
 	/*
-	 * We could do:
-	 * gfp = in_atomic() ? GFP_ATOMIC : GFP_KERNEL;
-	 * but this fails on !PREEMPT kernels
+	 * Allocate atomically from atomic contexts:
 	 */
-	gfp = GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN;
-#endif
+	if (in_atomic() || irqs_disabled() || debug_pagealloc)
+		gfp = GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN;
 
-	while (pool_pages < pool_size) {
+	while (pool_pages < pool_size || (ret && !*ret)) {
 		p = alloc_pages(gfp, 0);
 		if (!p) {
 			pool_failed++;
 			break;
 		}
-		spin_lock_irq(&pgd_lock);
+		/*
+		 * If the call site needs a page right now, provide it:
+		 */
+		if (ret && !*ret) {
+			*ret = p;
+			continue;
+		}
+		spin_lock_irqsave(&pgd_lock, flags);
 		list_add(&p->lru, &page_pool);
 		pool_pages++;
-		spin_unlock_irq(&pgd_lock);
+		spin_unlock_irqrestore(&pgd_lock, flags);
 	}
-	clear_bit_unlock(0, &pool_refill);
+
+	current->flags &= ~PF_MEMALLOC;
 }
 
 #define SHIFT_MB		(20 - PAGE_SHIFT)
@@ -411,11 +424,15 @@ void __init cpa_init(void)
 	 * GiB. Shift MiB to Gib and multiply the result by
 	 * POOL_PAGES_PER_GB:
 	 */
-	gb = ((si.totalram >> SHIFT_MB) + ROUND_MB_GB) >> SHIFT_MB_GB;
-	pool_size = POOL_PAGES_PER_GB * gb;
+	if (debug_pagealloc) {
+		gb = ((si.totalram >> SHIFT_MB) + ROUND_MB_GB) >> SHIFT_MB_GB;
+		pool_size = POOL_PAGES_PER_GB * gb;
+	} else {
+		pool_size = 1;
+	}
 	pool_low = pool_size;
 
-	cpa_fill_pool();
+	cpa_fill_pool(NULL);
 	printk(KERN_DEBUG
 	       "CPA: page pool initialized %lu of %lu pages preallocated\n",
 	       pool_pages, pool_size);
@@ -437,16 +454,20 @@ static int split_large_page(pte_t *kpte, unsigned long address)
 	spin_lock_irqsave(&pgd_lock, flags);
 	if (list_empty(&page_pool)) {
 		spin_unlock_irqrestore(&pgd_lock, flags);
-		return -ENOMEM;
+		base = NULL;
+		cpa_fill_pool(&base);
+		if (!base)
+			return -ENOMEM;
+		spin_lock_irqsave(&pgd_lock, flags);
+	} else {
+		base = list_first_entry(&page_pool, struct page, lru);
+		list_del(&base->lru);
+		pool_pages--;
+
+		if (pool_pages < pool_low)
+			pool_low = pool_pages;
 	}
 
-	base = list_first_entry(&page_pool, struct page, lru);
-	list_del(&base->lru);
-	pool_pages--;
-
-	if (pool_pages < pool_low)
-		pool_low = pool_pages;
-
 	/*
 	 * Check for races, another CPU might have split this page
 	 * up for us already:
@@ -505,46 +526,46 @@ out_unlock:
 	return 0;
 }
 
-static int __change_page_attr(unsigned long address, struct cpa_data *cpa)
+static int __change_page_attr(struct cpa_data *cpa, int primary)
 {
+	unsigned long address = cpa->vaddr;
 	int do_split, err;
 	unsigned int level;
-	struct page *kpte_page;
-	pte_t *kpte;
+	pte_t *kpte, old_pte;
 
 repeat:
 	kpte = lookup_address(address, &level);
 	if (!kpte)
-		return -EINVAL;
+		return primary ? -EINVAL : 0;
 
-	kpte_page = virt_to_page(kpte);
-	BUG_ON(PageLRU(kpte_page));
-	BUG_ON(PageCompound(kpte_page));
+	old_pte = *kpte;
+	if (!pte_val(old_pte)) {
+		if (!primary)
+			return 0;
+		printk(KERN_WARNING "CPA: called for zero pte. "
+		       "vaddr = %lx cpa->vaddr = %lx\n", address,
+		       cpa->vaddr);
+		WARN_ON(1);
+		return -EINVAL;
+	}
 
 	if (level == PG_LEVEL_4K) {
-		pte_t new_pte, old_pte = *kpte;
+		pte_t new_pte;
 		pgprot_t new_prot = pte_pgprot(old_pte);
-
-		if(!pte_val(old_pte)) {
-			printk(KERN_WARNING "CPA: called for zero pte. "
-			       "vaddr = %lx cpa->vaddr = %lx\n", address,
-			       cpa->vaddr);
-			WARN_ON(1);
-			return -EINVAL;
-		}
+		unsigned long pfn = pte_pfn(old_pte);
 
 		pgprot_val(new_prot) &= ~pgprot_val(cpa->mask_clr);
 		pgprot_val(new_prot) |= pgprot_val(cpa->mask_set);
 
-		new_prot = static_protections(new_prot, address);
+		new_prot = static_protections(new_prot, address, pfn);
 
 		/*
 		 * We need to keep the pfn from the existing PTE,
 		 * after all we're only going to change it's attributes
 		 * not the memory it points to
 		 */
-		new_pte = pfn_pte(pte_pfn(old_pte), canon_pgprot(new_prot));
-
+		new_pte = pfn_pte(pfn, canon_pgprot(new_prot));
+		cpa->pfn = pfn;
 		/*
 		 * Do we really change anything ?
 		 */
@@ -552,7 +573,7 @@ repeat:
 			set_pte_atomic(kpte, new_pte);
 			cpa->flushtlb = 1;
 		}
-		cpa->numpages = 1;
+		cpa->processed = 1;
 		return 0;
 	}
 
@@ -563,7 +584,7 @@ repeat:
 	do_split = try_preserve_large_page(kpte, address, cpa);
 	/*
 	 * When the range fits into the existing large page,
-	 * return. cp->numpages and cpa->tlbflush have been updated in
+	 * return. cp->processed and cpa->tlbflush have been updated in
 	 * try_large_page:
 	 */
 	if (do_split <= 0)
@@ -581,67 +602,59 @@ repeat:
 	return err;
 }
 
-/**
- * change_page_attr_addr - Change page table attributes in linear mapping
- * @address: Virtual address in linear mapping.
- * @prot: New page table attribute (PAGE_*)
- *
- * Change page attributes of a page in the direct mapping. This is a variant
- * of change_page_attr() that also works on memory holes that do not have
- * mem_map entry (pfn_valid() is false).
- *
- * See change_page_attr() documentation for more details.
- *
- * Modules and drivers should use the set_memory_* APIs instead.
- */
-static int change_page_attr_addr(struct cpa_data *cpa)
+static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias);
+
+static int cpa_process_alias(struct cpa_data *cpa)
 {
-	int err;
-	unsigned long address = cpa->vaddr;
+	struct cpa_data alias_cpa;
+	int ret = 0;
 
-#ifdef CONFIG_X86_64
-	unsigned long phys_addr = __pa(address);
+	if (cpa->pfn > max_pfn_mapped)
+		return 0;
 
 	/*
-	 * If we are inside the high mapped kernel range, then we
-	 * fixup the low mapping first. __va() returns the virtual
-	 * address in the linear mapping:
+	 * No need to redo, when the primary call touched the direct
+	 * mapping already:
 	 */
-	if (within(address, HIGH_MAP_START, HIGH_MAP_END))
-		address = (unsigned long) __va(phys_addr);
-#endif
+	if (!within(cpa->vaddr, PAGE_OFFSET,
+		    PAGE_OFFSET + (max_pfn_mapped << PAGE_SHIFT))) {
 
-	err = __change_page_attr(address, cpa);
-	if (err)
-		return err;
+		alias_cpa = *cpa;
+		alias_cpa.vaddr = (unsigned long) __va(cpa->pfn << PAGE_SHIFT);
+
+		ret = __change_page_attr_set_clr(&alias_cpa, 0);
+	}
 
 #ifdef CONFIG_X86_64
+	if (ret)
+		return ret;
+	/*
+	 * No need to redo, when the primary call touched the high
+	 * mapping already:
+	 */
+	if (within(cpa->vaddr, (unsigned long) _text, (unsigned long) _end))
+		return 0;
+
 	/*
 	 * If the physical address is inside the kernel map, we need
 	 * to touch the high mapped kernel as well:
 	 */
-	if (within(phys_addr, 0, KERNEL_TEXT_SIZE)) {
-		/*
-		 * Calc the high mapping address. See __phys_addr()
-		 * for the non obvious details.
-		 *
-		 * Note that NX and other required permissions are
-		 * checked in static_protections().
-		 */
-		address = phys_addr + HIGH_MAP_START - phys_base;
+	if (!within(cpa->pfn, highmap_start_pfn(), highmap_end_pfn()))
+		return 0;
 
-		/*
-		 * Our high aliases are imprecise, because we check
-		 * everything between 0 and KERNEL_TEXT_SIZE, so do
-		 * not propagate lookup failures back to users:
-		 */
-		__change_page_attr(address, cpa);
-	}
+	alias_cpa = *cpa;
+	alias_cpa.vaddr =
+		(cpa->pfn << PAGE_SHIFT) + __START_KERNEL_map - phys_base;
+
+	/*
+	 * The high mapping range is imprecise, so ignore the return value.
+	 */
+	__change_page_attr_set_clr(&alias_cpa, 0);
 #endif
-	return err;
+	return ret;
 }
 
-static int __change_page_attr_set_clr(struct cpa_data *cpa)
+static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias)
 {
 	int ret, numpages = cpa->numpages;
 
@@ -650,19 +663,26 @@ static int __change_page_attr_set_clr(struct cpa_data *cpa)
 		 * Store the remaining nr of pages for the large page
 		 * preservation check.
 		 */
-		cpa->numpages = numpages;
-		ret = change_page_attr_addr(cpa);
+		cpa->numpages = cpa->processed = numpages;
+
+		ret = __change_page_attr(cpa, checkalias);
 		if (ret)
 			return ret;
 
+		if (checkalias) {
+			ret = cpa_process_alias(cpa);
+			if (ret)
+				return ret;
+		}
+
 		/*
 		 * Adjust the number of pages with the result of the
 		 * CPA operation. Either a large page has been
 		 * preserved or a single page update happened.
 		 */
-		BUG_ON(cpa->numpages > numpages);
-		numpages -= cpa->numpages;
-		cpa->vaddr += cpa->numpages * PAGE_SIZE;
+		BUG_ON(cpa->processed > numpages);
+		numpages -= cpa->processed;
+		cpa->vaddr += cpa->processed * PAGE_SIZE;
 	}
 	return 0;
 }
@@ -677,7 +697,7 @@ static int change_page_attr_set_clr(unsigned long addr, int numpages,
 				    pgprot_t mask_set, pgprot_t mask_clr)
 {
 	struct cpa_data cpa;
-	int ret, cache;
+	int ret, cache, checkalias;
 
 	/*
 	 * Check, if we are requested to change a not supported
@@ -703,7 +723,10 @@ static int change_page_attr_set_clr(unsigned long addr, int numpages,
 	cpa.mask_clr = mask_clr;
 	cpa.flushtlb = 0;
 
-	ret = __change_page_attr_set_clr(&cpa);
+	/* No alias checking for _NX bit modifications */
+	checkalias = (pgprot_val(mask_set) | pgprot_val(mask_clr)) != _PAGE_NX;
+
+	ret = __change_page_attr_set_clr(&cpa, checkalias);
 
 	/*
 	 * Check whether we really changed something:
@@ -729,7 +752,8 @@ static int change_page_attr_set_clr(unsigned long addr, int numpages,
 		cpa_flush_all(cache);
 
 out:
-	cpa_fill_pool();
+	cpa_fill_pool(NULL);
+
 	return ret;
 }
 
@@ -841,7 +865,7 @@ static int __set_pages_p(struct page *page, int numpages)
 				.mask_set = __pgprot(_PAGE_PRESENT | _PAGE_RW),
 				.mask_clr = __pgprot(0)};
 
-	return __change_page_attr_set_clr(&cpa);
+	return __change_page_attr_set_clr(&cpa, 1);
 }
 
 static int __set_pages_np(struct page *page, int numpages)
@@ -851,7 +875,7 @@ static int __set_pages_np(struct page *page, int numpages)
 				.mask_set = __pgprot(0),
 				.mask_clr = __pgprot(_PAGE_PRESENT | _PAGE_RW)};
 
-	return __change_page_attr_set_clr(&cpa);
+	return __change_page_attr_set_clr(&cpa, 1);
 }
 
 void kernel_map_pages(struct page *page, int numpages, int enable)
@@ -892,9 +916,26 @@ void kernel_map_pages(struct page *page, int numpages, int enable)
 	 * Try to refill the page pool here. We can do this only after
 	 * the tlb flush.
 	 */
-	cpa_fill_pool();
+	cpa_fill_pool(NULL);
 }
-#endif
+
+#ifdef CONFIG_HIBERNATION
+
+bool kernel_page_present(struct page *page)
+{
+	unsigned int level;
+	pte_t *pte;
+
+	if (PageHighMem(page))
+		return false;
+
+	pte = lookup_address((unsigned long)page_address(page), &level);
+	return (pte_val(*pte) & _PAGE_PRESENT);
+}
+
+#endif /* CONFIG_HIBERNATION */
+
+#endif /* CONFIG_DEBUG_PAGEALLOC */
 
 /*
  * The testcases use internal knowledge of the implementation that shouldn't