author     Glenn Elliott <gelliott@cs.unc.edu>    2012-03-04 19:47:13 -0500
committer  Glenn Elliott <gelliott@cs.unc.edu>    2012-03-04 19:47:13 -0500
commit     c71c03bda1e86c9d5198c5d83f712e695c4f2a1e (patch)
tree       ecb166cb3e2b7e2adb3b5e292245fefd23381ac8 /arch/x86/mm/pageattr.c
parent     ea53c912f8a86a8567697115b6a0d8152beee5c8 (diff)
parent     6a00f206debf8a5c8899055726ad127dbeeed098 (diff)
Merge branch 'mpi-master' into wip-k-fmlp
Conflicts:
litmus/sched_cedf.c
Diffstat (limited to 'arch/x86/mm/pageattr.c')

-rw-r--r--  arch/x86/mm/pageattr.c  45
1 file changed, 23 insertions, 22 deletions
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 532e7933d606..f9e526742fa1 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -13,6 +13,7 @@
 #include <linux/pfn.h>
 #include <linux/percpu.h>
 #include <linux/gfp.h>
+#include <linux/pci.h>
 
 #include <asm/e820.h>
 #include <asm/processor.h>
@@ -56,12 +57,10 @@ static unsigned long direct_pages_count[PG_LEVEL_NUM];
 
 void update_page_count(int level, unsigned long pages)
 {
-	unsigned long flags;
-
 	/* Protect against CPA */
-	spin_lock_irqsave(&pgd_lock, flags);
+	spin_lock(&pgd_lock);
 	direct_pages_count[level] += pages;
-	spin_unlock_irqrestore(&pgd_lock, flags);
+	spin_unlock(&pgd_lock);
 }
 
 static void split_page_count(int level)
@@ -260,8 +259,10 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
 	 * The BIOS area between 640k and 1Mb needs to be executable for
 	 * PCI BIOS based config access (CONFIG_PCI_GOBIOS) support.
 	 */
-	if (within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
+#ifdef CONFIG_PCI_BIOS
+	if (pcibios_enabled && within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
 		pgprot_val(forbidden) |= _PAGE_NX;
+#endif
 
 	/*
 	 * The kernel text needs to be executable for obvious reasons
@@ -309,7 +310,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
 	 * these shared mappings are made of small page mappings.
 	 * Thus this don't enforce !RW mapping for small page kernel
 	 * text mapping logic will help Linux Xen parvirt guest boot
-	 * aswell.
+	 * as well.
 	 */
 	if (lookup_address(address, &level) && (level != PG_LEVEL_4K))
 		pgprot_val(forbidden) |= _PAGE_RW;
@@ -391,16 +392,16 @@ static int
 try_preserve_large_page(pte_t *kpte, unsigned long address,
 			struct cpa_data *cpa)
 {
-	unsigned long nextpage_addr, numpages, pmask, psize, flags, addr, pfn;
+	unsigned long nextpage_addr, numpages, pmask, psize, addr, pfn;
 	pte_t new_pte, old_pte, *tmp;
-	pgprot_t old_prot, new_prot;
+	pgprot_t old_prot, new_prot, req_prot;
 	int i, do_split = 1;
 	unsigned int level;
 
 	if (cpa->force_split)
 		return 1;
 
-	spin_lock_irqsave(&pgd_lock, flags);
+	spin_lock(&pgd_lock);
 	/*
 	 * Check for races, another CPU might have split this page
 	 * up already:
@@ -438,10 +439,10 @@ try_preserve_large_page(pte_t *kpte, unsigned long address,
 	 * We are safe now. Check whether the new pgprot is the same:
 	 */
 	old_pte = *kpte;
-	old_prot = new_prot = pte_pgprot(old_pte);
+	old_prot = new_prot = req_prot = pte_pgprot(old_pte);
 
-	pgprot_val(new_prot) &= ~pgprot_val(cpa->mask_clr);
-	pgprot_val(new_prot) |= pgprot_val(cpa->mask_set);
+	pgprot_val(req_prot) &= ~pgprot_val(cpa->mask_clr);
+	pgprot_val(req_prot) |= pgprot_val(cpa->mask_set);
 
 	/*
 	 * old_pte points to the large page base address. So we need
@@ -450,17 +451,17 @@ try_preserve_large_page(pte_t *kpte, unsigned long address,
 	pfn = pte_pfn(old_pte) + ((address & (psize - 1)) >> PAGE_SHIFT);
 	cpa->pfn = pfn;
 
-	new_prot = static_protections(new_prot, address, pfn);
+	new_prot = static_protections(req_prot, address, pfn);
 
 	/*
 	 * We need to check the full range, whether
 	 * static_protection() requires a different pgprot for one of
 	 * the pages in the range we try to preserve:
 	 */
-	addr = address + PAGE_SIZE;
-	pfn++;
-	for (i = 1; i < cpa->numpages; i++, addr += PAGE_SIZE, pfn++) {
-		pgprot_t chk_prot = static_protections(new_prot, addr, pfn);
+	addr = address & pmask;
+	pfn = pte_pfn(old_pte);
+	for (i = 0; i < (psize >> PAGE_SHIFT); i++, addr += PAGE_SIZE, pfn++) {
+		pgprot_t chk_prot = static_protections(req_prot, addr, pfn);
 
 		if (pgprot_val(chk_prot) != pgprot_val(new_prot))
 			goto out_unlock;
@@ -483,7 +484,7 @@ try_preserve_large_page(pte_t *kpte, unsigned long address,
 	 * that we limited the number of possible pages already to
 	 * the number of pages in the large page.
 	 */
-	if (address == (nextpage_addr - psize) && cpa->numpages == numpages) {
+	if (address == (address & pmask) && cpa->numpages == (psize >> PAGE_SHIFT)) {
 		/*
 		 * The address is aligned and the number of pages
 		 * covers the full page.
@@ -495,14 +496,14 @@ try_preserve_large_page(pte_t *kpte, unsigned long address,
 	}
 
 out_unlock:
-	spin_unlock_irqrestore(&pgd_lock, flags);
+	spin_unlock(&pgd_lock);
 
 	return do_split;
 }
 
 static int split_large_page(pte_t *kpte, unsigned long address)
 {
-	unsigned long flags, pfn, pfninc = 1;
+	unsigned long pfn, pfninc = 1;
 	unsigned int i, level;
 	pte_t *pbase, *tmp;
 	pgprot_t ref_prot;
@@ -516,7 +517,7 @@ static int split_large_page(pte_t *kpte, unsigned long address)
 	if (!base)
 		return -ENOMEM;
 
-	spin_lock_irqsave(&pgd_lock, flags);
+	spin_lock(&pgd_lock);
 	/*
 	 * Check for races, another CPU might have split this page
 	 * up for us already:
@@ -588,7 +589,7 @@ out_unlock:
 	 */
 	if (base)
 		__free_page(base);
-	spin_unlock_irqrestore(&pgd_lock, flags);
+	spin_unlock(&pgd_lock);
 
 	return 0;
 }
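
Note on the large-page arithmetic above: try_preserve_large_page() now starts its static-protection check at the large-page base (address & pmask) and walks all psize >> PAGE_SHIFT base pages of the mapping instead of only the cpa->numpages pages being changed, and the "request covers the whole large page" test becomes address == (address & pmask) && cpa->numpages == (psize >> PAGE_SHIFT). Below is a minimal user-space sketch of that mask arithmetic only, assuming 4 KiB base pages and a 2 MiB (PMD-level) large page; the constants, sample address, and page count are illustrative assumptions, not kernel code.

#include <stdio.h>

/* Illustrative constants: 4 KiB base pages, 2 MiB large (PMD-level) pages. */
#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PMD_SHIFT	21
#define PMD_SIZE	(1UL << PMD_SHIFT)

int main(void)
{
	/* Hypothetical CPA request: 16 pages starting inside a 2 MiB mapping. */
	unsigned long address  = 0x1234000UL;
	unsigned long numpages = 16;

	unsigned long psize = PMD_SIZE;
	unsigned long pmask = ~(PMD_SIZE - 1);	/* mirrors the kernel's pmask for PG_LEVEL_2M */

	/* Base of the large page and the number of base pages it spans;
	 * the patched loop iterates over all of them. */
	unsigned long base     = address & pmask;
	unsigned long lp_pages = psize >> PAGE_SHIFT;	/* 512 */

	/* Offset of 'address' into the large page in base pages; this is the
	 * "(address & (psize - 1)) >> PAGE_SHIFT" term added to pte_pfn(old_pte). */
	unsigned long pfn_off  = (address & (psize - 1)) >> PAGE_SHIFT;

	/* The patched "request covers the whole large page" test. */
	int whole = (address == (address & pmask)) && (numpages == (psize >> PAGE_SHIFT));

	printf("large-page base      : %#lx\n", base);
	printf("base pages per 2M map: %lu\n", lp_pages);
	printf("pfn offset of address: %lu\n", pfn_off);
	printf("covers whole 2M page : %s\n", whole ? "yes" : "no");
	return 0;
}

For the sample values the sketch prints a base of 0x1200000, 512 base pages per mapping, a pfn offset of 52, and "no" for whole-page coverage; the old loop would have examined only the 16 requested pages, while the patched loop examines every base page of the 2 MiB mapping before deciding whether the large page can be preserved.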