Diffstat (limited to 'arch/x86/mm/pat.c')

 arch/x86/mm/pat.c | 102
 1 file changed, 49 insertions(+), 53 deletions(-)
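The patch below converts the memtype/PAT helpers from carrying cache attributes as raw page-table flag values (unsigned long, the _PAGE_CACHE_* defines) to the symbolic enum page_cache_mode (the _PAGE_CACHE_MODE_* values). Conversion to and from real PTE bits is confined to the pgprot_t boundary via cachemode2protval() and pgprot2cachemode(), which is what lets most of the back-and-forth conversions in the old code disappear. A minimal sketch of the resulting pattern, built only from identifiers that appear in this diff (the vma_prot declaration is assumed for illustration):

	/* cache attributes travel as a symbolic mode ... */
	enum page_cache_mode pcm = _PAGE_CACHE_MODE_WC;

	/* ... and are encoded into PTE bits only when a pgprot_t is built */
	pgprot_t prot = __pgprot((pgprot_val(vma_prot) & ~_PAGE_CACHE_MASK) |
				 cachemode2protval(pcm));

	/* the reverse direction, when a pgprot_t is the input */
	enum page_cache_mode back = pgprot2cachemode(prot);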
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index 8f68a83491ba..ef75f3f89810 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -139,20 +139,21 @@ static DEFINE_SPINLOCK(memtype_lock);	/* protects memtype accesses */
  * The intersection is based on "Effective Memory Type" tables in IA-32
  * SDM vol 3a
  */
-static unsigned long pat_x_mtrr_type(u64 start, u64 end, unsigned long req_type)
+static unsigned long pat_x_mtrr_type(u64 start, u64 end,
+				     enum page_cache_mode req_type)
 {
 	/*
 	 * Look for MTRR hint to get the effective type in case where PAT
 	 * request is for WB.
 	 */
-	if (req_type == _PAGE_CACHE_WB) {
+	if (req_type == _PAGE_CACHE_MODE_WB) {
 		u8 mtrr_type;
 
 		mtrr_type = mtrr_type_lookup(start, end);
 		if (mtrr_type != MTRR_TYPE_WRBACK)
-			return _PAGE_CACHE_UC_MINUS;
+			return _PAGE_CACHE_MODE_UC_MINUS;
 
-		return _PAGE_CACHE_WB;
+		return _PAGE_CACHE_MODE_WB;
 	}
 
 	return req_type;
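pat_x_mtrr_type() behaves the same as before the conversion; its effective-type result, summarized from the code above:

	/*
	 * WB request + MTRR reports WRBACK        -> _PAGE_CACHE_MODE_WB
	 * WB request + MTRR reports anything else -> _PAGE_CACHE_MODE_UC_MINUS
	 * any non-WB request                      -> returned unchanged
	 */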
@@ -207,25 +208,26 @@ static int pat_pagerange_is_ram(resource_size_t start, resource_size_t end)
  * - Find the memtype of all the pages in the range, look for any conflicts
  * - In case of no conflicts, set the new memtype for pages in the range
  */
-static int reserve_ram_pages_type(u64 start, u64 end, unsigned long req_type,
-				  unsigned long *new_type)
+static int reserve_ram_pages_type(u64 start, u64 end,
+				  enum page_cache_mode req_type,
+				  enum page_cache_mode *new_type)
 {
 	struct page *page;
 	u64 pfn;
 
-	if (req_type == _PAGE_CACHE_UC) {
+	if (req_type == _PAGE_CACHE_MODE_UC) {
 		/* We do not support strong UC */
 		WARN_ON_ONCE(1);
-		req_type = _PAGE_CACHE_UC_MINUS;
+		req_type = _PAGE_CACHE_MODE_UC_MINUS;
 	}
 
 	for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) {
-		unsigned long type;
+		enum page_cache_mode type;
 
 		page = pfn_to_page(pfn);
 		type = get_page_memtype(page);
 		if (type != -1) {
-			printk(KERN_INFO "reserve_ram_pages_type failed [mem %#010Lx-%#010Lx], track 0x%lx, req 0x%lx\n",
+			pr_info("reserve_ram_pages_type failed [mem %#010Lx-%#010Lx], track 0x%x, req 0x%x\n",
 				start, end - 1, type, req_type);
 			if (new_type)
 				*new_type = type;
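Two changes ride along here besides the type switch: printk(KERN_INFO ...) becomes the equivalent pr_info(), and the format specifiers go from 0x%lx to 0x%x, since the enum arguments are promoted to int in the varargs call and %lx would no longer match them. The -1 sentinel comparison still works because a C enum is int-compatible:

	enum page_cache_mode type = get_page_memtype(page);

	if (type != -1)	/* -1 still means "no memtype set on this page" */
		pr_info("already tracked as 0x%x\n", type);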
@@ -258,21 +260,21 @@ static int free_ram_pages_type(u64 start, u64 end)
 
 /*
  * req_type typically has one of the:
- * - _PAGE_CACHE_WB
- * - _PAGE_CACHE_WC
- * - _PAGE_CACHE_UC_MINUS
- * - _PAGE_CACHE_UC
+ * - _PAGE_CACHE_MODE_WB
+ * - _PAGE_CACHE_MODE_WC
+ * - _PAGE_CACHE_MODE_UC_MINUS
+ * - _PAGE_CACHE_MODE_UC
  *
  * If new_type is NULL, function will return an error if it cannot reserve the
  * region with req_type. If new_type is non-NULL, function will return
  * available type in new_type in case of no error. In case of any error
  * it will return a negative return value.
  */
-int reserve_memtype(u64 start, u64 end, unsigned long req_type,
-		    unsigned long *new_type)
+int reserve_memtype(u64 start, u64 end, enum page_cache_mode req_type,
+		    enum page_cache_mode *new_type)
 {
 	struct memtype *new;
-	unsigned long actual_type;
+	enum page_cache_mode actual_type;
 	int is_range_ram;
 	int err = 0;
 
@@ -281,10 +283,10 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
 	if (!pat_enabled) {
 		/* This is identical to page table setting without PAT */
 		if (new_type) {
-			if (req_type == _PAGE_CACHE_WC)
-				*new_type = _PAGE_CACHE_UC_MINUS;
+			if (req_type == _PAGE_CACHE_MODE_WC)
+				*new_type = _PAGE_CACHE_MODE_UC_MINUS;
 			else
-				*new_type = req_type & _PAGE_CACHE_MASK;
+				*new_type = req_type;
 		}
 		return 0;
 	}
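Note that the `& _PAGE_CACHE_MASK` disappears because req_type is now a pure cache mode rather than a value mixed in with other pgprot bits. A hypothetical caller of the new-style reserve_memtype(), illustrating the fallback visible above (all names besides the API itself are for illustration): when PAT is disabled, a WC request is answered with UC_MINUS rather than an error, so callers that care must compare the returned type.

	enum page_cache_mode got;
	int err = reserve_memtype(start, start + size, _PAGE_CACHE_MODE_WC, &got);

	if (err)	/* negative value on any error, per the comment above */
		return err;
	if (got != _PAGE_CACHE_MODE_WC)	/* e.g. !pat_enabled downgraded it */
		pr_info("WC not available, using %s\n", cattr_name(got));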
@@ -292,7 +294,7 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
 	/* Low ISA region is always mapped WB in page table. No need to track */
 	if (x86_platform.is_untracked_pat_range(start, end)) {
 		if (new_type)
-			*new_type = _PAGE_CACHE_WB;
+			*new_type = _PAGE_CACHE_MODE_WB;
 		return 0;
 	}
 
@@ -302,7 +304,7 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
 	 * tools and ACPI tools). Use WB request for WB memory and use
 	 * UC_MINUS otherwise.
 	 */
-	actual_type = pat_x_mtrr_type(start, end, req_type & _PAGE_CACHE_MASK);
+	actual_type = pat_x_mtrr_type(start, end, req_type);
 
 	if (new_type)
 		*new_type = actual_type;
@@ -408,7 +410,7 @@ static enum page_cache_mode lookup_memtype(u64 paddr)
 	if (pat_pagerange_is_ram(paddr, paddr + PAGE_SIZE)) {
 		struct page *page;
 		page = pfn_to_page(paddr >> PAGE_SHIFT);
-		rettype = pgprot2cachemode(__pgprot(get_page_memtype(page)));
+		rettype = get_page_memtype(page);
 		/*
 		 * -1 from get_page_memtype() implies RAM page is in its
 		 * default state and not reserved, and hence of type WB
@@ -423,7 +425,7 @@ static enum page_cache_mode lookup_memtype(u64 paddr)
 
 	entry = rbt_memtype_lookup(paddr);
 	if (entry != NULL)
-		rettype = pgprot2cachemode(__pgprot(entry->type));
+		rettype = entry->type;
 	else
 		rettype = _PAGE_CACHE_MODE_UC_MINUS;
 
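Dropping the pgprot2cachemode() wrappers around get_page_memtype() and entry->type implies both now store enum page_cache_mode directly; those definitions live outside pat.c and are filtered out of this view (the diffstat is limited to arch/x86/mm/pat.c). The resolution order of lookup_memtype(), pieced together from the two hunks above and their comments:

	/*
	 * RAM page, memtype tracked    -> get_page_memtype(page)
	 * RAM page, untracked (-1)     -> _PAGE_CACHE_MODE_WB (default state)
	 * non-RAM, rbtree entry found  -> entry->type
	 * non-RAM, no rbtree entry     -> _PAGE_CACHE_MODE_UC_MINUS
	 */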
@@ -447,18 +449,14 @@ int io_reserve_memtype(resource_size_t start, resource_size_t end,
 	resource_size_t size = end - start;
 	enum page_cache_mode req_type = *type;
 	enum page_cache_mode new_type;
-	unsigned long new_prot;
 	int ret;
 
 	WARN_ON_ONCE(iomem_map_sanity_check(start, size));
 
-	ret = reserve_memtype(start, end, cachemode2protval(req_type),
-			      &new_prot);
+	ret = reserve_memtype(start, end, req_type, &new_type);
 	if (ret)
 		goto out_err;
 
-	new_type = pgprot2cachemode(__pgprot(new_prot));
-
 	if (!is_new_memtype_allowed(start, size, req_type, new_type))
 		goto out_free;
 
@@ -524,13 +522,13 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
 int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
 				unsigned long size, pgprot_t *vma_prot)
 {
-	unsigned long flags = _PAGE_CACHE_WB;
+	enum page_cache_mode pcm = _PAGE_CACHE_MODE_WB;
 
 	if (!range_is_allowed(pfn, size))
 		return 0;
 
 	if (file->f_flags & O_DSYNC)
-		flags = _PAGE_CACHE_UC_MINUS;
+		pcm = _PAGE_CACHE_MODE_UC_MINUS;
 
 #ifdef CONFIG_X86_32
 	/*
@@ -547,12 +545,12 @@ int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
 	    boot_cpu_has(X86_FEATURE_CYRIX_ARR) ||
 	    boot_cpu_has(X86_FEATURE_CENTAUR_MCR)) &&
 	    (pfn << PAGE_SHIFT) >= __pa(high_memory)) {
-		flags = _PAGE_CACHE_UC;
+		pcm = _PAGE_CACHE_MODE_UC;
 	}
 #endif
 
 	*vma_prot = __pgprot((pgprot_val(*vma_prot) & ~_PAGE_CACHE_MASK) |
-			     flags);
+			     cachemode2protval(pcm));
 	return 1;
 }
 
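For reference, the pcm value phys_mem_access_prot_allowed() settles on across both hunks above, before splicing it into *vma_prot with cachemode2protval():

	/*
	 * default                                    -> _PAGE_CACHE_MODE_WB
	 * file opened with O_DSYNC                   -> _PAGE_CACHE_MODE_UC_MINUS
	 * 32-bit, legacy range registers, mapping at
	 * or above __pa(high_memory)                 -> _PAGE_CACHE_MODE_UC
	 */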
@@ -583,7 +581,7 @@ int kernel_map_sync_memtype(u64 base, unsigned long size,
 		printk(KERN_INFO "%s:%d ioremap_change_attr failed %s "
 			"for [mem %#010Lx-%#010Lx]\n",
 			current->comm, current->pid,
-			cattr_name(cachemode2protval(pcm)),
+			cattr_name(pcm),
 			base, (unsigned long long)(base + size-1));
 		return -EINVAL;
 	}
@@ -600,8 +598,8 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
 {
 	int is_ram = 0;
 	int ret;
-	unsigned long want_flags = (pgprot_val(*vma_prot) & _PAGE_CACHE_MASK);
-	unsigned long flags = want_flags;
+	enum page_cache_mode want_pcm = pgprot2cachemode(*vma_prot);
+	enum page_cache_mode pcm = want_pcm;
 
 	is_ram = pat_pagerange_is_ram(paddr, paddr + size);
 
@@ -614,38 +612,36 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
 		if (!pat_enabled)
 			return 0;
 
-		flags = cachemode2protval(lookup_memtype(paddr));
-		if (want_flags != flags) {
+		pcm = lookup_memtype(paddr);
+		if (want_pcm != pcm) {
 			printk(KERN_WARNING "%s:%d map pfn RAM range req %s for [mem %#010Lx-%#010Lx], got %s\n",
 				current->comm, current->pid,
-				cattr_name(want_flags),
+				cattr_name(want_pcm),
 				(unsigned long long)paddr,
 				(unsigned long long)(paddr + size - 1),
-				cattr_name(flags));
+				cattr_name(pcm));
 			*vma_prot = __pgprot((pgprot_val(*vma_prot) &
 					     (~_PAGE_CACHE_MASK)) |
-					     flags);
+					     cachemode2protval(pcm));
 		}
 		return 0;
 	}
 
-	ret = reserve_memtype(paddr, paddr + size, want_flags, &flags);
+	ret = reserve_memtype(paddr, paddr + size, want_pcm, &pcm);
 	if (ret)
 		return ret;
 
-	if (flags != want_flags) {
+	if (pcm != want_pcm) {
 		if (strict_prot ||
-		    !is_new_memtype_allowed(paddr, size,
-					    pgprot2cachemode(__pgprot(want_flags)),
-					    pgprot2cachemode(__pgprot(flags)))) {
+		    !is_new_memtype_allowed(paddr, size, want_pcm, pcm)) {
 			free_memtype(paddr, paddr + size);
 			printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
 				" for [mem %#010Lx-%#010Lx], got %s\n",
 				current->comm, current->pid,
-				cattr_name(want_flags),
+				cattr_name(want_pcm),
 				(unsigned long long)paddr,
 				(unsigned long long)(paddr + size - 1),
-				cattr_name(flags));
+				cattr_name(pcm));
 			return -EINVAL;
 		}
 		/*
@@ -654,11 +650,10 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
 		 */
 		*vma_prot = __pgprot((pgprot_val(*vma_prot) &
 				     (~_PAGE_CACHE_MASK)) |
-				     flags);
+				     cachemode2protval(pcm));
 	}
 
-	if (kernel_map_sync_memtype(paddr, size,
-			pgprot2cachemode(__pgprot(flags))) < 0) {
+	if (kernel_map_sync_memtype(paddr, size, pcm) < 0) {
 		free_memtype(paddr, paddr + size);
 		return -EINVAL;
 	}
@@ -799,7 +794,8 @@ void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn,
 pgprot_t pgprot_writecombine(pgprot_t prot)
 {
 	if (pat_enabled)
-		return __pgprot(pgprot_val(prot) | _PAGE_CACHE_WC);
+		return __pgprot(pgprot_val(prot) |
+				cachemode2protval(_PAGE_CACHE_MODE_WC));
 	else
 		return pgprot_noncached(prot);
 }
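A sketch of how a driver typically consumes pgprot_writecombine() in an mmap handler (hypothetical snippet, not part of this patch; device_base stands in for the physical address of the device memory being mapped). The caller does not need to check pat_enabled itself, since the function above falls back to pgprot_noncached() on its own:

	static int example_mmap(struct file *file, struct vm_area_struct *vma)
	{
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

		return io_remap_pfn_range(vma, vma->vm_start,
					  device_base >> PAGE_SHIFT,
					  vma->vm_end - vma->vm_start,
					  vma->vm_page_prot);
	}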