 arch/x86/include/asm/page.h |  1 -
 arch/x86/mm/ioremap.c       | 19 ----------
 arch/x86/mm/pat.c           | 83 +++++++++++++++++---------
 3 files changed, 45 insertions(+), 58 deletions(-)
diff --git a/arch/x86/include/asm/page.h b/arch/x86/include/asm/page.h
index e9873a2e8695..776579119a00 100644
--- a/arch/x86/include/asm/page.h
+++ b/arch/x86/include/asm/page.h
@@ -57,7 +57,6 @@ typedef struct { pgdval_t pgd; } pgd_t;
 typedef struct { pgprotval_t pgprot; } pgprot_t;
 
 extern int page_is_ram(unsigned long pagenr);
-extern int pagerange_is_ram(unsigned long start, unsigned long end);
 extern int devmem_is_allowed(unsigned long pagenr);
 extern void map_devmem(unsigned long pfn, unsigned long size,
 		       pgprot_t vma_prot);
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index af750ab973b6..f45d5e29a72e 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -134,25 +134,6 @@ int page_is_ram(unsigned long pagenr)
 	return 0;
 }
 
-int pagerange_is_ram(unsigned long start, unsigned long end)
-{
-	int ram_page = 0, not_rampage = 0;
-	unsigned long page_nr;
-
-	for (page_nr = (start >> PAGE_SHIFT); page_nr < (end >> PAGE_SHIFT);
-	     ++page_nr) {
-		if (page_is_ram(page_nr))
-			ram_page = 1;
-		else
-			not_rampage = 1;
-
-		if (ram_page == not_rampage)
-			return -1;
-	}
-
-	return ram_page;
-}
-
 /*
  * Fix up the linear direct mapping of the kernel to avoid cache attribute
  * conflicts.
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index 7b61036427df..aebbf67a79d0 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -211,6 +211,33 @@ chk_conflict(struct memtype *new, struct memtype *entry, unsigned long *type)
 static struct memtype *cached_entry;
 static u64 cached_start;
 
+static int pat_pagerange_is_ram(unsigned long start, unsigned long end)
+{
+	int ram_page = 0, not_rampage = 0;
+	unsigned long page_nr;
+
+	for (page_nr = (start >> PAGE_SHIFT); page_nr < (end >> PAGE_SHIFT);
+	     ++page_nr) {
+		/*
+		 * For legacy reasons, physical address range in the legacy ISA
+		 * region is tracked as non-RAM. This will allow users of
+		 * /dev/mem to map portions of legacy ISA region, even when
+		 * some of those portions are listed(or not even listed) with
+		 * different e820 types(RAM/reserved/..)
+		 */
+		if (page_nr >= (ISA_END_ADDRESS >> PAGE_SHIFT) &&
+		    page_is_ram(page_nr))
+			ram_page = 1;
+		else
+			not_rampage = 1;
+
+		if (ram_page == not_rampage)
+			return -1;
+	}
+
+	return ram_page;
+}
+
 /*
  * For RAM pages, mark the pages as non WB memory type using
  * PageNonWB (PG_arch_1). We allow only one set_memory_uc() or
@@ -336,20 +363,12 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
 	if (new_type)
 		*new_type = actual_type;
 
-	/*
-	 * For legacy reasons, some parts of the physical address range in the
-	 * legacy 1MB region is treated as non-RAM (even when listed as RAM in
-	 * the e820 tables). So we will track the memory attributes of this
-	 * legacy 1MB region using the linear memtype_list always.
-	 */
-	if (end >= ISA_END_ADDRESS) {
-		is_range_ram = pagerange_is_ram(start, end);
-		if (is_range_ram == 1)
-			return reserve_ram_pages_type(start, end, req_type,
-						      new_type);
-		else if (is_range_ram < 0)
-			return -EINVAL;
-	}
+	is_range_ram = pat_pagerange_is_ram(start, end);
+	if (is_range_ram == 1)
+		return reserve_ram_pages_type(start, end, req_type,
+					      new_type);
+	else if (is_range_ram < 0)
+		return -EINVAL;
 
 	new = kmalloc(sizeof(struct memtype), GFP_KERNEL);
 	if (!new)
@@ -446,19 +465,11 @@ int free_memtype(u64 start, u64 end)
 	if (is_ISA_range(start, end - 1))
 		return 0;
 
-	/*
-	 * For legacy reasons, some parts of the physical address range in the
-	 * legacy 1MB region is treated as non-RAM (even when listed as RAM in
-	 * the e820 tables). So we will track the memory attributes of this
-	 * legacy 1MB region using the linear memtype_list always.
-	 */
-	if (end >= ISA_END_ADDRESS) {
-		is_range_ram = pagerange_is_ram(start, end);
-		if (is_range_ram == 1)
-			return free_ram_pages_type(start, end);
-		else if (is_range_ram < 0)
-			return -EINVAL;
-	}
+	is_range_ram = pat_pagerange_is_ram(start, end);
+	if (is_range_ram == 1)
+		return free_ram_pages_type(start, end);
+	else if (is_range_ram < 0)
+		return -EINVAL;
 
 	spin_lock(&memtype_lock);
 	list_for_each_entry(entry, &memtype_list, nd) {
@@ -626,17 +637,13 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
 	unsigned long flags;
 	unsigned long want_flags = (pgprot_val(*vma_prot) & _PAGE_CACHE_MASK);
 
-	is_ram = pagerange_is_ram(paddr, paddr + size);
+	is_ram = pat_pagerange_is_ram(paddr, paddr + size);
 
-	if (is_ram != 0) {
-		/*
-		 * For mapping RAM pages, drivers need to call
-		 * set_memory_[uc|wc|wb] directly, for reserve and free, before
-		 * setting up the PTE.
-		 */
-		WARN_ON_ONCE(1);
-		return 0;
-	}
+	/*
+	 * reserve_pfn_range() doesn't support RAM pages.
+	 */
+	if (is_ram != 0)
+		return -EINVAL;
 
 	ret = reserve_memtype(paddr, paddr + size, want_flags, &flags);
 	if (ret)
@@ -693,7 +700,7 @@ static void free_pfn_range(u64 paddr, unsigned long size)
 {
 	int is_ram;
 
-	is_ram = pagerange_is_ram(paddr, paddr + size);
+	is_ram = pat_pagerange_is_ram(paddr, paddr + size);
 	if (is_ram == 0)
 		free_memtype(paddr, paddr + size);
 }
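
The helper's tri-state return value is easy to misread, so below is a minimal, self-contained userspace sketch of the convention it implements: 1 when every page in [start, end) counts as RAM, 0 when none does, and -1 as soon as the range mixes both. This is illustrative only, not kernel code; fake_page_is_ram(), PAGE_SHIFT_X, and ISA_END_X are assumptions standing in for the e820 lookup, PAGE_SHIFT (12 on x86), and ISA_END_ADDRESS (0x100000).

/* Hypothetical harness mirroring pat_pagerange_is_ram()'s contract.
 * fake_page_is_ram() pretends e820 marks 1 MiB..2 MiB as RAM.
 */
#include <stdio.h>

#define PAGE_SHIFT_X 12                 /* 4 KiB pages, as on x86 */
#define ISA_END_X    0x100000UL         /* 1 MiB: end of legacy ISA region */

static int fake_page_is_ram(unsigned long page_nr)
{
	return page_nr >= (0x100000UL >> PAGE_SHIFT_X) &&
	       page_nr <  (0x200000UL >> PAGE_SHIFT_X);
}

/* Same shape as the patched helper: ISA pages (pfn < 256, since
 * 0x100000 >> 12 == 0x100) are forced to "non-RAM" so /dev/mem users
 * can still map them, and the first RAM/non-RAM mix returns -1.
 */
static int pagerange_is_ram_sketch(unsigned long start, unsigned long end)
{
	int ram_page = 0, not_rampage = 0;
	unsigned long page_nr;

	for (page_nr = start >> PAGE_SHIFT_X; page_nr < (end >> PAGE_SHIFT_X);
	     ++page_nr) {
		if (page_nr >= (ISA_END_X >> PAGE_SHIFT_X) &&
		    fake_page_is_ram(page_nr))
			ram_page = 1;
		else
			not_rampage = 1;

		if (ram_page == not_rampage)
			return -1;	/* mixed range */
	}
	return ram_page;		/* 1 = all RAM, 0 = no RAM */
}

int main(void)
{
	/* All RAM, above the ISA hole: prints 1. */
	printf("%d\n", pagerange_is_ram_sketch(0x100000, 0x180000));
	/* Entirely inside the ISA hole, tracked as non-RAM: prints 0. */
	printf("%d\n", pagerange_is_ram_sketch(0x0, 0x100000));
	/* Straddles RAM and non-RAM: prints -1. */
	printf("%d\n", pagerange_is_ram_sketch(0x1f0000, 0x210000));
	return 0;
}

Note the behavior change the reserve_pfn_range() hunk builds on this convention: a RAM or mixed result (is_ram != 0) now fails hard with -EINVAL instead of warning via WARN_ON_ONCE() and returning 0, so callers can no longer set up PTEs over RAM pages with silently untracked memory attributes.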
