 arch/x86/Kconfig      |  7 +++++--
 arch/x86/mm/ioremap.c | 26 +-------------------------
 arch/x86/mm/pat.c     |  7 +------
 3 files changed, 7 insertions(+), 33 deletions(-)
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 1b2182b4d5c8..a6cb3c32a1d3 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -1332,7 +1332,9 @@ config MATH_EMULATION
 	  kernel, it won't hurt.
 
 config MTRR
-	bool "MTRR (Memory Type Range Register) support"
+	bool
+	default y
+	prompt "MTRR (Memory Type Range Register) support" if EMBEDDED
 	---help---
 	  On Intel P6 family processors (Pentium Pro, Pentium II and later)
 	  the Memory Type Range Registers (MTRRs) may be used to control
@@ -1398,7 +1400,8 @@ config MTRR_SANITIZER_SPARE_REG_NR_DEFAULT
 
 config X86_PAT
 	bool
-	prompt "x86 PAT support"
+	default y
+	prompt "x86 PAT support" if EMBEDDED
 	depends on MTRR
 	---help---
 	  Use PAT attributes to setup page level cache control.
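
A note on the Kconfig idiom above: splitting the old 'bool "..."' into a bare 'bool' plus 'default y' and 'prompt "..." if EMBEDDED' keeps both options enabled in every default configuration while hiding the question from oldconfig/menuconfig unless CONFIG_EMBEDDED is set. Only embedded builds, where dropping MTRR or PAT support to save space is a plausible trade-off, are still asked; everyone else silently gets both features.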
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index 2feb9bdedaaf..c246d259822d 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -281,30 +281,6 @@ void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size)
 }
 EXPORT_SYMBOL(ioremap_cache);
 
-static void __iomem *ioremap_default(resource_size_t phys_addr,
-				     unsigned long size)
-{
-	unsigned long flags;
-	void __iomem *ret;
-	int err;
-
-	/*
-	 * - WB for WB-able memory and no other conflicting mappings
-	 * - UC_MINUS for non-WB-able memory with no other conflicting mappings
-	 * - Inherit from confliting mappings otherwise
-	 */
-	err = reserve_memtype(phys_addr, phys_addr + size,
-			      _PAGE_CACHE_WB, &flags);
-	if (err < 0)
-		return NULL;
-
-	ret = __ioremap_caller(phys_addr, size, flags,
-			       __builtin_return_address(0));
-
-	free_memtype(phys_addr, phys_addr + size);
-	return ret;
-}
-
 void __iomem *ioremap_prot(resource_size_t phys_addr, unsigned long size,
 				unsigned long prot_val)
 {
@@ -380,7 +356,7 @@ void *xlate_dev_mem_ptr(unsigned long phys)
 	if (page_is_ram(start >> PAGE_SHIFT))
 		return __va(phys);
 
-	addr = (void __force *)ioremap_default(start, PAGE_SIZE);
+	addr = (void __force *)ioremap_cache(start, PAGE_SIZE);
 	if (addr)
 		addr = (void *)((unsigned long)addr | (phys & ~PAGE_MASK));
 
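
Why the sole remaining caller can switch to ioremap_cache(): the deleted helper asked reserve_memtype() for WB, mapped with whatever type was granted, and immediately dropped its reservation. ioremap_cache() reaches the same reserve_memtype() negotiation through __ioremap_caller(). A minimal sketch of what ioremap_cache() amounts to in this era's tree (simplified from the same file, not a verbatim quote):

void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size)
{
	/*
	 * __ioremap_caller() reserves the memtype itself: it asks for WB
	 * and, on a conflict, falls back to a compatible type such as
	 * UC- before building the mapping.
	 */
	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WB,
				__builtin_return_address(0));
}

The one visible difference is reservation lifetime: ioremap_default() called free_memtype() as soon as the mapping existed, so the region went untracked while in use, whereas ioremap_cache() holds its reservation until iounmap(). Since xlate_dev_mem_ptr() is paired with unxlate_dev_mem_ptr(), which iounmaps the page, keeping the region tracked for the mapping's lifetime is the more correct behavior here.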
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index a81b7e73275d..66b55d6e69ed 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -356,9 +356,6 @@ static int free_ram_pages_type(u64 start, u64 end)
  * - _PAGE_CACHE_UC_MINUS
  * - _PAGE_CACHE_UC
  *
- * req_type will have a special case value '-1', when requester want to inherit
- * the memory type from mtrr (if WB), existing PAT, defaulting to UC_MINUS.
- *
  * If new_type is NULL, function will return an error if it cannot reserve the
  * region with req_type. If new_type is non-NULL, function will return
  * available type in new_type in case of no error. In case of any error
@@ -378,9 +375,7 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
 	if (!pat_enabled) {
 		/* This is identical to page table setting without PAT */
 		if (new_type) {
-			if (req_type == -1)
-				*new_type = _PAGE_CACHE_WB;
-			else if (req_type == _PAGE_CACHE_WC)
+			if (req_type == _PAGE_CACHE_WC)
 				*new_type = _PAGE_CACHE_UC_MINUS;
 			else
 				*new_type = req_type & _PAGE_CACHE_MASK;
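
With the "-1 means inherit" convention gone, every reserve_memtype() caller must name a concrete _PAGE_CACHE_* type up front; inherit-like behavior is obtained by requesting WB and honoring whatever comes back through new_type, which is exactly the dance the deleted ioremap_default() open-coded. A hypothetical caller, sketched for illustration only (map_dev_page() is not a function in the tree, and the header names are assumed from this era's layout):

#include <linux/io.h>	/* ioremap_prot(), as seen in the hunk above */
#include <asm/pat.h>	/* reserve_memtype(), free_memtype() */

static void __iomem *map_dev_page(resource_size_t phys, unsigned long size)
{
	unsigned long flags;
	void __iomem *ret;

	/* Request WB explicitly; on a conflict reserve_memtype() hands
	 * back the compatible type it granted (e.g. UC-) through &flags
	 * instead of failing. Passing -1 is no longer recognized. */
	if (reserve_memtype(phys, phys + size, _PAGE_CACHE_WB, &flags) < 0)
		return NULL;

	/* Map with the granted type; the ioremap path takes its own
	 * long-lived reservation, so the probe one is dropped here. */
	ret = ioremap_prot(phys, size, flags);
	free_memtype(phys, phys + size);
	return ret;
}

When PAT is disabled, the same request now degrades predictably through the fallback shown above: WC collapses to UC- and everything else passes through masked, the old "-1 maps to WB" branch being dead code once no caller passes -1.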