Diffstat (limited to 'arch/x86/mm')
 -rw-r--r--  arch/x86/mm/Makefile        |  3
 -rw-r--r--  arch/x86/mm/init_64.c       | 48
 -rw-r--r--  arch/x86/mm/ioremap.c       | 10
 -rw-r--r--  arch/x86/mm/mmio-mod.c      |  4
 -rw-r--r--  arch/x86/mm/pageattr-test.c |  3
 -rw-r--r--  arch/x86/mm/pageattr.c      | 27
 -rw-r--r--  arch/x86/mm/pat.c           | 50
 -rw-r--r--  arch/x86/mm/pgtable.c       |  3
 -rw-r--r--  arch/x86/mm/srat_32.c       | 12
9 files changed, 102 insertions(+), 58 deletions(-)
diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile
index 2977ea37791f..dfb932dcf136 100644
--- a/arch/x86/mm/Makefile
+++ b/arch/x86/mm/Makefile
@@ -1,7 +1,6 @@
 obj-y	:= init_$(BITS).o fault.o ioremap.o extable.o pageattr.o mmap.o \
-	    pat.o pgtable.o
+	    pat.o pgtable.o gup.o
 
-obj-$(CONFIG_HAVE_GET_USER_PAGES_FAST) += gup.o
 obj-$(CONFIG_X86_32)		+= pgtable_32.o
 
 obj-$(CONFIG_HUGETLB_PAGE)	+= hugetlbpage.o
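Note: with this Makefile change, gup.o (which provides get_user_pages_fast()) is always built on x86 rather than gated on CONFIG_HAVE_GET_USER_PAGES_FAST. For orientation, a minimal caller sketch, assuming the era's signature int get_user_pages_fast(unsigned long start, int nr_pages, int write, struct page **pages); the pin_user_buffer() helper is hypothetical, not from this patch:

	#include <linux/mm.h>

	/* Pin 'nr' user pages for write access; the fast path walks the
	 * page tables without taking mmap_sem, falling back internally
	 * when it cannot.
	 */
	static int pin_user_buffer(unsigned long uaddr, int nr,
				   struct page **pages)
	{
		int got = get_user_pages_fast(uaddr, nr, 1 /* write */, pages);

		if (got < nr) {
			while (got > 0)		/* drop any partial pins */
				put_page(pages[--got]);
			return -EFAULT;
		}
		return 0;
	}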
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 129618ca0ea2..d3746efb060d 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -60,7 +60,7 @@ static unsigned long dma_reserve __initdata;
 
 DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
 
-int direct_gbpages __meminitdata
+int direct_gbpages
 #ifdef CONFIG_DIRECT_GBPAGES
 	= 1
 #endif
@@ -88,7 +88,11 @@ early_param("gbpages", parse_direct_gbpages_on);
 
 int after_bootmem;
 
-static __init void *spp_getpage(void)
+/*
+ * NOTE: This function is marked __ref because it calls __init function
+ * (alloc_bootmem_pages). It's safe to do it ONLY when after_bootmem == 0.
+ */
+static __ref void *spp_getpage(void)
 {
 	void *ptr;
 
@@ -237,7 +241,7 @@ static unsigned long __initdata table_start;
 static unsigned long __meminitdata table_end;
 static unsigned long __meminitdata table_top;
 
-static __meminit void *alloc_low_page(unsigned long *phys)
+static __ref void *alloc_low_page(unsigned long *phys)
 {
 	unsigned long pfn = table_end++;
 	void *adr;
@@ -258,7 +262,7 @@ static __meminit void *alloc_low_page(unsigned long *phys)
 	return adr;
 }
 
-static __meminit void unmap_low_page(void *adr)
+static __ref void unmap_low_page(void *adr)
 {
 	if (after_bootmem)
 		return;
@@ -314,6 +318,7 @@ phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end,
 {
 	unsigned long pages = 0;
 	unsigned long last_map_addr = end;
+	unsigned long start = address;
 
 	int i = pmd_index(address);
 
@@ -331,16 +336,24 @@ phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end,
 		}
 
 		if (pmd_val(*pmd)) {
-			if (!pmd_large(*pmd))
+			if (!pmd_large(*pmd)) {
+				spin_lock(&init_mm.page_table_lock);
 				last_map_addr = phys_pte_update(pmd, address,
 								end);
+				spin_unlock(&init_mm.page_table_lock);
+			}
+			/* Count entries we're using from level2_ident_pgt */
+			if (start == 0)
+				pages++;
 			continue;
 		}
 
 		if (page_size_mask & (1<<PG_LEVEL_2M)) {
 			pages++;
+			spin_lock(&init_mm.page_table_lock);
 			set_pte((pte_t *)pmd,
 				pfn_pte(address >> PAGE_SHIFT, PAGE_KERNEL_LARGE));
+			spin_unlock(&init_mm.page_table_lock);
 			last_map_addr = (address & PMD_MASK) + PMD_SIZE;
 			continue;
 		}
@@ -349,7 +362,9 @@ phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end,
 		last_map_addr = phys_pte_init(pte, address, end);
 		unmap_low_page(pte);
 
+		spin_lock(&init_mm.page_table_lock);
 		pmd_populate_kernel(&init_mm, pmd, __va(pte_phys));
+		spin_unlock(&init_mm.page_table_lock);
 	}
 	update_page_count(PG_LEVEL_2M, pages);
 	return last_map_addr;
@@ -362,9 +377,7 @@ phys_pmd_update(pud_t *pud, unsigned long address, unsigned long end,
 	pmd_t *pmd = pmd_offset(pud, 0);
 	unsigned long last_map_addr;
 
-	spin_lock(&init_mm.page_table_lock);
 	last_map_addr = phys_pmd_init(pmd, address, end, page_size_mask);
-	spin_unlock(&init_mm.page_table_lock);
 	__flush_tlb_all();
 	return last_map_addr;
 }
@@ -400,20 +413,21 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
 
 		if (page_size_mask & (1<<PG_LEVEL_1G)) {
 			pages++;
+			spin_lock(&init_mm.page_table_lock);
 			set_pte((pte_t *)pud,
 				pfn_pte(addr >> PAGE_SHIFT, PAGE_KERNEL_LARGE));
+			spin_unlock(&init_mm.page_table_lock);
 			last_map_addr = (addr & PUD_MASK) + PUD_SIZE;
 			continue;
 		}
 
 		pmd = alloc_low_page(&pmd_phys);
-
-		spin_lock(&init_mm.page_table_lock);
 		last_map_addr = phys_pmd_init(pmd, addr, end, page_size_mask);
 		unmap_low_page(pmd);
+
+		spin_lock(&init_mm.page_table_lock);
 		pud_populate(&init_mm, pud, __va(pmd_phys));
 		spin_unlock(&init_mm.page_table_lock);
-
 	}
 	__flush_tlb_all();
 	update_page_count(PG_LEVEL_1G, pages);
@@ -505,16 +519,14 @@ static unsigned long __init kernel_physical_mapping_init(unsigned long start,
 			continue;
 		}
 
-		if (after_bootmem)
-			pud = pud_offset(pgd, start & PGDIR_MASK);
-		else
-			pud = alloc_low_page(&pud_phys);
-
+		pud = alloc_low_page(&pud_phys);
 		last_map_addr = phys_pud_init(pud, __pa(start), __pa(next),
 						 page_size_mask);
 		unmap_low_page(pud);
-		pgd_populate(&init_mm, pgd_offset_k(start),
-			     __va(pud_phys));
+
+		spin_lock(&init_mm.page_table_lock);
+		pgd_populate(&init_mm, pgd, __va(pud_phys));
+		spin_unlock(&init_mm.page_table_lock);
 	}
 
 	return last_map_addr;
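Note: the pattern running through these init_64.c hunks is that page-table pages are allocated and filled outside init_mm.page_table_lock, and the lock now covers only the store that publishes the new table (or large pte). Condensed from the phys_pud_init() hunk above:

	pmd = alloc_low_page(&pmd_phys);		/* no lock held */
	last_map_addr = phys_pmd_init(pmd, addr, end, page_size_mask);
	unmap_low_page(pmd);

	spin_lock(&init_mm.page_table_lock);
	pud_populate(&init_mm, pud, __va(pmd_phys));	/* publish */
	spin_unlock(&init_mm.page_table_lock);

The phys_pmd_update() hunk drops exactly the outer lock/unlock pair that used to span the whole phys_pmd_init() walk.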
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index 016f335bbeea..d4b6e6a29ae3 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -170,7 +170,7 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
 	phys_addr &= PAGE_MASK;
 	size = PAGE_ALIGN(last_addr+1) - phys_addr;
 
-	retval = reserve_memtype(phys_addr, phys_addr + size,
+	retval = reserve_memtype(phys_addr, (u64)phys_addr + size,
 						prot_val, &new_prot_val);
 	if (retval) {
 		pr_debug("Warning: reserve_memtype returned %d\n", retval);
@@ -553,13 +553,11 @@ static int __init check_early_ioremap_leak(void)
 {
 	if (!early_ioremap_nested)
 		return 0;
-
-	printk(KERN_WARNING
+	WARN(1, KERN_WARNING
 	       "Debug warning: early ioremap leak of %d areas detected.\n",
 	       early_ioremap_nested);
 	printk(KERN_WARNING
 	       "please boot with early_ioremap_debug and report the dmesg.\n");
-	WARN_ON(1);
 
 	return 1;
 }
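Note: the first ioremap.c hunk widens phys_addr to u64 before the addition so that phys_addr + size cannot wrap on 32-bit configurations; reserve_memtype() takes u64 ranges. The second hunk (and similar hunks in pageattr.c and pageattr-test.c) folds a printk()/WARN_ON(1) pair into a single WARN(), which prints the message next to the backtrace and evaluates to its condition, so it can sit directly in a test. A sketch, where leaked is a stand-in for early_ioremap_nested:

	if (WARN(leaked, "early ioremap leak of %d areas detected.\n", leaked))
		return 1;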
diff --git a/arch/x86/mm/mmio-mod.c b/arch/x86/mm/mmio-mod.c
index e7397e108beb..635b50e85581 100644
--- a/arch/x86/mm/mmio-mod.c
+++ b/arch/x86/mm/mmio-mod.c
@@ -430,7 +430,9 @@ static void enter_uniprocessor(void)
 		       "may miss events.\n");
 }
 
-static void leave_uniprocessor(void)
+/* __ref because leave_uniprocessor calls cpu_up which is __cpuinit,
+   but this whole function is ifdefed CONFIG_HOTPLUG_CPU */
+static void __ref leave_uniprocessor(void)
 {
 	int cpu;
 	int err;
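Note: __ref, used here and on spp_getpage(), alloc_low_page() and unmap_low_page() in init_64.c, tells modpost not to flag a reference from regular text to __init/__cpuinit code; it is only legitimate when a runtime guard keeps the discarded callee unreachable after init. A sketch of the idiom, mirroring spp_getpage() from the init_64.c hunk (error handling trimmed):

	/* The __init callee is only reached while after_bootmem == 0,
	 * i.e. before initmem is freed; __ref records that guarantee.
	 */
	static __ref void *spp_getpage(void)
	{
		void *ptr;

		if (after_bootmem)
			ptr = (void *)get_zeroed_page(GFP_ATOMIC);
		else
			ptr = alloc_bootmem_pages(PAGE_SIZE); /* __init path */

		return ptr;
	}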
diff --git a/arch/x86/mm/pageattr-test.c b/arch/x86/mm/pageattr-test.c
index 0dcd42eb94e6..d4aa503caaa2 100644
--- a/arch/x86/mm/pageattr-test.c
+++ b/arch/x86/mm/pageattr-test.c
@@ -221,8 +221,7 @@ static int pageattr_test(void)
 	failed += print_split(&sc);
 
 	if (failed) {
-		printk(KERN_ERR "NOT PASSED. Please report.\n");
-		WARN_ON(1);
+		WARN(1, KERN_ERR "NOT PASSED. Please report.\n");
 		return -EINVAL;
 	} else {
 		if (print)
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 65c6e46bf059..43e2f8483e4f 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -55,13 +55,19 @@ static void split_page_count(int level)
 
 int arch_report_meminfo(char *page)
 {
-	int n = sprintf(page, "DirectMap4k:  %8lu\n"
-			"DirectMap2M:  %8lu\n",
-			direct_pages_count[PG_LEVEL_4K],
-			direct_pages_count[PG_LEVEL_2M]);
+	int n = sprintf(page, "DirectMap4k:  %8lu kB\n",
+			direct_pages_count[PG_LEVEL_4K] << 2);
+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
+	n += sprintf(page + n, "DirectMap2M:  %8lu kB\n",
+			direct_pages_count[PG_LEVEL_2M] << 11);
+#else
+	n += sprintf(page + n, "DirectMap4M:  %8lu kB\n",
+			direct_pages_count[PG_LEVEL_2M] << 12);
+#endif
 #ifdef CONFIG_X86_64
-	n += sprintf(page + n, "DirectMap1G:  %8lu\n",
-			direct_pages_count[PG_LEVEL_1G]);
+	if (direct_gbpages)
+		n += sprintf(page + n, "DirectMap1G:  %8lu kB\n",
+			direct_pages_count[PG_LEVEL_1G] << 20);
 #endif
 	return n;
 }
@@ -592,10 +598,9 @@ repeat:
 	if (!pte_val(old_pte)) {
 		if (!primary)
 			return 0;
-		printk(KERN_WARNING "CPA: called for zero pte. "
+		WARN(1, KERN_WARNING "CPA: called for zero pte. "
 		       "vaddr = %lx cpa->vaddr = %lx\n", address,
 		       cpa->vaddr);
-		WARN_ON(1);
 		return -EINVAL;
 	}
 
@@ -844,7 +849,7 @@ int set_memory_uc(unsigned long addr, int numpages)
 	/*
 	 * for now UC MINUS. see comments in ioremap_nocache()
 	 */
-	if (reserve_memtype(addr, addr + numpages * PAGE_SIZE,
+	if (reserve_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE,
 			    _PAGE_CACHE_UC_MINUS, NULL))
 		return -EINVAL;
 
@@ -863,7 +868,7 @@ int set_memory_wc(unsigned long addr, int numpages)
 	if (!pat_enabled)
 		return set_memory_uc(addr, numpages);
 
-	if (reserve_memtype(addr, addr + numpages * PAGE_SIZE,
+	if (reserve_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE,
 		_PAGE_CACHE_WC, NULL))
 		return -EINVAL;
 
@@ -879,7 +884,7 @@ int _set_memory_wb(unsigned long addr, int numpages)
 
 int set_memory_wb(unsigned long addr, int numpages)
 {
-	free_memtype(addr, addr + numpages * PAGE_SIZE);
+	free_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE);
 
 	return _set_memory_wb(addr, numpages);
 }
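Note: the arch_report_meminfo() hunk reports the DirectMap counters in kB instead of raw page counts, hence the shifts: 4 KiB pages << 2 (4 kB each), 2 MiB pages << 11 (2048 kB), 4 MiB pages << 12 (4096 kB), 1 GiB pages << 20 (1048576 kB). The resulting /proc/meminfo lines on a 64-bit box would look like this (values invented for illustration):

	DirectMap4k:     10240 kB
	DirectMap2M:   4184064 kB

The set_memory_*() hunks fix the reserve_memtype()/free_memtype() calls to pass physical addresses via __pa(), since those functions track physical ranges while addr is a kernel virtual address.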
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index 2fe30916d4b6..2a50e0fa64a5 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -207,6 +207,9 @@ static int chk_conflict(struct memtype *new, struct memtype *entry,
 	return -EBUSY;
 }
 
+static struct memtype *cached_entry;
+static u64 cached_start;
+
 /*
  * req_type typically has one of the:
  * - _PAGE_CACHE_WB
@@ -280,11 +283,17 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
 
 	spin_lock(&memtype_lock);
 
+	if (cached_entry && start >= cached_start)
+		entry = cached_entry;
+	else
+		entry = list_entry(&memtype_list, struct memtype, nd);
+
 	/* Search for existing mapping that overlaps the current range */
 	where = NULL;
-	list_for_each_entry(entry, &memtype_list, nd) {
+	list_for_each_entry_continue(entry, &memtype_list, nd) {
 		if (end <= entry->start) {
 			where = entry->nd.prev;
+			cached_entry = list_entry(where, struct memtype, nd);
 			break;
 		} else if (start <= entry->start) { /* end > entry->start */
 			err = chk_conflict(new, entry, new_type);
@@ -292,6 +301,8 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
 				dprintk("Overlap at 0x%Lx-0x%Lx\n",
 					entry->start, entry->end);
 				where = entry->nd.prev;
+				cached_entry = list_entry(where,
+							struct memtype, nd);
 			}
 			break;
 		} else if (start < entry->end) { /* start > entry->start */
@@ -299,7 +310,20 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
 			if (!err) {
 				dprintk("Overlap at 0x%Lx-0x%Lx\n",
 					entry->start, entry->end);
-				where = &entry->nd;
+				cached_entry = list_entry(entry->nd.prev,
+							struct memtype, nd);
+
+				/*
+				 * Move to right position in the linked
+				 * list to add this new entry
+				 */
+				list_for_each_entry_continue(entry,
+							&memtype_list, nd) {
+					if (start <= entry->start) {
+						where = entry->nd.prev;
+						break;
+					}
+				}
 			}
 			break;
 		}
@@ -314,6 +338,8 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
 		return err;
 	}
 
+	cached_start = start;
+
 	if (where)
 		list_add(&new->nd, where);
 	else
@@ -343,6 +369,9 @@ int free_memtype(u64 start, u64 end)
 	spin_lock(&memtype_lock);
 	list_for_each_entry(entry, &memtype_list, nd) {
 		if (entry->start == start && entry->end == end) {
+			if (cached_entry == entry || cached_start == start)
+				cached_entry = NULL;
+
 			list_del(&entry->nd);
 			kfree(entry);
 			err = 0;
@@ -361,14 +390,6 @@ int free_memtype(u64 start, u64 end)
 }
 
 
-/*
- * /dev/mem mmap interface. The memtype used for mapping varies:
- * - Use UC for mappings with O_SYNC flag
- * - Without O_SYNC flag, if there is any conflict in reserve_memtype,
- *   inherit the memtype from existing mapping.
- * - Else use UC_MINUS memtype (for backward compatibility with existing
- *   X drivers.
- */
 pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
 				unsigned long size, pgprot_t vma_prot)
 {
@@ -406,14 +427,14 @@ int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
 				unsigned long size, pgprot_t *vma_prot)
 {
 	u64 offset = ((u64) pfn) << PAGE_SHIFT;
-	unsigned long flags = _PAGE_CACHE_UC_MINUS;
+	unsigned long flags = -1;
 	int retval;
 
 	if (!range_is_allowed(pfn, size))
 		return 0;
 
 	if (file->f_flags & O_SYNC) {
-		flags = _PAGE_CACHE_UC;
+		flags = _PAGE_CACHE_UC_MINUS;
 	}
 
 #ifdef CONFIG_X86_32
@@ -436,13 +457,14 @@ int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
 #endif
 
 	/*
-	 * With O_SYNC, we can only take UC mapping. Fail if we cannot.
+	 * With O_SYNC, we can only take UC_MINUS mapping. Fail if we cannot.
+	 *
 	 * Without O_SYNC, we want to get
 	 * - WB for WB-able memory and no other conflicting mappings
 	 * - UC_MINUS for non-WB-able memory with no other conflicting mappings
 	 * - Inherit from confliting mappings otherwise
 	 */
-	if (flags != _PAGE_CACHE_UC_MINUS) {
+	if (flags != -1) {
 		retval = reserve_memtype(offset, offset + size, flags, NULL);
 	} else {
 		retval = reserve_memtype(offset, offset + size, -1, &flags);
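Note: the reserve_memtype() hunks add a one-entry position cache over the sorted memtype_list, so a search can resume from the last insertion point whenever the new range starts at or above cached_start. The list_entry(&memtype_list, ...) expression fabricates a pseudo-entry around the list head, which is exactly the starting point list_for_each_entry_continue() needs to begin at the first real element. A self-contained sketch of that idiom (find_ge() is illustrative, not from the patch):

	static struct memtype *find_ge(struct memtype *cached,
				       u64 cached_start, u64 start)
	{
		struct memtype *entry;

		if (cached && start >= cached_start)
			entry = cached;		/* resume near last hit */
		else	/* pseudo-entry wrapping the list head */
			entry = list_entry(&memtype_list, struct memtype, nd);

		list_for_each_entry_continue(entry, &memtype_list, nd)
			if (entry->start >= start)
				return entry;
		return NULL;
	}

The free_memtype() hunk invalidates the cache whenever the cached entry (or any entry at the cached start) is removed, so a stale pointer can never be dereferenced on the next lookup.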
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
index 557b2abceef8..d50302774fe2 100644
--- a/arch/x86/mm/pgtable.c
+++ b/arch/x86/mm/pgtable.c
@@ -207,6 +207,9 @@ static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
 	unsigned long addr;
 	int i;
 
+	if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
+		return;
+
 	pud = pud_offset(pgd, 0);
 
 	for (addr = i = 0; i < PREALLOCATED_PMDS;
diff --git a/arch/x86/mm/srat_32.c b/arch/x86/mm/srat_32.c
index 1eb2973a301c..16ae70fc57e7 100644
--- a/arch/x86/mm/srat_32.c
+++ b/arch/x86/mm/srat_32.c
@@ -178,7 +178,7 @@ void acpi_numa_arch_fixup(void)
  * start of the node, and that the current "end" address is after
  * the previous one.
  */
-static __init void node_read_chunk(int nid, struct node_memory_chunk_s *memory_chunk)
+static __init int node_read_chunk(int nid, struct node_memory_chunk_s *memory_chunk)
 {
 	/*
 	 * Only add present memory as told by the e820.
@@ -189,10 +189,10 @@ static __init void node_read_chunk(int nid, struct node_memory_chunk_s *memory_c
 	if (memory_chunk->start_pfn >= max_pfn) {
 		printk(KERN_INFO "Ignoring SRAT pfns: %08lx - %08lx\n",
 			memory_chunk->start_pfn, memory_chunk->end_pfn);
-		return;
+		return -1;
 	}
 	if (memory_chunk->nid != nid)
-		return;
+		return -1;
 
 	if (!node_has_online_mem(nid))
 		node_start_pfn[nid] = memory_chunk->start_pfn;
@@ -202,6 +202,8 @@ static __init void node_read_chunk(int nid, struct node_memory_chunk_s *memory_c
 
 	if (node_end_pfn[nid] < memory_chunk->end_pfn)
 		node_end_pfn[nid] = memory_chunk->end_pfn;
+
+	return 0;
 }
 
 int __init get_memcfg_from_srat(void)
@@ -259,7 +261,9 @@ int __init get_memcfg_from_srat(void)
 		printk(KERN_DEBUG
 			"chunk %d nid %d start_pfn %08lx end_pfn %08lx\n",
 			j, chunk->nid, chunk->start_pfn, chunk->end_pfn);
-		node_read_chunk(chunk->nid, chunk);
+		if (node_read_chunk(chunk->nid, chunk))
+			continue;
+
 		e820_register_active_regions(chunk->nid, chunk->start_pfn,
 					     min(chunk->end_pfn, max_pfn));
 	}
