author		Linus Torvalds <torvalds@linux-foundation.org>	2012-05-23 14:06:59 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-05-23 14:06:59 -0400
commit		02171b4a7c5b555d08c3321332e0c45776518276 (patch)
tree		63f10cdab2a8c1bd9fe5ff29319323ff59419ef8
parent		70311aaa8afb9790fb91886749cbf80e7e6cd8d0 (diff)
parent		20167d3421a089a1bf1bd680b150dc69c9506810 (diff)
Merge branch 'x86-mm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 mm changes from Ingo Molnar:
 "This tree includes a micro-optimization that avoids cr3 switches
  during idling; it fixes corner cases and there's also small cleanups"

Fix up trivial context conflict with the percpu_xx -> this_cpu_xx
changes.

* 'x86-mm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86-64: Fix accounting in kernel_physical_mapping_init()
  x86/tlb: Clean up and unify TLB_FLUSH_ALL definition
  x86: Drop obsolete ARCH_BOOTMEM support
  x86, tlb: Switch cr3 in leave_mm() only when needed
  x86/mm: Fix the size calculation of mapping tables
-rw-r--r--	arch/x86/Kconfig			 4
-rw-r--r--	arch/x86/include/asm/mmzone_32.h	 6
-rw-r--r--	arch/x86/include/asm/tlbflush.h		 6
-rw-r--r--	arch/x86/mm/init.c			21
-rw-r--r--	arch/x86/mm/init_64.c			23
-rw-r--r--	arch/x86/mm/tlb.c			 8
6 files changed, 31 insertions(+), 37 deletions(-)
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 21ea6d28d71f..5ab807c1192a 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -1238,10 +1238,6 @@ config NODES_SHIFT
 	  Specify the maximum number of NUMA Nodes available on the target
 	  system.  Increases memory reserved to accommodate various tables.
 
-config HAVE_ARCH_BOOTMEM
-	def_bool y
-	depends on X86_32 && NUMA
-
 config HAVE_ARCH_ALLOC_REMAP
 	def_bool y
 	depends on X86_32 && NUMA
diff --git a/arch/x86/include/asm/mmzone_32.h b/arch/x86/include/asm/mmzone_32.h
index 55728e121473..eb05fb3b02fb 100644
--- a/arch/x86/include/asm/mmzone_32.h
+++ b/arch/x86/include/asm/mmzone_32.h
@@ -61,10 +61,4 @@ static inline int pfn_valid(int pfn)
 
 #endif /* CONFIG_DISCONTIGMEM */
 
-#ifdef CONFIG_NEED_MULTIPLE_NODES
-/* always use node 0 for bootmem on this numa platform */
-#define bootmem_arch_preferred_node(__bdata, size, align, goal, limit)	\
-	(NODE_DATA(0)->bdata)
-#endif /* CONFIG_NEED_MULTIPLE_NODES */
-
 #endif /* _ASM_X86_MMZONE_32_H */
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index 1620d23f14d7..36a1a2ab87d2 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -62,11 +62,7 @@ static inline void __flush_tlb_one(unsigned long addr)
 		__flush_tlb();
 }
 
-#ifdef CONFIG_X86_32
-# define TLB_FLUSH_ALL	0xffffffff
-#else
-# define TLB_FLUSH_ALL	-1ULL
-#endif
+#define TLB_FLUSH_ALL	-1UL
 
 /*
  * TLB flushing:
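The unified definition works because unsigned long is 32 bits wide on x86-32 and 64 bits wide on x86-64, so -1UL expands to the all-ones mask at the native width in either build. A standalone sketch of that behaviour (illustration only, not kernel code):

	#include <stdio.h>

	#define TLB_FLUSH_ALL	-1UL	/* all-ones at the native word size */

	int main(void)
	{
		/* Prints 0xffffffff on a 32-bit build and
		 * 0xffffffffffffffff on a 64-bit build. */
		printf("TLB_FLUSH_ALL = %#lx (%zu-bit unsigned long)\n",
		       TLB_FLUSH_ALL, sizeof(unsigned long) * 8);
		return 0;
	}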
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index 4f0cec7e4ffb..319b6f2fb8b9 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -29,8 +29,14 @@ int direct_gbpages
 #endif
 ;
 
-static void __init find_early_table_space(unsigned long end, int use_pse,
-					  int use_gbpages)
+struct map_range {
+	unsigned long start;
+	unsigned long end;
+	unsigned page_size_mask;
+};
+
+static void __init find_early_table_space(struct map_range *mr, unsigned long end,
+					  int use_pse, int use_gbpages)
 {
 	unsigned long puds, pmds, ptes, tables, start = 0, good_end = end;
 	phys_addr_t base;
@@ -55,6 +61,9 @@ static void __init find_early_table_space(unsigned long end, int use_pse,
 #ifdef CONFIG_X86_32
 		extra += PMD_SIZE;
 #endif
+		/* The first 2/4M doesn't use large pages. */
+		extra += mr->end - mr->start;
+
 		ptes = (extra + PAGE_SIZE - 1) >> PAGE_SHIFT;
 	} else
 		ptes = (end + PAGE_SIZE - 1) >> PAGE_SHIFT;
@@ -84,12 +93,6 @@ void __init native_pagetable_reserve(u64 start, u64 end)
 	memblock_reserve(start, end - start);
 }
 
-struct map_range {
-	unsigned long start;
-	unsigned long end;
-	unsigned page_size_mask;
-};
-
 #ifdef CONFIG_X86_32
 #define NR_RANGE_MR 3
 #else /* CONFIG_X86_64 */
@@ -261,7 +264,7 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
 	 * nodes are discovered.
 	 */
 	if (!after_bootmem)
-		find_early_table_space(end, use_pse, use_gbpages);
+		find_early_table_space(&mr[0], end, use_pse, use_gbpages);
 
 	for (i = 0; i < nr_range; i++)
 		ret = kernel_physical_mapping_init(mr[i].start, mr[i].end,
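The second hunk above is the size-calculation fix from the pull message: even on PSE-capable hardware the head of the range, below the first 2/4M boundary, is mapped with 4K pages, so find_early_table_space() has to reserve PTE space for it too. A standalone sketch of the arithmetic with a hypothetical head range (illustration only, not kernel code):

	#include <stdio.h>

	#define PAGE_SHIFT	12
	#define PAGE_SIZE	(1UL << PAGE_SHIFT)

	int main(void)
	{
		/* Hypothetical head range below the first 2M boundary; it
		 * is mapped with 4K pages even when large pages are used. */
		unsigned long mr_start = 0x0;
		unsigned long mr_end   = 0x200000;
		unsigned long extra    = mr_end - mr_start;

		/* Same rounding as the hunk: bytes -> number of 4K PTEs. */
		unsigned long ptes = (extra + PAGE_SIZE - 1) >> PAGE_SHIFT;

		printf("head range needs %lu PTEs\n", ptes);	/* 512 */
		return 0;
	}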
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index fc18be0f6f29..2b6b4a3c8beb 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -407,12 +407,12 @@ static unsigned long __meminit
 phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end,
 	      unsigned long page_size_mask, pgprot_t prot)
 {
-	unsigned long pages = 0;
+	unsigned long pages = 0, next;
 	unsigned long last_map_addr = end;
 
 	int i = pmd_index(address);
 
-	for (; i < PTRS_PER_PMD; i++, address += PMD_SIZE) {
+	for (; i < PTRS_PER_PMD; i++, address = next) {
 		unsigned long pte_phys;
 		pmd_t *pmd = pmd_page + pmd_index(address);
 		pte_t *pte;
@@ -426,6 +426,8 @@ phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end,
 			break;
 		}
 
+		next = (address & PMD_MASK) + PMD_SIZE;
+
 		if (pmd_val(*pmd)) {
 			if (!pmd_large(*pmd)) {
 				spin_lock(&init_mm.page_table_lock);
@@ -449,7 +451,7 @@ phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end,
 			 * attributes.
 			 */
 			if (page_size_mask & (1 << PG_LEVEL_2M)) {
-				pages++;
+				last_map_addr = next;
 				continue;
 			}
 			new_prot = pte_pgprot(pte_clrhuge(*(pte_t *)pmd));
@@ -462,7 +464,7 @@ phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end,
 				pfn_pte(address >> PAGE_SHIFT,
 					__pgprot(pgprot_val(prot) | _PAGE_PSE)));
 			spin_unlock(&init_mm.page_table_lock);
-			last_map_addr = (address & PMD_MASK) + PMD_SIZE;
+			last_map_addr = next;
 			continue;
 		}
 
@@ -482,11 +484,11 @@ static unsigned long __meminit
 phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
 	      unsigned long page_size_mask)
 {
-	unsigned long pages = 0;
+	unsigned long pages = 0, next;
 	unsigned long last_map_addr = end;
 	int i = pud_index(addr);
 
-	for (; i < PTRS_PER_PUD; i++, addr = (addr & PUD_MASK) + PUD_SIZE) {
+	for (; i < PTRS_PER_PUD; i++, addr = next) {
 		unsigned long pmd_phys;
 		pud_t *pud = pud_page + pud_index(addr);
 		pmd_t *pmd;
@@ -495,8 +497,9 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
 		if (addr >= end)
 			break;
 
-		if (!after_bootmem &&
-		    !e820_any_mapped(addr, addr+PUD_SIZE, 0)) {
+		next = (addr & PUD_MASK) + PUD_SIZE;
+
+		if (!after_bootmem && !e820_any_mapped(addr, next, 0)) {
 			set_pud(pud, __pud(0));
 			continue;
 		}
@@ -523,7 +526,7 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
 		 * attributes.
 		 */
 		if (page_size_mask & (1 << PG_LEVEL_1G)) {
-			pages++;
+			last_map_addr = next;
 			continue;
 		}
 		prot = pte_pgprot(pte_clrhuge(*(pte_t *)pud));
@@ -535,7 +538,7 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
 		set_pte((pte_t *)pud,
 			pfn_pte(addr >> PAGE_SHIFT, PAGE_KERNEL_LARGE));
 		spin_unlock(&init_mm.page_table_lock);
-		last_map_addr = (addr & PUD_MASK) + PUD_SIZE;
+		last_map_addr = next;
 		continue;
 	}
 
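The common thread of these hunks is the accounting fix: next is rounded up to the following PMD/PUD boundary instead of being produced by adding the region size to a possibly unaligned address, and last_map_addr now advances even when an existing large mapping is kept. A standalone sketch of why the rounding matters, using a hypothetical unaligned address and 2M PMDs (not kernel code):

	#include <stdio.h>

	#define PMD_SHIFT	21
	#define PMD_SIZE	(1UL << PMD_SHIFT)
	#define PMD_MASK	(~(PMD_SIZE - 1))

	int main(void)
	{
		/* Hypothetical start address that is not 2M-aligned. */
		unsigned long address = 0x340000;

		unsigned long old_step = address + PMD_SIZE;		  /* lands mid-region */
		unsigned long next     = (address & PMD_MASK) + PMD_SIZE; /* next 2M boundary */

		/* 0x540000 vs 0x400000: only the rounded value matches the
		 * PMD entry the loop actually filled. */
		printf("old step %#lx, rounded next %#lx\n", old_step, next);
		return 0;
	}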
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 3804471db104..5e57e113b72c 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -61,11 +61,13 @@ static DEFINE_PER_CPU_READ_MOSTLY(int, tlb_vector_offset);
  */
 void leave_mm(int cpu)
 {
+	struct mm_struct *active_mm = this_cpu_read(cpu_tlbstate.active_mm);
 	if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
 		BUG();
-	cpumask_clear_cpu(cpu,
-			  mm_cpumask(this_cpu_read(cpu_tlbstate.active_mm)));
-	load_cr3(swapper_pg_dir);
+	if (cpumask_test_cpu(cpu, mm_cpumask(active_mm))) {
+		cpumask_clear_cpu(cpu, mm_cpumask(active_mm));
+		load_cr3(swapper_pg_dir);
+	}
 }
 EXPORT_SYMBOL_GPL(leave_mm);
 
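This is the cr3 micro-optimization named in the pull message: leave_mm() now reloads cr3 only while the CPU is still present in the mm's cpumask, so a repeated call on an already-lazy idle CPU skips the TLB-flushing cr3 write. A standalone sketch with stand-in state (hypothetical names, not kernel code):

	#include <stdbool.h>
	#include <stdio.h>

	static bool cpu_in_mm_mask = true;	/* stand-in for cpumask_test_cpu() */
	static int cr3_loads;			/* counts simulated cr3 switches */

	static void leave_mm_sketch(void)
	{
		if (cpu_in_mm_mask) {
			cpu_in_mm_mask = false;	/* cpumask_clear_cpu() */
			cr3_loads++;		/* load_cr3(swapper_pg_dir) */
		}
	}

	int main(void)
	{
		leave_mm_sketch();	/* first call: clears the bit, switches cr3 */
		leave_mm_sketch();	/* repeated call while idle: now a no-op */
		printf("cr3 loads: %d\n", cr3_loads);	/* prints 1 */
		return 0;
	}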