author		Linus Torvalds <torvalds@linux-foundation.org>	2008-10-13 12:54:35 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2008-10-13 12:54:45 -0400
commit		e7f2f9918c0e97aa98ba147ca387e2c7238f0711 (patch)
tree		dd85d6d2907bffeda76b42ce55a445e3142fe738 /arch/x86/mm
parent		11a96d1820113fde0d55c3487b7da7a9031326b8 (diff)
parent		c00193f9f09f9b852249a66391985f585d066084 (diff)
Merge phase #5 (misc) of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
Merges oprofile, timers/hpet, x86/traps, x86/time, and x86/core misc items.

* 'x86-core-v4-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (132 commits)
  x86: change early_ioremap to use slots instead of nesting
  x86: adjust dependencies for CONFIG_X86_CMOV
  dumpstack: x86: various small unification steps, fix
  x86: remove additional_cpus
  x86: remove additional_cpus configurability
  x86: improve UP kernel when CPU-hotplug and SMP is enabled
  dumpstack: x86: various small unification steps
  dumpstack: i386: make kstack= an early boot-param and add oops=panic
  dumpstack: x86: use log_lvl and unify trace formatting
  dumptrace: x86: consistently include loglevel, print stack switch
  dumpstack: x86: add "end" parameter to valid_stack_ptr and print_context_stack
  dumpstack: x86: make printk_address equal
  dumpstack: x86: move die_nmi to dumpstack_32.c
  traps: x86: finalize unification of traps.c
  traps: x86: make traps_32.c and traps_64.c equal
  traps: x86: various noop-changes preparing for unification of traps_xx.c
  traps: x86_64: use task_pid_nr(tsk) instead of tsk->pid in do_general_protection
  traps: i386: expand clear_mem_error and remove from mach_traps.h
  traps: x86_64: make io_check_error equal to the one on i386
  traps: i386: use preempt_conditional_sti/cli in do_int3
  ...
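The headline arch/x86/mm change in this batch is the early_ioremap rework: instead of a single nesting counter (early_ioremap_nested), boot-time fixmap mappings are tracked in a small fixed array of slots, so they can be torn down in any order and a leak check can count occupied slots. The fragment below is a minimal user-space model of that bookkeeping, not kernel code: NR_SLOTS, slot_addr[] and slot_size[] are made-up stand-ins for FIX_BTMAPS_SLOTS, prev_map[] and prev_size[].

	#include <stddef.h>
	#include <stdio.h>

	#define NR_SLOTS 4				/* stand-in for FIX_BTMAPS_SLOTS */

	static void *slot_addr[NR_SLOTS];		/* models prev_map[] */
	static unsigned long slot_size[NR_SLOTS];	/* models prev_size[] */

	/* Grab the first free slot; return its index, or -1 if all are busy. */
	static int slot_map(void *addr, unsigned long size)
	{
		for (int i = 0; i < NR_SLOTS; i++) {
			if (!slot_addr[i]) {
				slot_addr[i] = addr;
				slot_size[i] = size;
				return i;
			}
		}
		return -1;
	}

	/* Free by address and size, in any order -- the property a nesting
	 * counter could not provide. */
	static int slot_unmap(void *addr, unsigned long size)
	{
		for (int i = 0; i < NR_SLOTS; i++) {
			if (slot_addr[i] == addr && slot_size[i] == size) {
				slot_addr[i] = NULL;
				return i;
			}
		}
		return -1;
	}

	int main(void)
	{
		char a[16], b[16];

		slot_map(a, sizeof(a));
		slot_map(b, sizeof(b));
		/* With nesting, a had to be released after b; with slots it need not be. */
		printf("unmap a -> slot %d, unmap b -> slot %d\n",
		       slot_unmap(a, sizeof(a)), slot_unmap(b, sizeof(b)));
		return 0;
	}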
Diffstat (limited to 'arch/x86/mm')
-rw-r--r--	arch/x86/mm/Makefile	  6
-rw-r--r--	arch/x86/mm/fault.c	  5
-rw-r--r--	arch/x86/mm/gup.c	 10
-rw-r--r--	arch/x86/mm/init_32.c	  2
-rw-r--r--	arch/x86/mm/init_64.c	  9
-rw-r--r--	arch/x86/mm/ioremap.c	143
-rw-r--r--	arch/x86/mm/numa_32.c (renamed from arch/x86/mm/discontig_32.c)	  0
-rw-r--r--	arch/x86/mm/srat_64.c	  2
8 files changed, 123 insertions, 54 deletions
diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile
index dfb932dcf136..59f89b434b45 100644
--- a/arch/x86/mm/Makefile
+++ b/arch/x86/mm/Makefile
@@ -13,12 +13,8 @@ obj-$(CONFIG_MMIOTRACE) += mmiotrace.o
 mmiotrace-y			:= pf_in.o mmio-mod.o
 obj-$(CONFIG_MMIOTRACE_TEST)	+= testmmiotrace.o
 
-ifeq ($(CONFIG_X86_32),y)
-obj-$(CONFIG_NUMA)		+= discontig_32.o
-else
-obj-$(CONFIG_NUMA)		+= numa_64.o
+obj-$(CONFIG_NUMA)		+= numa_$(BITS).o
 obj-$(CONFIG_K8_NUMA)		+= k8topology_64.o
-endif
 obj-$(CONFIG_ACPI_NUMA)	+= srat_$(BITS).o
 
 obj-$(CONFIG_MEMTEST)		+= memtest.o
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index a742d753d5b0..3f2b8962cbd0 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -592,11 +592,6 @@ void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code)
 	unsigned long flags;
 #endif
 
-	/*
-	 * We can fault from pretty much anywhere, with unknown IRQ state.
-	 */
-	trace_hardirqs_fixup();
-
 	tsk = current;
 	mm = tsk->mm;
 	prefetchw(&mm->mmap_sem);
diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
index 007bb06c7504..4ba373c5b8c8 100644
--- a/arch/x86/mm/gup.c
+++ b/arch/x86/mm/gup.c
@@ -82,7 +82,7 @@ static noinline int gup_pte_range(pmd_t pmd, unsigned long addr,
 		pte_t pte = gup_get_pte(ptep);
 		struct page *page;
 
-		if ((pte_val(pte) & (mask | _PAGE_SPECIAL)) != mask) {
+		if ((pte_flags(pte) & (mask | _PAGE_SPECIAL)) != mask) {
 			pte_unmap(ptep);
 			return 0;
 		}
@@ -116,10 +116,10 @@ static noinline int gup_huge_pmd(pmd_t pmd, unsigned long addr,
 	mask = _PAGE_PRESENT|_PAGE_USER;
 	if (write)
 		mask |= _PAGE_RW;
-	if ((pte_val(pte) & mask) != mask)
+	if ((pte_flags(pte) & mask) != mask)
 		return 0;
 	/* hugepages are never "special" */
-	VM_BUG_ON(pte_val(pte) & _PAGE_SPECIAL);
+	VM_BUG_ON(pte_flags(pte) & _PAGE_SPECIAL);
 	VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
 
 	refs = 0;
@@ -173,10 +173,10 @@ static noinline int gup_huge_pud(pud_t pud, unsigned long addr,
 	mask = _PAGE_PRESENT|_PAGE_USER;
 	if (write)
 		mask |= _PAGE_RW;
-	if ((pte_val(pte) & mask) != mask)
+	if ((pte_flags(pte) & mask) != mask)
 		return 0;
 	/* hugepages are never "special" */
-	VM_BUG_ON(pte_val(pte) & _PAGE_SPECIAL);
+	VM_BUG_ON(pte_flags(pte) & _PAGE_SPECIAL);
 	VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
 
 	refs = 0;
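For context on the pte_val() -> pte_flags() switch in the three hunks above: the fast-GUP checks only care about the flag bits of a PTE, and pte_flags() extracts just those, so the comparison never has to materialize the full PTE value (which on paravirtualized kernels can be the more expensive operation). Below is a minimal user-space model of the same mask test; the bit values and PTE_FLAGS_MASK are illustrative stand-ins, not the asm/pgtable_types.h definitions.

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	/* Hypothetical stand-ins for the x86 PTE flag bits. */
	#define _PAGE_PRESENT	0x001ULL
	#define _PAGE_RW	0x002ULL
	#define _PAGE_USER	0x004ULL
	#define _PAGE_SPECIAL	0x200ULL
	#define PTE_FLAGS_MASK	0xfffULL	/* flag bits only, no page-frame bits */

	static uint64_t pte_flags(uint64_t pte)
	{
		return pte & PTE_FLAGS_MASK;	/* never touches the PFN part */
	}

	int main(void)
	{
		/* A present, user-writable PTE pointing at some frame. */
		uint64_t pte  = 0x12345000ULL | _PAGE_PRESENT | _PAGE_USER | _PAGE_RW;
		uint64_t mask = _PAGE_PRESENT | _PAGE_USER | _PAGE_RW;

		/* Same shape as the gup_pte_range() check: all required bits set
		 * and the page not marked "special". */
		bool ok = (pte_flags(pte) & (mask | _PAGE_SPECIAL)) == mask;
		printf("fast gup may take this pte: %s\n", ok ? "yes" : "no");
		return 0;
	}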
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index bbe044dbe014..8396868e82c5 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -558,7 +558,7 @@ void zap_low_mappings(void)
 
 int nx_enabled;
 
-pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL);
+pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
 EXPORT_SYMBOL_GPL(__supported_pte_mask);
 
 #ifdef CONFIG_X86_PAE
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 3e10054c5731..b8e461d49412 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -89,7 +89,7 @@ early_param("gbpages", parse_direct_gbpages_on);
 
 int after_bootmem;
 
-unsigned long __supported_pte_mask __read_mostly = ~0UL;
+pteval_t __supported_pte_mask __read_mostly = ~_PAGE_IOMAP;
 EXPORT_SYMBOL_GPL(__supported_pte_mask);
 
 static int do_not_nx __cpuinitdata;
@@ -196,9 +196,6 @@ set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
 	}
 
 	pte = pte_offset_kernel(pmd, vaddr);
-	if (!pte_none(*pte) && pte_val(new_pte) &&
-	    pte_val(*pte) != (pte_val(new_pte) & __supported_pte_mask))
-		pte_ERROR(*pte);
 	set_pte(pte, new_pte);
 
 	/*
@@ -313,7 +310,7 @@ static __ref void *alloc_low_page(unsigned long *phys)
 	if (pfn >= table_top)
 		panic("alloc_low_page: ran out of memory");
 
-	adr = early_ioremap(pfn * PAGE_SIZE, PAGE_SIZE);
+	adr = early_memremap(pfn * PAGE_SIZE, PAGE_SIZE);
 	memset(adr, 0, PAGE_SIZE);
 	*phys  = pfn * PAGE_SIZE;
 	return adr;
@@ -749,7 +746,7 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
 		old_start = mr[i].start;
 		memmove(&mr[i], &mr[i+1],
 			(nr_range - 1 - i) * sizeof (struct map_range));
-		mr[i].start = old_start;
+		mr[i--].start = old_start;
 		nr_range--;
 	}
 
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index 8cbeda15cd29..e4c43ec71b29 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -45,6 +45,27 @@ unsigned long __phys_addr(unsigned long x)
 }
 EXPORT_SYMBOL(__phys_addr);
 
+bool __virt_addr_valid(unsigned long x)
+{
+	if (x >= __START_KERNEL_map) {
+		x -= __START_KERNEL_map;
+		if (x >= KERNEL_IMAGE_SIZE)
+			return false;
+		x += phys_base;
+	} else {
+		if (x < PAGE_OFFSET)
+			return false;
+		x -= PAGE_OFFSET;
+		if (system_state == SYSTEM_BOOTING ?
+				x > MAXMEM : !phys_addr_valid(x)) {
+			return false;
+		}
+	}
+
+	return pfn_valid(x >> PAGE_SHIFT);
+}
+EXPORT_SYMBOL(__virt_addr_valid);
+
 #else
 
 static inline int phys_addr_valid(unsigned long addr)
@@ -56,13 +77,24 @@ static inline int phys_addr_valid(unsigned long addr)
 unsigned long __phys_addr(unsigned long x)
 {
 	/* VMALLOC_* aren't constants; not available at the boot time */
-	VIRTUAL_BUG_ON(x < PAGE_OFFSET || (system_state != SYSTEM_BOOTING &&
-		is_vmalloc_addr((void *)x)));
+	VIRTUAL_BUG_ON(x < PAGE_OFFSET);
+	VIRTUAL_BUG_ON(system_state != SYSTEM_BOOTING &&
+		is_vmalloc_addr((void *) x));
 	return x - PAGE_OFFSET;
 }
 EXPORT_SYMBOL(__phys_addr);
 #endif
 
+bool __virt_addr_valid(unsigned long x)
+{
+	if (x < PAGE_OFFSET)
+		return false;
+	if (system_state != SYSTEM_BOOTING && is_vmalloc_addr((void *) x))
+		return false;
+	return pfn_valid((x - PAGE_OFFSET) >> PAGE_SHIFT);
+}
+EXPORT_SYMBOL(__virt_addr_valid);
+
 #endif
 
 int page_is_ram(unsigned long pagenr)
@@ -242,16 +274,16 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
 	switch (prot_val) {
 	case _PAGE_CACHE_UC:
 	default:
-		prot = PAGE_KERNEL_NOCACHE;
+		prot = PAGE_KERNEL_IO_NOCACHE;
 		break;
 	case _PAGE_CACHE_UC_MINUS:
-		prot = PAGE_KERNEL_UC_MINUS;
+		prot = PAGE_KERNEL_IO_UC_MINUS;
 		break;
 	case _PAGE_CACHE_WC:
-		prot = PAGE_KERNEL_WC;
+		prot = PAGE_KERNEL_IO_WC;
 		break;
 	case _PAGE_CACHE_WB:
-		prot = PAGE_KERNEL;
+		prot = PAGE_KERNEL_IO;
 		break;
 	}
 
@@ -568,12 +600,12 @@ static void __init __early_set_fixmap(enum fixed_addresses idx,
 }
 
 static inline void __init early_set_fixmap(enum fixed_addresses idx,
-					   unsigned long phys)
+					   unsigned long phys, pgprot_t prot)
 {
 	if (after_paging_init)
-		set_fixmap(idx, phys);
+		__set_fixmap(idx, phys, prot);
 	else
-		__early_set_fixmap(idx, phys, PAGE_KERNEL);
+		__early_set_fixmap(idx, phys, prot);
 }
 
 static inline void __init early_clear_fixmap(enum fixed_addresses idx)
@@ -584,16 +616,22 @@ static inline void __init early_clear_fixmap(enum fixed_addresses idx)
 	__early_set_fixmap(idx, 0, __pgprot(0));
 }
 
-
-static int __initdata early_ioremap_nested;
-
+static void *prev_map[FIX_BTMAPS_SLOTS] __initdata;
+static unsigned long prev_size[FIX_BTMAPS_SLOTS] __initdata;
 static int __init check_early_ioremap_leak(void)
 {
-	if (!early_ioremap_nested)
+	int count = 0;
+	int i;
+
+	for (i = 0; i < FIX_BTMAPS_SLOTS; i++)
+		if (prev_map[i])
+			count++;
+
+	if (!count)
 		return 0;
 	WARN(1, KERN_WARNING
 	       "Debug warning: early ioremap leak of %d areas detected.\n",
-		early_ioremap_nested);
+		count);
 	printk(KERN_WARNING
 		"please boot with early_ioremap_debug and report the dmesg.\n");
 
@@ -601,18 +639,33 @@ static int __init check_early_ioremap_leak(void)
 }
 late_initcall(check_early_ioremap_leak);
 
-void __init *early_ioremap(unsigned long phys_addr, unsigned long size)
+static void __init *__early_ioremap(unsigned long phys_addr, unsigned long size, pgprot_t prot)
 {
 	unsigned long offset, last_addr;
-	unsigned int nrpages, nesting;
+	unsigned int nrpages;
 	enum fixed_addresses idx0, idx;
+	int i, slot;
 
 	WARN_ON(system_state != SYSTEM_BOOTING);
 
-	nesting = early_ioremap_nested;
+	slot = -1;
+	for (i = 0; i < FIX_BTMAPS_SLOTS; i++) {
+		if (!prev_map[i]) {
+			slot = i;
+			break;
+		}
+	}
+
+	if (slot < 0) {
+		printk(KERN_INFO "early_iomap(%08lx, %08lx) not found slot\n",
+			 phys_addr, size);
+		WARN_ON(1);
+		return NULL;
+	}
+
 	if (early_ioremap_debug) {
 		printk(KERN_INFO "early_ioremap(%08lx, %08lx) [%d] => ",
-		       phys_addr, size, nesting);
+		       phys_addr, size, slot);
 		dump_stack();
 	}
 
@@ -623,11 +676,7 @@ void __init *early_ioremap(unsigned long phys_addr, unsigned long size)
 		return NULL;
 	}
 
-	if (nesting >= FIX_BTMAPS_NESTING) {
-		WARN_ON(1);
-		return NULL;
-	}
-	early_ioremap_nested++;
+	prev_size[slot] = size;
 	/*
 	 * Mappings have to be page-aligned
 	 */
@@ -647,10 +696,10 @@ void __init *early_ioremap(unsigned long phys_addr, unsigned long size)
 	/*
 	 * Ok, go for it..
 	 */
-	idx0 = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*nesting;
+	idx0 = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*slot;
 	idx = idx0;
 	while (nrpages > 0) {
-		early_set_fixmap(idx, phys_addr);
+		early_set_fixmap(idx, phys_addr, prot);
 		phys_addr += PAGE_SIZE;
 		--idx;
 		--nrpages;
@@ -658,7 +707,20 @@ void __init *early_ioremap(unsigned long phys_addr, unsigned long size)
 	if (early_ioremap_debug)
 		printk(KERN_CONT "%08lx + %08lx\n", offset, fix_to_virt(idx0));
 
-	return (void *) (offset + fix_to_virt(idx0));
+	prev_map[slot] = (void *) (offset + fix_to_virt(idx0));
+	return prev_map[slot];
+}
+
+/* Remap an IO device */
+void __init *early_ioremap(unsigned long phys_addr, unsigned long size)
+{
+	return __early_ioremap(phys_addr, size, PAGE_KERNEL_IO);
+}
+
+/* Remap memory */
+void __init *early_memremap(unsigned long phys_addr, unsigned long size)
+{
+	return __early_ioremap(phys_addr, size, PAGE_KERNEL);
 }
 
 void __init early_iounmap(void *addr, unsigned long size)
@@ -667,15 +729,33 @@ void __init early_iounmap(void *addr, unsigned long size)
 	unsigned long offset;
 	unsigned int nrpages;
 	enum fixed_addresses idx;
-	int nesting;
+	int i, slot;
+
+	slot = -1;
+	for (i = 0; i < FIX_BTMAPS_SLOTS; i++) {
+		if (prev_map[i] == addr) {
+			slot = i;
+			break;
+		}
+	}
 
-	nesting = --early_ioremap_nested;
-	if (WARN_ON(nesting < 0))
+	if (slot < 0) {
+		printk(KERN_INFO "early_iounmap(%p, %08lx) not found slot\n",
+			 addr, size);
+		WARN_ON(1);
+		return;
+	}
+
+	if (prev_size[slot] != size) {
+		printk(KERN_INFO "early_iounmap(%p, %08lx) [%d] size not consistent %08lx\n",
+			 addr, size, slot, prev_size[slot]);
+		WARN_ON(1);
 		return;
+	}
 
 	if (early_ioremap_debug) {
 		printk(KERN_INFO "early_iounmap(%p, %08lx) [%d]\n", addr,
-		       size, nesting);
+		       size, slot);
 		dump_stack();
 	}
 
@@ -687,12 +767,13 @@ void __init early_iounmap(void *addr, unsigned long size)
 	offset = virt_addr & ~PAGE_MASK;
 	nrpages = PAGE_ALIGN(offset + size - 1) >> PAGE_SHIFT;
 
-	idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*nesting;
+	idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*slot;
 	while (nrpages > 0) {
 		early_clear_fixmap(idx);
 		--idx;
 		--nrpages;
 	}
+	prev_map[slot] = 0;
 }
 
 void __this_fixmap_does_not_exist(void)
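Taken together, the ioremap.c hunks split the old boot-time mapper into two front ends: early_ioremap() keeps the uncached PAGE_KERNEL_IO_* protections introduced alongside _PAGE_IOMAP for device windows, while the new early_memremap() maps ordinary RAM with PAGE_KERNEL, which is why alloc_low_page() in init_64.c switched over to it. A rough sketch of how a boot-time caller might use the pair follows; copy_boot_blob(), blob_copy and blob_phys are hypothetical, only the early_memremap()/early_iounmap() pattern comes from this diff, and the snippet is kernel-style pseudocode rather than a standalone program.

	/* Hypothetical example: copy a firmware blob out of RAM during early boot. */
	static char blob_copy[256] __initdata;

	static void __init copy_boot_blob(unsigned long blob_phys)
	{
		void *p;

		p = early_memremap(blob_phys, sizeof(blob_copy));	/* RAM -> PAGE_KERNEL */
		if (!p)
			return;
		memcpy(blob_copy, p, sizeof(blob_copy));
		early_iounmap(p, sizeof(blob_copy));	/* must pass the same size back */

		/* A device register window would use early_ioremap() instead and
		 * inherit the uncached PAGE_KERNEL_IO_* protection. */
	}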
diff --git a/arch/x86/mm/discontig_32.c b/arch/x86/mm/numa_32.c
index 847c164725f4..847c164725f4 100644
--- a/arch/x86/mm/discontig_32.c
+++ b/arch/x86/mm/numa_32.c
diff --git a/arch/x86/mm/srat_64.c b/arch/x86/mm/srat_64.c
index 1b4763e26ea9..51c0a2fc14fe 100644
--- a/arch/x86/mm/srat_64.c
+++ b/arch/x86/mm/srat_64.c
@@ -138,7 +138,7 @@ acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa)
 		return;
 	}
 
-	if (is_uv_system())
+	if (get_uv_system_type() >= UV_X2APIC)
 		apic_id = (pa->apic_id << 8) | pa->local_sapic_eid;
 	else
 		apic_id = pa->apic_id;