author     Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-02-14 12:46:06 -0500
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-02-14 12:46:06 -0500
commit     414f827c46973ba39320cfb43feb55a0eeb9b4e8 (patch)
tree       45e860974ef698e71370a0ebdddcff4f14fbdf9e /arch/x86_64/kernel
parent     86a71dbd3e81e8870d0f0e56b87875f57e58222b (diff)
parent     126b1922367fbe5513daa675a2abd13ed3917f4e (diff)
Merge branch 'for-linus' of git://one.firstfloor.org/home/andi/git/linux-2.6
* 'for-linus' of git://one.firstfloor.org/home/andi/git/linux-2.6: (94 commits)
  [PATCH] x86-64: Remove mk_pte_phys()
  [PATCH] i386: Fix broken CONFIG_COMPAT_VDSO on i386
  [PATCH] i386: fix 32-bit ioctls on x64_32
  [PATCH] x86: Unify pcspeaker platform device code between i386/x86-64
  [PATCH] i386: Remove extern declaration from mm/discontig.c, put in header.
  [PATCH] i386: Rename cpu_gdt_descr and remove extern declaration from smpboot.c
  [PATCH] i386: Move mce_disabled to asm/mce.h
  [PATCH] i386: paravirt unhandled fallthrough
  [PATCH] x86_64: Wire up compat epoll_pwait
  [PATCH] x86: Don't require the vDSO for handling a.out signals
  [PATCH] i386: Fix Cyrix MediaGX detection
  [PATCH] i386: Fix warning in cpu initialization
  [PATCH] i386: Fix warning in microcode.c
  [PATCH] x86: Enable NMI watchdog for AMD Family 0x10 CPUs
  [PATCH] x86: Add new CPUID bits for AMD Family 10 CPUs in /proc/cpuinfo
  [PATCH] i386: Remove fastcall in paravirt.[ch]
  [PATCH] x86-64: Fix wrong gcc check in bitops.h
  [PATCH] x86-64: survive having no irq mapping for a vector
  [PATCH] i386: geode configuration fixes
  [PATCH] i386: add option to show more code in oops reports
  ...
Diffstat (limited to 'arch/x86_64/kernel')
-rw-r--r--  arch/x86_64/kernel/Makefile          2
-rw-r--r--  arch/x86_64/kernel/acpi/sleep.c      2
-rw-r--r--  arch/x86_64/kernel/e820.c           38
-rw-r--r--  arch/x86_64/kernel/head.S           20
-rw-r--r--  arch/x86_64/kernel/io_apic.c        24
-rw-r--r--  arch/x86_64/kernel/ioport.c          2
-rw-r--r--  arch/x86_64/kernel/irq.c            12
-rw-r--r--  arch/x86_64/kernel/mce.c            66
-rw-r--r--  arch/x86_64/kernel/mce_amd.c        44
-rw-r--r--  arch/x86_64/kernel/nmi.c            75
-rw-r--r--  arch/x86_64/kernel/pci-calgary.c    17
-rw-r--r--  arch/x86_64/kernel/pci-dma.c        28
-rw-r--r--  arch/x86_64/kernel/pci-gart.c        4
-rw-r--r--  arch/x86_64/kernel/ptrace.c          8
-rw-r--r--  arch/x86_64/kernel/setup.c         169
-rw-r--r--  arch/x86_64/kernel/setup64.c         1
-rw-r--r--  arch/x86_64/kernel/stacktrace.c      5
-rw-r--r--  arch/x86_64/kernel/time.c           14
-rw-r--r--  arch/x86_64/kernel/x8664_ksyms.c     5
19 files changed, 279 insertions, 257 deletions
diff --git a/arch/x86_64/kernel/Makefile b/arch/x86_64/kernel/Makefile
index 3c7cbff04d3d..ae399458024b 100644
--- a/arch/x86_64/kernel/Makefile
+++ b/arch/x86_64/kernel/Makefile
@@ -43,6 +43,7 @@ obj-$(CONFIG_PCI) += early-quirks.o
 
 obj-y += topology.o
 obj-y += intel_cacheinfo.o
+obj-y += pcspeaker.o
 
 CFLAGS_vsyscall.o := $(PROFILING) -g0
 
@@ -56,3 +57,4 @@ quirks-y += ../../i386/kernel/quirks.o
 i8237-y += ../../i386/kernel/i8237.o
 msr-$(subst m,y,$(CONFIG_X86_MSR)) += ../../i386/kernel/msr.o
 alternative-y += ../../i386/kernel/alternative.o
+pcspeaker-y += ../../i386/kernel/pcspeaker.o
diff --git a/arch/x86_64/kernel/acpi/sleep.c b/arch/x86_64/kernel/acpi/sleep.c
index 5ebf62c7a3d2..23178ce6c783 100644
--- a/arch/x86_64/kernel/acpi/sleep.c
+++ b/arch/x86_64/kernel/acpi/sleep.c
@@ -58,7 +58,7 @@ unsigned long acpi_wakeup_address = 0;
 unsigned long acpi_video_flags;
 extern char wakeup_start, wakeup_end;
 
-extern unsigned long FASTCALL(acpi_copy_wakeup_routine(unsigned long));
+extern unsigned long acpi_copy_wakeup_routine(unsigned long);
 
 static pgd_t low_ptr;
 
diff --git a/arch/x86_64/kernel/e820.c b/arch/x86_64/kernel/e820.c
index 6fe191c58084..4651fd22b213 100644
--- a/arch/x86_64/kernel/e820.c
+++ b/arch/x86_64/kernel/e820.c
@@ -83,6 +83,13 @@ static inline int bad_addr(unsigned long *addrp, unsigned long size)
 		return 1;
 	}
 
+#ifdef CONFIG_NUMA
+	/* NUMA memory to node map */
+	if (last >= nodemap_addr && addr < nodemap_addr + nodemap_size) {
+		*addrp = nodemap_addr + nodemap_size;
+		return 1;
+	}
+#endif
 	/* XXX ramdisk image here? */
 	return 0;
 }
@@ -184,6 +191,37 @@ unsigned long __init e820_end_of_ram(void)
 }
 
 /*
+ * Find the hole size in the range.
+ */
+unsigned long __init e820_hole_size(unsigned long start, unsigned long end)
+{
+	unsigned long ram = 0;
+	int i;
+
+	for (i = 0; i < e820.nr_map; i++) {
+		struct e820entry *ei = &e820.map[i];
+		unsigned long last, addr;
+
+		if (ei->type != E820_RAM ||
+		    ei->addr+ei->size <= start ||
+		    ei->addr >= end)
+			continue;
+
+		addr = round_up(ei->addr, PAGE_SIZE);
+		if (addr < start)
+			addr = start;
+
+		last = round_down(ei->addr + ei->size, PAGE_SIZE);
+		if (last >= end)
+			last = end;
+
+		if (last > addr)
+			ram += last - addr;
+	}
+	return ((end - start) - ram);
+}
+
+/*
  * Mark e820 reserved areas as busy for the resource manager.
  */
 void __init e820_reserve_resources(void)
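
The new e820_hole_size() sums the page-aligned RAM the e820 map reports inside [start, end) and returns the remainder as hole space. A minimal user-space sketch of the same arithmetic, with a made-up two-entry map and 4 KiB pages (names here are illustrative, not kernel API):

#include <stdio.h>

#define PAGE_SIZE     4096UL
#define ROUND_UP(x)   (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))
#define ROUND_DOWN(x) ((x) & ~(PAGE_SIZE - 1))

struct entry { unsigned long addr, size; };     /* RAM entries only */

static unsigned long hole_size(const struct entry *map, int n,
                               unsigned long start, unsigned long end)
{
        unsigned long ram = 0;
        int i;

        for (i = 0; i < n; i++) {
                unsigned long addr = ROUND_UP(map[i].addr);
                unsigned long last = ROUND_DOWN(map[i].addr + map[i].size);

                if (addr < start)
                        addr = start;
                if (last > end)
                        last = end;
                if (last > addr)        /* count only RAM inside [start, end) */
                        ram += last - addr;
        }
        return (end - start) - ram;     /* whatever is not RAM is hole */
}

int main(void)
{
        /* hypothetical map: RAM below 640K and from 1M to 16M */
        struct entry map[] = {
                { 0,             640 * 1024UL },
                { 1024 * 1024UL, 15 * 1024 * 1024UL },
        };

        /* prints 393216, the 640K-1M legacy hole */
        printf("hole: %lu bytes\n",
               hole_size(map, 2, 0, 16 * 1024 * 1024UL));
        return 0;
}
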
diff --git a/arch/x86_64/kernel/head.S b/arch/x86_64/kernel/head.S
index 1e6f80870679..598a4d0351fc 100644
--- a/arch/x86_64/kernel/head.S
+++ b/arch/x86_64/kernel/head.S
@@ -163,6 +163,20 @@ startup_64:
 	 */
 	lgdt	cpu_gdt_descr
 
+	/* set up data segments. actually 0 would do too */
+	movl $__KERNEL_DS,%eax
+	movl %eax,%ds
+	movl %eax,%ss
+	movl %eax,%es
+
+	/*
+	 * We don't really need to load %fs or %gs, but load them anyway
+	 * to kill any stale realmode selectors. This allows execution
+	 * under VT hardware.
+	 */
+	movl %eax,%fs
+	movl %eax,%gs
+
 	/*
 	 * Setup up a dummy PDA. this is just for some early bootup code
 	 * that does in_interrupt()
@@ -173,12 +187,6 @@ startup_64:
 	shrq	$32,%rdx
 	wrmsr
 
-	/* set up data segments. actually 0 would do too */
-	movl $__KERNEL_DS,%eax
-	movl %eax,%ds
-	movl %eax,%ss
-	movl %eax,%es
-
 	/* esi is pointer to real mode structure with interesting info.
 	   pass it to C */
 	movl	%esi, %edi
diff --git a/arch/x86_64/kernel/io_apic.c b/arch/x86_64/kernel/io_apic.c
index 6be6730acb5c..566e64d966c4 100644
--- a/arch/x86_64/kernel/io_apic.c
+++ b/arch/x86_64/kernel/io_apic.c
@@ -831,7 +831,7 @@ static void __init setup_IO_APIC_irq(int apic, int pin, int idx, int irq)
 	entry.delivery_mode = INT_DELIVERY_MODE;
 	entry.dest_mode = INT_DEST_MODE;
 	entry.mask = 0;				/* enable IRQ */
-	entry.dest.logical.logical_dest = cpu_mask_to_apicid(TARGET_CPUS);
+	entry.dest = cpu_mask_to_apicid(TARGET_CPUS);
 
 	entry.trigger = irq_trigger(idx);
 	entry.polarity = irq_polarity(idx);
@@ -839,7 +839,7 @@ static void __init setup_IO_APIC_irq(int apic, int pin, int idx, int irq)
 	if (irq_trigger(idx)) {
 		entry.trigger = 1;
 		entry.mask = 1;
-		entry.dest.logical.logical_dest = cpu_mask_to_apicid(TARGET_CPUS);
+		entry.dest = cpu_mask_to_apicid(TARGET_CPUS);
 	}
 
 	if (!apic && !IO_APIC_IRQ(irq))
@@ -851,7 +851,7 @@ static void __init setup_IO_APIC_irq(int apic, int pin, int idx, int irq)
 		if (vector < 0)
 			return;
 
-		entry.dest.logical.logical_dest = cpu_mask_to_apicid(mask);
+		entry.dest = cpu_mask_to_apicid(mask);
 		entry.vector = vector;
 
 		ioapic_register_intr(irq, vector, IOAPIC_AUTO);
@@ -920,7 +920,7 @@ static void __init setup_ExtINT_IRQ0_pin(unsigned int apic, unsigned int pin, in
 	 */
 	entry.dest_mode = INT_DEST_MODE;
 	entry.mask = 0;					/* unmask IRQ now */
-	entry.dest.logical.logical_dest = cpu_mask_to_apicid(TARGET_CPUS);
+	entry.dest = cpu_mask_to_apicid(TARGET_CPUS);
 	entry.delivery_mode = INT_DELIVERY_MODE;
 	entry.polarity = 0;
 	entry.trigger = 0;
@@ -1020,18 +1020,17 @@ void __apicdebuginit print_IO_APIC(void)
 
 	printk(KERN_DEBUG ".... IRQ redirection table:\n");
 
-	printk(KERN_DEBUG " NR Log Phy Mask Trig IRR Pol"
-			  " Stat Dest Deli Vect:   \n");
+	printk(KERN_DEBUG " NR Dst Mask Trig IRR Pol"
+			  " Stat Dmod Deli Vect:   \n");
 
 	for (i = 0; i <= reg_01.bits.entries; i++) {
 		struct IO_APIC_route_entry entry;
 
 		entry = ioapic_read_entry(apic, i);
 
-		printk(KERN_DEBUG " %02x %03X %02X  ",
+		printk(KERN_DEBUG " %02x %03X ",
 			i,
-			entry.dest.logical.logical_dest,
-			entry.dest.physical.physical_dest
+			entry.dest
 		);
 
 		printk("%1d %1d %1d %1d %1d %1d %1d %02X\n",
@@ -1293,8 +1292,7 @@ void disable_IO_APIC(void)
 		entry.dest_mode       = 0; /* Physical */
 		entry.delivery_mode   = dest_ExtINT; /* ExtInt */
 		entry.vector          = 0;
-		entry.dest.physical.physical_dest =
-			GET_APIC_ID(apic_read(APIC_ID));
+		entry.dest            = GET_APIC_ID(apic_read(APIC_ID));
 
 		/*
 		 * Add it to the IO-APIC irq-routing table:
@@ -1556,7 +1554,7 @@ static inline void unlock_ExtINT_logic(void)
 
 	entry1.dest_mode = 0;			/* physical delivery */
 	entry1.mask = 0;			/* unmask IRQ now */
-	entry1.dest.physical.physical_dest = hard_smp_processor_id();
+	entry1.dest = hard_smp_processor_id();
 	entry1.delivery_mode = dest_ExtINT;
 	entry1.polarity = entry0.polarity;
 	entry1.trigger = 0;
@@ -2131,7 +2129,7 @@ int io_apic_set_pci_routing (int ioapic, int pin, int irq, int triggering, int p
 
 	entry.delivery_mode = INT_DELIVERY_MODE;
 	entry.dest_mode = INT_DEST_MODE;
-	entry.dest.logical.logical_dest = cpu_mask_to_apicid(mask);
+	entry.dest = cpu_mask_to_apicid(mask);
 	entry.trigger = triggering;
 	entry.polarity = polarity;
 	entry.mask = 1;					/* Disabled (masked) */
diff --git a/arch/x86_64/kernel/ioport.c b/arch/x86_64/kernel/ioport.c
index fe063d3cfe42..745b1f0f494e 100644
--- a/arch/x86_64/kernel/ioport.c
+++ b/arch/x86_64/kernel/ioport.c
@@ -114,6 +114,6 @@ asmlinkage long sys_iopl(unsigned int level, struct pt_regs *regs)
 		if (!capable(CAP_SYS_RAWIO))
 			return -EPERM;
 	}
-	regs->eflags = (regs->eflags &~ 0x3000UL) | (level << 12);
+	regs->eflags = (regs->eflags &~ X86_EFLAGS_IOPL) | (level << 12);
 	return 0;
 }
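
The ioport.c change swaps a magic number for its named equivalent: 0x3000UL is exactly the two-bit IOPL field occupying EFLAGS bits 12-13, which the asm headers call X86_EFLAGS_IOPL. A quick self-contained check that the masking arithmetic installs the requested level and leaves every other flag intact:

#include <assert.h>

#define X86_EFLAGS_IOPL 0x00003000UL    /* EFLAGS bits 12-13 */

int main(void)
{
        unsigned long eflags = 0x246;   /* a typical user-mode EFLAGS value */
        unsigned int level = 3;         /* iopl(3): unrestricted port access */

        eflags = (eflags & ~X86_EFLAGS_IOPL) | ((unsigned long)level << 12);

        assert((eflags & X86_EFLAGS_IOPL) >> 12 == level);  /* IOPL is set   */
        assert((eflags & ~X86_EFLAGS_IOPL) == 0x246UL);     /* rest untouched */
        return 0;
}
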
diff --git a/arch/x86_64/kernel/irq.c b/arch/x86_64/kernel/irq.c
index 0c06af6c13bc..3bc30d2c13d3 100644
--- a/arch/x86_64/kernel/irq.c
+++ b/arch/x86_64/kernel/irq.c
@@ -18,6 +18,7 @@
 #include <asm/uaccess.h>
 #include <asm/io_apic.h>
 #include <asm/idle.h>
+#include <asm/smp.h>
 
 atomic_t irq_err_count;
 
@@ -120,9 +121,14 @@ asmlinkage unsigned int do_IRQ(struct pt_regs *regs)
 
 	if (likely(irq < NR_IRQS))
 		generic_handle_irq(irq);
-	else if (printk_ratelimit())
-		printk(KERN_EMERG "%s: %d.%d No irq handler for vector\n",
-			__func__, smp_processor_id(), vector);
+	else {
+		if (!disable_apic)
+			ack_APIC_irq();
+
+		if (printk_ratelimit())
+			printk(KERN_EMERG "%s: %d.%d No irq handler for vector\n",
+				__func__, smp_processor_id(), vector);
+	}
 
 	irq_exit();
 
diff --git a/arch/x86_64/kernel/mce.c b/arch/x86_64/kernel/mce.c
index bdb54a2c9f18..8011a8e1c7d4 100644
--- a/arch/x86_64/kernel/mce.c
+++ b/arch/x86_64/kernel/mce.c
@@ -19,6 +19,7 @@
 #include <linux/cpu.h>
 #include <linux/percpu.h>
 #include <linux/ctype.h>
+#include <linux/kmod.h>
 #include <asm/processor.h>
 #include <asm/msr.h>
 #include <asm/mce.h>
@@ -42,6 +43,10 @@ static unsigned long console_logged;
 static int notify_user;
 static int rip_msr;
 static int mce_bootlog = 1;
+static atomic_t mce_events;
+
+static char trigger[128];
+static char *trigger_argv[2] = { trigger, NULL };
 
 /*
  * Lockless MCE logging infrastructure.
@@ -57,6 +62,7 @@ struct mce_log mcelog = {
 void mce_log(struct mce *mce)
 {
 	unsigned next, entry;
+	atomic_inc(&mce_events);
 	mce->finished = 0;
 	wmb();
 	for (;;) {
@@ -161,6 +167,17 @@ static inline void mce_get_rip(struct mce *m, struct pt_regs *regs)
 	}
 }
 
+static void do_mce_trigger(void)
+{
+	static atomic_t mce_logged;
+	int events = atomic_read(&mce_events);
+	if (events != atomic_read(&mce_logged) && trigger[0]) {
+		/* Small race window, but should be harmless. */
+		atomic_set(&mce_logged, events);
+		call_usermodehelper(trigger, trigger_argv, NULL, -1);
+	}
+}
+
 /*
  * The actual machine check handler
  */
@@ -234,8 +251,12 @@ void do_machine_check(struct pt_regs * regs, long error_code)
 	}
 
 	/* Never do anything final in the polling timer */
-	if (!regs)
+	if (!regs) {
+		/* Normal interrupt context here. Call trigger for any new
+		   events. */
+		do_mce_trigger();
 		goto out;
+	}
 
 	/* If we didn't find an uncorrectable error, pick
 	   the last one (shouldn't happen, just being safe). */
@@ -606,17 +627,42 @@ DEFINE_PER_CPU(struct sys_device, device_mce);
 	}								\
 	static SYSDEV_ATTR(name, 0644, show_ ## name, set_ ## name);
 
+/* TBD should generate these dynamically based on number of available banks */
 ACCESSOR(bank0ctl,bank[0],mce_restart())
 ACCESSOR(bank1ctl,bank[1],mce_restart())
 ACCESSOR(bank2ctl,bank[2],mce_restart())
 ACCESSOR(bank3ctl,bank[3],mce_restart())
 ACCESSOR(bank4ctl,bank[4],mce_restart())
 ACCESSOR(bank5ctl,bank[5],mce_restart())
-static struct sysdev_attribute * bank_attributes[NR_BANKS] = {
-	&attr_bank0ctl, &attr_bank1ctl, &attr_bank2ctl,
-	&attr_bank3ctl, &attr_bank4ctl, &attr_bank5ctl};
+
+static ssize_t show_trigger(struct sys_device *s, char *buf)
+{
+	strcpy(buf, trigger);
+	strcat(buf, "\n");
+	return strlen(trigger) + 1;
+}
+
+static ssize_t set_trigger(struct sys_device *s,const char *buf,size_t siz)
+{
+	char *p;
+	int len;
+	strncpy(trigger, buf, sizeof(trigger));
+	trigger[sizeof(trigger)-1] = 0;
+	len = strlen(trigger);
+	p = strchr(trigger, '\n');
+	if (*p) *p = 0;
+	return len;
+}
+
+static SYSDEV_ATTR(trigger, 0644, show_trigger, set_trigger);
 ACCESSOR(tolerant,tolerant,)
 ACCESSOR(check_interval,check_interval,mce_restart())
+static struct sysdev_attribute *mce_attributes[] = {
+	&attr_bank0ctl, &attr_bank1ctl, &attr_bank2ctl,
+	&attr_bank3ctl, &attr_bank4ctl, &attr_bank5ctl,
+	&attr_tolerant, &attr_check_interval, &attr_trigger,
+	NULL
+};
 
 /* Per cpu sysdev init. All of the cpus still share the same ctl bank */
 static __cpuinit int mce_create_device(unsigned int cpu)
@@ -632,11 +678,9 @@ static __cpuinit int mce_create_device(unsigned int cpu)
 	err = sysdev_register(&per_cpu(device_mce,cpu));
 
 	if (!err) {
-		for (i = 0; i < banks; i++)
+		for (i = 0; mce_attributes[i]; i++)
 			sysdev_create_file(&per_cpu(device_mce,cpu),
-				bank_attributes[i]);
-		sysdev_create_file(&per_cpu(device_mce,cpu), &attr_tolerant);
-		sysdev_create_file(&per_cpu(device_mce,cpu), &attr_check_interval);
+				mce_attributes[i]);
 	}
 	return err;
 }
@@ -645,11 +689,9 @@ static void mce_remove_device(unsigned int cpu)
 {
 	int i;
 
-	for (i = 0; i < banks; i++)
+	for (i = 0; mce_attributes[i]; i++)
 		sysdev_remove_file(&per_cpu(device_mce,cpu),
-			bank_attributes[i]);
-	sysdev_remove_file(&per_cpu(device_mce,cpu), &attr_tolerant);
-	sysdev_remove_file(&per_cpu(device_mce,cpu), &attr_check_interval);
+			mce_attributes[i]);
 	sysdev_unregister(&per_cpu(device_mce,cpu));
 	memset(&per_cpu(device_mce, cpu).kobj, 0, sizeof(struct kobject));
 }
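
All of the MCE sysfs attributes, banks included, now sit in one NULL-terminated mce_attributes[] table, so mce_create_device() and mce_remove_device() walk the same list and cannot drift apart as attributes are added. A generic sketch of the sentinel-terminated-table pattern (stand-in names, not the kernel structures):

#include <stdio.h>

/* stand-ins for the sysdev attribute pointers */
static const char *mce_attrs[] = {
        "bank0ctl", "bank1ctl", "bank2ctl", "bank3ctl", "bank4ctl",
        "bank5ctl", "tolerant", "check_interval", "trigger",
        NULL                            /* sentinel terminates the table */
};

int main(void)
{
        int i;

        /* create and remove run the very same loop, so they can never
           disagree about which attributes exist */
        for (i = 0; mce_attrs[i]; i++)
                printf("create %s\n", mce_attrs[i]);
        for (i = 0; mce_attrs[i]; i++)
                printf("remove %s\n", mce_attrs[i]);
        return 0;
}
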
diff --git a/arch/x86_64/kernel/mce_amd.c b/arch/x86_64/kernel/mce_amd.c
index 93c707257637..d0bd5d66e103 100644
--- a/arch/x86_64/kernel/mce_amd.c
+++ b/arch/x86_64/kernel/mce_amd.c
@@ -37,6 +37,8 @@
 #define THRESHOLD_MAX     0xFFF
 #define INT_TYPE_APIC     0x00020000
 #define MASK_VALID_HI     0x80000000
+#define MASK_CNTP_HI      0x40000000
+#define MASK_LOCKED_HI    0x20000000
 #define MASK_LVTOFF_HI    0x00F00000
 #define MASK_COUNT_EN_HI  0x00080000
 #define MASK_INT_TYPE_HI  0x00060000
@@ -122,14 +124,17 @@ void __cpuinit mce_amd_feature_init(struct cpuinfo_x86 *c)
 	for (block = 0; block < NR_BLOCKS; ++block) {
 		if (block == 0)
 			address = MSR_IA32_MC0_MISC + bank * 4;
-		else if (block == 1)
-			address = MCG_XBLK_ADDR
-				+ ((low & MASK_BLKPTR_LO) >> 21);
+		else if (block == 1) {
+			address = (low & MASK_BLKPTR_LO) >> 21;
+			if (!address)
+				break;
+			address += MCG_XBLK_ADDR;
+		}
 		else
 			++address;
 
 		if (rdmsr_safe(address, &low, &high))
-			continue;
+			break;
 
 		if (!(high & MASK_VALID_HI)) {
 			if (block)
@@ -138,8 +143,8 @@ void __cpuinit mce_amd_feature_init(struct cpuinfo_x86 *c)
 				break;
 		}
 
-		if (!(high & MASK_VALID_HI >> 1) ||
-		    (high & MASK_VALID_HI >> 2))
+		if (!(high & MASK_CNTP_HI) ||
+		    (high & MASK_LOCKED_HI))
 			continue;
 
 		if (!block)
@@ -187,17 +192,22 @@ asmlinkage void mce_threshold_interrupt(void)
 
 	/* assume first bank caused it */
 	for (bank = 0; bank < NR_BANKS; ++bank) {
+		if (!(per_cpu(bank_map, m.cpu) & (1 << bank)))
+			continue;
 		for (block = 0; block < NR_BLOCKS; ++block) {
 			if (block == 0)
 				address = MSR_IA32_MC0_MISC + bank * 4;
-			else if (block == 1)
-				address = MCG_XBLK_ADDR
-					+ ((low & MASK_BLKPTR_LO) >> 21);
+			else if (block == 1) {
+				address = (low & MASK_BLKPTR_LO) >> 21;
+				if (!address)
+					break;
+				address += MCG_XBLK_ADDR;
+			}
 			else
 				++address;
 
 			if (rdmsr_safe(address, &low, &high))
-				continue;
+				break;
 
 			if (!(high & MASK_VALID_HI)) {
 				if (block)
@@ -206,10 +216,14 @@ asmlinkage void mce_threshold_interrupt(void)
 					break;
 			}
 
-			if (!(high & MASK_VALID_HI >> 1) ||
-			    (high & MASK_VALID_HI >> 2))
+			if (!(high & MASK_CNTP_HI) ||
+			    (high & MASK_LOCKED_HI))
 				continue;
 
+			/* Log the machine check that caused the threshold
+			   event. */
+			do_machine_check(NULL, 0);
+
 			if (high & MASK_OVERFLOW_HI) {
 				rdmsrl(address, m.misc);
 				rdmsrl(MSR_IA32_MC0_STATUS + bank * 4,
@@ -385,7 +399,7 @@ static __cpuinit int allocate_threshold_blocks(unsigned int cpu,
 		return 0;
 
 	if (rdmsr_safe(address, &low, &high))
-		goto recurse;
+		return 0;
 
 	if (!(high & MASK_VALID_HI)) {
 		if (block)
@@ -394,8 +408,8 @@ static __cpuinit int allocate_threshold_blocks(unsigned int cpu,
 			return 0;
 	}
 
-	if (!(high & MASK_VALID_HI >> 1) ||
-	    (high & MASK_VALID_HI >> 2))
+	if (!(high & MASK_CNTP_HI) ||
+	    (high & MASK_LOCKED_HI))
 		goto recurse;
 
 	b = kzalloc(sizeof(struct threshold_block), GFP_KERNEL);
diff --git a/arch/x86_64/kernel/nmi.c b/arch/x86_64/kernel/nmi.c
index 9cb42ecb7f89..486f4c61a948 100644
--- a/arch/x86_64/kernel/nmi.c
+++ b/arch/x86_64/kernel/nmi.c
@@ -172,7 +172,7 @@ static __cpuinit inline int nmi_known_cpu(void)
 {
 	switch (boot_cpu_data.x86_vendor) {
 	case X86_VENDOR_AMD:
-		return boot_cpu_data.x86 == 15;
+		return boot_cpu_data.x86 == 15 || boot_cpu_data.x86 == 16;
 	case X86_VENDOR_INTEL:
 		if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
 			return 1;
@@ -214,6 +214,23 @@ static __init void nmi_cpu_busy(void *data)
 }
 #endif
 
+static unsigned int adjust_for_32bit_ctr(unsigned int hz)
+{
+	unsigned int retval = hz;
+
+	/*
+	 * On Intel CPUs with ARCH_PERFMON only 32 bits in the counter
+	 * are writable, with higher bits sign extending from bit 31.
+	 * So, we can only program the counter with 31 bit values and
+	 * 32nd bit should be 1, for 33.. to be 1.
+	 * Find the appropriate nmi_hz
+	 */
+	if ((((u64)cpu_khz * 1000) / retval) > 0x7fffffffULL) {
+		retval = ((u64)cpu_khz * 1000) / 0x7fffffffUL + 1;
+	}
+	return retval;
+}
+
 int __init check_nmi_watchdog (void)
 {
 	int *counts;
@@ -268,17 +285,8 @@ int __init check_nmi_watchdog (void)
 		struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);
 
 		nmi_hz = 1;
-		/*
-		 * On Intel CPUs with ARCH_PERFMON only 32 bits in the counter
-		 * are writable, with higher bits sign extending from bit 31.
-		 * So, we can only program the counter with 31 bit values and
-		 * 32nd bit should be 1, for 33.. to be 1.
-		 * Find the appropriate nmi_hz
-		 */
-		if (wd->perfctr_msr == MSR_ARCH_PERFMON_PERFCTR0 &&
-			((u64)cpu_khz * 1000) > 0x7fffffffULL) {
-			nmi_hz = ((u64)cpu_khz * 1000) / 0x7fffffffUL + 1;
-		}
+		if (wd->perfctr_msr == MSR_ARCH_PERFMON_PERFCTR0)
+			nmi_hz = adjust_for_32bit_ctr(nmi_hz);
 	}
 
 	kfree(counts);
@@ -360,6 +368,33 @@ void enable_timer_nmi_watchdog(void)
 	}
 }
 
+static void __acpi_nmi_disable(void *__unused)
+{
+	apic_write(APIC_LVT0, APIC_DM_NMI | APIC_LVT_MASKED);
+}
+
+/*
+ * Disable timer based NMIs on all CPUs:
+ */
+void acpi_nmi_disable(void)
+{
+	if (atomic_read(&nmi_active) && nmi_watchdog == NMI_IO_APIC)
+		on_each_cpu(__acpi_nmi_disable, NULL, 0, 1);
+}
+
+static void __acpi_nmi_enable(void *__unused)
+{
+	apic_write(APIC_LVT0, APIC_DM_NMI);
+}
+
+/*
+ * Enable timer based NMIs on all CPUs:
+ */
+void acpi_nmi_enable(void)
+{
+	if (atomic_read(&nmi_active) && nmi_watchdog == NMI_IO_APIC)
+		on_each_cpu(__acpi_nmi_enable, NULL, 0, 1);
+}
 #ifdef CONFIG_PM
 
 static int nmi_pm_active; /* nmi_active before suspend */
@@ -634,7 +669,9 @@ static int setup_intel_arch_watchdog(void)
 
 	/* setup the timer */
 	wrmsr(evntsel_msr, evntsel, 0);
-	wrmsrl(perfctr_msr, -((u64)cpu_khz * 1000 / nmi_hz));
+
+	nmi_hz = adjust_for_32bit_ctr(nmi_hz);
+	wrmsr(perfctr_msr, (u32)(-((u64)cpu_khz * 1000 / nmi_hz)), 0);
 
 	apic_write(APIC_LVTPC, APIC_DM_NMI);
 	evntsel |= ARCH_PERFMON_EVENTSEL0_ENABLE;
@@ -855,15 +892,23 @@ int __kprobes nmi_watchdog_tick(struct pt_regs * regs, unsigned reason)
 			dummy &= ~P4_CCCR_OVF;
 			wrmsrl(wd->cccr_msr, dummy);
 			apic_write(APIC_LVTPC, APIC_DM_NMI);
+			/* start the cycle over again */
+			wrmsrl(wd->perfctr_msr,
+			       -((u64)cpu_khz * 1000 / nmi_hz));
 		} else if (wd->perfctr_msr == MSR_ARCH_PERFMON_PERFCTR0) {
 			/*
 			 * ArchPerfom/Core Duo needs to re-unmask
 			 * the apic vector
 			 */
 			apic_write(APIC_LVTPC, APIC_DM_NMI);
+			/* ARCH_PERFMON has 32 bit counter writes */
+			wrmsr(wd->perfctr_msr,
+			      (u32)(-((u64)cpu_khz * 1000 / nmi_hz)), 0);
+		} else {
+			/* start the cycle over again */
+			wrmsrl(wd->perfctr_msr,
+			       -((u64)cpu_khz * 1000 / nmi_hz));
 		}
-		/* start the cycle over again */
-		wrmsrl(wd->perfctr_msr, -((u64)cpu_khz * 1000 / nmi_hz));
 		rc = 1;
 	} else if (nmi_watchdog == NMI_IO_APIC) {
 		/* don't know how to accurately check for this.
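
adjust_for_32bit_ctr() exists because ARCH_PERFMON counters are programmed by writing a negative count whose upper bits sign-extend from bit 31, so the period (cpu_khz * 1000) / nmi_hz must stay below 2^31; when it does not, nmi_hz is raised until it fits. A standalone check of that arithmetic under an assumed 3 GHz clock:

#include <stdio.h>

typedef unsigned long long u64;

/* the same clamp the patch adds: keep the programmed period below 2^31 */
static unsigned int adjust_for_32bit_ctr(unsigned int hz, u64 khz)
{
        if ((khz * 1000) / hz > 0x7fffffffULL)
                hz = (khz * 1000) / 0x7fffffffULL + 1;
        return hz;
}

int main(void)
{
        u64 cpu_khz = 3000000;          /* assumed 3 GHz clock */
        unsigned int nmi_hz = adjust_for_32bit_ctr(1, cpu_khz);

        /* 3e9 cycles/s does not fit in 31 bits, so nmi_hz becomes 2
           and the period drops to 1.5e9, which does */
        printf("nmi_hz=%u period=%llu\n", nmi_hz, cpu_khz * 1000 / nmi_hz);
        return 0;
}
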
diff --git a/arch/x86_64/kernel/pci-calgary.c b/arch/x86_64/kernel/pci-calgary.c
index 3d65b1d4c2b3..04480c3b68f5 100644
--- a/arch/x86_64/kernel/pci-calgary.c
+++ b/arch/x86_64/kernel/pci-calgary.c
@@ -138,6 +138,8 @@ static const unsigned long phb_debug_offsets[] = {
 
 #define PHB_DEBUG_STUFF_OFFSET	0x0020
 
+#define EMERGENCY_PAGES 32 /* = 128KB */
+
 unsigned int specified_table_size = TCE_TABLE_SIZE_UNSPECIFIED;
 static int translate_empty_slots __read_mostly = 0;
 static int calgary_detected __read_mostly = 0;
@@ -296,6 +298,16 @@ static void __iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
 {
 	unsigned long entry;
 	unsigned long badbit;
+	unsigned long badend;
+
+	/* were we called with bad_dma_address? */
+	badend = bad_dma_address + (EMERGENCY_PAGES * PAGE_SIZE);
+	if (unlikely((dma_addr >= bad_dma_address) && (dma_addr < badend))) {
+		printk(KERN_ERR "Calgary: driver tried unmapping bad DMA "
+		       "address 0x%Lx\n", dma_addr);
+		WARN_ON(1);
+		return;
+	}
 
 	entry = dma_addr >> PAGE_SHIFT;
 
@@ -656,8 +668,8 @@ static void __init calgary_reserve_regions(struct pci_dev *dev)
 	u64 start;
 	struct iommu_table *tbl = dev->sysdata;
 
-	/* reserve bad_dma_address in case it's a legal address */
-	iommu_range_reserve(tbl, bad_dma_address, 1);
+	/* reserve EMERGENCY_PAGES from bad_dma_address and up */
+	iommu_range_reserve(tbl, bad_dma_address, EMERGENCY_PAGES);
 
 	/* avoid the BIOS/VGA first 640KB-1MB region */
 	start = (640 * 1024);
@@ -1176,6 +1188,7 @@ int __init calgary_iommu_init(void)
 	}
 
 	force_iommu = 1;
+	bad_dma_address = 0x0;
 	dma_ops = &calgary_dma_ops;
 
 	return 0;
diff --git a/arch/x86_64/kernel/pci-dma.c b/arch/x86_64/kernel/pci-dma.c
index 683b7a5c1ab3..651ccfb06697 100644
--- a/arch/x86_64/kernel/pci-dma.c
+++ b/arch/x86_64/kernel/pci-dma.c
@@ -223,30 +223,10 @@ int dma_set_mask(struct device *dev, u64 mask)
 }
 EXPORT_SYMBOL(dma_set_mask);
 
-/* iommu=[size][,noagp][,off][,force][,noforce][,leak][,memaper[=order]][,merge]
-   [,forcesac][,fullflush][,nomerge][,biomerge]
-   size  set size of iommu (in bytes)
-   noagp don't initialize the AGP driver and use full aperture.
-   off   don't use the IOMMU
-   leak  turn on simple iommu leak tracing (only when CONFIG_IOMMU_LEAK is on)
-   memaper[=order] allocate an own aperture over RAM with size 32MB^order.
-   noforce don't force IOMMU usage. Default.
-   force  Force IOMMU.
-   merge  Do lazy merging. This may improve performance on some block devices.
-          Implies force (experimental)
-   biomerge Do merging at the BIO layer. This is more efficient than merge,
-            but should be only done with very big IOMMUs. Implies merge,force.
-   nomerge Don't do SG merging.
-   forcesac For SAC mode for masks <40bits (experimental)
-   fullflush Flush IOMMU on each allocation (default)
-   nofullflush Don't use IOMMU fullflush
-   allowed  overwrite iommu off workarounds for specific chipsets.
-   soft	 Use software bounce buffering (default for Intel machines)
-   noaperture Don't touch the aperture for AGP.
-   allowdac Allow DMA >4GB
-   nodac    Forbid DMA >4GB
-   panic    Force panic when IOMMU overflows
-*/
+/*
+ * See <Documentation/x86_64/boot-options.txt> for the iommu kernel parameter
+ * documentation.
+ */
 __init int iommu_setup(char *p)
 {
 	iommu_merge = 1;
diff --git a/arch/x86_64/kernel/pci-gart.c b/arch/x86_64/kernel/pci-gart.c
index fc1960f1f243..030eb3753358 100644
--- a/arch/x86_64/kernel/pci-gart.c
+++ b/arch/x86_64/kernel/pci-gart.c
@@ -185,7 +185,7 @@ static void iommu_full(struct device *dev, size_t size, int dir)
 static inline int need_iommu(struct device *dev, unsigned long addr, size_t size)
 {
 	u64 mask = *dev->dma_mask;
-	int high = addr + size >= mask;
+	int high = addr + size > mask;
 	int mmu = high;
 	if (force_iommu)
 		mmu = 1;
@@ -195,7 +195,7 @@ static inline int need_iommu(struct device *dev, unsigned long addr, size_t size
 static inline int nonforced_iommu(struct device *dev, unsigned long addr, size_t size)
 {
 	u64 mask = *dev->dma_mask;
-	int high = addr + size >= mask;
+	int high = addr + size > mask;
 	int mmu = high;
 	return mmu;
 }
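
Both GART predicates had an inclusive-bound off-by-one: with `>=`, a buffer whose end computed exactly to the DMA mask was needlessly forced through the IOMMU. A tiny boundary-case check of the corrected comparison (illustrative values):

#include <assert.h>
#include <stdint.h>

/* corrected test: only buffers extending beyond the mask need the IOMMU */
static int need_iommu(uint64_t addr, uint64_t size, uint64_t mask)
{
        return addr + size > mask;      /* was: >= mask */
}

int main(void)
{
        uint64_t mask = 0xffffffffULL;  /* 32-bit DMA mask */

        /* end == mask: the old >= wrongly forced this through the IOMMU */
        assert(!need_iommu(0xfffff000ULL, 0xfffULL, mask));
        /* end past the mask: still mapped through the IOMMU */
        assert(need_iommu(0xfffff000ULL, 0x1000ULL, mask));
        return 0;
}
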
diff --git a/arch/x86_64/kernel/ptrace.c b/arch/x86_64/kernel/ptrace.c
index addc14af0c56..4326a690a509 100644
--- a/arch/x86_64/kernel/ptrace.c
+++ b/arch/x86_64/kernel/ptrace.c
@@ -536,8 +536,12 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
 		}
 		ret = 0;
 		for (ui = 0; ui < sizeof(struct user_regs_struct); ui += sizeof(long)) {
-			ret |= __get_user(tmp, (unsigned long __user *) data);
-			putreg(child, ui, tmp);
+			ret = __get_user(tmp, (unsigned long __user *) data);
+			if (ret)
+				break;
+			ret = putreg(child, ui, tmp);
+			if (ret)
+				break;
 			data += sizeof(long);
 		}
 		break;
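
The PTRACE_SETREGS loop previously OR-ed return codes together and ignored putreg() failures entirely; OR-ing two different negative errnos yields a value that is neither, and registers kept being written after a fault. A small demonstration of why the accumulation was wrong (plain user-space C):

#include <stdio.h>

#define EFAULT 14       /* bad address, as returned by __get_user() */
#define EIO     5       /* invalid value, as returned by putreg()   */

int main(void)
{
        int ret = 0;

        ret |= -EFAULT;         /* first failure: ret == -14 */
        ret |= -EIO;            /* -14 | -5 == -5: the original error is gone */
        printf("accumulated ret = %d\n", ret);
        return 0;
}
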
diff --git a/arch/x86_64/kernel/setup.c b/arch/x86_64/kernel/setup.c
index 60477244d1a3..3d98b696881d 100644
--- a/arch/x86_64/kernel/setup.c
+++ b/arch/x86_64/kernel/setup.c
@@ -138,128 +138,6 @@ struct resource code_resource = {
 	.flags = IORESOURCE_RAM,
 };
 
-#define IORESOURCE_ROM (IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM)
-
-static struct resource system_rom_resource = {
-	.name = "System ROM",
-	.start = 0xf0000,
-	.end = 0xfffff,
-	.flags = IORESOURCE_ROM,
-};
-
-static struct resource extension_rom_resource = {
-	.name = "Extension ROM",
-	.start = 0xe0000,
-	.end = 0xeffff,
-	.flags = IORESOURCE_ROM,
-};
-
-static struct resource adapter_rom_resources[] = {
-	{ .name = "Adapter ROM", .start = 0xc8000, .end = 0,
-		.flags = IORESOURCE_ROM },
-	{ .name = "Adapter ROM", .start = 0, .end = 0,
-		.flags = IORESOURCE_ROM },
-	{ .name = "Adapter ROM", .start = 0, .end = 0,
-		.flags = IORESOURCE_ROM },
-	{ .name = "Adapter ROM", .start = 0, .end = 0,
-		.flags = IORESOURCE_ROM },
-	{ .name = "Adapter ROM", .start = 0, .end = 0,
-		.flags = IORESOURCE_ROM },
-	{ .name = "Adapter ROM", .start = 0, .end = 0,
-		.flags = IORESOURCE_ROM }
-};
-
-static struct resource video_rom_resource = {
-	.name = "Video ROM",
-	.start = 0xc0000,
-	.end = 0xc7fff,
-	.flags = IORESOURCE_ROM,
-};
-
-static struct resource video_ram_resource = {
-	.name = "Video RAM area",
-	.start = 0xa0000,
-	.end = 0xbffff,
-	.flags = IORESOURCE_RAM,
-};
-
-#define romsignature(x) (*(unsigned short *)(x) == 0xaa55)
-
-static int __init romchecksum(unsigned char *rom, unsigned long length)
-{
-	unsigned char *p, sum = 0;
-
-	for (p = rom; p < rom + length; p++)
-		sum += *p;
-	return sum == 0;
-}
-
-static void __init probe_roms(void)
-{
-	unsigned long start, length, upper;
-	unsigned char *rom;
-	int i;
-
-	/* video rom */
-	upper = adapter_rom_resources[0].start;
-	for (start = video_rom_resource.start; start < upper; start += 2048) {
-		rom = isa_bus_to_virt(start);
-		if (!romsignature(rom))
-			continue;
-
-		video_rom_resource.start = start;
-
-		/* 0 < length <= 0x7f * 512, historically */
-		length = rom[2] * 512;
-
-		/* if checksum okay, trust length byte */
-		if (length && romchecksum(rom, length))
-			video_rom_resource.end = start + length - 1;
-
-		request_resource(&iomem_resource, &video_rom_resource);
-		break;
-	}
-
-	start = (video_rom_resource.end + 1 + 2047) & ~2047UL;
-	if (start < upper)
-		start = upper;
-
-	/* system rom */
-	request_resource(&iomem_resource, &system_rom_resource);
-	upper = system_rom_resource.start;
-
-	/* check for extension rom (ignore length byte!) */
-	rom = isa_bus_to_virt(extension_rom_resource.start);
-	if (romsignature(rom)) {
-		length = extension_rom_resource.end - extension_rom_resource.start + 1;
-		if (romchecksum(rom, length)) {
-			request_resource(&iomem_resource, &extension_rom_resource);
-			upper = extension_rom_resource.start;
-		}
-	}
-
-	/* check for adapter roms on 2k boundaries */
-	for (i = 0; i < ARRAY_SIZE(adapter_rom_resources) && start < upper;
-	     start += 2048) {
-		rom = isa_bus_to_virt(start);
-		if (!romsignature(rom))
-			continue;
-
-		/* 0 < length <= 0x7f * 512, historically */
-		length = rom[2] * 512;
-
-		/* but accept any length that fits if checksum okay */
-		if (!length || start + length > upper || !romchecksum(rom, length))
-			continue;
-
-		adapter_rom_resources[i].start = start;
-		adapter_rom_resources[i].end = start + length - 1;
-		request_resource(&iomem_resource, &adapter_rom_resources[i]);
-
-		start = adapter_rom_resources[i++].end & ~2047UL;
-	}
-}
-
 #ifdef CONFIG_PROC_VMCORE
 /* elfcorehdr= specifies the location of elf core header
  * stored by the crashed kernel. This option will be passed
@@ -444,6 +322,11 @@ void __init setup_arch(char **cmdline_p)
 	/* reserve ebda region */
 	if (ebda_addr)
 		reserve_bootmem_generic(ebda_addr, ebda_size);
+#ifdef CONFIG_NUMA
+	/* reserve nodemap region */
+	if (nodemap_addr)
+		reserve_bootmem_generic(nodemap_addr, nodemap_size);
+#endif
 
 #ifdef CONFIG_SMP
 	/*
@@ -519,15 +402,11 @@ void __init setup_arch(char **cmdline_p)
 	init_apic_mappings();
 
 	/*
-	 * Request address space for all standard RAM and ROM resources
-	 * and also for regions reported as reserved by the e820.
+	 * We trust e820 completely. No explicit ROM probing in memory.
 	 */
-	probe_roms();
 	e820_reserve_resources();
 	e820_mark_nosave_regions();
 
-	request_resource(&iomem_resource, &video_ram_resource);
-
 	{
 		unsigned i;
 		/* request I/O space for devices used on all i[345]86 PCs */
@@ -1063,7 +942,8 @@ static int show_cpuinfo(struct seq_file *m, void *v)
 	        NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
 	        NULL, NULL, NULL, "syscall", NULL, NULL, NULL, NULL,
 	        NULL, NULL, NULL, NULL, "nx", NULL, "mmxext", NULL,
-	        NULL, "fxsr_opt", NULL, "rdtscp", NULL, "lm", "3dnowext", "3dnow",
+	        NULL, "fxsr_opt", "pdpe1gb", "rdtscp", NULL, "lm",
+		"3dnowext", "3dnow",
 
 		/* Transmeta-defined */
 		"recovery", "longrun", NULL, "lrti", NULL, NULL, NULL, NULL,
@@ -1081,7 +961,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
 		/* Intel-defined (#2) */
 		"pni", NULL, NULL, "monitor", "ds_cpl", "vmx", "smx", "est",
 		"tm2", "ssse3", "cid", NULL, NULL, "cx16", "xtpr", NULL,
-		NULL, NULL, "dca", NULL, NULL, NULL, NULL, NULL,
+		NULL, NULL, "dca", NULL, NULL, NULL, NULL, "popcnt",
 		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
 
 		/* VIA/Cyrix/Centaur-defined */
@@ -1091,8 +971,10 @@ static int show_cpuinfo(struct seq_file *m, void *v)
 		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
 
 		/* AMD-defined (#2) */
-		"lahf_lm", "cmp_legacy", "svm", NULL, "cr8_legacy", NULL, NULL, NULL,
-		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+		"lahf_lm", "cmp_legacy", "svm", "extapic", "cr8_legacy",
+		"altmovcr8", "abm", "sse4a",
+		"misalignsse", "3dnowprefetch",
+		"osvw", "ibs", NULL, NULL, NULL, NULL,
 		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
 		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
 	};
@@ -1103,6 +985,9 @@ static int show_cpuinfo(struct seq_file *m, void *v)
 		"ttp",  /* thermal trip */
 		"tm",
 		"stc",
+		"100mhzsteps",
+		"hwpstate",
+		NULL,	/* tsc invariant mapped to constant_tsc */
 		NULL,
 		/* nothing */	/* constant_tsc - moved to flags */
 	};
@@ -1219,23 +1104,3 @@ struct seq_operations cpuinfo_op = {
 	.stop =	c_stop,
 	.show =	show_cpuinfo,
 };
-
-#if defined(CONFIG_INPUT_PCSPKR) || defined(CONFIG_INPUT_PCSPKR_MODULE)
-#include <linux/platform_device.h>
-static __init int add_pcspkr(void)
-{
-	struct platform_device *pd;
-	int ret;
-
-	pd = platform_device_alloc("pcspkr", -1);
-	if (!pd)
-		return -ENOMEM;
-
-	ret = platform_device_add(pd);
-	if (ret)
-		platform_device_put(pd);
-
-	return ret;
-}
-device_initcall(add_pcspkr);
-#endif
diff --git a/arch/x86_64/kernel/setup64.c b/arch/x86_64/kernel/setup64.c
index 8c4b80fe71a1..6a70b55f719d 100644
--- a/arch/x86_64/kernel/setup64.c
+++ b/arch/x86_64/kernel/setup64.c
@@ -37,7 +37,6 @@ struct desc_ptr idt_descr = { 256 * 16 - 1, (unsigned long) idt_table };
 char boot_cpu_stack[IRQSTACKSIZE] __attribute__((section(".bss.page_aligned")));
 
 unsigned long __supported_pte_mask __read_mostly = ~0UL;
-EXPORT_SYMBOL(__supported_pte_mask);
 static int do_not_nx __cpuinitdata = 0;
 
 /* noexec=on|off
diff --git a/arch/x86_64/kernel/stacktrace.c b/arch/x86_64/kernel/stacktrace.c
index 6026b31d037e..65ac2c6b34a6 100644
--- a/arch/x86_64/kernel/stacktrace.c
+++ b/arch/x86_64/kernel/stacktrace.c
@@ -32,7 +32,7 @@ static void save_stack_address(void *data, unsigned long addr)
 		trace->skip--;
 		return;
 	}
-	if (trace->nr_entries < trace->max_entries - 1)
+	if (trace->nr_entries < trace->max_entries)
 		trace->entries[trace->nr_entries++] = addr;
 }
 
@@ -49,7 +49,8 @@ static struct stacktrace_ops save_stack_ops = {
 void save_stack_trace(struct stack_trace *trace, struct task_struct *task)
 {
 	dump_trace(task, NULL, NULL, &save_stack_ops, trace);
-	trace->entries[trace->nr_entries++] = ULONG_MAX;
+	if (trace->nr_entries < trace->max_entries)
+		trace->entries[trace->nr_entries++] = ULONG_MAX;
 }
 EXPORT_SYMBOL(save_stack_trace);
 
diff --git a/arch/x86_64/kernel/time.c b/arch/x86_64/kernel/time.c
index 335cc91c49b7..3cc6886f1fb7 100644
--- a/arch/x86_64/kernel/time.c
+++ b/arch/x86_64/kernel/time.c
@@ -657,6 +657,7 @@ core_initcall(cpufreq_tsc);
 
 #define TICK_COUNT 100000000
 #define TICK_MIN   5000
+#define MAX_READ_RETRIES 5
 
 /*
  * Some platforms take periodic SMI interrupts with 5ms duration. Make sure none
@@ -664,13 +665,17 @@ core_initcall(cpufreq_tsc);
  */
 static void __init read_hpet_tsc(int *hpet, int *tsc)
 {
-	int tsc1, tsc2, hpet1;
+	int tsc1, tsc2, hpet1, retries = 0;
+	static int msg;
 
 	do {
 		tsc1 = get_cycles_sync();
 		hpet1 = hpet_readl(HPET_COUNTER);
 		tsc2 = get_cycles_sync();
-	} while (tsc2 - tsc1 > TICK_MIN);
+	} while (tsc2 - tsc1 > TICK_MIN && retries++ < MAX_READ_RETRIES);
+	if (retries >= MAX_READ_RETRIES && !msg++)
+		printk(KERN_WARNING
+			"hpet.c: exceeded max retries to read HPET & TSC\n");
 	*hpet = hpet1;
 	*tsc = tsc2;
 }
@@ -1221,8 +1226,9 @@ static void hpet_rtc_timer_reinit(void)
 		if (PIE_on)
 			PIE_count += lost_ints;
 
-		printk(KERN_WARNING "rtc: lost some interrupts at %ldHz.\n",
-		       hpet_rtc_int_freq);
+		if (printk_ratelimit())
+			printk(KERN_WARNING "rtc: lost some interrupts at %ldHz.\n",
+			       hpet_rtc_int_freq);
 	}
 }
 
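
read_hpet_tsc() used to spin until it saw a TSC delta under TICK_MIN, which never happens while long SMIs keep firing; the fix bounds the loop at MAX_READ_RETRIES and warns once via the static msg counter. A condensed user-space sketch of the bounded-retry pattern (the read functions are stand-ins whose deltas always exceed TICK_MIN, as under an SMI storm):

#include <stdio.h>

#define TICK_MIN         5000
#define MAX_READ_RETRIES 5

/* stand-ins for get_cycles_sync()/hpet_readl(); values are made up */
static int now;
static int read_tsc(void)  { return now += 6000; }
static int read_hpet(void) { return 42; }

static void read_hpet_tsc(int *hpet, int *tsc)
{
        int tsc1, tsc2, hpet1, retries = 0;
        static int msg;                 /* warn once, ever */

        do {
                tsc1 = read_tsc();
                hpet1 = read_hpet();
                tsc2 = read_tsc();
        } while (tsc2 - tsc1 > TICK_MIN && retries++ < MAX_READ_RETRIES);

        if (retries >= MAX_READ_RETRIES && !msg++)
                fprintf(stderr, "exceeded max retries to read HPET & TSC\n");
        *hpet = hpet1;
        *tsc = tsc2;
}

int main(void)
{
        int hpet, tsc;

        read_hpet_tsc(&hpet, &tsc);     /* guaranteed to terminate now */
        printf("hpet=%d tsc=%d\n", hpet, tsc);
        return 0;
}
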
diff --git a/arch/x86_64/kernel/x8664_ksyms.c b/arch/x86_64/kernel/x8664_ksyms.c
index 6d77e4797a47..0dffae69f4ad 100644
--- a/arch/x86_64/kernel/x8664_ksyms.c
+++ b/arch/x86_64/kernel/x8664_ksyms.c
@@ -26,6 +26,7 @@ EXPORT_SYMBOL(__put_user_4);
 EXPORT_SYMBOL(__put_user_8);
 
 EXPORT_SYMBOL(copy_user_generic);
+EXPORT_SYMBOL(__copy_user_nocache);
 EXPORT_SYMBOL(copy_from_user);
 EXPORT_SYMBOL(copy_to_user);
 EXPORT_SYMBOL(__copy_from_user_inatomic);
@@ -34,8 +35,8 @@ EXPORT_SYMBOL(copy_page);
 EXPORT_SYMBOL(clear_page);
 
 #ifdef CONFIG_SMP
-extern void FASTCALL( __write_lock_failed(rwlock_t *rw));
-extern void FASTCALL( __read_lock_failed(rwlock_t *rw));
+extern void __write_lock_failed(rwlock_t *rw);
+extern void __read_lock_failed(rwlock_t *rw);
 EXPORT_SYMBOL(__write_lock_failed);
 EXPORT_SYMBOL(__read_lock_failed);
 #endif