Diffstat (limited to 'arch/x86_64/kernel')
-rw-r--r--  arch/x86_64/kernel/acpi/Makefile        5
-rw-r--r--  arch/x86_64/kernel/acpi/processor.c    72
-rw-r--r--  arch/x86_64/kernel/apic.c              89
-rw-r--r--  arch/x86_64/kernel/entry.S              7
-rw-r--r--  arch/x86_64/kernel/io_apic.c           18
-rw-r--r--  arch/x86_64/kernel/mce.c                2
-rw-r--r--  arch/x86_64/kernel/mpparse.c            8
-rw-r--r--  arch/x86_64/kernel/nmi.c                7
-rw-r--r--  arch/x86_64/kernel/pci-dma.c            3
-rw-r--r--  arch/x86_64/kernel/pci-gart.c          19
-rw-r--r--  arch/x86_64/kernel/pci-nommu.c          7
-rw-r--r--  arch/x86_64/kernel/pci-swiotlb.c        2
-rw-r--r--  arch/x86_64/kernel/pmtimer.c           25
-rw-r--r--  arch/x86_64/kernel/process.c            4
-rw-r--r--  arch/x86_64/kernel/setup.c              8
-rw-r--r--  arch/x86_64/kernel/time.c              59
-rw-r--r--  arch/x86_64/kernel/traps.c             21
-rw-r--r--  arch/x86_64/kernel/vmlinux.lds.S       10
18 files changed, 308 insertions, 58 deletions
diff --git a/arch/x86_64/kernel/acpi/Makefile b/arch/x86_64/kernel/acpi/Makefile
index 7da9ace890bd..4fe97071f297 100644
--- a/arch/x86_64/kernel/acpi/Makefile
+++ b/arch/x86_64/kernel/acpi/Makefile
@@ -1,3 +1,8 @@
 obj-y := boot.o
 boot-y := ../../../i386/kernel/acpi/boot.o
 obj-$(CONFIG_ACPI_SLEEP) += sleep.o wakeup.o
+
+ifneq ($(CONFIG_ACPI_PROCESSOR),)
+obj-y += processor.o
+endif
+
diff --git a/arch/x86_64/kernel/acpi/processor.c b/arch/x86_64/kernel/acpi/processor.c
new file mode 100644
index 000000000000..3bdc2baa5bb1
--- /dev/null
+++ b/arch/x86_64/kernel/acpi/processor.c
@@ -0,0 +1,72 @@
+/*
+ * arch/x86_64/kernel/acpi/processor.c
+ *
+ * Copyright (C) 2005 Intel Corporation
+ *   Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
+ *   - Added _PDC for platforms with Intel CPUs
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/acpi.h>
+
+#include <acpi/processor.h>
+#include <asm/acpi.h>
+
+static void init_intel_pdc(struct acpi_processor *pr, struct cpuinfo_x86 *c)
+{
+	struct acpi_object_list *obj_list;
+	union acpi_object *obj;
+	u32 *buf;
+
+	/* allocate and initialize pdc. It will be used later. */
+	obj_list = kmalloc(sizeof(struct acpi_object_list), GFP_KERNEL);
+	if (!obj_list) {
+		printk(KERN_ERR "Memory allocation error\n");
+		return;
+	}
+
+	obj = kmalloc(sizeof(union acpi_object), GFP_KERNEL);
+	if (!obj) {
+		printk(KERN_ERR "Memory allocation error\n");
+		kfree(obj_list);
+		return;
+	}
+
+	buf = kmalloc(12, GFP_KERNEL);
+	if (!buf) {
+		printk(KERN_ERR "Memory allocation error\n");
+		kfree(obj);
+		kfree(obj_list);
+		return;
+	}
+
+	buf[0] = ACPI_PDC_REVISION_ID;
+	buf[1] = 1;
+	buf[2] = ACPI_PDC_EST_CAPABILITY_SMP;
+
+	obj->type = ACPI_TYPE_BUFFER;
+	obj->buffer.length = 12;
+	obj->buffer.pointer = (u8 *) buf;
+	obj_list->count = 1;
+	obj_list->pointer = obj;
+	pr->pdc = obj_list;
+
+	return;
+}
+
+/* Initialize _PDC data based on the CPU vendor */
+void arch_acpi_processor_init_pdc(struct acpi_processor *pr)
+{
+	unsigned int cpu = pr->id;
+	struct cpuinfo_x86 *c = cpu_data + cpu;
+
+	pr->pdc = NULL;
+	if (c->x86_vendor == X86_VENDOR_INTEL && cpu_has(c, X86_FEATURE_EST))
+		init_intel_pdc(pr, c);
+
+	return;
+}
+
+EXPORT_SYMBOL(arch_acpi_processor_init_pdc);
diff --git a/arch/x86_64/kernel/apic.c b/arch/x86_64/kernel/apic.c
index 5d3c5b07b8db..6147770b4347 100644
--- a/arch/x86_64/kernel/apic.c
+++ b/arch/x86_64/kernel/apic.c
@@ -35,8 +35,12 @@
 #include <asm/mach_apic.h>
 #include <asm/nmi.h>
 #include <asm/idle.h>
+#include <asm/proto.h>
+#include <asm/timex.h>
 
 int apic_verbosity;
+int apic_runs_main_timer;
+int apic_calibrate_pmtmr __initdata;
 
 int disable_apic_timer __initdata;
 
@@ -68,6 +72,26 @@ int get_maxlvt(void)
 	return maxlvt;
 }
 
+/*
+ * 'what should we do if we get a hw irq event on an illegal vector'.
+ * each architecture has to answer this themselves.
+ */
+void ack_bad_irq(unsigned int irq)
+{
+	printk("unexpected IRQ trap at vector %02x\n", irq);
+	/*
+	 * Currently unexpected vectors happen only on SMP and APIC.
+	 * We _must_ ack these because every local APIC has only N
+	 * irq slots per priority level, and a 'hanging, unacked' IRQ
+	 * holds up an irq slot - in excessive cases (when multiple
+	 * unexpected vectors occur) that might lock up the APIC
+	 * completely.
+	 * But don't ack when the APIC is disabled. -AK
+	 */
+	if (!disable_apic)
+		ack_APIC_irq();
+}
+
 void clear_local_APIC(void)
 {
 	int maxlvt;
@@ -702,9 +726,17 @@ static void setup_APIC_timer(unsigned int clocks)
 		c2 |= inb_p(0x40) << 8;
 	} while (c2 - c1 < 300);
 	}
-
 	__setup_APIC_LVTT(clocks);
-
+	/* Turn off PIT interrupt if we use APIC timer as main timer.
+	   Only works with the PM timer right now
+	   TBD fix it for HPET too. */
+	if (vxtime.mode == VXTIME_PMTMR &&
+		smp_processor_id() == boot_cpu_id &&
+		apic_runs_main_timer == 1 &&
+		!cpu_isset(boot_cpu_id, timer_interrupt_broadcast_ipi_mask)) {
+		stop_timer_interrupt();
+		apic_runs_main_timer++;
+	}
 	local_irq_restore(flags);
 }
 
@@ -735,14 +767,27 @@ static int __init calibrate_APIC_clock(void)
 	__setup_APIC_LVTT(1000000000);
 
 	apic_start = apic_read(APIC_TMCCT);
-	rdtscl(tsc_start);
-
-	do {
-		apic = apic_read(APIC_TMCCT);
-		rdtscl(tsc);
-	} while ((tsc - tsc_start) < TICK_COUNT && (apic - apic_start) < TICK_COUNT);
+#ifdef CONFIG_X86_PM_TIMER
+	if (apic_calibrate_pmtmr && pmtmr_ioport) {
+		pmtimer_wait(5000);  /* 5ms wait */
+		apic = apic_read(APIC_TMCCT);
+		result = (apic_start - apic) * 1000L / 5;
+	} else
+#endif
+	{
+		rdtscl(tsc_start);
+
+		do {
+			apic = apic_read(APIC_TMCCT);
+			rdtscl(tsc);
+		} while ((tsc - tsc_start) < TICK_COUNT &&
+			(apic - apic_start) < TICK_COUNT);
+
+		result = (apic_start - apic) * 1000L * cpu_khz /
+					(tsc - tsc_start);
+	}
+	printk("result %d\n", result);
 
-	result = (apic_start - apic) * 1000L * cpu_khz / (tsc - tsc_start);
 
 	printk(KERN_INFO "Detected %d.%03d MHz APIC timer.\n",
 		result / 1000 / 1000, result / 1000 % 1000);
@@ -872,6 +917,8 @@ void smp_local_timer_interrupt(struct pt_regs *regs)
 #ifdef CONFIG_SMP
 	update_process_times(user_mode(regs));
 #endif
+	if (apic_runs_main_timer > 1 && smp_processor_id() == boot_cpu_id)
+		main_timer_handler(regs);
 	/*
 	 * We take the 'long' return path, and there every subsystem
 	 * grabs the appropriate locks (kernel lock/ irq lock).
@@ -924,7 +971,7 @@ void smp_apic_timer_interrupt(struct pt_regs *regs)
 	 * multi-chassis. Use available data to take a good guess.
 	 * If in doubt, go HPET.
 	 */
-__init int oem_force_hpet_timer(void)
+__cpuinit int oem_force_hpet_timer(void)
 {
 	int i, clusters, zeros;
 	unsigned id;
@@ -1081,10 +1128,34 @@ static __init int setup_nolapic(char *str)
 
 static __init int setup_noapictimer(char *str)
 {
+	if (str[0] != ' ' && str[0] != 0)
+		return -1;
 	disable_apic_timer = 1;
 	return 0;
 }
 
+static __init int setup_apicmaintimer(char *str)
+{
+	apic_runs_main_timer = 1;
+	nohpet = 1;
+	return 0;
+}
+__setup("apicmaintimer", setup_apicmaintimer);
+
+static __init int setup_noapicmaintimer(char *str)
+{
+	apic_runs_main_timer = -1;
+	return 0;
+}
+__setup("noapicmaintimer", setup_noapicmaintimer);
+
+static __init int setup_apicpmtimer(char *s)
+{
+	apic_calibrate_pmtmr = 1;
+	return setup_apicmaintimer(NULL);
+}
+__setup("apicpmtimer", setup_apicpmtimer);
+
 /* dummy parsing: see setup.c */
 
 __setup("disableapic", setup_disableapic);
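Aside on the PM-timer calibration branch above: calibrate_APIC_clock() measures how far the down-counting local APIC timer advances during a 5 ms ACPI PM-timer wait and scales that into ticks per second (elapsed * 1000 / 5 is elapsed per 5 ms times 200). The following is a minimal standalone sketch of that arithmetic only; the helper name and the sample counter values are invented for illustration and are not part of the patch.

/* Sketch: convert "APIC ticks elapsed over wait_us microseconds" into Hz. */
#include <stdio.h>

static unsigned int apic_ticks_per_second(unsigned int apic_start,
					  unsigned int apic_end,
					  unsigned int wait_us)
{
	/* The APIC timer counts down, so start - end is the elapsed count. */
	unsigned int elapsed = apic_start - apic_end;

	/* ticks/second = elapsed / (wait_us / 1e6) */
	return (unsigned int)((unsigned long long)elapsed * 1000000ULL / wait_us);
}

int main(void)
{
	/* Hypothetical numbers: ~1,000,000 ticks elapsed during a 5000 us wait. */
	unsigned int result = apic_ticks_per_second(0xffffffffu,
						    0xffffffffu - 1000000u, 5000);

	/* Same reporting format as the patch's printk. */
	printf("Detected %u.%03u MHz APIC timer.\n",
	       result / 1000 / 1000, result / 1000 % 1000);
	return 0;
}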
diff --git a/arch/x86_64/kernel/entry.S b/arch/x86_64/kernel/entry.S
index dbdba56e8faa..b150c87a08c6 100644
--- a/arch/x86_64/kernel/entry.S
+++ b/arch/x86_64/kernel/entry.S
@@ -499,7 +499,9 @@ ENTRY(stub_rt_sigreturn)
 	movq %gs:pda_irqstackptr,%rax
 	cmoveq %rax,%rsp /*todo This needs CFI annotation! */
 	pushq %rdi # save old stack
+#ifndef CONFIG_DEBUG_INFO
 	CFI_ADJUST_CFA_OFFSET 8
+#endif
 	call \func
 	.endm
 
@@ -509,7 +511,9 @@ ENTRY(common_interrupt)
 	/* 0(%rsp): oldrsp-ARGOFFSET */
 ret_from_intr:
 	popq %rdi
+#ifndef CONFIG_DEBUG_INFO
 	CFI_ADJUST_CFA_OFFSET -8
+#endif
 	cli
 	decl %gs:pda_irqcount
 #ifdef CONFIG_DEBUG_INFO
@@ -922,7 +926,7 @@ KPROBE_ENTRY(debug)
 	.previous .text
 
 	/* runs on exception stack */
-ENTRY(nmi)
+KPROBE_ENTRY(nmi)
 	INTR_FRAME
 	pushq $-1
 	CFI_ADJUST_CFA_OFFSET 8
@@ -969,6 +973,7 @@ paranoid_schedule:
 	cli
 	jmp paranoid_userspace
 	CFI_ENDPROC
+	.previous .text
 
 KPROBE_ENTRY(int3)
 	INTR_FRAME
diff --git a/arch/x86_64/kernel/io_apic.c b/arch/x86_64/kernel/io_apic.c
index e8cf44ef8778..4282d72b2a26 100644
--- a/arch/x86_64/kernel/io_apic.c
+++ b/arch/x86_64/kernel/io_apic.c
@@ -304,6 +304,14 @@ void __init check_ioapic(void)
 #endif
 					/* RED-PEN skip them on mptables too? */
 					return;
+				case PCI_VENDOR_ID_ATI:
+					if (apic_runs_main_timer != 0)
+						break;
+					printk(KERN_INFO
+			 "ATI board detected. Using APIC/PM timer.\n");
+					apic_runs_main_timer = 1;
+					nohpet = 1;
+					return;
 				}
 
 				/* No multi-function device? */
@@ -2027,7 +2035,7 @@ int __init io_apic_get_redir_entries (int ioapic)
 }
 
 
-int io_apic_set_pci_routing (int ioapic, int pin, int irq, int edge_level, int active_high_low)
+int io_apic_set_pci_routing (int ioapic, int pin, int irq, int triggering, int polarity)
 {
 	struct IO_APIC_route_entry entry;
 	unsigned long flags;
@@ -2049,8 +2057,8 @@ int io_apic_set_pci_routing (int ioapic, int pin, int irq, int edge_level, int a
 	entry.delivery_mode = INT_DELIVERY_MODE;
 	entry.dest_mode = INT_DEST_MODE;
 	entry.dest.logical.logical_dest = cpu_mask_to_apicid(TARGET_CPUS);
-	entry.trigger = edge_level;
-	entry.polarity = active_high_low;
+	entry.trigger = triggering;
+	entry.polarity = polarity;
 	entry.mask = 1; /* Disabled (masked) */
 
 	irq = gsi_irq_sharing(irq);
@@ -2065,9 +2073,9 @@ int io_apic_set_pci_routing (int ioapic, int pin, int irq, int edge_level, int a
 	apic_printk(APIC_VERBOSE,KERN_DEBUG "IOAPIC[%d]: Set PCI routing entry (%d-%d -> 0x%x -> "
 		"IRQ %d Mode:%i Active:%i)\n", ioapic,
 		mp_ioapics[ioapic].mpc_apicid, pin, entry.vector, irq,
-		edge_level, active_high_low);
+		triggering, polarity);
 
-	ioapic_register_intr(irq, entry.vector, edge_level);
+	ioapic_register_intr(irq, entry.vector, triggering);
 
 	if (!ioapic && (irq < 16))
 		disable_8259A_irq(irq);
diff --git a/arch/x86_64/kernel/mce.c b/arch/x86_64/kernel/mce.c
index 13a2eada6c95..b8b9529fa89e 100644
--- a/arch/x86_64/kernel/mce.c
+++ b/arch/x86_64/kernel/mce.c
@@ -380,7 +380,7 @@ static void __cpuinit mce_cpu_features(struct cpuinfo_x86 *c)
 	 */
 void __cpuinit mcheck_init(struct cpuinfo_x86 *c)
 {
-	static cpumask_t mce_cpus __initdata = CPU_MASK_NONE;
+	static cpumask_t mce_cpus = CPU_MASK_NONE;
 
 	mce_cpu_quirks(c);
 
diff --git a/arch/x86_64/kernel/mpparse.c b/arch/x86_64/kernel/mpparse.c
index 1105250bf02c..dc49bfb6db0a 100644
--- a/arch/x86_64/kernel/mpparse.c
+++ b/arch/x86_64/kernel/mpparse.c
@@ -915,7 +915,7 @@ void __init mp_config_acpi_legacy_irqs (void)
 
 #define MAX_GSI_NUM	4096
 
-int mp_register_gsi(u32 gsi, int edge_level, int active_high_low)
+int mp_register_gsi(u32 gsi, int triggering, int polarity)
 {
 	int ioapic = -1;
 	int ioapic_pin = 0;
@@ -964,7 +964,7 @@ int mp_register_gsi(u32 gsi, int edge_level, int active_high_low)
 
 	mp_ioapic_routing[ioapic].pin_programmed[idx] |= (1<<bit);
 
-	if (edge_level) {
+	if (triggering == ACPI_LEVEL_SENSITIVE) {
 		/*
 		 * For PCI devices assign IRQs in order, avoiding gaps
 		 * due to unused I/O APIC pins.
@@ -986,8 +986,8 @@ int mp_register_gsi(u32 gsi, int edge_level, int active_high_low)
 	}
 
 	io_apic_set_pci_routing(ioapic, ioapic_pin, gsi,
-		edge_level == ACPI_EDGE_SENSITIVE ? 0 : 1,
-		active_high_low == ACPI_ACTIVE_HIGH ? 0 : 1);
+		triggering == ACPI_EDGE_SENSITIVE ? 0 : 1,
+		polarity == ACPI_ACTIVE_HIGH ? 0 : 1);
 	return gsi;
 }
 
diff --git a/arch/x86_64/kernel/nmi.c b/arch/x86_64/kernel/nmi.c
index 5fae6f0cd994..8be407a1f62d 100644
--- a/arch/x86_64/kernel/nmi.c
+++ b/arch/x86_64/kernel/nmi.c
@@ -24,6 +24,7 @@
 #include <linux/sysdev.h>
 #include <linux/nmi.h>
 #include <linux/sysctl.h>
+#include <linux/kprobes.h>
 
 #include <asm/smp.h>
 #include <asm/mtrr.h>
@@ -468,7 +469,7 @@ void touch_nmi_watchdog (void)
 	touch_softlockup_watchdog();
 }
 
-void nmi_watchdog_tick (struct pt_regs * regs, unsigned reason)
+void __kprobes nmi_watchdog_tick(struct pt_regs * regs, unsigned reason)
 {
 	int sum;
 	int touched = 0;
@@ -512,14 +513,14 @@ void nmi_watchdog_tick (struct pt_regs * regs, unsigned reason)
 	}
 }
 
-static int dummy_nmi_callback(struct pt_regs * regs, int cpu)
+static __kprobes int dummy_nmi_callback(struct pt_regs * regs, int cpu)
 {
 	return 0;
 }
 
 static nmi_callback_t nmi_callback = dummy_nmi_callback;
 
-asmlinkage void do_nmi(struct pt_regs * regs, long error_code)
+asmlinkage __kprobes void do_nmi(struct pt_regs * regs, long error_code)
 {
 	int cpu = safe_smp_processor_id();
 
diff --git a/arch/x86_64/kernel/pci-dma.c b/arch/x86_64/kernel/pci-dma.c
index 2f5d8328e2b9..4ed391edd47a 100644
--- a/arch/x86_64/kernel/pci-dma.c
+++ b/arch/x86_64/kernel/pci-dma.c
@@ -107,6 +107,9 @@ dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
 		goto again;
 	}
 
+	/* Let low level make its own zone decisions */
+	gfp &= ~(GFP_DMA32|GFP_DMA);
+
 	if (dma_ops->alloc_coherent)
 		return dma_ops->alloc_coherent(dev, size,
 					       dma_handle, gfp);
diff --git a/arch/x86_64/kernel/pci-gart.c b/arch/x86_64/kernel/pci-gart.c
index c37fc7726ba6..2fe23a6c361b 100644
--- a/arch/x86_64/kernel/pci-gart.c
+++ b/arch/x86_64/kernel/pci-gart.c
@@ -457,9 +457,12 @@ int gart_map_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
 error:
 	flush_gart(NULL);
 	gart_unmap_sg(dev, sg, nents, dir);
-	/* When it was forced try again unforced */
-	if (force_iommu)
-		return dma_map_sg_nonforce(dev, sg, nents, dir);
+	/* When it was forced or merged try again in a dumb way */
+	if (force_iommu || iommu_merge) {
+		out = dma_map_sg_nonforce(dev, sg, nents, dir);
+		if (out > 0)
+			return out;
+	}
 	if (panic_on_overflow)
 		panic("dma_map_sg: overflow on %lu pages\n", pages);
 	iommu_full(dev, pages << PAGE_SHIFT, dir);
@@ -642,9 +645,18 @@ static int __init pci_iommu_init(void)
 	    (no_agp && init_k8_gatt(&info) < 0)) {
 		no_iommu = 1;
 		no_iommu_init();
+		printk(KERN_INFO "PCI-DMA: Disabling IOMMU.\n");
+		if (end_pfn > MAX_DMA32_PFN) {
+			printk(KERN_ERR "WARNING more than 4GB of memory "
+					"but IOMMU not compiled in.\n"
+			       KERN_ERR "WARNING 32bit PCI may malfunction.\n"
+			       KERN_ERR "You might want to enable "
+				       "CONFIG_GART_IOMMU\n");
+		}
 		return -1;
 	}
 
+	printk(KERN_INFO "PCI-DMA: using GART IOMMU.\n");
 	aper_size = info.aper_size * 1024 * 1024;
 	iommu_size = check_iommu_size(info.aper_base, aper_size);
 	iommu_pages = iommu_size >> PAGE_SHIFT;
@@ -718,7 +730,6 @@ static int __init pci_iommu_init(void)
 
 	flush_gart(NULL);
 
-	printk(KERN_INFO "PCI-DMA: using GART IOMMU.\n");
 	dma_ops = &gart_dma_ops;
 
 	return 0;
diff --git a/arch/x86_64/kernel/pci-nommu.c b/arch/x86_64/kernel/pci-nommu.c
index e41564975195..44adcc2d5e5b 100644
--- a/arch/x86_64/kernel/pci-nommu.c
+++ b/arch/x86_64/kernel/pci-nommu.c
@@ -88,12 +88,5 @@ void __init no_iommu_init(void)
 {
 	if (dma_ops)
 		return;
-	printk(KERN_INFO "PCI-DMA: Disabling IOMMU.\n");
 	dma_ops = &nommu_dma_ops;
-	if (end_pfn > MAX_DMA32_PFN) {
-		printk(KERN_ERR
-		       "WARNING more than 4GB of memory but IOMMU not compiled in.\n"
-		       KERN_ERR "WARNING 32bit PCI may malfunction.\n"
-		       KERN_ERR "You might want to enable CONFIG_GART_IOMMU\n");
-	}
 }
diff --git a/arch/x86_64/kernel/pci-swiotlb.c b/arch/x86_64/kernel/pci-swiotlb.c
index 3569a25ad7fb..990ed67896f2 100644
--- a/arch/x86_64/kernel/pci-swiotlb.c
+++ b/arch/x86_64/kernel/pci-swiotlb.c
@@ -35,8 +35,8 @@ void pci_swiotlb_init(void)
 	    (end_pfn > MAX_DMA32_PFN || force_iommu))
 	       swiotlb = 1;
 	if (swiotlb) {
-		swiotlb_init();
 		printk(KERN_INFO "PCI-DMA: Using software bounce buffering for IO (SWIOTLB)\n");
+		swiotlb_init();
 		dma_ops = &swiotlb_dma_ops;
 	}
 }
diff --git a/arch/x86_64/kernel/pmtimer.c b/arch/x86_64/kernel/pmtimer.c
index feb5f108dd26..5c51d10408a6 100644
--- a/arch/x86_64/kernel/pmtimer.c
+++ b/arch/x86_64/kernel/pmtimer.c
@@ -80,6 +80,31 @@ int pmtimer_mark_offset(void)
 	return lost - 1;
 }
 
+static unsigned pmtimer_wait_tick(void)
+{
+	u32 a, b;
+	for (a = b = inl(pmtmr_ioport) & ACPI_PM_MASK;
+	     a == b;
+	     b = inl(pmtmr_ioport) & ACPI_PM_MASK)
+		;
+	return b;
+}
+
+/* note: wait time is rounded up to one tick */
+void pmtimer_wait(unsigned us)
+{
+	u32 a, b;
+	a = pmtimer_wait_tick();
+	do {
+		b = inl(pmtmr_ioport);
+	} while (cyc2us(b - a) < us);
+}
+
+void pmtimer_resume(void)
+{
+	last_pmtmr_tick = inl(pmtmr_ioport);
+}
+
 unsigned int do_gettimeoffset_pm(void)
 {
 	u32 now, offset, delta = 0;
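Aside on the new pmtimer_wait()/pmtimer_wait_tick() pair above: it busy-waits on the free-running ACPI PM timer, first spinning until the counter value changes so the wait starts on a tick boundary, then polling until the elapsed cycles convert to at least the requested number of microseconds. Below is a self-contained sketch of that pattern against a generic free-running counter; read_counter(), COUNTER_MASK and COUNTER_HZ stand in for inl(pmtmr_ioport), ACPI_PM_MASK and the 3.579545 MHz PM-timer rate, and the simulated counter plus the wrap-around masking belong to the sketch, not to the patch.

/* Sketch: busy-wait on a free-running counter, PM-timer style. */
#include <stdint.h>
#include <stdio.h>

#define COUNTER_MASK 0xFFFFFFU		/* ACPI PM timer is 24 bits wide */
#define COUNTER_HZ   3579545U		/* ACPI PM timer frequency (Hz) */

/* Simulated hardware counter; a real implementation would read an I/O port. */
static uint32_t read_counter(void)
{
	static uint32_t fake;
	return (fake += 37) & COUNTER_MASK;	/* pretend time advanced a bit */
}

static uint32_t cycles_to_us(uint32_t cyc)
{
	return (uint32_t)((uint64_t)cyc * 1000000ULL / COUNTER_HZ);
}

/* Spin until the counter advances, so the wait starts on a tick boundary. */
static uint32_t wait_tick(void)
{
	uint32_t a, b;

	for (a = b = read_counter(); a == b; b = read_counter())
		;
	return b;
}

/* Busy-wait at least 'us' microseconds (rounded up to whole counter ticks). */
static void counter_wait(unsigned int us)
{
	uint32_t start = wait_tick();

	while (cycles_to_us((read_counter() - start) & COUNTER_MASK) < us)
		;
}

int main(void)
{
	counter_wait(100);	/* "waits" 100 us of simulated counter time */
	printf("simulated 100 us wait done\n");
	return 0;
}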
diff --git a/arch/x86_64/kernel/process.c b/arch/x86_64/kernel/process.c
index 8ded407e4a94..22a05dec81a2 100644
--- a/arch/x86_64/kernel/process.c
+++ b/arch/x86_64/kernel/process.c
@@ -516,8 +516,10 @@ out:
  * This could still be optimized:
  * - fold all the options into a flag word and test it with a single test.
  * - could test fs/gs bitsliced
+ *
+ * Kprobes not supported here. Set the probe on schedule instead.
  */
-struct task_struct *
+__kprobes struct task_struct *
 __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 {
 	struct thread_struct *prev = &prev_p->thread,
diff --git a/arch/x86_64/kernel/setup.c b/arch/x86_64/kernel/setup.c
index 28895c03cb11..363db5a003df 100644
--- a/arch/x86_64/kernel/setup.c
+++ b/arch/x86_64/kernel/setup.c
@@ -741,7 +741,7 @@ void __init setup_arch(char **cmdline_p)
 	e820_setup_gap();
 
 #ifdef CONFIG_GART_IOMMU
-       iommu_hole_init();
+	iommu_hole_init();
 #endif
 
 #ifdef CONFIG_VT
@@ -877,6 +877,7 @@ static void __init amd_detect_cmp(struct cpuinfo_x86 *c)
 static int __init init_amd(struct cpuinfo_x86 *c)
 {
 	int r;
+	unsigned level;
 
 #ifdef CONFIG_SMP
 	unsigned long value;
@@ -899,6 +900,11 @@ static int __init init_amd(struct cpuinfo_x86 *c)
 	   3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway */
 	clear_bit(0*32+31, &c->x86_capability);
 
+	/* On C+ stepping K8 rep microcode works well for copy/memset */
+	level = cpuid_eax(1);
+	if (c->x86 == 15 && ((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58))
+		set_bit(X86_FEATURE_REP_GOOD, &c->x86_capability);
+
 	r = get_model_name(c);
 	if (!r) {
 		switch (c->x86) {
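Aside on the init_amd() hunk above: it compares the raw CPUID leaf-1 EAX value against 0x0f48/0x0f50/0x0f58 to pick out C-stepping (or newer) K8 parts before setting X86_FEATURE_REP_GOOD. The sketch below decodes such a value using the standard CPUID signature layout (stepping in bits 3:0, model in 7:4, family in 11:8); the sample values only illustrate which ranges the check accepts and are not taken from the patch.

/* Sketch: decode a CPUID leaf-1 EAX signature into family/model/stepping. */
#include <stdio.h>

static void decode_signature(unsigned int eax)
{
	unsigned int stepping = eax & 0xf;
	unsigned int model    = (eax >> 4) & 0xf;
	unsigned int family   = (eax >> 8) & 0xf;

	printf("eax=0x%04x -> family 0x%x, model 0x%x, stepping 0x%x\n",
	       eax, family, model, stepping);
}

int main(void)
{
	decode_signature(0x0f48);	/* first value the check accepts: K8, model 4, stepping 8 */
	decode_signature(0x0f4a);	/* accepted: >= 0x0f48 and < 0x0f50 */
	decode_signature(0x0f51);	/* rejected: falls in the 0x0f50..0x0f57 gap */
	decode_signature(0x0f58);	/* accepted again: >= 0x0f58 */
	return 0;
}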
diff --git a/arch/x86_64/kernel/time.c b/arch/x86_64/kernel/time.c
index f8c47c688443..c0844bffbf84 100644
--- a/arch/x86_64/kernel/time.c
+++ b/arch/x86_64/kernel/time.c
@@ -51,7 +51,7 @@ extern int using_apic_timer;
 DEFINE_SPINLOCK(rtc_lock);
 DEFINE_SPINLOCK(i8253_lock);
 
-static int nohpet __initdata = 0;
+int nohpet __initdata = 0;
 static int notsc __initdata = 0;
 
 #undef HPET_HACK_ENABLE_DANGEROUS
@@ -345,7 +345,7 @@ static noinline void handle_lost_ticks(int lost, struct pt_regs *regs)
 #endif
 }
 
-static irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+void main_timer_handler(struct pt_regs *regs)
 {
 	static unsigned long rtc_update = 0;
 	unsigned long tsc;
@@ -458,12 +458,17 @@ static irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
 	}
 
 	write_sequnlock(&xtime_lock);
+}
 
+static irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+	if (apic_runs_main_timer > 1)
+		return IRQ_HANDLED;
+	main_timer_handler(regs);
 #ifdef CONFIG_X86_LOCAL_APIC
 	if (using_apic_timer)
 		smp_send_timer_broadcast_ipi();
 #endif
-
 	return IRQ_HANDLED;
 }
 
@@ -843,17 +848,43 @@ static int hpet_reenable(void)
 	return hpet_timer_stop_set_go(hpet_tick);
 }
 
-void __init pit_init(void)
+#define PIT_MODE 0x43
+#define PIT_CH0 0x40
+
+static void __init __pit_init(int val, u8 mode)
 {
 	unsigned long flags;
 
 	spin_lock_irqsave(&i8253_lock, flags);
-	outb_p(0x34, 0x43);		/* binary, mode 2, LSB/MSB, ch 0 */
-	outb_p(LATCH & 0xff, 0x40);	/* LSB */
-	outb_p(LATCH >> 8, 0x40);	/* MSB */
+	outb_p(mode, PIT_MODE);
+	outb_p(val & 0xff, PIT_CH0);	/* LSB */
+	outb_p(val >> 8, PIT_CH0);	/* MSB */
 	spin_unlock_irqrestore(&i8253_lock, flags);
 }
 
+void __init pit_init(void)
+{
+	__pit_init(LATCH, 0x34); /* binary, mode 2, LSB/MSB, ch 0 */
+}
+
+void __init pit_stop_interrupt(void)
+{
+	__pit_init(0, 0x30); /* mode 0 */
+}
+
+void __init stop_timer_interrupt(void)
+{
+	char *name;
+	if (vxtime.hpet_address) {
+		name = "HPET";
+		hpet_timer_stop_set_go(0);
+	} else {
+		name = "PIT";
+		pit_stop_interrupt();
+	}
+	printk(KERN_INFO "timer: %s interrupt stopped.\n", name);
+}
+
 int __init time_setup(char *str)
 {
 	report_lost_ticks = 1;
@@ -932,7 +963,7 @@ void __init time_init(void)
  * Make an educated guess if the TSC is trustworthy and synchronized
  * over all CPUs.
  */
-__init int unsynchronized_tsc(void)
+__cpuinit int unsynchronized_tsc(void)
 {
 #ifdef CONFIG_SMP
 	if (oem_force_hpet_timer())
@@ -1016,9 +1047,21 @@ static int timer_resume(struct sys_device *dev)
 	write_seqlock_irqsave(&xtime_lock,flags);
 	xtime.tv_sec = sec;
 	xtime.tv_nsec = 0;
+	if (vxtime.mode == VXTIME_HPET) {
+		if (hpet_use_timer)
+			vxtime.last = hpet_readl(HPET_T0_CMP) - hpet_tick;
+		else
+			vxtime.last = hpet_readl(HPET_COUNTER);
+#ifdef CONFIG_X86_PM_TIMER
+	} else if (vxtime.mode == VXTIME_PMTMR) {
+		pmtimer_resume();
+#endif
+	} else
+		vxtime.last_tsc = get_cycles_sync();
 	write_sequnlock_irqrestore(&xtime_lock,flags);
 	jiffies += sleep_length;
 	wall_jiffies += sleep_length;
+	monotonic_base += sleep_length * (NSEC_PER_SEC/HZ);
 	touch_softlockup_watchdog();
 	return 0;
 }
diff --git a/arch/x86_64/kernel/traps.c b/arch/x86_64/kernel/traps.c
index 8bb0aeda78b9..ee1b2da9e5e7 100644
--- a/arch/x86_64/kernel/traps.c
+++ b/arch/x86_64/kernel/traps.c
@@ -372,7 +372,7 @@ void out_of_line_bug(void)
 static DEFINE_SPINLOCK(die_lock);
 static int die_owner = -1;
 
-unsigned long oops_begin(void)
+unsigned __kprobes long oops_begin(void)
 {
 	int cpu = safe_smp_processor_id();
 	unsigned long flags;
@@ -391,7 +391,7 @@ unsigned long oops_begin(void)
 	return flags;
 }
 
-void oops_end(unsigned long flags)
+void __kprobes oops_end(unsigned long flags)
 {
 	die_owner = -1;
 	bust_spinlocks(0);
@@ -400,7 +400,7 @@ void oops_end(unsigned long flags)
 		panic("Oops");
 }
 
-void __die(const char * str, struct pt_regs * regs, long err)
+void __kprobes __die(const char * str, struct pt_regs * regs, long err)
 {
 	static int die_counter;
 	printk(KERN_EMERG "%s: %04lx [%u] ", str, err & 0xffff,++die_counter);
@@ -432,7 +432,7 @@ void die(const char * str, struct pt_regs * regs, long err)
 	do_exit(SIGSEGV);
 }
 
-void die_nmi(char *str, struct pt_regs *regs)
+void __kprobes die_nmi(char *str, struct pt_regs *regs)
 {
 	unsigned long flags = oops_begin();
 
@@ -575,7 +575,8 @@ asmlinkage void __kprobes do_general_protection(struct pt_regs * regs,
 	}
 }
 
-static void mem_parity_error(unsigned char reason, struct pt_regs * regs)
+static __kprobes void
+mem_parity_error(unsigned char reason, struct pt_regs * regs)
 {
 	printk("Uhhuh. NMI received. Dazed and confused, but trying to continue\n");
 	printk("You probably have a hardware problem with your RAM chips\n");
@@ -585,7 +586,8 @@ static void mem_parity_error(unsigned char reason, struct pt_regs * regs)
 	outb(reason, 0x61);
 }
 
-static void io_check_error(unsigned char reason, struct pt_regs * regs)
+static __kprobes void
+io_check_error(unsigned char reason, struct pt_regs * regs)
 {
 	printk("NMI: IOCK error (debug interrupt?)\n");
 	show_registers(regs);
@@ -598,7 +600,8 @@ static void io_check_error(unsigned char reason, struct pt_regs * regs)
 	outb(reason, 0x61);
 }
 
-static void unknown_nmi_error(unsigned char reason, struct pt_regs * regs)
+static __kprobes void
+unknown_nmi_error(unsigned char reason, struct pt_regs * regs)
 {	printk("Uhhuh. NMI received for unknown reason %02x.\n", reason);
 	printk("Dazed and confused, but trying to continue\n");
 	printk("Do you have a strange power saving mode enabled?\n");
@@ -606,7 +609,7 @@ static void unknown_nmi_error(unsigned char reason, struct pt_regs * regs)
 
 /* Runs on IST stack. This code must keep interrupts off all the time.
    Nested NMIs are prevented by the CPU. */
-asmlinkage void default_do_nmi(struct pt_regs *regs)
+asmlinkage __kprobes void default_do_nmi(struct pt_regs *regs)
 {
 	unsigned char reason = 0;
 	int cpu;
@@ -658,7 +661,7 @@ asmlinkage void __kprobes do_int3(struct pt_regs * regs, long error_code)
 /* Help handler running on IST stack to switch back to user stack
    for scheduling or signal handling. The actual stack switch is done in
    entry.S */
-asmlinkage struct pt_regs *sync_regs(struct pt_regs *eregs)
+asmlinkage __kprobes struct pt_regs *sync_regs(struct pt_regs *eregs)
 {
 	struct pt_regs *regs = eregs;
 	/* Did already sync */
diff --git a/arch/x86_64/kernel/vmlinux.lds.S b/arch/x86_64/kernel/vmlinux.lds.S
index b0eed1faf740..74db0062d4a2 100644
--- a/arch/x86_64/kernel/vmlinux.lds.S
+++ b/arch/x86_64/kernel/vmlinux.lds.S
@@ -172,13 +172,15 @@ SECTIONS
   . = ALIGN(4096);
   __initramfs_start = .;
   .init.ramfs : AT(ADDR(.init.ramfs) - LOAD_OFFSET) { *(.init.ramfs) }
   __initramfs_end = .;
-  . = ALIGN(32);
+  /* temporary here to work around NR_CPUS. If you see this comment in 2.6.17+
+     complain */
+  . = ALIGN(4096);
+  __init_end = .;
+  . = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
   __per_cpu_start = .;
   .data.percpu : AT(ADDR(.data.percpu) - LOAD_OFFSET) { *(.data.percpu) }
   __per_cpu_end = .;
-  . = ALIGN(4096);
-  __init_end = .;
 
   . = ALIGN(4096);
   __nosave_begin = .;