author:    Jeff Garzik <jgarzik@pobox.com>  2006-02-07 01:47:12 -0500
committer: Jeff Garzik <jgarzik@pobox.com>  2006-02-07 01:47:12 -0500
commit:    3c9b3a8575b4f2551e3b5b74ffa1c3559c6338eb
tree:      7f8d84353852401ec74e005f6f0b1eb958b9a70d
parent:    c0d3c0c0ce94d3db893577ae98e64414d92e49d8
parent:    c03296a868ae7c91aa2d8b372184763b18f16d7a
Merge branch 'master'
Diffstat (limited to 'arch/x86_64')
-rw-r--r--  arch/x86_64/Kconfig.debug            |    7
-rw-r--r--  arch/x86_64/defconfig                |   21
-rw-r--r--  arch/x86_64/ia32/ia32entry.S         |    2
-rw-r--r--  arch/x86_64/kernel/acpi/Makefile     |    5
-rw-r--r--  arch/x86_64/kernel/acpi/processor.c  |   72
-rw-r--r--  arch/x86_64/kernel/apic.c            |   89
-rw-r--r--  arch/x86_64/kernel/entry.S           |    7
-rw-r--r--  arch/x86_64/kernel/io_apic.c         |   18
-rw-r--r--  arch/x86_64/kernel/mce.c             |    2
-rw-r--r--  arch/x86_64/kernel/mpparse.c         |    8
-rw-r--r--  arch/x86_64/kernel/nmi.c             |    7
-rw-r--r--  arch/x86_64/kernel/pci-dma.c         |    3
-rw-r--r--  arch/x86_64/kernel/pci-gart.c        |   19
-rw-r--r--  arch/x86_64/kernel/pci-nommu.c       |    7
-rw-r--r--  arch/x86_64/kernel/pci-swiotlb.c     |    2
-rw-r--r--  arch/x86_64/kernel/pmtimer.c         |   25
-rw-r--r--  arch/x86_64/kernel/process.c         |    4
-rw-r--r--  arch/x86_64/kernel/setup.c           |    8
-rw-r--r--  arch/x86_64/kernel/time.c            |   59
-rw-r--r--  arch/x86_64/kernel/traps.c           |   21
-rw-r--r--  arch/x86_64/kernel/vmlinux.lds.S     |   10
-rw-r--r--  arch/x86_64/lib/Makefile             |    2
-rw-r--r--  arch/x86_64/lib/clear_page.S         |   38
-rw-r--r--  arch/x86_64/lib/copy_page.S          |   87
-rw-r--r--  arch/x86_64/lib/copy_user.S          |  247
-rw-r--r--  arch/x86_64/lib/iomap_copy.S         |   26
-rw-r--r--  arch/x86_64/lib/memcpy.S             |   93
-rw-r--r--  arch/x86_64/lib/memset.S             |   94
-rw-r--r--  arch/x86_64/mm/fault.c               |    3
-rw-r--r--  arch/x86_64/mm/srat.c                |   45
-rw-r--r--  arch/x86_64/pci/mmconfig.c           |   19
31 files changed, 936 insertions, 114 deletions
diff --git a/arch/x86_64/Kconfig.debug b/arch/x86_64/Kconfig.debug
index fcb06a50fdd2..ea31b4c62105 100644
--- a/arch/x86_64/Kconfig.debug
+++ b/arch/x86_64/Kconfig.debug
@@ -2,13 +2,6 @@ menu "Kernel hacking"
 
 source "lib/Kconfig.debug"
 
-config INIT_DEBUG
-	bool "Debug __init statements"
-	depends on DEBUG_KERNEL
-	help
-	  Fill __init and __initdata at the end of boot. This helps debugging
-	  illegal uses of __init and __initdata after initialization.
-
 config DEBUG_RODATA
 	bool "Write protect kernel read-only data structures"
 	depends on DEBUG_KERNEL
diff --git a/arch/x86_64/defconfig b/arch/x86_64/defconfig
index 09a3eb743315..56832929a543 100644
--- a/arch/x86_64/defconfig
+++ b/arch/x86_64/defconfig
@@ -1,7 +1,7 @@
 #
 # Automatically generated make config: don't edit
-# Linux kernel version: 2.6.15-git12
-# Mon Jan 16 13:09:08 2006
+# Linux kernel version: 2.6.16-rc1-git2
+# Thu Jan 19 10:05:21 2006
 #
 CONFIG_X86_64=y
 CONFIG_64BIT=y
@@ -310,6 +310,11 @@ CONFIG_IPV6=y
 # SCTP Configuration (EXPERIMENTAL)
 #
 # CONFIG_IP_SCTP is not set
+
+#
+# TIPC Configuration (EXPERIMENTAL)
+#
+# CONFIG_TIPC is not set
 # CONFIG_ATM is not set
 # CONFIG_BRIDGE is not set
 # CONFIG_VLAN_8021Q is not set
@@ -319,11 +324,6 @@ CONFIG_IPV6=y
 # CONFIG_ATALK is not set
 # CONFIG_X25 is not set
 # CONFIG_LAPB is not set
-
-#
-# TIPC Configuration (EXPERIMENTAL)
-#
-# CONFIG_TIPC is not set
 # CONFIG_NET_DIVERT is not set
 # CONFIG_ECONET is not set
 # CONFIG_WAN_ROUTER is not set
@@ -1098,6 +1098,12 @@ CONFIG_USB_MON=y
 #
 
 #
+# EDAC - error detection and reporting (RAS)
+#
+# CONFIG_EDAC is not set
+# CONFIG_EDAC_POLL is not set
+
+#
 # Firmware Drivers
 #
 # CONFIG_EDD is not set
@@ -1290,6 +1296,7 @@ CONFIG_DEBUG_FS=y
 # CONFIG_DEBUG_VM is not set
 # CONFIG_FRAME_POINTER is not set
 # CONFIG_FORCED_INLINING is not set
+# CONFIG_UNWIND_INFO is not set
 # CONFIG_RCU_TORTURE_TEST is not set
 CONFIG_INIT_DEBUG=y
 # CONFIG_DEBUG_RODATA is not set
diff --git a/arch/x86_64/ia32/ia32entry.S b/arch/x86_64/ia32/ia32entry.S
index f05c2a802489..067c0f47bd0d 100644
--- a/arch/x86_64/ia32/ia32entry.S
+++ b/arch/x86_64/ia32/ia32entry.S
@@ -676,7 +676,7 @@ ia32_sys_call_table:
 	.quad sys_mkdirat
 	.quad sys_mknodat
 	.quad sys_fchownat
-	.quad sys_futimesat
+	.quad compat_sys_futimesat
 	.quad compat_sys_newfstatat	/* 300 */
 	.quad sys_unlinkat
 	.quad sys_renameat
diff --git a/arch/x86_64/kernel/acpi/Makefile b/arch/x86_64/kernel/acpi/Makefile
index 7da9ace890bd..4fe97071f297 100644
--- a/arch/x86_64/kernel/acpi/Makefile
+++ b/arch/x86_64/kernel/acpi/Makefile
@@ -1,3 +1,8 @@
 obj-y := boot.o
 boot-y := ../../../i386/kernel/acpi/boot.o
 obj-$(CONFIG_ACPI_SLEEP) += sleep.o wakeup.o
+
+ifneq ($(CONFIG_ACPI_PROCESSOR),)
+obj-y += processor.o
+endif
+
diff --git a/arch/x86_64/kernel/acpi/processor.c b/arch/x86_64/kernel/acpi/processor.c
new file mode 100644
index 000000000000..3bdc2baa5bb1
--- /dev/null
+++ b/arch/x86_64/kernel/acpi/processor.c
@@ -0,0 +1,72 @@
+/*
+ * arch/x86_64/kernel/acpi/processor.c
+ *
+ * Copyright (C) 2005 Intel Corporation
+ * 	Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
+ * 	- Added _PDC for platforms with Intel CPUs
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/acpi.h>
+
+#include <acpi/processor.h>
+#include <asm/acpi.h>
+
+static void init_intel_pdc(struct acpi_processor *pr, struct cpuinfo_x86 *c)
+{
+	struct acpi_object_list *obj_list;
+	union acpi_object *obj;
+	u32 *buf;
+
+	/* allocate and initialize pdc. It will be used later. */
+	obj_list = kmalloc(sizeof(struct acpi_object_list), GFP_KERNEL);
+	if (!obj_list) {
+		printk(KERN_ERR "Memory allocation error\n");
+		return;
+	}
+
+	obj = kmalloc(sizeof(union acpi_object), GFP_KERNEL);
+	if (!obj) {
+		printk(KERN_ERR "Memory allocation error\n");
+		kfree(obj_list);
+		return;
+	}
+
+	buf = kmalloc(12, GFP_KERNEL);
+	if (!buf) {
+		printk(KERN_ERR "Memory allocation error\n");
+		kfree(obj);
+		kfree(obj_list);
+		return;
+	}
+
+	buf[0] = ACPI_PDC_REVISION_ID;
+	buf[1] = 1;
+	buf[2] = ACPI_PDC_EST_CAPABILITY_SMP;
+
+	obj->type = ACPI_TYPE_BUFFER;
+	obj->buffer.length = 12;
+	obj->buffer.pointer = (u8 *)buf;
+	obj_list->count = 1;
+	obj_list->pointer = obj;
+	pr->pdc = obj_list;
+
+	return;
+}
+
+/* Initialize _PDC data based on the CPU vendor */
+void arch_acpi_processor_init_pdc(struct acpi_processor *pr)
+{
+	unsigned int cpu = pr->id;
+	struct cpuinfo_x86 *c = cpu_data + cpu;
+
+	pr->pdc = NULL;
+	if (c->x86_vendor == X86_VENDOR_INTEL && cpu_has(c, X86_FEATURE_EST))
+		init_intel_pdc(pr, c);
+
+	return;
+}
+
+EXPORT_SYMBOL(arch_acpi_processor_init_pdc);
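
Note: the magic 12 in init_intel_pdc() is three u32s. A sketch of what the _PDC capability buffer built above contains (hypothetical struct for illustration only, not a kernel type):

    #include <stdint.h>

    /* Layout of the 12-byte _PDC buffer filled in above. */
    struct pdc_buffer_sketch {
            uint32_t revision;      /* buf[0]: ACPI_PDC_REVISION_ID */
            uint32_t count;         /* buf[1]: number of capability dwords (1) */
            uint32_t capabilities;  /* buf[2]: ACPI_PDC_EST_CAPABILITY_SMP */
    };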
diff --git a/arch/x86_64/kernel/apic.c b/arch/x86_64/kernel/apic.c
index 5d3c5b07b8db..6147770b4347 100644
--- a/arch/x86_64/kernel/apic.c
+++ b/arch/x86_64/kernel/apic.c
@@ -35,8 +35,12 @@
 #include <asm/mach_apic.h>
 #include <asm/nmi.h>
 #include <asm/idle.h>
+#include <asm/proto.h>
+#include <asm/timex.h>
 
 int apic_verbosity;
+int apic_runs_main_timer;
+int apic_calibrate_pmtmr __initdata;
 
 int disable_apic_timer __initdata;
 
@@ -68,6 +72,26 @@ int get_maxlvt(void)
 	return maxlvt;
 }
 
+/*
+ * 'what should we do if we get a hw irq event on an illegal vector'.
+ * each architecture has to answer this themselves.
+ */
+void ack_bad_irq(unsigned int irq)
+{
+	printk("unexpected IRQ trap at vector %02x\n", irq);
+	/*
+	 * Currently unexpected vectors happen only on SMP and APIC.
+	 * We _must_ ack these because every local APIC has only N
+	 * irq slots per priority level, and a 'hanging, unacked' IRQ
+	 * holds up an irq slot - in excessive cases (when multiple
+	 * unexpected vectors occur) that might lock up the APIC
+	 * completely.
+	 * But don't ack when the APIC is disabled. -AK
+	 */
+	if (!disable_apic)
+		ack_APIC_irq();
+}
+
 void clear_local_APIC(void)
 {
 	int maxlvt;
@@ -702,9 +726,17 @@ static void setup_APIC_timer(unsigned int clocks)
 		c2 |= inb_p(0x40) << 8;
 		} while (c2 - c1 < 300);
 	}
-
 	__setup_APIC_LVTT(clocks);
-
+	/* Turn off PIT interrupt if we use APIC timer as main timer.
+	   Only works with the PM timer right now
+	   TBD fix it for HPET too. */
+	if (vxtime.mode == VXTIME_PMTMR &&
+		smp_processor_id() == boot_cpu_id &&
+		apic_runs_main_timer == 1 &&
+		!cpu_isset(boot_cpu_id, timer_interrupt_broadcast_ipi_mask)) {
+		stop_timer_interrupt();
+		apic_runs_main_timer++;
+	}
 	local_irq_restore(flags);
 }
 
@@ -735,14 +767,27 @@ static int __init calibrate_APIC_clock(void)
 	__setup_APIC_LVTT(1000000000);
 
 	apic_start = apic_read(APIC_TMCCT);
-	rdtscl(tsc_start);
-
-	do {
-		apic = apic_read(APIC_TMCCT);
-		rdtscl(tsc);
-	} while ((tsc - tsc_start) < TICK_COUNT && (apic - apic_start) < TICK_COUNT);
+#ifdef CONFIG_X86_PM_TIMER
+	if (apic_calibrate_pmtmr && pmtmr_ioport) {
+		pmtimer_wait(5000);  /* 5ms wait */
+		apic = apic_read(APIC_TMCCT);
+		result = (apic_start - apic) * 1000L / 5;
+	} else
+#endif
+	{
+		rdtscl(tsc_start);
+
+		do {
+			apic = apic_read(APIC_TMCCT);
+			rdtscl(tsc);
+		} while ((tsc - tsc_start) < TICK_COUNT &&
+				(apic - apic_start) < TICK_COUNT);
+
+		result = (apic_start - apic) * 1000L * cpu_khz /
+					(tsc - tsc_start);
+	}
+	printk("result %d\n", result);
 
-	result = (apic_start - apic) * 1000L * cpu_khz / (tsc - tsc_start);
 
 	printk(KERN_INFO "Detected %d.%03d MHz APIC timer.\n",
 		result / 1000 / 1000, result / 1000 % 1000);
@@ -872,6 +917,8 @@ void smp_local_timer_interrupt(struct pt_regs *regs)
 #ifdef CONFIG_SMP
 	update_process_times(user_mode(regs));
 #endif
+	if (apic_runs_main_timer > 1 && smp_processor_id() == boot_cpu_id)
+		main_timer_handler(regs);
 	/*
 	 * We take the 'long' return path, and there every subsystem
 	 * grabs the appropriate locks (kernel lock/ irq lock).
@@ -924,7 +971,7 @@ void smp_apic_timer_interrupt(struct pt_regs *regs)
 	 * multi-chassis. Use available data to take a good guess.
 	 * If in doubt, go HPET.
 	 */
-__init int oem_force_hpet_timer(void)
+__cpuinit int oem_force_hpet_timer(void)
 {
 	int i, clusters, zeros;
 	unsigned id;
@@ -1081,10 +1128,34 @@ static __init int setup_nolapic(char *str)
 
 static __init int setup_noapictimer(char *str)
 {
+	if (str[0] != ' ' && str[0] != 0)
+		return -1;
 	disable_apic_timer = 1;
 	return 0;
 }
 
+static __init int setup_apicmaintimer(char *str)
+{
+	apic_runs_main_timer = 1;
+	nohpet = 1;
+	return 0;
+}
+__setup("apicmaintimer", setup_apicmaintimer);
+
+static __init int setup_noapicmaintimer(char *str)
+{
+	apic_runs_main_timer = -1;
+	return 0;
+}
+__setup("noapicmaintimer", setup_noapicmaintimer);
+
+static __init int setup_apicpmtimer(char *s)
+{
+	apic_calibrate_pmtmr = 1;
+	return setup_apicmaintimer(NULL);
+}
+__setup("apicpmtimer", setup_apicpmtimer);
+
 /* dummy parsing: see setup.c */
 
 __setup("disableapic", setup_disableapic);
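
Note: with the new "apicpmtimer" option, calibrate_APIC_clock() counts how far the (down-counting) APIC timer moves during a 5 ms ACPI PM-timer wait instead of timing against the TSC. A minimal sketch of that arithmetic, with a hypothetical helper name:

    #include <stdint.h>

    /* apic_start was read before pmtimer_wait(5000), apic_end after. */
    static uint32_t apic_timer_hz(uint32_t apic_start, uint32_t apic_end)
    {
            uint32_t ticks_in_5ms = apic_start - apic_end;  /* timer counts down */
            /* scale 5 ms up to one second: * 1000 / 5 */
            return (uint32_t)((uint64_t)ticks_in_5ms * 1000 / 5);
    }

For example, 1,250,000 ticks in 5 ms gives 250,000,000 Hz, printed by the existing "Detected %d.%03d MHz APIC timer" message as 250.000 MHz.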
diff --git a/arch/x86_64/kernel/entry.S b/arch/x86_64/kernel/entry.S
index dbdba56e8faa..b150c87a08c6 100644
--- a/arch/x86_64/kernel/entry.S
+++ b/arch/x86_64/kernel/entry.S
@@ -499,7 +499,9 @@ ENTRY(stub_rt_sigreturn)
 	movq %gs:pda_irqstackptr,%rax
 	cmoveq %rax,%rsp /*todo This needs CFI annotation! */
 	pushq %rdi			# save old stack
+#ifndef CONFIG_DEBUG_INFO
 	CFI_ADJUST_CFA_OFFSET	8
+#endif
 	call \func
 	.endm
 
@@ -509,7 +511,9 @@ ENTRY(common_interrupt)
 	/* 0(%rsp): oldrsp-ARGOFFSET */
 ret_from_intr:
 	popq %rdi
+#ifndef CONFIG_DEBUG_INFO
 	CFI_ADJUST_CFA_OFFSET	-8
+#endif
 	cli
 	decl %gs:pda_irqcount
 #ifdef CONFIG_DEBUG_INFO
@@ -922,7 +926,7 @@ KPROBE_ENTRY(debug)
 	.previous .text
 
 	/* runs on exception stack */
-ENTRY(nmi)
+KPROBE_ENTRY(nmi)
 	INTR_FRAME
 	pushq $-1
 	CFI_ADJUST_CFA_OFFSET 8
@@ -969,6 +973,7 @@ paranoid_schedule:
 	cli
 	jmp paranoid_userspace
 	CFI_ENDPROC
+	.previous .text
 
 KPROBE_ENTRY(int3)
 	INTR_FRAME
diff --git a/arch/x86_64/kernel/io_apic.c b/arch/x86_64/kernel/io_apic.c
index e8cf44ef8778..4282d72b2a26 100644
--- a/arch/x86_64/kernel/io_apic.c
+++ b/arch/x86_64/kernel/io_apic.c
@@ -304,6 +304,14 @@ void __init check_ioapic(void)
 #endif
 		/* RED-PEN skip them on mptables too? */
 		return;
+	case PCI_VENDOR_ID_ATI:
+		if (apic_runs_main_timer != 0)
+			break;
+		printk(KERN_INFO
+		       "ATI board detected. Using APIC/PM timer.\n");
+		apic_runs_main_timer = 1;
+		nohpet = 1;
+		return;
 	}
 
 	/* No multi-function device? */
@@ -2027,7 +2035,7 @@ int __init io_apic_get_redir_entries (int ioapic)
 }
 
 
-int io_apic_set_pci_routing (int ioapic, int pin, int irq, int edge_level, int active_high_low)
+int io_apic_set_pci_routing (int ioapic, int pin, int irq, int triggering, int polarity)
 {
 	struct IO_APIC_route_entry entry;
 	unsigned long flags;
@@ -2049,8 +2057,8 @@ int io_apic_set_pci_routing (int ioapic, int pin, int irq, int triggering, int p
 	entry.delivery_mode = INT_DELIVERY_MODE;
 	entry.dest_mode = INT_DEST_MODE;
 	entry.dest.logical.logical_dest = cpu_mask_to_apicid(TARGET_CPUS);
-	entry.trigger = edge_level;
-	entry.polarity = active_high_low;
+	entry.trigger = triggering;
+	entry.polarity = polarity;
 	entry.mask = 1;		/* Disabled (masked) */
 
 	irq = gsi_irq_sharing(irq);
@@ -2065,9 +2073,9 @@ int io_apic_set_pci_routing (int ioapic, int pin, int irq, int triggering, int p
 	apic_printk(APIC_VERBOSE,KERN_DEBUG "IOAPIC[%d]: Set PCI routing entry (%d-%d -> 0x%x -> "
 		"IRQ %d Mode:%i Active:%i)\n", ioapic,
 		mp_ioapics[ioapic].mpc_apicid, pin, entry.vector, irq,
-		edge_level, active_high_low);
+		triggering, polarity);
 
-	ioapic_register_intr(irq, entry.vector, edge_level);
+	ioapic_register_intr(irq, entry.vector, triggering);
 
 	if (!ioapic && (irq < 16))
 		disable_8259A_irq(irq);
diff --git a/arch/x86_64/kernel/mce.c b/arch/x86_64/kernel/mce.c
index 13a2eada6c95..b8b9529fa89e 100644
--- a/arch/x86_64/kernel/mce.c
+++ b/arch/x86_64/kernel/mce.c
@@ -380,7 +380,7 @@ static void __cpuinit mce_cpu_features(struct cpuinfo_x86 *c)
  */
 void __cpuinit mcheck_init(struct cpuinfo_x86 *c)
 {
-	static cpumask_t mce_cpus __initdata = CPU_MASK_NONE;
+	static cpumask_t mce_cpus = CPU_MASK_NONE;
 
 	mce_cpu_quirks(c);
 
diff --git a/arch/x86_64/kernel/mpparse.c b/arch/x86_64/kernel/mpparse.c
index 1105250bf02c..dc49bfb6db0a 100644
--- a/arch/x86_64/kernel/mpparse.c
+++ b/arch/x86_64/kernel/mpparse.c
@@ -915,7 +915,7 @@ void __init mp_config_acpi_legacy_irqs (void)
 
 #define MAX_GSI_NUM	4096
 
-int mp_register_gsi(u32 gsi, int edge_level, int active_high_low)
+int mp_register_gsi(u32 gsi, int triggering, int polarity)
 {
 	int ioapic = -1;
 	int ioapic_pin = 0;
@@ -964,7 +964,7 @@ int mp_register_gsi(u32 gsi, int triggering, int polarity)
 
 	mp_ioapic_routing[ioapic].pin_programmed[idx] |= (1<<bit);
 
-	if (edge_level) {
+	if (triggering == ACPI_LEVEL_SENSITIVE) {
 		/*
 		 * For PCI devices assign IRQs in order, avoiding gaps
 		 * due to unused I/O APIC pins.
@@ -986,8 +986,8 @@ int mp_register_gsi(u32 gsi, int triggering, int polarity)
 	}
 
 	io_apic_set_pci_routing(ioapic, ioapic_pin, gsi,
-		edge_level == ACPI_EDGE_SENSITIVE ? 0 : 1,
-		active_high_low == ACPI_ACTIVE_HIGH ? 0 : 1);
+		triggering == ACPI_EDGE_SENSITIVE ? 0 : 1,
+		polarity == ACPI_ACTIVE_HIGH ? 0 : 1);
 	return gsi;
 }
 
diff --git a/arch/x86_64/kernel/nmi.c b/arch/x86_64/kernel/nmi.c
index 5fae6f0cd994..8be407a1f62d 100644
--- a/arch/x86_64/kernel/nmi.c
+++ b/arch/x86_64/kernel/nmi.c
@@ -24,6 +24,7 @@
 #include <linux/sysdev.h>
 #include <linux/nmi.h>
 #include <linux/sysctl.h>
+#include <linux/kprobes.h>
 
 #include <asm/smp.h>
 #include <asm/mtrr.h>
@@ -468,7 +469,7 @@ void touch_nmi_watchdog (void)
 	touch_softlockup_watchdog();
 }
 
-void nmi_watchdog_tick (struct pt_regs * regs, unsigned reason)
+void __kprobes nmi_watchdog_tick(struct pt_regs * regs, unsigned reason)
 {
 	int sum;
 	int touched = 0;
@@ -512,14 +513,14 @@ void __kprobes nmi_watchdog_tick(struct pt_regs * regs, unsigned reason)
 	}
 }
 
-static int dummy_nmi_callback(struct pt_regs * regs, int cpu)
+static __kprobes int dummy_nmi_callback(struct pt_regs * regs, int cpu)
 {
 	return 0;
 }
 
 static nmi_callback_t nmi_callback = dummy_nmi_callback;
 
-asmlinkage void do_nmi(struct pt_regs * regs, long error_code)
+asmlinkage __kprobes void do_nmi(struct pt_regs * regs, long error_code)
 {
 	int cpu = safe_smp_processor_id();
 
diff --git a/arch/x86_64/kernel/pci-dma.c b/arch/x86_64/kernel/pci-dma.c
index 2f5d8328e2b9..4ed391edd47a 100644
--- a/arch/x86_64/kernel/pci-dma.c
+++ b/arch/x86_64/kernel/pci-dma.c
@@ -107,6 +107,9 @@ dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
 			goto again;
 		}
 
+	/* Let low level make its own zone decisions */
+	gfp &= ~(GFP_DMA32|GFP_DMA);
+
 	if (dma_ops->alloc_coherent)
 		return dma_ops->alloc_coherent(dev, size,
 					       dma_handle, gfp);
diff --git a/arch/x86_64/kernel/pci-gart.c b/arch/x86_64/kernel/pci-gart.c
index c37fc7726ba6..2fe23a6c361b 100644
--- a/arch/x86_64/kernel/pci-gart.c
+++ b/arch/x86_64/kernel/pci-gart.c
@@ -457,9 +457,12 @@ int gart_map_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
 error:
 	flush_gart(NULL);
 	gart_unmap_sg(dev, sg, nents, dir);
-	/* When it was forced try again unforced */
-	if (force_iommu)
-		return dma_map_sg_nonforce(dev, sg, nents, dir);
+	/* When it was forced or merged try again in a dumb way */
+	if (force_iommu || iommu_merge) {
+		out = dma_map_sg_nonforce(dev, sg, nents, dir);
+		if (out > 0)
+			return out;
+	}
 	if (panic_on_overflow)
 		panic("dma_map_sg: overflow on %lu pages\n", pages);
 	iommu_full(dev, pages << PAGE_SHIFT, dir);
@@ -642,9 +645,18 @@ static int __init pci_iommu_init(void)
 	    (no_agp && init_k8_gatt(&info) < 0)) {
 		no_iommu = 1;
 		no_iommu_init();
+		printk(KERN_INFO "PCI-DMA: Disabling IOMMU.\n");
+		if (end_pfn > MAX_DMA32_PFN) {
+			printk(KERN_ERR "WARNING more than 4GB of memory "
+					"but IOMMU not compiled in.\n"
+			       KERN_ERR "WARNING 32bit PCI may malfunction.\n"
+			       KERN_ERR "You might want to enable "
+					"CONFIG_GART_IOMMU\n");
+		}
 		return -1;
 	}
 
+	printk(KERN_INFO "PCI-DMA: using GART IOMMU.\n");
 	aper_size = info.aper_size * 1024 * 1024;
 	iommu_size = check_iommu_size(info.aper_base, aper_size);
 	iommu_pages = iommu_size >> PAGE_SHIFT;
@@ -718,7 +730,6 @@ static int __init pci_iommu_init(void)
 
 	flush_gart(NULL);
 
-	printk(KERN_INFO "PCI-DMA: using GART IOMMU.\n");
 	dma_ops = &gart_dma_ops;
 
 	return 0;
diff --git a/arch/x86_64/kernel/pci-nommu.c b/arch/x86_64/kernel/pci-nommu.c
index e41564975195..44adcc2d5e5b 100644
--- a/arch/x86_64/kernel/pci-nommu.c
+++ b/arch/x86_64/kernel/pci-nommu.c
@@ -88,12 +88,5 @@ void __init no_iommu_init(void)
 {
 	if (dma_ops)
 		return;
-	printk(KERN_INFO "PCI-DMA: Disabling IOMMU.\n");
 	dma_ops = &nommu_dma_ops;
-	if (end_pfn > MAX_DMA32_PFN) {
-		printk(KERN_ERR
-		       "WARNING more than 4GB of memory but IOMMU not compiled in.\n"
-		       KERN_ERR "WARNING 32bit PCI may malfunction.\n"
-		       KERN_ERR "You might want to enable CONFIG_GART_IOMMU\n");
-	}
 }
diff --git a/arch/x86_64/kernel/pci-swiotlb.c b/arch/x86_64/kernel/pci-swiotlb.c
index 3569a25ad7fb..990ed67896f2 100644
--- a/arch/x86_64/kernel/pci-swiotlb.c
+++ b/arch/x86_64/kernel/pci-swiotlb.c
@@ -35,8 +35,8 @@ void pci_swiotlb_init(void)
 	    (end_pfn > MAX_DMA32_PFN || force_iommu))
 	       swiotlb = 1;
 	if (swiotlb) {
-		swiotlb_init();
 		printk(KERN_INFO "PCI-DMA: Using software bounce buffering for IO (SWIOTLB)\n");
+		swiotlb_init();
 		dma_ops = &swiotlb_dma_ops;
 	}
 }
diff --git a/arch/x86_64/kernel/pmtimer.c b/arch/x86_64/kernel/pmtimer.c
index feb5f108dd26..5c51d10408a6 100644
--- a/arch/x86_64/kernel/pmtimer.c
+++ b/arch/x86_64/kernel/pmtimer.c
@@ -80,6 +80,31 @@ int pmtimer_mark_offset(void)
 	return lost - 1;
 }
 
+static unsigned pmtimer_wait_tick(void)
+{
+	u32 a, b;
+	for (a = b = inl(pmtmr_ioport) & ACPI_PM_MASK;
+	     a == b;
+	     b = inl(pmtmr_ioport) & ACPI_PM_MASK)
+		;
+	return b;
+}
+
+/* note: wait time is rounded up to one tick */
+void pmtimer_wait(unsigned us)
+{
+	u32 a, b;
+	a = pmtimer_wait_tick();
+	do {
+		b = inl(pmtmr_ioport);
+	} while (cyc2us(b - a) < us);
+}
+
+void pmtimer_resume(void)
+{
+	last_pmtmr_tick = inl(pmtmr_ioport);
+}
+
 unsigned int do_gettimeoffset_pm(void)
 {
 	u32 now, offset, delta = 0;
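
Note: pmtimer_wait() above busy-waits on the ACPI PM timer, a free-running 24-bit counter. pmtimer_wait_tick() first spins until the counter moves, so the wait starts on a tick edge, then the loop polls until the elapsed ticks, converted to microseconds, cover the request. A sketch of the conversion, assuming cyc2us() is a plain rate conversion (the kernel uses a cheaper fixed-point approximation):

    #include <stdint.h>

    #define PMTMR_HZ     3579545U   /* the ACPI PM timer always runs at 3.579545 MHz */
    #define ACPI_PM_MASK 0xFFFFFFU  /* counter is 24 bits wide */

    static uint32_t pm_cycles_to_us(uint32_t cycles)
    {
            cycles &= ACPI_PM_MASK;  /* tolerate one 24-bit wraparound */
            return (uint32_t)((uint64_t)cycles * 1000000U / PMTMR_HZ);
    }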
diff --git a/arch/x86_64/kernel/process.c b/arch/x86_64/kernel/process.c
index 8ded407e4a94..22a05dec81a2 100644
--- a/arch/x86_64/kernel/process.c
+++ b/arch/x86_64/kernel/process.c
@@ -516,8 +516,10 @@ out:
  * This could still be optimized:
  * - fold all the options into a flag word and test it with a single test.
  * - could test fs/gs bitsliced
+ *
+ * Kprobes not supported here. Set the probe on schedule instead.
  */
-struct task_struct *
+__kprobes struct task_struct *
 __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 {
 	struct thread_struct *prev = &prev_p->thread,
diff --git a/arch/x86_64/kernel/setup.c b/arch/x86_64/kernel/setup.c
index 28895c03cb11..363db5a003df 100644
--- a/arch/x86_64/kernel/setup.c
+++ b/arch/x86_64/kernel/setup.c
@@ -741,7 +741,7 @@ void __init setup_arch(char **cmdline_p)
 	e820_setup_gap();
 
 #ifdef CONFIG_GART_IOMMU
-       iommu_hole_init();
+	iommu_hole_init();
 #endif
 
 #ifdef CONFIG_VT
@@ -877,6 +877,7 @@ static void __init amd_detect_cmp(struct cpuinfo_x86 *c)
 static int __init init_amd(struct cpuinfo_x86 *c)
 {
 	int r;
+	unsigned level;
 
 #ifdef CONFIG_SMP
 	unsigned long value;
@@ -899,6 +900,11 @@ static int __init init_amd(struct cpuinfo_x86 *c)
 	   3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway */
 	clear_bit(0*32+31, &c->x86_capability);
 
+	/* On C+ stepping K8 rep microcode works well for copy/memset */
+	level = cpuid_eax(1);
+	if (c->x86 == 15 && ((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58))
+		set_bit(X86_FEATURE_REP_GOOD, &c->x86_capability);
+
 	r = get_model_name(c);
 	if (!r) {
 		switch (c->x86) {
diff --git a/arch/x86_64/kernel/time.c b/arch/x86_64/kernel/time.c
index f8c47c688443..c0844bffbf84 100644
--- a/arch/x86_64/kernel/time.c
+++ b/arch/x86_64/kernel/time.c
@@ -51,7 +51,7 @@ extern int using_apic_timer;
 DEFINE_SPINLOCK(rtc_lock);
 DEFINE_SPINLOCK(i8253_lock);
 
-static int nohpet __initdata = 0;
+int nohpet __initdata = 0;
 static int notsc __initdata = 0;
 
 #undef HPET_HACK_ENABLE_DANGEROUS
@@ -345,7 +345,7 @@ static noinline void handle_lost_ticks(int lost, struct pt_regs *regs)
 #endif
 }
 
-static irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+void main_timer_handler(struct pt_regs *regs)
 {
 	static unsigned long rtc_update = 0;
 	unsigned long tsc;
@@ -458,12 +458,17 @@ void main_timer_handler(struct pt_regs *regs)
 	}
 
 	write_sequnlock(&xtime_lock);
+}
 
+static irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+	if (apic_runs_main_timer > 1)
+		return IRQ_HANDLED;
+	main_timer_handler(regs);
 #ifdef CONFIG_X86_LOCAL_APIC
 	if (using_apic_timer)
 		smp_send_timer_broadcast_ipi();
 #endif
-
 	return IRQ_HANDLED;
 }
 
@@ -843,17 +848,43 @@ static int hpet_reenable(void)
 	return hpet_timer_stop_set_go(hpet_tick);
 }
 
-void __init pit_init(void)
+#define PIT_MODE 0x43
+#define PIT_CH0  0x40
+
+static void __init __pit_init(int val, u8 mode)
 {
 	unsigned long flags;
 
 	spin_lock_irqsave(&i8253_lock, flags);
-	outb_p(0x34, 0x43);		/* binary, mode 2, LSB/MSB, ch 0 */
-	outb_p(LATCH & 0xff, 0x40);	/* LSB */
-	outb_p(LATCH >> 8, 0x40);	/* MSB */
+	outb_p(mode, PIT_MODE);
+	outb_p(val & 0xff, PIT_CH0);	/* LSB */
+	outb_p(val >> 8, PIT_CH0);	/* MSB */
 	spin_unlock_irqrestore(&i8253_lock, flags);
 }
 
+void __init pit_init(void)
+{
+	__pit_init(LATCH, 0x34); /* binary, mode 2, LSB/MSB, ch 0 */
+}
+
+void __init pit_stop_interrupt(void)
+{
+	__pit_init(0, 0x30); /* mode 0 */
+}
+
+void __init stop_timer_interrupt(void)
+{
+	char *name;
+	if (vxtime.hpet_address) {
+		name = "HPET";
+		hpet_timer_stop_set_go(0);
+	} else {
+		name = "PIT";
+		pit_stop_interrupt();
+	}
+	printk(KERN_INFO "timer: %s interrupt stopped.\n", name);
+}
+
 int __init time_setup(char *str)
 {
 	report_lost_ticks = 1;
@@ -932,7 +963,7 @@ void __init time_init(void)
  * Make an educated guess if the TSC is trustworthy and synchronized
  * over all CPUs.
  */
-__init int unsynchronized_tsc(void)
+__cpuinit int unsynchronized_tsc(void)
 {
 #ifdef CONFIG_SMP
 	if (oem_force_hpet_timer())
@@ -1016,9 +1047,21 @@ static int timer_resume(struct sys_device *dev)
 	write_seqlock_irqsave(&xtime_lock,flags);
 	xtime.tv_sec = sec;
 	xtime.tv_nsec = 0;
+	if (vxtime.mode == VXTIME_HPET) {
+		if (hpet_use_timer)
+			vxtime.last = hpet_readl(HPET_T0_CMP) - hpet_tick;
+		else
+			vxtime.last = hpet_readl(HPET_COUNTER);
+#ifdef CONFIG_X86_PM_TIMER
+	} else if (vxtime.mode == VXTIME_PMTMR) {
+		pmtimer_resume();
+#endif
+	} else
+		vxtime.last_tsc = get_cycles_sync();
 	write_sequnlock_irqrestore(&xtime_lock,flags);
 	jiffies += sleep_length;
 	wall_jiffies += sleep_length;
+	monotonic_base += sleep_length * (NSEC_PER_SEC/HZ);
 	touch_softlockup_watchdog();
 	return 0;
 }
diff --git a/arch/x86_64/kernel/traps.c b/arch/x86_64/kernel/traps.c
index 8bb0aeda78b9..ee1b2da9e5e7 100644
--- a/arch/x86_64/kernel/traps.c
+++ b/arch/x86_64/kernel/traps.c
@@ -372,7 +372,7 @@ void out_of_line_bug(void)
 static DEFINE_SPINLOCK(die_lock);
 static int die_owner = -1;
 
-unsigned long oops_begin(void)
+unsigned __kprobes long oops_begin(void)
 {
 	int cpu = safe_smp_processor_id();
 	unsigned long flags;
@@ -391,7 +391,7 @@ unsigned long oops_begin(void)
 	return flags;
 }
 
-void oops_end(unsigned long flags)
+void __kprobes oops_end(unsigned long flags)
 {
 	die_owner = -1;
 	bust_spinlocks(0);
@@ -400,7 +400,7 @@ void oops_end(unsigned long flags)
 		panic("Oops");
 }
 
-void __die(const char * str, struct pt_regs * regs, long err)
+void __kprobes __die(const char * str, struct pt_regs * regs, long err)
 {
 	static int die_counter;
 	printk(KERN_EMERG "%s: %04lx [%u] ", str, err & 0xffff,++die_counter);
@@ -432,7 +432,7 @@ void die(const char * str, struct pt_regs * regs, long err)
 	do_exit(SIGSEGV);
 }
 
-void die_nmi(char *str, struct pt_regs *regs)
+void __kprobes die_nmi(char *str, struct pt_regs *regs)
 {
 	unsigned long flags = oops_begin();
 
@@ -575,7 +575,8 @@ asmlinkage void __kprobes do_general_protection(struct pt_regs * regs,
 	}
 }
 
-static void mem_parity_error(unsigned char reason, struct pt_regs * regs)
+static __kprobes void
+mem_parity_error(unsigned char reason, struct pt_regs * regs)
 {
 	printk("Uhhuh. NMI received. Dazed and confused, but trying to continue\n");
 	printk("You probably have a hardware problem with your RAM chips\n");
@@ -585,7 +586,8 @@ mem_parity_error(unsigned char reason, struct pt_regs * regs)
 	outb(reason, 0x61);
 }
 
-static void io_check_error(unsigned char reason, struct pt_regs * regs)
+static __kprobes void
+io_check_error(unsigned char reason, struct pt_regs * regs)
 {
 	printk("NMI: IOCK error (debug interrupt?)\n");
 	show_registers(regs);
@@ -598,7 +600,8 @@ io_check_error(unsigned char reason, struct pt_regs * regs)
 	outb(reason, 0x61);
 }
 
-static void unknown_nmi_error(unsigned char reason, struct pt_regs * regs)
+static __kprobes void
+unknown_nmi_error(unsigned char reason, struct pt_regs * regs)
 {	printk("Uhhuh. NMI received for unknown reason %02x.\n", reason);
 	printk("Dazed and confused, but trying to continue\n");
 	printk("Do you have a strange power saving mode enabled?\n");
@@ -606,7 +609,7 @@ unknown_nmi_error(unsigned char reason, struct pt_regs * regs)
 
 /* Runs on IST stack. This code must keep interrupts off all the time.
    Nested NMIs are prevented by the CPU. */
-asmlinkage void default_do_nmi(struct pt_regs *regs)
+asmlinkage __kprobes void default_do_nmi(struct pt_regs *regs)
 {
 	unsigned char reason = 0;
 	int cpu;
@@ -658,7 +661,7 @@ asmlinkage void __kprobes do_int3(struct pt_regs * regs, long error_code)
 /* Help handler running on IST stack to switch back to user stack
    for scheduling or signal handling. The actual stack switch is done in
    entry.S */
-asmlinkage struct pt_regs *sync_regs(struct pt_regs *eregs)
+asmlinkage __kprobes struct pt_regs *sync_regs(struct pt_regs *eregs)
 {
 	struct pt_regs *regs = eregs;
 	/* Did already sync */
diff --git a/arch/x86_64/kernel/vmlinux.lds.S b/arch/x86_64/kernel/vmlinux.lds.S
index b0eed1faf740..74db0062d4a2 100644
--- a/arch/x86_64/kernel/vmlinux.lds.S
+++ b/arch/x86_64/kernel/vmlinux.lds.S
@@ -172,13 +172,15 @@ SECTIONS
   . = ALIGN(4096);
   __initramfs_start = .;
   .init.ramfs : AT(ADDR(.init.ramfs) - LOAD_OFFSET) { *(.init.ramfs) }
   __initramfs_end = .;
-  . = ALIGN(32);
+  /* temporary here to work around NR_CPUS. If you see this comment in 2.6.17+
+   complain */
+  . = ALIGN(4096);
+  __init_end = .;
+  . = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
   __per_cpu_start = .;
   .data.percpu : AT(ADDR(.data.percpu) - LOAD_OFFSET) { *(.data.percpu) }
   __per_cpu_end = .;
-  . = ALIGN(4096);
-  __init_end = .;
 
   . = ALIGN(4096);
   __nosave_begin = .;
diff --git a/arch/x86_64/lib/Makefile b/arch/x86_64/lib/Makefile
index bba5db6cebd6..ccef6ae747a3 100644
--- a/arch/x86_64/lib/Makefile
+++ b/arch/x86_64/lib/Makefile
@@ -4,7 +4,7 @@
 
 CFLAGS_csum-partial.o := -funroll-loops
 
-obj-y := io.o
+obj-y := io.o iomap_copy.o
 
 lib-y := csum-partial.o csum-copy.o csum-wrappers.o delay.o \
 	usercopy.o getuser.o putuser.o \
diff --git a/arch/x86_64/lib/clear_page.S b/arch/x86_64/lib/clear_page.S
index 43d9fa136180..1f81b79b796c 100644
--- a/arch/x86_64/lib/clear_page.S
+++ b/arch/x86_64/lib/clear_page.S
@@ -5,8 +5,46 @@
 	.globl clear_page
 	.p2align 4
 clear_page:
+	xorl %eax,%eax
+	movl $4096/64,%ecx
+	.p2align 4
+.Lloop:
+	decl %ecx
+#define PUT(x) movq %rax,x*8(%rdi)
+	movq %rax,(%rdi)
+	PUT(1)
+	PUT(2)
+	PUT(3)
+	PUT(4)
+	PUT(5)
+	PUT(6)
+	PUT(7)
+	leaq 64(%rdi),%rdi
+	jnz .Lloop
+	nop
+	ret
+clear_page_end:
+
+	/* Some CPUs run faster using the string instructions.
+	   It is also a lot simpler. Use this when possible */
+
+#include <asm/cpufeature.h>
+
+	.section .altinstructions,"a"
+	.align 8
+	.quad clear_page
+	.quad clear_page_c
+	.byte X86_FEATURE_REP_GOOD
+	.byte clear_page_end-clear_page
+	.byte clear_page_c_end-clear_page_c
+	.previous
+
+	.section .altinstr_replacement,"ax"
+clear_page_c:
 	movl $4096/8,%ecx
 	xorl %eax,%eax
 	rep
 	stosq
 	ret
+clear_page_c_end:
+	.previous
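
Note: the .quad/.quad/.byte/.byte/.byte records emitted into .altinstructions above describe one patch site each; at boot, the alternatives code overwrites the original instructions with the replacement when the CPU sets the named feature bit (here X86_FEATURE_REP_GOOD, set for C-stepping-or-newer K8s in setup.c above). A sketch of one record as a C struct — field names are illustrative, and the kernel's struct alt_instr of this era also carries a padding byte:

    struct alt_record_sketch {
            void          *instr;          /* .quad clear_page: code to patch      */
            void          *replacement;    /* .quad clear_page_c: bytes copied in  */
            unsigned char  cpuid;          /* .byte X86_FEATURE_REP_GOOD           */
            unsigned char  instrlen;       /* .byte clear_page_end-clear_page      */
            unsigned char  replacementlen; /* .byte clear_page_c_end-clear_page_c  */
    };

The same mechanism is reused below for copy_page, copy_user, memcpy, and memset, so the short rep-string variants replace the unrolled loops only where the microcode makes them fast.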
diff --git a/arch/x86_64/lib/copy_page.S b/arch/x86_64/lib/copy_page.S
index 621a19769406..8fa19d96a7ee 100644
--- a/arch/x86_64/lib/copy_page.S
+++ b/arch/x86_64/lib/copy_page.S
@@ -8,7 +8,94 @@
 	.globl copy_page
 	.p2align 4
 copy_page:
+	subq $3*8,%rsp
+	movq %rbx,(%rsp)
+	movq %r12,1*8(%rsp)
+	movq %r13,2*8(%rsp)
+
+	movl $(4096/64)-5,%ecx
+	.p2align 4
+.Loop64:
+	dec %rcx
+
+	movq    (%rsi), %rax
+	movq  8 (%rsi), %rbx
+	movq 16 (%rsi), %rdx
+	movq 24 (%rsi), %r8
+	movq 32 (%rsi), %r9
+	movq 40 (%rsi), %r10
+	movq 48 (%rsi), %r11
+	movq 56 (%rsi), %r12
+
+	prefetcht0 5*64(%rsi)
+
+	movq %rax,    (%rdi)
+	movq %rbx,  8 (%rdi)
+	movq %rdx, 16 (%rdi)
+	movq %r8,  24 (%rdi)
+	movq %r9,  32 (%rdi)
+	movq %r10, 40 (%rdi)
+	movq %r11, 48 (%rdi)
+	movq %r12, 56 (%rdi)
+
+	leaq 64 (%rsi), %rsi
+	leaq 64 (%rdi), %rdi
+
+	jnz .Loop64
+
+	movl $5,%ecx
+	.p2align 4
+.Loop2:
+	decl %ecx
+
+	movq    (%rsi), %rax
+	movq  8 (%rsi), %rbx
+	movq 16 (%rsi), %rdx
+	movq 24 (%rsi), %r8
+	movq 32 (%rsi), %r9
+	movq 40 (%rsi), %r10
+	movq 48 (%rsi), %r11
+	movq 56 (%rsi), %r12
+
+	movq %rax,    (%rdi)
+	movq %rbx,  8 (%rdi)
+	movq %rdx, 16 (%rdi)
+	movq %r8,  24 (%rdi)
+	movq %r9,  32 (%rdi)
+	movq %r10, 40 (%rdi)
+	movq %r11, 48 (%rdi)
+	movq %r12, 56 (%rdi)
+
+	leaq 64(%rdi),%rdi
+	leaq 64(%rsi),%rsi
+
+	jnz .Loop2
+
+	movq (%rsp),%rbx
+	movq 1*8(%rsp),%r12
+	movq 2*8(%rsp),%r13
+	addq $3*8,%rsp
+	ret
+
+	/* Some CPUs run faster using the string copy instructions.
+	   It is also a lot simpler. Use this when possible */
+
+#include <asm/cpufeature.h>
+
+	.section .altinstructions,"a"
+	.align 8
+	.quad copy_page
+	.quad copy_page_c
+	.byte X86_FEATURE_REP_GOOD
+	.byte copy_page_c_end-copy_page_c
+	.byte copy_page_c_end-copy_page_c
+	.previous
+
+	.section .altinstr_replacement,"ax"
+copy_page_c:
 	movl $4096/8,%ecx
 	rep
 	movsq
 	ret
+copy_page_c_end:
+	.previous
101 .previous
diff --git a/arch/x86_64/lib/copy_user.S b/arch/x86_64/lib/copy_user.S
index 79422b6559c3..f64569b83b54 100644
--- a/arch/x86_64/lib/copy_user.S
+++ b/arch/x86_64/lib/copy_user.S
@@ -4,9 +4,12 @@
  * Functions to copy from and to user space.
  */
 
+#define FIX_ALIGNMENT 1
+
 #include <asm/current.h>
 #include <asm/asm-offsets.h>
 #include <asm/thread_info.h>
+#include <asm/cpufeature.h>
 
 /* Standard copy_to_user with segment limit checking */
 	.globl copy_to_user
@@ -18,7 +21,23 @@ copy_to_user:
 	jc  bad_to_user
 	cmpq threadinfo_addr_limit(%rax),%rcx
 	jae bad_to_user
-	jmp copy_user_generic
+2:
+	.byte 0xe9	/* 32bit jump */
+	.long .Lcug-1f
+1:
+
+	.section .altinstr_replacement,"ax"
+3:	.byte 0xe9			/* replacement jmp with 8 bit immediate */
+	.long copy_user_generic_c-1b	/* offset */
+	.previous
+	.section .altinstructions,"a"
+	.align 8
+	.quad 2b
+	.quad 3b
+	.byte X86_FEATURE_REP_GOOD
+	.byte 5
+	.byte 5
+	.previous
 
 /* Standard copy_from_user with segment limit checking */
 	.globl copy_from_user
@@ -53,44 +72,230 @@ bad_to_user:
  * rsi source
  * rdx count
  *
- * Only 4GB of copy is supported. This shouldn't be a problem
- * because the kernel normally only writes from/to page sized chunks
- * even if user space passed a longer buffer.
- * And more would be dangerous because both Intel and AMD have
- * errata with rep movsq > 4GB. If someone feels the need to fix
- * this please consider this.
- *
 * Output:
 * eax uncopied bytes or 0 if successful.
 */
-
 	.globl copy_user_generic
+	.p2align 4
 copy_user_generic:
+	.byte 0x66,0x66,0x90	/* 5 byte nop for replacement jump */
+	.byte 0x66,0x90
+1:
+	.section .altinstr_replacement,"ax"
+2:	.byte 0xe9	/* near jump with 32bit immediate */
+	.long copy_user_generic_c-1b	/* offset */
+	.previous
+	.section .altinstructions,"a"
+	.align 8
+	.quad copy_user_generic
+	.quad 2b
+	.byte X86_FEATURE_REP_GOOD
+	.byte 5
+	.byte 5
+	.previous
+.Lcug:
+	pushq %rbx
+	xorl %eax,%eax		/*zero for the exception handler */
+
+#ifdef FIX_ALIGNMENT
+	/* check for bad alignment of destination */
+	movl %edi,%ecx
+	andl $7,%ecx
+	jnz .Lbad_alignment
+.Lafter_bad_alignment:
+#endif
+
+	movq %rdx,%rcx
+
+	movl $64,%ebx
+	shrq $6,%rdx
+	decq %rdx
+	js .Lhandle_tail
+
+	.p2align 4
+.Lloop:
+.Ls1:	movq (%rsi),%r11
+.Ls2:	movq 1*8(%rsi),%r8
+.Ls3:	movq 2*8(%rsi),%r9
+.Ls4:	movq 3*8(%rsi),%r10
+.Ld1:	movq %r11,(%rdi)
+.Ld2:	movq %r8,1*8(%rdi)
+.Ld3:	movq %r9,2*8(%rdi)
+.Ld4:	movq %r10,3*8(%rdi)
+
+.Ls5:	movq 4*8(%rsi),%r11
+.Ls6:	movq 5*8(%rsi),%r8
+.Ls7:	movq 6*8(%rsi),%r9
+.Ls8:	movq 7*8(%rsi),%r10
+.Ld5:	movq %r11,4*8(%rdi)
+.Ld6:	movq %r8,5*8(%rdi)
+.Ld7:	movq %r9,6*8(%rdi)
+.Ld8:	movq %r10,7*8(%rdi)
+
+	decq %rdx
+
+	leaq 64(%rsi),%rsi
+	leaq 64(%rdi),%rdi
+
+	jns .Lloop
+
+	.p2align 4
+.Lhandle_tail:
+	movl %ecx,%edx
+	andl $63,%ecx
+	shrl $3,%ecx
+	jz .Lhandle_7
+	movl $8,%ebx
+	.p2align 4
+.Lloop_8:
+.Ls9:	movq (%rsi),%r8
+.Ld9:	movq %r8,(%rdi)
+	decl %ecx
+	leaq 8(%rdi),%rdi
+	leaq 8(%rsi),%rsi
+	jnz .Lloop_8
+
+.Lhandle_7:
+	movl %edx,%ecx
+	andl $7,%ecx
+	jz .Lende
+	.p2align 4
+.Lloop_1:
+.Ls10:	movb (%rsi),%bl
+.Ld10:	movb %bl,(%rdi)
+	incq %rdi
+	incq %rsi
+	decl %ecx
+	jnz .Lloop_1
+
+.Lende:
+	popq %rbx
+	ret
+
+#ifdef FIX_ALIGNMENT
+	/* align destination */
+	.p2align 4
+.Lbad_alignment:
+	movl $8,%r9d
+	subl %ecx,%r9d
+	movl %r9d,%ecx
+	cmpq %r9,%rdx
+	jz .Lhandle_7
+	js .Lhandle_7
+.Lalign_1:
+.Ls11:	movb (%rsi),%bl
+.Ld11:	movb %bl,(%rdi)
+	incq %rsi
+	incq %rdi
+	decl %ecx
+	jnz .Lalign_1
+	subq %r9,%rdx
+	jmp .Lafter_bad_alignment
+#endif
+
+	/* table sorted by exception address */
+	.section __ex_table,"a"
+	.align 8
+	.quad .Ls1,.Ls1e
+	.quad .Ls2,.Ls2e
+	.quad .Ls3,.Ls3e
+	.quad .Ls4,.Ls4e
+	.quad .Ld1,.Ls1e
+	.quad .Ld2,.Ls2e
+	.quad .Ld3,.Ls3e
+	.quad .Ld4,.Ls4e
+	.quad .Ls5,.Ls5e
+	.quad .Ls6,.Ls6e
+	.quad .Ls7,.Ls7e
+	.quad .Ls8,.Ls8e
+	.quad .Ld5,.Ls5e
+	.quad .Ld6,.Ls6e
+	.quad .Ld7,.Ls7e
+	.quad .Ld8,.Ls8e
+	.quad .Ls9,.Le_quad
+	.quad .Ld9,.Le_quad
+	.quad .Ls10,.Le_byte
+	.quad .Ld10,.Le_byte
+#ifdef FIX_ALIGNMENT
+	.quad .Ls11,.Lzero_rest
+	.quad .Ld11,.Lzero_rest
+#endif
+	.quad .Le5,.Le_zero
+	.previous
+
+	/* compute 64-offset for main loop. 8 bytes accuracy with error on the
+	   pessimistic side. this is gross. it would be better to fix the
+	   interface. */
+	/* eax: zero, ebx: 64 */
+.Ls1e:	addl $8,%eax
+.Ls2e:	addl $8,%eax
+.Ls3e:	addl $8,%eax
+.Ls4e:	addl $8,%eax
+.Ls5e:	addl $8,%eax
+.Ls6e:	addl $8,%eax
+.Ls7e:	addl $8,%eax
+.Ls8e:	addl $8,%eax
+	addq %rbx,%rdi	/* +64 */
+	subq %rax,%rdi	/* correct destination with computed offset */
+
+	shlq $6,%rdx	/* loop counter * 64 (stride length) */
+	addq %rax,%rdx	/* add offset to loopcnt */
+	andl $63,%ecx	/* remaining bytes */
+	addq %rcx,%rdx	/* add them */
+	jmp .Lzero_rest
+
+	/* exception on quad word loop in tail handling */
+	/* ecx: loopcnt/8, %edx: length, rdi: correct */
+.Le_quad:
+	shll $3,%ecx
+	andl $7,%edx
+	addl %ecx,%edx
+	/* edx: bytes to zero, rdi: dest, eax:zero */
+.Lzero_rest:
+	movq %rdx,%rcx
+.Le_byte:
+	xorl %eax,%eax
+.Le5:	rep
+	stosb
+	/* when there is another exception while zeroing the rest just return */
+.Le_zero:
+	movq %rdx,%rax
+	jmp .Lende
+
+	/* Some CPUs run faster using the string copy instructions.
+	   This is also a lot simpler. Use them when possible.
+	   Patch in jmps to this code instead of copying it fully
+	   to avoid unwanted aliasing in the exception tables. */
+
+ /* rdi	destination
+  * rsi source
+  * rdx count
+  *
+  * Output:
+  * eax uncopied bytes or 0 if successfull.
+  *
+  * Only 4GB of copy is supported. This shouldn't be a problem
+  * because the kernel normally only writes from/to page sized chunks
+  * even if user space passed a longer buffer.
+  * And more would be dangerous because both Intel and AMD have
+  * errata with rep movsq > 4GB. If someone feels the need to fix
+  * this please consider this.
+  */
+copy_user_generic_c:
 	movl %edx,%ecx
 	shrl $3,%ecx
 	andl $7,%edx
-	jz 5f
 1:	rep
 	movsq
 	movl %edx,%ecx
-	xor %eax,%eax
 2:	rep
 	movsb
+4:	movl %ecx,%eax
 	ret
-	/* align here? */
-5:	xorl %eax,%eax
-6:	rep movsq
-	ret
-
-	.section .fixup,"ax"
 3:	lea (%rdx,%rcx,8),%rax
 	ret
-4:	movl %ecx,%eax
-	ret
-	.previous
 
 	.section __ex_table,"a"
 	.quad 1b,3b
 	.quad 2b,4b
-	.quad 6b,4b
 	.previous
diff --git a/arch/x86_64/lib/iomap_copy.S b/arch/x86_64/lib/iomap_copy.S
new file mode 100644
index 000000000000..8bbade5fea05
--- /dev/null
+++ b/arch/x86_64/lib/iomap_copy.S
@@ -0,0 +1,26 @@
+/*
+ * Copyright 2006 PathScale, Inc.  All Rights Reserved.
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+/*
+ * override generic version in lib/iomap_copy.c
+ */
+	.globl __iowrite32_copy
+	.p2align 4
+__iowrite32_copy:
+	movl %edx,%ecx
+	rep movsd
+	ret
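
Note: under the x86-64 calling convention, rdi is the destination, rsi the source, and rdx the word count, so the two instructions above are the whole copy: load the count into ecx and let rep movsd move that many 32-bit words. Functionally this matches the generic lib/iomap_copy.c loop; a sketch of the equivalent C, with a hypothetical name:

    #include <stdint.h>
    #include <stddef.h>

    /* Copy `count` 32-bit words to a (typically write-combining) MMIO region. */
    static void iowrite32_copy_sketch(volatile uint32_t *to,
                                      const uint32_t *from, size_t count)
    {
            while (count--)
                    *to++ = *from++;
    }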
diff --git a/arch/x86_64/lib/memcpy.S b/arch/x86_64/lib/memcpy.S
index 92dd80544602..5554948b5554 100644
--- a/arch/x86_64/lib/memcpy.S
+++ b/arch/x86_64/lib/memcpy.S
@@ -11,8 +11,6 @@
  *
  * Output:
  * rax original destination
- *
- * TODO: check best memcpy for PSC
  */
 
 	.globl __memcpy
@@ -20,6 +18,95 @@
 	.p2align 4
 __memcpy:
 memcpy:
+	pushq %rbx
+	movq %rdi,%rax
+
+	movl %edx,%ecx
+	shrl $6,%ecx
+	jz .Lhandle_tail
+
+	.p2align 4
+.Lloop_64:
+	decl %ecx
+
+	movq (%rsi),%r11
+	movq 8(%rsi),%r8
+
+	movq %r11,(%rdi)
+	movq %r8,1*8(%rdi)
+
+	movq 2*8(%rsi),%r9
+	movq 3*8(%rsi),%r10
+
+	movq %r9,2*8(%rdi)
+	movq %r10,3*8(%rdi)
+
+	movq 4*8(%rsi),%r11
+	movq 5*8(%rsi),%r8
+
+	movq %r11,4*8(%rdi)
+	movq %r8,5*8(%rdi)
+
+	movq 6*8(%rsi),%r9
+	movq 7*8(%rsi),%r10
+
+	movq %r9,6*8(%rdi)
+	movq %r10,7*8(%rdi)
+
+	leaq 64(%rsi),%rsi
+	leaq 64(%rdi),%rdi
+	jnz .Lloop_64
+
+.Lhandle_tail:
+	movl %edx,%ecx
+	andl $63,%ecx
+	shrl $3,%ecx
+	jz .Lhandle_7
+	.p2align 4
+.Lloop_8:
+	decl %ecx
+	movq (%rsi),%r8
+	movq %r8,(%rdi)
+	leaq 8(%rdi),%rdi
+	leaq 8(%rsi),%rsi
+	jnz .Lloop_8
+
+.Lhandle_7:
+	movl %edx,%ecx
+	andl $7,%ecx
+	jz .Lende
+	.p2align 4
+.Lloop_1:
+	movb (%rsi),%r8b
+	movb %r8b,(%rdi)
+	incq %rdi
+	incq %rsi
+	decl %ecx
+	jnz .Lloop_1
+
+.Lende:
+	popq %rbx
+	ret
+.Lfinal:
+
+	/* Some CPUs run faster using the string copy instructions.
+	   It is also a lot simpler. Use this when possible */
+
+	.section .altinstructions,"a"
+	.align 8
+	.quad memcpy
+	.quad memcpy_c
+	.byte X86_FEATURE_REP_GOOD
+	.byte .Lfinal-memcpy
+	.byte memcpy_c_end-memcpy_c
+	.previous
+
+	.section .altinstr_replacement,"ax"
+ /* rdi	destination
+  * rsi source
+  * rdx count
+  */
+memcpy_c:
 	movq %rdi,%rax
 	movl %edx,%ecx
 	shrl $3,%ecx
@@ -30,3 +117,5 @@ memcpy:
 	rep
 	movsb
 	ret
+memcpy_c_end:
+	.previous
diff --git a/arch/x86_64/lib/memset.S b/arch/x86_64/lib/memset.S
index 2aa48f24ed1e..ad397f2c7de8 100644
--- a/arch/x86_64/lib/memset.S
+++ b/arch/x86_64/lib/memset.S
@@ -13,6 +13,98 @@
 	.p2align 4
 memset:
 __memset:
+	movq %rdi,%r10
+	movq %rdx,%r11
+
+	/* expand byte value */
+	movzbl %sil,%ecx
+	movabs $0x0101010101010101,%rax
+	mul %rcx		/* with rax, clobbers rdx */
+
+	/* align dst */
+	movl %edi,%r9d
+	andl $7,%r9d
+	jnz .Lbad_alignment
+.Lafter_bad_alignment:
+
+	movl %r11d,%ecx
+	shrl $6,%ecx
+	jz .Lhandle_tail
+
+	.p2align 4
+.Lloop_64:
+	decl %ecx
+	movq %rax,(%rdi)
+	movq %rax,8(%rdi)
+	movq %rax,16(%rdi)
+	movq %rax,24(%rdi)
+	movq %rax,32(%rdi)
+	movq %rax,40(%rdi)
+	movq %rax,48(%rdi)
+	movq %rax,56(%rdi)
+	leaq 64(%rdi),%rdi
+	jnz .Lloop_64
+
+	/* Handle tail in loops. The loops should be faster than hard
+	   to predict jump tables. */
+	.p2align 4
+.Lhandle_tail:
+	movl %r11d,%ecx
+	andl $63&(~7),%ecx
+	jz .Lhandle_7
+	shrl $3,%ecx
+	.p2align 4
+.Lloop_8:
+	decl %ecx
+	movq %rax,(%rdi)
+	leaq 8(%rdi),%rdi
+	jnz .Lloop_8
+
+.Lhandle_7:
+	movl %r11d,%ecx
+	andl $7,%ecx
+	jz .Lende
+	.p2align 4
+.Lloop_1:
+	decl %ecx
+	movb %al,(%rdi)
+	leaq 1(%rdi),%rdi
+	jnz .Lloop_1
+
+.Lende:
+	movq %r10,%rax
+	ret
+
+.Lbad_alignment:
+	cmpq $7,%r11
+	jbe .Lhandle_7
+	movq %rax,(%rdi)	/* unaligned store */
+	movq $8,%r8
+	subq %r9,%r8
+	addq %r8,%rdi
+	subq %r8,%r11
+	jmp .Lafter_bad_alignment
+
+	/* Some CPUs run faster using the string instructions.
+	   It is also a lot simpler. Use this when possible */
+
+#include <asm/cpufeature.h>
+
+	.section .altinstructions,"a"
+	.align 8
+	.quad memset
+	.quad memset_c
+	.byte X86_FEATURE_REP_GOOD
+	.byte memset_c_end-memset_c
+	.byte memset_c_end-memset_c
+	.previous
+
+	.section .altinstr_replacement,"ax"
+ /* rdi	destination
+  * rsi value
+  * rdx count
+  */
+memset_c:
 	movq %rdi,%r9
 	movl %edx,%r8d
 	andl $7,%r8d
@@ -29,3 +121,5 @@ __memset:
 	stosb
 	movq %r9,%rax
 	ret
+memset_c_end:
+	.previous
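
Note: the movzbl/movabs/mul prologue of the unrolled memset above broadcasts the fill byte into all eight byte lanes of a 64-bit word, so every movq stores eight copies at once. The same trick spelled out in C:

    #include <stdint.h>

    static uint64_t broadcast_byte(uint8_t c)
    {
            /* e.g. broadcast_byte(0xab) == 0xababababababababULL */
            return (uint64_t)c * 0x0101010101010101ULL;
    }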
diff --git a/arch/x86_64/mm/fault.c b/arch/x86_64/mm/fault.c
index 26eac194064b..2e7c3c8ffe03 100644
--- a/arch/x86_64/mm/fault.c
+++ b/arch/x86_64/mm/fault.c
@@ -33,7 +33,6 @@
 #include <asm/proto.h>
 #include <asm/kdebug.h>
 #include <asm-generic/sections.h>
-#include <asm/kdebug.h>
 
 /* Page fault error code bits */
 #define PF_PROT	(1<<0)		/* or no page found */
@@ -157,8 +156,8 @@ void dump_pagetable(unsigned long address)
 
 	pgd = __va((unsigned long)pgd & PHYSICAL_PAGE_MASK);
 	pgd += pgd_index(address);
-	printk("PGD %lx ", pgd_val(*pgd));
 	if (bad_address(pgd)) goto bad;
+	printk("PGD %lx ", pgd_val(*pgd));
 	if (!pgd_present(*pgd)) goto ret;
 
 	pud = __pud_offset_k((pud_t *)pgd_page(*pgd), address);
diff --git a/arch/x86_64/mm/srat.c b/arch/x86_64/mm/srat.c
index 8b7f85608fa8..cd25300726fc 100644
--- a/arch/x86_64/mm/srat.c
+++ b/arch/x86_64/mm/srat.c
@@ -26,6 +26,10 @@ static nodemask_t nodes_found __initdata;
 static struct node nodes[MAX_NUMNODES] __initdata;
 static u8 pxm2node[256] = { [0 ... 255] = 0xff };
 
+/* Too small nodes confuse the VM badly. Usually they result
+   from BIOS bugs. */
+#define NODE_MIN_SIZE (4*1024*1024)
+
 static int node_to_pxm(int n);
 
 int pxm_to_node(int pxm)
@@ -131,7 +135,12 @@ void __init
 acpi_numa_processor_affinity_init(struct acpi_table_processor_affinity *pa)
 {
 	int pxm, node;
-	if (srat_disabled() || pa->flags.enabled == 0)
+	if (srat_disabled())
+		return;
+	if (pa->header.length != sizeof(struct acpi_table_processor_affinity)) { bad_srat();
+		return;
+	}
+	if (pa->flags.enabled == 0)
 		return;
 	pxm = pa->proximity_domain;
 	node = setup_node(pxm);
@@ -155,8 +164,16 @@ acpi_numa_memory_affinity_init(struct acpi_table_memory_affinity *ma)
 	int node, pxm;
 	int i;
 
-	if (srat_disabled() || ma->flags.enabled == 0)
+	if (srat_disabled())
+		return;
+	if (ma->header.length != sizeof(struct acpi_table_memory_affinity)) {
+		bad_srat();
 		return;
+	}
+	if (ma->flags.enabled == 0)
+		return;
+	start = ma->base_addr_lo | ((u64)ma->base_addr_hi << 32);
+	end = start + (ma->length_lo | ((u64)ma->length_hi << 32));
 	pxm = ma->proximity_domain;
 	node = setup_node(pxm);
 	if (node < 0) {
@@ -164,8 +181,6 @@ acpi_numa_memory_affinity_init(struct acpi_table_memory_affinity *ma)
 		bad_srat();
 		return;
 	}
-	start = ma->base_addr_lo | ((u64)ma->base_addr_hi << 32);
-	end = start + (ma->length_lo | ((u64)ma->length_hi << 32));
 	/* It is fine to add this area to the nodes data it will be used later*/
 	if (ma->flags.hot_pluggable == 1)
 		printk(KERN_INFO "SRAT: hot plug zone found %lx - %lx \n",
@@ -223,6 +238,16 @@ static int nodes_cover_memory(void)
 	return 1;
 }
 
+static void unparse_node(int node)
+{
+	int i;
+	node_clear(node, nodes_parsed);
+	for (i = 0; i < MAX_LOCAL_APIC; i++) {
+		if (apicid_to_node[i] == node)
+			apicid_to_node[i] = NUMA_NO_NODE;
+	}
+}
+
 void __init acpi_numa_arch_fixup(void) {}
 
 /* Use the information discovered above to actually set up the nodes. */
@@ -230,16 +255,16 @@ int __init acpi_scan_nodes(unsigned long start, unsigned long end)
 {
 	int i;
 
-	if (acpi_numa <= 0)
-		return -1;
-
 	/* First clean up the node list */
-	for_each_node_mask(i, nodes_parsed) {
+	for (i = 0; i < MAX_NUMNODES; i++) {
 		cutoff_node(i, start, end);
-		if (nodes[i].start == nodes[i].end)
-			node_clear(i, nodes_parsed);
+		if ((nodes[i].end - nodes[i].start) < NODE_MIN_SIZE)
+			unparse_node(i);
 	}
 
+	if (acpi_numa <= 0)
+		return -1;
+
 	if (!nodes_cover_memory()) {
 		bad_srat();
 		return -1;
diff --git a/arch/x86_64/pci/mmconfig.c b/arch/x86_64/pci/mmconfig.c
index f16c0d57c552..b4a3fe4ec249 100644
--- a/arch/x86_64/pci/mmconfig.c
+++ b/arch/x86_64/pci/mmconfig.c
@@ -29,11 +29,8 @@ static char __iomem *get_virt(unsigned int seg, unsigned bus)
 
 	while (1) {
 		++cfg_num;
-		if (cfg_num >= pci_mmcfg_config_num) {
-			/* Not found - fall back to type 1. This happens
-			   e.g. on the internal devices of a K8 northbridge. */
-			return NULL;
-		}
+		if (cfg_num >= pci_mmcfg_config_num)
+			break;
 		cfg = pci_mmcfg_virt[cfg_num].cfg;
 		if (cfg->pci_segment_group_number != seg)
 			continue;
@@ -41,6 +38,18 @@ static char __iomem *get_virt(unsigned int seg, unsigned bus)
 		    (cfg->end_bus_number >= bus))
 			return pci_mmcfg_virt[cfg_num].virt;
 	}
+
+	/* Handle more broken MCFG tables on Asus etc.
+	   They only contain a single entry for bus 0-0. Assume
+	   this applies to all busses. */
+	cfg = &pci_mmcfg_config[0];
+	if (pci_mmcfg_config_num == 1 &&
+		cfg->pci_segment_group_number == 0 &&
+		(cfg->start_bus_number | cfg->end_bus_number) == 0)
+		return pci_mmcfg_virt[0].virt;
+
+	/* Fall back to type 0 */
+	return 0;
 }
 
 static char __iomem *pci_dev_base(unsigned int seg, unsigned int bus, unsigned int devfn)