Diffstat (limited to 'arch/i386')
34 files changed, 494 insertions, 217 deletions
diff --git a/arch/i386/Kconfig.cpu b/arch/i386/Kconfig.cpu index 21c9a4e71104..fc4f2abccf06 100644 --- a/arch/i386/Kconfig.cpu +++ b/arch/i386/Kconfig.cpu | |||
@@ -7,6 +7,7 @@ choice | |||
7 | 7 | ||
8 | config M386 | 8 | config M386 |
9 | bool "386" | 9 | bool "386" |
10 | depends on !UML | ||
10 | ---help--- | 11 | ---help--- |
11 | This is the processor type of your CPU. This information is used for | 12 | This is the processor type of your CPU. This information is used for |
12 | optimizing purposes. In order to compile a kernel that can run on | 13 | optimizing purposes. In order to compile a kernel that can run on |
@@ -301,7 +302,7 @@ config X86_USE_PPRO_CHECKSUM | |||
301 | 302 | ||
302 | config X86_USE_3DNOW | 303 | config X86_USE_3DNOW |
303 | bool | 304 | bool |
304 | depends on MCYRIXIII || MK7 || MGEODE_LX | 305 | depends on (MCYRIXIII || MK7 || MGEODE_LX) && !UML |
305 | default y | 306 | default y |
306 | 307 | ||
307 | config X86_OOSTORE | 308 | config X86_OOSTORE |
diff --git a/arch/i386/Makefile b/arch/i386/Makefile index 7cc0b189b82b..0677908dfa06 100644 --- a/arch/i386/Makefile +++ b/arch/i386/Makefile | |||
@@ -42,6 +42,10 @@ cflags-$(CONFIG_REGPARM) += -mregparm=3 | |||
42 | # temporary until string.h is fixed | 42 | # temporary until string.h is fixed |
43 | cflags-y += -ffreestanding | 43 | cflags-y += -ffreestanding |
44 | 44 | ||
45 | # this works around some issues with generating unwind tables in older gccs | ||
46 | # newer gccs do it by default | ||
47 | cflags-y += -maccumulate-outgoing-args | ||
48 | |||
45 | # Disable unit-at-a-time mode on pre-gcc-4.0 compilers, it makes gcc use | 49 | # Disable unit-at-a-time mode on pre-gcc-4.0 compilers, it makes gcc use |
46 | # a lot more stack due to the lack of sharing of stacklots: | 50 | # a lot more stack due to the lack of sharing of stacklots: |
47 | CFLAGS += $(shell if [ $(call cc-version) -lt 0400 ] ; then echo $(call cc-option,-fno-unit-at-a-time); fi ;) | 51 | CFLAGS += $(shell if [ $(call cc-version) -lt 0400 ] ; then echo $(call cc-option,-fno-unit-at-a-time); fi ;) |
@@ -51,8 +55,8 @@ cflags-y += $(call as-instr,.cfi_startproc\n.cfi_endproc,-DCONFIG_AS_CFI=1,) | |||
51 | AFLAGS += $(call as-instr,.cfi_startproc\n.cfi_endproc,-DCONFIG_AS_CFI=1,) | 55 | AFLAGS += $(call as-instr,.cfi_startproc\n.cfi_endproc,-DCONFIG_AS_CFI=1,) |
52 | 56 | ||
53 | # is .cfi_signal_frame supported too? | 57 | # is .cfi_signal_frame supported too? |
54 | cflags-y += $(call as-instr,.cfi_startproc\n.cfi_endproc,-DCONFIG_AS_CFI=1,) | 58 | cflags-y += $(call as-instr,.cfi_startproc\n.cfi_signal_frame\n.cfi_endproc,-DCONFIG_AS_CFI_SIGNAL_FRAME=1,) |
55 | AFLAGS += $(call as-instr,.cfi_startproc\n.cfi_endproc,-DCONFIG_AS_CFI=1,) | 59 | AFLAGS += $(call as-instr,.cfi_startproc\n.cfi_signal_frame\n.cfi_endproc,-DCONFIG_AS_CFI_SIGNAL_FRAME=1,) |
56 | 60 | ||
57 | CFLAGS += $(cflags-y) | 61 | CFLAGS += $(cflags-y) |
58 | 62 | ||
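The as-instr probe above only defines CONFIG_AS_CFI_SIGNAL_FRAME when the assembler accepts the new directive, so annotated entry code can emit .cfi_signal_frame conditionally. A minimal sketch of the consumer side, assuming a dwarf2.h-style wrapper macro (the macro and stub names here are illustrative, not taken from this patch):

/* sketch: guard the directive so older assemblers still build the tree */
#ifdef CONFIG_AS_CFI_SIGNAL_FRAME
# define CFI_SIGNAL_FRAME ".cfi_signal_frame\n"
#else
# define CFI_SIGNAL_FRAME ""
#endif

/* hypothetical annotated stub written as top-level inline asm */
asm(".globl example_sigreturn_stub\n"
    "example_sigreturn_stub:\n"
    ".cfi_startproc\n"
    CFI_SIGNAL_FRAME
    "\tret\n"
    ".cfi_endproc\n");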
diff --git a/arch/i386/defconfig b/arch/i386/defconfig index 60c0c02574f0..97aacd6bd7d8 100644 --- a/arch/i386/defconfig +++ b/arch/i386/defconfig | |||
@@ -1,7 +1,7 @@ | |||
1 | # | 1 | # |
2 | # Automatically generated make config: don't edit | 2 | # Automatically generated make config: don't edit |
3 | # Linux kernel version: 2.6.19-rc1 | 3 | # Linux kernel version: 2.6.19-rc2-git4 |
4 | # Thu Oct 5 13:04:53 2006 | 4 | # Sat Oct 21 03:38:56 2006 |
5 | # | 5 | # |
6 | CONFIG_X86_32=y | 6 | CONFIG_X86_32=y |
7 | CONFIG_GENERIC_TIME=y | 7 | CONFIG_GENERIC_TIME=y |
@@ -380,8 +380,8 @@ CONFIG_INET6_XFRM_MODE_TRANSPORT=y | |||
380 | CONFIG_INET6_XFRM_MODE_TUNNEL=y | 380 | CONFIG_INET6_XFRM_MODE_TUNNEL=y |
381 | # CONFIG_INET6_XFRM_MODE_BEET is not set | 381 | # CONFIG_INET6_XFRM_MODE_BEET is not set |
382 | # CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION is not set | 382 | # CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION is not set |
383 | CONFIG_IPV6_SIT=y | ||
383 | # CONFIG_IPV6_TUNNEL is not set | 384 | # CONFIG_IPV6_TUNNEL is not set |
384 | # CONFIG_IPV6_SUBTREES is not set | ||
385 | # CONFIG_IPV6_MULTIPLE_TABLES is not set | 385 | # CONFIG_IPV6_MULTIPLE_TABLES is not set |
386 | # CONFIG_NETWORK_SECMARK is not set | 386 | # CONFIG_NETWORK_SECMARK is not set |
387 | # CONFIG_NETFILTER is not set | 387 | # CONFIG_NETFILTER is not set |
@@ -483,6 +483,13 @@ CONFIG_BLK_DEV_INITRD=y | |||
483 | # CONFIG_ATA_OVER_ETH is not set | 483 | # CONFIG_ATA_OVER_ETH is not set |
484 | 484 | ||
485 | # | 485 | # |
486 | # Misc devices | ||
487 | # | ||
488 | # CONFIG_IBM_ASM is not set | ||
489 | # CONFIG_SGI_IOC4 is not set | ||
490 | # CONFIG_TIFM_CORE is not set | ||
491 | |||
492 | # | ||
486 | # ATA/ATAPI/MFM/RLL support | 493 | # ATA/ATAPI/MFM/RLL support |
487 | # | 494 | # |
488 | CONFIG_IDE=y | 495 | CONFIG_IDE=y |
@@ -1024,6 +1031,7 @@ CONFIG_HANGCHECK_TIMER=y | |||
1024 | # | 1031 | # |
1025 | # Dallas's 1-wire bus | 1032 | # Dallas's 1-wire bus |
1026 | # | 1033 | # |
1034 | # CONFIG_W1 is not set | ||
1027 | 1035 | ||
1028 | # | 1036 | # |
1029 | # Hardware Monitoring support | 1037 | # Hardware Monitoring support |
@@ -1032,12 +1040,6 @@ CONFIG_HANGCHECK_TIMER=y | |||
1032 | # CONFIG_HWMON_VID is not set | 1040 | # CONFIG_HWMON_VID is not set |
1033 | 1041 | ||
1034 | # | 1042 | # |
1035 | # Misc devices | ||
1036 | # | ||
1037 | # CONFIG_IBM_ASM is not set | ||
1038 | # CONFIG_TIFM_CORE is not set | ||
1039 | |||
1040 | # | ||
1041 | # Multimedia devices | 1043 | # Multimedia devices |
1042 | # | 1044 | # |
1043 | # CONFIG_VIDEO_DEV is not set | 1045 | # CONFIG_VIDEO_DEV is not set |
@@ -1169,7 +1171,6 @@ CONFIG_USB_HIDINPUT=y | |||
1169 | # CONFIG_USB_ATI_REMOTE2 is not set | 1171 | # CONFIG_USB_ATI_REMOTE2 is not set |
1170 | # CONFIG_USB_KEYSPAN_REMOTE is not set | 1172 | # CONFIG_USB_KEYSPAN_REMOTE is not set |
1171 | # CONFIG_USB_APPLETOUCH is not set | 1173 | # CONFIG_USB_APPLETOUCH is not set |
1172 | # CONFIG_USB_TRANCEVIBRATOR is not set | ||
1173 | 1174 | ||
1174 | # | 1175 | # |
1175 | # USB Imaging devices | 1176 | # USB Imaging devices |
@@ -1215,6 +1216,7 @@ CONFIG_USB_MON=y | |||
1215 | # CONFIG_USB_APPLEDISPLAY is not set | 1216 | # CONFIG_USB_APPLEDISPLAY is not set |
1216 | # CONFIG_USB_SISUSBVGA is not set | 1217 | # CONFIG_USB_SISUSBVGA is not set |
1217 | # CONFIG_USB_LD is not set | 1218 | # CONFIG_USB_LD is not set |
1219 | # CONFIG_USB_TRANCEVIBRATOR is not set | ||
1218 | # CONFIG_USB_TEST is not set | 1220 | # CONFIG_USB_TEST is not set |
1219 | 1221 | ||
1220 | # | 1222 | # |
@@ -1284,6 +1286,7 @@ CONFIG_EXT3_FS=y | |||
1284 | CONFIG_EXT3_FS_XATTR=y | 1286 | CONFIG_EXT3_FS_XATTR=y |
1285 | CONFIG_EXT3_FS_POSIX_ACL=y | 1287 | CONFIG_EXT3_FS_POSIX_ACL=y |
1286 | # CONFIG_EXT3_FS_SECURITY is not set | 1288 | # CONFIG_EXT3_FS_SECURITY is not set |
1289 | # CONFIG_EXT4DEV_FS is not set | ||
1287 | CONFIG_JBD=y | 1290 | CONFIG_JBD=y |
1288 | # CONFIG_JBD_DEBUG is not set | 1291 | # CONFIG_JBD_DEBUG is not set |
1289 | CONFIG_FS_MBCACHE=y | 1292 | CONFIG_FS_MBCACHE=y |
@@ -1307,6 +1310,7 @@ CONFIG_DNOTIFY=y | |||
1307 | # CONFIG_AUTOFS_FS is not set | 1310 | # CONFIG_AUTOFS_FS is not set |
1308 | CONFIG_AUTOFS4_FS=y | 1311 | CONFIG_AUTOFS4_FS=y |
1309 | # CONFIG_FUSE_FS is not set | 1312 | # CONFIG_FUSE_FS is not set |
1313 | CONFIG_GENERIC_ACL=y | ||
1310 | 1314 | ||
1311 | # | 1315 | # |
1312 | # CD-ROM/DVD Filesystems | 1316 | # CD-ROM/DVD Filesystems |
@@ -1384,7 +1388,6 @@ CONFIG_SUNRPC=y | |||
1384 | # CONFIG_CODA_FS is not set | 1388 | # CONFIG_CODA_FS is not set |
1385 | # CONFIG_AFS_FS is not set | 1389 | # CONFIG_AFS_FS is not set |
1386 | # CONFIG_9P_FS is not set | 1390 | # CONFIG_9P_FS is not set |
1387 | CONFIG_GENERIC_ACL=y | ||
1388 | 1391 | ||
1389 | # | 1392 | # |
1390 | # Partition Types | 1393 | # Partition Types |
@@ -1437,10 +1440,6 @@ CONFIG_NLS_ISO8859_15=y | |||
1437 | CONFIG_NLS_UTF8=y | 1440 | CONFIG_NLS_UTF8=y |
1438 | 1441 | ||
1439 | # | 1442 | # |
1440 | # Distributed Lock Manager | ||
1441 | # | ||
1442 | |||
1443 | # | ||
1444 | # Instrumentation Support | 1443 | # Instrumentation Support |
1445 | # | 1444 | # |
1446 | CONFIG_PROFILING=y | 1445 | CONFIG_PROFILING=y |
@@ -1480,6 +1479,7 @@ CONFIG_DEBUG_BUGVERBOSE=y | |||
1480 | CONFIG_UNWIND_INFO=y | 1479 | CONFIG_UNWIND_INFO=y |
1481 | CONFIG_STACK_UNWIND=y | 1480 | CONFIG_STACK_UNWIND=y |
1482 | # CONFIG_FORCED_INLINING is not set | 1481 | # CONFIG_FORCED_INLINING is not set |
1482 | # CONFIG_HEADERS_CHECK is not set | ||
1483 | # CONFIG_RCU_TORTURE_TEST is not set | 1483 | # CONFIG_RCU_TORTURE_TEST is not set |
1484 | # CONFIG_LKDTM is not set | 1484 | # CONFIG_LKDTM is not set |
1485 | CONFIG_EARLY_PRINTK=y | 1485 | CONFIG_EARLY_PRINTK=y |
diff --git a/arch/i386/kernel/acpi/boot.c b/arch/i386/kernel/acpi/boot.c index 92f79cdd9a48..d12fb97a5337 100644 --- a/arch/i386/kernel/acpi/boot.c +++ b/arch/i386/kernel/acpi/boot.c | |||
@@ -70,7 +70,7 @@ static inline int acpi_madt_oem_check(char *oem_id, char *oem_table_id) { return | |||
70 | 70 | ||
71 | #define PREFIX "ACPI: " | 71 | #define PREFIX "ACPI: " |
72 | 72 | ||
73 | int acpi_noirq __initdata; /* skip ACPI IRQ initialization */ | 73 | int acpi_noirq; /* skip ACPI IRQ initialization */ |
74 | int acpi_pci_disabled __initdata; /* skip ACPI PCI scan and IRQ initialization */ | 74 | int acpi_pci_disabled __initdata; /* skip ACPI PCI scan and IRQ initialization */ |
75 | int acpi_ht __initdata = 1; /* enable HT */ | 75 | int acpi_ht __initdata = 1; /* enable HT */ |
76 | 76 | ||
@@ -82,6 +82,7 @@ EXPORT_SYMBOL(acpi_strict); | |||
82 | acpi_interrupt_flags acpi_sci_flags __initdata; | 82 | acpi_interrupt_flags acpi_sci_flags __initdata; |
83 | int acpi_sci_override_gsi __initdata; | 83 | int acpi_sci_override_gsi __initdata; |
84 | int acpi_skip_timer_override __initdata; | 84 | int acpi_skip_timer_override __initdata; |
85 | int acpi_use_timer_override __initdata; | ||
85 | 86 | ||
86 | #ifdef CONFIG_X86_LOCAL_APIC | 87 | #ifdef CONFIG_X86_LOCAL_APIC |
87 | static u64 acpi_lapic_addr __initdata = APIC_DEFAULT_PHYS_BASE; | 88 | static u64 acpi_lapic_addr __initdata = APIC_DEFAULT_PHYS_BASE; |
@@ -332,7 +333,7 @@ acpi_parse_ioapic(acpi_table_entry_header * header, const unsigned long end) | |||
332 | /* | 333 | /* |
333 | * Parse Interrupt Source Override for the ACPI SCI | 334 | * Parse Interrupt Source Override for the ACPI SCI |
334 | */ | 335 | */ |
335 | static void acpi_sci_ioapic_setup(u32 gsi, u16 polarity, u16 trigger) | 336 | static void acpi_sci_ioapic_setup(u32 bus_irq, u32 gsi, u16 polarity, u16 trigger) |
336 | { | 337 | { |
337 | if (trigger == 0) /* compatible SCI trigger is level */ | 338 | if (trigger == 0) /* compatible SCI trigger is level */ |
338 | trigger = 3; | 339 | trigger = 3; |
@@ -352,13 +353,13 @@ static void acpi_sci_ioapic_setup(u32 gsi, u16 polarity, u16 trigger) | |||
352 | * If GSI is < 16, this will update its flags, | 353 | * If GSI is < 16, this will update its flags, |
353 | * else it will create a new mp_irqs[] entry. | 354 | * else it will create a new mp_irqs[] entry. |
354 | */ | 355 | */ |
355 | mp_override_legacy_irq(gsi, polarity, trigger, gsi); | 356 | mp_override_legacy_irq(bus_irq, polarity, trigger, gsi); |
356 | 357 | ||
357 | /* | 358 | /* |
358 | * stash over-ride to indicate we've been here | 359 | * stash over-ride to indicate we've been here |
359 | * and for later update of acpi_fadt | 360 | * and for later update of acpi_fadt |
360 | */ | 361 | */ |
361 | acpi_sci_override_gsi = gsi; | 362 | acpi_sci_override_gsi = bus_irq; |
362 | return; | 363 | return; |
363 | } | 364 | } |
364 | 365 | ||
@@ -376,7 +377,7 @@ acpi_parse_int_src_ovr(acpi_table_entry_header * header, | |||
376 | acpi_table_print_madt_entry(header); | 377 | acpi_table_print_madt_entry(header); |
377 | 378 | ||
378 | if (intsrc->bus_irq == acpi_fadt.sci_int) { | 379 | if (intsrc->bus_irq == acpi_fadt.sci_int) { |
379 | acpi_sci_ioapic_setup(intsrc->global_irq, | 380 | acpi_sci_ioapic_setup(intsrc->bus_irq, intsrc->global_irq, |
380 | intsrc->flags.polarity, | 381 | intsrc->flags.polarity, |
381 | intsrc->flags.trigger); | 382 | intsrc->flags.trigger); |
382 | return 0; | 383 | return 0; |
@@ -879,7 +880,7 @@ static int __init acpi_parse_madt_ioapic_entries(void) | |||
879 | * pretend we got one so we can set the SCI flags. | 880 | * pretend we got one so we can set the SCI flags. |
880 | */ | 881 | */ |
881 | if (!acpi_sci_override_gsi) | 882 | if (!acpi_sci_override_gsi) |
882 | acpi_sci_ioapic_setup(acpi_fadt.sci_int, 0, 0); | 883 | acpi_sci_ioapic_setup(acpi_fadt.sci_int, acpi_fadt.sci_int, 0, 0); |
883 | 884 | ||
884 | /* Fill in identity legacy mapings where no override */ | 885 | /* Fill in identity legacy mapings where no override */ |
885 | mp_config_acpi_legacy_irqs(); | 886 | mp_config_acpi_legacy_irqs(); |
@@ -1300,6 +1301,13 @@ static int __init parse_acpi_skip_timer_override(char *arg) | |||
1300 | return 0; | 1301 | return 0; |
1301 | } | 1302 | } |
1302 | early_param("acpi_skip_timer_override", parse_acpi_skip_timer_override); | 1303 | early_param("acpi_skip_timer_override", parse_acpi_skip_timer_override); |
1304 | |||
1305 | static int __init parse_acpi_use_timer_override(char *arg) | ||
1306 | { | ||
1307 | acpi_use_timer_override = 1; | ||
1308 | return 0; | ||
1309 | } | ||
1310 | early_param("acpi_use_timer_override", parse_acpi_use_timer_override); | ||
1303 | #endif /* CONFIG_X86_IO_APIC */ | 1311 | #endif /* CONFIG_X86_IO_APIC */ |
1304 | 1312 | ||
1305 | static int __init setup_acpi_sci(char *s) | 1313 | static int __init setup_acpi_sci(char *s) |
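The SCI change above matters whenever the MADT remaps the SCI: acpi_sci_override_gsi must record the ISA source IRQ (bus_irq), not the GSI, otherwise the later FADT fix-up and the legacy-IRQ override are keyed on the wrong number. A small worked sketch with hypothetical values (ISA IRQ 9 routed to GSI 20), showing the call the patched code now makes:

static void __init example_sci_override(void)
{
	/* hypothetical MADT Interrupt Source Override: bus_irq 9 -> global_irq 20 */
	u32 bus_irq = 9, gsi = 20;
	u16 polarity = 3, trigger = 3;	/* active low, level: typical for the SCI */

	/* key the override on the ISA IRQ and map it to the GSI ... */
	mp_override_legacy_irq(bus_irq, polarity, trigger, gsi);
	/* ... and remember the ISA IRQ for the later acpi_fadt.sci_int update */
	acpi_sci_override_gsi = bus_irq;
}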
diff --git a/arch/i386/kernel/acpi/cstate.c b/arch/i386/kernel/acpi/cstate.c index 25db49ef1770..20563e52c622 100644 --- a/arch/i386/kernel/acpi/cstate.c +++ b/arch/i386/kernel/acpi/cstate.c | |||
@@ -10,6 +10,7 @@ | |||
10 | #include <linux/module.h> | 10 | #include <linux/module.h> |
11 | #include <linux/init.h> | 11 | #include <linux/init.h> |
12 | #include <linux/acpi.h> | 12 | #include <linux/acpi.h> |
13 | #include <linux/cpu.h> | ||
13 | 14 | ||
14 | #include <acpi/processor.h> | 15 | #include <acpi/processor.h> |
15 | #include <asm/acpi.h> | 16 | #include <asm/acpi.h> |
@@ -41,5 +42,124 @@ void acpi_processor_power_init_bm_check(struct acpi_processor_flags *flags, | |||
41 | flags->bm_check = 1; | 42 | flags->bm_check = 1; |
42 | } | 43 | } |
43 | } | 44 | } |
44 | |||
45 | EXPORT_SYMBOL(acpi_processor_power_init_bm_check); | 45 | EXPORT_SYMBOL(acpi_processor_power_init_bm_check); |
46 | |||
47 | /* The code below handles cstate entry with monitor-mwait pair on Intel*/ | ||
48 | |||
49 | struct cstate_entry_s { | ||
50 | struct { | ||
51 | unsigned int eax; | ||
52 | unsigned int ecx; | ||
53 | } states[ACPI_PROCESSOR_MAX_POWER]; | ||
54 | }; | ||
55 | static struct cstate_entry_s *cpu_cstate_entry; /* per CPU ptr */ | ||
56 | |||
57 | static short mwait_supported[ACPI_PROCESSOR_MAX_POWER]; | ||
58 | |||
59 | #define MWAIT_SUBSTATE_MASK (0xf) | ||
60 | #define MWAIT_SUBSTATE_SIZE (4) | ||
61 | |||
62 | #define CPUID_MWAIT_LEAF (5) | ||
63 | #define CPUID5_ECX_EXTENSIONS_SUPPORTED (0x1) | ||
64 | #define CPUID5_ECX_INTERRUPT_BREAK (0x2) | ||
65 | |||
66 | #define MWAIT_ECX_INTERRUPT_BREAK (0x1) | ||
67 | |||
68 | #define NATIVE_CSTATE_BEYOND_HALT (2) | ||
69 | |||
70 | int acpi_processor_ffh_cstate_probe(unsigned int cpu, | ||
71 | struct acpi_processor_cx *cx, struct acpi_power_register *reg) | ||
72 | { | ||
73 | struct cstate_entry_s *percpu_entry; | ||
74 | struct cpuinfo_x86 *c = cpu_data + cpu; | ||
75 | |||
76 | cpumask_t saved_mask; | ||
77 | int retval; | ||
78 | unsigned int eax, ebx, ecx, edx; | ||
79 | unsigned int edx_part; | ||
80 | unsigned int cstate_type; /* C-state type and not ACPI C-state type */ | ||
81 | unsigned int num_cstate_subtype; | ||
82 | |||
83 | if (!cpu_cstate_entry || c->cpuid_level < CPUID_MWAIT_LEAF ) | ||
84 | return -1; | ||
85 | |||
86 | if (reg->bit_offset != NATIVE_CSTATE_BEYOND_HALT) | ||
87 | return -1; | ||
88 | |||
89 | percpu_entry = per_cpu_ptr(cpu_cstate_entry, cpu); | ||
90 | percpu_entry->states[cx->index].eax = 0; | ||
91 | percpu_entry->states[cx->index].ecx = 0; | ||
92 | |||
93 | /* Make sure we are running on right CPU */ | ||
94 | saved_mask = current->cpus_allowed; | ||
95 | retval = set_cpus_allowed(current, cpumask_of_cpu(cpu)); | ||
96 | if (retval) | ||
97 | return -1; | ||
98 | |||
99 | cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &edx); | ||
100 | |||
101 | /* Check whether this particular cx_type (in CST) is supported or not */ | ||
102 | cstate_type = (cx->address >> MWAIT_SUBSTATE_SIZE) + 1; | ||
103 | edx_part = edx >> (cstate_type * MWAIT_SUBSTATE_SIZE); | ||
104 | num_cstate_subtype = edx_part & MWAIT_SUBSTATE_MASK; | ||
105 | |||
106 | retval = 0; | ||
107 | if (num_cstate_subtype < (cx->address & MWAIT_SUBSTATE_MASK)) { | ||
108 | retval = -1; | ||
109 | goto out; | ||
110 | } | ||
111 | |||
112 | /* mwait ecx extensions INTERRUPT_BREAK should be supported for C2/C3 */ | ||
113 | if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED) || | ||
114 | !(ecx & CPUID5_ECX_INTERRUPT_BREAK)) { | ||
115 | retval = -1; | ||
116 | goto out; | ||
117 | } | ||
118 | percpu_entry->states[cx->index].ecx = MWAIT_ECX_INTERRUPT_BREAK; | ||
119 | |||
120 | /* Use the hint in CST */ | ||
121 | percpu_entry->states[cx->index].eax = cx->address; | ||
122 | |||
123 | if (!mwait_supported[cstate_type]) { | ||
124 | mwait_supported[cstate_type] = 1; | ||
125 | printk(KERN_DEBUG "Monitor-Mwait will be used to enter C-%d " | ||
126 | "state\n", cx->type); | ||
127 | } | ||
128 | |||
129 | out: | ||
130 | set_cpus_allowed(current, saved_mask); | ||
131 | return retval; | ||
132 | } | ||
133 | EXPORT_SYMBOL_GPL(acpi_processor_ffh_cstate_probe); | ||
134 | |||
135 | void acpi_processor_ffh_cstate_enter(struct acpi_processor_cx *cx) | ||
136 | { | ||
137 | unsigned int cpu = smp_processor_id(); | ||
138 | struct cstate_entry_s *percpu_entry; | ||
139 | |||
140 | percpu_entry = per_cpu_ptr(cpu_cstate_entry, cpu); | ||
141 | mwait_idle_with_hints(percpu_entry->states[cx->index].eax, | ||
142 | percpu_entry->states[cx->index].ecx); | ||
143 | } | ||
144 | EXPORT_SYMBOL_GPL(acpi_processor_ffh_cstate_enter); | ||
145 | |||
146 | static int __init ffh_cstate_init(void) | ||
147 | { | ||
148 | struct cpuinfo_x86 *c = &boot_cpu_data; | ||
149 | if (c->x86_vendor != X86_VENDOR_INTEL) | ||
150 | return -1; | ||
151 | |||
152 | cpu_cstate_entry = alloc_percpu(struct cstate_entry_s); | ||
153 | return 0; | ||
154 | } | ||
155 | |||
156 | static void __exit ffh_cstate_exit(void) | ||
157 | { | ||
158 | if (cpu_cstate_entry) { | ||
159 | free_percpu(cpu_cstate_entry); | ||
160 | cpu_cstate_entry = NULL; | ||
161 | } | ||
162 | } | ||
163 | |||
164 | arch_initcall(ffh_cstate_init); | ||
165 | __exitcall(ffh_cstate_exit); | ||
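The probe above treats the _CST FFH address as an MWAIT hint, (C-state type - 1) << 4 | sub-state, and validates it against CPUID leaf 5, where EDX carries a 4-bit count of supported MWAIT sub-states per C-state. A worked sketch of that check with a hypothetical EDX value (the helper name is illustrative; the masks match the code above):

/* edx is CPUID(5).EDX, e.g. 0x00000020 = two sub-states for C1, none deeper */
static int example_hint_supported(unsigned int edx, unsigned int cx_address)
{
	unsigned int cstate_type = (cx_address >> MWAIT_SUBSTATE_SIZE) + 1;
	unsigned int num_subtypes =
		(edx >> (cstate_type * MWAIT_SUBSTATE_SIZE)) & MWAIT_SUBSTATE_MASK;

	/* the sub-state requested by _CST must actually exist on this CPU */
	return num_subtypes >= (cx_address & MWAIT_SUBSTATE_MASK);
}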
diff --git a/arch/i386/kernel/acpi/earlyquirk.c b/arch/i386/kernel/acpi/earlyquirk.c index fe799b11ac0a..c9841692bb7c 100644 --- a/arch/i386/kernel/acpi/earlyquirk.c +++ b/arch/i386/kernel/acpi/earlyquirk.c | |||
@@ -27,11 +27,17 @@ static int __init check_bridge(int vendor, int device) | |||
27 | #ifdef CONFIG_ACPI | 27 | #ifdef CONFIG_ACPI |
28 | /* According to Nvidia all timer overrides are bogus unless HPET | 28 | /* According to Nvidia all timer overrides are bogus unless HPET |
29 | is enabled. */ | 29 | is enabled. */ |
30 | if (vendor == PCI_VENDOR_ID_NVIDIA) { | 30 | if (!acpi_use_timer_override && vendor == PCI_VENDOR_ID_NVIDIA) { |
31 | nvidia_hpet_detected = 0; | 31 | nvidia_hpet_detected = 0; |
32 | acpi_table_parse(ACPI_HPET, nvidia_hpet_check); | 32 | acpi_table_parse(ACPI_HPET, nvidia_hpet_check); |
33 | if (nvidia_hpet_detected == 0) { | 33 | if (nvidia_hpet_detected == 0) { |
34 | acpi_skip_timer_override = 1; | 34 | acpi_skip_timer_override = 1; |
35 | printk(KERN_INFO "Nvidia board " | ||
36 | "detected. Ignoring ACPI " | ||
37 | "timer override.\n"); | ||
38 | printk(KERN_INFO "If you got timer trouble " | ||
39 | "try acpi_use_timer_override\n"); | ||
40 | |||
35 | } | 41 | } |
36 | } | 42 | } |
37 | #endif | 43 | #endif |
diff --git a/arch/i386/kernel/alternative.c b/arch/i386/kernel/alternative.c index 28ab80649764..583c238e17fb 100644 --- a/arch/i386/kernel/alternative.c +++ b/arch/i386/kernel/alternative.c | |||
@@ -344,6 +344,7 @@ void alternatives_smp_switch(int smp) | |||
344 | 344 | ||
345 | void __init alternative_instructions(void) | 345 | void __init alternative_instructions(void) |
346 | { | 346 | { |
347 | unsigned long flags; | ||
347 | if (no_replacement) { | 348 | if (no_replacement) { |
348 | printk(KERN_INFO "(SMP-)alternatives turned off\n"); | 349 | printk(KERN_INFO "(SMP-)alternatives turned off\n"); |
349 | free_init_pages("SMP alternatives", | 350 | free_init_pages("SMP alternatives", |
@@ -351,6 +352,8 @@ void __init alternative_instructions(void) | |||
351 | (unsigned long)__smp_alt_end); | 352 | (unsigned long)__smp_alt_end); |
352 | return; | 353 | return; |
353 | } | 354 | } |
355 | |||
356 | local_irq_save(flags); | ||
354 | apply_alternatives(__alt_instructions, __alt_instructions_end); | 357 | apply_alternatives(__alt_instructions, __alt_instructions_end); |
355 | 358 | ||
356 | /* switch to patch-once-at-boottime-only mode and free the | 359 | /* switch to patch-once-at-boottime-only mode and free the |
@@ -386,4 +389,5 @@ void __init alternative_instructions(void) | |||
386 | alternatives_smp_switch(0); | 389 | alternatives_smp_switch(0); |
387 | } | 390 | } |
388 | #endif | 391 | #endif |
392 | local_irq_restore(flags); | ||
389 | } | 393 | } |
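Taking interrupts off around the whole patching pass prevents a handler on this CPU from running through an instruction that is only half rewritten. The same bracketing pattern, as a minimal sketch for a single hypothetical patch site:

static void example_patch_site(u8 *site, const u8 *replacement, size_t len)
{
	unsigned long flags;

	local_irq_save(flags);		/* no local interrupts mid-rewrite */
	memcpy(site, replacement, len);	/* the actual text rewrite */
	sync_core();			/* serialize before executing the new code */
	local_irq_restore(flags);
}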
diff --git a/arch/i386/kernel/apm.c b/arch/i386/kernel/apm.c index b42f2d914af3..a60358fe9a49 100644 --- a/arch/i386/kernel/apm.c +++ b/arch/i386/kernel/apm.c | |||
@@ -198,7 +198,7 @@ | |||
198 | * (APM) BIOS Interface Specification, Revision 1.2, February 1996. | 198 | * (APM) BIOS Interface Specification, Revision 1.2, February 1996. |
199 | * | 199 | * |
200 | * [This document is available from Microsoft at: | 200 | * [This document is available from Microsoft at: |
201 | * http://www.microsoft.com/hwdev/busbios/amp_12.htm] | 201 | * http://www.microsoft.com/whdc/archive/amp_12.mspx] |
202 | */ | 202 | */ |
203 | 203 | ||
204 | #include <linux/module.h> | 204 | #include <linux/module.h> |
@@ -540,11 +540,30 @@ static inline void apm_restore_cpus(cpumask_t mask) | |||
540 | * Also, we KNOW that for the non error case of apm_bios_call, there | 540 | * Also, we KNOW that for the non error case of apm_bios_call, there |
541 | * is no useful data returned in the low order 8 bits of eax. | 541 | * is no useful data returned in the low order 8 bits of eax. |
542 | */ | 542 | */ |
543 | #define APM_DO_CLI \ | 543 | |
544 | if (apm_info.allow_ints) \ | 544 | static inline unsigned long __apm_irq_save(void) |
545 | local_irq_enable(); \ | 545 | { |
546 | else \ | 546 | unsigned long flags; |
547 | local_save_flags(flags); | ||
548 | if (apm_info.allow_ints) { | ||
549 | if (irqs_disabled_flags(flags)) | ||
550 | local_irq_enable(); | ||
551 | } else | ||
552 | local_irq_disable(); | ||
553 | |||
554 | return flags; | ||
555 | } | ||
556 | |||
557 | #define apm_irq_save(flags) \ | ||
558 | do { flags = __apm_irq_save(); } while (0) | ||
559 | |||
560 | static inline void apm_irq_restore(unsigned long flags) | ||
561 | { | ||
562 | if (irqs_disabled_flags(flags)) | ||
547 | local_irq_disable(); | 563 | local_irq_disable(); |
564 | else if (irqs_disabled()) | ||
565 | local_irq_enable(); | ||
566 | } | ||
548 | 567 | ||
549 | #ifdef APM_ZERO_SEGS | 568 | #ifdef APM_ZERO_SEGS |
550 | # define APM_DECL_SEGS \ | 569 | # define APM_DECL_SEGS \ |
@@ -596,12 +615,11 @@ static u8 apm_bios_call(u32 func, u32 ebx_in, u32 ecx_in, | |||
596 | save_desc_40 = gdt[0x40 / 8]; | 615 | save_desc_40 = gdt[0x40 / 8]; |
597 | gdt[0x40 / 8] = bad_bios_desc; | 616 | gdt[0x40 / 8] = bad_bios_desc; |
598 | 617 | ||
599 | local_save_flags(flags); | 618 | apm_irq_save(flags); |
600 | APM_DO_CLI; | ||
601 | APM_DO_SAVE_SEGS; | 619 | APM_DO_SAVE_SEGS; |
602 | apm_bios_call_asm(func, ebx_in, ecx_in, eax, ebx, ecx, edx, esi); | 620 | apm_bios_call_asm(func, ebx_in, ecx_in, eax, ebx, ecx, edx, esi); |
603 | APM_DO_RESTORE_SEGS; | 621 | APM_DO_RESTORE_SEGS; |
604 | local_irq_restore(flags); | 622 | apm_irq_restore(flags); |
605 | gdt[0x40 / 8] = save_desc_40; | 623 | gdt[0x40 / 8] = save_desc_40; |
606 | put_cpu(); | 624 | put_cpu(); |
607 | apm_restore_cpus(cpus); | 625 | apm_restore_cpus(cpus); |
@@ -640,12 +658,11 @@ static u8 apm_bios_call_simple(u32 func, u32 ebx_in, u32 ecx_in, u32 *eax) | |||
640 | save_desc_40 = gdt[0x40 / 8]; | 658 | save_desc_40 = gdt[0x40 / 8]; |
641 | gdt[0x40 / 8] = bad_bios_desc; | 659 | gdt[0x40 / 8] = bad_bios_desc; |
642 | 660 | ||
643 | local_save_flags(flags); | 661 | apm_irq_save(flags); |
644 | APM_DO_CLI; | ||
645 | APM_DO_SAVE_SEGS; | 662 | APM_DO_SAVE_SEGS; |
646 | error = apm_bios_call_simple_asm(func, ebx_in, ecx_in, eax); | 663 | error = apm_bios_call_simple_asm(func, ebx_in, ecx_in, eax); |
647 | APM_DO_RESTORE_SEGS; | 664 | APM_DO_RESTORE_SEGS; |
648 | local_irq_restore(flags); | 665 | apm_irq_restore(flags); |
649 | gdt[0x40 / 8] = save_desc_40; | 666 | gdt[0x40 / 8] = save_desc_40; |
650 | put_cpu(); | 667 | put_cpu(); |
651 | apm_restore_cpus(cpus); | 668 | apm_restore_cpus(cpus); |
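The new helpers only touch the interrupt flag when its state actually has to change, unlike the old APM_DO_CLI macro, which ran sti or cli unconditionally. A hedged sketch of the calling pattern (the BIOS call itself is a placeholder):

static u8 example_apm_call(void)
{
	unsigned long flags;
	u8 err = 0;

	apm_irq_save(flags);	/* honour apm_info.allow_ints, flip IF only if needed */
	/* ... protected-mode BIOS far call would go here ... */
	apm_irq_restore(flags);	/* restore IF exactly as it was found */

	return err;
}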
diff --git a/arch/i386/kernel/cpu/mcheck/therm_throt.c b/arch/i386/kernel/cpu/mcheck/therm_throt.c index 4f43047de406..2d8703b7ce65 100644 --- a/arch/i386/kernel/cpu/mcheck/therm_throt.c +++ b/arch/i386/kernel/cpu/mcheck/therm_throt.c | |||
@@ -110,17 +110,15 @@ int therm_throt_process(int curr) | |||
110 | 110 | ||
111 | #ifdef CONFIG_SYSFS | 111 | #ifdef CONFIG_SYSFS |
112 | /* Add/Remove thermal_throttle interface for CPU device */ | 112 | /* Add/Remove thermal_throttle interface for CPU device */ |
113 | static __cpuinit int thermal_throttle_add_dev(struct sys_device * sys_dev) | 113 | static __cpuinit int thermal_throttle_add_dev(struct sys_device *sys_dev) |
114 | { | 114 | { |
115 | sysfs_create_group(&sys_dev->kobj, &thermal_throttle_attr_group); | 115 | return sysfs_create_group(&sys_dev->kobj, &thermal_throttle_attr_group); |
116 | return 0; | ||
117 | } | 116 | } |
118 | 117 | ||
119 | #ifdef CONFIG_HOTPLUG_CPU | 118 | #ifdef CONFIG_HOTPLUG_CPU |
120 | static __cpuinit int thermal_throttle_remove_dev(struct sys_device * sys_dev) | 119 | static __cpuinit void thermal_throttle_remove_dev(struct sys_device *sys_dev) |
121 | { | 120 | { |
122 | sysfs_remove_group(&sys_dev->kobj, &thermal_throttle_attr_group); | 121 | return sysfs_remove_group(&sys_dev->kobj, &thermal_throttle_attr_group); |
123 | return 0; | ||
124 | } | 122 | } |
125 | 123 | ||
126 | /* Mutex protecting device creation against CPU hotplug */ | 124 | /* Mutex protecting device creation against CPU hotplug */ |
@@ -133,12 +131,14 @@ static __cpuinit int thermal_throttle_cpu_callback(struct notifier_block *nfb, | |||
133 | { | 131 | { |
134 | unsigned int cpu = (unsigned long)hcpu; | 132 | unsigned int cpu = (unsigned long)hcpu; |
135 | struct sys_device *sys_dev; | 133 | struct sys_device *sys_dev; |
134 | int err; | ||
136 | 135 | ||
137 | sys_dev = get_cpu_sysdev(cpu); | 136 | sys_dev = get_cpu_sysdev(cpu); |
138 | mutex_lock(&therm_cpu_lock); | 137 | mutex_lock(&therm_cpu_lock); |
139 | switch (action) { | 138 | switch (action) { |
140 | case CPU_ONLINE: | 139 | case CPU_ONLINE: |
141 | thermal_throttle_add_dev(sys_dev); | 140 | err = thermal_throttle_add_dev(sys_dev); |
141 | WARN_ON(err); | ||
142 | break; | 142 | break; |
143 | case CPU_DEAD: | 143 | case CPU_DEAD: |
144 | thermal_throttle_remove_dev(sys_dev); | 144 | thermal_throttle_remove_dev(sys_dev); |
@@ -157,6 +157,7 @@ static struct notifier_block thermal_throttle_cpu_notifier = | |||
157 | static __init int thermal_throttle_init_device(void) | 157 | static __init int thermal_throttle_init_device(void) |
158 | { | 158 | { |
159 | unsigned int cpu = 0; | 159 | unsigned int cpu = 0; |
160 | int err; | ||
160 | 161 | ||
161 | if (!atomic_read(&therm_throt_en)) | 162 | if (!atomic_read(&therm_throt_en)) |
162 | return 0; | 163 | return 0; |
@@ -167,8 +168,10 @@ static __init int thermal_throttle_init_device(void) | |||
167 | mutex_lock(&therm_cpu_lock); | 168 | mutex_lock(&therm_cpu_lock); |
168 | #endif | 169 | #endif |
169 | /* connect live CPUs to sysfs */ | 170 | /* connect live CPUs to sysfs */ |
170 | for_each_online_cpu(cpu) | 171 | for_each_online_cpu(cpu) { |
171 | thermal_throttle_add_dev(get_cpu_sysdev(cpu)); | 172 | err = thermal_throttle_add_dev(get_cpu_sysdev(cpu)); |
173 | WARN_ON(err); | ||
174 | } | ||
172 | #ifdef CONFIG_HOTPLUG_CPU | 175 | #ifdef CONFIG_HOTPLUG_CPU |
173 | mutex_unlock(&therm_cpu_lock); | 176 | mutex_unlock(&therm_cpu_lock); |
174 | #endif | 177 | #endif |
diff --git a/arch/i386/kernel/head.S b/arch/i386/kernel/head.S index be9d883c62ce..ca31f18d277c 100644 --- a/arch/i386/kernel/head.S +++ b/arch/i386/kernel/head.S | |||
@@ -317,7 +317,7 @@ is386: movl $2,%ecx # set MP | |||
317 | movl %eax,%gs | 317 | movl %eax,%gs |
318 | lldt %ax | 318 | lldt %ax |
319 | cld # gcc2 wants the direction flag cleared at all times | 319 | cld # gcc2 wants the direction flag cleared at all times |
320 | pushl %eax # fake return address | 320 | pushl $0 # fake return address for unwinder |
321 | #ifdef CONFIG_SMP | 321 | #ifdef CONFIG_SMP |
322 | movb ready, %cl | 322 | movb ready, %cl |
323 | movb $1, ready | 323 | movb $1, ready |
diff --git a/arch/i386/kernel/i8253.c b/arch/i386/kernel/i8253.c index 477b24daff53..9a0060b92e32 100644 --- a/arch/i386/kernel/i8253.c +++ b/arch/i386/kernel/i8253.c | |||
@@ -109,7 +109,7 @@ static struct clocksource clocksource_pit = { | |||
109 | 109 | ||
110 | static int __init init_pit_clocksource(void) | 110 | static int __init init_pit_clocksource(void) |
111 | { | 111 | { |
112 | if (num_possible_cpus() > 4) /* PIT does not scale! */ | 112 | if (num_possible_cpus() > 1) /* PIT does not scale! */ |
113 | return 0; | 113 | return 0; |
114 | 114 | ||
115 | clocksource_pit.mult = clocksource_hz2mult(CLOCK_TICK_RATE, 20); | 115 | clocksource_pit.mult = clocksource_hz2mult(CLOCK_TICK_RATE, 20); |
diff --git a/arch/i386/kernel/i8259.c b/arch/i386/kernel/i8259.c index d53eafb6daa7..62996cd17084 100644 --- a/arch/i386/kernel/i8259.c +++ b/arch/i386/kernel/i8259.c | |||
@@ -113,7 +113,8 @@ void make_8259A_irq(unsigned int irq) | |||
113 | { | 113 | { |
114 | disable_irq_nosync(irq); | 114 | disable_irq_nosync(irq); |
115 | io_apic_irqs &= ~(1<<irq); | 115 | io_apic_irqs &= ~(1<<irq); |
116 | set_irq_chip_and_handler(irq, &i8259A_chip, handle_level_irq); | 116 | set_irq_chip_and_handler_name(irq, &i8259A_chip, handle_level_irq, |
117 | "XT"); | ||
117 | enable_irq(irq); | 118 | enable_irq(irq); |
118 | } | 119 | } |
119 | 120 | ||
@@ -369,8 +370,8 @@ void __init init_ISA_irqs (void) | |||
369 | /* | 370 | /* |
370 | * 16 old-style INTA-cycle interrupts: | 371 | * 16 old-style INTA-cycle interrupts: |
371 | */ | 372 | */ |
372 | set_irq_chip_and_handler(i, &i8259A_chip, | 373 | set_irq_chip_and_handler_name(i, &i8259A_chip, |
373 | handle_level_irq); | 374 | handle_level_irq, "XT"); |
374 | } else { | 375 | } else { |
375 | /* | 376 | /* |
376 | * 'high' PCI IRQs filled in on demand | 377 | * 'high' PCI IRQs filled in on demand |
diff --git a/arch/i386/kernel/io_apic.c b/arch/i386/kernel/io_apic.c index cd082c36ca03..3b7a63e0ed1a 100644 --- a/arch/i386/kernel/io_apic.c +++ b/arch/i386/kernel/io_apic.c | |||
@@ -91,6 +91,46 @@ static struct irq_pin_list { | |||
91 | int apic, pin, next; | 91 | int apic, pin, next; |
92 | } irq_2_pin[PIN_MAP_SIZE]; | 92 | } irq_2_pin[PIN_MAP_SIZE]; |
93 | 93 | ||
94 | struct io_apic { | ||
95 | unsigned int index; | ||
96 | unsigned int unused[3]; | ||
97 | unsigned int data; | ||
98 | }; | ||
99 | |||
100 | static __attribute_const__ struct io_apic __iomem *io_apic_base(int idx) | ||
101 | { | ||
102 | return (void __iomem *) __fix_to_virt(FIX_IO_APIC_BASE_0 + idx) | ||
103 | + (mp_ioapics[idx].mpc_apicaddr & ~PAGE_MASK); | ||
104 | } | ||
105 | |||
106 | static inline unsigned int io_apic_read(unsigned int apic, unsigned int reg) | ||
107 | { | ||
108 | struct io_apic __iomem *io_apic = io_apic_base(apic); | ||
109 | writel(reg, &io_apic->index); | ||
110 | return readl(&io_apic->data); | ||
111 | } | ||
112 | |||
113 | static inline void io_apic_write(unsigned int apic, unsigned int reg, unsigned int value) | ||
114 | { | ||
115 | struct io_apic __iomem *io_apic = io_apic_base(apic); | ||
116 | writel(reg, &io_apic->index); | ||
117 | writel(value, &io_apic->data); | ||
118 | } | ||
119 | |||
120 | /* | ||
121 | * Re-write a value: to be used for read-modify-write | ||
122 | * cycles where the read already set up the index register. | ||
123 | * | ||
124 | * Older SiS APIC requires we rewrite the index register | ||
125 | */ | ||
126 | static inline void io_apic_modify(unsigned int apic, unsigned int reg, unsigned int value) | ||
127 | { | ||
128 | volatile struct io_apic *io_apic = io_apic_base(apic); | ||
129 | if (sis_apic_bug) | ||
130 | writel(reg, &io_apic->index); | ||
131 | writel(value, &io_apic->data); | ||
132 | } | ||
133 | |||
94 | union entry_union { | 134 | union entry_union { |
95 | struct { u32 w1, w2; }; | 135 | struct { u32 w1, w2; }; |
96 | struct IO_APIC_route_entry entry; | 136 | struct IO_APIC_route_entry entry; |
@@ -107,12 +147,34 @@ static struct IO_APIC_route_entry ioapic_read_entry(int apic, int pin) | |||
107 | return eu.entry; | 147 | return eu.entry; |
108 | } | 148 | } |
109 | 149 | ||
150 | /* | ||
151 | * When we write a new IO APIC routing entry, we need to write the high | ||
152 | * word first! If the mask bit in the low word is clear, we will enable | ||
153 | * the interrupt, and we need to make sure the entry is fully populated | ||
154 | * before that happens. | ||
155 | */ | ||
110 | static void ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e) | 156 | static void ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e) |
111 | { | 157 | { |
112 | unsigned long flags; | 158 | unsigned long flags; |
113 | union entry_union eu; | 159 | union entry_union eu; |
114 | eu.entry = e; | 160 | eu.entry = e; |
115 | spin_lock_irqsave(&ioapic_lock, flags); | 161 | spin_lock_irqsave(&ioapic_lock, flags); |
162 | io_apic_write(apic, 0x11 + 2*pin, eu.w2); | ||
163 | io_apic_write(apic, 0x10 + 2*pin, eu.w1); | ||
164 | spin_unlock_irqrestore(&ioapic_lock, flags); | ||
165 | } | ||
166 | |||
167 | /* | ||
168 | * When we mask an IO APIC routing entry, we need to write the low | ||
169 | * word first, in order to set the mask bit before we change the | ||
170 | * high bits! | ||
171 | */ | ||
172 | static void ioapic_mask_entry(int apic, int pin) | ||
173 | { | ||
174 | unsigned long flags; | ||
175 | union entry_union eu = { .entry.mask = 1 }; | ||
176 | |||
177 | spin_lock_irqsave(&ioapic_lock, flags); | ||
116 | io_apic_write(apic, 0x10 + 2*pin, eu.w1); | 178 | io_apic_write(apic, 0x10 + 2*pin, eu.w1); |
117 | io_apic_write(apic, 0x11 + 2*pin, eu.w2); | 179 | io_apic_write(apic, 0x11 + 2*pin, eu.w2); |
118 | spin_unlock_irqrestore(&ioapic_lock, flags); | 180 | spin_unlock_irqrestore(&ioapic_lock, flags); |
@@ -234,9 +296,7 @@ static void clear_IO_APIC_pin(unsigned int apic, unsigned int pin) | |||
234 | /* | 296 | /* |
235 | * Disable it in the IO-APIC irq-routing table: | 297 | * Disable it in the IO-APIC irq-routing table: |
236 | */ | 298 | */ |
237 | memset(&entry, 0, sizeof(entry)); | 299 | ioapic_mask_entry(apic, pin); |
238 | entry.mask = 1; | ||
239 | ioapic_write_entry(apic, pin, entry); | ||
240 | } | 300 | } |
241 | 301 | ||
242 | static void clear_IO_APIC (void) | 302 | static void clear_IO_APIC (void) |
@@ -1225,11 +1285,13 @@ static void ioapic_register_intr(int irq, int vector, unsigned long trigger) | |||
1225 | { | 1285 | { |
1226 | if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) || | 1286 | if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) || |
1227 | trigger == IOAPIC_LEVEL) | 1287 | trigger == IOAPIC_LEVEL) |
1228 | set_irq_chip_and_handler(irq, &ioapic_chip, | 1288 | set_irq_chip_and_handler_name(irq, &ioapic_chip, |
1229 | handle_fasteoi_irq); | 1289 | handle_fasteoi_irq, "fasteoi"); |
1230 | else | 1290 | else { |
1231 | set_irq_chip_and_handler(irq, &ioapic_chip, | 1291 | irq_desc[irq].status |= IRQ_DELAYED_DISABLE; |
1232 | handle_edge_irq); | 1292 | set_irq_chip_and_handler_name(irq, &ioapic_chip, |
1293 | handle_edge_irq, "edge"); | ||
1294 | } | ||
1233 | set_intr_gate(vector, interrupt[irq]); | 1295 | set_intr_gate(vector, interrupt[irq]); |
1234 | } | 1296 | } |
1235 | 1297 | ||
@@ -2235,7 +2297,8 @@ static inline void check_timer(void) | |||
2235 | printk(KERN_INFO "...trying to set up timer as Virtual Wire IRQ..."); | 2297 | printk(KERN_INFO "...trying to set up timer as Virtual Wire IRQ..."); |
2236 | 2298 | ||
2237 | disable_8259A_irq(0); | 2299 | disable_8259A_irq(0); |
2238 | set_irq_chip_and_handler(0, &lapic_chip, handle_fasteoi_irq); | 2300 | set_irq_chip_and_handler_name(0, &lapic_chip, handle_fasteoi_irq, |
2301 | "fasteio"); | ||
2239 | apic_write_around(APIC_LVT0, APIC_DM_FIXED | vector); /* Fixed mode */ | 2302 | apic_write_around(APIC_LVT0, APIC_DM_FIXED | vector); /* Fixed mode */ |
2240 | enable_8259A_irq(0); | 2303 | enable_8259A_irq(0); |
2241 | 2304 | ||
@@ -2541,7 +2604,8 @@ int arch_setup_msi_irq(unsigned int irq, struct pci_dev *dev) | |||
2541 | 2604 | ||
2542 | write_msi_msg(irq, &msg); | 2605 | write_msi_msg(irq, &msg); |
2543 | 2606 | ||
2544 | set_irq_chip_and_handler(irq, &msi_chip, handle_edge_irq); | 2607 | set_irq_chip_and_handler_name(irq, &msi_chip, handle_edge_irq, |
2608 | "edge"); | ||
2545 | 2609 | ||
2546 | return 0; | 2610 | return 0; |
2547 | } | 2611 | } |
@@ -2562,18 +2626,16 @@ void arch_teardown_msi_irq(unsigned int irq) | |||
2562 | 2626 | ||
2563 | static void target_ht_irq(unsigned int irq, unsigned int dest) | 2627 | static void target_ht_irq(unsigned int irq, unsigned int dest) |
2564 | { | 2628 | { |
2565 | u32 low, high; | 2629 | struct ht_irq_msg msg; |
2566 | low = read_ht_irq_low(irq); | 2630 | fetch_ht_irq_msg(irq, &msg); |
2567 | high = read_ht_irq_high(irq); | ||
2568 | 2631 | ||
2569 | low &= ~(HT_IRQ_LOW_DEST_ID_MASK); | 2632 | msg.address_lo &= ~(HT_IRQ_LOW_DEST_ID_MASK); |
2570 | high &= ~(HT_IRQ_HIGH_DEST_ID_MASK); | 2633 | msg.address_hi &= ~(HT_IRQ_HIGH_DEST_ID_MASK); |
2571 | 2634 | ||
2572 | low |= HT_IRQ_LOW_DEST_ID(dest); | 2635 | msg.address_lo |= HT_IRQ_LOW_DEST_ID(dest); |
2573 | high |= HT_IRQ_HIGH_DEST_ID(dest); | 2636 | msg.address_hi |= HT_IRQ_HIGH_DEST_ID(dest); |
2574 | 2637 | ||
2575 | write_ht_irq_low(irq, low); | 2638 | write_ht_irq_msg(irq, &msg); |
2576 | write_ht_irq_high(irq, high); | ||
2577 | } | 2639 | } |
2578 | 2640 | ||
2579 | static void set_ht_irq_affinity(unsigned int irq, cpumask_t mask) | 2641 | static void set_ht_irq_affinity(unsigned int irq, cpumask_t mask) |
@@ -2594,7 +2656,7 @@ static void set_ht_irq_affinity(unsigned int irq, cpumask_t mask) | |||
2594 | } | 2656 | } |
2595 | #endif | 2657 | #endif |
2596 | 2658 | ||
2597 | static struct hw_interrupt_type ht_irq_chip = { | 2659 | static struct irq_chip ht_irq_chip = { |
2598 | .name = "PCI-HT", | 2660 | .name = "PCI-HT", |
2599 | .mask = mask_ht_irq, | 2661 | .mask = mask_ht_irq, |
2600 | .unmask = unmask_ht_irq, | 2662 | .unmask = unmask_ht_irq, |
@@ -2611,7 +2673,7 @@ int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev) | |||
2611 | 2673 | ||
2612 | vector = assign_irq_vector(irq); | 2674 | vector = assign_irq_vector(irq); |
2613 | if (vector >= 0) { | 2675 | if (vector >= 0) { |
2614 | u32 low, high; | 2676 | struct ht_irq_msg msg; |
2615 | unsigned dest; | 2677 | unsigned dest; |
2616 | cpumask_t tmp; | 2678 | cpumask_t tmp; |
2617 | 2679 | ||
@@ -2619,9 +2681,10 @@ int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev) | |||
2619 | cpu_set(vector >> 8, tmp); | 2681 | cpu_set(vector >> 8, tmp); |
2620 | dest = cpu_mask_to_apicid(tmp); | 2682 | dest = cpu_mask_to_apicid(tmp); |
2621 | 2683 | ||
2622 | high = HT_IRQ_HIGH_DEST_ID(dest); | 2684 | msg.address_hi = HT_IRQ_HIGH_DEST_ID(dest); |
2623 | 2685 | ||
2624 | low = HT_IRQ_LOW_BASE | | 2686 | msg.address_lo = |
2687 | HT_IRQ_LOW_BASE | | ||
2625 | HT_IRQ_LOW_DEST_ID(dest) | | 2688 | HT_IRQ_LOW_DEST_ID(dest) | |
2626 | HT_IRQ_LOW_VECTOR(vector) | | 2689 | HT_IRQ_LOW_VECTOR(vector) | |
2627 | ((INT_DEST_MODE == 0) ? | 2690 | ((INT_DEST_MODE == 0) ? |
@@ -2633,10 +2696,10 @@ int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev) | |||
2633 | HT_IRQ_LOW_MT_ARBITRATED) | | 2696 | HT_IRQ_LOW_MT_ARBITRATED) | |
2634 | HT_IRQ_LOW_IRQ_MASKED; | 2697 | HT_IRQ_LOW_IRQ_MASKED; |
2635 | 2698 | ||
2636 | write_ht_irq_low(irq, low); | 2699 | write_ht_irq_msg(irq, &msg); |
2637 | write_ht_irq_high(irq, high); | ||
2638 | 2700 | ||
2639 | set_irq_chip_and_handler(irq, &ht_irq_chip, handle_edge_irq); | 2701 | set_irq_chip_and_handler_name(irq, &ht_irq_chip, |
2702 | handle_edge_irq, "edge"); | ||
2640 | } | 2703 | } |
2641 | return vector; | 2704 | return vector; |
2642 | } | 2705 | } |
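io_apic_modify() exists for read-modify-write sequences in which io_apic_read() has already loaded the index register; on chipsets with the SiS erratum (sis_apic_bug) the index is rewritten anyway. A minimal sketch of masking one redirection-table entry with that pattern (the mask-bit constant and helper name are assumptions, not part of this patch):

#define EXAMPLE_IOAPIC_REDIR_MASKED	0x00010000	/* mask bit in the low RTE word */

static void example_mask_rte(unsigned int apic, unsigned int pin)
{
	unsigned long flags;
	unsigned int low;

	spin_lock_irqsave(&ioapic_lock, flags);
	low = io_apic_read(apic, 0x10 + pin * 2);	/* read sets up the index register */
	io_apic_modify(apic, 0x10 + pin * 2,
		       low | EXAMPLE_IOAPIC_REDIR_MASKED);
	spin_unlock_irqrestore(&ioapic_lock, flags);
}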
diff --git a/arch/i386/kernel/irq.c b/arch/i386/kernel/irq.c index 8cfc7dbec7b9..3201d421090a 100644 --- a/arch/i386/kernel/irq.c +++ b/arch/i386/kernel/irq.c | |||
@@ -258,7 +258,7 @@ int show_interrupts(struct seq_file *p, void *v) | |||
258 | seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]); | 258 | seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]); |
259 | #endif | 259 | #endif |
260 | seq_printf(p, " %8s", irq_desc[i].chip->name); | 260 | seq_printf(p, " %8s", irq_desc[i].chip->name); |
261 | seq_printf(p, "-%s", handle_irq_name(irq_desc[i].handle_irq)); | 261 | seq_printf(p, "-%-8s", irq_desc[i].name); |
262 | seq_printf(p, " %s", action->name); | 262 | seq_printf(p, " %s", action->name); |
263 | 263 | ||
264 | for (action=action->next; action; action = action->next) | 264 | for (action=action->next; action; action = action->next) |
diff --git a/arch/i386/kernel/kprobes.c b/arch/i386/kernel/kprobes.c index d98e44b16fe2..fc79e1e859c4 100644 --- a/arch/i386/kernel/kprobes.c +++ b/arch/i386/kernel/kprobes.c | |||
@@ -361,8 +361,11 @@ no_kprobe: | |||
361 | asm volatile ( ".global kretprobe_trampoline\n" | 361 | asm volatile ( ".global kretprobe_trampoline\n" |
362 | "kretprobe_trampoline: \n" | 362 | "kretprobe_trampoline: \n" |
363 | " pushf\n" | 363 | " pushf\n" |
364 | /* skip cs, eip, orig_eax, es, ds */ | 364 | /* skip cs, eip, orig_eax */ |
365 | " subl $20, %esp\n" | 365 | " subl $12, %esp\n" |
366 | " pushl %gs\n" | ||
367 | " pushl %ds\n" | ||
368 | " pushl %es\n" | ||
366 | " pushl %eax\n" | 369 | " pushl %eax\n" |
367 | " pushl %ebp\n" | 370 | " pushl %ebp\n" |
368 | " pushl %edi\n" | 371 | " pushl %edi\n" |
@@ -373,10 +376,10 @@ no_kprobe: | |||
373 | " movl %esp, %eax\n" | 376 | " movl %esp, %eax\n" |
374 | " call trampoline_handler\n" | 377 | " call trampoline_handler\n" |
375 | /* move eflags to cs */ | 378 | /* move eflags to cs */ |
376 | " movl 48(%esp), %edx\n" | 379 | " movl 52(%esp), %edx\n" |
377 | " movl %edx, 44(%esp)\n" | 380 | " movl %edx, 48(%esp)\n" |
378 | /* save true return address on eflags */ | 381 | /* save true return address on eflags */ |
379 | " movl %eax, 48(%esp)\n" | 382 | " movl %eax, 52(%esp)\n" |
380 | " popl %ebx\n" | 383 | " popl %ebx\n" |
381 | " popl %ecx\n" | 384 | " popl %ecx\n" |
382 | " popl %edx\n" | 385 | " popl %edx\n" |
@@ -384,8 +387,8 @@ no_kprobe: | |||
384 | " popl %edi\n" | 387 | " popl %edi\n" |
385 | " popl %ebp\n" | 388 | " popl %ebp\n" |
386 | " popl %eax\n" | 389 | " popl %eax\n" |
387 | /* skip eip, orig_eax, es, ds */ | 390 | /* skip eip, orig_eax, es, ds, gs */ |
388 | " addl $16, %esp\n" | 391 | " addl $20, %esp\n" |
389 | " popf\n" | 392 | " popf\n" |
390 | " ret\n"); | 393 | " ret\n"); |
391 | } | 394 | } |
@@ -404,6 +407,10 @@ fastcall void *__kprobes trampoline_handler(struct pt_regs *regs) | |||
404 | INIT_HLIST_HEAD(&empty_rp); | 407 | INIT_HLIST_HEAD(&empty_rp); |
405 | spin_lock_irqsave(&kretprobe_lock, flags); | 408 | spin_lock_irqsave(&kretprobe_lock, flags); |
406 | head = kretprobe_inst_table_head(current); | 409 | head = kretprobe_inst_table_head(current); |
410 | /* fixup registers */ | ||
411 | regs->xcs = __KERNEL_CS; | ||
412 | regs->eip = trampoline_address; | ||
413 | regs->orig_eax = 0xffffffff; | ||
407 | 414 | ||
408 | /* | 415 | /* |
409 | * It is possible to have multiple instances associated with a given | 416 | * It is possible to have multiple instances associated with a given |
@@ -425,6 +432,7 @@ fastcall void *__kprobes trampoline_handler(struct pt_regs *regs) | |||
425 | 432 | ||
426 | if (ri->rp && ri->rp->handler){ | 433 | if (ri->rp && ri->rp->handler){ |
427 | __get_cpu_var(current_kprobe) = &ri->rp->kp; | 434 | __get_cpu_var(current_kprobe) = &ri->rp->kp; |
435 | get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE; | ||
428 | ri->rp->handler(ri, regs); | 436 | ri->rp->handler(ri, regs); |
429 | __get_cpu_var(current_kprobe) = NULL; | 437 | __get_cpu_var(current_kprobe) = NULL; |
430 | } | 438 | } |
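The offset changes (48 to 52, 44 to 48, $16 to $20) follow directly from the three extra segment pushes: between the new %esp and the pushf'd eflags the trampoline now stacks seven general registers, three segment registers and the three slots reserved for cs, eip and orig_eax. Eflags is then copied from 52(%esp) into the cs slot at 48(%esp), and the real return address takes its place so that popf plus ret land back in the probed caller. The arithmetic, as a sketch:

/* sketch: distance from %esp (after all pushes) up to the saved eflags */
#define TRAMP_GPRS	7	/* eax, ebp, edi, esi, edx, ecx, ebx */
#define TRAMP_SEGS	3	/* gs, ds, es (new in this change)   */
#define TRAMP_SKIPPED	3	/* cs, eip, orig_eax ("subl $12")    */

#define TRAMP_EFLAGS_OFF ((TRAMP_GPRS + TRAMP_SEGS + TRAMP_SKIPPED) * 4)	/* 52 */
#define TRAMP_CS_OFF	 (TRAMP_EFLAGS_OFF - 4)					/* 48 */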
diff --git a/arch/i386/kernel/microcode.c b/arch/i386/kernel/microcode.c index 9b9479768d5e..23f5984d0654 100644 --- a/arch/i386/kernel/microcode.c +++ b/arch/i386/kernel/microcode.c | |||
@@ -577,7 +577,7 @@ static void microcode_init_cpu(int cpu) | |||
577 | set_cpus_allowed(current, cpumask_of_cpu(cpu)); | 577 | set_cpus_allowed(current, cpumask_of_cpu(cpu)); |
578 | mutex_lock(µcode_mutex); | 578 | mutex_lock(µcode_mutex); |
579 | collect_cpu_info(cpu); | 579 | collect_cpu_info(cpu); |
580 | if (uci->valid) | 580 | if (uci->valid && system_state == SYSTEM_RUNNING) |
581 | cpu_request_microcode(cpu); | 581 | cpu_request_microcode(cpu); |
582 | mutex_unlock(µcode_mutex); | 582 | mutex_unlock(µcode_mutex); |
583 | set_cpus_allowed(current, old); | 583 | set_cpus_allowed(current, old); |
@@ -656,14 +656,18 @@ static struct attribute_group mc_attr_group = { | |||
656 | 656 | ||
657 | static int mc_sysdev_add(struct sys_device *sys_dev) | 657 | static int mc_sysdev_add(struct sys_device *sys_dev) |
658 | { | 658 | { |
659 | int cpu = sys_dev->id; | 659 | int err, cpu = sys_dev->id; |
660 | struct ucode_cpu_info *uci = ucode_cpu_info + cpu; | 660 | struct ucode_cpu_info *uci = ucode_cpu_info + cpu; |
661 | 661 | ||
662 | if (!cpu_online(cpu)) | 662 | if (!cpu_online(cpu)) |
663 | return 0; | 663 | return 0; |
664 | |||
664 | pr_debug("Microcode:CPU %d added\n", cpu); | 665 | pr_debug("Microcode:CPU %d added\n", cpu); |
665 | memset(uci, 0, sizeof(*uci)); | 666 | memset(uci, 0, sizeof(*uci)); |
666 | sysfs_create_group(&sys_dev->kobj, &mc_attr_group); | 667 | |
668 | err = sysfs_create_group(&sys_dev->kobj, &mc_attr_group); | ||
669 | if (err) | ||
670 | return err; | ||
667 | 671 | ||
668 | microcode_init_cpu(cpu); | 672 | microcode_init_cpu(cpu); |
669 | return 0; | 673 | return 0; |
diff --git a/arch/i386/kernel/nmi.c b/arch/i386/kernel/nmi.c index 3e8e3adb0489..eaafe233a5da 100644 --- a/arch/i386/kernel/nmi.c +++ b/arch/i386/kernel/nmi.c | |||
@@ -219,11 +219,11 @@ static int __init check_nmi_watchdog(void) | |||
219 | int cpu; | 219 | int cpu; |
220 | 220 | ||
221 | /* Enable NMI watchdog for newer systems. | 221 | /* Enable NMI watchdog for newer systems. |
222 | Actually it should be safe for most systems before 2004 too except | 222 | Probably safe on most older systems too, but let's be careful. |
223 | for some IBM systems that corrupt registers when NMI happens | 223 | IBM ThinkPads use INT10 inside SMM and that allows early NMI inside SMM |
224 | during SMM. Unfortunately we don't have more exact information | 224 | which hangs the system. Disable watchdog for all thinkpads */ |
225 | on these and use this coarse check. */ | 225 | if (nmi_watchdog == NMI_DEFAULT && dmi_get_year(DMI_BIOS_DATE) >= 2004 && |
226 | if (nmi_watchdog == NMI_DEFAULT && dmi_get_year(DMI_BIOS_DATE) >= 2004) | 226 | !dmi_name_in_vendors("ThinkPad")) |
227 | nmi_watchdog = NMI_LOCAL_APIC; | 227 | nmi_watchdog = NMI_LOCAL_APIC; |
228 | 228 | ||
229 | if ((nmi_watchdog == NMI_NONE) || (nmi_watchdog == NMI_DEFAULT)) | 229 | if ((nmi_watchdog == NMI_NONE) || (nmi_watchdog == NMI_DEFAULT)) |
diff --git a/arch/i386/kernel/process.c b/arch/i386/kernel/process.c index b0a07801d9df..dd53c58f64f1 100644 --- a/arch/i386/kernel/process.c +++ b/arch/i386/kernel/process.c | |||
@@ -205,7 +205,7 @@ void cpu_idle(void) | |||
205 | void cpu_idle_wait(void) | 205 | void cpu_idle_wait(void) |
206 | { | 206 | { |
207 | unsigned int cpu, this_cpu = get_cpu(); | 207 | unsigned int cpu, this_cpu = get_cpu(); |
208 | cpumask_t map; | 208 | cpumask_t map, tmp = current->cpus_allowed; |
209 | 209 | ||
210 | set_cpus_allowed(current, cpumask_of_cpu(this_cpu)); | 210 | set_cpus_allowed(current, cpumask_of_cpu(this_cpu)); |
211 | put_cpu(); | 211 | put_cpu(); |
@@ -227,6 +227,8 @@ void cpu_idle_wait(void) | |||
227 | } | 227 | } |
228 | cpus_and(map, map, cpu_online_map); | 228 | cpus_and(map, map, cpu_online_map); |
229 | } while (!cpus_empty(map)); | 229 | } while (!cpus_empty(map)); |
230 | |||
231 | set_cpus_allowed(current, tmp); | ||
230 | } | 232 | } |
231 | EXPORT_SYMBOL_GPL(cpu_idle_wait); | 233 | EXPORT_SYMBOL_GPL(cpu_idle_wait); |
232 | 234 | ||
@@ -236,20 +238,28 @@ EXPORT_SYMBOL_GPL(cpu_idle_wait); | |||
236 | * We execute MONITOR against need_resched and enter optimized wait state | 238 | * We execute MONITOR against need_resched and enter optimized wait state |
237 | * through MWAIT. Whenever someone changes need_resched, we would be woken | 239 | * through MWAIT. Whenever someone changes need_resched, we would be woken |
238 | * up from MWAIT (without an IPI). | 240 | * up from MWAIT (without an IPI). |
241 | * | ||
242 | * New with Core Duo processors, MWAIT can take some hints based on CPU | ||
243 | * capability. | ||
239 | */ | 244 | */ |
240 | static void mwait_idle(void) | 245 | void mwait_idle_with_hints(unsigned long eax, unsigned long ecx) |
241 | { | 246 | { |
242 | local_irq_enable(); | 247 | if (!need_resched()) { |
243 | |||
244 | while (!need_resched()) { | ||
245 | __monitor((void *)¤t_thread_info()->flags, 0, 0); | 248 | __monitor((void *)¤t_thread_info()->flags, 0, 0); |
246 | smp_mb(); | 249 | smp_mb(); |
247 | if (need_resched()) | 250 | if (!need_resched()) |
248 | break; | 251 | __mwait(eax, ecx); |
249 | __mwait(0, 0); | ||
250 | } | 252 | } |
251 | } | 253 | } |
252 | 254 | ||
255 | /* Default MONITOR/MWAIT with no hints, used for default C1 state */ | ||
256 | static void mwait_idle(void) | ||
257 | { | ||
258 | local_irq_enable(); | ||
259 | while (!need_resched()) | ||
260 | mwait_idle_with_hints(0, 0); | ||
261 | } | ||
262 | |||
253 | void __devinit select_idle_routine(const struct cpuinfo_x86 *c) | 263 | void __devinit select_idle_routine(const struct cpuinfo_x86 *c) |
254 | { | 264 | { |
255 | if (cpu_has(c, X86_FEATURE_MWAIT)) { | 265 | if (cpu_has(c, X86_FEATURE_MWAIT)) { |
@@ -328,7 +338,6 @@ extern void kernel_thread_helper(void); | |||
328 | int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags) | 338 | int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags) |
329 | { | 339 | { |
330 | struct pt_regs regs; | 340 | struct pt_regs regs; |
331 | int err; | ||
332 | 341 | ||
333 | memset(®s, 0, sizeof(regs)); | 342 | memset(®s, 0, sizeof(regs)); |
334 | 343 | ||
@@ -343,10 +352,7 @@ int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags) | |||
343 | regs.eflags = X86_EFLAGS_IF | X86_EFLAGS_SF | X86_EFLAGS_PF | 0x2; | 352 | regs.eflags = X86_EFLAGS_IF | X86_EFLAGS_SF | X86_EFLAGS_PF | 0x2; |
344 | 353 | ||
345 | /* Ok, create the new process.. */ | 354 | /* Ok, create the new process.. */ |
346 | err = do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, ®s, 0, NULL, NULL); | 355 | return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, ®s, 0, NULL, NULL); |
347 | if (err == 0) /* terminate kernel stack */ | ||
348 | task_pt_regs(current)->eip = 0; | ||
349 | return err; | ||
350 | } | 356 | } |
351 | EXPORT_SYMBOL(kernel_thread); | 357 | EXPORT_SYMBOL(kernel_thread); |
352 | 358 | ||
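mwait_idle_with_hints() is now the single MONITOR/MWAIT primitive: the default C1 path passes (0, 0), while the ACPI FFH code in cstate.c above passes the _CST hint in EAX and MWAIT_ECX_INTERRUPT_BREAK in ECX so an interrupt always breaks the wait. A hedged sketch of a caller using a hypothetical deeper C-state hint:

#define EXAMPLE_MWAIT_C3_HINT		0x20	/* (cstate_type - 1) << 4 | sub-state */
#define EXAMPLE_MWAIT_ECX_INT_BREAK	0x01	/* wake on interrupt even with IF clear */

static void example_enter_c3(void)
{
	mwait_idle_with_hints(EXAMPLE_MWAIT_C3_HINT, EXAMPLE_MWAIT_ECX_INT_BREAK);
}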
diff --git a/arch/i386/kernel/setup.c b/arch/i386/kernel/setup.c index 000cf03751fe..141041dde74d 100644 --- a/arch/i386/kernel/setup.c +++ b/arch/i386/kernel/setup.c | |||
@@ -846,7 +846,7 @@ efi_find_max_pfn(unsigned long start, unsigned long end, void *arg) | |||
846 | static int __init | 846 | static int __init |
847 | efi_memory_present_wrapper(unsigned long start, unsigned long end, void *arg) | 847 | efi_memory_present_wrapper(unsigned long start, unsigned long end, void *arg) |
848 | { | 848 | { |
849 | memory_present(0, start, end); | 849 | memory_present(0, PFN_UP(start), PFN_DOWN(end)); |
850 | return 0; | 850 | return 0; |
851 | } | 851 | } |
852 | 852 | ||
@@ -1083,16 +1083,15 @@ static unsigned long __init setup_memory(void) | |||
1083 | 1083 | ||
1084 | void __init zone_sizes_init(void) | 1084 | void __init zone_sizes_init(void) |
1085 | { | 1085 | { |
1086 | unsigned long max_zone_pfns[MAX_NR_ZONES]; | ||
1087 | memset(max_zone_pfns, 0, sizeof(max_zone_pfns)); | ||
1088 | max_zone_pfns[ZONE_DMA] = | ||
1089 | virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT; | ||
1090 | max_zone_pfns[ZONE_NORMAL] = max_low_pfn; | ||
1086 | #ifdef CONFIG_HIGHMEM | 1091 | #ifdef CONFIG_HIGHMEM |
1087 | unsigned long max_zone_pfns[MAX_NR_ZONES] = { | 1092 | max_zone_pfns[ZONE_HIGHMEM] = highend_pfn; |
1088 | virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT, | ||
1089 | max_low_pfn, | ||
1090 | highend_pfn}; | ||
1091 | add_active_range(0, 0, highend_pfn); | 1093 | add_active_range(0, 0, highend_pfn); |
1092 | #else | 1094 | #else |
1093 | unsigned long max_zone_pfns[MAX_NR_ZONES] = { | ||
1094 | virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT, | ||
1095 | max_low_pfn}; | ||
1096 | add_active_range(0, 0, max_low_pfn); | 1095 | add_active_range(0, 0, max_low_pfn); |
1097 | #endif | 1096 | #endif |
1098 | 1097 | ||
diff --git a/arch/i386/kernel/syscall_table.S b/arch/i386/kernel/syscall_table.S index 7e639f78b0b9..2697e9210e92 100644 --- a/arch/i386/kernel/syscall_table.S +++ b/arch/i386/kernel/syscall_table.S | |||
@@ -318,3 +318,4 @@ ENTRY(sys_call_table) | |||
318 | .long sys_vmsplice | 318 | .long sys_vmsplice |
319 | .long sys_move_pages | 319 | .long sys_move_pages |
320 | .long sys_getcpu | 320 | .long sys_getcpu |
321 | .long sys_epoll_pwait | ||
diff --git a/arch/i386/kernel/traps.c b/arch/i386/kernel/traps.c index 00489b706d27..fe9c5e8e7e6f 100644 --- a/arch/i386/kernel/traps.c +++ b/arch/i386/kernel/traps.c | |||
@@ -129,15 +129,19 @@ static inline unsigned long print_context_stack(struct thread_info *tinfo, | |||
129 | 129 | ||
130 | #ifdef CONFIG_FRAME_POINTER | 130 | #ifdef CONFIG_FRAME_POINTER |
131 | while (valid_stack_ptr(tinfo, (void *)ebp)) { | 131 | while (valid_stack_ptr(tinfo, (void *)ebp)) { |
132 | unsigned long new_ebp; | ||
132 | addr = *(unsigned long *)(ebp + 4); | 133 | addr = *(unsigned long *)(ebp + 4); |
133 | ops->address(data, addr); | 134 | ops->address(data, addr); |
134 | /* | 135 | /* |
135 | * break out of recursive entries (such as | 136 | * break out of recursive entries (such as |
136 | * end_of_stack_stop_unwind_function): | 137 | * end_of_stack_stop_unwind_function). Also, |
138 | * we can never allow a frame pointer to | ||
139 | * move downwards! | ||
137 | */ | 140 | */ |
138 | if (ebp == *(unsigned long *)ebp) | 141 | new_ebp = *(unsigned long *)ebp; |
142 | if (new_ebp <= ebp) | ||
139 | break; | 143 | break; |
140 | ebp = *(unsigned long *)ebp; | 144 | ebp = new_ebp; |
141 | } | 145 | } |
142 | #else | 146 | #else |
143 | while (valid_stack_ptr(tinfo, stack)) { | 147 | while (valid_stack_ptr(tinfo, stack)) { |
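The added check guards against saved frame pointers that point downwards or at themselves, which a corrupted stack would otherwise turn into a bogus or endless trace: a frame chain is only trusted while each saved %ebp is strictly greater than the current one. A compact sketch of the invariant, with hypothetical addresses in the comment:

/* 0xc159fe30 -> 0xc159fe68 keeps walking; 0xc159fe68 -> 0xc159fe20 stops the trace */
static inline int example_frame_advances(unsigned long ebp)
{
	unsigned long new_ebp = *(unsigned long *)ebp;	/* saved caller %ebp */

	return new_ebp > ebp;	/* must move up the stack, never down or sideways */
}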
diff --git a/arch/i386/kernel/tsc.c b/arch/i386/kernel/tsc.c index b8fa0a8b2e47..fbc95828cd74 100644 --- a/arch/i386/kernel/tsc.c +++ b/arch/i386/kernel/tsc.c | |||
@@ -349,8 +349,8 @@ static int tsc_update_callback(void) | |||
349 | int change = 0; | 349 | int change = 0; |
350 | 350 | ||
351 | /* check to see if we should switch to the safe clocksource: */ | 351 | /* check to see if we should switch to the safe clocksource: */ |
352 | if (clocksource_tsc.rating != 50 && check_tsc_unstable()) { | 352 | if (clocksource_tsc.rating != 0 && check_tsc_unstable()) { |
353 | clocksource_tsc.rating = 50; | 353 | clocksource_tsc.rating = 0; |
354 | clocksource_reselect(); | 354 | clocksource_reselect(); |
355 | change = 1; | 355 | change = 1; |
356 | } | 356 | } |
@@ -461,7 +461,7 @@ static int __init init_tsc_clocksource(void) | |||
461 | clocksource_tsc.shift); | 461 | clocksource_tsc.shift); |
462 | /* lower the rating if we already know its unstable: */ | 462 | /* lower the rating if we already know its unstable: */ |
463 | if (check_tsc_unstable()) | 463 | if (check_tsc_unstable()) |
464 | clocksource_tsc.rating = 50; | 464 | clocksource_tsc.rating = 0; |
465 | 465 | ||
466 | init_timer(&verify_tsc_freq_timer); | 466 | init_timer(&verify_tsc_freq_timer); |
467 | verify_tsc_freq_timer.function = verify_tsc_freq; | 467 | verify_tsc_freq_timer.function = verify_tsc_freq; |
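Dropping the unstable-TSC rating from 50 to 0 pushes the TSC below every other registered clocksource instead of merely demoting it, so reselection can never pick it again. The toy model below shows the effect of rating-based selection; it is not the kernel's clocksource core, and the jiffies and PIT ratings are assumptions used only for illustration.

/*
 * Highest-rated source wins; demoting an unstable TSC to rating 0
 * guarantees it loses to every other registered source.
 */
#include <stdio.h>

struct clocksource { const char *name; int rating; };

static const struct clocksource *select_best(const struct clocksource *cs, int n)
{
	const struct clocksource *best = NULL;
	for (int i = 0; i < n; i++)
		if (!best || cs[i].rating > best->rating)
			best = &cs[i];
	return best;
}

int main(void)
{
	struct clocksource sources[] = {
		{ "jiffies", 1 },	/* assumed baseline rating */
		{ "pit", 110 },		/* assumed rating */
		{ "tsc", 300 },
	};

	printf("before: %s\n", select_best(sources, 3)->name);

	/* TSC found unstable: drop it below everything else */
	sources[2].rating = 0;
	printf("after:  %s\n", select_best(sources, 3)->name);
	return 0;
}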
diff --git a/arch/i386/kernel/vmlinux.lds.S b/arch/i386/kernel/vmlinux.lds.S index 1e7ac1c44ddc..c6f84a0322ba 100644 --- a/arch/i386/kernel/vmlinux.lds.S +++ b/arch/i386/kernel/vmlinux.lds.S | |||
@@ -51,6 +51,7 @@ SECTIONS | |||
51 | __tracedata_end = .; | 51 | __tracedata_end = .; |
52 | 52 | ||
53 | /* writeable */ | 53 | /* writeable */ |
54 | . = ALIGN(4096); | ||
54 | .data : AT(ADDR(.data) - LOAD_OFFSET) { /* Data */ | 55 | .data : AT(ADDR(.data) - LOAD_OFFSET) { /* Data */ |
55 | *(.data) | 56 | *(.data) |
56 | CONSTRUCTORS | 57 | CONSTRUCTORS |
@@ -126,13 +127,7 @@ SECTIONS | |||
126 | __setup_end = .; | 127 | __setup_end = .; |
127 | __initcall_start = .; | 128 | __initcall_start = .; |
128 | .initcall.init : AT(ADDR(.initcall.init) - LOAD_OFFSET) { | 129 | .initcall.init : AT(ADDR(.initcall.init) - LOAD_OFFSET) { |
129 | *(.initcall1.init) | 130 | INITCALLS |
130 | *(.initcall2.init) | ||
131 | *(.initcall3.init) | ||
132 | *(.initcall4.init) | ||
133 | *(.initcall5.init) | ||
134 | *(.initcall6.init) | ||
135 | *(.initcall7.init) | ||
136 | } | 131 | } |
137 | __initcall_end = .; | 132 | __initcall_end = .; |
138 | __con_initcall_start = .; | 133 | __con_initcall_start = .; |
diff --git a/arch/i386/lib/usercopy.c b/arch/i386/lib/usercopy.c index 08502fc6d0cb..d22cfc9d656c 100644 --- a/arch/i386/lib/usercopy.c +++ b/arch/i386/lib/usercopy.c | |||
@@ -9,6 +9,7 @@ | |||
9 | #include <linux/highmem.h> | 9 | #include <linux/highmem.h> |
10 | #include <linux/blkdev.h> | 10 | #include <linux/blkdev.h> |
11 | #include <linux/module.h> | 11 | #include <linux/module.h> |
12 | #include <linux/backing-dev.h> | ||
12 | #include <asm/uaccess.h> | 13 | #include <asm/uaccess.h> |
13 | #include <asm/mmx.h> | 14 | #include <asm/mmx.h> |
14 | 15 | ||
@@ -179,7 +180,7 @@ __clear_user(void __user *to, unsigned long n) | |||
179 | EXPORT_SYMBOL(__clear_user); | 180 | EXPORT_SYMBOL(__clear_user); |
180 | 181 | ||
181 | /** | 182 | /** |
182 | * strlen_user: - Get the size of a string in user space. | 183 | * strnlen_user: - Get the size of a string in user space. |
183 | * @s: The string to measure. | 184 | * @s: The string to measure. |
184 | * @n: The maximum valid length | 185 | * @n: The maximum valid length |
185 | * | 186 | * |
@@ -741,7 +742,7 @@ survive: | |||
741 | 742 | ||
742 | if (retval == -ENOMEM && is_init(current)) { | 743 | if (retval == -ENOMEM && is_init(current)) { |
744 | blk_congestion_wait(WRITE, HZ/50); | 745 | congestion_wait(WRITE, HZ/50); |
745 | goto survive; | 746 | goto survive; |
746 | } | 747 | } |
747 | 748 | ||
diff --git a/arch/i386/mach-visws/visws_apic.c b/arch/i386/mach-visws/visws_apic.c index 07097ed48890..38c2b13124d9 100644 --- a/arch/i386/mach-visws/visws_apic.c +++ b/arch/i386/mach-visws/visws_apic.c | |||
@@ -122,7 +122,7 @@ static void end_cobalt_irq(unsigned int irq) | |||
122 | spin_unlock_irqrestore(&cobalt_lock, flags); | 122 | spin_unlock_irqrestore(&cobalt_lock, flags); |
123 | } | 123 | } |
124 | 124 | ||
125 | static struct hw_interrupt_type cobalt_irq_type = { | 125 | static struct irq_chip cobalt_irq_type = { |
126 | .typename = "Cobalt-APIC", | 126 | .typename = "Cobalt-APIC", |
127 | .startup = startup_cobalt_irq, | 127 | .startup = startup_cobalt_irq, |
128 | .shutdown = disable_cobalt_irq, | 128 | .shutdown = disable_cobalt_irq, |
@@ -159,7 +159,7 @@ static void end_piix4_master_irq(unsigned int irq) | |||
159 | spin_unlock_irqrestore(&cobalt_lock, flags); | 159 | spin_unlock_irqrestore(&cobalt_lock, flags); |
160 | } | 160 | } |
161 | 161 | ||
162 | static struct hw_interrupt_type piix4_master_irq_type = { | 162 | static struct irq_chip piix4_master_irq_type = { |
163 | .typename = "PIIX4-master", | 163 | .typename = "PIIX4-master", |
164 | .startup = startup_piix4_master_irq, | 164 | .startup = startup_piix4_master_irq, |
165 | .ack = ack_cobalt_irq, | 165 | .ack = ack_cobalt_irq, |
@@ -167,9 +167,8 @@ static struct hw_interrupt_type piix4_master_irq_type = { | |||
167 | }; | 167 | }; |
168 | 168 | ||
169 | 169 | ||
170 | static struct hw_interrupt_type piix4_virtual_irq_type = { | 170 | static struct irq_chip piix4_virtual_irq_type = { |
171 | .typename = "PIIX4-virtual", | 171 | .typename = "PIIX4-virtual", |
172 | .startup = startup_8259A_irq, | ||
173 | .shutdown = disable_8259A_irq, | 172 | .shutdown = disable_8259A_irq, |
174 | .enable = enable_8259A_irq, | 173 | .enable = enable_8259A_irq, |
175 | .disable = disable_8259A_irq, | 174 | .disable = disable_8259A_irq, |
diff --git a/arch/i386/mach-voyager/voyager_basic.c b/arch/i386/mach-voyager/voyager_basic.c index c639d30d8bdc..8fe7e4593d5f 100644 --- a/arch/i386/mach-voyager/voyager_basic.c +++ b/arch/i386/mach-voyager/voyager_basic.c | |||
@@ -44,7 +44,7 @@ struct voyager_SUS *voyager_SUS = NULL; | |||
44 | 44 | ||
45 | #ifdef CONFIG_SMP | 45 | #ifdef CONFIG_SMP |
46 | static void | 46 | static void |
47 | voyager_dump(int dummy1, struct pt_regs *dummy2, struct tty_struct *dummy3) | 47 | voyager_dump(int dummy1, struct tty_struct *dummy3) |
48 | { | 48 | { |
49 | /* get here via a sysrq */ | 49 | /* get here via a sysrq */ |
50 | voyager_smp_dump(); | 50 | voyager_smp_dump(); |
@@ -166,7 +166,7 @@ voyager_memory_detect(int region, __u32 *start, __u32 *length) | |||
166 | * off the timer tick to the SMP code, since the VIC doesn't have an | 166 | * off the timer tick to the SMP code, since the VIC doesn't have an |
167 | * internal timer (The QIC does, but that's another story). */ | 167 | * internal timer (The QIC does, but that's another story). */ |
168 | void | 168 | void |
169 | voyager_timer_interrupt(struct pt_regs *regs) | 169 | voyager_timer_interrupt(void) |
170 | { | 170 | { |
171 | if((jiffies & 0x3ff) == 0) { | 171 | if((jiffies & 0x3ff) == 0) { |
172 | 172 | ||
@@ -202,7 +202,7 @@ voyager_timer_interrupt(struct pt_regs *regs) | |||
202 | } | 202 | } |
203 | } | 203 | } |
204 | #ifdef CONFIG_SMP | 204 | #ifdef CONFIG_SMP |
205 | smp_vic_timer_interrupt(regs); | 205 | smp_vic_timer_interrupt(); |
206 | #endif | 206 | #endif |
207 | } | 207 | } |
208 | 208 | ||
diff --git a/arch/i386/mach-voyager/voyager_smp.c b/arch/i386/mach-voyager/voyager_smp.c index d42422fc4af3..f3fea2ad50fe 100644 --- a/arch/i386/mach-voyager/voyager_smp.c +++ b/arch/i386/mach-voyager/voyager_smp.c | |||
@@ -85,8 +85,8 @@ static int ack_QIC_CPI(__u8 cpi); | |||
85 | static void ack_special_QIC_CPI(__u8 cpi); | 85 | static void ack_special_QIC_CPI(__u8 cpi); |
86 | static void ack_VIC_CPI(__u8 cpi); | 86 | static void ack_VIC_CPI(__u8 cpi); |
87 | static void send_CPI_allbutself(__u8 cpi); | 87 | static void send_CPI_allbutself(__u8 cpi); |
88 | static void enable_vic_irq(unsigned int irq); | 88 | static void mask_vic_irq(unsigned int irq); |
89 | static void disable_vic_irq(unsigned int irq); | 89 | static void unmask_vic_irq(unsigned int irq); |
90 | static unsigned int startup_vic_irq(unsigned int irq); | 90 | static unsigned int startup_vic_irq(unsigned int irq); |
91 | static void enable_local_vic_irq(unsigned int irq); | 91 | static void enable_local_vic_irq(unsigned int irq); |
92 | static void disable_local_vic_irq(unsigned int irq); | 92 | static void disable_local_vic_irq(unsigned int irq); |
@@ -205,15 +205,12 @@ ack_CPI(__u8 cpi) | |||
205 | /* The VIC IRQ descriptors -- these look almost identical to the | 205 | /* The VIC IRQ descriptors -- these look almost identical to the |
206 | * 8259 IRQs except that masks and things must be kept per processor | 206 | * 8259 IRQs except that masks and things must be kept per processor |
207 | */ | 207 | */ |
208 | static struct hw_interrupt_type vic_irq_type = { | 208 | static struct irq_chip vic_chip = { |
209 | .typename = "VIC-level", | 209 | .name = "VIC", |
210 | .startup = startup_vic_irq, | 210 | .startup = startup_vic_irq, |
211 | .shutdown = disable_vic_irq, | 211 | .mask = mask_vic_irq, |
212 | .enable = enable_vic_irq, | 212 | .unmask = unmask_vic_irq, |
213 | .disable = disable_vic_irq, | 213 | .set_affinity = set_vic_irq_affinity, |
214 | .ack = before_handle_vic_irq, | ||
215 | .end = after_handle_vic_irq, | ||
216 | .set_affinity = set_vic_irq_affinity, | ||
217 | }; | 214 | }; |
218 | 215 | ||
219 | /* used to count up as CPUs are brought on line (starts at 0) */ | 216 | /* used to count up as CPUs are brought on line (starts at 0) */ |
@@ -1144,9 +1141,9 @@ smp_apic_timer_interrupt(struct pt_regs *regs) | |||
1144 | fastcall void | 1141 | fastcall void |
1145 | smp_qic_timer_interrupt(struct pt_regs *regs) | 1142 | smp_qic_timer_interrupt(struct pt_regs *regs) |
1146 | { | 1143 | { |
1147 | ack_QIC_CPI(QIC_TIMER_CPI); | ||
1148 | struct pt_regs *old_regs = set_irq_regs(regs); | 1144 | struct pt_regs *old_regs = set_irq_regs(regs); |
1149 | wrapper_smp_local_timer_interrupt(void); | 1145 | ack_QIC_CPI(QIC_TIMER_CPI); |
1146 | wrapper_smp_local_timer_interrupt(); | ||
1150 | set_irq_regs(old_regs); | 1147 | set_irq_regs(old_regs); |
1151 | } | 1148 | } |
1152 | 1149 | ||
@@ -1270,12 +1267,10 @@ smp_send_stop(void) | |||
1270 | /* this function is triggered in time.c when a clock tick fires | 1267 | /* this function is triggered in time.c when a clock tick fires |
1271 | * we need to re-broadcast the tick to all CPUs */ | 1268 | * we need to re-broadcast the tick to all CPUs */ |
1272 | void | 1269 | void |
1273 | smp_vic_timer_interrupt(struct pt_regs *regs) | 1270 | smp_vic_timer_interrupt(void) |
1274 | { | 1271 | { |
1275 | struct pt_regs *old_regs = set_irq_regs(regs); | ||
1276 | send_CPI_allbutself(VIC_TIMER_CPI); | 1272 | send_CPI_allbutself(VIC_TIMER_CPI); |
1277 | smp_local_timer_interrupt(); | 1273 | smp_local_timer_interrupt(); |
1278 | set_irq_regs(old_regs); | ||
1279 | } | 1274 | } |
1280 | 1275 | ||
1281 | /* local (per CPU) timer interrupt. It does both profiling and | 1276 | /* local (per CPU) timer interrupt. It does both profiling and |
@@ -1310,7 +1305,7 @@ smp_local_timer_interrupt(void) | |||
1310 | per_cpu(prof_counter, cpu); | 1305 | per_cpu(prof_counter, cpu); |
1311 | } | 1306 | } |
1312 | 1307 | ||
1313 | update_process_times(user_mode_vm(irq_regs)); | 1308 | update_process_times(user_mode_vm(get_irq_regs())); |
1314 | } | 1309 | } |
1315 | 1310 | ||
1316 | if( ((1<<cpu) & voyager_extended_vic_processors) == 0) | 1311 | if( ((1<<cpu) & voyager_extended_vic_processors) == 0) |
@@ -1397,6 +1392,17 @@ setup_profiling_timer(unsigned int multiplier) | |||
1397 | return 0; | 1392 | return 0; |
1398 | } | 1393 | } |
1399 | 1394 | ||
1395 | /* This is a bit of a mess, but forced on us by the genirq changes | ||
1396 | * there's no genirq handler that really does what voyager wants | ||
1397 | * so hack it up with the simple IRQ handler */ | ||
1398 | static void fastcall | ||
1399 | handle_vic_irq(unsigned int irq, struct irq_desc *desc) | ||
1400 | { | ||
1401 | before_handle_vic_irq(irq); | ||
1402 | handle_simple_irq(irq, desc); | ||
1403 | after_handle_vic_irq(irq); | ||
1404 | } | ||
1405 | |||
1400 | 1406 | ||
1401 | /* The CPIs are handled in the per cpu 8259s, so they must be | 1407 | /* The CPIs are handled in the per cpu 8259s, so they must be |
1402 | * enabled to be received: FIX: enabling the CPIs in the early | 1408 | * enabled to be received: FIX: enabling the CPIs in the early |
@@ -1433,7 +1439,7 @@ smp_intr_init(void) | |||
1433 | * This is for later: first 16 correspond to PC IRQs; next 16 | 1439 | * This is for later: first 16 correspond to PC IRQs; next 16 |
1434 | * are Primary MC IRQs and final 16 are Secondary MC IRQs */ | 1440 | * are Primary MC IRQs and final 16 are Secondary MC IRQs */ |
1435 | for(i = 0; i < 48; i++) | 1441 | for(i = 0; i < 48; i++) |
1436 | irq_desc[i].chip = &vic_irq_type; | 1442 | set_irq_chip_and_handler(i, &vic_chip, handle_vic_irq); |
1437 | } | 1443 | } |
1438 | 1444 | ||
1439 | /* send a CPI at level cpi to a set of cpus in cpuset (set 1 bit per | 1445 | /* send a CPI at level cpi to a set of cpus in cpuset (set 1 bit per |
@@ -1531,7 +1537,7 @@ ack_VIC_CPI(__u8 cpi) | |||
1531 | static unsigned int | 1537 | static unsigned int |
1532 | startup_vic_irq(unsigned int irq) | 1538 | startup_vic_irq(unsigned int irq) |
1533 | { | 1539 | { |
1534 | enable_vic_irq(irq); | 1540 | unmask_vic_irq(irq); |
1535 | 1541 | ||
1536 | return 0; | 1542 | return 0; |
1537 | } | 1543 | } |
@@ -1558,7 +1564,7 @@ startup_vic_irq(unsigned int irq) | |||
1558 | * adjust their masks accordingly. */ | 1564 | * adjust their masks accordingly. */ |
1559 | 1565 | ||
1560 | static void | 1566 | static void |
1561 | enable_vic_irq(unsigned int irq) | 1567 | unmask_vic_irq(unsigned int irq) |
1562 | { | 1568 | { |
1563 | /* linux doesn't do processor-irq affinity, so enable on | 1569 | /* linux doesn't do processor-irq affinity, so enable on |
1564 | * all CPUs we know about */ | 1570 | * all CPUs we know about */ |
@@ -1567,7 +1573,7 @@ enable_vic_irq(unsigned int irq) | |||
1567 | __u32 processorList = 0; | 1573 | __u32 processorList = 0; |
1568 | unsigned long flags; | 1574 | unsigned long flags; |
1569 | 1575 | ||
1570 | VDEBUG(("VOYAGER: enable_vic_irq(%d) CPU%d affinity 0x%lx\n", | 1576 | VDEBUG(("VOYAGER: unmask_vic_irq(%d) CPU%d affinity 0x%lx\n", |
1571 | irq, cpu, cpu_irq_affinity[cpu])); | 1577 | irq, cpu, cpu_irq_affinity[cpu])); |
1572 | spin_lock_irqsave(&vic_irq_lock, flags); | 1578 | spin_lock_irqsave(&vic_irq_lock, flags); |
1573 | for_each_online_cpu(real_cpu) { | 1579 | for_each_online_cpu(real_cpu) { |
@@ -1591,7 +1597,7 @@ enable_vic_irq(unsigned int irq) | |||
1591 | } | 1597 | } |
1592 | 1598 | ||
1593 | static void | 1599 | static void |
1594 | disable_vic_irq(unsigned int irq) | 1600 | mask_vic_irq(unsigned int irq) |
1595 | { | 1601 | { |
1596 | /* lazy disable, do nothing */ | 1602 | /* lazy disable, do nothing */ |
1597 | } | 1603 | } |
@@ -1819,7 +1825,7 @@ set_vic_irq_affinity(unsigned int irq, cpumask_t mask) | |||
1819 | * disabled again as it comes in (voyager lazy disable). If | 1825 | * disabled again as it comes in (voyager lazy disable). If |
1820 | * the affinity map is tightened to disable the interrupt on a | 1826 | * the affinity map is tightened to disable the interrupt on a |
1821 | * cpu, it will be pushed off when it comes in */ | 1827 | * cpu, it will be pushed off when it comes in */ |
1822 | enable_vic_irq(irq); | 1828 | unmask_vic_irq(irq); |
1823 | } | 1829 | } |
1824 | 1830 | ||
1825 | static void | 1831 | static void |
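The voyager_smp.c conversion keeps the VIC's per-interrupt before/after work by wrapping the generic simple flow handler in handle_vic_irq and installing it with set_irq_chip_and_handler(). The user-space model below sketches that wrapping pattern; it is not kernel code, and the hook names and tiny irq_desc table are invented for illustration.

/*
 * A thin flow handler brackets the generic simple handler with
 * controller-specific before/after hooks and is installed per line.
 */
#include <stdio.h>

struct irq_desc;
typedef void (*irq_flow_handler_t)(unsigned int irq, struct irq_desc *desc);

struct irq_desc {
	irq_flow_handler_t handle;
	void (*action)(unsigned int irq);	/* the driver's handler */
};

static struct irq_desc irq_desc[4];

static void handle_simple_irq(unsigned int irq, struct irq_desc *desc)
{
	if (desc->action)
		desc->action(irq);
}

static void before_hook(unsigned int irq) { printf("ack   %u\n", irq); }
static void after_hook(unsigned int irq)  { printf("eoi   %u\n", irq); }

static void handle_wrapped_irq(unsigned int irq, struct irq_desc *desc)
{
	before_hook(irq);		/* stands in for before_handle_vic_irq() */
	handle_simple_irq(irq, desc);
	after_hook(irq);		/* stands in for after_handle_vic_irq() */
}

static void demo_action(unsigned int irq) { printf("drive %u\n", irq); }

int main(void)
{
	for (unsigned int i = 0; i < 4; i++)
		irq_desc[i].handle = handle_wrapped_irq;

	irq_desc[2].action = demo_action;
	irq_desc[2].handle(2, &irq_desc[2]);	/* simulate interrupt 2 firing */
	return 0;
}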
diff --git a/arch/i386/mm/discontig.c b/arch/i386/mm/discontig.c index 455597db84df..ddbdb0336f28 100644 --- a/arch/i386/mm/discontig.c +++ b/arch/i386/mm/discontig.c | |||
@@ -356,11 +356,12 @@ void __init numa_kva_reserve(void) | |||
356 | void __init zone_sizes_init(void) | 356 | void __init zone_sizes_init(void) |
357 | { | 357 | { |
358 | int nid; | 358 | int nid; |
359 | unsigned long max_zone_pfns[MAX_NR_ZONES] = { | 359 | unsigned long max_zone_pfns[MAX_NR_ZONES]; |
360 | virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT, | 360 | memset(max_zone_pfns, 0, sizeof(max_zone_pfns)); |
361 | max_low_pfn, | 361 | max_zone_pfns[ZONE_DMA] = |
362 | highend_pfn | 362 | virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT; |
363 | }; | 363 | max_zone_pfns[ZONE_NORMAL] = max_low_pfn; |
364 | max_zone_pfns[ZONE_HIGHMEM] = highend_pfn; | ||
364 | 365 | ||
365 | /* If SRAT has not registered memory, register it now */ | 366 | /* If SRAT has not registered memory, register it now */ |
366 | if (find_max_pfn_with_active_regions() == 0) { | 367 | if (find_max_pfn_with_active_regions() == 0) { |
diff --git a/arch/i386/pci/common.c b/arch/i386/pci/common.c index 68bce194e688..cdfcf971098b 100644 --- a/arch/i386/pci/common.c +++ b/arch/i386/pci/common.c | |||
@@ -20,6 +20,7 @@ | |||
20 | unsigned int pci_probe = PCI_PROBE_BIOS | PCI_PROBE_CONF1 | PCI_PROBE_CONF2 | | 20 | unsigned int pci_probe = PCI_PROBE_BIOS | PCI_PROBE_CONF1 | PCI_PROBE_CONF2 | |
21 | PCI_PROBE_MMCONF; | 21 | PCI_PROBE_MMCONF; |
22 | 22 | ||
23 | int pci_bf_sort; | ||
23 | int pci_routeirq; | 24 | int pci_routeirq; |
24 | int pcibios_last_bus = -1; | 25 | int pcibios_last_bus = -1; |
25 | unsigned long pirq_table_addr; | 26 | unsigned long pirq_table_addr; |
@@ -118,6 +119,20 @@ void __devinit pcibios_fixup_bus(struct pci_bus *b) | |||
118 | } | 119 | } |
119 | 120 | ||
120 | /* | 121 | /* |
122 | * Only use DMI information to set this if nothing was passed | ||
123 | * on the kernel command line (which was parsed earlier). | ||
124 | */ | ||
125 | |||
126 | static int __devinit set_bf_sort(struct dmi_system_id *d) | ||
127 | { | ||
128 | if (pci_bf_sort == pci_bf_sort_default) { | ||
129 | pci_bf_sort = pci_dmi_bf; | ||
130 | printk(KERN_INFO "PCI: %s detected, enabling pci=bfsort.\n", d->ident); | ||
131 | } | ||
132 | return 0; | ||
133 | } | ||
134 | |||
135 | /* | ||
121 | * Enable renumbering of PCI bus# ranges to reach all PCI busses (Cardbus) | 136 | * Enable renumbering of PCI bus# ranges to reach all PCI busses (Cardbus) |
122 | */ | 137 | */ |
123 | #ifdef __i386__ | 138 | #ifdef __i386__ |
@@ -130,11 +145,11 @@ static int __devinit assign_all_busses(struct dmi_system_id *d) | |||
130 | } | 145 | } |
131 | #endif | 146 | #endif |
132 | 147 | ||
148 | static struct dmi_system_id __devinitdata pciprobe_dmi_table[] = { | ||
149 | #ifdef __i386__ | ||
133 | /* | 150 | /* |
134 | * Laptops which need pci=assign-busses to see Cardbus cards | 151 | * Laptops which need pci=assign-busses to see Cardbus cards |
135 | */ | 152 | */ |
136 | static struct dmi_system_id __devinitdata pciprobe_dmi_table[] = { | ||
137 | #ifdef __i386__ | ||
138 | { | 153 | { |
139 | .callback = assign_all_busses, | 154 | .callback = assign_all_busses, |
140 | .ident = "Samsung X20 Laptop", | 155 | .ident = "Samsung X20 Laptop", |
@@ -144,6 +159,38 @@ static struct dmi_system_id __devinitdata pciprobe_dmi_table[] = { | |||
144 | }, | 159 | }, |
145 | }, | 160 | }, |
146 | #endif /* __i386__ */ | 161 | #endif /* __i386__ */ |
162 | { | ||
163 | .callback = set_bf_sort, | ||
164 | .ident = "Dell PowerEdge 1950", | ||
165 | .matches = { | ||
166 | DMI_MATCH(DMI_SYS_VENDOR, "Dell"), | ||
167 | DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge 1950"), | ||
168 | }, | ||
169 | }, | ||
170 | { | ||
171 | .callback = set_bf_sort, | ||
172 | .ident = "Dell PowerEdge 1955", | ||
173 | .matches = { | ||
174 | DMI_MATCH(DMI_SYS_VENDOR, "Dell"), | ||
175 | DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge 1955"), | ||
176 | }, | ||
177 | }, | ||
178 | { | ||
179 | .callback = set_bf_sort, | ||
180 | .ident = "Dell PowerEdge 2900", | ||
181 | .matches = { | ||
182 | DMI_MATCH(DMI_SYS_VENDOR, "Dell"), | ||
183 | DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge 2900"), | ||
184 | }, | ||
185 | }, | ||
186 | { | ||
187 | .callback = set_bf_sort, | ||
188 | .ident = "Dell PowerEdge 2950", | ||
189 | .matches = { | ||
190 | DMI_MATCH(DMI_SYS_VENDOR, "Dell"), | ||
191 | DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge 2950"), | ||
192 | }, | ||
193 | }, | ||
147 | {} | 194 | {} |
148 | }; | 195 | }; |
149 | 196 | ||
@@ -189,6 +236,8 @@ static int __init pcibios_init(void) | |||
189 | 236 | ||
190 | pcibios_resource_survey(); | 237 | pcibios_resource_survey(); |
191 | 238 | ||
239 | if (pci_bf_sort >= pci_force_bf) | ||
240 | pci_sort_breadthfirst(); | ||
192 | #ifdef CONFIG_PCI_BIOS | 241 | #ifdef CONFIG_PCI_BIOS |
193 | if ((pci_probe & PCI_BIOS_SORT) && !(pci_probe & PCI_NO_SORT)) | 242 | if ((pci_probe & PCI_BIOS_SORT) && !(pci_probe & PCI_NO_SORT)) |
194 | pcibios_sort(); | 243 | pcibios_sort(); |
@@ -203,6 +252,12 @@ char * __devinit pcibios_setup(char *str) | |||
203 | if (!strcmp(str, "off")) { | 252 | if (!strcmp(str, "off")) { |
204 | pci_probe = 0; | 253 | pci_probe = 0; |
205 | return NULL; | 254 | return NULL; |
255 | } else if (!strcmp(str, "bfsort")) { | ||
256 | pci_bf_sort = pci_force_bf; | ||
257 | return NULL; | ||
258 | } else if (!strcmp(str, "nobfsort")) { | ||
259 | pci_bf_sort = pci_force_nobf; | ||
260 | return NULL; | ||
206 | } | 261 | } |
207 | #ifdef CONFIG_PCI_BIOS | 262 | #ifdef CONFIG_PCI_BIOS |
208 | else if (!strcmp(str, "bios")) { | 263 | else if (!strcmp(str, "bios")) { |
@@ -288,7 +343,6 @@ int pcibios_enable_device(struct pci_dev *dev, int mask) | |||
288 | 343 | ||
289 | void pcibios_disable_device (struct pci_dev *dev) | 344 | void pcibios_disable_device (struct pci_dev *dev) |
290 | { | 345 | { |
291 | pcibios_disable_resources(dev); | ||
292 | if (pcibios_disable_irq) | 346 | if (pcibios_disable_irq) |
293 | pcibios_disable_irq(dev); | 347 | pcibios_disable_irq(dev); |
294 | } | 348 | } |
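The common.c hunk gives an explicit pci=bfsort or pci=nobfsort on the command line priority over the DMI whitelist: set_bf_sort() only changes the state while it is still pci_bf_sort_default, and the breadth-first sort runs when the final state is at or above pci_force_bf. A standalone sketch of that precedence logic follows; it is not the kernel's option parser, and main() merely simulates one boot.

/*
 * Command line wins; the DMI match applies only while the state is
 * still the compiled-in default.
 */
#include <stdio.h>
#include <string.h>

enum pci_bf_sort_state { pci_bf_sort_default, pci_force_nobf, pci_force_bf, pci_dmi_bf };

static enum pci_bf_sort_state pci_bf_sort = pci_bf_sort_default;

static void parse_cmdline(const char *opt)
{
	if (!strcmp(opt, "bfsort"))
		pci_bf_sort = pci_force_bf;
	else if (!strcmp(opt, "nobfsort"))
		pci_bf_sort = pci_force_nobf;
}

static void dmi_match(const char *ident)
{
	if (pci_bf_sort == pci_bf_sort_default) {
		pci_bf_sort = pci_dmi_bf;
		printf("PCI: %s detected, enabling pci=bfsort.\n", ident);
	}
}

int main(void)
{
	parse_cmdline("nobfsort");		/* user explicitly disabled it */
	dmi_match("Dell PowerEdge 2950");	/* DMI hit is therefore ignored */

	if (pci_bf_sort >= pci_force_bf)
		printf("would sort breadth-first\n");
	else
		printf("keeping default depth-first order\n");
	return 0;
}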
diff --git a/arch/i386/pci/fixup.c b/arch/i386/pci/fixup.c index b60d7e8689ed..c1949ff38d61 100644 --- a/arch/i386/pci/fixup.c +++ b/arch/i386/pci/fixup.c | |||
@@ -348,8 +348,8 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_MCH_PC1, pcie_r | |||
348 | * From information provided by "Jon Smirl" <jonsmirl@gmail.com> | 348 | * From information provided by "Jon Smirl" <jonsmirl@gmail.com> |
349 | * | 349 | * |
350 | * The standard boot ROM sequence for an x86 machine uses the BIOS | 350 | * The standard boot ROM sequence for an x86 machine uses the BIOS |
351 | * to select an initial video card for boot display. This boot video | 351 | * to select an initial video card for boot display. This boot video |
352 | card will have its BIOS copied to C0000 in system RAM. | 352 | card will have its BIOS copied to C0000 in system RAM. |
353 | * IORESOURCE_ROM_SHADOW is used to associate the boot video | 353 | * IORESOURCE_ROM_SHADOW is used to associate the boot video |
354 | * card with this copy. On laptops this copy has to be used since | 354 | * card with this copy. On laptops this copy has to be used since |
355 | * the main ROM may be compressed or combined with another image. | 355 | * the main ROM may be compressed or combined with another image. |
@@ -371,7 +371,17 @@ static void __devinit pci_fixup_video(struct pci_dev *pdev) | |||
371 | bus = pdev->bus; | 371 | bus = pdev->bus; |
372 | while (bus) { | 372 | while (bus) { |
373 | bridge = bus->self; | 373 | bridge = bus->self; |
374 | if (bridge) { | 374 | |
375 | /* | ||
376 | * From information provided by | ||
377 | * "David Miller" <davem@davemloft.net> | ||
378 | * The bridge control register is valid for PCI header | ||
379 | * type BRIDGE, or CARDBUS. Host to PCI controllers use | ||
380 | * PCI header type NORMAL. | ||
381 | */ | ||
382 | if (bridge | ||
383 | &&((bridge->hdr_type == PCI_HEADER_TYPE_BRIDGE) | ||
384 | ||(bridge->hdr_type == PCI_HEADER_TYPE_CARDBUS))) { | ||
375 | pci_read_config_word(bridge, PCI_BRIDGE_CONTROL, | 385 | pci_read_config_word(bridge, PCI_BRIDGE_CONTROL, |
376 | &config); | 386 | &config); |
377 | if (!(config & PCI_BRIDGE_CTL_VGA)) | 387 | if (!(config & PCI_BRIDGE_CTL_VGA)) |
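The fixup.c change only reads PCI_BRIDGE_CONTROL when the upstream device really has a bridge or CardBus header, since host controllers use a normal header and have no such register. A minimal sketch of that guard follows; it is not kernel code, and struct fake_dev and its values are invented for illustration.

/*
 * Only bridge and CardBus headers carry a Bridge Control register, so
 * the walk up the bus tree must skip normal-header host controllers.
 */
#include <stdio.h>

#define PCI_HEADER_TYPE_NORMAL	0
#define PCI_HEADER_TYPE_BRIDGE	1
#define PCI_HEADER_TYPE_CARDBUS	2
#define PCI_BRIDGE_CTL_VGA	0x08

struct fake_dev { unsigned char hdr_type; unsigned short bridge_control; };

static int bridge_routes_vga(const struct fake_dev *bridge)
{
	if (bridge->hdr_type != PCI_HEADER_TYPE_BRIDGE &&
	    bridge->hdr_type != PCI_HEADER_TYPE_CARDBUS)
		return 0;	/* no Bridge Control register to read */
	return bridge->bridge_control & PCI_BRIDGE_CTL_VGA;
}

int main(void)
{
	struct fake_dev host   = { PCI_HEADER_TYPE_NORMAL, 0xffff };
	struct fake_dev bridge = { PCI_HEADER_TYPE_BRIDGE, PCI_BRIDGE_CTL_VGA };

	printf("host: %d, bridge: %d\n",
	       bridge_routes_vga(&host), bridge_routes_vga(&bridge));
	return 0;
}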
diff --git a/arch/i386/pci/i386.c b/arch/i386/pci/i386.c index 10154a2cac68..98580292f0d4 100644 --- a/arch/i386/pci/i386.c +++ b/arch/i386/pci/i386.c | |||
@@ -242,15 +242,6 @@ int pcibios_enable_resources(struct pci_dev *dev, int mask) | |||
242 | return 0; | 242 | return 0; |
243 | } | 243 | } |
244 | 244 | ||
245 | void pcibios_disable_resources(struct pci_dev *dev) | ||
246 | { | ||
247 | u16 cmd; | ||
248 | |||
249 | pci_read_config_word(dev, PCI_COMMAND, &cmd); | ||
250 | cmd &= ~(PCI_COMMAND_IO | PCI_COMMAND_MEMORY); | ||
251 | pci_write_config_word(dev, PCI_COMMAND, cmd); | ||
252 | } | ||
253 | |||
254 | /* | 245 | /* |
255 | * If we set up a device for bus mastering, we need to check the latency | 246 | * If we set up a device for bus mastering, we need to check the latency |
256 | * timer as certain crappy BIOSes forget to set it properly. | 247 | * timer as certain crappy BIOSes forget to set it properly. |
diff --git a/arch/i386/pci/irq.c b/arch/i386/pci/irq.c index dbc4aae91959..69163998adeb 100644 --- a/arch/i386/pci/irq.c +++ b/arch/i386/pci/irq.c | |||
@@ -255,13 +255,13 @@ static int pirq_via_set(struct pci_dev *router, struct pci_dev *dev, int pirq, i | |||
255 | */ | 255 | */ |
256 | static int pirq_via586_get(struct pci_dev *router, struct pci_dev *dev, int pirq) | 256 | static int pirq_via586_get(struct pci_dev *router, struct pci_dev *dev, int pirq) |
257 | { | 257 | { |
258 | static const unsigned int pirqmap[4] = { 3, 2, 5, 1 }; | 258 | static const unsigned int pirqmap[5] = { 3, 2, 5, 1, 1 }; |
259 | return read_config_nybble(router, 0x55, pirqmap[pirq-1]); | 259 | return read_config_nybble(router, 0x55, pirqmap[pirq-1]); |
260 | } | 260 | } |
261 | 261 | ||
262 | static int pirq_via586_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq) | 262 | static int pirq_via586_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq) |
263 | { | 263 | { |
264 | static const unsigned int pirqmap[4] = { 3, 2, 5, 1 }; | 264 | static const unsigned int pirqmap[5] = { 3, 2, 5, 1, 1 }; |
265 | write_config_nybble(router, 0x55, pirqmap[pirq-1], irq); | 265 | write_config_nybble(router, 0x55, pirqmap[pirq-1], irq); |
266 | return 1; | 266 | return 1; |
267 | } | 267 | } |
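Widening pirqmap[] to five entries covers the case where the VIA 586 router is queried with pirq 5, which previously indexed one element past the end of the four-entry table. The small illustration below shows the indexing; it is not kernel code, and it assumes pirq values 1 through 5 as the wider table implies.

/*
 * pirqmap[pirq - 1] is now in bounds for every pirq the router can
 * report; with only four entries, pirq 5 read past the array.
 */
#include <stdio.h>

static const unsigned int pirqmap[5] = { 3, 2, 5, 1, 1 };

static unsigned int nybble_index(int pirq)
{
	return pirqmap[pirq - 1];	/* safe for pirq 1..5 */
}

int main(void)
{
	for (int pirq = 1; pirq <= 5; pirq++)
		printf("pirq %d -> nybble %u\n", pirq, nybble_index(pirq));
	return 0;
}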
diff --git a/arch/i386/pci/mmconfig.c b/arch/i386/pci/mmconfig.c index d0c3da3aa2aa..c6b6d9bbc453 100644 --- a/arch/i386/pci/mmconfig.c +++ b/arch/i386/pci/mmconfig.c | |||
@@ -154,38 +154,6 @@ static struct pci_raw_ops pci_mmcfg = { | |||
154 | .write = pci_mmcfg_write, | 154 | .write = pci_mmcfg_write, |
155 | }; | 155 | }; |
156 | 156 | ||
157 | |||
158 | static __init void pci_mmcfg_insert_resources(void) | ||
159 | { | ||
160 | #define PCI_MMCFG_RESOURCE_NAME_LEN 19 | ||
161 | int i; | ||
162 | struct resource *res; | ||
163 | char *names; | ||
164 | unsigned num_buses; | ||
165 | |||
166 | res = kcalloc(PCI_MMCFG_RESOURCE_NAME_LEN + sizeof(*res), | ||
167 | pci_mmcfg_config_num, GFP_KERNEL); | ||
168 | |||
169 | if (!res) { | ||
170 | printk(KERN_ERR "PCI: Unable to allocate MMCONFIG resources\n"); | ||
171 | return; | ||
172 | } | ||
173 | |||
174 | names = (void *)&res[pci_mmcfg_config_num]; | ||
175 | for (i = 0; i < pci_mmcfg_config_num; i++, res++) { | ||
176 | num_buses = pci_mmcfg_config[i].end_bus_number - | ||
177 | pci_mmcfg_config[i].start_bus_number + 1; | ||
178 | res->name = names; | ||
179 | snprintf(names, PCI_MMCFG_RESOURCE_NAME_LEN, "PCI MMCONFIG %u", | ||
180 | pci_mmcfg_config[i].pci_segment_group_number); | ||
181 | res->start = pci_mmcfg_config[i].base_address; | ||
182 | res->end = res->start + (num_buses << 20) - 1; | ||
183 | res->flags = IORESOURCE_MEM | IORESOURCE_BUSY; | ||
184 | insert_resource(&iomem_resource, res); | ||
185 | names += PCI_MMCFG_RESOURCE_NAME_LEN; | ||
186 | } | ||
187 | } | ||
188 | |||
189 | /* K8 systems have some devices (typically in the builtin northbridge) | 157 | /* K8 systems have some devices (typically in the builtin northbridge) |
190 | that are only accessible using type1 | 158 | that are only accessible using type1 |
191 | Normally this can be expressed in the MCFG by not listing them | 159 | Normally this can be expressed in the MCFG by not listing them |
@@ -222,8 +190,6 @@ static __init void unreachable_devices(void) | |||
222 | } | 190 | } |
223 | } | 191 | } |
224 | 192 | ||
225 | |||
226 | |||
227 | void __init pci_mmcfg_init(int type) | 193 | void __init pci_mmcfg_init(int type) |
228 | { | 194 | { |
229 | if ((pci_probe & PCI_PROBE_MMCONF) == 0) | 195 | if ((pci_probe & PCI_PROBE_MMCONF) == 0) |
@@ -251,5 +217,4 @@ void __init pci_mmcfg_init(int type) | |||
251 | pci_probe = (pci_probe & ~PCI_PROBE_MASK) | PCI_PROBE_MMCONF; | 217 | pci_probe = (pci_probe & ~PCI_PROBE_MASK) | PCI_PROBE_MMCONF; |
252 | 218 | ||
253 | unreachable_devices(); | 219 | unreachable_devices(); |
254 | pci_mmcfg_insert_resources(); | ||
255 | } | 220 | } |
diff --git a/arch/i386/pci/pci.h b/arch/i386/pci/pci.h index 1814f74569c6..a0a25180b61a 100644 --- a/arch/i386/pci/pci.h +++ b/arch/i386/pci/pci.h | |||
@@ -30,13 +30,19 @@ | |||
30 | extern unsigned int pci_probe; | 30 | extern unsigned int pci_probe; |
31 | extern unsigned long pirq_table_addr; | 31 | extern unsigned long pirq_table_addr; |
32 | 32 | ||
33 | enum pci_bf_sort_state { | ||
34 | pci_bf_sort_default, | ||
35 | pci_force_nobf, | ||
36 | pci_force_bf, | ||
37 | pci_dmi_bf, | ||
38 | }; | ||
39 | |||
33 | /* pci-i386.c */ | 40 | /* pci-i386.c */ |
34 | 41 | ||
35 | extern unsigned int pcibios_max_latency; | 42 | extern unsigned int pcibios_max_latency; |
36 | 43 | ||
37 | void pcibios_resource_survey(void); | 44 | void pcibios_resource_survey(void); |
38 | int pcibios_enable_resources(struct pci_dev *, int); | 45 | int pcibios_enable_resources(struct pci_dev *, int); |
39 | void pcibios_disable_resources(struct pci_dev *); | ||
40 | 46 | ||
41 | /* pci-pc.c */ | 47 | /* pci-pc.c */ |
42 | 48 | ||