Diffstat (limited to 'arch')
-rw-r--r--   arch/i386/kernel/acpi/boot.c        |  8
-rw-r--r--   arch/i386/kernel/acpi/earlyquirk.c  |  8
-rw-r--r--   arch/x86_64/boot/setup.S            |  5
-rw-r--r--   arch/x86_64/ia32/ptrace32.c         |  2
-rw-r--r--   arch/x86_64/kernel/e820.c           |  4
-rw-r--r--   arch/x86_64/kernel/early-quirks.c   |  8
-rw-r--r--   arch/x86_64/kernel/process.c        |  3
-rw-r--r--   arch/x86_64/kernel/smp.c            |  3
-rw-r--r--   arch/x86_64/kernel/time.c           | 11
-rw-r--r--   arch/x86_64/kernel/vsyscall.c       | 45
-rw-r--r--   arch/x86_64/mm/init.c               | 15
-rw-r--r--   arch/x86_64/pci/mmconfig.c          | 32
12 files changed, 71 insertions, 73 deletions
diff --git a/arch/i386/kernel/acpi/boot.c b/arch/i386/kernel/acpi/boot.c
index 22e4c466e5a3..d12fb97a5337 100644
--- a/arch/i386/kernel/acpi/boot.c
+++ b/arch/i386/kernel/acpi/boot.c
@@ -82,6 +82,7 @@ EXPORT_SYMBOL(acpi_strict);
 acpi_interrupt_flags acpi_sci_flags __initdata;
 int acpi_sci_override_gsi __initdata;
 int acpi_skip_timer_override __initdata;
+int acpi_use_timer_override __initdata;
 
 #ifdef CONFIG_X86_LOCAL_APIC
 static u64 acpi_lapic_addr __initdata = APIC_DEFAULT_PHYS_BASE;
@@ -1300,6 +1301,13 @@ static int __init parse_acpi_skip_timer_override(char *arg)
 	return 0;
 }
 early_param("acpi_skip_timer_override", parse_acpi_skip_timer_override);
+
+static int __init parse_acpi_use_timer_override(char *arg)
+{
+	acpi_use_timer_override = 1;
+	return 0;
+}
+early_param("acpi_use_timer_override", parse_acpi_use_timer_override);
 #endif /* CONFIG_X86_IO_APIC */
 
 static int __init setup_acpi_sci(char *s)
diff --git a/arch/i386/kernel/acpi/earlyquirk.c b/arch/i386/kernel/acpi/earlyquirk.c
index fe799b11ac0a..c9841692bb7c 100644
--- a/arch/i386/kernel/acpi/earlyquirk.c
+++ b/arch/i386/kernel/acpi/earlyquirk.c
@@ -27,11 +27,17 @@ static int __init check_bridge(int vendor, int device)
 #ifdef CONFIG_ACPI
 	/* According to Nvidia all timer overrides are bogus unless HPET
 	   is enabled. */
-	if (vendor == PCI_VENDOR_ID_NVIDIA) {
+	if (!acpi_use_timer_override && vendor == PCI_VENDOR_ID_NVIDIA) {
 		nvidia_hpet_detected = 0;
 		acpi_table_parse(ACPI_HPET, nvidia_hpet_check);
 		if (nvidia_hpet_detected == 0) {
 			acpi_skip_timer_override = 1;
+			printk(KERN_INFO "Nvidia board "
+			       "detected. Ignoring ACPI "
+			       "timer override.\n");
+			printk(KERN_INFO "If you got timer trouble "
+			       "try acpi_use_timer_override\n");
+
 		}
 	}
 #endif
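
Note: the hunk above only changes when the Nvidia HPET quirk fires — the BIOS timer override is now honored if the user passes acpi_use_timer_override, and is skipped only when no HPET table is found. A minimal standalone C sketch of that decision; should_skip_timer_override() is a hypothetical helper for illustration, not a kernel function:

#include <stdio.h>

/* Mirrors the quirk's logic: skip the BIOS timer override only on Nvidia
 * chipsets, only when no HPET was detected, and only when the user did
 * not force acpi_use_timer_override on the command line. */
static int should_skip_timer_override(int is_nvidia, int hpet_detected,
				      int use_timer_override)
{
	if (!is_nvidia || use_timer_override)
		return 0;
	return !hpet_detected;
}

int main(void)
{
	printf("%d\n", should_skip_timer_override(1, 0, 0)); /* 1: skip override */
	printf("%d\n", should_skip_timer_override(1, 0, 1)); /* 0: user forced it */
	return 0;
}
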
diff --git a/arch/x86_64/boot/setup.S b/arch/x86_64/boot/setup.S
index c3bfd223ab49..770940cc0108 100644
--- a/arch/x86_64/boot/setup.S
+++ b/arch/x86_64/boot/setup.S
@@ -836,13 +836,12 @@ gdt:
 	.word	0x9200			# data read/write
 	.word	0x00CF			# granularity = 4096, 386
 					#  (+5th nibble of limit)
+gdt_end:
 idt_48:
 	.word	0			# idt limit = 0
 	.word	0, 0			# idt base = 0L
 gdt_48:
-	.word	0x8000			# gdt limit=2048,
-					#  256 GDT entries
-
+	.word	gdt_end-gdt-1		# gdt limit
 	.word	0, 0			# gdt base (filled in later)
 
 # Include video setup & detection code
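
Note: the setup.S change replaces a hard-coded GDT limit with gdt_end-gdt-1, i.e. the table's actual size in bytes minus one, which is what the GDTR limit field expects. A small sketch of that arithmetic; the entry count of 4 is an assumption for illustration, not taken from the patch:

#include <stdio.h>

int main(void)
{
	unsigned entries = 4;                 /* assumed descriptor count */
	unsigned limit = entries * 8 - 1;     /* 8-byte descriptors, size - 1 */
	printf("gdt limit = 0x%x\n", limit);  /* 0x1f for 4 entries */
	return 0;
}
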
diff --git a/arch/x86_64/ia32/ptrace32.c b/arch/x86_64/ia32/ptrace32.c
index 3a7561d4703e..04566fe5de49 100644
--- a/arch/x86_64/ia32/ptrace32.c
+++ b/arch/x86_64/ia32/ptrace32.c
@@ -244,6 +244,8 @@ asmlinkage long sys32_ptrace(long request, u32 pid, u32 addr, u32 data)
 	case PTRACE_DETACH:
 	case PTRACE_SYSCALL:
 	case PTRACE_SETOPTIONS:
+	case PTRACE_SET_THREAD_AREA:
+	case PTRACE_GET_THREAD_AREA:
 		return sys_ptrace(request, pid, addr, data);
 
 	default:
diff --git a/arch/x86_64/kernel/e820.c b/arch/x86_64/kernel/e820.c
index a75c829c2b02..6fe191c58084 100644
--- a/arch/x86_64/kernel/e820.c
+++ b/arch/x86_64/kernel/e820.c
@@ -278,7 +278,7 @@ e820_register_active_regions(int nid, unsigned long start_pfn,
 						>> PAGE_SHIFT;
 
 		/* Skip map entries smaller than a page */
-		if (ei_startpfn > ei_endpfn)
+		if (ei_startpfn >= ei_endpfn)
 			continue;
 
 		/* Check if end_pfn_map should be updated */
@@ -594,7 +594,9 @@ static int __init parse_memmap_opt(char *p)
 			 * size before original memory map is
 			 * reset.
 			 */
+			e820_register_active_regions(0, 0, -1UL);
 			saved_max_pfn = e820_end_of_ram();
+			remove_all_active_ranges();
 #endif
 			end_pfn_map = 0;
 			e820.nr_map = 0;
diff --git a/arch/x86_64/kernel/early-quirks.c b/arch/x86_64/kernel/early-quirks.c
index 2b1245d86258..68273bff58cc 100644
--- a/arch/x86_64/kernel/early-quirks.c
+++ b/arch/x86_64/kernel/early-quirks.c
@@ -45,7 +45,13 @@ static void nvidia_bugs(void)
 	/*
 	 * All timer overrides on Nvidia are
 	 * wrong unless HPET is enabled.
+	 * Unfortunately that's not true on many Asus boards.
+	 * We don't know yet how to detect this automatically, but
+	 * at least allow a command line override.
 	 */
+	if (acpi_use_timer_override)
+		return;
+
 	nvidia_hpet_detected = 0;
 	acpi_table_parse(ACPI_HPET, nvidia_hpet_check);
 	if (nvidia_hpet_detected == 0) {
@@ -53,6 +59,8 @@ static void nvidia_bugs(void)
 		printk(KERN_INFO "Nvidia board "
 		       "detected. Ignoring ACPI "
 		       "timer override.\n");
+		printk(KERN_INFO "If you got timer trouble "
+		       "try acpi_use_timer_override\n");
 	}
 #endif
 	/* RED-PEN skip them on mptables too? */
diff --git a/arch/x86_64/kernel/process.c b/arch/x86_64/kernel/process.c
index 49f7fac6229e..f6226055d53d 100644
--- a/arch/x86_64/kernel/process.c
+++ b/arch/x86_64/kernel/process.c
@@ -88,9 +88,8 @@ void enter_idle(void)
 
 static void __exit_idle(void)
 {
-	if (read_pda(isidle) == 0)
+	if (test_and_clear_bit_pda(0, isidle) == 0)
 		return;
-	write_pda(isidle, 0);
 	atomic_notifier_call_chain(&idle_notifier, IDLE_END, NULL);
 }
 
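
Note: __exit_idle() previously read the PDA flag and cleared it in a separate write; the patch folds both into one test-and-clear operation. A standalone sketch of the same read-and-clear-in-one-step pattern using C11 atomics; exit_idle_once() and the plain atomic_int are illustrative stand-ins, not the kernel's PDA accessors:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int isidle = 1;

static int exit_idle_once(void)
{
	/* atomically fetch the previous value and clear the flag */
	return atomic_exchange(&isidle, 0);
}

int main(void)
{
	printf("%d\n", exit_idle_once());	/* 1: was idle, run IDLE_END notifiers */
	printf("%d\n", exit_idle_once());	/* 0: already cleared, nothing to do */
	return 0;
}
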
diff --git a/arch/x86_64/kernel/smp.c b/arch/x86_64/kernel/smp.c
index 4f67697f5036..9f74c883568c 100644
--- a/arch/x86_64/kernel/smp.c
+++ b/arch/x86_64/kernel/smp.c
@@ -376,9 +376,8 @@ int smp_call_function_single (int cpu, void (*func) (void *info), void *info,
 	/* prevent preemption and reschedule on another processor */
 	int me = get_cpu();
 	if (cpu == me) {
-		WARN_ON(1);
 		put_cpu();
-		return -EBUSY;
+		return 0;
 	}
 	spin_lock_bh(&call_lock);
 	__smp_call_function_single(cpu, func, info, nonatomic, wait);
diff --git a/arch/x86_64/kernel/time.c b/arch/x86_64/kernel/time.c
index 88722f11ca13..e3ef544d2cfb 100644
--- a/arch/x86_64/kernel/time.c
+++ b/arch/x86_64/kernel/time.c
@@ -876,15 +876,6 @@ static struct irqaction irq0 = {
 	timer_interrupt, IRQF_DISABLED, CPU_MASK_NONE, "timer", NULL, NULL
 };
 
-static int __cpuinit
-time_cpu_notifier(struct notifier_block *nb, unsigned long action, void *hcpu)
-{
-	unsigned cpu = (unsigned long) hcpu;
-	if (action == CPU_ONLINE)
-		vsyscall_set_cpu(cpu);
-	return NOTIFY_DONE;
-}
-
 void __init time_init(void)
 {
 	if (nohpet)
@@ -925,8 +916,6 @@ void __init time_init(void)
 	vxtime.last_tsc = get_cycles_sync();
 	set_cyc2ns_scale(cpu_khz);
 	setup_irq(0, &irq0);
-	hotcpu_notifier(time_cpu_notifier, 0);
-	time_cpu_notifier(NULL, CPU_ONLINE, (void *)(long)smp_processor_id());
 
 #ifndef CONFIG_SMP
 	time_init_gtod();
diff --git a/arch/x86_64/kernel/vsyscall.c b/arch/x86_64/kernel/vsyscall.c
index a98b460af6a1..a730bacecb0b 100644
--- a/arch/x86_64/kernel/vsyscall.c
+++ b/arch/x86_64/kernel/vsyscall.c
@@ -27,6 +27,9 @@
 #include <linux/jiffies.h>
 #include <linux/sysctl.h>
 #include <linux/getcpu.h>
+#include <linux/cpu.h>
+#include <linux/smp.h>
+#include <linux/notifier.h>
 
 #include <asm/vsyscall.h>
 #include <asm/pgtable.h>
@@ -243,32 +246,17 @@ static ctl_table kernel_root_table2[] = {
 
 #endif
 
-static void __cpuinit write_rdtscp_cb(void *info)
-{
-	write_rdtscp_aux((unsigned long)info);
-}
-
-void __cpuinit vsyscall_set_cpu(int cpu)
+/* Assume __initcall executes before all user space. Hopefully kmod
+   doesn't violate that. We'll find out if it does. */
+static void __cpuinit vsyscall_set_cpu(int cpu)
 {
 	unsigned long *d;
 	unsigned long node = 0;
 #ifdef CONFIG_NUMA
 	node = cpu_to_node[cpu];
 #endif
-	if (cpu_has(&cpu_data[cpu], X86_FEATURE_RDTSCP)) {
-		void *info = (void *)((node << 12) | cpu);
-		/* Can happen on preemptive kernel */
-		if (get_cpu() == cpu)
-			write_rdtscp_cb(info);
-#ifdef CONFIG_SMP
-		else {
-			/* the notifier is unfortunately not executed on the
-			   target CPU */
-			smp_call_function_single(cpu,write_rdtscp_cb,info,0,1);
-		}
-#endif
-		put_cpu();
-	}
+	if (cpu_has(&cpu_data[cpu], X86_FEATURE_RDTSCP))
+		write_rdtscp_aux((node << 12) | cpu);
 
 	/* Store cpu number in limit so that it can be loaded quickly
 	   in user space in vgetcpu.
@@ -280,6 +268,21 @@ void __cpuinit vsyscall_set_cpu(int cpu)
 	*d |= (node >> 4) << 48;
 }
 
+static void __cpuinit cpu_vsyscall_init(void *arg)
+{
+	/* preemption should be already off */
+	vsyscall_set_cpu(raw_smp_processor_id());
+}
+
+static int __cpuinit
+cpu_vsyscall_notifier(struct notifier_block *n, unsigned long action, void *arg)
+{
+	long cpu = (long)arg;
+	if (action == CPU_ONLINE)
+		smp_call_function_single(cpu, cpu_vsyscall_init, NULL, 0, 1);
+	return NOTIFY_DONE;
+}
+
 static void __init map_vsyscall(void)
 {
 	extern char __vsyscall_0;
@@ -299,6 +302,8 @@ static int __init vsyscall_init(void)
 #ifdef CONFIG_SYSCTL
 	register_sysctl_table(kernel_root_table2, 0);
 #endif
+	on_each_cpu(cpu_vsyscall_init, NULL, 0, 1);
+	hotcpu_notifier(cpu_vsyscall_notifier, 0);
 	return 0;
 }
 
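
Note: vsyscall_set_cpu() above packs the NUMA node into the upper bits and the CPU number into the low 12 bits of the value handed to write_rdtscp_aux(), so RDTSCP (or the GDT-limit trick mentioned in the code comment) can deliver both to user space in one load. A standalone sketch of the unpacking side; decode_tsc_aux() is a hypothetical helper, not the kernel's vgetcpu:

#include <stdio.h>

static void decode_tsc_aux(unsigned long aux, unsigned *cpu, unsigned *node)
{
	*cpu  = aux & 0xfff;	/* low 12 bits: CPU number */
	*node = aux >> 12;	/* remaining bits: NUMA node */
}

int main(void)
{
	unsigned cpu, node;
	decode_tsc_aux((1UL << 12) | 3, &cpu, &node);	/* node 1, cpu 3 */
	printf("cpu=%u node=%u\n", cpu, node);
	return 0;
}
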
diff --git a/arch/x86_64/mm/init.c b/arch/x86_64/mm/init.c
index 971dc1181e69..f1f977aafae1 100644
--- a/arch/x86_64/mm/init.c
+++ b/arch/x86_64/mm/init.c
@@ -655,9 +655,22 @@ void free_initrd_mem(unsigned long start, unsigned long end)
 
 void __init reserve_bootmem_generic(unsigned long phys, unsigned len)
 {
-	/* Should check here against the e820 map to avoid double free */
 #ifdef CONFIG_NUMA
 	int nid = phys_to_nid(phys);
+#endif
+	unsigned long pfn = phys >> PAGE_SHIFT;
+	if (pfn >= end_pfn) {
+		/* This can happen with kdump kernels when accessing firmware
+		   tables. */
+		if (pfn < end_pfn_map)
+			return;
+		printk(KERN_ERR "reserve_bootmem: illegal reserve %lx %u\n",
+				phys, len);
+		return;
+	}
+
+	/* Should check here against the e820 map to avoid double free */
+#ifdef CONFIG_NUMA
 	reserve_bootmem_node(NODE_DATA(nid), phys, len);
 #else
 	reserve_bootmem(phys, len);
diff --git a/arch/x86_64/pci/mmconfig.c b/arch/x86_64/pci/mmconfig.c
index e61093b34c26..f8b6b2800a62 100644
--- a/arch/x86_64/pci/mmconfig.c
+++ b/arch/x86_64/pci/mmconfig.c
@@ -163,37 +163,6 @@ static __init void unreachable_devices(void)
 	}
 }
 
-static __init void pci_mmcfg_insert_resources(void)
-{
-#define PCI_MMCFG_RESOURCE_NAME_LEN 19
-	int i;
-	struct resource *res;
-	char *names;
-	unsigned num_buses;
-
-	res = kcalloc(PCI_MMCFG_RESOURCE_NAME_LEN + sizeof(*res),
-			pci_mmcfg_config_num, GFP_KERNEL);
-
-	if (!res) {
-		printk(KERN_ERR "PCI: Unable to allocate MMCONFIG resources\n");
-		return;
-	}
-
-	names = (void *)&res[pci_mmcfg_config_num];
-	for (i = 0; i < pci_mmcfg_config_num; i++, res++) {
-		num_buses = pci_mmcfg_config[i].end_bus_number -
-			    pci_mmcfg_config[i].start_bus_number + 1;
-		res->name = names;
-		snprintf(names, PCI_MMCFG_RESOURCE_NAME_LEN, "PCI MMCONFIG %u",
-			 pci_mmcfg_config[i].pci_segment_group_number);
-		res->start = pci_mmcfg_config[i].base_address;
-		res->end = res->start + (num_buses << 20) - 1;
-		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
-		insert_resource(&iomem_resource, res);
-		names += PCI_MMCFG_RESOURCE_NAME_LEN;
-	}
-}
-
 void __init pci_mmcfg_init(int type)
 {
 	int i;
@@ -237,7 +206,6 @@ void __init pci_mmcfg_init(int type)
 	}
 
 	unreachable_devices();
-	pci_mmcfg_insert_resources();
 
 	raw_pci_ops = &pci_mmcfg;
 	pci_probe = (pci_probe & ~PCI_PROBE_MASK) | PCI_PROBE_MMCONF;