author    | Benjamin Herrenschmidt <benh@kernel.crashing.org> | 2009-03-11 02:10:07 -0400
committer | Benjamin Herrenschmidt <benh@kernel.crashing.org> | 2009-03-11 02:10:07 -0400
commit    | e14eee56c2280953c6e3d24d5dce42bd90836b81
tree      | 21ab792d9ad6fbbab460058f352a0158f995644e /arch/x86/kernel
parent    | d6ee6f7e4c74d9a0fed7544f4d389bde004651d3
parent    | 99adcd9d67aaf04e28f5ae96df280f236bde4b66
Merge commit 'origin/master' into next
Diffstat (limited to 'arch/x86/kernel')
-rw-r--r-- | arch/x86/kernel/acpi/wakeup_64.S          | 30
-rw-r--r-- | arch/x86/kernel/apic.c                    |  2
-rw-r--r-- | arch/x86/kernel/apm_32.c                  |  4
-rw-r--r-- | arch/x86/kernel/cpu/cpufreq/p4-clockmod.c |  1
-rw-r--r-- | arch/x86/kernel/cpu/mcheck/mce_64.c       |  7
-rw-r--r-- | arch/x86/kernel/cpu/mcheck/mce_amd_64.c   |  2
-rw-r--r-- | arch/x86/kernel/cpu/mcheck/mce_intel_64.c |  4
-rw-r--r-- | arch/x86/kernel/ds.c                      |  3
-rw-r--r-- | arch/x86/kernel/efi.c                     |  7
-rw-r--r-- | arch/x86/kernel/efi_64.c                  | 21
-rw-r--r-- | arch/x86/kernel/i387.c                    |  2
-rw-r--r-- | arch/x86/kernel/process_32.c              |  3
-rw-r--r-- | arch/x86/kernel/ptrace.c                  |  2
-rw-r--r-- | arch/x86/kernel/reboot.c                  |  8
-rw-r--r-- | arch/x86/kernel/setup.c                   |  5
-rw-r--r-- | arch/x86/kernel/time_64.c                 |  2
-rw-r--r-- | arch/x86/kernel/vmiclock_32.c             |  7
17 files changed, 51 insertions, 59 deletions
diff --git a/arch/x86/kernel/acpi/wakeup_64.S b/arch/x86/kernel/acpi/wakeup_64.S
index bcc293423a70..96258d9dc974 100644
--- a/arch/x86/kernel/acpi/wakeup_64.S
+++ b/arch/x86/kernel/acpi/wakeup_64.S
@@ -13,7 +13,6 @@
  * Hooray, we are in Long 64-bit mode (but still running in low memory)
  */
 ENTRY(wakeup_long64)
-wakeup_long64:
	movq	saved_magic, %rax
	movq	$0x123456789abcdef0, %rdx
	cmpq	%rdx, %rax
@@ -34,16 +33,12 @@ wakeup_long64:
 
 	movq	saved_rip, %rax
 	jmp	*%rax
+ENDPROC(wakeup_long64)
 
 bogus_64_magic:
 	jmp	bogus_64_magic
 
-	.align 2
-	.p2align 4,,15
-.globl do_suspend_lowlevel
-	.type	do_suspend_lowlevel,@function
-do_suspend_lowlevel:
-.LFB5:
+ENTRY(do_suspend_lowlevel)
 	subq	$8, %rsp
 	xorl	%eax, %eax
 	call	save_processor_state
@@ -67,7 +62,7 @@ do_suspend_lowlevel:
 	pushfq
 	popq	pt_regs_flags(%rax)
 
-	movq	$.L97, saved_rip(%rip)
+	movq	$resume_point, saved_rip(%rip)
 
 	movq	%rsp, saved_rsp
 	movq	%rbp, saved_rbp
@@ -78,14 +73,12 @@ do_suspend_lowlevel:
 	addq	$8, %rsp
 	movl	$3, %edi
 	xorl	%eax, %eax
-	jmp	acpi_enter_sleep_state
-.L97:
-	.p2align 4,,7
-.L99:
-	.align 4
-	movl	$24, %eax
-	movw	%ax, %ds
+	call	acpi_enter_sleep_state
+	/* in case something went wrong, restore the machine status and go on */
+	jmp	resume_point
 
+	.align 4
+resume_point:
 	/* We don't restore %rax, it must be 0 anyway */
 	movq	$saved_context, %rax
 	movq	saved_context_cr4(%rax), %rbx
@@ -117,12 +110,9 @@ do_suspend_lowlevel:
 	xorl	%eax, %eax
 	addq	$8, %rsp
 	jmp	restore_processor_state
-.LFE5:
-.Lfe5:
-	.size	do_suspend_lowlevel, .Lfe5-do_suspend_lowlevel
-
+ENDPROC(do_suspend_lowlevel)
+
 	.data
-ALIGN
 ENTRY(saved_rbp)	.quad	0
 ENTRY(saved_rsi)	.quad	0
 ENTRY(saved_rdi)	.quad	0
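
The wakeup_64.S hunks above swap hand-rolled assembler boilerplate (.globl/.type/.size directives plus compiler-style local labels such as .LFB5 and .L97) for the kernel's ENTRY()/ENDPROC() linkage macros, and turn the tail call "jmp acpi_enter_sleep_state" into a "call" with an explicit resume_point fallback. As a rough, illustrative paraphrase of the linkage helpers (not the authoritative arch/x86 definitions; ALIGN in particular is arch-provided), they expand along these lines:

/* Illustrative sketch of the ENTRY/ENDPROC helpers used above;
 * ALIGN is an arch-specific alignment directive. */
#define ENTRY(name)	\
	.globl name;	\
	ALIGN;		\
	name:

#define END(name)	\
	.size name, .-name

#define ENDPROC(name)		\
	.type name, @function;	\
	END(name)

So ENTRY(do_suspend_lowlevel)/ENDPROC(do_suspend_lowlevel) emits the same global symbol, type and size information that the removed directives spelled out by hand.
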
diff --git a/arch/x86/kernel/apic.c b/arch/x86/kernel/apic.c
index 115449f869ee..570f36e44e59 100644
--- a/arch/x86/kernel/apic.c
+++ b/arch/x86/kernel/apic.c
@@ -862,7 +862,7 @@ void clear_local_APIC(void)
 	}
 
 	/* lets not touch this if we didn't frob it */
-#if defined(CONFIG_X86_MCE_P4THERMAL) || defined(X86_MCE_INTEL)
+#if defined(CONFIG_X86_MCE_P4THERMAL) || defined(CONFIG_X86_MCE_INTEL)
 	if (maxlvt >= 5) {
 		v = apic_read(APIC_LVTTHMR);
 		apic_write(APIC_LVTTHMR, v | APIC_LVT_MASKED);
diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
index 98807bb095ad..266ec6c18b6c 100644
--- a/arch/x86/kernel/apm_32.c
+++ b/arch/x86/kernel/apm_32.c
@@ -1192,6 +1192,7 @@ static int suspend(int vetoable)
 	device_suspend(PMSG_SUSPEND);
 	local_irq_disable();
 	device_power_down(PMSG_SUSPEND);
+	sysdev_suspend(PMSG_SUSPEND);
 
 	local_irq_enable();
 
@@ -1208,6 +1209,7 @@ static int suspend(int vetoable)
 	if (err != APM_SUCCESS)
 		apm_error("suspend", err);
 	err = (err == APM_SUCCESS) ? 0 : -EIO;
+	sysdev_resume();
 	device_power_up(PMSG_RESUME);
 	local_irq_enable();
 	device_resume(PMSG_RESUME);
@@ -1228,6 +1230,7 @@ static void standby(void)
 
 	local_irq_disable();
 	device_power_down(PMSG_SUSPEND);
+	sysdev_suspend(PMSG_SUSPEND);
 	local_irq_enable();
 
 	err = set_system_power_state(APM_STATE_STANDBY);
@@ -1235,6 +1238,7 @@ static void standby(void)
 		apm_error("standby", err);
 
 	local_irq_disable();
+	sysdev_resume();
 	device_power_up(PMSG_RESUME);
 	local_irq_enable();
 }
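
All four apm_32.c hunks add the same bracketing: sysdev_suspend() becomes the last step after device_power_down(), and sysdev_resume() the first step before device_power_up(), mirroring the ordering used by the generic suspend path of this kernel generation. A minimal sketch of that ordering follows; enter_low_power_state() is a hypothetical placeholder for set_system_power_state(), and the real suspend() in apm_32.c additionally re-enables interrupts around the BIOS call:

/* Sketch only: the suspend/resume bracketing established above.
 * Assumes kernel context (linux/device.h, linux/sysdev.h, linux/pm.h). */
static int apm_style_suspend(void)
{
	int err;

	device_suspend(PMSG_SUSPEND);		/* ordinary devices first */
	local_irq_disable();
	device_power_down(PMSG_SUSPEND);	/* late, irqs-off device hook */
	sysdev_suspend(PMSG_SUSPEND);		/* system devices go down last */

	err = enter_low_power_state();		/* placeholder for the BIOS call */

	sysdev_resume();			/* system devices come back first */
	device_power_up(PMSG_RESUME);
	local_irq_enable();
	device_resume(PMSG_RESUME);
	return err;
}
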
diff --git a/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c b/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c
index b585e04cbc9e..3178c3acd97e 100644
--- a/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c
+++ b/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c
@@ -277,7 +277,6 @@ static struct cpufreq_driver p4clockmod_driver = {
 	.name		= "p4-clockmod",
 	.owner		= THIS_MODULE,
 	.attr		= p4clockmod_attr,
-	.hide_interface	= 1,
 };
 
 
diff --git a/arch/x86/kernel/cpu/mcheck/mce_64.c b/arch/x86/kernel/cpu/mcheck/mce_64.c
index 1c838032fd37..fe79985ce0f2 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_64.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_64.c
@@ -295,11 +295,11 @@ void do_machine_check(struct pt_regs * regs, long error_code)
 		 * If we know that the error was in user space, send a
 		 * SIGBUS. Otherwise, panic if tolerance is low.
 		 *
-		 * do_exit() takes an awful lot of locks and has a slight
+		 * force_sig() takes an awful lot of locks and has a slight
 		 * risk of deadlocking.
 		 */
 		if (user_space) {
-			do_exit(SIGBUS);
+			force_sig(SIGBUS, current);
 		} else if (panic_on_oops || tolerant < 2) {
 			mce_panic("Uncorrected machine check",
 				&panicm, mcestart);
@@ -490,7 +490,7 @@ static void __cpuinit mce_cpu_quirks(struct cpuinfo_x86 *c)
 
 }
 
-static void __cpuinit mce_cpu_features(struct cpuinfo_x86 *c)
+static void mce_cpu_features(struct cpuinfo_x86 *c)
 {
 	switch (c->x86_vendor) {
 	case X86_VENDOR_INTEL:
@@ -734,6 +734,7 @@ __setup("mce=", mcheck_enable);
 static int mce_resume(struct sys_device *dev)
 {
 	mce_init(NULL);
+	mce_cpu_features(&current_cpu_data);
 	return 0;
 }
 
diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd_64.c b/arch/x86/kernel/cpu/mcheck/mce_amd_64.c
index 8ae8c4ff094d..f2ee0ae29bd6 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_amd_64.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_amd_64.c
@@ -121,7 +121,7 @@ static long threshold_restart_bank(void *_tr)
 }
 
 /* cpu init entry point, called from mce.c with preempt off */
-void __cpuinit mce_amd_feature_init(struct cpuinfo_x86 *c)
+void mce_amd_feature_init(struct cpuinfo_x86 *c)
 {
 	unsigned int bank, block;
 	unsigned int cpu = smp_processor_id();
diff --git a/arch/x86/kernel/cpu/mcheck/mce_intel_64.c b/arch/x86/kernel/cpu/mcheck/mce_intel_64.c
index 4b48f251fd39..f44c36624360 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_intel_64.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_intel_64.c
@@ -30,7 +30,7 @@ asmlinkage void smp_thermal_interrupt(void)
 	irq_exit();
 }
 
-static void __cpuinit intel_init_thermal(struct cpuinfo_x86 *c)
+static void intel_init_thermal(struct cpuinfo_x86 *c)
 {
 	u32 l, h;
 	int tm2 = 0;
@@ -84,7 +84,7 @@ static void __cpuinit intel_init_thermal(struct cpuinfo_x86 *c)
 	return;
 }
 
-void __cpuinit mce_intel_feature_init(struct cpuinfo_x86 *c)
+void mce_intel_feature_init(struct cpuinfo_x86 *c)
 {
 	intel_init_thermal(c);
 }
diff --git a/arch/x86/kernel/ds.c b/arch/x86/kernel/ds.c
index 169a120587be..87b67e3a765a 100644
--- a/arch/x86/kernel/ds.c
+++ b/arch/x86/kernel/ds.c
@@ -729,7 +729,7 @@ struct pebs_tracer *ds_request_pebs(struct task_struct *task,
 
 	spin_unlock_irqrestore(&ds_lock, irq);
 
-	ds_write_config(tracer->ds.context, &tracer->trace.ds, ds_bts);
+	ds_write_config(tracer->ds.context, &tracer->trace.ds, ds_pebs);
 	ds_resume_pebs(tracer);
 
 	return tracer;
@@ -1029,5 +1029,4 @@ void ds_copy_thread(struct task_struct *tsk, struct task_struct *father)
 
 void ds_exit_thread(struct task_struct *tsk)
 {
-	WARN_ON(tsk->thread.ds_ctx);
 }
diff --git a/arch/x86/kernel/efi.c b/arch/x86/kernel/efi.c
index 1119d247fe11..eb1ef3b67dd5 100644
--- a/arch/x86/kernel/efi.c
+++ b/arch/x86/kernel/efi.c
@@ -467,7 +467,7 @@ void __init efi_enter_virtual_mode(void)
 	efi_memory_desc_t *md;
 	efi_status_t status;
 	unsigned long size;
-	u64 end, systab, addr, npages;
+	u64 end, systab, addr, npages, end_pfn;
 	void *p, *va;
 
 	efi.systab = NULL;
@@ -479,7 +479,10 @@ void __init efi_enter_virtual_mode(void)
 		size = md->num_pages << EFI_PAGE_SHIFT;
 		end = md->phys_addr + size;
 
-		if (PFN_UP(end) <= max_low_pfn_mapped)
+		end_pfn = PFN_UP(end);
+		if (end_pfn <= max_low_pfn_mapped
+		    || (end_pfn > (1UL << (32 - PAGE_SHIFT))
+			&& end_pfn <= max_pfn_mapped))
 			va = __va(md->phys_addr);
 		else
 			va = efi_ioremap(md->phys_addr, size);
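
The new efi.c test decides whether an EFI region is already covered by the kernel's direct mapping (and can simply be addressed through __va()) or still needs efi_ioremap(). Written out as a standalone predicate purely for illustration (region_already_mapped() does not exist in the kernel; the variable names follow the hunk):

/* Illustrative helper only: the condition added in efi_enter_virtual_mode().
 * A region whose last pfn is below max_low_pfn_mapped sits in the low
 * direct mapping; one that ends above 4GB but still below max_pfn_mapped
 * sits in the high direct mapping; anything else must be ioremapped. */
static bool region_already_mapped(u64 end_pfn)
{
	if (end_pfn <= max_low_pfn_mapped)
		return true;
	if (end_pfn > (1UL << (32 - PAGE_SHIFT)) && end_pfn <= max_pfn_mapped)
		return true;
	return false;
}
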
diff --git a/arch/x86/kernel/efi_64.c b/arch/x86/kernel/efi_64.c
index 652c5287215f..cb783b92c50c 100644
--- a/arch/x86/kernel/efi_64.c
+++ b/arch/x86/kernel/efi_64.c
@@ -99,24 +99,11 @@ void __init efi_call_phys_epilog(void)
 
 void __iomem *__init efi_ioremap(unsigned long phys_addr, unsigned long size)
 {
-	static unsigned pages_mapped __initdata;
-	unsigned i, pages;
-	unsigned long offset;
+	unsigned long last_map_pfn;
 
-	pages = PFN_UP(phys_addr + size) - PFN_DOWN(phys_addr);
-	offset = phys_addr & ~PAGE_MASK;
-	phys_addr &= PAGE_MASK;
-
-	if (pages_mapped + pages > MAX_EFI_IO_PAGES)
+	last_map_pfn = init_memory_mapping(phys_addr, phys_addr + size);
+	if ((last_map_pfn << PAGE_SHIFT) < phys_addr + size)
 		return NULL;
 
-	for (i = 0; i < pages; i++) {
-		__set_fixmap(FIX_EFI_IO_MAP_FIRST_PAGE - pages_mapped,
-			     phys_addr, PAGE_KERNEL);
-		phys_addr += PAGE_SIZE;
-		pages_mapped++;
-	}
-
-	return (void __iomem *)__fix_to_virt(FIX_EFI_IO_MAP_FIRST_PAGE - \
-					     (pages_mapped - pages)) + offset;
+	return (void __iomem *)__va(phys_addr);
 }
diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
index b0f61f0dcd0a..f2f8540a7f3d 100644
--- a/arch/x86/kernel/i387.c
+++ b/arch/x86/kernel/i387.c
@@ -136,7 +136,7 @@ int init_fpu(struct task_struct *tsk)
 #ifdef CONFIG_X86_32
 	if (!HAVE_HWFP) {
 		memset(tsk->thread.xstate, 0, xstate_size);
-		finit();
+		finit_task(tsk);
 		set_stopped_child_used_math(tsk);
 		return 0;
 	}
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index a546f55c77b4..bd4da2af08ae 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -104,9 +104,6 @@ void cpu_idle(void)
 			check_pgt_cache();
 			rmb();
 
-			if (rcu_pending(cpu))
-				rcu_check_callbacks(cpu, 0);
-
 			if (cpu_is_offline(cpu))
 				play_dead();
 
diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
index 5a4c23d89892..06ca07f6ad86 100644
--- a/arch/x86/kernel/ptrace.c
+++ b/arch/x86/kernel/ptrace.c
@@ -1388,7 +1388,7 @@ void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
 #ifdef CONFIG_X86_32
 # define IS_IA32	1
 #elif defined CONFIG_IA32_EMULATION
-# define IS_IA32	test_thread_flag(TIF_IA32)
+# define IS_IA32	is_compat_task()
 #else
 # define IS_IA32	0
 #endif
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index 2b46eb41643b..4526b3a75ed2 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -217,6 +217,14 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = {
 			DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq"),
 		},
 	},
+	{	/* Handle problems with rebooting on Dell XPS710 */
+		.callback = set_bios_reboot,
+		.ident = "Dell XPS710",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+			DMI_MATCH(DMI_PRODUCT_NAME, "Dell XPS710"),
+		},
+	},
 	{ }
 };
 
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index c461f6d69074..6a8811a69324 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -770,6 +770,9 @@ void __init setup_arch(char **cmdline_p)
 
 	finish_e820_parsing();
 
+	if (efi_enabled)
+		efi_init();
+
 	dmi_scan_machine();
 
 	dmi_check_system(bad_bios_dmi_table);
@@ -789,8 +792,6 @@ void __init setup_arch(char **cmdline_p)
 	insert_resource(&iomem_resource, &data_resource);
 	insert_resource(&iomem_resource, &bss_resource);
 
-	if (efi_enabled)
-		efi_init();
 
 #ifdef CONFIG_X86_32
 	if (ppro_with_ram_bug()) {
diff --git a/arch/x86/kernel/time_64.c b/arch/x86/kernel/time_64.c
index e6e695acd725..241ec3923f61 100644
--- a/arch/x86/kernel/time_64.c
+++ b/arch/x86/kernel/time_64.c
@@ -115,7 +115,7 @@ unsigned long __init calibrate_cpu(void)
 
 static struct irqaction irq0 = {
 	.handler = timer_interrupt,
-	.flags = IRQF_DISABLED | IRQF_IRQPOLL | IRQF_NOBALANCING,
+	.flags = IRQF_DISABLED | IRQF_IRQPOLL | IRQF_NOBALANCING | IRQF_TIMER,
 	.mask = CPU_MASK_NONE,
 	.name = "timer"
 };
diff --git a/arch/x86/kernel/vmiclock_32.c b/arch/x86/kernel/vmiclock_32.c
index c4c1f9e09402..e5b088fffa40 100644
--- a/arch/x86/kernel/vmiclock_32.c
+++ b/arch/x86/kernel/vmiclock_32.c
@@ -202,7 +202,7 @@ static irqreturn_t vmi_timer_interrupt(int irq, void *dev_id)
 static struct irqaction vmi_clock_action = {
 	.name = "vmi-timer",
 	.handler = vmi_timer_interrupt,
-	.flags = IRQF_DISABLED | IRQF_NOBALANCING,
+	.flags = IRQF_DISABLED | IRQF_NOBALANCING | IRQF_TIMER,
 	.mask = CPU_MASK_ALL,
 };
 
@@ -283,10 +283,13 @@ void __devinit vmi_time_ap_init(void)
 #endif
 
 /** vmi clocksource */
+static struct clocksource clocksource_vmi;
 
 static cycle_t read_real_cycles(void)
 {
-	return vmi_timer_ops.get_cycle_counter(VMI_CYCLES_REAL);
+	cycle_t ret = (cycle_t)vmi_timer_ops.get_cycle_counter(VMI_CYCLES_REAL);
+	return ret >= clocksource_vmi.cycle_last ?
+		ret : clocksource_vmi.cycle_last;
 }
 
 static struct clocksource clocksource_vmi = {
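
The final hunk forward-declares clocksource_vmi so that read_real_cycles() can clamp the value it returns against the clocksource's cycle_last, preventing time from appearing to run backwards if the hypervisor's cycle counter jumps back. The same clamp, as a generic sketch (clocksource_foo and read_hw_counter() are made-up names standing in for the VMI specifics):

/* Sketch of the "never go backwards" clamp used above.
 * Assumes kernel context (linux/clocksource.h);
 * read_hw_counter() is a hypothetical raw counter read. */
static u64 read_hw_counter(void);

static struct clocksource clocksource_foo;	/* forward declaration, as in the hunk */

static cycle_t read_foo_cycles(void)
{
	cycle_t ret = (cycle_t)read_hw_counter();

	/* Never report less than what the timekeeping core already saw. */
	return ret >= clocksource_foo.cycle_last ? ret : clocksource_foo.cycle_last;
}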