Diffstat (limited to 'arch')
-rw-r--r--  arch/sh/kernel/entry-common.S          |  2
-rw-r--r--  arch/sh/kernel/ftrace.c                | 37
-rw-r--r--  arch/sh/kernel/setup.c                 |  2
-rw-r--r--  arch/sh/kernel/signal_32.c             |  9
-rw-r--r--  arch/sh/kernel/smp.c                   |  2
-rw-r--r--  arch/sh/kernel/traps_32.c              |  7
-rw-r--r--  arch/sh/mm/cache.c                     |  2
-rw-r--r--  arch/x86/Kconfig                       | 11
-rw-r--r--  arch/x86/include/asm/paravirt.h        | 28
-rw-r--r--  arch/x86/include/asm/paravirt_types.h  | 10
-rw-r--r--  arch/x86/kernel/irq.c                  |  2
-rw-r--r--  arch/x86/kernel/smp.c                  |  1
-rw-r--r--  arch/x86/kernel/time.c                 |  3
-rw-r--r--  arch/x86/kernel/trampoline.c           | 12
-rw-r--r--  arch/x86/kernel/trampoline_64.S        |  4
-rw-r--r--  arch/x86/kernel/vmi_32.c               |  2
16 files changed, 78 insertions, 56 deletions
diff --git a/arch/sh/kernel/entry-common.S b/arch/sh/kernel/entry-common.S
index 68d9223b145e..3eb84931d2aa 100644
--- a/arch/sh/kernel/entry-common.S
+++ b/arch/sh/kernel/entry-common.S
@@ -121,7 +121,7 @@ noresched:
 ENTRY(resume_userspace)
 	! r8: current_thread_info
 	cli
-	TRACE_IRQS_OfF
+	TRACE_IRQS_OFF
 	mov.l	@(TI_FLAGS,r8), r0	! current_thread_info->flags
 	tst	#(_TIF_WORK_MASK & 0xff), r0
 	bt/s	__restore_all
diff --git a/arch/sh/kernel/ftrace.c b/arch/sh/kernel/ftrace.c
index a3dcc6d5d253..2c48e267256e 100644
--- a/arch/sh/kernel/ftrace.c
+++ b/arch/sh/kernel/ftrace.c
@@ -291,31 +291,48 @@ struct syscall_metadata *syscall_nr_to_meta(int nr)
 	return syscalls_metadata[nr];
 }
 
-void arch_init_ftrace_syscalls(void)
+int syscall_name_to_nr(char *name)
+{
+	int i;
+
+	if (!syscalls_metadata)
+		return -1;
+	for (i = 0; i < NR_syscalls; i++)
+		if (syscalls_metadata[i])
+			if (!strcmp(syscalls_metadata[i]->name, name))
+				return i;
+	return -1;
+}
+
+void set_syscall_enter_id(int num, int id)
+{
+	syscalls_metadata[num]->enter_id = id;
+}
+
+void set_syscall_exit_id(int num, int id)
+{
+	syscalls_metadata[num]->exit_id = id;
+}
+
+static int __init arch_init_ftrace_syscalls(void)
 {
 	int i;
 	struct syscall_metadata *meta;
 	unsigned long **psys_syscall_table = &sys_call_table;
-	static atomic_t refs;
-
-	if (atomic_inc_return(&refs) != 1)
-		goto end;
 
 	syscalls_metadata = kzalloc(sizeof(*syscalls_metadata) *
 					FTRACE_SYSCALL_MAX, GFP_KERNEL);
 	if (!syscalls_metadata) {
 		WARN_ON(1);
-		return;
+		return -ENOMEM;
 	}
 
 	for (i = 0; i < FTRACE_SYSCALL_MAX; i++) {
 		meta = find_syscall_meta(psys_syscall_table[i]);
 		syscalls_metadata[i] = meta;
 	}
-	return;
 
-	/* Paranoid: avoid overflow */
-end:
-	atomic_dec(&refs);
+	return 0;
 }
+arch_initcall(arch_init_ftrace_syscalls);
 #endif /* CONFIG_FTRACE_SYSCALLS */
diff --git a/arch/sh/kernel/setup.c b/arch/sh/kernel/setup.c
index f9d44f8e0df6..99b4fb553bf1 100644
--- a/arch/sh/kernel/setup.c
+++ b/arch/sh/kernel/setup.c
@@ -549,6 +549,8 @@ static int show_cpuinfo(struct seq_file *m, void *v)
 
 	if (cpu == 0)
 		seq_printf(m, "machine\t\t: %s\n", get_system_type());
+	else
+		seq_printf(m, "\n");
 
 	seq_printf(m, "processor\t: %d\n", cpu);
 	seq_printf(m, "cpu family\t: %s\n", init_utsname()->machine);
diff --git a/arch/sh/kernel/signal_32.c b/arch/sh/kernel/signal_32.c
index 6729703547a1..3db37425210d 100644
--- a/arch/sh/kernel/signal_32.c
+++ b/arch/sh/kernel/signal_32.c
@@ -145,7 +145,7 @@ static inline int restore_sigcontext_fpu(struct sigcontext __user *sc)
 {
 	struct task_struct *tsk = current;
 
-	if (!(current_cpu_data.flags & CPU_HAS_FPU))
+	if (!(boot_cpu_data.flags & CPU_HAS_FPU))
 		return 0;
 
 	set_used_math();
@@ -158,7 +158,7 @@ static inline int save_sigcontext_fpu(struct sigcontext __user *sc,
 {
 	struct task_struct *tsk = current;
 
-	if (!(current_cpu_data.flags & CPU_HAS_FPU))
+	if (!(boot_cpu_data.flags & CPU_HAS_FPU))
 		return 0;
 
 	if (!used_math()) {
@@ -199,7 +199,7 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, int *r0_p
 #undef COPY
 
 #ifdef CONFIG_SH_FPU
-	if (current_cpu_data.flags & CPU_HAS_FPU) {
+	if (boot_cpu_data.flags & CPU_HAS_FPU) {
 		int owned_fp;
 		struct task_struct *tsk = current;
 
@@ -472,6 +472,7 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
 		err |= __put_user(OR_R0_R0, &frame->retcode[6]);
 		err |= __put_user((__NR_rt_sigreturn), &frame->retcode[7]);
 		regs->pr = (unsigned long) frame->retcode;
+		flush_icache_range(regs->pr, regs->pr + sizeof(frame->retcode));
 	}
 
 	if (err)
@@ -497,8 +498,6 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
 	pr_debug("SIG deliver (%s:%d): sp=%p pc=%08lx pr=%08lx\n",
 		 current->comm, task_pid_nr(current), frame, regs->pc, regs->pr);
 
-	flush_icache_range(regs->pr, regs->pr + sizeof(frame->retcode));
-
 	return 0;
 
 give_sigsegv:
diff --git a/arch/sh/kernel/smp.c b/arch/sh/kernel/smp.c
index 442d8d47a41e..160db1003cfb 100644
--- a/arch/sh/kernel/smp.c
+++ b/arch/sh/kernel/smp.c
@@ -35,6 +35,8 @@ static inline void __init smp_store_cpu_info(unsigned int cpu)
 {
 	struct sh_cpuinfo *c = cpu_data + cpu;
 
+	memcpy(c, &boot_cpu_data, sizeof(struct sh_cpuinfo));
+
 	c->loops_per_jiffy = loops_per_jiffy;
 }
 
diff --git a/arch/sh/kernel/traps_32.c b/arch/sh/kernel/traps_32.c
index e0b5e4b5accd..7a2ee3a6b8e7 100644
--- a/arch/sh/kernel/traps_32.c
+++ b/arch/sh/kernel/traps_32.c
@@ -25,6 +25,7 @@
 #include <linux/kexec.h>
 #include <linux/limits.h>
 #include <linux/proc_fs.h>
+#include <linux/sysfs.h>
 #include <asm/system.h>
 #include <asm/uaccess.h>
 #include <asm/fpu.h>
@@ -159,12 +160,12 @@ void die(const char * str, struct pt_regs * regs, long err)
 
 	oops_enter();
 
-	console_verbose();
 	spin_lock_irq(&die_lock);
+	console_verbose();
 	bust_spinlocks(1);
 
 	printk("%s: %04lx [#%d]\n", str, err & 0xffff, ++die_counter);
-
+	sysfs_printk_last_file();
 	print_modules();
 	show_regs(regs);
 
@@ -180,6 +181,7 @@ void die(const char * str, struct pt_regs * regs, long err)
 	bust_spinlocks(0);
 	add_taint(TAINT_DIE);
 	spin_unlock_irq(&die_lock);
+	oops_exit();
 
 	if (kexec_should_crash(current))
 		crash_kexec(regs);
@@ -190,7 +192,6 @@ void die(const char * str, struct pt_regs * regs, long err)
 	if (panic_on_oops)
 		panic("Fatal exception");
 
-	oops_exit();
 	do_exit(SIGSEGV);
 }
 
diff --git a/arch/sh/mm/cache.c b/arch/sh/mm/cache.c
index 35c37b7f717a..5e1091be9dc4 100644
--- a/arch/sh/mm/cache.c
+++ b/arch/sh/mm/cache.c
@@ -128,7 +128,7 @@ void __update_cache(struct vm_area_struct *vma,
 		return;
 
 	page = pfn_to_page(pfn);
-	if (pfn_valid(pfn) && page_mapping(page)) {
+	if (pfn_valid(pfn)) {
 		int dirty = test_and_clear_bit(PG_dcache_dirty, &page->flags);
 		if (dirty) {
 			unsigned long addr = (unsigned long)page_address(page);
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index c876bace8fdc..07e01149e3bf 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -491,7 +491,7 @@ if PARAVIRT_GUEST
 source "arch/x86/xen/Kconfig"
 
 config VMI
-	bool "VMI Guest support"
+	bool "VMI Guest support (DEPRECATED)"
 	select PARAVIRT
 	depends on X86_32
 	---help---
@@ -500,6 +500,15 @@ config VMI
 	  at the moment), by linking the kernel to a GPL-ed ROM module
 	  provided by the hypervisor.
 
+	  As of September 2009, VMware has started a phased retirement
+	  of this feature from VMware's products. Please see
+	  feature-removal-schedule.txt for details.  If you are
+	  planning to enable this option, please note that you cannot
+	  live migrate a VMI enabled VM to a future VMware product,
+	  which doesn't support VMI. So if you expect your kernel to
+	  seamlessly migrate to newer VMware products, keep this
+	  disabled.
+
 config KVM_CLOCK
 	bool "KVM paravirtualized clock"
 	select PARAVIRT
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index 8aebcc41041d..efb38994859c 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -840,42 +840,22 @@ static __always_inline void __raw_spin_unlock(struct raw_spinlock *lock)
 
 static inline unsigned long __raw_local_save_flags(void)
 {
-	unsigned long f;
-
-	asm volatile(paravirt_alt(PARAVIRT_CALL)
-		     : "=a"(f)
-		     : paravirt_type(pv_irq_ops.save_fl),
-		       paravirt_clobber(CLBR_EAX)
-		     : "memory", "cc");
-	return f;
+	return PVOP_CALLEE0(unsigned long, pv_irq_ops.save_fl);
 }
 
 static inline void raw_local_irq_restore(unsigned long f)
 {
-	asm volatile(paravirt_alt(PARAVIRT_CALL)
-		     : "=a"(f)
-		     : PV_FLAGS_ARG(f),
-		       paravirt_type(pv_irq_ops.restore_fl),
-		       paravirt_clobber(CLBR_EAX)
-		     : "memory", "cc");
+	PVOP_VCALLEE1(pv_irq_ops.restore_fl, f);
 }
 
 static inline void raw_local_irq_disable(void)
 {
-	asm volatile(paravirt_alt(PARAVIRT_CALL)
-		     :
-		     : paravirt_type(pv_irq_ops.irq_disable),
-		       paravirt_clobber(CLBR_EAX)
-		     : "memory", "eax", "cc");
+	PVOP_VCALLEE0(pv_irq_ops.irq_disable);
 }
 
 static inline void raw_local_irq_enable(void)
 {
-	asm volatile(paravirt_alt(PARAVIRT_CALL)
-		     :
-		     : paravirt_type(pv_irq_ops.irq_enable),
-		       paravirt_clobber(CLBR_EAX)
-		     : "memory", "eax", "cc");
+	PVOP_VCALLEE0(pv_irq_ops.irq_enable);
 }
 
 static inline unsigned long __raw_local_irq_save(void)
diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
index dd0f5b32489d..9357473c8da0 100644
--- a/arch/x86/include/asm/paravirt_types.h
+++ b/arch/x86/include/asm/paravirt_types.h
@@ -494,10 +494,11 @@ int paravirt_disable_iospace(void);
 #define EXTRA_CLOBBERS
 #define VEXTRA_CLOBBERS
 #else /* CONFIG_X86_64 */
+/* [re]ax isn't an arg, but the return val */
 #define PVOP_VCALL_ARGS				\
 	unsigned long __edi = __edi, __esi = __esi,	\
-		__edx = __edx, __ecx = __ecx
-#define PVOP_CALL_ARGS		PVOP_VCALL_ARGS, __eax
+		__edx = __edx, __ecx = __ecx, __eax = __eax
+#define PVOP_CALL_ARGS		PVOP_VCALL_ARGS
 
 #define PVOP_CALL_ARG1(x)		"D" ((unsigned long)(x))
 #define PVOP_CALL_ARG2(x)		"S" ((unsigned long)(x))
@@ -509,6 +510,7 @@ int paravirt_disable_iospace(void);
 				"=c" (__ecx)
 #define PVOP_CALL_CLOBBERS	PVOP_VCALL_CLOBBERS, "=a" (__eax)
 
+/* void functions are still allowed [re]ax for scratch */
 #define PVOP_VCALLEE_CLOBBERS	"=a" (__eax)
 #define PVOP_CALLEE_CLOBBERS	PVOP_VCALLEE_CLOBBERS
 
@@ -583,8 +585,8 @@ int paravirt_disable_iospace(void);
 		      VEXTRA_CLOBBERS,				\
 		      pre, post, ##__VA_ARGS__)
 
-#define __PVOP_VCALLEESAVE(rettype, op, pre, post, ...)		\
-	____PVOP_CALL(rettype, op.func, CLBR_RET_REG,		\
+#define __PVOP_VCALLEESAVE(op, pre, post, ...)			\
+	____PVOP_VCALL(op.func, CLBR_RET_REG,			\
 		      PVOP_VCALLEE_CLOBBERS, ,			\
 		      pre, post, ##__VA_ARGS__)
 
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
index 391206199515..74656d1d4e30 100644
--- a/arch/x86/kernel/irq.c
+++ b/arch/x86/kernel/irq.c
@@ -244,7 +244,6 @@ unsigned int __irq_entry do_IRQ(struct pt_regs *regs)
 			__func__, smp_processor_id(), vector, irq);
 	}
 
-	run_local_timers();
 	irq_exit();
 
 	set_irq_regs(old_regs);
@@ -269,7 +268,6 @@ void smp_generic_interrupt(struct pt_regs *regs)
 	if (generic_interrupt_extension)
 		generic_interrupt_extension();
 
-	run_local_timers();
 	irq_exit();
 
 	set_irq_regs(old_regs);
diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
index d915d956e66d..ec1de97600e7 100644
--- a/arch/x86/kernel/smp.c
+++ b/arch/x86/kernel/smp.c
@@ -198,7 +198,6 @@ void smp_reschedule_interrupt(struct pt_regs *regs)
 {
 	ack_APIC_irq();
 	inc_irq_stat(irq_resched_count);
-	run_local_timers();
 	/*
 	 * KVM uses this interrupt to force a cpu out of guest mode
 	 */
diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c
index dcb00d278512..be2573448ed9 100644
--- a/arch/x86/kernel/time.c
+++ b/arch/x86/kernel/time.c
@@ -38,7 +38,8 @@ unsigned long profile_pc(struct pt_regs *regs)
 #ifdef CONFIG_FRAME_POINTER
 	return *(unsigned long *)(regs->bp + sizeof(long));
 #else
-	unsigned long *sp = (unsigned long *)regs->sp;
+	unsigned long *sp =
+		(unsigned long *)kernel_stack_pointer(regs);
 	/*
 	 * Return address is either directly at stack pointer
 	 * or above a saved flags. Eflags has bits 22-31 zero,
diff --git a/arch/x86/kernel/trampoline.c b/arch/x86/kernel/trampoline.c
index 699f7eeb896a..cd022121cab6 100644
--- a/arch/x86/kernel/trampoline.c
+++ b/arch/x86/kernel/trampoline.c
@@ -3,8 +3,16 @@
 #include <asm/trampoline.h>
 #include <asm/e820.h>
 
+#if defined(CONFIG_X86_64) && defined(CONFIG_ACPI_SLEEP)
+#define __trampinit
+#define __trampinitdata
+#else
+#define __trampinit __cpuinit
+#define __trampinitdata __cpuinitdata
+#endif
+
 /* ready for x86_64 and x86 */
-unsigned char *__cpuinitdata trampoline_base = __va(TRAMPOLINE_BASE);
+unsigned char *__trampinitdata trampoline_base = __va(TRAMPOLINE_BASE);
 
 void __init reserve_trampoline_memory(void)
 {
@@ -26,7 +34,7 @@ void __init reserve_trampoline_memory(void)
  * bootstrap into the page concerned. The caller
  * has made sure it's suitably aligned.
  */
-unsigned long __cpuinit setup_trampoline(void)
+unsigned long __trampinit setup_trampoline(void)
 {
 	memcpy(trampoline_base, trampoline_data, TRAMPOLINE_SIZE);
 	return virt_to_phys(trampoline_base);
diff --git a/arch/x86/kernel/trampoline_64.S b/arch/x86/kernel/trampoline_64.S
index 596d54c660a5..3af2dff58b21 100644
--- a/arch/x86/kernel/trampoline_64.S
+++ b/arch/x86/kernel/trampoline_64.S
@@ -32,8 +32,12 @@
 #include <asm/segment.h>
 #include <asm/processor-flags.h>
 
+#ifdef CONFIG_ACPI_SLEEP
+.section .rodata, "a", @progbits
+#else
 /* We can free up the trampoline after bootup if cpu hotplug is not supported. */
 __CPUINITRODATA
+#endif
 .code16
 
 ENTRY(trampoline_data)
diff --git a/arch/x86/kernel/vmi_32.c b/arch/x86/kernel/vmi_32.c
index 31e6f6cfe53e..d430e4c30193 100644
--- a/arch/x86/kernel/vmi_32.c
+++ b/arch/x86/kernel/vmi_32.c
@@ -648,7 +648,7 @@ static inline int __init activate_vmi(void)
 
 	pv_info.paravirt_enabled = 1;
 	pv_info.kernel_rpl = kernel_cs & SEGMENT_RPL_MASK;
-	pv_info.name = "vmi";
+	pv_info.name = "vmi [deprecated]";
 
 	pv_init_ops.patch = vmi_patch;
 