Diffstat (limited to 'arch/x86/kernel')
 arch/x86/kernel/Makefile             |  4
 arch/x86/kernel/cpu/common.c         | 11
 arch/x86/kernel/ldt.c                |  9
 arch/x86/kernel/paravirt-spinlocks.c | 37
 arch/x86/kernel/paravirt.c           | 27
 arch/x86/kernel/process_32.c         | 38
 arch/x86/kernel/process_64.c         | 20
 arch/x86/kernel/smp.c                |  6
 arch/x86/kernel/smpboot.c            | 75
 arch/x86/kernel/tlb_32.c             |  8
 10 files changed, 123 insertions(+), 112 deletions(-)
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index 3db651fc8ec5..d679cb2c79b4 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -10,7 +10,7 @@ ifdef CONFIG_FTRACE
 # Do not profile debug and lowlevel utilities
 CFLAGS_REMOVE_tsc.o = -pg
 CFLAGS_REMOVE_rtc.o = -pg
-CFLAGS_REMOVE_paravirt.o = -pg
+CFLAGS_REMOVE_paravirt-spinlocks.o = -pg
 endif
 
 #
@@ -89,7 +89,7 @@ obj-$(CONFIG_DEBUG_NX_TEST) += test_nx.o
 obj-$(CONFIG_VMI)		+= vmi_32.o vmiclock_32.o
 obj-$(CONFIG_KVM_GUEST)		+= kvm.o
 obj-$(CONFIG_KVM_CLOCK)		+= kvmclock.o
-obj-$(CONFIG_PARAVIRT)		+= paravirt.o paravirt_patch_$(BITS).o
+obj-$(CONFIG_PARAVIRT)		+= paravirt.o paravirt_patch_$(BITS).o paravirt-spinlocks.o
 obj-$(CONFIG_PARAVIRT_CLOCK)	+= pvclock.o
 
 obj-$(CONFIG_PCSPKR_PLATFORM)	+= pcspeaker.o
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 8aab8517642e..1c7d39f0e89e 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -744,14 +744,3 @@ void __cpuinit cpu_init(void)
 	mxcsr_feature_mask_init();
 }
 
-#ifdef CONFIG_HOTPLUG_CPU
-void __cpuinit cpu_uninit(void)
-{
-	int cpu = raw_smp_processor_id();
-	cpu_clear(cpu, cpu_initialized);
-
-	/* lazy TLB state */
-	per_cpu(cpu_tlbstate, cpu).state = 0;
-	per_cpu(cpu_tlbstate, cpu).active_mm = &init_mm;
-}
-#endif
diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
index b68e21f06f4f..6e388412a854 100644
--- a/arch/x86/kernel/ldt.c
+++ b/arch/x86/kernel/ldt.c
@@ -51,6 +51,8 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
 		memset(newldt + oldsize * LDT_ENTRY_SIZE, 0,
 		       (mincount - oldsize) * LDT_ENTRY_SIZE);
 
+	paravirt_alloc_ldt(newldt, mincount);
+
 #ifdef CONFIG_X86_64
 	/* CHECKME: Do we really need this ? */
 	wmb();
@@ -73,6 +75,7 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
 #endif
 	}
 	if (oldsize) {
+		paravirt_free_ldt(oldldt, oldsize);
 		if (oldsize * LDT_ENTRY_SIZE > PAGE_SIZE)
 			vfree(oldldt);
 		else
@@ -84,10 +87,13 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
 static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
 {
 	int err = alloc_ldt(new, old->size, 0);
+	int i;
 
 	if (err < 0)
 		return err;
-	memcpy(new->ldt, old->ldt, old->size * LDT_ENTRY_SIZE);
+
+	for(i = 0; i < old->size; i++)
+		write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
 	return 0;
 }
 
@@ -124,6 +130,7 @@ void destroy_context(struct mm_struct *mm)
 	if (mm == current->active_mm)
 		clear_LDT();
 #endif
+	paravirt_free_ldt(mm->context.ldt, mm->context.size);
 	if (mm->context.size * LDT_ENTRY_SIZE > PAGE_SIZE)
 		vfree(mm->context.ldt);
 	else
diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
new file mode 100644
index 000000000000..0e9f1982b1dd
--- /dev/null
+++ b/arch/x86/kernel/paravirt-spinlocks.c
@@ -0,0 +1,37 @@
+/*
+ * Split spinlock implementation out into its own file, so it can be
+ * compiled in a FTRACE-compatible way.
+ */
+#include <linux/spinlock.h>
+#include <linux/module.h>
+
+#include <asm/paravirt.h>
+
+static void default_spin_lock_flags(struct raw_spinlock *lock, unsigned long flags)
+{
+	__raw_spin_lock(lock);
+}
+
+struct pv_lock_ops pv_lock_ops = {
+#ifdef CONFIG_SMP
+	.spin_is_locked = __ticket_spin_is_locked,
+	.spin_is_contended = __ticket_spin_is_contended,
+
+	.spin_lock = __ticket_spin_lock,
+	.spin_lock_flags = default_spin_lock_flags,
+	.spin_trylock = __ticket_spin_trylock,
+	.spin_unlock = __ticket_spin_unlock,
+#endif
+};
+EXPORT_SYMBOL(pv_lock_ops);
+
+void __init paravirt_use_bytelocks(void)
+{
+#ifdef CONFIG_SMP
+	pv_lock_ops.spin_is_locked = __byte_spin_is_locked;
+	pv_lock_ops.spin_is_contended = __byte_spin_is_contended;
+	pv_lock_ops.spin_lock = __byte_spin_lock;
+	pv_lock_ops.spin_trylock = __byte_spin_trylock;
+	pv_lock_ops.spin_unlock = __byte_spin_unlock;
+#endif
+}
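
Note: the point of routing every lock operation through pv_lock_ops is that a hypervisor backend can swap in its own primitives at boot, exactly as paravirt_use_bytelocks() does for byte locks above. A minimal sketch of a guest-side override follows; the hv_* names and bodies are hypothetical, only the pv_lock_ops fields come from this patch:

	/* Hypothetical guest backend; illustrative only. */
	static void hv_spin_lock(struct raw_spinlock *lock)
	{
		/* e.g. spin briefly, then block in the hypervisor */
	}

	static int hv_spin_trylock(struct raw_spinlock *lock)
	{
		return 0;	/* placeholder: try once, never spin */
	}

	static void hv_spin_unlock(struct raw_spinlock *lock)
	{
		/* e.g. kick any vCPU sleeping on this lock */
	}

	static void __init hv_init_spinlocks(void)
	{
		pv_lock_ops.spin_lock = hv_spin_lock;
		pv_lock_ops.spin_trylock = hv_spin_trylock;
		pv_lock_ops.spin_unlock = hv_spin_unlock;
	}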
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index 300da17e61cb..7faea1817d05 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -268,17 +268,6 @@ enum paravirt_lazy_mode paravirt_get_lazy_mode(void)
 	return __get_cpu_var(paravirt_lazy_mode);
 }
 
-void __init paravirt_use_bytelocks(void)
-{
-#ifdef CONFIG_SMP
-	pv_lock_ops.spin_is_locked = __byte_spin_is_locked;
-	pv_lock_ops.spin_is_contended = __byte_spin_is_contended;
-	pv_lock_ops.spin_lock = __byte_spin_lock;
-	pv_lock_ops.spin_trylock = __byte_spin_trylock;
-	pv_lock_ops.spin_unlock = __byte_spin_unlock;
-#endif
-}
-
 struct pv_info pv_info = {
 	.name = "bare hardware",
 	.paravirt_enabled = 0,
@@ -348,6 +337,10 @@ struct pv_cpu_ops pv_cpu_ops = {
 	.write_ldt_entry = native_write_ldt_entry,
 	.write_gdt_entry = native_write_gdt_entry,
 	.write_idt_entry = native_write_idt_entry,
+
+	.alloc_ldt = paravirt_nop,
+	.free_ldt = paravirt_nop,
+
 	.load_sp0 = native_load_sp0,
 
 #if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION)
@@ -461,18 +454,6 @@ struct pv_mmu_ops pv_mmu_ops = {
 	.set_fixmap = native_set_fixmap,
 };
 
-struct pv_lock_ops pv_lock_ops = {
-#ifdef CONFIG_SMP
-	.spin_is_locked = __ticket_spin_is_locked,
-	.spin_is_contended = __ticket_spin_is_contended,
-
-	.spin_lock = __ticket_spin_lock,
-	.spin_trylock = __ticket_spin_trylock,
-	.spin_unlock = __ticket_spin_unlock,
-#endif
-};
-EXPORT_SYMBOL(pv_lock_ops);
-
 EXPORT_SYMBOL_GPL(pv_time_ops);
 EXPORT_SYMBOL    (pv_cpu_ops);
 EXPORT_SYMBOL    (pv_mmu_ops);
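
Note: the new alloc_ldt/free_ldt hooks default to paravirt_nop, so bare-hardware behaviour is unchanged; they exist for hypervisors that need LDT memory in a particular state before the CPU loads it (Xen, for instance, requires descriptor-table pages to be read-only). A sketch of how a backend might hook them; the hv_* names are hypothetical and the field signatures are assumed from this series:

	static void hv_alloc_ldt(struct desc_struct *ldt, unsigned entries)
	{
		/* e.g. write-protect the LDT pages before first use */
	}

	static void hv_free_ldt(struct desc_struct *ldt, unsigned entries)
	{
		/* make the pages ordinary writable memory again */
	}

	pv_cpu_ops.alloc_ldt = hv_alloc_ldt;
	pv_cpu_ops.free_ldt = hv_free_ldt;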
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index 3b7a1ddcc0bc..b76b38ff962b 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -72,46 +72,12 @@ unsigned long thread_saved_pc(struct task_struct *tsk)
 	return ((unsigned long *)tsk->thread.sp)[3];
 }
 
-#ifdef CONFIG_HOTPLUG_CPU
-#include <asm/nmi.h>
-
-static void cpu_exit_clear(void)
-{
-	int cpu = raw_smp_processor_id();
-
-	idle_task_exit();
-
-	cpu_uninit();
-	irq_ctx_exit(cpu);
-
-	cpu_clear(cpu, cpu_callout_map);
-	cpu_clear(cpu, cpu_callin_map);
-
-	numa_remove_cpu(cpu);
-}
-
-/* We don't actually take CPU down, just spin without interrupts. */
-static inline void play_dead(void)
-{
-	/* This must be done before dead CPU ack */
-	cpu_exit_clear();
-	mb();
-	/* Ack it */
-	__get_cpu_var(cpu_state) = CPU_DEAD;
-
-	/*
-	 * With physical CPU hotplug, we should halt the cpu
-	 */
-	local_irq_disable();
-	/* mask all interrupts, flush any and all caches, and halt */
-	wbinvd_halt();
-}
-#else
+#ifndef CONFIG_SMP
 static inline void play_dead(void)
 {
 	BUG();
 }
-#endif /* CONFIG_HOTPLUG_CPU */
+#endif
 
 /*
  * The idle thread. There's no useful work to be
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 71553b664e2a..ec27afa43d7e 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -85,28 +85,12 @@ void exit_idle(void)
 	__exit_idle();
 }
 
-#ifdef CONFIG_HOTPLUG_CPU
-DECLARE_PER_CPU(int, cpu_state);
-
-#include <asm/nmi.h>
-/* We halt the CPU with physical CPU hotplug */
-static inline void play_dead(void)
-{
-	idle_task_exit();
-	mb();
-	/* Ack it */
-	__get_cpu_var(cpu_state) = CPU_DEAD;
-
-	local_irq_disable();
-	/* mask all interrupts, flush any and all caches, and halt */
-	wbinvd_halt();
-}
-#else
+#ifndef CONFIG_SMP
 static inline void play_dead(void)
 {
 	BUG();
 }
-#endif /* CONFIG_HOTPLUG_CPU */
+#endif
 
 /*
  * The idle thread. There's no useful work to be
diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
index 361b7a4c640c..18f9b19f5f8f 100644
--- a/arch/x86/kernel/smp.c
+++ b/arch/x86/kernel/smp.c
@@ -214,12 +214,16 @@ void smp_call_function_single_interrupt(struct pt_regs *regs)
 struct smp_ops smp_ops = {
 	.smp_prepare_boot_cpu = native_smp_prepare_boot_cpu,
 	.smp_prepare_cpus = native_smp_prepare_cpus,
-	.cpu_up = native_cpu_up,
 	.smp_cpus_done = native_smp_cpus_done,
 
 	.smp_send_stop = native_smp_send_stop,
 	.smp_send_reschedule = native_smp_send_reschedule,
 
+	.cpu_up = native_cpu_up,
+	.cpu_die = native_cpu_die,
+	.cpu_disable = native_cpu_disable,
+	.play_dead = native_play_dead,
+
 	.send_call_func_ipi = native_send_call_func_ipi,
 	.send_call_func_single_ipi = native_send_call_func_single_ipi,
 };
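
Note: smp_ops is the paravirt indirection point for CPU hotplug in this series; the generic entry points reduce to one-line wrappers in asm/smp.h, roughly as below (paraphrased from the same series, not part of this diff), which is why smpboot.c renames its functions to native_*:

	static inline int __cpu_disable(void)
	{
		return smp_ops.cpu_disable();
	}

	static inline void __cpu_die(unsigned int cpu)
	{
		smp_ops.cpu_die(cpu);
	}

	static inline void play_dead(void)
	{
		smp_ops.play_dead();
	}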
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 7985c5b3f916..66b04e598817 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -1346,25 +1346,9 @@ static void __ref remove_cpu_from_maps(int cpu)
 	numa_remove_cpu(cpu);
 }
 
-int __cpu_disable(void)
+void cpu_disable_common(void)
 {
 	int cpu = smp_processor_id();
-
-	/*
-	 * Perhaps use cpufreq to drop frequency, but that could go
-	 * into generic code.
-	 *
-	 * We won't take down the boot processor on i386 due to some
-	 * interrupts only being able to be serviced by the BSP.
-	 * Especially so if we're not using an IOAPIC	-zwane
-	 */
-	if (cpu == 0)
-		return -EBUSY;
-
-	if (nmi_watchdog == NMI_LOCAL_APIC)
-		stop_apic_nmi_watchdog(NULL);
-	clear_local_APIC();
-
 	/*
 	 * HACK:
 	 * Allow any queued timer interrupts to get serviced
@@ -1382,10 +1366,32 @@ int __cpu_disable(void)
 	remove_cpu_from_maps(cpu);
 	unlock_vector_lock();
 	fixup_irqs(cpu_online_map);
+}
+
+int native_cpu_disable(void)
+{
+	int cpu = smp_processor_id();
+
+	/*
+	 * Perhaps use cpufreq to drop frequency, but that could go
+	 * into generic code.
+	 *
+	 * We won't take down the boot processor on i386 due to some
+	 * interrupts only being able to be serviced by the BSP.
+	 * Especially so if we're not using an IOAPIC	-zwane
+	 */
+	if (cpu == 0)
+		return -EBUSY;
+
+	if (nmi_watchdog == NMI_LOCAL_APIC)
+		stop_apic_nmi_watchdog(NULL);
+	clear_local_APIC();
+
+	cpu_disable_common();
 	return 0;
 }
 
-void __cpu_die(unsigned int cpu)
+void native_cpu_die(unsigned int cpu)
 {
 	/* We don't do anything here: idle task is faking death itself. */
 	unsigned int i;
@@ -1402,15 +1408,44 @@ void __cpu_die(unsigned int cpu)
 	}
 	printk(KERN_ERR "CPU %u didn't die...\n", cpu);
 }
+
+void play_dead_common(void)
+{
+	idle_task_exit();
+	reset_lazy_tlbstate();
+	irq_ctx_exit(raw_smp_processor_id());
+
+	mb();
+	/* Ack it */
+	__get_cpu_var(cpu_state) = CPU_DEAD;
+
+	/*
+	 * With physical CPU hotplug, we should halt the cpu
+	 */
+	local_irq_disable();
+}
+
+void native_play_dead(void)
+{
+	play_dead_common();
+	wbinvd_halt();
+}
+
 #else /* ... !CONFIG_HOTPLUG_CPU */
-int __cpu_disable(void)
+int native_cpu_disable(void)
 {
 	return -ENOSYS;
 }
 
-void __cpu_die(unsigned int cpu)
+void native_cpu_die(unsigned int cpu)
 {
 	/* We said "no" in __cpu_disable */
 	BUG();
 }
+
+void native_play_dead(void)
+{
+	BUG();
+}
+
 #endif
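
Note: splitting play_dead() into play_dead_common() plus a native wbinvd_halt() tail is what makes the offline path overridable: a paravirt guest keeps the common bookkeeping and substitutes its own way of taking the vCPU down. A Xen-style sketch (illustrative; VCPUOP_down is the standard Xen interface for offlining a vCPU, but this function is not part of this diff):

	static void xen_play_dead(void)
	{
		play_dead_common();
		/* rather than wbinvd_halt(), ask the hypervisor to
		 * take this vCPU offline */
		HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL);
	}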
diff --git a/arch/x86/kernel/tlb_32.c b/arch/x86/kernel/tlb_32.c
index fec1ecedc9b7..e00534b33534 100644
--- a/arch/x86/kernel/tlb_32.c
+++ b/arch/x86/kernel/tlb_32.c
@@ -241,3 +241,11 @@ void flush_tlb_all(void)
 	on_each_cpu(do_flush_tlb_all, NULL, 1);
 }
 
+void reset_lazy_tlbstate(void)
+{
+	int cpu = raw_smp_processor_id();
+
+	per_cpu(cpu_tlbstate, cpu).state = 0;
+	per_cpu(cpu_tlbstate, cpu).active_mm = &init_mm;
+}
+
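
Note: reset_lazy_tlbstate() carries over the lazy-TLB cleanup that the removed cpu_uninit() used to do in cpu/common.c, and is called from play_dead_common() above. Since only 32-bit keeps cpu_tlbstate in this form, the matching header declaration presumably pairs the real function with a 64-bit no-op, roughly:

	/* assumed companion declarations, not shown in this diff */
	#ifdef CONFIG_X86_32
	void reset_lazy_tlbstate(void);
	#else
	static inline void reset_lazy_tlbstate(void)
	{
	}
	#endif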