diff options
author | Joe Perches <joe@perches.com> | 2008-03-23 04:02:43 -0400 |
---|---|---|
committer | Ingo Molnar <mingo@elte.hu> | 2008-04-17 11:41:25 -0400 |
commit | c4fe760efde84e52168a81bf125f25ba2f118b51 (patch) | |
tree | 99a8b2a79d2ba28bc0dd799e485b05f8fdf8cdd9 /include/asm-x86/mmu_context_64.h | |
parent | 55464da94a845e057ffb94a9fc7be1aa86ffcd89 (diff) |
include/asm-x86/mmu_context_64.h: checkpatch cleanups - formatting only
Signed-off-by: Joe Perches <joe@perches.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'include/asm-x86/mmu_context_64.h')
-rw-r--r-- | include/asm-x86/mmu_context_64.h | 21 |
1 file changed, 11 insertions, 10 deletions
diff --git a/include/asm-x86/mmu_context_64.h b/include/asm-x86/mmu_context_64.h index ad6dc821ef9e..ca44c71e7fb3 100644 --- a/include/asm-x86/mmu_context_64.h +++ b/include/asm-x86/mmu_context_64.h | |||
@@ -20,12 +20,12 @@ void destroy_context(struct mm_struct *mm); | |||
20 | static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) | 20 | static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) |
21 | { | 21 | { |
22 | #ifdef CONFIG_SMP | 22 | #ifdef CONFIG_SMP |
23 | if (read_pda(mmu_state) == TLBSTATE_OK) | 23 | if (read_pda(mmu_state) == TLBSTATE_OK) |
24 | write_pda(mmu_state, TLBSTATE_LAZY); | 24 | write_pda(mmu_state, TLBSTATE_LAZY); |
25 | #endif | 25 | #endif |
26 | } | 26 | } |
27 | 27 | ||
28 | static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, | 28 | static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, |
29 | struct task_struct *tsk) | 29 | struct task_struct *tsk) |
30 | { | 30 | { |
31 | unsigned cpu = smp_processor_id(); | 31 | unsigned cpu = smp_processor_id(); |
@@ -39,7 +39,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, | |||
39 | cpu_set(cpu, next->cpu_vm_mask); | 39 | cpu_set(cpu, next->cpu_vm_mask); |
40 | load_cr3(next->pgd); | 40 | load_cr3(next->pgd); |
41 | 41 | ||
42 | if (unlikely(next->context.ldt != prev->context.ldt)) | 42 | if (unlikely(next->context.ldt != prev->context.ldt)) |
43 | load_LDT_nolock(&next->context); | 43 | load_LDT_nolock(&next->context); |
44 | } | 44 | } |
45 | #ifdef CONFIG_SMP | 45 | #ifdef CONFIG_SMP |
@@ -48,7 +48,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, | |||
48 | if (read_pda(active_mm) != next) | 48 | if (read_pda(active_mm) != next) |
49 | BUG(); | 49 | BUG(); |
50 | if (!cpu_test_and_set(cpu, next->cpu_vm_mask)) { | 50 | if (!cpu_test_and_set(cpu, next->cpu_vm_mask)) { |
51 | /* We were in lazy tlb mode and leave_mm disabled | 51 | /* We were in lazy tlb mode and leave_mm disabled |
52 | * tlb flush IPI delivery. We must reload CR3 | 52 | * tlb flush IPI delivery. We must reload CR3 |
53 | * to make sure to use no freed page tables. | 53 | * to make sure to use no freed page tables. |
54 | */ | 54 | */ |
@@ -59,13 +59,14 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, | |||
59 | #endif | 59 | #endif |
60 | } | 60 | } |
61 | 61 | ||
62 | #define deactivate_mm(tsk,mm) do { \ | 62 | #define deactivate_mm(tsk, mm) \ |
63 | load_gs_index(0); \ | 63 | do { \ |
64 | asm volatile("movl %0,%%fs"::"r"(0)); \ | 64 | load_gs_index(0); \ |
65 | } while(0) | 65 | asm volatile("movl %0,%%fs"::"r"(0)); \ |
66 | } while (0) | ||
66 | 67 | ||
67 | #define activate_mm(prev, next) \ | 68 | #define activate_mm(prev, next) \ |
68 | switch_mm((prev),(next),NULL) | 69 | switch_mm((prev), (next), NULL) |
69 | 70 | ||
70 | 71 | ||
71 | #endif | 72 | #endif |