Diffstat (limited to 'include/asm-sparc64/mmu_context.h')
-rw-r--r--  include/asm-sparc64/mmu_context.h | 25 ++++++++++++-------------
1 file changed, 12 insertions(+), 13 deletions(-)
diff --git a/include/asm-sparc64/mmu_context.h b/include/asm-sparc64/mmu_context.h
index eb660b1609c4..4be40c58e3c1 100644
--- a/include/asm-sparc64/mmu_context.h
+++ b/include/asm-sparc64/mmu_context.h
@@ -19,6 +19,12 @@ extern unsigned long tlb_context_cache;
 extern unsigned long mmu_context_bmap[];
 
 extern void get_new_mmu_context(struct mm_struct *mm);
+#ifdef CONFIG_SMP
+extern void smp_new_mmu_context_version(void);
+#else
+#define smp_new_mmu_context_version() do { } while (0)
+#endif
+
 extern int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
 extern void destroy_context(struct mm_struct *mm);
 
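The new smp_new_mmu_context_version() declaration follows the usual kernel pattern for SMP-only hooks: on UP builds the call expands to an empty statement, so callers never need their own CONFIG_SMP guards. A minimal sketch of that pattern with a hypothetical caller (example_new_version() is not part of this patch; on SMP the real function presumably cross-calls the other CPUs):

	#ifdef CONFIG_SMP
	extern void smp_new_mmu_context_version(void);
	#else
	#define smp_new_mmu_context_version() do { } while (0)
	#endif

	/* Hypothetical caller: compiles identically on SMP and UP. */
	static void example_new_version(void)
	{
		/* ... bump the context generation ... */
		smp_new_mmu_context_version();	/* expands to nothing on UP */
	}

The do { } while (0) form, rather than an empty define, keeps the stub usable anywhere a statement is expected, e.g. as the body of an unbraced if.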
@@ -58,21 +64,17 @@ extern void smp_tsb_sync(struct mm_struct *mm);
 
 extern void __flush_tlb_mm(unsigned long, unsigned long);
 
-/* Switch the current MM context. */
+/* Switch the current MM context.  Interrupts are disabled.  */
 static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, struct task_struct *tsk)
 {
 	unsigned long ctx_valid;
 	int cpu;
 
-	/* Note: page_table_lock is used here to serialize switch_mm
-	 * and activate_mm, and their calls to get_new_mmu_context.
-	 * This use of page_table_lock is unrelated to its other uses.
-	 */
-	spin_lock(&mm->page_table_lock);
+	spin_lock(&mm->context.lock);
 	ctx_valid = CTX_VALID(mm->context);
 	if (!ctx_valid)
 		get_new_mmu_context(mm);
-	spin_unlock(&mm->page_table_lock);
+	spin_unlock(&mm->context.lock);
 
 	if (!ctx_valid || (old_mm != mm)) {
 		load_secondary_context(mm);
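This hunk drops mm->page_table_lock in favor of a lock embedded in the architecture's per-mm context, mm->context.lock, so context allocation no longer piggybacks on a lock with unrelated users. For reference, a sketch of the shape mm_context_t presumably takes after this change; the real definition lives in include/asm-sparc64/mmu.h and carries additional TSB bookkeeping:

	typedef struct {
		spinlock_t	lock;		/* serializes get_new_mmu_context() */
		unsigned long	sparc64_ctx_val;/* read by CTX_VALID()/CTX_HWBITS() */
		/* ... TSB fields elided ... */
	} mm_context_t;

switch_mm() can take the lock with a plain spin_lock() because, per the updated comment, it is only ever called with interrupts already disabled.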
@@ -98,19 +100,16 @@ static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, str
 /* Activate a new MM instance for the current task. */
 static inline void activate_mm(struct mm_struct *active_mm, struct mm_struct *mm)
 {
+	unsigned long flags;
 	int cpu;
 
-	/* Note: page_table_lock is used here to serialize switch_mm
-	 * and activate_mm, and their calls to get_new_mmu_context.
-	 * This use of page_table_lock is unrelated to its other uses.
-	 */
-	spin_lock(&mm->page_table_lock);
+	spin_lock_irqsave(&mm->context.lock, flags);
 	if (!CTX_VALID(mm->context))
 		get_new_mmu_context(mm);
 	cpu = smp_processor_id();
 	if (!cpu_isset(cpu, mm->cpu_vm_mask))
 		cpu_set(cpu, mm->cpu_vm_mask);
-	spin_unlock(&mm->page_table_lock);
+	spin_unlock_irqrestore(&mm->context.lock, flags);
 
 	load_secondary_context(mm);
 	__flush_tlb_mm(CTX_HWBITS(mm->context), SECONDARY_CONTEXT);
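activate_mm(), by contrast, is reachable with interrupts enabled (from exec, for instance), so it must mask them itself: once switch_mm() takes mm->context.lock from the scheduler with interrupts off, holding that lock with interrupts on would presumably risk a same-CPU deadlock if a context switch were triggered from interrupt return. A condensed view of the asymmetry, assuming the mm_context_t sketch above:

	/* switch_mm(): caller (the scheduler) has IRQs disabled. */
	spin_lock(&mm->context.lock);
	/* ... allocate context if needed ... */
	spin_unlock(&mm->context.lock);

	/* activate_mm(): may run with IRQs on, so disable them around
	 * the same lock.
	 */
	spin_lock_irqsave(&mm->context.lock, flags);
	/* ... allocate context, mark this CPU in cpu_vm_mask ... */
	spin_unlock_irqrestore(&mm->context.lock, flags);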