Diffstat (limited to 'arch')
-rw-r--r--  arch/arm/include/asm/mmu.h          |   1
-rw-r--r--  arch/arm/include/asm/mmu_context.h  |  15
-rw-r--r--  arch/arm/mm/context.c               | 124
3 files changed, 126 insertions(+), 14 deletions(-)
diff --git a/arch/arm/include/asm/mmu.h b/arch/arm/include/asm/mmu.h
index b561584d04a..68870c77667 100644
--- a/arch/arm/include/asm/mmu.h
+++ b/arch/arm/include/asm/mmu.h
@@ -6,6 +6,7 @@
 typedef struct {
 #ifdef CONFIG_CPU_HAS_ASID
 	unsigned int id;
+	spinlock_t id_lock;
 #endif
 	unsigned int kvm_seq;
 } mm_context_t;
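Aside: the `id` field that the new `id_lock` protects packs two things into one word, a generation ("version") in the upper bits and the hardware ASID in the low ASID_BITS. A minimal standalone sketch of that packing, assuming ASID_BITS is 8 as on ARMv6/ARMv7 (the constants mirror mmu_context.h; the sketch itself is not part of the patch):

	#include <stdio.h>

	#define ASID_BITS		8
	#define ASID_MASK		((~0U) << ASID_BITS)
	#define ASID_FIRST_VERSION	(1U << ASID_BITS)

	int main(void)
	{
		/* latest allocation: version 1, hardware ASID 5 */
		unsigned int cpu_last_asid = ASID_FIRST_VERSION + 5;
		unsigned int fresh = ASID_FIRST_VERSION + 3;	/* same version */
		unsigned int stale = 3;				/* version 0 */

		/* only the low ASID_BITS ever reach the CPU's context ID register */
		printf("hw asid of 0x%x: %u\n", fresh, fresh & ~ASID_MASK);

		/* the freshness test used by check_context(): XOR and shift */
		printf("fresh needs __new_context()? %u\n",
		       !!((fresh ^ cpu_last_asid) >> ASID_BITS));	/* 0 */
		printf("stale needs __new_context()? %u\n",
		       !!((stale ^ cpu_last_asid) >> ASID_BITS));	/* 1 */
		return 0;
	}

The upper bits exist purely so that a single XOR-and-shift can detect a context left over from an older version, as check_context() below does.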
diff --git a/arch/arm/include/asm/mmu_context.h b/arch/arm/include/asm/mmu_context.h
index de6cefb329d..a0b3cac0547 100644
--- a/arch/arm/include/asm/mmu_context.h
+++ b/arch/arm/include/asm/mmu_context.h
@@ -43,12 +43,23 @@ void __check_kvm_seq(struct mm_struct *mm);
 #define ASID_FIRST_VERSION	(1 << ASID_BITS)
 
 extern unsigned int cpu_last_asid;
+#ifdef CONFIG_SMP
+DECLARE_PER_CPU(struct mm_struct *, current_mm);
+#endif
 
 void __init_new_context(struct task_struct *tsk, struct mm_struct *mm);
 void __new_context(struct mm_struct *mm);
 
 static inline void check_context(struct mm_struct *mm)
 {
+	/*
+	 * This code is executed with interrupts enabled. Therefore,
+	 * mm->context.id cannot be updated to the latest ASID version
+	 * on a different CPU (and condition below not triggered)
+	 * without first getting an IPI to reset the context. The
+	 * alternative is to take a read_lock on mm->context.id_lock
+	 * (after changing its type to rwlock_t).
+	 */
 	if (unlikely((mm->context.id ^ cpu_last_asid) >> ASID_BITS))
 		__new_context(mm);
 
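The comment added to check_context() names an alternative the patch deliberately avoids: taking a read_lock on mm->context.id_lock after converting it to an rwlock_t, instead of relying on IPI ordering to make the lock-free read safe. A rough userspace illustration of that trade-off, with pthreads standing in for the kernel rwlock; this is only a sketch of the alternative the comment mentions, not what the patch does:

	#include <pthread.h>
	#include <stdio.h>

	#define ASID_BITS	8

	struct mm_context {
		unsigned int id;
		pthread_rwlock_t id_lock;	/* rwlock_t in the kernel variant */
	};

	static unsigned int cpu_last_asid = 1U << ASID_BITS;

	/* stand-in for __new_context(): move ctx to the current version */
	static void new_context(struct mm_context *ctx)
	{
		pthread_rwlock_wrlock(&ctx->id_lock);
		ctx->id = cpu_last_asid;
		pthread_rwlock_unlock(&ctx->id_lock);
	}

	static void check_context_rwlocked(struct mm_context *ctx)
	{
		unsigned int stale;

		/* the read side every context switch would now have to take */
		pthread_rwlock_rdlock(&ctx->id_lock);
		stale = (ctx->id ^ cpu_last_asid) >> ASID_BITS;
		pthread_rwlock_unlock(&ctx->id_lock);

		if (stale)
			new_context(ctx);
	}

	int main(void)
	{
		struct mm_context ctx = {
			.id = 0,			/* stale: version 0 */
			.id_lock = PTHREAD_RWLOCK_INITIALIZER,
		};

		check_context_rwlocked(&ctx);
		printf("ctx.id is now 0x%x\n", ctx.id);	/* 0x100 */
		return 0;
	}

Paying for a read lock on every context switch is exactly the cost the IPI-based scheme avoids, which is why the patch keeps id_lock a plain spinlock touched only on the slow path.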
@@ -108,6 +119,10 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,
 	__flush_icache_all();
 #endif
 	if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next)) || prev != next) {
+#ifdef CONFIG_SMP
+		struct mm_struct **crt_mm = &per_cpu(current_mm, cpu);
+		*crt_mm = next;
+#endif
 		check_context(next);
 		cpu_switch_mm(next->pgd, next);
 		if (cache_is_vivt())
diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
index a9e22e31eaa..b0ee9ba3cfa 100644
--- a/arch/arm/mm/context.c
+++ b/arch/arm/mm/context.c
@@ -10,12 +10,17 @@
 #include <linux/init.h>
 #include <linux/sched.h>
 #include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/percpu.h>
 
 #include <asm/mmu_context.h>
 #include <asm/tlbflush.h>
 
 static DEFINE_SPINLOCK(cpu_asid_lock);
 unsigned int cpu_last_asid = ASID_FIRST_VERSION;
+#ifdef CONFIG_SMP
+DEFINE_PER_CPU(struct mm_struct *, current_mm);
+#endif
 
 /*
  * We fork()ed a process, and we need a new context for the child
@@ -26,13 +31,109 @@ unsigned int cpu_last_asid = ASID_FIRST_VERSION;
 void __init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 {
 	mm->context.id = 0;
+	spin_lock_init(&mm->context.id_lock);
 }
 
+static void flush_context(void)
+{
+	/* set the reserved ASID before flushing the TLB */
+	asm("mcr	p15, 0, %0, c13, c0, 1\n" : : "r" (0));
+	isb();
+	local_flush_tlb_all();
+	if (icache_is_vivt_asid_tagged()) {
+		__flush_icache_all();
+		dsb();
+	}
+}
+
+#ifdef CONFIG_SMP
+
+static void set_mm_context(struct mm_struct *mm, unsigned int asid)
+{
+	unsigned long flags;
+
+	/*
+	 * Locking needed for multi-threaded applications where the
+	 * same mm->context.id could be set from different CPUs during
+	 * the broadcast. This function is also called via IPI so the
+	 * mm->context.id_lock has to be IRQ-safe.
+	 */
+	spin_lock_irqsave(&mm->context.id_lock, flags);
+	if (likely((mm->context.id ^ cpu_last_asid) >> ASID_BITS)) {
+		/*
+		 * Old version of ASID found. Set the new one and
+		 * reset mm_cpumask(mm).
+		 */
+		mm->context.id = asid;
+		cpumask_clear(mm_cpumask(mm));
+	}
+	spin_unlock_irqrestore(&mm->context.id_lock, flags);
+
+	/*
+	 * Set the mm_cpumask(mm) bit for the current CPU.
+	 */
+	cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
+}
+
+/*
+ * Reset the ASID on the current CPU. This function call is broadcast
+ * from the CPU handling the ASID rollover and holding cpu_asid_lock.
+ */
+static void reset_context(void *info)
+{
+	unsigned int asid;
+	unsigned int cpu = smp_processor_id();
+	struct mm_struct *mm = per_cpu(current_mm, cpu);
+
+	/*
+	 * Check if a current_mm was set on this CPU as it might still
+	 * be in the early booting stages and using the reserved ASID.
+	 */
+	if (!mm)
+		return;
+
+	smp_rmb();
+	asid = cpu_last_asid + cpu + 1;
+
+	flush_context();
+	set_mm_context(mm, asid);
+
+	/* set the new ASID */
+	asm("mcr	p15, 0, %0, c13, c0, 1\n" : : "r" (mm->context.id));
+	isb();
+}
+
+#else
+
+static inline void set_mm_context(struct mm_struct *mm, unsigned int asid)
+{
+	mm->context.id = asid;
+	cpumask_copy(mm_cpumask(mm), cpumask_of(smp_processor_id()));
+}
+
+#endif
+
 void __new_context(struct mm_struct *mm)
 {
 	unsigned int asid;
 
 	spin_lock(&cpu_asid_lock);
+#ifdef CONFIG_SMP
+	/*
+	 * Check the ASID again, in case the change was broadcast from
+	 * another CPU before we acquired the lock.
+	 */
+	if (unlikely(((mm->context.id ^ cpu_last_asid) >> ASID_BITS) == 0)) {
+		cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
+		spin_unlock(&cpu_asid_lock);
+		return;
+	}
+#endif
+	/*
+	 * At this point, it is guaranteed that the current mm (with
+	 * an old ASID) isn't active on any other CPU since the ASIDs
+	 * are changed simultaneously via IPI.
+	 */
 	asid = ++cpu_last_asid;
 	if (asid == 0)
 		asid = cpu_last_asid = ASID_FIRST_VERSION;
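The hunk below rewrites the rollover branch. Its trigger, `(asid & ~ASID_MASK) == 0`, fires when the increment has wrapped the low ASID_BITS to zero, i.e. the current version's 255 usable hardware ASIDs are exhausted. A tiny standalone demo of the condition (again assuming ASID_BITS == 8; not part of the patch):

	#include <stdio.h>

	#define ASID_BITS	8
	#define ASID_MASK	((~0U) << ASID_BITS)

	int main(void)
	{
		/* 0x1ff: the last hardware ASID of version 1 */
		unsigned int cpu_last_asid = (1U << ASID_BITS) | 0xff;
		unsigned int asid = ++cpu_last_asid;	/* 0x200: low 8 bits wrapped */

		if ((asid & ~ASID_MASK) == 0)
			printf("0x%x exhausts the version: flush and broadcast\n", asid);
		return 0;
	}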
@@ -42,20 +143,15 @@ void __new_context(struct mm_struct *mm)
 	 * to start a new version and flush the TLB.
 	 */
 	if (unlikely((asid & ~ASID_MASK) == 0)) {
-		asid = ++cpu_last_asid;
-		/* set the reserved ASID before flushing the TLB */
-		asm("mcr	p15, 0, %0, c13, c0, 1	@ set reserved context ID\n"
-		    :
-		    : "r" (0));
-		isb();
-		flush_tlb_all();
-		if (icache_is_vivt_asid_tagged()) {
-			__flush_icache_all();
-			dsb();
-		}
+		asid = cpu_last_asid + smp_processor_id() + 1;
+		flush_context();
+#ifdef CONFIG_SMP
+		smp_wmb();
+		smp_call_function(reset_context, NULL, 1);
+#endif
+		cpu_last_asid += NR_CPUS;
 	}
-	spin_unlock(&cpu_asid_lock);
 
-	cpumask_copy(mm_cpumask(mm), cpumask_of(smp_processor_id()));
-	mm->context.id = asid;
+	set_mm_context(mm, asid);
+	spin_unlock(&cpu_asid_lock);
 }
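Putting the rollover path together: the CPU that detects exhaustion takes cpu_last_asid + smp_processor_id() + 1 for its own mm, every other CPU's reset_context() evaluates the same expression with its own index, and cpu_last_asid finally advances by NR_CPUS so the whole reserved range is skipped by future allocations. A standalone simulation of that arithmetic (assuming ASID_BITS == 8 and NR_CPUS == 4; a plain array stands in for the per-CPU work done via IPI):

	#include <stdio.h>

	#define ASID_BITS	8
	#define ASID_MASK	((~0U) << ASID_BITS)
	#define NR_CPUS		4

	int main(void)
	{
		unsigned int cpu_last_asid = (1U << ASID_BITS) | 0xff; /* version 1 full */
		unsigned int asid[NR_CPUS];
		int cpu;

		++cpu_last_asid;		/* 0x200: rollover detected */

		/* what reset_context() computes on each CPU (initiator included) */
		for (cpu = 0; cpu < NR_CPUS; cpu++)
			asid[cpu] = cpu_last_asid + cpu + 1;

		cpu_last_asid += NR_CPUS;	/* skip the reserved per-CPU range */

		for (cpu = 0; cpu < NR_CPUS; cpu++)
			printf("cpu%d: version=%u hw_asid=%u\n", cpu,
			       asid[cpu] >> ASID_BITS, asid[cpu] & ~ASID_MASK);
		printf("next allocation: 0x%x\n", cpu_last_asid + 1);
		return 0;
	}

Because each CPU adds its own distinct index to the same base, the ASIDs handed out during a single rollover can never collide, so reset_context() needs no allocation lock of its own; the smp_wmb()/smp_rmb() pair in the patch orders the base value against the broadcast.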