author    Vineet Gupta <vgupta@synopsys.com>    2013-07-24 16:53:45 -0400
committer Vineet Gupta <vgupta@synopsys.com>    2013-08-30 12:12:18 -0400
commit    3daa48d1d9bc44baa079d65e72ef2e3f1139ac03
tree      2e659049c5ee5584c789cc42643aa653cfdb307d /arch/arc/include
parent    5bd87adf9b2ae5fa1bb469c68029b4eec06d6e03
ARC: [ASID] get_new_mmu_context() to conditionally allocate new ASID
ASID allocation changes, part 1
This patch does two things:

(1) get_new_mmu_context() now moves mm->ASID to a new value ONLY if the
    current one is from a previous allocation cycle/generation, OR if mm
    had no ASID allocated at all (before, it would unconditionally move
    to a new ASID), as sketched below.

    Callers that want the ASID updated unconditionally, e.g.
    local_flush_tlb_mm() (invalidating the parent's address space at
    fork), first need to force the parent to an unallocated ASID.

(2) get_new_mmu_context() always programs the MMU PID register with the
    existing/new ASID value.
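For illustration, here is a minimal user-space model of the resulting control
flow. The generation test and the set_hw label mirror the hunks below; the
asid_mm_map bookkeeping, the TLB flush and slot-stealing at rollover, and the
IRQ disabling are elided, and get_new_ctx()/mm_model are names invented for
this sketch:

    #include <stdio.h>

    #define NUM_ASID 256            /* hardware PID is 8 bits wide */
    #define NO_ASID  NUM_ASID       /* 256: out of band, forces allocation */

    struct mm_model { unsigned int asid; };

    static unsigned int asid_cache;     /* highest ASID handed out this cycle */

    static void get_new_ctx(struct mm_model *mm)
    {
            /* valid ASID from the current generation: keep it */
            if (mm->asid <= asid_cache)
                    goto set_hw;

            /* new generation starts here; the real code also flushes the
             * TLB and steals the slot from its previous owner */
            if (++asid_cache >= NUM_ASID)
                    asid_cache = 0;

            mm->asid = asid_cache;

    set_hw:
            /* the kernel writes ARC_REG_PID here; we just report */
            printf("PID reg <- ASID %u\n", mm->asid);
    }

    int main(void)
    {
            struct mm_model a = { NO_ASID };

            get_new_ctx(&a);    /* no ASID yet (256 > asid_cache): allocates */
            get_new_ctx(&a);    /* same generation: ASID kept, PID still set */
            a.asid = NO_ASID;   /* what destroying the context does */
            get_new_ctx(&a);    /* forced reallocation, as local_flush_tlb_mm()
                                 * relies on */
            return 0;
    }

Compiled with a C compiler, this allocates a new ASID only on the first call
and after the forced invalidation, while the PID write happens on every call,
which is exactly the pair of behaviors (1) and (2) describe.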
The gains are:
- all ASID allocation logic is consolidated in get_new_mmu_context()
- no duplicated PID-register setup code in switch_mm()
- enables a future change to fold activate_mm() into switch_mm()
Signed-off-by: Vineet Gupta <vgupta@synopsys.com>
Diffstat (limited to 'arch/arc/include')
 -rw-r--r--  arch/arc/include/asm/mmu_context.h | 45
 1 file changed, 18 insertions(+), 27 deletions(-)
diff --git a/arch/arc/include/asm/mmu_context.h b/arch/arc/include/asm/mmu_context.h
index a63800fd1dba..7a3ecd25ffc9 100644
--- a/arch/arc/include/asm/mmu_context.h
+++ b/arch/arc/include/asm/mmu_context.h
@@ -69,8 +69,8 @@ extern struct mm_struct *asid_mm_map[NUM_ASID + 1];
 extern int asid_cache;
 
 /*
- * Assign a new ASID to task. If the task already has an ASID, it is
- * relinquished.
+ * Get a new ASID if task doesn't have a valid one (unalloc or from prev cycle)
+ * Also set the MMU PID register to existing/updated ASID
  */
 static inline void get_new_mmu_context(struct mm_struct *mm)
 {
@@ -80,6 +80,17 @@ static inline void get_new_mmu_context(struct mm_struct *mm)
 	local_irq_save(flags);
 
 	/*
+	 * Move to new ASID if it was not from current alloc-cycle/generation.
+	 *
+	 * Note: Callers needing new ASID unconditionally, independent of
+	 *       generation, e.g. local_flush_tlb_mm() for forking parent,
+	 *       first need to destroy the context, setting it to invalid
+	 *       value.
+	 */
+	if (mm->context.asid <= asid_cache)
+		goto set_hw;
+
+	/*
 	 * Relinquish the currently owned ASID (if any).
 	 * Doing unconditionally saves a cmp-n-branch; for already unused
 	 * ASID slot, the value was/remains NULL
@@ -99,9 +110,9 @@ static inline void get_new_mmu_context(struct mm_struct *mm)
 	 * task with ASID from prev allocation cycle (before ASID roll-over).
 	 *
 	 * This might look wrong - if we are re-using some other task's ASID,
-	 * won't we use it's stale TLB entries too. Actually switch_mm( ) takes
+	 * won't we use it's stale TLB entries too. Actually the algorithm takes
 	 * care of such a case: it ensures that task with ASID from prev alloc
-	 * cycle, when scheduled will refresh it's ASID: see switch_mm( ) below
+	 * cycle, when scheduled will refresh it's ASID
 	 * The stealing scenario described here will only happen if that task
 	 * didn't get a chance to refresh it's ASID - implying stale entries
 	 * won't exist.
@@ -114,7 +125,8 @@ static inline void get_new_mmu_context(struct mm_struct *mm)
 	asid_mm_map[asid_cache] = mm;
 	mm->context.asid = asid_cache;
 
-	write_aux_reg(ARC_REG_PID, asid_cache | MMU_ENABLE);
+set_hw:
+	write_aux_reg(ARC_REG_PID, mm->context.asid | MMU_ENABLE);
 
 	local_irq_restore(flags);
 }
@@ -141,28 +153,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 	write_aux_reg(ARC_REG_SCRATCH_DATA0, next->pgd);
 #endif
 
-	/*
-	 * Get a new ASID if task doesn't have a valid one. Possible when
-	 * -task never had an ASID (fresh after fork)
-	 * -it's ASID was stolen - past an ASID roll-over.
-	 * -There's a third obscure scenario (if this task is running for the
-	 *  first time afer an ASID rollover), where despite having a valid
-	 *  ASID, we force a get for new ASID - see comments at top.
-	 *
-	 * Both the non-alloc scenario and first-use-after-rollover can be
-	 * detected using the single condition below: NO_ASID = 256
-	 * while asid_cache is always a valid ASID value (0-255).
-	 */
-	if (next->context.asid > asid_cache) {
-		get_new_mmu_context(next);
-	} else {
-		/*
-		 * XXX: This will never happen given the chks above
-		 * BUG_ON(next->context.asid > MAX_ASID);
-		 */
-		write_aux_reg(ARC_REG_PID, next->context.asid | MMU_ENABLE);
-	}
-
+	get_new_mmu_context(next);
 }
 
 static inline void destroy_context(struct mm_struct *mm)
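As a quick sanity check of the single comparison that now gates allocation,
the constants follow the removed switch_mm() comment (valid ASIDs are 0-255,
NO_ASID = 256); needs_new_asid() and the sample values are invented for this
sketch:

    #include <assert.h>

    #define NO_ASID 256

    /* does this mm need a fresh ASID? (the test from get_new_mmu_context) */
    static int needs_new_asid(unsigned int mm_asid, unsigned int asid_cache)
    {
            return mm_asid > asid_cache;
    }

    int main(void)
    {
            /* fresh after fork: never had an ASID */
            assert(needs_new_asid(NO_ASID, 42));

            /* ASID 200 handed out last cycle; cache restarted and is now 5 */
            assert(needs_new_asid(200, 5));

            /* ASID 3 from the current cycle (cache already past it): keep */
            assert(!needs_new_asid(3, 5));

            return 0;
    }

One comparison covers both the never-allocated and the stale-generation cases,
which is why the removed if/else in switch_mm() could collapse to a single
get_new_mmu_context() call.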