author	David S. Miller <davem@davemloft.net>	2013-02-20 01:34:10 -0500
committer	David S. Miller <davem@davemloft.net>	2013-02-20 12:46:08 -0500
commit	0fbebed682ff2788dee58e8d7f7dda46e33aa10b (patch)
tree	62aecd278c28c2f5007ece44c5cf34429807e8ae /arch/sparc/kernel
parent	bcd896bae0166b4443503482a26ecf84d9ba60ab (diff)
sparc64: Fix tsb_grow() in atomic context.
If our first THP installation for an MM is via the set_pmd_at() done during khugepaged's collapsing we'll end up in tsb_grow() trying to do a GFP_KERNEL allocation with several locks held.

Simply using GFP_ATOMIC in this situation is not the best option because we really can't have this fail, so we'd really like to keep this an order 0 GFP_KERNEL allocation if possible.

Also, doing the TSB allocation from khugepaged is a really bad idea because we'll allocate it potentially from the wrong NUMA node in that context.

So what we do is defer the hugepage TSB allocation until the first TLB miss we take on a hugepage. This is slightly tricky because we have to handle two unusual cases:

1) Taking the first hugepage TLB miss in the window trap handler. We'll call the winfix_trampoline when that is detected.

2) An initial TSB allocation via TLB miss races with a hugetlb fault on another cpu running the same MM. We handle this by unconditionally loading the TSB we see into the current cpu even if it's non-NULL at hugetlb_setup time.

Reported-by: Meelis Roos <mroos@ut.ee>
Signed-off-by: David S. Miller <davem@davemloft.net>
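For reference, the deferred-allocation logic described above can be summarized in simplified C. This is only a sketch, not the exact in-tree hugetlb_setup(): the tsb_block[MM_TSB_HUGE] field access is an assumption made for illustration, while tsb_grow() and tsb_context_switch() are existing sparc64 helpers referenced by this change.

/*
 * Simplified sketch of the deferred hugepage TSB allocation described
 * above.  Not the exact in-tree code: the tsb_block[MM_TSB_HUGE] field
 * access is an assumption for illustration only.
 */
#include <linux/mm.h>
#include <linux/sched.h>
#include <asm/mmu_context.h>

void hugetlb_setup_sketch(struct pt_regs *regs)
{
	struct mm_struct *mm = current->mm;

	/* We only get here via etrap from the TLB miss handler, so we
	 * are in process context and an order-0 GFP_KERNEL allocation
	 * on the local NUMA node is safe.
	 */
	if (mm->context.tsb_block[MM_TSB_HUGE].tsb == NULL)
		tsb_grow(mm, MM_TSB_HUGE, 0);

	/* Another cpu running the same mm may have won the allocation
	 * race through a hugetlb fault.  Load whatever TSB is installed
	 * now into this cpu unconditionally, so the replayed TLB miss
	 * can make progress either way.
	 */
	tsb_context_switch(mm);
}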
Diffstat (limited to 'arch/sparc/kernel')
-rw-r--r--	arch/sparc/kernel/tsb.S	39
1 file changed, 35 insertions(+), 4 deletions(-)
diff --git a/arch/sparc/kernel/tsb.S b/arch/sparc/kernel/tsb.S
index d4bdc7a62375..a313e4a9399b 100644
--- a/arch/sparc/kernel/tsb.S
+++ b/arch/sparc/kernel/tsb.S
@@ -136,12 +136,43 @@ tsb_miss_page_table_walk_sun4v_fastpath:
 	 nop
 
 	/* It is a huge page, use huge page TSB entry address we
-	 * calculated above.
+	 * calculated above.  If the huge page TSB has not been
+	 * allocated, setup a trap stack and call hugetlb_setup()
+	 * to do so, then return from the trap to replay the TLB
+	 * miss.
+	 *
+	 * This is necessary to handle the case of transparent huge
+	 * pages where we don't really have a non-atomic context
+	 * in which to allocate the hugepage TSB hash table.  When
+	 * the 'mm' faults in the hugepage for the first time, we
+	 * thus handle it here.  This also makes sure that we can
+	 * allocate the TSB hash table on the correct NUMA node.
 	 */
 	TRAP_LOAD_TRAP_BLOCK(%g7, %g2)
-	ldx		[%g7 + TRAP_PER_CPU_TSB_HUGE_TEMP], %g2
-	cmp		%g2, -1
-	movne		%xcc, %g2, %g1
+	ldx		[%g7 + TRAP_PER_CPU_TSB_HUGE_TEMP], %g1
+	cmp		%g1, -1
+	bne,pt		%xcc, 60f
+	 nop
+
+661:	rdpr		%pstate, %g5
+	wrpr		%g5, PSTATE_AG | PSTATE_MG, %pstate
+	.section	.sun4v_2insn_patch, "ax"
+	.word		661b
+	SET_GL(1)
+	nop
+	.previous
+
+	rdpr	%tl, %g3
+	cmp	%g3, 1
+	bne,pn	%xcc, winfix_trampoline
+	 nop
+	ba,pt		%xcc, etrap
+	 rd		%pc, %g7
+	call		hugetlb_setup
+	 add		%sp, PTREGS_OFF, %o0
+	ba,pt		%xcc, rtrap
+	 nop
+
 60:
 #endif
 
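The control flow added above may be easier to follow as a small host-side model in plain C. Everything below is a hypothetical stand-in, not kernel code: the globals model the per-cpu huge-TSB pointer and the trap level, and the stub functions mark the three possible exits (label 60, winfix_trampoline, and the etrap/hugetlb_setup/rtrap path).

/* Hypothetical host-side model of the new hugepage TLB-miss path; the
 * helpers and globals below are stand-ins, not kernel APIs.
 */
#include <stdio.h>

#define TSB_HUGE_UNALLOCATED (-1L)

static long per_cpu_tsb_huge_temp = TSB_HUGE_UNALLOCATED; /* TRAP_PER_CPU_TSB_HUGE_TEMP */
static int trap_level = 1;                                /* %tl at miss time */

static void probe_huge_tsb(void)      { puts("60: probe existing huge TSB entry"); }
static void winfix_trampoline(void)   { puts("window trap: retry via winfix_trampoline"); }
static void etrap_hugetlb_setup(void) { puts("etrap; hugetlb_setup(); rtrap replays the miss"); }

static void hugepage_tlb_miss(void)
{
	/* Huge TSB already allocated: take the normal fast path (60:). */
	if (per_cpu_tsb_huge_temp != TSB_HUGE_UNALLOCATED) {
		probe_huge_tsb();
		return;
	}

	/* Unusual case 1: the first hugepage miss happened inside the
	 * window trap handler (%tl != 1), so bounce through the
	 * window-fixup trampoline before anything else.
	 */
	if (trap_level != 1) {
		winfix_trampoline();
		return;
	}

	/* Otherwise build a trap frame, allocate the huge TSB in process
	 * context, and return from the trap so the TLB miss is replayed.
	 */
	etrap_hugetlb_setup();
}

int main(void)
{
	hugepage_tlb_miss();
	return 0;
}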