| author | David S. Miller <davem@davemloft.net> | 2013-02-20 01:34:10 -0500 |
|---|---|---|
| committer | David S. Miller <davem@davemloft.net> | 2013-02-20 12:46:08 -0500 |
| commit | 0fbebed682ff2788dee58e8d7f7dda46e33aa10b (patch) | |
| tree | 62aecd278c28c2f5007ece44c5cf34429807e8ae /arch/sparc/mm/init_64.c | |
| parent | bcd896bae0166b4443503482a26ecf84d9ba60ab (diff) | |
sparc64: Fix tsb_grow() in atomic context.
If our first THP installation for an MM is via the set_pmd_at() done
during khugepaged's collapsing, we'll end up in tsb_grow() trying to
do a GFP_KERNEL allocation with several locks held.
Simply using GFP_ATOMIC in this situation is not the best option,
because we really can't have this allocation fail, so we'd like to
keep it an order-0 GFP_KERNEL allocation if possible.
Also, doing the TSB allocation from khugepaged is a really bad idea,
because in that context we could end up allocating it from the wrong
NUMA node.
So what we do is defer the hugepage TSB allocation until the first TLB
miss we take on a hugepage. This is slightly tricky because we have
to handle two unusual cases:
1) Taking the first hugepage TLB miss in the window trap handler.
We'll call the winfix_trampoline when that is detected.
2) An initial TSB allocation via TLB miss races with a hugetlb
fault on another cpu running the same MM. We handle this by
unconditionally loading the TSB we see into the current cpu
even if it's non-NULL at hugetlb_setup time.
Reported-by: Meelis Roos <mroos@ut.ee>
Signed-off-by: David S. Miller <davem@davemloft.net>
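
For reference, this is roughly how hugetlb_setup() reads once the hunk
below is applied. The sketch is assembled from the diff itself (the
function continues past the hunk with code this patch does not touch)
and leans on sparc64 kernel internals such as tsb_grow(),
tsb_context_switch() and smp_tsb_sync(), so it is illustrative rather
than a standalone, buildable unit:

```c
/* Sketch of hugetlb_setup() with this patch applied, assembled from
 * the hunk below.  It now runs on the first hugepage TLB miss and
 * takes pt_regs so it can bail out via an exception-table fixup.
 */
void hugetlb_setup(struct pt_regs *regs)
{
	struct mm_struct *mm = current->mm;
	struct tsb_config *tp;

	if (in_atomic() || !mm) {
		const struct exception_table_entry *entry;

		/* We cannot sleep in tsb_grow() from atomic context;
		 * if the caller provided an exception-table fixup,
		 * branch to it instead.
		 */
		entry = search_exception_tables(regs->tpc);
		if (entry) {
			regs->tpc = entry->fixup;
			regs->tnpc = regs->tpc + 4;
			return;
		}
		pr_alert("Unexpected HugeTLB setup in atomic context.\n");
		die_if_kernel("HugeTSB in atomic", regs);
	}

	/* Allocate the hugepage TSB only if nobody has done it yet... */
	tp = &mm->context.tsb_block[MM_TSB_HUGE];
	if (likely(tp->tsb == NULL))
		tsb_grow(mm, MM_TSB_HUGE, 0);

	/* ...but load whatever TSB we see into this cpu unconditionally,
	 * which covers case 2: a racing hugetlb fault on another cpu
	 * running the same MM.
	 */
	tsb_context_switch(mm);
	smp_tsb_sync(mm);

	/* Remainder of the function is unchanged by this patch. */
}
```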
Diffstat (limited to 'arch/sparc/mm/init_64.c')
-rw-r--r-- | arch/sparc/mm/init_64.c | 24 |
1 file changed, 19 insertions(+), 5 deletions(-)
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index 0d0bc392c35f..82bbf048a5b0 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -2718,14 +2718,28 @@ static void context_reload(void *__data)
 		load_secondary_context(mm);
 }
 
-void hugetlb_setup(struct mm_struct *mm)
+void hugetlb_setup(struct pt_regs *regs)
 {
-	struct tsb_config *tp = &mm->context.tsb_block[MM_TSB_HUGE];
+	struct mm_struct *mm = current->mm;
+	struct tsb_config *tp;
 
-	if (likely(tp->tsb != NULL))
-		return;
+	if (in_atomic() || !mm) {
+		const struct exception_table_entry *entry;
+
+		entry = search_exception_tables(regs->tpc);
+		if (entry) {
+			regs->tpc = entry->fixup;
+			regs->tnpc = regs->tpc + 4;
+			return;
+		}
+		pr_alert("Unexpected HugeTLB setup in atomic context.\n");
+		die_if_kernel("HugeTSB in atomic", regs);
+	}
+
+	tp = &mm->context.tsb_block[MM_TSB_HUGE];
+	if (likely(tp->tsb == NULL))
+		tsb_grow(mm, MM_TSB_HUGE, 0);
 
-	tsb_grow(mm, MM_TSB_HUGE, 0);
 	tsb_context_switch(mm);
 	smp_tsb_sync(mm);
 