Diffstat (limited to 'arch/sparc/mm/init_64.c')
-rw-r--r--  arch/sparc/mm/init_64.c  89
1 file changed, 60 insertions, 29 deletions
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index 0cda653ae007..3c40ebd50f92 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -358,7 +358,8 @@ static int __init setup_hugepagesz(char *string)
 	}
 
 	if ((hv_pgsz_mask & cpu_pgsz_mask) == 0U) {
-		pr_warn("hugepagesz=%llu not supported by MMU.\n",
+		hugetlb_bad_size();
+		pr_err("hugepagesz=%llu not supported by MMU.\n",
 			hugepage_size);
 		goto out;
 	}
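Two things change in the hunk above: the message is promoted from pr_warn() to pr_err(), and the rejection now also calls hugetlb_bad_size(), which records that the most recent hugepagesz= argument was invalid so that a following hugepages= count is not silently applied to it. A minimal userspace sketch of that flag pattern, assuming hugetlb_bad_size() does no more than poison the last parsed size (only the name parsed_valid_hugepagesz mirrors mm/hugetlb.c; everything else is invented for illustration):

/* Sketch: a poisoned hugepagesz= makes the next hugepages= a no-op. */
#include <stdbool.h>
#include <stdio.h>

static bool parsed_valid_hugepagesz = true;

static void hugetlb_bad_size(void)
{
	parsed_valid_hugepagesz = false;	/* poison the last hugepagesz= */
}

static void hugetlb_nrpages(unsigned long n)
{
	if (!parsed_valid_hugepagesz) {
		printf("hugepages=%lu ignored: invalid hugepagesz\n", n);
		parsed_valid_hugepagesz = true;	/* reset for the next option */
		return;
	}
	printf("would reserve %lu huge pages\n", n);
}

int main(void)
{
	hugetlb_bad_size();	/* the MMU rejected the size, as in the hunk */
	hugetlb_nrpages(16);	/* a following hugepages=16 is now skipped */
	return 0;
}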
@@ -706,10 +707,58 @@ EXPORT_SYMBOL(__flush_dcache_range);
 
 /* get_new_mmu_context() uses "cache + 1".  */
 DEFINE_SPINLOCK(ctx_alloc_lock);
-unsigned long tlb_context_cache = CTX_FIRST_VERSION - 1;
+unsigned long tlb_context_cache = CTX_FIRST_VERSION;
 #define MAX_CTX_NR	(1UL << CTX_NR_BITS)
 #define CTX_BMAP_SLOTS	BITS_TO_LONGS(MAX_CTX_NR)
 DECLARE_BITMAP(mmu_context_bmap, MAX_CTX_NR);
+DEFINE_PER_CPU(struct mm_struct *, per_cpu_secondary_mm) = {0};
+
+static void mmu_context_wrap(void)
+{
+	unsigned long old_ver = tlb_context_cache & CTX_VERSION_MASK;
+	unsigned long new_ver, new_ctx, old_ctx;
+	struct mm_struct *mm;
+	int cpu;
+
+	bitmap_zero(mmu_context_bmap, 1 << CTX_NR_BITS);
+
+	/* Reserve kernel context */
+	set_bit(0, mmu_context_bmap);
+
+	new_ver = (tlb_context_cache & CTX_VERSION_MASK) + CTX_FIRST_VERSION;
+	if (unlikely(new_ver == 0))
+		new_ver = CTX_FIRST_VERSION;
+	tlb_context_cache = new_ver;
+
+	/*
+	 * Make sure that any new mm that is added into per_cpu_secondary_mm
+	 * will go through the get_new_mmu_context() path.
+	 */
+	mb();
+
+	/*
+	 * Update the version to current on those CPUs that had valid
+	 * secondary contexts.
+	 */
+	for_each_online_cpu(cpu) {
+		/*
+		 * If a new mm is stored after we took this mm from the
+		 * array, it will go into the get_new_mmu_context() path,
+		 * because we already bumped the version in tlb_context_cache.
+		 */
+		mm = per_cpu(per_cpu_secondary_mm, cpu);
+
+		if (unlikely(!mm || mm == &init_mm))
+			continue;
+
+		old_ctx = mm->context.sparc64_ctx_val;
+		if (likely((old_ctx & CTX_VERSION_MASK) == old_ver)) {
+			new_ctx = (old_ctx & ~CTX_VERSION_MASK) | new_ver;
+			set_bit(new_ctx & CTX_NR_MASK, mmu_context_bmap);
+			mm->context.sparc64_ctx_val = new_ctx;
+		}
+	}
+}
 
 /* Caller does TLB context flushing on local CPU if necessary.
  * The caller also ensures that CTX_VALID(mm->context) is false.
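mmu_context_wrap() leans on the layout of a sparc64 context value: a version field in the high bits (CTX_VERSION_MASK) concatenated with a context number in the low bits (CTX_NR_MASK). Bumping the version invalidates every context handed out under the old version in one step, and re-stamping keeps a live task's number while upgrading its version. A standalone sketch of that arithmetic; the macro widths follow arch/sparc/include/asm/mmu_64.h from memory (CTX_NR_BITS is 13 there), so treat the exact values as assumptions:

/* Sketch of the version|number split used by mmu_context_wrap(). */
#include <stdio.h>

#define CTX_NR_BITS		13
#define CTX_NR_MASK		((1UL << CTX_NR_BITS) - 1)
#define CTX_VERSION_SHIFT	CTX_NR_BITS
#define CTX_FIRST_VERSION	(1UL << CTX_VERSION_SHIFT)
#define CTX_VERSION_MASK	(~0UL << CTX_VERSION_SHIFT)

int main(void)
{
	unsigned long ctx = (3UL << CTX_VERSION_SHIFT) | 42;	/* version 3, nr 42 */

	/* Same steps as the wrap path: bump the version, guarding against
	 * the (eventual) overflow of the version counter itself ... */
	unsigned long new_ver = (ctx & CTX_VERSION_MASK) + CTX_FIRST_VERSION;
	if (new_ver == 0)
		new_ver = CTX_FIRST_VERSION;

	/* ... then re-stamp a live context: keep the number, swap the version. */
	unsigned long restamped = (ctx & ~CTX_VERSION_MASK) | new_ver;

	printf("nr=%lu old_ver=%lu new_ver=%lu\n",
	       ctx & CTX_NR_MASK,
	       ctx >> CTX_VERSION_SHIFT,
	       restamped >> CTX_VERSION_SHIFT);
	return 0;
}

Per the function's own comments, the mb() orders the version bump in tlb_context_cache against the per-CPU scan: a racing switch either stores its mm after the bump, in which case it takes the get_new_mmu_context() path itself, or stores it early enough for the scan to re-stamp it.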
@@ -725,48 +774,30 @@ void get_new_mmu_context(struct mm_struct *mm)
 {
 	unsigned long ctx, new_ctx;
 	unsigned long orig_pgsz_bits;
-	int new_version;
 
 	spin_lock(&ctx_alloc_lock);
+retry:
+	/* A wrap may have happened; test again whether our context became valid. */
+	if (unlikely(CTX_VALID(mm->context)))
+		goto out;
 	orig_pgsz_bits = (mm->context.sparc64_ctx_val & CTX_PGSZ_MASK);
 	ctx = (tlb_context_cache + 1) & CTX_NR_MASK;
 	new_ctx = find_next_zero_bit(mmu_context_bmap, 1 << CTX_NR_BITS, ctx);
-	new_version = 0;
 	if (new_ctx >= (1 << CTX_NR_BITS)) {
 		new_ctx = find_next_zero_bit(mmu_context_bmap, ctx, 1);
 		if (new_ctx >= ctx) {
-			int i;
-			new_ctx = (tlb_context_cache & CTX_VERSION_MASK) +
-				CTX_FIRST_VERSION;
-			if (new_ctx == 1)
-				new_ctx = CTX_FIRST_VERSION;
-
-			/* Don't call memset, for 16 entries that's just
-			 * plain silly...
-			 */
-			mmu_context_bmap[0] = 3;
-			mmu_context_bmap[1] = 0;
-			mmu_context_bmap[2] = 0;
-			mmu_context_bmap[3] = 0;
-			for (i = 4; i < CTX_BMAP_SLOTS; i += 4) {
-				mmu_context_bmap[i + 0] = 0;
-				mmu_context_bmap[i + 1] = 0;
-				mmu_context_bmap[i + 2] = 0;
-				mmu_context_bmap[i + 3] = 0;
-			}
-			new_version = 1;
-			goto out;
+			mmu_context_wrap();
+			goto retry;
 		}
 	}
+	if (mm->context.sparc64_ctx_val)
+		cpumask_clear(mm_cpumask(mm));
 	mmu_context_bmap[new_ctx>>6] |= (1UL << (new_ctx & 63));
 	new_ctx |= (tlb_context_cache & CTX_VERSION_MASK);
-out:
 	tlb_context_cache = new_ctx;
 	mm->context.sparc64_ctx_val = new_ctx | orig_pgsz_bits;
+out:
 	spin_unlock(&ctx_alloc_lock);
-
-	if (unlikely(new_version))
-		smp_new_mmu_context_version();
 }
 
 static int numa_enabled = 1;
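Taken together, the rewritten get_new_mmu_context() is a classic versioned allocator: scan the bitmap for a free number after the last one handed out, and when both scan passes come up empty, wrap the version and retry. The retry label also covers the case where mmu_context_wrap() has just re-stamped this very mm (it may have been live as a secondary context on some CPU), making CTX_VALID(mm->context) true. The new cpumask_clear(mm_cpumask(mm)) call pairs with this: an mm that receives a fresh number must not let any CPU keep treating its old hardware context as current. Below is a compressed userspace model of the loop, with a tiny context space, a plain array in place of the kernel bitmap, and the two find_next_zero_bit() passes folded into one modular scan; all names here are invented for illustration.

/* Model: versioned context allocation with wrap-on-exhaustion. */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define NR_CTX		8			/* tiny, so wrap shows up quickly */
#define VERSION_STEP	(1UL << 8)		/* "version" lives above the number bits */

static bool ctx_used[NR_CTX];
static unsigned long cache = VERSION_STEP;	/* like tlb_context_cache = CTX_FIRST_VERSION */

static unsigned long alloc_ctx(void)
{
	for (;;) {
		unsigned long start = (cache + 1) % NR_CTX;

		for (unsigned long i = 0; i < NR_CTX; i++) {
			unsigned long nr = (start + i) % NR_CTX;

			if (nr == 0 || ctx_used[nr])	/* nr 0 is the kernel's */
				continue;
			ctx_used[nr] = true;
			cache = (cache & ~(unsigned long)(NR_CTX - 1)) | nr;
			return cache;
		}

		/* Space exhausted: a new version invalidates every old
		 * context at once, the job mmu_context_wrap() does above. */
		memset(ctx_used, 0, sizeof(ctx_used));
		cache = (cache & ~(unsigned long)(NR_CTX - 1)) + VERSION_STEP;
	}
}

int main(void)
{
	for (int i = 0; i < 10; i++)
		printf("ctx %#lx\n", alloc_ctx());
	return 0;
}

With only 8 slots, the tenth allocation already carries the second version (0x201 after 0x101..0x107), mirroring how the sparc64 code survives exhaustion of its 8192-entry context space without ever handing a live number to two address spaces.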