author     Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>  2017-03-21 23:37:00 -0400
committer  Michael Ellerman <mpe@ellerman.id.au>               2017-04-01 06:12:27 -0400
commit     82228e362f9b7f4b876d0fbb1036c235797c6b1d (patch)
tree       ba7432ffa3577cd4cadb5bd41e1fc8ac21e4a7d3
parent     bb1832217a859f6dbe4a45ff2ba7fdcab0bb3958 (diff)
powerpc/pseries: Skip using reserved virtual address range
Now that we use all the available virtual address range, we need to make sure we don't generate a VSID that overlaps with the reserved VSID range. The reserved VSID range includes the virtual address range used by the adjunct partition and also the VRMA virtual segment. We find the context value that can result in generating such a VSID and reserve it early in boot.

We don't look at the adjunct range, because for now we disable adjunct usage in a Linux LPAR via the CAS interface.

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
[mpe: Rewrite hash__reserve_context_id(), move the rest into pseries]
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
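For reference, here is a minimal user-space sketch (not part of the patch) of the arithmetic it relies on. Per the comment added in vsid_unscramble() below, the scramble is vsid = (protovsid * multiplier) % modulus, so multiplying by the new VSID_MULINV_1T constant inverts it, and shifting the recovered proto-VSID right by ESID_BITS_1T gives the context id that reserve_vrma_context_id() hands to hash__reserve_context_id(). The sketch assumes the 68-bit VA layout (VA_BITS = 68, SID_SHIFT_1T = 40) and CONTEXT_BITS = 19 (hence ESID_BITS_1T = 9); check mmu-hash.h for the authoritative values. It uses GCC's unsigned __int128 instead of the kernel's overflow-avoidance path.

/* vrma_context_demo.c - standalone sketch, not part of the patch */
#include <stdio.h>

#define VSID_MULTIPLIER_1T	12538073UL	/* same 24-bit prime as mmu-hash.h */
#define VSID_MULINV_1T		209034062UL	/* modular inverse added by this patch */
#define VSID_MODULUS_1T		((1UL << (68 - 40)) - 1)	/* assumes 68-bit VA, SID_SHIFT_1T = 40 */
#define VRMA_VSID		0x1ffffffUL
#define ESID_BITS_1T		9		/* assumes CONTEXT_BITS = 19 */

/* Forward scramble: vsid = (protovsid * multiplier) % modulus */
static unsigned long vsid_scramble_1t(unsigned long protovsid)
{
	return (unsigned long)(((unsigned __int128)protovsid * VSID_MULTIPLIER_1T) % VSID_MODULUS_1T);
}

/* Inverse of the above, using 128-bit math instead of the kernel's overflow handling */
static unsigned long vsid_unscramble_1t(unsigned long vsid)
{
	return (unsigned long)(((unsigned __int128)vsid * VSID_MULINV_1T) % VSID_MODULUS_1T);
}

int main(void)
{
	unsigned long protovsid = vsid_unscramble_1t(VRMA_VSID);

	/* Round trip: scrambling the recovered proto-VSID must give VRMA_VSID back */
	printf("protovsid = 0x%lx, round-trip ok = %d\n",
	       protovsid, vsid_scramble_1t(protovsid) == VRMA_VSID);
	/* The context id that reserve_vrma_context_id() would reserve */
	printf("reserved context id = 0x%lx\n", protovsid >> ESID_BITS_1T);
	return 0;
}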
-rw-r--r--  arch/powerpc/include/asm/book3s/64/mmu-hash.h  |  7
-rw-r--r--  arch/powerpc/include/asm/kvm_book3s_64.h       |  2
-rw-r--r--  arch/powerpc/include/asm/mmu_context.h         |  1
-rw-r--r--  arch/powerpc/mm/hash_utils_64.c                |  1
-rw-r--r--  arch/powerpc/mm/mmu_context_book3s64.c         | 16
-rw-r--r--  arch/powerpc/platforms/pseries/lpar.c          | 61
6 files changed, 85 insertions, 3 deletions
diff --git a/arch/powerpc/include/asm/book3s/64/mmu-hash.h b/arch/powerpc/include/asm/book3s/64/mmu-hash.h
index 5961b0d65a79..6d56974adf28 100644
--- a/arch/powerpc/include/asm/book3s/64/mmu-hash.h
+++ b/arch/powerpc/include/asm/book3s/64/mmu-hash.h
@@ -589,11 +589,18 @@ extern void slb_set_size(u16 size);
 #define VSID_MULTIPLIER_256M	ASM_CONST(12538073)	/* 24-bit prime */
 #define VSID_BITS_256M		(VA_BITS - SID_SHIFT)
 #define VSID_BITS_65_256M	(65 - SID_SHIFT)
+/*
+ * Modular multiplicative inverse of VSID_MULTIPLIER under modulo VSID_MODULUS
+ */
+#define VSID_MULINV_256M	ASM_CONST(665548017062)
 
 #define VSID_MULTIPLIER_1T	ASM_CONST(12538073)	/* 24-bit prime */
 #define VSID_BITS_1T		(VA_BITS - SID_SHIFT_1T)
 #define VSID_BITS_65_1T		(65 - SID_SHIFT_1T)
+#define VSID_MULINV_1T		ASM_CONST(209034062)
 
+/* 1TB VSID reserved for VRMA */
+#define VRMA_VSID		0x1ffffffUL
 #define USER_VSID_RANGE	(1UL << (ESID_BITS + SID_SHIFT))
 
 /* 4 bits per slice and we have one slice per 1TB */
diff --git a/arch/powerpc/include/asm/kvm_book3s_64.h b/arch/powerpc/include/asm/kvm_book3s_64.h
index d9b48f5bb606..d55c7f881ce7 100644
--- a/arch/powerpc/include/asm/kvm_book3s_64.h
+++ b/arch/powerpc/include/asm/kvm_book3s_64.h
@@ -49,8 +49,6 @@ static inline bool kvm_is_radix(struct kvm *kvm)
 #define KVM_DEFAULT_HPT_ORDER	24	/* 16MB HPT by default */
 #endif
 
-#define VRMA_VSID	0x1ffffffUL	/* 1TB VSID reserved for VRMA */
-
 /*
  * We use a lock bit in HPTE dword 0 to synchronize updates and
  * accesses to each HPTE, and another bit to indicate non-present
diff --git a/arch/powerpc/include/asm/mmu_context.h b/arch/powerpc/include/asm/mmu_context.h
index 7d721101ec78..78803a7ebdd9 100644
--- a/arch/powerpc/include/asm/mmu_context.h
+++ b/arch/powerpc/include/asm/mmu_context.h
@@ -52,6 +52,7 @@ static inline void switch_mmu_context(struct mm_struct *prev,
 }
 
 extern int hash__alloc_context_id(void);
+extern void hash__reserve_context_id(int id);
 extern void __destroy_context(int context_id);
 static inline void mmu_context_init(void) { }
 #else
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 716255f88bbd..8848fec51ce9 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -1868,5 +1868,4 @@ static int __init hash64_debugfs(void)
 	return 0;
 }
 machine_device_initcall(pseries, hash64_debugfs);
-
 #endif /* CONFIG_DEBUG_FS */
diff --git a/arch/powerpc/mm/mmu_context_book3s64.c b/arch/powerpc/mm/mmu_context_book3s64.c
index fd0bc6db2dcd..7bc5b63034db 100644
--- a/arch/powerpc/mm/mmu_context_book3s64.c
+++ b/arch/powerpc/mm/mmu_context_book3s64.c
@@ -57,6 +57,22 @@ again:
 	return index;
 }
 
+void hash__reserve_context_id(int id)
+{
+	int rc, result = 0;
+
+	do {
+		if (!ida_pre_get(&mmu_context_ida, GFP_KERNEL))
+			break;
+
+		spin_lock(&mmu_context_lock);
+		rc = ida_get_new_above(&mmu_context_ida, id, &result);
+		spin_unlock(&mmu_context_lock);
+	} while (rc == -EAGAIN);
+
+	WARN(result != id, "mmu: Failed to reserve context id %d (rc %d)\n", id, result);
+}
+
 int hash__alloc_context_id(void)
 {
 	unsigned long max;
diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
index 8b1fe895daa3..6541d0b03e4c 100644
--- a/arch/powerpc/platforms/pseries/lpar.c
+++ b/arch/powerpc/platforms/pseries/lpar.c
@@ -958,3 +958,64 @@ int h_get_mpp_x(struct hvcall_mpp_x_data *mpp_x_data)
 
 	return rc;
 }
+
+static unsigned long vsid_unscramble(unsigned long vsid, int ssize)
+{
+	unsigned long protovsid;
+	unsigned long va_bits = VA_BITS;
+	unsigned long modinv, vsid_modulus;
+	unsigned long max_mod_inv, tmp_modinv;
+
+	if (!mmu_has_feature(MMU_FTR_68_BIT_VA))
+		va_bits = 65;
+
+	if (ssize == MMU_SEGSIZE_256M) {
+		modinv = VSID_MULINV_256M;
+		vsid_modulus = ((1UL << (va_bits - SID_SHIFT)) - 1);
+	} else {
+		modinv = VSID_MULINV_1T;
+		vsid_modulus = ((1UL << (va_bits - SID_SHIFT_1T)) - 1);
+	}
+
+	/*
+	 * vsid outside our range.
+	 */
+	if (vsid >= vsid_modulus)
+		return 0;
+
+	/*
+	 * If modinv is the modular multiplicative inverse of (x % vsid_modulus)
+	 * and vsid = (protovsid * x) % vsid_modulus, then we say:
+	 *	protovsid = (vsid * modinv) % vsid_modulus
+	 */
+
+	/* Check if (vsid * modinv) overflows (63 bits) */
+	max_mod_inv = 0x7fffffffffffffffull / vsid;
+	if (modinv < max_mod_inv)
+		return (vsid * modinv) % vsid_modulus;
+
+	tmp_modinv = modinv / max_mod_inv;
+	modinv %= max_mod_inv;
+
+	protovsid = (((vsid * max_mod_inv) % vsid_modulus) * tmp_modinv) % vsid_modulus;
+	protovsid = (protovsid + vsid * modinv) % vsid_modulus;
+
+	return protovsid;
+}
+
+static int __init reserve_vrma_context_id(void)
+{
+	unsigned long protovsid;
+
+	/*
+	 * Reserve context ids which map to reserved virtual addresses. For now
+	 * we only reserve the context id which maps to the VRMA VSID. We ignore
+	 * the addresses in "ibm,adjunct-virtual-addresses" because we don't
+	 * enable adjunct support via the "ibm,client-architecture-support"
+	 * interface.
+	 */
+	protovsid = vsid_unscramble(VRMA_VSID, MMU_SEGSIZE_1T);
+	hash__reserve_context_id(protovsid >> ESID_BITS_1T);
+	return 0;
+}
+machine_device_initcall(pseries, reserve_vrma_context_id);
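A note on the fallback path in vsid_unscramble(): when vsid * modinv would not fit in 63 bits, the patch splits modinv into q * k + r with k = 0x7fffffffffffffff / vsid, so that every intermediate product stays below 2^63. The following is a minimal user-space sketch of that decomposition (mulmod63() is a hypothetical name, not in the patch), cross-checked against GCC's unsigned __int128 for a deliberately large 256M-segment VSID that forces the slow path; the modulus assumes the 68-bit VA layout.

/* mulmod_demo.c - standalone sketch of the overflow-avoidance decomposition */
#include <stdio.h>

#define VSID_MULINV_256M	665548017062UL		/* constant added by this patch */
#define VSID_MODULUS_256M	((1UL << 40) - 1)	/* assumes 68-bit VA, SID_SHIFT = 28 */

/* (a * b) % n without 128-bit math, valid for the ranges used by vsid_unscramble() */
static unsigned long mulmod63(unsigned long a, unsigned long b, unsigned long n)
{
	unsigned long k, q, r;

	k = 0x7fffffffffffffffUL / a;	/* largest multiplier keeping a * k below 2^63 */
	if (b < k)
		return (a * b) % n;	/* fast path: no overflow possible */

	q = b / k;
	r = b % k;			/* b = q * k + r */
	/* a * b = (a * k) * q + a * r, reduced mod n term by term */
	return ((((a * k) % n) * q) % n + a * r) % n;
}

int main(void)
{
	unsigned long vsid = (1UL << 40) - 2;	/* arbitrary large VSID, exercises the slow path */
	unsigned long got = mulmod63(vsid, VSID_MULINV_256M, VSID_MODULUS_256M);
	unsigned long want = (unsigned long)(((unsigned __int128)vsid * VSID_MULINV_256M) %
					     VSID_MODULUS_256M);

	printf("decomposed = 0x%lx, __int128 = 0x%lx, match = %d\n", got, want, got == want);
	return 0;
}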