Diffstat (limited to 'arch/s390/include/asm/tlbflush.h')

 -rw-r--r--  arch/s390/include/asm/tlbflush.h | 115
 1 file changed, 95 insertions(+), 20 deletions(-)
diff --git a/arch/s390/include/asm/tlbflush.h b/arch/s390/include/asm/tlbflush.h
index f9fef0425fee..16c9c88658c8 100644
--- a/arch/s390/include/asm/tlbflush.h
+++ b/arch/s390/include/asm/tlbflush.h
@@ -7,19 +7,41 @@
 #include <asm/pgalloc.h>
 
 /*
- * Flush all tlb entries on the local cpu.
+ * Flush all TLB entries on the local CPU.
  */
 static inline void __tlb_flush_local(void)
 {
 	asm volatile("ptlb" : : : "memory");
 }
 
-#ifdef CONFIG_SMP
 /*
- * Flush all tlb entries on all cpus.
+ * Flush TLB entries for a specific ASCE on all CPUs
  */
+static inline void __tlb_flush_idte(unsigned long asce)
+{
+	/* Global TLB flush for the mm */
+	asm volatile(
+		"	.insn	rrf,0xb98e0000,0,%0,%1,0"
+		: : "a" (2048), "a" (asce) : "cc");
+}
+
+/*
+ * Flush TLB entries for a specific ASCE on the local CPU
+ */
+static inline void __tlb_flush_idte_local(unsigned long asce)
+{
+	/* Local TLB flush for the mm */
+	asm volatile(
+		"	.insn	rrf,0xb98e0000,0,%0,%1,1"
+		: : "a" (2048), "a" (asce) : "cc");
+}
+
+#ifdef CONFIG_SMP
 void smp_ptlb_all(void);
 
+/*
+ * Flush all TLB entries on all CPUs.
+ */
 static inline void __tlb_flush_global(void)
 {
 	register unsigned long reg2 asm("2");
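
Note on the pair of IDTE helpers added above: `.insn rrf,0xb98e0000,...` hand-assembles the IDTE instruction, and the only difference between the two variants is the final constant of the template (0 flushes the ASCE on all CPUs, 1 only on the local CPU, matching the inline comments). Since that constant is part of the instruction encoding it cannot be a runtime operand, which is why the patch carries two functions; even a merged helper would still need two asm bodies. A sketch, where the name __tlb_flush_idte_opt and its local flag are illustrative and not part of the patch:

static inline void __tlb_flush_idte_opt(unsigned long asce, int local)
{
	if (local)
		/* local-clearing IDTE, as in __tlb_flush_idte_local() */
		asm volatile(
			"	.insn	rrf,0xb98e0000,0,%0,%1,1"
			: : "a" (2048), "a" (asce) : "cc");
	else
		/* IDTE on all CPUs, as in __tlb_flush_idte() */
		asm volatile(
			"	.insn	rrf,0xb98e0000,0,%0,%1,0"
			: : "a" (2048), "a" (asce) : "cc");
}
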
@@ -42,36 +64,89 @@ static inline void __tlb_flush_global(void)
42 : : "d" (reg2), "d" (reg3), "d" (reg4), "m" (dummy) : "cc" ); 64 : : "d" (reg2), "d" (reg3), "d" (reg4), "m" (dummy) : "cc" );
43} 65}
44 66
67/*
68 * Flush TLB entries for a specific mm on all CPUs (in case gmap is used
69 * this implicates multiple ASCEs!).
70 */
45static inline void __tlb_flush_full(struct mm_struct *mm) 71static inline void __tlb_flush_full(struct mm_struct *mm)
46{ 72{
47 cpumask_t local_cpumask;
48
49 preempt_disable(); 73 preempt_disable();
50 /* 74 atomic_add(0x10000, &mm->context.attach_count);
51 * If the process only ran on the local cpu, do a local flush. 75 if (cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id()))) {
52 */ 76 /* Local TLB flush */
53 cpumask_copy(&local_cpumask, cpumask_of(smp_processor_id()));
54 if (cpumask_equal(mm_cpumask(mm), &local_cpumask))
55 __tlb_flush_local(); 77 __tlb_flush_local();
56 else 78 } else {
79 /* Global TLB flush */
57 __tlb_flush_global(); 80 __tlb_flush_global();
81 /* Reset TLB flush mask */
82 if (MACHINE_HAS_TLB_LC)
83 cpumask_copy(mm_cpumask(mm),
84 &mm->context.cpu_attach_mask);
85 }
86 atomic_sub(0x10000, &mm->context.attach_count);
58 preempt_enable(); 87 preempt_enable();
59} 88}
89
90/*
91 * Flush TLB entries for a specific ASCE on all CPUs.
92 */
93static inline void __tlb_flush_asce(struct mm_struct *mm, unsigned long asce)
94{
95 int active, count;
96
97 preempt_disable();
98 active = (mm == current->active_mm) ? 1 : 0;
99 count = atomic_add_return(0x10000, &mm->context.attach_count);
100 if (MACHINE_HAS_TLB_LC && (count & 0xffff) <= active &&
101 cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id()))) {
102 __tlb_flush_idte_local(asce);
103 } else {
104 if (MACHINE_HAS_IDTE)
105 __tlb_flush_idte(asce);
106 else
107 __tlb_flush_global();
108 /* Reset TLB flush mask */
109 if (MACHINE_HAS_TLB_LC)
110 cpumask_copy(mm_cpumask(mm),
111 &mm->context.cpu_attach_mask);
112 }
113 atomic_sub(0x10000, &mm->context.attach_count);
114 preempt_enable();
115}
116
117static inline void __tlb_flush_kernel(void)
118{
119 if (MACHINE_HAS_IDTE)
120 __tlb_flush_idte((unsigned long) init_mm.pgd |
121 init_mm.context.asce_bits);
122 else
123 __tlb_flush_global();
124}
60#else 125#else
61#define __tlb_flush_full(mm) __tlb_flush_local()
62#define __tlb_flush_global() __tlb_flush_local() 126#define __tlb_flush_global() __tlb_flush_local()
63#endif 127#define __tlb_flush_full(mm) __tlb_flush_local()
64 128
65/* 129/*
66 * Flush all tlb entries of a page table on all cpus. 130 * Flush TLB entries for a specific ASCE on all CPUs.
67 */ 131 */
68static inline void __tlb_flush_idte(unsigned long asce) 132static inline void __tlb_flush_asce(struct mm_struct *mm, unsigned long asce)
69{ 133{
70 asm volatile( 134 if (MACHINE_HAS_TLB_LC)
71 " .insn rrf,0xb98e0000,0,%0,%1,0" 135 __tlb_flush_idte_local(asce);
72 : : "a" (2048), "a" (asce) : "cc" ); 136 else
137 __tlb_flush_local();
73} 138}
74 139
140static inline void __tlb_flush_kernel(void)
141{
142 if (MACHINE_HAS_TLB_LC)
143 __tlb_flush_idte_local((unsigned long) init_mm.pgd |
144 init_mm.context.asce_bits);
145 else
146 __tlb_flush_local();
147}
148#endif
149
75static inline void __tlb_flush_mm(struct mm_struct * mm) 150static inline void __tlb_flush_mm(struct mm_struct * mm)
76{ 151{
77 /* 152 /*
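
The 0x10000 arithmetic above packs two counters into mm->context.attach_count: the low 16 bits count users that currently have the mm attached, and every flush in progress adds 0x10000 to the high half. The test (count & 0xffff) <= active in __tlb_flush_asce() thus means "nobody except, at most, the caller has this mm attached", which together with the cpumask comparison makes the cheaper local IDTE flush safe. A sketch of that packing, assuming the low/high split described here; the macro and helper names are hypothetical:

#define MM_FLUSH_BIAS	0x10000	/* one in-flight flush, counted in the high half */

static inline int mm_attached_users(int count)
{
	return count & 0xffff;	/* low half: attach count */
}

static inline int mm_flushes_in_flight(int count)
{
	return count >> 16;	/* high half: concurrent flushers */
}
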
@@ -80,7 +155,7 @@ static inline void __tlb_flush_mm(struct mm_struct * mm)
 	 * only ran on the local cpu.
 	 */
 	if (MACHINE_HAS_IDTE && list_empty(&mm->context.gmap_list))
-		__tlb_flush_idte((unsigned long) mm->pgd |
+		__tlb_flush_asce(mm, (unsigned long) mm->pgd |
 				 mm->context.asce_bits);
 	else
 		__tlb_flush_full(mm);
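
The gmap test above is what the comment added in the previous hunk ("in case gmap is used this implicates multiple ASCEs!") is about: a non-empty gmap_list means additional guest ASCEs map the same mm, so an IDTE flush of the mm's own ASCE alone could leave stale entries, and only __tlb_flush_full() is correct. The fast-path condition, restated as a hypothetical predicate:

/* Hypothetical helper; mirrors the condition in __tlb_flush_mm() above. */
static inline int mm_can_flush_by_asce(struct mm_struct *mm)
{
	return MACHINE_HAS_IDTE && list_empty(&mm->context.gmap_list);
}
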
@@ -130,7 +205,7 @@ static inline void flush_tlb_range(struct vm_area_struct *vma,
 static inline void flush_tlb_kernel_range(unsigned long start,
 					  unsigned long end)
 {
-	__tlb_flush_mm(&init_mm);
+	__tlb_flush_kernel();
 }
 
 #endif /* _S390_TLBFLUSH_H */
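
For completeness, the ASCE value handed to __tlb_flush_asce() is simply the pgd origin ORed with the mm's address-space-control bits, as the call site in __tlb_flush_mm() shows. A usage sketch with a hypothetical convenience wrapper:

/* Hypothetical wrapper mirroring the call in __tlb_flush_mm(). */
static inline unsigned long mm_asce(struct mm_struct *mm)
{
	return (unsigned long) mm->pgd | mm->context.asce_bits;
}

/* ... making the fast path equivalent to __tlb_flush_asce(mm, mm_asce(mm)). */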