aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorAneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>2016-06-08 10:25:50 -0400
committerMichael Ellerman <mpe@ellerman.id.au>2016-06-10 02:14:43 -0400
commit36194812a4063dd2a72070aec3ee7890d135a587 (patch)
tree964bc8474f56fd52e9bbf98082ca2aee059790a5
parent8017ea35d33f9e0950d369773ab48bcb1efb9ba0 (diff)
powerpc/mm/radix: Update to tlb functions ric argument
Radix invalidate control (RIC) is used to control which cache to flush using tlb instructions. When doing a PID flush, we currently flush everything including page walk cache. For address range flush, we flush only the TLB. In the next patch, we add support for flushing only the page walk cache.

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
-rw-r--r-- arch/powerpc/mm/tlb-radix.c | 43
1 file changed, 22 insertions, 21 deletions
diff --git a/arch/powerpc/mm/tlb-radix.c b/arch/powerpc/mm/tlb-radix.c
index 54efba2fd66e..71083621884e 100644
--- a/arch/powerpc/mm/tlb-radix.c
+++ b/arch/powerpc/mm/tlb-radix.c
@@ -18,16 +18,20 @@
18 18
19static DEFINE_RAW_SPINLOCK(native_tlbie_lock); 19static DEFINE_RAW_SPINLOCK(native_tlbie_lock);
20 20
21static inline void __tlbiel_pid(unsigned long pid, int set) 21#define RIC_FLUSH_TLB 0
22#define RIC_FLUSH_PWC 1
23#define RIC_FLUSH_ALL 2
24
25static inline void __tlbiel_pid(unsigned long pid, int set,
26 unsigned long ric)
22{ 27{
23 unsigned long rb,rs,ric,prs,r; 28 unsigned long rb,rs,prs,r;
24 29
25 rb = PPC_BIT(53); /* IS = 1 */ 30 rb = PPC_BIT(53); /* IS = 1 */
26 rb |= set << PPC_BITLSHIFT(51); 31 rb |= set << PPC_BITLSHIFT(51);
27 rs = ((unsigned long)pid) << PPC_BITLSHIFT(31); 32 rs = ((unsigned long)pid) << PPC_BITLSHIFT(31);
28 prs = 1; /* process scoped */ 33 prs = 1; /* process scoped */
29 r = 1; /* raidx format */ 34 r = 1; /* raidx format */
30 ric = 2; /* invalidate all the caches */
31 35
32 asm volatile("ptesync": : :"memory"); 36 asm volatile("ptesync": : :"memory");
33 asm volatile(".long 0x7c000224 | (%0 << 11) | (%1 << 16) |" 37 asm volatile(".long 0x7c000224 | (%0 << 11) | (%1 << 16) |"
@@ -39,25 +43,24 @@ static inline void __tlbiel_pid(unsigned long pid, int set)
39/* 43/*
40 * We use 128 set in radix mode and 256 set in hpt mode. 44 * We use 128 set in radix mode and 256 set in hpt mode.
41 */ 45 */
42static inline void _tlbiel_pid(unsigned long pid) 46static inline void _tlbiel_pid(unsigned long pid, unsigned long ric)
43{ 47{
44 int set; 48 int set;
45 49
46 for (set = 0; set < POWER9_TLB_SETS_RADIX ; set++) { 50 for (set = 0; set < POWER9_TLB_SETS_RADIX ; set++) {
47 __tlbiel_pid(pid, set); 51 __tlbiel_pid(pid, set, ric);
48 } 52 }
49 return; 53 return;
50} 54}
51 55
52static inline void _tlbie_pid(unsigned long pid) 56static inline void _tlbie_pid(unsigned long pid, unsigned long ric)
53{ 57{
54 unsigned long rb,rs,ric,prs,r; 58 unsigned long rb,rs,prs,r;
55 59
56 rb = PPC_BIT(53); /* IS = 1 */ 60 rb = PPC_BIT(53); /* IS = 1 */
57 rs = pid << PPC_BITLSHIFT(31); 61 rs = pid << PPC_BITLSHIFT(31);
58 prs = 1; /* process scoped */ 62 prs = 1; /* process scoped */
59 r = 1; /* raidx format */ 63 r = 1; /* raidx format */
60 ric = 2; /* invalidate all the caches */
61 64
62 asm volatile("ptesync": : :"memory"); 65 asm volatile("ptesync": : :"memory");
63 asm volatile(".long 0x7c000264 | (%0 << 11) | (%1 << 16) |" 66 asm volatile(".long 0x7c000264 | (%0 << 11) | (%1 << 16) |"
@@ -67,16 +70,15 @@ static inline void _tlbie_pid(unsigned long pid)
67} 70}
68 71
69static inline void _tlbiel_va(unsigned long va, unsigned long pid, 72static inline void _tlbiel_va(unsigned long va, unsigned long pid,
70 unsigned long ap) 73 unsigned long ap, unsigned long ric)
71{ 74{
72 unsigned long rb,rs,ric,prs,r; 75 unsigned long rb,rs,prs,r;
73 76
74 rb = va & ~(PPC_BITMASK(52, 63)); 77 rb = va & ~(PPC_BITMASK(52, 63));
75 rb |= ap << PPC_BITLSHIFT(58); 78 rb |= ap << PPC_BITLSHIFT(58);
76 rs = pid << PPC_BITLSHIFT(31); 79 rs = pid << PPC_BITLSHIFT(31);
77 prs = 1; /* process scoped */ 80 prs = 1; /* process scoped */
78 r = 1; /* raidx format */ 81 r = 1; /* raidx format */
79 ric = 0; /* no cluster flush yet */
80 82
81 asm volatile("ptesync": : :"memory"); 83 asm volatile("ptesync": : :"memory");
82 asm volatile(".long 0x7c000224 | (%0 << 11) | (%1 << 16) |" 84 asm volatile(".long 0x7c000224 | (%0 << 11) | (%1 << 16) |"
@@ -86,16 +88,15 @@ static inline void _tlbiel_va(unsigned long va, unsigned long pid,
86} 88}
87 89
88static inline void _tlbie_va(unsigned long va, unsigned long pid, 90static inline void _tlbie_va(unsigned long va, unsigned long pid,
89 unsigned long ap) 91 unsigned long ap, unsigned long ric)
90{ 92{
91 unsigned long rb,rs,ric,prs,r; 93 unsigned long rb,rs,prs,r;
92 94
93 rb = va & ~(PPC_BITMASK(52, 63)); 95 rb = va & ~(PPC_BITMASK(52, 63));
94 rb |= ap << PPC_BITLSHIFT(58); 96 rb |= ap << PPC_BITLSHIFT(58);
95 rs = pid << PPC_BITLSHIFT(31); 97 rs = pid << PPC_BITLSHIFT(31);
96 prs = 1; /* process scoped */ 98 prs = 1; /* process scoped */
97 r = 1; /* raidx format */ 99 r = 1; /* raidx format */
98 ric = 0; /* no cluster flush yet */
99 100
100 asm volatile("ptesync": : :"memory"); 101 asm volatile("ptesync": : :"memory");
101 asm volatile(".long 0x7c000264 | (%0 << 11) | (%1 << 16) |" 102 asm volatile(".long 0x7c000264 | (%0 << 11) | (%1 << 16) |"
@@ -122,7 +123,7 @@ void radix__local_flush_tlb_mm(struct mm_struct *mm)
122 preempt_disable(); 123 preempt_disable();
123 pid = mm->context.id; 124 pid = mm->context.id;
124 if (pid != MMU_NO_CONTEXT) 125 if (pid != MMU_NO_CONTEXT)
125 _tlbiel_pid(pid); 126 _tlbiel_pid(pid, RIC_FLUSH_ALL);
126 preempt_enable(); 127 preempt_enable();
127} 128}
128EXPORT_SYMBOL(radix__local_flush_tlb_mm); 129EXPORT_SYMBOL(radix__local_flush_tlb_mm);
@@ -135,7 +136,7 @@ void radix___local_flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
135 preempt_disable(); 136 preempt_disable();
136 pid = mm ? mm->context.id : 0; 137 pid = mm ? mm->context.id : 0;
137 if (pid != MMU_NO_CONTEXT) 138 if (pid != MMU_NO_CONTEXT)
138 _tlbiel_va(vmaddr, pid, ap); 139 _tlbiel_va(vmaddr, pid, ap, RIC_FLUSH_TLB);
139 preempt_enable(); 140 preempt_enable();
140} 141}
141 142
@@ -172,11 +173,11 @@ void radix__flush_tlb_mm(struct mm_struct *mm)
172 173
173 if (lock_tlbie) 174 if (lock_tlbie)
174 raw_spin_lock(&native_tlbie_lock); 175 raw_spin_lock(&native_tlbie_lock);
175 _tlbie_pid(pid); 176 _tlbie_pid(pid, RIC_FLUSH_ALL);
176 if (lock_tlbie) 177 if (lock_tlbie)
177 raw_spin_unlock(&native_tlbie_lock); 178 raw_spin_unlock(&native_tlbie_lock);
178 } else 179 } else
179 _tlbiel_pid(pid); 180 _tlbiel_pid(pid, RIC_FLUSH_ALL);
180no_context: 181no_context:
181 preempt_enable(); 182 preempt_enable();
182} 183}
@@ -196,11 +197,11 @@ void radix___flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
196 197
197 if (lock_tlbie) 198 if (lock_tlbie)
198 raw_spin_lock(&native_tlbie_lock); 199 raw_spin_lock(&native_tlbie_lock);
199 _tlbie_va(vmaddr, pid, ap); 200 _tlbie_va(vmaddr, pid, ap, RIC_FLUSH_TLB);
200 if (lock_tlbie) 201 if (lock_tlbie)
201 raw_spin_unlock(&native_tlbie_lock); 202 raw_spin_unlock(&native_tlbie_lock);
202 } else 203 } else
203 _tlbiel_va(vmaddr, pid, ap); 204 _tlbiel_va(vmaddr, pid, ap, RIC_FLUSH_TLB);
204bail: 205bail:
205 preempt_enable(); 206 preempt_enable();
206} 207}
@@ -224,7 +225,7 @@ void radix__flush_tlb_kernel_range(unsigned long start, unsigned long end)
224 225
225 if (lock_tlbie) 226 if (lock_tlbie)
226 raw_spin_lock(&native_tlbie_lock); 227 raw_spin_lock(&native_tlbie_lock);
227 _tlbie_pid(0); 228 _tlbie_pid(0, RIC_FLUSH_ALL);
228 if (lock_tlbie) 229 if (lock_tlbie)
229 raw_spin_unlock(&native_tlbie_lock); 230 raw_spin_unlock(&native_tlbie_lock);
230} 231}