Diffstat (limited to 'arch/mips/cavium-octeon/executive/cvmx-l2c.c')
-rw-r--r-- | arch/mips/cavium-octeon/executive/cvmx-l2c.c | 810 |
1 file changed, 488 insertions, 322 deletions
diff --git a/arch/mips/cavium-octeon/executive/cvmx-l2c.c b/arch/mips/cavium-octeon/executive/cvmx-l2c.c
index 6abe56f1e097..d38246e33ddb 100644
--- a/arch/mips/cavium-octeon/executive/cvmx-l2c.c
+++ b/arch/mips/cavium-octeon/executive/cvmx-l2c.c
@@ -4,7 +4,7 @@ | |||
4 | * Contact: support@caviumnetworks.com | 4 | * Contact: support@caviumnetworks.com |
5 | * This file is part of the OCTEON SDK | 5 | * This file is part of the OCTEON SDK |
6 | * | 6 | * |
7 | * Copyright (c) 2003-2008 Cavium Networks | 7 | * Copyright (c) 2003-2010 Cavium Networks |
8 | * | 8 | * |
9 | * This file is free software; you can redistribute it and/or modify | 9 | * This file is free software; you can redistribute it and/or modify |
10 | * it under the terms of the GNU General Public License, Version 2, as | 10 | * it under the terms of the GNU General Public License, Version 2, as |
@@ -26,8 +26,8 @@ | |||
26 | ***********************license end**************************************/ | 26 | ***********************license end**************************************/ |
27 | 27 | ||
28 | /* | 28 | /* |
29 | * Implementation of the Level 2 Cache (L2C) control, measurement, and | 29 | * Implementation of the Level 2 Cache (L2C) control, |
30 | * debugging facilities. | 30 | * measurement, and debugging facilities. |
31 | */ | 31 | */ |
32 | 32 | ||
33 | #include <asm/octeon/cvmx.h> | 33 | #include <asm/octeon/cvmx.h> |
@@ -42,13 +42,7 @@ | |||
42 | * if multiple applications or operating systems are running, then it | 42 | * if multiple applications or operating systems are running, then it |
43 | * is up to the user program to coordinate between them. | 43 | * is up to the user program to coordinate between them. |
44 | */ | 44 | */ |
45 | static cvmx_spinlock_t cvmx_l2c_spinlock; | 45 | cvmx_spinlock_t cvmx_l2c_spinlock; |
46 | |||
47 | static inline int l2_size_half(void) | ||
48 | { | ||
49 | uint64_t val = cvmx_read_csr(CVMX_L2D_FUS3); | ||
50 | return !!(val & (1ull << 34)); | ||
51 | } | ||
52 | 46 | ||
53 | int cvmx_l2c_get_core_way_partition(uint32_t core) | 47 | int cvmx_l2c_get_core_way_partition(uint32_t core) |
54 | { | 48 | { |
@@ -58,6 +52,9 @@ int cvmx_l2c_get_core_way_partition(uint32_t core) | |||
58 | if (core >= cvmx_octeon_num_cores()) | 52 | if (core >= cvmx_octeon_num_cores()) |
59 | return -1; | 53 | return -1; |
60 | 54 | ||
55 | if (OCTEON_IS_MODEL(OCTEON_CN63XX)) | ||
56 | return cvmx_read_csr(CVMX_L2C_WPAR_PPX(core)) & 0xffff; | ||
57 | |||
61 | /* | 58 | /* |
62 | * Use the lower two bits of the coreNumber to determine the | 59 | * Use the lower two bits of the coreNumber to determine the |
63 | * bit offset of the UMSK[] field in the L2C_SPAR register. | 60 | * bit offset of the UMSK[] field in the L2C_SPAR register. |
@@ -71,17 +68,13 @@ int cvmx_l2c_get_core_way_partition(uint32_t core) | |||
71 | 68 | ||
72 | switch (core & 0xC) { | 69 | switch (core & 0xC) { |
73 | case 0x0: | 70 | case 0x0: |
74 | return (cvmx_read_csr(CVMX_L2C_SPAR0) & (0xFF << field)) >> | 71 | return (cvmx_read_csr(CVMX_L2C_SPAR0) & (0xFF << field)) >> field; |
75 | field; | ||
76 | case 0x4: | 72 | case 0x4: |
77 | return (cvmx_read_csr(CVMX_L2C_SPAR1) & (0xFF << field)) >> | 73 | return (cvmx_read_csr(CVMX_L2C_SPAR1) & (0xFF << field)) >> field; |
78 | field; | ||
79 | case 0x8: | 74 | case 0x8: |
80 | return (cvmx_read_csr(CVMX_L2C_SPAR2) & (0xFF << field)) >> | 75 | return (cvmx_read_csr(CVMX_L2C_SPAR2) & (0xFF << field)) >> field; |
81 | field; | ||
82 | case 0xC: | 76 | case 0xC: |
83 | return (cvmx_read_csr(CVMX_L2C_SPAR3) & (0xFF << field)) >> | 77 | return (cvmx_read_csr(CVMX_L2C_SPAR3) & (0xFF << field)) >> field; |
84 | field; | ||
85 | } | 78 | } |
86 | return 0; | 79 | return 0; |
87 | } | 80 | } |
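Sketch (not part of the patch, helper name invented): how the pre-CN63XX path above maps a core number onto an L2C_SPARn register and the UMSK byte within it.

    /* Illustration only: locate core N's UMSK byte on pre-CN63XX parts,
     * mirroring the switch on (core & 0xC) and the field computation above. */
    static void example_umsk_location(uint32_t core, int *spar, int *bit_offset)
    {
            *spar = (core & 0xC) >> 2;      /* selects L2C_SPAR0..L2C_SPAR3 */
            *bit_offset = (core & 0x3) * 8; /* byte lane inside that register */
    }
    /* Example: core 5 -> L2C_SPAR1, bits 15:8. */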
@@ -95,48 +88,50 @@ int cvmx_l2c_set_core_way_partition(uint32_t core, uint32_t mask) | |||
95 | 88 | ||
96 | mask &= valid_mask; | 89 | mask &= valid_mask; |
97 | 90 | ||
98 | /* A UMSK setting which blocks all L2C Ways is an error. */ | 91 | /* A UMSK setting which blocks all L2C Ways is an error on some chips */ |
99 | if (mask == valid_mask) | 92 | if (mask == valid_mask && !OCTEON_IS_MODEL(OCTEON_CN63XX)) |
100 | return -1; | 93 | return -1; |
101 | 94 | ||
102 | /* Validate the core number */ | 95 | /* Validate the core number */ |
103 | if (core >= cvmx_octeon_num_cores()) | 96 | if (core >= cvmx_octeon_num_cores()) |
104 | return -1; | 97 | return -1; |
105 | 98 | ||
106 | /* Check to make sure current mask & new mask don't block all ways */ | 99 | if (OCTEON_IS_MODEL(OCTEON_CN63XX)) { |
107 | if (((mask | cvmx_l2c_get_core_way_partition(core)) & valid_mask) == | 100 | cvmx_write_csr(CVMX_L2C_WPAR_PPX(core), mask); |
108 | valid_mask) | 101 | return 0; |
109 | return -1; | 102 | } |
110 | 103 | ||
111 | /* Use the lower two bits of core to determine the bit offset of the | 104 | /* |
105 | * Use the lower two bits of core to determine the bit offset of the | ||
112 | * UMSK[] field in the L2C_SPAR register. | 106 | * UMSK[] field in the L2C_SPAR register. |
113 | */ | 107 | */ |
114 | field = (core & 0x3) * 8; | 108 | field = (core & 0x3) * 8; |
115 | 109 | ||
116 | /* Assign the new mask setting to the UMSK[] field in the appropriate | 110 | /* |
111 | * Assign the new mask setting to the UMSK[] field in the appropriate | ||
117 | * L2C_SPAR register based on the core_num. | 112 | * L2C_SPAR register based on the core_num. |
118 | * | 113 | * |
119 | */ | 114 | */ |
120 | switch (core & 0xC) { | 115 | switch (core & 0xC) { |
121 | case 0x0: | 116 | case 0x0: |
122 | cvmx_write_csr(CVMX_L2C_SPAR0, | 117 | cvmx_write_csr(CVMX_L2C_SPAR0, |
123 | (cvmx_read_csr(CVMX_L2C_SPAR0) & | 118 | (cvmx_read_csr(CVMX_L2C_SPAR0) & ~(0xFF << field)) | |
124 | ~(0xFF << field)) | mask << field); | 119 | mask << field); |
125 | break; | 120 | break; |
126 | case 0x4: | 121 | case 0x4: |
127 | cvmx_write_csr(CVMX_L2C_SPAR1, | 122 | cvmx_write_csr(CVMX_L2C_SPAR1, |
128 | (cvmx_read_csr(CVMX_L2C_SPAR1) & | 123 | (cvmx_read_csr(CVMX_L2C_SPAR1) & ~(0xFF << field)) | |
129 | ~(0xFF << field)) | mask << field); | 124 | mask << field); |
130 | break; | 125 | break; |
131 | case 0x8: | 126 | case 0x8: |
132 | cvmx_write_csr(CVMX_L2C_SPAR2, | 127 | cvmx_write_csr(CVMX_L2C_SPAR2, |
133 | (cvmx_read_csr(CVMX_L2C_SPAR2) & | 128 | (cvmx_read_csr(CVMX_L2C_SPAR2) & ~(0xFF << field)) | |
134 | ~(0xFF << field)) | mask << field); | 129 | mask << field); |
135 | break; | 130 | break; |
136 | case 0xC: | 131 | case 0xC: |
137 | cvmx_write_csr(CVMX_L2C_SPAR3, | 132 | cvmx_write_csr(CVMX_L2C_SPAR3, |
138 | (cvmx_read_csr(CVMX_L2C_SPAR3) & | 133 | (cvmx_read_csr(CVMX_L2C_SPAR3) & ~(0xFF << field)) | |
139 | ~(0xFF << field)) | mask << field); | 134 | mask << field); |
140 | break; | 135 | break; |
141 | } | 136 | } |
142 | return 0; | 137 | return 0; |
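Usage sketch (not from the patch): a set UMSK bit blocks that way for the core, so reserving ways 0 and 1 away from core 2 looks like this; the message text is illustrative.

    /* Keep core 2 out of ways 0 and 1 (set bits = blocked ways). */
    if (cvmx_l2c_set_core_way_partition(2, 0x3) != 0)
            cvmx_dprintf("L2C: could not partition ways for core 2\n");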
@@ -146,84 +141,137 @@ int cvmx_l2c_set_hw_way_partition(uint32_t mask) | |||
146 | { | 141 | { |
147 | uint32_t valid_mask; | 142 | uint32_t valid_mask; |
148 | 143 | ||
149 | valid_mask = 0xff; | 144 | valid_mask = (0x1 << cvmx_l2c_get_num_assoc()) - 1; |
150 | |||
151 | if (OCTEON_IS_MODEL(OCTEON_CN58XX) || OCTEON_IS_MODEL(OCTEON_CN38XX)) { | ||
152 | if (l2_size_half()) | ||
153 | valid_mask = 0xf; | ||
154 | } else if (l2_size_half()) | ||
155 | valid_mask = 0x3; | ||
156 | |||
157 | mask &= valid_mask; | 145 | mask &= valid_mask; |
158 | 146 | ||
159 | /* A UMSK setting which blocks all L2C Ways is an error. */ | 147 | /* A UMSK setting which blocks all L2C Ways is an error on some chips */ |
160 | if (mask == valid_mask) | 148 | if (mask == valid_mask && !OCTEON_IS_MODEL(OCTEON_CN63XX)) |
161 | return -1; | ||
162 | /* Check to make sure current mask & new mask don't block all ways */ | ||
163 | if (((mask | cvmx_l2c_get_hw_way_partition()) & valid_mask) == | ||
164 | valid_mask) | ||
165 | return -1; | 149 | return -1; |
166 | 150 | ||
167 | cvmx_write_csr(CVMX_L2C_SPAR4, | 151 | if (OCTEON_IS_MODEL(OCTEON_CN63XX)) |
168 | (cvmx_read_csr(CVMX_L2C_SPAR4) & ~0xFF) | mask); | 152 | cvmx_write_csr(CVMX_L2C_WPAR_IOBX(0), mask); |
153 | else | ||
154 | cvmx_write_csr(CVMX_L2C_SPAR4, | ||
155 | (cvmx_read_csr(CVMX_L2C_SPAR4) & ~0xFF) | mask); | ||
169 | return 0; | 156 | return 0; |
170 | } | 157 | } |
171 | 158 | ||
172 | int cvmx_l2c_get_hw_way_partition(void) | 159 | int cvmx_l2c_get_hw_way_partition(void) |
173 | { | 160 | { |
174 | return cvmx_read_csr(CVMX_L2C_SPAR4) & (0xFF); | 161 | if (OCTEON_IS_MODEL(OCTEON_CN63XX)) |
162 | return cvmx_read_csr(CVMX_L2C_WPAR_IOBX(0)) & 0xffff; | ||
163 | else | ||
164 | return cvmx_read_csr(CVMX_L2C_SPAR4) & (0xFF); | ||
175 | } | 165 | } |
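The same idea applies to the hardware (IOB) partition; both register layouts (L2C_SPAR4 and L2C_WPAR_IOBX) stay hidden behind the helpers above. Usage sketch:

    /* Additionally block way 0 for hardware (IOB) allocations. */
    int hw_mask = cvmx_l2c_get_hw_way_partition();
    cvmx_l2c_set_hw_way_partition(hw_mask | 0x1);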
176 | 166 | ||
177 | void cvmx_l2c_config_perf(uint32_t counter, enum cvmx_l2c_event event, | 167 | void cvmx_l2c_config_perf(uint32_t counter, enum cvmx_l2c_event event, |
178 | uint32_t clear_on_read) | 168 | uint32_t clear_on_read) |
179 | { | 169 | { |
180 | union cvmx_l2c_pfctl pfctl; | 170 | if (OCTEON_IS_MODEL(OCTEON_CN5XXX) || OCTEON_IS_MODEL(OCTEON_CN3XXX)) { |
171 | union cvmx_l2c_pfctl pfctl; | ||
181 | 172 | ||
182 | pfctl.u64 = cvmx_read_csr(CVMX_L2C_PFCTL); | 173 | pfctl.u64 = cvmx_read_csr(CVMX_L2C_PFCTL); |
183 | 174 | ||
184 | switch (counter) { | 175 | switch (counter) { |
185 | case 0: | 176 | case 0: |
186 | pfctl.s.cnt0sel = event; | 177 | pfctl.s.cnt0sel = event; |
187 | pfctl.s.cnt0ena = 1; | 178 | pfctl.s.cnt0ena = 1; |
188 | if (!cvmx_octeon_is_pass1()) | ||
189 | pfctl.s.cnt0rdclr = clear_on_read; | 179 | pfctl.s.cnt0rdclr = clear_on_read; |
190 | break; | 180 | break; |
191 | case 1: | 181 | case 1: |
192 | pfctl.s.cnt1sel = event; | 182 | pfctl.s.cnt1sel = event; |
193 | pfctl.s.cnt1ena = 1; | 183 | pfctl.s.cnt1ena = 1; |
194 | if (!cvmx_octeon_is_pass1()) | ||
195 | pfctl.s.cnt1rdclr = clear_on_read; | 184 | pfctl.s.cnt1rdclr = clear_on_read; |
196 | break; | 185 | break; |
197 | case 2: | 186 | case 2: |
198 | pfctl.s.cnt2sel = event; | 187 | pfctl.s.cnt2sel = event; |
199 | pfctl.s.cnt2ena = 1; | 188 | pfctl.s.cnt2ena = 1; |
200 | if (!cvmx_octeon_is_pass1()) | ||
201 | pfctl.s.cnt2rdclr = clear_on_read; | 189 | pfctl.s.cnt2rdclr = clear_on_read; |
202 | break; | 190 | break; |
203 | case 3: | 191 | case 3: |
204 | default: | 192 | default: |
205 | pfctl.s.cnt3sel = event; | 193 | pfctl.s.cnt3sel = event; |
206 | pfctl.s.cnt3ena = 1; | 194 | pfctl.s.cnt3ena = 1; |
207 | if (!cvmx_octeon_is_pass1()) | ||
208 | pfctl.s.cnt3rdclr = clear_on_read; | 195 | pfctl.s.cnt3rdclr = clear_on_read; |
209 | break; | 196 | break; |
210 | } | 197 | } |
211 | 198 | ||
212 | cvmx_write_csr(CVMX_L2C_PFCTL, pfctl.u64); | 199 | cvmx_write_csr(CVMX_L2C_PFCTL, pfctl.u64); |
200 | } else { | ||
201 | union cvmx_l2c_tadx_prf l2c_tadx_prf; | ||
202 | int tad; | ||
203 | |||
204 | cvmx_dprintf("L2C performance counter events are different for this chip, mapping 'event' to cvmx_l2c_tad_event_t\n"); | ||
205 | if (clear_on_read) | ||
206 | cvmx_dprintf("L2C counters don't support clear on read for this chip\n"); | ||
207 | |||
208 | l2c_tadx_prf.u64 = cvmx_read_csr(CVMX_L2C_TADX_PRF(0)); | ||
209 | |||
210 | switch (counter) { | ||
211 | case 0: | ||
212 | l2c_tadx_prf.s.cnt0sel = event; | ||
213 | break; | ||
214 | case 1: | ||
215 | l2c_tadx_prf.s.cnt1sel = event; | ||
216 | break; | ||
217 | case 2: | ||
218 | l2c_tadx_prf.s.cnt2sel = event; | ||
219 | break; | ||
220 | default: | ||
221 | case 3: | ||
222 | l2c_tadx_prf.s.cnt3sel = event; | ||
223 | break; | ||
224 | } | ||
225 | for (tad = 0; tad < CVMX_L2C_TADS; tad++) | ||
226 | cvmx_write_csr(CVMX_L2C_TADX_PRF(tad), | ||
227 | l2c_tadx_prf.u64); | ||
228 | } | ||
213 | } | 229 | } |
214 | 230 | ||
215 | uint64_t cvmx_l2c_read_perf(uint32_t counter) | 231 | uint64_t cvmx_l2c_read_perf(uint32_t counter) |
216 | { | 232 | { |
217 | switch (counter) { | 233 | switch (counter) { |
218 | case 0: | 234 | case 0: |
219 | return cvmx_read_csr(CVMX_L2C_PFC0); | 235 | if (OCTEON_IS_MODEL(OCTEON_CN5XXX) || OCTEON_IS_MODEL(OCTEON_CN3XXX)) |
236 | return cvmx_read_csr(CVMX_L2C_PFC0); | ||
237 | else { | ||
238 | uint64_t counter = 0; | ||
239 | int tad; | ||
240 | for (tad = 0; tad < CVMX_L2C_TADS; tad++) | ||
241 | counter += cvmx_read_csr(CVMX_L2C_TADX_PFC0(tad)); | ||
242 | return counter; | ||
243 | } | ||
220 | case 1: | 244 | case 1: |
221 | return cvmx_read_csr(CVMX_L2C_PFC1); | 245 | if (OCTEON_IS_MODEL(OCTEON_CN5XXX) || OCTEON_IS_MODEL(OCTEON_CN3XXX)) |
246 | return cvmx_read_csr(CVMX_L2C_PFC1); | ||
247 | else { | ||
248 | uint64_t counter = 0; | ||
249 | int tad; | ||
250 | for (tad = 0; tad < CVMX_L2C_TADS; tad++) | ||
251 | counter += cvmx_read_csr(CVMX_L2C_TADX_PFC1(tad)); | ||
252 | return counter; | ||
253 | } | ||
222 | case 2: | 254 | case 2: |
223 | return cvmx_read_csr(CVMX_L2C_PFC2); | 255 | if (OCTEON_IS_MODEL(OCTEON_CN5XXX) || OCTEON_IS_MODEL(OCTEON_CN3XXX)) |
256 | return cvmx_read_csr(CVMX_L2C_PFC2); | ||
257 | else { | ||
258 | uint64_t counter = 0; | ||
259 | int tad; | ||
260 | for (tad = 0; tad < CVMX_L2C_TADS; tad++) | ||
261 | counter += cvmx_read_csr(CVMX_L2C_TADX_PFC2(tad)); | ||
262 | return counter; | ||
263 | } | ||
224 | case 3: | 264 | case 3: |
225 | default: | 265 | default: |
226 | return cvmx_read_csr(CVMX_L2C_PFC3); | 266 | if (OCTEON_IS_MODEL(OCTEON_CN5XXX) || OCTEON_IS_MODEL(OCTEON_CN3XXX)) |
267 | return cvmx_read_csr(CVMX_L2C_PFC3); | ||
268 | else { | ||
269 | uint64_t counter = 0; | ||
270 | int tad; | ||
271 | for (tad = 0; tad < CVMX_L2C_TADS; tad++) | ||
272 | counter += cvmx_read_csr(CVMX_L2C_TADX_PFC3(tad)); | ||
273 | return counter; | ||
274 | } | ||
227 | } | 275 | } |
228 | } | 276 | } |
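Usage sketch for the two functions above (not part of the patch). The event constant is assumed to come from the cvmx_l2c_event enum in cvmx-l2c.h; on CN6XXX-style parts the value is reinterpreted as a TAD event and the read-back is summed over every TAD, as the code shows.

    cvmx_l2c_config_perf(0, CVMX_L2C_EVENT_CYCLES, 0);  /* counter 0, no clear-on-read */
    /* ... run the workload being measured ... */
    uint64_t count = cvmx_l2c_read_perf(0);             /* summed over all TADs on CN6XXX */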
229 | 277 | ||
@@ -240,7 +288,7 @@ static void fault_in(uint64_t addr, int len) | |||
240 | volatile char dummy; | 288 | volatile char dummy; |
241 | /* | 289 | /* |
242 | * Adjust addr and length so we get all cache lines even for | 290 | * Adjust addr and length so we get all cache lines even for |
243 | * small ranges spanning two cache lines | 291 | * small ranges spanning two cache lines. |
244 | */ | 292 | */ |
245 | len += addr & CVMX_CACHE_LINE_MASK; | 293 | len += addr & CVMX_CACHE_LINE_MASK; |
246 | addr &= ~CVMX_CACHE_LINE_MASK; | 294 | addr &= ~CVMX_CACHE_LINE_MASK; |
@@ -259,67 +307,100 @@ static void fault_in(uint64_t addr, int len) | |||
259 | 307 | ||
260 | int cvmx_l2c_lock_line(uint64_t addr) | 308 | int cvmx_l2c_lock_line(uint64_t addr) |
261 | { | 309 | { |
262 | int retval = 0; | 310 | if (OCTEON_IS_MODEL(OCTEON_CN63XX)) { |
263 | union cvmx_l2c_dbg l2cdbg; | 311 | int shift = CVMX_L2C_TAG_ADDR_ALIAS_SHIFT; |
264 | union cvmx_l2c_lckbase lckbase; | 312 | uint64_t assoc = cvmx_l2c_get_num_assoc(); |
265 | union cvmx_l2c_lckoff lckoff; | 313 | uint64_t tag = addr >> shift; |
266 | union cvmx_l2t_err l2t_err; | 314 | uint64_t index = CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS, cvmx_l2c_address_to_index(addr) << CVMX_L2C_IDX_ADDR_SHIFT); |
267 | l2cdbg.u64 = 0; | 315 | uint64_t way; |
268 | lckbase.u64 = 0; | 316 | union cvmx_l2c_tadx_tag l2c_tadx_tag; |
269 | lckoff.u64 = 0; | 317 | |
270 | 318 | CVMX_CACHE_LCKL2(CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS, addr), 0); | |
271 | cvmx_spinlock_lock(&cvmx_l2c_spinlock); | 319 | |
272 | 320 | /* Make sure we were able to lock the line */ | |
273 | /* Clear l2t error bits if set */ | 321 | for (way = 0; way < assoc; way++) { |
274 | l2t_err.u64 = cvmx_read_csr(CVMX_L2T_ERR); | 322 | CVMX_CACHE_LTGL2I(index | (way << shift), 0); |
275 | l2t_err.s.lckerr = 1; | 323 | /* make sure CVMX_L2C_TADX_TAG is updated */ |
276 | l2t_err.s.lckerr2 = 1; | 324 | CVMX_SYNC; |
277 | cvmx_write_csr(CVMX_L2T_ERR, l2t_err.u64); | 325 | l2c_tadx_tag.u64 = cvmx_read_csr(CVMX_L2C_TADX_TAG(0)); |
326 | if (l2c_tadx_tag.s.valid && l2c_tadx_tag.s.tag == tag) | ||
327 | break; | ||
328 | } | ||
278 | 329 | ||
279 | addr &= ~CVMX_CACHE_LINE_MASK; | 330 | /* Check if a valid line is found */ |
331 | if (way >= assoc) { | ||
332 | /* cvmx_dprintf("ERROR: cvmx_l2c_lock_line: line not found for locking at 0x%llx address\n", (unsigned long long)addr); */ | ||
333 | return -1; | ||
334 | } | ||
280 | 335 | ||
281 | /* Set this core as debug core */ | 336 | /* Check if lock bit is not set */ |
282 | l2cdbg.s.ppnum = cvmx_get_core_num(); | 337 | if (!l2c_tadx_tag.s.lock) { |
283 | CVMX_SYNC; | 338 | /* cvmx_dprintf("ERROR: cvmx_l2c_lock_line: Not able to lock at 0x%llx address\n", (unsigned long long)addr); */ |
284 | cvmx_write_csr(CVMX_L2C_DBG, l2cdbg.u64); | 339 | return -1; |
285 | cvmx_read_csr(CVMX_L2C_DBG); | 340 | } |
286 | 341 | return way; | |
287 | lckoff.s.lck_offset = 0; /* Only lock 1 line at a time */ | ||
288 | cvmx_write_csr(CVMX_L2C_LCKOFF, lckoff.u64); | ||
289 | cvmx_read_csr(CVMX_L2C_LCKOFF); | ||
290 | |||
291 | if (((union cvmx_l2c_cfg) (cvmx_read_csr(CVMX_L2C_CFG))).s.idxalias) { | ||
292 | int alias_shift = | ||
293 | CVMX_L2C_IDX_ADDR_SHIFT + 2 * CVMX_L2_SET_BITS - 1; | ||
294 | uint64_t addr_tmp = | ||
295 | addr ^ (addr & ((1 << alias_shift) - 1)) >> | ||
296 | CVMX_L2_SET_BITS; | ||
297 | lckbase.s.lck_base = addr_tmp >> 7; | ||
298 | } else { | 342 | } else { |
299 | lckbase.s.lck_base = addr >> 7; | 343 | int retval = 0; |
300 | } | 344 | union cvmx_l2c_dbg l2cdbg; |
345 | union cvmx_l2c_lckbase lckbase; | ||
346 | union cvmx_l2c_lckoff lckoff; | ||
347 | union cvmx_l2t_err l2t_err; | ||
301 | 348 | ||
302 | lckbase.s.lck_ena = 1; | 349 | cvmx_spinlock_lock(&cvmx_l2c_spinlock); |
303 | cvmx_write_csr(CVMX_L2C_LCKBASE, lckbase.u64); | ||
304 | cvmx_read_csr(CVMX_L2C_LCKBASE); /* Make sure it gets there */ | ||
305 | 350 | ||
306 | fault_in(addr, CVMX_CACHE_LINE_SIZE); | 351 | l2cdbg.u64 = 0; |
352 | lckbase.u64 = 0; | ||
353 | lckoff.u64 = 0; | ||
307 | 354 | ||
308 | lckbase.s.lck_ena = 0; | 355 | /* Clear l2t error bits if set */ |
309 | cvmx_write_csr(CVMX_L2C_LCKBASE, lckbase.u64); | 356 | l2t_err.u64 = cvmx_read_csr(CVMX_L2T_ERR); |
310 | cvmx_read_csr(CVMX_L2C_LCKBASE); /* Make sure it gets there */ | 357 | l2t_err.s.lckerr = 1; |
358 | l2t_err.s.lckerr2 = 1; | ||
359 | cvmx_write_csr(CVMX_L2T_ERR, l2t_err.u64); | ||
311 | 360 | ||
312 | /* Stop being debug core */ | 361 | addr &= ~CVMX_CACHE_LINE_MASK; |
313 | cvmx_write_csr(CVMX_L2C_DBG, 0); | ||
314 | cvmx_read_csr(CVMX_L2C_DBG); | ||
315 | 362 | ||
316 | l2t_err.u64 = cvmx_read_csr(CVMX_L2T_ERR); | 363 | /* Set this core as debug core */ |
317 | if (l2t_err.s.lckerr || l2t_err.s.lckerr2) | 364 | l2cdbg.s.ppnum = cvmx_get_core_num(); |
318 | retval = 1; /* We were unable to lock the line */ | 365 | CVMX_SYNC; |
366 | cvmx_write_csr(CVMX_L2C_DBG, l2cdbg.u64); | ||
367 | cvmx_read_csr(CVMX_L2C_DBG); | ||
368 | |||
369 | lckoff.s.lck_offset = 0; /* Only lock 1 line at a time */ | ||
370 | cvmx_write_csr(CVMX_L2C_LCKOFF, lckoff.u64); | ||
371 | cvmx_read_csr(CVMX_L2C_LCKOFF); | ||
372 | |||
373 | if (((union cvmx_l2c_cfg)(cvmx_read_csr(CVMX_L2C_CFG))).s.idxalias) { | ||
374 | int alias_shift = CVMX_L2C_IDX_ADDR_SHIFT + 2 * CVMX_L2_SET_BITS - 1; | ||
375 | uint64_t addr_tmp = addr ^ (addr & ((1 << alias_shift) - 1)) >> CVMX_L2_SET_BITS; | ||
376 | lckbase.s.lck_base = addr_tmp >> 7; | ||
377 | } else { | ||
378 | lckbase.s.lck_base = addr >> 7; | ||
379 | } | ||
319 | 380 | ||
320 | cvmx_spinlock_unlock(&cvmx_l2c_spinlock); | 381 | lckbase.s.lck_ena = 1; |
382 | cvmx_write_csr(CVMX_L2C_LCKBASE, lckbase.u64); | ||
383 | /* Make sure it gets there */ | ||
384 | cvmx_read_csr(CVMX_L2C_LCKBASE); | ||
321 | 385 | ||
322 | return retval; | 386 | fault_in(addr, CVMX_CACHE_LINE_SIZE); |
387 | |||
388 | lckbase.s.lck_ena = 0; | ||
389 | cvmx_write_csr(CVMX_L2C_LCKBASE, lckbase.u64); | ||
390 | /* Make sure it gets there */ | ||
391 | cvmx_read_csr(CVMX_L2C_LCKBASE); | ||
392 | |||
393 | /* Stop being debug core */ | ||
394 | cvmx_write_csr(CVMX_L2C_DBG, 0); | ||
395 | cvmx_read_csr(CVMX_L2C_DBG); | ||
396 | |||
397 | l2t_err.u64 = cvmx_read_csr(CVMX_L2T_ERR); | ||
398 | if (l2t_err.s.lckerr || l2t_err.s.lckerr2) | ||
399 | retval = 1; /* We were unable to lock the line */ | ||
400 | |||
401 | cvmx_spinlock_unlock(&cvmx_l2c_spinlock); | ||
402 | return retval; | ||
403 | } | ||
323 | } | 404 | } |
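Usage sketch (the buffer and cvmx_ptr_to_phys() from cvmx.h are assumptions, not part of the patch): lock one aligned line, noting the differing return conventions visible above.

    char buf[CVMX_CACHE_LINE_SIZE] __attribute__((aligned(CVMX_CACHE_LINE_SIZE)));
    int rv = cvmx_l2c_lock_line(cvmx_ptr_to_phys(buf));
    /* CN63XX path: rv is the way locked, or -1 on failure.
     * Legacy path:  rv is 0 on success, 1 if the line could not be locked. */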
324 | 405 | ||
325 | int cvmx_l2c_lock_mem_region(uint64_t start, uint64_t len) | 406 | int cvmx_l2c_lock_mem_region(uint64_t start, uint64_t len) |
@@ -336,7 +417,6 @@ int cvmx_l2c_lock_mem_region(uint64_t start, uint64_t len) | |||
336 | start += CVMX_CACHE_LINE_SIZE; | 417 | start += CVMX_CACHE_LINE_SIZE; |
337 | len -= CVMX_CACHE_LINE_SIZE; | 418 | len -= CVMX_CACHE_LINE_SIZE; |
338 | } | 419 | } |
339 | |||
340 | return retval; | 420 | return retval; |
341 | } | 421 | } |
342 | 422 | ||
@@ -344,80 +424,73 @@ void cvmx_l2c_flush(void) | |||
344 | { | 424 | { |
345 | uint64_t assoc, set; | 425 | uint64_t assoc, set; |
346 | uint64_t n_assoc, n_set; | 426 | uint64_t n_assoc, n_set; |
347 | union cvmx_l2c_dbg l2cdbg; | ||
348 | |||
349 | cvmx_spinlock_lock(&cvmx_l2c_spinlock); | ||
350 | 427 | ||
351 | l2cdbg.u64 = 0; | 428 | n_set = cvmx_l2c_get_num_sets(); |
352 | if (!OCTEON_IS_MODEL(OCTEON_CN30XX)) | 429 | n_assoc = cvmx_l2c_get_num_assoc(); |
353 | l2cdbg.s.ppnum = cvmx_get_core_num(); | 430 | |
354 | l2cdbg.s.finv = 1; | 431 | if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) { |
355 | n_set = CVMX_L2_SETS; | 432 | uint64_t address; |
356 | n_assoc = l2_size_half() ? (CVMX_L2_ASSOC / 2) : CVMX_L2_ASSOC; | 433 | /* These may look like constants, but they aren't... */ |
357 | for (set = 0; set < n_set; set++) { | 434 | int assoc_shift = CVMX_L2C_TAG_ADDR_ALIAS_SHIFT; |
358 | for (assoc = 0; assoc < n_assoc; assoc++) { | 435 | int set_shift = CVMX_L2C_IDX_ADDR_SHIFT; |
359 | l2cdbg.s.set = assoc; | 436 | for (set = 0; set < n_set; set++) { |
360 | /* Enter debug mode, and make sure all other | 437 | for (assoc = 0; assoc < n_assoc; assoc++) { |
361 | ** writes complete before we enter debug | 438 | address = CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS, |
362 | ** mode */ | 439 | (assoc << assoc_shift) | (set << set_shift)); |
363 | CVMX_SYNCW; | 440 | CVMX_CACHE_WBIL2I(address, 0); |
364 | cvmx_write_csr(CVMX_L2C_DBG, l2cdbg.u64); | 441 | } |
365 | cvmx_read_csr(CVMX_L2C_DBG); | ||
366 | |||
367 | CVMX_PREPARE_FOR_STORE(CVMX_ADD_SEG | ||
368 | (CVMX_MIPS_SPACE_XKPHYS, | ||
369 | set * CVMX_CACHE_LINE_SIZE), 0); | ||
370 | CVMX_SYNCW; /* Push STF out to L2 */ | ||
371 | /* Exit debug mode */ | ||
372 | CVMX_SYNC; | ||
373 | cvmx_write_csr(CVMX_L2C_DBG, 0); | ||
374 | cvmx_read_csr(CVMX_L2C_DBG); | ||
375 | } | 442 | } |
443 | } else { | ||
444 | for (set = 0; set < n_set; set++) | ||
445 | for (assoc = 0; assoc < n_assoc; assoc++) | ||
446 | cvmx_l2c_flush_line(assoc, set); | ||
376 | } | 447 | } |
377 | |||
378 | cvmx_spinlock_unlock(&cvmx_l2c_spinlock); | ||
379 | } | 448 | } |
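Usage sketch (the pairing is an assumption, not stated in the patch): a full flush is a natural companion to a partitioning change, so lines already resident in newly blocked ways get written back and invalidated.

    cvmx_l2c_set_core_way_partition(cvmx_get_core_num(), 0xF0);
    cvmx_l2c_flush();   /* walks every set/way via the model-specific loops above */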
380 | 449 | ||
450 | |||
381 | int cvmx_l2c_unlock_line(uint64_t address) | 451 | int cvmx_l2c_unlock_line(uint64_t address) |
382 | { | 452 | { |
383 | int assoc; | ||
384 | union cvmx_l2c_tag tag; | ||
385 | union cvmx_l2c_dbg l2cdbg; | ||
386 | uint32_t tag_addr; | ||
387 | 453 | ||
388 | uint32_t index = cvmx_l2c_address_to_index(address); | 454 | if (OCTEON_IS_MODEL(OCTEON_CN63XX)) { |
455 | int assoc; | ||
456 | union cvmx_l2c_tag tag; | ||
457 | uint32_t tag_addr; | ||
458 | uint32_t index = cvmx_l2c_address_to_index(address); | ||
459 | |||
460 | tag_addr = ((address >> CVMX_L2C_TAG_ADDR_ALIAS_SHIFT) & ((1 << CVMX_L2C_TAG_ADDR_ALIAS_SHIFT) - 1)); | ||
461 | |||
462 | /* | ||
463 | * For 63XX, we can flush a line by using the physical | ||
464 | * address directly, so finding the cache line used by | ||
465 | * the address is only required to provide the proper | ||
466 | * return value for the function. | ||
467 | */ | ||
468 | for (assoc = 0; assoc < CVMX_L2_ASSOC; assoc++) { | ||
469 | tag = cvmx_l2c_get_tag(assoc, index); | ||
470 | |||
471 | if (tag.s.V && (tag.s.addr == tag_addr)) { | ||
472 | CVMX_CACHE_WBIL2(CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS, address), 0); | ||
473 | return tag.s.L; | ||
474 | } | ||
475 | } | ||
476 | } else { | ||
477 | int assoc; | ||
478 | union cvmx_l2c_tag tag; | ||
479 | uint32_t tag_addr; | ||
389 | 480 | ||
390 | cvmx_spinlock_lock(&cvmx_l2c_spinlock); | 481 | uint32_t index = cvmx_l2c_address_to_index(address); |
391 | /* Compute portion of address that is stored in tag */ | ||
392 | tag_addr = | ||
393 | ((address >> CVMX_L2C_TAG_ADDR_ALIAS_SHIFT) & | ||
394 | ((1 << CVMX_L2C_TAG_ADDR_ALIAS_SHIFT) - 1)); | ||
395 | for (assoc = 0; assoc < CVMX_L2_ASSOC; assoc++) { | ||
396 | tag = cvmx_get_l2c_tag(assoc, index); | ||
397 | 482 | ||
398 | if (tag.s.V && (tag.s.addr == tag_addr)) { | 483 | /* Compute portion of address that is stored in tag */ |
399 | l2cdbg.u64 = 0; | 484 | tag_addr = ((address >> CVMX_L2C_TAG_ADDR_ALIAS_SHIFT) & ((1 << CVMX_L2C_TAG_ADDR_ALIAS_SHIFT) - 1)); |
400 | l2cdbg.s.ppnum = cvmx_get_core_num(); | 485 | for (assoc = 0; assoc < CVMX_L2_ASSOC; assoc++) { |
401 | l2cdbg.s.set = assoc; | 486 | tag = cvmx_l2c_get_tag(assoc, index); |
402 | l2cdbg.s.finv = 1; | ||
403 | 487 | ||
404 | CVMX_SYNC; | 488 | if (tag.s.V && (tag.s.addr == tag_addr)) { |
405 | /* Enter debug mode */ | 489 | cvmx_l2c_flush_line(assoc, index); |
406 | cvmx_write_csr(CVMX_L2C_DBG, l2cdbg.u64); | 490 | return tag.s.L; |
407 | cvmx_read_csr(CVMX_L2C_DBG); | 491 | } |
408 | |||
409 | CVMX_PREPARE_FOR_STORE(CVMX_ADD_SEG | ||
410 | (CVMX_MIPS_SPACE_XKPHYS, | ||
411 | address), 0); | ||
412 | CVMX_SYNC; | ||
413 | /* Exit debug mode */ | ||
414 | cvmx_write_csr(CVMX_L2C_DBG, 0); | ||
415 | cvmx_read_csr(CVMX_L2C_DBG); | ||
416 | cvmx_spinlock_unlock(&cvmx_l2c_spinlock); | ||
417 | return tag.s.L; | ||
418 | } | 492 | } |
419 | } | 493 | } |
420 | cvmx_spinlock_unlock(&cvmx_l2c_spinlock); | ||
421 | return 0; | 494 | return 0; |
422 | } | 495 | } |
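Usage sketch, continuing the locking example above: the return value is the lock bit of the matching line (0 if none matched), and on CN63XX the flush is done by physical address as the comment in the new code explains.

    uint64_t phys = cvmx_ptr_to_phys(buf);      /* buf from the locking sketch */
    if (cvmx_l2c_unlock_line(phys) == 0)
            cvmx_dprintf("line at 0x%llx was not locked\n", (unsigned long long)phys);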
423 | 496 | ||
@@ -445,48 +518,49 @@ union __cvmx_l2c_tag { | |||
445 | uint64_t u64; | 518 | uint64_t u64; |
446 | struct cvmx_l2c_tag_cn50xx { | 519 | struct cvmx_l2c_tag_cn50xx { |
447 | uint64_t reserved:40; | 520 | uint64_t reserved:40; |
448 | uint64_t V:1; /* Line valid */ | 521 | uint64_t V:1; /* Line valid */ |
449 | uint64_t D:1; /* Line dirty */ | 522 | uint64_t D:1; /* Line dirty */ |
450 | uint64_t L:1; /* Line locked */ | 523 | uint64_t L:1; /* Line locked */ |
451 | uint64_t U:1; /* Use, LRU eviction */ | 524 | uint64_t U:1; /* Use, LRU eviction */ |
452 | uint64_t addr:20; /* Phys mem addr (33..14) */ | 525 | uint64_t addr:20; /* Phys mem addr (33..14) */ |
453 | } cn50xx; | 526 | } cn50xx; |
454 | struct cvmx_l2c_tag_cn30xx { | 527 | struct cvmx_l2c_tag_cn30xx { |
455 | uint64_t reserved:41; | 528 | uint64_t reserved:41; |
456 | uint64_t V:1; /* Line valid */ | 529 | uint64_t V:1; /* Line valid */ |
457 | uint64_t D:1; /* Line dirty */ | 530 | uint64_t D:1; /* Line dirty */ |
458 | uint64_t L:1; /* Line locked */ | 531 | uint64_t L:1; /* Line locked */ |
459 | uint64_t U:1; /* Use, LRU eviction */ | 532 | uint64_t U:1; /* Use, LRU eviction */ |
460 | uint64_t addr:19; /* Phys mem addr (33..15) */ | 533 | uint64_t addr:19; /* Phys mem addr (33..15) */ |
461 | } cn30xx; | 534 | } cn30xx; |
462 | struct cvmx_l2c_tag_cn31xx { | 535 | struct cvmx_l2c_tag_cn31xx { |
463 | uint64_t reserved:42; | 536 | uint64_t reserved:42; |
464 | uint64_t V:1; /* Line valid */ | 537 | uint64_t V:1; /* Line valid */ |
465 | uint64_t D:1; /* Line dirty */ | 538 | uint64_t D:1; /* Line dirty */ |
466 | uint64_t L:1; /* Line locked */ | 539 | uint64_t L:1; /* Line locked */ |
467 | uint64_t U:1; /* Use, LRU eviction */ | 540 | uint64_t U:1; /* Use, LRU eviction */ |
468 | uint64_t addr:18; /* Phys mem addr (33..16) */ | 541 | uint64_t addr:18; /* Phys mem addr (33..16) */ |
469 | } cn31xx; | 542 | } cn31xx; |
470 | struct cvmx_l2c_tag_cn38xx { | 543 | struct cvmx_l2c_tag_cn38xx { |
471 | uint64_t reserved:43; | 544 | uint64_t reserved:43; |
472 | uint64_t V:1; /* Line valid */ | 545 | uint64_t V:1; /* Line valid */ |
473 | uint64_t D:1; /* Line dirty */ | 546 | uint64_t D:1; /* Line dirty */ |
474 | uint64_t L:1; /* Line locked */ | 547 | uint64_t L:1; /* Line locked */ |
475 | uint64_t U:1; /* Use, LRU eviction */ | 548 | uint64_t U:1; /* Use, LRU eviction */ |
476 | uint64_t addr:17; /* Phys mem addr (33..17) */ | 549 | uint64_t addr:17; /* Phys mem addr (33..17) */ |
477 | } cn38xx; | 550 | } cn38xx; |
478 | struct cvmx_l2c_tag_cn58xx { | 551 | struct cvmx_l2c_tag_cn58xx { |
479 | uint64_t reserved:44; | 552 | uint64_t reserved:44; |
480 | uint64_t V:1; /* Line valid */ | 553 | uint64_t V:1; /* Line valid */ |
481 | uint64_t D:1; /* Line dirty */ | 554 | uint64_t D:1; /* Line dirty */ |
482 | uint64_t L:1; /* Line locked */ | 555 | uint64_t L:1; /* Line locked */ |
483 | uint64_t U:1; /* Use, LRU eviction */ | 556 | uint64_t U:1; /* Use, LRU eviction */ |
484 | uint64_t addr:16; /* Phys mem addr (33..18) */ | 557 | uint64_t addr:16; /* Phys mem addr (33..18) */ |
485 | } cn58xx; | 558 | } cn58xx; |
486 | struct cvmx_l2c_tag_cn58xx cn56xx; /* 2048 sets */ | 559 | struct cvmx_l2c_tag_cn58xx cn56xx; /* 2048 sets */ |
487 | struct cvmx_l2c_tag_cn31xx cn52xx; /* 512 sets */ | 560 | struct cvmx_l2c_tag_cn31xx cn52xx; /* 512 sets */ |
488 | }; | 561 | }; |
489 | 562 | ||
563 | |||
490 | /** | 564 | /** |
491 | * @INTERNAL | 565 | * @INTERNAL |
492 | * Function to read a L2C tag. This code make the current core | 566 | * Function to read a L2C tag. This code make the current core |
@@ -503,7 +577,7 @@ union __cvmx_l2c_tag { | |||
503 | static union __cvmx_l2c_tag __read_l2_tag(uint64_t assoc, uint64_t index) | 577 | static union __cvmx_l2c_tag __read_l2_tag(uint64_t assoc, uint64_t index) |
504 | { | 578 | { |
505 | 579 | ||
506 | uint64_t debug_tag_addr = (((1ULL << 63) | (index << 7)) + 96); | 580 | uint64_t debug_tag_addr = CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS, (index << 7) + 96); |
507 | uint64_t core = cvmx_get_core_num(); | 581 | uint64_t core = cvmx_get_core_num(); |
508 | union __cvmx_l2c_tag tag_val; | 582 | union __cvmx_l2c_tag tag_val; |
509 | uint64_t dbg_addr = CVMX_L2C_DBG; | 583 | uint64_t dbg_addr = CVMX_L2C_DBG; |
@@ -512,12 +586,15 @@ static union __cvmx_l2c_tag __read_l2_tag(uint64_t assoc, uint64_t index) | |||
512 | union cvmx_l2c_dbg debug_val; | 586 | union cvmx_l2c_dbg debug_val; |
513 | debug_val.u64 = 0; | 587 | debug_val.u64 = 0; |
514 | /* | 588 | /* |
515 | * For low core count parts, the core number is always small enough | 589 | * For low core count parts, the core number is always small |
516 | * to stay in the correct field and not set any reserved bits. | 590 | * enough to stay in the correct field and not set any |
591 | * reserved bits. | ||
517 | */ | 592 | */ |
518 | debug_val.s.ppnum = core; | 593 | debug_val.s.ppnum = core; |
519 | debug_val.s.l2t = 1; | 594 | debug_val.s.l2t = 1; |
520 | debug_val.s.set = assoc; | 595 | debug_val.s.set = assoc; |
596 | |||
597 | local_irq_save(flags); | ||
521 | /* | 598 | /* |
522 | * Make sure core is quiet (no prefetches, etc.) before | 599 | * Make sure core is quiet (no prefetches, etc.) before |
523 | * entering debug mode. | 600 | * entering debug mode. |
@@ -526,112 +603,139 @@ static union __cvmx_l2c_tag __read_l2_tag(uint64_t assoc, uint64_t index) | |||
526 | /* Flush L1 to make sure debug load misses L1 */ | 603 | /* Flush L1 to make sure debug load misses L1 */ |
527 | CVMX_DCACHE_INVALIDATE; | 604 | CVMX_DCACHE_INVALIDATE; |
528 | 605 | ||
529 | local_irq_save(flags); | ||
530 | |||
531 | /* | 606 | /* |
532 | * The following must be done in assembly as when in debug | 607 | * The following must be done in assembly as when in debug |
533 | * mode all data loads from L2 return special debug data, not | 608 | * mode all data loads from L2 return special debug data, not |
534 | * normal memory contents. Also, interrupts must be | 609 | * normal memory contents. Also, interrupts must be disabled, |
535 | * disabled, since if an interrupt occurs while in debug mode | 610 | * since if an interrupt occurs while in debug mode the ISR |
536 | * mode all data loads from L2 return special debug data, not | 611 | * will get debug data from all its memory reads instead of |
537 | * instead of the contents of memory | 612 | * the contents of memory. |
538 | */ | 613 | */ |
539 | 614 | ||
540 | asm volatile (".set push \n" | 615 | asm volatile ( |
541 | " .set mips64 \n" | 616 | ".set push\n\t" |
542 | " .set noreorder \n" | 617 | ".set mips64\n\t" |
543 | /* Enter debug mode, wait for store */ | 618 | ".set noreorder\n\t" |
544 | " sd %[dbg_val], 0(%[dbg_addr]) \n" | 619 | "sd %[dbg_val], 0(%[dbg_addr])\n\t" /* Enter debug mode, wait for store */ |
545 | " ld $0, 0(%[dbg_addr]) \n" | 620 | "ld $0, 0(%[dbg_addr])\n\t" |
546 | /* Read L2C tag data */ | 621 | "ld %[tag_val], 0(%[tag_addr])\n\t" /* Read L2C tag data */ |
547 | " ld %[tag_val], 0(%[tag_addr]) \n" | 622 | "sd $0, 0(%[dbg_addr])\n\t" /* Exit debug mode, wait for store */ |
548 | /* Exit debug mode, wait for store */ | 623 | "ld $0, 0(%[dbg_addr])\n\t" |
549 | " sd $0, 0(%[dbg_addr]) \n" | 624 | "cache 9, 0($0)\n\t" /* Invalidate dcache to discard debug data */ |
550 | " ld $0, 0(%[dbg_addr]) \n" | 625 | ".set pop" |
551 | /* Invalidate dcache to discard debug data */ | 626 | : [tag_val] "=r" (tag_val) |
552 | " cache 9, 0($0) \n" | 627 | : [dbg_addr] "r" (dbg_addr), [dbg_val] "r" (debug_val), [tag_addr] "r" (debug_tag_addr) |
553 | " .set pop" : | 628 | : "memory"); |
554 | [tag_val] "=r"(tag_val.u64) : [dbg_addr] "r"(dbg_addr), | ||
555 | [dbg_val] "r"(debug_val.u64), | ||
556 | [tag_addr] "r"(debug_tag_addr) : "memory"); | ||
557 | 629 | ||
558 | local_irq_restore(flags); | 630 | local_irq_restore(flags); |
559 | return tag_val; | ||
560 | 631 | ||
632 | return tag_val; | ||
561 | } | 633 | } |
562 | 634 | ||
635 | |||
563 | union cvmx_l2c_tag cvmx_l2c_get_tag(uint32_t association, uint32_t index) | 636 | union cvmx_l2c_tag cvmx_l2c_get_tag(uint32_t association, uint32_t index) |
564 | { | 637 | { |
565 | union __cvmx_l2c_tag tmp_tag; | ||
566 | union cvmx_l2c_tag tag; | 638 | union cvmx_l2c_tag tag; |
567 | tag.u64 = 0; | 639 | tag.u64 = 0; |
568 | 640 | ||
569 | if ((int)association >= cvmx_l2c_get_num_assoc()) { | 641 | if ((int)association >= cvmx_l2c_get_num_assoc()) { |
570 | cvmx_dprintf | 642 | cvmx_dprintf("ERROR: cvmx_l2c_get_tag association out of range\n"); |
571 | ("ERROR: cvmx_get_l2c_tag association out of range\n"); | ||
572 | return tag; | 643 | return tag; |
573 | } | 644 | } |
574 | if ((int)index >= cvmx_l2c_get_num_sets()) { | 645 | if ((int)index >= cvmx_l2c_get_num_sets()) { |
575 | cvmx_dprintf("ERROR: cvmx_get_l2c_tag " | 646 | cvmx_dprintf("ERROR: cvmx_l2c_get_tag index out of range (arg: %d, max: %d)\n", |
576 | "index out of range (arg: %d, max: %d\n", | 647 | (int)index, cvmx_l2c_get_num_sets()); |
577 | index, cvmx_l2c_get_num_sets()); | ||
578 | return tag; | 648 | return tag; |
579 | } | 649 | } |
580 | /* __read_l2_tag is intended for internal use only */ | 650 | if (OCTEON_IS_MODEL(OCTEON_CN63XX)) { |
581 | tmp_tag = __read_l2_tag(association, index); | 651 | union cvmx_l2c_tadx_tag l2c_tadx_tag; |
582 | 652 | uint64_t address = CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS, | |
583 | /* | 653 | (association << CVMX_L2C_TAG_ADDR_ALIAS_SHIFT) | |
584 | * Convert all tag structure types to generic version, as it | 654 | (index << CVMX_L2C_IDX_ADDR_SHIFT)); |
585 | * can represent all models. | 655 | /* |
586 | */ | 656 | * Use L2 cache Index load tag cache instruction, as |
587 | if (OCTEON_IS_MODEL(OCTEON_CN58XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)) { | 657 | * hardware loads the virtual tag for the L2 cache |
588 | tag.s.V = tmp_tag.cn58xx.V; | 658 | * block with the contents of L2C_TAD0_TAG |
589 | tag.s.D = tmp_tag.cn58xx.D; | 659 | * register. |
590 | tag.s.L = tmp_tag.cn58xx.L; | 660 | */ |
591 | tag.s.U = tmp_tag.cn58xx.U; | 661 | CVMX_CACHE_LTGL2I(address, 0); |
592 | tag.s.addr = tmp_tag.cn58xx.addr; | 662 | CVMX_SYNC; /* make sure CVMX_L2C_TADX_TAG is updated */ |
593 | } else if (OCTEON_IS_MODEL(OCTEON_CN38XX)) { | 663 | l2c_tadx_tag.u64 = cvmx_read_csr(CVMX_L2C_TADX_TAG(0)); |
594 | tag.s.V = tmp_tag.cn38xx.V; | 664 | |
595 | tag.s.D = tmp_tag.cn38xx.D; | 665 | tag.s.V = l2c_tadx_tag.s.valid; |
596 | tag.s.L = tmp_tag.cn38xx.L; | 666 | tag.s.D = l2c_tadx_tag.s.dirty; |
597 | tag.s.U = tmp_tag.cn38xx.U; | 667 | tag.s.L = l2c_tadx_tag.s.lock; |
598 | tag.s.addr = tmp_tag.cn38xx.addr; | 668 | tag.s.U = l2c_tadx_tag.s.use; |
599 | } else if (OCTEON_IS_MODEL(OCTEON_CN31XX) | 669 | tag.s.addr = l2c_tadx_tag.s.tag; |
600 | || OCTEON_IS_MODEL(OCTEON_CN52XX)) { | ||
601 | tag.s.V = tmp_tag.cn31xx.V; | ||
602 | tag.s.D = tmp_tag.cn31xx.D; | ||
603 | tag.s.L = tmp_tag.cn31xx.L; | ||
604 | tag.s.U = tmp_tag.cn31xx.U; | ||
605 | tag.s.addr = tmp_tag.cn31xx.addr; | ||
606 | } else if (OCTEON_IS_MODEL(OCTEON_CN30XX)) { | ||
607 | tag.s.V = tmp_tag.cn30xx.V; | ||
608 | tag.s.D = tmp_tag.cn30xx.D; | ||
609 | tag.s.L = tmp_tag.cn30xx.L; | ||
610 | tag.s.U = tmp_tag.cn30xx.U; | ||
611 | tag.s.addr = tmp_tag.cn30xx.addr; | ||
612 | } else if (OCTEON_IS_MODEL(OCTEON_CN50XX)) { | ||
613 | tag.s.V = tmp_tag.cn50xx.V; | ||
614 | tag.s.D = tmp_tag.cn50xx.D; | ||
615 | tag.s.L = tmp_tag.cn50xx.L; | ||
616 | tag.s.U = tmp_tag.cn50xx.U; | ||
617 | tag.s.addr = tmp_tag.cn50xx.addr; | ||
618 | } else { | 670 | } else { |
619 | cvmx_dprintf("Unsupported OCTEON Model in %s\n", __func__); | 671 | union __cvmx_l2c_tag tmp_tag; |
672 | /* __read_l2_tag is intended for internal use only */ | ||
673 | tmp_tag = __read_l2_tag(association, index); | ||
674 | |||
675 | /* | ||
676 | * Convert all tag structure types to generic version, | ||
677 | * as it can represent all models. | ||
678 | */ | ||
679 | if (OCTEON_IS_MODEL(OCTEON_CN58XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)) { | ||
680 | tag.s.V = tmp_tag.cn58xx.V; | ||
681 | tag.s.D = tmp_tag.cn58xx.D; | ||
682 | tag.s.L = tmp_tag.cn58xx.L; | ||
683 | tag.s.U = tmp_tag.cn58xx.U; | ||
684 | tag.s.addr = tmp_tag.cn58xx.addr; | ||
685 | } else if (OCTEON_IS_MODEL(OCTEON_CN38XX)) { | ||
686 | tag.s.V = tmp_tag.cn38xx.V; | ||
687 | tag.s.D = tmp_tag.cn38xx.D; | ||
688 | tag.s.L = tmp_tag.cn38xx.L; | ||
689 | tag.s.U = tmp_tag.cn38xx.U; | ||
690 | tag.s.addr = tmp_tag.cn38xx.addr; | ||
691 | } else if (OCTEON_IS_MODEL(OCTEON_CN31XX) || OCTEON_IS_MODEL(OCTEON_CN52XX)) { | ||
692 | tag.s.V = tmp_tag.cn31xx.V; | ||
693 | tag.s.D = tmp_tag.cn31xx.D; | ||
694 | tag.s.L = tmp_tag.cn31xx.L; | ||
695 | tag.s.U = tmp_tag.cn31xx.U; | ||
696 | tag.s.addr = tmp_tag.cn31xx.addr; | ||
697 | } else if (OCTEON_IS_MODEL(OCTEON_CN30XX)) { | ||
698 | tag.s.V = tmp_tag.cn30xx.V; | ||
699 | tag.s.D = tmp_tag.cn30xx.D; | ||
700 | tag.s.L = tmp_tag.cn30xx.L; | ||
701 | tag.s.U = tmp_tag.cn30xx.U; | ||
702 | tag.s.addr = tmp_tag.cn30xx.addr; | ||
703 | } else if (OCTEON_IS_MODEL(OCTEON_CN50XX)) { | ||
704 | tag.s.V = tmp_tag.cn50xx.V; | ||
705 | tag.s.D = tmp_tag.cn50xx.D; | ||
706 | tag.s.L = tmp_tag.cn50xx.L; | ||
707 | tag.s.U = tmp_tag.cn50xx.U; | ||
708 | tag.s.addr = tmp_tag.cn50xx.addr; | ||
709 | } else { | ||
710 | cvmx_dprintf("Unsupported OCTEON Model in %s\n", __func__); | ||
711 | } | ||
620 | } | 712 | } |
621 | |||
622 | return tag; | 713 | return tag; |
623 | } | 714 | } |
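A small sketch built on cvmx_l2c_get_tag() (helper name invented): count the locked ways within one set using the generic tag fields the function fills in for every model.

    /* Illustration only: how many ways of set 'index' hold locked lines? */
    static int example_locked_ways(uint32_t index)
    {
            int way, locked = 0;
            for (way = 0; way < cvmx_l2c_get_num_assoc(); way++) {
                    union cvmx_l2c_tag t = cvmx_l2c_get_tag(way, index);
                    if (t.s.V && t.s.L)
                            locked++;
            }
            return locked;
    }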
624 | 715 | ||
625 | uint32_t cvmx_l2c_address_to_index(uint64_t addr) | 716 | uint32_t cvmx_l2c_address_to_index(uint64_t addr) |
626 | { | 717 | { |
627 | uint64_t idx = addr >> CVMX_L2C_IDX_ADDR_SHIFT; | 718 | uint64_t idx = addr >> CVMX_L2C_IDX_ADDR_SHIFT; |
628 | union cvmx_l2c_cfg l2c_cfg; | 719 | int indxalias = 0; |
629 | l2c_cfg.u64 = cvmx_read_csr(CVMX_L2C_CFG); | ||
630 | 720 | ||
631 | if (l2c_cfg.s.idxalias) { | 721 | if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) { |
632 | idx ^= | 722 | union cvmx_l2c_ctl l2c_ctl; |
633 | ((addr & CVMX_L2C_ALIAS_MASK) >> | 723 | l2c_ctl.u64 = cvmx_read_csr(CVMX_L2C_CTL); |
634 | CVMX_L2C_TAG_ADDR_ALIAS_SHIFT); | 724 | indxalias = !l2c_ctl.s.disidxalias; |
725 | } else { | ||
726 | union cvmx_l2c_cfg l2c_cfg; | ||
727 | l2c_cfg.u64 = cvmx_read_csr(CVMX_L2C_CFG); | ||
728 | indxalias = l2c_cfg.s.idxalias; | ||
729 | } | ||
730 | |||
731 | if (indxalias) { | ||
732 | if (OCTEON_IS_MODEL(OCTEON_CN63XX)) { | ||
733 | uint32_t a_14_12 = (idx / (CVMX_L2C_MEMBANK_SELECT_SIZE/(1<<CVMX_L2C_IDX_ADDR_SHIFT))) & 0x7; | ||
734 | idx ^= idx / cvmx_l2c_get_num_sets(); | ||
735 | idx ^= a_14_12; | ||
736 | } else { | ||
737 | idx ^= ((addr & CVMX_L2C_ALIAS_MASK) >> CVMX_L2C_TAG_ADDR_ALIAS_SHIFT); | ||
738 | } | ||
635 | } | 739 | } |
636 | idx &= CVMX_L2C_IDX_MASK; | 740 | idx &= CVMX_L2C_IDX_MASK; |
637 | return idx; | 741 | return idx; |
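Usage sketch: the function folds in index aliasing whenever it is enabled, so a caller only supplies a physical address (cvmx_ptr_to_phys() assumed from cvmx.h, buf from the sketches above).

    uint32_t set = cvmx_l2c_address_to_index(cvmx_ptr_to_phys(buf));
    cvmx_dprintf("buf maps to L2 set %u\n", (unsigned int)set);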
@@ -652,10 +756,9 @@ int cvmx_l2c_get_set_bits(void) | |||
652 | int l2_set_bits; | 756 | int l2_set_bits; |
653 | if (OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)) | 757 | if (OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)) |
654 | l2_set_bits = 11; /* 2048 sets */ | 758 | l2_set_bits = 11; /* 2048 sets */ |
655 | else if (OCTEON_IS_MODEL(OCTEON_CN38XX)) | 759 | else if (OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN63XX)) |
656 | l2_set_bits = 10; /* 1024 sets */ | 760 | l2_set_bits = 10; /* 1024 sets */ |
657 | else if (OCTEON_IS_MODEL(OCTEON_CN31XX) | 761 | else if (OCTEON_IS_MODEL(OCTEON_CN31XX) || OCTEON_IS_MODEL(OCTEON_CN52XX)) |
658 | || OCTEON_IS_MODEL(OCTEON_CN52XX)) | ||
659 | l2_set_bits = 9; /* 512 sets */ | 762 | l2_set_bits = 9; /* 512 sets */ |
660 | else if (OCTEON_IS_MODEL(OCTEON_CN30XX)) | 763 | else if (OCTEON_IS_MODEL(OCTEON_CN30XX)) |
661 | l2_set_bits = 8; /* 256 sets */ | 764 | l2_set_bits = 8; /* 256 sets */ |
@@ -666,7 +769,6 @@ int cvmx_l2c_get_set_bits(void) | |||
666 | l2_set_bits = 11; /* 2048 sets */ | 769 | l2_set_bits = 11; /* 2048 sets */ |
667 | } | 770 | } |
668 | return l2_set_bits; | 771 | return l2_set_bits; |
669 | |||
670 | } | 772 | } |
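For reference (a sketch; cvmx_l2c_get_num_sets() itself is outside this hunk): the set count follows directly from the bit count above.

    int n_sets = 1 << cvmx_l2c_get_set_bits();  /* e.g. 10 bits on CN38XX/CN63XX -> 1024 sets */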
671 | 773 | ||
672 | /* Return the number of sets in the L2 Cache */ | 774 | /* Return the number of sets in the L2 Cache */ |
@@ -682,8 +784,11 @@ int cvmx_l2c_get_num_assoc(void) | |||
682 | if (OCTEON_IS_MODEL(OCTEON_CN56XX) || | 784 | if (OCTEON_IS_MODEL(OCTEON_CN56XX) || |
683 | OCTEON_IS_MODEL(OCTEON_CN52XX) || | 785 | OCTEON_IS_MODEL(OCTEON_CN52XX) || |
684 | OCTEON_IS_MODEL(OCTEON_CN58XX) || | 786 | OCTEON_IS_MODEL(OCTEON_CN58XX) || |
685 | OCTEON_IS_MODEL(OCTEON_CN50XX) || OCTEON_IS_MODEL(OCTEON_CN38XX)) | 787 | OCTEON_IS_MODEL(OCTEON_CN50XX) || |
788 | OCTEON_IS_MODEL(OCTEON_CN38XX)) | ||
686 | l2_assoc = 8; | 789 | l2_assoc = 8; |
790 | else if (OCTEON_IS_MODEL(OCTEON_CN63XX)) | ||
791 | l2_assoc = 16; | ||
687 | else if (OCTEON_IS_MODEL(OCTEON_CN31XX) || | 792 | else if (OCTEON_IS_MODEL(OCTEON_CN31XX) || |
688 | OCTEON_IS_MODEL(OCTEON_CN30XX)) | 793 | OCTEON_IS_MODEL(OCTEON_CN30XX)) |
689 | l2_assoc = 4; | 794 | l2_assoc = 4; |
@@ -693,11 +798,42 @@ int cvmx_l2c_get_num_assoc(void) | |||
693 | } | 798 | } |
694 | 799 | ||
695 | /* Check to see if part of the cache is disabled */ | 800 | /* Check to see if part of the cache is disabled */ |
696 | if (cvmx_fuse_read(265)) | 801 | if (OCTEON_IS_MODEL(OCTEON_CN63XX)) { |
697 | l2_assoc = l2_assoc >> 2; | 802 | union cvmx_mio_fus_dat3 mio_fus_dat3; |
698 | else if (cvmx_fuse_read(264)) | 803 | |
699 | l2_assoc = l2_assoc >> 1; | 804 | mio_fus_dat3.u64 = cvmx_read_csr(CVMX_MIO_FUS_DAT3); |
700 | 805 | /* | |
806 | * cvmx_mio_fus_dat3.s.l2c_crip fuses map as follows | ||
807 | * <2> will be not used for 63xx | ||
808 | * <1> disables 1/2 ways | ||
809 | * <0> disables 1/4 ways | ||
810 | * They are cumulative, so for 63xx: | ||
811 | * <1> <0> | ||
812 | * 0 0 16-way 2MB cache | ||
813 | * 0 1 12-way 1.5MB cache | ||
814 | * 1 0 8-way 1MB cache | ||
815 | * 1 1 4-way 512KB cache | ||
816 | */ | ||
817 | |||
818 | if (mio_fus_dat3.s.l2c_crip == 3) | ||
819 | l2_assoc = 4; | ||
820 | else if (mio_fus_dat3.s.l2c_crip == 2) | ||
821 | l2_assoc = 8; | ||
822 | else if (mio_fus_dat3.s.l2c_crip == 1) | ||
823 | l2_assoc = 12; | ||
824 | } else { | ||
825 | union cvmx_l2d_fus3 val; | ||
826 | val.u64 = cvmx_read_csr(CVMX_L2D_FUS3); | ||
827 | /* | ||
828 | * Using shifts here, as bit position names are | ||
829 | * different for each model but they all mean the | ||
830 | * same. | ||
831 | */ | ||
832 | if ((val.u64 >> 35) & 0x1) | ||
833 | l2_assoc = l2_assoc >> 2; | ||
834 | else if ((val.u64 >> 34) & 0x1) | ||
835 | l2_assoc = l2_assoc >> 1; | ||
836 | } | ||
701 | return l2_assoc; | 837 | return l2_assoc; |
702 | } | 838 | } |
703 | 839 | ||
@@ -711,24 +847,54 @@ int cvmx_l2c_get_num_assoc(void) | |||
711 | */ | 847 | */ |
712 | void cvmx_l2c_flush_line(uint32_t assoc, uint32_t index) | 848 | void cvmx_l2c_flush_line(uint32_t assoc, uint32_t index) |
713 | { | 849 | { |
714 | union cvmx_l2c_dbg l2cdbg; | 850 | /* Check the range of the index. */ |
851 | if (index > (uint32_t)cvmx_l2c_get_num_sets()) { | ||
852 | cvmx_dprintf("ERROR: cvmx_l2c_flush_line index out of range.\n"); | ||
853 | return; | ||
854 | } | ||
715 | 855 | ||
716 | l2cdbg.u64 = 0; | 856 | /* Check the range of association. */ |
717 | l2cdbg.s.ppnum = cvmx_get_core_num(); | 857 | if (assoc > (uint32_t)cvmx_l2c_get_num_assoc()) { |
718 | l2cdbg.s.finv = 1; | 858 | cvmx_dprintf("ERROR: cvmx_l2c_flush_line association out of range.\n"); |
859 | return; | ||
860 | } | ||
719 | 861 | ||
720 | l2cdbg.s.set = assoc; | 862 | if (OCTEON_IS_MODEL(OCTEON_CN63XX)) { |
721 | /* | 863 | uint64_t address; |
722 | * Enter debug mode, and make sure all other writes complete | 864 | /* Create the address based on index and association. |
723 | * before we enter debug mode. | 865 | * Bits<20:17> select the way of the cache block involved in |
724 | */ | 866 | * the operation |
725 | asm volatile ("sync" : : : "memory"); | 867 | * Bits<16:7> of the effect address select the index |
726 | cvmx_write_csr(CVMX_L2C_DBG, l2cdbg.u64); | 868 | */ |
727 | cvmx_read_csr(CVMX_L2C_DBG); | 869 | address = CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS, |
728 | 870 | (assoc << CVMX_L2C_TAG_ADDR_ALIAS_SHIFT) | | |
729 | CVMX_PREPARE_FOR_STORE(((1ULL << 63) + (index) * 128), 0); | 871 | (index << CVMX_L2C_IDX_ADDR_SHIFT)); |
730 | /* Exit debug mode */ | 872 | CVMX_CACHE_WBIL2I(address, 0); |
731 | asm volatile ("sync" : : : "memory"); | 873 | } else { |
732 | cvmx_write_csr(CVMX_L2C_DBG, 0); | 874 | union cvmx_l2c_dbg l2cdbg; |
733 | cvmx_read_csr(CVMX_L2C_DBG); | 875 | |
876 | l2cdbg.u64 = 0; | ||
877 | if (!OCTEON_IS_MODEL(OCTEON_CN30XX)) | ||
878 | l2cdbg.s.ppnum = cvmx_get_core_num(); | ||
879 | l2cdbg.s.finv = 1; | ||
880 | |||
881 | l2cdbg.s.set = assoc; | ||
882 | cvmx_spinlock_lock(&cvmx_l2c_spinlock); | ||
883 | /* | ||
884 | * Enter debug mode, and make sure all other writes | ||
885 | * complete before we enter debug mode | ||
886 | */ | ||
887 | CVMX_SYNC; | ||
888 | cvmx_write_csr(CVMX_L2C_DBG, l2cdbg.u64); | ||
889 | cvmx_read_csr(CVMX_L2C_DBG); | ||
890 | |||
891 | CVMX_PREPARE_FOR_STORE(CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS, | ||
892 | index * CVMX_CACHE_LINE_SIZE), | ||
893 | 0); | ||
894 | /* Exit debug mode */ | ||
895 | CVMX_SYNC; | ||
896 | cvmx_write_csr(CVMX_L2C_DBG, 0); | ||
897 | cvmx_read_csr(CVMX_L2C_DBG); | ||
898 | cvmx_spinlock_unlock(&cvmx_l2c_spinlock); | ||
899 | } | ||
734 | } | 900 | } |
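Worked example of the CN63XX index-operation address described in the comment above (shift values 17 and 7 are read off the Bits<20:17>/Bits<16:7> description; illustration only).

    /* Flush way 3, set 0x25: the helper builds
     *   XKPHYS | (3 << 17) | (0x25 << 7) = XKPHYS | 0x61280
     * and issues CVMX_CACHE_WBIL2I on that address. */
    cvmx_l2c_flush_line(3, 0x25);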