author    Linus Torvalds <torvalds@ppc970.osdl.org>  2005-04-16 18:20:36 -0400
committer Linus Torvalds <torvalds@ppc970.osdl.org>  2005-04-16 18:20:36 -0400
commit    1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree      0bba044c4ce775e45a88a51686b5d9f90697ea9d /include/asm-sparc64/spitfire.h
Linux-2.6.12-rc2
Initial git repository build. I'm not bothering with the full history, even though we have it. We can create a separate "historical" git archive of that later if we want to, and in the meantime it's about 3.2GB when imported into git - space that would just make the early git days unnecessarily complicated, when we don't have a lot of good infrastructure for it. Let it rip!
Diffstat (limited to 'include/asm-sparc64/spitfire.h')
-rw-r--r--  include/asm-sparc64/spitfire.h  461
1 files changed, 461 insertions, 0 deletions
diff --git a/include/asm-sparc64/spitfire.h b/include/asm-sparc64/spitfire.h
new file mode 100644
index 00000000000..ad78ce64d69
--- /dev/null
+++ b/include/asm-sparc64/spitfire.h
@@ -0,0 +1,461 @@
/* $Id: spitfire.h,v 1.18 2001/11/29 16:42:10 kanoj Exp $
 * spitfire.h: SpitFire/BlackBird/Cheetah inline MMU operations.
 *
 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
 */

#ifndef _SPARC64_SPITFIRE_H
#define _SPARC64_SPITFIRE_H

#include <asm/asi.h>

/* The following register addresses are accessible via ASI_DMMU
 * and ASI_IMMU; that is, there is a distinct and unique copy of
 * each of these registers for each TLB.
 */
#define TSB_TAG_TARGET      0x0000000000000000 /* All chips                       */
#define TLB_SFSR            0x0000000000000018 /* All chips                       */
#define TSB_REG             0x0000000000000028 /* All chips                       */
#define TLB_TAG_ACCESS      0x0000000000000030 /* All chips                       */
#define VIRT_WATCHPOINT     0x0000000000000038 /* All chips                       */
#define PHYS_WATCHPOINT     0x0000000000000040 /* All chips                       */
#define TSB_EXTENSION_P     0x0000000000000048 /* Ultra-III and later             */
#define TSB_EXTENSION_S     0x0000000000000050 /* Ultra-III and later, D-TLB only */
#define TSB_EXTENSION_N     0x0000000000000058 /* Ultra-III and later             */
#define TLB_TAG_ACCESS_EXT  0x0000000000000060 /* Ultra-III+ and later            */

/* These registers only exist as one entity, and are accessed
 * via ASI_DMMU only.
 */
#define PRIMARY_CONTEXT     0x0000000000000008
#define SECONDARY_CONTEXT   0x0000000000000010
#define DMMU_SFAR           0x0000000000000020
#define VIRT_WATCHPOINT     0x0000000000000038
#define PHYS_WATCHPOINT     0x0000000000000040

#define SPITFIRE_HIGHEST_LOCKED_TLBENT  (64 - 1)
#define CHEETAH_HIGHEST_LOCKED_TLBENT   (16 - 1)

#define L1DCACHE_SIZE 0x4000

#ifndef __ASSEMBLY__

enum ultra_tlb_layout {
        spitfire = 0,
        cheetah = 1,
        cheetah_plus = 2,
};

extern enum ultra_tlb_layout tlb_type;

#define sparc64_highest_locked_tlbent() \
        (tlb_type == spitfire ? \
         SPITFIRE_HIGHEST_LOCKED_TLBENT : \
         CHEETAH_HIGHEST_LOCKED_TLBENT)
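
/* Illustrative sketch, not part of the original header: the macro above is
 * what callers use to size or index the lockable TLB entries without caring
 * which chip they run on.  The helper name below is hypothetical.
 */
static __inline__ int example_num_locked_tlbents(void)
{
        /* 64 lockable entries on Spitfire-class chips, 16 on Cheetah. */
        return sparc64_highest_locked_tlbent() + 1;
}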

static __inline__ unsigned long spitfire_get_isfsr(void)
{
        unsigned long ret;

        __asm__ __volatile__("ldxa [%1] %2, %0"
                             : "=r" (ret)
                             : "r" (TLB_SFSR), "i" (ASI_IMMU));
        return ret;
}

static __inline__ unsigned long spitfire_get_dsfsr(void)
{
        unsigned long ret;

        __asm__ __volatile__("ldxa [%1] %2, %0"
                             : "=r" (ret)
                             : "r" (TLB_SFSR), "i" (ASI_DMMU));
        return ret;
}

static __inline__ unsigned long spitfire_get_sfar(void)
{
        unsigned long ret;

        __asm__ __volatile__("ldxa [%1] %2, %0"
                             : "=r" (ret)
                             : "r" (DMMU_SFAR), "i" (ASI_DMMU));
        return ret;
}

static __inline__ void spitfire_put_isfsr(unsigned long sfsr)
{
        __asm__ __volatile__("stxa %0, [%1] %2\n\t"
                             "membar #Sync"
                             : /* no outputs */
                             : "r" (sfsr), "r" (TLB_SFSR), "i" (ASI_IMMU));
}

static __inline__ void spitfire_put_dsfsr(unsigned long sfsr)
{
        __asm__ __volatile__("stxa %0, [%1] %2\n\t"
                             "membar #Sync"
                             : /* no outputs */
                             : "r" (sfsr), "r" (TLB_SFSR), "i" (ASI_DMMU));
}

/* The data cache is write through, so this just invalidates the
 * specified line.
 */
static __inline__ void spitfire_put_dcache_tag(unsigned long addr, unsigned long tag)
{
        __asm__ __volatile__("stxa %0, [%1] %2\n\t"
                             "membar #Sync"
                             : /* No outputs */
                             : "r" (tag), "r" (addr), "i" (ASI_DCACHE_TAG));
        __asm__ __volatile__ ("membar #Sync" : : : "memory");
}
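
/* Illustrative sketch, not part of the original header: because the D-cache
 * is write-through, a full flush is just a matter of zeroing every tag.  The
 * 32-byte line size used here is an assumption (it is not defined in this
 * file), and the helper name is hypothetical.
 */
static __inline__ void example_flush_whole_dcache(void)
{
        unsigned long addr;

        for (addr = 0; addr < L1DCACHE_SIZE; addr += 32)
                spitfire_put_dcache_tag(addr, 0x0UL);
}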

/* The instruction cache lines are flushed with this, but note that
 * this does not flush the pipeline.  It is possible for a line to
 * get flushed but stale instructions to still be in the pipeline;
 * a flush instruction (to any address) is sufficient to handle
 * this issue after the line is invalidated.
 */
static __inline__ void spitfire_put_icache_tag(unsigned long addr, unsigned long tag)
{
        __asm__ __volatile__("stxa %0, [%1] %2\n\t"
                             "membar #Sync"
                             : /* No outputs */
                             : "r" (tag), "r" (addr), "i" (ASI_IC_TAG));
}
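
/* Illustrative sketch, not part of the original header: as the comment above
 * explains, invalidating an I-cache line is not enough on its own, so a
 * caller pairs the tag write with a "flush" instruction to drop any stale
 * instructions from the pipeline.  The helper name is hypothetical, and %g6
 * is assumed to hold a mapped kernel address in this context.
 */
static __inline__ void example_put_icache_tag_and_sync(unsigned long tag_addr)
{
        spitfire_put_icache_tag(tag_addr, 0x0UL);
        __asm__ __volatile__("flush %%g6" : : : "memory");
}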

static __inline__ unsigned long spitfire_get_dtlb_data(int entry)
{
        unsigned long data;

        __asm__ __volatile__("ldxa [%1] %2, %0"
                             : "=r" (data)
                             : "r" (entry << 3), "i" (ASI_DTLB_DATA_ACCESS));

        /* Clear TTE diag bits. */
        data &= ~0x0003fe0000000000UL;

        return data;
}

static __inline__ unsigned long spitfire_get_dtlb_tag(int entry)
{
        unsigned long tag;

        __asm__ __volatile__("ldxa [%1] %2, %0"
                             : "=r" (tag)
                             : "r" (entry << 3), "i" (ASI_DTLB_TAG_READ));
        return tag;
}

static __inline__ void spitfire_put_dtlb_data(int entry, unsigned long data)
{
        __asm__ __volatile__("stxa %0, [%1] %2\n\t"
                             "membar #Sync"
                             : /* No outputs */
                             : "r" (data), "r" (entry << 3),
                               "i" (ASI_DTLB_DATA_ACCESS));
}

static __inline__ unsigned long spitfire_get_itlb_data(int entry)
{
        unsigned long data;

        __asm__ __volatile__("ldxa [%1] %2, %0"
                             : "=r" (data)
                             : "r" (entry << 3), "i" (ASI_ITLB_DATA_ACCESS));

        /* Clear TTE diag bits. */
        data &= ~0x0003fe0000000000UL;

        return data;
}

static __inline__ unsigned long spitfire_get_itlb_tag(int entry)
{
        unsigned long tag;

        __asm__ __volatile__("ldxa [%1] %2, %0"
                             : "=r" (tag)
                             : "r" (entry << 3), "i" (ASI_ITLB_TAG_READ));
        return tag;
}

static __inline__ void spitfire_put_itlb_data(int entry, unsigned long data)
{
        __asm__ __volatile__("stxa %0, [%1] %2\n\t"
                             "membar #Sync"
                             : /* No outputs */
                             : "r" (data), "r" (entry << 3),
                               "i" (ASI_ITLB_DATA_ACCESS));
}

/* Spitfire hardware assisted TLB flushes. */

/* Context level flushes. */
static __inline__ void spitfire_flush_dtlb_primary_context(void)
{
        __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
                             "membar #Sync"
                             : /* No outputs */
                             : "r" (0x40), "i" (ASI_DMMU_DEMAP));
}

static __inline__ void spitfire_flush_itlb_primary_context(void)
{
        __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
                             "membar #Sync"
                             : /* No outputs */
                             : "r" (0x40), "i" (ASI_IMMU_DEMAP));
}

static __inline__ void spitfire_flush_dtlb_secondary_context(void)
{
        __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
                             "membar #Sync"
                             : /* No outputs */
                             : "r" (0x50), "i" (ASI_DMMU_DEMAP));
}

static __inline__ void spitfire_flush_itlb_secondary_context(void)
{
        __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
                             "membar #Sync"
                             : /* No outputs */
                             : "r" (0x50), "i" (ASI_IMMU_DEMAP));
}

static __inline__ void spitfire_flush_dtlb_nucleus_context(void)
{
        __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
                             "membar #Sync"
                             : /* No outputs */
                             : "r" (0x60), "i" (ASI_DMMU_DEMAP));
}

static __inline__ void spitfire_flush_itlb_nucleus_context(void)
{
        __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
                             "membar #Sync"
                             : /* No outputs */
                             : "r" (0x60), "i" (ASI_IMMU_DEMAP));
}
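
/* Illustrative sketch, not part of the original header: a caller that wants
 * the primary context gone from both MMUs simply pairs the two demap
 * operations above.  The wrapper name is hypothetical.
 */
static __inline__ void example_flush_tlb_primary_context(void)
{
        spitfire_flush_dtlb_primary_context();
        spitfire_flush_itlb_primary_context();
}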

/* Page level flushes. */
static __inline__ void spitfire_flush_dtlb_primary_page(unsigned long page)
{
        __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
                             "membar #Sync"
                             : /* No outputs */
                             : "r" (page), "i" (ASI_DMMU_DEMAP));
}

static __inline__ void spitfire_flush_itlb_primary_page(unsigned long page)
{
        __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
                             "membar #Sync"
                             : /* No outputs */
                             : "r" (page), "i" (ASI_IMMU_DEMAP));
}

static __inline__ void spitfire_flush_dtlb_secondary_page(unsigned long page)
{
        __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
                             "membar #Sync"
                             : /* No outputs */
                             : "r" (page | 0x10), "i" (ASI_DMMU_DEMAP));
}

static __inline__ void spitfire_flush_itlb_secondary_page(unsigned long page)
{
        __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
                             "membar #Sync"
                             : /* No outputs */
                             : "r" (page | 0x10), "i" (ASI_IMMU_DEMAP));
}

static __inline__ void spitfire_flush_dtlb_nucleus_page(unsigned long page)
{
        __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
                             "membar #Sync"
                             : /* No outputs */
                             : "r" (page | 0x20), "i" (ASI_DMMU_DEMAP));
}

static __inline__ void spitfire_flush_itlb_nucleus_page(unsigned long page)
{
        __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
                             "membar #Sync"
                             : /* No outputs */
                             : "r" (page | 0x20), "i" (ASI_IMMU_DEMAP));
}

/* Cheetah has "all non-locked" tlb flushes. */
static __inline__ void cheetah_flush_dtlb_all(void)
{
        __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
                             "membar #Sync"
                             : /* No outputs */
                             : "r" (0x80), "i" (ASI_DMMU_DEMAP));
}

static __inline__ void cheetah_flush_itlb_all(void)
{
        __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
                             "membar #Sync"
                             : /* No outputs */
                             : "r" (0x80), "i" (ASI_IMMU_DEMAP));
}

/* Cheetah has a 4-TLB layout, so direct access is a bit different.
 * The first two TLBs are fully associative, hold 16 entries, and are
 * used only for locked and >8K sized translations.  One exists for
 * data accesses and one for instruction accesses.
 *
 * The third TLB is for data accesses to 8K non-locked translations, is
 * 2 way associative, and holds 512 entries.  The fourth TLB is for
 * instruction accesses to 8K non-locked translations, is 2 way
 * associative, and holds 128 entries.
 *
 * Cheetah has some bug where bogus data can be returned from
 * ASI_{D,I}TLB_DATA_ACCESS loads; doing the load twice fixes
 * the problem for me. -DaveM
 */
static __inline__ unsigned long cheetah_get_ldtlb_data(int entry)
{
        unsigned long data;

        __asm__ __volatile__("ldxa [%1] %2, %%g0\n\t"
                             "ldxa [%1] %2, %0"
                             : "=r" (data)
                             : "r" ((0 << 16) | (entry << 3)),
                               "i" (ASI_DTLB_DATA_ACCESS));

        return data;
}

static __inline__ unsigned long cheetah_get_litlb_data(int entry)
{
        unsigned long data;

        __asm__ __volatile__("ldxa [%1] %2, %%g0\n\t"
                             "ldxa [%1] %2, %0"
                             : "=r" (data)
                             : "r" ((0 << 16) | (entry << 3)),
                               "i" (ASI_ITLB_DATA_ACCESS));

        return data;
}

static __inline__ unsigned long cheetah_get_ldtlb_tag(int entry)
{
        unsigned long tag;

        __asm__ __volatile__("ldxa [%1] %2, %0"
                             : "=r" (tag)
                             : "r" ((0 << 16) | (entry << 3)),
                               "i" (ASI_DTLB_TAG_READ));

        return tag;
}

static __inline__ unsigned long cheetah_get_litlb_tag(int entry)
{
        unsigned long tag;

        __asm__ __volatile__("ldxa [%1] %2, %0"
                             : "=r" (tag)
                             : "r" ((0 << 16) | (entry << 3)),
                               "i" (ASI_ITLB_TAG_READ));

        return tag;
}

static __inline__ void cheetah_put_ldtlb_data(int entry, unsigned long data)
{
        __asm__ __volatile__("stxa %0, [%1] %2\n\t"
                             "membar #Sync"
                             : /* No outputs */
                             : "r" (data),
                               "r" ((0 << 16) | (entry << 3)),
                               "i" (ASI_DTLB_DATA_ACCESS));
}

static __inline__ void cheetah_put_litlb_data(int entry, unsigned long data)
{
        __asm__ __volatile__("stxa %0, [%1] %2\n\t"
                             "membar #Sync"
                             : /* No outputs */
                             : "r" (data),
                               "r" ((0 << 16) | (entry << 3)),
                               "i" (ASI_ITLB_DATA_ACCESS));
}

static __inline__ unsigned long cheetah_get_dtlb_data(int entry, int tlb)
{
        unsigned long data;

        __asm__ __volatile__("ldxa [%1] %2, %%g0\n\t"
                             "ldxa [%1] %2, %0"
                             : "=r" (data)
                             : "r" ((tlb << 16) | (entry << 3)), "i" (ASI_DTLB_DATA_ACCESS));

        return data;
}

static __inline__ unsigned long cheetah_get_dtlb_tag(int entry, int tlb)
{
        unsigned long tag;

        __asm__ __volatile__("ldxa [%1] %2, %0"
                             : "=r" (tag)
                             : "r" ((tlb << 16) | (entry << 3)), "i" (ASI_DTLB_TAG_READ));
        return tag;
}
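
/* Illustrative sketch, not part of the original header: walking one of the
 * Cheetah D-TLBs with the accessors above.  Which tlb index and how many
 * entries to pass depends on which of the four TLBs is being examined (see
 * the layout comment earlier in this file); the function and parameter
 * names here are hypothetical.
 */
static __inline__ void example_dump_cheetah_dtlb(int tlb, int nentries,
                                                 unsigned long *tags,
                                                 unsigned long *datas)
{
        int i;

        for (i = 0; i < nentries; i++) {
                tags[i] = cheetah_get_dtlb_tag(i, tlb);
                datas[i] = cheetah_get_dtlb_data(i, tlb);
        }
}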

static __inline__ void cheetah_put_dtlb_data(int entry, unsigned long data, int tlb)
{
        __asm__ __volatile__("stxa %0, [%1] %2\n\t"
                             "membar #Sync"
                             : /* No outputs */
                             : "r" (data),
                               "r" ((tlb << 16) | (entry << 3)),
                               "i" (ASI_DTLB_DATA_ACCESS));
}

static __inline__ unsigned long cheetah_get_itlb_data(int entry)
{
        unsigned long data;

        __asm__ __volatile__("ldxa [%1] %2, %%g0\n\t"
                             "ldxa [%1] %2, %0"
                             : "=r" (data)
                             : "r" ((2 << 16) | (entry << 3)),
                               "i" (ASI_ITLB_DATA_ACCESS));

        return data;
}

static __inline__ unsigned long cheetah_get_itlb_tag(int entry)
{
        unsigned long tag;

        __asm__ __volatile__("ldxa [%1] %2, %0"
                             : "=r" (tag)
                             : "r" ((2 << 16) | (entry << 3)), "i" (ASI_ITLB_TAG_READ));
        return tag;
}

static __inline__ void cheetah_put_itlb_data(int entry, unsigned long data)
{
        __asm__ __volatile__("stxa %0, [%1] %2\n\t"
                             "membar #Sync"
                             : /* No outputs */
                             : "r" (data), "r" ((2 << 16) | (entry << 3)),
                               "i" (ASI_ITLB_DATA_ACCESS));
}

#endif /* !(__ASSEMBLY__) */

#endif /* !(_SPARC64_SPITFIRE_H) */