author		David S. Miller <davem@davemloft.net>	2005-06-21 19:20:28 -0400
committer	David S. Miller <davem@davemloft.net>	2005-06-21 19:20:28 -0400
commit		7049e6800f40046c384c522a990669024d5f5836 (patch)
tree		5e790230b26721a89864fa610bc9d8e53114a881 /include/asm-sparc64
parent		8005aba69a6440a535a4cc2aed99ffca580847e0 (diff)
[SPARC64]: Add prefetch support.

The implementation is optimal for UltraSPARC-III and later.  It will
work, however suboptimally, on UltraSPARC-II and be treated as a NOP
on UltraSPARC-I.  It is not worth code patching this thing as the
highest cost is the code space, and code patching cannot eliminate
that.

Signed-off-by: David S. Miller <davem@davemloft.net>
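For context, the typical consumer of these hooks is pointer-chasing code
that issues a prefetch for the next element while still working on the
current one.  A minimal sketch under that assumption (the node structure
and do_work() helper below are hypothetical, not part of this patch):

	#include <linux/prefetch.h>	/* prefetch()/prefetchw(); compile to
					 * nothing on arches without support */

	struct node {
		struct node *next;
		int payload;
	};

	/* Hypothetical walker: start pulling in the next node's
	 * cacheline while the current node is being processed,
	 * overlapping memory latency with useful work. */
	static void walk(struct node *head)
	{
		struct node *p;

		for (p = head; p; p = p->next) {
			if (p->next)
				prefetch(p->next);
			do_work(p);	/* hypothetical per-node work */
		}
	}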
Diffstat (limited to 'include/asm-sparc64')
-rw-r--r--	include/asm-sparc64/processor.h	34
1 file changed, 34 insertions(+), 0 deletions(-)
diff --git a/include/asm-sparc64/processor.h b/include/asm-sparc64/processor.h
index bc1445b904ef..d0bee2413560 100644
--- a/include/asm-sparc64/processor.h
+++ b/include/asm-sparc64/processor.h
@@ -192,6 +192,40 @@ extern unsigned long get_wchan(struct task_struct *task);
 
 #define cpu_relax()	barrier()
 
+/* Prefetch support.  This is tuned for UltraSPARC-III and later.
+ * UltraSPARC-I will treat these as nops, and UltraSPARC-II has
+ * a shallower prefetch queue than later chips.
+ */
+#define ARCH_HAS_PREFETCH
+#define ARCH_HAS_PREFETCHW
+#define ARCH_HAS_SPINLOCK_PREFETCH
+
+static inline void prefetch(const void *x)
+{
+	/* We do not use the read prefetch mnemonic because that
+	 * prefetches into the prefetch-cache which only is accessible
+	 * by floating point operations in UltraSPARC-III and later.
+	 * By contrast, "#one_write" prefetches into the L2 cache
+	 * in shared state.
+	 */
+	__asm__ __volatile__("prefetch	[%0], #one_write"
+			     : /* no outputs */
+			     : "r" (x));
+}
+
+static inline void prefetchw(const void *x)
+{
+	/* The most optimal prefetch to use for writes is
+	 * "#n_writes".  This brings the cacheline into the
+	 * L2 cache in "owned" state.
+	 */
+	__asm__ __volatile__("prefetch	[%0], #n_writes"
+			     : /* no outputs */
+			     : "r" (x));
+}
+
+#define spin_lock_prefetch(x)	prefetchw(x)
+
 #endif /* !(__ASSEMBLY__) */
 
 #endif /* !(__ASM_SPARC64_PROCESSOR_H) */
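
As a usage note, prefetchw() (and hence spin_lock_prefetch()) is meant
for data the caller is about to write, so the line arrives in owned
state and the later store avoids a second coherence transaction.  A
hedged sketch of the spinlock case, with a hypothetical counter
structure that is not from this patch:

	#include <linux/spinlock.h>
	#include <linux/prefetch.h>

	struct counter {
		spinlock_t lock;
		unsigned long value;
	};

	/* Hypothetical hot path: warm the lock's cacheline in owned
	 * state ahead of spin_lock(), which will write to it; on
	 * sparc64 this expands to the "#n_writes" prefetch above. */
	static void counter_inc(struct counter *c)
	{
		spin_lock_prefetch(&c->lock);
		/* independent work here can overlap with the prefetch */
		spin_lock(&c->lock);
		c->value++;
		spin_unlock(&c->lock);
	}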