author     Jaroslav Kysela <perex@petra>   2005-06-22 06:19:24 -0400
committer  Jaroslav Kysela <perex@petra>   2005-06-22 06:19:24 -0400
commit     da04b128cf0d74dd4cab270c53d9264e70f9203e (patch)
tree       095355c32dfd709236a85b497d3bd461d7cdfe8a /include/asm-sparc64/processor.h
parent     fae6ec69c84d71b1d5bda9ede1a262c1681684aa (diff)
parent     2a5a68b840cbab31baab2d9b2e1e6de3b289ae1e (diff)
Merge with rsync://rsync.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6.git
Diffstat (limited to 'include/asm-sparc64/processor.h')
-rw-r--r--  include/asm-sparc64/processor.h | 34
1 file changed, 34 insertions, 0 deletions
diff --git a/include/asm-sparc64/processor.h b/include/asm-sparc64/processor.h
index bc1445b904ef..d0bee2413560 100644
--- a/include/asm-sparc64/processor.h
+++ b/include/asm-sparc64/processor.h
@@ -192,6 +192,40 @@ extern unsigned long get_wchan(struct task_struct *task);
 
 #define cpu_relax()	barrier()
 
+/* Prefetch support.  This is tuned for UltraSPARC-III and later.
+ * UltraSPARC-I will treat these as nops, and UltraSPARC-II has
+ * a shallower prefetch queue than later chips.
+ */
+#define ARCH_HAS_PREFETCH
+#define ARCH_HAS_PREFETCHW
+#define ARCH_HAS_SPINLOCK_PREFETCH
+
+static inline void prefetch(const void *x)
+{
+	/* We do not use the read prefetch mnemonic because that
+	 * prefetches into the prefetch-cache which only is accessible
+	 * by floating point operations in UltraSPARC-III and later.
+	 * By contrast, "#one_write" prefetches into the L2 cache
+	 * in shared state.
+	 */
+	__asm__ __volatile__("prefetch [%0], #one_write"
+			     : /* no outputs */
+			     : "r" (x));
+}
+
+static inline void prefetchw(const void *x)
+{
+	/* The most optimal prefetch to use for writes is
+	 * "#n_writes".  This brings the cacheline into the
+	 * L2 cache in "owned" state.
+	 */
+	__asm__ __volatile__("prefetch [%0], #n_writes"
+			     : /* no outputs */
+			     : "r" (x));
+}
+
+#define spin_lock_prefetch(x)	prefetchw(x)
+
 #endif /* !(__ASSEMBLY__) */
 
 #endif /* !(__ASM_SPARC64_PROCESSOR_H) */
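For context, a minimal usage sketch, not part of this commit: generic kernel code reaches these helpers through the arch-independent <linux/prefetch.h> wrapper, typically to pull the next node of a data structure into the cache while the current node is still being processed. The struct and function below (struct item, sum_items) are hypothetical, invented purely for illustration.

/* Illustrative sketch only -- assumes a made-up singly linked list. */
#include <linux/prefetch.h>

struct item {
	struct item *next;
	unsigned long payload;
};

static unsigned long sum_items(struct item *head)
{
	unsigned long sum = 0;
	struct item *p;

	for (p = head; p; p = p->next) {
		/* Hint the node after this one: on UltraSPARC-III and later
		 * this becomes the L2 "#one_write" prefetch added above,
		 * while UltraSPARC-I treats it as a nop.
		 */
		if (p->next)
			prefetch(p->next);
		sum += p->payload;
	}
	return sum;
}

The same pattern applies to prefetchw() when the prefetched line is about to be written, which is what the spin_lock_prefetch() mapping in this commit relies on for lock words.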