author	Olof Johansson <olof@austin.ibm.com>	2005-04-16 18:24:38 -0400
committer	Linus Torvalds <torvalds@ppc970.osdl.org>	2005-04-16 18:24:38 -0400
commit	e63f8f439de010b6227c0c9c6f56e2c44dbe5dae (patch)
tree	eb80efbfb8e4c55a94610d30fb8424cd128ffe8b /include/asm-ppc64
parent	89e09f5ebba4bcde9852e4be4af536d5b691f20a (diff)
[PATCH] ppc64: no prefetch for NULL pointers
For prefetches of NULL (as when walking a short linked list), PPC64 will in some cases take a performance hit. The hardware needs to do the TLB walk, and said walk will always miss, which means (up to) two L2 misses as penalty. This seems to hurt overall performance, so for NULL pointers skip the prefetch altogether.

Signed-off-by: Olof Johansson <olof@austin.ibm.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
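As an illustration (not part of the patch), here is a minimal sketch of how a NULL prefetch arises in practice: a list walker prefetches the next node before knowing whether one exists, so the final iteration issues prefetch(NULL). The struct node type and sum_list() helper are hypothetical; only prefetch() comes from the header changed below.

	struct node {
		struct node *next;	/* NULL terminates the list */
		int payload;
	};

	static int sum_list(const struct node *n)
	{
		int sum = 0;

		while (n) {
			/* On the last node n->next is NULL; without the check
			 * below this would still trigger a TLB walk that is
			 * guaranteed to miss. */
			prefetch(n->next);
			sum += n->payload;
			n = n->next;
		}
		return sum;
	}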
Diffstat (limited to 'include/asm-ppc64')
-rw-r--r--	include/asm-ppc64/processor.h	6
1 file changed, 6 insertions, 0 deletions
diff --git a/include/asm-ppc64/processor.h b/include/asm-ppc64/processor.h
index eb33d33cfd6d..cae65b30adb8 100644
--- a/include/asm-ppc64/processor.h
+++ b/include/asm-ppc64/processor.h
@@ -642,11 +642,17 @@ static inline unsigned long __pack_fe01(unsigned int fpmode)
 
 static inline void prefetch(const void *x)
 {
+	if (unlikely(!x))
+		return;
+
 	__asm__ __volatile__ ("dcbt 0,%0" : : "r" (x));
 }
 
 static inline void prefetchw(const void *x)
 {
+	if (unlikely(!x))
+		return;
+
 	__asm__ __volatile__ ("dcbtst 0,%0" : : "r" (x));
 }
 