From 458f935527372499b714bf4f8e646a68bb0f52e3 Mon Sep 17 00:00:00 2001
From: David Mosberger-Tang
Date: Wed, 4 May 2005 13:25:00 -0700
Subject: [IA64] Speed up lfetch.fault [NULL]

This patch greatly speeds up the handling of lfetch.fault instructions
which result in NaT consumption.  Due to the NaT page mapped at address
0, this is guaranteed to happen when lfetch.fault'ing a NULL pointer.

With this patch in place, we can even define prefetch()/prefetchw() as
lfetch.fault without significant performance degradation.  More
importantly, it allows compilers to be more aggressive about using
lfetch.fault on pointers that might be NULL.

Signed-off-by: David Mosberger-Tang
Signed-off-by: Tony Luck
---
 arch/ia64/kernel/ivt.S | 19 +++++++++++++++++++
 1 file changed, 19 insertions(+)

diff --git a/arch/ia64/kernel/ivt.S b/arch/ia64/kernel/ivt.S
index b28d2212a779..3bb3a13c4047 100644
--- a/arch/ia64/kernel/ivt.S
+++ b/arch/ia64/kernel/ivt.S
@@ -1243,6 +1243,25 @@ END(disabled_fp_reg)
 // 0x5600 Entry 26 (size 16 bundles) Nat Consumption (11,23,37,50)
 ENTRY(nat_consumption)
 	DBG_FAULT(26)
+
+	mov r16=cr.ipsr
+	mov r17=cr.isr
+	mov r31=pr			// save PR
+	;;
+	and r18=0xf,r17			// r18 = cr.isr.code{3:0}
+	tbit.z p6,p0=r17,IA64_ISR_NA_BIT
+	;;
+	cmp.ne.or p6,p0=IA64_ISR_CODE_LFETCH,r18
+	dep r16=-1,r16,IA64_PSR_ED_BIT,1
+(p6)	br.cond.spnt 1f			// branch if (cr.isr.na == 0 || cr.isr.code{3:0} != LFETCH)
+	;;
+	mov cr.ipsr=r16			// set cr.ipsr.ed
+	mov pr=r31,-1
+	;;
+	rfi
+
+1:	mov pr=r31,-1
+	;;
 	FAULT(26)
 END(nat_consumption)
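
As a rough illustration of the prefetch()/prefetchw() definitions the commit
message alludes to, here is a minimal sketch using GCC-style inline assembly.
The wrapper names come from the generic kernel prefetch interface, but the
exact form and location of the real definitions (e.g. in
include/asm-ia64/processor.h) are assumptions, not part of this patch:

/*
 * Sketch only: issue the faulting form of lfetch for prefetch hints.
 * With the nat_consumption fast path above, an lfetch.fault on a NULL
 * pointer (which hits the NaT page mapped at address 0) is handled by
 * setting cr.ipsr.ed and returning with rfi, so the prefetch retires
 * cheaply instead of going through the full fault path.
 */
static inline void prefetch(const void *addr)
{
	asm volatile ("lfetch.fault [%0]" : : "r" (addr));
}

static inline void prefetchw(const void *addr)
{
	asm volatile ("lfetch.fault.excl [%0]" : : "r" (addr));
}

The plain lfetch form silently drops an access that would fault and so needs
no handler support; the .fault form raises faults like an ordinary load,
which is why the NaT-consumption handler above short-circuits the lfetch
case by setting the interrupted psr.ed bit, letting the re-executed
lfetch.fault complete without faulting again.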