author    Tony Luck <tony.luck@intel.com>    2005-06-15 17:06:48 -0400
committer Tony Luck <tony.luck@intel.com>    2005-06-15 17:06:48 -0400
commit    f2cbb4f01936a3e4225692e03b084b78c56d386d (patch)
tree      f89f3d8baa250589a38a4dd2df56f84cddae3c76 /arch/ia64/kernel
parent    325a479c4c110db278ef3361460a48c4093252cc (diff)
parent    1016888fb69662936b32ab767c7419a3be9a69d3 (diff)

Auto merge with /home/aegl/GIT/linus
Diffstat (limited to 'arch/ia64/kernel')
 arch/ia64/kernel/acpi.c        |  23
 arch/ia64/kernel/entry.S       |   6
 arch/ia64/kernel/fsys.S        |   4
 arch/ia64/kernel/mca.c         |   8
 arch/ia64/kernel/mca_drv.c     |   4
 arch/ia64/kernel/mca_drv_asm.S |  18
 arch/ia64/kernel/minstate.h    |   3
 arch/ia64/kernel/module.c      |  10
 arch/ia64/kernel/perfmon.c     | 218
 arch/ia64/kernel/process.c     |  55
 arch/ia64/kernel/ptrace.c      |  47
 arch/ia64/kernel/setup.c       |   3
 arch/ia64/kernel/signal.c      |   3
 arch/ia64/kernel/smpboot.c     |   2
 arch/ia64/kernel/sys_ia64.c    |   7
 arch/ia64/kernel/traps.c       |  29
 16 files changed, 336 insertions(+), 104 deletions(-)
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
index a8e99c56a768..72dfd9e7de0f 100644
--- a/arch/ia64/kernel/acpi.c
+++ b/arch/ia64/kernel/acpi.c
@@ -779,7 +779,7 @@ acpi_map_iosapic (acpi_handle handle, u32 depth, void *context, void **ret)
 	union acpi_object *obj;
 	struct acpi_table_iosapic *iosapic;
 	unsigned int gsi_base;
-	int node;
+	int pxm, node;
 
 	/* Only care about objects w/ a method that returns the MADT */
 	if (ACPI_FAILURE(acpi_evaluate_object(handle, "_MAT", NULL, &buffer)))
@@ -805,29 +805,16 @@ acpi_map_iosapic (acpi_handle handle, u32 depth, void *context, void **ret)
 	gsi_base = iosapic->global_irq_base;
 
 	acpi_os_free(buffer.pointer);
-	buffer.length = ACPI_ALLOCATE_BUFFER;
-	buffer.pointer = NULL;
 
 	/*
-	 * OK, it's an IOSAPIC MADT entry, look for a _PXM method to tell
+	 * OK, it's an IOSAPIC MADT entry, look for a _PXM value to tell
 	 * us which node to associate this with.
 	 */
-	if (ACPI_FAILURE(acpi_evaluate_object(handle, "_PXM", NULL, &buffer)))
-		return AE_OK;
-
-	if (!buffer.length || !buffer.pointer)
-		return AE_OK;
-
-	obj = buffer.pointer;
-
-	if (obj->type != ACPI_TYPE_INTEGER ||
-	    obj->integer.value >= MAX_PXM_DOMAINS) {
-		acpi_os_free(buffer.pointer);
+	pxm = acpi_get_pxm(handle);
+	if (pxm < 0)
 		return AE_OK;
-	}
 
-	node = pxm_to_nid_map[obj->integer.value];
-	acpi_os_free(buffer.pointer);
+	node = pxm_to_nid_map[pxm];
 
 	if (node >= MAX_NUMNODES || !node_online(node) ||
 	    cpus_empty(node_to_cpumask(node)))
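
Stripped of diff markers, the refactored lookup is compact enough to read in one piece. A minimal sketch of the resulting flow (iosapic_node_for is a hypothetical wrapper; acpi_get_pxm, pxm_to_nid_map and the node-validity checks are taken directly from the patch):

/*
 * Sketch of the IOSAPIC-to-NUMA-node lookup after this patch: the
 * open-coded _PXM buffer evaluation collapses into one acpi_get_pxm() call.
 */
static int iosapic_node_for(acpi_handle handle)
{
	int pxm, node;

	pxm = acpi_get_pxm(handle);		/* proximity domain, < 0 on failure */
	if (pxm < 0)
		return -1;			/* no _PXM: caller keeps the default node */

	node = pxm_to_nid_map[pxm];		/* firmware domain -> logical node id */
	if (node >= MAX_NUMNODES || !node_online(node) ||
	    cpus_empty(node_to_cpumask(node)))
		return -1;			/* node unusable: fall back */

	return node;
}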
diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S
index 9353adc18956..8ebfcc0c813a 100644
--- a/arch/ia64/kernel/entry.S
+++ b/arch/ia64/kernel/entry.S
@@ -777,7 +777,7 @@ GLOBAL_ENTRY(ia64_ret_from_ia32_execve)
 	st8.spill [r2]=r8	// store return value in slot for r8 and set unat bit
 	.mem.offset 8,0
 	st8.spill [r3]=r0	// clear error indication in slot for r10 and set unat bit
-END(ia64_ret_from_ia32_execve_syscall)
+END(ia64_ret_from_ia32_execve)
 	// fall through
 #endif /* CONFIG_IA32_SUPPORT */
 GLOBAL_ENTRY(ia64_leave_kernel)
@@ -1176,7 +1176,7 @@ ENTRY(notify_resume_user)
 	;;
 (pNonSys) mov out2=0				// out2==0 => not a syscall
 	.fframe 16
-	.spillpsp ar.unat, 16			// (note that offset is relative to psp+0x10!)
+	.spillsp ar.unat, 16
 	st8 [sp]=r9,-16				// allocate space for ar.unat and save it
 	st8 [out1]=loc1,-8			// save ar.pfs, out1=&sigscratch
 	.body
@@ -1202,7 +1202,7 @@ GLOBAL_ENTRY(sys_rt_sigsuspend)
 	adds out2=8,sp				// out2=&sigscratch->ar_pfs
 	;;
 	.fframe 16
-	.spillpsp ar.unat, 16			// (note that offset is relative to psp+0x10!)
+	.spillsp ar.unat, 16
 	st8 [sp]=r9,-16				// allocate space for ar.unat and save it
 	st8 [out2]=loc1,-8			// save ar.pfs, out2=&sigscratch
 	.body
diff --git a/arch/ia64/kernel/fsys.S b/arch/ia64/kernel/fsys.S
index f566ff43a389..7d7684a369d3 100644
--- a/arch/ia64/kernel/fsys.S
+++ b/arch/ia64/kernel/fsys.S
@@ -460,9 +460,9 @@ EX(.fail_efault, ld8 r14=[r33]) // r14 <- *set
 	;;
 
 	st8 [r2]=r14				// update current->blocked with new mask
-	cmpxchg4.acq r14=[r9],r18,ar.ccv	// current->thread_info->flags <- r18
+	cmpxchg4.acq r8=[r9],r18,ar.ccv		// current->thread_info->flags <- r18
 	;;
-	cmp.ne p6,p0=r17,r14			// update failed?
+	cmp.ne p6,p0=r17,r8			// update failed?
 (p6)	br.cond.spnt.few 1b			// yes -> retry
 
 #ifdef CONFIG_SMP
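
The bug fixed here is subtle: the cmpxchg4.acq destination was r14, which still held live data, so the returned old value clobbered a register the retry path needed; moving the result into the scratch register r8 preserves it. The loop itself is the classic compare-and-swap retry; a runnable user-space analogue in C11 atomics (the flag values are made up for illustration):

#include <stdatomic.h>
#include <stdio.h>

/*
 * User-space analogue of the fsys.S update loop above: reread the flags
 * word and retry the compare-and-swap until no other thread raced with
 * us.  The asm bug was equivalent to storing the CAS result over a
 * variable that the retry path still needed.
 */
static _Atomic unsigned int thread_flags = 0x11;

int main(void)
{
	unsigned int old, new;

	do {
		old = atomic_load(&thread_flags);
		new = old & ~0x10u;		/* e.g. clear one TIF_ bit */
	} while (!atomic_compare_exchange_weak(&thread_flags, &old, new));

	printf("flags now 0x%x\n", atomic_load(&thread_flags));
	return 0;
}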
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index 4d6c7b8f667b..736e328b5e61 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -1103,8 +1103,6 @@ ia64_mca_cpe_int_caller(int cpe_irq, void *arg, struct pt_regs *ptregs)
 	return IRQ_HANDLED;
 }
 
-#endif /* CONFIG_ACPI */
-
 /*
  * ia64_mca_cpe_poll
  *
@@ -1122,6 +1120,8 @@ ia64_mca_cpe_poll (unsigned long dummy)
 	platform_send_ipi(first_cpu(cpu_online_map), IA64_CPEP_VECTOR, IA64_IPI_DM_INT, 0);
 }
 
+#endif /* CONFIG_ACPI */
+
 /*
  * C portion of the OS INIT handler
  *
@@ -1390,8 +1390,7 @@ ia64_mca_init(void)
 	register_percpu_irq(IA64_MCA_WAKEUP_VECTOR, &mca_wkup_irqaction);
 
 #ifdef CONFIG_ACPI
-	/* Setup the CPEI/P vector and handler */
-	cpe_vector = acpi_request_vector(ACPI_INTERRUPT_CPEI);
+	/* Setup the CPEI/P handler */
 	register_percpu_irq(IA64_CPEP_VECTOR, &mca_cpep_irqaction);
 #endif
 
@@ -1436,6 +1435,7 @@ ia64_mca_late_init(void)
 
 #ifdef CONFIG_ACPI
 	/* Setup the CPEI/P vector and handler */
+	cpe_vector = acpi_request_vector(ACPI_INTERRUPT_CPEI);
 	init_timer(&cpe_poll_timer);
 	cpe_poll_timer.function = ia64_mca_cpe_poll;
 
diff --git a/arch/ia64/kernel/mca_drv.c b/arch/ia64/kernel/mca_drv.c
index ab478172c349..abc0113a821d 100644
--- a/arch/ia64/kernel/mca_drv.c
+++ b/arch/ia64/kernel/mca_drv.c
@@ -132,8 +132,7 @@ mca_handler_bh(unsigned long paddr)
 	spin_unlock(&mca_bh_lock);
 
 	/* This process is about to be killed itself */
-	force_sig(SIGKILL, current);
-	schedule();
+	do_exit(SIGKILL);
 }
 
 /**
@@ -439,6 +438,7 @@ recover_from_read_error(slidx_table_t *slidx, peidx_table_t *peidx, pal_bus_chec
 		psr2 = (struct ia64_psr *)&pmsa->pmsa_ipsr;
 		psr2->cpl = 0;
 		psr2->ri = 0;
+		psr2->i = 0;
 
 		return 1;
 	}
diff --git a/arch/ia64/kernel/mca_drv_asm.S b/arch/ia64/kernel/mca_drv_asm.S
index bcfa05acc561..2d7e0217638d 100644
--- a/arch/ia64/kernel/mca_drv_asm.S
+++ b/arch/ia64/kernel/mca_drv_asm.S
@@ -10,6 +10,7 @@
 
 #include <asm/asmmacro.h>
 #include <asm/processor.h>
+#include <asm/ptrace.h>
 
 GLOBAL_ENTRY(mca_handler_bhhook)
 	invala				// clear RSE ?
@@ -20,12 +21,21 @@ GLOBAL_ENTRY(mca_handler_bhhook)
 	;;
 	alloc		r16=ar.pfs,0,2,1,0	// make a new frame
 	;;
+	mov		ar.rsc=0
+	;;
 	mov		r13=IA64_KR(CURRENT)	// current task pointer
 	;;
-	adds		r12=IA64_TASK_THREAD_KSP_OFFSET,r13
+	mov		r2=r13
+	;;
+	addl		r22=IA64_RBS_OFFSET,r2
+	;;
+	mov		ar.bspstore=r22
 	;;
-	ld8		r12=[r12]		// stack pointer
+	addl		sp=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r2
 	;;
+	adds		r2=IA64_TASK_THREAD_ON_USTACK_OFFSET,r13
+	;;
+	st1		[r2]=r0			// clear current->thread.on_ustack flag
 	mov		loc0=r16
 	movl		loc1=mca_handler_bh	// recovery C function
 	;;
@@ -34,7 +44,9 @@ GLOBAL_ENTRY(mca_handler_bhhook)
 	;;
 	mov		loc1=rp
 	;;
-	br.call.sptk.many rp=b6		// not return ...
+	ssm		psr.i
+	;;
+	br.call.sptk.many rp=b6		// does not return ...
 	;;
 	mov		ar.pfs=loc0
 	mov		rp=loc1
diff --git a/arch/ia64/kernel/minstate.h b/arch/ia64/kernel/minstate.h
index 1dbc7b2497c9..f6d8a010d99b 100644
--- a/arch/ia64/kernel/minstate.h
+++ b/arch/ia64/kernel/minstate.h
@@ -41,7 +41,7 @@
 (pKStk) addl r3=THIS_CPU(ia64_mca_data),r3;;				\
 (pKStk) ld8 r3 = [r3];;						\
 (pKStk) addl r3=IA64_MCA_CPU_INIT_STACK_OFFSET,r3;;			\
-(pKStk) addl sp=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r3;			\
+(pKStk) addl r1=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r3;			\
 (pUStk)	mov ar.rsc=0;	/* set enforced lazy mode, pl 0, little-endian, loadrs=0 */	\
 (pUStk)	addl r22=IA64_RBS_OFFSET,r1;	/* compute base of register backing store */	\
 	;;								\
@@ -50,7 +50,6 @@
 (pUStk)	mov r23=ar.bspstore;	/* save ar.bspstore */			\
 (pUStk)	dep r22=-1,r22,61,3;	/* compute kernel virtual addr of RBS */	\
 	;;								\
-(pKStk) addl r1=-IA64_PT_REGS_SIZE,r1;	/* if in kernel mode, use sp (r12) */	\
 (pUStk)	mov ar.bspstore=r22;	/* switch to kernel RBS */		\
 	;;								\
 (pUStk)	mov r18=ar.bsp;							\
diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
index febc091c2f02..f1aca7cffd12 100644
--- a/arch/ia64/kernel/module.c
+++ b/arch/ia64/kernel/module.c
@@ -825,14 +825,16 @@ apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symind
 		 * XXX Should have an arch-hook for running this after final section
 		 * addresses have been selected...
 		 */
-		/* See if gp can cover the entire core module: */
-		uint64_t gp = (uint64_t) mod->module_core + MAX_LTOFF / 2;
-		if (mod->core_size >= MAX_LTOFF)
+		uint64_t gp;
+		if (mod->core_size > MAX_LTOFF)
 			/*
 			 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
 			 * at the end of the module.
 			 */
-			gp = (uint64_t) mod->module_core + mod->core_size - MAX_LTOFF / 2;
+			gp = mod->core_size - MAX_LTOFF / 2;
+		else
+			gp = mod->core_size / 2;
+		gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
 		mod->arch.gp = gp;
 		DEBUGP("%s: placing gp at 0x%lx\n", __FUNCTION__, gp);
 	}
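
Outside the module loader, the new gp placement is pure arithmetic: center gp on the module if it fits in the gp-addressable range, otherwise on the trailing MAX_LTOFF bytes where SHF_ARCH_SMALL sections land, then round the offset to 8 bytes. A self-contained sketch (place_gp is a hypothetical name; the arithmetic is the patch's):

#include <stdint.h>
#include <stdio.h>

/*
 * Sketch of the gp placement added above: gp is centered so that
 * limited-range gp-relative (ltoff) addressing reaches the short-data
 * sections, which the loader places at the end of the module.
 */
static uint64_t place_gp(uint64_t module_core, uint64_t core_size,
			 uint64_t max_ltoff)
{
	uint64_t gp;

	if (core_size > max_ltoff)
		gp = core_size - max_ltoff / 2;	/* cover the tail of the module */
	else
		gp = core_size / 2;		/* cover the whole module */
	return module_core + ((gp + 7) & ~(uint64_t)7);	/* 8-byte aligned */
}

int main(void)
{
	/* hypothetical numbers: 8MB module, 4MB of gp reach */
	printf("gp = %#llx\n", (unsigned long long)
	       place_gp(0xa00000000ULL, 8 << 20, 4 << 20));
	return 0;
}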
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index 376fcbc3f8da..6407bff6bfd7 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -11,7 +11,7 @@
  * Version Perfmon-2.x is a rewrite of perfmon-1.x
  * by Stephane Eranian, Hewlett Packard Co.
  *
- * Copyright (C) 1999-2003, 2005 Hewlett Packard Co
+ * Copyright (C) 1999-2005 Hewlett Packard Co
  *               Stephane Eranian <eranian@hpl.hp.com>
  *               David Mosberger-Tang <davidm@hpl.hp.com>
  *
@@ -497,6 +497,9 @@ typedef struct {
 static pfm_stats_t		pfm_stats[NR_CPUS];
 static pfm_session_t		pfm_sessions;	/* global sessions information */
 
+static spinlock_t pfm_alt_install_check = SPIN_LOCK_UNLOCKED;
+static pfm_intr_handler_desc_t	*pfm_alt_intr_handler;
+
 static struct proc_dir_entry	*perfmon_dir;
 static pfm_uuid_t		pfm_null_uuid = {0,};
 
@@ -606,6 +609,7 @@ DEFINE_PER_CPU(unsigned long, pfm_syst_info);
 DEFINE_PER_CPU(struct task_struct *, pmu_owner);
 DEFINE_PER_CPU(pfm_context_t *, pmu_ctx);
 DEFINE_PER_CPU(unsigned long, pmu_activation_number);
+EXPORT_PER_CPU_SYMBOL_GPL(pfm_syst_info);
 
 
 /* forward declaration */
@@ -1265,6 +1269,8 @@ out:
 }
 EXPORT_SYMBOL(pfm_unregister_buffer_fmt);
 
+extern void update_pal_halt_status(int);
+
 static int
 pfm_reserve_session(struct task_struct *task, int is_syswide, unsigned int cpu)
 {
@@ -1311,6 +1317,11 @@ pfm_reserve_session(struct task_struct *task, int is_syswide, unsigned int cpu)
 		is_syswide,
 		cpu));
 
+	/*
+	 * disable default_idle() to go to PAL_HALT
+	 */
+	update_pal_halt_status(0);
+
 	UNLOCK_PFS(flags);
 
 	return 0;
@@ -1318,7 +1329,7 @@ pfm_reserve_session(struct task_struct *task, int is_syswide, unsigned int cpu)
 error_conflict:
 	DPRINT(("system wide not possible, conflicting session [%d] on CPU%d\n",
 		pfm_sessions.pfs_sys_session[cpu]->pid,
-		smp_processor_id()));
+		cpu));
 abort:
 	UNLOCK_PFS(flags);
 
@@ -1366,6 +1377,12 @@ pfm_unreserve_session(pfm_context_t *ctx, int is_syswide, unsigned int cpu)
 		is_syswide,
 		cpu));
 
+	/*
+	 * if possible, enable default_idle() to go into PAL_HALT
+	 */
+	if (pfm_sessions.pfs_task_sessions == 0 && pfm_sessions.pfs_sys_sessions == 0)
+		update_pal_halt_status(1);
+
 	UNLOCK_PFS(flags);
 
 	return 0;
@@ -4202,7 +4219,7 @@ pfm_context_load(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
 		DPRINT(("cannot load to [%d], invalid ctx_state=%d\n",
 			req->load_pid,
 			ctx->ctx_state));
-		return -EINVAL;
+		return -EBUSY;
 	}
 
 	DPRINT(("load_pid [%d] using_dbreg=%d\n", req->load_pid, ctx->ctx_fl_using_dbreg));
@@ -4704,16 +4721,26 @@ recheck:
 	if (task == current || ctx->ctx_fl_system) return 0;
 
 	/*
-	 * if context is UNLOADED we are safe to go
+	 * we are monitoring another thread
 	 */
-	if (state == PFM_CTX_UNLOADED) return 0;
-
-	/*
-	 * no command can operate on a zombie context
-	 */
-	if (state == PFM_CTX_ZOMBIE) {
-		DPRINT(("cmd %d state zombie cannot operate on context\n", cmd));
-		return -EINVAL;
+	switch(state) {
+		case PFM_CTX_UNLOADED:
+			/*
+			 * if context is UNLOADED we are safe to go
+			 */
+			return 0;
+		case PFM_CTX_ZOMBIE:
+			/*
+			 * no command can operate on a zombie context
+			 */
+			DPRINT(("cmd %d state zombie cannot operate on context\n", cmd));
+			return -EINVAL;
+		case PFM_CTX_MASKED:
+			/*
+			 * PMU state has been saved to software even though
+			 * the thread may still be running.
+			 */
+			if (cmd != PFM_UNLOAD_CONTEXT) return 0;
 	}
 
 	/*
@@ -5532,26 +5559,32 @@ pfm_interrupt_handler(int irq, void *arg, struct pt_regs *regs)
 	int ret;
 
 	this_cpu = get_cpu();
-	min = pfm_stats[this_cpu].pfm_ovfl_intr_cycles_min;
-	max = pfm_stats[this_cpu].pfm_ovfl_intr_cycles_max;
+	if (likely(!pfm_alt_intr_handler)) {
+		min = pfm_stats[this_cpu].pfm_ovfl_intr_cycles_min;
+		max = pfm_stats[this_cpu].pfm_ovfl_intr_cycles_max;
 
-	start_cycles = ia64_get_itc();
+		start_cycles = ia64_get_itc();
 
-	ret = pfm_do_interrupt_handler(irq, arg, regs);
+		ret = pfm_do_interrupt_handler(irq, arg, regs);
 
-	total_cycles = ia64_get_itc();
+		total_cycles = ia64_get_itc();
 
-	/*
-	 * don't measure spurious interrupts
-	 */
-	if (likely(ret == 0)) {
-		total_cycles -= start_cycles;
+		/*
+		 * don't measure spurious interrupts
+		 */
+		if (likely(ret == 0)) {
+			total_cycles -= start_cycles;
 
-		if (total_cycles < min) pfm_stats[this_cpu].pfm_ovfl_intr_cycles_min = total_cycles;
-		if (total_cycles > max) pfm_stats[this_cpu].pfm_ovfl_intr_cycles_max = total_cycles;
+			if (total_cycles < min) pfm_stats[this_cpu].pfm_ovfl_intr_cycles_min = total_cycles;
+			if (total_cycles > max) pfm_stats[this_cpu].pfm_ovfl_intr_cycles_max = total_cycles;
 
-		pfm_stats[this_cpu].pfm_ovfl_intr_cycles += total_cycles;
+			pfm_stats[this_cpu].pfm_ovfl_intr_cycles += total_cycles;
+		}
 	}
+	else {
+		(*pfm_alt_intr_handler->handler)(irq, arg, regs);
+	}
+
 	put_cpu_no_resched();
 	return IRQ_HANDLED;
 }
@@ -6402,6 +6435,141 @@ static struct irqaction perfmon_irqaction = {
 	.name = "perfmon"
 };
 
+static void
+pfm_alt_save_pmu_state(void *data)
+{
+	struct pt_regs *regs;
+
+	regs = ia64_task_regs(current);
+
+	DPRINT(("called\n"));
+
+	/*
+	 * should not be necessary but
+	 * let's not take any risk
+	 */
+	pfm_clear_psr_up();
+	pfm_clear_psr_pp();
+	ia64_psr(regs)->pp = 0;
+
+	/*
+	 * This call is required
+	 * May cause a spurious interrupt on some processors
+	 */
+	pfm_freeze_pmu();
+
+	ia64_srlz_d();
+}
+
+void
+pfm_alt_restore_pmu_state(void *data)
+{
+	struct pt_regs *regs;
+
+	regs = ia64_task_regs(current);
+
+	DPRINT(("called\n"));
+
+	/*
+	 * put PMU back in state expected
+	 * by perfmon
+	 */
+	pfm_clear_psr_up();
+	pfm_clear_psr_pp();
+	ia64_psr(regs)->pp = 0;
+
+	/*
+	 * perfmon runs with PMU unfrozen at all times
+	 */
+	pfm_unfreeze_pmu();
+
+	ia64_srlz_d();
+}
+
+int
+pfm_install_alt_pmu_interrupt(pfm_intr_handler_desc_t *hdl)
+{
+	int ret, i;
+	int reserve_cpu;
+
+	/* some sanity checks */
+	if (hdl == NULL || hdl->handler == NULL) return -EINVAL;
+
+	/* do the easy test first */
+	if (pfm_alt_intr_handler) return -EBUSY;
+
+	/* one at a time in the install or remove, just fail the others */
+	if (!spin_trylock(&pfm_alt_install_check)) {
+		return -EBUSY;
+	}
+
+	/* reserve our session */
+	for_each_online_cpu(reserve_cpu) {
+		ret = pfm_reserve_session(NULL, 1, reserve_cpu);
+		if (ret) goto cleanup_reserve;
+	}
+
+	/* save the current system wide pmu states */
+	ret = on_each_cpu(pfm_alt_save_pmu_state, NULL, 0, 1);
+	if (ret) {
+		DPRINT(("on_each_cpu() failed: %d\n", ret));
+		goto cleanup_reserve;
+	}
+
+	/* officially change to the alternate interrupt handler */
+	pfm_alt_intr_handler = hdl;
+
+	spin_unlock(&pfm_alt_install_check);
+
+	return 0;
+
+cleanup_reserve:
+	for_each_online_cpu(i) {
+		/* don't unreserve more than we reserved */
+		if (i >= reserve_cpu) break;
+
+		pfm_unreserve_session(NULL, 1, i);
+	}
+
+	spin_unlock(&pfm_alt_install_check);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(pfm_install_alt_pmu_interrupt);
+
+int
+pfm_remove_alt_pmu_interrupt(pfm_intr_handler_desc_t *hdl)
+{
+	int i;
+	int ret;
+
+	if (hdl == NULL) return -EINVAL;
+
+	/* cannot remove someone else's handler! */
+	if (pfm_alt_intr_handler != hdl) return -EINVAL;
+
+	/* one at a time in the install or remove, just fail the others */
+	if (!spin_trylock(&pfm_alt_install_check)) {
+		return -EBUSY;
+	}
+
+	pfm_alt_intr_handler = NULL;
+
+	ret = on_each_cpu(pfm_alt_restore_pmu_state, NULL, 0, 1);
+	if (ret) {
+		DPRINT(("on_each_cpu() failed: %d\n", ret));
+	}
+
+	for_each_online_cpu(i) {
+		pfm_unreserve_session(NULL, 1, i);
+	}
+
+	spin_unlock(&pfm_alt_install_check);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(pfm_remove_alt_pmu_interrupt);
+
 /*
  * perfmon initialization routine, called from the initcall() table
  */
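
The exported install/remove pair is the whole public surface of the alternate-handler mechanism added above. A hedged sketch of a client (the handler body, its int return type, and the init/exit registration points are assumptions; only the two entry points and the handler field of pfm_intr_handler_desc_t come from the patch):

/*
 * Hypothetical client module of the alternate-PMU-interrupt API, e.g. a
 * crash-dump or external profiling facility.  The handler signature
 * mirrors the (irq, arg, regs) triple used at the call site in
 * pfm_interrupt_handler() above.
 */
static int my_pmu_handler(int irq, void *arg, struct pt_regs *regs)
{
	/* inspect or reprogram the PMU here */
	return 0;
}

static pfm_intr_handler_desc_t my_desc = {
	.handler = my_pmu_handler,
};

static int __init my_init(void)
{
	/* fails with -EBUSY if sessions exist or a handler is installed */
	return pfm_install_alt_pmu_interrupt(&my_desc);
}

static void __exit my_exit(void)
{
	pfm_remove_alt_pmu_interrupt(&my_desc);
}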
diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c
index 7c43aea5f7f7..ebb71f3d6d19 100644
--- a/arch/ia64/kernel/process.c
+++ b/arch/ia64/kernel/process.c
@@ -50,7 +50,7 @@
 #include "sigframe.h"
 
 void (*ia64_mark_idle)(int);
-static cpumask_t cpu_idle_map;
+static DEFINE_PER_CPU(unsigned int, cpu_idle_state);
 
 unsigned long boot_option_idle_override = 0;
 EXPORT_SYMBOL(boot_option_idle_override);
@@ -173,7 +173,9 @@ do_notify_resume_user (sigset_t *oldset, struct sigscratch *scr, long in_syscall
 	ia64_do_signal(oldset, scr, in_syscall);
 }
 
 static int pal_halt = 1;
+static int can_do_pal_halt = 1;
+
 static int __init nohalt_setup(char * str)
 {
 	pal_halt = 0;
@@ -181,16 +183,20 @@ static int __init nohalt_setup(char * str)
 }
 __setup("nohalt", nohalt_setup);
 
+void
+update_pal_halt_status(int status)
+{
+	can_do_pal_halt = pal_halt && status;
+}
+
 /*
  * We use this if we don't have any better idle routine..
  */
 void
 default_idle (void)
 {
-	unsigned long pmu_active = ia64_getreg(_IA64_REG_PSR) & (IA64_PSR_PP | IA64_PSR_UP);
-
 	while (!need_resched())
-		if (pal_halt && !pmu_active)
+		if (can_do_pal_halt)
 			safe_halt();
 		else
 			cpu_relax();
@@ -223,20 +229,31 @@ static inline void play_dead(void)
 }
 #endif /* CONFIG_HOTPLUG_CPU */
 
-
 void cpu_idle_wait(void)
 {
-	int cpu;
+	unsigned int cpu, this_cpu = get_cpu();
 	cpumask_t map;
 
-	for_each_online_cpu(cpu)
-		cpu_set(cpu, cpu_idle_map);
+	set_cpus_allowed(current, cpumask_of_cpu(this_cpu));
+	put_cpu();
 
-	wmb();
-	do {
-		ssleep(1);
-		cpus_and(map, cpu_idle_map, cpu_online_map);
-	} while (!cpus_empty(map));
+	cpus_clear(map);
+	for_each_online_cpu(cpu) {
+		per_cpu(cpu_idle_state, cpu) = 1;
+		cpu_set(cpu, map);
+	}
+
+	__get_cpu_var(cpu_idle_state) = 0;
+
+	wmb();
+	do {
+		ssleep(1);
+		for_each_online_cpu(cpu) {
+			if (cpu_isset(cpu, map) && !per_cpu(cpu_idle_state, cpu))
+				cpu_clear(cpu, map);
+		}
+		cpus_and(map, map, cpu_online_map);
+	} while (!cpus_empty(map));
 }
 EXPORT_SYMBOL_GPL(cpu_idle_wait);
 
@@ -244,7 +261,6 @@ void __attribute__((noreturn))
 cpu_idle (void)
 {
 	void (*mark_idle)(int) = ia64_mark_idle;
-	int cpu = smp_processor_id();
 
 	/* endless idle loop with no priority at all */
 	while (1) {
@@ -255,12 +271,13 @@ cpu_idle (void)
 		while (!need_resched()) {
 			void (*idle)(void);
 
+			if (__get_cpu_var(cpu_idle_state))
+				__get_cpu_var(cpu_idle_state) = 0;
+
+			rmb();
 			if (mark_idle)
 				(*mark_idle)(1);
 
-			if (cpu_isset(cpu, cpu_idle_map))
-				cpu_clear(cpu, cpu_idle_map);
-			rmb();
 			idle = pm_idle;
 			if (!idle)
 				idle = default_idle;
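
The reworked cpu_idle_wait() is a simple handshake: the waiter raises a per-CPU flag on every online CPU, each idle loop clears its own flag on its next pass, and the waiter polls until every flag it raised is gone. A runnable user-space analogue, with threads standing in for CPUs:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

#define NTHREADS 4
static atomic_int idle_state[NTHREADS];	/* per-"CPU" handshake flags */
static atomic_int stop;

static void *idle_loop(void *arg)
{
	int me = *(int *)arg;

	while (!atomic_load(&stop)) {
		if (atomic_load(&idle_state[me]))
			atomic_store(&idle_state[me], 0);	/* ack: I cycled */
		usleep(1000);			/* stand-in for halting */
	}
	return NULL;
}

int main(void)
{
	pthread_t t[NTHREADS];
	int id[NTHREADS];

	for (int i = 0; i < NTHREADS; i++) {
		id[i] = i;
		pthread_create(&t[i], NULL, idle_loop, &id[i]);
	}
	for (int i = 0; i < NTHREADS; i++)
		atomic_store(&idle_state[i], 1);	/* raise all flags */
	for (int i = 0; i < NTHREADS; i++)
		while (atomic_load(&idle_state[i]))	/* poll until cleared */
			usleep(1000);
	printf("every idle loop has cycled at least once\n");
	atomic_store(&stop, 1);
	for (int i = 0; i < NTHREADS; i++)
		pthread_join(t[i], NULL);
	return 0;
}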
diff --git a/arch/ia64/kernel/ptrace.c b/arch/ia64/kernel/ptrace.c
index 9e730c7bf0cd..4c1d2f5442b3 100644
--- a/arch/ia64/kernel/ptrace.c
+++ b/arch/ia64/kernel/ptrace.c
@@ -635,11 +635,17 @@ ia64_flush_fph (struct task_struct *task)
 {
 	struct ia64_psr *psr = ia64_psr(ia64_task_regs(task));
 
+	/*
+	 * Prevent migrating this task while
+	 * we're fiddling with the FPU state
+	 */
+	preempt_disable();
 	if (ia64_is_local_fpu_owner(task) && psr->mfh) {
 		psr->mfh = 0;
 		task->thread.flags |= IA64_THREAD_FPH_VALID;
 		ia64_save_fpu(&task->thread.fph[0]);
 	}
+	preempt_enable();
 }
 
 /*
645/* 651/*
@@ -692,16 +698,30 @@ convert_to_non_syscall (struct task_struct *child, struct pt_regs *pt,
692 unsigned long cfm) 698 unsigned long cfm)
693{ 699{
694 struct unw_frame_info info, prev_info; 700 struct unw_frame_info info, prev_info;
695 unsigned long ip, pr; 701 unsigned long ip, sp, pr;
696 702
697 unw_init_from_blocked_task(&info, child); 703 unw_init_from_blocked_task(&info, child);
698 while (1) { 704 while (1) {
699 prev_info = info; 705 prev_info = info;
700 if (unw_unwind(&info) < 0) 706 if (unw_unwind(&info) < 0)
701 return; 707 return;
702 if (unw_get_rp(&info, &ip) < 0) 708
709 unw_get_sp(&info, &sp);
710 if ((long)((unsigned long)child + IA64_STK_OFFSET - sp)
711 < IA64_PT_REGS_SIZE) {
712 dprintk("ptrace.%s: ran off the top of the kernel "
713 "stack\n", __FUNCTION__);
714 return;
715 }
716 if (unw_get_pr (&prev_info, &pr) < 0) {
717 unw_get_rp(&prev_info, &ip);
718 dprintk("ptrace.%s: failed to read "
719 "predicate register (ip=0x%lx)\n",
720 __FUNCTION__, ip);
703 return; 721 return;
704 if (ip < FIXADDR_USER_END) 722 }
723 if (unw_is_intr_frame(&info)
724 && (pr & (1UL << PRED_USER_STACK)))
705 break; 725 break;
706 } 726 }
707 727
@@ -1616,20 +1636,25 @@ syscall_trace_enter (long arg0, long arg1, long arg2, long arg3,
 		     long arg4, long arg5, long arg6, long arg7,
 		     struct pt_regs regs)
 {
-	long syscall;
+	if (test_thread_flag(TIF_SYSCALL_TRACE)
+	    && (current->ptrace & PT_PTRACED))
+		syscall_trace();
 
 	if (unlikely(current->audit_context)) {
-		if (IS_IA32_PROCESS(&regs))
+		long syscall;
+		int arch;
+
+		if (IS_IA32_PROCESS(&regs)) {
 			syscall = regs.r1;
-		else
+			arch = AUDIT_ARCH_I386;
+		} else {
 			syscall = regs.r15;
+			arch = AUDIT_ARCH_IA64;
+		}
 
-		audit_syscall_entry(current, syscall, arg0, arg1, arg2, arg3);
+		audit_syscall_entry(current, arch, syscall, arg0, arg1, arg2, arg3);
 	}
 
-	if (test_thread_flag(TIF_SYSCALL_TRACE)
-	    && (current->ptrace & PT_PTRACED))
-		syscall_trace();
 }
1634 1659
1635/* "asmlinkage" so the input arguments are preserved... */ 1660/* "asmlinkage" so the input arguments are preserved... */
@@ -1640,7 +1665,7 @@ syscall_trace_leave (long arg0, long arg1, long arg2, long arg3,
 		     struct pt_regs regs)
 {
 	if (unlikely(current->audit_context))
-		audit_syscall_exit(current, regs.r8);
+		audit_syscall_exit(current, AUDITSC_RESULT(regs.r10), regs.r8);
 
 	if (test_thread_flag(TIF_SYSCALL_TRACE)
 	    && (current->ptrace & PT_PTRACED))
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index b7e6b4cb374b..d14692e0920a 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -720,7 +720,8 @@ cpu_init (void)
 	ia64_set_kr(IA64_KR_PT_BASE, __pa(ia64_imva(empty_zero_page)));
 
 	/*
-	 * Initialize default control register to defer all speculative faults.  The
+	 * Initialize default control register to defer speculative faults except
+	 * for those arising from TLB misses, which are not deferred.  The
 	 * kernel MUST NOT depend on a particular setting of these bits (in other words,
 	 * the kernel must have recovery code for all speculative accesses).  Turn on
 	 * dcr.lc as per recommendation by the architecture team.  Most IA-32 apps
diff --git a/arch/ia64/kernel/signal.c b/arch/ia64/kernel/signal.c
index 6891d86937d9..499b7e5317cf 100644
--- a/arch/ia64/kernel/signal.c
+++ b/arch/ia64/kernel/signal.c
@@ -224,7 +224,8 @@ ia64_rt_sigreturn (struct sigscratch *scr)
224 * could be corrupted. 224 * could be corrupted.
225 */ 225 */
226 retval = (long) &ia64_leave_kernel; 226 retval = (long) &ia64_leave_kernel;
227 if (test_thread_flag(TIF_SYSCALL_TRACE)) 227 if (test_thread_flag(TIF_SYSCALL_TRACE)
228 || test_thread_flag(TIF_SYSCALL_AUDIT))
228 /* 229 /*
229 * strace expects to be notified after sigreturn returns even though the 230 * strace expects to be notified after sigreturn returns even though the
230 * context to which we return may not be in the middle of a syscall. 231 * context to which we return may not be in the middle of a syscall.
diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c
index 0d5ee57c9865..3865f088ffa2 100644
--- a/arch/ia64/kernel/smpboot.c
+++ b/arch/ia64/kernel/smpboot.c
@@ -624,7 +624,7 @@ static struct {
 	__u16 thread_id;
 	__u16 proc_fixed_addr;
 	__u8 valid;
-}mt_info[NR_CPUS] __devinit;
+} mt_info[NR_CPUS] __devinitdata;
 
 #ifdef CONFIG_HOTPLUG_CPU
 static inline void
diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c
index a8cf6d8a509c..770fab37928e 100644
--- a/arch/ia64/kernel/sys_ia64.c
+++ b/arch/ia64/kernel/sys_ia64.c
@@ -182,13 +182,6 @@ do_mmap2 (unsigned long addr, unsigned long len, int prot, int flags, int fd, un
 		}
 	}
 
-	/*
-	 * A zero mmap always succeeds in Linux, independent of whether or not the
-	 * remaining arguments are valid.
-	 */
-	if (len == 0)
-		goto out;
-
 	/* Careful about overflows.. */
 	len = PAGE_ALIGN(len);
 	if (!len || len > TASK_SIZE) {
diff --git a/arch/ia64/kernel/traps.c b/arch/ia64/kernel/traps.c
index e82ad78081b3..1861173bd4f6 100644
--- a/arch/ia64/kernel/traps.c
+++ b/arch/ia64/kernel/traps.c
@@ -111,6 +111,24 @@ ia64_bad_break (unsigned long break_num, struct pt_regs *regs)
 	siginfo_t siginfo;
 	int sig, code;
 
+	/* break.b always sets cr.iim to 0, which causes problems for
+	 * debuggers.  Get the real break number from the original instruction,
+	 * but only for kernel code.  User space break.b is left alone, to
+	 * preserve the existing behaviour.  All break codings have the same
+	 * format, so there is no need to check the slot type.
+	 */
+	if (break_num == 0 && !user_mode(regs)) {
+		struct ia64_psr *ipsr = ia64_psr(regs);
+		unsigned long *bundle = (unsigned long *)regs->cr_iip;
+		unsigned long slot;
+		switch (ipsr->ri) {
+		      case 0: slot = (bundle[0] >> 5); break;
+		      case 1: slot = (bundle[0] >> 46) | (bundle[1] << 18); break;
+		      default: slot = (bundle[1] >> 23); break;
+		}
+		break_num = ((slot >> 36 & 1) << 20) | (slot >> 6 & 0xfffff);
+	}
+
 	/* SIGILL, SIGFPE, SIGSEGV, and SIGBUS want these fields initialized: */
 	siginfo.si_addr = (void __user *) (regs->cr_iip + ia64_psr(regs)->ri);
 	siginfo.si_imm = break_num;
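
Before the next hunk, the decode just added is worth illustrating on its own: an IA-64 bundle is 128 bits (two little-endian 64-bit words) holding a 5-bit template and three 41-bit instruction slots, and the break immediate is slot bit 36 (i) over slot bits 6..25 (imm20). A runnable round-trip sketch (the encoder side is a hypothetical test harness):

#include <stdio.h>

/*
 * Decode side: identical to the kernel extraction above.  ri is the
 * slot index from psr.ri; the immediate is (i << 20) | imm20.
 */
static unsigned long break_imm(const unsigned long bundle[2], unsigned int ri)
{
	unsigned long slot;

	switch (ri) {
	case 0:  slot = bundle[0] >> 5; break;
	case 1:  slot = (bundle[0] >> 46) | (bundle[1] << 18); break;
	default: slot = bundle[1] >> 23; break;
	}
	return ((slot >> 36 & 1) << 20) | (slot >> 6 & 0xfffff);
}

int main(void)
{
	/* Hypothetical bundle carrying "break 0x12345" in slot 0. */
	unsigned long bundle[2] = { 0, 0 };
	unsigned long imm = 0x12345;

	/* Encode: i at slot bit 36, imm20 at slot bits 6..25, then shift
	 * the 41-bit slot past the 5-bit template. */
	unsigned long slot = ((imm >> 20 & 1) << 36) | ((imm & 0xfffff) << 6);
	bundle[0] = slot << 5;

	printf("decoded break number: 0x%lx\n", break_imm(bundle, 0));
	return 0;
}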
@@ -202,13 +220,21 @@ disabled_fph_fault (struct pt_regs *regs)
 
 	/* first, grant user-level access to fph partition: */
 	psr->dfh = 0;
+
+	/*
+	 * Make sure that no other task gets in on this processor
+	 * while we're claiming the FPU
+	 */
+	preempt_disable();
 #ifndef CONFIG_SMP
 	{
 		struct task_struct *fpu_owner
 			= (struct task_struct *)ia64_get_kr(IA64_KR_FPU_OWNER);
 
-		if (ia64_is_local_fpu_owner(current))
+		if (ia64_is_local_fpu_owner(current)) {
+			preempt_enable_no_resched();
 			return;
+		}
 
 		if (fpu_owner)
 			ia64_flush_fph(fpu_owner);
@@ -226,6 +252,7 @@ disabled_fph_fault (struct pt_regs *regs)
 		 */
 		psr->mfh = 1;
 	}
+	preempt_enable_no_resched();
 }
 
 static inline int