Diffstat (limited to 'arch/ia64/kernel')
-rw-r--r--  arch/ia64/kernel/acpi.c                  |  23
-rw-r--r--  arch/ia64/kernel/entry.S                 |  18
-rw-r--r--  arch/ia64/kernel/fsys.S                  |   4
-rw-r--r--  arch/ia64/kernel/head.S                  | 284
-rw-r--r--  arch/ia64/kernel/iosapic.c               | 358
-rw-r--r--  arch/ia64/kernel/irq_ia64.c              |  16
-rw-r--r--  arch/ia64/kernel/mca_asm.S               |  88
-rw-r--r--  arch/ia64/kernel/mca_drv.c               |   4
-rw-r--r--  arch/ia64/kernel/mca_drv_asm.S           |  18
-rw-r--r--  arch/ia64/kernel/perfmon.c               | 102
-rw-r--r--  arch/ia64/kernel/perfmon_default_smpl.c  |  13
-rw-r--r--  arch/ia64/kernel/process.c               |  77
-rw-r--r--  arch/ia64/kernel/ptrace.c                |  26
-rw-r--r--  arch/ia64/kernel/setup.c                 |  69
-rw-r--r--  arch/ia64/kernel/signal.c                |   3
-rw-r--r--  arch/ia64/kernel/smpboot.c               | 294
-rw-r--r--  arch/ia64/kernel/sys_ia64.c              |  14
-rw-r--r--  arch/ia64/kernel/unwind.c                |  27
18 files changed, 1096 insertions, 342 deletions
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
index a8e99c56a768..72dfd9e7de0f 100644
--- a/arch/ia64/kernel/acpi.c
+++ b/arch/ia64/kernel/acpi.c
@@ -779,7 +779,7 @@ acpi_map_iosapic (acpi_handle handle, u32 depth, void *context, void **ret)
 	union acpi_object *obj;
 	struct acpi_table_iosapic *iosapic;
 	unsigned int gsi_base;
-	int node;
+	int pxm, node;
 
 	/* Only care about objects w/ a method that returns the MADT */
 	if (ACPI_FAILURE(acpi_evaluate_object(handle, "_MAT", NULL, &buffer)))
@@ -805,29 +805,16 @@ acpi_map_iosapic (acpi_handle handle, u32 depth, void *context, void **ret)
 	gsi_base = iosapic->global_irq_base;
 
 	acpi_os_free(buffer.pointer);
-	buffer.length = ACPI_ALLOCATE_BUFFER;
-	buffer.pointer = NULL;
 
 	/*
-	 * OK, it's an IOSAPIC MADT entry, look for a _PXM method to tell
+	 * OK, it's an IOSAPIC MADT entry, look for a _PXM value to tell
 	 * us which node to associate this with.
 	 */
-	if (ACPI_FAILURE(acpi_evaluate_object(handle, "_PXM", NULL, &buffer)))
-		return AE_OK;
-
-	if (!buffer.length || !buffer.pointer)
-		return AE_OK;
-
-	obj = buffer.pointer;
-
-	if (obj->type != ACPI_TYPE_INTEGER ||
-	    obj->integer.value >= MAX_PXM_DOMAINS) {
-		acpi_os_free(buffer.pointer);
+	pxm = acpi_get_pxm(handle);
+	if (pxm < 0)
 		return AE_OK;
-	}
 
-	node = pxm_to_nid_map[obj->integer.value];
-	acpi_os_free(buffer.pointer);
+	node = pxm_to_nid_map[pxm];
 
 	if (node >= MAX_NUMNODES || !node_online(node) ||
 	    cpus_empty(node_to_cpumask(node)))
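
The acpi.c hunk above collapses an open-coded _MAT/_PXM object evaluation into a single acpi_get_pxm() call plus one error check. A minimal userspace sketch of the resulting control flow; the helper body, the pxm_to_nid_map contents, and the constants below are illustrative stand-ins, not the kernel's definitions:

	#include <stdio.h>

	#define MAX_PXM_DOMAINS 256

	/* Stand-in for the kernel's proximity-domain to node table. */
	static int pxm_to_nid_map[MAX_PXM_DOMAINS];

	/* Mocked acpi_get_pxm(): returns the proximity domain or a negative error. */
	static int acpi_get_pxm(void *handle)
	{
		(void)handle;
		return 2;	/* pretend _PXM evaluated to domain 2 */
	}

	/* The pattern after the patch: one helper call, one error check. */
	static int iosapic_node_for(void *handle)
	{
		int pxm = acpi_get_pxm(handle);

		if (pxm < 0)
			return -1;	/* no _PXM: caller falls back gracefully */
		return pxm_to_nid_map[pxm];
	}

	int main(void)
	{
		pxm_to_nid_map[2] = 1;
		printf("node = %d\n", iosapic_node_for(NULL));
		return 0;
	}

The buffer bookkeeping (length/pointer resets, obj type checks, the extra acpi_os_free calls) all disappears into the helper, which is why the hunk is net-negative.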
diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S
index 0272c010a3ba..81c45d447394 100644
--- a/arch/ia64/kernel/entry.S
+++ b/arch/ia64/kernel/entry.S
@@ -728,12 +728,8 @@ ENTRY(ia64_leave_syscall)
 	mov f8=f0			// clear f8
 	;;
 	ld8 r30=[r2],16		// M0|1 load cr.ifs
-	mov.m ar.ssd=r0		// M2 clear ar.ssd
-	cmp.eq p9,p0=r0,r0	// set p9 to indicate that we should restore cr.ifs
-	;;
 	ld8 r25=[r3],16		// M0|1 load ar.unat
-	mov.m ar.csd=r0		// M2 clear ar.csd
-	mov r22=r0			// clear r22
+	cmp.eq p9,p0=r0,r0	// set p9 to indicate that we should restore cr.ifs
 	;;
 	ld8 r26=[r2],PT(B0)-PT(AR_PFS)	// M0|1 load ar.pfs
 (pKStk)	mov r22=psr		// M2 read PSR now that interrupts are disabled
@@ -756,11 +752,15 @@ ENTRY(ia64_leave_syscall)
 	mov f7=f0			// clear f7
 	;;
 	ld8.fill r12=[r2]		// restore r12 (sp)
+	mov.m ar.ssd=r0			// M2 clear ar.ssd
+	mov r22=r0			// clear r22
+
 	ld8.fill r15=[r3]		// restore r15
+(pUStk)	st1 [r14]=r17
 	addl r3=THIS_CPU(ia64_phys_stacked_size_p8),r0
 	;;
-(pUStk)	ld4 r3=[r3]		// r3 = cpu_data->phys_stacked_size_p8
-(pUStk)	st1 [r14]=r17
+(pUStk)	ld4 r17=[r3]		// r17 = cpu_data->phys_stacked_size_p8
+	mov.m ar.csd=r0			// M2 clear ar.csd
 	mov b6=r18			// I0 restore b6
 	;;
 	mov r14=r0			// clear r14
@@ -782,7 +782,7 @@ GLOBAL_ENTRY(ia64_ret_from_ia32_execve)
 	st8.spill [r2]=r8	// store return value in slot for r8 and set unat bit
 	.mem.offset 8,0
 	st8.spill [r3]=r0	// clear error indication in slot for r10 and set unat bit
-END(ia64_ret_from_ia32_execve_syscall)
+END(ia64_ret_from_ia32_execve)
 	// fall through
 #endif /* CONFIG_IA32_SUPPORT */
 GLOBAL_ENTRY(ia64_leave_kernel)
@@ -1417,7 +1417,7 @@ sys_call_table:
 	data8 sys_msgrcv
 	data8 sys_msgctl
 	data8 sys_shmget
-	data8 ia64_shmat
+	data8 sys_shmat
 	data8 sys_shmdt				// 1115
 	data8 sys_shmctl
 	data8 sys_syslog
diff --git a/arch/ia64/kernel/fsys.S b/arch/ia64/kernel/fsys.S
index 0d8650f7fce7..4f3cdef75797 100644
--- a/arch/ia64/kernel/fsys.S
+++ b/arch/ia64/kernel/fsys.S
@@ -611,8 +611,10 @@ GLOBAL_ENTRY(fsys_bubble_down)
 	movl r2=ia64_ret_from_syscall
 	;;
 	mov rp=r2			// set the real return addr
-	tbit.z p8,p0=r3,TIF_SYSCALL_TRACE
+	and r3=_TIF_SYSCALL_TRACEAUDIT,r3
 	;;
+	cmp.eq p8,p0=r3,r0
+
 (p10)	br.cond.spnt.many ia64_ret_from_syscall	// p10==true means out registers are more than 8
 (p8)	br.call.sptk.many b6=b6		// ignore this return addr
 	br.cond.sptk ia64_trace_syscall
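
The fsys.S hunk swaps a single-bit test (tbit.z on TIF_SYSCALL_TRACE) for an and/cmp.eq pair against the combined _TIF_SYSCALL_TRACEAUDIT mask, so audit-only tasks also take the traced path. A hedged C sketch of the same predicate; the bit positions are invented for illustration, not the real ia64 values:

	#include <stdio.h>

	#define TIF_SYSCALL_TRACE	3
	#define TIF_SYSCALL_AUDIT	4

	#define _TIF_SYSCALL_TRACE	(1 << TIF_SYSCALL_TRACE)
	#define _TIF_SYSCALL_AUDIT	(1 << TIF_SYSCALL_AUDIT)
	#define _TIF_SYSCALL_TRACEAUDIT	(_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT)

	/* Before: only the trace bit decided whether to take the slow path. */
	static int slow_path_old(unsigned long flags)
	{
		return (flags & _TIF_SYSCALL_TRACE) != 0;
	}

	/* After: any bit in the combined mask forces the slow path,
	 * which is what the and/cmp.eq pair in the assembly computes. */
	static int slow_path_new(unsigned long flags)
	{
		return (flags & _TIF_SYSCALL_TRACEAUDIT) != 0;
	}

	int main(void)
	{
		unsigned long audit_only = _TIF_SYSCALL_AUDIT;

		printf("old: %d, new: %d\n",
		       slow_path_old(audit_only), slow_path_new(audit_only));
		return 0;
	}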
diff --git a/arch/ia64/kernel/head.S b/arch/ia64/kernel/head.S
index 105c7fec8c6d..8d3a9291b47f 100644
--- a/arch/ia64/kernel/head.S
+++ b/arch/ia64/kernel/head.S
@@ -15,6 +15,8 @@
  * Copyright (C) 1999 Don Dugger <Don.Dugger@intel.com>
  * Copyright (C) 2002 Fenghua Yu <fenghua.yu@intel.com>
  *   -Optimize __ia64_save_fpu() and __ia64_load_fpu() for Itanium 2.
+ * Copyright (C) 2004 Ashok Raj <ashok.raj@intel.com>
+ *   Support for CPU Hotplug
  */
 
 #include <linux/config.h>
@@ -29,6 +31,139 @@
 #include <asm/processor.h>
 #include <asm/ptrace.h>
 #include <asm/system.h>
+#include <asm/mca_asm.h>
+
+#ifdef CONFIG_HOTPLUG_CPU
+#define SAL_PSR_BITS_TO_SET	\
+	(IA64_PSR_AC | IA64_PSR_BN | IA64_PSR_MFH | IA64_PSR_MFL)
+
+#define SAVE_FROM_REG(src, ptr, dest)	\
+	mov dest=src;;	\
+	st8 [ptr]=dest,0x08
+
+#define RESTORE_REG(reg, ptr, _tmp)	\
+	ld8 _tmp=[ptr],0x08;;	\
+	mov reg=_tmp
+
+#define SAVE_BREAK_REGS(ptr, _idx, _breg, _dest)\
+	mov ar.lc=IA64_NUM_DBG_REGS-1;;	\
+	mov _idx=0;;	\
+1:	\
+	SAVE_FROM_REG(_breg[_idx], ptr, _dest);;	\
+	add _idx=1,_idx;;	\
+	br.cloop.sptk.many 1b
+
+#define RESTORE_BREAK_REGS(ptr, _idx, _breg, _tmp, _lbl)\
+	mov ar.lc=IA64_NUM_DBG_REGS-1;;	\
+	mov _idx=0;;	\
+_lbl:	RESTORE_REG(_breg[_idx], ptr, _tmp);;	\
+	add _idx=1, _idx;;	\
+	br.cloop.sptk.many _lbl
+
+#define SAVE_ONE_RR(num, _reg, _tmp) \
+	movl _tmp=(num<<61);;	\
+	mov _reg=rr[_tmp]
+
+#define SAVE_REGION_REGS(_tmp, _r0, _r1, _r2, _r3, _r4, _r5, _r6, _r7) \
+	SAVE_ONE_RR(0,_r0, _tmp);; \
+	SAVE_ONE_RR(1,_r1, _tmp);; \
+	SAVE_ONE_RR(2,_r2, _tmp);; \
+	SAVE_ONE_RR(3,_r3, _tmp);; \
+	SAVE_ONE_RR(4,_r4, _tmp);; \
+	SAVE_ONE_RR(5,_r5, _tmp);; \
+	SAVE_ONE_RR(6,_r6, _tmp);; \
+	SAVE_ONE_RR(7,_r7, _tmp);;
+
+#define STORE_REGION_REGS(ptr, _r0, _r1, _r2, _r3, _r4, _r5, _r6, _r7) \
+	st8 [ptr]=_r0, 8;; \
+	st8 [ptr]=_r1, 8;; \
+	st8 [ptr]=_r2, 8;; \
+	st8 [ptr]=_r3, 8;; \
+	st8 [ptr]=_r4, 8;; \
+	st8 [ptr]=_r5, 8;; \
+	st8 [ptr]=_r6, 8;; \
+	st8 [ptr]=_r7, 8;;
+
+#define RESTORE_REGION_REGS(ptr, _idx1, _idx2, _tmp) \
+	mov ar.lc=0x08-1;; \
+	movl _idx1=0x00;; \
+RestRR: \
+	dep.z _idx2=_idx1,61,3;; \
+	ld8 _tmp=[ptr],8;; \
+	mov rr[_idx2]=_tmp;; \
+	srlz.d;; \
+	add _idx1=1,_idx1;; \
+	br.cloop.sptk.few RestRR
+
+#define SET_AREA_FOR_BOOTING_CPU(reg1, reg2) \
+	movl reg1=sal_state_for_booting_cpu;; \
+	ld8 reg2=[reg1];;
+
+/*
+ * Adjust region registers saved before starting to save
+ * break regs and rest of the states that need to be preserved.
+ */
+#define SAL_TO_OS_BOOT_HANDOFF_STATE_SAVE(_reg1,_reg2,_pred)  \
+	SAVE_FROM_REG(b0,_reg1,_reg2);;	\
+	SAVE_FROM_REG(b1,_reg1,_reg2);;	\
+	SAVE_FROM_REG(b2,_reg1,_reg2);;	\
+	SAVE_FROM_REG(b3,_reg1,_reg2);;	\
+	SAVE_FROM_REG(b4,_reg1,_reg2);;	\
+	SAVE_FROM_REG(b5,_reg1,_reg2);;	\
+	st8 [_reg1]=r1,0x08;;	\
+	st8 [_reg1]=r12,0x08;;	\
+	st8 [_reg1]=r13,0x08;;	\
+	SAVE_FROM_REG(ar.fpsr,_reg1,_reg2);;	\
+	SAVE_FROM_REG(ar.pfs,_reg1,_reg2);;	\
+	SAVE_FROM_REG(ar.rnat,_reg1,_reg2);;	\
+	SAVE_FROM_REG(ar.unat,_reg1,_reg2);;	\
+	SAVE_FROM_REG(ar.bspstore,_reg1,_reg2);;	\
+	SAVE_FROM_REG(cr.dcr,_reg1,_reg2);;	\
+	SAVE_FROM_REG(cr.iva,_reg1,_reg2);;	\
+	SAVE_FROM_REG(cr.pta,_reg1,_reg2);;	\
+	SAVE_FROM_REG(cr.itv,_reg1,_reg2);;	\
+	SAVE_FROM_REG(cr.pmv,_reg1,_reg2);;	\
+	SAVE_FROM_REG(cr.cmcv,_reg1,_reg2);;	\
+	SAVE_FROM_REG(cr.lrr0,_reg1,_reg2);;	\
+	SAVE_FROM_REG(cr.lrr1,_reg1,_reg2);;	\
+	st8 [_reg1]=r4,0x08;;	\
+	st8 [_reg1]=r5,0x08;;	\
+	st8 [_reg1]=r6,0x08;;	\
+	st8 [_reg1]=r7,0x08;;	\
+	st8 [_reg1]=_pred,0x08;;	\
+	SAVE_FROM_REG(ar.lc, _reg1, _reg2);;	\
+	stf.spill.nta [_reg1]=f2,16;;	\
+	stf.spill.nta [_reg1]=f3,16;;	\
+	stf.spill.nta [_reg1]=f4,16;;	\
+	stf.spill.nta [_reg1]=f5,16;;	\
+	stf.spill.nta [_reg1]=f16,16;;	\
+	stf.spill.nta [_reg1]=f17,16;;	\
+	stf.spill.nta [_reg1]=f18,16;;	\
+	stf.spill.nta [_reg1]=f19,16;;	\
+	stf.spill.nta [_reg1]=f20,16;;	\
+	stf.spill.nta [_reg1]=f21,16;;	\
+	stf.spill.nta [_reg1]=f22,16;;	\
+	stf.spill.nta [_reg1]=f23,16;;	\
+	stf.spill.nta [_reg1]=f24,16;;	\
+	stf.spill.nta [_reg1]=f25,16;;	\
+	stf.spill.nta [_reg1]=f26,16;;	\
+	stf.spill.nta [_reg1]=f27,16;;	\
+	stf.spill.nta [_reg1]=f28,16;;	\
+	stf.spill.nta [_reg1]=f29,16;;	\
+	stf.spill.nta [_reg1]=f30,16;;	\
+	stf.spill.nta [_reg1]=f31,16;;
+
+#else
+#define SET_AREA_FOR_BOOTING_CPU(a1, a2)
+#define SAL_TO_OS_BOOT_HANDOFF_STATE_SAVE(a1,a2, a3)
+#define SAVE_REGION_REGS(_tmp, _r0, _r1, _r2, _r3, _r4, _r5, _r6, _r7)
+#define STORE_REGION_REGS(ptr, _r0, _r1, _r2, _r3, _r4, _r5, _r6, _r7)
+#endif
+
+#define SET_ONE_RR(num, pgsize, _tmp1, _tmp2, vhpt) \
+	movl _tmp1=(num << 61);;	\
+	mov _tmp2=((ia64_rid(IA64_REGION_ID_KERNEL, (num<<61)) << 8) | (pgsize << 2) | vhpt);; \
+	mov rr[_tmp1]=_tmp2
 
 	.section __special_page_section,"ax"
 
@@ -64,6 +199,12 @@ start_ap:
 	srlz.i
 	;;
 	/*
+	 * Save the region registers, predicate before they get clobbered
+	 */
+	SAVE_REGION_REGS(r2, r8,r9,r10,r11,r12,r13,r14,r15);
+	mov r25=pr;;
+
+	/*
 	 * Initialize kernel region registers:
 	 *	rr[0]: VHPT enabled, page size = PAGE_SHIFT
 	 *	rr[1]: VHPT enabled, page size = PAGE_SHIFT
@@ -76,32 +217,14 @@ start_ap:
 	 * We initialize all of them to prevent inadvertently assuming
 	 * something about the state of address translation early in boot.
 	 */
-	mov r6=((ia64_rid(IA64_REGION_ID_KERNEL, (0<<61)) << 8) | (PAGE_SHIFT << 2) | 1)
-	movl r7=(0<<61)
-	mov r8=((ia64_rid(IA64_REGION_ID_KERNEL, (1<<61)) << 8) | (PAGE_SHIFT << 2) | 1)
-	movl r9=(1<<61)
-	mov r10=((ia64_rid(IA64_REGION_ID_KERNEL, (2<<61)) << 8) | (PAGE_SHIFT << 2) | 1)
-	movl r11=(2<<61)
-	mov r12=((ia64_rid(IA64_REGION_ID_KERNEL, (3<<61)) << 8) | (PAGE_SHIFT << 2) | 1)
-	movl r13=(3<<61)
-	mov r14=((ia64_rid(IA64_REGION_ID_KERNEL, (4<<61)) << 8) | (PAGE_SHIFT << 2) | 1)
-	movl r15=(4<<61)
-	mov r16=((ia64_rid(IA64_REGION_ID_KERNEL, (5<<61)) << 8) | (PAGE_SHIFT << 2) | 1)
-	movl r17=(5<<61)
-	mov r18=((ia64_rid(IA64_REGION_ID_KERNEL, (6<<61)) << 8) | (IA64_GRANULE_SHIFT << 2))
-	movl r19=(6<<61)
-	mov r20=((ia64_rid(IA64_REGION_ID_KERNEL, (7<<61)) << 8) | (IA64_GRANULE_SHIFT << 2))
-	movl r21=(7<<61)
-	;;
-	mov rr[r7]=r6
-	mov rr[r9]=r8
-	mov rr[r11]=r10
-	mov rr[r13]=r12
-	mov rr[r15]=r14
-	mov rr[r17]=r16
-	mov rr[r19]=r18
-	mov rr[r21]=r20
-	;;
+	SET_ONE_RR(0, PAGE_SHIFT, r2, r16, 1);;
+	SET_ONE_RR(1, PAGE_SHIFT, r2, r16, 1);;
+	SET_ONE_RR(2, PAGE_SHIFT, r2, r16, 1);;
+	SET_ONE_RR(3, PAGE_SHIFT, r2, r16, 1);;
+	SET_ONE_RR(4, PAGE_SHIFT, r2, r16, 1);;
+	SET_ONE_RR(5, PAGE_SHIFT, r2, r16, 1);;
+	SET_ONE_RR(6, IA64_GRANULE_SHIFT, r2, r16, 0);;
+	SET_ONE_RR(7, IA64_GRANULE_SHIFT, r2, r16, 0);;
 	/*
 	 * Now pin mappings into the TLB for kernel text and data
 	 */
@@ -142,6 +265,12 @@ start_ap:
 	;;
 1:	// now we are in virtual mode
 
+	SET_AREA_FOR_BOOTING_CPU(r2, r16);
+
+	STORE_REGION_REGS(r16, r8,r9,r10,r11,r12,r13,r14,r15);
+	SAL_TO_OS_BOOT_HANDOFF_STATE_SAVE(r16,r17,r25)
+	;;
+
 	// set IVT entry point---can't access I/O ports without it
 	movl r3=ia64_ivt
 	;;
@@ -211,12 +340,13 @@ start_ap:
 	mov IA64_KR(CURRENT_STACK)=r16
 	mov r13=r2
 	/*
-	 * Reserve space at the top of the stack for "struct pt_regs". Kernel threads
-	 * don't store interesting values in that structure, but the space still needs
-	 * to be there because time-critical stuff such as the context switching can
-	 * be implemented more efficiently (for example, __switch_to()
+	 * Reserve space at the top of the stack for "struct pt_regs". Kernel
+	 * threads don't store interesting values in that structure, but the space
+	 * still needs to be there because time-critical stuff such as the context
+	 * switching can be implemented more efficiently (for example, __switch_to()
 	 * always sets the psr.dfh bit of the task it is switching to).
 	 */
+
 	addl r12=IA64_STK_OFFSET-IA64_PT_REGS_SIZE-16,r2
 	addl r2=IA64_RBS_OFFSET,r2	// initialize the RSE
 	mov ar.rsc=0		// place RSE in enforced lazy mode
@@ -993,4 +1123,98 @@ END(ia64_spinlock_contention)
 
 #endif
 
+#ifdef CONFIG_HOTPLUG_CPU
+GLOBAL_ENTRY(ia64_jump_to_sal)
+	alloc r16=ar.pfs,1,0,0,0;;
+	rsm psr.i | psr.ic
+{
+	flushrs
+	srlz.i
+}
+	tpa r25=in0
+	movl r18=tlb_purge_done;;
+	DATA_VA_TO_PA(r18);;
+	mov b1=r18	// Return location
+	movl r18=ia64_do_tlb_purge;;
+	DATA_VA_TO_PA(r18);;
+	mov b2=r18	// doing tlb_flush work
+	mov ar.rsc=0	// Put RSE in enforced lazy, LE mode
+	movl r17=1f;;
+	DATA_VA_TO_PA(r17);;
+	mov cr.iip=r17
+	movl r16=SAL_PSR_BITS_TO_SET;;
+	mov cr.ipsr=r16
+	mov cr.ifs=r0;;
+	rfi;;
+1:
+	/*
+	 * Invalidate all TLB data/inst
+	 */
+	br.sptk.many b2;;	// jump to tlb purge code
+
+tlb_purge_done:
+	RESTORE_REGION_REGS(r25, r17,r18,r19);;
+	RESTORE_REG(b0, r25, r17);;
+	RESTORE_REG(b1, r25, r17);;
+	RESTORE_REG(b2, r25, r17);;
+	RESTORE_REG(b3, r25, r17);;
+	RESTORE_REG(b4, r25, r17);;
+	RESTORE_REG(b5, r25, r17);;
+	ld8 r1=[r25],0x08;;
+	ld8 r12=[r25],0x08;;
+	ld8 r13=[r25],0x08;;
+	RESTORE_REG(ar.fpsr, r25, r17);;
+	RESTORE_REG(ar.pfs, r25, r17);;
+	RESTORE_REG(ar.rnat, r25, r17);;
+	RESTORE_REG(ar.unat, r25, r17);;
+	RESTORE_REG(ar.bspstore, r25, r17);;
+	RESTORE_REG(cr.dcr, r25, r17);;
+	RESTORE_REG(cr.iva, r25, r17);;
+	RESTORE_REG(cr.pta, r25, r17);;
+	RESTORE_REG(cr.itv, r25, r17);;
+	RESTORE_REG(cr.pmv, r25, r17);;
+	RESTORE_REG(cr.cmcv, r25, r17);;
+	RESTORE_REG(cr.lrr0, r25, r17);;
+	RESTORE_REG(cr.lrr1, r25, r17);;
+	ld8 r4=[r25],0x08;;
+	ld8 r5=[r25],0x08;;
+	ld8 r6=[r25],0x08;;
+	ld8 r7=[r25],0x08;;
+	ld8 r17=[r25],0x08;;
+	mov pr=r17,-1;;
+	RESTORE_REG(ar.lc, r25, r17);;
+	/*
+	 * Now Restore floating point regs
+	 */
+	ldf.fill.nta f2=[r25],16;;
+	ldf.fill.nta f3=[r25],16;;
+	ldf.fill.nta f4=[r25],16;;
+	ldf.fill.nta f5=[r25],16;;
+	ldf.fill.nta f16=[r25],16;;
+	ldf.fill.nta f17=[r25],16;;
+	ldf.fill.nta f18=[r25],16;;
+	ldf.fill.nta f19=[r25],16;;
+	ldf.fill.nta f20=[r25],16;;
+	ldf.fill.nta f21=[r25],16;;
+	ldf.fill.nta f22=[r25],16;;
+	ldf.fill.nta f23=[r25],16;;
+	ldf.fill.nta f24=[r25],16;;
+	ldf.fill.nta f25=[r25],16;;
+	ldf.fill.nta f26=[r25],16;;
+	ldf.fill.nta f27=[r25],16;;
+	ldf.fill.nta f28=[r25],16;;
+	ldf.fill.nta f29=[r25],16;;
+	ldf.fill.nta f30=[r25],16;;
+	ldf.fill.nta f31=[r25],16;;
+
+	/*
+	 * Now that we have done all the register restores
+	 * we are now ready for the big DIVE to SAL Land
+	 */
+	ssm psr.ic;;
+	srlz.d;;
+	br.ret.sptk.many b0;;
+END(ia64_jump_to_sal)
+#endif /* CONFIG_HOTPLUG_CPU */
+
 #endif /* CONFIG_SMP */
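
The head.S macros stream SAL handoff state into a flat save area, and ia64_jump_to_sal reads it back in exactly the same order; the contract is purely positional. A small C sketch of that save/restore discipline, with an abbreviated, illustrative field list rather than the full SAL state:

	#include <assert.h>
	#include <string.h>

	/* Illustrative subset of the saved state; the real layout is the
	 * ordered list of SAVE_FROM_REG/st8 stores in the macros above. */
	struct sal_boot_state {
		unsigned long b0, b1, b2;	/* branch regs */
		unsigned long r1, r12, r13;	/* gp, sp, task pointer */
		unsigned long ar_pfs, ar_lc;
		unsigned long pred;
	};

	static unsigned char save_area[sizeof(struct sal_boot_state)];

	static void save_state(const struct sal_boot_state *s)
	{
		memcpy(save_area, s, sizeof(*s));	/* SAVE_FROM_REG side */
	}

	static void restore_state(struct sal_boot_state *s)
	{
		memcpy(s, save_area, sizeof(*s));	/* RESTORE_REG side */
	}

	int main(void)
	{
		struct sal_boot_state in = { .b0 = 0x100, .r13 = 0xdead }, out;

		save_state(&in);
		restore_state(&out);
		assert(out.b0 == in.b0 && out.r13 == in.r13);
		return 0;
	}

Because save and restore are two independent macro lists, any reordering on one side silently corrupts the other; that is why the diff keeps both lists byte-for-byte parallel.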
diff --git a/arch/ia64/kernel/iosapic.c b/arch/ia64/kernel/iosapic.c
index c15be5c38f56..88b014381df5 100644
--- a/arch/ia64/kernel/iosapic.c
+++ b/arch/ia64/kernel/iosapic.c
@@ -79,6 +79,7 @@
 #include <linux/smp.h>
 #include <linux/smp_lock.h>
 #include <linux/string.h>
+#include <linux/bootmem.h>
 
 #include <asm/delay.h>
 #include <asm/hw_irq.h>
@@ -98,19 +99,30 @@
 #define DBG(fmt...)
 #endif
 
+#define NR_PREALLOCATE_RTE_ENTRIES	(PAGE_SIZE / sizeof(struct iosapic_rte_info))
+#define RTE_PREALLOCATED	(1)
+
 static DEFINE_SPINLOCK(iosapic_lock);
 
 /* These tables map IA-64 vectors to the IOSAPIC pin that generates this vector. */
 
-static struct iosapic_intr_info {
+struct iosapic_rte_info {
+	struct list_head rte_list;	/* node in list of RTEs sharing the same vector */
 	char __iomem	*addr;		/* base address of IOSAPIC */
-	u32		low32;		/* current value of low word of Redirection table entry */
 	unsigned int	gsi_base;	/* first GSI assigned to this IOSAPIC */
-	char		rte_index;	/* IOSAPIC RTE index (-1 => not an IOSAPIC interrupt) */
+	char		rte_index;	/* IOSAPIC RTE index */
+	int		refcnt;		/* reference counter */
+	unsigned int	flags;		/* flags */
+} ____cacheline_aligned;
+
+static struct iosapic_intr_info {
+	struct list_head rtes;		/* RTEs using this vector (empty => not an IOSAPIC interrupt) */
+	int		count;		/* # of RTEs that shares this vector */
+	u32		low32;		/* current value of low word of Redirection table entry */
+	unsigned int	dest;		/* destination CPU physical ID */
 	unsigned char	dmode	: 3;	/* delivery mode (see iosapic.h) */
 	unsigned char	polarity: 1;	/* interrupt polarity (see iosapic.h) */
 	unsigned char	trigger	: 1;	/* trigger mode (see iosapic.h) */
-	int		refcnt;		/* reference counter */
 } iosapic_intr_info[IA64_NUM_VECTORS];
 
 static struct iosapic {
@@ -126,6 +138,8 @@ static int num_iosapic;
 
 static unsigned char pcat_compat __initdata;	/* 8259 compatibility flag */
 
+static int iosapic_kmalloc_ok;
+static LIST_HEAD(free_rte_list);
 
 /*
  * Find an IOSAPIC associated with a GSI
@@ -147,10 +161,12 @@ static inline int
 _gsi_to_vector (unsigned int gsi)
 {
 	struct iosapic_intr_info *info;
+	struct iosapic_rte_info *rte;
 
 	for (info = iosapic_intr_info; info < iosapic_intr_info + IA64_NUM_VECTORS; ++info)
-		if (info->gsi_base + info->rte_index == gsi)
-			return info - iosapic_intr_info;
+		list_for_each_entry(rte, &info->rtes, rte_list)
+			if (rte->gsi_base + rte->rte_index == gsi)
+				return info - iosapic_intr_info;
 	return -1;
 }
 
@@ -167,33 +183,52 @@ gsi_to_vector (unsigned int gsi)
 int
 gsi_to_irq (unsigned int gsi)
 {
+	unsigned long flags;
+	int irq;
 	/*
 	 * XXX fix me: this assumes an identity mapping vetween IA-64 vector and Linux irq
 	 * numbers...
 	 */
-	return _gsi_to_vector(gsi);
+	spin_lock_irqsave(&iosapic_lock, flags);
+	{
+		irq = _gsi_to_vector(gsi);
+	}
+	spin_unlock_irqrestore(&iosapic_lock, flags);
+
+	return irq;
+}
+
+static struct iosapic_rte_info *gsi_vector_to_rte(unsigned int gsi, unsigned int vec)
+{
+	struct iosapic_rte_info *rte;
+
+	list_for_each_entry(rte, &iosapic_intr_info[vec].rtes, rte_list)
+		if (rte->gsi_base + rte->rte_index == gsi)
+			return rte;
+	return NULL;
 }
 
 static void
-set_rte (unsigned int vector, unsigned int dest, int mask)
+set_rte (unsigned int gsi, unsigned int vector, unsigned int dest, int mask)
 {
 	unsigned long pol, trigger, dmode;
 	u32 low32, high32;
 	char __iomem *addr;
 	int rte_index;
 	char redir;
+	struct iosapic_rte_info *rte;
 
 	DBG(KERN_DEBUG"IOSAPIC: routing vector %d to 0x%x\n", vector, dest);
 
-	rte_index = iosapic_intr_info[vector].rte_index;
-	if (rte_index < 0)
+	rte = gsi_vector_to_rte(gsi, vector);
+	if (!rte)
 		return;		/* not an IOSAPIC interrupt */
 
-	addr = iosapic_intr_info[vector].addr;
+	rte_index = rte->rte_index;
+	addr = rte->addr;
 	pol     = iosapic_intr_info[vector].polarity;
 	trigger = iosapic_intr_info[vector].trigger;
 	dmode   = iosapic_intr_info[vector].dmode;
-	vector &= (~IA64_IRQ_REDIRECTED);
 
 	redir = (dmode == IOSAPIC_LOWEST_PRIORITY) ? 1 : 0;
 
@@ -221,6 +256,7 @@ set_rte (unsigned int vector, unsigned int dest, int mask)
 	iosapic_write(addr, IOSAPIC_RTE_HIGH(rte_index), high32);
 	iosapic_write(addr, IOSAPIC_RTE_LOW(rte_index), low32);
 	iosapic_intr_info[vector].low32 = low32;
+	iosapic_intr_info[vector].dest = dest;
 }
 
 static void
@@ -237,18 +273,20 @@ mask_irq (unsigned int irq)
 	u32 low32;
 	int rte_index;
 	ia64_vector vec = irq_to_vector(irq);
+	struct iosapic_rte_info *rte;
 
-	addr = iosapic_intr_info[vec].addr;
-	rte_index = iosapic_intr_info[vec].rte_index;
-
-	if (rte_index < 0)
+	if (list_empty(&iosapic_intr_info[vec].rtes))
 		return;			/* not an IOSAPIC interrupt! */
 
 	spin_lock_irqsave(&iosapic_lock, flags);
 	{
 		/* set only the mask bit */
 		low32 = iosapic_intr_info[vec].low32 |= IOSAPIC_MASK;
-		iosapic_write(addr, IOSAPIC_RTE_LOW(rte_index), low32);
+		list_for_each_entry(rte, &iosapic_intr_info[vec].rtes, rte_list) {
+			addr = rte->addr;
+			rte_index = rte->rte_index;
+			iosapic_write(addr, IOSAPIC_RTE_LOW(rte_index), low32);
+		}
 	}
 	spin_unlock_irqrestore(&iosapic_lock, flags);
 }
@@ -261,16 +299,19 @@ unmask_irq (unsigned int irq)
 	u32 low32;
 	int rte_index;
 	ia64_vector vec = irq_to_vector(irq);
+	struct iosapic_rte_info *rte;
 
-	addr = iosapic_intr_info[vec].addr;
-	rte_index = iosapic_intr_info[vec].rte_index;
-	if (rte_index < 0)
+	if (list_empty(&iosapic_intr_info[vec].rtes))
 		return;			/* not an IOSAPIC interrupt! */
 
 	spin_lock_irqsave(&iosapic_lock, flags);
 	{
 		low32 = iosapic_intr_info[vec].low32 &= ~IOSAPIC_MASK;
-		iosapic_write(addr, IOSAPIC_RTE_LOW(rte_index), low32);
+		list_for_each_entry(rte, &iosapic_intr_info[vec].rtes, rte_list) {
+			addr = rte->addr;
+			rte_index = rte->rte_index;
+			iosapic_write(addr, IOSAPIC_RTE_LOW(rte_index), low32);
+		}
 	}
 	spin_unlock_irqrestore(&iosapic_lock, flags);
 }
@@ -286,6 +327,7 @@ iosapic_set_affinity (unsigned int irq, cpumask_t mask)
 	char __iomem *addr;
 	int redir = (irq & IA64_IRQ_REDIRECTED) ? 1 : 0;
 	ia64_vector vec;
+	struct iosapic_rte_info *rte;
 
 	irq &= (~IA64_IRQ_REDIRECTED);
 	vec = irq_to_vector(irq);
@@ -295,10 +337,7 @@ iosapic_set_affinity (unsigned int irq, cpumask_t mask)
 
 	dest = cpu_physical_id(first_cpu(mask));
 
-	rte_index = iosapic_intr_info[vec].rte_index;
-	addr = iosapic_intr_info[vec].addr;
-
-	if (rte_index < 0)
+	if (list_empty(&iosapic_intr_info[vec].rtes))
 		return;			/* not an IOSAPIC interrupt */
 
 	set_irq_affinity_info(irq, dest, redir);
@@ -318,8 +357,13 @@ iosapic_set_affinity (unsigned int irq, cpumask_t mask)
 		low32 |= (IOSAPIC_FIXED << IOSAPIC_DELIVERY_SHIFT);
 
 		iosapic_intr_info[vec].low32 = low32;
-		iosapic_write(addr, IOSAPIC_RTE_HIGH(rte_index), high32);
-		iosapic_write(addr, IOSAPIC_RTE_LOW(rte_index), low32);
+		iosapic_intr_info[vec].dest = dest;
+		list_for_each_entry(rte, &iosapic_intr_info[vec].rtes, rte_list) {
+			addr = rte->addr;
+			rte_index = rte->rte_index;
+			iosapic_write(addr, IOSAPIC_RTE_HIGH(rte_index), high32);
+			iosapic_write(addr, IOSAPIC_RTE_LOW(rte_index), low32);
+		}
 	}
 	spin_unlock_irqrestore(&iosapic_lock, flags);
 #endif
@@ -340,9 +384,11 @@ static void
 iosapic_end_level_irq (unsigned int irq)
 {
 	ia64_vector vec = irq_to_vector(irq);
+	struct iosapic_rte_info *rte;
 
 	move_irq(irq);
-	iosapic_eoi(iosapic_intr_info[vec].addr, vec);
+	list_for_each_entry(rte, &iosapic_intr_info[vec].rtes, rte_list)
+		iosapic_eoi(rte->addr, vec);
 }
 
 #define iosapic_shutdown_level_irq	mask_irq
@@ -422,6 +468,34 @@ iosapic_version (char __iomem *addr)
 	return iosapic_read(addr, IOSAPIC_VERSION);
 }
 
+static int iosapic_find_sharable_vector (unsigned long trigger, unsigned long pol)
+{
+	int i, vector = -1, min_count = -1;
+	struct iosapic_intr_info *info;
+
+	/*
+	 * shared vectors for edge-triggered interrupts are not
+	 * supported yet
+	 */
+	if (trigger == IOSAPIC_EDGE)
+		return -1;
+
+	for (i = IA64_FIRST_DEVICE_VECTOR; i <= IA64_LAST_DEVICE_VECTOR; i++) {
+		info = &iosapic_intr_info[i];
+		if (info->trigger == trigger && info->polarity == pol &&
+		    (info->dmode == IOSAPIC_FIXED || info->dmode == IOSAPIC_LOWEST_PRIORITY)) {
+			if (min_count == -1 || info->count < min_count) {
+				vector = i;
+				min_count = info->count;
+			}
+		}
+	}
+	if (vector < 0)
+		panic("%s: out of interrupt vectors!\n", __FUNCTION__);
+
+	return vector;
+}
+
 /*
  * if the given vector is already owned by other,
  * assign a new vector for the other and make the vector available
@@ -431,19 +505,63 @@ iosapic_reassign_vector (int vector)
 {
 	int new_vector;
 
-	if (iosapic_intr_info[vector].rte_index >= 0 || iosapic_intr_info[vector].addr
-	    || iosapic_intr_info[vector].gsi_base || iosapic_intr_info[vector].dmode
-	    || iosapic_intr_info[vector].polarity || iosapic_intr_info[vector].trigger)
-	{
+	if (!list_empty(&iosapic_intr_info[vector].rtes)) {
 		new_vector = assign_irq_vector(AUTO_ASSIGN);
 		printk(KERN_INFO "Reassigning vector %d to %d\n", vector, new_vector);
 		memcpy(&iosapic_intr_info[new_vector], &iosapic_intr_info[vector],
 		       sizeof(struct iosapic_intr_info));
+		INIT_LIST_HEAD(&iosapic_intr_info[new_vector].rtes);
+		list_move(iosapic_intr_info[vector].rtes.next, &iosapic_intr_info[new_vector].rtes);
 		memset(&iosapic_intr_info[vector], 0, sizeof(struct iosapic_intr_info));
-		iosapic_intr_info[vector].rte_index = -1;
+		iosapic_intr_info[vector].low32 = IOSAPIC_MASK;
+		INIT_LIST_HEAD(&iosapic_intr_info[vector].rtes);
 	}
 }
 
+static struct iosapic_rte_info *iosapic_alloc_rte (void)
+{
+	int i;
+	struct iosapic_rte_info *rte;
+	int preallocated = 0;
+
+	if (!iosapic_kmalloc_ok && list_empty(&free_rte_list)) {
+		rte = alloc_bootmem(sizeof(struct iosapic_rte_info) * NR_PREALLOCATE_RTE_ENTRIES);
+		if (!rte)
+			return NULL;
+		for (i = 0; i < NR_PREALLOCATE_RTE_ENTRIES; i++, rte++)
+			list_add(&rte->rte_list, &free_rte_list);
+	}
+
+	if (!list_empty(&free_rte_list)) {
+		rte = list_entry(free_rte_list.next, struct iosapic_rte_info, rte_list);
+		list_del(&rte->rte_list);
+		preallocated++;
+	} else {
+		rte = kmalloc(sizeof(struct iosapic_rte_info), GFP_ATOMIC);
+		if (!rte)
+			return NULL;
+	}
+
+	memset(rte, 0, sizeof(struct iosapic_rte_info));
+	if (preallocated)
+		rte->flags |= RTE_PREALLOCATED;
+
+	return rte;
+}
+
+static void iosapic_free_rte (struct iosapic_rte_info *rte)
+{
+	if (rte->flags & RTE_PREALLOCATED)
+		list_add_tail(&rte->rte_list, &free_rte_list);
+	else
+		kfree(rte);
+}
+
+static inline int vector_is_shared (int vector)
+{
+	return (iosapic_intr_info[vector].count > 1);
+}
+
 static void
 register_intr (unsigned int gsi, int vector, unsigned char delivery,
 	       unsigned long polarity, unsigned long trigger)
@@ -454,6 +572,7 @@ register_intr (unsigned int gsi, int vector, unsigned char delivery,
 	int index;
 	unsigned long gsi_base;
 	void __iomem *iosapic_address;
+	struct iosapic_rte_info *rte;
 
 	index = find_iosapic(gsi);
 	if (index < 0) {
@@ -464,14 +583,33 @@ register_intr (unsigned int gsi, int vector, unsigned char delivery,
 	iosapic_address = iosapic_lists[index].addr;
 	gsi_base = iosapic_lists[index].gsi_base;
 
-	rte_index = gsi - gsi_base;
-	iosapic_intr_info[vector].rte_index = rte_index;
+	rte = gsi_vector_to_rte(gsi, vector);
+	if (!rte) {
+		rte = iosapic_alloc_rte();
+		if (!rte) {
+			printk(KERN_WARNING "%s: cannot allocate memory\n", __FUNCTION__);
+			return;
+		}
+
+		rte_index = gsi - gsi_base;
+		rte->rte_index = rte_index;
+		rte->addr = iosapic_address;
+		rte->gsi_base = gsi_base;
+		rte->refcnt++;
+		list_add_tail(&rte->rte_list, &iosapic_intr_info[vector].rtes);
+		iosapic_intr_info[vector].count++;
+	}
+	else if (vector_is_shared(vector)) {
+		struct iosapic_intr_info *info = &iosapic_intr_info[vector];
+		if (info->trigger != trigger || info->polarity != polarity) {
+			printk (KERN_WARNING "%s: cannot override the interrupt\n", __FUNCTION__);
+			return;
+		}
+	}
+
 	iosapic_intr_info[vector].polarity = polarity;
 	iosapic_intr_info[vector].dmode    = delivery;
-	iosapic_intr_info[vector].addr = iosapic_address;
-	iosapic_intr_info[vector].gsi_base = gsi_base;
 	iosapic_intr_info[vector].trigger  = trigger;
-	iosapic_intr_info[vector].refcnt++;
 
 	if (trigger == IOSAPIC_EDGE)
 		irq_type = &irq_type_iosapic_edge;
@@ -494,6 +632,13 @@ get_target_cpu (unsigned int gsi, int vector)
 	static int cpu = -1;
 
 	/*
+	 * In case of vector shared by multiple RTEs, all RTEs that
+	 * share the vector need to use the same destination CPU.
+	 */
+	if (!list_empty(&iosapic_intr_info[vector].rtes))
+		return iosapic_intr_info[vector].dest;
+
+	/*
 	 * If the platform supports redirection via XTP, let it
 	 * distribute interrupts.
 	 */
@@ -565,10 +710,12 @@ int
 iosapic_register_intr (unsigned int gsi,
 		       unsigned long polarity, unsigned long trigger)
 {
-	int vector;
+	int vector, mask = 1;
 	unsigned int dest;
 	unsigned long flags;
-
+	struct iosapic_rte_info *rte;
+	u32 low32;
+again:
 	/*
 	 * If this GSI has already been registered (i.e., it's a
 	 * shared interrupt, or we lost a race to register it),
@@ -578,19 +725,45 @@ iosapic_register_intr (unsigned int gsi,
 	{
 		vector = gsi_to_vector(gsi);
 		if (vector > 0) {
-			iosapic_intr_info[vector].refcnt++;
+			rte = gsi_vector_to_rte(gsi, vector);
+			rte->refcnt++;
 			spin_unlock_irqrestore(&iosapic_lock, flags);
 			return vector;
 		}
+	}
+	spin_unlock_irqrestore(&iosapic_lock, flags);
+
+	/* If vector is running out, we try to find a sharable vector */
+	vector = assign_irq_vector_nopanic(AUTO_ASSIGN);
+	if (vector < 0)
+		vector = iosapic_find_sharable_vector(trigger, polarity);
+
+	spin_lock_irqsave(&irq_descp(vector)->lock, flags);
+	spin_lock(&iosapic_lock);
+	{
+		if (gsi_to_vector(gsi) > 0) {
+			if (list_empty(&iosapic_intr_info[vector].rtes))
+				free_irq_vector(vector);
+			spin_unlock(&iosapic_lock);
+			spin_unlock_irqrestore(&irq_descp(vector)->lock, flags);
+			goto again;
+		}
 
-		vector = assign_irq_vector(AUTO_ASSIGN);
 		dest = get_target_cpu(gsi, vector);
 		register_intr(gsi, vector, IOSAPIC_LOWEST_PRIORITY,
 			      polarity, trigger);
 
-		set_rte(vector, dest, 1);
+		/*
+		 * If the vector is shared and already unmasked for
+		 * other interrupt sources, don't mask it.
+		 */
+		low32 = iosapic_intr_info[vector].low32;
+		if (vector_is_shared(vector) && !(low32 & IOSAPIC_MASK))
+			mask = 0;
+		set_rte(gsi, vector, dest, mask);
 	}
-	spin_unlock_irqrestore(&iosapic_lock, flags);
+	spin_unlock(&iosapic_lock);
+	spin_unlock_irqrestore(&irq_descp(vector)->lock, flags);
 
 	printk(KERN_INFO "GSI %u (%s, %s) -> CPU %d (0x%04x) vector %d\n",
 	       gsi, (trigger == IOSAPIC_EDGE ? "edge" : "level"),
@@ -607,8 +780,10 @@ iosapic_unregister_intr (unsigned int gsi)
 	unsigned long flags;
 	int irq, vector;
 	irq_desc_t *idesc;
-	int rte_index;
+	u32 low32;
 	unsigned long trigger, polarity;
+	unsigned int dest;
+	struct iosapic_rte_info *rte;
 
 	/*
 	 * If the irq associated with the gsi is not found,
@@ -627,54 +802,56 @@ iosapic_unregister_intr (unsigned int gsi)
 	spin_lock_irqsave(&idesc->lock, flags);
 	spin_lock(&iosapic_lock);
 	{
-		rte_index = iosapic_intr_info[vector].rte_index;
-		if (rte_index < 0) {
-			spin_unlock(&iosapic_lock);
-			spin_unlock_irqrestore(&idesc->lock, flags);
+		if ((rte = gsi_vector_to_rte(gsi, vector)) == NULL) {
 			printk(KERN_ERR "iosapic_unregister_intr(%u) unbalanced\n", gsi);
 			WARN_ON(1);
-			return;
+			goto out;
 		}
 
-		if (--iosapic_intr_info[vector].refcnt > 0) {
-			spin_unlock(&iosapic_lock);
-			spin_unlock_irqrestore(&idesc->lock, flags);
-			return;
-		}
+		if (--rte->refcnt > 0)
+			goto out;
 
-		/*
-		 * If interrupt handlers still exist on the irq
-		 * associated with the gsi, don't unregister the
-		 * interrupt.
-		 */
-		if (idesc->action) {
-			iosapic_intr_info[vector].refcnt++;
-			spin_unlock(&iosapic_lock);
-			spin_unlock_irqrestore(&idesc->lock, flags);
-			printk(KERN_WARNING "Cannot unregister GSI. IRQ %u is still in use.\n", irq);
-			return;
-		}
+		/* Mask the interrupt */
+		low32 = iosapic_intr_info[vector].low32 | IOSAPIC_MASK;
+		iosapic_write(rte->addr, IOSAPIC_RTE_LOW(rte->rte_index), low32);
 
-		/* Clear the interrupt controller descriptor. */
-		idesc->handler = &no_irq_type;
+		/* Remove the rte entry from the list */
+		list_del(&rte->rte_list);
+		iosapic_intr_info[vector].count--;
+		iosapic_free_rte(rte);
 
 		trigger	 = iosapic_intr_info[vector].trigger;
 		polarity = iosapic_intr_info[vector].polarity;
+		dest = iosapic_intr_info[vector].dest;
+		printk(KERN_INFO "GSI %u (%s, %s) -> CPU %d (0x%04x) vector %d unregistered\n",
+		       gsi, (trigger == IOSAPIC_EDGE ? "edge" : "level"),
+		       (polarity == IOSAPIC_POL_HIGH ? "high" : "low"),
+		       cpu_logical_id(dest), dest, vector);
+
+		if (list_empty(&iosapic_intr_info[vector].rtes)) {
+			/* Sanity check */
+			BUG_ON(iosapic_intr_info[vector].count);
+
+			/* Clear the interrupt controller descriptor */
+			idesc->handler = &no_irq_type;
+
+			/* Clear the interrupt information */
+			memset(&iosapic_intr_info[vector], 0, sizeof(struct iosapic_intr_info));
+			iosapic_intr_info[vector].low32 |= IOSAPIC_MASK;
+			INIT_LIST_HEAD(&iosapic_intr_info[vector].rtes);
+
+			if (idesc->action) {
+				printk(KERN_ERR "interrupt handlers still exist on IRQ %u\n", irq);
+				WARN_ON(1);
+			}
 
-		/* Clear the interrupt information. */
-		memset(&iosapic_intr_info[vector], 0, sizeof(struct iosapic_intr_info));
-		iosapic_intr_info[vector].rte_index = -1;	/* mark as unused */
+			/* Free the interrupt vector */
+			free_irq_vector(vector);
+		}
 	}
+ out:
 	spin_unlock(&iosapic_lock);
 	spin_unlock_irqrestore(&idesc->lock, flags);
-
-	/* Free the interrupt vector */
-	free_irq_vector(vector);
-
-	printk(KERN_INFO "GSI %u (%s, %s) -> vector %d unregisterd.\n",
-	       gsi, (trigger == IOSAPIC_EDGE ? "edge" : "level"),
-	       (polarity == IOSAPIC_POL_HIGH ? "high" : "low"),
-	       vector);
 }
 #endif /* CONFIG_ACPI_DEALLOCATE_IRQ */
 
@@ -724,7 +901,7 @@ iosapic_register_platform_intr (u32 int_type, unsigned int gsi,
 	       (polarity == IOSAPIC_POL_HIGH ? "high" : "low"),
 	       cpu_logical_id(dest), dest, vector);
 
-	set_rte(vector, dest, mask);
+	set_rte(gsi, vector, dest, mask);
 	return vector;
 }
 
@@ -750,7 +927,7 @@ iosapic_override_isa_irq (unsigned int isa_irq, unsigned int gsi,
 	       polarity == IOSAPIC_POL_HIGH ? "high" : "low",
 	       cpu_logical_id(dest), dest, vector);
 
-	set_rte(vector, dest, 1);
+	set_rte(gsi, vector, dest, 1);
 }
 
 void __init
@@ -758,8 +935,10 @@ iosapic_system_init (int system_pcat_compat)
 {
 	int vector;
 
-	for (vector = 0; vector < IA64_NUM_VECTORS; ++vector)
-		iosapic_intr_info[vector].rte_index = -1;	/* mark as unused */
+	for (vector = 0; vector < IA64_NUM_VECTORS; ++vector) {
+		iosapic_intr_info[vector].low32 = IOSAPIC_MASK;
+		INIT_LIST_HEAD(&iosapic_intr_info[vector].rtes);	/* mark as unused */
+	}
 
 	pcat_compat = system_pcat_compat;
 	if (pcat_compat) {
@@ -825,3 +1004,10 @@ map_iosapic_to_node(unsigned int gsi_base, int node)
 	return;
 }
 #endif
+
+static int __init iosapic_enable_kmalloc (void)
+{
+	iosapic_kmalloc_ok = 1;
+	return 0;
+}
+core_initcall (iosapic_enable_kmalloc);
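
The iosapic.c rework hangs a list of struct iosapic_rte_info off each vector so several redirection-table entries can share one vector, with a per-vector count and a per-RTE refcnt. A self-contained C sketch of that data structure; the miniature list implementation, sizes, and names are stand-ins, not the kernel's <linux/list.h>:

	#include <stdio.h>
	#include <stdlib.h>

	/* Minimal stand-in for the kernel's intrusive list. */
	struct list_head { struct list_head *next, *prev; };

	static void list_init(struct list_head *h) { h->next = h->prev = h; }
	static void list_add_tail(struct list_head *n, struct list_head *h)
	{
		n->prev = h->prev; n->next = h;
		h->prev->next = n; h->prev = n;
	}

	struct rte_info {
		struct list_head rte_list;	/* node in the per-vector list */
		unsigned int gsi_base, rte_index;
		int refcnt;
	};

	struct intr_info {
		struct list_head rtes;	/* all RTEs routed to this vector */
		int count;		/* how many share it */
	};

	#define NUM_VECTORS 8
	static struct intr_info intr_info[NUM_VECTORS];

	/* Mirror of register_intr(): attach one more GSI to a vector. */
	static void attach_gsi(int vector, unsigned int gsi_base, unsigned int index)
	{
		struct rte_info *rte = calloc(1, sizeof(*rte));

		rte->gsi_base = gsi_base;
		rte->rte_index = index;
		rte->refcnt = 1;
		list_add_tail(&rte->rte_list, &intr_info[vector].rtes);
		intr_info[vector].count++;
	}

	int main(void)
	{
		for (int v = 0; v < NUM_VECTORS; v++)
			list_init(&intr_info[v].rtes);

		attach_gsi(3, 16, 4);	/* GSI 20 */
		attach_gsi(3, 32, 1);	/* GSI 33 shares vector 3 */
		printf("vector 3 shared by %d RTEs\n", intr_info[3].count);
		return 0;
	}

An empty rtes list replaces the old rte_index == -1 "unused" sentinel, which is why mask/unmask/set_affinity now test list_empty() and then walk the list writing every member's RTE.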
diff --git a/arch/ia64/kernel/irq_ia64.c b/arch/ia64/kernel/irq_ia64.c
index 5ba06ebe355b..4fe60c7a2e90 100644
--- a/arch/ia64/kernel/irq_ia64.c
+++ b/arch/ia64/kernel/irq_ia64.c
@@ -63,20 +63,30 @@ EXPORT_SYMBOL(isa_irq_to_vector_map);
 static unsigned long ia64_vector_mask[BITS_TO_LONGS(IA64_NUM_DEVICE_VECTORS)];
 
 int
-assign_irq_vector (int irq)
+assign_irq_vector_nopanic (int irq)
 {
 	int pos, vector;
  again:
 	pos = find_first_zero_bit(ia64_vector_mask, IA64_NUM_DEVICE_VECTORS);
 	vector = IA64_FIRST_DEVICE_VECTOR + pos;
 	if (vector > IA64_LAST_DEVICE_VECTOR)
-		/* XXX could look for sharable vectors instead of panic'ing... */
-		panic("assign_irq_vector: out of interrupt vectors!");
+		return -1;
 	if (test_and_set_bit(pos, ia64_vector_mask))
 		goto again;
 	return vector;
 }
 
+int
+assign_irq_vector (int irq)
+{
+	int vector = assign_irq_vector_nopanic(irq);
+
+	if (vector < 0)
+		panic("assign_irq_vector: out of interrupt vectors!");
+
+	return vector;
+}
+
 void
 free_irq_vector (int vector)
 {
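
The irq_ia64.c change splits allocation into a non-panicking core and a thin panicking wrapper, so iosapic_register_intr() can fall back to a sharable vector instead of dying. The same pattern in plain C, with illustrative values:

	#include <stdio.h>
	#include <stdlib.h>

	static int vectors_left = 1;

	/* Core allocator: reports exhaustion, never aborts. */
	static int assign_vector_nopanic(void)
	{
		if (vectors_left == 0)
			return -1;	/* caller may try a sharable vector instead */
		return 100 + --vectors_left;
	}

	/* Legacy interface: same semantics as before the split. */
	static int assign_vector(void)
	{
		int v = assign_vector_nopanic();

		if (v < 0) {
			fprintf(stderr, "out of interrupt vectors!\n");
			abort();	/* stand-in for panic() */
		}
		return v;
	}

	int main(void)
	{
		printf("got vector %d\n", assign_vector());
		printf("nopanic retry: %d\n", assign_vector_nopanic());	/* -1, no abort */
		return 0;
	}

Existing callers keep the fail-hard behavior unchanged, while the one caller that has a recovery strategy opts into the soft failure.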
diff --git a/arch/ia64/kernel/mca_asm.S b/arch/ia64/kernel/mca_asm.S
index cf3f8014f9ad..ef3fd7265b67 100644
--- a/arch/ia64/kernel/mca_asm.S
+++ b/arch/ia64/kernel/mca_asm.S
@@ -110,46 +110,19 @@
 	.global ia64_os_mca_dispatch_end
 	.global ia64_sal_to_os_handoff_state
 	.global ia64_os_to_sal_handoff_state
+	.global ia64_do_tlb_purge
 
 	.text
 	.align 16
 
-ia64_os_mca_dispatch:
-
-	// Serialize all MCA processing
-	mov	r3=1;;
-	LOAD_PHYSICAL(p0,r2,ia64_mca_serialize);;
-ia64_os_mca_spin:
-	xchg8	r4=[r2],r3;;
-	cmp.ne	p6,p0=r4,r0
-(p6)	br	ia64_os_mca_spin
-
-	// Save the SAL to OS MCA handoff state as defined
-	// by SAL SPEC 3.0
-	// NOTE : The order in which the state gets saved
-	//	  is dependent on the way the C-structure
-	//	  for ia64_mca_sal_to_os_state_t has been
-	//	  defined in include/asm/mca.h
-	SAL_TO_OS_MCA_HANDOFF_STATE_SAVE(r2)
-	;;
-
-	// LOG PROCESSOR STATE INFO FROM HERE ON..
-begin_os_mca_dump:
-	br	ia64_os_mca_proc_state_dump;;
-
-ia64_os_mca_done_dump:
-
-	LOAD_PHYSICAL(p0,r16,ia64_sal_to_os_handoff_state+56)
-	;;
-	ld8	r18=[r16]	// Get processor state parameter on existing PALE_CHECK.
-	;;
-	tbit.nz	p6,p7=r18,60
-(p7)	br.spnt	done_tlb_purge_and_reload
-
-	// The following code purges TC and TR entries. Then reload all TC entries.
-	// Purge percpu data TC entries.
-begin_tlb_purge_and_reload:
+/*
+ * Just the TLB purge part is moved to a separate function
+ * so we can re-use the code for cpu hotplug code as well
+ * Caller should now setup b1, so we can branch once the
+ * tlb flush is complete.
+ */
 
+ia64_do_tlb_purge:
 #define O(member)	IA64_CPUINFO_##member##_OFFSET
 
 	GET_THIS_PADDR(r2, cpu_info)	// load phys addr of cpu_info into r2
@@ -230,6 +203,51 @@ begin_tlb_purge_and_reload:
 	;;
 	srlz.i
 	;;
+	// Now branch away to caller.
+	br.sptk.many b1
+	;;
+
+ia64_os_mca_dispatch:
+
+	// Serialize all MCA processing
+	mov	r3=1;;
+	LOAD_PHYSICAL(p0,r2,ia64_mca_serialize);;
+ia64_os_mca_spin:
+	xchg8	r4=[r2],r3;;
+	cmp.ne	p6,p0=r4,r0
+(p6)	br	ia64_os_mca_spin
+
+	// Save the SAL to OS MCA handoff state as defined
+	// by SAL SPEC 3.0
+	// NOTE : The order in which the state gets saved
+	//	  is dependent on the way the C-structure
+	//	  for ia64_mca_sal_to_os_state_t has been
+	//	  defined in include/asm/mca.h
+	SAL_TO_OS_MCA_HANDOFF_STATE_SAVE(r2)
+	;;
+
+	// LOG PROCESSOR STATE INFO FROM HERE ON..
+begin_os_mca_dump:
+	br	ia64_os_mca_proc_state_dump;;
+
+ia64_os_mca_done_dump:
+
+	LOAD_PHYSICAL(p0,r16,ia64_sal_to_os_handoff_state+56)
+	;;
+	ld8	r18=[r16]	// Get processor state parameter on existing PALE_CHECK.
+	;;
+	tbit.nz	p6,p7=r18,60
+(p7)	br.spnt	done_tlb_purge_and_reload
+
+	// The following code purges TC and TR entries. Then reload all TC entries.
+	// Purge percpu data TC entries.
+begin_tlb_purge_and_reload:
+	movl r18=ia64_reload_tr;;
+	LOAD_PHYSICAL(p0,r18,ia64_reload_tr);;
+	mov b1=r18;;
+	br.sptk.many ia64_do_tlb_purge;;
+
+ia64_reload_tr:
 	// Finally reload the TR registers.
 	// 1. Reload DTR/ITR registers for kernel.
 	mov r18=KERNEL_TR_PAGE_SHIFT<<2
diff --git a/arch/ia64/kernel/mca_drv.c b/arch/ia64/kernel/mca_drv.c
index ab478172c349..abc0113a821d 100644
--- a/arch/ia64/kernel/mca_drv.c
+++ b/arch/ia64/kernel/mca_drv.c
@@ -132,8 +132,7 @@ mca_handler_bh(unsigned long paddr)
 	spin_unlock(&mca_bh_lock);
 
 	/* This process is about to be killed itself */
-	force_sig(SIGKILL, current);
-	schedule();
+	do_exit(SIGKILL);
 }
 
 /**
@@ -439,6 +438,7 @@ recover_from_read_error(slidx_table_t *slidx, peidx_table_t *peidx, pal_bus_chec
 	psr2 = (struct ia64_psr *)&pmsa->pmsa_ipsr;
 	psr2->cpl = 0;
 	psr2->ri  = 0;
+	psr2->i  = 0;
 
 	return 1;
 }
diff --git a/arch/ia64/kernel/mca_drv_asm.S b/arch/ia64/kernel/mca_drv_asm.S
index bcfa05acc561..2d7e0217638d 100644
--- a/arch/ia64/kernel/mca_drv_asm.S
+++ b/arch/ia64/kernel/mca_drv_asm.S
@@ -10,6 +10,7 @@
 
 #include <asm/asmmacro.h>
 #include <asm/processor.h>
+#include <asm/ptrace.h>
 
 GLOBAL_ENTRY(mca_handler_bhhook)
 	invala				// clear RSE ?
@@ -20,12 +21,21 @@ GLOBAL_ENTRY(mca_handler_bhhook)
 	;;
 	alloc		r16=ar.pfs,0,2,1,0	// make a new frame
 	;;
+	mov		ar.rsc=0
+	;;
 	mov		r13=IA64_KR(CURRENT)	// current task pointer
 	;;
-	adds		r12=IA64_TASK_THREAD_KSP_OFFSET,r13
+	mov		r2=r13
+	;;
+	addl		r22=IA64_RBS_OFFSET,r2
+	;;
+	mov		ar.bspstore=r22
 	;;
-	ld8		r12=[r12]		// stack pointer
+	addl		sp=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r2
 	;;
+	adds		r2=IA64_TASK_THREAD_ON_USTACK_OFFSET,r13
+	;;
+	st1		[r2]=r0		// clear current->thread.on_ustack flag
 	mov		loc0=r16
 	movl		loc1=mca_handler_bh	// recovery C function
 	;;
@@ -34,7 +44,9 @@ GLOBAL_ENTRY(mca_handler_bhhook)
 	;;
 	mov		loc1=rp
 	;;
-	br.call.sptk.many rp=b6		// not return ...
+	ssm		psr.i
+	;;
+	br.call.sptk.many rp=b6		// does not return ...
 	;;
 	mov		ar.pfs=loc0
 	mov		rp=loc1
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index 71147be3279c..71c101601e3e 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -480,14 +480,6 @@ typedef struct {
 #define PFM_CMD_ARG_MANY	-1 /* cannot be zero */
 
 typedef struct {
-	int	debug;		/* turn on/off debugging via syslog */
-	int	debug_ovfl;	/* turn on/off debug printk in overflow handler */
-	int	fastctxsw;	/* turn on/off fast (unsecure) ctxsw */
-	int	expert_mode;	/* turn on/off value checking */
-	int	debug_pfm_read;
-} pfm_sysctl_t;
-
-typedef struct {
 	unsigned long pfm_spurious_ovfl_intr_count;	/* keep track of spurious ovfl interrupts */
 	unsigned long pfm_replay_ovfl_intr_count;	/* keep track of replayed ovfl interrupts */
 	unsigned long pfm_ovfl_intr_count;		/* keep track of ovfl interrupts */
@@ -514,8 +506,8 @@ static LIST_HEAD(pfm_buffer_fmt_list);
 static pmu_config_t		*pmu_conf;
 
 /* sysctl() controls */
-static pfm_sysctl_t pfm_sysctl;
-int pfm_debug_var;
+pfm_sysctl_t pfm_sysctl;
+EXPORT_SYMBOL(pfm_sysctl);
 
 static ctl_table pfm_ctl_table[]={
 	{1, "debug", &pfm_sysctl.debug, sizeof(int), 0666, NULL, &proc_dointvec, NULL,},
@@ -1273,6 +1265,8 @@ out:
 }
 EXPORT_SYMBOL(pfm_unregister_buffer_fmt);
 
+extern void update_pal_halt_status(int);
+
 static int
 pfm_reserve_session(struct task_struct *task, int is_syswide, unsigned int cpu)
 {
@@ -1319,6 +1313,11 @@ pfm_reserve_session(struct task_struct *task, int is_syswide, unsigned int cpu)
 		is_syswide,
 		cpu));
 
+	/*
+	 * disable default_idle() to go to PAL_HALT
+	 */
+	update_pal_halt_status(0);
+
 	UNLOCK_PFS(flags);
 
 	return 0;
@@ -1374,6 +1373,12 @@ pfm_unreserve_session(pfm_context_t *ctx, int is_syswide, unsigned int cpu)
 		is_syswide,
 		cpu));
 
+	/*
+	 * if possible, enable default_idle() to go into PAL_HALT
+	 */
+	if (pfm_sessions.pfs_task_sessions == 0 && pfm_sessions.pfs_sys_sessions == 0)
+		update_pal_halt_status(1);
+
 	UNLOCK_PFS(flags);
 
 	return 0;
@@ -1576,7 +1581,7 @@ pfm_read(struct file *filp, char __user *buf, size_t size, loff_t *ppos)
 		goto abort_locked;
 	}
 
-	DPRINT(("[%d] fd=%d type=%d\n", current->pid, msg->pfm_gen_msg.msg_ctx_fd, msg->pfm_gen_msg.msg_type));
+	DPRINT(("fd=%d type=%d\n", msg->pfm_gen_msg.msg_ctx_fd, msg->pfm_gen_msg.msg_type));
 
 	ret = -EFAULT;
 	if(copy_to_user(buf, msg, sizeof(pfm_msg_t)) == 0) ret = sizeof(pfm_msg_t);
@@ -3695,8 +3700,6 @@ pfm_debug(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
 
 	pfm_sysctl.debug = m == 0 ? 0 : 1;
 
-	pfm_debug_var = pfm_sysctl.debug;
-
 	printk(KERN_INFO "perfmon debugging %s (timing reset)\n", pfm_sysctl.debug ? "on" : "off");
 
 	if (m == 0) {
@@ -4212,7 +4215,7 @@ pfm_context_load(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
 		DPRINT(("cannot load to [%d], invalid ctx_state=%d\n",
 			req->load_pid,
 			ctx->ctx_state));
-		return -EINVAL;
+		return -EBUSY;
 	}
 
 	DPRINT(("load_pid [%d] using_dbreg=%d\n", req->load_pid, ctx->ctx_fl_using_dbreg));
@@ -4714,16 +4717,26 @@ recheck:
4714 if (task == current || ctx->ctx_fl_system) return 0; 4717 if (task == current || ctx->ctx_fl_system) return 0;
4715 4718
4716 /* 4719 /*
4717 * if context is UNLOADED we are safe to go 4720 * we are monitoring another thread
4718 */ 4721 */
4719 if (state == PFM_CTX_UNLOADED) return 0; 4722 switch(state) {
4720 4723 case PFM_CTX_UNLOADED:
4721 /* 4724 /*
4722 * no command can operate on a zombie context 4725 * if context is UNLOADED we are safe to go
4723 */ 4726 */
4724 if (state == PFM_CTX_ZOMBIE) { 4727 return 0;
4725 DPRINT(("cmd %d state zombie cannot operate on context\n", cmd)); 4728 case PFM_CTX_ZOMBIE:
4726 return -EINVAL; 4729 /*
4730 * no command can operate on a zombie context
4731 */
4732 DPRINT(("cmd %d state zombie cannot operate on context\n", cmd));
4733 return -EINVAL;
4734 case PFM_CTX_MASKED:
4735 /*
4736 * PMU state has been saved to software even though
4737 * the thread may still be running.
4738 */
4739 if (cmd != PFM_UNLOAD_CONTEXT) return 0;
4727 } 4740 }
4728 4741
4729 /* 4742 /*
@@ -4996,13 +5009,21 @@ pfm_context_force_terminate(pfm_context_t *ctx, struct pt_regs *regs)
4996} 5009}
4997 5010
4998static int pfm_ovfl_notify_user(pfm_context_t *ctx, unsigned long ovfl_pmds); 5011static int pfm_ovfl_notify_user(pfm_context_t *ctx, unsigned long ovfl_pmds);
4999 5012 /*
5013 * pfm_handle_work() can be called with interrupts enabled
5014 * (TIF_NEED_RESCHED) or disabled. The down_interruptible
 5015 * call may sleep; therefore we must re-enable interrupts
5016 * to avoid deadlocks. It is safe to do so because this function
5017 * is called ONLY when returning to user level (PUStk=1), in which case
5018 * there is no risk of kernel stack overflow due to deep
5019 * interrupt nesting.
5020 */
5000void 5021void
5001pfm_handle_work(void) 5022pfm_handle_work(void)
5002{ 5023{
5003 pfm_context_t *ctx; 5024 pfm_context_t *ctx;
5004 struct pt_regs *regs; 5025 struct pt_regs *regs;
5005 unsigned long flags; 5026 unsigned long flags, dummy_flags;
5006 unsigned long ovfl_regs; 5027 unsigned long ovfl_regs;
5007 unsigned int reason; 5028 unsigned int reason;
5008 int ret; 5029 int ret;
@@ -5039,18 +5060,15 @@ pfm_handle_work(void)
5039 //if (CTX_OVFL_NOBLOCK(ctx)) goto skip_blocking; 5060 //if (CTX_OVFL_NOBLOCK(ctx)) goto skip_blocking;
5040 if (reason == PFM_TRAP_REASON_RESET) goto skip_blocking; 5061 if (reason == PFM_TRAP_REASON_RESET) goto skip_blocking;
5041 5062
5063 /*
5064 * restore interrupt mask to what it was on entry.
 5065 * Could be enabled/disabled.
5066 */
5042 UNPROTECT_CTX(ctx, flags); 5067 UNPROTECT_CTX(ctx, flags);
5043 5068
5044 /* 5069 /*
5045 * pfm_handle_work() is currently called with interrupts disabled. 5070 * force interrupt enable because of down_interruptible()
5046 * The down_interruptible call may sleep, therefore we 5071 */
5047 * must re-enable interrupts to avoid deadlocks. It is
5048 * safe to do so because this function is called ONLY
5049 * when returning to user level (PUStk=1), in which case
5050 * there is no risk of kernel stack overflow due to deep
5051 * interrupt nesting.
5052 */
5053 BUG_ON(flags & IA64_PSR_I);
5054 local_irq_enable(); 5072 local_irq_enable();
5055 5073
5056 DPRINT(("before block sleeping\n")); 5074 DPRINT(("before block sleeping\n"));
@@ -5064,12 +5082,12 @@ pfm_handle_work(void)
5064 DPRINT(("after block sleeping ret=%d\n", ret)); 5082 DPRINT(("after block sleeping ret=%d\n", ret));
5065 5083
5066 /* 5084 /*
5067 * disable interrupts to restore state we had upon entering 5085 * lock context and mask interrupts again
5068 * this function 5086 * We save flags into a dummy because we may have
 5087 * altered the interrupt mask since entering this
5088 * function.
5069 */ 5089 */
5070 local_irq_disable(); 5090 PROTECT_CTX(ctx, dummy_flags);
5071
5072 PROTECT_CTX(ctx, flags);
5073 5091
5074 /* 5092 /*
5075 * we need to read the ovfl_regs only after wake-up 5093 * we need to read the ovfl_regs only after wake-up
@@ -5095,7 +5113,9 @@ skip_blocking:
5095 ctx->ctx_ovfl_regs[0] = 0UL; 5113 ctx->ctx_ovfl_regs[0] = 0UL;
5096 5114
5097nothing_to_do: 5115nothing_to_do:
5098 5116 /*
5117 * restore flags as they were upon entry
5118 */
5099 UNPROTECT_CTX(ctx, flags); 5119 UNPROTECT_CTX(ctx, flags);
5100} 5120}
5101 5121
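The pfm_handle_work() changes above hinge on one detail: the function can now be entered with interrupts either on or off, so the final UNPROTECT_CTX() must restore the mask captured on entry, while the re-lock after blocking saves into a throwaway dummy_flags. A small user-space model of that save / force-enable / discard pattern (lock_ctx and unlock_ctx are stand-ins for PROTECT_CTX/UNPROTECT_CTX, not real APIs):

/* Sketch of the flag handling above (user-space model, not kernel
 * code): the lock is dropped with the entry state saved in 'flags',
 * interrupts are force-enabled for the blocking call, and the
 * re-lock saves into a throwaway variable so the final unlock can
 * restore the state captured on entry. */
#include <stdio.h>

static unsigned long irq_state;         /* 1 = enabled */

static unsigned long lock_ctx(void)     /* models PROTECT_CTX() */
{
        unsigned long saved = irq_state;
        irq_state = 0;                  /* taking the lock masks interrupts */
        return saved;
}

static void unlock_ctx(unsigned long saved)  /* models UNPROTECT_CTX() */
{
        irq_state = saved;
}

static void handle_work(void)
{
        unsigned long flags, dummy_flags;

        flags = lock_ctx();             /* entry state may be 0 or 1 */
        unlock_ctx(flags);              /* restore pre-entry state */
        irq_state = 1;                  /* force-enable for the blocking call */
        /* ... block here, e.g. down_interruptible() ... */
        dummy_flags = lock_ctx();       /* re-lock; current state is unknown */
        (void)dummy_flags;              /* deliberately discarded */
        unlock_ctx(flags);              /* exit with the state we entered with */
}

int main(void)
{
        irq_state = 0;                  /* pretend we entered with irqs off */
        handle_work();
        printf("entered off -> exit state %lu\n", irq_state);  /* 0 */
        irq_state = 1;
        handle_work();
        printf("entered on  -> exit state %lu\n", irq_state);  /* 1 */
        return 0;
}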
diff --git a/arch/ia64/kernel/perfmon_default_smpl.c b/arch/ia64/kernel/perfmon_default_smpl.c
index 965d29004555..344941db0a9e 100644
--- a/arch/ia64/kernel/perfmon_default_smpl.c
+++ b/arch/ia64/kernel/perfmon_default_smpl.c
@@ -20,24 +20,17 @@ MODULE_AUTHOR("Stephane Eranian <eranian@hpl.hp.com>");
20MODULE_DESCRIPTION("perfmon default sampling format"); 20MODULE_DESCRIPTION("perfmon default sampling format");
21MODULE_LICENSE("GPL"); 21MODULE_LICENSE("GPL");
22 22
23MODULE_PARM(debug, "i");
24MODULE_PARM_DESC(debug, "debug");
25
26MODULE_PARM(debug_ovfl, "i");
27MODULE_PARM_DESC(debug_ovfl, "debug ovfl");
28
29
30#define DEFAULT_DEBUG 1 23#define DEFAULT_DEBUG 1
31 24
32#ifdef DEFAULT_DEBUG 25#ifdef DEFAULT_DEBUG
33#define DPRINT(a) \ 26#define DPRINT(a) \
34 do { \ 27 do { \
35 if (unlikely(debug >0)) { printk("%s.%d: CPU%d ", __FUNCTION__, __LINE__, smp_processor_id()); printk a; } \ 28 if (unlikely(pfm_sysctl.debug >0)) { printk("%s.%d: CPU%d ", __FUNCTION__, __LINE__, smp_processor_id()); printk a; } \
36 } while (0) 29 } while (0)
37 30
38#define DPRINT_ovfl(a) \ 31#define DPRINT_ovfl(a) \
39 do { \ 32 do { \
40 if (unlikely(debug_ovfl >0)) { printk("%s.%d: CPU%d ", __FUNCTION__, __LINE__, smp_processor_id()); printk a; } \ 33 if (unlikely(pfm_sysctl.debug > 0 && pfm_sysctl.debug_ovfl >0)) { printk("%s.%d: CPU%d ", __FUNCTION__, __LINE__, smp_processor_id()); printk a; } \
41 } while (0) 34 } while (0)
42 35
43#else 36#else
@@ -45,8 +38,6 @@ MODULE_PARM_DESC(debug_ovfl, "debug ovfl");
45#define DPRINT_ovfl(a) 38#define DPRINT_ovfl(a)
46#endif 39#endif
47 40
48static int debug, debug_ovfl;
49
50static int 41static int
51default_validate(struct task_struct *task, unsigned int flags, int cpu, void *data) 42default_validate(struct task_struct *task, unsigned int flags, int cpu, void *data)
52{ 43{
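With the MODULE_PARM knobs gone, the sampling-format module keys its DPRINT macros off the pfm_sysctl structure now exported by the perfmon core, so a single sysctl controls debugging for the core and all formats alike. A toy version of that arrangement (GNU-style variadic macro; the program is only a sketch):

/* Toy model of the change above: the core exports one debug-control
 * structure and plug-in code keys its logging off it, instead of
 * carrying private module parameters.  Illustrative only. */
#include <stdio.h>

typedef struct { int debug; int debug_ovfl; } pfm_sysctl_t;

pfm_sysctl_t pfm_sysctl;        /* in the kernel: exported by perfmon.c */

#define DPRINT(fmt, ...) \
        do { if (pfm_sysctl.debug > 0) printf(fmt, ##__VA_ARGS__); } while (0)

int main(void)
{
        DPRINT("suppressed while pfm_sysctl.debug == 0\n");
        pfm_sysctl.debug = 1;   /* one central knob flips everything */
        DPRINT("visible once the sysctl is set\n");
        return 0;
}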
diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c
index 91293388dd29..ebb71f3d6d19 100644
--- a/arch/ia64/kernel/process.c
+++ b/arch/ia64/kernel/process.c
@@ -3,6 +3,7 @@
3 * 3 *
4 * Copyright (C) 1998-2003 Hewlett-Packard Co 4 * Copyright (C) 1998-2003 Hewlett-Packard Co
5 * David Mosberger-Tang <davidm@hpl.hp.com> 5 * David Mosberger-Tang <davidm@hpl.hp.com>
6 * 04/11/17 Ashok Raj <ashok.raj@intel.com> Added CPU Hotplug Support
6 */ 7 */
7#define __KERNEL_SYSCALLS__ /* see <asm/unistd.h> */ 8#define __KERNEL_SYSCALLS__ /* see <asm/unistd.h> */
8#include <linux/config.h> 9#include <linux/config.h>
@@ -49,7 +50,7 @@
49#include "sigframe.h" 50#include "sigframe.h"
50 51
51void (*ia64_mark_idle)(int); 52void (*ia64_mark_idle)(int);
52static cpumask_t cpu_idle_map; 53static DEFINE_PER_CPU(unsigned int, cpu_idle_state);
53 54
54unsigned long boot_option_idle_override = 0; 55unsigned long boot_option_idle_override = 0;
55EXPORT_SYMBOL(boot_option_idle_override); 56EXPORT_SYMBOL(boot_option_idle_override);
@@ -172,7 +173,9 @@ do_notify_resume_user (sigset_t *oldset, struct sigscratch *scr, long in_syscall
172 ia64_do_signal(oldset, scr, in_syscall); 173 ia64_do_signal(oldset, scr, in_syscall);
173} 174}
174 175
175static int pal_halt = 1; 176static int pal_halt = 1;
177static int can_do_pal_halt = 1;
178
176static int __init nohalt_setup(char * str) 179static int __init nohalt_setup(char * str)
177{ 180{
178 pal_halt = 0; 181 pal_halt = 0;
@@ -180,16 +183,20 @@ static int __init nohalt_setup(char * str)
180} 183}
181__setup("nohalt", nohalt_setup); 184__setup("nohalt", nohalt_setup);
182 185
186void
187update_pal_halt_status(int status)
188{
189 can_do_pal_halt = pal_halt && status;
190}
191
183/* 192/*
184 * We use this if we don't have any better idle routine.. 193 * We use this if we don't have any better idle routine..
185 */ 194 */
186void 195void
187default_idle (void) 196default_idle (void)
188{ 197{
189 unsigned long pmu_active = ia64_getreg(_IA64_REG_PSR) & (IA64_PSR_PP | IA64_PSR_UP);
190
191 while (!need_resched()) 198 while (!need_resched())
192 if (pal_halt && !pmu_active) 199 if (can_do_pal_halt)
193 safe_halt(); 200 safe_halt();
194 else 201 else
195 cpu_relax(); 202 cpu_relax();
@@ -200,27 +207,20 @@ default_idle (void)
200static inline void play_dead(void) 207static inline void play_dead(void)
201{ 208{
202 extern void ia64_cpu_local_tick (void); 209 extern void ia64_cpu_local_tick (void);
210 unsigned int this_cpu = smp_processor_id();
211
203 /* Ack it */ 212 /* Ack it */
204 __get_cpu_var(cpu_state) = CPU_DEAD; 213 __get_cpu_var(cpu_state) = CPU_DEAD;
205 214
206 /* We shouldn't have to disable interrupts while dead, but
207 * some interrupts just don't seem to go away, and this makes
208 * it "work" for testing purposes. */
209 max_xtp(); 215 max_xtp();
210 local_irq_disable(); 216 local_irq_disable();
211 /* Death loop */ 217 idle_task_exit();
212 while (__get_cpu_var(cpu_state) != CPU_UP_PREPARE) 218 ia64_jump_to_sal(&sal_boot_rendez_state[this_cpu]);
213 cpu_relax();
214
215 /* 219 /*
216 * Enable timer interrupts from now on 220 * The above is a point of no-return, the processor is
217 * Not required if we put processor in SAL_BOOT_RENDEZ mode. 221 * expected to be in SAL loop now.
218 */ 222 */
219 local_flush_tlb_all(); 223 BUG();
220 cpu_set(smp_processor_id(), cpu_online_map);
221 wmb();
222 ia64_cpu_local_tick ();
223 local_irq_enable();
224} 224}
225#else 225#else
226static inline void play_dead(void) 226static inline void play_dead(void)
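play_dead() no longer spins in a death loop waiting to be reawakened; it hands the processor back to the SAL rendezvous via ia64_jump_to_sal() and marks the next line unreachable with BUG(). The shape of that point-of-no-return contract, modeled in portable C (exit() standing in for the firmware hand-off):

/* Shape of the new offline path (user-space model): the hand-off
 * function must not return; reaching the line after it is a bug. */
#include <stdio.h>
#include <stdlib.h>

static void jump_to_firmware(void) __attribute__((noreturn));

static void jump_to_firmware(void)
{
        /* models ia64_jump_to_sal(): control never comes back */
        exit(0);
}

int main(void)
{
        jump_to_firmware();
        /* models the BUG(): unreachable unless the jump failed */
        fprintf(stderr, "BUG: returned from firmware hand-off\n");
        abort();
}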
@@ -229,20 +229,31 @@ static inline void play_dead(void)
229} 229}
230#endif /* CONFIG_HOTPLUG_CPU */ 230#endif /* CONFIG_HOTPLUG_CPU */
231 231
232
233void cpu_idle_wait(void) 232void cpu_idle_wait(void)
234{ 233{
235 int cpu; 234 unsigned int cpu, this_cpu = get_cpu();
236 cpumask_t map; 235 cpumask_t map;
236
237 set_cpus_allowed(current, cpumask_of_cpu(this_cpu));
238 put_cpu();
237 239
238 for_each_online_cpu(cpu) 240 cpus_clear(map);
239 cpu_set(cpu, cpu_idle_map); 241 for_each_online_cpu(cpu) {
242 per_cpu(cpu_idle_state, cpu) = 1;
243 cpu_set(cpu, map);
244 }
240 245
241 wmb(); 246 __get_cpu_var(cpu_idle_state) = 0;
242 do { 247
243 ssleep(1); 248 wmb();
244 cpus_and(map, cpu_idle_map, cpu_online_map); 249 do {
245 } while (!cpus_empty(map)); 250 ssleep(1);
251 for_each_online_cpu(cpu) {
252 if (cpu_isset(cpu, map) && !per_cpu(cpu_idle_state, cpu))
253 cpu_clear(cpu, map);
254 }
255 cpus_and(map, map, cpu_online_map);
256 } while (!cpus_empty(map));
246} 257}
247EXPORT_SYMBOL_GPL(cpu_idle_wait); 258EXPORT_SYMBOL_GPL(cpu_idle_wait);
248 259
@@ -250,7 +261,6 @@ void __attribute__((noreturn))
250cpu_idle (void) 261cpu_idle (void)
251{ 262{
252 void (*mark_idle)(int) = ia64_mark_idle; 263 void (*mark_idle)(int) = ia64_mark_idle;
253 int cpu = smp_processor_id();
254 264
255 /* endless idle loop with no priority at all */ 265 /* endless idle loop with no priority at all */
256 while (1) { 266 while (1) {
@@ -261,12 +271,13 @@ cpu_idle (void)
261 while (!need_resched()) { 271 while (!need_resched()) {
262 void (*idle)(void); 272 void (*idle)(void);
263 273
274 if (__get_cpu_var(cpu_idle_state))
275 __get_cpu_var(cpu_idle_state) = 0;
276
277 rmb();
264 if (mark_idle) 278 if (mark_idle)
265 (*mark_idle)(1); 279 (*mark_idle)(1);
266 280
267 if (cpu_isset(cpu, cpu_idle_map))
268 cpu_clear(cpu, cpu_idle_map);
269 rmb();
270 idle = pm_idle; 281 idle = pm_idle;
271 if (!idle) 282 if (!idle)
272 idle = default_idle; 283 idle = default_idle;
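The rewritten cpu_idle_wait() replaces the shared cpumask with a per-cpu flag: the waiter sets every online CPU's flag, and each idle loop clears its own on its next pass, so when all flags are gone every CPU has demonstrably run through the (possibly just-updated) idle routine. A runnable user-space model of that handshake using threads in place of CPUs (compile with -pthread; all names are illustrative):

/* User-space model of the cpu_idle_wait() handshake above: the
 * waiter sets one flag per "cpu", each idle thread clears its own
 * flag on its next loop iteration, and the waiter polls until all
 * flags are gone. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

#define NCPUS 4

static atomic_int cpu_idle_state[NCPUS];
static atomic_int stop;

static void *idle_loop(void *arg)
{
        int cpu = (int)(long)arg;

        while (!atomic_load(&stop)) {
                if (atomic_load(&cpu_idle_state[cpu]))
                        atomic_store(&cpu_idle_state[cpu], 0);  /* ack */
                usleep(1000);   /* stands in for the real idle routine */
        }
        return NULL;
}

static void cpu_idle_wait(void)
{
        int cpu, pending;

        for (cpu = 0; cpu < NCPUS; cpu++)
                atomic_store(&cpu_idle_state[cpu], 1);
        do {
                usleep(10000);  /* models the ssleep(1) poll */
                pending = 0;
                for (cpu = 0; cpu < NCPUS; cpu++)
                        pending += atomic_load(&cpu_idle_state[cpu]);
        } while (pending);
}

int main(void)
{
        pthread_t t[NCPUS];
        int i;

        for (i = 0; i < NCPUS; i++)
                pthread_create(&t[i], NULL, idle_loop, (void *)(long)i);
        cpu_idle_wait();
        printf("every cpu has passed through its idle loop\n");
        atomic_store(&stop, 1);
        for (i = 0; i < NCPUS; i++)
                pthread_join(t[i], NULL);
        return 0;
}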
diff --git a/arch/ia64/kernel/ptrace.c b/arch/ia64/kernel/ptrace.c
index 55789fcd7210..907464ee7273 100644
--- a/arch/ia64/kernel/ptrace.c
+++ b/arch/ia64/kernel/ptrace.c
@@ -17,6 +17,7 @@
17#include <linux/user.h> 17#include <linux/user.h>
18#include <linux/security.h> 18#include <linux/security.h>
19#include <linux/audit.h> 19#include <linux/audit.h>
20#include <linux/signal.h>
20 21
21#include <asm/pgtable.h> 22#include <asm/pgtable.h>
22#include <asm/processor.h> 23#include <asm/processor.h>
@@ -1481,7 +1482,7 @@ sys_ptrace (long request, pid_t pid, unsigned long addr, unsigned long data)
1481 case PTRACE_CONT: 1482 case PTRACE_CONT:
1482 /* restart after signal. */ 1483 /* restart after signal. */
1483 ret = -EIO; 1484 ret = -EIO;
1484 if (data > _NSIG) 1485 if (!valid_signal(data))
1485 goto out_tsk; 1486 goto out_tsk;
1486 if (request == PTRACE_SYSCALL) 1487 if (request == PTRACE_SYSCALL)
1487 set_tsk_thread_flag(child, TIF_SYSCALL_TRACE); 1488 set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
@@ -1520,7 +1521,7 @@ sys_ptrace (long request, pid_t pid, unsigned long addr, unsigned long data)
1520 /* let child execute for one instruction */ 1521 /* let child execute for one instruction */
1521 case PTRACE_SINGLEBLOCK: 1522 case PTRACE_SINGLEBLOCK:
1522 ret = -EIO; 1523 ret = -EIO;
1523 if (data > _NSIG) 1524 if (!valid_signal(data))
1524 goto out_tsk; 1525 goto out_tsk;
1525 1526
1526 clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE); 1527 clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
@@ -1595,20 +1596,25 @@ syscall_trace_enter (long arg0, long arg1, long arg2, long arg3,
1595 long arg4, long arg5, long arg6, long arg7, 1596 long arg4, long arg5, long arg6, long arg7,
1596 struct pt_regs regs) 1597 struct pt_regs regs)
1597{ 1598{
1598 long syscall; 1599 if (test_thread_flag(TIF_SYSCALL_TRACE)
1600 && (current->ptrace & PT_PTRACED))
1601 syscall_trace();
1599 1602
1600 if (unlikely(current->audit_context)) { 1603 if (unlikely(current->audit_context)) {
1601 if (IS_IA32_PROCESS(&regs)) 1604 long syscall;
1605 int arch;
1606
1607 if (IS_IA32_PROCESS(&regs)) {
1602 syscall = regs.r1; 1608 syscall = regs.r1;
1603 else 1609 arch = AUDIT_ARCH_I386;
1610 } else {
1604 syscall = regs.r15; 1611 syscall = regs.r15;
1612 arch = AUDIT_ARCH_IA64;
1613 }
1605 1614
1606 audit_syscall_entry(current, syscall, arg0, arg1, arg2, arg3); 1615 audit_syscall_entry(current, arch, syscall, arg0, arg1, arg2, arg3);
1607 } 1616 }
1608 1617
1609 if (test_thread_flag(TIF_SYSCALL_TRACE)
1610 && (current->ptrace & PT_PTRACED))
1611 syscall_trace();
1612} 1618}
1613 1619
1614/* "asmlinkage" so the input arguments are preserved... */ 1620/* "asmlinkage" so the input arguments are preserved... */
@@ -1619,7 +1625,7 @@ syscall_trace_leave (long arg0, long arg1, long arg2, long arg3,
1619 struct pt_regs regs) 1625 struct pt_regs regs)
1620{ 1626{
1621 if (unlikely(current->audit_context)) 1627 if (unlikely(current->audit_context))
1622 audit_syscall_exit(current, regs.r8); 1628 audit_syscall_exit(current, AUDITSC_RESULT(regs.r10), regs.r8);
1623 1629
1624 if (test_thread_flag(TIF_SYSCALL_TRACE) 1630 if (test_thread_flag(TIF_SYSCALL_TRACE)
1625 && (current->ptrace & PT_PTRACED)) 1631 && (current->ptrace & PT_PTRACED))
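audit_syscall_entry() now takes an arch argument because an IA-64 kernel can be entered through the IA-32 emulation layer, where the same numeric syscall identifies a different call. A toy illustration of why the audit record must carry the ABI tag (the mapping below is invented, not the real syscall tables):

/* Why the audit record needs an arch tag: the same syscall number
 * means different things depending on the entry ABI. */
#include <stdio.h>

enum entry_abi { ABI_IA32, ABI_IA64 };          /* hypothetical tags */

static const char *name_syscall(enum entry_abi abi, long nr)
{
        if (abi == ABI_IA32 && nr == 1)
                return "exit (i386 numbering)";
        if (abi == ABI_IA64 && nr == 1)
                return "not a valid native call in this toy table";
        return "unknown in this toy table";
}

int main(void)
{
        printf("nr 1 via ia32 layer: %s\n", name_syscall(ABI_IA32, 1));
        printf("nr 1 native:         %s\n", name_syscall(ABI_IA64, 1));
        return 0;
}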
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index f05650c801d2..b7e6b4cb374b 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -4,10 +4,15 @@
4 * Copyright (C) 1998-2001, 2003-2004 Hewlett-Packard Co 4 * Copyright (C) 1998-2001, 2003-2004 Hewlett-Packard Co
5 * David Mosberger-Tang <davidm@hpl.hp.com> 5 * David Mosberger-Tang <davidm@hpl.hp.com>
6 * Stephane Eranian <eranian@hpl.hp.com> 6 * Stephane Eranian <eranian@hpl.hp.com>
7 * Copyright (C) 2000, Rohit Seth <rohit.seth@intel.com> 7 * Copyright (C) 2000, 2004 Intel Corp
8 * Rohit Seth <rohit.seth@intel.com>
9 * Suresh Siddha <suresh.b.siddha@intel.com>
10 * Gordon Jin <gordon.jin@intel.com>
8 * Copyright (C) 1999 VA Linux Systems 11 * Copyright (C) 1999 VA Linux Systems
9 * Copyright (C) 1999 Walt Drummond <drummond@valinux.com> 12 * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
10 * 13 *
14 * 12/26/04 S.Siddha, G.Jin, R.Seth
15 * Add multi-threading and multi-core detection
11 * 11/12/01 D.Mosberger Convert get_cpuinfo() to seq_file based show_cpuinfo(). 16 * 11/12/01 D.Mosberger Convert get_cpuinfo() to seq_file based show_cpuinfo().
12 * 04/04/00 D.Mosberger renamed cpu_initialized to cpu_online_map 17 * 04/04/00 D.Mosberger renamed cpu_initialized to cpu_online_map
13 * 03/31/00 R.Seth cpu_initialized and current->processor fixes 18 * 03/31/00 R.Seth cpu_initialized and current->processor fixes
@@ -296,6 +301,34 @@ mark_bsp_online (void)
296#endif 301#endif
297} 302}
298 303
304#ifdef CONFIG_SMP
305static void
306check_for_logical_procs (void)
307{
308 pal_logical_to_physical_t info;
309 s64 status;
310
311 status = ia64_pal_logical_to_phys(0, &info);
312 if (status == -1) {
313 printk(KERN_INFO "No logical to physical processor mapping "
314 "available\n");
315 return;
316 }
317 if (status) {
318 printk(KERN_ERR "ia64_pal_logical_to_phys failed with %ld\n",
319 status);
320 return;
321 }
322 /*
 323 * Total number of siblings that the BSP has, though not all of
 324 * them may have booted successfully. The correct number of siblings
325 * booted is in info.overview_num_log.
326 */
327 smp_num_siblings = info.overview_tpc;
328 smp_num_cpucores = info.overview_cpp;
329}
330#endif
331
299void __init 332void __init
300setup_arch (char **cmdline_p) 333setup_arch (char **cmdline_p)
301{ 334{
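check_for_logical_procs() reads the PAL_LOGICAL_TO_PHYSICAL overview to size smp_num_siblings (threads per core) and smp_num_cpucores (cores per package). A toy interpretation of those two fields, assuming a 2-thread, 2-core package (the struct and values are illustrative, not the real pal_logical_to_physical_t):

/* Toy interpretation of the PAL overview fields used above: the
 * number of logical processors a package exposes is threads/core
 * times cores/package. */
#include <stdio.h>

struct pal_overview {           /* models pal_logical_to_physical_t */
        unsigned int tpc;       /* threads per core  (overview_tpc) */
        unsigned int cpp;       /* cores per package (overview_cpp) */
};

int main(void)
{
        struct pal_overview info = { .tpc = 2, .cpp = 2 };

        printf("logical cpus per package: %u\n", info.tpc * info.cpp);
        return 0;
}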
@@ -356,6 +389,19 @@ setup_arch (char **cmdline_p)
356 389
357#ifdef CONFIG_SMP 390#ifdef CONFIG_SMP
358 cpu_physical_id(0) = hard_smp_processor_id(); 391 cpu_physical_id(0) = hard_smp_processor_id();
392
393 cpu_set(0, cpu_sibling_map[0]);
394 cpu_set(0, cpu_core_map[0]);
395
396 check_for_logical_procs();
397 if (smp_num_cpucores > 1)
398 printk(KERN_INFO
399 "cpu package is Multi-Core capable: number of cores=%d\n",
400 smp_num_cpucores);
401 if (smp_num_siblings > 1)
402 printk(KERN_INFO
403 "cpu package is Multi-Threading capable: number of siblings=%d\n",
404 smp_num_siblings);
359#endif 405#endif
360 406
361 cpu_init(); /* initialize the bootstrap CPU */ 407 cpu_init(); /* initialize the bootstrap CPU */
@@ -459,12 +505,23 @@ show_cpuinfo (struct seq_file *m, void *v)
459 "cpu regs : %u\n" 505 "cpu regs : %u\n"
460 "cpu MHz : %lu.%06lu\n" 506 "cpu MHz : %lu.%06lu\n"
461 "itc MHz : %lu.%06lu\n" 507 "itc MHz : %lu.%06lu\n"
462 "BogoMIPS : %lu.%02lu\n\n", 508 "BogoMIPS : %lu.%02lu\n",
463 cpunum, c->vendor, family, c->model, c->revision, c->archrev, 509 cpunum, c->vendor, family, c->model, c->revision, c->archrev,
464 features, c->ppn, c->number, 510 features, c->ppn, c->number,
465 c->proc_freq / 1000000, c->proc_freq % 1000000, 511 c->proc_freq / 1000000, c->proc_freq % 1000000,
466 c->itc_freq / 1000000, c->itc_freq % 1000000, 512 c->itc_freq / 1000000, c->itc_freq % 1000000,
467 lpj*HZ/500000, (lpj*HZ/5000) % 100); 513 lpj*HZ/500000, (lpj*HZ/5000) % 100);
514#ifdef CONFIG_SMP
515 seq_printf(m, "siblings : %u\n", c->num_log);
516 if (c->threads_per_core > 1 || c->cores_per_socket > 1)
517 seq_printf(m,
518 "physical id: %u\n"
519 "core id : %u\n"
520 "thread id : %u\n",
521 c->socket_id, c->core_id, c->thread_id);
522#endif
523 seq_printf(m,"\n");
524
468 return 0; 525 return 0;
469} 526}
470 527
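With the seq_printf() additions above, /proc/cpuinfo on a multi-threaded or multi-core package would gain lines roughly like the following (values illustrative):

siblings   : 2
physical id: 0
core id    : 0
thread id  : 1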
@@ -533,6 +590,14 @@ identify_cpu (struct cpuinfo_ia64 *c)
533 memcpy(c->vendor, cpuid.field.vendor, 16); 590 memcpy(c->vendor, cpuid.field.vendor, 16);
534#ifdef CONFIG_SMP 591#ifdef CONFIG_SMP
535 c->cpu = smp_processor_id(); 592 c->cpu = smp_processor_id();
593
594 /* below default values will be overwritten by identify_siblings()
595 * for Multi-Threading/Multi-Core capable cpu's
596 */
597 c->threads_per_core = c->cores_per_socket = c->num_log = 1;
598 c->socket_id = -1;
599
600 identify_siblings(c);
536#endif 601#endif
537 c->ppn = cpuid.field.ppn; 602 c->ppn = cpuid.field.ppn;
538 c->number = cpuid.field.number; 603 c->number = cpuid.field.number;
diff --git a/arch/ia64/kernel/signal.c b/arch/ia64/kernel/signal.c
index 6891d86937d9..499b7e5317cf 100644
--- a/arch/ia64/kernel/signal.c
+++ b/arch/ia64/kernel/signal.c
@@ -224,7 +224,8 @@ ia64_rt_sigreturn (struct sigscratch *scr)
224 * could be corrupted. 224 * could be corrupted.
225 */ 225 */
226 retval = (long) &ia64_leave_kernel; 226 retval = (long) &ia64_leave_kernel;
227 if (test_thread_flag(TIF_SYSCALL_TRACE)) 227 if (test_thread_flag(TIF_SYSCALL_TRACE)
228 || test_thread_flag(TIF_SYSCALL_AUDIT))
228 /* 229 /*
229 * strace expects to be notified after sigreturn returns even though the 230 * strace expects to be notified after sigreturn returns even though the
230 * context to which we return may not be in the middle of a syscall. 231 * context to which we return may not be in the middle of a syscall.
diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c
index 5318f0cbfc26..0d5ee57c9865 100644
--- a/arch/ia64/kernel/smpboot.c
+++ b/arch/ia64/kernel/smpboot.c
@@ -1,14 +1,25 @@
1/* 1/*
2 * SMP boot-related support 2 * SMP boot-related support
3 * 3 *
4 * Copyright (C) 1998-2003 Hewlett-Packard Co 4 * Copyright (C) 1998-2003, 2005 Hewlett-Packard Co
5 * David Mosberger-Tang <davidm@hpl.hp.com> 5 * David Mosberger-Tang <davidm@hpl.hp.com>
6 * Copyright (C) 2001, 2004-2005 Intel Corp
7 * Rohit Seth <rohit.seth@intel.com>
8 * Suresh Siddha <suresh.b.siddha@intel.com>
9 * Gordon Jin <gordon.jin@intel.com>
10 * Ashok Raj <ashok.raj@intel.com>
6 * 11 *
7 * 01/05/16 Rohit Seth <rohit.seth@intel.com> Moved SMP booting functions from smp.c to here. 12 * 01/05/16 Rohit Seth <rohit.seth@intel.com> Moved SMP booting functions from smp.c to here.
8 * 01/04/27 David Mosberger <davidm@hpl.hp.com> Added ITC synching code. 13 * 01/04/27 David Mosberger <davidm@hpl.hp.com> Added ITC synching code.
9 * 02/07/31 David Mosberger <davidm@hpl.hp.com> Switch over to hotplug-CPU boot-sequence. 14 * 02/07/31 David Mosberger <davidm@hpl.hp.com> Switch over to hotplug-CPU boot-sequence.
10 * smp_boot_cpus()/smp_commence() is replaced by 15 * smp_boot_cpus()/smp_commence() is replaced by
11 * smp_prepare_cpus()/__cpu_up()/smp_cpus_done(). 16 * smp_prepare_cpus()/__cpu_up()/smp_cpus_done().
17 * 04/06/21 Ashok Raj <ashok.raj@intel.com> Added CPU Hotplug Support
18 * 04/12/26 Jin Gordon <gordon.jin@intel.com>
19 * 04/12/26 Rohit Seth <rohit.seth@intel.com>
20 * Add multi-threading and multi-core detection
21 * 05/01/30 Suresh Siddha <suresh.b.siddha@intel.com>
22 * Setup cpu_sibling_map and cpu_core_map
12 */ 23 */
13#include <linux/config.h> 24#include <linux/config.h>
14 25
@@ -58,6 +69,37 @@
58#define Dprintk(x...) 69#define Dprintk(x...)
59#endif 70#endif
60 71
72#ifdef CONFIG_HOTPLUG_CPU
73/*
 74 * Store all idle threads; these can be reused instead of creating
 75 * new threads. Also avoids complicated thread-destroy functionality
76 * for idle threads.
77 */
78struct task_struct *idle_thread_array[NR_CPUS];
79
80/*
81 * Global array allocated for NR_CPUS at boot time
82 */
83struct sal_to_os_boot sal_boot_rendez_state[NR_CPUS];
84
85/*
86 * start_ap in head.S uses this to store current booting cpu
87 * info.
88 */
89struct sal_to_os_boot *sal_state_for_booting_cpu = &sal_boot_rendez_state[0];
90
91#define set_brendez_area(x) (sal_state_for_booting_cpu = &sal_boot_rendez_state[(x)]);
92
93#define get_idle_for_cpu(x) (idle_thread_array[(x)])
94#define set_idle_for_cpu(x,p) (idle_thread_array[(x)] = (p))
95
96#else
97
98#define get_idle_for_cpu(x) (NULL)
99#define set_idle_for_cpu(x,p)
100#define set_brendez_area(x)
101#endif
102
61 103
62/* 104/*
63 * ITC synchronization related stuff: 105 * ITC synchronization related stuff:
@@ -90,6 +132,11 @@ EXPORT_SYMBOL(cpu_online_map);
90cpumask_t cpu_possible_map; 132cpumask_t cpu_possible_map;
91EXPORT_SYMBOL(cpu_possible_map); 133EXPORT_SYMBOL(cpu_possible_map);
92 134
135cpumask_t cpu_core_map[NR_CPUS] __cacheline_aligned;
136cpumask_t cpu_sibling_map[NR_CPUS] __cacheline_aligned;
137int smp_num_siblings = 1;
138int smp_num_cpucores = 1;
139
93/* which logical CPU number maps to which CPU (physical APIC ID) */ 140/* which logical CPU number maps to which CPU (physical APIC ID) */
94volatile int ia64_cpu_to_sapicid[NR_CPUS]; 141volatile int ia64_cpu_to_sapicid[NR_CPUS];
95EXPORT_SYMBOL(ia64_cpu_to_sapicid); 142EXPORT_SYMBOL(ia64_cpu_to_sapicid);
@@ -124,7 +171,8 @@ sync_master (void *arg)
124 local_irq_save(flags); 171 local_irq_save(flags);
125 { 172 {
126 for (i = 0; i < NUM_ROUNDS*NUM_ITERS; ++i) { 173 for (i = 0; i < NUM_ROUNDS*NUM_ITERS; ++i) {
127 while (!go[MASTER]); 174 while (!go[MASTER])
175 cpu_relax();
128 go[MASTER] = 0; 176 go[MASTER] = 0;
129 go[SLAVE] = ia64_get_itc(); 177 go[SLAVE] = ia64_get_itc();
130 } 178 }
@@ -147,7 +195,8 @@ get_delta (long *rt, long *master)
147 for (i = 0; i < NUM_ITERS; ++i) { 195 for (i = 0; i < NUM_ITERS; ++i) {
148 t0 = ia64_get_itc(); 196 t0 = ia64_get_itc();
149 go[MASTER] = 1; 197 go[MASTER] = 1;
150 while (!(tm = go[SLAVE])); 198 while (!(tm = go[SLAVE]))
199 cpu_relax();
151 go[SLAVE] = 0; 200 go[SLAVE] = 0;
152 t1 = ia64_get_itc(); 201 t1 = ia64_get_itc();
153 202
@@ -226,7 +275,8 @@ ia64_sync_itc (unsigned int master)
226 return; 275 return;
227 } 276 }
228 277
229 while (go[MASTER]); /* wait for master to be ready */ 278 while (go[MASTER])
279 cpu_relax(); /* wait for master to be ready */
230 280
231 spin_lock_irqsave(&itc_sync_lock, flags); 281 spin_lock_irqsave(&itc_sync_lock, flags);
232 { 282 {
@@ -345,7 +395,6 @@ start_secondary (void *unused)
345{ 395{
346 /* Early console may use I/O ports */ 396 /* Early console may use I/O ports */
347 ia64_set_kr(IA64_KR_IO_BASE, __pa(ia64_iobase)); 397 ia64_set_kr(IA64_KR_IO_BASE, __pa(ia64_iobase));
348
349 Dprintk("start_secondary: starting CPU 0x%x\n", hard_smp_processor_id()); 398 Dprintk("start_secondary: starting CPU 0x%x\n", hard_smp_processor_id());
350 efi_map_pal_code(); 399 efi_map_pal_code();
351 cpu_init(); 400 cpu_init();
@@ -384,6 +433,13 @@ do_boot_cpu (int sapicid, int cpu)
384 .done = COMPLETION_INITIALIZER(c_idle.done), 433 .done = COMPLETION_INITIALIZER(c_idle.done),
385 }; 434 };
386 DECLARE_WORK(work, do_fork_idle, &c_idle); 435 DECLARE_WORK(work, do_fork_idle, &c_idle);
436
437 c_idle.idle = get_idle_for_cpu(cpu);
438 if (c_idle.idle) {
439 init_idle(c_idle.idle, cpu);
440 goto do_rest;
441 }
442
387 /* 443 /*
388 * We can't use kernel_thread since we must avoid to reschedule the child. 444 * We can't use kernel_thread since we must avoid to reschedule the child.
389 */ 445 */
@@ -396,10 +452,15 @@ do_boot_cpu (int sapicid, int cpu)
396 452
397 if (IS_ERR(c_idle.idle)) 453 if (IS_ERR(c_idle.idle))
398 panic("failed fork for CPU %d", cpu); 454 panic("failed fork for CPU %d", cpu);
455
456 set_idle_for_cpu(cpu, c_idle.idle);
457
458do_rest:
399 task_for_booting_cpu = c_idle.idle; 459 task_for_booting_cpu = c_idle.idle;
400 460
401 Dprintk("Sending wakeup vector %lu to AP 0x%x/0x%x.\n", ap_wakeup_vector, cpu, sapicid); 461 Dprintk("Sending wakeup vector %lu to AP 0x%x/0x%x.\n", ap_wakeup_vector, cpu, sapicid);
402 462
463 set_brendez_area(cpu);
403 platform_send_ipi(cpu, ap_wakeup_vector, IA64_IPI_DM_INT, 0); 464 platform_send_ipi(cpu, ap_wakeup_vector, IA64_IPI_DM_INT, 0);
404 465
405 /* 466 /*
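do_boot_cpu() now treats the idle task as a cached resource: the first boot forks it through keventd and stores it with set_idle_for_cpu(); a later re-plug finds it via get_idle_for_cpu(), re-initializes it with init_idle(), and jumps straight to do_rest. The create-once, reuse-on-replug shape of that, as a runnable user-space sketch (calloc stands in for fork_idle; names are illustrative):

/* Cache-or-create pattern used for idle tasks above (user-space
 * model): create once, reset and reuse on re-plug. */
#include <stdio.h>
#include <stdlib.h>

#define NR_CPUS 4

struct task { int cpu; int generation; };

static struct task *idle_thread_array[NR_CPUS];

static struct task *boot_cpu_idle(int cpu)
{
        struct task *idle = idle_thread_array[cpu];

        if (idle) {                      /* re-plug: reuse cached task */
                idle->generation++;      /* models init_idle() */
                return idle;
        }
        idle = calloc(1, sizeof(*idle)); /* first boot: models fork_idle() */
        idle->cpu = cpu;
        idle_thread_array[cpu] = idle;   /* models set_idle_for_cpu() */
        return idle;
}

int main(void)
{
        struct task *a = boot_cpu_idle(1);
        struct task *b = boot_cpu_idle(1);      /* offline, then online again */

        printf("same idle task reused: %s\n", a == b ? "yes" : "no");
        return 0;
}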
@@ -552,19 +613,70 @@ void __devinit smp_prepare_boot_cpu(void)
552 cpu_set(smp_processor_id(), cpu_callin_map); 613 cpu_set(smp_processor_id(), cpu_callin_map);
553} 614}
554 615
616/*
617 * mt_info[] is a temporary store for all info returned by
618 * PAL_LOGICAL_TO_PHYSICAL, to be copied into cpuinfo_ia64 when the
 619 * corresponding cpu comes up.
620 */
621static struct {
622 __u32 socket_id;
623 __u16 core_id;
624 __u16 thread_id;
625 __u16 proc_fixed_addr;
626 __u8 valid;
 627} mt_info[NR_CPUS] __devinit;
628
555#ifdef CONFIG_HOTPLUG_CPU 629#ifdef CONFIG_HOTPLUG_CPU
556extern void fixup_irqs(void); 630static inline void
557/* must be called with cpucontrol mutex held */ 631remove_from_mtinfo(int cpu)
558static int __devinit cpu_enable(unsigned int cpu)
559{ 632{
560 per_cpu(cpu_state,cpu) = CPU_UP_PREPARE; 633 int i;
561 wmb();
562 634
563 while (!cpu_online(cpu)) 635 for_each_cpu(i)
564 cpu_relax(); 636 if (mt_info[i].valid && mt_info[i].socket_id ==
565 return 0; 637 cpu_data(cpu)->socket_id)
638 mt_info[i].valid = 0;
639}
640
641static inline void
642clear_cpu_sibling_map(int cpu)
643{
644 int i;
645
646 for_each_cpu_mask(i, cpu_sibling_map[cpu])
647 cpu_clear(cpu, cpu_sibling_map[i]);
648 for_each_cpu_mask(i, cpu_core_map[cpu])
649 cpu_clear(cpu, cpu_core_map[i]);
650
651 cpu_sibling_map[cpu] = cpu_core_map[cpu] = CPU_MASK_NONE;
652}
653
654static void
655remove_siblinginfo(int cpu)
656{
657 int last = 0;
658
659 if (cpu_data(cpu)->threads_per_core == 1 &&
660 cpu_data(cpu)->cores_per_socket == 1) {
661 cpu_clear(cpu, cpu_core_map[cpu]);
662 cpu_clear(cpu, cpu_sibling_map[cpu]);
663 return;
664 }
665
666 last = (cpus_weight(cpu_core_map[cpu]) == 1 ? 1 : 0);
667
668 /* remove it from all sibling map's */
669 clear_cpu_sibling_map(cpu);
670
671 /* if this cpu is the last in the core group, remove all its info
672 * from mt_info structure
673 */
674 if (last)
675 remove_from_mtinfo(cpu);
566} 676}
567 677
678extern void fixup_irqs(void);
679/* must be called with cpucontrol mutex held */
568int __cpu_disable(void) 680int __cpu_disable(void)
569{ 681{
570 int cpu = smp_processor_id(); 682 int cpu = smp_processor_id();
@@ -575,9 +687,10 @@ int __cpu_disable(void)
575 if (cpu == 0) 687 if (cpu == 0)
576 return -EBUSY; 688 return -EBUSY;
577 689
690 remove_siblinginfo(cpu);
578 fixup_irqs(); 691 fixup_irqs();
579 local_flush_tlb_all(); 692 local_flush_tlb_all();
580 printk ("Disabled cpu %u\n", smp_processor_id()); 693 cpu_clear(cpu, cpu_callin_map);
581 return 0; 694 return 0;
582} 695}
583 696
@@ -589,12 +702,7 @@ void __cpu_die(unsigned int cpu)
589 /* They ack this in play_dead by setting CPU_DEAD */ 702 /* They ack this in play_dead by setting CPU_DEAD */
590 if (per_cpu(cpu_state, cpu) == CPU_DEAD) 703 if (per_cpu(cpu_state, cpu) == CPU_DEAD)
591 { 704 {
592 /* 705 printk ("CPU %d is now offline\n", cpu);
593 * TBD: Enable this when physical removal
594 * or when we put the processor is put in
595 * SAL_BOOT_RENDEZ mode
596 * cpu_clear(cpu, cpu_callin_map);
597 */
598 return; 706 return;
599 } 707 }
600 msleep(100); 708 msleep(100);
@@ -602,11 +710,6 @@ void __cpu_die(unsigned int cpu)
602 printk(KERN_ERR "CPU %u didn't die...\n", cpu); 710 printk(KERN_ERR "CPU %u didn't die...\n", cpu);
603} 711}
604#else /* !CONFIG_HOTPLUG_CPU */ 712#else /* !CONFIG_HOTPLUG_CPU */
605static int __devinit cpu_enable(unsigned int cpu)
606{
607 return 0;
608}
609
610int __cpu_disable(void) 713int __cpu_disable(void)
611{ 714{
612 return -ENOSYS; 715 return -ENOSYS;
@@ -637,6 +740,23 @@ smp_cpus_done (unsigned int dummy)
637 (int)num_online_cpus(), bogosum/(500000/HZ), (bogosum/(5000/HZ))%100); 740 (int)num_online_cpus(), bogosum/(500000/HZ), (bogosum/(5000/HZ))%100);
638} 741}
639 742
743static inline void __devinit
744set_cpu_sibling_map(int cpu)
745{
746 int i;
747
748 for_each_online_cpu(i) {
749 if ((cpu_data(cpu)->socket_id == cpu_data(i)->socket_id)) {
750 cpu_set(i, cpu_core_map[cpu]);
751 cpu_set(cpu, cpu_core_map[i]);
752 if (cpu_data(cpu)->core_id == cpu_data(i)->core_id) {
753 cpu_set(i, cpu_sibling_map[cpu]);
754 cpu_set(cpu, cpu_sibling_map[i]);
755 }
756 }
757 }
758}
759
640int __devinit 760int __devinit
641__cpu_up (unsigned int cpu) 761__cpu_up (unsigned int cpu)
642{ 762{
@@ -648,21 +768,26 @@ __cpu_up (unsigned int cpu)
648 return -EINVAL; 768 return -EINVAL;
649 769
650 /* 770 /*
 651 * Already booted.. just enable and get outa idle lool 771 * Already booted cpu? Not valid anymore since we don't
 772 * do the idle-loop tightspin anymore.
652 */ 773 */
653 if (cpu_isset(cpu, cpu_callin_map)) 774 if (cpu_isset(cpu, cpu_callin_map))
654 { 775 return -EINVAL;
655 cpu_enable(cpu); 776
656 local_irq_enable();
657 while (!cpu_isset(cpu, cpu_online_map))
658 mb();
659 return 0;
660 }
661 /* Processor goes to start_secondary(), sets online flag */ 777 /* Processor goes to start_secondary(), sets online flag */
662 ret = do_boot_cpu(sapicid, cpu); 778 ret = do_boot_cpu(sapicid, cpu);
663 if (ret < 0) 779 if (ret < 0)
664 return ret; 780 return ret;
665 781
782 if (cpu_data(cpu)->threads_per_core == 1 &&
783 cpu_data(cpu)->cores_per_socket == 1) {
784 cpu_set(cpu, cpu_sibling_map[cpu]);
785 cpu_set(cpu, cpu_core_map[cpu]);
786 return 0;
787 }
788
789 set_cpu_sibling_map(cpu);
790
666 return 0; 791 return 0;
667} 792}
668 793
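Taken together, set_cpu_sibling_map() and the __cpu_up() fast path encode a two-level test: CPUs sharing a socket_id land in each other's core map, and those also sharing a core_id land in the sibling map as hardware threads of one core. A toy classification along the same lines (runnable; the structs and values are invented):

/* Toy classification matching the sibling/core-map rules above:
 * same socket_id      -> same package (core map);
 * same core_id as well -> hardware threads of one core (sibling map). */
#include <stdio.h>

struct cpu { int socket_id, core_id; };

static const char *relation(struct cpu a, struct cpu b)
{
        if (a.socket_id != b.socket_id)
                return "different packages";
        if (a.core_id != b.core_id)
                return "same package, different cores";
        return "sibling threads of one core";
}

int main(void)
{
        struct cpu t0 = {0, 0}, t1 = {0, 0}, c1 = {0, 1}, other = {1, 0};

        printf("t0 vs t1:    %s\n", relation(t0, t1));
        printf("t0 vs c1:    %s\n", relation(t0, c1));
        printf("t0 vs other: %s\n", relation(t0, other));
        return 0;
}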
@@ -690,3 +815,106 @@ init_smp_config(void)
690 ia64_sal_strerror(sal_ret)); 815 ia64_sal_strerror(sal_ret));
691} 816}
692 817
818static inline int __devinit
819check_for_mtinfo_index(void)
820{
821 int i;
822
823 for_each_cpu(i)
824 if (!mt_info[i].valid)
825 return i;
826
827 return -1;
828}
829
830/*
831 * Search the mt_info to find out if this socket's cid/tid information is
832 * cached or not. If the socket exists, fill in the core_id and thread_id
 833 * in cpuinfo.
834 */
835static int __devinit
836check_for_new_socket(__u16 logical_address, struct cpuinfo_ia64 *c)
837{
838 int i;
839 __u32 sid = c->socket_id;
840
841 for_each_cpu(i) {
842 if (mt_info[i].valid && mt_info[i].proc_fixed_addr == logical_address
843 && mt_info[i].socket_id == sid) {
844 c->core_id = mt_info[i].core_id;
845 c->thread_id = mt_info[i].thread_id;
846 return 1; /* not a new socket */
847 }
848 }
849 return 0;
850}
851
852/*
853 * identify_siblings(cpu) gets called from identify_cpu. This populates the
854 * information related to logical execution units in per_cpu_data structure.
855 */
856void __devinit
857identify_siblings(struct cpuinfo_ia64 *c)
858{
859 s64 status;
860 u16 pltid;
861 u64 proc_fixed_addr;
862 int count, i;
863 pal_logical_to_physical_t info;
864
865 if (smp_num_cpucores == 1 && smp_num_siblings == 1)
866 return;
867
868 if ((status = ia64_pal_logical_to_phys(0, &info)) != PAL_STATUS_SUCCESS) {
869 printk(KERN_ERR "ia64_pal_logical_to_phys failed with %ld\n",
870 status);
871 return;
872 }
873 if ((status = ia64_sal_physical_id_info(&pltid)) != PAL_STATUS_SUCCESS) {
874 printk(KERN_ERR "ia64_sal_pltid failed with %ld\n", status);
875 return;
876 }
877 if ((status = ia64_pal_fixed_addr(&proc_fixed_addr)) != PAL_STATUS_SUCCESS) {
878 printk(KERN_ERR "ia64_pal_fixed_addr failed with %ld\n", status);
879 return;
880 }
881
882 c->socket_id = (pltid << 8) | info.overview_ppid;
883 c->cores_per_socket = info.overview_cpp;
884 c->threads_per_core = info.overview_tpc;
885 count = c->num_log = info.overview_num_log;
886
887 /* If the thread and core id information is already cached, then
888 * we will simply update cpu_info and return. Otherwise, we will
889 * do the PAL calls and cache core and thread id's of all the siblings.
890 */
891 if (check_for_new_socket(proc_fixed_addr, c))
892 return;
893
894 for (i = 0; i < count; i++) {
895 int index;
896
897 if (i && (status = ia64_pal_logical_to_phys(i, &info))
898 != PAL_STATUS_SUCCESS) {
899 printk(KERN_ERR "ia64_pal_logical_to_phys failed"
900 " with %ld\n", status);
901 return;
902 }
903 if (info.log2_la == proc_fixed_addr) {
904 c->core_id = info.log1_cid;
905 c->thread_id = info.log1_tid;
906 }
907
908 index = check_for_mtinfo_index();
909 /* We will not do the mt_info caching optimization in this case.
910 */
911 if (index < 0)
912 continue;
913
914 mt_info[index].valid = 1;
915 mt_info[index].socket_id = c->socket_id;
916 mt_info[index].core_id = info.log1_cid;
917 mt_info[index].thread_id = info.log1_tid;
918 mt_info[index].proc_fixed_addr = info.log2_la;
919 }
920}
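identify_siblings() composes socket_id from the SAL platform id and the PAL package id, (pltid << 8) | info.overview_ppid, then caches each socket's core/thread ids in mt_info so only the first CPU of a socket pays for the full PAL walk. The composition itself, as a one-screen sketch with made-up values:

/* Illustration of the socket_id composition used above: the SAL
 * platform id occupies the high bits, the PAL package id the low
 * byte.  Values are made up. */
#include <stdio.h>

int main(void)
{
        unsigned short pltid = 0x0002;  /* from ia64_sal_physical_id_info() */
        unsigned char  ppid  = 0x01;    /* info.overview_ppid */
        unsigned int socket_id = ((unsigned int)pltid << 8) | ppid;

        printf("socket_id = 0x%x\n", socket_id);        /* 0x201 */
        return 0;
}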
diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c
index 3ac216e1c8bb..a8cf6d8a509c 100644
--- a/arch/ia64/kernel/sys_ia64.c
+++ b/arch/ia64/kernel/sys_ia64.c
@@ -93,20 +93,6 @@ sys_getpagesize (void)
93} 93}
94 94
95asmlinkage unsigned long 95asmlinkage unsigned long
96ia64_shmat (int shmid, void __user *shmaddr, int shmflg)
97{
98 unsigned long raddr;
99 int retval;
100
101 retval = do_shmat(shmid, shmaddr, shmflg, &raddr);
102 if (retval < 0)
103 return retval;
104
105 force_successful_syscall_return();
106 return raddr;
107}
108
109asmlinkage unsigned long
110ia64_brk (unsigned long brk) 96ia64_brk (unsigned long brk)
111{ 97{
112 unsigned long rlim, retval, newbrk, oldbrk; 98 unsigned long rlim, retval, newbrk, oldbrk;
diff --git a/arch/ia64/kernel/unwind.c b/arch/ia64/kernel/unwind.c
index d494ff647cac..2776a074c6f1 100644
--- a/arch/ia64/kernel/unwind.c
+++ b/arch/ia64/kernel/unwind.c
@@ -1943,23 +1943,30 @@ EXPORT_SYMBOL(unw_unwind);
1943int 1943int
1944unw_unwind_to_user (struct unw_frame_info *info) 1944unw_unwind_to_user (struct unw_frame_info *info)
1945{ 1945{
1946 unsigned long ip, sp; 1946 unsigned long ip, sp, pr = 0;
1947 1947
1948 while (unw_unwind(info) >= 0) { 1948 while (unw_unwind(info) >= 0) {
1949 if (unw_get_rp(info, &ip) < 0) {
1950 unw_get_ip(info, &ip);
1951 UNW_DPRINT(0, "unwind.%s: failed to read return pointer (ip=0x%lx)\n",
1952 __FUNCTION__, ip);
1953 return -1;
1954 }
1955 unw_get_sp(info, &sp); 1949 unw_get_sp(info, &sp);
1956 if (sp >= (unsigned long)info->task + IA64_STK_OFFSET) 1950 if ((long)((unsigned long)info->task + IA64_STK_OFFSET - sp)
1951 < IA64_PT_REGS_SIZE) {
1952 UNW_DPRINT(0, "unwind.%s: ran off the top of the kernel stack\n",
1953 __FUNCTION__);
1957 break; 1954 break;
1958 if (ip < FIXADDR_USER_END) 1955 }
1956 if (unw_is_intr_frame(info) &&
1957 (pr & (1UL << PRED_USER_STACK)))
1959 return 0; 1958 return 0;
1959 if (unw_get_pr (info, &pr) < 0) {
1960 unw_get_rp(info, &ip);
1961 UNW_DPRINT(0, "unwind.%s: failed to read "
1962 "predicate register (ip=0x%lx)\n",
1963 __FUNCTION__, ip);
1964 return -1;
1965 }
1960 } 1966 }
1961 unw_get_ip(info, &ip); 1967 unw_get_ip(info, &ip);
1962 UNW_DPRINT(0, "unwind.%s: failed to unwind to user-level (ip=0x%lx)\n", __FUNCTION__, ip); 1968 UNW_DPRINT(0, "unwind.%s: failed to unwind to user-level (ip=0x%lx)\n",
1969 __FUNCTION__, ip);
1963 return -1; 1970 return -1;
1964} 1971}
1965EXPORT_SYMBOL(unw_unwind_to_user); 1972EXPORT_SYMBOL(unw_unwind_to_user);
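The rewritten unw_unwind_to_user() loop stops at an interrupt frame whose saved predicates have PRED_USER_STACK set, i.e. the frame that entered the kernel from user level; note that pr is deliberately the value fetched on the previous iteration, so the test applies to the frame underneath the interrupt frame. A runnable toy of that one-iteration lag (the frame list and all names are invented):

/* Toy of the termination logic above: pr always holds the
 * predicates captured on the previous iteration, so the user-stack
 * test fires one frame after the predicates were read. */
#include <stdio.h>

#define PRED_USER_STACK 3               /* bit index; illustrative */

struct frame { int is_intr; unsigned long pr; };

int main(void)
{
        struct frame frames[] = {
                { 0, 0 },                       /* ordinary kernel frame */
                { 0, 1UL << PRED_USER_STACK },  /* frame below the intr frame */
                { 1, 0 },                       /* interrupt frame: stop here */
        };
        unsigned long pr = 0;
        unsigned int i;

        for (i = 0; i < sizeof(frames)/sizeof(frames[0]); i++) {
                if (frames[i].is_intr && (pr & (1UL << PRED_USER_STACK))) {
                        printf("reached user level at frame %u\n", i);
                        return 0;
                }
                pr = frames[i].pr;      /* predicates for the next check */
        }
        printf("failed to unwind to user level\n");
        return 1;
}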