Diffstat (limited to 'include/asm-ia64')

-rw-r--r--  include/asm-ia64/acpi-ext.h            |    1
-rw-r--r--  include/asm-ia64/iosapic.h             |    4
-rw-r--r--  include/asm-ia64/irq.h                 |    4
-rw-r--r--  include/asm-ia64/mca.h                 |  102
-rw-r--r--  include/asm-ia64/mca_asm.h             |  125
-rw-r--r--  include/asm-ia64/pci.h                 |   13
-rw-r--r--  include/asm-ia64/ptrace.h              |    4
-rw-r--r--  include/asm-ia64/sn/sn_feature_sets.h  |   57
-rw-r--r--  include/asm-ia64/sn/sn_sal.h           |   36
-rw-r--r--  include/asm-ia64/spinlock.h            |   69
-rw-r--r--  include/asm-ia64/spinlock_types.h      |   21
-rw-r--r--  include/asm-ia64/system.h              |    1
-rw-r--r--  include/asm-ia64/thread_info.h         |    4
-rw-r--r--  include/asm-ia64/unwind.h              |    7

14 files changed, 242 insertions, 206 deletions
diff --git a/include/asm-ia64/acpi-ext.h b/include/asm-ia64/acpi-ext.h
index 9271d74c64cc..56d2ddc97b30 100644
--- a/include/asm-ia64/acpi-ext.h
+++ b/include/asm-ia64/acpi-ext.h
@@ -11,6 +11,7 @@
 #define _ASM_IA64_ACPI_EXT_H
 
 #include <linux/types.h>
+#include <acpi/actypes.h>
 
 extern acpi_status hp_acpi_csr_space (acpi_handle, u64 *base, u64 *length);
 
diff --git a/include/asm-ia64/iosapic.h b/include/asm-ia64/iosapic.h
index a429fe225b07..20f98f1751a1 100644
--- a/include/asm-ia64/iosapic.h
+++ b/include/asm-ia64/iosapic.h
@@ -80,12 +80,9 @@ extern int iosapic_remove (unsigned int gsi_base);
 #endif /* CONFIG_HOTPLUG */
 extern int gsi_to_vector (unsigned int gsi);
 extern int gsi_to_irq (unsigned int gsi);
-extern void iosapic_enable_intr (unsigned int vector);
 extern int iosapic_register_intr (unsigned int gsi, unsigned long polarity,
                                   unsigned long trigger);
-#ifdef CONFIG_ACPI_DEALLOCATE_IRQ
 extern void iosapic_unregister_intr (unsigned int irq);
-#endif
 extern void __init iosapic_override_isa_irq (unsigned int isa_irq, unsigned int gsi,
                                              unsigned long polarity,
                                              unsigned long trigger);
@@ -97,7 +94,6 @@ extern int __init iosapic_register_platform_intr (u32 int_type,
                                          unsigned long trigger);
 extern unsigned int iosapic_version (char __iomem *addr);
 
-extern void iosapic_pci_fixup (int);
 #ifdef CONFIG_NUMA
 extern void __devinit map_iosapic_to_node (unsigned int, int);
 #endif
diff --git a/include/asm-ia64/irq.h b/include/asm-ia64/irq.h
index cd984d08fd15..dbe86c0bbce5 100644
--- a/include/asm-ia64/irq.h
+++ b/include/asm-ia64/irq.h
@@ -35,8 +35,4 @@ extern void disable_irq_nosync (unsigned int);
 extern void enable_irq (unsigned int);
 extern void set_irq_affinity_info (unsigned int irq, int dest, int redir);
 
-struct irqaction;
-struct pt_regs;
-int handle_IRQ_event(unsigned int, struct pt_regs *, struct irqaction *);
-
 #endif /* _ASM_IA64_IRQ_H */
diff --git a/include/asm-ia64/mca.h b/include/asm-ia64/mca.h
index 149ad0118455..97a28b8b2ddd 100644
--- a/include/asm-ia64/mca.h
+++ b/include/asm-ia64/mca.h
@@ -11,8 +11,6 @@
 #ifndef _ASM_IA64_MCA_H
 #define _ASM_IA64_MCA_H
 
-#define IA64_MCA_STACK_SIZE     8192
-
 #if !defined(__ASSEMBLY__)
 
 #include <linux/interrupt.h>
@@ -48,7 +46,8 @@ typedef union cmcv_reg_u {
 
 enum {
         IA64_MCA_RENDEZ_CHECKIN_NOTDONE =       0x0,
-        IA64_MCA_RENDEZ_CHECKIN_DONE    =       0x1
+        IA64_MCA_RENDEZ_CHECKIN_DONE    =       0x1,
+        IA64_MCA_RENDEZ_CHECKIN_INIT    =       0x2,
 };
 
 /* Information maintained by the MC infrastructure */
@@ -63,18 +62,42 @@ typedef struct ia64_mc_info_s {
 
 } ia64_mc_info_t;
 
-typedef struct ia64_mca_sal_to_os_state_s {
-        u64             imsto_os_gp;            /* GP of the os registered with the SAL */
-        u64             imsto_pal_proc;         /* PAL_PROC entry point - physical addr */
-        u64             imsto_sal_proc;         /* SAL_PROC entry point - physical addr */
-        u64             imsto_sal_gp;           /* GP of the SAL - physical */
-        u64             imsto_rendez_state;     /* Rendez state information */
-        u64             imsto_sal_check_ra;     /* Return address in SAL_CHECK while going
-                                                 * back to SAL from OS after MCA handling.
-                                                 */
-        u64             pal_min_state;          /* from PAL in r17 */
-        u64             proc_state_param;       /* from PAL in r18. See SDV 2:268 11.3.2.1 */
-} ia64_mca_sal_to_os_state_t;
+/* Handover state from SAL to OS and vice versa, for both MCA and INIT events.
+ * Besides the handover state, it also contains some saved registers from the
+ * time of the event.
+ * Note: mca_asm.S depends on the precise layout of this structure.
+ */
+
+struct ia64_sal_os_state {
+        /* SAL to OS, must be at offset 0 */
+        u64                     os_gp;                  /* GP of the os registered with the SAL, physical */
+        u64                     pal_proc;               /* PAL_PROC entry point, physical */
+        u64                     sal_proc;               /* SAL_PROC entry point, physical */
+        u64                     rv_rc;                  /* MCA - Rendezvous state, INIT - reason code */
+        u64                     proc_state_param;       /* from R18 */
+        u64                     monarch;                /* 1 for a monarch event, 0 for a slave */
+        /* common, must follow SAL to OS */
+        u64                     sal_ra;                 /* Return address in SAL, physical */
+        u64                     sal_gp;                 /* GP of the SAL - physical */
+        pal_min_state_area_t    *pal_min_state;         /* from R17.  physical in asm, virtual in C */
+        u64                     prev_IA64_KR_CURRENT;   /* previous value of IA64_KR(CURRENT) */
+        struct task_struct      *prev_task;             /* previous task, NULL if it is not useful */
+        /* Some interrupt registers are not saved in minstate, pt_regs or
+         * switch_stack.  Because MCA/INIT can occur when interrupts are
+         * disabled, we need to save the additional interrupt registers over
+         * MCA/INIT and resume.
+         */
+        u64                     isr;
+        u64                     ifa;
+        u64                     itir;
+        u64                     iipa;
+        u64                     iim;
+        u64                     iha;
+        /* OS to SAL, must follow common */
+        u64                     os_status;              /* OS status to SAL, enum below */
+        u64                     context;                /* 0 if return to same context
+                                                           1 if return to new context */
+};
 
 enum {
         IA64_MCA_CORRECTED      =       0x0,    /* Error has been corrected by OS_MCA */
@@ -84,35 +107,21 @@
 };
 
 enum {
+        IA64_INIT_RESUME        =       0x0,    /* Resume after return from INIT */
+        IA64_INIT_WARM_BOOT     =       -1,     /* Warm boot of the system need from SAL */
+};
+
+enum {
         IA64_MCA_SAME_CONTEXT   =       0x0,    /* SAL to return to same context */
         IA64_MCA_NEW_CONTEXT    =       -1      /* SAL to return to new context */
 };
 
-typedef struct ia64_mca_os_to_sal_state_s {
-        u64             imots_os_status;        /*   OS status to SAL as to what happened
-                                                 *   with the MCA handling.
-                                                 */
-        u64             imots_sal_gp;           /* GP of the SAL - physical */
-        u64             imots_context;          /* 0 if return to same context
-                                                   1 if return to new context */
-        u64             *imots_new_min_state;   /* Pointer to structure containing
-                                                 * new values of registers in the min state
-                                                 * save area.
-                                                 */
-        u64             imots_sal_check_ra;     /* Return address in SAL_CHECK while going
-                                                 * back to SAL from OS after MCA handling.
-                                                 */
-} ia64_mca_os_to_sal_state_t;
-
 /* Per-CPU MCA state that is too big for normal per-CPU variables.  */
 
 struct ia64_mca_cpu {
-        u64 stack[IA64_MCA_STACK_SIZE/8];       /* MCA memory-stack */
-        u64 proc_state_dump[512];
-        u64 stackframe[32];
-        u64 rbstore[IA64_MCA_STACK_SIZE/8];     /* MCA reg.-backing store */
+        u64 mca_stack[KERNEL_STACK_SIZE/8];
         u64 init_stack[KERNEL_STACK_SIZE/8];
-} __attribute__ ((aligned(16)));
+};
 
 /* Array of physical addresses of each CPU's MCA area.  */
 extern unsigned long __per_cpu_mca[NR_CPUS];
@@ -121,12 +130,29 @@ extern void ia64_mca_init(void);
 extern void ia64_mca_cpu_init(void *);
 extern void ia64_os_mca_dispatch(void);
 extern void ia64_os_mca_dispatch_end(void);
-extern void ia64_mca_ucmc_handler(void);
+extern void ia64_mca_ucmc_handler(struct pt_regs *, struct ia64_sal_os_state *);
+extern void ia64_init_handler(struct pt_regs *,
+                              struct switch_stack *,
+                              struct ia64_sal_os_state *);
 extern void ia64_monarch_init_handler(void);
 extern void ia64_slave_init_handler(void);
 extern void ia64_mca_cmc_vector_setup(void);
-extern int  ia64_reg_MCA_extension(void*);
+extern int  ia64_reg_MCA_extension(int (*fn)(void *, struct ia64_sal_os_state *));
 extern void ia64_unreg_MCA_extension(void);
+extern u64 ia64_get_rnat(u64 *);
+
+#else   /* __ASSEMBLY__ */
+
+#define IA64_MCA_CORRECTED      0x0     /* Error has been corrected by OS_MCA */
+#define IA64_MCA_WARM_BOOT      -1      /* Warm boot of the system need from SAL */
+#define IA64_MCA_COLD_BOOT      -2      /* Cold boot of the system need from SAL */
+#define IA64_MCA_HALT           -3      /* System to be halted by SAL */
+
+#define IA64_INIT_RESUME        0x0     /* Resume after return from INIT */
+#define IA64_INIT_WARM_BOOT     -1      /* Warm boot of the system need from SAL */
+
+#define IA64_MCA_SAME_CONTEXT   0x0     /* SAL to return to same context */
+#define IA64_MCA_NEW_CONTEXT    -1      /* SAL to return to new context */
 
 #endif /* !__ASSEMBLY__ */
 #endif /* _ASM_IA64_MCA_H */
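
The new ia64_reg_MCA_extension() prototype above hands the SAL/OS handover state to the extension callback. A minimal sketch of how a platform module might hook it is shown below; the handler body and its return-value convention are illustrative assumptions, not taken from this patch.

    /* Hypothetical OS_MCA extension using the new registration signature. */
    #include <asm/mca.h>

    static int example_mca_extension(void *data, struct ia64_sal_os_state *sos)
    {
            /* sos carries the SAL handover: rendezvous state / reason code,
             * monarch flag, proc_state_param and the extra interrupt registers. */
            if (sos->monarch) {
                    /* inspect sos->proc_state_param, attempt platform recovery ... */
            }
            return 0;       /* assumed: fall back to the default OS_MCA handling */
    }

    static int __init example_mca_setup(void)
    {
            return ia64_reg_MCA_extension(example_mca_extension);
    }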
diff --git a/include/asm-ia64/mca_asm.h b/include/asm-ia64/mca_asm.h
index 836953e0f91f..27c9203d8ce3 100644
--- a/include/asm-ia64/mca_asm.h
+++ b/include/asm-ia64/mca_asm.h
@@ -8,6 +8,8 @@
 * Copyright (C) 2000 David Mosberger-Tang <davidm@hpl.hp.com>
 * Copyright (C) 2002 Intel Corp.
 * Copyright (C) 2002 Jenna Hall <jenna.s.hall@intel.com>
+ * Copyright (C) 2005 Silicon Graphics, Inc
+ * Copyright (C) 2005 Keith Owens <kaos@sgi.com>
 */
 #ifndef _ASM_IA64_MCA_ASM_H
 #define _ASM_IA64_MCA_ASM_H
@@ -207,106 +209,33 @@
        ;;
 
 /*
- * The following offsets capture the order in which the
- * RSE related registers from the old context are
- * saved onto the new stack frame.
+ * The MCA and INIT stacks in struct ia64_mca_cpu look like normal kernel
+ * stacks, except that the SAL/OS state and a switch_stack are stored near the
+ * top of the MCA/INIT stack.  To support concurrent entry to MCA or INIT, as
+ * well as MCA over INIT, each event needs its own SAL/OS state.  All entries
+ * are 16 byte aligned.
  *
- * +-----------------------+
- * |NDIRTY [BSP - BSPSTORE]|
- * +-----------------------+
- * |         RNAT          |
- * +-----------------------+
- * |       BSPSTORE        |
- * +-----------------------+
- * |          IFS          |
- * +-----------------------+
- * |          PFS          |
- * +-----------------------+
- * |          RSC          |
- * +-----------------------+ <-------- Bottom of new stack frame
+ * +---------------------------+
+ * |          pt_regs          |
+ * +---------------------------+
+ * |        switch_stack       |
+ * +---------------------------+
+ * |        SAL/OS state       |
+ * +---------------------------+
+ * |    16 byte scratch area   |
+ * +---------------------------+ <-------- SP at start of C MCA handler
+ * |           .....           |
+ * +---------------------------+
+ * | RBS for MCA/INIT handler  |
+ * +---------------------------+
+ * | struct task for MCA/INIT  |
+ * +---------------------------+ <-------- Bottom of MCA/INIT stack
 */
-#define rse_rsc_offset          0
-#define rse_pfs_offset          (rse_rsc_offset+0x08)
-#define rse_ifs_offset          (rse_pfs_offset+0x08)
-#define rse_bspstore_offset     (rse_ifs_offset+0x08)
-#define rse_rnat_offset         (rse_bspstore_offset+0x08)
-#define rse_ndirty_offset       (rse_rnat_offset+0x08)
 
-/*
- * rse_switch_context
- *
- *      1. Save old RSC onto the new stack frame
- *      2. Save PFS onto new stack frame
- *      3. Cover the old frame and start a new frame.
- *      4. Save IFS onto new stack frame
- *      5. Save the old BSPSTORE on the new stack frame
- *      6. Save the old RNAT on the new stack frame
- *      7. Write BSPSTORE with the new backing store pointer
- *      8. Read and save the new BSP to calculate the #dirty registers
- * NOTE: Look at pages 11-10, 11-11 in PRM Vol 2
- */
-#define rse_switch_context(temp,p_stackframe,p_bspstore)        \
-        ;;                                                      \
-        mov     temp=ar.rsc;;                                   \
-        st8     [p_stackframe]=temp,8;;                         \
-        mov     temp=ar.pfs;;                                   \
-        st8     [p_stackframe]=temp,8;                          \
-        cover ;;                                                \
-        mov     temp=cr.ifs;;                                   \
-        st8     [p_stackframe]=temp,8;;                         \
-        mov     temp=ar.bspstore;;                              \
-        st8     [p_stackframe]=temp,8;;                         \
-        mov     temp=ar.rnat;;                                  \
-        st8     [p_stackframe]=temp,8;                          \
-        mov     ar.bspstore=p_bspstore;;                        \
-        mov     temp=ar.bsp;;                                   \
-        sub     temp=temp,p_bspstore;;                          \
-        st8     [p_stackframe]=temp,8;;
-
-/*
- * rse_return_context
- *      1. Allocate a zero-sized frame
- *      2. Store the number of dirty registers RSC.loadrs field
- *      3. Issue a loadrs to insure that any registers from the interrupted
- *         context which were saved on the new stack frame have been loaded
- *         back into the stacked registers
- *      4. Restore BSPSTORE
- *      5. Restore RNAT
- *      6. Restore PFS
- *      7. Restore IFS
- *      8. Restore RSC
- *      9. Issue an RFI
- */
-#define rse_return_context(psr_mask_reg,temp,p_stackframe)                     \
-        ;;                                                                      \
-        alloc   temp=ar.pfs,0,0,0,0;                                            \
-        add     p_stackframe=rse_ndirty_offset,p_stackframe;;                   \
-        ld8     temp=[p_stackframe];;                                           \
-        shl     temp=temp,16;;                                                  \
-        mov     ar.rsc=temp;;                                                   \
-        loadrs;;                                                                \
-        add     p_stackframe=-rse_ndirty_offset+rse_bspstore_offset,p_stackframe;;\
-        ld8     temp=[p_stackframe];;                                           \
-        mov     ar.bspstore=temp;;                                              \
-        add     p_stackframe=-rse_bspstore_offset+rse_rnat_offset,p_stackframe;;\
-        ld8     temp=[p_stackframe];;                                           \
-        mov     ar.rnat=temp;;                                                  \
-        add     p_stackframe=-rse_rnat_offset+rse_pfs_offset,p_stackframe;;     \
-        ld8     temp=[p_stackframe];;                                           \
-        mov     ar.pfs=temp;;                                                   \
-        add     p_stackframe=-rse_pfs_offset+rse_ifs_offset,p_stackframe;;      \
-        ld8     temp=[p_stackframe];;                                           \
-        mov     cr.ifs=temp;;                                                   \
-        add     p_stackframe=-rse_ifs_offset+rse_rsc_offset,p_stackframe;;      \
-        ld8     temp=[p_stackframe];;                                           \
-        mov     ar.rsc=temp ;                                                   \
-        mov     temp=psr;;                                                      \
-        or      temp=temp,psr_mask_reg;;                                        \
-        mov     cr.ipsr=temp;;                                                  \
-        mov     temp=ip;;                                                       \
-        add     temp=0x30,temp;;                                                \
-        mov     cr.iip=temp;;                                                   \
-        srlz.i;;                                                                \
-        rfi;;
+#define ALIGN16(x)                      ((x)&~15)
+#define MCA_PT_REGS_OFFSET              ALIGN16(KERNEL_STACK_SIZE-IA64_PT_REGS_SIZE)
+#define MCA_SWITCH_STACK_OFFSET         ALIGN16(MCA_PT_REGS_OFFSET-IA64_SWITCH_STACK_SIZE)
+#define MCA_SOS_OFFSET                  ALIGN16(MCA_SWITCH_STACK_OFFSET-IA64_SAL_OS_STATE_SIZE)
+#define MCA_SP_OFFSET                   ALIGN16(MCA_SOS_OFFSET-16)
 
 #endif /* _ASM_IA64_MCA_ASM_H */
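
To make the layout comment above concrete, here is a small C sketch (not part of the patch) of where the new MCA_* offsets place each piece within one KERNEL_STACK_SIZE area of struct ia64_mca_cpu; the function and variable names are hypothetical, and the offsets depend on the asm-offsets constants referenced by the macros.

    #include <linux/kernel.h>
    #include <asm/mca_asm.h>

    static void show_mca_stack_layout(unsigned long stack_base)
    {
            unsigned long pt_regs_addr      = stack_base + MCA_PT_REGS_OFFSET;
            unsigned long switch_stack_addr = stack_base + MCA_SWITCH_STACK_OFFSET;
            unsigned long sos_addr          = stack_base + MCA_SOS_OFFSET;
            unsigned long initial_sp        = stack_base + MCA_SP_OFFSET;

            /* pt_regs sits at the top, then switch_stack, then the SAL/OS state;
             * the C handler's SP starts 16 bytes below the SAL/OS state. */
            printk("pt_regs %lx switch_stack %lx sos %lx sp %lx\n",
                   pt_regs_addr, switch_stack_addr, sos_addr, initial_sp);
    }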
diff --git a/include/asm-ia64/pci.h b/include/asm-ia64/pci.h
index dba9f220be71..ef616fd4cb1b 100644
--- a/include/asm-ia64/pci.h
+++ b/include/asm-ia64/pci.h
@@ -156,6 +156,19 @@ extern void pcibios_resource_to_bus(struct pci_dev *dev,
 extern void pcibios_bus_to_resource(struct pci_dev *dev,
                struct resource *res, struct pci_bus_region *region);
 
+static inline struct resource *
+pcibios_select_root(struct pci_dev *pdev, struct resource *res)
+{
+        struct resource *root = NULL;
+
+        if (res->flags & IORESOURCE_IO)
+                root = &ioport_resource;
+        if (res->flags & IORESOURCE_MEM)
+                root = &iomem_resource;
+
+        return root;
+}
+
 #define pcibios_scan_all_fns(a, b)      0
 
 #endif /* _ASM_IA64_PCI_H */
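
A typical caller of the new pcibios_select_root() helper would look roughly like the sketch below when claiming a device BAR; the function name and error handling here are illustrative, not part of the patch.

    #include <linux/pci.h>
    #include <linux/ioport.h>
    #include <linux/errno.h>

    static int example_claim_bar(struct pci_dev *dev, int bar)
    {
            struct resource *res  = &dev->resource[bar];
            struct resource *root = pcibios_select_root(dev, res);

            if (!root)
                    return -EINVAL;         /* resource is neither IO nor MEM */
            return request_resource(root, res);     /* 0 on success, error on conflict */
    }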
diff --git a/include/asm-ia64/ptrace.h b/include/asm-ia64/ptrace.h
index 0bef19538406..fc544929ac34 100644
--- a/include/asm-ia64/ptrace.h
+++ b/include/asm-ia64/ptrace.h
@@ -57,7 +57,7 @@
 #include <linux/config.h>
 
 #include <asm/fpu.h>
-#include <asm/offsets.h>
+#include <asm/asm-offsets.h>
 
 /*
  * Base-2 logarithm of number of pages to allocate per task structure
@@ -119,7 +119,7 @@ struct pt_regs {
        unsigned long ar_unat;          /* interrupted task's NaT register (preserved) */
        unsigned long ar_pfs;           /* prev function state  */
        unsigned long ar_rsc;           /* RSE configuration */
-       /* The following two are valid only if cr_ipsr.cpl > 0: */
+       /* The following two are valid only if cr_ipsr.cpl > 0 || ti->flags & _TIF_MCA_INIT */
        unsigned long ar_rnat;          /* RSE NaT */
        unsigned long ar_bspstore;      /* RSE bspstore */
 
diff --git a/include/asm-ia64/sn/sn_feature_sets.h b/include/asm-ia64/sn/sn_feature_sets.h
new file mode 100644
index 000000000000..e68a80853d5d
--- /dev/null
+++ b/include/asm-ia64/sn/sn_feature_sets.h
@@ -0,0 +1,57 @@
+#ifndef _ASM_IA64_SN_FEATURE_SETS_H
+#define _ASM_IA64_SN_FEATURE_SETS_H
+
+/*
+ * SN PROM Features
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 2005 Silicon Graphics, Inc.  All rights reserved.
+ */
+
+
+#include <asm/types.h>
+#include <asm/bitops.h>
+
+/* --------------------- PROM Features -----------------------------*/
+extern int sn_prom_feature_available(int id);
+
+#define MAX_PROM_FEATURE_SETS                   2
+
+/*
+ * The following defines features that may or may not be supported by the
+ * current PROM. The OS uses sn_prom_feature_available(feature) to test for
+ * the presence of a PROM feature. Down rev (old) PROMs will always test
+ * "false" for new features.
+ *
+ * Use:
+ *              if (sn_prom_feature_available(PRF_FEATURE_XXX))
+ *                      ...
+ */
+
+/*
+ * Example: feature XXX
+ */
+#define PRF_FEATURE_XXX         0
+
+
+
+/* --------------------- OS Features -------------------------------*/
+
+/*
+ * The following defines OS features that are optionally present in
+ * the operating system.
+ * During boot, PROM is notified of these features via a series of calls:
+ *
+ *              ia64_sn_set_os_feature(feature1);
+ *
+ * Once enabled, a feature cannot be disabled.
+ *
+ * By default, features are disabled unless explicitly enabled.
+ */
+#define  OSF_MCA_SLV_TO_OS_INIT_SLV     0
+#define  OSF_FEAT_LOG_SBES              1
+
+#endif /* _ASM_IA64_SN_FEATURE_SETS_H */
diff --git a/include/asm-ia64/sn/sn_sal.h b/include/asm-ia64/sn/sn_sal.h
index e67825ad1930..fea35b33d4e4 100644
--- a/include/asm-ia64/sn/sn_sal.h
+++ b/include/asm-ia64/sn/sn_sal.h
@@ -80,6 +80,9 @@
 #define  SN_SAL_RESERVED_DO_NOT_USE                0x02000062
 #define  SN_SAL_IOIF_GET_PCI_TOPOLOGY              0x02000064
 
+#define  SN_SAL_GET_PROM_FEATURE_SET               0x02000065
+#define  SN_SAL_SET_OS_FEATURE_SET                 0x02000066
+
 /*
  * Service-specific constants
  */
@@ -118,8 +121,8 @@
 /*
  * Error Handling Features
  */
-#define SAL_ERR_FEAT_MCA_SLV_TO_OS_INIT_SLV     0x1
-#define SAL_ERR_FEAT_LOG_SBES                   0x2
+#define SAL_ERR_FEAT_MCA_SLV_TO_OS_INIT_SLV     0x1     // obsolete
+#define SAL_ERR_FEAT_LOG_SBES                   0x2     // obsolete
 #define SAL_ERR_FEAT_MFR_OVERRIDE               0x4
 #define SAL_ERR_FEAT_SBE_THRESHOLD              0xffff0000
 
@@ -152,12 +155,6 @@ sn_sal_rev(void)
 }
 
 /*
- * Specify the minimum PROM revsion required for this kernel.
- * Note that they're stored in hex format...
- */
-#define SN_SAL_MIN_VERSION      0x0404
-
-/*
  * Returns the master console nasid, if the call fails, return an illegal
  * value.
  */
@@ -336,7 +333,7 @@ ia64_sn_plat_cpei_handler(void)
 }
 
 /*
- * Set Error Handling Features
+ * Set Error Handling Features (Obsolete)
 */
 static inline u64
 ia64_sn_plat_set_error_handling_features(void)
@@ -1052,4 +1049,25 @@ ia64_sn_is_fake_prom(void)
        return (rv.status == 0);
 }
 
+static inline int
+ia64_sn_get_prom_feature_set(int set, unsigned long *feature_set)
+{
+        struct ia64_sal_retval rv;
+
+        SAL_CALL_NOLOCK(rv, SN_SAL_GET_PROM_FEATURE_SET, set, 0, 0, 0, 0, 0, 0);
+        if (rv.status != 0)
+                return rv.status;
+        *feature_set = rv.v0;
+        return 0;
+}
+
+static inline int
+ia64_sn_set_os_feature(int feature)
+{
+        struct ia64_sal_retval rv;
+
+        SAL_CALL_NOLOCK(rv, SN_SAL_SET_OS_FEATURE_SET, feature, 0, 0, 0, 0, 0, 0);
+        return rv.status;
+}
+
 #endif /* _ASM_IA64_SN_SN_SAL_H */
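
Together with sn_feature_sets.h above, the two new SAL wrappers are meant to be used along the lines of the sketch below (not code from the patch); treating the returned value for set 0 as a plain bitmap indexed by the PRF_* id is an assumption made for the example.

    #include <asm/sn/sn_sal.h>
    #include <asm/sn/sn_feature_sets.h>

    static int example_negotiate_features(void)
    {
            unsigned long prom_features = 0;

            /* Down-rev PROMs fail the call; treat that as "no features". */
            if (ia64_sn_get_prom_feature_set(0, &prom_features) != 0)
                    prom_features = 0;

            if (prom_features & (1UL << PRF_FEATURE_XXX)) {
                    /* PROM advertises the example feature */
            }

            /* Advertise an OS capability back to the PROM. */
            return ia64_sn_set_os_feature(OSF_MCA_SLV_TO_OS_INIT_SLV);
    }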
diff --git a/include/asm-ia64/spinlock.h b/include/asm-ia64/spinlock.h
index d2430aa0d49d..5b78611411c3 100644
--- a/include/asm-ia64/spinlock.h
+++ b/include/asm-ia64/spinlock.h
@@ -17,28 +17,20 @@
 #include <asm/intrinsics.h>
 #include <asm/system.h>
 
-typedef struct {
-        volatile unsigned int lock;
-#ifdef CONFIG_PREEMPT
-        unsigned int break_lock;
-#endif
-} spinlock_t;
-
-#define SPIN_LOCK_UNLOCKED                      (spinlock_t) { 0 }
-#define spin_lock_init(x)                       ((x)->lock = 0)
+#define __raw_spin_lock_init(x)                 ((x)->lock = 0)
 
 #ifdef ASM_SUPPORTED
 /*
 * Try to get the lock.  If we fail to get the lock, make a non-standard call to
 * ia64_spinlock_contention().  We do not use a normal call because that would force all
- * callers of spin_lock() to be non-leaf routines.  Instead, ia64_spinlock_contention() is
- * carefully coded to touch only those registers that spin_lock() marks "clobbered".
+ * callers of __raw_spin_lock() to be non-leaf routines.  Instead, ia64_spinlock_contention() is
+ * carefully coded to touch only those registers that __raw_spin_lock() marks "clobbered".
 */
 
 #define IA64_SPINLOCK_CLOBBERS "ar.ccv", "ar.pfs", "p14", "p15", "r27", "r28", "r29", "r30", "b6", "memory"
 
 static inline void
-_raw_spin_lock_flags (spinlock_t *lock, unsigned long flags)
+__raw_spin_lock_flags (raw_spinlock_t *lock, unsigned long flags)
 {
        register volatile unsigned int *ptr asm ("r31") = &lock->lock;
 
@@ -94,17 +86,17 @@ _raw_spin_lock_flags (spinlock_t *lock, unsigned long flags)
 #endif
 }
 
-#define _raw_spin_lock(lock) _raw_spin_lock_flags(lock, 0)
+#define __raw_spin_lock(lock) __raw_spin_lock_flags(lock, 0)
 
 /* Unlock by doing an ordered store and releasing the cacheline with nta */
-static inline void _raw_spin_unlock(spinlock_t *x) {
+static inline void __raw_spin_unlock(raw_spinlock_t *x) {
        barrier();
        asm volatile ("st4.rel.nta [%0] = r0\n\t" :: "r"(x));
 }
 
 #else /* !ASM_SUPPORTED */
-#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock)
-# define _raw_spin_lock(x)                                              \
+#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
+# define __raw_spin_lock(x)                                             \
 do {                                                                    \
        __u32 *ia64_spinlock_ptr = (__u32 *) (x);                        \
        __u64 ia64_spinlock_val;                                         \
@@ -117,29 +109,20 @@ do { \
        } while (ia64_spinlock_val);                                     \
        }                                                                \
 } while (0)
-#define _raw_spin_unlock(x)    do { barrier(); ((spinlock_t *) x)->lock = 0; } while (0)
+#define __raw_spin_unlock(x)   do { barrier(); ((raw_spinlock_t *) x)->lock = 0; } while (0)
 #endif /* !ASM_SUPPORTED */
 
-#define spin_is_locked(x)      ((x)->lock != 0)
-#define _raw_spin_trylock(x)   (cmpxchg_acq(&(x)->lock, 0, 1) == 0)
-#define spin_unlock_wait(x)    do { barrier(); } while ((x)->lock)
-
-typedef struct {
-        volatile unsigned int read_counter     : 24;
-        volatile unsigned int write_lock       :  8;
-#ifdef CONFIG_PREEMPT
-        unsigned int break_lock;
-#endif
-} rwlock_t;
-#define RW_LOCK_UNLOCKED (rwlock_t) { 0, 0 }
+#define __raw_spin_is_locked(x)        ((x)->lock != 0)
+#define __raw_spin_trylock(x)  (cmpxchg_acq(&(x)->lock, 0, 1) == 0)
+#define __raw_spin_unlock_wait(lock) \
+       do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0)
 
-#define rwlock_init(x)         do { *(x) = RW_LOCK_UNLOCKED; } while(0)
-#define read_can_lock(rw)      (*(volatile int *)(rw) >= 0)
-#define write_can_lock(rw)     (*(volatile int *)(rw) == 0)
+#define __raw_read_can_lock(rw)        (*(volatile int *)(rw) >= 0)
+#define __raw_write_can_lock(rw)       (*(volatile int *)(rw) == 0)
 
-#define _raw_read_lock(rw)                                              \
+#define __raw_read_lock(rw)                                             \
 do {                                                                    \
-        rwlock_t *__read_lock_ptr = (rw);                               \
+        raw_rwlock_t *__read_lock_ptr = (rw);                           \
                                                                         \
        while (unlikely(ia64_fetchadd(1, (int *) __read_lock_ptr, acq) < 0)) {  \
                ia64_fetchadd(-1, (int *) __read_lock_ptr, rel);         \
@@ -148,14 +131,14 @@ do { \
        }                                                                \
 } while (0)
 
-#define _raw_read_unlock(rw)                                            \
+#define __raw_read_unlock(rw)                                           \
 do {                                                                    \
-        rwlock_t *__read_lock_ptr = (rw);                               \
+        raw_rwlock_t *__read_lock_ptr = (rw);                           \
        ia64_fetchadd(-1, (int *) __read_lock_ptr, rel);                 \
 } while (0)
 
 #ifdef ASM_SUPPORTED
-#define _raw_write_lock(rw)                                             \
+#define __raw_write_lock(rw)                                            \
 do {                                                                    \
        __asm__ __volatile__ (                                           \
                "mov ar.ccv = r0\n"                                      \
@@ -170,7 +153,7 @@ do { \
                :: "r"(rw) : "ar.ccv", "p7", "r2", "r29", "memory");     \
 } while(0)
 
-#define _raw_write_trylock(rw)                                          \
+#define __raw_write_trylock(rw)                                         \
 ({                                                                      \
        register long result;                                            \
                                                                         \
@@ -182,7 +165,7 @@ do { \
        (result == 0);                                                   \
 })
 
-static inline void _raw_write_unlock(rwlock_t *x)
+static inline void __raw_write_unlock(raw_rwlock_t *x)
 {
        u8 *y = (u8 *)x;
        barrier();
@@ -191,7 +174,7 @@ static inline void _raw_write_unlock(rwlock_t *x)
 
 #else /* !ASM_SUPPORTED */
 
-#define _raw_write_lock(l)                                              \
+#define __raw_write_lock(l)                                             \
 ({                                                                      \
        __u64 ia64_val, ia64_set_val = ia64_dep_mi(-1, 0, 31, 1);        \
        __u32 *ia64_write_lock_ptr = (__u32 *) (l);                      \
@@ -202,7 +185,7 @@ static inline void _raw_write_unlock(rwlock_t *x)
        } while (ia64_val);                                              \
 })
 
-#define _raw_write_trylock(rw)                                          \
+#define __raw_write_trylock(rw)                                         \
 ({                                                                      \
        __u64 ia64_val;                                                  \
        __u64 ia64_set_val = ia64_dep_mi(-1, 0, 31,1);                   \
@@ -210,7 +193,7 @@ static inline void _raw_write_unlock(rwlock_t *x)
        (ia64_val == 0);                                                 \
 })
 
-static inline void _raw_write_unlock(rwlock_t *x)
+static inline void __raw_write_unlock(raw_rwlock_t *x)
 {
        barrier();
        x->write_lock = 0;
@@ -218,6 +201,6 @@ static inline void _raw_write_unlock(rwlock_t *x)
 
 #endif /* !ASM_SUPPORTED */
 
-#define _raw_read_trylock(lock) generic_raw_read_trylock(lock)
+#define __raw_read_trylock(lock) generic__raw_read_trylock(lock)
 
 #endif /* _ASM_IA64_SPINLOCK_H */
diff --git a/include/asm-ia64/spinlock_types.h b/include/asm-ia64/spinlock_types.h
new file mode 100644
index 000000000000..474e46f1ab4a
--- /dev/null
+++ b/include/asm-ia64/spinlock_types.h
@@ -0,0 +1,21 @@
+#ifndef _ASM_IA64_SPINLOCK_TYPES_H
+#define _ASM_IA64_SPINLOCK_TYPES_H
+
+#ifndef __LINUX_SPINLOCK_TYPES_H
+# error "please don't include this file directly"
+#endif
+
+typedef struct {
+        volatile unsigned int lock;
+} raw_spinlock_t;
+
+#define __RAW_SPIN_LOCK_UNLOCKED        { 0 }
+
+typedef struct {
+        volatile unsigned int read_counter      : 31;
+        volatile unsigned int write_lock        :  1;
+} raw_rwlock_t;
+
+#define __RAW_RW_LOCK_UNLOCKED          { 0, 0 }
+
+#endif
diff --git a/include/asm-ia64/system.h b/include/asm-ia64/system.h
index 33256db4a7cf..635235fa1e32 100644
--- a/include/asm-ia64/system.h
+++ b/include/asm-ia64/system.h
@@ -275,6 +275,7 @@ extern void ia64_load_extra (struct task_struct *task);
 */
 #define __ARCH_WANT_UNLOCKED_CTXSW
 
+#define ARCH_HAS_PREFETCH_SWITCH_STACK
 #define ia64_platform_is(x) (strcmp(x, platform_name) == 0)
 
 void cpu_idle_wait(void);
diff --git a/include/asm-ia64/thread_info.h b/include/asm-ia64/thread_info.h
index 7dc8951708a3..cf4a950a0f4f 100644
--- a/include/asm-ia64/thread_info.h
+++ b/include/asm-ia64/thread_info.h
@@ -5,7 +5,7 @@
 #ifndef _ASM_IA64_THREAD_INFO_H
 #define _ASM_IA64_THREAD_INFO_H
 
-#include <asm/offsets.h>
+#include <asm/asm-offsets.h>
 #include <asm/processor.h>
 #include <asm/ptrace.h>
 
@@ -76,6 +76,7 @@ struct thread_info {
 #define TIF_SIGDELAYED          5       /* signal delayed from MCA/INIT/NMI/PMI context */
 #define TIF_POLLING_NRFLAG      16      /* true if poll_idle() is polling TIF_NEED_RESCHED */
 #define TIF_MEMDIE              17
+#define TIF_MCA_INIT            18      /* this task is processing MCA or INIT */
 
 #define _TIF_SYSCALL_TRACE      (1 << TIF_SYSCALL_TRACE)
 #define _TIF_SYSCALL_AUDIT      (1 << TIF_SYSCALL_AUDIT)
@@ -85,6 +86,7 @@ struct thread_info {
 #define _TIF_NEED_RESCHED       (1 << TIF_NEED_RESCHED)
 #define _TIF_SIGDELAYED         (1 << TIF_SIGDELAYED)
 #define _TIF_POLLING_NRFLAG     (1 << TIF_POLLING_NRFLAG)
+#define _TIF_MCA_INIT           (1 << TIF_MCA_INIT)
 
 /* "work to do on user-return" bits */
 #define TIF_ALLWORK_MASK        (_TIF_NOTIFY_RESUME|_TIF_SIGPENDING|_TIF_NEED_RESCHED|_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SIGDELAYED)
diff --git a/include/asm-ia64/unwind.h b/include/asm-ia64/unwind.h
index 61426ad3ecdb..5df0276b0493 100644
--- a/include/asm-ia64/unwind.h
+++ b/include/asm-ia64/unwind.h
@@ -114,13 +114,6 @@ extern void unw_remove_unwind_table (void *handle);
 */
 extern void unw_init_from_blocked_task (struct unw_frame_info *info, struct task_struct *t);
 
-/*
- * Prepare to unwind from interruption.  The pt-regs and switch-stack structures must have
- * be "adjacent" (no state modifications between pt-regs and switch-stack).
- */
-extern void unw_init_from_interruption (struct unw_frame_info *info, struct task_struct *t,
-                                        struct pt_regs *pt, struct switch_stack *sw);
-
 extern void unw_init_frame_info (struct unw_frame_info *info, struct task_struct *t,
                                  struct switch_stack *sw);
 