Diffstat (limited to 'arch/ia64/kernel')
-rw-r--r--  arch/ia64/kernel/acpi.c          9
-rw-r--r--  arch/ia64/kernel/init_task.c     1
-rw-r--r--  arch/ia64/kernel/ivt.S          84
-rw-r--r--  arch/ia64/kernel/minstate.h     46
-rw-r--r--  arch/ia64/kernel/patch.c        23
-rw-r--r--  arch/ia64/kernel/setup.c        11
-rw-r--r--  arch/ia64/kernel/vmlinux.lds.S   7
7 files changed, 132 insertions, 49 deletions
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
index 853d1f11be00..43687cc60dfb 100644
--- a/arch/ia64/kernel/acpi.c
+++ b/arch/ia64/kernel/acpi.c
@@ -465,7 +465,6 @@ void __init acpi_numa_slit_init(struct acpi_table_slit *slit)
 		printk(KERN_ERR
 		       "ACPI 2.0 SLIT: size mismatch: %d expected, %d actual\n",
 		       len, slit->header.length);
-		memset(numa_slit, 10, sizeof(numa_slit));
 		return;
 	}
 	slit_table = slit;
@@ -574,8 +573,14 @@ void __init acpi_numa_arch_fixup(void)
 	printk(KERN_INFO "Number of memory chunks in system = %d\n",
 	       num_node_memblks);
 
-	if (!slit_table)
+	if (!slit_table) {
+		for (i = 0; i < MAX_NUMNODES; i++)
+			for (j = 0; j < MAX_NUMNODES; j++)
+				node_distance(i, j) = i == j ? LOCAL_DISTANCE :
+							REMOTE_DISTANCE;
 		return;
+	}
+
 	memset(numa_slit, -1, sizeof(numa_slit));
 	for (i = 0; i < slit_table->locality_count; i++) {
 		if (!pxm_bit_test(i))
diff --git a/arch/ia64/kernel/init_task.c b/arch/ia64/kernel/init_task.c
index bc8efcad28b8..9d7e1c66faf4 100644
--- a/arch/ia64/kernel/init_task.c
+++ b/arch/ia64/kernel/init_task.c
@@ -18,7 +18,6 @@
 #include <asm/pgtable.h>
 
 static struct fs_struct init_fs = INIT_FS;
-static struct files_struct init_files = INIT_FILES;
 static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
 static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
 struct mm_struct init_mm = INIT_MM(init_mm);
diff --git a/arch/ia64/kernel/ivt.S b/arch/ia64/kernel/ivt.S
index 6678c49daba3..80b44ea052d7 100644
--- a/arch/ia64/kernel/ivt.S
+++ b/arch/ia64/kernel/ivt.S
@@ -1076,48 +1076,6 @@ END(ia64_syscall_setup)
 	DBG_FAULT(15)
 	FAULT(15)
 
-	/*
-	 * Squatting in this space ...
-	 *
-	 * This special case dispatcher for illegal operation faults allows preserved
-	 * registers to be modified through a callback function (asm only) that is handed
-	 * back from the fault handler in r8. Up to three arguments can be passed to the
-	 * callback function by returning an aggregate with the callback as its first
-	 * element, followed by the arguments.
-	 */
-ENTRY(dispatch_illegal_op_fault)
-	.prologue
-	.body
-	SAVE_MIN_WITH_COVER
-	ssm psr.ic | PSR_DEFAULT_BITS
-	;;
-	srlz.i		// guarantee that interruption collection is on
-	;;
-(p15)	ssm psr.i	// restore psr.i
-	adds r3=8,r2	// set up second base pointer for SAVE_REST
-	;;
-	alloc r14=ar.pfs,0,0,1,0	// must be first in insn group
-	mov out0=ar.ec
-	;;
-	SAVE_REST
-	PT_REGS_UNWIND_INFO(0)
-	;;
-	br.call.sptk.many rp=ia64_illegal_op_fault
-.ret0:	;;
-	alloc r14=ar.pfs,0,0,3,0	// must be first in insn group
-	mov out0=r9
-	mov out1=r10
-	mov out2=r11
-	movl r15=ia64_leave_kernel
-	;;
-	mov rp=r15
-	mov b6=r8
-	;;
-	cmp.ne p6,p0=0,r8
-(p6)	br.call.dpnt.many b6=b6		// call returns to ia64_leave_kernel
-	br.sptk.many ia64_leave_kernel
-END(dispatch_illegal_op_fault)
-
 	.org ia64_ivt+0x4000
 /////////////////////////////////////////////////////////////////////////////////////////
 // 0x4000 Entry 16 (size 64 bundles) Reserved
@@ -1715,6 +1673,48 @@ END(ia32_interrupt)
 	DBG_FAULT(67)
 	FAULT(67)
 
+	/*
+	 * Squatting in this space ...
+	 *
+	 * This special case dispatcher for illegal operation faults allows preserved
+	 * registers to be modified through a callback function (asm only) that is handed
+	 * back from the fault handler in r8. Up to three arguments can be passed to the
+	 * callback function by returning an aggregate with the callback as its first
+	 * element, followed by the arguments.
+	 */
+ENTRY(dispatch_illegal_op_fault)
+	.prologue
+	.body
+	SAVE_MIN_WITH_COVER
+	ssm psr.ic | PSR_DEFAULT_BITS
+	;;
+	srlz.i		// guarantee that interruption collection is on
+	;;
+(p15)	ssm psr.i	// restore psr.i
+	adds r3=8,r2	// set up second base pointer for SAVE_REST
+	;;
+	alloc r14=ar.pfs,0,0,1,0	// must be first in insn group
+	mov out0=ar.ec
+	;;
+	SAVE_REST
+	PT_REGS_UNWIND_INFO(0)
+	;;
+	br.call.sptk.many rp=ia64_illegal_op_fault
+.ret0:	;;
+	alloc r14=ar.pfs,0,0,3,0	// must be first in insn group
+	mov out0=r9
+	mov out1=r10
+	mov out2=r11
+	movl r15=ia64_leave_kernel
+	;;
+	mov rp=r15
+	mov b6=r8
+	;;
+	cmp.ne p6,p0=0,r8
+(p6)	br.call.dpnt.many b6=b6		// call returns to ia64_leave_kernel
+	br.sptk.many ia64_leave_kernel
+END(dispatch_illegal_op_fault)
+
 #ifdef CONFIG_IA32_SUPPORT
 
 	/*
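Note on the relocated block above: its comment describes an aggregate-return convention in which the C fault handler hands an asm callback back in r8 and up to three arguments in r9-r11, which the dispatcher then forwards as out0-out2. A rough C-side sketch of such a return type follows; the kernel's real definition sits next to ia64_illegal_op_fault() in traps.c, and the field names here are only illustrative.

/* Illustrative sketch only: a four-word aggregate returned by the C fault
 * handler comes back in r8..r11, matching the dispatcher's "mov b6=r8",
 * "mov out0=r9", "mov out1=r10", "mov out2=r11" above. */
struct illegal_op_return {
	unsigned long fkt;			/* asm callback, 0 = no callback */
	unsigned long arg1, arg2, arg3;		/* forwarded to the callback */
};

The dispatcher's "cmp.ne p6,p0=0,r8" check corresponds to the first field being non-zero.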
diff --git a/arch/ia64/kernel/minstate.h b/arch/ia64/kernel/minstate.h
index 7c548ac52bbc..74b6d670aaef 100644
--- a/arch/ia64/kernel/minstate.h
+++ b/arch/ia64/kernel/minstate.h
@@ -15,6 +15,9 @@
 #define ACCOUNT_SYS_ENTER
 #endif
 
+.section ".data.patch.rse", "a"
+.previous
+
 /*
  * DO_SAVE_MIN switches to the kernel stacks (if necessary) and saves
  * the minimum state necessary that allows us to turn psr.ic back
@@ -40,7 +43,7 @@
  * Note that psr.ic is NOT turned on by this macro. This is so that
  * we can pass interruption state as arguments to a handler.
  */
-#define DO_SAVE_MIN(COVER,SAVE_IFS,EXTRA)						\
+#define DO_SAVE_MIN(COVER,SAVE_IFS,EXTRA,WORKAROUND)					\
 	mov r16=IA64_KR(CURRENT);	/* M */						\
 	mov r27=ar.rsc;			/* M */						\
 	mov r20=r1;			/* A */						\
@@ -87,6 +90,7 @@
 	tbit.nz p15,p0=r29,IA64_PSR_I_BIT;						\
 	mov r29=b0									\
 	;;										\
+	WORKAROUND;									\
 	adds r16=PT(R8),r1;	/* initialize first base pointer */			\
 	adds r17=PT(R9),r1;	/* initialize second base pointer */			\
 (pKStk)	mov r18=r0;		/* make sure r18 isn't NaT */				\
@@ -206,6 +210,40 @@
 	st8 [r25]=r10;		/* ar.ssd */						\
 	;;
 
-#define SAVE_MIN_WITH_COVER	DO_SAVE_MIN(cover, mov r30=cr.ifs,)
-#define SAVE_MIN_WITH_COVER_R19	DO_SAVE_MIN(cover, mov r30=cr.ifs, mov r15=r19)
-#define SAVE_MIN		DO_SAVE_MIN(     , mov r30=r0, )
+#define RSE_WORKAROUND				\
+(pUStk) extr.u	r17=r18,3,6;			\
+(pUStk)	sub	r16=r18,r22;			\
+[1:](pKStk)	br.cond.sptk.many 1f;		\
+	.xdata4 ".data.patch.rse",1b-.		\
+	;;					\
+	cmp.ge p6,p7 = 33,r17;			\
+	;;					\
+(p6)	mov r17=0x310;				\
+(p7)	mov r17=0x308;				\
+	;;					\
+	cmp.leu p1,p0=r16,r17;			\
+(p1)	br.cond.sptk.many 1f;			\
+	dep.z r17=r26,0,62;			\
+	movl r16=2f;				\
+	;;					\
+	mov ar.pfs=r17;				\
+	dep r27=r0,r27,16,14;			\
+	mov b0=r16;				\
+	;;					\
+	br.ret.sptk b0;				\
+	;;					\
+2:						\
+	mov ar.rsc=r0				\
+	;;					\
+	flushrs;				\
+	;;					\
+	mov ar.bspstore=r22			\
+	;;					\
+	mov r18=ar.bsp;				\
+	;;					\
+1:						\
+	.pred.rel "mutex", pKStk, pUStk
+
+#define SAVE_MIN_WITH_COVER	DO_SAVE_MIN(cover, mov r30=cr.ifs, , RSE_WORKAROUND)
+#define SAVE_MIN_WITH_COVER_R19	DO_SAVE_MIN(cover, mov r30=cr.ifs, mov r15=r19, RSE_WORKAROUND)
+#define SAVE_MIN		DO_SAVE_MIN(     , mov r30=r0, , )
diff --git a/arch/ia64/kernel/patch.c b/arch/ia64/kernel/patch.c
index e0dca8743dbb..b83b2c516008 100644
--- a/arch/ia64/kernel/patch.c
+++ b/arch/ia64/kernel/patch.c
@@ -115,6 +115,29 @@ ia64_patch_vtop (unsigned long start, unsigned long end)
 	ia64_srlz_i();
 }
 
+/*
+ * Disable the RSE workaround by turning the conditional branch
+ * that we tagged in each place the workaround was used into an
+ * unconditional branch.
+ */
+void __init
+ia64_patch_rse (unsigned long start, unsigned long end)
+{
+	s32 *offp = (s32 *) start;
+	u64 ip, *b;
+
+	while (offp < (s32 *) end) {
+		ip = (u64) offp + *offp;
+
+		b = (u64 *)(ip & -16);
+		b[1] &= ~0xf800000L;
+		ia64_fc((void *) ip);
+		++offp;
+	}
+	ia64_sync_i();
+	ia64_srlz_i();
+}
+
 void __init
 ia64_patch_mckinley_e9 (unsigned long start, unsigned long end)
 {
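Note on ia64_patch_rse() above: each 32-bit entry in .data.patch.rse is a self-relative offset left behind by the .xdata4 ".data.patch.rse",1b-. tag in RSE_WORKAROUND (minstate.h above), so "ip = (u64) offp + *offp" recovers the address of the tagged branch and "ip & -16" its 16-byte bundle, whose second word is then masked to turn that branch unconditional, per the comment. A standalone user-space sketch of the offset arithmetic, with invented names and the offset filled in at run time instead of by .xdata4:

/* Sketch of self-relative patch-list resolution as done by ia64_patch_rse();
 * names and data are illustrative, not from the kernel. */
#include <stdint.h>
#include <stdio.h>

static unsigned char text[64];		/* stand-in for the patched code     */
static int32_t patchlist[1];		/* stand-in for .data.patch.rse      */

int main(void)
{
	/* Record "target minus entry location", as .xdata4 "...",1b-. does. */
	patchlist[0] = (int32_t)((intptr_t)&text[16] - (intptr_t)&patchlist[0]);

	/* Resolve it back, mirroring the while loop in ia64_patch_rse(). */
	for (int32_t *offp = patchlist; offp < patchlist + 1; ++offp) {
		uintptr_t ip = (uintptr_t)offp + *offp;		/* tagged address    */
		uintptr_t bundle = ip & ~(uintptr_t)15;		/* same as ip & -16  */
		printf("entry resolves to %p (bundle %p)\n",
		       (void *)ip, (void *)bundle);
	}
	return 0;
}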
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index e9596cd0cdab..f48a809c686d 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -560,6 +560,17 @@ setup_arch (char **cmdline_p)
 	/* process SAL system table: */
 	ia64_sal_init(__va(efi.sal_systab));
 
+#ifdef CONFIG_ITANIUM
+	ia64_patch_rse((u64) __start___rse_patchlist, (u64) __end___rse_patchlist);
+#else
+	{
+		u64 num_phys_stacked;
+
+		if (ia64_pal_rse_info(&num_phys_stacked, 0) == 0 && num_phys_stacked > 96)
+			ia64_patch_rse((u64) __start___rse_patchlist, (u64) __end___rse_patchlist);
+	}
+#endif
+
 #ifdef CONFIG_SMP
 	cpu_physical_id(0) = hard_smp_processor_id();
 #endif
diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
index 80622acc95de..5929ab10a289 100644
--- a/arch/ia64/kernel/vmlinux.lds.S
+++ b/arch/ia64/kernel/vmlinux.lds.S
@@ -156,6 +156,13 @@ SECTIONS
 	  __end___vtop_patchlist = .;
 	}
 
+  .data.patch.rse : AT(ADDR(.data.patch.rse) - LOAD_OFFSET)
+	{
+	  __start___rse_patchlist = .;
+	  *(.data.patch.rse)
+	  __end___rse_patchlist = .;
+	}
+
   .data.patch.mckinley_e9 : AT(ADDR(.data.patch.mckinley_e9) - LOAD_OFFSET)
 	{
 	  __start___mckinley_e9_bundles = .;
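For reference, the __start___rse_patchlist/__end___rse_patchlist boundaries defined here are what setup_arch() passes to ia64_patch_rse() above. Their C-side declaration is not part of the arch/ia64/kernel diffstat shown; presumably it mirrors the other patch-list boundary symbols, along these lines (an assumption, not taken from this diff):

/* Assumed declaration, mirroring the existing patch-list boundary symbols;
 * the actual header lives outside the diffstat shown above. */
extern char __start___rse_patchlist[], __end___rse_patchlist[];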