Diffstat (limited to 'arch/ia64/kernel')
-rw-r--r--  arch/ia64/kernel/acpi.c         |  9
-rw-r--r--  arch/ia64/kernel/init_task.c    |  1
-rw-r--r--  arch/ia64/kernel/iosapic.c      |  2
-rw-r--r--  arch/ia64/kernel/ivt.S          | 84
-rw-r--r--  arch/ia64/kernel/minstate.h     | 46
-rw-r--r--  arch/ia64/kernel/patch.c        | 23
-rw-r--r--  arch/ia64/kernel/perfmon.c      | 10
-rw-r--r--  arch/ia64/kernel/sal.c          |  7
-rw-r--r--  arch/ia64/kernel/setup.c        | 17
-rw-r--r--  arch/ia64/kernel/time.c         |  1
-rw-r--r--  arch/ia64/kernel/vmlinux.lds.S  |  7
11 files changed, 144 insertions, 63 deletions
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
index 853d1f11be00..43687cc60dfb 100644
--- a/arch/ia64/kernel/acpi.c
+++ b/arch/ia64/kernel/acpi.c
@@ -465,7 +465,6 @@ void __init acpi_numa_slit_init(struct acpi_table_slit *slit)
 		printk(KERN_ERR
 		       "ACPI 2.0 SLIT: size mismatch: %d expected, %d actual\n",
 		       len, slit->header.length);
-		memset(numa_slit, 10, sizeof(numa_slit));
 		return;
 	}
 	slit_table = slit;
@@ -574,8 +573,14 @@ void __init acpi_numa_arch_fixup(void)
 	printk(KERN_INFO "Number of memory chunks in system = %d\n",
 	       num_node_memblks);
 
-	if (!slit_table)
+	if (!slit_table) {
+		for (i = 0; i < MAX_NUMNODES; i++)
+			for (j = 0; j < MAX_NUMNODES; j++)
+				node_distance(i, j) = i == j ? LOCAL_DISTANCE :
+							REMOTE_DISTANCE;
 		return;
+	}
+
 	memset(numa_slit, -1, sizeof(numa_slit));
 	for (i = 0; i < slit_table->locality_count; i++) {
 		if (!pxm_bit_test(i))
diff --git a/arch/ia64/kernel/init_task.c b/arch/ia64/kernel/init_task.c
index bc8efcad28b8..9d7e1c66faf4 100644
--- a/arch/ia64/kernel/init_task.c
+++ b/arch/ia64/kernel/init_task.c
@@ -18,7 +18,6 @@
 #include <asm/pgtable.h>
 
 static struct fs_struct init_fs = INIT_FS;
-static struct files_struct init_files = INIT_FILES;
 static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
 static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
 struct mm_struct init_mm = INIT_MM(init_mm);
diff --git a/arch/ia64/kernel/iosapic.c b/arch/ia64/kernel/iosapic.c
index 082c31dcfd99..39752cdef6ff 100644
--- a/arch/ia64/kernel/iosapic.c
+++ b/arch/ia64/kernel/iosapic.c
@@ -558,8 +558,6 @@ static struct iosapic_rte_info * __init_refok iosapic_alloc_rte (void)
 	if (!iosapic_kmalloc_ok && list_empty(&free_rte_list)) {
 		rte = alloc_bootmem(sizeof(struct iosapic_rte_info) *
 				    NR_PREALLOCATE_RTE_ENTRIES);
-		if (!rte)
-			return NULL;
 		for (i = 0; i < NR_PREALLOCATE_RTE_ENTRIES; i++, rte++)
 			list_add(&rte->rte_list, &free_rte_list);
 	}
diff --git a/arch/ia64/kernel/ivt.S b/arch/ia64/kernel/ivt.S
index 6678c49daba3..80b44ea052d7 100644
--- a/arch/ia64/kernel/ivt.S
+++ b/arch/ia64/kernel/ivt.S
@@ -1076,48 +1076,6 @@ END(ia64_syscall_setup)
 	DBG_FAULT(15)
 	FAULT(15)
 
-	/*
-	 * Squatting in this space ...
-	 *
-	 * This special case dispatcher for illegal operation faults allows preserved
-	 * registers to be modified through a callback function (asm only) that is handed
-	 * back from the fault handler in r8. Up to three arguments can be passed to the
-	 * callback function by returning an aggregate with the callback as its first
-	 * element, followed by the arguments.
-	 */
-ENTRY(dispatch_illegal_op_fault)
-	.prologue
-	.body
-	SAVE_MIN_WITH_COVER
-	ssm psr.ic | PSR_DEFAULT_BITS
-	;;
-	srlz.i		// guarantee that interruption collection is on
-	;;
-(p15)	ssm psr.i	// restore psr.i
-	adds r3=8,r2	// set up second base pointer for SAVE_REST
-	;;
-	alloc r14=ar.pfs,0,0,1,0	// must be first in insn group
-	mov out0=ar.ec
-	;;
-	SAVE_REST
-	PT_REGS_UNWIND_INFO(0)
-	;;
-	br.call.sptk.many rp=ia64_illegal_op_fault
-.ret0:	;;
-	alloc r14=ar.pfs,0,0,3,0	// must be first in insn group
-	mov out0=r9
-	mov out1=r10
-	mov out2=r11
-	movl r15=ia64_leave_kernel
-	;;
-	mov rp=r15
-	mov b6=r8
-	;;
-	cmp.ne p6,p0=0,r8
-(p6)	br.call.dpnt.many b6=b6		// call returns to ia64_leave_kernel
-	br.sptk.many ia64_leave_kernel
-END(dispatch_illegal_op_fault)
-
 	.org ia64_ivt+0x4000
 /////////////////////////////////////////////////////////////////////////////////////////
 // 0x4000 Entry 16 (size 64 bundles) Reserved
@@ -1715,6 +1673,48 @@ END(ia32_interrupt)
 	DBG_FAULT(67)
 	FAULT(67)
 
+	/*
+	 * Squatting in this space ...
+	 *
+	 * This special case dispatcher for illegal operation faults allows preserved
+	 * registers to be modified through a callback function (asm only) that is handed
+	 * back from the fault handler in r8. Up to three arguments can be passed to the
+	 * callback function by returning an aggregate with the callback as its first
+	 * element, followed by the arguments.
+	 */
+ENTRY(dispatch_illegal_op_fault)
+	.prologue
+	.body
+	SAVE_MIN_WITH_COVER
+	ssm psr.ic | PSR_DEFAULT_BITS
+	;;
+	srlz.i		// guarantee that interruption collection is on
+	;;
+(p15)	ssm psr.i	// restore psr.i
+	adds r3=8,r2	// set up second base pointer for SAVE_REST
+	;;
+	alloc r14=ar.pfs,0,0,1,0	// must be first in insn group
+	mov out0=ar.ec
+	;;
+	SAVE_REST
+	PT_REGS_UNWIND_INFO(0)
+	;;
+	br.call.sptk.many rp=ia64_illegal_op_fault
+.ret0:	;;
+	alloc r14=ar.pfs,0,0,3,0	// must be first in insn group
+	mov out0=r9
+	mov out1=r10
+	mov out2=r11
+	movl r15=ia64_leave_kernel
+	;;
+	mov rp=r15
+	mov b6=r8
+	;;
+	cmp.ne p6,p0=0,r8
+(p6)	br.call.dpnt.many b6=b6		// call returns to ia64_leave_kernel
+	br.sptk.many ia64_leave_kernel
+END(dispatch_illegal_op_fault)
+
 #ifdef CONFIG_IA32_SUPPORT
 
 	/*
diff --git a/arch/ia64/kernel/minstate.h b/arch/ia64/kernel/minstate.h
index 7c548ac52bbc..74b6d670aaef 100644
--- a/arch/ia64/kernel/minstate.h
+++ b/arch/ia64/kernel/minstate.h
@@ -15,6 +15,9 @@
 #define ACCOUNT_SYS_ENTER
 #endif
 
+.section ".data.patch.rse", "a"
+.previous
+
 /*
  * DO_SAVE_MIN switches to the kernel stacks (if necessary) and saves
  * the minimum state necessary that allows us to turn psr.ic back
@@ -40,7 +43,7 @@
  * Note that psr.ic is NOT turned on by this macro. This is so that
  * we can pass interruption state as arguments to a handler.
  */
-#define DO_SAVE_MIN(COVER,SAVE_IFS,EXTRA)					\
+#define DO_SAVE_MIN(COVER,SAVE_IFS,EXTRA,WORKAROUND)				\
 	mov r16=IA64_KR(CURRENT);	/* M */					\
 	mov r27=ar.rsc;			/* M */					\
 	mov r20=r1;			/* A */					\
@@ -87,6 +90,7 @@
 	tbit.nz p15,p0=r29,IA64_PSR_I_BIT;					\
 	mov r29=b0								\
 	;;									\
+	WORKAROUND;								\
 	adds r16=PT(R8),r1;	/* initialize first base pointer */		\
 	adds r17=PT(R9),r1;	/* initialize second base pointer */		\
 (pKStk)	mov r18=r0;		/* make sure r18 isn't NaT */			\
@@ -206,6 +210,40 @@
 	st8 [r25]=r10;		/* ar.ssd */					\
 	;;
 
-#define SAVE_MIN_WITH_COVER	DO_SAVE_MIN(cover, mov r30=cr.ifs,)
-#define SAVE_MIN_WITH_COVER_R19	DO_SAVE_MIN(cover, mov r30=cr.ifs, mov r15=r19)
-#define SAVE_MIN		DO_SAVE_MIN( , mov r30=r0, )
+#define RSE_WORKAROUND				\
+(pUStk)	extr.u r17=r18,3,6;			\
+(pUStk)	sub r16=r18,r22;			\
+[1:](pKStk)	br.cond.sptk.many 1f;		\
+	.xdata4 ".data.patch.rse",1b-.		\
+	;;					\
+	cmp.ge p6,p7 = 33,r17;			\
+	;;					\
+(p6)	mov r17=0x310;				\
+(p7)	mov r17=0x308;				\
+	;;					\
+	cmp.leu p1,p0=r16,r17;			\
+(p1)	br.cond.sptk.many 1f;			\
+	dep.z r17=r26,0,62;			\
+	movl r16=2f;				\
+	;;					\
+	mov ar.pfs=r17;				\
+	dep r27=r0,r27,16,14;			\
+	mov b0=r16;				\
+	;;					\
+	br.ret.sptk b0;				\
+	;;					\
+2:						\
+	mov ar.rsc=r0				\
+	;;					\
+	flushrs;				\
+	;;					\
+	mov ar.bspstore=r22			\
+	;;					\
+	mov r18=ar.bsp;				\
+	;;					\
+1:						\
+	.pred.rel "mutex", pKStk, pUStk
+
+#define SAVE_MIN_WITH_COVER	DO_SAVE_MIN(cover, mov r30=cr.ifs, , RSE_WORKAROUND)
+#define SAVE_MIN_WITH_COVER_R19	DO_SAVE_MIN(cover, mov r30=cr.ifs, mov r15=r19, RSE_WORKAROUND)
+#define SAVE_MIN		DO_SAVE_MIN( , mov r30=r0, , )
diff --git a/arch/ia64/kernel/patch.c b/arch/ia64/kernel/patch.c
index e0dca8743dbb..b83b2c516008 100644
--- a/arch/ia64/kernel/patch.c
+++ b/arch/ia64/kernel/patch.c
@@ -115,6 +115,29 @@ ia64_patch_vtop (unsigned long start, unsigned long end)
 	ia64_srlz_i();
 }
 
+/*
+ * Disable the RSE workaround by turning the conditional branch
+ * that we tagged in each place the workaround was used into an
+ * unconditional branch.
+ */
+void __init
+ia64_patch_rse (unsigned long start, unsigned long end)
+{
+	s32 *offp = (s32 *) start;
+	u64 ip, *b;
+
+	while (offp < (s32 *) end) {
+		ip = (u64) offp + *offp;
+
+		b = (u64 *)(ip & -16);
+		b[1] &= ~0xf800000L;
+		ia64_fc((void *) ip);
+		++offp;
+	}
+	ia64_sync_i();
+	ia64_srlz_i();
+}
+
 void __init
 ia64_patch_mckinley_e9 (unsigned long start, unsigned long end)
 {
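Note: the new .data.patch.rse list uses the same self-relative offset convention as the existing vtop and McKinley-E9 patch lists. Each `.xdata4 ".data.patch.rse", 1b-.` tag in RSE_WORKAROUND records a 32-bit offset from the list entry to the tagged branch bundle; ia64_patch_rse() above recovers the address with ip = (u64) offp + *offp, rounds down to the 16-byte bundle, and masks bits in its second word so that (per the comment above the function) the tagged conditional branch becomes unconditional and the workaround body is skipped. The following stand-alone C sketch is illustrative only, not part of the patch; the array names are invented and it reproduces just the offset arithmetic:

/*
 * Stand-alone sketch of the self-relative patch-list arithmetic used by
 * ia64_patch_rse() above.  "site_a"/"site_b" stand in for the tagged
 * bundles and "patchlist" for the .data.patch.rse section.
 */
#include <stdint.h>
#include <stdio.h>

static char site_a[16], site_b[16];	/* stand-ins for 16-byte bundles */
static int32_t patchlist[2];		/* stand-in for .data.patch.rse */

int main(void)
{
	/* Record entry-relative offsets, as ".xdata4 ..., 1b-." does. */
	patchlist[0] = (int32_t)((intptr_t)site_a - (intptr_t)&patchlist[0]);
	patchlist[1] = (int32_t)((intptr_t)site_b - (intptr_t)&patchlist[1]);

	/* Walk the list the same way ia64_patch_rse() does. */
	for (int32_t *offp = patchlist; offp < patchlist + 2; ++offp) {
		uintptr_t ip = (uintptr_t)offp + *offp;		/* tagged address */
		uintptr_t bundle = ip & ~(uintptr_t)15;		/* 16-byte bundle */

		printf("entry %td: ip=%p bundle=%p\n",
		       offp - patchlist, (void *)ip, (void *)bundle);
	}
	return 0;
}

The 32-bit entry-relative offsets stay valid wherever the kernel is loaded, which is why all of these boot-time patch sections can share the same walking code.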
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index 71d05133f556..7714a97b0104 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -1864,11 +1864,6 @@ pfm_flush(struct file *filp, fl_owner_t id)
 	 * invoked after, it will find an empty queue and no
 	 * signal will be sent. In both case, we are safe
 	 */
-	if (filp->f_flags & FASYNC) {
-		DPRINT(("cleaning up async_queue=%p\n", ctx->ctx_async_queue));
-		pfm_do_fasync (-1, filp, ctx, 0);
-	}
-
 	PROTECT_CTX(ctx, flags);
 
 	state = ctx->ctx_state;
@@ -1999,6 +1994,11 @@ pfm_close(struct inode *inode, struct file *filp)
 		return -EBADF;
 	}
 
+	if (filp->f_flags & FASYNC) {
+		DPRINT(("cleaning up async_queue=%p\n", ctx->ctx_async_queue));
+		pfm_do_fasync(-1, filp, ctx, 0);
+	}
+
 	PROTECT_CTX(ctx, flags);
 
 	state = ctx->ctx_state;
diff --git a/arch/ia64/kernel/sal.c b/arch/ia64/kernel/sal.c
index 7e0259709c04..0464173ea568 100644
--- a/arch/ia64/kernel/sal.c
+++ b/arch/ia64/kernel/sal.c
@@ -252,11 +252,10 @@ check_sal_cache_flush (void)
 	local_irq_save(flags);
 
 	/*
-	 * Schedule a timer interrupt, wait until it's reported, and see if
-	 * SAL_CACHE_FLUSH drops it.
+	 * Send ourselves a timer interrupt, wait until it's reported, and see
+	 * if SAL_CACHE_FLUSH drops it.
 	 */
-	ia64_set_itv(IA64_TIMER_VECTOR);
-	ia64_set_itm(ia64_get_itc() + 1000);
+	platform_send_ipi(cpu, IA64_TIMER_VECTOR, IA64_IPI_DM_INT, 0);
 
 	while (!ia64_get_irr(IA64_TIMER_VECTOR))
 		cpu_relax();
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index e9596cd0cdab..632cda8f2e76 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -547,7 +547,8 @@ setup_arch (char **cmdline_p)
 # ifdef CONFIG_ACPI_NUMA
 	acpi_numa_init();
 	per_cpu_scan_finalize((cpus_weight(early_cpu_possible_map) == 0 ?
-		32 : cpus_weight(early_cpu_possible_map)), additional_cpus);
+		32 : cpus_weight(early_cpu_possible_map)),
+		additional_cpus > 0 ? additional_cpus : 0);
 # endif
 #else
 # ifdef CONFIG_SMP
@@ -560,6 +561,17 @@
 	/* process SAL system table: */
 	ia64_sal_init(__va(efi.sal_systab));
 
+#ifdef CONFIG_ITANIUM
+	ia64_patch_rse((u64) __start___rse_patchlist, (u64) __end___rse_patchlist);
+#else
+	{
+		u64 num_phys_stacked;
+
+		if (ia64_pal_rse_info(&num_phys_stacked, 0) == 0 && num_phys_stacked > 96)
+			ia64_patch_rse((u64) __start___rse_patchlist, (u64) __end___rse_patchlist);
+	}
+#endif
+
 #ifdef CONFIG_SMP
 	cpu_physical_id(0) = hard_smp_processor_id();
 #endif
@@ -567,8 +579,6 @@
 	cpu_init();	/* initialize the bootstrap CPU */
 	mmu_context_init();	/* initialize context_id bitmap */
 
-	check_sal_cache_flush();
-
 #ifdef CONFIG_ACPI
 	acpi_boot_init();
 #endif
@@ -596,6 +606,7 @@
 	ia64_mca_init();
 
 	platform_setup(cmdline_p);
+	check_sal_cache_flush();
 	paging_init();
 }
 
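Note: read together with ia64_patch_rse() above, the setup.c hunk means the workaround stays active only where it may be needed: CONFIG_ITANIUM kernels always patch the tagged branches out, and other CPUs patch them out when PAL reports more than 96 physical stacked registers. A hedged restatement of that decision as a stand-alone helper (hypothetical function, not in the patch; assumes the ia64_pal_rse_info() call used in the diff and kernel types from <linux/types.h>):

/*
 * Illustrative helper, not part of the patch: nonzero means the tagged
 * RSE_WORKAROUND branches are left conditional (workaround active),
 * i.e. setup_arch() above does NOT call ia64_patch_rse().
 */
static int __init rse_workaround_active(void)
{
#ifdef CONFIG_ITANIUM
	return 0;			/* always patched out on Itanium */
#else
	u64 num_phys_stacked;

	/* Keep the workaround when PAL can't be queried or reports <= 96. */
	if (ia64_pal_rse_info(&num_phys_stacked, 0) != 0)
		return 1;
	return num_phys_stacked <= 96;
#endif
}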
diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c
index 8c73643f2d66..aad1b7b1fff9 100644
--- a/arch/ia64/kernel/time.c
+++ b/arch/ia64/kernel/time.c
@@ -117,6 +117,7 @@ void account_system_vtime(struct task_struct *tsk)
 
 	local_irq_restore(flags);
 }
+EXPORT_SYMBOL_GPL(account_system_vtime);
 
 /*
  * Called from the timer interrupt handler to charge accumulated user time
diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
index 80622acc95de..5929ab10a289 100644
--- a/arch/ia64/kernel/vmlinux.lds.S
+++ b/arch/ia64/kernel/vmlinux.lds.S
@@ -156,6 +156,13 @@ SECTIONS
 	  __end___vtop_patchlist = .;
 	}
 
+  .data.patch.rse : AT(ADDR(.data.patch.rse) - LOAD_OFFSET)
+	{
+	  __start___rse_patchlist = .;
+	  *(.data.patch.rse)
+	  __end___rse_patchlist = .;
+	}
+
   .data.patch.mckinley_e9 : AT(ADDR(.data.patch.mckinley_e9) - LOAD_OFFSET)
 	{
 	  __start___mckinley_e9_bundles = .;