diff options
61 files changed, 3188 insertions, 290 deletions
diff --git a/arch/ia64/include/asm/intrinsics.h b/arch/ia64/include/asm/intrinsics.h index c47830e26cb7..111ed5222892 100644 --- a/arch/ia64/include/asm/intrinsics.h +++ b/arch/ia64/include/asm/intrinsics.h | |||
| @@ -202,7 +202,11 @@ extern long ia64_cmpxchg_called_with_bad_pointer (void); | |||
| 202 | 202 | ||
| 203 | #ifndef __ASSEMBLY__ | 203 | #ifndef __ASSEMBLY__ |
| 204 | #if defined(CONFIG_PARAVIRT) && defined(__KERNEL__) | 204 | #if defined(CONFIG_PARAVIRT) && defined(__KERNEL__) |
| 205 | #define IA64_INTRINSIC_API(name) pv_cpu_ops.name | 205 | #ifdef ASM_SUPPORTED |
| 206 | # define IA64_INTRINSIC_API(name) paravirt_ ## name | ||
| 207 | #else | ||
| 208 | # define IA64_INTRINSIC_API(name) pv_cpu_ops.name | ||
| 209 | #endif | ||
| 206 | #define IA64_INTRINSIC_MACRO(name) paravirt_ ## name | 210 | #define IA64_INTRINSIC_MACRO(name) paravirt_ ## name |
| 207 | #else | 211 | #else |
| 208 | #define IA64_INTRINSIC_API(name) ia64_native_ ## name | 212 | #define IA64_INTRINSIC_API(name) ia64_native_ ## name |
diff --git a/arch/ia64/include/asm/mmu_context.h b/arch/ia64/include/asm/mmu_context.h index 040bc87db930..7f2a456603cb 100644 --- a/arch/ia64/include/asm/mmu_context.h +++ b/arch/ia64/include/asm/mmu_context.h | |||
| @@ -87,7 +87,7 @@ get_mmu_context (struct mm_struct *mm) | |||
| 87 | /* re-check, now that we've got the lock: */ | 87 | /* re-check, now that we've got the lock: */ |
| 88 | context = mm->context; | 88 | context = mm->context; |
| 89 | if (context == 0) { | 89 | if (context == 0) { |
| 90 | cpus_clear(mm->cpu_vm_mask); | 90 | cpumask_clear(mm_cpumask(mm)); |
| 91 | if (ia64_ctx.next >= ia64_ctx.limit) { | 91 | if (ia64_ctx.next >= ia64_ctx.limit) { |
| 92 | ia64_ctx.next = find_next_zero_bit(ia64_ctx.bitmap, | 92 | ia64_ctx.next = find_next_zero_bit(ia64_ctx.bitmap, |
| 93 | ia64_ctx.max_ctx, ia64_ctx.next); | 93 | ia64_ctx.max_ctx, ia64_ctx.next); |
| @@ -166,8 +166,8 @@ activate_context (struct mm_struct *mm) | |||
| 166 | 166 | ||
| 167 | do { | 167 | do { |
| 168 | context = get_mmu_context(mm); | 168 | context = get_mmu_context(mm); |
| 169 | if (!cpu_isset(smp_processor_id(), mm->cpu_vm_mask)) | 169 | if (!cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm))) |
| 170 | cpu_set(smp_processor_id(), mm->cpu_vm_mask); | 170 | cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm)); |
| 171 | reload_context(context); | 171 | reload_context(context); |
| 172 | /* | 172 | /* |
| 173 | * in the unlikely event of a TLB-flush by another thread, | 173 | * in the unlikely event of a TLB-flush by another thread, |
diff --git a/arch/ia64/include/asm/module.h b/arch/ia64/include/asm/module.h index d2da61e4c49b..908eaef42a08 100644 --- a/arch/ia64/include/asm/module.h +++ b/arch/ia64/include/asm/module.h | |||
| @@ -16,6 +16,12 @@ struct mod_arch_specific { | |||
| 16 | struct elf64_shdr *got; /* global offset table */ | 16 | struct elf64_shdr *got; /* global offset table */ |
| 17 | struct elf64_shdr *opd; /* official procedure descriptors */ | 17 | struct elf64_shdr *opd; /* official procedure descriptors */ |
| 18 | struct elf64_shdr *unwind; /* unwind-table section */ | 18 | struct elf64_shdr *unwind; /* unwind-table section */ |
| 19 | #ifdef CONFIG_PARAVIRT | ||
| 20 | struct elf64_shdr *paravirt_bundles; | ||
| 21 | /* paravirt_alt_bundle_patch table */ | ||
| 22 | struct elf64_shdr *paravirt_insts; | ||
| 23 | /* paravirt_alt_inst_patch table */ | ||
| 24 | #endif | ||
| 19 | unsigned long gp; /* global-pointer for module */ | 25 | unsigned long gp; /* global-pointer for module */ |
| 20 | 26 | ||
| 21 | void *core_unw_table; /* core unwind-table cookie returned by unwinder */ | 27 | void *core_unw_table; /* core unwind-table cookie returned by unwinder */ |
diff --git a/arch/ia64/include/asm/native/inst.h b/arch/ia64/include/asm/native/inst.h index 0a1026cca4fa..d2d46efb3e6e 100644 --- a/arch/ia64/include/asm/native/inst.h +++ b/arch/ia64/include/asm/native/inst.h | |||
| @@ -30,6 +30,9 @@ | |||
| 30 | #define __paravirt_work_processed_syscall_target \ | 30 | #define __paravirt_work_processed_syscall_target \ |
| 31 | ia64_work_processed_syscall | 31 | ia64_work_processed_syscall |
| 32 | 32 | ||
| 33 | #define paravirt_fsyscall_table ia64_native_fsyscall_table | ||
| 34 | #define paravirt_fsys_bubble_down ia64_native_fsys_bubble_down | ||
| 35 | |||
| 33 | #ifdef CONFIG_PARAVIRT_GUEST_ASM_CLOBBER_CHECK | 36 | #ifdef CONFIG_PARAVIRT_GUEST_ASM_CLOBBER_CHECK |
| 34 | # define PARAVIRT_POISON 0xdeadbeefbaadf00d | 37 | # define PARAVIRT_POISON 0xdeadbeefbaadf00d |
| 35 | # define CLOBBER(clob) \ | 38 | # define CLOBBER(clob) \ |
| @@ -74,6 +77,11 @@ | |||
| 74 | (pred) mov reg = psr \ | 77 | (pred) mov reg = psr \ |
| 75 | CLOBBER(clob) | 78 | CLOBBER(clob) |
| 76 | 79 | ||
| 80 | #define MOV_FROM_ITC(pred, pred_clob, reg, clob) \ | ||
| 81 | (pred) mov reg = ar.itc \ | ||
| 82 | CLOBBER(clob) \ | ||
| 83 | CLOBBER_PRED(pred_clob) | ||
| 84 | |||
| 77 | #define MOV_TO_IFA(reg, clob) \ | 85 | #define MOV_TO_IFA(reg, clob) \ |
| 78 | mov cr.ifa = reg \ | 86 | mov cr.ifa = reg \ |
| 79 | CLOBBER(clob) | 87 | CLOBBER(clob) |
| @@ -158,6 +166,11 @@ | |||
| 158 | #define RSM_PSR_DT \ | 166 | #define RSM_PSR_DT \ |
| 159 | rsm psr.dt | 167 | rsm psr.dt |
| 160 | 168 | ||
| 169 | #define RSM_PSR_BE_I(clob0, clob1) \ | ||
| 170 | rsm psr.be | psr.i \ | ||
| 171 | CLOBBER(clob0) \ | ||
| 172 | CLOBBER(clob1) | ||
| 173 | |||
| 161 | #define SSM_PSR_DT_AND_SRLZ_I \ | 174 | #define SSM_PSR_DT_AND_SRLZ_I \ |
| 162 | ssm psr.dt \ | 175 | ssm psr.dt \ |
| 163 | ;; \ | 176 | ;; \ |
diff --git a/arch/ia64/include/asm/native/patchlist.h b/arch/ia64/include/asm/native/patchlist.h new file mode 100644 index 000000000000..be16ca9311bf --- /dev/null +++ b/arch/ia64/include/asm/native/patchlist.h | |||
| @@ -0,0 +1,38 @@ | |||
| 1 | /****************************************************************************** | ||
| 2 | * arch/ia64/include/asm/native/inst.h | ||
| 3 | * | ||
| 4 | * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp> | ||
| 5 | * VA Linux Systems Japan K.K. | ||
| 6 | * | ||
| 7 | * This program is free software; you can redistribute it and/or modify | ||
| 8 | * it under the terms of the GNU General Public License as published by | ||
| 9 | * the Free Software Foundation; either version 2 of the License, or | ||
| 10 | * (at your option) any later version. | ||
| 11 | * | ||
| 12 | * This program is distributed in the hope that it will be useful, | ||
| 13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 15 | * GNU General Public License for more details. | ||
| 16 | * | ||
| 17 | * You should have received a copy of the GNU General Public License | ||
| 18 | * along with this program; if not, write to the Free Software | ||
| 19 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
| 20 | * | ||
| 21 | */ | ||
| 22 | |||
| 23 | #define __paravirt_start_gate_fsyscall_patchlist \ | ||
| 24 | __ia64_native_start_gate_fsyscall_patchlist | ||
| 25 | #define __paravirt_end_gate_fsyscall_patchlist \ | ||
| 26 | __ia64_native_end_gate_fsyscall_patchlist | ||
| 27 | #define __paravirt_start_gate_brl_fsys_bubble_down_patchlist \ | ||
| 28 | __ia64_native_start_gate_brl_fsys_bubble_down_patchlist | ||
| 29 | #define __paravirt_end_gate_brl_fsys_bubble_down_patchlist \ | ||
| 30 | __ia64_native_end_gate_brl_fsys_bubble_down_patchlist | ||
| 31 | #define __paravirt_start_gate_vtop_patchlist \ | ||
| 32 | __ia64_native_start_gate_vtop_patchlist | ||
| 33 | #define __paravirt_end_gate_vtop_patchlist \ | ||
| 34 | __ia64_native_end_gate_vtop_patchlist | ||
| 35 | #define __paravirt_start_gate_mckinley_e9_patchlist \ | ||
| 36 | __ia64_native_start_gate_mckinley_e9_patchlist | ||
| 37 | #define __paravirt_end_gate_mckinley_e9_patchlist \ | ||
| 38 | __ia64_native_end_gate_mckinley_e9_patchlist | ||
diff --git a/arch/ia64/include/asm/native/pvchk_inst.h b/arch/ia64/include/asm/native/pvchk_inst.h index b8e6eb1090d7..8d72962ec838 100644 --- a/arch/ia64/include/asm/native/pvchk_inst.h +++ b/arch/ia64/include/asm/native/pvchk_inst.h | |||
| @@ -180,6 +180,11 @@ | |||
| 180 | IS_PRED_IN(pred) \ | 180 | IS_PRED_IN(pred) \ |
| 181 | IS_RREG_OUT(reg) \ | 181 | IS_RREG_OUT(reg) \ |
| 182 | IS_RREG_CLOB(clob) | 182 | IS_RREG_CLOB(clob) |
| 183 | #define MOV_FROM_ITC(pred, pred_clob, reg, clob) \ | ||
| 184 | IS_PRED_IN(pred) \ | ||
| 185 | IS_PRED_CLOB(pred_clob) \ | ||
| 186 | IS_RREG_OUT(reg) \ | ||
| 187 | IS_RREG_CLOB(clob) | ||
| 183 | #define MOV_TO_IFA(reg, clob) \ | 188 | #define MOV_TO_IFA(reg, clob) \ |
| 184 | IS_RREG_IN(reg) \ | 189 | IS_RREG_IN(reg) \ |
| 185 | IS_RREG_CLOB(clob) | 190 | IS_RREG_CLOB(clob) |
| @@ -246,6 +251,9 @@ | |||
| 246 | IS_RREG_CLOB(clob2) | 251 | IS_RREG_CLOB(clob2) |
| 247 | #define RSM_PSR_DT \ | 252 | #define RSM_PSR_DT \ |
| 248 | nop 0 | 253 | nop 0 |
| 254 | #define RSM_PSR_BE_I(clob0, clob1) \ | ||
| 255 | IS_RREG_CLOB(clob0) \ | ||
| 256 | IS_RREG_CLOB(clob1) | ||
| 249 | #define SSM_PSR_DT_AND_SRLZ_I \ | 257 | #define SSM_PSR_DT_AND_SRLZ_I \ |
| 250 | nop 0 | 258 | nop 0 |
| 251 | #define BSW_0(clob0, clob1, clob2) \ | 259 | #define BSW_0(clob0, clob1, clob2) \ |
diff --git a/arch/ia64/include/asm/paravirt.h b/arch/ia64/include/asm/paravirt.h index 2bf3636473fe..2eb0a981a09a 100644 --- a/arch/ia64/include/asm/paravirt.h +++ b/arch/ia64/include/asm/paravirt.h | |||
| @@ -22,6 +22,56 @@ | |||
| 22 | #ifndef __ASM_PARAVIRT_H | 22 | #ifndef __ASM_PARAVIRT_H |
| 23 | #define __ASM_PARAVIRT_H | 23 | #define __ASM_PARAVIRT_H |
| 24 | 24 | ||
| 25 | #ifndef __ASSEMBLY__ | ||
| 26 | /****************************************************************************** | ||
| 27 | * fsys related addresses | ||
| 28 | */ | ||
| 29 | struct pv_fsys_data { | ||
| 30 | unsigned long *fsyscall_table; | ||
| 31 | void *fsys_bubble_down; | ||
| 32 | }; | ||
| 33 | |||
| 34 | extern struct pv_fsys_data pv_fsys_data; | ||
| 35 | |||
| 36 | unsigned long *paravirt_get_fsyscall_table(void); | ||
| 37 | char *paravirt_get_fsys_bubble_down(void); | ||
| 38 | |||
| 39 | /****************************************************************************** | ||
| 40 | * patchlist addresses for gate page | ||
| 41 | */ | ||
| 42 | enum pv_gate_patchlist { | ||
| 43 | PV_GATE_START_FSYSCALL, | ||
| 44 | PV_GATE_END_FSYSCALL, | ||
| 45 | |||
| 46 | PV_GATE_START_BRL_FSYS_BUBBLE_DOWN, | ||
| 47 | PV_GATE_END_BRL_FSYS_BUBBLE_DOWN, | ||
| 48 | |||
| 49 | PV_GATE_START_VTOP, | ||
| 50 | PV_GATE_END_VTOP, | ||
| 51 | |||
| 52 | PV_GATE_START_MCKINLEY_E9, | ||
| 53 | PV_GATE_END_MCKINLEY_E9, | ||
| 54 | }; | ||
| 55 | |||
| 56 | struct pv_patchdata { | ||
| 57 | unsigned long start_fsyscall_patchlist; | ||
| 58 | unsigned long end_fsyscall_patchlist; | ||
| 59 | unsigned long start_brl_fsys_bubble_down_patchlist; | ||
| 60 | unsigned long end_brl_fsys_bubble_down_patchlist; | ||
| 61 | unsigned long start_vtop_patchlist; | ||
| 62 | unsigned long end_vtop_patchlist; | ||
| 63 | unsigned long start_mckinley_e9_patchlist; | ||
| 64 | unsigned long end_mckinley_e9_patchlist; | ||
| 65 | |||
| 66 | void *gate_section; | ||
| 67 | }; | ||
| 68 | |||
| 69 | extern struct pv_patchdata pv_patchdata; | ||
| 70 | |||
| 71 | unsigned long paravirt_get_gate_patchlist(enum pv_gate_patchlist type); | ||
| 72 | void *paravirt_get_gate_section(void); | ||
| 73 | #endif | ||
| 74 | |||
| 25 | #ifdef CONFIG_PARAVIRT_GUEST | 75 | #ifdef CONFIG_PARAVIRT_GUEST |
| 26 | 76 | ||
| 27 | #define PARAVIRT_HYPERVISOR_TYPE_DEFAULT 0 | 77 | #define PARAVIRT_HYPERVISOR_TYPE_DEFAULT 0 |
| @@ -68,6 +118,14 @@ struct pv_init_ops { | |||
| 68 | int (*arch_setup_nomca)(void); | 118 | int (*arch_setup_nomca)(void); |
| 69 | 119 | ||
| 70 | void (*post_smp_prepare_boot_cpu)(void); | 120 | void (*post_smp_prepare_boot_cpu)(void); |
| 121 | |||
| 122 | #ifdef ASM_SUPPORTED | ||
| 123 | unsigned long (*patch_bundle)(void *sbundle, void *ebundle, | ||
| 124 | unsigned long type); | ||
| 125 | unsigned long (*patch_inst)(unsigned long stag, unsigned long etag, | ||
| 126 | unsigned long type); | ||
| 127 | #endif | ||
| 128 | void (*patch_branch)(unsigned long tag, unsigned long type); | ||
| 71 | }; | 129 | }; |
| 72 | 130 | ||
| 73 | extern struct pv_init_ops pv_init_ops; | 131 | extern struct pv_init_ops pv_init_ops; |
| @@ -210,6 +268,8 @@ struct pv_time_ops { | |||
| 210 | int (*do_steal_accounting)(unsigned long *new_itm); | 268 | int (*do_steal_accounting)(unsigned long *new_itm); |
| 211 | 269 | ||
| 212 | void (*clocksource_resume)(void); | 270 | void (*clocksource_resume)(void); |
| 271 | |||
| 272 | unsigned long long (*sched_clock)(void); | ||
| 213 | }; | 273 | }; |
| 214 | 274 | ||
| 215 | extern struct pv_time_ops pv_time_ops; | 275 | extern struct pv_time_ops pv_time_ops; |
| @@ -227,6 +287,11 @@ paravirt_do_steal_accounting(unsigned long *new_itm) | |||
| 227 | return pv_time_ops.do_steal_accounting(new_itm); | 287 | return pv_time_ops.do_steal_accounting(new_itm); |
| 228 | } | 288 | } |
| 229 | 289 | ||
| 290 | static inline unsigned long long paravirt_sched_clock(void) | ||
| 291 | { | ||
| 292 | return pv_time_ops.sched_clock(); | ||
| 293 | } | ||
| 294 | |||
| 230 | #endif /* !__ASSEMBLY__ */ | 295 | #endif /* !__ASSEMBLY__ */ |
| 231 | 296 | ||
| 232 | #else | 297 | #else |
diff --git a/arch/ia64/include/asm/paravirt_patch.h b/arch/ia64/include/asm/paravirt_patch.h new file mode 100644 index 000000000000..128ff5db6e67 --- /dev/null +++ b/arch/ia64/include/asm/paravirt_patch.h | |||
| @@ -0,0 +1,143 @@ | |||
| 1 | /****************************************************************************** | ||
| 2 | * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp> | ||
| 3 | * VA Linux Systems Japan K.K. | ||
| 4 | * | ||
| 5 | * This program is free software; you can redistribute it and/or modify | ||
| 6 | * it under the terms of the GNU General Public License as published by | ||
| 7 | * the Free Software Foundation; either version 2 of the License, or | ||
| 8 | * (at your option) any later version. | ||
| 9 | * | ||
| 10 | * This program is distributed in the hope that it will be useful, | ||
| 11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 13 | * GNU General Public License for more details. | ||
| 14 | * | ||
| 15 | * You should have received a copy of the GNU General Public License | ||
| 16 | * along with this program; if not, write to the Free Software | ||
| 17 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
| 18 | * | ||
| 19 | */ | ||
| 20 | |||
| 21 | #ifndef __ASM_PARAVIRT_PATCH_H | ||
| 22 | #define __ASM_PARAVIRT_PATCH_H | ||
| 23 | |||
| 24 | #ifdef __ASSEMBLY__ | ||
| 25 | |||
| 26 | .section .paravirt_branches, "a" | ||
| 27 | .previous | ||
| 28 | #define PARAVIRT_PATCH_SITE_BR(type) \ | ||
| 29 | { \ | ||
| 30 | [1:] ; \ | ||
| 31 | br.cond.sptk.many 2f ; \ | ||
| 32 | nop.b 0 ; \ | ||
| 33 | nop.b 0;; ; \ | ||
| 34 | } ; \ | ||
| 35 | 2: \ | ||
| 36 | .xdata8 ".paravirt_branches", 1b, type | ||
| 37 | |||
| 38 | #else | ||
| 39 | |||
| 40 | #include <linux/stringify.h> | ||
| 41 | #include <asm/intrinsics.h> | ||
| 42 | |||
| 43 | /* for binary patch */ | ||
| 44 | struct paravirt_patch_site_bundle { | ||
| 45 | void *sbundle; | ||
| 46 | void *ebundle; | ||
| 47 | unsigned long type; | ||
| 48 | }; | ||
| 49 | |||
| 50 | /* label means the beginning of new bundle */ | ||
| 51 | #define paravirt_alt_bundle(instr, privop) \ | ||
| 52 | "\t998:\n" \ | ||
| 53 | "\t" instr "\n" \ | ||
| 54 | "\t999:\n" \ | ||
| 55 | "\t.pushsection .paravirt_bundles, \"a\"\n" \ | ||
| 56 | "\t.popsection\n" \ | ||
| 57 | "\t.xdata8 \".paravirt_bundles\", 998b, 999b, " \ | ||
| 58 | __stringify(privop) "\n" | ||
| 59 | |||
| 60 | |||
| 61 | struct paravirt_patch_bundle_elem { | ||
| 62 | const void *sbundle; | ||
| 63 | const void *ebundle; | ||
| 64 | unsigned long type; | ||
| 65 | }; | ||
| 66 | |||
| 67 | |||
| 68 | struct paravirt_patch_site_inst { | ||
| 69 | unsigned long stag; | ||
| 70 | unsigned long etag; | ||
| 71 | unsigned long type; | ||
| 72 | }; | ||
| 73 | |||
| 74 | #define paravirt_alt_inst(instr, privop) \ | ||
| 75 | "\t[998:]\n" \ | ||
| 76 | "\t" instr "\n" \ | ||
| 77 | "\t[999:]\n" \ | ||
| 78 | "\t.pushsection .paravirt_insts, \"a\"\n" \ | ||
| 79 | "\t.popsection\n" \ | ||
| 80 | "\t.xdata8 \".paravirt_insts\", 998b, 999b, " \ | ||
| 81 | __stringify(privop) "\n" | ||
| 82 | |||
| 83 | struct paravirt_patch_site_branch { | ||
| 84 | unsigned long tag; | ||
| 85 | unsigned long type; | ||
| 86 | }; | ||
| 87 | |||
| 88 | struct paravirt_patch_branch_target { | ||
| 89 | const void *entry; | ||
| 90 | unsigned long type; | ||
| 91 | }; | ||
| 92 | |||
| 93 | void | ||
| 94 | __paravirt_patch_apply_branch( | ||
| 95 | unsigned long tag, unsigned long type, | ||
| 96 | const struct paravirt_patch_branch_target *entries, | ||
| 97 | unsigned int nr_entries); | ||
| 98 | |||
| 99 | void | ||
| 100 | paravirt_patch_reloc_br(unsigned long tag, const void *target); | ||
| 101 | |||
| 102 | void | ||
| 103 | paravirt_patch_reloc_brl(unsigned long tag, const void *target); | ||
| 104 | |||
| 105 | |||
| 106 | #if defined(ASM_SUPPORTED) && defined(CONFIG_PARAVIRT) | ||
| 107 | unsigned long | ||
| 108 | ia64_native_patch_bundle(void *sbundle, void *ebundle, unsigned long type); | ||
| 109 | |||
| 110 | unsigned long | ||
| 111 | __paravirt_patch_apply_bundle(void *sbundle, void *ebundle, unsigned long type, | ||
| 112 | const struct paravirt_patch_bundle_elem *elems, | ||
| 113 | unsigned long nelems, | ||
| 114 | const struct paravirt_patch_bundle_elem **found); | ||
| 115 | |||
| 116 | void | ||
| 117 | paravirt_patch_apply_bundle(const struct paravirt_patch_site_bundle *start, | ||
| 118 | const struct paravirt_patch_site_bundle *end); | ||
| 119 | |||
| 120 | void | ||
| 121 | paravirt_patch_apply_inst(const struct paravirt_patch_site_inst *start, | ||
| 122 | const struct paravirt_patch_site_inst *end); | ||
| 123 | |||
| 124 | void paravirt_patch_apply(void); | ||
| 125 | #else | ||
| 126 | #define paravirt_patch_apply_bundle(start, end) do { } while (0) | ||
| 127 | #define paravirt_patch_apply_inst(start, end) do { } while (0) | ||
| 128 | #define paravirt_patch_apply() do { } while (0) | ||
| 129 | #endif | ||
| 130 | |||
| 131 | #endif /* !__ASSEMBLEY__ */ | ||
| 132 | |||
| 133 | #endif /* __ASM_PARAVIRT_PATCH_H */ | ||
| 134 | |||
| 135 | /* | ||
| 136 | * Local variables: | ||
| 137 | * mode: C | ||
| 138 | * c-set-style: "linux" | ||
| 139 | * c-basic-offset: 8 | ||
| 140 | * tab-width: 8 | ||
| 141 | * indent-tabs-mode: t | ||
| 142 | * End: | ||
| 143 | */ | ||
diff --git a/arch/ia64/include/asm/paravirt_privop.h b/arch/ia64/include/asm/paravirt_privop.h index 33c8e55f5775..3d2951130b5f 100644 --- a/arch/ia64/include/asm/paravirt_privop.h +++ b/arch/ia64/include/asm/paravirt_privop.h | |||
| @@ -33,7 +33,7 @@ | |||
| 33 | */ | 33 | */ |
| 34 | 34 | ||
| 35 | struct pv_cpu_ops { | 35 | struct pv_cpu_ops { |
| 36 | void (*fc)(unsigned long addr); | 36 | void (*fc)(void *addr); |
| 37 | unsigned long (*thash)(unsigned long addr); | 37 | unsigned long (*thash)(unsigned long addr); |
| 38 | unsigned long (*get_cpuid)(int index); | 38 | unsigned long (*get_cpuid)(int index); |
| 39 | unsigned long (*get_pmd)(int index); | 39 | unsigned long (*get_pmd)(int index); |
| @@ -60,12 +60,18 @@ extern unsigned long ia64_native_getreg_func(int regnum); | |||
| 60 | /* Instructions paravirtualized for performance */ | 60 | /* Instructions paravirtualized for performance */ |
| 61 | /************************************************/ | 61 | /************************************************/ |
| 62 | 62 | ||
| 63 | #ifndef ASM_SUPPORTED | ||
| 64 | #define paravirt_ssm_i() pv_cpu_ops.ssm_i() | ||
| 65 | #define paravirt_rsm_i() pv_cpu_ops.rsm_i() | ||
| 66 | #define __paravirt_getreg() pv_cpu_ops.getreg() | ||
| 67 | #endif | ||
| 68 | |||
| 63 | /* mask for ia64_native_ssm/rsm() must be constant.("i" constraing). | 69 | /* mask for ia64_native_ssm/rsm() must be constant.("i" constraing). |
| 64 | * static inline function doesn't satisfy it. */ | 70 | * static inline function doesn't satisfy it. */ |
| 65 | #define paravirt_ssm(mask) \ | 71 | #define paravirt_ssm(mask) \ |
| 66 | do { \ | 72 | do { \ |
| 67 | if ((mask) == IA64_PSR_I) \ | 73 | if ((mask) == IA64_PSR_I) \ |
| 68 | pv_cpu_ops.ssm_i(); \ | 74 | paravirt_ssm_i(); \ |
| 69 | else \ | 75 | else \ |
| 70 | ia64_native_ssm(mask); \ | 76 | ia64_native_ssm(mask); \ |
| 71 | } while (0) | 77 | } while (0) |
| @@ -73,7 +79,7 @@ extern unsigned long ia64_native_getreg_func(int regnum); | |||
| 73 | #define paravirt_rsm(mask) \ | 79 | #define paravirt_rsm(mask) \ |
| 74 | do { \ | 80 | do { \ |
| 75 | if ((mask) == IA64_PSR_I) \ | 81 | if ((mask) == IA64_PSR_I) \ |
| 76 | pv_cpu_ops.rsm_i(); \ | 82 | paravirt_rsm_i(); \ |
| 77 | else \ | 83 | else \ |
| 78 | ia64_native_rsm(mask); \ | 84 | ia64_native_rsm(mask); \ |
| 79 | } while (0) | 85 | } while (0) |
| @@ -86,7 +92,7 @@ extern unsigned long ia64_native_getreg_func(int regnum); | |||
| 86 | if ((reg) == _IA64_REG_IP) \ | 92 | if ((reg) == _IA64_REG_IP) \ |
| 87 | res = ia64_native_getreg(_IA64_REG_IP); \ | 93 | res = ia64_native_getreg(_IA64_REG_IP); \ |
| 88 | else \ | 94 | else \ |
| 89 | res = pv_cpu_ops.getreg(reg); \ | 95 | res = __paravirt_getreg(reg); \ |
| 90 | res; \ | 96 | res; \ |
| 91 | }) | 97 | }) |
| 92 | 98 | ||
| @@ -112,6 +118,12 @@ void paravirt_cpu_asm_init(const struct pv_cpu_asm_switch *cpu_asm_switch); | |||
| 112 | 118 | ||
| 113 | #endif /* CONFIG_PARAVIRT */ | 119 | #endif /* CONFIG_PARAVIRT */ |
| 114 | 120 | ||
| 121 | #if defined(CONFIG_PARAVIRT) && defined(ASM_SUPPORTED) | ||
| 122 | #define paravirt_dv_serialize_data() ia64_dv_serialize_data() | ||
| 123 | #else | ||
| 124 | #define paravirt_dv_serialize_data() /* nothing */ | ||
| 125 | #endif | ||
| 126 | |||
| 115 | /* these routines utilize privilege-sensitive or performance-sensitive | 127 | /* these routines utilize privilege-sensitive or performance-sensitive |
| 116 | * privileged instructions so the code must be replaced with | 128 | * privileged instructions so the code must be replaced with |
| 117 | * paravirtualized versions */ | 129 | * paravirtualized versions */ |
| @@ -121,4 +133,349 @@ void paravirt_cpu_asm_init(const struct pv_cpu_asm_switch *cpu_asm_switch); | |||
| 121 | IA64_PARAVIRT_ASM_FUNC(work_processed_syscall) | 133 | IA64_PARAVIRT_ASM_FUNC(work_processed_syscall) |
| 122 | #define ia64_leave_kernel IA64_PARAVIRT_ASM_FUNC(leave_kernel) | 134 | #define ia64_leave_kernel IA64_PARAVIRT_ASM_FUNC(leave_kernel) |
| 123 | 135 | ||
| 136 | |||
| 137 | #if defined(CONFIG_PARAVIRT) | ||
| 138 | /****************************************************************************** | ||
| 139 | * binary patching infrastructure | ||
| 140 | */ | ||
| 141 | #define PARAVIRT_PATCH_TYPE_FC 1 | ||
| 142 | #define PARAVIRT_PATCH_TYPE_THASH 2 | ||
| 143 | #define PARAVIRT_PATCH_TYPE_GET_CPUID 3 | ||
| 144 | #define PARAVIRT_PATCH_TYPE_GET_PMD 4 | ||
| 145 | #define PARAVIRT_PATCH_TYPE_PTCGA 5 | ||
| 146 | #define PARAVIRT_PATCH_TYPE_GET_RR 6 | ||
| 147 | #define PARAVIRT_PATCH_TYPE_SET_RR 7 | ||
| 148 | #define PARAVIRT_PATCH_TYPE_SET_RR0_TO_RR4 8 | ||
| 149 | #define PARAVIRT_PATCH_TYPE_SSM_I 9 | ||
| 150 | #define PARAVIRT_PATCH_TYPE_RSM_I 10 | ||
| 151 | #define PARAVIRT_PATCH_TYPE_GET_PSR_I 11 | ||
| 152 | #define PARAVIRT_PATCH_TYPE_INTRIN_LOCAL_IRQ_RESTORE 12 | ||
| 153 | |||
| 154 | /* PARAVIRT_PATY_TYPE_[GS]ETREG + _IA64_REG_xxx */ | ||
| 155 | #define PARAVIRT_PATCH_TYPE_GETREG 0x10000000 | ||
| 156 | #define PARAVIRT_PATCH_TYPE_SETREG 0x20000000 | ||
| 157 | |||
| 158 | /* | ||
| 159 | * struct task_struct* (*ia64_switch_to)(void* next_task); | ||
| 160 | * void *ia64_leave_syscall; | ||
| 161 | * void *ia64_work_processed_syscall | ||
| 162 | * void *ia64_leave_kernel; | ||
| 163 | */ | ||
| 164 | |||
| 165 | #define PARAVIRT_PATCH_TYPE_BR_START 0x30000000 | ||
| 166 | #define PARAVIRT_PATCH_TYPE_BR_SWITCH_TO \ | ||
| 167 | (PARAVIRT_PATCH_TYPE_BR_START + 0) | ||
| 168 | #define PARAVIRT_PATCH_TYPE_BR_LEAVE_SYSCALL \ | ||
| 169 | (PARAVIRT_PATCH_TYPE_BR_START + 1) | ||
| 170 | #define PARAVIRT_PATCH_TYPE_BR_WORK_PROCESSED_SYSCALL \ | ||
| 171 | (PARAVIRT_PATCH_TYPE_BR_START + 2) | ||
| 172 | #define PARAVIRT_PATCH_TYPE_BR_LEAVE_KERNEL \ | ||
| 173 | (PARAVIRT_PATCH_TYPE_BR_START + 3) | ||
| 174 | |||
| 175 | #ifdef ASM_SUPPORTED | ||
| 176 | #include <asm/paravirt_patch.h> | ||
| 177 | |||
| 178 | /* | ||
| 179 | * pv_cpu_ops calling stub. | ||
| 180 | * normal function call convension can't be written by gcc | ||
| 181 | * inline assembly. | ||
| 182 | * | ||
| 183 | * from the caller's point of view, | ||
| 184 | * the following registers will be clobbered. | ||
| 185 | * r2, r3 | ||
| 186 | * r8-r15 | ||
| 187 | * r16, r17 | ||
| 188 | * b6, b7 | ||
| 189 | * p6-p15 | ||
| 190 | * ar.ccv | ||
| 191 | * | ||
| 192 | * from the callee's point of view , | ||
| 193 | * the following registers can be used. | ||
| 194 | * r2, r3: scratch | ||
| 195 | * r8: scratch, input argument0 and return value | ||
| 196 | * r0-r15: scratch, input argument1-5 | ||
| 197 | * b6: return pointer | ||
| 198 | * b7: scratch | ||
| 199 | * p6-p15: scratch | ||
| 200 | * ar.ccv: scratch | ||
| 201 | * | ||
| 202 | * other registers must not be changed. especially | ||
| 203 | * b0: rp: preserved. gcc ignores b0 in clobbered register. | ||
| 204 | * r16: saved gp | ||
| 205 | */ | ||
| 206 | /* 5 bundles */ | ||
| 207 | #define __PARAVIRT_BR \ | ||
| 208 | ";;\n" \ | ||
| 209 | "{ .mlx\n" \ | ||
| 210 | "nop 0\n" \ | ||
| 211 | "movl r2 = %[op_addr]\n"/* get function pointer address */ \ | ||
| 212 | ";;\n" \ | ||
| 213 | "}\n" \ | ||
| 214 | "1:\n" \ | ||
| 215 | "{ .mii\n" \ | ||
| 216 | "ld8 r2 = [r2]\n" /* load function descriptor address */ \ | ||
| 217 | "mov r17 = ip\n" /* get ip to calc return address */ \ | ||
| 218 | "mov r16 = gp\n" /* save gp */ \ | ||
| 219 | ";;\n" \ | ||
| 220 | "}\n" \ | ||
| 221 | "{ .mii\n" \ | ||
| 222 | "ld8 r3 = [r2], 8\n" /* load entry address */ \ | ||
| 223 | "adds r17 = 1f - 1b, r17\n" /* calculate return address */ \ | ||
| 224 | ";;\n" \ | ||
| 225 | "mov b7 = r3\n" /* set entry address */ \ | ||
| 226 | "}\n" \ | ||
| 227 | "{ .mib\n" \ | ||
| 228 | "ld8 gp = [r2]\n" /* load gp value */ \ | ||
| 229 | "mov b6 = r17\n" /* set return address */ \ | ||
| 230 | "br.cond.sptk.few b7\n" /* intrinsics are very short isns */ \ | ||
| 231 | "}\n" \ | ||
| 232 | "1:\n" \ | ||
| 233 | "{ .mii\n" \ | ||
| 234 | "mov gp = r16\n" /* restore gp value */ \ | ||
| 235 | "nop 0\n" \ | ||
| 236 | "nop 0\n" \ | ||
| 237 | ";;\n" \ | ||
| 238 | "}\n" | ||
| 239 | |||
| 240 | #define PARAVIRT_OP(op) \ | ||
| 241 | [op_addr] "i"(&pv_cpu_ops.op) | ||
| 242 | |||
| 243 | #define PARAVIRT_TYPE(type) \ | ||
| 244 | PARAVIRT_PATCH_TYPE_ ## type | ||
| 245 | |||
| 246 | #define PARAVIRT_REG_CLOBBERS0 \ | ||
| 247 | "r2", "r3", /*"r8",*/ "r9", "r10", "r11", "r14", \ | ||
| 248 | "r15", "r16", "r17" | ||
| 249 | |||
| 250 | #define PARAVIRT_REG_CLOBBERS1 \ | ||
| 251 | "r2","r3", /*"r8",*/ "r9", "r10", "r11", "r14", \ | ||
| 252 | "r15", "r16", "r17" | ||
| 253 | |||
| 254 | #define PARAVIRT_REG_CLOBBERS2 \ | ||
| 255 | "r2", "r3", /*"r8", "r9",*/ "r10", "r11", "r14", \ | ||
| 256 | "r15", "r16", "r17" | ||
| 257 | |||
| 258 | #define PARAVIRT_REG_CLOBBERS5 \ | ||
| 259 | "r2", "r3", /*"r8", "r9", "r10", "r11", "r14",*/ \ | ||
| 260 | "r15", "r16", "r17" | ||
| 261 | |||
| 262 | #define PARAVIRT_BR_CLOBBERS \ | ||
| 263 | "b6", "b7" | ||
| 264 | |||
| 265 | #define PARAVIRT_PR_CLOBBERS \ | ||
| 266 | "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15" | ||
| 267 | |||
| 268 | #define PARAVIRT_AR_CLOBBERS \ | ||
| 269 | "ar.ccv" | ||
| 270 | |||
| 271 | #define PARAVIRT_CLOBBERS0 \ | ||
| 272 | PARAVIRT_REG_CLOBBERS0, \ | ||
| 273 | PARAVIRT_BR_CLOBBERS, \ | ||
| 274 | PARAVIRT_PR_CLOBBERS, \ | ||
| 275 | PARAVIRT_AR_CLOBBERS, \ | ||
| 276 | "memory" | ||
| 277 | |||
| 278 | #define PARAVIRT_CLOBBERS1 \ | ||
| 279 | PARAVIRT_REG_CLOBBERS1, \ | ||
| 280 | PARAVIRT_BR_CLOBBERS, \ | ||
| 281 | PARAVIRT_PR_CLOBBERS, \ | ||
| 282 | PARAVIRT_AR_CLOBBERS, \ | ||
| 283 | "memory" | ||
| 284 | |||
| 285 | #define PARAVIRT_CLOBBERS2 \ | ||
| 286 | PARAVIRT_REG_CLOBBERS2, \ | ||
| 287 | PARAVIRT_BR_CLOBBERS, \ | ||
| 288 | PARAVIRT_PR_CLOBBERS, \ | ||
| 289 | PARAVIRT_AR_CLOBBERS, \ | ||
| 290 | "memory" | ||
| 291 | |||
| 292 | #define PARAVIRT_CLOBBERS5 \ | ||
| 293 | PARAVIRT_REG_CLOBBERS5, \ | ||
| 294 | PARAVIRT_BR_CLOBBERS, \ | ||
| 295 | PARAVIRT_PR_CLOBBERS, \ | ||
| 296 | PARAVIRT_AR_CLOBBERS, \ | ||
| 297 | "memory" | ||
| 298 | |||
| 299 | #define PARAVIRT_BR0(op, type) \ | ||
| 300 | register unsigned long ia64_clobber asm ("r8"); \ | ||
| 301 | asm volatile (paravirt_alt_bundle(__PARAVIRT_BR, \ | ||
| 302 | PARAVIRT_TYPE(type)) \ | ||
| 303 | : "=r"(ia64_clobber) \ | ||
| 304 | : PARAVIRT_OP(op) \ | ||
| 305 | : PARAVIRT_CLOBBERS0) | ||
| 306 | |||
| 307 | #define PARAVIRT_BR0_RET(op, type) \ | ||
| 308 | register unsigned long ia64_intri_res asm ("r8"); \ | ||
| 309 | asm volatile (paravirt_alt_bundle(__PARAVIRT_BR, \ | ||
| 310 | PARAVIRT_TYPE(type)) \ | ||
| 311 | : "=r"(ia64_intri_res) \ | ||
| 312 | : PARAVIRT_OP(op) \ | ||
| 313 | : PARAVIRT_CLOBBERS0) | ||
| 314 | |||
| 315 | #define PARAVIRT_BR1(op, type, arg1) \ | ||
| 316 | register unsigned long __##arg1 asm ("r8") = arg1; \ | ||
| 317 | register unsigned long ia64_clobber asm ("r8"); \ | ||
| 318 | asm volatile (paravirt_alt_bundle(__PARAVIRT_BR, \ | ||
| 319 | PARAVIRT_TYPE(type)) \ | ||
| 320 | : "=r"(ia64_clobber) \ | ||
| 321 | : PARAVIRT_OP(op), "0"(__##arg1) \ | ||
| 322 | : PARAVIRT_CLOBBERS1) | ||
| 323 | |||
| 324 | #define PARAVIRT_BR1_RET(op, type, arg1) \ | ||
| 325 | register unsigned long ia64_intri_res asm ("r8"); \ | ||
| 326 | register unsigned long __##arg1 asm ("r8") = arg1; \ | ||
| 327 | asm volatile (paravirt_alt_bundle(__PARAVIRT_BR, \ | ||
| 328 | PARAVIRT_TYPE(type)) \ | ||
| 329 | : "=r"(ia64_intri_res) \ | ||
| 330 | : PARAVIRT_OP(op), "0"(__##arg1) \ | ||
| 331 | : PARAVIRT_CLOBBERS1) | ||
| 332 | |||
| 333 | #define PARAVIRT_BR1_VOID(op, type, arg1) \ | ||
| 334 | register void *__##arg1 asm ("r8") = arg1; \ | ||
| 335 | register unsigned long ia64_clobber asm ("r8"); \ | ||
| 336 | asm volatile (paravirt_alt_bundle(__PARAVIRT_BR, \ | ||
| 337 | PARAVIRT_TYPE(type)) \ | ||
| 338 | : "=r"(ia64_clobber) \ | ||
| 339 | : PARAVIRT_OP(op), "0"(__##arg1) \ | ||
| 340 | : PARAVIRT_CLOBBERS1) | ||
| 341 | |||
| 342 | #define PARAVIRT_BR2(op, type, arg1, arg2) \ | ||
| 343 | register unsigned long __##arg1 asm ("r8") = arg1; \ | ||
| 344 | register unsigned long __##arg2 asm ("r9") = arg2; \ | ||
| 345 | register unsigned long ia64_clobber1 asm ("r8"); \ | ||
| 346 | register unsigned long ia64_clobber2 asm ("r9"); \ | ||
| 347 | asm volatile (paravirt_alt_bundle(__PARAVIRT_BR, \ | ||
| 348 | PARAVIRT_TYPE(type)) \ | ||
| 349 | : "=r"(ia64_clobber1), "=r"(ia64_clobber2) \ | ||
| 350 | : PARAVIRT_OP(op), "0"(__##arg1), "1"(__##arg2) \ | ||
| 351 | : PARAVIRT_CLOBBERS2) | ||
| 352 | |||
| 353 | |||
| 354 | #define PARAVIRT_DEFINE_CPU_OP0(op, type) \ | ||
| 355 | static inline void \ | ||
| 356 | paravirt_ ## op (void) \ | ||
| 357 | { \ | ||
| 358 | PARAVIRT_BR0(op, type); \ | ||
| 359 | } | ||
| 360 | |||
| 361 | #define PARAVIRT_DEFINE_CPU_OP0_RET(op, type) \ | ||
| 362 | static inline unsigned long \ | ||
| 363 | paravirt_ ## op (void) \ | ||
| 364 | { \ | ||
| 365 | PARAVIRT_BR0_RET(op, type); \ | ||
| 366 | return ia64_intri_res; \ | ||
| 367 | } | ||
| 368 | |||
| 369 | #define PARAVIRT_DEFINE_CPU_OP1_VOID(op, type) \ | ||
| 370 | static inline void \ | ||
| 371 | paravirt_ ## op (void *arg1) \ | ||
| 372 | { \ | ||
| 373 | PARAVIRT_BR1_VOID(op, type, arg1); \ | ||
| 374 | } | ||
| 375 | |||
| 376 | #define PARAVIRT_DEFINE_CPU_OP1(op, type) \ | ||
| 377 | static inline void \ | ||
| 378 | paravirt_ ## op (unsigned long arg1) \ | ||
| 379 | { \ | ||
| 380 | PARAVIRT_BR1(op, type, arg1); \ | ||
| 381 | } | ||
| 382 | |||
| 383 | #define PARAVIRT_DEFINE_CPU_OP1_RET(op, type) \ | ||
| 384 | static inline unsigned long \ | ||
| 385 | paravirt_ ## op (unsigned long arg1) \ | ||
| 386 | { \ | ||
| 387 | PARAVIRT_BR1_RET(op, type, arg1); \ | ||
| 388 | return ia64_intri_res; \ | ||
| 389 | } | ||
| 390 | |||
| 391 | #define PARAVIRT_DEFINE_CPU_OP2(op, type) \ | ||
| 392 | static inline void \ | ||
| 393 | paravirt_ ## op (unsigned long arg1, \ | ||
| 394 | unsigned long arg2) \ | ||
| 395 | { \ | ||
| 396 | PARAVIRT_BR2(op, type, arg1, arg2); \ | ||
| 397 | } | ||
| 398 | |||
| 399 | |||
| 400 | PARAVIRT_DEFINE_CPU_OP1_VOID(fc, FC); | ||
| 401 | PARAVIRT_DEFINE_CPU_OP1_RET(thash, THASH) | ||
| 402 | PARAVIRT_DEFINE_CPU_OP1_RET(get_cpuid, GET_CPUID) | ||
| 403 | PARAVIRT_DEFINE_CPU_OP1_RET(get_pmd, GET_PMD) | ||
| 404 | PARAVIRT_DEFINE_CPU_OP2(ptcga, PTCGA) | ||
| 405 | PARAVIRT_DEFINE_CPU_OP1_RET(get_rr, GET_RR) | ||
| 406 | PARAVIRT_DEFINE_CPU_OP2(set_rr, SET_RR) | ||
| 407 | PARAVIRT_DEFINE_CPU_OP0(ssm_i, SSM_I) | ||
| 408 | PARAVIRT_DEFINE_CPU_OP0(rsm_i, RSM_I) | ||
| 409 | PARAVIRT_DEFINE_CPU_OP0_RET(get_psr_i, GET_PSR_I) | ||
| 410 | PARAVIRT_DEFINE_CPU_OP1(intrin_local_irq_restore, INTRIN_LOCAL_IRQ_RESTORE) | ||
| 411 | |||
| 412 | static inline void | ||
| 413 | paravirt_set_rr0_to_rr4(unsigned long val0, unsigned long val1, | ||
| 414 | unsigned long val2, unsigned long val3, | ||
| 415 | unsigned long val4) | ||
| 416 | { | ||
| 417 | register unsigned long __val0 asm ("r8") = val0; | ||
| 418 | register unsigned long __val1 asm ("r9") = val1; | ||
| 419 | register unsigned long __val2 asm ("r10") = val2; | ||
| 420 | register unsigned long __val3 asm ("r11") = val3; | ||
| 421 | register unsigned long __val4 asm ("r14") = val4; | ||
| 422 | |||
| 423 | register unsigned long ia64_clobber0 asm ("r8"); | ||
| 424 | register unsigned long ia64_clobber1 asm ("r9"); | ||
| 425 | register unsigned long ia64_clobber2 asm ("r10"); | ||
| 426 | register unsigned long ia64_clobber3 asm ("r11"); | ||
| 427 | register unsigned long ia64_clobber4 asm ("r14"); | ||
| 428 | |||
| 429 | asm volatile (paravirt_alt_bundle(__PARAVIRT_BR, | ||
| 430 | PARAVIRT_TYPE(SET_RR0_TO_RR4)) | ||
| 431 | : "=r"(ia64_clobber0), | ||
| 432 | "=r"(ia64_clobber1), | ||
| 433 | "=r"(ia64_clobber2), | ||
| 434 | "=r"(ia64_clobber3), | ||
| 435 | "=r"(ia64_clobber4) | ||
| 436 | : PARAVIRT_OP(set_rr0_to_rr4), | ||
| 437 | "0"(__val0), "1"(__val1), "2"(__val2), | ||
| 438 | "3"(__val3), "4"(__val4) | ||
| 439 | : PARAVIRT_CLOBBERS5); | ||
| 440 | } | ||
| 441 | |||
| 442 | /* unsigned long paravirt_getreg(int reg) */ | ||
| 443 | #define __paravirt_getreg(reg) \ | ||
| 444 | ({ \ | ||
| 445 | register unsigned long ia64_intri_res asm ("r8"); \ | ||
| 446 | register unsigned long __reg asm ("r8") = (reg); \ | ||
| 447 | \ | ||
| 448 | BUILD_BUG_ON(!__builtin_constant_p(reg)); \ | ||
| 449 | asm volatile (paravirt_alt_bundle(__PARAVIRT_BR, \ | ||
| 450 | PARAVIRT_TYPE(GETREG) \ | ||
| 451 | + (reg)) \ | ||
| 452 | : "=r"(ia64_intri_res) \ | ||
| 453 | : PARAVIRT_OP(getreg), "0"(__reg) \ | ||
| 454 | : PARAVIRT_CLOBBERS1); \ | ||
| 455 | \ | ||
| 456 | ia64_intri_res; \ | ||
| 457 | }) | ||
| 458 | |||
| 459 | /* void paravirt_setreg(int reg, unsigned long val) */ | ||
| 460 | #define paravirt_setreg(reg, val) \ | ||
| 461 | do { \ | ||
| 462 | register unsigned long __val asm ("r8") = val; \ | ||
| 463 | register unsigned long __reg asm ("r9") = reg; \ | ||
| 464 | register unsigned long ia64_clobber1 asm ("r8"); \ | ||
| 465 | register unsigned long ia64_clobber2 asm ("r9"); \ | ||
| 466 | \ | ||
| 467 | BUILD_BUG_ON(!__builtin_constant_p(reg)); \ | ||
| 468 | asm volatile (paravirt_alt_bundle(__PARAVIRT_BR, \ | ||
| 469 | PARAVIRT_TYPE(SETREG) \ | ||
| 470 | + (reg)) \ | ||
| 471 | : "=r"(ia64_clobber1), \ | ||
| 472 | "=r"(ia64_clobber2) \ | ||
| 473 | : PARAVIRT_OP(setreg), \ | ||
| 474 | "1"(__reg), "0"(__val) \ | ||
| 475 | : PARAVIRT_CLOBBERS2); \ | ||
| 476 | } while (0) | ||
| 477 | |||
| 478 | #endif /* ASM_SUPPORTED */ | ||
| 479 | #endif /* CONFIG_PARAVIRT && ASM_SUPPORTED */ | ||
| 480 | |||
| 124 | #endif /* _ASM_IA64_PARAVIRT_PRIVOP_H */ | 481 | #endif /* _ASM_IA64_PARAVIRT_PRIVOP_H */ |
diff --git a/arch/ia64/include/asm/smp.h b/arch/ia64/include/asm/smp.h index 21c402365d0e..598408336251 100644 --- a/arch/ia64/include/asm/smp.h +++ b/arch/ia64/include/asm/smp.h | |||
| @@ -126,7 +126,8 @@ extern void identify_siblings (struct cpuinfo_ia64 *); | |||
| 126 | extern int is_multithreading_enabled(void); | 126 | extern int is_multithreading_enabled(void); |
| 127 | 127 | ||
| 128 | extern void arch_send_call_function_single_ipi(int cpu); | 128 | extern void arch_send_call_function_single_ipi(int cpu); |
| 129 | extern void arch_send_call_function_ipi(cpumask_t mask); | 129 | extern void arch_send_call_function_ipi_mask(const struct cpumask *mask); |
| 130 | #define arch_send_call_function_ipi_mask arch_send_call_function_ipi_mask | ||
| 130 | 131 | ||
| 131 | #else /* CONFIG_SMP */ | 132 | #else /* CONFIG_SMP */ |
| 132 | 133 | ||
diff --git a/arch/ia64/include/asm/timex.h b/arch/ia64/include/asm/timex.h index 4e03cfe74a0c..86c7db861180 100644 --- a/arch/ia64/include/asm/timex.h +++ b/arch/ia64/include/asm/timex.h | |||
| @@ -40,5 +40,6 @@ get_cycles (void) | |||
| 40 | } | 40 | } |
| 41 | 41 | ||
| 42 | extern void ia64_cpu_local_tick (void); | 42 | extern void ia64_cpu_local_tick (void); |
| 43 | extern unsigned long long ia64_native_sched_clock (void); | ||
| 43 | 44 | ||
| 44 | #endif /* _ASM_IA64_TIMEX_H */ | 45 | #endif /* _ASM_IA64_TIMEX_H */ |
diff --git a/arch/ia64/include/asm/topology.h b/arch/ia64/include/asm/topology.h index f260dcf21515..7b4c8c70b2d1 100644 --- a/arch/ia64/include/asm/topology.h +++ b/arch/ia64/include/asm/topology.h | |||
| @@ -112,11 +112,6 @@ void build_cpu_to_node_map(void); | |||
| 112 | 112 | ||
| 113 | extern void arch_fix_phys_package_id(int num, u32 slot); | 113 | extern void arch_fix_phys_package_id(int num, u32 slot); |
| 114 | 114 | ||
| 115 | #define pcibus_to_cpumask(bus) (pcibus_to_node(bus) == -1 ? \ | ||
| 116 | CPU_MASK_ALL : \ | ||
| 117 | node_to_cpumask(pcibus_to_node(bus)) \ | ||
| 118 | ) | ||
| 119 | |||
| 120 | #define cpumask_of_pcibus(bus) (pcibus_to_node(bus) == -1 ? \ | 115 | #define cpumask_of_pcibus(bus) (pcibus_to_node(bus) == -1 ? \ |
| 121 | cpu_all_mask : \ | 116 | cpu_all_mask : \ |
| 122 | cpumask_of_node(pcibus_to_node(bus))) | 117 | cpumask_of_node(pcibus_to_node(bus))) |
diff --git a/arch/ia64/include/asm/xen/hypervisor.h b/arch/ia64/include/asm/xen/hypervisor.h index 7a804e80fc67..e425227a418e 100644 --- a/arch/ia64/include/asm/xen/hypervisor.h +++ b/arch/ia64/include/asm/xen/hypervisor.h | |||
| @@ -33,9 +33,6 @@ | |||
| 33 | #ifndef _ASM_IA64_XEN_HYPERVISOR_H | 33 | #ifndef _ASM_IA64_XEN_HYPERVISOR_H |
| 34 | #define _ASM_IA64_XEN_HYPERVISOR_H | 34 | #define _ASM_IA64_XEN_HYPERVISOR_H |
| 35 | 35 | ||
| 36 | #ifdef CONFIG_XEN | ||
| 37 | |||
| 38 | #include <linux/init.h> | ||
| 39 | #include <xen/interface/xen.h> | 36 | #include <xen/interface/xen.h> |
| 40 | #include <xen/interface/version.h> /* to compile feature.c */ | 37 | #include <xen/interface/version.h> /* to compile feature.c */ |
| 41 | #include <xen/features.h> /* to compile xen-netfront.c */ | 38 | #include <xen/features.h> /* to compile xen-netfront.c */
| @@ -43,22 +40,32 @@ | |||
| 43 | 40 | ||
| 44 | /* xen_domain_type is set before executing any C code by early_xen_setup */ | 41 | /* xen_domain_type is set before executing any C code by early_xen_setup */ |
| 45 | enum xen_domain_type { | 42 | enum xen_domain_type { |
| 46 | XEN_NATIVE, | 43 | XEN_NATIVE, /* running on bare hardware */ |
| 47 | XEN_PV_DOMAIN, | 44 | XEN_PV_DOMAIN, /* running in a PV domain */ |
| 48 | XEN_HVM_DOMAIN, | 45 | XEN_HVM_DOMAIN, /* running in a Xen hvm domain */
| 49 | }; | 46 | }; |
| 50 | 47 | ||
| 48 | #ifdef CONFIG_XEN | ||
| 51 | extern enum xen_domain_type xen_domain_type; | 49 | extern enum xen_domain_type xen_domain_type; |
| 50 | #else | ||
| 51 | #define xen_domain_type XEN_NATIVE | ||
| 52 | #endif | ||
| 52 | 53 | ||
| 53 | #define xen_domain() (xen_domain_type != XEN_NATIVE) | 54 | #define xen_domain() (xen_domain_type != XEN_NATIVE) |
| 54 | #define xen_pv_domain() (xen_domain_type == XEN_PV_DOMAIN) | 55 | #define xen_pv_domain() (xen_domain() && \ |
| 55 | #define xen_initial_domain() (xen_pv_domain() && \ | 56 | xen_domain_type == XEN_PV_DOMAIN) |
| 57 | #define xen_hvm_domain() (xen_domain() && \ | ||
| 58 | xen_domain_type == XEN_HVM_DOMAIN) | ||
| 59 | |||
| 60 | #ifdef CONFIG_XEN_DOM0 | ||
| 61 | #define xen_initial_domain() (xen_pv_domain() && \ | ||
| 56 | (xen_start_info->flags & SIF_INITDOMAIN)) | 62 | (xen_start_info->flags & SIF_INITDOMAIN)) |
| 57 | #define xen_hvm_domain() (xen_domain_type == XEN_HVM_DOMAIN) | 63 | #else |
| 64 | #define xen_initial_domain() (0) | ||
| 65 | #endif | ||
| 58 | 66 | ||
| 59 | /* deprecated. remove this */ | ||
| 60 | #define is_running_on_xen() (xen_domain_type == XEN_PV_DOMAIN) | ||
| 61 | 67 | ||
| 68 | #ifdef CONFIG_XEN | ||
| 62 | extern struct shared_info *HYPERVISOR_shared_info; | 69 | extern struct shared_info *HYPERVISOR_shared_info; |
| 63 | extern struct start_info *xen_start_info; | 70 | extern struct start_info *xen_start_info; |
| 64 | 71 | ||
| @@ -74,16 +81,6 @@ void force_evtchn_callback(void); | |||
| 74 | 81 | ||
| 75 | /* For setup_arch() in arch/ia64/kernel/setup.c */ | 82 | /* For setup_arch() in arch/ia64/kernel/setup.c */ |
| 76 | void xen_ia64_enable_opt_feature(void); | 83 | void xen_ia64_enable_opt_feature(void); |
| 77 | |||
| 78 | #else /* CONFIG_XEN */ | ||
| 79 | |||
| 80 | #define xen_domain() (0) | ||
| 81 | #define xen_pv_domain() (0) | ||
| 82 | #define xen_initial_domain() (0) | ||
| 83 | #define xen_hvm_domain() (0) | ||
| 84 | #define is_running_on_xen() (0) /* deprecated. remove this */ | ||
| 85 | #endif | 84 | #endif |
| 86 | 85 | ||
| 87 | #define is_initial_xendomain() (0) /* deprecated. remove this */ | ||
| 88 | |||
| 89 | #endif /* _ASM_IA64_XEN_HYPERVISOR_H */ | 86 | #endif /* _ASM_IA64_XEN_HYPERVISOR_H */ |
diff --git a/arch/ia64/include/asm/xen/inst.h b/arch/ia64/include/asm/xen/inst.h index 19c2ae1d878a..c53a47611208 100644 --- a/arch/ia64/include/asm/xen/inst.h +++ b/arch/ia64/include/asm/xen/inst.h | |||
| @@ -33,6 +33,9 @@ | |||
| 33 | #define __paravirt_work_processed_syscall_target \ | 33 | #define __paravirt_work_processed_syscall_target \ |
| 34 | xen_work_processed_syscall | 34 | xen_work_processed_syscall |
| 35 | 35 | ||
| 36 | #define paravirt_fsyscall_table xen_fsyscall_table | ||
| 37 | #define paravirt_fsys_bubble_down xen_fsys_bubble_down | ||
| 38 | |||
| 36 | #define MOV_FROM_IFA(reg) \ | 39 | #define MOV_FROM_IFA(reg) \ |
| 37 | movl reg = XSI_IFA; \ | 40 | movl reg = XSI_IFA; \ |
| 38 | ;; \ | 41 | ;; \ |
| @@ -110,6 +113,27 @@ | |||
| 110 | .endm | 113 | .endm |
| 111 | #define MOV_FROM_PSR(pred, reg, clob) __MOV_FROM_PSR pred, reg, clob | 114 | #define MOV_FROM_PSR(pred, reg, clob) __MOV_FROM_PSR pred, reg, clob |
| 112 | 115 | ||
| 116 | /* assuming ar.itc is read with interrupt disabled. */ | ||
| 117 | #define MOV_FROM_ITC(pred, pred_clob, reg, clob) \ | ||
| 118 | (pred) movl clob = XSI_ITC_OFFSET; \ | ||
| 119 | ;; \ | ||
| 120 | (pred) ld8 clob = [clob]; \ | ||
| 121 | (pred) mov reg = ar.itc; \ | ||
| 122 | ;; \ | ||
| 123 | (pred) add reg = reg, clob; \ | ||
| 124 | ;; \ | ||
| 125 | (pred) movl clob = XSI_ITC_LAST; \ | ||
| 126 | ;; \ | ||
| 127 | (pred) ld8 clob = [clob]; \ | ||
| 128 | ;; \ | ||
| 129 | (pred) cmp.geu.unc pred_clob, p0 = clob, reg; \ | ||
| 130 | ;; \ | ||
| 131 | (pred_clob) add reg = 1, clob; \ | ||
| 132 | ;; \ | ||
| 133 | (pred) movl clob = XSI_ITC_LAST; \ | ||
| 134 | ;; \ | ||
| 135 | (pred) st8 [clob] = reg | ||
| 136 | |||
| 113 | 137 | ||
| 114 | #define MOV_TO_IFA(reg, clob) \ | 138 | #define MOV_TO_IFA(reg, clob) \ |
| 115 | movl clob = XSI_IFA; \ | 139 | movl clob = XSI_IFA; \ |
| @@ -362,6 +386,10 @@ | |||
| 362 | #define RSM_PSR_DT \ | 386 | #define RSM_PSR_DT \ |
| 363 | XEN_HYPER_RSM_PSR_DT | 387 | XEN_HYPER_RSM_PSR_DT |
| 364 | 388 | ||
| 389 | #define RSM_PSR_BE_I(clob0, clob1) \ | ||
| 390 | RSM_PSR_I(p0, clob0, clob1); \ | ||
| 391 | rum psr.be | ||
| 392 | |||
| 365 | #define SSM_PSR_DT_AND_SRLZ_I \ | 393 | #define SSM_PSR_DT_AND_SRLZ_I \ |
| 366 | XEN_HYPER_SSM_PSR_DT | 394 | XEN_HYPER_SSM_PSR_DT |
| 367 | 395 | ||
diff --git a/arch/ia64/include/asm/xen/interface.h b/arch/ia64/include/asm/xen/interface.h index f00fab40854d..e951e740bdf2 100644 --- a/arch/ia64/include/asm/xen/interface.h +++ b/arch/ia64/include/asm/xen/interface.h | |||
| @@ -209,6 +209,15 @@ struct mapped_regs { | |||
| 209 | unsigned long krs[8]; /* kernel registers */ | 209 | unsigned long krs[8]; /* kernel registers */ |
| 210 | unsigned long tmp[16]; /* temp registers | 210 | unsigned long tmp[16]; /* temp registers |
| 211 | (e.g. for hyperprivops) */ | 211 | (e.g. for hyperprivops) */ |
| 212 | |||
| 213 | /* itc paravirtualization | ||
| 214 | * vAR.ITC = mAR.ITC + itc_offset | ||
| 215 | * itc_last is the value most recently passed to | ||
| 216 | * the guest OS in order to prevent it from | ||
| 217 | * going backwards. | ||
| 218 | */ | ||
| 219 | unsigned long itc_offset; | ||
| 220 | unsigned long itc_last; | ||
| 212 | }; | 221 | }; |
| 213 | }; | 222 | }; |
| 214 | }; | 223 | }; |
diff --git a/arch/ia64/include/asm/xen/minstate.h b/arch/ia64/include/asm/xen/minstate.h index 4d92d9bbda7b..c57fa910f2c9 100644 --- a/arch/ia64/include/asm/xen/minstate.h +++ b/arch/ia64/include/asm/xen/minstate.h | |||
| @@ -1,3 +1,12 @@ | |||
| 1 | |||
| 2 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | ||
| 3 | /* read ar.itc in advance, and use it before leaving bank 0 */ | ||
| 4 | #define XEN_ACCOUNT_GET_STAMP \ | ||
| 5 | MOV_FROM_ITC(pUStk, p6, r20, r2); | ||
| 6 | #else | ||
| 7 | #define XEN_ACCOUNT_GET_STAMP | ||
| 8 | #endif | ||
| 9 | |||
| 1 | /* | 10 | /* |
| 2 | * DO_SAVE_MIN switches to the kernel stacks (if necessary) and saves | 11 | * DO_SAVE_MIN switches to the kernel stacks (if necessary) and saves |
| 3 | * the minimum state necessary that allows us to turn psr.ic back | 12 | * the minimum state necessary that allows us to turn psr.ic back |
| @@ -123,7 +132,7 @@ | |||
| 123 | ;; \ | 132 | ;; \ |
| 124 | .mem.offset 0,0; st8.spill [r16]=r2,16; \ | 133 | .mem.offset 0,0; st8.spill [r16]=r2,16; \ |
| 125 | .mem.offset 8,0; st8.spill [r17]=r3,16; \ | 134 | .mem.offset 8,0; st8.spill [r17]=r3,16; \ |
| 126 | ACCOUNT_GET_STAMP \ | 135 | XEN_ACCOUNT_GET_STAMP \ |
| 127 | adds r2=IA64_PT_REGS_R16_OFFSET,r1; \ | 136 | adds r2=IA64_PT_REGS_R16_OFFSET,r1; \ |
| 128 | ;; \ | 137 | ;; \ |
| 129 | EXTRA; \ | 138 | EXTRA; \ |
diff --git a/arch/ia64/include/asm/xen/patchlist.h b/arch/ia64/include/asm/xen/patchlist.h new file mode 100644 index 000000000000..eae944e88846 --- /dev/null +++ b/arch/ia64/include/asm/xen/patchlist.h | |||
| @@ -0,0 +1,38 @@ | |||
| 1 | /****************************************************************************** | ||
| 2 | * arch/ia64/include/asm/xen/patchlist.h | ||
| 3 | * | ||
| 4 | * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp> | ||
| 5 | * VA Linux Systems Japan K.K. | ||
| 6 | * | ||
| 7 | * This program is free software; you can redistribute it and/or modify | ||
| 8 | * it under the terms of the GNU General Public License as published by | ||
| 9 | * the Free Software Foundation; either version 2 of the License, or | ||
| 10 | * (at your option) any later version. | ||
| 11 | * | ||
| 12 | * This program is distributed in the hope that it will be useful, | ||
| 13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 15 | * GNU General Public License for more details. | ||
| 16 | * | ||
| 17 | * You should have received a copy of the GNU General Public License | ||
| 18 | * along with this program; if not, write to the Free Software | ||
| 19 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
| 20 | * | ||
| 21 | */ | ||
| 22 | |||
| 23 | #define __paravirt_start_gate_fsyscall_patchlist \ | ||
| 24 | __xen_start_gate_fsyscall_patchlist | ||
| 25 | #define __paravirt_end_gate_fsyscall_patchlist \ | ||
| 26 | __xen_end_gate_fsyscall_patchlist | ||
| 27 | #define __paravirt_start_gate_brl_fsys_bubble_down_patchlist \ | ||
| 28 | __xen_start_gate_brl_fsys_bubble_down_patchlist | ||
| 29 | #define __paravirt_end_gate_brl_fsys_bubble_down_patchlist \ | ||
| 30 | __xen_end_gate_brl_fsys_bubble_down_patchlist | ||
| 31 | #define __paravirt_start_gate_vtop_patchlist \ | ||
| 32 | __xen_start_gate_vtop_patchlist | ||
| 33 | #define __paravirt_end_gate_vtop_patchlist \ | ||
| 34 | __xen_end_gate_vtop_patchlist | ||
| 35 | #define __paravirt_start_gate_mckinley_e9_patchlist \ | ||
| 36 | __xen_start_gate_mckinley_e9_patchlist | ||
| 37 | #define __paravirt_end_gate_mckinley_e9_patchlist \ | ||
| 38 | __xen_end_gate_mckinley_e9_patchlist | ||
diff --git a/arch/ia64/include/asm/xen/privop.h b/arch/ia64/include/asm/xen/privop.h index 71ec7546e100..fb4ec5e0b066 100644 --- a/arch/ia64/include/asm/xen/privop.h +++ b/arch/ia64/include/asm/xen/privop.h | |||
| @@ -55,6 +55,8 @@ | |||
| 55 | #define XSI_BANK1_R16 (XSI_BASE + XSI_BANK1_R16_OFS) | 55 | #define XSI_BANK1_R16 (XSI_BASE + XSI_BANK1_R16_OFS) |
| 56 | #define XSI_BANKNUM (XSI_BASE + XSI_BANKNUM_OFS) | 56 | #define XSI_BANKNUM (XSI_BASE + XSI_BANKNUM_OFS) |
| 57 | #define XSI_IHA (XSI_BASE + XSI_IHA_OFS) | 57 | #define XSI_IHA (XSI_BASE + XSI_IHA_OFS) |
| 58 | #define XSI_ITC_OFFSET (XSI_BASE + XSI_ITC_OFFSET_OFS) | ||
| 59 | #define XSI_ITC_LAST (XSI_BASE + XSI_ITC_LAST_OFS) | ||
| 58 | #endif | 60 | #endif |
| 59 | 61 | ||
| 60 | #ifndef __ASSEMBLY__ | 62 | #ifndef __ASSEMBLY__ |
| @@ -67,7 +69,7 @@ | |||
| 67 | * may have different semantics depending on whether they are executed | 69 | * may have different semantics depending on whether they are executed |
| 68 | * at PL0 vs PL!=0. When paravirtualized, these instructions mustn't | 70 | * at PL0 vs PL!=0. When paravirtualized, these instructions mustn't |
| 69 | * be allowed to execute directly, lest incorrect semantics result. */ | 71 | * be allowed to execute directly, lest incorrect semantics result. */ |
| 70 | extern void xen_fc(unsigned long addr); | 72 | extern void xen_fc(void *addr); |
| 71 | extern unsigned long xen_thash(unsigned long addr); | 73 | extern unsigned long xen_thash(unsigned long addr); |
| 72 | 74 | ||
| 73 | /* Note that "ttag" and "cover" are also privilege-sensitive; "ttag" | 75 | /* Note that "ttag" and "cover" are also privilege-sensitive; "ttag" |
| @@ -80,8 +82,10 @@ extern unsigned long xen_thash(unsigned long addr); | |||
| 80 | extern unsigned long xen_get_cpuid(int index); | 82 | extern unsigned long xen_get_cpuid(int index); |
| 81 | extern unsigned long xen_get_pmd(int index); | 83 | extern unsigned long xen_get_pmd(int index); |
| 82 | 84 | ||
| 85 | #ifndef ASM_SUPPORTED | ||
| 83 | extern unsigned long xen_get_eflag(void); /* see xen_ia64_getreg */ | 86 | extern unsigned long xen_get_eflag(void); /* see xen_ia64_getreg */ |
| 84 | extern void xen_set_eflag(unsigned long); /* see xen_ia64_setreg */ | 87 | extern void xen_set_eflag(unsigned long); /* see xen_ia64_setreg */ |
| 88 | #endif | ||
| 85 | 89 | ||
| 86 | /************************************************/ | 90 | /************************************************/ |
| 87 | /* Instructions paravirtualized for performance */ | 91 | /* Instructions paravirtualized for performance */ |
| @@ -106,6 +110,7 @@ extern void xen_set_eflag(unsigned long); /* see xen_ia64_setreg */ | |||
| 106 | #define xen_get_virtual_pend() \ | 110 | #define xen_get_virtual_pend() \ |
| 107 | (*(((uint8_t *)XEN_MAPPEDREGS->interrupt_mask_addr) - 1)) | 111 | (*(((uint8_t *)XEN_MAPPEDREGS->interrupt_mask_addr) - 1)) |
| 108 | 112 | ||
| 113 | #ifndef ASM_SUPPORTED | ||
| 109 | /* Although all privileged operations can be left to trap and will | 114 | /* Although all privileged operations can be left to trap and will |
| 110 | * be properly handled by Xen, some are frequent enough that we use | 115 | * be properly handled by Xen, some are frequent enough that we use |
| 111 | * hyperprivops for performance. */ | 116 | * hyperprivops for performance. */ |
| @@ -123,6 +128,7 @@ extern void xen_set_rr0_to_rr4(unsigned long val0, unsigned long val1, | |||
| 123 | unsigned long val4); | 128 | unsigned long val4); |
| 124 | extern void xen_set_kr(unsigned long index, unsigned long val); | 129 | extern void xen_set_kr(unsigned long index, unsigned long val); |
| 125 | extern void xen_ptcga(unsigned long addr, unsigned long size); | 130 | extern void xen_ptcga(unsigned long addr, unsigned long size); |
| 131 | #endif /* !ASM_SUPPORTED */ | ||
| 126 | 132 | ||
| 127 | #endif /* !__ASSEMBLY__ */ | 133 | #endif /* !__ASSEMBLY__ */ |
| 128 | 134 | ||
diff --git a/arch/ia64/kernel/Makefile b/arch/ia64/kernel/Makefile index f2778f2c4fd9..5628e9a990a6 100644 --- a/arch/ia64/kernel/Makefile +++ b/arch/ia64/kernel/Makefile | |||
| @@ -5,7 +5,7 @@ | |||
| 5 | extra-y := head.o init_task.o vmlinux.lds | 5 | extra-y := head.o init_task.o vmlinux.lds |
| 6 | 6 | ||
| 7 | obj-y := acpi.o entry.o efi.o efi_stub.o gate-data.o fsys.o ia64_ksyms.o irq.o irq_ia64.o \ | 7 | obj-y := acpi.o entry.o efi.o efi_stub.o gate-data.o fsys.o ia64_ksyms.o irq.o irq_ia64.o \ |
| 8 | irq_lsapic.o ivt.o machvec.o pal.o patch.o process.o perfmon.o ptrace.o sal.o \ | 8 | irq_lsapic.o ivt.o machvec.o pal.o paravirt_patchlist.o patch.o process.o perfmon.o ptrace.o sal.o \ |
| 9 | salinfo.o setup.o signal.o sys_ia64.o time.o traps.o unaligned.o \ | 9 | salinfo.o setup.o signal.o sys_ia64.o time.o traps.o unaligned.o \ |
| 10 | unwind.o mca.o mca_asm.o topology.o dma-mapping.o | 10 | unwind.o mca.o mca_asm.o topology.o dma-mapping.o |
| 11 | 11 | ||
| @@ -36,7 +36,8 @@ obj-$(CONFIG_PCI_MSI) += msi_ia64.o | |||
| 36 | mca_recovery-y += mca_drv.o mca_drv_asm.o | 36 | mca_recovery-y += mca_drv.o mca_drv_asm.o |
| 37 | obj-$(CONFIG_IA64_MC_ERR_INJECT)+= err_inject.o | 37 | obj-$(CONFIG_IA64_MC_ERR_INJECT)+= err_inject.o |
| 38 | 38 | ||
| 39 | obj-$(CONFIG_PARAVIRT) += paravirt.o paravirtentry.o | 39 | obj-$(CONFIG_PARAVIRT) += paravirt.o paravirtentry.o \ |
| 40 | paravirt_patch.o | ||
| 40 | 41 | ||
| 41 | obj-$(CONFIG_IA64_ESI) += esi.o | 42 | obj-$(CONFIG_IA64_ESI) += esi.o |
| 42 | ifneq ($(CONFIG_IA64_ESI),) | 43 | ifneq ($(CONFIG_IA64_ESI),) |
| @@ -45,35 +46,13 @@ endif | |||
| 45 | obj-$(CONFIG_DMAR) += pci-dma.o | 46 | obj-$(CONFIG_DMAR) += pci-dma.o |
| 46 | obj-$(CONFIG_SWIOTLB) += pci-swiotlb.o | 47 | obj-$(CONFIG_SWIOTLB) += pci-swiotlb.o |
| 47 | 48 | ||
| 48 | # The gate DSO image is built using a special linker script. | ||
| 49 | targets += gate.so gate-syms.o | ||
| 50 | |||
| 51 | extra-y += gate.so gate-syms.o gate.lds gate.o | ||
| 52 | |||
| 53 | # fp_emulate() expects f2-f5,f16-f31 to contain the user-level state. | 49 | # fp_emulate() expects f2-f5,f16-f31 to contain the user-level state. |
| 54 | CFLAGS_traps.o += -mfixed-range=f2-f5,f16-f31 | 50 | CFLAGS_traps.o += -mfixed-range=f2-f5,f16-f31 |
| 55 | 51 | ||
| 56 | CPPFLAGS_gate.lds := -P -C -U$(ARCH) | 52 | # The gate DSO image is built using a special linker script. |
| 57 | 53 | include $(srctree)/arch/ia64/kernel/Makefile.gate | |
| 58 | quiet_cmd_gate = GATE $@ | 54 | # tell gate.lds it is compiled for native
| 59 | cmd_gate = $(CC) -nostdlib $(GATECFLAGS_$(@F)) -Wl,-T,$(filter-out FORCE,$^) -o $@ | 55 | CPPFLAGS_gate.lds += -D__IA64_GATE_PARAVIRTUALIZED_NATIVE |
| 60 | |||
| 61 | GATECFLAGS_gate.so = -shared -s -Wl,-soname=linux-gate.so.1 \ | ||
| 62 | $(call ld-option, -Wl$(comma)--hash-style=sysv) | ||
| 63 | $(obj)/gate.so: $(obj)/gate.lds $(obj)/gate.o FORCE | ||
| 64 | $(call if_changed,gate) | ||
| 65 | |||
| 66 | $(obj)/built-in.o: $(obj)/gate-syms.o | ||
| 67 | $(obj)/built-in.o: ld_flags += -R $(obj)/gate-syms.o | ||
| 68 | |||
| 69 | GATECFLAGS_gate-syms.o = -r | ||
| 70 | $(obj)/gate-syms.o: $(obj)/gate.lds $(obj)/gate.o FORCE | ||
| 71 | $(call if_changed,gate) | ||
| 72 | |||
| 73 | # gate-data.o contains the gate DSO image as data in section .data.gate. | ||
| 74 | # We must build gate.so before we can assemble it. | ||
| 75 | # Note: kbuild does not track this dependency due to usage of .incbin | ||
| 76 | $(obj)/gate-data.o: $(obj)/gate.so | ||
| 77 | 56 | ||
| 78 | # Calculate NR_IRQ = max(IA64_NATIVE_NR_IRQS, XEN_NR_IRQS, ...) based on config | 57 | # Calculate NR_IRQ = max(IA64_NATIVE_NR_IRQS, XEN_NR_IRQS, ...) based on config |
| 79 | define sed-y | 58 | define sed-y |
| @@ -109,9 +88,9 @@ include/asm-ia64/nr-irqs.h: arch/$(SRCARCH)/kernel/nr-irqs.s | |||
| 109 | clean-files += $(objtree)/include/asm-ia64/nr-irqs.h | 88 | clean-files += $(objtree)/include/asm-ia64/nr-irqs.h |
| 110 | 89 | ||
| 111 | # | 90 | # |
| 112 | # native ivt.S and entry.S | 91 | # native ivt.S, entry.S and fsys.S |
| 113 | # | 92 | # |
| 114 | ASM_PARAVIRT_OBJS = ivt.o entry.o | 93 | ASM_PARAVIRT_OBJS = ivt.o entry.o fsys.o |
| 115 | define paravirtualized_native | 94 | define paravirtualized_native |
| 116 | AFLAGS_$(1) += -D__IA64_ASM_PARAVIRTUALIZED_NATIVE | 95 | AFLAGS_$(1) += -D__IA64_ASM_PARAVIRTUALIZED_NATIVE |
| 117 | AFLAGS_pvchk-sed-$(1) += -D__IA64_ASM_PARAVIRTUALIZED_PVCHECK | 96 | AFLAGS_pvchk-sed-$(1) += -D__IA64_ASM_PARAVIRTUALIZED_PVCHECK |
diff --git a/arch/ia64/kernel/Makefile.gate b/arch/ia64/kernel/Makefile.gate new file mode 100644 index 000000000000..1d87f84069b3 --- /dev/null +++ b/arch/ia64/kernel/Makefile.gate | |||
| @@ -0,0 +1,27 @@ | |||
| 1 | # The gate DSO image is built using a special linker script. | ||
| 2 | |||
| 3 | targets += gate.so gate-syms.o | ||
| 4 | |||
| 5 | extra-y += gate.so gate-syms.o gate.lds gate.o | ||
| 6 | |||
| 7 | CPPFLAGS_gate.lds := -P -C -U$(ARCH) | ||
| 8 | |||
| 9 | quiet_cmd_gate = GATE $@ | ||
| 10 | cmd_gate = $(CC) -nostdlib $(GATECFLAGS_$(@F)) -Wl,-T,$(filter-out FORCE,$^) -o $@ | ||
| 11 | |||
| 12 | GATECFLAGS_gate.so = -shared -s -Wl,-soname=linux-gate.so.1 \ | ||
| 13 | $(call ld-option, -Wl$(comma)--hash-style=sysv) | ||
| 14 | $(obj)/gate.so: $(obj)/gate.lds $(obj)/gate.o FORCE | ||
| 15 | $(call if_changed,gate) | ||
| 16 | |||
| 17 | $(obj)/built-in.o: $(obj)/gate-syms.o | ||
| 18 | $(obj)/built-in.o: ld_flags += -R $(obj)/gate-syms.o | ||
| 19 | |||
| 20 | GATECFLAGS_gate-syms.o = -r | ||
| 21 | $(obj)/gate-syms.o: $(obj)/gate.lds $(obj)/gate.o FORCE | ||
| 22 | $(call if_changed,gate) | ||
| 23 | |||
| 24 | # gate-data.o contains the gate DSO image as data in section .data.gate. | ||
| 25 | # We must build gate.so before we can assemble it. | ||
| 26 | # Note: kbuild does not track this dependency due to usage of .incbin | ||
| 27 | $(obj)/gate-data.o: $(obj)/gate.so | ||
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c index bdef2ce38c8b..5510317db37b 100644 --- a/arch/ia64/kernel/acpi.c +++ b/arch/ia64/kernel/acpi.c | |||
| @@ -890,7 +890,7 @@ __init void prefill_possible_map(void) | |||
| 890 | possible, max((possible - available_cpus), 0)); | 890 | possible, max((possible - available_cpus), 0)); |
| 891 | 891 | ||
| 892 | for (i = 0; i < possible; i++) | 892 | for (i = 0; i < possible; i++) |
| 893 | cpu_set(i, cpu_possible_map); | 893 | set_cpu_possible(i, true); |
| 894 | } | 894 | } |
| 895 | 895 | ||
| 896 | int acpi_map_lsapic(acpi_handle handle, int *pcpu) | 896 | int acpi_map_lsapic(acpi_handle handle, int *pcpu) |
| @@ -928,9 +928,9 @@ int acpi_map_lsapic(acpi_handle handle, int *pcpu) | |||
| 928 | buffer.length = ACPI_ALLOCATE_BUFFER; | 928 | buffer.length = ACPI_ALLOCATE_BUFFER; |
| 929 | buffer.pointer = NULL; | 929 | buffer.pointer = NULL; |
| 930 | 930 | ||
| 931 | cpus_complement(tmp_map, cpu_present_map); | 931 | cpumask_complement(&tmp_map, cpu_present_mask); |
| 932 | cpu = first_cpu(tmp_map); | 932 | cpu = cpumask_first(&tmp_map); |
| 933 | if (cpu >= NR_CPUS) | 933 | if (cpu >= nr_cpu_ids) |
| 934 | return -EINVAL; | 934 | return -EINVAL; |
| 935 | 935 | ||
| 936 | acpi_map_cpu2node(handle, cpu, physid); | 936 | acpi_map_cpu2node(handle, cpu, physid); |
diff --git a/arch/ia64/kernel/asm-offsets.c b/arch/ia64/kernel/asm-offsets.c index 742dbb1d5a4f..af5650169043 100644 --- a/arch/ia64/kernel/asm-offsets.c +++ b/arch/ia64/kernel/asm-offsets.c | |||
| @@ -316,5 +316,7 @@ void foo(void) | |||
| 316 | DEFINE_MAPPED_REG_OFS(XSI_BANK1_R16_OFS, bank1_regs[0]); | 316 | DEFINE_MAPPED_REG_OFS(XSI_BANK1_R16_OFS, bank1_regs[0]); |
| 317 | DEFINE_MAPPED_REG_OFS(XSI_B0NATS_OFS, vbnat); | 317 | DEFINE_MAPPED_REG_OFS(XSI_B0NATS_OFS, vbnat); |
| 318 | DEFINE_MAPPED_REG_OFS(XSI_B1NATS_OFS, vnat); | 318 | DEFINE_MAPPED_REG_OFS(XSI_B1NATS_OFS, vnat); |
| 319 | DEFINE_MAPPED_REG_OFS(XSI_ITC_OFFSET_OFS, itc_offset); | ||
| 320 | DEFINE_MAPPED_REG_OFS(XSI_ITC_LAST_OFS, itc_last); | ||
| 319 | #endif /* CONFIG_XEN */ | 321 | #endif /* CONFIG_XEN */ |
| 320 | } | 322 | } |
diff --git a/arch/ia64/kernel/efi.c b/arch/ia64/kernel/efi.c index efaff15d8cf1..7ef80e8161ce 100644 --- a/arch/ia64/kernel/efi.c +++ b/arch/ia64/kernel/efi.c | |||
| @@ -456,6 +456,7 @@ efi_map_pal_code (void) | |||
| 456 | GRANULEROUNDDOWN((unsigned long) pal_vaddr), | 456 | GRANULEROUNDDOWN((unsigned long) pal_vaddr), |
| 457 | pte_val(pfn_pte(__pa(pal_vaddr) >> PAGE_SHIFT, PAGE_KERNEL)), | 457 | pte_val(pfn_pte(__pa(pal_vaddr) >> PAGE_SHIFT, PAGE_KERNEL)), |
| 458 | IA64_GRANULE_SHIFT); | 458 | IA64_GRANULE_SHIFT); |
| 459 | paravirt_dv_serialize_data(); | ||
| 459 | ia64_set_psr(psr); /* restore psr */ | 460 | ia64_set_psr(psr); /* restore psr */ |
| 460 | } | 461 | } |
| 461 | 462 | ||
diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S index e5341e2c1175..ccfdeee9d89f 100644 --- a/arch/ia64/kernel/entry.S +++ b/arch/ia64/kernel/entry.S | |||
| @@ -735,7 +735,7 @@ GLOBAL_ENTRY(__paravirt_leave_syscall) | |||
| 735 | __paravirt_work_processed_syscall: | 735 | __paravirt_work_processed_syscall: |
| 736 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | 736 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING |
| 737 | adds r2=PT(LOADRS)+16,r12 | 737 | adds r2=PT(LOADRS)+16,r12 |
| 738 | (pUStk) mov.m r22=ar.itc // fetch time at leave | 738 | MOV_FROM_ITC(pUStk, p9, r22, r19) // fetch time at leave |
| 739 | adds r18=TI_FLAGS+IA64_TASK_SIZE,r13 | 739 | adds r18=TI_FLAGS+IA64_TASK_SIZE,r13 |
| 740 | ;; | 740 | ;; |
| 741 | (p6) ld4 r31=[r18] // load current_thread_info()->flags | 741 | (p6) ld4 r31=[r18] // load current_thread_info()->flags |
| @@ -984,7 +984,7 @@ GLOBAL_ENTRY(__paravirt_leave_kernel) | |||
| 984 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | 984 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING |
| 985 | .pred.rel.mutex pUStk,pKStk | 985 | .pred.rel.mutex pUStk,pKStk |
| 986 | MOV_FROM_PSR(pKStk, r22, r29) // M2 read PSR now that interrupts are disabled | 986 | MOV_FROM_PSR(pKStk, r22, r29) // M2 read PSR now that interrupts are disabled |
| 987 | (pUStk) mov.m r22=ar.itc // M fetch time at leave | 987 | MOV_FROM_ITC(pUStk, p9, r22, r29) // M fetch time at leave |
| 988 | nop.i 0 | 988 | nop.i 0 |
| 989 | ;; | 989 | ;; |
| 990 | #else | 990 | #else |
diff --git a/arch/ia64/kernel/fsys.S b/arch/ia64/kernel/fsys.S index c1625c7e1779..3567d54f8cee 100644 --- a/arch/ia64/kernel/fsys.S +++ b/arch/ia64/kernel/fsys.S | |||
| @@ -25,6 +25,7 @@ | |||
| 25 | #include <asm/unistd.h> | 25 | #include <asm/unistd.h> |
| 26 | 26 | ||
| 27 | #include "entry.h" | 27 | #include "entry.h" |
| 28 | #include "paravirt_inst.h" | ||
| 28 | 29 | ||
| 29 | /* | 30 | /* |
| 30 | * See Documentation/ia64/fsys.txt for details on fsyscalls. | 31 | * See Documentation/ia64/fsys.txt for details on fsyscalls. |
| @@ -279,7 +280,7 @@ ENTRY(fsys_gettimeofday) | |||
| 279 | (p9) cmp.eq p13,p0 = 0,r30 // if mmio_ptr, clear p13 jitter control | 280 | (p9) cmp.eq p13,p0 = 0,r30 // if mmio_ptr, clear p13 jitter control |
| 280 | ;; | 281 | ;; |
| 281 | .pred.rel.mutex p8,p9 | 282 | .pred.rel.mutex p8,p9 |
| 282 | (p8) mov r2 = ar.itc // CPU_TIMER. 36 clocks latency!!! | 283 | MOV_FROM_ITC(p8, p6, r2, r10) // CPU_TIMER. 36 clocks latency!!! |
| 283 | (p9) ld8 r2 = [r30] // MMIO_TIMER. Could also have latency issues.. | 284 | (p9) ld8 r2 = [r30] // MMIO_TIMER. Could also have latency issues.. |
| 284 | (p13) ld8 r25 = [r19] // get itc_lastcycle value | 285 | (p13) ld8 r25 = [r19] // get itc_lastcycle value |
| 285 | ld8 r9 = [r22],IA64_TIMESPEC_TV_NSEC_OFFSET // tv_sec | 286 | ld8 r9 = [r22],IA64_TIMESPEC_TV_NSEC_OFFSET // tv_sec |
| @@ -418,7 +419,7 @@ EX(.fail_efault, ld8 r14=[r33]) // r14 <- *set | |||
| 418 | mov r17=(1 << (SIGKILL - 1)) | (1 << (SIGSTOP - 1)) | 419 | mov r17=(1 << (SIGKILL - 1)) | (1 << (SIGSTOP - 1)) |
| 419 | ;; | 420 | ;; |
| 420 | 421 | ||
| 421 | rsm psr.i // mask interrupt delivery | 422 | RSM_PSR_I(p0, r18, r19) // mask interrupt delivery |
| 422 | mov ar.ccv=0 | 423 | mov ar.ccv=0 |
| 423 | andcm r14=r14,r17 // filter out SIGKILL & SIGSTOP | 424 | andcm r14=r14,r17 // filter out SIGKILL & SIGSTOP |
| 424 | 425 | ||
| @@ -491,7 +492,7 @@ EX(.fail_efault, ld8 r14=[r33]) // r14 <- *set | |||
| 491 | #ifdef CONFIG_SMP | 492 | #ifdef CONFIG_SMP |
| 492 | st4.rel [r31]=r0 // release the lock | 493 | st4.rel [r31]=r0 // release the lock |
| 493 | #endif | 494 | #endif |
| 494 | ssm psr.i | 495 | SSM_PSR_I(p0, p9, r31) |
| 495 | ;; | 496 | ;; |
| 496 | 497 | ||
| 497 | srlz.d // ensure psr.i is set again | 498 | srlz.d // ensure psr.i is set again |
| @@ -513,7 +514,7 @@ EX(.fail_efault, (p15) st8 [r34]=r3) | |||
| 513 | #ifdef CONFIG_SMP | 514 | #ifdef CONFIG_SMP |
| 514 | st4.rel [r31]=r0 // release the lock | 515 | st4.rel [r31]=r0 // release the lock |
| 515 | #endif | 516 | #endif |
| 516 | ssm psr.i | 517 | SSM_PSR_I(p0, p9, r17) |
| 517 | ;; | 518 | ;; |
| 518 | srlz.d | 519 | srlz.d |
| 519 | br.sptk.many fsys_fallback_syscall // with signal pending, do the heavy-weight syscall | 520 | br.sptk.many fsys_fallback_syscall // with signal pending, do the heavy-weight syscall |
| @@ -521,7 +522,7 @@ EX(.fail_efault, (p15) st8 [r34]=r3) | |||
| 521 | #ifdef CONFIG_SMP | 522 | #ifdef CONFIG_SMP |
| 522 | .lock_contention: | 523 | .lock_contention: |
| 523 | /* Rather than spinning here, fall back on doing a heavy-weight syscall. */ | 524 | /* Rather than spinning here, fall back on doing a heavy-weight syscall. */ |
| 524 | ssm psr.i | 525 | SSM_PSR_I(p0, p9, r17) |
| 525 | ;; | 526 | ;; |
| 526 | srlz.d | 527 | srlz.d |
| 527 | br.sptk.many fsys_fallback_syscall | 528 | br.sptk.many fsys_fallback_syscall |
| @@ -592,17 +593,17 @@ ENTRY(fsys_fallback_syscall) | |||
| 592 | adds r17=-1024,r15 | 593 | adds r17=-1024,r15 |
| 593 | movl r14=sys_call_table | 594 | movl r14=sys_call_table |
| 594 | ;; | 595 | ;; |
| 595 | rsm psr.i | 596 | RSM_PSR_I(p0, r26, r27) |
| 596 | shladd r18=r17,3,r14 | 597 | shladd r18=r17,3,r14 |
| 597 | ;; | 598 | ;; |
| 598 | ld8 r18=[r18] // load normal (heavy-weight) syscall entry-point | 599 | ld8 r18=[r18] // load normal (heavy-weight) syscall entry-point |
| 599 | mov r29=psr // read psr (12 cyc load latency) | 600 | MOV_FROM_PSR(p0, r29, r26) // read psr (12 cyc load latency) |
| 600 | mov r27=ar.rsc | 601 | mov r27=ar.rsc |
| 601 | mov r21=ar.fpsr | 602 | mov r21=ar.fpsr |
| 602 | mov r26=ar.pfs | 603 | mov r26=ar.pfs |
| 603 | END(fsys_fallback_syscall) | 604 | END(fsys_fallback_syscall) |
| 604 | /* FALL THROUGH */ | 605 | /* FALL THROUGH */ |
| 605 | GLOBAL_ENTRY(fsys_bubble_down) | 606 | GLOBAL_ENTRY(paravirt_fsys_bubble_down) |
| 606 | .prologue | 607 | .prologue |
| 607 | .altrp b6 | 608 | .altrp b6 |
| 608 | .body | 609 | .body |
| @@ -640,7 +641,7 @@ GLOBAL_ENTRY(fsys_bubble_down) | |||
| 640 | * | 641 | * |
| 641 | * PSR.BE : already is turned off in __kernel_syscall_via_epc() | 642 | * PSR.BE : already is turned off in __kernel_syscall_via_epc() |
| 642 | * PSR.AC : don't care (kernel normally turns PSR.AC on) | 643 | * PSR.AC : don't care (kernel normally turns PSR.AC on) |
| 643 | * PSR.I : already turned off by the time fsys_bubble_down gets | 644 | * PSR.I : already turned off by the time paravirt_fsys_bubble_down gets |
| 644 | * invoked | 645 | * invoked |
| 645 | * PSR.DFL: always 0 (kernel never turns it on) | 646 | * PSR.DFL: always 0 (kernel never turns it on) |
| 646 | * PSR.DFH: don't care --- kernel never touches f32-f127 on its own | 647 | * PSR.DFH: don't care --- kernel never touches f32-f127 on its own |
| @@ -650,7 +651,7 @@ GLOBAL_ENTRY(fsys_bubble_down) | |||
| 650 | * PSR.DB : don't care --- kernel never enables kernel-level | 651 | * PSR.DB : don't care --- kernel never enables kernel-level |
| 651 | * breakpoints | 652 | * breakpoints |
| 652 | * PSR.TB : must be 0 already; if it wasn't zero on entry to | 653 | * PSR.TB : must be 0 already; if it wasn't zero on entry to |
| 653 | * __kernel_syscall_via_epc, the branch to fsys_bubble_down | 654 | * __kernel_syscall_via_epc, the branch to paravirt_fsys_bubble_down |
| 654 | * will trigger a taken branch; the taken-trap-handler then | 655 | * will trigger a taken branch; the taken-trap-handler then |
| 655 | * converts the syscall into a break-based system-call. | 656 | * converts the syscall into a break-based system-call. |
| 656 | */ | 657 | */ |
| @@ -683,7 +684,7 @@ GLOBAL_ENTRY(fsys_bubble_down) | |||
| 683 | ;; | 684 | ;; |
| 684 | mov ar.rsc=0 // M2 set enforced lazy mode, pl 0, LE, loadrs=0 | 685 | mov ar.rsc=0 // M2 set enforced lazy mode, pl 0, LE, loadrs=0 |
| 685 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | 686 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING |
| 686 | mov.m r30=ar.itc // M get cycle for accounting | 687 | MOV_FROM_ITC(p0, p6, r30, r23) // M get cycle for accounting |
| 687 | #else | 688 | #else |
| 688 | nop.m 0 | 689 | nop.m 0 |
| 689 | #endif | 690 | #endif |
| @@ -734,21 +735,21 @@ GLOBAL_ENTRY(fsys_bubble_down) | |||
| 734 | mov rp=r14 // I0 set the real return addr | 735 | mov rp=r14 // I0 set the real return addr |
| 735 | and r3=_TIF_SYSCALL_TRACEAUDIT,r3 // A | 736 | and r3=_TIF_SYSCALL_TRACEAUDIT,r3 // A |
| 736 | ;; | 737 | ;; |
| 737 | ssm psr.i // M2 we're on kernel stacks now, reenable irqs | 738 | SSM_PSR_I(p0, p6, r22) // M2 we're on kernel stacks now, reenable irqs |
| 738 | cmp.eq p8,p0=r3,r0 // A | 739 | cmp.eq p8,p0=r3,r0 // A |
| 739 | (p10) br.cond.spnt.many ia64_ret_from_syscall // B return if bad call-frame or r15 is a NaT | 740 | (p10) br.cond.spnt.many ia64_ret_from_syscall // B return if bad call-frame or r15 is a NaT |
| 740 | 741 | ||
| 741 | nop.m 0 | 742 | nop.m 0 |
| 742 | (p8) br.call.sptk.many b6=b6 // B (ignore return address) | 743 | (p8) br.call.sptk.many b6=b6 // B (ignore return address) |
| 743 | br.cond.spnt ia64_trace_syscall // B | 744 | br.cond.spnt ia64_trace_syscall // B |
| 744 | END(fsys_bubble_down) | 745 | END(paravirt_fsys_bubble_down) |
| 745 | 746 | ||
| 746 | .rodata | 747 | .rodata |
| 747 | .align 8 | 748 | .align 8 |
| 748 | .globl fsyscall_table | 749 | .globl paravirt_fsyscall_table |
| 749 | 750 | ||
| 750 | data8 fsys_bubble_down | 751 | data8 paravirt_fsys_bubble_down |
| 751 | fsyscall_table: | 752 | paravirt_fsyscall_table: |
| 752 | data8 fsys_ni_syscall | 753 | data8 fsys_ni_syscall |
| 753 | data8 0 // exit // 1025 | 754 | data8 0 // exit // 1025 |
| 754 | data8 0 // read | 755 | data8 0 // read |
| @@ -1033,4 +1034,4 @@ fsyscall_table: | |||
| 1033 | 1034 | ||
| 1034 | // fill in zeros for the remaining entries | 1035 | // fill in zeros for the remaining entries |
| 1035 | .zero: | 1036 | .zero: |
| 1036 | .space fsyscall_table + 8*NR_syscalls - .zero, 0 | 1037 | .space paravirt_fsyscall_table + 8*NR_syscalls - .zero, 0 |
diff --git a/arch/ia64/kernel/gate.S b/arch/ia64/kernel/gate.S index 74b1ccce4e84..cf5e0a105e16 100644 --- a/arch/ia64/kernel/gate.S +++ b/arch/ia64/kernel/gate.S | |||
| @@ -13,6 +13,7 @@ | |||
| 13 | #include <asm/sigcontext.h> | 13 | #include <asm/sigcontext.h> |
| 14 | #include <asm/system.h> | 14 | #include <asm/system.h> |
| 15 | #include <asm/unistd.h> | 15 | #include <asm/unistd.h> |
| 16 | #include "paravirt_inst.h" | ||
| 16 | 17 | ||
| 17 | /* | 18 | /* |
| 18 | * We can't easily refer to symbols inside the kernel. To avoid full runtime relocation, | 19 | * We can't easily refer to symbols inside the kernel. To avoid full runtime relocation, |
| @@ -48,87 +49,6 @@ GLOBAL_ENTRY(__kernel_syscall_via_break) | |||
| 48 | } | 49 | } |
| 49 | END(__kernel_syscall_via_break) | 50 | END(__kernel_syscall_via_break) |
| 50 | 51 | ||
| 51 | /* | ||
| 52 | * On entry: | ||
| 53 | * r11 = saved ar.pfs | ||
| 54 | * r15 = system call # | ||
| 55 | * b0 = saved return address | ||
| 56 | * b6 = return address | ||
| 57 | * On exit: | ||
| 58 | * r11 = saved ar.pfs | ||
| 59 | * r15 = system call # | ||
| 60 | * b0 = saved return address | ||
| 61 | * all other "scratch" registers: undefined | ||
| 62 | * all "preserved" registers: same as on entry | ||
| 63 | */ | ||
| 64 | |||
| 65 | GLOBAL_ENTRY(__kernel_syscall_via_epc) | ||
| 66 | .prologue | ||
| 67 | .altrp b6 | ||
| 68 | .body | ||
| 69 | { | ||
| 70 | /* | ||
| 71 | * Note: the kernel cannot assume that the first two instructions in this | ||
| 72 | * bundle get executed. The remaining code must be safe even if | ||
| 73 | * they do not get executed. | ||
| 74 | */ | ||
| 75 | adds r17=-1024,r15 // A | ||
| 76 | mov r10=0 // A default to successful syscall execution | ||
| 77 | epc // B causes split-issue | ||
| 78 | } | ||
| 79 | ;; | ||
| 80 | rsm psr.be | psr.i // M2 (5 cyc to srlz.d) | ||
| 81 | LOAD_FSYSCALL_TABLE(r14) // X | ||
| 82 | ;; | ||
| 83 | mov r16=IA64_KR(CURRENT) // M2 (12 cyc) | ||
| 84 | shladd r18=r17,3,r14 // A | ||
| 85 | mov r19=NR_syscalls-1 // A | ||
| 86 | ;; | ||
| 87 | lfetch [r18] // M0|1 | ||
| 88 | mov r29=psr // M2 (12 cyc) | ||
| 89 | // If r17 is a NaT, p6 will be zero | ||
| 90 | cmp.geu p6,p7=r19,r17 // A (sysnr > 0 && sysnr < 1024+NR_syscalls)? | ||
| 91 | ;; | ||
| 92 | mov r21=ar.fpsr // M2 (12 cyc) | ||
| 93 | tnat.nz p10,p9=r15 // I0 | ||
| 94 | mov.i r26=ar.pfs // I0 (would stall anyhow due to srlz.d...) | ||
| 95 | ;; | ||
| 96 | srlz.d // M0 (forces split-issue) ensure PSR.BE==0 | ||
| 97 | (p6) ld8 r18=[r18] // M0|1 | ||
| 98 | nop.i 0 | ||
| 99 | ;; | ||
| 100 | nop.m 0 | ||
| 101 | (p6) tbit.z.unc p8,p0=r18,0 // I0 (dual-issues with "mov b7=r18"!) | ||
| 102 | nop.i 0 | ||
| 103 | ;; | ||
| 104 | (p8) ssm psr.i | ||
| 105 | (p6) mov b7=r18 // I0 | ||
| 106 | (p8) br.dptk.many b7 // B | ||
| 107 | |||
| 108 | mov r27=ar.rsc // M2 (12 cyc) | ||
| 109 | /* | ||
| 110 | * brl.cond doesn't work as intended because the linker would convert this branch | ||
| 111 | * into a branch to a PLT. Perhaps there will be a way to avoid this with some | ||
| 112 | * future version of the linker. In the meantime, we just use an indirect branch | ||
| 113 | * instead. | ||
| 114 | */ | ||
| 115 | #ifdef CONFIG_ITANIUM | ||
| 116 | (p6) add r14=-8,r14 // r14 <- addr of fsys_bubble_down entry | ||
| 117 | ;; | ||
| 118 | (p6) ld8 r14=[r14] // r14 <- fsys_bubble_down | ||
| 119 | ;; | ||
| 120 | (p6) mov b7=r14 | ||
| 121 | (p6) br.sptk.many b7 | ||
| 122 | #else | ||
| 123 | BRL_COND_FSYS_BUBBLE_DOWN(p6) | ||
| 124 | #endif | ||
| 125 | ssm psr.i | ||
| 126 | mov r10=-1 | ||
| 127 | (p10) mov r8=EINVAL | ||
| 128 | (p9) mov r8=ENOSYS | ||
| 129 | FSYS_RETURN | ||
| 130 | END(__kernel_syscall_via_epc) | ||
| 131 | |||
| 132 | # define ARG0_OFF (16 + IA64_SIGFRAME_ARG0_OFFSET) | 52 | # define ARG0_OFF (16 + IA64_SIGFRAME_ARG0_OFFSET) |
| 133 | # define ARG1_OFF (16 + IA64_SIGFRAME_ARG1_OFFSET) | 53 | # define ARG1_OFF (16 + IA64_SIGFRAME_ARG1_OFFSET) |
| 134 | # define ARG2_OFF (16 + IA64_SIGFRAME_ARG2_OFFSET) | 54 | # define ARG2_OFF (16 + IA64_SIGFRAME_ARG2_OFFSET) |
| @@ -374,3 +294,92 @@ restore_rbs: | |||
| 374 | // invala not necessary as that will happen when returning to user-mode | 294 | // invala not necessary as that will happen when returning to user-mode |
| 375 | br.cond.sptk back_from_restore_rbs | 295 | br.cond.sptk back_from_restore_rbs |
| 376 | END(__kernel_sigtramp) | 296 | END(__kernel_sigtramp) |
| 297 | |||
| 298 | /* | ||
| 299 | * On entry: | ||
| 300 | * r11 = saved ar.pfs | ||
| 301 | * r15 = system call # | ||
| 302 | * b0 = saved return address | ||
| 303 | * b6 = return address | ||
| 304 | * On exit: | ||
| 305 | * r11 = saved ar.pfs | ||
| 306 | * r15 = system call # | ||
| 307 | * b0 = saved return address | ||
| 308 | * all other "scratch" registers: undefined | ||
| 309 | * all "preserved" registers: same as on entry | ||
| 310 | */ | ||
| 311 | |||
| 312 | GLOBAL_ENTRY(__kernel_syscall_via_epc) | ||
| 313 | .prologue | ||
| 314 | .altrp b6 | ||
| 315 | .body | ||
| 316 | { | ||
| 317 | /* | ||
| 318 | * Note: the kernel cannot assume that the first two instructions in this | ||
| 319 | * bundle get executed. The remaining code must be safe even if | ||
| 320 | * they do not get executed. | ||
| 321 | */ | ||
| 322 | adds r17=-1024,r15 // A | ||
| 323 | mov r10=0 // A default to successful syscall execution | ||
| 324 | epc // B causes split-issue | ||
| 325 | } | ||
| 326 | ;; | ||
| 327 | RSM_PSR_BE_I(r20, r22) // M2 (5 cyc to srlz.d) | ||
| 328 | LOAD_FSYSCALL_TABLE(r14) // X | ||
| 329 | ;; | ||
| 330 | mov r16=IA64_KR(CURRENT) // M2 (12 cyc) | ||
| 331 | shladd r18=r17,3,r14 // A | ||
| 332 | mov r19=NR_syscalls-1 // A | ||
| 333 | ;; | ||
| 334 | lfetch [r18] // M0|1 | ||
| 335 | MOV_FROM_PSR(p0, r29, r8) // M2 (12 cyc) | ||
| 336 | // If r17 is a NaT, p6 will be zero | ||
| 337 | cmp.geu p6,p7=r19,r17 // A (sysnr > 0 && sysnr < 1024+NR_syscalls)? | ||
| 338 | ;; | ||
| 339 | mov r21=ar.fpsr // M2 (12 cyc) | ||
| 340 | tnat.nz p10,p9=r15 // I0 | ||
| 341 | mov.i r26=ar.pfs // I0 (would stall anyhow due to srlz.d...) | ||
| 342 | ;; | ||
| 343 | srlz.d // M0 (forces split-issue) ensure PSR.BE==0 | ||
| 344 | (p6) ld8 r18=[r18] // M0|1 | ||
| 345 | nop.i 0 | ||
| 346 | ;; | ||
| 347 | nop.m 0 | ||
| 348 | (p6) tbit.z.unc p8,p0=r18,0 // I0 (dual-issues with "mov b7=r18"!) | ||
| 349 | nop.i 0 | ||
| 350 | ;; | ||
| 351 | SSM_PSR_I(p8, p14, r25) | ||
| 352 | (p6) mov b7=r18 // I0 | ||
| 353 | (p8) br.dptk.many b7 // B | ||
| 354 | |||
| 355 | mov r27=ar.rsc // M2 (12 cyc) | ||
| 356 | /* | ||
| 357 | * brl.cond doesn't work as intended because the linker would convert this branch | ||
| 358 | * into a branch to a PLT. Perhaps there will be a way to avoid this with some | ||
| 359 | * future version of the linker. In the meantime, we just use an indirect branch | ||
| 360 | * instead. | ||
| 361 | */ | ||
| 362 | #ifdef CONFIG_ITANIUM | ||
| 363 | (p6) add r14=-8,r14 // r14 <- addr of fsys_bubble_down entry | ||
| 364 | ;; | ||
| 365 | (p6) ld8 r14=[r14] // r14 <- fsys_bubble_down | ||
| 366 | ;; | ||
| 367 | (p6) mov b7=r14 | ||
| 368 | (p6) br.sptk.many b7 | ||
| 369 | #else | ||
| 370 | BRL_COND_FSYS_BUBBLE_DOWN(p6) | ||
| 371 | #endif | ||
| 372 | SSM_PSR_I(p0, p14, r10) | ||
| 373 | mov r10=-1 | ||
| 374 | (p10) mov r8=EINVAL | ||
| 375 | (p9) mov r8=ENOSYS | ||
| 376 | FSYS_RETURN | ||
| 377 | |||
| 378 | #ifdef CONFIG_PARAVIRT | ||
| 379 | /* | ||
| 380 | * padd to make the size of this symbol constant | ||
| 381 | * independent of paravirtualization. | ||
| 382 | */ | ||
| 383 | .align PAGE_SIZE / 8 | ||
| 384 | #endif | ||
| 385 | END(__kernel_syscall_via_epc) | ||
diff --git a/arch/ia64/kernel/gate.lds.S b/arch/ia64/kernel/gate.lds.S index 3cb1abc00e24..88c64ed47c36 100644 --- a/arch/ia64/kernel/gate.lds.S +++ b/arch/ia64/kernel/gate.lds.S | |||
| @@ -7,6 +7,7 @@ | |||
| 7 | 7 | ||
| 8 | 8 | ||
| 9 | #include <asm/system.h> | 9 | #include <asm/system.h> |
| 10 | #include "paravirt_patchlist.h" | ||
| 10 | 11 | ||
| 11 | SECTIONS | 12 | SECTIONS |
| 12 | { | 13 | { |
| @@ -33,21 +34,21 @@ SECTIONS | |||
| 33 | . = GATE_ADDR + 0x600; | 34 | . = GATE_ADDR + 0x600; |
| 34 | 35 | ||
| 35 | .data.patch : { | 36 | .data.patch : { |
| 36 | __start_gate_mckinley_e9_patchlist = .; | 37 | __paravirt_start_gate_mckinley_e9_patchlist = .; |
| 37 | *(.data.patch.mckinley_e9) | 38 | *(.data.patch.mckinley_e9) |
| 38 | __end_gate_mckinley_e9_patchlist = .; | 39 | __paravirt_end_gate_mckinley_e9_patchlist = .; |
| 39 | 40 | ||
| 40 | __start_gate_vtop_patchlist = .; | 41 | __paravirt_start_gate_vtop_patchlist = .; |
| 41 | *(.data.patch.vtop) | 42 | *(.data.patch.vtop) |
| 42 | __end_gate_vtop_patchlist = .; | 43 | __paravirt_end_gate_vtop_patchlist = .; |
| 43 | 44 | ||
| 44 | __start_gate_fsyscall_patchlist = .; | 45 | __paravirt_start_gate_fsyscall_patchlist = .; |
| 45 | *(.data.patch.fsyscall_table) | 46 | *(.data.patch.fsyscall_table) |
| 46 | __end_gate_fsyscall_patchlist = .; | 47 | __paravirt_end_gate_fsyscall_patchlist = .; |
| 47 | 48 | ||
| 48 | __start_gate_brl_fsys_bubble_down_patchlist = .; | 49 | __paravirt_start_gate_brl_fsys_bubble_down_patchlist = .; |
| 49 | *(.data.patch.brl_fsys_bubble_down) | 50 | *(.data.patch.brl_fsys_bubble_down) |
| 50 | __end_gate_brl_fsys_bubble_down_patchlist = .; | 51 | __paravirt_end_gate_brl_fsys_bubble_down_patchlist = .; |
| 51 | } :readable | 52 | } :readable |
| 52 | 53 | ||
| 53 | .IA_64.unwind_info : { *(.IA_64.unwind_info*) } | 54 | .IA_64.unwind_info : { *(.IA_64.unwind_info*) } |
diff --git a/arch/ia64/kernel/head.S b/arch/ia64/kernel/head.S index 59301c472800..23f846de62d5 100644 --- a/arch/ia64/kernel/head.S +++ b/arch/ia64/kernel/head.S | |||
| @@ -1050,7 +1050,7 @@ END(ia64_delay_loop) | |||
| 1050 | * except that the multiplication and the shift are done with 128-bit | 1050 | * except that the multiplication and the shift are done with 128-bit |
| 1051 | * intermediate precision so that we can produce a full 64-bit result. | 1051 | * intermediate precision so that we can produce a full 64-bit result. |
| 1052 | */ | 1052 | */ |
| 1053 | GLOBAL_ENTRY(sched_clock) | 1053 | GLOBAL_ENTRY(ia64_native_sched_clock) |
| 1054 | addl r8=THIS_CPU(cpu_info) + IA64_CPUINFO_NSEC_PER_CYC_OFFSET,r0 | 1054 | addl r8=THIS_CPU(cpu_info) + IA64_CPUINFO_NSEC_PER_CYC_OFFSET,r0 |
| 1055 | mov.m r9=ar.itc // fetch cycle-counter (35 cyc) | 1055 | mov.m r9=ar.itc // fetch cycle-counter (35 cyc) |
| 1056 | ;; | 1056 | ;; |
| @@ -1066,7 +1066,13 @@ GLOBAL_ENTRY(sched_clock) | |||
| 1066 | ;; | 1066 | ;; |
| 1067 | shrp r8=r9,r8,IA64_NSEC_PER_CYC_SHIFT | 1067 | shrp r8=r9,r8,IA64_NSEC_PER_CYC_SHIFT |
| 1068 | br.ret.sptk.many rp | 1068 | br.ret.sptk.many rp |
| 1069 | END(sched_clock) | 1069 | END(ia64_native_sched_clock) |
| 1070 | #ifndef CONFIG_PARAVIRT | ||
| 1071 | //unsigned long long | ||
| 1072 | //sched_clock(void) __attribute__((alias("ia64_native_sched_clock"))); | ||
| 1073 | .global sched_clock | ||
| 1074 | sched_clock = ia64_native_sched_clock | ||
| 1075 | #endif | ||
| 1070 | 1076 | ||
| 1071 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | 1077 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING |
| 1072 | GLOBAL_ENTRY(cycle_to_cputime) | 1078 | GLOBAL_ENTRY(cycle_to_cputime) |
diff --git a/arch/ia64/kernel/ivt.S b/arch/ia64/kernel/ivt.S index f675d8e33853..ec9a5fdfa1b9 100644 --- a/arch/ia64/kernel/ivt.S +++ b/arch/ia64/kernel/ivt.S | |||
| @@ -804,7 +804,7 @@ ENTRY(break_fault) | |||
| 804 | /////////////////////////////////////////////////////////////////////// | 804 | /////////////////////////////////////////////////////////////////////// |
| 805 | st1 [r16]=r0 // M2|3 clear current->thread.on_ustack flag | 805 | st1 [r16]=r0 // M2|3 clear current->thread.on_ustack flag |
| 806 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | 806 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING |
| 807 | mov.m r30=ar.itc // M get cycle for accounting | 807 | MOV_FROM_ITC(p0, p14, r30, r18) // M get cycle for accounting |
| 808 | #else | 808 | #else |
| 809 | mov b6=r30 // I0 setup syscall handler branch reg early | 809 | mov b6=r30 // I0 setup syscall handler branch reg early |
| 810 | #endif | 810 | #endif |
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c index bab1de2d2f6a..8f33a8840422 100644 --- a/arch/ia64/kernel/mca.c +++ b/arch/ia64/kernel/mca.c | |||
| @@ -1456,9 +1456,9 @@ ia64_mca_cmc_int_caller(int cmc_irq, void *arg) | |||
| 1456 | 1456 | ||
| 1457 | ia64_mca_cmc_int_handler(cmc_irq, arg); | 1457 | ia64_mca_cmc_int_handler(cmc_irq, arg); |
| 1458 | 1458 | ||
| 1459 | for (++cpuid ; cpuid < NR_CPUS && !cpu_online(cpuid) ; cpuid++); | 1459 | cpuid = cpumask_next(cpuid+1, cpu_online_mask); |
| 1460 | 1460 | ||
| 1461 | if (cpuid < NR_CPUS) { | 1461 | if (cpuid < nr_cpu_ids) { |
| 1462 | platform_send_ipi(cpuid, IA64_CMCP_VECTOR, IA64_IPI_DM_INT, 0); | 1462 | platform_send_ipi(cpuid, IA64_CMCP_VECTOR, IA64_IPI_DM_INT, 0); |
| 1463 | } else { | 1463 | } else { |
| 1464 | /* If no log record, switch out of polling mode */ | 1464 | /* If no log record, switch out of polling mode */ |
| @@ -1525,7 +1525,7 @@ ia64_mca_cpe_int_caller(int cpe_irq, void *arg) | |||
| 1525 | 1525 | ||
| 1526 | ia64_mca_cpe_int_handler(cpe_irq, arg); | 1526 | ia64_mca_cpe_int_handler(cpe_irq, arg); |
| 1527 | 1527 | ||
| 1528 | for (++cpuid ; cpuid < NR_CPUS && !cpu_online(cpuid) ; cpuid++); | 1528 | cpuid = cpumask_next(cpuid+1, cpu_online_mask); |
| 1529 | 1529 | ||
| 1530 | if (cpuid < NR_CPUS) { | 1530 | if (cpuid < NR_CPUS) { |
| 1531 | platform_send_ipi(cpuid, IA64_CPEP_VECTOR, IA64_IPI_DM_INT, 0); | 1531 | platform_send_ipi(cpuid, IA64_CPEP_VECTOR, IA64_IPI_DM_INT, 0); |
diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c index aaa7d901521f..da3b0cf495a3 100644 --- a/arch/ia64/kernel/module.c +++ b/arch/ia64/kernel/module.c | |||
| @@ -446,6 +446,14 @@ module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings, | |||
| 446 | mod->arch.opd = s; | 446 | mod->arch.opd = s; |
| 447 | else if (strcmp(".IA_64.unwind", secstrings + s->sh_name) == 0) | 447 | else if (strcmp(".IA_64.unwind", secstrings + s->sh_name) == 0) |
| 448 | mod->arch.unwind = s; | 448 | mod->arch.unwind = s; |
| 449 | #ifdef CONFIG_PARAVIRT | ||
| 450 | else if (strcmp(".paravirt_bundles", | ||
| 451 | secstrings + s->sh_name) == 0) | ||
| 452 | mod->arch.paravirt_bundles = s; | ||
| 453 | else if (strcmp(".paravirt_insts", | ||
| 454 | secstrings + s->sh_name) == 0) | ||
| 455 | mod->arch.paravirt_insts = s; | ||
| 456 | #endif | ||
| 449 | 457 | ||
| 450 | if (!mod->arch.core_plt || !mod->arch.init_plt || !mod->arch.got || !mod->arch.opd) { | 458 | if (!mod->arch.core_plt || !mod->arch.init_plt || !mod->arch.got || !mod->arch.opd) { |
| 451 | printk(KERN_ERR "%s: sections missing\n", mod->name); | 459 | printk(KERN_ERR "%s: sections missing\n", mod->name); |
| @@ -525,8 +533,7 @@ get_ltoff (struct module *mod, uint64_t value, int *okp) | |||
| 525 | goto found; | 533 | goto found; |
| 526 | 534 | ||
| 527 | /* Not enough GOT entries? */ | 535 | /* Not enough GOT entries? */ |
| 528 | if (e >= (struct got_entry *) (mod->arch.got->sh_addr + mod->arch.got->sh_size)) | 536 | BUG_ON(e >= (struct got_entry *) (mod->arch.got->sh_addr + mod->arch.got->sh_size)); |
| 529 | BUG(); | ||
| 530 | 537 | ||
| 531 | e->val = value; | 538 | e->val = value; |
| 532 | ++mod->arch.next_got_entry; | 539 | ++mod->arch.next_got_entry; |
| @@ -921,6 +928,30 @@ module_finalize (const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs, struct module *mo | |||
| 921 | DEBUGP("%s: init: entry=%p\n", __func__, mod->init); | 928 | DEBUGP("%s: init: entry=%p\n", __func__, mod->init); |
| 922 | if (mod->arch.unwind) | 929 | if (mod->arch.unwind) |
| 923 | register_unwind_table(mod); | 930 | register_unwind_table(mod); |
| 931 | #ifdef CONFIG_PARAVIRT | ||
| 932 | if (mod->arch.paravirt_bundles) { | ||
| 933 | struct paravirt_patch_site_bundle *start = | ||
| 934 | (struct paravirt_patch_site_bundle *) | ||
| 935 | mod->arch.paravirt_bundles->sh_addr; | ||
| 936 | struct paravirt_patch_site_bundle *end = | ||
| 937 | (struct paravirt_patch_site_bundle *) | ||
| 938 | (mod->arch.paravirt_bundles->sh_addr + | ||
| 939 | mod->arch.paravirt_bundles->sh_size); | ||
| 940 | |||
| 941 | paravirt_patch_apply_bundle(start, end); | ||
| 942 | } | ||
| 943 | if (mod->arch.paravirt_insts) { | ||
| 944 | struct paravirt_patch_site_inst *start = | ||
| 945 | (struct paravirt_patch_site_inst *) | ||
| 946 | mod->arch.paravirt_insts->sh_addr; | ||
| 947 | struct paravirt_patch_site_inst *end = | ||
| 948 | (struct paravirt_patch_site_inst *) | ||
| 949 | (mod->arch.paravirt_insts->sh_addr + | ||
| 950 | mod->arch.paravirt_insts->sh_size); | ||
| 951 | |||
| 952 | paravirt_patch_apply_inst(start, end); | ||
| 953 | } | ||
| 954 | #endif | ||
| 924 | return 0; | 955 | return 0; |
| 925 | } | 956 | } |
| 926 | 957 | ||
diff --git a/arch/ia64/kernel/paravirt.c b/arch/ia64/kernel/paravirt.c index 9f14c16f6369..a21d7bb9c69c 100644 --- a/arch/ia64/kernel/paravirt.c +++ b/arch/ia64/kernel/paravirt.c | |||
| @@ -46,13 +46,23 @@ struct pv_info pv_info = { | |||
| 46 | * initialization hooks. | 46 | * initialization hooks. |
| 47 | */ | 47 | */ |
| 48 | 48 | ||
| 49 | struct pv_init_ops pv_init_ops; | 49 | static void __init |
| 50 | ia64_native_patch_branch(unsigned long tag, unsigned long type); | ||
| 51 | |||
| 52 | struct pv_init_ops pv_init_ops = | ||
| 53 | { | ||
| 54 | #ifdef ASM_SUPPORTED | ||
| 55 | .patch_bundle = ia64_native_patch_bundle, | ||
| 56 | #endif | ||
| 57 | .patch_branch = ia64_native_patch_branch, | ||
| 58 | }; | ||
| 50 | 59 | ||
| 51 | /*************************************************************************** | 60 | /*************************************************************************** |
| 52 | * pv_cpu_ops | 61 | * pv_cpu_ops |
| 53 | * intrinsics hooks. | 62 | * intrinsics hooks. |
| 54 | */ | 63 | */ |
| 55 | 64 | ||
| 65 | #ifndef ASM_SUPPORTED | ||
| 56 | /* ia64_native_xxx are macros so that we have to make them real functions */ | 66 | /* ia64_native_xxx are macros so that we have to make them real functions */ |
| 57 | 67 | ||
| 58 | #define DEFINE_VOID_FUNC1(name) \ | 68 | #define DEFINE_VOID_FUNC1(name) \ |
| @@ -60,7 +70,14 @@ struct pv_init_ops pv_init_ops; | |||
| 60 | ia64_native_ ## name ## _func(unsigned long arg) \ | 70 | ia64_native_ ## name ## _func(unsigned long arg) \ |
| 61 | { \ | 71 | { \ |
| 62 | ia64_native_ ## name(arg); \ | 72 | ia64_native_ ## name(arg); \ |
| 63 | } \ | 73 | } |
| 74 | |||
| 75 | #define DEFINE_VOID_FUNC1_VOID(name) \ | ||
| 76 | static void \ | ||
| 77 | ia64_native_ ## name ## _func(void *arg) \ | ||
| 78 | { \ | ||
| 79 | ia64_native_ ## name(arg); \ | ||
| 80 | } | ||
| 64 | 81 | ||
| 65 | #define DEFINE_VOID_FUNC2(name) \ | 82 | #define DEFINE_VOID_FUNC2(name) \ |
| 66 | static void \ | 83 | static void \ |
| @@ -68,7 +85,7 @@ struct pv_init_ops pv_init_ops; | |||
| 68 | unsigned long arg1) \ | 85 | unsigned long arg1) \ |
| 69 | { \ | 86 | { \ |
| 70 | ia64_native_ ## name(arg0, arg1); \ | 87 | ia64_native_ ## name(arg0, arg1); \ |
| 71 | } \ | 88 | } |
| 72 | 89 | ||
| 73 | #define DEFINE_FUNC0(name) \ | 90 | #define DEFINE_FUNC0(name) \ |
| 74 | static unsigned long \ | 91 | static unsigned long \ |
| @@ -84,7 +101,7 @@ struct pv_init_ops pv_init_ops; | |||
| 84 | return ia64_native_ ## name(arg); \ | 101 | return ia64_native_ ## name(arg); \ |
| 85 | } \ | 102 | } \ |
| 86 | 103 | ||
| 87 | DEFINE_VOID_FUNC1(fc); | 104 | DEFINE_VOID_FUNC1_VOID(fc); |
| 88 | DEFINE_VOID_FUNC1(intrin_local_irq_restore); | 105 | DEFINE_VOID_FUNC1(intrin_local_irq_restore); |
| 89 | 106 | ||
| 90 | DEFINE_VOID_FUNC2(ptcga); | 107 | DEFINE_VOID_FUNC2(ptcga); |
| @@ -274,6 +291,266 @@ ia64_native_setreg_func(int regnum, unsigned long val) | |||
| 274 | break; | 291 | break; |
| 275 | } | 292 | } |
| 276 | } | 293 | } |
| 294 | #else | ||
| 295 | |||
| 296 | #define __DEFINE_FUNC(name, code) \ | ||
| 297 | extern const char ia64_native_ ## name ## _direct_start[]; \ | ||
| 298 | extern const char ia64_native_ ## name ## _direct_end[]; \ | ||
| 299 | asm (".align 32\n" \ | ||
| 300 | ".proc ia64_native_" #name "_func\n" \ | ||
| 301 | "ia64_native_" #name "_func:\n" \ | ||
| 302 | "ia64_native_" #name "_direct_start:\n" \ | ||
| 303 | code \ | ||
| 304 | "ia64_native_" #name "_direct_end:\n" \ | ||
| 305 | "br.cond.sptk.many b6\n" \ | ||
| 306 | ".endp ia64_native_" #name "_func\n") | ||
| 307 | |||
| 308 | #define DEFINE_VOID_FUNC0(name, code) \ | ||
| 309 | extern void \ | ||
| 310 | ia64_native_ ## name ## _func(void); \ | ||
| 311 | __DEFINE_FUNC(name, code) | ||
| 312 | |||
| 313 | #define DEFINE_VOID_FUNC1(name, code) \ | ||
| 314 | extern void \ | ||
| 315 | ia64_native_ ## name ## _func(unsigned long arg); \ | ||
| 316 | __DEFINE_FUNC(name, code) | ||
| 317 | |||
| 318 | #define DEFINE_VOID_FUNC1_VOID(name, code) \ | ||
| 319 | extern void \ | ||
| 320 | ia64_native_ ## name ## _func(void *arg); \ | ||
| 321 | __DEFINE_FUNC(name, code) | ||
| 322 | |||
| 323 | #define DEFINE_VOID_FUNC2(name, code) \ | ||
| 324 | extern void \ | ||
| 325 | ia64_native_ ## name ## _func(unsigned long arg0, \ | ||
| 326 | unsigned long arg1); \ | ||
| 327 | __DEFINE_FUNC(name, code) | ||
| 328 | |||
| 329 | #define DEFINE_FUNC0(name, code) \ | ||
| 330 | extern unsigned long \ | ||
| 331 | ia64_native_ ## name ## _func(void); \ | ||
| 332 | __DEFINE_FUNC(name, code) | ||
| 333 | |||
| 334 | #define DEFINE_FUNC1(name, type, code) \ | ||
| 335 | extern unsigned long \ | ||
| 336 | ia64_native_ ## name ## _func(type arg); \ | ||
| 337 | __DEFINE_FUNC(name, code) | ||
| 338 | |||
| 339 | DEFINE_VOID_FUNC1_VOID(fc, | ||
| 340 | "fc r8\n"); | ||
| 341 | DEFINE_VOID_FUNC1(intrin_local_irq_restore, | ||
| 342 | ";;\n" | ||
| 343 | " cmp.ne p6, p7 = r8, r0\n" | ||
| 344 | ";;\n" | ||
| 345 | "(p6) ssm psr.i\n" | ||
| 346 | "(p7) rsm psr.i\n" | ||
| 347 | ";;\n" | ||
| 348 | "(p6) srlz.d\n"); | ||
| 349 | |||
| 350 | DEFINE_VOID_FUNC2(ptcga, | ||
| 351 | "ptc.ga r8, r9\n"); | ||
| 352 | DEFINE_VOID_FUNC2(set_rr, | ||
| 353 | "mov rr[r8] = r9\n"); | ||
| 354 | |||
| 355 | /* ia64_native_getreg(_IA64_REG_PSR) & IA64_PSR_I */ | ||
| 356 | DEFINE_FUNC0(get_psr_i, | ||
| 357 | "mov r2 = " __stringify(1 << IA64_PSR_I_BIT) "\n" | ||
| 358 | "mov r8 = psr\n" | ||
| 359 | ";;\n" | ||
| 360 | "and r8 = r2, r8\n"); | ||
| 361 | |||
| 362 | DEFINE_FUNC1(thash, unsigned long, | ||
| 363 | "thash r8 = r8\n"); | ||
| 364 | DEFINE_FUNC1(get_cpuid, int, | ||
| 365 | "mov r8 = cpuid[r8]\n"); | ||
| 366 | DEFINE_FUNC1(get_pmd, int, | ||
| 367 | "mov r8 = pmd[r8]\n"); | ||
| 368 | DEFINE_FUNC1(get_rr, unsigned long, | ||
| 369 | "mov r8 = rr[r8]\n"); | ||
| 370 | |||
| 371 | DEFINE_VOID_FUNC0(ssm_i, | ||
| 372 | "ssm psr.i\n"); | ||
| 373 | DEFINE_VOID_FUNC0(rsm_i, | ||
| 374 | "rsm psr.i\n"); | ||
| 375 | |||
| 376 | extern void | ||
| 377 | ia64_native_set_rr0_to_rr4_func(unsigned long val0, unsigned long val1, | ||
| 378 | unsigned long val2, unsigned long val3, | ||
| 379 | unsigned long val4); | ||
| 380 | __DEFINE_FUNC(set_rr0_to_rr4, | ||
| 381 | "mov rr[r0] = r8\n" | ||
| 382 | "movl r2 = 0x2000000000000000\n" | ||
| 383 | ";;\n" | ||
| 384 | "mov rr[r2] = r9\n" | ||
| 385 | "shl r3 = r2, 1\n" /* movl r3 = 0x4000000000000000 */ | ||
| 386 | ";;\n" | ||
| 387 | "add r2 = r2, r3\n" /* movl r2 = 0x6000000000000000 */ | ||
| 388 | "mov rr[r3] = r10\n" | ||
| 389 | ";;\n" | ||
| 390 | "mov rr[r2] = r11\n" | ||
| 391 | "shl r3 = r3, 1\n" /* movl r3 = 0x8000000000000000 */ | ||
| 392 | ";;\n" | ||
| 393 | "mov rr[r3] = r14\n"); | ||
| 394 | |||
| 395 | extern unsigned long ia64_native_getreg_func(int regnum); | ||
| 396 | asm(".global ia64_native_getreg_func\n"); | ||
| 397 | #define __DEFINE_GET_REG(id, reg) \ | ||
| 398 | "mov r2 = " __stringify(_IA64_REG_ ## id) "\n" \ | ||
| 399 | ";;\n" \ | ||
| 400 | "cmp.eq p6, p0 = r2, r8\n" \ | ||
| 401 | ";;\n" \ | ||
| 402 | "(p6) mov r8 = " #reg "\n" \ | ||
| 403 | "(p6) br.cond.sptk.many b6\n" \ | ||
| 404 | ";;\n" | ||
| 405 | #define __DEFINE_GET_AR(id, reg) __DEFINE_GET_REG(AR_ ## id, ar.reg) | ||
| 406 | #define __DEFINE_GET_CR(id, reg) __DEFINE_GET_REG(CR_ ## id, cr.reg) | ||
| 407 | |||
| 408 | __DEFINE_FUNC(getreg, | ||
| 409 | __DEFINE_GET_REG(GP, gp) | ||
| 410 | /*__DEFINE_GET_REG(IP, ip)*/ /* returned ip value shouldn't be constant */ | ||
| 411 | __DEFINE_GET_REG(PSR, psr) | ||
| 412 | __DEFINE_GET_REG(TP, tp) | ||
| 413 | __DEFINE_GET_REG(SP, sp) | ||
| 414 | |||
| 415 | __DEFINE_GET_REG(AR_KR0, ar0) | ||
| 416 | __DEFINE_GET_REG(AR_KR1, ar1) | ||
| 417 | __DEFINE_GET_REG(AR_KR2, ar2) | ||
| 418 | __DEFINE_GET_REG(AR_KR3, ar3) | ||
| 419 | __DEFINE_GET_REG(AR_KR4, ar4) | ||
| 420 | __DEFINE_GET_REG(AR_KR5, ar5) | ||
| 421 | __DEFINE_GET_REG(AR_KR6, ar6) | ||
| 422 | __DEFINE_GET_REG(AR_KR7, ar7) | ||
| 423 | __DEFINE_GET_AR(RSC, rsc) | ||
| 424 | __DEFINE_GET_AR(BSP, bsp) | ||
| 425 | __DEFINE_GET_AR(BSPSTORE, bspstore) | ||
| 426 | __DEFINE_GET_AR(RNAT, rnat) | ||
| 427 | __DEFINE_GET_AR(FCR, fcr) | ||
| 428 | __DEFINE_GET_AR(EFLAG, eflag) | ||
| 429 | __DEFINE_GET_AR(CSD, csd) | ||
| 430 | __DEFINE_GET_AR(SSD, ssd) | ||
| 431 | __DEFINE_GET_REG(AR_CFLAG, ar27) | ||
| 432 | __DEFINE_GET_AR(FSR, fsr) | ||
| 433 | __DEFINE_GET_AR(FIR, fir) | ||
| 434 | __DEFINE_GET_AR(FDR, fdr) | ||
| 435 | __DEFINE_GET_AR(CCV, ccv) | ||
| 436 | __DEFINE_GET_AR(UNAT, unat) | ||
| 437 | __DEFINE_GET_AR(FPSR, fpsr) | ||
| 438 | __DEFINE_GET_AR(ITC, itc) | ||
| 439 | __DEFINE_GET_AR(PFS, pfs) | ||
| 440 | __DEFINE_GET_AR(LC, lc) | ||
| 441 | __DEFINE_GET_AR(EC, ec) | ||
| 442 | |||
| 443 | __DEFINE_GET_CR(DCR, dcr) | ||
| 444 | __DEFINE_GET_CR(ITM, itm) | ||
| 445 | __DEFINE_GET_CR(IVA, iva) | ||
| 446 | __DEFINE_GET_CR(PTA, pta) | ||
| 447 | __DEFINE_GET_CR(IPSR, ipsr) | ||
| 448 | __DEFINE_GET_CR(ISR, isr) | ||
| 449 | __DEFINE_GET_CR(IIP, iip) | ||
| 450 | __DEFINE_GET_CR(IFA, ifa) | ||
| 451 | __DEFINE_GET_CR(ITIR, itir) | ||
| 452 | __DEFINE_GET_CR(IIPA, iipa) | ||
| 453 | __DEFINE_GET_CR(IFS, ifs) | ||
| 454 | __DEFINE_GET_CR(IIM, iim) | ||
| 455 | __DEFINE_GET_CR(IHA, iha) | ||
| 456 | __DEFINE_GET_CR(LID, lid) | ||
| 457 | __DEFINE_GET_CR(IVR, ivr) | ||
| 458 | __DEFINE_GET_CR(TPR, tpr) | ||
| 459 | __DEFINE_GET_CR(EOI, eoi) | ||
| 460 | __DEFINE_GET_CR(IRR0, irr0) | ||
| 461 | __DEFINE_GET_CR(IRR1, irr1) | ||
| 462 | __DEFINE_GET_CR(IRR2, irr2) | ||
| 463 | __DEFINE_GET_CR(IRR3, irr3) | ||
| 464 | __DEFINE_GET_CR(ITV, itv) | ||
| 465 | __DEFINE_GET_CR(PMV, pmv) | ||
| 466 | __DEFINE_GET_CR(CMCV, cmcv) | ||
| 467 | __DEFINE_GET_CR(LRR0, lrr0) | ||
| 468 | __DEFINE_GET_CR(LRR1, lrr1) | ||
| 469 | |||
| 470 | "mov r8 = -1\n" /* unsupported case */ | ||
| 471 | ); | ||
| 472 | |||
| 473 | extern void ia64_native_setreg_func(int regnum, unsigned long val); | ||
| 474 | asm(".global ia64_native_setreg_func\n"); | ||
| 475 | #define __DEFINE_SET_REG(id, reg) \ | ||
| 476 | "mov r2 = " __stringify(_IA64_REG_ ## id) "\n" \ | ||
| 477 | ";;\n" \ | ||
| 478 | "cmp.eq p6, p0 = r2, r9\n" \ | ||
| 479 | ";;\n" \ | ||
| 480 | "(p6) mov " #reg " = r8\n" \ | ||
| 481 | "(p6) br.cond.sptk.many b6\n" \ | ||
| 482 | ";;\n" | ||
| 483 | #define __DEFINE_SET_AR(id, reg) __DEFINE_SET_REG(AR_ ## id, ar.reg) | ||
| 484 | #define __DEFINE_SET_CR(id, reg) __DEFINE_SET_REG(CR_ ## id, cr.reg) | ||
| 485 | __DEFINE_FUNC(setreg, | ||
| 486 | "mov r2 = " __stringify(_IA64_REG_PSR_L) "\n" | ||
| 487 | ";;\n" | ||
| 488 | "cmp.eq p6, p0 = r2, r9\n" | ||
| 489 | ";;\n" | ||
| 490 | "(p6) mov psr.l = r8\n" | ||
| 491 | #ifdef HAVE_SERIALIZE_DIRECTIVE | ||
| 492 | ".serialize.data\n" | ||
| 493 | #endif | ||
| 494 | "(p6) br.cond.sptk.many b6\n" | ||
| 495 | __DEFINE_SET_REG(GP, gp) | ||
| 496 | __DEFINE_SET_REG(SP, sp) | ||
| 497 | |||
| 498 | __DEFINE_SET_REG(AR_KR0, ar0) | ||
| 499 | __DEFINE_SET_REG(AR_KR1, ar1) | ||
| 500 | __DEFINE_SET_REG(AR_KR2, ar2) | ||
| 501 | __DEFINE_SET_REG(AR_KR3, ar3) | ||
| 502 | __DEFINE_SET_REG(AR_KR4, ar4) | ||
| 503 | __DEFINE_SET_REG(AR_KR5, ar5) | ||
| 504 | __DEFINE_SET_REG(AR_KR6, ar6) | ||
| 505 | __DEFINE_SET_REG(AR_KR7, ar7) | ||
| 506 | __DEFINE_SET_AR(RSC, rsc) | ||
| 507 | __DEFINE_SET_AR(BSP, bsp) | ||
| 508 | __DEFINE_SET_AR(BSPSTORE, bspstore) | ||
| 509 | __DEFINE_SET_AR(RNAT, rnat) | ||
| 510 | __DEFINE_SET_AR(FCR, fcr) | ||
| 511 | __DEFINE_SET_AR(EFLAG, eflag) | ||
| 512 | __DEFINE_SET_AR(CSD, csd) | ||
| 513 | __DEFINE_SET_AR(SSD, ssd) | ||
| 514 | __DEFINE_SET_REG(AR_CFLAG, ar27) | ||
| 515 | __DEFINE_SET_AR(FSR, fsr) | ||
| 516 | __DEFINE_SET_AR(FIR, fir) | ||
| 517 | __DEFINE_SET_AR(FDR, fdr) | ||
| 518 | __DEFINE_SET_AR(CCV, ccv) | ||
| 519 | __DEFINE_SET_AR(UNAT, unat) | ||
| 520 | __DEFINE_SET_AR(FPSR, fpsr) | ||
| 521 | __DEFINE_SET_AR(ITC, itc) | ||
| 522 | __DEFINE_SET_AR(PFS, pfs) | ||
| 523 | __DEFINE_SET_AR(LC, lc) | ||
| 524 | __DEFINE_SET_AR(EC, ec) | ||
| 525 | |||
| 526 | __DEFINE_SET_CR(DCR, dcr) | ||
| 527 | __DEFINE_SET_CR(ITM, itm) | ||
| 528 | __DEFINE_SET_CR(IVA, iva) | ||
| 529 | __DEFINE_SET_CR(PTA, pta) | ||
| 530 | __DEFINE_SET_CR(IPSR, ipsr) | ||
| 531 | __DEFINE_SET_CR(ISR, isr) | ||
| 532 | __DEFINE_SET_CR(IIP, iip) | ||
| 533 | __DEFINE_SET_CR(IFA, ifa) | ||
| 534 | __DEFINE_SET_CR(ITIR, itir) | ||
| 535 | __DEFINE_SET_CR(IIPA, iipa) | ||
| 536 | __DEFINE_SET_CR(IFS, ifs) | ||
| 537 | __DEFINE_SET_CR(IIM, iim) | ||
| 538 | __DEFINE_SET_CR(IHA, iha) | ||
| 539 | __DEFINE_SET_CR(LID, lid) | ||
| 540 | __DEFINE_SET_CR(IVR, ivr) | ||
| 541 | __DEFINE_SET_CR(TPR, tpr) | ||
| 542 | __DEFINE_SET_CR(EOI, eoi) | ||
| 543 | __DEFINE_SET_CR(IRR0, irr0) | ||
| 544 | __DEFINE_SET_CR(IRR1, irr1) | ||
| 545 | __DEFINE_SET_CR(IRR2, irr2) | ||
| 546 | __DEFINE_SET_CR(IRR3, irr3) | ||
| 547 | __DEFINE_SET_CR(ITV, itv) | ||
| 548 | __DEFINE_SET_CR(PMV, pmv) | ||
| 549 | __DEFINE_SET_CR(CMCV, cmcv) | ||
| 550 | __DEFINE_SET_CR(LRR0, lrr0) | ||
| 551 | __DEFINE_SET_CR(LRR1, lrr1) | ||
| 552 | ); | ||
| 553 | #endif | ||
| 277 | 554 | ||
| 278 | struct pv_cpu_ops pv_cpu_ops = { | 555 | struct pv_cpu_ops pv_cpu_ops = { |
| 279 | .fc = ia64_native_fc_func, | 556 | .fc = ia64_native_fc_func, |
| @@ -366,4 +643,258 @@ ia64_native_do_steal_accounting(unsigned long *new_itm) | |||
| 366 | 643 | ||
| 367 | struct pv_time_ops pv_time_ops = { | 644 | struct pv_time_ops pv_time_ops = { |
| 368 | .do_steal_accounting = ia64_native_do_steal_accounting, | 645 | .do_steal_accounting = ia64_native_do_steal_accounting, |
| 646 | .sched_clock = ia64_native_sched_clock, | ||
| 647 | }; | ||
| 648 | |||
| 649 | /*************************************************************************** | ||
| 650 | * binary pacthing | ||
| 651 | * pv_init_ops.patch_bundle | ||
| 652 | */ | ||
| 653 | |||
| 654 | #ifdef ASM_SUPPORTED | ||
| 655 | #define IA64_NATIVE_PATCH_DEFINE_GET_REG(name, reg) \ | ||
| 656 | __DEFINE_FUNC(get_ ## name, \ | ||
| 657 | ";;\n" \ | ||
| 658 | "mov r8 = " #reg "\n" \ | ||
| 659 | ";;\n") | ||
| 660 | |||
| 661 | #define IA64_NATIVE_PATCH_DEFINE_SET_REG(name, reg) \ | ||
| 662 | __DEFINE_FUNC(set_ ## name, \ | ||
| 663 | ";;\n" \ | ||
| 664 | "mov " #reg " = r8\n" \ | ||
| 665 | ";;\n") | ||
| 666 | |||
| 667 | #define IA64_NATIVE_PATCH_DEFINE_REG(name, reg) \ | ||
| 668 | IA64_NATIVE_PATCH_DEFINE_GET_REG(name, reg); \ | ||
| 669 | IA64_NATIVE_PATCH_DEFINE_SET_REG(name, reg) \ | ||
| 670 | |||
| 671 | #define IA64_NATIVE_PATCH_DEFINE_AR(name, reg) \ | ||
| 672 | IA64_NATIVE_PATCH_DEFINE_REG(ar_ ## name, ar.reg) | ||
| 673 | |||
| 674 | #define IA64_NATIVE_PATCH_DEFINE_CR(name, reg) \ | ||
| 675 | IA64_NATIVE_PATCH_DEFINE_REG(cr_ ## name, cr.reg) | ||
| 676 | |||
| 677 | |||
| 678 | IA64_NATIVE_PATCH_DEFINE_GET_REG(psr, psr); | ||
| 679 | IA64_NATIVE_PATCH_DEFINE_GET_REG(tp, tp); | ||
| 680 | |||
| 681 | /* IA64_NATIVE_PATCH_DEFINE_SET_REG(psr_l, psr.l); */ | ||
| 682 | __DEFINE_FUNC(set_psr_l, | ||
| 683 | ";;\n" | ||
| 684 | "mov psr.l = r8\n" | ||
| 685 | #ifdef HAVE_SERIALIZE_DIRECTIVE | ||
| 686 | ".serialize.data\n" | ||
| 687 | #endif | ||
| 688 | ";;\n"); | ||
| 689 | |||
| 690 | IA64_NATIVE_PATCH_DEFINE_REG(gp, gp); | ||
| 691 | IA64_NATIVE_PATCH_DEFINE_REG(sp, sp); | ||
| 692 | |||
| 693 | IA64_NATIVE_PATCH_DEFINE_REG(kr0, ar0); | ||
| 694 | IA64_NATIVE_PATCH_DEFINE_REG(kr1, ar1); | ||
| 695 | IA64_NATIVE_PATCH_DEFINE_REG(kr2, ar2); | ||
| 696 | IA64_NATIVE_PATCH_DEFINE_REG(kr3, ar3); | ||
| 697 | IA64_NATIVE_PATCH_DEFINE_REG(kr4, ar4); | ||
| 698 | IA64_NATIVE_PATCH_DEFINE_REG(kr5, ar5); | ||
| 699 | IA64_NATIVE_PATCH_DEFINE_REG(kr6, ar6); | ||
| 700 | IA64_NATIVE_PATCH_DEFINE_REG(kr7, ar7); | ||
| 701 | |||
| 702 | IA64_NATIVE_PATCH_DEFINE_AR(rsc, rsc); | ||
| 703 | IA64_NATIVE_PATCH_DEFINE_AR(bsp, bsp); | ||
| 704 | IA64_NATIVE_PATCH_DEFINE_AR(bspstore, bspstore); | ||
| 705 | IA64_NATIVE_PATCH_DEFINE_AR(rnat, rnat); | ||
| 706 | IA64_NATIVE_PATCH_DEFINE_AR(fcr, fcr); | ||
| 707 | IA64_NATIVE_PATCH_DEFINE_AR(eflag, eflag); | ||
| 708 | IA64_NATIVE_PATCH_DEFINE_AR(csd, csd); | ||
| 709 | IA64_NATIVE_PATCH_DEFINE_AR(ssd, ssd); | ||
| 710 | IA64_NATIVE_PATCH_DEFINE_REG(ar27, ar27); | ||
| 711 | IA64_NATIVE_PATCH_DEFINE_AR(fsr, fsr); | ||
| 712 | IA64_NATIVE_PATCH_DEFINE_AR(fir, fir); | ||
| 713 | IA64_NATIVE_PATCH_DEFINE_AR(fdr, fdr); | ||
| 714 | IA64_NATIVE_PATCH_DEFINE_AR(ccv, ccv); | ||
| 715 | IA64_NATIVE_PATCH_DEFINE_AR(unat, unat); | ||
| 716 | IA64_NATIVE_PATCH_DEFINE_AR(fpsr, fpsr); | ||
| 717 | IA64_NATIVE_PATCH_DEFINE_AR(itc, itc); | ||
| 718 | IA64_NATIVE_PATCH_DEFINE_AR(pfs, pfs); | ||
| 719 | IA64_NATIVE_PATCH_DEFINE_AR(lc, lc); | ||
| 720 | IA64_NATIVE_PATCH_DEFINE_AR(ec, ec); | ||
| 721 | |||
| 722 | IA64_NATIVE_PATCH_DEFINE_CR(dcr, dcr); | ||
| 723 | IA64_NATIVE_PATCH_DEFINE_CR(itm, itm); | ||
| 724 | IA64_NATIVE_PATCH_DEFINE_CR(iva, iva); | ||
| 725 | IA64_NATIVE_PATCH_DEFINE_CR(pta, pta); | ||
| 726 | IA64_NATIVE_PATCH_DEFINE_CR(ipsr, ipsr); | ||
| 727 | IA64_NATIVE_PATCH_DEFINE_CR(isr, isr); | ||
| 728 | IA64_NATIVE_PATCH_DEFINE_CR(iip, iip); | ||
| 729 | IA64_NATIVE_PATCH_DEFINE_CR(ifa, ifa); | ||
| 730 | IA64_NATIVE_PATCH_DEFINE_CR(itir, itir); | ||
| 731 | IA64_NATIVE_PATCH_DEFINE_CR(iipa, iipa); | ||
| 732 | IA64_NATIVE_PATCH_DEFINE_CR(ifs, ifs); | ||
| 733 | IA64_NATIVE_PATCH_DEFINE_CR(iim, iim); | ||
| 734 | IA64_NATIVE_PATCH_DEFINE_CR(iha, iha); | ||
| 735 | IA64_NATIVE_PATCH_DEFINE_CR(lid, lid); | ||
| 736 | IA64_NATIVE_PATCH_DEFINE_CR(ivr, ivr); | ||
| 737 | IA64_NATIVE_PATCH_DEFINE_CR(tpr, tpr); | ||
| 738 | IA64_NATIVE_PATCH_DEFINE_CR(eoi, eoi); | ||
| 739 | IA64_NATIVE_PATCH_DEFINE_CR(irr0, irr0); | ||
| 740 | IA64_NATIVE_PATCH_DEFINE_CR(irr1, irr1); | ||
| 741 | IA64_NATIVE_PATCH_DEFINE_CR(irr2, irr2); | ||
| 742 | IA64_NATIVE_PATCH_DEFINE_CR(irr3, irr3); | ||
| 743 | IA64_NATIVE_PATCH_DEFINE_CR(itv, itv); | ||
| 744 | IA64_NATIVE_PATCH_DEFINE_CR(pmv, pmv); | ||
| 745 | IA64_NATIVE_PATCH_DEFINE_CR(cmcv, cmcv); | ||
| 746 | IA64_NATIVE_PATCH_DEFINE_CR(lrr0, lrr0); | ||
| 747 | IA64_NATIVE_PATCH_DEFINE_CR(lrr1, lrr1); | ||
| 748 | |||
| 749 | static const struct paravirt_patch_bundle_elem ia64_native_patch_bundle_elems[] | ||
| 750 | __initdata_or_module = | ||
| 751 | { | ||
| 752 | #define IA64_NATIVE_PATCH_BUNDLE_ELEM(name, type) \ | ||
| 753 | { \ | ||
| 754 | (void*)ia64_native_ ## name ## _direct_start, \ | ||
| 755 | (void*)ia64_native_ ## name ## _direct_end, \ | ||
| 756 | PARAVIRT_PATCH_TYPE_ ## type, \ | ||
| 757 | } | ||
| 758 | |||
| 759 | IA64_NATIVE_PATCH_BUNDLE_ELEM(fc, FC), | ||
| 760 | IA64_NATIVE_PATCH_BUNDLE_ELEM(thash, THASH), | ||
| 761 | IA64_NATIVE_PATCH_BUNDLE_ELEM(get_cpuid, GET_CPUID), | ||
| 762 | IA64_NATIVE_PATCH_BUNDLE_ELEM(get_pmd, GET_PMD), | ||
| 763 | IA64_NATIVE_PATCH_BUNDLE_ELEM(ptcga, PTCGA), | ||
| 764 | IA64_NATIVE_PATCH_BUNDLE_ELEM(get_rr, GET_RR), | ||
| 765 | IA64_NATIVE_PATCH_BUNDLE_ELEM(set_rr, SET_RR), | ||
| 766 | IA64_NATIVE_PATCH_BUNDLE_ELEM(set_rr0_to_rr4, SET_RR0_TO_RR4), | ||
| 767 | IA64_NATIVE_PATCH_BUNDLE_ELEM(ssm_i, SSM_I), | ||
| 768 | IA64_NATIVE_PATCH_BUNDLE_ELEM(rsm_i, RSM_I), | ||
| 769 | IA64_NATIVE_PATCH_BUNDLE_ELEM(get_psr_i, GET_PSR_I), | ||
| 770 | IA64_NATIVE_PATCH_BUNDLE_ELEM(intrin_local_irq_restore, | ||
| 771 | INTRIN_LOCAL_IRQ_RESTORE), | ||
| 772 | |||
| 773 | #define IA64_NATIVE_PATCH_BUNDLE_ELEM_GETREG(name, reg) \ | ||
| 774 | { \ | ||
| 775 | (void*)ia64_native_get_ ## name ## _direct_start, \ | ||
| 776 | (void*)ia64_native_get_ ## name ## _direct_end, \ | ||
| 777 | PARAVIRT_PATCH_TYPE_GETREG + _IA64_REG_ ## reg, \ | ||
| 778 | } | ||
| 779 | |||
| 780 | #define IA64_NATIVE_PATCH_BUNDLE_ELEM_SETREG(name, reg) \ | ||
| 781 | { \ | ||
| 782 | (void*)ia64_native_set_ ## name ## _direct_start, \ | ||
| 783 | (void*)ia64_native_set_ ## name ## _direct_end, \ | ||
| 784 | PARAVIRT_PATCH_TYPE_SETREG + _IA64_REG_ ## reg, \ | ||
| 785 | } | ||
| 786 | |||
| 787 | #define IA64_NATIVE_PATCH_BUNDLE_ELEM_REG(name, reg) \ | ||
| 788 | IA64_NATIVE_PATCH_BUNDLE_ELEM_GETREG(name, reg), \ | ||
| 789 | IA64_NATIVE_PATCH_BUNDLE_ELEM_SETREG(name, reg) \ | ||
| 790 | |||
| 791 | #define IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(name, reg) \ | ||
| 792 | IA64_NATIVE_PATCH_BUNDLE_ELEM_REG(ar_ ## name, AR_ ## reg) | ||
| 793 | |||
| 794 | #define IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(name, reg) \ | ||
| 795 | IA64_NATIVE_PATCH_BUNDLE_ELEM_REG(cr_ ## name, CR_ ## reg) | ||
| 796 | |||
| 797 | IA64_NATIVE_PATCH_BUNDLE_ELEM_GETREG(psr, PSR), | ||
| 798 | IA64_NATIVE_PATCH_BUNDLE_ELEM_GETREG(tp, TP), | ||
| 799 | |||
| 800 | IA64_NATIVE_PATCH_BUNDLE_ELEM_SETREG(psr_l, PSR_L), | ||
| 801 | |||
| 802 | IA64_NATIVE_PATCH_BUNDLE_ELEM_REG(gp, GP), | ||
| 803 | IA64_NATIVE_PATCH_BUNDLE_ELEM_REG(sp, SP), | ||
| 804 | |||
| 805 | IA64_NATIVE_PATCH_BUNDLE_ELEM_REG(kr0, AR_KR0), | ||
| 806 | IA64_NATIVE_PATCH_BUNDLE_ELEM_REG(kr1, AR_KR1), | ||
| 807 | IA64_NATIVE_PATCH_BUNDLE_ELEM_REG(kr2, AR_KR2), | ||
| 808 | IA64_NATIVE_PATCH_BUNDLE_ELEM_REG(kr3, AR_KR3), | ||
| 809 | IA64_NATIVE_PATCH_BUNDLE_ELEM_REG(kr4, AR_KR4), | ||
| 810 | IA64_NATIVE_PATCH_BUNDLE_ELEM_REG(kr5, AR_KR5), | ||
| 811 | IA64_NATIVE_PATCH_BUNDLE_ELEM_REG(kr6, AR_KR6), | ||
| 812 | IA64_NATIVE_PATCH_BUNDLE_ELEM_REG(kr7, AR_KR7), | ||
| 813 | |||
| 814 | IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(rsc, RSC), | ||
| 815 | IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(bsp, BSP), | ||
| 816 | IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(bspstore, BSPSTORE), | ||
| 817 | IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(rnat, RNAT), | ||
| 818 | IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(fcr, FCR), | ||
| 819 | IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(eflag, EFLAG), | ||
| 820 | IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(csd, CSD), | ||
| 821 | IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(ssd, SSD), | ||
| 822 | IA64_NATIVE_PATCH_BUNDLE_ELEM_REG(ar27, AR_CFLAG), | ||
| 823 | IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(fsr, FSR), | ||
| 824 | IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(fir, FIR), | ||
| 825 | IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(fdr, FDR), | ||
| 826 | IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(ccv, CCV), | ||
| 827 | IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(unat, UNAT), | ||
| 828 | IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(fpsr, FPSR), | ||
| 829 | IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(itc, ITC), | ||
| 830 | IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(pfs, PFS), | ||
| 831 | IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(lc, LC), | ||
| 832 | IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(ec, EC), | ||
| 833 | |||
| 834 | IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(dcr, DCR), | ||
| 835 | IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(itm, ITM), | ||
| 836 | IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(iva, IVA), | ||
| 837 | IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(pta, PTA), | ||
| 838 | IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(ipsr, IPSR), | ||
| 839 | IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(isr, ISR), | ||
| 840 | IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(iip, IIP), | ||
| 841 | IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(ifa, IFA), | ||
| 842 | IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(itir, ITIR), | ||
| 843 | IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(iipa, IIPA), | ||
| 844 | IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(ifs, IFS), | ||
| 845 | IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(iim, IIM), | ||
| 846 | IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(iha, IHA), | ||
| 847 | IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(lid, LID), | ||
| 848 | IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(ivr, IVR), | ||
| 849 | IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(tpr, TPR), | ||
| 850 | IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(eoi, EOI), | ||
| 851 | IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(irr0, IRR0), | ||
| 852 | IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(irr1, IRR1), | ||
| 853 | IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(irr2, IRR2), | ||
| 854 | IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(irr3, IRR3), | ||
| 855 | IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(itv, ITV), | ||
| 856 | IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(pmv, PMV), | ||
| 857 | IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(cmcv, CMCV), | ||
| 858 | IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(lrr0, LRR0), | ||
| 859 | IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(lrr1, LRR1), | ||
| 369 | }; | 860 | }; |
| 861 | |||
| 862 | unsigned long __init_or_module | ||
| 863 | ia64_native_patch_bundle(void *sbundle, void *ebundle, unsigned long type) | ||
| 864 | { | ||
| 865 | const unsigned long nelems = sizeof(ia64_native_patch_bundle_elems) / | ||
| 866 | sizeof(ia64_native_patch_bundle_elems[0]); | ||
| 867 | |||
| 868 | return __paravirt_patch_apply_bundle(sbundle, ebundle, type, | ||
| 869 | ia64_native_patch_bundle_elems, | ||
| 870 | nelems, NULL); | ||
| 871 | } | ||
| 872 | #endif /* ASM_SUPPOTED */ | ||
| 873 | |||
| 874 | extern const char ia64_native_switch_to[]; | ||
| 875 | extern const char ia64_native_leave_syscall[]; | ||
| 876 | extern const char ia64_native_work_processed_syscall[]; | ||
| 877 | extern const char ia64_native_leave_kernel[]; | ||
| 878 | |||
| 879 | const struct paravirt_patch_branch_target ia64_native_branch_target[] | ||
| 880 | __initconst = { | ||
| 881 | #define PARAVIRT_BR_TARGET(name, type) \ | ||
| 882 | { \ | ||
| 883 | ia64_native_ ## name, \ | ||
| 884 | PARAVIRT_PATCH_TYPE_BR_ ## type, \ | ||
| 885 | } | ||
| 886 | PARAVIRT_BR_TARGET(switch_to, SWITCH_TO), | ||
| 887 | PARAVIRT_BR_TARGET(leave_syscall, LEAVE_SYSCALL), | ||
| 888 | PARAVIRT_BR_TARGET(work_processed_syscall, WORK_PROCESSED_SYSCALL), | ||
| 889 | PARAVIRT_BR_TARGET(leave_kernel, LEAVE_KERNEL), | ||
| 890 | }; | ||
| 891 | |||
| 892 | static void __init | ||
| 893 | ia64_native_patch_branch(unsigned long tag, unsigned long type) | ||
| 894 | { | ||
| 895 | const unsigned long nelem = | ||
| 896 | sizeof(ia64_native_branch_target) / | ||
| 897 | sizeof(ia64_native_branch_target[0]); | ||
| 898 | __paravirt_patch_apply_branch(tag, type, | ||
| 899 | ia64_native_branch_target, nelem); | ||
| 900 | } | ||
diff --git a/arch/ia64/kernel/paravirt_patch.c b/arch/ia64/kernel/paravirt_patch.c new file mode 100644 index 000000000000..bfdfef1b1ffd --- /dev/null +++ b/arch/ia64/kernel/paravirt_patch.c | |||
| @@ -0,0 +1,514 @@ | |||
| 1 | /****************************************************************************** | ||
| 2 | * linux/arch/ia64/xen/paravirt_patch.c | ||
| 3 | * | ||
| 4 | * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp> | ||
| 5 | * VA Linux Systems Japan K.K. | ||
| 6 | * | ||
| 7 | * This program is free software; you can redistribute it and/or modify | ||
| 8 | * it under the terms of the GNU General Public License as published by | ||
| 9 | * the Free Software Foundation; either version 2 of the License, or | ||
| 10 | * (at your option) any later version. | ||
| 11 | * | ||
| 12 | * This program is distributed in the hope that it will be useful, | ||
| 13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 15 | * GNU General Public License for more details. | ||
| 16 | * | ||
| 17 | * You should have received a copy of the GNU General Public License | ||
| 18 | * along with this program; if not, write to the Free Software | ||
| 19 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
| 20 | * | ||
| 21 | */ | ||
| 22 | |||
| 23 | #include <linux/init.h> | ||
| 24 | #include <asm/intrinsics.h> | ||
| 25 | #include <asm/kprobes.h> | ||
| 26 | #include <asm/paravirt.h> | ||
| 27 | #include <asm/paravirt_patch.h> | ||
| 28 | |||
| 29 | typedef union ia64_inst { | ||
| 30 | struct { | ||
| 31 | unsigned long long qp : 6; | ||
| 32 | unsigned long long : 31; | ||
| 33 | unsigned long long opcode : 4; | ||
| 34 | unsigned long long reserved : 23; | ||
| 35 | } generic; | ||
| 36 | unsigned long long l; | ||
| 37 | } ia64_inst_t; | ||
| 38 | |||
| 39 | /* | ||
| 40 | * flush_icache_range() can't be used here. | ||
| 41 | * we are here before cpu_init() which initializes | ||
| 42 | * ia64_i_cache_stride_shift. flush_icache_range() uses it. | ||
| 43 | */ | ||
| 44 | void __init_or_module | ||
| 45 | paravirt_flush_i_cache_range(const void *instr, unsigned long size) | ||
| 46 | { | ||
| 47 | extern void paravirt_fc_i(const void *addr); | ||
| 48 | unsigned long i; | ||
| 49 | |||
| 50 | for (i = 0; i < size; i += sizeof(bundle_t)) | ||
| 51 | paravirt_fc_i(instr + i); | ||
| 52 | } | ||
| 53 | |||
| 54 | bundle_t* __init_or_module | ||
| 55 | paravirt_get_bundle(unsigned long tag) | ||
| 56 | { | ||
| 57 | return (bundle_t *)(tag & ~3UL); | ||
| 58 | } | ||
| 59 | |||
| 60 | unsigned long __init_or_module | ||
| 61 | paravirt_get_slot(unsigned long tag) | ||
| 62 | { | ||
| 63 | return tag & 3UL; | ||
| 64 | } | ||
| 65 | |||
| 66 | unsigned long __init_or_module | ||
| 67 | paravirt_get_num_inst(unsigned long stag, unsigned long etag) | ||
| 68 | { | ||
| 69 | bundle_t *sbundle = paravirt_get_bundle(stag); | ||
| 70 | unsigned long sslot = paravirt_get_slot(stag); | ||
| 71 | bundle_t *ebundle = paravirt_get_bundle(etag); | ||
| 72 | unsigned long eslot = paravirt_get_slot(etag); | ||
| 73 | |||
| 74 | return (ebundle - sbundle) * 3 + eslot - sslot + 1; | ||
| 75 | } | ||
| 76 | |||
| 77 | unsigned long __init_or_module | ||
| 78 | paravirt_get_next_tag(unsigned long tag) | ||
| 79 | { | ||
| 80 | unsigned long slot = paravirt_get_slot(tag); | ||
| 81 | |||
| 82 | switch (slot) { | ||
| 83 | case 0: | ||
| 84 | case 1: | ||
| 85 | return tag + 1; | ||
| 86 | case 2: { | ||
| 87 | bundle_t *bundle = paravirt_get_bundle(tag); | ||
| 88 | return (unsigned long)(bundle + 1); | ||
| 89 | } | ||
| 90 | default: | ||
| 91 | BUG(); | ||
| 92 | } | ||
| 93 | /* NOTREACHED */ | ||
| 94 | } | ||
| 95 | |||
| 96 | ia64_inst_t __init_or_module | ||
| 97 | paravirt_read_slot0(const bundle_t *bundle) | ||
| 98 | { | ||
| 99 | ia64_inst_t inst; | ||
| 100 | inst.l = bundle->quad0.slot0; | ||
| 101 | return inst; | ||
| 102 | } | ||
| 103 | |||
| 104 | ia64_inst_t __init_or_module | ||
| 105 | paravirt_read_slot1(const bundle_t *bundle) | ||
| 106 | { | ||
| 107 | ia64_inst_t inst; | ||
| 108 | inst.l = bundle->quad0.slot1_p0 | | ||
| 109 | ((unsigned long long)bundle->quad1.slot1_p1 << 18UL); | ||
| 110 | return inst; | ||
| 111 | } | ||
| 112 | |||
| 113 | ia64_inst_t __init_or_module | ||
| 114 | paravirt_read_slot2(const bundle_t *bundle) | ||
| 115 | { | ||
| 116 | ia64_inst_t inst; | ||
| 117 | inst.l = bundle->quad1.slot2; | ||
| 118 | return inst; | ||
| 119 | } | ||
| 120 | |||
| 121 | ia64_inst_t __init_or_module | ||
| 122 | paravirt_read_inst(unsigned long tag) | ||
| 123 | { | ||
| 124 | bundle_t *bundle = paravirt_get_bundle(tag); | ||
| 125 | unsigned long slot = paravirt_get_slot(tag); | ||
| 126 | |||
| 127 | switch (slot) { | ||
| 128 | case 0: | ||
| 129 | return paravirt_read_slot0(bundle); | ||
| 130 | case 1: | ||
| 131 | return paravirt_read_slot1(bundle); | ||
| 132 | case 2: | ||
| 133 | return paravirt_read_slot2(bundle); | ||
| 134 | default: | ||
| 135 | BUG(); | ||
| 136 | } | ||
| 137 | /* NOTREACHED */ | ||
| 138 | } | ||
| 139 | |||
| 140 | void __init_or_module | ||
| 141 | paravirt_write_slot0(bundle_t *bundle, ia64_inst_t inst) | ||
| 142 | { | ||
| 143 | bundle->quad0.slot0 = inst.l; | ||
| 144 | } | ||
| 145 | |||
| 146 | void __init_or_module | ||
| 147 | paravirt_write_slot1(bundle_t *bundle, ia64_inst_t inst) | ||
| 148 | { | ||
| 149 | bundle->quad0.slot1_p0 = inst.l; | ||
| 150 | bundle->quad1.slot1_p1 = inst.l >> 18UL; | ||
| 151 | } | ||
| 152 | |||
| 153 | void __init_or_module | ||
| 154 | paravirt_write_slot2(bundle_t *bundle, ia64_inst_t inst) | ||
| 155 | { | ||
| 156 | bundle->quad1.slot2 = inst.l; | ||
| 157 | } | ||
| 158 | |||
| 159 | void __init_or_module | ||
| 160 | paravirt_write_inst(unsigned long tag, ia64_inst_t inst) | ||
| 161 | { | ||
| 162 | bundle_t *bundle = paravirt_get_bundle(tag); | ||
| 163 | unsigned long slot = paravirt_get_slot(tag); | ||
| 164 | |||
| 165 | switch (slot) { | ||
| 166 | case 0: | ||
| 167 | paravirt_write_slot0(bundle, inst); | ||
| 168 | break; | ||
| 169 | case 1: | ||
| 170 | paravirt_write_slot1(bundle, inst); | ||
| 171 | break; | ||
| 172 | case 2: | ||
| 173 | paravirt_write_slot2(bundle, inst); | ||
| 174 | break; | ||
| 175 | default: | ||
| 176 | BUG(); | ||
| 177 | break; | ||
| 178 | } | ||
| 179 | paravirt_flush_i_cache_range(bundle, sizeof(*bundle)); | ||
| 180 | } | ||
| 181 | |||
| 182 | /* for debug */ | ||
| 183 | void | ||
| 184 | paravirt_print_bundle(const bundle_t *bundle) | ||
| 185 | { | ||
| 186 | const unsigned long *quad = (const unsigned long *)bundle; | ||
| 187 | ia64_inst_t slot0 = paravirt_read_slot0(bundle); | ||
| 188 | ia64_inst_t slot1 = paravirt_read_slot1(bundle); | ||
| 189 | ia64_inst_t slot2 = paravirt_read_slot2(bundle); | ||
| 190 | |||
| 191 | printk(KERN_DEBUG | ||
| 192 | "bundle 0x%p 0x%016lx 0x%016lx\n", bundle, quad[0], quad[1]); | ||
| 193 | printk(KERN_DEBUG | ||
| 194 | "bundle template 0x%x\n", | ||
| 195 | bundle->quad0.template); | ||
| 196 | printk(KERN_DEBUG | ||
| 197 | "slot0 0x%lx slot1_p0 0x%lx slot1_p1 0x%lx slot2 0x%lx\n", | ||
| 198 | (unsigned long)bundle->quad0.slot0, | ||
| 199 | (unsigned long)bundle->quad0.slot1_p0, | ||
| 200 | (unsigned long)bundle->quad1.slot1_p1, | ||
| 201 | (unsigned long)bundle->quad1.slot2); | ||
| 202 | printk(KERN_DEBUG | ||
| 203 | "slot0 0x%016llx slot1 0x%016llx slot2 0x%016llx\n", | ||
| 204 | slot0.l, slot1.l, slot2.l); | ||
| 205 | } | ||
| 206 | |||
| 207 | static int noreplace_paravirt __init_or_module = 0; | ||
| 208 | |||
| 209 | static int __init setup_noreplace_paravirt(char *str) | ||
| 210 | { | ||
| 211 | noreplace_paravirt = 1; | ||
| 212 | return 1; | ||
| 213 | } | ||
| 214 | __setup("noreplace-paravirt", setup_noreplace_paravirt); | ||
| 215 | |||
| 216 | #ifdef ASM_SUPPORTED | ||
| 217 | static void __init_or_module | ||
| 218 | fill_nop_bundle(void *sbundle, void *ebundle) | ||
| 219 | { | ||
| 220 | extern const char paravirt_nop_bundle[]; | ||
| 221 | extern const unsigned long paravirt_nop_bundle_size; | ||
| 222 | |||
| 223 | void *bundle = sbundle; | ||
| 224 | |||
| 225 | BUG_ON((((unsigned long)sbundle) % sizeof(bundle_t)) != 0); | ||
| 226 | BUG_ON((((unsigned long)ebundle) % sizeof(bundle_t)) != 0); | ||
| 227 | |||
| 228 | while (bundle < ebundle) { | ||
| 229 | memcpy(bundle, paravirt_nop_bundle, paravirt_nop_bundle_size); | ||
| 230 | |||
| 231 | bundle += paravirt_nop_bundle_size; | ||
| 232 | } | ||
| 233 | } | ||
| 234 | |||
| 235 | /* helper function */ | ||
| 236 | unsigned long __init_or_module | ||
| 237 | __paravirt_patch_apply_bundle(void *sbundle, void *ebundle, unsigned long type, | ||
| 238 | const struct paravirt_patch_bundle_elem *elems, | ||
| 239 | unsigned long nelems, | ||
| 240 | const struct paravirt_patch_bundle_elem **found) | ||
| 241 | { | ||
| 242 | unsigned long used = 0; | ||
| 243 | unsigned long i; | ||
| 244 | |||
| 245 | BUG_ON((((unsigned long)sbundle) % sizeof(bundle_t)) != 0); | ||
| 246 | BUG_ON((((unsigned long)ebundle) % sizeof(bundle_t)) != 0); | ||
| 247 | |||
| 248 | found = NULL; | ||
| 249 | for (i = 0; i < nelems; i++) { | ||
| 250 | const struct paravirt_patch_bundle_elem *p = &elems[i]; | ||
| 251 | if (p->type == type) { | ||
| 252 | unsigned long need = p->ebundle - p->sbundle; | ||
| 253 | unsigned long room = ebundle - sbundle; | ||
| 254 | |||
| 255 | if (found != NULL) | ||
| 256 | *found = p; | ||
| 257 | |||
| 258 | if (room < need) { | ||
| 259 | /* no room to replace. skip it */ | ||
| 260 | printk(KERN_DEBUG | ||
| 261 | "the space is too small to put " | ||
| 262 | "bundles. type %ld need %ld room %ld\n", | ||
| 263 | type, need, room); | ||
| 264 | break; | ||
| 265 | } | ||
| 266 | |||
| 267 | used = need; | ||
| 268 | memcpy(sbundle, p->sbundle, used); | ||
| 269 | break; | ||
| 270 | } | ||
| 271 | } | ||
| 272 | |||
| 273 | return used; | ||
| 274 | } | ||
| 275 | |||
| 276 | void __init_or_module | ||
| 277 | paravirt_patch_apply_bundle(const struct paravirt_patch_site_bundle *start, | ||
| 278 | const struct paravirt_patch_site_bundle *end) | ||
| 279 | { | ||
| 280 | const struct paravirt_patch_site_bundle *p; | ||
| 281 | |||
| 282 | if (noreplace_paravirt) | ||
| 283 | return; | ||
| 284 | if (pv_init_ops.patch_bundle == NULL) | ||
| 285 | return; | ||
| 286 | |||
| 287 | for (p = start; p < end; p++) { | ||
| 288 | unsigned long used; | ||
| 289 | |||
| 290 | used = (*pv_init_ops.patch_bundle)(p->sbundle, p->ebundle, | ||
| 291 | p->type); | ||
| 292 | if (used == 0) | ||
| 293 | continue; | ||
| 294 | |||
| 295 | fill_nop_bundle(p->sbundle + used, p->ebundle); | ||
| 296 | paravirt_flush_i_cache_range(p->sbundle, | ||
| 297 | p->ebundle - p->sbundle); | ||
| 298 | } | ||
| 299 | ia64_sync_i(); | ||
| 300 | ia64_srlz_i(); | ||
| 301 | } | ||
| 302 | |||
| 303 | /* | ||
| 304 | * nop.i, nop.m, nop.f instruction are same format. | ||
| 305 | * but nop.b has differennt format. | ||
| 306 | * This doesn't support nop.b for now. | ||
| 307 | */ | ||
| 308 | static void __init_or_module | ||
| 309 | fill_nop_inst(unsigned long stag, unsigned long etag) | ||
| 310 | { | ||
| 311 | extern const bundle_t paravirt_nop_mfi_inst_bundle[]; | ||
| 312 | unsigned long tag; | ||
| 313 | const ia64_inst_t nop_inst = | ||
| 314 | paravirt_read_slot0(paravirt_nop_mfi_inst_bundle); | ||
| 315 | |||
| 316 | for (tag = stag; tag < etag; tag = paravirt_get_next_tag(tag)) | ||
| 317 | paravirt_write_inst(tag, nop_inst); | ||
| 318 | } | ||
| 319 | |||
| 320 | void __init_or_module | ||
| 321 | paravirt_patch_apply_inst(const struct paravirt_patch_site_inst *start, | ||
| 322 | const struct paravirt_patch_site_inst *end) | ||
| 323 | { | ||
| 324 | const struct paravirt_patch_site_inst *p; | ||
| 325 | |||
| 326 | if (noreplace_paravirt) | ||
| 327 | return; | ||
| 328 | if (pv_init_ops.patch_inst == NULL) | ||
| 329 | return; | ||
| 330 | |||
| 331 | for (p = start; p < end; p++) { | ||
| 332 | unsigned long tag; | ||
| 333 | bundle_t *sbundle; | ||
| 334 | bundle_t *ebundle; | ||
| 335 | |||
| 336 | tag = (*pv_init_ops.patch_inst)(p->stag, p->etag, p->type); | ||
| 337 | if (tag == p->stag) | ||
| 338 | continue; | ||
| 339 | |||
| 340 | fill_nop_inst(tag, p->etag); | ||
| 341 | sbundle = paravirt_get_bundle(p->stag); | ||
| 342 | ebundle = paravirt_get_bundle(p->etag) + 1; | ||
| 343 | paravirt_flush_i_cache_range(sbundle, (ebundle - sbundle) * | ||
| 344 | sizeof(bundle_t)); | ||
| 345 | } | ||
| 346 | ia64_sync_i(); | ||
| 347 | ia64_srlz_i(); | ||
| 348 | } | ||
| 349 | #endif /* ASM_SUPPOTED */ | ||
| 350 | |||
| 351 | /* brl.cond.sptk.many <target64> X3 */ | ||
| 352 | typedef union inst_x3_op { | ||
| 353 | ia64_inst_t inst; | ||
| 354 | struct { | ||
| 355 | unsigned long qp: 6; | ||
| 356 | unsigned long btyp: 3; | ||
| 357 | unsigned long unused: 3; | ||
| 358 | unsigned long p: 1; | ||
| 359 | unsigned long imm20b: 20; | ||
| 360 | unsigned long wh: 2; | ||
| 361 | unsigned long d: 1; | ||
| 362 | unsigned long i: 1; | ||
| 363 | unsigned long opcode: 4; | ||
| 364 | }; | ||
| 365 | unsigned long l; | ||
| 366 | } inst_x3_op_t; | ||
| 367 | |||
| 368 | typedef union inst_x3_imm { | ||
| 369 | ia64_inst_t inst; | ||
| 370 | struct { | ||
| 371 | unsigned long unused: 2; | ||
| 372 | unsigned long imm39: 39; | ||
| 373 | }; | ||
| 374 | unsigned long l; | ||
| 375 | } inst_x3_imm_t; | ||
| 376 | |||
| 377 | void __init_or_module | ||
| 378 | paravirt_patch_reloc_brl(unsigned long tag, const void *target) | ||
| 379 | { | ||
| 380 | unsigned long tag_op = paravirt_get_next_tag(tag); | ||
| 381 | unsigned long tag_imm = tag; | ||
| 382 | bundle_t *bundle = paravirt_get_bundle(tag); | ||
| 383 | |||
| 384 | ia64_inst_t inst_op = paravirt_read_inst(tag_op); | ||
| 385 | ia64_inst_t inst_imm = paravirt_read_inst(tag_imm); | ||
| 386 | |||
| 387 | inst_x3_op_t inst_x3_op = { .l = inst_op.l }; | ||
| 388 | inst_x3_imm_t inst_x3_imm = { .l = inst_imm.l }; | ||
| 389 | |||
| 390 | unsigned long imm60 = | ||
| 391 | ((unsigned long)target - (unsigned long)bundle) >> 4; | ||
| 392 | |||
| 393 | BUG_ON(paravirt_get_slot(tag) != 1); /* MLX */ | ||
| 394 | BUG_ON(((unsigned long)target & (sizeof(bundle_t) - 1)) != 0); | ||
| 395 | |||
| 396 | /* imm60[59] 1bit */ | ||
| 397 | inst_x3_op.i = (imm60 >> 59) & 1; | ||
| 398 | /* imm60[19:0] 20bit */ | ||
| 399 | inst_x3_op.imm20b = imm60 & ((1UL << 20) - 1); | ||
| 400 | /* imm60[58:20] 39bit */ | ||
| 401 | inst_x3_imm.imm39 = (imm60 >> 20) & ((1UL << 39) - 1); | ||
| 402 | |||
| 403 | inst_op.l = inst_x3_op.l; | ||
| 404 | inst_imm.l = inst_x3_imm.l; | ||
| 405 | |||
| 406 | paravirt_write_inst(tag_op, inst_op); | ||
| 407 | paravirt_write_inst(tag_imm, inst_imm); | ||
| 408 | } | ||
| 409 | |||
| 410 | /* br.cond.sptk.many <target25> B1 */ | ||
| 411 | typedef union inst_b1 { | ||
| 412 | ia64_inst_t inst; | ||
| 413 | struct { | ||
| 414 | unsigned long qp: 6; | ||
| 415 | unsigned long btype: 3; | ||
| 416 | unsigned long unused: 3; | ||
| 417 | unsigned long p: 1; | ||
| 418 | unsigned long imm20b: 20; | ||
| 419 | unsigned long wh: 2; | ||
| 420 | unsigned long d: 1; | ||
| 421 | unsigned long s: 1; | ||
| 422 | unsigned long opcode: 4; | ||
| 423 | }; | ||
| 424 | unsigned long l; | ||
| 425 | } inst_b1_t; | ||
| 426 | |||
| 427 | void __init | ||
| 428 | paravirt_patch_reloc_br(unsigned long tag, const void *target) | ||
| 429 | { | ||
| 430 | bundle_t *bundle = paravirt_get_bundle(tag); | ||
| 431 | ia64_inst_t inst = paravirt_read_inst(tag); | ||
| 432 | unsigned long target25 = (unsigned long)target - (unsigned long)bundle; | ||
| 433 | inst_b1_t inst_b1; | ||
| 434 | |||
| 435 | BUG_ON(((unsigned long)target & (sizeof(bundle_t) - 1)) != 0); | ||
| 436 | |||
| 437 | inst_b1.l = inst.l; | ||
| 438 | if (target25 & (1UL << 63)) | ||
| 439 | inst_b1.s = 1; | ||
| 440 | else | ||
| 441 | inst_b1.s = 0; | ||
| 442 | |||
| 443 | inst_b1.imm20b = target25 >> 4; | ||
| 444 | inst.l = inst_b1.l; | ||
| 445 | |||
| 446 | paravirt_write_inst(tag, inst); | ||
| 447 | } | ||
| 448 | |||
| 449 | void __init | ||
| 450 | __paravirt_patch_apply_branch( | ||
| 451 | unsigned long tag, unsigned long type, | ||
| 452 | const struct paravirt_patch_branch_target *entries, | ||
| 453 | unsigned int nr_entries) | ||
| 454 | { | ||
| 455 | unsigned int i; | ||
| 456 | for (i = 0; i < nr_entries; i++) { | ||
| 457 | if (entries[i].type == type) { | ||
| 458 | paravirt_patch_reloc_br(tag, entries[i].entry); | ||
| 459 | break; | ||
| 460 | } | ||
| 461 | } | ||
| 462 | } | ||
| 463 | |||
| 464 | static void __init | ||
| 465 | paravirt_patch_apply_branch(const struct paravirt_patch_site_branch *start, | ||
| 466 | const struct paravirt_patch_site_branch *end) | ||
| 467 | { | ||
| 468 | const struct paravirt_patch_site_branch *p; | ||
| 469 | |||
| 470 | if (noreplace_paravirt) | ||
| 471 | return; | ||
| 472 | if (pv_init_ops.patch_branch == NULL) | ||
| 473 | return; | ||
| 474 | |||
| 475 | for (p = start; p < end; p++) | ||
| 476 | (*pv_init_ops.patch_branch)(p->tag, p->type); | ||
| 477 | |||
| 478 | ia64_sync_i(); | ||
| 479 | ia64_srlz_i(); | ||
| 480 | } | ||
| 481 | |||
| 482 | void __init | ||
| 483 | paravirt_patch_apply(void) | ||
| 484 | { | ||
| 485 | extern const char __start_paravirt_bundles[]; | ||
| 486 | extern const char __stop_paravirt_bundles[]; | ||
| 487 | extern const char __start_paravirt_insts[]; | ||
| 488 | extern const char __stop_paravirt_insts[]; | ||
| 489 | extern const char __start_paravirt_branches[]; | ||
| 490 | extern const char __stop_paravirt_branches[]; | ||
| 491 | |||
| 492 | paravirt_patch_apply_bundle((const struct paravirt_patch_site_bundle *) | ||
| 493 | __start_paravirt_bundles, | ||
| 494 | (const struct paravirt_patch_site_bundle *) | ||
| 495 | __stop_paravirt_bundles); | ||
| 496 | paravirt_patch_apply_inst((const struct paravirt_patch_site_inst *) | ||
| 497 | __start_paravirt_insts, | ||
| 498 | (const struct paravirt_patch_site_inst *) | ||
| 499 | __stop_paravirt_insts); | ||
| 500 | paravirt_patch_apply_branch((const struct paravirt_patch_site_branch *) | ||
| 501 | __start_paravirt_branches, | ||
| 502 | (const struct paravirt_patch_site_branch *) | ||
| 503 | __stop_paravirt_branches); | ||
| 504 | } | ||
| 505 | |||
| 506 | /* | ||
| 507 | * Local variables: | ||
| 508 | * mode: C | ||
| 509 | * c-set-style: "linux" | ||
| 510 | * c-basic-offset: 8 | ||
| 511 | * tab-width: 8 | ||
| 512 | * indent-tabs-mode: t | ||
| 513 | * End: | ||
| 514 | */ | ||
diff --git a/arch/ia64/kernel/paravirt_patchlist.c b/arch/ia64/kernel/paravirt_patchlist.c new file mode 100644 index 000000000000..b28082a95d45 --- /dev/null +++ b/arch/ia64/kernel/paravirt_patchlist.c | |||
| @@ -0,0 +1,79 @@ | |||
| 1 | /****************************************************************************** | ||
| 2 | * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp> | ||
| 3 | * VA Linux Systems Japan K.K. | ||
| 4 | * | ||
| 5 | * This program is free software; you can redistribute it and/or modify | ||
| 6 | * it under the terms of the GNU General Public License as published by | ||
| 7 | * the Free Software Foundation; either version 2 of the License, or | ||
| 8 | * (at your option) any later version. | ||
| 9 | * | ||
| 10 | * This program is distributed in the hope that it will be useful, | ||
| 11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 13 | * GNU General Public License for more details. | ||
| 14 | * | ||
| 15 | * You should have received a copy of the GNU General Public License | ||
| 16 | * along with this program; if not, write to the Free Software | ||
| 17 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
| 18 | * | ||
| 19 | */ | ||
| 20 | |||
| 21 | #include <linux/bug.h> | ||
| 22 | #include <asm/paravirt.h> | ||
| 23 | |||
| 24 | #define DECLARE(name) \ | ||
| 25 | extern unsigned long \ | ||
| 26 | __ia64_native_start_gate_##name##_patchlist[]; \ | ||
| 27 | extern unsigned long \ | ||
| 28 | __ia64_native_end_gate_##name##_patchlist[] | ||
| 29 | |||
| 30 | DECLARE(fsyscall); | ||
| 31 | DECLARE(brl_fsys_bubble_down); | ||
| 32 | DECLARE(vtop); | ||
| 33 | DECLARE(mckinley_e9); | ||
| 34 | |||
| 35 | extern unsigned long __start_gate_section[]; | ||
| 36 | |||
| 37 | #define ASSIGN(name) \ | ||
| 38 | .start_##name##_patchlist = \ | ||
| 39 | (unsigned long)__ia64_native_start_gate_##name##_patchlist, \ | ||
| 40 | .end_##name##_patchlist = \ | ||
| 41 | (unsigned long)__ia64_native_end_gate_##name##_patchlist | ||
| 42 | |||
| 43 | struct pv_patchdata pv_patchdata __initdata = { | ||
| 44 | ASSIGN(fsyscall), | ||
| 45 | ASSIGN(brl_fsys_bubble_down), | ||
| 46 | ASSIGN(vtop), | ||
| 47 | ASSIGN(mckinley_e9), | ||
| 48 | |||
| 49 | .gate_section = (void*)__start_gate_section, | ||
| 50 | }; | ||
| 51 | |||
| 52 | |||
| 53 | unsigned long __init | ||
| 54 | paravirt_get_gate_patchlist(enum pv_gate_patchlist type) | ||
| 55 | { | ||
| 56 | |||
| 57 | #define CASE(NAME, name) \ | ||
| 58 | case PV_GATE_START_##NAME: \ | ||
| 59 | return pv_patchdata.start_##name##_patchlist; \ | ||
| 60 | case PV_GATE_END_##NAME: \ | ||
| 61 | return pv_patchdata.end_##name##_patchlist; \ | ||
| 62 | |||
| 63 | switch (type) { | ||
| 64 | CASE(FSYSCALL, fsyscall); | ||
| 65 | CASE(BRL_FSYS_BUBBLE_DOWN, brl_fsys_bubble_down); | ||
| 66 | CASE(VTOP, vtop); | ||
| 67 | CASE(MCKINLEY_E9, mckinley_e9); | ||
| 68 | default: | ||
| 69 | BUG(); | ||
| 70 | break; | ||
| 71 | } | ||
| 72 | return 0; | ||
| 73 | } | ||
| 74 | |||
| 75 | void * __init | ||
| 76 | paravirt_get_gate_section(void) | ||
| 77 | { | ||
| 78 | return pv_patchdata.gate_section; | ||
| 79 | } | ||
diff --git a/arch/ia64/kernel/paravirt_patchlist.h b/arch/ia64/kernel/paravirt_patchlist.h new file mode 100644 index 000000000000..0684aa6c6507 --- /dev/null +++ b/arch/ia64/kernel/paravirt_patchlist.h | |||
| @@ -0,0 +1,28 @@ | |||
| 1 | /****************************************************************************** | ||
| 2 | * linux/arch/ia64/xen/paravirt_patchlist.h | ||
| 3 | * | ||
| 4 | * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp> | ||
| 5 | * VA Linux Systems Japan K.K. | ||
| 6 | * | ||
| 7 | * This program is free software; you can redistribute it and/or modify | ||
| 8 | * it under the terms of the GNU General Public License as published by | ||
| 9 | * the Free Software Foundation; either version 2 of the License, or | ||
| 10 | * (at your option) any later version. | ||
| 11 | * | ||
| 12 | * This program is distributed in the hope that it will be useful, | ||
| 13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 15 | * GNU General Public License for more details. | ||
| 16 | * | ||
| 17 | * You should have received a copy of the GNU General Public License | ||
| 18 | * along with this program; if not, write to the Free Software | ||
| 19 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
| 20 | * | ||
| 21 | */ | ||
| 22 | |||
| 23 | #if defined(__IA64_GATE_PARAVIRTUALIZED_XEN) | ||
| 24 | #include <asm/xen/patchlist.h> | ||
| 25 | #else | ||
| 26 | #include <asm/native/patchlist.h> | ||
| 27 | #endif | ||
| 28 | |||
diff --git a/arch/ia64/kernel/paravirtentry.S b/arch/ia64/kernel/paravirtentry.S index 2f42fcb9776a..6158560d7f17 100644 --- a/arch/ia64/kernel/paravirtentry.S +++ b/arch/ia64/kernel/paravirtentry.S | |||
| @@ -20,8 +20,11 @@ | |||
| 20 | * | 20 | * |
| 21 | */ | 21 | */ |
| 22 | 22 | ||
| 23 | #include <linux/init.h> | ||
| 23 | #include <asm/asmmacro.h> | 24 | #include <asm/asmmacro.h> |
| 24 | #include <asm/asm-offsets.h> | 25 | #include <asm/asm-offsets.h> |
| 26 | #include <asm/paravirt_privop.h> | ||
| 27 | #include <asm/paravirt_patch.h> | ||
| 25 | #include "entry.h" | 28 | #include "entry.h" |
| 26 | 29 | ||
| 27 | #define DATA8(sym, init_value) \ | 30 | #define DATA8(sym, init_value) \ |
| @@ -32,29 +35,87 @@ | |||
| 32 | data8 init_value ; \ | 35 | data8 init_value ; \ |
| 33 | .popsection | 36 | .popsection |
| 34 | 37 | ||
| 35 | #define BRANCH(targ, reg, breg) \ | 38 | #define BRANCH(targ, reg, breg, type) \ |
| 36 | movl reg=targ ; \ | 39 | PARAVIRT_PATCH_SITE_BR(PARAVIRT_PATCH_TYPE_BR_ ## type) ; \ |
| 37 | ;; \ | 40 | ;; \ |
| 38 | ld8 reg=[reg] ; \ | 41 | movl reg=targ ; \ |
| 39 | ;; \ | 42 | ;; \ |
| 40 | mov breg=reg ; \ | 43 | ld8 reg=[reg] ; \ |
| 44 | ;; \ | ||
| 45 | mov breg=reg ; \ | ||
| 41 | br.cond.sptk.many breg | 46 | br.cond.sptk.many breg |
| 42 | 47 | ||
| 43 | #define BRANCH_PROC(sym, reg, breg) \ | 48 | #define BRANCH_PROC(sym, reg, breg, type) \ |
| 44 | DATA8(paravirt_ ## sym ## _targ, ia64_native_ ## sym) ; \ | 49 | DATA8(paravirt_ ## sym ## _targ, ia64_native_ ## sym) ; \ |
| 45 | GLOBAL_ENTRY(paravirt_ ## sym) ; \ | 50 | GLOBAL_ENTRY(paravirt_ ## sym) ; \ |
| 46 | BRANCH(paravirt_ ## sym ## _targ, reg, breg) ; \ | 51 | BRANCH(paravirt_ ## sym ## _targ, reg, breg, type) ; \ |
| 47 | END(paravirt_ ## sym) | 52 | END(paravirt_ ## sym) |
| 48 | 53 | ||
| 49 | #define BRANCH_PROC_UNWINFO(sym, reg, breg) \ | 54 | #define BRANCH_PROC_UNWINFO(sym, reg, breg, type) \ |
| 50 | DATA8(paravirt_ ## sym ## _targ, ia64_native_ ## sym) ; \ | 55 | DATA8(paravirt_ ## sym ## _targ, ia64_native_ ## sym) ; \ |
| 51 | GLOBAL_ENTRY(paravirt_ ## sym) ; \ | 56 | GLOBAL_ENTRY(paravirt_ ## sym) ; \ |
| 52 | PT_REGS_UNWIND_INFO(0) ; \ | 57 | PT_REGS_UNWIND_INFO(0) ; \ |
| 53 | BRANCH(paravirt_ ## sym ## _targ, reg, breg) ; \ | 58 | BRANCH(paravirt_ ## sym ## _targ, reg, breg, type) ; \ |
| 54 | END(paravirt_ ## sym) | 59 | END(paravirt_ ## sym) |
| 55 | 60 | ||
| 56 | 61 | ||
| 57 | BRANCH_PROC(switch_to, r22, b7) | 62 | BRANCH_PROC(switch_to, r22, b7, SWITCH_TO) |
| 58 | BRANCH_PROC_UNWINFO(leave_syscall, r22, b7) | 63 | BRANCH_PROC_UNWINFO(leave_syscall, r22, b7, LEAVE_SYSCALL) |
| 59 | BRANCH_PROC(work_processed_syscall, r2, b7) | 64 | BRANCH_PROC(work_processed_syscall, r2, b7, WORK_PROCESSED_SYSCALL) |
| 60 | BRANCH_PROC_UNWINFO(leave_kernel, r22, b7) | 65 | BRANCH_PROC_UNWINFO(leave_kernel, r22, b7, LEAVE_KERNEL) |
| 66 | |||
| 67 | |||
| 68 | #ifdef CONFIG_MODULES | ||
| 69 | #define __INIT_OR_MODULE .text | ||
| 70 | #define __INITDATA_OR_MODULE .data | ||
| 71 | #else | ||
| 72 | #define __INIT_OR_MODULE __INIT | ||
| 73 | #define __INITDATA_OR_MODULE __INITDATA | ||
| 74 | #endif /* CONFIG_MODULES */ | ||
| 75 | |||
| 76 | __INIT_OR_MODULE | ||
| 77 | GLOBAL_ENTRY(paravirt_fc_i) | ||
| 78 | fc.i r32 | ||
| 79 | br.ret.sptk.many rp | ||
| 80 | END(paravirt_fc_i) | ||
| 81 | __FINIT | ||
| 82 | |||
| 83 | __INIT_OR_MODULE | ||
| 84 | .align 32 | ||
| 85 | GLOBAL_ENTRY(paravirt_nop_b_inst_bundle) | ||
| 86 | { | ||
| 87 | nop.b 0 | ||
| 88 | nop.b 0 | ||
| 89 | nop.b 0 | ||
| 90 | } | ||
| 91 | END(paravirt_nop_b_inst_bundle) | ||
| 92 | __FINIT | ||
| 93 | |||
| 94 | /* NOTE: nop.[mfi] has same format */ | ||
| 95 | __INIT_OR_MODULE | ||
| 96 | GLOBAL_ENTRY(paravirt_nop_mfi_inst_bundle) | ||
| 97 | { | ||
| 98 | nop.m 0 | ||
| 99 | nop.f 0 | ||
| 100 | nop.i 0 | ||
| 101 | } | ||
| 102 | END(paravirt_nop_mfi_inst_bundle) | ||
| 103 | __FINIT | ||
| 104 | |||
| 105 | __INIT_OR_MODULE | ||
| 106 | GLOBAL_ENTRY(paravirt_nop_bundle) | ||
| 107 | paravirt_nop_bundle_start: | ||
| 108 | { | ||
| 109 | nop 0 | ||
| 110 | nop 0 | ||
| 111 | nop 0 | ||
| 112 | } | ||
| 113 | paravirt_nop_bundle_end: | ||
| 114 | END(paravirt_nop_bundle) | ||
| 115 | __FINIT | ||
| 116 | |||
| 117 | __INITDATA_OR_MODULE | ||
| 118 | .align 8 | ||
| 119 | .global paravirt_nop_bundle_size | ||
| 120 | paravirt_nop_bundle_size: | ||
| 121 | data8 paravirt_nop_bundle_end - paravirt_nop_bundle_start | ||
diff --git a/arch/ia64/kernel/patch.c b/arch/ia64/kernel/patch.c index b83b2c516008..68a1311db806 100644 --- a/arch/ia64/kernel/patch.c +++ b/arch/ia64/kernel/patch.c | |||
| @@ -7,6 +7,7 @@ | |||
| 7 | #include <linux/init.h> | 7 | #include <linux/init.h> |
| 8 | #include <linux/string.h> | 8 | #include <linux/string.h> |
| 9 | 9 | ||
| 10 | #include <asm/paravirt.h> | ||
| 10 | #include <asm/patch.h> | 11 | #include <asm/patch.h> |
| 11 | #include <asm/processor.h> | 12 | #include <asm/processor.h> |
| 12 | #include <asm/sections.h> | 13 | #include <asm/sections.h> |
| @@ -169,16 +170,35 @@ ia64_patch_mckinley_e9 (unsigned long start, unsigned long end) | |||
| 169 | ia64_srlz_i(); | 170 | ia64_srlz_i(); |
| 170 | } | 171 | } |
| 171 | 172 | ||
| 173 | extern unsigned long ia64_native_fsyscall_table[NR_syscalls]; | ||
| 174 | extern char ia64_native_fsys_bubble_down[]; | ||
| 175 | struct pv_fsys_data pv_fsys_data __initdata = { | ||
| 176 | .fsyscall_table = (unsigned long *)ia64_native_fsyscall_table, | ||
| 177 | .fsys_bubble_down = (void *)ia64_native_fsys_bubble_down, | ||
| 178 | }; | ||
| 179 | |||
| 180 | unsigned long * __init | ||
| 181 | paravirt_get_fsyscall_table(void) | ||
| 182 | { | ||
| 183 | return pv_fsys_data.fsyscall_table; | ||
| 184 | } | ||
| 185 | |||
| 186 | char * __init | ||
| 187 | paravirt_get_fsys_bubble_down(void) | ||
| 188 | { | ||
| 189 | return pv_fsys_data.fsys_bubble_down; | ||
| 190 | } | ||
| 191 | |||
| 172 | static void __init | 192 | static void __init |
| 173 | patch_fsyscall_table (unsigned long start, unsigned long end) | 193 | patch_fsyscall_table (unsigned long start, unsigned long end) |
| 174 | { | 194 | { |
| 175 | extern unsigned long fsyscall_table[NR_syscalls]; | 195 | u64 fsyscall_table = (u64)paravirt_get_fsyscall_table(); |
| 176 | s32 *offp = (s32 *) start; | 196 | s32 *offp = (s32 *) start; |
| 177 | u64 ip; | 197 | u64 ip; |
| 178 | 198 | ||
| 179 | while (offp < (s32 *) end) { | 199 | while (offp < (s32 *) end) { |
| 180 | ip = (u64) ia64_imva((char *) offp + *offp); | 200 | ip = (u64) ia64_imva((char *) offp + *offp); |
| 181 | ia64_patch_imm64(ip, (u64) fsyscall_table); | 201 | ia64_patch_imm64(ip, fsyscall_table); |
| 182 | ia64_fc((void *) ip); | 202 | ia64_fc((void *) ip); |
| 183 | ++offp; | 203 | ++offp; |
| 184 | } | 204 | } |
| @@ -189,7 +209,7 @@ patch_fsyscall_table (unsigned long start, unsigned long end) | |||
| 189 | static void __init | 209 | static void __init |
| 190 | patch_brl_fsys_bubble_down (unsigned long start, unsigned long end) | 210 | patch_brl_fsys_bubble_down (unsigned long start, unsigned long end) |
| 191 | { | 211 | { |
| 192 | extern char fsys_bubble_down[]; | 212 | u64 fsys_bubble_down = (u64)paravirt_get_fsys_bubble_down(); |
| 193 | s32 *offp = (s32 *) start; | 213 | s32 *offp = (s32 *) start; |
| 194 | u64 ip; | 214 | u64 ip; |
| 195 | 215 | ||
| @@ -207,13 +227,13 @@ patch_brl_fsys_bubble_down (unsigned long start, unsigned long end) | |||
| 207 | void __init | 227 | void __init |
| 208 | ia64_patch_gate (void) | 228 | ia64_patch_gate (void) |
| 209 | { | 229 | { |
| 210 | # define START(name) ((unsigned long) __start_gate_##name##_patchlist) | 230 | # define START(name) paravirt_get_gate_patchlist(PV_GATE_START_##name) |
| 211 | # define END(name) ((unsigned long)__end_gate_##name##_patchlist) | 231 | # define END(name) paravirt_get_gate_patchlist(PV_GATE_END_##name) |
| 212 | 232 | ||
| 213 | patch_fsyscall_table(START(fsyscall), END(fsyscall)); | 233 | patch_fsyscall_table(START(FSYSCALL), END(FSYSCALL)); |
| 214 | patch_brl_fsys_bubble_down(START(brl_fsys_bubble_down), END(brl_fsys_bubble_down)); | 234 | patch_brl_fsys_bubble_down(START(BRL_FSYS_BUBBLE_DOWN), END(BRL_FSYS_BUBBLE_DOWN)); |
| 215 | ia64_patch_vtop(START(vtop), END(vtop)); | 235 | ia64_patch_vtop(START(VTOP), END(VTOP)); |
| 216 | ia64_patch_mckinley_e9(START(mckinley_e9), END(mckinley_e9)); | 236 | ia64_patch_mckinley_e9(START(MCKINLEY_E9), END(MCKINLEY_E9)); |
| 217 | } | 237 | } |
| 218 | 238 | ||
| 219 | void ia64_patch_phys_stack_reg(unsigned long val) | 239 | void ia64_patch_phys_stack_reg(unsigned long val) |
| @@ -229,7 +249,7 @@ void ia64_patch_phys_stack_reg(unsigned long val) | |||
| 229 | while (offp < end) { | 249 | while (offp < end) { |
| 230 | ip = (u64) offp + *offp; | 250 | ip = (u64) offp + *offp; |
| 231 | ia64_patch(ip, mask, imm); | 251 | ia64_patch(ip, mask, imm); |
| 232 | ia64_fc(ip); | 252 | ia64_fc((void *)ip); |
| 233 | ++offp; | 253 | ++offp; |
| 234 | } | 254 | } |
| 235 | ia64_sync_i(); | 255 | ia64_sync_i(); |
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c index 5c0f408cfd71..8a06dc480594 100644 --- a/arch/ia64/kernel/perfmon.c +++ b/arch/ia64/kernel/perfmon.c | |||
| @@ -5603,7 +5603,7 @@ pfm_interrupt_handler(int irq, void *arg) | |||
| 5603 | * /proc/perfmon interface, for debug only | 5603 | * /proc/perfmon interface, for debug only |
| 5604 | */ | 5604 | */ |
| 5605 | 5605 | ||
| 5606 | #define PFM_PROC_SHOW_HEADER ((void *)NR_CPUS+1) | 5606 | #define PFM_PROC_SHOW_HEADER ((void *)nr_cpu_ids+1) |
| 5607 | 5607 | ||
| 5608 | static void * | 5608 | static void * |
| 5609 | pfm_proc_start(struct seq_file *m, loff_t *pos) | 5609 | pfm_proc_start(struct seq_file *m, loff_t *pos) |
| @@ -5612,7 +5612,7 @@ pfm_proc_start(struct seq_file *m, loff_t *pos) | |||
| 5612 | return PFM_PROC_SHOW_HEADER; | 5612 | return PFM_PROC_SHOW_HEADER; |
| 5613 | } | 5613 | } |
| 5614 | 5614 | ||
| 5615 | while (*pos <= NR_CPUS) { | 5615 | while (*pos <= nr_cpu_ids) { |
| 5616 | if (cpu_online(*pos - 1)) { | 5616 | if (cpu_online(*pos - 1)) { |
| 5617 | return (void *)*pos; | 5617 | return (void *)*pos; |
| 5618 | } | 5618 | } |
diff --git a/arch/ia64/kernel/salinfo.c b/arch/ia64/kernel/salinfo.c index ecb9eb78d687..7053c55b7649 100644 --- a/arch/ia64/kernel/salinfo.c +++ b/arch/ia64/kernel/salinfo.c | |||
| @@ -317,7 +317,7 @@ retry: | |||
| 317 | } | 317 | } |
| 318 | 318 | ||
| 319 | n = data->cpu_check; | 319 | n = data->cpu_check; |
| 320 | for (i = 0; i < NR_CPUS; i++) { | 320 | for (i = 0; i < nr_cpu_ids; i++) { |
| 321 | if (cpu_isset(n, data->cpu_event)) { | 321 | if (cpu_isset(n, data->cpu_event)) { |
| 322 | if (!cpu_online(n)) { | 322 | if (!cpu_online(n)) { |
| 323 | cpu_clear(n, data->cpu_event); | 323 | cpu_clear(n, data->cpu_event); |
| @@ -326,7 +326,7 @@ retry: | |||
| 326 | cpu = n; | 326 | cpu = n; |
| 327 | break; | 327 | break; |
| 328 | } | 328 | } |
| 329 | if (++n == NR_CPUS) | 329 | if (++n == nr_cpu_ids) |
| 330 | n = 0; | 330 | n = 0; |
| 331 | } | 331 | } |
| 332 | 332 | ||
| @@ -337,7 +337,7 @@ retry: | |||
| 337 | 337 | ||
| 338 | /* for next read, start checking at next CPU */ | 338 | /* for next read, start checking at next CPU */ |
| 339 | data->cpu_check = cpu; | 339 | data->cpu_check = cpu; |
| 340 | if (++data->cpu_check == NR_CPUS) | 340 | if (++data->cpu_check == nr_cpu_ids) |
| 341 | data->cpu_check = 0; | 341 | data->cpu_check = 0; |
| 342 | 342 | ||
| 343 | snprintf(cmd, sizeof(cmd), "read %d\n", cpu); | 343 | snprintf(cmd, sizeof(cmd), "read %d\n", cpu); |
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c index 865af27c7737..714066aeda7f 100644 --- a/arch/ia64/kernel/setup.c +++ b/arch/ia64/kernel/setup.c | |||
| @@ -52,6 +52,7 @@ | |||
| 52 | #include <asm/meminit.h> | 52 | #include <asm/meminit.h> |
| 53 | #include <asm/page.h> | 53 | #include <asm/page.h> |
| 54 | #include <asm/paravirt.h> | 54 | #include <asm/paravirt.h> |
| 55 | #include <asm/paravirt_patch.h> | ||
| 55 | #include <asm/patch.h> | 56 | #include <asm/patch.h> |
| 56 | #include <asm/pgtable.h> | 57 | #include <asm/pgtable.h> |
| 57 | #include <asm/processor.h> | 58 | #include <asm/processor.h> |
| @@ -537,6 +538,7 @@ setup_arch (char **cmdline_p) | |||
| 537 | paravirt_arch_setup_early(); | 538 | paravirt_arch_setup_early(); |
| 538 | 539 | ||
| 539 | ia64_patch_vtop((u64) __start___vtop_patchlist, (u64) __end___vtop_patchlist); | 540 | ia64_patch_vtop((u64) __start___vtop_patchlist, (u64) __end___vtop_patchlist); |
| 541 | paravirt_patch_apply(); | ||
| 540 | 542 | ||
| 541 | *cmdline_p = __va(ia64_boot_param->command_line); | 543 | *cmdline_p = __va(ia64_boot_param->command_line); |
| 542 | strlcpy(boot_command_line, *cmdline_p, COMMAND_LINE_SIZE); | 544 | strlcpy(boot_command_line, *cmdline_p, COMMAND_LINE_SIZE); |
| @@ -730,10 +732,10 @@ static void * | |||
| 730 | c_start (struct seq_file *m, loff_t *pos) | 732 | c_start (struct seq_file *m, loff_t *pos) |
| 731 | { | 733 | { |
| 732 | #ifdef CONFIG_SMP | 734 | #ifdef CONFIG_SMP |
| 733 | while (*pos < NR_CPUS && !cpu_isset(*pos, cpu_online_map)) | 735 | while (*pos < nr_cpu_ids && !cpu_online(*pos)) |
| 734 | ++*pos; | 736 | ++*pos; |
| 735 | #endif | 737 | #endif |
| 736 | return *pos < NR_CPUS ? cpu_data(*pos) : NULL; | 738 | return *pos < nr_cpu_ids ? cpu_data(*pos) : NULL; |
| 737 | } | 739 | } |
| 738 | 740 | ||
| 739 | static void * | 741 | static void * |
| @@ -1016,8 +1018,7 @@ cpu_init (void) | |||
| 1016 | | IA64_DCR_DA | IA64_DCR_DD | IA64_DCR_LC)); | 1018 | | IA64_DCR_DA | IA64_DCR_DD | IA64_DCR_LC)); |
| 1017 | atomic_inc(&init_mm.mm_count); | 1019 | atomic_inc(&init_mm.mm_count); |
| 1018 | current->active_mm = &init_mm; | 1020 | current->active_mm = &init_mm; |
| 1019 | if (current->mm) | 1021 | BUG_ON(current->mm); |
| 1020 | BUG(); | ||
| 1021 | 1022 | ||
| 1022 | ia64_mmu_init(ia64_imva(cpu_data)); | 1023 | ia64_mmu_init(ia64_imva(cpu_data)); |
| 1023 | ia64_mca_cpu_init(ia64_imva(cpu_data)); | 1024 | ia64_mca_cpu_init(ia64_imva(cpu_data)); |
diff --git a/arch/ia64/kernel/smp.c b/arch/ia64/kernel/smp.c index da8f020d82c1..2ea4199d9c57 100644 --- a/arch/ia64/kernel/smp.c +++ b/arch/ia64/kernel/smp.c | |||
| @@ -166,11 +166,11 @@ send_IPI_allbutself (int op) | |||
| 166 | * Called with preemption disabled. | 166 | * Called with preemption disabled. |
| 167 | */ | 167 | */ |
| 168 | static inline void | 168 | static inline void |
| 169 | send_IPI_mask(cpumask_t mask, int op) | 169 | send_IPI_mask(const struct cpumask *mask, int op) |
| 170 | { | 170 | { |
| 171 | unsigned int cpu; | 171 | unsigned int cpu; |
| 172 | 172 | ||
| 173 | for_each_cpu_mask(cpu, mask) { | 173 | for_each_cpu(cpu, mask) { |
| 174 | send_IPI_single(cpu, op); | 174 | send_IPI_single(cpu, op); |
| 175 | } | 175 | } |
| 176 | } | 176 | } |
| @@ -316,7 +316,7 @@ void arch_send_call_function_single_ipi(int cpu) | |||
| 316 | send_IPI_single(cpu, IPI_CALL_FUNC_SINGLE); | 316 | send_IPI_single(cpu, IPI_CALL_FUNC_SINGLE); |
| 317 | } | 317 | } |
| 318 | 318 | ||
| 319 | void arch_send_call_function_ipi(cpumask_t mask) | 319 | void arch_send_call_function_ipi_mask(const struct cpumask *mask) |
| 320 | { | 320 | { |
| 321 | send_IPI_mask(mask, IPI_CALL_FUNC); | 321 | send_IPI_mask(mask, IPI_CALL_FUNC); |
| 322 | } | 322 | } |
diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c index 52290547c85b..7700e23034bb 100644 --- a/arch/ia64/kernel/smpboot.c +++ b/arch/ia64/kernel/smpboot.c | |||
| @@ -581,14 +581,14 @@ smp_build_cpu_map (void) | |||
| 581 | 581 | ||
| 582 | ia64_cpu_to_sapicid[0] = boot_cpu_id; | 582 | ia64_cpu_to_sapicid[0] = boot_cpu_id; |
| 583 | cpus_clear(cpu_present_map); | 583 | cpus_clear(cpu_present_map); |
| 584 | cpu_set(0, cpu_present_map); | 584 | set_cpu_present(0, true); |
| 585 | cpu_set(0, cpu_possible_map); | 585 | set_cpu_possible(0, true); |
| 586 | for (cpu = 1, i = 0; i < smp_boot_data.cpu_count; i++) { | 586 | for (cpu = 1, i = 0; i < smp_boot_data.cpu_count; i++) { |
| 587 | sapicid = smp_boot_data.cpu_phys_id[i]; | 587 | sapicid = smp_boot_data.cpu_phys_id[i]; |
| 588 | if (sapicid == boot_cpu_id) | 588 | if (sapicid == boot_cpu_id) |
| 589 | continue; | 589 | continue; |
| 590 | cpu_set(cpu, cpu_present_map); | 590 | set_cpu_present(cpu, true); |
| 591 | cpu_set(cpu, cpu_possible_map); | 591 | set_cpu_possible(cpu, true); |
| 592 | ia64_cpu_to_sapicid[cpu] = sapicid; | 592 | ia64_cpu_to_sapicid[cpu] = sapicid; |
| 593 | cpu++; | 593 | cpu++; |
| 594 | } | 594 | } |
| @@ -626,12 +626,9 @@ smp_prepare_cpus (unsigned int max_cpus) | |||
| 626 | */ | 626 | */ |
| 627 | if (!max_cpus) { | 627 | if (!max_cpus) { |
| 628 | printk(KERN_INFO "SMP mode deactivated.\n"); | 628 | printk(KERN_INFO "SMP mode deactivated.\n"); |
| 629 | cpus_clear(cpu_online_map); | 629 | init_cpu_online(cpumask_of(0)); |
| 630 | cpus_clear(cpu_present_map); | 630 | init_cpu_present(cpumask_of(0)); |
| 631 | cpus_clear(cpu_possible_map); | 631 | init_cpu_possible(cpumask_of(0)); |
| 632 | cpu_set(0, cpu_online_map); | ||
| 633 | cpu_set(0, cpu_present_map); | ||
| 634 | cpu_set(0, cpu_possible_map); | ||
| 635 | return; | 632 | return; |
| 636 | } | 633 | } |
| 637 | } | 634 | } |
diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c index d6747bae52d8..641c8b61c4f1 100644 --- a/arch/ia64/kernel/time.c +++ b/arch/ia64/kernel/time.c | |||
| @@ -51,6 +51,15 @@ EXPORT_SYMBOL(last_cli_ip); | |||
| 51 | #endif | 51 | #endif |
| 52 | 52 | ||
| 53 | #ifdef CONFIG_PARAVIRT | 53 | #ifdef CONFIG_PARAVIRT |
| 54 | /* We need to define a real function for sched_clock, to override the | ||
| 55 | weak default version */ | ||
| 56 | unsigned long long sched_clock(void) | ||
| 57 | { | ||
| 58 | return paravirt_sched_clock(); | ||
| 59 | } | ||
| 60 | #endif | ||
| 61 | |||
| 62 | #ifdef CONFIG_PARAVIRT | ||
| 54 | static void | 63 | static void |
| 55 | paravirt_clocksource_resume(void) | 64 | paravirt_clocksource_resume(void) |
| 56 | { | 65 | { |
diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S index 3765efc5f963..4a95e86b9ac2 100644 --- a/arch/ia64/kernel/vmlinux.lds.S +++ b/arch/ia64/kernel/vmlinux.lds.S | |||
| @@ -169,6 +169,30 @@ SECTIONS | |||
| 169 | __end___mckinley_e9_bundles = .; | 169 | __end___mckinley_e9_bundles = .; |
| 170 | } | 170 | } |
| 171 | 171 | ||
| 172 | #if defined(CONFIG_PARAVIRT) | ||
| 173 | . = ALIGN(16); | ||
| 174 | .paravirt_bundles : AT(ADDR(.paravirt_bundles) - LOAD_OFFSET) | ||
| 175 | { | ||
| 176 | __start_paravirt_bundles = .; | ||
| 177 | *(.paravirt_bundles) | ||
| 178 | __stop_paravirt_bundles = .; | ||
| 179 | } | ||
| 180 | . = ALIGN(16); | ||
| 181 | .paravirt_insts : AT(ADDR(.paravirt_insts) - LOAD_OFFSET) | ||
| 182 | { | ||
| 183 | __start_paravirt_insts = .; | ||
| 184 | *(.paravirt_insts) | ||
| 185 | __stop_paravirt_insts = .; | ||
| 186 | } | ||
| 187 | . = ALIGN(16); | ||
| 188 | .paravirt_branches : AT(ADDR(.paravirt_branches) - LOAD_OFFSET) | ||
| 189 | { | ||
| 190 | __start_paravirt_branches = .; | ||
| 191 | *(.paravirt_branches) | ||
| 192 | __stop_paravirt_branches = .; | ||
| 193 | } | ||
| 194 | #endif | ||
| 195 | |||
| 172 | #if defined(CONFIG_IA64_GENERIC) | 196 | #if defined(CONFIG_IA64_GENERIC) |
| 173 | /* Machine Vector */ | 197 | /* Machine Vector */ |
| 174 | . = ALIGN(16); | 198 | . = ALIGN(16); |
| @@ -201,6 +225,12 @@ SECTIONS | |||
| 201 | __start_gate_section = .; | 225 | __start_gate_section = .; |
| 202 | *(.data.gate) | 226 | *(.data.gate) |
| 203 | __stop_gate_section = .; | 227 | __stop_gate_section = .; |
| 228 | #ifdef CONFIG_XEN | ||
| 229 | . = ALIGN(PAGE_SIZE); | ||
| 230 | __xen_start_gate_section = .; | ||
| 231 | *(.data.gate.xen) | ||
| 232 | __xen_stop_gate_section = .; | ||
| 233 | #endif | ||
| 204 | } | 234 | } |
| 205 | . = ALIGN(PAGE_SIZE); /* make sure the gate page doesn't expose | 235 | . = ALIGN(PAGE_SIZE); /* make sure the gate page doesn't expose |
| 206 | * kernel data | 236 | * kernel data |
diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c index 076b00d1dbff..28af6a731bb8 100644 --- a/arch/ia64/kvm/kvm-ia64.c +++ b/arch/ia64/kvm/kvm-ia64.c | |||
| @@ -70,7 +70,7 @@ static void kvm_flush_icache(unsigned long start, unsigned long len) | |||
| 70 | int l; | 70 | int l; |
| 71 | 71 | ||
| 72 | for (l = 0; l < (len + 32); l += 32) | 72 | for (l = 0; l < (len + 32); l += 32) |
| 73 | ia64_fc(start + l); | 73 | ia64_fc((void *)(start + l)); |
| 74 | 74 | ||
| 75 | ia64_sync_i(); | 75 | ia64_sync_i(); |
| 76 | ia64_srlz_i(); | 76 | ia64_srlz_i(); |
diff --git a/arch/ia64/kvm/vcpu.c b/arch/ia64/kvm/vcpu.c index d4d280505878..a18ee17b9192 100644 --- a/arch/ia64/kvm/vcpu.c +++ b/arch/ia64/kvm/vcpu.c | |||
| @@ -386,7 +386,7 @@ void set_rse_reg(struct kvm_pt_regs *regs, unsigned long r1, | |||
| 386 | else | 386 | else |
| 387 | *rnat_addr = (*rnat_addr) & (~nat_mask); | 387 | *rnat_addr = (*rnat_addr) & (~nat_mask); |
| 388 | 388 | ||
| 389 | ia64_setreg(_IA64_REG_AR_BSPSTORE, bspstore); | 389 | ia64_setreg(_IA64_REG_AR_BSPSTORE, (unsigned long)bspstore); |
| 390 | ia64_setreg(_IA64_REG_AR_RNAT, rnat); | 390 | ia64_setreg(_IA64_REG_AR_RNAT, rnat); |
| 391 | } | 391 | } |
| 392 | local_irq_restore(psr); | 392 | local_irq_restore(psr); |
diff --git a/arch/ia64/kvm/vtlb.c b/arch/ia64/kvm/vtlb.c index 38232b37668b..2c2501f13159 100644 --- a/arch/ia64/kvm/vtlb.c +++ b/arch/ia64/kvm/vtlb.c | |||
| @@ -210,6 +210,7 @@ void thash_vhpt_insert(struct kvm_vcpu *v, u64 pte, u64 itir, u64 va, int type) | |||
| 210 | phy_pte &= ~PAGE_FLAGS_RV_MASK; | 210 | phy_pte &= ~PAGE_FLAGS_RV_MASK; |
| 211 | psr = ia64_clear_ic(); | 211 | psr = ia64_clear_ic(); |
| 212 | ia64_itc(type, va, phy_pte, itir_ps(itir)); | 212 | ia64_itc(type, va, phy_pte, itir_ps(itir)); |
| 213 | paravirt_dv_serialize_data(); | ||
| 213 | ia64_set_psr(psr); | 214 | ia64_set_psr(psr); |
| 214 | } | 215 | } |
| 215 | 216 | ||
| @@ -456,6 +457,7 @@ void thash_purge_and_insert(struct kvm_vcpu *v, u64 pte, u64 itir, | |||
| 456 | phy_pte &= ~PAGE_FLAGS_RV_MASK; | 457 | phy_pte &= ~PAGE_FLAGS_RV_MASK; |
| 457 | psr = ia64_clear_ic(); | 458 | psr = ia64_clear_ic(); |
| 458 | ia64_itc(type, ifa, phy_pte, ps); | 459 | ia64_itc(type, ifa, phy_pte, ps); |
| 460 | paravirt_dv_serialize_data(); | ||
| 459 | ia64_set_psr(psr); | 461 | ia64_set_psr(psr); |
| 460 | } | 462 | } |
| 461 | if (!(pte&VTLB_PTE_IO)) | 463 | if (!(pte&VTLB_PTE_IO)) |
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c index 56e12903973c..c0f3bee69042 100644 --- a/arch/ia64/mm/init.c +++ b/arch/ia64/mm/init.c | |||
| @@ -35,6 +35,7 @@ | |||
| 35 | #include <asm/uaccess.h> | 35 | #include <asm/uaccess.h> |
| 36 | #include <asm/unistd.h> | 36 | #include <asm/unistd.h> |
| 37 | #include <asm/mca.h> | 37 | #include <asm/mca.h> |
| 38 | #include <asm/paravirt.h> | ||
| 38 | 39 | ||
| 39 | DEFINE_PER_CPU(struct mmu_gather, mmu_gathers); | 40 | DEFINE_PER_CPU(struct mmu_gather, mmu_gathers); |
| 40 | 41 | ||
| @@ -259,6 +260,7 @@ put_kernel_page (struct page *page, unsigned long address, pgprot_t pgprot) | |||
| 259 | static void __init | 260 | static void __init |
| 260 | setup_gate (void) | 261 | setup_gate (void) |
| 261 | { | 262 | { |
| 263 | void *gate_section; | ||
| 262 | struct page *page; | 264 | struct page *page; |
| 263 | 265 | ||
| 264 | /* | 266 | /* |
| @@ -266,10 +268,11 @@ setup_gate (void) | |||
| 266 | * headers etc. and once execute-only page to enable | 268 | * headers etc. and once execute-only page to enable |
| 267 | * privilege-promotion via "epc": | 269 | * privilege-promotion via "epc": |
| 268 | */ | 270 | */ |
| 269 | page = virt_to_page(ia64_imva(__start_gate_section)); | 271 | gate_section = paravirt_get_gate_section(); |
| 272 | page = virt_to_page(ia64_imva(gate_section)); | ||
| 270 | put_kernel_page(page, GATE_ADDR, PAGE_READONLY); | 273 | put_kernel_page(page, GATE_ADDR, PAGE_READONLY); |
| 271 | #ifdef HAVE_BUGGY_SEGREL | 274 | #ifdef HAVE_BUGGY_SEGREL |
| 272 | page = virt_to_page(ia64_imva(__start_gate_section + PAGE_SIZE)); | 275 | page = virt_to_page(ia64_imva(gate_section + PAGE_SIZE)); |
| 273 | put_kernel_page(page, GATE_ADDR + PAGE_SIZE, PAGE_GATE); | 276 | put_kernel_page(page, GATE_ADDR + PAGE_SIZE, PAGE_GATE); |
| 274 | #else | 277 | #else |
| 275 | put_kernel_page(page, GATE_ADDR + PERCPU_PAGE_SIZE, PAGE_GATE); | 278 | put_kernel_page(page, GATE_ADDR + PERCPU_PAGE_SIZE, PAGE_GATE); |
| @@ -633,8 +636,7 @@ mem_init (void) | |||
| 633 | #endif | 636 | #endif |
| 634 | 637 | ||
| 635 | #ifdef CONFIG_FLATMEM | 638 | #ifdef CONFIG_FLATMEM |
| 636 | if (!mem_map) | 639 | BUG_ON(!mem_map); |
| 637 | BUG(); | ||
| 638 | max_mapnr = max_low_pfn; | 640 | max_mapnr = max_low_pfn; |
| 639 | #endif | 641 | #endif |
| 640 | 642 | ||
| @@ -667,8 +669,8 @@ mem_init (void) | |||
| 667 | * code can tell them apart. | 669 | * code can tell them apart. |
| 668 | */ | 670 | */ |
| 669 | for (i = 0; i < NR_syscalls; ++i) { | 671 | for (i = 0; i < NR_syscalls; ++i) { |
| 670 | extern unsigned long fsyscall_table[NR_syscalls]; | ||
| 671 | extern unsigned long sys_call_table[NR_syscalls]; | 672 | extern unsigned long sys_call_table[NR_syscalls]; |
| 673 | unsigned long *fsyscall_table = paravirt_get_fsyscall_table(); | ||
| 672 | 674 | ||
| 673 | if (!fsyscall_table[i] || nolwsys) | 675 | if (!fsyscall_table[i] || nolwsys) |
| 674 | fsyscall_table[i] = sys_call_table[i] | 1; | 676 | fsyscall_table[i] = sys_call_table[i] | 1; |
diff --git a/arch/ia64/mm/tlb.c b/arch/ia64/mm/tlb.c index bd9818a36b47..b9f3d7bbb338 100644 --- a/arch/ia64/mm/tlb.c +++ b/arch/ia64/mm/tlb.c | |||
| @@ -309,7 +309,7 @@ flush_tlb_range (struct vm_area_struct *vma, unsigned long start, | |||
| 309 | 309 | ||
| 310 | preempt_disable(); | 310 | preempt_disable(); |
| 311 | #ifdef CONFIG_SMP | 311 | #ifdef CONFIG_SMP |
| 312 | if (mm != current->active_mm || cpus_weight(mm->cpu_vm_mask) != 1) { | 312 | if (mm != current->active_mm || cpumask_weight(mm_cpumask(mm)) != 1) { |
| 313 | platform_global_tlb_purge(mm, start, end, nbits); | 313 | platform_global_tlb_purge(mm, start, end, nbits); |
| 314 | preempt_enable(); | 314 | preempt_enable(); |
| 315 | return; | 315 | return; |
diff --git a/arch/ia64/scripts/pvcheck.sed b/arch/ia64/scripts/pvcheck.sed index ba66ac2e4c60..e59809a3fc01 100644 --- a/arch/ia64/scripts/pvcheck.sed +++ b/arch/ia64/scripts/pvcheck.sed | |||
| @@ -17,6 +17,7 @@ s/mov.*=.*cr\.iip/.warning \"cr.iip should not used directly\"/g | |||
| 17 | s/mov.*=.*cr\.ivr/.warning \"cr.ivr should not used directly\"/g | 17 | s/mov.*=.*cr\.ivr/.warning \"cr.ivr should not used directly\"/g |
| 18 | s/mov.*=[^\.]*psr/.warning \"psr should not used directly\"/g # avoid ar.fpsr | 18 | s/mov.*=[^\.]*psr/.warning \"psr should not used directly\"/g # avoid ar.fpsr |
| 19 | s/mov.*=.*ar\.eflags/.warning \"ar.eflags should not used directly\"/g | 19 | s/mov.*=.*ar\.eflags/.warning \"ar.eflags should not used directly\"/g |
| 20 | s/mov.*=.*ar\.itc.*/.warning \"ar.itc should not used directly\"/g | ||
| 20 | s/mov.*cr\.ifa.*=.*/.warning \"cr.ifa should not used directly\"/g | 21 | s/mov.*cr\.ifa.*=.*/.warning \"cr.ifa should not used directly\"/g |
| 21 | s/mov.*cr\.itir.*=.*/.warning \"cr.itir should not used directly\"/g | 22 | s/mov.*cr\.itir.*=.*/.warning \"cr.itir should not used directly\"/g |
| 22 | s/mov.*cr\.iha.*=.*/.warning \"cr.iha should not used directly\"/g | 23 | s/mov.*cr\.iha.*=.*/.warning \"cr.iha should not used directly\"/g |
diff --git a/arch/ia64/sn/kernel/io_common.c b/arch/ia64/sn/kernel/io_common.c index 0d4ffa4da1da..57f280dd9def 100644 --- a/arch/ia64/sn/kernel/io_common.c +++ b/arch/ia64/sn/kernel/io_common.c | |||
| @@ -135,8 +135,7 @@ static s64 sn_device_fixup_war(u64 nasid, u64 widget, int device, | |||
| 135 | } | 135 | } |
| 136 | 136 | ||
| 137 | war_list = kzalloc(DEV_PER_WIDGET * sizeof(*war_list), GFP_KERNEL); | 137 | war_list = kzalloc(DEV_PER_WIDGET * sizeof(*war_list), GFP_KERNEL); |
| 138 | if (!war_list) | 138 | BUG_ON(!war_list); |
| 139 | BUG(); | ||
| 140 | 139 | ||
| 141 | SAL_CALL_NOLOCK(isrv, SN_SAL_IOIF_GET_WIDGET_DMAFLUSH_LIST, | 140 | SAL_CALL_NOLOCK(isrv, SN_SAL_IOIF_GET_WIDGET_DMAFLUSH_LIST, |
| 142 | nasid, widget, __pa(war_list), 0, 0, 0 ,0); | 141 | nasid, widget, __pa(war_list), 0, 0, 0 ,0); |
| @@ -180,23 +179,20 @@ sn_common_hubdev_init(struct hubdev_info *hubdev) | |||
| 180 | sizeof(struct sn_flush_device_kernel *); | 179 | sizeof(struct sn_flush_device_kernel *); |
| 181 | hubdev->hdi_flush_nasid_list.widget_p = | 180 | hubdev->hdi_flush_nasid_list.widget_p = |
| 182 | kzalloc(size, GFP_KERNEL); | 181 | kzalloc(size, GFP_KERNEL); |
| 183 | if (!hubdev->hdi_flush_nasid_list.widget_p) | 182 | BUG_ON(!hubdev->hdi_flush_nasid_list.widget_p); |
| 184 | BUG(); | ||
| 185 | 183 | ||
| 186 | for (widget = 0; widget <= HUB_WIDGET_ID_MAX; widget++) { | 184 | for (widget = 0; widget <= HUB_WIDGET_ID_MAX; widget++) { |
| 187 | size = DEV_PER_WIDGET * | 185 | size = DEV_PER_WIDGET * |
| 188 | sizeof(struct sn_flush_device_kernel); | 186 | sizeof(struct sn_flush_device_kernel); |
| 189 | sn_flush_device_kernel = kzalloc(size, GFP_KERNEL); | 187 | sn_flush_device_kernel = kzalloc(size, GFP_KERNEL); |
| 190 | if (!sn_flush_device_kernel) | 188 | BUG_ON(!sn_flush_device_kernel); |
| 191 | BUG(); | ||
| 192 | 189 | ||
| 193 | dev_entry = sn_flush_device_kernel; | 190 | dev_entry = sn_flush_device_kernel; |
| 194 | for (device = 0; device < DEV_PER_WIDGET; | 191 | for (device = 0; device < DEV_PER_WIDGET; |
| 195 | device++, dev_entry++) { | 192 | device++, dev_entry++) { |
| 196 | size = sizeof(struct sn_flush_device_common); | 193 | size = sizeof(struct sn_flush_device_common); |
| 197 | dev_entry->common = kzalloc(size, GFP_KERNEL); | 194 | dev_entry->common = kzalloc(size, GFP_KERNEL); |
| 198 | if (!dev_entry->common) | 195 | BUG_ON(!dev_entry->common); |
| 199 | BUG(); | ||
| 200 | if (sn_prom_feature_available(PRF_DEVICE_FLUSH_LIST)) | 196 | if (sn_prom_feature_available(PRF_DEVICE_FLUSH_LIST)) |
| 201 | status = sal_get_device_dmaflush_list( | 197 | status = sal_get_device_dmaflush_list( |
| 202 | hubdev->hdi_nasid, widget, device, | 198 | hubdev->hdi_nasid, widget, device, |
| @@ -326,8 +322,7 @@ sn_common_bus_fixup(struct pci_bus *bus, | |||
| 326 | */ | 322 | */ |
| 327 | controller->platform_data = kzalloc(sizeof(struct sn_platform_data), | 323 | controller->platform_data = kzalloc(sizeof(struct sn_platform_data), |
| 328 | GFP_KERNEL); | 324 | GFP_KERNEL); |
| 329 | if (controller->platform_data == NULL) | 325 | BUG_ON(controller->platform_data == NULL); |
| 330 | BUG(); | ||
| 331 | sn_platform_data = | 326 | sn_platform_data = |
| 332 | (struct sn_platform_data *) controller->platform_data; | 327 | (struct sn_platform_data *) controller->platform_data; |
| 333 | sn_platform_data->provider_soft = provider_soft; | 328 | sn_platform_data->provider_soft = provider_soft; |
diff --git a/arch/ia64/sn/kernel/io_init.c b/arch/ia64/sn/kernel/io_init.c index e2eb2da60f96..ee774c366a06 100644 --- a/arch/ia64/sn/kernel/io_init.c +++ b/arch/ia64/sn/kernel/io_init.c | |||
| @@ -128,8 +128,7 @@ sn_legacy_pci_window_fixup(struct pci_controller *controller, | |||
| 128 | { | 128 | { |
| 129 | controller->window = kcalloc(2, sizeof(struct pci_window), | 129 | controller->window = kcalloc(2, sizeof(struct pci_window), |
| 130 | GFP_KERNEL); | 130 | GFP_KERNEL); |
| 131 | if (controller->window == NULL) | 131 | BUG_ON(controller->window == NULL); |
| 132 | BUG(); | ||
| 133 | controller->window[0].offset = legacy_io; | 132 | controller->window[0].offset = legacy_io; |
| 134 | controller->window[0].resource.name = "legacy_io"; | 133 | controller->window[0].resource.name = "legacy_io"; |
| 135 | controller->window[0].resource.flags = IORESOURCE_IO; | 134 | controller->window[0].resource.flags = IORESOURCE_IO; |
| @@ -168,8 +167,7 @@ sn_pci_window_fixup(struct pci_dev *dev, unsigned int count, | |||
| 168 | idx = controller->windows; | 167 | idx = controller->windows; |
| 169 | new_count = controller->windows + count; | 168 | new_count = controller->windows + count; |
| 170 | new_window = kcalloc(new_count, sizeof(struct pci_window), GFP_KERNEL); | 169 | new_window = kcalloc(new_count, sizeof(struct pci_window), GFP_KERNEL); |
| 171 | if (new_window == NULL) | 170 | BUG_ON(new_window == NULL); |
| 172 | BUG(); | ||
| 173 | if (controller->window) { | 171 | if (controller->window) { |
| 174 | memcpy(new_window, controller->window, | 172 | memcpy(new_window, controller->window, |
| 175 | sizeof(struct pci_window) * controller->windows); | 173 | sizeof(struct pci_window) * controller->windows); |
| @@ -222,8 +220,7 @@ sn_io_slot_fixup(struct pci_dev *dev) | |||
| 222 | (u64) __pa(pcidev_info), | 220 | (u64) __pa(pcidev_info), |
| 223 | (u64) __pa(sn_irq_info)); | 221 | (u64) __pa(sn_irq_info)); |
| 224 | 222 | ||
| 225 | if (status) | 223 | BUG_ON(status); /* Cannot get platform pci device information */ |
| 226 | BUG(); /* Cannot get platform pci device information */ | ||
| 227 | 224 | ||
| 228 | 225 | ||
| 229 | /* Copy over PIO Mapped Addresses */ | 226 | /* Copy over PIO Mapped Addresses */ |
| @@ -307,8 +304,7 @@ sn_pci_controller_fixup(int segment, int busnum, struct pci_bus *bus) | |||
| 307 | prom_bussoft_ptr = __va(prom_bussoft_ptr); | 304 | prom_bussoft_ptr = __va(prom_bussoft_ptr); |
| 308 | 305 | ||
| 309 | controller = kzalloc(sizeof(*controller), GFP_KERNEL); | 306 | controller = kzalloc(sizeof(*controller), GFP_KERNEL); |
| 310 | if (!controller) | 307 | BUG_ON(!controller); |
| 311 | BUG(); | ||
| 312 | controller->segment = segment; | 308 | controller->segment = segment; |
| 313 | 309 | ||
| 314 | /* | 310 | /* |
diff --git a/arch/ia64/sn/kernel/setup.c b/arch/ia64/sn/kernel/setup.c index 02c5b8a9fb60..e456f062f241 100644 --- a/arch/ia64/sn/kernel/setup.c +++ b/arch/ia64/sn/kernel/setup.c | |||
| @@ -732,8 +732,7 @@ void __init build_cnode_tables(void) | |||
| 732 | kl_config_hdr_t *klgraph_header; | 732 | kl_config_hdr_t *klgraph_header; |
| 733 | nasid = cnodeid_to_nasid(node); | 733 | nasid = cnodeid_to_nasid(node); |
| 734 | klgraph_header = ia64_sn_get_klconfig_addr(nasid); | 734 | klgraph_header = ia64_sn_get_klconfig_addr(nasid); |
| 735 | if (klgraph_header == NULL) | 735 | BUG_ON(klgraph_header == NULL); |
| 736 | BUG(); | ||
| 737 | brd = NODE_OFFSET_TO_LBOARD(nasid, klgraph_header->ch_board_info); | 736 | brd = NODE_OFFSET_TO_LBOARD(nasid, klgraph_header->ch_board_info); |
| 738 | while (brd) { | 737 | while (brd) { |
| 739 | if (board_needs_cnode(brd->brd_type) && physical_node_map[brd->brd_nasid] < 0) { | 738 | if (board_needs_cnode(brd->brd_type) && physical_node_map[brd->brd_nasid] < 0) { |
| @@ -750,7 +749,7 @@ nasid_slice_to_cpuid(int nasid, int slice) | |||
| 750 | { | 749 | { |
| 751 | long cpu; | 750 | long cpu; |
| 752 | 751 | ||
| 753 | for (cpu = 0; cpu < NR_CPUS; cpu++) | 752 | for (cpu = 0; cpu < nr_cpu_ids; cpu++) |
| 754 | if (cpuid_to_nasid(cpu) == nasid && | 753 | if (cpuid_to_nasid(cpu) == nasid && |
| 755 | cpuid_to_slice(cpu) == slice) | 754 | cpuid_to_slice(cpu) == slice) |
| 756 | return cpu; | 755 | return cpu; |
diff --git a/arch/ia64/sn/kernel/sn2/sn2_smp.c b/arch/ia64/sn/kernel/sn2/sn2_smp.c index e585f9a2afb9..1176506b2bae 100644 --- a/arch/ia64/sn/kernel/sn2/sn2_smp.c +++ b/arch/ia64/sn/kernel/sn2/sn2_smp.c | |||
| @@ -133,7 +133,7 @@ sn2_ipi_flush_all_tlb(struct mm_struct *mm) | |||
| 133 | unsigned long itc; | 133 | unsigned long itc; |
| 134 | 134 | ||
| 135 | itc = ia64_get_itc(); | 135 | itc = ia64_get_itc(); |
| 136 | smp_flush_tlb_cpumask(mm->cpu_vm_mask); | 136 | smp_flush_tlb_cpumask(*mm_cpumask(mm)); |
| 137 | itc = ia64_get_itc() - itc; | 137 | itc = ia64_get_itc() - itc; |
| 138 | __get_cpu_var(ptcstats).shub_ipi_flushes_itc_clocks += itc; | 138 | __get_cpu_var(ptcstats).shub_ipi_flushes_itc_clocks += itc; |
| 139 | __get_cpu_var(ptcstats).shub_ipi_flushes++; | 139 | __get_cpu_var(ptcstats).shub_ipi_flushes++; |
| @@ -182,7 +182,7 @@ sn2_global_tlb_purge(struct mm_struct *mm, unsigned long start, | |||
| 182 | nodes_clear(nodes_flushed); | 182 | nodes_clear(nodes_flushed); |
| 183 | i = 0; | 183 | i = 0; |
| 184 | 184 | ||
| 185 | for_each_cpu_mask(cpu, mm->cpu_vm_mask) { | 185 | for_each_cpu(cpu, mm_cpumask(mm)) { |
| 186 | cnode = cpu_to_node(cpu); | 186 | cnode = cpu_to_node(cpu); |
| 187 | node_set(cnode, nodes_flushed); | 187 | node_set(cnode, nodes_flushed); |
| 188 | lcpu = cpu; | 188 | lcpu = cpu; |
| @@ -461,7 +461,7 @@ bool sn_cpu_disable_allowed(int cpu) | |||
| 461 | 461 | ||
| 462 | static void *sn2_ptc_seq_start(struct seq_file *file, loff_t * offset) | 462 | static void *sn2_ptc_seq_start(struct seq_file *file, loff_t * offset) |
| 463 | { | 463 | { |
| 464 | if (*offset < NR_CPUS) | 464 | if (*offset < nr_cpu_ids) |
| 465 | return offset; | 465 | return offset; |
| 466 | return NULL; | 466 | return NULL; |
| 467 | } | 467 | } |
| @@ -469,7 +469,7 @@ static void *sn2_ptc_seq_start(struct seq_file *file, loff_t * offset) | |||
| 469 | static void *sn2_ptc_seq_next(struct seq_file *file, void *data, loff_t * offset) | 469 | static void *sn2_ptc_seq_next(struct seq_file *file, void *data, loff_t * offset) |
| 470 | { | 470 | { |
| 471 | (*offset)++; | 471 | (*offset)++; |
| 472 | if (*offset < NR_CPUS) | 472 | if (*offset < nr_cpu_ids) |
| 473 | return offset; | 473 | return offset; |
| 474 | return NULL; | 474 | return NULL; |
| 475 | } | 475 | } |
| @@ -491,7 +491,7 @@ static int sn2_ptc_seq_show(struct seq_file *file, void *data) | |||
| 491 | seq_printf(file, "# ptctest %d, flushopt %d\n", sn2_ptctest, sn2_flush_opt); | 491 | seq_printf(file, "# ptctest %d, flushopt %d\n", sn2_ptctest, sn2_flush_opt); |
| 492 | } | 492 | } |
| 493 | 493 | ||
| 494 | if (cpu < NR_CPUS && cpu_online(cpu)) { | 494 | if (cpu < nr_cpu_ids && cpu_online(cpu)) { |
| 495 | stat = &per_cpu(ptcstats, cpu); | 495 | stat = &per_cpu(ptcstats, cpu); |
| 496 | seq_printf(file, "cpu %d %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld\n", cpu, stat->ptc_l, | 496 | seq_printf(file, "cpu %d %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld\n", cpu, stat->ptc_l, |
| 497 | stat->change_rid, stat->shub_ptc_flushes, stat->nodes_flushed, | 497 | stat->change_rid, stat->shub_ptc_flushes, stat->nodes_flushed, |
| @@ -554,7 +554,7 @@ static int __init sn2_ptc_init(void) | |||
| 554 | 554 | ||
| 555 | proc_sn2_ptc = proc_create(PTC_BASENAME, 0444, | 555 | proc_sn2_ptc = proc_create(PTC_BASENAME, 0444, |
| 556 | NULL, &proc_sn2_ptc_operations); | 556 | NULL, &proc_sn2_ptc_operations); |
| 557 | if (!&proc_sn2_ptc_operations) { | 557 | if (!proc_sn2_ptc) { |
| 558 | printk(KERN_ERR "unable to create %s proc entry", PTC_BASENAME); | 558 | printk(KERN_ERR "unable to create %s proc entry", PTC_BASENAME); |
| 559 | return -EINVAL; | 559 | return -EINVAL; |
| 560 | } | 560 | } |
diff --git a/arch/ia64/sn/kernel/sn2/sn_hwperf.c b/arch/ia64/sn/kernel/sn2/sn_hwperf.c index be339477f906..9e6491cf72bd 100644 --- a/arch/ia64/sn/kernel/sn2/sn_hwperf.c +++ b/arch/ia64/sn/kernel/sn2/sn_hwperf.c | |||
| @@ -275,8 +275,7 @@ static int sn_hwperf_get_nearest_node_objdata(struct sn_hwperf_object_info *objb | |||
| 275 | 275 | ||
| 276 | /* get its interconnect topology */ | 276 | /* get its interconnect topology */ |
| 277 | sz = op->ports * sizeof(struct sn_hwperf_port_info); | 277 | sz = op->ports * sizeof(struct sn_hwperf_port_info); |
| 278 | if (sz > sizeof(ptdata)) | 278 | BUG_ON(sz > sizeof(ptdata)); |
| 279 | BUG(); | ||
| 280 | e = ia64_sn_hwperf_op(sn_hwperf_master_nasid, | 279 | e = ia64_sn_hwperf_op(sn_hwperf_master_nasid, |
| 281 | SN_HWPERF_ENUM_PORTS, nodeobj->id, sz, | 280 | SN_HWPERF_ENUM_PORTS, nodeobj->id, sz, |
| 282 | (u64)&ptdata, 0, 0, NULL); | 281 | (u64)&ptdata, 0, 0, NULL); |
| @@ -310,8 +309,7 @@ static int sn_hwperf_get_nearest_node_objdata(struct sn_hwperf_object_info *objb | |||
| 310 | if (router && (!found_cpu || !found_mem)) { | 309 | if (router && (!found_cpu || !found_mem)) { |
| 311 | /* search for a node connected to the same router */ | 310 | /* search for a node connected to the same router */ |
| 312 | sz = router->ports * sizeof(struct sn_hwperf_port_info); | 311 | sz = router->ports * sizeof(struct sn_hwperf_port_info); |
| 313 | if (sz > sizeof(ptdata)) | 312 | BUG_ON(sz > sizeof(ptdata)); |
| 314 | BUG(); | ||
| 315 | e = ia64_sn_hwperf_op(sn_hwperf_master_nasid, | 313 | e = ia64_sn_hwperf_op(sn_hwperf_master_nasid, |
| 316 | SN_HWPERF_ENUM_PORTS, router->id, sz, | 314 | SN_HWPERF_ENUM_PORTS, router->id, sz, |
| 317 | (u64)&ptdata, 0, 0, NULL); | 315 | (u64)&ptdata, 0, 0, NULL); |
| @@ -612,7 +610,7 @@ static int sn_hwperf_op_cpu(struct sn_hwperf_op_info *op_info) | |||
| 612 | op_info->a->arg &= SN_HWPERF_ARG_OBJID_MASK; | 610 | op_info->a->arg &= SN_HWPERF_ARG_OBJID_MASK; |
| 613 | 611 | ||
| 614 | if (cpu != SN_HWPERF_ARG_ANY_CPU) { | 612 | if (cpu != SN_HWPERF_ARG_ANY_CPU) { |
| 615 | if (cpu >= NR_CPUS || !cpu_online(cpu)) { | 613 | if (cpu >= nr_cpu_ids || !cpu_online(cpu)) { |
| 616 | r = -EINVAL; | 614 | r = -EINVAL; |
| 617 | goto out; | 615 | goto out; |
| 618 | } | 616 | } |
diff --git a/arch/ia64/sn/pci/pcibr/pcibr_dma.c b/arch/ia64/sn/pci/pcibr/pcibr_dma.c index 060df4aa9916..c659ad5613a0 100644 --- a/arch/ia64/sn/pci/pcibr/pcibr_dma.c +++ b/arch/ia64/sn/pci/pcibr/pcibr_dma.c | |||
| @@ -256,9 +256,7 @@ void sn_dma_flush(u64 addr) | |||
| 256 | 256 | ||
| 257 | hubinfo = (NODEPDA(nasid_to_cnodeid(nasid)))->pdinfo; | 257 | hubinfo = (NODEPDA(nasid_to_cnodeid(nasid)))->pdinfo; |
| 258 | 258 | ||
| 259 | if (!hubinfo) { | 259 | BUG_ON(!hubinfo); |
| 260 | BUG(); | ||
| 261 | } | ||
| 262 | 260 | ||
| 263 | flush_nasid_list = &hubinfo->hdi_flush_nasid_list; | 261 | flush_nasid_list = &hubinfo->hdi_flush_nasid_list; |
| 264 | if (flush_nasid_list->widget_p == NULL) | 262 | if (flush_nasid_list->widget_p == NULL) |
diff --git a/arch/ia64/xen/Makefile b/arch/ia64/xen/Makefile index 0ad0224693d9..e6f4a0a74228 100644 --- a/arch/ia64/xen/Makefile +++ b/arch/ia64/xen/Makefile | |||
| @@ -3,14 +3,29 @@ | |||
| 3 | # | 3 | # |
| 4 | 4 | ||
| 5 | obj-y := hypercall.o xenivt.o xensetup.o xen_pv_ops.o irq_xen.o \ | 5 | obj-y := hypercall.o xenivt.o xensetup.o xen_pv_ops.o irq_xen.o \ |
| 6 | hypervisor.o xencomm.o xcom_hcall.o grant-table.o time.o suspend.o | 6 | hypervisor.o xencomm.o xcom_hcall.o grant-table.o time.o suspend.o \ |
| 7 | gate-data.o | ||
| 7 | 8 | ||
| 8 | obj-$(CONFIG_IA64_GENERIC) += machvec.o | 9 | obj-$(CONFIG_IA64_GENERIC) += machvec.o |
| 9 | 10 | ||
| 11 | # The gate DSO image is built using a special linker script. | ||
| 12 | include $(srctree)/arch/ia64/kernel/Makefile.gate | ||
| 13 | |||
| 14 | # tell that these are compiled for xen | ||
| 15 | CPPFLAGS_gate.lds += -D__IA64_GATE_PARAVIRTUALIZED_XEN | ||
| 16 | AFLAGS_gate.o += -D__IA64_ASM_PARAVIRTUALIZED_XEN -D__IA64_GATE_PARAVIRTUALIZED_XEN | ||
| 17 | |||
| 18 | # use same file of native. | ||
| 19 | $(obj)/gate.o: $(src)/../kernel/gate.S FORCE | ||
| 20 | $(call if_changed_dep,as_o_S) | ||
| 21 | $(obj)/gate.lds: $(src)/../kernel/gate.lds.S FORCE | ||
| 22 | $(call if_changed_dep,cpp_lds_S) | ||
| 23 | |||
| 24 | |||
| 10 | AFLAGS_xenivt.o += -D__IA64_ASM_PARAVIRTUALIZED_XEN | 25 | AFLAGS_xenivt.o += -D__IA64_ASM_PARAVIRTUALIZED_XEN |
| 11 | 26 | ||
| 12 | # xen multi compile | 27 | # xen multi compile |
| 13 | ASM_PARAVIRT_MULTI_COMPILE_SRCS = ivt.S entry.S | 28 | ASM_PARAVIRT_MULTI_COMPILE_SRCS = ivt.S entry.S fsys.S |
| 14 | ASM_PARAVIRT_OBJS = $(addprefix xen-,$(ASM_PARAVIRT_MULTI_COMPILE_SRCS:.S=.o)) | 29 | ASM_PARAVIRT_OBJS = $(addprefix xen-,$(ASM_PARAVIRT_MULTI_COMPILE_SRCS:.S=.o)) |
| 15 | obj-y += $(ASM_PARAVIRT_OBJS) | 30 | obj-y += $(ASM_PARAVIRT_OBJS) |
| 16 | define paravirtualized_xen | 31 | define paravirtualized_xen |
diff --git a/arch/ia64/xen/gate-data.S b/arch/ia64/xen/gate-data.S new file mode 100644 index 000000000000..7d4830afc91d --- /dev/null +++ b/arch/ia64/xen/gate-data.S | |||
| @@ -0,0 +1,3 @@ | |||
| 1 | .section .data.gate.xen, "aw" | ||
| 2 | |||
| 3 | .incbin "arch/ia64/xen/gate.so" | ||
diff --git a/arch/ia64/xen/hypercall.S b/arch/ia64/xen/hypercall.S index 45e02bb64a92..e32dae444dd6 100644 --- a/arch/ia64/xen/hypercall.S +++ b/arch/ia64/xen/hypercall.S | |||
| @@ -9,6 +9,7 @@ | |||
| 9 | #include <asm/intrinsics.h> | 9 | #include <asm/intrinsics.h> |
| 10 | #include <asm/xen/privop.h> | 10 | #include <asm/xen/privop.h> |
| 11 | 11 | ||
| 12 | #ifdef __INTEL_COMPILER | ||
| 12 | /* | 13 | /* |
| 13 | * Hypercalls without parameter. | 14 | * Hypercalls without parameter. |
| 14 | */ | 15 | */ |
| @@ -72,6 +73,7 @@ GLOBAL_ENTRY(xen_set_rr0_to_rr4) | |||
| 72 | br.ret.sptk.many rp | 73 | br.ret.sptk.many rp |
| 73 | ;; | 74 | ;; |
| 74 | END(xen_set_rr0_to_rr4) | 75 | END(xen_set_rr0_to_rr4) |
| 76 | #endif | ||
| 75 | 77 | ||
| 76 | GLOBAL_ENTRY(xen_send_ipi) | 78 | GLOBAL_ENTRY(xen_send_ipi) |
| 77 | mov r14=r32 | 79 | mov r14=r32 |
diff --git a/arch/ia64/xen/time.c b/arch/ia64/xen/time.c index 68d6204c3f16..fb8332690179 100644 --- a/arch/ia64/xen/time.c +++ b/arch/ia64/xen/time.c | |||
| @@ -175,10 +175,58 @@ static void xen_itc_jitter_data_reset(void) | |||
| 175 | } while (unlikely(ret != lcycle)); | 175 | } while (unlikely(ret != lcycle)); |
| 176 | } | 176 | } |
| 177 | 177 | ||
| 178 | /* based on xen_sched_clock() in arch/x86/xen/time.c. */ | ||
| 179 | /* | ||
| 180 | * This relies on HAVE_UNSTABLE_SCHED_CLOCK. If it can't be defined, | ||
| 181 | * something with similar logic should be implemented here. | ||
| 182 | */ | ||
| 183 | /* | ||
| 184 | * Xen sched_clock implementation. Returns the number of unstolen | ||
| 185 | * nanoseconds, which is nanoseconds the VCPU spent in RUNNING+BLOCKED | ||
| 186 | * states. | ||
| 187 | */ | ||
| 188 | static unsigned long long xen_sched_clock(void) | ||
| 189 | { | ||
| 190 | struct vcpu_runstate_info runstate; | ||
| 191 | |||
| 192 | unsigned long long now; | ||
| 193 | unsigned long long offset; | ||
| 194 | unsigned long long ret; | ||
| 195 | |||
| 196 | /* | ||
| 197 | * Ideally sched_clock should be called on a per-cpu basis | ||
| 198 | * anyway, so preempt should already be disabled, but that's | ||
| 199 | * not current practice at the moment. | ||
| 200 | */ | ||
| 201 | preempt_disable(); | ||
| 202 | |||
| 203 | /* | ||
| 204 | * both ia64_native_sched_clock() and xen's runstate are | ||
| 205 | * based on mAR.ITC. So difference of them makes sense. | ||
| 206 | */ | ||
| 207 | now = ia64_native_sched_clock(); | ||
| 208 | |||
| 209 | get_runstate_snapshot(&runstate); | ||
| 210 | |||
| 211 | WARN_ON(runstate.state != RUNSTATE_running); | ||
| 212 | |||
| 213 | offset = 0; | ||
| 214 | if (now > runstate.state_entry_time) | ||
| 215 | offset = now - runstate.state_entry_time; | ||
| 216 | ret = runstate.time[RUNSTATE_blocked] + | ||
| 217 | runstate.time[RUNSTATE_running] + | ||
| 218 | offset; | ||
| 219 | |||
| 220 | preempt_enable(); | ||
| 221 | |||
| 222 | return ret; | ||
| 223 | } | ||
| 224 | |||
| 178 | struct pv_time_ops xen_time_ops __initdata = { | 225 | struct pv_time_ops xen_time_ops __initdata = { |
| 179 | .init_missing_ticks_accounting = xen_init_missing_ticks_accounting, | 226 | .init_missing_ticks_accounting = xen_init_missing_ticks_accounting, |
| 180 | .do_steal_accounting = xen_do_steal_accounting, | 227 | .do_steal_accounting = xen_do_steal_accounting, |
| 181 | .clocksource_resume = xen_itc_jitter_data_reset, | 228 | .clocksource_resume = xen_itc_jitter_data_reset, |
| 229 | .sched_clock = xen_sched_clock, | ||
| 182 | }; | 230 | }; |
| 183 | 231 | ||
| 184 | /* Called after suspend, to resume time. */ | 232 | /* Called after suspend, to resume time. */ |
diff --git a/arch/ia64/xen/xen_pv_ops.c b/arch/ia64/xen/xen_pv_ops.c index 936cff3c96e0..5e2270a999fa 100644 --- a/arch/ia64/xen/xen_pv_ops.c +++ b/arch/ia64/xen/xen_pv_ops.c | |||
| @@ -24,6 +24,7 @@ | |||
| 24 | #include <linux/irq.h> | 24 | #include <linux/irq.h> |
| 25 | #include <linux/kernel.h> | 25 | #include <linux/kernel.h> |
| 26 | #include <linux/pm.h> | 26 | #include <linux/pm.h> |
| 27 | #include <linux/unistd.h> | ||
| 27 | 28 | ||
| 28 | #include <asm/xen/hypervisor.h> | 29 | #include <asm/xen/hypervisor.h> |
| 29 | #include <asm/xen/xencomm.h> | 30 | #include <asm/xen/xencomm.h> |
| @@ -153,6 +154,13 @@ xen_post_smp_prepare_boot_cpu(void) | |||
| 153 | xen_setup_vcpu_info_placement(); | 154 | xen_setup_vcpu_info_placement(); |
| 154 | } | 155 | } |
| 155 | 156 | ||
| 157 | #ifdef ASM_SUPPORTED | ||
| 158 | static unsigned long __init_or_module | ||
| 159 | xen_patch_bundle(void *sbundle, void *ebundle, unsigned long type); | ||
| 160 | #endif | ||
| 161 | static void __init | ||
| 162 | xen_patch_branch(unsigned long tag, unsigned long type); | ||
| 163 | |||
| 156 | static const struct pv_init_ops xen_init_ops __initconst = { | 164 | static const struct pv_init_ops xen_init_ops __initconst = { |
| 157 | .banner = xen_banner, | 165 | .banner = xen_banner, |
| 158 | 166 | ||
| @@ -163,6 +171,53 @@ static const struct pv_init_ops xen_init_ops __initconst = { | |||
| 163 | .arch_setup_nomca = xen_arch_setup_nomca, | 171 | .arch_setup_nomca = xen_arch_setup_nomca, |
| 164 | 172 | ||
| 165 | .post_smp_prepare_boot_cpu = xen_post_smp_prepare_boot_cpu, | 173 | .post_smp_prepare_boot_cpu = xen_post_smp_prepare_boot_cpu, |
| 174 | #ifdef ASM_SUPPORTED | ||
| 175 | .patch_bundle = xen_patch_bundle, | ||
| 176 | #endif | ||
| 177 | .patch_branch = xen_patch_branch, | ||
| 178 | }; | ||
| 179 | |||
| 180 | /*************************************************************************** | ||
| 181 | * pv_fsys_data | ||
| 182 | * addresses for fsys | ||
| 183 | */ | ||
| 184 | |||
| 185 | extern unsigned long xen_fsyscall_table[NR_syscalls]; | ||
| 186 | extern char xen_fsys_bubble_down[]; | ||
| 187 | struct pv_fsys_data xen_fsys_data __initdata = { | ||
| 188 | .fsyscall_table = (unsigned long *)xen_fsyscall_table, | ||
| 189 | .fsys_bubble_down = (void *)xen_fsys_bubble_down, | ||
| 190 | }; | ||
| 191 | |||
| 192 | /*************************************************************************** | ||
| 193 | * pv_patchdata | ||
| 194 | * patchdata addresses | ||
| 195 | */ | ||
| 196 | |||
| 197 | #define DECLARE(name) \ | ||
| 198 | extern unsigned long __xen_start_gate_##name##_patchlist[]; \ | ||
| 199 | extern unsigned long __xen_end_gate_##name##_patchlist[] | ||
| 200 | |||
| 201 | DECLARE(fsyscall); | ||
| 202 | DECLARE(brl_fsys_bubble_down); | ||
| 203 | DECLARE(vtop); | ||
| 204 | DECLARE(mckinley_e9); | ||
| 205 | |||
| 206 | extern unsigned long __xen_start_gate_section[]; | ||
| 207 | |||
| 208 | #define ASSIGN(name) \ | ||
| 209 | .start_##name##_patchlist = \ | ||
| 210 | (unsigned long)__xen_start_gate_##name##_patchlist, \ | ||
| 211 | .end_##name##_patchlist = \ | ||
| 212 | (unsigned long)__xen_end_gate_##name##_patchlist | ||
| 213 | |||
| 214 | static struct pv_patchdata xen_patchdata __initdata = { | ||
| 215 | ASSIGN(fsyscall), | ||
| 216 | ASSIGN(brl_fsys_bubble_down), | ||
| 217 | ASSIGN(vtop), | ||
| 218 | ASSIGN(mckinley_e9), | ||
| 219 | |||
| 220 | .gate_section = (void*)__xen_start_gate_section, | ||
| 166 | }; | 221 | }; |
| 167 | 222 | ||
| 168 | /*************************************************************************** | 223 | /*************************************************************************** |
| @@ -170,6 +225,76 @@ static const struct pv_init_ops xen_init_ops __initconst = { | |||
| 170 | * intrinsics hooks. | 225 | * intrinsics hooks. |
| 171 | */ | 226 | */ |
| 172 | 227 | ||
| 228 | #ifndef ASM_SUPPORTED | ||
| 229 | static void | ||
| 230 | xen_set_itm_with_offset(unsigned long val) | ||
| 231 | { | ||
| 232 | /* ia64_cpu_local_tick() calls this with interrupt enabled. */ | ||
| 233 | /* WARN_ON(!irqs_disabled()); */ | ||
| 234 | xen_set_itm(val - XEN_MAPPEDREGS->itc_offset); | ||
| 235 | } | ||
| 236 | |||
| 237 | static unsigned long | ||
| 238 | xen_get_itm_with_offset(void) | ||
| 239 | { | ||
| 240 | /* unused at this moment */ | ||
| 241 | printk(KERN_DEBUG "%s is called.\n", __func__); | ||
| 242 | |||
| 243 | WARN_ON(!irqs_disabled()); | ||
| 244 | return ia64_native_getreg(_IA64_REG_CR_ITM) + | ||
| 245 | XEN_MAPPEDREGS->itc_offset; | ||
| 246 | } | ||
| 247 | |||
| 248 | /* ia64_set_itc() is only called by | ||
| 249 | * cpu_init() with ia64_set_itc(0) and ia64_sync_itc(). | ||
| 250 | * So XEN_MAPPEDREGS->itc_offset can be considered as almost constant. | ||
| 251 | */ | ||
| 252 | static void | ||
| 253 | xen_set_itc(unsigned long val) | ||
| 254 | { | ||
| 255 | unsigned long mitc; | ||
| 256 | |||
| 257 | WARN_ON(!irqs_disabled()); | ||
| 258 | mitc = ia64_native_getreg(_IA64_REG_AR_ITC); | ||
| 259 | XEN_MAPPEDREGS->itc_offset = val - mitc; | ||
| 260 | XEN_MAPPEDREGS->itc_last = val; | ||
| 261 | } | ||
| 262 | |||
| 263 | static unsigned long | ||
| 264 | xen_get_itc(void) | ||
| 265 | { | ||
| 266 | unsigned long res; | ||
| 267 | unsigned long itc_offset; | ||
| 268 | unsigned long itc_last; | ||
| 269 | unsigned long ret_itc_last; | ||
| 270 | |||
| 271 | itc_offset = XEN_MAPPEDREGS->itc_offset; | ||
| 272 | do { | ||
| 273 | itc_last = XEN_MAPPEDREGS->itc_last; | ||
| 274 | res = ia64_native_getreg(_IA64_REG_AR_ITC); | ||
| 275 | res += itc_offset; | ||
| 276 | if (itc_last >= res) | ||
| 277 | res = itc_last + 1; | ||
| 278 | ret_itc_last = cmpxchg(&XEN_MAPPEDREGS->itc_last, | ||
| 279 | itc_last, res); | ||
| 280 | } while (unlikely(ret_itc_last != itc_last)); | ||
| 281 | return res; | ||
| 282 | |||
| 283 | #if 0 | ||
| 284 | /* ia64_itc_udelay() calls ia64_get_itc() with interrupt enabled. | ||
| 285 | Should it be paravirtualized instead? */ | ||
| 286 | WARN_ON(!irqs_disabled()); | ||
| 287 | itc_offset = XEN_MAPPEDREGS->itc_offset; | ||
| 288 | itc_last = XEN_MAPPEDREGS->itc_last; | ||
| 289 | res = ia64_native_getreg(_IA64_REG_AR_ITC); | ||
| 290 | res += itc_offset; | ||
| 291 | if (itc_last >= res) | ||
| 292 | res = itc_last + 1; | ||
| 293 | XEN_MAPPEDREGS->itc_last = res; | ||
| 294 | return res; | ||
| 295 | #endif | ||
| 296 | } | ||
| 297 | |||
| 173 | static void xen_setreg(int regnum, unsigned long val) | 298 | static void xen_setreg(int regnum, unsigned long val) |
| 174 | { | 299 | { |
| 175 | switch (regnum) { | 300 | switch (regnum) { |
| @@ -181,11 +306,14 @@ static void xen_setreg(int regnum, unsigned long val) | |||
| 181 | xen_set_eflag(val); | 306 | xen_set_eflag(val); |
| 182 | break; | 307 | break; |
| 183 | #endif | 308 | #endif |
| 309 | case _IA64_REG_AR_ITC: | ||
| 310 | xen_set_itc(val); | ||
| 311 | break; | ||
| 184 | case _IA64_REG_CR_TPR: | 312 | case _IA64_REG_CR_TPR: |
| 185 | xen_set_tpr(val); | 313 | xen_set_tpr(val); |
| 186 | break; | 314 | break; |
| 187 | case _IA64_REG_CR_ITM: | 315 | case _IA64_REG_CR_ITM: |
| 188 | xen_set_itm(val); | 316 | xen_set_itm_with_offset(val); |
| 189 | break; | 317 | break; |
| 190 | case _IA64_REG_CR_EOI: | 318 | case _IA64_REG_CR_EOI: |
| 191 | xen_eoi(val); | 319 | xen_eoi(val); |
| @@ -209,6 +337,12 @@ static unsigned long xen_getreg(int regnum) | |||
| 209 | res = xen_get_eflag(); | 337 | res = xen_get_eflag(); |
| 210 | break; | 338 | break; |
| 211 | #endif | 339 | #endif |
| 340 | case _IA64_REG_AR_ITC: | ||
| 341 | res = xen_get_itc(); | ||
| 342 | break; | ||
| 343 | case _IA64_REG_CR_ITM: | ||
| 344 | res = xen_get_itm_with_offset(); | ||
| 345 | break; | ||
| 212 | case _IA64_REG_CR_IVR: | 346 | case _IA64_REG_CR_IVR: |
| 213 | res = xen_get_ivr(); | 347 | res = xen_get_ivr(); |
| 214 | break; | 348 | break; |
| @@ -259,8 +393,417 @@ xen_intrin_local_irq_restore(unsigned long mask) | |||
| 259 | else | 393 | else |
| 260 | xen_rsm_i(); | 394 | xen_rsm_i(); |
| 261 | } | 395 | } |
| 396 | #else | ||
| 397 | #define __DEFINE_FUNC(name, code) \ | ||
| 398 | extern const char xen_ ## name ## _direct_start[]; \ | ||
| 399 | extern const char xen_ ## name ## _direct_end[]; \ | ||
| 400 | asm (".align 32\n" \ | ||
| 401 | ".proc xen_" #name "\n" \ | ||
| 402 | "xen_" #name ":\n" \ | ||
| 403 | "xen_" #name "_direct_start:\n" \ | ||
| 404 | code \ | ||
| 405 | "xen_" #name "_direct_end:\n" \ | ||
| 406 | "br.cond.sptk.many b6\n" \ | ||
| 407 | ".endp xen_" #name "\n") | ||
| 408 | |||
| 409 | #define DEFINE_VOID_FUNC0(name, code) \ | ||
| 410 | extern void \ | ||
| 411 | xen_ ## name (void); \ | ||
| 412 | __DEFINE_FUNC(name, code) | ||
| 413 | |||
| 414 | #define DEFINE_VOID_FUNC1(name, code) \ | ||
| 415 | extern void \ | ||
| 416 | xen_ ## name (unsigned long arg); \ | ||
| 417 | __DEFINE_FUNC(name, code) | ||
| 418 | |||
| 419 | #define DEFINE_VOID_FUNC1_VOID(name, code) \ | ||
| 420 | extern void \ | ||
| 421 | xen_ ## name (void *arg); \ | ||
| 422 | __DEFINE_FUNC(name, code) | ||
| 423 | |||
| 424 | #define DEFINE_VOID_FUNC2(name, code) \ | ||
| 425 | extern void \ | ||
| 426 | xen_ ## name (unsigned long arg0, \ | ||
| 427 | unsigned long arg1); \ | ||
| 428 | __DEFINE_FUNC(name, code) | ||
| 262 | 429 | ||
| 263 | static const struct pv_cpu_ops xen_cpu_ops __initdata = { | 430 | #define DEFINE_FUNC0(name, code) \ |
| 431 | extern unsigned long \ | ||
| 432 | xen_ ## name (void); \ | ||
| 433 | __DEFINE_FUNC(name, code) | ||
| 434 | |||
| 435 | #define DEFINE_FUNC1(name, type, code) \ | ||
| 436 | extern unsigned long \ | ||
| 437 | xen_ ## name (type arg); \ | ||
| 438 | __DEFINE_FUNC(name, code) | ||
| 439 | |||
| 440 | #define XEN_PSR_I_ADDR_ADDR (XSI_BASE + XSI_PSR_I_ADDR_OFS) | ||
| 441 | |||
| 442 | /* | ||
| 443 | * static void xen_set_itm_with_offset(unsigned long val) | ||
| 444 | * xen_set_itm(val - XEN_MAPPEDREGS->itc_offset); | ||
| 445 | */ | ||
| 446 | /* 2 bundles */ | ||
| 447 | DEFINE_VOID_FUNC1(set_itm_with_offset, | ||
| 448 | "mov r2 = " __stringify(XSI_BASE) " + " | ||
| 449 | __stringify(XSI_ITC_OFFSET_OFS) "\n" | ||
| 450 | ";;\n" | ||
| 451 | "ld8 r3 = [r2]\n" | ||
| 452 | ";;\n" | ||
| 453 | "sub r8 = r8, r3\n" | ||
| 454 | "break " __stringify(HYPERPRIVOP_SET_ITM) "\n"); | ||
| 455 | |||
| 456 | /* | ||
| 457 | * static unsigned long xen_get_itm_with_offset(void) | ||
| 458 | * return ia64_native_getreg(_IA64_REG_CR_ITM) + XEN_MAPPEDREGS->itc_offset; | ||
| 459 | */ | ||
| 460 | /* 2 bundles */ | ||
| 461 | DEFINE_FUNC0(get_itm_with_offset, | ||
| 462 | "mov r2 = " __stringify(XSI_BASE) " + " | ||
| 463 | __stringify(XSI_ITC_OFFSET_OFS) "\n" | ||
| 464 | ";;\n" | ||
| 465 | "ld8 r3 = [r2]\n" | ||
| 466 | "mov r8 = cr.itm\n" | ||
| 467 | ";;\n" | ||
| 468 | "add r8 = r8, r2\n"); | ||
| 469 | |||
| 470 | /* | ||
| 471 | * static void xen_set_itc(unsigned long val) | ||
| 472 | * unsigned long mitc; | ||
| 473 | * | ||
| 474 | * WARN_ON(!irqs_disabled()); | ||
| 475 | * mitc = ia64_native_getreg(_IA64_REG_AR_ITC); | ||
| 476 | * XEN_MAPPEDREGS->itc_offset = val - mitc; | ||
| 477 | * XEN_MAPPEDREGS->itc_last = val; | ||
| 478 | */ | ||
| 479 | /* 2 bundles */ | ||
| 480 | DEFINE_VOID_FUNC1(set_itc, | ||
| 481 | "mov r2 = " __stringify(XSI_BASE) " + " | ||
| 482 | __stringify(XSI_ITC_LAST_OFS) "\n" | ||
| 483 | "mov r3 = ar.itc\n" | ||
| 484 | ";;\n" | ||
| 485 | "sub r3 = r8, r3\n" | ||
| 486 | "st8 [r2] = r8, " | ||
| 487 | __stringify(XSI_ITC_LAST_OFS) " - " | ||
| 488 | __stringify(XSI_ITC_OFFSET_OFS) "\n" | ||
| 489 | ";;\n" | ||
| 490 | "st8 [r2] = r3\n"); | ||
| 491 | |||
| 492 | /* | ||
| 493 | * static unsigned long xen_get_itc(void) | ||
| 494 | * unsigned long res; | ||
| 495 | * unsigned long itc_offset; | ||
| 496 | * unsigned long itc_last; | ||
| 497 | * unsigned long ret_itc_last; | ||
| 498 | * | ||
| 499 | * itc_offset = XEN_MAPPEDREGS->itc_offset; | ||
| 500 | * do { | ||
| 501 | * itc_last = XEN_MAPPEDREGS->itc_last; | ||
| 502 | * res = ia64_native_getreg(_IA64_REG_AR_ITC); | ||
| 503 | * res += itc_offset; | ||
| 504 | * if (itc_last >= res) | ||
| 505 | * res = itc_last + 1; | ||
| 506 | * ret_itc_last = cmpxchg(&XEN_MAPPEDREGS->itc_last, | ||
| 507 | * itc_last, res); | ||
| 508 | * } while (unlikely(ret_itc_last != itc_last)); | ||
| 509 | * return res; | ||
| 510 | */ | ||
| 511 | /* 5 bundles */ | ||
| 512 | DEFINE_FUNC0(get_itc, | ||
| 513 | "mov r2 = " __stringify(XSI_BASE) " + " | ||
| 514 | __stringify(XSI_ITC_OFFSET_OFS) "\n" | ||
| 515 | ";;\n" | ||
| 516 | "ld8 r9 = [r2], " __stringify(XSI_ITC_LAST_OFS) " - " | ||
| 517 | __stringify(XSI_ITC_OFFSET_OFS) "\n" | ||
| 518 | /* r9 = itc_offset */ | ||
| 519 | /* r2 = XSI_ITC_OFFSET */ | ||
| 520 | "888:\n" | ||
| 521 | "mov r8 = ar.itc\n" /* res = ar.itc */ | ||
| 522 | ";;\n" | ||
| 523 | "ld8 r3 = [r2]\n" /* r3 = itc_last */ | ||
| 524 | "add r8 = r8, r9\n" /* res = ar.itc + itc_offset */ | ||
| 525 | ";;\n" | ||
| 526 | "cmp.gtu p6, p0 = r3, r8\n" | ||
| 527 | ";;\n" | ||
| 528 | "(p6) add r8 = 1, r3\n" /* if (itc_last > res) itc_last + 1 */ | ||
| 529 | ";;\n" | ||
| 530 | "mov ar.ccv = r8\n" | ||
| 531 | ";;\n" | ||
| 532 | "cmpxchg8.acq r10 = [r2], r8, ar.ccv\n" | ||
| 533 | ";;\n" | ||
| 534 | "cmp.ne p6, p0 = r10, r3\n" | ||
| 535 | "(p6) hint @pause\n" | ||
| 536 | "(p6) br.cond.spnt 888b\n"); | ||
| 537 | |||
| 538 | DEFINE_VOID_FUNC1_VOID(fc, | ||
| 539 | "break " __stringify(HYPERPRIVOP_FC) "\n"); | ||
| 540 | |||
| 541 | /* | ||
| 542 | * psr_i_addr_addr = XEN_PSR_I_ADDR_ADDR | ||
| 543 | * masked_addr = *psr_i_addr_addr | ||
| 544 | * pending_intr_addr = masked_addr - 1 | ||
| 545 | * if (val & IA64_PSR_I) { | ||
| 546 | * masked = *masked_addr | ||
| 547 | * *masked_addr = 0:xen_set_virtual_psr_i(1) | ||
| 548 | * compiler barrier | ||
| 549 | * if (masked) { | ||
| 550 | * uint8_t pending = *pending_intr_addr; | ||
| 551 | * if (pending) | ||
| 552 | * XEN_HYPER_SSM_I | ||
| 553 | * } | ||
| 554 | * } else { | ||
| 555 | * *masked_addr = 1:xen_set_virtual_psr_i(0) | ||
| 556 | * } | ||
| 557 | */ | ||
| 558 | /* 6 bundles */ | ||
| 559 | DEFINE_VOID_FUNC1(intrin_local_irq_restore, | ||
| 560 | /* r8 = input value: 0 or IA64_PSR_I | ||
| 561 | * p6 = (flags & IA64_PSR_I) | ||
| 562 | * = if clause | ||
| 563 | * p7 = !(flags & IA64_PSR_I) | ||
| 564 | * = else clause | ||
| 565 | */ | ||
| 566 | "cmp.ne p6, p7 = r8, r0\n" | ||
| 567 | "mov r9 = " __stringify(XEN_PSR_I_ADDR_ADDR) "\n" | ||
| 568 | ";;\n" | ||
| 569 | /* r9 = XEN_PSR_I_ADDR */ | ||
| 570 | "ld8 r9 = [r9]\n" | ||
| 571 | ";;\n" | ||
| 572 | |||
| 573 | /* r10 = masked previous value */ | ||
| 574 | "(p6) ld1.acq r10 = [r9]\n" | ||
| 575 | ";;\n" | ||
| 576 | |||
| 577 | /* p8 = !masked interrupt masked previously? */ | ||
| 578 | "(p6) cmp.ne.unc p8, p0 = r10, r0\n" | ||
| 579 | |||
| 580 | /* p7 = else clause */ | ||
| 581 | "(p7) mov r11 = 1\n" | ||
| 582 | ";;\n" | ||
| 583 | /* masked = 1 */ | ||
| 584 | "(p7) st1.rel [r9] = r11\n" | ||
| 585 | |||
| 586 | /* p6 = if clause */ | ||
| 587 | /* masked = 0 | ||
| 588 | * r9 = masked_addr - 1 | ||
| 589 | * = pending_intr_addr | ||
| 590 | */ | ||
| 591 | "(p8) st1.rel [r9] = r0, -1\n" | ||
| 592 | ";;\n" | ||
| 593 | /* r8 = pending_intr */ | ||
| 594 | "(p8) ld1.acq r11 = [r9]\n" | ||
| 595 | ";;\n" | ||
| 596 | /* p9 = interrupt pending? */ | ||
| 597 | "(p8) cmp.ne.unc p9, p10 = r11, r0\n" | ||
| 598 | ";;\n" | ||
| 599 | "(p10) mf\n" | ||
| 600 | /* issue hypercall to trigger interrupt */ | ||
| 601 | "(p9) break " __stringify(HYPERPRIVOP_SSM_I) "\n"); | ||
| 602 | |||
| 603 | DEFINE_VOID_FUNC2(ptcga, | ||
| 604 | "break " __stringify(HYPERPRIVOP_PTC_GA) "\n"); | ||
| 605 | DEFINE_VOID_FUNC2(set_rr, | ||
| 606 | "break " __stringify(HYPERPRIVOP_SET_RR) "\n"); | ||
| 607 | |||
| 608 | /* | ||
| 609 | * tmp = XEN_MAPPEDREGS->interrupt_mask_addr = XEN_PSR_I_ADDR_ADDR; | ||
| 610 | * tmp = *tmp | ||
| 611 | * tmp = *tmp; | ||
| 612 | * psr_i = tmp? 0: IA64_PSR_I; | ||
| 613 | */ | ||
| 614 | /* 4 bundles */ | ||
| 615 | DEFINE_FUNC0(get_psr_i, | ||
| 616 | "mov r9 = " __stringify(XEN_PSR_I_ADDR_ADDR) "\n" | ||
| 617 | ";;\n" | ||
| 618 | "ld8 r9 = [r9]\n" /* r9 = XEN_PSR_I_ADDR */ | ||
| 619 | "mov r8 = 0\n" /* psr_i = 0 */ | ||
| 620 | ";;\n" | ||
| 621 | "ld1.acq r9 = [r9]\n" /* r9 = XEN_PSR_I */ | ||
| 622 | ";;\n" | ||
| 623 | "cmp.eq.unc p6, p0 = r9, r0\n" /* p6 = (XEN_PSR_I != 0) */ | ||
| 624 | ";;\n" | ||
| 625 | "(p6) mov r8 = " __stringify(1 << IA64_PSR_I_BIT) "\n"); | ||
| 626 | |||
| 627 | DEFINE_FUNC1(thash, unsigned long, | ||
| 628 | "break " __stringify(HYPERPRIVOP_THASH) "\n"); | ||
| 629 | DEFINE_FUNC1(get_cpuid, int, | ||
| 630 | "break " __stringify(HYPERPRIVOP_GET_CPUID) "\n"); | ||
| 631 | DEFINE_FUNC1(get_pmd, int, | ||
| 632 | "break " __stringify(HYPERPRIVOP_GET_PMD) "\n"); | ||
| 633 | DEFINE_FUNC1(get_rr, unsigned long, | ||
| 634 | "break " __stringify(HYPERPRIVOP_GET_RR) "\n"); | ||
| 635 | |||
| 636 | /* | ||
| 637 | * void xen_privop_ssm_i(void) | ||
| 638 | * | ||
| 639 | * int masked = !xen_get_virtual_psr_i(); | ||
| 640 | * // masked = *(*XEN_MAPPEDREGS->interrupt_mask_addr) | ||
| 641 | * xen_set_virtual_psr_i(1) | ||
| 642 | * // *(*XEN_MAPPEDREGS->interrupt_mask_addr) = 0 | ||
| 643 | * // compiler barrier | ||
| 644 | * if (masked) { | ||
| 645 | * uint8_t* pend_int_addr = | ||
| 646 | * (uint8_t*)(*XEN_MAPPEDREGS->interrupt_mask_addr) - 1; | ||
| 647 | * uint8_t pending = *pend_int_addr; | ||
| 648 | * if (pending) | ||
| 649 | * XEN_HYPER_SSM_I | ||
| 650 | * } | ||
| 651 | */ | ||
| 652 | /* 4 bundles */ | ||
| 653 | DEFINE_VOID_FUNC0(ssm_i, | ||
| 654 | "mov r8 = " __stringify(XEN_PSR_I_ADDR_ADDR) "\n" | ||
| 655 | ";;\n" | ||
| 656 | "ld8 r8 = [r8]\n" /* r8 = XEN_PSR_I_ADDR */ | ||
| 657 | ";;\n" | ||
| 658 | "ld1.acq r9 = [r8]\n" /* r9 = XEN_PSR_I */ | ||
| 659 | ";;\n" | ||
| 660 | "st1.rel [r8] = r0, -1\n" /* psr_i = 0. enable interrupt | ||
| 661 | * r8 = XEN_PSR_I_ADDR - 1 | ||
| 662 | * = pend_int_addr | ||
| 663 | */ | ||
| 664 | "cmp.eq.unc p0, p6 = r9, r0\n"/* p6 = !XEN_PSR_I | ||
| 665 | * previously interrupt | ||
| 666 | * masked? | ||
| 667 | */ | ||
| 668 | ";;\n" | ||
| 669 | "(p6) ld1.acq r8 = [r8]\n" /* r8 = xen_pend_int */ | ||
| 670 | ";;\n" | ||
| 671 | "(p6) cmp.eq.unc p6, p7 = r8, r0\n" /*interrupt pending?*/ | ||
| 672 | ";;\n" | ||
| 673 | /* issue hypercall to get interrupt */ | ||
| 674 | "(p7) break " __stringify(HYPERPRIVOP_SSM_I) "\n" | ||
| 675 | ";;\n"); | ||
| 676 | |||
| 677 | /* | ||
| 678 | * psr_i_addr_addr = XEN_MAPPEDREGS->interrupt_mask_addr | ||
| 679 | * = XEN_PSR_I_ADDR_ADDR; | ||
| 680 | * psr_i_addr = *psr_i_addr_addr; | ||
| 681 | * *psr_i_addr = 1; | ||
| 682 | */ | ||
| 683 | /* 2 bundles */ | ||
| 684 | DEFINE_VOID_FUNC0(rsm_i, | ||
| 685 | "mov r8 = " __stringify(XEN_PSR_I_ADDR_ADDR) "\n" | ||
| 686 | /* r8 = XEN_PSR_I_ADDR */ | ||
| 687 | "mov r9 = 1\n" | ||
| 688 | ";;\n" | ||
| 689 | "ld8 r8 = [r8]\n" /* r8 = XEN_PSR_I */ | ||
| 690 | ";;\n" | ||
| 691 | "st1.rel [r8] = r9\n"); /* XEN_PSR_I = 1 */ | ||
| 692 | |||
| 693 | extern void | ||
| 694 | xen_set_rr0_to_rr4(unsigned long val0, unsigned long val1, | ||
| 695 | unsigned long val2, unsigned long val3, | ||
| 696 | unsigned long val4); | ||
| 697 | __DEFINE_FUNC(set_rr0_to_rr4, | ||
| 698 | "break " __stringify(HYPERPRIVOP_SET_RR0_TO_RR4) "\n"); | ||
| 699 | |||
| 700 | |||
| 701 | extern unsigned long xen_getreg(int regnum); | ||
| 702 | #define __DEFINE_GET_REG(id, privop) \ | ||
| 703 | "mov r2 = " __stringify(_IA64_REG_ ## id) "\n" \ | ||
| 704 | ";;\n" \ | ||
| 705 | "cmp.eq p6, p0 = r2, r8\n" \ | ||
| 706 | ";;\n" \ | ||
| 707 | "(p6) break " __stringify(HYPERPRIVOP_GET_ ## privop) "\n" \ | ||
| 708 | "(p6) br.cond.sptk.many b6\n" \ | ||
| 709 | ";;\n" | ||
| 710 | |||
| 711 | __DEFINE_FUNC(getreg, | ||
| 712 | __DEFINE_GET_REG(PSR, PSR) | ||
| 713 | #ifdef CONFIG_IA32_SUPPORT | ||
| 714 | __DEFINE_GET_REG(AR_EFLAG, EFLAG) | ||
| 715 | #endif | ||
| 716 | |||
| 717 | /* get_itc */ | ||
| 718 | "mov r2 = " __stringify(_IA64_REG_AR_ITC) "\n" | ||
| 719 | ";;\n" | ||
| 720 | "cmp.eq p6, p0 = r2, r8\n" | ||
| 721 | ";;\n" | ||
| 722 | "(p6) br.cond.spnt xen_get_itc\n" | ||
| 723 | ";;\n" | ||
| 724 | |||
| 725 | /* get itm */ | ||
| 726 | "mov r2 = " __stringify(_IA64_REG_CR_ITM) "\n" | ||
| 727 | ";;\n" | ||
| 728 | "cmp.eq p6, p0 = r2, r8\n" | ||
| 729 | ";;\n" | ||
| 730 | "(p6) br.cond.spnt xen_get_itm_with_offset\n" | ||
| 731 | ";;\n" | ||
| 732 | |||
| 733 | __DEFINE_GET_REG(CR_IVR, IVR) | ||
| 734 | __DEFINE_GET_REG(CR_TPR, TPR) | ||
| 735 | |||
| 736 | /* fall back */ | ||
| 737 | "movl r2 = ia64_native_getreg_func\n" | ||
| 738 | ";;\n" | ||
| 739 | "mov b7 = r2\n" | ||
| 740 | ";;\n" | ||
| 741 | "br.cond.sptk.many b7\n"); | ||
| 742 | |||
| 743 | extern void xen_setreg(int regnum, unsigned long val); | ||
| 744 | #define __DEFINE_SET_REG(id, privop) \ | ||
| 745 | "mov r2 = " __stringify(_IA64_REG_ ## id) "\n" \ | ||
| 746 | ";;\n" \ | ||
| 747 | "cmp.eq p6, p0 = r2, r9\n" \ | ||
| 748 | ";;\n" \ | ||
| 749 | "(p6) break " __stringify(HYPERPRIVOP_ ## privop) "\n" \ | ||
| 750 | "(p6) br.cond.sptk.many b6\n" \ | ||
| 751 | ";;\n" | ||
| 752 | |||
| 753 | __DEFINE_FUNC(setreg, | ||
| 754 | /* kr0 .. kr 7*/ | ||
| 755 | /* | ||
| 756 | * if (_IA64_REG_AR_KR0 <= regnum && | ||
| 757 | * regnum <= _IA64_REG_AR_KR7) { | ||
| 758 | * register __index asm ("r8") = regnum - _IA64_REG_AR_KR0 | ||
| 759 | * register __val asm ("r9") = val | ||
| 760 | * "break HYPERPRIVOP_SET_KR" | ||
| 761 | * } | ||
| 762 | */ | ||
| 763 | "mov r17 = r9\n" | ||
| 764 | "mov r2 = " __stringify(_IA64_REG_AR_KR0) "\n" | ||
| 765 | ";;\n" | ||
| 766 | "cmp.ge p6, p0 = r9, r2\n" | ||
| 767 | "sub r17 = r17, r2\n" | ||
| 768 | ";;\n" | ||
| 769 | "(p6) cmp.ge.unc p7, p0 = " | ||
| 770 | __stringify(_IA64_REG_AR_KR7) " - " __stringify(_IA64_REG_AR_KR0) | ||
| 771 | ", r17\n" | ||
| 772 | ";;\n" | ||
| 773 | "(p7) mov r9 = r8\n" | ||
| 774 | ";;\n" | ||
| 775 | "(p7) mov r8 = r17\n" | ||
| 776 | "(p7) break " __stringify(HYPERPRIVOP_SET_KR) "\n" | ||
| 777 | |||
| 778 | /* set itm */ | ||
| 779 | "mov r2 = " __stringify(_IA64_REG_CR_ITM) "\n" | ||
| 780 | ";;\n" | ||
| 781 | "cmp.eq p6, p0 = r2, r8\n" | ||
| 782 | ";;\n" | ||
| 783 | "(p6) br.cond.spnt xen_set_itm_with_offset\n" | ||
| 784 | |||
| 785 | /* set itc */ | ||
| 786 | "mov r2 = " __stringify(_IA64_REG_AR_ITC) "\n" | ||
| 787 | ";;\n" | ||
| 788 | "cmp.eq p6, p0 = r2, r8\n" | ||
| 789 | ";;\n" | ||
| 790 | "(p6) br.cond.spnt xen_set_itc\n" | ||
| 791 | |||
| 792 | #ifdef CONFIG_IA32_SUPPORT | ||
| 793 | __DEFINE_SET_REG(AR_EFLAG, SET_EFLAG) | ||
| 794 | #endif | ||
| 795 | __DEFINE_SET_REG(CR_TPR, SET_TPR) | ||
| 796 | __DEFINE_SET_REG(CR_EOI, EOI) | ||
| 797 | |||
| 798 | /* fall back */ | ||
| 799 | "movl r2 = ia64_native_setreg_func\n" | ||
| 800 | ";;\n" | ||
| 801 | "mov b7 = r2\n" | ||
| 802 | ";;\n" | ||
| 803 | "br.cond.sptk.many b7\n"); | ||
| 804 | #endif | ||
| 805 | |||
| 806 | static const struct pv_cpu_ops xen_cpu_ops __initconst = { | ||
| 264 | .fc = xen_fc, | 807 | .fc = xen_fc, |
| 265 | .thash = xen_thash, | 808 | .thash = xen_thash, |
| 266 | .get_cpuid = xen_get_cpuid, | 809 | .get_cpuid = xen_get_cpuid, |
| @@ -337,7 +880,7 @@ xen_iosapic_write(char __iomem *iosapic, unsigned int reg, u32 val) | |||
| 337 | HYPERVISOR_physdev_op(PHYSDEVOP_apic_write, &apic_op); | 880 | HYPERVISOR_physdev_op(PHYSDEVOP_apic_write, &apic_op); |
| 338 | } | 881 | } |
| 339 | 882 | ||
| 340 | static const struct pv_iosapic_ops xen_iosapic_ops __initconst = { | 883 | static struct pv_iosapic_ops xen_iosapic_ops __initdata = { |
| 341 | .pcat_compat_init = xen_pcat_compat_init, | 884 | .pcat_compat_init = xen_pcat_compat_init, |
| 342 | .__get_irq_chip = xen_iosapic_get_irq_chip, | 885 | .__get_irq_chip = xen_iosapic_get_irq_chip, |
| 343 | 886 | ||
| @@ -355,6 +898,8 @@ xen_setup_pv_ops(void) | |||
| 355 | xen_info_init(); | 898 | xen_info_init(); |
| 356 | pv_info = xen_info; | 899 | pv_info = xen_info; |
| 357 | pv_init_ops = xen_init_ops; | 900 | pv_init_ops = xen_init_ops; |
| 901 | pv_fsys_data = xen_fsys_data; | ||
| 902 | pv_patchdata = xen_patchdata; | ||
| 358 | pv_cpu_ops = xen_cpu_ops; | 903 | pv_cpu_ops = xen_cpu_ops; |
| 359 | pv_iosapic_ops = xen_iosapic_ops; | 904 | pv_iosapic_ops = xen_iosapic_ops; |
| 360 | pv_irq_ops = xen_irq_ops; | 905 | pv_irq_ops = xen_irq_ops; |
| @@ -362,3 +907,252 @@ xen_setup_pv_ops(void) | |||
| 362 | 907 | ||
| 363 | paravirt_cpu_asm_init(&xen_cpu_asm_switch); | 908 | paravirt_cpu_asm_init(&xen_cpu_asm_switch); |
| 364 | } | 909 | } |
| 910 | |||
| 911 | #ifdef ASM_SUPPORTED | ||
| 912 | /*************************************************************************** | ||
| 913 | * binary patching | ||
| 914 | * pv_init_ops.patch_bundle | ||
| 915 | */ | ||
| 916 | |||
| 917 | #define DEFINE_FUNC_GETREG(name, privop) \ | ||
| 918 | DEFINE_FUNC0(get_ ## name, \ | ||
| 919 | "break "__stringify(HYPERPRIVOP_GET_ ## privop) "\n") | ||
| 920 | |||
| 921 | DEFINE_FUNC_GETREG(psr, PSR); | ||
| 922 | DEFINE_FUNC_GETREG(eflag, EFLAG); | ||
| 923 | DEFINE_FUNC_GETREG(ivr, IVR); | ||
| 924 | DEFINE_FUNC_GETREG(tpr, TPR); | ||
| 925 | |||
| 926 | #define DEFINE_FUNC_SET_KR(n) \ | ||
| 927 | DEFINE_VOID_FUNC0(set_kr ## n, \ | ||
| 928 | ";;\n" \ | ||
| 929 | "mov r9 = r8\n" \ | ||
| 930 | "mov r8 = " #n "\n" \ | ||
| 931 | "break " __stringify(HYPERPRIVOP_SET_KR) "\n") | ||
| 932 | |||
| 933 | DEFINE_FUNC_SET_KR(0); | ||
| 934 | DEFINE_FUNC_SET_KR(1); | ||
| 935 | DEFINE_FUNC_SET_KR(2); | ||
| 936 | DEFINE_FUNC_SET_KR(3); | ||
| 937 | DEFINE_FUNC_SET_KR(4); | ||
| 938 | DEFINE_FUNC_SET_KR(5); | ||
| 939 | DEFINE_FUNC_SET_KR(6); | ||
| 940 | DEFINE_FUNC_SET_KR(7); | ||
| 941 | |||
| 942 | #define __DEFINE_FUNC_SETREG(name, privop) \ | ||
| 943 | DEFINE_VOID_FUNC0(name, \ | ||
| 944 | "break "__stringify(HYPERPRIVOP_ ## privop) "\n") | ||
| 945 | |||
| 946 | #define DEFINE_FUNC_SETREG(name, privop) \ | ||
| 947 | __DEFINE_FUNC_SETREG(set_ ## name, SET_ ## privop) | ||
| 948 | |||
| 949 | DEFINE_FUNC_SETREG(eflag, EFLAG); | ||
| 950 | DEFINE_FUNC_SETREG(tpr, TPR); | ||
| 951 | __DEFINE_FUNC_SETREG(eoi, EOI); | ||
| 952 | |||
| 953 | extern const char xen_check_events[]; | ||
| 954 | extern const char __xen_intrin_local_irq_restore_direct_start[]; | ||
| 955 | extern const char __xen_intrin_local_irq_restore_direct_end[]; | ||
| 956 | extern const unsigned long __xen_intrin_local_irq_restore_direct_reloc; | ||
| 957 | |||
| 958 | asm ( | ||
| 959 | ".align 32\n" | ||
| 960 | ".proc xen_check_events\n" | ||
| 961 | "xen_check_events:\n" | ||
| 962 | /* masked = 0 | ||
| 963 | * r9 = masked_addr - 1 | ||
| 964 | * = pending_intr_addr | ||
| 965 | */ | ||
| 966 | "st1.rel [r9] = r0, -1\n" | ||
| 967 | ";;\n" | ||
| 968 | /* r8 = pending_intr */ | ||
| 969 | "ld1.acq r11 = [r9]\n" | ||
| 970 | ";;\n" | ||
| 971 | /* p9 = interrupt pending? */ | ||
| 972 | "cmp.ne p9, p10 = r11, r0\n" | ||
| 973 | ";;\n" | ||
| 974 | "(p10) mf\n" | ||
| 975 | /* issue hypercall to trigger interrupt */ | ||
| 976 | "(p9) break " __stringify(HYPERPRIVOP_SSM_I) "\n" | ||
| 977 | "br.cond.sptk.many b6\n" | ||
| 978 | ".endp xen_check_events\n" | ||
| 979 | "\n" | ||
| 980 | ".align 32\n" | ||
| 981 | ".proc __xen_intrin_local_irq_restore_direct\n" | ||
| 982 | "__xen_intrin_local_irq_restore_direct:\n" | ||
| 983 | "__xen_intrin_local_irq_restore_direct_start:\n" | ||
| 984 | "1:\n" | ||
| 985 | "{\n" | ||
| 986 | "cmp.ne p6, p7 = r8, r0\n" | ||
| 987 | "mov r17 = ip\n" /* get ip to calc return address */ | ||
| 988 | "mov r9 = "__stringify(XEN_PSR_I_ADDR_ADDR) "\n" | ||
| 989 | ";;\n" | ||
| 990 | "}\n" | ||
| 991 | "{\n" | ||
| 992 | /* r9 = XEN_PSR_I_ADDR */ | ||
| 993 | "ld8 r9 = [r9]\n" | ||
| 994 | ";;\n" | ||
| 995 | /* r10 = masked previous value */ | ||
| 996 | "(p6) ld1.acq r10 = [r9]\n" | ||
| 997 | "adds r17 = 1f - 1b, r17\n" /* calculate return address */ | ||
| 998 | ";;\n" | ||
| 999 | "}\n" | ||
| 1000 | "{\n" | ||
| 1001 | /* p8 = !masked interrupt masked previously? */ | ||
| 1002 | "(p6) cmp.ne.unc p8, p0 = r10, r0\n" | ||
| 1003 | "\n" | ||
| 1004 | /* p7 = else clause */ | ||
| 1005 | "(p7) mov r11 = 1\n" | ||
| 1006 | ";;\n" | ||
| 1007 | "(p8) mov b6 = r17\n" /* set return address */ | ||
| 1008 | "}\n" | ||
| 1009 | "{\n" | ||
| 1010 | /* masked = 1 */ | ||
| 1011 | "(p7) st1.rel [r9] = r11\n" | ||
| 1012 | "\n" | ||
| 1013 | "[99:]\n" | ||
| 1014 | "(p8) brl.cond.dptk.few xen_check_events\n" | ||
| 1015 | "}\n" | ||
| 1016 | /* pv calling stub is 5 bundles. fill nop to adjust return address */ | ||
| 1017 | "{\n" | ||
| 1018 | "nop 0\n" | ||
| 1019 | "nop 0\n" | ||
| 1020 | "nop 0\n" | ||
| 1021 | "}\n" | ||
| 1022 | "1:\n" | ||
| 1023 | "__xen_intrin_local_irq_restore_direct_end:\n" | ||
| 1024 | ".endp __xen_intrin_local_irq_restore_direct\n" | ||
| 1025 | "\n" | ||
| 1026 | ".align 8\n" | ||
| 1027 | "__xen_intrin_local_irq_restore_direct_reloc:\n" | ||
| 1028 | "data8 99b\n" | ||
| 1029 | ); | ||
| 1030 | |||
| 1031 | static struct paravirt_patch_bundle_elem xen_patch_bundle_elems[] | ||
| 1032 | __initdata_or_module = | ||
| 1033 | { | ||
| 1034 | #define XEN_PATCH_BUNDLE_ELEM(name, type) \ | ||
| 1035 | { \ | ||
| 1036 | (void*)xen_ ## name ## _direct_start, \ | ||
| 1037 | (void*)xen_ ## name ## _direct_end, \ | ||
| 1038 | PARAVIRT_PATCH_TYPE_ ## type, \ | ||
| 1039 | } | ||
| 1040 | |||
| 1041 | XEN_PATCH_BUNDLE_ELEM(fc, FC), | ||
| 1042 | XEN_PATCH_BUNDLE_ELEM(thash, THASH), | ||
| 1043 | XEN_PATCH_BUNDLE_ELEM(get_cpuid, GET_CPUID), | ||
| 1044 | XEN_PATCH_BUNDLE_ELEM(get_pmd, GET_PMD), | ||
| 1045 | XEN_PATCH_BUNDLE_ELEM(ptcga, PTCGA), | ||
| 1046 | XEN_PATCH_BUNDLE_ELEM(get_rr, GET_RR), | ||
| 1047 | XEN_PATCH_BUNDLE_ELEM(set_rr, SET_RR), | ||
| 1048 | XEN_PATCH_BUNDLE_ELEM(set_rr0_to_rr4, SET_RR0_TO_RR4), | ||
| 1049 | XEN_PATCH_BUNDLE_ELEM(ssm_i, SSM_I), | ||
| 1050 | XEN_PATCH_BUNDLE_ELEM(rsm_i, RSM_I), | ||
| 1051 | XEN_PATCH_BUNDLE_ELEM(get_psr_i, GET_PSR_I), | ||
| 1052 | { | ||
| 1053 | (void*)__xen_intrin_local_irq_restore_direct_start, | ||
| 1054 | (void*)__xen_intrin_local_irq_restore_direct_end, | ||
| 1055 | PARAVIRT_PATCH_TYPE_INTRIN_LOCAL_IRQ_RESTORE, | ||
| 1056 | }, | ||
| 1057 | |||
| 1058 | #define XEN_PATCH_BUNDLE_ELEM_GETREG(name, reg) \ | ||
| 1059 | { \ | ||
| 1060 | xen_get_ ## name ## _direct_start, \ | ||
| 1061 | xen_get_ ## name ## _direct_end, \ | ||
| 1062 | PARAVIRT_PATCH_TYPE_GETREG + _IA64_REG_ ## reg, \ | ||
| 1063 | } | ||
| 1064 | |||
| 1065 | XEN_PATCH_BUNDLE_ELEM_GETREG(psr, PSR), | ||
| 1066 | XEN_PATCH_BUNDLE_ELEM_GETREG(eflag, AR_EFLAG), | ||
| 1067 | |||
| 1068 | XEN_PATCH_BUNDLE_ELEM_GETREG(ivr, CR_IVR), | ||
| 1069 | XEN_PATCH_BUNDLE_ELEM_GETREG(tpr, CR_TPR), | ||
| 1070 | |||
| 1071 | XEN_PATCH_BUNDLE_ELEM_GETREG(itc, AR_ITC), | ||
| 1072 | XEN_PATCH_BUNDLE_ELEM_GETREG(itm_with_offset, CR_ITM), | ||
| 1073 | |||
| 1074 | |||
| 1075 | #define __XEN_PATCH_BUNDLE_ELEM_SETREG(name, reg) \ | ||
| 1076 | { \ | ||
| 1077 | xen_ ## name ## _direct_start, \ | ||
| 1078 | xen_ ## name ## _direct_end, \ | ||
| 1079 | PARAVIRT_PATCH_TYPE_SETREG + _IA64_REG_ ## reg, \ | ||
| 1080 | } | ||
| 1081 | |||
| 1082 | #define XEN_PATCH_BUNDLE_ELEM_SETREG(name, reg) \ | ||
| 1083 | __XEN_PATCH_BUNDLE_ELEM_SETREG(set_ ## name, reg) | ||
| 1084 | |||
| 1085 | XEN_PATCH_BUNDLE_ELEM_SETREG(kr0, AR_KR0), | ||
| 1086 | XEN_PATCH_BUNDLE_ELEM_SETREG(kr1, AR_KR1), | ||
| 1087 | XEN_PATCH_BUNDLE_ELEM_SETREG(kr2, AR_KR2), | ||
| 1088 | XEN_PATCH_BUNDLE_ELEM_SETREG(kr3, AR_KR3), | ||
| 1089 | XEN_PATCH_BUNDLE_ELEM_SETREG(kr4, AR_KR4), | ||
| 1090 | XEN_PATCH_BUNDLE_ELEM_SETREG(kr5, AR_KR5), | ||
| 1091 | XEN_PATCH_BUNDLE_ELEM_SETREG(kr6, AR_KR6), | ||
| 1092 | XEN_PATCH_BUNDLE_ELEM_SETREG(kr7, AR_KR7), | ||
| 1093 | |||
| 1094 | XEN_PATCH_BUNDLE_ELEM_SETREG(eflag, AR_EFLAG), | ||
| 1095 | XEN_PATCH_BUNDLE_ELEM_SETREG(tpr, CR_TPR), | ||
| 1096 | __XEN_PATCH_BUNDLE_ELEM_SETREG(eoi, CR_EOI), | ||
| 1097 | |||
| 1098 | XEN_PATCH_BUNDLE_ELEM_SETREG(itc, AR_ITC), | ||
| 1099 | XEN_PATCH_BUNDLE_ELEM_SETREG(itm_with_offset, CR_ITM), | ||
| 1100 | }; | ||
| 1101 | |||
| 1102 | static unsigned long __init_or_module | ||
| 1103 | xen_patch_bundle(void *sbundle, void *ebundle, unsigned long type) | ||
| 1104 | { | ||
| 1105 | const unsigned long nelems = sizeof(xen_patch_bundle_elems) / | ||
| 1106 | sizeof(xen_patch_bundle_elems[0]); | ||
| 1107 | unsigned long used; | ||
| 1108 | const struct paravirt_patch_bundle_elem *found; | ||
| 1109 | |||
| 1110 | used = __paravirt_patch_apply_bundle(sbundle, ebundle, type, | ||
| 1111 | xen_patch_bundle_elems, nelems, | ||
| 1112 | &found); | ||
| 1113 | |||
| 1114 | if (found == NULL) | ||
| 1115 | /* fallback */ | ||
| 1116 | return ia64_native_patch_bundle(sbundle, ebundle, type); | ||
| 1117 | if (used == 0) | ||
| 1118 | return used; | ||
| 1119 | |||
| 1120 | /* relocation */ | ||
| 1121 | switch (type) { | ||
| 1122 | case PARAVIRT_PATCH_TYPE_INTRIN_LOCAL_IRQ_RESTORE: { | ||
| 1123 | unsigned long reloc = | ||
| 1124 | __xen_intrin_local_irq_restore_direct_reloc; | ||
| 1125 | unsigned long reloc_offset = reloc - (unsigned long) | ||
| 1126 | __xen_intrin_local_irq_restore_direct_start; | ||
| 1127 | unsigned long tag = (unsigned long)sbundle + reloc_offset; | ||
| 1128 | paravirt_patch_reloc_brl(tag, xen_check_events); | ||
| 1129 | break; | ||
| 1130 | } | ||
| 1131 | default: | ||
| 1132 | /* nothing */ | ||
| 1133 | break; | ||
| 1134 | } | ||
| 1135 | return used; | ||
| 1136 | } | ||
| 1137 | #endif /* ASM_SUPPORTED */ | ||
| 1138 | |||
| 1139 | const struct paravirt_patch_branch_target xen_branch_target[] | ||
| 1140 | __initconst = { | ||
| 1141 | #define PARAVIRT_BR_TARGET(name, type) \ | ||
| 1142 | { \ | ||
| 1143 | &xen_ ## name, \ | ||
| 1144 | PARAVIRT_PATCH_TYPE_BR_ ## type, \ | ||
| 1145 | } | ||
| 1146 | PARAVIRT_BR_TARGET(switch_to, SWITCH_TO), | ||
| 1147 | PARAVIRT_BR_TARGET(leave_syscall, LEAVE_SYSCALL), | ||
| 1148 | PARAVIRT_BR_TARGET(work_processed_syscall, WORK_PROCESSED_SYSCALL), | ||
| 1149 | PARAVIRT_BR_TARGET(leave_kernel, LEAVE_KERNEL), | ||
| 1150 | }; | ||
| 1151 | |||
| 1152 | static void __init | ||
| 1153 | xen_patch_branch(unsigned long tag, unsigned long type) | ||
| 1154 | { | ||
| 1155 | const unsigned long nelem = | ||
| 1156 | sizeof(xen_branch_target) / sizeof(xen_branch_target[0]); | ||
| 1157 | __paravirt_patch_apply_branch(tag, type, xen_branch_target, nelem); | ||
| 1158 | } | ||
