author		Luis R. Rodriguez <mcgrof@suse.com>	2015-06-02 14:42:02 -0400
committer	Tony Luck <tony.luck@intel.com>		2015-06-10 17:26:32 -0400
commit		e55645ec5725a33eac9d6133f3bce381af1e993d (patch)
tree		ff805fb6042e5e06c3a60d8482855cfcaf6b6dfe
parent		c65b99f046843d2455aa231747b5a07a999a9f3d (diff)
ia64: remove paravirt code
All the ia64 pvops code is now dead code since both xen and kvm support have
been ripped out [0] [1]; no one had yet troubled to rip this stuff out. The
only useful remaining pieces were the old pvops docs, but those were recently
also generalized and moved out of ia64 [2]. This has been run-time tested on
an ia64 Madison system.

[0] 003f7de625890 "KVM: ia64: remove" since v3.19-rc1
[1] d52eefb47d4eb "ia64/xen: Remove Xen support for ia64" since v3.14-rc1
[2] "virtual: Documentation: simplify and generalize paravirt_ops.txt"

Signed-off-by: Luis R. Rodriguez <mcgrof@suse.com>
Signed-off-by: Tony Luck <tony.luck@intel.com>
-rw-r--r--  arch/ia64/Kconfig                            |   23
-rw-r--r--  arch/ia64/include/asm/hw_irq.h               |    8
-rw-r--r--  arch/ia64/include/asm/intrinsics.h           |   13
-rw-r--r--  arch/ia64/include/asm/iosapic.h              |    4
-rw-r--r--  arch/ia64/include/asm/module.h               |    6
-rw-r--r--  arch/ia64/include/asm/native/inst.h          |  103
-rw-r--r--  arch/ia64/include/asm/native/pvchk_inst.h    |  271
-rw-r--r--  arch/ia64/include/asm/paravirt.h             |  321
-rw-r--r--  arch/ia64/include/asm/paravirt_patch.h       |  143
-rw-r--r--  arch/ia64/include/asm/paravirt_privop.h      |  479
-rw-r--r--  arch/ia64/kernel/Makefile                    |   34
-rw-r--r--  arch/ia64/kernel/efi.c                       |    1
-rw-r--r--  arch/ia64/kernel/entry.S                     |   41
-rw-r--r--  arch/ia64/kernel/fsys.S                      |   18
-rw-r--r--  arch/ia64/kernel/gate.S                      |    9
-rw-r--r--  arch/ia64/kernel/gate.lds.S                  |   17
-rw-r--r--  arch/ia64/kernel/head.S                      |   42
-rw-r--r--  arch/ia64/kernel/ivt.S                       |    4
-rw-r--r--  arch/ia64/kernel/minstate.h                  |    2
-rw-r--r--  arch/ia64/kernel/module.c                    |   32
-rw-r--r--  arch/ia64/kernel/paravirt.c                  |  902
-rw-r--r--  arch/ia64/kernel/paravirt_inst.h             |   28
-rw-r--r--  arch/ia64/kernel/paravirt_patch.c            |  514
-rw-r--r--  arch/ia64/kernel/paravirt_patchlist.c        |   81
-rw-r--r--  arch/ia64/kernel/paravirt_patchlist.h        |   24
-rw-r--r--  arch/ia64/kernel/paravirtentry.S             |  121
-rw-r--r--  arch/ia64/kernel/patch.c                     |   38
-rw-r--r--  arch/ia64/kernel/setup.c                     |   12
-rw-r--r--  arch/ia64/kernel/smpboot.c                   |    2
-rw-r--r--  arch/ia64/kernel/time.c                      |   29
-rw-r--r--  arch/ia64/kernel/vmlinux.lds.S               |   21
-rw-r--r--  arch/ia64/mm/init.c                          |    9
-rw-r--r--  arch/ia64/scripts/pvcheck.sed                |   33
33 files changed, 68 insertions(+), 3317 deletions(-)
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 76d25b2cfbbe..42a91a7aa2b0 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -137,29 +137,6 @@ config AUDIT_ARCH
 	bool
 	default y
 
-menuconfig PARAVIRT_GUEST
-	bool "Paravirtualized guest support"
-	depends on BROKEN
-	help
-	  Say Y here to get to see options related to running Linux under
-	  various hypervisors.  This option alone does not add any kernel code.
-
-	  If you say N, all options in this submenu will be skipped and disabled.
-
-if PARAVIRT_GUEST
-
-config PARAVIRT
-	bool "Enable paravirtualization code"
-	depends on PARAVIRT_GUEST
-	default y
-	help
-	  This changes the kernel so it can modify itself when it is run
-	  under a hypervisor, potentially improving performance significantly
-	  over full virtualization.  However, when run without a hypervisor
-	  the kernel is theoretically slower and slightly larger.
-
-endif
-
 choice
 	prompt "System type"
 	default IA64_GENERIC
diff --git a/arch/ia64/include/asm/hw_irq.h b/arch/ia64/include/asm/hw_irq.h
index 668786e84af8..74347ebf7d68 100644
--- a/arch/ia64/include/asm/hw_irq.h
+++ b/arch/ia64/include/asm/hw_irq.h
@@ -15,11 +15,7 @@
 #include <asm/ptrace.h>
 #include <asm/smp.h>
 
-#ifndef CONFIG_PARAVIRT
 typedef u8 ia64_vector;
-#else
-typedef u16 ia64_vector;
-#endif
 
 /*
  * 0 special
@@ -114,15 +110,11 @@ DECLARE_PER_CPU(int[IA64_NUM_VECTORS], vector_irq);
 
 extern struct irq_chip irq_type_ia64_lsapic;	/* CPU-internal interrupt controller */
 
-#ifdef CONFIG_PARAVIRT_GUEST
-#include <asm/paravirt.h>
-#else
 #define ia64_register_ipi	ia64_native_register_ipi
 #define assign_irq_vector	ia64_native_assign_irq_vector
 #define free_irq_vector		ia64_native_free_irq_vector
 #define register_percpu_irq	ia64_native_register_percpu_irq
 #define ia64_resend_irq		ia64_native_resend_irq
-#endif
 
 extern void ia64_native_register_ipi(void);
 extern int bind_irq_vector(int irq, int vector, cpumask_t domain);
diff --git a/arch/ia64/include/asm/intrinsics.h b/arch/ia64/include/asm/intrinsics.h
index 20477ea111ba..ec970a920132 100644
--- a/arch/ia64/include/asm/intrinsics.h
+++ b/arch/ia64/include/asm/intrinsics.h
@@ -7,19 +7,6 @@
 #ifndef _ASM_IA64_INTRINSICS_H
 #define _ASM_IA64_INTRINSICS_H
 
-#include <asm/paravirt_privop.h>
 #include <uapi/asm/intrinsics.h>
 
-#ifndef __ASSEMBLY__
-#if defined(CONFIG_PARAVIRT)
-# undef IA64_INTRINSIC_API
-# undef IA64_INTRINSIC_MACRO
-# ifdef ASM_SUPPORTED
-#  define IA64_INTRINSIC_API(name)	paravirt_ ## name
-# else
-#  define IA64_INTRINSIC_API(name)	pv_cpu_ops.name
-# endif
-#define IA64_INTRINSIC_MACRO(name)	paravirt_ ## name
-#endif
-#endif /* !__ASSEMBLY__ */
 #endif /* _ASM_IA64_INTRINSICS_H */
diff --git a/arch/ia64/include/asm/iosapic.h b/arch/ia64/include/asm/iosapic.h
index 94c89a2d97fe..4ae1fbd7f10e 100644
--- a/arch/ia64/include/asm/iosapic.h
+++ b/arch/ia64/include/asm/iosapic.h
@@ -55,14 +55,10 @@
 
 #define NR_IOSAPICS			256
 
-#ifdef CONFIG_PARAVIRT_GUEST
-#include <asm/paravirt.h>
-#else
 #define iosapic_pcat_compat_init	ia64_native_iosapic_pcat_compat_init
 #define __iosapic_read			__ia64_native_iosapic_read
 #define __iosapic_write			__ia64_native_iosapic_write
 #define iosapic_get_irq_chip		ia64_native_iosapic_get_irq_chip
-#endif
 
 extern void __init ia64_native_iosapic_pcat_compat_init(void);
 extern struct irq_chip *ia64_native_iosapic_get_irq_chip(unsigned long trigger);
diff --git a/arch/ia64/include/asm/module.h b/arch/ia64/include/asm/module.h
index dfba22a872c3..f31894b2a354 100644
--- a/arch/ia64/include/asm/module.h
+++ b/arch/ia64/include/asm/module.h
@@ -18,12 +18,6 @@ struct mod_arch_specific {
 	struct elf64_shdr *got;		/* global offset table */
 	struct elf64_shdr *opd;		/* official procedure descriptors */
 	struct elf64_shdr *unwind;	/* unwind-table section */
-#ifdef CONFIG_PARAVIRT
-	struct elf64_shdr *paravirt_bundles;
-					/* paravirt_alt_bundle_patch table */
-	struct elf64_shdr *paravirt_insts;
-					/* paravirt_alt_inst_patch table */
-#endif
 	unsigned long gp;		/* global-pointer for module */
 
 	void *core_unw_table;		/* core unwind-table cookie returned by unwinder */
diff --git a/arch/ia64/include/asm/native/inst.h b/arch/ia64/include/asm/native/inst.h
index d2d46efb3e6e..7e08f17accd5 100644
--- a/arch/ia64/include/asm/native/inst.h
+++ b/arch/ia64/include/asm/native/inst.h
@@ -22,32 +22,6 @@
 
 #define DO_SAVE_MIN		IA64_NATIVE_DO_SAVE_MIN
 
-#define __paravirt_switch_to			ia64_native_switch_to
-#define __paravirt_leave_syscall		ia64_native_leave_syscall
-#define __paravirt_work_processed_syscall	ia64_native_work_processed_syscall
-#define __paravirt_leave_kernel			ia64_native_leave_kernel
-#define __paravirt_pending_syscall_end		ia64_work_pending_syscall_end
-#define __paravirt_work_processed_syscall_target \
-						ia64_work_processed_syscall
-
-#define paravirt_fsyscall_table			ia64_native_fsyscall_table
-#define paravirt_fsys_bubble_down		ia64_native_fsys_bubble_down
-
-#ifdef CONFIG_PARAVIRT_GUEST_ASM_CLOBBER_CHECK
-# define PARAVIRT_POISON	0xdeadbeefbaadf00d
-# define CLOBBER(clob)				\
-	;;					\
-	movl clob = PARAVIRT_POISON;		\
-	;;
-# define CLOBBER_PRED(pred_clob)		\
-	;;					\
-	cmp.eq pred_clob, p0 = r0, r0		\
-	;;
-#else
-# define CLOBBER(clob)			/* nothing */
-# define CLOBBER_PRED(pred_clob)	/* nothing */
-#endif
-
 #define MOV_FROM_IFA(reg)	\
 	mov reg = cr.ifa
 
@@ -70,106 +44,76 @@
 	mov reg = cr.iip
 
 #define MOV_FROM_IVR(reg, clob)	\
-	mov reg = cr.ivr	\
-	CLOBBER(clob)
+	mov reg = cr.ivr
 
 #define MOV_FROM_PSR(pred, reg, clob)	\
-(pred)	mov reg = psr		\
-	CLOBBER(clob)
+(pred)	mov reg = psr
 
 #define MOV_FROM_ITC(pred, pred_clob, reg, clob)	\
-(pred)	mov reg = ar.itc	\
-	CLOBBER(clob)		\
-	CLOBBER_PRED(pred_clob)
+(pred)	mov reg = ar.itc
 
 #define MOV_TO_IFA(reg, clob)	\
-	mov cr.ifa = reg	\
-	CLOBBER(clob)
+	mov cr.ifa = reg
 
 #define MOV_TO_ITIR(pred, reg, clob)	\
-(pred)	mov cr.itir = reg	\
-	CLOBBER(clob)
+(pred)	mov cr.itir = reg
 
 #define MOV_TO_IHA(pred, reg, clob)	\
-(pred)	mov cr.iha = reg	\
-	CLOBBER(clob)
+(pred)	mov cr.iha = reg
 
 #define MOV_TO_IPSR(pred, reg, clob)	\
-(pred)	mov cr.ipsr = reg	\
-	CLOBBER(clob)
+(pred)	mov cr.ipsr = reg
 
 #define MOV_TO_IFS(pred, reg, clob)	\
-(pred)	mov cr.ifs = reg	\
-	CLOBBER(clob)
+(pred)	mov cr.ifs = reg
 
 #define MOV_TO_IIP(reg, clob)	\
-	mov cr.iip = reg	\
-	CLOBBER(clob)
+	mov cr.iip = reg
 
 #define MOV_TO_KR(kr, reg, clob0, clob1)	\
-	mov IA64_KR(kr) = reg	\
-	CLOBBER(clob0)		\
-	CLOBBER(clob1)
+	mov IA64_KR(kr) = reg
 
 #define ITC_I(pred, reg, clob)	\
-(pred)	itc.i reg		\
-	CLOBBER(clob)
+(pred)	itc.i reg
 
 #define ITC_D(pred, reg, clob)	\
-(pred)	itc.d reg		\
-	CLOBBER(clob)
+(pred)	itc.d reg
 
 #define ITC_I_AND_D(pred_i, pred_d, reg, clob)	\
 (pred_i) itc.i reg;		\
-(pred_d) itc.d reg		\
-	CLOBBER(clob)
+(pred_d) itc.d reg
 
 #define THASH(pred, reg0, reg1, clob)	\
-(pred)	thash reg0 = reg1	\
-	CLOBBER(clob)
+(pred)	thash reg0 = reg1
 
 #define SSM_PSR_IC_AND_DEFAULT_BITS_AND_SRLZ_I(clob0, clob1)	\
 	ssm psr.ic | PSR_DEFAULT_BITS	\
-	CLOBBER(clob0)		\
-	CLOBBER(clob1)		\
 	;;			\
 	srlz.i /* guarantee that interruption collectin is on */	\
 	;;
 
 #define SSM_PSR_IC_AND_SRLZ_D(clob0, clob1)	\
 	ssm psr.ic		\
-	CLOBBER(clob0)		\
-	CLOBBER(clob1)		\
 	;;			\
 	srlz.d
 
 #define RSM_PSR_IC(clob)	\
-	rsm psr.ic		\
-	CLOBBER(clob)
+	rsm psr.ic
 
 #define SSM_PSR_I(pred, pred_clob, clob)	\
-(pred)	ssm psr.i		\
-	CLOBBER(clob)		\
-	CLOBBER_PRED(pred_clob)
+(pred)	ssm psr.i
 
 #define RSM_PSR_I(pred, clob0, clob1)	\
-(pred)	rsm psr.i		\
-	CLOBBER(clob0)		\
-	CLOBBER(clob1)
+(pred)	rsm psr.i
 
 #define RSM_PSR_I_IC(clob0, clob1, clob2)	\
-	rsm psr.i | psr.ic	\
-	CLOBBER(clob0)		\
-	CLOBBER(clob1)		\
-	CLOBBER(clob2)
+	rsm psr.i | psr.ic
 
 #define RSM_PSR_DT		\
 	rsm psr.dt
 
 #define RSM_PSR_BE_I(clob0, clob1)	\
-	rsm psr.be | psr.i	\
-	CLOBBER(clob0)		\
-	CLOBBER(clob1)
+	rsm psr.be | psr.i
 
 #define SSM_PSR_DT_AND_SRLZ_I	\
 	ssm psr.dt		\
@@ -177,15 +121,10 @@
 	srlz.i
 
 #define BSW_0(clob0, clob1, clob2)	\
-	bsw.0			\
-	CLOBBER(clob0)		\
-	CLOBBER(clob1)		\
-	CLOBBER(clob2)
+	bsw.0
 
 #define BSW_1(clob0, clob1)	\
-	bsw.1			\
-	CLOBBER(clob0)		\
-	CLOBBER(clob1)
+	bsw.1
 
 #define COVER	\
 	cover
diff --git a/arch/ia64/include/asm/native/pvchk_inst.h b/arch/ia64/include/asm/native/pvchk_inst.h
deleted file mode 100644
index 8d72962ec838..000000000000
--- a/arch/ia64/include/asm/native/pvchk_inst.h
+++ /dev/null
@@ -1,271 +0,0 @@
1#ifndef _ASM_NATIVE_PVCHK_INST_H
2#define _ASM_NATIVE_PVCHK_INST_H
3
4/******************************************************************************
5 * arch/ia64/include/asm/native/pvchk_inst.h
6 * Checker for paravirtualizations of privileged operations.
7 *
8 * Copyright (C) 2005 Hewlett-Packard Co
9 * Dan Magenheimer <dan.magenheimer@hp.com>
10 *
11 * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
12 * VA Linux Systems Japan K.K.
13 *
14 * This program is free software; you can redistribute it and/or modify
15 * it under the terms of the GNU General Public License as published by
16 * the Free Software Foundation; either version 2 of the License, or
17 * (at your option) any later version.
18 *
19 * This program is distributed in the hope that it will be useful,
20 * but WITHOUT ANY WARRANTY; without even the implied warranty of
21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
22 * GNU General Public License for more details.
23 *
24 * You should have received a copy of the GNU General Public License
25 * along with this program; if not, write to the Free Software
26 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
27 *
28 */
29
30/**********************************************
31 * Instructions paravirtualized for correctness
32 **********************************************/
33
34/* "fc" and "thash" are privilege-sensitive instructions, meaning they
35 * may have different semantics depending on whether they are executed
36 * at PL0 vs PL!=0. When paravirtualized, these instructions mustn't
37 * be allowed to execute directly, lest incorrect semantics result.
38 */
39
40#define fc .error "fc should not be used directly."
41#define thash .error "thash should not be used directly."
42
43/* Note that "ttag" and "cover" are also privilege-sensitive; "ttag"
44 * is not currently used (though it may be in a long-format VHPT system!)
45 * and the semantics of cover only change if psr.ic is off which is very
46 * rare (and currently non-existent outside of assembly code
47 */
48#define ttag .error "ttag should not be used directly."
49#define cover .error "cover should not be used directly."
50
51/* There are also privilege-sensitive registers. These registers are
52 * readable at any privilege level but only writable at PL0.
53 */
54#define cpuid .error "cpuid should not be used directly."
55#define pmd .error "pmd should not be used directly."
56
57/*
58 * mov ar.eflag =
59 * mov = ar.eflag
60 */
61
62/**********************************************
63 * Instructions paravirtualized for performance
64 **********************************************/
65/*
66 * Those instructions include '.' which can't be handled by cpp.
67 * or can't be handled by cpp easily.
68 * They are handled by sed instead of cpp.
69 */
70
71/* for .S
72 * itc.i
73 * itc.d
74 *
75 * bsw.0
76 * bsw.1
77 *
78 * ssm psr.ic | PSR_DEFAULT_BITS
79 * ssm psr.ic
80 * rsm psr.ic
81 * ssm psr.i
82 * rsm psr.i
83 * rsm psr.i | psr.ic
84 * rsm psr.dt
85 * ssm psr.dt
86 *
87 * mov = cr.ifa
88 * mov = cr.itir
89 * mov = cr.isr
90 * mov = cr.iha
91 * mov = cr.ipsr
92 * mov = cr.iim
93 * mov = cr.iip
94 * mov = cr.ivr
95 * mov = psr
96 *
97 * mov cr.ifa =
98 * mov cr.itir =
99 * mov cr.iha =
100 * mov cr.ipsr =
101 * mov cr.ifs =
102 * mov cr.iip =
103 * mov cr.kr =
104 */
105
106/* for intrinsics
107 * ssm psr.i
108 * rsm psr.i
109 * mov = psr
110 * mov = ivr
111 * mov = tpr
112 * mov cr.itm =
113 * mov eoi =
114 * mov rr[] =
115 * mov = rr[]
116 * mov = kr
117 * mov kr =
118 * ptc.ga
119 */
120
121/*************************************************************
122 * define paravirtualized instrcution macros as nop to ingore.
123 * and check whether arguments are appropriate.
124 *************************************************************/
125
126/* check whether reg is a regular register */
127.macro is_rreg_in reg
128 .ifc "\reg", "r0"
129 nop 0
130 .exitm
131 .endif
132 ;;
133 mov \reg = r0
134 ;;
135.endm
136#define IS_RREG_IN(reg) is_rreg_in reg ;
137
138#define IS_RREG_OUT(reg) \
139 ;; \
140 mov reg = r0 \
141 ;;
142
143#define IS_RREG_CLOB(reg) IS_RREG_OUT(reg)
144
145/* check whether pred is a predicate register */
146#define IS_PRED_IN(pred) \
147 ;; \
148 (pred) nop 0 \
149 ;;
150
151#define IS_PRED_OUT(pred) \
152 ;; \
153 cmp.eq pred, p0 = r0, r0 \
154 ;;
155
156#define IS_PRED_CLOB(pred) IS_PRED_OUT(pred)
157
158
159#define DO_SAVE_MIN(__COVER, SAVE_IFS, EXTRA, WORKAROUND) \
160 nop 0
161#define MOV_FROM_IFA(reg) \
162 IS_RREG_OUT(reg)
163#define MOV_FROM_ITIR(reg) \
164 IS_RREG_OUT(reg)
165#define MOV_FROM_ISR(reg) \
166 IS_RREG_OUT(reg)
167#define MOV_FROM_IHA(reg) \
168 IS_RREG_OUT(reg)
169#define MOV_FROM_IPSR(pred, reg) \
170 IS_PRED_IN(pred) \
171 IS_RREG_OUT(reg)
172#define MOV_FROM_IIM(reg) \
173 IS_RREG_OUT(reg)
174#define MOV_FROM_IIP(reg) \
175 IS_RREG_OUT(reg)
176#define MOV_FROM_IVR(reg, clob) \
177 IS_RREG_OUT(reg) \
178 IS_RREG_CLOB(clob)
179#define MOV_FROM_PSR(pred, reg, clob) \
180 IS_PRED_IN(pred) \
181 IS_RREG_OUT(reg) \
182 IS_RREG_CLOB(clob)
183#define MOV_FROM_ITC(pred, pred_clob, reg, clob) \
184 IS_PRED_IN(pred) \
185 IS_PRED_CLOB(pred_clob) \
186 IS_RREG_OUT(reg) \
187 IS_RREG_CLOB(clob)
188#define MOV_TO_IFA(reg, clob) \
189 IS_RREG_IN(reg) \
190 IS_RREG_CLOB(clob)
191#define MOV_TO_ITIR(pred, reg, clob) \
192 IS_PRED_IN(pred) \
193 IS_RREG_IN(reg) \
194 IS_RREG_CLOB(clob)
195#define MOV_TO_IHA(pred, reg, clob) \
196 IS_PRED_IN(pred) \
197 IS_RREG_IN(reg) \
198 IS_RREG_CLOB(clob)
199#define MOV_TO_IPSR(pred, reg, clob) \
200 IS_PRED_IN(pred) \
201 IS_RREG_IN(reg) \
202 IS_RREG_CLOB(clob)
203#define MOV_TO_IFS(pred, reg, clob) \
204 IS_PRED_IN(pred) \
205 IS_RREG_IN(reg) \
206 IS_RREG_CLOB(clob)
207#define MOV_TO_IIP(reg, clob) \
208 IS_RREG_IN(reg) \
209 IS_RREG_CLOB(clob)
210#define MOV_TO_KR(kr, reg, clob0, clob1) \
211 IS_RREG_IN(reg) \
212 IS_RREG_CLOB(clob0) \
213 IS_RREG_CLOB(clob1)
214#define ITC_I(pred, reg, clob) \
215 IS_PRED_IN(pred) \
216 IS_RREG_IN(reg) \
217 IS_RREG_CLOB(clob)
218#define ITC_D(pred, reg, clob) \
219 IS_PRED_IN(pred) \
220 IS_RREG_IN(reg) \
221 IS_RREG_CLOB(clob)
222#define ITC_I_AND_D(pred_i, pred_d, reg, clob) \
223 IS_PRED_IN(pred_i) \
224 IS_PRED_IN(pred_d) \
225 IS_RREG_IN(reg) \
226 IS_RREG_CLOB(clob)
227#define THASH(pred, reg0, reg1, clob) \
228 IS_PRED_IN(pred) \
229 IS_RREG_OUT(reg0) \
230 IS_RREG_IN(reg1) \
231 IS_RREG_CLOB(clob)
232#define SSM_PSR_IC_AND_DEFAULT_BITS_AND_SRLZ_I(clob0, clob1) \
233 IS_RREG_CLOB(clob0) \
234 IS_RREG_CLOB(clob1)
235#define SSM_PSR_IC_AND_SRLZ_D(clob0, clob1) \
236 IS_RREG_CLOB(clob0) \
237 IS_RREG_CLOB(clob1)
238#define RSM_PSR_IC(clob) \
239 IS_RREG_CLOB(clob)
240#define SSM_PSR_I(pred, pred_clob, clob) \
241 IS_PRED_IN(pred) \
242 IS_PRED_CLOB(pred_clob) \
243 IS_RREG_CLOB(clob)
244#define RSM_PSR_I(pred, clob0, clob1) \
245 IS_PRED_IN(pred) \
246 IS_RREG_CLOB(clob0) \
247 IS_RREG_CLOB(clob1)
248#define RSM_PSR_I_IC(clob0, clob1, clob2) \
249 IS_RREG_CLOB(clob0) \
250 IS_RREG_CLOB(clob1) \
251 IS_RREG_CLOB(clob2)
252#define RSM_PSR_DT \
253 nop 0
254#define RSM_PSR_BE_I(clob0, clob1) \
255 IS_RREG_CLOB(clob0) \
256 IS_RREG_CLOB(clob1)
257#define SSM_PSR_DT_AND_SRLZ_I \
258 nop 0
259#define BSW_0(clob0, clob1, clob2) \
260 IS_RREG_CLOB(clob0) \
261 IS_RREG_CLOB(clob1) \
262 IS_RREG_CLOB(clob2)
263#define BSW_1(clob0, clob1) \
264 IS_RREG_CLOB(clob0) \
265 IS_RREG_CLOB(clob1)
266#define COVER \
267 nop 0
268#define RFI \
269 br.ret.sptk.many rp /* defining nop causes dependency error */
270
271#endif /* _ASM_NATIVE_PVCHK_INST_H */
diff --git a/arch/ia64/include/asm/paravirt.h b/arch/ia64/include/asm/paravirt.h
deleted file mode 100644
index b53518a98026..000000000000
--- a/arch/ia64/include/asm/paravirt.h
+++ /dev/null
@@ -1,321 +0,0 @@
1/******************************************************************************
2 * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
3 * VA Linux Systems Japan K.K.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 *
19 */
20
21
22#ifndef __ASM_PARAVIRT_H
23#define __ASM_PARAVIRT_H
24
25#ifndef __ASSEMBLY__
26/******************************************************************************
27 * fsys related addresses
28 */
29struct pv_fsys_data {
30 unsigned long *fsyscall_table;
31 void *fsys_bubble_down;
32};
33
34extern struct pv_fsys_data pv_fsys_data;
35
36unsigned long *paravirt_get_fsyscall_table(void);
37char *paravirt_get_fsys_bubble_down(void);
38
39/******************************************************************************
40 * patchlist addresses for gate page
41 */
42enum pv_gate_patchlist {
43 PV_GATE_START_FSYSCALL,
44 PV_GATE_END_FSYSCALL,
45
46 PV_GATE_START_BRL_FSYS_BUBBLE_DOWN,
47 PV_GATE_END_BRL_FSYS_BUBBLE_DOWN,
48
49 PV_GATE_START_VTOP,
50 PV_GATE_END_VTOP,
51
52 PV_GATE_START_MCKINLEY_E9,
53 PV_GATE_END_MCKINLEY_E9,
54};
55
56struct pv_patchdata {
57 unsigned long start_fsyscall_patchlist;
58 unsigned long end_fsyscall_patchlist;
59 unsigned long start_brl_fsys_bubble_down_patchlist;
60 unsigned long end_brl_fsys_bubble_down_patchlist;
61 unsigned long start_vtop_patchlist;
62 unsigned long end_vtop_patchlist;
63 unsigned long start_mckinley_e9_patchlist;
64 unsigned long end_mckinley_e9_patchlist;
65
66 void *gate_section;
67};
68
69extern struct pv_patchdata pv_patchdata;
70
71unsigned long paravirt_get_gate_patchlist(enum pv_gate_patchlist type);
72void *paravirt_get_gate_section(void);
73#endif
74
75#ifdef CONFIG_PARAVIRT_GUEST
76
77#define PARAVIRT_HYPERVISOR_TYPE_DEFAULT 0
78
79#ifndef __ASSEMBLY__
80
81#include <asm/hw_irq.h>
82#include <asm/meminit.h>
83
84/******************************************************************************
85 * general info
86 */
87struct pv_info {
88 unsigned int kernel_rpl;
89 int paravirt_enabled;
90 const char *name;
91};
92
93extern struct pv_info pv_info;
94
95static inline int paravirt_enabled(void)
96{
97 return pv_info.paravirt_enabled;
98}
99
100static inline unsigned int get_kernel_rpl(void)
101{
102 return pv_info.kernel_rpl;
103}
104
105/******************************************************************************
106 * initialization hooks.
107 */
108struct rsvd_region;
109
110struct pv_init_ops {
111 void (*banner)(void);
112
113 int (*reserve_memory)(struct rsvd_region *region);
114
115 void (*arch_setup_early)(void);
116 void (*arch_setup_console)(char **cmdline_p);
117 int (*arch_setup_nomca)(void);
118
119 void (*post_smp_prepare_boot_cpu)(void);
120
121#ifdef ASM_SUPPORTED
122 unsigned long (*patch_bundle)(void *sbundle, void *ebundle,
123 unsigned long type);
124 unsigned long (*patch_inst)(unsigned long stag, unsigned long etag,
125 unsigned long type);
126#endif
127 void (*patch_branch)(unsigned long tag, unsigned long type);
128};
129
130extern struct pv_init_ops pv_init_ops;
131
132static inline void paravirt_banner(void)
133{
134 if (pv_init_ops.banner)
135 pv_init_ops.banner();
136}
137
138static inline int paravirt_reserve_memory(struct rsvd_region *region)
139{
140 if (pv_init_ops.reserve_memory)
141 return pv_init_ops.reserve_memory(region);
142 return 0;
143}
144
145static inline void paravirt_arch_setup_early(void)
146{
147 if (pv_init_ops.arch_setup_early)
148 pv_init_ops.arch_setup_early();
149}
150
151static inline void paravirt_arch_setup_console(char **cmdline_p)
152{
153 if (pv_init_ops.arch_setup_console)
154 pv_init_ops.arch_setup_console(cmdline_p);
155}
156
157static inline int paravirt_arch_setup_nomca(void)
158{
159 if (pv_init_ops.arch_setup_nomca)
160 return pv_init_ops.arch_setup_nomca();
161 return 0;
162}
163
164static inline void paravirt_post_smp_prepare_boot_cpu(void)
165{
166 if (pv_init_ops.post_smp_prepare_boot_cpu)
167 pv_init_ops.post_smp_prepare_boot_cpu();
168}
169
170/******************************************************************************
171 * replacement of iosapic operations.
172 */
173
174struct pv_iosapic_ops {
175 void (*pcat_compat_init)(void);
176
177 struct irq_chip *(*__get_irq_chip)(unsigned long trigger);
178
179 unsigned int (*__read)(char __iomem *iosapic, unsigned int reg);
180 void (*__write)(char __iomem *iosapic, unsigned int reg, u32 val);
181};
182
183extern struct pv_iosapic_ops pv_iosapic_ops;
184
185static inline void
186iosapic_pcat_compat_init(void)
187{
188 if (pv_iosapic_ops.pcat_compat_init)
189 pv_iosapic_ops.pcat_compat_init();
190}
191
192static inline struct irq_chip*
193iosapic_get_irq_chip(unsigned long trigger)
194{
195 return pv_iosapic_ops.__get_irq_chip(trigger);
196}
197
198static inline unsigned int
199__iosapic_read(char __iomem *iosapic, unsigned int reg)
200{
201 return pv_iosapic_ops.__read(iosapic, reg);
202}
203
204static inline void
205__iosapic_write(char __iomem *iosapic, unsigned int reg, u32 val)
206{
207 return pv_iosapic_ops.__write(iosapic, reg, val);
208}
209
210/******************************************************************************
211 * replacement of irq operations.
212 */
213
214struct pv_irq_ops {
215 void (*register_ipi)(void);
216
217 int (*assign_irq_vector)(int irq);
218 void (*free_irq_vector)(int vector);
219
220 void (*register_percpu_irq)(ia64_vector vec,
221 struct irqaction *action);
222
223 void (*resend_irq)(unsigned int vector);
224};
225
226extern struct pv_irq_ops pv_irq_ops;
227
228static inline void
229ia64_register_ipi(void)
230{
231 pv_irq_ops.register_ipi();
232}
233
234static inline int
235assign_irq_vector(int irq)
236{
237 return pv_irq_ops.assign_irq_vector(irq);
238}
239
240static inline void
241free_irq_vector(int vector)
242{
243 return pv_irq_ops.free_irq_vector(vector);
244}
245
246static inline void
247register_percpu_irq(ia64_vector vec, struct irqaction *action)
248{
249 pv_irq_ops.register_percpu_irq(vec, action);
250}
251
252static inline void
253ia64_resend_irq(unsigned int vector)
254{
255 pv_irq_ops.resend_irq(vector);
256}
257
258/******************************************************************************
259 * replacement of time operations.
260 */
261
262extern struct itc_jitter_data_t itc_jitter_data;
263extern volatile int time_keeper_id;
264
265struct pv_time_ops {
266 void (*init_missing_ticks_accounting)(int cpu);
267 int (*do_steal_accounting)(unsigned long *new_itm);
268
269 void (*clocksource_resume)(void);
270
271 unsigned long long (*sched_clock)(void);
272};
273
274extern struct pv_time_ops pv_time_ops;
275
276static inline void
277paravirt_init_missing_ticks_accounting(int cpu)
278{
279 if (pv_time_ops.init_missing_ticks_accounting)
280 pv_time_ops.init_missing_ticks_accounting(cpu);
281}
282
283struct static_key;
284extern struct static_key paravirt_steal_enabled;
285extern struct static_key paravirt_steal_rq_enabled;
286
287static inline int
288paravirt_do_steal_accounting(unsigned long *new_itm)
289{
290 return pv_time_ops.do_steal_accounting(new_itm);
291}
292
293static inline unsigned long long paravirt_sched_clock(void)
294{
295 return pv_time_ops.sched_clock();
296}
297
298#endif /* !__ASSEMBLY__ */
299
300#else
301/* fallback for native case */
302
303#ifndef __ASSEMBLY__
304
305#define paravirt_banner() do { } while (0)
306#define paravirt_reserve_memory(region) 0
307
308#define paravirt_arch_setup_early() do { } while (0)
309#define paravirt_arch_setup_console(cmdline_p) do { } while (0)
310#define paravirt_arch_setup_nomca() 0
311#define paravirt_post_smp_prepare_boot_cpu() do { } while (0)
312
313#define paravirt_init_missing_ticks_accounting(cpu) do { } while (0)
314#define paravirt_do_steal_accounting(new_itm) 0
315
316#endif /* __ASSEMBLY__ */
317
318
319#endif /* CONFIG_PARAVIRT_GUEST */
320
321#endif /* __ASM_PARAVIRT_H */
diff --git a/arch/ia64/include/asm/paravirt_patch.h b/arch/ia64/include/asm/paravirt_patch.h
deleted file mode 100644
index 128ff5db6e67..000000000000
--- a/arch/ia64/include/asm/paravirt_patch.h
+++ /dev/null
@@ -1,143 +0,0 @@
1/******************************************************************************
2 * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
3 * VA Linux Systems Japan K.K.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 *
19 */
20
21#ifndef __ASM_PARAVIRT_PATCH_H
22#define __ASM_PARAVIRT_PATCH_H
23
24#ifdef __ASSEMBLY__
25
26 .section .paravirt_branches, "a"
27 .previous
28#define PARAVIRT_PATCH_SITE_BR(type) \
29 { \
30 [1:] ; \
31 br.cond.sptk.many 2f ; \
32 nop.b 0 ; \
33 nop.b 0;; ; \
34 } ; \
35 2: \
36 .xdata8 ".paravirt_branches", 1b, type
37
38#else
39
40#include <linux/stringify.h>
41#include <asm/intrinsics.h>
42
43/* for binary patch */
44struct paravirt_patch_site_bundle {
45 void *sbundle;
46 void *ebundle;
47 unsigned long type;
48};
49
50/* label means the beginning of new bundle */
51#define paravirt_alt_bundle(instr, privop) \
52 "\t998:\n" \
53 "\t" instr "\n" \
54 "\t999:\n" \
55 "\t.pushsection .paravirt_bundles, \"a\"\n" \
56 "\t.popsection\n" \
57 "\t.xdata8 \".paravirt_bundles\", 998b, 999b, " \
58 __stringify(privop) "\n"
59
60
61struct paravirt_patch_bundle_elem {
62 const void *sbundle;
63 const void *ebundle;
64 unsigned long type;
65};
66
67
68struct paravirt_patch_site_inst {
69 unsigned long stag;
70 unsigned long etag;
71 unsigned long type;
72};
73
74#define paravirt_alt_inst(instr, privop) \
75 "\t[998:]\n" \
76 "\t" instr "\n" \
77 "\t[999:]\n" \
78 "\t.pushsection .paravirt_insts, \"a\"\n" \
79 "\t.popsection\n" \
80 "\t.xdata8 \".paravirt_insts\", 998b, 999b, " \
81 __stringify(privop) "\n"
82
83struct paravirt_patch_site_branch {
84 unsigned long tag;
85 unsigned long type;
86};
87
88struct paravirt_patch_branch_target {
89 const void *entry;
90 unsigned long type;
91};
92
93void
94__paravirt_patch_apply_branch(
95 unsigned long tag, unsigned long type,
96 const struct paravirt_patch_branch_target *entries,
97 unsigned int nr_entries);
98
99void
100paravirt_patch_reloc_br(unsigned long tag, const void *target);
101
102void
103paravirt_patch_reloc_brl(unsigned long tag, const void *target);
104
105
106#if defined(ASM_SUPPORTED) && defined(CONFIG_PARAVIRT)
107unsigned long
108ia64_native_patch_bundle(void *sbundle, void *ebundle, unsigned long type);
109
110unsigned long
111__paravirt_patch_apply_bundle(void *sbundle, void *ebundle, unsigned long type,
112 const struct paravirt_patch_bundle_elem *elems,
113 unsigned long nelems,
114 const struct paravirt_patch_bundle_elem **found);
115
116void
117paravirt_patch_apply_bundle(const struct paravirt_patch_site_bundle *start,
118 const struct paravirt_patch_site_bundle *end);
119
120void
121paravirt_patch_apply_inst(const struct paravirt_patch_site_inst *start,
122 const struct paravirt_patch_site_inst *end);
123
124void paravirt_patch_apply(void);
125#else
126#define paravirt_patch_apply_bundle(start, end) do { } while (0)
127#define paravirt_patch_apply_inst(start, end) do { } while (0)
128#define paravirt_patch_apply() do { } while (0)
129#endif
130
131#endif /* !__ASSEMBLEY__ */
132
133#endif /* __ASM_PARAVIRT_PATCH_H */
134
135/*
136 * Local variables:
137 * mode: C
138 * c-set-style: "linux"
139 * c-basic-offset: 8
140 * tab-width: 8
141 * indent-tabs-mode: t
142 * End:
143 */
diff --git a/arch/ia64/include/asm/paravirt_privop.h b/arch/ia64/include/asm/paravirt_privop.h
deleted file mode 100644
index 8f6cb11c9fae..000000000000
--- a/arch/ia64/include/asm/paravirt_privop.h
+++ /dev/null
@@ -1,479 +0,0 @@
1/******************************************************************************
2 * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
3 * VA Linux Systems Japan K.K.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 *
19 */
20
21#ifndef _ASM_IA64_PARAVIRT_PRIVOP_H
22#define _ASM_IA64_PARAVIRT_PRIVOP_H
23
24#ifdef CONFIG_PARAVIRT
25
26#ifndef __ASSEMBLY__
27
28#include <linux/types.h>
29#include <asm/kregs.h> /* for IA64_PSR_I */
30
31/******************************************************************************
32 * replacement of intrinsics operations.
33 */
34
35struct pv_cpu_ops {
36 void (*fc)(void *addr);
37 unsigned long (*thash)(unsigned long addr);
38 unsigned long (*get_cpuid)(int index);
39 unsigned long (*get_pmd)(int index);
40 unsigned long (*getreg)(int reg);
41 void (*setreg)(int reg, unsigned long val);
42 void (*ptcga)(unsigned long addr, unsigned long size);
43 unsigned long (*get_rr)(unsigned long index);
44 void (*set_rr)(unsigned long index, unsigned long val);
45 void (*set_rr0_to_rr4)(unsigned long val0, unsigned long val1,
46 unsigned long val2, unsigned long val3,
47 unsigned long val4);
48 void (*ssm_i)(void);
49 void (*rsm_i)(void);
50 unsigned long (*get_psr_i)(void);
51 void (*intrin_local_irq_restore)(unsigned long flags);
52};
53
54extern struct pv_cpu_ops pv_cpu_ops;
55
56extern void ia64_native_setreg_func(int regnum, unsigned long val);
57extern unsigned long ia64_native_getreg_func(int regnum);
58
59/************************************************/
60/* Instructions paravirtualized for performance */
61/************************************************/
62
63#ifndef ASM_SUPPORTED
64#define paravirt_ssm_i() pv_cpu_ops.ssm_i()
65#define paravirt_rsm_i() pv_cpu_ops.rsm_i()
66#define __paravirt_getreg() pv_cpu_ops.getreg()
67#endif
68
69/* mask for ia64_native_ssm/rsm() must be constant.("i" constraing).
70 * static inline function doesn't satisfy it. */
71#define paravirt_ssm(mask) \
72 do { \
73 if ((mask) == IA64_PSR_I) \
74 paravirt_ssm_i(); \
75 else \
76 ia64_native_ssm(mask); \
77 } while (0)
78
79#define paravirt_rsm(mask) \
80 do { \
81 if ((mask) == IA64_PSR_I) \
82 paravirt_rsm_i(); \
83 else \
84 ia64_native_rsm(mask); \
85 } while (0)
86
87/* returned ip value should be the one in the caller,
88 * not in __paravirt_getreg() */
89#define paravirt_getreg(reg) \
90 ({ \
91 unsigned long res; \
92 if ((reg) == _IA64_REG_IP) \
93 res = ia64_native_getreg(_IA64_REG_IP); \
94 else \
95 res = __paravirt_getreg(reg); \
96 res; \
97 })
98
99/******************************************************************************
100 * replacement of hand written assembly codes.
101 */
102struct pv_cpu_asm_switch {
103 unsigned long switch_to;
104 unsigned long leave_syscall;
105 unsigned long work_processed_syscall;
106 unsigned long leave_kernel;
107};
108void paravirt_cpu_asm_init(const struct pv_cpu_asm_switch *cpu_asm_switch);
109
110#endif /* __ASSEMBLY__ */
111
112#define IA64_PARAVIRT_ASM_FUNC(name) paravirt_ ## name
113
114#else
115
116/* fallback for native case */
117#define IA64_PARAVIRT_ASM_FUNC(name) ia64_native_ ## name
118
119#endif /* CONFIG_PARAVIRT */
120
121#if defined(CONFIG_PARAVIRT) && defined(ASM_SUPPORTED)
122#define paravirt_dv_serialize_data() ia64_dv_serialize_data()
123#else
124#define paravirt_dv_serialize_data() /* nothing */
125#endif
126
127/* these routines utilize privilege-sensitive or performance-sensitive
128 * privileged instructions so the code must be replaced with
129 * paravirtualized versions */
130#define ia64_switch_to IA64_PARAVIRT_ASM_FUNC(switch_to)
131#define ia64_leave_syscall IA64_PARAVIRT_ASM_FUNC(leave_syscall)
132#define ia64_work_processed_syscall \
133 IA64_PARAVIRT_ASM_FUNC(work_processed_syscall)
134#define ia64_leave_kernel IA64_PARAVIRT_ASM_FUNC(leave_kernel)
135
136
137#if defined(CONFIG_PARAVIRT)
138/******************************************************************************
139 * binary patching infrastructure
140 */
141#define PARAVIRT_PATCH_TYPE_FC 1
142#define PARAVIRT_PATCH_TYPE_THASH 2
143#define PARAVIRT_PATCH_TYPE_GET_CPUID 3
144#define PARAVIRT_PATCH_TYPE_GET_PMD 4
145#define PARAVIRT_PATCH_TYPE_PTCGA 5
146#define PARAVIRT_PATCH_TYPE_GET_RR 6
147#define PARAVIRT_PATCH_TYPE_SET_RR 7
148#define PARAVIRT_PATCH_TYPE_SET_RR0_TO_RR4 8
149#define PARAVIRT_PATCH_TYPE_SSM_I 9
150#define PARAVIRT_PATCH_TYPE_RSM_I 10
151#define PARAVIRT_PATCH_TYPE_GET_PSR_I 11
152#define PARAVIRT_PATCH_TYPE_INTRIN_LOCAL_IRQ_RESTORE 12
153
154/* PARAVIRT_PATY_TYPE_[GS]ETREG + _IA64_REG_xxx */
155#define PARAVIRT_PATCH_TYPE_GETREG 0x10000000
156#define PARAVIRT_PATCH_TYPE_SETREG 0x20000000
157
158/*
159 * struct task_struct* (*ia64_switch_to)(void* next_task);
160 * void *ia64_leave_syscall;
161 * void *ia64_work_processed_syscall
162 * void *ia64_leave_kernel;
163 */
164
165#define PARAVIRT_PATCH_TYPE_BR_START 0x30000000
166#define PARAVIRT_PATCH_TYPE_BR_SWITCH_TO \
167 (PARAVIRT_PATCH_TYPE_BR_START + 0)
168#define PARAVIRT_PATCH_TYPE_BR_LEAVE_SYSCALL \
169 (PARAVIRT_PATCH_TYPE_BR_START + 1)
170#define PARAVIRT_PATCH_TYPE_BR_WORK_PROCESSED_SYSCALL \
171 (PARAVIRT_PATCH_TYPE_BR_START + 2)
172#define PARAVIRT_PATCH_TYPE_BR_LEAVE_KERNEL \
173 (PARAVIRT_PATCH_TYPE_BR_START + 3)
174
175#ifdef ASM_SUPPORTED
176#include <asm/paravirt_patch.h>
177
178/*
179 * pv_cpu_ops calling stub.
180 * normal function call convension can't be written by gcc
181 * inline assembly.
182 *
183 * from the caller's point of view,
184 * the following registers will be clobbered.
185 * r2, r3
186 * r8-r15
187 * r16, r17
188 * b6, b7
189 * p6-p15
190 * ar.ccv
191 *
192 * from the callee's point of view ,
193 * the following registers can be used.
194 * r2, r3: scratch
195 * r8: scratch, input argument0 and return value
196 * r0-r15: scratch, input argument1-5
197 * b6: return pointer
198 * b7: scratch
199 * p6-p15: scratch
200 * ar.ccv: scratch
201 *
202 * other registers must not be changed. especially
203 * b0: rp: preserved. gcc ignores b0 in clobbered register.
204 * r16: saved gp
205 */
206/* 5 bundles */
207#define __PARAVIRT_BR \
208 ";;\n" \
209 "{ .mlx\n" \
210 "nop 0\n" \
211 "movl r2 = %[op_addr]\n"/* get function pointer address */ \
212 ";;\n" \
213 "}\n" \
214 "1:\n" \
215 "{ .mii\n" \
216 "ld8 r2 = [r2]\n" /* load function descriptor address */ \
217 "mov r17 = ip\n" /* get ip to calc return address */ \
218 "mov r16 = gp\n" /* save gp */ \
219 ";;\n" \
220 "}\n" \
221 "{ .mii\n" \
222 "ld8 r3 = [r2], 8\n" /* load entry address */ \
223 "adds r17 = 1f - 1b, r17\n" /* calculate return address */ \
224 ";;\n" \
225 "mov b7 = r3\n" /* set entry address */ \
226 "}\n" \
227 "{ .mib\n" \
228 "ld8 gp = [r2]\n" /* load gp value */ \
229 "mov b6 = r17\n" /* set return address */ \
230 "br.cond.sptk.few b7\n" /* intrinsics are very short isns */ \
231 "}\n" \
232 "1:\n" \
233 "{ .mii\n" \
234 "mov gp = r16\n" /* restore gp value */ \
235 "nop 0\n" \
236 "nop 0\n" \
237 ";;\n" \
238 "}\n"
239
240#define PARAVIRT_OP(op) \
241 [op_addr] "i"(&pv_cpu_ops.op)
242
243#define PARAVIRT_TYPE(type) \
244 PARAVIRT_PATCH_TYPE_ ## type
245
246#define PARAVIRT_REG_CLOBBERS0 \
247 "r2", "r3", /*"r8",*/ "r9", "r10", "r11", "r14", \
248 "r15", "r16", "r17"
249
250#define PARAVIRT_REG_CLOBBERS1 \
251 "r2","r3", /*"r8",*/ "r9", "r10", "r11", "r14", \
252 "r15", "r16", "r17"
253
254#define PARAVIRT_REG_CLOBBERS2 \
255 "r2", "r3", /*"r8", "r9",*/ "r10", "r11", "r14", \
256 "r15", "r16", "r17"
257
258#define PARAVIRT_REG_CLOBBERS5 \
259 "r2", "r3", /*"r8", "r9", "r10", "r11", "r14",*/ \
260 "r15", "r16", "r17"
261
262#define PARAVIRT_BR_CLOBBERS \
263 "b6", "b7"
264
265#define PARAVIRT_PR_CLOBBERS \
266 "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15"
267
268#define PARAVIRT_AR_CLOBBERS \
269 "ar.ccv"
270
271#define PARAVIRT_CLOBBERS0 \
272 PARAVIRT_REG_CLOBBERS0, \
273 PARAVIRT_BR_CLOBBERS, \
274 PARAVIRT_PR_CLOBBERS, \
275 PARAVIRT_AR_CLOBBERS, \
276 "memory"
277
278#define PARAVIRT_CLOBBERS1 \
279 PARAVIRT_REG_CLOBBERS1, \
280 PARAVIRT_BR_CLOBBERS, \
281 PARAVIRT_PR_CLOBBERS, \
282 PARAVIRT_AR_CLOBBERS, \
283 "memory"
284
285#define PARAVIRT_CLOBBERS2 \
286 PARAVIRT_REG_CLOBBERS2, \
287 PARAVIRT_BR_CLOBBERS, \
288 PARAVIRT_PR_CLOBBERS, \
289 PARAVIRT_AR_CLOBBERS, \
290 "memory"
291
292#define PARAVIRT_CLOBBERS5 \
293 PARAVIRT_REG_CLOBBERS5, \
294 PARAVIRT_BR_CLOBBERS, \
295 PARAVIRT_PR_CLOBBERS, \
296 PARAVIRT_AR_CLOBBERS, \
297 "memory"
298
299#define PARAVIRT_BR0(op, type) \
300 register unsigned long ia64_clobber asm ("r8"); \
301 asm volatile (paravirt_alt_bundle(__PARAVIRT_BR, \
302 PARAVIRT_TYPE(type)) \
303 : "=r"(ia64_clobber) \
304 : PARAVIRT_OP(op) \
305 : PARAVIRT_CLOBBERS0)
306
307#define PARAVIRT_BR0_RET(op, type) \
308 register unsigned long ia64_intri_res asm ("r8"); \
309 asm volatile (paravirt_alt_bundle(__PARAVIRT_BR, \
310 PARAVIRT_TYPE(type)) \
311 : "=r"(ia64_intri_res) \
312 : PARAVIRT_OP(op) \
313 : PARAVIRT_CLOBBERS0)
314
315#define PARAVIRT_BR1(op, type, arg1) \
316 register unsigned long __##arg1 asm ("r8") = arg1; \
317 register unsigned long ia64_clobber asm ("r8"); \
318 asm volatile (paravirt_alt_bundle(__PARAVIRT_BR, \
319 PARAVIRT_TYPE(type)) \
320 : "=r"(ia64_clobber) \
321 : PARAVIRT_OP(op), "0"(__##arg1) \
322 : PARAVIRT_CLOBBERS1)
323
324#define PARAVIRT_BR1_RET(op, type, arg1) \
325 register unsigned long ia64_intri_res asm ("r8"); \
326 register unsigned long __##arg1 asm ("r8") = arg1; \
327 asm volatile (paravirt_alt_bundle(__PARAVIRT_BR, \
328 PARAVIRT_TYPE(type)) \
329 : "=r"(ia64_intri_res) \
330 : PARAVIRT_OP(op), "0"(__##arg1) \
331 : PARAVIRT_CLOBBERS1)
332
333#define PARAVIRT_BR1_VOID(op, type, arg1) \
334 register void *__##arg1 asm ("r8") = arg1; \
335 register unsigned long ia64_clobber asm ("r8"); \
336 asm volatile (paravirt_alt_bundle(__PARAVIRT_BR, \
337 PARAVIRT_TYPE(type)) \
338 : "=r"(ia64_clobber) \
339 : PARAVIRT_OP(op), "0"(__##arg1) \
340 : PARAVIRT_CLOBBERS1)
341
342#define PARAVIRT_BR2(op, type, arg1, arg2) \
343 register unsigned long __##arg1 asm ("r8") = arg1; \
344 register unsigned long __##arg2 asm ("r9") = arg2; \
345 register unsigned long ia64_clobber1 asm ("r8"); \
346 register unsigned long ia64_clobber2 asm ("r9"); \
347 asm volatile (paravirt_alt_bundle(__PARAVIRT_BR, \
348 PARAVIRT_TYPE(type)) \
349 : "=r"(ia64_clobber1), "=r"(ia64_clobber2) \
350 : PARAVIRT_OP(op), "0"(__##arg1), "1"(__##arg2) \
351 : PARAVIRT_CLOBBERS2)
352
353
354#define PARAVIRT_DEFINE_CPU_OP0(op, type) \
355 static inline void \
356 paravirt_ ## op (void) \
357 { \
358 PARAVIRT_BR0(op, type); \
359 }
360
361#define PARAVIRT_DEFINE_CPU_OP0_RET(op, type) \
362 static inline unsigned long \
363 paravirt_ ## op (void) \
364 { \
365 PARAVIRT_BR0_RET(op, type); \
366 return ia64_intri_res; \
367 }
368
369#define PARAVIRT_DEFINE_CPU_OP1_VOID(op, type) \
370 static inline void \
371 paravirt_ ## op (void *arg1) \
372 { \
373 PARAVIRT_BR1_VOID(op, type, arg1); \
374 }
375
376#define PARAVIRT_DEFINE_CPU_OP1(op, type) \
377 static inline void \
378 paravirt_ ## op (unsigned long arg1) \
379 { \
380 PARAVIRT_BR1(op, type, arg1); \
381 }
382
383#define PARAVIRT_DEFINE_CPU_OP1_RET(op, type) \
384 static inline unsigned long \
385 paravirt_ ## op (unsigned long arg1) \
386 { \
387 PARAVIRT_BR1_RET(op, type, arg1); \
388 return ia64_intri_res; \
389 }
390
391#define PARAVIRT_DEFINE_CPU_OP2(op, type) \
392 static inline void \
393 paravirt_ ## op (unsigned long arg1, \
394 unsigned long arg2) \
395 { \
396 PARAVIRT_BR2(op, type, arg1, arg2); \
397 }
398
399
400PARAVIRT_DEFINE_CPU_OP1_VOID(fc, FC);
401PARAVIRT_DEFINE_CPU_OP1_RET(thash, THASH)
402PARAVIRT_DEFINE_CPU_OP1_RET(get_cpuid, GET_CPUID)
403PARAVIRT_DEFINE_CPU_OP1_RET(get_pmd, GET_PMD)
404PARAVIRT_DEFINE_CPU_OP2(ptcga, PTCGA)
405PARAVIRT_DEFINE_CPU_OP1_RET(get_rr, GET_RR)
406PARAVIRT_DEFINE_CPU_OP2(set_rr, SET_RR)
407PARAVIRT_DEFINE_CPU_OP0(ssm_i, SSM_I)
408PARAVIRT_DEFINE_CPU_OP0(rsm_i, RSM_I)
409PARAVIRT_DEFINE_CPU_OP0_RET(get_psr_i, GET_PSR_I)
410PARAVIRT_DEFINE_CPU_OP1(intrin_local_irq_restore, INTRIN_LOCAL_IRQ_RESTORE)
411
412static inline void
413paravirt_set_rr0_to_rr4(unsigned long val0, unsigned long val1,
414 unsigned long val2, unsigned long val3,
415 unsigned long val4)
416{
417 register unsigned long __val0 asm ("r8") = val0;
418 register unsigned long __val1 asm ("r9") = val1;
419 register unsigned long __val2 asm ("r10") = val2;
420 register unsigned long __val3 asm ("r11") = val3;
421 register unsigned long __val4 asm ("r14") = val4;
422
423 register unsigned long ia64_clobber0 asm ("r8");
424 register unsigned long ia64_clobber1 asm ("r9");
425 register unsigned long ia64_clobber2 asm ("r10");
426 register unsigned long ia64_clobber3 asm ("r11");
427 register unsigned long ia64_clobber4 asm ("r14");
428
429 asm volatile (paravirt_alt_bundle(__PARAVIRT_BR,
430 PARAVIRT_TYPE(SET_RR0_TO_RR4))
431 : "=r"(ia64_clobber0),
432 "=r"(ia64_clobber1),
433 "=r"(ia64_clobber2),
434 "=r"(ia64_clobber3),
435 "=r"(ia64_clobber4)
436 : PARAVIRT_OP(set_rr0_to_rr4),
437 "0"(__val0), "1"(__val1), "2"(__val2),
438 "3"(__val3), "4"(__val4)
439 : PARAVIRT_CLOBBERS5);
440}
441
442/* unsigned long paravirt_getreg(int reg) */
443#define __paravirt_getreg(reg) \
444 ({ \
445 register unsigned long ia64_intri_res asm ("r8"); \
446 register unsigned long __reg asm ("r8") = (reg); \
447 \
448 asm volatile (paravirt_alt_bundle(__PARAVIRT_BR, \
449 PARAVIRT_TYPE(GETREG) \
450 + (reg)) \
451 : "=r"(ia64_intri_res) \
452 : PARAVIRT_OP(getreg), "0"(__reg) \
453 : PARAVIRT_CLOBBERS1); \
454 \
455 ia64_intri_res; \
456 })
457
458/* void paravirt_setreg(int reg, unsigned long val) */
459#define paravirt_setreg(reg, val) \
460 do { \
461 register unsigned long __val asm ("r8") = val; \
462 register unsigned long __reg asm ("r9") = reg; \
463 register unsigned long ia64_clobber1 asm ("r8"); \
464 register unsigned long ia64_clobber2 asm ("r9"); \
465 \
466 asm volatile (paravirt_alt_bundle(__PARAVIRT_BR, \
467 PARAVIRT_TYPE(SETREG) \
468 + (reg)) \
469 : "=r"(ia64_clobber1), \
470 "=r"(ia64_clobber2) \
471 : PARAVIRT_OP(setreg), \
472 "1"(__reg), "0"(__val) \
473 : PARAVIRT_CLOBBERS2); \
474 } while (0)
475
476#endif /* ASM_SUPPORTED */
477#endif /* CONFIG_PARAVIRT && ASM_SUPPOTED */
478
479#endif /* _ASM_IA64_PARAVIRT_PRIVOP_H */
diff --git a/arch/ia64/kernel/Makefile b/arch/ia64/kernel/Makefile
index d68b5cf81e31..3686d6abafde 100644
--- a/arch/ia64/kernel/Makefile
+++ b/arch/ia64/kernel/Makefile
@@ -9,7 +9,7 @@ endif
 extra-y	:= head.o init_task.o vmlinux.lds
 
 obj-y := entry.o efi.o efi_stub.o gate-data.o fsys.o ia64_ksyms.o irq.o irq_ia64.o	\
-	 irq_lsapic.o ivt.o machvec.o pal.o paravirt_patchlist.o patch.o process.o perfmon.o ptrace.o sal.o \
+	 irq_lsapic.o ivt.o machvec.o pal.o patch.o process.o perfmon.o ptrace.o sal.o \
 	 salinfo.o setup.o signal.o sys_ia64.o time.o traps.o unaligned.o \
 	 unwind.o mca.o mca_asm.o topology.o dma-mapping.o
 
@@ -35,9 +35,6 @@ mca_recovery-y += mca_drv.o mca_drv_asm.o
 obj-$(CONFIG_IA64_MC_ERR_INJECT)+= err_inject.o
 obj-$(CONFIG_STACKTRACE)	+= stacktrace.o
 
-obj-$(CONFIG_PARAVIRT)		+= paravirt.o paravirtentry.o \
-				   paravirt_patch.o
-
 obj-$(CONFIG_IA64_ESI)		+= esi.o
 ifneq ($(CONFIG_IA64_ESI),)
 obj-y				+= esi_stub.o	# must be in kernel proper
@@ -52,8 +49,6 @@ CFLAGS_traps.o += -mfixed-range=f2-f5,f16-f31
 
 # The gate DSO image is built using a special linker script.
 include $(src)/Makefile.gate
-# tell compiled for native
-CPPFLAGS_gate.lds += -D__IA64_GATE_PARAVIRTUALIZED_NATIVE
 
 # Calculate NR_IRQ = max(IA64_NATIVE_NR_IRQS, XEN_NR_IRQS, ...) based on config
 define sed-y
@@ -84,30 +79,3 @@ arch/$(SRCARCH)/kernel/nr-irqs.s: arch/$(SRCARCH)/kernel/nr-irqs.c
 include/generated/nr-irqs.h: arch/$(SRCARCH)/kernel/nr-irqs.s
 	$(Q)mkdir -p $(dir $@)
 	$(call cmd,nr_irqs)
-
-#
-# native ivt.S, entry.S and fsys.S
-#
-ASM_PARAVIRT_OBJS = ivt.o entry.o fsys.o
-define paravirtualized_native
-AFLAGS_$(1) += -D__IA64_ASM_PARAVIRTUALIZED_NATIVE
-AFLAGS_pvchk-sed-$(1) += -D__IA64_ASM_PARAVIRTUALIZED_PVCHECK
-extra-y += pvchk-$(1)
-endef
-$(foreach obj,$(ASM_PARAVIRT_OBJS),$(eval $(call paravirtualized_native,$(obj))))
-
-#
-# Checker for paravirtualizations of privileged operations.
-#
-quiet_cmd_pv_check_sed = PVCHK	 $@
-define cmd_pv_check_sed
-	sed -f $(srctree)/arch/$(SRCARCH)/scripts/pvcheck.sed $< > $@
-endef
-
-$(obj)/pvchk-sed-%.s: $(src)/%.S $(srctree)/arch/$(SRCARCH)/scripts/pvcheck.sed FORCE
-	$(call if_changed_dep,as_s_S)
-$(obj)/pvchk-%.s: $(obj)/pvchk-sed-%.s FORCE
-	$(call if_changed,pv_check_sed)
-$(obj)/pvchk-%.o: $(obj)/pvchk-%.s FORCE
-	$(call if_changed,as_o_S)
-.PRECIOUS: $(obj)/pvchk-sed-%.s $(obj)/pvchk-%.s $(obj)/pvchk-%.o
diff --git a/arch/ia64/kernel/efi.c b/arch/ia64/kernel/efi.c
index c52d7540dc05..47e962f7ed5a 100644
--- a/arch/ia64/kernel/efi.c
+++ b/arch/ia64/kernel/efi.c
@@ -464,7 +464,6 @@ efi_map_pal_code (void)
 		GRANULEROUNDDOWN((unsigned long) pal_vaddr),
 		pte_val(pfn_pte(__pa(pal_vaddr) >> PAGE_SHIFT, PAGE_KERNEL)),
 		IA64_GRANULE_SHIFT);
-	paravirt_dv_serialize_data();
 	ia64_set_psr(psr);		/* restore psr */
 }
 
diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S
index fcf8b8cbca0b..ae0de7bf5525 100644
--- a/arch/ia64/kernel/entry.S
+++ b/arch/ia64/kernel/entry.S
@@ -51,7 +51,6 @@
 
 #include "minstate.h"
 
-#ifdef __IA64_ASM_PARAVIRTUALIZED_NATIVE
 	/*
 	 * execve() is special because in case of success, we need to
 	 * setup a null register window frame.
@@ -161,7 +160,6 @@ GLOBAL_ENTRY(sys_clone)
 	mov rp=loc0
 	br.ret.sptk.many rp
 END(sys_clone)
-#endif /* __IA64_ASM_PARAVIRTUALIZED_NATIVE */
 
 /*
  * prev_task <- ia64_switch_to(struct task_struct *next)
@@ -169,7 +167,7 @@ END(sys_clone)
  *	called.  The code starting at .map relies on this.  The rest of the code
  *	doesn't care about the interrupt masking status.
  */
-GLOBAL_ENTRY(__paravirt_switch_to)
+GLOBAL_ENTRY(ia64_switch_to)
 	.prologue
 	alloc r16=ar.pfs,1,0,0,0
 	DO_SAVE_SWITCH_STACK
@@ -221,9 +219,8 @@ GLOBAL_ENTRY(__paravirt_switch_to)
 	itr.d dtr[r25]=r23		// wire in new mapping...
 	SSM_PSR_IC_AND_SRLZ_D(r8, r9)	// reenable the psr.ic bit
 	br.cond.sptk .done
-END(__paravirt_switch_to)
+END(ia64_switch_to)
 
-#ifdef __IA64_ASM_PARAVIRTUALIZED_NATIVE
 /*
  * Note that interrupts are enabled during save_switch_stack and load_switch_stack.  This
  * means that we may get an interrupt with "sp" pointing to the new kernel stack while
@@ -639,16 +636,8 @@ GLOBAL_ENTRY(ia64_ret_from_syscall)
 	adds r2=PT(R8)+16,sp			// r2 = &pt_regs.r8
 	mov r10=r0				// clear error indication in r10
 (p7)	br.cond.spnt handle_syscall_error	// handle potential syscall failure
-#ifdef CONFIG_PARAVIRT
-	;;
-	br.cond.sptk.few ia64_leave_syscall
-	;;
-#endif /* CONFIG_PARAVIRT */
 END(ia64_ret_from_syscall)
-#ifndef CONFIG_PARAVIRT
 	// fall through
-#endif
-#endif /* __IA64_ASM_PARAVIRTUALIZED_NATIVE */
 
653/* 642/*
654 * ia64_leave_syscall(): Same as ia64_leave_kernel, except that it doesn't 643 * ia64_leave_syscall(): Same as ia64_leave_kernel, except that it doesn't
@@ -694,7 +683,7 @@ END(ia64_ret_from_syscall)
694 * ar.csd: cleared 683 * ar.csd: cleared
695 * ar.ssd: cleared 684 * ar.ssd: cleared
696 */ 685 */
697GLOBAL_ENTRY(__paravirt_leave_syscall) 686GLOBAL_ENTRY(ia64_leave_syscall)
698 PT_REGS_UNWIND_INFO(0) 687 PT_REGS_UNWIND_INFO(0)
699 /* 688 /*
700 * work.need_resched etc. mustn't get changed by this CPU before it returns to 689 * work.need_resched etc. mustn't get changed by this CPU before it returns to
@@ -722,8 +711,8 @@ GLOBAL_ENTRY(__paravirt_leave_syscall)
722 cmp.eq pLvSys,p0=r0,r0 // pLvSys=1: leave from syscall 711 cmp.eq pLvSys,p0=r0,r0 // pLvSys=1: leave from syscall
723(pUStk) cmp.eq.unc p6,p0=r0,r0 // p6 <- pUStk 712(pUStk) cmp.eq.unc p6,p0=r0,r0 // p6 <- pUStk
724#endif 713#endif
725.global __paravirt_work_processed_syscall; 714.global ia64_work_processed_syscall;
726__paravirt_work_processed_syscall: 715ia64_work_processed_syscall:
727#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE 716#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
728 adds r2=PT(LOADRS)+16,r12 717 adds r2=PT(LOADRS)+16,r12
729 MOV_FROM_ITC(pUStk, p9, r22, r19) // fetch time at leave 718 MOV_FROM_ITC(pUStk, p9, r22, r19) // fetch time at leave
@@ -836,9 +825,9 @@ __paravirt_work_processed_syscall:
836 mov.m ar.ssd=r0 // M2 clear ar.ssd 825 mov.m ar.ssd=r0 // M2 clear ar.ssd
837 mov f11=f0 // F clear f11 826 mov f11=f0 // F clear f11
838 br.cond.sptk.many rbs_switch // B 827 br.cond.sptk.many rbs_switch // B
839END(__paravirt_leave_syscall) 828END(ia64_leave_syscall)
840 829
841GLOBAL_ENTRY(__paravirt_leave_kernel) 830GLOBAL_ENTRY(ia64_leave_kernel)
842 PT_REGS_UNWIND_INFO(0) 831 PT_REGS_UNWIND_INFO(0)
843 /* 832 /*
844 * work.need_resched etc. mustn't get changed by this CPU before it returns to 833 * work.need_resched etc. mustn't get changed by this CPU before it returns to
@@ -1171,26 +1160,25 @@ skip_rbs_switch:
1171(p6) br.cond.sptk.few .notify 1160(p6) br.cond.sptk.few .notify
1172 br.call.spnt.many rp=preempt_schedule_irq 1161 br.call.spnt.many rp=preempt_schedule_irq
1173.ret9: cmp.eq p6,p0=r0,r0 // p6 <- 1 (re-check) 1162.ret9: cmp.eq p6,p0=r0,r0 // p6 <- 1 (re-check)
1174(pLvSys)br.cond.sptk.few __paravirt_pending_syscall_end 1163(pLvSys)br.cond.sptk.few ia64_work_pending_syscall_end
1175 br.cond.sptk.many .work_processed_kernel 1164 br.cond.sptk.many .work_processed_kernel
1176 1165
1177.notify: 1166.notify:
1178(pUStk) br.call.spnt.many rp=notify_resume_user 1167(pUStk) br.call.spnt.many rp=notify_resume_user
1179.ret10: cmp.ne p6,p0=r0,r0 // p6 <- 0 (don't re-check) 1168.ret10: cmp.ne p6,p0=r0,r0 // p6 <- 0 (don't re-check)
1180(pLvSys)br.cond.sptk.few __paravirt_pending_syscall_end 1169(pLvSys)br.cond.sptk.few ia64_work_pending_syscall_end
1181 br.cond.sptk.many .work_processed_kernel 1170 br.cond.sptk.many .work_processed_kernel
1182 1171
1183.global __paravirt_pending_syscall_end; 1172.global ia64_work_pending_syscall_end;
1184__paravirt_pending_syscall_end: 1173ia64_work_pending_syscall_end:
1185 adds r2=PT(R8)+16,r12 1174 adds r2=PT(R8)+16,r12
1186 adds r3=PT(R10)+16,r12 1175 adds r3=PT(R10)+16,r12
1187 ;; 1176 ;;
1188 ld8 r8=[r2] 1177 ld8 r8=[r2]
1189 ld8 r10=[r3] 1178 ld8 r10=[r3]
1190 br.cond.sptk.many __paravirt_work_processed_syscall_target 1179 br.cond.sptk.many ia64_work_processed_syscall
1191END(__paravirt_leave_kernel) 1180END(ia64_leave_kernel)
1192 1181
1193#ifdef __IA64_ASM_PARAVIRTUALIZED_NATIVE
1194ENTRY(handle_syscall_error) 1182ENTRY(handle_syscall_error)
1195 /* 1183 /*
1196 * Some system calls (e.g., ptrace, mmap) can return arbitrary values which could 1184 * Some system calls (e.g., ptrace, mmap) can return arbitrary values which could
@@ -1294,7 +1282,7 @@ ENTRY(sys_rt_sigreturn)
1294 adds sp=16,sp 1282 adds sp=16,sp
1295 ;; 1283 ;;
1296 ld8 r9=[sp] // load new ar.unat 1284 ld8 r9=[sp] // load new ar.unat
1297 mov.sptk b7=r8,ia64_native_leave_kernel 1285 mov.sptk b7=r8,ia64_leave_kernel
1298 ;; 1286 ;;
1299 mov ar.unat=r9 1287 mov ar.unat=r9
1300 br.many b7 1288 br.many b7
@@ -1782,4 +1770,3 @@ sys_call_table:
1782 data8 sys_execveat 1770 data8 sys_execveat
1783 1771
1784 .org sys_call_table + 8*NR_syscalls // guard against failures to increase NR_syscalls 1772 .org sys_call_table + 8*NR_syscalls // guard against failures to increase NR_syscalls
1785#endif /* __IA64_ASM_PARAVIRTUALIZED_NATIVE */
diff --git a/arch/ia64/kernel/fsys.S b/arch/ia64/kernel/fsys.S
index abc6dee3799c..edbf7af95849 100644
--- a/arch/ia64/kernel/fsys.S
+++ b/arch/ia64/kernel/fsys.S
@@ -24,7 +24,7 @@
24#include <asm/unistd.h> 24#include <asm/unistd.h>
25 25
26#include "entry.h" 26#include "entry.h"
27#include "paravirt_inst.h" 27#include <asm/native/inst.h>
28 28
29/* 29/*
30 * See Documentation/ia64/fsys.txt for details on fsyscalls. 30 * See Documentation/ia64/fsys.txt for details on fsyscalls.
@@ -402,7 +402,7 @@ ENTRY(fsys_fallback_syscall)
402 mov r26=ar.pfs 402 mov r26=ar.pfs
403END(fsys_fallback_syscall) 403END(fsys_fallback_syscall)
404 /* FALL THROUGH */ 404 /* FALL THROUGH */
405GLOBAL_ENTRY(paravirt_fsys_bubble_down) 405GLOBAL_ENTRY(fsys_bubble_down)
406 .prologue 406 .prologue
407 .altrp b6 407 .altrp b6
408 .body 408 .body
@@ -440,7 +440,7 @@ GLOBAL_ENTRY(paravirt_fsys_bubble_down)
440 * 440 *
441 * PSR.BE : already is turned off in __kernel_syscall_via_epc() 441 * PSR.BE : already is turned off in __kernel_syscall_via_epc()
442 * PSR.AC : don't care (kernel normally turns PSR.AC on) 442 * PSR.AC : don't care (kernel normally turns PSR.AC on)
443 * PSR.I : already turned off by the time paravirt_fsys_bubble_down gets 443 * PSR.I : already turned off by the time fsys_bubble_down gets
444 * invoked 444 * invoked
445 * PSR.DFL: always 0 (kernel never turns it on) 445 * PSR.DFL: always 0 (kernel never turns it on)
446 * PSR.DFH: don't care --- kernel never touches f32-f127 on its own 446 * PSR.DFH: don't care --- kernel never touches f32-f127 on its own
@@ -450,7 +450,7 @@ GLOBAL_ENTRY(paravirt_fsys_bubble_down)
450 * PSR.DB : don't care --- kernel never enables kernel-level 450 * PSR.DB : don't care --- kernel never enables kernel-level
451 * breakpoints 451 * breakpoints
452 * PSR.TB : must be 0 already; if it wasn't zero on entry to 452 * PSR.TB : must be 0 already; if it wasn't zero on entry to
453 * __kernel_syscall_via_epc, the branch to paravirt_fsys_bubble_down 453 * __kernel_syscall_via_epc, the branch to fsys_bubble_down
454 * will trigger a taken branch; the taken-trap-handler then 454 * will trigger a taken branch; the taken-trap-handler then
455 * converts the syscall into a break-based system-call. 455 * converts the syscall into a break-based system-call.
456 */ 456 */
@@ -541,14 +541,14 @@ GLOBAL_ENTRY(paravirt_fsys_bubble_down)
541 nop.m 0 541 nop.m 0
542(p8) br.call.sptk.many b6=b6 // B (ignore return address) 542(p8) br.call.sptk.many b6=b6 // B (ignore return address)
543 br.cond.spnt ia64_trace_syscall // B 543 br.cond.spnt ia64_trace_syscall // B
544END(paravirt_fsys_bubble_down) 544END(fsys_bubble_down)
545 545
546 .rodata 546 .rodata
547 .align 8 547 .align 8
548 .globl paravirt_fsyscall_table 548 .globl fsyscall_table
549 549
550 data8 paravirt_fsys_bubble_down 550 data8 fsys_bubble_down
551paravirt_fsyscall_table: 551fsyscall_table:
552 data8 fsys_ni_syscall 552 data8 fsys_ni_syscall
553 data8 0 // exit // 1025 553 data8 0 // exit // 1025
554 data8 0 // read 554 data8 0 // read
@@ -833,4 +833,4 @@ paravirt_fsyscall_table:
833 833
834 // fill in zeros for the remaining entries 834 // fill in zeros for the remaining entries
835 .zero: 835 .zero:
836 .space paravirt_fsyscall_table + 8*NR_syscalls - .zero, 0 836 .space fsyscall_table + 8*NR_syscalls - .zero, 0
diff --git a/arch/ia64/kernel/gate.S b/arch/ia64/kernel/gate.S
index b5f8bdd8618e..0bd1b3bfaf1c 100644
--- a/arch/ia64/kernel/gate.S
+++ b/arch/ia64/kernel/gate.S
@@ -14,7 +14,7 @@
14#include <asm/unistd.h> 14#include <asm/unistd.h>
15#include <asm/kregs.h> 15#include <asm/kregs.h>
16#include <asm/page.h> 16#include <asm/page.h>
17#include "paravirt_inst.h" 17#include <asm/native/inst.h>
18 18
19/* 19/*
20 * We can't easily refer to symbols inside the kernel. To avoid full runtime relocation, 20 * We can't easily refer to symbols inside the kernel. To avoid full runtime relocation,
@@ -376,11 +376,4 @@ GLOBAL_ENTRY(__kernel_syscall_via_epc)
376(p9) mov r8=ENOSYS 376(p9) mov r8=ENOSYS
377 FSYS_RETURN 377 FSYS_RETURN
378 378
379#ifdef CONFIG_PARAVIRT
380 /*
381 * padd to make the size of this symbol constant
382 * independent of paravirtualization.
383 */
384 .align PAGE_SIZE / 8
385#endif
386END(__kernel_syscall_via_epc) 379END(__kernel_syscall_via_epc)
diff --git a/arch/ia64/kernel/gate.lds.S b/arch/ia64/kernel/gate.lds.S
index e518f7902af6..3e8271e85a1e 100644
--- a/arch/ia64/kernel/gate.lds.S
+++ b/arch/ia64/kernel/gate.lds.S
@@ -6,7 +6,6 @@
6 */ 6 */
7 7
8#include <asm/page.h> 8#include <asm/page.h>
9#include "paravirt_patchlist.h"
10 9
11SECTIONS 10SECTIONS
12{ 11{
@@ -33,21 +32,21 @@ SECTIONS
33 . = GATE_ADDR + 0x600; 32 . = GATE_ADDR + 0x600;
34 33
35 .data..patch : { 34 .data..patch : {
36 __paravirt_start_gate_mckinley_e9_patchlist = .; 35 __start_gate_mckinley_e9_patchlist = .;
37 *(.data..patch.mckinley_e9) 36 *(.data..patch.mckinley_e9)
38 __paravirt_end_gate_mckinley_e9_patchlist = .; 37 __end_gate_mckinley_e9_patchlist = .;
39 38
40 __paravirt_start_gate_vtop_patchlist = .; 39 __start_gate_vtop_patchlist = .;
41 *(.data..patch.vtop) 40 *(.data..patch.vtop)
42 __paravirt_end_gate_vtop_patchlist = .; 41 __end_gate_vtop_patchlist = .;
43 42
44 __paravirt_start_gate_fsyscall_patchlist = .; 43 __start_gate_fsyscall_patchlist = .;
45 *(.data..patch.fsyscall_table) 44 *(.data..patch.fsyscall_table)
46 __paravirt_end_gate_fsyscall_patchlist = .; 45 __end_gate_fsyscall_patchlist = .;
47 46
48 __paravirt_start_gate_brl_fsys_bubble_down_patchlist = .; 47 __start_gate_brl_fsys_bubble_down_patchlist = .;
49 *(.data..patch.brl_fsys_bubble_down) 48 *(.data..patch.brl_fsys_bubble_down)
50 __paravirt_end_gate_brl_fsys_bubble_down_patchlist = .; 49 __end_gate_brl_fsys_bubble_down_patchlist = .;
51 } :readable 50 } :readable
52 51
53 .IA_64.unwind_info : { *(.IA_64.unwind_info*) } 52 .IA_64.unwind_info : { *(.IA_64.unwind_info*) }
diff --git a/arch/ia64/kernel/head.S b/arch/ia64/kernel/head.S
index a4acddad0c78..bb748c596443 100644
--- a/arch/ia64/kernel/head.S
+++ b/arch/ia64/kernel/head.S
@@ -26,7 +26,6 @@
26#include <asm/mmu_context.h> 26#include <asm/mmu_context.h>
27#include <asm/asm-offsets.h> 27#include <asm/asm-offsets.h>
28#include <asm/pal.h> 28#include <asm/pal.h>
29#include <asm/paravirt.h>
30#include <asm/pgtable.h> 29#include <asm/pgtable.h>
31#include <asm/processor.h> 30#include <asm/processor.h>
32#include <asm/ptrace.h> 31#include <asm/ptrace.h>
@@ -394,41 +393,6 @@ start_ap:
394 ;; 393 ;;
395(isBP) st8 [r2]=r28 // save the address of the boot param area passed by the bootloader 394(isBP) st8 [r2]=r28 // save the address of the boot param area passed by the bootloader
396 395
397#ifdef CONFIG_PARAVIRT
398
399 movl r14=hypervisor_setup_hooks
400 movl r15=hypervisor_type
401 mov r16=num_hypervisor_hooks
402 ;;
403 ld8 r2=[r15]
404 ;;
405 cmp.ltu p7,p0=r2,r16 // array size check
406 shladd r8=r2,3,r14
407 ;;
408(p7) ld8 r9=[r8]
409 ;;
410(p7) mov b1=r9
411(p7) cmp.ne.unc p7,p0=r9,r0 // no actual branch to NULL
412 ;;
413(p7) br.call.sptk.many rp=b1
414
415 __INITDATA
416
417default_setup_hook = 0 // Currently nothing needs to be done.
418
419 .global hypervisor_type
420hypervisor_type:
421 data8 PARAVIRT_HYPERVISOR_TYPE_DEFAULT
422
423 // must have the same order with PARAVIRT_HYPERVISOR_TYPE_xxx
424
425hypervisor_setup_hooks:
426 data8 default_setup_hook
427num_hypervisor_hooks = (. - hypervisor_setup_hooks) / 8
428 .previous
429
430#endif
431
432#ifdef CONFIG_SMP 396#ifdef CONFIG_SMP
433(isAP) br.call.sptk.many rp=start_secondary 397(isAP) br.call.sptk.many rp=start_secondary
434.ret0: 398.ret0:
@@ -1063,12 +1027,6 @@ GLOBAL_ENTRY(ia64_native_sched_clock)
1063 shrp r8=r9,r8,IA64_NSEC_PER_CYC_SHIFT 1027 shrp r8=r9,r8,IA64_NSEC_PER_CYC_SHIFT
1064 br.ret.sptk.many rp 1028 br.ret.sptk.many rp
1065END(ia64_native_sched_clock) 1029END(ia64_native_sched_clock)
1066#ifndef CONFIG_PARAVIRT
1067 //unsigned long long
1068 //sched_clock(void) __attribute__((alias("ia64_native_sched_clock")));
1069 .global sched_clock
1070sched_clock = ia64_native_sched_clock
1071#endif
1072 1030
1073#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE 1031#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
1074GLOBAL_ENTRY(cycle_to_cputime) 1032GLOBAL_ENTRY(cycle_to_cputime)
diff --git a/arch/ia64/kernel/ivt.S b/arch/ia64/kernel/ivt.S
index e42bf7a913f3..b1c3cfc93e71 100644
--- a/arch/ia64/kernel/ivt.S
+++ b/arch/ia64/kernel/ivt.S
@@ -937,7 +937,6 @@ END(interrupt)
937 * - ar.fpsr: set to kernel settings 937 * - ar.fpsr: set to kernel settings
938 * - b6: preserved (same as on entry) 938 * - b6: preserved (same as on entry)
939 */ 939 */
940#ifdef __IA64_ASM_PARAVIRTUALIZED_NATIVE
941GLOBAL_ENTRY(ia64_syscall_setup) 940GLOBAL_ENTRY(ia64_syscall_setup)
942#if PT(B6) != 0 941#if PT(B6) != 0
943# error This code assumes that b6 is the first field in pt_regs. 942# error This code assumes that b6 is the first field in pt_regs.
@@ -1029,7 +1028,6 @@ GLOBAL_ENTRY(ia64_syscall_setup)
1029(p10) mov r8=-EINVAL 1028(p10) mov r8=-EINVAL
1030 br.ret.sptk.many b7 1029 br.ret.sptk.many b7
1031END(ia64_syscall_setup) 1030END(ia64_syscall_setup)
1032#endif /* __IA64_ASM_PARAVIRTUALIZED_NATIVE */
1033 1031
1034 .org ia64_ivt+0x3c00 1032 .org ia64_ivt+0x3c00
1035///////////////////////////////////////////////////////////////////////////////////////// 1033/////////////////////////////////////////////////////////////////////////////////////////
@@ -1043,7 +1041,7 @@ END(ia64_syscall_setup)
1043 DBG_FAULT(16) 1041 DBG_FAULT(16)
1044 FAULT(16) 1042 FAULT(16)
1045 1043
1046#if defined(CONFIG_VIRT_CPU_ACCOUNTING_NATIVE) && defined(__IA64_ASM_PARAVIRTUALIZED_NATIVE) 1044#if defined(CONFIG_VIRT_CPU_ACCOUNTING_NATIVE)
1047 /* 1045 /*
1048 * There is no particular reason for this code to be here, other than 1046 * There is no particular reason for this code to be here, other than
1049 * that there happens to be space here that would go unused otherwise. 1047 * that there happens to be space here that would go unused otherwise.
diff --git a/arch/ia64/kernel/minstate.h b/arch/ia64/kernel/minstate.h
index cc82a7d744c9..5704700fb703 100644
--- a/arch/ia64/kernel/minstate.h
+++ b/arch/ia64/kernel/minstate.h
@@ -2,7 +2,7 @@
2#include <asm/cache.h> 2#include <asm/cache.h>
3 3
4#include "entry.h" 4#include "entry.h"
5#include "paravirt_inst.h" 5#include <asm/native/inst.h>
6 6
7#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE 7#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
8/* read ar.itc in advance, and use it before leaving bank 0 */ 8/* read ar.itc in advance, and use it before leaving bank 0 */
diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
index 29754aae5177..b15933c31b2f 100644
--- a/arch/ia64/kernel/module.c
+++ b/arch/ia64/kernel/module.c
@@ -439,14 +439,6 @@ module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings,
439 mod->arch.opd = s; 439 mod->arch.opd = s;
440 else if (strcmp(".IA_64.unwind", secstrings + s->sh_name) == 0) 440 else if (strcmp(".IA_64.unwind", secstrings + s->sh_name) == 0)
441 mod->arch.unwind = s; 441 mod->arch.unwind = s;
442#ifdef CONFIG_PARAVIRT
443 else if (strcmp(".paravirt_bundles",
444 secstrings + s->sh_name) == 0)
445 mod->arch.paravirt_bundles = s;
446 else if (strcmp(".paravirt_insts",
447 secstrings + s->sh_name) == 0)
448 mod->arch.paravirt_insts = s;
449#endif
450 442
451 if (!mod->arch.core_plt || !mod->arch.init_plt || !mod->arch.got || !mod->arch.opd) { 443 if (!mod->arch.core_plt || !mod->arch.init_plt || !mod->arch.got || !mod->arch.opd) {
452 printk(KERN_ERR "%s: sections missing\n", mod->name); 444 printk(KERN_ERR "%s: sections missing\n", mod->name);
@@ -914,30 +906,6 @@ module_finalize (const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs, struct module *mo
914 DEBUGP("%s: init: entry=%p\n", __func__, mod->init); 906 DEBUGP("%s: init: entry=%p\n", __func__, mod->init);
915 if (mod->arch.unwind) 907 if (mod->arch.unwind)
916 register_unwind_table(mod); 908 register_unwind_table(mod);
917#ifdef CONFIG_PARAVIRT
918 if (mod->arch.paravirt_bundles) {
919 struct paravirt_patch_site_bundle *start =
920 (struct paravirt_patch_site_bundle *)
921 mod->arch.paravirt_bundles->sh_addr;
922 struct paravirt_patch_site_bundle *end =
923 (struct paravirt_patch_site_bundle *)
924 (mod->arch.paravirt_bundles->sh_addr +
925 mod->arch.paravirt_bundles->sh_size);
926
927 paravirt_patch_apply_bundle(start, end);
928 }
929 if (mod->arch.paravirt_insts) {
930 struct paravirt_patch_site_inst *start =
931 (struct paravirt_patch_site_inst *)
932 mod->arch.paravirt_insts->sh_addr;
933 struct paravirt_patch_site_inst *end =
934 (struct paravirt_patch_site_inst *)
935 (mod->arch.paravirt_insts->sh_addr +
936 mod->arch.paravirt_insts->sh_size);
937
938 paravirt_patch_apply_inst(start, end);
939 }
940#endif
941 return 0; 909 return 0;
942} 910}
943 911
diff --git a/arch/ia64/kernel/paravirt.c b/arch/ia64/kernel/paravirt.c
deleted file mode 100644
index 1b22f6de2932..000000000000
--- a/arch/ia64/kernel/paravirt.c
+++ /dev/null
@@ -1,902 +0,0 @@
1/******************************************************************************
2 * arch/ia64/kernel/paravirt.c
3 *
4 * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
5 * VA Linux Systems Japan K.K.
6 * Yaozu (Eddie) Dong <eddie.dong@intel.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 *
22 */
23
24#include <linux/init.h>
25
26#include <linux/compiler.h>
27#include <linux/io.h>
28#include <linux/irq.h>
29#include <linux/module.h>
30#include <linux/types.h>
31
32#include <asm/iosapic.h>
33#include <asm/paravirt.h>
34
35/***************************************************************************
36 * general info
37 */
38struct pv_info pv_info = {
39 .kernel_rpl = 0,
40 .paravirt_enabled = 0,
41 .name = "bare hardware"
42};
43
44/***************************************************************************
45 * pv_init_ops
46 * initialization hooks.
47 */
48
49static void __init
50ia64_native_patch_branch(unsigned long tag, unsigned long type);
51
52struct pv_init_ops pv_init_ops =
53{
54#ifdef ASM_SUPPORTED
55 .patch_bundle = ia64_native_patch_bundle,
56#endif
57 .patch_branch = ia64_native_patch_branch,
58};
59
60/***************************************************************************
61 * pv_cpu_ops
62 * intrinsics hooks.
63 */
64
65#ifndef ASM_SUPPORTED
 66/* ia64_native_xxx are macros, so we have to make them real functions */
67
68#define DEFINE_VOID_FUNC1(name) \
69 static void \
70 ia64_native_ ## name ## _func(unsigned long arg) \
71 { \
72 ia64_native_ ## name(arg); \
73 }
74
75#define DEFINE_VOID_FUNC1_VOID(name) \
76 static void \
77 ia64_native_ ## name ## _func(void *arg) \
78 { \
79 ia64_native_ ## name(arg); \
80 }
81
82#define DEFINE_VOID_FUNC2(name) \
83 static void \
84 ia64_native_ ## name ## _func(unsigned long arg0, \
85 unsigned long arg1) \
86 { \
87 ia64_native_ ## name(arg0, arg1); \
88 }
89
90#define DEFINE_FUNC0(name) \
91 static unsigned long \
92 ia64_native_ ## name ## _func(void) \
93 { \
94 return ia64_native_ ## name(); \
95 }
96
97#define DEFINE_FUNC1(name, type) \
98 static unsigned long \
99 ia64_native_ ## name ## _func(type arg) \
100 { \
101 return ia64_native_ ## name(arg); \
102 } \
103
104DEFINE_VOID_FUNC1_VOID(fc);
105DEFINE_VOID_FUNC1(intrin_local_irq_restore);
106
107DEFINE_VOID_FUNC2(ptcga);
108DEFINE_VOID_FUNC2(set_rr);
109
110DEFINE_FUNC0(get_psr_i);
111
112DEFINE_FUNC1(thash, unsigned long);
113DEFINE_FUNC1(get_cpuid, int);
114DEFINE_FUNC1(get_pmd, int);
115DEFINE_FUNC1(get_rr, unsigned long);
116
117static void
118ia64_native_ssm_i_func(void)
119{
120 ia64_native_ssm(IA64_PSR_I);
121}
122
123static void
124ia64_native_rsm_i_func(void)
125{
126 ia64_native_rsm(IA64_PSR_I);
127}
128
129static void
130ia64_native_set_rr0_to_rr4_func(unsigned long val0, unsigned long val1,
131 unsigned long val2, unsigned long val3,
132 unsigned long val4)
133{
134 ia64_native_set_rr0_to_rr4(val0, val1, val2, val3, val4);
135}
136
137#define CASE_GET_REG(id) \
138 case _IA64_REG_ ## id: \
139 res = ia64_native_getreg(_IA64_REG_ ## id); \
140 break;
141#define CASE_GET_AR(id) CASE_GET_REG(AR_ ## id)
142#define CASE_GET_CR(id) CASE_GET_REG(CR_ ## id)
143
144unsigned long
145ia64_native_getreg_func(int regnum)
146{
147 unsigned long res = -1;
148 switch (regnum) {
149 CASE_GET_REG(GP);
150 /*CASE_GET_REG(IP);*/ /* returned ip value shouldn't be constant */
151 CASE_GET_REG(PSR);
152 CASE_GET_REG(TP);
153 CASE_GET_REG(SP);
154
155 CASE_GET_AR(KR0);
156 CASE_GET_AR(KR1);
157 CASE_GET_AR(KR2);
158 CASE_GET_AR(KR3);
159 CASE_GET_AR(KR4);
160 CASE_GET_AR(KR5);
161 CASE_GET_AR(KR6);
162 CASE_GET_AR(KR7);
163 CASE_GET_AR(RSC);
164 CASE_GET_AR(BSP);
165 CASE_GET_AR(BSPSTORE);
166 CASE_GET_AR(RNAT);
167 CASE_GET_AR(FCR);
168 CASE_GET_AR(EFLAG);
169 CASE_GET_AR(CSD);
170 CASE_GET_AR(SSD);
171 CASE_GET_AR(CFLAG);
172 CASE_GET_AR(FSR);
173 CASE_GET_AR(FIR);
174 CASE_GET_AR(FDR);
175 CASE_GET_AR(CCV);
176 CASE_GET_AR(UNAT);
177 CASE_GET_AR(FPSR);
178 CASE_GET_AR(ITC);
179 CASE_GET_AR(PFS);
180 CASE_GET_AR(LC);
181 CASE_GET_AR(EC);
182
183 CASE_GET_CR(DCR);
184 CASE_GET_CR(ITM);
185 CASE_GET_CR(IVA);
186 CASE_GET_CR(PTA);
187 CASE_GET_CR(IPSR);
188 CASE_GET_CR(ISR);
189 CASE_GET_CR(IIP);
190 CASE_GET_CR(IFA);
191 CASE_GET_CR(ITIR);
192 CASE_GET_CR(IIPA);
193 CASE_GET_CR(IFS);
194 CASE_GET_CR(IIM);
195 CASE_GET_CR(IHA);
196 CASE_GET_CR(LID);
197 CASE_GET_CR(IVR);
198 CASE_GET_CR(TPR);
199 CASE_GET_CR(EOI);
200 CASE_GET_CR(IRR0);
201 CASE_GET_CR(IRR1);
202 CASE_GET_CR(IRR2);
203 CASE_GET_CR(IRR3);
204 CASE_GET_CR(ITV);
205 CASE_GET_CR(PMV);
206 CASE_GET_CR(CMCV);
207 CASE_GET_CR(LRR0);
208 CASE_GET_CR(LRR1);
209
210 default:
211 printk(KERN_CRIT "wrong_getreg %d\n", regnum);
212 break;
213 }
214 return res;
215}
216
217#define CASE_SET_REG(id) \
218 case _IA64_REG_ ## id: \
219 ia64_native_setreg(_IA64_REG_ ## id, val); \
220 break;
221#define CASE_SET_AR(id) CASE_SET_REG(AR_ ## id)
222#define CASE_SET_CR(id) CASE_SET_REG(CR_ ## id)
223
224void
225ia64_native_setreg_func(int regnum, unsigned long val)
226{
227 switch (regnum) {
228 case _IA64_REG_PSR_L:
229 ia64_native_setreg(_IA64_REG_PSR_L, val);
230 ia64_dv_serialize_data();
231 break;
232 CASE_SET_REG(SP);
233 CASE_SET_REG(GP);
234
235 CASE_SET_AR(KR0);
236 CASE_SET_AR(KR1);
237 CASE_SET_AR(KR2);
238 CASE_SET_AR(KR3);
239 CASE_SET_AR(KR4);
240 CASE_SET_AR(KR5);
241 CASE_SET_AR(KR6);
242 CASE_SET_AR(KR7);
243 CASE_SET_AR(RSC);
244 CASE_SET_AR(BSP);
245 CASE_SET_AR(BSPSTORE);
246 CASE_SET_AR(RNAT);
247 CASE_SET_AR(FCR);
248 CASE_SET_AR(EFLAG);
249 CASE_SET_AR(CSD);
250 CASE_SET_AR(SSD);
251 CASE_SET_AR(CFLAG);
252 CASE_SET_AR(FSR);
253 CASE_SET_AR(FIR);
254 CASE_SET_AR(FDR);
255 CASE_SET_AR(CCV);
256 CASE_SET_AR(UNAT);
257 CASE_SET_AR(FPSR);
258 CASE_SET_AR(ITC);
259 CASE_SET_AR(PFS);
260 CASE_SET_AR(LC);
261 CASE_SET_AR(EC);
262
263 CASE_SET_CR(DCR);
264 CASE_SET_CR(ITM);
265 CASE_SET_CR(IVA);
266 CASE_SET_CR(PTA);
267 CASE_SET_CR(IPSR);
268 CASE_SET_CR(ISR);
269 CASE_SET_CR(IIP);
270 CASE_SET_CR(IFA);
271 CASE_SET_CR(ITIR);
272 CASE_SET_CR(IIPA);
273 CASE_SET_CR(IFS);
274 CASE_SET_CR(IIM);
275 CASE_SET_CR(IHA);
276 CASE_SET_CR(LID);
277 CASE_SET_CR(IVR);
278 CASE_SET_CR(TPR);
279 CASE_SET_CR(EOI);
280 CASE_SET_CR(IRR0);
281 CASE_SET_CR(IRR1);
282 CASE_SET_CR(IRR2);
283 CASE_SET_CR(IRR3);
284 CASE_SET_CR(ITV);
285 CASE_SET_CR(PMV);
286 CASE_SET_CR(CMCV);
287 CASE_SET_CR(LRR0);
288 CASE_SET_CR(LRR1);
289 default:
290 printk(KERN_CRIT "wrong setreg %d\n", regnum);
291 break;
292 }
293}
294#else
295
296#define __DEFINE_FUNC(name, code) \
297 extern const char ia64_native_ ## name ## _direct_start[]; \
298 extern const char ia64_native_ ## name ## _direct_end[]; \
299 asm (".align 32\n" \
300 ".proc ia64_native_" #name "_func\n" \
301 "ia64_native_" #name "_func:\n" \
302 "ia64_native_" #name "_direct_start:\n" \
303 code \
304 "ia64_native_" #name "_direct_end:\n" \
305 "br.cond.sptk.many b6\n" \
306 ".endp ia64_native_" #name "_func\n")
307
308#define DEFINE_VOID_FUNC0(name, code) \
309 extern void \
310 ia64_native_ ## name ## _func(void); \
311 __DEFINE_FUNC(name, code)
312
313#define DEFINE_VOID_FUNC1(name, code) \
314 extern void \
315 ia64_native_ ## name ## _func(unsigned long arg); \
316 __DEFINE_FUNC(name, code)
317
318#define DEFINE_VOID_FUNC1_VOID(name, code) \
319 extern void \
320 ia64_native_ ## name ## _func(void *arg); \
321 __DEFINE_FUNC(name, code)
322
323#define DEFINE_VOID_FUNC2(name, code) \
324 extern void \
325 ia64_native_ ## name ## _func(unsigned long arg0, \
326 unsigned long arg1); \
327 __DEFINE_FUNC(name, code)
328
329#define DEFINE_FUNC0(name, code) \
330 extern unsigned long \
331 ia64_native_ ## name ## _func(void); \
332 __DEFINE_FUNC(name, code)
333
334#define DEFINE_FUNC1(name, type, code) \
335 extern unsigned long \
336 ia64_native_ ## name ## _func(type arg); \
337 __DEFINE_FUNC(name, code)
338
339DEFINE_VOID_FUNC1_VOID(fc,
340 "fc r8\n");
341DEFINE_VOID_FUNC1(intrin_local_irq_restore,
342 ";;\n"
343 " cmp.ne p6, p7 = r8, r0\n"
344 ";;\n"
345 "(p6) ssm psr.i\n"
346 "(p7) rsm psr.i\n"
347 ";;\n"
348 "(p6) srlz.d\n");
349
350DEFINE_VOID_FUNC2(ptcga,
351 "ptc.ga r8, r9\n");
352DEFINE_VOID_FUNC2(set_rr,
353 "mov rr[r8] = r9\n");
354
355/* ia64_native_getreg(_IA64_REG_PSR) & IA64_PSR_I */
356DEFINE_FUNC0(get_psr_i,
357 "mov r2 = " __stringify(1 << IA64_PSR_I_BIT) "\n"
358 "mov r8 = psr\n"
359 ";;\n"
360 "and r8 = r2, r8\n");
361
362DEFINE_FUNC1(thash, unsigned long,
363 "thash r8 = r8\n");
364DEFINE_FUNC1(get_cpuid, int,
365 "mov r8 = cpuid[r8]\n");
366DEFINE_FUNC1(get_pmd, int,
367 "mov r8 = pmd[r8]\n");
368DEFINE_FUNC1(get_rr, unsigned long,
369 "mov r8 = rr[r8]\n");
370
371DEFINE_VOID_FUNC0(ssm_i,
372 "ssm psr.i\n");
373DEFINE_VOID_FUNC0(rsm_i,
374 "rsm psr.i\n");
375
376extern void
377ia64_native_set_rr0_to_rr4_func(unsigned long val0, unsigned long val1,
378 unsigned long val2, unsigned long val3,
379 unsigned long val4);
380__DEFINE_FUNC(set_rr0_to_rr4,
381 "mov rr[r0] = r8\n"
382 "movl r2 = 0x2000000000000000\n"
383 ";;\n"
384 "mov rr[r2] = r9\n"
385 "shl r3 = r2, 1\n" /* movl r3 = 0x4000000000000000 */
386 ";;\n"
387 "add r2 = r2, r3\n" /* movl r2 = 0x6000000000000000 */
388 "mov rr[r3] = r10\n"
389 ";;\n"
390 "mov rr[r2] = r11\n"
391 "shl r3 = r3, 1\n" /* movl r3 = 0x8000000000000000 */
392 ";;\n"
393 "mov rr[r3] = r14\n");
394
395extern unsigned long ia64_native_getreg_func(int regnum);
396asm(".global ia64_native_getreg_func\n");
397#define __DEFINE_GET_REG(id, reg) \
398 "mov r2 = " __stringify(_IA64_REG_ ## id) "\n" \
399 ";;\n" \
400 "cmp.eq p6, p0 = r2, r8\n" \
401 ";;\n" \
402 "(p6) mov r8 = " #reg "\n" \
403 "(p6) br.cond.sptk.many b6\n" \
404 ";;\n"
405#define __DEFINE_GET_AR(id, reg) __DEFINE_GET_REG(AR_ ## id, ar.reg)
406#define __DEFINE_GET_CR(id, reg) __DEFINE_GET_REG(CR_ ## id, cr.reg)
407
408__DEFINE_FUNC(getreg,
409 __DEFINE_GET_REG(GP, gp)
410 /*__DEFINE_GET_REG(IP, ip)*/ /* returned ip value shouldn't be constant */
411 __DEFINE_GET_REG(PSR, psr)
412 __DEFINE_GET_REG(TP, tp)
413 __DEFINE_GET_REG(SP, sp)
414
415 __DEFINE_GET_REG(AR_KR0, ar0)
416 __DEFINE_GET_REG(AR_KR1, ar1)
417 __DEFINE_GET_REG(AR_KR2, ar2)
418 __DEFINE_GET_REG(AR_KR3, ar3)
419 __DEFINE_GET_REG(AR_KR4, ar4)
420 __DEFINE_GET_REG(AR_KR5, ar5)
421 __DEFINE_GET_REG(AR_KR6, ar6)
422 __DEFINE_GET_REG(AR_KR7, ar7)
423 __DEFINE_GET_AR(RSC, rsc)
424 __DEFINE_GET_AR(BSP, bsp)
425 __DEFINE_GET_AR(BSPSTORE, bspstore)
426 __DEFINE_GET_AR(RNAT, rnat)
427 __DEFINE_GET_AR(FCR, fcr)
428 __DEFINE_GET_AR(EFLAG, eflag)
429 __DEFINE_GET_AR(CSD, csd)
430 __DEFINE_GET_AR(SSD, ssd)
431 __DEFINE_GET_REG(AR_CFLAG, ar27)
432 __DEFINE_GET_AR(FSR, fsr)
433 __DEFINE_GET_AR(FIR, fir)
434 __DEFINE_GET_AR(FDR, fdr)
435 __DEFINE_GET_AR(CCV, ccv)
436 __DEFINE_GET_AR(UNAT, unat)
437 __DEFINE_GET_AR(FPSR, fpsr)
438 __DEFINE_GET_AR(ITC, itc)
439 __DEFINE_GET_AR(PFS, pfs)
440 __DEFINE_GET_AR(LC, lc)
441 __DEFINE_GET_AR(EC, ec)
442
443 __DEFINE_GET_CR(DCR, dcr)
444 __DEFINE_GET_CR(ITM, itm)
445 __DEFINE_GET_CR(IVA, iva)
446 __DEFINE_GET_CR(PTA, pta)
447 __DEFINE_GET_CR(IPSR, ipsr)
448 __DEFINE_GET_CR(ISR, isr)
449 __DEFINE_GET_CR(IIP, iip)
450 __DEFINE_GET_CR(IFA, ifa)
451 __DEFINE_GET_CR(ITIR, itir)
452 __DEFINE_GET_CR(IIPA, iipa)
453 __DEFINE_GET_CR(IFS, ifs)
454 __DEFINE_GET_CR(IIM, iim)
455 __DEFINE_GET_CR(IHA, iha)
456 __DEFINE_GET_CR(LID, lid)
457 __DEFINE_GET_CR(IVR, ivr)
458 __DEFINE_GET_CR(TPR, tpr)
459 __DEFINE_GET_CR(EOI, eoi)
460 __DEFINE_GET_CR(IRR0, irr0)
461 __DEFINE_GET_CR(IRR1, irr1)
462 __DEFINE_GET_CR(IRR2, irr2)
463 __DEFINE_GET_CR(IRR3, irr3)
464 __DEFINE_GET_CR(ITV, itv)
465 __DEFINE_GET_CR(PMV, pmv)
466 __DEFINE_GET_CR(CMCV, cmcv)
467 __DEFINE_GET_CR(LRR0, lrr0)
468 __DEFINE_GET_CR(LRR1, lrr1)
469
470 "mov r8 = -1\n" /* unsupported case */
471 );
472
473extern void ia64_native_setreg_func(int regnum, unsigned long val);
474asm(".global ia64_native_setreg_func\n");
475#define __DEFINE_SET_REG(id, reg) \
476 "mov r2 = " __stringify(_IA64_REG_ ## id) "\n" \
477 ";;\n" \
478 "cmp.eq p6, p0 = r2, r9\n" \
479 ";;\n" \
480 "(p6) mov " #reg " = r8\n" \
481 "(p6) br.cond.sptk.many b6\n" \
482 ";;\n"
483#define __DEFINE_SET_AR(id, reg) __DEFINE_SET_REG(AR_ ## id, ar.reg)
484#define __DEFINE_SET_CR(id, reg) __DEFINE_SET_REG(CR_ ## id, cr.reg)
485__DEFINE_FUNC(setreg,
486 "mov r2 = " __stringify(_IA64_REG_PSR_L) "\n"
487 ";;\n"
488 "cmp.eq p6, p0 = r2, r9\n"
489 ";;\n"
490 "(p6) mov psr.l = r8\n"
491#ifdef HAVE_SERIALIZE_DIRECTIVE
492 ".serialize.data\n"
493#endif
494 "(p6) br.cond.sptk.many b6\n"
495 __DEFINE_SET_REG(GP, gp)
496 __DEFINE_SET_REG(SP, sp)
497
498 __DEFINE_SET_REG(AR_KR0, ar0)
499 __DEFINE_SET_REG(AR_KR1, ar1)
500 __DEFINE_SET_REG(AR_KR2, ar2)
501 __DEFINE_SET_REG(AR_KR3, ar3)
502 __DEFINE_SET_REG(AR_KR4, ar4)
503 __DEFINE_SET_REG(AR_KR5, ar5)
504 __DEFINE_SET_REG(AR_KR6, ar6)
505 __DEFINE_SET_REG(AR_KR7, ar7)
506 __DEFINE_SET_AR(RSC, rsc)
507 __DEFINE_SET_AR(BSP, bsp)
508 __DEFINE_SET_AR(BSPSTORE, bspstore)
509 __DEFINE_SET_AR(RNAT, rnat)
510 __DEFINE_SET_AR(FCR, fcr)
511 __DEFINE_SET_AR(EFLAG, eflag)
512 __DEFINE_SET_AR(CSD, csd)
513 __DEFINE_SET_AR(SSD, ssd)
514 __DEFINE_SET_REG(AR_CFLAG, ar27)
515 __DEFINE_SET_AR(FSR, fsr)
516 __DEFINE_SET_AR(FIR, fir)
517 __DEFINE_SET_AR(FDR, fdr)
518 __DEFINE_SET_AR(CCV, ccv)
519 __DEFINE_SET_AR(UNAT, unat)
520 __DEFINE_SET_AR(FPSR, fpsr)
521 __DEFINE_SET_AR(ITC, itc)
522 __DEFINE_SET_AR(PFS, pfs)
523 __DEFINE_SET_AR(LC, lc)
524 __DEFINE_SET_AR(EC, ec)
525
526 __DEFINE_SET_CR(DCR, dcr)
527 __DEFINE_SET_CR(ITM, itm)
528 __DEFINE_SET_CR(IVA, iva)
529 __DEFINE_SET_CR(PTA, pta)
530 __DEFINE_SET_CR(IPSR, ipsr)
531 __DEFINE_SET_CR(ISR, isr)
532 __DEFINE_SET_CR(IIP, iip)
533 __DEFINE_SET_CR(IFA, ifa)
534 __DEFINE_SET_CR(ITIR, itir)
535 __DEFINE_SET_CR(IIPA, iipa)
536 __DEFINE_SET_CR(IFS, ifs)
537 __DEFINE_SET_CR(IIM, iim)
538 __DEFINE_SET_CR(IHA, iha)
539 __DEFINE_SET_CR(LID, lid)
540 __DEFINE_SET_CR(IVR, ivr)
541 __DEFINE_SET_CR(TPR, tpr)
542 __DEFINE_SET_CR(EOI, eoi)
543 __DEFINE_SET_CR(IRR0, irr0)
544 __DEFINE_SET_CR(IRR1, irr1)
545 __DEFINE_SET_CR(IRR2, irr2)
546 __DEFINE_SET_CR(IRR3, irr3)
547 __DEFINE_SET_CR(ITV, itv)
548 __DEFINE_SET_CR(PMV, pmv)
549 __DEFINE_SET_CR(CMCV, cmcv)
550 __DEFINE_SET_CR(LRR0, lrr0)
551 __DEFINE_SET_CR(LRR1, lrr1)
552 );
553#endif
554
555struct pv_cpu_ops pv_cpu_ops = {
556 .fc = ia64_native_fc_func,
557 .thash = ia64_native_thash_func,
558 .get_cpuid = ia64_native_get_cpuid_func,
559 .get_pmd = ia64_native_get_pmd_func,
560 .ptcga = ia64_native_ptcga_func,
561 .get_rr = ia64_native_get_rr_func,
562 .set_rr = ia64_native_set_rr_func,
563 .set_rr0_to_rr4 = ia64_native_set_rr0_to_rr4_func,
564 .ssm_i = ia64_native_ssm_i_func,
565 .getreg = ia64_native_getreg_func,
566 .setreg = ia64_native_setreg_func,
567 .rsm_i = ia64_native_rsm_i_func,
568 .get_psr_i = ia64_native_get_psr_i_func,
569 .intrin_local_irq_restore
570 = ia64_native_intrin_local_irq_restore_func,
571};
572EXPORT_SYMBOL(pv_cpu_ops);
573
574/******************************************************************************
575 * replacement of hand-written assembly code.
576 */
577
578void
579paravirt_cpu_asm_init(const struct pv_cpu_asm_switch *cpu_asm_switch)
580{
581 extern unsigned long paravirt_switch_to_targ;
582 extern unsigned long paravirt_leave_syscall_targ;
583 extern unsigned long paravirt_work_processed_syscall_targ;
584 extern unsigned long paravirt_leave_kernel_targ;
585
586 paravirt_switch_to_targ = cpu_asm_switch->switch_to;
587 paravirt_leave_syscall_targ = cpu_asm_switch->leave_syscall;
588 paravirt_work_processed_syscall_targ =
589 cpu_asm_switch->work_processed_syscall;
590 paravirt_leave_kernel_targ = cpu_asm_switch->leave_kernel;
591}
592
593/***************************************************************************
594 * pv_iosapic_ops
595 * iosapic read/write hooks.
596 */
597
598static unsigned int
599ia64_native_iosapic_read(char __iomem *iosapic, unsigned int reg)
600{
601 return __ia64_native_iosapic_read(iosapic, reg);
602}
603
604static void
605ia64_native_iosapic_write(char __iomem *iosapic, unsigned int reg, u32 val)
606{
607 __ia64_native_iosapic_write(iosapic, reg, val);
608}
609
610struct pv_iosapic_ops pv_iosapic_ops = {
611 .pcat_compat_init = ia64_native_iosapic_pcat_compat_init,
612 .__get_irq_chip = ia64_native_iosapic_get_irq_chip,
613
614 .__read = ia64_native_iosapic_read,
615 .__write = ia64_native_iosapic_write,
616};
617
618/***************************************************************************
619 * pv_irq_ops
620 * irq operations
621 */
622
623struct pv_irq_ops pv_irq_ops = {
624 .register_ipi = ia64_native_register_ipi,
625
626 .assign_irq_vector = ia64_native_assign_irq_vector,
627 .free_irq_vector = ia64_native_free_irq_vector,
628 .register_percpu_irq = ia64_native_register_percpu_irq,
629
630 .resend_irq = ia64_native_resend_irq,
631};
632
633/***************************************************************************
634 * pv_time_ops
635 * time operations
636 */
637struct static_key paravirt_steal_enabled;
638struct static_key paravirt_steal_rq_enabled;
639
640static int
641ia64_native_do_steal_accounting(unsigned long *new_itm)
642{
643 return 0;
644}
645
646struct pv_time_ops pv_time_ops = {
647 .do_steal_accounting = ia64_native_do_steal_accounting,
648 .sched_clock = ia64_native_sched_clock,
649};
650
651/***************************************************************************
652 * binary pacthing
653 * pv_init_ops.patch_bundle
654 */
655
656#ifdef ASM_SUPPORTED
657#define IA64_NATIVE_PATCH_DEFINE_GET_REG(name, reg) \
658 __DEFINE_FUNC(get_ ## name, \
659 ";;\n" \
660 "mov r8 = " #reg "\n" \
661 ";;\n")
662
663#define IA64_NATIVE_PATCH_DEFINE_SET_REG(name, reg) \
664 __DEFINE_FUNC(set_ ## name, \
665 ";;\n" \
666 "mov " #reg " = r8\n" \
667 ";;\n")
668
669#define IA64_NATIVE_PATCH_DEFINE_REG(name, reg) \
670 IA64_NATIVE_PATCH_DEFINE_GET_REG(name, reg); \
671 IA64_NATIVE_PATCH_DEFINE_SET_REG(name, reg) \
672
673#define IA64_NATIVE_PATCH_DEFINE_AR(name, reg) \
674 IA64_NATIVE_PATCH_DEFINE_REG(ar_ ## name, ar.reg)
675
676#define IA64_NATIVE_PATCH_DEFINE_CR(name, reg) \
677 IA64_NATIVE_PATCH_DEFINE_REG(cr_ ## name, cr.reg)
678
679
680IA64_NATIVE_PATCH_DEFINE_GET_REG(psr, psr);
681IA64_NATIVE_PATCH_DEFINE_GET_REG(tp, tp);
682
683/* IA64_NATIVE_PATCH_DEFINE_SET_REG(psr_l, psr.l); */
684__DEFINE_FUNC(set_psr_l,
685 ";;\n"
686 "mov psr.l = r8\n"
687#ifdef HAVE_SERIALIZE_DIRECTIVE
688 ".serialize.data\n"
689#endif
690 ";;\n");
691
692IA64_NATIVE_PATCH_DEFINE_REG(gp, gp);
693IA64_NATIVE_PATCH_DEFINE_REG(sp, sp);
694
695IA64_NATIVE_PATCH_DEFINE_REG(kr0, ar0);
696IA64_NATIVE_PATCH_DEFINE_REG(kr1, ar1);
697IA64_NATIVE_PATCH_DEFINE_REG(kr2, ar2);
698IA64_NATIVE_PATCH_DEFINE_REG(kr3, ar3);
699IA64_NATIVE_PATCH_DEFINE_REG(kr4, ar4);
700IA64_NATIVE_PATCH_DEFINE_REG(kr5, ar5);
701IA64_NATIVE_PATCH_DEFINE_REG(kr6, ar6);
702IA64_NATIVE_PATCH_DEFINE_REG(kr7, ar7);
703
704IA64_NATIVE_PATCH_DEFINE_AR(rsc, rsc);
705IA64_NATIVE_PATCH_DEFINE_AR(bsp, bsp);
706IA64_NATIVE_PATCH_DEFINE_AR(bspstore, bspstore);
707IA64_NATIVE_PATCH_DEFINE_AR(rnat, rnat);
708IA64_NATIVE_PATCH_DEFINE_AR(fcr, fcr);
709IA64_NATIVE_PATCH_DEFINE_AR(eflag, eflag);
710IA64_NATIVE_PATCH_DEFINE_AR(csd, csd);
711IA64_NATIVE_PATCH_DEFINE_AR(ssd, ssd);
712IA64_NATIVE_PATCH_DEFINE_REG(ar27, ar27);
713IA64_NATIVE_PATCH_DEFINE_AR(fsr, fsr);
714IA64_NATIVE_PATCH_DEFINE_AR(fir, fir);
715IA64_NATIVE_PATCH_DEFINE_AR(fdr, fdr);
716IA64_NATIVE_PATCH_DEFINE_AR(ccv, ccv);
717IA64_NATIVE_PATCH_DEFINE_AR(unat, unat);
718IA64_NATIVE_PATCH_DEFINE_AR(fpsr, fpsr);
719IA64_NATIVE_PATCH_DEFINE_AR(itc, itc);
720IA64_NATIVE_PATCH_DEFINE_AR(pfs, pfs);
721IA64_NATIVE_PATCH_DEFINE_AR(lc, lc);
722IA64_NATIVE_PATCH_DEFINE_AR(ec, ec);
723
724IA64_NATIVE_PATCH_DEFINE_CR(dcr, dcr);
725IA64_NATIVE_PATCH_DEFINE_CR(itm, itm);
726IA64_NATIVE_PATCH_DEFINE_CR(iva, iva);
727IA64_NATIVE_PATCH_DEFINE_CR(pta, pta);
728IA64_NATIVE_PATCH_DEFINE_CR(ipsr, ipsr);
729IA64_NATIVE_PATCH_DEFINE_CR(isr, isr);
730IA64_NATIVE_PATCH_DEFINE_CR(iip, iip);
731IA64_NATIVE_PATCH_DEFINE_CR(ifa, ifa);
732IA64_NATIVE_PATCH_DEFINE_CR(itir, itir);
733IA64_NATIVE_PATCH_DEFINE_CR(iipa, iipa);
734IA64_NATIVE_PATCH_DEFINE_CR(ifs, ifs);
735IA64_NATIVE_PATCH_DEFINE_CR(iim, iim);
736IA64_NATIVE_PATCH_DEFINE_CR(iha, iha);
737IA64_NATIVE_PATCH_DEFINE_CR(lid, lid);
738IA64_NATIVE_PATCH_DEFINE_CR(ivr, ivr);
739IA64_NATIVE_PATCH_DEFINE_CR(tpr, tpr);
740IA64_NATIVE_PATCH_DEFINE_CR(eoi, eoi);
741IA64_NATIVE_PATCH_DEFINE_CR(irr0, irr0);
742IA64_NATIVE_PATCH_DEFINE_CR(irr1, irr1);
743IA64_NATIVE_PATCH_DEFINE_CR(irr2, irr2);
744IA64_NATIVE_PATCH_DEFINE_CR(irr3, irr3);
745IA64_NATIVE_PATCH_DEFINE_CR(itv, itv);
746IA64_NATIVE_PATCH_DEFINE_CR(pmv, pmv);
747IA64_NATIVE_PATCH_DEFINE_CR(cmcv, cmcv);
748IA64_NATIVE_PATCH_DEFINE_CR(lrr0, lrr0);
749IA64_NATIVE_PATCH_DEFINE_CR(lrr1, lrr1);
750
751static const struct paravirt_patch_bundle_elem ia64_native_patch_bundle_elems[]
752__initdata_or_module =
753{
754#define IA64_NATIVE_PATCH_BUNDLE_ELEM(name, type) \
755 { \
756 (void*)ia64_native_ ## name ## _direct_start, \
757 (void*)ia64_native_ ## name ## _direct_end, \
758 PARAVIRT_PATCH_TYPE_ ## type, \
759 }
760
761 IA64_NATIVE_PATCH_BUNDLE_ELEM(fc, FC),
762 IA64_NATIVE_PATCH_BUNDLE_ELEM(thash, THASH),
763 IA64_NATIVE_PATCH_BUNDLE_ELEM(get_cpuid, GET_CPUID),
764 IA64_NATIVE_PATCH_BUNDLE_ELEM(get_pmd, GET_PMD),
765 IA64_NATIVE_PATCH_BUNDLE_ELEM(ptcga, PTCGA),
766 IA64_NATIVE_PATCH_BUNDLE_ELEM(get_rr, GET_RR),
767 IA64_NATIVE_PATCH_BUNDLE_ELEM(set_rr, SET_RR),
768 IA64_NATIVE_PATCH_BUNDLE_ELEM(set_rr0_to_rr4, SET_RR0_TO_RR4),
769 IA64_NATIVE_PATCH_BUNDLE_ELEM(ssm_i, SSM_I),
770 IA64_NATIVE_PATCH_BUNDLE_ELEM(rsm_i, RSM_I),
771 IA64_NATIVE_PATCH_BUNDLE_ELEM(get_psr_i, GET_PSR_I),
772 IA64_NATIVE_PATCH_BUNDLE_ELEM(intrin_local_irq_restore,
773 INTRIN_LOCAL_IRQ_RESTORE),
774
775#define IA64_NATIVE_PATCH_BUNDLE_ELEM_GETREG(name, reg) \
776 { \
777 (void*)ia64_native_get_ ## name ## _direct_start, \
778 (void*)ia64_native_get_ ## name ## _direct_end, \
779 PARAVIRT_PATCH_TYPE_GETREG + _IA64_REG_ ## reg, \
780 }
781
782#define IA64_NATIVE_PATCH_BUNDLE_ELEM_SETREG(name, reg) \
783 { \
784 (void*)ia64_native_set_ ## name ## _direct_start, \
785 (void*)ia64_native_set_ ## name ## _direct_end, \
786 PARAVIRT_PATCH_TYPE_SETREG + _IA64_REG_ ## reg, \
787 }
788
789#define IA64_NATIVE_PATCH_BUNDLE_ELEM_REG(name, reg) \
790 IA64_NATIVE_PATCH_BUNDLE_ELEM_GETREG(name, reg), \
791 IA64_NATIVE_PATCH_BUNDLE_ELEM_SETREG(name, reg) \
792
793#define IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(name, reg) \
794 IA64_NATIVE_PATCH_BUNDLE_ELEM_REG(ar_ ## name, AR_ ## reg)
795
796#define IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(name, reg) \
797 IA64_NATIVE_PATCH_BUNDLE_ELEM_REG(cr_ ## name, CR_ ## reg)
798
799 IA64_NATIVE_PATCH_BUNDLE_ELEM_GETREG(psr, PSR),
800 IA64_NATIVE_PATCH_BUNDLE_ELEM_GETREG(tp, TP),
801
802 IA64_NATIVE_PATCH_BUNDLE_ELEM_SETREG(psr_l, PSR_L),
803
804 IA64_NATIVE_PATCH_BUNDLE_ELEM_REG(gp, GP),
805 IA64_NATIVE_PATCH_BUNDLE_ELEM_REG(sp, SP),
806
807 IA64_NATIVE_PATCH_BUNDLE_ELEM_REG(kr0, AR_KR0),
808 IA64_NATIVE_PATCH_BUNDLE_ELEM_REG(kr1, AR_KR1),
809 IA64_NATIVE_PATCH_BUNDLE_ELEM_REG(kr2, AR_KR2),
810 IA64_NATIVE_PATCH_BUNDLE_ELEM_REG(kr3, AR_KR3),
811 IA64_NATIVE_PATCH_BUNDLE_ELEM_REG(kr4, AR_KR4),
812 IA64_NATIVE_PATCH_BUNDLE_ELEM_REG(kr5, AR_KR5),
813 IA64_NATIVE_PATCH_BUNDLE_ELEM_REG(kr6, AR_KR6),
814 IA64_NATIVE_PATCH_BUNDLE_ELEM_REG(kr7, AR_KR7),
815
816 IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(rsc, RSC),
817 IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(bsp, BSP),
818 IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(bspstore, BSPSTORE),
819 IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(rnat, RNAT),
820 IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(fcr, FCR),
821 IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(eflag, EFLAG),
822 IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(csd, CSD),
823 IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(ssd, SSD),
824 IA64_NATIVE_PATCH_BUNDLE_ELEM_REG(ar27, AR_CFLAG),
825 IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(fsr, FSR),
826 IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(fir, FIR),
827 IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(fdr, FDR),
828 IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(ccv, CCV),
829 IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(unat, UNAT),
830 IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(fpsr, FPSR),
831 IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(itc, ITC),
832 IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(pfs, PFS),
833 IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(lc, LC),
834 IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(ec, EC),
835
836 IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(dcr, DCR),
837 IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(itm, ITM),
838 IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(iva, IVA),
839 IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(pta, PTA),
840 IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(ipsr, IPSR),
841 IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(isr, ISR),
842 IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(iip, IIP),
843 IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(ifa, IFA),
844 IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(itir, ITIR),
845 IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(iipa, IIPA),
846 IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(ifs, IFS),
847 IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(iim, IIM),
848 IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(iha, IHA),
849 IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(lid, LID),
850 IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(ivr, IVR),
851 IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(tpr, TPR),
852 IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(eoi, EOI),
853 IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(irr0, IRR0),
854 IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(irr1, IRR1),
855 IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(irr2, IRR2),
856 IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(irr3, IRR3),
857 IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(itv, ITV),
858 IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(pmv, PMV),
859 IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(cmcv, CMCV),
860 IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(lrr0, LRR0),
861 IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(lrr1, LRR1),
862};
863
864unsigned long __init_or_module
865ia64_native_patch_bundle(void *sbundle, void *ebundle, unsigned long type)
866{
867 const unsigned long nelems = sizeof(ia64_native_patch_bundle_elems) /
868 sizeof(ia64_native_patch_bundle_elems[0]);
869
870 return __paravirt_patch_apply_bundle(sbundle, ebundle, type,
871 ia64_native_patch_bundle_elems,
872 nelems, NULL);
873}
874#endif /* ASM_SUPPORTED */
875
876extern const char ia64_native_switch_to[];
877extern const char ia64_native_leave_syscall[];
878extern const char ia64_native_work_processed_syscall[];
879extern const char ia64_native_leave_kernel[];
880
881const struct paravirt_patch_branch_target ia64_native_branch_target[]
882__initconst = {
883#define PARAVIRT_BR_TARGET(name, type) \
884 { \
885 ia64_native_ ## name, \
886 PARAVIRT_PATCH_TYPE_BR_ ## type, \
887 }
888 PARAVIRT_BR_TARGET(switch_to, SWITCH_TO),
889 PARAVIRT_BR_TARGET(leave_syscall, LEAVE_SYSCALL),
890 PARAVIRT_BR_TARGET(work_processed_syscall, WORK_PROCESSED_SYSCALL),
891 PARAVIRT_BR_TARGET(leave_kernel, LEAVE_KERNEL),
892};
893
894static void __init
895ia64_native_patch_branch(unsigned long tag, unsigned long type)
896{
897 const unsigned long nelem =
898 sizeof(ia64_native_branch_target) /
899 sizeof(ia64_native_branch_target[0]);
900 __paravirt_patch_apply_branch(tag, type,
901 ia64_native_branch_target, nelem);
902}
diff --git a/arch/ia64/kernel/paravirt_inst.h b/arch/ia64/kernel/paravirt_inst.h
deleted file mode 100644
index 1ad7512b5f65..000000000000
--- a/arch/ia64/kernel/paravirt_inst.h
+++ /dev/null
@@ -1,28 +0,0 @@
1/******************************************************************************
2 * linux/arch/ia64/xen/paravirt_inst.h
3 *
4 * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
5 * VA Linux Systems Japan K.K.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 *
21 */
22
23#ifdef __IA64_ASM_PARAVIRTUALIZED_PVCHECK
24#include <asm/native/pvchk_inst.h>
25#else
26#include <asm/native/inst.h>
27#endif
28
diff --git a/arch/ia64/kernel/paravirt_patch.c b/arch/ia64/kernel/paravirt_patch.c
deleted file mode 100644
index bfdfef1b1ffd..000000000000
--- a/arch/ia64/kernel/paravirt_patch.c
+++ /dev/null
@@ -1,514 +0,0 @@
1/******************************************************************************
2 * linux/arch/ia64/xen/paravirt_patch.c
3 *
4 * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
5 * VA Linux Systems Japan K.K.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 *
21 */
22
23#include <linux/init.h>
24#include <asm/intrinsics.h>
25#include <asm/kprobes.h>
26#include <asm/paravirt.h>
27#include <asm/paravirt_patch.h>
28
29typedef union ia64_inst {
30 struct {
31 unsigned long long qp : 6;
32 unsigned long long : 31;
33 unsigned long long opcode : 4;
34 unsigned long long reserved : 23;
35 } generic;
36 unsigned long long l;
37} ia64_inst_t;
38
39/*
40 * flush_icache_range() can't be used here.
41 * we are here before cpu_init() which initializes
42 * ia64_i_cache_stride_shift. flush_icache_range() uses it.
43 */
44void __init_or_module
45paravirt_flush_i_cache_range(const void *instr, unsigned long size)
46{
47 extern void paravirt_fc_i(const void *addr);
48 unsigned long i;
49
50 for (i = 0; i < size; i += sizeof(bundle_t))
51 paravirt_fc_i(instr + i);
52}
53
54bundle_t* __init_or_module
55paravirt_get_bundle(unsigned long tag)
56{
57 return (bundle_t *)(tag & ~3UL);
58}
59
60unsigned long __init_or_module
61paravirt_get_slot(unsigned long tag)
62{
63 return tag & 3UL;
64}
65
66unsigned long __init_or_module
67paravirt_get_num_inst(unsigned long stag, unsigned long etag)
68{
69 bundle_t *sbundle = paravirt_get_bundle(stag);
70 unsigned long sslot = paravirt_get_slot(stag);
71 bundle_t *ebundle = paravirt_get_bundle(etag);
72 unsigned long eslot = paravirt_get_slot(etag);
73
74 return (ebundle - sbundle) * 3 + eslot - sslot + 1;
75}
76
77unsigned long __init_or_module
78paravirt_get_next_tag(unsigned long tag)
79{
80 unsigned long slot = paravirt_get_slot(tag);
81
82 switch (slot) {
83 case 0:
84 case 1:
85 return tag + 1;
86 case 2: {
87 bundle_t *bundle = paravirt_get_bundle(tag);
88 return (unsigned long)(bundle + 1);
89 }
90 default:
91 BUG();
92 }
93 /* NOTREACHED */
94}
95
96ia64_inst_t __init_or_module
97paravirt_read_slot0(const bundle_t *bundle)
98{
99 ia64_inst_t inst;
100 inst.l = bundle->quad0.slot0;
101 return inst;
102}
103
104ia64_inst_t __init_or_module
105paravirt_read_slot1(const bundle_t *bundle)
106{
107 ia64_inst_t inst;
108 inst.l = bundle->quad0.slot1_p0 |
109 ((unsigned long long)bundle->quad1.slot1_p1 << 18UL);
110 return inst;
111}
112
113ia64_inst_t __init_or_module
114paravirt_read_slot2(const bundle_t *bundle)
115{
116 ia64_inst_t inst;
117 inst.l = bundle->quad1.slot2;
118 return inst;
119}
120
121ia64_inst_t __init_or_module
122paravirt_read_inst(unsigned long tag)
123{
124 bundle_t *bundle = paravirt_get_bundle(tag);
125 unsigned long slot = paravirt_get_slot(tag);
126
127 switch (slot) {
128 case 0:
129 return paravirt_read_slot0(bundle);
130 case 1:
131 return paravirt_read_slot1(bundle);
132 case 2:
133 return paravirt_read_slot2(bundle);
134 default:
135 BUG();
136 }
137 /* NOTREACHED */
138}
139
140void __init_or_module
141paravirt_write_slot0(bundle_t *bundle, ia64_inst_t inst)
142{
143 bundle->quad0.slot0 = inst.l;
144}
145
146void __init_or_module
147paravirt_write_slot1(bundle_t *bundle, ia64_inst_t inst)
148{
149 bundle->quad0.slot1_p0 = inst.l;
150 bundle->quad1.slot1_p1 = inst.l >> 18UL;
151}
152
153void __init_or_module
154paravirt_write_slot2(bundle_t *bundle, ia64_inst_t inst)
155{
156 bundle->quad1.slot2 = inst.l;
157}
158
159void __init_or_module
160paravirt_write_inst(unsigned long tag, ia64_inst_t inst)
161{
162 bundle_t *bundle = paravirt_get_bundle(tag);
163 unsigned long slot = paravirt_get_slot(tag);
164
165 switch (slot) {
166 case 0:
167 paravirt_write_slot0(bundle, inst);
168 break;
169 case 1:
170 paravirt_write_slot1(bundle, inst);
171 break;
172 case 2:
173 paravirt_write_slot2(bundle, inst);
174 break;
175 default:
176 BUG();
177 break;
178 }
179 paravirt_flush_i_cache_range(bundle, sizeof(*bundle));
180}
181
182/* for debug */
183void
184paravirt_print_bundle(const bundle_t *bundle)
185{
186 const unsigned long *quad = (const unsigned long *)bundle;
187 ia64_inst_t slot0 = paravirt_read_slot0(bundle);
188 ia64_inst_t slot1 = paravirt_read_slot1(bundle);
189 ia64_inst_t slot2 = paravirt_read_slot2(bundle);
190
191 printk(KERN_DEBUG
192 "bundle 0x%p 0x%016lx 0x%016lx\n", bundle, quad[0], quad[1]);
193 printk(KERN_DEBUG
194 "bundle template 0x%x\n",
195 bundle->quad0.template);
196 printk(KERN_DEBUG
197 "slot0 0x%lx slot1_p0 0x%lx slot1_p1 0x%lx slot2 0x%lx\n",
198 (unsigned long)bundle->quad0.slot0,
199 (unsigned long)bundle->quad0.slot1_p0,
200 (unsigned long)bundle->quad1.slot1_p1,
201 (unsigned long)bundle->quad1.slot2);
202 printk(KERN_DEBUG
203 "slot0 0x%016llx slot1 0x%016llx slot2 0x%016llx\n",
204 slot0.l, slot1.l, slot2.l);
205}
206
207static int noreplace_paravirt __init_or_module = 0;
208
209static int __init setup_noreplace_paravirt(char *str)
210{
211 noreplace_paravirt = 1;
212 return 1;
213}
214__setup("noreplace-paravirt", setup_noreplace_paravirt);
215
216#ifdef ASM_SUPPORTED
217static void __init_or_module
218fill_nop_bundle(void *sbundle, void *ebundle)
219{
220 extern const char paravirt_nop_bundle[];
221 extern const unsigned long paravirt_nop_bundle_size;
222
223 void *bundle = sbundle;
224
225 BUG_ON((((unsigned long)sbundle) % sizeof(bundle_t)) != 0);
226 BUG_ON((((unsigned long)ebundle) % sizeof(bundle_t)) != 0);
227
228 while (bundle < ebundle) {
229 memcpy(bundle, paravirt_nop_bundle, paravirt_nop_bundle_size);
230
231 bundle += paravirt_nop_bundle_size;
232 }
233}
234
235/* helper function */
236unsigned long __init_or_module
237__paravirt_patch_apply_bundle(void *sbundle, void *ebundle, unsigned long type,
238 const struct paravirt_patch_bundle_elem *elems,
239 unsigned long nelems,
240 const struct paravirt_patch_bundle_elem **found)
241{
242 unsigned long used = 0;
243 unsigned long i;
244
245 BUG_ON((((unsigned long)sbundle) % sizeof(bundle_t)) != 0);
246 BUG_ON((((unsigned long)ebundle) % sizeof(bundle_t)) != 0);
247
248 found = NULL;
249 for (i = 0; i < nelems; i++) {
250 const struct paravirt_patch_bundle_elem *p = &elems[i];
251 if (p->type == type) {
252 unsigned long need = p->ebundle - p->sbundle;
253 unsigned long room = ebundle - sbundle;
254
255 if (found != NULL)
256 *found = p;
257
258 if (room < need) {
259 /* no room to replace. skip it */
260 printk(KERN_DEBUG
261 "the space is too small to put "
262 "bundles. type %ld need %ld room %ld\n",
263 type, need, room);
264 break;
265 }
266
267 used = need;
268 memcpy(sbundle, p->sbundle, used);
269 break;
270 }
271 }
272
273 return used;
274}
275
276void __init_or_module
277paravirt_patch_apply_bundle(const struct paravirt_patch_site_bundle *start,
278 const struct paravirt_patch_site_bundle *end)
279{
280 const struct paravirt_patch_site_bundle *p;
281
282 if (noreplace_paravirt)
283 return;
284 if (pv_init_ops.patch_bundle == NULL)
285 return;
286
287 for (p = start; p < end; p++) {
288 unsigned long used;
289
290 used = (*pv_init_ops.patch_bundle)(p->sbundle, p->ebundle,
291 p->type);
292 if (used == 0)
293 continue;
294
295 fill_nop_bundle(p->sbundle + used, p->ebundle);
296 paravirt_flush_i_cache_range(p->sbundle,
297 p->ebundle - p->sbundle);
298 }
299 ia64_sync_i();
300 ia64_srlz_i();
301}
302
303/*
304 * nop.i, nop.m, nop.f instructions are the same format,
305 * but nop.b has a different format.
306 * This doesn't support nop.b for now.
307 */
308static void __init_or_module
309fill_nop_inst(unsigned long stag, unsigned long etag)
310{
311 extern const bundle_t paravirt_nop_mfi_inst_bundle[];
312 unsigned long tag;
313 const ia64_inst_t nop_inst =
314 paravirt_read_slot0(paravirt_nop_mfi_inst_bundle);
315
316 for (tag = stag; tag < etag; tag = paravirt_get_next_tag(tag))
317 paravirt_write_inst(tag, nop_inst);
318}
319
320void __init_or_module
321paravirt_patch_apply_inst(const struct paravirt_patch_site_inst *start,
322 const struct paravirt_patch_site_inst *end)
323{
324 const struct paravirt_patch_site_inst *p;
325
326 if (noreplace_paravirt)
327 return;
328 if (pv_init_ops.patch_inst == NULL)
329 return;
330
331 for (p = start; p < end; p++) {
332 unsigned long tag;
333 bundle_t *sbundle;
334 bundle_t *ebundle;
335
336 tag = (*pv_init_ops.patch_inst)(p->stag, p->etag, p->type);
337 if (tag == p->stag)
338 continue;
339
340 fill_nop_inst(tag, p->etag);
341 sbundle = paravirt_get_bundle(p->stag);
342 ebundle = paravirt_get_bundle(p->etag) + 1;
343 paravirt_flush_i_cache_range(sbundle, (ebundle - sbundle) *
344 sizeof(bundle_t));
345 }
346 ia64_sync_i();
347 ia64_srlz_i();
348}
349#endif /* ASM_SUPPORTED */
350
351/* brl.cond.sptk.many <target64> X3 */
352typedef union inst_x3_op {
353 ia64_inst_t inst;
354 struct {
355 unsigned long qp: 6;
356 unsigned long btyp: 3;
357 unsigned long unused: 3;
358 unsigned long p: 1;
359 unsigned long imm20b: 20;
360 unsigned long wh: 2;
361 unsigned long d: 1;
362 unsigned long i: 1;
363 unsigned long opcode: 4;
364 };
365 unsigned long l;
366} inst_x3_op_t;
367
368typedef union inst_x3_imm {
369 ia64_inst_t inst;
370 struct {
371 unsigned long unused: 2;
372 unsigned long imm39: 39;
373 };
374 unsigned long l;
375} inst_x3_imm_t;
376
377void __init_or_module
378paravirt_patch_reloc_brl(unsigned long tag, const void *target)
379{
380 unsigned long tag_op = paravirt_get_next_tag(tag);
381 unsigned long tag_imm = tag;
382 bundle_t *bundle = paravirt_get_bundle(tag);
383
384 ia64_inst_t inst_op = paravirt_read_inst(tag_op);
385 ia64_inst_t inst_imm = paravirt_read_inst(tag_imm);
386
387 inst_x3_op_t inst_x3_op = { .l = inst_op.l };
388 inst_x3_imm_t inst_x3_imm = { .l = inst_imm.l };
389
390 unsigned long imm60 =
391 ((unsigned long)target - (unsigned long)bundle) >> 4;
392
393 BUG_ON(paravirt_get_slot(tag) != 1); /* MLX */
394 BUG_ON(((unsigned long)target & (sizeof(bundle_t) - 1)) != 0);
395
396 /* imm60[59] 1bit */
397 inst_x3_op.i = (imm60 >> 59) & 1;
398 /* imm60[19:0] 20bit */
399 inst_x3_op.imm20b = imm60 & ((1UL << 20) - 1);
400 /* imm60[58:20] 39bit */
401 inst_x3_imm.imm39 = (imm60 >> 20) & ((1UL << 39) - 1);
402
403 inst_op.l = inst_x3_op.l;
404 inst_imm.l = inst_x3_imm.l;
405
406 paravirt_write_inst(tag_op, inst_op);
407 paravirt_write_inst(tag_imm, inst_imm);
408}
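The relocation above follows the brl long-branch (MLX/X3) encoding: the 60-bit bundle-relative displacement is split so that bit 59 lands in the X3 slot's i field, bits 19:0 in imm20b, and bits 58:20 in the imm39 field of the adjacent slot. A minimal, self-contained sketch of that split (hypothetical helper names, not part of this patch):

	#include <stdint.h>

	struct brl_imm60_split {
		uint64_t i;      /* imm60 bit 59     */
		uint64_t imm20b; /* imm60 bits 19:0  */
		uint64_t imm39;  /* imm60 bits 58:20 */
	};

	/* Split a displacement the way paravirt_patch_reloc_brl() scatters it
	 * across the bundle; both addresses are bundle aligned, so >> 4 counts
	 * in 16-byte bundles. */
	static struct brl_imm60_split split_imm60(uint64_t target, uint64_t bundle)
	{
		uint64_t imm60 = (target - bundle) >> 4;

		return (struct brl_imm60_split) {
			.i      = (imm60 >> 59) & 1,
			.imm20b = imm60 & ((1ULL << 20) - 1),
			.imm39  = (imm60 >> 20) & ((1ULL << 39) - 1),
		};
	}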
409
410/* br.cond.sptk.many <target25> B1 */
411typedef union inst_b1 {
412 ia64_inst_t inst;
413 struct {
414 unsigned long qp: 6;
415 unsigned long btype: 3;
416 unsigned long unused: 3;
417 unsigned long p: 1;
418 unsigned long imm20b: 20;
419 unsigned long wh: 2;
420 unsigned long d: 1;
421 unsigned long s: 1;
422 unsigned long opcode: 4;
423 };
424 unsigned long l;
425} inst_b1_t;
426
427void __init
428paravirt_patch_reloc_br(unsigned long tag, const void *target)
429{
430 bundle_t *bundle = paravirt_get_bundle(tag);
431 ia64_inst_t inst = paravirt_read_inst(tag);
432 unsigned long target25 = (unsigned long)target - (unsigned long)bundle;
433 inst_b1_t inst_b1;
434
435 BUG_ON(((unsigned long)target & (sizeof(bundle_t) - 1)) != 0);
436
437 inst_b1.l = inst.l;
438 if (target25 & (1UL << 63))
439 inst_b1.s = 1;
440 else
441 inst_b1.s = 0;
442
443 inst_b1.imm20b = target25 >> 4;
444 inst.l = inst_b1.l;
445
446 paravirt_write_inst(tag, inst);
447}
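The B1 short-branch form carries a much smaller displacement: the sign bit s is taken from bit 63 of the bundle-relative offset, and imm20b from the offset shifted down by 4, with the bitfield assignment above implicitly masking it to 20 bits. A tiny sketch of that encoding step, again with hypothetical names:

	#include <stdint.h>

	/* Encode a bundle-relative offset into the s/imm20b pair, as
	 * paravirt_patch_reloc_br() does for br.cond.sptk.many. */
	static void encode_b1(uint64_t off, uint64_t *s, uint64_t *imm20b)
	{
		*s      = (off >> 63) & 1;                  /* negative displacement */
		*imm20b = (off >> 4) & ((1ULL << 20) - 1);
	}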
448
449void __init
450__paravirt_patch_apply_branch(
451 unsigned long tag, unsigned long type,
452 const struct paravirt_patch_branch_target *entries,
453 unsigned int nr_entries)
454{
455 unsigned int i;
456 for (i = 0; i < nr_entries; i++) {
457 if (entries[i].type == type) {
458 paravirt_patch_reloc_br(tag, entries[i].entry);
459 break;
460 }
461 }
462}
463
464static void __init
465paravirt_patch_apply_branch(const struct paravirt_patch_site_branch *start,
466 const struct paravirt_patch_site_branch *end)
467{
468 const struct paravirt_patch_site_branch *p;
469
470 if (noreplace_paravirt)
471 return;
472 if (pv_init_ops.patch_branch == NULL)
473 return;
474
475 for (p = start; p < end; p++)
476 (*pv_init_ops.patch_branch)(p->tag, p->type);
477
478 ia64_sync_i();
479 ia64_srlz_i();
480}
481
482void __init
483paravirt_patch_apply(void)
484{
485 extern const char __start_paravirt_bundles[];
486 extern const char __stop_paravirt_bundles[];
487 extern const char __start_paravirt_insts[];
488 extern const char __stop_paravirt_insts[];
489 extern const char __start_paravirt_branches[];
490 extern const char __stop_paravirt_branches[];
491
492 paravirt_patch_apply_bundle((const struct paravirt_patch_site_bundle *)
493 __start_paravirt_bundles,
494 (const struct paravirt_patch_site_bundle *)
495 __stop_paravirt_bundles);
496 paravirt_patch_apply_inst((const struct paravirt_patch_site_inst *)
497 __start_paravirt_insts,
498 (const struct paravirt_patch_site_inst *)
499 __stop_paravirt_insts);
500 paravirt_patch_apply_branch((const struct paravirt_patch_site_branch *)
501 __start_paravirt_branches,
502 (const struct paravirt_patch_site_branch *)
503 __stop_paravirt_branches);
504}
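The __start_*/__stop_* symbols consumed here are the bounds of the .paravirt_bundles, .paravirt_insts and .paravirt_branches output sections that this patch also removes from vmlinux.lds.S; each pair delimits an array of patch-site records emitted at the instrumented call sites. A generic sketch of that linker-delimited-table pattern, with purely illustrative names:

	struct example_site {
		unsigned long tag;
		unsigned long type;
	};

	/* Bounds provided by the linker script around an .example_sites section. */
	extern const char __start_example_sites[];
	extern const char __stop_example_sites[];

	static void apply_example_sites(void (*apply)(unsigned long tag,
						      unsigned long type))
	{
		const struct example_site *p =
			(const struct example_site *)__start_example_sites;
		const struct example_site *end =
			(const struct example_site *)__stop_example_sites;

		for (; p < end; p++)
			apply(p->tag, p->type);
	}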
505
506/*
507 * Local variables:
508 * mode: C
509 * c-set-style: "linux"
510 * c-basic-offset: 8
511 * tab-width: 8
512 * indent-tabs-mode: t
513 * End:
514 */
diff --git a/arch/ia64/kernel/paravirt_patchlist.c b/arch/ia64/kernel/paravirt_patchlist.c
deleted file mode 100644
index 0a70720662ed..000000000000
--- a/arch/ia64/kernel/paravirt_patchlist.c
+++ /dev/null
@@ -1,81 +0,0 @@
1/******************************************************************************
2 * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
3 * VA Linux Systems Japan K.K.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 *
19 */
20
21#include <linux/bug.h>
22#include <linux/init.h>
23#include <linux/kernel.h>
24#include <asm/paravirt.h>
25
26#define DECLARE(name) \
27 extern unsigned long \
28 __ia64_native_start_gate_##name##_patchlist[]; \
29 extern unsigned long \
30 __ia64_native_end_gate_##name##_patchlist[]
31
32DECLARE(fsyscall);
33DECLARE(brl_fsys_bubble_down);
34DECLARE(vtop);
35DECLARE(mckinley_e9);
36
37extern unsigned long __start_gate_section[];
38
39#define ASSIGN(name) \
40 .start_##name##_patchlist = \
41 (unsigned long)__ia64_native_start_gate_##name##_patchlist, \
42 .end_##name##_patchlist = \
43 (unsigned long)__ia64_native_end_gate_##name##_patchlist
44
45struct pv_patchdata pv_patchdata __initdata = {
46 ASSIGN(fsyscall),
47 ASSIGN(brl_fsys_bubble_down),
48 ASSIGN(vtop),
49 ASSIGN(mckinley_e9),
50
51 .gate_section = (void*)__start_gate_section,
52};
53
54
55unsigned long __init
56paravirt_get_gate_patchlist(enum pv_gate_patchlist type)
57{
58
59#define CASE(NAME, name) \
60 case PV_GATE_START_##NAME: \
61 return pv_patchdata.start_##name##_patchlist; \
62 case PV_GATE_END_##NAME: \
63 return pv_patchdata.end_##name##_patchlist; \
64
65 switch (type) {
66 CASE(FSYSCALL, fsyscall);
67 CASE(BRL_FSYS_BUBBLE_DOWN, brl_fsys_bubble_down);
68 CASE(VTOP, vtop);
69 CASE(MCKINLEY_E9, mckinley_e9);
70 default:
71 BUG();
72 break;
73 }
74 return 0;
75}
76
77void * __init
78paravirt_get_gate_section(void)
79{
80 return pv_patchdata.gate_section;
81}
diff --git a/arch/ia64/kernel/paravirt_patchlist.h b/arch/ia64/kernel/paravirt_patchlist.h
deleted file mode 100644
index 67cffc3643a3..000000000000
--- a/arch/ia64/kernel/paravirt_patchlist.h
+++ /dev/null
@@ -1,24 +0,0 @@
1/******************************************************************************
2 * linux/arch/ia64/xen/paravirt_patchlist.h
3 *
4 * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
5 * VA Linux Systems Japan K.K.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 *
21 */
22
23#include <asm/native/patchlist.h>
24
diff --git a/arch/ia64/kernel/paravirtentry.S b/arch/ia64/kernel/paravirtentry.S
deleted file mode 100644
index 92d880c4d3d1..000000000000
--- a/arch/ia64/kernel/paravirtentry.S
+++ /dev/null
@@ -1,121 +0,0 @@
1/******************************************************************************
2 * linux/arch/ia64/xen/paravirtentry.S
3 *
4 * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
5 * VA Linux Systems Japan K.K.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 *
21 */
22
23#include <linux/init.h>
24#include <asm/asmmacro.h>
25#include <asm/asm-offsets.h>
26#include <asm/paravirt_privop.h>
27#include <asm/paravirt_patch.h>
28#include "entry.h"
29
30#define DATA8(sym, init_value) \
31 .pushsection .data..read_mostly ; \
32 .align 8 ; \
33 .global sym ; \
34 sym: ; \
35 data8 init_value ; \
36 .popsection
37
38#define BRANCH(targ, reg, breg, type) \
39 PARAVIRT_PATCH_SITE_BR(PARAVIRT_PATCH_TYPE_BR_ ## type) ; \
40 ;; \
41 movl reg=targ ; \
42 ;; \
43 ld8 reg=[reg] ; \
44 ;; \
45 mov breg=reg ; \
46 br.cond.sptk.many breg
47
48#define BRANCH_PROC(sym, reg, breg, type) \
49 DATA8(paravirt_ ## sym ## _targ, ia64_native_ ## sym) ; \
50 GLOBAL_ENTRY(paravirt_ ## sym) ; \
51 BRANCH(paravirt_ ## sym ## _targ, reg, breg, type) ; \
52 END(paravirt_ ## sym)
53
54#define BRANCH_PROC_UNWINFO(sym, reg, breg, type) \
55 DATA8(paravirt_ ## sym ## _targ, ia64_native_ ## sym) ; \
56 GLOBAL_ENTRY(paravirt_ ## sym) ; \
57 PT_REGS_UNWIND_INFO(0) ; \
58 BRANCH(paravirt_ ## sym ## _targ, reg, breg, type) ; \
59 END(paravirt_ ## sym)
60
61
62BRANCH_PROC(switch_to, r22, b7, SWITCH_TO)
63BRANCH_PROC_UNWINFO(leave_syscall, r22, b7, LEAVE_SYSCALL)
64BRANCH_PROC(work_processed_syscall, r2, b7, WORK_PROCESSED_SYSCALL)
65BRANCH_PROC_UNWINFO(leave_kernel, r22, b7, LEAVE_KERNEL)
66
67
68#ifdef CONFIG_MODULES
69#define __INIT_OR_MODULE .text
70#define __INITDATA_OR_MODULE .data
71#else
72#define __INIT_OR_MODULE __INIT
73#define __INITDATA_OR_MODULE __INITDATA
74#endif /* CONFIG_MODULES */
75
76 __INIT_OR_MODULE
77 GLOBAL_ENTRY(paravirt_fc_i)
78 fc.i r32
79 br.ret.sptk.many rp
80 END(paravirt_fc_i)
81 __FINIT
82
83 __INIT_OR_MODULE
84 .align 32
85 GLOBAL_ENTRY(paravirt_nop_b_inst_bundle)
86 {
87 nop.b 0
88 nop.b 0
89 nop.b 0
90 }
91 END(paravirt_nop_b_inst_bundle)
92 __FINIT
93
94 /* NOTE: nop.[mfi] has same format */
95 __INIT_OR_MODULE
96 GLOBAL_ENTRY(paravirt_nop_mfi_inst_bundle)
97 {
98 nop.m 0
99 nop.f 0
100 nop.i 0
101 }
102 END(paravirt_nop_mfi_inst_bundle)
103 __FINIT
104
105 __INIT_OR_MODULE
106 GLOBAL_ENTRY(paravirt_nop_bundle)
107paravirt_nop_bundle_start:
108 {
109 nop 0
110 nop 0
111 nop 0
112 }
113paravirt_nop_bundle_end:
114 END(paravirt_nop_bundle)
115 __FINIT
116
117 __INITDATA_OR_MODULE
118 .align 8
119 .global paravirt_nop_bundle_size
120paravirt_nop_bundle_size:
121 data8 paravirt_nop_bundle_end - paravirt_nop_bundle_start
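The BRANCH/BRANCH_PROC macros above implement the pvops indirection for the hot entry paths: each paravirt_<sym> stub loads a per-symbol data8 target (initialised to the ia64_native_<sym> implementation) and branches through b7, so a hypervisor could retarget it before the branch sites are patched into direct jumps. A rough C analogue of that indirection, for illustration only:

	/* Hypothetical names; the real stubs are the assembly macros above. */
	extern void ia64_native_switch_to_example(void);

	/* data8 word holding the current target, defaulting to the native code. */
	static void (*paravirt_switch_to_targ)(void) = ia64_native_switch_to_example;

	static void paravirt_switch_to_example(void)
	{
		(*paravirt_switch_to_targ)();	/* ld8 target; mov b7; br b7 */
	}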
diff --git a/arch/ia64/kernel/patch.c b/arch/ia64/kernel/patch.c
index 1cf091793714..944a8e2438a6 100644
--- a/arch/ia64/kernel/patch.c
+++ b/arch/ia64/kernel/patch.c
@@ -7,7 +7,6 @@
7#include <linux/init.h> 7#include <linux/init.h>
8#include <linux/string.h> 8#include <linux/string.h>
9 9
10#include <asm/paravirt.h>
11#include <asm/patch.h> 10#include <asm/patch.h>
12#include <asm/processor.h> 11#include <asm/processor.h>
13#include <asm/sections.h> 12#include <asm/sections.h>
@@ -169,35 +168,16 @@ ia64_patch_mckinley_e9 (unsigned long start, unsigned long end)
169 ia64_srlz_i(); 168 ia64_srlz_i();
170} 169}
171 170
172extern unsigned long ia64_native_fsyscall_table[NR_syscalls];
173extern char ia64_native_fsys_bubble_down[];
174struct pv_fsys_data pv_fsys_data __initdata = {
175 .fsyscall_table = (unsigned long *)ia64_native_fsyscall_table,
176 .fsys_bubble_down = (void *)ia64_native_fsys_bubble_down,
177};
178
179unsigned long * __init
180paravirt_get_fsyscall_table(void)
181{
182 return pv_fsys_data.fsyscall_table;
183}
184
185char * __init
186paravirt_get_fsys_bubble_down(void)
187{
188 return pv_fsys_data.fsys_bubble_down;
189}
190
191static void __init 171static void __init
192patch_fsyscall_table (unsigned long start, unsigned long end) 172patch_fsyscall_table (unsigned long start, unsigned long end)
193{ 173{
194 u64 fsyscall_table = (u64)paravirt_get_fsyscall_table(); 174 extern unsigned long fsyscall_table[NR_syscalls];
195 s32 *offp = (s32 *) start; 175 s32 *offp = (s32 *) start;
196 u64 ip; 176 u64 ip;
197 177
198 while (offp < (s32 *) end) { 178 while (offp < (s32 *) end) {
199 ip = (u64) ia64_imva((char *) offp + *offp); 179 ip = (u64) ia64_imva((char *) offp + *offp);
200 ia64_patch_imm64(ip, fsyscall_table); 180 ia64_patch_imm64(ip, (u64) fsyscall_table);
201 ia64_fc((void *) ip); 181 ia64_fc((void *) ip);
202 ++offp; 182 ++offp;
203 } 183 }
@@ -208,7 +188,7 @@ patch_fsyscall_table (unsigned long start, unsigned long end)
208static void __init 188static void __init
209patch_brl_fsys_bubble_down (unsigned long start, unsigned long end) 189patch_brl_fsys_bubble_down (unsigned long start, unsigned long end)
210{ 190{
211 u64 fsys_bubble_down = (u64)paravirt_get_fsys_bubble_down(); 191 extern char fsys_bubble_down[];
212 s32 *offp = (s32 *) start; 192 s32 *offp = (s32 *) start;
213 u64 ip; 193 u64 ip;
214 194
@@ -226,13 +206,13 @@ patch_brl_fsys_bubble_down (unsigned long start, unsigned long end)
226void __init 206void __init
227ia64_patch_gate (void) 207ia64_patch_gate (void)
228{ 208{
229# define START(name) paravirt_get_gate_patchlist(PV_GATE_START_##name) 209# define START(name) ((unsigned long) __start_gate_##name##_patchlist)
230# define END(name) paravirt_get_gate_patchlist(PV_GATE_END_##name) 210# define END(name) ((unsigned long)__end_gate_##name##_patchlist)
231 211
232 patch_fsyscall_table(START(FSYSCALL), END(FSYSCALL)); 212 patch_fsyscall_table(START(fsyscall), END(fsyscall));
233 patch_brl_fsys_bubble_down(START(BRL_FSYS_BUBBLE_DOWN), END(BRL_FSYS_BUBBLE_DOWN)); 213 patch_brl_fsys_bubble_down(START(brl_fsys_bubble_down), END(brl_fsys_bubble_down));
234 ia64_patch_vtop(START(VTOP), END(VTOP)); 214 ia64_patch_vtop(START(vtop), END(vtop));
235 ia64_patch_mckinley_e9(START(MCKINLEY_E9), END(MCKINLEY_E9)); 215 ia64_patch_mckinley_e9(START(mckinley_e9), END(mckinley_e9));
236} 216}
237 217
238void ia64_patch_phys_stack_reg(unsigned long val) 218void ia64_patch_phys_stack_reg(unsigned long val)
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index b9761389cb8d..4f118b0d3091 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -50,8 +50,6 @@
50#include <asm/mca.h> 50#include <asm/mca.h>
51#include <asm/meminit.h> 51#include <asm/meminit.h>
52#include <asm/page.h> 52#include <asm/page.h>
53#include <asm/paravirt.h>
54#include <asm/paravirt_patch.h>
55#include <asm/patch.h> 53#include <asm/patch.h>
56#include <asm/pgtable.h> 54#include <asm/pgtable.h>
57#include <asm/processor.h> 55#include <asm/processor.h>
@@ -360,8 +358,6 @@ reserve_memory (void)
360 rsvd_region[n].end = (unsigned long) ia64_imva(_end); 358 rsvd_region[n].end = (unsigned long) ia64_imva(_end);
361 n++; 359 n++;
362 360
363 n += paravirt_reserve_memory(&rsvd_region[n]);
364
365#ifdef CONFIG_BLK_DEV_INITRD 361#ifdef CONFIG_BLK_DEV_INITRD
366 if (ia64_boot_param->initrd_start) { 362 if (ia64_boot_param->initrd_start) {
367 rsvd_region[n].start = (unsigned long)__va(ia64_boot_param->initrd_start); 363 rsvd_region[n].start = (unsigned long)__va(ia64_boot_param->initrd_start);
@@ -528,10 +524,7 @@ setup_arch (char **cmdline_p)
528{ 524{
529 unw_init(); 525 unw_init();
530 526
531 paravirt_arch_setup_early();
532
533 ia64_patch_vtop((u64) __start___vtop_patchlist, (u64) __end___vtop_patchlist); 527 ia64_patch_vtop((u64) __start___vtop_patchlist, (u64) __end___vtop_patchlist);
534 paravirt_patch_apply();
535 528
536 *cmdline_p = __va(ia64_boot_param->command_line); 529 *cmdline_p = __va(ia64_boot_param->command_line);
537 strlcpy(boot_command_line, *cmdline_p, COMMAND_LINE_SIZE); 530 strlcpy(boot_command_line, *cmdline_p, COMMAND_LINE_SIZE);
@@ -594,9 +587,6 @@ setup_arch (char **cmdline_p)
594 cpu_init(); /* initialize the bootstrap CPU */ 587 cpu_init(); /* initialize the bootstrap CPU */
595 mmu_context_init(); /* initialize context_id bitmap */ 588 mmu_context_init(); /* initialize context_id bitmap */
596 589
597 paravirt_banner();
598 paravirt_arch_setup_console(cmdline_p);
599
600#ifdef CONFIG_VT 590#ifdef CONFIG_VT
601 if (!conswitchp) { 591 if (!conswitchp) {
602# if defined(CONFIG_DUMMY_CONSOLE) 592# if defined(CONFIG_DUMMY_CONSOLE)
@@ -616,8 +606,6 @@ setup_arch (char **cmdline_p)
616#endif 606#endif
617 607
618 /* enable IA-64 Machine Check Abort Handling unless disabled */ 608 /* enable IA-64 Machine Check Abort Handling unless disabled */
619 if (paravirt_arch_setup_nomca())
620 nomca = 1;
621 if (!nomca) 609 if (!nomca)
622 ia64_mca_init(); 610 ia64_mca_init();
623 611
diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c
index 15051e9c2c6f..213a7a54df37 100644
--- a/arch/ia64/kernel/smpboot.c
+++ b/arch/ia64/kernel/smpboot.c
@@ -49,7 +49,6 @@
49#include <asm/machvec.h> 49#include <asm/machvec.h>
50#include <asm/mca.h> 50#include <asm/mca.h>
51#include <asm/page.h> 51#include <asm/page.h>
52#include <asm/paravirt.h>
53#include <asm/pgalloc.h> 52#include <asm/pgalloc.h>
54#include <asm/pgtable.h> 53#include <asm/pgtable.h>
55#include <asm/processor.h> 54#include <asm/processor.h>
@@ -568,7 +567,6 @@ void smp_prepare_boot_cpu(void)
568 cpumask_set_cpu(smp_processor_id(), &cpu_callin_map); 567 cpumask_set_cpu(smp_processor_id(), &cpu_callin_map);
569 set_numa_node(cpu_to_node_map[smp_processor_id()]); 568 set_numa_node(cpu_to_node_map[smp_processor_id()]);
570 per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE; 569 per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
571 paravirt_post_smp_prepare_boot_cpu();
572} 570}
573 571
574#ifdef CONFIG_HOTPLUG_CPU 572#ifdef CONFIG_HOTPLUG_CPU
diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c
index 9a0104a38cd3..c8dbe2acd735 100644
--- a/arch/ia64/kernel/time.c
+++ b/arch/ia64/kernel/time.c
@@ -25,7 +25,6 @@
25#include <asm/machvec.h> 25#include <asm/machvec.h>
26#include <asm/delay.h> 26#include <asm/delay.h>
27#include <asm/hw_irq.h> 27#include <asm/hw_irq.h>
28#include <asm/paravirt.h>
29#include <asm/ptrace.h> 28#include <asm/ptrace.h>
30#include <asm/sal.h> 29#include <asm/sal.h>
31#include <asm/sections.h> 30#include <asm/sections.h>
@@ -47,33 +46,12 @@ EXPORT_SYMBOL(last_cli_ip);
47 46
48#endif 47#endif
49 48
50#ifdef CONFIG_PARAVIRT
51/* We need to define a real function for sched_clock, to override the
52 weak default version */
53unsigned long long sched_clock(void)
54{
55 return paravirt_sched_clock();
56}
57#endif
58
59#ifdef CONFIG_PARAVIRT
60static void
61paravirt_clocksource_resume(struct clocksource *cs)
62{
63 if (pv_time_ops.clocksource_resume)
64 pv_time_ops.clocksource_resume();
65}
66#endif
67
68static struct clocksource clocksource_itc = { 49static struct clocksource clocksource_itc = {
69 .name = "itc", 50 .name = "itc",
70 .rating = 350, 51 .rating = 350,
71 .read = itc_get_cycles, 52 .read = itc_get_cycles,
72 .mask = CLOCKSOURCE_MASK(64), 53 .mask = CLOCKSOURCE_MASK(64),
73 .flags = CLOCK_SOURCE_IS_CONTINUOUS, 54 .flags = CLOCK_SOURCE_IS_CONTINUOUS,
74#ifdef CONFIG_PARAVIRT
75 .resume = paravirt_clocksource_resume,
76#endif
77}; 55};
78static struct clocksource *itc_clocksource; 56static struct clocksource *itc_clocksource;
79 57
@@ -164,9 +142,6 @@ timer_interrupt (int irq, void *dev_id)
164 142
165 profile_tick(CPU_PROFILING); 143 profile_tick(CPU_PROFILING);
166 144
167 if (paravirt_do_steal_accounting(&new_itm))
168 goto skip_process_time_accounting;
169
170 while (1) { 145 while (1) {
171 update_process_times(user_mode(get_irq_regs())); 146 update_process_times(user_mode(get_irq_regs()));
172 147
@@ -187,8 +162,6 @@ timer_interrupt (int irq, void *dev_id)
187 local_irq_disable(); 162 local_irq_disable();
188 } 163 }
189 164
190skip_process_time_accounting:
191
192 do { 165 do {
193 /* 166 /*
194 * If we're too close to the next clock tick for 167 * If we're too close to the next clock tick for
@@ -337,8 +310,6 @@ void ia64_init_itm(void)
337 */ 310 */
338 clocksource_itc.rating = 50; 311 clocksource_itc.rating = 50;
339 312
340 paravirt_init_missing_ticks_accounting(smp_processor_id());
341
342 /* avoid softlock up message when cpu is unplug and plugged again. */ 313 /* avoid softlock up message when cpu is unplug and plugged again. */
343 touch_softlockup_watchdog(); 314 touch_softlockup_watchdog();
344 315
diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
index 84f8a52ac5ae..dc506b05ffbd 100644
--- a/arch/ia64/kernel/vmlinux.lds.S
+++ b/arch/ia64/kernel/vmlinux.lds.S
@@ -136,27 +136,6 @@ SECTIONS {
136 __end___mckinley_e9_bundles = .; 136 __end___mckinley_e9_bundles = .;
137 } 137 }
138 138
139#if defined(CONFIG_PARAVIRT)
140 . = ALIGN(16);
141 .paravirt_bundles : AT(ADDR(.paravirt_bundles) - LOAD_OFFSET) {
142 __start_paravirt_bundles = .;
143 *(.paravirt_bundles)
144 __stop_paravirt_bundles = .;
145 }
146 . = ALIGN(16);
147 .paravirt_insts : AT(ADDR(.paravirt_insts) - LOAD_OFFSET) {
148 __start_paravirt_insts = .;
149 *(.paravirt_insts)
150 __stop_paravirt_insts = .;
151 }
152 . = ALIGN(16);
153 .paravirt_branches : AT(ADDR(.paravirt_branches) - LOAD_OFFSET) {
154 __start_paravirt_branches = .;
155 *(.paravirt_branches)
156 __stop_paravirt_branches = .;
157 }
158#endif
159
160#if defined(CONFIG_IA64_GENERIC) 139#if defined(CONFIG_IA64_GENERIC)
161 /* Machine Vector */ 140 /* Machine Vector */
162 . = ALIGN(16); 141 . = ALIGN(16);
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index a9b65cf7b34a..7f3028965064 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -34,7 +34,6 @@
34#include <asm/uaccess.h> 34#include <asm/uaccess.h>
35#include <asm/unistd.h> 35#include <asm/unistd.h>
36#include <asm/mca.h> 36#include <asm/mca.h>
37#include <asm/paravirt.h>
38 37
39extern void ia64_tlb_init (void); 38extern void ia64_tlb_init (void);
40 39
@@ -244,7 +243,6 @@ put_kernel_page (struct page *page, unsigned long address, pgprot_t pgprot)
244static void __init 243static void __init
245setup_gate (void) 244setup_gate (void)
246{ 245{
247 void *gate_section;
248 struct page *page; 246 struct page *page;
249 247
250 /* 248 /*
@@ -252,11 +250,10 @@ setup_gate (void)
252 * headers etc. and once execute-only page to enable 250 * headers etc. and once execute-only page to enable
253 * privilege-promotion via "epc": 251 * privilege-promotion via "epc":
254 */ 252 */
255 gate_section = paravirt_get_gate_section(); 253 page = virt_to_page(ia64_imva(__start_gate_section));
256 page = virt_to_page(ia64_imva(gate_section));
257 put_kernel_page(page, GATE_ADDR, PAGE_READONLY); 254 put_kernel_page(page, GATE_ADDR, PAGE_READONLY);
258#ifdef HAVE_BUGGY_SEGREL 255#ifdef HAVE_BUGGY_SEGREL
259 page = virt_to_page(ia64_imva(gate_section + PAGE_SIZE)); 256 page = virt_to_page(ia64_imva(__start_gate_section + PAGE_SIZE));
260 put_kernel_page(page, GATE_ADDR + PAGE_SIZE, PAGE_GATE); 257 put_kernel_page(page, GATE_ADDR + PAGE_SIZE, PAGE_GATE);
261#else 258#else
262 put_kernel_page(page, GATE_ADDR + PERCPU_PAGE_SIZE, PAGE_GATE); 259 put_kernel_page(page, GATE_ADDR + PERCPU_PAGE_SIZE, PAGE_GATE);
@@ -642,8 +639,8 @@ mem_init (void)
642 * code can tell them apart. 639 * code can tell them apart.
643 */ 640 */
644 for (i = 0; i < NR_syscalls; ++i) { 641 for (i = 0; i < NR_syscalls; ++i) {
642 extern unsigned long fsyscall_table[NR_syscalls];
645 extern unsigned long sys_call_table[NR_syscalls]; 643 extern unsigned long sys_call_table[NR_syscalls];
646 unsigned long *fsyscall_table = paravirt_get_fsyscall_table();
647 644
648 if (!fsyscall_table[i] || nolwsys) 645 if (!fsyscall_table[i] || nolwsys)
649 fsyscall_table[i] = sys_call_table[i] | 1; 646 fsyscall_table[i] = sys_call_table[i] | 1;
diff --git a/arch/ia64/scripts/pvcheck.sed b/arch/ia64/scripts/pvcheck.sed
deleted file mode 100644
index e59809a3fc01..000000000000
--- a/arch/ia64/scripts/pvcheck.sed
+++ /dev/null
@@ -1,33 +0,0 @@
1#
2# Checker for paravirtualizations of privileged operations.
3#
4s/ssm.*psr\.ic.*/.warning \"ssm psr.ic should not be used directly\"/g
5s/rsm.*psr\.ic.*/.warning \"rsm psr.ic should not be used directly\"/g
6s/ssm.*psr\.i.*/.warning \"ssm psr.i should not be used directly\"/g
7s/rsm.*psr\.i.*/.warning \"rsm psr.i should not be used directly\"/g
8s/ssm.*psr\.dt.*/.warning \"ssm psr.dt should not be used directly\"/g
9s/rsm.*psr\.dt.*/.warning \"rsm psr.dt should not be used directly\"/g
10s/mov.*=.*cr\.ifa/.warning \"cr.ifa should not be used directly\"/g
11s/mov.*=.*cr\.itir/.warning \"cr.itir should not be used directly\"/g
12s/mov.*=.*cr\.isr/.warning \"cr.isr should not be used directly\"/g
13s/mov.*=.*cr\.iha/.warning \"cr.iha should not be used directly\"/g
14s/mov.*=.*cr\.ipsr/.warning \"cr.ipsr should not be used directly\"/g
15s/mov.*=.*cr\.iim/.warning \"cr.iim should not be used directly\"/g
16s/mov.*=.*cr\.iip/.warning \"cr.iip should not be used directly\"/g
17s/mov.*=.*cr\.ivr/.warning \"cr.ivr should not be used directly\"/g
18s/mov.*=[^\.]*psr/.warning \"psr should not be used directly\"/g # avoid ar.fpsr
19s/mov.*=.*ar\.eflags/.warning \"ar.eflags should not be used directly\"/g
20s/mov.*=.*ar\.itc.*/.warning \"ar.itc should not be used directly\"/g
21s/mov.*cr\.ifa.*=.*/.warning \"cr.ifa should not be used directly\"/g
22s/mov.*cr\.itir.*=.*/.warning \"cr.itir should not be used directly\"/g
23s/mov.*cr\.iha.*=.*/.warning \"cr.iha should not be used directly\"/g
24s/mov.*cr\.ipsr.*=.*/.warning \"cr.ipsr should not be used directly\"/g
25s/mov.*cr\.ifs.*=.*/.warning \"cr.ifs should not be used directly\"/g
26s/mov.*cr\.iip.*=.*/.warning \"cr.iip should not be used directly\"/g
27s/mov.*cr\.kr.*=.*/.warning \"cr.kr should not be used directly\"/g
28s/mov.*ar\.eflags.*=.*/.warning \"ar.eflags should not be used directly\"/g
29s/itc\.i.*/.warning \"itc.i should not be used directly.\"/g
30s/itc\.d.*/.warning \"itc.d should not be used directly.\"/g
31s/bsw\.0/.warning \"bsw.0 should not be used directly.\"/g
32s/bsw\.1/.warning \"bsw.1 should not be used directly.\"/g
33s/ptc\.ga.*/.warning \"ptc.ga should not be used directly.\"/g