author     Tony Luck <tony.luck@intel.com>  2008-10-17 16:51:28 -0400
committer  Tony Luck <tony.luck@intel.com>  2008-10-17 16:51:28 -0400
commit     22ca532a4d137545244fdff0b687325fd4e13eae (patch)
tree       459f5116344c71d13d860f4fd9f0c2c477df268a
parent     9224652cc76417bfe779bcdb1243cf0b083e3eee (diff)
parent     f8d1f99f3958c46cdc983743d75d0b31b9accb80 (diff)
Pull pv_ops-xen into release branch
-rw-r--r--  Documentation/ia64/xen.txt | 183
-rw-r--r--  arch/ia64/Kconfig | 32
-rw-r--r--  arch/ia64/Makefile | 2
-rw-r--r--  arch/ia64/include/asm/break.h | 9
-rw-r--r--  arch/ia64/include/asm/machvec.h | 2
-rw-r--r--  arch/ia64/include/asm/machvec_xen.h | 22
-rw-r--r--  arch/ia64/include/asm/meminit.h | 3
-rw-r--r--  arch/ia64/include/asm/native/inst.h | 10
-rw-r--r--  arch/ia64/include/asm/native/pvchk_inst.h | 263
-rw-r--r--  arch/ia64/include/asm/paravirt.h | 4
-rw-r--r--  arch/ia64/include/asm/pvclock-abi.h | 48
-rw-r--r--  arch/ia64/include/asm/sync_bitops.h | 51
-rw-r--r--  arch/ia64/include/asm/timex.h | 2
-rw-r--r--  arch/ia64/include/asm/xen/events.h | 50
-rw-r--r--  arch/ia64/include/asm/xen/grant_table.h | 29
-rw-r--r--  arch/ia64/include/asm/xen/hypercall.h | 265
-rw-r--r--  arch/ia64/include/asm/xen/hypervisor.h | 89
-rw-r--r--  arch/ia64/include/asm/xen/inst.h | 458
-rw-r--r--  arch/ia64/include/asm/xen/interface.h | 346
-rw-r--r--  arch/ia64/include/asm/xen/irq.h | 44
-rw-r--r--  arch/ia64/include/asm/xen/minstate.h | 134
-rw-r--r--  arch/ia64/include/asm/xen/page.h | 65
-rw-r--r--  arch/ia64/include/asm/xen/privop.h | 129
-rw-r--r--  arch/ia64/include/asm/xen/xcom_hcall.h | 51
-rw-r--r--  arch/ia64/include/asm/xen/xencomm.h | 42
-rw-r--r--  arch/ia64/kernel/Makefile | 18
-rw-r--r--  arch/ia64/kernel/acpi.c | 5
-rw-r--r--  arch/ia64/kernel/asm-offsets.c | 31
-rw-r--r--  arch/ia64/kernel/ivt.S | 6
-rw-r--r--  arch/ia64/kernel/nr-irqs.c | 1
-rw-r--r--  arch/ia64/kernel/paravirt.c | 2
-rw-r--r--  arch/ia64/kernel/paravirt_inst.h | 4
-rw-r--r--  arch/ia64/kernel/process.c | 1
-rw-r--r--  arch/ia64/scripts/pvcheck.sed | 32
-rw-r--r--  arch/ia64/xen/Kconfig | 26
-rw-r--r--  arch/ia64/xen/Makefile | 22
-rw-r--r--  arch/ia64/xen/grant-table.c | 155
-rw-r--r--  arch/ia64/xen/hypercall.S | 91
-rw-r--r--  arch/ia64/xen/hypervisor.c | 96
-rw-r--r--  arch/ia64/xen/irq_xen.c | 435
-rw-r--r--  arch/ia64/xen/irq_xen.h | 34
-rw-r--r--  arch/ia64/xen/machvec.c | 4
-rw-r--r--  arch/ia64/xen/suspend.c | 64
-rw-r--r--  arch/ia64/xen/time.c | 213
-rw-r--r--  arch/ia64/xen/time.h | 24
-rw-r--r--  arch/ia64/xen/xcom_hcall.c | 441
-rw-r--r--  arch/ia64/xen/xen_pv_ops.c | 364
-rw-r--r--  arch/ia64/xen/xencomm.c | 105
-rw-r--r--  arch/ia64/xen/xenivt.S | 52
-rw-r--r--  arch/ia64/xen/xensetup.S | 83
50 files changed, 4631 insertions, 11 deletions
diff --git a/Documentation/ia64/xen.txt b/Documentation/ia64/xen.txt
new file mode 100644
index 000000000000..c61a99f7c8bb
--- /dev/null
+++ b/Documentation/ia64/xen.txt
@@ -0,0 +1,183 @@
1 Recipe for getting/building/running Xen/ia64 with pv_ops
2 --------------------------------------------------------
3
4This recipe describes how to get the xen-ia64 source, build it,
5and run a domU with pv_ops.
6
7============
8Requirements
9============
10
11 - python
12 - mercurial
 13 Mercurial (aka "hg") is an open-source source code
 14 management tool. See the site below:
 15 http://www.selenic.com/mercurial/wiki/
16 - git
17 - bridge-utils
18
19=================================
20Getting and Building Xen and Dom0
21=================================
22
 23 My environment is:
24 Machine : Tiger4
25 Domain0 OS : RHEL5
26 DomainU OS : RHEL5
27
28 1. Download source
29 # hg clone http://xenbits.xensource.com/ext/ia64/xen-unstable.hg
30 # cd xen-unstable.hg
31 # hg clone http://xenbits.xensource.com/ext/ia64/linux-2.6.18-xen.hg
32
33 2. # make world
34
35 3. # make install-tools
36
37 4. copy kernels and xen
38 # cp xen/xen.gz /boot/efi/efi/redhat/
39 # cp build-linux-2.6.18-xen_ia64/vmlinux.gz \
40 /boot/efi/efi/redhat/vmlinuz-2.6.18.8-xen
41
42 5. make initrd for Dom0/DomU
43 # make -C linux-2.6.18-xen.hg ARCH=ia64 modules_install \
44 O=$(/bin/pwd)/build-linux-2.6.18-xen_ia64
45 # mkinitrd -f /boot/efi/efi/redhat/initrd-2.6.18.8-xen.img \
46 2.6.18.8-xen --builtin mptspi --builtin mptbase \
47 --builtin mptscsih --builtin uhci-hcd --builtin ohci-hcd \
48 --builtin ehci-hcd
49
50================================
51Making a disk image for guest OS
52================================
53
54 1. make file
55 # dd if=/dev/zero of=/root/rhel5.img bs=1M seek=4096 count=0
56 # mke2fs -F -j /root/rhel5.img
57 # mount -o loop /root/rhel5.img /mnt
58 # cp -ax /{dev,var,etc,usr,bin,sbin,lib} /mnt
59 # mkdir /mnt/{root,proc,sys,home,tmp}
60
 61 Note: Some device files may be missing. If so, create them
 62 with mknod, or use tar instead of cp.
63
64 2. modify DomU's fstab
65 # vi /mnt/etc/fstab
66 /dev/xvda1 / ext3 defaults 1 1
67 none /dev/pts devpts gid=5,mode=620 0 0
68 none /dev/shm tmpfs defaults 0 0
69 none /proc proc defaults 0 0
70 none /sys sysfs defaults 0 0
71
 72 3. modify inittab
 73 Set the runlevel to 3 to avoid X trying to start:
 74 # vi /mnt/etc/inittab
 75 id:3:initdefault:
 76 Start a getty on the hvc0 console:
 77 X0:2345:respawn:/sbin/mingetty hvc0
 78 The tty1-6 mingetty lines can be commented out.
79
80 4. add hvc0 into /etc/securetty
81 # vi /mnt/etc/securetty (add hvc0)
82
83 5. umount
84 # umount /mnt
85
86FYI, virt-manager can also create a disk image for the guest OS.
87It is a GUI tool and makes this easy.
88
89==================
90Boot Xen & Domain0
91==================
92
93 1. replace elilo
 94 The elilo shipped with RHEL5 can boot Xen and Dom0.
 95 If you use an old elilo (e.g. RHEL4), download a newer one from
 96 http://elilo.sourceforge.net/cgi-bin/blosxom
 97 and copy it into /boot/efi/efi/redhat/
98 # cp elilo-3.6-ia64.efi /boot/efi/efi/redhat/elilo.efi
99
 100 2. modify elilo.conf (as in the example below)
101 # vi /boot/efi/efi/redhat/elilo.conf
102 prompt
103 timeout=20
104 default=xen
105 relocatable
106
107 image=vmlinuz-2.6.18.8-xen
108 label=xen
109 vmm=xen.gz
110 initrd=initrd-2.6.18.8-xen.img
111 read-only
112 append=" -- rhgb root=/dev/sda2"
113
114The append options before "--" are for the xen hypervisor;
115the options after "--" are for dom0.
116
117FYI, your machine may need console options like
118"com1=19200,8n1 console=vga,com1". For example,
119append="com1=19200,8n1 console=vga,com1 -- rhgb console=tty0 \
120console=ttyS0 root=/dev/sda2"
121
122=====================================
123Getting and Building domU with pv_ops
124=====================================
125
126 1. get pv_ops tree
127 # git clone http://people.valinux.co.jp/~yamahata/xen-ia64/linux-2.6-xen-ia64.git/
128
129 2. git branch (if necessary)
130 # cd linux-2.6-xen-ia64/
131 # git checkout -b your_branch origin/xen-ia64-domu-minimal-2008may19
 132 (Note: The current branch is xen-ia64-domu-minimal-2008may19,
 133 but a newer branch may be available; run
 134 "git branch -r" to list the remote branches.
 135 http://people.valinux.co.jp/~yamahata/xen-ia64/for_eagl/linux-2.6-ia64-pv-ops.git/
 136 is also available. That tree is based on
 137 git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux-2.6 test)
138
139
140 3. copy .config for pv_ops of domU
141 # cp arch/ia64/configs/xen_domu_wip_defconfig .config
142
143 4. make kernel with pv_ops
144 # make oldconfig
145 # make
146
147 5. install the kernel and initrd
148 # cp vmlinux.gz /boot/efi/efi/redhat/vmlinuz-2.6-pv_ops-xenU
149 # make modules_install
150 # mkinitrd -f /boot/efi/efi/redhat/initrd-2.6-pv_ops-xenU.img \
151 2.6.26-rc3xen-ia64-08941-g1b12161 --builtin mptspi \
152 --builtin mptbase --builtin mptscsih --builtin uhci-hcd \
153 --builtin ohci-hcd --builtin ehci-hcd
154
155========================
156Boot DomainU with pv_ops
157========================
158
 159 1. make a config file for DomU
160 # vi /etc/xen/rhel5
161 kernel = "/boot/efi/efi/redhat/vmlinuz-2.6-pv_ops-xenU"
162 ramdisk = "/boot/efi/efi/redhat/initrd-2.6-pv_ops-xenU.img"
163 vcpus = 1
164 memory = 512
165 name = "rhel5"
166 disk = [ 'file:/root/rhel5.img,xvda1,w' ]
167 root = "/dev/xvda1 ro"
168 extra= "rhgb console=hvc0"
169
 170 2. After booting xen and dom0, start xend
171 # /etc/init.d/xend start
 172 (For debugging, run: # XEND_DEBUG=1 xend trace_start)
173
174 3. start domU
175 # xm create -c rhel5
176
177=========
178Reference
179=========
180- Wiki of Xen/IA64 upstream merge
181 http://wiki.xensource.com/xenwiki/XenIA64/UpstreamMerge
182
183Written by Akio Takebe <takebe_akio@jp.fujitsu.com> on 28 May 2008
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 892f296f99b6..239ad6b1c74f 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -109,6 +109,33 @@ config AUDIT_ARCH
109 bool 109 bool
110 default y 110 default y
111 111
112menuconfig PARAVIRT_GUEST
113 bool "Paravirtualized guest support"
114 help
115 Say Y here to get to see options related to running Linux under
116 various hypervisors. This option alone does not add any kernel code.
117
118 If you say N, all options in this submenu will be skipped and disabled.
119
120if PARAVIRT_GUEST
121
122config PARAVIRT
123 bool "Enable paravirtualization code"
124 depends on PARAVIRT_GUEST
125 default y
126 bool
127 default y
128 help
129 This changes the kernel so it can modify itself when it is run
130 under a hypervisor, potentially improving performance significantly
131 over full virtualization. However, when run without a hypervisor
132 the kernel is theoretically slower and slightly larger.
133
134
135source "arch/ia64/xen/Kconfig"
136
137endif
138
112choice 139choice
113 prompt "System type" 140 prompt "System type"
114 default IA64_GENERIC 141 default IA64_GENERIC
@@ -130,6 +157,7 @@ config IA64_GENERIC
130 SGI-SN2 For SGI Altix systems 157 SGI-SN2 For SGI Altix systems
131 SGI-UV For SGI UV systems 158 SGI-UV For SGI UV systems
132 Ski-simulator For the HP simulator <http://www.hpl.hp.com/research/linux/ski/> 159 Ski-simulator For the HP simulator <http://www.hpl.hp.com/research/linux/ski/>
160 Xen-domU For xen domU system
133 161
134 If you don't know what to do, choose "generic". 162 If you don't know what to do, choose "generic".
135 163
@@ -180,6 +208,10 @@ config IA64_HP_SIM
180 bool "Ski-simulator" 208 bool "Ski-simulator"
181 select SWIOTLB 209 select SWIOTLB
182 210
211config IA64_XEN_GUEST
212 bool "Xen guest"
213 depends on XEN
214
183endchoice 215endchoice
184 216
185choice 217choice
diff --git a/arch/ia64/Makefile b/arch/ia64/Makefile
index 905d25b13d5a..40242501bcdd 100644
--- a/arch/ia64/Makefile
+++ b/arch/ia64/Makefile
@@ -56,9 +56,11 @@ core-$(CONFIG_IA64_DIG) += arch/ia64/dig/
56core-$(CONFIG_IA64_GENERIC) += arch/ia64/dig/ 56core-$(CONFIG_IA64_GENERIC) += arch/ia64/dig/
57core-$(CONFIG_IA64_HP_ZX1) += arch/ia64/dig/ 57core-$(CONFIG_IA64_HP_ZX1) += arch/ia64/dig/
58core-$(CONFIG_IA64_HP_ZX1_SWIOTLB) += arch/ia64/dig/ 58core-$(CONFIG_IA64_HP_ZX1_SWIOTLB) += arch/ia64/dig/
59core-$(CONFIG_IA64_XEN_GUEST) += arch/ia64/dig/
59core-$(CONFIG_IA64_SGI_SN2) += arch/ia64/sn/ 60core-$(CONFIG_IA64_SGI_SN2) += arch/ia64/sn/
60core-$(CONFIG_IA64_SGI_UV) += arch/ia64/uv/ 61core-$(CONFIG_IA64_SGI_UV) += arch/ia64/uv/
61core-$(CONFIG_KVM) += arch/ia64/kvm/ 62core-$(CONFIG_KVM) += arch/ia64/kvm/
63core-$(CONFIG_XEN) += arch/ia64/xen/
62 64
63drivers-$(CONFIG_PCI) += arch/ia64/pci/ 65drivers-$(CONFIG_PCI) += arch/ia64/pci/
64drivers-$(CONFIG_IA64_HP_SIM) += arch/ia64/hp/sim/ 66drivers-$(CONFIG_IA64_HP_SIM) += arch/ia64/hp/sim/
diff --git a/arch/ia64/include/asm/break.h b/arch/ia64/include/asm/break.h
index f03402039896..e90c40ec9edf 100644
--- a/arch/ia64/include/asm/break.h
+++ b/arch/ia64/include/asm/break.h
@@ -20,4 +20,13 @@
20 */ 20 */
21#define __IA64_BREAK_SYSCALL 0x100000 21#define __IA64_BREAK_SYSCALL 0x100000
22 22
23/*
24 * Xen specific break numbers:
25 */
26#define __IA64_XEN_HYPERCALL 0x1000
27/* [__IA64_XEN_HYPERPRIVOP_START, __IA64_XEN_HYPERPRIVOP_MAX] is used
28 for xen hyperprivops */
29#define __IA64_XEN_HYPERPRIVOP_START 0x1
30#define __IA64_XEN_HYPERPRIVOP_MAX 0x1a
31
23#endif /* _ASM_IA64_BREAK_H */ 32#endif /* _ASM_IA64_BREAK_H */
diff --git a/arch/ia64/include/asm/machvec.h b/arch/ia64/include/asm/machvec.h
index 2b850ccafef5..de99cb2799cf 100644
--- a/arch/ia64/include/asm/machvec.h
+++ b/arch/ia64/include/asm/machvec.h
@@ -128,6 +128,8 @@ extern void machvec_tlb_migrate_finish (struct mm_struct *);
128# include <asm/machvec_sn2.h> 128# include <asm/machvec_sn2.h>
129# elif defined (CONFIG_IA64_SGI_UV) 129# elif defined (CONFIG_IA64_SGI_UV)
130# include <asm/machvec_uv.h> 130# include <asm/machvec_uv.h>
131# elif defined (CONFIG_IA64_XEN_GUEST)
132# include <asm/machvec_xen.h>
131# elif defined (CONFIG_IA64_GENERIC) 133# elif defined (CONFIG_IA64_GENERIC)
132 134
133# ifdef MACHVEC_PLATFORM_HEADER 135# ifdef MACHVEC_PLATFORM_HEADER
diff --git a/arch/ia64/include/asm/machvec_xen.h b/arch/ia64/include/asm/machvec_xen.h
new file mode 100644
index 000000000000..55f9228056cd
--- /dev/null
+++ b/arch/ia64/include/asm/machvec_xen.h
@@ -0,0 +1,22 @@
1#ifndef _ASM_IA64_MACHVEC_XEN_h
2#define _ASM_IA64_MACHVEC_XEN_h
3
4extern ia64_mv_setup_t dig_setup;
5extern ia64_mv_cpu_init_t xen_cpu_init;
6extern ia64_mv_irq_init_t xen_irq_init;
7extern ia64_mv_send_ipi_t xen_platform_send_ipi;
8
9/*
10 * This stuff has dual use!
11 *
12 * For a generic kernel, the macros are used to initialize the
13 * platform's machvec structure. When compiling a non-generic kernel,
14 * the macros are used directly.
15 */
16#define platform_name "xen"
17#define platform_setup dig_setup
18#define platform_cpu_init xen_cpu_init
19#define platform_irq_init xen_irq_init
20#define platform_send_ipi xen_platform_send_ipi
21
22#endif /* _ASM_IA64_MACHVEC_XEN_h */
diff --git a/arch/ia64/include/asm/meminit.h b/arch/ia64/include/asm/meminit.h
index 7245a5781594..6bc96ee54327 100644
--- a/arch/ia64/include/asm/meminit.h
+++ b/arch/ia64/include/asm/meminit.h
@@ -18,10 +18,11 @@
18 * - crash dumping code reserved region 18 * - crash dumping code reserved region
19 * - Kernel memory map built from EFI memory map 19 * - Kernel memory map built from EFI memory map
20 * - ELF core header 20 * - ELF core header
21 * - xen start info if CONFIG_XEN
21 * 22 *
22 * More could be added if necessary 23 * More could be added if necessary
23 */ 24 */
24#define IA64_MAX_RSVD_REGIONS 8 25#define IA64_MAX_RSVD_REGIONS 9
25 26
26struct rsvd_region { 27struct rsvd_region {
27 unsigned long start; /* virtual address of beginning of element */ 28 unsigned long start; /* virtual address of beginning of element */
diff --git a/arch/ia64/include/asm/native/inst.h b/arch/ia64/include/asm/native/inst.h
index c8efbf7b849e..0a1026cca4fa 100644
--- a/arch/ia64/include/asm/native/inst.h
+++ b/arch/ia64/include/asm/native/inst.h
@@ -36,8 +36,13 @@
36 ;; \ 36 ;; \
37 movl clob = PARAVIRT_POISON; \ 37 movl clob = PARAVIRT_POISON; \
38 ;; 38 ;;
39# define CLOBBER_PRED(pred_clob) \
40 ;; \
41 cmp.eq pred_clob, p0 = r0, r0 \
42 ;;
39#else 43#else
40# define CLOBBER(clob) /* nothing */ 44# define CLOBBER(clob) /* nothing */
45# define CLOBBER_PRED(pred_clob) /* nothing */
41#endif 46#endif
42 47
43#define MOV_FROM_IFA(reg) \ 48#define MOV_FROM_IFA(reg) \
@@ -136,7 +141,8 @@
136 141
137#define SSM_PSR_I(pred, pred_clob, clob) \ 142#define SSM_PSR_I(pred, pred_clob, clob) \
138(pred) ssm psr.i \ 143(pred) ssm psr.i \
139 CLOBBER(clob) 144 CLOBBER(clob) \
145 CLOBBER_PRED(pred_clob)
140 146
141#define RSM_PSR_I(pred, clob0, clob1) \ 147#define RSM_PSR_I(pred, clob0, clob1) \
142(pred) rsm psr.i \ 148(pred) rsm psr.i \
diff --git a/arch/ia64/include/asm/native/pvchk_inst.h b/arch/ia64/include/asm/native/pvchk_inst.h
new file mode 100644
index 000000000000..b8e6eb1090d7
--- /dev/null
+++ b/arch/ia64/include/asm/native/pvchk_inst.h
@@ -0,0 +1,263 @@
1#ifndef _ASM_NATIVE_PVCHK_INST_H
2#define _ASM_NATIVE_PVCHK_INST_H
3
4/******************************************************************************
5 * arch/ia64/include/asm/native/pvchk_inst.h
6 * Checker for paravirtualizations of privileged operations.
7 *
8 * Copyright (C) 2005 Hewlett-Packard Co
9 * Dan Magenheimer <dan.magenheimer@hp.com>
10 *
11 * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
12 * VA Linux Systems Japan K.K.
13 *
14 * This program is free software; you can redistribute it and/or modify
15 * it under the terms of the GNU General Public License as published by
16 * the Free Software Foundation; either version 2 of the License, or
17 * (at your option) any later version.
18 *
19 * This program is distributed in the hope that it will be useful,
20 * but WITHOUT ANY WARRANTY; without even the implied warranty of
21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
22 * GNU General Public License for more details.
23 *
24 * You should have received a copy of the GNU General Public License
25 * along with this program; if not, write to the Free Software
26 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
27 *
28 */
29
30/**********************************************
31 * Instructions paravirtualized for correctness
32 **********************************************/
33
34/* "fc" and "thash" are privilege-sensitive instructions, meaning they
35 * may have different semantics depending on whether they are executed
36 * at PL0 vs PL!=0. When paravirtualized, these instructions mustn't
37 * be allowed to execute directly, lest incorrect semantics result.
38 */
39
40#define fc .error "fc should not be used directly."
41#define thash .error "thash should not be used directly."
42
43/* Note that "ttag" and "cover" are also privilege-sensitive; "ttag"
44 * is not currently used (though it may be in a long-format VHPT system!)
45 * and the semantics of cover only change if psr.ic is off which is very
46 * rare (and currently non-existent outside of assembly code
47 */
48#define ttag .error "ttag should not be used directly."
49#define cover .error "cover should not be used directly."
50
51/* There are also privilege-sensitive registers. These registers are
52 * readable at any privilege level but only writable at PL0.
53 */
54#define cpuid .error "cpuid should not be used directly."
55#define pmd .error "pmd should not be used directly."
56
57/*
58 * mov ar.eflag =
59 * mov = ar.eflag
60 */
61
62/**********************************************
63 * Instructions paravirtualized for performance
64 **********************************************/
65/*
 66 * These instructions include '.', which can't be handled by cpp
 67 * (or can't be handled by cpp easily).
68 * They are handled by sed instead of cpp.
69 */
70
71/* for .S
72 * itc.i
73 * itc.d
74 *
75 * bsw.0
76 * bsw.1
77 *
78 * ssm psr.ic | PSR_DEFAULT_BITS
79 * ssm psr.ic
80 * rsm psr.ic
81 * ssm psr.i
82 * rsm psr.i
83 * rsm psr.i | psr.ic
84 * rsm psr.dt
85 * ssm psr.dt
86 *
87 * mov = cr.ifa
88 * mov = cr.itir
89 * mov = cr.isr
90 * mov = cr.iha
91 * mov = cr.ipsr
92 * mov = cr.iim
93 * mov = cr.iip
94 * mov = cr.ivr
95 * mov = psr
96 *
97 * mov cr.ifa =
98 * mov cr.itir =
99 * mov cr.iha =
100 * mov cr.ipsr =
101 * mov cr.ifs =
102 * mov cr.iip =
103 * mov cr.kr =
104 */
105
106/* for intrinsics
107 * ssm psr.i
108 * rsm psr.i
109 * mov = psr
110 * mov = ivr
111 * mov = tpr
112 * mov cr.itm =
113 * mov eoi =
114 * mov rr[] =
115 * mov = rr[]
116 * mov = kr
117 * mov kr =
118 * ptc.ga
119 */
120
121/*************************************************************
 122 * Define paravirtualized instruction macros as nops to ignore them,
 123 * and check whether their arguments are appropriate.
124 *************************************************************/
125
126/* check whether reg is a regular register */
127.macro is_rreg_in reg
128 .ifc "\reg", "r0"
129 nop 0
130 .exitm
131 .endif
132 ;;
133 mov \reg = r0
134 ;;
135.endm
136#define IS_RREG_IN(reg) is_rreg_in reg ;
137
138#define IS_RREG_OUT(reg) \
139 ;; \
140 mov reg = r0 \
141 ;;
142
143#define IS_RREG_CLOB(reg) IS_RREG_OUT(reg)
144
145/* check whether pred is a predicate register */
146#define IS_PRED_IN(pred) \
147 ;; \
148 (pred) nop 0 \
149 ;;
150
151#define IS_PRED_OUT(pred) \
152 ;; \
153 cmp.eq pred, p0 = r0, r0 \
154 ;;
155
156#define IS_PRED_CLOB(pred) IS_PRED_OUT(pred)
157
158
159#define DO_SAVE_MIN(__COVER, SAVE_IFS, EXTRA, WORKAROUND) \
160 nop 0
161#define MOV_FROM_IFA(reg) \
162 IS_RREG_OUT(reg)
163#define MOV_FROM_ITIR(reg) \
164 IS_RREG_OUT(reg)
165#define MOV_FROM_ISR(reg) \
166 IS_RREG_OUT(reg)
167#define MOV_FROM_IHA(reg) \
168 IS_RREG_OUT(reg)
169#define MOV_FROM_IPSR(pred, reg) \
170 IS_PRED_IN(pred) \
171 IS_RREG_OUT(reg)
172#define MOV_FROM_IIM(reg) \
173 IS_RREG_OUT(reg)
174#define MOV_FROM_IIP(reg) \
175 IS_RREG_OUT(reg)
176#define MOV_FROM_IVR(reg, clob) \
177 IS_RREG_OUT(reg) \
178 IS_RREG_CLOB(clob)
179#define MOV_FROM_PSR(pred, reg, clob) \
180 IS_PRED_IN(pred) \
181 IS_RREG_OUT(reg) \
182 IS_RREG_CLOB(clob)
183#define MOV_TO_IFA(reg, clob) \
184 IS_RREG_IN(reg) \
185 IS_RREG_CLOB(clob)
186#define MOV_TO_ITIR(pred, reg, clob) \
187 IS_PRED_IN(pred) \
188 IS_RREG_IN(reg) \
189 IS_RREG_CLOB(clob)
190#define MOV_TO_IHA(pred, reg, clob) \
191 IS_PRED_IN(pred) \
192 IS_RREG_IN(reg) \
193 IS_RREG_CLOB(clob)
194#define MOV_TO_IPSR(pred, reg, clob) \
195 IS_PRED_IN(pred) \
196 IS_RREG_IN(reg) \
197 IS_RREG_CLOB(clob)
198#define MOV_TO_IFS(pred, reg, clob) \
199 IS_PRED_IN(pred) \
200 IS_RREG_IN(reg) \
201 IS_RREG_CLOB(clob)
202#define MOV_TO_IIP(reg, clob) \
203 IS_RREG_IN(reg) \
204 IS_RREG_CLOB(clob)
205#define MOV_TO_KR(kr, reg, clob0, clob1) \
206 IS_RREG_IN(reg) \
207 IS_RREG_CLOB(clob0) \
208 IS_RREG_CLOB(clob1)
209#define ITC_I(pred, reg, clob) \
210 IS_PRED_IN(pred) \
211 IS_RREG_IN(reg) \
212 IS_RREG_CLOB(clob)
213#define ITC_D(pred, reg, clob) \
214 IS_PRED_IN(pred) \
215 IS_RREG_IN(reg) \
216 IS_RREG_CLOB(clob)
217#define ITC_I_AND_D(pred_i, pred_d, reg, clob) \
218 IS_PRED_IN(pred_i) \
219 IS_PRED_IN(pred_d) \
220 IS_RREG_IN(reg) \
221 IS_RREG_CLOB(clob)
222#define THASH(pred, reg0, reg1, clob) \
223 IS_PRED_IN(pred) \
224 IS_RREG_OUT(reg0) \
225 IS_RREG_IN(reg1) \
226 IS_RREG_CLOB(clob)
227#define SSM_PSR_IC_AND_DEFAULT_BITS_AND_SRLZ_I(clob0, clob1) \
228 IS_RREG_CLOB(clob0) \
229 IS_RREG_CLOB(clob1)
230#define SSM_PSR_IC_AND_SRLZ_D(clob0, clob1) \
231 IS_RREG_CLOB(clob0) \
232 IS_RREG_CLOB(clob1)
233#define RSM_PSR_IC(clob) \
234 IS_RREG_CLOB(clob)
235#define SSM_PSR_I(pred, pred_clob, clob) \
236 IS_PRED_IN(pred) \
237 IS_PRED_CLOB(pred_clob) \
238 IS_RREG_CLOB(clob)
239#define RSM_PSR_I(pred, clob0, clob1) \
240 IS_PRED_IN(pred) \
241 IS_RREG_CLOB(clob0) \
242 IS_RREG_CLOB(clob1)
243#define RSM_PSR_I_IC(clob0, clob1, clob2) \
244 IS_RREG_CLOB(clob0) \
245 IS_RREG_CLOB(clob1) \
246 IS_RREG_CLOB(clob2)
247#define RSM_PSR_DT \
248 nop 0
249#define SSM_PSR_DT_AND_SRLZ_I \
250 nop 0
251#define BSW_0(clob0, clob1, clob2) \
252 IS_RREG_CLOB(clob0) \
253 IS_RREG_CLOB(clob1) \
254 IS_RREG_CLOB(clob2)
255#define BSW_1(clob0, clob1) \
256 IS_RREG_CLOB(clob0) \
257 IS_RREG_CLOB(clob1)
258#define COVER \
259 nop 0
260#define RFI \
261 br.ret.sptk.many rp /* defining nop causes dependency error */
262
263#endif /* _ASM_NATIVE_PVCHK_INST_H */
diff --git a/arch/ia64/include/asm/paravirt.h b/arch/ia64/include/asm/paravirt.h
index 660cab044834..2bf3636473fe 100644
--- a/arch/ia64/include/asm/paravirt.h
+++ b/arch/ia64/include/asm/paravirt.h
@@ -117,7 +117,7 @@ static inline void paravirt_post_smp_prepare_boot_cpu(void)
117struct pv_iosapic_ops { 117struct pv_iosapic_ops {
118 void (*pcat_compat_init)(void); 118 void (*pcat_compat_init)(void);
119 119
120 struct irq_chip *(*get_irq_chip)(unsigned long trigger); 120 struct irq_chip *(*__get_irq_chip)(unsigned long trigger);
121 121
122 unsigned int (*__read)(char __iomem *iosapic, unsigned int reg); 122 unsigned int (*__read)(char __iomem *iosapic, unsigned int reg);
123 void (*__write)(char __iomem *iosapic, unsigned int reg, u32 val); 123 void (*__write)(char __iomem *iosapic, unsigned int reg, u32 val);
@@ -135,7 +135,7 @@ iosapic_pcat_compat_init(void)
135static inline struct irq_chip* 135static inline struct irq_chip*
136iosapic_get_irq_chip(unsigned long trigger) 136iosapic_get_irq_chip(unsigned long trigger)
137{ 137{
138 return pv_iosapic_ops.get_irq_chip(trigger); 138 return pv_iosapic_ops.__get_irq_chip(trigger);
139} 139}
140 140
141static inline unsigned int 141static inline unsigned int
diff --git a/arch/ia64/include/asm/pvclock-abi.h b/arch/ia64/include/asm/pvclock-abi.h
new file mode 100644
index 000000000000..44ef9ef8f5b3
--- /dev/null
+++ b/arch/ia64/include/asm/pvclock-abi.h
@@ -0,0 +1,48 @@
1/*
 2 * Same structure as x86's.
 3 * Hopefully asm-x86/pvclock-abi.h will be moved somewhere more generic.
 4 * For now, duplicate the same definitions here.
5 */
6
7#ifndef _ASM_IA64__PVCLOCK_ABI_H
8#define _ASM_IA64__PVCLOCK_ABI_H
9#ifndef __ASSEMBLY__
10
11/*
12 * These structs MUST NOT be changed.
13 * They are the ABI between hypervisor and guest OS.
14 * Both Xen and KVM are using this.
15 *
16 * pvclock_vcpu_time_info holds the system time and the tsc timestamp
17 * of the last update. So the guest can use the tsc delta to get a
18 * more precise system time. There is one per virtual cpu.
19 *
20 * pvclock_wall_clock references the point in time when the system
21 * time was zero (usually boot time), thus the guest calculates the
22 * current wall clock by adding the system time.
23 *
24 * Protocol for the "version" fields is: hypervisor raises it (making
25 * it uneven) before it starts updating the fields and raises it again
26 * (making it even) when it is done. Thus the guest can make sure the
27 * time values it got are consistent by checking the version before
28 * and after reading them.
29 */
30
31struct pvclock_vcpu_time_info {
32 u32 version;
33 u32 pad0;
34 u64 tsc_timestamp;
35 u64 system_time;
36 u32 tsc_to_system_mul;
37 s8 tsc_shift;
38 u8 pad[3];
39} __attribute__((__packed__)); /* 32 bytes */
40
41struct pvclock_wall_clock {
42 u32 version;
43 u32 sec;
44 u32 nsec;
45} __attribute__((__packed__));
46
47#endif /* __ASSEMBLY__ */
48#endif /* _ASM_IA64__PVCLOCK_ABI_H */
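
For illustration only, a minimal sketch (not part of this patch) of how a guest might read a consistent snapshot using the version protocol described in the header comment above. The function name is made up; rmb() is the usual kernel read barrier, and the structure is the pvclock_vcpu_time_info defined in this file.

    static u64 example_pvclock_read_system_time(const struct pvclock_vcpu_time_info *src)
    {
    	u32 version;
    	u64 system_time;

    	do {
    		version = src->version;	/* odd while the hypervisor is updating */
    		rmb();			/* read version before the payload */
    		system_time = src->system_time;
    		rmb();			/* read payload before re-checking */
    	} while ((version & 1) || version != src->version);

    	return system_time;
    }
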
diff --git a/arch/ia64/include/asm/sync_bitops.h b/arch/ia64/include/asm/sync_bitops.h
new file mode 100644
index 000000000000..593c12eeb270
--- /dev/null
+++ b/arch/ia64/include/asm/sync_bitops.h
@@ -0,0 +1,51 @@
1#ifndef _ASM_IA64_SYNC_BITOPS_H
2#define _ASM_IA64_SYNC_BITOPS_H
3
4/*
5 * Copyright (C) 2008 Isaku Yamahata <yamahata at valinux co jp>
6 *
 7 * Based on synch_bitops.h, which Dan Magenheimer wrote.
8 *
9 * bit operations which provide guaranteed strong synchronisation
10 * when communicating with Xen or other guest OSes running on other CPUs.
11 */
12
13static inline void sync_set_bit(int nr, volatile void *addr)
14{
15 set_bit(nr, addr);
16}
17
18static inline void sync_clear_bit(int nr, volatile void *addr)
19{
20 clear_bit(nr, addr);
21}
22
23static inline void sync_change_bit(int nr, volatile void *addr)
24{
25 change_bit(nr, addr);
26}
27
28static inline int sync_test_and_set_bit(int nr, volatile void *addr)
29{
30 return test_and_set_bit(nr, addr);
31}
32
33static inline int sync_test_and_clear_bit(int nr, volatile void *addr)
34{
35 return test_and_clear_bit(nr, addr);
36}
37
38static inline int sync_test_and_change_bit(int nr, volatile void *addr)
39{
40 return test_and_change_bit(nr, addr);
41}
42
43static inline int sync_test_bit(int nr, const volatile void *addr)
44{
45 return test_bit(nr, addr);
46}
47
48#define sync_cmpxchg(ptr, old, new) \
49 ((__typeof__(*(ptr)))cmpxchg_acq((ptr), (old), (new)))
50
51#endif /* _ASM_IA64_SYNC_BITOPS_H */
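
A hypothetical usage sketch (not part of this patch): since ia64's regular bitops are already fully atomic, the sync_ wrappers above can be applied directly to memory shared with the hypervisor. The names "shared" and "port" below are illustrative only.

    /* Atomically claim a pending bit in a bitmap shared with Xen,
     * e.g. an event-channel pending word.  Returns non-zero only for
     * the caller that actually cleared the bit. */
    static inline int example_claim_event(volatile unsigned long *shared, int port)
    {
    	return sync_test_and_clear_bit(port, shared);
    }
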
diff --git a/arch/ia64/include/asm/timex.h b/arch/ia64/include/asm/timex.h
index 05a6baf8a472..4e03cfe74a0c 100644
--- a/arch/ia64/include/asm/timex.h
+++ b/arch/ia64/include/asm/timex.h
@@ -39,4 +39,6 @@ get_cycles (void)
39 return ret; 39 return ret;
40} 40}
41 41
42extern void ia64_cpu_local_tick (void);
43
42#endif /* _ASM_IA64_TIMEX_H */ 44#endif /* _ASM_IA64_TIMEX_H */
diff --git a/arch/ia64/include/asm/xen/events.h b/arch/ia64/include/asm/xen/events.h
new file mode 100644
index 000000000000..73248781fba8
--- /dev/null
+++ b/arch/ia64/include/asm/xen/events.h
@@ -0,0 +1,50 @@
1/******************************************************************************
2 * arch/ia64/include/asm/xen/events.h
3 *
4 * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
5 * VA Linux Systems Japan K.K.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 *
21 */
22#ifndef _ASM_IA64_XEN_EVENTS_H
23#define _ASM_IA64_XEN_EVENTS_H
24
25enum ipi_vector {
26 XEN_RESCHEDULE_VECTOR,
27 XEN_IPI_VECTOR,
28 XEN_CMCP_VECTOR,
29 XEN_CPEP_VECTOR,
30
31 XEN_NR_IPIS,
32};
33
34static inline int xen_irqs_disabled(struct pt_regs *regs)
35{
36 return !(ia64_psr(regs)->i);
37}
38
39static inline void xen_do_IRQ(int irq, struct pt_regs *regs)
40{
41 struct pt_regs *old_regs;
42 old_regs = set_irq_regs(regs);
43 irq_enter();
44 __do_IRQ(irq);
45 irq_exit();
46 set_irq_regs(old_regs);
47}
48#define irq_ctx_init(cpu) do { } while (0)
49
50#endif /* _ASM_IA64_XEN_EVENTS_H */
diff --git a/arch/ia64/include/asm/xen/grant_table.h b/arch/ia64/include/asm/xen/grant_table.h
new file mode 100644
index 000000000000..2b1fae0e2d11
--- /dev/null
+++ b/arch/ia64/include/asm/xen/grant_table.h
@@ -0,0 +1,29 @@
1/******************************************************************************
2 * arch/ia64/include/asm/xen/grant_table.h
3 *
4 * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
5 * VA Linux Systems Japan K.K.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 *
21 */
22
23#ifndef _ASM_IA64_XEN_GRANT_TABLE_H
24#define _ASM_IA64_XEN_GRANT_TABLE_H
25
26struct vm_struct *xen_alloc_vm_area(unsigned long size);
27void xen_free_vm_area(struct vm_struct *area);
28
29#endif /* _ASM_IA64_XEN_GRANT_TABLE_H */
diff --git a/arch/ia64/include/asm/xen/hypercall.h b/arch/ia64/include/asm/xen/hypercall.h
new file mode 100644
index 000000000000..96fc62366aa4
--- /dev/null
+++ b/arch/ia64/include/asm/xen/hypercall.h
@@ -0,0 +1,265 @@
1/******************************************************************************
2 * hypercall.h
3 *
4 * Linux-specific hypervisor handling.
5 *
6 * Copyright (c) 2002-2004, K A Fraser
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License version 2
10 * as published by the Free Software Foundation; or, when distributed
11 * separately from the Linux kernel or incorporated into other
12 * software packages, subject to the following license:
13 *
14 * Permission is hereby granted, free of charge, to any person obtaining a copy
15 * of this source file (the "Software"), to deal in the Software without
16 * restriction, including without limitation the rights to use, copy, modify,
17 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
18 * and to permit persons to whom the Software is furnished to do so, subject to
19 * the following conditions:
20 *
21 * The above copyright notice and this permission notice shall be included in
22 * all copies or substantial portions of the Software.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
25 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
26 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
27 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
28 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
29 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
30 * IN THE SOFTWARE.
31 */
32
33#ifndef _ASM_IA64_XEN_HYPERCALL_H
34#define _ASM_IA64_XEN_HYPERCALL_H
35
36#include <xen/interface/xen.h>
37#include <xen/interface/physdev.h>
38#include <xen/interface/sched.h>
39#include <asm/xen/xcom_hcall.h>
40struct xencomm_handle;
41extern unsigned long __hypercall(unsigned long a1, unsigned long a2,
42 unsigned long a3, unsigned long a4,
43 unsigned long a5, unsigned long cmd);
44
45/*
46 * Assembler stubs for hyper-calls.
47 */
48
49#define _hypercall0(type, name) \
50({ \
51 long __res; \
52 __res = __hypercall(0, 0, 0, 0, 0, __HYPERVISOR_##name);\
53 (type)__res; \
54})
55
56#define _hypercall1(type, name, a1) \
57({ \
58 long __res; \
59 __res = __hypercall((unsigned long)a1, \
60 0, 0, 0, 0, __HYPERVISOR_##name); \
61 (type)__res; \
62})
63
64#define _hypercall2(type, name, a1, a2) \
65({ \
66 long __res; \
67 __res = __hypercall((unsigned long)a1, \
68 (unsigned long)a2, \
69 0, 0, 0, __HYPERVISOR_##name); \
70 (type)__res; \
71})
72
73#define _hypercall3(type, name, a1, a2, a3) \
74({ \
75 long __res; \
76 __res = __hypercall((unsigned long)a1, \
77 (unsigned long)a2, \
78 (unsigned long)a3, \
79 0, 0, __HYPERVISOR_##name); \
80 (type)__res; \
81})
82
83#define _hypercall4(type, name, a1, a2, a3, a4) \
84({ \
85 long __res; \
86 __res = __hypercall((unsigned long)a1, \
87 (unsigned long)a2, \
88 (unsigned long)a3, \
89 (unsigned long)a4, \
90 0, __HYPERVISOR_##name); \
91 (type)__res; \
92})
93
94#define _hypercall5(type, name, a1, a2, a3, a4, a5) \
95({ \
96 long __res; \
97 __res = __hypercall((unsigned long)a1, \
98 (unsigned long)a2, \
99 (unsigned long)a3, \
100 (unsigned long)a4, \
101 (unsigned long)a5, \
102 __HYPERVISOR_##name); \
103 (type)__res; \
104})
105
106
107static inline int
108xencomm_arch_hypercall_sched_op(int cmd, struct xencomm_handle *arg)
109{
110 return _hypercall2(int, sched_op_new, cmd, arg);
111}
112
113static inline long
114HYPERVISOR_set_timer_op(u64 timeout)
115{
116 unsigned long timeout_hi = (unsigned long)(timeout >> 32);
117 unsigned long timeout_lo = (unsigned long)timeout;
118 return _hypercall2(long, set_timer_op, timeout_lo, timeout_hi);
119}
120
121static inline int
122xencomm_arch_hypercall_multicall(struct xencomm_handle *call_list,
123 int nr_calls)
124{
125 return _hypercall2(int, multicall, call_list, nr_calls);
126}
127
128static inline int
129xencomm_arch_hypercall_memory_op(unsigned int cmd, struct xencomm_handle *arg)
130{
131 return _hypercall2(int, memory_op, cmd, arg);
132}
133
134static inline int
135xencomm_arch_hypercall_event_channel_op(int cmd, struct xencomm_handle *arg)
136{
137 return _hypercall2(int, event_channel_op, cmd, arg);
138}
139
140static inline int
141xencomm_arch_hypercall_xen_version(int cmd, struct xencomm_handle *arg)
142{
143 return _hypercall2(int, xen_version, cmd, arg);
144}
145
146static inline int
147xencomm_arch_hypercall_console_io(int cmd, int count,
148 struct xencomm_handle *str)
149{
150 return _hypercall3(int, console_io, cmd, count, str);
151}
152
153static inline int
154xencomm_arch_hypercall_physdev_op(int cmd, struct xencomm_handle *arg)
155{
156 return _hypercall2(int, physdev_op, cmd, arg);
157}
158
159static inline int
160xencomm_arch_hypercall_grant_table_op(unsigned int cmd,
161 struct xencomm_handle *uop,
162 unsigned int count)
163{
164 return _hypercall3(int, grant_table_op, cmd, uop, count);
165}
166
167int HYPERVISOR_grant_table_op(unsigned int cmd, void *uop, unsigned int count);
168
169extern int xencomm_arch_hypercall_suspend(struct xencomm_handle *arg);
170
171static inline int
172xencomm_arch_hypercall_callback_op(int cmd, struct xencomm_handle *arg)
173{
174 return _hypercall2(int, callback_op, cmd, arg);
175}
176
177static inline long
178xencomm_arch_hypercall_vcpu_op(int cmd, int cpu, void *arg)
179{
180 return _hypercall3(long, vcpu_op, cmd, cpu, arg);
181}
182
183static inline int
184HYPERVISOR_physdev_op(int cmd, void *arg)
185{
186 switch (cmd) {
187 case PHYSDEVOP_eoi:
188 return _hypercall1(int, ia64_fast_eoi,
189 ((struct physdev_eoi *)arg)->irq);
190 default:
191 return xencomm_hypercall_physdev_op(cmd, arg);
192 }
193}
194
195static inline long
196xencomm_arch_hypercall_opt_feature(struct xencomm_handle *arg)
197{
198 return _hypercall1(long, opt_feature, arg);
199}
200
201/* for balloon driver */
202#define HYPERVISOR_update_va_mapping(va, new_val, flags) (0)
203
204/* Use xencomm to do hypercalls. */
205#define HYPERVISOR_sched_op xencomm_hypercall_sched_op
206#define HYPERVISOR_event_channel_op xencomm_hypercall_event_channel_op
207#define HYPERVISOR_callback_op xencomm_hypercall_callback_op
208#define HYPERVISOR_multicall xencomm_hypercall_multicall
209#define HYPERVISOR_xen_version xencomm_hypercall_xen_version
210#define HYPERVISOR_console_io xencomm_hypercall_console_io
211#define HYPERVISOR_memory_op xencomm_hypercall_memory_op
212#define HYPERVISOR_suspend xencomm_hypercall_suspend
213#define HYPERVISOR_vcpu_op xencomm_hypercall_vcpu_op
214#define HYPERVISOR_opt_feature xencomm_hypercall_opt_feature
215
216/* to compile gnttab_copy_grant_page() in drivers/xen/core/gnttab.c */
217#define HYPERVISOR_mmu_update(req, count, success_count, domid) ({ BUG(); 0; })
218
219static inline int
220HYPERVISOR_shutdown(
221 unsigned int reason)
222{
223 struct sched_shutdown sched_shutdown = {
224 .reason = reason
225 };
226
227 int rc = HYPERVISOR_sched_op(SCHEDOP_shutdown, &sched_shutdown);
228
229 return rc;
230}
231
232/* for netfront.c, netback.c */
233#define MULTI_UVMFLAGS_INDEX 0 /* XXX any value */
234
235static inline void
236MULTI_update_va_mapping(
237 struct multicall_entry *mcl, unsigned long va,
238 pte_t new_val, unsigned long flags)
239{
240 mcl->op = __HYPERVISOR_update_va_mapping;
241 mcl->result = 0;
242}
243
244static inline void
245MULTI_grant_table_op(struct multicall_entry *mcl, unsigned int cmd,
246 void *uop, unsigned int count)
247{
248 mcl->op = __HYPERVISOR_grant_table_op;
249 mcl->args[0] = cmd;
250 mcl->args[1] = (unsigned long)uop;
251 mcl->args[2] = count;
252}
253
254static inline void
255MULTI_mmu_update(struct multicall_entry *mcl, struct mmu_update *req,
256 int count, int *success_count, domid_t domid)
257{
258 mcl->op = __HYPERVISOR_mmu_update;
259 mcl->args[0] = (unsigned long)req;
260 mcl->args[1] = count;
261 mcl->args[2] = (unsigned long)success_count;
262 mcl->args[3] = domid;
263}
264
265#endif /* _ASM_IA64_XEN_HYPERCALL_H */
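
As a rough guide (an illustrative sketch, not part of this patch), this is what the _hypercall2() macro above boils down to for the xen_version wrapper: the five argument slots plus the hypercall number are handed to the __hypercall() assembler stub, which issues the hypercall break to Xen (see arch/ia64/xen/hypercall.S in this series).

    static inline int example_xen_version(int cmd, struct xencomm_handle *arg)
    {
    	long res = __hypercall((unsigned long)cmd, (unsigned long)arg,
    			       0, 0, 0, __HYPERVISOR_xen_version);
    	return (int)res;
    }
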
diff --git a/arch/ia64/include/asm/xen/hypervisor.h b/arch/ia64/include/asm/xen/hypervisor.h
new file mode 100644
index 000000000000..7a804e80fc67
--- /dev/null
+++ b/arch/ia64/include/asm/xen/hypervisor.h
@@ -0,0 +1,89 @@
1/******************************************************************************
2 * hypervisor.h
3 *
4 * Linux-specific hypervisor handling.
5 *
6 * Copyright (c) 2002-2004, K A Fraser
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License version 2
10 * as published by the Free Software Foundation; or, when distributed
11 * separately from the Linux kernel or incorporated into other
12 * software packages, subject to the following license:
13 *
14 * Permission is hereby granted, free of charge, to any person obtaining a copy
15 * of this source file (the "Software"), to deal in the Software without
16 * restriction, including without limitation the rights to use, copy, modify,
17 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
18 * and to permit persons to whom the Software is furnished to do so, subject to
19 * the following conditions:
20 *
21 * The above copyright notice and this permission notice shall be included in
22 * all copies or substantial portions of the Software.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
25 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
26 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
27 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
28 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
29 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
30 * IN THE SOFTWARE.
31 */
32
33#ifndef _ASM_IA64_XEN_HYPERVISOR_H
34#define _ASM_IA64_XEN_HYPERVISOR_H
35
36#ifdef CONFIG_XEN
37
38#include <linux/init.h>
39#include <xen/interface/xen.h>
40#include <xen/interface/version.h> /* to compile feature.c */
 41#include <xen/features.h> /* to compile xen-netfront.c */
42#include <asm/xen/hypercall.h>
43
44/* xen_domain_type is set before executing any C code by early_xen_setup */
45enum xen_domain_type {
46 XEN_NATIVE,
47 XEN_PV_DOMAIN,
48 XEN_HVM_DOMAIN,
49};
50
51extern enum xen_domain_type xen_domain_type;
52
53#define xen_domain() (xen_domain_type != XEN_NATIVE)
54#define xen_pv_domain() (xen_domain_type == XEN_PV_DOMAIN)
55#define xen_initial_domain() (xen_pv_domain() && \
56 (xen_start_info->flags & SIF_INITDOMAIN))
57#define xen_hvm_domain() (xen_domain_type == XEN_HVM_DOMAIN)
58
59/* deprecated. remove this */
60#define is_running_on_xen() (xen_domain_type == XEN_PV_DOMAIN)
61
62extern struct shared_info *HYPERVISOR_shared_info;
63extern struct start_info *xen_start_info;
64
65void __init xen_setup_vcpu_info_placement(void);
66void force_evtchn_callback(void);
67
68/* for drivers/xen/balloon/balloon.c */
69#ifdef CONFIG_XEN_SCRUB_PAGES
70#define scrub_pages(_p, _n) memset((void *)(_p), 0, (_n) << PAGE_SHIFT)
71#else
72#define scrub_pages(_p, _n) ((void)0)
73#endif
74
75/* For setup_arch() in arch/ia64/kernel/setup.c */
76void xen_ia64_enable_opt_feature(void);
77
78#else /* CONFIG_XEN */
79
80#define xen_domain() (0)
81#define xen_pv_domain() (0)
82#define xen_initial_domain() (0)
83#define xen_hvm_domain() (0)
84#define is_running_on_xen() (0) /* deprecated. remove this */
85#endif
86
87#define is_initial_xendomain() (0) /* deprecated. remove this */
88
89#endif /* _ASM_IA64_XEN_HYPERVISOR_H */
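
A hypothetical usage sketch (not part of this patch) of the predicates defined above, gating an init path on whether the kernel is running under Xen at all. The function name and message are made up for illustration; it assumes <linux/errno.h> and <linux/kernel.h> are available as usual.

    static int __init example_xen_only_init(void)
    {
    	if (!xen_domain())
    		return -ENODEV;		/* bare metal or another hypervisor */

    	if (xen_initial_domain())
    		printk(KERN_INFO "example: running as the initial domain\n");

    	return 0;
    }
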
diff --git a/arch/ia64/include/asm/xen/inst.h b/arch/ia64/include/asm/xen/inst.h
new file mode 100644
index 000000000000..19c2ae1d878a
--- /dev/null
+++ b/arch/ia64/include/asm/xen/inst.h
@@ -0,0 +1,458 @@
1/******************************************************************************
2 * arch/ia64/include/asm/xen/inst.h
3 *
4 * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
5 * VA Linux Systems Japan K.K.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 *
21 */
22
23#include <asm/xen/privop.h>
24
25#define ia64_ivt xen_ivt
26#define DO_SAVE_MIN XEN_DO_SAVE_MIN
27
28#define __paravirt_switch_to xen_switch_to
29#define __paravirt_leave_syscall xen_leave_syscall
30#define __paravirt_work_processed_syscall xen_work_processed_syscall
31#define __paravirt_leave_kernel xen_leave_kernel
32#define __paravirt_pending_syscall_end xen_work_pending_syscall_end
33#define __paravirt_work_processed_syscall_target \
34 xen_work_processed_syscall
35
36#define MOV_FROM_IFA(reg) \
37 movl reg = XSI_IFA; \
38 ;; \
39 ld8 reg = [reg]
40
41#define MOV_FROM_ITIR(reg) \
42 movl reg = XSI_ITIR; \
43 ;; \
44 ld8 reg = [reg]
45
46#define MOV_FROM_ISR(reg) \
47 movl reg = XSI_ISR; \
48 ;; \
49 ld8 reg = [reg]
50
51#define MOV_FROM_IHA(reg) \
52 movl reg = XSI_IHA; \
53 ;; \
54 ld8 reg = [reg]
55
56#define MOV_FROM_IPSR(pred, reg) \
57(pred) movl reg = XSI_IPSR; \
58 ;; \
59(pred) ld8 reg = [reg]
60
61#define MOV_FROM_IIM(reg) \
62 movl reg = XSI_IIM; \
63 ;; \
64 ld8 reg = [reg]
65
66#define MOV_FROM_IIP(reg) \
67 movl reg = XSI_IIP; \
68 ;; \
69 ld8 reg = [reg]
70
71.macro __MOV_FROM_IVR reg, clob
72 .ifc "\reg", "r8"
73 XEN_HYPER_GET_IVR
74 .exitm
75 .endif
76 .ifc "\clob", "r8"
77 XEN_HYPER_GET_IVR
78 ;;
79 mov \reg = r8
80 .exitm
81 .endif
82
83 mov \clob = r8
84 ;;
85 XEN_HYPER_GET_IVR
86 ;;
87 mov \reg = r8
88 ;;
89 mov r8 = \clob
90.endm
91#define MOV_FROM_IVR(reg, clob) __MOV_FROM_IVR reg, clob
92
93.macro __MOV_FROM_PSR pred, reg, clob
94 .ifc "\reg", "r8"
95 (\pred) XEN_HYPER_GET_PSR;
96 .exitm
97 .endif
98 .ifc "\clob", "r8"
99 (\pred) XEN_HYPER_GET_PSR
100 ;;
101 (\pred) mov \reg = r8
102 .exitm
103 .endif
104
105 (\pred) mov \clob = r8
106 (\pred) XEN_HYPER_GET_PSR
107 ;;
108 (\pred) mov \reg = r8
109 (\pred) mov r8 = \clob
110.endm
111#define MOV_FROM_PSR(pred, reg, clob) __MOV_FROM_PSR pred, reg, clob
112
113
114#define MOV_TO_IFA(reg, clob) \
115 movl clob = XSI_IFA; \
116 ;; \
117 st8 [clob] = reg \
118
119#define MOV_TO_ITIR(pred, reg, clob) \
120(pred) movl clob = XSI_ITIR; \
121 ;; \
122(pred) st8 [clob] = reg
123
124#define MOV_TO_IHA(pred, reg, clob) \
125(pred) movl clob = XSI_IHA; \
126 ;; \
127(pred) st8 [clob] = reg
128
129#define MOV_TO_IPSR(pred, reg, clob) \
130(pred) movl clob = XSI_IPSR; \
131 ;; \
132(pred) st8 [clob] = reg; \
133 ;;
134
135#define MOV_TO_IFS(pred, reg, clob) \
136(pred) movl clob = XSI_IFS; \
137 ;; \
138(pred) st8 [clob] = reg; \
139 ;;
140
141#define MOV_TO_IIP(reg, clob) \
142 movl clob = XSI_IIP; \
143 ;; \
144 st8 [clob] = reg
145
146.macro ____MOV_TO_KR kr, reg, clob0, clob1
147 .ifc "\clob0", "r9"
148 .error "clob0 \clob0 must not be r9"
149 .endif
150 .ifc "\clob1", "r8"
151 .error "clob1 \clob1 must not be r8"
152 .endif
153
154 .ifnc "\reg", "r9"
155 .ifnc "\clob1", "r9"
156 mov \clob1 = r9
157 .endif
158 mov r9 = \reg
159 .endif
160 .ifnc "\clob0", "r8"
161 mov \clob0 = r8
162 .endif
163 mov r8 = \kr
164 ;;
165 XEN_HYPER_SET_KR
166
167 .ifnc "\reg", "r9"
168 .ifnc "\clob1", "r9"
169 mov r9 = \clob1
170 .endif
171 .endif
172 .ifnc "\clob0", "r8"
173 mov r8 = \clob0
174 .endif
175.endm
176
177.macro __MOV_TO_KR kr, reg, clob0, clob1
178 .ifc "\clob0", "r9"
179 ____MOV_TO_KR \kr, \reg, \clob1, \clob0
180 .exitm
181 .endif
182 .ifc "\clob1", "r8"
183 ____MOV_TO_KR \kr, \reg, \clob1, \clob0
184 .exitm
185 .endif
186
187 ____MOV_TO_KR \kr, \reg, \clob0, \clob1
188.endm
189
190#define MOV_TO_KR(kr, reg, clob0, clob1) \
191 __MOV_TO_KR IA64_KR_ ## kr, reg, clob0, clob1
192
193
194.macro __ITC_I pred, reg, clob
195 .ifc "\reg", "r8"
196 (\pred) XEN_HYPER_ITC_I
197 .exitm
198 .endif
199 .ifc "\clob", "r8"
200 (\pred) mov r8 = \reg
201 ;;
202 (\pred) XEN_HYPER_ITC_I
203 .exitm
204 .endif
205
206 (\pred) mov \clob = r8
207 (\pred) mov r8 = \reg
208 ;;
209 (\pred) XEN_HYPER_ITC_I
210 ;;
211 (\pred) mov r8 = \clob
212 ;;
213.endm
214#define ITC_I(pred, reg, clob) __ITC_I pred, reg, clob
215
216.macro __ITC_D pred, reg, clob
217 .ifc "\reg", "r8"
218 (\pred) XEN_HYPER_ITC_D
219 ;;
220 .exitm
221 .endif
222 .ifc "\clob", "r8"
223 (\pred) mov r8 = \reg
224 ;;
225 (\pred) XEN_HYPER_ITC_D
226 ;;
227 .exitm
228 .endif
229
230 (\pred) mov \clob = r8
231 (\pred) mov r8 = \reg
232 ;;
233 (\pred) XEN_HYPER_ITC_D
234 ;;
235 (\pred) mov r8 = \clob
236 ;;
237.endm
238#define ITC_D(pred, reg, clob) __ITC_D pred, reg, clob
239
240.macro __ITC_I_AND_D pred_i, pred_d, reg, clob
241 .ifc "\reg", "r8"
242 (\pred_i)XEN_HYPER_ITC_I
243 ;;
244 (\pred_d)XEN_HYPER_ITC_D
245 ;;
246 .exitm
247 .endif
248 .ifc "\clob", "r8"
249 mov r8 = \reg
250 ;;
251 (\pred_i)XEN_HYPER_ITC_I
252 ;;
253 (\pred_d)XEN_HYPER_ITC_D
254 ;;
255 .exitm
256 .endif
257
258 mov \clob = r8
259 mov r8 = \reg
260 ;;
261 (\pred_i)XEN_HYPER_ITC_I
262 ;;
263 (\pred_d)XEN_HYPER_ITC_D
264 ;;
265 mov r8 = \clob
266 ;;
267.endm
268#define ITC_I_AND_D(pred_i, pred_d, reg, clob) \
269 __ITC_I_AND_D pred_i, pred_d, reg, clob
270
271.macro __THASH pred, reg0, reg1, clob
272 .ifc "\reg0", "r8"
273 (\pred) mov r8 = \reg1
274 (\pred) XEN_HYPER_THASH
275 .exitm
 276 .endif
277 .ifc "\reg1", "r8"
278 (\pred) XEN_HYPER_THASH
279 ;;
280 (\pred) mov \reg0 = r8
281 ;;
282 .exitm
283 .endif
284 .ifc "\clob", "r8"
285 (\pred) mov r8 = \reg1
286 (\pred) XEN_HYPER_THASH
287 ;;
288 (\pred) mov \reg0 = r8
289 ;;
290 .exitm
291 .endif
292
293 (\pred) mov \clob = r8
294 (\pred) mov r8 = \reg1
295 (\pred) XEN_HYPER_THASH
296 ;;
297 (\pred) mov \reg0 = r8
298 (\pred) mov r8 = \clob
299 ;;
300.endm
301#define THASH(pred, reg0, reg1, clob) __THASH pred, reg0, reg1, clob
302
303#define SSM_PSR_IC_AND_DEFAULT_BITS_AND_SRLZ_I(clob0, clob1) \
304 mov clob0 = 1; \
305 movl clob1 = XSI_PSR_IC; \
306 ;; \
307 st4 [clob1] = clob0 \
308 ;;
309
310#define SSM_PSR_IC_AND_SRLZ_D(clob0, clob1) \
311 ;; \
312 srlz.d; \
313 mov clob1 = 1; \
314 movl clob0 = XSI_PSR_IC; \
315 ;; \
316 st4 [clob0] = clob1
317
318#define RSM_PSR_IC(clob) \
319 movl clob = XSI_PSR_IC; \
320 ;; \
321 st4 [clob] = r0; \
322 ;;
323
324/* pred will be clobbered */
325#define MASK_TO_PEND_OFS (-1)
326#define SSM_PSR_I(pred, pred_clob, clob) \
327(pred) movl clob = XSI_PSR_I_ADDR \
328 ;; \
329(pred) ld8 clob = [clob] \
330 ;; \
331 /* if (pred) vpsr.i = 1 */ \
332 /* if (pred) (vcpu->vcpu_info->evtchn_upcall_mask)=0 */ \
333(pred) st1 [clob] = r0, MASK_TO_PEND_OFS \
334 ;; \
335 /* if (vcpu->vcpu_info->evtchn_upcall_pending) */ \
336(pred) ld1 clob = [clob] \
337 ;; \
338(pred) cmp.ne.unc pred_clob, p0 = clob, r0 \
339 ;; \
 340(pred_clob)XEN_HYPER_SSM_I /* do a real ssm psr.i */
341
342#define RSM_PSR_I(pred, clob0, clob1) \
343 movl clob0 = XSI_PSR_I_ADDR; \
344 mov clob1 = 1; \
345 ;; \
346 ld8 clob0 = [clob0]; \
347 ;; \
348(pred) st1 [clob0] = clob1
349
350#define RSM_PSR_I_IC(clob0, clob1, clob2) \
351 movl clob0 = XSI_PSR_I_ADDR; \
352 movl clob1 = XSI_PSR_IC; \
353 ;; \
354 ld8 clob0 = [clob0]; \
355 mov clob2 = 1; \
356 ;; \
357 /* note: clears both vpsr.i and vpsr.ic! */ \
358 st1 [clob0] = clob2; \
359 st4 [clob1] = r0; \
360 ;;
361
362#define RSM_PSR_DT \
363 XEN_HYPER_RSM_PSR_DT
364
365#define SSM_PSR_DT_AND_SRLZ_I \
366 XEN_HYPER_SSM_PSR_DT
367
368#define BSW_0(clob0, clob1, clob2) \
369 ;; \
370 /* r16-r31 all now hold bank1 values */ \
371 mov clob2 = ar.unat; \
372 movl clob0 = XSI_BANK1_R16; \
373 movl clob1 = XSI_BANK1_R16 + 8; \
374 ;; \
375.mem.offset 0, 0; st8.spill [clob0] = r16, 16; \
376.mem.offset 8, 0; st8.spill [clob1] = r17, 16; \
377 ;; \
378.mem.offset 0, 0; st8.spill [clob0] = r18, 16; \
379.mem.offset 8, 0; st8.spill [clob1] = r19, 16; \
380 ;; \
381.mem.offset 0, 0; st8.spill [clob0] = r20, 16; \
382.mem.offset 8, 0; st8.spill [clob1] = r21, 16; \
383 ;; \
384.mem.offset 0, 0; st8.spill [clob0] = r22, 16; \
385.mem.offset 8, 0; st8.spill [clob1] = r23, 16; \
386 ;; \
387.mem.offset 0, 0; st8.spill [clob0] = r24, 16; \
388.mem.offset 8, 0; st8.spill [clob1] = r25, 16; \
389 ;; \
390.mem.offset 0, 0; st8.spill [clob0] = r26, 16; \
391.mem.offset 8, 0; st8.spill [clob1] = r27, 16; \
392 ;; \
393.mem.offset 0, 0; st8.spill [clob0] = r28, 16; \
394.mem.offset 8, 0; st8.spill [clob1] = r29, 16; \
395 ;; \
396.mem.offset 0, 0; st8.spill [clob0] = r30, 16; \
397.mem.offset 8, 0; st8.spill [clob1] = r31, 16; \
398 ;; \
399 mov clob1 = ar.unat; \
400 movl clob0 = XSI_B1NAT; \
401 ;; \
402 st8 [clob0] = clob1; \
403 mov ar.unat = clob2; \
404 movl clob0 = XSI_BANKNUM; \
405 ;; \
406 st4 [clob0] = r0
407
408
409 /* FIXME: THIS CODE IS NOT NaT SAFE! */
410#define XEN_BSW_1(clob) \
411 mov clob = ar.unat; \
412 movl r30 = XSI_B1NAT; \
413 ;; \
414 ld8 r30 = [r30]; \
415 mov r31 = 1; \
416 ;; \
417 mov ar.unat = r30; \
418 movl r30 = XSI_BANKNUM; \
419 ;; \
420 st4 [r30] = r31; \
421 movl r30 = XSI_BANK1_R16; \
422 movl r31 = XSI_BANK1_R16+8; \
423 ;; \
424 ld8.fill r16 = [r30], 16; \
425 ld8.fill r17 = [r31], 16; \
426 ;; \
427 ld8.fill r18 = [r30], 16; \
428 ld8.fill r19 = [r31], 16; \
429 ;; \
430 ld8.fill r20 = [r30], 16; \
431 ld8.fill r21 = [r31], 16; \
432 ;; \
433 ld8.fill r22 = [r30], 16; \
434 ld8.fill r23 = [r31], 16; \
435 ;; \
436 ld8.fill r24 = [r30], 16; \
437 ld8.fill r25 = [r31], 16; \
438 ;; \
439 ld8.fill r26 = [r30], 16; \
440 ld8.fill r27 = [r31], 16; \
441 ;; \
442 ld8.fill r28 = [r30], 16; \
443 ld8.fill r29 = [r31], 16; \
444 ;; \
445 ld8.fill r30 = [r30]; \
446 ld8.fill r31 = [r31]; \
447 ;; \
448 mov ar.unat = clob
449
450#define BSW_1(clob0, clob1) XEN_BSW_1(clob1)
451
452
453#define COVER \
454 XEN_HYPER_COVER
455
456#define RFI \
457 XEN_HYPER_RFI; \
458 dv_serialize_data
diff --git a/arch/ia64/include/asm/xen/interface.h b/arch/ia64/include/asm/xen/interface.h
new file mode 100644
index 000000000000..f00fab40854d
--- /dev/null
+++ b/arch/ia64/include/asm/xen/interface.h
@@ -0,0 +1,346 @@
1/******************************************************************************
2 * arch-ia64/hypervisor-if.h
3 *
4 * Guest OS interface to IA64 Xen.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to
8 * deal in the Software without restriction, including without limitation the
9 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
10 * sell copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
23 *
24 * Copyright by those who contributed. (in alphabetical order)
25 *
26 * Anthony Xu <anthony.xu@intel.com>
27 * Eddie Dong <eddie.dong@intel.com>
28 * Fred Yang <fred.yang@intel.com>
29 * Kevin Tian <kevin.tian@intel.com>
30 * Alex Williamson <alex.williamson@hp.com>
31 * Chris Wright <chrisw@sous-sol.org>
32 * Christian Limpach <Christian.Limpach@cl.cam.ac.uk>
33 * Dietmar Hahn <dietmar.hahn@fujitsu-siemens.com>
34 * Hollis Blanchard <hollisb@us.ibm.com>
35 * Isaku Yamahata <yamahata@valinux.co.jp>
36 * Jan Beulich <jbeulich@novell.com>
37 * John Levon <john.levon@sun.com>
38 * Kazuhiro Suzuki <kaz@jp.fujitsu.com>
39 * Keir Fraser <keir.fraser@citrix.com>
40 * Kouya Shimura <kouya@jp.fujitsu.com>
41 * Masaki Kanno <kanno.masaki@jp.fujitsu.com>
42 * Matt Chapman <matthewc@hp.com>
43 * Matthew Chapman <matthewc@hp.com>
44 * Samuel Thibault <samuel.thibault@eu.citrix.com>
45 * Tomonari Horikoshi <t.horikoshi@jp.fujitsu.com>
46 * Tristan Gingold <tgingold@free.fr>
47 * Tsunehisa Doi <Doi.Tsunehisa@jp.fujitsu.com>
48 * Yutaka Ezaki <yutaka.ezaki@jp.fujitsu.com>
49 * Zhang Xin <xing.z.zhang@intel.com>
50 * Zhang xiantao <xiantao.zhang@intel.com>
51 * dan.magenheimer@hp.com
52 * ian.pratt@cl.cam.ac.uk
53 * michael.fetterman@cl.cam.ac.uk
54 */
55
56#ifndef _ASM_IA64_XEN_INTERFACE_H
57#define _ASM_IA64_XEN_INTERFACE_H
58
59#define __DEFINE_GUEST_HANDLE(name, type) \
60 typedef struct { type *p; } __guest_handle_ ## name
61
62#define DEFINE_GUEST_HANDLE_STRUCT(name) \
63 __DEFINE_GUEST_HANDLE(name, struct name)
64#define DEFINE_GUEST_HANDLE(name) __DEFINE_GUEST_HANDLE(name, name)
65#define GUEST_HANDLE(name) __guest_handle_ ## name
66#define GUEST_HANDLE_64(name) GUEST_HANDLE(name)
67#define set_xen_guest_handle(hnd, val) do { (hnd).p = val; } while (0)
68
69#ifndef __ASSEMBLY__
70/* Guest handles for primitive C types. */
71__DEFINE_GUEST_HANDLE(uchar, unsigned char);
72__DEFINE_GUEST_HANDLE(uint, unsigned int);
73__DEFINE_GUEST_HANDLE(ulong, unsigned long);
74__DEFINE_GUEST_HANDLE(u64, unsigned long);
75DEFINE_GUEST_HANDLE(char);
76DEFINE_GUEST_HANDLE(int);
77DEFINE_GUEST_HANDLE(long);
78DEFINE_GUEST_HANDLE(void);
79
80typedef unsigned long xen_pfn_t;
81DEFINE_GUEST_HANDLE(xen_pfn_t);
82#define PRI_xen_pfn "lx"
83#endif
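
The guest-handle macros above only wrap a raw pointer in a one-member struct so that hypercall argument layouts stay explicit. A minimal sketch of the pattern, with a hypothetical struct and helper (the real usage is the xen_memory_reservation handling in arch/ia64/xen/grant-table.c later in this patch):

/* hypothetical example, not part of the patch */
struct example_op {
	GUEST_HANDLE(ulong) extent_start;	/* i.e. struct { unsigned long *p; } */
	unsigned int nr_extents;
};

static void example_fill(struct example_op *op, unsigned long *pfns,
			 unsigned int n)
{
	/* set_xen_guest_handle() simply stores the pointer */
	set_xen_guest_handle(op->extent_start, pfns);
	op->nr_extents = n;
}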
84
85/* Arch specific VIRQs definition */
86#define VIRQ_ITC VIRQ_ARCH_0 /* V. Virtual itc timer */
87#define VIRQ_MCA_CMC VIRQ_ARCH_1 /* MCA cmc interrupt */
88#define VIRQ_MCA_CPE VIRQ_ARCH_2 /* MCA cpe interrupt */
89
90/* Maximum number of virtual CPUs in multi-processor guests. */
91/* keep sizeof(struct shared_page) <= PAGE_SIZE.
92 * this is checked in arch/ia64/xen/hypervisor.c. */
93#define MAX_VIRT_CPUS 64
94
95#ifndef __ASSEMBLY__
96
97#define INVALID_MFN (~0UL)
98
99union vac {
100 unsigned long value;
101 struct {
102 int a_int:1;
103 int a_from_int_cr:1;
104 int a_to_int_cr:1;
105 int a_from_psr:1;
106 int a_from_cpuid:1;
107 int a_cover:1;
108 int a_bsw:1;
109 long reserved:57;
110 };
111};
112
113union vdc {
114 unsigned long value;
115 struct {
116 int d_vmsw:1;
117 int d_extint:1;
118 int d_ibr_dbr:1;
119 int d_pmc:1;
120 int d_to_pmd:1;
121 int d_itm:1;
122 long reserved:58;
123 };
124};
125
126struct mapped_regs {
127 union vac vac;
128 union vdc vdc;
129 unsigned long virt_env_vaddr;
130 unsigned long reserved1[29];
131 unsigned long vhpi;
132 unsigned long reserved2[95];
133 union {
134 unsigned long vgr[16];
135 unsigned long bank1_regs[16]; /* bank1 regs (r16-r31)
136 when bank0 active */
137 };
138 union {
139 unsigned long vbgr[16];
140 unsigned long bank0_regs[16]; /* bank0 regs (r16-r31)
141 when bank1 active */
142 };
143 unsigned long vnat;
144 unsigned long vbnat;
145 unsigned long vcpuid[5];
146 unsigned long reserved3[11];
147 unsigned long vpsr;
148 unsigned long vpr;
149 unsigned long reserved4[76];
150 union {
151 unsigned long vcr[128];
152 struct {
153 unsigned long dcr; /* CR0 */
154 unsigned long itm;
155 unsigned long iva;
156 unsigned long rsv1[5];
157 unsigned long pta; /* CR8 */
158 unsigned long rsv2[7];
159 unsigned long ipsr; /* CR16 */
160 unsigned long isr;
161 unsigned long rsv3;
162 unsigned long iip;
163 unsigned long ifa;
164 unsigned long itir;
165 unsigned long iipa;
166 unsigned long ifs;
167 unsigned long iim; /* CR24 */
168 unsigned long iha;
169 unsigned long rsv4[38];
170 unsigned long lid; /* CR64 */
171 unsigned long ivr;
172 unsigned long tpr;
173 unsigned long eoi;
174 unsigned long irr[4];
175 unsigned long itv; /* CR72 */
176 unsigned long pmv;
177 unsigned long cmcv;
178 unsigned long rsv5[5];
179 unsigned long lrr0; /* CR80 */
180 unsigned long lrr1;
181 unsigned long rsv6[46];
182 };
183 };
184 union {
185 unsigned long reserved5[128];
186 struct {
187 unsigned long precover_ifs;
188 unsigned long unat; /* not sure if this is needed
189 until NaT arch is done */
190 int interrupt_collection_enabled; /* virtual psr.ic */
191
192 /* virtual interrupt deliverable flag is
193 * evtchn_upcall_mask in shared info area now.
194 * interrupt_mask_addr is the address
195 * of evtchn_upcall_mask for current vcpu
196 */
197 unsigned char *interrupt_mask_addr;
198 int pending_interruption;
199 unsigned char vpsr_pp;
200 unsigned char vpsr_dfh;
201 unsigned char hpsr_dfh;
202 unsigned char hpsr_mfh;
203 unsigned long reserved5_1[4];
204 int metaphysical_mode; /* 1 = use metaphys mapping
205 0 = use virtual */
206 int banknum; /* 0 or 1, which virtual
207 register bank is active */
208 unsigned long rrs[8]; /* region registers */
209 unsigned long krs[8]; /* kernel registers */
210 unsigned long tmp[16]; /* temp registers
211 (e.g. for hyperprivops) */
212 };
213 };
214};
215
216struct arch_vcpu_info {
217 /* nothing */
218};
219
220/*
221 * This structure is used for the magic page in the domain pseudo-physical
222 * address space and for the result of XENMEM_machine_memory_map.
223 * As the XENMEM_machine_memory_map result,
224 * xen_memory_map::nr_entries indicates the size in bytes,
225 * including struct xen_ia64_memmap_info, not the number of entries.
226 */
227struct xen_ia64_memmap_info {
228 uint64_t efi_memmap_size; /* size of EFI memory map */
229 uint64_t efi_memdesc_size; /* size of an EFI memory map
230 * descriptor */
231 uint32_t efi_memdesc_version; /* memory descriptor version */
232 void *memdesc[0]; /* array of efi_memory_desc_t */
233};
234
235struct arch_shared_info {
236 /* PFN of the start_info page. */
237 unsigned long start_info_pfn;
238
239 /* Interrupt vector for event channel. */
240 int evtchn_vector;
241
242 /* PFN of memmap_info page */
243	unsigned int memmap_info_num_pages;	/* currently only the == 1 case is
244 supported. */
245 unsigned long memmap_info_pfn;
246
247 uint64_t pad[31];
248};
249
250struct xen_callback {
251 unsigned long ip;
252};
253typedef struct xen_callback xen_callback_t;
254
255#endif /* !__ASSEMBLY__ */
256
257/* Size of the shared_info area (this is not related to page size). */
258#define XSI_SHIFT 14
259#define XSI_SIZE (1 << XSI_SHIFT)
260/* Log size of mapped_regs area (64 KB - only 4KB is used). */
261#define XMAPPEDREGS_SHIFT 12
262#define XMAPPEDREGS_SIZE (1 << XMAPPEDREGS_SHIFT)
263/* Offset of XASI (Xen arch shared info) wrt XSI_BASE. */
264#define XMAPPEDREGS_OFS XSI_SIZE
265
266/* Hyperprivops. */
267#define HYPERPRIVOP_START 0x1
268#define HYPERPRIVOP_RFI (HYPERPRIVOP_START + 0x0)
269#define HYPERPRIVOP_RSM_DT (HYPERPRIVOP_START + 0x1)
270#define HYPERPRIVOP_SSM_DT (HYPERPRIVOP_START + 0x2)
271#define HYPERPRIVOP_COVER (HYPERPRIVOP_START + 0x3)
272#define HYPERPRIVOP_ITC_D (HYPERPRIVOP_START + 0x4)
273#define HYPERPRIVOP_ITC_I (HYPERPRIVOP_START + 0x5)
274#define HYPERPRIVOP_SSM_I (HYPERPRIVOP_START + 0x6)
275#define HYPERPRIVOP_GET_IVR (HYPERPRIVOP_START + 0x7)
276#define HYPERPRIVOP_GET_TPR (HYPERPRIVOP_START + 0x8)
277#define HYPERPRIVOP_SET_TPR (HYPERPRIVOP_START + 0x9)
278#define HYPERPRIVOP_EOI (HYPERPRIVOP_START + 0xa)
279#define HYPERPRIVOP_SET_ITM (HYPERPRIVOP_START + 0xb)
280#define HYPERPRIVOP_THASH (HYPERPRIVOP_START + 0xc)
281#define HYPERPRIVOP_PTC_GA (HYPERPRIVOP_START + 0xd)
282#define HYPERPRIVOP_ITR_D (HYPERPRIVOP_START + 0xe)
283#define HYPERPRIVOP_GET_RR (HYPERPRIVOP_START + 0xf)
284#define HYPERPRIVOP_SET_RR (HYPERPRIVOP_START + 0x10)
285#define HYPERPRIVOP_SET_KR (HYPERPRIVOP_START + 0x11)
286#define HYPERPRIVOP_FC (HYPERPRIVOP_START + 0x12)
287#define HYPERPRIVOP_GET_CPUID (HYPERPRIVOP_START + 0x13)
288#define HYPERPRIVOP_GET_PMD (HYPERPRIVOP_START + 0x14)
289#define HYPERPRIVOP_GET_EFLAG (HYPERPRIVOP_START + 0x15)
290#define HYPERPRIVOP_SET_EFLAG (HYPERPRIVOP_START + 0x16)
291#define HYPERPRIVOP_RSM_BE (HYPERPRIVOP_START + 0x17)
292#define HYPERPRIVOP_GET_PSR (HYPERPRIVOP_START + 0x18)
293#define HYPERPRIVOP_SET_RR0_TO_RR4 (HYPERPRIVOP_START + 0x19)
294#define HYPERPRIVOP_MAX (0x1a)
295
296/* Fast and light hypercalls. */
297#define __HYPERVISOR_ia64_fast_eoi __HYPERVISOR_arch_1
298
299/* Xencomm macros. */
300#define XENCOMM_INLINE_MASK 0xf800000000000000UL
301#define XENCOMM_INLINE_FLAG 0x8000000000000000UL
302
303#ifndef __ASSEMBLY__
304
305/*
306 * Optimization features.
307 * The hypervisor may do some special optimizations for guests. This hypercall
308 * can be used to switch on/off these special optimizations.
309 */
310#define __HYPERVISOR_opt_feature 0x700UL
311
312#define XEN_IA64_OPTF_OFF 0x0
313#define XEN_IA64_OPTF_ON 0x1
314
315/*
316 * If this feature is switched on, the hypervisor inserts the
317 * TLB entries without calling the guest's trap handler.
318 * This is useful for guests that use region 7 for identity mapping,
319 * as the Linux kernel does.
320 */
321#define XEN_IA64_OPTF_IDENT_MAP_REG7 1
322
323/* Identity mapping of region 4 addresses in HVM. */
324#define XEN_IA64_OPTF_IDENT_MAP_REG4 2
325
326/* Identity mapping of region 5 addresses in HVM. */
327#define XEN_IA64_OPTF_IDENT_MAP_REG5 3
328
329#define XEN_IA64_OPTF_IDENT_MAP_NOT_SET (0)
330
331struct xen_ia64_opt_feature {
332 unsigned long cmd; /* Which feature */
333 unsigned char on; /* Switch feature on/off */
334 union {
335 struct {
336 /* The page protection bit mask of the pte.
337 * This will be or'ed with the pte. */
338 unsigned long pgprot;
339 unsigned long key; /* A protection key for itir.*/
340 };
341 };
342};
343
344#endif /* __ASSEMBLY__ */
345
346#endif /* _ASM_IA64_XEN_INTERFACE_H */
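
As an illustration of the __HYPERVISOR_opt_feature interface defined above, the sketch below shows how a guest could switch the region-7 identity-map optimization off again. It mirrors xen_ia64_enable_opt_feature() in arch/ia64/xen/hypervisor.c further down and assumes the HYPERVISOR_opt_feature() wrapper provided by this port; treat it as illustrative only.

/* sketch only -- the enable path appears verbatim in hypervisor.c below */
static void example_disable_ident_map_reg7(void)
{
	struct xen_ia64_opt_feature optf;

	optf.cmd = XEN_IA64_OPTF_IDENT_MAP_REG7;
	optf.on = XEN_IA64_OPTF_OFF;
	optf.pgprot = 0;
	optf.key = 0;
	HYPERVISOR_opt_feature(&optf);
}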
diff --git a/arch/ia64/include/asm/xen/irq.h b/arch/ia64/include/asm/xen/irq.h
new file mode 100644
index 000000000000..a90450983003
--- /dev/null
+++ b/arch/ia64/include/asm/xen/irq.h
@@ -0,0 +1,44 @@
1/******************************************************************************
2 * arch/ia64/include/asm/xen/irq.h
3 *
4 * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
5 * VA Linux Systems Japan K.K.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 *
21 */
22
23#ifndef _ASM_IA64_XEN_IRQ_H
24#define _ASM_IA64_XEN_IRQ_H
25
26/*
27 * The flat IRQ space is divided into two regions:
28 * 1. A one-to-one mapping of real physical IRQs. This space is only used
29 * if we have physical device-access privilege. This region is at the
30 * start of the IRQ space so that existing device drivers do not need
31 * to be modified to translate physical IRQ numbers into our IRQ space.
32 * 2. A dynamic mapping of inter-domain and Xen-sourced virtual IRQs. These
33 * are bound using the provided bind/unbind functions.
34 */
35
36#define XEN_PIRQ_BASE 0
37#define XEN_NR_PIRQS 256
38
39#define XEN_DYNIRQ_BASE (XEN_PIRQ_BASE + XEN_NR_PIRQS)
40#define XEN_NR_DYNIRQS (NR_CPUS * 8)
41
42#define XEN_NR_IRQS (XEN_NR_PIRQS + XEN_NR_DYNIRQS)
43
44#endif /* _ASM_IA64_XEN_IRQ_H */
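
A small hypothetical helper to make the flat IRQ layout described above concrete: physical IRQs occupy [XEN_PIRQ_BASE, XEN_PIRQ_BASE + XEN_NR_PIRQS) and the dynamically bound event-channel IRQs follow immediately after, so a range check is enough to tell them apart.

/* illustrative only, not part of the patch */
static inline int example_irq_is_dynamic(unsigned int irq)
{
	return irq >= XEN_DYNIRQ_BASE && irq < XEN_NR_IRQS;
}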
diff --git a/arch/ia64/include/asm/xen/minstate.h b/arch/ia64/include/asm/xen/minstate.h
new file mode 100644
index 000000000000..4d92d9bbda7b
--- /dev/null
+++ b/arch/ia64/include/asm/xen/minstate.h
@@ -0,0 +1,134 @@
1/*
2 * DO_SAVE_MIN switches to the kernel stacks (if necessary) and saves
3 * the minimum state necessary that allows us to turn psr.ic back
4 * on.
5 *
6 * Assumed state upon entry:
7 * psr.ic: off
8 * r31: contains saved predicates (pr)
9 *
10 * Upon exit, the state is as follows:
11 * psr.ic: off
12 * r2 = points to &pt_regs.r16
13 * r8 = contents of ar.ccv
14 * r9 = contents of ar.csd
15 * r10 = contents of ar.ssd
16 * r11 = FPSR_DEFAULT
17 * r12 = kernel sp (kernel virtual address)
18 * r13 = points to current task_struct (kernel virtual address)
19 * p15 = TRUE if psr.i is set in cr.ipsr
20 * predicate registers (other than p2, p3, and p15), b6, r3, r14, r15:
21 * preserved
22 * CONFIG_XEN note: p6/p7 are not preserved
23 *
24 * Note that psr.ic is NOT turned on by this macro. This is so that
25 * we can pass interruption state as arguments to a handler.
26 */
27#define XEN_DO_SAVE_MIN(__COVER,SAVE_IFS,EXTRA,WORKAROUND) \
28 mov r16=IA64_KR(CURRENT); /* M */ \
29 mov r27=ar.rsc; /* M */ \
30 mov r20=r1; /* A */ \
31 mov r25=ar.unat; /* M */ \
32 MOV_FROM_IPSR(p0,r29); /* M */ \
33 MOV_FROM_IIP(r28); /* M */ \
34 mov r21=ar.fpsr; /* M */ \
35 mov r26=ar.pfs; /* I */ \
36 __COVER; /* B;; (or nothing) */ \
37 adds r16=IA64_TASK_THREAD_ON_USTACK_OFFSET,r16; \
38 ;; \
39 ld1 r17=[r16]; /* load current->thread.on_ustack flag */ \
40 st1 [r16]=r0; /* clear current->thread.on_ustack flag */ \
41 adds r1=-IA64_TASK_THREAD_ON_USTACK_OFFSET,r16 \
42 /* switch from user to kernel RBS: */ \
43 ;; \
44 invala; /* M */ \
45 /* SAVE_IFS;*/ /* see xen special handling below */ \
46 cmp.eq pKStk,pUStk=r0,r17; /* are we in kernel mode already? */ \
47 ;; \
48(pUStk) mov ar.rsc=0; /* set enforced lazy mode, pl 0, little-endian, loadrs=0 */ \
49 ;; \
50(pUStk) mov.m r24=ar.rnat; \
51(pUStk) addl r22=IA64_RBS_OFFSET,r1; /* compute base of RBS */ \
52(pKStk) mov r1=sp; /* get sp */ \
53 ;; \
54(pUStk) lfetch.fault.excl.nt1 [r22]; \
55(pUStk) addl r1=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r1; /* compute base of memory stack */ \
56(pUStk) mov r23=ar.bspstore; /* save ar.bspstore */ \
57 ;; \
58(pUStk) mov ar.bspstore=r22; /* switch to kernel RBS */ \
59(pKStk) addl r1=-IA64_PT_REGS_SIZE,r1; /* if in kernel mode, use sp (r12) */ \
60 ;; \
61(pUStk) mov r18=ar.bsp; \
62(pUStk) mov ar.rsc=0x3; /* set eager mode, pl 0, little-endian, loadrs=0 */ \
63 adds r17=2*L1_CACHE_BYTES,r1; /* really: biggest cache-line size */ \
64 adds r16=PT(CR_IPSR),r1; \
65 ;; \
66 lfetch.fault.excl.nt1 [r17],L1_CACHE_BYTES; \
67 st8 [r16]=r29; /* save cr.ipsr */ \
68 ;; \
69 lfetch.fault.excl.nt1 [r17]; \
70 tbit.nz p15,p0=r29,IA64_PSR_I_BIT; \
71 mov r29=b0 \
72 ;; \
73 WORKAROUND; \
74 adds r16=PT(R8),r1; /* initialize first base pointer */ \
75 adds r17=PT(R9),r1; /* initialize second base pointer */ \
76(pKStk) mov r18=r0; /* make sure r18 isn't NaT */ \
77 ;; \
78.mem.offset 0,0; st8.spill [r16]=r8,16; \
79.mem.offset 8,0; st8.spill [r17]=r9,16; \
80 ;; \
81.mem.offset 0,0; st8.spill [r16]=r10,24; \
82 movl r8=XSI_PRECOVER_IFS; \
83.mem.offset 8,0; st8.spill [r17]=r11,24; \
84 ;; \
85 /* xen special handling for possibly lazy cover */ \
86 /* SAVE_MIN case in dispatch_ia32_handler: mov r30=r0 */ \
87 ld8 r30=[r8]; \
88(pUStk) sub r18=r18,r22; /* r18=RSE.ndirty*8 */ \
89 st8 [r16]=r28,16; /* save cr.iip */ \
90 ;; \
91 st8 [r17]=r30,16; /* save cr.ifs */ \
92 mov r8=ar.ccv; \
93 mov r9=ar.csd; \
94 mov r10=ar.ssd; \
95 movl r11=FPSR_DEFAULT; /* L-unit */ \
96 ;; \
97 st8 [r16]=r25,16; /* save ar.unat */ \
98 st8 [r17]=r26,16; /* save ar.pfs */ \
99 shl r18=r18,16; /* compute ar.rsc to be used for "loadrs" */ \
100 ;; \
101 st8 [r16]=r27,16; /* save ar.rsc */ \
102(pUStk) st8 [r17]=r24,16; /* save ar.rnat */ \
103(pKStk) adds r17=16,r17; /* skip over ar_rnat field */ \
104 ;; /* avoid RAW on r16 & r17 */ \
105(pUStk) st8 [r16]=r23,16; /* save ar.bspstore */ \
106 st8 [r17]=r31,16; /* save predicates */ \
107(pKStk) adds r16=16,r16; /* skip over ar_bspstore field */ \
108 ;; \
109 st8 [r16]=r29,16; /* save b0 */ \
110 st8 [r17]=r18,16; /* save ar.rsc value for "loadrs" */ \
111 cmp.eq pNonSys,pSys=r0,r0 /* initialize pSys=0, pNonSys=1 */ \
112 ;; \
113.mem.offset 0,0; st8.spill [r16]=r20,16; /* save original r1 */ \
114.mem.offset 8,0; st8.spill [r17]=r12,16; \
115 adds r12=-16,r1; /* switch to kernel memory stack (with 16 bytes of scratch) */ \
116 ;; \
117.mem.offset 0,0; st8.spill [r16]=r13,16; \
118.mem.offset 8,0; st8.spill [r17]=r21,16; /* save ar.fpsr */ \
119 mov r13=IA64_KR(CURRENT); /* establish `current' */ \
120 ;; \
121.mem.offset 0,0; st8.spill [r16]=r15,16; \
122.mem.offset 8,0; st8.spill [r17]=r14,16; \
123 ;; \
124.mem.offset 0,0; st8.spill [r16]=r2,16; \
125.mem.offset 8,0; st8.spill [r17]=r3,16; \
126 ACCOUNT_GET_STAMP \
127 adds r2=IA64_PT_REGS_R16_OFFSET,r1; \
128 ;; \
129 EXTRA; \
130 movl r1=__gp; /* establish kernel global pointer */ \
131 ;; \
132 ACCOUNT_SYS_ENTER \
133 BSW_1(r3,r14); /* switch back to bank 1 (must be last in insn group) */ \
134 ;;
diff --git a/arch/ia64/include/asm/xen/page.h b/arch/ia64/include/asm/xen/page.h
new file mode 100644
index 000000000000..03441a780b5b
--- /dev/null
+++ b/arch/ia64/include/asm/xen/page.h
@@ -0,0 +1,65 @@
1/******************************************************************************
2 * arch/ia64/include/asm/xen/page.h
3 *
4 * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
5 * VA Linux Systems Japan K.K.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 *
21 */
22
23#ifndef _ASM_IA64_XEN_PAGE_H
24#define _ASM_IA64_XEN_PAGE_H
25
26#define INVALID_P2M_ENTRY (~0UL)
27
28static inline unsigned long mfn_to_pfn(unsigned long mfn)
29{
30 return mfn;
31}
32
33static inline unsigned long pfn_to_mfn(unsigned long pfn)
34{
35 return pfn;
36}
37
38#define phys_to_machine_mapping_valid(_x) (1)
39
40static inline void *mfn_to_virt(unsigned long mfn)
41{
42 return __va(mfn << PAGE_SHIFT);
43}
44
45static inline unsigned long virt_to_mfn(void *virt)
46{
47 return __pa(virt) >> PAGE_SHIFT;
48}
49
50/* for tpmfront.c */
51static inline unsigned long virt_to_machine(void *virt)
52{
53 return __pa(virt);
54}
55
56static inline void set_phys_to_machine(unsigned long pfn, unsigned long mfn)
57{
58 /* nothing */
59}
60
61#define pte_mfn(_x) pte_pfn(_x)
62#define mfn_pte(_x, _y) __pte_ma(0) /* unmodified use */
63#define __pte_ma(_x) ((pte_t) {(_x)}) /* unmodified use */
64
65#endif /* _ASM_IA64_XEN_PAGE_H */
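
Since the ia64 port leaves physical-to-machine translation to the hypervisor, the helpers above are identities and compose trivially. A hedged sketch (hypothetical function, relying on the usual <linux/mm.h> helpers):

static void example_p2m_roundtrip(struct page *pg)
{
	void *va = page_address(pg);		/* direct-mapped kernel address */
	unsigned long mfn = virt_to_mfn(va);	/* == __pa(va) >> PAGE_SHIFT here */

	/* with the identity p2m above, the round trip is exact */
	BUG_ON(mfn_to_pfn(mfn) != page_to_pfn(pg));
}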
diff --git a/arch/ia64/include/asm/xen/privop.h b/arch/ia64/include/asm/xen/privop.h
new file mode 100644
index 000000000000..71ec7546e100
--- /dev/null
+++ b/arch/ia64/include/asm/xen/privop.h
@@ -0,0 +1,129 @@
1#ifndef _ASM_IA64_XEN_PRIVOP_H
2#define _ASM_IA64_XEN_PRIVOP_H
3
4/*
5 * Copyright (C) 2005 Hewlett-Packard Co
6 * Dan Magenheimer <dan.magenheimer@hp.com>
7 *
8 * Paravirtualizations of privileged operations for Xen/ia64
9 *
10 *
11 * inline privop and paravirt_alt support
12 * Copyright (c) 2007 Isaku Yamahata <yamahata at valinux co jp>
13 * VA Linux Systems Japan K.K.
14 *
15 */
16
17#ifndef __ASSEMBLY__
18#include <linux/types.h> /* arch-ia64.h requires uint64_t */
19#endif
20#include <asm/xen/interface.h>
21
22/* At 1 MB, before per-cpu space but still addressable using addl instead
23 of movl. */
24#define XSI_BASE 0xfffffffffff00000
25
26/* Address of mapped regs. */
27#define XMAPPEDREGS_BASE (XSI_BASE + XSI_SIZE)
28
29#ifdef __ASSEMBLY__
30#define XEN_HYPER_RFI break HYPERPRIVOP_RFI
31#define XEN_HYPER_RSM_PSR_DT break HYPERPRIVOP_RSM_DT
32#define XEN_HYPER_SSM_PSR_DT break HYPERPRIVOP_SSM_DT
33#define XEN_HYPER_COVER break HYPERPRIVOP_COVER
34#define XEN_HYPER_ITC_D break HYPERPRIVOP_ITC_D
35#define XEN_HYPER_ITC_I break HYPERPRIVOP_ITC_I
36#define XEN_HYPER_SSM_I break HYPERPRIVOP_SSM_I
37#define XEN_HYPER_GET_IVR break HYPERPRIVOP_GET_IVR
38#define XEN_HYPER_THASH break HYPERPRIVOP_THASH
39#define XEN_HYPER_ITR_D break HYPERPRIVOP_ITR_D
40#define XEN_HYPER_SET_KR break HYPERPRIVOP_SET_KR
41#define XEN_HYPER_GET_PSR break HYPERPRIVOP_GET_PSR
42#define XEN_HYPER_SET_RR0_TO_RR4 break HYPERPRIVOP_SET_RR0_TO_RR4
43
44#define XSI_IFS (XSI_BASE + XSI_IFS_OFS)
45#define XSI_PRECOVER_IFS (XSI_BASE + XSI_PRECOVER_IFS_OFS)
46#define XSI_IFA (XSI_BASE + XSI_IFA_OFS)
47#define XSI_ISR (XSI_BASE + XSI_ISR_OFS)
48#define XSI_IIM (XSI_BASE + XSI_IIM_OFS)
49#define XSI_ITIR (XSI_BASE + XSI_ITIR_OFS)
50#define XSI_PSR_I_ADDR (XSI_BASE + XSI_PSR_I_ADDR_OFS)
51#define XSI_PSR_IC (XSI_BASE + XSI_PSR_IC_OFS)
52#define XSI_IPSR (XSI_BASE + XSI_IPSR_OFS)
53#define XSI_IIP (XSI_BASE + XSI_IIP_OFS)
54#define XSI_B1NAT (XSI_BASE + XSI_B1NATS_OFS)
55#define XSI_BANK1_R16 (XSI_BASE + XSI_BANK1_R16_OFS)
56#define XSI_BANKNUM (XSI_BASE + XSI_BANKNUM_OFS)
57#define XSI_IHA (XSI_BASE + XSI_IHA_OFS)
58#endif
59
60#ifndef __ASSEMBLY__
61
62/************************************************/
63/* Instructions paravirtualized for correctness */
64/************************************************/
65
66/* "fc" and "thash" are privilege-sensitive instructions, meaning they
67 * may have different semantics depending on whether they are executed
68 * at PL0 vs PL!=0. When paravirtualized, these instructions mustn't
69 * be allowed to execute directly, lest incorrect semantics result. */
70extern void xen_fc(unsigned long addr);
71extern unsigned long xen_thash(unsigned long addr);
72
73/* Note that "ttag" and "cover" are also privilege-sensitive; "ttag"
74 * is not currently used (though it may be in a long-format VHPT system!)
75 * and the semantics of cover only change if psr.ic is off, which is very
76 * rare (and currently non-existent outside of assembly code). */
77
78/* There are also privilege-sensitive registers. These registers are
79 * readable at any privilege level but only writable at PL0. */
80extern unsigned long xen_get_cpuid(int index);
81extern unsigned long xen_get_pmd(int index);
82
83extern unsigned long xen_get_eflag(void); /* see xen_ia64_getreg */
84extern void xen_set_eflag(unsigned long); /* see xen_ia64_setreg */
85
86/************************************************/
87/* Instructions paravirtualized for performance */
88/************************************************/
89
90/* Xen uses memory-mapped virtual privileged registers for access to many
91 * performance-sensitive privileged registers. Some, like the processor
92 * status register (psr), are broken up into multiple memory locations.
93 * Others, like "pend", are abstractions based on privileged registers.
94 * "Pend" is guaranteed to be set if reading cr.ivr would return a
95 * (non-spurious) interrupt. */
96#define XEN_MAPPEDREGS ((struct mapped_regs *)XMAPPEDREGS_BASE)
97
98#define XSI_PSR_I \
99 (*XEN_MAPPEDREGS->interrupt_mask_addr)
100#define xen_get_virtual_psr_i() \
101 (!XSI_PSR_I)
102#define xen_set_virtual_psr_i(_val) \
103 ({ XSI_PSR_I = (uint8_t)(_val) ? 0 : 1; })
104#define xen_set_virtual_psr_ic(_val) \
105 ({ XEN_MAPPEDREGS->interrupt_collection_enabled = _val ? 1 : 0; })
106#define xen_get_virtual_pend() \
107 (*(((uint8_t *)XEN_MAPPEDREGS->interrupt_mask_addr) - 1))
108
109/* Although all privileged operations can be left to trap and will
110 * be properly handled by Xen, some are frequent enough that we use
111 * hyperprivops for performance. */
112extern unsigned long xen_get_psr(void);
113extern unsigned long xen_get_ivr(void);
114extern unsigned long xen_get_tpr(void);
115extern void xen_hyper_ssm_i(void);
116extern void xen_set_itm(unsigned long);
117extern void xen_set_tpr(unsigned long);
118extern void xen_eoi(unsigned long);
119extern unsigned long xen_get_rr(unsigned long index);
120extern void xen_set_rr(unsigned long index, unsigned long val);
121extern void xen_set_rr0_to_rr4(unsigned long val0, unsigned long val1,
122 unsigned long val2, unsigned long val3,
123 unsigned long val4);
124extern void xen_set_kr(unsigned long index, unsigned long val);
125extern void xen_ptcga(unsigned long addr, unsigned long size);
126
127#endif /* !__ASSEMBLY__ */
128
129#endif /* _ASM_IA64_XEN_PRIVOP_H */
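
To make the memory-mapped psr.i replacement above concrete, here is a simplified, hypothetical sketch of irq save/restore helpers built on these macros. The real implementations belong to the pv_ops code in arch/ia64/xen/xen_pv_ops.c and are expected to be more careful (barriers, checking the pending flag), so treat this purely as an illustration of the mechanism:

static inline unsigned long example_irq_save(void)
{
	unsigned long enabled = xen_get_virtual_psr_i();	/* 1 = enabled */

	xen_set_virtual_psr_i(0);				/* mask event delivery */
	return enabled;
}

static inline void example_irq_restore(unsigned long enabled)
{
	xen_set_virtual_psr_i(enabled);
	if (enabled)
		xen_hyper_ssm_i();	/* ask Xen to deliver anything now pending */
}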
diff --git a/arch/ia64/include/asm/xen/xcom_hcall.h b/arch/ia64/include/asm/xen/xcom_hcall.h
new file mode 100644
index 000000000000..20b2950c71b6
--- /dev/null
+++ b/arch/ia64/include/asm/xen/xcom_hcall.h
@@ -0,0 +1,51 @@
1/*
2 * Copyright (C) 2006 Tristan Gingold <tristan.gingold@bull.net>, Bull SAS
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 */
18
19#ifndef _ASM_IA64_XEN_XCOM_HCALL_H
20#define _ASM_IA64_XEN_XCOM_HCALL_H
21
22/* These functions create inline or mini descriptors for the parameters and
23   call the corresponding xencomm_arch_hypercall_X.
24   Architectures should define HYPERVISOR_xxx as xencomm_hypercall_xxx unless
25   they want to use their own wrapper. */
26extern int xencomm_hypercall_console_io(int cmd, int count, char *str);
27
28extern int xencomm_hypercall_event_channel_op(int cmd, void *op);
29
30extern int xencomm_hypercall_xen_version(int cmd, void *arg);
31
32extern int xencomm_hypercall_physdev_op(int cmd, void *op);
33
34extern int xencomm_hypercall_grant_table_op(unsigned int cmd, void *op,
35 unsigned int count);
36
37extern int xencomm_hypercall_sched_op(int cmd, void *arg);
38
39extern int xencomm_hypercall_multicall(void *call_list, int nr_calls);
40
41extern int xencomm_hypercall_callback_op(int cmd, void *arg);
42
43extern int xencomm_hypercall_memory_op(unsigned int cmd, void *arg);
44
45extern int xencomm_hypercall_suspend(unsigned long srec);
46
47extern long xencomm_hypercall_vcpu_op(int cmd, int cpu, void *arg);
48
49extern long xencomm_hypercall_opt_feature(void *arg);
50
51#endif /* _ASM_IA64_XEN_XCOM_HCALL_H */
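
The header comment above says architectures should route HYPERVISOR_xxx through these xencomm wrappers. A hedged sketch of that wiring is shown below; the real ia64 definitions live in the port's asm/xen/hypercall.h and may differ in detail.

/* illustrative only -- check the port's hypercall.h for the real macros */
#define HYPERVISOR_event_channel_op(cmd, op)	\
	xencomm_hypercall_event_channel_op(cmd, op)
#define HYPERVISOR_xen_version(cmd, arg)	\
	xencomm_hypercall_xen_version(cmd, arg)
#define HYPERVISOR_sched_op(cmd, arg)		\
	xencomm_hypercall_sched_op(cmd, arg)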
diff --git a/arch/ia64/include/asm/xen/xencomm.h b/arch/ia64/include/asm/xen/xencomm.h
new file mode 100644
index 000000000000..cded677bebf2
--- /dev/null
+++ b/arch/ia64/include/asm/xen/xencomm.h
@@ -0,0 +1,42 @@
1/*
2 * Copyright (C) 2006 Hollis Blanchard <hollisb@us.ibm.com>, IBM Corporation
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 */
18
19#ifndef _ASM_IA64_XEN_XENCOMM_H
20#define _ASM_IA64_XEN_XENCOMM_H
21
22#include <xen/xencomm.h>
23#include <asm/pgtable.h>
24
25/* Must be called before any hypercall. */
26extern void xencomm_initialize(void);
27extern int xencomm_is_initialized(void);
28
29/* Check whether virtual contiguity implies physical contiguity for the
30 * given virtual address. On ia64 this holds for the identity-mapped area
31 * in region 7 and for the piece of region 5 that is mapped by
32 * itr[IA64_TR_KERNEL]/dtr[IA64_TR_KERNEL].
33 */
34static inline int xencomm_is_phys_contiguous(unsigned long addr)
35{
36 return (PAGE_OFFSET <= addr &&
37 addr < (PAGE_OFFSET + (1UL << IA64_MAX_PHYS_BITS))) ||
38 (KERNEL_START <= addr &&
39 addr < KERNEL_START + KERNEL_TR_PAGE_SIZE);
40}
41
42#endif /* _ASM_IA64_XEN_XENCOMM_H */
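
As an illustration of what xencomm_is_phys_contiguous() accepts (a hypothetical function, assuming <linux/slab.h> and <linux/vmalloc.h> are available): buffers from the identity-mapped region pass the check directly, while vmalloc()ed buffers normally do not and would need a xencomm descriptor built page by page.

static void example_contiguity(void)
{
	void *k = kmalloc(64, GFP_KERNEL);	/* region-7, identity mapped */
	void *v = vmalloc(2 * PAGE_SIZE);	/* virtually contiguous only */

	if (k)
		WARN_ON(!xencomm_is_phys_contiguous((unsigned long)k));
	if (v)
		WARN_ON(xencomm_is_phys_contiguous((unsigned long)v));

	kfree(k);
	vfree(v);
}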
diff --git a/arch/ia64/kernel/Makefile b/arch/ia64/kernel/Makefile
index 87fea11aecb7..55e6ca8eebda 100644
--- a/arch/ia64/kernel/Makefile
+++ b/arch/ia64/kernel/Makefile
@@ -112,5 +112,23 @@ clean-files += $(objtree)/include/asm-ia64/nr-irqs.h
112ASM_PARAVIRT_OBJS = ivt.o entry.o 112ASM_PARAVIRT_OBJS = ivt.o entry.o
113define paravirtualized_native 113define paravirtualized_native
114AFLAGS_$(1) += -D__IA64_ASM_PARAVIRTUALIZED_NATIVE 114AFLAGS_$(1) += -D__IA64_ASM_PARAVIRTUALIZED_NATIVE
115AFLAGS_pvchk-sed-$(1) += -D__IA64_ASM_PARAVIRTUALIZED_PVCHECK
116extra-y += pvchk-$(1)
115endef 117endef
116$(foreach obj,$(ASM_PARAVIRT_OBJS),$(eval $(call paravirtualized_native,$(obj)))) 118$(foreach obj,$(ASM_PARAVIRT_OBJS),$(eval $(call paravirtualized_native,$(obj))))
119
120#
121# Checker for paravirtualizations of privileged operations.
122#
123quiet_cmd_pv_check_sed = PVCHK $@
124define cmd_pv_check_sed
125 sed -f $(srctree)/arch/$(SRCARCH)/scripts/pvcheck.sed $< > $@
126endef
127
128$(obj)/pvchk-sed-%.s: $(src)/%.S $(srctree)/arch/$(SRCARCH)/scripts/pvcheck.sed FORCE
129 $(call if_changed_dep,as_s_S)
130$(obj)/pvchk-%.s: $(obj)/pvchk-sed-%.s FORCE
131 $(call if_changed,pv_check_sed)
132$(obj)/pvchk-%.o: $(obj)/pvchk-%.s FORCE
133 $(call if_changed,as_o_S)
134.PRECIOUS: $(obj)/pvchk-sed-%.s $(obj)/pvchk-%.s $(obj)/pvchk-%.o
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
index 5d1eb7ee2bf6..00936491933e 100644
--- a/arch/ia64/kernel/acpi.c
+++ b/arch/ia64/kernel/acpi.c
@@ -52,6 +52,7 @@
52#include <asm/numa.h> 52#include <asm/numa.h>
53#include <asm/sal.h> 53#include <asm/sal.h>
54#include <asm/cyclone.h> 54#include <asm/cyclone.h>
55#include <asm/xen/hypervisor.h>
55 56
56#define BAD_MADT_ENTRY(entry, end) ( \ 57#define BAD_MADT_ENTRY(entry, end) ( \
57 (!entry) || (unsigned long)entry + sizeof(*entry) > end || \ 58 (!entry) || (unsigned long)entry + sizeof(*entry) > end || \
@@ -121,6 +122,8 @@ acpi_get_sysname(void)
121 return "uv"; 122 return "uv";
122 else 123 else
123 return "sn2"; 124 return "sn2";
125 } else if (xen_pv_domain() && !strcmp(hdr->oem_id, "XEN")) {
126 return "xen";
124 } 127 }
125 128
126 return "dig"; 129 return "dig";
@@ -137,6 +140,8 @@ acpi_get_sysname(void)
137 return "uv"; 140 return "uv";
138# elif defined (CONFIG_IA64_DIG) 141# elif defined (CONFIG_IA64_DIG)
139 return "dig"; 142 return "dig";
143# elif defined (CONFIG_IA64_XEN_GUEST)
144 return "xen";
140# else 145# else
141# error Unknown platform. Fix acpi.c. 146# error Unknown platform. Fix acpi.c.
142# endif 147# endif
diff --git a/arch/ia64/kernel/asm-offsets.c b/arch/ia64/kernel/asm-offsets.c
index 94c44b1ccfd0..742dbb1d5a4f 100644
--- a/arch/ia64/kernel/asm-offsets.c
+++ b/arch/ia64/kernel/asm-offsets.c
@@ -16,6 +16,9 @@
16#include <asm/sigcontext.h> 16#include <asm/sigcontext.h>
17#include <asm/mca.h> 17#include <asm/mca.h>
18 18
19#include <asm/xen/interface.h>
20#include <asm/xen/hypervisor.h>
21
19#include "../kernel/sigframe.h" 22#include "../kernel/sigframe.h"
20#include "../kernel/fsyscall_gtod_data.h" 23#include "../kernel/fsyscall_gtod_data.h"
21 24
@@ -286,4 +289,32 @@ void foo(void)
286 offsetof (struct itc_jitter_data_t, itc_jitter)); 289 offsetof (struct itc_jitter_data_t, itc_jitter));
287 DEFINE(IA64_ITC_LASTCYCLE_OFFSET, 290 DEFINE(IA64_ITC_LASTCYCLE_OFFSET,
288 offsetof (struct itc_jitter_data_t, itc_lastcycle)); 291 offsetof (struct itc_jitter_data_t, itc_lastcycle));
292
293#ifdef CONFIG_XEN
294 BLANK();
295
296 DEFINE(XEN_NATIVE_ASM, XEN_NATIVE);
297 DEFINE(XEN_PV_DOMAIN_ASM, XEN_PV_DOMAIN);
298
299#define DEFINE_MAPPED_REG_OFS(sym, field) \
300 DEFINE(sym, (XMAPPEDREGS_OFS + offsetof(struct mapped_regs, field)))
301
302 DEFINE_MAPPED_REG_OFS(XSI_PSR_I_ADDR_OFS, interrupt_mask_addr);
303 DEFINE_MAPPED_REG_OFS(XSI_IPSR_OFS, ipsr);
304 DEFINE_MAPPED_REG_OFS(XSI_IIP_OFS, iip);
305 DEFINE_MAPPED_REG_OFS(XSI_IFS_OFS, ifs);
306 DEFINE_MAPPED_REG_OFS(XSI_PRECOVER_IFS_OFS, precover_ifs);
307 DEFINE_MAPPED_REG_OFS(XSI_ISR_OFS, isr);
308 DEFINE_MAPPED_REG_OFS(XSI_IFA_OFS, ifa);
309 DEFINE_MAPPED_REG_OFS(XSI_IIPA_OFS, iipa);
310 DEFINE_MAPPED_REG_OFS(XSI_IIM_OFS, iim);
311 DEFINE_MAPPED_REG_OFS(XSI_IHA_OFS, iha);
312 DEFINE_MAPPED_REG_OFS(XSI_ITIR_OFS, itir);
313 DEFINE_MAPPED_REG_OFS(XSI_PSR_IC_OFS, interrupt_collection_enabled);
314 DEFINE_MAPPED_REG_OFS(XSI_BANKNUM_OFS, banknum);
315 DEFINE_MAPPED_REG_OFS(XSI_BANK0_R16_OFS, bank0_regs[0]);
316 DEFINE_MAPPED_REG_OFS(XSI_BANK1_R16_OFS, bank1_regs[0]);
317 DEFINE_MAPPED_REG_OFS(XSI_B0NATS_OFS, vbnat);
318 DEFINE_MAPPED_REG_OFS(XSI_B1NATS_OFS, vnat);
319#endif /* CONFIG_XEN */
289} 320}
diff --git a/arch/ia64/kernel/ivt.S b/arch/ia64/kernel/ivt.S
index 416a952b19bd..f675d8e33853 100644
--- a/arch/ia64/kernel/ivt.S
+++ b/arch/ia64/kernel/ivt.S
@@ -580,7 +580,7 @@ ENTRY(dirty_bit)
580 mov b0=r29 // restore b0 580 mov b0=r29 // restore b0
581 ;; 581 ;;
582 st8 [r17]=r18 // store back updated PTE 582 st8 [r17]=r18 // store back updated PTE
583 itc.d r18 // install updated PTE 583 ITC_D(p0, r18, r16) // install updated PTE
584#endif 584#endif
585 mov pr=r31,-1 // restore pr 585 mov pr=r31,-1 // restore pr
586 RFI 586 RFI
@@ -646,7 +646,7 @@ ENTRY(iaccess_bit)
646 mov b0=r29 // restore b0 646 mov b0=r29 // restore b0
647 ;; 647 ;;
648 st8 [r17]=r18 // store back updated PTE 648 st8 [r17]=r18 // store back updated PTE
649 itc.i r18 // install updated PTE 649 ITC_I(p0, r18, r16) // install updated PTE
650#endif /* !CONFIG_SMP */ 650#endif /* !CONFIG_SMP */
651 mov pr=r31,-1 651 mov pr=r31,-1
652 RFI 652 RFI
@@ -698,7 +698,7 @@ ENTRY(daccess_bit)
698 or r18=_PAGE_A,r18 // set the accessed bit 698 or r18=_PAGE_A,r18 // set the accessed bit
699 ;; 699 ;;
700 st8 [r17]=r18 // store back updated PTE 700 st8 [r17]=r18 // store back updated PTE
701 itc.d r18 // install updated PTE 701 ITC_D(p0, r18, r16) // install updated PTE
702#endif 702#endif
703 mov b0=r29 // restore b0 703 mov b0=r29 // restore b0
704 mov pr=r31,-1 704 mov pr=r31,-1
diff --git a/arch/ia64/kernel/nr-irqs.c b/arch/ia64/kernel/nr-irqs.c
index 8273afc32db8..ee564575148e 100644
--- a/arch/ia64/kernel/nr-irqs.c
+++ b/arch/ia64/kernel/nr-irqs.c
@@ -10,6 +10,7 @@
10#include <linux/kbuild.h> 10#include <linux/kbuild.h>
11#include <linux/threads.h> 11#include <linux/threads.h>
12#include <asm/native/irq.h> 12#include <asm/native/irq.h>
13#include <asm/xen/irq.h>
13 14
14void foo(void) 15void foo(void)
15{ 16{
diff --git a/arch/ia64/kernel/paravirt.c b/arch/ia64/kernel/paravirt.c
index afaf5b9a2cf0..de35d8e8b7d2 100644
--- a/arch/ia64/kernel/paravirt.c
+++ b/arch/ia64/kernel/paravirt.c
@@ -332,7 +332,7 @@ ia64_native_iosapic_write(char __iomem *iosapic, unsigned int reg, u32 val)
332 332
333struct pv_iosapic_ops pv_iosapic_ops = { 333struct pv_iosapic_ops pv_iosapic_ops = {
334 .pcat_compat_init = ia64_native_iosapic_pcat_compat_init, 334 .pcat_compat_init = ia64_native_iosapic_pcat_compat_init,
335 .get_irq_chip = ia64_native_iosapic_get_irq_chip, 335 .__get_irq_chip = ia64_native_iosapic_get_irq_chip,
336 336
337 .__read = ia64_native_iosapic_read, 337 .__read = ia64_native_iosapic_read,
338 .__write = ia64_native_iosapic_write, 338 .__write = ia64_native_iosapic_write,
diff --git a/arch/ia64/kernel/paravirt_inst.h b/arch/ia64/kernel/paravirt_inst.h
index 5cad6fb2ed19..64d6d810c64b 100644
--- a/arch/ia64/kernel/paravirt_inst.h
+++ b/arch/ia64/kernel/paravirt_inst.h
@@ -20,7 +20,9 @@
20 * 20 *
21 */ 21 */
22 22
23#ifdef __IA64_ASM_PARAVIRTUALIZED_XEN 23#ifdef __IA64_ASM_PARAVIRTUALIZED_PVCHECK
24#include <asm/native/pvchk_inst.h>
25#elif defined(__IA64_ASM_PARAVIRTUALIZED_XEN)
24#include <asm/xen/inst.h> 26#include <asm/xen/inst.h>
25#include <asm/xen/minstate.h> 27#include <asm/xen/minstate.h>
26#else 28#else
diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c
index 341a0319a5ba..c57162705147 100644
--- a/arch/ia64/kernel/process.c
+++ b/arch/ia64/kernel/process.c
@@ -242,7 +242,6 @@ default_idle (void)
242/* We don't actually take CPU down, just spin without interrupts. */ 242/* We don't actually take CPU down, just spin without interrupts. */
243static inline void play_dead(void) 243static inline void play_dead(void)
244{ 244{
245 extern void ia64_cpu_local_tick (void);
246 unsigned int this_cpu = smp_processor_id(); 245 unsigned int this_cpu = smp_processor_id();
247 246
248 /* Ack it */ 247 /* Ack it */
diff --git a/arch/ia64/scripts/pvcheck.sed b/arch/ia64/scripts/pvcheck.sed
new file mode 100644
index 000000000000..ba66ac2e4c60
--- /dev/null
+++ b/arch/ia64/scripts/pvcheck.sed
@@ -0,0 +1,32 @@
1#
2# Checker for paravirtualizations of privileged operations.
3#
4s/ssm.*psr\.ic.*/.warning \"ssm psr.ic should not be used directly\"/g
5s/rsm.*psr\.ic.*/.warning \"rsm psr.ic should not be used directly\"/g
6s/ssm.*psr\.i.*/.warning \"ssm psr.i should not be used directly\"/g
7s/rsm.*psr\.i.*/.warning \"rsm psr.i should not be used directly\"/g
8s/ssm.*psr\.dt.*/.warning \"ssm psr.dt should not be used directly\"/g
9s/rsm.*psr\.dt.*/.warning \"rsm psr.dt should not be used directly\"/g
10s/mov.*=.*cr\.ifa/.warning \"cr.ifa should not be used directly\"/g
11s/mov.*=.*cr\.itir/.warning \"cr.itir should not be used directly\"/g
12s/mov.*=.*cr\.isr/.warning \"cr.isr should not be used directly\"/g
13s/mov.*=.*cr\.iha/.warning \"cr.iha should not be used directly\"/g
14s/mov.*=.*cr\.ipsr/.warning \"cr.ipsr should not be used directly\"/g
15s/mov.*=.*cr\.iim/.warning \"cr.iim should not be used directly\"/g
16s/mov.*=.*cr\.iip/.warning \"cr.iip should not be used directly\"/g
17s/mov.*=.*cr\.ivr/.warning \"cr.ivr should not be used directly\"/g
18s/mov.*=[^\.]*psr/.warning \"psr should not be used directly\"/g # avoid ar.fpsr
19s/mov.*=.*ar\.eflags/.warning \"ar.eflags should not be used directly\"/g
20s/mov.*cr\.ifa.*=.*/.warning \"cr.ifa should not be used directly\"/g
21s/mov.*cr\.itir.*=.*/.warning \"cr.itir should not be used directly\"/g
22s/mov.*cr\.iha.*=.*/.warning \"cr.iha should not be used directly\"/g
23s/mov.*cr\.ipsr.*=.*/.warning \"cr.ipsr should not be used directly\"/g
24s/mov.*cr\.ifs.*=.*/.warning \"cr.ifs should not be used directly\"/g
25s/mov.*cr\.iip.*=.*/.warning \"cr.iip should not be used directly\"/g
26s/mov.*cr\.kr.*=.*/.warning \"cr.kr should not be used directly\"/g
27s/mov.*ar\.eflags.*=.*/.warning \"ar.eflags should not be used directly\"/g
28s/itc\.i.*/.warning \"itc.i should not be used directly.\"/g
29s/itc\.d.*/.warning \"itc.d should not be used directly.\"/g
30s/bsw\.0/.warning \"bsw.0 should not be used directly.\"/g
31s/bsw\.1/.warning \"bsw.1 should not be used directly.\"/g
32s/ptc\.ga.*/.warning \"ptc.ga should not be used directly.\"/g
diff --git a/arch/ia64/xen/Kconfig b/arch/ia64/xen/Kconfig
new file mode 100644
index 000000000000..f1683a20275b
--- /dev/null
+++ b/arch/ia64/xen/Kconfig
@@ -0,0 +1,26 @@
1#
2# This Kconfig describes xen/ia64 options
3#
4
5config XEN
6 bool "Xen hypervisor support"
7 default y
8 depends on PARAVIRT && MCKINLEY && IA64_PAGE_SIZE_16KB && EXPERIMENTAL
9 select XEN_XENCOMM
10 select NO_IDLE_HZ
11
12	# these are required for save/restore.
13 select ARCH_SUSPEND_POSSIBLE
14 select SUSPEND
15 select PM_SLEEP
16 help
17	  Enable Xen hypervisor support. The resulting kernel runs
18 both as a guest OS on Xen and natively on hardware.
19
20config XEN_XENCOMM
21 depends on XEN
22 bool
23
24config NO_IDLE_HZ
25 depends on XEN
26 bool
diff --git a/arch/ia64/xen/Makefile b/arch/ia64/xen/Makefile
new file mode 100644
index 000000000000..0ad0224693d9
--- /dev/null
+++ b/arch/ia64/xen/Makefile
@@ -0,0 +1,22 @@
1#
2# Makefile for Xen components
3#
4
5obj-y := hypercall.o xenivt.o xensetup.o xen_pv_ops.o irq_xen.o \
6 hypervisor.o xencomm.o xcom_hcall.o grant-table.o time.o suspend.o
7
8obj-$(CONFIG_IA64_GENERIC) += machvec.o
9
10AFLAGS_xenivt.o += -D__IA64_ASM_PARAVIRTUALIZED_XEN
11
12# xen multi compile
13ASM_PARAVIRT_MULTI_COMPILE_SRCS = ivt.S entry.S
14ASM_PARAVIRT_OBJS = $(addprefix xen-,$(ASM_PARAVIRT_MULTI_COMPILE_SRCS:.S=.o))
15obj-y += $(ASM_PARAVIRT_OBJS)
16define paravirtualized_xen
17AFLAGS_$(1) += -D__IA64_ASM_PARAVIRTUALIZED_XEN
18endef
19$(foreach o,$(ASM_PARAVIRT_OBJS),$(eval $(call paravirtualized_xen,$(o))))
20
21$(obj)/xen-%.o: $(src)/../kernel/%.S FORCE
22 $(call if_changed_dep,as_o_S)
diff --git a/arch/ia64/xen/grant-table.c b/arch/ia64/xen/grant-table.c
new file mode 100644
index 000000000000..777dd9a9108b
--- /dev/null
+++ b/arch/ia64/xen/grant-table.c
@@ -0,0 +1,155 @@
1/******************************************************************************
2 * arch/ia64/xen/grant-table.c
3 *
4 * Copyright (c) 2006 Isaku Yamahata <yamahata at valinux co jp>
5 * VA Linux Systems Japan K.K.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 *
21 */
22
23#include <linux/module.h>
24#include <linux/vmalloc.h>
25#include <linux/mm.h>
26
27#include <xen/interface/xen.h>
28#include <xen/interface/memory.h>
29#include <xen/grant_table.h>
30
31#include <asm/xen/hypervisor.h>
32
33struct vm_struct *xen_alloc_vm_area(unsigned long size)
34{
35 int order;
36 unsigned long virt;
37 unsigned long nr_pages;
38 struct vm_struct *area;
39
40 order = get_order(size);
41 virt = __get_free_pages(GFP_KERNEL, order);
42 if (virt == 0)
43 goto err0;
44 nr_pages = 1 << order;
45 scrub_pages(virt, nr_pages);
46
47 area = kmalloc(sizeof(*area), GFP_KERNEL);
48 if (area == NULL)
49 goto err1;
50
51 area->flags = VM_IOREMAP;
52 area->addr = (void *)virt;
53 area->size = size;
54 area->pages = NULL;
55 area->nr_pages = nr_pages;
56 area->phys_addr = 0; /* xenbus_map_ring_valloc uses this field! */
57
58 return area;
59
60err1:
61 free_pages(virt, order);
62err0:
63 return NULL;
64}
65EXPORT_SYMBOL_GPL(xen_alloc_vm_area);
66
67void xen_free_vm_area(struct vm_struct *area)
68{
69 unsigned int order = get_order(area->size);
70 unsigned long i;
71 unsigned long phys_addr = __pa(area->addr);
72
73	/* This area is used for foreign page mapping,
74	 * so the underlying machine page may not be assigned. */
75 for (i = 0; i < (1 << order); i++) {
76 unsigned long ret;
77 unsigned long gpfn = (phys_addr >> PAGE_SHIFT) + i;
78 struct xen_memory_reservation reservation = {
79 .nr_extents = 1,
80 .address_bits = 0,
81 .extent_order = 0,
82 .domid = DOMID_SELF
83 };
84 set_xen_guest_handle(reservation.extent_start, &gpfn);
85 ret = HYPERVISOR_memory_op(XENMEM_populate_physmap,
86 &reservation);
87 BUG_ON(ret != 1);
88 }
89 free_pages((unsigned long)area->addr, order);
90 kfree(area);
91}
92EXPORT_SYMBOL_GPL(xen_free_vm_area);
93
94
95/****************************************************************************
96 * grant table hack
97 * cmd: GNTTABOP_xxx
98 */
99
100int arch_gnttab_map_shared(unsigned long *frames, unsigned long nr_gframes,
101 unsigned long max_nr_gframes,
102 struct grant_entry **__shared)
103{
104 *__shared = __va(frames[0] << PAGE_SHIFT);
105 return 0;
106}
107
108void arch_gnttab_unmap_shared(struct grant_entry *shared,
109 unsigned long nr_gframes)
110{
111 /* nothing */
112}
113
114static void
115gnttab_map_grant_ref_pre(struct gnttab_map_grant_ref *uop)
116{
117 uint32_t flags;
118
119 flags = uop->flags;
120
121 if (flags & GNTMAP_host_map) {
122 if (flags & GNTMAP_application_map) {
123 printk(KERN_DEBUG
124 "GNTMAP_application_map is not supported yet: "
125 "flags 0x%x\n", flags);
126 BUG();
127 }
128 if (flags & GNTMAP_contains_pte) {
129 printk(KERN_DEBUG
130 "GNTMAP_contains_pte is not supported yet: "
131 "flags 0x%x\n", flags);
132 BUG();
133 }
134 } else if (flags & GNTMAP_device_map) {
135 printk("GNTMAP_device_map is not supported yet 0x%x\n", flags);
136 BUG(); /* not yet. actually this flag is not used. */
137 } else {
138 BUG();
139 }
140}
141
142int
143HYPERVISOR_grant_table_op(unsigned int cmd, void *uop, unsigned int count)
144{
145 if (cmd == GNTTABOP_map_grant_ref) {
146 unsigned int i;
147 for (i = 0; i < count; i++) {
148 gnttab_map_grant_ref_pre(
149 (struct gnttab_map_grant_ref *)uop + i);
150 }
151 }
152 return xencomm_hypercall_grant_table_op(cmd, uop, count);
153}
154
155EXPORT_SYMBOL(HYPERVISOR_grant_table_op);
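
A hedged sketch of how the alloc/free pair above is intended to be used (hypothetical caller; in the kernel the generic xenbus ring-mapping code is the consumer, which is also why area->phys_addr is zeroed above):

static void example_map_ring(void)
{
	struct vm_struct *area = xen_alloc_vm_area(PAGE_SIZE);

	if (!area)
		return;
	/*
	 * ... hand area->addr to GNTTABOP_map_grant_ref via
	 * HYPERVISOR_grant_table_op(), use the mapping, then unmap it ...
	 */
	xen_free_vm_area(area);
}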
diff --git a/arch/ia64/xen/hypercall.S b/arch/ia64/xen/hypercall.S
new file mode 100644
index 000000000000..d4ff0b9e79f1
--- /dev/null
+++ b/arch/ia64/xen/hypercall.S
@@ -0,0 +1,91 @@
1/*
2 * Support routines for Xen hypercalls
3 *
4 * Copyright (C) 2005 Dan Magenheimer <dan.magenheimer@hp.com>
5 * Copyright (C) 2008 Yaozu (Eddie) Dong <eddie.dong@intel.com>
6 */
7
8#include <asm/asmmacro.h>
9#include <asm/intrinsics.h>
10#include <asm/xen/privop.h>
11
12/*
13 * Hypercalls without parameter.
14 */
15#define __HCALL0(name,hcall) \
16 GLOBAL_ENTRY(name); \
17 break hcall; \
18 br.ret.sptk.many rp; \
19 END(name)
20
21/*
22 * Hypercalls with 1 parameter.
23 */
24#define __HCALL1(name,hcall) \
25 GLOBAL_ENTRY(name); \
26 mov r8=r32; \
27 break hcall; \
28 br.ret.sptk.many rp; \
29 END(name)
30
31/*
32 * Hypercalls with 2 parameters.
33 */
34#define __HCALL2(name,hcall) \
35 GLOBAL_ENTRY(name); \
36 mov r8=r32; \
37 mov r9=r33; \
38 break hcall; \
39 br.ret.sptk.many rp; \
40 END(name)
41
42__HCALL0(xen_get_psr, HYPERPRIVOP_GET_PSR)
43__HCALL0(xen_get_ivr, HYPERPRIVOP_GET_IVR)
44__HCALL0(xen_get_tpr, HYPERPRIVOP_GET_TPR)
45__HCALL0(xen_hyper_ssm_i, HYPERPRIVOP_SSM_I)
46
47__HCALL1(xen_set_tpr, HYPERPRIVOP_SET_TPR)
48__HCALL1(xen_eoi, HYPERPRIVOP_EOI)
49__HCALL1(xen_thash, HYPERPRIVOP_THASH)
50__HCALL1(xen_set_itm, HYPERPRIVOP_SET_ITM)
51__HCALL1(xen_get_rr, HYPERPRIVOP_GET_RR)
52__HCALL1(xen_fc, HYPERPRIVOP_FC)
53__HCALL1(xen_get_cpuid, HYPERPRIVOP_GET_CPUID)
54__HCALL1(xen_get_pmd, HYPERPRIVOP_GET_PMD)
55
56__HCALL2(xen_ptcga, HYPERPRIVOP_PTC_GA)
57__HCALL2(xen_set_rr, HYPERPRIVOP_SET_RR)
58__HCALL2(xen_set_kr, HYPERPRIVOP_SET_KR)
59
60#ifdef CONFIG_IA32_SUPPORT
61__HCALL1(xen_get_eflag, HYPERPRIVOP_GET_EFLAG)
62__HCALL1(xen_set_eflag, HYPERPRIVOP_SET_EFLAG) // refer SDM vol1 3.1.8
63#endif /* CONFIG_IA32_SUPPORT */
64
65GLOBAL_ENTRY(xen_set_rr0_to_rr4)
66 mov r8=r32
67 mov r9=r33
68 mov r10=r34
69 mov r11=r35
70 mov r14=r36
71 XEN_HYPER_SET_RR0_TO_RR4
72 br.ret.sptk.many rp
73 ;;
74END(xen_set_rr0_to_rr4)
75
76GLOBAL_ENTRY(xen_send_ipi)
77 mov r14=r32
78 mov r15=r33
79 mov r2=0x400
80 break 0x1000
81 ;;
82 br.ret.sptk.many rp
83 ;;
84END(xen_send_ipi)
85
86GLOBAL_ENTRY(__hypercall)
87 mov r2=r37
88 break 0x1000
89 br.ret.sptk.many b0
90 ;;
91END(__hypercall)
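
The C prototypes in asm/xen/privop.h map one-to-one onto the wrappers above, so a privileged-register access from C collapses into a single break with the hyperprivop number. A small hypothetical example (on ia64 the rr[] index is a virtual address whose top three bits select the region, hence 7UL << 61 for rr[7]):

static void example_region_register(void)
{
	/* read and write back region register 7 via GET_RR/SET_RR */
	unsigned long rr7 = xen_get_rr(7UL << 61);

	xen_set_rr(7UL << 61, rr7);
}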
diff --git a/arch/ia64/xen/hypervisor.c b/arch/ia64/xen/hypervisor.c
new file mode 100644
index 000000000000..cac4d97c0b5a
--- /dev/null
+++ b/arch/ia64/xen/hypervisor.c
@@ -0,0 +1,96 @@
1/******************************************************************************
2 * arch/ia64/xen/hypervisor.c
3 *
4 * Copyright (c) 2006 Isaku Yamahata <yamahata at valinux co jp>
5 * VA Linux Systems Japan K.K.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 *
21 */
22
23#include <linux/efi.h>
24#include <asm/xen/hypervisor.h>
25#include <asm/xen/privop.h>
26
27#include "irq_xen.h"
28
29struct shared_info *HYPERVISOR_shared_info __read_mostly =
30 (struct shared_info *)XSI_BASE;
31EXPORT_SYMBOL(HYPERVISOR_shared_info);
32
33DEFINE_PER_CPU(struct vcpu_info *, xen_vcpu);
34
35struct start_info *xen_start_info;
36EXPORT_SYMBOL(xen_start_info);
37
38EXPORT_SYMBOL(xen_domain_type);
39
40EXPORT_SYMBOL(__hypercall);
41
42/* Stolen from arch/x86/xen/enlighten.c */
43/*
44 * Flag to determine whether vcpu info placement is available on all
45 * VCPUs. We assume it is to start with, and then set it to zero on
46 * the first failure. This is because it can succeed on some VCPUs
47 * and not others, since it can involve hypervisor memory allocation,
48 * or because the guest failed to guarantee all the appropriate
49 * constraints on all VCPUs (ie buffer can't cross a page boundary).
50 *
51 * Note that any particular CPU may be using a placed vcpu structure,
52 * but we can only optimise if the all are.
53 *
54 * 0: not available, 1: available
55 */
56
57static void __init xen_vcpu_setup(int cpu)
58{
59 /*
60 * WARNING:
61 * before changing MAX_VIRT_CPUS,
62 * check that shared_info fits on a page
63 */
64 BUILD_BUG_ON(sizeof(struct shared_info) > PAGE_SIZE);
65 per_cpu(xen_vcpu, cpu) = &HYPERVISOR_shared_info->vcpu_info[cpu];
66}
67
68void __init xen_setup_vcpu_info_placement(void)
69{
70 int cpu;
71
72 for_each_possible_cpu(cpu)
73 xen_vcpu_setup(cpu);
74}
75
76void __cpuinit
77xen_cpu_init(void)
78{
79 xen_smp_intr_init();
80}
81
82/**************************************************************************
83 * opt feature
84 */
85void
86xen_ia64_enable_opt_feature(void)
87{
88 /* Enable region 7 identity map optimizations in Xen */
89 struct xen_ia64_opt_feature optf;
90
91 optf.cmd = XEN_IA64_OPTF_IDENT_MAP_REG7;
92 optf.on = XEN_IA64_OPTF_ON;
93 optf.pgprot = pgprot_val(PAGE_KERNEL);
94 optf.key = 0; /* No key on linux. */
95 HYPERVISOR_opt_feature(&optf);
96}
diff --git a/arch/ia64/xen/irq_xen.c b/arch/ia64/xen/irq_xen.c
new file mode 100644
index 000000000000..af93aadb68bb
--- /dev/null
+++ b/arch/ia64/xen/irq_xen.c
@@ -0,0 +1,435 @@
1/******************************************************************************
2 * arch/ia64/xen/irq_xen.c
3 *
4 * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
5 * VA Linux Systems Japan K.K.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 *
21 */
22
23#include <linux/cpu.h>
24
25#include <xen/interface/xen.h>
26#include <xen/interface/callback.h>
27#include <xen/events.h>
28
29#include <asm/xen/privop.h>
30
31#include "irq_xen.h"
32
33/***************************************************************************
34 * pv_irq_ops
35 * irq operations
36 */
37
38static int
39xen_assign_irq_vector(int irq)
40{
41 struct physdev_irq irq_op;
42
43 irq_op.irq = irq;
44 if (HYPERVISOR_physdev_op(PHYSDEVOP_alloc_irq_vector, &irq_op))
45 return -ENOSPC;
46
47 return irq_op.vector;
48}
49
50static void
51xen_free_irq_vector(int vector)
52{
53 struct physdev_irq irq_op;
54
55 if (vector < IA64_FIRST_DEVICE_VECTOR ||
56 vector > IA64_LAST_DEVICE_VECTOR)
57 return;
58
59 irq_op.vector = vector;
60 if (HYPERVISOR_physdev_op(PHYSDEVOP_free_irq_vector, &irq_op))
61		printk(KERN_WARNING "%s: xen_free_irq_vector failed, vector=%d\n",
62 __func__, vector);
63}
64
65
66static DEFINE_PER_CPU(int, timer_irq) = -1;
67static DEFINE_PER_CPU(int, ipi_irq) = -1;
68static DEFINE_PER_CPU(int, resched_irq) = -1;
69static DEFINE_PER_CPU(int, cmc_irq) = -1;
70static DEFINE_PER_CPU(int, cmcp_irq) = -1;
71static DEFINE_PER_CPU(int, cpep_irq) = -1;
72#define NAME_SIZE 15
73static DEFINE_PER_CPU(char[NAME_SIZE], timer_name);
74static DEFINE_PER_CPU(char[NAME_SIZE], ipi_name);
75static DEFINE_PER_CPU(char[NAME_SIZE], resched_name);
76static DEFINE_PER_CPU(char[NAME_SIZE], cmc_name);
77static DEFINE_PER_CPU(char[NAME_SIZE], cmcp_name);
78static DEFINE_PER_CPU(char[NAME_SIZE], cpep_name);
79#undef NAME_SIZE
80
81struct saved_irq {
82 unsigned int irq;
83 struct irqaction *action;
84};
85/* 16 should be a generous upper bound, since only a few percpu irqs
86 * are registered early.
87 */
88#define MAX_LATE_IRQ 16
89static struct saved_irq saved_percpu_irqs[MAX_LATE_IRQ];
90static unsigned short late_irq_cnt;
91static unsigned short saved_irq_cnt;
92static int xen_slab_ready;
93
94#ifdef CONFIG_SMP
95/* Dummy stub. Although we could check for XEN_RESCHEDULE_VECTOR before __do_IRQ,
96 * doing so issues several memory accesses on percpu data and
97 * thus adds unnecessary traffic to other paths.
98 */
99static irqreturn_t
100xen_dummy_handler(int irq, void *dev_id)
101{
102
103 return IRQ_HANDLED;
104}
105
106static struct irqaction xen_ipi_irqaction = {
107 .handler = handle_IPI,
108 .flags = IRQF_DISABLED,
109 .name = "IPI"
110};
111
112static struct irqaction xen_resched_irqaction = {
113 .handler = xen_dummy_handler,
114 .flags = IRQF_DISABLED,
115 .name = "resched"
116};
117
118static struct irqaction xen_tlb_irqaction = {
119 .handler = xen_dummy_handler,
120 .flags = IRQF_DISABLED,
121 .name = "tlb_flush"
122};
123#endif
124
125/*
126 * This is the Xen version of percpu irq registration, which needs to
127 * bind to the Xen-specific evtchn sub-system. One trick here is that
128 * the Xen evtchn binding interface depends on kmalloc because the
129 * related port needs to be freed on device/cpu down. So we cache
130 * registrations made on the BSP before the slab is ready and deal with
131 * them later. Registrations that happen after the slab is ready are
132 * hooked to the Xen evtchn immediately.
133 *
134 * FIXME: MCA is not supported so far, and thus the "nomca" boot param
135 * is required.
136 */
137static void
138__xen_register_percpu_irq(unsigned int cpu, unsigned int vec,
139 struct irqaction *action, int save)
140{
141 irq_desc_t *desc;
142 int irq = 0;
143
144 if (xen_slab_ready) {
145 switch (vec) {
146 case IA64_TIMER_VECTOR:
147 snprintf(per_cpu(timer_name, cpu),
148 sizeof(per_cpu(timer_name, cpu)),
149 "%s%d", action->name, cpu);
150 irq = bind_virq_to_irqhandler(VIRQ_ITC, cpu,
151 action->handler, action->flags,
152 per_cpu(timer_name, cpu), action->dev_id);
153 per_cpu(timer_irq, cpu) = irq;
154 break;
155 case IA64_IPI_RESCHEDULE:
156 snprintf(per_cpu(resched_name, cpu),
157 sizeof(per_cpu(resched_name, cpu)),
158 "%s%d", action->name, cpu);
159 irq = bind_ipi_to_irqhandler(XEN_RESCHEDULE_VECTOR, cpu,
160 action->handler, action->flags,
161 per_cpu(resched_name, cpu), action->dev_id);
162 per_cpu(resched_irq, cpu) = irq;
163 break;
164 case IA64_IPI_VECTOR:
165 snprintf(per_cpu(ipi_name, cpu),
166 sizeof(per_cpu(ipi_name, cpu)),
167 "%s%d", action->name, cpu);
168 irq = bind_ipi_to_irqhandler(XEN_IPI_VECTOR, cpu,
169 action->handler, action->flags,
170 per_cpu(ipi_name, cpu), action->dev_id);
171 per_cpu(ipi_irq, cpu) = irq;
172 break;
173 case IA64_CMC_VECTOR:
174 snprintf(per_cpu(cmc_name, cpu),
175 sizeof(per_cpu(cmc_name, cpu)),
176 "%s%d", action->name, cpu);
177 irq = bind_virq_to_irqhandler(VIRQ_MCA_CMC, cpu,
178 action->handler,
179 action->flags,
180 per_cpu(cmc_name, cpu),
181 action->dev_id);
182 per_cpu(cmc_irq, cpu) = irq;
183 break;
184 case IA64_CMCP_VECTOR:
185 snprintf(per_cpu(cmcp_name, cpu),
186 sizeof(per_cpu(cmcp_name, cpu)),
187 "%s%d", action->name, cpu);
188 irq = bind_ipi_to_irqhandler(XEN_CMCP_VECTOR, cpu,
189 action->handler,
190 action->flags,
191 per_cpu(cmcp_name, cpu),
192 action->dev_id);
193 per_cpu(cmcp_irq, cpu) = irq;
194 break;
195 case IA64_CPEP_VECTOR:
196 snprintf(per_cpu(cpep_name, cpu),
197 sizeof(per_cpu(cpep_name, cpu)),
198 "%s%d", action->name, cpu);
199 irq = bind_ipi_to_irqhandler(XEN_CPEP_VECTOR, cpu,
200 action->handler,
201 action->flags,
202 per_cpu(cpep_name, cpu),
203 action->dev_id);
204 per_cpu(cpep_irq, cpu) = irq;
205 break;
206 case IA64_CPE_VECTOR:
207 case IA64_MCA_RENDEZ_VECTOR:
208 case IA64_PERFMON_VECTOR:
209 case IA64_MCA_WAKEUP_VECTOR:
210 case IA64_SPURIOUS_INT_VECTOR:
211 /* No need to complain, these aren't supported. */
212 break;
213 default:
214 printk(KERN_WARNING "Percpu irq %d is unsupported "
215 "by xen!\n", vec);
216 break;
217 }
218 BUG_ON(irq < 0);
219
220 if (irq > 0) {
221 /*
222 * Mark percpu. Without this, migrate_irqs() will
223 * mark the interrupt for migrations and trigger it
224 * on cpu hotplug.
225 */
226 desc = irq_desc + irq;
227 desc->status |= IRQ_PER_CPU;
228 }
229 }
230
231	/* For the BSP, we cache registered percpu irqs, and then re-walk
232	 * them when initializing the APs.
233 */
234 if (!cpu && save) {
235 BUG_ON(saved_irq_cnt == MAX_LATE_IRQ);
236 saved_percpu_irqs[saved_irq_cnt].irq = vec;
237 saved_percpu_irqs[saved_irq_cnt].action = action;
238 saved_irq_cnt++;
239 if (!xen_slab_ready)
240 late_irq_cnt++;
241 }
242}
243
244static void
245xen_register_percpu_irq(ia64_vector vec, struct irqaction *action)
246{
247 __xen_register_percpu_irq(smp_processor_id(), vec, action, 1);
248}
249
250static void
251xen_bind_early_percpu_irq(void)
252{
253 int i;
254
255 xen_slab_ready = 1;
256 /* There's no race when accessing this cached array, since only
257 * the BSP goes through this step, shortly after boot.
258 */
259 for (i = 0; i < late_irq_cnt; i++)
260 __xen_register_percpu_irq(smp_processor_id(),
261 saved_percpu_irqs[i].irq,
262 saved_percpu_irqs[i].action, 0);
263}
264
265/* FIXME: There's no obvious hook at which to check whether slab is
266 * ready, so we resort to a hack and use the late_time_init hook.
267 */
268
269#ifdef CONFIG_HOTPLUG_CPU
270static int __devinit
271unbind_evtchn_callback(struct notifier_block *nfb,
272 unsigned long action, void *hcpu)
273{
274 unsigned int cpu = (unsigned long)hcpu;
275
276 if (action == CPU_DEAD) {
277 /* Unregister evtchn. */
278 if (per_cpu(cpep_irq, cpu) >= 0) {
279 unbind_from_irqhandler(per_cpu(cpep_irq, cpu), NULL);
280 per_cpu(cpep_irq, cpu) = -1;
281 }
282 if (per_cpu(cmcp_irq, cpu) >= 0) {
283 unbind_from_irqhandler(per_cpu(cmcp_irq, cpu), NULL);
284 per_cpu(cmcp_irq, cpu) = -1;
285 }
286 if (per_cpu(cmc_irq, cpu) >= 0) {
287 unbind_from_irqhandler(per_cpu(cmc_irq, cpu), NULL);
288 per_cpu(cmc_irq, cpu) = -1;
289 }
290 if (per_cpu(ipi_irq, cpu) >= 0) {
291 unbind_from_irqhandler(per_cpu(ipi_irq, cpu), NULL);
292 per_cpu(ipi_irq, cpu) = -1;
293 }
294 if (per_cpu(resched_irq, cpu) >= 0) {
295 unbind_from_irqhandler(per_cpu(resched_irq, cpu),
296 NULL);
297 per_cpu(resched_irq, cpu) = -1;
298 }
299 if (per_cpu(timer_irq, cpu) >= 0) {
300 unbind_from_irqhandler(per_cpu(timer_irq, cpu), NULL);
301 per_cpu(timer_irq, cpu) = -1;
302 }
303 }
304 return NOTIFY_OK;
305}
306
307static struct notifier_block unbind_evtchn_notifier = {
308 .notifier_call = unbind_evtchn_callback,
309 .priority = 0
310};
311#endif
312
313void xen_smp_intr_init_early(unsigned int cpu)
314{
315#ifdef CONFIG_SMP
316 unsigned int i;
317
318 for (i = 0; i < saved_irq_cnt; i++)
319 __xen_register_percpu_irq(cpu, saved_percpu_irqs[i].irq,
320 saved_percpu_irqs[i].action, 0);
321#endif
322}
323
324void xen_smp_intr_init(void)
325{
326#ifdef CONFIG_SMP
327 unsigned int cpu = smp_processor_id();
328 struct callback_register event = {
329 .type = CALLBACKTYPE_event,
330 .address = { .ip = (unsigned long)&xen_event_callback },
331 };
332
333 if (cpu == 0) {
334 /* Initialization was already done for boot cpu. */
335#ifdef CONFIG_HOTPLUG_CPU
336 /* Register the notifier only once. */
337 register_cpu_notifier(&unbind_evtchn_notifier);
338#endif
339 return;
340 }
341
342 /* This should piggyback on setting up the vcpu guest context */
343 BUG_ON(HYPERVISOR_callback_op(CALLBACKOP_register, &event));
344#endif /* CONFIG_SMP */
345}
346
347void __init
348xen_irq_init(void)
349{
350 struct callback_register event = {
351 .type = CALLBACKTYPE_event,
352 .address = { .ip = (unsigned long)&xen_event_callback },
353 };
354
355 xen_init_IRQ();
356 BUG_ON(HYPERVISOR_callback_op(CALLBACKOP_register, &event));
357 late_time_init = xen_bind_early_percpu_irq;
358}
359
360void
361xen_platform_send_ipi(int cpu, int vector, int delivery_mode, int redirect)
362{
363#ifdef CONFIG_SMP
364 /* TODO: we need to call vcpu_up here */
365 if (unlikely(vector == ap_wakeup_vector)) {
366 /* XXX
367 * This should be in __cpu_up(cpu) in ia64 smpboot.c
368 * like on x86, but we don't want to modify that file,
369 * so keep it untouched for now.
370 */
371 xen_smp_intr_init_early(cpu);
372
373 xen_send_ipi(cpu, vector);
374 /* vcpu_prepare_and_up(cpu); */
375 return;
376 }
377#endif
378
379 switch (vector) {
380 case IA64_IPI_VECTOR:
381 xen_send_IPI_one(cpu, XEN_IPI_VECTOR);
382 break;
383 case IA64_IPI_RESCHEDULE:
384 xen_send_IPI_one(cpu, XEN_RESCHEDULE_VECTOR);
385 break;
386 case IA64_CMCP_VECTOR:
387 xen_send_IPI_one(cpu, XEN_CMCP_VECTOR);
388 break;
389 case IA64_CPEP_VECTOR:
390 xen_send_IPI_one(cpu, XEN_CPEP_VECTOR);
391 break;
392 case IA64_TIMER_VECTOR: {
393 /* this is used only once by check_sal_cache_flush()
394 at boot time */
395 static int used = 0;
396 if (!used) {
397 xen_send_ipi(cpu, IA64_TIMER_VECTOR);
398 used = 1;
399 break;
400 }
401 /* fallthrough */
402 }
403 default:
404 printk(KERN_WARNING "Unsupported IPI type 0x%x\n",
405 vector);
406 notify_remote_via_irq(0); /* defaults to 0 irq */
407 break;
408 }
409}
410
411static void __init
412xen_register_ipi(void)
413{
414#ifdef CONFIG_SMP
415 register_percpu_irq(IA64_IPI_VECTOR, &xen_ipi_irqaction);
416 register_percpu_irq(IA64_IPI_RESCHEDULE, &xen_resched_irqaction);
417 register_percpu_irq(IA64_IPI_LOCAL_TLB_FLUSH, &xen_tlb_irqaction);
418#endif
419}
420
421static void
422xen_resend_irq(unsigned int vector)
423{
424 (void)resend_irq_on_evtchn(vector);
425}
426
427const struct pv_irq_ops xen_irq_ops __initdata = {
428 .register_ipi = xen_register_ipi,
429
430 .assign_irq_vector = xen_assign_irq_vector,
431 .free_irq_vector = xen_free_irq_vector,
432 .register_percpu_irq = xen_register_percpu_irq,
433
434 .resend_irq = xen_resend_irq,
435};
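
The comment above __xen_register_percpu_irq() describes the key trick in this file: percpu irq registrations requested on the boot CPU before the slab allocator (and hence the evtchn binding code) is usable are only cached, and are replayed later from the late_time_init hook by xen_bind_early_percpu_irq(). A minimal stand-alone C sketch of that deferred-registration pattern, with all names hypothetical, might look like this:

#include <stdio.h>

#define MAX_LATE_IRQ 8

struct saved_irq {
	int vec;
	void (*handler)(int vec);
};

static struct saved_irq saved_percpu_irqs[MAX_LATE_IRQ];
static int saved_irq_cnt, late_irq_cnt;
static int slab_ready;				/* stands in for xen_slab_ready */

static void bind_to_evtchn(int vec, void (*handler)(int vec))
{
	printf("binding vector %d\n", vec);	/* real code allocates a port */
	handler(vec);
}

static void register_percpu_irq(int vec, void (*handler)(int vec))
{
	if (slab_ready)
		bind_to_evtchn(vec, handler);	/* normal, immediate path */

	/* Boot-CPU path: cache the request so it can be replayed. */
	saved_percpu_irqs[saved_irq_cnt].vec = vec;
	saved_percpu_irqs[saved_irq_cnt].handler = handler;
	saved_irq_cnt++;
	if (!slab_ready)
		late_irq_cnt++;			/* still needs a real binding */
}

static void bind_early_percpu_irq(void)		/* the late_time_init replay */
{
	slab_ready = 1;
	for (int i = 0; i < late_irq_cnt; i++)
		bind_to_evtchn(saved_percpu_irqs[i].vec,
			       saved_percpu_irqs[i].handler);
}

static void dummy(int vec)
{
	printf("handler for vector %d\n", vec);
}

int main(void)
{
	register_percpu_irq(10, dummy);		/* too early: only cached */
	register_percpu_irq(11, dummy);
	bind_early_percpu_irq();		/* replays both registrations */
	return 0;
}

As in the kernel code, every boot-CPU registration is cached (so it can also be re-walked when the APs come up), but only the pre-slab ones are counted for the late replay.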
diff --git a/arch/ia64/xen/irq_xen.h b/arch/ia64/xen/irq_xen.h
new file mode 100644
index 000000000000..26110f330c87
--- /dev/null
+++ b/arch/ia64/xen/irq_xen.h
@@ -0,0 +1,34 @@
1/******************************************************************************
2 * arch/ia64/xen/irq_xen.h
3 *
4 * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
5 * VA Linux Systems Japan K.K.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 *
21 */
22
23#ifndef IRQ_XEN_H
24#define IRQ_XEN_H
25
26extern void (*late_time_init)(void);
27extern char xen_event_callback;
28void __init xen_init_IRQ(void);
29
30extern const struct pv_irq_ops xen_irq_ops __initdata;
31extern void xen_smp_intr_init(void);
32extern void xen_send_ipi(int cpu, int vec);
33
34#endif /* IRQ_XEN_H */
diff --git a/arch/ia64/xen/machvec.c b/arch/ia64/xen/machvec.c
new file mode 100644
index 000000000000..4ad588a7c279
--- /dev/null
+++ b/arch/ia64/xen/machvec.c
@@ -0,0 +1,4 @@
1#define MACHVEC_PLATFORM_NAME xen
2#define MACHVEC_PLATFORM_HEADER <asm/machvec_xen.h>
3#include <asm/machvec_init.h>
4
diff --git a/arch/ia64/xen/suspend.c b/arch/ia64/xen/suspend.c
new file mode 100644
index 000000000000..fd66b048c6fa
--- /dev/null
+++ b/arch/ia64/xen/suspend.c
@@ -0,0 +1,64 @@
1/******************************************************************************
2 * arch/ia64/xen/suspend.c
3 *
4 * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
5 * VA Linux Systems Japan K.K.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 *
21 * suspend/resume
22 */
23
24#include <xen/xen-ops.h>
25#include <asm/xen/hypervisor.h>
26#include "time.h"
27
28void
29xen_mm_pin_all(void)
30{
31 /* nothing */
32}
33
34void
35xen_mm_unpin_all(void)
36{
37 /* nothing */
38}
39
40void xen_pre_device_suspend(void)
41{
42 /* nothing */
43}
44
45void
46xen_pre_suspend()
47{
48 /* nothing */
49}
50
51void
52xen_post_suspend(int suspend_cancelled)
53{
54 if (suspend_cancelled)
55 return;
56
57 xen_ia64_enable_opt_feature();
58 /* add more if necessary */
59}
60
61void xen_arch_resume(void)
62{
63 xen_timer_resume_on_aps();
64}
diff --git a/arch/ia64/xen/time.c b/arch/ia64/xen/time.c
new file mode 100644
index 000000000000..d15a94c330fb
--- /dev/null
+++ b/arch/ia64/xen/time.c
@@ -0,0 +1,213 @@
1/******************************************************************************
2 * arch/ia64/xen/time.c
3 *
4 * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
5 * VA Linux Systems Japan K.K.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 *
21 */
22
23#include <linux/delay.h>
24#include <linux/kernel_stat.h>
25#include <linux/posix-timers.h>
26#include <linux/irq.h>
27#include <linux/clocksource.h>
28
29#include <asm/timex.h>
30
31#include <asm/xen/hypervisor.h>
32
33#include <xen/interface/vcpu.h>
34
35#include "../kernel/fsyscall_gtod_data.h"
36
37DEFINE_PER_CPU(struct vcpu_runstate_info, runstate);
38DEFINE_PER_CPU(unsigned long, processed_stolen_time);
39DEFINE_PER_CPU(unsigned long, processed_blocked_time);
40
41/* taken from i386/kernel/time-xen.c */
42static void xen_init_missing_ticks_accounting(int cpu)
43{
44 struct vcpu_register_runstate_memory_area area;
45 struct vcpu_runstate_info *runstate = &per_cpu(runstate, cpu);
46 int rc;
47
48 memset(runstate, 0, sizeof(*runstate));
49
50 area.addr.v = runstate;
51 rc = HYPERVISOR_vcpu_op(VCPUOP_register_runstate_memory_area, cpu,
52 &area);
53 WARN_ON(rc && rc != -ENOSYS);
54
55 per_cpu(processed_blocked_time, cpu) = runstate->time[RUNSTATE_blocked];
56 per_cpu(processed_stolen_time, cpu) = runstate->time[RUNSTATE_runnable]
57 + runstate->time[RUNSTATE_offline];
58}
59
60/*
61 * Runstate accounting
62 */
63/* stolen from arch/x86/xen/time.c */
64static void get_runstate_snapshot(struct vcpu_runstate_info *res)
65{
66 u64 state_time;
67 struct vcpu_runstate_info *state;
68
69 BUG_ON(preemptible());
70
71 state = &__get_cpu_var(runstate);
72
73 /*
74 * The runstate info is always updated by the hypervisor on
75 * the current CPU, so there's no need to use anything
76 * stronger than a compiler barrier when fetching it.
77 */
78 do {
79 state_time = state->state_entry_time;
80 rmb();
81 *res = *state;
82 rmb();
83 } while (state->state_entry_time != state_time);
84}
85
86#define NS_PER_TICK (1000000000LL/HZ)
87
88static unsigned long
89consider_steal_time(unsigned long new_itm)
90{
91 unsigned long stolen, blocked;
92 unsigned long delta_itm = 0, stolentick = 0;
93 int cpu = smp_processor_id();
94 struct vcpu_runstate_info runstate;
95 struct task_struct *p = current;
96
97 get_runstate_snapshot(&runstate);
98
99 /*
100 * Check for the effect of vcpu migration.
101 * In that case the itc value goes backwards,
102 * which would produce a huge stolen-time value.
103 * This function just detects and rejects that effect.
104 */
105 if (!time_after_eq(runstate.time[RUNSTATE_blocked],
106 per_cpu(processed_blocked_time, cpu)))
107 blocked = 0;
108
109 if (!time_after_eq(runstate.time[RUNSTATE_runnable] +
110 runstate.time[RUNSTATE_offline],
111 per_cpu(processed_stolen_time, cpu)))
112 stolen = 0;
113
114 if (!time_after(delta_itm + new_itm, ia64_get_itc()))
115 stolentick = ia64_get_itc() - new_itm;
116
117 do_div(stolentick, NS_PER_TICK);
118 stolentick++;
119
120 do_div(stolen, NS_PER_TICK);
121
122 if (stolen > stolentick)
123 stolen = stolentick;
124
125 stolentick -= stolen;
126 do_div(blocked, NS_PER_TICK);
127
128 if (blocked > stolentick)
129 blocked = stolentick;
130
131 if (stolen > 0 || blocked > 0) {
132 account_steal_time(NULL, jiffies_to_cputime(stolen));
133 account_steal_time(idle_task(cpu), jiffies_to_cputime(blocked));
134 run_local_timers();
135
136 if (rcu_pending(cpu))
137 rcu_check_callbacks(cpu, user_mode(get_irq_regs()));
138
139 scheduler_tick();
140 run_posix_cpu_timers(p);
141 delta_itm += local_cpu_data->itm_delta * (stolen + blocked);
142
143 if (cpu == time_keeper_id) {
144 write_seqlock(&xtime_lock);
145 do_timer(stolen + blocked);
146 local_cpu_data->itm_next = delta_itm + new_itm;
147 write_sequnlock(&xtime_lock);
148 } else {
149 local_cpu_data->itm_next = delta_itm + new_itm;
150 }
151 per_cpu(processed_stolen_time, cpu) += NS_PER_TICK * stolen;
152 per_cpu(processed_blocked_time, cpu) += NS_PER_TICK * blocked;
153 }
154 return delta_itm;
155}
156
157static int xen_do_steal_accounting(unsigned long *new_itm)
158{
159 unsigned long delta_itm;
160 delta_itm = consider_steal_time(*new_itm);
161 *new_itm += delta_itm;
162 if (time_after(*new_itm, ia64_get_itc()) && delta_itm)
163 return 1;
164
165 return 0;
166}
167
168static void xen_itc_jitter_data_reset(void)
169{
170 u64 lcycle, ret;
171
172 do {
173 lcycle = itc_jitter_data.itc_lastcycle;
174 ret = cmpxchg(&itc_jitter_data.itc_lastcycle, lcycle, 0);
175 } while (unlikely(ret != lcycle));
176}
177
178struct pv_time_ops xen_time_ops __initdata = {
179 .init_missing_ticks_accounting = xen_init_missing_ticks_accounting,
180 .do_steal_accounting = xen_do_steal_accounting,
181 .clocksource_resume = xen_itc_jitter_data_reset,
182};
183
184/* Called after suspend, to resume time. */
185static void xen_local_tick_resume(void)
186{
187 /* Just trigger a tick. */
188 ia64_cpu_local_tick();
189 touch_softlockup_watchdog();
190}
191
192void
193xen_timer_resume(void)
194{
195 unsigned int cpu;
196
197 xen_local_tick_resume();
198
199 for_each_online_cpu(cpu)
200 xen_init_missing_ticks_accounting(cpu);
201}
202
203static void ia64_cpu_local_tick_fn(void *unused)
204{
205 xen_local_tick_resume();
206 xen_init_missing_ticks_accounting(smp_processor_id());
207}
208
209void
210xen_timer_resume_on_aps(void)
211{
212 smp_call_function(&ia64_cpu_local_tick_fn, NULL, 1);
213}
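
get_runstate_snapshot() above copies a structure that the hypervisor may update while the copy is in progress, so it re-reads state_entry_time around the copy and retries until both reads agree; because the update happens on the local CPU, compiler barriers are enough there. The stand-alone sketch below (hypothetical names, a second thread standing in for the hypervisor, and full memory barriers because the writer really is concurrent here) shows the same retry idiom, using a seqcount-style odd/even bit to mark an update in flight:

/* Build with: cc -std=gnu99 -pthread snapshot.c */
#include <pthread.h>
#include <stdio.h>

struct runstate {			/* stands in for vcpu_runstate_info */
	volatile unsigned long entry_time;	/* odd while an update runs */
	volatile unsigned long time[2];
};

static struct runstate shared;

/* Copy a consistent snapshot: retry while the writer is mid-update. */
static void snapshot(struct runstate *res)
{
	unsigned long t;

	do {
		t = shared.entry_time;
		__sync_synchronize();
		res->time[0] = shared.time[0];
		res->time[1] = shared.time[1];
		__sync_synchronize();
	} while (shared.entry_time != t || (t & 1));
}

static void *writer(void *arg)
{
	(void)arg;
	for (int i = 0; i < 100000; i++) {
		shared.entry_time++;		/* odd: update started */
		__sync_synchronize();
		shared.time[0]++;		/* invariant: time[1] == 2*time[0] */
		shared.time[1] += 2;
		__sync_synchronize();
		shared.entry_time++;		/* even: update finished */
	}
	return NULL;
}

int main(void)
{
	pthread_t tid;
	struct runstate snap;

	pthread_create(&tid, NULL, writer, NULL);
	for (int i = 0; i < 100000; i++) {
		snapshot(&snap);
		if (snap.time[1] != 2 * snap.time[0]) {
			puts("torn snapshot!");	/* never reached if retry works */
			return 1;
		}
	}
	pthread_join(tid, NULL);
	printf("snapshot: %lu %lu\n", snap.time[0], snap.time[1]);
	return 0;
}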
diff --git a/arch/ia64/xen/time.h b/arch/ia64/xen/time.h
new file mode 100644
index 000000000000..f98d7e1a42f0
--- /dev/null
+++ b/arch/ia64/xen/time.h
@@ -0,0 +1,24 @@
1/******************************************************************************
2 * arch/ia64/xen/time.h
3 *
4 * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
5 * VA Linux Systems Japan K.K.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 *
21 */
22
23extern struct pv_time_ops xen_time_ops __initdata;
24void xen_timer_resume_on_aps(void);
diff --git a/arch/ia64/xen/xcom_hcall.c b/arch/ia64/xen/xcom_hcall.c
new file mode 100644
index 000000000000..ccaf7431f7c8
--- /dev/null
+++ b/arch/ia64/xen/xcom_hcall.c
@@ -0,0 +1,441 @@
1/*
2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License as published by
4 * the Free Software Foundation; either version 2 of the License, or
5 * (at your option) any later version.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 * You should have received a copy of the GNU General Public License
13 * along with this program; if not, write to the Free Software
14 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
15 *
16 * Tristan Gingold <tristan.gingold@bull.net>
17 *
18 * Copyright (c) 2007
19 * Isaku Yamahata <yamahata at valinux co jp>
20 * VA Linux Systems Japan K.K.
21 * consolidate mini and inline version.
22 */
23
24#include <linux/module.h>
25#include <xen/interface/xen.h>
26#include <xen/interface/memory.h>
27#include <xen/interface/grant_table.h>
28#include <xen/interface/callback.h>
29#include <xen/interface/vcpu.h>
30#include <asm/xen/hypervisor.h>
31#include <asm/xen/xencomm.h>
32
33/* Xencomm notes:
34 * This file defines hypercalls to be used by xencomm. The hypercalls simply
35 * create inlines or mini descriptors for pointers and then call the raw arch
36 * hypercall xencomm_arch_hypercall_XXX
37 *
38 * If the arch wants to directly use these hypercalls, simply define macros
39 * in asm/xen/hypercall.h, eg:
40 * #define HYPERVISOR_sched_op xencomm_hypercall_sched_op
41 *
42 * The arch may also define HYPERVISOR_xxx as a function and do more operations
43 * before/after doing the hypercall.
44 *
45 * Note: because only inline or mini descriptors are created, these functions
46 * must only be called with in-kernel memory parameters.
47 */
48
49int
50xencomm_hypercall_console_io(int cmd, int count, char *str)
51{
52 /* xen early printk uses the console io hypercall before
53 * xencomm is initialized. In that case, we just ignore it.
54 */
55 if (!xencomm_is_initialized())
56 return 0;
57
58 return xencomm_arch_hypercall_console_io
59 (cmd, count, xencomm_map_no_alloc(str, count));
60}
61EXPORT_SYMBOL_GPL(xencomm_hypercall_console_io);
62
63int
64xencomm_hypercall_event_channel_op(int cmd, void *op)
65{
66 struct xencomm_handle *desc;
67 desc = xencomm_map_no_alloc(op, sizeof(struct evtchn_op));
68 if (desc == NULL)
69 return -EINVAL;
70
71 return xencomm_arch_hypercall_event_channel_op(cmd, desc);
72}
73EXPORT_SYMBOL_GPL(xencomm_hypercall_event_channel_op);
74
75int
76xencomm_hypercall_xen_version(int cmd, void *arg)
77{
78 struct xencomm_handle *desc;
79 unsigned int argsize;
80
81 switch (cmd) {
82 case XENVER_version:
83 /* do not actually pass an argument */
84 return xencomm_arch_hypercall_xen_version(cmd, 0);
85 case XENVER_extraversion:
86 argsize = sizeof(struct xen_extraversion);
87 break;
88 case XENVER_compile_info:
89 argsize = sizeof(struct xen_compile_info);
90 break;
91 case XENVER_capabilities:
92 argsize = sizeof(struct xen_capabilities_info);
93 break;
94 case XENVER_changeset:
95 argsize = sizeof(struct xen_changeset_info);
96 break;
97 case XENVER_platform_parameters:
98 argsize = sizeof(struct xen_platform_parameters);
99 break;
100 case XENVER_get_features:
101 argsize = (arg == NULL) ? 0 : sizeof(struct xen_feature_info);
102 break;
103
104 default:
105 printk(KERN_DEBUG
106 "%s: unknown version op %d\n", __func__, cmd);
107 return -ENOSYS;
108 }
109
110 desc = xencomm_map_no_alloc(arg, argsize);
111 if (desc == NULL)
112 return -EINVAL;
113
114 return xencomm_arch_hypercall_xen_version(cmd, desc);
115}
116EXPORT_SYMBOL_GPL(xencomm_hypercall_xen_version);
117
118int
119xencomm_hypercall_physdev_op(int cmd, void *op)
120{
121 unsigned int argsize;
122
123 switch (cmd) {
124 case PHYSDEVOP_apic_read:
125 case PHYSDEVOP_apic_write:
126 argsize = sizeof(struct physdev_apic);
127 break;
128 case PHYSDEVOP_alloc_irq_vector:
129 case PHYSDEVOP_free_irq_vector:
130 argsize = sizeof(struct physdev_irq);
131 break;
132 case PHYSDEVOP_irq_status_query:
133 argsize = sizeof(struct physdev_irq_status_query);
134 break;
135
136 default:
137 printk(KERN_DEBUG
138 "%s: unknown physdev op %d\n", __func__, cmd);
139 return -ENOSYS;
140 }
141
142 return xencomm_arch_hypercall_physdev_op
143 (cmd, xencomm_map_no_alloc(op, argsize));
144}
145
146static int
147xencommize_grant_table_op(struct xencomm_mini **xc_area,
148 unsigned int cmd, void *op, unsigned int count,
149 struct xencomm_handle **desc)
150{
151 struct xencomm_handle *desc1;
152 unsigned int argsize;
153
154 switch (cmd) {
155 case GNTTABOP_map_grant_ref:
156 argsize = sizeof(struct gnttab_map_grant_ref);
157 break;
158 case GNTTABOP_unmap_grant_ref:
159 argsize = sizeof(struct gnttab_unmap_grant_ref);
160 break;
161 case GNTTABOP_setup_table:
162 {
163 struct gnttab_setup_table *setup = op;
164
165 argsize = sizeof(*setup);
166
167 if (count != 1)
168 return -EINVAL;
169 desc1 = __xencomm_map_no_alloc
170 (xen_guest_handle(setup->frame_list),
171 setup->nr_frames *
172 sizeof(*xen_guest_handle(setup->frame_list)),
173 *xc_area);
174 if (desc1 == NULL)
175 return -EINVAL;
176 (*xc_area)++;
177 set_xen_guest_handle(setup->frame_list, (void *)desc1);
178 break;
179 }
180 case GNTTABOP_dump_table:
181 argsize = sizeof(struct gnttab_dump_table);
182 break;
183 case GNTTABOP_transfer:
184 argsize = sizeof(struct gnttab_transfer);
185 break;
186 case GNTTABOP_copy:
187 argsize = sizeof(struct gnttab_copy);
188 break;
189 case GNTTABOP_query_size:
190 argsize = sizeof(struct gnttab_query_size);
191 break;
192 default:
193 printk(KERN_DEBUG "%s: unknown hypercall grant table op %d\n",
194 __func__, cmd);
195 BUG();
196 }
197
198 *desc = __xencomm_map_no_alloc(op, count * argsize, *xc_area);
199 if (*desc == NULL)
200 return -EINVAL;
201 (*xc_area)++;
202
203 return 0;
204}
205
206int
207xencomm_hypercall_grant_table_op(unsigned int cmd, void *op,
208 unsigned int count)
209{
210 int rc;
211 struct xencomm_handle *desc;
212 XENCOMM_MINI_ALIGNED(xc_area, 2);
213
214 rc = xencommize_grant_table_op(&xc_area, cmd, op, count, &desc);
215 if (rc)
216 return rc;
217
218 return xencomm_arch_hypercall_grant_table_op(cmd, desc, count);
219}
220EXPORT_SYMBOL_GPL(xencomm_hypercall_grant_table_op);
221
222int
223xencomm_hypercall_sched_op(int cmd, void *arg)
224{
225 struct xencomm_handle *desc;
226 unsigned int argsize;
227
228 switch (cmd) {
229 case SCHEDOP_yield:
230 case SCHEDOP_block:
231 argsize = 0;
232 break;
233 case SCHEDOP_shutdown:
234 argsize = sizeof(struct sched_shutdown);
235 break;
236 case SCHEDOP_poll:
237 {
238 struct sched_poll *poll = arg;
239 struct xencomm_handle *ports;
240
241 argsize = sizeof(struct sched_poll);
242 ports = xencomm_map_no_alloc(xen_guest_handle(poll->ports),
243 sizeof(*xen_guest_handle(poll->ports)));
244
245 set_xen_guest_handle(poll->ports, (void *)ports);
246 break;
247 }
248 default:
249 printk(KERN_DEBUG "%s: unknown sched op %d\n", __func__, cmd);
250 return -ENOSYS;
251 }
252
253 desc = xencomm_map_no_alloc(arg, argsize);
254 if (desc == NULL)
255 return -EINVAL;
256
257 return xencomm_arch_hypercall_sched_op(cmd, desc);
258}
259EXPORT_SYMBOL_GPL(xencomm_hypercall_sched_op);
260
261int
262xencomm_hypercall_multicall(void *call_list, int nr_calls)
263{
264 int rc;
265 int i;
266 struct multicall_entry *mce;
267 struct xencomm_handle *desc;
268 XENCOMM_MINI_ALIGNED(xc_area, nr_calls * 2);
269
270 for (i = 0; i < nr_calls; i++) {
271 mce = (struct multicall_entry *)call_list + i;
272
273 switch (mce->op) {
274 case __HYPERVISOR_update_va_mapping:
275 case __HYPERVISOR_mmu_update:
276 /* No-op on ia64. */
277 break;
278 case __HYPERVISOR_grant_table_op:
279 rc = xencommize_grant_table_op
280 (&xc_area,
281 mce->args[0], (void *)mce->args[1],
282 mce->args[2], &desc);
283 if (rc)
284 return rc;
285 mce->args[1] = (unsigned long)desc;
286 break;
287 case __HYPERVISOR_memory_op:
288 default:
289 printk(KERN_DEBUG
290 "%s: unhandled multicall op entry op %lu\n",
291 __func__, mce->op);
292 return -ENOSYS;
293 }
294 }
295
296 desc = xencomm_map_no_alloc(call_list,
297 nr_calls * sizeof(struct multicall_entry));
298 if (desc == NULL)
299 return -EINVAL;
300
301 return xencomm_arch_hypercall_multicall(desc, nr_calls);
302}
303EXPORT_SYMBOL_GPL(xencomm_hypercall_multicall);
304
305int
306xencomm_hypercall_callback_op(int cmd, void *arg)
307{
308 unsigned int argsize;
309 switch (cmd) {
310 case CALLBACKOP_register:
311 argsize = sizeof(struct callback_register);
312 break;
313 case CALLBACKOP_unregister:
314 argsize = sizeof(struct callback_unregister);
315 break;
316 default:
317 printk(KERN_DEBUG
318 "%s: unknown callback op %d\n", __func__, cmd);
319 return -ENOSYS;
320 }
321
322 return xencomm_arch_hypercall_callback_op
323 (cmd, xencomm_map_no_alloc(arg, argsize));
324}
325
326static int
327xencommize_memory_reservation(struct xencomm_mini *xc_area,
328 struct xen_memory_reservation *mop)
329{
330 struct xencomm_handle *desc;
331
332 desc = __xencomm_map_no_alloc(xen_guest_handle(mop->extent_start),
333 mop->nr_extents *
334 sizeof(*xen_guest_handle(mop->extent_start)),
335 xc_area);
336 if (desc == NULL)
337 return -EINVAL;
338
339 set_xen_guest_handle(mop->extent_start, (void *)desc);
340 return 0;
341}
342
343int
344xencomm_hypercall_memory_op(unsigned int cmd, void *arg)
345{
346 GUEST_HANDLE(xen_pfn_t) extent_start_va[2] = { {NULL}, {NULL} };
347 struct xen_memory_reservation *xmr = NULL;
348 int rc;
349 struct xencomm_handle *desc;
350 unsigned int argsize;
351 XENCOMM_MINI_ALIGNED(xc_area, 2);
352
353 switch (cmd) {
354 case XENMEM_increase_reservation:
355 case XENMEM_decrease_reservation:
356 case XENMEM_populate_physmap:
357 xmr = (struct xen_memory_reservation *)arg;
358 set_xen_guest_handle(extent_start_va[0],
359 xen_guest_handle(xmr->extent_start));
360
361 argsize = sizeof(*xmr);
362 rc = xencommize_memory_reservation(xc_area, xmr);
363 if (rc)
364 return rc;
365 xc_area++;
366 break;
367
368 case XENMEM_maximum_ram_page:
369 argsize = 0;
370 break;
371
372 case XENMEM_add_to_physmap:
373 argsize = sizeof(struct xen_add_to_physmap);
374 break;
375
376 default:
377 printk(KERN_DEBUG "%s: unknown memory op %d\n", __func__, cmd);
378 return -ENOSYS;
379 }
380
381 desc = xencomm_map_no_alloc(arg, argsize);
382 if (desc == NULL)
383 return -EINVAL;
384
385 rc = xencomm_arch_hypercall_memory_op(cmd, desc);
386
387 switch (cmd) {
388 case XENMEM_increase_reservation:
389 case XENMEM_decrease_reservation:
390 case XENMEM_populate_physmap:
391 set_xen_guest_handle(xmr->extent_start,
392 xen_guest_handle(extent_start_va[0]));
393 break;
394 }
395
396 return rc;
397}
398EXPORT_SYMBOL_GPL(xencomm_hypercall_memory_op);
399
400int
401xencomm_hypercall_suspend(unsigned long srec)
402{
403 struct sched_shutdown arg;
404
405 arg.reason = SHUTDOWN_suspend;
406
407 return xencomm_arch_hypercall_sched_op(
408 SCHEDOP_shutdown, xencomm_map_no_alloc(&arg, sizeof(arg)));
409}
410
411long
412xencomm_hypercall_vcpu_op(int cmd, int cpu, void *arg)
413{
414 unsigned int argsize;
415 switch (cmd) {
416 case VCPUOP_register_runstate_memory_area: {
417 struct vcpu_register_runstate_memory_area *area =
418 (struct vcpu_register_runstate_memory_area *)arg;
419 argsize = sizeof(*area);
420 set_xen_guest_handle(area->addr.h,
421 (void *)xencomm_map_no_alloc(area->addr.v,
422 sizeof(*area->addr.v)));
423 break;
424 }
425
426 default:
427 printk(KERN_DEBUG "%s: unknown vcpu op %d\n", __func__, cmd);
428 return -ENOSYS;
429 }
430
431 return xencomm_arch_hypercall_vcpu_op(cmd, cpu,
432 xencomm_map_no_alloc(arg, argsize));
433}
434
435long
436xencomm_hypercall_opt_feature(void *arg)
437{
438 return xencomm_arch_hypercall_opt_feature(
439 xencomm_map_no_alloc(arg,
440 sizeof(struct xen_ia64_opt_feature)));
441}
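
The "Xencomm notes" comment at the top of this file describes the pattern every wrapper in it follows: wrap each pointer argument in an inline or mini descriptor with xencomm_map_no_alloc() and then invoke the raw xencomm_arch_hypercall_*() routine, so that an architecture can simply #define HYPERVISOR_xxx to the wrapper. A toy, stand-alone illustration of that shape (the names and types below are made up, not the real xencomm API) could be:

#include <stdint.h>
#include <stdio.h>

struct desc {				/* stands in for struct xencomm_handle */
	uint64_t paddr;
	uint64_t len;
};

/* stands in for xencomm_map_no_alloc(): describe where the buffer lives */
static struct desc map_no_alloc(void *ptr, unsigned long len)
{
	struct desc d = { (uint64_t)(uintptr_t)ptr, len };
	return d;
}

/* stands in for the raw xencomm_arch_hypercall_console_io() */
static int arch_console_io(int cmd, int count, struct desc d)
{
	printf("raw hypercall: cmd=%d count=%d buf=%#llx len=%llu\n",
	       cmd, count, (unsigned long long)d.paddr,
	       (unsigned long long)d.len);
	return count;
}

/* stands in for xencomm_hypercall_console_io(): wrap, then call through */
static int hypercall_console_io(int cmd, int count, char *str)
{
	return arch_console_io(cmd, count, map_no_alloc(str, count));
}

/* An arch that wants the wrapper only has to alias the public name,
 * in the spirit of the #define mentioned in the comment above. */
#define HYPERVISOR_console_io hypercall_console_io

int main(void)
{
	char msg[] = "hello";

	return HYPERVISOR_console_io(0, sizeof(msg), msg) == sizeof(msg) ? 0 : 1;
}

The point of the no-alloc descriptor variants, as the file's own comments note for early printk, is that they can be used without any memory allocation, which keeps the wrappers usable very early in boot.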
diff --git a/arch/ia64/xen/xen_pv_ops.c b/arch/ia64/xen/xen_pv_ops.c
new file mode 100644
index 000000000000..04cd12350455
--- /dev/null
+++ b/arch/ia64/xen/xen_pv_ops.c
@@ -0,0 +1,364 @@
1/******************************************************************************
2 * arch/ia64/xen/xen_pv_ops.c
3 *
4 * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
5 * VA Linux Systems Japan K.K.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 *
21 */
22
23#include <linux/console.h>
24#include <linux/irq.h>
25#include <linux/kernel.h>
26#include <linux/pm.h>
27
28#include <asm/xen/hypervisor.h>
29#include <asm/xen/xencomm.h>
30#include <asm/xen/privop.h>
31
32#include "irq_xen.h"
33#include "time.h"
34
35/***************************************************************************
36 * general info
37 */
38static struct pv_info xen_info __initdata = {
39 .kernel_rpl = 2, /* or 1: determine at runtime */
40 .paravirt_enabled = 1,
41 .name = "Xen/ia64",
42};
43
44#define IA64_RSC_PL_SHIFT 2
45#define IA64_RSC_PL_BIT_SIZE 2
46#define IA64_RSC_PL_MASK \
47 (((1UL << IA64_RSC_PL_BIT_SIZE) - 1) << IA64_RSC_PL_SHIFT)
48
49static void __init
50xen_info_init(void)
51{
52 /* Xenified Linux/ia64 may run at pl = 1 or 2.
53 * Determine which at run time. */
54 unsigned long rsc = ia64_getreg(_IA64_REG_AR_RSC);
55 unsigned int rpl = (rsc & IA64_RSC_PL_MASK) >> IA64_RSC_PL_SHIFT;
56 xen_info.kernel_rpl = rpl;
57}
58
59/***************************************************************************
60 * pv_init_ops
61 * initialization hooks.
62 */
63
64static void
65xen_panic_hypercall(struct unw_frame_info *info, void *arg)
66{
67 current->thread.ksp = (__u64)info->sw - 16;
68 HYPERVISOR_shutdown(SHUTDOWN_crash);
69 /* we're never actually going to get here... */
70}
71
72static int
73xen_panic_event(struct notifier_block *this, unsigned long event, void *ptr)
74{
75 unw_init_running(xen_panic_hypercall, NULL);
76 /* we're never actually going to get here... */
77 return NOTIFY_DONE;
78}
79
80static struct notifier_block xen_panic_block = {
81 xen_panic_event, NULL, 0 /* try to go last */
82};
83
84static void xen_pm_power_off(void)
85{
86 local_irq_disable();
87 HYPERVISOR_shutdown(SHUTDOWN_poweroff);
88}
89
90static void __init
91xen_banner(void)
92{
93 printk(KERN_INFO
94 "Running on Xen! pl = %d start_info_pfn=0x%lx nr_pages=%ld "
95 "flags=0x%x\n",
96 xen_info.kernel_rpl,
97 HYPERVISOR_shared_info->arch.start_info_pfn,
98 xen_start_info->nr_pages, xen_start_info->flags);
99}
100
101static int __init
102xen_reserve_memory(struct rsvd_region *region)
103{
104 region->start = (unsigned long)__va(
105 (HYPERVISOR_shared_info->arch.start_info_pfn << PAGE_SHIFT));
106 region->end = region->start + PAGE_SIZE;
107 return 1;
108}
109
110static void __init
111xen_arch_setup_early(void)
112{
113 struct shared_info *s;
114 BUG_ON(!xen_pv_domain());
115
116 s = HYPERVISOR_shared_info;
117 xen_start_info = __va(s->arch.start_info_pfn << PAGE_SHIFT);
118
119 /* Must be done before any hypercall. */
120 xencomm_initialize();
121
122 xen_setup_features();
123 /* Register a call for panic conditions. */
124 atomic_notifier_chain_register(&panic_notifier_list,
125 &xen_panic_block);
126 pm_power_off = xen_pm_power_off;
127
128 xen_ia64_enable_opt_feature();
129}
130
131static void __init
132xen_arch_setup_console(char **cmdline_p)
133{
134 add_preferred_console("xenboot", 0, NULL);
135 add_preferred_console("tty", 0, NULL);
136 /* use hvc_xen */
137 add_preferred_console("hvc", 0, NULL);
138
139#if !defined(CONFIG_VT) || !defined(CONFIG_DUMMY_CONSOLE)
140 conswitchp = NULL;
141#endif
142}
143
144static int __init
145xen_arch_setup_nomca(void)
146{
147 return 1;
148}
149
150static void __init
151xen_post_smp_prepare_boot_cpu(void)
152{
153 xen_setup_vcpu_info_placement();
154}
155
156static const struct pv_init_ops xen_init_ops __initdata = {
157 .banner = xen_banner,
158
159 .reserve_memory = xen_reserve_memory,
160
161 .arch_setup_early = xen_arch_setup_early,
162 .arch_setup_console = xen_arch_setup_console,
163 .arch_setup_nomca = xen_arch_setup_nomca,
164
165 .post_smp_prepare_boot_cpu = xen_post_smp_prepare_boot_cpu,
166};
167
168/***************************************************************************
169 * pv_cpu_ops
170 * intrinsics hooks.
171 */
172
173static void xen_setreg(int regnum, unsigned long val)
174{
175 switch (regnum) {
176 case _IA64_REG_AR_KR0 ... _IA64_REG_AR_KR7:
177 xen_set_kr(regnum - _IA64_REG_AR_KR0, val);
178 break;
179#ifdef CONFIG_IA32_SUPPORT
180 case _IA64_REG_AR_EFLAG:
181 xen_set_eflag(val);
182 break;
183#endif
184 case _IA64_REG_CR_TPR:
185 xen_set_tpr(val);
186 break;
187 case _IA64_REG_CR_ITM:
188 xen_set_itm(val);
189 break;
190 case _IA64_REG_CR_EOI:
191 xen_eoi(val);
192 break;
193 default:
194 ia64_native_setreg_func(regnum, val);
195 break;
196 }
197}
198
199static unsigned long xen_getreg(int regnum)
200{
201 unsigned long res;
202
203 switch (regnum) {
204 case _IA64_REG_PSR:
205 res = xen_get_psr();
206 break;
207#ifdef CONFIG_IA32_SUPPORT
208 case _IA64_REG_AR_EFLAG:
209 res = xen_get_eflag();
210 break;
211#endif
212 case _IA64_REG_CR_IVR:
213 res = xen_get_ivr();
214 break;
215 case _IA64_REG_CR_TPR:
216 res = xen_get_tpr();
217 break;
218 default:
219 res = ia64_native_getreg_func(regnum);
220 break;
221 }
222 return res;
223}
224
225/* turning on interrupts is a bit more complicated: write to the
226 * memory-mapped virtual psr.i bit first (to avoid a race condition),
227 * then, if any interrupts were pending, we have to execute a hyperprivop
228 * to ensure the pending interrupt gets delivered; else we're done! */
229static void
230xen_ssm_i(void)
231{
232 int old = xen_get_virtual_psr_i();
233 xen_set_virtual_psr_i(1);
234 barrier();
235 if (!old && xen_get_virtual_pend())
236 xen_hyper_ssm_i();
237}
238
239/* turning off interrupts can be paravirtualized simply by writing
240 * to a memory-mapped virtual psr.i bit (implemented as a 16-bit bool) */
241static void
242xen_rsm_i(void)
243{
244 xen_set_virtual_psr_i(0);
245 barrier();
246}
247
248static unsigned long
249xen_get_psr_i(void)
250{
251 return xen_get_virtual_psr_i() ? IA64_PSR_I : 0;
252}
253
254static void
255xen_intrin_local_irq_restore(unsigned long mask)
256{
257 if (mask & IA64_PSR_I)
258 xen_ssm_i();
259 else
260 xen_rsm_i();
261}
262
263static const struct pv_cpu_ops xen_cpu_ops __initdata = {
264 .fc = xen_fc,
265 .thash = xen_thash,
266 .get_cpuid = xen_get_cpuid,
267 .get_pmd = xen_get_pmd,
268 .getreg = xen_getreg,
269 .setreg = xen_setreg,
270 .ptcga = xen_ptcga,
271 .get_rr = xen_get_rr,
272 .set_rr = xen_set_rr,
273 .set_rr0_to_rr4 = xen_set_rr0_to_rr4,
274 .ssm_i = xen_ssm_i,
275 .rsm_i = xen_rsm_i,
276 .get_psr_i = xen_get_psr_i,
277 .intrin_local_irq_restore
278 = xen_intrin_local_irq_restore,
279};
280
281/******************************************************************************
282 * replacement of hand-written assembly code.
283 */
284
285extern char xen_switch_to;
286extern char xen_leave_syscall;
287extern char xen_work_processed_syscall;
288extern char xen_leave_kernel;
289
290const struct pv_cpu_asm_switch xen_cpu_asm_switch = {
291 .switch_to = (unsigned long)&xen_switch_to,
292 .leave_syscall = (unsigned long)&xen_leave_syscall,
293 .work_processed_syscall = (unsigned long)&xen_work_processed_syscall,
294 .leave_kernel = (unsigned long)&xen_leave_kernel,
295};
296
297/***************************************************************************
298 * pv_iosapic_ops
299 * iosapic read/write hooks.
300 */
301static void
302xen_pcat_compat_init(void)
303{
304 /* nothing */
305}
306
307static struct irq_chip*
308xen_iosapic_get_irq_chip(unsigned long trigger)
309{
310 return NULL;
311}
312
313static unsigned int
314xen_iosapic_read(char __iomem *iosapic, unsigned int reg)
315{
316 struct physdev_apic apic_op;
317 int ret;
318
319 apic_op.apic_physbase = (unsigned long)iosapic -
320 __IA64_UNCACHED_OFFSET;
321 apic_op.reg = reg;
322 ret = HYPERVISOR_physdev_op(PHYSDEVOP_apic_read, &apic_op);
323 if (ret)
324 return ret;
325 return apic_op.value;
326}
327
328static void
329xen_iosapic_write(char __iomem *iosapic, unsigned int reg, u32 val)
330{
331 struct physdev_apic apic_op;
332
333 apic_op.apic_physbase = (unsigned long)iosapic -
334 __IA64_UNCACHED_OFFSET;
335 apic_op.reg = reg;
336 apic_op.value = val;
337 HYPERVISOR_physdev_op(PHYSDEVOP_apic_write, &apic_op);
338}
339
340static const struct pv_iosapic_ops xen_iosapic_ops __initdata = {
341 .pcat_compat_init = xen_pcat_compat_init,
342 .__get_irq_chip = xen_iosapic_get_irq_chip,
343
344 .__read = xen_iosapic_read,
345 .__write = xen_iosapic_write,
346};
347
348/***************************************************************************
349 * pv_ops initialization
350 */
351
352void __init
353xen_setup_pv_ops(void)
354{
355 xen_info_init();
356 pv_info = xen_info;
357 pv_init_ops = xen_init_ops;
358 pv_cpu_ops = xen_cpu_ops;
359 pv_iosapic_ops = xen_iosapic_ops;
360 pv_irq_ops = xen_irq_ops;
361 pv_time_ops = xen_time_ops;
362
363 paravirt_cpu_asm_init(&xen_cpu_asm_switch);
364}
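
xen_setup_pv_ops() works by filling in a handful of operation tables (pv_info, pv_init_ops, pv_cpu_ops, and so on) once at boot; the rest of the kernel calls through those tables without knowing which backend is behind them. A minimal stand-alone sketch of that function-pointer-table pattern, with purely illustrative names, might be:

#include <stdio.h>

struct cpu_ops {
	unsigned long (*getreg)(int regnum);
	void (*setreg)(int regnum, unsigned long val);
};

static unsigned long fake_regs[8];	/* stand-in register file */

static unsigned long native_getreg(int r) { return fake_regs[r]; }
static void native_setreg(int r, unsigned long v) { fake_regs[r] = v; }

static unsigned long para_getreg(int r)
{
	printf("paravirt: read of reg %d goes through the hypervisor\n", r);
	return fake_regs[r];
}

static void para_setreg(int r, unsigned long v)
{
	printf("paravirt: write of reg %d goes through the hypervisor\n", r);
	fake_regs[r] = v;
}

static const struct cpu_ops native_cpu_ops = { native_getreg, native_setreg };
static const struct cpu_ops para_cpu_ops = { para_getreg, para_setreg };

/* Filled in once during early boot, then treated as read-only. */
static struct cpu_ops pv_cpu_ops;

static void setup_pv_ops(int on_hypervisor)
{
	pv_cpu_ops = on_hypervisor ? para_cpu_ops : native_cpu_ops;
}

int main(void)
{
	setup_pv_ops(1);			/* pretend Xen was detected */
	pv_cpu_ops.setreg(3, 42);
	printf("reg 3 = %lu\n", pv_cpu_ops.getreg(3));
	return 0;
}

Selecting the whole table once at boot keeps the per-call overhead to a single indirect call, which is why the real code copies the xen_* ops structures into the global pv_* variables instead of testing for Xen on every operation.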
diff --git a/arch/ia64/xen/xencomm.c b/arch/ia64/xen/xencomm.c
new file mode 100644
index 000000000000..1f5d7ac82e97
--- /dev/null
+++ b/arch/ia64/xen/xencomm.c
@@ -0,0 +1,105 @@
1/*
2 * Copyright (C) 2006 Hollis Blanchard <hollisb@us.ibm.com>, IBM Corporation
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 */
18
19#include <linux/mm.h>
20
21static unsigned long kernel_virtual_offset;
22static int is_xencomm_initialized;
23
24/* For xen early printk: it uses the console io hypercall, which uses
25 * xencomm. However, early printk may run before xencomm is initialized.
26 */
27int
28xencomm_is_initialized(void)
29{
30 return is_xencomm_initialized;
31}
32
33void
34xencomm_initialize(void)
35{
36 kernel_virtual_offset = KERNEL_START - ia64_tpa(KERNEL_START);
37 is_xencomm_initialized = 1;
38}
39
40/* Translate virtual address to physical address. */
41unsigned long
42xencomm_vtop(unsigned long vaddr)
43{
44 struct page *page;
45 struct vm_area_struct *vma;
46
47 if (vaddr == 0)
48 return 0UL;
49
50 if (REGION_NUMBER(vaddr) == 5) {
51 pgd_t *pgd;
52 pud_t *pud;
53 pmd_t *pmd;
54 pte_t *ptep;
55
56 /* On ia64, TASK_SIZE refers to current. It is not initialized
57 during boot.
58 Furthermore the kernel is relocatable and __pa() doesn't
59 work on such addresses. */
60 if (vaddr >= KERNEL_START
61 && vaddr < (KERNEL_START + KERNEL_TR_PAGE_SIZE))
62 return vaddr - kernel_virtual_offset;
63
64 /* In kernel area -- virtually mapped. */
65 pgd = pgd_offset_k(vaddr);
66 if (pgd_none(*pgd) || pgd_bad(*pgd))
67 return ~0UL;
68
69 pud = pud_offset(pgd, vaddr);
70 if (pud_none(*pud) || pud_bad(*pud))
71 return ~0UL;
72
73 pmd = pmd_offset(pud, vaddr);
74 if (pmd_none(*pmd) || pmd_bad(*pmd))
75 return ~0UL;
76
77 ptep = pte_offset_kernel(pmd, vaddr);
78 if (!ptep)
79 return ~0UL;
80
81 return (pte_val(*ptep) & _PFN_MASK) | (vaddr & ~PAGE_MASK);
82 }
83
84 if (vaddr > TASK_SIZE) {
85 /* percpu variables */
86 if (REGION_NUMBER(vaddr) == 7 &&
87 REGION_OFFSET(vaddr) >= (1ULL << IA64_MAX_PHYS_BITS))
88 return ia64_tpa(vaddr);
89
90 /* kernel address */
91 return __pa(vaddr);
92 }
93
94 /* XXX double-check (lack of) locking */
95 vma = find_extend_vma(current->mm, vaddr);
96 if (!vma)
97 return ~0UL;
98
99 /* We assume the page is modified. */
100 page = follow_page(vma, vaddr, FOLL_WRITE | FOLL_TOUCH);
101 if (!page)
102 return ~0UL;
103
104 return (page_to_pfn(page) << PAGE_SHIFT) | (vaddr & ~PAGE_MASK);
105}
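
xencomm_vtop() above dispatches on the ia64 region number held in the top bits of the virtual address: identity-mapped kernel addresses are translated arithmetically, region-5 addresses need a page-table walk, and user addresses go through the VMA. The stand-alone sketch below only illustrates that dispatch structure; the region rules and translations are simplified stand-ins, not the real ia64 layout:

#include <stdint.h>
#include <stdio.h>

#define REGION_SHIFT		61
#define REGION_NUMBER(v)	((uint64_t)(v) >> REGION_SHIFT)
#define REGION_MASK		((1ULL << REGION_SHIFT) - 1)

/* Translate a "virtual" address by region, or ~0 if we cannot. */
static uint64_t toy_vtop(uint64_t vaddr)
{
	switch (REGION_NUMBER(vaddr)) {
	case 7:
		/* identity-mapped kernel region: strip the region bits */
		return vaddr & REGION_MASK;
	case 5:
		/* virtually mapped kernel region: would need a
		 * pgd/pud/pmd/pte walk, as in the real function */
		return ~0ULL;
	default:
		/* user address: would need find_vma()/follow_page() */
		return ~0ULL;
	}
}

int main(void)
{
	uint64_t kva = (7ULL << REGION_SHIFT) | 0x1234;

	printf("%#llx -> %#llx\n", (unsigned long long)kva,
	       (unsigned long long)toy_vtop(kva));
	return 0;
}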
diff --git a/arch/ia64/xen/xenivt.S b/arch/ia64/xen/xenivt.S
new file mode 100644
index 000000000000..3e71d50584d9
--- /dev/null
+++ b/arch/ia64/xen/xenivt.S
@@ -0,0 +1,52 @@
1/*
2 * arch/ia64/xen/ivt.S
3 *
4 * Copyright (C) 2005 Hewlett-Packard Co
5 * Dan Magenheimer <dan.magenheimer@hp.com>
6 *
7 * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
8 * VA Linux Systems Japan K.K.
9 * pv_ops.
10 */
11
12#include <asm/asmmacro.h>
13#include <asm/kregs.h>
14#include <asm/pgtable.h>
15
16#include "../kernel/minstate.h"
17
18 .section .text,"ax"
19GLOBAL_ENTRY(xen_event_callback)
20 mov r31=pr // prepare to save predicates
21 ;;
22 SAVE_MIN_WITH_COVER // uses r31; defines r2 and r3
23 ;;
24 movl r3=XSI_PSR_IC
25 mov r14=1
26 ;;
27 st4 [r3]=r14
28 ;;
29 adds r3=8,r2 // set up second base pointer for SAVE_REST
30 srlz.i // ensure everybody knows psr.ic is back on
31 ;;
32 SAVE_REST
33 ;;
341:
35 alloc r14=ar.pfs,0,0,1,0 // must be first in an insn group
36 add out0=16,sp // pass pointer to pt_regs as first arg
37 ;;
38 br.call.sptk.many b0=xen_evtchn_do_upcall
39 ;;
40 movl r20=XSI_PSR_I_ADDR
41 ;;
42 ld8 r20=[r20]
43 ;;
44 adds r20=-1,r20 // vcpu_info->evtchn_upcall_pending
45 ;;
46 ld1 r20=[r20]
47 ;;
48 cmp.ne p6,p0=r20,r0 // if there are pending events,
49 (p6) br.spnt.few 1b // call evtchn_do_upcall again.
50 br.sptk.many xen_leave_kernel // we know ia64_leave_kernel is
51 // paravirtualized as xen_leave_kernel
52END(xen_event_callback)
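
xen_event_callback above keeps re-entering xen_evtchn_do_upcall() as long as the per-vcpu evtchn_upcall_pending byte is still set, and only then branches to xen_leave_kernel. A small stand-alone sketch of that retry shape, with made-up names and a counter standing in for newly arriving events, might be:

#include <stdio.h>

struct vcpu_info {
	unsigned char evtchn_upcall_pending;	/* set by the hypervisor */
};

static struct vcpu_info vcpu;
static int injected = 3;			/* pretend 3 event batches arrive */

static void evtchn_do_upcall(void)
{
	printf("handling one batch of events\n");
	if (--injected > 0)
		vcpu.evtchn_upcall_pending = 1;	/* more arrived meanwhile */
	else
		vcpu.evtchn_upcall_pending = 0;
}

int main(void)
{
	vcpu.evtchn_upcall_pending = 1;
	do {
		evtchn_do_upcall();
	} while (vcpu.evtchn_upcall_pending);	/* mirrors the branch back to 1: */
	return 0;					/* here the real code exits via xen_leave_kernel */
}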
diff --git a/arch/ia64/xen/xensetup.S b/arch/ia64/xen/xensetup.S
new file mode 100644
index 000000000000..28fed1fcc079
--- /dev/null
+++ b/arch/ia64/xen/xensetup.S
@@ -0,0 +1,83 @@
1/*
2 * Support routines for Xen
3 *
4 * Copyright (C) 2005 Dan Magenheimer <dan.magenheimer@hp.com>
5 */
6
7#include <asm/processor.h>
8#include <asm/asmmacro.h>
9#include <asm/pgtable.h>
10#include <asm/system.h>
11#include <asm/paravirt.h>
12#include <asm/xen/privop.h>
13#include <linux/elfnote.h>
14#include <linux/init.h>
15#include <xen/interface/elfnote.h>
16
17 .section .data.read_mostly
18 .align 8
19 .global xen_domain_type
20xen_domain_type:
21 data4 XEN_NATIVE_ASM
22 .previous
23
24 __INIT
25ENTRY(startup_xen)
26 // Calculate load offset.
27 // The constant, LOAD_OFFSET, can't be used because the boot
28 // loader doesn't always load to the LMA specified by the vmlinux.lds.
29 mov r9=ip // must be the first instruction to make sure
30 // that r9 = the physical address of startup_xen.
31 // Usually r9 = startup_xen - LOAD_OFFSET
32 movl r8=startup_xen
33 ;;
34 sub r9=r9,r8 // Usually r9 = -LOAD_OFFSET.
35
36 mov r10=PARAVIRT_HYPERVISOR_TYPE_XEN
37 movl r11=_start
38 ;;
39 add r11=r11,r9
40 movl r8=hypervisor_type
41 ;;
42 add r8=r8,r9
43 mov b0=r11
44 ;;
45 st8 [r8]=r10
46 br.cond.sptk.many b0
47 ;;
48END(startup_xen)
49
50 ELFNOTE(Xen, XEN_ELFNOTE_GUEST_OS, .asciz "linux")
51 ELFNOTE(Xen, XEN_ELFNOTE_GUEST_VERSION, .asciz "2.6")
52 ELFNOTE(Xen, XEN_ELFNOTE_XEN_VERSION, .asciz "xen-3.0")
53 ELFNOTE(Xen, XEN_ELFNOTE_ENTRY, data8.ua startup_xen - LOAD_OFFSET)
54
55#define isBP p3 // are we the Bootstrap Processor?
56
57 .text
58
59GLOBAL_ENTRY(xen_setup_hook)
60 mov r8=XEN_PV_DOMAIN_ASM
61(isBP) movl r9=xen_domain_type;;
62(isBP) st4 [r9]=r8
63 movl r10=xen_ivt;;
64
65 mov cr.iva=r10
66
67 /* Set xsi base. */
68#define FW_HYPERCALL_SET_SHARED_INFO_VA 0x600
69(isBP) mov r2=FW_HYPERCALL_SET_SHARED_INFO_VA
70(isBP) movl r28=XSI_BASE;;
71(isBP) break 0x1000;;
72
73 /* setup pv_ops */
74(isBP) mov r4=rp
75 ;;
76(isBP) br.call.sptk.many rp=xen_setup_pv_ops
77 ;;
78(isBP) mov rp=r4
79 ;;
80
81 br.ret.sptk.many rp
82 ;;
83END(xen_setup_hook)