aboutsummaryrefslogtreecommitdiffstats
path: root/arch/ia64
diff options
context:
space:
mode:
Diffstat (limited to 'arch/ia64')
-rw-r--r--arch/ia64/Kconfig12
-rw-r--r--arch/ia64/Makefile2
-rw-r--r--arch/ia64/configs/xen_domu_defconfig199
-rw-r--r--arch/ia64/include/asm/acpi.h2
-rw-r--r--arch/ia64/include/asm/machvec.h2
-rw-r--r--arch/ia64/include/asm/machvec_xen.h22
-rw-r--r--arch/ia64/include/asm/meminit.h1
-rw-r--r--arch/ia64/include/asm/paravirt.h1
-rw-r--r--arch/ia64/include/asm/pvclock-abi.h2
-rw-r--r--arch/ia64/include/asm/sync_bitops.h51
-rw-r--r--arch/ia64/include/asm/xen/events.h41
-rw-r--r--arch/ia64/include/asm/xen/hypercall.h265
-rw-r--r--arch/ia64/include/asm/xen/hypervisor.h61
-rw-r--r--arch/ia64/include/asm/xen/inst.h486
-rw-r--r--arch/ia64/include/asm/xen/interface.h363
-rw-r--r--arch/ia64/include/asm/xen/irq.h44
-rw-r--r--arch/ia64/include/asm/xen/minstate.h143
-rw-r--r--arch/ia64/include/asm/xen/page-coherent.h38
-rw-r--r--arch/ia64/include/asm/xen/page.h65
-rw-r--r--arch/ia64/include/asm/xen/patchlist.h38
-rw-r--r--arch/ia64/include/asm/xen/privop.h135
-rw-r--r--arch/ia64/include/asm/xen/xcom_hcall.h51
-rw-r--r--arch/ia64/include/asm/xen/xencomm.h42
-rw-r--r--arch/ia64/include/uapi/asm/break.h9
-rw-r--r--arch/ia64/kernel/acpi.c3
-rw-r--r--arch/ia64/kernel/asm-offsets.c32
-rw-r--r--arch/ia64/kernel/head.S3
-rw-r--r--arch/ia64/kernel/nr-irqs.c4
-rw-r--r--arch/ia64/kernel/paravirt_inst.h3
-rw-r--r--arch/ia64/kernel/paravirt_patchlist.h4
-rw-r--r--arch/ia64/kernel/vmlinux.lds.S6
-rw-r--r--arch/ia64/xen/Kconfig25
-rw-r--r--arch/ia64/xen/Makefile37
-rw-r--r--arch/ia64/xen/gate-data.S3
-rw-r--r--arch/ia64/xen/grant-table.c94
-rw-r--r--arch/ia64/xen/hypercall.S88
-rw-r--r--arch/ia64/xen/hypervisor.c97
-rw-r--r--arch/ia64/xen/irq_xen.c443
-rw-r--r--arch/ia64/xen/irq_xen.h34
-rw-r--r--arch/ia64/xen/machvec.c4
-rw-r--r--arch/ia64/xen/suspend.c59
-rw-r--r--arch/ia64/xen/time.c257
-rw-r--r--arch/ia64/xen/time.h24
-rw-r--r--arch/ia64/xen/xcom_hcall.c441
-rw-r--r--arch/ia64/xen/xen_pv_ops.c1141
-rw-r--r--arch/ia64/xen/xencomm.c106
-rw-r--r--arch/ia64/xen/xenivt.S52
-rw-r--r--arch/ia64/xen/xensetup.S80
48 files changed, 1 insertions, 5114 deletions
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 4e4119b0e691..a8c3a11dc5ab 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -147,9 +147,6 @@ config PARAVIRT
147 over full virtualization. However, when run without a hypervisor 147 over full virtualization. However, when run without a hypervisor
148 the kernel is theoretically slower and slightly larger. 148 the kernel is theoretically slower and slightly larger.
149 149
150
151source "arch/ia64/xen/Kconfig"
152
153endif 150endif
154 151
155choice 152choice
@@ -175,7 +172,6 @@ config IA64_GENERIC
175 SGI-SN2 For SGI Altix systems 172 SGI-SN2 For SGI Altix systems
176 SGI-UV For SGI UV systems 173 SGI-UV For SGI UV systems
177 Ski-simulator For the HP simulator <http://www.hpl.hp.com/research/linux/ski/> 174 Ski-simulator For the HP simulator <http://www.hpl.hp.com/research/linux/ski/>
178 Xen-domU For xen domU system
179 175
180 If you don't know what to do, choose "generic". 176 If you don't know what to do, choose "generic".
181 177
@@ -231,14 +227,6 @@ config IA64_HP_SIM
231 bool "Ski-simulator" 227 bool "Ski-simulator"
232 select SWIOTLB 228 select SWIOTLB
233 229
234config IA64_XEN_GUEST
235 bool "Xen guest"
236 select SWIOTLB
237 depends on XEN
238 help
239 Build a kernel that runs on Xen guest domain. At this moment only
240 16KB page size in supported.
241
242endchoice 230endchoice
243 231
244choice 232choice
diff --git a/arch/ia64/Makefile b/arch/ia64/Makefile
index be7bfa12b705..f37238f45bcd 100644
--- a/arch/ia64/Makefile
+++ b/arch/ia64/Makefile
@@ -51,11 +51,9 @@ core-$(CONFIG_IA64_DIG_VTD) += arch/ia64/dig/
51core-$(CONFIG_IA64_GENERIC) += arch/ia64/dig/ 51core-$(CONFIG_IA64_GENERIC) += arch/ia64/dig/
52core-$(CONFIG_IA64_HP_ZX1) += arch/ia64/dig/ 52core-$(CONFIG_IA64_HP_ZX1) += arch/ia64/dig/
53core-$(CONFIG_IA64_HP_ZX1_SWIOTLB) += arch/ia64/dig/ 53core-$(CONFIG_IA64_HP_ZX1_SWIOTLB) += arch/ia64/dig/
54core-$(CONFIG_IA64_XEN_GUEST) += arch/ia64/dig/
55core-$(CONFIG_IA64_SGI_SN2) += arch/ia64/sn/ 54core-$(CONFIG_IA64_SGI_SN2) += arch/ia64/sn/
56core-$(CONFIG_IA64_SGI_UV) += arch/ia64/uv/ 55core-$(CONFIG_IA64_SGI_UV) += arch/ia64/uv/
57core-$(CONFIG_KVM) += arch/ia64/kvm/ 56core-$(CONFIG_KVM) += arch/ia64/kvm/
58core-$(CONFIG_XEN) += arch/ia64/xen/
59 57
60drivers-$(CONFIG_PCI) += arch/ia64/pci/ 58drivers-$(CONFIG_PCI) += arch/ia64/pci/
61drivers-$(CONFIG_IA64_HP_SIM) += arch/ia64/hp/sim/ 59drivers-$(CONFIG_IA64_HP_SIM) += arch/ia64/hp/sim/
diff --git a/arch/ia64/configs/xen_domu_defconfig b/arch/ia64/configs/xen_domu_defconfig
deleted file mode 100644
index b025acfde5c1..000000000000
--- a/arch/ia64/configs/xen_domu_defconfig
+++ /dev/null
@@ -1,199 +0,0 @@
1CONFIG_EXPERIMENTAL=y
2CONFIG_SYSVIPC=y
3CONFIG_POSIX_MQUEUE=y
4CONFIG_IKCONFIG=y
5CONFIG_IKCONFIG_PROC=y
6CONFIG_LOG_BUF_SHIFT=20
7CONFIG_SYSFS_DEPRECATED_V2=y
8CONFIG_BLK_DEV_INITRD=y
9CONFIG_KALLSYMS_ALL=y
10CONFIG_MODULES=y
11CONFIG_MODULE_UNLOAD=y
12CONFIG_MODVERSIONS=y
13CONFIG_MODULE_SRCVERSION_ALL=y
14# CONFIG_BLK_DEV_BSG is not set
15CONFIG_PARAVIRT_GUEST=y
16CONFIG_IA64_XEN_GUEST=y
17CONFIG_MCKINLEY=y
18CONFIG_IA64_CYCLONE=y
19CONFIG_SMP=y
20CONFIG_NR_CPUS=16
21CONFIG_HOTPLUG_CPU=y
22CONFIG_PERMIT_BSP_REMOVE=y
23CONFIG_FORCE_CPEI_RETARGET=y
24CONFIG_IA64_MCA_RECOVERY=y
25CONFIG_PERFMON=y
26CONFIG_IA64_PALINFO=y
27CONFIG_KEXEC=y
28CONFIG_EFI_VARS=y
29CONFIG_BINFMT_MISC=m
30CONFIG_ACPI_PROCFS=y
31CONFIG_ACPI_BUTTON=m
32CONFIG_ACPI_FAN=m
33CONFIG_ACPI_PROCESSOR=m
34CONFIG_ACPI_CONTAINER=m
35CONFIG_HOTPLUG_PCI=y
36CONFIG_HOTPLUG_PCI_ACPI=m
37CONFIG_PACKET=y
38CONFIG_UNIX=y
39CONFIG_INET=y
40CONFIG_IP_MULTICAST=y
41CONFIG_ARPD=y
42CONFIG_SYN_COOKIES=y
43# CONFIG_INET_LRO is not set
44# CONFIG_IPV6 is not set
45CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
46CONFIG_BLK_DEV_LOOP=m
47CONFIG_BLK_DEV_CRYPTOLOOP=m
48CONFIG_BLK_DEV_NBD=m
49CONFIG_BLK_DEV_RAM=y
50CONFIG_IDE=y
51CONFIG_BLK_DEV_IDECD=y
52CONFIG_BLK_DEV_GENERIC=y
53CONFIG_BLK_DEV_CMD64X=y
54CONFIG_BLK_DEV_PIIX=y
55CONFIG_SCSI=y
56CONFIG_BLK_DEV_SD=y
57CONFIG_CHR_DEV_ST=m
58CONFIG_BLK_DEV_SR=m
59CONFIG_CHR_DEV_SG=m
60CONFIG_SCSI_SYM53C8XX_2=y
61CONFIG_SCSI_QLOGIC_1280=y
62CONFIG_MD=y
63CONFIG_BLK_DEV_MD=m
64CONFIG_MD_LINEAR=m
65CONFIG_MD_RAID0=m
66CONFIG_MD_RAID1=m
67CONFIG_MD_MULTIPATH=m
68CONFIG_BLK_DEV_DM=m
69CONFIG_DM_CRYPT=m
70CONFIG_DM_SNAPSHOT=m
71CONFIG_DM_MIRROR=m
72CONFIG_DM_ZERO=m
73CONFIG_FUSION=y
74CONFIG_FUSION_SPI=y
75CONFIG_FUSION_FC=y
76CONFIG_FUSION_CTL=y
77CONFIG_NETDEVICES=y
78CONFIG_DUMMY=m
79CONFIG_NET_ETHERNET=y
80CONFIG_NET_TULIP=y
81CONFIG_TULIP=m
82CONFIG_NET_PCI=y
83CONFIG_NET_VENDOR_INTEL=y
84CONFIG_E100=m
85CONFIG_E1000=y
86CONFIG_TIGON3=y
87CONFIG_NETCONSOLE=y
88# CONFIG_SERIO_SERPORT is not set
89CONFIG_GAMEPORT=m
90CONFIG_SERIAL_NONSTANDARD=y
91CONFIG_SERIAL_8250=y
92CONFIG_SERIAL_8250_CONSOLE=y
93CONFIG_SERIAL_8250_NR_UARTS=6
94CONFIG_SERIAL_8250_EXTENDED=y
95CONFIG_SERIAL_8250_SHARE_IRQ=y
96# CONFIG_HW_RANDOM is not set
97CONFIG_EFI_RTC=y
98CONFIG_RAW_DRIVER=m
99CONFIG_HPET=y
100CONFIG_AGP=m
101CONFIG_DRM=m
102CONFIG_DRM_TDFX=m
103CONFIG_DRM_R128=m
104CONFIG_DRM_RADEON=m
105CONFIG_DRM_MGA=m
106CONFIG_DRM_SIS=m
107CONFIG_HID_GYRATION=y
108CONFIG_HID_NTRIG=y
109CONFIG_HID_PANTHERLORD=y
110CONFIG_HID_PETALYNX=y
111CONFIG_HID_SAMSUNG=y
112CONFIG_HID_SONY=y
113CONFIG_HID_SUNPLUS=y
114CONFIG_HID_TOPSEED=y
115CONFIG_USB=y
116CONFIG_USB_DEVICEFS=y
117CONFIG_USB_EHCI_HCD=m
118CONFIG_USB_OHCI_HCD=m
119CONFIG_USB_UHCI_HCD=y
120CONFIG_USB_STORAGE=m
121CONFIG_EXT2_FS=y
122CONFIG_EXT2_FS_XATTR=y
123CONFIG_EXT2_FS_POSIX_ACL=y
124CONFIG_EXT2_FS_SECURITY=y
125CONFIG_EXT3_FS=y
126CONFIG_EXT3_FS_POSIX_ACL=y
127CONFIG_EXT3_FS_SECURITY=y
128CONFIG_REISERFS_FS=y
129CONFIG_REISERFS_FS_XATTR=y
130CONFIG_REISERFS_FS_POSIX_ACL=y
131CONFIG_REISERFS_FS_SECURITY=y
132CONFIG_XFS_FS=y
133CONFIG_AUTOFS_FS=y
134CONFIG_AUTOFS4_FS=y
135CONFIG_ISO9660_FS=m
136CONFIG_JOLIET=y
137CONFIG_UDF_FS=m
138CONFIG_VFAT_FS=y
139CONFIG_NTFS_FS=m
140CONFIG_PROC_KCORE=y
141CONFIG_TMPFS=y
142CONFIG_HUGETLBFS=y
143CONFIG_NFS_FS=m
144CONFIG_NFS_V3=y
145CONFIG_NFS_V4=y
146CONFIG_NFSD=m
147CONFIG_NFSD_V4=y
148CONFIG_SMB_FS=m
149CONFIG_SMB_NLS_DEFAULT=y
150CONFIG_CIFS=m
151CONFIG_PARTITION_ADVANCED=y
152CONFIG_SGI_PARTITION=y
153CONFIG_EFI_PARTITION=y
154CONFIG_NLS_CODEPAGE_437=y
155CONFIG_NLS_CODEPAGE_737=m
156CONFIG_NLS_CODEPAGE_775=m
157CONFIG_NLS_CODEPAGE_850=m
158CONFIG_NLS_CODEPAGE_852=m
159CONFIG_NLS_CODEPAGE_855=m
160CONFIG_NLS_CODEPAGE_857=m
161CONFIG_NLS_CODEPAGE_860=m
162CONFIG_NLS_CODEPAGE_861=m
163CONFIG_NLS_CODEPAGE_862=m
164CONFIG_NLS_CODEPAGE_863=m
165CONFIG_NLS_CODEPAGE_864=m
166CONFIG_NLS_CODEPAGE_865=m
167CONFIG_NLS_CODEPAGE_866=m
168CONFIG_NLS_CODEPAGE_869=m
169CONFIG_NLS_CODEPAGE_936=m
170CONFIG_NLS_CODEPAGE_950=m
171CONFIG_NLS_CODEPAGE_932=m
172CONFIG_NLS_CODEPAGE_949=m
173CONFIG_NLS_CODEPAGE_874=m
174CONFIG_NLS_ISO8859_8=m
175CONFIG_NLS_CODEPAGE_1250=m
176CONFIG_NLS_CODEPAGE_1251=m
177CONFIG_NLS_ISO8859_1=y
178CONFIG_NLS_ISO8859_2=m
179CONFIG_NLS_ISO8859_3=m
180CONFIG_NLS_ISO8859_4=m
181CONFIG_NLS_ISO8859_5=m
182CONFIG_NLS_ISO8859_6=m
183CONFIG_NLS_ISO8859_7=m
184CONFIG_NLS_ISO8859_9=m
185CONFIG_NLS_ISO8859_13=m
186CONFIG_NLS_ISO8859_14=m
187CONFIG_NLS_ISO8859_15=m
188CONFIG_NLS_KOI8_R=m
189CONFIG_NLS_KOI8_U=m
190CONFIG_NLS_UTF8=m
191CONFIG_MAGIC_SYSRQ=y
192CONFIG_DEBUG_KERNEL=y
193CONFIG_DEBUG_MUTEXES=y
194# CONFIG_RCU_CPU_STALL_DETECTOR is not set
195CONFIG_IA64_GRANULE_16MB=y
196CONFIG_CRYPTO_ECB=m
197CONFIG_CRYPTO_PCBC=m
198CONFIG_CRYPTO_MD5=y
199# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/ia64/include/asm/acpi.h b/arch/ia64/include/asm/acpi.h
index faa1bf0da815..d651102a4d45 100644
--- a/arch/ia64/include/asm/acpi.h
+++ b/arch/ia64/include/asm/acpi.h
@@ -111,8 +111,6 @@ static inline const char *acpi_get_sysname (void)
111 return "uv"; 111 return "uv";
112# elif defined (CONFIG_IA64_DIG) 112# elif defined (CONFIG_IA64_DIG)
113 return "dig"; 113 return "dig";
114# elif defined (CONFIG_IA64_XEN_GUEST)
115 return "xen";
116# elif defined(CONFIG_IA64_DIG_VTD) 114# elif defined(CONFIG_IA64_DIG_VTD)
117 return "dig_vtd"; 115 return "dig_vtd";
118# else 116# else
diff --git a/arch/ia64/include/asm/machvec.h b/arch/ia64/include/asm/machvec.h
index 2d1ad4b11a85..9c39bdfc2da8 100644
--- a/arch/ia64/include/asm/machvec.h
+++ b/arch/ia64/include/asm/machvec.h
@@ -113,8 +113,6 @@ extern void machvec_tlb_migrate_finish (struct mm_struct *);
113# include <asm/machvec_sn2.h> 113# include <asm/machvec_sn2.h>
114# elif defined (CONFIG_IA64_SGI_UV) 114# elif defined (CONFIG_IA64_SGI_UV)
115# include <asm/machvec_uv.h> 115# include <asm/machvec_uv.h>
116# elif defined (CONFIG_IA64_XEN_GUEST)
117# include <asm/machvec_xen.h>
118# elif defined (CONFIG_IA64_GENERIC) 116# elif defined (CONFIG_IA64_GENERIC)
119 117
120# ifdef MACHVEC_PLATFORM_HEADER 118# ifdef MACHVEC_PLATFORM_HEADER
diff --git a/arch/ia64/include/asm/machvec_xen.h b/arch/ia64/include/asm/machvec_xen.h
deleted file mode 100644
index 8b8bd0eb3923..000000000000
--- a/arch/ia64/include/asm/machvec_xen.h
+++ /dev/null
@@ -1,22 +0,0 @@
1#ifndef _ASM_IA64_MACHVEC_XEN_h
2#define _ASM_IA64_MACHVEC_XEN_h
3
4extern ia64_mv_setup_t dig_setup;
5extern ia64_mv_cpu_init_t xen_cpu_init;
6extern ia64_mv_irq_init_t xen_irq_init;
7extern ia64_mv_send_ipi_t xen_platform_send_ipi;
8
9/*
10 * This stuff has dual use!
11 *
12 * For a generic kernel, the macros are used to initialize the
13 * platform's machvec structure. When compiling a non-generic kernel,
14 * the macros are used directly.
15 */
16#define ia64_platform_name "xen"
17#define platform_setup dig_setup
18#define platform_cpu_init xen_cpu_init
19#define platform_irq_init xen_irq_init
20#define platform_send_ipi xen_platform_send_ipi
21
22#endif /* _ASM_IA64_MACHVEC_XEN_h */
diff --git a/arch/ia64/include/asm/meminit.h b/arch/ia64/include/asm/meminit.h
index 61c7b1750b16..092f1c91b36c 100644
--- a/arch/ia64/include/asm/meminit.h
+++ b/arch/ia64/include/asm/meminit.h
@@ -18,7 +18,6 @@
18 * - crash dumping code reserved region 18 * - crash dumping code reserved region
19 * - Kernel memory map built from EFI memory map 19 * - Kernel memory map built from EFI memory map
20 * - ELF core header 20 * - ELF core header
21 * - xen start info if CONFIG_XEN
22 * 21 *
23 * More could be added if necessary 22 * More could be added if necessary
24 */ 23 */
diff --git a/arch/ia64/include/asm/paravirt.h b/arch/ia64/include/asm/paravirt.h
index b149b88ea795..b53518a98026 100644
--- a/arch/ia64/include/asm/paravirt.h
+++ b/arch/ia64/include/asm/paravirt.h
@@ -75,7 +75,6 @@ void *paravirt_get_gate_section(void);
75#ifdef CONFIG_PARAVIRT_GUEST 75#ifdef CONFIG_PARAVIRT_GUEST
76 76
77#define PARAVIRT_HYPERVISOR_TYPE_DEFAULT 0 77#define PARAVIRT_HYPERVISOR_TYPE_DEFAULT 0
78#define PARAVIRT_HYPERVISOR_TYPE_XEN 1
79 78
80#ifndef __ASSEMBLY__ 79#ifndef __ASSEMBLY__
81 80
diff --git a/arch/ia64/include/asm/pvclock-abi.h b/arch/ia64/include/asm/pvclock-abi.h
index 44ef9ef8f5b3..42b233bedeb5 100644
--- a/arch/ia64/include/asm/pvclock-abi.h
+++ b/arch/ia64/include/asm/pvclock-abi.h
@@ -11,7 +11,7 @@
11/* 11/*
12 * These structs MUST NOT be changed. 12 * These structs MUST NOT be changed.
13 * They are the ABI between hypervisor and guest OS. 13 * They are the ABI between hypervisor and guest OS.
14 * Both Xen and KVM are using this. 14 * KVM is using this.
15 * 15 *
16 * pvclock_vcpu_time_info holds the system time and the tsc timestamp 16 * pvclock_vcpu_time_info holds the system time and the tsc timestamp
17 * of the last update. So the guest can use the tsc delta to get a 17 * of the last update. So the guest can use the tsc delta to get a
diff --git a/arch/ia64/include/asm/sync_bitops.h b/arch/ia64/include/asm/sync_bitops.h
deleted file mode 100644
index 593c12eeb270..000000000000
--- a/arch/ia64/include/asm/sync_bitops.h
+++ /dev/null
@@ -1,51 +0,0 @@
1#ifndef _ASM_IA64_SYNC_BITOPS_H
2#define _ASM_IA64_SYNC_BITOPS_H
3
4/*
5 * Copyright (C) 2008 Isaku Yamahata <yamahata at valinux co jp>
6 *
7 * Based on synch_bitops.h which Dan Magenhaimer wrote.
8 *
9 * bit operations which provide guaranteed strong synchronisation
10 * when communicating with Xen or other guest OSes running on other CPUs.
11 */
12
13static inline void sync_set_bit(int nr, volatile void *addr)
14{
15 set_bit(nr, addr);
16}
17
18static inline void sync_clear_bit(int nr, volatile void *addr)
19{
20 clear_bit(nr, addr);
21}
22
23static inline void sync_change_bit(int nr, volatile void *addr)
24{
25 change_bit(nr, addr);
26}
27
28static inline int sync_test_and_set_bit(int nr, volatile void *addr)
29{
30 return test_and_set_bit(nr, addr);
31}
32
33static inline int sync_test_and_clear_bit(int nr, volatile void *addr)
34{
35 return test_and_clear_bit(nr, addr);
36}
37
38static inline int sync_test_and_change_bit(int nr, volatile void *addr)
39{
40 return test_and_change_bit(nr, addr);
41}
42
43static inline int sync_test_bit(int nr, const volatile void *addr)
44{
45 return test_bit(nr, addr);
46}
47
48#define sync_cmpxchg(ptr, old, new) \
49 ((__typeof__(*(ptr)))cmpxchg_acq((ptr), (old), (new)))
50
51#endif /* _ASM_IA64_SYNC_BITOPS_H */
diff --git a/arch/ia64/include/asm/xen/events.h b/arch/ia64/include/asm/xen/events.h
deleted file mode 100644
index baa74c82aa71..000000000000
--- a/arch/ia64/include/asm/xen/events.h
+++ /dev/null
@@ -1,41 +0,0 @@
1/******************************************************************************
2 * arch/ia64/include/asm/xen/events.h
3 *
4 * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
5 * VA Linux Systems Japan K.K.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 *
21 */
22#ifndef _ASM_IA64_XEN_EVENTS_H
23#define _ASM_IA64_XEN_EVENTS_H
24
25enum ipi_vector {
26 XEN_RESCHEDULE_VECTOR,
27 XEN_IPI_VECTOR,
28 XEN_CMCP_VECTOR,
29 XEN_CPEP_VECTOR,
30
31 XEN_NR_IPIS,
32};
33
34static inline int xen_irqs_disabled(struct pt_regs *regs)
35{
36 return !(ia64_psr(regs)->i);
37}
38
39#define irq_ctx_init(cpu) do { } while (0)
40
41#endif /* _ASM_IA64_XEN_EVENTS_H */
diff --git a/arch/ia64/include/asm/xen/hypercall.h b/arch/ia64/include/asm/xen/hypercall.h
deleted file mode 100644
index ed28bcd5bb85..000000000000
--- a/arch/ia64/include/asm/xen/hypercall.h
+++ /dev/null
@@ -1,265 +0,0 @@
1/******************************************************************************
2 * hypercall.h
3 *
4 * Linux-specific hypervisor handling.
5 *
6 * Copyright (c) 2002-2004, K A Fraser
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License version 2
10 * as published by the Free Software Foundation; or, when distributed
11 * separately from the Linux kernel or incorporated into other
12 * software packages, subject to the following license:
13 *
14 * Permission is hereby granted, free of charge, to any person obtaining a copy
15 * of this source file (the "Software"), to deal in the Software without
16 * restriction, including without limitation the rights to use, copy, modify,
17 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
18 * and to permit persons to whom the Software is furnished to do so, subject to
19 * the following conditions:
20 *
21 * The above copyright notice and this permission notice shall be included in
22 * all copies or substantial portions of the Software.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
25 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
26 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
27 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
28 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
29 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
30 * IN THE SOFTWARE.
31 */
32
33#ifndef _ASM_IA64_XEN_HYPERCALL_H
34#define _ASM_IA64_XEN_HYPERCALL_H
35
36#include <xen/interface/xen.h>
37#include <xen/interface/physdev.h>
38#include <xen/interface/sched.h>
39#include <asm/xen/xcom_hcall.h>
40struct xencomm_handle;
41extern unsigned long __hypercall(unsigned long a1, unsigned long a2,
42 unsigned long a3, unsigned long a4,
43 unsigned long a5, unsigned long cmd);
44
45/*
46 * Assembler stubs for hyper-calls.
47 */
48
49#define _hypercall0(type, name) \
50({ \
51 long __res; \
52 __res = __hypercall(0, 0, 0, 0, 0, __HYPERVISOR_##name);\
53 (type)__res; \
54})
55
56#define _hypercall1(type, name, a1) \
57({ \
58 long __res; \
59 __res = __hypercall((unsigned long)a1, \
60 0, 0, 0, 0, __HYPERVISOR_##name); \
61 (type)__res; \
62})
63
64#define _hypercall2(type, name, a1, a2) \
65({ \
66 long __res; \
67 __res = __hypercall((unsigned long)a1, \
68 (unsigned long)a2, \
69 0, 0, 0, __HYPERVISOR_##name); \
70 (type)__res; \
71})
72
73#define _hypercall3(type, name, a1, a2, a3) \
74({ \
75 long __res; \
76 __res = __hypercall((unsigned long)a1, \
77 (unsigned long)a2, \
78 (unsigned long)a3, \
79 0, 0, __HYPERVISOR_##name); \
80 (type)__res; \
81})
82
83#define _hypercall4(type, name, a1, a2, a3, a4) \
84({ \
85 long __res; \
86 __res = __hypercall((unsigned long)a1, \
87 (unsigned long)a2, \
88 (unsigned long)a3, \
89 (unsigned long)a4, \
90 0, __HYPERVISOR_##name); \
91 (type)__res; \
92})
93
94#define _hypercall5(type, name, a1, a2, a3, a4, a5) \
95({ \
96 long __res; \
97 __res = __hypercall((unsigned long)a1, \
98 (unsigned long)a2, \
99 (unsigned long)a3, \
100 (unsigned long)a4, \
101 (unsigned long)a5, \
102 __HYPERVISOR_##name); \
103 (type)__res; \
104})
105
106
107static inline int
108xencomm_arch_hypercall_sched_op(int cmd, struct xencomm_handle *arg)
109{
110 return _hypercall2(int, sched_op, cmd, arg);
111}
112
113static inline long
114HYPERVISOR_set_timer_op(u64 timeout)
115{
116 unsigned long timeout_hi = (unsigned long)(timeout >> 32);
117 unsigned long timeout_lo = (unsigned long)timeout;
118 return _hypercall2(long, set_timer_op, timeout_lo, timeout_hi);
119}
120
121static inline int
122xencomm_arch_hypercall_multicall(struct xencomm_handle *call_list,
123 int nr_calls)
124{
125 return _hypercall2(int, multicall, call_list, nr_calls);
126}
127
128static inline int
129xencomm_arch_hypercall_memory_op(unsigned int cmd, struct xencomm_handle *arg)
130{
131 return _hypercall2(int, memory_op, cmd, arg);
132}
133
134static inline int
135xencomm_arch_hypercall_event_channel_op(int cmd, struct xencomm_handle *arg)
136{
137 return _hypercall2(int, event_channel_op, cmd, arg);
138}
139
140static inline int
141xencomm_arch_hypercall_xen_version(int cmd, struct xencomm_handle *arg)
142{
143 return _hypercall2(int, xen_version, cmd, arg);
144}
145
146static inline int
147xencomm_arch_hypercall_console_io(int cmd, int count,
148 struct xencomm_handle *str)
149{
150 return _hypercall3(int, console_io, cmd, count, str);
151}
152
153static inline int
154xencomm_arch_hypercall_physdev_op(int cmd, struct xencomm_handle *arg)
155{
156 return _hypercall2(int, physdev_op, cmd, arg);
157}
158
159static inline int
160xencomm_arch_hypercall_grant_table_op(unsigned int cmd,
161 struct xencomm_handle *uop,
162 unsigned int count)
163{
164 return _hypercall3(int, grant_table_op, cmd, uop, count);
165}
166
167int HYPERVISOR_grant_table_op(unsigned int cmd, void *uop, unsigned int count);
168
169extern int xencomm_arch_hypercall_suspend(struct xencomm_handle *arg);
170
171static inline int
172xencomm_arch_hypercall_callback_op(int cmd, struct xencomm_handle *arg)
173{
174 return _hypercall2(int, callback_op, cmd, arg);
175}
176
177static inline long
178xencomm_arch_hypercall_vcpu_op(int cmd, int cpu, void *arg)
179{
180 return _hypercall3(long, vcpu_op, cmd, cpu, arg);
181}
182
183static inline int
184HYPERVISOR_physdev_op(int cmd, void *arg)
185{
186 switch (cmd) {
187 case PHYSDEVOP_eoi:
188 return _hypercall1(int, ia64_fast_eoi,
189 ((struct physdev_eoi *)arg)->irq);
190 default:
191 return xencomm_hypercall_physdev_op(cmd, arg);
192 }
193}
194
195static inline long
196xencomm_arch_hypercall_opt_feature(struct xencomm_handle *arg)
197{
198 return _hypercall1(long, opt_feature, arg);
199}
200
201/* for balloon driver */
202#define HYPERVISOR_update_va_mapping(va, new_val, flags) (0)
203
204/* Use xencomm to do hypercalls. */
205#define HYPERVISOR_sched_op xencomm_hypercall_sched_op
206#define HYPERVISOR_event_channel_op xencomm_hypercall_event_channel_op
207#define HYPERVISOR_callback_op xencomm_hypercall_callback_op
208#define HYPERVISOR_multicall xencomm_hypercall_multicall
209#define HYPERVISOR_xen_version xencomm_hypercall_xen_version
210#define HYPERVISOR_console_io xencomm_hypercall_console_io
211#define HYPERVISOR_memory_op xencomm_hypercall_memory_op
212#define HYPERVISOR_suspend xencomm_hypercall_suspend
213#define HYPERVISOR_vcpu_op xencomm_hypercall_vcpu_op
214#define HYPERVISOR_opt_feature xencomm_hypercall_opt_feature
215
216/* to compile gnttab_copy_grant_page() in drivers/xen/core/gnttab.c */
217#define HYPERVISOR_mmu_update(req, count, success_count, domid) ({ BUG(); 0; })
218
219static inline int
220HYPERVISOR_shutdown(
221 unsigned int reason)
222{
223 struct sched_shutdown sched_shutdown = {
224 .reason = reason
225 };
226
227 int rc = HYPERVISOR_sched_op(SCHEDOP_shutdown, &sched_shutdown);
228
229 return rc;
230}
231
232/* for netfront.c, netback.c */
233#define MULTI_UVMFLAGS_INDEX 0 /* XXX any value */
234
235static inline void
236MULTI_update_va_mapping(
237 struct multicall_entry *mcl, unsigned long va,
238 pte_t new_val, unsigned long flags)
239{
240 mcl->op = __HYPERVISOR_update_va_mapping;
241 mcl->result = 0;
242}
243
244static inline void
245MULTI_grant_table_op(struct multicall_entry *mcl, unsigned int cmd,
246 void *uop, unsigned int count)
247{
248 mcl->op = __HYPERVISOR_grant_table_op;
249 mcl->args[0] = cmd;
250 mcl->args[1] = (unsigned long)uop;
251 mcl->args[2] = count;
252}
253
254static inline void
255MULTI_mmu_update(struct multicall_entry *mcl, struct mmu_update *req,
256 int count, int *success_count, domid_t domid)
257{
258 mcl->op = __HYPERVISOR_mmu_update;
259 mcl->args[0] = (unsigned long)req;
260 mcl->args[1] = count;
261 mcl->args[2] = (unsigned long)success_count;
262 mcl->args[3] = domid;
263}
264
265#endif /* _ASM_IA64_XEN_HYPERCALL_H */
diff --git a/arch/ia64/include/asm/xen/hypervisor.h b/arch/ia64/include/asm/xen/hypervisor.h
deleted file mode 100644
index 67455c2ed2b1..000000000000
--- a/arch/ia64/include/asm/xen/hypervisor.h
+++ /dev/null
@@ -1,61 +0,0 @@
1/******************************************************************************
2 * hypervisor.h
3 *
4 * Linux-specific hypervisor handling.
5 *
6 * Copyright (c) 2002-2004, K A Fraser
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License version 2
10 * as published by the Free Software Foundation; or, when distributed
11 * separately from the Linux kernel or incorporated into other
12 * software packages, subject to the following license:
13 *
14 * Permission is hereby granted, free of charge, to any person obtaining a copy
15 * of this source file (the "Software"), to deal in the Software without
16 * restriction, including without limitation the rights to use, copy, modify,
17 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
18 * and to permit persons to whom the Software is furnished to do so, subject to
19 * the following conditions:
20 *
21 * The above copyright notice and this permission notice shall be included in
22 * all copies or substantial portions of the Software.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
25 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
26 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
27 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
28 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
29 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
30 * IN THE SOFTWARE.
31 */
32
33#ifndef _ASM_IA64_XEN_HYPERVISOR_H
34#define _ASM_IA64_XEN_HYPERVISOR_H
35
36#include <linux/err.h>
37#include <xen/interface/xen.h>
38#include <xen/interface/version.h> /* to compile feature.c */
39#include <xen/features.h> /* to comiple xen-netfront.c */
40#include <xen/xen.h>
41#include <asm/xen/hypercall.h>
42
43#ifdef CONFIG_XEN
44extern struct shared_info *HYPERVISOR_shared_info;
45extern struct start_info *xen_start_info;
46
47void __init xen_setup_vcpu_info_placement(void);
48void force_evtchn_callback(void);
49
50/* for drivers/xen/balloon/balloon.c */
51#ifdef CONFIG_XEN_SCRUB_PAGES
52#define scrub_pages(_p, _n) memset((void *)(_p), 0, (_n) << PAGE_SHIFT)
53#else
54#define scrub_pages(_p, _n) ((void)0)
55#endif
56
57/* For setup_arch() in arch/ia64/kernel/setup.c */
58void xen_ia64_enable_opt_feature(void);
59#endif
60
61#endif /* _ASM_IA64_XEN_HYPERVISOR_H */
diff --git a/arch/ia64/include/asm/xen/inst.h b/arch/ia64/include/asm/xen/inst.h
deleted file mode 100644
index c53a47611208..000000000000
--- a/arch/ia64/include/asm/xen/inst.h
+++ /dev/null
@@ -1,486 +0,0 @@
1/******************************************************************************
2 * arch/ia64/include/asm/xen/inst.h
3 *
4 * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
5 * VA Linux Systems Japan K.K.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 *
21 */
22
23#include <asm/xen/privop.h>
24
25#define ia64_ivt xen_ivt
26#define DO_SAVE_MIN XEN_DO_SAVE_MIN
27
28#define __paravirt_switch_to xen_switch_to
29#define __paravirt_leave_syscall xen_leave_syscall
30#define __paravirt_work_processed_syscall xen_work_processed_syscall
31#define __paravirt_leave_kernel xen_leave_kernel
32#define __paravirt_pending_syscall_end xen_work_pending_syscall_end
33#define __paravirt_work_processed_syscall_target \
34 xen_work_processed_syscall
35
36#define paravirt_fsyscall_table xen_fsyscall_table
37#define paravirt_fsys_bubble_down xen_fsys_bubble_down
38
39#define MOV_FROM_IFA(reg) \
40 movl reg = XSI_IFA; \
41 ;; \
42 ld8 reg = [reg]
43
44#define MOV_FROM_ITIR(reg) \
45 movl reg = XSI_ITIR; \
46 ;; \
47 ld8 reg = [reg]
48
49#define MOV_FROM_ISR(reg) \
50 movl reg = XSI_ISR; \
51 ;; \
52 ld8 reg = [reg]
53
54#define MOV_FROM_IHA(reg) \
55 movl reg = XSI_IHA; \
56 ;; \
57 ld8 reg = [reg]
58
59#define MOV_FROM_IPSR(pred, reg) \
60(pred) movl reg = XSI_IPSR; \
61 ;; \
62(pred) ld8 reg = [reg]
63
64#define MOV_FROM_IIM(reg) \
65 movl reg = XSI_IIM; \
66 ;; \
67 ld8 reg = [reg]
68
69#define MOV_FROM_IIP(reg) \
70 movl reg = XSI_IIP; \
71 ;; \
72 ld8 reg = [reg]
73
74.macro __MOV_FROM_IVR reg, clob
75 .ifc "\reg", "r8"
76 XEN_HYPER_GET_IVR
77 .exitm
78 .endif
79 .ifc "\clob", "r8"
80 XEN_HYPER_GET_IVR
81 ;;
82 mov \reg = r8
83 .exitm
84 .endif
85
86 mov \clob = r8
87 ;;
88 XEN_HYPER_GET_IVR
89 ;;
90 mov \reg = r8
91 ;;
92 mov r8 = \clob
93.endm
94#define MOV_FROM_IVR(reg, clob) __MOV_FROM_IVR reg, clob
95
96.macro __MOV_FROM_PSR pred, reg, clob
97 .ifc "\reg", "r8"
98 (\pred) XEN_HYPER_GET_PSR;
99 .exitm
100 .endif
101 .ifc "\clob", "r8"
102 (\pred) XEN_HYPER_GET_PSR
103 ;;
104 (\pred) mov \reg = r8
105 .exitm
106 .endif
107
108 (\pred) mov \clob = r8
109 (\pred) XEN_HYPER_GET_PSR
110 ;;
111 (\pred) mov \reg = r8
112 (\pred) mov r8 = \clob
113.endm
114#define MOV_FROM_PSR(pred, reg, clob) __MOV_FROM_PSR pred, reg, clob
115
116/* assuming ar.itc is read with interrupt disabled. */
117#define MOV_FROM_ITC(pred, pred_clob, reg, clob) \
118(pred) movl clob = XSI_ITC_OFFSET; \
119 ;; \
120(pred) ld8 clob = [clob]; \
121(pred) mov reg = ar.itc; \
122 ;; \
123(pred) add reg = reg, clob; \
124 ;; \
125(pred) movl clob = XSI_ITC_LAST; \
126 ;; \
127(pred) ld8 clob = [clob]; \
128 ;; \
129(pred) cmp.geu.unc pred_clob, p0 = clob, reg; \
130 ;; \
131(pred_clob) add reg = 1, clob; \
132 ;; \
133(pred) movl clob = XSI_ITC_LAST; \
134 ;; \
135(pred) st8 [clob] = reg
136
137
138#define MOV_TO_IFA(reg, clob) \
139 movl clob = XSI_IFA; \
140 ;; \
141 st8 [clob] = reg \
142
143#define MOV_TO_ITIR(pred, reg, clob) \
144(pred) movl clob = XSI_ITIR; \
145 ;; \
146(pred) st8 [clob] = reg
147
148#define MOV_TO_IHA(pred, reg, clob) \
149(pred) movl clob = XSI_IHA; \
150 ;; \
151(pred) st8 [clob] = reg
152
153#define MOV_TO_IPSR(pred, reg, clob) \
154(pred) movl clob = XSI_IPSR; \
155 ;; \
156(pred) st8 [clob] = reg; \
157 ;;
158
159#define MOV_TO_IFS(pred, reg, clob) \
160(pred) movl clob = XSI_IFS; \
161 ;; \
162(pred) st8 [clob] = reg; \
163 ;;
164
165#define MOV_TO_IIP(reg, clob) \
166 movl clob = XSI_IIP; \
167 ;; \
168 st8 [clob] = reg
169
170.macro ____MOV_TO_KR kr, reg, clob0, clob1
171 .ifc "\clob0", "r9"
172 .error "clob0 \clob0 must not be r9"
173 .endif
174 .ifc "\clob1", "r8"
175 .error "clob1 \clob1 must not be r8"
176 .endif
177
178 .ifnc "\reg", "r9"
179 .ifnc "\clob1", "r9"
180 mov \clob1 = r9
181 .endif
182 mov r9 = \reg
183 .endif
184 .ifnc "\clob0", "r8"
185 mov \clob0 = r8
186 .endif
187 mov r8 = \kr
188 ;;
189 XEN_HYPER_SET_KR
190
191 .ifnc "\reg", "r9"
192 .ifnc "\clob1", "r9"
193 mov r9 = \clob1
194 .endif
195 .endif
196 .ifnc "\clob0", "r8"
197 mov r8 = \clob0
198 .endif
199.endm
200
201.macro __MOV_TO_KR kr, reg, clob0, clob1
202 .ifc "\clob0", "r9"
203 ____MOV_TO_KR \kr, \reg, \clob1, \clob0
204 .exitm
205 .endif
206 .ifc "\clob1", "r8"
207 ____MOV_TO_KR \kr, \reg, \clob1, \clob0
208 .exitm
209 .endif
210
211 ____MOV_TO_KR \kr, \reg, \clob0, \clob1
212.endm
213
214#define MOV_TO_KR(kr, reg, clob0, clob1) \
215 __MOV_TO_KR IA64_KR_ ## kr, reg, clob0, clob1
216
217
218.macro __ITC_I pred, reg, clob
219 .ifc "\reg", "r8"
220 (\pred) XEN_HYPER_ITC_I
221 .exitm
222 .endif
223 .ifc "\clob", "r8"
224 (\pred) mov r8 = \reg
225 ;;
226 (\pred) XEN_HYPER_ITC_I
227 .exitm
228 .endif
229
230 (\pred) mov \clob = r8
231 (\pred) mov r8 = \reg
232 ;;
233 (\pred) XEN_HYPER_ITC_I
234 ;;
235 (\pred) mov r8 = \clob
236 ;;
237.endm
238#define ITC_I(pred, reg, clob) __ITC_I pred, reg, clob
239
240.macro __ITC_D pred, reg, clob
241 .ifc "\reg", "r8"
242 (\pred) XEN_HYPER_ITC_D
243 ;;
244 .exitm
245 .endif
246 .ifc "\clob", "r8"
247 (\pred) mov r8 = \reg
248 ;;
249 (\pred) XEN_HYPER_ITC_D
250 ;;
251 .exitm
252 .endif
253
254 (\pred) mov \clob = r8
255 (\pred) mov r8 = \reg
256 ;;
257 (\pred) XEN_HYPER_ITC_D
258 ;;
259 (\pred) mov r8 = \clob
260 ;;
261.endm
262#define ITC_D(pred, reg, clob) __ITC_D pred, reg, clob
263
264.macro __ITC_I_AND_D pred_i, pred_d, reg, clob
265 .ifc "\reg", "r8"
266 (\pred_i)XEN_HYPER_ITC_I
267 ;;
268 (\pred_d)XEN_HYPER_ITC_D
269 ;;
270 .exitm
271 .endif
272 .ifc "\clob", "r8"
273 mov r8 = \reg
274 ;;
275 (\pred_i)XEN_HYPER_ITC_I
276 ;;
277 (\pred_d)XEN_HYPER_ITC_D
278 ;;
279 .exitm
280 .endif
281
282 mov \clob = r8
283 mov r8 = \reg
284 ;;
285 (\pred_i)XEN_HYPER_ITC_I
286 ;;
287 (\pred_d)XEN_HYPER_ITC_D
288 ;;
289 mov r8 = \clob
290 ;;
291.endm
292#define ITC_I_AND_D(pred_i, pred_d, reg, clob) \
293 __ITC_I_AND_D pred_i, pred_d, reg, clob
294
295.macro __THASH pred, reg0, reg1, clob
296 .ifc "\reg0", "r8"
297 (\pred) mov r8 = \reg1
298 (\pred) XEN_HYPER_THASH
299 .exitm
300 .endc
301 .ifc "\reg1", "r8"
302 (\pred) XEN_HYPER_THASH
303 ;;
304 (\pred) mov \reg0 = r8
305 ;;
306 .exitm
307 .endif
308 .ifc "\clob", "r8"
309 (\pred) mov r8 = \reg1
310 (\pred) XEN_HYPER_THASH
311 ;;
312 (\pred) mov \reg0 = r8
313 ;;
314 .exitm
315 .endif
316
317 (\pred) mov \clob = r8
318 (\pred) mov r8 = \reg1
319 (\pred) XEN_HYPER_THASH
320 ;;
321 (\pred) mov \reg0 = r8
322 (\pred) mov r8 = \clob
323 ;;
324.endm
325#define THASH(pred, reg0, reg1, clob) __THASH pred, reg0, reg1, clob
326
327#define SSM_PSR_IC_AND_DEFAULT_BITS_AND_SRLZ_I(clob0, clob1) \
328 mov clob0 = 1; \
329 movl clob1 = XSI_PSR_IC; \
330 ;; \
331 st4 [clob1] = clob0 \
332 ;;
333
334#define SSM_PSR_IC_AND_SRLZ_D(clob0, clob1) \
335 ;; \
336 srlz.d; \
337 mov clob1 = 1; \
338 movl clob0 = XSI_PSR_IC; \
339 ;; \
340 st4 [clob0] = clob1
341
342#define RSM_PSR_IC(clob) \
343 movl clob = XSI_PSR_IC; \
344 ;; \
345 st4 [clob] = r0; \
346 ;;
347
348/* pred will be clobbered */
349#define MASK_TO_PEND_OFS (-1)
350#define SSM_PSR_I(pred, pred_clob, clob) \
351(pred) movl clob = XSI_PSR_I_ADDR \
352 ;; \
353(pred) ld8 clob = [clob] \
354 ;; \
355 /* if (pred) vpsr.i = 1 */ \
356 /* if (pred) (vcpu->vcpu_info->evtchn_upcall_mask)=0 */ \
357(pred) st1 [clob] = r0, MASK_TO_PEND_OFS \
358 ;; \
359 /* if (vcpu->vcpu_info->evtchn_upcall_pending) */ \
360(pred) ld1 clob = [clob] \
361 ;; \
362(pred) cmp.ne.unc pred_clob, p0 = clob, r0 \
363 ;; \
364(pred_clob)XEN_HYPER_SSM_I /* do areal ssm psr.i */
365
366#define RSM_PSR_I(pred, clob0, clob1) \
367 movl clob0 = XSI_PSR_I_ADDR; \
368 mov clob1 = 1; \
369 ;; \
370 ld8 clob0 = [clob0]; \
371 ;; \
372(pred) st1 [clob0] = clob1
373
374#define RSM_PSR_I_IC(clob0, clob1, clob2) \
375 movl clob0 = XSI_PSR_I_ADDR; \
376 movl clob1 = XSI_PSR_IC; \
377 ;; \
378 ld8 clob0 = [clob0]; \
379 mov clob2 = 1; \
380 ;; \
381 /* note: clears both vpsr.i and vpsr.ic! */ \
382 st1 [clob0] = clob2; \
383 st4 [clob1] = r0; \
384 ;;
385
386#define RSM_PSR_DT \
387 XEN_HYPER_RSM_PSR_DT
388
389#define RSM_PSR_BE_I(clob0, clob1) \
390 RSM_PSR_I(p0, clob0, clob1); \
391 rum psr.be
392
393#define SSM_PSR_DT_AND_SRLZ_I \
394 XEN_HYPER_SSM_PSR_DT
395
396#define BSW_0(clob0, clob1, clob2) \
397 ;; \
398 /* r16-r31 all now hold bank1 values */ \
399 mov clob2 = ar.unat; \
400 movl clob0 = XSI_BANK1_R16; \
401 movl clob1 = XSI_BANK1_R16 + 8; \
402 ;; \
403.mem.offset 0, 0; st8.spill [clob0] = r16, 16; \
404.mem.offset 8, 0; st8.spill [clob1] = r17, 16; \
405 ;; \
406.mem.offset 0, 0; st8.spill [clob0] = r18, 16; \
407.mem.offset 8, 0; st8.spill [clob1] = r19, 16; \
408 ;; \
409.mem.offset 0, 0; st8.spill [clob0] = r20, 16; \
410.mem.offset 8, 0; st8.spill [clob1] = r21, 16; \
411 ;; \
412.mem.offset 0, 0; st8.spill [clob0] = r22, 16; \
413.mem.offset 8, 0; st8.spill [clob1] = r23, 16; \
414 ;; \
415.mem.offset 0, 0; st8.spill [clob0] = r24, 16; \
416.mem.offset 8, 0; st8.spill [clob1] = r25, 16; \
417 ;; \
418.mem.offset 0, 0; st8.spill [clob0] = r26, 16; \
419.mem.offset 8, 0; st8.spill [clob1] = r27, 16; \
420 ;; \
421.mem.offset 0, 0; st8.spill [clob0] = r28, 16; \
422.mem.offset 8, 0; st8.spill [clob1] = r29, 16; \
423 ;; \
424.mem.offset 0, 0; st8.spill [clob0] = r30, 16; \
425.mem.offset 8, 0; st8.spill [clob1] = r31, 16; \
426 ;; \
427 mov clob1 = ar.unat; \
428 movl clob0 = XSI_B1NAT; \
429 ;; \
430 st8 [clob0] = clob1; \
431 mov ar.unat = clob2; \
432 movl clob0 = XSI_BANKNUM; \
433 ;; \
434 st4 [clob0] = r0
435
436
437 /* FIXME: THIS CODE IS NOT NaT SAFE! */
438#define XEN_BSW_1(clob) \
439 mov clob = ar.unat; \
440 movl r30 = XSI_B1NAT; \
441 ;; \
442 ld8 r30 = [r30]; \
443 mov r31 = 1; \
444 ;; \
445 mov ar.unat = r30; \
446 movl r30 = XSI_BANKNUM; \
447 ;; \
448 st4 [r30] = r31; \
449 movl r30 = XSI_BANK1_R16; \
450 movl r31 = XSI_BANK1_R16+8; \
451 ;; \
452 ld8.fill r16 = [r30], 16; \
453 ld8.fill r17 = [r31], 16; \
454 ;; \
455 ld8.fill r18 = [r30], 16; \
456 ld8.fill r19 = [r31], 16; \
457 ;; \
458 ld8.fill r20 = [r30], 16; \
459 ld8.fill r21 = [r31], 16; \
460 ;; \
461 ld8.fill r22 = [r30], 16; \
462 ld8.fill r23 = [r31], 16; \
463 ;; \
464 ld8.fill r24 = [r30], 16; \
465 ld8.fill r25 = [r31], 16; \
466 ;; \
467 ld8.fill r26 = [r30], 16; \
468 ld8.fill r27 = [r31], 16; \
469 ;; \
470 ld8.fill r28 = [r30], 16; \
471 ld8.fill r29 = [r31], 16; \
472 ;; \
473 ld8.fill r30 = [r30]; \
474 ld8.fill r31 = [r31]; \
475 ;; \
476 mov ar.unat = clob
477
478#define BSW_1(clob0, clob1) XEN_BSW_1(clob1)
479
480
481#define COVER \
482 XEN_HYPER_COVER
483
484#define RFI \
485 XEN_HYPER_RFI; \
486 dv_serialize_data
diff --git a/arch/ia64/include/asm/xen/interface.h b/arch/ia64/include/asm/xen/interface.h
deleted file mode 100644
index e88c5de27410..000000000000
--- a/arch/ia64/include/asm/xen/interface.h
+++ /dev/null
@@ -1,363 +0,0 @@
1/******************************************************************************
2 * arch-ia64/hypervisor-if.h
3 *
4 * Guest OS interface to IA64 Xen.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to
8 * deal in the Software without restriction, including without limitation the
9 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
10 * sell copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
23 *
24 * Copyright by those who contributed. (in alphabetical order)
25 *
26 * Anthony Xu <anthony.xu@intel.com>
27 * Eddie Dong <eddie.dong@intel.com>
28 * Fred Yang <fred.yang@intel.com>
29 * Kevin Tian <kevin.tian@intel.com>
30 * Alex Williamson <alex.williamson@hp.com>
31 * Chris Wright <chrisw@sous-sol.org>
32 * Christian Limpach <Christian.Limpach@cl.cam.ac.uk>
33 * Dietmar Hahn <dietmar.hahn@fujitsu-siemens.com>
34 * Hollis Blanchard <hollisb@us.ibm.com>
35 * Isaku Yamahata <yamahata@valinux.co.jp>
36 * Jan Beulich <jbeulich@novell.com>
37 * John Levon <john.levon@sun.com>
38 * Kazuhiro Suzuki <kaz@jp.fujitsu.com>
39 * Keir Fraser <keir.fraser@citrix.com>
40 * Kouya Shimura <kouya@jp.fujitsu.com>
41 * Masaki Kanno <kanno.masaki@jp.fujitsu.com>
42 * Matt Chapman <matthewc@hp.com>
43 * Matthew Chapman <matthewc@hp.com>
44 * Samuel Thibault <samuel.thibault@eu.citrix.com>
45 * Tomonari Horikoshi <t.horikoshi@jp.fujitsu.com>
46 * Tristan Gingold <tgingold@free.fr>
47 * Tsunehisa Doi <Doi.Tsunehisa@jp.fujitsu.com>
48 * Yutaka Ezaki <yutaka.ezaki@jp.fujitsu.com>
49 * Zhang Xin <xing.z.zhang@intel.com>
50 * Zhang xiantao <xiantao.zhang@intel.com>
51 * dan.magenheimer@hp.com
52 * ian.pratt@cl.cam.ac.uk
53 * michael.fetterman@cl.cam.ac.uk
54 */
55
56#ifndef _ASM_IA64_XEN_INTERFACE_H
57#define _ASM_IA64_XEN_INTERFACE_H
58
59#define __DEFINE_GUEST_HANDLE(name, type) \
60 typedef struct { type *p; } __guest_handle_ ## name
61
62#define DEFINE_GUEST_HANDLE_STRUCT(name) \
63 __DEFINE_GUEST_HANDLE(name, struct name)
64#define DEFINE_GUEST_HANDLE(name) __DEFINE_GUEST_HANDLE(name, name)
65#define GUEST_HANDLE(name) __guest_handle_ ## name
66#define GUEST_HANDLE_64(name) GUEST_HANDLE(name)
67#define set_xen_guest_handle(hnd, val) do { (hnd).p = val; } while (0)
68
69#ifndef __ASSEMBLY__
70/* Explicitly size integers that represent pfns in the public interface
71 * with Xen so that we could have one ABI that works for 32 and 64 bit
72 * guests. */
73typedef unsigned long xen_pfn_t;
74typedef unsigned long xen_ulong_t;
75/* Guest handles for primitive C types. */
76__DEFINE_GUEST_HANDLE(uchar, unsigned char);
77__DEFINE_GUEST_HANDLE(uint, unsigned int);
78__DEFINE_GUEST_HANDLE(ulong, unsigned long);
79
80DEFINE_GUEST_HANDLE(char);
81DEFINE_GUEST_HANDLE(int);
82DEFINE_GUEST_HANDLE(long);
83DEFINE_GUEST_HANDLE(void);
84DEFINE_GUEST_HANDLE(uint64_t);
85DEFINE_GUEST_HANDLE(uint32_t);
86
87DEFINE_GUEST_HANDLE(xen_pfn_t);
88#define PRI_xen_pfn "lx"
89#endif
90
91/* Arch specific VIRQs definition */
92#define VIRQ_ITC VIRQ_ARCH_0 /* V. Virtual itc timer */
93#define VIRQ_MCA_CMC VIRQ_ARCH_1 /* MCA cmc interrupt */
94#define VIRQ_MCA_CPE VIRQ_ARCH_2 /* MCA cpe interrupt */
95
96/* Maximum number of virtual CPUs in multi-processor guests. */
97/* keep sizeof(struct shared_page) <= PAGE_SIZE.
98 * this is checked in arch/ia64/xen/hypervisor.c. */
99#define MAX_VIRT_CPUS 64
100
101#ifndef __ASSEMBLY__
102
103#define INVALID_MFN (~0UL)
104
105union vac {
106 unsigned long value;
107 struct {
108 int a_int:1;
109 int a_from_int_cr:1;
110 int a_to_int_cr:1;
111 int a_from_psr:1;
112 int a_from_cpuid:1;
113 int a_cover:1;
114 int a_bsw:1;
115 long reserved:57;
116 };
117};
118
119union vdc {
120 unsigned long value;
121 struct {
122 int d_vmsw:1;
123 int d_extint:1;
124 int d_ibr_dbr:1;
125 int d_pmc:1;
126 int d_to_pmd:1;
127 int d_itm:1;
128 long reserved:58;
129 };
130};
131
132struct mapped_regs {
133 union vac vac;
134 union vdc vdc;
135 unsigned long virt_env_vaddr;
136 unsigned long reserved1[29];
137 unsigned long vhpi;
138 unsigned long reserved2[95];
139 union {
140 unsigned long vgr[16];
141 unsigned long bank1_regs[16]; /* bank1 regs (r16-r31)
142 when bank0 active */
143 };
144 union {
145 unsigned long vbgr[16];
146 unsigned long bank0_regs[16]; /* bank0 regs (r16-r31)
147 when bank1 active */
148 };
149 unsigned long vnat;
150 unsigned long vbnat;
151 unsigned long vcpuid[5];
152 unsigned long reserved3[11];
153 unsigned long vpsr;
154 unsigned long vpr;
155 unsigned long reserved4[76];
156 union {
157 unsigned long vcr[128];
158 struct {
159 unsigned long dcr; /* CR0 */
160 unsigned long itm;
161 unsigned long iva;
162 unsigned long rsv1[5];
163 unsigned long pta; /* CR8 */
164 unsigned long rsv2[7];
165 unsigned long ipsr; /* CR16 */
166 unsigned long isr;
167 unsigned long rsv3;
168 unsigned long iip;
169 unsigned long ifa;
170 unsigned long itir;
171 unsigned long iipa;
172 unsigned long ifs;
173 unsigned long iim; /* CR24 */
174 unsigned long iha;
175 unsigned long rsv4[38];
176 unsigned long lid; /* CR64 */
177 unsigned long ivr;
178 unsigned long tpr;
179 unsigned long eoi;
180 unsigned long irr[4];
181 unsigned long itv; /* CR72 */
182 unsigned long pmv;
183 unsigned long cmcv;
184 unsigned long rsv5[5];
185 unsigned long lrr0; /* CR80 */
186 unsigned long lrr1;
187 unsigned long rsv6[46];
188 };
189 };
190 union {
191 unsigned long reserved5[128];
192 struct {
193 unsigned long precover_ifs;
194 unsigned long unat; /* not sure if this is needed
195 until NaT arch is done */
196 int interrupt_collection_enabled; /* virtual psr.ic */
197
198 /* virtual interrupt deliverable flag is
199 * evtchn_upcall_mask in shared info area now.
200 * interrupt_mask_addr is the address
201 * of evtchn_upcall_mask for current vcpu
202 */
203 unsigned char *interrupt_mask_addr;
204 int pending_interruption;
205 unsigned char vpsr_pp;
206 unsigned char vpsr_dfh;
207 unsigned char hpsr_dfh;
208 unsigned char hpsr_mfh;
209 unsigned long reserved5_1[4];
210 int metaphysical_mode; /* 1 = use metaphys mapping
211 0 = use virtual */
212 int banknum; /* 0 or 1, which virtual
213 register bank is active */
214 unsigned long rrs[8]; /* region registers */
215 unsigned long krs[8]; /* kernel registers */
216 unsigned long tmp[16]; /* temp registers
217 (e.g. for hyperprivops) */
218
219 /* itc paravirtualization
220 * vAR.ITC = mAR.ITC + itc_offset
221 * itc_last is one which was lastly passed to
222 * the guest OS in order to prevent it from
223 * going backwords.
224 */
225 unsigned long itc_offset;
226 unsigned long itc_last;
227 };
228 };
229};
230
231struct arch_vcpu_info {
232 /* nothing */
233};
234
235/*
236 * This structure is used for magic page in domain pseudo physical address
237 * space and the result of XENMEM_machine_memory_map.
238 * As the XENMEM_machine_memory_map result,
239 * xen_memory_map::nr_entries indicates the size in bytes
240 * including struct xen_ia64_memmap_info. Not the number of entries.
241 */
242struct xen_ia64_memmap_info {
243 uint64_t efi_memmap_size; /* size of EFI memory map */
244 uint64_t efi_memdesc_size; /* size of an EFI memory map
245 * descriptor */
246 uint32_t efi_memdesc_version; /* memory descriptor version */
247 void *memdesc[0]; /* array of efi_memory_desc_t */
248};
249
250struct arch_shared_info {
251 /* PFN of the start_info page. */
252 unsigned long start_info_pfn;
253
254 /* Interrupt vector for event channel. */
255 int evtchn_vector;
256
257 /* PFN of memmap_info page */
258 unsigned int memmap_info_num_pages; /* currently only = 1 case is
259 supported. */
260 unsigned long memmap_info_pfn;
261
262 uint64_t pad[31];
263};
264
265struct xen_callback {
266 unsigned long ip;
267};
268typedef struct xen_callback xen_callback_t;
269
270#endif /* !__ASSEMBLY__ */
271
272#include <asm/pvclock-abi.h>
273
274/* Size of the shared_info area (this is not related to page size). */
275#define XSI_SHIFT 14
276#define XSI_SIZE (1 << XSI_SHIFT)
277/* Log size of mapped_regs area (64 KB - only 4KB is used). */
278#define XMAPPEDREGS_SHIFT 12
279#define XMAPPEDREGS_SIZE (1 << XMAPPEDREGS_SHIFT)
280/* Offset of XASI (Xen arch shared info) wrt XSI_BASE. */
281#define XMAPPEDREGS_OFS XSI_SIZE
282
283/* Hyperprivops. */
284#define HYPERPRIVOP_START 0x1
285#define HYPERPRIVOP_RFI (HYPERPRIVOP_START + 0x0)
286#define HYPERPRIVOP_RSM_DT (HYPERPRIVOP_START + 0x1)
287#define HYPERPRIVOP_SSM_DT (HYPERPRIVOP_START + 0x2)
288#define HYPERPRIVOP_COVER (HYPERPRIVOP_START + 0x3)
289#define HYPERPRIVOP_ITC_D (HYPERPRIVOP_START + 0x4)
290#define HYPERPRIVOP_ITC_I (HYPERPRIVOP_START + 0x5)
291#define HYPERPRIVOP_SSM_I (HYPERPRIVOP_START + 0x6)
292#define HYPERPRIVOP_GET_IVR (HYPERPRIVOP_START + 0x7)
293#define HYPERPRIVOP_GET_TPR (HYPERPRIVOP_START + 0x8)
294#define HYPERPRIVOP_SET_TPR (HYPERPRIVOP_START + 0x9)
295#define HYPERPRIVOP_EOI (HYPERPRIVOP_START + 0xa)
296#define HYPERPRIVOP_SET_ITM (HYPERPRIVOP_START + 0xb)
297#define HYPERPRIVOP_THASH (HYPERPRIVOP_START + 0xc)
298#define HYPERPRIVOP_PTC_GA (HYPERPRIVOP_START + 0xd)
299#define HYPERPRIVOP_ITR_D (HYPERPRIVOP_START + 0xe)
300#define HYPERPRIVOP_GET_RR (HYPERPRIVOP_START + 0xf)
301#define HYPERPRIVOP_SET_RR (HYPERPRIVOP_START + 0x10)
302#define HYPERPRIVOP_SET_KR (HYPERPRIVOP_START + 0x11)
303#define HYPERPRIVOP_FC (HYPERPRIVOP_START + 0x12)
304#define HYPERPRIVOP_GET_CPUID (HYPERPRIVOP_START + 0x13)
305#define HYPERPRIVOP_GET_PMD (HYPERPRIVOP_START + 0x14)
306#define HYPERPRIVOP_GET_EFLAG (HYPERPRIVOP_START + 0x15)
307#define HYPERPRIVOP_SET_EFLAG (HYPERPRIVOP_START + 0x16)
308#define HYPERPRIVOP_RSM_BE (HYPERPRIVOP_START + 0x17)
309#define HYPERPRIVOP_GET_PSR (HYPERPRIVOP_START + 0x18)
310#define HYPERPRIVOP_SET_RR0_TO_RR4 (HYPERPRIVOP_START + 0x19)
311#define HYPERPRIVOP_MAX (0x1a)
312
313/* Fast and light hypercalls. */
314#define __HYPERVISOR_ia64_fast_eoi __HYPERVISOR_arch_1
315
316/* Xencomm macros. */
317#define XENCOMM_INLINE_MASK 0xf800000000000000UL
318#define XENCOMM_INLINE_FLAG 0x8000000000000000UL
319
320#ifndef __ASSEMBLY__
321
322/*
323 * Optimization features.
324 * The hypervisor may do some special optimizations for guests. This hypercall
325 * can be used to switch on/of these special optimizations.
326 */
327#define __HYPERVISOR_opt_feature 0x700UL
328
329#define XEN_IA64_OPTF_OFF 0x0
330#define XEN_IA64_OPTF_ON 0x1
331
332/*
333 * If this feature is switched on, the hypervisor inserts the
334 * tlb entries without calling the guests traphandler.
335 * This is useful in guests using region 7 for identity mapping
336 * like the linux kernel does.
337 */
338#define XEN_IA64_OPTF_IDENT_MAP_REG7 1
339
340/* Identity mapping of region 4 addresses in HVM. */
341#define XEN_IA64_OPTF_IDENT_MAP_REG4 2
342
343/* Identity mapping of region 5 addresses in HVM. */
344#define XEN_IA64_OPTF_IDENT_MAP_REG5 3
345
346#define XEN_IA64_OPTF_IDENT_MAP_NOT_SET (0)
347
348struct xen_ia64_opt_feature {
349 unsigned long cmd; /* Which feature */
350 unsigned char on; /* Switch feature on/off */
351 union {
352 struct {
353 /* The page protection bit mask of the pte.
354 * This will be or'ed with the pte. */
355 unsigned long pgprot;
356 unsigned long key; /* A protection key for itir.*/
357 };
358 };
359};
360
361#endif /* __ASSEMBLY__ */
362
363#endif /* _ASM_IA64_XEN_INTERFACE_H */
diff --git a/arch/ia64/include/asm/xen/irq.h b/arch/ia64/include/asm/xen/irq.h
deleted file mode 100644
index a90450983003..000000000000
--- a/arch/ia64/include/asm/xen/irq.h
+++ /dev/null
@@ -1,44 +0,0 @@
1/******************************************************************************
2 * arch/ia64/include/asm/xen/irq.h
3 *
4 * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
5 * VA Linux Systems Japan K.K.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 *
21 */
22
23#ifndef _ASM_IA64_XEN_IRQ_H
24#define _ASM_IA64_XEN_IRQ_H
25
26/*
27 * The flat IRQ space is divided into two regions:
28 * 1. A one-to-one mapping of real physical IRQs. This space is only used
29 * if we have physical device-access privilege. This region is at the
30 * start of the IRQ space so that existing device drivers do not need
31 * to be modified to translate physical IRQ numbers into our IRQ space.
32 * 3. A dynamic mapping of inter-domain and Xen-sourced virtual IRQs. These
33 * are bound using the provided bind/unbind functions.
34 */
35
36#define XEN_PIRQ_BASE 0
37#define XEN_NR_PIRQS 256
38
39#define XEN_DYNIRQ_BASE (XEN_PIRQ_BASE + XEN_NR_PIRQS)
40#define XEN_NR_DYNIRQS (NR_CPUS * 8)
41
42#define XEN_NR_IRQS (XEN_NR_PIRQS + XEN_NR_DYNIRQS)
43
44#endif /* _ASM_IA64_XEN_IRQ_H */
diff --git a/arch/ia64/include/asm/xen/minstate.h b/arch/ia64/include/asm/xen/minstate.h
deleted file mode 100644
index 00cf03e0cb82..000000000000
--- a/arch/ia64/include/asm/xen/minstate.h
+++ /dev/null
@@ -1,143 +0,0 @@
1
2#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
3/* read ar.itc in advance, and use it before leaving bank 0 */
4#define XEN_ACCOUNT_GET_STAMP \
5 MOV_FROM_ITC(pUStk, p6, r20, r2);
6#else
7#define XEN_ACCOUNT_GET_STAMP
8#endif
9
10/*
11 * DO_SAVE_MIN switches to the kernel stacks (if necessary) and saves
12 * the minimum state necessary that allows us to turn psr.ic back
13 * on.
14 *
15 * Assumed state upon entry:
16 * psr.ic: off
17 * r31: contains saved predicates (pr)
18 *
19 * Upon exit, the state is as follows:
20 * psr.ic: off
21 * r2 = points to &pt_regs.r16
22 * r8 = contents of ar.ccv
23 * r9 = contents of ar.csd
24 * r10 = contents of ar.ssd
25 * r11 = FPSR_DEFAULT
26 * r12 = kernel sp (kernel virtual address)
27 * r13 = points to current task_struct (kernel virtual address)
28 * p15 = TRUE if psr.i is set in cr.ipsr
29 * predicate registers (other than p2, p3, and p15), b6, r3, r14, r15:
30 * preserved
31 * CONFIG_XEN note: p6/p7 are not preserved
32 *
33 * Note that psr.ic is NOT turned on by this macro. This is so that
34 * we can pass interruption state as arguments to a handler.
35 */
36#define XEN_DO_SAVE_MIN(__COVER,SAVE_IFS,EXTRA,WORKAROUND) \
37 mov r16=IA64_KR(CURRENT); /* M */ \
38 mov r27=ar.rsc; /* M */ \
39 mov r20=r1; /* A */ \
40 mov r25=ar.unat; /* M */ \
41 MOV_FROM_IPSR(p0,r29); /* M */ \
42 MOV_FROM_IIP(r28); /* M */ \
43 mov r21=ar.fpsr; /* M */ \
44 mov r26=ar.pfs; /* I */ \
45 __COVER; /* B;; (or nothing) */ \
46 adds r16=IA64_TASK_THREAD_ON_USTACK_OFFSET,r16; \
47 ;; \
48 ld1 r17=[r16]; /* load current->thread.on_ustack flag */ \
49 st1 [r16]=r0; /* clear current->thread.on_ustack flag */ \
50 adds r1=-IA64_TASK_THREAD_ON_USTACK_OFFSET,r16 \
51 /* switch from user to kernel RBS: */ \
52 ;; \
53 invala; /* M */ \
54 /* SAVE_IFS;*/ /* see xen special handling below */ \
55 cmp.eq pKStk,pUStk=r0,r17; /* are we in kernel mode already? */ \
56 ;; \
57(pUStk) mov ar.rsc=0; /* set enforced lazy mode, pl 0, little-endian, loadrs=0 */ \
58 ;; \
59(pUStk) mov.m r24=ar.rnat; \
60(pUStk) addl r22=IA64_RBS_OFFSET,r1; /* compute base of RBS */ \
61(pKStk) mov r1=sp; /* get sp */ \
62 ;; \
63(pUStk) lfetch.fault.excl.nt1 [r22]; \
64(pUStk) addl r1=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r1; /* compute base of memory stack */ \
65(pUStk) mov r23=ar.bspstore; /* save ar.bspstore */ \
66 ;; \
67(pUStk) mov ar.bspstore=r22; /* switch to kernel RBS */ \
68(pKStk) addl r1=-IA64_PT_REGS_SIZE,r1; /* if in kernel mode, use sp (r12) */ \
69 ;; \
70(pUStk) mov r18=ar.bsp; \
71(pUStk) mov ar.rsc=0x3; /* set eager mode, pl 0, little-endian, loadrs=0 */ \
72 adds r17=2*L1_CACHE_BYTES,r1; /* really: biggest cache-line size */ \
73 adds r16=PT(CR_IPSR),r1; \
74 ;; \
75 lfetch.fault.excl.nt1 [r17],L1_CACHE_BYTES; \
76 st8 [r16]=r29; /* save cr.ipsr */ \
77 ;; \
78 lfetch.fault.excl.nt1 [r17]; \
79 tbit.nz p15,p0=r29,IA64_PSR_I_BIT; \
80 mov r29=b0 \
81 ;; \
82 WORKAROUND; \
83 adds r16=PT(R8),r1; /* initialize first base pointer */ \
84 adds r17=PT(R9),r1; /* initialize second base pointer */ \
85(pKStk) mov r18=r0; /* make sure r18 isn't NaT */ \
86 ;; \
87.mem.offset 0,0; st8.spill [r16]=r8,16; \
88.mem.offset 8,0; st8.spill [r17]=r9,16; \
89 ;; \
90.mem.offset 0,0; st8.spill [r16]=r10,24; \
91 movl r8=XSI_PRECOVER_IFS; \
92.mem.offset 8,0; st8.spill [r17]=r11,24; \
93 ;; \
94 /* xen special handling for possibly lazy cover */ \
95 /* SAVE_MIN case in dispatch_ia32_handler: mov r30=r0 */ \
96 ld8 r30=[r8]; \
97(pUStk) sub r18=r18,r22; /* r18=RSE.ndirty*8 */ \
98 st8 [r16]=r28,16; /* save cr.iip */ \
99 ;; \
100 st8 [r17]=r30,16; /* save cr.ifs */ \
101 mov r8=ar.ccv; \
102 mov r9=ar.csd; \
103 mov r10=ar.ssd; \
104 movl r11=FPSR_DEFAULT; /* L-unit */ \
105 ;; \
106 st8 [r16]=r25,16; /* save ar.unat */ \
107 st8 [r17]=r26,16; /* save ar.pfs */ \
108 shl r18=r18,16; /* compute ar.rsc to be used for "loadrs" */ \
109 ;; \
110 st8 [r16]=r27,16; /* save ar.rsc */ \
111(pUStk) st8 [r17]=r24,16; /* save ar.rnat */ \
112(pKStk) adds r17=16,r17; /* skip over ar_rnat field */ \
113 ;; /* avoid RAW on r16 & r17 */ \
114(pUStk) st8 [r16]=r23,16; /* save ar.bspstore */ \
115 st8 [r17]=r31,16; /* save predicates */ \
116(pKStk) adds r16=16,r16; /* skip over ar_bspstore field */ \
117 ;; \
118 st8 [r16]=r29,16; /* save b0 */ \
119 st8 [r17]=r18,16; /* save ar.rsc value for "loadrs" */ \
120 cmp.eq pNonSys,pSys=r0,r0 /* initialize pSys=0, pNonSys=1 */ \
121 ;; \
122.mem.offset 0,0; st8.spill [r16]=r20,16; /* save original r1 */ \
123.mem.offset 8,0; st8.spill [r17]=r12,16; \
124 adds r12=-16,r1; /* switch to kernel memory stack (with 16 bytes of scratch) */ \
125 ;; \
126.mem.offset 0,0; st8.spill [r16]=r13,16; \
127.mem.offset 8,0; st8.spill [r17]=r21,16; /* save ar.fpsr */ \
128 mov r13=IA64_KR(CURRENT); /* establish `current' */ \
129 ;; \
130.mem.offset 0,0; st8.spill [r16]=r15,16; \
131.mem.offset 8,0; st8.spill [r17]=r14,16; \
132 ;; \
133.mem.offset 0,0; st8.spill [r16]=r2,16; \
134.mem.offset 8,0; st8.spill [r17]=r3,16; \
135 XEN_ACCOUNT_GET_STAMP \
136 adds r2=IA64_PT_REGS_R16_OFFSET,r1; \
137 ;; \
138 EXTRA; \
139 movl r1=__gp; /* establish kernel global pointer */ \
140 ;; \
141 ACCOUNT_SYS_ENTER \
142 BSW_1(r3,r14); /* switch back to bank 1 (must be last in insn group) */ \
143 ;;
diff --git a/arch/ia64/include/asm/xen/page-coherent.h b/arch/ia64/include/asm/xen/page-coherent.h
deleted file mode 100644
index 96e42f97fa1f..000000000000
--- a/arch/ia64/include/asm/xen/page-coherent.h
+++ /dev/null
@@ -1,38 +0,0 @@
1#ifndef _ASM_IA64_XEN_PAGE_COHERENT_H
2#define _ASM_IA64_XEN_PAGE_COHERENT_H
3
4#include <asm/page.h>
5#include <linux/dma-attrs.h>
6#include <linux/dma-mapping.h>
7
8static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size,
9 dma_addr_t *dma_handle, gfp_t flags,
10 struct dma_attrs *attrs)
11{
12 void *vstart = (void*)__get_free_pages(flags, get_order(size));
13 *dma_handle = virt_to_phys(vstart);
14 return vstart;
15}
16
17static inline void xen_free_coherent_pages(struct device *hwdev, size_t size,
18 void *cpu_addr, dma_addr_t dma_handle,
19 struct dma_attrs *attrs)
20{
21 free_pages((unsigned long) cpu_addr, get_order(size));
22}
23
24static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
25 unsigned long offset, size_t size, enum dma_data_direction dir,
26 struct dma_attrs *attrs) { }
27
28static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
29 size_t size, enum dma_data_direction dir,
30 struct dma_attrs *attrs) { }
31
32static inline void xen_dma_sync_single_for_cpu(struct device *hwdev,
33 dma_addr_t handle, size_t size, enum dma_data_direction dir) { }
34
35static inline void xen_dma_sync_single_for_device(struct device *hwdev,
36 dma_addr_t handle, size_t size, enum dma_data_direction dir) { }
37
38#endif /* _ASM_IA64_XEN_PAGE_COHERENT_H */
diff --git a/arch/ia64/include/asm/xen/page.h b/arch/ia64/include/asm/xen/page.h
deleted file mode 100644
index 03441a780b5b..000000000000
--- a/arch/ia64/include/asm/xen/page.h
+++ /dev/null
@@ -1,65 +0,0 @@
1/******************************************************************************
2 * arch/ia64/include/asm/xen/page.h
3 *
4 * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
5 * VA Linux Systems Japan K.K.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 *
21 */
22
23#ifndef _ASM_IA64_XEN_PAGE_H
24#define _ASM_IA64_XEN_PAGE_H
25
26#define INVALID_P2M_ENTRY (~0UL)
27
28static inline unsigned long mfn_to_pfn(unsigned long mfn)
29{
30 return mfn;
31}
32
33static inline unsigned long pfn_to_mfn(unsigned long pfn)
34{
35 return pfn;
36}
37
38#define phys_to_machine_mapping_valid(_x) (1)
39
40static inline void *mfn_to_virt(unsigned long mfn)
41{
42 return __va(mfn << PAGE_SHIFT);
43}
44
45static inline unsigned long virt_to_mfn(void *virt)
46{
47 return __pa(virt) >> PAGE_SHIFT;
48}
49
50/* for tpmfront.c */
51static inline unsigned long virt_to_machine(void *virt)
52{
53 return __pa(virt);
54}
55
56static inline void set_phys_to_machine(unsigned long pfn, unsigned long mfn)
57{
58 /* nothing */
59}
60
61#define pte_mfn(_x) pte_pfn(_x)
62#define mfn_pte(_x, _y) __pte_ma(0) /* unmodified use */
63#define __pte_ma(_x) ((pte_t) {(_x)}) /* unmodified use */
64
65#endif /* _ASM_IA64_XEN_PAGE_H */
diff --git a/arch/ia64/include/asm/xen/patchlist.h b/arch/ia64/include/asm/xen/patchlist.h
deleted file mode 100644
index eae944e88846..000000000000
--- a/arch/ia64/include/asm/xen/patchlist.h
+++ /dev/null
@@ -1,38 +0,0 @@
1/******************************************************************************
2 * arch/ia64/include/asm/xen/patchlist.h
3 *
4 * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
5 * VA Linux Systems Japan K.K.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 *
21 */
22
23#define __paravirt_start_gate_fsyscall_patchlist \
24 __xen_start_gate_fsyscall_patchlist
25#define __paravirt_end_gate_fsyscall_patchlist \
26 __xen_end_gate_fsyscall_patchlist
27#define __paravirt_start_gate_brl_fsys_bubble_down_patchlist \
28 __xen_start_gate_brl_fsys_bubble_down_patchlist
29#define __paravirt_end_gate_brl_fsys_bubble_down_patchlist \
30 __xen_end_gate_brl_fsys_bubble_down_patchlist
31#define __paravirt_start_gate_vtop_patchlist \
32 __xen_start_gate_vtop_patchlist
33#define __paravirt_end_gate_vtop_patchlist \
34 __xen_end_gate_vtop_patchlist
35#define __paravirt_start_gate_mckinley_e9_patchlist \
36 __xen_start_gate_mckinley_e9_patchlist
37#define __paravirt_end_gate_mckinley_e9_patchlist \
38 __xen_end_gate_mckinley_e9_patchlist
diff --git a/arch/ia64/include/asm/xen/privop.h b/arch/ia64/include/asm/xen/privop.h
deleted file mode 100644
index fb4ec5e0b066..000000000000
--- a/arch/ia64/include/asm/xen/privop.h
+++ /dev/null
@@ -1,135 +0,0 @@
1#ifndef _ASM_IA64_XEN_PRIVOP_H
2#define _ASM_IA64_XEN_PRIVOP_H
3
4/*
5 * Copyright (C) 2005 Hewlett-Packard Co
6 * Dan Magenheimer <dan.magenheimer@hp.com>
7 *
8 * Paravirtualizations of privileged operations for Xen/ia64
9 *
10 *
11 * inline privop and paravirt_alt support
12 * Copyright (c) 2007 Isaku Yamahata <yamahata at valinux co jp>
13 * VA Linux Systems Japan K.K.
14 *
15 */
16
17#ifndef __ASSEMBLY__
18#include <linux/types.h> /* arch-ia64.h requires uint64_t */
19#endif
20#include <asm/xen/interface.h>
21
22/* At 1 MB, before per-cpu space but still addressable using addl instead
23 of movl. */
24#define XSI_BASE 0xfffffffffff00000
25
26/* Address of mapped regs. */
27#define XMAPPEDREGS_BASE (XSI_BASE + XSI_SIZE)
28
29#ifdef __ASSEMBLY__
30#define XEN_HYPER_RFI break HYPERPRIVOP_RFI
31#define XEN_HYPER_RSM_PSR_DT break HYPERPRIVOP_RSM_DT
32#define XEN_HYPER_SSM_PSR_DT break HYPERPRIVOP_SSM_DT
33#define XEN_HYPER_COVER break HYPERPRIVOP_COVER
34#define XEN_HYPER_ITC_D break HYPERPRIVOP_ITC_D
35#define XEN_HYPER_ITC_I break HYPERPRIVOP_ITC_I
36#define XEN_HYPER_SSM_I break HYPERPRIVOP_SSM_I
37#define XEN_HYPER_GET_IVR break HYPERPRIVOP_GET_IVR
38#define XEN_HYPER_THASH break HYPERPRIVOP_THASH
39#define XEN_HYPER_ITR_D break HYPERPRIVOP_ITR_D
40#define XEN_HYPER_SET_KR break HYPERPRIVOP_SET_KR
41#define XEN_HYPER_GET_PSR break HYPERPRIVOP_GET_PSR
42#define XEN_HYPER_SET_RR0_TO_RR4 break HYPERPRIVOP_SET_RR0_TO_RR4
43
44#define XSI_IFS (XSI_BASE + XSI_IFS_OFS)
45#define XSI_PRECOVER_IFS (XSI_BASE + XSI_PRECOVER_IFS_OFS)
46#define XSI_IFA (XSI_BASE + XSI_IFA_OFS)
47#define XSI_ISR (XSI_BASE + XSI_ISR_OFS)
48#define XSI_IIM (XSI_BASE + XSI_IIM_OFS)
49#define XSI_ITIR (XSI_BASE + XSI_ITIR_OFS)
50#define XSI_PSR_I_ADDR (XSI_BASE + XSI_PSR_I_ADDR_OFS)
51#define XSI_PSR_IC (XSI_BASE + XSI_PSR_IC_OFS)
52#define XSI_IPSR (XSI_BASE + XSI_IPSR_OFS)
53#define XSI_IIP (XSI_BASE + XSI_IIP_OFS)
54#define XSI_B1NAT (XSI_BASE + XSI_B1NATS_OFS)
55#define XSI_BANK1_R16 (XSI_BASE + XSI_BANK1_R16_OFS)
56#define XSI_BANKNUM (XSI_BASE + XSI_BANKNUM_OFS)
57#define XSI_IHA (XSI_BASE + XSI_IHA_OFS)
58#define XSI_ITC_OFFSET (XSI_BASE + XSI_ITC_OFFSET_OFS)
59#define XSI_ITC_LAST (XSI_BASE + XSI_ITC_LAST_OFS)
60#endif
61
62#ifndef __ASSEMBLY__
63
64/************************************************/
65/* Instructions paravirtualized for correctness */
66/************************************************/
67
68/* "fc" and "thash" are privilege-sensitive instructions, meaning they
69 * may have different semantics depending on whether they are executed
70 * at PL0 vs PL!=0. When paravirtualized, these instructions mustn't
71 * be allowed to execute directly, lest incorrect semantics result. */
72extern void xen_fc(void *addr);
73extern unsigned long xen_thash(unsigned long addr);
74
75/* Note that "ttag" and "cover" are also privilege-sensitive; "ttag"
76 * is not currently used (though it may be in a long-format VHPT system!)
77 * and the semantics of cover only change if psr.ic is off which is very
78 * rare (and currently non-existent outside of assembly code */
79
80/* There are also privilege-sensitive registers. These registers are
81 * readable at any privilege level but only writable at PL0. */
82extern unsigned long xen_get_cpuid(int index);
83extern unsigned long xen_get_pmd(int index);
84
85#ifndef ASM_SUPPORTED
86extern unsigned long xen_get_eflag(void); /* see xen_ia64_getreg */
87extern void xen_set_eflag(unsigned long); /* see xen_ia64_setreg */
88#endif
89
90/************************************************/
91/* Instructions paravirtualized for performance */
92/************************************************/
93
94/* Xen uses memory-mapped virtual privileged registers for access to many
95 * performance-sensitive privileged registers. Some, like the processor
96 * status register (psr), are broken up into multiple memory locations.
97 * Others, like "pend", are abstractions based on privileged registers.
98 * "Pend" is guaranteed to be set if reading cr.ivr would return a
99 * (non-spurious) interrupt. */
100#define XEN_MAPPEDREGS ((struct mapped_regs *)XMAPPEDREGS_BASE)
101
102#define XSI_PSR_I \
103 (*XEN_MAPPEDREGS->interrupt_mask_addr)
104#define xen_get_virtual_psr_i() \
105 (!XSI_PSR_I)
106#define xen_set_virtual_psr_i(_val) \
107 ({ XSI_PSR_I = (uint8_t)(_val) ? 0 : 1; })
108#define xen_set_virtual_psr_ic(_val) \
109 ({ XEN_MAPPEDREGS->interrupt_collection_enabled = _val ? 1 : 0; })
110#define xen_get_virtual_pend() \
111 (*(((uint8_t *)XEN_MAPPEDREGS->interrupt_mask_addr) - 1))
112
113#ifndef ASM_SUPPORTED
114/* Although all privileged operations can be left to trap and will
115 * be properly handled by Xen, some are frequent enough that we use
116 * hyperprivops for performance. */
117extern unsigned long xen_get_psr(void);
118extern unsigned long xen_get_ivr(void);
119extern unsigned long xen_get_tpr(void);
120extern void xen_hyper_ssm_i(void);
121extern void xen_set_itm(unsigned long);
122extern void xen_set_tpr(unsigned long);
123extern void xen_eoi(unsigned long);
124extern unsigned long xen_get_rr(unsigned long index);
125extern void xen_set_rr(unsigned long index, unsigned long val);
126extern void xen_set_rr0_to_rr4(unsigned long val0, unsigned long val1,
127 unsigned long val2, unsigned long val3,
128 unsigned long val4);
129extern void xen_set_kr(unsigned long index, unsigned long val);
130extern void xen_ptcga(unsigned long addr, unsigned long size);
131#endif /* !ASM_SUPPORTED */
132
133#endif /* !__ASSEMBLY__ */
134
135#endif /* _ASM_IA64_XEN_PRIVOP_H */
diff --git a/arch/ia64/include/asm/xen/xcom_hcall.h b/arch/ia64/include/asm/xen/xcom_hcall.h
deleted file mode 100644
index 20b2950c71b6..000000000000
--- a/arch/ia64/include/asm/xen/xcom_hcall.h
+++ /dev/null
@@ -1,51 +0,0 @@
1/*
2 * Copyright (C) 2006 Tristan Gingold <tristan.gingold@bull.net>, Bull SAS
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 */
18
19#ifndef _ASM_IA64_XEN_XCOM_HCALL_H
20#define _ASM_IA64_XEN_XCOM_HCALL_H
21
22/* These function creates inline or mini descriptor for the parameters and
23 calls the corresponding xencomm_arch_hypercall_X.
24 Architectures should defines HYPERVISOR_xxx as xencomm_hypercall_xxx unless
25 they want to use their own wrapper. */
26extern int xencomm_hypercall_console_io(int cmd, int count, char *str);
27
28extern int xencomm_hypercall_event_channel_op(int cmd, void *op);
29
30extern int xencomm_hypercall_xen_version(int cmd, void *arg);
31
32extern int xencomm_hypercall_physdev_op(int cmd, void *op);
33
34extern int xencomm_hypercall_grant_table_op(unsigned int cmd, void *op,
35 unsigned int count);
36
37extern int xencomm_hypercall_sched_op(int cmd, void *arg);
38
39extern int xencomm_hypercall_multicall(void *call_list, int nr_calls);
40
41extern int xencomm_hypercall_callback_op(int cmd, void *arg);
42
43extern int xencomm_hypercall_memory_op(unsigned int cmd, void *arg);
44
45extern int xencomm_hypercall_suspend(unsigned long srec);
46
47extern long xencomm_hypercall_vcpu_op(int cmd, int cpu, void *arg);
48
49extern long xencomm_hypercall_opt_feature(void *arg);
50
51#endif /* _ASM_IA64_XEN_XCOM_HCALL_H */
diff --git a/arch/ia64/include/asm/xen/xencomm.h b/arch/ia64/include/asm/xen/xencomm.h
deleted file mode 100644
index cded677bebf2..000000000000
--- a/arch/ia64/include/asm/xen/xencomm.h
+++ /dev/null
@@ -1,42 +0,0 @@
1/*
2 * Copyright (C) 2006 Hollis Blanchard <hollisb@us.ibm.com>, IBM Corporation
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 */
18
19#ifndef _ASM_IA64_XEN_XENCOMM_H
20#define _ASM_IA64_XEN_XENCOMM_H
21
22#include <xen/xencomm.h>
23#include <asm/pgtable.h>
24
25/* Must be called before any hypercall. */
26extern void xencomm_initialize(void);
27extern int xencomm_is_initialized(void);
28
29/* Check if virtual contiguity means physical contiguity
30 * where the passed address is a pointer value in virtual address.
31 * On ia64, identity mapping area in region 7 or the piece of region 5
32 * that is mapped by itr[IA64_TR_KERNEL]/dtr[IA64_TR_KERNEL]
33 */
34static inline int xencomm_is_phys_contiguous(unsigned long addr)
35{
36 return (PAGE_OFFSET <= addr &&
37 addr < (PAGE_OFFSET + (1UL << IA64_MAX_PHYS_BITS))) ||
38 (KERNEL_START <= addr &&
39 addr < KERNEL_START + KERNEL_TR_PAGE_SIZE);
40}
41
42#endif /* _ASM_IA64_XEN_XENCOMM_H */
diff --git a/arch/ia64/include/uapi/asm/break.h b/arch/ia64/include/uapi/asm/break.h
index e90c40ec9edf..f03402039896 100644
--- a/arch/ia64/include/uapi/asm/break.h
+++ b/arch/ia64/include/uapi/asm/break.h
@@ -20,13 +20,4 @@
20 */ 20 */
21#define __IA64_BREAK_SYSCALL 0x100000 21#define __IA64_BREAK_SYSCALL 0x100000
22 22
23/*
24 * Xen specific break numbers:
25 */
26#define __IA64_XEN_HYPERCALL 0x1000
27/* [__IA64_XEN_HYPERPRIVOP_START, __IA64_XEN_HYPERPRIVOP_MAX] is used
28 for xen hyperprivops */
29#define __IA64_XEN_HYPERPRIVOP_START 0x1
30#define __IA64_XEN_HYPERPRIVOP_MAX 0x1a
31
32#endif /* _ASM_IA64_BREAK_H */ 23#endif /* _ASM_IA64_BREAK_H */
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
index 59d52e3aef12..bfa19311e09c 100644
--- a/arch/ia64/kernel/acpi.c
+++ b/arch/ia64/kernel/acpi.c
@@ -53,7 +53,6 @@
53#include <asm/numa.h> 53#include <asm/numa.h>
54#include <asm/sal.h> 54#include <asm/sal.h>
55#include <asm/cyclone.h> 55#include <asm/cyclone.h>
56#include <asm/xen/hypervisor.h>
57 56
58#define BAD_MADT_ENTRY(entry, end) ( \ 57#define BAD_MADT_ENTRY(entry, end) ( \
59 (!entry) || (unsigned long)entry + sizeof(*entry) > end || \ 58 (!entry) || (unsigned long)entry + sizeof(*entry) > end || \
@@ -120,8 +119,6 @@ acpi_get_sysname(void)
120 return "uv"; 119 return "uv";
121 else 120 else
122 return "sn2"; 121 return "sn2";
123 } else if (xen_pv_domain() && !strcmp(hdr->oem_id, "XEN")) {
124 return "xen";
125 } 122 }
126 123
127#ifdef CONFIG_INTEL_IOMMU 124#ifdef CONFIG_INTEL_IOMMU
diff --git a/arch/ia64/kernel/asm-offsets.c b/arch/ia64/kernel/asm-offsets.c
index 46c9e3007315..60ef83e6db71 100644
--- a/arch/ia64/kernel/asm-offsets.c
+++ b/arch/ia64/kernel/asm-offsets.c
@@ -16,9 +16,6 @@
16#include <asm/sigcontext.h> 16#include <asm/sigcontext.h>
17#include <asm/mca.h> 17#include <asm/mca.h>
18 18
19#include <asm/xen/interface.h>
20#include <asm/xen/hypervisor.h>
21
22#include "../kernel/sigframe.h" 19#include "../kernel/sigframe.h"
23#include "../kernel/fsyscall_gtod_data.h" 20#include "../kernel/fsyscall_gtod_data.h"
24 21
@@ -290,33 +287,4 @@ void foo(void)
290 DEFINE(IA64_ITC_LASTCYCLE_OFFSET, 287 DEFINE(IA64_ITC_LASTCYCLE_OFFSET,
291 offsetof (struct itc_jitter_data_t, itc_lastcycle)); 288 offsetof (struct itc_jitter_data_t, itc_lastcycle));
292 289
293#ifdef CONFIG_XEN
294 BLANK();
295
296 DEFINE(XEN_NATIVE_ASM, XEN_NATIVE);
297 DEFINE(XEN_PV_DOMAIN_ASM, XEN_PV_DOMAIN);
298
299#define DEFINE_MAPPED_REG_OFS(sym, field) \
300 DEFINE(sym, (XMAPPEDREGS_OFS + offsetof(struct mapped_regs, field)))
301
302 DEFINE_MAPPED_REG_OFS(XSI_PSR_I_ADDR_OFS, interrupt_mask_addr);
303 DEFINE_MAPPED_REG_OFS(XSI_IPSR_OFS, ipsr);
304 DEFINE_MAPPED_REG_OFS(XSI_IIP_OFS, iip);
305 DEFINE_MAPPED_REG_OFS(XSI_IFS_OFS, ifs);
306 DEFINE_MAPPED_REG_OFS(XSI_PRECOVER_IFS_OFS, precover_ifs);
307 DEFINE_MAPPED_REG_OFS(XSI_ISR_OFS, isr);
308 DEFINE_MAPPED_REG_OFS(XSI_IFA_OFS, ifa);
309 DEFINE_MAPPED_REG_OFS(XSI_IIPA_OFS, iipa);
310 DEFINE_MAPPED_REG_OFS(XSI_IIM_OFS, iim);
311 DEFINE_MAPPED_REG_OFS(XSI_IHA_OFS, iha);
312 DEFINE_MAPPED_REG_OFS(XSI_ITIR_OFS, itir);
313 DEFINE_MAPPED_REG_OFS(XSI_PSR_IC_OFS, interrupt_collection_enabled);
314 DEFINE_MAPPED_REG_OFS(XSI_BANKNUM_OFS, banknum);
315 DEFINE_MAPPED_REG_OFS(XSI_BANK0_R16_OFS, bank0_regs[0]);
316 DEFINE_MAPPED_REG_OFS(XSI_BANK1_R16_OFS, bank1_regs[0]);
317 DEFINE_MAPPED_REG_OFS(XSI_B0NATS_OFS, vbnat);
318 DEFINE_MAPPED_REG_OFS(XSI_B1NATS_OFS, vnat);
319 DEFINE_MAPPED_REG_OFS(XSI_ITC_OFFSET_OFS, itc_offset);
320 DEFINE_MAPPED_REG_OFS(XSI_ITC_LAST_OFS, itc_last);
321#endif /* CONFIG_XEN */
322} 290}
diff --git a/arch/ia64/kernel/head.S b/arch/ia64/kernel/head.S
index 991ca336b8a2..e6f80fcf013b 100644
--- a/arch/ia64/kernel/head.S
+++ b/arch/ia64/kernel/head.S
@@ -416,8 +416,6 @@ start_ap:
416 416
417default_setup_hook = 0 // Currently nothing needs to be done. 417default_setup_hook = 0 // Currently nothing needs to be done.
418 418
419 .weak xen_setup_hook
420
421 .global hypervisor_type 419 .global hypervisor_type
422hypervisor_type: 420hypervisor_type:
423 data8 PARAVIRT_HYPERVISOR_TYPE_DEFAULT 421 data8 PARAVIRT_HYPERVISOR_TYPE_DEFAULT
@@ -426,7 +424,6 @@ hypervisor_type:
426 424
427hypervisor_setup_hooks: 425hypervisor_setup_hooks:
428 data8 default_setup_hook 426 data8 default_setup_hook
429 data8 xen_setup_hook
430num_hypervisor_hooks = (. - hypervisor_setup_hooks) / 8 427num_hypervisor_hooks = (. - hypervisor_setup_hooks) / 8
431 .previous 428 .previous
432 429
diff --git a/arch/ia64/kernel/nr-irqs.c b/arch/ia64/kernel/nr-irqs.c
index ee564575148e..f6769cd54bd9 100644
--- a/arch/ia64/kernel/nr-irqs.c
+++ b/arch/ia64/kernel/nr-irqs.c
@@ -10,15 +10,11 @@
10#include <linux/kbuild.h> 10#include <linux/kbuild.h>
11#include <linux/threads.h> 11#include <linux/threads.h>
12#include <asm/native/irq.h> 12#include <asm/native/irq.h>
13#include <asm/xen/irq.h>
14 13
15void foo(void) 14void foo(void)
16{ 15{
17 union paravirt_nr_irqs_max { 16 union paravirt_nr_irqs_max {
18 char ia64_native_nr_irqs[IA64_NATIVE_NR_IRQS]; 17 char ia64_native_nr_irqs[IA64_NATIVE_NR_IRQS];
19#ifdef CONFIG_XEN
20 char xen_nr_irqs[XEN_NR_IRQS];
21#endif
22 }; 18 };
23 19
24 DEFINE(NR_IRQS, sizeof (union paravirt_nr_irqs_max)); 20 DEFINE(NR_IRQS, sizeof (union paravirt_nr_irqs_max));
diff --git a/arch/ia64/kernel/paravirt_inst.h b/arch/ia64/kernel/paravirt_inst.h
index 64d6d810c64b..1ad7512b5f65 100644
--- a/arch/ia64/kernel/paravirt_inst.h
+++ b/arch/ia64/kernel/paravirt_inst.h
@@ -22,9 +22,6 @@
22 22
23#ifdef __IA64_ASM_PARAVIRTUALIZED_PVCHECK 23#ifdef __IA64_ASM_PARAVIRTUALIZED_PVCHECK
24#include <asm/native/pvchk_inst.h> 24#include <asm/native/pvchk_inst.h>
25#elif defined(__IA64_ASM_PARAVIRTUALIZED_XEN)
26#include <asm/xen/inst.h>
27#include <asm/xen/minstate.h>
28#else 25#else
29#include <asm/native/inst.h> 26#include <asm/native/inst.h>
30#endif 27#endif
diff --git a/arch/ia64/kernel/paravirt_patchlist.h b/arch/ia64/kernel/paravirt_patchlist.h
index 0684aa6c6507..67cffc3643a3 100644
--- a/arch/ia64/kernel/paravirt_patchlist.h
+++ b/arch/ia64/kernel/paravirt_patchlist.h
@@ -20,9 +20,5 @@
20 * 20 *
21 */ 21 */
22 22
23#if defined(__IA64_GATE_PARAVIRTUALIZED_XEN)
24#include <asm/xen/patchlist.h>
25#else
26#include <asm/native/patchlist.h> 23#include <asm/native/patchlist.h>
27#endif
28 24
diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
index 0ccb28fab27e..84f8a52ac5ae 100644
--- a/arch/ia64/kernel/vmlinux.lds.S
+++ b/arch/ia64/kernel/vmlinux.lds.S
@@ -182,12 +182,6 @@ SECTIONS {
182 __start_gate_section = .; 182 __start_gate_section = .;
183 *(.data..gate) 183 *(.data..gate)
184 __stop_gate_section = .; 184 __stop_gate_section = .;
185#ifdef CONFIG_XEN
186 . = ALIGN(PAGE_SIZE);
187 __xen_start_gate_section = .;
188 *(.data..gate.xen)
189 __xen_stop_gate_section = .;
190#endif
191 } 185 }
192 /* 186 /*
193 * make sure the gate page doesn't expose 187 * make sure the gate page doesn't expose
diff --git a/arch/ia64/xen/Kconfig b/arch/ia64/xen/Kconfig
deleted file mode 100644
index 5d8a06b0ddf7..000000000000
--- a/arch/ia64/xen/Kconfig
+++ /dev/null
@@ -1,25 +0,0 @@
1#
2# This Kconfig describes xen/ia64 options
3#
4
5config XEN
6 bool "Xen hypervisor support"
7 default y
8 depends on PARAVIRT && MCKINLEY && IA64_PAGE_SIZE_16KB
9 select XEN_XENCOMM
10 select NO_IDLE_HZ
11 # followings are required to save/restore.
12 select ARCH_SUSPEND_POSSIBLE
13 select SUSPEND
14 select PM_SLEEP
15 help
16 Enable Xen hypervisor support. Resulting kernel runs
17 both as a guest OS on Xen and natively on hardware.
18
19config XEN_XENCOMM
20 depends on XEN
21 bool
22
23config NO_IDLE_HZ
24 depends on XEN
25 bool
diff --git a/arch/ia64/xen/Makefile b/arch/ia64/xen/Makefile
deleted file mode 100644
index e6f4a0a74228..000000000000
--- a/arch/ia64/xen/Makefile
+++ /dev/null
@@ -1,37 +0,0 @@
1#
2# Makefile for Xen components
3#
4
5obj-y := hypercall.o xenivt.o xensetup.o xen_pv_ops.o irq_xen.o \
6 hypervisor.o xencomm.o xcom_hcall.o grant-table.o time.o suspend.o \
7 gate-data.o
8
9obj-$(CONFIG_IA64_GENERIC) += machvec.o
10
11# The gate DSO image is built using a special linker script.
12include $(srctree)/arch/ia64/kernel/Makefile.gate
13
14# tell compiled for xen
15CPPFLAGS_gate.lds += -D__IA64_GATE_PARAVIRTUALIZED_XEN
16AFLAGS_gate.o += -D__IA64_ASM_PARAVIRTUALIZED_XEN -D__IA64_GATE_PARAVIRTUALIZED_XEN
17
18# use same file of native.
19$(obj)/gate.o: $(src)/../kernel/gate.S FORCE
20 $(call if_changed_dep,as_o_S)
21$(obj)/gate.lds: $(src)/../kernel/gate.lds.S FORCE
22 $(call if_changed_dep,cpp_lds_S)
23
24
25AFLAGS_xenivt.o += -D__IA64_ASM_PARAVIRTUALIZED_XEN
26
27# xen multi compile
28ASM_PARAVIRT_MULTI_COMPILE_SRCS = ivt.S entry.S fsys.S
29ASM_PARAVIRT_OBJS = $(addprefix xen-,$(ASM_PARAVIRT_MULTI_COMPILE_SRCS:.S=.o))
30obj-y += $(ASM_PARAVIRT_OBJS)
31define paravirtualized_xen
32AFLAGS_$(1) += -D__IA64_ASM_PARAVIRTUALIZED_XEN
33endef
34$(foreach o,$(ASM_PARAVIRT_OBJS),$(eval $(call paravirtualized_xen,$(o))))
35
36$(obj)/xen-%.o: $(src)/../kernel/%.S FORCE
37 $(call if_changed_dep,as_o_S)
diff --git a/arch/ia64/xen/gate-data.S b/arch/ia64/xen/gate-data.S
deleted file mode 100644
index 6f95b6b32a4e..000000000000
--- a/arch/ia64/xen/gate-data.S
+++ /dev/null
@@ -1,3 +0,0 @@
1 .section .data..gate.xen, "aw"
2
3 .incbin "arch/ia64/xen/gate.so"
diff --git a/arch/ia64/xen/grant-table.c b/arch/ia64/xen/grant-table.c
deleted file mode 100644
index c18281332f84..000000000000
--- a/arch/ia64/xen/grant-table.c
+++ /dev/null
@@ -1,94 +0,0 @@
1/******************************************************************************
2 * arch/ia64/xen/grant-table.c
3 *
4 * Copyright (c) 2006 Isaku Yamahata <yamahata at valinux co jp>
5 * VA Linux Systems Japan K.K.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 *
21 */
22
23#include <linux/module.h>
24#include <linux/vmalloc.h>
25#include <linux/slab.h>
26#include <linux/mm.h>
27
28#include <xen/interface/xen.h>
29#include <xen/interface/memory.h>
30#include <xen/grant_table.h>
31
32#include <asm/xen/hypervisor.h>
33
34/****************************************************************************
35 * grant table hack
36 * cmd: GNTTABOP_xxx
37 */
38
39int arch_gnttab_map_shared(unsigned long *frames, unsigned long nr_gframes,
40 unsigned long max_nr_gframes,
41 struct grant_entry **__shared)
42{
43 *__shared = __va(frames[0] << PAGE_SHIFT);
44 return 0;
45}
46
47void arch_gnttab_unmap_shared(struct grant_entry *shared,
48 unsigned long nr_gframes)
49{
50 /* nothing */
51}
52
53static void
54gnttab_map_grant_ref_pre(struct gnttab_map_grant_ref *uop)
55{
56 uint32_t flags;
57
58 flags = uop->flags;
59
60 if (flags & GNTMAP_host_map) {
61 if (flags & GNTMAP_application_map) {
62 printk(KERN_DEBUG
63 "GNTMAP_application_map is not supported yet: "
64 "flags 0x%x\n", flags);
65 BUG();
66 }
67 if (flags & GNTMAP_contains_pte) {
68 printk(KERN_DEBUG
69 "GNTMAP_contains_pte is not supported yet: "
70 "flags 0x%x\n", flags);
71 BUG();
72 }
73 } else if (flags & GNTMAP_device_map) {
74 printk("GNTMAP_device_map is not supported yet 0x%x\n", flags);
75 BUG(); /* not yet. actually this flag is not used. */
76 } else {
77 BUG();
78 }
79}
80
81int
82HYPERVISOR_grant_table_op(unsigned int cmd, void *uop, unsigned int count)
83{
84 if (cmd == GNTTABOP_map_grant_ref) {
85 unsigned int i;
86 for (i = 0; i < count; i++) {
87 gnttab_map_grant_ref_pre(
88 (struct gnttab_map_grant_ref *)uop + i);
89 }
90 }
91 return xencomm_hypercall_grant_table_op(cmd, uop, count);
92}
93
94EXPORT_SYMBOL(HYPERVISOR_grant_table_op);
diff --git a/arch/ia64/xen/hypercall.S b/arch/ia64/xen/hypercall.S
deleted file mode 100644
index 08847aa12583..000000000000
--- a/arch/ia64/xen/hypercall.S
+++ /dev/null
@@ -1,88 +0,0 @@
1/*
2 * Support routines for Xen hypercalls
3 *
4 * Copyright (C) 2005 Dan Magenheimer <dan.magenheimer@hp.com>
5 * Copyright (C) 2008 Yaozu (Eddie) Dong <eddie.dong@intel.com>
6 */
7
8#include <asm/asmmacro.h>
9#include <asm/intrinsics.h>
10#include <asm/xen/privop.h>
11
12#ifdef __INTEL_COMPILER
13/*
14 * Hypercalls without parameter.
15 */
16#define __HCALL0(name,hcall) \
17 GLOBAL_ENTRY(name); \
18 break hcall; \
19 br.ret.sptk.many rp; \
20 END(name)
21
22/*
23 * Hypercalls with 1 parameter.
24 */
25#define __HCALL1(name,hcall) \
26 GLOBAL_ENTRY(name); \
27 mov r8=r32; \
28 break hcall; \
29 br.ret.sptk.many rp; \
30 END(name)
31
32/*
33 * Hypercalls with 2 parameters.
34 */
35#define __HCALL2(name,hcall) \
36 GLOBAL_ENTRY(name); \
37 mov r8=r32; \
38 mov r9=r33; \
39 break hcall; \
40 br.ret.sptk.many rp; \
41 END(name)
42
43__HCALL0(xen_get_psr, HYPERPRIVOP_GET_PSR)
44__HCALL0(xen_get_ivr, HYPERPRIVOP_GET_IVR)
45__HCALL0(xen_get_tpr, HYPERPRIVOP_GET_TPR)
46__HCALL0(xen_hyper_ssm_i, HYPERPRIVOP_SSM_I)
47
48__HCALL1(xen_set_tpr, HYPERPRIVOP_SET_TPR)
49__HCALL1(xen_eoi, HYPERPRIVOP_EOI)
50__HCALL1(xen_thash, HYPERPRIVOP_THASH)
51__HCALL1(xen_set_itm, HYPERPRIVOP_SET_ITM)
52__HCALL1(xen_get_rr, HYPERPRIVOP_GET_RR)
53__HCALL1(xen_fc, HYPERPRIVOP_FC)
54__HCALL1(xen_get_cpuid, HYPERPRIVOP_GET_CPUID)
55__HCALL1(xen_get_pmd, HYPERPRIVOP_GET_PMD)
56
57__HCALL2(xen_ptcga, HYPERPRIVOP_PTC_GA)
58__HCALL2(xen_set_rr, HYPERPRIVOP_SET_RR)
59__HCALL2(xen_set_kr, HYPERPRIVOP_SET_KR)
60
61GLOBAL_ENTRY(xen_set_rr0_to_rr4)
62 mov r8=r32
63 mov r9=r33
64 mov r10=r34
65 mov r11=r35
66 mov r14=r36
67 XEN_HYPER_SET_RR0_TO_RR4
68 br.ret.sptk.many rp
69 ;;
70END(xen_set_rr0_to_rr4)
71#endif
72
73GLOBAL_ENTRY(xen_send_ipi)
74 mov r14=r32
75 mov r15=r33
76 mov r2=0x400
77 break 0x1000
78 ;;
79 br.ret.sptk.many rp
80 ;;
81END(xen_send_ipi)
82
83GLOBAL_ENTRY(__hypercall)
84 mov r2=r37
85 break 0x1000
86 br.ret.sptk.many b0
87 ;;
88END(__hypercall)
diff --git a/arch/ia64/xen/hypervisor.c b/arch/ia64/xen/hypervisor.c
deleted file mode 100644
index fab62528a80b..000000000000
--- a/arch/ia64/xen/hypervisor.c
+++ /dev/null
@@ -1,97 +0,0 @@
1/******************************************************************************
2 * arch/ia64/xen/hypervisor.c
3 *
4 * Copyright (c) 2006 Isaku Yamahata <yamahata at valinux co jp>
5 * VA Linux Systems Japan K.K.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 *
21 */
22
23#include <linux/efi.h>
24#include <linux/export.h>
25#include <asm/xen/hypervisor.h>
26#include <asm/xen/privop.h>
27
28#include "irq_xen.h"
29
30struct shared_info *HYPERVISOR_shared_info __read_mostly =
31 (struct shared_info *)XSI_BASE;
32EXPORT_SYMBOL(HYPERVISOR_shared_info);
33
34DEFINE_PER_CPU(struct vcpu_info *, xen_vcpu);
35
36struct start_info *xen_start_info;
37EXPORT_SYMBOL(xen_start_info);
38
39EXPORT_SYMBOL(xen_domain_type);
40
41EXPORT_SYMBOL(__hypercall);
42
43/* Stolen from arch/x86/xen/enlighten.c */
44/*
45 * Flag to determine whether vcpu info placement is available on all
46 * VCPUs. We assume it is to start with, and then set it to zero on
47 * the first failure. This is because it can succeed on some VCPUs
48 * and not others, since it can involve hypervisor memory allocation,
49 * or because the guest failed to guarantee all the appropriate
50 * constraints on all VCPUs (ie buffer can't cross a page boundary).
51 *
52 * Note that any particular CPU may be using a placed vcpu structure,
53 * but we can only optimise if the all are.
54 *
55 * 0: not available, 1: available
56 */
57
58static void __init xen_vcpu_setup(int cpu)
59{
60 /*
61 * WARNING:
62 * before changing MAX_VIRT_CPUS,
63 * check that shared_info fits on a page
64 */
65 BUILD_BUG_ON(sizeof(struct shared_info) > PAGE_SIZE);
66 per_cpu(xen_vcpu, cpu) = &HYPERVISOR_shared_info->vcpu_info[cpu];
67}
68
69void __init xen_setup_vcpu_info_placement(void)
70{
71 int cpu;
72
73 for_each_possible_cpu(cpu)
74 xen_vcpu_setup(cpu);
75}
76
77void
78xen_cpu_init(void)
79{
80 xen_smp_intr_init();
81}
82
83/**************************************************************************
84 * opt feature
85 */
86void
87xen_ia64_enable_opt_feature(void)
88{
89 /* Enable region 7 identity map optimizations in Xen */
90 struct xen_ia64_opt_feature optf;
91
92 optf.cmd = XEN_IA64_OPTF_IDENT_MAP_REG7;
93 optf.on = XEN_IA64_OPTF_ON;
94 optf.pgprot = pgprot_val(PAGE_KERNEL);
95 optf.key = 0; /* No key on linux. */
96 HYPERVISOR_opt_feature(&optf);
97}
diff --git a/arch/ia64/xen/irq_xen.c b/arch/ia64/xen/irq_xen.c
deleted file mode 100644
index efb74dafec4d..000000000000
--- a/arch/ia64/xen/irq_xen.c
+++ /dev/null
@@ -1,443 +0,0 @@
1/******************************************************************************
2 * arch/ia64/xen/irq_xen.c
3 *
4 * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
5 * VA Linux Systems Japan K.K.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 *
21 */
22
23#include <linux/cpu.h>
24
25#include <xen/interface/xen.h>
26#include <xen/interface/callback.h>
27#include <xen/events.h>
28
29#include <asm/xen/privop.h>
30
31#include "irq_xen.h"
32
33/***************************************************************************
34 * pv_irq_ops
35 * irq operations
36 */
37
/*
 * Ask Xen (via PHYSDEVOP_alloc_irq_vector) for a vector backing @irq.
 * Returns the allocated vector, or -ENOSPC if the hypercall fails.
 */
38static int
39xen_assign_irq_vector(int irq)
40{
41 struct physdev_irq irq_op;
42
43 irq_op.irq = irq;
44 if (HYPERVISOR_physdev_op(PHYSDEVOP_alloc_irq_vector, &irq_op))
45 return -ENOSPC;
46
47 return irq_op.vector;
48}
49
/*
 * Release a vector previously obtained from xen_assign_irq_vector().
 * Vectors outside the device-vector range are silently ignored;
 * hypercall failure is only logged, not propagated.
 */
50static void
51xen_free_irq_vector(int vector)
52{
53 struct physdev_irq irq_op;
54
55 if (vector < IA64_FIRST_DEVICE_VECTOR ||
56 vector > IA64_LAST_DEVICE_VECTOR)
57 return;
58
59 irq_op.vector = vector;
60 if (HYPERVISOR_physdev_op(PHYSDEVOP_free_irq_vector, &irq_op))
61 printk(KERN_WARNING "%s: xen_free_irq_vector fail vector=%d\n",
62 __func__, vector);
63}
64
65
/*
 * Per-CPU bookkeeping for the event-channel IRQs bound by
 * __xen_register_percpu_irq().  -1 means "not bound"; the name buffers
 * hold the "<action-name><cpu>" strings passed to the bind helpers
 * (NAME_SIZE includes the NUL terminator — long action names truncate).
 */
66static DEFINE_PER_CPU(int, xen_timer_irq) = -1;
67static DEFINE_PER_CPU(int, xen_ipi_irq) = -1;
68static DEFINE_PER_CPU(int, xen_resched_irq) = -1;
69static DEFINE_PER_CPU(int, xen_cmc_irq) = -1;
70static DEFINE_PER_CPU(int, xen_cmcp_irq) = -1;
71static DEFINE_PER_CPU(int, xen_cpep_irq) = -1;
72#define NAME_SIZE 15
73static DEFINE_PER_CPU(char[NAME_SIZE], xen_timer_name);
74static DEFINE_PER_CPU(char[NAME_SIZE], xen_ipi_name);
75static DEFINE_PER_CPU(char[NAME_SIZE], xen_resched_name);
76static DEFINE_PER_CPU(char[NAME_SIZE], xen_cmc_name);
77static DEFINE_PER_CPU(char[NAME_SIZE], xen_cmcp_name);
78static DEFINE_PER_CPU(char[NAME_SIZE], xen_cpep_name);
79#undef NAME_SIZE
80
/* One percpu-irq registration cached on the BSP before slab is ready. */
81struct saved_irq {
82 unsigned int irq;
83 struct irqaction *action;
84};
85/* 16 should be far optimistic value, since only several percpu irqs
86 * are registered early.
87 */
88#define MAX_LATE_IRQ 16
/* Registrations cached before kmalloc works; replayed for APs later. */
89static struct saved_irq saved_percpu_irqs[MAX_LATE_IRQ];
90static unsigned short late_irq_cnt;
91static unsigned short saved_irq_cnt;
/* Set once the allocator is usable; gates real evtchn binding. */
92static int xen_slab_ready;
93
94#ifdef CONFIG_SMP
95#include <linux/sched.h>
96
97/* Dummy stub. Though we may check XEN_RESCHEDULE_VECTOR before __do_IRQ,
98 * it ends up to issue several memory accesses upon percpu data and
99 * thus adds unnecessary traffic to other paths.
100 */
/* No-op handler: the interrupt's only purpose is the wakeup itself. */
101static irqreturn_t
102xen_dummy_handler(int irq, void *dev_id)
103{
104 return IRQ_HANDLED;
105}
106
/* Reschedule IPI: just kick the scheduler on this CPU. */
107static irqreturn_t
108xen_resched_handler(int irq, void *dev_id)
109{
110 scheduler_ipi();
111 return IRQ_HANDLED;
112}
113
/* irqactions handed to register_percpu_irq() by xen_register_ipi(). */
114static struct irqaction xen_ipi_irqaction = {
115 .handler = handle_IPI,
116 .flags = IRQF_DISABLED,
117 .name = "IPI"
118};
119
120static struct irqaction xen_resched_irqaction = {
121 .handler = xen_resched_handler,
122 .flags = IRQF_DISABLED,
123 .name = "resched"
124};
125
/* TLB-flush IPI needs no work beyond delivery — dummy handler. */
126static struct irqaction xen_tlb_irqaction = {
127 .handler = xen_dummy_handler,
128 .flags = IRQF_DISABLED,
129 .name = "tlb_flush"
130};
131#endif
132
133/*
134 * This is xen version percpu irq registration, which needs bind
135 * to xen specific evtchn sub-system. One trick here is that xen
136 * evtchn binding interface depends on kmalloc because related
137 * port needs to be freed at device/cpu down. So we cache the
138 * registration on BSP before slab is ready and then deal them
139 * at later point. For rest instances happening after slab ready,
140 * we hook them to xen evtchn immediately.
141 *
142 * FIXME: MCA is not supported by far, and thus "nomca" boot param is
143 * required.
144 */
/*
 * Map an ia64 percpu vector onto a Xen event channel for @cpu.
 * Timer and CMC go through VIRQs; IPI/resched/CMCP/CPEP through IPIs.
 * If @save is set and we are on the BSP, the registration is also
 * recorded in saved_percpu_irqs[] so APs can replay it later.
 */
145static void
146__xen_register_percpu_irq(unsigned int cpu, unsigned int vec,
147 struct irqaction *action, int save)
148{
149 int irq = 0;
150
151 if (xen_slab_ready) {
152 switch (vec) {
153 case IA64_TIMER_VECTOR:
154 snprintf(per_cpu(xen_timer_name, cpu),
155 sizeof(per_cpu(xen_timer_name, cpu)),
156 "%s%d", action->name, cpu);
157 irq = bind_virq_to_irqhandler(VIRQ_ITC, cpu,
158 action->handler, action->flags,
159 per_cpu(xen_timer_name, cpu), action->dev_id);
160 per_cpu(xen_timer_irq, cpu) = irq;
161 break;
162 case IA64_IPI_RESCHEDULE:
163 snprintf(per_cpu(xen_resched_name, cpu),
164 sizeof(per_cpu(xen_resched_name, cpu)),
165 "%s%d", action->name, cpu);
166 irq = bind_ipi_to_irqhandler(XEN_RESCHEDULE_VECTOR, cpu,
167 action->handler, action->flags,
168 per_cpu(xen_resched_name, cpu), action->dev_id);
169 per_cpu(xen_resched_irq, cpu) = irq;
170 break;
171 case IA64_IPI_VECTOR:
172 snprintf(per_cpu(xen_ipi_name, cpu),
173 sizeof(per_cpu(xen_ipi_name, cpu)),
174 "%s%d", action->name, cpu);
175 irq = bind_ipi_to_irqhandler(XEN_IPI_VECTOR, cpu,
176 action->handler, action->flags,
177 per_cpu(xen_ipi_name, cpu), action->dev_id);
178 per_cpu(xen_ipi_irq, cpu) = irq;
179 break;
180 case IA64_CMC_VECTOR:
181 snprintf(per_cpu(xen_cmc_name, cpu),
182 sizeof(per_cpu(xen_cmc_name, cpu)),
183 "%s%d", action->name, cpu);
184 irq = bind_virq_to_irqhandler(VIRQ_MCA_CMC, cpu,
185 action->handler,
186 action->flags,
187 per_cpu(xen_cmc_name, cpu),
188 action->dev_id);
189 per_cpu(xen_cmc_irq, cpu) = irq;
190 break;
191 case IA64_CMCP_VECTOR:
192 snprintf(per_cpu(xen_cmcp_name, cpu),
193 sizeof(per_cpu(xen_cmcp_name, cpu)),
194 "%s%d", action->name, cpu);
195 irq = bind_ipi_to_irqhandler(XEN_CMCP_VECTOR, cpu,
196 action->handler,
197 action->flags,
198 per_cpu(xen_cmcp_name, cpu),
199 action->dev_id);
200 per_cpu(xen_cmcp_irq, cpu) = irq;
201 break;
202 case IA64_CPEP_VECTOR:
203 snprintf(per_cpu(xen_cpep_name, cpu),
204 sizeof(per_cpu(xen_cpep_name, cpu)),
205 "%s%d", action->name, cpu);
206 irq = bind_ipi_to_irqhandler(XEN_CPEP_VECTOR, cpu,
207 action->handler,
208 action->flags,
209 per_cpu(xen_cpep_name, cpu),
210 action->dev_id);
211 per_cpu(xen_cpep_irq, cpu) = irq;
212 break;
213 case IA64_CPE_VECTOR:
214 case IA64_MCA_RENDEZ_VECTOR:
215 case IA64_PERFMON_VECTOR:
216 case IA64_MCA_WAKEUP_VECTOR:
217 case IA64_SPURIOUS_INT_VECTOR:
218 /* No need to complain, these aren't supported. */
219 break;
220 default:
221 printk(KERN_WARNING "Percpu irq %d is unsupported "
222 "by xen!\n", vec);
223 break;
224 }
/* bind_*_to_irqhandler() returns a negative errno on failure. */
225 BUG_ON(irq < 0);
226
227 if (irq > 0) {
228 /*
229 * Mark percpu. Without this, migrate_irqs() will
230 * mark the interrupt for migrations and trigger it
231 * on cpu hotplug.
232 */
233 irq_set_status_flags(irq, IRQ_PER_CPU);
234 }
235 }
236
237 /* For BSP, we cache registered percpu irqs, and then re-walk
238 * them when initializing APs
239 */
240 if (!cpu && save) {
241 BUG_ON(saved_irq_cnt == MAX_LATE_IRQ);
242 saved_percpu_irqs[saved_irq_cnt].irq = vec;
243 saved_percpu_irqs[saved_irq_cnt].action = action;
244 saved_irq_cnt++;
245 if (!xen_slab_ready)
246 late_irq_cnt++;
247 }
248}
249
/* pv_irq_ops hook: register a percpu vector on the current CPU, caching it. */
250static void
251xen_register_percpu_irq(ia64_vector vec, struct irqaction *action)
252{
253 __xen_register_percpu_irq(smp_processor_id(), vec, action, 1);
254}
255
/*
 * late_time_init hook: slab is now usable, so replay the registrations
 * that were cached on the BSP before the allocator came up.
 */
256static void
257xen_bind_early_percpu_irq(void)
258{
259 int i;
260
261 xen_slab_ready = 1;
262 /* There's no race when accessing this cached array, since only
263 * BSP will face with such step shortly
264 */
265 for (i = 0; i < late_irq_cnt; i++)
266 __xen_register_percpu_irq(smp_processor_id(),
267 saved_percpu_irqs[i].irq,
268 saved_percpu_irqs[i].action, 0);
269}
270
271/* FIXME: There's no obvious point to check whether slab is ready. So
272 * a hack is used here by utilizing a late time hook.
273 */
274
275#ifdef CONFIG_HOTPLUG_CPU
/*
 * CPU-hotplug notifier: on CPU_DEAD, unbind every event-channel IRQ
 * this CPU had bound and reset the per-cpu irq slots to -1 so they can
 * be rebound if the CPU comes back.
 */
276static int unbind_evtchn_callback(struct notifier_block *nfb,
277 unsigned long action, void *hcpu)
278{
279 unsigned int cpu = (unsigned long)hcpu;
280
281 if (action == CPU_DEAD) {
282 /* Unregister evtchn. */
283 if (per_cpu(xen_cpep_irq, cpu) >= 0) {
284 unbind_from_irqhandler(per_cpu(xen_cpep_irq, cpu),
285 NULL);
286 per_cpu(xen_cpep_irq, cpu) = -1;
287 }
288 if (per_cpu(xen_cmcp_irq, cpu) >= 0) {
289 unbind_from_irqhandler(per_cpu(xen_cmcp_irq, cpu),
290 NULL);
291 per_cpu(xen_cmcp_irq, cpu) = -1;
292 }
293 if (per_cpu(xen_cmc_irq, cpu) >= 0) {
294 unbind_from_irqhandler(per_cpu(xen_cmc_irq, cpu), NULL);
295 per_cpu(xen_cmc_irq, cpu) = -1;
296 }
297 if (per_cpu(xen_ipi_irq, cpu) >= 0) {
298 unbind_from_irqhandler(per_cpu(xen_ipi_irq, cpu), NULL);
299 per_cpu(xen_ipi_irq, cpu) = -1;
300 }
301 if (per_cpu(xen_resched_irq, cpu) >= 0) {
302 unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu),
303 NULL);
304 per_cpu(xen_resched_irq, cpu) = -1;
305 }
306 if (per_cpu(xen_timer_irq, cpu) >= 0) {
307 unbind_from_irqhandler(per_cpu(xen_timer_irq, cpu),
308 NULL);
309 per_cpu(xen_timer_irq, cpu) = -1;
310 }
311 }
312 return NOTIFY_OK;
313}
314
315static struct notifier_block unbind_evtchn_notifier = {
316 .notifier_call = unbind_evtchn_callback,
317 .priority = 0
318};
319#endif
320
/*
 * Replay the BSP's cached percpu-irq registrations on AP @cpu
 * (called from the wakeup path before the AP runs its own init).
 */
321void xen_smp_intr_init_early(unsigned int cpu)
322{
323#ifdef CONFIG_SMP
324 unsigned int i;
325
326 for (i = 0; i < saved_irq_cnt; i++)
327 __xen_register_percpu_irq(cpu, saved_percpu_irqs[i].irq,
328 saved_percpu_irqs[i].action, 0);
329#endif
330}
331
/*
 * Per-CPU interrupt setup.  The boot CPU only registers the hotplug
 * notifier (its event callback was installed in xen_irq_init()); APs
 * register the Xen event-callback entry point for themselves.
 */
332void xen_smp_intr_init(void)
333{
334#ifdef CONFIG_SMP
335 unsigned int cpu = smp_processor_id();
336 struct callback_register event = {
337 .type = CALLBACKTYPE_event,
338 .address = { .ip = (unsigned long)&xen_event_callback },
339 };
340
341 if (cpu == 0) {
342 /* Initialization was already done for boot cpu. */
343#ifdef CONFIG_HOTPLUG_CPU
344 /* Register the notifier only once. */
345 register_cpu_notifier(&unbind_evtchn_notifier);
346#endif
347 return;
348 }
349
350 /* This should be piggyback when setup vcpu guest context */
351 BUG_ON(HYPERVISOR_callback_op(CALLBACKOP_register, &event));
352#endif /* CONFIG_SMP */
353}
354
/*
 * Boot-time IRQ init: set up the evtchn subsystem, register the Xen
 * event callback for the BSP, and defer early percpu-irq binding to
 * late_time_init (when kmalloc is available).
 */
355void __init
356xen_irq_init(void)
357{
358 struct callback_register event = {
359 .type = CALLBACKTYPE_event,
360 .address = { .ip = (unsigned long)&xen_event_callback },
361 };
362
363 xen_init_IRQ();
364 BUG_ON(HYPERVISOR_callback_op(CALLBACKOP_register, &event));
365 late_time_init = xen_bind_early_percpu_irq;
366}
367
/*
 * machvec send-IPI hook: translate an ia64 vector into the matching
 * Xen notification.  ap_wakeup_vector additionally pre-binds the
 * target AP's percpu irqs before kicking it.  delivery_mode/redirect
 * are accepted for interface compatibility but unused here.
 */
368void
369xen_platform_send_ipi(int cpu, int vector, int delivery_mode, int redirect)
370{
371#ifdef CONFIG_SMP
372 /* TODO: we need to call vcpu_up here */
373 if (unlikely(vector == ap_wakeup_vector)) {
374 /* XXX
375 * This should be in __cpu_up(cpu) in ia64 smpboot.c
376 * like x86. But don't want to modify it,
377 * keep it untouched.
378 */
379 xen_smp_intr_init_early(cpu);
380
381 xen_send_ipi(cpu, vector);
382 /* vcpu_prepare_and_up(cpu); */
383 return;
384 }
385#endif
386
387 switch (vector) {
388 case IA64_IPI_VECTOR:
389 xen_send_IPI_one(cpu, XEN_IPI_VECTOR);
390 break;
391 case IA64_IPI_RESCHEDULE:
392 xen_send_IPI_one(cpu, XEN_RESCHEDULE_VECTOR);
393 break;
394 case IA64_CMCP_VECTOR:
395 xen_send_IPI_one(cpu, XEN_CMCP_VECTOR);
396 break;
397 case IA64_CPEP_VECTOR:
398 xen_send_IPI_one(cpu, XEN_CPEP_VECTOR);
399 break;
400 case IA64_TIMER_VECTOR: {
401 /* this is used only once by check_sal_cache_flush()
402 at boot time */
403 static int used = 0;
404 if (!used) {
405 xen_send_ipi(cpu, IA64_TIMER_VECTOR);
406 used = 1;
407 break;
408 }
/* second and later timer IPIs deliberately fall into default */
409 /* fallthrough */
410 }
411 default:
412 printk(KERN_WARNING "Unsupported IPI type 0x%x\n",
413 vector);
414 notify_remote_via_irq(0); /* defaults to 0 irq */
415 break;
416 }
417}
418
/* pv_irq_ops hook: register the SMP IPI/resched/TLB-flush percpu irqs. */
419static void __init
420xen_register_ipi(void)
421{
422#ifdef CONFIG_SMP
423 register_percpu_irq(IA64_IPI_VECTOR, &xen_ipi_irqaction);
424 register_percpu_irq(IA64_IPI_RESCHEDULE, &xen_resched_irqaction);
425 register_percpu_irq(IA64_IPI_LOCAL_TLB_FLUSH, &xen_tlb_irqaction);
426#endif
427}
428
/* pv_irq_ops hook: resend an irq by poking its event channel. */
429static void
430xen_resend_irq(unsigned int vector)
431{
432 (void)resend_irq_on_evtchn(vector);
433}
434
/* Paravirt irq-ops table installed when running on Xen. */
435const struct pv_irq_ops xen_irq_ops __initconst = {
436 .register_ipi = xen_register_ipi,
437
438 .assign_irq_vector = xen_assign_irq_vector,
439 .free_irq_vector = xen_free_irq_vector,
440 .register_percpu_irq = xen_register_percpu_irq,
441
442 .resend_irq = xen_resend_irq,
443};
diff --git a/arch/ia64/xen/irq_xen.h b/arch/ia64/xen/irq_xen.h
deleted file mode 100644
index 1778517b90fe..000000000000
--- a/arch/ia64/xen/irq_xen.h
+++ /dev/null
@@ -1,34 +0,0 @@
1/******************************************************************************
2 * arch/ia64/xen/irq_xen.h
3 *
4 * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
5 * VA Linux Systems Japan K.K.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 *
21 */
22
23#ifndef IRQ_XEN_H
24#define IRQ_XEN_H
25
26extern void (*late_time_init)(void);
27extern char xen_event_callback;
28void __init xen_init_IRQ(void);
29
30extern const struct pv_irq_ops xen_irq_ops __initconst;
31extern void xen_smp_intr_init(void);
32extern void xen_send_ipi(int cpu, int vec);
33
34#endif /* IRQ_XEN_H */
diff --git a/arch/ia64/xen/machvec.c b/arch/ia64/xen/machvec.c
deleted file mode 100644
index 4ad588a7c279..000000000000
--- a/arch/ia64/xen/machvec.c
+++ /dev/null
@@ -1,4 +0,0 @@
1#define MACHVEC_PLATFORM_NAME xen
2#define MACHVEC_PLATFORM_HEADER <asm/machvec_xen.h>
3#include <asm/machvec_init.h>
4
diff --git a/arch/ia64/xen/suspend.c b/arch/ia64/xen/suspend.c
deleted file mode 100644
index 419c8620945a..000000000000
--- a/arch/ia64/xen/suspend.c
+++ /dev/null
@@ -1,59 +0,0 @@
1/******************************************************************************
2 * arch/ia64/xen/suspend.c
3 *
4 * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
5 * VA Linux Systems Japan K.K.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 *
21 * suspend/resume
22 */
23
24#include <xen/xen-ops.h>
25#include <asm/xen/hypervisor.h>
26#include "time.h"
27
/* ia64 has no pagetable pinning — these two hooks are intentionally empty. */
28void
29xen_mm_pin_all(void)
30{
31 /* nothing */
32}
33
34void
35xen_mm_unpin_all(void)
36{
37 /* nothing */
38}
39
/* Nothing to save before suspend on ia64. */
40void
41xen_arch_pre_suspend()
42{
43 /* nothing */
44}
45
/*
 * After a completed suspend/resume cycle, re-enable the hypervisor
 * optional features (region-7 ident map).  A cancelled suspend keeps
 * the old state, so nothing needs redoing.
 */
46void
47xen_arch_post_suspend(int suspend_cancelled)
48{
49 if (suspend_cancelled)
50 return;
51
52 xen_ia64_enable_opt_feature();
53 /* add more if necessary */
54}
55
/* Resume hook: restart the local tick on all APs (BSP handled elsewhere). */
56void xen_arch_resume(void)
57{
58 xen_timer_resume_on_aps();
59}
diff --git a/arch/ia64/xen/time.c b/arch/ia64/xen/time.c
deleted file mode 100644
index 1f8244a78bee..000000000000
--- a/arch/ia64/xen/time.c
+++ /dev/null
@@ -1,257 +0,0 @@
1/******************************************************************************
2 * arch/ia64/xen/time.c
3 *
4 * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
5 * VA Linux Systems Japan K.K.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 *
21 */
22
23#include <linux/delay.h>
24#include <linux/kernel_stat.h>
25#include <linux/posix-timers.h>
26#include <linux/irq.h>
27#include <linux/clocksource.h>
28
29#include <asm/timex.h>
30
31#include <asm/xen/hypervisor.h>
32
33#include <xen/interface/vcpu.h>
34
35#include "../kernel/fsyscall_gtod_data.h"
36
/* Per-CPU runstate area shared with Xen, plus steal/blocked baselines. */
37static DEFINE_PER_CPU(struct vcpu_runstate_info, xen_runstate);
38static DEFINE_PER_CPU(unsigned long, xen_stolen_time);
39static DEFINE_PER_CPU(unsigned long, xen_blocked_time);
40
41/* taken from i386/kernel/time-xen.c */
/*
 * Register this CPU's runstate area with the hypervisor and snapshot
 * the current blocked/stolen counters as the accounting baseline.
 * -ENOSYS (hypervisor without runstate support) is tolerated.
 */
42static void xen_init_missing_ticks_accounting(int cpu)
43{
44 struct vcpu_register_runstate_memory_area area;
45 struct vcpu_runstate_info *runstate = &per_cpu(xen_runstate, cpu);
46 int rc;
47
48 memset(runstate, 0, sizeof(*runstate));
49
50 area.addr.v = runstate;
51 rc = HYPERVISOR_vcpu_op(VCPUOP_register_runstate_memory_area, cpu,
52 &area);
53 WARN_ON(rc && rc != -ENOSYS);
54
55 per_cpu(xen_blocked_time, cpu) = runstate->time[RUNSTATE_blocked];
56 per_cpu(xen_stolen_time, cpu) = runstate->time[RUNSTATE_runnable]
57 + runstate->time[RUNSTATE_offline];
58}
59
60/*
61 * Runstate accounting
62 */
63/* stolen from arch/x86/xen/time.c */
/*
 * Copy this CPU's hypervisor-updated runstate into *res.  Uses a
 * seqlock-style retry on state_entry_time to get a consistent
 * snapshot; must be called with preemption disabled.
 */
64static void get_runstate_snapshot(struct vcpu_runstate_info *res)
65{
66 u64 state_time;
67 struct vcpu_runstate_info *state;
68
69 BUG_ON(preemptible());
70
71 state = &__get_cpu_var(xen_runstate);
72
73 /*
74 * The runstate info is always updated by the hypervisor on
75 * the current CPU, so there's no need to use anything
76 * stronger than a compiler barrier when fetching it.
77 */
78 do {
79 state_time = state->state_entry_time;
80 rmb();
81 *res = *state;
82 rmb();
83 } while (state->state_entry_time != state_time);
84}
85
86#define NS_PER_TICK (1000000000LL/HZ)
87
/*
 * Account ticks lost to the hypervisor (stolen) or spent blocked since
 * the per-cpu baselines, inject the corresponding timer/scheduler
 * bookkeeping, and return the itm adjustment to push the next timer
 * interrupt past the lost time.
 *
 * NOTE(review): 'stolen' and 'blocked' are only ever assigned in the
 * "reversed itc" guard branches below (forced to 0); on the normal
 * path they reach do_div() without being computed from the runstate
 * deltas — this looks like an uninitialized-use defect in the original
 * code. TODO: confirm against arch/ia64/xen/time.c history.
 */
88static unsigned long
89consider_steal_time(unsigned long new_itm)
90{
91 unsigned long stolen, blocked;
92 unsigned long delta_itm = 0, stolentick = 0;
93 int cpu = smp_processor_id();
94 struct vcpu_runstate_info runstate;
95 struct task_struct *p = current;
96
97 get_runstate_snapshot(&runstate);
98
99 /*
100 * Check for vcpu migration effect
101 * In this case, itc value is reversed.
102 * This causes huge stolen value.
103 * This function just checks and reject this effect.
104 */
105 if (!time_after_eq(runstate.time[RUNSTATE_blocked],
106 per_cpu(xen_blocked_time, cpu)))
107 blocked = 0;
108
109 if (!time_after_eq(runstate.time[RUNSTATE_runnable] +
110 runstate.time[RUNSTATE_offline],
111 per_cpu(xen_stolen_time, cpu)))
112 stolen = 0;
113
114 if (!time_after(delta_itm + new_itm, ia64_get_itc()))
115 stolentick = ia64_get_itc() - new_itm;
116
/* convert cycles to ticks; clamp stolen+blocked to the elapsed ticks */
117 do_div(stolentick, NS_PER_TICK);
118 stolentick++;
119
120 do_div(stolen, NS_PER_TICK);
121
122 if (stolen > stolentick)
123 stolen = stolentick;
124
125 stolentick -= stolen;
126 do_div(blocked, NS_PER_TICK);
127
128 if (blocked > stolentick)
129 blocked = stolentick;
130
131 if (stolen > 0 || blocked > 0) {
132 account_steal_ticks(stolen);
133 account_idle_ticks(blocked);
134 run_local_timers();
135
136 rcu_check_callbacks(cpu, user_mode(get_irq_regs()));
137
138 scheduler_tick();
139 run_posix_cpu_timers(p);
140 delta_itm += local_cpu_data->itm_delta * (stolen + blocked);
141
/* only the timekeeping CPU advances xtime */
142 if (cpu == time_keeper_id)
143 xtime_update(stolen + blocked);
144
145 local_cpu_data->itm_next = delta_itm + new_itm;
146
/* advance the baselines by what we just accounted */
147 per_cpu(xen_stolen_time, cpu) += NS_PER_TICK * stolen;
148 per_cpu(xen_blocked_time, cpu) += NS_PER_TICK * blocked;
149 }
150 return delta_itm;
151}
152
/*
 * pv_time_ops hook: fold steal time into *new_itm.  Returns 1 when the
 * adjusted itm is still in the future (timer must be reprogrammed),
 * 0 otherwise.
 */
153static int xen_do_steal_accounting(unsigned long *new_itm)
154{
155 unsigned long delta_itm;
156 delta_itm = consider_steal_time(*new_itm);
157 *new_itm += delta_itm;
158 if (time_after(*new_itm, ia64_get_itc()) && delta_itm)
159 return 1;
160
161 return 0;
162}
163
/*
 * clocksource_resume hook: atomically zero itc_lastcycle so the ITC
 * jitter-compensation logic restarts cleanly after resume (cmpxchg
 * loop guards against a concurrent updater).
 */
164static void xen_itc_jitter_data_reset(void)
165{
166 u64 lcycle, ret;
167
168 do {
169 lcycle = itc_jitter_data.itc_lastcycle;
170 ret = cmpxchg(&itc_jitter_data.itc_lastcycle, lcycle, 0);
171 } while (unlikely(ret != lcycle));
172}
173
174/* based on xen_sched_clock() in arch/x86/xen/time.c. */
175/*
176 * This relies on HAVE_UNSTABLE_SCHED_CLOCK. If it can't be defined,
177 * something similar logic should be implemented here.
178 */
179/*
180 * Xen sched_clock implementation. Returns the number of unstolen
181 * nanoseconds, which is nanoseconds the VCPU spent in RUNNING+BLOCKED
182 * states.
183 */
184static unsigned long long xen_sched_clock(void)
185{
186 struct vcpu_runstate_info runstate;
187
188 unsigned long long now;
189 unsigned long long offset;
190 unsigned long long ret;
191
192 /*
193 * Ideally sched_clock should be called on a per-cpu basis
194 * anyway, so preempt should already be disabled, but that's
195 * not current practice at the moment.
196 */
197 preempt_disable();
198
199 /*
200 * both ia64_native_sched_clock() and xen's runstate are
201 * based on mAR.ITC. So difference of them makes sense.
202 */
203 now = ia64_native_sched_clock();
204
205 get_runstate_snapshot(&runstate);
206
207 WARN_ON(runstate.state != RUNSTATE_running);
208
/* time accrued in the current RUNNING period, not yet in runstate */
209 offset = 0;
210 if (now > runstate.state_entry_time)
211 offset = now - runstate.state_entry_time;
212 ret = runstate.time[RUNSTATE_blocked] +
213 runstate.time[RUNSTATE_running] +
214 offset;
215
216 preempt_enable();
217
218 return ret;
219}
220
/* Paravirt time-ops table installed when running on Xen. */
221struct pv_time_ops xen_time_ops __initdata = {
222 .init_missing_ticks_accounting = xen_init_missing_ticks_accounting,
223 .do_steal_accounting = xen_do_steal_accounting,
224 .clocksource_resume = xen_itc_jitter_data_reset,
225 .sched_clock = xen_sched_clock,
226};
227
228/* Called after suspend, to resume time. */
229static void xen_local_tick_resume(void)
230{
231 /* Just trigger a tick. */
232 ia64_cpu_local_tick();
233 touch_softlockup_watchdog();
234}
235
/* Restart the local tick and re-baseline accounting on every online CPU. */
236void
237xen_timer_resume(void)
238{
239 unsigned int cpu;
240
241 xen_local_tick_resume();
242
243 for_each_online_cpu(cpu)
244 xen_init_missing_ticks_accounting(cpu);
245}
246
/* smp_call_function payload: per-AP tick restart + re-baseline. */
247static void ia64_cpu_local_tick_fn(void *unused)
248{
249 xen_local_tick_resume();
250 xen_init_missing_ticks_accounting(smp_processor_id());
251}
252
/* Resume the timer on all APs (caller handles the boot CPU). */
253void
254xen_timer_resume_on_aps(void)
255{
256 smp_call_function(&ia64_cpu_local_tick_fn, NULL, 1);
257}
diff --git a/arch/ia64/xen/time.h b/arch/ia64/xen/time.h
deleted file mode 100644
index f98d7e1a42f0..000000000000
--- a/arch/ia64/xen/time.h
+++ /dev/null
@@ -1,24 +0,0 @@
1/******************************************************************************
2 * arch/ia64/xen/time.h
3 *
4 * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
5 * VA Linux Systems Japan K.K.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 *
21 */
22
23extern struct pv_time_ops xen_time_ops __initdata;
24void xen_timer_resume_on_aps(void);
diff --git a/arch/ia64/xen/xcom_hcall.c b/arch/ia64/xen/xcom_hcall.c
deleted file mode 100644
index ccaf7431f7c8..000000000000
--- a/arch/ia64/xen/xcom_hcall.c
+++ /dev/null
@@ -1,441 +0,0 @@
1/*
2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License as published by
4 * the Free Software Foundation; either version 2 of the License, or
5 * (at your option) any later version.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 * You should have received a copy of the GNU General Public License
13 * along with this program; if not, write to the Free Software
14 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
15 *
16 * Tristan Gingold <tristan.gingold@bull.net>
17 *
18 * Copyright (c) 2007
19 * Isaku Yamahata <yamahata at valinux co jp>
20 * VA Linux Systems Japan K.K.
21 * consolidate mini and inline version.
22 */
23
24#include <linux/module.h>
25#include <xen/interface/xen.h>
26#include <xen/interface/memory.h>
27#include <xen/interface/grant_table.h>
28#include <xen/interface/callback.h>
29#include <xen/interface/vcpu.h>
30#include <asm/xen/hypervisor.h>
31#include <asm/xen/xencomm.h>
32
33/* Xencomm notes:
34 * This file defines hypercalls to be used by xencomm. The hypercalls simply
35 * create inlines or mini descriptors for pointers and then call the raw arch
36 * hypercall xencomm_arch_hypercall_XXX
37 *
38 * If the arch wants to directly use these hypercalls, simply define macros
39 * in asm/xen/hypercall.h, eg:
40 * #define HYPERVISOR_sched_op xencomm_hypercall_sched_op
41 *
42 * The arch may also define HYPERVISOR_xxx as a function and do more operations
43 * before/after doing the hypercall.
44 *
45 * Note: because only inline or mini descriptors are created these functions
46 * must only be called with in kernel memory parameters.
47 */
48
/*
 * console_io hypercall via xencomm.  Silently succeeds (returns 0)
 * before xencomm is initialized so early printk doesn't explode.
 */
49int
50xencomm_hypercall_console_io(int cmd, int count, char *str)
51{
52 /* xen early printk uses console io hypercall before
53 * xencomm initialization. In that case, we just ignore it.
54 */
55 if (!xencomm_is_initialized())
56 return 0;
57
58 return xencomm_arch_hypercall_console_io
59 (cmd, count, xencomm_map_no_alloc(str, count));
60}
61EXPORT_SYMBOL_GPL(xencomm_hypercall_console_io);
62
/*
 * event_channel_op hypercall: wrap @op in an inline xencomm descriptor
 * sized for the evtchn_op union, then issue the raw arch hypercall.
 */
63int
64xencomm_hypercall_event_channel_op(int cmd, void *op)
65{
66 struct xencomm_handle *desc;
67 desc = xencomm_map_no_alloc(op, sizeof(struct evtchn_op));
68 if (desc == NULL)
69 return -EINVAL;
70
71 return xencomm_arch_hypercall_event_channel_op(cmd, desc);
72}
73EXPORT_SYMBOL_GPL(xencomm_hypercall_event_channel_op);
74
/*
 * xen_version hypercall: pick the argument size per sub-command, map
 * it through xencomm, and call down.  XENVER_version takes no argument
 * at all; unknown commands return -ENOSYS.
 */
75int
76xencomm_hypercall_xen_version(int cmd, void *arg)
77{
78 struct xencomm_handle *desc;
79 unsigned int argsize;
80
81 switch (cmd) {
82 case XENVER_version:
83 /* do not actually pass an argument */
84 return xencomm_arch_hypercall_xen_version(cmd, 0);
85 case XENVER_extraversion:
86 argsize = sizeof(struct xen_extraversion);
87 break;
88 case XENVER_compile_info:
89 argsize = sizeof(struct xen_compile_info);
90 break;
91 case XENVER_capabilities:
92 argsize = sizeof(struct xen_capabilities_info);
93 break;
94 case XENVER_changeset:
95 argsize = sizeof(struct xen_changeset_info);
96 break;
97 case XENVER_platform_parameters:
98 argsize = sizeof(struct xen_platform_parameters);
99 break;
100 case XENVER_get_features:
101 argsize = (arg == NULL) ? 0 : sizeof(struct xen_feature_info);
102 break;
103
104 default:
105 printk(KERN_DEBUG
106 "%s: unknown version op %d\n", __func__, cmd);
107 return -ENOSYS;
108 }
109
110 desc = xencomm_map_no_alloc(arg, argsize);
111 if (desc == NULL)
112 return -EINVAL;
113
114 return xencomm_arch_hypercall_xen_version(cmd, desc);
115}
116EXPORT_SYMBOL_GPL(xencomm_hypercall_xen_version);
117
/*
 * physdev_op hypercall: size the argument by sub-command, map it and
 * call down.  Unsupported commands return -ENOSYS.
 */
118int
119xencomm_hypercall_physdev_op(int cmd, void *op)
120{
121 unsigned int argsize;
122
123 switch (cmd) {
124 case PHYSDEVOP_apic_read:
125 case PHYSDEVOP_apic_write:
126 argsize = sizeof(struct physdev_apic);
127 break;
128 case PHYSDEVOP_alloc_irq_vector:
129 case PHYSDEVOP_free_irq_vector:
130 argsize = sizeof(struct physdev_irq);
131 break;
132 case PHYSDEVOP_irq_status_query:
133 argsize = sizeof(struct physdev_irq_status_query);
134 break;
135
136 default:
137 printk(KERN_DEBUG
138 "%s: unknown physdev op %d\n", __func__, cmd);
139 return -ENOSYS;
140 }
141
142 return xencomm_arch_hypercall_physdev_op
143 (cmd, xencomm_map_no_alloc(op, argsize));
144}
145
/*
 * Build xencomm descriptors for one grant-table op: *desc covers the
 * op array itself; GNTTABOP_setup_table additionally rewrites the
 * embedded frame_list guest handle through a second mini descriptor.
 * Consumes one or two entries from *xc_area (advanced on use).
 * Returns 0 on success, -EINVAL on mapping failure; BUGs on an
 * unrecognized cmd.
 */
146static int
147xencommize_grant_table_op(struct xencomm_mini **xc_area,
148 unsigned int cmd, void *op, unsigned int count,
149 struct xencomm_handle **desc)
150{
151 struct xencomm_handle *desc1;
152 unsigned int argsize;
153
154 switch (cmd) {
155 case GNTTABOP_map_grant_ref:
156 argsize = sizeof(struct gnttab_map_grant_ref);
157 break;
158 case GNTTABOP_unmap_grant_ref:
159 argsize = sizeof(struct gnttab_unmap_grant_ref);
160 break;
161 case GNTTABOP_setup_table:
162 {
163 struct gnttab_setup_table *setup = op;
164
165 argsize = sizeof(*setup);
166
167 if (count != 1)
168 return -EINVAL;
169 desc1 = __xencomm_map_no_alloc
170 (xen_guest_handle(setup->frame_list),
171 setup->nr_frames *
172 sizeof(*xen_guest_handle(setup->frame_list)),
173 *xc_area);
174 if (desc1 == NULL)
175 return -EINVAL;
176 (*xc_area)++;
177 set_xen_guest_handle(setup->frame_list, (void *)desc1);
178 break;
179 }
180 case GNTTABOP_dump_table:
181 argsize = sizeof(struct gnttab_dump_table);
182 break;
183 case GNTTABOP_transfer:
184 argsize = sizeof(struct gnttab_transfer);
185 break;
186 case GNTTABOP_copy:
187 argsize = sizeof(struct gnttab_copy);
188 break;
189 case GNTTABOP_query_size:
190 argsize = sizeof(struct gnttab_query_size);
191 break;
192 default:
193 printk(KERN_DEBUG "%s: unknown hypercall grant table op %d\n",
194 __func__, cmd);
195 BUG();
196 }
197
198 *desc = __xencomm_map_no_alloc(op, count * argsize, *xc_area);
199 if (*desc == NULL)
200 return -EINVAL;
201 (*xc_area)++;
202
203 return 0;
204}
205
/*
 * grant_table_op hypercall: stack-allocate two mini xencomm areas
 * (op array + possible setup_table frame list), xencommize, and call
 * the raw arch hypercall.
 */
206int
207xencomm_hypercall_grant_table_op(unsigned int cmd, void *op,
208 unsigned int count)
209{
210 int rc;
211 struct xencomm_handle *desc;
212 XENCOMM_MINI_ALIGNED(xc_area, 2);
213
214 rc = xencommize_grant_table_op(&xc_area, cmd, op, count, &desc);
215 if (rc)
216 return rc;
217
218 return xencomm_arch_hypercall_grant_table_op(cmd, desc, count);
219}
220EXPORT_SYMBOL_GPL(xencomm_hypercall_grant_table_op);
221
/*
 * sched_op hypercall: size the argument by sub-command; SCHEDOP_poll
 * also rewrites the embedded ports guest handle through its own
 * descriptor.  Unknown commands return -ENOSYS.
 */
222int
223xencomm_hypercall_sched_op(int cmd, void *arg)
224{
225 struct xencomm_handle *desc;
226 unsigned int argsize;
227
228 switch (cmd) {
229 case SCHEDOP_yield:
230 case SCHEDOP_block:
231 argsize = 0;
232 break;
233 case SCHEDOP_shutdown:
234 argsize = sizeof(struct sched_shutdown);
235 break;
236 case SCHEDOP_poll:
237 {
238 struct sched_poll *poll = arg;
239 struct xencomm_handle *ports;
240
241 argsize = sizeof(struct sched_poll);
242 ports = xencomm_map_no_alloc(xen_guest_handle(poll->ports),
243 sizeof(*xen_guest_handle(poll->ports)));
244
245 set_xen_guest_handle(poll->ports, (void *)ports);
246 break;
247 }
248 default:
249 printk(KERN_DEBUG "%s: unknown sched op %d\n", __func__, cmd);
250 return -ENOSYS;
251 }
252
253 desc = xencomm_map_no_alloc(arg, argsize);
254 if (desc == NULL)
255 return -EINVAL;
256
257 return xencomm_arch_hypercall_sched_op(cmd, desc);
258}
259EXPORT_SYMBOL_GPL(xencomm_hypercall_sched_op);
260
/*
 * Convert a multicall list for the hypervisor.  Only grant-table sub-ops
 * need xencomm conversion (their handle arguments are rewritten
 * in-place); update_va_mapping/mmu_update are no-ops on ia64.  Any other
 * sub-op is rejected with -ENOSYS before the hypercall is issued.
 */
int
xencomm_hypercall_multicall(void *call_list, int nr_calls)
{
	int rc;
	int i;
	struct multicall_entry *mce;
	struct xencomm_handle *desc;
	/* up to 2 inline descriptors per grant-table sub-op */
	XENCOMM_MINI_ALIGNED(xc_area, nr_calls * 2);

	for (i = 0; i < nr_calls; i++) {
		mce = (struct multicall_entry *)call_list + i;

		switch (mce->op) {
		case __HYPERVISOR_update_va_mapping:
		case __HYPERVISOR_mmu_update:
			/* No-op on ia64. */
			break;
		case __HYPERVISOR_grant_table_op:
			rc = xencommize_grant_table_op
				(&xc_area,
				 mce->args[0], (void *)mce->args[1],
				 mce->args[2], &desc);
			if (rc)
				return rc;
			/* replace the raw pointer with the descriptor */
			mce->args[1] = (unsigned long)desc;
			break;
		case __HYPERVISOR_memory_op:
		default:
			printk(KERN_DEBUG
			       "%s: unhandled multicall op entry op %lu\n",
			       __func__, mce->op);
			return -ENOSYS;
		}
	}

	desc = xencomm_map_no_alloc(call_list,
				    nr_calls * sizeof(struct multicall_entry));
	if (desc == NULL)
		return -EINVAL;

	return xencomm_arch_hypercall_multicall(desc, nr_calls);
}
EXPORT_SYMBOL_GPL(xencomm_hypercall_multicall);
304
305int
306xencomm_hypercall_callback_op(int cmd, void *arg)
307{
308 unsigned int argsize;
309 switch (cmd) {
310 case CALLBACKOP_register:
311 argsize = sizeof(struct callback_register);
312 break;
313 case CALLBACKOP_unregister:
314 argsize = sizeof(struct callback_unregister);
315 break;
316 default:
317 printk(KERN_DEBUG
318 "%s: unknown callback op %d\n", __func__, cmd);
319 return -ENOSYS;
320 }
321
322 return xencomm_arch_hypercall_callback_op
323 (cmd, xencomm_map_no_alloc(arg, argsize));
324}
325
/*
 * Rewrite @mop->extent_start in-place to a xencomm descriptor built in
 * @xc_area.  Returns -EINVAL if the extent array cannot be described by
 * the single inline descriptor.
 */
static int
xencommize_memory_reservation(struct xencomm_mini *xc_area,
			      struct xen_memory_reservation *mop)
{
	struct xencomm_handle *desc;

	desc = __xencomm_map_no_alloc(xen_guest_handle(mop->extent_start),
			mop->nr_extents *
			sizeof(*xen_guest_handle(mop->extent_start)),
			xc_area);
	if (desc == NULL)
		return -EINVAL;

	set_xen_guest_handle(mop->extent_start, (void *)desc);
	return 0;
}
342
/*
 * Wrap a XENMEM hypercall.  Reservation ops embed a guest handle that is
 * temporarily rewritten to a xencomm descriptor; the caller's original
 * handle is saved in extent_start_va[] and restored after the hypercall
 * so @arg is left unmodified on return.
 */
int
xencomm_hypercall_memory_op(unsigned int cmd, void *arg)
{
	GUEST_HANDLE(xen_pfn_t) extent_start_va[2] = { {NULL}, {NULL} };
	struct xen_memory_reservation *xmr = NULL;
	int rc;
	struct xencomm_handle *desc;
	unsigned int argsize;
	XENCOMM_MINI_ALIGNED(xc_area, 2);

	switch (cmd) {
	case XENMEM_increase_reservation:
	case XENMEM_decrease_reservation:
	case XENMEM_populate_physmap:
		xmr = (struct xen_memory_reservation *)arg;
		/* save the caller's handle for restoration below */
		set_xen_guest_handle(extent_start_va[0],
				     xen_guest_handle(xmr->extent_start));

		argsize = sizeof(*xmr);
		rc = xencommize_memory_reservation(xc_area, xmr);
		if (rc)
			return rc;
		xc_area++;
		break;

	case XENMEM_maximum_ram_page:
		argsize = 0;	/* no argument */
		break;

	case XENMEM_add_to_physmap:
		argsize = sizeof(struct xen_add_to_physmap);
		break;

	default:
		printk(KERN_DEBUG "%s: unknown memory op %d\n", __func__, cmd);
		return -ENOSYS;
	}

	desc = xencomm_map_no_alloc(arg, argsize);
	if (desc == NULL)
		return -EINVAL;

	rc = xencomm_arch_hypercall_memory_op(cmd, desc);

	/* undo the in-place handle rewrite for reservation ops */
	switch (cmd) {
	case XENMEM_increase_reservation:
	case XENMEM_decrease_reservation:
	case XENMEM_populate_physmap:
		set_xen_guest_handle(xmr->extent_start,
				     xen_guest_handle(extent_start_va[0]));
		break;
	}

	return rc;
}
EXPORT_SYMBOL_GPL(xencomm_hypercall_memory_op);
399
400int
401xencomm_hypercall_suspend(unsigned long srec)
402{
403 struct sched_shutdown arg;
404
405 arg.reason = SHUTDOWN_suspend;
406
407 return xencomm_arch_hypercall_sched_op(
408 SCHEDOP_shutdown, xencomm_map_no_alloc(&arg, sizeof(arg)));
409}
410
411long
412xencomm_hypercall_vcpu_op(int cmd, int cpu, void *arg)
413{
414 unsigned int argsize;
415 switch (cmd) {
416 case VCPUOP_register_runstate_memory_area: {
417 struct vcpu_register_runstate_memory_area *area =
418 (struct vcpu_register_runstate_memory_area *)arg;
419 argsize = sizeof(*arg);
420 set_xen_guest_handle(area->addr.h,
421 (void *)xencomm_map_no_alloc(area->addr.v,
422 sizeof(area->addr.v)));
423 break;
424 }
425
426 default:
427 printk(KERN_DEBUG "%s: unknown vcpu op %d\n", __func__, cmd);
428 return -ENOSYS;
429 }
430
431 return xencomm_arch_hypercall_vcpu_op(cmd, cpu,
432 xencomm_map_no_alloc(arg, argsize));
433}
434
435long
436xencomm_hypercall_opt_feature(void *arg)
437{
438 return xencomm_arch_hypercall_opt_feature(
439 xencomm_map_no_alloc(arg,
440 sizeof(struct xen_ia64_opt_feature)));
441}
diff --git a/arch/ia64/xen/xen_pv_ops.c b/arch/ia64/xen/xen_pv_ops.c
deleted file mode 100644
index 3e8d350fdf39..000000000000
--- a/arch/ia64/xen/xen_pv_ops.c
+++ /dev/null
@@ -1,1141 +0,0 @@
1/******************************************************************************
2 * arch/ia64/xen/xen_pv_ops.c
3 *
4 * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
5 * VA Linux Systems Japan K.K.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 *
21 */
22
23#include <linux/console.h>
24#include <linux/irq.h>
25#include <linux/kernel.h>
26#include <linux/pm.h>
27#include <linux/unistd.h>
28
29#include <asm/xen/hypervisor.h>
30#include <asm/xen/xencomm.h>
31#include <asm/xen/privop.h>
32
33#include "irq_xen.h"
34#include "time.h"
35
36/***************************************************************************
37 * general info
38 */
/* Paravirt identification; kernel_rpl is refined by xen_info_init(). */
static struct pv_info xen_info __initdata = {
	.kernel_rpl = 2,	/* or 1: determined at runtime */
	.paravirt_enabled = 1,
	.name = "Xen/ia64",
};

/* Field position of the privilege-level bits in ar.rsc. */
#define IA64_RSC_PL_SHIFT	2
#define IA64_RSC_PL_BIT_SIZE	2
#define IA64_RSC_PL_MASK	\
	(((1UL << IA64_RSC_PL_BIT_SIZE) - 1) << IA64_RSC_PL_SHIFT)

/*
 * Record the actual privilege level in xen_info.kernel_rpl.
 * Xenified Linux/ia64 may run on pl = 1 or 2, so this cannot be a
 * compile-time constant.
 */
static void __init
xen_info_init(void)
{
	unsigned long rsc = ia64_getreg(_IA64_REG_AR_RSC);
	unsigned int rpl = (rsc & IA64_RSC_PL_MASK) >> IA64_RSC_PL_SHIFT;
	xen_info.kernel_rpl = rpl;
}
59
60/***************************************************************************
61 * pv_init_ops
62 * initialization hooks.
63 */
64
/* Unwinder callback: record the kernel stack pointer, then ask Xen to
 * crash the domain.  Does not return. */
static void
xen_panic_hypercall(struct unw_frame_info *info, void *arg)
{
	current->thread.ksp = (__u64)info->sw - 16;
	HYPERVISOR_shutdown(SHUTDOWN_crash);
	/* we're never actually going to get here... */
}

/* Panic notifier: capture the unwind state and crash via hypercall. */
static int
xen_panic_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	unw_init_running(xen_panic_hypercall, NULL);
	/* we're never actually going to get here... */
	return NOTIFY_DONE;
}

static struct notifier_block xen_panic_block = {
	xen_panic_event, NULL, 0 /* try to go last */
};

/* pm_power_off hook: power the domain off through the hypervisor. */
static void xen_pm_power_off(void)
{
	local_irq_disable();
	HYPERVISOR_shutdown(SHUTDOWN_poweroff);
}
90
/* Print boot-time identification of the Xen/ia64 environment. */
static void __init
xen_banner(void)
{
	printk(KERN_INFO
	       "Running on Xen! pl = %d start_info_pfn=0x%lx nr_pages=%ld "
	       "flags=0x%x\n",
	       xen_info.kernel_rpl,
	       HYPERVISOR_shared_info->arch.start_info_pfn,
	       xen_start_info->nr_pages, xen_start_info->flags);
}

/* Reserve the page holding the Xen start_info so the kernel does not
 * reclaim it.  Returns the number of regions filled in (always 1). */
static int __init
xen_reserve_memory(struct rsvd_region *region)
{
	region->start = (unsigned long)__va(
		(HYPERVISOR_shared_info->arch.start_info_pfn << PAGE_SHIFT));
	region->end = region->start + PAGE_SIZE;
	return 1;
}
110
/*
 * Early arch setup under Xen: locate start_info via the shared-info
 * page, bring up xencomm (a prerequisite for any hypercall), query Xen
 * features, and install the panic/power-off hooks.
 */
static void __init
xen_arch_setup_early(void)
{
	struct shared_info *s;
	BUG_ON(!xen_pv_domain());

	s = HYPERVISOR_shared_info;
	xen_start_info = __va(s->arch.start_info_pfn << PAGE_SHIFT);

	/* Must be done before any hypercall. */
	xencomm_initialize();

	xen_setup_features();
	/* Register a call for panic conditions. */
	atomic_notifier_chain_register(&panic_notifier_list,
				       &xen_panic_block);
	pm_power_off = xen_pm_power_off;

	xen_ia64_enable_opt_feature();
}
131
/* Register preferred consoles, in order: Xen boot console, tty, and the
 * Xen PV console (hvc). */
static void __init
xen_arch_setup_console(char **cmdline_p)
{
	add_preferred_console("xenboot", 0, NULL);
	add_preferred_console("tty", 0, NULL);
	/* use hvc_xen */
	add_preferred_console("hvc", 0, NULL);

#if !defined(CONFIG_VT) || !defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = NULL;
#endif
}

/* Returns non-zero; presumably directs the caller to skip MCA setup
 * under Xen -- confirm against the pv_init_ops consumer. */
static int __init
xen_arch_setup_nomca(void)
{
	return 1;
}

/* Hook run after the boot CPU is prepared for SMP. */
static void __init
xen_post_smp_prepare_boot_cpu(void)
{
	xen_setup_vcpu_info_placement();
}
156
#ifdef ASM_SUPPORTED
static unsigned long __init_or_module
xen_patch_bundle(void *sbundle, void *ebundle, unsigned long type);
#endif
static void __init
xen_patch_branch(unsigned long tag, unsigned long type);

/* Boot-time paravirt hooks; consumed once during early init. */
static const struct pv_init_ops xen_init_ops __initconst = {
	.banner = xen_banner,

	.reserve_memory = xen_reserve_memory,

	.arch_setup_early = xen_arch_setup_early,
	.arch_setup_console = xen_arch_setup_console,
	.arch_setup_nomca = xen_arch_setup_nomca,

	.post_smp_prepare_boot_cpu = xen_post_smp_prepare_boot_cpu,
#ifdef ASM_SUPPORTED
	.patch_bundle = xen_patch_bundle,
#endif
	.patch_branch = xen_patch_branch,
};
179
180/***************************************************************************
181 * pv_fsys_data
182 * addresses for fsys
183 */
184
/* fsyscall entry points; the definitions live outside this file. */
extern unsigned long xen_fsyscall_table[NR_syscalls];
extern char xen_fsys_bubble_down[];
struct pv_fsys_data xen_fsys_data __initdata = {
	.fsyscall_table = (unsigned long *)xen_fsyscall_table,
	.fsys_bubble_down = (void *)xen_fsys_bubble_down,
};

/***************************************************************************
 * pv_patchdata
 * patchdata addresses
 */

/* Declare the start/end markers of one gate-page patch list. */
#define DECLARE(name) \
	extern unsigned long __xen_start_gate_##name##_patchlist[]; \
	extern unsigned long __xen_end_gate_##name##_patchlist[]

DECLARE(fsyscall);
DECLARE(brl_fsys_bubble_down);
DECLARE(vtop);
DECLARE(mckinley_e9);

extern unsigned long __xen_start_gate_section[];

/* Fill one start/end pair in struct pv_patchdata. */
#define ASSIGN(name) \
	.start_##name##_patchlist = \
		(unsigned long)__xen_start_gate_##name##_patchlist, \
	.end_##name##_patchlist = \
		(unsigned long)__xen_end_gate_##name##_patchlist

static struct pv_patchdata xen_patchdata __initdata = {
	ASSIGN(fsyscall),
	ASSIGN(brl_fsys_bubble_down),
	ASSIGN(vtop),
	ASSIGN(mckinley_e9),

	.gate_section = (void*)__xen_start_gate_section,
};
222
223/***************************************************************************
224 * pv_cpu_ops
225 * intrinsics hooks.
226 */
227
228#ifndef ASM_SUPPORTED
/* C fallbacks (no ASM_SUPPORTED) for the virtualized itm/itc accessors.
 * Xen exposes a per-domain itc_offset and an itc_last watermark in the
 * mapped-regs page; itc_last enforces a monotonic virtual ITC. */
static void
xen_set_itm_with_offset(unsigned long val)
{
	/* ia64_cpu_local_tick() calls this with interrupt enabled. */
	/* WARN_ON(!irqs_disabled()); */
	xen_set_itm(val - XEN_MAPPEDREGS->itc_offset);
}

static unsigned long
xen_get_itm_with_offset(void)
{
	/* unused at this moment */
	printk(KERN_DEBUG "%s is called.\n", __func__);

	WARN_ON(!irqs_disabled());
	return ia64_native_getreg(_IA64_REG_CR_ITM) +
		XEN_MAPPEDREGS->itc_offset;
}

/* ia64_set_itc() is only called by
 * cpu_init() with ia64_set_itc(0) and ia64_sync_itc().
 * So XEN_MAPPEDREGS->itc_offset can be considered as almost constant.
 */
static void
xen_set_itc(unsigned long val)
{
	unsigned long mitc;

	WARN_ON(!irqs_disabled());
	mitc = ia64_native_getreg(_IA64_REG_AR_ITC);
	XEN_MAPPEDREGS->itc_offset = val - mitc;
	XEN_MAPPEDREGS->itc_last = val;
}

/* Read the virtual ITC: native ITC plus itc_offset, advanced past
 * itc_last via cmpxchg so concurrent readers observe a strictly
 * increasing value. */
static unsigned long
xen_get_itc(void)
{
	unsigned long res;
	unsigned long itc_offset;
	unsigned long itc_last;
	unsigned long ret_itc_last;

	itc_offset = XEN_MAPPEDREGS->itc_offset;
	do {
		itc_last = XEN_MAPPEDREGS->itc_last;
		res = ia64_native_getreg(_IA64_REG_AR_ITC);
		res += itc_offset;
		if (itc_last >= res)
			res = itc_last + 1;
		ret_itc_last = cmpxchg(&XEN_MAPPEDREGS->itc_last,
				       itc_last, res);
	} while (unlikely(ret_itc_last != itc_last));
	return res;

#if 0
	/* ia64_itc_udelay() calls ia64_get_itc() with interrupt enabled.
	   Should it be paravirtualized instead? */
	WARN_ON(!irqs_disabled());
	itc_offset = XEN_MAPPEDREGS->itc_offset;
	itc_last = XEN_MAPPEDREGS->itc_last;
	res = ia64_native_getreg(_IA64_REG_AR_ITC);
	res += itc_offset;
	if (itc_last >= res)
		res = itc_last + 1;
	XEN_MAPPEDREGS->itc_last = res;
	return res;
#endif
}
297
/* Register-write dispatch: registers Xen virtualizes are routed to
 * hypercall wrappers; everything else goes to the native handler. */
static void xen_setreg(int regnum, unsigned long val)
{
	switch (regnum) {
	case _IA64_REG_AR_KR0 ... _IA64_REG_AR_KR7:
		xen_set_kr(regnum - _IA64_REG_AR_KR0, val);
		break;
	case _IA64_REG_AR_ITC:
		xen_set_itc(val);
		break;
	case _IA64_REG_CR_TPR:
		xen_set_tpr(val);
		break;
	case _IA64_REG_CR_ITM:
		xen_set_itm_with_offset(val);
		break;
	case _IA64_REG_CR_EOI:
		xen_eoi(val);
		break;
	default:
		ia64_native_setreg_func(regnum, val);
		break;
	}
}

/* Register-read dispatch, mirroring xen_setreg() above. */
static unsigned long xen_getreg(int regnum)
{
	unsigned long res;

	switch (regnum) {
	case _IA64_REG_PSR:
		res = xen_get_psr();
		break;
	case _IA64_REG_AR_ITC:
		res = xen_get_itc();
		break;
	case _IA64_REG_CR_ITM:
		res = xen_get_itm_with_offset();
		break;
	case _IA64_REG_CR_IVR:
		res = xen_get_ivr();
		break;
	case _IA64_REG_CR_TPR:
		res = xen_get_tpr();
		break;
	default:
		res = ia64_native_getreg_func(regnum);
		break;
	}
	return res;
}
348
/* turning on interrupts is a bit more complicated.. write to the
 * memory-mapped virtual psr.i bit first (to avoid race condition),
 * then if any interrupts were pending, we have to execute a hyperprivop
 * to ensure the pending interrupt gets delivered; else we're done! */
static void
xen_ssm_i(void)
{
	int old = xen_get_virtual_psr_i();
	xen_set_virtual_psr_i(1);
	barrier();	/* unmask before inspecting the pending flag */
	if (!old && xen_get_virtual_pend())
		xen_hyper_ssm_i();
}

/* turning off interrupts can be paravirtualized simply by writing
 * to a memory-mapped virtual psr.i bit (implemented as a 16-bit bool) */
static void
xen_rsm_i(void)
{
	xen_set_virtual_psr_i(0);
	barrier();
}

/* Report the virtual psr.i state in the bit position callers of
 * ia64_get_psr_i() expect. */
static unsigned long
xen_get_psr_i(void)
{
	return xen_get_virtual_psr_i() ? IA64_PSR_I : 0;
}

/* local_irq_restore(): re-enable or mask based on the saved psr.i. */
static void
xen_intrin_local_irq_restore(unsigned long mask)
{
	if (mask & IA64_PSR_I)
		xen_ssm_i();
	else
		xen_rsm_i();
}
386#else
387#define __DEFINE_FUNC(name, code) \
388 extern const char xen_ ## name ## _direct_start[]; \
389 extern const char xen_ ## name ## _direct_end[]; \
390 asm (".align 32\n" \
391 ".proc xen_" #name "\n" \
392 "xen_" #name ":\n" \
393 "xen_" #name "_direct_start:\n" \
394 code \
395 "xen_" #name "_direct_end:\n" \
396 "br.cond.sptk.many b6\n" \
397 ".endp xen_" #name "\n")
398
399#define DEFINE_VOID_FUNC0(name, code) \
400 extern void \
401 xen_ ## name (void); \
402 __DEFINE_FUNC(name, code)
403
404#define DEFINE_VOID_FUNC1(name, code) \
405 extern void \
406 xen_ ## name (unsigned long arg); \
407 __DEFINE_FUNC(name, code)
408
409#define DEFINE_VOID_FUNC1_VOID(name, code) \
410 extern void \
411 xen_ ## name (void *arg); \
412 __DEFINE_FUNC(name, code)
413
414#define DEFINE_VOID_FUNC2(name, code) \
415 extern void \
416 xen_ ## name (unsigned long arg0, \
417 unsigned long arg1); \
418 __DEFINE_FUNC(name, code)
419
420#define DEFINE_FUNC0(name, code) \
421 extern unsigned long \
422 xen_ ## name (void); \
423 __DEFINE_FUNC(name, code)
424
425#define DEFINE_FUNC1(name, type, code) \
426 extern unsigned long \
427 xen_ ## name (type arg); \
428 __DEFINE_FUNC(name, code)
429
430#define XEN_PSR_I_ADDR_ADDR (XSI_BASE + XSI_PSR_I_ADDR_OFS)
431
432/*
433 * static void xen_set_itm_with_offset(unsigned long val)
434 * xen_set_itm(val - XEN_MAPPEDREGS->itc_offset);
435 */
436/* 2 bundles */
437DEFINE_VOID_FUNC1(set_itm_with_offset,
438 "mov r2 = " __stringify(XSI_BASE) " + "
439 __stringify(XSI_ITC_OFFSET_OFS) "\n"
440 ";;\n"
441 "ld8 r3 = [r2]\n"
442 ";;\n"
443 "sub r8 = r8, r3\n"
444 "break " __stringify(HYPERPRIVOP_SET_ITM) "\n");
445
446/*
447 * static unsigned long xen_get_itm_with_offset(void)
448 * return ia64_native_getreg(_IA64_REG_CR_ITM) + XEN_MAPPEDREGS->itc_offset;
449 */
450/* 2 bundles */
451DEFINE_FUNC0(get_itm_with_offset,
452 "mov r2 = " __stringify(XSI_BASE) " + "
453 __stringify(XSI_ITC_OFFSET_OFS) "\n"
454 ";;\n"
455 "ld8 r3 = [r2]\n"
456 "mov r8 = cr.itm\n"
457 ";;\n"
458 "add r8 = r8, r2\n");
459
460/*
461 * static void xen_set_itc(unsigned long val)
462 * unsigned long mitc;
463 *
464 * WARN_ON(!irqs_disabled());
465 * mitc = ia64_native_getreg(_IA64_REG_AR_ITC);
466 * XEN_MAPPEDREGS->itc_offset = val - mitc;
467 * XEN_MAPPEDREGS->itc_last = val;
468 */
469/* 2 bundles */
470DEFINE_VOID_FUNC1(set_itc,
471 "mov r2 = " __stringify(XSI_BASE) " + "
472 __stringify(XSI_ITC_LAST_OFS) "\n"
473 "mov r3 = ar.itc\n"
474 ";;\n"
475 "sub r3 = r8, r3\n"
476 "st8 [r2] = r8, "
477 __stringify(XSI_ITC_LAST_OFS) " - "
478 __stringify(XSI_ITC_OFFSET_OFS) "\n"
479 ";;\n"
480 "st8 [r2] = r3\n");
481
482/*
483 * static unsigned long xen_get_itc(void)
484 * unsigned long res;
485 * unsigned long itc_offset;
486 * unsigned long itc_last;
487 * unsigned long ret_itc_last;
488 *
489 * itc_offset = XEN_MAPPEDREGS->itc_offset;
490 * do {
491 * itc_last = XEN_MAPPEDREGS->itc_last;
492 * res = ia64_native_getreg(_IA64_REG_AR_ITC);
493 * res += itc_offset;
494 * if (itc_last >= res)
495 * res = itc_last + 1;
496 * ret_itc_last = cmpxchg(&XEN_MAPPEDREGS->itc_last,
497 * itc_last, res);
498 * } while (unlikely(ret_itc_last != itc_last));
499 * return res;
500 */
501/* 5 bundles */
502DEFINE_FUNC0(get_itc,
503 "mov r2 = " __stringify(XSI_BASE) " + "
504 __stringify(XSI_ITC_OFFSET_OFS) "\n"
505 ";;\n"
506 "ld8 r9 = [r2], " __stringify(XSI_ITC_LAST_OFS) " - "
507 __stringify(XSI_ITC_OFFSET_OFS) "\n"
508 /* r9 = itc_offset */
509 /* r2 = XSI_ITC_OFFSET */
510 "888:\n"
511 "mov r8 = ar.itc\n" /* res = ar.itc */
512 ";;\n"
513 "ld8 r3 = [r2]\n" /* r3 = itc_last */
514 "add r8 = r8, r9\n" /* res = ar.itc + itc_offset */
515 ";;\n"
516 "cmp.gtu p6, p0 = r3, r8\n"
517 ";;\n"
518 "(p6) add r8 = 1, r3\n" /* if (itc_last > res) itc_last + 1 */
519 ";;\n"
520 "mov ar.ccv = r8\n"
521 ";;\n"
522 "cmpxchg8.acq r10 = [r2], r8, ar.ccv\n"
523 ";;\n"
524 "cmp.ne p6, p0 = r10, r3\n"
525 "(p6) hint @pause\n"
526 "(p6) br.cond.spnt 888b\n");
527
528DEFINE_VOID_FUNC1_VOID(fc,
529 "break " __stringify(HYPERPRIVOP_FC) "\n");
530
531/*
532 * psr_i_addr_addr = XEN_PSR_I_ADDR_ADDR
533 * masked_addr = *psr_i_addr_addr
534 * pending_intr_addr = masked_addr - 1
535 * if (val & IA64_PSR_I) {
536 * masked = *masked_addr
537 * *masked_addr = 0:xen_set_virtual_psr_i(1)
538 * compiler barrier
539 * if (masked) {
540 * uint8_t pending = *pending_intr_addr;
541 * if (pending)
542 * XEN_HYPER_SSM_I
543 * }
544 * } else {
545 * *masked_addr = 1:xen_set_virtual_psr_i(0)
546 * }
547 */
548/* 6 bundles */
549DEFINE_VOID_FUNC1(intrin_local_irq_restore,
550 /* r8 = input value: 0 or IA64_PSR_I
551 * p6 = (flags & IA64_PSR_I)
552 * = if clause
553 * p7 = !(flags & IA64_PSR_I)
554 * = else clause
555 */
556 "cmp.ne p6, p7 = r8, r0\n"
557 "mov r9 = " __stringify(XEN_PSR_I_ADDR_ADDR) "\n"
558 ";;\n"
559 /* r9 = XEN_PSR_I_ADDR */
560 "ld8 r9 = [r9]\n"
561 ";;\n"
562
563 /* r10 = masked previous value */
564 "(p6) ld1.acq r10 = [r9]\n"
565 ";;\n"
566
567 /* p8 = !masked interrupt masked previously? */
568 "(p6) cmp.ne.unc p8, p0 = r10, r0\n"
569
570 /* p7 = else clause */
571 "(p7) mov r11 = 1\n"
572 ";;\n"
573 /* masked = 1 */
574 "(p7) st1.rel [r9] = r11\n"
575
576 /* p6 = if clause */
577 /* masked = 0
578 * r9 = masked_addr - 1
579 * = pending_intr_addr
580 */
581 "(p8) st1.rel [r9] = r0, -1\n"
582 ";;\n"
583 /* r8 = pending_intr */
584 "(p8) ld1.acq r11 = [r9]\n"
585 ";;\n"
586 /* p9 = interrupt pending? */
587 "(p8) cmp.ne.unc p9, p10 = r11, r0\n"
588 ";;\n"
589 "(p10) mf\n"
590 /* issue hypercall to trigger interrupt */
591 "(p9) break " __stringify(HYPERPRIVOP_SSM_I) "\n");
592
593DEFINE_VOID_FUNC2(ptcga,
594 "break " __stringify(HYPERPRIVOP_PTC_GA) "\n");
595DEFINE_VOID_FUNC2(set_rr,
596 "break " __stringify(HYPERPRIVOP_SET_RR) "\n");
597
598/*
599 * tmp = XEN_MAPPEDREGS->interrupt_mask_addr = XEN_PSR_I_ADDR_ADDR;
600 * tmp = *tmp
601 * tmp = *tmp;
602 * psr_i = tmp? 0: IA64_PSR_I;
603 */
604/* 4 bundles */
605DEFINE_FUNC0(get_psr_i,
606 "mov r9 = " __stringify(XEN_PSR_I_ADDR_ADDR) "\n"
607 ";;\n"
608 "ld8 r9 = [r9]\n" /* r9 = XEN_PSR_I_ADDR */
609 "mov r8 = 0\n" /* psr_i = 0 */
610 ";;\n"
611 "ld1.acq r9 = [r9]\n" /* r9 = XEN_PSR_I */
612 ";;\n"
613 "cmp.eq.unc p6, p0 = r9, r0\n" /* p6 = (XEN_PSR_I != 0) */
614 ";;\n"
615 "(p6) mov r8 = " __stringify(1 << IA64_PSR_I_BIT) "\n");
616
617DEFINE_FUNC1(thash, unsigned long,
618 "break " __stringify(HYPERPRIVOP_THASH) "\n");
619DEFINE_FUNC1(get_cpuid, int,
620 "break " __stringify(HYPERPRIVOP_GET_CPUID) "\n");
621DEFINE_FUNC1(get_pmd, int,
622 "break " __stringify(HYPERPRIVOP_GET_PMD) "\n");
623DEFINE_FUNC1(get_rr, unsigned long,
624 "break " __stringify(HYPERPRIVOP_GET_RR) "\n");
625
626/*
627 * void xen_privop_ssm_i(void)
628 *
629 * int masked = !xen_get_virtual_psr_i();
630 * // masked = *(*XEN_MAPPEDREGS->interrupt_mask_addr)
631 * xen_set_virtual_psr_i(1)
632 * // *(*XEN_MAPPEDREGS->interrupt_mask_addr) = 0
633 * // compiler barrier
634 * if (masked) {
635 * uint8_t* pend_int_addr =
636 * (uint8_t*)(*XEN_MAPPEDREGS->interrupt_mask_addr) - 1;
637 * uint8_t pending = *pend_int_addr;
638 * if (pending)
639 * XEN_HYPER_SSM_I
640 * }
641 */
642/* 4 bundles */
643DEFINE_VOID_FUNC0(ssm_i,
644 "mov r8 = " __stringify(XEN_PSR_I_ADDR_ADDR) "\n"
645 ";;\n"
646 "ld8 r8 = [r8]\n" /* r8 = XEN_PSR_I_ADDR */
647 ";;\n"
648 "ld1.acq r9 = [r8]\n" /* r9 = XEN_PSR_I */
649 ";;\n"
650 "st1.rel [r8] = r0, -1\n" /* psr_i = 0. enable interrupt
651 * r8 = XEN_PSR_I_ADDR - 1
652 * = pend_int_addr
653 */
654 "cmp.eq.unc p0, p6 = r9, r0\n"/* p6 = !XEN_PSR_I
655 * previously interrupt
656 * masked?
657 */
658 ";;\n"
659 "(p6) ld1.acq r8 = [r8]\n" /* r8 = xen_pend_int */
660 ";;\n"
661 "(p6) cmp.eq.unc p6, p7 = r8, r0\n" /*interrupt pending?*/
662 ";;\n"
663 /* issue hypercall to get interrupt */
664 "(p7) break " __stringify(HYPERPRIVOP_SSM_I) "\n"
665 ";;\n");
666
667/*
668 * psr_i_addr_addr = XEN_MAPPEDREGS->interrupt_mask_addr
669 * = XEN_PSR_I_ADDR_ADDR;
670 * psr_i_addr = *psr_i_addr_addr;
671 * *psr_i_addr = 1;
672 */
673/* 2 bundles */
674DEFINE_VOID_FUNC0(rsm_i,
675 "mov r8 = " __stringify(XEN_PSR_I_ADDR_ADDR) "\n"
676 /* r8 = XEN_PSR_I_ADDR */
677 "mov r9 = 1\n"
678 ";;\n"
679 "ld8 r8 = [r8]\n" /* r8 = XEN_PSR_I */
680 ";;\n"
681 "st1.rel [r8] = r9\n"); /* XEN_PSR_I = 1 */
682
683extern void
684xen_set_rr0_to_rr4(unsigned long val0, unsigned long val1,
685 unsigned long val2, unsigned long val3,
686 unsigned long val4);
687__DEFINE_FUNC(set_rr0_to_rr4,
688 "break " __stringify(HYPERPRIVOP_SET_RR0_TO_RR4) "\n");
689
690
691extern unsigned long xen_getreg(int regnum);
692#define __DEFINE_GET_REG(id, privop) \
693 "mov r2 = " __stringify(_IA64_REG_ ## id) "\n" \
694 ";;\n" \
695 "cmp.eq p6, p0 = r2, r8\n" \
696 ";;\n" \
697 "(p6) break " __stringify(HYPERPRIVOP_GET_ ## privop) "\n" \
698 "(p6) br.cond.sptk.many b6\n" \
699 ";;\n"
700
701__DEFINE_FUNC(getreg,
702 __DEFINE_GET_REG(PSR, PSR)
703
704 /* get_itc */
705 "mov r2 = " __stringify(_IA64_REG_AR_ITC) "\n"
706 ";;\n"
707 "cmp.eq p6, p0 = r2, r8\n"
708 ";;\n"
709 "(p6) br.cond.spnt xen_get_itc\n"
710 ";;\n"
711
712 /* get itm */
713 "mov r2 = " __stringify(_IA64_REG_CR_ITM) "\n"
714 ";;\n"
715 "cmp.eq p6, p0 = r2, r8\n"
716 ";;\n"
717 "(p6) br.cond.spnt xen_get_itm_with_offset\n"
718 ";;\n"
719
720 __DEFINE_GET_REG(CR_IVR, IVR)
721 __DEFINE_GET_REG(CR_TPR, TPR)
722
723 /* fall back */
724 "movl r2 = ia64_native_getreg_func\n"
725 ";;\n"
726 "mov b7 = r2\n"
727 ";;\n"
728 "br.cond.sptk.many b7\n");
729
730extern void xen_setreg(int regnum, unsigned long val);
731#define __DEFINE_SET_REG(id, privop) \
732 "mov r2 = " __stringify(_IA64_REG_ ## id) "\n" \
733 ";;\n" \
734 "cmp.eq p6, p0 = r2, r9\n" \
735 ";;\n" \
736 "(p6) break " __stringify(HYPERPRIVOP_ ## privop) "\n" \
737 "(p6) br.cond.sptk.many b6\n" \
738 ";;\n"
739
740__DEFINE_FUNC(setreg,
741 /* kr0 .. kr 7*/
742 /*
743 * if (_IA64_REG_AR_KR0 <= regnum &&
744 * regnum <= _IA64_REG_AR_KR7) {
745 * register __index asm ("r8") = regnum - _IA64_REG_AR_KR0
746 * register __val asm ("r9") = val
747 * "break HYPERPRIVOP_SET_KR"
748 * }
749 */
750 "mov r17 = r9\n"
751 "mov r2 = " __stringify(_IA64_REG_AR_KR0) "\n"
752 ";;\n"
753 "cmp.ge p6, p0 = r9, r2\n"
754 "sub r17 = r17, r2\n"
755 ";;\n"
756 "(p6) cmp.ge.unc p7, p0 = "
757 __stringify(_IA64_REG_AR_KR7) " - " __stringify(_IA64_REG_AR_KR0)
758 ", r17\n"
759 ";;\n"
760 "(p7) mov r9 = r8\n"
761 ";;\n"
762 "(p7) mov r8 = r17\n"
763 "(p7) break " __stringify(HYPERPRIVOP_SET_KR) "\n"
764
765 /* set itm */
766 "mov r2 = " __stringify(_IA64_REG_CR_ITM) "\n"
767 ";;\n"
768 "cmp.eq p6, p0 = r2, r8\n"
769 ";;\n"
770 "(p6) br.cond.spnt xen_set_itm_with_offset\n"
771
772 /* set itc */
773 "mov r2 = " __stringify(_IA64_REG_AR_ITC) "\n"
774 ";;\n"
775 "cmp.eq p6, p0 = r2, r8\n"
776 ";;\n"
777 "(p6) br.cond.spnt xen_set_itc\n"
778
779 __DEFINE_SET_REG(CR_TPR, SET_TPR)
780 __DEFINE_SET_REG(CR_EOI, EOI)
781
782 /* fall back */
783 "movl r2 = ia64_native_setreg_func\n"
784 ";;\n"
785 "mov b7 = r2\n"
786 ";;\n"
787 "br.cond.sptk.many b7\n");
788#endif
789
/* Intrinsic hooks: cache flush, TLB ops, register access, and psr.i
 * virtualization.  The implementations come from either the C or the
 * inline-asm branch above, depending on ASM_SUPPORTED. */
static const struct pv_cpu_ops xen_cpu_ops __initconst = {
	.fc = xen_fc,
	.thash = xen_thash,
	.get_cpuid = xen_get_cpuid,
	.get_pmd = xen_get_pmd,
	.getreg = xen_getreg,
	.setreg = xen_setreg,
	.ptcga = xen_ptcga,
	.get_rr = xen_get_rr,
	.set_rr = xen_set_rr,
	.set_rr0_to_rr4 = xen_set_rr0_to_rr4,
	.ssm_i = xen_ssm_i,
	.rsm_i = xen_rsm_i,
	.get_psr_i = xen_get_psr_i,
	.intrin_local_irq_restore
		= xen_intrin_local_irq_restore,
};

/******************************************************************************
 * replacement of hand written assembly codes.
 */

/* Hand-written assembly entry points, defined outside this file. */
extern char xen_switch_to;
extern char xen_leave_syscall;
extern char xen_work_processed_syscall;
extern char xen_leave_kernel;

const struct pv_cpu_asm_switch xen_cpu_asm_switch = {
	.switch_to = (unsigned long)&xen_switch_to,
	.leave_syscall = (unsigned long)&xen_leave_syscall,
	.work_processed_syscall = (unsigned long)&xen_work_processed_syscall,
	.leave_kernel = (unsigned long)&xen_leave_kernel,
};
823
824/***************************************************************************
825 * pv_iosapic_ops
826 * iosapic read/write hooks.
827 */
/* No PC-AT compatibility work is needed in a Xen PV domain. */
static void
xen_pcat_compat_init(void)
{
	/* nothing */
}

/* Returning NULL leaves the default irq_chip selection to the caller. */
static struct irq_chip*
xen_iosapic_get_irq_chip(unsigned long trigger)
{
	return NULL;
}
839
840static unsigned int
841xen_iosapic_read(char __iomem *iosapic, unsigned int reg)
842{
843 struct physdev_apic apic_op;
844 int ret;
845
846 apic_op.apic_physbase = (unsigned long)iosapic -
847 __IA64_UNCACHED_OFFSET;
848 apic_op.reg = reg;
849 ret = HYPERVISOR_physdev_op(PHYSDEVOP_apic_read, &apic_op);
850 if (ret)
851 return ret;
852 return apic_op.value;
853}
854
855static void
856xen_iosapic_write(char __iomem *iosapic, unsigned int reg, u32 val)
857{
858 struct physdev_apic apic_op;
859
860 apic_op.apic_physbase = (unsigned long)iosapic -
861 __IA64_UNCACHED_OFFSET;
862 apic_op.reg = reg;
863 apic_op.value = val;
864 HYPERVISOR_physdev_op(PHYSDEVOP_apic_write, &apic_op);
865}
866
/* IOSAPIC accesses are routed through the hypervisor in a PV domain. */
static struct pv_iosapic_ops xen_iosapic_ops __initdata = {
	.pcat_compat_init = xen_pcat_compat_init,
	.__get_irq_chip = xen_iosapic_get_irq_chip,

	.__read = xen_iosapic_read,
	.__write = xen_iosapic_write,
};
874
875/***************************************************************************
876 * pv_ops initialization
877 */
878
/*
 * Install the Xen paravirt operation tables.  Called once during early
 * boot, before any of the installed hooks are exercised.
 */
void __init
xen_setup_pv_ops(void)
{
	xen_info_init();	/* fix up kernel_rpl first */
	pv_info = xen_info;
	pv_init_ops = xen_init_ops;
	pv_fsys_data = xen_fsys_data;
	pv_patchdata = xen_patchdata;
	pv_cpu_ops = xen_cpu_ops;
	pv_iosapic_ops = xen_iosapic_ops;
	pv_irq_ops = xen_irq_ops;
	pv_time_ops = xen_time_ops;

	paravirt_cpu_asm_init(&xen_cpu_asm_switch);
}
894
895#ifdef ASM_SUPPORTED
896/***************************************************************************
 * binary patching
898 * pv_init_ops.patch_bundle
899 */
900
901#define DEFINE_FUNC_GETREG(name, privop) \
902 DEFINE_FUNC0(get_ ## name, \
903 "break "__stringify(HYPERPRIVOP_GET_ ## privop) "\n")
904
905DEFINE_FUNC_GETREG(psr, PSR);
906DEFINE_FUNC_GETREG(eflag, EFLAG);
907DEFINE_FUNC_GETREG(ivr, IVR);
908DEFINE_FUNC_GETREG(tpr, TPR);
909
910#define DEFINE_FUNC_SET_KR(n) \
911 DEFINE_VOID_FUNC0(set_kr ## n, \
912 ";;\n" \
913 "mov r9 = r8\n" \
914 "mov r8 = " #n "\n" \
915 "break " __stringify(HYPERPRIVOP_SET_KR) "\n")
916
917DEFINE_FUNC_SET_KR(0);
918DEFINE_FUNC_SET_KR(1);
919DEFINE_FUNC_SET_KR(2);
920DEFINE_FUNC_SET_KR(3);
921DEFINE_FUNC_SET_KR(4);
922DEFINE_FUNC_SET_KR(5);
923DEFINE_FUNC_SET_KR(6);
924DEFINE_FUNC_SET_KR(7);
925
926#define __DEFINE_FUNC_SETREG(name, privop) \
927 DEFINE_VOID_FUNC0(name, \
928 "break "__stringify(HYPERPRIVOP_ ## privop) "\n")
929
930#define DEFINE_FUNC_SETREG(name, privop) \
931 __DEFINE_FUNC_SETREG(set_ ## name, SET_ ## privop)
932
933DEFINE_FUNC_SETREG(eflag, EFLAG);
934DEFINE_FUNC_SETREG(tpr, TPR);
935__DEFINE_FUNC_SETREG(eoi, EOI);
936
/* Labels exported from the hand-written asm templates below so the
 * paravirt patching code can copy/relocate them. */
937extern const char xen_check_events[];
938extern const char __xen_intrin_local_irq_restore_direct_start[];
939extern const char __xen_intrin_local_irq_restore_direct_end[];
940extern const unsigned long __xen_intrin_local_irq_restore_direct_reloc;
941
/*
 * Two hand-scheduled ia64 code templates:
 *
 * xen_check_events: clears the event mask (masked = 0), re-reads the
 * pending-interrupt byte, and if anything is pending issues the SSM_I
 * hyperprivop so Xen delivers the event; returns through b6.
 *
 * __xen_intrin_local_irq_restore_direct: a directly-patchable body for
 * intrin_local_irq_restore.  The *_start/*_end labels bound the bytes
 * that get copied over call sites; the *_reloc data8 (referencing local
 * label 99) records where the brl bundle sits inside the template so
 * its target can be fixed up after the copy (see the patching code
 * later in this file).
 */
942asm (
943 ".align 32\n"
944 ".proc xen_check_events\n"
945 "xen_check_events:\n"
946 /* masked = 0
947 * r9 = masked_addr - 1
948 * = pending_intr_addr
949 */
950 "st1.rel [r9] = r0, -1\n"
951 ";;\n"
952 /* r8 = pending_intr */
953 "ld1.acq r11 = [r9]\n"
954 ";;\n"
955 /* p9 = interrupt pending? */
956 "cmp.ne p9, p10 = r11, r0\n"
957 ";;\n"
/* nothing pending: just fence so the unmask is globally visible */
958 "(p10) mf\n"
959 /* issue hypercall to trigger interrupt */
960 "(p9) break " __stringify(HYPERPRIVOP_SSM_I) "\n"
961 "br.cond.sptk.many b6\n"
962 ".endp xen_check_events\n"
963 "\n"
964 ".align 32\n"
965 ".proc __xen_intrin_local_irq_restore_direct\n"
966 "__xen_intrin_local_irq_restore_direct:\n"
967 "__xen_intrin_local_irq_restore_direct_start:\n"
968 "1:\n"
969 "{\n"
/* p6 = enabling interrupts (r8 != 0), p7 = disabling */
970 "cmp.ne p6, p7 = r8, r0\n"
971 "mov r17 = ip\n" /* get ip to calc return address */
972 "mov r9 = "__stringify(XEN_PSR_I_ADDR_ADDR) "\n"
973 ";;\n"
974 "}\n"
975 "{\n"
976 /* r9 = XEN_PSR_I_ADDR */
977 "ld8 r9 = [r9]\n"
978 ";;\n"
979 /* r10 = masked previous value */
980 "(p6) ld1.acq r10 = [r9]\n"
981 "adds r17 = 1f - 1b, r17\n" /* calculate return address */
982 ";;\n"
983 "}\n"
984 "{\n"
985 /* p8 = !masked interrupt masked previously? */
986 "(p6) cmp.ne.unc p8, p0 = r10, r0\n"
987 "\n"
988 /* p7 = else clause */
989 "(p7) mov r11 = 1\n"
990 ";;\n"
991 "(p8) mov b6 = r17\n" /* set return address */
992 "}\n"
993 "{\n"
994 /* masked = 1 */
995 "(p7) st1.rel [r9] = r11\n"
996 "\n"
/* label 99 marks the brl bundle; its address is recorded below */
997 "[99:]\n"
998 "(p8) brl.cond.dptk.few xen_check_events\n"
999 "}\n"
1000 /* pv calling stub is 5 bundles. fill nop to adjust return address */
1001 "{\n"
1002 "nop 0\n"
1003 "nop 0\n"
1004 "nop 0\n"
1005 "}\n"
1006 "1:\n"
1007 "__xen_intrin_local_irq_restore_direct_end:\n"
1008 ".endp __xen_intrin_local_irq_restore_direct\n"
1009 "\n"
1010 ".align 8\n"
1011 "__xen_intrin_local_irq_restore_direct_reloc:\n"
1012 "data8 99b\n"
1013);
1014
/*
 * Table mapping each paravirt patch type to the [start, end) byte range
 * of its Xen "direct" template, used when patching call sites in place.
 * The templates themselves (xen_*_direct_start/_end) are emitted
 * elsewhere from asm.
 */
1015static struct paravirt_patch_bundle_elem xen_patch_bundle_elems[]
1016__initdata_or_module =
1017{
/* Generic entry: template bounds plus its PARAVIRT_PATCH_TYPE_*. */
1018#define XEN_PATCH_BUNDLE_ELEM(name, type) \
1019 { \
1020 (void*)xen_ ## name ## _direct_start, \
1021 (void*)xen_ ## name ## _direct_end, \
1022 PARAVIRT_PATCH_TYPE_ ## type, \
1023 }
1024
1025 XEN_PATCH_BUNDLE_ELEM(fc, FC),
1026 XEN_PATCH_BUNDLE_ELEM(thash, THASH),
1027 XEN_PATCH_BUNDLE_ELEM(get_cpuid, GET_CPUID),
1028 XEN_PATCH_BUNDLE_ELEM(get_pmd, GET_PMD),
1029 XEN_PATCH_BUNDLE_ELEM(ptcga, PTCGA),
1030 XEN_PATCH_BUNDLE_ELEM(get_rr, GET_RR),
1031 XEN_PATCH_BUNDLE_ELEM(set_rr, SET_RR),
1032 XEN_PATCH_BUNDLE_ELEM(set_rr0_to_rr4, SET_RR0_TO_RR4),
1033 XEN_PATCH_BUNDLE_ELEM(ssm_i, SSM_I),
1034 XEN_PATCH_BUNDLE_ELEM(rsm_i, RSM_I),
1035 XEN_PATCH_BUNDLE_ELEM(get_psr_i, GET_PSR_I),
/* irq-restore template needs special handling (brl relocation), so it
 * is listed explicitly rather than via the macro. */
1036 {
1037 (void*)__xen_intrin_local_irq_restore_direct_start,
1038 (void*)__xen_intrin_local_irq_restore_direct_end,
1039 PARAVIRT_PATCH_TYPE_INTRIN_LOCAL_IRQ_RESTORE,
1040 },
1041
/* Register getters: patch type is GETREG biased by the register id. */
1042#define XEN_PATCH_BUNDLE_ELEM_GETREG(name, reg) \
1043 { \
1044 xen_get_ ## name ## _direct_start, \
1045 xen_get_ ## name ## _direct_end, \
1046 PARAVIRT_PATCH_TYPE_GETREG + _IA64_REG_ ## reg, \
1047 }
1048
1049 XEN_PATCH_BUNDLE_ELEM_GETREG(psr, PSR),
1050 XEN_PATCH_BUNDLE_ELEM_GETREG(eflag, AR_EFLAG),
1051
1052 XEN_PATCH_BUNDLE_ELEM_GETREG(ivr, CR_IVR),
1053 XEN_PATCH_BUNDLE_ELEM_GETREG(tpr, CR_TPR),
1054
1055 XEN_PATCH_BUNDLE_ELEM_GETREG(itc, AR_ITC),
1056 XEN_PATCH_BUNDLE_ELEM_GETREG(itm_with_offset, CR_ITM),
1057
1058
/* Register setters: patch type is SETREG biased by the register id. */
1059#define __XEN_PATCH_BUNDLE_ELEM_SETREG(name, reg) \
1060 { \
1061 xen_ ## name ## _direct_start, \
1062 xen_ ## name ## _direct_end, \
1063 PARAVIRT_PATCH_TYPE_SETREG + _IA64_REG_ ## reg, \
1064 }
1065
1066#define XEN_PATCH_BUNDLE_ELEM_SETREG(name, reg) \
1067 __XEN_PATCH_BUNDLE_ELEM_SETREG(set_ ## name, reg)
1068
1069 XEN_PATCH_BUNDLE_ELEM_SETREG(kr0, AR_KR0),
1070 XEN_PATCH_BUNDLE_ELEM_SETREG(kr1, AR_KR1),
1071 XEN_PATCH_BUNDLE_ELEM_SETREG(kr2, AR_KR2),
1072 XEN_PATCH_BUNDLE_ELEM_SETREG(kr3, AR_KR3),
1073 XEN_PATCH_BUNDLE_ELEM_SETREG(kr4, AR_KR4),
1074 XEN_PATCH_BUNDLE_ELEM_SETREG(kr5, AR_KR5),
1075 XEN_PATCH_BUNDLE_ELEM_SETREG(kr6, AR_KR6),
1076 XEN_PATCH_BUNDLE_ELEM_SETREG(kr7, AR_KR7),
1077
1078 XEN_PATCH_BUNDLE_ELEM_SETREG(eflag, AR_EFLAG),
1079 XEN_PATCH_BUNDLE_ELEM_SETREG(tpr, CR_TPR),
/* eoi stub has no "set_" prefix, hence the raw __ variant. */
1080 __XEN_PATCH_BUNDLE_ELEM_SETREG(eoi, CR_EOI),
1081
1082 XEN_PATCH_BUNDLE_ELEM_SETREG(itc, AR_ITC),
1083 XEN_PATCH_BUNDLE_ELEM_SETREG(itm_with_offset, CR_ITM),
1084};
1085
/*
 * Patch one call-site bundle range [sbundle, ebundle) with the Xen
 * template registered for @type in xen_patch_bundle_elems.
 *
 * Returns the number of bytes patched in (as reported by
 * __paravirt_patch_apply_bundle), or the result of the native fallback
 * when no Xen template exists for @type.  For the irq-restore template
 * the embedded brl must be re-pointed at xen_check_events, since brl is
 * IP-relative and the template was copied to a new address.
 */
1086static unsigned long __init_or_module
1087xen_patch_bundle(void *sbundle, void *ebundle, unsigned long type)
1088{
1089 const unsigned long nelems = sizeof(xen_patch_bundle_elems) /
1090 sizeof(xen_patch_bundle_elems[0]);
1091 unsigned long used;
1092 const struct paravirt_patch_bundle_elem *found;
1093
1094 used = __paravirt_patch_apply_bundle(sbundle, ebundle, type,
1095 xen_patch_bundle_elems, nelems,
1096 &found);
1097
1098 if (found == NULL)
1099 /* fallback */
1100 return ia64_native_patch_bundle(sbundle, ebundle, type);
/* Template matched but nothing was copied: no relocation needed. */
1101 if (used == 0)
1102 return used;
1103
1104 /* relocation */
1105 switch (type) {
1106 case PARAVIRT_PATCH_TYPE_INTRIN_LOCAL_IRQ_RESTORE: {
/* reloc holds the address of the brl bundle inside the template
 * (recorded via the "data8 99b" directive next to the template);
 * translate that into an offset, then into the patched copy. */
1107 unsigned long reloc =
1108 __xen_intrin_local_irq_restore_direct_reloc;
1109 unsigned long reloc_offset = reloc - (unsigned long)
1110 __xen_intrin_local_irq_restore_direct_start;
1111 unsigned long tag = (unsigned long)sbundle + reloc_offset;
1112 paravirt_patch_reloc_brl(tag, xen_check_events);
1113 break;
1114 }
1115 default:
1116 /* nothing */
1117 break;
1118 }
1119 return used;
1120}
1121#endif /* ASM_SUPPORTED */
1122
/*
 * Branch-patching table: maps each PARAVIRT_PATCH_TYPE_BR_* type to the
 * Xen replacement entry point that call sites should branch to.
 */
1123const struct paravirt_patch_branch_target xen_branch_target[]
1124__initconst = {
1125#define PARAVIRT_BR_TARGET(name, type) \
1126 { \
1127 &xen_ ## name, \
1128 PARAVIRT_PATCH_TYPE_BR_ ## type, \
1129 }
1130 PARAVIRT_BR_TARGET(switch_to, SWITCH_TO),
1131 PARAVIRT_BR_TARGET(leave_syscall, LEAVE_SYSCALL),
1132 PARAVIRT_BR_TARGET(work_processed_syscall, WORK_PROCESSED_SYSCALL),
1133 PARAVIRT_BR_TARGET(leave_kernel, LEAVE_KERNEL),
1134};
1135
/* Retarget the branch at @tag to the Xen entry point registered for
 * @type in xen_branch_target. */
1136static void __init
1137xen_patch_branch(unsigned long tag, unsigned long type)
1138{
1139 __paravirt_patch_apply_branch(tag, type, xen_branch_target,
1140 ARRAY_SIZE(xen_branch_target));
1141}
diff --git a/arch/ia64/xen/xencomm.c b/arch/ia64/xen/xencomm.c
deleted file mode 100644
index 73d903ca2d64..000000000000
--- a/arch/ia64/xen/xencomm.c
+++ /dev/null
@@ -1,106 +0,0 @@
1/*
2 * Copyright (C) 2006 Hollis Blanchard <hollisb@us.ibm.com>, IBM Corporation
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 */
18
19#include <linux/mm.h>
20#include <linux/err.h>
21
/* KERNEL_START minus its physical address; set by xencomm_initialize()
 * and used to translate addresses in the kernel's translation-register
 * mapped range. */
22static unsigned long kernel_virtual_offset;
/* Non-zero once xencomm_initialize() has run. */
23static int is_xencomm_initialized;
24
/* Xen early printk uses the console-io hypercall, which relies on
 * xencomm; early printk may therefore run before xencomm is
 * initialized.  This predicate lets such callers check first.
 */
28int
29xencomm_is_initialized(void)
30{
31 return is_xencomm_initialized;
32}
33
/* Compute the kernel's virtual-to-physical load offset (the kernel is
 * relocatable, so __pa() alone is not sufficient) and mark xencomm
 * ready for use. */
34void
35xencomm_initialize(void)
36{
37 kernel_virtual_offset = KERNEL_START - ia64_tpa(KERNEL_START);
38 is_xencomm_initialized = 1;
39}
40
41/* Translate virtual address to physical address. */
42unsigned long
43xencomm_vtop(unsigned long vaddr)
44{
45 struct page *page;
46 struct vm_area_struct *vma;
47
48 if (vaddr == 0)
49 return 0UL;
50
51 if (REGION_NUMBER(vaddr) == 5) {
52 pgd_t *pgd;
53 pud_t *pud;
54 pmd_t *pmd;
55 pte_t *ptep;
56
57 /* On ia64, TASK_SIZE refers to current. It is not initialized
58 during boot.
59 Furthermore the kernel is relocatable and __pa() doesn't
60 work on addresses. */
61 if (vaddr >= KERNEL_START
62 && vaddr < (KERNEL_START + KERNEL_TR_PAGE_SIZE))
63 return vaddr - kernel_virtual_offset;
64
65 /* In kernel area -- virtually mapped. */
66 pgd = pgd_offset_k(vaddr);
67 if (pgd_none(*pgd) || pgd_bad(*pgd))
68 return ~0UL;
69
70 pud = pud_offset(pgd, vaddr);
71 if (pud_none(*pud) || pud_bad(*pud))
72 return ~0UL;
73
74 pmd = pmd_offset(pud, vaddr);
75 if (pmd_none(*pmd) || pmd_bad(*pmd))
76 return ~0UL;
77
78 ptep = pte_offset_kernel(pmd, vaddr);
79 if (!ptep)
80 return ~0UL;
81
82 return (pte_val(*ptep) & _PFN_MASK) | (vaddr & ~PAGE_MASK);
83 }
84
85 if (vaddr > TASK_SIZE) {
86 /* percpu variables */
87 if (REGION_NUMBER(vaddr) == 7 &&
88 REGION_OFFSET(vaddr) >= (1ULL << IA64_MAX_PHYS_BITS))
89 ia64_tpa(vaddr);
90
91 /* kernel address */
92 return __pa(vaddr);
93 }
94
95 /* XXX double-check (lack of) locking */
96 vma = find_extend_vma(current->mm, vaddr);
97 if (!vma)
98 return ~0UL;
99
100 /* We assume the page is modified. */
101 page = follow_page(vma, vaddr, FOLL_WRITE | FOLL_TOUCH);
102 if (IS_ERR_OR_NULL(page))
103 return ~0UL;
104
105 return (page_to_pfn(page) << PAGE_SHIFT) | (vaddr & ~PAGE_MASK);
106}
diff --git a/arch/ia64/xen/xenivt.S b/arch/ia64/xen/xenivt.S
deleted file mode 100644
index 3e71d50584d9..000000000000
--- a/arch/ia64/xen/xenivt.S
+++ /dev/null
@@ -1,52 +0,0 @@
1/*
2 * arch/ia64/xen/xenivt.S
3 *
4 * Copyright (C) 2005 Hewlett-Packard Co
5 * Dan Magenheimer <dan.magenheimer@hp.com>
6 *
7 * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
8 * VA Linux Systems Japan K.K.
9 * pv_ops.
10 */
11
12#include <asm/asmmacro.h>
13#include <asm/kregs.h>
14#include <asm/pgtable.h>
15
16#include "../kernel/minstate.h"
17
	// Xen event-channel upcall entry point: save minimal state,
	// re-enable psr.ic, then call xen_evtchn_do_upcall repeatedly
	// until vcpu_info->evtchn_upcall_pending is clear, and leave via
	// the paravirtualized xen_leave_kernel path.
 18 .section .text,"ax"
 19GLOBAL_ENTRY(xen_event_callback)
 20 mov r31=pr // prepare to save predicates
 21 ;;
 22 SAVE_MIN_WITH_COVER // uses r31; defines r2 and r3
 23 ;;
	// psr.ic lives in the Xen shared-info area (XSI_PSR_IC); set it
	// to 1 so translations are back on before touching memory below.
 24 movl r3=XSI_PSR_IC
 25 mov r14=1
 26 ;;
 27 st4 [r3]=r14
 28 ;;
 29 adds r3=8,r2 // set up second base pointer for SAVE_REST
 30 srlz.i // ensure everybody knows psr.ic is back on
 31 ;;
 32 SAVE_REST
 33 ;;
 341:
 35 alloc r14=ar.pfs,0,0,1,0 // must be first in an insn group
 36 add out0=16,sp // pass pointer to pt_regs as first arg
 37 ;;
 38 br.call.sptk.many b0=xen_evtchn_do_upcall
 39 ;;
	// Re-check for events that arrived while handling the upcall:
	// evtchn_upcall_pending is the byte just below *XSI_PSR_I_ADDR.
 40 movl r20=XSI_PSR_I_ADDR
 41 ;;
 42 ld8 r20=[r20]
 43 ;;
 44 adds r20=-1,r20 // vcpu_info->evtchn_upcall_pending
 45 ;;
 46 ld1 r20=[r20]
 47 ;;
 48 cmp.ne p6,p0=r20,r0 // if there are pending events,
 49(p6) br.spnt.few 1b // call evtchn_do_upcall again.
 50 br.sptk.many xen_leave_kernel // we know ia64_leave_kernel is
 51 // paravirtualized as xen_leave_kernel
 52END(xen_event_callback)
diff --git a/arch/ia64/xen/xensetup.S b/arch/ia64/xen/xensetup.S
deleted file mode 100644
index e29519ebe2d2..000000000000
--- a/arch/ia64/xen/xensetup.S
+++ /dev/null
@@ -1,80 +0,0 @@
1/*
2 * Support routines for Xen
3 *
4 * Copyright (C) 2005 Dan Magenheimer <dan.magenheimer@hp.com>
5 */
6
7#include <asm/processor.h>
8#include <asm/asmmacro.h>
9#include <asm/pgtable.h>
10#include <asm/paravirt.h>
11#include <asm/xen/privop.h>
12#include <linux/elfnote.h>
13#include <linux/init.h>
14#include <xen/interface/elfnote.h>
15
	// Domain-type flag, read often after boot: starts as
	// XEN_NATIVE_ASM and is rewritten by xen_setup_hook when
	// running as a Xen PV domain.
 16 .section .data..read_mostly
 17 .align 8
 18 .global xen_domain_type
 19xen_domain_type:
 20 data4 XEN_NATIVE_ASM
 21 .previous
22
	// Xen boot entry (advertised via XEN_ELFNOTE_ENTRY below):
	// compute the actual load offset, record
	// PARAVIRT_HYPERVISOR_TYPE_XEN into hypervisor_type (using the
	// offset, since virtual addressing is not set up yet), then jump
	// to the relocated _start.
 23 __INIT
 24ENTRY(startup_xen)
 25 // Calculate load offset.
 26 // The constant, LOAD_OFFSET, can't be used because the boot
 27 // loader doesn't always load to the LMA specified by the vmlinux.lds.
 28 mov r9=ip // must be the first instruction to make sure
 29 // that r9 = the physical address of startup_xen.
 30 // Usually r9 = startup_xen - LOAD_OFFSET
 31 movl r8=startup_xen
 32 ;;
 33 sub r9=r9,r8 // Usually r9 = -LOAD_OFFSET.
 34
 35 mov r10=PARAVIRT_HYPERVISOR_TYPE_XEN
 36 movl r11=_start
 37 ;;
 38 add r11=r11,r9 // r11 = physical address of _start
 39 movl r8=hypervisor_type
 40 ;;
 41 add r8=r8,r9 // r8 = physical address of hypervisor_type
 42 mov b0=r11
 43 ;;
 44 st8 [r8]=r10 // hypervisor_type = ..._TYPE_XEN
 45 br.cond.sptk.many b0
 46 ;;
 47END(startup_xen)
 48
	// ELF notes consumed by the Xen domain builder to identify the
	// guest and locate its entry point.
 49 ELFNOTE(Xen, XEN_ELFNOTE_GUEST_OS, .asciz "linux")
 50 ELFNOTE(Xen, XEN_ELFNOTE_GUEST_VERSION, .asciz "2.6")
 51 ELFNOTE(Xen, XEN_ELFNOTE_XEN_VERSION, .asciz "xen-3.0")
 52 ELFNOTE(Xen, XEN_ELFNOTE_ENTRY, data8.ua startup_xen - LOAD_OFFSET)
53
	// NOTE(review): callers are expected to have predicate p3 set on
	// the bootstrap processor only -- confirm against the caller.
 54#define isBP p3 // are we the Bootstrap Processor?
 55
	// Per-CPU Xen setup: mark the domain as PV (BP only), install the
	// Xen interruption vector table, register the shared-info area
	// with the hypervisor (BP only), and hook up pv_ops (BP only).
 56GLOBAL_ENTRY(xen_setup_hook)
 57 mov r8=XEN_PV_DOMAIN_ASM
 58(isBP) movl r9=xen_domain_type;;
 59(isBP) st4 [r9]=r8
 60 movl r10=xen_ivt;;
 61
 62 mov cr.iva=r10 // every CPU points cr.iva at the Xen IVT
 63
 64 /* Set xsi base. */
 65#define FW_HYPERCALL_SET_SHARED_INFO_VA 0x600
 66(isBP) mov r2=FW_HYPERCALL_SET_SHARED_INFO_VA
 67(isBP) movl r28=XSI_BASE;;
 68(isBP) break 0x1000;; // firmware hypercall: r2 = number, r28 = arg
 69
 70 /* setup pv_ops */
	// preserve rp across the call in r4 (no stack frame here)
 71(isBP) mov r4=rp
 72 ;;
 73(isBP) br.call.sptk.many rp=xen_setup_pv_ops
 74 ;;
 75(isBP) mov rp=r4
 76 ;;
 77
 78 br.ret.sptk.many rp
 79 ;;
 80END(xen_setup_hook)