path: root/arch/ia64/include
author		Linus Torvalds <torvalds@linux-foundation.org>	2008-10-23 11:07:35 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2008-10-23 11:07:35 -0400
commit		72441bdc76f7f71d7b75cdaa48f26dbb1f3d932e (patch)
tree		df818d5e210c0c31c6634d43401d02a99ee87d1b /arch/ia64/include
parent		2515ddc6db8eb49a79f0fe5e67ff09ac7c81eab4 (diff)
parent		fe393164c529f72def1952fb66c11732d0984d78 (diff)
Merge branch 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux-2.6
* 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux-2.6: (41 commits)
  [IA64] Fix annoying IA64_TR_ALLOC_MAX message.
  [IA64] kill sys32_pipe
  [IA64] remove sys32_pause
  [IA64] Add Variable Page Size and IA64 Support in Intel IOMMU
  ia64/pv_ops: paravirtualized instruction checker.
  ia64/xen: a recipe for using xen/ia64 with pv_ops.
  ia64/pv_ops: update Kconfig for paravirtualized guest and xen.
  ia64/xen: preliminary support for save/restore.
  ia64/xen: define xen machine vector for domU.
  ia64/pv_ops/xen: implement xen pv_time_ops.
  ia64/pv_ops/xen: implement xen pv_irq_ops.
  ia64/pv_ops/xen: define the nubmer of irqs which xen needs.
  ia64/pv_ops/xen: implement xen pv_iosapic_ops.
  ia64/pv_ops/xen: paravirtualize entry.S for ia64/xen.
  ia64/pv_ops/xen: paravirtualize ivt.S for xen.
  ia64/pv_ops/xen: paravirtualize DO_SAVE_MIN for xen.
  ia64/pv_ops/xen: define xen paravirtualized instructions for hand written assembly code
  ia64/pv_ops/xen: define xen pv_cpu_ops.
  ia64/pv_ops/xen: define xen pv_init_ops for various xen initialization.
  ia64/pv_ops/xen: elf note based xen startup.
  ...
Diffstat (limited to 'arch/ia64/include')
-rw-r--r--  arch/ia64/include/asm/break.h               |   9
-rw-r--r--  arch/ia64/include/asm/cacheflush.h          |   2
-rw-r--r--  arch/ia64/include/asm/device.h              |   3
-rw-r--r--  arch/ia64/include/asm/dma-mapping.h         |  50
-rw-r--r--  arch/ia64/include/asm/iommu.h               |  16
-rw-r--r--  arch/ia64/include/asm/kregs.h               |   2
-rw-r--r--  arch/ia64/include/asm/machvec.h             |   4
-rw-r--r--  arch/ia64/include/asm/machvec_dig_vtd.h     |  38
-rw-r--r--  arch/ia64/include/asm/machvec_init.h        |   1
-rw-r--r--  arch/ia64/include/asm/machvec_xen.h         |  22
-rw-r--r--  arch/ia64/include/asm/meminit.h             |   3
-rw-r--r--  arch/ia64/include/asm/native/inst.h         |  10
-rw-r--r--  arch/ia64/include/asm/native/pvchk_inst.h   | 263
-rw-r--r--  arch/ia64/include/asm/paravirt.h            |   4
-rw-r--r--  arch/ia64/include/asm/pci.h                 |   3
-rw-r--r--  arch/ia64/include/asm/ptrace.h              |   8
-rw-r--r--  arch/ia64/include/asm/pvclock-abi.h         |  48
-rw-r--r--  arch/ia64/include/asm/swiotlb.h             |  56
-rw-r--r--  arch/ia64/include/asm/sync_bitops.h         |  51
-rw-r--r--  arch/ia64/include/asm/syscall.h             | 163
-rw-r--r--  arch/ia64/include/asm/thread_info.h         |   3
-rw-r--r--  arch/ia64/include/asm/timex.h               |   2
-rw-r--r--  arch/ia64/include/asm/unistd.h              |   1
-rw-r--r--  arch/ia64/include/asm/xen/events.h          |  50
-rw-r--r--  arch/ia64/include/asm/xen/grant_table.h     |  29
-rw-r--r--  arch/ia64/include/asm/xen/hypercall.h       | 265
-rw-r--r--  arch/ia64/include/asm/xen/hypervisor.h      |  89
-rw-r--r--  arch/ia64/include/asm/xen/inst.h            | 458
-rw-r--r--  arch/ia64/include/asm/xen/interface.h       | 346
-rw-r--r--  arch/ia64/include/asm/xen/irq.h             |  44
-rw-r--r--  arch/ia64/include/asm/xen/minstate.h        | 134
-rw-r--r--  arch/ia64/include/asm/xen/page.h            |  65
-rw-r--r--  arch/ia64/include/asm/xen/privop.h          | 129
-rw-r--r--  arch/ia64/include/asm/xen/xcom_hcall.h      |  51
-rw-r--r--  arch/ia64/include/asm/xen/xencomm.h         |  42
35 files changed, 2455 insertions, 9 deletions
diff --git a/arch/ia64/include/asm/break.h b/arch/ia64/include/asm/break.h
index f03402039896..e90c40ec9edf 100644
--- a/arch/ia64/include/asm/break.h
+++ b/arch/ia64/include/asm/break.h
@@ -20,4 +20,13 @@
  */
 #define __IA64_BREAK_SYSCALL	0x100000
 
+/*
+ * Xen specific break numbers:
+ */
+#define __IA64_XEN_HYPERCALL	0x1000
+/* [__IA64_XEN_HYPERPRIVOP_START, __IA64_XEN_HYPERPRIVOP_MAX] is used
+   for xen hyperprivops */
+#define __IA64_XEN_HYPERPRIVOP_START	0x1
+#define __IA64_XEN_HYPERPRIVOP_MAX	0x1a
+
 #endif /* _ASM_IA64_BREAK_H */
diff --git a/arch/ia64/include/asm/cacheflush.h b/arch/ia64/include/asm/cacheflush.h
index afcfbda76e20..c8ce2719fee8 100644
--- a/arch/ia64/include/asm/cacheflush.h
+++ b/arch/ia64/include/asm/cacheflush.h
@@ -34,6 +34,8 @@ do { \
 #define flush_dcache_mmap_unlock(mapping)	do { } while (0)
 
 extern void flush_icache_range (unsigned long start, unsigned long end);
+extern void clflush_cache_range(void *addr, int size);
+
 
 #define flush_icache_user_range(vma, page, user_addr, len) \
 do { \
diff --git a/arch/ia64/include/asm/device.h b/arch/ia64/include/asm/device.h
index 3db6daf7f251..41ab85d66f33 100644
--- a/arch/ia64/include/asm/device.h
+++ b/arch/ia64/include/asm/device.h
@@ -10,6 +10,9 @@ struct dev_archdata {
 #ifdef CONFIG_ACPI
 	void	*acpi_handle;
 #endif
+#ifdef CONFIG_DMAR
+	void *iommu; /* hook for IOMMU specific extension */
+#endif
 };
 
 #endif /* _ASM_IA64_DEVICE_H */
diff --git a/arch/ia64/include/asm/dma-mapping.h b/arch/ia64/include/asm/dma-mapping.h
index 06ff1ba21465..bbab7e2b0fc9 100644
--- a/arch/ia64/include/asm/dma-mapping.h
+++ b/arch/ia64/include/asm/dma-mapping.h
@@ -7,6 +7,49 @@
  */
 #include <asm/machvec.h>
 #include <linux/scatterlist.h>
+#include <asm/swiotlb.h>
+
+struct dma_mapping_ops {
+	int		(*mapping_error)(struct device *dev,
+					 dma_addr_t dma_addr);
+	void*		(*alloc_coherent)(struct device *dev, size_t size,
+				dma_addr_t *dma_handle, gfp_t gfp);
+	void		(*free_coherent)(struct device *dev, size_t size,
+				void *vaddr, dma_addr_t dma_handle);
+	dma_addr_t	(*map_single)(struct device *hwdev, unsigned long ptr,
+				size_t size, int direction);
+	void		(*unmap_single)(struct device *dev, dma_addr_t addr,
+				size_t size, int direction);
+	void		(*sync_single_for_cpu)(struct device *hwdev,
+				dma_addr_t dma_handle, size_t size,
+				int direction);
+	void		(*sync_single_for_device)(struct device *hwdev,
+				dma_addr_t dma_handle, size_t size,
+				int direction);
+	void		(*sync_single_range_for_cpu)(struct device *hwdev,
+				dma_addr_t dma_handle, unsigned long offset,
+				size_t size, int direction);
+	void		(*sync_single_range_for_device)(struct device *hwdev,
+				dma_addr_t dma_handle, unsigned long offset,
+				size_t size, int direction);
+	void		(*sync_sg_for_cpu)(struct device *hwdev,
+				struct scatterlist *sg, int nelems,
+				int direction);
+	void		(*sync_sg_for_device)(struct device *hwdev,
+				struct scatterlist *sg, int nelems,
+				int direction);
+	int		(*map_sg)(struct device *hwdev, struct scatterlist *sg,
+				int nents, int direction);
+	void		(*unmap_sg)(struct device *hwdev,
+				struct scatterlist *sg, int nents,
+				int direction);
+	int		(*dma_supported_op)(struct device *hwdev, u64 mask);
+	int		is_phys;
+};
+
+extern struct dma_mapping_ops *dma_ops;
+extern struct ia64_machine_vector ia64_mv;
+extern void set_iommu_machvec(void);
 
 #define dma_alloc_coherent(dev, size, handle, gfp)	\
 	platform_dma_alloc_coherent(dev, size, handle, (gfp) | GFP_DMA)
@@ -96,4 +139,11 @@ dma_cache_sync (struct device *dev, void *vaddr, size_t size,
 
 #define dma_is_consistent(d, h)	(1)	/* all we do is coherent memory... */
 
+static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
+{
+	return dma_ops;
+}
+
+
+
 #endif /* _ASM_IA64_DMA_MAPPING_H */
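
Note: the dma_mapping_ops table added above is reached through get_dma_ops(), which for now always returns the global dma_ops pointer. A minimal sketch of how an arch-level wrapper might dispatch through that table is shown below; the helper name demo_map_single() is purely illustrative and not part of this patch.

/* Sketch only: dispatching a single-buffer mapping through the
 * dma_mapping_ops table returned by get_dma_ops().  The real callers
 * live in the ia64 DMA glue; this just makes the indirection concrete. */
static inline dma_addr_t demo_map_single(struct device *dev, void *ptr,
					 size_t size, int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(dev);

	/* map_single takes the kernel virtual address as an unsigned long */
	return ops->map_single(dev, (unsigned long)ptr, size, direction);
}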
diff --git a/arch/ia64/include/asm/iommu.h b/arch/ia64/include/asm/iommu.h
new file mode 100644
index 000000000000..5fb2bb93de3b
--- /dev/null
+++ b/arch/ia64/include/asm/iommu.h
@@ -0,0 +1,16 @@
1#ifndef _ASM_IA64_IOMMU_H
2#define _ASM_IA64_IOMMU_H 1
3
4#define cpu_has_x2apic 0
5/* 10 seconds */
6#define DMAR_OPERATION_TIMEOUT (((cycles_t) local_cpu_data->itc_freq)*10)
7
8extern void pci_iommu_shutdown(void);
9extern void no_iommu_init(void);
10extern int force_iommu, no_iommu;
11extern int iommu_detected;
12extern void iommu_dma_init(void);
13extern void machvec_init(const char *name);
14extern int forbid_dac;
15
16#endif
diff --git a/arch/ia64/include/asm/kregs.h b/arch/ia64/include/asm/kregs.h
index aefcdfee7f23..39e65f6639f5 100644
--- a/arch/ia64/include/asm/kregs.h
+++ b/arch/ia64/include/asm/kregs.h
@@ -32,7 +32,7 @@
 #define IA64_TR_CURRENT_STACK	1	/* dtr1: maps kernel's memory- & register-stacks */
 
 #define IA64_TR_ALLOC_BASE	2	/* itr&dtr: Base of dynamic TR resource*/
-#define IA64_TR_ALLOC_MAX	32	/* Max number for dynamic use*/
+#define IA64_TR_ALLOC_MAX	64	/* Max number for dynamic use*/
 
 /* Processor status register bits: */
 #define IA64_PSR_BE_BIT		1
diff --git a/arch/ia64/include/asm/machvec.h b/arch/ia64/include/asm/machvec.h
index 2b850ccafef5..1ea28bcee33b 100644
--- a/arch/ia64/include/asm/machvec.h
+++ b/arch/ia64/include/asm/machvec.h
@@ -120,6 +120,8 @@ extern void machvec_tlb_migrate_finish (struct mm_struct *);
 # include <asm/machvec_hpsim.h>
 # elif defined (CONFIG_IA64_DIG)
 # include <asm/machvec_dig.h>
+# elif defined(CONFIG_IA64_DIG_VTD)
+# include <asm/machvec_dig_vtd.h>
 # elif defined (CONFIG_IA64_HP_ZX1)
 # include <asm/machvec_hpzx1.h>
 # elif defined (CONFIG_IA64_HP_ZX1_SWIOTLB)
@@ -128,6 +130,8 @@ extern void machvec_tlb_migrate_finish (struct mm_struct *);
 # include <asm/machvec_sn2.h>
 # elif defined (CONFIG_IA64_SGI_UV)
 # include <asm/machvec_uv.h>
+# elif defined (CONFIG_IA64_XEN_GUEST)
+# include <asm/machvec_xen.h>
 # elif defined (CONFIG_IA64_GENERIC)
 
 # ifdef MACHVEC_PLATFORM_HEADER
diff --git a/arch/ia64/include/asm/machvec_dig_vtd.h b/arch/ia64/include/asm/machvec_dig_vtd.h
new file mode 100644
index 000000000000..3400b561e711
--- /dev/null
+++ b/arch/ia64/include/asm/machvec_dig_vtd.h
@@ -0,0 +1,38 @@
1#ifndef _ASM_IA64_MACHVEC_DIG_VTD_h
2#define _ASM_IA64_MACHVEC_DIG_VTD_h
3
4extern ia64_mv_setup_t dig_setup;
5extern ia64_mv_dma_alloc_coherent vtd_alloc_coherent;
6extern ia64_mv_dma_free_coherent vtd_free_coherent;
7extern ia64_mv_dma_map_single_attrs vtd_map_single_attrs;
8extern ia64_mv_dma_unmap_single_attrs vtd_unmap_single_attrs;
9extern ia64_mv_dma_map_sg_attrs vtd_map_sg_attrs;
10extern ia64_mv_dma_unmap_sg_attrs vtd_unmap_sg_attrs;
11extern ia64_mv_dma_supported iommu_dma_supported;
12extern ia64_mv_dma_mapping_error vtd_dma_mapping_error;
13extern ia64_mv_dma_init pci_iommu_alloc;
14
15/*
16 * This stuff has dual use!
17 *
18 * For a generic kernel, the macros are used to initialize the
19 * platform's machvec structure. When compiling a non-generic kernel,
20 * the macros are used directly.
21 */
22#define platform_name "dig_vtd"
23#define platform_setup dig_setup
24#define platform_dma_init pci_iommu_alloc
25#define platform_dma_alloc_coherent vtd_alloc_coherent
26#define platform_dma_free_coherent vtd_free_coherent
27#define platform_dma_map_single_attrs vtd_map_single_attrs
28#define platform_dma_unmap_single_attrs vtd_unmap_single_attrs
29#define platform_dma_map_sg_attrs vtd_map_sg_attrs
30#define platform_dma_unmap_sg_attrs vtd_unmap_sg_attrs
31#define platform_dma_sync_single_for_cpu machvec_dma_sync_single
32#define platform_dma_sync_sg_for_cpu machvec_dma_sync_sg
33#define platform_dma_sync_single_for_device machvec_dma_sync_single
34#define platform_dma_sync_sg_for_device machvec_dma_sync_sg
35#define platform_dma_supported iommu_dma_supported
36#define platform_dma_mapping_error vtd_dma_mapping_error
37
38#endif /* _ASM_IA64_MACHVEC_DIG_VTD_h */
diff --git a/arch/ia64/include/asm/machvec_init.h b/arch/ia64/include/asm/machvec_init.h
index 7f21249fba3f..ef964b286842 100644
--- a/arch/ia64/include/asm/machvec_init.h
+++ b/arch/ia64/include/asm/machvec_init.h
@@ -1,3 +1,4 @@
+#include <asm/iommu.h>
 #include <asm/machvec.h>
 
 extern ia64_mv_send_ipi_t ia64_send_ipi;
diff --git a/arch/ia64/include/asm/machvec_xen.h b/arch/ia64/include/asm/machvec_xen.h
new file mode 100644
index 000000000000..55f9228056cd
--- /dev/null
+++ b/arch/ia64/include/asm/machvec_xen.h
@@ -0,0 +1,22 @@
1#ifndef _ASM_IA64_MACHVEC_XEN_h
2#define _ASM_IA64_MACHVEC_XEN_h
3
4extern ia64_mv_setup_t dig_setup;
5extern ia64_mv_cpu_init_t xen_cpu_init;
6extern ia64_mv_irq_init_t xen_irq_init;
7extern ia64_mv_send_ipi_t xen_platform_send_ipi;
8
9/*
10 * This stuff has dual use!
11 *
12 * For a generic kernel, the macros are used to initialize the
13 * platform's machvec structure. When compiling a non-generic kernel,
14 * the macros are used directly.
15 */
16#define platform_name "xen"
17#define platform_setup dig_setup
18#define platform_cpu_init xen_cpu_init
19#define platform_irq_init xen_irq_init
20#define platform_send_ipi xen_platform_send_ipi
21
22#endif /* _ASM_IA64_MACHVEC_XEN_h */
diff --git a/arch/ia64/include/asm/meminit.h b/arch/ia64/include/asm/meminit.h
index 7245a5781594..6bc96ee54327 100644
--- a/arch/ia64/include/asm/meminit.h
+++ b/arch/ia64/include/asm/meminit.h
@@ -18,10 +18,11 @@
  * - crash dumping code reserved region
  * - Kernel memory map built from EFI memory map
  * - ELF core header
+ * - xen start info if CONFIG_XEN
  *
  * More could be added if necessary
  */
-#define IA64_MAX_RSVD_REGIONS 8
+#define IA64_MAX_RSVD_REGIONS 9
 
 struct rsvd_region {
 	unsigned long start;	/* virtual address of beginning of element */
diff --git a/arch/ia64/include/asm/native/inst.h b/arch/ia64/include/asm/native/inst.h
index c8efbf7b849e..0a1026cca4fa 100644
--- a/arch/ia64/include/asm/native/inst.h
+++ b/arch/ia64/include/asm/native/inst.h
@@ -36,8 +36,13 @@
 	;;				\
 	movl clob = PARAVIRT_POISON;	\
 	;;
+# define CLOBBER_PRED(pred_clob)	\
+	;;				\
+	cmp.eq pred_clob, p0 = r0, r0	\
+	;;
 #else
 # define CLOBBER(clob)			/* nothing */
+# define CLOBBER_PRED(pred_clob)	/* nothing */
 #endif
 
 #define MOV_FROM_IFA(reg)	\
@@ -136,7 +141,8 @@
 
 #define SSM_PSR_I(pred, pred_clob, clob)	\
 (pred)	ssm psr.i				\
-	CLOBBER(clob)
+	CLOBBER(clob)				\
+	CLOBBER_PRED(pred_clob)
 
 #define RSM_PSR_I(pred, clob0, clob1)	\
 (pred)	rsm psr.i			\
diff --git a/arch/ia64/include/asm/native/pvchk_inst.h b/arch/ia64/include/asm/native/pvchk_inst.h
new file mode 100644
index 000000000000..b8e6eb1090d7
--- /dev/null
+++ b/arch/ia64/include/asm/native/pvchk_inst.h
@@ -0,0 +1,263 @@
1#ifndef _ASM_NATIVE_PVCHK_INST_H
2#define _ASM_NATIVE_PVCHK_INST_H
3
4/******************************************************************************
5 * arch/ia64/include/asm/native/pvchk_inst.h
6 * Checker for paravirtualizations of privileged operations.
7 *
8 * Copyright (C) 2005 Hewlett-Packard Co
9 * Dan Magenheimer <dan.magenheimer@hp.com>
10 *
11 * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
12 * VA Linux Systems Japan K.K.
13 *
14 * This program is free software; you can redistribute it and/or modify
15 * it under the terms of the GNU General Public License as published by
16 * the Free Software Foundation; either version 2 of the License, or
17 * (at your option) any later version.
18 *
19 * This program is distributed in the hope that it will be useful,
20 * but WITHOUT ANY WARRANTY; without even the implied warranty of
21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
22 * GNU General Public License for more details.
23 *
24 * You should have received a copy of the GNU General Public License
25 * along with this program; if not, write to the Free Software
26 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
27 *
28 */
29
30/**********************************************
31 * Instructions paravirtualized for correctness
32 **********************************************/
33
34/* "fc" and "thash" are privilege-sensitive instructions, meaning they
35 * may have different semantics depending on whether they are executed
36 * at PL0 vs PL!=0. When paravirtualized, these instructions mustn't
37 * be allowed to execute directly, lest incorrect semantics result.
38 */
39
40#define fc .error "fc should not be used directly."
41#define thash .error "thash should not be used directly."
42
43/* Note that "ttag" and "cover" are also privilege-sensitive; "ttag"
44 * is not currently used (though it may be in a long-format VHPT system!)
45 * and the semantics of cover only change if psr.ic is off which is very
46 * rare (and currently non-existent outside of assembly code
47 */
48#define ttag .error "ttag should not be used directly."
49#define cover .error "cover should not be used directly."
50
51/* There are also privilege-sensitive registers. These registers are
52 * readable at any privilege level but only writable at PL0.
53 */
54#define cpuid .error "cpuid should not be used directly."
55#define pmd .error "pmd should not be used directly."
56
57/*
58 * mov ar.eflag =
59 * mov = ar.eflag
60 */
61
62/**********************************************
63 * Instructions paravirtualized for performance
64 **********************************************/
65/*
66 * Those instructions include '.' which can't be handled by cpp.
67 * or can't be handled by cpp easily.
68 * They are handled by sed instead of cpp.
69 */
70
71/* for .S
72 * itc.i
73 * itc.d
74 *
75 * bsw.0
76 * bsw.1
77 *
78 * ssm psr.ic | PSR_DEFAULT_BITS
79 * ssm psr.ic
80 * rsm psr.ic
81 * ssm psr.i
82 * rsm psr.i
83 * rsm psr.i | psr.ic
84 * rsm psr.dt
85 * ssm psr.dt
86 *
87 * mov = cr.ifa
88 * mov = cr.itir
89 * mov = cr.isr
90 * mov = cr.iha
91 * mov = cr.ipsr
92 * mov = cr.iim
93 * mov = cr.iip
94 * mov = cr.ivr
95 * mov = psr
96 *
97 * mov cr.ifa =
98 * mov cr.itir =
99 * mov cr.iha =
100 * mov cr.ipsr =
101 * mov cr.ifs =
102 * mov cr.iip =
103 * mov cr.kr =
104 */
105
106/* for intrinsics
107 * ssm psr.i
108 * rsm psr.i
109 * mov = psr
110 * mov = ivr
111 * mov = tpr
112 * mov cr.itm =
113 * mov eoi =
114 * mov rr[] =
115 * mov = rr[]
116 * mov = kr
117 * mov kr =
118 * ptc.ga
119 */
120
121/*************************************************************
122 * define paravirtualized instrcution macros as nop to ingore.
123 * and check whether arguments are appropriate.
124 *************************************************************/
125
126/* check whether reg is a regular register */
127.macro is_rreg_in reg
128 .ifc "\reg", "r0"
129 nop 0
130 .exitm
131 .endif
132 ;;
133 mov \reg = r0
134 ;;
135.endm
136#define IS_RREG_IN(reg) is_rreg_in reg ;
137
138#define IS_RREG_OUT(reg) \
139 ;; \
140 mov reg = r0 \
141 ;;
142
143#define IS_RREG_CLOB(reg) IS_RREG_OUT(reg)
144
145/* check whether pred is a predicate register */
146#define IS_PRED_IN(pred) \
147 ;; \
148 (pred) nop 0 \
149 ;;
150
151#define IS_PRED_OUT(pred) \
152 ;; \
153 cmp.eq pred, p0 = r0, r0 \
154 ;;
155
156#define IS_PRED_CLOB(pred) IS_PRED_OUT(pred)
157
158
159#define DO_SAVE_MIN(__COVER, SAVE_IFS, EXTRA, WORKAROUND) \
160 nop 0
161#define MOV_FROM_IFA(reg) \
162 IS_RREG_OUT(reg)
163#define MOV_FROM_ITIR(reg) \
164 IS_RREG_OUT(reg)
165#define MOV_FROM_ISR(reg) \
166 IS_RREG_OUT(reg)
167#define MOV_FROM_IHA(reg) \
168 IS_RREG_OUT(reg)
169#define MOV_FROM_IPSR(pred, reg) \
170 IS_PRED_IN(pred) \
171 IS_RREG_OUT(reg)
172#define MOV_FROM_IIM(reg) \
173 IS_RREG_OUT(reg)
174#define MOV_FROM_IIP(reg) \
175 IS_RREG_OUT(reg)
176#define MOV_FROM_IVR(reg, clob) \
177 IS_RREG_OUT(reg) \
178 IS_RREG_CLOB(clob)
179#define MOV_FROM_PSR(pred, reg, clob) \
180 IS_PRED_IN(pred) \
181 IS_RREG_OUT(reg) \
182 IS_RREG_CLOB(clob)
183#define MOV_TO_IFA(reg, clob) \
184 IS_RREG_IN(reg) \
185 IS_RREG_CLOB(clob)
186#define MOV_TO_ITIR(pred, reg, clob) \
187 IS_PRED_IN(pred) \
188 IS_RREG_IN(reg) \
189 IS_RREG_CLOB(clob)
190#define MOV_TO_IHA(pred, reg, clob) \
191 IS_PRED_IN(pred) \
192 IS_RREG_IN(reg) \
193 IS_RREG_CLOB(clob)
194#define MOV_TO_IPSR(pred, reg, clob) \
195 IS_PRED_IN(pred) \
196 IS_RREG_IN(reg) \
197 IS_RREG_CLOB(clob)
198#define MOV_TO_IFS(pred, reg, clob) \
199 IS_PRED_IN(pred) \
200 IS_RREG_IN(reg) \
201 IS_RREG_CLOB(clob)
202#define MOV_TO_IIP(reg, clob) \
203 IS_RREG_IN(reg) \
204 IS_RREG_CLOB(clob)
205#define MOV_TO_KR(kr, reg, clob0, clob1) \
206 IS_RREG_IN(reg) \
207 IS_RREG_CLOB(clob0) \
208 IS_RREG_CLOB(clob1)
209#define ITC_I(pred, reg, clob) \
210 IS_PRED_IN(pred) \
211 IS_RREG_IN(reg) \
212 IS_RREG_CLOB(clob)
213#define ITC_D(pred, reg, clob) \
214 IS_PRED_IN(pred) \
215 IS_RREG_IN(reg) \
216 IS_RREG_CLOB(clob)
217#define ITC_I_AND_D(pred_i, pred_d, reg, clob) \
218 IS_PRED_IN(pred_i) \
219 IS_PRED_IN(pred_d) \
220 IS_RREG_IN(reg) \
221 IS_RREG_CLOB(clob)
222#define THASH(pred, reg0, reg1, clob) \
223 IS_PRED_IN(pred) \
224 IS_RREG_OUT(reg0) \
225 IS_RREG_IN(reg1) \
226 IS_RREG_CLOB(clob)
227#define SSM_PSR_IC_AND_DEFAULT_BITS_AND_SRLZ_I(clob0, clob1) \
228 IS_RREG_CLOB(clob0) \
229 IS_RREG_CLOB(clob1)
230#define SSM_PSR_IC_AND_SRLZ_D(clob0, clob1) \
231 IS_RREG_CLOB(clob0) \
232 IS_RREG_CLOB(clob1)
233#define RSM_PSR_IC(clob) \
234 IS_RREG_CLOB(clob)
235#define SSM_PSR_I(pred, pred_clob, clob) \
236 IS_PRED_IN(pred) \
237 IS_PRED_CLOB(pred_clob) \
238 IS_RREG_CLOB(clob)
239#define RSM_PSR_I(pred, clob0, clob1) \
240 IS_PRED_IN(pred) \
241 IS_RREG_CLOB(clob0) \
242 IS_RREG_CLOB(clob1)
243#define RSM_PSR_I_IC(clob0, clob1, clob2) \
244 IS_RREG_CLOB(clob0) \
245 IS_RREG_CLOB(clob1) \
246 IS_RREG_CLOB(clob2)
247#define RSM_PSR_DT \
248 nop 0
249#define SSM_PSR_DT_AND_SRLZ_I \
250 nop 0
251#define BSW_0(clob0, clob1, clob2) \
252 IS_RREG_CLOB(clob0) \
253 IS_RREG_CLOB(clob1) \
254 IS_RREG_CLOB(clob2)
255#define BSW_1(clob0, clob1) \
256 IS_RREG_CLOB(clob0) \
257 IS_RREG_CLOB(clob1)
258#define COVER \
259 nop 0
260#define RFI \
261 br.ret.sptk.many rp /* defining nop causes dependency error */
262
263#endif /* _ASM_NATIVE_PVCHK_INST_H */
diff --git a/arch/ia64/include/asm/paravirt.h b/arch/ia64/include/asm/paravirt.h
index 660cab044834..2bf3636473fe 100644
--- a/arch/ia64/include/asm/paravirt.h
+++ b/arch/ia64/include/asm/paravirt.h
@@ -117,7 +117,7 @@ static inline void paravirt_post_smp_prepare_boot_cpu(void)
 struct pv_iosapic_ops {
 	void (*pcat_compat_init)(void);
 
-	struct irq_chip *(*get_irq_chip)(unsigned long trigger);
+	struct irq_chip *(*__get_irq_chip)(unsigned long trigger);
 
 	unsigned int (*__read)(char __iomem *iosapic, unsigned int reg);
 	void (*__write)(char __iomem *iosapic, unsigned int reg, u32 val);
@@ -135,7 +135,7 @@ iosapic_pcat_compat_init(void)
 static inline struct irq_chip*
 iosapic_get_irq_chip(unsigned long trigger)
 {
-	return pv_iosapic_ops.get_irq_chip(trigger);
+	return pv_iosapic_ops.__get_irq_chip(trigger);
 }
 
 static inline unsigned int
diff --git a/arch/ia64/include/asm/pci.h b/arch/ia64/include/asm/pci.h
index ce342fb74246..1d660d89db0d 100644
--- a/arch/ia64/include/asm/pci.h
+++ b/arch/ia64/include/asm/pci.h
@@ -156,4 +156,7 @@ static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
 	return channel ? isa_irq_to_vector(15) : isa_irq_to_vector(14);
 }
 
+#ifdef CONFIG_DMAR
+extern void pci_iommu_alloc(void);
+#endif
 #endif /* _ASM_IA64_PCI_H */
diff --git a/arch/ia64/include/asm/ptrace.h b/arch/ia64/include/asm/ptrace.h
index 15f8dcfe6eee..6417c1ecb44e 100644
--- a/arch/ia64/include/asm/ptrace.h
+++ b/arch/ia64/include/asm/ptrace.h
@@ -240,6 +240,12 @@ struct switch_stack {
  */
 # define instruction_pointer(regs) ((regs)->cr_iip + ia64_psr(regs)->ri)
 
+static inline unsigned long user_stack_pointer(struct pt_regs *regs)
+{
+	/* FIXME: should this be bspstore + nr_dirty regs? */
+	return regs->ar_bspstore;
+}
+
 #define regs_return_value(regs) ((regs)->r8)
 
 /* Conserve space in histogram by encoding slot bits in address
@@ -319,6 +325,8 @@ struct switch_stack {
   #define arch_has_block_step()   (1)
   extern void user_enable_block_step(struct task_struct *);
 
+#define __ARCH_WANT_COMPAT_SYS_PTRACE
+
 #endif /* !__KERNEL__ */
 
 /* pt_all_user_regs is used for PTRACE_GETREGS PTRACE_SETREGS */
diff --git a/arch/ia64/include/asm/pvclock-abi.h b/arch/ia64/include/asm/pvclock-abi.h
new file mode 100644
index 000000000000..44ef9ef8f5b3
--- /dev/null
+++ b/arch/ia64/include/asm/pvclock-abi.h
@@ -0,0 +1,48 @@
1/*
2 * same structure to x86's
3 * Hopefully asm-x86/pvclock-abi.h would be moved to somewhere more generic.
4 * For now, define same duplicated definitions.
5 */
6
7#ifndef _ASM_IA64__PVCLOCK_ABI_H
8#define _ASM_IA64__PVCLOCK_ABI_H
9#ifndef __ASSEMBLY__
10
11/*
12 * These structs MUST NOT be changed.
13 * They are the ABI between hypervisor and guest OS.
14 * Both Xen and KVM are using this.
15 *
16 * pvclock_vcpu_time_info holds the system time and the tsc timestamp
17 * of the last update. So the guest can use the tsc delta to get a
18 * more precise system time. There is one per virtual cpu.
19 *
20 * pvclock_wall_clock references the point in time when the system
21 * time was zero (usually boot time), thus the guest calculates the
22 * current wall clock by adding the system time.
23 *
24 * Protocol for the "version" fields is: hypervisor raises it (making
25 * it uneven) before it starts updating the fields and raises it again
26 * (making it even) when it is done. Thus the guest can make sure the
27 * time values it got are consistent by checking the version before
28 * and after reading them.
29 */
30
31struct pvclock_vcpu_time_info {
32 u32 version;
33 u32 pad0;
34 u64 tsc_timestamp;
35 u64 system_time;
36 u32 tsc_to_system_mul;
37 s8 tsc_shift;
38 u8 pad[3];
39} __attribute__((__packed__)); /* 32 bytes */
40
41struct pvclock_wall_clock {
42 u32 version;
43 u32 sec;
44 u32 nsec;
45} __attribute__((__packed__));
46
47#endif /* __ASSEMBLY__ */
48#endif /* _ASM_IA64__PVCLOCK_ABI_H */
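
Note: the comment block in pvclock-abi.h above describes the even/odd version handshake between hypervisor and guest. A minimal sketch of a guest-side reader that follows that protocol is shown below; it is illustrative only (the function name is hypothetical, and only system_time is sampled for brevity).

/* Sketch only: re-read the record until the version is even and unchanged,
 * i.e. no hypervisor update was in flight while we sampled the fields. */
static u64 demo_pvclock_read_system_time(struct pvclock_vcpu_time_info *src)
{
	u32 version;
	u64 system_time;

	do {
		version = src->version;
		rmb();			/* payload reads must not pass the version read */
		system_time = src->system_time;
		rmb();			/* re-check the version after reading the payload */
	} while ((version & 1) || version != src->version);

	return system_time;
}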
diff --git a/arch/ia64/include/asm/swiotlb.h b/arch/ia64/include/asm/swiotlb.h
new file mode 100644
index 000000000000..fb79423834d0
--- /dev/null
+++ b/arch/ia64/include/asm/swiotlb.h
@@ -0,0 +1,56 @@
1#ifndef ASM_IA64__SWIOTLB_H
2#define ASM_IA64__SWIOTLB_H
3
4#include <linux/dma-mapping.h>
5
6/* SWIOTLB interface */
7
8extern dma_addr_t swiotlb_map_single(struct device *hwdev, void *ptr,
9 size_t size, int dir);
10extern void *swiotlb_alloc_coherent(struct device *hwdev, size_t size,
11 dma_addr_t *dma_handle, gfp_t flags);
12extern void swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr,
13 size_t size, int dir);
14extern void swiotlb_sync_single_for_cpu(struct device *hwdev,
15 dma_addr_t dev_addr,
16 size_t size, int dir);
17extern void swiotlb_sync_single_for_device(struct device *hwdev,
18 dma_addr_t dev_addr,
19 size_t size, int dir);
20extern void swiotlb_sync_single_range_for_cpu(struct device *hwdev,
21 dma_addr_t dev_addr,
22 unsigned long offset,
23 size_t size, int dir);
24extern void swiotlb_sync_single_range_for_device(struct device *hwdev,
25 dma_addr_t dev_addr,
26 unsigned long offset,
27 size_t size, int dir);
28extern void swiotlb_sync_sg_for_cpu(struct device *hwdev,
29 struct scatterlist *sg, int nelems,
30 int dir);
31extern void swiotlb_sync_sg_for_device(struct device *hwdev,
32 struct scatterlist *sg, int nelems,
33 int dir);
34extern int swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg,
35 int nents, int direction);
36extern void swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sg,
37 int nents, int direction);
38extern int swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr);
39extern void swiotlb_free_coherent(struct device *hwdev, size_t size,
40 void *vaddr, dma_addr_t dma_handle);
41extern int swiotlb_dma_supported(struct device *hwdev, u64 mask);
42extern void swiotlb_init(void);
43
44extern int swiotlb_force;
45
46#ifdef CONFIG_SWIOTLB
47extern int swiotlb;
48extern void pci_swiotlb_init(void);
49#else
50#define swiotlb 0
51static inline void pci_swiotlb_init(void)
52{
53}
54#endif
55
56#endif /* ASM_IA64__SWIOTLB_H */
diff --git a/arch/ia64/include/asm/sync_bitops.h b/arch/ia64/include/asm/sync_bitops.h
new file mode 100644
index 000000000000..593c12eeb270
--- /dev/null
+++ b/arch/ia64/include/asm/sync_bitops.h
@@ -0,0 +1,51 @@
1#ifndef _ASM_IA64_SYNC_BITOPS_H
2#define _ASM_IA64_SYNC_BITOPS_H
3
4/*
5 * Copyright (C) 2008 Isaku Yamahata <yamahata at valinux co jp>
6 *
7 * Based on synch_bitops.h which Dan Magenhaimer wrote.
8 *
9 * bit operations which provide guaranteed strong synchronisation
10 * when communicating with Xen or other guest OSes running on other CPUs.
11 */
12
13static inline void sync_set_bit(int nr, volatile void *addr)
14{
15 set_bit(nr, addr);
16}
17
18static inline void sync_clear_bit(int nr, volatile void *addr)
19{
20 clear_bit(nr, addr);
21}
22
23static inline void sync_change_bit(int nr, volatile void *addr)
24{
25 change_bit(nr, addr);
26}
27
28static inline int sync_test_and_set_bit(int nr, volatile void *addr)
29{
30 return test_and_set_bit(nr, addr);
31}
32
33static inline int sync_test_and_clear_bit(int nr, volatile void *addr)
34{
35 return test_and_clear_bit(nr, addr);
36}
37
38static inline int sync_test_and_change_bit(int nr, volatile void *addr)
39{
40 return test_and_change_bit(nr, addr);
41}
42
43static inline int sync_test_bit(int nr, const volatile void *addr)
44{
45 return test_bit(nr, addr);
46}
47
48#define sync_cmpxchg(ptr, old, new) \
49 ((__typeof__(*(ptr)))cmpxchg_acq((ptr), (old), (new)))
50
51#endif /* _ASM_IA64_SYNC_BITOPS_H */
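
Note: on ia64 the regular bitops are already fully serializing, so the sync_* helpers above simply wrap them and sync_cmpxchg maps to cmpxchg_acq. The sketch below shows the intended usage pattern on a bitmap shared with the hypervisor; the bitmap name and loop body are illustrative, not taken from this patch.

/* Sketch only: draining a shared pending-event bitmap with the
 * strongly-synchronized test-and-clear primitive defined above. */
static void demo_drain_pending(volatile unsigned long *pending, int nr_bits)
{
	int bit;

	for (bit = 0; bit < nr_bits; bit++) {
		/* must be atomic with respect to the other side of the ring */
		if (sync_test_and_clear_bit(bit, pending)) {
			/* handle event 'bit' here */
		}
	}
}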
diff --git a/arch/ia64/include/asm/syscall.h b/arch/ia64/include/asm/syscall.h
new file mode 100644
index 000000000000..2f758a42f94b
--- /dev/null
+++ b/arch/ia64/include/asm/syscall.h
@@ -0,0 +1,163 @@
1/*
2 * Access to user system call parameters and results
3 *
4 * Copyright (C) 2008 Intel Corp. Shaohua Li <shaohua.li@intel.com>
5 *
6 * This copyrighted material is made available to anyone wishing to use,
7 * modify, copy, or redistribute it subject to the terms and conditions
8 * of the GNU General Public License v.2.
9 *
10 * See asm-generic/syscall.h for descriptions of what we must do here.
11 */
12
13#ifndef _ASM_SYSCALL_H
14#define _ASM_SYSCALL_H 1
15
16#include <linux/sched.h>
17#include <linux/err.h>
18
19static inline long syscall_get_nr(struct task_struct *task,
20 struct pt_regs *regs)
21{
22 if ((long)regs->cr_ifs < 0) /* Not a syscall */
23 return -1;
24
25#ifdef CONFIG_IA32_SUPPORT
26 if (IS_IA32_PROCESS(regs))
27 return regs->r1;
28#endif
29
30 return regs->r15;
31}
32
33static inline void syscall_rollback(struct task_struct *task,
34 struct pt_regs *regs)
35{
36#ifdef CONFIG_IA32_SUPPORT
37 if (IS_IA32_PROCESS(regs))
38 regs->r8 = regs->r1;
39#endif
40
41 /* do nothing */
42}
43
44static inline long syscall_get_error(struct task_struct *task,
45 struct pt_regs *regs)
46{
47#ifdef CONFIG_IA32_SUPPORT
48 if (IS_IA32_PROCESS(regs))
49 return regs->r8;
50#endif
51
52 return regs->r10 == -1 ? regs->r8:0;
53}
54
55static inline long syscall_get_return_value(struct task_struct *task,
56 struct pt_regs *regs)
57{
58 return regs->r8;
59}
60
61static inline void syscall_set_return_value(struct task_struct *task,
62 struct pt_regs *regs,
63 int error, long val)
64{
65#ifdef CONFIG_IA32_SUPPORT
66 if (IS_IA32_PROCESS(regs)) {
67 regs->r8 = (long) error ? error : val;
68 return;
69 }
70#endif
71
72 if (error) {
73 /* error < 0, but ia64 uses > 0 return value */
74 regs->r8 = -error;
75 regs->r10 = -1;
76 } else {
77 regs->r8 = val;
78 regs->r10 = 0;
79 }
80}
81
82extern void ia64_syscall_get_set_arguments(struct task_struct *task,
83 struct pt_regs *regs, unsigned int i, unsigned int n,
84 unsigned long *args, int rw);
85static inline void syscall_get_arguments(struct task_struct *task,
86 struct pt_regs *regs,
87 unsigned int i, unsigned int n,
88 unsigned long *args)
89{
90 BUG_ON(i + n > 6);
91
92#ifdef CONFIG_IA32_SUPPORT
93 if (IS_IA32_PROCESS(regs)) {
94 switch (i + n) {
95 case 6:
96 if (!n--) break;
97 *args++ = regs->r13;
98 case 5:
99 if (!n--) break;
100 *args++ = regs->r15;
101 case 4:
102 if (!n--) break;
103 *args++ = regs->r14;
104 case 3:
105 if (!n--) break;
106 *args++ = regs->r10;
107 case 2:
108 if (!n--) break;
109 *args++ = regs->r9;
110 case 1:
111 if (!n--) break;
112 *args++ = regs->r11;
113 case 0:
114 if (!n--) break;
115 default:
116 BUG();
117 break;
118 }
119
120 return;
121 }
122#endif
123 ia64_syscall_get_set_arguments(task, regs, i, n, args, 0);
124}
125
126static inline void syscall_set_arguments(struct task_struct *task,
127 struct pt_regs *regs,
128 unsigned int i, unsigned int n,
129 unsigned long *args)
130{
131 BUG_ON(i + n > 6);
132
133#ifdef CONFIG_IA32_SUPPORT
134 if (IS_IA32_PROCESS(regs)) {
135 switch (i + n) {
136 case 6:
137 if (!n--) break;
138 regs->r13 = *args++;
139 case 5:
140 if (!n--) break;
141 regs->r15 = *args++;
142 case 4:
143 if (!n--) break;
144 regs->r14 = *args++;
145 case 3:
146 if (!n--) break;
147 regs->r10 = *args++;
148 case 2:
149 if (!n--) break;
150 regs->r9 = *args++;
151 case 1:
152 if (!n--) break;
153 regs->r11 = *args++;
154 case 0:
155 if (!n--) break;
156 }
157
158 return;
159 }
160#endif
161 ia64_syscall_get_set_arguments(task, regs, i, n, args, 1);
162}
163#endif /* _ASM_SYSCALL_H */
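
Note: the new asm/syscall.h implements the asm-generic/syscall.h contract for ia64 (syscall number in r15, return value in r8, error flag in r10, with IA32 compat handled separately). A short sketch of a tracing-style consumer of these accessors follows; the function name and message format are illustrative only.

/* Sketch only: read the syscall number and first two arguments of a task
 * that is stopped at syscall entry, using the accessors defined above. */
static void demo_trace_syscall_entry(struct task_struct *task,
				     struct pt_regs *regs)
{
	unsigned long args[2];
	long nr = syscall_get_nr(task, regs);

	if (nr < 0)
		return;		/* task is not inside a system call */

	syscall_get_arguments(task, regs, 0, 2, args);
	printk(KERN_DEBUG "syscall %ld(0x%lx, 0x%lx)\n", nr, args[0], args[1]);
}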
diff --git a/arch/ia64/include/asm/thread_info.h b/arch/ia64/include/asm/thread_info.h
index 7c60fcdd2efd..ae6922626bf4 100644
--- a/arch/ia64/include/asm/thread_info.h
+++ b/arch/ia64/include/asm/thread_info.h
@@ -87,9 +87,6 @@ struct thread_info {
 #define alloc_task_struct()	((struct task_struct *)__get_free_pages(GFP_KERNEL | __GFP_COMP, KERNEL_STACK_SIZE_ORDER))
 #define free_task_struct(tsk)	free_pages((unsigned long) (tsk), KERNEL_STACK_SIZE_ORDER)
 
-#define tsk_set_notify_resume(tsk) \
-	set_ti_thread_flag(task_thread_info(tsk), TIF_NOTIFY_RESUME)
-extern void tsk_clear_notify_resume(struct task_struct *tsk);
 #endif	/* !__ASSEMBLY */
 
 /*
diff --git a/arch/ia64/include/asm/timex.h b/arch/ia64/include/asm/timex.h
index 05a6baf8a472..4e03cfe74a0c 100644
--- a/arch/ia64/include/asm/timex.h
+++ b/arch/ia64/include/asm/timex.h
@@ -39,4 +39,6 @@ get_cycles (void)
 	return ret;
 }
 
+extern void ia64_cpu_local_tick (void);
+
 #endif /* _ASM_IA64_TIMEX_H */
diff --git a/arch/ia64/include/asm/unistd.h b/arch/ia64/include/asm/unistd.h
index d535833aab5e..f791576355ad 100644
--- a/arch/ia64/include/asm/unistd.h
+++ b/arch/ia64/include/asm/unistd.h
@@ -337,6 +337,7 @@
 # define __ARCH_WANT_SYS_NICE
 # define __ARCH_WANT_SYS_OLD_GETRLIMIT
 # define __ARCH_WANT_SYS_OLDUMOUNT
+# define __ARCH_WANT_SYS_PAUSE
 # define __ARCH_WANT_SYS_SIGPENDING
 # define __ARCH_WANT_SYS_SIGPROCMASK
 # define __ARCH_WANT_COMPAT_SYS_RT_SIGSUSPEND
diff --git a/arch/ia64/include/asm/xen/events.h b/arch/ia64/include/asm/xen/events.h
new file mode 100644
index 000000000000..73248781fba8
--- /dev/null
+++ b/arch/ia64/include/asm/xen/events.h
@@ -0,0 +1,50 @@
1/******************************************************************************
2 * arch/ia64/include/asm/xen/events.h
3 *
4 * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
5 * VA Linux Systems Japan K.K.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 *
21 */
22#ifndef _ASM_IA64_XEN_EVENTS_H
23#define _ASM_IA64_XEN_EVENTS_H
24
25enum ipi_vector {
26 XEN_RESCHEDULE_VECTOR,
27 XEN_IPI_VECTOR,
28 XEN_CMCP_VECTOR,
29 XEN_CPEP_VECTOR,
30
31 XEN_NR_IPIS,
32};
33
34static inline int xen_irqs_disabled(struct pt_regs *regs)
35{
36 return !(ia64_psr(regs)->i);
37}
38
39static inline void xen_do_IRQ(int irq, struct pt_regs *regs)
40{
41 struct pt_regs *old_regs;
42 old_regs = set_irq_regs(regs);
43 irq_enter();
44 __do_IRQ(irq);
45 irq_exit();
46 set_irq_regs(old_regs);
47}
48#define irq_ctx_init(cpu) do { } while (0)
49
50#endif /* _ASM_IA64_XEN_EVENTS_H */
diff --git a/arch/ia64/include/asm/xen/grant_table.h b/arch/ia64/include/asm/xen/grant_table.h
new file mode 100644
index 000000000000..2b1fae0e2d11
--- /dev/null
+++ b/arch/ia64/include/asm/xen/grant_table.h
@@ -0,0 +1,29 @@
1/******************************************************************************
2 * arch/ia64/include/asm/xen/grant_table.h
3 *
4 * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
5 * VA Linux Systems Japan K.K.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 *
21 */
22
23#ifndef _ASM_IA64_XEN_GRANT_TABLE_H
24#define _ASM_IA64_XEN_GRANT_TABLE_H
25
26struct vm_struct *xen_alloc_vm_area(unsigned long size);
27void xen_free_vm_area(struct vm_struct *area);
28
29#endif /* _ASM_IA64_XEN_GRANT_TABLE_H */
diff --git a/arch/ia64/include/asm/xen/hypercall.h b/arch/ia64/include/asm/xen/hypercall.h
new file mode 100644
index 000000000000..96fc62366aa4
--- /dev/null
+++ b/arch/ia64/include/asm/xen/hypercall.h
@@ -0,0 +1,265 @@
1/******************************************************************************
2 * hypercall.h
3 *
4 * Linux-specific hypervisor handling.
5 *
6 * Copyright (c) 2002-2004, K A Fraser
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License version 2
10 * as published by the Free Software Foundation; or, when distributed
11 * separately from the Linux kernel or incorporated into other
12 * software packages, subject to the following license:
13 *
14 * Permission is hereby granted, free of charge, to any person obtaining a copy
15 * of this source file (the "Software"), to deal in the Software without
16 * restriction, including without limitation the rights to use, copy, modify,
17 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
18 * and to permit persons to whom the Software is furnished to do so, subject to
19 * the following conditions:
20 *
21 * The above copyright notice and this permission notice shall be included in
22 * all copies or substantial portions of the Software.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
25 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
26 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
27 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
28 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
29 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
30 * IN THE SOFTWARE.
31 */
32
33#ifndef _ASM_IA64_XEN_HYPERCALL_H
34#define _ASM_IA64_XEN_HYPERCALL_H
35
36#include <xen/interface/xen.h>
37#include <xen/interface/physdev.h>
38#include <xen/interface/sched.h>
39#include <asm/xen/xcom_hcall.h>
40struct xencomm_handle;
41extern unsigned long __hypercall(unsigned long a1, unsigned long a2,
42 unsigned long a3, unsigned long a4,
43 unsigned long a5, unsigned long cmd);
44
45/*
46 * Assembler stubs for hyper-calls.
47 */
48
49#define _hypercall0(type, name) \
50({ \
51 long __res; \
52 __res = __hypercall(0, 0, 0, 0, 0, __HYPERVISOR_##name);\
53 (type)__res; \
54})
55
56#define _hypercall1(type, name, a1) \
57({ \
58 long __res; \
59 __res = __hypercall((unsigned long)a1, \
60 0, 0, 0, 0, __HYPERVISOR_##name); \
61 (type)__res; \
62})
63
64#define _hypercall2(type, name, a1, a2) \
65({ \
66 long __res; \
67 __res = __hypercall((unsigned long)a1, \
68 (unsigned long)a2, \
69 0, 0, 0, __HYPERVISOR_##name); \
70 (type)__res; \
71})
72
73#define _hypercall3(type, name, a1, a2, a3) \
74({ \
75 long __res; \
76 __res = __hypercall((unsigned long)a1, \
77 (unsigned long)a2, \
78 (unsigned long)a3, \
79 0, 0, __HYPERVISOR_##name); \
80 (type)__res; \
81})
82
83#define _hypercall4(type, name, a1, a2, a3, a4) \
84({ \
85 long __res; \
86 __res = __hypercall((unsigned long)a1, \
87 (unsigned long)a2, \
88 (unsigned long)a3, \
89 (unsigned long)a4, \
90 0, __HYPERVISOR_##name); \
91 (type)__res; \
92})
93
94#define _hypercall5(type, name, a1, a2, a3, a4, a5) \
95({ \
96 long __res; \
97 __res = __hypercall((unsigned long)a1, \
98 (unsigned long)a2, \
99 (unsigned long)a3, \
100 (unsigned long)a4, \
101 (unsigned long)a5, \
102 __HYPERVISOR_##name); \
103 (type)__res; \
104})
105
106
107static inline int
108xencomm_arch_hypercall_sched_op(int cmd, struct xencomm_handle *arg)
109{
110 return _hypercall2(int, sched_op_new, cmd, arg);
111}
112
113static inline long
114HYPERVISOR_set_timer_op(u64 timeout)
115{
116 unsigned long timeout_hi = (unsigned long)(timeout >> 32);
117 unsigned long timeout_lo = (unsigned long)timeout;
118 return _hypercall2(long, set_timer_op, timeout_lo, timeout_hi);
119}
120
121static inline int
122xencomm_arch_hypercall_multicall(struct xencomm_handle *call_list,
123 int nr_calls)
124{
125 return _hypercall2(int, multicall, call_list, nr_calls);
126}
127
128static inline int
129xencomm_arch_hypercall_memory_op(unsigned int cmd, struct xencomm_handle *arg)
130{
131 return _hypercall2(int, memory_op, cmd, arg);
132}
133
134static inline int
135xencomm_arch_hypercall_event_channel_op(int cmd, struct xencomm_handle *arg)
136{
137 return _hypercall2(int, event_channel_op, cmd, arg);
138}
139
140static inline int
141xencomm_arch_hypercall_xen_version(int cmd, struct xencomm_handle *arg)
142{
143 return _hypercall2(int, xen_version, cmd, arg);
144}
145
146static inline int
147xencomm_arch_hypercall_console_io(int cmd, int count,
148 struct xencomm_handle *str)
149{
150 return _hypercall3(int, console_io, cmd, count, str);
151}
152
153static inline int
154xencomm_arch_hypercall_physdev_op(int cmd, struct xencomm_handle *arg)
155{
156 return _hypercall2(int, physdev_op, cmd, arg);
157}
158
159static inline int
160xencomm_arch_hypercall_grant_table_op(unsigned int cmd,
161 struct xencomm_handle *uop,
162 unsigned int count)
163{
164 return _hypercall3(int, grant_table_op, cmd, uop, count);
165}
166
167int HYPERVISOR_grant_table_op(unsigned int cmd, void *uop, unsigned int count);
168
169extern int xencomm_arch_hypercall_suspend(struct xencomm_handle *arg);
170
171static inline int
172xencomm_arch_hypercall_callback_op(int cmd, struct xencomm_handle *arg)
173{
174 return _hypercall2(int, callback_op, cmd, arg);
175}
176
177static inline long
178xencomm_arch_hypercall_vcpu_op(int cmd, int cpu, void *arg)
179{
180 return _hypercall3(long, vcpu_op, cmd, cpu, arg);
181}
182
183static inline int
184HYPERVISOR_physdev_op(int cmd, void *arg)
185{
186 switch (cmd) {
187 case PHYSDEVOP_eoi:
188 return _hypercall1(int, ia64_fast_eoi,
189 ((struct physdev_eoi *)arg)->irq);
190 default:
191 return xencomm_hypercall_physdev_op(cmd, arg);
192 }
193}
194
195static inline long
196xencomm_arch_hypercall_opt_feature(struct xencomm_handle *arg)
197{
198 return _hypercall1(long, opt_feature, arg);
199}
200
201/* for balloon driver */
202#define HYPERVISOR_update_va_mapping(va, new_val, flags) (0)
203
204/* Use xencomm to do hypercalls. */
205#define HYPERVISOR_sched_op xencomm_hypercall_sched_op
206#define HYPERVISOR_event_channel_op xencomm_hypercall_event_channel_op
207#define HYPERVISOR_callback_op xencomm_hypercall_callback_op
208#define HYPERVISOR_multicall xencomm_hypercall_multicall
209#define HYPERVISOR_xen_version xencomm_hypercall_xen_version
210#define HYPERVISOR_console_io xencomm_hypercall_console_io
211#define HYPERVISOR_memory_op xencomm_hypercall_memory_op
212#define HYPERVISOR_suspend xencomm_hypercall_suspend
213#define HYPERVISOR_vcpu_op xencomm_hypercall_vcpu_op
214#define HYPERVISOR_opt_feature xencomm_hypercall_opt_feature
215
216/* to compile gnttab_copy_grant_page() in drivers/xen/core/gnttab.c */
217#define HYPERVISOR_mmu_update(req, count, success_count, domid) ({ BUG(); 0; })
218
219static inline int
220HYPERVISOR_shutdown(
221 unsigned int reason)
222{
223 struct sched_shutdown sched_shutdown = {
224 .reason = reason
225 };
226
227 int rc = HYPERVISOR_sched_op(SCHEDOP_shutdown, &sched_shutdown);
228
229 return rc;
230}
231
232/* for netfront.c, netback.c */
233#define MULTI_UVMFLAGS_INDEX 0 /* XXX any value */
234
235static inline void
236MULTI_update_va_mapping(
237 struct multicall_entry *mcl, unsigned long va,
238 pte_t new_val, unsigned long flags)
239{
240 mcl->op = __HYPERVISOR_update_va_mapping;
241 mcl->result = 0;
242}
243
244static inline void
245MULTI_grant_table_op(struct multicall_entry *mcl, unsigned int cmd,
246 void *uop, unsigned int count)
247{
248 mcl->op = __HYPERVISOR_grant_table_op;
249 mcl->args[0] = cmd;
250 mcl->args[1] = (unsigned long)uop;
251 mcl->args[2] = count;
252}
253
254static inline void
255MULTI_mmu_update(struct multicall_entry *mcl, struct mmu_update *req,
256 int count, int *success_count, domid_t domid)
257{
258 mcl->op = __HYPERVISOR_mmu_update;
259 mcl->args[0] = (unsigned long)req;
260 mcl->args[1] = count;
261 mcl->args[2] = (unsigned long)success_count;
262 mcl->args[3] = domid;
263}
264
265#endif /* _ASM_IA64_XEN_HYPERCALL_H */
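
Note: every stub in hypercall.h above funnels into the single __hypercall() assembler entry point; the _hypercallN macros only spread the arguments and pass __HYPERVISOR_##name as the command. As an illustration of the macro layering (a manual expansion, not code from this patch), xencomm_arch_hypercall_sched_op() expands to roughly:

/* Sketch only: manual expansion of
 * _hypercall2(int, sched_op_new, cmd, arg) from the header above. */
static inline int demo_sched_op(int cmd, struct xencomm_handle *arg)
{
	long __res;

	__res = __hypercall((unsigned long)cmd, (unsigned long)arg,
			    0, 0, 0, __HYPERVISOR_sched_op_new);
	return (int)__res;
}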
diff --git a/arch/ia64/include/asm/xen/hypervisor.h b/arch/ia64/include/asm/xen/hypervisor.h
new file mode 100644
index 000000000000..7a804e80fc67
--- /dev/null
+++ b/arch/ia64/include/asm/xen/hypervisor.h
@@ -0,0 +1,89 @@
1/******************************************************************************
2 * hypervisor.h
3 *
4 * Linux-specific hypervisor handling.
5 *
6 * Copyright (c) 2002-2004, K A Fraser
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License version 2
10 * as published by the Free Software Foundation; or, when distributed
11 * separately from the Linux kernel or incorporated into other
12 * software packages, subject to the following license:
13 *
14 * Permission is hereby granted, free of charge, to any person obtaining a copy
15 * of this source file (the "Software"), to deal in the Software without
16 * restriction, including without limitation the rights to use, copy, modify,
17 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
18 * and to permit persons to whom the Software is furnished to do so, subject to
19 * the following conditions:
20 *
21 * The above copyright notice and this permission notice shall be included in
22 * all copies or substantial portions of the Software.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
25 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
26 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
27 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
28 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
29 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
30 * IN THE SOFTWARE.
31 */
32
33#ifndef _ASM_IA64_XEN_HYPERVISOR_H
34#define _ASM_IA64_XEN_HYPERVISOR_H
35
36#ifdef CONFIG_XEN
37
38#include <linux/init.h>
39#include <xen/interface/xen.h>
40#include <xen/interface/version.h> /* to compile feature.c */
41#include <xen/features.h> /* to comiple xen-netfront.c */
42#include <asm/xen/hypercall.h>
43
44/* xen_domain_type is set before executing any C code by early_xen_setup */
45enum xen_domain_type {
46 XEN_NATIVE,
47 XEN_PV_DOMAIN,
48 XEN_HVM_DOMAIN,
49};
50
51extern enum xen_domain_type xen_domain_type;
52
53#define xen_domain() (xen_domain_type != XEN_NATIVE)
54#define xen_pv_domain() (xen_domain_type == XEN_PV_DOMAIN)
55#define xen_initial_domain() (xen_pv_domain() && \
56 (xen_start_info->flags & SIF_INITDOMAIN))
57#define xen_hvm_domain() (xen_domain_type == XEN_HVM_DOMAIN)
58
59/* deprecated. remove this */
60#define is_running_on_xen() (xen_domain_type == XEN_PV_DOMAIN)
61
62extern struct shared_info *HYPERVISOR_shared_info;
63extern struct start_info *xen_start_info;
64
65void __init xen_setup_vcpu_info_placement(void);
66void force_evtchn_callback(void);
67
68/* for drivers/xen/balloon/balloon.c */
69#ifdef CONFIG_XEN_SCRUB_PAGES
70#define scrub_pages(_p, _n) memset((void *)(_p), 0, (_n) << PAGE_SHIFT)
71#else
72#define scrub_pages(_p, _n) ((void)0)
73#endif
74
75/* For setup_arch() in arch/ia64/kernel/setup.c */
76void xen_ia64_enable_opt_feature(void);
77
78#else /* CONFIG_XEN */
79
80#define xen_domain() (0)
81#define xen_pv_domain() (0)
82#define xen_initial_domain() (0)
83#define xen_hvm_domain() (0)
84#define is_running_on_xen() (0) /* deprecated. remove this */
85#endif
86
87#define is_initial_xendomain() (0) /* deprecated. remove this */
88
89#endif /* _ASM_IA64_XEN_HYPERVISOR_H */
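
Note: the xen_domain_type enum and the xen_domain()/xen_pv_domain()/xen_initial_domain() predicates above let shared code compile both with and without CONFIG_XEN, since the !CONFIG_XEN variants are constant 0. A minimal usage sketch follows; the function is hypothetical and only shows the guard pattern.

/* Sketch only: typical guard pattern enabled by the predicates above.
 * With CONFIG_XEN disabled the condition is constant and the Xen branch
 * is discarded by the compiler. */
static void demo_platform_fixup(void)
{
	if (!xen_domain())
		return;			/* running natively: nothing to do */

	if (xen_initial_domain()) {
		/* dom0-only setup would go here */
	}

	/* common Xen guest setup would go here */
}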
diff --git a/arch/ia64/include/asm/xen/inst.h b/arch/ia64/include/asm/xen/inst.h
new file mode 100644
index 000000000000..19c2ae1d878a
--- /dev/null
+++ b/arch/ia64/include/asm/xen/inst.h
@@ -0,0 +1,458 @@
1/******************************************************************************
2 * arch/ia64/include/asm/xen/inst.h
3 *
4 * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
5 * VA Linux Systems Japan K.K.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 *
21 */
22
23#include <asm/xen/privop.h>
24
25#define ia64_ivt xen_ivt
26#define DO_SAVE_MIN XEN_DO_SAVE_MIN
27
28#define __paravirt_switch_to xen_switch_to
29#define __paravirt_leave_syscall xen_leave_syscall
30#define __paravirt_work_processed_syscall xen_work_processed_syscall
31#define __paravirt_leave_kernel xen_leave_kernel
32#define __paravirt_pending_syscall_end xen_work_pending_syscall_end
33#define __paravirt_work_processed_syscall_target \
34 xen_work_processed_syscall
35
36#define MOV_FROM_IFA(reg) \
37 movl reg = XSI_IFA; \
38 ;; \
39 ld8 reg = [reg]
40
41#define MOV_FROM_ITIR(reg) \
42 movl reg = XSI_ITIR; \
43 ;; \
44 ld8 reg = [reg]
45
46#define MOV_FROM_ISR(reg) \
47 movl reg = XSI_ISR; \
48 ;; \
49 ld8 reg = [reg]
50
51#define MOV_FROM_IHA(reg) \
52 movl reg = XSI_IHA; \
53 ;; \
54 ld8 reg = [reg]
55
56#define MOV_FROM_IPSR(pred, reg) \
57(pred) movl reg = XSI_IPSR; \
58 ;; \
59(pred) ld8 reg = [reg]
60
61#define MOV_FROM_IIM(reg) \
62 movl reg = XSI_IIM; \
63 ;; \
64 ld8 reg = [reg]
65
66#define MOV_FROM_IIP(reg) \
67 movl reg = XSI_IIP; \
68 ;; \
69 ld8 reg = [reg]
70
71.macro __MOV_FROM_IVR reg, clob
72 .ifc "\reg", "r8"
73 XEN_HYPER_GET_IVR
74 .exitm
75 .endif
76 .ifc "\clob", "r8"
77 XEN_HYPER_GET_IVR
78 ;;
79 mov \reg = r8
80 .exitm
81 .endif
82
83 mov \clob = r8
84 ;;
85 XEN_HYPER_GET_IVR
86 ;;
87 mov \reg = r8
88 ;;
89 mov r8 = \clob
90.endm
91#define MOV_FROM_IVR(reg, clob) __MOV_FROM_IVR reg, clob
92
93.macro __MOV_FROM_PSR pred, reg, clob
94 .ifc "\reg", "r8"
95 (\pred) XEN_HYPER_GET_PSR;
96 .exitm
97 .endif
98 .ifc "\clob", "r8"
99 (\pred) XEN_HYPER_GET_PSR
100 ;;
101 (\pred) mov \reg = r8
102 .exitm
103 .endif
104
105 (\pred) mov \clob = r8
106 (\pred) XEN_HYPER_GET_PSR
107 ;;
108 (\pred) mov \reg = r8
109 (\pred) mov r8 = \clob
110.endm
111#define MOV_FROM_PSR(pred, reg, clob) __MOV_FROM_PSR pred, reg, clob
112
113
114#define MOV_TO_IFA(reg, clob) \
115 movl clob = XSI_IFA; \
116 ;; \
117 st8 [clob] = reg \
118
119#define MOV_TO_ITIR(pred, reg, clob) \
120(pred) movl clob = XSI_ITIR; \
121 ;; \
122(pred) st8 [clob] = reg
123
124#define MOV_TO_IHA(pred, reg, clob) \
125(pred) movl clob = XSI_IHA; \
126 ;; \
127(pred) st8 [clob] = reg
128
129#define MOV_TO_IPSR(pred, reg, clob) \
130(pred) movl clob = XSI_IPSR; \
131 ;; \
132(pred) st8 [clob] = reg; \
133 ;;
134
135#define MOV_TO_IFS(pred, reg, clob) \
136(pred) movl clob = XSI_IFS; \
137 ;; \
138(pred) st8 [clob] = reg; \
139 ;;
140
141#define MOV_TO_IIP(reg, clob) \
142 movl clob = XSI_IIP; \
143 ;; \
144 st8 [clob] = reg
145
146.macro ____MOV_TO_KR kr, reg, clob0, clob1
147 .ifc "\clob0", "r9"
148 .error "clob0 \clob0 must not be r9"
149 .endif
150 .ifc "\clob1", "r8"
151 .error "clob1 \clob1 must not be r8"
152 .endif
153
154 .ifnc "\reg", "r9"
155 .ifnc "\clob1", "r9"
156 mov \clob1 = r9
157 .endif
158 mov r9 = \reg
159 .endif
160 .ifnc "\clob0", "r8"
161 mov \clob0 = r8
162 .endif
163 mov r8 = \kr
164 ;;
165 XEN_HYPER_SET_KR
166
167 .ifnc "\reg", "r9"
168 .ifnc "\clob1", "r9"
169 mov r9 = \clob1
170 .endif
171 .endif
172 .ifnc "\clob0", "r8"
173 mov r8 = \clob0
174 .endif
175.endm
176
177.macro __MOV_TO_KR kr, reg, clob0, clob1
178 .ifc "\clob0", "r9"
179 ____MOV_TO_KR \kr, \reg, \clob1, \clob0
180 .exitm
181 .endif
182 .ifc "\clob1", "r8"
183 ____MOV_TO_KR \kr, \reg, \clob1, \clob0
184 .exitm
185 .endif
186
187 ____MOV_TO_KR \kr, \reg, \clob0, \clob1
188.endm
189
190#define MOV_TO_KR(kr, reg, clob0, clob1) \
191 __MOV_TO_KR IA64_KR_ ## kr, reg, clob0, clob1
192
193
194.macro __ITC_I pred, reg, clob
195 .ifc "\reg", "r8"
196 (\pred) XEN_HYPER_ITC_I
197 .exitm
198 .endif
199 .ifc "\clob", "r8"
200 (\pred) mov r8 = \reg
201 ;;
202 (\pred) XEN_HYPER_ITC_I
203 .exitm
204 .endif
205
206 (\pred) mov \clob = r8
207 (\pred) mov r8 = \reg
208 ;;
209 (\pred) XEN_HYPER_ITC_I
210 ;;
211 (\pred) mov r8 = \clob
212 ;;
213.endm
214#define ITC_I(pred, reg, clob) __ITC_I pred, reg, clob
215
216.macro __ITC_D pred, reg, clob
217 .ifc "\reg", "r8"
218 (\pred) XEN_HYPER_ITC_D
219 ;;
220 .exitm
221 .endif
222 .ifc "\clob", "r8"
223 (\pred) mov r8 = \reg
224 ;;
225 (\pred) XEN_HYPER_ITC_D
226 ;;
227 .exitm
228 .endif
229
230 (\pred) mov \clob = r8
231 (\pred) mov r8 = \reg
232 ;;
233 (\pred) XEN_HYPER_ITC_D
234 ;;
235 (\pred) mov r8 = \clob
236 ;;
237.endm
238#define ITC_D(pred, reg, clob) __ITC_D pred, reg, clob
239
240.macro __ITC_I_AND_D pred_i, pred_d, reg, clob
241 .ifc "\reg", "r8"
242 (\pred_i)XEN_HYPER_ITC_I
243 ;;
244 (\pred_d)XEN_HYPER_ITC_D
245 ;;
246 .exitm
247 .endif
248 .ifc "\clob", "r8"
249 mov r8 = \reg
250 ;;
251 (\pred_i)XEN_HYPER_ITC_I
252 ;;
253 (\pred_d)XEN_HYPER_ITC_D
254 ;;
255 .exitm
256 .endif
257
258 mov \clob = r8
259 mov r8 = \reg
260 ;;
261 (\pred_i)XEN_HYPER_ITC_I
262 ;;
263 (\pred_d)XEN_HYPER_ITC_D
264 ;;
265 mov r8 = \clob
266 ;;
267.endm
268#define ITC_I_AND_D(pred_i, pred_d, reg, clob) \
269 __ITC_I_AND_D pred_i, pred_d, reg, clob
270
271.macro __THASH pred, reg0, reg1, clob
272 .ifc "\reg0", "r8"
273 (\pred) mov r8 = \reg1
274 (\pred) XEN_HYPER_THASH
275 .exitm
276	.endif
277 .ifc "\reg1", "r8"
278 (\pred) XEN_HYPER_THASH
279 ;;
280 (\pred) mov \reg0 = r8
281 ;;
282 .exitm
283 .endif
284 .ifc "\clob", "r8"
285 (\pred) mov r8 = \reg1
286 (\pred) XEN_HYPER_THASH
287 ;;
288 (\pred) mov \reg0 = r8
289 ;;
290 .exitm
291 .endif
292
293 (\pred) mov \clob = r8
294 (\pred) mov r8 = \reg1
295 (\pred) XEN_HYPER_THASH
296 ;;
297 (\pred) mov \reg0 = r8
298 (\pred) mov r8 = \clob
299 ;;
300.endm
301#define THASH(pred, reg0, reg1, clob) __THASH pred, reg0, reg1, clob
302
303#define SSM_PSR_IC_AND_DEFAULT_BITS_AND_SRLZ_I(clob0, clob1) \
304 mov clob0 = 1; \
305 movl clob1 = XSI_PSR_IC; \
306 ;; \
307 st4 [clob1] = clob0 \
308 ;;
309
310#define SSM_PSR_IC_AND_SRLZ_D(clob0, clob1) \
311 ;; \
312 srlz.d; \
313 mov clob1 = 1; \
314 movl clob0 = XSI_PSR_IC; \
315 ;; \
316 st4 [clob0] = clob1
317
318#define RSM_PSR_IC(clob) \
319 movl clob = XSI_PSR_IC; \
320 ;; \
321 st4 [clob] = r0; \
322 ;;
323
324/* pred will be clobbered */
325#define MASK_TO_PEND_OFS (-1)
326#define SSM_PSR_I(pred, pred_clob, clob) \
327(pred) movl clob = XSI_PSR_I_ADDR \
328 ;; \
329(pred) ld8 clob = [clob] \
330 ;; \
331 /* if (pred) vpsr.i = 1 */ \
332 /* if (pred) (vcpu->vcpu_info->evtchn_upcall_mask)=0 */ \
333(pred) st1 [clob] = r0, MASK_TO_PEND_OFS \
334 ;; \
335 /* if (vcpu->vcpu_info->evtchn_upcall_pending) */ \
336(pred) ld1 clob = [clob] \
337 ;; \
338(pred) cmp.ne.unc pred_clob, p0 = clob, r0 \
339 ;; \
340(pred_clob)XEN_HYPER_SSM_I	/* do a real ssm psr.i */
341
342#define RSM_PSR_I(pred, clob0, clob1) \
343 movl clob0 = XSI_PSR_I_ADDR; \
344 mov clob1 = 1; \
345 ;; \
346 ld8 clob0 = [clob0]; \
347 ;; \
348(pred) st1 [clob0] = clob1
349
350#define RSM_PSR_I_IC(clob0, clob1, clob2) \
351 movl clob0 = XSI_PSR_I_ADDR; \
352 movl clob1 = XSI_PSR_IC; \
353 ;; \
354 ld8 clob0 = [clob0]; \
355 mov clob2 = 1; \
356 ;; \
357 /* note: clears both vpsr.i and vpsr.ic! */ \
358 st1 [clob0] = clob2; \
359 st4 [clob1] = r0; \
360 ;;
361
362#define RSM_PSR_DT \
363 XEN_HYPER_RSM_PSR_DT
364
365#define SSM_PSR_DT_AND_SRLZ_I \
366 XEN_HYPER_SSM_PSR_DT
367
368#define BSW_0(clob0, clob1, clob2) \
369 ;; \
370 /* r16-r31 all now hold bank1 values */ \
371 mov clob2 = ar.unat; \
372 movl clob0 = XSI_BANK1_R16; \
373 movl clob1 = XSI_BANK1_R16 + 8; \
374 ;; \
375.mem.offset 0, 0; st8.spill [clob0] = r16, 16; \
376.mem.offset 8, 0; st8.spill [clob1] = r17, 16; \
377 ;; \
378.mem.offset 0, 0; st8.spill [clob0] = r18, 16; \
379.mem.offset 8, 0; st8.spill [clob1] = r19, 16; \
380 ;; \
381.mem.offset 0, 0; st8.spill [clob0] = r20, 16; \
382.mem.offset 8, 0; st8.spill [clob1] = r21, 16; \
383 ;; \
384.mem.offset 0, 0; st8.spill [clob0] = r22, 16; \
385.mem.offset 8, 0; st8.spill [clob1] = r23, 16; \
386 ;; \
387.mem.offset 0, 0; st8.spill [clob0] = r24, 16; \
388.mem.offset 8, 0; st8.spill [clob1] = r25, 16; \
389 ;; \
390.mem.offset 0, 0; st8.spill [clob0] = r26, 16; \
391.mem.offset 8, 0; st8.spill [clob1] = r27, 16; \
392 ;; \
393.mem.offset 0, 0; st8.spill [clob0] = r28, 16; \
394.mem.offset 8, 0; st8.spill [clob1] = r29, 16; \
395 ;; \
396.mem.offset 0, 0; st8.spill [clob0] = r30, 16; \
397.mem.offset 8, 0; st8.spill [clob1] = r31, 16; \
398 ;; \
399 mov clob1 = ar.unat; \
400 movl clob0 = XSI_B1NAT; \
401 ;; \
402 st8 [clob0] = clob1; \
403 mov ar.unat = clob2; \
404 movl clob0 = XSI_BANKNUM; \
405 ;; \
406 st4 [clob0] = r0
407
408
409 /* FIXME: THIS CODE IS NOT NaT SAFE! */
410#define XEN_BSW_1(clob) \
411 mov clob = ar.unat; \
412 movl r30 = XSI_B1NAT; \
413 ;; \
414 ld8 r30 = [r30]; \
415 mov r31 = 1; \
416 ;; \
417 mov ar.unat = r30; \
418 movl r30 = XSI_BANKNUM; \
419 ;; \
420 st4 [r30] = r31; \
421 movl r30 = XSI_BANK1_R16; \
422 movl r31 = XSI_BANK1_R16+8; \
423 ;; \
424 ld8.fill r16 = [r30], 16; \
425 ld8.fill r17 = [r31], 16; \
426 ;; \
427 ld8.fill r18 = [r30], 16; \
428 ld8.fill r19 = [r31], 16; \
429 ;; \
430 ld8.fill r20 = [r30], 16; \
431 ld8.fill r21 = [r31], 16; \
432 ;; \
433 ld8.fill r22 = [r30], 16; \
434 ld8.fill r23 = [r31], 16; \
435 ;; \
436 ld8.fill r24 = [r30], 16; \
437 ld8.fill r25 = [r31], 16; \
438 ;; \
439 ld8.fill r26 = [r30], 16; \
440 ld8.fill r27 = [r31], 16; \
441 ;; \
442 ld8.fill r28 = [r30], 16; \
443 ld8.fill r29 = [r31], 16; \
444 ;; \
445 ld8.fill r30 = [r30]; \
446 ld8.fill r31 = [r31]; \
447 ;; \
448 mov ar.unat = clob
449
450#define BSW_1(clob0, clob1) XEN_BSW_1(clob1)
451
452
453#define COVER \
454 XEN_HYPER_COVER
455
456#define RFI \
457 XEN_HYPER_RFI; \
458 dv_serialize_data
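The common pattern in the macros above: a privileged read such as "mov reg = cr.ifa" becomes an ordinary load from a fixed slot in the XSI shared page, while operations that genuinely need the hypervisor (itc, thash, rfi, ...) fall back to a hyperprivop break. Below is a minimal C sketch of the read side, purely illustrative; read_virtual_ifa() and the hard-coded slot address are hypothetical stand-ins, the real offset comes from XSI_IFA_OFS.

	/* Sketch only: mirrors what MOV_FROM_IFA(reg) does in assembly.
	 * DEMO_XSI_IFA_ADDR stands in for XSI_BASE + XSI_IFA_OFS (assumed layout). */
	#include <stdint.h>

	#define DEMO_XSI_IFA_ADDR	0xfffffffffff00000UL	/* hypothetical slot address */

	static inline uint64_t read_virtual_ifa(void)
	{
		/* movl reg = XSI_IFA ;; ld8 reg = [reg] */
		return *(volatile uint64_t *)DEMO_XSI_IFA_ADDR;
	}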
diff --git a/arch/ia64/include/asm/xen/interface.h b/arch/ia64/include/asm/xen/interface.h
new file mode 100644
index 000000000000..f00fab40854d
--- /dev/null
+++ b/arch/ia64/include/asm/xen/interface.h
@@ -0,0 +1,346 @@
1/******************************************************************************
2 * arch-ia64/hypervisor-if.h
3 *
4 * Guest OS interface to IA64 Xen.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to
8 * deal in the Software without restriction, including without limitation the
9 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
10 * sell copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
23 *
24 * Copyright by those who contributed. (in alphabetical order)
25 *
26 * Anthony Xu <anthony.xu@intel.com>
27 * Eddie Dong <eddie.dong@intel.com>
28 * Fred Yang <fred.yang@intel.com>
29 * Kevin Tian <kevin.tian@intel.com>
30 * Alex Williamson <alex.williamson@hp.com>
31 * Chris Wright <chrisw@sous-sol.org>
32 * Christian Limpach <Christian.Limpach@cl.cam.ac.uk>
33 * Dietmar Hahn <dietmar.hahn@fujitsu-siemens.com>
34 * Hollis Blanchard <hollisb@us.ibm.com>
35 * Isaku Yamahata <yamahata@valinux.co.jp>
36 * Jan Beulich <jbeulich@novell.com>
37 * John Levon <john.levon@sun.com>
38 * Kazuhiro Suzuki <kaz@jp.fujitsu.com>
39 * Keir Fraser <keir.fraser@citrix.com>
40 * Kouya Shimura <kouya@jp.fujitsu.com>
41 * Masaki Kanno <kanno.masaki@jp.fujitsu.com>
42 * Matt Chapman <matthewc@hp.com>
43 * Matthew Chapman <matthewc@hp.com>
44 * Samuel Thibault <samuel.thibault@eu.citrix.com>
45 * Tomonari Horikoshi <t.horikoshi@jp.fujitsu.com>
46 * Tristan Gingold <tgingold@free.fr>
47 * Tsunehisa Doi <Doi.Tsunehisa@jp.fujitsu.com>
48 * Yutaka Ezaki <yutaka.ezaki@jp.fujitsu.com>
49 * Zhang Xin <xing.z.zhang@intel.com>
50 * Zhang xiantao <xiantao.zhang@intel.com>
51 * dan.magenheimer@hp.com
52 * ian.pratt@cl.cam.ac.uk
53 * michael.fetterman@cl.cam.ac.uk
54 */
55
56#ifndef _ASM_IA64_XEN_INTERFACE_H
57#define _ASM_IA64_XEN_INTERFACE_H
58
59#define __DEFINE_GUEST_HANDLE(name, type) \
60 typedef struct { type *p; } __guest_handle_ ## name
61
62#define DEFINE_GUEST_HANDLE_STRUCT(name) \
63 __DEFINE_GUEST_HANDLE(name, struct name)
64#define DEFINE_GUEST_HANDLE(name) __DEFINE_GUEST_HANDLE(name, name)
65#define GUEST_HANDLE(name) __guest_handle_ ## name
66#define GUEST_HANDLE_64(name) GUEST_HANDLE(name)
67#define set_xen_guest_handle(hnd, val) do { (hnd).p = val; } while (0)
68
69#ifndef __ASSEMBLY__
70/* Guest handles for primitive C types. */
71__DEFINE_GUEST_HANDLE(uchar, unsigned char);
72__DEFINE_GUEST_HANDLE(uint, unsigned int);
73__DEFINE_GUEST_HANDLE(ulong, unsigned long);
74__DEFINE_GUEST_HANDLE(u64, unsigned long);
75DEFINE_GUEST_HANDLE(char);
76DEFINE_GUEST_HANDLE(int);
77DEFINE_GUEST_HANDLE(long);
78DEFINE_GUEST_HANDLE(void);
79
80typedef unsigned long xen_pfn_t;
81DEFINE_GUEST_HANDLE(xen_pfn_t);
82#define PRI_xen_pfn "lx"
83#endif
84
85/* Arch specific VIRQs definition */
86#define VIRQ_ITC VIRQ_ARCH_0 /* V. Virtual itc timer */
87#define VIRQ_MCA_CMC VIRQ_ARCH_1 /* MCA cmc interrupt */
88#define VIRQ_MCA_CPE VIRQ_ARCH_2 /* MCA cpe interrupt */
89
90/* Maximum number of virtual CPUs in multi-processor guests. */
91/* keep sizeof(struct shared_page) <= PAGE_SIZE.
92 * this is checked in arch/ia64/xen/hypervisor.c. */
93#define MAX_VIRT_CPUS 64
94
95#ifndef __ASSEMBLY__
96
97#define INVALID_MFN (~0UL)
98
99union vac {
100 unsigned long value;
101 struct {
102 int a_int:1;
103 int a_from_int_cr:1;
104 int a_to_int_cr:1;
105 int a_from_psr:1;
106 int a_from_cpuid:1;
107 int a_cover:1;
108 int a_bsw:1;
109 long reserved:57;
110 };
111};
112
113union vdc {
114 unsigned long value;
115 struct {
116 int d_vmsw:1;
117 int d_extint:1;
118 int d_ibr_dbr:1;
119 int d_pmc:1;
120 int d_to_pmd:1;
121 int d_itm:1;
122 long reserved:58;
123 };
124};
125
126struct mapped_regs {
127 union vac vac;
128 union vdc vdc;
129 unsigned long virt_env_vaddr;
130 unsigned long reserved1[29];
131 unsigned long vhpi;
132 unsigned long reserved2[95];
133 union {
134 unsigned long vgr[16];
135 unsigned long bank1_regs[16]; /* bank1 regs (r16-r31)
136 when bank0 active */
137 };
138 union {
139 unsigned long vbgr[16];
140 unsigned long bank0_regs[16]; /* bank0 regs (r16-r31)
141 when bank1 active */
142 };
143 unsigned long vnat;
144 unsigned long vbnat;
145 unsigned long vcpuid[5];
146 unsigned long reserved3[11];
147 unsigned long vpsr;
148 unsigned long vpr;
149 unsigned long reserved4[76];
150 union {
151 unsigned long vcr[128];
152 struct {
153 unsigned long dcr; /* CR0 */
154 unsigned long itm;
155 unsigned long iva;
156 unsigned long rsv1[5];
157 unsigned long pta; /* CR8 */
158 unsigned long rsv2[7];
159 unsigned long ipsr; /* CR16 */
160 unsigned long isr;
161 unsigned long rsv3;
162 unsigned long iip;
163 unsigned long ifa;
164 unsigned long itir;
165 unsigned long iipa;
166 unsigned long ifs;
167 unsigned long iim; /* CR24 */
168 unsigned long iha;
169 unsigned long rsv4[38];
170 unsigned long lid; /* CR64 */
171 unsigned long ivr;
172 unsigned long tpr;
173 unsigned long eoi;
174 unsigned long irr[4];
175 unsigned long itv; /* CR72 */
176 unsigned long pmv;
177 unsigned long cmcv;
178 unsigned long rsv5[5];
179 unsigned long lrr0; /* CR80 */
180 unsigned long lrr1;
181 unsigned long rsv6[46];
182 };
183 };
184 union {
185 unsigned long reserved5[128];
186 struct {
187 unsigned long precover_ifs;
188 unsigned long unat; /* not sure if this is needed
189 until NaT arch is done */
190 int interrupt_collection_enabled; /* virtual psr.ic */
191
192 /* virtual interrupt deliverable flag is
193 * evtchn_upcall_mask in shared info area now.
194 * interrupt_mask_addr is the address
195 * of evtchn_upcall_mask for current vcpu
196 */
197 unsigned char *interrupt_mask_addr;
198 int pending_interruption;
199 unsigned char vpsr_pp;
200 unsigned char vpsr_dfh;
201 unsigned char hpsr_dfh;
202 unsigned char hpsr_mfh;
203 unsigned long reserved5_1[4];
204 int metaphysical_mode; /* 1 = use metaphys mapping
205 0 = use virtual */
206 int banknum; /* 0 or 1, which virtual
207 register bank is active */
208 unsigned long rrs[8]; /* region registers */
209 unsigned long krs[8]; /* kernel registers */
210 unsigned long tmp[16]; /* temp registers
211 (e.g. for hyperprivops) */
212 };
213 };
214};
215
216struct arch_vcpu_info {
217 /* nothing */
218};
219
220/*
221 * This structure is used for the magic page in the domain pseudo-physical
222 * address space and for the result of XENMEM_machine_memory_map.
223 * For the XENMEM_machine_memory_map result,
224 * xen_memory_map::nr_entries indicates the size in bytes,
225 * including struct xen_ia64_memmap_info, not the number of entries.
226 */
227struct xen_ia64_memmap_info {
228 uint64_t efi_memmap_size; /* size of EFI memory map */
229 uint64_t efi_memdesc_size; /* size of an EFI memory map
230 * descriptor */
231 uint32_t efi_memdesc_version; /* memory descriptor version */
232 void *memdesc[0]; /* array of efi_memory_desc_t */
233};
234
235struct arch_shared_info {
236 /* PFN of the start_info page. */
237 unsigned long start_info_pfn;
238
239 /* Interrupt vector for event channel. */
240 int evtchn_vector;
241
242 /* PFN of memmap_info page */
243	unsigned int memmap_info_num_pages;	/* currently only the single-page
244						   (= 1) case is supported. */
245 unsigned long memmap_info_pfn;
246
247 uint64_t pad[31];
248};
249
250struct xen_callback {
251 unsigned long ip;
252};
253typedef struct xen_callback xen_callback_t;
254
255#endif /* !__ASSEMBLY__ */
256
257/* Size of the shared_info area (this is not related to page size). */
258#define XSI_SHIFT 14
259#define XSI_SIZE (1 << XSI_SHIFT)
260/* Log size of mapped_regs area (64 KB - only 4KB is used). */
261#define XMAPPEDREGS_SHIFT 12
262#define XMAPPEDREGS_SIZE (1 << XMAPPEDREGS_SHIFT)
263/* Offset of XASI (Xen arch shared info) wrt XSI_BASE. */
264#define XMAPPEDREGS_OFS XSI_SIZE
265
266/* Hyperprivops. */
267#define HYPERPRIVOP_START 0x1
268#define HYPERPRIVOP_RFI (HYPERPRIVOP_START + 0x0)
269#define HYPERPRIVOP_RSM_DT (HYPERPRIVOP_START + 0x1)
270#define HYPERPRIVOP_SSM_DT (HYPERPRIVOP_START + 0x2)
271#define HYPERPRIVOP_COVER (HYPERPRIVOP_START + 0x3)
272#define HYPERPRIVOP_ITC_D (HYPERPRIVOP_START + 0x4)
273#define HYPERPRIVOP_ITC_I (HYPERPRIVOP_START + 0x5)
274#define HYPERPRIVOP_SSM_I (HYPERPRIVOP_START + 0x6)
275#define HYPERPRIVOP_GET_IVR (HYPERPRIVOP_START + 0x7)
276#define HYPERPRIVOP_GET_TPR (HYPERPRIVOP_START + 0x8)
277#define HYPERPRIVOP_SET_TPR (HYPERPRIVOP_START + 0x9)
278#define HYPERPRIVOP_EOI (HYPERPRIVOP_START + 0xa)
279#define HYPERPRIVOP_SET_ITM (HYPERPRIVOP_START + 0xb)
280#define HYPERPRIVOP_THASH (HYPERPRIVOP_START + 0xc)
281#define HYPERPRIVOP_PTC_GA (HYPERPRIVOP_START + 0xd)
282#define HYPERPRIVOP_ITR_D (HYPERPRIVOP_START + 0xe)
283#define HYPERPRIVOP_GET_RR (HYPERPRIVOP_START + 0xf)
284#define HYPERPRIVOP_SET_RR (HYPERPRIVOP_START + 0x10)
285#define HYPERPRIVOP_SET_KR (HYPERPRIVOP_START + 0x11)
286#define HYPERPRIVOP_FC (HYPERPRIVOP_START + 0x12)
287#define HYPERPRIVOP_GET_CPUID (HYPERPRIVOP_START + 0x13)
288#define HYPERPRIVOP_GET_PMD (HYPERPRIVOP_START + 0x14)
289#define HYPERPRIVOP_GET_EFLAG (HYPERPRIVOP_START + 0x15)
290#define HYPERPRIVOP_SET_EFLAG (HYPERPRIVOP_START + 0x16)
291#define HYPERPRIVOP_RSM_BE (HYPERPRIVOP_START + 0x17)
292#define HYPERPRIVOP_GET_PSR (HYPERPRIVOP_START + 0x18)
293#define HYPERPRIVOP_SET_RR0_TO_RR4 (HYPERPRIVOP_START + 0x19)
294#define HYPERPRIVOP_MAX (0x1a)
295
296/* Fast and light hypercalls. */
297#define __HYPERVISOR_ia64_fast_eoi __HYPERVISOR_arch_1
298
299/* Xencomm macros. */
300#define XENCOMM_INLINE_MASK 0xf800000000000000UL
301#define XENCOMM_INLINE_FLAG 0x8000000000000000UL
302
303#ifndef __ASSEMBLY__
304
305/*
306 * Optimization features.
307 * The hypervisor may do some special optimizations for guests. This hypercall
308 * can be used to switch on/off these special optimizations.
309 */
310#define __HYPERVISOR_opt_feature 0x700UL
311
312#define XEN_IA64_OPTF_OFF 0x0
313#define XEN_IA64_OPTF_ON 0x1
314
315/*
316 * If this feature is switched on, the hypervisor inserts the
317 * TLB entries without calling the guest's trap handler.
318 * This is useful in guests that use region 7 for identity mapping,
319 * as the Linux kernel does.
320 */
321#define XEN_IA64_OPTF_IDENT_MAP_REG7 1
322
323/* Identity mapping of region 4 addresses in HVM. */
324#define XEN_IA64_OPTF_IDENT_MAP_REG4 2
325
326/* Identity mapping of region 5 addresses in HVM. */
327#define XEN_IA64_OPTF_IDENT_MAP_REG5 3
328
329#define XEN_IA64_OPTF_IDENT_MAP_NOT_SET (0)
330
331struct xen_ia64_opt_feature {
332 unsigned long cmd; /* Which feature */
333 unsigned char on; /* Switch feature on/off */
334 union {
335 struct {
336 /* The page protection bit mask of the pte.
337 * This will be or'ed with the pte. */
338 unsigned long pgprot;
339 unsigned long key; /* A protection key for itir.*/
340 };
341 };
342};
343
344#endif /* __ASSEMBLY__ */
345
346#endif /* _ASM_IA64_XEN_INTERFACE_H */
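As a usage illustration of the guest-handle macros defined at the top of this header: the sketch below declares a handle type for a hypothetical structure and points it at a buffer. On ia64 a guest handle is just a wrapped pointer, so set_xen_guest_handle() is a plain assignment; struct demo_req, demo_fill_handle, h and buf are made-up names, not part of this patch.

	struct demo_req { unsigned long value; };
	DEFINE_GUEST_HANDLE_STRUCT(demo_req);	/* typedef struct { struct demo_req *p; } __guest_handle_demo_req */

	static void demo_fill_handle(void)
	{
		static struct demo_req buf;
		GUEST_HANDLE(demo_req) h;

		set_xen_guest_handle(h, &buf);	/* expands to h.p = &buf */
		(void)h;			/* would normally be passed on to a hypercall */
	}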
diff --git a/arch/ia64/include/asm/xen/irq.h b/arch/ia64/include/asm/xen/irq.h
new file mode 100644
index 000000000000..a90450983003
--- /dev/null
+++ b/arch/ia64/include/asm/xen/irq.h
@@ -0,0 +1,44 @@
1/******************************************************************************
2 * arch/ia64/include/asm/xen/irq.h
3 *
4 * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
5 * VA Linux Systems Japan K.K.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 *
21 */
22
23#ifndef _ASM_IA64_XEN_IRQ_H
24#define _ASM_IA64_XEN_IRQ_H
25
26/*
27 * The flat IRQ space is divided into two regions:
28 * 1. A one-to-one mapping of real physical IRQs. This space is only used
29 * if we have physical device-access privilege. This region is at the
30 * start of the IRQ space so that existing device drivers do not need
31 * to be modified to translate physical IRQ numbers into our IRQ space.
 32 * 2. A dynamic mapping of inter-domain and Xen-sourced virtual IRQs. These
33 * are bound using the provided bind/unbind functions.
34 */
35
36#define XEN_PIRQ_BASE 0
37#define XEN_NR_PIRQS 256
38
39#define XEN_DYNIRQ_BASE (XEN_PIRQ_BASE + XEN_NR_PIRQS)
40#define XEN_NR_DYNIRQS (NR_CPUS * 8)
41
42#define XEN_NR_IRQS (XEN_NR_PIRQS + XEN_NR_DYNIRQS)
43
44#endif /* _ASM_IA64_XEN_IRQ_H */
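The arithmetic implied by these constants, as a sketch: physical IRQs occupy numbers 0..255, and the dynamic (event-channel) IRQs start right after them, so with a hypothetical NR_CPUS of 4 the dynamic range would be 256..287. The helper below is illustrative only and not part of the patch.

	/* Sketch only; demo_dynirq_to_irq() and the NR_CPUS value are assumptions. */
	static inline int demo_dynirq_to_irq(int dynirq)
	{
		if (dynirq < 0 || dynirq >= XEN_NR_DYNIRQS)
			return -1;		/* outside the dynamic region */
		/* dynamic IRQ 0 maps to XEN_DYNIRQ_BASE == XEN_PIRQ_BASE + XEN_NR_PIRQS == 256 */
		return XEN_DYNIRQ_BASE + dynirq;
	}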
diff --git a/arch/ia64/include/asm/xen/minstate.h b/arch/ia64/include/asm/xen/minstate.h
new file mode 100644
index 000000000000..4d92d9bbda7b
--- /dev/null
+++ b/arch/ia64/include/asm/xen/minstate.h
@@ -0,0 +1,134 @@
1/*
2 * DO_SAVE_MIN switches to the kernel stacks (if necessary) and saves
3 * the minimum state necessary that allows us to turn psr.ic back
4 * on.
5 *
6 * Assumed state upon entry:
7 * psr.ic: off
8 * r31: contains saved predicates (pr)
9 *
10 * Upon exit, the state is as follows:
11 * psr.ic: off
12 * r2 = points to &pt_regs.r16
13 * r8 = contents of ar.ccv
14 * r9 = contents of ar.csd
15 * r10 = contents of ar.ssd
16 * r11 = FPSR_DEFAULT
17 * r12 = kernel sp (kernel virtual address)
18 * r13 = points to current task_struct (kernel virtual address)
19 * p15 = TRUE if psr.i is set in cr.ipsr
20 * predicate registers (other than p2, p3, and p15), b6, r3, r14, r15:
21 * preserved
22 * CONFIG_XEN note: p6/p7 are not preserved
23 *
24 * Note that psr.ic is NOT turned on by this macro. This is so that
25 * we can pass interruption state as arguments to a handler.
26 */
27#define XEN_DO_SAVE_MIN(__COVER,SAVE_IFS,EXTRA,WORKAROUND) \
28 mov r16=IA64_KR(CURRENT); /* M */ \
29 mov r27=ar.rsc; /* M */ \
30 mov r20=r1; /* A */ \
31 mov r25=ar.unat; /* M */ \
32 MOV_FROM_IPSR(p0,r29); /* M */ \
33 MOV_FROM_IIP(r28); /* M */ \
34 mov r21=ar.fpsr; /* M */ \
35 mov r26=ar.pfs; /* I */ \
36 __COVER; /* B;; (or nothing) */ \
37 adds r16=IA64_TASK_THREAD_ON_USTACK_OFFSET,r16; \
38 ;; \
39 ld1 r17=[r16]; /* load current->thread.on_ustack flag */ \
40 st1 [r16]=r0; /* clear current->thread.on_ustack flag */ \
41 adds r1=-IA64_TASK_THREAD_ON_USTACK_OFFSET,r16 \
42 /* switch from user to kernel RBS: */ \
43 ;; \
44 invala; /* M */ \
45 /* SAVE_IFS;*/ /* see xen special handling below */ \
46 cmp.eq pKStk,pUStk=r0,r17; /* are we in kernel mode already? */ \
47 ;; \
48(pUStk) mov ar.rsc=0; /* set enforced lazy mode, pl 0, little-endian, loadrs=0 */ \
49 ;; \
50(pUStk) mov.m r24=ar.rnat; \
51(pUStk) addl r22=IA64_RBS_OFFSET,r1; /* compute base of RBS */ \
52(pKStk) mov r1=sp; /* get sp */ \
53 ;; \
54(pUStk) lfetch.fault.excl.nt1 [r22]; \
55(pUStk) addl r1=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r1; /* compute base of memory stack */ \
56(pUStk) mov r23=ar.bspstore; /* save ar.bspstore */ \
57 ;; \
58(pUStk) mov ar.bspstore=r22; /* switch to kernel RBS */ \
59(pKStk) addl r1=-IA64_PT_REGS_SIZE,r1; /* if in kernel mode, use sp (r12) */ \
60 ;; \
61(pUStk) mov r18=ar.bsp; \
62(pUStk) mov ar.rsc=0x3; /* set eager mode, pl 0, little-endian, loadrs=0 */ \
63 adds r17=2*L1_CACHE_BYTES,r1; /* really: biggest cache-line size */ \
64 adds r16=PT(CR_IPSR),r1; \
65 ;; \
66 lfetch.fault.excl.nt1 [r17],L1_CACHE_BYTES; \
67 st8 [r16]=r29; /* save cr.ipsr */ \
68 ;; \
69 lfetch.fault.excl.nt1 [r17]; \
70 tbit.nz p15,p0=r29,IA64_PSR_I_BIT; \
71 mov r29=b0 \
72 ;; \
73 WORKAROUND; \
74 adds r16=PT(R8),r1; /* initialize first base pointer */ \
75 adds r17=PT(R9),r1; /* initialize second base pointer */ \
76(pKStk) mov r18=r0; /* make sure r18 isn't NaT */ \
77 ;; \
78.mem.offset 0,0; st8.spill [r16]=r8,16; \
79.mem.offset 8,0; st8.spill [r17]=r9,16; \
80 ;; \
81.mem.offset 0,0; st8.spill [r16]=r10,24; \
82 movl r8=XSI_PRECOVER_IFS; \
83.mem.offset 8,0; st8.spill [r17]=r11,24; \
84 ;; \
85 /* xen special handling for possibly lazy cover */ \
86 /* SAVE_MIN case in dispatch_ia32_handler: mov r30=r0 */ \
87 ld8 r30=[r8]; \
88(pUStk) sub r18=r18,r22; /* r18=RSE.ndirty*8 */ \
89 st8 [r16]=r28,16; /* save cr.iip */ \
90 ;; \
91 st8 [r17]=r30,16; /* save cr.ifs */ \
92 mov r8=ar.ccv; \
93 mov r9=ar.csd; \
94 mov r10=ar.ssd; \
95 movl r11=FPSR_DEFAULT; /* L-unit */ \
96 ;; \
97 st8 [r16]=r25,16; /* save ar.unat */ \
98 st8 [r17]=r26,16; /* save ar.pfs */ \
99 shl r18=r18,16; /* compute ar.rsc to be used for "loadrs" */ \
100 ;; \
101 st8 [r16]=r27,16; /* save ar.rsc */ \
102(pUStk) st8 [r17]=r24,16; /* save ar.rnat */ \
103(pKStk) adds r17=16,r17; /* skip over ar_rnat field */ \
104 ;; /* avoid RAW on r16 & r17 */ \
105(pUStk) st8 [r16]=r23,16; /* save ar.bspstore */ \
106 st8 [r17]=r31,16; /* save predicates */ \
107(pKStk) adds r16=16,r16; /* skip over ar_bspstore field */ \
108 ;; \
109 st8 [r16]=r29,16; /* save b0 */ \
110 st8 [r17]=r18,16; /* save ar.rsc value for "loadrs" */ \
111 cmp.eq pNonSys,pSys=r0,r0 /* initialize pSys=0, pNonSys=1 */ \
112 ;; \
113.mem.offset 0,0; st8.spill [r16]=r20,16; /* save original r1 */ \
114.mem.offset 8,0; st8.spill [r17]=r12,16; \
115 adds r12=-16,r1; /* switch to kernel memory stack (with 16 bytes of scratch) */ \
116 ;; \
117.mem.offset 0,0; st8.spill [r16]=r13,16; \
118.mem.offset 8,0; st8.spill [r17]=r21,16; /* save ar.fpsr */ \
119 mov r13=IA64_KR(CURRENT); /* establish `current' */ \
120 ;; \
121.mem.offset 0,0; st8.spill [r16]=r15,16; \
122.mem.offset 8,0; st8.spill [r17]=r14,16; \
123 ;; \
124.mem.offset 0,0; st8.spill [r16]=r2,16; \
125.mem.offset 8,0; st8.spill [r17]=r3,16; \
126 ACCOUNT_GET_STAMP \
127 adds r2=IA64_PT_REGS_R16_OFFSET,r1; \
128 ;; \
129 EXTRA; \
130 movl r1=__gp; /* establish kernel global pointer */ \
131 ;; \
132 ACCOUNT_SYS_ENTER \
133 BSW_1(r3,r14); /* switch back to bank 1 (must be last in insn group) */ \
134 ;;
diff --git a/arch/ia64/include/asm/xen/page.h b/arch/ia64/include/asm/xen/page.h
new file mode 100644
index 000000000000..03441a780b5b
--- /dev/null
+++ b/arch/ia64/include/asm/xen/page.h
@@ -0,0 +1,65 @@
1/******************************************************************************
2 * arch/ia64/include/asm/xen/page.h
3 *
4 * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
5 * VA Linux Systems Japan K.K.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 *
21 */
22
23#ifndef _ASM_IA64_XEN_PAGE_H
24#define _ASM_IA64_XEN_PAGE_H
25
26#define INVALID_P2M_ENTRY (~0UL)
27
28static inline unsigned long mfn_to_pfn(unsigned long mfn)
29{
30 return mfn;
31}
32
33static inline unsigned long pfn_to_mfn(unsigned long pfn)
34{
35 return pfn;
36}
37
38#define phys_to_machine_mapping_valid(_x) (1)
39
40static inline void *mfn_to_virt(unsigned long mfn)
41{
42 return __va(mfn << PAGE_SHIFT);
43}
44
45static inline unsigned long virt_to_mfn(void *virt)
46{
47 return __pa(virt) >> PAGE_SHIFT;
48}
49
50/* for tpmfront.c */
51static inline unsigned long virt_to_machine(void *virt)
52{
53 return __pa(virt);
54}
55
56static inline void set_phys_to_machine(unsigned long pfn, unsigned long mfn)
57{
58 /* nothing */
59}
60
61#define pte_mfn(_x) pte_pfn(_x)
62#define mfn_pte(_x, _y) __pte_ma(0) /* unmodified use */
63#define __pte_ma(_x) ((pte_t) {(_x)}) /* unmodified use */
64
65#endif /* _ASM_IA64_XEN_PAGE_H */
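Since every conversion in this header is either an identity mapping or a plain __pa/__va translation, guest frame numbers and machine frame numbers are interchangeable here. A small illustrative sketch of the virt_to_machine() use the "for tpmfront.c" comment refers to; demo_machine_addr_of() and demo_identity_check() are made-up names.

	/* Sketch only. */
	static unsigned long demo_machine_addr_of(void *kernel_buf)
	{
		/* on ia64/xen this is simply the physical address of the buffer */
		return virt_to_machine(kernel_buf);
	}

	static int demo_identity_check(unsigned long pfn)
	{
		/* pfn_to_mfn()/mfn_to_pfn() are identities on ia64 */
		return pfn_to_mfn(pfn) == pfn && mfn_to_pfn(pfn) == pfn;
	}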
diff --git a/arch/ia64/include/asm/xen/privop.h b/arch/ia64/include/asm/xen/privop.h
new file mode 100644
index 000000000000..71ec7546e100
--- /dev/null
+++ b/arch/ia64/include/asm/xen/privop.h
@@ -0,0 +1,129 @@
1#ifndef _ASM_IA64_XEN_PRIVOP_H
2#define _ASM_IA64_XEN_PRIVOP_H
3
4/*
5 * Copyright (C) 2005 Hewlett-Packard Co
6 * Dan Magenheimer <dan.magenheimer@hp.com>
7 *
8 * Paravirtualizations of privileged operations for Xen/ia64
9 *
10 *
11 * inline privop and paravirt_alt support
12 * Copyright (c) 2007 Isaku Yamahata <yamahata at valinux co jp>
13 * VA Linux Systems Japan K.K.
14 *
15 */
16
17#ifndef __ASSEMBLY__
18#include <linux/types.h> /* arch-ia64.h requires uint64_t */
19#endif
20#include <asm/xen/interface.h>
21
22/* At 1 MB, before per-cpu space but still addressable using addl instead
23 of movl. */
24#define XSI_BASE 0xfffffffffff00000
25
26/* Address of mapped regs. */
27#define XMAPPEDREGS_BASE (XSI_BASE + XSI_SIZE)
28
29#ifdef __ASSEMBLY__
30#define XEN_HYPER_RFI break HYPERPRIVOP_RFI
31#define XEN_HYPER_RSM_PSR_DT break HYPERPRIVOP_RSM_DT
32#define XEN_HYPER_SSM_PSR_DT break HYPERPRIVOP_SSM_DT
33#define XEN_HYPER_COVER break HYPERPRIVOP_COVER
34#define XEN_HYPER_ITC_D break HYPERPRIVOP_ITC_D
35#define XEN_HYPER_ITC_I break HYPERPRIVOP_ITC_I
36#define XEN_HYPER_SSM_I break HYPERPRIVOP_SSM_I
37#define XEN_HYPER_GET_IVR break HYPERPRIVOP_GET_IVR
38#define XEN_HYPER_THASH break HYPERPRIVOP_THASH
39#define XEN_HYPER_ITR_D break HYPERPRIVOP_ITR_D
40#define XEN_HYPER_SET_KR break HYPERPRIVOP_SET_KR
41#define XEN_HYPER_GET_PSR break HYPERPRIVOP_GET_PSR
42#define XEN_HYPER_SET_RR0_TO_RR4 break HYPERPRIVOP_SET_RR0_TO_RR4
43
44#define XSI_IFS (XSI_BASE + XSI_IFS_OFS)
45#define XSI_PRECOVER_IFS (XSI_BASE + XSI_PRECOVER_IFS_OFS)
46#define XSI_IFA (XSI_BASE + XSI_IFA_OFS)
47#define XSI_ISR (XSI_BASE + XSI_ISR_OFS)
48#define XSI_IIM (XSI_BASE + XSI_IIM_OFS)
49#define XSI_ITIR (XSI_BASE + XSI_ITIR_OFS)
50#define XSI_PSR_I_ADDR (XSI_BASE + XSI_PSR_I_ADDR_OFS)
51#define XSI_PSR_IC (XSI_BASE + XSI_PSR_IC_OFS)
52#define XSI_IPSR (XSI_BASE + XSI_IPSR_OFS)
53#define XSI_IIP (XSI_BASE + XSI_IIP_OFS)
54#define XSI_B1NAT (XSI_BASE + XSI_B1NATS_OFS)
55#define XSI_BANK1_R16 (XSI_BASE + XSI_BANK1_R16_OFS)
56#define XSI_BANKNUM (XSI_BASE + XSI_BANKNUM_OFS)
57#define XSI_IHA (XSI_BASE + XSI_IHA_OFS)
58#endif
59
60#ifndef __ASSEMBLY__
61
62/************************************************/
63/* Instructions paravirtualized for correctness */
64/************************************************/
65
66/* "fc" and "thash" are privilege-sensitive instructions, meaning they
67 * may have different semantics depending on whether they are executed
68 * at PL0 vs PL!=0. When paravirtualized, these instructions mustn't
69 * be allowed to execute directly, lest incorrect semantics result. */
70extern void xen_fc(unsigned long addr);
71extern unsigned long xen_thash(unsigned long addr);
72
73/* Note that "ttag" and "cover" are also privilege-sensitive; "ttag"
74 * is not currently used (though it may be in a long-format VHPT system!)
75 * and the semantics of cover only change if psr.ic is off which is very
 76 * rare (and currently non-existent outside of assembly code). */
77
78/* There are also privilege-sensitive registers. These registers are
79 * readable at any privilege level but only writable at PL0. */
80extern unsigned long xen_get_cpuid(int index);
81extern unsigned long xen_get_pmd(int index);
82
83extern unsigned long xen_get_eflag(void); /* see xen_ia64_getreg */
84extern void xen_set_eflag(unsigned long); /* see xen_ia64_setreg */
85
86/************************************************/
87/* Instructions paravirtualized for performance */
88/************************************************/
89
90/* Xen uses memory-mapped virtual privileged registers for access to many
91 * performance-sensitive privileged registers. Some, like the processor
92 * status register (psr), are broken up into multiple memory locations.
93 * Others, like "pend", are abstractions based on privileged registers.
94 * "Pend" is guaranteed to be set if reading cr.ivr would return a
95 * (non-spurious) interrupt. */
96#define XEN_MAPPEDREGS ((struct mapped_regs *)XMAPPEDREGS_BASE)
97
98#define XSI_PSR_I \
99 (*XEN_MAPPEDREGS->interrupt_mask_addr)
100#define xen_get_virtual_psr_i() \
101 (!XSI_PSR_I)
102#define xen_set_virtual_psr_i(_val) \
103 ({ XSI_PSR_I = (uint8_t)(_val) ? 0 : 1; })
104#define xen_set_virtual_psr_ic(_val) \
105 ({ XEN_MAPPEDREGS->interrupt_collection_enabled = _val ? 1 : 0; })
106#define xen_get_virtual_pend() \
107 (*(((uint8_t *)XEN_MAPPEDREGS->interrupt_mask_addr) - 1))
108
109/* Although all privileged operations can be left to trap and will
110 * be properly handled by Xen, some are frequent enough that we use
111 * hyperprivops for performance. */
112extern unsigned long xen_get_psr(void);
113extern unsigned long xen_get_ivr(void);
114extern unsigned long xen_get_tpr(void);
115extern void xen_hyper_ssm_i(void);
116extern void xen_set_itm(unsigned long);
117extern void xen_set_tpr(unsigned long);
118extern void xen_eoi(unsigned long);
119extern unsigned long xen_get_rr(unsigned long index);
120extern void xen_set_rr(unsigned long index, unsigned long val);
121extern void xen_set_rr0_to_rr4(unsigned long val0, unsigned long val1,
122 unsigned long val2, unsigned long val3,
123 unsigned long val4);
124extern void xen_set_kr(unsigned long index, unsigned long val);
125extern void xen_ptcga(unsigned long addr, unsigned long size);
126
127#endif /* !__ASSEMBLY__ */
128
129#endif /* _ASM_IA64_XEN_PRIVOP_H */
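The macros above turn virtual psr.i into a byte in the shared info area (evtchn_upcall_mask), so masking and unmasking virtual interrupts can be a simple store rather than a privileged rsm/ssm. Below is a minimal sketch of how a caller might use them; demo_local_irq_save()/demo_local_irq_restore() are made-up names and the real pv_irq_ops wiring lives elsewhere in this series.

	/* Sketch only; not the actual pv_irq_ops implementation.
	 * barrier() is the usual compiler barrier from <linux/compiler.h>. */
	static inline unsigned long demo_local_irq_save(void)
	{
		unsigned long was_enabled = xen_get_virtual_psr_i();

		xen_set_virtual_psr_i(0);	/* mask: evtchn_upcall_mask = 1 */
		barrier();
		return was_enabled;
	}

	static inline void demo_local_irq_restore(unsigned long enabled)
	{
		barrier();
		if (enabled) {
			xen_set_virtual_psr_i(1);	/* unmask: evtchn_upcall_mask = 0 */
			/* the real code would now check for a pending event and
			 * force delivery, e.g. via xen_hyper_ssm_i() */
		}
	}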
diff --git a/arch/ia64/include/asm/xen/xcom_hcall.h b/arch/ia64/include/asm/xen/xcom_hcall.h
new file mode 100644
index 000000000000..20b2950c71b6
--- /dev/null
+++ b/arch/ia64/include/asm/xen/xcom_hcall.h
@@ -0,0 +1,51 @@
1/*
2 * Copyright (C) 2006 Tristan Gingold <tristan.gingold@bull.net>, Bull SAS
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 */
18
19#ifndef _ASM_IA64_XEN_XCOM_HCALL_H
20#define _ASM_IA64_XEN_XCOM_HCALL_H
21
22/* These functions create an inline or mini descriptor for the parameters and
23 call the corresponding xencomm_arch_hypercall_X.
24 Architectures should define HYPERVISOR_xxx as xencomm_hypercall_xxx unless
25 they want to use their own wrapper. */
26extern int xencomm_hypercall_console_io(int cmd, int count, char *str);
27
28extern int xencomm_hypercall_event_channel_op(int cmd, void *op);
29
30extern int xencomm_hypercall_xen_version(int cmd, void *arg);
31
32extern int xencomm_hypercall_physdev_op(int cmd, void *op);
33
34extern int xencomm_hypercall_grant_table_op(unsigned int cmd, void *op,
35 unsigned int count);
36
37extern int xencomm_hypercall_sched_op(int cmd, void *arg);
38
39extern int xencomm_hypercall_multicall(void *call_list, int nr_calls);
40
41extern int xencomm_hypercall_callback_op(int cmd, void *arg);
42
43extern int xencomm_hypercall_memory_op(unsigned int cmd, void *arg);
44
45extern int xencomm_hypercall_suspend(unsigned long srec);
46
47extern long xencomm_hypercall_vcpu_op(int cmd, int cpu, void *arg);
48
49extern long xencomm_hypercall_opt_feature(void *arg);
50
51#endif /* _ASM_IA64_XEN_XCOM_HCALL_H */
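Following the comment at the top of this header, an architecture routes its generic hypercall entry points to these wrappers. A hedged sketch of what such a mapping and a call site could look like; the #define mirrors the usual Xen naming convention, CONSOLEIO_write comes from the public Xen interface headers, and demo_print_via_xen() is a made-up example, so treat the exact wiring as an assumption.

	/* Sketch only: map the generic name onto the xencomm wrapper. */
	#define HYPERVISOR_console_io	xencomm_hypercall_console_io

	static void demo_print_via_xen(void)
	{
		static char msg[] = "hello from an ia64 xen guest\n";

		/* CONSOLEIO_write is the standard Xen console-write command */
		HYPERVISOR_console_io(CONSOLEIO_write, sizeof(msg) - 1, msg);
	}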
diff --git a/arch/ia64/include/asm/xen/xencomm.h b/arch/ia64/include/asm/xen/xencomm.h
new file mode 100644
index 000000000000..cded677bebf2
--- /dev/null
+++ b/arch/ia64/include/asm/xen/xencomm.h
@@ -0,0 +1,42 @@
1/*
2 * Copyright (C) 2006 Hollis Blanchard <hollisb@us.ibm.com>, IBM Corporation
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 */
18
19#ifndef _ASM_IA64_XEN_XENCOMM_H
20#define _ASM_IA64_XEN_XENCOMM_H
21
22#include <xen/xencomm.h>
23#include <asm/pgtable.h>
24
25/* Must be called before any hypercall. */
26extern void xencomm_initialize(void);
27extern int xencomm_is_initialized(void);
28
29/* Check whether virtual contiguity implies physical contiguity
30 * for the passed virtual address.
31 * On ia64 this holds for the identity-mapped area in region 7 and for the
32 * piece of region 5 that is mapped by itr[IA64_TR_KERNEL]/dtr[IA64_TR_KERNEL].
33 */
34static inline int xencomm_is_phys_contiguous(unsigned long addr)
35{
36 return (PAGE_OFFSET <= addr &&
37 addr < (PAGE_OFFSET + (1UL << IA64_MAX_PHYS_BITS))) ||
38 (KERNEL_START <= addr &&
39 addr < KERNEL_START + KERNEL_TR_PAGE_SIZE);
40}
41
42#endif /* _ASM_IA64_XEN_XENCOMM_H */
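The check above is what allows a hypercall argument to be passed "inline" (as a guest physical address tagged with XENCOMM_INLINE_FLAG) instead of through a descriptor. A rough sketch of that decision follows; demo_make_inline_handle() is a hypothetical helper, and the real logic lives in the xencomm core code.

	/* Sketch only, assuming the constants from asm/xen/interface.h are visible. */
	static unsigned long demo_make_inline_handle(void *ptr)
	{
		unsigned long vaddr = (unsigned long)ptr;

		if (!xencomm_is_phys_contiguous(vaddr))
			return 0;	/* caller must fall back to a descriptor */

		/* inline handle: physical address with the inline flag or'ed in */
		return __pa(vaddr) | XENCOMM_INLINE_FLAG;
	}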