author		Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>	2009-04-07 16:34:16 -0400
committer	Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>	2009-04-07 16:34:16 -0400
commit		38f4b8c0da01ae7cd9b93386842ce272d6fde9ab (patch)
tree		3c8c52201aac038094bfea7efdd0984a8f62045e /arch/ia64/include/asm
parent		a811454027352c762e0d5bba1b1d8f7d26bf96ae (diff)
parent		8e2c4f2844c0e8dcdfe312e5f2204854ca8532c6 (diff)
Merge commit 'origin/master' into for-linus/xen/master
* commit 'origin/master': (4825 commits)
      Fix build errors due to CONFIG_BRANCH_TRACER=y
      parport: Use the PCI IRQ if offered
      tty: jsm cleanups
      Adjust path to gpio headers
      KGDB_SERIAL_CONSOLE check for module
      Change KCONFIG name
      tty: Blackin CTS/RTS
      Change hardware flow control from poll to interrupt driven
      Add support for the MAX3100 SPI UART.
      lanana: assign a device name and numbering for MAX3100
      serqt: initial clean up pass for tty side
      tty: Use the generic RS485 ioctl on CRIS
      tty: Correct inline types for tty_driver_kref_get()
      splice: fix deadlock in splicing to file
      nilfs2: support nanosecond timestamp
      nilfs2: introduce secondary super block
      nilfs2: simplify handling of active state of segments
      nilfs2: mark minor flag for checkpoint created by internal operation
      nilfs2: clean up sketch file
      nilfs2: super block operations fix endian bug
      ...

Conflicts:
	arch/x86/include/asm/thread_info.h
	arch/x86/lguest/boot.c
	drivers/xen/manage.c
Diffstat (limited to 'arch/ia64/include/asm')
-rw-r--r--  arch/ia64/include/asm/dma-mapping.h            | 194
-rw-r--r--  arch/ia64/include/asm/ftrace.h                 |  28
-rw-r--r--  arch/ia64/include/asm/hardirq.h                |  10
-rw-r--r--  arch/ia64/include/asm/intrinsics.h             |   6
-rw-r--r--  arch/ia64/include/asm/machvec.h                | 102
-rw-r--r--  arch/ia64/include/asm/machvec_dig_vtd.h        |  20
-rw-r--r--  arch/ia64/include/asm/machvec_hpzx1.h          |  23
-rw-r--r--  arch/ia64/include/asm/machvec_hpzx1_swiotlb.h  |  27
-rw-r--r--  arch/ia64/include/asm/machvec_sn2.h            |  27
-rw-r--r--  arch/ia64/include/asm/mmu_context.h            |   6
-rw-r--r--  arch/ia64/include/asm/module.h                 |   6
-rw-r--r--  arch/ia64/include/asm/native/inst.h            |  13
-rw-r--r--  arch/ia64/include/asm/native/patchlist.h       |  38
-rw-r--r--  arch/ia64/include/asm/native/pvchk_inst.h      |   8
-rw-r--r--  arch/ia64/include/asm/paravirt.h               |  65
-rw-r--r--  arch/ia64/include/asm/paravirt_patch.h         | 143
-rw-r--r--  arch/ia64/include/asm/paravirt_privop.h        | 365
-rw-r--r--  arch/ia64/include/asm/smp.h                    |   3
-rw-r--r--  arch/ia64/include/asm/spinlock.h               |  77
-rw-r--r--  arch/ia64/include/asm/timex.h                  |   1
-rw-r--r--  arch/ia64/include/asm/topology.h               |  10
-rw-r--r--  arch/ia64/include/asm/uv/uv_hub.h              |   6
-rw-r--r--  arch/ia64/include/asm/uv/uv_mmrs.h             | 158
-rw-r--r--  arch/ia64/include/asm/xen/hypervisor.h         |  39
-rw-r--r--  arch/ia64/include/asm/xen/inst.h               |  28
-rw-r--r--  arch/ia64/include/asm/xen/interface.h          |   9
-rw-r--r--  arch/ia64/include/asm/xen/minstate.h           |  11
-rw-r--r--  arch/ia64/include/asm/xen/patchlist.h          |  38
-rw-r--r--  arch/ia64/include/asm/xen/privop.h             |   8
29 files changed, 1133 insertions(+), 336 deletions(-)
diff --git a/arch/ia64/include/asm/dma-mapping.h b/arch/ia64/include/asm/dma-mapping.h
index 1f912d927585..36c0009dbece 100644
--- a/arch/ia64/include/asm/dma-mapping.h
+++ b/arch/ia64/include/asm/dma-mapping.h
@@ -11,99 +11,128 @@
 
 #define ARCH_HAS_DMA_GET_REQUIRED_MASK
 
-struct dma_mapping_ops {
-	int		(*mapping_error)(struct device *dev,
-					 dma_addr_t dma_addr);
-	void*		(*alloc_coherent)(struct device *dev, size_t size,
-				dma_addr_t *dma_handle, gfp_t gfp);
-	void		(*free_coherent)(struct device *dev, size_t size,
-				void *vaddr, dma_addr_t dma_handle);
-	dma_addr_t	(*map_single)(struct device *hwdev, unsigned long ptr,
-				size_t size, int direction);
-	void		(*unmap_single)(struct device *dev, dma_addr_t addr,
-				size_t size, int direction);
-	void		(*sync_single_for_cpu)(struct device *hwdev,
-				dma_addr_t dma_handle, size_t size,
-				int direction);
-	void		(*sync_single_for_device)(struct device *hwdev,
-				dma_addr_t dma_handle, size_t size,
-				int direction);
-	void		(*sync_single_range_for_cpu)(struct device *hwdev,
-				dma_addr_t dma_handle, unsigned long offset,
-				size_t size, int direction);
-	void		(*sync_single_range_for_device)(struct device *hwdev,
-				dma_addr_t dma_handle, unsigned long offset,
-				size_t size, int direction);
-	void		(*sync_sg_for_cpu)(struct device *hwdev,
-				struct scatterlist *sg, int nelems,
-				int direction);
-	void		(*sync_sg_for_device)(struct device *hwdev,
-				struct scatterlist *sg, int nelems,
-				int direction);
-	int		(*map_sg)(struct device *hwdev, struct scatterlist *sg,
-				int nents, int direction);
-	void		(*unmap_sg)(struct device *hwdev,
-				struct scatterlist *sg, int nents,
-				int direction);
-	int		(*dma_supported_op)(struct device *hwdev, u64 mask);
-	int		is_phys;
-};
-
-extern struct dma_mapping_ops *dma_ops;
+extern struct dma_map_ops *dma_ops;
 extern struct ia64_machine_vector ia64_mv;
 extern void set_iommu_machvec(void);
 
-#define dma_alloc_coherent(dev, size, handle, gfp)	\
-	platform_dma_alloc_coherent(dev, size, handle, (gfp) | GFP_DMA)
+extern void machvec_dma_sync_single(struct device *, dma_addr_t, size_t,
+				    enum dma_data_direction);
+extern void machvec_dma_sync_sg(struct device *, struct scatterlist *, int,
+				enum dma_data_direction);
 
-/* coherent mem. is cheap */
-static inline void *
-dma_alloc_noncoherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
-		      gfp_t flag)
+static inline void *dma_alloc_coherent(struct device *dev, size_t size,
+				       dma_addr_t *daddr, gfp_t gfp)
 {
-	return dma_alloc_coherent(dev, size, dma_handle, flag);
+	struct dma_map_ops *ops = platform_dma_get_ops(dev);
+	return ops->alloc_coherent(dev, size, daddr, gfp);
 }
-#define dma_free_coherent	platform_dma_free_coherent
-static inline void
-dma_free_noncoherent(struct device *dev, size_t size, void *cpu_addr,
-		     dma_addr_t dma_handle)
+
+static inline void dma_free_coherent(struct device *dev, size_t size,
+				     void *caddr, dma_addr_t daddr)
+{
+	struct dma_map_ops *ops = platform_dma_get_ops(dev);
+	ops->free_coherent(dev, size, caddr, daddr);
+}
+
+#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
+#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
+
+static inline dma_addr_t dma_map_single_attrs(struct device *dev,
+					      void *caddr, size_t size,
+					      enum dma_data_direction dir,
+					      struct dma_attrs *attrs)
+{
+	struct dma_map_ops *ops = platform_dma_get_ops(dev);
+	return ops->map_page(dev, virt_to_page(caddr),
+			     (unsigned long)caddr & ~PAGE_MASK, size,
+			     dir, attrs);
+}
+
+static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t daddr,
+					  size_t size,
+					  enum dma_data_direction dir,
+					  struct dma_attrs *attrs)
+{
+	struct dma_map_ops *ops = platform_dma_get_ops(dev);
+	ops->unmap_page(dev, daddr, size, dir, attrs);
+}
+
+#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, NULL)
+#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, NULL)
+
+static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
+				   int nents, enum dma_data_direction dir,
+				   struct dma_attrs *attrs)
+{
+	struct dma_map_ops *ops = platform_dma_get_ops(dev);
+	return ops->map_sg(dev, sgl, nents, dir, attrs);
+}
+
+static inline void dma_unmap_sg_attrs(struct device *dev,
+				      struct scatterlist *sgl, int nents,
+				      enum dma_data_direction dir,
+				      struct dma_attrs *attrs)
+{
+	struct dma_map_ops *ops = platform_dma_get_ops(dev);
+	ops->unmap_sg(dev, sgl, nents, dir, attrs);
+}
+
+#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, NULL)
+#define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, NULL)
+
+static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t daddr,
+					   size_t size,
+					   enum dma_data_direction dir)
 {
-	dma_free_coherent(dev, size, cpu_addr, dma_handle);
+	struct dma_map_ops *ops = platform_dma_get_ops(dev);
+	ops->sync_single_for_cpu(dev, daddr, size, dir);
 }
-#define dma_map_single_attrs	platform_dma_map_single_attrs
-static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
-					size_t size, int dir)
+
+static inline void dma_sync_sg_for_cpu(struct device *dev,
+				       struct scatterlist *sgl,
+				       int nents, enum dma_data_direction dir)
 {
-	return dma_map_single_attrs(dev, cpu_addr, size, dir, NULL);
+	struct dma_map_ops *ops = platform_dma_get_ops(dev);
+	ops->sync_sg_for_cpu(dev, sgl, nents, dir);
 }
-#define dma_map_sg_attrs	platform_dma_map_sg_attrs
-static inline int dma_map_sg(struct device *dev, struct scatterlist *sgl,
-			     int nents, int dir)
+
+static inline void dma_sync_single_for_device(struct device *dev,
+					      dma_addr_t daddr,
+					      size_t size,
+					      enum dma_data_direction dir)
 {
-	return dma_map_sg_attrs(dev, sgl, nents, dir, NULL);
+	struct dma_map_ops *ops = platform_dma_get_ops(dev);
+	ops->sync_single_for_device(dev, daddr, size, dir);
 }
-#define dma_unmap_single_attrs	platform_dma_unmap_single_attrs
-static inline void dma_unmap_single(struct device *dev, dma_addr_t cpu_addr,
-				    size_t size, int dir)
+
+static inline void dma_sync_sg_for_device(struct device *dev,
+					  struct scatterlist *sgl,
+					  int nents,
+					  enum dma_data_direction dir)
 {
-	return dma_unmap_single_attrs(dev, cpu_addr, size, dir, NULL);
+	struct dma_map_ops *ops = platform_dma_get_ops(dev);
+	ops->sync_sg_for_device(dev, sgl, nents, dir);
 }
-#define dma_unmap_sg_attrs	platform_dma_unmap_sg_attrs
-static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sgl,
-				int nents, int dir)
+
+static inline int dma_mapping_error(struct device *dev, dma_addr_t daddr)
+{
+	struct dma_map_ops *ops = platform_dma_get_ops(dev);
+	return ops->mapping_error(dev, daddr);
+}
+
+static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
+				      size_t offset, size_t size,
+				      enum dma_data_direction dir)
 {
-	return dma_unmap_sg_attrs(dev, sgl, nents, dir, NULL);
+	struct dma_map_ops *ops = platform_dma_get_ops(dev);
+	return ops->map_page(dev, page, offset, size, dir, NULL);
 }
-#define dma_sync_single_for_cpu		platform_dma_sync_single_for_cpu
-#define dma_sync_sg_for_cpu		platform_dma_sync_sg_for_cpu
-#define dma_sync_single_for_device	platform_dma_sync_single_for_device
-#define dma_sync_sg_for_device		platform_dma_sync_sg_for_device
-#define dma_mapping_error		platform_dma_mapping_error
 
-#define dma_map_page(dev, pg, off, size, dir)				\
-	dma_map_single(dev, page_address(pg) + (off), (size), (dir))
-#define dma_unmap_page(dev, dma_addr, size, dir)			\
-	dma_unmap_single(dev, dma_addr, size, dir)
+static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
+				  size_t size, enum dma_data_direction dir)
+{
+	dma_unmap_single(dev, addr, size, dir);
+}
 
 /*
  * Rest of this file is part of the "Advanced DMA API".  Use at your own risk.
@@ -115,7 +144,11 @@ static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sgl,
 #define dma_sync_single_range_for_device(dev, dma_handle, offset, size, dir)	\
 	dma_sync_single_for_device(dev, dma_handle, size, dir)
 
-#define dma_supported		platform_dma_supported
+static inline int dma_supported(struct device *dev, u64 mask)
+{
+	struct dma_map_ops *ops = platform_dma_get_ops(dev);
+	return ops->dma_supported(dev, mask);
+}
 
 static inline int
 dma_set_mask (struct device *dev, u64 mask)
@@ -141,11 +174,4 @@ dma_cache_sync (struct device *dev, void *vaddr, size_t size,
 
 #define dma_is_consistent(d, h)	(1)	/* all we do is coherent memory... */
 
-static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
-{
-	return dma_ops;
-}
-
-
-
 #endif /* _ASM_IA64_DMA_MAPPING_H */
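
Note on the dma-mapping.h conversion: the per-machvec dma_* macros are replaced by inlines that fetch a struct dma_map_ops from platform_dma_get_ops() and call through it, matching the dma_map_ops layout shared with x86. A minimal sketch of what a caller sees; the driver function and buffer names here are invented for illustration, while the dma_* calls are the inlines added above:

	/* Hypothetical driver code exercising the consolidated API. */
	static int my_rx_map(struct device *dev, void *buf, size_t len,
			     dma_addr_t *daddr)
	{
		*daddr = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
		if (dma_mapping_error(dev, *daddr))
			return -ENOMEM;		/* nothing was mapped */
		return 0;			/* pair with dma_unmap_single() */
	}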
diff --git a/arch/ia64/include/asm/ftrace.h b/arch/ia64/include/asm/ftrace.h
new file mode 100644
index 000000000000..d20db3c2a656
--- /dev/null
+++ b/arch/ia64/include/asm/ftrace.h
@@ -0,0 +1,28 @@
+#ifndef _ASM_IA64_FTRACE_H
+#define _ASM_IA64_FTRACE_H
+
+#ifdef CONFIG_FUNCTION_TRACER
+#define MCOUNT_INSN_SIZE        32 /* sizeof mcount call */
+
+#ifndef __ASSEMBLY__
+extern void _mcount(unsigned long pfs, unsigned long r1, unsigned long b0, unsigned long r0);
+#define mcount _mcount
+
+#include <asm/kprobes.h>
+/* In IA64, MCOUNT_ADDR is set in link time, so it's not a constant at compile time */
+#define MCOUNT_ADDR (((struct fnptr *)mcount)->ip)
+#define FTRACE_ADDR (((struct fnptr *)ftrace_caller)->ip)
+
+static inline unsigned long ftrace_call_adjust(unsigned long addr)
+{
+	/* second bundle, insn 2 */
+	return addr - 0x12;
+}
+
+struct dyn_arch_ftrace {
+};
+#endif
+
+#endif /* CONFIG_FUNCTION_TRACER */
+
+#endif /* _ASM_IA64_FTRACE_H */
diff --git a/arch/ia64/include/asm/hardirq.h b/arch/ia64/include/asm/hardirq.h
index 140e495b8e0e..d514cd9edb49 100644
--- a/arch/ia64/include/asm/hardirq.h
+++ b/arch/ia64/include/asm/hardirq.h
@@ -20,16 +20,6 @@
 
 #define local_softirq_pending()		(local_cpu_data->softirq_pending)
 
-#define HARDIRQ_BITS	14
-
-/*
- * The hardirq mask has to be large enough to have space for potentially all IRQ sources
- * in the system nesting on a single CPU:
- */
-#if (1 << HARDIRQ_BITS) < NR_IRQS
-# error HARDIRQ_BITS is too low!
-#endif
-
 extern void __iomem *ipi_base_addr;
 
 void ack_bad_irq(unsigned int irq);
diff --git a/arch/ia64/include/asm/intrinsics.h b/arch/ia64/include/asm/intrinsics.h
index c47830e26cb7..111ed5222892 100644
--- a/arch/ia64/include/asm/intrinsics.h
+++ b/arch/ia64/include/asm/intrinsics.h
@@ -202,7 +202,11 @@ extern long ia64_cmpxchg_called_with_bad_pointer (void);
 
 #ifndef __ASSEMBLY__
 #if defined(CONFIG_PARAVIRT) && defined(__KERNEL__)
-#define IA64_INTRINSIC_API(name)	pv_cpu_ops.name
+#ifdef ASM_SUPPORTED
+# define IA64_INTRINSIC_API(name)	paravirt_ ## name
+#else
+# define IA64_INTRINSIC_API(name)	pv_cpu_ops.name
+#endif
 #define IA64_INTRINSIC_MACRO(name)	paravirt_ ## name
 #else
 #define IA64_INTRINSIC_API(name)	ia64_native_ ## name
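
Note: read together with the #else branches retained above, IA64_INTRINSIC_API(name) now resolves three ways. A sketch of the expansion for one arbitrarily chosen intrinsic:

	/* IA64_INTRINSIC_API(thash) expands to:
	 *   paravirt_thash     CONFIG_PARAVIRT && ASM_SUPPORTED (patchable stub)
	 *   pv_cpu_ops.thash   CONFIG_PARAVIRT, no ASM_SUPPORTED (indirect call)
	 *   ia64_native_thash  !CONFIG_PARAVIRT (raw instruction)
	 */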
diff --git a/arch/ia64/include/asm/machvec.h b/arch/ia64/include/asm/machvec.h
index fe87b2121707..367d299d9938 100644
--- a/arch/ia64/include/asm/machvec.h
+++ b/arch/ia64/include/asm/machvec.h
@@ -11,7 +11,6 @@
 #define _ASM_IA64_MACHVEC_H
 
 #include <linux/types.h>
-#include <linux/swiotlb.h>
 
 /* forward declarations: */
 struct device;
@@ -45,24 +44,8 @@ typedef void ia64_mv_kernel_launch_event_t(void);
 
 /* DMA-mapping interface: */
 typedef void ia64_mv_dma_init (void);
-typedef void *ia64_mv_dma_alloc_coherent (struct device *, size_t, dma_addr_t *, gfp_t);
-typedef void ia64_mv_dma_free_coherent (struct device *, size_t, void *, dma_addr_t);
-typedef dma_addr_t ia64_mv_dma_map_single (struct device *, void *, size_t, int);
-typedef void ia64_mv_dma_unmap_single (struct device *, dma_addr_t, size_t, int);
-typedef int ia64_mv_dma_map_sg (struct device *, struct scatterlist *, int, int);
-typedef void ia64_mv_dma_unmap_sg (struct device *, struct scatterlist *, int, int);
-typedef void ia64_mv_dma_sync_single_for_cpu (struct device *, dma_addr_t, size_t, int);
-typedef void ia64_mv_dma_sync_sg_for_cpu (struct device *, struct scatterlist *, int, int);
-typedef void ia64_mv_dma_sync_single_for_device (struct device *, dma_addr_t, size_t, int);
-typedef void ia64_mv_dma_sync_sg_for_device (struct device *, struct scatterlist *, int, int);
-typedef int ia64_mv_dma_mapping_error(struct device *, dma_addr_t dma_addr);
-typedef int ia64_mv_dma_supported (struct device *, u64);
-
-typedef dma_addr_t ia64_mv_dma_map_single_attrs (struct device *, void *, size_t, int, struct dma_attrs *);
-typedef void ia64_mv_dma_unmap_single_attrs (struct device *, dma_addr_t, size_t, int, struct dma_attrs *);
-typedef int ia64_mv_dma_map_sg_attrs (struct device *, struct scatterlist *, int, int, struct dma_attrs *);
-typedef void ia64_mv_dma_unmap_sg_attrs (struct device *, struct scatterlist *, int, int, struct dma_attrs *);
 typedef u64 ia64_mv_dma_get_required_mask (struct device *);
+typedef struct dma_map_ops *ia64_mv_dma_get_ops(struct device *);
 
 /*
  * WARNING: The legacy I/O space is _architected_.  Platforms are
@@ -114,8 +97,6 @@ machvec_noop_bus (struct pci_bus *bus)
 
 extern void machvec_setup (char **);
 extern void machvec_timer_interrupt (int, void *);
-extern void machvec_dma_sync_single (struct device *, dma_addr_t, size_t, int);
-extern void machvec_dma_sync_sg (struct device *, struct scatterlist *, int, int);
 extern void machvec_tlb_migrate_finish (struct mm_struct *);
 
 # if defined (CONFIG_IA64_HP_SIM)
@@ -148,19 +129,8 @@ extern void machvec_tlb_migrate_finish (struct mm_struct *);
 # define platform_global_tlb_purge	ia64_mv.global_tlb_purge
 # define platform_tlb_migrate_finish	ia64_mv.tlb_migrate_finish
 # define platform_dma_init		ia64_mv.dma_init
-# define platform_dma_alloc_coherent	ia64_mv.dma_alloc_coherent
-# define platform_dma_free_coherent	ia64_mv.dma_free_coherent
-# define platform_dma_map_single_attrs	ia64_mv.dma_map_single_attrs
-# define platform_dma_unmap_single_attrs	ia64_mv.dma_unmap_single_attrs
-# define platform_dma_map_sg_attrs	ia64_mv.dma_map_sg_attrs
-# define platform_dma_unmap_sg_attrs	ia64_mv.dma_unmap_sg_attrs
-# define platform_dma_sync_single_for_cpu	ia64_mv.dma_sync_single_for_cpu
-# define platform_dma_sync_sg_for_cpu	ia64_mv.dma_sync_sg_for_cpu
-# define platform_dma_sync_single_for_device	ia64_mv.dma_sync_single_for_device
-# define platform_dma_sync_sg_for_device	ia64_mv.dma_sync_sg_for_device
-# define platform_dma_mapping_error		ia64_mv.dma_mapping_error
-# define platform_dma_supported		ia64_mv.dma_supported
 # define platform_dma_get_required_mask ia64_mv.dma_get_required_mask
+# define platform_dma_get_ops		ia64_mv.dma_get_ops
 # define platform_irq_to_vector		ia64_mv.irq_to_vector
 # define platform_local_vector_to_irq	ia64_mv.local_vector_to_irq
 # define platform_pci_get_legacy_mem	ia64_mv.pci_get_legacy_mem
@@ -203,19 +173,8 @@ struct ia64_machine_vector {
 	ia64_mv_global_tlb_purge_t *global_tlb_purge;
 	ia64_mv_tlb_migrate_finish_t *tlb_migrate_finish;
 	ia64_mv_dma_init *dma_init;
-	ia64_mv_dma_alloc_coherent *dma_alloc_coherent;
-	ia64_mv_dma_free_coherent *dma_free_coherent;
-	ia64_mv_dma_map_single_attrs *dma_map_single_attrs;
-	ia64_mv_dma_unmap_single_attrs *dma_unmap_single_attrs;
-	ia64_mv_dma_map_sg_attrs *dma_map_sg_attrs;
-	ia64_mv_dma_unmap_sg_attrs *dma_unmap_sg_attrs;
-	ia64_mv_dma_sync_single_for_cpu *dma_sync_single_for_cpu;
-	ia64_mv_dma_sync_sg_for_cpu *dma_sync_sg_for_cpu;
-	ia64_mv_dma_sync_single_for_device *dma_sync_single_for_device;
-	ia64_mv_dma_sync_sg_for_device *dma_sync_sg_for_device;
-	ia64_mv_dma_mapping_error *dma_mapping_error;
-	ia64_mv_dma_supported *dma_supported;
 	ia64_mv_dma_get_required_mask *dma_get_required_mask;
+	ia64_mv_dma_get_ops *dma_get_ops;
 	ia64_mv_irq_to_vector *irq_to_vector;
 	ia64_mv_local_vector_to_irq *local_vector_to_irq;
 	ia64_mv_pci_get_legacy_mem_t *pci_get_legacy_mem;
@@ -254,19 +213,8 @@ struct ia64_machine_vector {
 	platform_global_tlb_purge,		\
 	platform_tlb_migrate_finish,		\
 	platform_dma_init,			\
-	platform_dma_alloc_coherent,		\
-	platform_dma_free_coherent,		\
-	platform_dma_map_single_attrs,		\
-	platform_dma_unmap_single_attrs,	\
-	platform_dma_map_sg_attrs,		\
-	platform_dma_unmap_sg_attrs,		\
-	platform_dma_sync_single_for_cpu,	\
-	platform_dma_sync_sg_for_cpu,		\
-	platform_dma_sync_single_for_device,	\
-	platform_dma_sync_sg_for_device,	\
-	platform_dma_mapping_error,		\
-	platform_dma_supported,			\
 	platform_dma_get_required_mask,		\
+	platform_dma_get_ops,			\
 	platform_irq_to_vector,			\
 	platform_local_vector_to_irq,		\
 	platform_pci_get_legacy_mem,		\
@@ -302,6 +250,9 @@ extern void machvec_init_from_cmdline(const char *cmdline);
 # error Unknown configuration.  Update arch/ia64/include/asm/machvec.h.
 # endif /* CONFIG_IA64_GENERIC */
 
+extern void swiotlb_dma_init(void);
+extern struct dma_map_ops *dma_get_ops(struct device *);
+
 /*
  * Define default versions so we can extend machvec for new platforms without having
  * to update the machvec files for all existing platforms.
@@ -332,43 +283,10 @@ extern void machvec_init_from_cmdline(const char *cmdline);
 # define platform_kernel_launch_event	machvec_noop
 #endif
 #ifndef platform_dma_init
-# define platform_dma_init		swiotlb_init
-#endif
-#ifndef platform_dma_alloc_coherent
-# define platform_dma_alloc_coherent	swiotlb_alloc_coherent
-#endif
-#ifndef platform_dma_free_coherent
-# define platform_dma_free_coherent	swiotlb_free_coherent
-#endif
-#ifndef platform_dma_map_single_attrs
-# define platform_dma_map_single_attrs	swiotlb_map_single_attrs
-#endif
-#ifndef platform_dma_unmap_single_attrs
-# define platform_dma_unmap_single_attrs	swiotlb_unmap_single_attrs
-#endif
-#ifndef platform_dma_map_sg_attrs
-# define platform_dma_map_sg_attrs	swiotlb_map_sg_attrs
-#endif
-#ifndef platform_dma_unmap_sg_attrs
-# define platform_dma_unmap_sg_attrs	swiotlb_unmap_sg_attrs
-#endif
-#ifndef platform_dma_sync_single_for_cpu
-# define platform_dma_sync_single_for_cpu	swiotlb_sync_single_for_cpu
-#endif
-#ifndef platform_dma_sync_sg_for_cpu
-# define platform_dma_sync_sg_for_cpu	swiotlb_sync_sg_for_cpu
-#endif
-#ifndef platform_dma_sync_single_for_device
-# define platform_dma_sync_single_for_device	swiotlb_sync_single_for_device
-#endif
-#ifndef platform_dma_sync_sg_for_device
-# define platform_dma_sync_sg_for_device	swiotlb_sync_sg_for_device
-#endif
-#ifndef platform_dma_mapping_error
-# define platform_dma_mapping_error		swiotlb_dma_mapping_error
+# define platform_dma_init		swiotlb_dma_init
 #endif
-#ifndef platform_dma_supported
-# define platform_dma_supported	swiotlb_dma_supported
+#ifndef platform_dma_get_ops
+# define platform_dma_get_ops		dma_get_ops
 #endif
 #ifndef platform_dma_get_required_mask
 # define platform_dma_get_required_mask	ia64_dma_get_required_mask
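
Note: a machine vector now publishes one dma_get_ops hook instead of a dozen DMA entries. A hedged sketch of a backend; all my_* names are invented for illustration, while the ops fields are the ones the dma-mapping.h inlines call, and machvec_dma_sync_single/sg are the helpers declared there:

	static struct dma_map_ops my_dma_ops = {
		.alloc_coherent		= my_alloc_coherent,
		.free_coherent		= my_free_coherent,
		.map_page		= my_map_page,
		.unmap_page		= my_unmap_page,
		.map_sg			= my_map_sg,
		.unmap_sg		= my_unmap_sg,
		.sync_single_for_cpu	= machvec_dma_sync_single,
		.sync_sg_for_cpu	= machvec_dma_sync_sg,
		.sync_single_for_device	= machvec_dma_sync_single,
		.sync_sg_for_device	= machvec_dma_sync_sg,
		.mapping_error		= my_mapping_error,
		.dma_supported		= my_dma_supported,
	};

	struct dma_map_ops *my_dma_get_ops(struct device *dev)
	{
		return &my_dma_ops;	/* may also pick ops per device */
	}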
diff --git a/arch/ia64/include/asm/machvec_dig_vtd.h b/arch/ia64/include/asm/machvec_dig_vtd.h
index 3400b561e711..6ab1de5c45ef 100644
--- a/arch/ia64/include/asm/machvec_dig_vtd.h
+++ b/arch/ia64/include/asm/machvec_dig_vtd.h
@@ -2,14 +2,6 @@
 #define _ASM_IA64_MACHVEC_DIG_VTD_h
 
 extern ia64_mv_setup_t			dig_setup;
-extern ia64_mv_dma_alloc_coherent	vtd_alloc_coherent;
-extern ia64_mv_dma_free_coherent	vtd_free_coherent;
-extern ia64_mv_dma_map_single_attrs	vtd_map_single_attrs;
-extern ia64_mv_dma_unmap_single_attrs	vtd_unmap_single_attrs;
-extern ia64_mv_dma_map_sg_attrs		vtd_map_sg_attrs;
-extern ia64_mv_dma_unmap_sg_attrs	vtd_unmap_sg_attrs;
-extern ia64_mv_dma_supported		iommu_dma_supported;
-extern ia64_mv_dma_mapping_error	vtd_dma_mapping_error;
 extern ia64_mv_dma_init			pci_iommu_alloc;
 
 /*
@@ -22,17 +14,5 @@ extern ia64_mv_dma_init			pci_iommu_alloc;
 #define platform_name				"dig_vtd"
 #define platform_setup				dig_setup
 #define platform_dma_init			pci_iommu_alloc
-#define platform_dma_alloc_coherent		vtd_alloc_coherent
-#define platform_dma_free_coherent		vtd_free_coherent
-#define platform_dma_map_single_attrs		vtd_map_single_attrs
-#define platform_dma_unmap_single_attrs		vtd_unmap_single_attrs
-#define platform_dma_map_sg_attrs		vtd_map_sg_attrs
-#define platform_dma_unmap_sg_attrs		vtd_unmap_sg_attrs
-#define platform_dma_sync_single_for_cpu	machvec_dma_sync_single
-#define platform_dma_sync_sg_for_cpu		machvec_dma_sync_sg
-#define platform_dma_sync_single_for_device	machvec_dma_sync_single
-#define platform_dma_sync_sg_for_device		machvec_dma_sync_sg
-#define platform_dma_supported			iommu_dma_supported
-#define platform_dma_mapping_error		vtd_dma_mapping_error
 
 #endif /* _ASM_IA64_MACHVEC_DIG_VTD_h */
diff --git a/arch/ia64/include/asm/machvec_hpzx1.h b/arch/ia64/include/asm/machvec_hpzx1.h
index 2f57f5144b9f..3bd83d78a412 100644
--- a/arch/ia64/include/asm/machvec_hpzx1.h
+++ b/arch/ia64/include/asm/machvec_hpzx1.h
@@ -2,14 +2,7 @@
 #define _ASM_IA64_MACHVEC_HPZX1_h
 
 extern ia64_mv_setup_t			dig_setup;
-extern ia64_mv_dma_alloc_coherent	sba_alloc_coherent;
-extern ia64_mv_dma_free_coherent	sba_free_coherent;
-extern ia64_mv_dma_map_single_attrs	sba_map_single_attrs;
-extern ia64_mv_dma_unmap_single_attrs	sba_unmap_single_attrs;
-extern ia64_mv_dma_map_sg_attrs		sba_map_sg_attrs;
-extern ia64_mv_dma_unmap_sg_attrs	sba_unmap_sg_attrs;
-extern ia64_mv_dma_supported		sba_dma_supported;
-extern ia64_mv_dma_mapping_error	sba_dma_mapping_error;
+extern ia64_mv_dma_init			sba_dma_init;
 
 /*
  * This stuff has dual use!
@@ -20,18 +13,6 @@ extern ia64_mv_dma_mapping_error	sba_dma_mapping_error;
  */
 #define platform_name			"hpzx1"
 #define platform_setup			dig_setup
-#define platform_dma_init		machvec_noop
-#define platform_dma_alloc_coherent	sba_alloc_coherent
-#define platform_dma_free_coherent	sba_free_coherent
-#define platform_dma_map_single_attrs	sba_map_single_attrs
-#define platform_dma_unmap_single_attrs	sba_unmap_single_attrs
-#define platform_dma_map_sg_attrs	sba_map_sg_attrs
-#define platform_dma_unmap_sg_attrs	sba_unmap_sg_attrs
-#define platform_dma_sync_single_for_cpu	machvec_dma_sync_single
-#define platform_dma_sync_sg_for_cpu		machvec_dma_sync_sg
-#define platform_dma_sync_single_for_device	machvec_dma_sync_single
-#define platform_dma_sync_sg_for_device		machvec_dma_sync_sg
-#define platform_dma_supported		sba_dma_supported
-#define platform_dma_mapping_error	sba_dma_mapping_error
+#define platform_dma_init		sba_dma_init
 
 #endif /* _ASM_IA64_MACHVEC_HPZX1_h */
diff --git a/arch/ia64/include/asm/machvec_hpzx1_swiotlb.h b/arch/ia64/include/asm/machvec_hpzx1_swiotlb.h
index a842cdda827b..1091ac39740c 100644
--- a/arch/ia64/include/asm/machvec_hpzx1_swiotlb.h
+++ b/arch/ia64/include/asm/machvec_hpzx1_swiotlb.h
@@ -2,18 +2,7 @@
 #define _ASM_IA64_MACHVEC_HPZX1_SWIOTLB_h
 
 extern ia64_mv_setup_t			dig_setup;
-extern ia64_mv_dma_alloc_coherent	hwsw_alloc_coherent;
-extern ia64_mv_dma_free_coherent	hwsw_free_coherent;
-extern ia64_mv_dma_map_single_attrs	hwsw_map_single_attrs;
-extern ia64_mv_dma_unmap_single_attrs	hwsw_unmap_single_attrs;
-extern ia64_mv_dma_map_sg_attrs		hwsw_map_sg_attrs;
-extern ia64_mv_dma_unmap_sg_attrs	hwsw_unmap_sg_attrs;
-extern ia64_mv_dma_supported		hwsw_dma_supported;
-extern ia64_mv_dma_mapping_error	hwsw_dma_mapping_error;
-extern ia64_mv_dma_sync_single_for_cpu	hwsw_sync_single_for_cpu;
-extern ia64_mv_dma_sync_sg_for_cpu	hwsw_sync_sg_for_cpu;
-extern ia64_mv_dma_sync_single_for_device	hwsw_sync_single_for_device;
-extern ia64_mv_dma_sync_sg_for_device	hwsw_sync_sg_for_device;
+extern ia64_mv_dma_get_ops		hwsw_dma_get_ops;
 
 /*
  * This stuff has dual use!
@@ -23,20 +12,8 @@ extern ia64_mv_dma_sync_sg_for_device hwsw_sync_sg_for_device;
  * the macros are used directly.
  */
 #define platform_name				"hpzx1_swiotlb"
-
 #define platform_setup				dig_setup
 #define platform_dma_init			machvec_noop
-#define platform_dma_alloc_coherent		hwsw_alloc_coherent
-#define platform_dma_free_coherent		hwsw_free_coherent
-#define platform_dma_map_single_attrs		hwsw_map_single_attrs
-#define platform_dma_unmap_single_attrs		hwsw_unmap_single_attrs
-#define platform_dma_map_sg_attrs		hwsw_map_sg_attrs
-#define platform_dma_unmap_sg_attrs		hwsw_unmap_sg_attrs
-#define platform_dma_supported			hwsw_dma_supported
-#define platform_dma_mapping_error		hwsw_dma_mapping_error
-#define platform_dma_sync_single_for_cpu	hwsw_sync_single_for_cpu
-#define platform_dma_sync_sg_for_cpu		hwsw_sync_sg_for_cpu
-#define platform_dma_sync_single_for_device	hwsw_sync_single_for_device
-#define platform_dma_sync_sg_for_device		hwsw_sync_sg_for_device
+#define platform_dma_get_ops			hwsw_dma_get_ops
 
 #endif /* _ASM_IA64_MACHVEC_HPZX1_SWIOTLB_h */
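
Note: hwsw_dma_get_ops collapses the twelve forwarding macros this header used to carry into one per-device decision. Roughly, and hedged: use_swiotlb(), swiotlb_dma_ops and sba_dma_ops are the names the hpzx1 code appears to use elsewhere (arch/ia64/hp/common/hwsw_iommu.c), cited from memory:

	struct dma_map_ops *hwsw_dma_get_ops(struct device *dev)
	{
		/* devices that cannot reach all of memory get bounce
		 * buffers; the rest go through the sba IOMMU */
		if (use_swiotlb(dev))
			return &swiotlb_dma_ops;
		return &sba_dma_ops;
	}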
diff --git a/arch/ia64/include/asm/machvec_sn2.h b/arch/ia64/include/asm/machvec_sn2.h
index f1a6e0d6dfa5..f061a30aac42 100644
--- a/arch/ia64/include/asm/machvec_sn2.h
+++ b/arch/ia64/include/asm/machvec_sn2.h
@@ -55,19 +55,8 @@ extern ia64_mv_readb_t __sn_readb_relaxed;
 extern ia64_mv_readw_t __sn_readw_relaxed;
 extern ia64_mv_readl_t __sn_readl_relaxed;
 extern ia64_mv_readq_t __sn_readq_relaxed;
-extern ia64_mv_dma_alloc_coherent	sn_dma_alloc_coherent;
-extern ia64_mv_dma_free_coherent	sn_dma_free_coherent;
-extern ia64_mv_dma_map_single_attrs	sn_dma_map_single_attrs;
-extern ia64_mv_dma_unmap_single_attrs	sn_dma_unmap_single_attrs;
-extern ia64_mv_dma_map_sg_attrs		sn_dma_map_sg_attrs;
-extern ia64_mv_dma_unmap_sg_attrs	sn_dma_unmap_sg_attrs;
-extern ia64_mv_dma_sync_single_for_cpu	sn_dma_sync_single_for_cpu;
-extern ia64_mv_dma_sync_sg_for_cpu	sn_dma_sync_sg_for_cpu;
-extern ia64_mv_dma_sync_single_for_device	sn_dma_sync_single_for_device;
-extern ia64_mv_dma_sync_sg_for_device	sn_dma_sync_sg_for_device;
-extern ia64_mv_dma_mapping_error	sn_dma_mapping_error;
-extern ia64_mv_dma_supported		sn_dma_supported;
 extern ia64_mv_dma_get_required_mask	sn_dma_get_required_mask;
+extern ia64_mv_dma_init			sn_dma_init;
 extern ia64_mv_migrate_t		sn_migrate;
 extern ia64_mv_kernel_launch_event_t	sn_kernel_launch_event;
 extern ia64_mv_setup_msi_irq_t		sn_setup_msi_irq;
@@ -111,20 +100,8 @@ extern ia64_mv_pci_fixup_bus_t	sn_pci_fixup_bus;
 #define platform_pci_get_legacy_mem	sn_pci_get_legacy_mem
 #define platform_pci_legacy_read	sn_pci_legacy_read
 #define platform_pci_legacy_write	sn_pci_legacy_write
-#define platform_dma_init		machvec_noop
-#define platform_dma_alloc_coherent	sn_dma_alloc_coherent
-#define platform_dma_free_coherent	sn_dma_free_coherent
-#define platform_dma_map_single_attrs	sn_dma_map_single_attrs
-#define platform_dma_unmap_single_attrs	sn_dma_unmap_single_attrs
-#define platform_dma_map_sg_attrs	sn_dma_map_sg_attrs
-#define platform_dma_unmap_sg_attrs	sn_dma_unmap_sg_attrs
-#define platform_dma_sync_single_for_cpu	sn_dma_sync_single_for_cpu
-#define platform_dma_sync_sg_for_cpu	sn_dma_sync_sg_for_cpu
-#define platform_dma_sync_single_for_device	sn_dma_sync_single_for_device
-#define platform_dma_sync_sg_for_device	sn_dma_sync_sg_for_device
-#define platform_dma_mapping_error	sn_dma_mapping_error
-#define platform_dma_supported		sn_dma_supported
 #define platform_dma_get_required_mask	sn_dma_get_required_mask
+#define platform_dma_init		sn_dma_init
 #define platform_migrate		sn_migrate
 #define platform_kernel_launch_event	sn_kernel_launch_event
 #ifdef CONFIG_PCI_MSI
diff --git a/arch/ia64/include/asm/mmu_context.h b/arch/ia64/include/asm/mmu_context.h
index 040bc87db930..7f2a456603cb 100644
--- a/arch/ia64/include/asm/mmu_context.h
+++ b/arch/ia64/include/asm/mmu_context.h
@@ -87,7 +87,7 @@ get_mmu_context (struct mm_struct *mm)
 	/* re-check, now that we've got the lock: */
 	context = mm->context;
 	if (context == 0) {
-		cpus_clear(mm->cpu_vm_mask);
+		cpumask_clear(mm_cpumask(mm));
 		if (ia64_ctx.next >= ia64_ctx.limit) {
 			ia64_ctx.next = find_next_zero_bit(ia64_ctx.bitmap,
 					ia64_ctx.max_ctx, ia64_ctx.next);
@@ -166,8 +166,8 @@ activate_context (struct mm_struct *mm)
 
 	do {
 		context = get_mmu_context(mm);
-		if (!cpu_isset(smp_processor_id(), mm->cpu_vm_mask))
-			cpu_set(smp_processor_id(), mm->cpu_vm_mask);
+		if (!cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm)))
+			cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
 		reload_context(context);
 		/*
 		 * in the unlikely event of a TLB-flush by another thread,
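
Note: the mmu_context.h hunks belong to the tree-wide cpumask conversion rather than the DMA work; direct mm->cpu_vm_mask accesses become accessor calls so the mask's representation can change. The mapping used above, for reference:

	/* old form                           new form
	 * cpus_clear(mm->cpu_vm_mask)     -> cpumask_clear(mm_cpumask(mm))
	 * cpu_isset(cpu, mm->cpu_vm_mask) -> cpumask_test_cpu(cpu, mm_cpumask(mm))
	 * cpu_set(cpu, mm->cpu_vm_mask)   -> cpumask_set_cpu(cpu, mm_cpumask(mm))
	 */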
diff --git a/arch/ia64/include/asm/module.h b/arch/ia64/include/asm/module.h
index d2da61e4c49b..908eaef42a08 100644
--- a/arch/ia64/include/asm/module.h
+++ b/arch/ia64/include/asm/module.h
@@ -16,6 +16,12 @@ struct mod_arch_specific {
 	struct elf64_shdr *got;		/* global offset table */
 	struct elf64_shdr *opd;		/* official procedure descriptors */
 	struct elf64_shdr *unwind;	/* unwind-table section */
+#ifdef CONFIG_PARAVIRT
+	struct elf64_shdr *paravirt_bundles;
+					/* paravirt_alt_bundle_patch table */
+	struct elf64_shdr *paravirt_insts;
+					/* paravirt_alt_inst_patch table */
+#endif
 	unsigned long gp;		/* global-pointer for module */
 
 	void *core_unw_table;		/* core unwind-table cookie returned by unwinder */
diff --git a/arch/ia64/include/asm/native/inst.h b/arch/ia64/include/asm/native/inst.h
index 0a1026cca4fa..d2d46efb3e6e 100644
--- a/arch/ia64/include/asm/native/inst.h
+++ b/arch/ia64/include/asm/native/inst.h
@@ -30,6 +30,9 @@
 #define __paravirt_work_processed_syscall_target \
 						ia64_work_processed_syscall
 
+#define paravirt_fsyscall_table			ia64_native_fsyscall_table
+#define paravirt_fsys_bubble_down		ia64_native_fsys_bubble_down
+
 #ifdef CONFIG_PARAVIRT_GUEST_ASM_CLOBBER_CHECK
 # define PARAVIRT_POISON	0xdeadbeefbaadf00d
 # define CLOBBER(clob)				\
@@ -74,6 +77,11 @@
 (pred)	mov reg = psr			\
 	CLOBBER(clob)
 
+#define MOV_FROM_ITC(pred, pred_clob, reg, clob)	\
+(pred)	mov reg = ar.itc			\
+	CLOBBER(clob)				\
+	CLOBBER_PRED(pred_clob)
+
 #define MOV_TO_IFA(reg, clob)	\
 	mov cr.ifa = reg	\
 	CLOBBER(clob)
@@ -158,6 +166,11 @@
 #define RSM_PSR_DT		\
 	rsm psr.dt
 
+#define RSM_PSR_BE_I(clob0, clob1)	\
+	rsm psr.be | psr.i		\
+	CLOBBER(clob0)			\
+	CLOBBER(clob1)
+
 #define SSM_PSR_DT_AND_SRLZ_I	\
 	ssm psr.dt		\
 	;;			\
diff --git a/arch/ia64/include/asm/native/patchlist.h b/arch/ia64/include/asm/native/patchlist.h
new file mode 100644
index 000000000000..be16ca9311bf
--- /dev/null
+++ b/arch/ia64/include/asm/native/patchlist.h
@@ -0,0 +1,38 @@
+/******************************************************************************
+ * arch/ia64/include/asm/native/inst.h
+ *
+ * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
+ *                    VA Linux Systems Japan K.K.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ *
+ */
+
+#define __paravirt_start_gate_fsyscall_patchlist		\
+	__ia64_native_start_gate_fsyscall_patchlist
+#define __paravirt_end_gate_fsyscall_patchlist			\
+	__ia64_native_end_gate_fsyscall_patchlist
+#define __paravirt_start_gate_brl_fsys_bubble_down_patchlist	\
+	__ia64_native_start_gate_brl_fsys_bubble_down_patchlist
+#define __paravirt_end_gate_brl_fsys_bubble_down_patchlist	\
+	__ia64_native_end_gate_brl_fsys_bubble_down_patchlist
+#define __paravirt_start_gate_vtop_patchlist			\
+	__ia64_native_start_gate_vtop_patchlist
+#define __paravirt_end_gate_vtop_patchlist			\
+	__ia64_native_end_gate_vtop_patchlist
+#define __paravirt_start_gate_mckinley_e9_patchlist		\
+	__ia64_native_start_gate_mckinley_e9_patchlist
+#define __paravirt_end_gate_mckinley_e9_patchlist		\
+	__ia64_native_end_gate_mckinley_e9_patchlist
diff --git a/arch/ia64/include/asm/native/pvchk_inst.h b/arch/ia64/include/asm/native/pvchk_inst.h
index b8e6eb1090d7..8d72962ec838 100644
--- a/arch/ia64/include/asm/native/pvchk_inst.h
+++ b/arch/ia64/include/asm/native/pvchk_inst.h
@@ -180,6 +180,11 @@
 	IS_PRED_IN(pred)				\
 	IS_RREG_OUT(reg)				\
 	IS_RREG_CLOB(clob)
+#define MOV_FROM_ITC(pred, pred_clob, reg, clob)	\
+	IS_PRED_IN(pred)				\
+	IS_PRED_CLOB(pred_clob)				\
+	IS_RREG_OUT(reg)				\
+	IS_RREG_CLOB(clob)
 #define MOV_TO_IFA(reg, clob)				\
 	IS_RREG_IN(reg)					\
 	IS_RREG_CLOB(clob)
@@ -246,6 +251,9 @@
 	IS_RREG_CLOB(clob2)
 #define RSM_PSR_DT				\
 	nop 0
+#define RSM_PSR_BE_I(clob0, clob1)	\
+	IS_RREG_CLOB(clob0)		\
+	IS_RREG_CLOB(clob1)
 #define SSM_PSR_DT_AND_SRLZ_I			\
 	nop 0
 #define BSW_0(clob0, clob1, clob2)		\
diff --git a/arch/ia64/include/asm/paravirt.h b/arch/ia64/include/asm/paravirt.h
index 2bf3636473fe..2eb0a981a09a 100644
--- a/arch/ia64/include/asm/paravirt.h
+++ b/arch/ia64/include/asm/paravirt.h
@@ -22,6 +22,56 @@
 #ifndef __ASM_PARAVIRT_H
 #define __ASM_PARAVIRT_H
 
+#ifndef __ASSEMBLY__
+/******************************************************************************
+ * fsys related addresses
+ */
+struct pv_fsys_data {
+	unsigned long *fsyscall_table;
+	void *fsys_bubble_down;
+};
+
+extern struct pv_fsys_data pv_fsys_data;
+
+unsigned long *paravirt_get_fsyscall_table(void);
+char *paravirt_get_fsys_bubble_down(void);
+
+/******************************************************************************
+ * patchlist addresses for gate page
+ */
+enum pv_gate_patchlist {
+	PV_GATE_START_FSYSCALL,
+	PV_GATE_END_FSYSCALL,
+
+	PV_GATE_START_BRL_FSYS_BUBBLE_DOWN,
+	PV_GATE_END_BRL_FSYS_BUBBLE_DOWN,
+
+	PV_GATE_START_VTOP,
+	PV_GATE_END_VTOP,
+
+	PV_GATE_START_MCKINLEY_E9,
+	PV_GATE_END_MCKINLEY_E9,
+};
+
+struct pv_patchdata {
+	unsigned long start_fsyscall_patchlist;
+	unsigned long end_fsyscall_patchlist;
+	unsigned long start_brl_fsys_bubble_down_patchlist;
+	unsigned long end_brl_fsys_bubble_down_patchlist;
+	unsigned long start_vtop_patchlist;
+	unsigned long end_vtop_patchlist;
+	unsigned long start_mckinley_e9_patchlist;
+	unsigned long end_mckinley_e9_patchlist;
+
+	void *gate_section;
+};
+
+extern struct pv_patchdata pv_patchdata;
+
+unsigned long paravirt_get_gate_patchlist(enum pv_gate_patchlist type);
+void *paravirt_get_gate_section(void);
+#endif
+
 #ifdef CONFIG_PARAVIRT_GUEST
 
 #define PARAVIRT_HYPERVISOR_TYPE_DEFAULT	0
@@ -68,6 +118,14 @@ struct pv_init_ops {
 	int (*arch_setup_nomca)(void);
 
 	void (*post_smp_prepare_boot_cpu)(void);
+
+#ifdef ASM_SUPPORTED
+	unsigned long (*patch_bundle)(void *sbundle, void *ebundle,
+				      unsigned long type);
+	unsigned long (*patch_inst)(unsigned long stag, unsigned long etag,
+				    unsigned long type);
+#endif
+	void (*patch_branch)(unsigned long tag, unsigned long type);
 };
 
 extern struct pv_init_ops pv_init_ops;
@@ -210,6 +268,8 @@ struct pv_time_ops {
 	int (*do_steal_accounting)(unsigned long *new_itm);
 
 	void (*clocksource_resume)(void);
+
+	unsigned long long (*sched_clock)(void);
 };
 
 extern struct pv_time_ops pv_time_ops;
@@ -227,6 +287,11 @@ paravirt_do_steal_accounting(unsigned long *new_itm)
 	return pv_time_ops.do_steal_accounting(new_itm);
 }
 
+static inline unsigned long long paravirt_sched_clock(void)
+{
+	return pv_time_ops.sched_clock();
+}
+
 #endif	/* !__ASSEMBLY__ */
 
 #else
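
Note: pv_time_ops gains a sched_clock hook, called only through the paravirt_sched_clock() inline above. A hedged sketch of wiring a native backend; the function name and the cycles-to-ns helper are illustrative, and the real wiring lives elsewhere in arch/ia64:

	/* Illustrative: read the ITC cycle counter and scale it to ns. */
	static unsigned long long ia64_native_sched_clock(void)
	{
		unsigned long long cyc = ia64_getreg(_IA64_REG_AR_ITC);
		return cycles_to_ns(cyc);	/* assumed helper */
	}

	struct pv_time_ops pv_time_ops = {
		.sched_clock = ia64_native_sched_clock,
	};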
diff --git a/arch/ia64/include/asm/paravirt_patch.h b/arch/ia64/include/asm/paravirt_patch.h
new file mode 100644
index 000000000000..128ff5db6e67
--- /dev/null
+++ b/arch/ia64/include/asm/paravirt_patch.h
@@ -0,0 +1,143 @@
+/******************************************************************************
+ * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
+ *                    VA Linux Systems Japan K.K.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ *
+ */
+
+#ifndef __ASM_PARAVIRT_PATCH_H
+#define __ASM_PARAVIRT_PATCH_H
+
+#ifdef __ASSEMBLY__
+
+	.section .paravirt_branches, "a"
+	.previous
+#define PARAVIRT_PATCH_SITE_BR(type)		\
+	{					\
+	[1:] ;					\
+	br.cond.sptk.many 2f ;			\
+	nop.b 0 ;				\
+	nop.b 0;; ;				\
+	} ;					\
+	2:					\
+	.xdata8 ".paravirt_branches", 1b, type
+
+#else
+
+#include <linux/stringify.h>
+#include <asm/intrinsics.h>
+
+/* for binary patch */
+struct paravirt_patch_site_bundle {
+	void		*sbundle;
+	void		*ebundle;
+	unsigned long	type;
+};
+
+/* label means the beginning of new bundle */
+#define paravirt_alt_bundle(instr, privop)	\
+	"\t998:\n"				\
+	"\t" instr "\n"				\
+	"\t999:\n"				\
+	"\t.pushsection .paravirt_bundles, \"a\"\n"	\
+	"\t.popsection\n"			\
+	"\t.xdata8 \".paravirt_bundles\", 998b, 999b, "	\
+	__stringify(privop) "\n"
+
+
+struct paravirt_patch_bundle_elem {
+	const void	*sbundle;
+	const void	*ebundle;
+	unsigned long	type;
+};
+
+
+struct paravirt_patch_site_inst {
+	unsigned long stag;
+	unsigned long etag;
+	unsigned long type;
+};
+
+#define paravirt_alt_inst(instr, privop)	\
+	"\t[998:]\n"				\
+	"\t" instr "\n"				\
+	"\t[999:]\n"				\
+	"\t.pushsection .paravirt_insts, \"a\"\n"	\
+	"\t.popsection\n"			\
+	"\t.xdata8 \".paravirt_insts\", 998b, 999b, "	\
+	__stringify(privop) "\n"
+
+struct paravirt_patch_site_branch {
+	unsigned long tag;
+	unsigned long type;
+};
+
+struct paravirt_patch_branch_target {
+	const void	*entry;
+	unsigned long	type;
+};
+
+void
+__paravirt_patch_apply_branch(
+	unsigned long tag, unsigned long type,
+	const struct paravirt_patch_branch_target *entries,
+	unsigned int nr_entries);
+
+void
+paravirt_patch_reloc_br(unsigned long tag, const void *target);
+
+void
+paravirt_patch_reloc_brl(unsigned long tag, const void *target);
+
+
+#if defined(ASM_SUPPORTED) && defined(CONFIG_PARAVIRT)
+unsigned long
+ia64_native_patch_bundle(void *sbundle, void *ebundle, unsigned long type);
+
+unsigned long
+__paravirt_patch_apply_bundle(void *sbundle, void *ebundle, unsigned long type,
+			      const struct paravirt_patch_bundle_elem *elems,
+			      unsigned long nelems,
+			      const struct paravirt_patch_bundle_elem **found);
+
+void
+paravirt_patch_apply_bundle(const struct paravirt_patch_site_bundle *start,
+			    const struct paravirt_patch_site_bundle *end);
+
+void
+paravirt_patch_apply_inst(const struct paravirt_patch_site_inst *start,
+			  const struct paravirt_patch_site_inst *end);
+
+void paravirt_patch_apply(void);
+#else
+#define paravirt_patch_apply_bundle(start, end)	do { } while (0)
+#define paravirt_patch_apply_inst(start, end)	do { } while (0)
+#define paravirt_patch_apply()			do { } while (0)
+#endif
+
+#endif /* !__ASSEMBLEY__ */
+
+#endif /* __ASM_PARAVIRT_PATCH_H */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "linux"
+ * c-basic-offset: 8
+ * tab-width: 8
+ * indent-tabs-mode: t
+ * End:
+ */
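
Note: the three site types above (whole bundles, single instructions, branches) are recorded into ELF sections by the .xdata8 directives and rewritten at boot. A minimal sketch of the bundle walk, assuming pv_init_ops.patch_bundle from paravirt.h is populated; the real routine also handles the native fallback:

	void paravirt_patch_apply_bundle(const struct paravirt_patch_site_bundle *start,
					 const struct paravirt_patch_site_bundle *end)
	{
		const struct paravirt_patch_site_bundle *p;

		for (p = start; p < end; p++)	/* visit each recorded site */
			(*pv_init_ops.patch_bundle)(p->sbundle, p->ebundle,
						    p->type);
	}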
diff --git a/arch/ia64/include/asm/paravirt_privop.h b/arch/ia64/include/asm/paravirt_privop.h
index 33c8e55f5775..3d2951130b5f 100644
--- a/arch/ia64/include/asm/paravirt_privop.h
+++ b/arch/ia64/include/asm/paravirt_privop.h
@@ -33,7 +33,7 @@
33 */ 33 */
34 34
35struct pv_cpu_ops { 35struct pv_cpu_ops {
36 void (*fc)(unsigned long addr); 36 void (*fc)(void *addr);
37 unsigned long (*thash)(unsigned long addr); 37 unsigned long (*thash)(unsigned long addr);
38 unsigned long (*get_cpuid)(int index); 38 unsigned long (*get_cpuid)(int index);
39 unsigned long (*get_pmd)(int index); 39 unsigned long (*get_pmd)(int index);
@@ -60,12 +60,18 @@ extern unsigned long ia64_native_getreg_func(int regnum);
60/* Instructions paravirtualized for performance */ 60/* Instructions paravirtualized for performance */
61/************************************************/ 61/************************************************/
62 62
63#ifndef ASM_SUPPORTED
64#define paravirt_ssm_i() pv_cpu_ops.ssm_i()
65#define paravirt_rsm_i() pv_cpu_ops.rsm_i()
66#define __paravirt_getreg() pv_cpu_ops.getreg()
67#endif
68
63/* mask for ia64_native_ssm/rsm() must be constant.("i" constraing). 69/* mask for ia64_native_ssm/rsm() must be constant.("i" constraing).
64 * static inline function doesn't satisfy it. */ 70 * static inline function doesn't satisfy it. */
65#define paravirt_ssm(mask) \ 71#define paravirt_ssm(mask) \
66 do { \ 72 do { \
67 if ((mask) == IA64_PSR_I) \ 73 if ((mask) == IA64_PSR_I) \
68 pv_cpu_ops.ssm_i(); \ 74 paravirt_ssm_i(); \
69 else \ 75 else \
70 ia64_native_ssm(mask); \ 76 ia64_native_ssm(mask); \
71 } while (0) 77 } while (0)
@@ -73,7 +79,7 @@ extern unsigned long ia64_native_getreg_func(int regnum);
73#define paravirt_rsm(mask) \ 79#define paravirt_rsm(mask) \
74 do { \ 80 do { \
75 if ((mask) == IA64_PSR_I) \ 81 if ((mask) == IA64_PSR_I) \
76 pv_cpu_ops.rsm_i(); \ 82 paravirt_rsm_i(); \
77 else \ 83 else \
78 ia64_native_rsm(mask); \ 84 ia64_native_rsm(mask); \
79 } while (0) 85 } while (0)
@@ -86,7 +92,7 @@ extern unsigned long ia64_native_getreg_func(int regnum);
86 if ((reg) == _IA64_REG_IP) \ 92 if ((reg) == _IA64_REG_IP) \
87 res = ia64_native_getreg(_IA64_REG_IP); \ 93 res = ia64_native_getreg(_IA64_REG_IP); \
88 else \ 94 else \
89 res = pv_cpu_ops.getreg(reg); \ 95 res = __paravirt_getreg(reg); \
90 res; \ 96 res; \
91 }) 97 })
92 98
@@ -112,6 +118,12 @@ void paravirt_cpu_asm_init(const struct pv_cpu_asm_switch *cpu_asm_switch);
112 118
113#endif /* CONFIG_PARAVIRT */ 119#endif /* CONFIG_PARAVIRT */
114 120
121#if defined(CONFIG_PARAVIRT) && defined(ASM_SUPPORTED)
122#define paravirt_dv_serialize_data() ia64_dv_serialize_data()
123#else
124#define paravirt_dv_serialize_data() /* nothing */
125#endif
126
115/* these routines utilize privilege-sensitive or performance-sensitive 127/* these routines utilize privilege-sensitive or performance-sensitive
116 * privileged instructions so the code must be replaced with 128 * privileged instructions so the code must be replaced with
117 * paravirtualized versions */ 129 * paravirtualized versions */
@@ -121,4 +133,349 @@ void paravirt_cpu_asm_init(const struct pv_cpu_asm_switch *cpu_asm_switch);
121 IA64_PARAVIRT_ASM_FUNC(work_processed_syscall) 133 IA64_PARAVIRT_ASM_FUNC(work_processed_syscall)
122#define ia64_leave_kernel IA64_PARAVIRT_ASM_FUNC(leave_kernel) 134#define ia64_leave_kernel IA64_PARAVIRT_ASM_FUNC(leave_kernel)
123 135
136
137#if defined(CONFIG_PARAVIRT)
138/******************************************************************************
139 * binary patching infrastructure
140 */
141#define PARAVIRT_PATCH_TYPE_FC 1
142#define PARAVIRT_PATCH_TYPE_THASH 2
143#define PARAVIRT_PATCH_TYPE_GET_CPUID 3
144#define PARAVIRT_PATCH_TYPE_GET_PMD 4
145#define PARAVIRT_PATCH_TYPE_PTCGA 5
146#define PARAVIRT_PATCH_TYPE_GET_RR 6
147#define PARAVIRT_PATCH_TYPE_SET_RR 7
148#define PARAVIRT_PATCH_TYPE_SET_RR0_TO_RR4 8
149#define PARAVIRT_PATCH_TYPE_SSM_I 9
150#define PARAVIRT_PATCH_TYPE_RSM_I 10
151#define PARAVIRT_PATCH_TYPE_GET_PSR_I 11
152#define PARAVIRT_PATCH_TYPE_INTRIN_LOCAL_IRQ_RESTORE 12
153
154/* PARAVIRT_PATY_TYPE_[GS]ETREG + _IA64_REG_xxx */
155#define PARAVIRT_PATCH_TYPE_GETREG 0x10000000
156#define PARAVIRT_PATCH_TYPE_SETREG 0x20000000
157
158/*
159 * struct task_struct* (*ia64_switch_to)(void* next_task);
160 * void *ia64_leave_syscall;
161 * void *ia64_work_processed_syscall
162 * void *ia64_leave_kernel;
163 */
164
165#define PARAVIRT_PATCH_TYPE_BR_START 0x30000000
166#define PARAVIRT_PATCH_TYPE_BR_SWITCH_TO \
167 (PARAVIRT_PATCH_TYPE_BR_START + 0)
168#define PARAVIRT_PATCH_TYPE_BR_LEAVE_SYSCALL \
169 (PARAVIRT_PATCH_TYPE_BR_START + 1)
170#define PARAVIRT_PATCH_TYPE_BR_WORK_PROCESSED_SYSCALL \
171 (PARAVIRT_PATCH_TYPE_BR_START + 2)
172#define PARAVIRT_PATCH_TYPE_BR_LEAVE_KERNEL \
173 (PARAVIRT_PATCH_TYPE_BR_START + 3)
174
175#ifdef ASM_SUPPORTED
176#include <asm/paravirt_patch.h>
177
178/*
179 * pv_cpu_ops calling stub.
180 * normal function call convension can't be written by gcc
181 * inline assembly.
182 *
183 * from the caller's point of view,
184 * the following registers will be clobbered.
185 * r2, r3
186 * r8-r15
187 * r16, r17
188 * b6, b7
189 * p6-p15
190 * ar.ccv
191 *
192 * from the callee's point of view ,
193 * the following registers can be used.
194 * r2, r3: scratch
195 * r8: scratch, input argument0 and return value
196 * r0-r15: scratch, input argument1-5
197 * b6: return pointer
198 * b7: scratch
199 * p6-p15: scratch
200 * ar.ccv: scratch
201 *
202 * other registers must not be changed. especially
203 * b0: rp: preserved. gcc ignores b0 in clobbered register.
204 * r16: saved gp
205 */
206/* 5 bundles */
207#define __PARAVIRT_BR \
208 ";;\n" \
209 "{ .mlx\n" \
210 "nop 0\n" \
211 "movl r2 = %[op_addr]\n"/* get function pointer address */ \
212 ";;\n" \
213 "}\n" \
214 "1:\n" \
215 "{ .mii\n" \
216 "ld8 r2 = [r2]\n" /* load function descriptor address */ \
217 "mov r17 = ip\n" /* get ip to calc return address */ \
218 "mov r16 = gp\n" /* save gp */ \
219 ";;\n" \
220 "}\n" \
221 "{ .mii\n" \
222 "ld8 r3 = [r2], 8\n" /* load entry address */ \
223 "adds r17 = 1f - 1b, r17\n" /* calculate return address */ \
224 ";;\n" \
225 "mov b7 = r3\n" /* set entry address */ \
226 "}\n" \
227 "{ .mib\n" \
228 "ld8 gp = [r2]\n" /* load gp value */ \
229 "mov b6 = r17\n" /* set return address */ \
230 "br.cond.sptk.few b7\n" /* intrinsics are very short isns */ \
231 "}\n" \
232 "1:\n" \
233 "{ .mii\n" \
234 "mov gp = r16\n" /* restore gp value */ \
235 "nop 0\n" \
236 "nop 0\n" \
237 ";;\n" \
238 "}\n"
+
+#define PARAVIRT_OP(op)						\
+	[op_addr] "i"(&pv_cpu_ops.op)
+
+#define PARAVIRT_TYPE(type)					\
+	PARAVIRT_PATCH_TYPE_ ## type
+
+#define PARAVIRT_REG_CLOBBERS0					\
+	"r2", "r3", /*"r8",*/ "r9", "r10", "r11", "r14",	\
+	"r15", "r16", "r17"
+
+#define PARAVIRT_REG_CLOBBERS1					\
+	"r2", "r3", /*"r8",*/ "r9", "r10", "r11", "r14",	\
+	"r15", "r16", "r17"
+
+#define PARAVIRT_REG_CLOBBERS2					\
+	"r2", "r3", /*"r8", "r9",*/ "r10", "r11", "r14",	\
+	"r15", "r16", "r17"
+
+#define PARAVIRT_REG_CLOBBERS5					\
+	"r2", "r3", /*"r8", "r9", "r10", "r11", "r14",*/	\
+	"r15", "r16", "r17"
+
+#define PARAVIRT_BR_CLOBBERS					\
+	"b6", "b7"
+
+#define PARAVIRT_PR_CLOBBERS					\
+	"p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15"
+
+#define PARAVIRT_AR_CLOBBERS					\
+	"ar.ccv"
+
+#define PARAVIRT_CLOBBERS0					\
+	PARAVIRT_REG_CLOBBERS0,					\
+	PARAVIRT_BR_CLOBBERS,					\
+	PARAVIRT_PR_CLOBBERS,					\
+	PARAVIRT_AR_CLOBBERS,					\
+	"memory"
+
+#define PARAVIRT_CLOBBERS1					\
+	PARAVIRT_REG_CLOBBERS1,					\
+	PARAVIRT_BR_CLOBBERS,					\
+	PARAVIRT_PR_CLOBBERS,					\
+	PARAVIRT_AR_CLOBBERS,					\
+	"memory"
+
+#define PARAVIRT_CLOBBERS2					\
+	PARAVIRT_REG_CLOBBERS2,					\
+	PARAVIRT_BR_CLOBBERS,					\
+	PARAVIRT_PR_CLOBBERS,					\
+	PARAVIRT_AR_CLOBBERS,					\
+	"memory"
+
+#define PARAVIRT_CLOBBERS5					\
+	PARAVIRT_REG_CLOBBERS5,					\
+	PARAVIRT_BR_CLOBBERS,					\
+	PARAVIRT_PR_CLOBBERS,					\
+	PARAVIRT_AR_CLOBBERS,					\
+	"memory"
+
+#define PARAVIRT_BR0(op, type)					\
+	register unsigned long ia64_clobber asm ("r8");		\
+	asm volatile (paravirt_alt_bundle(__PARAVIRT_BR,	\
+					  PARAVIRT_TYPE(type))	\
+		      : "=r"(ia64_clobber)			\
+		      : PARAVIRT_OP(op)				\
+		      : PARAVIRT_CLOBBERS0)
+
+#define PARAVIRT_BR0_RET(op, type)				\
+	register unsigned long ia64_intri_res asm ("r8");	\
+	asm volatile (paravirt_alt_bundle(__PARAVIRT_BR,	\
+					  PARAVIRT_TYPE(type))	\
+		      : "=r"(ia64_intri_res)			\
+		      : PARAVIRT_OP(op)				\
+		      : PARAVIRT_CLOBBERS0)
+
+#define PARAVIRT_BR1(op, type, arg1)				\
+	register unsigned long __##arg1 asm ("r8") = arg1;	\
+	register unsigned long ia64_clobber asm ("r8");		\
+	asm volatile (paravirt_alt_bundle(__PARAVIRT_BR,	\
+					  PARAVIRT_TYPE(type))	\
+		      : "=r"(ia64_clobber)			\
+		      : PARAVIRT_OP(op), "0"(__##arg1)		\
+		      : PARAVIRT_CLOBBERS1)
+
+#define PARAVIRT_BR1_RET(op, type, arg1)			\
+	register unsigned long ia64_intri_res asm ("r8");	\
+	register unsigned long __##arg1 asm ("r8") = arg1;	\
+	asm volatile (paravirt_alt_bundle(__PARAVIRT_BR,	\
+					  PARAVIRT_TYPE(type))	\
+		      : "=r"(ia64_intri_res)			\
+		      : PARAVIRT_OP(op), "0"(__##arg1)		\
+		      : PARAVIRT_CLOBBERS1)
+
+#define PARAVIRT_BR1_VOID(op, type, arg1)			\
+	register void *__##arg1 asm ("r8") = arg1;		\
+	register unsigned long ia64_clobber asm ("r8");		\
+	asm volatile (paravirt_alt_bundle(__PARAVIRT_BR,	\
+					  PARAVIRT_TYPE(type))	\
+		      : "=r"(ia64_clobber)			\
+		      : PARAVIRT_OP(op), "0"(__##arg1)		\
+		      : PARAVIRT_CLOBBERS1)
+
+#define PARAVIRT_BR2(op, type, arg1, arg2)			\
+	register unsigned long __##arg1 asm ("r8") = arg1;	\
+	register unsigned long __##arg2 asm ("r9") = arg2;	\
+	register unsigned long ia64_clobber1 asm ("r8");	\
+	register unsigned long ia64_clobber2 asm ("r9");	\
+	asm volatile (paravirt_alt_bundle(__PARAVIRT_BR,	\
+					  PARAVIRT_TYPE(type))	\
+		      : "=r"(ia64_clobber1), "=r"(ia64_clobber2) \
+		      : PARAVIRT_OP(op), "0"(__##arg1), "1"(__##arg2) \
+		      : PARAVIRT_CLOBBERS2)
+
+
+#define PARAVIRT_DEFINE_CPU_OP0(op, type)		\
+	static inline void				\
+	paravirt_ ## op (void)				\
+	{						\
+		PARAVIRT_BR0(op, type);			\
+	}
+
+#define PARAVIRT_DEFINE_CPU_OP0_RET(op, type)		\
+	static inline unsigned long			\
+	paravirt_ ## op (void)				\
+	{						\
+		PARAVIRT_BR0_RET(op, type);		\
+		return ia64_intri_res;			\
+	}
+
+#define PARAVIRT_DEFINE_CPU_OP1_VOID(op, type)		\
+	static inline void				\
+	paravirt_ ## op (void *arg1)			\
+	{						\
+		PARAVIRT_BR1_VOID(op, type, arg1);	\
+	}
+
+#define PARAVIRT_DEFINE_CPU_OP1(op, type)		\
+	static inline void				\
+	paravirt_ ## op (unsigned long arg1)		\
+	{						\
+		PARAVIRT_BR1(op, type, arg1);		\
+	}
+
+#define PARAVIRT_DEFINE_CPU_OP1_RET(op, type)		\
+	static inline unsigned long			\
+	paravirt_ ## op (unsigned long arg1)		\
+	{						\
+		PARAVIRT_BR1_RET(op, type, arg1);	\
+		return ia64_intri_res;			\
+	}
+
+#define PARAVIRT_DEFINE_CPU_OP2(op, type)		\
+	static inline void				\
+	paravirt_ ## op (unsigned long arg1,		\
+			 unsigned long arg2)		\
+	{						\
+		PARAVIRT_BR2(op, type, arg1, arg2);	\
+	}
+
+
+PARAVIRT_DEFINE_CPU_OP1_VOID(fc, FC);
+PARAVIRT_DEFINE_CPU_OP1_RET(thash, THASH)
+PARAVIRT_DEFINE_CPU_OP1_RET(get_cpuid, GET_CPUID)
+PARAVIRT_DEFINE_CPU_OP1_RET(get_pmd, GET_PMD)
+PARAVIRT_DEFINE_CPU_OP2(ptcga, PTCGA)
+PARAVIRT_DEFINE_CPU_OP1_RET(get_rr, GET_RR)
+PARAVIRT_DEFINE_CPU_OP2(set_rr, SET_RR)
+PARAVIRT_DEFINE_CPU_OP0(ssm_i, SSM_I)
+PARAVIRT_DEFINE_CPU_OP0(rsm_i, RSM_I)
+PARAVIRT_DEFINE_CPU_OP0_RET(get_psr_i, GET_PSR_I)
+PARAVIRT_DEFINE_CPU_OP1(intrin_local_irq_restore, INTRIN_LOCAL_IRQ_RESTORE)
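To see through the macro layering, this is approximately what PARAVIRT_DEFINE_CPU_OP0_RET(get_psr_i, GET_PSR_I) expands to (hand-expanded here for illustration; the real text comes from the macros above):

	static inline unsigned long
	paravirt_get_psr_i(void)
	{
		register unsigned long ia64_intri_res asm ("r8");
		asm volatile (paravirt_alt_bundle(__PARAVIRT_BR,
						  PARAVIRT_PATCH_TYPE_GET_PSR_I)
			      : "=r"(ia64_intri_res)
			      : [op_addr] "i"(&pv_cpu_ops.get_psr_i)
			      : PARAVIRT_CLOBBERS0);
		return ia64_intri_res;
	}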
+
+static inline void
+paravirt_set_rr0_to_rr4(unsigned long val0, unsigned long val1,
+			unsigned long val2, unsigned long val3,
+			unsigned long val4)
+{
+	register unsigned long __val0 asm ("r8") = val0;
+	register unsigned long __val1 asm ("r9") = val1;
+	register unsigned long __val2 asm ("r10") = val2;
+	register unsigned long __val3 asm ("r11") = val3;
+	register unsigned long __val4 asm ("r14") = val4;
+
+	register unsigned long ia64_clobber0 asm ("r8");
+	register unsigned long ia64_clobber1 asm ("r9");
+	register unsigned long ia64_clobber2 asm ("r10");
+	register unsigned long ia64_clobber3 asm ("r11");
+	register unsigned long ia64_clobber4 asm ("r14");
+
+	asm volatile (paravirt_alt_bundle(__PARAVIRT_BR,
+					  PARAVIRT_TYPE(SET_RR0_TO_RR4))
+		      : "=r"(ia64_clobber0),
+			"=r"(ia64_clobber1),
+			"=r"(ia64_clobber2),
+			"=r"(ia64_clobber3),
+			"=r"(ia64_clobber4)
+		      : PARAVIRT_OP(set_rr0_to_rr4),
+			"0"(__val0), "1"(__val1), "2"(__val2),
+			"3"(__val3), "4"(__val4)
+		      : PARAVIRT_CLOBBERS5);
+}
+
+/* unsigned long paravirt_getreg(int reg) */
+#define __paravirt_getreg(reg)						\
+	({								\
+		register unsigned long ia64_intri_res asm ("r8");	\
+		register unsigned long __reg asm ("r8") = (reg);	\
+									\
+		BUILD_BUG_ON(!__builtin_constant_p(reg));		\
+		asm volatile (paravirt_alt_bundle(__PARAVIRT_BR,	\
+						  PARAVIRT_TYPE(GETREG)	\
+						  + (reg))		\
+			      : "=r"(ia64_intri_res)			\
+			      : PARAVIRT_OP(getreg), "0"(__reg)		\
+			      : PARAVIRT_CLOBBERS1);			\
+									\
+		ia64_intri_res;						\
+	})
+
+/* void paravirt_setreg(int reg, unsigned long val) */
+#define paravirt_setreg(reg, val)					\
+	do {								\
+		register unsigned long __val asm ("r8") = val;		\
+		register unsigned long __reg asm ("r9") = reg;		\
+		register unsigned long ia64_clobber1 asm ("r8");	\
+		register unsigned long ia64_clobber2 asm ("r9");	\
+									\
+		BUILD_BUG_ON(!__builtin_constant_p(reg));		\
+		asm volatile (paravirt_alt_bundle(__PARAVIRT_BR,	\
+						  PARAVIRT_TYPE(SETREG)	\
+						  + (reg))		\
+			      : "=r"(ia64_clobber1),			\
+				"=r"(ia64_clobber2)			\
+			      : PARAVIRT_OP(setreg),			\
+				"1"(__reg), "0"(__val)			\
+			      : PARAVIRT_CLOBBERS2);			\
+	} while (0)
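Both accessors insist on a compile-time-constant register index, which is what the BUILD_BUG_ON enforces: PARAVIRT_TYPE(GETREG) + (reg) must be a literal so it can be baked into the patch site. A plausible caller, assuming the standard _IA64_REG_AR_EFLAG index from asm/ia64regs.h (example only, not part of the patch):

	static inline unsigned long eflag_roundtrip(void)
	{
		/* reg is a literal constant, so this compiles;
		 * a variable index would trip the BUILD_BUG_ON */
		unsigned long eflag = __paravirt_getreg(_IA64_REG_AR_EFLAG);
		paravirt_setreg(_IA64_REG_AR_EFLAG, eflag);
		return eflag;
	}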
+
+#endif /* ASM_SUPPORTED */
+#endif /* CONFIG_PARAVIRT && ASM_SUPPORTED */
+
 #endif /* _ASM_IA64_PARAVIRT_PRIVOP_H */
diff --git a/arch/ia64/include/asm/smp.h b/arch/ia64/include/asm/smp.h
index 21c402365d0e..598408336251 100644
--- a/arch/ia64/include/asm/smp.h
+++ b/arch/ia64/include/asm/smp.h
@@ -126,7 +126,8 @@ extern void identify_siblings (struct cpuinfo_ia64 *);
 extern int is_multithreading_enabled(void);
 
 extern void arch_send_call_function_single_ipi(int cpu);
-extern void arch_send_call_function_ipi(cpumask_t mask);
+extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
+#define arch_send_call_function_ipi_mask arch_send_call_function_ipi_mask
 
 #else /* CONFIG_SMP */
 
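The self-referential #define is the usual kernel idiom for advertising that this architecture supplies the cpumask-pointer IPI; generic code can then key off the macro's existence. A sketch of the consuming pattern (illustrative, not the literal kernel/smp.c text):

	#ifdef arch_send_call_function_ipi_mask
	/* arch provides the struct cpumask * variant directly */
	#define send_cfi(mask)	arch_send_call_function_ipi_mask(mask)
	#else
	/* fall back to the older by-value cpumask_t interface */
	#define send_cfi(mask)	arch_send_call_function_ipi(*(mask))
	#endif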
diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
index 0229fb95fb38..13ab71576bc7 100644
--- a/arch/ia64/include/asm/spinlock.h
+++ b/arch/ia64/include/asm/spinlock.h
@@ -120,6 +120,38 @@ do { \
 #define __raw_read_can_lock(rw)	(*(volatile int *)(rw) >= 0)
 #define __raw_write_can_lock(rw)	(*(volatile int *)(rw) == 0)
 
+#ifdef ASM_SUPPORTED
+
+static __always_inline void
+__raw_read_lock_flags(raw_rwlock_t *lock, unsigned long flags)
+{
+	__asm__ __volatile__ (
+		"tbit.nz p6, p0 = %1,%2\n"
+		"br.few 3f\n"
+		"1:\n"
+		"fetchadd4.rel r2 = [%0], -1;;\n"
+		"(p6) ssm psr.i\n"
+		"2:\n"
+		"hint @pause\n"
+		"ld4 r2 = [%0];;\n"
+		"cmp4.lt p7,p0 = r2, r0\n"
+		"(p7) br.cond.spnt.few 2b\n"
+		"(p6) rsm psr.i\n"
+		";;\n"
+		"3:\n"
+		"fetchadd4.acq r2 = [%0], 1;;\n"
+		"cmp4.lt p7,p0 = r2, r0\n"
+		"(p7) br.cond.spnt.few 1b\n"
+		: : "r"(lock), "r"(flags), "i"(IA64_PSR_I_BIT)
+		: "p6", "p7", "r2", "memory");
+}
+
+#define __raw_read_lock(lock) __raw_read_lock_flags(lock, 0)
+
+#else /* !ASM_SUPPORTED */
+
+#define __raw_read_lock_flags(rw, flags) __raw_read_lock(rw)
+
 #define __raw_read_lock(rw)						\
 do {									\
 	raw_rwlock_t *__read_lock_ptr = (rw);				\
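The new read path spins with interrupts conditionally re-enabled: if psr.i was set in the caller's saved flags, interrupts are turned back on while waiting for the writer to leave, then switched off again just before retrying. The control flow, sketched in C (the helpers stand in for the annotated instructions; this is not kernel API):

	/* negative count = writer present; fetchadd returns the old value */
	static void read_lock_flags_sketch(volatile int *count, unsigned long flags)
	{
		while (__sync_fetch_and_add(count, 1) < 0) {	/* fetchadd4.acq */
			__sync_fetch_and_add(count, -1);	/* back off: fetchadd4.rel */
			if (flags & (1UL << IA64_PSR_I_BIT))
				local_irq_enable();		/* (p6) ssm psr.i */
			while (*count < 0)
				cpu_relax();			/* hint @pause */
			if (flags & (1UL << IA64_PSR_I_BIT))
				local_irq_disable();		/* (p6) rsm psr.i */
		}
	}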
@@ -131,6 +163,8 @@ do { \
 	}								\
 } while (0)
 
+#endif /* !ASM_SUPPORTED */
+
 #define __raw_read_unlock(rw)					\
 do {								\
 	raw_rwlock_t *__read_lock_ptr = (rw);			\
@@ -138,20 +172,33 @@ do { \
 } while (0)
 
 #ifdef ASM_SUPPORTED
-#define __raw_write_lock(rw)						\
-do {									\
-	__asm__ __volatile__ (						\
-		"mov ar.ccv = r0\n"					\
-		"dep r29 = -1, r0, 31, 1;;\n"				\
-		"1:\n"							\
-		"ld4 r2 = [%0];;\n"					\
-		"cmp4.eq p0,p7 = r0,r2\n"				\
-		"(p7) br.cond.spnt.few 1b \n"				\
-		"cmpxchg4.acq r2 = [%0], r29, ar.ccv;;\n"		\
-		"cmp4.eq p0,p7 = r0, r2\n"				\
-		"(p7) br.cond.spnt.few 1b;;\n"				\
-		:: "r"(rw) : "ar.ccv", "p7", "r2", "r29", "memory");	\
-} while(0)
+
+static __always_inline void
+__raw_write_lock_flags(raw_rwlock_t *lock, unsigned long flags)
+{
+	__asm__ __volatile__ (
+		"tbit.nz p6, p0 = %1, %2\n"
+		"mov ar.ccv = r0\n"
+		"dep r29 = -1, r0, 31, 1\n"
+		"br.few 3f;;\n"
+		"1:\n"
+		"(p6) ssm psr.i\n"
+		"2:\n"
+		"hint @pause\n"
+		"ld4 r2 = [%0];;\n"
+		"cmp4.eq p0,p7 = r0, r2\n"
+		"(p7) br.cond.spnt.few 2b\n"
+		"(p6) rsm psr.i\n"
+		";;\n"
+		"3:\n"
+		"cmpxchg4.acq r2 = [%0], r29, ar.ccv;;\n"
+		"cmp4.eq p0,p7 = r0, r2\n"
+		"(p7) br.cond.spnt.few 1b;;\n"
+		: : "r"(lock), "r"(flags), "i"(IA64_PSR_I_BIT)
+		: "ar.ccv", "p6", "p7", "r2", "r29", "memory");
+}
+
+#define __raw_write_lock(rw) __raw_write_lock_flags(rw, 0)
 
 #define __raw_write_trylock(rw)					\
 ({								\
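The write lock uses the same interrupt-window trick around a cmpxchg: bit 31 is the writer bit, and the lock is taken only when the whole word moves from 0 to 1<<31 in one atomic step. Roughly, in C (a sketch under the same caveats as the read-lock sketch above):

	static void write_lock_flags_sketch(volatile unsigned int *lock,
					    unsigned long flags)
	{
		const unsigned int writer = 1u << 31;	/* dep r29 = -1, r0, 31, 1 */

		while (!__sync_bool_compare_and_swap(lock, 0, writer)) {
			if (flags & (1UL << IA64_PSR_I_BIT))
				local_irq_enable();	/* (p6) ssm psr.i */
			while (*lock != 0)		/* readers/writer active */
				cpu_relax();		/* hint @pause */
			if (flags & (1UL << IA64_PSR_I_BIT))
				local_irq_disable();	/* (p6) rsm psr.i */
		}
	}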
@@ -174,6 +221,8 @@ static inline void __raw_write_unlock(raw_rwlock_t *x)
 
 #else /* !ASM_SUPPORTED */
 
+#define __raw_write_lock_flags(l, flags) __raw_write_lock(l)
+
 #define __raw_write_lock(l)						\
 ({									\
 	__u64 ia64_val, ia64_set_val = ia64_dep_mi(-1, 0, 31, 1);	\
diff --git a/arch/ia64/include/asm/timex.h b/arch/ia64/include/asm/timex.h
index 4e03cfe74a0c..86c7db861180 100644
--- a/arch/ia64/include/asm/timex.h
+++ b/arch/ia64/include/asm/timex.h
@@ -40,5 +40,6 @@ get_cycles (void)
 }
 
 extern void ia64_cpu_local_tick (void);
+extern unsigned long long ia64_native_sched_clock (void);
 
 #endif /* _ASM_IA64_TIMEX_H */
diff --git a/arch/ia64/include/asm/topology.h b/arch/ia64/include/asm/topology.h
index 3193f4417e16..7b4c8c70b2d1 100644
--- a/arch/ia64/include/asm/topology.h
+++ b/arch/ia64/include/asm/topology.h
@@ -44,11 +44,6 @@
 #define parent_node(nid)	(nid)
 
 /*
- * Returns the number of the first CPU on Node 'node'.
- */
-#define node_to_first_cpu(node) (cpumask_first(cpumask_of_node(node)))
-
-/*
  * Determines the node for a given pci bus
  */
 #define pcibus_to_node(bus)	PCI_CONTROLLER(bus)->node
@@ -117,11 +112,6 @@ void build_cpu_to_node_map(void);
 
 extern void arch_fix_phys_package_id(int num, u32 slot);
 
-#define pcibus_to_cpumask(bus)	(pcibus_to_node(bus) == -1 ?	\
-					CPU_MASK_ALL :		\
-					node_to_cpumask(pcibus_to_node(bus)) \
-				)
-
 #define cpumask_of_pcibus(bus)	(pcibus_to_node(bus) == -1 ?	\
 				 cpu_all_mask :			\
 				 cpumask_of_node(pcibus_to_node(bus)))
diff --git a/arch/ia64/include/asm/uv/uv_hub.h b/arch/ia64/include/asm/uv/uv_hub.h
index f607018af4a1..53e9dfacd073 100644
--- a/arch/ia64/include/asm/uv/uv_hub.h
+++ b/arch/ia64/include/asm/uv/uv_hub.h
@@ -305,5 +305,11 @@ static inline int uv_num_possible_blades(void)
 	return 1;
 }
 
+static inline void uv_hub_send_ipi(int pnode, int apicid, int vector)
+{
+	/* not currently needed on ia64 */
+}
+
+
 #endif /* __ASM_IA64_UV_HUB__ */
 
diff --git a/arch/ia64/include/asm/uv/uv_mmrs.h b/arch/ia64/include/asm/uv/uv_mmrs.h
index c149ef085437..fe0b8f05e1a8 100644
--- a/arch/ia64/include/asm/uv/uv_mmrs.h
+++ b/arch/ia64/include/asm/uv/uv_mmrs.h
@@ -8,8 +8,8 @@
  * Copyright (C) 2007-2008 Silicon Graphics, Inc. All rights reserved.
  */
 
-#ifndef __ASM_IA64_UV_MMRS__
-#define __ASM_IA64_UV_MMRS__
+#ifndef _ASM_IA64_UV_UV_MMRS_H
+#define _ASM_IA64_UV_UV_MMRS_H
 
 #define UV_MMR_ENABLE		(1UL << 63)
 
@@ -243,6 +243,158 @@ union uvh_event_occurred0_u {
 #define UVH_EVENT_OCCURRED0_ALIAS_32 0x005f0
 
 /* ========================================================================= */
+/* UVH_GR0_TLB_INT0_CONFIG */
+/* ========================================================================= */
+#define UVH_GR0_TLB_INT0_CONFIG 0x61b00UL
+
+#define UVH_GR0_TLB_INT0_CONFIG_VECTOR_SHFT	0
+#define UVH_GR0_TLB_INT0_CONFIG_VECTOR_MASK	0x00000000000000ffUL
+#define UVH_GR0_TLB_INT0_CONFIG_DM_SHFT		8
+#define UVH_GR0_TLB_INT0_CONFIG_DM_MASK		0x0000000000000700UL
+#define UVH_GR0_TLB_INT0_CONFIG_DESTMODE_SHFT	11
+#define UVH_GR0_TLB_INT0_CONFIG_DESTMODE_MASK	0x0000000000000800UL
+#define UVH_GR0_TLB_INT0_CONFIG_STATUS_SHFT	12
+#define UVH_GR0_TLB_INT0_CONFIG_STATUS_MASK	0x0000000000001000UL
+#define UVH_GR0_TLB_INT0_CONFIG_P_SHFT		13
+#define UVH_GR0_TLB_INT0_CONFIG_P_MASK		0x0000000000002000UL
+#define UVH_GR0_TLB_INT0_CONFIG_T_SHFT		15
+#define UVH_GR0_TLB_INT0_CONFIG_T_MASK		0x0000000000008000UL
+#define UVH_GR0_TLB_INT0_CONFIG_M_SHFT		16
+#define UVH_GR0_TLB_INT0_CONFIG_M_MASK		0x0000000000010000UL
+#define UVH_GR0_TLB_INT0_CONFIG_APIC_ID_SHFT	32
+#define UVH_GR0_TLB_INT0_CONFIG_APIC_ID_MASK	0xffffffff00000000UL
+
+union uvh_gr0_tlb_int0_config_u {
+	unsigned long v;
+	struct uvh_gr0_tlb_int0_config_s {
+		unsigned long vector_   :  8;	/* RW */
+		unsigned long dm        :  3;	/* RW */
+		unsigned long destmode  :  1;	/* RW */
+		unsigned long status    :  1;	/* RO */
+		unsigned long p         :  1;	/* RO */
+		unsigned long rsvd_14   :  1;	/*    */
+		unsigned long t         :  1;	/* RO */
+		unsigned long m         :  1;	/* RW */
+		unsigned long rsvd_17_31: 15;	/*    */
+		unsigned long apic_id   : 32;	/* RW */
+	} s;
+};
+
+/* ========================================================================= */
+/* UVH_GR0_TLB_INT1_CONFIG */
+/* ========================================================================= */
+#define UVH_GR0_TLB_INT1_CONFIG 0x61b40UL
+
+#define UVH_GR0_TLB_INT1_CONFIG_VECTOR_SHFT	0
+#define UVH_GR0_TLB_INT1_CONFIG_VECTOR_MASK	0x00000000000000ffUL
+#define UVH_GR0_TLB_INT1_CONFIG_DM_SHFT		8
+#define UVH_GR0_TLB_INT1_CONFIG_DM_MASK		0x0000000000000700UL
+#define UVH_GR0_TLB_INT1_CONFIG_DESTMODE_SHFT	11
+#define UVH_GR0_TLB_INT1_CONFIG_DESTMODE_MASK	0x0000000000000800UL
+#define UVH_GR0_TLB_INT1_CONFIG_STATUS_SHFT	12
+#define UVH_GR0_TLB_INT1_CONFIG_STATUS_MASK	0x0000000000001000UL
+#define UVH_GR0_TLB_INT1_CONFIG_P_SHFT		13
+#define UVH_GR0_TLB_INT1_CONFIG_P_MASK		0x0000000000002000UL
+#define UVH_GR0_TLB_INT1_CONFIG_T_SHFT		15
+#define UVH_GR0_TLB_INT1_CONFIG_T_MASK		0x0000000000008000UL
+#define UVH_GR0_TLB_INT1_CONFIG_M_SHFT		16
+#define UVH_GR0_TLB_INT1_CONFIG_M_MASK		0x0000000000010000UL
+#define UVH_GR0_TLB_INT1_CONFIG_APIC_ID_SHFT	32
+#define UVH_GR0_TLB_INT1_CONFIG_APIC_ID_MASK	0xffffffff00000000UL
+
+union uvh_gr0_tlb_int1_config_u {
+	unsigned long v;
+	struct uvh_gr0_tlb_int1_config_s {
+		unsigned long vector_   :  8;	/* RW */
+		unsigned long dm        :  3;	/* RW */
+		unsigned long destmode  :  1;	/* RW */
+		unsigned long status    :  1;	/* RO */
+		unsigned long p         :  1;	/* RO */
+		unsigned long rsvd_14   :  1;	/*    */
+		unsigned long t         :  1;	/* RO */
+		unsigned long m         :  1;	/* RW */
+		unsigned long rsvd_17_31: 15;	/*    */
+		unsigned long apic_id   : 32;	/* RW */
+	} s;
+};
+
+/* ========================================================================= */
+/* UVH_GR1_TLB_INT0_CONFIG */
+/* ========================================================================= */
+#define UVH_GR1_TLB_INT0_CONFIG 0x61f00UL
+
+#define UVH_GR1_TLB_INT0_CONFIG_VECTOR_SHFT	0
+#define UVH_GR1_TLB_INT0_CONFIG_VECTOR_MASK	0x00000000000000ffUL
+#define UVH_GR1_TLB_INT0_CONFIG_DM_SHFT		8
+#define UVH_GR1_TLB_INT0_CONFIG_DM_MASK		0x0000000000000700UL
+#define UVH_GR1_TLB_INT0_CONFIG_DESTMODE_SHFT	11
+#define UVH_GR1_TLB_INT0_CONFIG_DESTMODE_MASK	0x0000000000000800UL
+#define UVH_GR1_TLB_INT0_CONFIG_STATUS_SHFT	12
+#define UVH_GR1_TLB_INT0_CONFIG_STATUS_MASK	0x0000000000001000UL
+#define UVH_GR1_TLB_INT0_CONFIG_P_SHFT		13
+#define UVH_GR1_TLB_INT0_CONFIG_P_MASK		0x0000000000002000UL
+#define UVH_GR1_TLB_INT0_CONFIG_T_SHFT		15
+#define UVH_GR1_TLB_INT0_CONFIG_T_MASK		0x0000000000008000UL
+#define UVH_GR1_TLB_INT0_CONFIG_M_SHFT		16
+#define UVH_GR1_TLB_INT0_CONFIG_M_MASK		0x0000000000010000UL
+#define UVH_GR1_TLB_INT0_CONFIG_APIC_ID_SHFT	32
+#define UVH_GR1_TLB_INT0_CONFIG_APIC_ID_MASK	0xffffffff00000000UL
+
+union uvh_gr1_tlb_int0_config_u {
+	unsigned long v;
+	struct uvh_gr1_tlb_int0_config_s {
+		unsigned long vector_   :  8;	/* RW */
+		unsigned long dm        :  3;	/* RW */
+		unsigned long destmode  :  1;	/* RW */
+		unsigned long status    :  1;	/* RO */
+		unsigned long p         :  1;	/* RO */
+		unsigned long rsvd_14   :  1;	/*    */
+		unsigned long t         :  1;	/* RO */
+		unsigned long m         :  1;	/* RW */
+		unsigned long rsvd_17_31: 15;	/*    */
+		unsigned long apic_id   : 32;	/* RW */
+	} s;
+};
+
+/* ========================================================================= */
+/* UVH_GR1_TLB_INT1_CONFIG */
+/* ========================================================================= */
+#define UVH_GR1_TLB_INT1_CONFIG 0x61f40UL
+
+#define UVH_GR1_TLB_INT1_CONFIG_VECTOR_SHFT	0
+#define UVH_GR1_TLB_INT1_CONFIG_VECTOR_MASK	0x00000000000000ffUL
+#define UVH_GR1_TLB_INT1_CONFIG_DM_SHFT		8
+#define UVH_GR1_TLB_INT1_CONFIG_DM_MASK		0x0000000000000700UL
+#define UVH_GR1_TLB_INT1_CONFIG_DESTMODE_SHFT	11
+#define UVH_GR1_TLB_INT1_CONFIG_DESTMODE_MASK	0x0000000000000800UL
+#define UVH_GR1_TLB_INT1_CONFIG_STATUS_SHFT	12
+#define UVH_GR1_TLB_INT1_CONFIG_STATUS_MASK	0x0000000000001000UL
+#define UVH_GR1_TLB_INT1_CONFIG_P_SHFT		13
+#define UVH_GR1_TLB_INT1_CONFIG_P_MASK		0x0000000000002000UL
+#define UVH_GR1_TLB_INT1_CONFIG_T_SHFT		15
+#define UVH_GR1_TLB_INT1_CONFIG_T_MASK		0x0000000000008000UL
+#define UVH_GR1_TLB_INT1_CONFIG_M_SHFT		16
+#define UVH_GR1_TLB_INT1_CONFIG_M_MASK		0x0000000000010000UL
+#define UVH_GR1_TLB_INT1_CONFIG_APIC_ID_SHFT	32
+#define UVH_GR1_TLB_INT1_CONFIG_APIC_ID_MASK	0xffffffff00000000UL
+
+union uvh_gr1_tlb_int1_config_u {
+	unsigned long v;
+	struct uvh_gr1_tlb_int1_config_s {
+		unsigned long vector_   :  8;	/* RW */
+		unsigned long dm        :  3;	/* RW */
+		unsigned long destmode  :  1;	/* RW */
+		unsigned long status    :  1;	/* RO */
+		unsigned long p         :  1;	/* RO */
+		unsigned long rsvd_14   :  1;	/*    */
+		unsigned long t         :  1;	/* RO */
+		unsigned long m         :  1;	/* RW */
+		unsigned long rsvd_17_31: 15;	/*    */
+		unsigned long apic_id   : 32;	/* RW */
+	} s;
+};
+
+/* ========================================================================= */
 /* UVH_INT_CMPB */
 /* ========================================================================= */
 #define UVH_INT_CMPB 0x22080UL
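All four GR*_TLB_INT*_CONFIG registers share this layout, so the unions let a caller compose the 64-bit value field by field and move it in one store. Assuming the usual uv_write_local_mmr() accessor from uv_hub.h (illustrative values; target_apicid is a hypothetical variable):

	union uvh_gr0_tlb_int0_config_u cfg;

	cfg.v = 0;
	cfg.s.vector_ = 0xd0;		/* interrupt vector */
	cfg.s.dm      = 0;		/* fixed delivery mode */
	cfg.s.apic_id = target_apicid;	/* destination CPU */
	uv_write_local_mmr(UVH_GR0_TLB_INT0_CONFIG, cfg.v);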
@@ -670,4 +822,4 @@ union uvh_si_alias2_overlay_config_u {
 };
 
 
-#endif /* __ASM_IA64_UV_MMRS__ */
+#endif /* _ASM_IA64_UV_UV_MMRS_H */
diff --git a/arch/ia64/include/asm/xen/hypervisor.h b/arch/ia64/include/asm/xen/hypervisor.h
index 7a804e80fc67..e425227a418e 100644
--- a/arch/ia64/include/asm/xen/hypervisor.h
+++ b/arch/ia64/include/asm/xen/hypervisor.h
@@ -33,9 +33,6 @@
 #ifndef _ASM_IA64_XEN_HYPERVISOR_H
 #define _ASM_IA64_XEN_HYPERVISOR_H
 
-#ifdef CONFIG_XEN
-
-#include <linux/init.h>
 #include <xen/interface/xen.h>
 #include <xen/interface/version.h>	/* to compile feature.c */
 #include <xen/features.h>		/* to compile xen-netfront.c */
@@ -43,22 +40,32 @@
 
 /* xen_domain_type is set before executing any C code by early_xen_setup */
 enum xen_domain_type {
-	XEN_NATIVE,
-	XEN_PV_DOMAIN,
-	XEN_HVM_DOMAIN,
+	XEN_NATIVE,	/* running on bare hardware */
+	XEN_PV_DOMAIN,	/* running in a PV domain */
+	XEN_HVM_DOMAIN,	/* running in a Xen HVM domain */
 };
 
+#ifdef CONFIG_XEN
 extern enum xen_domain_type xen_domain_type;
+#else
+#define xen_domain_type		XEN_NATIVE
+#endif
 
 #define xen_domain()		(xen_domain_type != XEN_NATIVE)
-#define xen_pv_domain()		(xen_domain_type == XEN_PV_DOMAIN)
-#define xen_initial_domain()	(xen_pv_domain() && \
+#define xen_pv_domain()		(xen_domain() &&			\
+				 xen_domain_type == XEN_PV_DOMAIN)
+#define xen_hvm_domain()	(xen_domain() &&			\
+				 xen_domain_type == XEN_HVM_DOMAIN)
+
+#ifdef CONFIG_XEN_DOM0
+#define xen_initial_domain()	(xen_pv_domain() &&			\
 				 (xen_start_info->flags & SIF_INITDOMAIN))
-#define xen_hvm_domain()	(xen_domain_type == XEN_HVM_DOMAIN)
+#else
+#define xen_initial_domain()	(0)
+#endif
 
-/* deprecated. remove this */
-#define is_running_on_xen()	(xen_domain_type == XEN_PV_DOMAIN)
 
+#ifdef CONFIG_XEN
 extern struct shared_info *HYPERVISOR_shared_info;
 extern struct start_info *xen_start_info;
 
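Because xen_domain_type now degrades to XEN_NATIVE when CONFIG_XEN is off, callers can use these predicates without any #ifdef of their own; e.g. (illustrative only, not code from this patch):

	static int example_xen_probe(void)
	{
		if (!xen_domain())
			return 0;	/* bare metal */
		if (xen_initial_domain())
			return 1;	/* dom0: allowed to drive hardware */
		return 2;		/* ordinary PV/HVM guest */
	}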
@@ -74,16 +81,6 @@ void force_evtchn_callback(void);
 
 /* For setup_arch() in arch/ia64/kernel/setup.c */
 void xen_ia64_enable_opt_feature(void);
-
-#else /* CONFIG_XEN */
-
-#define xen_domain()		(0)
-#define xen_pv_domain()		(0)
-#define xen_initial_domain()	(0)
-#define xen_hvm_domain()	(0)
-#define is_running_on_xen()	(0) /* deprecated. remove this */
 #endif
 
-#define is_initial_xendomain()	(0) /* deprecated. remove this */
-
 #endif /* _ASM_IA64_XEN_HYPERVISOR_H */
diff --git a/arch/ia64/include/asm/xen/inst.h b/arch/ia64/include/asm/xen/inst.h
index 19c2ae1d878a..c53a47611208 100644
--- a/arch/ia64/include/asm/xen/inst.h
+++ b/arch/ia64/include/asm/xen/inst.h
@@ -33,6 +33,9 @@
 #define __paravirt_work_processed_syscall_target	\
 	xen_work_processed_syscall
 
+#define paravirt_fsyscall_table			xen_fsyscall_table
+#define paravirt_fsys_bubble_down		xen_fsys_bubble_down
+
 #define MOV_FROM_IFA(reg)	\
 	movl reg = XSI_IFA;	\
 	;;			\
@@ -110,6 +113,27 @@
 .endm
 #define MOV_FROM_PSR(pred, reg, clob)	__MOV_FROM_PSR pred, reg, clob
 
+/* assuming ar.itc is read with interrupts disabled. */
+#define MOV_FROM_ITC(pred, pred_clob, reg, clob)	\
+(pred)	movl clob = XSI_ITC_OFFSET;			\
+	;;						\
+(pred)	ld8 clob = [clob];				\
+(pred)	mov reg = ar.itc;				\
+	;;						\
+(pred)	add reg = reg, clob;				\
+	;;						\
+(pred)	movl clob = XSI_ITC_LAST;			\
+	;;						\
+(pred)	ld8 clob = [clob];				\
+	;;						\
+(pred)	cmp.geu.unc pred_clob, p0 = clob, reg;		\
+	;;						\
+(pred_clob)	add reg = 1, clob;			\
+	;;						\
+(pred)	movl clob = XSI_ITC_LAST;			\
+	;;						\
+(pred)	st8 [clob] = reg
+
 
 #define MOV_TO_IFA(reg, clob)	\
 	movl clob = XSI_IFA;	\
@@ -362,6 +386,10 @@
 #define RSM_PSR_DT		\
 	XEN_HYPER_RSM_PSR_DT
 
+#define RSM_PSR_BE_I(clob0, clob1)	\
+	RSM_PSR_I(p0, clob0, clob1);	\
+	rum psr.be
+
 #define SSM_PSR_DT_AND_SRLZ_I	\
 	XEN_HYPER_SSM_PSR_DT
 
diff --git a/arch/ia64/include/asm/xen/interface.h b/arch/ia64/include/asm/xen/interface.h
index f00fab40854d..e951e740bdf2 100644
--- a/arch/ia64/include/asm/xen/interface.h
+++ b/arch/ia64/include/asm/xen/interface.h
@@ -209,6 +209,15 @@ struct mapped_regs {
 		unsigned long krs[8];	/* kernel registers */
 		unsigned long tmp[16];	/* temp registers
 					   (e.g. for hyperprivops) */
+
+		/* itc paravirtualization
+		 * vAR.ITC = mAR.ITC + itc_offset
+		 * itc_last is the last value handed to the
+		 * guest OS, kept so that the guest's view of
+		 * the ITC never goes backwards.
+		 */
+		unsigned long itc_offset;
+		unsigned long itc_last;
 	};
 	};
 };
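This pair is exactly what MOV_FROM_ITC in asm/xen/inst.h maintains: the guest's clock is the machine ITC plus a per-domain offset, clamped so that two successive reads never move backwards. The same logic in C (a sketch; ia64_getreg/_IA64_REG_AR_ITC are the standard accessors, and interrupts are assumed off, as the inst.h comment requires):

	static unsigned long virtual_itc_sketch(struct mapped_regs *r)
	{
		unsigned long now = ia64_getreg(_IA64_REG_AR_ITC) + r->itc_offset;

		if (now <= r->itc_last)		/* cmp.geu.unc: would regress */
			now = r->itc_last + 1;	/* nudge forward by one tick */
		r->itc_last = now;		/* remember what the guest saw */
		return now;
	}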
diff --git a/arch/ia64/include/asm/xen/minstate.h b/arch/ia64/include/asm/xen/minstate.h
index 4d92d9bbda7b..c57fa910f2c9 100644
--- a/arch/ia64/include/asm/xen/minstate.h
+++ b/arch/ia64/include/asm/xen/minstate.h
@@ -1,3 +1,12 @@
+
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+/* read ar.itc in advance, and use it before leaving bank 0 */
+#define XEN_ACCOUNT_GET_STAMP		\
+	MOV_FROM_ITC(pUStk, p6, r20, r2);
+#else
+#define XEN_ACCOUNT_GET_STAMP
+#endif
+
 /*
  * DO_SAVE_MIN switches to the kernel stacks (if necessary) and saves
  * the minimum state necessary that allows us to turn psr.ic back
@@ -123,7 +132,7 @@
 	;;							\
 .mem.offset 0,0; st8.spill [r16]=r2,16;			\
 .mem.offset 8,0; st8.spill [r17]=r3,16;			\
-	ACCOUNT_GET_STAMP					\
+	XEN_ACCOUNT_GET_STAMP					\
 	adds r2=IA64_PT_REGS_R16_OFFSET,r1;			\
 	;;							\
 	EXTRA;							\
diff --git a/arch/ia64/include/asm/xen/patchlist.h b/arch/ia64/include/asm/xen/patchlist.h
new file mode 100644
index 000000000000..eae944e88846
--- /dev/null
+++ b/arch/ia64/include/asm/xen/patchlist.h
@@ -0,0 +1,38 @@
+/******************************************************************************
+ * arch/ia64/include/asm/xen/patchlist.h
+ *
+ * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
+ *                    VA Linux Systems Japan K.K.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ *
+ */
+
+#define __paravirt_start_gate_fsyscall_patchlist		\
+	__xen_start_gate_fsyscall_patchlist
+#define __paravirt_end_gate_fsyscall_patchlist			\
+	__xen_end_gate_fsyscall_patchlist
+#define __paravirt_start_gate_brl_fsys_bubble_down_patchlist	\
+	__xen_start_gate_brl_fsys_bubble_down_patchlist
+#define __paravirt_end_gate_brl_fsys_bubble_down_patchlist	\
+	__xen_end_gate_brl_fsys_bubble_down_patchlist
+#define __paravirt_start_gate_vtop_patchlist			\
+	__xen_start_gate_vtop_patchlist
+#define __paravirt_end_gate_vtop_patchlist			\
+	__xen_end_gate_vtop_patchlist
+#define __paravirt_start_gate_mckinley_e9_patchlist		\
+	__xen_start_gate_mckinley_e9_patchlist
+#define __paravirt_end_gate_mckinley_e9_patchlist		\
+	__xen_end_gate_mckinley_e9_patchlist
diff --git a/arch/ia64/include/asm/xen/privop.h b/arch/ia64/include/asm/xen/privop.h
index 71ec7546e100..fb4ec5e0b066 100644
--- a/arch/ia64/include/asm/xen/privop.h
+++ b/arch/ia64/include/asm/xen/privop.h
@@ -55,6 +55,8 @@
 #define XSI_BANK1_R16			(XSI_BASE + XSI_BANK1_R16_OFS)
 #define XSI_BANKNUM			(XSI_BASE + XSI_BANKNUM_OFS)
 #define XSI_IHA				(XSI_BASE + XSI_IHA_OFS)
+#define XSI_ITC_OFFSET			(XSI_BASE + XSI_ITC_OFFSET_OFS)
+#define XSI_ITC_LAST			(XSI_BASE + XSI_ITC_LAST_OFS)
 #endif
 
 #ifndef __ASSEMBLY__
@@ -67,7 +69,7 @@
  * may have different semantics depending on whether they are executed
  * at PL0 vs PL!=0.  When paravirtualized, these instructions mustn't
  * be allowed to execute directly, lest incorrect semantics result. */
-extern void xen_fc(unsigned long addr);
+extern void xen_fc(void *addr);
 extern unsigned long xen_thash(unsigned long addr);
 
 /* Note that "ttag" and "cover" are also privilege-sensitive; "ttag"
@@ -80,8 +82,10 @@ extern unsigned long xen_thash(unsigned long addr);
 extern unsigned long xen_get_cpuid(int index);
 extern unsigned long xen_get_pmd(int index);
 
+#ifndef ASM_SUPPORTED
 extern unsigned long xen_get_eflag(void);	/* see xen_ia64_getreg */
 extern void xen_set_eflag(unsigned long);	/* see xen_ia64_setreg */
+#endif
 
 /************************************************/
 /* Instructions paravirtualized for performance */
@@ -106,6 +110,7 @@ extern void xen_set_eflag(unsigned long); /* see xen_ia64_setreg */
 #define xen_get_virtual_pend()		\
 	(*(((uint8_t *)XEN_MAPPEDREGS->interrupt_mask_addr) - 1))
 
+#ifndef ASM_SUPPORTED
 /* Although all privileged operations can be left to trap and will
  * be properly handled by Xen, some are frequent enough that we use
  * hyperprivops for performance. */
@@ -123,6 +128,7 @@ extern void xen_set_rr0_to_rr4(unsigned long val0, unsigned long val1,
 			       unsigned long val4);
 extern void xen_set_kr(unsigned long index, unsigned long val);
 extern void xen_ptcga(unsigned long addr, unsigned long size);
+#endif /* !ASM_SUPPORTED */
 
 #endif /* !__ASSEMBLY__ */
 