author    Len Brown <len.brown@intel.com>  2009-01-09 03:39:43 -0500
committer Len Brown <len.brown@intel.com>  2009-01-09 03:39:43 -0500
commit    b2576e1d4408e134e2188c967b1f28af39cd79d4 (patch)
tree      004f3c82faab760f304ce031d6d2f572e7746a50 /arch/ia64
parent    3cc8a5f4ba91f67bbdb81a43a99281a26aab8d77 (diff)
parent    2150edc6c5cf00f7adb54538b9ea2a3e9cedca3f (diff)
Merge branch 'linus' into release
Diffstat (limited to 'arch/ia64')
45 files changed, 1248 insertions, 1125 deletions
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 6bd91ed7cd03..3d31636cbafb 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -99,7 +99,7 @@ config GENERIC_IOMAP
 	bool
 	default y
 
-config SCHED_NO_NO_OMIT_FRAME_POINTER
+config SCHED_OMIT_FRAME_POINTER
 	bool
 	default y
 
@@ -687,3 +687,6 @@ config IRQ_PER_CPU
 
 config IOMMU_HELPER
 	def_bool (IA64_HP_ZX1 || IA64_HP_ZX1_SWIOTLB || IA64_GENERIC || SWIOTLB)
+
+config IOMMU_API
+	def_bool (DMAR)
diff --git a/arch/ia64/hp/sim/hpsim_irq.c b/arch/ia64/hp/sim/hpsim_irq.c
index c2f58ff364e7..cc0a3182db3c 100644
--- a/arch/ia64/hp/sim/hpsim_irq.c
+++ b/arch/ia64/hp/sim/hpsim_irq.c
@@ -22,7 +22,7 @@ hpsim_irq_noop (unsigned int irq)
 }
 
 static void
-hpsim_set_affinity_noop (unsigned int a, cpumask_t b)
+hpsim_set_affinity_noop(unsigned int a, const struct cpumask *b)
 {
 }
 
diff --git a/arch/ia64/hp/sim/simeth.c b/arch/ia64/hp/sim/simeth.c
index 3d47839a0c48..e4d8fde68103 100644
--- a/arch/ia64/hp/sim/simeth.c
+++ b/arch/ia64/hp/sim/simeth.c
@@ -167,6 +167,15 @@ netdev_read(int fd, unsigned char *buf, unsigned int len)
 	return ia64_ssc(fd, __pa(buf), len, 0, SSC_NETDEV_RECV);
 }
 
+static const struct net_device_ops simeth_netdev_ops = {
+	.ndo_open		= simeth_open,
+	.ndo_stop		= simeth_close,
+	.ndo_start_xmit		= simeth_tx,
+	.ndo_get_stats		= simeth_get_stats,
+	.ndo_set_multicast_list	= set_multicast_list, /* not yet used */
+
+};
+
 /*
  * Function shared with module code, so cannot be in init section
  *
@@ -206,14 +215,10 @@ simeth_probe1(void)
 
 	memcpy(dev->dev_addr, mac_addr, sizeof(mac_addr));
 
-	local = dev->priv;
+	local = netdev_priv(dev);
 	local->simfd = fd; /* keep track of underlying file descriptor */
 
-	dev->open = simeth_open;
-	dev->stop = simeth_close;
-	dev->hard_start_xmit = simeth_tx;
-	dev->get_stats = simeth_get_stats;
-	dev->set_multicast_list = set_multicast_list; /* no yet used */
+	dev->netdev_ops = &simeth_netdev_ops;
 
 	err = register_netdev(dev);
 	if (err) {
@@ -325,7 +330,7 @@ simeth_device_event(struct notifier_block *this,unsigned long event, void *ptr)
 	 * we get DOWN then UP.
 	 */
 
-	local = dev->priv;
+	local = netdev_priv(dev);
 	/* now do it for real */
 	r = event == NETDEV_UP ?
 		netdev_attach(local->simfd, dev->irq, ntohl(ifa->ifa_local)):
@@ -380,7 +385,7 @@ frame_print(unsigned char *from, unsigned char *frame, int len)
 static int
 simeth_tx(struct sk_buff *skb, struct net_device *dev)
 {
-	struct simeth_local *local = dev->priv;
+	struct simeth_local *local = netdev_priv(dev);
 
 #if 0
 	/* ensure we have at least ETH_ZLEN bytes (min frame size) */
@@ -443,7 +448,7 @@ simeth_rx(struct net_device *dev)
 	int	len;
 	int	rcv_count = SIMETH_RECV_MAX;
 
-	local = dev->priv;
+	local = netdev_priv(dev);
 	/*
 	 * the loop concept has been borrowed from other drivers
 	 * looks to me like it's a throttling thing to avoid pushing to many
@@ -507,7 +512,7 @@ simeth_interrupt(int irq, void *dev_id)
 static struct net_device_stats *
 simeth_get_stats(struct net_device *dev)
 {
-	struct simeth_local *local = dev->priv;
+	struct simeth_local *local = netdev_priv(dev);
 
 	return &local->stats;
 }
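The simeth hunks above are part of the tree-wide net_device_ops conversion: the per-device function pointers (dev->open, dev->stop, ...) move into a single shared const ops table, and the removed dev->priv field is replaced by netdev_priv(), which returns the private area allocated together with the net_device. A minimal sketch of the same pattern, using hypothetical demo_* names rather than the simeth ones:

```c
#include <linux/netdevice.h>
#include <linux/etherdevice.h>

struct demo_priv {
	int simfd;			/* hypothetical per-device state */
};

static int demo_open(struct net_device *dev)  { return 0; }
static int demo_close(struct net_device *dev) { return 0; }

static int demo_tx(struct sk_buff *skb, struct net_device *dev)
{
	dev_kfree_skb(skb);		/* sketch: just consume the packet */
	return NETDEV_TX_OK;
}

/* One shared, read-only ops table replaces the per-device pointers. */
static const struct net_device_ops demo_netdev_ops = {
	.ndo_open	= demo_open,
	.ndo_stop	= demo_close,
	.ndo_start_xmit	= demo_tx,
};

static int demo_probe(void)
{
	struct net_device *dev = alloc_etherdev(sizeof(struct demo_priv));
	struct demo_priv *priv;
	int err;

	if (!dev)
		return -ENOMEM;

	priv = netdev_priv(dev);	/* replaces the old dev->priv */
	priv->simfd = -1;
	dev->netdev_ops = &demo_netdev_ops;

	err = register_netdev(dev);
	if (err)
		free_netdev(dev);
	return err;
}
```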
diff --git a/arch/ia64/ia32/sys_ia32.c b/arch/ia64/ia32/sys_ia32.c
index 5e92ae00bdbb..16ef61a91d95 100644
--- a/arch/ia64/ia32/sys_ia32.c
+++ b/arch/ia64/ia32/sys_ia32.c
@@ -1767,25 +1767,24 @@ groups16_from_user(struct group_info *group_info, short __user *grouplist)
 asmlinkage long
 sys32_getgroups16 (int gidsetsize, short __user *grouplist)
 {
+	const struct cred *cred = current_cred();
 	int i;
 
 	if (gidsetsize < 0)
 		return -EINVAL;
 
-	get_group_info(current->group_info);
-	i = current->group_info->ngroups;
+	i = cred->group_info->ngroups;
 	if (gidsetsize) {
 		if (i > gidsetsize) {
 			i = -EINVAL;
 			goto out;
 		}
-		if (groups16_to_user(grouplist, current->group_info)) {
+		if (groups16_to_user(grouplist, cred->group_info)) {
 			i = -EFAULT;
 			goto out;
 		}
 	}
 out:
-	put_group_info(current->group_info);
 	return i;
 }
 
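The sys32_getgroups16() hunk follows the credentials rework: the supplementary group list moved from task_struct into struct cred, and current_cred() hands back a const snapshot that is stable for the duration of the syscall, which is why the old get_group_info()/put_group_info() pinning can be dropped. Roughly, the reading side now looks like this (a sketch, not the exact syscall):

```c
#include <linux/cred.h>

/* Sketch: read the caller's supplementary group count the
 * post-credentials way. No reference counting is needed because a
 * task's own cred cannot be swapped out from under it mid-syscall. */
static int demo_ngroups(void)
{
	const struct cred *cred = current_cred();

	return cred->group_info->ngroups;
}
```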
diff --git a/arch/ia64/include/asm/Kbuild b/arch/ia64/include/asm/Kbuild
index ccbe8ae47a61..3b25bd9dca91 100644
--- a/arch/ia64/include/asm/Kbuild
+++ b/arch/ia64/include/asm/Kbuild
@@ -14,3 +14,4 @@ unifdef-y += gcc_intrin.h
 unifdef-y += intrinsics.h
 unifdef-y += perfmon.h
 unifdef-y += ustack.h
+unifdef-y += swab.h
diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h
index 50c2b83fd5a0..d37292bd9875 100644
--- a/arch/ia64/include/asm/atomic.h
+++ b/arch/ia64/include/asm/atomic.h
@@ -17,12 +17,6 @@
 #include <asm/intrinsics.h>
 #include <asm/system.h>
 
-/*
- * On IA-64, counter must always be volatile to ensure that that the
- * memory accesses are ordered.
- */
-typedef struct { volatile __s32 counter; } atomic_t;
-typedef struct { volatile __s64 counter; } atomic64_t;
 
 #define ATOMIC_INIT(i)		((atomic_t) { (i) })
 #define ATOMIC64_INIT(i)	((atomic64_t) { (i) })
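The deleted typedefs reflect the unification of the atomic types: atomic_t and atomic64_t now come from the shared definition in <linux/types.h> rather than per-arch copies, and the ia64 header keeps only its architecture-specific operations. Usage is unchanged; a trivial sketch:

```c
#include <linux/types.h>
#include <asm/atomic.h>

static atomic_t demo_count = ATOMIC_INIT(0);

/* The generic atomic_t works with the ia64 ops exactly as before. */
static int demo_inc(void)
{
	return atomic_inc_return(&demo_count);
}
```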
diff --git a/arch/ia64/include/asm/byteorder.h b/arch/ia64/include/asm/byteorder.h
index 69bd41d7c26e..0f84c5cb703d 100644
--- a/arch/ia64/include/asm/byteorder.h
+++ b/arch/ia64/include/asm/byteorder.h
@@ -1,42 +1,7 @@
 #ifndef _ASM_IA64_BYTEORDER_H
 #define _ASM_IA64_BYTEORDER_H
 
-/*
- * Modified 1998, 1999
- *	David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co.
- */
-
-#include <asm/types.h>
-#include <asm/intrinsics.h>
-#include <linux/compiler.h>
-
-static __inline__ __attribute_const__ __u64
-__ia64_swab64 (__u64 x)
-{
-	__u64 result;
-
-	result = ia64_mux1(x, ia64_mux1_rev);
-	return result;
-}
-
-static __inline__ __attribute_const__ __u32
-__ia64_swab32 (__u32 x)
-{
-	return __ia64_swab64(x) >> 32;
-}
-
-static __inline__ __attribute_const__ __u16
-__ia64_swab16(__u16 x)
-{
-	return __ia64_swab64(x) >> 48;
-}
-
-#define __arch__swab64(x) __ia64_swab64(x)
-#define __arch__swab32(x) __ia64_swab32(x)
-#define __arch__swab16(x) __ia64_swab16(x)
-
-#define __BYTEORDER_HAS_U64__
-
+#include <asm/swab.h>
 #include <linux/byteorder/little_endian.h>
 
 #endif /* _ASM_IA64_BYTEORDER_H */
diff --git a/arch/ia64/include/asm/irq.h b/arch/ia64/include/asm/irq.h
index 3627116fb0e2..36429a532630 100644
--- a/arch/ia64/include/asm/irq.h
+++ b/arch/ia64/include/asm/irq.h
@@ -27,7 +27,7 @@ irq_canonicalize (int irq)
 }
 
 extern void set_irq_affinity_info (unsigned int irq, int dest, int redir);
-bool is_affinity_mask_valid(cpumask_t cpumask);
+bool is_affinity_mask_valid(cpumask_var_t cpumask);
 
 #define is_affinity_mask_valid is_affinity_mask_valid
 
diff --git a/arch/ia64/include/asm/kvm.h b/arch/ia64/include/asm/kvm.h
index f38472ac2267..68aa6da807c1 100644
--- a/arch/ia64/include/asm/kvm.h
+++ b/arch/ia64/include/asm/kvm.h
@@ -166,8 +166,6 @@ struct saved_vpd {
 };
 
 struct kvm_regs {
-	char *saved_guest;
-	char *saved_stack;
 	struct saved_vpd vpd;
 	/*Arch-regs*/
 	int mp_state;
@@ -200,6 +198,10 @@ struct kvm_regs {
 	unsigned long fp_psr;  /*used for lazy float register */
 	unsigned long saved_gp;
 	/*for phycial  emulation */
+
+	union context saved_guest;
+
+	unsigned long reserved[64];	/* for future use */
 };
 
 struct kvm_sregs {
diff --git a/arch/ia64/include/asm/kvm_host.h b/arch/ia64/include/asm/kvm_host.h
index c60d324da540..348663661659 100644
--- a/arch/ia64/include/asm/kvm_host.h
+++ b/arch/ia64/include/asm/kvm_host.h
@@ -23,17 +23,6 @@
 #ifndef __ASM_KVM_HOST_H
 #define __ASM_KVM_HOST_H
 
-
-#include <linux/types.h>
-#include <linux/mm.h>
-#include <linux/kvm.h>
-#include <linux/kvm_para.h>
-#include <linux/kvm_types.h>
-
-#include <asm/pal.h>
-#include <asm/sal.h>
-
-#define KVM_MAX_VCPUS 4
 #define KVM_MEMORY_SLOTS 32
 /* memory slots that does not exposed to userspace */
 #define KVM_PRIVATE_MEM_SLOTS 4
@@ -50,70 +39,132 @@
 #define EXIT_REASON_EXTERNAL_INTERRUPT	6
 #define EXIT_REASON_IPI			7
 #define EXIT_REASON_PTC_G		8
+#define EXIT_REASON_DEBUG		20
 
 /*Define vmm address space and vm data space.*/
-#define KVM_VMM_SIZE (16UL<<20)
+#define KVM_VMM_SIZE (__IA64_UL_CONST(16)<<20)
 #define KVM_VMM_SHIFT 24
-#define KVM_VMM_BASE 0xD000000000000000UL
-#define VMM_SIZE (8UL<<20)
+#define KVM_VMM_BASE 0xD000000000000000
+#define VMM_SIZE (__IA64_UL_CONST(8)<<20)
 
 /*
  * Define vm_buffer, used by PAL Services, base address.
- * Note: vmbuffer is in the VMM-BLOCK, the size must be < 8M
+ * Note: vm_buffer is in the VMM-BLOCK, the size must be < 8M
  */
 #define KVM_VM_BUFFER_BASE (KVM_VMM_BASE + VMM_SIZE)
-#define KVM_VM_BUFFER_SIZE (8UL<<20)
-
-/*Define Virtual machine data layout.*/
-#define KVM_VM_DATA_SHIFT  24
-#define KVM_VM_DATA_SIZE (1UL << KVM_VM_DATA_SHIFT)
-#define KVM_VM_DATA_BASE (KVM_VMM_BASE + KVM_VMM_SIZE)
-
-
-#define KVM_P2M_BASE    KVM_VM_DATA_BASE
-#define KVM_P2M_OFS     0
-#define KVM_P2M_SIZE    (8UL << 20)
-
-#define KVM_VHPT_BASE   (KVM_P2M_BASE + KVM_P2M_SIZE)
-#define KVM_VHPT_OFS    KVM_P2M_SIZE
-#define KVM_VHPT_BLOCK_SIZE   (2UL << 20)
-#define VHPT_SHIFT      18
-#define VHPT_SIZE       (1UL << VHPT_SHIFT)
-#define VHPT_NUM_ENTRIES (1<<(VHPT_SHIFT-5))
-
-#define KVM_VTLB_BASE   (KVM_VHPT_BASE+KVM_VHPT_BLOCK_SIZE)
-#define KVM_VTLB_OFS    (KVM_VHPT_OFS+KVM_VHPT_BLOCK_SIZE)
-#define KVM_VTLB_BLOCK_SIZE   (1UL<<20)
-#define VTLB_SHIFT      17
-#define VTLB_SIZE       (1UL<<VTLB_SHIFT)
-#define VTLB_NUM_ENTRIES (1<<(VTLB_SHIFT-5))
-
-#define KVM_VPD_BASE   (KVM_VTLB_BASE+KVM_VTLB_BLOCK_SIZE)
-#define KVM_VPD_OFS    (KVM_VTLB_OFS+KVM_VTLB_BLOCK_SIZE)
-#define KVM_VPD_BLOCK_SIZE   (2UL<<20)
-#define VPD_SHIFT      16
-#define VPD_SIZE       (1UL<<VPD_SHIFT)
-
-#define KVM_VCPU_BASE   (KVM_VPD_BASE+KVM_VPD_BLOCK_SIZE)
-#define KVM_VCPU_OFS    (KVM_VPD_OFS+KVM_VPD_BLOCK_SIZE)
-#define KVM_VCPU_BLOCK_SIZE   (2UL<<20)
-#define VCPU_SHIFT 18
-#define VCPU_SIZE (1UL<<VCPU_SHIFT)
-#define MAX_VCPU_NUM KVM_VCPU_BLOCK_SIZE/VCPU_SIZE
-
-#define KVM_VM_BASE     (KVM_VCPU_BASE+KVM_VCPU_BLOCK_SIZE)
-#define KVM_VM_OFS      (KVM_VCPU_OFS+KVM_VCPU_BLOCK_SIZE)
-#define KVM_VM_BLOCK_SIZE     (1UL<<19)
-
-#define KVM_MEM_DIRTY_LOG_BASE (KVM_VM_BASE+KVM_VM_BLOCK_SIZE)
-#define KVM_MEM_DIRTY_LOG_OFS  (KVM_VM_OFS+KVM_VM_BLOCK_SIZE)
-#define KVM_MEM_DIRTY_LOG_SIZE (1UL<<19)
-
-/* Get vpd, vhpt, tlb, vcpu, base*/
-#define VPD_ADDR(n) (KVM_VPD_BASE+n*VPD_SIZE)
-#define VHPT_ADDR(n) (KVM_VHPT_BASE+n*VHPT_SIZE)
-#define VTLB_ADDR(n) (KVM_VTLB_BASE+n*VTLB_SIZE)
-#define VCPU_ADDR(n) (KVM_VCPU_BASE+n*VCPU_SIZE)
+#define KVM_VM_BUFFER_SIZE (__IA64_UL_CONST(8)<<20)
+
+/*
+ * kvm guest's data area looks as follow:
+ *
+ *            +----------------------+  ------- KVM_VM_DATA_SIZE
+ *            |     vcpu[n]'s data   |  |     ___________________KVM_STK_OFFSET
+ *            |                      |  |    /                   |
+ *            |        ..........    |  |   /vcpu's struct&stack |
+ *            |        ..........    |  |  /---------------------|---- 0
+ *            |     vcpu[5]'s data   |  | /         vpd          |
+ *            |     vcpu[4]'s data   |  |/-----------------------|
+ *            |     vcpu[3]'s data   |  /         vtlb           |
+ *            |     vcpu[2]'s data   | /|------------------------|
+ *            |     vcpu[1]'s data   |/ |         vhpt           |
+ *            |     vcpu[0]'s data   |____________________________|
+ *            +----------------------+  |
+ *            |    memory dirty log  |  |
+ *            +----------------------+  |
+ *            |    vm's data struct  |  |
+ *            +----------------------+  |
+ *            |                      |  |
+ *            |                      |  |
+ *            |                      |  |
+ *            |                      |  |
+ *            |                      |  |
+ *            |                      |  |
+ *            |                      |  |
+ *            |   vm's p2m table     |  |
+ *            |                      |  |
+ *            |                      |  |
+ *            |                      |  |  |
+ * vm's data->|                      |  |  |
+ *            +----------------------+  ------- 0
+ * To support large memory, needs to increase the size of p2m.
+ * To support more vcpus, needs to ensure it has enough space to
+ * hold vcpus' data.
+ */
+
+#define KVM_VM_DATA_SHIFT	26
+#define KVM_VM_DATA_SIZE	(__IA64_UL_CONST(1) << KVM_VM_DATA_SHIFT)
+#define KVM_VM_DATA_BASE	(KVM_VMM_BASE + KVM_VM_DATA_SIZE)
+
+#define KVM_P2M_BASE		KVM_VM_DATA_BASE
+#define KVM_P2M_SIZE		(__IA64_UL_CONST(24) << 20)
+
+#define VHPT_SHIFT		16
+#define VHPT_SIZE		(__IA64_UL_CONST(1) << VHPT_SHIFT)
+#define VHPT_NUM_ENTRIES	(__IA64_UL_CONST(1) << (VHPT_SHIFT-5))
+
+#define VTLB_SHIFT		16
+#define VTLB_SIZE		(__IA64_UL_CONST(1) << VTLB_SHIFT)
+#define VTLB_NUM_ENTRIES	(1UL << (VHPT_SHIFT-5))
+
+#define VPD_SHIFT		16
+#define VPD_SIZE		(__IA64_UL_CONST(1) << VPD_SHIFT)
+
+#define VCPU_STRUCT_SHIFT	16
+#define VCPU_STRUCT_SIZE	(__IA64_UL_CONST(1) << VCPU_STRUCT_SHIFT)
+
+#define KVM_STK_OFFSET		VCPU_STRUCT_SIZE
+
+#define KVM_VM_STRUCT_SHIFT	19
+#define KVM_VM_STRUCT_SIZE	(__IA64_UL_CONST(1) << KVM_VM_STRUCT_SHIFT)
+
+#define KVM_MEM_DIRY_LOG_SHIFT	19
+#define KVM_MEM_DIRTY_LOG_SIZE	(__IA64_UL_CONST(1) << KVM_MEM_DIRY_LOG_SHIFT)
+
+#ifndef __ASSEMBLY__
+
+/*Define the max vcpus and memory for Guests.*/
+#define KVM_MAX_VCPUS	(KVM_VM_DATA_SIZE - KVM_P2M_SIZE - KVM_VM_STRUCT_SIZE -\
+			KVM_MEM_DIRTY_LOG_SIZE) / sizeof(struct kvm_vcpu_data)
+#define KVM_MAX_MEM_SIZE (KVM_P2M_SIZE >> 3 << PAGE_SHIFT)
+
+#define VMM_LOG_LEN 256
+
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <linux/kvm.h>
+#include <linux/kvm_para.h>
+#include <linux/kvm_types.h>
+
+#include <asm/pal.h>
+#include <asm/sal.h>
+#include <asm/page.h>
+
+struct kvm_vcpu_data {
+	char vcpu_vhpt[VHPT_SIZE];
+	char vcpu_vtlb[VTLB_SIZE];
+	char vcpu_vpd[VPD_SIZE];
+	char vcpu_struct[VCPU_STRUCT_SIZE];
+};
+
+struct kvm_vm_data {
+	char kvm_p2m[KVM_P2M_SIZE];
+	char kvm_vm_struct[KVM_VM_STRUCT_SIZE];
+	char kvm_mem_dirty_log[KVM_MEM_DIRTY_LOG_SIZE];
+	struct kvm_vcpu_data vcpu_data[KVM_MAX_VCPUS];
+};
+
+#define VCPU_BASE(n)	KVM_VM_DATA_BASE + \
+			offsetof(struct kvm_vm_data, vcpu_data[n])
+#define VM_BASE		KVM_VM_DATA_BASE + \
+			offsetof(struct kvm_vm_data, kvm_vm_struct)
+#define KVM_MEM_DIRTY_LOG_BASE	KVM_VM_DATA_BASE + \
+			offsetof(struct kvm_vm_data, kvm_mem_dirty_log)
+
+#define VHPT_BASE(n) (VCPU_BASE(n) + offsetof(struct kvm_vcpu_data, vcpu_vhpt))
+#define VTLB_BASE(n) (VCPU_BASE(n) + offsetof(struct kvm_vcpu_data, vcpu_vtlb))
+#define VPD_BASE(n)  (VCPU_BASE(n) + offsetof(struct kvm_vcpu_data, vcpu_vpd))
+#define VCPU_STRUCT_BASE(n)	(VCPU_BASE(n) + \
+				offsetof(struct kvm_vcpu_data, vcpu_struct))
 
 /*IO section definitions*/
 #define IOREQ_READ      1
@@ -389,6 +440,7 @@ struct kvm_vcpu_arch {
 
 	unsigned long opcode;
 	unsigned long cause;
+	char log_buf[VMM_LOG_LEN];
 	union context host;
 	union context guest;
 };
@@ -403,20 +455,19 @@ struct kvm_sal_data {
 };
 
 struct kvm_arch {
+	spinlock_t dirty_log_lock;
+
 	unsigned long	vm_base;
 	unsigned long	metaphysical_rr0;
 	unsigned long	metaphysical_rr4;
 	unsigned long	vmm_init_rr;
-	unsigned long	vhpt_base;
-	unsigned long	vtlb_base;
-	unsigned long	vpd_base;
-	spinlock_t dirty_log_lock;
+
 	struct kvm_ioapic *vioapic;
 	struct kvm_vm_stat stat;
 	struct kvm_sal_data rdv_sal_data;
 
 	struct list_head assigned_dev_head;
-	struct dmar_domain *intel_iommu_domain;
+	struct iommu_domain *iommu_domain;
 	struct hlist_head irq_ack_notifier_list;
 
 	unsigned long irq_sources_bitmap;
@@ -512,7 +563,7 @@ struct kvm_pt_regs {
 
 static inline struct kvm_pt_regs *vcpu_regs(struct kvm_vcpu *v)
 {
-	return (struct kvm_pt_regs *) ((unsigned long) v + IA64_STK_OFFSET) - 1;
+	return (struct kvm_pt_regs *) ((unsigned long) v + KVM_STK_OFFSET) - 1;
 }
 
 typedef int kvm_vmm_entry(void);
@@ -531,5 +582,6 @@ int kvm_pal_emul(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
 void kvm_sal_emul(struct kvm_vcpu *vcpu);
 
 static inline void kvm_inject_nmi(struct kvm_vcpu *vcpu) {}
+#endif /* __ASSEMBLY__*/
 
 #endif
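The rewritten kvm_host.h swaps a chain of hand-maintained BASE/OFS/SIZE macros for a single struct kvm_vm_data whose field offsets, taken with offsetof(), define every base address; adding or resizing a region then can never leave a stale offset behind. The technique in miniature, as a standalone userspace sketch with made-up sizes:

```c
#include <stddef.h>
#include <stdio.h>

/* Describe the arena as one struct and derive every base address
 * from offsetof(), instead of hand-chaining BASE + SIZE macros. */
struct vcpu_data {
	char vhpt[1 << 16];
	char vtlb[1 << 16];
};

struct vm_data {
	char p2m[1 << 20];
	struct vcpu_data vcpu[4];
};

#define ARENA_BASE	0x10000000UL
#define VCPU_BASE(n)	(ARENA_BASE + offsetof(struct vm_data, vcpu[n]))
#define VHPT_BASE(n)	(VCPU_BASE(n) + offsetof(struct vcpu_data, vhpt))

int main(void)
{
	/* The derived addresses track the struct layout automatically. */
	printf("vcpu1 at %#lx, its vhpt at %#lx\n",
	       (unsigned long)VCPU_BASE(1), (unsigned long)VHPT_BASE(1));
	return 0;
}
```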
diff --git a/arch/ia64/include/asm/smp.h b/arch/ia64/include/asm/smp.h
index 12d96e0cd513..21c402365d0e 100644
--- a/arch/ia64/include/asm/smp.h
+++ b/arch/ia64/include/asm/smp.h
@@ -57,7 +57,6 @@ extern struct smp_boot_data {
 
 extern char no_int_routing __devinitdata;
 
-extern cpumask_t cpu_online_map;
 extern cpumask_t cpu_core_map[NR_CPUS];
 DECLARE_PER_CPU(cpumask_t, cpu_sibling_map);
 extern int smp_num_siblings;
diff --git a/arch/ia64/include/asm/swab.h b/arch/ia64/include/asm/swab.h
new file mode 100644
index 000000000000..6aa58b699eea
--- /dev/null
+++ b/arch/ia64/include/asm/swab.h
@@ -0,0 +1,34 @@
+#ifndef _ASM_IA64_SWAB_H
+#define _ASM_IA64_SWAB_H
+
+/*
+ * Modified 1998, 1999
+ *	David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co.
+ */
+
+#include <asm/types.h>
+#include <asm/intrinsics.h>
+#include <linux/compiler.h>
+
+static __inline__ __attribute_const__ __u64 __arch_swab64(__u64 x)
+{
+	__u64 result;
+
+	result = ia64_mux1(x, ia64_mux1_rev);
+	return result;
+}
+#define __arch_swab64 __arch_swab64
+
+static __inline__ __attribute_const__ __u32 __arch_swab32(__u32 x)
+{
+	return __arch_swab64(x) >> 32;
+}
+#define __arch_swab32 __arch_swab32
+
+static __inline__ __attribute_const__ __u16 __arch_swab16(__u16 x)
+{
+	return __arch_swab64(x) >> 48;
+}
+#define __arch_swab16 __arch_swab16
+
+#endif /* _ASM_IA64_SWAB_H */
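The new swab.h uses the current arch-override protocol: the architecture defines __arch_swab16/32/64 and marks each with a same-named #define, and the generic <linux/swab.h> then wires them into swab16()/swab32()/swab64(); the old __arch__swabNN plus __BYTEORDER_HAS_U64__ convention removed from byteorder.h above is gone. Callers are unaffected; something like this keeps using the ia64 mux1-based implementation underneath:

```c
#include <linux/swab.h>

/* Generic helper; on ia64 this resolves to the mux1-based
 * __arch_swab32() defined in asm/swab.h. */
static u32 demo_swap(u32 x)
{
	return swab32(x);
}
```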
diff --git a/arch/ia64/include/asm/swiotlb.h b/arch/ia64/include/asm/swiotlb.h
index fb79423834d0..dcbaea7ce128 100644
--- a/arch/ia64/include/asm/swiotlb.h
+++ b/arch/ia64/include/asm/swiotlb.h
@@ -2,44 +2,7 @@
 #define ASM_IA64__SWIOTLB_H
 
 #include <linux/dma-mapping.h>
-
-/* SWIOTLB interface */
-
-extern dma_addr_t swiotlb_map_single(struct device *hwdev, void *ptr,
-				     size_t size, int dir);
-extern void *swiotlb_alloc_coherent(struct device *hwdev, size_t size,
-				    dma_addr_t *dma_handle, gfp_t flags);
-extern void swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr,
-				 size_t size, int dir);
-extern void swiotlb_sync_single_for_cpu(struct device *hwdev,
-					dma_addr_t dev_addr,
-					size_t size, int dir);
-extern void swiotlb_sync_single_for_device(struct device *hwdev,
-					   dma_addr_t dev_addr,
-					   size_t size, int dir);
-extern void swiotlb_sync_single_range_for_cpu(struct device *hwdev,
-					      dma_addr_t dev_addr,
-					      unsigned long offset,
-					      size_t size, int dir);
-extern void swiotlb_sync_single_range_for_device(struct device *hwdev,
-						 dma_addr_t dev_addr,
-						 unsigned long offset,
-						 size_t size, int dir);
-extern void swiotlb_sync_sg_for_cpu(struct device *hwdev,
-				    struct scatterlist *sg, int nelems,
-				    int dir);
-extern void swiotlb_sync_sg_for_device(struct device *hwdev,
-				       struct scatterlist *sg, int nelems,
-				       int dir);
-extern int swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg,
-			  int nents, int direction);
-extern void swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sg,
-			     int nents, int direction);
-extern int swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr);
-extern void swiotlb_free_coherent(struct device *hwdev, size_t size,
-				  void *vaddr, dma_addr_t dma_handle);
-extern int swiotlb_dma_supported(struct device *hwdev, u64 mask);
-extern void swiotlb_init(void);
+#include <linux/swiotlb.h>
 
 extern int swiotlb_force;
 
diff --git a/arch/ia64/include/asm/topology.h b/arch/ia64/include/asm/topology.h
index 35bcb641c9e5..76a33a91ca69 100644
--- a/arch/ia64/include/asm/topology.h
+++ b/arch/ia64/include/asm/topology.h
@@ -34,6 +34,7 @@
  * Returns a bitmask of CPUs on Node 'node'.
  */
 #define node_to_cpumask(node) (node_to_cpu_mask[node])
+#define cpumask_of_node(node) (&node_to_cpu_mask[node])
 
 /*
  * Returns the number of the node containing Node 'nid'.
@@ -45,7 +46,7 @@
 /*
  * Returns the number of the first CPU on Node 'node'.
  */
-#define node_to_first_cpu(node) (first_cpu(node_to_cpumask(node)))
+#define node_to_first_cpu(node) (cpumask_first(cpumask_of_node(node)))
 
 /*
  * Determines the node for a given pci bus
@@ -55,7 +56,6 @@
 void build_cpu_to_node_map(void);
 
 #define SD_CPU_INIT (struct sched_domain) {		\
-	.span			= CPU_MASK_NONE,	\
 	.parent			= NULL,			\
 	.child			= NULL,			\
 	.groups			= NULL,			\
@@ -80,7 +80,6 @@ void build_cpu_to_node_map(void);
 
 /* sched_domains SD_NODE_INIT for IA64 NUMA machines */
 #define SD_NODE_INIT (struct sched_domain) {		\
-	.span			= CPU_MASK_NONE,	\
 	.parent			= NULL,			\
 	.child			= NULL,			\
 	.groups			= NULL,			\
@@ -111,6 +110,8 @@ void build_cpu_to_node_map(void);
 #define topology_core_id(cpu)			(cpu_data(cpu)->core_id)
 #define topology_core_siblings(cpu)		(cpu_core_map[cpu])
 #define topology_thread_siblings(cpu)		(per_cpu(cpu_sibling_map, cpu))
+#define topology_core_cpumask(cpu)		(&cpu_core_map[cpu])
+#define topology_thread_cpumask(cpu)		(&per_cpu(cpu_sibling_map, cpu))
 #define smt_capable()				(smp_num_siblings > 1)
 #endif
 
@@ -121,6 +122,10 @@ extern void arch_fix_phys_package_id(int num, u32 slot);
 	node_to_cpumask(pcibus_to_node(bus)) \
 )
 
+#define cpumask_of_pcibus(bus)	(pcibus_to_node(bus) == -1 ?	\
+				 cpu_all_mask :			\
+				 cpumask_from_node(pcibus_to_node(bus)))
+
 #include <asm-generic/topology.h>
 
 #endif /* _ASM_IA64_TOPOLOGY_H */
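cpumask_of_node() is the pointer-returning counterpart of node_to_cpumask(): callers can walk a node's CPUs without copying a whole cpumask_t by value, which is the point of the cpumask conversion on large-NR_CPUS machines. A sketch of a typical consumer:

```c
#include <linux/cpumask.h>
#include <linux/topology.h>

/* Sketch: find the first online CPU on a node without putting a
 * cpumask_t on the stack. Returns >= nr_cpu_ids if none is online. */
static int demo_first_online_cpu_on_node(int node)
{
	return cpumask_first_and(cpumask_of_node(node), cpu_online_mask);
}
```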
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
index c19b686db9b8..d541671caf4a 100644
--- a/arch/ia64/kernel/acpi.c
+++ b/arch/ia64/kernel/acpi.c
@@ -203,7 +203,6 @@ char *__init __acpi_map_table(unsigned long phys_addr, unsigned long size)
 			      Boot-time Table Parsing
    -------------------------------------------------------------------------- */
 
-static int total_cpus __initdata;
 static int available_cpus __initdata;
 struct acpi_table_madt *acpi_madt __initdata;
 static u8 has_8259;
@@ -1002,7 +1001,7 @@ acpi_map_iosapic(acpi_handle handle, u32 depth, void *context, void **ret)
 	node = pxm_to_node(pxm);
 
 	if (node >= MAX_NUMNODES || !node_online(node) ||
-	    cpus_empty(node_to_cpumask(node)))
+	    cpumask_empty(cpumask_of_node(node)))
 		return AE_OK;
 
 	/* We know a gsi to node mapping! */
diff --git a/arch/ia64/kernel/init_task.c b/arch/ia64/kernel/init_task.c
index 9d7e1c66faf4..5b0e830c6f33 100644
--- a/arch/ia64/kernel/init_task.c
+++ b/arch/ia64/kernel/init_task.c
@@ -17,7 +17,6 @@
 #include <asm/uaccess.h>
 #include <asm/pgtable.h>
 
-static struct fs_struct init_fs = INIT_FS;
 static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
 static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
 struct mm_struct init_mm = INIT_MM(init_mm);
diff --git a/arch/ia64/kernel/iosapic.c b/arch/ia64/kernel/iosapic.c
index 5c4674ae8aea..5cfd3d91001a 100644
--- a/arch/ia64/kernel/iosapic.c
+++ b/arch/ia64/kernel/iosapic.c
@@ -330,25 +330,25 @@ unmask_irq (unsigned int irq)
 
 
 static void
-iosapic_set_affinity (unsigned int irq, cpumask_t mask)
+iosapic_set_affinity(unsigned int irq, const struct cpumask *mask)
 {
 #ifdef CONFIG_SMP
 	u32 high32, low32;
-	int dest, rte_index;
+	int cpu, dest, rte_index;
 	int redir = (irq & IA64_IRQ_REDIRECTED) ? 1 : 0;
 	struct iosapic_rte_info *rte;
 	struct iosapic *iosapic;
 
 	irq &= (~IA64_IRQ_REDIRECTED);
 
-	cpus_and(mask, mask, cpu_online_map);
-	if (cpus_empty(mask))
+	cpu = cpumask_first_and(cpu_online_mask, mask);
+	if (cpu >= nr_cpu_ids)
 		return;
 
-	if (irq_prepare_move(irq, first_cpu(mask)))
+	if (irq_prepare_move(irq, cpu))
 		return;
 
-	dest = cpu_physical_id(first_cpu(mask));
+	dest = cpu_physical_id(cpu);
 
 	if (!iosapic_intr_info[irq].count)
 		return;			/* not an IOSAPIC interrupt */
@@ -695,32 +695,31 @@ get_target_cpu (unsigned int gsi, int irq)
 #ifdef CONFIG_NUMA
 	{
 		int num_cpus, cpu_index, iosapic_index, numa_cpu, i = 0;
-		cpumask_t cpu_mask;
+		const struct cpumask *cpu_mask;
 
 		iosapic_index = find_iosapic(gsi);
 		if (iosapic_index < 0 ||
 		    iosapic_lists[iosapic_index].node == MAX_NUMNODES)
 			goto skip_numa_setup;
 
-		cpu_mask = node_to_cpumask(iosapic_lists[iosapic_index].node);
-		cpus_and(cpu_mask, cpu_mask, domain);
-		for_each_cpu_mask(numa_cpu, cpu_mask) {
-			if (!cpu_online(numa_cpu))
-				cpu_clear(numa_cpu, cpu_mask);
+		cpu_mask = cpumask_of_node(iosapic_lists[iosapic_index].node);
+		num_cpus = 0;
+		for_each_cpu_and(numa_cpu, cpu_mask, &domain) {
+			if (cpu_online(numa_cpu))
+				num_cpus++;
 		}
 
-		num_cpus = cpus_weight(cpu_mask);
-
 		if (!num_cpus)
 			goto skip_numa_setup;
 
 		/* Use irq assignment to distribute across cpus in node */
 		cpu_index = irq % num_cpus;
 
-		for (numa_cpu = first_cpu(cpu_mask) ; i < cpu_index ; i++)
-			numa_cpu = next_cpu(numa_cpu, cpu_mask);
+		for_each_cpu_and(numa_cpu, cpu_mask, &domain)
+			if (cpu_online(numa_cpu) && i++ >= cpu_index)
+				break;
 
-		if (numa_cpu != NR_CPUS)
+		if (numa_cpu < nr_cpu_ids)
 			return cpu_physical_id(numa_cpu);
 	}
 skip_numa_setup:
@@ -731,7 +730,7 @@ skip_numa_setup:
 	 * case of NUMA.)
 	 */
 	do {
-		if (++cpu >= NR_CPUS)
+		if (++cpu >= nr_cpu_ids)
 			cpu = 0;
 	} while (!cpu_online(cpu) || !cpu_isset(cpu, domain));
 
diff --git a/arch/ia64/kernel/irq.c b/arch/ia64/kernel/irq.c
index 7fd18f54c056..95ff16cb05d8 100644
--- a/arch/ia64/kernel/irq.c
+++ b/arch/ia64/kernel/irq.c
@@ -112,11 +112,11 @@ void set_irq_affinity_info (unsigned int irq, int hwid, int redir)
 	}
 }
 
-bool is_affinity_mask_valid(cpumask_t cpumask)
+bool is_affinity_mask_valid(cpumask_var_t cpumask)
 {
 	if (ia64_platform_is("sn2")) {
 		/* Only allow one CPU to be specified in the smp_affinity mask */
-		if (cpus_weight(cpumask) != 1)
+		if (cpumask_weight(cpumask) != 1)
 			return false;
 	}
 	return true;
@@ -133,7 +133,6 @@ unsigned int vectors_in_migration[NR_IRQS];
  */
 static void migrate_irqs(void)
 {
-	cpumask_t	mask;
 	irq_desc_t *desc;
 	int 		irq, new_cpu;
 
@@ -152,15 +151,14 @@ static void migrate_irqs(void)
 		if (desc->status == IRQ_PER_CPU)
 			continue;
 
-		cpus_and(mask, irq_desc[irq].affinity, cpu_online_map);
-		if (any_online_cpu(mask) == NR_CPUS) {
+		if (cpumask_any_and(&irq_desc[irq].affinity, cpu_online_mask)
+		    >= nr_cpu_ids) {
 			/*
 			 * Save it for phase 2 processing
 			 */
 			vectors_in_migration[irq] = irq;
 
 			new_cpu = any_online_cpu(cpu_online_map);
-			mask = cpumask_of_cpu(new_cpu);
 
 			/*
 			 * Al three are essential, currently WARN_ON.. maybe panic?
@@ -168,7 +166,8 @@ static void migrate_irqs(void)
 			if (desc->chip && desc->chip->disable &&
 				desc->chip->enable && desc->chip->set_affinity) {
 				desc->chip->disable(irq);
-				desc->chip->set_affinity(irq, mask);
+				desc->chip->set_affinity(irq,
+							 cpumask_of(new_cpu));
 				desc->chip->enable(irq);
 			} else {
 				WARN_ON((!(desc->chip) || !(desc->chip->disable) ||
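migrate_irqs() shows the idiom for retargeting an interrupt under the new API: ->set_affinity() now takes a const struct cpumask *, and cpumask_of(cpu) yields a pointer to a constant single-bit mask, so the on-stack cpumask_t (a potentially large bitmap) disappears. Reduced to its core, as a sketch against the 2.6.29-era irq_desc layout:

```c
#include <linux/cpumask.h>
#include <linux/irq.h>

/* Sketch: retarget one irq to any online CPU, new-style. */
static void demo_retarget(unsigned int irq)
{
	struct irq_desc *desc = irq_desc + irq;
	int cpu = cpumask_any(cpu_online_mask);

	if (desc->chip && desc->chip->set_affinity)
		desc->chip->set_affinity(irq, cpumask_of(cpu));
}
```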
diff --git a/arch/ia64/kernel/kprobes.c b/arch/ia64/kernel/kprobes.c
index f07688da947c..f90be51b1123 100644
--- a/arch/ia64/kernel/kprobes.c
+++ b/arch/ia64/kernel/kprobes.c
@@ -434,7 +434,7 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
 	/*
 	 * It is possible to have multiple instances associated with a given
 	 * task either because an multiple functions in the call path
-	 * have a return probe installed on them, and/or more then one return
+	 * have a return probe installed on them, and/or more than one return
	 * return probe was registered for a target function.
 	 *
 	 * We can handle this because:
@@ -670,9 +670,11 @@ void __kprobes arch_disarm_kprobe(struct kprobe *p)
 
 void __kprobes arch_remove_kprobe(struct kprobe *p)
 {
-	mutex_lock(&kprobe_mutex);
-	free_insn_slot(p->ainsn.insn, p->ainsn.inst_flag & INST_FLAG_BOOSTABLE);
-	mutex_unlock(&kprobe_mutex);
+	if (p->ainsn.insn) {
+		free_insn_slot(p->ainsn.insn,
+			       p->ainsn.inst_flag & INST_FLAG_BOOSTABLE);
+		p->ainsn.insn = NULL;
+	}
 }
 /*
  * We are resuming execution after a single step fault, so the pt_regs
diff --git a/arch/ia64/kernel/mca_drv.c b/arch/ia64/kernel/mca_drv.c
index fab1d21a4f2c..f94aaa86933f 100644
--- a/arch/ia64/kernel/mca_drv.c
+++ b/arch/ia64/kernel/mca_drv.c
@@ -158,7 +158,7 @@ mca_handler_bh(unsigned long paddr, void *iip, unsigned long ipsr)
 	ia64_mlogbuf_dump();
 	printk(KERN_ERR "OS_MCA: process [cpu %d, pid: %d, uid: %d, "
 		"iip: %p, psr: 0x%lx,paddr: 0x%lx](%s) encounters MCA.\n",
-	       raw_smp_processor_id(), current->pid, current->uid,
+	       raw_smp_processor_id(), current->pid, current_uid(),
 	       iip, ipsr, paddr, current->comm);
 
 	spin_lock(&mca_bh_lock);
diff --git a/arch/ia64/kernel/msi_ia64.c b/arch/ia64/kernel/msi_ia64.c
index 702a09c13238..890339339035 100644
--- a/arch/ia64/kernel/msi_ia64.c
+++ b/arch/ia64/kernel/msi_ia64.c
@@ -49,11 +49,12 @@
 static struct irq_chip	ia64_msi_chip;
 
 #ifdef CONFIG_SMP
-static void ia64_set_msi_irq_affinity(unsigned int irq, cpumask_t cpu_mask)
+static void ia64_set_msi_irq_affinity(unsigned int irq,
+				      const cpumask_t *cpu_mask)
 {
 	struct msi_msg msg;
 	u32 addr, data;
-	int cpu = first_cpu(cpu_mask);
+	int cpu = first_cpu(*cpu_mask);
 
 	if (!cpu_online(cpu))
 		return;
@@ -166,12 +167,11 @@ void arch_teardown_msi_irq(unsigned int irq)
 
 #ifdef CONFIG_DMAR
 #ifdef CONFIG_SMP
-static void dmar_msi_set_affinity(unsigned int irq, cpumask_t mask)
+static void dmar_msi_set_affinity(unsigned int irq, const struct cpumask *mask)
 {
 	struct irq_cfg *cfg = irq_cfg + irq;
 	struct msi_msg msg;
-	int cpu = first_cpu(mask);
-
+	int cpu = cpumask_first(mask);
 
 	if (!cpu_online(cpu))
 		return;
@@ -187,7 +187,7 @@ static void dmar_msi_set_affinity(unsigned int irq, cpumask_t mask)
 	msg.address_lo |= MSI_ADDR_DESTID_CPU(cpu_physical_id(cpu));
 
 	dmar_msi_write(irq, &msg);
-	irq_desc[irq].affinity = mask;
+	irq_desc[irq].affinity = *mask;
 }
 #endif /* CONFIG_SMP */
 
diff --git a/arch/ia64/kernel/pci-dma.c b/arch/ia64/kernel/pci-dma.c
index 2a92f637431d..d0ada067a4af 100644
--- a/arch/ia64/kernel/pci-dma.c
+++ b/arch/ia64/kernel/pci-dma.c
@@ -39,7 +39,7 @@ int iommu_detected __read_mostly;
    be probably a smaller DMA mask, but this is bug-to-bug compatible
    to i386. */
 struct device fallback_dev = {
-	.bus_id = "fallback device",
+	.init_name = "fallback device",
 	.coherent_dma_mask = DMA_32BIT_MASK,
 	.dma_mask = &fallback_dev.coherent_dma_mask,
 };
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index 6543a5547c84..0e499757309b 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -2220,8 +2220,8 @@ pfm_alloc_file(pfm_context_t *ctx)
 	DPRINT(("new inode ino=%ld @%p\n", inode->i_ino, inode));
 
 	inode->i_mode = S_IFCHR|S_IRUGO;
-	inode->i_uid = current->fsuid;
-	inode->i_gid = current->fsgid;
+	inode->i_uid = current_fsuid();
+	inode->i_gid = current_fsgid();
 
 	sprintf(name, "[%lu]", inode->i_ino);
 	this.name = name;
@@ -2399,22 +2399,33 @@ error_kmem:
 static int
 pfm_bad_permissions(struct task_struct *task)
 {
+	const struct cred *tcred;
+	uid_t uid = current_uid();
+	gid_t gid = current_gid();
+	int ret;
+
+	rcu_read_lock();
+	tcred = __task_cred(task);
+
 	/* inspired by ptrace_attach() */
 	DPRINT(("cur: uid=%d gid=%d task: euid=%d suid=%d uid=%d egid=%d sgid=%d\n",
-		current->uid,
-		current->gid,
-		task->euid,
-		task->suid,
-		task->uid,
-		task->egid,
-		task->sgid));
+		uid,
+		gid,
+		tcred->euid,
+		tcred->suid,
+		tcred->uid,
+		tcred->egid,
+		tcred->sgid));
 
-	return ((current->uid != task->euid)
-	    || (current->uid != task->suid)
-	    || (current->uid != task->uid)
-	    || (current->gid != task->egid)
-	    || (current->gid != task->sgid)
-	    || (current->gid != task->gid)) && !capable(CAP_SYS_PTRACE);
+	ret = ((uid != tcred->euid)
+	    || (uid != tcred->suid)
+	    || (uid != tcred->uid)
+	    || (gid != tcred->egid)
+	    || (gid != tcred->sgid)
+	    || (gid != tcred->gid)) && !capable(CAP_SYS_PTRACE);
+
+	rcu_read_unlock();
+	return ret;
 }
 
 static int
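pfm_bad_permissions() illustrates the other half of the credentials rule: a task's own IDs come from current_uid()/current_gid(), but another task's credentials must be sampled through __task_cred() inside an RCU read-side section, because creds are copy-on-write and replaced by pointer swap. The skeleton of such a check:

```c
#include <linux/cred.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>

/* Sketch: compare the caller's uid with another task's euid. The RCU
 * read lock keeps the sampled cred from being freed mid-inspection. */
static int demo_same_euid(struct task_struct *task)
{
	uid_t uid = current_uid();
	int same;

	rcu_read_lock();
	same = (uid == __task_cred(task)->euid);
	rcu_read_unlock();

	return same;
}
```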
diff --git a/arch/ia64/kernel/signal.c b/arch/ia64/kernel/signal.c
index e12500a9c443..e1821ca4c7df 100644
--- a/arch/ia64/kernel/signal.c
+++ b/arch/ia64/kernel/signal.c
@@ -229,7 +229,7 @@ ia64_rt_sigreturn (struct sigscratch *scr)
 	si.si_errno = 0;
 	si.si_code = SI_KERNEL;
 	si.si_pid = task_pid_vnr(current);
-	si.si_uid = current->uid;
+	si.si_uid = current_uid();
 	si.si_addr = sc;
 	force_sig_info(SIGSEGV, &si, current);
 	return retval;
@@ -326,7 +326,7 @@ force_sigsegv_info (int sig, void __user *addr)
 	si.si_errno = 0;
 	si.si_code = SI_KERNEL;
 	si.si_pid = task_pid_vnr(current);
-	si.si_uid = current->uid;
+	si.si_uid = current_uid();
 	si.si_addr = addr;
 	force_sig_info(SIGSEGV, &si, current);
 	return 0;
diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c
index 1dcbb85fc4ee..11463994a7d5 100644
--- a/arch/ia64/kernel/smpboot.c
+++ b/arch/ia64/kernel/smpboot.c
@@ -131,12 +131,6 @@ struct task_struct *task_for_booting_cpu;
  */
 DEFINE_PER_CPU(int, cpu_state);
 
-/* Bitmasks of currently online, and possible CPUs */
-cpumask_t cpu_online_map;
-EXPORT_SYMBOL(cpu_online_map);
-cpumask_t cpu_possible_map = CPU_MASK_NONE;
-EXPORT_SYMBOL(cpu_possible_map);
-
 cpumask_t cpu_core_map[NR_CPUS] __cacheline_aligned;
 EXPORT_SYMBOL(cpu_core_map);
 DEFINE_PER_CPU_SHARED_ALIGNED(cpumask_t, cpu_sibling_map);
@@ -688,7 +682,7 @@ int migrate_platform_irqs(unsigned int cpu)
 {
 	int new_cpei_cpu;
 	irq_desc_t *desc = NULL;
-	cpumask_t 	mask;
+	const struct cpumask *mask;
 	int 		retval = 0;
 
 	/*
@@ -701,7 +695,7 @@ int migrate_platform_irqs(unsigned int cpu)
 		 * Now re-target the CPEI to a different processor
 		 */
 		new_cpei_cpu = any_online_cpu(cpu_online_map);
-		mask = cpumask_of_cpu(new_cpei_cpu);
+		mask = cpumask_of(new_cpei_cpu);
 		set_cpei_target_cpu(new_cpei_cpu);
 		desc = irq_desc + ia64_cpe_irq;
 		/*
diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c
index 65c10a42c88f..f0ebb342409d 100644
--- a/arch/ia64/kernel/time.c
+++ b/arch/ia64/kernel/time.c
@@ -93,13 +93,14 @@ void ia64_account_on_switch(struct task_struct *prev, struct task_struct *next)
 	now = ia64_get_itc();
 
 	delta_stime = cycle_to_cputime(pi->ac_stime + (now - pi->ac_stamp));
-	account_system_time(prev, 0, delta_stime);
-	account_system_time_scaled(prev, delta_stime);
+	if (idle_task(smp_processor_id()) != prev)
+		account_system_time(prev, 0, delta_stime, delta_stime);
+	else
+		account_idle_time(delta_stime);
 
 	if (pi->ac_utime) {
 		delta_utime = cycle_to_cputime(pi->ac_utime);
-		account_user_time(prev, delta_utime);
-		account_user_time_scaled(prev, delta_utime);
+		account_user_time(prev, delta_utime, delta_utime);
 	}
 
 	pi->ac_stamp = ni->ac_stamp = now;
@@ -122,8 +123,10 @@ void account_system_vtime(struct task_struct *tsk)
 	now = ia64_get_itc();
 
 	delta_stime = cycle_to_cputime(ti->ac_stime + (now - ti->ac_stamp));
-	account_system_time(tsk, 0, delta_stime);
-	account_system_time_scaled(tsk, delta_stime);
+	if (irq_count() || idle_task(smp_processor_id()) != tsk)
+		account_system_time(tsk, 0, delta_stime, delta_stime);
+	else
+		account_idle_time(delta_stime);
 	ti->ac_stime = 0;
 
 	ti->ac_stamp = now;
@@ -143,8 +146,7 @@ void account_process_tick(struct task_struct *p, int user_tick)
 
 	if (ti->ac_utime) {
 		delta_utime = cycle_to_cputime(ti->ac_utime);
-		account_user_time(p, delta_utime);
-		account_user_time_scaled(p, delta_utime);
+		account_user_time(p, delta_utime, delta_utime);
 		ti->ac_utime = 0;
 	}
 }
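The time.c hunks track two generic accounting changes: account_system_time() and account_user_time() now take the scaled cputime as an extra argument (ia64 passes the same value twice since it keeps no separate scaled clock), and time burned in the idle task is reported through account_idle_time() instead of being booked as system time. The dispatch boils down to:

```c
#include <linux/kernel_stat.h>
#include <linux/sched.h>
#include <linux/smp.h>

/* Sketch of the new dispatch: idle goes to the idle bucket, anything
 * else is system time, with the scaled value passed alongside. */
static void demo_account_stime(struct task_struct *tsk, cputime_t delta)
{
	if (idle_task(smp_processor_id()) != tsk)
		account_system_time(tsk, 0, delta, delta);
	else
		account_idle_time(delta);
}
```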
diff --git a/arch/ia64/kernel/topology.c b/arch/ia64/kernel/topology.c
index c75b914f2d6b..a8d61a3e9a94 100644
--- a/arch/ia64/kernel/topology.c
+++ b/arch/ia64/kernel/topology.c
@@ -219,7 +219,7 @@ static ssize_t show_shared_cpu_map(struct cache_info *this_leaf, char *buf)
 	cpumask_t shared_cpu_map;
 
 	cpus_and(shared_cpu_map, this_leaf->shared_cpu_map, cpu_online_map);
-	len = cpumask_scnprintf(buf, NR_CPUS+1, shared_cpu_map);
+	len = cpumask_scnprintf(buf, NR_CPUS+1, &shared_cpu_map);
 	len += sprintf(buf+len, "\n");
 	return len;
 }
diff --git a/arch/ia64/kvm/Makefile b/arch/ia64/kvm/Makefile
index 92cef66ca268..0bb99b732908 100644
--- a/arch/ia64/kvm/Makefile
+++ b/arch/ia64/kvm/Makefile
@@ -51,8 +51,8 @@ EXTRA_AFLAGS += -Ivirt/kvm -Iarch/ia64/kvm/
 common-objs = $(addprefix ../../../virt/kvm/, kvm_main.o ioapic.o \
 		coalesced_mmio.o irq_comm.o)
 
-ifeq ($(CONFIG_DMAR),y)
-common-objs += $(addprefix ../../../virt/kvm/, vtd.o)
+ifeq ($(CONFIG_IOMMU_API),y)
+common-objs += $(addprefix ../../../virt/kvm/, iommu.o)
 endif
 
 kvm-objs := $(common-objs) kvm-ia64.o kvm_fw.o
@@ -60,7 +60,7 @@ obj-$(CONFIG_KVM) += kvm.o
 
 CFLAGS_vcpu.o += -mfixed-range=f2-f5,f12-f127
 kvm-intel-objs = vmm.o vmm_ivt.o trampoline.o vcpu.o optvfault.o mmio.o \
-	vtlb.o process.o
+	vtlb.o process.o kvm_lib.o
 #Add link memcpy and memset to avoid possible structure assignment error
 kvm-intel-objs += memcpy.o memset.o
 obj-$(CONFIG_KVM_INTEL) += kvm-intel.o
diff --git a/arch/ia64/kvm/asm-offsets.c b/arch/ia64/kvm/asm-offsets.c
index 4e3dc13a619c..0c3564a7a033 100644
--- a/arch/ia64/kvm/asm-offsets.c
+++ b/arch/ia64/kvm/asm-offsets.c
@@ -24,19 +24,10 @@
 
 #include <linux/autoconf.h>
 #include <linux/kvm_host.h>
+#include <linux/kbuild.h>
 
 #include "vcpu.h"
 
-#define task_struct kvm_vcpu
-
-#define DEFINE(sym, val) \
-	asm volatile("\n->" #sym " (%0) " #val : : "i" (val))
-
-#define BLANK() asm volatile("\n->" : :)
-
-#define OFFSET(_sym, _str, _mem) \
-	DEFINE(_sym, offsetof(_str, _mem));
-
 void foo(void)
 {
 	DEFINE(VMM_TASK_SIZE, sizeof(struct kvm_vcpu));
diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
index af1464f7a6ad..4e586f6110aa 100644
--- a/arch/ia64/kvm/kvm-ia64.c
+++ b/arch/ia64/kvm/kvm-ia64.c
@@ -31,6 +31,7 @@
 #include <linux/bitops.h>
 #include <linux/hrtimer.h>
 #include <linux/uaccess.h>
+#include <linux/iommu.h>
 #include <linux/intel-iommu.h>
 
 #include <asm/pgtable.h>
@@ -180,7 +181,6 @@ int kvm_dev_ioctl_check_extension(long ext)
 
 	switch (ext) {
 	case KVM_CAP_IRQCHIP:
-	case KVM_CAP_USER_MEMORY:
 	case KVM_CAP_MP_STATE:
 
 		r = 1;
@@ -189,7 +189,7 @@ int kvm_dev_ioctl_check_extension(long ext)
 		r = KVM_COALESCED_MMIO_PAGE_OFFSET;
 		break;
 	case KVM_CAP_IOMMU:
-		r = intel_iommu_found();
+		r = iommu_found();
 		break;
 	default:
 		r = 0;
@@ -439,7 +439,6 @@ int kvm_emulate_halt(struct kvm_vcpu *vcpu)
 		expires = div64_u64(itc_diff, cyc_per_usec);
 		kt = ktime_set(0, 1000 * expires);
 
-		down_read(&vcpu->kvm->slots_lock);
 		vcpu->arch.ht_active = 1;
 		hrtimer_start(p_ht, kt, HRTIMER_MODE_ABS);
 
@@ -452,7 +451,6 @@ int kvm_emulate_halt(struct kvm_vcpu *vcpu)
 		if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED)
 			vcpu->arch.mp_state =
 				KVM_MP_STATE_RUNNABLE;
-		up_read(&vcpu->kvm->slots_lock);
 
 		if (vcpu->arch.mp_state != KVM_MP_STATE_RUNNABLE)
 			return -EINTR;
@@ -476,6 +474,13 @@ static int handle_external_interrupt(struct kvm_vcpu *vcpu,
 	return 1;
 }
 
+static int handle_vcpu_debug(struct kvm_vcpu *vcpu,
+		struct kvm_run *kvm_run)
+{
+	printk("VMM: %s", vcpu->arch.log_buf);
+	return 1;
+}
+
 static int (*kvm_vti_exit_handlers[])(struct kvm_vcpu *vcpu,
 		struct kvm_run *kvm_run) = {
 	[EXIT_REASON_VM_PANIC]              = handle_vm_error,
@@ -487,6 +492,7 @@ static int (*kvm_vti_exit_handlers[])(struct kvm_vcpu *vcpu,
 	[EXIT_REASON_EXTERNAL_INTERRUPT]    = handle_external_interrupt,
 	[EXIT_REASON_IPI]                   = handle_ipi,
 	[EXIT_REASON_PTC_G]                 = handle_global_purge,
+	[EXIT_REASON_DEBUG]                 = handle_vcpu_debug,
 
 };
 
@@ -698,27 +704,24 @@ out:
 	return r;
 }
 
-/*
- * Allocate 16M memory for every vm to hold its specific data.
703 | * Its memory map is defined in kvm_host.h. | ||
704 | */ | ||
705 | static struct kvm *kvm_alloc_kvm(void) | 707 | static struct kvm *kvm_alloc_kvm(void) |
706 | { | 708 | { |
707 | 709 | ||
708 | struct kvm *kvm; | 710 | struct kvm *kvm; |
709 | uint64_t vm_base; | 711 | uint64_t vm_base; |
710 | 712 | ||
713 | BUG_ON(sizeof(struct kvm) > KVM_VM_STRUCT_SIZE); | ||
714 | |||
711 | vm_base = __get_free_pages(GFP_KERNEL, get_order(KVM_VM_DATA_SIZE)); | 715 | vm_base = __get_free_pages(GFP_KERNEL, get_order(KVM_VM_DATA_SIZE)); |
712 | 716 | ||
713 | if (!vm_base) | 717 | if (!vm_base) |
714 | return ERR_PTR(-ENOMEM); | 718 | return ERR_PTR(-ENOMEM); |
715 | printk(KERN_DEBUG"kvm: VM data's base Address:0x%lx\n", vm_base); | ||
716 | 719 | ||
717 | /* Zero all pages before use! */ | ||
718 | memset((void *)vm_base, 0, KVM_VM_DATA_SIZE); | 720 | memset((void *)vm_base, 0, KVM_VM_DATA_SIZE); |
719 | 721 | kvm = (struct kvm *)(vm_base + | |
720 | kvm = (struct kvm *)(vm_base + KVM_VM_OFS); | 722 | offsetof(struct kvm_vm_data, kvm_vm_struct)); |
721 | kvm->arch.vm_base = vm_base; | 723 | kvm->arch.vm_base = vm_base; |
724 | printk(KERN_DEBUG"kvm: vm's data area:0x%lx\n", vm_base); | ||
722 | 725 | ||
723 | return kvm; | 726 | return kvm; |
724 | } | 727 | } |
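kvm_alloc_kvm() now derives every address inside the per-VM area from offsetof() on struct kvm_vm_data instead of hand-maintained KVM_*_OFS constants, so the compiler keeps the memory map consistent (the new BUG_ON enforces that struct kvm still fits its slot). A hedged sketch of the layout this implies; the real definition lives in asm/kvm_host.h, and the exact field order and size macros here are assumptions, not taken from this diff:

/* One contiguous allocation per VM; the members referenced by this
 * diff are kvm_p2m, kvm_vm_struct, kvm_mem_dirty_log and vcpu_data[]. */
struct kvm_vcpu_data {
        char vcpu_vhpt[VHPT_SIZE];      /* assumed slot names and sizes */
        char vcpu_vtlb[VTLB_SIZE];
        char vcpu_vpd[VPD_SIZE];
        char vcpu_struct[VCPU_STRUCT_SIZE];
};

struct kvm_vm_data {
        char kvm_p2m[KVM_P2M_SIZE];
        char kvm_vm_struct[KVM_VM_STRUCT_SIZE];
        char kvm_mem_dirty_log[KVM_MEM_DIRTY_LOG_SIZE];
        struct kvm_vcpu_data vcpu_data[KVM_MAX_VCPUS];
};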
@@ -760,21 +763,12 @@ static void kvm_build_io_pmt(struct kvm *kvm) | |||
760 | 763 | ||
761 | static void kvm_init_vm(struct kvm *kvm) | 764 | static void kvm_init_vm(struct kvm *kvm) |
762 | { | 765 | { |
763 | long vm_base; | ||
764 | |||
765 | BUG_ON(!kvm); | 766 | BUG_ON(!kvm); |
766 | 767 | ||
767 | kvm->arch.metaphysical_rr0 = GUEST_PHYSICAL_RR0; | 768 | kvm->arch.metaphysical_rr0 = GUEST_PHYSICAL_RR0; |
768 | kvm->arch.metaphysical_rr4 = GUEST_PHYSICAL_RR4; | 769 | kvm->arch.metaphysical_rr4 = GUEST_PHYSICAL_RR4; |
769 | kvm->arch.vmm_init_rr = VMM_INIT_RR; | 770 | kvm->arch.vmm_init_rr = VMM_INIT_RR; |
770 | 771 | ||
771 | vm_base = kvm->arch.vm_base; | ||
772 | if (vm_base) { | ||
773 | kvm->arch.vhpt_base = vm_base + KVM_VHPT_OFS; | ||
774 | kvm->arch.vtlb_base = vm_base + KVM_VTLB_OFS; | ||
775 | kvm->arch.vpd_base = vm_base + KVM_VPD_OFS; | ||
776 | } | ||
777 | |||
778 | /* | 772 | /* |
779 | *Fill P2M entries for MMIO/IO ranges | 773 | *Fill P2M entries for MMIO/IO ranges |
780 | */ | 774 | */ |
@@ -838,9 +832,8 @@ static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip) | |||
838 | 832 | ||
839 | int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) | 833 | int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) |
840 | { | 834 | { |
841 | int i; | ||
842 | struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd); | 835 | struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd); |
843 | int r; | 836 | int i; |
844 | 837 | ||
845 | vcpu_load(vcpu); | 838 | vcpu_load(vcpu); |
846 | 839 | ||
@@ -857,18 +850,7 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) | |||
857 | 850 | ||
858 | vpd->vpr = regs->vpd.vpr; | 851 | vpd->vpr = regs->vpd.vpr; |
859 | 852 | ||
860 | r = -EFAULT; | 853 | memcpy(&vcpu->arch.guest, ®s->saved_guest, sizeof(union context)); |
861 | r = copy_from_user(&vcpu->arch.guest, regs->saved_guest, | ||
862 | sizeof(union context)); | ||
863 | if (r) | ||
864 | goto out; | ||
865 | r = copy_from_user(vcpu + 1, regs->saved_stack + | ||
866 | sizeof(struct kvm_vcpu), | ||
867 | IA64_STK_OFFSET - sizeof(struct kvm_vcpu)); | ||
868 | if (r) | ||
869 | goto out; | ||
870 | vcpu->arch.exit_data = | ||
871 | ((struct kvm_vcpu *)(regs->saved_stack))->arch.exit_data; | ||
872 | 854 | ||
873 | RESTORE_REGS(mp_state); | 855 | RESTORE_REGS(mp_state); |
874 | RESTORE_REGS(vmm_rr); | 856 | RESTORE_REGS(vmm_rr); |
@@ -902,9 +884,8 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) | |||
902 | set_bit(KVM_REQ_RESUME, &vcpu->requests); | 884 | set_bit(KVM_REQ_RESUME, &vcpu->requests); |
903 | 885 | ||
904 | vcpu_put(vcpu); | 886 | vcpu_put(vcpu); |
905 | r = 0; | 887 | |
906 | out: | 888 | return 0; |
907 | return r; | ||
908 | } | 889 | } |
909 | 890 | ||
910 | long kvm_arch_vm_ioctl(struct file *filp, | 891 | long kvm_arch_vm_ioctl(struct file *filp, |
@@ -1166,10 +1147,11 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) | |||
1166 | /*Set entry address for first run.*/ | 1147 | /*Set entry address for first run.*/ |
1167 | regs->cr_iip = PALE_RESET_ENTRY; | 1148 | regs->cr_iip = PALE_RESET_ENTRY; |
1168 | 1149 | ||
1169 | /*Initilize itc offset for vcpus*/ | 1150 | /*Initialize itc offset for vcpus*/ |
1170 | itc_offset = 0UL - ia64_getreg(_IA64_REG_AR_ITC); | 1151 | itc_offset = 0UL - ia64_getreg(_IA64_REG_AR_ITC); |
1171 | for (i = 0; i < MAX_VCPU_NUM; i++) { | 1152 | for (i = 0; i < KVM_MAX_VCPUS; i++) { |
1172 | v = (struct kvm_vcpu *)((char *)vcpu + VCPU_SIZE * i); | 1153 | v = (struct kvm_vcpu *)((char *)vcpu + |
1154 | sizeof(struct kvm_vcpu_data) * i); | ||
1173 | v->arch.itc_offset = itc_offset; | 1155 | v->arch.itc_offset = itc_offset; |
1174 | v->arch.last_itc = 0; | 1156 | v->arch.last_itc = 0; |
1175 | } | 1157 | } |
@@ -1183,7 +1165,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) | |||
1183 | vcpu->arch.apic->vcpu = vcpu; | 1165 | vcpu->arch.apic->vcpu = vcpu; |
1184 | 1166 | ||
1185 | p_ctx->gr[1] = 0; | 1167 | p_ctx->gr[1] = 0; |
1186 | p_ctx->gr[12] = (unsigned long)((char *)vmm_vcpu + IA64_STK_OFFSET); | 1168 | p_ctx->gr[12] = (unsigned long)((char *)vmm_vcpu + KVM_STK_OFFSET); |
1187 | p_ctx->gr[13] = (unsigned long)vmm_vcpu; | 1169 | p_ctx->gr[13] = (unsigned long)vmm_vcpu; |
1188 | p_ctx->psr = 0x1008522000UL; | 1170 | p_ctx->psr = 0x1008522000UL; |
1189 | p_ctx->ar[40] = FPSR_DEFAULT; /*fpsr*/ | 1171 | p_ctx->ar[40] = FPSR_DEFAULT; /*fpsr*/ |
@@ -1218,12 +1200,12 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) | |||
1218 | vcpu->arch.hlt_timer.function = hlt_timer_fn; | 1200 | vcpu->arch.hlt_timer.function = hlt_timer_fn; |
1219 | 1201 | ||
1220 | vcpu->arch.last_run_cpu = -1; | 1202 | vcpu->arch.last_run_cpu = -1; |
1221 | vcpu->arch.vpd = (struct vpd *)VPD_ADDR(vcpu->vcpu_id); | 1203 | vcpu->arch.vpd = (struct vpd *)VPD_BASE(vcpu->vcpu_id); |
1222 | vcpu->arch.vsa_base = kvm_vsa_base; | 1204 | vcpu->arch.vsa_base = kvm_vsa_base; |
1223 | vcpu->arch.__gp = kvm_vmm_gp; | 1205 | vcpu->arch.__gp = kvm_vmm_gp; |
1224 | vcpu->arch.dirty_log_lock_pa = __pa(&kvm->arch.dirty_log_lock); | 1206 | vcpu->arch.dirty_log_lock_pa = __pa(&kvm->arch.dirty_log_lock); |
1225 | vcpu->arch.vhpt.hash = (struct thash_data *)VHPT_ADDR(vcpu->vcpu_id); | 1207 | vcpu->arch.vhpt.hash = (struct thash_data *)VHPT_BASE(vcpu->vcpu_id); |
1226 | vcpu->arch.vtlb.hash = (struct thash_data *)VTLB_ADDR(vcpu->vcpu_id); | 1208 | vcpu->arch.vtlb.hash = (struct thash_data *)VTLB_BASE(vcpu->vcpu_id); |
1227 | init_ptce_info(vcpu); | 1209 | init_ptce_info(vcpu); |
1228 | 1210 | ||
1229 | r = 0; | 1211 | r = 0; |
@@ -1273,12 +1255,22 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, | |||
1273 | int r; | 1255 | int r; |
1274 | int cpu; | 1256 | int cpu; |
1275 | 1257 | ||
1258 | BUG_ON(sizeof(struct kvm_vcpu) > VCPU_STRUCT_SIZE/2); | ||
1259 | |||
1260 | r = -EINVAL; | ||
1261 | if (id >= KVM_MAX_VCPUS) { | ||
1262 | printk(KERN_ERR"kvm: Can't configure vcpus > %ld", | ||
1263 | KVM_MAX_VCPUS); | ||
1264 | goto fail; | ||
1265 | } | ||
1266 | |||
1276 | r = -ENOMEM; | 1267 | r = -ENOMEM; |
1277 | if (!vm_base) { | 1268 | if (!vm_base) { |
1278 | printk(KERN_ERR"kvm: Create vcpu[%d] error!\n", id); | 1269 | printk(KERN_ERR"kvm: Create vcpu[%d] error!\n", id); |
1279 | goto fail; | 1270 | goto fail; |
1280 | } | 1271 | } |
1281 | vcpu = (struct kvm_vcpu *)(vm_base + KVM_VCPU_OFS + VCPU_SIZE * id); | 1272 | vcpu = (struct kvm_vcpu *)(vm_base + offsetof(struct kvm_vm_data, |
1273 | vcpu_data[id].vcpu_struct)); | ||
1282 | vcpu->kvm = kvm; | 1274 | vcpu->kvm = kvm; |
1283 | 1275 | ||
1284 | cpu = get_cpu(); | 1276 | cpu = get_cpu(); |
@@ -1374,9 +1366,9 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) | |||
1374 | 1366 | ||
1375 | int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) | 1367 | int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) |
1376 | { | 1368 | { |
1377 | int i; | ||
1378 | int r; | ||
1379 | struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd); | 1369 | struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd); |
1370 | int i; | ||
1371 | |||
1380 | vcpu_load(vcpu); | 1372 | vcpu_load(vcpu); |
1381 | 1373 | ||
1382 | for (i = 0; i < 16; i++) { | 1374 | for (i = 0; i < 16; i++) { |
@@ -1391,14 +1383,8 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) | |||
1391 | regs->vpd.vpsr = vpd->vpsr; | 1383 | regs->vpd.vpsr = vpd->vpsr; |
1392 | regs->vpd.vpr = vpd->vpr; | 1384 | regs->vpd.vpr = vpd->vpr; |
1393 | 1385 | ||
1394 | r = -EFAULT; | 1386 | memcpy(®s->saved_guest, &vcpu->arch.guest, sizeof(union context)); |
1395 | r = copy_to_user(regs->saved_guest, &vcpu->arch.guest, | 1387 | |
1396 | sizeof(union context)); | ||
1397 | if (r) | ||
1398 | goto out; | ||
1399 | r = copy_to_user(regs->saved_stack, (void *)vcpu, IA64_STK_OFFSET); | ||
1400 | if (r) | ||
1401 | goto out; | ||
1402 | SAVE_REGS(mp_state); | 1388 | SAVE_REGS(mp_state); |
1403 | SAVE_REGS(vmm_rr); | 1389 | SAVE_REGS(vmm_rr); |
1404 | memcpy(regs->itrs, vcpu->arch.itrs, sizeof(struct thash_data) * NITRS); | 1390 | memcpy(regs->itrs, vcpu->arch.itrs, sizeof(struct thash_data) * NITRS); |
@@ -1426,10 +1412,9 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) | |||
1426 | SAVE_REGS(metaphysical_saved_rr4); | 1412 | SAVE_REGS(metaphysical_saved_rr4); |
1427 | SAVE_REGS(fp_psr); | 1413 | SAVE_REGS(fp_psr); |
1428 | SAVE_REGS(saved_gp); | 1414 | SAVE_REGS(saved_gp); |
1415 | |||
1429 | vcpu_put(vcpu); | 1416 | vcpu_put(vcpu); |
1430 | r = 0; | 1417 | return 0; |
1431 | out: | ||
1432 | return r; | ||
1433 | } | 1418 | } |
1434 | 1419 | ||
1435 | void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) | 1420 | void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) |
@@ -1457,6 +1442,9 @@ int kvm_arch_set_memory_region(struct kvm *kvm, | |||
1457 | struct kvm_memory_slot *memslot = &kvm->memslots[mem->slot]; | 1442 | struct kvm_memory_slot *memslot = &kvm->memslots[mem->slot]; |
1458 | unsigned long base_gfn = memslot->base_gfn; | 1443 | unsigned long base_gfn = memslot->base_gfn; |
1459 | 1444 | ||
1445 | if (base_gfn + npages > (KVM_MAX_MEM_SIZE >> PAGE_SHIFT)) | ||
1446 | return -ENOMEM; | ||
1447 | |||
1460 | for (i = 0; i < npages; i++) { | 1448 | for (i = 0; i < npages; i++) { |
1461 | pfn = gfn_to_pfn(kvm, base_gfn + i); | 1449 | pfn = gfn_to_pfn(kvm, base_gfn + i); |
1462 | if (!kvm_is_mmio_pfn(pfn)) { | 1450 | if (!kvm_is_mmio_pfn(pfn)) { |
@@ -1631,8 +1619,8 @@ static int kvm_ia64_sync_dirty_log(struct kvm *kvm, | |||
1631 | struct kvm_memory_slot *memslot; | 1619 | struct kvm_memory_slot *memslot; |
1632 | int r, i; | 1620 | int r, i; |
1633 | long n, base; | 1621 | long n, base; |
1634 | unsigned long *dirty_bitmap = (unsigned long *)((void *)kvm - KVM_VM_OFS | 1622 | unsigned long *dirty_bitmap = (unsigned long *)(kvm->arch.vm_base + |
1635 | + KVM_MEM_DIRTY_LOG_OFS); | 1623 | offsetof(struct kvm_vm_data, kvm_mem_dirty_log)); |
1636 | 1624 | ||
1637 | r = -EINVAL; | 1625 | r = -EINVAL; |
1638 | if (log->slot >= KVM_MEMORY_SLOTS) | 1626 | if (log->slot >= KVM_MEMORY_SLOTS) |
diff --git a/arch/ia64/kvm/kvm_lib.c b/arch/ia64/kvm/kvm_lib.c new file mode 100644 index 000000000000..a85cb611ecd7 --- /dev/null +++ b/arch/ia64/kvm/kvm_lib.c | |||
@@ -0,0 +1,15 @@ | |||
1 | /* | ||
2 | * kvm_lib.c: Compile some libraries for kvm-intel module. | ||
3 | * | ||
4 | * Just include the kernel's library code, and disable symbol exports. | ||
5 | * Copyright (C) 2008, Intel Corporation. | ||
6 | * Xiantao Zhang (xiantao.zhang@intel.com) | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License version 2 as | ||
10 | * published by the Free Software Foundation. | ||
11 | * | ||
12 | */ | ||
13 | #undef CONFIG_MODULES | ||
14 | #include "../../../lib/vsprintf.c" | ||
15 | #include "../../../lib/ctype.c" | ||
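This new file gives the kvm-intel module private copies of vsnprintf() and the ctype tables it depends on: the VMM runs isolated from the host kernel image, so it cannot rely on exported symbols, and the printk() replacement added in vmm.c below needs a formatter it can reach. The #undef CONFIG_MODULES works because module.h compiles exports away when modules are off, roughly:

/* From <linux/module.h> of this era (approximate): with CONFIG_MODULES
 * undefined, EXPORT_SYMBOL() in the included .c files becomes a no-op. */
#ifdef CONFIG_MODULES
#define EXPORT_SYMBOL(sym) __EXPORT_SYMBOL(sym, "")
#else
#define EXPORT_SYMBOL(sym)
#endif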
diff --git a/arch/ia64/kvm/kvm_minstate.h b/arch/ia64/kvm/kvm_minstate.h index 2cc41d17cf99..b2bcaa2787aa 100644 --- a/arch/ia64/kvm/kvm_minstate.h +++ b/arch/ia64/kvm/kvm_minstate.h | |||
@@ -24,6 +24,8 @@ | |||
24 | #include <asm/asmmacro.h> | 24 | #include <asm/asmmacro.h> |
25 | #include <asm/types.h> | 25 | #include <asm/types.h> |
26 | #include <asm/kregs.h> | 26 | #include <asm/kregs.h> |
27 | #include <asm/kvm_host.h> | ||
28 | |||
27 | #include "asm-offsets.h" | 29 | #include "asm-offsets.h" |
28 | 30 | ||
29 | #define KVM_MINSTATE_START_SAVE_MIN \ | 31 | #define KVM_MINSTATE_START_SAVE_MIN \ |
@@ -33,7 +35,7 @@ | |||
33 | addl r22 = VMM_RBS_OFFSET,r1; /* compute base of RBS */ \ | 35 | addl r22 = VMM_RBS_OFFSET,r1; /* compute base of RBS */ \ |
34 | ;; \ | 36 | ;; \ |
35 | lfetch.fault.excl.nt1 [r22]; \ | 37 | lfetch.fault.excl.nt1 [r22]; \ |
36 | addl r1 = IA64_STK_OFFSET-VMM_PT_REGS_SIZE,r1; /* compute base of memory stack */ \ | 38 | addl r1 = KVM_STK_OFFSET-VMM_PT_REGS_SIZE, r1; \ |
37 | mov r23 = ar.bspstore; /* save ar.bspstore */ \ | 39 | mov r23 = ar.bspstore; /* save ar.bspstore */ \ |
38 | ;; \ | 40 | ;; \ |
39 | mov ar.bspstore = r22; /* switch to kernel RBS */\ | 41 | mov ar.bspstore = r22; /* switch to kernel RBS */\ |
diff --git a/arch/ia64/kvm/misc.h b/arch/ia64/kvm/misc.h index e585c4607344..dd979e00b574 100644 --- a/arch/ia64/kvm/misc.h +++ b/arch/ia64/kvm/misc.h | |||
@@ -27,7 +27,8 @@ | |||
27 | */ | 27 | */ |
28 | static inline uint64_t *kvm_host_get_pmt(struct kvm *kvm) | 28 | static inline uint64_t *kvm_host_get_pmt(struct kvm *kvm) |
29 | { | 29 | { |
30 | return (uint64_t *)(kvm->arch.vm_base + KVM_P2M_OFS); | 30 | return (uint64_t *)(kvm->arch.vm_base + |
31 | offsetof(struct kvm_vm_data, kvm_p2m)); | ||
31 | } | 32 | } |
32 | 33 | ||
33 | static inline void kvm_set_pmt_entry(struct kvm *kvm, gfn_t gfn, | 34 | static inline void kvm_set_pmt_entry(struct kvm *kvm, gfn_t gfn, |
diff --git a/arch/ia64/kvm/mmio.c b/arch/ia64/kvm/mmio.c index 7f1a858bc69f..21f63fffc379 100644 --- a/arch/ia64/kvm/mmio.c +++ b/arch/ia64/kvm/mmio.c | |||
@@ -66,31 +66,25 @@ void lsapic_write(struct kvm_vcpu *v, unsigned long addr, | |||
66 | 66 | ||
67 | switch (addr) { | 67 | switch (addr) { |
68 | case PIB_OFST_INTA: | 68 | case PIB_OFST_INTA: |
69 | /*panic_domain(NULL, "Undefined write on PIB INTA\n");*/ | 69 | panic_vm(v, "Undefined write on PIB INTA\n"); |
70 | panic_vm(v); | ||
71 | break; | 70 | break; |
72 | case PIB_OFST_XTP: | 71 | case PIB_OFST_XTP: |
73 | if (length == 1) { | 72 | if (length == 1) { |
74 | vlsapic_write_xtp(v, val); | 73 | vlsapic_write_xtp(v, val); |
75 | } else { | 74 | } else { |
76 | /*panic_domain(NULL, | 75 | panic_vm(v, "Undefined write on PIB XTP\n"); |
77 | "Undefined write on PIB XTP\n");*/ | ||
78 | panic_vm(v); | ||
79 | } | 76 | } |
80 | break; | 77 | break; |
81 | default: | 78 | default: |
82 | if (PIB_LOW_HALF(addr)) { | 79 | if (PIB_LOW_HALF(addr)) { |
83 | /*lower half */ | 80 | /*Lower half */ |
84 | if (length != 8) | 81 | if (length != 8) |
85 | /*panic_domain(NULL, | 82 | panic_vm(v, "Can't LHF write with size %ld!\n", |
86 | "Can't LHF write with size %ld!\n", | 83 | length); |
87 | length);*/ | ||
88 | panic_vm(v); | ||
89 | else | 84 | else |
90 | vlsapic_write_ipi(v, addr, val); | 85 | vlsapic_write_ipi(v, addr, val); |
91 | } else { /* upper half | 86 | } else { /*Upper half */ |
92 | printk("IPI-UHF write %lx\n",addr);*/ | 87 | panic_vm(v, "IPI-UHF write %lx\n", addr); |
93 | panic_vm(v); | ||
94 | } | 88 | } |
95 | break; | 89 | break; |
96 | } | 90 | } |
@@ -108,22 +102,18 @@ unsigned long lsapic_read(struct kvm_vcpu *v, unsigned long addr, | |||
108 | if (length == 1) /* 1 byte load */ | 102 | if (length == 1) /* 1 byte load */ |
109 | ; /* There is no i8259, there is no INTA access*/ | 103 | ; /* There is no i8259, there is no INTA access*/ |
110 | else | 104 | else |
111 | /*panic_domain(NULL,"Undefined read on PIB INTA\n"); */ | 105 | panic_vm(v, "Undefined read on PIB INTA\n"); |
112 | panic_vm(v); | ||
113 | 106 | ||
114 | break; | 107 | break; |
115 | case PIB_OFST_XTP: | 108 | case PIB_OFST_XTP: |
116 | if (length == 1) { | 109 | if (length == 1) { |
117 | result = VLSAPIC_XTP(v); | 110 | result = VLSAPIC_XTP(v); |
118 | /* printk("read xtp %lx\n", result); */ | ||
119 | } else { | 111 | } else { |
120 | /*panic_domain(NULL, | 112 | panic_vm(v, "Undefined read on PIB XTP\n"); |
121 | "Undefined read on PIB XTP\n");*/ | ||
122 | panic_vm(v); | ||
123 | } | 113 | } |
124 | break; | 114 | break; |
125 | default: | 115 | default: |
126 | panic_vm(v); | 116 | panic_vm(v, "Undefined addr access for lsapic!\n"); |
127 | break; | 117 | break; |
128 | } | 118 | } |
129 | return result; | 119 | return result; |
@@ -162,7 +152,7 @@ static void mmio_access(struct kvm_vcpu *vcpu, u64 src_pa, u64 *dest, | |||
162 | /* it's necessary to ensure zero extending */ | 152 | /* it's necessary to ensure zero extending */ |
163 | *dest = p->u.ioreq.data & (~0UL >> (64-(s*8))); | 153 | *dest = p->u.ioreq.data & (~0UL >> (64-(s*8))); |
164 | } else | 154 | } else |
165 | panic_vm(vcpu); | 155 | panic_vm(vcpu, "Unhandled mmio access returned!\n"); |
166 | out: | 156 | out: |
167 | local_irq_restore(psr); | 157 | local_irq_restore(psr); |
168 | return ; | 158 | return ; |
@@ -324,7 +314,9 @@ void emulate_io_inst(struct kvm_vcpu *vcpu, u64 padr, u64 ma) | |||
324 | return; | 314 | return; |
325 | } else { | 315 | } else { |
326 | inst_type = -1; | 316 | inst_type = -1; |
327 | panic_vm(vcpu); | 317 | panic_vm(vcpu, "Unsupported MMIO access instruction! \ |
318 | Bundle[0]=0x%lx, Bundle[1]=0x%lx\n", | ||
319 | bundle.i64[0], bundle.i64[1]); | ||
328 | } | 320 | } |
329 | 321 | ||
330 | size = 1 << size; | 322 | size = 1 << size; |
@@ -335,7 +327,7 @@ void emulate_io_inst(struct kvm_vcpu *vcpu, u64 padr, u64 ma) | |||
335 | if (inst_type == SL_INTEGER) | 327 | if (inst_type == SL_INTEGER) |
336 | vcpu_set_gr(vcpu, inst.M1.r1, data, 0); | 328 | vcpu_set_gr(vcpu, inst.M1.r1, data, 0); |
337 | else | 329 | else |
338 | panic_vm(vcpu); | 330 | panic_vm(vcpu, "Unsupported instruction type!\n"); |
339 | 331 | ||
340 | } | 332 | } |
341 | vcpu_increment_iip(vcpu); | 333 | vcpu_increment_iip(vcpu); |
diff --git a/arch/ia64/kvm/process.c b/arch/ia64/kvm/process.c index 800817307b7b..552d07724207 100644 --- a/arch/ia64/kvm/process.c +++ b/arch/ia64/kvm/process.c | |||
@@ -527,7 +527,8 @@ void reflect_interruption(u64 ifa, u64 isr, u64 iim, | |||
527 | vector = vec2off[vec]; | 527 | vector = vec2off[vec]; |
528 | 528 | ||
529 | if (!(vpsr & IA64_PSR_IC) && (vector != IA64_DATA_NESTED_TLB_VECTOR)) { | 529 | if (!(vpsr & IA64_PSR_IC) && (vector != IA64_DATA_NESTED_TLB_VECTOR)) { |
530 | panic_vm(vcpu); | 530 | panic_vm(vcpu, "Interruption with vector :0x%lx occurs " |
531 | "with psr.ic = 0\n", vector); | ||
531 | return; | 532 | return; |
532 | } | 533 | } |
533 | 534 | ||
@@ -586,7 +587,7 @@ static void set_pal_call_result(struct kvm_vcpu *vcpu) | |||
586 | vcpu_set_gr(vcpu, 10, p->u.pal_data.ret.v1, 0); | 587 | vcpu_set_gr(vcpu, 10, p->u.pal_data.ret.v1, 0); |
587 | vcpu_set_gr(vcpu, 11, p->u.pal_data.ret.v2, 0); | 588 | vcpu_set_gr(vcpu, 11, p->u.pal_data.ret.v2, 0); |
588 | } else | 589 | } else |
589 | panic_vm(vcpu); | 590 | panic_vm(vcpu, "Mis-set for exit reason!\n"); |
590 | } | 591 | } |
591 | 592 | ||
592 | static void set_sal_call_data(struct kvm_vcpu *vcpu) | 593 | static void set_sal_call_data(struct kvm_vcpu *vcpu) |
@@ -614,7 +615,7 @@ static void set_sal_call_result(struct kvm_vcpu *vcpu) | |||
614 | vcpu_set_gr(vcpu, 10, p->u.sal_data.ret.r10, 0); | 615 | vcpu_set_gr(vcpu, 10, p->u.sal_data.ret.r10, 0); |
615 | vcpu_set_gr(vcpu, 11, p->u.sal_data.ret.r11, 0); | 616 | vcpu_set_gr(vcpu, 11, p->u.sal_data.ret.r11, 0); |
616 | } else | 617 | } else |
617 | panic_vm(vcpu); | 618 | panic_vm(vcpu, "Mis-set for exit reason!\n"); |
618 | } | 619 | } |
619 | 620 | ||
620 | void kvm_ia64_handle_break(unsigned long ifa, struct kvm_pt_regs *regs, | 621 | void kvm_ia64_handle_break(unsigned long ifa, struct kvm_pt_regs *regs, |
@@ -680,7 +681,7 @@ static void generate_exirq(struct kvm_vcpu *vcpu) | |||
680 | vpsr = VCPU(vcpu, vpsr); | 681 | vpsr = VCPU(vcpu, vpsr); |
681 | isr = vpsr & IA64_PSR_RI; | 682 | isr = vpsr & IA64_PSR_RI; |
682 | if (!(vpsr & IA64_PSR_IC)) | 683 | if (!(vpsr & IA64_PSR_IC)) |
683 | panic_vm(vcpu); | 684 | panic_vm(vcpu, "Trying to inject one IRQ with psr.ic=0\n"); |
684 | reflect_interruption(0, isr, 0, 12, regs); /* EXT IRQ */ | 685 | reflect_interruption(0, isr, 0, 12, regs); /* EXT IRQ */ |
685 | } | 686 | } |
686 | 687 | ||
@@ -941,8 +942,20 @@ static void vcpu_do_resume(struct kvm_vcpu *vcpu) | |||
941 | ia64_set_pta(vcpu->arch.vhpt.pta.val); | 942 | ia64_set_pta(vcpu->arch.vhpt.pta.val); |
942 | } | 943 | } |
943 | 944 | ||
945 | static void vmm_sanity_check(struct kvm_vcpu *vcpu) | ||
946 | { | ||
947 | struct exit_ctl_data *p = &vcpu->arch.exit_data; | ||
948 | |||
949 | if (!vmm_sanity && p->exit_reason != EXIT_REASON_DEBUG) { | ||
950 | panic_vm(vcpu, "Failed to do vmm sanity check, " | ||
951 | "it may be caused by a crashed vmm!\n\n"); | ||
952 | } | ||
953 | } | ||
954 | |||
944 | static void kvm_do_resume_op(struct kvm_vcpu *vcpu) | 955 | static void kvm_do_resume_op(struct kvm_vcpu *vcpu) |
945 | { | 956 | { |
957 | vmm_sanity_check(vcpu); /*Guarantee vcpu running on a healthy vmm!*/ | ||
958 | |||
946 | if (test_and_clear_bit(KVM_REQ_RESUME, &vcpu->requests)) { | 959 | if (test_and_clear_bit(KVM_REQ_RESUME, &vcpu->requests)) { |
947 | vcpu_do_resume(vcpu); | 960 | vcpu_do_resume(vcpu); |
948 | return; | 961 | return; |
@@ -968,3 +981,11 @@ void vmm_transition(struct kvm_vcpu *vcpu) | |||
968 | 1, 0, 0, 0, 0, 0); | 981 | 1, 0, 0, 0, 0, 0); |
969 | kvm_do_resume_op(vcpu); | 982 | kvm_do_resume_op(vcpu); |
970 | } | 983 | } |
984 | |||
985 | void vmm_panic_handler(u64 vec) | ||
986 | { | ||
987 | struct kvm_vcpu *vcpu = current_vcpu; | ||
988 | vmm_sanity = 0; | ||
989 | panic_vm(vcpu, "Unexpected interruption occurs in VMM, vector:0x%lx\n", | ||
990 | vec2off[vec]); | ||
991 | } | ||
diff --git a/arch/ia64/kvm/vcpu.c b/arch/ia64/kvm/vcpu.c index e44027ce5667..ecd526b55323 100644 --- a/arch/ia64/kvm/vcpu.c +++ b/arch/ia64/kvm/vcpu.c | |||
@@ -816,8 +816,9 @@ static void vcpu_set_itc(struct kvm_vcpu *vcpu, u64 val) | |||
816 | unsigned long vitv = VCPU(vcpu, itv); | 816 | unsigned long vitv = VCPU(vcpu, itv); |
817 | 817 | ||
818 | if (vcpu->vcpu_id == 0) { | 818 | if (vcpu->vcpu_id == 0) { |
819 | for (i = 0; i < MAX_VCPU_NUM; i++) { | 819 | for (i = 0; i < KVM_MAX_VCPUS; i++) { |
820 | v = (struct kvm_vcpu *)((char *)vcpu + VCPU_SIZE * i); | 820 | v = (struct kvm_vcpu *)((char *)vcpu + |
821 | sizeof(struct kvm_vcpu_data) * i); | ||
821 | VMX(v, itc_offset) = itc_offset; | 822 | VMX(v, itc_offset) = itc_offset; |
822 | VMX(v, last_itc) = 0; | 823 | VMX(v, last_itc) = 0; |
823 | } | 824 | } |
@@ -1650,7 +1651,8 @@ void vcpu_set_psr(struct kvm_vcpu *vcpu, unsigned long val) | |||
1650 | * Otherwise panic | 1651 | * Otherwise panic |
1651 | */ | 1652 | */ |
1652 | if (val & (IA64_PSR_PK | IA64_PSR_IS | IA64_PSR_VM)) | 1653 | if (val & (IA64_PSR_PK | IA64_PSR_IS | IA64_PSR_VM)) |
1653 | panic_vm(vcpu); | 1654 | panic_vm(vcpu, "Only support guests with vpsr.pk =0 \ |
1655 | & vpsr.is=0\n"); | ||
1654 | 1656 | ||
1655 | /* | 1657 | /* |
1656 | * For those IA64_PSR bits: id/da/dd/ss/ed/ia | 1658 | * For those IA64_PSR bits: id/da/dd/ss/ed/ia |
@@ -2103,7 +2105,7 @@ void kvm_init_all_rr(struct kvm_vcpu *vcpu) | |||
2103 | 2105 | ||
2104 | if (is_physical_mode(vcpu)) { | 2106 | if (is_physical_mode(vcpu)) { |
2105 | if (vcpu->arch.mode_flags & GUEST_PHY_EMUL) | 2107 | if (vcpu->arch.mode_flags & GUEST_PHY_EMUL) |
2106 | panic_vm(vcpu); | 2108 | panic_vm(vcpu, "Machine Status conflicts!\n"); |
2107 | 2109 | ||
2108 | ia64_set_rr((VRN0 << VRN_SHIFT), vcpu->arch.metaphysical_rr0); | 2110 | ia64_set_rr((VRN0 << VRN_SHIFT), vcpu->arch.metaphysical_rr0); |
2109 | ia64_dv_serialize_data(); | 2111 | ia64_dv_serialize_data(); |
@@ -2152,10 +2154,70 @@ int vmm_entry(void) | |||
2152 | return 0; | 2154 | return 0; |
2153 | } | 2155 | } |
2154 | 2156 | ||
2155 | void panic_vm(struct kvm_vcpu *v) | 2157 | static void kvm_show_registers(struct kvm_pt_regs *regs) |
2156 | { | 2158 | { |
2159 | unsigned long ip = regs->cr_iip + ia64_psr(regs)->ri; | ||
2160 | |||
2161 | struct kvm_vcpu *vcpu = current_vcpu; | ||
2162 | if (vcpu != NULL) | ||
2163 | printk("vcpu 0x%p vcpu %d\n", | ||
2164 | vcpu, vcpu->vcpu_id); | ||
2165 | |||
2166 | printk("psr : %016lx ifs : %016lx ip : [<%016lx>]\n", | ||
2167 | regs->cr_ipsr, regs->cr_ifs, ip); | ||
2168 | |||
2169 | printk("unat: %016lx pfs : %016lx rsc : %016lx\n", | ||
2170 | regs->ar_unat, regs->ar_pfs, regs->ar_rsc); | ||
2171 | printk("rnat: %016lx bspstore: %016lx pr : %016lx\n", | ||
2172 | regs->ar_rnat, regs->ar_bspstore, regs->pr); | ||
2173 | printk("ldrs: %016lx ccv : %016lx fpsr: %016lx\n", | ||
2174 | regs->loadrs, regs->ar_ccv, regs->ar_fpsr); | ||
2175 | printk("csd : %016lx ssd : %016lx\n", regs->ar_csd, regs->ar_ssd); | ||
2176 | printk("b0 : %016lx b6 : %016lx b7 : %016lx\n", regs->b0, | ||
2177 | regs->b6, regs->b7); | ||
2178 | printk("f6 : %05lx%016lx f7 : %05lx%016lx\n", | ||
2179 | regs->f6.u.bits[1], regs->f6.u.bits[0], | ||
2180 | regs->f7.u.bits[1], regs->f7.u.bits[0]); | ||
2181 | printk("f8 : %05lx%016lx f9 : %05lx%016lx\n", | ||
2182 | regs->f8.u.bits[1], regs->f8.u.bits[0], | ||
2183 | regs->f9.u.bits[1], regs->f9.u.bits[0]); | ||
2184 | printk("f10 : %05lx%016lx f11 : %05lx%016lx\n", | ||
2185 | regs->f10.u.bits[1], regs->f10.u.bits[0], | ||
2186 | regs->f11.u.bits[1], regs->f11.u.bits[0]); | ||
2187 | |||
2188 | printk("r1 : %016lx r2 : %016lx r3 : %016lx\n", regs->r1, | ||
2189 | regs->r2, regs->r3); | ||
2190 | printk("r8 : %016lx r9 : %016lx r10 : %016lx\n", regs->r8, | ||
2191 | regs->r9, regs->r10); | ||
2192 | printk("r11 : %016lx r12 : %016lx r13 : %016lx\n", regs->r11, | ||
2193 | regs->r12, regs->r13); | ||
2194 | printk("r14 : %016lx r15 : %016lx r16 : %016lx\n", regs->r14, | ||
2195 | regs->r15, regs->r16); | ||
2196 | printk("r17 : %016lx r18 : %016lx r19 : %016lx\n", regs->r17, | ||
2197 | regs->r18, regs->r19); | ||
2198 | printk("r20 : %016lx r21 : %016lx r22 : %016lx\n", regs->r20, | ||
2199 | regs->r21, regs->r22); | ||
2200 | printk("r23 : %016lx r24 : %016lx r25 : %016lx\n", regs->r23, | ||
2201 | regs->r24, regs->r25); | ||
2202 | printk("r26 : %016lx r27 : %016lx r28 : %016lx\n", regs->r26, | ||
2203 | regs->r27, regs->r28); | ||
2204 | printk("r29 : %016lx r30 : %016lx r31 : %016lx\n", regs->r29, | ||
2205 | regs->r30, regs->r31); | ||
2206 | |||
2207 | } | ||
2208 | |||
2209 | void panic_vm(struct kvm_vcpu *v, const char *fmt, ...) | ||
2210 | { | ||
2211 | va_list args; | ||
2212 | char buf[256]; | ||
2213 | |||
2214 | struct kvm_pt_regs *regs = vcpu_regs(v); | ||
2157 | struct exit_ctl_data *p = &v->arch.exit_data; | 2215 | struct exit_ctl_data *p = &v->arch.exit_data; |
2158 | 2216 | va_start(args, fmt); | |
2217 | vsnprintf(buf, sizeof(buf), fmt, args); | ||
2218 | va_end(args); | ||
2219 | printk(buf); | ||
2220 | kvm_show_registers(regs); | ||
2159 | p->exit_reason = EXIT_REASON_VM_PANIC; | 2221 | p->exit_reason = EXIT_REASON_VM_PANIC; |
2160 | vmm_transition(v); | 2222 | vmm_transition(v); |
2161 | /*Never to return*/ | 2223 | /*Never to return*/ |
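panic_vm() is now a printf-style diagnostic: it formats the caller's message, dumps the interrupted register frame via the new kvm_show_registers(), and only then forces the EXIT_REASON_VM_PANIC transition back to the host. A usage sketch; the guard condition here is invented for illustration, and real call sites appear throughout this diff:

/* Hypothetical caller: abort the guest with a formatted reason. */
if (unlikely(val & IA64_PSR_VM))
        panic_vm(vcpu, "guest tried to set psr.vm (val=0x%lx)\n", val);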
diff --git a/arch/ia64/kvm/vcpu.h b/arch/ia64/kvm/vcpu.h index e9b2a4e121c0..b2f12a562bdf 100644 --- a/arch/ia64/kvm/vcpu.h +++ b/arch/ia64/kvm/vcpu.h | |||
@@ -737,9 +737,12 @@ void kvm_init_vtlb(struct kvm_vcpu *v); | |||
737 | void kvm_init_vhpt(struct kvm_vcpu *v); | 737 | void kvm_init_vhpt(struct kvm_vcpu *v); |
738 | void thash_init(struct thash_cb *hcb, u64 sz); | 738 | void thash_init(struct thash_cb *hcb, u64 sz); |
739 | 739 | ||
740 | void panic_vm(struct kvm_vcpu *v); | 740 | void panic_vm(struct kvm_vcpu *v, const char *fmt, ...); |
741 | 741 | ||
742 | extern u64 ia64_call_vsa(u64 proc, u64 arg1, u64 arg2, u64 arg3, | 742 | extern u64 ia64_call_vsa(u64 proc, u64 arg1, u64 arg2, u64 arg3, |
743 | u64 arg4, u64 arg5, u64 arg6, u64 arg7); | 743 | u64 arg4, u64 arg5, u64 arg6, u64 arg7); |
744 | |||
745 | extern long vmm_sanity; | ||
746 | |||
744 | #endif | 747 | #endif |
745 | #endif /* __VCPU_H__ */ | 748 | #endif /* __VCPU_H__ */ |
diff --git a/arch/ia64/kvm/vmm.c b/arch/ia64/kvm/vmm.c index 2275bf4e681a..9eee5c04bacc 100644 --- a/arch/ia64/kvm/vmm.c +++ b/arch/ia64/kvm/vmm.c | |||
@@ -20,6 +20,7 @@ | |||
20 | */ | 20 | */ |
21 | 21 | ||
22 | 22 | ||
23 | #include<linux/kernel.h> | ||
23 | #include<linux/module.h> | 24 | #include<linux/module.h> |
24 | #include<asm/fpswa.h> | 25 | #include<asm/fpswa.h> |
25 | 26 | ||
@@ -31,6 +32,8 @@ MODULE_LICENSE("GPL"); | |||
31 | extern char kvm_ia64_ivt; | 32 | extern char kvm_ia64_ivt; |
32 | extern fpswa_interface_t *vmm_fpswa_interface; | 33 | extern fpswa_interface_t *vmm_fpswa_interface; |
33 | 34 | ||
35 | long vmm_sanity = 1; | ||
36 | |||
34 | struct kvm_vmm_info vmm_info = { | 37 | struct kvm_vmm_info vmm_info = { |
35 | .module = THIS_MODULE, | 38 | .module = THIS_MODULE, |
36 | .vmm_entry = vmm_entry, | 39 | .vmm_entry = vmm_entry, |
@@ -62,5 +65,31 @@ void vmm_spin_unlock(spinlock_t *lock) | |||
62 | { | 65 | { |
63 | _vmm_raw_spin_unlock(lock); | 66 | _vmm_raw_spin_unlock(lock); |
64 | } | 67 | } |
68 | |||
69 | static void vcpu_debug_exit(struct kvm_vcpu *vcpu) | ||
70 | { | ||
71 | struct exit_ctl_data *p = &vcpu->arch.exit_data; | ||
72 | long psr; | ||
73 | |||
74 | local_irq_save(psr); | ||
75 | p->exit_reason = EXIT_REASON_DEBUG; | ||
76 | vmm_transition(vcpu); | ||
77 | local_irq_restore(psr); | ||
78 | } | ||
79 | |||
80 | asmlinkage int printk(const char *fmt, ...) | ||
81 | { | ||
82 | struct kvm_vcpu *vcpu = current_vcpu; | ||
83 | va_list args; | ||
84 | int r; | ||
85 | |||
86 | memset(vcpu->arch.log_buf, 0, VMM_LOG_LEN); | ||
87 | va_start(args, fmt); | ||
88 | r = vsnprintf(vcpu->arch.log_buf, VMM_LOG_LEN, fmt, args); | ||
89 | va_end(args); | ||
90 | vcpu_debug_exit(vcpu); | ||
91 | return r; | ||
92 | } | ||
93 | |||
65 | module_init(kvm_vmm_init) | 94 | module_init(kvm_vmm_init) |
66 | module_exit(kvm_vmm_exit) | 95 | module_exit(kvm_vmm_exit) |
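The asmlinkage printk() above shadows the kernel's: VMM code runs in its own mapping where the host printk is unreachable, so the message is rendered into the per-vcpu log_buf (using the vsnprintf compiled in by kvm_lib.c) and vcpu_debug_exit() forces an EXIT_REASON_DEBUG transition; on the host side, handle_vcpu_debug(), added earlier in this diff, relays the buffer through the real printk with a "VMM: " prefix. A sketch of a VMM-side caller; vmm_trace_vector() is a made-up example, while VMM_LOG_LEN and log_buf are the names this diff uses:

/* Any printk() inside VMM code now resolves to the local copy above. */
static void vmm_trace_vector(struct kvm_vcpu *vcpu, u64 vec)
{
        /* Lands in vcpu->arch.log_buf, then triggers a debug exit so
         * the host can emit it; execution resumes afterwards. */
        printk("unexpected vector 0x%lx on vcpu %d\n", vec, vcpu->vcpu_id);
}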
diff --git a/arch/ia64/kvm/vmm_ivt.S b/arch/ia64/kvm/vmm_ivt.S index c1d7251a1480..3ef1a017a318 100644 --- a/arch/ia64/kvm/vmm_ivt.S +++ b/arch/ia64/kvm/vmm_ivt.S | |||
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * /ia64/kvm_ivt.S | 2 | * arch/ia64/kvm/vmm_ivt.S |
3 | * | 3 | * |
4 | * Copyright (C) 1998-2001, 2003 Hewlett-Packard Co | 4 | * Copyright (C) 1998-2001, 2003 Hewlett-Packard Co |
5 | * Stephane Eranian <eranian@hpl.hp.com> | 5 | * Stephane Eranian <eranian@hpl.hp.com> |
@@ -70,32 +70,39 @@ | |||
70 | # define PSR_DEFAULT_BITS 0 | 70 | # define PSR_DEFAULT_BITS 0 |
71 | #endif | 71 | #endif |
72 | 72 | ||
73 | |||
74 | #define KVM_FAULT(n) \ | 73 | #define KVM_FAULT(n) \ |
75 | kvm_fault_##n:; \ | 74 | kvm_fault_##n:; \ |
76 | mov r19=n;; \ | 75 | mov r19=n;; \ |
77 | br.sptk.many kvm_fault_##n; \ | 76 | br.sptk.many kvm_vmm_panic; \ |
78 | ;; \ | 77 | ;; \ |
79 | |||
80 | 78 | ||
81 | #define KVM_REFLECT(n) \ | 79 | #define KVM_REFLECT(n) \ |
82 | mov r31=pr; \ | 80 | mov r31=pr; \ |
83 | mov r19=n; /* prepare to save predicates */ \ | 81 | mov r19=n; /* prepare to save predicates */ \ |
84 | mov r29=cr.ipsr; \ | 82 | mov r29=cr.ipsr; \ |
85 | ;; \ | 83 | ;; \ |
86 | tbit.z p6,p7=r29,IA64_PSR_VM_BIT; \ | 84 | tbit.z p6,p7=r29,IA64_PSR_VM_BIT; \ |
87 | (p7)br.sptk.many kvm_dispatch_reflection; \ | 85 | (p7) br.sptk.many kvm_dispatch_reflection; \ |
88 | br.sptk.many kvm_panic; \ | 86 | br.sptk.many kvm_vmm_panic; \ |
89 | 87 | ||
90 | 88 | GLOBAL_ENTRY(kvm_vmm_panic) | |
91 | GLOBAL_ENTRY(kvm_panic) | 89 | KVM_SAVE_MIN_WITH_COVER_R19 |
92 | br.sptk.many kvm_panic | 90 | alloc r14=ar.pfs,0,0,1,0 |
93 | ;; | 91 | mov out0=r15 |
94 | END(kvm_panic) | 92 | adds r3=8,r2 // set up second base pointer |
95 | 93 | ;; | |
96 | 94 | ssm psr.ic | |
97 | 95 | ;; | |
98 | 96 | srlz.i // guarantee that interruption collection is on | |
97 | ;; | ||
98 | //(p15) ssm psr.i // restore psr.i | ||
99 | addl r14=@gprel(ia64_leave_hypervisor),gp | ||
100 | ;; | ||
101 | KVM_SAVE_REST | ||
102 | mov rp=r14 | ||
103 | ;; | ||
104 | br.call.sptk.many b6=vmm_panic_handler; | ||
105 | END(kvm_vmm_panic) | ||
99 | 106 | ||
100 | .section .text.ivt,"ax" | 107 | .section .text.ivt,"ax" |
101 | 108 | ||
@@ -105,308 +112,307 @@ kvm_ia64_ivt: | |||
105 | /////////////////////////////////////////////////////////////// | 112 | /////////////////////////////////////////////////////////////// |
106 | // 0x0000 Entry 0 (size 64 bundles) VHPT Translation (8,20,47) | 113 | // 0x0000 Entry 0 (size 64 bundles) VHPT Translation (8,20,47) |
107 | ENTRY(kvm_vhpt_miss) | 114 | ENTRY(kvm_vhpt_miss) |
108 | KVM_FAULT(0) | 115 | KVM_FAULT(0) |
109 | END(kvm_vhpt_miss) | 116 | END(kvm_vhpt_miss) |
110 | 117 | ||
111 | |||
112 | .org kvm_ia64_ivt+0x400 | 118 | .org kvm_ia64_ivt+0x400 |
113 | //////////////////////////////////////////////////////////////// | 119 | //////////////////////////////////////////////////////////////// |
114 | // 0x0400 Entry 1 (size 64 bundles) ITLB (21) | 120 | // 0x0400 Entry 1 (size 64 bundles) ITLB (21) |
115 | ENTRY(kvm_itlb_miss) | 121 | ENTRY(kvm_itlb_miss) |
116 | mov r31 = pr | 122 | mov r31 = pr |
117 | mov r29=cr.ipsr; | 123 | mov r29=cr.ipsr; |
118 | ;; | 124 | ;; |
119 | tbit.z p6,p7=r29,IA64_PSR_VM_BIT; | 125 | tbit.z p6,p7=r29,IA64_PSR_VM_BIT; |
120 | (p6) br.sptk kvm_alt_itlb_miss | 126 | (p6) br.sptk kvm_alt_itlb_miss |
121 | mov r19 = 1 | 127 | mov r19 = 1 |
122 | br.sptk kvm_itlb_miss_dispatch | 128 | br.sptk kvm_itlb_miss_dispatch |
123 | KVM_FAULT(1); | 129 | KVM_FAULT(1); |
124 | END(kvm_itlb_miss) | 130 | END(kvm_itlb_miss) |
125 | 131 | ||
126 | .org kvm_ia64_ivt+0x0800 | 132 | .org kvm_ia64_ivt+0x0800 |
127 | ////////////////////////////////////////////////////////////////// | 133 | ////////////////////////////////////////////////////////////////// |
128 | // 0x0800 Entry 2 (size 64 bundles) DTLB (9,48) | 134 | // 0x0800 Entry 2 (size 64 bundles) DTLB (9,48) |
129 | ENTRY(kvm_dtlb_miss) | 135 | ENTRY(kvm_dtlb_miss) |
130 | mov r31 = pr | 136 | mov r31 = pr |
131 | mov r29=cr.ipsr; | 137 | mov r29=cr.ipsr; |
132 | ;; | 138 | ;; |
133 | tbit.z p6,p7=r29,IA64_PSR_VM_BIT; | 139 | tbit.z p6,p7=r29,IA64_PSR_VM_BIT; |
134 | (p6)br.sptk kvm_alt_dtlb_miss | 140 | (p6) br.sptk kvm_alt_dtlb_miss |
135 | br.sptk kvm_dtlb_miss_dispatch | 141 | br.sptk kvm_dtlb_miss_dispatch |
136 | END(kvm_dtlb_miss) | 142 | END(kvm_dtlb_miss) |
137 | 143 | ||
138 | .org kvm_ia64_ivt+0x0c00 | 144 | .org kvm_ia64_ivt+0x0c00 |
139 | //////////////////////////////////////////////////////////////////// | 145 | //////////////////////////////////////////////////////////////////// |
140 | // 0x0c00 Entry 3 (size 64 bundles) Alt ITLB (19) | 146 | // 0x0c00 Entry 3 (size 64 bundles) Alt ITLB (19) |
141 | ENTRY(kvm_alt_itlb_miss) | 147 | ENTRY(kvm_alt_itlb_miss) |
142 | mov r16=cr.ifa // get address that caused the TLB miss | 148 | mov r16=cr.ifa // get address that caused the TLB miss |
143 | ;; | 149 | ;; |
144 | movl r17=PAGE_KERNEL | 150 | movl r17=PAGE_KERNEL |
145 | mov r24=cr.ipsr | 151 | mov r24=cr.ipsr |
146 | movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff) | 152 | movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff) |
147 | ;; | 153 | ;; |
148 | and r19=r19,r16 // clear ed, reserved bits, and PTE control bits | 154 | and r19=r19,r16 // clear ed, reserved bits, and PTE control bits |
149 | ;; | 155 | ;; |
150 | or r19=r17,r19 // insert PTE control bits into r19 | 156 | or r19=r17,r19 // insert PTE control bits into r19 |
151 | ;; | 157 | ;; |
152 | movl r20=IA64_GRANULE_SHIFT<<2 | 158 | movl r20=IA64_GRANULE_SHIFT<<2 |
153 | ;; | 159 | ;; |
154 | mov cr.itir=r20 | 160 | mov cr.itir=r20 |
155 | ;; | 161 | ;; |
156 | itc.i r19 // insert the TLB entry | 162 | itc.i r19 // insert the TLB entry |
157 | mov pr=r31,-1 | 163 | mov pr=r31,-1 |
158 | rfi | 164 | rfi |
159 | END(kvm_alt_itlb_miss) | 165 | END(kvm_alt_itlb_miss) |
160 | 166 | ||
161 | .org kvm_ia64_ivt+0x1000 | 167 | .org kvm_ia64_ivt+0x1000 |
162 | ///////////////////////////////////////////////////////////////////// | 168 | ///////////////////////////////////////////////////////////////////// |
163 | // 0x1000 Entry 4 (size 64 bundles) Alt DTLB (7,46) | 169 | // 0x1000 Entry 4 (size 64 bundles) Alt DTLB (7,46) |
164 | ENTRY(kvm_alt_dtlb_miss) | 170 | ENTRY(kvm_alt_dtlb_miss) |
165 | mov r16=cr.ifa // get address that caused the TLB miss | 171 | mov r16=cr.ifa // get address that caused the TLB miss |
166 | ;; | 172 | ;; |
167 | movl r17=PAGE_KERNEL | 173 | movl r17=PAGE_KERNEL |
168 | movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff) | 174 | movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff) |
169 | mov r24=cr.ipsr | 175 | mov r24=cr.ipsr |
170 | ;; | 176 | ;; |
171 | and r19=r19,r16 // clear ed, reserved bits, and PTE control bits | 177 | and r19=r19,r16 // clear ed, reserved bits, and PTE control bits |
172 | ;; | 178 | ;; |
173 | or r19=r19,r17 // insert PTE control bits into r19 | 179 | or r19=r19,r17 // insert PTE control bits into r19 |
174 | ;; | 180 | ;; |
175 | movl r20=IA64_GRANULE_SHIFT<<2 | 181 | movl r20=IA64_GRANULE_SHIFT<<2 |
176 | ;; | 182 | ;; |
177 | mov cr.itir=r20 | 183 | mov cr.itir=r20 |
178 | ;; | 184 | ;; |
179 | itc.d r19 // insert the TLB entry | 185 | itc.d r19 // insert the TLB entry |
180 | mov pr=r31,-1 | 186 | mov pr=r31,-1 |
181 | rfi | 187 | rfi |
182 | END(kvm_alt_dtlb_miss) | 188 | END(kvm_alt_dtlb_miss) |
183 | 189 | ||
184 | .org kvm_ia64_ivt+0x1400 | 190 | .org kvm_ia64_ivt+0x1400 |
185 | ////////////////////////////////////////////////////////////////////// | 191 | ////////////////////////////////////////////////////////////////////// |
186 | // 0x1400 Entry 5 (size 64 bundles) Data nested TLB (6,45) | 192 | // 0x1400 Entry 5 (size 64 bundles) Data nested TLB (6,45) |
187 | ENTRY(kvm_nested_dtlb_miss) | 193 | ENTRY(kvm_nested_dtlb_miss) |
188 | KVM_FAULT(5) | 194 | KVM_FAULT(5) |
189 | END(kvm_nested_dtlb_miss) | 195 | END(kvm_nested_dtlb_miss) |
190 | 196 | ||
191 | .org kvm_ia64_ivt+0x1800 | 197 | .org kvm_ia64_ivt+0x1800 |
192 | ///////////////////////////////////////////////////////////////////// | 198 | ///////////////////////////////////////////////////////////////////// |
193 | // 0x1800 Entry 6 (size 64 bundles) Instruction Key Miss (24) | 199 | // 0x1800 Entry 6 (size 64 bundles) Instruction Key Miss (24) |
194 | ENTRY(kvm_ikey_miss) | 200 | ENTRY(kvm_ikey_miss) |
195 | KVM_REFLECT(6) | 201 | KVM_REFLECT(6) |
196 | END(kvm_ikey_miss) | 202 | END(kvm_ikey_miss) |
197 | 203 | ||
198 | .org kvm_ia64_ivt+0x1c00 | 204 | .org kvm_ia64_ivt+0x1c00 |
199 | ///////////////////////////////////////////////////////////////////// | 205 | ///////////////////////////////////////////////////////////////////// |
200 | // 0x1c00 Entry 7 (size 64 bundles) Data Key Miss (12,51) | 206 | // 0x1c00 Entry 7 (size 64 bundles) Data Key Miss (12,51) |
201 | ENTRY(kvm_dkey_miss) | 207 | ENTRY(kvm_dkey_miss) |
202 | KVM_REFLECT(7) | 208 | KVM_REFLECT(7) |
203 | END(kvm_dkey_miss) | 209 | END(kvm_dkey_miss) |
204 | 210 | ||
205 | .org kvm_ia64_ivt+0x2000 | 211 | .org kvm_ia64_ivt+0x2000 |
206 | //////////////////////////////////////////////////////////////////// | 212 | //////////////////////////////////////////////////////////////////// |
207 | // 0x2000 Entry 8 (size 64 bundles) Dirty-bit (54) | 213 | // 0x2000 Entry 8 (size 64 bundles) Dirty-bit (54) |
208 | ENTRY(kvm_dirty_bit) | 214 | ENTRY(kvm_dirty_bit) |
209 | KVM_REFLECT(8) | 215 | KVM_REFLECT(8) |
210 | END(kvm_dirty_bit) | 216 | END(kvm_dirty_bit) |
211 | 217 | ||
212 | .org kvm_ia64_ivt+0x2400 | 218 | .org kvm_ia64_ivt+0x2400 |
213 | //////////////////////////////////////////////////////////////////// | 219 | //////////////////////////////////////////////////////////////////// |
214 | // 0x2400 Entry 9 (size 64 bundles) Instruction Access-bit (27) | 220 | // 0x2400 Entry 9 (size 64 bundles) Instruction Access-bit (27) |
215 | ENTRY(kvm_iaccess_bit) | 221 | ENTRY(kvm_iaccess_bit) |
216 | KVM_REFLECT(9) | 222 | KVM_REFLECT(9) |
217 | END(kvm_iaccess_bit) | 223 | END(kvm_iaccess_bit) |
218 | 224 | ||
219 | .org kvm_ia64_ivt+0x2800 | 225 | .org kvm_ia64_ivt+0x2800 |
220 | /////////////////////////////////////////////////////////////////// | 226 | /////////////////////////////////////////////////////////////////// |
221 | // 0x2800 Entry 10 (size 64 bundles) Data Access-bit (15,55) | 227 | // 0x2800 Entry 10 (size 64 bundles) Data Access-bit (15,55) |
222 | ENTRY(kvm_daccess_bit) | 228 | ENTRY(kvm_daccess_bit) |
223 | KVM_REFLECT(10) | 229 | KVM_REFLECT(10) |
224 | END(kvm_daccess_bit) | 230 | END(kvm_daccess_bit) |
225 | 231 | ||
226 | .org kvm_ia64_ivt+0x2c00 | 232 | .org kvm_ia64_ivt+0x2c00 |
227 | ///////////////////////////////////////////////////////////////// | 233 | ///////////////////////////////////////////////////////////////// |
228 | // 0x2c00 Entry 11 (size 64 bundles) Break instruction (33) | 234 | // 0x2c00 Entry 11 (size 64 bundles) Break instruction (33) |
229 | ENTRY(kvm_break_fault) | 235 | ENTRY(kvm_break_fault) |
230 | mov r31=pr | 236 | mov r31=pr |
231 | mov r19=11 | 237 | mov r19=11 |
232 | mov r29=cr.ipsr | 238 | mov r29=cr.ipsr |
233 | ;; | 239 | ;; |
234 | KVM_SAVE_MIN_WITH_COVER_R19 | 240 | KVM_SAVE_MIN_WITH_COVER_R19 |
235 | ;; | 241 | ;; |
236 | alloc r14=ar.pfs,0,0,4,0 // now it's safe (must be first in insn group!) | 242 | alloc r14=ar.pfs,0,0,4,0 //(must be first in insn group!) |
237 | mov out0=cr.ifa | 243 | mov out0=cr.ifa |
238 | mov out2=cr.isr // FIXME: pity to make this slow access twice | 244 | mov out2=cr.isr // FIXME: pity to make this slow access twice |
239 | mov out3=cr.iim // FIXME: pity to make this slow access twice | 245 | mov out3=cr.iim // FIXME: pity to make this slow access twice |
240 | adds r3=8,r2 // set up second base pointer | 246 | adds r3=8,r2 // set up second base pointer |
241 | ;; | 247 | ;; |
242 | ssm psr.ic | 248 | ssm psr.ic |
243 | ;; | 249 | ;; |
244 | srlz.i // guarantee that interruption collection is on | 250 | srlz.i // guarantee that interruption collection is on |
245 | ;; | 251 | ;; |
246 | //(p15)ssm psr.i // restore psr.i | 252 | //(p15)ssm psr.i // restore psr.i |
247 | addl r14=@gprel(ia64_leave_hypervisor),gp | 253 | addl r14=@gprel(ia64_leave_hypervisor),gp |
248 | ;; | 254 | ;; |
249 | KVM_SAVE_REST | 255 | KVM_SAVE_REST |
250 | mov rp=r14 | 256 | mov rp=r14 |
251 | ;; | 257 | ;; |
252 | adds out1=16,sp | 258 | adds out1=16,sp |
253 | br.call.sptk.many b6=kvm_ia64_handle_break | 259 | br.call.sptk.many b6=kvm_ia64_handle_break |
254 | ;; | 260 | ;; |
255 | END(kvm_break_fault) | 261 | END(kvm_break_fault) |
256 | 262 | ||
257 | .org kvm_ia64_ivt+0x3000 | 263 | .org kvm_ia64_ivt+0x3000 |
258 | ///////////////////////////////////////////////////////////////// | 264 | ///////////////////////////////////////////////////////////////// |
259 | // 0x3000 Entry 12 (size 64 bundles) External Interrupt (4) | 265 | // 0x3000 Entry 12 (size 64 bundles) External Interrupt (4) |
260 | ENTRY(kvm_interrupt) | 266 | ENTRY(kvm_interrupt) |
261 | mov r31=pr // prepare to save predicates | 267 | mov r31=pr // prepare to save predicates |
262 | mov r19=12 | 268 | mov r19=12 |
263 | mov r29=cr.ipsr | 269 | mov r29=cr.ipsr |
264 | ;; | 270 | ;; |
265 | tbit.z p6,p7=r29,IA64_PSR_VM_BIT | 271 | tbit.z p6,p7=r29,IA64_PSR_VM_BIT |
266 | tbit.z p0,p15=r29,IA64_PSR_I_BIT | 272 | tbit.z p0,p15=r29,IA64_PSR_I_BIT |
267 | ;; | 273 | ;; |
268 | (p7) br.sptk kvm_dispatch_interrupt | 274 | (p7) br.sptk kvm_dispatch_interrupt |
269 | ;; | 275 | ;; |
270 | mov r27=ar.rsc /* M */ | 276 | mov r27=ar.rsc /* M */ |
271 | mov r20=r1 /* A */ | 277 | mov r20=r1 /* A */ |
272 | mov r25=ar.unat /* M */ | 278 | mov r25=ar.unat /* M */ |
273 | mov r26=ar.pfs /* I */ | 279 | mov r26=ar.pfs /* I */ |
274 | mov r28=cr.iip /* M */ | 280 | mov r28=cr.iip /* M */ |
275 | cover /* B (or nothing) */ | 281 | cover /* B (or nothing) */ |
276 | ;; | 282 | ;; |
277 | mov r1=sp | 283 | mov r1=sp |
278 | ;; | 284 | ;; |
279 | invala /* M */ | 285 | invala /* M */ |
280 | mov r30=cr.ifs | 286 | mov r30=cr.ifs |
281 | ;; | 287 | ;; |
282 | addl r1=-VMM_PT_REGS_SIZE,r1 | 288 | addl r1=-VMM_PT_REGS_SIZE,r1 |
283 | ;; | 289 | ;; |
284 | adds r17=2*L1_CACHE_BYTES,r1 /* really: biggest cache-line size */ | 290 | adds r17=2*L1_CACHE_BYTES,r1 /* really: biggest cache-line size */ |
285 | adds r16=PT(CR_IPSR),r1 | 291 | adds r16=PT(CR_IPSR),r1 |
286 | ;; | 292 | ;; |
287 | lfetch.fault.excl.nt1 [r17],L1_CACHE_BYTES | 293 | lfetch.fault.excl.nt1 [r17],L1_CACHE_BYTES |
288 | st8 [r16]=r29 /* save cr.ipsr */ | 294 | st8 [r16]=r29 /* save cr.ipsr */ |
289 | ;; | 295 | ;; |
290 | lfetch.fault.excl.nt1 [r17] | 296 | lfetch.fault.excl.nt1 [r17] |
291 | mov r29=b0 | 297 | mov r29=b0 |
292 | ;; | 298 | ;; |
293 | adds r16=PT(R8),r1 /* initialize first base pointer */ | 299 | adds r16=PT(R8),r1 /* initialize first base pointer */ |
294 | adds r17=PT(R9),r1 /* initialize second base pointer */ | 300 | adds r17=PT(R9),r1 /* initialize second base pointer */ |
295 | mov r18=r0 /* make sure r18 isn't NaT */ | 301 | mov r18=r0 /* make sure r18 isn't NaT */ |
296 | ;; | 302 | ;; |
297 | .mem.offset 0,0; st8.spill [r16]=r8,16 | 303 | .mem.offset 0,0; st8.spill [r16]=r8,16 |
298 | .mem.offset 8,0; st8.spill [r17]=r9,16 | 304 | .mem.offset 8,0; st8.spill [r17]=r9,16 |
299 | ;; | 305 | ;; |
300 | .mem.offset 0,0; st8.spill [r16]=r10,24 | 306 | .mem.offset 0,0; st8.spill [r16]=r10,24 |
301 | .mem.offset 8,0; st8.spill [r17]=r11,24 | 307 | .mem.offset 8,0; st8.spill [r17]=r11,24 |
302 | ;; | 308 | ;; |
303 | st8 [r16]=r28,16 /* save cr.iip */ | 309 | st8 [r16]=r28,16 /* save cr.iip */ |
304 | st8 [r17]=r30,16 /* save cr.ifs */ | 310 | st8 [r17]=r30,16 /* save cr.ifs */ |
305 | mov r8=ar.fpsr /* M */ | 311 | mov r8=ar.fpsr /* M */ |
306 | mov r9=ar.csd | 312 | mov r9=ar.csd |
307 | mov r10=ar.ssd | 313 | mov r10=ar.ssd |
308 | movl r11=FPSR_DEFAULT /* L-unit */ | 314 | movl r11=FPSR_DEFAULT /* L-unit */ |
309 | ;; | 315 | ;; |
310 | st8 [r16]=r25,16 /* save ar.unat */ | 316 | st8 [r16]=r25,16 /* save ar.unat */ |
311 | st8 [r17]=r26,16 /* save ar.pfs */ | 317 | st8 [r17]=r26,16 /* save ar.pfs */ |
312 | shl r18=r18,16 /* compute ar.rsc to be used for "loadrs" */ | 318 | shl r18=r18,16 /* compute ar.rsc to be used for "loadrs" */ |
313 | ;; | 319 | ;; |
314 | st8 [r16]=r27,16 /* save ar.rsc */ | 320 | st8 [r16]=r27,16 /* save ar.rsc */ |
315 | adds r17=16,r17 /* skip over ar_rnat field */ | 321 | adds r17=16,r17 /* skip over ar_rnat field */ |
316 | ;; | 322 | ;; |
317 | st8 [r17]=r31,16 /* save predicates */ | 323 | st8 [r17]=r31,16 /* save predicates */ |
318 | adds r16=16,r16 /* skip over ar_bspstore field */ | 324 | adds r16=16,r16 /* skip over ar_bspstore field */ |
319 | ;; | 325 | ;; |
320 | st8 [r16]=r29,16 /* save b0 */ | 326 | st8 [r16]=r29,16 /* save b0 */ |
321 | st8 [r17]=r18,16 /* save ar.rsc value for "loadrs" */ | 327 | st8 [r17]=r18,16 /* save ar.rsc value for "loadrs" */ |
322 | ;; | 328 | ;; |
323 | .mem.offset 0,0; st8.spill [r16]=r20,16 /* save original r1 */ | 329 | .mem.offset 0,0; st8.spill [r16]=r20,16 /* save original r1 */ |
324 | .mem.offset 8,0; st8.spill [r17]=r12,16 | 330 | .mem.offset 8,0; st8.spill [r17]=r12,16 |
325 | adds r12=-16,r1 | 331 | adds r12=-16,r1 |
326 | /* switch to kernel memory stack (with 16 bytes of scratch) */ | 332 | /* switch to kernel memory stack (with 16 bytes of scratch) */ |
327 | ;; | 333 | ;; |
328 | .mem.offset 0,0; st8.spill [r16]=r13,16 | 334 | .mem.offset 0,0; st8.spill [r16]=r13,16 |
329 | .mem.offset 8,0; st8.spill [r17]=r8,16 /* save ar.fpsr */ | 335 | .mem.offset 8,0; st8.spill [r17]=r8,16 /* save ar.fpsr */ |
330 | ;; | 336 | ;; |
331 | .mem.offset 0,0; st8.spill [r16]=r15,16 | 337 | .mem.offset 0,0; st8.spill [r16]=r15,16 |
332 | .mem.offset 8,0; st8.spill [r17]=r14,16 | 338 | .mem.offset 8,0; st8.spill [r17]=r14,16 |
333 | dep r14=-1,r0,60,4 | 339 | dep r14=-1,r0,60,4 |
334 | ;; | 340 | ;; |
335 | .mem.offset 0,0; st8.spill [r16]=r2,16 | 341 | .mem.offset 0,0; st8.spill [r16]=r2,16 |
336 | .mem.offset 8,0; st8.spill [r17]=r3,16 | 342 | .mem.offset 8,0; st8.spill [r17]=r3,16 |
337 | adds r2=VMM_PT_REGS_R16_OFFSET,r1 | 343 | adds r2=VMM_PT_REGS_R16_OFFSET,r1 |
338 | adds r14 = VMM_VCPU_GP_OFFSET,r13 | 344 | adds r14 = VMM_VCPU_GP_OFFSET,r13 |
339 | ;; | 345 | ;; |
340 | mov r8=ar.ccv | 346 | mov r8=ar.ccv |
341 | ld8 r14 = [r14] | 347 | ld8 r14 = [r14] |
342 | ;; | 348 | ;; |
343 | mov r1=r14 /* establish kernel global pointer */ | 349 | mov r1=r14 /* establish kernel global pointer */ |
344 | ;; \ | 350 | ;; \ |
345 | bsw.1 | 351 | bsw.1 |
346 | ;; | 352 | ;; |
347 | alloc r14=ar.pfs,0,0,1,0 // must be first in an insn group | 353 | alloc r14=ar.pfs,0,0,1,0 // must be first in an insn group |
348 | mov out0=r13 | 354 | mov out0=r13 |
349 | ;; | 355 | ;; |
350 | ssm psr.ic | 356 | ssm psr.ic |
351 | ;; | 357 | ;; |
352 | srlz.i | 358 | srlz.i |
353 | ;; | 359 | ;; |
354 | //(p15) ssm psr.i | 360 | //(p15) ssm psr.i |
355 | adds r3=8,r2 // set up second base pointer for SAVE_REST | 361 | adds r3=8,r2 // set up second base pointer for SAVE_REST |
356 | srlz.i // ensure everybody knows psr.ic is back on | 362 | srlz.i // ensure everybody knows psr.ic is back on |
357 | ;; | 363 | ;; |
358 | .mem.offset 0,0; st8.spill [r2]=r16,16 | 364 | .mem.offset 0,0; st8.spill [r2]=r16,16 |
359 | .mem.offset 8,0; st8.spill [r3]=r17,16 | 365 | .mem.offset 8,0; st8.spill [r3]=r17,16 |
360 | ;; | 366 | ;; |
361 | .mem.offset 0,0; st8.spill [r2]=r18,16 | 367 | .mem.offset 0,0; st8.spill [r2]=r18,16 |
362 | .mem.offset 8,0; st8.spill [r3]=r19,16 | 368 | .mem.offset 8,0; st8.spill [r3]=r19,16 |
363 | ;; | 369 | ;; |
364 | .mem.offset 0,0; st8.spill [r2]=r20,16 | 370 | .mem.offset 0,0; st8.spill [r2]=r20,16 |
365 | .mem.offset 8,0; st8.spill [r3]=r21,16 | 371 | .mem.offset 8,0; st8.spill [r3]=r21,16 |
366 | mov r18=b6 | 372 | mov r18=b6 |
367 | ;; | 373 | ;; |
368 | .mem.offset 0,0; st8.spill [r2]=r22,16 | 374 | .mem.offset 0,0; st8.spill [r2]=r22,16 |
369 | .mem.offset 8,0; st8.spill [r3]=r23,16 | 375 | .mem.offset 8,0; st8.spill [r3]=r23,16 |
370 | mov r19=b7 | 376 | mov r19=b7 |
371 | ;; | 377 | ;; |
372 | .mem.offset 0,0; st8.spill [r2]=r24,16 | 378 | .mem.offset 0,0; st8.spill [r2]=r24,16 |
373 | .mem.offset 8,0; st8.spill [r3]=r25,16 | 379 | .mem.offset 8,0; st8.spill [r3]=r25,16 |
374 | ;; | 380 | ;; |
375 | .mem.offset 0,0; st8.spill [r2]=r26,16 | 381 | .mem.offset 0,0; st8.spill [r2]=r26,16 |
376 | .mem.offset 8,0; st8.spill [r3]=r27,16 | 382 | .mem.offset 8,0; st8.spill [r3]=r27,16 |
377 | ;; | 383 | ;; |
378 | .mem.offset 0,0; st8.spill [r2]=r28,16 | 384 | .mem.offset 0,0; st8.spill [r2]=r28,16 |
379 | .mem.offset 8,0; st8.spill [r3]=r29,16 | 385 | .mem.offset 8,0; st8.spill [r3]=r29,16 |
380 | ;; | 386 | ;; |
381 | .mem.offset 0,0; st8.spill [r2]=r30,16 | 387 | .mem.offset 0,0; st8.spill [r2]=r30,16 |
382 | .mem.offset 8,0; st8.spill [r3]=r31,32 | 388 | .mem.offset 8,0; st8.spill [r3]=r31,32 |
383 | ;; | 389 | ;; |
384 | mov ar.fpsr=r11 /* M-unit */ | 390 | mov ar.fpsr=r11 /* M-unit */ |
385 | st8 [r2]=r8,8 /* ar.ccv */ | 391 | st8 [r2]=r8,8 /* ar.ccv */ |
386 | adds r24=PT(B6)-PT(F7),r3 | 392 | adds r24=PT(B6)-PT(F7),r3 |
387 | ;; | 393 | ;; |
388 | stf.spill [r2]=f6,32 | 394 | stf.spill [r2]=f6,32 |
389 | stf.spill [r3]=f7,32 | 395 | stf.spill [r3]=f7,32 |
390 | ;; | 396 | ;; |
391 | stf.spill [r2]=f8,32 | 397 | stf.spill [r2]=f8,32 |
392 | stf.spill [r3]=f9,32 | 398 | stf.spill [r3]=f9,32 |
393 | ;; | 399 | ;; |
394 | stf.spill [r2]=f10 | 400 | stf.spill [r2]=f10 |
395 | stf.spill [r3]=f11 | 401 | stf.spill [r3]=f11 |
396 | adds r25=PT(B7)-PT(F11),r3 | 402 | adds r25=PT(B7)-PT(F11),r3 |
397 | ;; | 403 | ;; |
398 | st8 [r24]=r18,16 /* b6 */ | 404 | st8 [r24]=r18,16 /* b6 */ |
399 | st8 [r25]=r19,16 /* b7 */ | 405 | st8 [r25]=r19,16 /* b7 */ |
400 | ;; | 406 | ;; |
401 | st8 [r24]=r9 /* ar.csd */ | 407 | st8 [r24]=r9 /* ar.csd */ |
402 | st8 [r25]=r10 /* ar.ssd */ | 408 | st8 [r25]=r10 /* ar.ssd */ |
403 | ;; | 409 | ;; |
404 | srlz.d // make sure we see the effect of cr.ivr | 410 | srlz.d // make sure we see the effect of cr.ivr |
405 | addl r14=@gprel(ia64_leave_nested),gp | 411 | addl r14=@gprel(ia64_leave_nested),gp |
406 | ;; | 412 | ;; |
407 | mov rp=r14 | 413 | mov rp=r14 |
408 | br.call.sptk.many b6=kvm_ia64_handle_irq | 414 | br.call.sptk.many b6=kvm_ia64_handle_irq |
409 | ;; | 415 | ;; |
410 | END(kvm_interrupt) | 416 | END(kvm_interrupt) |
411 | 417 | ||
412 | .global kvm_dispatch_vexirq | 418 | .global kvm_dispatch_vexirq |
@@ -414,387 +420,385 @@ END(kvm_interrupt) | |||
414 | ////////////////////////////////////////////////////////////////////// | 420 | ////////////////////////////////////////////////////////////////////// |
415 | // 0x3400 Entry 13 (size 64 bundles) Reserved | 421 | // 0x3400 Entry 13 (size 64 bundles) Reserved |
416 | ENTRY(kvm_virtual_exirq) | 422 | ENTRY(kvm_virtual_exirq) |
417 | mov r31=pr | 423 | mov r31=pr |
418 | mov r19=13 | 424 | mov r19=13 |
419 | mov r30 =r0 | 425 | mov r30 =r0 |
420 | ;; | 426 | ;; |
421 | kvm_dispatch_vexirq: | 427 | kvm_dispatch_vexirq: |
422 | cmp.eq p6,p0 = 1,r30 | 428 | cmp.eq p6,p0 = 1,r30 |
423 | ;; | 429 | ;; |
424 | (p6)add r29 = VMM_VCPU_SAVED_GP_OFFSET,r21 | 430 | (p6) add r29 = VMM_VCPU_SAVED_GP_OFFSET,r21 |
425 | ;; | 431 | ;; |
426 | (p6)ld8 r1 = [r29] | 432 | (p6) ld8 r1 = [r29] |
427 | ;; | 433 | ;; |
428 | KVM_SAVE_MIN_WITH_COVER_R19 | 434 | KVM_SAVE_MIN_WITH_COVER_R19 |
429 | alloc r14=ar.pfs,0,0,1,0 | 435 | alloc r14=ar.pfs,0,0,1,0 |
430 | mov out0=r13 | 436 | mov out0=r13 |
431 | 437 | ||
432 | ssm psr.ic | 438 | ssm psr.ic |
433 | ;; | 439 | ;; |
434 | srlz.i // guarantee that interruption collection is on | 440 | srlz.i // guarantee that interruption collection is on |
435 | ;; | 441 | ;; |
436 | //(p15) ssm psr.i // restore psr.i | 442 | //(p15) ssm psr.i // restore psr.i |
437 | adds r3=8,r2 // set up second base pointer | 443 | adds r3=8,r2 // set up second base pointer |
438 | ;; | 444 | ;; |
439 | KVM_SAVE_REST | 445 | KVM_SAVE_REST |
440 | addl r14=@gprel(ia64_leave_hypervisor),gp | 446 | addl r14=@gprel(ia64_leave_hypervisor),gp |
441 | ;; | 447 | ;; |
442 | mov rp=r14 | 448 | mov rp=r14 |
443 | br.call.sptk.many b6=kvm_vexirq | 449 | br.call.sptk.many b6=kvm_vexirq |
444 | END(kvm_virtual_exirq) | 450 | END(kvm_virtual_exirq) |
445 | 451 | ||
446 | .org kvm_ia64_ivt+0x3800 | 452 | .org kvm_ia64_ivt+0x3800 |
447 | ///////////////////////////////////////////////////////////////////// | 453 | ///////////////////////////////////////////////////////////////////// |
448 | // 0x3800 Entry 14 (size 64 bundles) Reserved | 454 | // 0x3800 Entry 14 (size 64 bundles) Reserved |
449 | KVM_FAULT(14) | 455 | KVM_FAULT(14) |
450 | // this code segment is from 2.6.16.13 | 456 | // this code segment is from 2.6.16.13 |
451 | |||
452 | 457 | ||
453 | .org kvm_ia64_ivt+0x3c00 | 458 | .org kvm_ia64_ivt+0x3c00 |
454 | /////////////////////////////////////////////////////////////////////// | 459 | /////////////////////////////////////////////////////////////////////// |
455 | // 0x3c00 Entry 15 (size 64 bundles) Reserved | 460 | // 0x3c00 Entry 15 (size 64 bundles) Reserved |
456 | KVM_FAULT(15) | 461 | KVM_FAULT(15) |
457 | |||
458 | 462 | ||
459 | .org kvm_ia64_ivt+0x4000 | 463 | .org kvm_ia64_ivt+0x4000 |
460 | /////////////////////////////////////////////////////////////////////// | 464 | /////////////////////////////////////////////////////////////////////// |
461 | // 0x4000 Entry 16 (size 64 bundles) Reserved | 465 | // 0x4000 Entry 16 (size 64 bundles) Reserved |
462 | KVM_FAULT(16) | 466 | KVM_FAULT(16) |
463 | 467 | ||
464 | .org kvm_ia64_ivt+0x4400 | 468 | .org kvm_ia64_ivt+0x4400 |
465 | ////////////////////////////////////////////////////////////////////// | 469 | ////////////////////////////////////////////////////////////////////// |
466 | // 0x4400 Entry 17 (size 64 bundles) Reserved | 470 | // 0x4400 Entry 17 (size 64 bundles) Reserved |
467 | KVM_FAULT(17) | 471 | KVM_FAULT(17) |
468 | 472 | ||
469 | .org kvm_ia64_ivt+0x4800 | 473 | .org kvm_ia64_ivt+0x4800 |
470 | ////////////////////////////////////////////////////////////////////// | 474 | ////////////////////////////////////////////////////////////////////// |
471 | // 0x4800 Entry 18 (size 64 bundles) Reserved | 475 | // 0x4800 Entry 18 (size 64 bundles) Reserved |
472 | KVM_FAULT(18) | 476 | KVM_FAULT(18) |
473 | 477 | ||
474 | .org kvm_ia64_ivt+0x4c00 | 478 | .org kvm_ia64_ivt+0x4c00 |
475 | ////////////////////////////////////////////////////////////////////// | 479 | ////////////////////////////////////////////////////////////////////// |
476 | // 0x4c00 Entry 19 (size 64 bundles) Reserved | 480 | // 0x4c00 Entry 19 (size 64 bundles) Reserved |
477 | KVM_FAULT(19) | 481 | KVM_FAULT(19) |
478 | 482 | ||
479 | .org kvm_ia64_ivt+0x5000 | 483 | .org kvm_ia64_ivt+0x5000 |
480 | ////////////////////////////////////////////////////////////////////// | 484 | ////////////////////////////////////////////////////////////////////// |
481 | // 0x5000 Entry 20 (size 16 bundles) Page Not Present | 485 | // 0x5000 Entry 20 (size 16 bundles) Page Not Present |
482 | ENTRY(kvm_page_not_present) | 486 | ENTRY(kvm_page_not_present) |
483 | KVM_REFLECT(20) | 487 | KVM_REFLECT(20) |
484 | END(kvm_page_not_present) | 488 | END(kvm_page_not_present) |
485 | 489 | ||
486 | .org kvm_ia64_ivt+0x5100 | 490 | .org kvm_ia64_ivt+0x5100 |
487 | /////////////////////////////////////////////////////////////////////// | 491 | /////////////////////////////////////////////////////////////////////// |
488 | // 0x5100 Entry 21 (size 16 bundles) Key Permission vector | 492 | // 0x5100 Entry 21 (size 16 bundles) Key Permission vector |
489 | ENTRY(kvm_key_permission) | 493 | ENTRY(kvm_key_permission) |
490 | KVM_REFLECT(21) | 494 | KVM_REFLECT(21) |
491 | END(kvm_key_permission) | 495 | END(kvm_key_permission) |
492 | 496 | ||
493 | .org kvm_ia64_ivt+0x5200 | 497 | .org kvm_ia64_ivt+0x5200 |
494 | ////////////////////////////////////////////////////////////////////// | 498 | ////////////////////////////////////////////////////////////////////// |
495 | // 0x5200 Entry 22 (size 16 bundles) Instruction Access Rights (26) | 499 | // 0x5200 Entry 22 (size 16 bundles) Instruction Access Rights (26) |
496 | ENTRY(kvm_iaccess_rights) | 500 | ENTRY(kvm_iaccess_rights) |
497 | KVM_REFLECT(22) | 501 | KVM_REFLECT(22) |
498 | END(kvm_iaccess_rights) | 502 | END(kvm_iaccess_rights) |
499 | 503 | ||
500 | .org kvm_ia64_ivt+0x5300 | 504 | .org kvm_ia64_ivt+0x5300 |
501 | ////////////////////////////////////////////////////////////////////// | 505 | ////////////////////////////////////////////////////////////////////// |
502 | // 0x5300 Entry 23 (size 16 bundles) Data Access Rights (14,53) | 506 | // 0x5300 Entry 23 (size 16 bundles) Data Access Rights (14,53) |
503 | ENTRY(kvm_daccess_rights) | 507 | ENTRY(kvm_daccess_rights) |
504 | KVM_REFLECT(23) | 508 | KVM_REFLECT(23) |
505 | END(kvm_daccess_rights) | 509 | END(kvm_daccess_rights) |
506 | 510 | ||
507 | .org kvm_ia64_ivt+0x5400 | 511 | .org kvm_ia64_ivt+0x5400 |
508 | ///////////////////////////////////////////////////////////////////// | 512 | ///////////////////////////////////////////////////////////////////// |
509 | // 0x5400 Entry 24 (size 16 bundles) General Exception (5,32,34,36,38,39) | 513 | // 0x5400 Entry 24 (size 16 bundles) General Exception (5,32,34,36,38,39) |
510 | ENTRY(kvm_general_exception) | 514 | ENTRY(kvm_general_exception) |
511 | KVM_REFLECT(24) | 515 | KVM_REFLECT(24) |
512 | KVM_FAULT(24) | 516 | KVM_FAULT(24) |
513 | END(kvm_general_exception) | 517 | END(kvm_general_exception) |
514 | 518 | ||
515 | .org kvm_ia64_ivt+0x5500 | 519 | .org kvm_ia64_ivt+0x5500 |
516 | ////////////////////////////////////////////////////////////////////// | 520 | ////////////////////////////////////////////////////////////////////// |
517 | // 0x5500 Entry 25 (size 16 bundles) Disabled FP-Register (35) | 521 | // 0x5500 Entry 25 (size 16 bundles) Disabled FP-Register (35) |
518 | ENTRY(kvm_disabled_fp_reg) | 522 | ENTRY(kvm_disabled_fp_reg) |
519 | KVM_REFLECT(25) | 523 | KVM_REFLECT(25) |
520 | END(kvm_disabled_fp_reg) | 524 | END(kvm_disabled_fp_reg) |
521 | 525 | ||
522 | .org kvm_ia64_ivt+0x5600 | 526 | .org kvm_ia64_ivt+0x5600 |
523 | //////////////////////////////////////////////////////////////////// | 527 | //////////////////////////////////////////////////////////////////// |
524 | // 0x5600 Entry 26 (size 16 bundles) Nat Consumption (11,23,37,50) | 528 | // 0x5600 Entry 26 (size 16 bundles) Nat Consumption (11,23,37,50) |
525 | ENTRY(kvm_nat_consumption) | 529 | ENTRY(kvm_nat_consumption) |
526 | KVM_REFLECT(26) | 530 | KVM_REFLECT(26) |
527 | END(kvm_nat_consumption) | 531 | END(kvm_nat_consumption) |
528 | 532 | ||
529 | .org kvm_ia64_ivt+0x5700 | 533 | .org kvm_ia64_ivt+0x5700 |
530 | ///////////////////////////////////////////////////////////////////// | 534 | ///////////////////////////////////////////////////////////////////// |
531 | // 0x5700 Entry 27 (size 16 bundles) Speculation (40) | 535 | // 0x5700 Entry 27 (size 16 bundles) Speculation (40) |
532 | ENTRY(kvm_speculation_vector) | 536 | ENTRY(kvm_speculation_vector) |
533 | KVM_REFLECT(27) | 537 | KVM_REFLECT(27) |
534 | END(kvm_speculation_vector) | 538 | END(kvm_speculation_vector) |
535 | 539 | ||
536 | .org kvm_ia64_ivt+0x5800 | 540 | .org kvm_ia64_ivt+0x5800 |
537 | ///////////////////////////////////////////////////////////////////// | 541 | ///////////////////////////////////////////////////////////////////// |
538 | // 0x5800 Entry 28 (size 16 bundles) Reserved | 542 | // 0x5800 Entry 28 (size 16 bundles) Reserved |
539 | KVM_FAULT(28) | 543 | KVM_FAULT(28) |
540 | 544 | ||
541 | .org kvm_ia64_ivt+0x5900 | 545 | .org kvm_ia64_ivt+0x5900 |
542 | /////////////////////////////////////////////////////////////////// | 546 | /////////////////////////////////////////////////////////////////// |
543 | // 0x5900 Entry 29 (size 16 bundles) Debug (16,28,56) | 547 | // 0x5900 Entry 29 (size 16 bundles) Debug (16,28,56) |
544 | ENTRY(kvm_debug_vector) | 548 | ENTRY(kvm_debug_vector) |
545 | KVM_FAULT(29) | 549 | KVM_FAULT(29) |
546 | END(kvm_debug_vector) | 550 | END(kvm_debug_vector) |
547 | 551 | ||
548 | .org kvm_ia64_ivt+0x5a00 | 552 | .org kvm_ia64_ivt+0x5a00 |
549 | /////////////////////////////////////////////////////////////// | 553 | /////////////////////////////////////////////////////////////// |
550 | // 0x5a00 Entry 30 (size 16 bundles) Unaligned Reference (57) | 554 | // 0x5a00 Entry 30 (size 16 bundles) Unaligned Reference (57) |
551 | ENTRY(kvm_unaligned_access) | 555 | ENTRY(kvm_unaligned_access) |
552 | KVM_REFLECT(30) | 556 | KVM_REFLECT(30) |
553 | END(kvm_unaligned_access) | 557 | END(kvm_unaligned_access) |
554 | 558 | ||
555 | .org kvm_ia64_ivt+0x5b00 | 559 | .org kvm_ia64_ivt+0x5b00 |
556 | ////////////////////////////////////////////////////////////////////// | 560 | ////////////////////////////////////////////////////////////////////// |
557 | // 0x5b00 Entry 31 (size 16 bundles) Unsupported Data Reference (57) | 561 | // 0x5b00 Entry 31 (size 16 bundles) Unsupported Data Reference (57) |
558 | ENTRY(kvm_unsupported_data_reference) | 562 | ENTRY(kvm_unsupported_data_reference) |
559 | KVM_REFLECT(31) | 563 | KVM_REFLECT(31) |
560 | END(kvm_unsupported_data_reference) | 564 | END(kvm_unsupported_data_reference) |
561 | 565 | ||
562 | .org kvm_ia64_ivt+0x5c00 | 566 | .org kvm_ia64_ivt+0x5c00 |
563 | //////////////////////////////////////////////////////////////////// | 567 | //////////////////////////////////////////////////////////////////// |
564 | // 0x5c00 Entry 32 (size 16 bundles) Floating Point FAULT (65) | 568 | // 0x5c00 Entry 32 (size 16 bundles) Floating Point FAULT (65) |
565 | ENTRY(kvm_floating_point_fault) | 569 | ENTRY(kvm_floating_point_fault) |
566 | KVM_REFLECT(32) | 570 | KVM_REFLECT(32) |
567 | END(kvm_floating_point_fault) | 571 | END(kvm_floating_point_fault) |
568 | 572 | ||
569 | .org kvm_ia64_ivt+0x5d00 | 573 | .org kvm_ia64_ivt+0x5d00 |
570 | ///////////////////////////////////////////////////////////////////// | 574 | ///////////////////////////////////////////////////////////////////// |
571 | // 0x5d00 Entry 33 (size 16 bundles) Floating Point Trap (66) | 575 | // 0x5d00 Entry 33 (size 16 bundles) Floating Point Trap (66) |
572 | ENTRY(kvm_floating_point_trap) | 576 | ENTRY(kvm_floating_point_trap) |
573 | KVM_REFLECT(33) | 577 | KVM_REFLECT(33) |
574 | END(kvm_floating_point_trap) | 578 | END(kvm_floating_point_trap) |
575 | 579 | ||
576 | .org kvm_ia64_ivt+0x5e00 | 580 | .org kvm_ia64_ivt+0x5e00 |
577 | ////////////////////////////////////////////////////////////////////// | 581 | ////////////////////////////////////////////////////////////////////// |
578 | // 0x5e00 Entry 34 (size 16 bundles) Lower Privilege Transfer Trap (66) | 582 | // 0x5e00 Entry 34 (size 16 bundles) Lower Privilege Transfer Trap (66) |
579 | ENTRY(kvm_lower_privilege_trap) | 583 | ENTRY(kvm_lower_privilege_trap) |
580 | KVM_REFLECT(34) | 584 | KVM_REFLECT(34) |
581 | END(kvm_lower_privilege_trap) | 585 | END(kvm_lower_privilege_trap) |
582 | 586 | ||
583 | .org kvm_ia64_ivt+0x5f00 | 587 | .org kvm_ia64_ivt+0x5f00 |
584 | ////////////////////////////////////////////////////////////////////// | 588 | ////////////////////////////////////////////////////////////////////// |
585 | // 0x5f00 Entry 35 (size 16 bundles) Taken Branch Trap (68) | 589 | // 0x5f00 Entry 35 (size 16 bundles) Taken Branch Trap (68) |
586 | ENTRY(kvm_taken_branch_trap) | 590 | ENTRY(kvm_taken_branch_trap) |
587 | KVM_REFLECT(35) | 591 | KVM_REFLECT(35) |
588 | END(kvm_taken_branch_trap) | 592 | END(kvm_taken_branch_trap) |
589 | 593 | ||
590 | .org kvm_ia64_ivt+0x6000 | 594 | .org kvm_ia64_ivt+0x6000 |
591 | //////////////////////////////////////////////////////////////////// | 595 | //////////////////////////////////////////////////////////////////// |
592 | // 0x6000 Entry 36 (size 16 bundles) Single Step Trap (69) | 596 | // 0x6000 Entry 36 (size 16 bundles) Single Step Trap (69) |
593 | ENTRY(kvm_single_step_trap) | 597 | ENTRY(kvm_single_step_trap) |
594 | KVM_REFLECT(36) | 598 | KVM_REFLECT(36) |
595 | END(kvm_single_step_trap) | 599 | END(kvm_single_step_trap) |
596 | .global kvm_virtualization_fault_back | 600 | .global kvm_virtualization_fault_back |
597 | .org kvm_ia64_ivt+0x6100 | 601 | .org kvm_ia64_ivt+0x6100 |
598 | ///////////////////////////////////////////////////////////////////// | 602 | ///////////////////////////////////////////////////////////////////// |
599 | // 0x6100 Entry 37 (size 16 bundles) Virtualization Fault | 603 | // 0x6100 Entry 37 (size 16 bundles) Virtualization Fault |
600 | ENTRY(kvm_virtualization_fault) | 604 | ENTRY(kvm_virtualization_fault) |
601 | mov r31=pr | 605 | mov r31=pr |
602 | adds r16 = VMM_VCPU_SAVED_GP_OFFSET,r21 | 606 | adds r16 = VMM_VCPU_SAVED_GP_OFFSET,r21 |
603 | ;; | 607 | ;; |
604 | st8 [r16] = r1 | 608 | st8 [r16] = r1 |
605 | adds r17 = VMM_VCPU_GP_OFFSET, r21 | 609 | adds r17 = VMM_VCPU_GP_OFFSET, r21 |
606 | ;; | 610 | ;; |
607 | ld8 r1 = [r17] | 611 | ld8 r1 = [r17] |
608 | cmp.eq p6,p0=EVENT_MOV_FROM_AR,r24 | 612 | cmp.eq p6,p0=EVENT_MOV_FROM_AR,r24 |
609 | cmp.eq p7,p0=EVENT_MOV_FROM_RR,r24 | 613 | cmp.eq p7,p0=EVENT_MOV_FROM_RR,r24 |
610 | cmp.eq p8,p0=EVENT_MOV_TO_RR,r24 | 614 | cmp.eq p8,p0=EVENT_MOV_TO_RR,r24 |
611 | cmp.eq p9,p0=EVENT_RSM,r24 | 615 | cmp.eq p9,p0=EVENT_RSM,r24 |
612 | cmp.eq p10,p0=EVENT_SSM,r24 | 616 | cmp.eq p10,p0=EVENT_SSM,r24 |
613 | cmp.eq p11,p0=EVENT_MOV_TO_PSR,r24 | 617 | cmp.eq p11,p0=EVENT_MOV_TO_PSR,r24 |
614 | cmp.eq p12,p0=EVENT_THASH,r24 | 618 | cmp.eq p12,p0=EVENT_THASH,r24 |
615 | (p6) br.dptk.many kvm_asm_mov_from_ar | 619 | (p6) br.dptk.many kvm_asm_mov_from_ar |
616 | (p7) br.dptk.many kvm_asm_mov_from_rr | 620 | (p7) br.dptk.many kvm_asm_mov_from_rr |
617 | (p8) br.dptk.many kvm_asm_mov_to_rr | 621 | (p8) br.dptk.many kvm_asm_mov_to_rr |
618 | (p9) br.dptk.many kvm_asm_rsm | 622 | (p9) br.dptk.many kvm_asm_rsm |
619 | (p10) br.dptk.many kvm_asm_ssm | 623 | (p10) br.dptk.many kvm_asm_ssm |
620 | (p11) br.dptk.many kvm_asm_mov_to_psr | 624 | (p11) br.dptk.many kvm_asm_mov_to_psr |
621 | (p12) br.dptk.many kvm_asm_thash | 625 | (p12) br.dptk.many kvm_asm_thash |
622 | ;; | 626 | ;; |
623 | kvm_virtualization_fault_back: | 627 | kvm_virtualization_fault_back: |
624 | adds r16 = VMM_VCPU_SAVED_GP_OFFSET,r21 | 628 | adds r16 = VMM_VCPU_SAVED_GP_OFFSET,r21 |
625 | ;; | 629 | ;; |
626 | ld8 r1 = [r16] | 630 | ld8 r1 = [r16] |
627 | ;; | 631 | ;; |
628 | mov r19=37 | 632 | mov r19=37 |
629 | adds r16 = VMM_VCPU_CAUSE_OFFSET,r21 | 633 | adds r16 = VMM_VCPU_CAUSE_OFFSET,r21 |
630 | adds r17 = VMM_VCPU_OPCODE_OFFSET,r21 | 634 | adds r17 = VMM_VCPU_OPCODE_OFFSET,r21 |
631 | ;; | 635 | ;; |
632 | st8 [r16] = r24 | 636 | st8 [r16] = r24 |
633 | st8 [r17] = r25 | 637 | st8 [r17] = r25 |
634 | ;; | 638 | ;; |
635 | cmp.ne p6,p0=EVENT_RFI, r24 | 639 | cmp.ne p6,p0=EVENT_RFI, r24 |
636 | (p6) br.sptk kvm_dispatch_virtualization_fault | 640 | (p6) br.sptk kvm_dispatch_virtualization_fault |
637 | ;; | 641 | ;; |
638 | adds r18=VMM_VPD_BASE_OFFSET,r21 | 642 | adds r18=VMM_VPD_BASE_OFFSET,r21 |
639 | ;; | 643 | ;; |
640 | ld8 r18=[r18] | 644 | ld8 r18=[r18] |
641 | ;; | 645 | ;; |
642 | adds r18=VMM_VPD_VIFS_OFFSET,r18 | 646 | adds r18=VMM_VPD_VIFS_OFFSET,r18 |
643 | ;; | 647 | ;; |
644 | ld8 r18=[r18] | 648 | ld8 r18=[r18] |
645 | ;; | 649 | ;; |
646 | tbit.z p6,p0=r18,63 | 650 | tbit.z p6,p0=r18,63 |
647 | (p6) br.sptk kvm_dispatch_virtualization_fault | 651 | (p6) br.sptk kvm_dispatch_virtualization_fault |
648 | ;; | 652 | ;; |
649 | //if vifs.v=1 discard current register frame | 653 | //if vifs.v=1 discard current register frame |
650 | alloc r18=ar.pfs,0,0,0,0 | 654 | alloc r18=ar.pfs,0,0,0,0 |
651 | br.sptk kvm_dispatch_virtualization_fault | 655 | br.sptk kvm_dispatch_virtualization_fault |
652 | END(kvm_virtualization_fault) | 656 | END(kvm_virtualization_fault) |
653 | 657 | ||
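The cmp.eq/predicated-branch chain in kvm_virtualization_fault is, in effect, a switch on the exit cause held in r24: known causes branch to a fast emulation stub, everything else falls back to the full-exit path. A minimal C sketch of that control flow (the EVENT_* values and handler names below are illustrative placeholders, not the kernel's):

    /* Illustrative C model of the predicated dispatch above; the EVENT_*
     * values are assumed, and the handlers stand in for the kvm_asm_*
     * stubs and kvm_virtualization_fault_back. */
    enum vmm_event {
            EVENT_MOV_FROM_AR = 1,  /* assumed value, illustration only */
            EVENT_MOV_FROM_RR,
            EVENT_MOV_TO_RR,
            EVENT_RSM,
            EVENT_SSM,
            EVENT_MOV_TO_PSR,
            EVENT_THASH
    };

    static void fast_emulate(enum vmm_event cause) { (void)cause; }
    static void full_exit(enum vmm_event cause)    { (void)cause; }

    static void dispatch_virtualization_fault(enum vmm_event cause)
    {
            switch (cause) {                /* the cmp.eq/br.dptk chain */
            case EVENT_MOV_FROM_AR:
            case EVENT_MOV_FROM_RR:
            case EVENT_MOV_TO_RR:
            case EVENT_RSM:
            case EVENT_SSM:
            case EVENT_MOV_TO_PSR:
            case EVENT_THASH:
                    fast_emulate(cause);    /* stay on the fast path */
                    break;
            default:
                    full_exit(cause);       /* full state save, kvm_emulate */
            }
    }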
654 | .org kvm_ia64_ivt+0x6200 | 658 | .org kvm_ia64_ivt+0x6200 |
655 | ////////////////////////////////////////////////////////////// | 659 | ////////////////////////////////////////////////////////////// |
656 | // 0x6200 Entry 38 (size 16 bundles) Reserved | 660 | // 0x6200 Entry 38 (size 16 bundles) Reserved |
657 | KVM_FAULT(38) | 661 | KVM_FAULT(38) |
658 | 662 | ||
659 | .org kvm_ia64_ivt+0x6300 | 663 | .org kvm_ia64_ivt+0x6300 |
660 | ///////////////////////////////////////////////////////////////// | 664 | ///////////////////////////////////////////////////////////////// |
661 | // 0x6300 Entry 39 (size 16 bundles) Reserved | 665 | // 0x6300 Entry 39 (size 16 bundles) Reserved |
662 | KVM_FAULT(39) | 666 | KVM_FAULT(39) |
663 | 667 | ||
664 | .org kvm_ia64_ivt+0x6400 | 668 | .org kvm_ia64_ivt+0x6400 |
665 | ///////////////////////////////////////////////////////////////// | 669 | ///////////////////////////////////////////////////////////////// |
666 | // 0x6400 Entry 40 (size 16 bundles) Reserved | 670 | // 0x6400 Entry 40 (size 16 bundles) Reserved |
667 | KVM_FAULT(40) | 671 | KVM_FAULT(40) |
668 | 672 | ||
669 | .org kvm_ia64_ivt+0x6500 | 673 | .org kvm_ia64_ivt+0x6500 |
670 | ////////////////////////////////////////////////////////////////// | 674 | ////////////////////////////////////////////////////////////////// |
671 | // 0x6500 Entry 41 (size 16 bundles) Reserved | 675 | // 0x6500 Entry 41 (size 16 bundles) Reserved |
672 | KVM_FAULT(41) | 676 | KVM_FAULT(41) |
673 | 677 | ||
674 | .org kvm_ia64_ivt+0x6600 | 678 | .org kvm_ia64_ivt+0x6600 |
675 | ////////////////////////////////////////////////////////////////// | 679 | ////////////////////////////////////////////////////////////////// |
676 | // 0x6600 Entry 42 (size 16 bundles) Reserved | 680 | // 0x6600 Entry 42 (size 16 bundles) Reserved |
677 | KVM_FAULT(42) | 681 | KVM_FAULT(42) |
678 | 682 | ||
679 | .org kvm_ia64_ivt+0x6700 | 683 | .org kvm_ia64_ivt+0x6700 |
680 | ////////////////////////////////////////////////////////////////// | 684 | ////////////////////////////////////////////////////////////////// |
681 | // 0x6700 Entry 43 (size 16 bundles) Reserved | 685 | // 0x6700 Entry 43 (size 16 bundles) Reserved |
682 | KVM_FAULT(43) | 686 | KVM_FAULT(43) |
683 | 687 | ||
684 | .org kvm_ia64_ivt+0x6800 | 688 | .org kvm_ia64_ivt+0x6800 |
685 | ////////////////////////////////////////////////////////////////// | 689 | ////////////////////////////////////////////////////////////////// |
686 | // 0x6800 Entry 44 (size 16 bundles) Reserved | 690 | // 0x6800 Entry 44 (size 16 bundles) Reserved |
687 | KVM_FAULT(44) | 691 | KVM_FAULT(44) |
688 | 692 | ||
689 | .org kvm_ia64_ivt+0x6900 | 693 | .org kvm_ia64_ivt+0x6900 |
690 | /////////////////////////////////////////////////////////////////// | 694 | /////////////////////////////////////////////////////////////////// |
691 | // 0x6900 Entry 45 (size 16 bundles) IA-32 Exception | 695 | // 0x6900 Entry 45 (size 16 bundles) IA-32 Exception |
692 | //(17,18,29,41,42,43,44,58,60,61,62,72,73,75,76,77) | 696 | //(17,18,29,41,42,43,44,58,60,61,62,72,73,75,76,77) |
693 | ENTRY(kvm_ia32_exception) | 697 | ENTRY(kvm_ia32_exception) |
694 | KVM_FAULT(45) | 698 | KVM_FAULT(45) |
695 | END(kvm_ia32_exception) | 699 | END(kvm_ia32_exception) |
696 | 700 | ||
697 | .org kvm_ia64_ivt+0x6a00 | 701 | .org kvm_ia64_ivt+0x6a00 |
698 | //////////////////////////////////////////////////////////////////// | 702 | //////////////////////////////////////////////////////////////////// |
699 | // 0x6a00 Entry 46 (size 16 bundles) IA-32 Intercept (30,31,59,70,71) | 703 | // 0x6a00 Entry 46 (size 16 bundles) IA-32 Intercept (30,31,59,70,71) |
700 | ENTRY(kvm_ia32_intercept) | 704 | ENTRY(kvm_ia32_intercept) |
701 | KVM_FAULT(47) | 705 | KVM_FAULT(47) |
702 | END(kvm_ia32_intercept) | 706 | END(kvm_ia32_intercept) |
703 | 707 | ||
704 | .org kvm_ia64_ivt+0x6c00 | 708 | .org kvm_ia64_ivt+0x6c00 |
705 | ///////////////////////////////////////////////////////////////////// | 709 | ///////////////////////////////////////////////////////////////////// |
706 | // 0x6c00 Entry 48 (size 16 bundles) Reserved | 710 | // 0x6c00 Entry 48 (size 16 bundles) Reserved |
707 | KVM_FAULT(48) | 711 | KVM_FAULT(48) |
708 | 712 | ||
709 | .org kvm_ia64_ivt+0x6d00 | 713 | .org kvm_ia64_ivt+0x6d00 |
710 | ////////////////////////////////////////////////////////////////////// | 714 | ////////////////////////////////////////////////////////////////////// |
711 | // 0x6d00 Entry 49 (size 16 bundles) Reserved | 715 | // 0x6d00 Entry 49 (size 16 bundles) Reserved |
712 | KVM_FAULT(49) | 716 | KVM_FAULT(49) |
713 | 717 | ||
714 | .org kvm_ia64_ivt+0x6e00 | 718 | .org kvm_ia64_ivt+0x6e00 |
715 | ////////////////////////////////////////////////////////////////////// | 719 | ////////////////////////////////////////////////////////////////////// |
716 | // 0x6e00 Entry 50 (size 16 bundles) Reserved | 720 | // 0x6e00 Entry 50 (size 16 bundles) Reserved |
717 | KVM_FAULT(50) | 721 | KVM_FAULT(50) |
718 | 722 | ||
719 | .org kvm_ia64_ivt+0x6f00 | 723 | .org kvm_ia64_ivt+0x6f00 |
720 | ///////////////////////////////////////////////////////////////////// | 724 | ///////////////////////////////////////////////////////////////////// |
721 | // 0x6f00 Entry 51 (size 16 bundles) Reserved | 725 | // 0x6f00 Entry 51 (size 16 bundles) Reserved |
722 | KVM_FAULT(52) | 726 | KVM_FAULT(52) |
723 | 727 | ||
724 | .org kvm_ia64_ivt+0x7100 | 728 | .org kvm_ia64_ivt+0x7100 |
725 | //////////////////////////////////////////////////////////////////// | 729 | //////////////////////////////////////////////////////////////////// |
726 | // 0x7100 Entry 53 (size 16 bundles) Reserved | 730 | // 0x7100 Entry 53 (size 16 bundles) Reserved |
727 | KVM_FAULT(53) | 731 | KVM_FAULT(53) |
728 | 732 | ||
729 | .org kvm_ia64_ivt+0x7200 | 733 | .org kvm_ia64_ivt+0x7200 |
730 | ///////////////////////////////////////////////////////////////////// | 734 | ///////////////////////////////////////////////////////////////////// |
731 | // 0x7200 Entry 54 (size 16 bundles) Reserved | 735 | // 0x7200 Entry 54 (size 16 bundles) Reserved |
732 | KVM_FAULT(54) | 736 | KVM_FAULT(54) |
733 | 737 | ||
734 | .org kvm_ia64_ivt+0x7300 | 738 | .org kvm_ia64_ivt+0x7300 |
735 | //////////////////////////////////////////////////////////////////// | 739 | //////////////////////////////////////////////////////////////////// |
736 | // 0x7300 Entry 55 (size 16 bundles) Reserved | 740 | // 0x7300 Entry 55 (size 16 bundles) Reserved |
737 | KVM_FAULT(55) | 741 | KVM_FAULT(55) |
738 | 742 | ||
739 | .org kvm_ia64_ivt+0x7400 | 743 | .org kvm_ia64_ivt+0x7400 |
740 | //////////////////////////////////////////////////////////////////// | 744 | //////////////////////////////////////////////////////////////////// |
741 | // 0x7400 Entry 56 (size 16 bundles) Reserved | 745 | // 0x7400 Entry 56 (size 16 bundles) Reserved |
742 | KVM_FAULT(56) | 746 | KVM_FAULT(56) |
743 | 747 | ||
744 | .org kvm_ia64_ivt+0x7500 | 748 | .org kvm_ia64_ivt+0x7500 |
745 | ///////////////////////////////////////////////////////////////////// | 749 | ///////////////////////////////////////////////////////////////////// |
746 | // 0x7500 Entry 57 (size 16 bundles) Reserved | 750 | // 0x7500 Entry 57 (size 16 bundles) Reserved |
747 | KVM_FAULT(57) | 751 | KVM_FAULT(57) |
748 | 752 | ||
749 | .org kvm_ia64_ivt+0x7600 | 753 | .org kvm_ia64_ivt+0x7600 |
750 | ///////////////////////////////////////////////////////////////////// | 754 | ///////////////////////////////////////////////////////////////////// |
751 | // 0x7600 Entry 58 (size 16 bundles) Reserved | 755 | // 0x7600 Entry 58 (size 16 bundles) Reserved |
752 | KVM_FAULT(58) | 756 | KVM_FAULT(58) |
753 | 757 | ||
754 | .org kvm_ia64_ivt+0x7700 | 758 | .org kvm_ia64_ivt+0x7700 |
755 | //////////////////////////////////////////////////////////////////// | 759 | //////////////////////////////////////////////////////////////////// |
756 | // 0x7700 Entry 59 (size 16 bundles) Reserved | 760 | // 0x7700 Entry 59 (size 16 bundles) Reserved |
757 | KVM_FAULT(59) | 761 | KVM_FAULT(59) |
758 | 762 | ||
759 | .org kvm_ia64_ivt+0x7800 | 763 | .org kvm_ia64_ivt+0x7800 |
760 | //////////////////////////////////////////////////////////////////// | 764 | //////////////////////////////////////////////////////////////////// |
761 | // 0x7800 Entry 60 (size 16 bundles) Reserved | 765 | // 0x7800 Entry 60 (size 16 bundles) Reserved |
762 | KVM_FAULT(60) | 766 | KVM_FAULT(60) |
763 | 767 | ||
764 | .org kvm_ia64_ivt+0x7900 | 768 | .org kvm_ia64_ivt+0x7900 |
765 | ///////////////////////////////////////////////////////////////////// | 769 | ///////////////////////////////////////////////////////////////////// |
766 | // 0x7900 Entry 61 (size 16 bundles) Reserved | 770 | // 0x7900 Entry 61 (size 16 bundles) Reserved |
767 | KVM_FAULT(61) | 771 | KVM_FAULT(61) |
768 | 772 | ||
769 | .org kvm_ia64_ivt+0x7a00 | 773 | .org kvm_ia64_ivt+0x7a00 |
770 | ///////////////////////////////////////////////////////////////////// | 774 | ///////////////////////////////////////////////////////////////////// |
771 | // 0x7a00 Entry 62 (size 16 bundles) Reserved | 775 | // 0x7a00 Entry 62 (size 16 bundles) Reserved |
772 | KVM_FAULT(62) | 776 | KVM_FAULT(62) |
773 | 777 | ||
774 | .org kvm_ia64_ivt+0x7b00 | 778 | .org kvm_ia64_ivt+0x7b00 |
775 | ///////////////////////////////////////////////////////////////////// | 779 | ///////////////////////////////////////////////////////////////////// |
776 | // 0x7b00 Entry 63 (size 16 bundles) Reserved | 780 | // 0x7b00 Entry 63 (size 16 bundles) Reserved |
777 | KVM_FAULT(63) | 781 | KVM_FAULT(63) |
778 | 782 | ||
779 | .org kvm_ia64_ivt+0x7c00 | 783 | .org kvm_ia64_ivt+0x7c00 |
780 | //////////////////////////////////////////////////////////////////// | 784 | //////////////////////////////////////////////////////////////////// |
781 | // 0x7c00 Entry 64 (size 16 bundles) Reserved | 785 | // 0x7c00 Entry 64 (size 16 bundles) Reserved |
782 | KVM_FAULT(64) | 786 | KVM_FAULT(64) |
783 | 787 | ||
784 | .org kvm_ia64_ivt+0x7d00 | 788 | .org kvm_ia64_ivt+0x7d00 |
785 | ///////////////////////////////////////////////////////////////////// | 789 | ///////////////////////////////////////////////////////////////////// |
786 | // 0x7d00 Entry 65 (size 16 bundles) Reserved | 790 | // 0x7d00 Entry 65 (size 16 bundles) Reserved |
787 | KVM_FAULT(65) | 791 | KVM_FAULT(65) |
788 | 792 | ||
789 | .org kvm_ia64_ivt+0x7e00 | 793 | .org kvm_ia64_ivt+0x7e00 |
790 | ///////////////////////////////////////////////////////////////////// | 794 | ///////////////////////////////////////////////////////////////////// |
791 | // 0x7e00 Entry 66 (size 16 bundles) Reserved | 795 | // 0x7e00 Entry 66 (size 16 bundles) Reserved |
792 | KVM_FAULT(66) | 796 | KVM_FAULT(66) |
793 | 797 | ||
794 | .org kvm_ia64_ivt+0x7f00 | 798 | .org kvm_ia64_ivt+0x7f00 |
795 | //////////////////////////////////////////////////////////////////// | 799 | //////////////////////////////////////////////////////////////////// |
796 | // 0x7f00 Entry 67 (size 16 bundles) Reserved | 800 | // 0x7f00 Entry 67 (size 16 bundles) Reserved |
797 | KVM_FAULT(67) | 801 | KVM_FAULT(67) |
798 | 802 | ||
799 | .org kvm_ia64_ivt+0x8000 | 803 | .org kvm_ia64_ivt+0x8000 |
800 | // There is no particular reason for this code to be here, other than that | 804 | // There is no particular reason for this code to be here, other than that |
@@ -804,132 +808,128 @@ END(kvm_ia32_intercept) | |||
804 | 808 | ||
805 | 809 | ||
806 | ENTRY(kvm_dtlb_miss_dispatch) | 810 | ENTRY(kvm_dtlb_miss_dispatch) |
807 | mov r19 = 2 | 811 | mov r19 = 2 |
808 | KVM_SAVE_MIN_WITH_COVER_R19 | 812 | KVM_SAVE_MIN_WITH_COVER_R19 |
809 | alloc r14=ar.pfs,0,0,3,0 | 813 | alloc r14=ar.pfs,0,0,3,0 |
810 | mov out0=cr.ifa | 814 | mov out0=cr.ifa |
811 | mov out1=r15 | 815 | mov out1=r15 |
812 | adds r3=8,r2 // set up second base pointer | 816 | adds r3=8,r2 // set up second base pointer |
813 | ;; | 817 | ;; |
814 | ssm psr.ic | 818 | ssm psr.ic |
815 | ;; | 819 | ;; |
816 | srlz.i // guarantee that interruption collection is on | 820 | srlz.i // guarantee that interruption collection is on |
817 | ;; | 821 | ;; |
818 | //(p15) ssm psr.i // restore psr.i | 822 | //(p15) ssm psr.i // restore psr.i |
819 | addl r14=@gprel(ia64_leave_hypervisor_prepare),gp | 823 | addl r14=@gprel(ia64_leave_hypervisor_prepare),gp |
820 | ;; | 824 | ;; |
821 | KVM_SAVE_REST | 825 | KVM_SAVE_REST |
822 | KVM_SAVE_EXTRA | 826 | KVM_SAVE_EXTRA |
823 | mov rp=r14 | 827 | mov rp=r14 |
824 | ;; | 828 | ;; |
825 | adds out2=16,r12 | 829 | adds out2=16,r12 |
826 | br.call.sptk.many b6=kvm_page_fault | 830 | br.call.sptk.many b6=kvm_page_fault |
827 | END(kvm_dtlb_miss_dispatch) | 831 | END(kvm_dtlb_miss_dispatch) |
828 | 832 | ||
829 | ENTRY(kvm_itlb_miss_dispatch) | 833 | ENTRY(kvm_itlb_miss_dispatch) |
830 | 834 | ||
831 | KVM_SAVE_MIN_WITH_COVER_R19 | 835 | KVM_SAVE_MIN_WITH_COVER_R19 |
832 | alloc r14=ar.pfs,0,0,3,0 | 836 | alloc r14=ar.pfs,0,0,3,0 |
833 | mov out0=cr.ifa | 837 | mov out0=cr.ifa |
834 | mov out1=r15 | 838 | mov out1=r15 |
835 | adds r3=8,r2 // set up second base pointer | 839 | adds r3=8,r2 // set up second base pointer |
836 | ;; | 840 | ;; |
837 | ssm psr.ic | 841 | ssm psr.ic |
838 | ;; | 842 | ;; |
839 | srlz.i // guarantee that interruption collection is on | 843 | srlz.i // guarantee that interruption collection is on |
840 | ;; | 844 | ;; |
841 | //(p15) ssm psr.i // restore psr.i | 845 | //(p15) ssm psr.i // restore psr.i |
842 | addl r14=@gprel(ia64_leave_hypervisor),gp | 846 | addl r14=@gprel(ia64_leave_hypervisor),gp |
843 | ;; | 847 | ;; |
844 | KVM_SAVE_REST | 848 | KVM_SAVE_REST |
845 | mov rp=r14 | 849 | mov rp=r14 |
846 | ;; | 850 | ;; |
847 | adds out2=16,r12 | 851 | adds out2=16,r12 |
848 | br.call.sptk.many b6=kvm_page_fault | 852 | br.call.sptk.many b6=kvm_page_fault |
849 | END(kvm_itlb_miss_dispatch) | 853 | END(kvm_itlb_miss_dispatch) |
850 | 854 | ||
851 | ENTRY(kvm_dispatch_reflection) | 855 | ENTRY(kvm_dispatch_reflection) |
852 | /* | 856 | /* |
853 | * Input: | 857 | * Input: |
854 | * psr.ic: off | 858 | * psr.ic: off |
855 | * r19: intr type (offset into ivt, see ia64_int.h) | 859 | * r19: intr type (offset into ivt, see ia64_int.h) |
856 | * r31: contains saved predicates (pr) | 860 | * r31: contains saved predicates (pr) |
857 | */ | 861 | */ |
858 | KVM_SAVE_MIN_WITH_COVER_R19 | 862 | KVM_SAVE_MIN_WITH_COVER_R19 |
859 | alloc r14=ar.pfs,0,0,5,0 | 863 | alloc r14=ar.pfs,0,0,5,0 |
860 | mov out0=cr.ifa | 864 | mov out0=cr.ifa |
861 | mov out1=cr.isr | 865 | mov out1=cr.isr |
862 | mov out2=cr.iim | 866 | mov out2=cr.iim |
863 | mov out3=r15 | 867 | mov out3=r15 |
864 | adds r3=8,r2 // set up second base pointer | 868 | adds r3=8,r2 // set up second base pointer |
865 | ;; | 869 | ;; |
866 | ssm psr.ic | 870 | ssm psr.ic |
867 | ;; | 871 | ;; |
868 | srlz.i // guarantee that interruption collection is on | 872 | srlz.i // guarantee that interruption collection is on |
869 | ;; | 873 | ;; |
870 | //(p15) ssm psr.i // restore psr.i | 874 | //(p15) ssm psr.i // restore psr.i |
871 | addl r14=@gprel(ia64_leave_hypervisor),gp | 875 | addl r14=@gprel(ia64_leave_hypervisor),gp |
872 | ;; | 876 | ;; |
873 | KVM_SAVE_REST | 877 | KVM_SAVE_REST |
874 | mov rp=r14 | 878 | mov rp=r14 |
875 | ;; | 879 | ;; |
876 | adds out4=16,r12 | 880 | adds out4=16,r12 |
877 | br.call.sptk.many b6=reflect_interruption | 881 | br.call.sptk.many b6=reflect_interruption |
878 | END(kvm_dispatch_reflection) | 882 | END(kvm_dispatch_reflection) |
879 | 883 | ||
880 | ENTRY(kvm_dispatch_virtualization_fault) | 884 | ENTRY(kvm_dispatch_virtualization_fault) |
881 | adds r16 = VMM_VCPU_CAUSE_OFFSET,r21 | 885 | adds r16 = VMM_VCPU_CAUSE_OFFSET,r21 |
882 | adds r17 = VMM_VCPU_OPCODE_OFFSET,r21 | 886 | adds r17 = VMM_VCPU_OPCODE_OFFSET,r21 |
883 | ;; | 887 | ;; |
884 | st8 [r16] = r24 | 888 | st8 [r16] = r24 |
885 | st8 [r17] = r25 | 889 | st8 [r17] = r25 |
886 | ;; | 890 | ;; |
887 | KVM_SAVE_MIN_WITH_COVER_R19 | 891 | KVM_SAVE_MIN_WITH_COVER_R19 |
888 | ;; | 892 | ;; |
889 | alloc r14=ar.pfs,0,0,2,0 // now it's safe (must be first in insn group!) | 893 | alloc r14=ar.pfs,0,0,2,0 // (must be first in insn group!) |
890 | mov out0=r13 //vcpu | 894 | mov out0=r13 //vcpu |
891 | adds r3=8,r2 // set up second base pointer | 895 | adds r3=8,r2 // set up second base pointer |
892 | ;; | 896 | ;; |
893 | ssm psr.ic | 897 | ssm psr.ic |
894 | ;; | 898 | ;; |
895 | srlz.i // guarantee that interruption collection is on | 899 | srlz.i // guarantee that interruption collection is on |
896 | ;; | 900 | ;; |
897 | //(p15) ssm psr.i // restore psr.i | 901 | //(p15) ssm psr.i // restore psr.i |
898 | addl r14=@gprel(ia64_leave_hypervisor_prepare),gp | 902 | addl r14=@gprel(ia64_leave_hypervisor_prepare),gp |
899 | ;; | 903 | ;; |
900 | KVM_SAVE_REST | 904 | KVM_SAVE_REST |
901 | KVM_SAVE_EXTRA | 905 | KVM_SAVE_EXTRA |
902 | mov rp=r14 | 906 | mov rp=r14 |
903 | ;; | 907 | ;; |
904 | adds out1=16,sp //regs | 908 | adds out1=16,sp //regs |
905 | br.call.sptk.many b6=kvm_emulate | 909 | br.call.sptk.many b6=kvm_emulate |
906 | END(kvm_dispatch_virtualization_fault) | 910 | END(kvm_dispatch_virtualization_fault) |
907 | 911 | ||
908 | 912 | ||
909 | ENTRY(kvm_dispatch_interrupt) | 913 | ENTRY(kvm_dispatch_interrupt) |
910 | KVM_SAVE_MIN_WITH_COVER_R19 // uses r31; defines r2 and r3 | 914 | KVM_SAVE_MIN_WITH_COVER_R19 // uses r31; defines r2 and r3 |
911 | ;; | 915 | ;; |
912 | alloc r14=ar.pfs,0,0,1,0 // must be first in an insn group | 916 | alloc r14=ar.pfs,0,0,1,0 // must be first in an insn group |
913 | //mov out0=cr.ivr // pass cr.ivr as first arg | 917 | adds r3=8,r2 // set up second base pointer for SAVE_REST |
914 | adds r3=8,r2 // set up second base pointer for SAVE_REST | 918 | ;; |
915 | ;; | 919 | ssm psr.ic |
916 | ssm psr.ic | 920 | ;; |
917 | ;; | 921 | srlz.i |
918 | srlz.i | 922 | ;; |
919 | ;; | 923 | //(p15) ssm psr.i |
920 | //(p15) ssm psr.i | 924 | addl r14=@gprel(ia64_leave_hypervisor),gp |
921 | addl r14=@gprel(ia64_leave_hypervisor),gp | 925 | ;; |
922 | ;; | 926 | KVM_SAVE_REST |
923 | KVM_SAVE_REST | 927 | mov rp=r14 |
924 | mov rp=r14 | 928 | ;; |
925 | ;; | 929 | mov out0=r13 // pass pointer to pt_regs as second arg |
926 | mov out0=r13 // pass pointer to pt_regs as second arg | 930 | br.call.sptk.many b6=kvm_ia64_handle_irq |
927 | br.call.sptk.many b6=kvm_ia64_handle_irq | ||
928 | END(kvm_dispatch_interrupt) | 931 | END(kvm_dispatch_interrupt) |
929 | 932 | ||
930 | |||
931 | |||
932 | |||
933 | GLOBAL_ENTRY(ia64_leave_nested) | 933 | GLOBAL_ENTRY(ia64_leave_nested) |
934 | rsm psr.i | 934 | rsm psr.i |
935 | ;; | 935 | ;; |
@@ -1008,7 +1008,7 @@ GLOBAL_ENTRY(ia64_leave_nested) | |||
1008 | ;; | 1008 | ;; |
1009 | ldf.fill f11=[r2] | 1009 | ldf.fill f11=[r2] |
1010 | // mov r18=r13 | 1010 | // mov r18=r13 |
1011 | // mov r21=r13 | 1011 | // mov r21=r13 |
1012 | adds r16=PT(CR_IPSR)+16,r12 | 1012 | adds r16=PT(CR_IPSR)+16,r12 |
1013 | adds r17=PT(CR_IIP)+16,r12 | 1013 | adds r17=PT(CR_IIP)+16,r12 |
1014 | ;; | 1014 | ;; |
@@ -1058,138 +1058,135 @@ GLOBAL_ENTRY(ia64_leave_nested) | |||
1058 | rfi | 1058 | rfi |
1059 | END(ia64_leave_nested) | 1059 | END(ia64_leave_nested) |
1060 | 1060 | ||
1061 | |||
1062 | |||
1063 | GLOBAL_ENTRY(ia64_leave_hypervisor_prepare) | 1061 | GLOBAL_ENTRY(ia64_leave_hypervisor_prepare) |
1064 | /* | 1062 | /* |
1065 | * work.need_resched etc. mustn't get changed | 1063 | * work.need_resched etc. mustn't get changed |
1066 | *by this CPU before it returns to | 1064 | *by this CPU before it returns to |
1067 | ;; | 1065 | * user- or fsys-mode, hence we disable interrupts early on: |
1068 | * user- or fsys-mode, hence we disable interrupts early on: | 1066 | */ |
1069 | */ | 1067 | adds r2 = PT(R4)+16,r12 |
1070 | adds r2 = PT(R4)+16,r12 | 1068 | adds r3 = PT(R5)+16,r12 |
1071 | adds r3 = PT(R5)+16,r12 | 1069 | adds r8 = PT(EML_UNAT)+16,r12 |
1072 | adds r8 = PT(EML_UNAT)+16,r12 | 1070 | ;; |
1073 | ;; | 1071 | ld8 r8 = [r8] |
1074 | ld8 r8 = [r8] | 1072 | ;; |
1075 | ;; | 1073 | mov ar.unat=r8 |
1076 | mov ar.unat=r8 | 1074 | ;; |
1077 | ;; | 1075 | ld8.fill r4=[r2],16 //load r4 |
1078 | ld8.fill r4=[r2],16 //load r4 | 1076 | ld8.fill r5=[r3],16 //load r5 |
1079 | ld8.fill r5=[r3],16 //load r5 | 1077 | ;; |
1080 | ;; | 1078 | ld8.fill r6=[r2] //load r6 |
1081 | ld8.fill r6=[r2] //load r6 | 1079 | ld8.fill r7=[r3] //load r7 |
1082 | ld8.fill r7=[r3] //load r7 | 1080 | ;; |
1083 | ;; | ||
1084 | END(ia64_leave_hypervisor_prepare) | 1081 | END(ia64_leave_hypervisor_prepare) |
1085 | //fall through | 1082 | //fall through |
1086 | GLOBAL_ENTRY(ia64_leave_hypervisor) | 1083 | GLOBAL_ENTRY(ia64_leave_hypervisor) |
1087 | rsm psr.i | 1084 | rsm psr.i |
1088 | ;; | 1085 | ;; |
1089 | br.call.sptk.many b0=leave_hypervisor_tail | 1086 | br.call.sptk.many b0=leave_hypervisor_tail |
1090 | ;; | 1087 | ;; |
1091 | adds r20=PT(PR)+16,r12 | 1088 | adds r20=PT(PR)+16,r12 |
1092 | adds r8=PT(EML_UNAT)+16,r12 | 1089 | adds r8=PT(EML_UNAT)+16,r12 |
1093 | ;; | 1090 | ;; |
1094 | ld8 r8=[r8] | 1091 | ld8 r8=[r8] |
1095 | ;; | 1092 | ;; |
1096 | mov ar.unat=r8 | 1093 | mov ar.unat=r8 |
1097 | ;; | 1094 | ;; |
1098 | lfetch [r20],PT(CR_IPSR)-PT(PR) | 1095 | lfetch [r20],PT(CR_IPSR)-PT(PR) |
1099 | adds r2 = PT(B6)+16,r12 | 1096 | adds r2 = PT(B6)+16,r12 |
1100 | adds r3 = PT(B7)+16,r12 | 1097 | adds r3 = PT(B7)+16,r12 |
1101 | ;; | 1098 | ;; |
1102 | lfetch [r20] | 1099 | lfetch [r20] |
1103 | ;; | 1100 | ;; |
1104 | ld8 r24=[r2],16 /* B6 */ | 1101 | ld8 r24=[r2],16 /* B6 */ |
1105 | ld8 r25=[r3],16 /* B7 */ | 1102 | ld8 r25=[r3],16 /* B7 */ |
1106 | ;; | 1103 | ;; |
1107 | ld8 r26=[r2],16 /* ar_csd */ | 1104 | ld8 r26=[r2],16 /* ar_csd */ |
1108 | ld8 r27=[r3],16 /* ar_ssd */ | 1105 | ld8 r27=[r3],16 /* ar_ssd */ |
1109 | mov b6 = r24 | 1106 | mov b6 = r24 |
1110 | ;; | 1107 | ;; |
1111 | ld8.fill r8=[r2],16 | 1108 | ld8.fill r8=[r2],16 |
1112 | ld8.fill r9=[r3],16 | 1109 | ld8.fill r9=[r3],16 |
1113 | mov b7 = r25 | 1110 | mov b7 = r25 |
1114 | ;; | 1111 | ;; |
1115 | mov ar.csd = r26 | 1112 | mov ar.csd = r26 |
1116 | mov ar.ssd = r27 | 1113 | mov ar.ssd = r27 |
1117 | ;; | 1114 | ;; |
1118 | ld8.fill r10=[r2],PT(R15)-PT(R10) | 1115 | ld8.fill r10=[r2],PT(R15)-PT(R10) |
1119 | ld8.fill r11=[r3],PT(R14)-PT(R11) | 1116 | ld8.fill r11=[r3],PT(R14)-PT(R11) |
1120 | ;; | 1117 | ;; |
1121 | ld8.fill r15=[r2],PT(R16)-PT(R15) | 1118 | ld8.fill r15=[r2],PT(R16)-PT(R15) |
1122 | ld8.fill r14=[r3],PT(R17)-PT(R14) | 1119 | ld8.fill r14=[r3],PT(R17)-PT(R14) |
1123 | ;; | 1120 | ;; |
1124 | ld8.fill r16=[r2],16 | 1121 | ld8.fill r16=[r2],16 |
1125 | ld8.fill r17=[r3],16 | 1122 | ld8.fill r17=[r3],16 |
1126 | ;; | 1123 | ;; |
1127 | ld8.fill r18=[r2],16 | 1124 | ld8.fill r18=[r2],16 |
1128 | ld8.fill r19=[r3],16 | 1125 | ld8.fill r19=[r3],16 |
1129 | ;; | 1126 | ;; |
1130 | ld8.fill r20=[r2],16 | 1127 | ld8.fill r20=[r2],16 |
1131 | ld8.fill r21=[r3],16 | 1128 | ld8.fill r21=[r3],16 |
1132 | ;; | 1129 | ;; |
1133 | ld8.fill r22=[r2],16 | 1130 | ld8.fill r22=[r2],16 |
1134 | ld8.fill r23=[r3],16 | 1131 | ld8.fill r23=[r3],16 |
1135 | ;; | 1132 | ;; |
1136 | ld8.fill r24=[r2],16 | 1133 | ld8.fill r24=[r2],16 |
1137 | ld8.fill r25=[r3],16 | 1134 | ld8.fill r25=[r3],16 |
1138 | ;; | 1135 | ;; |
1139 | ld8.fill r26=[r2],16 | 1136 | ld8.fill r26=[r2],16 |
1140 | ld8.fill r27=[r3],16 | 1137 | ld8.fill r27=[r3],16 |
1141 | ;; | 1138 | ;; |
1142 | ld8.fill r28=[r2],16 | 1139 | ld8.fill r28=[r2],16 |
1143 | ld8.fill r29=[r3],16 | 1140 | ld8.fill r29=[r3],16 |
1144 | ;; | 1141 | ;; |
1145 | ld8.fill r30=[r2],PT(F6)-PT(R30) | 1142 | ld8.fill r30=[r2],PT(F6)-PT(R30) |
1146 | ld8.fill r31=[r3],PT(F7)-PT(R31) | 1143 | ld8.fill r31=[r3],PT(F7)-PT(R31) |
1147 | ;; | 1144 | ;; |
1148 | rsm psr.i | psr.ic | 1145 | rsm psr.i | psr.ic |
1149 | // initiate turning off of interrupt and interruption collection | 1146 | // initiate turning off of interrupt and interruption collection |
1150 | invala // invalidate ALAT | 1147 | invala // invalidate ALAT |
1151 | ;; | 1148 | ;; |
1152 | srlz.i // ensure interruption collection is off | 1149 | srlz.i // ensure interruption collection is off |
1153 | ;; | 1150 | ;; |
1154 | bsw.0 | 1151 | bsw.0 |
1155 | ;; | 1152 | ;; |
1156 | adds r16 = PT(CR_IPSR)+16,r12 | 1153 | adds r16 = PT(CR_IPSR)+16,r12 |
1157 | adds r17 = PT(CR_IIP)+16,r12 | 1154 | adds r17 = PT(CR_IIP)+16,r12 |
1158 | mov r21=r13 // get current | 1155 | mov r21=r13 // get current |
1159 | ;; | 1156 | ;; |
1160 | ld8 r31=[r16],16 // load cr.ipsr | 1157 | ld8 r31=[r16],16 // load cr.ipsr |
1161 | ld8 r30=[r17],16 // load cr.iip | 1158 | ld8 r30=[r17],16 // load cr.iip |
1162 | ;; | 1159 | ;; |
1163 | ld8 r29=[r16],16 // load cr.ifs | 1160 | ld8 r29=[r16],16 // load cr.ifs |
1164 | ld8 r28=[r17],16 // load ar.unat | 1161 | ld8 r28=[r17],16 // load ar.unat |
1165 | ;; | 1162 | ;; |
1166 | ld8 r27=[r16],16 // load ar.pfs | 1163 | ld8 r27=[r16],16 // load ar.pfs |
1167 | ld8 r26=[r17],16 // load ar.rsc | 1164 | ld8 r26=[r17],16 // load ar.rsc |
1168 | ;; | 1165 | ;; |
1169 | ld8 r25=[r16],16 // load ar.rnat | 1166 | ld8 r25=[r16],16 // load ar.rnat |
1170 | ld8 r24=[r17],16 // load ar.bspstore | 1167 | ld8 r24=[r17],16 // load ar.bspstore |
1171 | ;; | 1168 | ;; |
1172 | ld8 r23=[r16],16 // load predicates | 1169 | ld8 r23=[r16],16 // load predicates |
1173 | ld8 r22=[r17],16 // load b0 | 1170 | ld8 r22=[r17],16 // load b0 |
1174 | ;; | 1171 | ;; |
1175 | ld8 r20=[r16],16 // load ar.rsc value for "loadrs" | 1172 | ld8 r20=[r16],16 // load ar.rsc value for "loadrs" |
1176 | ld8.fill r1=[r17],16 //load r1 | 1173 | ld8.fill r1=[r17],16 //load r1 |
1177 | ;; | 1174 | ;; |
1178 | ld8.fill r12=[r16],16 //load r12 | 1175 | ld8.fill r12=[r16],16 //load r12 |
1179 | ld8.fill r13=[r17],PT(R2)-PT(R13) //load r13 | 1176 | ld8.fill r13=[r17],PT(R2)-PT(R13) //load r13 |
1180 | ;; | 1177 | ;; |
1181 | ld8 r19=[r16],PT(R3)-PT(AR_FPSR) //load ar_fpsr | 1178 | ld8 r19=[r16],PT(R3)-PT(AR_FPSR) //load ar_fpsr |
1182 | ld8.fill r2=[r17],PT(AR_CCV)-PT(R2) //load r2 | 1179 | ld8.fill r2=[r17],PT(AR_CCV)-PT(R2) //load r2 |
1183 | ;; | 1180 | ;; |
1184 | ld8.fill r3=[r16] //load r3 | 1181 | ld8.fill r3=[r16] //load r3 |
1185 | ld8 r18=[r17] //load ar_ccv | 1182 | ld8 r18=[r17] //load ar_ccv |
1186 | ;; | 1183 | ;; |
1187 | mov ar.fpsr=r19 | 1184 | mov ar.fpsr=r19 |
1188 | mov ar.ccv=r18 | 1185 | mov ar.ccv=r18 |
1189 | shr.u r18=r20,16 | 1186 | shr.u r18=r20,16 |
1190 | ;; | 1187 | ;; |
1191 | kvm_rbs_switch: | 1188 | kvm_rbs_switch: |
1192 | mov r19=96 | 1189 | mov r19=96 |
1193 | 1190 | ||
1194 | kvm_dont_preserve_current_frame: | 1191 | kvm_dont_preserve_current_frame: |
1195 | /* | 1192 | /* |
@@ -1201,76 +1198,76 @@ kvm_dont_preserve_current_frame: | |||
1201 | # define pReturn p7 | 1198 | # define pReturn p7 |
1202 | # define Nregs 14 | 1199 | # define Nregs 14 |
1203 | 1200 | ||
1204 | alloc loc0=ar.pfs,2,Nregs-2,2,0 | 1201 | alloc loc0=ar.pfs,2,Nregs-2,2,0 |
1205 | shr.u loc1=r18,9 // RNaTslots <= floor(dirtySize / (64*8)) | 1202 | shr.u loc1=r18,9 // RNaTslots <= floor(dirtySize / (64*8)) |
1206 | sub r19=r19,r18 // r19 = (physStackedSize + 8) - dirtySize | 1203 | sub r19=r19,r18 // r19 = (physStackedSize + 8) - dirtySize |
1207 | ;; | 1204 | ;; |
1208 | mov ar.rsc=r20 // load ar.rsc to be used for "loadrs" | 1205 | mov ar.rsc=r20 // load ar.rsc to be used for "loadrs" |
1209 | shladd in0=loc1,3,r19 | 1206 | shladd in0=loc1,3,r19 |
1210 | mov in1=0 | 1207 | mov in1=0 |
1211 | ;; | 1208 | ;; |
1212 | TEXT_ALIGN(32) | 1209 | TEXT_ALIGN(32) |
1213 | kvm_rse_clear_invalid: | 1210 | kvm_rse_clear_invalid: |
1214 | alloc loc0=ar.pfs,2,Nregs-2,2,0 | 1211 | alloc loc0=ar.pfs,2,Nregs-2,2,0 |
1215 | cmp.lt pRecurse,p0=Nregs*8,in0 | 1212 | cmp.lt pRecurse,p0=Nregs*8,in0 |
1216 | // if more than Nregs regs left to clear, (re)curse | 1213 | // if more than Nregs regs left to clear, (re)curse |
1217 | add out0=-Nregs*8,in0 | 1214 | add out0=-Nregs*8,in0 |
1218 | add out1=1,in1 // increment recursion count | 1215 | add out1=1,in1 // increment recursion count |
1219 | mov loc1=0 | 1216 | mov loc1=0 |
1220 | mov loc2=0 | 1217 | mov loc2=0 |
1221 | ;; | 1218 | ;; |
1222 | mov loc3=0 | 1219 | mov loc3=0 |
1223 | mov loc4=0 | 1220 | mov loc4=0 |
1224 | mov loc5=0 | 1221 | mov loc5=0 |
1225 | mov loc6=0 | 1222 | mov loc6=0 |
1226 | mov loc7=0 | 1223 | mov loc7=0 |
1227 | (pRecurse) br.call.dptk.few b0=kvm_rse_clear_invalid | 1224 | (pRecurse) br.call.dptk.few b0=kvm_rse_clear_invalid |
1228 | ;; | 1225 | ;; |
1229 | mov loc8=0 | 1226 | mov loc8=0 |
1230 | mov loc9=0 | 1227 | mov loc9=0 |
1231 | cmp.ne pReturn,p0=r0,in1 | 1228 | cmp.ne pReturn,p0=r0,in1 |
1232 | // if recursion count != 0, we need to do a br.ret | 1229 | // if recursion count != 0, we need to do a br.ret |
1233 | mov loc10=0 | 1230 | mov loc10=0 |
1234 | mov loc11=0 | 1231 | mov loc11=0 |
1235 | (pReturn) br.ret.dptk.many b0 | 1232 | (pReturn) br.ret.dptk.many b0 |
1236 | 1233 | ||
1237 | # undef pRecurse | 1234 | # undef pRecurse |
1238 | # undef pReturn | 1235 | # undef pReturn |
1239 | 1236 | ||
1240 | // loadrs has already been shifted | 1237 | // loadrs has already been shifted |
1241 | alloc r16=ar.pfs,0,0,0,0 // drop current register frame | 1238 | alloc r16=ar.pfs,0,0,0,0 // drop current register frame |
1242 | ;; | 1239 | ;; |
1243 | loadrs | 1240 | loadrs |
1244 | ;; | 1241 | ;; |
1245 | mov ar.bspstore=r24 | 1242 | mov ar.bspstore=r24 |
1246 | ;; | 1243 | ;; |
1247 | mov ar.unat=r28 | 1244 | mov ar.unat=r28 |
1248 | mov ar.rnat=r25 | 1245 | mov ar.rnat=r25 |
1249 | mov ar.rsc=r26 | 1246 | mov ar.rsc=r26 |
1250 | ;; | 1247 | ;; |
1251 | mov cr.ipsr=r31 | 1248 | mov cr.ipsr=r31 |
1252 | mov cr.iip=r30 | 1249 | mov cr.iip=r30 |
1253 | mov cr.ifs=r29 | 1250 | mov cr.ifs=r29 |
1254 | mov ar.pfs=r27 | 1251 | mov ar.pfs=r27 |
1255 | adds r18=VMM_VPD_BASE_OFFSET,r21 | 1252 | adds r18=VMM_VPD_BASE_OFFSET,r21 |
1256 | ;; | 1253 | ;; |
1257 | ld8 r18=[r18] //vpd | 1254 | ld8 r18=[r18] //vpd |
1258 | adds r17=VMM_VCPU_ISR_OFFSET,r21 | 1255 | adds r17=VMM_VCPU_ISR_OFFSET,r21 |
1259 | ;; | 1256 | ;; |
1260 | ld8 r17=[r17] | 1257 | ld8 r17=[r17] |
1261 | adds r19=VMM_VPD_VPSR_OFFSET,r18 | 1258 | adds r19=VMM_VPD_VPSR_OFFSET,r18 |
1262 | ;; | 1259 | ;; |
1263 | ld8 r19=[r19] //vpsr | 1260 | ld8 r19=[r19] //vpsr |
1264 | mov r25=r18 | 1261 | mov r25=r18 |
1265 | adds r16= VMM_VCPU_GP_OFFSET,r21 | 1262 | adds r16= VMM_VCPU_GP_OFFSET,r21 |
1266 | ;; | 1263 | ;; |
1267 | ld8 r16= [r16] // Put gp in r24 | 1264 | ld8 r16= [r16] // Put gp in r24 |
1268 | movl r24=@gprel(ia64_vmm_entry) // calculate return address | 1265 | movl r24=@gprel(ia64_vmm_entry) // calculate return address |
1269 | ;; | 1266 | ;; |
1270 | add r24=r24,r16 | 1267 | add r24=r24,r16 |
1271 | ;; | 1268 | ;; |
1272 | br.sptk.many kvm_vps_sync_write // call the service | 1269 | br.sptk.many kvm_vps_sync_write // call the service |
1273 | ;; | 1270 | ;; |
1274 | END(ia64_leave_hypervisor) | 1271 | END(ia64_leave_hypervisor) |
1275 | // fall through | 1272 | // fall through |
1276 | GLOBAL_ENTRY(ia64_vmm_entry) | 1273 | GLOBAL_ENTRY(ia64_vmm_entry) |
@@ -1283,16 +1280,14 @@ GLOBAL_ENTRY(ia64_vmm_entry) | |||
1283 | * r22:b0 | 1280 | * r22:b0 |
1284 | * r23:predicate | 1281 | * r23:predicate |
1285 | */ | 1282 | */ |
1286 | mov r24=r22 | 1283 | mov r24=r22 |
1287 | mov r25=r18 | 1284 | mov r25=r18 |
1288 | tbit.nz p1,p2 = r19,IA64_PSR_IC_BIT // p1=vpsr.ic | 1285 | tbit.nz p1,p2 = r19,IA64_PSR_IC_BIT // p1=vpsr.ic |
1289 | (p1) br.cond.sptk.few kvm_vps_resume_normal | 1286 | (p1) br.cond.sptk.few kvm_vps_resume_normal |
1290 | (p2) br.cond.sptk.many kvm_vps_resume_handler | 1287 | (p2) br.cond.sptk.many kvm_vps_resume_handler |
1291 | ;; | 1288 | ;; |
1292 | END(ia64_vmm_entry) | 1289 | END(ia64_vmm_entry) |
1293 | 1290 | ||
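ia64_vmm_entry chooses the resume service from a single guest-PSR bit: tbit.nz sets p1 when vpsr.ic is 1 and p2 when it is 0, steering to resume_normal or resume_handler respectively. The same decision in C (a sketch; the stubs stand in for the kvm_vps_* services, and the bit index follows IA64_PSR_IC_BIT as used above):

    static void kvm_vps_resume_normal(void)  { /* vps service stub */ }
    static void kvm_vps_resume_handler(void) { /* vps service stub */ }
    #define IA64_PSR_IC_BIT 13                /* as in the tbit.nz above */

    static void vmm_entry(unsigned long vpsr)
    {
            if ((vpsr >> IA64_PSR_IC_BIT) & 1)      /* p1: vpsr.ic = 1 */
                    kvm_vps_resume_normal();
            else                                    /* p2: vpsr.ic = 0 */
                    kvm_vps_resume_handler();
    }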
1294 | |||
1295 | |||
1296 | /* | 1291 | /* |
1297 | * extern u64 ia64_call_vsa(u64 proc, u64 arg1, u64 arg2, | 1292 | * extern u64 ia64_call_vsa(u64 proc, u64 arg1, u64 arg2, |
1298 | * u64 arg3, u64 arg4, u64 arg5, | 1293 | * u64 arg3, u64 arg4, u64 arg5, |
@@ -1310,88 +1305,88 @@ psrsave = loc2 | |||
1310 | entry = loc3 | 1305 | entry = loc3 |
1311 | hostret = r24 | 1306 | hostret = r24 |
1312 | 1307 | ||
1313 | alloc pfssave=ar.pfs,4,4,0,0 | 1308 | alloc pfssave=ar.pfs,4,4,0,0 |
1314 | mov rpsave=rp | 1309 | mov rpsave=rp |
1315 | adds entry=VMM_VCPU_VSA_BASE_OFFSET, r13 | 1310 | adds entry=VMM_VCPU_VSA_BASE_OFFSET, r13 |
1316 | ;; | 1311 | ;; |
1317 | ld8 entry=[entry] | 1312 | ld8 entry=[entry] |
1318 | 1: mov hostret=ip | 1313 | 1: mov hostret=ip |
1319 | mov r25=in1 // copy arguments | 1314 | mov r25=in1 // copy arguments |
1320 | mov r26=in2 | 1315 | mov r26=in2 |
1321 | mov r27=in3 | 1316 | mov r27=in3 |
1322 | mov psrsave=psr | 1317 | mov psrsave=psr |
1323 | ;; | 1318 | ;; |
1324 | tbit.nz p6,p0=psrsave,14 // IA64_PSR_I | 1319 | tbit.nz p6,p0=psrsave,14 // IA64_PSR_I |
1325 | tbit.nz p7,p0=psrsave,13 // IA64_PSR_IC | 1320 | tbit.nz p7,p0=psrsave,13 // IA64_PSR_IC |
1326 | ;; | 1321 | ;; |
1327 | add hostret=2f-1b,hostret // calculate return address | 1322 | add hostret=2f-1b,hostret // calculate return address |
1328 | add entry=entry,in0 | 1323 | add entry=entry,in0 |
1329 | ;; | 1324 | ;; |
1330 | rsm psr.i | psr.ic | 1325 | rsm psr.i | psr.ic |
1331 | ;; | 1326 | ;; |
1332 | srlz.i | 1327 | srlz.i |
1333 | mov b6=entry | 1328 | mov b6=entry |
1334 | br.cond.sptk b6 // call the service | 1329 | br.cond.sptk b6 // call the service |
1335 | 2: | 1330 | 2: |
1336 | // Architectural sequence for enabling interrupts if necessary | 1331 | // Architectural sequence for enabling interrupts if necessary |
1337 | (p7) ssm psr.ic | 1332 | (p7) ssm psr.ic |
1338 | ;; | 1333 | ;; |
1339 | (p7) srlz.i | 1334 | (p7) srlz.i |
1340 | ;; | 1335 | ;; |
1341 | //(p6) ssm psr.i | 1336 | //(p6) ssm psr.i |
1342 | ;; | 1337 | ;; |
1343 | mov rp=rpsave | 1338 | mov rp=rpsave |
1344 | mov ar.pfs=pfssave | 1339 | mov ar.pfs=pfssave |
1345 | mov r8=r31 | 1340 | mov r8=r31 |
1346 | ;; | 1341 | ;; |
1347 | srlz.d | 1342 | srlz.d |
1348 | br.ret.sptk rp | 1343 | br.ret.sptk rp |
1349 | 1344 | ||
1350 | END(ia64_call_vsa) | 1345 | END(ia64_call_vsa) |
1351 | 1346 | ||
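Around the branch to the service, ia64_call_vsa snapshots psr into psrsave, tests the I and IC bits, masks both off with rsm for the call, and afterwards re-enables only what was previously set. A standalone sketch of the bit bookkeeping (bit positions 13 and 14 taken from the tbit.nz comments above):

    #include <stdio.h>

    #define IA64_PSR_IC_BIT 13      /* tbit.nz p7,p0=psrsave,13 */
    #define IA64_PSR_I_BIT  14      /* tbit.nz p6,p0=psrsave,14 */

    int main(void)
    {
            /* pretend both delivery (i) and collection (ic) were on */
            unsigned long long psrsave =
                    (1ULL << IA64_PSR_I_BIT) | (1ULL << IA64_PSR_IC_BIT);
            int p6 = (psrsave >> IA64_PSR_I_BIT) & 1;   /* psr.i was set */
            int p7 = (psrsave >> IA64_PSR_IC_BIT) & 1;  /* psr.ic was set */

            /* rsm psr.i | psr.ic ... call ... then (p7) ssm psr.ic */
            printf("after the call: restore ic=%d, i=%d\n", p7, p6);
            return 0;
    }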
1352 | #define INIT_BSPSTORE ((4<<30)-(12<<20)-0x100) | 1347 | #define INIT_BSPSTORE ((4<<30)-(12<<20)-0x100) |
1353 | 1348 | ||
1354 | GLOBAL_ENTRY(vmm_reset_entry) | 1349 | GLOBAL_ENTRY(vmm_reset_entry) |
1355 | //set up ipsr, iip, vpd.vpsr, dcr | 1350 | //set up ipsr, iip, vpd.vpsr, dcr |
1356 | // For IPSR: it/dt/rt=1, i/ic=1, si=1, vm/bn=1 | 1351 | // For IPSR: it/dt/rt=1, i/ic=1, si=1, vm/bn=1 |
1357 | // For DCR: all bits 0 | 1352 | // For DCR: all bits 0 |
1358 | bsw.0 | 1353 | bsw.0 |
1359 | ;; | 1354 | ;; |
1360 | mov r21 =r13 | 1355 | mov r21 =r13 |
1361 | adds r14=-VMM_PT_REGS_SIZE, r12 | 1356 | adds r14=-VMM_PT_REGS_SIZE, r12 |
1362 | ;; | 1357 | ;; |
1363 | movl r6=0x501008826000 // IPSR dt/rt/it:1;i/ic:1, si:1, vm/bn:1 | 1358 | movl r6=0x501008826000 // IPSR dt/rt/it:1;i/ic:1, si:1, vm/bn:1 |
1364 | movl r10=0x8000000000000000 | 1359 | movl r10=0x8000000000000000 |
1365 | adds r16=PT(CR_IIP), r14 | 1360 | adds r16=PT(CR_IIP), r14 |
1366 | adds r20=PT(R1), r14 | 1361 | adds r20=PT(R1), r14 |
1367 | ;; | 1362 | ;; |
1368 | rsm psr.ic | psr.i | 1363 | rsm psr.ic | psr.i |
1369 | ;; | 1364 | ;; |
1370 | srlz.i | 1365 | srlz.i |
1371 | ;; | 1366 | ;; |
1372 | mov ar.rsc = 0 | 1367 | mov ar.rsc = 0 |
1373 | ;; | 1368 | ;; |
1374 | flushrs | 1369 | flushrs |
1375 | ;; | 1370 | ;; |
1376 | mov ar.bspstore = 0 | 1371 | mov ar.bspstore = 0 |
1377 | // clear BSPSTORE | 1372 | // clear BSPSTORE |
1378 | ;; | 1373 | ;; |
1379 | mov cr.ipsr=r6 | 1374 | mov cr.ipsr=r6 |
1380 | mov cr.ifs=r10 | 1375 | mov cr.ifs=r10 |
1381 | ld8 r4 = [r16] // Set init iip for first run. | 1376 | ld8 r4 = [r16] // Set init iip for first run. |
1382 | ld8 r1 = [r20] | 1377 | ld8 r1 = [r20] |
1383 | ;; | 1378 | ;; |
1384 | mov cr.iip=r4 | 1379 | mov cr.iip=r4 |
1385 | adds r16=VMM_VPD_BASE_OFFSET,r13 | 1380 | adds r16=VMM_VPD_BASE_OFFSET,r13 |
1386 | ;; | 1381 | ;; |
1387 | ld8 r18=[r16] | 1382 | ld8 r18=[r16] |
1388 | ;; | 1383 | ;; |
1389 | adds r19=VMM_VPD_VPSR_OFFSET,r18 | 1384 | adds r19=VMM_VPD_VPSR_OFFSET,r18 |
1390 | ;; | 1385 | ;; |
1391 | ld8 r19=[r19] | 1386 | ld8 r19=[r19] |
1392 | mov r17=r0 | 1387 | mov r17=r0 |
1393 | mov r22=r0 | 1388 | mov r22=r0 |
1394 | mov r23=r0 | 1389 | mov r23=r0 |
1395 | br.cond.sptk ia64_vmm_entry | 1390 | br.cond.sptk ia64_vmm_entry |
1396 | br.ret.sptk b0 | 1391 | br.ret.sptk b0 |
1397 | END(vmm_reset_entry) | 1392 | END(vmm_reset_entry) |
diff --git a/arch/ia64/kvm/vtlb.c b/arch/ia64/kvm/vtlb.c index e22b93361e08..6b6307a3bd55 100644 --- a/arch/ia64/kvm/vtlb.c +++ b/arch/ia64/kvm/vtlb.c | |||
@@ -183,8 +183,8 @@ void mark_pages_dirty(struct kvm_vcpu *v, u64 pte, u64 ps) | |||
183 | u64 i, dirty_pages = 1; | 183 | u64 i, dirty_pages = 1; |
184 | u64 base_gfn = (pte&_PAGE_PPN_MASK) >> PAGE_SHIFT; | 184 | u64 base_gfn = (pte&_PAGE_PPN_MASK) >> PAGE_SHIFT; |
185 | spinlock_t *lock = __kvm_va(v->arch.dirty_log_lock_pa); | 185 | spinlock_t *lock = __kvm_va(v->arch.dirty_log_lock_pa); |
186 | void *dirty_bitmap = (void *)v - (KVM_VCPU_OFS + v->vcpu_id * VCPU_SIZE) | 186 | void *dirty_bitmap = (void *)KVM_MEM_DIRTY_LOG_BASE; |
187 | + KVM_MEM_DIRTY_LOG_OFS; | 187 | |
188 | dirty_pages <<= ps <= PAGE_SHIFT ? 0 : ps - PAGE_SHIFT; | 188 | dirty_pages <<= ps <= PAGE_SHIFT ? 0 : ps - PAGE_SHIFT; |
189 | 189 | ||
190 | vmm_spin_lock(lock); | 190 | vmm_spin_lock(lock); |
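The mark_pages_dirty() hunk leaves the page-count arithmetic intact: a translation of size 2^ps bytes dirties 2^(ps - PAGE_SHIFT) base pages once ps exceeds PAGE_SHIFT. A standalone illustration (the PAGE_SHIFT of 14, ia64's 16KB default, is only an example value):

    #include <stdio.h>

    #define PAGE_SHIFT 14   /* example: 16KB base pages */

    int main(void)
    {
            unsigned long long dirty_pages = 1;
            unsigned int ps = 24;   /* e.g. a 16MB translation */

            /* same expression as in mark_pages_dirty() above */
            dirty_pages <<= ps <= PAGE_SHIFT ? 0 : ps - PAGE_SHIFT;
            printf("2^%u-byte mapping -> %llu dirty base pages\n",
                   ps, dirty_pages);        /* prints 1024 */
            return 0;
    }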
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c index 054bcd9439aa..56e12903973c 100644 --- a/arch/ia64/mm/init.c +++ b/arch/ia64/mm/init.c | |||
@@ -692,7 +692,7 @@ int arch_add_memory(int nid, u64 start, u64 size) | |||
692 | pgdat = NODE_DATA(nid); | 692 | pgdat = NODE_DATA(nid); |
693 | 693 | ||
694 | zone = pgdat->node_zones + ZONE_NORMAL; | 694 | zone = pgdat->node_zones + ZONE_NORMAL; |
695 | ret = __add_pages(zone, start_pfn, nr_pages); | 695 | ret = __add_pages(nid, zone, start_pfn, nr_pages); |
696 | 696 | ||
697 | if (ret) | 697 | if (ret) |
698 | printk("%s: Problem encountered in __add_pages() as ret=%d\n", | 698 | printk("%s: Problem encountered in __add_pages() as ret=%d\n", |
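The mm/init.c hunk tracks an upstream signature change: __add_pages() now takes the node id as an explicit first argument instead of deriving it from the zone. A minimal sketch of the resulting call shape, with the variables set up as in arch_add_memory() above:

    pgdat = NODE_DATA(nid);
    zone  = pgdat->node_zones + ZONE_NORMAL;
    ret   = __add_pages(nid, zone, start_pfn, nr_pages);  /* nid explicit */
    if (ret)
            printk("__add_pages() failed: %d\n", ret);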
diff --git a/arch/ia64/sn/kernel/irq.c b/arch/ia64/sn/kernel/irq.c index 0c66dbdd1d72..66fd705e82c0 100644 --- a/arch/ia64/sn/kernel/irq.c +++ b/arch/ia64/sn/kernel/irq.c | |||
@@ -227,14 +227,14 @@ finish_up: | |||
227 | return new_irq_info; | 227 | return new_irq_info; |
228 | } | 228 | } |
229 | 229 | ||
230 | static void sn_set_affinity_irq(unsigned int irq, cpumask_t mask) | 230 | static void sn_set_affinity_irq(unsigned int irq, const struct cpumask *mask) |
231 | { | 231 | { |
232 | struct sn_irq_info *sn_irq_info, *sn_irq_info_safe; | 232 | struct sn_irq_info *sn_irq_info, *sn_irq_info_safe; |
233 | nasid_t nasid; | 233 | nasid_t nasid; |
234 | int slice; | 234 | int slice; |
235 | 235 | ||
236 | nasid = cpuid_to_nasid(first_cpu(mask)); | 236 | nasid = cpuid_to_nasid(cpumask_first(mask)); |
237 | slice = cpuid_to_slice(first_cpu(mask)); | 237 | slice = cpuid_to_slice(cpumask_first(mask)); |
238 | 238 | ||
239 | list_for_each_entry_safe(sn_irq_info, sn_irq_info_safe, | 239 | list_for_each_entry_safe(sn_irq_info, sn_irq_info_safe, |
240 | sn_irq_lh[irq], list) | 240 | sn_irq_lh[irq], list) |
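Editor's note: this is part of the tree-wide cpumask rework in this merge: irq affinity callbacks take a const struct cpumask * instead of a cpumask_t by value, and first_cpu() becomes cpumask_first(). The point is to stop copying whole masks onto the stack, which gets expensive as NR_CPUS grows. A minimal sketch of the two calling conventions (helper names are illustrative):

	/* Old: the whole mask is copied into the callee's frame. */
	static int first_target_old(cpumask_t mask)
	{
		return first_cpu(mask);
	}

	/* New: a const pointer, no copy, same result. */
	static int first_target_new(const struct cpumask *mask)
	{
		return cpumask_first(mask);
	}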
diff --git a/arch/ia64/sn/kernel/msi_sn.c b/arch/ia64/sn/kernel/msi_sn.c index 83f190ffe350..ca553b0429ce 100644 --- a/arch/ia64/sn/kernel/msi_sn.c +++ b/arch/ia64/sn/kernel/msi_sn.c | |||
@@ -151,7 +151,8 @@ int sn_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *entry) | |||
151 | } | 151 | } |
152 | 152 | ||
153 | #ifdef CONFIG_SMP | 153 | #ifdef CONFIG_SMP |
154 | static void sn_set_msi_irq_affinity(unsigned int irq, cpumask_t cpu_mask) | 154 | static void sn_set_msi_irq_affinity(unsigned int irq, |
155 | const struct cpumask *cpu_mask) | ||
155 | { | 156 | { |
156 | struct msi_msg msg; | 157 | struct msi_msg msg; |
157 | int slice; | 158 | int slice; |
@@ -164,7 +165,7 @@ static void sn_set_msi_irq_affinity(unsigned int irq, cpumask_t cpu_mask) | |||
164 | struct sn_pcibus_provider *provider; | 165 | struct sn_pcibus_provider *provider; |
165 | unsigned int cpu; | 166 | unsigned int cpu; |
166 | 167 | ||
167 | cpu = first_cpu(cpu_mask); | 168 | cpu = cpumask_first(cpu_mask); |
168 | sn_irq_info = sn_msi_info[irq].sn_irq_info; | 169 | sn_irq_info = sn_msi_info[irq].sn_irq_info; |
169 | if (sn_irq_info == NULL || sn_irq_info->irq_int_bit >= 0) | 170 | if (sn_irq_info == NULL || sn_irq_info->irq_int_bit >= 0) |
170 | return; | 171 | return; |
@@ -204,7 +205,7 @@ static void sn_set_msi_irq_affinity(unsigned int irq, cpumask_t cpu_mask) | |||
204 | msg.address_lo = (u32)(bus_addr & 0x00000000ffffffff); | 205 | msg.address_lo = (u32)(bus_addr & 0x00000000ffffffff); |
205 | 206 | ||
206 | write_msi_msg(irq, &msg); | 207 | write_msi_msg(irq, &msg); |
207 | irq_desc[irq].affinity = cpu_mask; | 208 | irq_desc[irq].affinity = *cpu_mask; |
208 | } | 209 | } |
209 | #endif /* CONFIG_SMP */ | 210 | #endif /* CONFIG_SMP */ |
210 | 211 | ||
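Editor's note: msi_sn.c follows the same cpumask conversion, with one wrinkle visible in the last changed line of the hunk: irq_desc[].affinity is still a cpumask_t value at this stage of the rework, so the pointer-based callback must store it with an explicit struct copy rather than assigning the pointer:

	/* Copy the mask; don't alias the caller's storage. */
	irq_desc[irq].affinity = *cpu_mask;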
diff --git a/arch/ia64/sn/kernel/sn2/sn_hwperf.c b/arch/ia64/sn/kernel/sn2/sn_hwperf.c index 636588e7e068..be339477f906 100644 --- a/arch/ia64/sn/kernel/sn2/sn_hwperf.c +++ b/arch/ia64/sn/kernel/sn2/sn_hwperf.c | |||
@@ -385,7 +385,6 @@ static int sn_topology_show(struct seq_file *s, void *d) | |||
385 | int j; | 385 | int j; |
386 | const char *slabname; | 386 | const char *slabname; |
387 | int ordinal; | 387 | int ordinal; |
388 | cpumask_t cpumask; | ||
389 | char slice; | 388 | char slice; |
390 | struct cpuinfo_ia64 *c; | 389 | struct cpuinfo_ia64 *c; |
391 | struct sn_hwperf_port_info *ptdata; | 390 | struct sn_hwperf_port_info *ptdata; |
@@ -473,23 +472,21 @@ static int sn_topology_show(struct seq_file *s, void *d) | |||
473 | * CPUs on this node, if any | 472 | * CPUs on this node, if any |
474 | */ | 473 | */ |
475 | if (!SN_HWPERF_IS_IONODE(obj)) { | 474 | if (!SN_HWPERF_IS_IONODE(obj)) { |
476 | cpumask = node_to_cpumask(ordinal); | 475 | for_each_cpu_and(i, cpu_online_mask, |
477 | for_each_online_cpu(i) { | 476 | cpumask_of_node(ordinal)) { |
478 | if (cpu_isset(i, cpumask)) { | 477 | slice = 'a' + cpuid_to_slice(i); |
479 | slice = 'a' + cpuid_to_slice(i); | 478 | c = cpu_data(i); |
480 | c = cpu_data(i); | 479 | seq_printf(s, "cpu %d %s%c local" |
481 | seq_printf(s, "cpu %d %s%c local" | 480 | " freq %luMHz, arch ia64", |
482 | " freq %luMHz, arch ia64", | 481 | i, obj->location, slice, |
483 | i, obj->location, slice, | 482 | c->proc_freq / 1000000); |
484 | c->proc_freq / 1000000); | 483 | for_each_online_cpu(j) { |
485 | for_each_online_cpu(j) { | 484 | seq_printf(s, j ? ":%d" : ", dist %d", |
486 | seq_printf(s, j ? ":%d" : ", dist %d", | 485 | node_distance( |
487 | node_distance( | ||
488 | cpu_to_node(i), | 486 | cpu_to_node(i), |
489 | cpu_to_node(j))); | 487 | cpu_to_node(j))); |
490 | } | ||
491 | seq_putc(s, '\n'); | ||
492 | } | 488 | } |
489 | seq_putc(s, '\n'); | ||
493 | } | 490 | } |
494 | } | 491 | } |
495 | } | 492 | } |
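Editor's note: the sn_hwperf.c rewrite folds a copy-and-test loop into a single masked iterator. node_to_cpumask() returned a cpumask_t by value, which the old code then filtered with cpu_isset(); for_each_cpu_and() walks the intersection of the online mask and cpumask_of_node() directly, removing both the stack copy and the per-cpu test. A side-by-side sketch, where use_cpu() is a hypothetical stand-in for the seq_printf body in the hunk:

	int i;

	/* Old: stack copy of the node mask, then a per-cpu test. */
	cpumask_t cpumask = node_to_cpumask(node);
	for_each_online_cpu(i)
		if (cpu_isset(i, cpumask))
			use_cpu(i);

	/* New: iterate the intersection directly, no copy. */
	for_each_cpu_and(i, cpu_online_mask, cpumask_of_node(node))
		use_cpu(i);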
diff --git a/arch/ia64/sn/kernel/tiocx.c b/arch/ia64/sn/kernel/tiocx.c index a88eba3314d7..3f864238566d 100644 --- a/arch/ia64/sn/kernel/tiocx.c +++ b/arch/ia64/sn/kernel/tiocx.c | |||
@@ -206,8 +206,7 @@ cx_device_register(nasid_t nasid, int part_num, int mfg_num, | |||
206 | cx_dev->dev.parent = NULL; | 206 | cx_dev->dev.parent = NULL; |
207 | cx_dev->dev.bus = &tiocx_bus_type; | 207 | cx_dev->dev.bus = &tiocx_bus_type; |
208 | cx_dev->dev.release = tiocx_bus_release; | 208 | cx_dev->dev.release = tiocx_bus_release; |
209 | snprintf(cx_dev->dev.bus_id, BUS_ID_SIZE, "%d", | 209 | dev_set_name(&cx_dev->dev, "%d", cx_dev->cx_id.nasid); |
210 | cx_dev->cx_id.nasid); | ||
211 | device_register(&cx_dev->dev); | 210 | device_register(&cx_dev->dev); |
212 | get_device(&cx_dev->dev); | 211 | get_device(&cx_dev->dev); |
213 | 212 | ||
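Editor's note: tiocx.c replaces a direct snprintf() into the fixed-size dev.bus_id array with dev_set_name(), the accessor introduced while bus_id was being phased out of struct device. The shape of the change, lifted from the hunk:

	/* Old: format straight into the BUS_ID_SIZE array. */
	snprintf(cx_dev->dev.bus_id, BUS_ID_SIZE, "%d",
		 cx_dev->cx_id.nasid);

	/* New: let the driver core own the name storage. */
	dev_set_name(&cx_dev->dev, "%d", cx_dev->cx_id.nasid);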