author    Len Brown <len.brown@intel.com>  2008-10-22 23:57:26 -0400
committer Len Brown <len.brown@intel.com>  2008-10-23 00:11:07 -0400
commit    057316cc6a5b521b332a1d7ccc871cd60c904c74 (patch)
tree      4333e608da237c73ff69b10878025cca96dcb4c8 /arch/ia64
parent    3e2dab9a1c2deb03c311eb3f83466009147ed4d3 (diff)
parent    2515ddc6db8eb49a79f0fe5e67ff09ac7c81eab4 (diff)

Merge branch 'linus' into test

Conflicts:
	MAINTAINERS
	arch/x86/kernel/acpi/boot.c
	arch/x86/kernel/acpi/sleep.c
	drivers/acpi/Kconfig
	drivers/pnp/Makefile
	drivers/pnp/quirks.c

Signed-off-by: Len Brown <len.brown@intel.com>
Diffstat (limited to 'arch/ia64')
-rw-r--r--  arch/ia64/Kconfig                      10
-rw-r--r--  arch/ia64/hp/common/sba_iommu.c         5
-rw-r--r--  arch/ia64/ia32/binfmt_elf32.c           2
-rw-r--r--  arch/ia64/ia32/ia32_entry.S             4
-rw-r--r--  arch/ia64/ia32/ia32priv.h               4
-rw-r--r--  arch/ia64/ia32/sys_ia32.c              91
-rw-r--r--  arch/ia64/include/asm/a.out.h          32
-rw-r--r--  arch/ia64/include/asm/dma-mapping.h     4
-rw-r--r--  arch/ia64/include/asm/elf.h             2
-rw-r--r--  arch/ia64/include/asm/kvm_host.h        6
-rw-r--r--  arch/ia64/include/asm/pci.h            12
-rw-r--r--  arch/ia64/include/asm/siginfo.h         5
-rw-r--r--  arch/ia64/include/asm/statfs.h         52
-rw-r--r--  arch/ia64/kernel/crash_dump.c           4
-rw-r--r--  arch/ia64/kernel/efi.c                  2
-rw-r--r--  arch/ia64/kernel/setup.c               13
-rw-r--r--  arch/ia64/kernel/smpboot.c              1
-rw-r--r--  arch/ia64/kvm/Kconfig                   2
-rw-r--r--  arch/ia64/kvm/Makefile                  6
-rw-r--r--  arch/ia64/kvm/irq.h                    31
-rw-r--r--  arch/ia64/kvm/kvm-ia64.c               66
-rw-r--r--  arch/ia64/kvm/kvm_minstate.h           23
-rw-r--r--  arch/ia64/kvm/optvfault.S             181
-rw-r--r--  arch/ia64/kvm/process.c                 4
-rw-r--r--  arch/ia64/kvm/vcpu.h                   26
-rw-r--r--  arch/ia64/kvm/vmm_ivt.S                39
-rw-r--r--  arch/ia64/kvm/vtlb.c                   23
-rw-r--r--  arch/ia64/mm/init.c                    18
-rw-r--r--  arch/ia64/pci/pci.c                     7
29 files changed, 347 insertions, 328 deletions
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 48e496fe1e75..912c57db2d21 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -7,6 +7,8 @@ mainmenu "IA-64 Linux Kernel Configuration"
 
 source "init/Kconfig"
 
+source "kernel/Kconfig.freezer"
+
 menu "Processor type and features"
 
 config IA64
@@ -60,14 +62,6 @@ config RWSEM_XCHGADD_ALGORITHM
 	bool
 	default y
 
-config ARCH_HAS_ILOG2_U32
-	bool
-	default n
-
-config ARCH_HAS_ILOG2_U64
-	bool
-	default n
-
 config HUGETLB_PAGE_SIZE_VARIABLE
 	bool
 	depends on HUGETLB_PAGE
diff --git a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c
index 4956be40d7b5..d98f0f4ff83f 100644
--- a/arch/ia64/hp/common/sba_iommu.c
+++ b/arch/ia64/hp/common/sba_iommu.c
@@ -2070,14 +2070,13 @@ sba_init(void)
 	if (!ia64_platform_is("hpzx1") && !ia64_platform_is("hpzx1_swiotlb"))
 		return 0;
 
-#if defined(CONFIG_IA64_GENERIC) && defined(CONFIG_CRASH_DUMP) && \
-	defined(CONFIG_PROC_FS)
+#if defined(CONFIG_IA64_GENERIC)
 	/* If we are booting a kdump kernel, the sba_iommu will
 	 * cause devices that were not shutdown properly to MCA
 	 * as soon as they are turned back on.  Our only option for
 	 * a successful kdump kernel boot is to use the swiotlb.
 	 */
-	if (elfcorehdr_addr < ELFCORE_ADDR_MAX) {
+	if (is_kdump_kernel()) {
 		if (swiotlb_late_init_with_default_size(64 * (1<<20)) != 0)
 			panic("Unable to initialize software I/O TLB:"
 			      " Try machvec=dig boot option");
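
For reference, the is_kdump_kernel() helper that replaces the open-coded
elfcorehdr_addr comparison is roughly the following (a sketch assuming the
generic <linux/crash_dump.h> of this series; not part of this diff):

/* Sketch: a kdump kernel is one that was handed a valid elfcorehdr=
 * address by the crashed kernel; ELFCORE_ADDR_MAX means "not set". */
static inline int is_kdump_kernel(void)
{
	return (elfcorehdr_addr != ELFCORE_ADDR_MAX) ? 1 : 0;
}
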
diff --git a/arch/ia64/ia32/binfmt_elf32.c b/arch/ia64/ia32/binfmt_elf32.c
index 4f0c30c38e99..f92bdaac8976 100644
--- a/arch/ia64/ia32/binfmt_elf32.c
+++ b/arch/ia64/ia32/binfmt_elf32.c
@@ -41,7 +41,7 @@ randomize_stack_top(unsigned long stack_top);
 #define elf_map				elf32_map
 
 #undef SET_PERSONALITY
-#define SET_PERSONALITY(ex, ibcs2)	elf32_set_personality()
+#define SET_PERSONALITY(ex)		elf32_set_personality()
 
 #define elf_read_implies_exec(ex, have_pt_gnu_stack)	(!(have_pt_gnu_stack))
 
diff --git a/arch/ia64/ia32/ia32_entry.S b/arch/ia64/ia32/ia32_entry.S
index ff88c48c5d19..53505bb04771 100644
--- a/arch/ia64/ia32/ia32_entry.S
+++ b/arch/ia64/ia32/ia32_entry.S
@@ -251,8 +251,8 @@ ia32_syscall_table:
 	data8 compat_sys_setrlimit	/* 75 */
 	data8 compat_sys_old_getrlimit
 	data8 compat_sys_getrusage
-	data8 sys32_gettimeofday
-	data8 sys32_settimeofday
+	data8 compat_sys_gettimeofday
+	data8 compat_sys_settimeofday
 	data8 sys32_getgroups16	/* 80 */
 	data8 sys32_setgroups16
 	data8 sys32_old_select
diff --git a/arch/ia64/ia32/ia32priv.h b/arch/ia64/ia32/ia32priv.h
index dd0c53687a96..0f15349c3c6b 100644
--- a/arch/ia64/ia32/ia32priv.h
+++ b/arch/ia64/ia32/ia32priv.h
@@ -332,8 +332,8 @@ void ia64_elf32_init(struct pt_regs *regs);
 #define ELF_PLATFORM	NULL
 
 #ifdef __KERNEL__
-# define SET_PERSONALITY(EX,IBCS2)				\
-	(current->personality = (IBCS2) ? PER_SVR4 : PER_LINUX)
+# define SET_PERSONALITY(EX)					\
+	(current->personality = PER_LINUX)
 #endif
 
 #define IA32_EFLAG	0x200
diff --git a/arch/ia64/ia32/sys_ia32.c b/arch/ia64/ia32/sys_ia32.c
index bf196cbb3796..f4430bb4bbdc 100644
--- a/arch/ia64/ia32/sys_ia32.c
+++ b/arch/ia64/ia32/sys_ia32.c
@@ -118,41 +118,6 @@ sys32_execve (char __user *name, compat_uptr_t __user *argv, compat_uptr_t __use
 	return error;
 }
 
-int cp_compat_stat(struct kstat *stat, struct compat_stat __user *ubuf)
-{
-	compat_ino_t ino;
-	int err;
-
-	if ((u64) stat->size > MAX_NON_LFS ||
-	    !old_valid_dev(stat->dev) ||
-	    !old_valid_dev(stat->rdev))
-		return -EOVERFLOW;
-
-	ino = stat->ino;
-	if (sizeof(ino) < sizeof(stat->ino) && ino != stat->ino)
-		return -EOVERFLOW;
-
-	if (clear_user(ubuf, sizeof(*ubuf)))
-		return -EFAULT;
-
-	err  = __put_user(old_encode_dev(stat->dev), &ubuf->st_dev);
-	err |= __put_user(ino, &ubuf->st_ino);
-	err |= __put_user(stat->mode, &ubuf->st_mode);
-	err |= __put_user(stat->nlink, &ubuf->st_nlink);
-	err |= __put_user(high2lowuid(stat->uid), &ubuf->st_uid);
-	err |= __put_user(high2lowgid(stat->gid), &ubuf->st_gid);
-	err |= __put_user(old_encode_dev(stat->rdev), &ubuf->st_rdev);
-	err |= __put_user(stat->size, &ubuf->st_size);
-	err |= __put_user(stat->atime.tv_sec, &ubuf->st_atime);
-	err |= __put_user(stat->atime.tv_nsec, &ubuf->st_atime_nsec);
-	err |= __put_user(stat->mtime.tv_sec, &ubuf->st_mtime);
-	err |= __put_user(stat->mtime.tv_nsec, &ubuf->st_mtime_nsec);
-	err |= __put_user(stat->ctime.tv_sec, &ubuf->st_ctime);
-	err |= __put_user(stat->ctime.tv_nsec, &ubuf->st_ctime_nsec);
-	err |= __put_user(stat->blksize, &ubuf->st_blksize);
-	err |= __put_user(stat->blocks, &ubuf->st_blocks);
-	return err;
-}
 
 #if PAGE_SHIFT > IA32_PAGE_SHIFT
 
@@ -1148,68 +1113,12 @@ sys32_pipe (int __user *fd)
 	return retval;
 }
 
-static inline long
-get_tv32 (struct timeval *o, struct compat_timeval __user *i)
-{
-	return (!access_ok(VERIFY_READ, i, sizeof(*i)) ||
-		(__get_user(o->tv_sec, &i->tv_sec) | __get_user(o->tv_usec, &i->tv_usec)));
-}
-
-static inline long
-put_tv32 (struct compat_timeval __user *o, struct timeval *i)
-{
-	return (!access_ok(VERIFY_WRITE, o, sizeof(*o)) ||
-		(__put_user(i->tv_sec, &o->tv_sec) | __put_user(i->tv_usec, &o->tv_usec)));
-}
-
 asmlinkage unsigned long
 sys32_alarm (unsigned int seconds)
 {
 	return alarm_setitimer(seconds);
 }
 
-/* Translations due to time_t size differences.  Which affects all
-   sorts of things, like timeval and itimerval.  */
-
-extern struct timezone sys_tz;
-
-asmlinkage long
-sys32_gettimeofday (struct compat_timeval __user *tv, struct timezone __user *tz)
-{
-	if (tv) {
-		struct timeval ktv;
-		do_gettimeofday(&ktv);
-		if (put_tv32(tv, &ktv))
-			return -EFAULT;
-	}
-	if (tz) {
-		if (copy_to_user(tz, &sys_tz, sizeof(sys_tz)))
-			return -EFAULT;
-	}
-	return 0;
-}
-
-asmlinkage long
-sys32_settimeofday (struct compat_timeval __user *tv, struct timezone __user *tz)
-{
-	struct timeval ktv;
-	struct timespec kts;
-	struct timezone ktz;
-
-	if (tv) {
-		if (get_tv32(&ktv, tv))
-			return -EFAULT;
-		kts.tv_sec = ktv.tv_sec;
-		kts.tv_nsec = ktv.tv_usec * 1000;
-	}
-	if (tz) {
-		if (copy_from_user(&ktz, tz, sizeof(ktz)))
-			return -EFAULT;
-	}
-
-	return do_sys_settimeofday(tv ? &kts : NULL, tz ? &ktz : NULL);
-}
-
 struct sel_arg_struct {
 	unsigned int n;
 	unsigned int inp;
diff --git a/arch/ia64/include/asm/a.out.h b/arch/ia64/include/asm/a.out.h
deleted file mode 100644
index 193dcfb67596..000000000000
--- a/arch/ia64/include/asm/a.out.h
+++ /dev/null
@@ -1,32 +0,0 @@
-#ifndef _ASM_IA64_A_OUT_H
-#define _ASM_IA64_A_OUT_H
-
-/*
- * No a.out format has been (or should be) defined so this file is
- * just a dummy that allows us to get binfmt_elf compiled.  It
- * probably would be better to clean up binfmt_elf.c so it does not
- * necessarily depend on there being a.out support.
- *
- *	Modified 1998-2002
- *		David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co.
- */
-
-#include <linux/types.h>
-
-struct exec {
-	unsigned long a_info;
-	unsigned long a_text;
-	unsigned long a_data;
-	unsigned long a_bss;
-	unsigned long a_entry;
-};
-
-#define N_TXTADDR(x)	0
-#define N_DATADDR(x)	0
-#define N_BSSADDR(x)	0
-#define N_DRSIZE(x)	0
-#define N_TRSIZE(x)	0
-#define N_SYMSIZE(x)	0
-#define N_TXTOFF(x)	0
-
-#endif /* _ASM_IA64_A_OUT_H */
diff --git a/arch/ia64/include/asm/dma-mapping.h b/arch/ia64/include/asm/dma-mapping.h
index 9f0df9bd46b7..06ff1ba21465 100644
--- a/arch/ia64/include/asm/dma-mapping.h
+++ b/arch/ia64/include/asm/dma-mapping.h
@@ -8,7 +8,9 @@
 #include <asm/machvec.h>
 #include <linux/scatterlist.h>
 
-#define dma_alloc_coherent	platform_dma_alloc_coherent
+#define dma_alloc_coherent(dev, size, handle, gfp)	\
+	platform_dma_alloc_coherent(dev, size, handle, (gfp) | GFP_DMA)
+
 /* coherent mem. is cheap */
 static inline void *
 dma_alloc_noncoherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
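
To illustrate what the new wrapper changes for callers (a hypothetical call
site, not from this diff): whatever mask a driver passes is now OR'd with
GFP_DMA before it reaches the machine vector.

/* Hypothetical caller: the driver still writes GFP_KERNEL, but the
 * machine vector now sees GFP_KERNEL | GFP_DMA. */
static void *alloc_desc_ring(struct device *dev, dma_addr_t *handle)
{
	return dma_alloc_coherent(dev, 4096, handle, GFP_KERNEL);
}
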
diff --git a/arch/ia64/include/asm/elf.h b/arch/ia64/include/asm/elf.h
index 2acb6b6543c9..86eddee029cb 100644
--- a/arch/ia64/include/asm/elf.h
+++ b/arch/ia64/include/asm/elf.h
@@ -202,7 +202,7 @@ extern void ia64_elf_core_copy_regs (struct pt_regs *src, elf_gregset_t dst);
    relevant until we have real hardware to play with... */
 #define ELF_PLATFORM	NULL
 
-#define SET_PERSONALITY(ex, ibcs2)	set_personality(PER_LINUX)
+#define SET_PERSONALITY(ex)		set_personality(PER_LINUX)
 #define elf_read_implies_exec(ex, executable_stack)					\
 	((executable_stack!=EXSTACK_DISABLE_X) && ((ex).e_flags & EF_IA_64_LINUX_EXECUTABLE_STACK) != 0)
 
diff --git a/arch/ia64/include/asm/kvm_host.h b/arch/ia64/include/asm/kvm_host.h
index 1efe513a9941..85db124d37f6 100644
--- a/arch/ia64/include/asm/kvm_host.h
+++ b/arch/ia64/include/asm/kvm_host.h
@@ -132,7 +132,7 @@
 #define GPFN_IOSAPIC		(4UL << 60)	/* IOSAPIC base */
 #define GPFN_LEGACY_IO		(5UL << 60)	/* Legacy I/O base */
 #define GPFN_GFW		(6UL << 60)	/* Guest Firmware */
-#define GPFN_HIGH_MMIO		(7UL << 60)	/* High MMIO range */
+#define GPFN_PHYS_MMIO		(7UL << 60)	/* Directed MMIO Range */
 
 #define GPFN_IO_MASK		(7UL << 60)	/* Guest pfn is I/O type */
 #define GPFN_INV_MASK		(1UL << 63)	/* Guest pfn is invalid */
@@ -413,6 +413,10 @@ struct kvm_arch {
 	struct kvm_ioapic *vioapic;
 	struct kvm_vm_stat stat;
 	struct kvm_sal_data rdv_sal_data;
+
+	struct list_head assigned_dev_head;
+	struct dmar_domain *intel_iommu_domain;
+	struct hlist_head irq_ack_notifier_list;
 };
 
 union cpuid3_t {
diff --git a/arch/ia64/include/asm/pci.h b/arch/ia64/include/asm/pci.h
index 0149097b736d..ce342fb74246 100644
--- a/arch/ia64/include/asm/pci.h
+++ b/arch/ia64/include/asm/pci.h
@@ -95,16 +95,8 @@ extern int pci_mmap_page_range (struct pci_dev *dev, struct vm_area_struct *vma,
 		enum pci_mmap_state mmap_state, int write_combine);
 #define HAVE_PCI_LEGACY
 extern int pci_mmap_legacy_page_range(struct pci_bus *bus,
-				      struct vm_area_struct *vma);
-extern ssize_t pci_read_legacy_io(struct kobject *kobj,
-				  struct bin_attribute *bin_attr,
-				  char *buf, loff_t off, size_t count);
-extern ssize_t pci_write_legacy_io(struct kobject *kobj,
-				   struct bin_attribute *bin_attr,
-				   char *buf, loff_t off, size_t count);
-extern int pci_mmap_legacy_mem(struct kobject *kobj,
-			       struct bin_attribute *attr,
-			       struct vm_area_struct *vma);
+				      struct vm_area_struct *vma,
+				      enum pci_mmap_state mmap_state);
 
 #define pci_get_legacy_mem	platform_pci_get_legacy_mem
 #define pci_legacy_read		platform_pci_legacy_read
diff --git a/arch/ia64/include/asm/siginfo.h b/arch/ia64/include/asm/siginfo.h
index 9294e4b0c8bc..118d42979003 100644
--- a/arch/ia64/include/asm/siginfo.h
+++ b/arch/ia64/include/asm/siginfo.h
@@ -113,11 +113,6 @@ typedef struct siginfo {
 #undef NSIGSEGV
 #define NSIGSEGV	3
 
-/*
- * SIGTRAP si_codes
- */
-#define TRAP_BRANCH	(__SI_FAULT|3)	/* process taken branch trap */
-#define TRAP_HWBKPT	(__SI_FAULT|4)	/* hardware breakpoint or watchpoint */
 #undef NSIGTRAP
 #define NSIGTRAP	4
 
diff --git a/arch/ia64/include/asm/statfs.h b/arch/ia64/include/asm/statfs.h
index 811097974f31..1e589669de56 100644
--- a/arch/ia64/include/asm/statfs.h
+++ b/arch/ia64/include/asm/statfs.h
@@ -8,55 +8,13 @@
  *	David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co
  */
 
-#ifndef __KERNEL_STRICT_NAMES
-# include <linux/types.h>
-typedef __kernel_fsid_t	fsid_t;
-#endif
-
 /*
- * This is ugly --- we're already 64-bit, so just duplicate the definitions
+ * We need compat_statfs64 to be packed, because the i386 ABI won't
+ * add padding at the end to bring it to a multiple of 8 bytes, but
+ * the IA64 ABI will.
  */
-struct statfs {
-	long f_type;
-	long f_bsize;
-	long f_blocks;
-	long f_bfree;
-	long f_bavail;
-	long f_files;
-	long f_ffree;
-	__kernel_fsid_t f_fsid;
-	long f_namelen;
-	long f_frsize;
-	long f_spare[5];
-};
-
-
-struct statfs64 {
-	long f_type;
-	long f_bsize;
-	long f_blocks;
-	long f_bfree;
-	long f_bavail;
-	long f_files;
-	long f_ffree;
-	__kernel_fsid_t f_fsid;
-	long f_namelen;
-	long f_frsize;
-	long f_spare[5];
-};
+#define ARCH_PACK_COMPAT_STATFS64 __attribute__((packed,aligned(4)))
 
-struct compat_statfs64 {
-	__u32	f_type;
-	__u32	f_bsize;
-	__u64	f_blocks;
-	__u64	f_bfree;
-	__u64	f_bavail;
-	__u64	f_files;
-	__u64	f_ffree;
-	__kernel_fsid_t f_fsid;
-	__u32	f_namelen;
-	__u32	f_frsize;
-	__u32	f_spare[5];
-} __attribute__((packed));
+#include <asm-generic/statfs.h>
 
 #endif /* _ASM_IA64_STATFS_H */
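
The packing comment above can be checked with a small host-side experiment
(illustrative only; the struct names here are made up):

#include <stdio.h>

/* On an LP64 ABI the plain struct is padded to a multiple of 8 because
 * of its 8-byte member; packed,aligned(4) drops that tail padding, which
 * is exactly what compat_statfs64 needs to match the i386 layout. */
struct plain   { unsigned long long a; unsigned int b; };
struct packed4 { unsigned long long a; unsigned int b; }
	__attribute__((packed, aligned(4)));

int main(void)
{
	printf("%zu %zu\n", sizeof(struct plain), sizeof(struct packed4));
	/* typically prints "16 12" on an LP64 target */
	return 0;
}
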
diff --git a/arch/ia64/kernel/crash_dump.c b/arch/ia64/kernel/crash_dump.c
index da60e90eeeb1..23e91290e41f 100644
--- a/arch/ia64/kernel/crash_dump.c
+++ b/arch/ia64/kernel/crash_dump.c
@@ -8,10 +8,14 @@
 
 #include <linux/errno.h>
 #include <linux/types.h>
+#include <linux/crash_dump.h>
 
 #include <asm/page.h>
 #include <asm/uaccess.h>
 
+/* Stores the physical address of elf header of crash image. */
+unsigned long long elfcorehdr_addr = ELFCORE_ADDR_MAX;
+
 /**
  * copy_oldmem_page - copy one page from "oldmem"
  * @pfn: page frame number to be copied
diff --git a/arch/ia64/kernel/efi.c b/arch/ia64/kernel/efi.c
index 51b75cea7018..efaff15d8cf1 100644
--- a/arch/ia64/kernel/efi.c
+++ b/arch/ia64/kernel/efi.c
@@ -1335,7 +1335,7 @@ kdump_find_rsvd_region (unsigned long size, struct rsvd_region *r, int n)
 }
 #endif
 
-#ifdef CONFIG_PROC_VMCORE
+#ifdef CONFIG_CRASH_DUMP
 /* locate the size find a the descriptor at a certain address */
 unsigned long __init
 vmcore_find_descriptor_size (unsigned long address)
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index de636b215677..916ba898237f 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -352,7 +352,7 @@ reserve_memory (void)
 	}
 #endif
 
-#ifdef CONFIG_PROC_VMCORE
+#ifdef CONFIG_CRASH_KERNEL
 	if (reserve_elfcorehdr(&rsvd_region[n].start,
 			       &rsvd_region[n].end) == 0)
 		n++;
@@ -478,7 +478,12 @@ static __init int setup_nomca(char *s)
 }
 early_param("nomca", setup_nomca);
 
-#ifdef CONFIG_PROC_VMCORE
+/*
+ * Note: elfcorehdr_addr is not just limited to vmcore. It is also used by
+ * is_kdump_kernel() to determine if we are booting after a panic. Hence
+ * ifdef it under CONFIG_CRASH_DUMP and not CONFIG_PROC_VMCORE.
+ */
+#ifdef CONFIG_CRASH_DUMP
 /* elfcorehdr= specifies the location of elf core header
  * stored by the crashed kernel.
  */
@@ -502,11 +507,11 @@ int __init reserve_elfcorehdr(unsigned long *start, unsigned long *end)
 	 * to work properly.
 	 */
 
-	if (elfcorehdr_addr >= ELFCORE_ADDR_MAX)
+	if (!is_vmcore_usable())
 		return -EINVAL;
 
 	if ((length = vmcore_find_descriptor_size(elfcorehdr_addr)) == 0) {
-		elfcorehdr_addr = ELFCORE_ADDR_MAX;
+		vmcore_unusable();
 		return -EINVAL;
 	}
 
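
The two helpers this hunk switches to are, approximately, the following
generic definitions (a sketch assuming the <linux/crash_dump.h> introduced
by this series; not part of the ia64 diff itself):

/* Sketch: is_vmcore_usable() also rejects an elfcorehdr_addr that has
 * been poisoned with ELFCORE_ADDR_ERR, and vmcore_unusable() does the
 * poisoning instead of open-coding elfcorehdr_addr = ELFCORE_ADDR_MAX. */
static inline int is_vmcore_usable(void)
{
	return is_kdump_kernel() && elfcorehdr_addr != ELFCORE_ADDR_ERR;
}

static inline void vmcore_unusable(void)
{
	if (is_kdump_kernel())
		elfcorehdr_addr = ELFCORE_ADDR_ERR;
}
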
diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c
index d8f05e504fbf..1dcbb85fc4ee 100644
--- a/arch/ia64/kernel/smpboot.c
+++ b/arch/ia64/kernel/smpboot.c
@@ -401,6 +401,7 @@ smp_callin (void)
 	spin_lock(&vector_lock);
 	/* Setup the per cpu irq handling data structures */
 	__setup_vector_irq(cpuid);
+	notify_cpu_starting(cpuid);
 	cpu_set(cpuid, cpu_online_map);
 	per_cpu(cpu_state, cpuid) = CPU_ONLINE;
 	spin_unlock(&vector_lock);
diff --git a/arch/ia64/kvm/Kconfig b/arch/ia64/kvm/Kconfig
index 7914e4828504..8e99fed6b3fd 100644
--- a/arch/ia64/kvm/Kconfig
+++ b/arch/ia64/kvm/Kconfig
@@ -46,4 +46,6 @@ config KVM_INTEL
 config KVM_TRACE
 	bool
 
+source drivers/virtio/Kconfig
+
 endif # VIRTUALIZATION
diff --git a/arch/ia64/kvm/Makefile b/arch/ia64/kvm/Makefile
index bf22fb9e6dcf..cf37f8f490c0 100644
--- a/arch/ia64/kvm/Makefile
+++ b/arch/ia64/kvm/Makefile
@@ -44,7 +44,11 @@ EXTRA_CFLAGS += -Ivirt/kvm -Iarch/ia64/kvm/
 EXTRA_AFLAGS += -Ivirt/kvm -Iarch/ia64/kvm/
 
 common-objs = $(addprefix ../../../virt/kvm/, kvm_main.o ioapic.o \
-		coalesced_mmio.o)
+		coalesced_mmio.o irq_comm.o)
+
+ifeq ($(CONFIG_DMAR),y)
+common-objs += $(addprefix ../../../virt/kvm/, vtd.o)
+endif
 
 kvm-objs := $(common-objs) kvm-ia64.o kvm_fw.o
 obj-$(CONFIG_KVM) += kvm.o
diff --git a/arch/ia64/kvm/irq.h b/arch/ia64/kvm/irq.h
new file mode 100644
index 000000000000..c6786e8b1bf4
--- /dev/null
+++ b/arch/ia64/kvm/irq.h
@@ -0,0 +1,31 @@
+/*
+ * irq.h: In-kernel interrupt controller related definitions
+ * Copyright (c) 2008, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+ * Place - Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * Authors:
+ *   Xiantao Zhang <xiantao.zhang@intel.com>
+ *
+ */
+
+#ifndef __IRQ_H
+#define __IRQ_H
+
+static inline int irqchip_in_kernel(struct kvm *kvm)
+{
+	return 1;
+}
+
+#endif
diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
index cd0d1a7284b7..c0699f0e35a9 100644
--- a/arch/ia64/kvm/kvm-ia64.c
+++ b/arch/ia64/kvm/kvm-ia64.c
@@ -31,6 +31,7 @@
 #include <linux/bitops.h>
 #include <linux/hrtimer.h>
 #include <linux/uaccess.h>
+#include <linux/intel-iommu.h>
 
 #include <asm/pgtable.h>
 #include <asm/gcc_intrin.h>
@@ -45,6 +46,7 @@
 #include "iodev.h"
 #include "ioapic.h"
 #include "lapic.h"
+#include "irq.h"
 
 static unsigned long kvm_vmm_base;
 static unsigned long kvm_vsa_base;
@@ -179,12 +181,16 @@ int kvm_dev_ioctl_check_extension(long ext)
 	switch (ext) {
 	case KVM_CAP_IRQCHIP:
 	case KVM_CAP_USER_MEMORY:
+	case KVM_CAP_MP_STATE:
 
 		r = 1;
 		break;
 	case KVM_CAP_COALESCED_MMIO:
 		r = KVM_COALESCED_MMIO_PAGE_OFFSET;
 		break;
+	case KVM_CAP_IOMMU:
+		r = intel_iommu_found();
+		break;
 	default:
 		r = 0;
 	}
@@ -771,6 +777,7 @@ static void kvm_init_vm(struct kvm *kvm)
 	 */
 	kvm_build_io_pmt(kvm);
 
+	INIT_LIST_HEAD(&kvm->arch.assigned_dev_head);
 }
 
 struct kvm *kvm_arch_create_vm(void)
@@ -1334,6 +1341,10 @@ static void kvm_release_vm_pages(struct kvm *kvm)
 
 void kvm_arch_destroy_vm(struct kvm *kvm)
 {
+	kvm_iommu_unmap_guest(kvm);
+#ifdef KVM_CAP_DEVICE_ASSIGNMENT
+	kvm_free_all_assigned_devices(kvm);
+#endif
 	kfree(kvm->arch.vioapic);
 	kvm_release_vm_pages(kvm);
 	kvm_free_physmem(kvm);
@@ -1435,17 +1446,24 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
 		int user_alloc)
 {
 	unsigned long i;
-	struct page *page;
+	unsigned long pfn;
 	int npages = mem->memory_size >> PAGE_SHIFT;
 	struct kvm_memory_slot *memslot = &kvm->memslots[mem->slot];
 	unsigned long base_gfn = memslot->base_gfn;
 
 	for (i = 0; i < npages; i++) {
-		page = gfn_to_page(kvm, base_gfn + i);
-		kvm_set_pmt_entry(kvm, base_gfn + i,
-				page_to_pfn(page) << PAGE_SHIFT,
-				_PAGE_AR_RWX|_PAGE_MA_WB);
-		memslot->rmap[i] = (unsigned long)page;
+		pfn = gfn_to_pfn(kvm, base_gfn + i);
+		if (!kvm_is_mmio_pfn(pfn)) {
+			kvm_set_pmt_entry(kvm, base_gfn + i,
+					pfn << PAGE_SHIFT,
+					_PAGE_AR_RWX | _PAGE_MA_WB);
+			memslot->rmap[i] = (unsigned long)pfn_to_page(pfn);
+		} else {
+			kvm_set_pmt_entry(kvm, base_gfn + i,
+					GPFN_PHYS_MMIO | (pfn << PAGE_SHIFT),
+					_PAGE_MA_UC);
+			memslot->rmap[i] = 0;
+		}
 	}
 
 	return 0;
@@ -1789,11 +1807,43 @@ int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
 				struct kvm_mp_state *mp_state)
 {
-	return -EINVAL;
+	vcpu_load(vcpu);
+	mp_state->mp_state = vcpu->arch.mp_state;
+	vcpu_put(vcpu);
+	return 0;
+}
+
+static int vcpu_reset(struct kvm_vcpu *vcpu)
+{
+	int r;
+	long psr;
+	local_irq_save(psr);
+	r = kvm_insert_vmm_mapping(vcpu);
+	if (r)
+		goto fail;
+
+	vcpu->arch.launched = 0;
+	kvm_arch_vcpu_uninit(vcpu);
+	r = kvm_arch_vcpu_init(vcpu);
+	if (r)
+		goto fail;
+
+	kvm_purge_vmm_mapping(vcpu);
+	r = 0;
+fail:
+	local_irq_restore(psr);
+	return r;
 }
 
 int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
 				struct kvm_mp_state *mp_state)
 {
-	return -EINVAL;
+	int r = 0;
+
+	vcpu_load(vcpu);
+	vcpu->arch.mp_state = mp_state->mp_state;
+	if (vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)
+		r = vcpu_reset(vcpu);
+	vcpu_put(vcpu);
+	return r;
 }
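
With these handlers in place (and KVM_CAP_MP_STATE advertised earlier in
this file), userspace can drive the new path through the standard mp_state
ioctls. A minimal sketch, assuming a vcpu fd obtained via KVM_CREATE_VCPU:

#include <linux/kvm.h>
#include <sys/ioctl.h>

/* Minimal userspace sketch: read a vcpu's mp_state, then force a reset
 * by writing KVM_MP_STATE_UNINITIALIZED, which on ia64 now reaches
 * vcpu_reset() above.  Returns 0 on success, -1 on ioctl failure. */
static int reset_vcpu(int vcpu_fd)
{
	struct kvm_mp_state st;

	if (ioctl(vcpu_fd, KVM_GET_MP_STATE, &st) < 0)
		return -1;
	st.mp_state = KVM_MP_STATE_UNINITIALIZED;
	return ioctl(vcpu_fd, KVM_SET_MP_STATE, &st);
}
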
diff --git a/arch/ia64/kvm/kvm_minstate.h b/arch/ia64/kvm/kvm_minstate.h
index 13980d9b8bcf..2cc41d17cf99 100644
--- a/arch/ia64/kvm/kvm_minstate.h
+++ b/arch/ia64/kvm/kvm_minstate.h
@@ -50,27 +50,18 @@
 
 #define PAL_VSA_SYNC_READ						\
 	/* begin to call pal vps sync_read */				\
+{.mii;									\
 	add r25 = VMM_VPD_BASE_OFFSET, r21;				\
-	adds r20 = VMM_VCPU_VSA_BASE_OFFSET, r21;  /* entry point */	\
+	nop 0x0;							\
+	mov r24=ip;							\
 	;;								\
+}									\
+{.mmb									\
+	add r24=0x20, r24;						\
 	ld8 r25 = [r25];	/* read vpd base */			\
-	ld8 r20 = [r20];						\
-	;;								\
-	add r20 = PAL_VPS_SYNC_READ,r20;				\
-	;;								\
-{ .mii;									\
-	nop 0x0;							\
-	mov r24 = ip;							\
-	mov b0 = r20;							\
+	br.cond.sptk kvm_vps_sync_read;		/*call the service*/	\
 	;;								\
 };									\
-{ .mmb;									\
-	add r24 = 0x20, r24;						\
-	nop 0x0;							\
-	br.cond.sptk b0;	/* call the service */			\
-	;;								\
-};
-
 
 
 #define KVM_MINSTATE_GET_CURRENT(reg)	mov reg=r21
diff --git a/arch/ia64/kvm/optvfault.S b/arch/ia64/kvm/optvfault.S
index e4f15d641b22..634abad979b5 100644
--- a/arch/ia64/kvm/optvfault.S
+++ b/arch/ia64/kvm/optvfault.S
@@ -1,9 +1,12 @@
 /*
- * arch/ia64/vmx/optvfault.S
+ * arch/ia64/kvm/optvfault.S
  * optimize virtualization fault handler
  *
  * Copyright (C) 2006 Intel Co
  *	Xuefei Xu (Anthony Xu) <anthony.xu@intel.com>
+ * Copyright (C) 2008 Intel Co
+ *	Add the support for Tukwila processors.
+ *	Xiantao Zhang <xiantao.zhang@intel.com>
  */
 
 #include <asm/asmmacro.h>
@@ -20,6 +23,98 @@
 #define ACCE_MOV_TO_PSR
 #define ACCE_THASH
 
+#define VMX_VPS_SYNC_READ				\
+	add r16=VMM_VPD_BASE_OFFSET,r21;		\
+	mov r17 = b0;					\
+	mov r18 = r24;					\
+	mov r19 = r25;					\
+	mov r20 = r31;					\
+	;;						\
+{.mii;							\
+	ld8 r16 = [r16];				\
+	nop 0x0;					\
+	mov r24 = ip;					\
+	;;						\
+};							\
+{.mmb;							\
+	add r24=0x20, r24;				\
+	mov r25 =r16;					\
+	br.sptk.many kvm_vps_sync_read;			\
+};							\
+	mov b0 = r17;					\
+	mov r24 = r18;					\
+	mov r25 = r19;					\
+	mov r31 = r20
+
+ENTRY(kvm_vps_entry)
+	adds r29 = VMM_VCPU_VSA_BASE_OFFSET,r21
+	;;
+	ld8 r29 = [r29]
+	;;
+	add r29 = r29, r30
+	;;
+	mov b0 = r29
+	br.sptk.many b0
+END(kvm_vps_entry)
+
+/*
+ *	Inputs:
+ *	r24 : return address
+ *	r25 : vpd
+ *	r29 : scratch
+ *
+ */
+GLOBAL_ENTRY(kvm_vps_sync_read)
+	movl r30 = PAL_VPS_SYNC_READ
+	;;
+	br.sptk.many kvm_vps_entry
+END(kvm_vps_sync_read)
+
+/*
+ *	Inputs:
+ *	r24 : return address
+ *	r25 : vpd
+ *	r29 : scratch
+ *
+ */
+GLOBAL_ENTRY(kvm_vps_sync_write)
+	movl r30 = PAL_VPS_SYNC_WRITE
+	;;
+	br.sptk.many kvm_vps_entry
+END(kvm_vps_sync_write)
+
+/*
+ *	Inputs:
+ *	r23 : pr
+ *	r24 : guest b0
+ *	r25 : vpd
+ *
+ */
+GLOBAL_ENTRY(kvm_vps_resume_normal)
+	movl r30 = PAL_VPS_RESUME_NORMAL
+	;;
+	mov pr=r23,-2
+	br.sptk.many kvm_vps_entry
+END(kvm_vps_resume_normal)
+
+/*
+ *	Inputs:
+ *	r23 : pr
+ *	r24 : guest b0
+ *	r25 : vpd
+ *	r17 : isr
+ */
+GLOBAL_ENTRY(kvm_vps_resume_handler)
+	movl r30 = PAL_VPS_RESUME_HANDLER
+	;;
+	ld8 r27=[r25]
+	shr r17=r17,IA64_ISR_IR_BIT
+	;;
+	dep r27=r17,r27,63,1	// bit 63 of r27 indicate whether enable CFLE
+	mov pr=r23,-2
+	br.sptk.many kvm_vps_entry
+END(kvm_vps_resume_handler)
+
 //mov r1=ar3
 GLOBAL_ENTRY(kvm_asm_mov_from_ar)
 #ifndef ACCE_MOV_FROM_AR
@@ -157,11 +252,11 @@ GLOBAL_ENTRY(kvm_asm_rsm)
 #ifndef ACCE_RSM
 	br.many kvm_virtualization_fault_back
 #endif
-	add r16=VMM_VPD_BASE_OFFSET,r21
+	VMX_VPS_SYNC_READ
+	;;
 	extr.u r26=r25,6,21
 	extr.u r27=r25,31,2
 	;;
-	ld8 r16=[r16]
 	extr.u r28=r25,36,1
 	dep r26=r27,r26,21,2
 	;;
@@ -196,7 +291,7 @@ GLOBAL_ENTRY(kvm_asm_rsm)
 	tbit.nz p6,p0=r23,0
 	;;
 	tbit.z.or p6,p0=r26,IA64_PSR_DT_BIT
-	(p6) br.dptk kvm_resume_to_guest
+	(p6) br.dptk kvm_resume_to_guest_with_sync
 	;;
 	add r26=VMM_VCPU_META_RR0_OFFSET,r21
 	add r27=VMM_VCPU_META_RR0_OFFSET+8,r21
@@ -212,7 +307,7 @@ GLOBAL_ENTRY(kvm_asm_rsm)
 	mov rr[r28]=r27
 	;;
 	srlz.d
-	br.many kvm_resume_to_guest
+	br.many kvm_resume_to_guest_with_sync
 END(kvm_asm_rsm)
 
 
@@ -221,11 +316,11 @@ GLOBAL_ENTRY(kvm_asm_ssm)
 #ifndef ACCE_SSM
 	br.many kvm_virtualization_fault_back
 #endif
-	add r16=VMM_VPD_BASE_OFFSET,r21
+	VMX_VPS_SYNC_READ
+	;;
 	extr.u r26=r25,6,21
 	extr.u r27=r25,31,2
 	;;
-	ld8 r16=[r16]
 	extr.u r28=r25,36,1
 	dep r26=r27,r26,21,2
 	;;	//r26 is imm24
@@ -271,7 +366,7 @@ kvm_asm_ssm_1:
 	tbit.nz p6,p0=r29,IA64_PSR_I_BIT
 	;;
 	tbit.z.or p6,p0=r19,IA64_PSR_I_BIT
-	(p6) br.dptk kvm_resume_to_guest
+	(p6) br.dptk kvm_resume_to_guest_with_sync
 	;;
 	add r29=VPD_VTPR_START_OFFSET,r16
 	add r30=VPD_VHPI_START_OFFSET,r16
@@ -286,7 +381,7 @@ kvm_asm_ssm_1:
 	;;
 	cmp.gt p6,p0=r30,r17
 	(p6) br.dpnt.few kvm_asm_dispatch_vexirq
-	br.many kvm_resume_to_guest
+	br.many kvm_resume_to_guest_with_sync
 END(kvm_asm_ssm)
 
 
@@ -295,10 +390,9 @@ GLOBAL_ENTRY(kvm_asm_mov_to_psr)
 #ifndef ACCE_MOV_TO_PSR
 	br.many kvm_virtualization_fault_back
 #endif
-	add r16=VMM_VPD_BASE_OFFSET,r21
-	extr.u r26=r25,13,7 //r2
+	VMX_VPS_SYNC_READ
 	;;
-	ld8 r16=[r16]
+	extr.u r26=r25,13,7 //r2
 	addl r20=@gprel(asm_mov_from_reg),gp
 	;;
 	adds r30=kvm_asm_mov_to_psr_back-asm_mov_from_reg,r20
@@ -374,7 +468,7 @@ kvm_asm_mov_to_psr_1:
 	;;
 	tbit.nz.or p6,p0=r17,IA64_PSR_I_BIT
 	tbit.z.or p6,p0=r30,IA64_PSR_I_BIT
-	(p6) br.dpnt.few kvm_resume_to_guest
+	(p6) br.dpnt.few kvm_resume_to_guest_with_sync
 	;;
 	add r29=VPD_VTPR_START_OFFSET,r16
 	add r30=VPD_VHPI_START_OFFSET,r16
@@ -389,13 +483,29 @@ kvm_asm_mov_to_psr_1:
 	;;
 	cmp.gt p6,p0=r30,r17
 	(p6) br.dpnt.few kvm_asm_dispatch_vexirq
-	br.many kvm_resume_to_guest
+	br.many kvm_resume_to_guest_with_sync
 END(kvm_asm_mov_to_psr)
 
 
 ENTRY(kvm_asm_dispatch_vexirq)
 //increment iip
+	mov r17 = b0
+	mov r18 = r31
+{.mii
+	add r25=VMM_VPD_BASE_OFFSET,r21
+	nop 0x0
+	mov r24 = ip
+	;;
+}
+{.mmb
+	add r24 = 0x20, r24
+	ld8 r25 = [r25]
+	br.sptk.many kvm_vps_sync_write
+}
+	mov b0 =r17
 	mov r16=cr.ipsr
+	mov r31 = r18
+	mov r19 = 37
 	;;
 	extr.u r17=r16,IA64_PSR_RI_BIT,2
 	tbit.nz p6,p7=r16,IA64_PSR_RI_BIT+1
@@ -435,25 +545,31 @@ GLOBAL_ENTRY(kvm_asm_thash)
 	;;
 kvm_asm_thash_back1:
 	shr.u r23=r19,61		// get RR number
-	adds r25=VMM_VCPU_VRR0_OFFSET,r21	// get vcpu->arch.vrr[0]'s addr
+	adds r28=VMM_VCPU_VRR0_OFFSET,r21	// get vcpu->arch.vrr[0]'s addr
 	adds r16=VMM_VPD_VPTA_OFFSET,r16	// get vpta
 	;;
-	shladd r27=r23,3,r25	// get vcpu->arch.vrr[r23]'s addr
+	shladd r27=r23,3,r28	// get vcpu->arch.vrr[r23]'s addr
 	ld8 r17=[r16]		// get PTA
 	mov r26=1
 	;;
 	extr.u r29=r17,2,6	// get pta.size
-	ld8 r25=[r27]		// get vcpu->arch.vrr[r23]'s value
+	ld8 r28=[r27]		// get vcpu->arch.vrr[r23]'s value
 	;;
-	extr.u r25=r25,2,6	// get rr.ps
+	mov b0=r24
+	//Fallback to C if pta.vf is set
+	tbit.nz p6,p0=r17, 8
+	;;
+	(p6) mov r24=EVENT_THASH
+	(p6) br.cond.dpnt.many kvm_virtualization_fault_back
+	extr.u r28=r28,2,6	// get rr.ps
 	shl r22=r26,r29		// 1UL << pta.size
 	;;
-	shr.u r23=r19,r25	// vaddr >> rr.ps
+	shr.u r23=r19,r28	// vaddr >> rr.ps
 	adds r26=3,r29		// pta.size + 3
 	shl r27=r17,3		// pta << 3
 	;;
 	shl r23=r23,3		// (vaddr >> rr.ps) << 3
 	shr.u r27=r27,r26	// (pta << 3) >> (pta.size+3)
 	movl r16=7<<61
 	;;
 	adds r22=-1,r22		// (1UL << pta.size) - 1
@@ -724,6 +840,29 @@ END(asm_mov_from_reg)
  *	r31: pr
  *	r24: b0
  */
+ENTRY(kvm_resume_to_guest_with_sync)
+	adds r19=VMM_VPD_BASE_OFFSET,r21
+	mov r16 = r31
+	mov r17 = r24
+	;;
+{.mii
+	ld8 r25 =[r19]
+	nop 0x0
+	mov r24 = ip
+	;;
+}
+{.mmb
+	add r24 =0x20, r24
+	nop 0x0
+	br.sptk.many kvm_vps_sync_write
+}
+
+	mov r31 = r16
+	mov r24 =r17
+	;;
+	br.sptk.many kvm_resume_to_guest
+END(kvm_resume_to_guest_with_sync)
+
 ENTRY(kvm_resume_to_guest)
 	adds r16 = VMM_VCPU_SAVED_GP_OFFSET,r21
 	;;
diff --git a/arch/ia64/kvm/process.c b/arch/ia64/kvm/process.c
index 5a33f7ed29a0..3417783ae164 100644
--- a/arch/ia64/kvm/process.c
+++ b/arch/ia64/kvm/process.c
@@ -962,9 +962,9 @@ static void kvm_do_resume_op(struct kvm_vcpu *vcpu)
 void vmm_transition(struct kvm_vcpu *vcpu)
 {
 	ia64_call_vsa(PAL_VPS_SAVE, (unsigned long)vcpu->arch.vpd,
-						0, 0, 0, 0, 0, 0);
+						1, 0, 0, 0, 0, 0);
 	vmm_trampoline(&vcpu->arch.guest, &vcpu->arch.host);
 	ia64_call_vsa(PAL_VPS_RESTORE, (unsigned long)vcpu->arch.vpd,
-						0, 0, 0, 0, 0, 0);
+						1, 0, 0, 0, 0, 0);
 	kvm_do_resume_op(vcpu);
 }
diff --git a/arch/ia64/kvm/vcpu.h b/arch/ia64/kvm/vcpu.h
index b0fcfb62c49e..341e3fee280c 100644
--- a/arch/ia64/kvm/vcpu.h
+++ b/arch/ia64/kvm/vcpu.h
@@ -313,21 +313,21 @@ static inline void vcpu_set_tr(struct thash_data *trp, u64 pte, u64 itir,
 	trp->rid = rid;
 }
 
-extern u64 kvm_lookup_mpa(u64 gpfn);
-extern u64 kvm_gpa_to_mpa(u64 gpa);
-
-/* Return I/O type if trye */
-#define __gpfn_is_io(gpfn)			\
-	({					\
-		u64 pte, ret = 0;		\
-		pte = kvm_lookup_mpa(gpfn);	\
-		if (!(pte & GPFN_INV_MASK))	\
-			ret = pte & GPFN_IO_MASK;	\
-		ret;				\
-	})
+extern u64 kvm_get_mpt_entry(u64 gpfn);
 
+/* Return I/ */
+static inline u64 __gpfn_is_io(u64 gpfn)
+{
+	u64 pte;
+	pte = kvm_get_mpt_entry(gpfn);
+	if (!(pte & GPFN_INV_MASK)) {
+		pte = pte & GPFN_IO_MASK;
+		if (pte != GPFN_PHYS_MMIO)
+			return pte;
+	}
+	return 0;
+}
 #endif
-
 
 #define IA64_NO_FAULT	0
 #define IA64_FAULT	1
 
diff --git a/arch/ia64/kvm/vmm_ivt.S b/arch/ia64/kvm/vmm_ivt.S
index 3ee5f481c06d..c1d7251a1480 100644
--- a/arch/ia64/kvm/vmm_ivt.S
+++ b/arch/ia64/kvm/vmm_ivt.S
@@ -1261,11 +1261,6 @@ kvm_rse_clear_invalid:
 	adds r19=VMM_VPD_VPSR_OFFSET,r18
 	;;
 	ld8 r19=[r19]	//vpsr
-	adds r20=VMM_VCPU_VSA_BASE_OFFSET,r21
-	;;
-	ld8 r20=[r20]
-	;;
-//vsa_sync_write_start
 	mov r25=r18
 	adds r16= VMM_VCPU_GP_OFFSET,r21
 	;;
@@ -1274,10 +1269,7 @@ kvm_rse_clear_invalid:
 	;;
 	add r24=r24,r16
 	;;
-	add r16=PAL_VPS_SYNC_WRITE,r20
-	;;
-	mov b0=r16
-	br.cond.sptk b0		// call the service
+	br.sptk.many kvm_vps_sync_write		// call the service
 	;;
 END(ia64_leave_hypervisor)
 // fall through
@@ -1288,28 +1280,15 @@ GLOBAL_ENTRY(ia64_vmm_entry)
  *	r17:cr.isr
  *	r18:vpd
  *	r19:vpsr
- *	r20:__vsa_base
  *	r22:b0
  *	r23:predicate
  */
 	mov r24=r22
 	mov r25=r18
 	tbit.nz p1,p2 = r19,IA64_PSR_IC_BIT	// p1=vpsr.ic
+	(p1) br.cond.sptk.few kvm_vps_resume_normal
+	(p2) br.cond.sptk.many kvm_vps_resume_handler
 	;;
-	(p1) add r29=PAL_VPS_RESUME_NORMAL,r20
-	(p1) br.sptk.many ia64_vmm_entry_out
-	;;
-	tbit.nz p1,p2 = r17,IA64_ISR_IR_BIT	//p1=cr.isr.ir
-	;;
-	(p1) add r29=PAL_VPS_RESUME_NORMAL,r20
-	(p2) add r29=PAL_VPS_RESUME_HANDLER,r20
-	(p2) ld8 r26=[r25]
-	;;
-ia64_vmm_entry_out:
-	mov pr=r23,-2
-	mov b0=r29
-	;;
-	br.cond.sptk b0		// call pal service
 END(ia64_vmm_entry)
 
 
@@ -1376,6 +1355,9 @@ GLOBAL_ENTRY(vmm_reset_entry)
 	//set up ipsr, iip, vpd.vpsr, dcr
 	// For IPSR: it/dt/rt=1, i/ic=1, si=1, vm/bn=1
 	// For DCR: all bits 0
+	bsw.0
+	;;
+	mov r21 =r13
 	adds r14=-VMM_PT_REGS_SIZE, r12
 	;;
 	movl r6=0x501008826000	// IPSR dt/rt/it:1;i/ic:1, si:1, vm/bn:1
@@ -1387,12 +1369,6 @@ GLOBAL_ENTRY(vmm_reset_entry)
 	;;
 	srlz.i
 	;;
-	bsw.0
-	;;
-	mov r21 =r13
-	;;
-	bsw.1
-	;;
 	mov ar.rsc = 0
 	;;
 	flushrs
@@ -1406,12 +1382,9 @@ GLOBAL_ENTRY(vmm_reset_entry)
 	ld8 r1 = [r20]
 	;;
 	mov cr.iip=r4
-	;;
 	adds r16=VMM_VPD_BASE_OFFSET,r13
-	adds r20=VMM_VCPU_VSA_BASE_OFFSET,r13
 	;;
 	ld8 r18=[r16]
-	ld8 r20=[r20]
 	;;
 	adds r19=VMM_VPD_VPSR_OFFSET,r18
 	;;
diff --git a/arch/ia64/kvm/vtlb.c b/arch/ia64/kvm/vtlb.c
index def4576d22b1..e22b93361e08 100644
--- a/arch/ia64/kvm/vtlb.c
+++ b/arch/ia64/kvm/vtlb.c
@@ -390,7 +390,7 @@ void thash_purge_entries_remote(struct kvm_vcpu *v, u64 va, u64 ps)
 
 u64 translate_phy_pte(u64 *pte, u64 itir, u64 va)
 {
-	u64 ps, ps_mask, paddr, maddr;
+	u64 ps, ps_mask, paddr, maddr, io_mask;
 	union pte_flags phy_pte;
 
 	ps = itir_ps(itir);
@@ -398,8 +398,9 @@ u64 translate_phy_pte(u64 *pte, u64 itir, u64 va)
 	phy_pte.val = *pte;
 	paddr = *pte;
 	paddr = ((paddr & _PAGE_PPN_MASK) & ps_mask) | (va & ~ps_mask);
-	maddr = kvm_lookup_mpa(paddr >> PAGE_SHIFT);
-	if (maddr & GPFN_IO_MASK) {
+	maddr = kvm_get_mpt_entry(paddr >> PAGE_SHIFT);
+	io_mask = maddr & GPFN_IO_MASK;
+	if (io_mask && (io_mask != GPFN_PHYS_MMIO)) {
 		*pte |= VTLB_PTE_IO;
 		return -1;
 	}
@@ -418,7 +419,7 @@ int thash_purge_and_insert(struct kvm_vcpu *v, u64 pte, u64 itir,
 		u64 ifa, int type)
 {
 	u64 ps;
-	u64 phy_pte;
+	u64 phy_pte, io_mask, index;
 	union ia64_rr vrr, mrr;
 	int ret = 0;
 
@@ -426,13 +427,16 @@ int thash_purge_and_insert(struct kvm_vcpu *v, u64 pte, u64 itir,
 	vrr.val = vcpu_get_rr(v, ifa);
 	mrr.val = ia64_get_rr(ifa);
 
+	index = (pte & _PAGE_PPN_MASK) >> PAGE_SHIFT;
+	io_mask = kvm_get_mpt_entry(index) & GPFN_IO_MASK;
 	phy_pte = translate_phy_pte(&pte, itir, ifa);
 
 	/* Ensure WB attribute if pte is related to a normal mem page,
 	 * which is required by vga acceleration since qemu maps shared
 	 * vram buffer with WB.
 	 */
-	if (!(pte & VTLB_PTE_IO) && ((pte & _PAGE_MA_MASK) != _PAGE_MA_NAT)) {
+	if (!(pte & VTLB_PTE_IO) && ((pte & _PAGE_MA_MASK) != _PAGE_MA_NAT) &&
+			io_mask != GPFN_PHYS_MMIO) {
 		pte &= ~_PAGE_MA_MASK;
 		phy_pte &= ~_PAGE_MA_MASK;
 	}
@@ -566,12 +570,19 @@ void thash_init(struct thash_cb *hcb, u64 sz)
 	}
 }
 
-u64 kvm_lookup_mpa(u64 gpfn)
+u64 kvm_get_mpt_entry(u64 gpfn)
 {
 	u64 *base = (u64 *) KVM_P2M_BASE;
 	return *(base + gpfn);
 }
 
+u64 kvm_lookup_mpa(u64 gpfn)
+{
+	u64 maddr;
+	maddr = kvm_get_mpt_entry(gpfn);
+	return maddr&_PAGE_PPN_MASK;
+}
+
 u64 kvm_gpa_to_mpa(u64 gpa)
 {
 	u64 pte = kvm_lookup_mpa(gpa >> PAGE_SHIFT);
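
The split between the two lookups, restated as a hypothetical helper (the
function names are those introduced by the patch; the helper itself is
illustrative, not part of the diff):

/* kvm_get_mpt_entry() returns the raw P2M entry with the GPFN_* flag
 * bits intact, while kvm_lookup_mpa() keeps its old "machine address
 * only" meaning by masking with _PAGE_PPN_MASK.  Classifying a page
 * therefore goes through the raw entry, as translate_phy_pte() now does. */
static int gpfn_is_directed_mmio(u64 gpfn)
{
	return (kvm_get_mpt_entry(gpfn) & GPFN_IO_MASK) == GPFN_PHYS_MMIO;
}
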
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index 200100ea7610..054bcd9439aa 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -21,7 +21,6 @@
 #include <linux/bitops.h>
 #include <linux/kexec.h>
 
-#include <asm/a.out.h>
 #include <asm/dma.h>
 #include <asm/ia32.h>
 #include <asm/io.h>
@@ -701,23 +700,6 @@ int arch_add_memory(int nid, u64 start, u64 size)
 
 	return ret;
 }
-#ifdef CONFIG_MEMORY_HOTREMOVE
-int remove_memory(u64 start, u64 size)
-{
-	unsigned long start_pfn, end_pfn;
-	unsigned long timeout = 120 * HZ;
-	int ret;
-	start_pfn = start >> PAGE_SHIFT;
-	end_pfn = start_pfn + (size >> PAGE_SHIFT);
-	ret = offline_pages(start_pfn, end_pfn, timeout);
-	if (ret)
-		goto out;
-	/* we can free mem_map at this point */
-out:
-	return ret;
-}
-EXPORT_SYMBOL_GPL(remove_memory);
-#endif /* CONFIG_MEMORY_HOTREMOVE */
 #endif
 
 /*
diff --git a/arch/ia64/pci/pci.c b/arch/ia64/pci/pci.c
index 7545037a8625..211fcfd115f9 100644
--- a/arch/ia64/pci/pci.c
+++ b/arch/ia64/pci/pci.c
@@ -614,12 +614,17 @@ char *ia64_pci_get_legacy_mem(struct pci_bus *bus)
  * vector to get the base address.
  */
 int
-pci_mmap_legacy_page_range(struct pci_bus *bus, struct vm_area_struct *vma)
+pci_mmap_legacy_page_range(struct pci_bus *bus, struct vm_area_struct *vma,
+	enum pci_mmap_state mmap_state)
 {
 	unsigned long size = vma->vm_end - vma->vm_start;
 	pgprot_t prot;
 	char *addr;
 
+	/* We only support mmap'ing of legacy memory space */
+	if (mmap_state != pci_mmap_mem)
+		return -ENOSYS;
+
 	/*
 	 * Avoid attribute aliasing.  See Documentation/ia64/aliasing.txt
 	 * for more details.