aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--arch/arm/include/asm/xen/interface.h1
-rw-r--r--arch/arm/xen/enlighten.c124
-rw-r--r--arch/x86/include/asm/xen/interface.h1
-rw-r--r--arch/x86/xen/Kconfig1
-rw-r--r--arch/x86/xen/enlighten.c109
-rw-r--r--arch/x86/xen/mmu.c17
-rw-r--r--arch/x86/xen/smp.c2
-rw-r--r--arch/x86/xen/suspend.c2
-rw-r--r--arch/x86/xen/xen-ops.h2
-rw-r--r--drivers/xen/Kconfig3
-rw-r--r--drivers/xen/Makefile7
-rw-r--r--drivers/xen/balloon.c5
-rw-r--r--drivers/xen/cpu_hotplug.c4
-rw-r--r--drivers/xen/grant-table.c48
-rw-r--r--drivers/xen/privcmd.c76
-rw-r--r--drivers/xen/xen-acpi-pad.c182
-rw-r--r--drivers/xen/xen-pciback/pci_stub.c118
-rw-r--r--drivers/xen/xen-pciback/pciback.h2
-rw-r--r--drivers/xen/xenbus/xenbus_xs.c1
-rw-r--r--include/xen/interface/event_channel.h13
-rw-r--r--include/xen/interface/memory.h44
-rw-r--r--include/xen/interface/platform.h17
-rw-r--r--include/xen/xen-ops.h9
23 files changed, 641 insertions, 147 deletions
diff --git a/arch/arm/include/asm/xen/interface.h b/arch/arm/include/asm/xen/interface.h
index 5000397134b4..1151188bcd83 100644
--- a/arch/arm/include/asm/xen/interface.h
+++ b/arch/arm/include/asm/xen/interface.h
@@ -49,6 +49,7 @@ DEFINE_GUEST_HANDLE(void);
49DEFINE_GUEST_HANDLE(uint64_t); 49DEFINE_GUEST_HANDLE(uint64_t);
50DEFINE_GUEST_HANDLE(uint32_t); 50DEFINE_GUEST_HANDLE(uint32_t);
51DEFINE_GUEST_HANDLE(xen_pfn_t); 51DEFINE_GUEST_HANDLE(xen_pfn_t);
52DEFINE_GUEST_HANDLE(xen_ulong_t);
52 53
53/* Maximum number of virtual CPUs in multi-processor guests. */ 54/* Maximum number of virtual CPUs in multi-processor guests. */
54#define MAX_VIRT_CPUS 1 55#define MAX_VIRT_CPUS 1
diff --git a/arch/arm/xen/enlighten.c b/arch/arm/xen/enlighten.c
index f57609275449..41a6a27128a2 100644
--- a/arch/arm/xen/enlighten.c
+++ b/arch/arm/xen/enlighten.c
@@ -8,6 +8,8 @@
8#include <xen/features.h> 8#include <xen/features.h>
9#include <xen/platform_pci.h> 9#include <xen/platform_pci.h>
10#include <xen/xenbus.h> 10#include <xen/xenbus.h>
11#include <xen/page.h>
12#include <xen/xen-ops.h>
11#include <asm/xen/hypervisor.h> 13#include <asm/xen/hypervisor.h>
12#include <asm/xen/hypercall.h> 14#include <asm/xen/hypercall.h>
13#include <linux/interrupt.h> 15#include <linux/interrupt.h>
@@ -17,6 +19,8 @@
17#include <linux/of_irq.h> 19#include <linux/of_irq.h>
18#include <linux/of_address.h> 20#include <linux/of_address.h>
19 21
22#include <linux/mm.h>
23
20struct start_info _xen_start_info; 24struct start_info _xen_start_info;
21struct start_info *xen_start_info = &_xen_start_info; 25struct start_info *xen_start_info = &_xen_start_info;
22EXPORT_SYMBOL_GPL(xen_start_info); 26EXPORT_SYMBOL_GPL(xen_start_info);
@@ -29,6 +33,10 @@ struct shared_info *HYPERVISOR_shared_info = (void *)&xen_dummy_shared_info;
29 33
30DEFINE_PER_CPU(struct vcpu_info *, xen_vcpu); 34DEFINE_PER_CPU(struct vcpu_info *, xen_vcpu);
31 35
36/* These are unused until we support booting "pre-ballooned" */
37unsigned long xen_released_pages;
38struct xen_memory_region xen_extra_mem[XEN_EXTRA_MEM_MAX_REGIONS] __initdata;
39
32/* TODO: to be removed */ 40/* TODO: to be removed */
33__read_mostly int xen_have_vector_callback; 41__read_mostly int xen_have_vector_callback;
34EXPORT_SYMBOL_GPL(xen_have_vector_callback); 42EXPORT_SYMBOL_GPL(xen_have_vector_callback);
@@ -38,15 +46,106 @@ EXPORT_SYMBOL_GPL(xen_platform_pci_unplug);
38 46
39static __read_mostly int xen_events_irq = -1; 47static __read_mostly int xen_events_irq = -1;
40 48
49/* map fgmfn of domid to lpfn in the current domain */
50static int map_foreign_page(unsigned long lpfn, unsigned long fgmfn,
51 unsigned int domid)
52{
53 int rc;
54 struct xen_add_to_physmap_range xatp = {
55 .domid = DOMID_SELF,
56 .foreign_domid = domid,
57 .size = 1,
58 .space = XENMAPSPACE_gmfn_foreign,
59 };
60 xen_ulong_t idx = fgmfn;
61 xen_pfn_t gpfn = lpfn;
62
63 set_xen_guest_handle(xatp.idxs, &idx);
64 set_xen_guest_handle(xatp.gpfns, &gpfn);
65
66 rc = HYPERVISOR_memory_op(XENMEM_add_to_physmap_range, &xatp);
67 if (rc) {
68 pr_warn("Failed to map pfn to mfn rc:%d pfn:%lx mfn:%lx\n",
69 rc, lpfn, fgmfn);
70 return 1;
71 }
72 return 0;
73}
74
75struct remap_data {
76 xen_pfn_t fgmfn; /* foreign domain's gmfn */
77 pgprot_t prot;
78 domid_t domid;
79 struct vm_area_struct *vma;
80 int index;
81 struct page **pages;
82 struct xen_remap_mfn_info *info;
83};
84
85static int remap_pte_fn(pte_t *ptep, pgtable_t token, unsigned long addr,
86 void *data)
87{
88 struct remap_data *info = data;
89 struct page *page = info->pages[info->index++];
90 unsigned long pfn = page_to_pfn(page);
91 pte_t pte = pfn_pte(pfn, info->prot);
92
93 if (map_foreign_page(pfn, info->fgmfn, info->domid))
94 return -EFAULT;
95 set_pte_at(info->vma->vm_mm, addr, ptep, pte);
96
97 return 0;
98}
99
41int xen_remap_domain_mfn_range(struct vm_area_struct *vma, 100int xen_remap_domain_mfn_range(struct vm_area_struct *vma,
42 unsigned long addr, 101 unsigned long addr,
43 unsigned long mfn, int nr, 102 xen_pfn_t mfn, int nr,
44 pgprot_t prot, unsigned domid) 103 pgprot_t prot, unsigned domid,
104 struct page **pages)
45{ 105{
46 return -ENOSYS; 106 int err;
107 struct remap_data data;
108
109 /* TBD: Batching, current sole caller only does page at a time */
110 if (nr > 1)
111 return -EINVAL;
112
113 data.fgmfn = mfn;
114 data.prot = prot;
115 data.domid = domid;
116 data.vma = vma;
117 data.index = 0;
118 data.pages = pages;
119 err = apply_to_page_range(vma->vm_mm, addr, nr << PAGE_SHIFT,
120 remap_pte_fn, &data);
121 return err;
47} 122}
48EXPORT_SYMBOL_GPL(xen_remap_domain_mfn_range); 123EXPORT_SYMBOL_GPL(xen_remap_domain_mfn_range);
49 124
125int xen_unmap_domain_mfn_range(struct vm_area_struct *vma,
126 int nr, struct page **pages)
127{
128 int i;
129
130 for (i = 0; i < nr; i++) {
131 struct xen_remove_from_physmap xrp;
132 unsigned long rc, pfn;
133
134 pfn = page_to_pfn(pages[i]);
135
136 xrp.domid = DOMID_SELF;
137 xrp.gpfn = pfn;
138 rc = HYPERVISOR_memory_op(XENMEM_remove_from_physmap, &xrp);
139 if (rc) {
140 pr_warn("Failed to unmap pfn:%lx rc:%ld\n",
141 pfn, rc);
142 return rc;
143 }
144 }
145 return 0;
146}
147EXPORT_SYMBOL_GPL(xen_unmap_domain_mfn_range);
148
50/* 149/*
51 * see Documentation/devicetree/bindings/arm/xen.txt for the 150 * see Documentation/devicetree/bindings/arm/xen.txt for the
52 * documentation of the Xen Device Tree format. 151 * documentation of the Xen Device Tree format.
@@ -149,23 +248,6 @@ static int __init xen_init_events(void)
149} 248}
150postcore_initcall(xen_init_events); 249postcore_initcall(xen_init_events);
151 250
152/* XXX: only until balloon is properly working */
153int alloc_xenballooned_pages(int nr_pages, struct page **pages, bool highmem)
154{
155 *pages = alloc_pages(highmem ? GFP_HIGHUSER : GFP_KERNEL,
156 get_order(nr_pages));
157 if (*pages == NULL)
158 return -ENOMEM;
159 return 0;
160}
161EXPORT_SYMBOL_GPL(alloc_xenballooned_pages);
162
163void free_xenballooned_pages(int nr_pages, struct page **pages)
164{
165 kfree(*pages);
166 *pages = NULL;
167}
168EXPORT_SYMBOL_GPL(free_xenballooned_pages);
169 251
170/* In the hypervisor.S file. */ 252/* In the hypervisor.S file. */
171EXPORT_SYMBOL_GPL(HYPERVISOR_event_channel_op); 253EXPORT_SYMBOL_GPL(HYPERVISOR_event_channel_op);
@@ -176,4 +258,4 @@ EXPORT_SYMBOL_GPL(HYPERVISOR_sched_op);
176EXPORT_SYMBOL_GPL(HYPERVISOR_hvm_op); 258EXPORT_SYMBOL_GPL(HYPERVISOR_hvm_op);
177EXPORT_SYMBOL_GPL(HYPERVISOR_memory_op); 259EXPORT_SYMBOL_GPL(HYPERVISOR_memory_op);
178EXPORT_SYMBOL_GPL(HYPERVISOR_physdev_op); 260EXPORT_SYMBOL_GPL(HYPERVISOR_physdev_op);
179EXPORT_SYMBOL_GPL(privcmd_call); 261EXPORT_SYMBOL_GPL(privcmd_call); \ No newline at end of file
diff --git a/arch/x86/include/asm/xen/interface.h b/arch/x86/include/asm/xen/interface.h
index 54d52ff1304a..fd9cb7695b5f 100644
--- a/arch/x86/include/asm/xen/interface.h
+++ b/arch/x86/include/asm/xen/interface.h
@@ -63,6 +63,7 @@ DEFINE_GUEST_HANDLE(void);
63DEFINE_GUEST_HANDLE(uint64_t); 63DEFINE_GUEST_HANDLE(uint64_t);
64DEFINE_GUEST_HANDLE(uint32_t); 64DEFINE_GUEST_HANDLE(uint32_t);
65DEFINE_GUEST_HANDLE(xen_pfn_t); 65DEFINE_GUEST_HANDLE(xen_pfn_t);
66DEFINE_GUEST_HANDLE(xen_ulong_t);
66#endif 67#endif
67 68
68#ifndef HYPERVISOR_VIRT_START 69#ifndef HYPERVISOR_VIRT_START
diff --git a/arch/x86/xen/Kconfig b/arch/x86/xen/Kconfig
index fdce49c7aff6..c31ee77e1ec1 100644
--- a/arch/x86/xen/Kconfig
+++ b/arch/x86/xen/Kconfig
@@ -6,6 +6,7 @@ config XEN
6 bool "Xen guest support" 6 bool "Xen guest support"
7 select PARAVIRT 7 select PARAVIRT
8 select PARAVIRT_CLOCK 8 select PARAVIRT_CLOCK
9 select XEN_HAVE_PVMMU
9 depends on X86_64 || (X86_32 && X86_PAE && !X86_VISWS) 10 depends on X86_64 || (X86_32 && X86_PAE && !X86_VISWS)
10 depends on X86_CMPXCHG && X86_TSC 11 depends on X86_CMPXCHG && X86_TSC
11 help 12 help
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 586d83812b67..138e5667409a 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -193,10 +193,11 @@ void xen_vcpu_restore(void)
193{ 193{
194 int cpu; 194 int cpu;
195 195
196 for_each_online_cpu(cpu) { 196 for_each_possible_cpu(cpu) {
197 bool other_cpu = (cpu != smp_processor_id()); 197 bool other_cpu = (cpu != smp_processor_id());
198 bool is_up = HYPERVISOR_vcpu_op(VCPUOP_is_up, cpu, NULL);
198 199
199 if (other_cpu && 200 if (other_cpu && is_up &&
200 HYPERVISOR_vcpu_op(VCPUOP_down, cpu, NULL)) 201 HYPERVISOR_vcpu_op(VCPUOP_down, cpu, NULL))
201 BUG(); 202 BUG();
202 203
@@ -205,7 +206,7 @@ void xen_vcpu_restore(void)
205 if (have_vcpu_info_placement) 206 if (have_vcpu_info_placement)
206 xen_vcpu_setup(cpu); 207 xen_vcpu_setup(cpu);
207 208
208 if (other_cpu && 209 if (other_cpu && is_up &&
209 HYPERVISOR_vcpu_op(VCPUOP_up, cpu, NULL)) 210 HYPERVISOR_vcpu_op(VCPUOP_up, cpu, NULL))
210 BUG(); 211 BUG();
211 } 212 }
@@ -223,6 +224,21 @@ static void __init xen_banner(void)
223 version >> 16, version & 0xffff, extra.extraversion, 224 version >> 16, version & 0xffff, extra.extraversion,
224 xen_feature(XENFEAT_mmu_pt_update_preserve_ad) ? " (preserve-AD)" : ""); 225 xen_feature(XENFEAT_mmu_pt_update_preserve_ad) ? " (preserve-AD)" : "");
225} 226}
227/* Check if running on Xen version (major, minor) or later */
228bool
229xen_running_on_version_or_later(unsigned int major, unsigned int minor)
230{
231 unsigned int version;
232
233 if (!xen_domain())
234 return false;
235
236 version = HYPERVISOR_xen_version(XENVER_version, NULL);
237 if ((((version >> 16) == major) && ((version & 0xffff) >= minor)) ||
238 ((version >> 16) > major))
239 return true;
240 return false;
241}
226 242
227#define CPUID_THERM_POWER_LEAF 6 243#define CPUID_THERM_POWER_LEAF 6
228#define APERFMPERF_PRESENT 0 244#define APERFMPERF_PRESENT 0
@@ -287,8 +303,7 @@ static void xen_cpuid(unsigned int *ax, unsigned int *bx,
287 303
288static bool __init xen_check_mwait(void) 304static bool __init xen_check_mwait(void)
289{ 305{
290#if defined(CONFIG_ACPI) && !defined(CONFIG_ACPI_PROCESSOR_AGGREGATOR) && \ 306#ifdef CONFIG_ACPI
291 !defined(CONFIG_ACPI_PROCESSOR_AGGREGATOR_MODULE)
292 struct xen_platform_op op = { 307 struct xen_platform_op op = {
293 .cmd = XENPF_set_processor_pminfo, 308 .cmd = XENPF_set_processor_pminfo,
294 .u.set_pminfo.id = -1, 309 .u.set_pminfo.id = -1,
@@ -309,6 +324,13 @@ static bool __init xen_check_mwait(void)
309 if (!xen_initial_domain()) 324 if (!xen_initial_domain())
310 return false; 325 return false;
311 326
327 /*
328 * When running under platform earlier than Xen4.2, do not expose
329 * mwait, to avoid the risk of loading native acpi pad driver
330 */
331 if (!xen_running_on_version_or_later(4, 2))
332 return false;
333
312 ax = 1; 334 ax = 1;
313 cx = 0; 335 cx = 0;
314 336
@@ -1495,51 +1517,72 @@ asmlinkage void __init xen_start_kernel(void)
1495#endif 1517#endif
1496} 1518}
1497 1519
1498void __ref xen_hvm_init_shared_info(void) 1520#ifdef CONFIG_XEN_PVHVM
1521#define HVM_SHARED_INFO_ADDR 0xFE700000UL
1522static struct shared_info *xen_hvm_shared_info;
1523static unsigned long xen_hvm_sip_phys;
1524static int xen_major, xen_minor;
1525
1526static void xen_hvm_connect_shared_info(unsigned long pfn)
1499{ 1527{
1500 int cpu;
1501 struct xen_add_to_physmap xatp; 1528 struct xen_add_to_physmap xatp;
1502 static struct shared_info *shared_info_page = 0;
1503 1529
1504 if (!shared_info_page)
1505 shared_info_page = (struct shared_info *)
1506 extend_brk(PAGE_SIZE, PAGE_SIZE);
1507 xatp.domid = DOMID_SELF; 1530 xatp.domid = DOMID_SELF;
1508 xatp.idx = 0; 1531 xatp.idx = 0;
1509 xatp.space = XENMAPSPACE_shared_info; 1532 xatp.space = XENMAPSPACE_shared_info;
1510 xatp.gpfn = __pa(shared_info_page) >> PAGE_SHIFT; 1533 xatp.gpfn = pfn;
1511 if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp)) 1534 if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp))
1512 BUG(); 1535 BUG();
1513 1536
1514 HYPERVISOR_shared_info = (struct shared_info *)shared_info_page; 1537}
1538static void __init xen_hvm_set_shared_info(struct shared_info *sip)
1539{
1540 int cpu;
1541
1542 HYPERVISOR_shared_info = sip;
1515 1543
1516 /* xen_vcpu is a pointer to the vcpu_info struct in the shared_info 1544 /* xen_vcpu is a pointer to the vcpu_info struct in the shared_info
1517 * page, we use it in the event channel upcall and in some pvclock 1545 * page, we use it in the event channel upcall and in some pvclock
1518 * related functions. We don't need the vcpu_info placement 1546 * related functions. We don't need the vcpu_info placement
1519 * optimizations because we don't use any pv_mmu or pv_irq op on 1547 * optimizations because we don't use any pv_mmu or pv_irq op on
1520 * HVM. 1548 * HVM. */
1521 * When xen_hvm_init_shared_info is run at boot time only vcpu 0 is 1549 for_each_online_cpu(cpu)
1522 * online but xen_hvm_init_shared_info is run at resume time too and
1523 * in that case multiple vcpus might be online. */
1524 for_each_online_cpu(cpu) {
1525 per_cpu(xen_vcpu, cpu) = &HYPERVISOR_shared_info->vcpu_info[cpu]; 1550 per_cpu(xen_vcpu, cpu) = &HYPERVISOR_shared_info->vcpu_info[cpu];
1551}
1552
1553/* Reconnect the shared_info pfn to a (new) mfn */
1554void xen_hvm_resume_shared_info(void)
1555{
1556 xen_hvm_connect_shared_info(xen_hvm_sip_phys >> PAGE_SHIFT);
1557}
1558
1559/* Xen tools prior to Xen 4 do not provide a E820_Reserved area for guest usage.
1560 * On these old tools the shared info page will be placed in E820_Ram.
1561 * Xen 4 provides a E820_Reserved area at 0xFC000000, and this code expects
1562 * that nothing is mapped up to HVM_SHARED_INFO_ADDR.
1563 * Xen 4.3+ provides an explicit 1MB area at HVM_SHARED_INFO_ADDR which is used
1564 * here for the shared info page. */
1565static void __init xen_hvm_init_shared_info(void)
1566{
1567 if (xen_major < 4) {
1568 xen_hvm_shared_info = extend_brk(PAGE_SIZE, PAGE_SIZE);
1569 xen_hvm_sip_phys = __pa(xen_hvm_shared_info);
1570 } else {
1571 xen_hvm_sip_phys = HVM_SHARED_INFO_ADDR;
1572 set_fixmap(FIX_PARAVIRT_BOOTMAP, xen_hvm_sip_phys);
1573 xen_hvm_shared_info =
1574 (struct shared_info *)fix_to_virt(FIX_PARAVIRT_BOOTMAP);
1526 } 1575 }
1576 xen_hvm_connect_shared_info(xen_hvm_sip_phys >> PAGE_SHIFT);
1577 xen_hvm_set_shared_info(xen_hvm_shared_info);
1527} 1578}
1528 1579
1529#ifdef CONFIG_XEN_PVHVM
1530static void __init init_hvm_pv_info(void) 1580static void __init init_hvm_pv_info(void)
1531{ 1581{
1532 int major, minor; 1582 uint32_t ecx, edx, pages, msr, base;
1533 uint32_t eax, ebx, ecx, edx, pages, msr, base;
1534 u64 pfn; 1583 u64 pfn;
1535 1584
1536 base = xen_cpuid_base(); 1585 base = xen_cpuid_base();
1537 cpuid(base + 1, &eax, &ebx, &ecx, &edx);
1538
1539 major = eax >> 16;
1540 minor = eax & 0xffff;
1541 printk(KERN_INFO "Xen version %d.%d.\n", major, minor);
1542
1543 cpuid(base + 2, &pages, &msr, &ecx, &edx); 1586 cpuid(base + 2, &pages, &msr, &ecx, &edx);
1544 1587
1545 pfn = __pa(hypercall_page); 1588 pfn = __pa(hypercall_page);
@@ -1590,12 +1633,22 @@ static void __init xen_hvm_guest_init(void)
1590 1633
1591static bool __init xen_hvm_platform(void) 1634static bool __init xen_hvm_platform(void)
1592{ 1635{
1636 uint32_t eax, ebx, ecx, edx, base;
1637
1593 if (xen_pv_domain()) 1638 if (xen_pv_domain())
1594 return false; 1639 return false;
1595 1640
1596 if (!xen_cpuid_base()) 1641 base = xen_cpuid_base();
1642 if (!base)
1597 return false; 1643 return false;
1598 1644
1645 cpuid(base + 1, &eax, &ebx, &ecx, &edx);
1646
1647 xen_major = eax >> 16;
1648 xen_minor = eax & 0xffff;
1649
1650 printk(KERN_INFO "Xen version %d.%d.\n", xen_major, xen_minor);
1651
1599 return true; 1652 return true;
1600} 1653}
1601 1654
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index dcf5f2dd91ec..01de35c77221 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -2497,8 +2497,10 @@ static int remap_area_mfn_pte_fn(pte_t *ptep, pgtable_t token,
2497 2497
2498int xen_remap_domain_mfn_range(struct vm_area_struct *vma, 2498int xen_remap_domain_mfn_range(struct vm_area_struct *vma,
2499 unsigned long addr, 2499 unsigned long addr,
2500 unsigned long mfn, int nr, 2500 xen_pfn_t mfn, int nr,
2501 pgprot_t prot, unsigned domid) 2501 pgprot_t prot, unsigned domid,
2502 struct page **pages)
2503
2502{ 2504{
2503 struct remap_data rmd; 2505 struct remap_data rmd;
2504 struct mmu_update mmu_update[REMAP_BATCH_SIZE]; 2506 struct mmu_update mmu_update[REMAP_BATCH_SIZE];
@@ -2542,3 +2544,14 @@ out:
2542 return err; 2544 return err;
2543} 2545}
2544EXPORT_SYMBOL_GPL(xen_remap_domain_mfn_range); 2546EXPORT_SYMBOL_GPL(xen_remap_domain_mfn_range);
2547
2548/* Returns: 0 success */
2549int xen_unmap_domain_mfn_range(struct vm_area_struct *vma,
2550 int numpgs, struct page **pages)
2551{
2552 if (!pages || !xen_feature(XENFEAT_auto_translated_physmap))
2553 return 0;
2554
2555 return -EINVAL;
2556}
2557EXPORT_SYMBOL_GPL(xen_unmap_domain_mfn_range);
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index 353c50f18702..4f7d2599b484 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -254,7 +254,7 @@ static void __init xen_smp_prepare_cpus(unsigned int max_cpus)
254 } 254 }
255 xen_init_lock_cpu(0); 255 xen_init_lock_cpu(0);
256 256
257 smp_store_cpu_info(0); 257 smp_store_boot_cpu_info();
258 cpu_data(0).x86_max_cores = 1; 258 cpu_data(0).x86_max_cores = 1;
259 259
260 for_each_possible_cpu(i) { 260 for_each_possible_cpu(i) {
diff --git a/arch/x86/xen/suspend.c b/arch/x86/xen/suspend.c
index 45329c8c226e..ae8a00c39de4 100644
--- a/arch/x86/xen/suspend.c
+++ b/arch/x86/xen/suspend.c
@@ -30,7 +30,7 @@ void xen_arch_hvm_post_suspend(int suspend_cancelled)
30{ 30{
31#ifdef CONFIG_XEN_PVHVM 31#ifdef CONFIG_XEN_PVHVM
32 int cpu; 32 int cpu;
33 xen_hvm_init_shared_info(); 33 xen_hvm_resume_shared_info();
34 xen_callback_vector(); 34 xen_callback_vector();
35 xen_unplug_emulated_devices(); 35 xen_unplug_emulated_devices();
36 if (xen_feature(XENFEAT_hvm_safe_pvclock)) { 36 if (xen_feature(XENFEAT_hvm_safe_pvclock)) {
diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
index a95b41744ad0..d2e73d19d366 100644
--- a/arch/x86/xen/xen-ops.h
+++ b/arch/x86/xen/xen-ops.h
@@ -40,7 +40,7 @@ void xen_enable_syscall(void);
40void xen_vcpu_restore(void); 40void xen_vcpu_restore(void);
41 41
42void xen_callback_vector(void); 42void xen_callback_vector(void);
43void xen_hvm_init_shared_info(void); 43void xen_hvm_resume_shared_info(void);
44void xen_unplug_emulated_devices(void); 44void xen_unplug_emulated_devices(void);
45 45
46void __init xen_build_dynamic_phys_to_machine(void); 46void __init xen_build_dynamic_phys_to_machine(void);
diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig
index 126d8ce591ce..cabfa97f4674 100644
--- a/drivers/xen/Kconfig
+++ b/drivers/xen/Kconfig
@@ -206,4 +206,7 @@ config XEN_MCE_LOG
206 Allow kernel fetching MCE error from Xen platform and 206 Allow kernel fetching MCE error from Xen platform and
207 converting it into Linux mcelog format for mcelog tools 207 converting it into Linux mcelog format for mcelog tools
208 208
209config XEN_HAVE_PVMMU
210 bool
211
209endmenu 212endmenu
diff --git a/drivers/xen/Makefile b/drivers/xen/Makefile
index 74354708c6c4..fb213cf81a7b 100644
--- a/drivers/xen/Makefile
+++ b/drivers/xen/Makefile
@@ -1,9 +1,9 @@
1ifneq ($(CONFIG_ARM),y) 1ifneq ($(CONFIG_ARM),y)
2obj-y += manage.o balloon.o 2obj-y += manage.o
3obj-$(CONFIG_HOTPLUG_CPU) += cpu_hotplug.o 3obj-$(CONFIG_HOTPLUG_CPU) += cpu_hotplug.o
4endif 4endif
5obj-$(CONFIG_X86) += fallback.o 5obj-$(CONFIG_X86) += fallback.o
6obj-y += grant-table.o features.o events.o 6obj-y += grant-table.o features.o events.o balloon.o
7obj-y += xenbus/ 7obj-y += xenbus/
8 8
9nostackp := $(call cc-option, -fno-stack-protector) 9nostackp := $(call cc-option, -fno-stack-protector)
@@ -11,7 +11,8 @@ CFLAGS_features.o := $(nostackp)
11 11
12dom0-$(CONFIG_PCI) += pci.o 12dom0-$(CONFIG_PCI) += pci.o
13dom0-$(CONFIG_USB_SUPPORT) += dbgp.o 13dom0-$(CONFIG_USB_SUPPORT) += dbgp.o
14dom0-$(CONFIG_ACPI) += acpi.o 14dom0-$(CONFIG_ACPI) += acpi.o $(xen-pad-y)
15xen-pad-$(CONFIG_X86) += xen-acpi-pad.o
15dom0-$(CONFIG_X86) += pcpu.o 16dom0-$(CONFIG_X86) += pcpu.o
16obj-$(CONFIG_XEN_DOM0) += $(dom0-y) 17obj-$(CONFIG_XEN_DOM0) += $(dom0-y)
17obj-$(CONFIG_BLOCK) += biomerge.o 18obj-$(CONFIG_BLOCK) += biomerge.o
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index d6886d90ccfd..a56776dbe095 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -359,6 +359,7 @@ static enum bp_state increase_reservation(unsigned long nr_pages)
359 359
360 set_phys_to_machine(pfn, frame_list[i]); 360 set_phys_to_machine(pfn, frame_list[i]);
361 361
362#ifdef CONFIG_XEN_HAVE_PVMMU
362 /* Link back into the page tables if not highmem. */ 363 /* Link back into the page tables if not highmem. */
363 if (xen_pv_domain() && !PageHighMem(page)) { 364 if (xen_pv_domain() && !PageHighMem(page)) {
364 int ret; 365 int ret;
@@ -368,6 +369,7 @@ static enum bp_state increase_reservation(unsigned long nr_pages)
368 0); 369 0);
369 BUG_ON(ret); 370 BUG_ON(ret);
370 } 371 }
372#endif
371 373
372 /* Relinquish the page back to the allocator. */ 374 /* Relinquish the page back to the allocator. */
373 ClearPageReserved(page); 375 ClearPageReserved(page);
@@ -416,13 +418,14 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
416 418
417 scrub_page(page); 419 scrub_page(page);
418 420
421#ifdef CONFIG_XEN_HAVE_PVMMU
419 if (xen_pv_domain() && !PageHighMem(page)) { 422 if (xen_pv_domain() && !PageHighMem(page)) {
420 ret = HYPERVISOR_update_va_mapping( 423 ret = HYPERVISOR_update_va_mapping(
421 (unsigned long)__va(pfn << PAGE_SHIFT), 424 (unsigned long)__va(pfn << PAGE_SHIFT),
422 __pte_ma(0), 0); 425 __pte_ma(0), 0);
423 BUG_ON(ret); 426 BUG_ON(ret);
424 } 427 }
425 428#endif
426 } 429 }
427 430
428 /* Ensure that ballooned highmem pages don't have kmaps. */ 431 /* Ensure that ballooned highmem pages don't have kmaps. */
diff --git a/drivers/xen/cpu_hotplug.c b/drivers/xen/cpu_hotplug.c
index 4dcfced107f5..084041d42c9a 100644
--- a/drivers/xen/cpu_hotplug.c
+++ b/drivers/xen/cpu_hotplug.c
@@ -25,10 +25,10 @@ static void disable_hotplug_cpu(int cpu)
25static int vcpu_online(unsigned int cpu) 25static int vcpu_online(unsigned int cpu)
26{ 26{
27 int err; 27 int err;
28 char dir[32], state[32]; 28 char dir[16], state[16];
29 29
30 sprintf(dir, "cpu/%u", cpu); 30 sprintf(dir, "cpu/%u", cpu);
31 err = xenbus_scanf(XBT_NIL, dir, "availability", "%s", state); 31 err = xenbus_scanf(XBT_NIL, dir, "availability", "%15s", state);
32 if (err != 1) { 32 if (err != 1) {
33 if (!xen_initial_domain()) 33 if (!xen_initial_domain())
34 printk(KERN_ERR "XENBUS: Unable to read cpu state\n"); 34 printk(KERN_ERR "XENBUS: Unable to read cpu state\n");
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
index b91f14e83164..95ce9d02ceca 100644
--- a/drivers/xen/grant-table.c
+++ b/drivers/xen/grant-table.c
@@ -56,10 +56,6 @@
56/* External tools reserve first few grant table entries. */ 56/* External tools reserve first few grant table entries. */
57#define NR_RESERVED_ENTRIES 8 57#define NR_RESERVED_ENTRIES 8
58#define GNTTAB_LIST_END 0xffffffff 58#define GNTTAB_LIST_END 0xffffffff
59#define GREFS_PER_GRANT_FRAME \
60(grant_table_version == 1 ? \
61(PAGE_SIZE / sizeof(struct grant_entry_v1)) : \
62(PAGE_SIZE / sizeof(union grant_entry_v2)))
63 59
64static grant_ref_t **gnttab_list; 60static grant_ref_t **gnttab_list;
65static unsigned int nr_grant_frames; 61static unsigned int nr_grant_frames;
@@ -154,6 +150,7 @@ static struct gnttab_ops *gnttab_interface;
154static grant_status_t *grstatus; 150static grant_status_t *grstatus;
155 151
156static int grant_table_version; 152static int grant_table_version;
153static int grefs_per_grant_frame;
157 154
158static struct gnttab_free_callback *gnttab_free_callback_list; 155static struct gnttab_free_callback *gnttab_free_callback_list;
159 156
@@ -767,12 +764,14 @@ static int grow_gnttab_list(unsigned int more_frames)
767 unsigned int new_nr_grant_frames, extra_entries, i; 764 unsigned int new_nr_grant_frames, extra_entries, i;
768 unsigned int nr_glist_frames, new_nr_glist_frames; 765 unsigned int nr_glist_frames, new_nr_glist_frames;
769 766
767 BUG_ON(grefs_per_grant_frame == 0);
768
770 new_nr_grant_frames = nr_grant_frames + more_frames; 769 new_nr_grant_frames = nr_grant_frames + more_frames;
771 extra_entries = more_frames * GREFS_PER_GRANT_FRAME; 770 extra_entries = more_frames * grefs_per_grant_frame;
772 771
773 nr_glist_frames = (nr_grant_frames * GREFS_PER_GRANT_FRAME + RPP - 1) / RPP; 772 nr_glist_frames = (nr_grant_frames * grefs_per_grant_frame + RPP - 1) / RPP;
774 new_nr_glist_frames = 773 new_nr_glist_frames =
775 (new_nr_grant_frames * GREFS_PER_GRANT_FRAME + RPP - 1) / RPP; 774 (new_nr_grant_frames * grefs_per_grant_frame + RPP - 1) / RPP;
776 for (i = nr_glist_frames; i < new_nr_glist_frames; i++) { 775 for (i = nr_glist_frames; i < new_nr_glist_frames; i++) {
777 gnttab_list[i] = (grant_ref_t *)__get_free_page(GFP_ATOMIC); 776 gnttab_list[i] = (grant_ref_t *)__get_free_page(GFP_ATOMIC);
778 if (!gnttab_list[i]) 777 if (!gnttab_list[i])
@@ -780,12 +779,12 @@ static int grow_gnttab_list(unsigned int more_frames)
780 } 779 }
781 780
782 781
783 for (i = GREFS_PER_GRANT_FRAME * nr_grant_frames; 782 for (i = grefs_per_grant_frame * nr_grant_frames;
784 i < GREFS_PER_GRANT_FRAME * new_nr_grant_frames - 1; i++) 783 i < grefs_per_grant_frame * new_nr_grant_frames - 1; i++)
785 gnttab_entry(i) = i + 1; 784 gnttab_entry(i) = i + 1;
786 785
787 gnttab_entry(i) = gnttab_free_head; 786 gnttab_entry(i) = gnttab_free_head;
788 gnttab_free_head = GREFS_PER_GRANT_FRAME * nr_grant_frames; 787 gnttab_free_head = grefs_per_grant_frame * nr_grant_frames;
789 gnttab_free_count += extra_entries; 788 gnttab_free_count += extra_entries;
790 789
791 nr_grant_frames = new_nr_grant_frames; 790 nr_grant_frames = new_nr_grant_frames;
@@ -957,7 +956,8 @@ EXPORT_SYMBOL_GPL(gnttab_unmap_refs);
957 956
958static unsigned nr_status_frames(unsigned nr_grant_frames) 957static unsigned nr_status_frames(unsigned nr_grant_frames)
959{ 958{
960 return (nr_grant_frames * GREFS_PER_GRANT_FRAME + SPP - 1) / SPP; 959 BUG_ON(grefs_per_grant_frame == 0);
960 return (nr_grant_frames * grefs_per_grant_frame + SPP - 1) / SPP;
961} 961}
962 962
963static int gnttab_map_frames_v1(xen_pfn_t *frames, unsigned int nr_gframes) 963static int gnttab_map_frames_v1(xen_pfn_t *frames, unsigned int nr_gframes)
@@ -1115,6 +1115,7 @@ static void gnttab_request_version(void)
1115 rc = HYPERVISOR_grant_table_op(GNTTABOP_set_version, &gsv, 1); 1115 rc = HYPERVISOR_grant_table_op(GNTTABOP_set_version, &gsv, 1);
1116 if (rc == 0 && gsv.version == 2) { 1116 if (rc == 0 && gsv.version == 2) {
1117 grant_table_version = 2; 1117 grant_table_version = 2;
1118 grefs_per_grant_frame = PAGE_SIZE / sizeof(union grant_entry_v2);
1118 gnttab_interface = &gnttab_v2_ops; 1119 gnttab_interface = &gnttab_v2_ops;
1119 } else if (grant_table_version == 2) { 1120 } else if (grant_table_version == 2) {
1120 /* 1121 /*
@@ -1127,17 +1128,17 @@ static void gnttab_request_version(void)
1127 panic("we need grant tables version 2, but only version 1 is available"); 1128 panic("we need grant tables version 2, but only version 1 is available");
1128 } else { 1129 } else {
1129 grant_table_version = 1; 1130 grant_table_version = 1;
1131 grefs_per_grant_frame = PAGE_SIZE / sizeof(struct grant_entry_v1);
1130 gnttab_interface = &gnttab_v1_ops; 1132 gnttab_interface = &gnttab_v1_ops;
1131 } 1133 }
1132 printk(KERN_INFO "Grant tables using version %d layout.\n", 1134 printk(KERN_INFO "Grant tables using version %d layout.\n",
1133 grant_table_version); 1135 grant_table_version);
1134} 1136}
1135 1137
1136int gnttab_resume(void) 1138static int gnttab_setup(void)
1137{ 1139{
1138 unsigned int max_nr_gframes; 1140 unsigned int max_nr_gframes;
1139 1141
1140 gnttab_request_version();
1141 max_nr_gframes = gnttab_max_grant_frames(); 1142 max_nr_gframes = gnttab_max_grant_frames();
1142 if (max_nr_gframes < nr_grant_frames) 1143 if (max_nr_gframes < nr_grant_frames)
1143 return -ENOSYS; 1144 return -ENOSYS;
@@ -1160,6 +1161,12 @@ int gnttab_resume(void)
1160 return 0; 1161 return 0;
1161} 1162}
1162 1163
1164int gnttab_resume(void)
1165{
1166 gnttab_request_version();
1167 return gnttab_setup();
1168}
1169
1163int gnttab_suspend(void) 1170int gnttab_suspend(void)
1164{ 1171{
1165 gnttab_interface->unmap_frames(); 1172 gnttab_interface->unmap_frames();
@@ -1171,9 +1178,10 @@ static int gnttab_expand(unsigned int req_entries)
1171 int rc; 1178 int rc;
1172 unsigned int cur, extra; 1179 unsigned int cur, extra;
1173 1180
1181 BUG_ON(grefs_per_grant_frame == 0);
1174 cur = nr_grant_frames; 1182 cur = nr_grant_frames;
1175 extra = ((req_entries + (GREFS_PER_GRANT_FRAME-1)) / 1183 extra = ((req_entries + (grefs_per_grant_frame-1)) /
1176 GREFS_PER_GRANT_FRAME); 1184 grefs_per_grant_frame);
1177 if (cur + extra > gnttab_max_grant_frames()) 1185 if (cur + extra > gnttab_max_grant_frames())
1178 return -ENOSPC; 1186 return -ENOSPC;
1179 1187
@@ -1191,21 +1199,23 @@ int gnttab_init(void)
1191 unsigned int nr_init_grefs; 1199 unsigned int nr_init_grefs;
1192 int ret; 1200 int ret;
1193 1201
1202 gnttab_request_version();
1194 nr_grant_frames = 1; 1203 nr_grant_frames = 1;
1195 boot_max_nr_grant_frames = __max_nr_grant_frames(); 1204 boot_max_nr_grant_frames = __max_nr_grant_frames();
1196 1205
1197 /* Determine the maximum number of frames required for the 1206 /* Determine the maximum number of frames required for the
1198 * grant reference free list on the current hypervisor. 1207 * grant reference free list on the current hypervisor.
1199 */ 1208 */
1209 BUG_ON(grefs_per_grant_frame == 0);
1200 max_nr_glist_frames = (boot_max_nr_grant_frames * 1210 max_nr_glist_frames = (boot_max_nr_grant_frames *
1201 GREFS_PER_GRANT_FRAME / RPP); 1211 grefs_per_grant_frame / RPP);
1202 1212
1203 gnttab_list = kmalloc(max_nr_glist_frames * sizeof(grant_ref_t *), 1213 gnttab_list = kmalloc(max_nr_glist_frames * sizeof(grant_ref_t *),
1204 GFP_KERNEL); 1214 GFP_KERNEL);
1205 if (gnttab_list == NULL) 1215 if (gnttab_list == NULL)
1206 return -ENOMEM; 1216 return -ENOMEM;
1207 1217
1208 nr_glist_frames = (nr_grant_frames * GREFS_PER_GRANT_FRAME + RPP - 1) / RPP; 1218 nr_glist_frames = (nr_grant_frames * grefs_per_grant_frame + RPP - 1) / RPP;
1209 for (i = 0; i < nr_glist_frames; i++) { 1219 for (i = 0; i < nr_glist_frames; i++) {
1210 gnttab_list[i] = (grant_ref_t *)__get_free_page(GFP_KERNEL); 1220 gnttab_list[i] = (grant_ref_t *)__get_free_page(GFP_KERNEL);
1211 if (gnttab_list[i] == NULL) { 1221 if (gnttab_list[i] == NULL) {
@@ -1214,12 +1224,12 @@ int gnttab_init(void)
1214 } 1224 }
1215 } 1225 }
1216 1226
1217 if (gnttab_resume() < 0) { 1227 if (gnttab_setup() < 0) {
1218 ret = -ENODEV; 1228 ret = -ENODEV;
1219 goto ini_nomem; 1229 goto ini_nomem;
1220 } 1230 }
1221 1231
1222 nr_init_grefs = nr_grant_frames * GREFS_PER_GRANT_FRAME; 1232 nr_init_grefs = nr_grant_frames * grefs_per_grant_frame;
1223 1233
1224 for (i = NR_RESERVED_ENTRIES; i < nr_init_grefs - 1; i++) 1234 for (i = NR_RESERVED_ENTRIES; i < nr_init_grefs - 1; i++)
1225 gnttab_entry(i) = i + 1; 1235 gnttab_entry(i) = i + 1;
diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c
index 71f5c459b088..421375a9196a 100644
--- a/drivers/xen/privcmd.c
+++ b/drivers/xen/privcmd.c
@@ -33,11 +33,14 @@
33#include <xen/features.h> 33#include <xen/features.h>
34#include <xen/page.h> 34#include <xen/page.h>
35#include <xen/xen-ops.h> 35#include <xen/xen-ops.h>
36#include <xen/balloon.h>
36 37
37#include "privcmd.h" 38#include "privcmd.h"
38 39
39MODULE_LICENSE("GPL"); 40MODULE_LICENSE("GPL");
40 41
42#define PRIV_VMA_LOCKED ((void *)1)
43
41#ifndef HAVE_ARCH_PRIVCMD_MMAP 44#ifndef HAVE_ARCH_PRIVCMD_MMAP
42static int privcmd_enforce_singleshot_mapping(struct vm_area_struct *vma); 45static int privcmd_enforce_singleshot_mapping(struct vm_area_struct *vma);
43#endif 46#endif
@@ -178,7 +181,7 @@ static int mmap_mfn_range(void *data, void *state)
178 msg->va & PAGE_MASK, 181 msg->va & PAGE_MASK,
179 msg->mfn, msg->npages, 182 msg->mfn, msg->npages,
180 vma->vm_page_prot, 183 vma->vm_page_prot,
181 st->domain); 184 st->domain, NULL);
182 if (rc < 0) 185 if (rc < 0)
183 return rc; 186 return rc;
184 187
@@ -196,8 +199,9 @@ static long privcmd_ioctl_mmap(void __user *udata)
196 LIST_HEAD(pagelist); 199 LIST_HEAD(pagelist);
197 struct mmap_mfn_state state; 200 struct mmap_mfn_state state;
198 201
199 if (!xen_initial_domain()) 202 /* We only support privcmd_ioctl_mmap_batch for auto translated. */
200 return -EPERM; 203 if (xen_feature(XENFEAT_auto_translated_physmap))
204 return -ENOSYS;
201 205
202 if (copy_from_user(&mmapcmd, udata, sizeof(mmapcmd))) 206 if (copy_from_user(&mmapcmd, udata, sizeof(mmapcmd)))
203 return -EFAULT; 207 return -EFAULT;
@@ -246,6 +250,7 @@ struct mmap_batch_state {
246 domid_t domain; 250 domid_t domain;
247 unsigned long va; 251 unsigned long va;
248 struct vm_area_struct *vma; 252 struct vm_area_struct *vma;
253 int index;
249 /* A tristate: 254 /* A tristate:
250 * 0 for no errors 255 * 0 for no errors
251 * 1 if at least one error has happened (and no 256 * 1 if at least one error has happened (and no
@@ -260,14 +265,24 @@ struct mmap_batch_state {
260 xen_pfn_t __user *user_mfn; 265 xen_pfn_t __user *user_mfn;
261}; 266};
262 267
268/* auto translated dom0 note: if domU being created is PV, then mfn is
269 * mfn(addr on bus). If it's auto xlated, then mfn is pfn (input to HAP).
270 */
263static int mmap_batch_fn(void *data, void *state) 271static int mmap_batch_fn(void *data, void *state)
264{ 272{
265 xen_pfn_t *mfnp = data; 273 xen_pfn_t *mfnp = data;
266 struct mmap_batch_state *st = state; 274 struct mmap_batch_state *st = state;
275 struct vm_area_struct *vma = st->vma;
276 struct page **pages = vma->vm_private_data;
277 struct page *cur_page = NULL;
267 int ret; 278 int ret;
268 279
280 if (xen_feature(XENFEAT_auto_translated_physmap))
281 cur_page = pages[st->index++];
282
269 ret = xen_remap_domain_mfn_range(st->vma, st->va & PAGE_MASK, *mfnp, 1, 283 ret = xen_remap_domain_mfn_range(st->vma, st->va & PAGE_MASK, *mfnp, 1,
270 st->vma->vm_page_prot, st->domain); 284 st->vma->vm_page_prot, st->domain,
285 &cur_page);
271 286
272 /* Store error code for second pass. */ 287 /* Store error code for second pass. */
273 *(st->err++) = ret; 288 *(st->err++) = ret;
@@ -303,6 +318,32 @@ static int mmap_return_errors_v1(void *data, void *state)
303 return __put_user(*mfnp, st->user_mfn++); 318 return __put_user(*mfnp, st->user_mfn++);
304} 319}
305 320
321/* Allocate pfns that are then mapped with gmfns from foreign domid. Update
322 * the vma with the page info to use later.
323 * Returns: 0 if success, otherwise -errno
324 */
325static int alloc_empty_pages(struct vm_area_struct *vma, int numpgs)
326{
327 int rc;
328 struct page **pages;
329
330 pages = kcalloc(numpgs, sizeof(pages[0]), GFP_KERNEL);
331 if (pages == NULL)
332 return -ENOMEM;
333
334 rc = alloc_xenballooned_pages(numpgs, pages, 0);
335 if (rc != 0) {
336 pr_warn("%s Could not alloc %d pfns rc:%d\n", __func__,
337 numpgs, rc);
338 kfree(pages);
339 return -ENOMEM;
340 }
341 BUG_ON(vma->vm_private_data != PRIV_VMA_LOCKED);
342 vma->vm_private_data = pages;
343
344 return 0;
345}
346
306static struct vm_operations_struct privcmd_vm_ops; 347static struct vm_operations_struct privcmd_vm_ops;
307 348
308static long privcmd_ioctl_mmap_batch(void __user *udata, int version) 349static long privcmd_ioctl_mmap_batch(void __user *udata, int version)
@@ -316,9 +357,6 @@ static long privcmd_ioctl_mmap_batch(void __user *udata, int version)
316 int *err_array = NULL; 357 int *err_array = NULL;
317 struct mmap_batch_state state; 358 struct mmap_batch_state state;
318 359
319 if (!xen_initial_domain())
320 return -EPERM;
321
322 switch (version) { 360 switch (version) {
323 case 1: 361 case 1:
324 if (copy_from_user(&m, udata, sizeof(struct privcmd_mmapbatch))) 362 if (copy_from_user(&m, udata, sizeof(struct privcmd_mmapbatch)))
@@ -370,10 +408,18 @@ static long privcmd_ioctl_mmap_batch(void __user *udata, int version)
370 ret = -EINVAL; 408 ret = -EINVAL;
371 goto out; 409 goto out;
372 } 410 }
411 if (xen_feature(XENFEAT_auto_translated_physmap)) {
412 ret = alloc_empty_pages(vma, m.num);
413 if (ret < 0) {
414 up_write(&mm->mmap_sem);
415 goto out;
416 }
417 }
373 418
374 state.domain = m.dom; 419 state.domain = m.dom;
375 state.vma = vma; 420 state.vma = vma;
376 state.va = m.addr; 421 state.va = m.addr;
422 state.index = 0;
377 state.global_error = 0; 423 state.global_error = 0;
378 state.err = err_array; 424 state.err = err_array;
379 425
@@ -442,6 +488,19 @@ static long privcmd_ioctl(struct file *file,
442 return ret; 488 return ret;
443} 489}
444 490
491static void privcmd_close(struct vm_area_struct *vma)
492{
493 struct page **pages = vma->vm_private_data;
494 int numpgs = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
495
 496 if (!xen_feature(XENFEAT_auto_translated_physmap) || !numpgs || !pages)
497 return;
498
499 xen_unmap_domain_mfn_range(vma, numpgs, pages);
500 free_xenballooned_pages(numpgs, pages);
501 kfree(pages);
502}
503
445static int privcmd_fault(struct vm_area_struct *vma, struct vm_fault *vmf) 504static int privcmd_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
446{ 505{
447 printk(KERN_DEBUG "privcmd_fault: vma=%p %lx-%lx, pgoff=%lx, uv=%p\n", 506 printk(KERN_DEBUG "privcmd_fault: vma=%p %lx-%lx, pgoff=%lx, uv=%p\n",
@@ -452,6 +511,7 @@ static int privcmd_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
452} 511}
453 512
454static struct vm_operations_struct privcmd_vm_ops = { 513static struct vm_operations_struct privcmd_vm_ops = {
514 .close = privcmd_close,
455 .fault = privcmd_fault 515 .fault = privcmd_fault
456}; 516};
457 517
@@ -469,7 +529,7 @@ static int privcmd_mmap(struct file *file, struct vm_area_struct *vma)
469 529
470static int privcmd_enforce_singleshot_mapping(struct vm_area_struct *vma) 530static int privcmd_enforce_singleshot_mapping(struct vm_area_struct *vma)
471{ 531{
472 return (xchg(&vma->vm_private_data, (void *)1) == NULL); 532 return !cmpxchg(&vma->vm_private_data, NULL, PRIV_VMA_LOCKED);
473} 533}
474 534
475const struct file_operations xen_privcmd_fops = { 535const struct file_operations xen_privcmd_fops = {
diff --git a/drivers/xen/xen-acpi-pad.c b/drivers/xen/xen-acpi-pad.c
new file mode 100644
index 000000000000..da39191e7278
--- /dev/null
+++ b/drivers/xen/xen-acpi-pad.c
@@ -0,0 +1,182 @@
1/*
2 * xen-acpi-pad.c - Xen pad interface
3 *
4 * Copyright (c) 2012, Intel Corporation.
5 * Author: Liu, Jinsong <jinsong.liu@intel.com>
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms and conditions of the GNU General Public License,
9 * version 2, as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 */
16
17#include <linux/kernel.h>
18#include <linux/types.h>
19#include <acpi/acpi_bus.h>
20#include <acpi/acpi_drivers.h>
21#include <asm/xen/hypercall.h>
22#include <xen/interface/version.h>
23#include <xen/xen-ops.h>
24
25#define ACPI_PROCESSOR_AGGREGATOR_CLASS "acpi_pad"
26#define ACPI_PROCESSOR_AGGREGATOR_DEVICE_NAME "Processor Aggregator"
27#define ACPI_PROCESSOR_AGGREGATOR_NOTIFY 0x80
28static DEFINE_MUTEX(xen_cpu_lock);
29
30static int xen_acpi_pad_idle_cpus(unsigned int idle_nums)
31{
32 struct xen_platform_op op;
33
34 op.cmd = XENPF_core_parking;
35 op.u.core_parking.type = XEN_CORE_PARKING_SET;
36 op.u.core_parking.idle_nums = idle_nums;
37
38 return HYPERVISOR_dom0_op(&op);
39}
40
41static int xen_acpi_pad_idle_cpus_num(void)
42{
43 struct xen_platform_op op;
44
45 op.cmd = XENPF_core_parking;
46 op.u.core_parking.type = XEN_CORE_PARKING_GET;
47
48 return HYPERVISOR_dom0_op(&op)
49 ?: op.u.core_parking.idle_nums;
50}
51
52/*
53 * Query firmware how many CPUs should be idle
54 * return -1 on failure
55 */
56static int acpi_pad_pur(acpi_handle handle)
57{
58 struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
59 union acpi_object *package;
60 int num = -1;
61
62 if (ACPI_FAILURE(acpi_evaluate_object(handle, "_PUR", NULL, &buffer)))
63 return num;
64
65 if (!buffer.length || !buffer.pointer)
66 return num;
67
68 package = buffer.pointer;
69
70 if (package->type == ACPI_TYPE_PACKAGE &&
71 package->package.count == 2 &&
72 package->package.elements[0].integer.value == 1) /* rev 1 */
73 num = package->package.elements[1].integer.value;
74
75 kfree(buffer.pointer);
76 return num;
77}
78
79/* Notify firmware how many CPUs are idle */
80static void acpi_pad_ost(acpi_handle handle, int stat,
81 uint32_t idle_nums)
82{
83 union acpi_object params[3] = {
84 {.type = ACPI_TYPE_INTEGER,},
85 {.type = ACPI_TYPE_INTEGER,},
86 {.type = ACPI_TYPE_BUFFER,},
87 };
88 struct acpi_object_list arg_list = {3, params};
89
90 params[0].integer.value = ACPI_PROCESSOR_AGGREGATOR_NOTIFY;
91 params[1].integer.value = stat;
92 params[2].buffer.length = 4;
93 params[2].buffer.pointer = (void *)&idle_nums;
94 acpi_evaluate_object(handle, "_OST", &arg_list, NULL);
95}
96
97static void acpi_pad_handle_notify(acpi_handle handle)
98{
99 int idle_nums;
100
101 mutex_lock(&xen_cpu_lock);
102 idle_nums = acpi_pad_pur(handle);
103 if (idle_nums < 0) {
104 mutex_unlock(&xen_cpu_lock);
105 return;
106 }
107
108 idle_nums = xen_acpi_pad_idle_cpus(idle_nums)
109 ?: xen_acpi_pad_idle_cpus_num();
110 if (idle_nums >= 0)
111 acpi_pad_ost(handle, 0, idle_nums);
112 mutex_unlock(&xen_cpu_lock);
113}
114
115static void acpi_pad_notify(acpi_handle handle, u32 event,
116 void *data)
117{
118 switch (event) {
119 case ACPI_PROCESSOR_AGGREGATOR_NOTIFY:
120 acpi_pad_handle_notify(handle);
121 break;
122 default:
123 pr_warn("Unsupported event [0x%x]\n", event);
124 break;
125 }
126}
127
128static int acpi_pad_add(struct acpi_device *device)
129{
130 acpi_status status;
131
132 strcpy(acpi_device_name(device), ACPI_PROCESSOR_AGGREGATOR_DEVICE_NAME);
133 strcpy(acpi_device_class(device), ACPI_PROCESSOR_AGGREGATOR_CLASS);
134
135 status = acpi_install_notify_handler(device->handle,
136 ACPI_DEVICE_NOTIFY, acpi_pad_notify, device);
137 if (ACPI_FAILURE(status))
138 return -ENODEV;
139
140 return 0;
141}
142
143static int acpi_pad_remove(struct acpi_device *device,
144 int type)
145{
146 mutex_lock(&xen_cpu_lock);
147 xen_acpi_pad_idle_cpus(0);
148 mutex_unlock(&xen_cpu_lock);
149
150 acpi_remove_notify_handler(device->handle,
151 ACPI_DEVICE_NOTIFY, acpi_pad_notify);
152 return 0;
153}
154
155static const struct acpi_device_id pad_device_ids[] = {
156 {"ACPI000C", 0},
157 {"", 0},
158};
159
160static struct acpi_driver acpi_pad_driver = {
161 .name = "processor_aggregator",
162 .class = ACPI_PROCESSOR_AGGREGATOR_CLASS,
163 .ids = pad_device_ids,
164 .ops = {
165 .add = acpi_pad_add,
166 .remove = acpi_pad_remove,
167 },
168};
169
170static int __init xen_acpi_pad_init(void)
171{
172 /* Only DOM0 is responsible for Xen acpi pad */
173 if (!xen_initial_domain())
174 return -ENODEV;
175
176 /* Only Xen4.2 or later support Xen acpi pad */
177 if (!xen_running_on_version_or_later(4, 2))
178 return -ENODEV;
179
180 return acpi_bus_register_driver(&acpi_pad_driver);
181}
182subsys_initcall(xen_acpi_pad_init);
diff --git a/drivers/xen/xen-pciback/pci_stub.c b/drivers/xen/xen-pciback/pci_stub.c
index 961d664e2d2f..129e1674f4aa 100644
--- a/drivers/xen/xen-pciback/pci_stub.c
+++ b/drivers/xen/xen-pciback/pci_stub.c
@@ -142,7 +142,8 @@ static struct pcistub_device *pcistub_device_find(int domain, int bus,
142 if (psdev->dev != NULL 142 if (psdev->dev != NULL
143 && domain == pci_domain_nr(psdev->dev->bus) 143 && domain == pci_domain_nr(psdev->dev->bus)
144 && bus == psdev->dev->bus->number 144 && bus == psdev->dev->bus->number
145 && PCI_DEVFN(slot, func) == psdev->dev->devfn) { 145 && slot == PCI_SLOT(psdev->dev->devfn)
146 && func == PCI_FUNC(psdev->dev->devfn)) {
146 pcistub_device_get(psdev); 147 pcistub_device_get(psdev);
147 goto out; 148 goto out;
148 } 149 }
@@ -191,7 +192,8 @@ struct pci_dev *pcistub_get_pci_dev_by_slot(struct xen_pcibk_device *pdev,
191 if (psdev->dev != NULL 192 if (psdev->dev != NULL
192 && domain == pci_domain_nr(psdev->dev->bus) 193 && domain == pci_domain_nr(psdev->dev->bus)
193 && bus == psdev->dev->bus->number 194 && bus == psdev->dev->bus->number
194 && PCI_DEVFN(slot, func) == psdev->dev->devfn) { 195 && slot == PCI_SLOT(psdev->dev->devfn)
196 && func == PCI_FUNC(psdev->dev->devfn)) {
195 found_dev = pcistub_device_get_pci_dev(pdev, psdev); 197 found_dev = pcistub_device_get_pci_dev(pdev, psdev);
196 break; 198 break;
197 } 199 }
@@ -897,42 +899,35 @@ static struct pci_driver xen_pcibk_pci_driver = {
897static inline int str_to_slot(const char *buf, int *domain, int *bus, 899static inline int str_to_slot(const char *buf, int *domain, int *bus,
898 int *slot, int *func) 900 int *slot, int *func)
899{ 901{
900 int err; 902 int parsed = 0;
901 char wc = '*';
902 903
903 err = sscanf(buf, " %x:%x:%x.%x", domain, bus, slot, func); 904 switch (sscanf(buf, " %x:%x:%x.%x %n", domain, bus, slot, func,
904 switch (err) { 905 &parsed)) {
905 case 3: 906 case 3:
906 *func = -1; 907 *func = -1;
907 err = sscanf(buf, " %x:%x:%x.%c", domain, bus, slot, &wc); 908 sscanf(buf, " %x:%x:%x.* %n", domain, bus, slot, &parsed);
908 break; 909 break;
909 case 2: 910 case 2:
910 *slot = *func = -1; 911 *slot = *func = -1;
911 err = sscanf(buf, " %x:%x:*.%c", domain, bus, &wc); 912 sscanf(buf, " %x:%x:*.* %n", domain, bus, &parsed);
912 if (err >= 2)
913 ++err;
914 break; 913 break;
915 } 914 }
916 if (err == 4 && wc == '*') 915 if (parsed && !buf[parsed])
917 return 0; 916 return 0;
918 else if (err < 0)
919 return -EINVAL;
920 917
921 /* try again without domain */ 918 /* try again without domain */
922 *domain = 0; 919 *domain = 0;
923 wc = '*'; 920 switch (sscanf(buf, " %x:%x.%x %n", bus, slot, func, &parsed)) {
924 err = sscanf(buf, " %x:%x.%x", bus, slot, func);
925 switch (err) {
926 case 2: 921 case 2:
927 *func = -1; 922 *func = -1;
928 err = sscanf(buf, " %x:%x.%c", bus, slot, &wc); 923 sscanf(buf, " %x:%x.* %n", bus, slot, &parsed);
929 break; 924 break;
930 case 1: 925 case 1:
931 *slot = *func = -1; 926 *slot = *func = -1;
932 err = sscanf(buf, " %x:*.%c", bus, &wc) + 1; 927 sscanf(buf, " %x:*.* %n", bus, &parsed);
933 break; 928 break;
934 } 929 }
935 if (err == 3 && wc == '*') 930 if (parsed && !buf[parsed])
936 return 0; 931 return 0;
937 932
938 return -EINVAL; 933 return -EINVAL;
@@ -941,13 +936,20 @@ static inline int str_to_slot(const char *buf, int *domain, int *bus,
941static inline int str_to_quirk(const char *buf, int *domain, int *bus, int 936static inline int str_to_quirk(const char *buf, int *domain, int *bus, int
942 *slot, int *func, int *reg, int *size, int *mask) 937 *slot, int *func, int *reg, int *size, int *mask)
943{ 938{
944 int err; 939 int parsed = 0;
945 940
946 err = 941 sscanf(buf, " %x:%x:%x.%x-%x:%x:%x %n", domain, bus, slot, func,
947 sscanf(buf, " %04x:%02x:%02x.%d-%08x:%1x:%08x", domain, bus, slot, 942 reg, size, mask, &parsed);
948 func, reg, size, mask); 943 if (parsed && !buf[parsed])
949 if (err == 7)
950 return 0; 944 return 0;
945
946 /* try again without domain */
947 *domain = 0;
948 sscanf(buf, " %x:%x.%x-%x:%x:%x %n", bus, slot, func, reg, size,
949 mask, &parsed);
950 if (parsed && !buf[parsed])
951 return 0;
952
951 return -EINVAL; 953 return -EINVAL;
952} 954}
953 955
@@ -955,7 +957,7 @@ static int pcistub_device_id_add(int domain, int bus, int slot, int func)
955{ 957{
956 struct pcistub_device_id *pci_dev_id; 958 struct pcistub_device_id *pci_dev_id;
957 unsigned long flags; 959 unsigned long flags;
958 int rc = 0; 960 int rc = 0, devfn = PCI_DEVFN(slot, func);
959 961
960 if (slot < 0) { 962 if (slot < 0) {
961 for (slot = 0; !rc && slot < 32; ++slot) 963 for (slot = 0; !rc && slot < 32; ++slot)
@@ -969,13 +971,24 @@ static int pcistub_device_id_add(int domain, int bus, int slot, int func)
969 return rc; 971 return rc;
970 } 972 }
971 973
974 if ((
975#if !defined(MODULE) /* pci_domains_supported is not being exported */ \
976 || !defined(CONFIG_PCI_DOMAINS)
977 !pci_domains_supported ? domain :
978#endif
979 domain < 0 || domain > 0xffff)
980 || bus < 0 || bus > 0xff
981 || PCI_SLOT(devfn) != slot
982 || PCI_FUNC(devfn) != func)
983 return -EINVAL;
984
972 pci_dev_id = kmalloc(sizeof(*pci_dev_id), GFP_KERNEL); 985 pci_dev_id = kmalloc(sizeof(*pci_dev_id), GFP_KERNEL);
973 if (!pci_dev_id) 986 if (!pci_dev_id)
974 return -ENOMEM; 987 return -ENOMEM;
975 988
976 pci_dev_id->domain = domain; 989 pci_dev_id->domain = domain;
977 pci_dev_id->bus = bus; 990 pci_dev_id->bus = bus;
978 pci_dev_id->devfn = PCI_DEVFN(slot, func); 991 pci_dev_id->devfn = devfn;
979 992
980 pr_debug(DRV_NAME ": wants to seize %04x:%02x:%02x.%d\n", 993 pr_debug(DRV_NAME ": wants to seize %04x:%02x:%02x.%d\n",
981 domain, bus, slot, func); 994 domain, bus, slot, func);
@@ -1016,14 +1029,18 @@ static int pcistub_device_id_remove(int domain, int bus, int slot, int func)
1016 return err; 1029 return err;
1017} 1030}
1018 1031
1019static int pcistub_reg_add(int domain, int bus, int slot, int func, int reg, 1032static int pcistub_reg_add(int domain, int bus, int slot, int func,
1020 int size, int mask) 1033 unsigned int reg, unsigned int size,
1034 unsigned int mask)
1021{ 1035{
1022 int err = 0; 1036 int err = 0;
1023 struct pcistub_device *psdev; 1037 struct pcistub_device *psdev;
1024 struct pci_dev *dev; 1038 struct pci_dev *dev;
1025 struct config_field *field; 1039 struct config_field *field;
1026 1040
1041 if (reg > 0xfff || (size < 4 && (mask >> (size * 8))))
1042 return -EINVAL;
1043
1027 psdev = pcistub_device_find(domain, bus, slot, func); 1044 psdev = pcistub_device_find(domain, bus, slot, func);
1028 if (!psdev) { 1045 if (!psdev) {
1029 err = -ENODEV; 1046 err = -ENODEV;
@@ -1254,13 +1271,11 @@ static ssize_t permissive_add(struct device_driver *drv, const char *buf,
1254 int err; 1271 int err;
1255 struct pcistub_device *psdev; 1272 struct pcistub_device *psdev;
1256 struct xen_pcibk_dev_data *dev_data; 1273 struct xen_pcibk_dev_data *dev_data;
1274
1257 err = str_to_slot(buf, &domain, &bus, &slot, &func); 1275 err = str_to_slot(buf, &domain, &bus, &slot, &func);
1258 if (err) 1276 if (err)
1259 goto out; 1277 goto out;
1260 if (slot < 0 || func < 0) { 1278
1261 err = -EINVAL;
1262 goto out;
1263 }
1264 psdev = pcistub_device_find(domain, bus, slot, func); 1279 psdev = pcistub_device_find(domain, bus, slot, func);
1265 if (!psdev) { 1280 if (!psdev) {
1266 err = -ENODEV; 1281 err = -ENODEV;
@@ -1339,8 +1354,6 @@ static int __init pcistub_init(void)
1339 1354
1340 if (pci_devs_to_hide && *pci_devs_to_hide) { 1355 if (pci_devs_to_hide && *pci_devs_to_hide) {
1341 do { 1356 do {
1342 char wc = '*';
1343
1344 parsed = 0; 1357 parsed = 0;
1345 1358
1346 err = sscanf(pci_devs_to_hide + pos, 1359 err = sscanf(pci_devs_to_hide + pos,
@@ -1349,51 +1362,48 @@ static int __init pcistub_init(void)
1349 switch (err) { 1362 switch (err) {
1350 case 3: 1363 case 3:
1351 func = -1; 1364 func = -1;
1352 err = sscanf(pci_devs_to_hide + pos, 1365 sscanf(pci_devs_to_hide + pos,
1353 " (%x:%x:%x.%c) %n", 1366 " (%x:%x:%x.*) %n",
1354 &domain, &bus, &slot, &wc, 1367 &domain, &bus, &slot, &parsed);
1355 &parsed);
1356 break; 1368 break;
1357 case 2: 1369 case 2:
1358 slot = func = -1; 1370 slot = func = -1;
1359 err = sscanf(pci_devs_to_hide + pos, 1371 sscanf(pci_devs_to_hide + pos,
1360 " (%x:%x:*.%c) %n", 1372 " (%x:%x:*.*) %n",
1361 &domain, &bus, &wc, &parsed) + 1; 1373 &domain, &bus, &parsed);
1362 break; 1374 break;
1363 } 1375 }
1364 1376
1365 if (err != 4 || wc != '*') { 1377 if (!parsed) {
1366 domain = 0; 1378 domain = 0;
1367 wc = '*';
1368 err = sscanf(pci_devs_to_hide + pos, 1379 err = sscanf(pci_devs_to_hide + pos,
1369 " (%x:%x.%x) %n", 1380 " (%x:%x.%x) %n",
1370 &bus, &slot, &func, &parsed); 1381 &bus, &slot, &func, &parsed);
1371 switch (err) { 1382 switch (err) {
1372 case 2: 1383 case 2:
1373 func = -1; 1384 func = -1;
1374 err = sscanf(pci_devs_to_hide + pos, 1385 sscanf(pci_devs_to_hide + pos,
1375 " (%x:%x.%c) %n", 1386 " (%x:%x.*) %n",
1376 &bus, &slot, &wc, 1387 &bus, &slot, &parsed);
1377 &parsed);
1378 break; 1388 break;
1379 case 1: 1389 case 1:
1380 slot = func = -1; 1390 slot = func = -1;
1381 err = sscanf(pci_devs_to_hide + pos, 1391 sscanf(pci_devs_to_hide + pos,
1382 " (%x:*.%c) %n", 1392 " (%x:*.*) %n",
1383 &bus, &wc, &parsed) + 1; 1393 &bus, &parsed);
1384 break; 1394 break;
1385 } 1395 }
1386 if (err != 3 || wc != '*')
1387 goto parse_error;
1388 } 1396 }
1389 1397
1398 if (parsed <= 0)
1399 goto parse_error;
1400
1390 err = pcistub_device_id_add(domain, bus, slot, func); 1401 err = pcistub_device_id_add(domain, bus, slot, func);
1391 if (err) 1402 if (err)
1392 goto out; 1403 goto out;
1393 1404
1394 /* if parsed<=0, we've reached the end of the string */
1395 pos += parsed; 1405 pos += parsed;
1396 } while (parsed > 0 && pci_devs_to_hide[pos]); 1406 } while (pci_devs_to_hide[pos]);
1397 } 1407 }
1398 1408
1399 /* If we're the first PCI Device Driver to register, we're the 1409 /* If we're the first PCI Device Driver to register, we're the
diff --git a/drivers/xen/xen-pciback/pciback.h b/drivers/xen/xen-pciback/pciback.h
index a7def010eba3..f72af87640e0 100644
--- a/drivers/xen/xen-pciback/pciback.h
+++ b/drivers/xen/xen-pciback/pciback.h
@@ -124,7 +124,7 @@ static inline int xen_pcibk_add_pci_dev(struct xen_pcibk_device *pdev,
124static inline void xen_pcibk_release_pci_dev(struct xen_pcibk_device *pdev, 124static inline void xen_pcibk_release_pci_dev(struct xen_pcibk_device *pdev,
125 struct pci_dev *dev) 125 struct pci_dev *dev)
126{ 126{
127 if (xen_pcibk_backend && xen_pcibk_backend->free) 127 if (xen_pcibk_backend && xen_pcibk_backend->release)
128 return xen_pcibk_backend->release(pdev, dev); 128 return xen_pcibk_backend->release(pdev, dev);
129} 129}
130 130
diff --git a/drivers/xen/xenbus/xenbus_xs.c b/drivers/xen/xenbus/xenbus_xs.c
index acedeabe589c..88e677b0de74 100644
--- a/drivers/xen/xenbus/xenbus_xs.c
+++ b/drivers/xen/xenbus/xenbus_xs.c
@@ -48,7 +48,6 @@
48#include <xen/xenbus.h> 48#include <xen/xenbus.h>
49#include <xen/xen.h> 49#include <xen/xen.h>
50#include "xenbus_comms.h" 50#include "xenbus_comms.h"
51#include <asm/xen/hypervisor.h>
52 51
53struct xs_stored_msg { 52struct xs_stored_msg {
54 struct list_head list; 53 struct list_head list;
diff --git a/include/xen/interface/event_channel.h b/include/xen/interface/event_channel.h
index 2090881c3650..f4942921e202 100644
--- a/include/xen/interface/event_channel.h
+++ b/include/xen/interface/event_channel.h
@@ -177,6 +177,19 @@ struct evtchn_unmask {
177 evtchn_port_t port; 177 evtchn_port_t port;
178}; 178};
179 179
180/*
181 * EVTCHNOP_reset: Close all event channels associated with specified domain.
182 * NOTES:
183 * 1. <dom> may be specified as DOMID_SELF.
184 * 2. Only a sufficiently-privileged domain may specify other than DOMID_SELF.
185 */
186#define EVTCHNOP_reset 10
187struct evtchn_reset {
188 /* IN parameters. */
189 domid_t dom;
190};
191typedef struct evtchn_reset evtchn_reset_t;
192
180struct evtchn_op { 193struct evtchn_op {
181 uint32_t cmd; /* EVTCHNOP_* */ 194 uint32_t cmd; /* EVTCHNOP_* */
182 union { 195 union {
diff --git a/include/xen/interface/memory.h b/include/xen/interface/memory.h
index 90712e2072d5..b40a4315cb8b 100644
--- a/include/xen/interface/memory.h
+++ b/include/xen/interface/memory.h
@@ -153,6 +153,14 @@ struct xen_machphys_mapping {
153}; 153};
154DEFINE_GUEST_HANDLE_STRUCT(xen_machphys_mapping_t); 154DEFINE_GUEST_HANDLE_STRUCT(xen_machphys_mapping_t);
155 155
156#define XENMAPSPACE_shared_info 0 /* shared info page */
157#define XENMAPSPACE_grant_table 1 /* grant table page */
158#define XENMAPSPACE_gmfn 2 /* GMFN */
159#define XENMAPSPACE_gmfn_range 3 /* GMFN range, XENMEM_add_to_physmap only. */
160#define XENMAPSPACE_gmfn_foreign 4 /* GMFN from another dom,
161 * XENMEM_add_to_physmap_range only.
162 */
163
156/* 164/*
157 * Sets the GPFN at which a particular page appears in the specified guest's 165 * Sets the GPFN at which a particular page appears in the specified guest's
158 * pseudophysical address space. 166 * pseudophysical address space.
@@ -167,8 +175,6 @@ struct xen_add_to_physmap {
167 uint16_t size; 175 uint16_t size;
168 176
169 /* Source mapping space. */ 177 /* Source mapping space. */
170#define XENMAPSPACE_shared_info 0 /* shared info page */
171#define XENMAPSPACE_grant_table 1 /* grant table page */
172 unsigned int space; 178 unsigned int space;
173 179
174 /* Index into source mapping space. */ 180 /* Index into source mapping space. */
@@ -182,6 +188,24 @@ DEFINE_GUEST_HANDLE_STRUCT(xen_add_to_physmap);
182/*** REMOVED ***/ 188/*** REMOVED ***/
183/*#define XENMEM_translate_gpfn_list 8*/ 189/*#define XENMEM_translate_gpfn_list 8*/
184 190
191#define XENMEM_add_to_physmap_range 23
192struct xen_add_to_physmap_range {
193 /* Which domain to change the mapping for. */
194 domid_t domid;
195 uint16_t space; /* => enum phys_map_space */
196
197 /* Number of pages to go through */
198 uint16_t size;
199 domid_t foreign_domid; /* IFF gmfn_foreign */
200
201 /* Indexes into space being mapped. */
202 GUEST_HANDLE(xen_ulong_t) idxs;
203
204 /* GPFN in domid where the source mapping page should appear. */
205 GUEST_HANDLE(xen_pfn_t) gpfns;
206};
207DEFINE_GUEST_HANDLE_STRUCT(xen_add_to_physmap_range);
208
185/* 209/*
186 * Returns the pseudo-physical memory map as it was when the domain 210 * Returns the pseudo-physical memory map as it was when the domain
187 * was started (specified by XENMEM_set_memory_map). 211 * was started (specified by XENMEM_set_memory_map).
@@ -217,4 +241,20 @@ DEFINE_GUEST_HANDLE_STRUCT(xen_memory_map);
217 * during a driver critical region. 241 * during a driver critical region.
218 */ 242 */
219extern spinlock_t xen_reservation_lock; 243extern spinlock_t xen_reservation_lock;
244
245/*
246 * Unmaps the page appearing at a particular GPFN from the specified guest's
247 * pseudophysical address space.
248 * arg == addr of xen_remove_from_physmap_t.
249 */
250#define XENMEM_remove_from_physmap 15
251struct xen_remove_from_physmap {
252 /* Which domain to change the mapping for. */
253 domid_t domid;
254
255 /* GPFN of the current mapping of the page. */
256 xen_pfn_t gpfn;
257};
258DEFINE_GUEST_HANDLE_STRUCT(xen_remove_from_physmap);
259
220#endif /* __XEN_PUBLIC_MEMORY_H__ */ 260#endif /* __XEN_PUBLIC_MEMORY_H__ */
diff --git a/include/xen/interface/platform.h b/include/xen/interface/platform.h
index 4755b5fac9c7..5e36932ab407 100644
--- a/include/xen/interface/platform.h
+++ b/include/xen/interface/platform.h
@@ -324,6 +324,22 @@ struct xenpf_cpu_ol {
324}; 324};
325DEFINE_GUEST_HANDLE_STRUCT(xenpf_cpu_ol); 325DEFINE_GUEST_HANDLE_STRUCT(xenpf_cpu_ol);
326 326
327/*
328 * CMD 58 and 59 are reserved for cpu hotadd and memory hotadd,
329 * which are already occupied at Xen hypervisor side.
330 */
331#define XENPF_core_parking 60
332struct xenpf_core_parking {
333 /* IN variables */
334#define XEN_CORE_PARKING_SET 1
335#define XEN_CORE_PARKING_GET 2
336 uint32_t type;
337 /* IN variables: set cpu nums expected to be idled */
338 /* OUT variables: get cpu nums actually be idled */
339 uint32_t idle_nums;
340};
341DEFINE_GUEST_HANDLE_STRUCT(xenpf_core_parking);
342
327struct xen_platform_op { 343struct xen_platform_op {
328 uint32_t cmd; 344 uint32_t cmd;
329 uint32_t interface_version; /* XENPF_INTERFACE_VERSION */ 345 uint32_t interface_version; /* XENPF_INTERFACE_VERSION */
@@ -341,6 +357,7 @@ struct xen_platform_op {
341 struct xenpf_set_processor_pminfo set_pminfo; 357 struct xenpf_set_processor_pminfo set_pminfo;
342 struct xenpf_pcpuinfo pcpu_info; 358 struct xenpf_pcpuinfo pcpu_info;
343 struct xenpf_cpu_ol cpu_ol; 359 struct xenpf_cpu_ol cpu_ol;
360 struct xenpf_core_parking core_parking;
344 uint8_t pad[128]; 361 uint8_t pad[128];
345 } u; 362 } u;
346}; 363};
diff --git a/include/xen/xen-ops.h b/include/xen/xen-ops.h
index 6a198e46ab6e..d6fe062cad6b 100644
--- a/include/xen/xen-ops.h
+++ b/include/xen/xen-ops.h
@@ -2,6 +2,7 @@
2#define INCLUDE_XEN_OPS_H 2#define INCLUDE_XEN_OPS_H
3 3
4#include <linux/percpu.h> 4#include <linux/percpu.h>
5#include <asm/xen/interface.h>
5 6
6DECLARE_PER_CPU(struct vcpu_info *, xen_vcpu); 7DECLARE_PER_CPU(struct vcpu_info *, xen_vcpu);
7 8
@@ -26,7 +27,11 @@ void xen_destroy_contiguous_region(unsigned long vstart, unsigned int order);
26struct vm_area_struct; 27struct vm_area_struct;
27int xen_remap_domain_mfn_range(struct vm_area_struct *vma, 28int xen_remap_domain_mfn_range(struct vm_area_struct *vma,
28 unsigned long addr, 29 unsigned long addr,
29 unsigned long mfn, int nr, 30 xen_pfn_t mfn, int nr,
30 pgprot_t prot, unsigned domid); 31 pgprot_t prot, unsigned domid,
32 struct page **pages);
33int xen_unmap_domain_mfn_range(struct vm_area_struct *vma,
34 int numpgs, struct page **pages);
31 35
36bool xen_running_on_version_or_later(unsigned int major, unsigned int minor);
32#endif /* INCLUDE_XEN_OPS_H */ 37#endif /* INCLUDE_XEN_OPS_H */