diff options
author | Linus Torvalds <torvalds@linux-foundation.org> | 2012-05-24 19:02:08 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2012-05-24 19:02:08 -0400 |
commit | b5f4035adfffbcc6b478de5b8c44b618b3124aff (patch) | |
tree | e7a5f011d8aaf5c95edf933f98f25dfc8fa46837 | |
parent | ce004178be1bbaa292e9e6497939e2970300095a (diff) | |
parent | 68c2c39a76b094e9b2773e5846424ea674bf2c46 (diff) |
Merge tag 'stable/for-linus-3.5-rc0-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/konrad/xen
Pull Xen updates from Konrad Rzeszutek Wilk:
"Features:
* Extend the APIC ops implementation and add IRQ_WORKER vector
support so that 'perf' can work properly.
* Fix self-ballooning code, and balloon logic when booting as initial
domain.
* Move array printing code to generic debugfs
* Support XenBus domains.
* Lazily free grants when a domain is dead/non-existent.
* In M2P code use batching calls
Bug-fixes:
* Fix NULL dereference in allocation failure path (hvc_xen)
* Fix unbinding of IRQ_WORKER vector during vCPU hot-unplug
* Fix HVM guest resume - we would leak a PIRQ value instead of
reusing the existing one."
Fix up add-add conflicts in arch/x86/xen/enlighten.c due to addition of
apic ipi interface next to the new apic_id functions.
* tag 'stable/for-linus-3.5-rc0-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/konrad/xen:
xen: do not map the same GSI twice in PVHVM guests.
hvc_xen: NULL dereference on allocation failure
xen: Add selfballoning memory reservation tunable.
xenbus: Add support for xenbus backend in stub domain
xen/smp: unbind irqworkX when unplugging vCPUs.
xen: enter/exit lazy_mmu_mode around m2p_override calls
xen/acpi/sleep: Enable ACPI sleep via the __acpi_os_prepare_sleep
xen: implement IRQ_WORK_VECTOR handler
xen: implement apic ipi interface
xen/setup: update VA mapping when releasing memory during setup
xen/setup: Combine the two hypercall functions - since they are quite similar.
xen/setup: Populate freed MFNs from non-RAM E820 entries and gaps to E820 RAM
xen/setup: Only print "Freeing XXX-YYY pfn range: Z pages freed" if Z > 0
xen/gnttab: add deferred freeing logic
debugfs: Add support to print u32 array in debugfs
xen/p2m: An early bootup variant of set_phys_to_machine
xen/p2m: Collapse early_alloc_p2m_middle redundant checks.
xen/p2m: Allow alloc_p2m_middle to call reserve_brk depending on argument
xen/p2m: Move code around to allow for better re-usage.
-rw-r--r-- | arch/x86/include/asm/xen/events.h | 1 | ||||
-rw-r--r-- | arch/x86/include/asm/xen/page.h | 1 | ||||
-rw-r--r-- | arch/x86/pci/xen.c | 4 | ||||
-rw-r--r-- | arch/x86/xen/debugfs.c | 104 | ||||
-rw-r--r-- | arch/x86/xen/debugfs.h | 4 | ||||
-rw-r--r-- | arch/x86/xen/enlighten.c | 13 | ||||
-rw-r--r-- | arch/x86/xen/mmu.c | 23 | ||||
-rw-r--r-- | arch/x86/xen/p2m.c | 104 | ||||
-rw-r--r-- | arch/x86/xen/setup.c | 171 | ||||
-rw-r--r-- | arch/x86/xen/smp.c | 112 | ||||
-rw-r--r-- | arch/x86/xen/smp.h | 12 | ||||
-rw-r--r-- | arch/x86/xen/spinlock.c | 12 | ||||
-rw-r--r-- | arch/x86/xen/xen-ops.h | 1 | ||||
-rw-r--r-- | drivers/xen/Makefile | 2 | ||||
-rw-r--r-- | drivers/xen/acpi.c | 62 | ||||
-rw-r--r-- | drivers/xen/events.c | 5 | ||||
-rw-r--r-- | drivers/xen/grant-table.c | 125 | ||||
-rw-r--r-- | drivers/xen/xen-selfballoon.c | 34 | ||||
-rw-r--r-- | drivers/xen/xenbus/xenbus_comms.c | 6 | ||||
-rw-r--r-- | drivers/xen/xenbus/xenbus_comms.h | 1 | ||||
-rw-r--r-- | drivers/xen/xenbus/xenbus_dev_backend.c | 51 | ||||
-rw-r--r-- | fs/debugfs/file.c | 128 | ||||
-rw-r--r-- | include/linux/debugfs.h | 11 | ||||
-rw-r--r-- | include/xen/acpi.h | 58 | ||||
-rw-r--r-- | include/xen/events.h | 3 | ||||
-rw-r--r-- | include/xen/grant_table.h | 2 | ||||
-rw-r--r-- | include/xen/xenbus_dev.h | 3 |
27 files changed, 827 insertions, 226 deletions
diff --git a/arch/x86/include/asm/xen/events.h b/arch/x86/include/asm/xen/events.h index 1df35417c412..cc146d51449e 100644 --- a/arch/x86/include/asm/xen/events.h +++ b/arch/x86/include/asm/xen/events.h | |||
@@ -6,6 +6,7 @@ enum ipi_vector { | |||
6 | XEN_CALL_FUNCTION_VECTOR, | 6 | XEN_CALL_FUNCTION_VECTOR, |
7 | XEN_CALL_FUNCTION_SINGLE_VECTOR, | 7 | XEN_CALL_FUNCTION_SINGLE_VECTOR, |
8 | XEN_SPIN_UNLOCK_VECTOR, | 8 | XEN_SPIN_UNLOCK_VECTOR, |
9 | XEN_IRQ_WORK_VECTOR, | ||
9 | 10 | ||
10 | XEN_NR_IPIS, | 11 | XEN_NR_IPIS, |
11 | }; | 12 | }; |
diff --git a/arch/x86/include/asm/xen/page.h b/arch/x86/include/asm/xen/page.h index c34f96c2f7a0..93971e841dd5 100644 --- a/arch/x86/include/asm/xen/page.h +++ b/arch/x86/include/asm/xen/page.h | |||
@@ -44,6 +44,7 @@ extern unsigned long machine_to_phys_nr; | |||
44 | 44 | ||
45 | extern unsigned long get_phys_to_machine(unsigned long pfn); | 45 | extern unsigned long get_phys_to_machine(unsigned long pfn); |
46 | extern bool set_phys_to_machine(unsigned long pfn, unsigned long mfn); | 46 | extern bool set_phys_to_machine(unsigned long pfn, unsigned long mfn); |
47 | extern bool __init early_set_phys_to_machine(unsigned long pfn, unsigned long mfn); | ||
47 | extern bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn); | 48 | extern bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn); |
48 | extern unsigned long set_phys_range_identity(unsigned long pfn_s, | 49 | extern unsigned long set_phys_range_identity(unsigned long pfn_s, |
49 | unsigned long pfn_e); | 50 | unsigned long pfn_e); |
diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c index 7415aa927913..56ab74989cf1 100644 --- a/arch/x86/pci/xen.c +++ b/arch/x86/pci/xen.c | |||
@@ -64,6 +64,10 @@ static int xen_register_pirq(u32 gsi, int gsi_override, int triggering, | |||
64 | int shareable = 0; | 64 | int shareable = 0; |
65 | char *name; | 65 | char *name; |
66 | 66 | ||
67 | irq = xen_irq_from_gsi(gsi); | ||
68 | if (irq > 0) | ||
69 | return irq; | ||
70 | |||
67 | if (set_pirq) | 71 | if (set_pirq) |
68 | pirq = gsi; | 72 | pirq = gsi; |
69 | 73 | ||
diff --git a/arch/x86/xen/debugfs.c b/arch/x86/xen/debugfs.c index ef1db1900d86..c8377fb26cdf 100644 --- a/arch/x86/xen/debugfs.c +++ b/arch/x86/xen/debugfs.c | |||
@@ -19,107 +19,3 @@ struct dentry * __init xen_init_debugfs(void) | |||
19 | return d_xen_debug; | 19 | return d_xen_debug; |
20 | } | 20 | } |
21 | 21 | ||
22 | struct array_data | ||
23 | { | ||
24 | void *array; | ||
25 | unsigned elements; | ||
26 | }; | ||
27 | |||
28 | static int u32_array_open(struct inode *inode, struct file *file) | ||
29 | { | ||
30 | file->private_data = NULL; | ||
31 | return nonseekable_open(inode, file); | ||
32 | } | ||
33 | |||
34 | static size_t format_array(char *buf, size_t bufsize, const char *fmt, | ||
35 | u32 *array, unsigned array_size) | ||
36 | { | ||
37 | size_t ret = 0; | ||
38 | unsigned i; | ||
39 | |||
40 | for(i = 0; i < array_size; i++) { | ||
41 | size_t len; | ||
42 | |||
43 | len = snprintf(buf, bufsize, fmt, array[i]); | ||
44 | len++; /* ' ' or '\n' */ | ||
45 | ret += len; | ||
46 | |||
47 | if (buf) { | ||
48 | buf += len; | ||
49 | bufsize -= len; | ||
50 | buf[-1] = (i == array_size-1) ? '\n' : ' '; | ||
51 | } | ||
52 | } | ||
53 | |||
54 | ret++; /* \0 */ | ||
55 | if (buf) | ||
56 | *buf = '\0'; | ||
57 | |||
58 | return ret; | ||
59 | } | ||
60 | |||
61 | static char *format_array_alloc(const char *fmt, u32 *array, unsigned array_size) | ||
62 | { | ||
63 | size_t len = format_array(NULL, 0, fmt, array, array_size); | ||
64 | char *ret; | ||
65 | |||
66 | ret = kmalloc(len, GFP_KERNEL); | ||
67 | if (ret == NULL) | ||
68 | return NULL; | ||
69 | |||
70 | format_array(ret, len, fmt, array, array_size); | ||
71 | return ret; | ||
72 | } | ||
73 | |||
74 | static ssize_t u32_array_read(struct file *file, char __user *buf, size_t len, | ||
75 | loff_t *ppos) | ||
76 | { | ||
77 | struct inode *inode = file->f_path.dentry->d_inode; | ||
78 | struct array_data *data = inode->i_private; | ||
79 | size_t size; | ||
80 | |||
81 | if (*ppos == 0) { | ||
82 | if (file->private_data) { | ||
83 | kfree(file->private_data); | ||
84 | file->private_data = NULL; | ||
85 | } | ||
86 | |||
87 | file->private_data = format_array_alloc("%u", data->array, data->elements); | ||
88 | } | ||
89 | |||
90 | size = 0; | ||
91 | if (file->private_data) | ||
92 | size = strlen(file->private_data); | ||
93 | |||
94 | return simple_read_from_buffer(buf, len, ppos, file->private_data, size); | ||
95 | } | ||
96 | |||
97 | static int xen_array_release(struct inode *inode, struct file *file) | ||
98 | { | ||
99 | kfree(file->private_data); | ||
100 | |||
101 | return 0; | ||
102 | } | ||
103 | |||
104 | static const struct file_operations u32_array_fops = { | ||
105 | .owner = THIS_MODULE, | ||
106 | .open = u32_array_open, | ||
107 | .release= xen_array_release, | ||
108 | .read = u32_array_read, | ||
109 | .llseek = no_llseek, | ||
110 | }; | ||
111 | |||
112 | struct dentry *xen_debugfs_create_u32_array(const char *name, umode_t mode, | ||
113 | struct dentry *parent, | ||
114 | u32 *array, unsigned elements) | ||
115 | { | ||
116 | struct array_data *data = kmalloc(sizeof(*data), GFP_KERNEL); | ||
117 | |||
118 | if (data == NULL) | ||
119 | return NULL; | ||
120 | |||
121 | data->array = array; | ||
122 | data->elements = elements; | ||
123 | |||
124 | return debugfs_create_file(name, mode, parent, data, &u32_array_fops); | ||
125 | } | ||
diff --git a/arch/x86/xen/debugfs.h b/arch/x86/xen/debugfs.h index 78d25499be5b..12ebf3325c7b 100644 --- a/arch/x86/xen/debugfs.h +++ b/arch/x86/xen/debugfs.h | |||
@@ -3,8 +3,4 @@ | |||
3 | 3 | ||
4 | struct dentry * __init xen_init_debugfs(void); | 4 | struct dentry * __init xen_init_debugfs(void); |
5 | 5 | ||
6 | struct dentry *xen_debugfs_create_u32_array(const char *name, umode_t mode, | ||
7 | struct dentry *parent, | ||
8 | u32 *array, unsigned elements); | ||
9 | |||
10 | #endif /* _XEN_DEBUGFS_H */ | 6 | #endif /* _XEN_DEBUGFS_H */ |
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c index c0f5facdb10c..75f33b2a5933 100644 --- a/arch/x86/xen/enlighten.c +++ b/arch/x86/xen/enlighten.c | |||
@@ -42,6 +42,7 @@ | |||
42 | #include <xen/page.h> | 42 | #include <xen/page.h> |
43 | #include <xen/hvm.h> | 43 | #include <xen/hvm.h> |
44 | #include <xen/hvc-console.h> | 44 | #include <xen/hvc-console.h> |
45 | #include <xen/acpi.h> | ||
45 | 46 | ||
46 | #include <asm/paravirt.h> | 47 | #include <asm/paravirt.h> |
47 | #include <asm/apic.h> | 48 | #include <asm/apic.h> |
@@ -75,6 +76,7 @@ | |||
75 | 76 | ||
76 | #include "xen-ops.h" | 77 | #include "xen-ops.h" |
77 | #include "mmu.h" | 78 | #include "mmu.h" |
79 | #include "smp.h" | ||
78 | #include "multicalls.h" | 80 | #include "multicalls.h" |
79 | 81 | ||
80 | EXPORT_SYMBOL_GPL(hypercall_page); | 82 | EXPORT_SYMBOL_GPL(hypercall_page); |
@@ -883,6 +885,14 @@ static void set_xen_basic_apic_ops(void) | |||
883 | apic->safe_wait_icr_idle = xen_safe_apic_wait_icr_idle; | 885 | apic->safe_wait_icr_idle = xen_safe_apic_wait_icr_idle; |
884 | apic->set_apic_id = xen_set_apic_id; | 886 | apic->set_apic_id = xen_set_apic_id; |
885 | apic->get_apic_id = xen_get_apic_id; | 887 | apic->get_apic_id = xen_get_apic_id; |
888 | |||
889 | #ifdef CONFIG_SMP | ||
890 | apic->send_IPI_allbutself = xen_send_IPI_allbutself; | ||
891 | apic->send_IPI_mask_allbutself = xen_send_IPI_mask_allbutself; | ||
892 | apic->send_IPI_mask = xen_send_IPI_mask; | ||
893 | apic->send_IPI_all = xen_send_IPI_all; | ||
894 | apic->send_IPI_self = xen_send_IPI_self; | ||
895 | #endif | ||
886 | } | 896 | } |
887 | 897 | ||
888 | #endif | 898 | #endif |
@@ -1340,7 +1350,6 @@ asmlinkage void __init xen_start_kernel(void) | |||
1340 | 1350 | ||
1341 | xen_raw_console_write("mapping kernel into physical memory\n"); | 1351 | xen_raw_console_write("mapping kernel into physical memory\n"); |
1342 | pgd = xen_setup_kernel_pagetable(pgd, xen_start_info->nr_pages); | 1352 | pgd = xen_setup_kernel_pagetable(pgd, xen_start_info->nr_pages); |
1343 | xen_ident_map_ISA(); | ||
1344 | 1353 | ||
1345 | /* Allocate and initialize top and mid mfn levels for p2m structure */ | 1354 | /* Allocate and initialize top and mid mfn levels for p2m structure */ |
1346 | xen_build_mfn_list_list(); | 1355 | xen_build_mfn_list_list(); |
@@ -1400,6 +1409,8 @@ asmlinkage void __init xen_start_kernel(void) | |||
1400 | 1409 | ||
1401 | /* Make sure ACS will be enabled */ | 1410 | /* Make sure ACS will be enabled */ |
1402 | pci_request_acs(); | 1411 | pci_request_acs(); |
1412 | |||
1413 | xen_acpi_sleep_register(); | ||
1403 | } | 1414 | } |
1404 | #ifdef CONFIG_PCI | 1415 | #ifdef CONFIG_PCI |
1405 | /* PCI BIOS service won't work from a PV guest. */ | 1416 | /* PCI BIOS service won't work from a PV guest. */ |
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c index 3506cd4f9a43..3a73785631ce 100644 --- a/arch/x86/xen/mmu.c +++ b/arch/x86/xen/mmu.c | |||
@@ -1933,29 +1933,6 @@ static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot) | |||
1933 | #endif | 1933 | #endif |
1934 | } | 1934 | } |
1935 | 1935 | ||
1936 | void __init xen_ident_map_ISA(void) | ||
1937 | { | ||
1938 | unsigned long pa; | ||
1939 | |||
1940 | /* | ||
1941 | * If we're dom0, then linear map the ISA machine addresses into | ||
1942 | * the kernel's address space. | ||
1943 | */ | ||
1944 | if (!xen_initial_domain()) | ||
1945 | return; | ||
1946 | |||
1947 | xen_raw_printk("Xen: setup ISA identity maps\n"); | ||
1948 | |||
1949 | for (pa = ISA_START_ADDRESS; pa < ISA_END_ADDRESS; pa += PAGE_SIZE) { | ||
1950 | pte_t pte = mfn_pte(PFN_DOWN(pa), PAGE_KERNEL_IO); | ||
1951 | |||
1952 | if (HYPERVISOR_update_va_mapping(PAGE_OFFSET + pa, pte, 0)) | ||
1953 | BUG(); | ||
1954 | } | ||
1955 | |||
1956 | xen_flush_tlb(); | ||
1957 | } | ||
1958 | |||
1959 | static void __init xen_post_allocator_init(void) | 1936 | static void __init xen_post_allocator_init(void) |
1960 | { | 1937 | { |
1961 | pv_mmu_ops.set_pte = xen_set_pte; | 1938 | pv_mmu_ops.set_pte = xen_set_pte; |
diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c index 1b267e75158d..ffd08c414e91 100644 --- a/arch/x86/xen/p2m.c +++ b/arch/x86/xen/p2m.c | |||
@@ -499,16 +499,18 @@ static bool alloc_p2m(unsigned long pfn) | |||
499 | return true; | 499 | return true; |
500 | } | 500 | } |
501 | 501 | ||
502 | static bool __init __early_alloc_p2m(unsigned long pfn) | 502 | static bool __init early_alloc_p2m_middle(unsigned long pfn, bool check_boundary) |
503 | { | 503 | { |
504 | unsigned topidx, mididx, idx; | 504 | unsigned topidx, mididx, idx; |
505 | unsigned long *p2m; | ||
506 | unsigned long *mid_mfn_p; | ||
505 | 507 | ||
506 | topidx = p2m_top_index(pfn); | 508 | topidx = p2m_top_index(pfn); |
507 | mididx = p2m_mid_index(pfn); | 509 | mididx = p2m_mid_index(pfn); |
508 | idx = p2m_index(pfn); | 510 | idx = p2m_index(pfn); |
509 | 511 | ||
510 | /* Pfff.. No boundary cross-over, lets get out. */ | 512 | /* Pfff.. No boundary cross-over, lets get out. */ |
511 | if (!idx) | 513 | if (!idx && check_boundary) |
512 | return false; | 514 | return false; |
513 | 515 | ||
514 | WARN(p2m_top[topidx][mididx] == p2m_identity, | 516 | WARN(p2m_top[topidx][mididx] == p2m_identity, |
@@ -522,24 +524,66 @@ static bool __init __early_alloc_p2m(unsigned long pfn) | |||
522 | return false; | 524 | return false; |
523 | 525 | ||
524 | /* Boundary cross-over for the edges: */ | 526 | /* Boundary cross-over for the edges: */ |
525 | if (idx) { | 527 | p2m = extend_brk(PAGE_SIZE, PAGE_SIZE); |
526 | unsigned long *p2m = extend_brk(PAGE_SIZE, PAGE_SIZE); | ||
527 | unsigned long *mid_mfn_p; | ||
528 | 528 | ||
529 | p2m_init(p2m); | 529 | p2m_init(p2m); |
530 | 530 | ||
531 | p2m_top[topidx][mididx] = p2m; | 531 | p2m_top[topidx][mididx] = p2m; |
532 | 532 | ||
533 | /* For save/restore we need to MFN of the P2M saved */ | 533 | /* For save/restore we need to MFN of the P2M saved */ |
534 | 534 | ||
535 | mid_mfn_p = p2m_top_mfn_p[topidx]; | 535 | mid_mfn_p = p2m_top_mfn_p[topidx]; |
536 | WARN(mid_mfn_p[mididx] != virt_to_mfn(p2m_missing), | 536 | WARN(mid_mfn_p[mididx] != virt_to_mfn(p2m_missing), |
537 | "P2M_TOP_P[%d][%d] != MFN of p2m_missing!\n", | 537 | "P2M_TOP_P[%d][%d] != MFN of p2m_missing!\n", |
538 | topidx, mididx); | 538 | topidx, mididx); |
539 | mid_mfn_p[mididx] = virt_to_mfn(p2m); | 539 | mid_mfn_p[mididx] = virt_to_mfn(p2m); |
540 | |||
541 | return true; | ||
542 | } | ||
543 | |||
544 | static bool __init early_alloc_p2m(unsigned long pfn) | ||
545 | { | ||
546 | unsigned topidx = p2m_top_index(pfn); | ||
547 | unsigned long *mid_mfn_p; | ||
548 | unsigned long **mid; | ||
549 | |||
550 | mid = p2m_top[topidx]; | ||
551 | mid_mfn_p = p2m_top_mfn_p[topidx]; | ||
552 | if (mid == p2m_mid_missing) { | ||
553 | mid = extend_brk(PAGE_SIZE, PAGE_SIZE); | ||
554 | |||
555 | p2m_mid_init(mid); | ||
556 | |||
557 | p2m_top[topidx] = mid; | ||
540 | 558 | ||
559 | BUG_ON(mid_mfn_p != p2m_mid_missing_mfn); | ||
541 | } | 560 | } |
542 | return idx != 0; | 561 | /* And the save/restore P2M tables.. */ |
562 | if (mid_mfn_p == p2m_mid_missing_mfn) { | ||
563 | mid_mfn_p = extend_brk(PAGE_SIZE, PAGE_SIZE); | ||
564 | p2m_mid_mfn_init(mid_mfn_p); | ||
565 | |||
566 | p2m_top_mfn_p[topidx] = mid_mfn_p; | ||
567 | p2m_top_mfn[topidx] = virt_to_mfn(mid_mfn_p); | ||
568 | /* Note: we don't set mid_mfn_p[midix] here, | ||
569 | * look in early_alloc_p2m_middle */ | ||
570 | } | ||
571 | return true; | ||
572 | } | ||
573 | bool __init early_set_phys_to_machine(unsigned long pfn, unsigned long mfn) | ||
574 | { | ||
575 | if (unlikely(!__set_phys_to_machine(pfn, mfn))) { | ||
576 | if (!early_alloc_p2m(pfn)) | ||
577 | return false; | ||
578 | |||
579 | if (!early_alloc_p2m_middle(pfn, false /* boundary crossover OK!*/)) | ||
580 | return false; | ||
581 | |||
582 | if (!__set_phys_to_machine(pfn, mfn)) | ||
583 | return false; | ||
584 | } | ||
585 | |||
586 | return true; | ||
543 | } | 587 | } |
544 | unsigned long __init set_phys_range_identity(unsigned long pfn_s, | 588 | unsigned long __init set_phys_range_identity(unsigned long pfn_s, |
545 | unsigned long pfn_e) | 589 | unsigned long pfn_e) |
@@ -559,35 +603,11 @@ unsigned long __init set_phys_range_identity(unsigned long pfn_s, | |||
559 | pfn < ALIGN(pfn_e, (P2M_MID_PER_PAGE * P2M_PER_PAGE)); | 603 | pfn < ALIGN(pfn_e, (P2M_MID_PER_PAGE * P2M_PER_PAGE)); |
560 | pfn += P2M_MID_PER_PAGE * P2M_PER_PAGE) | 604 | pfn += P2M_MID_PER_PAGE * P2M_PER_PAGE) |
561 | { | 605 | { |
562 | unsigned topidx = p2m_top_index(pfn); | 606 | WARN_ON(!early_alloc_p2m(pfn)); |
563 | unsigned long *mid_mfn_p; | ||
564 | unsigned long **mid; | ||
565 | |||
566 | mid = p2m_top[topidx]; | ||
567 | mid_mfn_p = p2m_top_mfn_p[topidx]; | ||
568 | if (mid == p2m_mid_missing) { | ||
569 | mid = extend_brk(PAGE_SIZE, PAGE_SIZE); | ||
570 | |||
571 | p2m_mid_init(mid); | ||
572 | |||
573 | p2m_top[topidx] = mid; | ||
574 | |||
575 | BUG_ON(mid_mfn_p != p2m_mid_missing_mfn); | ||
576 | } | ||
577 | /* And the save/restore P2M tables.. */ | ||
578 | if (mid_mfn_p == p2m_mid_missing_mfn) { | ||
579 | mid_mfn_p = extend_brk(PAGE_SIZE, PAGE_SIZE); | ||
580 | p2m_mid_mfn_init(mid_mfn_p); | ||
581 | |||
582 | p2m_top_mfn_p[topidx] = mid_mfn_p; | ||
583 | p2m_top_mfn[topidx] = virt_to_mfn(mid_mfn_p); | ||
584 | /* Note: we don't set mid_mfn_p[midix] here, | ||
585 | * look in __early_alloc_p2m */ | ||
586 | } | ||
587 | } | 607 | } |
588 | 608 | ||
589 | __early_alloc_p2m(pfn_s); | 609 | early_alloc_p2m_middle(pfn_s, true); |
590 | __early_alloc_p2m(pfn_e); | 610 | early_alloc_p2m_middle(pfn_e, true); |
591 | 611 | ||
592 | for (pfn = pfn_s; pfn < pfn_e; pfn++) | 612 | for (pfn = pfn_s; pfn < pfn_e; pfn++) |
593 | if (!__set_phys_to_machine(pfn, IDENTITY_FRAME(pfn))) | 613 | if (!__set_phys_to_machine(pfn, IDENTITY_FRAME(pfn))) |
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c index 1ba8dff26753..3ebba0753d38 100644 --- a/arch/x86/xen/setup.c +++ b/arch/x86/xen/setup.c | |||
@@ -26,7 +26,6 @@ | |||
26 | #include <xen/interface/memory.h> | 26 | #include <xen/interface/memory.h> |
27 | #include <xen/interface/physdev.h> | 27 | #include <xen/interface/physdev.h> |
28 | #include <xen/features.h> | 28 | #include <xen/features.h> |
29 | |||
30 | #include "xen-ops.h" | 29 | #include "xen-ops.h" |
31 | #include "vdso.h" | 30 | #include "vdso.h" |
32 | 31 | ||
@@ -84,8 +83,8 @@ static void __init xen_add_extra_mem(u64 start, u64 size) | |||
84 | __set_phys_to_machine(pfn, INVALID_P2M_ENTRY); | 83 | __set_phys_to_machine(pfn, INVALID_P2M_ENTRY); |
85 | } | 84 | } |
86 | 85 | ||
87 | static unsigned long __init xen_release_chunk(unsigned long start, | 86 | static unsigned long __init xen_do_chunk(unsigned long start, |
88 | unsigned long end) | 87 | unsigned long end, bool release) |
89 | { | 88 | { |
90 | struct xen_memory_reservation reservation = { | 89 | struct xen_memory_reservation reservation = { |
91 | .address_bits = 0, | 90 | .address_bits = 0, |
@@ -96,30 +95,138 @@ static unsigned long __init xen_release_chunk(unsigned long start, | |||
96 | unsigned long pfn; | 95 | unsigned long pfn; |
97 | int ret; | 96 | int ret; |
98 | 97 | ||
99 | for(pfn = start; pfn < end; pfn++) { | 98 | for (pfn = start; pfn < end; pfn++) { |
99 | unsigned long frame; | ||
100 | unsigned long mfn = pfn_to_mfn(pfn); | 100 | unsigned long mfn = pfn_to_mfn(pfn); |
101 | 101 | ||
102 | /* Make sure pfn exists to start with */ | 102 | if (release) { |
103 | if (mfn == INVALID_P2M_ENTRY || mfn_to_pfn(mfn) != pfn) | 103 | /* Make sure pfn exists to start with */ |
104 | continue; | 104 | if (mfn == INVALID_P2M_ENTRY || mfn_to_pfn(mfn) != pfn) |
105 | 105 | continue; | |
106 | set_xen_guest_handle(reservation.extent_start, &mfn); | 106 | frame = mfn; |
107 | } else { | ||
108 | if (mfn != INVALID_P2M_ENTRY) | ||
109 | continue; | ||
110 | frame = pfn; | ||
111 | } | ||
112 | set_xen_guest_handle(reservation.extent_start, &frame); | ||
107 | reservation.nr_extents = 1; | 113 | reservation.nr_extents = 1; |
108 | 114 | ||
109 | ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation, | 115 | ret = HYPERVISOR_memory_op(release ? XENMEM_decrease_reservation : XENMEM_populate_physmap, |
110 | &reservation); | 116 | &reservation); |
111 | WARN(ret != 1, "Failed to release pfn %lx err=%d\n", pfn, ret); | 117 | WARN(ret != 1, "Failed to %s pfn %lx err=%d\n", |
118 | release ? "release" : "populate", pfn, ret); | ||
119 | |||
112 | if (ret == 1) { | 120 | if (ret == 1) { |
113 | __set_phys_to_machine(pfn, INVALID_P2M_ENTRY); | 121 | if (!early_set_phys_to_machine(pfn, release ? INVALID_P2M_ENTRY : frame)) { |
122 | if (release) | ||
123 | break; | ||
124 | set_xen_guest_handle(reservation.extent_start, &frame); | ||
125 | reservation.nr_extents = 1; | ||
126 | ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation, | ||
127 | &reservation); | ||
128 | break; | ||
129 | } | ||
114 | len++; | 130 | len++; |
115 | } | 131 | } else |
132 | break; | ||
116 | } | 133 | } |
117 | printk(KERN_INFO "Freeing %lx-%lx pfn range: %lu pages freed\n", | 134 | if (len) |
118 | start, end, len); | 135 | printk(KERN_INFO "%s %lx-%lx pfn range: %lu pages %s\n", |
136 | release ? "Freeing" : "Populating", | ||
137 | start, end, len, | ||
138 | release ? "freed" : "added"); | ||
119 | 139 | ||
120 | return len; | 140 | return len; |
121 | } | 141 | } |
122 | 142 | ||
143 | static unsigned long __init xen_release_chunk(unsigned long start, | ||
144 | unsigned long end) | ||
145 | { | ||
146 | return xen_do_chunk(start, end, true); | ||
147 | } | ||
148 | |||
149 | static unsigned long __init xen_populate_chunk( | ||
150 | const struct e820entry *list, size_t map_size, | ||
151 | unsigned long max_pfn, unsigned long *last_pfn, | ||
152 | unsigned long credits_left) | ||
153 | { | ||
154 | const struct e820entry *entry; | ||
155 | unsigned int i; | ||
156 | unsigned long done = 0; | ||
157 | unsigned long dest_pfn; | ||
158 | |||
159 | for (i = 0, entry = list; i < map_size; i++, entry++) { | ||
160 | unsigned long credits = credits_left; | ||
161 | unsigned long s_pfn; | ||
162 | unsigned long e_pfn; | ||
163 | unsigned long pfns; | ||
164 | long capacity; | ||
165 | |||
166 | if (credits <= 0) | ||
167 | break; | ||
168 | |||
169 | if (entry->type != E820_RAM) | ||
170 | continue; | ||
171 | |||
172 | e_pfn = PFN_UP(entry->addr + entry->size); | ||
173 | |||
174 | /* We only care about E820 after the xen_start_info->nr_pages */ | ||
175 | if (e_pfn <= max_pfn) | ||
176 | continue; | ||
177 | |||
178 | s_pfn = PFN_DOWN(entry->addr); | ||
179 | /* If the E820 falls within the nr_pages, we want to start | ||
180 | * at the nr_pages PFN. | ||
181 | * If that would mean going past the E820 entry, skip it | ||
182 | */ | ||
183 | if (s_pfn <= max_pfn) { | ||
184 | capacity = e_pfn - max_pfn; | ||
185 | dest_pfn = max_pfn; | ||
186 | } else { | ||
187 | /* last_pfn MUST be within E820_RAM regions */ | ||
188 | if (*last_pfn && e_pfn >= *last_pfn) | ||
189 | s_pfn = *last_pfn; | ||
190 | capacity = e_pfn - s_pfn; | ||
191 | dest_pfn = s_pfn; | ||
192 | } | ||
193 | /* If we had filled this E820_RAM entry, go to the next one. */ | ||
194 | if (capacity <= 0) | ||
195 | continue; | ||
196 | |||
197 | if (credits > capacity) | ||
198 | credits = capacity; | ||
199 | |||
200 | pfns = xen_do_chunk(dest_pfn, dest_pfn + credits, false); | ||
201 | done += pfns; | ||
202 | credits_left -= pfns; | ||
203 | *last_pfn = (dest_pfn + pfns); | ||
204 | } | ||
205 | return done; | ||
206 | } | ||
207 | |||
208 | static void __init xen_set_identity_and_release_chunk( | ||
209 | unsigned long start_pfn, unsigned long end_pfn, unsigned long nr_pages, | ||
210 | unsigned long *released, unsigned long *identity) | ||
211 | { | ||
212 | unsigned long pfn; | ||
213 | |||
214 | /* | ||
215 | * If the PFNs are currently mapped, the VA mapping also needs | ||
216 | * to be updated to be 1:1. | ||
217 | */ | ||
218 | for (pfn = start_pfn; pfn <= max_pfn_mapped && pfn < end_pfn; pfn++) | ||
219 | (void)HYPERVISOR_update_va_mapping( | ||
220 | (unsigned long)__va(pfn << PAGE_SHIFT), | ||
221 | mfn_pte(pfn, PAGE_KERNEL_IO), 0); | ||
222 | |||
223 | if (start_pfn < nr_pages) | ||
224 | *released += xen_release_chunk( | ||
225 | start_pfn, min(end_pfn, nr_pages)); | ||
226 | |||
227 | *identity += set_phys_range_identity(start_pfn, end_pfn); | ||
228 | } | ||
229 | |||
123 | static unsigned long __init xen_set_identity_and_release( | 230 | static unsigned long __init xen_set_identity_and_release( |
124 | const struct e820entry *list, size_t map_size, unsigned long nr_pages) | 231 | const struct e820entry *list, size_t map_size, unsigned long nr_pages) |
125 | { | 232 | { |
@@ -142,7 +249,6 @@ static unsigned long __init xen_set_identity_and_release( | |||
142 | */ | 249 | */ |
143 | for (i = 0, entry = list; i < map_size; i++, entry++) { | 250 | for (i = 0, entry = list; i < map_size; i++, entry++) { |
144 | phys_addr_t end = entry->addr + entry->size; | 251 | phys_addr_t end = entry->addr + entry->size; |
145 | |||
146 | if (entry->type == E820_RAM || i == map_size - 1) { | 252 | if (entry->type == E820_RAM || i == map_size - 1) { |
147 | unsigned long start_pfn = PFN_DOWN(start); | 253 | unsigned long start_pfn = PFN_DOWN(start); |
148 | unsigned long end_pfn = PFN_UP(end); | 254 | unsigned long end_pfn = PFN_UP(end); |
@@ -150,20 +256,19 @@ static unsigned long __init xen_set_identity_and_release( | |||
150 | if (entry->type == E820_RAM) | 256 | if (entry->type == E820_RAM) |
151 | end_pfn = PFN_UP(entry->addr); | 257 | end_pfn = PFN_UP(entry->addr); |
152 | 258 | ||
153 | if (start_pfn < end_pfn) { | 259 | if (start_pfn < end_pfn) |
154 | if (start_pfn < nr_pages) | 260 | xen_set_identity_and_release_chunk( |
155 | released += xen_release_chunk( | 261 | start_pfn, end_pfn, nr_pages, |
156 | start_pfn, min(end_pfn, nr_pages)); | 262 | &released, &identity); |
157 | 263 | ||
158 | identity += set_phys_range_identity( | ||
159 | start_pfn, end_pfn); | ||
160 | } | ||
161 | start = end; | 264 | start = end; |
162 | } | 265 | } |
163 | } | 266 | } |
164 | 267 | ||
165 | printk(KERN_INFO "Released %lu pages of unused memory\n", released); | 268 | if (released) |
166 | printk(KERN_INFO "Set %ld page(s) to 1-1 mapping\n", identity); | 269 | printk(KERN_INFO "Released %lu pages of unused memory\n", released); |
270 | if (identity) | ||
271 | printk(KERN_INFO "Set %ld page(s) to 1-1 mapping\n", identity); | ||
167 | 272 | ||
168 | return released; | 273 | return released; |
169 | } | 274 | } |
@@ -217,7 +322,9 @@ char * __init xen_memory_setup(void) | |||
217 | int rc; | 322 | int rc; |
218 | struct xen_memory_map memmap; | 323 | struct xen_memory_map memmap; |
219 | unsigned long max_pages; | 324 | unsigned long max_pages; |
325 | unsigned long last_pfn = 0; | ||
220 | unsigned long extra_pages = 0; | 326 | unsigned long extra_pages = 0; |
327 | unsigned long populated; | ||
221 | int i; | 328 | int i; |
222 | int op; | 329 | int op; |
223 | 330 | ||
@@ -257,9 +364,20 @@ char * __init xen_memory_setup(void) | |||
257 | */ | 364 | */ |
258 | xen_released_pages = xen_set_identity_and_release( | 365 | xen_released_pages = xen_set_identity_and_release( |
259 | map, memmap.nr_entries, max_pfn); | 366 | map, memmap.nr_entries, max_pfn); |
260 | extra_pages += xen_released_pages; | ||
261 | 367 | ||
262 | /* | 368 | /* |
369 | * Populate back the non-RAM pages and E820 gaps that had been | ||
370 | * released. */ | ||
371 | populated = xen_populate_chunk(map, memmap.nr_entries, | ||
372 | max_pfn, &last_pfn, xen_released_pages); | ||
373 | |||
374 | extra_pages += (xen_released_pages - populated); | ||
375 | |||
376 | if (last_pfn > max_pfn) { | ||
377 | max_pfn = min(MAX_DOMAIN_PAGES, last_pfn); | ||
378 | mem_end = PFN_PHYS(max_pfn); | ||
379 | } | ||
380 | /* | ||
263 | * Clamp the amount of extra memory to a EXTRA_MEM_RATIO | 381 | * Clamp the amount of extra memory to a EXTRA_MEM_RATIO |
264 | * factor the base size. On non-highmem systems, the base | 382 | * factor the base size. On non-highmem systems, the base |
265 | * size is the full initial memory allocation; on highmem it | 383 | * size is the full initial memory allocation; on highmem it |
@@ -272,7 +390,6 @@ char * __init xen_memory_setup(void) | |||
272 | */ | 390 | */ |
273 | extra_pages = min(EXTRA_MEM_RATIO * min(max_pfn, PFN_DOWN(MAXMEM)), | 391 | extra_pages = min(EXTRA_MEM_RATIO * min(max_pfn, PFN_DOWN(MAXMEM)), |
274 | extra_pages); | 392 | extra_pages); |
275 | |||
276 | i = 0; | 393 | i = 0; |
277 | while (i < memmap.nr_entries) { | 394 | while (i < memmap.nr_entries) { |
278 | u64 addr = map[i].addr; | 395 | u64 addr = map[i].addr; |
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c index 3700945ed0d5..afb250d22a6b 100644 --- a/arch/x86/xen/smp.c +++ b/arch/x86/xen/smp.c | |||
@@ -16,6 +16,7 @@ | |||
16 | #include <linux/err.h> | 16 | #include <linux/err.h> |
17 | #include <linux/slab.h> | 17 | #include <linux/slab.h> |
18 | #include <linux/smp.h> | 18 | #include <linux/smp.h> |
19 | #include <linux/irq_work.h> | ||
19 | 20 | ||
20 | #include <asm/paravirt.h> | 21 | #include <asm/paravirt.h> |
21 | #include <asm/desc.h> | 22 | #include <asm/desc.h> |
@@ -41,10 +42,12 @@ cpumask_var_t xen_cpu_initialized_map; | |||
41 | static DEFINE_PER_CPU(int, xen_resched_irq); | 42 | static DEFINE_PER_CPU(int, xen_resched_irq); |
42 | static DEFINE_PER_CPU(int, xen_callfunc_irq); | 43 | static DEFINE_PER_CPU(int, xen_callfunc_irq); |
43 | static DEFINE_PER_CPU(int, xen_callfuncsingle_irq); | 44 | static DEFINE_PER_CPU(int, xen_callfuncsingle_irq); |
45 | static DEFINE_PER_CPU(int, xen_irq_work); | ||
44 | static DEFINE_PER_CPU(int, xen_debug_irq) = -1; | 46 | static DEFINE_PER_CPU(int, xen_debug_irq) = -1; |
45 | 47 | ||
46 | static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id); | 48 | static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id); |
47 | static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id); | 49 | static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id); |
50 | static irqreturn_t xen_irq_work_interrupt(int irq, void *dev_id); | ||
48 | 51 | ||
49 | /* | 52 | /* |
50 | * Reschedule call back. | 53 | * Reschedule call back. |
@@ -143,6 +146,17 @@ static int xen_smp_intr_init(unsigned int cpu) | |||
143 | goto fail; | 146 | goto fail; |
144 | per_cpu(xen_callfuncsingle_irq, cpu) = rc; | 147 | per_cpu(xen_callfuncsingle_irq, cpu) = rc; |
145 | 148 | ||
149 | callfunc_name = kasprintf(GFP_KERNEL, "irqwork%d", cpu); | ||
150 | rc = bind_ipi_to_irqhandler(XEN_IRQ_WORK_VECTOR, | ||
151 | cpu, | ||
152 | xen_irq_work_interrupt, | ||
153 | IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING, | ||
154 | callfunc_name, | ||
155 | NULL); | ||
156 | if (rc < 0) | ||
157 | goto fail; | ||
158 | per_cpu(xen_irq_work, cpu) = rc; | ||
159 | |||
146 | return 0; | 160 | return 0; |
147 | 161 | ||
148 | fail: | 162 | fail: |
@@ -155,6 +169,8 @@ static int xen_smp_intr_init(unsigned int cpu) | |||
155 | if (per_cpu(xen_callfuncsingle_irq, cpu) >= 0) | 169 | if (per_cpu(xen_callfuncsingle_irq, cpu) >= 0) |
156 | unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu), | 170 | unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu), |
157 | NULL); | 171 | NULL); |
172 | if (per_cpu(xen_irq_work, cpu) >= 0) | ||
173 | unbind_from_irqhandler(per_cpu(xen_irq_work, cpu), NULL); | ||
158 | 174 | ||
159 | return rc; | 175 | return rc; |
160 | } | 176 | } |
@@ -407,6 +423,7 @@ static void xen_cpu_die(unsigned int cpu) | |||
407 | unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu), NULL); | 423 | unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu), NULL); |
408 | unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu), NULL); | 424 | unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu), NULL); |
409 | unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu), NULL); | 425 | unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu), NULL); |
426 | unbind_from_irqhandler(per_cpu(xen_irq_work, cpu), NULL); | ||
410 | xen_uninit_lock_cpu(cpu); | 427 | xen_uninit_lock_cpu(cpu); |
411 | xen_teardown_timer(cpu); | 428 | xen_teardown_timer(cpu); |
412 | 429 | ||
@@ -469,8 +486,8 @@ static void xen_smp_send_reschedule(int cpu) | |||
469 | xen_send_IPI_one(cpu, XEN_RESCHEDULE_VECTOR); | 486 | xen_send_IPI_one(cpu, XEN_RESCHEDULE_VECTOR); |
470 | } | 487 | } |
471 | 488 | ||
472 | static void xen_send_IPI_mask(const struct cpumask *mask, | 489 | static void __xen_send_IPI_mask(const struct cpumask *mask, |
473 | enum ipi_vector vector) | 490 | int vector) |
474 | { | 491 | { |
475 | unsigned cpu; | 492 | unsigned cpu; |
476 | 493 | ||
@@ -482,7 +499,7 @@ static void xen_smp_send_call_function_ipi(const struct cpumask *mask) | |||
482 | { | 499 | { |
483 | int cpu; | 500 | int cpu; |
484 | 501 | ||
485 | xen_send_IPI_mask(mask, XEN_CALL_FUNCTION_VECTOR); | 502 | __xen_send_IPI_mask(mask, XEN_CALL_FUNCTION_VECTOR); |
486 | 503 | ||
487 | /* Make sure other vcpus get a chance to run if they need to. */ | 504 | /* Make sure other vcpus get a chance to run if they need to. */ |
488 | for_each_cpu(cpu, mask) { | 505 | for_each_cpu(cpu, mask) { |
@@ -495,10 +512,86 @@ static void xen_smp_send_call_function_ipi(const struct cpumask *mask) | |||
495 | 512 | ||
496 | static void xen_smp_send_call_function_single_ipi(int cpu) | 513 | static void xen_smp_send_call_function_single_ipi(int cpu) |
497 | { | 514 | { |
498 | xen_send_IPI_mask(cpumask_of(cpu), | 515 | __xen_send_IPI_mask(cpumask_of(cpu), |
499 | XEN_CALL_FUNCTION_SINGLE_VECTOR); | 516 | XEN_CALL_FUNCTION_SINGLE_VECTOR); |
500 | } | 517 | } |
501 | 518 | ||
519 | static inline int xen_map_vector(int vector) | ||
520 | { | ||
521 | int xen_vector; | ||
522 | |||
523 | switch (vector) { | ||
524 | case RESCHEDULE_VECTOR: | ||
525 | xen_vector = XEN_RESCHEDULE_VECTOR; | ||
526 | break; | ||
527 | case CALL_FUNCTION_VECTOR: | ||
528 | xen_vector = XEN_CALL_FUNCTION_VECTOR; | ||
529 | break; | ||
530 | case CALL_FUNCTION_SINGLE_VECTOR: | ||
531 | xen_vector = XEN_CALL_FUNCTION_SINGLE_VECTOR; | ||
532 | break; | ||
533 | case IRQ_WORK_VECTOR: | ||
534 | xen_vector = XEN_IRQ_WORK_VECTOR; | ||
535 | break; | ||
536 | default: | ||
537 | xen_vector = -1; | ||
538 | printk(KERN_ERR "xen: vector 0x%x is not implemented\n", | ||
539 | vector); | ||
540 | } | ||
541 | |||
542 | return xen_vector; | ||
543 | } | ||
544 | |||
545 | void xen_send_IPI_mask(const struct cpumask *mask, | ||
546 | int vector) | ||
547 | { | ||
548 | int xen_vector = xen_map_vector(vector); | ||
549 | |||
550 | if (xen_vector >= 0) | ||
551 | __xen_send_IPI_mask(mask, xen_vector); | ||
552 | } | ||
553 | |||
554 | void xen_send_IPI_all(int vector) | ||
555 | { | ||
556 | int xen_vector = xen_map_vector(vector); | ||
557 | |||
558 | if (xen_vector >= 0) | ||
559 | __xen_send_IPI_mask(cpu_online_mask, xen_vector); | ||
560 | } | ||
561 | |||
562 | void xen_send_IPI_self(int vector) | ||
563 | { | ||
564 | int xen_vector = xen_map_vector(vector); | ||
565 | |||
566 | if (xen_vector >= 0) | ||
567 | xen_send_IPI_one(smp_processor_id(), xen_vector); | ||
568 | } | ||
569 | |||
570 | void xen_send_IPI_mask_allbutself(const struct cpumask *mask, | ||
571 | int vector) | ||
572 | { | ||
573 | unsigned cpu; | ||
574 | unsigned int this_cpu = smp_processor_id(); | ||
575 | |||
576 | if (!(num_online_cpus() > 1)) | ||
577 | return; | ||
578 | |||
579 | for_each_cpu_and(cpu, mask, cpu_online_mask) { | ||
580 | if (this_cpu == cpu) | ||
581 | continue; | ||
582 | |||
583 | xen_smp_send_call_function_single_ipi(cpu); | ||
584 | } | ||
585 | } | ||
586 | |||
587 | void xen_send_IPI_allbutself(int vector) | ||
588 | { | ||
589 | int xen_vector = xen_map_vector(vector); | ||
590 | |||
591 | if (xen_vector >= 0) | ||
592 | xen_send_IPI_mask_allbutself(cpu_online_mask, xen_vector); | ||
593 | } | ||
594 | |||
502 | static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id) | 595 | static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id) |
503 | { | 596 | { |
504 | irq_enter(); | 597 | irq_enter(); |
@@ -519,6 +612,16 @@ static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id) | |||
519 | return IRQ_HANDLED; | 612 | return IRQ_HANDLED; |
520 | } | 613 | } |
521 | 614 | ||
615 | static irqreturn_t xen_irq_work_interrupt(int irq, void *dev_id) | ||
616 | { | ||
617 | irq_enter(); | ||
618 | irq_work_run(); | ||
619 | inc_irq_stat(apic_irq_work_irqs); | ||
620 | irq_exit(); | ||
621 | |||
622 | return IRQ_HANDLED; | ||
623 | } | ||
624 | |||
522 | static const struct smp_ops xen_smp_ops __initconst = { | 625 | static const struct smp_ops xen_smp_ops __initconst = { |
523 | .smp_prepare_boot_cpu = xen_smp_prepare_boot_cpu, | 626 | .smp_prepare_boot_cpu = xen_smp_prepare_boot_cpu, |
524 | .smp_prepare_cpus = xen_smp_prepare_cpus, | 627 | .smp_prepare_cpus = xen_smp_prepare_cpus, |
@@ -565,6 +668,7 @@ static void xen_hvm_cpu_die(unsigned int cpu) | |||
565 | unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu), NULL); | 668 | unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu), NULL); |
566 | unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu), NULL); | 669 | unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu), NULL); |
567 | unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu), NULL); | 670 | unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu), NULL); |
671 | unbind_from_irqhandler(per_cpu(xen_irq_work, cpu), NULL); | ||
568 | native_cpu_die(cpu); | 672 | native_cpu_die(cpu); |
569 | } | 673 | } |
570 | 674 | ||
diff --git a/arch/x86/xen/smp.h b/arch/x86/xen/smp.h new file mode 100644 index 000000000000..8981a76d081a --- /dev/null +++ b/arch/x86/xen/smp.h | |||
@@ -0,0 +1,12 @@ | |||
1 | #ifndef _XEN_SMP_H | ||
2 | |||
3 | extern void xen_send_IPI_mask(const struct cpumask *mask, | ||
4 | int vector); | ||
5 | extern void xen_send_IPI_mask_allbutself(const struct cpumask *mask, | ||
6 | int vector); | ||
7 | extern void xen_send_IPI_allbutself(int vector); | ||
8 | extern void physflat_send_IPI_allbutself(int vector); | ||
9 | extern void xen_send_IPI_all(int vector); | ||
10 | extern void xen_send_IPI_self(int vector); | ||
11 | |||
12 | #endif | ||
diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c index d69cc6c3f808..83e866d714ce 100644 --- a/arch/x86/xen/spinlock.c +++ b/arch/x86/xen/spinlock.c | |||
@@ -440,12 +440,12 @@ static int __init xen_spinlock_debugfs(void) | |||
440 | debugfs_create_u64("time_total", 0444, d_spin_debug, | 440 | debugfs_create_u64("time_total", 0444, d_spin_debug, |
441 | &spinlock_stats.time_total); | 441 | &spinlock_stats.time_total); |
442 | 442 | ||
443 | xen_debugfs_create_u32_array("histo_total", 0444, d_spin_debug, | 443 | debugfs_create_u32_array("histo_total", 0444, d_spin_debug, |
444 | spinlock_stats.histo_spin_total, HISTO_BUCKETS + 1); | 444 | spinlock_stats.histo_spin_total, HISTO_BUCKETS + 1); |
445 | xen_debugfs_create_u32_array("histo_spinning", 0444, d_spin_debug, | 445 | debugfs_create_u32_array("histo_spinning", 0444, d_spin_debug, |
446 | spinlock_stats.histo_spin_spinning, HISTO_BUCKETS + 1); | 446 | spinlock_stats.histo_spin_spinning, HISTO_BUCKETS + 1); |
447 | xen_debugfs_create_u32_array("histo_blocked", 0444, d_spin_debug, | 447 | debugfs_create_u32_array("histo_blocked", 0444, d_spin_debug, |
448 | spinlock_stats.histo_spin_blocked, HISTO_BUCKETS + 1); | 448 | spinlock_stats.histo_spin_blocked, HISTO_BUCKETS + 1); |
449 | 449 | ||
450 | return 0; | 450 | return 0; |
451 | } | 451 | } |
diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h index 45c0c0667bd9..202d4c150154 100644 --- a/arch/x86/xen/xen-ops.h +++ b/arch/x86/xen/xen-ops.h | |||
@@ -28,7 +28,6 @@ void xen_setup_shared_info(void); | |||
28 | void xen_build_mfn_list_list(void); | 28 | void xen_build_mfn_list_list(void); |
29 | void xen_setup_machphys_mapping(void); | 29 | void xen_setup_machphys_mapping(void); |
30 | pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn); | 30 | pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn); |
31 | void xen_ident_map_ISA(void); | ||
32 | void xen_reserve_top(void); | 31 | void xen_reserve_top(void); |
33 | extern unsigned long xen_max_p2m_pfn; | 32 | extern unsigned long xen_max_p2m_pfn; |
34 | 33 | ||
diff --git a/drivers/xen/Makefile b/drivers/xen/Makefile index 9adc5be57b13..fc3488631136 100644 --- a/drivers/xen/Makefile +++ b/drivers/xen/Makefile | |||
@@ -17,7 +17,7 @@ obj-$(CONFIG_XEN_SYS_HYPERVISOR) += sys-hypervisor.o | |||
17 | obj-$(CONFIG_XEN_PVHVM) += platform-pci.o | 17 | obj-$(CONFIG_XEN_PVHVM) += platform-pci.o |
18 | obj-$(CONFIG_XEN_TMEM) += tmem.o | 18 | obj-$(CONFIG_XEN_TMEM) += tmem.o |
19 | obj-$(CONFIG_SWIOTLB_XEN) += swiotlb-xen.o | 19 | obj-$(CONFIG_SWIOTLB_XEN) += swiotlb-xen.o |
20 | obj-$(CONFIG_XEN_DOM0) += pci.o | 20 | obj-$(CONFIG_XEN_DOM0) += pci.o acpi.o |
21 | obj-$(CONFIG_XEN_PCIDEV_BACKEND) += xen-pciback/ | 21 | obj-$(CONFIG_XEN_PCIDEV_BACKEND) += xen-pciback/ |
22 | obj-$(CONFIG_XEN_PRIVCMD) += xen-privcmd.o | 22 | obj-$(CONFIG_XEN_PRIVCMD) += xen-privcmd.o |
23 | obj-$(CONFIG_XEN_ACPI_PROCESSOR) += xen-acpi-processor.o | 23 | obj-$(CONFIG_XEN_ACPI_PROCESSOR) += xen-acpi-processor.o |
diff --git a/drivers/xen/acpi.c b/drivers/xen/acpi.c new file mode 100644 index 000000000000..119d42a2bf57 --- /dev/null +++ b/drivers/xen/acpi.c | |||
@@ -0,0 +1,62 @@ | |||
1 | /****************************************************************************** | ||
2 | * acpi.c | ||
3 | * acpi file for domain 0 kernel | ||
4 | * | ||
5 | * Copyright (c) 2011 Konrad Rzeszutek Wilk <konrad.wilk@oracle.com> | ||
6 | * Copyright (c) 2011 Yu Ke ke.yu@intel.com | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or | ||
9 | * modify it under the terms of the GNU General Public License version 2 | ||
10 | * as published by the Free Software Foundation; or, when distributed | ||
11 | * separately from the Linux kernel or incorporated into other | ||
12 | * software packages, subject to the following license: | ||
13 | * | ||
14 | * Permission is hereby granted, free of charge, to any person obtaining a copy | ||
15 | * of this source file (the "Software"), to deal in the Software without | ||
16 | * restriction, including without limitation the rights to use, copy, modify, | ||
17 | * merge, publish, distribute, sublicense, and/or sell copies of the Software, | ||
18 | * and to permit persons to whom the Software is furnished to do so, subject to | ||
19 | * the following conditions: | ||
20 | * | ||
21 | * The above copyright notice and this permission notice shall be included in | ||
22 | * all copies or substantial portions of the Software. | ||
23 | * | ||
24 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
25 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
26 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE | ||
27 | * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
28 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
29 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS | ||
30 | * IN THE SOFTWARE. | ||
31 | */ | ||
32 | |||
33 | #include <xen/acpi.h> | ||
34 | #include <xen/interface/platform.h> | ||
35 | #include <asm/xen/hypercall.h> | ||
36 | #include <asm/xen/hypervisor.h> | ||
37 | |||
38 | int xen_acpi_notify_hypervisor_state(u8 sleep_state, | ||
39 | u32 pm1a_cnt, u32 pm1b_cnt) | ||
40 | { | ||
41 | struct xen_platform_op op = { | ||
42 | .cmd = XENPF_enter_acpi_sleep, | ||
43 | .interface_version = XENPF_INTERFACE_VERSION, | ||
44 | .u = { | ||
45 | .enter_acpi_sleep = { | ||
46 | .pm1a_cnt_val = (u16)pm1a_cnt, | ||
47 | .pm1b_cnt_val = (u16)pm1b_cnt, | ||
48 | .sleep_state = sleep_state, | ||
49 | }, | ||
50 | }, | ||
51 | }; | ||
52 | |||
53 | if ((pm1a_cnt & 0xffff0000) || (pm1b_cnt & 0xffff0000)) { | ||
54 | WARN(1, "Using more than 16bits of PM1A/B 0x%x/0x%x!" | ||
55 | "Email xen-devel@lists.xensource.com Thank you.\n", \ | ||
56 | pm1a_cnt, pm1b_cnt); | ||
57 | return -1; | ||
58 | } | ||
59 | |||
60 | HYPERVISOR_dom0_op(&op); | ||
61 | return 1; | ||
62 | } | ||
diff --git a/drivers/xen/events.c b/drivers/xen/events.c index 0a8a17cd80be..6908e4ce2a0d 100644 --- a/drivers/xen/events.c +++ b/drivers/xen/events.c | |||
@@ -611,7 +611,7 @@ static void disable_pirq(struct irq_data *data) | |||
611 | disable_dynirq(data); | 611 | disable_dynirq(data); |
612 | } | 612 | } |
613 | 613 | ||
614 | static int find_irq_by_gsi(unsigned gsi) | 614 | int xen_irq_from_gsi(unsigned gsi) |
615 | { | 615 | { |
616 | struct irq_info *info; | 616 | struct irq_info *info; |
617 | 617 | ||
@@ -625,6 +625,7 @@ static int find_irq_by_gsi(unsigned gsi) | |||
625 | 625 | ||
626 | return -1; | 626 | return -1; |
627 | } | 627 | } |
628 | EXPORT_SYMBOL_GPL(xen_irq_from_gsi); | ||
628 | 629 | ||
629 | /* | 630 | /* |
630 | * Do not make any assumptions regarding the relationship between the | 631 | * Do not make any assumptions regarding the relationship between the |
@@ -644,7 +645,7 @@ int xen_bind_pirq_gsi_to_irq(unsigned gsi, | |||
644 | 645 | ||
645 | mutex_lock(&irq_mapping_update_lock); | 646 | mutex_lock(&irq_mapping_update_lock); |
646 | 647 | ||
647 | irq = find_irq_by_gsi(gsi); | 648 | irq = xen_irq_from_gsi(gsi); |
648 | if (irq != -1) { | 649 | if (irq != -1) { |
649 | printk(KERN_INFO "xen_map_pirq_gsi: returning irq %d for gsi %u\n", | 650 | printk(KERN_INFO "xen_map_pirq_gsi: returning irq %d for gsi %u\n", |
650 | irq, gsi); | 651 | irq, gsi); |
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c index f100ce20b16b..0bfc1ef11259 100644 --- a/drivers/xen/grant-table.c +++ b/drivers/xen/grant-table.c | |||
@@ -38,6 +38,7 @@ | |||
38 | #include <linux/vmalloc.h> | 38 | #include <linux/vmalloc.h> |
39 | #include <linux/uaccess.h> | 39 | #include <linux/uaccess.h> |
40 | #include <linux/io.h> | 40 | #include <linux/io.h> |
41 | #include <linux/hardirq.h> | ||
41 | 42 | ||
42 | #include <xen/xen.h> | 43 | #include <xen/xen.h> |
43 | #include <xen/interface/xen.h> | 44 | #include <xen/interface/xen.h> |
@@ -426,10 +427,8 @@ static int gnttab_end_foreign_access_ref_v1(grant_ref_t ref, int readonly) | |||
426 | nflags = *pflags; | 427 | nflags = *pflags; |
427 | do { | 428 | do { |
428 | flags = nflags; | 429 | flags = nflags; |
429 | if (flags & (GTF_reading|GTF_writing)) { | 430 | if (flags & (GTF_reading|GTF_writing)) |
430 | printk(KERN_ALERT "WARNING: g.e. still in use!\n"); | ||
431 | return 0; | 431 | return 0; |
432 | } | ||
433 | } while ((nflags = sync_cmpxchg(pflags, flags, 0)) != flags); | 432 | } while ((nflags = sync_cmpxchg(pflags, flags, 0)) != flags); |
434 | 433 | ||
435 | return 1; | 434 | return 1; |
@@ -458,12 +457,103 @@ static int gnttab_end_foreign_access_ref_v2(grant_ref_t ref, int readonly) | |||
458 | return 1; | 457 | return 1; |
459 | } | 458 | } |
460 | 459 | ||
461 | int gnttab_end_foreign_access_ref(grant_ref_t ref, int readonly) | 460 | static inline int _gnttab_end_foreign_access_ref(grant_ref_t ref, int readonly) |
462 | { | 461 | { |
463 | return gnttab_interface->end_foreign_access_ref(ref, readonly); | 462 | return gnttab_interface->end_foreign_access_ref(ref, readonly); |
464 | } | 463 | } |
464 | |||
465 | int gnttab_end_foreign_access_ref(grant_ref_t ref, int readonly) | ||
466 | { | ||
467 | if (_gnttab_end_foreign_access_ref(ref, readonly)) | ||
468 | return 1; | ||
469 | pr_warn("WARNING: g.e. %#x still in use!\n", ref); | ||
470 | return 0; | ||
471 | } | ||
465 | EXPORT_SYMBOL_GPL(gnttab_end_foreign_access_ref); | 472 | EXPORT_SYMBOL_GPL(gnttab_end_foreign_access_ref); |
466 | 473 | ||
474 | struct deferred_entry { | ||
475 | struct list_head list; | ||
476 | grant_ref_t ref; | ||
477 | bool ro; | ||
478 | uint16_t warn_delay; | ||
479 | struct page *page; | ||
480 | }; | ||
481 | static LIST_HEAD(deferred_list); | ||
482 | static void gnttab_handle_deferred(unsigned long); | ||
483 | static DEFINE_TIMER(deferred_timer, gnttab_handle_deferred, 0, 0); | ||
484 | |||
485 | static void gnttab_handle_deferred(unsigned long unused) | ||
486 | { | ||
487 | unsigned int nr = 10; | ||
488 | struct deferred_entry *first = NULL; | ||
489 | unsigned long flags; | ||
490 | |||
491 | spin_lock_irqsave(&gnttab_list_lock, flags); | ||
492 | while (nr--) { | ||
493 | struct deferred_entry *entry | ||
494 | = list_first_entry(&deferred_list, | ||
495 | struct deferred_entry, list); | ||
496 | |||
497 | if (entry == first) | ||
498 | break; | ||
499 | list_del(&entry->list); | ||
500 | spin_unlock_irqrestore(&gnttab_list_lock, flags); | ||
501 | if (_gnttab_end_foreign_access_ref(entry->ref, entry->ro)) { | ||
502 | put_free_entry(entry->ref); | ||
503 | if (entry->page) { | ||
504 | pr_debug("freeing g.e. %#x (pfn %#lx)\n", | ||
505 | entry->ref, page_to_pfn(entry->page)); | ||
506 | __free_page(entry->page); | ||
507 | } else | ||
508 | pr_info("freeing g.e. %#x\n", entry->ref); | ||
509 | kfree(entry); | ||
510 | entry = NULL; | ||
511 | } else { | ||
512 | if (!--entry->warn_delay) | ||
513 | pr_info("g.e. %#x still pending\n", | ||
514 | entry->ref); | ||
515 | if (!first) | ||
516 | first = entry; | ||
517 | } | ||
518 | spin_lock_irqsave(&gnttab_list_lock, flags); | ||
519 | if (entry) | ||
520 | list_add_tail(&entry->list, &deferred_list); | ||
521 | else if (list_empty(&deferred_list)) | ||
522 | break; | ||
523 | } | ||
524 | if (!list_empty(&deferred_list) && !timer_pending(&deferred_timer)) { | ||
525 | deferred_timer.expires = jiffies + HZ; | ||
526 | add_timer(&deferred_timer); | ||
527 | } | ||
528 | spin_unlock_irqrestore(&gnttab_list_lock, flags); | ||
529 | } | ||
530 | |||
531 | static void gnttab_add_deferred(grant_ref_t ref, bool readonly, | ||
532 | struct page *page) | ||
533 | { | ||
534 | struct deferred_entry *entry = kmalloc(sizeof(*entry), GFP_ATOMIC); | ||
535 | const char *what = KERN_WARNING "leaking"; | ||
536 | |||
537 | if (entry) { | ||
538 | unsigned long flags; | ||
539 | |||
540 | entry->ref = ref; | ||
541 | entry->ro = readonly; | ||
542 | entry->page = page; | ||
543 | entry->warn_delay = 60; | ||
544 | spin_lock_irqsave(&gnttab_list_lock, flags); | ||
545 | list_add_tail(&entry->list, &deferred_list); | ||
546 | if (!timer_pending(&deferred_timer)) { | ||
547 | deferred_timer.expires = jiffies + HZ; | ||
548 | add_timer(&deferred_timer); | ||
549 | } | ||
550 | spin_unlock_irqrestore(&gnttab_list_lock, flags); | ||
551 | what = KERN_DEBUG "deferring"; | ||
552 | } | ||
553 | printk("%s g.e. %#x (pfn %#lx)\n", | ||
554 | what, ref, page ? page_to_pfn(page) : -1); | ||
555 | } | ||
556 | |||
467 | void gnttab_end_foreign_access(grant_ref_t ref, int readonly, | 557 | void gnttab_end_foreign_access(grant_ref_t ref, int readonly, |
468 | unsigned long page) | 558 | unsigned long page) |
469 | { | 559 | { |
@@ -471,12 +561,9 @@ void gnttab_end_foreign_access(grant_ref_t ref, int readonly, | |||
471 | put_free_entry(ref); | 561 | put_free_entry(ref); |
472 | if (page != 0) | 562 | if (page != 0) |
473 | free_page(page); | 563 | free_page(page); |
474 | } else { | 564 | } else |
475 | /* XXX This needs to be fixed so that the ref and page are | 565 | gnttab_add_deferred(ref, readonly, |
476 | placed on a list to be freed up later. */ | 566 | page ? virt_to_page(page) : NULL); |
477 | printk(KERN_WARNING | ||
478 | "WARNING: leaking g.e. and page still in use!\n"); | ||
479 | } | ||
480 | } | 567 | } |
481 | EXPORT_SYMBOL_GPL(gnttab_end_foreign_access); | 568 | EXPORT_SYMBOL_GPL(gnttab_end_foreign_access); |
482 | 569 | ||
@@ -741,6 +828,7 @@ int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops, | |||
741 | struct page **pages, unsigned int count) | 828 | struct page **pages, unsigned int count) |
742 | { | 829 | { |
743 | int i, ret; | 830 | int i, ret; |
831 | bool lazy = false; | ||
744 | pte_t *pte; | 832 | pte_t *pte; |
745 | unsigned long mfn; | 833 | unsigned long mfn; |
746 | 834 | ||
@@ -751,6 +839,11 @@ int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops, | |||
751 | if (xen_feature(XENFEAT_auto_translated_physmap)) | 839 | if (xen_feature(XENFEAT_auto_translated_physmap)) |
752 | return ret; | 840 | return ret; |
753 | 841 | ||
842 | if (!in_interrupt() && paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) { | ||
843 | arch_enter_lazy_mmu_mode(); | ||
844 | lazy = true; | ||
845 | } | ||
846 | |||
754 | for (i = 0; i < count; i++) { | 847 | for (i = 0; i < count; i++) { |
755 | /* Do not add to override if the map failed. */ | 848 | /* Do not add to override if the map failed. */ |
756 | if (map_ops[i].status) | 849 | if (map_ops[i].status) |
@@ -769,6 +862,9 @@ int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops, | |||
769 | return ret; | 862 | return ret; |
770 | } | 863 | } |
771 | 864 | ||
865 | if (lazy) | ||
866 | arch_leave_lazy_mmu_mode(); | ||
867 | |||
772 | return ret; | 868 | return ret; |
773 | } | 869 | } |
774 | EXPORT_SYMBOL_GPL(gnttab_map_refs); | 870 | EXPORT_SYMBOL_GPL(gnttab_map_refs); |
@@ -777,6 +873,7 @@ int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops, | |||
777 | struct page **pages, unsigned int count, bool clear_pte) | 873 | struct page **pages, unsigned int count, bool clear_pte) |
778 | { | 874 | { |
779 | int i, ret; | 875 | int i, ret; |
876 | bool lazy = false; | ||
780 | 877 | ||
781 | ret = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap_ops, count); | 878 | ret = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap_ops, count); |
782 | if (ret) | 879 | if (ret) |
@@ -785,12 +882,20 @@ int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops, | |||
785 | if (xen_feature(XENFEAT_auto_translated_physmap)) | 882 | if (xen_feature(XENFEAT_auto_translated_physmap)) |
786 | return ret; | 883 | return ret; |
787 | 884 | ||
885 | if (!in_interrupt() && paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) { | ||
886 | arch_enter_lazy_mmu_mode(); | ||
887 | lazy = true; | ||
888 | } | ||
889 | |||
788 | for (i = 0; i < count; i++) { | 890 | for (i = 0; i < count; i++) { |
789 | ret = m2p_remove_override(pages[i], clear_pte); | 891 | ret = m2p_remove_override(pages[i], clear_pte); |
790 | if (ret) | 892 | if (ret) |
791 | return ret; | 893 | return ret; |
792 | } | 894 | } |
793 | 895 | ||
896 | if (lazy) | ||
897 | arch_leave_lazy_mmu_mode(); | ||
898 | |||
794 | return ret; | 899 | return ret; |
795 | } | 900 | } |
796 | EXPORT_SYMBOL_GPL(gnttab_unmap_refs); | 901 | EXPORT_SYMBOL_GPL(gnttab_unmap_refs); |
diff --git a/drivers/xen/xen-selfballoon.c b/drivers/xen/xen-selfballoon.c index 146c94897016..7d041cb6da26 100644 --- a/drivers/xen/xen-selfballoon.c +++ b/drivers/xen/xen-selfballoon.c | |||
@@ -105,6 +105,12 @@ static unsigned int selfballoon_interval __read_mostly = 5; | |||
105 | */ | 105 | */ |
106 | static unsigned int selfballoon_min_usable_mb; | 106 | static unsigned int selfballoon_min_usable_mb; |
107 | 107 | ||
108 | /* | ||
109 | * Amount of RAM in MB to add to the target number of pages. | ||
110 | * Can be used to reserve some more room for caches and the like. | ||
111 | */ | ||
112 | static unsigned int selfballoon_reserved_mb; | ||
113 | |||
108 | static void selfballoon_process(struct work_struct *work); | 114 | static void selfballoon_process(struct work_struct *work); |
109 | static DECLARE_DELAYED_WORK(selfballoon_worker, selfballoon_process); | 115 | static DECLARE_DELAYED_WORK(selfballoon_worker, selfballoon_process); |
110 | 116 | ||
@@ -217,7 +223,8 @@ static void selfballoon_process(struct work_struct *work) | |||
217 | cur_pages = totalram_pages; | 223 | cur_pages = totalram_pages; |
218 | tgt_pages = cur_pages; /* default is no change */ | 224 | tgt_pages = cur_pages; /* default is no change */ |
219 | goal_pages = percpu_counter_read_positive(&vm_committed_as) + | 225 | goal_pages = percpu_counter_read_positive(&vm_committed_as) + |
220 | totalreserve_pages; | 226 | totalreserve_pages + |
227 | MB2PAGES(selfballoon_reserved_mb); | ||
221 | #ifdef CONFIG_FRONTSWAP | 228 | #ifdef CONFIG_FRONTSWAP |
222 | /* allow space for frontswap pages to be repatriated */ | 229 | /* allow space for frontswap pages to be repatriated */ |
223 | if (frontswap_selfshrinking && frontswap_enabled) | 230 | if (frontswap_selfshrinking && frontswap_enabled) |
@@ -397,6 +404,30 @@ static DEVICE_ATTR(selfballoon_min_usable_mb, S_IRUGO | S_IWUSR, | |||
397 | show_selfballoon_min_usable_mb, | 404 | show_selfballoon_min_usable_mb, |
398 | store_selfballoon_min_usable_mb); | 405 | store_selfballoon_min_usable_mb); |
399 | 406 | ||
407 | SELFBALLOON_SHOW(selfballoon_reserved_mb, "%d\n", | ||
408 | selfballoon_reserved_mb); | ||
409 | |||
410 | static ssize_t store_selfballoon_reserved_mb(struct device *dev, | ||
411 | struct device_attribute *attr, | ||
412 | const char *buf, | ||
413 | size_t count) | ||
414 | { | ||
415 | unsigned long val; | ||
416 | int err; | ||
417 | |||
418 | if (!capable(CAP_SYS_ADMIN)) | ||
419 | return -EPERM; | ||
420 | err = strict_strtoul(buf, 10, &val); | ||
421 | if (err || val == 0) | ||
422 | return -EINVAL; | ||
423 | selfballoon_reserved_mb = val; | ||
424 | return count; | ||
425 | } | ||
426 | |||
427 | static DEVICE_ATTR(selfballoon_reserved_mb, S_IRUGO | S_IWUSR, | ||
428 | show_selfballoon_reserved_mb, | ||
429 | store_selfballoon_reserved_mb); | ||
430 | |||
400 | 431 | ||
401 | #ifdef CONFIG_FRONTSWAP | 432 | #ifdef CONFIG_FRONTSWAP |
402 | SELFBALLOON_SHOW(frontswap_selfshrinking, "%d\n", frontswap_selfshrinking); | 433 | SELFBALLOON_SHOW(frontswap_selfshrinking, "%d\n", frontswap_selfshrinking); |
@@ -480,6 +511,7 @@ static struct attribute *selfballoon_attrs[] = { | |||
480 | &dev_attr_selfballoon_downhysteresis.attr, | 511 | &dev_attr_selfballoon_downhysteresis.attr, |
481 | &dev_attr_selfballoon_uphysteresis.attr, | 512 | &dev_attr_selfballoon_uphysteresis.attr, |
482 | &dev_attr_selfballoon_min_usable_mb.attr, | 513 | &dev_attr_selfballoon_min_usable_mb.attr, |
514 | &dev_attr_selfballoon_reserved_mb.attr, | ||
483 | #ifdef CONFIG_FRONTSWAP | 515 | #ifdef CONFIG_FRONTSWAP |
484 | &dev_attr_frontswap_selfshrinking.attr, | 516 | &dev_attr_frontswap_selfshrinking.attr, |
485 | &dev_attr_frontswap_hysteresis.attr, | 517 | &dev_attr_frontswap_hysteresis.attr, |
diff --git a/drivers/xen/xenbus/xenbus_comms.c b/drivers/xen/xenbus/xenbus_comms.c index 2eff7a6aaa20..52fe7ad07666 100644 --- a/drivers/xen/xenbus/xenbus_comms.c +++ b/drivers/xen/xenbus/xenbus_comms.c | |||
@@ -234,3 +234,9 @@ int xb_init_comms(void) | |||
234 | 234 | ||
235 | return 0; | 235 | return 0; |
236 | } | 236 | } |
237 | |||
238 | void xb_deinit_comms(void) | ||
239 | { | ||
240 | unbind_from_irqhandler(xenbus_irq, &xb_waitq); | ||
241 | xenbus_irq = 0; | ||
242 | } | ||
diff --git a/drivers/xen/xenbus/xenbus_comms.h b/drivers/xen/xenbus/xenbus_comms.h index 6e42800fa499..c8abd3b8a6c4 100644 --- a/drivers/xen/xenbus/xenbus_comms.h +++ b/drivers/xen/xenbus/xenbus_comms.h | |||
@@ -35,6 +35,7 @@ | |||
35 | 35 | ||
36 | int xs_init(void); | 36 | int xs_init(void); |
37 | int xb_init_comms(void); | 37 | int xb_init_comms(void); |
38 | void xb_deinit_comms(void); | ||
38 | 39 | ||
39 | /* Low level routines. */ | 40 | /* Low level routines. */ |
40 | int xb_write(const void *data, unsigned len); | 41 | int xb_write(const void *data, unsigned len); |
diff --git a/drivers/xen/xenbus/xenbus_dev_backend.c b/drivers/xen/xenbus/xenbus_dev_backend.c index 3d3be78c1093..be738c43104b 100644 --- a/drivers/xen/xenbus/xenbus_dev_backend.c +++ b/drivers/xen/xenbus/xenbus_dev_backend.c | |||
@@ -8,7 +8,11 @@ | |||
8 | 8 | ||
9 | #include <xen/xen.h> | 9 | #include <xen/xen.h> |
10 | #include <xen/page.h> | 10 | #include <xen/page.h> |
11 | #include <xen/xenbus.h> | ||
11 | #include <xen/xenbus_dev.h> | 12 | #include <xen/xenbus_dev.h> |
13 | #include <xen/grant_table.h> | ||
14 | #include <xen/events.h> | ||
15 | #include <asm/xen/hypervisor.h> | ||
12 | 16 | ||
13 | #include "xenbus_comms.h" | 17 | #include "xenbus_comms.h" |
14 | 18 | ||
@@ -22,6 +26,50 @@ static int xenbus_backend_open(struct inode *inode, struct file *filp) | |||
22 | return nonseekable_open(inode, filp); | 26 | return nonseekable_open(inode, filp); |
23 | } | 27 | } |
24 | 28 | ||
29 | static long xenbus_alloc(domid_t domid) | ||
30 | { | ||
31 | struct evtchn_alloc_unbound arg; | ||
32 | int err = -EEXIST; | ||
33 | |||
34 | xs_suspend(); | ||
35 | |||
36 | /* If xenstored_ready is nonzero, that means we have already talked to | ||
37 | * xenstore and set up watches. These watches will be restored by | ||
38 | * xs_resume, but that requires communication over the port established | ||
39 | * below that is not visible to anyone until the ioctl returns. | ||
40 | * | ||
41 | * This can be resolved by splitting the ioctl into two parts | ||
42 | * (postponing the resume until xenstored is active) but this is | ||
43 | * unnecessarily complex for the intended use where xenstored is only | ||
44 | * started once - so return -EEXIST if it's already running. | ||
45 | */ | ||
46 | if (xenstored_ready) | ||
47 | goto out_err; | ||
48 | |||
49 | gnttab_grant_foreign_access_ref(GNTTAB_RESERVED_XENSTORE, domid, | ||
50 | virt_to_mfn(xen_store_interface), 0 /* writable */); | ||
51 | |||
52 | arg.dom = DOMID_SELF; | ||
53 | arg.remote_dom = domid; | ||
54 | |||
55 | err = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound, &arg); | ||
56 | if (err) | ||
57 | goto out_err; | ||
58 | |||
59 | if (xen_store_evtchn > 0) | ||
60 | xb_deinit_comms(); | ||
61 | |||
62 | xen_store_evtchn = arg.port; | ||
63 | |||
64 | xs_resume(); | ||
65 | |||
66 | return arg.port; | ||
67 | |||
68 | out_err: | ||
69 | xs_suspend_cancel(); | ||
70 | return err; | ||
71 | } | ||
72 | |||
25 | static long xenbus_backend_ioctl(struct file *file, unsigned int cmd, unsigned long data) | 73 | static long xenbus_backend_ioctl(struct file *file, unsigned int cmd, unsigned long data) |
26 | { | 74 | { |
27 | if (!capable(CAP_SYS_ADMIN)) | 75 | if (!capable(CAP_SYS_ADMIN)) |
@@ -33,6 +81,9 @@ static long xenbus_backend_ioctl(struct file *file, unsigned int cmd, unsigned l | |||
33 | return xen_store_evtchn; | 81 | return xen_store_evtchn; |
34 | return -ENODEV; | 82 | return -ENODEV; |
35 | 83 | ||
84 | case IOCTL_XENBUS_BACKEND_SETUP: | ||
85 | return xenbus_alloc(data); | ||
86 | |||
36 | default: | 87 | default: |
37 | return -ENOTTY; | 88 | return -ENOTTY; |
38 | } | 89 | } |
diff --git a/fs/debugfs/file.c b/fs/debugfs/file.c index 5dfafdd1dbd3..2340f6978d6e 100644 --- a/fs/debugfs/file.c +++ b/fs/debugfs/file.c | |||
@@ -20,6 +20,7 @@ | |||
20 | #include <linux/namei.h> | 20 | #include <linux/namei.h> |
21 | #include <linux/debugfs.h> | 21 | #include <linux/debugfs.h> |
22 | #include <linux/io.h> | 22 | #include <linux/io.h> |
23 | #include <linux/slab.h> | ||
23 | 24 | ||
24 | static ssize_t default_read_file(struct file *file, char __user *buf, | 25 | static ssize_t default_read_file(struct file *file, char __user *buf, |
25 | size_t count, loff_t *ppos) | 26 | size_t count, loff_t *ppos) |
@@ -520,6 +521,133 @@ struct dentry *debugfs_create_blob(const char *name, umode_t mode, | |||
520 | } | 521 | } |
521 | EXPORT_SYMBOL_GPL(debugfs_create_blob); | 522 | EXPORT_SYMBOL_GPL(debugfs_create_blob); |
522 | 523 | ||
524 | struct array_data { | ||
525 | void *array; | ||
526 | u32 elements; | ||
527 | }; | ||
528 | |||
529 | static int u32_array_open(struct inode *inode, struct file *file) | ||
530 | { | ||
531 | file->private_data = NULL; | ||
532 | return nonseekable_open(inode, file); | ||
533 | } | ||
534 | |||
535 | static size_t format_array(char *buf, size_t bufsize, const char *fmt, | ||
536 | u32 *array, u32 array_size) | ||
537 | { | ||
538 | size_t ret = 0; | ||
539 | u32 i; | ||
540 | |||
541 | for (i = 0; i < array_size; i++) { | ||
542 | size_t len; | ||
543 | |||
544 | len = snprintf(buf, bufsize, fmt, array[i]); | ||
545 | len++; /* ' ' or '\n' */ | ||
546 | ret += len; | ||
547 | |||
548 | if (buf) { | ||
549 | buf += len; | ||
550 | bufsize -= len; | ||
551 | buf[-1] = (i == array_size-1) ? '\n' : ' '; | ||
552 | } | ||
553 | } | ||
554 | |||
555 | ret++; /* \0 */ | ||
556 | if (buf) | ||
557 | *buf = '\0'; | ||
558 | |||
559 | return ret; | ||
560 | } | ||
561 | |||
562 | static char *format_array_alloc(const char *fmt, u32 *array, | ||
563 | u32 array_size) | ||
564 | { | ||
565 | size_t len = format_array(NULL, 0, fmt, array, array_size); | ||
566 | char *ret; | ||
567 | |||
568 | ret = kmalloc(len, GFP_KERNEL); | ||
569 | if (ret == NULL) | ||
570 | return NULL; | ||
571 | |||
572 | format_array(ret, len, fmt, array, array_size); | ||
573 | return ret; | ||
574 | } | ||
575 | |||
576 | static ssize_t u32_array_read(struct file *file, char __user *buf, size_t len, | ||
577 | loff_t *ppos) | ||
578 | { | ||
579 | struct inode *inode = file->f_path.dentry->d_inode; | ||
580 | struct array_data *data = inode->i_private; | ||
581 | size_t size; | ||
582 | |||
583 | if (*ppos == 0) { | ||
584 | if (file->private_data) { | ||
585 | kfree(file->private_data); | ||
586 | file->private_data = NULL; | ||
587 | } | ||
588 | |||
589 | file->private_data = format_array_alloc("%u", data->array, | ||
590 | data->elements); | ||
591 | } | ||
592 | |||
593 | size = 0; | ||
594 | if (file->private_data) | ||
595 | size = strlen(file->private_data); | ||
596 | |||
597 | return simple_read_from_buffer(buf, len, ppos, | ||
598 | file->private_data, size); | ||
599 | } | ||
600 | |||
601 | static int u32_array_release(struct inode *inode, struct file *file) | ||
602 | { | ||
603 | kfree(file->private_data); | ||
604 | |||
605 | return 0; | ||
606 | } | ||
607 | |||
608 | static const struct file_operations u32_array_fops = { | ||
609 | .owner = THIS_MODULE, | ||
610 | .open = u32_array_open, | ||
611 | .release = u32_array_release, | ||
612 | .read = u32_array_read, | ||
613 | .llseek = no_llseek, | ||
614 | }; | ||
615 | |||
616 | /** | ||
617 | * debugfs_create_u32_array - create a debugfs file that is used to read u32 | ||
618 | * array. | ||
619 | * @name: a pointer to a string containing the name of the file to create. | ||
620 | * @mode: the permission that the file should have. | ||
621 | * @parent: a pointer to the parent dentry for this file. This should be a | ||
622 | * directory dentry if set. If this parameter is %NULL, then the | ||
623 | * file will be created in the root of the debugfs filesystem. | ||
624 | * @array: u32 array that provides data. | ||
625 | * @elements: total number of elements in the array. | ||
626 | * | ||
627 | * This function creates a file in debugfs with the given name that exports | ||
628 | * @array as data. If the @mode variable is so set it can be read from. | ||
629 | * Writing is not supported. Seek within the file is also not supported. | ||
630 | * Once array is created its size can not be changed. | ||
631 | * | ||
632 | * The function returns a pointer to dentry on success. If debugfs is not | ||
633 | * enabled in the kernel, the value -%ENODEV will be returned. | ||
634 | */ | ||
635 | struct dentry *debugfs_create_u32_array(const char *name, umode_t mode, | ||
636 | struct dentry *parent, | ||
637 | u32 *array, u32 elements) | ||
638 | { | ||
639 | struct array_data *data = kmalloc(sizeof(*data), GFP_KERNEL); | ||
640 | |||
641 | if (data == NULL) | ||
642 | return NULL; | ||
643 | |||
644 | data->array = array; | ||
645 | data->elements = elements; | ||
646 | |||
647 | return debugfs_create_file(name, mode, parent, data, &u32_array_fops); | ||
648 | } | ||
649 | EXPORT_SYMBOL_GPL(debugfs_create_u32_array); | ||
650 | |||
523 | #ifdef CONFIG_HAS_IOMEM | 651 | #ifdef CONFIG_HAS_IOMEM |
524 | 652 | ||
525 | /* | 653 | /* |
diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h index ae36b72c22f3..66c434f5dd1e 100644 --- a/include/linux/debugfs.h +++ b/include/linux/debugfs.h | |||
@@ -93,6 +93,10 @@ struct dentry *debugfs_create_regset32(const char *name, umode_t mode, | |||
93 | int debugfs_print_regs32(struct seq_file *s, const struct debugfs_reg32 *regs, | 93 | int debugfs_print_regs32(struct seq_file *s, const struct debugfs_reg32 *regs, |
94 | int nregs, void __iomem *base, char *prefix); | 94 | int nregs, void __iomem *base, char *prefix); |
95 | 95 | ||
96 | struct dentry *debugfs_create_u32_array(const char *name, umode_t mode, | ||
97 | struct dentry *parent, | ||
98 | u32 *array, u32 elements); | ||
99 | |||
96 | bool debugfs_initialized(void); | 100 | bool debugfs_initialized(void); |
97 | 101 | ||
98 | #else | 102 | #else |
@@ -219,6 +223,13 @@ static inline bool debugfs_initialized(void) | |||
219 | return false; | 223 | return false; |
220 | } | 224 | } |
221 | 225 | ||
226 | static inline struct dentry *debugfs_create_u32_array(const char *name, umode_t mode, | ||
227 | struct dentry *parent, | ||
228 | u32 *array, u32 elements) | ||
229 | { | ||
230 | return ERR_PTR(-ENODEV); | ||
231 | } | ||
232 | |||
222 | #endif | 233 | #endif |
223 | 234 | ||
224 | #endif | 235 | #endif |
diff --git a/include/xen/acpi.h b/include/xen/acpi.h new file mode 100644 index 000000000000..48a9c0171b65 --- /dev/null +++ b/include/xen/acpi.h | |||
@@ -0,0 +1,58 @@ | |||
1 | /****************************************************************************** | ||
2 | * acpi.h | ||
3 | * acpi file for domain 0 kernel | ||
4 | * | ||
5 | * Copyright (c) 2011 Konrad Rzeszutek Wilk <konrad.wilk@oracle.com> | ||
6 | * Copyright (c) 2011 Yu Ke <ke.yu@intel.com> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or | ||
9 | * modify it under the terms of the GNU General Public License version 2 | ||
10 | * as published by the Free Software Foundation; or, when distributed | ||
11 | * separately from the Linux kernel or incorporated into other | ||
12 | * software packages, subject to the following license: | ||
13 | * | ||
14 | * Permission is hereby granted, free of charge, to any person obtaining a copy | ||
15 | * of this source file (the "Software"), to deal in the Software without | ||
16 | * restriction, including without limitation the rights to use, copy, modify, | ||
17 | * merge, publish, distribute, sublicense, and/or sell copies of the Software, | ||
18 | * and to permit persons to whom the Software is furnished to do so, subject to | ||
19 | * the following conditions: | ||
20 | * | ||
21 | * The above copyright notice and this permission notice shall be included in | ||
22 | * all copies or substantial portions of the Software. | ||
23 | * | ||
24 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
25 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
26 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE | ||
27 | * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
28 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
29 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS | ||
30 | * IN THE SOFTWARE. | ||
31 | */ | ||
32 | |||
33 | #ifndef _XEN_ACPI_H | ||
34 | #define _XEN_ACPI_H | ||
35 | |||
36 | #include <linux/types.h> | ||
37 | |||
38 | #ifdef CONFIG_XEN_DOM0 | ||
39 | #include <asm/xen/hypervisor.h> | ||
40 | #include <xen/xen.h> | ||
41 | #include <linux/acpi.h> | ||
42 | |||
43 | int xen_acpi_notify_hypervisor_state(u8 sleep_state, | ||
44 | u32 pm1a_cnt, u32 pm1b_cnd); | ||
45 | |||
46 | static inline void xen_acpi_sleep_register(void) | ||
47 | { | ||
48 | if (xen_initial_domain()) | ||
49 | acpi_os_set_prepare_sleep( | ||
50 | &xen_acpi_notify_hypervisor_state); | ||
51 | } | ||
52 | #else | ||
53 | static inline void xen_acpi_sleep_register(void) | ||
54 | { | ||
55 | } | ||
56 | #endif | ||
57 | |||
58 | #endif /* _XEN_ACPI_H */ | ||
diff --git a/include/xen/events.h b/include/xen/events.h index 0f773708e02c..04399b28e821 100644 --- a/include/xen/events.h +++ b/include/xen/events.h | |||
@@ -103,6 +103,9 @@ int xen_irq_from_pirq(unsigned pirq); | |||
103 | /* Return the pirq allocated to the irq. */ | 103 | /* Return the pirq allocated to the irq. */ |
104 | int xen_pirq_from_irq(unsigned irq); | 104 | int xen_pirq_from_irq(unsigned irq); |
105 | 105 | ||
106 | /* Return the irq allocated to the gsi */ | ||
107 | int xen_irq_from_gsi(unsigned gsi); | ||
108 | |||
106 | /* Determine whether to ignore this IRQ if it is passed to a guest. */ | 109 | /* Determine whether to ignore this IRQ if it is passed to a guest. */ |
107 | int xen_test_irq_shared(int irq); | 110 | int xen_test_irq_shared(int irq); |
108 | 111 | ||
diff --git a/include/xen/grant_table.h b/include/xen/grant_table.h index 15f8a00ff003..11e27c3af3cb 100644 --- a/include/xen/grant_table.h +++ b/include/xen/grant_table.h | |||
@@ -46,6 +46,8 @@ | |||
46 | 46 | ||
47 | #include <xen/features.h> | 47 | #include <xen/features.h> |
48 | 48 | ||
49 | #define GNTTAB_RESERVED_XENSTORE 1 | ||
50 | |||
49 | /* NR_GRANT_FRAMES must be less than or equal to that configured in Xen */ | 51 | /* NR_GRANT_FRAMES must be less than or equal to that configured in Xen */ |
50 | #define NR_GRANT_FRAMES 4 | 52 | #define NR_GRANT_FRAMES 4 |
51 | 53 | ||
diff --git a/include/xen/xenbus_dev.h b/include/xen/xenbus_dev.h index ac5f0fe47ed9..bbee8c6a349d 100644 --- a/include/xen/xenbus_dev.h +++ b/include/xen/xenbus_dev.h | |||
@@ -38,4 +38,7 @@ | |||
38 | #define IOCTL_XENBUS_BACKEND_EVTCHN \ | 38 | #define IOCTL_XENBUS_BACKEND_EVTCHN \ |
39 | _IOC(_IOC_NONE, 'B', 0, 0) | 39 | _IOC(_IOC_NONE, 'B', 0, 0) |
40 | 40 | ||
41 | #define IOCTL_XENBUS_BACKEND_SETUP \ | ||
42 | _IOC(_IOC_NONE, 'B', 1, 0) | ||
43 | |||
41 | #endif /* __LINUX_XEN_XENBUS_DEV_H__ */ | 44 | #endif /* __LINUX_XEN_XENBUS_DEV_H__ */ |