path: root/arch/x86/xen
author     Linus Torvalds <torvalds@linux-foundation.org>   2012-05-24 19:02:08 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>   2012-05-24 19:02:08 -0400
commit     b5f4035adfffbcc6b478de5b8c44b618b3124aff (patch)
tree       e7a5f011d8aaf5c95edf933f98f25dfc8fa46837 /arch/x86/xen
parent     ce004178be1bbaa292e9e6497939e2970300095a (diff)
parent     68c2c39a76b094e9b2773e5846424ea674bf2c46 (diff)
Merge tag 'stable/for-linus-3.5-rc0-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/konrad/xen
Pull Xen updates from Konrad Rzeszutek Wilk:
 "Features:
   * Extend the APIC ops implementation and add IRQ_WORKER vector
     support so that 'perf' can work properly.
   * Fix self-ballooning code, and balloon logic when booting as initial
     domain.
   * Move array printing code to generic debugfs
   * Support XenBus domains.
   * Lazily free grants when a domain is dead/non-existent.
   * In M2P code use batching calls
  Bug-fixes:
   * Fix NULL dereference in allocation failure path (hvc_xen)
   * Fix unbinding of IRQ_WORKER vector during vCPU hot-unplug
   * Fix HVM guest resume - we would leak a PIRQ value instead of
     reusing the existing one."

Fix up add-add conflicts in arch/x86/xen/enlighten.c due to addition of
apic ipi interface next to the new apic_id functions.

* tag 'stable/for-linus-3.5-rc0-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/konrad/xen:
  xen: do not map the same GSI twice in PVHVM guests.
  hvc_xen: NULL dereference on allocation failure
  xen: Add selfballoning memory reservation tunable.
  xenbus: Add support for xenbus backend in stub domain
  xen/smp: unbind irqworkX when unplugging vCPUs.
  xen: enter/exit lazy_mmu_mode around m2p_override calls
  xen/acpi/sleep: Enable ACPI sleep via the __acpi_os_prepare_sleep
  xen: implement IRQ_WORK_VECTOR handler
  xen: implement apic ipi interface
  xen/setup: update VA mapping when releasing memory during setup
  xen/setup: Combine the two hypercall functions - since they are quite similar.
  xen/setup: Populate freed MFNs from non-RAM E820 entries and gaps to E820 RAM
  xen/setup: Only print "Freeing XXX-YYY pfn range: Z pages freed" if Z > 0
  xen/gnttab: add deferred freeing logic
  debugfs: Add support to print u32 array in debugfs
  xen/p2m: An early bootup variant of set_phys_to_machine
  xen/p2m: Collapse early_alloc_p2m_middle redundant checks.
  xen/p2m: Allow alloc_p2m_middle to call reserve_brk depending on argument
  xen/p2m: Move code around to allow for better re-usage.
Diffstat (limited to 'arch/x86/xen')
-rw-r--r--   arch/x86/xen/debugfs.c     104
-rw-r--r--   arch/x86/xen/debugfs.h       4
-rw-r--r--   arch/x86/xen/enlighten.c    13
-rw-r--r--   arch/x86/xen/mmu.c          23
-rw-r--r--   arch/x86/xen/p2m.c         104
-rw-r--r--   arch/x86/xen/setup.c       171
-rw-r--r--   arch/x86/xen/smp.c         112
-rw-r--r--   arch/x86/xen/smp.h          12
-rw-r--r--   arch/x86/xen/spinlock.c     12
-rw-r--r--   arch/x86/xen/xen-ops.h       1
10 files changed, 344 insertions, 212 deletions
diff --git a/arch/x86/xen/debugfs.c b/arch/x86/xen/debugfs.c
index ef1db1900d86..c8377fb26cdf 100644
--- a/arch/x86/xen/debugfs.c
+++ b/arch/x86/xen/debugfs.c
@@ -19,107 +19,3 @@ struct dentry * __init xen_init_debugfs(void)
 	return d_xen_debug;
 }
 
-struct array_data
-{
-	void *array;
-	unsigned elements;
-};
-
-static int u32_array_open(struct inode *inode, struct file *file)
-{
-	file->private_data = NULL;
-	return nonseekable_open(inode, file);
-}
-
-static size_t format_array(char *buf, size_t bufsize, const char *fmt,
-			   u32 *array, unsigned array_size)
-{
-	size_t ret = 0;
-	unsigned i;
-
-	for(i = 0; i < array_size; i++) {
-		size_t len;
-
-		len = snprintf(buf, bufsize, fmt, array[i]);
-		len++;	/* ' ' or '\n' */
-		ret += len;
-
-		if (buf) {
-			buf += len;
-			bufsize -= len;
-			buf[-1] = (i == array_size-1) ? '\n' : ' ';
-		}
-	}
-
-	ret++;		/* \0 */
-	if (buf)
-		*buf = '\0';
-
-	return ret;
-}
-
-static char *format_array_alloc(const char *fmt, u32 *array, unsigned array_size)
-{
-	size_t len = format_array(NULL, 0, fmt, array, array_size);
-	char *ret;
-
-	ret = kmalloc(len, GFP_KERNEL);
-	if (ret == NULL)
-		return NULL;
-
-	format_array(ret, len, fmt, array, array_size);
-	return ret;
-}
-
-static ssize_t u32_array_read(struct file *file, char __user *buf, size_t len,
-			      loff_t *ppos)
-{
-	struct inode *inode = file->f_path.dentry->d_inode;
-	struct array_data *data = inode->i_private;
-	size_t size;
-
-	if (*ppos == 0) {
-		if (file->private_data) {
-			kfree(file->private_data);
-			file->private_data = NULL;
-		}
-
-		file->private_data = format_array_alloc("%u", data->array, data->elements);
-	}
-
-	size = 0;
-	if (file->private_data)
-		size = strlen(file->private_data);
-
-	return simple_read_from_buffer(buf, len, ppos, file->private_data, size);
-}
-
-static int xen_array_release(struct inode *inode, struct file *file)
-{
-	kfree(file->private_data);
-
-	return 0;
-}
-
-static const struct file_operations u32_array_fops = {
-	.owner	= THIS_MODULE,
-	.open	= u32_array_open,
-	.release= xen_array_release,
-	.read	= u32_array_read,
-	.llseek = no_llseek,
-};
-
-struct dentry *xen_debugfs_create_u32_array(const char *name, umode_t mode,
-					    struct dentry *parent,
-					    u32 *array, unsigned elements)
-{
-	struct array_data *data = kmalloc(sizeof(*data), GFP_KERNEL);
-
-	if (data == NULL)
-		return NULL;
-
-	data->array = array;
-	data->elements = elements;
-
-	return debugfs_create_file(name, mode, parent, data, &u32_array_fops);
-}
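A note on the removed format_array() above: it is called twice, first with a NULL buffer so snprintf() only measures the output, then again to fill an allocation of exactly that size. A minimal userspace sketch of the same two-pass pattern, with malloc standing in for kmalloc (illustrative only, not kernel code):

/*
 * Two-pass sizing, as in format_array(): pass 1 with a NULL buffer only
 * measures, pass 2 fills the exact allocation.
 */
#include <stdio.h>
#include <stdlib.h>

static size_t format_u32s(char *buf, size_t bufsize,
			  const unsigned *array, unsigned n)
{
	size_t ret = 0;
	unsigned i;

	for (i = 0; i < n; i++) {
		/* snprintf(NULL, 0, ...) is a measure-only call */
		size_t len = snprintf(buf, bufsize, "%u", array[i]) + 1;

		ret += len;
		if (buf) {
			buf += len;
			bufsize -= len;
			buf[-1] = (i == n - 1) ? '\n' : ' ';
		}
	}
	ret++;			/* trailing NUL */
	if (buf)
		*buf = '\0';
	return ret;
}

int main(void)
{
	unsigned histo[] = { 3, 14, 159 };
	size_t len = format_u32s(NULL, 0, histo, 3);	/* pass 1: size */
	char *s = malloc(len);

	if (!s)
		return 1;
	format_u32s(s, len, histo, 3);			/* pass 2: fill */
	fputs(s, stdout);				/* "3 14 159\n" */
	free(s);
	return 0;
}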
diff --git a/arch/x86/xen/debugfs.h b/arch/x86/xen/debugfs.h
index 78d25499be5b..12ebf3325c7b 100644
--- a/arch/x86/xen/debugfs.h
+++ b/arch/x86/xen/debugfs.h
@@ -3,8 +3,4 @@
 
 struct dentry * __init xen_init_debugfs(void);
 
-struct dentry *xen_debugfs_create_u32_array(const char *name, umode_t mode,
-					    struct dentry *parent,
-					    u32 *array, unsigned elements);
-
 #endif /* _XEN_DEBUGFS_H */
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index c0f5facdb10c..75f33b2a5933 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -42,6 +42,7 @@
 #include <xen/page.h>
 #include <xen/hvm.h>
 #include <xen/hvc-console.h>
+#include <xen/acpi.h>
 
 #include <asm/paravirt.h>
 #include <asm/apic.h>
@@ -75,6 +76,7 @@
 
 #include "xen-ops.h"
 #include "mmu.h"
+#include "smp.h"
 #include "multicalls.h"
 
 EXPORT_SYMBOL_GPL(hypercall_page);
@@ -883,6 +885,14 @@ static void set_xen_basic_apic_ops(void)
 	apic->safe_wait_icr_idle = xen_safe_apic_wait_icr_idle;
 	apic->set_apic_id = xen_set_apic_id;
 	apic->get_apic_id = xen_get_apic_id;
+
+#ifdef CONFIG_SMP
+	apic->send_IPI_allbutself = xen_send_IPI_allbutself;
+	apic->send_IPI_mask_allbutself = xen_send_IPI_mask_allbutself;
+	apic->send_IPI_mask = xen_send_IPI_mask;
+	apic->send_IPI_all = xen_send_IPI_all;
+	apic->send_IPI_self = xen_send_IPI_self;
+#endif
 }
 
 #endif
@@ -1340,7 +1350,6 @@ asmlinkage void __init xen_start_kernel(void)
 
 	xen_raw_console_write("mapping kernel into physical memory\n");
 	pgd = xen_setup_kernel_pagetable(pgd, xen_start_info->nr_pages);
-	xen_ident_map_ISA();
 
 	/* Allocate and initialize top and mid mfn levels for p2m structure */
 	xen_build_mfn_list_list();
@@ -1400,6 +1409,8 @@ asmlinkage void __init xen_start_kernel(void)
 
 		/* Make sure ACS will be enabled */
 		pci_request_acs();
+
+		xen_acpi_sleep_register();
 	}
 #ifdef CONFIG_PCI
 	/* PCI BIOS service won't work from a PV guest. */
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 3506cd4f9a43..3a73785631ce 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -1933,29 +1933,6 @@ static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot)
 #endif
 }
 
-void __init xen_ident_map_ISA(void)
-{
-	unsigned long pa;
-
-	/*
-	 * If we're dom0, then linear map the ISA machine addresses into
-	 * the kernel's address space.
-	 */
-	if (!xen_initial_domain())
-		return;
-
-	xen_raw_printk("Xen: setup ISA identity maps\n");
-
-	for (pa = ISA_START_ADDRESS; pa < ISA_END_ADDRESS; pa += PAGE_SIZE) {
-		pte_t pte = mfn_pte(PFN_DOWN(pa), PAGE_KERNEL_IO);
-
-		if (HYPERVISOR_update_va_mapping(PAGE_OFFSET + pa, pte, 0))
-			BUG();
-	}
-
-	xen_flush_tlb();
-}
-
 static void __init xen_post_allocator_init(void)
 {
 	pv_mmu_ops.set_pte = xen_set_pte;
diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c
index 1b267e75158d..ffd08c414e91 100644
--- a/arch/x86/xen/p2m.c
+++ b/arch/x86/xen/p2m.c
@@ -499,16 +499,18 @@ static bool alloc_p2m(unsigned long pfn)
 	return true;
 }
 
-static bool __init __early_alloc_p2m(unsigned long pfn)
+static bool __init early_alloc_p2m_middle(unsigned long pfn, bool check_boundary)
 {
 	unsigned topidx, mididx, idx;
+	unsigned long *p2m;
+	unsigned long *mid_mfn_p;
 
 	topidx = p2m_top_index(pfn);
 	mididx = p2m_mid_index(pfn);
 	idx = p2m_index(pfn);
 
 	/* Pfff.. No boundary cross-over, lets get out. */
-	if (!idx)
+	if (!idx && check_boundary)
 		return false;
 
 	WARN(p2m_top[topidx][mididx] == p2m_identity,
@@ -522,24 +524,66 @@ static bool __init __early_alloc_p2m(unsigned long pfn)
 		return false;
 
 	/* Boundary cross-over for the edges: */
-	if (idx) {
-		unsigned long *p2m = extend_brk(PAGE_SIZE, PAGE_SIZE);
-		unsigned long *mid_mfn_p;
+	p2m = extend_brk(PAGE_SIZE, PAGE_SIZE);
 
-		p2m_init(p2m);
+	p2m_init(p2m);
 
-		p2m_top[topidx][mididx] = p2m;
+	p2m_top[topidx][mididx] = p2m;
 
-		/* For save/restore we need to MFN of the P2M saved */
+	/* For save/restore we need to MFN of the P2M saved */
 
-		mid_mfn_p = p2m_top_mfn_p[topidx];
-		WARN(mid_mfn_p[mididx] != virt_to_mfn(p2m_missing),
-			"P2M_TOP_P[%d][%d] != MFN of p2m_missing!\n",
-			topidx, mididx);
-		mid_mfn_p[mididx] = virt_to_mfn(p2m);
-
-	}
-	return idx != 0;
+	mid_mfn_p = p2m_top_mfn_p[topidx];
+	WARN(mid_mfn_p[mididx] != virt_to_mfn(p2m_missing),
+		"P2M_TOP_P[%d][%d] != MFN of p2m_missing!\n",
+		topidx, mididx);
+	mid_mfn_p[mididx] = virt_to_mfn(p2m);
+
+	return true;
+}
+
+static bool __init early_alloc_p2m(unsigned long pfn)
+{
+	unsigned topidx = p2m_top_index(pfn);
+	unsigned long *mid_mfn_p;
+	unsigned long **mid;
+
+	mid = p2m_top[topidx];
+	mid_mfn_p = p2m_top_mfn_p[topidx];
+	if (mid == p2m_mid_missing) {
+		mid = extend_brk(PAGE_SIZE, PAGE_SIZE);
+
+		p2m_mid_init(mid);
+
+		p2m_top[topidx] = mid;
+
+		BUG_ON(mid_mfn_p != p2m_mid_missing_mfn);
+	}
+	/* And the save/restore P2M tables.. */
+	if (mid_mfn_p == p2m_mid_missing_mfn) {
+		mid_mfn_p = extend_brk(PAGE_SIZE, PAGE_SIZE);
+		p2m_mid_mfn_init(mid_mfn_p);
+
+		p2m_top_mfn_p[topidx] = mid_mfn_p;
+		p2m_top_mfn[topidx] = virt_to_mfn(mid_mfn_p);
+		/* Note: we don't set mid_mfn_p[midix] here,
+		 * look in early_alloc_p2m_middle */
+	}
+	return true;
+}
+bool __init early_set_phys_to_machine(unsigned long pfn, unsigned long mfn)
+{
+	if (unlikely(!__set_phys_to_machine(pfn, mfn))) {
+		if (!early_alloc_p2m(pfn))
+			return false;
+
+		if (!early_alloc_p2m_middle(pfn, false /* boundary crossover OK!*/))
+			return false;
+
+		if (!__set_phys_to_machine(pfn, mfn))
+			return false;
+	}
+
+	return true;
 }
 unsigned long __init set_phys_range_identity(unsigned long pfn_s,
 					     unsigned long pfn_e)
@@ -559,35 +603,11 @@ unsigned long __init set_phys_range_identity(unsigned long pfn_s,
 	     pfn < ALIGN(pfn_e, (P2M_MID_PER_PAGE * P2M_PER_PAGE));
 	     pfn += P2M_MID_PER_PAGE * P2M_PER_PAGE)
 	{
-		unsigned topidx = p2m_top_index(pfn);
-		unsigned long *mid_mfn_p;
-		unsigned long **mid;
-
-		mid = p2m_top[topidx];
-		mid_mfn_p = p2m_top_mfn_p[topidx];
-		if (mid == p2m_mid_missing) {
-			mid = extend_brk(PAGE_SIZE, PAGE_SIZE);
-
-			p2m_mid_init(mid);
-
-			p2m_top[topidx] = mid;
-
-			BUG_ON(mid_mfn_p != p2m_mid_missing_mfn);
-		}
-		/* And the save/restore P2M tables.. */
-		if (mid_mfn_p == p2m_mid_missing_mfn) {
-			mid_mfn_p = extend_brk(PAGE_SIZE, PAGE_SIZE);
-			p2m_mid_mfn_init(mid_mfn_p);
-
-			p2m_top_mfn_p[topidx] = mid_mfn_p;
-			p2m_top_mfn[topidx] = virt_to_mfn(mid_mfn_p);
-			/* Note: we don't set mid_mfn_p[midix] here,
-			 * look in __early_alloc_p2m */
-		}
+		WARN_ON(!early_alloc_p2m(pfn));
 	}
 
-	__early_alloc_p2m(pfn_s);
-	__early_alloc_p2m(pfn_e);
+	early_alloc_p2m_middle(pfn_s, true);
+	early_alloc_p2m_middle(pfn_e, true);
 
 	for (pfn = pfn_s; pfn < pfn_e; pfn++)
 		if (!__set_phys_to_machine(pfn, IDENTITY_FRAME(pfn)))
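For orientation: the p2m is a three-level tree (a top page of mid-page pointers, mid pages of leaf-page pointers, leaf pages of MFNs); early_alloc_p2m() fills the top and mid levels, early_alloc_p2m_middle() the leaves. A standalone sketch of the index math, assuming 4 KiB pages and 8-byte entries so each level holds 512 slots, mirroring p2m_top_index()/p2m_mid_index()/p2m_index() in p2m.c:

#include <stdio.h>

#define P2M_PER_PAGE     512UL	/* leaf MFN entries per page */
#define P2M_MID_PER_PAGE 512UL	/* leaf-page pointers per mid page */

static void p2m_indices(unsigned long pfn)
{
	unsigned long topidx = pfn / (P2M_MID_PER_PAGE * P2M_PER_PAGE);
	unsigned long mididx = (pfn / P2M_PER_PAGE) % P2M_MID_PER_PAGE;
	unsigned long idx = pfn % P2M_PER_PAGE;

	printf("pfn %#lx -> top %lu, mid %lu, idx %lu\n",
	       pfn, topidx, mididx, idx);
}

int main(void)
{
	/* one mid level covers 512*512 pfns = 1 GiB of guest memory */
	p2m_indices(0x40001);	/* just past the first 1 GiB of pfns */
	return 0;
}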
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index 1ba8dff26753..3ebba0753d38 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -26,7 +26,6 @@
 #include <xen/interface/memory.h>
 #include <xen/interface/physdev.h>
 #include <xen/features.h>
-
 #include "xen-ops.h"
 #include "vdso.h"
 
@@ -84,8 +83,8 @@ static void __init xen_add_extra_mem(u64 start, u64 size)
 		__set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
 }
 
-static unsigned long __init xen_release_chunk(unsigned long start,
-					      unsigned long end)
+static unsigned long __init xen_do_chunk(unsigned long start,
+					 unsigned long end, bool release)
 {
 	struct xen_memory_reservation reservation = {
 		.address_bits = 0,
@@ -96,30 +95,138 @@ static unsigned long __init xen_release_chunk(unsigned long start,
 	unsigned long pfn;
 	int ret;
 
-	for(pfn = start; pfn < end; pfn++) {
+	for (pfn = start; pfn < end; pfn++) {
+		unsigned long frame;
 		unsigned long mfn = pfn_to_mfn(pfn);
 
-		/* Make sure pfn exists to start with */
-		if (mfn == INVALID_P2M_ENTRY || mfn_to_pfn(mfn) != pfn)
-			continue;
-
-		set_xen_guest_handle(reservation.extent_start, &mfn);
+		if (release) {
+			/* Make sure pfn exists to start with */
+			if (mfn == INVALID_P2M_ENTRY || mfn_to_pfn(mfn) != pfn)
+				continue;
+			frame = mfn;
+		} else {
+			if (mfn != INVALID_P2M_ENTRY)
+				continue;
+			frame = pfn;
+		}
+		set_xen_guest_handle(reservation.extent_start, &frame);
 		reservation.nr_extents = 1;
 
-		ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation,
+		ret = HYPERVISOR_memory_op(release ? XENMEM_decrease_reservation : XENMEM_populate_physmap,
 					   &reservation);
-		WARN(ret != 1, "Failed to release pfn %lx err=%d\n", pfn, ret);
+		WARN(ret != 1, "Failed to %s pfn %lx err=%d\n",
+		     release ? "release" : "populate", pfn, ret);
+
 		if (ret == 1) {
-			__set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
+			if (!early_set_phys_to_machine(pfn, release ? INVALID_P2M_ENTRY : frame)) {
+				if (release)
+					break;
+				set_xen_guest_handle(reservation.extent_start, &frame);
+				reservation.nr_extents = 1;
+				ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation,
+							   &reservation);
+				break;
+			}
 			len++;
-		}
+		} else
+			break;
 	}
-	printk(KERN_INFO "Freeing %lx-%lx pfn range: %lu pages freed\n",
-	       start, end, len);
+	if (len)
+		printk(KERN_INFO "%s %lx-%lx pfn range: %lu pages %s\n",
+		       release ? "Freeing" : "Populating",
+		       start, end, len,
+		       release ? "freed" : "added");
 
 	return len;
 }
 
+static unsigned long __init xen_release_chunk(unsigned long start,
+					      unsigned long end)
+{
+	return xen_do_chunk(start, end, true);
+}
+
+static unsigned long __init xen_populate_chunk(
+	const struct e820entry *list, size_t map_size,
+	unsigned long max_pfn, unsigned long *last_pfn,
+	unsigned long credits_left)
+{
+	const struct e820entry *entry;
+	unsigned int i;
+	unsigned long done = 0;
+	unsigned long dest_pfn;
+
+	for (i = 0, entry = list; i < map_size; i++, entry++) {
+		unsigned long credits = credits_left;
+		unsigned long s_pfn;
+		unsigned long e_pfn;
+		unsigned long pfns;
+		long capacity;
+
+		if (credits <= 0)
+			break;
+
+		if (entry->type != E820_RAM)
+			continue;
+
+		e_pfn = PFN_UP(entry->addr + entry->size);
+
+		/* We only care about E820 after the xen_start_info->nr_pages */
+		if (e_pfn <= max_pfn)
+			continue;
+
+		s_pfn = PFN_DOWN(entry->addr);
+		/* If the E820 falls within the nr_pages, we want to start
+		 * at the nr_pages PFN.
+		 * If that would mean going past the E820 entry, skip it
+		 */
+		if (s_pfn <= max_pfn) {
+			capacity = e_pfn - max_pfn;
+			dest_pfn = max_pfn;
+		} else {
+			/* last_pfn MUST be within E820_RAM regions */
+			if (*last_pfn && e_pfn >= *last_pfn)
+				s_pfn = *last_pfn;
+			capacity = e_pfn - s_pfn;
+			dest_pfn = s_pfn;
+		}
+		/* If we had filled this E820_RAM entry, go to the next one. */
+		if (capacity <= 0)
+			continue;
+
+		if (credits > capacity)
+			credits = capacity;
+
+		pfns = xen_do_chunk(dest_pfn, dest_pfn + credits, false);
+		done += pfns;
+		credits_left -= pfns;
+		*last_pfn = (dest_pfn + pfns);
+	}
+	return done;
+}
+
+static void __init xen_set_identity_and_release_chunk(
+	unsigned long start_pfn, unsigned long end_pfn, unsigned long nr_pages,
+	unsigned long *released, unsigned long *identity)
+{
+	unsigned long pfn;
+
+	/*
+	 * If the PFNs are currently mapped, the VA mapping also needs
+	 * to be updated to be 1:1.
+	 */
+	for (pfn = start_pfn; pfn <= max_pfn_mapped && pfn < end_pfn; pfn++)
+		(void)HYPERVISOR_update_va_mapping(
+			(unsigned long)__va(pfn << PAGE_SHIFT),
+			mfn_pte(pfn, PAGE_KERNEL_IO), 0);
+
+	if (start_pfn < nr_pages)
+		*released += xen_release_chunk(
			start_pfn, min(end_pfn, nr_pages));
+
+	*identity += set_phys_range_identity(start_pfn, end_pfn);
+}
+
 static unsigned long __init xen_set_identity_and_release(
 	const struct e820entry *list, size_t map_size, unsigned long nr_pages)
 {
@@ -142,7 +249,6 @@ static unsigned long __init xen_set_identity_and_release(
 	 */
 	for (i = 0, entry = list; i < map_size; i++, entry++) {
 		phys_addr_t end = entry->addr + entry->size;
-
 		if (entry->type == E820_RAM || i == map_size - 1) {
 			unsigned long start_pfn = PFN_DOWN(start);
 			unsigned long end_pfn = PFN_UP(end);
@@ -150,20 +256,19 @@ static unsigned long __init xen_set_identity_and_release(
 			if (entry->type == E820_RAM)
 				end_pfn = PFN_UP(entry->addr);
 
-			if (start_pfn < end_pfn) {
-				if (start_pfn < nr_pages)
-					released += xen_release_chunk(
-						start_pfn, min(end_pfn, nr_pages));
+			if (start_pfn < end_pfn)
+				xen_set_identity_and_release_chunk(
+					start_pfn, end_pfn, nr_pages,
+					&released, &identity);
 
-				identity += set_phys_range_identity(
-					start_pfn, end_pfn);
-			}
 			start = end;
 		}
 	}
 
-	printk(KERN_INFO "Released %lu pages of unused memory\n", released);
-	printk(KERN_INFO "Set %ld page(s) to 1-1 mapping\n", identity);
+	if (released)
+		printk(KERN_INFO "Released %lu pages of unused memory\n", released);
+	if (identity)
+		printk(KERN_INFO "Set %ld page(s) to 1-1 mapping\n", identity);
 
 	return released;
 }
@@ -217,7 +322,9 @@ char * __init xen_memory_setup(void)
 	int rc;
 	struct xen_memory_map memmap;
 	unsigned long max_pages;
+	unsigned long last_pfn = 0;
 	unsigned long extra_pages = 0;
+	unsigned long populated;
 	int i;
 	int op;
 
@@ -257,9 +364,20 @@ char * __init xen_memory_setup(void)
 	 */
 	xen_released_pages = xen_set_identity_and_release(
 		map, memmap.nr_entries, max_pfn);
-	extra_pages += xen_released_pages;
 
 	/*
+	 * Populate back the non-RAM pages and E820 gaps that had been
+	 * released. */
+	populated = xen_populate_chunk(map, memmap.nr_entries,
+			max_pfn, &last_pfn, xen_released_pages);
+
+	extra_pages += (xen_released_pages - populated);
+
+	if (last_pfn > max_pfn) {
+		max_pfn = min(MAX_DOMAIN_PAGES, last_pfn);
+		mem_end = PFN_PHYS(max_pfn);
+	}
+	/*
 	 * Clamp the amount of extra memory to a EXTRA_MEM_RATIO
 	 * factor the base size.  On non-highmem systems, the base
 	 * size is the full initial memory allocation; on highmem it
@@ -272,7 +390,6 @@ char * __init xen_memory_setup(void)
 	 */
 	extra_pages = min(EXTRA_MEM_RATIO * min(max_pfn, PFN_DOWN(MAXMEM)),
 			  extra_pages);
-
 	i = 0;
 	while (i < memmap.nr_entries) {
 		u64 addr = map[i].addr;
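The new populate path is bounded by two quantities: credits_left (how many pages were actually released earlier) and capacity (how much room is left in an E820 RAM entry above max_pfn). A toy model of the clamping in xen_populate_chunk(), with invented pfn ranges and counts, just to show the arithmetic:

#include <stdio.h>

int main(void)
{
	/* [start, end) of two hypothetical RAM entries above max_pfn */
	unsigned long s[2] = { 0x100000, 0x140000 };
	unsigned long e[2] = { 0x110000, 0x150000 };
	unsigned long credits_left = 0x18000;	/* released pages to re-add */
	unsigned long last_pfn = 0;
	int i;

	for (i = 0; i < 2 && credits_left; i++) {
		long capacity = e[i] - s[i];
		unsigned long credits = credits_left;

		if (credits > (unsigned long)capacity)
			credits = capacity;	/* clamp to the entry's room */
		/* xen_do_chunk(s[i], s[i] + credits, false) would run here */
		credits_left -= credits;
		last_pfn = s[i] + credits;
		printf("entry %d: placed %#lx pfns, last_pfn %#lx\n",
		       i, credits, last_pfn);
	}
	printf("credits left: %#lx\n", credits_left);
	return 0;
}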
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index 3700945ed0d5..afb250d22a6b 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -16,6 +16,7 @@
 #include <linux/err.h>
 #include <linux/slab.h>
 #include <linux/smp.h>
+#include <linux/irq_work.h>
 
 #include <asm/paravirt.h>
 #include <asm/desc.h>
@@ -41,10 +42,12 @@ cpumask_var_t xen_cpu_initialized_map;
 static DEFINE_PER_CPU(int, xen_resched_irq);
 static DEFINE_PER_CPU(int, xen_callfunc_irq);
 static DEFINE_PER_CPU(int, xen_callfuncsingle_irq);
+static DEFINE_PER_CPU(int, xen_irq_work);
 static DEFINE_PER_CPU(int, xen_debug_irq) = -1;
 
 static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id);
 static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id);
+static irqreturn_t xen_irq_work_interrupt(int irq, void *dev_id);
 
 /*
  * Reschedule call back.
@@ -143,6 +146,17 @@ static int xen_smp_intr_init(unsigned int cpu)
 		goto fail;
 	per_cpu(xen_callfuncsingle_irq, cpu) = rc;
 
+	callfunc_name = kasprintf(GFP_KERNEL, "irqwork%d", cpu);
+	rc = bind_ipi_to_irqhandler(XEN_IRQ_WORK_VECTOR,
+				    cpu,
+				    xen_irq_work_interrupt,
+				    IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
+				    callfunc_name,
+				    NULL);
+	if (rc < 0)
+		goto fail;
+	per_cpu(xen_irq_work, cpu) = rc;
+
 	return 0;
 
  fail:
@@ -155,6 +169,8 @@ static int xen_smp_intr_init(unsigned int cpu)
 	if (per_cpu(xen_callfuncsingle_irq, cpu) >= 0)
 		unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu),
 				       NULL);
+	if (per_cpu(xen_irq_work, cpu) >= 0)
+		unbind_from_irqhandler(per_cpu(xen_irq_work, cpu), NULL);
 
 	return rc;
 }
@@ -407,6 +423,7 @@ static void xen_cpu_die(unsigned int cpu)
 	unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu), NULL);
 	unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu), NULL);
 	unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu), NULL);
+	unbind_from_irqhandler(per_cpu(xen_irq_work, cpu), NULL);
 	xen_uninit_lock_cpu(cpu);
 	xen_teardown_timer(cpu);
 
@@ -469,8 +486,8 @@ static void xen_smp_send_reschedule(int cpu)
 	xen_send_IPI_one(cpu, XEN_RESCHEDULE_VECTOR);
 }
 
-static void xen_send_IPI_mask(const struct cpumask *mask,
-			      enum ipi_vector vector)
+static void __xen_send_IPI_mask(const struct cpumask *mask,
+			      int vector)
 {
 	unsigned cpu;
 
@@ -482,7 +499,7 @@ static void xen_smp_send_call_function_ipi(const struct cpumask *mask)
 {
 	int cpu;
 
-	xen_send_IPI_mask(mask, XEN_CALL_FUNCTION_VECTOR);
+	__xen_send_IPI_mask(mask, XEN_CALL_FUNCTION_VECTOR);
 
 	/* Make sure other vcpus get a chance to run if they need to. */
 	for_each_cpu(cpu, mask) {
@@ -495,10 +512,86 @@ static void xen_smp_send_call_function_ipi(const struct cpumask *mask)
 
 static void xen_smp_send_call_function_single_ipi(int cpu)
 {
-	xen_send_IPI_mask(cpumask_of(cpu),
+	__xen_send_IPI_mask(cpumask_of(cpu),
 			  XEN_CALL_FUNCTION_SINGLE_VECTOR);
 }
 
+static inline int xen_map_vector(int vector)
+{
+	int xen_vector;
+
+	switch (vector) {
+	case RESCHEDULE_VECTOR:
+		xen_vector = XEN_RESCHEDULE_VECTOR;
+		break;
+	case CALL_FUNCTION_VECTOR:
+		xen_vector = XEN_CALL_FUNCTION_VECTOR;
+		break;
+	case CALL_FUNCTION_SINGLE_VECTOR:
+		xen_vector = XEN_CALL_FUNCTION_SINGLE_VECTOR;
+		break;
+	case IRQ_WORK_VECTOR:
+		xen_vector = XEN_IRQ_WORK_VECTOR;
+		break;
+	default:
+		xen_vector = -1;
+		printk(KERN_ERR "xen: vector 0x%x is not implemented\n",
+			vector);
+	}
+
+	return xen_vector;
+}
+
+void xen_send_IPI_mask(const struct cpumask *mask,
+			      int vector)
+{
+	int xen_vector = xen_map_vector(vector);
+
+	if (xen_vector >= 0)
+		__xen_send_IPI_mask(mask, xen_vector);
+}
+
+void xen_send_IPI_all(int vector)
+{
+	int xen_vector = xen_map_vector(vector);
+
+	if (xen_vector >= 0)
+		__xen_send_IPI_mask(cpu_online_mask, xen_vector);
+}
+
+void xen_send_IPI_self(int vector)
+{
+	int xen_vector = xen_map_vector(vector);
+
+	if (xen_vector >= 0)
+		xen_send_IPI_one(smp_processor_id(), xen_vector);
+}
+
+void xen_send_IPI_mask_allbutself(const struct cpumask *mask,
+				int vector)
+{
+	unsigned cpu;
+	unsigned int this_cpu = smp_processor_id();
+
+	if (!(num_online_cpus() > 1))
+		return;
+
+	for_each_cpu_and(cpu, mask, cpu_online_mask) {
+		if (this_cpu == cpu)
+			continue;
+
+		xen_smp_send_call_function_single_ipi(cpu);
+	}
+}
+
+void xen_send_IPI_allbutself(int vector)
+{
+	int xen_vector = xen_map_vector(vector);
+
+	if (xen_vector >= 0)
+		xen_send_IPI_mask_allbutself(cpu_online_mask, xen_vector);
+}
+
 static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id)
 {
 	irq_enter();
@@ -519,6 +612,16 @@ static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id)
 	return IRQ_HANDLED;
 }
 
+static irqreturn_t xen_irq_work_interrupt(int irq, void *dev_id)
+{
+	irq_enter();
+	irq_work_run();
+	inc_irq_stat(apic_irq_work_irqs);
+	irq_exit();
+
+	return IRQ_HANDLED;
+}
+
 static const struct smp_ops xen_smp_ops __initconst = {
 	.smp_prepare_boot_cpu = xen_smp_prepare_boot_cpu,
 	.smp_prepare_cpus = xen_smp_prepare_cpus,
@@ -565,6 +668,7 @@ static void xen_hvm_cpu_die(unsigned int cpu)
 	unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu), NULL);
 	unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu), NULL);
 	unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu), NULL);
+	unbind_from_irqhandler(per_cpu(xen_irq_work, cpu), NULL);
 	native_cpu_die(cpu);
 }
 
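Why the new vector matters: irq_work lets code running in restricted contexts (such as perf's NMI-like paths) defer a callback to hard-IRQ context; on Xen that kick is now the XEN_IRQ_WORK_VECTOR IPI, whose handler above calls irq_work_run(). A hedged kernel-style sketch of the generic <linux/irq_work.h> usage this services; my_work and my_work_func are invented names:

#include <linux/irq_work.h>
#include <linux/kernel.h>

/* Runs later in hard-IRQ context, once the IPI arrives */
static void my_work_func(struct irq_work *work)
{
	pr_info("deferred irq_work ran\n");
}

static struct irq_work my_work = { .func = my_work_func };

static void my_restricted_context_hook(void)
{
	/*
	 * Safe where sleeping or taking most locks is forbidden; on Xen
	 * this ends up raising XEN_IRQ_WORK_VECTOR, and the new
	 * xen_irq_work_interrupt() above drains the queue.
	 */
	irq_work_queue(&my_work);
}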
diff --git a/arch/x86/xen/smp.h b/arch/x86/xen/smp.h
new file mode 100644
index 000000000000..8981a76d081a
--- /dev/null
+++ b/arch/x86/xen/smp.h
@@ -0,0 +1,12 @@
+#ifndef _XEN_SMP_H
+
+extern void xen_send_IPI_mask(const struct cpumask *mask,
+			      int vector);
+extern void xen_send_IPI_mask_allbutself(const struct cpumask *mask,
+				int vector);
+extern void xen_send_IPI_allbutself(int vector);
+extern void physflat_send_IPI_allbutself(int vector);
+extern void xen_send_IPI_all(int vector);
+extern void xen_send_IPI_self(int vector);
+
+#endif
diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c
index d69cc6c3f808..83e866d714ce 100644
--- a/arch/x86/xen/spinlock.c
+++ b/arch/x86/xen/spinlock.c
@@ -440,12 +440,12 @@ static int __init xen_spinlock_debugfs(void)
 	debugfs_create_u64("time_total", 0444, d_spin_debug,
 			   &spinlock_stats.time_total);
 
-	xen_debugfs_create_u32_array("histo_total", 0444, d_spin_debug,
-				     spinlock_stats.histo_spin_total, HISTO_BUCKETS + 1);
-	xen_debugfs_create_u32_array("histo_spinning", 0444, d_spin_debug,
-				     spinlock_stats.histo_spin_spinning, HISTO_BUCKETS + 1);
-	xen_debugfs_create_u32_array("histo_blocked", 0444, d_spin_debug,
-				     spinlock_stats.histo_spin_blocked, HISTO_BUCKETS + 1);
+	debugfs_create_u32_array("histo_total", 0444, d_spin_debug,
+				 spinlock_stats.histo_spin_total, HISTO_BUCKETS + 1);
+	debugfs_create_u32_array("histo_spinning", 0444, d_spin_debug,
+				 spinlock_stats.histo_spin_spinning, HISTO_BUCKETS + 1);
+	debugfs_create_u32_array("histo_blocked", 0444, d_spin_debug,
+				 spinlock_stats.histo_spin_blocked, HISTO_BUCKETS + 1);
 
 	return 0;
 }
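With the helper now generic, any kernel code can expose a u32 histogram the same way. A hedged sketch of registering one; "my_driver" and my_histo are invented names, and the debugfs_create_u32_array() call simply mirrors the ones above (reading the file yields a single space-separated line):

#include <linux/debugfs.h>
#include <linux/kernel.h>

static u32 my_histo[16];	/* hypothetical bucket counters */

static int __init my_debugfs_init(void)
{
	struct dentry *dir = debugfs_create_dir("my_driver", NULL);

	if (!dir)
		return -ENOMEM;
	/* e.g. cat /sys/kernel/debug/my_driver/my_histo -> "0 0 ... 0" */
	debugfs_create_u32_array("my_histo", 0444, dir,
				 my_histo, ARRAY_SIZE(my_histo));
	return 0;
}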
diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
index 45c0c0667bd9..202d4c150154 100644
--- a/arch/x86/xen/xen-ops.h
+++ b/arch/x86/xen/xen-ops.h
@@ -28,7 +28,6 @@ void xen_setup_shared_info(void);
 void xen_build_mfn_list_list(void);
 void xen_setup_machphys_mapping(void);
 pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn);
-void xen_ident_map_ISA(void);
 void xen_reserve_top(void);
 extern unsigned long xen_max_p2m_pfn;
 