Diffstat (limited to 'drivers/xen')
-rw-r--r--  drivers/xen/Makefile              |   1
-rw-r--r--  drivers/xen/balloon.c             | 175
-rw-r--r--  drivers/xen/cpu_hotplug.c         |  90
-rw-r--r--  drivers/xen/events.c              |  40
-rw-r--r--  drivers/xen/grant-table.c         |   2
-rw-r--r--  drivers/xen/xenbus/xenbus_probe.c |   8
6 files changed, 140 insertions(+), 176 deletions(-)
diff --git a/drivers/xen/Makefile b/drivers/xen/Makefile
index 363286c54290..d2a8fdf0e191 100644
--- a/drivers/xen/Makefile
+++ b/drivers/xen/Makefile
@@ -1,4 +1,5 @@
 obj-y += grant-table.o features.o events.o manage.o
 obj-y += xenbus/
+obj-$(CONFIG_HOTPLUG_CPU) += cpu_hotplug.o
 obj-$(CONFIG_XEN_XENCOMM) += xencomm.o
 obj-$(CONFIG_XEN_BALLOON) += balloon.o
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index 2e15da5459cf..8c83abc73400 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -53,7 +53,6 @@
 #include <asm/tlb.h>
 
 #include <xen/interface/memory.h>
-#include <xen/balloon.h>
 #include <xen/xenbus.h>
 #include <xen/features.h>
 #include <xen/page.h>
@@ -226,9 +225,8 @@ static int increase_reservation(unsigned long nr_pages)
 	}
 
 	set_xen_guest_handle(reservation.extent_start, frame_list);
 	reservation.nr_extents = nr_pages;
-	rc = HYPERVISOR_memory_op(
-		XENMEM_populate_physmap, &reservation);
+	rc = HYPERVISOR_memory_op(XENMEM_populate_physmap, &reservation);
 	if (rc < nr_pages) {
 		if (rc > 0) {
 			int ret;
@@ -236,7 +234,7 @@ static int increase_reservation(unsigned long nr_pages)
 			/* We hit the Xen hard limit: reprobe. */
 			reservation.nr_extents = rc;
 			ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation,
-					&reservation);
+						   &reservation);
 			BUG_ON(ret != rc);
 		}
 		if (rc >= 0)
@@ -420,7 +418,7 @@ static int __init balloon_init(void)
 	unsigned long pfn;
 	struct page *page;
 
-	if (!is_running_on_xen())
+	if (!xen_pv_domain())
 		return -ENODEV;
 
 	pr_info("xen_balloon: Initialising balloon driver.\n");
@@ -464,136 +462,13 @@ static void balloon_exit(void)
 
 module_exit(balloon_exit);
 
-static void balloon_update_driver_allowance(long delta)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&balloon_lock, flags);
-	balloon_stats.driver_pages += delta;
-	spin_unlock_irqrestore(&balloon_lock, flags);
-}
-
-static int dealloc_pte_fn(
-	pte_t *pte, struct page *pmd_page, unsigned long addr, void *data)
-{
-	unsigned long mfn = pte_mfn(*pte);
-	int ret;
-	struct xen_memory_reservation reservation = {
-		.nr_extents = 1,
-		.extent_order = 0,
-		.domid = DOMID_SELF
-	};
-	set_xen_guest_handle(reservation.extent_start, &mfn);
-	set_pte_at(&init_mm, addr, pte, __pte_ma(0ull));
-	set_phys_to_machine(__pa(addr) >> PAGE_SHIFT, INVALID_P2M_ENTRY);
-	ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation);
-	BUG_ON(ret != 1);
-	return 0;
-}
-
-static struct page **alloc_empty_pages_and_pagevec(int nr_pages)
-{
-	unsigned long vaddr, flags;
-	struct page *page, **pagevec;
-	int i, ret;
-
-	pagevec = kmalloc(sizeof(page) * nr_pages, GFP_KERNEL);
-	if (pagevec == NULL)
-		return NULL;
-
-	for (i = 0; i < nr_pages; i++) {
-		page = pagevec[i] = alloc_page(GFP_KERNEL);
-		if (page == NULL)
-			goto err;
-
-		vaddr = (unsigned long)page_address(page);
-
-		scrub_page(page);
-
-		spin_lock_irqsave(&balloon_lock, flags);
-
-		if (xen_feature(XENFEAT_auto_translated_physmap)) {
-			unsigned long gmfn = page_to_pfn(page);
-			struct xen_memory_reservation reservation = {
-				.nr_extents = 1,
-				.extent_order = 0,
-				.domid = DOMID_SELF
-			};
-			set_xen_guest_handle(reservation.extent_start, &gmfn);
-			ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation,
-						   &reservation);
-			if (ret == 1)
-				ret = 0; /* success */
-		} else {
-			ret = apply_to_page_range(&init_mm, vaddr, PAGE_SIZE,
-						  dealloc_pte_fn, NULL);
-		}
-
-		if (ret != 0) {
-			spin_unlock_irqrestore(&balloon_lock, flags);
-			__free_page(page);
-			goto err;
-		}
-
-		totalram_pages = --balloon_stats.current_pages;
-
-		spin_unlock_irqrestore(&balloon_lock, flags);
-	}
-
- out:
-	schedule_work(&balloon_worker);
-	flush_tlb_all();
-	return pagevec;
-
- err:
-	spin_lock_irqsave(&balloon_lock, flags);
-	while (--i >= 0)
-		balloon_append(pagevec[i]);
-	spin_unlock_irqrestore(&balloon_lock, flags);
-	kfree(pagevec);
-	pagevec = NULL;
-	goto out;
-}
-
-static void free_empty_pages_and_pagevec(struct page **pagevec, int nr_pages)
-{
-	unsigned long flags;
-	int i;
-
-	if (pagevec == NULL)
-		return;
-
-	spin_lock_irqsave(&balloon_lock, flags);
-	for (i = 0; i < nr_pages; i++) {
-		BUG_ON(page_count(pagevec[i]) != 1);
-		balloon_append(pagevec[i]);
-	}
-	spin_unlock_irqrestore(&balloon_lock, flags);
-
-	kfree(pagevec);
-
-	schedule_work(&balloon_worker);
-}
-
-static void balloon_release_driver_page(struct page *page)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&balloon_lock, flags);
-	balloon_append(page);
-	balloon_stats.driver_pages--;
-	spin_unlock_irqrestore(&balloon_lock, flags);
-
-	schedule_work(&balloon_worker);
-}
-
-
-#define BALLOON_SHOW(name, format, args...)			\
-	static ssize_t show_##name(struct sys_device *dev,	\
-				   char *buf)			\
-	{							\
-		return sprintf(buf, format, ##args);		\
-	}							\
+#define BALLOON_SHOW(name, format, args...)			\
+	static ssize_t show_##name(struct sys_device *dev,	\
+				   struct sysdev_attribute *attr, \
+				   char *buf)			\
+	{							\
+		return sprintf(buf, format, ##args);		\
+	}							\
 	static SYSDEV_ATTR(name, S_IRUGO, show_##name, NULL)
 
 BALLOON_SHOW(current_kb, "%lu\n", PAGES2KB(balloon_stats.current_pages));
@@ -604,7 +479,8 @@ BALLOON_SHOW(hard_limit_kb,
 	    (balloon_stats.hard_limit!=~0UL) ? PAGES2KB(balloon_stats.hard_limit) : 0);
 BALLOON_SHOW(driver_kb, "%lu\n", PAGES2KB(balloon_stats.driver_pages));
 
-static ssize_t show_target_kb(struct sys_device *dev, char *buf)
+static ssize_t show_target_kb(struct sys_device *dev, struct sysdev_attribute *attr,
+			      char *buf)
 {
 	return sprintf(buf, "%lu\n", PAGES2KB(balloon_stats.target_pages));
 }
@@ -614,19 +490,14 @@ static ssize_t store_target_kb(struct sys_device *dev,
 			       const char *buf,
 			       size_t count)
 {
-	char memstring[64], *endchar;
+	char *endchar;
 	unsigned long long target_bytes;
 
 	if (!capable(CAP_SYS_ADMIN))
 		return -EPERM;
 
-	if (count <= 1)
-		return -EBADMSG; /* runt */
-	if (count > sizeof(memstring))
-		return -EFBIG; /* too long */
-	strcpy(memstring, buf);
+	target_bytes = memparse(buf, &endchar);
 
-	target_bytes = memparse(memstring, &endchar);
 	balloon_set_new_target(target_bytes >> PAGE_SHIFT);
 
 	return count;
@@ -694,20 +565,4 @@ static int register_balloon(struct sys_device *sysdev)
 	return error;
 }
 
-static void unregister_balloon(struct sys_device *sysdev)
-{
-	int i;
-
-	sysfs_remove_group(&sysdev->kobj, &balloon_info_group);
-	for (i = 0; i < ARRAY_SIZE(balloon_attrs); i++)
-		sysdev_remove_file(sysdev, balloon_attrs[i]);
-	sysdev_unregister(sysdev);
-	sysdev_class_unregister(&balloon_sysdev_class);
-}
-
-static void balloon_sysfs_exit(void)
-{
-	unregister_balloon(&balloon_sysdev);
-}
-
 MODULE_LICENSE("GPL");
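
For reference, with the extra struct sysdev_attribute argument added above, an invocation such as BALLOON_SHOW(current_kb, ...) now expands roughly as follows (a sketch against the 2.6.27-era sysdev interface; whitespace approximate):

static ssize_t show_current_kb(struct sys_device *dev,
			       struct sysdev_attribute *attr,
			       char *buf)
{
	/* PAGES2KB() converts the page count kept in balloon_stats to KiB */
	return sprintf(buf, "%lu\n", PAGES2KB(balloon_stats.current_pages));
}
static SYSDEV_ATTR(current_kb, S_IRUGO, show_current_kb, NULL);
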
diff --git a/drivers/xen/cpu_hotplug.c b/drivers/xen/cpu_hotplug.c
new file mode 100644
index 000000000000..565280ec1c6a
--- /dev/null
+++ b/drivers/xen/cpu_hotplug.c
@@ -0,0 +1,90 @@
+#include <linux/notifier.h>
+
+#include <xen/xenbus.h>
+
+#include <asm-x86/xen/hypervisor.h>
+#include <asm/cpu.h>
+
+static void enable_hotplug_cpu(int cpu)
+{
+	if (!cpu_present(cpu))
+		arch_register_cpu(cpu);
+
+	cpu_set(cpu, cpu_present_map);
+}
+
+static void disable_hotplug_cpu(int cpu)
+{
+	if (cpu_present(cpu))
+		arch_unregister_cpu(cpu);
+
+	cpu_clear(cpu, cpu_present_map);
+}
+
+static void vcpu_hotplug(unsigned int cpu)
+{
+	int err;
+	char dir[32], state[32];
+
+	if (!cpu_possible(cpu))
+		return;
+
+	sprintf(dir, "cpu/%u", cpu);
+	err = xenbus_scanf(XBT_NIL, dir, "availability", "%s", state);
+	if (err != 1) {
+		printk(KERN_ERR "XENBUS: Unable to read cpu state\n");
+		return;
+	}
+
+	if (strcmp(state, "online") == 0) {
+		enable_hotplug_cpu(cpu);
+	} else if (strcmp(state, "offline") == 0) {
+		(void)cpu_down(cpu);
+		disable_hotplug_cpu(cpu);
+	} else {
+		printk(KERN_ERR "XENBUS: unknown state(%s) on CPU%d\n",
+		       state, cpu);
+	}
+}
+
+static void handle_vcpu_hotplug_event(struct xenbus_watch *watch,
+				      const char **vec, unsigned int len)
+{
+	unsigned int cpu;
+	char *cpustr;
+	const char *node = vec[XS_WATCH_PATH];
+
+	cpustr = strstr(node, "cpu/");
+	if (cpustr != NULL) {
+		sscanf(cpustr, "cpu/%u", &cpu);
+		vcpu_hotplug(cpu);
+	}
+}
+
+static int setup_cpu_watcher(struct notifier_block *notifier,
+			     unsigned long event, void *data)
+{
+	static struct xenbus_watch cpu_watch = {
+		.node = "cpu",
+		.callback = handle_vcpu_hotplug_event};
+
+	(void)register_xenbus_watch(&cpu_watch);
+
+	return NOTIFY_DONE;
+}
+
+static int __init setup_vcpu_hotplug_event(void)
+{
+	static struct notifier_block xsn_cpu = {
+		.notifier_call = setup_cpu_watcher };
+
+	if (!xen_pv_domain())
+		return -ENODEV;
+
+	register_xenstore_notifier(&xsn_cpu);
+
+	return 0;
+}
+
+arch_initcall(setup_vcpu_hotplug_event);
+
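
To make the watch plumbing in the new file concrete: the xenstore watch registered on "cpu" fires with the full path of the key that changed, and handle_vcpu_hotplug_event() recovers the CPU index from the "cpu/<n>/..." component before vcpu_hotplug() re-reads that CPU's "availability" key. A standalone illustration of just that parsing step, using a hypothetical watch path:

#include <stdio.h>
#include <string.h>

int main(void)
{
	/* Hypothetical path as delivered in vec[XS_WATCH_PATH] */
	const char *node = "cpu/3/availability";
	const char *cpustr = strstr(node, "cpu/");
	unsigned int cpu;

	if (cpustr && sscanf(cpustr, "cpu/%u", &cpu) == 1)
		printf("vcpu_hotplug(%u)\n", cpu);	/* prints vcpu_hotplug(3) */
	return 0;
}
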
diff --git a/drivers/xen/events.c b/drivers/xen/events.c
index 0e0c28574af8..c3290bc186a0 100644
--- a/drivers/xen/events.c
+++ b/drivers/xen/events.c
@@ -84,17 +84,6 @@ static int irq_bindcount[NR_IRQS];
 /* Xen will never allocate port zero for any purpose. */
 #define VALID_EVTCHN(chn)	((chn) != 0)
 
-/*
- * Force a proper event-channel callback from Xen after clearing the
- * callback mask. We do this in a very simple manner, by making a call
- * down into Xen. The pending flag will be checked by Xen on return.
- */
-void force_evtchn_callback(void)
-{
-	(void)HYPERVISOR_xen_version(0, NULL);
-}
-EXPORT_SYMBOL_GPL(force_evtchn_callback);
-
 static struct irq_chip xen_dynamic_chip;
 
 /* Constructor for packed IRQ information. */
@@ -175,6 +164,12 @@ static inline void set_evtchn(int port)
 	sync_set_bit(port, &s->evtchn_pending[0]);
 }
 
+static inline int test_evtchn(int port)
+{
+	struct shared_info *s = HYPERVISOR_shared_info;
+	return sync_test_bit(port, &s->evtchn_pending[0]);
+}
+
 
 /**
  * notify_remote_via_irq - send event to remote end of event channel via irq
@@ -365,6 +360,10 @@ static void unbind_from_irq(unsigned int irq)
 		per_cpu(virq_to_irq, cpu_from_evtchn(evtchn))
 			[index_from_irq(irq)] = -1;
 		break;
+	case IRQT_IPI:
+		per_cpu(ipi_to_irq, cpu_from_evtchn(evtchn))
+			[index_from_irq(irq)] = -1;
+		break;
 	default:
 		break;
 	}
@@ -743,6 +742,25 @@ void xen_clear_irq_pending(int irq)
 		clear_evtchn(evtchn);
 }
 
+void xen_set_irq_pending(int irq)
+{
+	int evtchn = evtchn_from_irq(irq);
+
+	if (VALID_EVTCHN(evtchn))
+		set_evtchn(evtchn);
+}
+
+bool xen_test_irq_pending(int irq)
+{
+	int evtchn = evtchn_from_irq(irq);
+	bool ret = false;
+
+	if (VALID_EVTCHN(evtchn))
+		ret = test_evtchn(evtchn);
+
+	return ret;
+}
+
 /* Poll waiting for an irq to become pending. In the usual case, the
    irq will be disabled so it won't deliver an interrupt. */
 void xen_poll_irq(int irq)
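
The new xen_set_irq_pending()/xen_test_irq_pending() helpers pair with the existing xen_clear_irq_pending() and xen_poll_irq() to let a caller block on an event channel without taking the interrupt. A minimal, hypothetical caller sketch (not part of this diff; assumes the irq's handler is kept disabled, as xen_poll_irq() expects):

static void wait_for_kick(int irq)
{
	/* Drop any stale event before waiting. */
	xen_clear_irq_pending(irq);

	/* The poll in xen_poll_irq() may return spuriously, so re-check
	 * the pending bit each time around. */
	while (!xen_test_irq_pending(irq))
		xen_poll_irq(irq);
}
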
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
index e9e11168616a..06592b9da83c 100644
--- a/drivers/xen/grant-table.c
+++ b/drivers/xen/grant-table.c
@@ -508,7 +508,7 @@ static int __devinit gnttab_init(void)
 	unsigned int max_nr_glist_frames, nr_glist_frames;
 	unsigned int nr_init_grefs;
 
-	if (!is_running_on_xen())
+	if (!xen_domain())
 		return -ENODEV;
 
 	nr_grant_frames = 1;
diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c
index 57ceb5346b74..7f24a98a446f 100644
--- a/drivers/xen/xenbus/xenbus_probe.c
+++ b/drivers/xen/xenbus/xenbus_probe.c
@@ -814,7 +814,7 @@ static int __init xenbus_probe_init(void)
 	DPRINTK("");
 
 	err = -ENODEV;
-	if (!is_running_on_xen())
+	if (!xen_domain())
 		goto out_error;
 
 	/* Register ourselves with the kernel bus subsystem */
@@ -829,7 +829,7 @@ static int __init xenbus_probe_init(void)
 	/*
 	 * Domain0 doesn't have a store_evtchn or store_mfn yet.
 	 */
-	if (is_initial_xendomain()) {
+	if (xen_initial_domain()) {
 		/* dom0 not yet supported */
 	} else {
 		xenstored_ready = 1;
@@ -846,7 +846,7 @@ static int __init xenbus_probe_init(void)
 		goto out_unreg_back;
 	}
 
-	if (!is_initial_xendomain())
+	if (!xen_initial_domain())
 		xenbus_probe(NULL);
 
 	return 0;
@@ -937,7 +937,7 @@ static void wait_for_devices(struct xenbus_driver *xendrv)
 	unsigned long timeout = jiffies + 10*HZ;
 	struct device_driver *drv = xendrv ? &xendrv->driver : NULL;
 
-	if (!ready_to_wait_for_devices || !is_running_on_xen())
+	if (!ready_to_wait_for_devices || !xen_domain())
 		return;
 
 	while (exists_disconnected_device(drv)) {