author	Linus Torvalds <torvalds@linux-foundation.org>	2012-05-24 19:02:08 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-05-24 19:02:08 -0400
commit	b5f4035adfffbcc6b478de5b8c44b618b3124aff (patch)
tree	e7a5f011d8aaf5c95edf933f98f25dfc8fa46837 /drivers/xen/grant-table.c
parent	ce004178be1bbaa292e9e6497939e2970300095a (diff)
parent	68c2c39a76b094e9b2773e5846424ea674bf2c46 (diff)
Merge tag 'stable/for-linus-3.5-rc0-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/konrad/xen
Pull Xen updates from Konrad Rzeszutek Wilk:
 "Features:
  * Extend the APIC ops implementation and add IRQ_WORKER vector support so that 'perf' can work properly.
  * Fix self-ballooning code, and balloon logic when booting as initial domain.
  * Move array printing code to generic debugfs
  * Support XenBus domains.
  * Lazily free grants when a domain is dead/non-existent.
  * In M2P code use batching calls

  Bug-fixes:
  * Fix NULL dereference in allocation failure path (hvc_xen)
  * Fix unbinding of IRQ_WORKER vector during vCPU hot-unplug
  * Fix HVM guest resume - we would leak a PIRQ value instead of reusing the existing one."

Fix up add-add conflicts in arch/x86/xen/enlighten.c due to addition of apic ipi interface next to the new apic_id functions.

* tag 'stable/for-linus-3.5-rc0-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/konrad/xen:
  xen: do not map the same GSI twice in PVHVM guests.
  hvc_xen: NULL dereference on allocation failure
  xen: Add selfballoning memory reservation tunable.
  xenbus: Add support for xenbus backend in stub domain
  xen/smp: unbind irqworkX when unplugging vCPUs.
  xen: enter/exit lazy_mmu_mode around m2p_override calls
  xen/acpi/sleep: Enable ACPI sleep via the __acpi_os_prepare_sleep
  xen: implement IRQ_WORK_VECTOR handler
  xen: implement apic ipi interface
  xen/setup: update VA mapping when releasing memory during setup
  xen/setup: Combine the two hypercall functions - since they are quite similar.
  xen/setup: Populate freed MFNs from non-RAM E820 entries and gaps to E820 RAM
  xen/setup: Only print "Freeing XXX-YYY pfn range: Z pages freed" if Z > 0
  xen/gnttab: add deferred freeing logic
  debugfs: Add support to print u32 array in debugfs
  xen/p2m: An early bootup variant of set_phys_to_machine
  xen/p2m: Collapse early_alloc_p2m_middle redundant checks.
  xen/p2m: Allow alloc_p2m_middle to call reserve_brk depending on argument
  xen/p2m: Move code around to allow for better re-usage.
Diffstat (limited to 'drivers/xen/grant-table.c')
-rw-r--r--	drivers/xen/grant-table.c	125
1 file changed, 115 insertions, 10 deletions
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
index f100ce20b16b..0bfc1ef11259 100644
--- a/drivers/xen/grant-table.c
+++ b/drivers/xen/grant-table.c
@@ -38,6 +38,7 @@
 #include <linux/vmalloc.h>
 #include <linux/uaccess.h>
 #include <linux/io.h>
+#include <linux/hardirq.h>
 
 #include <xen/xen.h>
 #include <xen/interface/xen.h>
@@ -426,10 +427,8 @@ static int gnttab_end_foreign_access_ref_v1(grant_ref_t ref, int readonly)
 	nflags = *pflags;
 	do {
 		flags = nflags;
-		if (flags & (GTF_reading|GTF_writing)) {
-			printk(KERN_ALERT "WARNING: g.e. still in use!\n");
+		if (flags & (GTF_reading|GTF_writing))
 			return 0;
-		}
 	} while ((nflags = sync_cmpxchg(pflags, flags, 0)) != flags);
 
 	return 1;
@@ -458,12 +457,103 @@ static int gnttab_end_foreign_access_ref_v2(grant_ref_t ref, int readonly)
 	return 1;
 }
 
-int gnttab_end_foreign_access_ref(grant_ref_t ref, int readonly)
+static inline int _gnttab_end_foreign_access_ref(grant_ref_t ref, int readonly)
 {
 	return gnttab_interface->end_foreign_access_ref(ref, readonly);
 }
+
+int gnttab_end_foreign_access_ref(grant_ref_t ref, int readonly)
+{
+	if (_gnttab_end_foreign_access_ref(ref, readonly))
+		return 1;
+	pr_warn("WARNING: g.e. %#x still in use!\n", ref);
+	return 0;
+}
 EXPORT_SYMBOL_GPL(gnttab_end_foreign_access_ref);
 
+struct deferred_entry {
+	struct list_head list;
+	grant_ref_t ref;
+	bool ro;
+	uint16_t warn_delay;
+	struct page *page;
+};
+static LIST_HEAD(deferred_list);
+static void gnttab_handle_deferred(unsigned long);
+static DEFINE_TIMER(deferred_timer, gnttab_handle_deferred, 0, 0);
+
+static void gnttab_handle_deferred(unsigned long unused)
+{
+	unsigned int nr = 10;
+	struct deferred_entry *first = NULL;
+	unsigned long flags;
+
+	spin_lock_irqsave(&gnttab_list_lock, flags);
+	while (nr--) {
+		struct deferred_entry *entry
+			= list_first_entry(&deferred_list,
+					   struct deferred_entry, list);
+
+		if (entry == first)
+			break;
+		list_del(&entry->list);
+		spin_unlock_irqrestore(&gnttab_list_lock, flags);
+		if (_gnttab_end_foreign_access_ref(entry->ref, entry->ro)) {
+			put_free_entry(entry->ref);
+			if (entry->page) {
+				pr_debug("freeing g.e. %#x (pfn %#lx)\n",
+					 entry->ref, page_to_pfn(entry->page));
+				__free_page(entry->page);
+			} else
+				pr_info("freeing g.e. %#x\n", entry->ref);
+			kfree(entry);
+			entry = NULL;
+		} else {
+			if (!--entry->warn_delay)
+				pr_info("g.e. %#x still pending\n",
+					entry->ref);
+			if (!first)
+				first = entry;
+		}
+		spin_lock_irqsave(&gnttab_list_lock, flags);
+		if (entry)
+			list_add_tail(&entry->list, &deferred_list);
+		else if (list_empty(&deferred_list))
+			break;
+	}
+	if (!list_empty(&deferred_list) && !timer_pending(&deferred_timer)) {
+		deferred_timer.expires = jiffies + HZ;
+		add_timer(&deferred_timer);
+	}
+	spin_unlock_irqrestore(&gnttab_list_lock, flags);
+}
+
+static void gnttab_add_deferred(grant_ref_t ref, bool readonly,
+				struct page *page)
+{
+	struct deferred_entry *entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
+	const char *what = KERN_WARNING "leaking";
+
+	if (entry) {
+		unsigned long flags;
+
+		entry->ref = ref;
+		entry->ro = readonly;
+		entry->page = page;
+		entry->warn_delay = 60;
+		spin_lock_irqsave(&gnttab_list_lock, flags);
+		list_add_tail(&entry->list, &deferred_list);
+		if (!timer_pending(&deferred_timer)) {
+			deferred_timer.expires = jiffies + HZ;
+			add_timer(&deferred_timer);
+		}
+		spin_unlock_irqrestore(&gnttab_list_lock, flags);
+		what = KERN_DEBUG "deferring";
+	}
+	printk("%s g.e. %#x (pfn %#lx)\n",
+	       what, ref, page ? page_to_pfn(page) : -1);
+}
+
 void gnttab_end_foreign_access(grant_ref_t ref, int readonly,
 			       unsigned long page)
 {
@@ -471,12 +561,9 @@ void gnttab_end_foreign_access(grant_ref_t ref, int readonly,
 		put_free_entry(ref);
 		if (page != 0)
 			free_page(page);
-	} else {
-		/* XXX This needs to be fixed so that the ref and page are
-		   placed on a list to be freed up later. */
-		printk(KERN_WARNING
-		       "WARNING: leaking g.e. and page still in use!\n");
-	}
+	} else
+		gnttab_add_deferred(ref, readonly,
+				    page ? virt_to_page(page) : NULL);
 }
 EXPORT_SYMBOL_GPL(gnttab_end_foreign_access);
 
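For context, here is a minimal caller-level sketch (not part of this commit) of the pattern the two hunks above affect. The names example_share_page(), example_teardown(), shared_ref and shared_page are made up for illustration; gnttab_grant_foreign_access(), gnttab_end_foreign_access(), get_zeroed_page() and virt_to_mfn() are the existing grant-table and page APIs a backend would typically use. With the deferred-freeing logic, a grant that the (possibly dead) remote domain still has mapped at teardown time is queued on deferred_list and retried from the timer, instead of being leaked with only a console warning as before.

/* Illustrative sketch only -- not part of this commit. */
#include <linux/gfp.h>
#include <xen/grant_table.h>
#include <xen/page.h>

static grant_ref_t shared_ref;
static unsigned long shared_page;

static int example_share_page(domid_t otherend_id)
{
        int err;

        shared_page = get_zeroed_page(GFP_KERNEL);
        if (!shared_page)
                return -ENOMEM;

        /* Grant the other domain read/write access to this frame. */
        err = gnttab_grant_foreign_access(otherend_id,
                                          virt_to_mfn(shared_page), 0);
        if (err < 0) {
                free_page(shared_page);
                return err;
        }
        shared_ref = err;
        return 0;
}

static void example_teardown(void)
{
        /*
         * Revoke the grant and release the page.  If the remote domain
         * still has the grant mapped, the ref and page are now queued for
         * deferred freeing rather than leaked; the caller must simply not
         * touch the page again.
         */
        gnttab_end_foreign_access(shared_ref, 0 /* read/write */, shared_page);
}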
@@ -741,6 +828,7 @@ int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
 		    struct page **pages, unsigned int count)
 {
 	int i, ret;
+	bool lazy = false;
 	pte_t *pte;
 	unsigned long mfn;
 
@@ -751,6 +839,11 @@ int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
 	if (xen_feature(XENFEAT_auto_translated_physmap))
 		return ret;
 
+	if (!in_interrupt() && paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) {
+		arch_enter_lazy_mmu_mode();
+		lazy = true;
+	}
+
 	for (i = 0; i < count; i++) {
 		/* Do not add to override if the map failed. */
 		if (map_ops[i].status)
@@ -769,6 +862,9 @@ int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
 			return ret;
 	}
 
+	if (lazy)
+		arch_leave_lazy_mmu_mode();
+
 	return ret;
 }
 EXPORT_SYMBOL_GPL(gnttab_map_refs);
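The hunks above wrap the m2p override loop in a lazy-MMU section so that, on a PV guest, the per-page MMU updates can be queued and issued as a single batch of hypercalls rather than one hypercall per page. Below is a rough sketch of the guard pattern on its own, for illustration only: example_batched_updates() and example_update_one() are hypothetical placeholders, while in_interrupt(), paravirt_get_lazy_mode(), arch_enter_lazy_mmu_mode() and arch_leave_lazy_mmu_mode() are the real interfaces the patch uses.

/* Illustrative sketch only -- not part of this commit. */
#include <linux/hardirq.h>
#include <linux/mm.h>
#include <asm/paravirt.h>
#include <asm/pgtable.h>

/* Placeholder for the per-page work (m2p_add_override() in the real code). */
static int example_update_one(struct page *page)
{
        return 0;
}

static int example_batched_updates(struct page **pages, unsigned int count)
{
        bool lazy = false;
        unsigned int i;
        int ret = 0;

        /*
         * Lazy MMU mode may only be entered from process context, and only
         * if we are not already inside someone else's lazy section.
         */
        if (!in_interrupt() && paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) {
                arch_enter_lazy_mmu_mode();
                lazy = true;
        }

        for (i = 0; i < count; i++) {
                ret = example_update_one(pages[i]);
                if (ret)
                        break;
        }

        /* Leaving lazy mode flushes the queued updates in one batch. */
        if (lazy)
                arch_leave_lazy_mmu_mode();

        return ret;
}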
@@ -777,6 +873,7 @@ int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
 		      struct page **pages, unsigned int count, bool clear_pte)
 {
 	int i, ret;
+	bool lazy = false;
 
 	ret = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap_ops, count);
 	if (ret)
@@ -785,12 +882,20 @@ int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
 	if (xen_feature(XENFEAT_auto_translated_physmap))
 		return ret;
 
+	if (!in_interrupt() && paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) {
+		arch_enter_lazy_mmu_mode();
+		lazy = true;
+	}
+
 	for (i = 0; i < count; i++) {
 		ret = m2p_remove_override(pages[i], clear_pte);
 		if (ret)
 			return ret;
 	}
 
+	if (lazy)
+		arch_leave_lazy_mmu_mode();
+
 	return ret;
 }
 EXPORT_SYMBOL_GPL(gnttab_unmap_refs);