author     Jennifer Herbert <jennifer.herbert@citrix.com>    2014-12-09 13:28:37 -0500
committer  David Vrabel <david.vrabel@citrix.com>            2015-01-28 09:03:14 -0500
commit     3f9f1c67572f5e5e6dc84216d48d1480f3c4fcf6 (patch)
tree       f92be430b64202bce20f597e121f23a111cc3493 /drivers/xen/grant-table.c
parent     c2677a6fc4dee765fff8f7ac3d61f657dc295650 (diff)
xen/grant-table: add a mechanism to safely unmap pages that are in use
Introduce gnttab_unmap_refs_async() that can be used to safely unmap pages that may be in use (ref count > 1). If the pages are in use the unmap is deferred and retried later. This polling is not very clever but it should be good enough if the cases where the delay is necessary are rare.

The initial delay is 5 ms and is increased linearly on each subsequent retry (to reduce load if the page is in use for a long time).

This is needed to allow block backends using grant mapping to safely use network storage (block or filesystem based such as iSCSI or NFS). The network storage driver may complete a block request whilst there is a queued network packet retry (because the ack from the remote end races with deciding to queue the retry). The pages for the retried packet would be grant unmapped and the network driver (or hardware) would access the unmapped page.

Signed-off-by: Jennifer Herbert <jennifer.herbert@citrix.com>
Acked-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
Signed-off-by: David Vrabel <david.vrabel@citrix.com>
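For illustration only, not part of this commit: a minimal sketch of how a backend driver might drive the new API, assuming the gntab_unmap_queue_data fields and the done(result, item) callback signature implied by the hunk below. The my_backend_req structure and the my_* functions are hypothetical caller-side names.

/*
 * Hypothetical caller sketch: unmap a batch of granted pages that a
 * network-backed block request may still hold references on, and wait
 * for the deferred unmap to finish before reusing the pages.
 */
#include <linux/completion.h>
#include <xen/grant_table.h>

struct my_backend_req {                          /* hypothetical caller state */
	struct gntab_unmap_queue_data unmap_data;
	struct completion unmapped;
};

/* Runs only once every page is unused and gnttab_unmap_refs() has completed. */
static void my_unmap_done(int result, struct gntab_unmap_queue_data *data)
{
	struct my_backend_req *req =
		container_of(data, struct my_backend_req, unmap_data);

	WARN_ON(result);              /* result is gnttab_unmap_refs()'s return */
	complete(&req->unmapped);     /* pages are now safe to free or reuse    */
}

static void my_backend_finish(struct my_backend_req *req,
			      struct gnttab_unmap_grant_ref *unmap_ops,
			      struct page **pages, unsigned int count)
{
	init_completion(&req->unmapped);

	req->unmap_data.unmap_ops  = unmap_ops;
	req->unmap_data.kunmap_ops = NULL;       /* no kernel mapping in this sketch */
	req->unmap_data.pages      = pages;
	req->unmap_data.count      = count;
	req->unmap_data.done       = my_unmap_done;

	/* Returns immediately; the unmap is retried from a workqueue while
	 * any page still has an extra reference (page_count() > 1). */
	gnttab_unmap_refs_async(&req->unmap_data);

	wait_for_completion(&req->unmapped);
}

The call returns straight away; if any page is still in use the unmap is rescheduled with a delay that starts at 5 ms and grows linearly with the retry count, and done() is invoked only after the real gnttab_unmap_refs() has run.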
Diffstat (limited to 'drivers/xen/grant-table.c')
-rw-r--r--  drivers/xen/grant-table.c  44
1 file changed, 44 insertions, 0 deletions
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
index 89dcca448bb6..17972fbacddc 100644
--- a/drivers/xen/grant-table.c
+++ b/drivers/xen/grant-table.c
@@ -42,6 +42,7 @@
 #include <linux/io.h>
 #include <linux/delay.h>
 #include <linux/hardirq.h>
+#include <linux/workqueue.h>
 
 #include <xen/xen.h>
 #include <xen/interface/xen.h>
@@ -819,6 +820,49 @@ int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
 }
 EXPORT_SYMBOL_GPL(gnttab_unmap_refs);
 
+#define GNTTAB_UNMAP_REFS_DELAY 5
+
+static void __gnttab_unmap_refs_async(struct gntab_unmap_queue_data* item);
+
+static void gnttab_unmap_work(struct work_struct *work)
+{
+	struct gntab_unmap_queue_data
+		*unmap_data = container_of(work,
+					   struct gntab_unmap_queue_data,
+					   gnttab_work.work);
+	if (unmap_data->age != UINT_MAX)
+		unmap_data->age++;
+	__gnttab_unmap_refs_async(unmap_data);
+}
+
+static void __gnttab_unmap_refs_async(struct gntab_unmap_queue_data* item)
+{
+	int ret;
+	int pc;
+
+	for (pc = 0; pc < item->count; pc++) {
+		if (page_count(item->pages[pc]) > 1) {
+			unsigned long delay = GNTTAB_UNMAP_REFS_DELAY * (item->age + 1);
+			schedule_delayed_work(&item->gnttab_work,
+					      msecs_to_jiffies(delay));
+			return;
+		}
+	}
+
+	ret = gnttab_unmap_refs(item->unmap_ops, item->kunmap_ops,
+				item->pages, item->count);
+	item->done(ret, item);
+}
+
+void gnttab_unmap_refs_async(struct gntab_unmap_queue_data* item)
+{
+	INIT_DELAYED_WORK(&item->gnttab_work, gnttab_unmap_work);
+	item->age = 0;
+
+	__gnttab_unmap_refs_async(item);
+}
+EXPORT_SYMBOL_GPL(gnttab_unmap_refs_async);
+
 static int gnttab_map_frames_v1(xen_pfn_t *frames, unsigned int nr_gframes)
 {
 	int rc;