author     Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>   2015-12-11 12:08:48 -0500
committer  Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>   2016-01-04 12:21:05 -0500
commit     2fb1ef4f1226ea6d6d3481036cabe01a4415b68c (patch)
tree       e0d8fdbc4e0aad5e3eb76a3d573c4e56733e9382 /drivers/block/xen-blkback
parent     597957000ab5b1b38085c20868f3f7b9c305bae5 (diff)
xen/blkback: pseudo support for multi hardware queues/rings
Preparatory patch for multiple hardware queues (rings). The number of
rings is unconditionally set to 1; a larger number will be enabled by the
follow-up patch "xen/blkback: get the number of hardware queues/rings
from blkfront".
Signed-off-by: Arianna Avanzini <avanzini.arianna@gmail.com>
Signed-off-by: Bob Liu <bob.liu@oracle.com>
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
---
v2: Align variables in the structures.
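
For reference, a sketch of the xenstore layout that read_per_ring_refs()
reads (illustrative; <otherend> stands for the frontend's xenstore
directory, and the per-queue paths only come into play once nr_rings > 1
is negotiated by the follow-up patch):

    <otherend>/ring-ref            (or ring-ref%u when "ring-page-order" is set)
    <otherend>/event-channel

    <otherend>/queue-0/ring-ref
    <otherend>/queue-0/event-channel
    <otherend>/queue-1/ring-ref
    <otherend>/queue-1/event-channel
    ...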
Diffstat (limited to 'drivers/block/xen-blkback')
-rw-r--r--  drivers/block/xen-blkback/common.h |   3
-rw-r--r--  drivers/block/xen-blkback/xenbus.c | 277
2 files changed, 175 insertions(+), 105 deletions(-)
diff --git a/drivers/block/xen-blkback/common.h b/drivers/block/xen-blkback/common.h
index dbdf4164c83f..310eff3cf43f 100644
--- a/drivers/block/xen-blkback/common.h
+++ b/drivers/block/xen-blkback/common.h
@@ -340,7 +340,8 @@ struct xen_blkif {
 	struct work_struct	free_work;
 	unsigned int		nr_ring_pages;
 	/* All rings for this device. */
-	struct xen_blkif_ring	ring;
+	struct xen_blkif_ring	*rings;
+	unsigned int		nr_rings;
 };
 
 struct seg_buf {
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
index e4bfc928035d..f5bfedd0e948 100644
--- a/drivers/block/xen-blkback/xenbus.c
+++ b/drivers/block/xen-blkback/xenbus.c
@@ -86,9 +86,11 @@ static void xen_update_blkif_status(struct xen_blkif *blkif)
 {
 	int err;
 	char name[BLKBACK_NAME_LEN];
+	struct xen_blkif_ring *ring;
+	int i;
 
 	/* Not ready to connect? */
-	if (!blkif->ring.irq || !blkif->vbd.bdev)
+	if (!blkif->rings || !blkif->rings[0].irq || !blkif->vbd.bdev)
 		return;
 
 	/* Already connected? */
@@ -113,19 +115,55 @@ static void xen_update_blkif_status(struct xen_blkif *blkif)
 	}
 	invalidate_inode_pages2(blkif->vbd.bdev->bd_inode->i_mapping);
 
-	blkif->ring.xenblkd = kthread_run(xen_blkif_schedule, &blkif->ring, "%s", name);
-	if (IS_ERR(blkif->ring.xenblkd)) {
-		err = PTR_ERR(blkif->ring.xenblkd);
-		blkif->ring.xenblkd = NULL;
-		xenbus_dev_error(blkif->be->dev, err, "start xenblkd");
-		return;
+	for (i = 0; i < blkif->nr_rings; i++) {
+		ring = &blkif->rings[i];
+		ring->xenblkd = kthread_run(xen_blkif_schedule, ring, "%s-%d", name, i);
+		if (IS_ERR(ring->xenblkd)) {
+			err = PTR_ERR(ring->xenblkd);
+			ring->xenblkd = NULL;
+			xenbus_dev_fatal(blkif->be->dev, err,
+					"start %s-%d xenblkd", name, i);
+			goto out;
+		}
+	}
+	return;
+
+out:
+	while (--i >= 0) {
+		ring = &blkif->rings[i];
+		kthread_stop(ring->xenblkd);
 	}
+	return;
+}
+
+static int xen_blkif_alloc_rings(struct xen_blkif *blkif)
+{
+	unsigned int r;
+
+	blkif->rings = kzalloc(blkif->nr_rings * sizeof(struct xen_blkif_ring), GFP_KERNEL);
+	if (!blkif->rings)
+		return -ENOMEM;
+
+	for (r = 0; r < blkif->nr_rings; r++) {
+		struct xen_blkif_ring *ring = &blkif->rings[r];
+
+		spin_lock_init(&ring->blk_ring_lock);
+		init_waitqueue_head(&ring->wq);
+		INIT_LIST_HEAD(&ring->pending_free);
+
+		spin_lock_init(&ring->pending_free_lock);
+		init_waitqueue_head(&ring->pending_free_wq);
+		init_waitqueue_head(&ring->shutdown_wq);
+		ring->blkif = blkif;
+		xen_blkif_get(blkif);
+	}
+
+	return 0;
 }
 
 static struct xen_blkif *xen_blkif_alloc(domid_t domid)
 {
 	struct xen_blkif *blkif;
-	struct xen_blkif_ring *ring;
 
 	BUILD_BUG_ON(MAX_INDIRECT_PAGES > BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST);
 
@@ -143,15 +181,11 @@ static struct xen_blkif *xen_blkif_alloc(domid_t domid)
 	blkif->st_print = jiffies;
 	INIT_WORK(&blkif->persistent_purge_work, xen_blkbk_unmap_purged_grants);
 
-	ring = &blkif->ring;
-	ring->blkif = blkif;
-	spin_lock_init(&ring->blk_ring_lock);
-	init_waitqueue_head(&ring->wq);
-
-	INIT_LIST_HEAD(&ring->pending_free);
-	spin_lock_init(&ring->pending_free_lock);
-	init_waitqueue_head(&ring->pending_free_wq);
-	init_waitqueue_head(&ring->shutdown_wq);
+	blkif->nr_rings = 1;
+	if (xen_blkif_alloc_rings(blkif)) {
+		kmem_cache_free(xen_blkif_cachep, blkif);
+		return ERR_PTR(-ENOMEM);
+	}
 
 	return blkif;
 }
@@ -216,50 +250,54 @@ static int xen_blkif_map(struct xen_blkif_ring *ring, grant_ref_t *gref,
 static int xen_blkif_disconnect(struct xen_blkif *blkif)
 {
 	struct pending_req *req, *n;
-	int i = 0, j;
-	struct xen_blkif_ring *ring = &blkif->ring;
+	unsigned int j, r;
 
-	if (ring->xenblkd) {
-		kthread_stop(ring->xenblkd);
-		wake_up(&ring->shutdown_wq);
-		ring->xenblkd = NULL;
-	}
+	for (r = 0; r < blkif->nr_rings; r++) {
+		struct xen_blkif_ring *ring = &blkif->rings[r];
+		unsigned int i = 0;
 
-	/* The above kthread_stop() guarantees that at this point we
-	 * don't have any discard_io or other_io requests. So, checking
-	 * for inflight IO is enough.
-	 */
-	if (atomic_read(&ring->inflight) > 0)
-		return -EBUSY;
+		if (ring->xenblkd) {
+			kthread_stop(ring->xenblkd);
+			wake_up(&ring->shutdown_wq);
+			ring->xenblkd = NULL;
+		}
 
-	if (ring->irq) {
-		unbind_from_irqhandler(ring->irq, ring);
-		ring->irq = 0;
-	}
+		/* The above kthread_stop() guarantees that at this point we
+		 * don't have any discard_io or other_io requests. So, checking
+		 * for inflight IO is enough.
+		 */
+		if (atomic_read(&ring->inflight) > 0)
+			return -EBUSY;
 
-	if (ring->blk_rings.common.sring) {
-		xenbus_unmap_ring_vfree(blkif->be->dev, ring->blk_ring);
-		ring->blk_rings.common.sring = NULL;
-	}
+		if (ring->irq) {
+			unbind_from_irqhandler(ring->irq, ring);
+			ring->irq = 0;
+		}
 
-	/* Remove all persistent grants and the cache of ballooned pages. */
-	xen_blkbk_free_caches(ring);
+		if (ring->blk_rings.common.sring) {
+			xenbus_unmap_ring_vfree(blkif->be->dev, ring->blk_ring);
+			ring->blk_rings.common.sring = NULL;
+		}
 
-	/* Check that there is no request in use */
-	list_for_each_entry_safe(req, n, &ring->pending_free, free_list) {
-		list_del(&req->free_list);
+		/* Remove all persistent grants and the cache of ballooned pages. */
+		xen_blkbk_free_caches(ring);
 
-		for (j = 0; j < MAX_INDIRECT_SEGMENTS; j++)
-			kfree(req->segments[j]);
+		/* Check that there is no request in use */
+		list_for_each_entry_safe(req, n, &ring->pending_free, free_list) {
+			list_del(&req->free_list);
 
-		for (j = 0; j < MAX_INDIRECT_PAGES; j++)
-			kfree(req->indirect_pages[j]);
+			for (j = 0; j < MAX_INDIRECT_SEGMENTS; j++)
+				kfree(req->segments[j]);
 
-		kfree(req);
-		i++;
-	}
+			for (j = 0; j < MAX_INDIRECT_PAGES; j++)
+				kfree(req->indirect_pages[j]);
 
-	WARN_ON(i != (XEN_BLKIF_REQS_PER_PAGE * blkif->nr_ring_pages));
+			kfree(req);
+			i++;
+		}
+
+		WARN_ON(i != (XEN_BLKIF_REQS_PER_PAGE * blkif->nr_ring_pages));
+	}
 	blkif->nr_ring_pages = 0;
 
 	return 0;
@@ -279,6 +317,7 @@ static void xen_blkif_free(struct xen_blkif *blkif)
 	BUG_ON(!list_empty(&blkif->free_pages));
 	BUG_ON(!RB_EMPTY_ROOT(&blkif->persistent_gnts));
 
+	kfree(blkif->rings);
 	kmem_cache_free(xen_blkif_cachep, blkif);
 }
 
@@ -427,6 +466,7 @@ static int xen_vbd_create(struct xen_blkif *blkif, blkif_vdev_t handle,
 static int xen_blkbk_remove(struct xenbus_device *dev)
 {
 	struct backend_info *be = dev_get_drvdata(&dev->dev);
+	unsigned int i;
 
 	pr_debug("%s %p %d\n", __func__, dev, dev->otherend_id);
 
@@ -443,7 +483,8 @@ static int xen_blkbk_remove(struct xenbus_device *dev)
 
 	if (be->blkif) {
 		xen_blkif_disconnect(be->blkif);
-		xen_blkif_put(be->blkif);
+		for (i = 0; i < be->blkif->nr_rings; i++)
+			xen_blkif_put(be->blkif);
 	}
 
 	kfree(be->mode);
@@ -826,51 +867,43 @@ again:
 	xenbus_transaction_end(xbt, 1);
 }
 
-
-static int connect_ring(struct backend_info *be)
+/*
+ * Each ring may have multi pages, depends on "ring-page-order".
+ */
+static int read_per_ring_refs(struct xen_blkif_ring *ring, const char *dir)
 {
-	struct xenbus_device *dev = be->dev;
 	unsigned int ring_ref[XENBUS_MAX_RING_GRANTS];
-	unsigned int evtchn, nr_grefs, ring_page_order;
-	unsigned int pers_grants;
-	char protocol[64] = "";
 	struct pending_req *req, *n;
 	int err, i, j;
-	struct xen_blkif_ring *ring = &be->blkif->ring;
-
-	pr_debug("%s %s\n", __func__, dev->otherend);
+	struct xen_blkif *blkif = ring->blkif;
+	struct xenbus_device *dev = blkif->be->dev;
+	unsigned int ring_page_order, nr_grefs, evtchn;
 
-	err = xenbus_scanf(XBT_NIL, dev->otherend, "event-channel", "%u",
+	err = xenbus_scanf(XBT_NIL, dir, "event-channel", "%u",
 			  &evtchn);
 	if (err != 1) {
 		err = -EINVAL;
-		xenbus_dev_fatal(dev, err, "reading %s/event-channel",
-				 dev->otherend);
+		xenbus_dev_fatal(dev, err, "reading %s/event-channel", dir);
 		return err;
 	}
-	pr_info("event-channel %u\n", evtchn);
 
 	err = xenbus_scanf(XBT_NIL, dev->otherend, "ring-page-order", "%u",
 			  &ring_page_order);
 	if (err != 1) {
-		err = xenbus_scanf(XBT_NIL, dev->otherend, "ring-ref",
-				  "%u", &ring_ref[0]);
+		err = xenbus_scanf(XBT_NIL, dir, "ring-ref", "%u", &ring_ref[0]);
 		if (err != 1) {
 			err = -EINVAL;
-			xenbus_dev_fatal(dev, err, "reading %s/ring-ref",
-					 dev->otherend);
+			xenbus_dev_fatal(dev, err, "reading %s/ring-ref", dir);
 			return err;
 		}
 		nr_grefs = 1;
-		pr_info("%s:using single page: ring-ref %d\n", dev->otherend,
-			ring_ref[0]);
 	} else {
 		unsigned int i;
 
 		if (ring_page_order > xen_blkif_max_ring_order) {
 			err = -EINVAL;
 			xenbus_dev_fatal(dev, err, "%s/request %d ring page order exceed max:%d",
-					 dev->otherend, ring_page_order,
+					 dir, ring_page_order,
 					 xen_blkif_max_ring_order);
 			return err;
 		}
@@ -880,46 +913,17 @@ static int connect_ring(struct backend_info *be)
 			char ring_ref_name[RINGREF_NAME_LEN];
 
 			snprintf(ring_ref_name, RINGREF_NAME_LEN, "ring-ref%u", i);
-			err = xenbus_scanf(XBT_NIL, dev->otherend, ring_ref_name,
+			err = xenbus_scanf(XBT_NIL, dir, ring_ref_name,
 					   "%u", &ring_ref[i]);
 			if (err != 1) {
 				err = -EINVAL;
 				xenbus_dev_fatal(dev, err, "reading %s/%s",
-						 dev->otherend, ring_ref_name);
+						 dir, ring_ref_name);
 				return err;
 			}
-			pr_info("ring-ref%u: %u\n", i, ring_ref[i]);
 		}
 	}
-
-	be->blkif->blk_protocol = BLKIF_PROTOCOL_DEFAULT;
-	err = xenbus_gather(XBT_NIL, dev->otherend, "protocol",
-			    "%63s", protocol, NULL);
-	if (err)
-		strcpy(protocol, "unspecified, assuming default");
-	else if (0 == strcmp(protocol, XEN_IO_PROTO_ABI_NATIVE))
-		be->blkif->blk_protocol = BLKIF_PROTOCOL_NATIVE;
-	else if (0 == strcmp(protocol, XEN_IO_PROTO_ABI_X86_32))
-		be->blkif->blk_protocol = BLKIF_PROTOCOL_X86_32;
-	else if (0 == strcmp(protocol, XEN_IO_PROTO_ABI_X86_64))
-		be->blkif->blk_protocol = BLKIF_PROTOCOL_X86_64;
-	else {
-		xenbus_dev_fatal(dev, err, "unknown fe protocol %s", protocol);
-		return -1;
-	}
-	err = xenbus_gather(XBT_NIL, dev->otherend,
-			    "feature-persistent", "%u",
-			    &pers_grants, NULL);
-	if (err)
-		pers_grants = 0;
-
-	be->blkif->vbd.feature_gnt_persistent = pers_grants;
-	be->blkif->vbd.overflow_max_grants = 0;
-	be->blkif->nr_ring_pages = nr_grefs;
-
-	pr_info("ring-pages:%d, event-channel %d, protocol %d (%s) %s\n",
-		nr_grefs, evtchn, be->blkif->blk_protocol, protocol,
-		pers_grants ? "persistent grants" : "");
+	blkif->nr_ring_pages = nr_grefs;
 
 	for (i = 0; i < nr_grefs * XEN_BLKIF_REQS_PER_PAGE; i++) {
 		req = kzalloc(sizeof(*req), GFP_KERNEL);
@@ -964,6 +968,71 @@ fail:
 		kfree(req);
 	}
 	return -ENOMEM;
+
+}
+
+static int connect_ring(struct backend_info *be)
+{
+	struct xenbus_device *dev = be->dev;
+	unsigned int pers_grants;
+	char protocol[64] = "";
+	int err, i;
+	char *xspath;
+	size_t xspathsize;
+	const size_t xenstore_path_ext_size = 11; /* sufficient for "/queue-NNN" */
+
+	pr_debug("%s %s\n", __func__, dev->otherend);
+
+	be->blkif->blk_protocol = BLKIF_PROTOCOL_DEFAULT;
+	err = xenbus_gather(XBT_NIL, dev->otherend, "protocol",
+			    "%63s", protocol, NULL);
+	if (err)
+		strcpy(protocol, "unspecified, assuming default");
+	else if (0 == strcmp(protocol, XEN_IO_PROTO_ABI_NATIVE))
+		be->blkif->blk_protocol = BLKIF_PROTOCOL_NATIVE;
+	else if (0 == strcmp(protocol, XEN_IO_PROTO_ABI_X86_32))
+		be->blkif->blk_protocol = BLKIF_PROTOCOL_X86_32;
+	else if (0 == strcmp(protocol, XEN_IO_PROTO_ABI_X86_64))
+		be->blkif->blk_protocol = BLKIF_PROTOCOL_X86_64;
+	else {
+		xenbus_dev_fatal(dev, err, "unknown fe protocol %s", protocol);
+		return -1;
+	}
+	err = xenbus_gather(XBT_NIL, dev->otherend,
+			    "feature-persistent", "%u",
+			    &pers_grants, NULL);
+	if (err)
+		pers_grants = 0;
+
+	be->blkif->vbd.feature_gnt_persistent = pers_grants;
+	be->blkif->vbd.overflow_max_grants = 0;
+
+	pr_info("%s: using %d queues, protocol %d (%s) %s\n", dev->nodename,
+		 be->blkif->nr_rings, be->blkif->blk_protocol, protocol,
+		 pers_grants ? "persistent grants" : "");
+
+	if (be->blkif->nr_rings == 1)
+		return read_per_ring_refs(&be->blkif->rings[0], dev->otherend);
+	else {
+		xspathsize = strlen(dev->otherend) + xenstore_path_ext_size;
+		xspath = kmalloc(xspathsize, GFP_KERNEL);
+		if (!xspath) {
+			xenbus_dev_fatal(dev, -ENOMEM, "reading ring references");
+			return -ENOMEM;
+		}
+
+		for (i = 0; i < be->blkif->nr_rings; i++) {
+			memset(xspath, 0, xspathsize);
+			snprintf(xspath, xspathsize, "%s/queue-%u", dev->otherend, i);
+			err = read_per_ring_refs(&be->blkif->rings[i], xspath);
+			if (err) {
+				kfree(xspath);
+				return err;
+			}
+		}
+		kfree(xspath);
+	}
+	return 0;
 }
 
 static const struct xenbus_device_id xen_blkbk_ids[] = {