author	Roger Pau Monne <roger.pau@citrix.com>	2013-10-29 13:31:14 -0400
committer	Jens Axboe <axboe@kernel.dk>	2013-11-08 11:10:30 -0500
commit	bfe11d6de1c416cea4f3f0f35f864162063ce3fa (patch)
tree	25bc44562e7794cc41327c7c447cd49a9935a876
parent	f1a3c6191369632ada5b709997b91a7f15045ff4 (diff)
xen-blkfront: restore the non-persistent data path
When persistent grants were added they were always used, even if the
backend doesn't have this feature (there's no harm in always using the
same set of pages). This restores the old data path when the backend
doesn't have persistent grants, removing the burden of doing a memcpy
when it is not actually needed.

Signed-off-by: Roger Pau Monné <roger.pau@citrix.com>
Reported-by: Felipe Franciosi <felipe.franciosi@citrix.com>
Cc: Felipe Franciosi <felipe.franciosi@citrix.com>
Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Cc: David Vrabel <david.vrabel@citrix.com>
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
[v2: Fix up whitespace issues]
-rw-r--r--	drivers/block/xen-blkfront.c	125
1 file changed, 100 insertions(+), 25 deletions(-)
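For readers unfamiliar with the trade-off the message describes, the sketch below is a minimal, hypothetical userspace model (plain C, standard library only) of the two write paths: with persistent grants the frontend copies the request data into a page it granted once at setup; without them, as restored by this patch, it grants the I/O page directly and skips the copy. The names (queue_write, struct grant, owns_page) are invented for illustration and are not the driver's real API.

/*
 * Standalone userspace sketch, not part of the patch below. It models a
 * single-page write request under both data paths.
 */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define PAGE_SIZE 4096

struct grant {
	unsigned char *page;	/* page whose frame is shared with the backend */
	bool owns_page;		/* persistent grants keep their own page */
};

/* Write path: prepare the page the backend will read the data from. */
static struct grant *queue_write(unsigned char *bio_data, bool feature_persistent)
{
	struct grant *gnt = malloc(sizeof(*gnt));

	if (!gnt)
		return NULL;

	if (feature_persistent) {
		/* Persistent path: copy into a long-lived granted page. */
		gnt->page = malloc(PAGE_SIZE);
		if (!gnt->page) {
			free(gnt);
			return NULL;
		}
		gnt->owns_page = true;
		memcpy(gnt->page, bio_data, PAGE_SIZE);
	} else {
		/*
		 * Non-persistent path restored by this patch: share the I/O
		 * page itself for the lifetime of the request, no memcpy.
		 */
		gnt->page = bio_data;
		gnt->owns_page = false;
	}
	return gnt;
}

int main(void)
{
	static unsigned char bio[PAGE_SIZE];
	struct grant *gnt = queue_write(bio, false);

	if (gnt) {
		printf("memcpy %s\n", gnt->page == bio ? "skipped" : "performed");
		if (gnt->owns_page)
			free(gnt->page);
		free(gnt);
	}
	return 0;
}

In the real driver, the "share the page directly" branch corresponds to passing page_to_pfn(sg_page(sg)) into get_grant() and ending foreign access when the request completes, as the hunks below show.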
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 5b8a15483a4c..432db1b59b00 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -121,7 +121,8 @@ struct blkfront_info
 	struct work_struct work;
 	struct gnttab_free_callback callback;
 	struct blk_shadow shadow[BLK_RING_SIZE];
-	struct list_head persistent_gnts;
+	struct list_head grants;
+	struct list_head indirect_pages;
 	unsigned int persistent_gnts_c;
 	unsigned long shadow_free;
 	unsigned int feature_flush;
@@ -200,15 +201,17 @@ static int fill_grant_buffer(struct blkfront_info *info, int num)
 		if (!gnt_list_entry)
 			goto out_of_memory;
 
-		granted_page = alloc_page(GFP_NOIO);
-		if (!granted_page) {
-			kfree(gnt_list_entry);
-			goto out_of_memory;
+		if (info->feature_persistent) {
+			granted_page = alloc_page(GFP_NOIO);
+			if (!granted_page) {
+				kfree(gnt_list_entry);
+				goto out_of_memory;
+			}
+			gnt_list_entry->pfn = page_to_pfn(granted_page);
 		}
 
-		gnt_list_entry->pfn = page_to_pfn(granted_page);
 		gnt_list_entry->gref = GRANT_INVALID_REF;
-		list_add(&gnt_list_entry->node, &info->persistent_gnts);
+		list_add(&gnt_list_entry->node, &info->grants);
 		i++;
 	}
 
@@ -216,9 +219,10 @@ static int fill_grant_buffer(struct blkfront_info *info, int num)
 
 out_of_memory:
 	list_for_each_entry_safe(gnt_list_entry, n,
-	                         &info->persistent_gnts, node) {
+	                         &info->grants, node) {
 		list_del(&gnt_list_entry->node);
-		__free_page(pfn_to_page(gnt_list_entry->pfn));
+		if (info->feature_persistent)
+			__free_page(pfn_to_page(gnt_list_entry->pfn));
 		kfree(gnt_list_entry);
 		i--;
 	}
@@ -227,13 +231,14 @@ out_of_memory:
 }
 
 static struct grant *get_grant(grant_ref_t *gref_head,
+                               unsigned long pfn,
                                struct blkfront_info *info)
 {
 	struct grant *gnt_list_entry;
 	unsigned long buffer_mfn;
 
-	BUG_ON(list_empty(&info->persistent_gnts));
-	gnt_list_entry = list_first_entry(&info->persistent_gnts, struct grant,
+	BUG_ON(list_empty(&info->grants));
+	gnt_list_entry = list_first_entry(&info->grants, struct grant,
 	                                  node);
 	list_del(&gnt_list_entry->node);
 
@@ -245,6 +250,10 @@ static struct grant *get_grant(grant_ref_t *gref_head,
 	/* Assign a gref to this page */
 	gnt_list_entry->gref = gnttab_claim_grant_reference(gref_head);
 	BUG_ON(gnt_list_entry->gref == -ENOSPC);
+	if (!info->feature_persistent) {
+		BUG_ON(!pfn);
+		gnt_list_entry->pfn = pfn;
+	}
 	buffer_mfn = pfn_to_mfn(gnt_list_entry->pfn);
 	gnttab_grant_foreign_access_ref(gnt_list_entry->gref,
 	                                info->xbdev->otherend_id,
@@ -480,22 +489,34 @@ static int blkif_queue_request(struct request *req)
 
 		if ((ring_req->operation == BLKIF_OP_INDIRECT) &&
 		    (i % SEGS_PER_INDIRECT_FRAME == 0)) {
+			unsigned long pfn;
+
 			if (segments)
 				kunmap_atomic(segments);
 
 			n = i / SEGS_PER_INDIRECT_FRAME;
-			gnt_list_entry = get_grant(&gref_head, info);
+			if (!info->feature_persistent) {
+				struct page *indirect_page;
+
+				/* Fetch a pre-allocated page to use for indirect grefs */
+				BUG_ON(list_empty(&info->indirect_pages));
+				indirect_page = list_first_entry(&info->indirect_pages,
+				                                 struct page, lru);
+				list_del(&indirect_page->lru);
+				pfn = page_to_pfn(indirect_page);
+			}
+			gnt_list_entry = get_grant(&gref_head, pfn, info);
 			info->shadow[id].indirect_grants[n] = gnt_list_entry;
 			segments = kmap_atomic(pfn_to_page(gnt_list_entry->pfn));
 			ring_req->u.indirect.indirect_grefs[n] = gnt_list_entry->gref;
 		}
 
-		gnt_list_entry = get_grant(&gref_head, info);
+		gnt_list_entry = get_grant(&gref_head, page_to_pfn(sg_page(sg)), info);
 		ref = gnt_list_entry->gref;
 
 		info->shadow[id].grants_used[i] = gnt_list_entry;
 
-		if (rq_data_dir(req)) {
+		if (rq_data_dir(req) && info->feature_persistent) {
 			char *bvec_data;
 			void *shared_data;
 
@@ -907,21 +928,36 @@ static void blkif_free(struct blkfront_info *info, int suspend)
 		blk_stop_queue(info->rq);
 
 	/* Remove all persistent grants */
-	if (!list_empty(&info->persistent_gnts)) {
+	if (!list_empty(&info->grants)) {
 		list_for_each_entry_safe(persistent_gnt, n,
-		                         &info->persistent_gnts, node) {
+		                         &info->grants, node) {
 			list_del(&persistent_gnt->node);
 			if (persistent_gnt->gref != GRANT_INVALID_REF) {
 				gnttab_end_foreign_access(persistent_gnt->gref,
 				                          0, 0UL);
 				info->persistent_gnts_c--;
 			}
-			__free_page(pfn_to_page(persistent_gnt->pfn));
+			if (info->feature_persistent)
+				__free_page(pfn_to_page(persistent_gnt->pfn));
 			kfree(persistent_gnt);
 		}
 	}
 	BUG_ON(info->persistent_gnts_c != 0);
 
+	/*
+	 * Remove indirect pages, this only happens when using indirect
+	 * descriptors but not persistent grants
+	 */
+	if (!list_empty(&info->indirect_pages)) {
+		struct page *indirect_page, *n;
+
+		BUG_ON(info->feature_persistent);
+		list_for_each_entry_safe(indirect_page, n, &info->indirect_pages, lru) {
+			list_del(&indirect_page->lru);
+			__free_page(indirect_page);
+		}
+	}
+
 	for (i = 0; i < BLK_RING_SIZE; i++) {
 		/*
 		 * Clear persistent grants present in requests already
@@ -936,7 +972,8 @@ static void blkif_free(struct blkfront_info *info, int suspend)
 		for (j = 0; j < segs; j++) {
 			persistent_gnt = info->shadow[i].grants_used[j];
 			gnttab_end_foreign_access(persistent_gnt->gref, 0, 0UL);
-			__free_page(pfn_to_page(persistent_gnt->pfn));
+			if (info->feature_persistent)
+				__free_page(pfn_to_page(persistent_gnt->pfn));
 			kfree(persistent_gnt);
 		}
 
@@ -995,7 +1032,7 @@ static void blkif_completion(struct blk_shadow *s, struct blkfront_info *info,
 	nseg = s->req.operation == BLKIF_OP_INDIRECT ?
 		s->req.u.indirect.nr_segments : s->req.u.rw.nr_segments;
 
-	if (bret->operation == BLKIF_OP_READ) {
+	if (bret->operation == BLKIF_OP_READ && info->feature_persistent) {
 		/*
 		 * Copy the data received from the backend into the bvec.
 		 * Since bv_offset can be different than 0, and bv_len different
@@ -1023,7 +1060,10 @@ static void blkif_completion(struct blk_shadow *s, struct blkfront_info *info,
 			 * we add it at the head of the list, so it will be
 			 * reused first.
 			 */
-			list_add(&s->grants_used[i]->node, &info->persistent_gnts);
+			if (!info->feature_persistent)
+				pr_alert_ratelimited("backed has not unmapped grant: %u\n",
+				                     s->grants_used[i]->gref);
+			list_add(&s->grants_used[i]->node, &info->grants);
 			info->persistent_gnts_c++;
 		} else {
 			/*
@@ -1034,19 +1074,29 @@ static void blkif_completion(struct blk_shadow *s, struct blkfront_info *info,
 			 */
 			gnttab_end_foreign_access(s->grants_used[i]->gref, 0, 0UL);
 			s->grants_used[i]->gref = GRANT_INVALID_REF;
-			list_add_tail(&s->grants_used[i]->node, &info->persistent_gnts);
+			list_add_tail(&s->grants_used[i]->node, &info->grants);
 		}
 	}
 	if (s->req.operation == BLKIF_OP_INDIRECT) {
 		for (i = 0; i < INDIRECT_GREFS(nseg); i++) {
 			if (gnttab_query_foreign_access(s->indirect_grants[i]->gref)) {
-				list_add(&s->indirect_grants[i]->node, &info->persistent_gnts);
+				if (!info->feature_persistent)
+					pr_alert_ratelimited("backed has not unmapped grant: %u\n",
+					                     s->indirect_grants[i]->gref);
+				list_add(&s->indirect_grants[i]->node, &info->grants);
 				info->persistent_gnts_c++;
 			} else {
+				struct page *indirect_page;
+
 				gnttab_end_foreign_access(s->indirect_grants[i]->gref, 0, 0UL);
+				/*
+				 * Add the used indirect page back to the list of
+				 * available pages for indirect grefs.
+				 */
+				indirect_page = pfn_to_page(s->indirect_grants[i]->pfn);
+				list_add(&indirect_page->lru, &info->indirect_pages);
 				s->indirect_grants[i]->gref = GRANT_INVALID_REF;
-				list_add_tail(&s->indirect_grants[i]->node,
-				              &info->persistent_gnts);
+				list_add_tail(&s->indirect_grants[i]->node, &info->grants);
 			}
 		}
 	}
@@ -1341,7 +1391,8 @@ static int blkfront_probe(struct xenbus_device *dev,
 	spin_lock_init(&info->io_lock);
 	info->xbdev = dev;
 	info->vdevice = vdevice;
-	INIT_LIST_HEAD(&info->persistent_gnts);
+	INIT_LIST_HEAD(&info->grants);
+	INIT_LIST_HEAD(&info->indirect_pages);
 	info->persistent_gnts_c = 0;
 	info->connected = BLKIF_STATE_DISCONNECTED;
 	INIT_WORK(&info->work, blkif_restart_queue);
@@ -1637,6 +1688,23 @@ static int blkfront_setup_indirect(struct blkfront_info *info)
 	if (err)
 		goto out_of_memory;
 
+	if (!info->feature_persistent && info->max_indirect_segments) {
+		/*
+		 * We are using indirect descriptors but not persistent
+		 * grants, we need to allocate a set of pages that can be
+		 * used for mapping indirect grefs
+		 */
+		int num = INDIRECT_GREFS(segs) * BLK_RING_SIZE;
+
+		BUG_ON(!list_empty(&info->indirect_pages));
+		for (i = 0; i < num; i++) {
+			struct page *indirect_page = alloc_page(GFP_NOIO);
+			if (!indirect_page)
+				goto out_of_memory;
+			list_add(&indirect_page->lru, &info->indirect_pages);
+		}
+	}
+
 	for (i = 0; i < BLK_RING_SIZE; i++) {
 		info->shadow[i].grants_used = kzalloc(
 			sizeof(info->shadow[i].grants_used[0]) * segs,
@@ -1667,6 +1735,13 @@ out_of_memory:
 		kfree(info->shadow[i].indirect_grants);
 		info->shadow[i].indirect_grants = NULL;
 	}
+	if (!list_empty(&info->indirect_pages)) {
+		struct page *indirect_page, *n;
+		list_for_each_entry_safe(indirect_page, n, &info->indirect_pages, lru) {
+			list_del(&indirect_page->lru);
+			__free_page(indirect_page);
+		}
+	}
 	return -ENOMEM;
 }
 