path: root/drivers/block/xen-blkfront.c
Diffstat (limited to 'drivers/block/xen-blkfront.c')
-rw-r--r--	drivers/block/xen-blkfront.c	123
1 file changed, 98 insertions(+), 25 deletions(-)
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index b536a9cef91..773da7d6491 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -98,6 +98,9 @@ struct blkfront_info
 	unsigned long shadow_free;
 	unsigned int feature_flush;
 	unsigned int flush_op;
+	unsigned int feature_discard;
+	unsigned int discard_granularity;
+	unsigned int discard_alignment;
 	int is_ready;
 };
 
@@ -302,29 +305,36 @@ static int blkif_queue_request(struct request *req)
 		ring_req->operation = info->flush_op;
 	}
 
-	ring_req->nr_segments = blk_rq_map_sg(req->q, req, info->sg);
-	BUG_ON(ring_req->nr_segments > BLKIF_MAX_SEGMENTS_PER_REQUEST);
+	if (unlikely(req->cmd_flags & REQ_DISCARD)) {
+		/* id, sector_number and handle are set above. */
+		ring_req->operation = BLKIF_OP_DISCARD;
+		ring_req->nr_segments = 0;
+		ring_req->u.discard.nr_sectors = blk_rq_sectors(req);
+	} else {
+		ring_req->nr_segments = blk_rq_map_sg(req->q, req, info->sg);
+		BUG_ON(ring_req->nr_segments > BLKIF_MAX_SEGMENTS_PER_REQUEST);
 
-	for_each_sg(info->sg, sg, ring_req->nr_segments, i) {
-		buffer_mfn = pfn_to_mfn(page_to_pfn(sg_page(sg)));
-		fsect = sg->offset >> 9;
-		lsect = fsect + (sg->length >> 9) - 1;
-		/* install a grant reference. */
-		ref = gnttab_claim_grant_reference(&gref_head);
-		BUG_ON(ref == -ENOSPC);
+		for_each_sg(info->sg, sg, ring_req->nr_segments, i) {
+			buffer_mfn = pfn_to_mfn(page_to_pfn(sg_page(sg)));
+			fsect = sg->offset >> 9;
+			lsect = fsect + (sg->length >> 9) - 1;
+			/* install a grant reference. */
+			ref = gnttab_claim_grant_reference(&gref_head);
+			BUG_ON(ref == -ENOSPC);
 
-		gnttab_grant_foreign_access_ref(
-				ref,
-				info->xbdev->otherend_id,
-				buffer_mfn,
-				rq_data_dir(req) );
+			gnttab_grant_foreign_access_ref(
+					ref,
+					info->xbdev->otherend_id,
+					buffer_mfn,
+					rq_data_dir(req));
 
-		info->shadow[id].frame[i] = mfn_to_pfn(buffer_mfn);
-		ring_req->u.rw.seg[i] =
-				(struct blkif_request_segment) {
-					.gref = ref,
-					.first_sect = fsect,
-					.last_sect = lsect };
+			info->shadow[id].frame[i] = mfn_to_pfn(buffer_mfn);
+			ring_req->u.rw.seg[i] =
+					(struct blkif_request_segment) {
+						.gref = ref,
+						.first_sect = fsect,
+						.last_sect = lsect };
+		}
 	}
 
 	info->ring.req_prod_pvt++;
@@ -370,7 +380,9 @@ static void do_blkif_request(struct request_queue *rq)
 
 		blk_start_request(req);
 
-		if (req->cmd_type != REQ_TYPE_FS) {
+		if ((req->cmd_type != REQ_TYPE_FS) ||
+		    ((req->cmd_flags & (REQ_FLUSH | REQ_FUA)) &&
+		    !info->flush_op)) {
 			__blk_end_request_all(req, -EIO);
 			continue;
 		}
@@ -399,6 +411,7 @@ wait:
 static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size)
 {
 	struct request_queue *rq;
+	struct blkfront_info *info = gd->private_data;
 
 	rq = blk_init_queue(do_blkif_request, &blkif_io_lock);
 	if (rq == NULL)
@@ -406,6 +419,13 @@ static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size)
 
 	queue_flag_set_unlocked(QUEUE_FLAG_VIRT, rq);
 
+	if (info->feature_discard) {
+		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, rq);
+		blk_queue_max_discard_sectors(rq, get_capacity(gd));
+		rq->limits.discard_granularity = info->discard_granularity;
+		rq->limits.discard_alignment = info->discard_alignment;
+	}
+
 	/* Hard sector size and max sectors impersonate the equiv. hardware. */
 	blk_queue_logical_block_size(rq, sector_size);
 	blk_queue_max_hw_sectors(rq, 512);
@@ -722,6 +742,17 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
 
 		error = (bret->status == BLKIF_RSP_OKAY) ? 0 : -EIO;
 		switch (bret->operation) {
+		case BLKIF_OP_DISCARD:
+			if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) {
+				struct request_queue *rq = info->rq;
+				printk(KERN_WARNING "blkfront: %s: discard op failed\n",
+					   info->gd->disk_name);
+				error = -EOPNOTSUPP;
+				info->feature_discard = 0;
+				queue_flag_clear(QUEUE_FLAG_DISCARD, rq);
+			}
+			__blk_end_request_all(req, error);
+			break;
 		case BLKIF_OP_FLUSH_DISKCACHE:
 		case BLKIF_OP_WRITE_BARRIER:
 			if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) {
@@ -1098,6 +1129,33 @@ blkfront_closing(struct blkfront_info *info)
 	bdput(bdev);
 }
 
+static void blkfront_setup_discard(struct blkfront_info *info)
+{
+	int err;
+	char *type;
+	unsigned int discard_granularity;
+	unsigned int discard_alignment;
+
+	type = xenbus_read(XBT_NIL, info->xbdev->otherend, "type", NULL);
+	if (IS_ERR(type))
+		return;
+
+	if (strncmp(type, "phy", 3) == 0) {
+		err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
+			"discard-granularity", "%u", &discard_granularity,
+			"discard-alignment", "%u", &discard_alignment,
+			NULL);
+		if (!err) {
+			info->feature_discard = 1;
+			info->discard_granularity = discard_granularity;
+			info->discard_alignment = discard_alignment;
+		}
+	} else if (strncmp(type, "file", 4) == 0)
+		info->feature_discard = 1;
+
+	kfree(type);
+}
+
 /*
  * Invoked when the backend is finally 'ready' (and has told produced
  * the details about the physical device - #sectors, size, etc).
@@ -1108,7 +1166,7 @@ static void blkfront_connect(struct blkfront_info *info)
 	unsigned long sector_size;
 	unsigned int binfo;
 	int err;
-	int barrier, flush;
+	int barrier, flush, discard;
 
 	switch (info->connected) {
 	case BLKIF_STATE_CONNECTED:
@@ -1178,7 +1236,14 @@ static void blkfront_connect(struct blkfront_info *info)
 		info->feature_flush = REQ_FLUSH;
 		info->flush_op = BLKIF_OP_FLUSH_DISKCACHE;
 	}
 
+	err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
+			    "feature-discard", "%d", &discard,
+			    NULL);
+
+	if (!err && discard)
+		blkfront_setup_discard(info);
+
 	err = xlvbd_alloc_gendisk(sectors, info, binfo, sector_size);
 	if (err) {
 		xenbus_dev_fatal(info->xbdev, err, "xlvbd_add at %s",
@@ -1385,6 +1450,8 @@ static struct xenbus_driver blkfront = {
 
 static int __init xlblk_init(void)
 {
+	int ret;
+
 	if (!xen_domain())
 		return -ENODEV;
 
@@ -1394,7 +1461,13 @@ static int __init xlblk_init(void)
 		return -ENODEV;
 	}
 
-	return xenbus_register_frontend(&blkfront);
+	ret = xenbus_register_frontend(&blkfront);
+	if (ret) {
+		unregister_blkdev(XENVBD_MAJOR, DEV_NAME);
+		return ret;
+	}
+
+	return 0;
 }
 module_init(xlblk_init);
 
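
For context (not part of the patch above): once the backend advertises "feature-discard" and the frontend sets QUEUE_FLAG_DISCARD, the new BLKIF_OP_DISCARD path can be exercised from guest userspace with an ordinary BLKDISCARD ioctl. The following is a minimal illustrative sketch; the device path /dev/xvdb and the 1 MiB range are assumptions, not part of the patch.

/* Illustrative only -- issues a discard against a Xen virtual block device. */
#include <errno.h>
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/fs.h>

int main(void)
{
	uint64_t range[2] = { 0, 1 << 20 };	/* byte offset 0, length 1 MiB */
	int fd = open("/dev/xvdb", O_WRONLY);	/* hypothetical xvd device */

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* The block layer turns this into a REQ_DISCARD request, which the
	 * frontend forwards as BLKIF_OP_DISCARD on the ring. */
	if (ioctl(fd, BLKDISCARD, &range) < 0)
		fprintf(stderr, "BLKDISCARD: %s (backend may lack discard support)\n",
			strerror(errno));
	close(fd);
	return 0;
}

If the backend responds with BLKIF_RSP_EOPNOTSUPP, the interrupt handler above fails the request with -EOPNOTSUPP, clears feature_discard, and drops QUEUE_FLAG_DISCARD so later discards are not issued.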