aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/block
diff options
context:
space:
mode:
authorLi Dongyang <lidongyang@novell.com>2011-09-01 06:39:10 -0400
committerKonrad Rzeszutek Wilk <konrad.wilk@oracle.com>2011-10-13 09:48:30 -0400
commitb3cb0d6adc4bbc70b5e37e49a6068e973545ead7 (patch)
treeb52aeb45ea918b18c056aaeae2bb405d19100829 /drivers/block
parent32a8d26cc9b96629269e04ee6c583e14398f6f47 (diff)
xen-blkback: Implement discard requests ('feature-discard')
..aka ATA TRIM/SCSI UNMAP command to be passed through the frontend and used appropriately by the backend. We also advertise certain granularity parameters to the frontend so it can plug them in. If the backend is a real device - we just end up using 'blkdev_issue_discard' while for loopback devices - we just punch a hole in the image file. Signed-off-by: Li Dongyang <lidongyang@novell.com> [v1: Fixed up pr_debug and commit description] Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Diffstat (limited to 'drivers/block')
-rw-r--r--drivers/block/xen-blkback/blkback.c86
-rw-r--r--drivers/block/xen-blkback/common.h93
-rw-r--r--drivers/block/xen-blkback/xenbus.c58
3 files changed, 206 insertions, 31 deletions
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index 2330a9ad5e95..9713d5a490e4 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -39,6 +39,9 @@
39#include <linux/list.h> 39#include <linux/list.h>
40#include <linux/delay.h> 40#include <linux/delay.h>
41#include <linux/freezer.h> 41#include <linux/freezer.h>
42#include <linux/loop.h>
43#include <linux/falloc.h>
44#include <linux/fs.h>
42 45
43#include <xen/events.h> 46#include <xen/events.h>
44#include <xen/page.h> 47#include <xen/page.h>
@@ -258,13 +261,16 @@ irqreturn_t xen_blkif_be_int(int irq, void *dev_id)
258 261
259static void print_stats(struct xen_blkif *blkif) 262static void print_stats(struct xen_blkif *blkif)
260{ 263{
261 pr_info("xen-blkback (%s): oo %3d | rd %4d | wr %4d | f %4d\n", 264 pr_info("xen-blkback (%s): oo %3d | rd %4d | wr %4d | f %4d"
265 " | ds %4d\n",
262 current->comm, blkif->st_oo_req, 266 current->comm, blkif->st_oo_req,
263 blkif->st_rd_req, blkif->st_wr_req, blkif->st_f_req); 267 blkif->st_rd_req, blkif->st_wr_req,
268 blkif->st_f_req, blkif->st_ds_req);
264 blkif->st_print = jiffies + msecs_to_jiffies(10 * 1000); 269 blkif->st_print = jiffies + msecs_to_jiffies(10 * 1000);
265 blkif->st_rd_req = 0; 270 blkif->st_rd_req = 0;
266 blkif->st_wr_req = 0; 271 blkif->st_wr_req = 0;
267 blkif->st_oo_req = 0; 272 blkif->st_oo_req = 0;
273 blkif->st_ds_req = 0;
268} 274}
269 275
270int xen_blkif_schedule(void *arg) 276int xen_blkif_schedule(void *arg)
@@ -410,6 +416,42 @@ static int xen_blkbk_map(struct blkif_request *req,
410 return ret; 416 return ret;
411} 417}
412 418
419static void xen_blk_discard(struct xen_blkif *blkif, struct blkif_request *req)
420{
421 int err = 0;
422 int status = BLKIF_RSP_OKAY;
423 struct block_device *bdev = blkif->vbd.bdev;
424
425 if (blkif->blk_backend_type == BLKIF_BACKEND_PHY)
426 /* just forward the discard request */
427 err = blkdev_issue_discard(bdev,
428 req->u.discard.sector_number,
429 req->u.discard.nr_sectors,
430 GFP_KERNEL, 0);
431 else if (blkif->blk_backend_type == BLKIF_BACKEND_FILE) {
432 /* punch a hole in the backing file */
433 struct loop_device *lo = bdev->bd_disk->private_data;
434 struct file *file = lo->lo_backing_file;
435
436 if (file->f_op->fallocate)
437 err = file->f_op->fallocate(file,
438 FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE,
439 req->u.discard.sector_number << 9,
440 req->u.discard.nr_sectors << 9);
441 else
442 err = -EOPNOTSUPP;
443 } else
444 err = -EOPNOTSUPP;
445
446 if (err == -EOPNOTSUPP) {
447 pr_debug(DRV_PFX "discard op failed, not supported\n");
448 status = BLKIF_RSP_EOPNOTSUPP;
449 } else if (err)
450 status = BLKIF_RSP_ERROR;
451
452 make_response(blkif, req->id, req->operation, status);
453}
454
413/* 455/*
414 * Completion callback on the bio's. Called as bh->b_end_io() 456 * Completion callback on the bio's. Called as bh->b_end_io()
415 */ 457 */
@@ -563,6 +605,10 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
563 blkif->st_f_req++; 605 blkif->st_f_req++;
564 operation = WRITE_FLUSH; 606 operation = WRITE_FLUSH;
565 break; 607 break;
608 case BLKIF_OP_DISCARD:
609 blkif->st_ds_req++;
610 operation = REQ_DISCARD;
611 break;
566 case BLKIF_OP_WRITE_BARRIER: 612 case BLKIF_OP_WRITE_BARRIER:
567 default: 613 default:
568 operation = 0; /* make gcc happy */ 614 operation = 0; /* make gcc happy */
@@ -572,7 +618,8 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
572 618
573 /* Check that the number of segments is sane. */ 619 /* Check that the number of segments is sane. */
574 nseg = req->nr_segments; 620 nseg = req->nr_segments;
575 if (unlikely(nseg == 0 && operation != WRITE_FLUSH) || 621 if (unlikely(nseg == 0 && operation != WRITE_FLUSH &&
622 operation != REQ_DISCARD) ||
576 unlikely(nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST)) { 623 unlikely(nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST)) {
577 pr_debug(DRV_PFX "Bad number of segments in request (%d)\n", 624 pr_debug(DRV_PFX "Bad number of segments in request (%d)\n",
578 nseg); 625 nseg);
@@ -627,10 +674,14 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
627 * the hypercall to unmap the grants - that is all done in 674 * the hypercall to unmap the grants - that is all done in
628 * xen_blkbk_unmap. 675 * xen_blkbk_unmap.
629 */ 676 */
630 if (xen_blkbk_map(req, pending_req, seg)) 677 if (operation != BLKIF_OP_DISCARD &&
678 xen_blkbk_map(req, pending_req, seg))
631 goto fail_flush; 679 goto fail_flush;
632 680
633 /* This corresponding xen_blkif_put is done in __end_block_io_op */ 681 /*
682 * This corresponding xen_blkif_put is done in __end_block_io_op, or
683 * below (in "!bio") if we are handling a BLKIF_OP_DISCARD.
684 */
634 xen_blkif_get(blkif); 685 xen_blkif_get(blkif);
635 686
636 for (i = 0; i < nseg; i++) { 687 for (i = 0; i < nseg; i++) {
@@ -654,18 +705,25 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
654 preq.sector_number += seg[i].nsec; 705 preq.sector_number += seg[i].nsec;
655 } 706 }
656 707
657 /* This will be hit if the operation was a flush. */ 708 /* This will be hit if the operation was a flush or discard. */
658 if (!bio) { 709 if (!bio) {
659 BUG_ON(operation != WRITE_FLUSH); 710 BUG_ON(operation != WRITE_FLUSH && operation != REQ_DISCARD);
660 711
661 bio = bio_alloc(GFP_KERNEL, 0); 712 if (operation == WRITE_FLUSH) {
662 if (unlikely(bio == NULL)) 713 bio = bio_alloc(GFP_KERNEL, 0);
663 goto fail_put_bio; 714 if (unlikely(bio == NULL))
715 goto fail_put_bio;
664 716
665 biolist[nbio++] = bio; 717 biolist[nbio++] = bio;
666 bio->bi_bdev = preq.bdev; 718 bio->bi_bdev = preq.bdev;
667 bio->bi_private = pending_req; 719 bio->bi_private = pending_req;
668 bio->bi_end_io = end_block_io_op; 720 bio->bi_end_io = end_block_io_op;
721 } else if (operation == REQ_DISCARD) {
722 xen_blk_discard(blkif, req);
723 xen_blkif_put(blkif);
724 free_req(pending_req);
725 return 0;
726 }
669 } 727 }
670 728
671 /* 729 /*
diff --git a/drivers/block/xen-blkback/common.h b/drivers/block/xen-blkback/common.h
index 9e40b283a468..bfb532ea5b1b 100644
--- a/drivers/block/xen-blkback/common.h
+++ b/drivers/block/xen-blkback/common.h
@@ -63,13 +63,26 @@ struct blkif_common_response {
63 63
64/* i386 protocol version */ 64/* i386 protocol version */
65#pragma pack(push, 4) 65#pragma pack(push, 4)
66
67struct blkif_x86_32_request_rw {
68 blkif_sector_t sector_number;/* start sector idx on disk (r/w only) */
69 struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
70};
71
72struct blkif_x86_32_request_discard {
73 blkif_sector_t sector_number;/* start sector idx on disk (r/w only) */
74 uint64_t nr_sectors;
75};
76
66struct blkif_x86_32_request { 77struct blkif_x86_32_request {
67 uint8_t operation; /* BLKIF_OP_??? */ 78 uint8_t operation; /* BLKIF_OP_??? */
68 uint8_t nr_segments; /* number of segments */ 79 uint8_t nr_segments; /* number of segments */
69 blkif_vdev_t handle; /* only for read/write requests */ 80 blkif_vdev_t handle; /* only for read/write requests */
70 uint64_t id; /* private guest value, echoed in resp */ 81 uint64_t id; /* private guest value, echoed in resp */
71 blkif_sector_t sector_number;/* start sector idx on disk (r/w only) */ 82 union {
72 struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST]; 83 struct blkif_x86_32_request_rw rw;
84 struct blkif_x86_32_request_discard discard;
85 } u;
73}; 86};
74struct blkif_x86_32_response { 87struct blkif_x86_32_response {
75 uint64_t id; /* copied from request */ 88 uint64_t id; /* copied from request */
@@ -79,13 +92,26 @@ struct blkif_x86_32_response {
79#pragma pack(pop) 92#pragma pack(pop)
80 93
81/* x86_64 protocol version */ 94/* x86_64 protocol version */
95
96struct blkif_x86_64_request_rw {
97 blkif_sector_t sector_number;/* start sector idx on disk (r/w only) */
98 struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
99};
100
101struct blkif_x86_64_request_discard {
102 blkif_sector_t sector_number;/* start sector idx on disk (r/w only) */
103 uint64_t nr_sectors;
104};
105
82struct blkif_x86_64_request { 106struct blkif_x86_64_request {
83 uint8_t operation; /* BLKIF_OP_??? */ 107 uint8_t operation; /* BLKIF_OP_??? */
84 uint8_t nr_segments; /* number of segments */ 108 uint8_t nr_segments; /* number of segments */
85 blkif_vdev_t handle; /* only for read/write requests */ 109 blkif_vdev_t handle; /* only for read/write requests */
86 uint64_t __attribute__((__aligned__(8))) id; 110 uint64_t __attribute__((__aligned__(8))) id;
87 blkif_sector_t sector_number;/* start sector idx on disk (r/w only) */ 111 union {
88 struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST]; 112 struct blkif_x86_64_request_rw rw;
113 struct blkif_x86_64_request_discard discard;
114 } u;
89}; 115};
90struct blkif_x86_64_response { 116struct blkif_x86_64_response {
91 uint64_t __attribute__((__aligned__(8))) id; 117 uint64_t __attribute__((__aligned__(8))) id;
@@ -113,6 +139,11 @@ enum blkif_protocol {
113 BLKIF_PROTOCOL_X86_64 = 3, 139 BLKIF_PROTOCOL_X86_64 = 3,
114}; 140};
115 141
142enum blkif_backend_type {
143 BLKIF_BACKEND_PHY = 1,
144 BLKIF_BACKEND_FILE = 2,
145};
146
116struct xen_vbd { 147struct xen_vbd {
117 /* What the domain refers to this vbd as. */ 148 /* What the domain refers to this vbd as. */
118 blkif_vdev_t handle; 149 blkif_vdev_t handle;
@@ -138,6 +169,7 @@ struct xen_blkif {
138 unsigned int irq; 169 unsigned int irq;
139 /* Comms information. */ 170 /* Comms information. */
140 enum blkif_protocol blk_protocol; 171 enum blkif_protocol blk_protocol;
172 enum blkif_backend_type blk_backend_type;
141 union blkif_back_rings blk_rings; 173 union blkif_back_rings blk_rings;
142 struct vm_struct *blk_ring_area; 174 struct vm_struct *blk_ring_area;
143 /* The VBD attached to this interface. */ 175 /* The VBD attached to this interface. */
@@ -159,6 +191,7 @@ struct xen_blkif {
159 int st_wr_req; 191 int st_wr_req;
160 int st_oo_req; 192 int st_oo_req;
161 int st_f_req; 193 int st_f_req;
194 int st_ds_req;
162 int st_rd_sect; 195 int st_rd_sect;
163 int st_wr_sect; 196 int st_wr_sect;
164 197
@@ -182,7 +215,7 @@ struct xen_blkif {
182 215
183struct phys_req { 216struct phys_req {
184 unsigned short dev; 217 unsigned short dev;
185 unsigned short nr_sects; 218 blkif_sector_t nr_sects;
186 struct block_device *bdev; 219 struct block_device *bdev;
187 blkif_sector_t sector_number; 220 blkif_sector_t sector_number;
188}; 221};
@@ -206,12 +239,25 @@ static inline void blkif_get_x86_32_req(struct blkif_request *dst,
206 dst->nr_segments = src->nr_segments; 239 dst->nr_segments = src->nr_segments;
207 dst->handle = src->handle; 240 dst->handle = src->handle;
208 dst->id = src->id; 241 dst->id = src->id;
209 dst->u.rw.sector_number = src->sector_number; 242 switch (src->operation) {
210 barrier(); 243 case BLKIF_OP_READ:
211 if (n > dst->nr_segments) 244 case BLKIF_OP_WRITE:
212 n = dst->nr_segments; 245 case BLKIF_OP_WRITE_BARRIER:
213 for (i = 0; i < n; i++) 246 case BLKIF_OP_FLUSH_DISKCACHE:
214 dst->u.rw.seg[i] = src->seg[i]; 247 dst->u.rw.sector_number = src->u.rw.sector_number;
248 barrier();
249 if (n > dst->nr_segments)
250 n = dst->nr_segments;
251 for (i = 0; i < n; i++)
252 dst->u.rw.seg[i] = src->u.rw.seg[i];
253 break;
254 case BLKIF_OP_DISCARD:
255 dst->u.discard.sector_number = src->u.discard.sector_number;
256 dst->u.discard.nr_sectors = src->u.discard.nr_sectors;
257 break;
258 default:
259 break;
260 }
215} 261}
216 262
217static inline void blkif_get_x86_64_req(struct blkif_request *dst, 263static inline void blkif_get_x86_64_req(struct blkif_request *dst,
@@ -222,12 +268,25 @@ static inline void blkif_get_x86_64_req(struct blkif_request *dst,
222 dst->nr_segments = src->nr_segments; 268 dst->nr_segments = src->nr_segments;
223 dst->handle = src->handle; 269 dst->handle = src->handle;
224 dst->id = src->id; 270 dst->id = src->id;
225 dst->u.rw.sector_number = src->sector_number; 271 switch (src->operation) {
226 barrier(); 272 case BLKIF_OP_READ:
227 if (n > dst->nr_segments) 273 case BLKIF_OP_WRITE:
228 n = dst->nr_segments; 274 case BLKIF_OP_WRITE_BARRIER:
229 for (i = 0; i < n; i++) 275 case BLKIF_OP_FLUSH_DISKCACHE:
230 dst->u.rw.seg[i] = src->seg[i]; 276 dst->u.rw.sector_number = src->u.rw.sector_number;
277 barrier();
278 if (n > dst->nr_segments)
279 n = dst->nr_segments;
280 for (i = 0; i < n; i++)
281 dst->u.rw.seg[i] = src->u.rw.seg[i];
282 break;
283 case BLKIF_OP_DISCARD:
284 dst->u.discard.sector_number = src->u.discard.sector_number;
285 dst->u.discard.nr_sectors = src->u.discard.nr_sectors;
286 break;
287 default:
288 break;
289 }
231} 290}
232 291
233#endif /* __XEN_BLKIF__BACKEND__COMMON_H__ */ 292#endif /* __XEN_BLKIF__BACKEND__COMMON_H__ */
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
index 3f129b45451a..2b3aef0332f3 100644
--- a/drivers/block/xen-blkback/xenbus.c
+++ b/drivers/block/xen-blkback/xenbus.c
@@ -272,6 +272,7 @@ VBD_SHOW(oo_req, "%d\n", be->blkif->st_oo_req);
272VBD_SHOW(rd_req, "%d\n", be->blkif->st_rd_req); 272VBD_SHOW(rd_req, "%d\n", be->blkif->st_rd_req);
273VBD_SHOW(wr_req, "%d\n", be->blkif->st_wr_req); 273VBD_SHOW(wr_req, "%d\n", be->blkif->st_wr_req);
274VBD_SHOW(f_req, "%d\n", be->blkif->st_f_req); 274VBD_SHOW(f_req, "%d\n", be->blkif->st_f_req);
275VBD_SHOW(ds_req, "%d\n", be->blkif->st_ds_req);
275VBD_SHOW(rd_sect, "%d\n", be->blkif->st_rd_sect); 276VBD_SHOW(rd_sect, "%d\n", be->blkif->st_rd_sect);
276VBD_SHOW(wr_sect, "%d\n", be->blkif->st_wr_sect); 277VBD_SHOW(wr_sect, "%d\n", be->blkif->st_wr_sect);
277 278
@@ -280,6 +281,7 @@ static struct attribute *xen_vbdstat_attrs[] = {
280 &dev_attr_rd_req.attr, 281 &dev_attr_rd_req.attr,
281 &dev_attr_wr_req.attr, 282 &dev_attr_wr_req.attr,
282 &dev_attr_f_req.attr, 283 &dev_attr_f_req.attr,
284 &dev_attr_ds_req.attr,
283 &dev_attr_rd_sect.attr, 285 &dev_attr_rd_sect.attr,
284 &dev_attr_wr_sect.attr, 286 &dev_attr_wr_sect.attr,
285 NULL 287 NULL
@@ -419,6 +421,60 @@ int xen_blkbk_flush_diskcache(struct xenbus_transaction xbt,
419 return err; 421 return err;
420} 422}
421 423
424int xen_blkbk_discard(struct xenbus_transaction xbt, struct backend_info *be)
425{
426 struct xenbus_device *dev = be->dev;
427 struct xen_blkif *blkif = be->blkif;
428 char *type;
429 int err;
430 int state = 0;
431
432 type = xenbus_read(XBT_NIL, dev->nodename, "type", NULL);
433 if (!IS_ERR(type)) {
434 if (strncmp(type, "file", 4) == 0) {
435 state = 1;
436 blkif->blk_backend_type = BLKIF_BACKEND_FILE;
437 }
438 if (strncmp(type, "phy", 3) == 0) {
439 struct block_device *bdev = be->blkif->vbd.bdev;
440 struct request_queue *q = bdev_get_queue(bdev);
441 if (blk_queue_discard(q)) {
442 err = xenbus_printf(xbt, dev->nodename,
443 "discard-granularity", "%u",
444 q->limits.discard_granularity);
445 if (err) {
446 xenbus_dev_fatal(dev, err,
447 "writing discard-granularity");
448 goto kfree;
449 }
450 err = xenbus_printf(xbt, dev->nodename,
451 "discard-alignment", "%u",
452 q->limits.discard_alignment);
453 if (err) {
454 xenbus_dev_fatal(dev, err,
455 "writing discard-alignment");
456 goto kfree;
457 }
458 state = 1;
459 blkif->blk_backend_type = BLKIF_BACKEND_PHY;
460 }
461 }
462 } else {
463 err = PTR_ERR(type);
464 xenbus_dev_fatal(dev, err, "reading type");
465 goto out;
466 }
467
468 err = xenbus_printf(xbt, dev->nodename, "feature-discard",
469 "%d", state);
470 if (err)
471 xenbus_dev_fatal(dev, err, "writing feature-discard");
472kfree:
473 kfree(type);
474out:
475 return err;
476}
477
422/* 478/*
423 * Entry point to this code when a new device is created. Allocate the basic 479 * Entry point to this code when a new device is created. Allocate the basic
424 * structures, and watch the store waiting for the hotplug scripts to tell us 480 * structures, and watch the store waiting for the hotplug scripts to tell us
@@ -650,6 +706,8 @@ again:
650 if (err) 706 if (err)
651 goto abort; 707 goto abort;
652 708
709 err = xen_blkbk_discard(xbt, be);
710
653 err = xenbus_printf(xbt, dev->nodename, "sectors", "%llu", 711 err = xenbus_printf(xbt, dev->nodename, "sectors", "%llu",
654 (unsigned long long)vbd_sz(&be->blkif->vbd)); 712 (unsigned long long)vbd_sz(&be->blkif->vbd));
655 if (err) { 713 if (err) {