aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--drivers/block/xen-blkback/blkback.c18
-rw-r--r--drivers/block/xen-blkback/common.h7
-rw-r--r--drivers/block/xen-blkback/xenbus.c12
-rw-r--r--drivers/block/xen-blkfront.c41
-rw-r--r--include/xen/interface/io/blkif.h18
5 files changed, 77 insertions, 19 deletions
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index d7104abc8b72..9d2261b02f24 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -422,13 +422,16 @@ static void xen_blk_discard(struct xen_blkif *blkif, struct blkif_request *req)
422 int status = BLKIF_RSP_OKAY; 422 int status = BLKIF_RSP_OKAY;
423 struct block_device *bdev = blkif->vbd.bdev; 423 struct block_device *bdev = blkif->vbd.bdev;
424 424
425 if (blkif->blk_backend_type == BLKIF_BACKEND_PHY) 425 if (blkif->blk_backend_type == BLKIF_BACKEND_PHY) {
426 unsigned long secure = (blkif->vbd.discard_secure &&
427 (req->u.discard.flag & BLKIF_DISCARD_SECURE)) ?
428 BLKDEV_DISCARD_SECURE : 0;
426 /* just forward the discard request */ 429 /* just forward the discard request */
427 err = blkdev_issue_discard(bdev, 430 err = blkdev_issue_discard(bdev,
428 req->u.discard.sector_number, 431 req->u.discard.sector_number,
429 req->u.discard.nr_sectors, 432 req->u.discard.nr_sectors,
430 GFP_KERNEL, 0); 433 GFP_KERNEL, secure);
431 else if (blkif->blk_backend_type == BLKIF_BACKEND_FILE) { 434 } else if (blkif->blk_backend_type == BLKIF_BACKEND_FILE) {
432 /* punch a hole in the backing file */ 435 /* punch a hole in the backing file */
433 struct loop_device *lo = bdev->bd_disk->private_data; 436 struct loop_device *lo = bdev->bd_disk->private_data;
434 struct file *file = lo->lo_backing_file; 437 struct file *file = lo->lo_backing_file;
@@ -643,8 +646,11 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
643 break; 646 break;
644 } 647 }
645 648
646 /* Check that the number of segments is sane. */ 649 if (unlikely(operation == REQ_DISCARD))
647 nseg = req->u.rw.nr_segments; 650 nseg = 0;
651 else
652 /* Check that the number of segments is sane. */
653 nseg = req->u.rw.nr_segments;
648 654
649 if (unlikely(nseg == 0 && operation != WRITE_FLUSH && 655 if (unlikely(nseg == 0 && operation != WRITE_FLUSH &&
650 operation != REQ_DISCARD) || 656 operation != REQ_DISCARD) ||
@@ -708,7 +714,7 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
708 * the hypercall to unmap the grants - that is all done in 714 * the hypercall to unmap the grants - that is all done in
709 * xen_blkbk_unmap. 715 * xen_blkbk_unmap.
710 */ 716 */
711 if (operation != REQ_DISCARD && xen_blkbk_map(req, pending_req, seg)) 717 if (nseg && xen_blkbk_map(req, pending_req, seg))
712 goto fail_flush; 718 goto fail_flush;
713 719
714 /* 720 /*
diff --git a/drivers/block/xen-blkback/common.h b/drivers/block/xen-blkback/common.h
index dbfe7b3b0737..d0ee7edc9be8 100644
--- a/drivers/block/xen-blkback/common.h
+++ b/drivers/block/xen-blkback/common.h
@@ -69,7 +69,7 @@ struct blkif_x86_32_request_rw {
69} __attribute__((__packed__)); 69} __attribute__((__packed__));
70 70
71struct blkif_x86_32_request_discard { 71struct blkif_x86_32_request_discard {
72 uint8_t nr_segments; /* number of segments */ 72 uint8_t flag; /* BLKIF_DISCARD_SECURE or zero */
73 blkif_vdev_t _pad1; /* was "handle" for read/write requests */ 73 blkif_vdev_t _pad1; /* was "handle" for read/write requests */
74 uint64_t id; /* private guest value, echoed in resp */ 74 uint64_t id; /* private guest value, echoed in resp */
75 blkif_sector_t sector_number;/* start sector idx on disk (r/w only) */ 75 blkif_sector_t sector_number;/* start sector idx on disk (r/w only) */
@@ -104,7 +104,7 @@ struct blkif_x86_64_request_rw {
104} __attribute__((__packed__)); 104} __attribute__((__packed__));
105 105
106struct blkif_x86_64_request_discard { 106struct blkif_x86_64_request_discard {
107 uint8_t nr_segments; /* number of segments */ 107 uint8_t flag; /* BLKIF_DISCARD_SECURE or zero */
108 blkif_vdev_t _pad1; /* was "handle" for read/write requests */ 108 blkif_vdev_t _pad1; /* was "handle" for read/write requests */
109 uint32_t _pad2; /* offsetof(blkif_..,u.discard.id)==8 */ 109 uint32_t _pad2; /* offsetof(blkif_..,u.discard.id)==8 */
110 uint64_t id; 110 uint64_t id;
@@ -164,6 +164,7 @@ struct xen_vbd {
164 /* Cached size parameter. */ 164 /* Cached size parameter. */
165 sector_t size; 165 sector_t size;
166 bool flush_support; 166 bool flush_support;
167 bool discard_secure;
167}; 168};
168 169
169struct backend_info; 170struct backend_info;
@@ -261,6 +262,7 @@ static inline void blkif_get_x86_32_req(struct blkif_request *dst,
261 dst->u.rw.seg[i] = src->u.rw.seg[i]; 262 dst->u.rw.seg[i] = src->u.rw.seg[i];
262 break; 263 break;
263 case BLKIF_OP_DISCARD: 264 case BLKIF_OP_DISCARD:
265 dst->u.discard.flag = src->u.discard.flag;
264 dst->u.discard.sector_number = src->u.discard.sector_number; 266 dst->u.discard.sector_number = src->u.discard.sector_number;
265 dst->u.discard.nr_sectors = src->u.discard.nr_sectors; 267 dst->u.discard.nr_sectors = src->u.discard.nr_sectors;
266 break; 268 break;
@@ -290,6 +292,7 @@ static inline void blkif_get_x86_64_req(struct blkif_request *dst,
290 dst->u.rw.seg[i] = src->u.rw.seg[i]; 292 dst->u.rw.seg[i] = src->u.rw.seg[i];
291 break; 293 break;
292 case BLKIF_OP_DISCARD: 294 case BLKIF_OP_DISCARD:
295 dst->u.discard.flag = src->u.discard.flag;
293 dst->u.discard.sector_number = src->u.discard.sector_number; 296 dst->u.discard.sector_number = src->u.discard.sector_number;
294 dst->u.discard.nr_sectors = src->u.discard.nr_sectors; 297 dst->u.discard.nr_sectors = src->u.discard.nr_sectors;
295 break; 298 break;
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
index f759ad4584c3..187fd2c1a15d 100644
--- a/drivers/block/xen-blkback/xenbus.c
+++ b/drivers/block/xen-blkback/xenbus.c
@@ -338,6 +338,9 @@ static int xen_vbd_create(struct xen_blkif *blkif, blkif_vdev_t handle,
338 if (q && q->flush_flags) 338 if (q && q->flush_flags)
339 vbd->flush_support = true; 339 vbd->flush_support = true;
340 340
341 if (q && blk_queue_secdiscard(q))
342 vbd->discard_secure = true;
343
341 DPRINTK("Successful creation of handle=%04x (dom=%u)\n", 344 DPRINTK("Successful creation of handle=%04x (dom=%u)\n",
342 handle, blkif->domid); 345 handle, blkif->domid);
343 return 0; 346 return 0;
@@ -420,6 +423,15 @@ int xen_blkbk_discard(struct xenbus_transaction xbt, struct backend_info *be)
420 state = 1; 423 state = 1;
421 blkif->blk_backend_type = BLKIF_BACKEND_PHY; 424 blkif->blk_backend_type = BLKIF_BACKEND_PHY;
422 } 425 }
426 /* Optional. */
427 err = xenbus_printf(xbt, dev->nodename,
428 "discard-secure", "%d",
429 blkif->vbd.discard_secure);
430 if (err) {
431 xenbus_dev_fatal(dev, err,
432 "writting discard-secure");
433 goto kfree;
434 }
423 } 435 }
424 } else { 436 } else {
425 err = PTR_ERR(type); 437 err = PTR_ERR(type);
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 2c2c4be7d9d6..351ddeffd430 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -98,7 +98,8 @@ struct blkfront_info
98 unsigned long shadow_free; 98 unsigned long shadow_free;
99 unsigned int feature_flush; 99 unsigned int feature_flush;
100 unsigned int flush_op; 100 unsigned int flush_op;
101 unsigned int feature_discard; 101 unsigned int feature_discard:1;
102 unsigned int feature_secdiscard:1;
102 unsigned int discard_granularity; 103 unsigned int discard_granularity;
103 unsigned int discard_alignment; 104 unsigned int discard_alignment;
104 int is_ready; 105 int is_ready;
@@ -305,11 +306,14 @@ static int blkif_queue_request(struct request *req)
305 ring_req->operation = info->flush_op; 306 ring_req->operation = info->flush_op;
306 } 307 }
307 308
308 if (unlikely(req->cmd_flags & REQ_DISCARD)) { 309 if (unlikely(req->cmd_flags & (REQ_DISCARD | REQ_SECURE))) {
309 /* id, sector_number and handle are set above. */ 310 /* id, sector_number and handle are set above. */
310 ring_req->operation = BLKIF_OP_DISCARD; 311 ring_req->operation = BLKIF_OP_DISCARD;
311 ring_req->u.discard.nr_segments = 0;
312 ring_req->u.discard.nr_sectors = blk_rq_sectors(req); 312 ring_req->u.discard.nr_sectors = blk_rq_sectors(req);
313 if ((req->cmd_flags & REQ_SECURE) && info->feature_secdiscard)
314 ring_req->u.discard.flag = BLKIF_DISCARD_SECURE;
315 else
316 ring_req->u.discard.flag = 0;
313 } else { 317 } else {
314 ring_req->u.rw.nr_segments = blk_rq_map_sg(req->q, req, 318 ring_req->u.rw.nr_segments = blk_rq_map_sg(req->q, req,
315 info->sg); 319 info->sg);
@@ -426,6 +430,8 @@ static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size)
426 blk_queue_max_discard_sectors(rq, get_capacity(gd)); 430 blk_queue_max_discard_sectors(rq, get_capacity(gd));
427 rq->limits.discard_granularity = info->discard_granularity; 431 rq->limits.discard_granularity = info->discard_granularity;
428 rq->limits.discard_alignment = info->discard_alignment; 432 rq->limits.discard_alignment = info->discard_alignment;
433 if (info->feature_secdiscard)
434 queue_flag_set_unlocked(QUEUE_FLAG_SECDISCARD, rq);
429 } 435 }
430 436
431 /* Hard sector size and max sectors impersonate the equiv. hardware. */ 437 /* Hard sector size and max sectors impersonate the equiv. hardware. */
@@ -707,6 +713,8 @@ static void blkif_free(struct blkfront_info *info, int suspend)
707static void blkif_completion(struct blk_shadow *s) 713static void blkif_completion(struct blk_shadow *s)
708{ 714{
709 int i; 715 int i;
716 /* Do not run this loop for BLKIF_OP_DISCARD requests: in the discard
717 * variant of the request union, the "flag" field occupies the slot of
717 * nr_segments, so the value read here would not be a segment count. */
710 for (i = 0; i < s->req.u.rw.nr_segments; i++) 718 for (i = 0; i < s->req.u.rw.nr_segments; i++)
711 gnttab_end_foreign_access(s->req.u.rw.seg[i].gref, 0, 0UL); 719 gnttab_end_foreign_access(s->req.u.rw.seg[i].gref, 0, 0UL);
712} 720}
@@ -738,7 +746,8 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
738 id = bret->id; 746 id = bret->id;
739 req = info->shadow[id].request; 747 req = info->shadow[id].request;
740 748
741 blkif_completion(&info->shadow[id]); 749 if (bret->operation != BLKIF_OP_DISCARD)
750 blkif_completion(&info->shadow[id]);
742 751
743 add_id_to_freelist(info, id); 752 add_id_to_freelist(info, id);
744 753
@@ -751,7 +760,9 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
751 info->gd->disk_name); 760 info->gd->disk_name);
752 error = -EOPNOTSUPP; 761 error = -EOPNOTSUPP;
753 info->feature_discard = 0; 762 info->feature_discard = 0;
763 info->feature_secdiscard = 0;
754 queue_flag_clear(QUEUE_FLAG_DISCARD, rq); 764 queue_flag_clear(QUEUE_FLAG_DISCARD, rq);
765 queue_flag_clear(QUEUE_FLAG_SECDISCARD, rq);
755 } 766 }
756 __blk_end_request_all(req, error); 767 __blk_end_request_all(req, error);
757 break; 768 break;
@@ -1039,13 +1050,15 @@ static int blkif_recover(struct blkfront_info *info)
1039 req->u.rw.id = get_id_from_freelist(info); 1050 req->u.rw.id = get_id_from_freelist(info);
1040 memcpy(&info->shadow[req->u.rw.id], &copy[i], sizeof(copy[i])); 1051 memcpy(&info->shadow[req->u.rw.id], &copy[i], sizeof(copy[i]));
1041 1052
1053 if (req->operation != BLKIF_OP_DISCARD) {
1042 /* Rewrite any grant references invalidated by susp/resume. */ 1054 /* Rewrite any grant references invalidated by susp/resume. */
1043 for (j = 0; j < req->u.rw.nr_segments; j++) 1055 for (j = 0; j < req->u.rw.nr_segments; j++)
1044 gnttab_grant_foreign_access_ref( 1056 gnttab_grant_foreign_access_ref(
1045 req->u.rw.seg[j].gref, 1057 req->u.rw.seg[j].gref,
1046 info->xbdev->otherend_id, 1058 info->xbdev->otherend_id,
1047 pfn_to_mfn(info->shadow[req->u.rw.id].frame[j]), 1059 pfn_to_mfn(info->shadow[req->u.rw.id].frame[j]),
1048 rq_data_dir(info->shadow[req->u.rw.id].request)); 1060 rq_data_dir(info->shadow[req->u.rw.id].request));
1061 }
1049 info->shadow[req->u.rw.id].req = *req; 1062 info->shadow[req->u.rw.id].req = *req;
1050 1063
1051 info->ring.req_prod_pvt++; 1064 info->ring.req_prod_pvt++;
@@ -1137,11 +1150,13 @@ static void blkfront_setup_discard(struct blkfront_info *info)
1137 char *type; 1150 char *type;
1138 unsigned int discard_granularity; 1151 unsigned int discard_granularity;
1139 unsigned int discard_alignment; 1152 unsigned int discard_alignment;
1153 unsigned int discard_secure;
1140 1154
1141 type = xenbus_read(XBT_NIL, info->xbdev->otherend, "type", NULL); 1155 type = xenbus_read(XBT_NIL, info->xbdev->otherend, "type", NULL);
1142 if (IS_ERR(type)) 1156 if (IS_ERR(type))
1143 return; 1157 return;
1144 1158
1159 info->feature_secdiscard = 0;
1145 if (strncmp(type, "phy", 3) == 0) { 1160 if (strncmp(type, "phy", 3) == 0) {
1146 err = xenbus_gather(XBT_NIL, info->xbdev->otherend, 1161 err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
1147 "discard-granularity", "%u", &discard_granularity, 1162 "discard-granularity", "%u", &discard_granularity,
@@ -1152,6 +1167,12 @@ static void blkfront_setup_discard(struct blkfront_info *info)
1152 info->discard_granularity = discard_granularity; 1167 info->discard_granularity = discard_granularity;
1153 info->discard_alignment = discard_alignment; 1168 info->discard_alignment = discard_alignment;
1154 } 1169 }
1170 err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
1171 "discard-secure", "%d", &discard_secure,
1172 NULL);
1173 if (!err)
1174 info->feature_secdiscard = discard_secure;
1175
1155 } else if (strncmp(type, "file", 4) == 0) 1176 } else if (strncmp(type, "file", 4) == 0)
1156 info->feature_discard = 1; 1177 info->feature_discard = 1;
1157 1178
diff --git a/include/xen/interface/io/blkif.h b/include/xen/interface/io/blkif.h
index f88e28b6a27c..ee338bfde18b 100644
--- a/include/xen/interface/io/blkif.h
+++ b/include/xen/interface/io/blkif.h
@@ -84,6 +84,21 @@ typedef uint64_t blkif_sector_t;
84 * e07154r6-Data_Set_Management_Proposal_for_ATA-ACS2.doc 84 * e07154r6-Data_Set_Management_Proposal_for_ATA-ACS2.doc
85 * http://www.seagate.com/staticfiles/support/disc/manuals/ 85 * http://www.seagate.com/staticfiles/support/disc/manuals/
86 * Interface%20manuals/100293068c.pdf 86 * Interface%20manuals/100293068c.pdf
87 * The backend can optionally provide three extra XenBus attributes to
88 * further optimize the discard functionality:
89 * 'discard-alignment' - Devices that support discard functionality may
90 * internally allocate space in units that are bigger than the exported
91 * logical block size. The discard-alignment parameter indicates how many bytes
92 * the beginning of the partition is offset from the internal allocation unit's
93 * natural alignment.
94 * 'discard-granularity' - Devices that support discard functionality may
95 * internally allocate space using units that are bigger than the logical block
96 * size. The discard-granularity parameter indicates the size of the internal
97 * allocation unit in bytes if reported by the device. Otherwise the
98 * discard-granularity will be set to match the device's physical block size.
99 * 'discard-secure' - All copies of the discarded sectors (potentially created
100 * by garbage collection) must also be erased. To use this feature, the flag
101 * BLKIF_DISCARD_SECURE must be set in the blkif_request_discard.
87 */ 102 */
88#define BLKIF_OP_DISCARD 5 103#define BLKIF_OP_DISCARD 5
89 104
@@ -111,7 +126,8 @@ struct blkif_request_rw {
111} __attribute__((__packed__)); 126} __attribute__((__packed__));
112 127
113struct blkif_request_discard { 128struct blkif_request_discard {
114 uint8_t nr_segments; /* number of segments */ 129 uint8_t flag; /* BLKIF_DISCARD_SECURE or zero. */
130#define BLKIF_DISCARD_SECURE (1<<0) /* ignored if discard-secure=0 */
115 blkif_vdev_t _pad1; /* only for read/write requests */ 131 blkif_vdev_t _pad1; /* only for read/write requests */
116#ifdef CONFIG_X86_64 132#ifdef CONFIG_X86_64
117 uint32_t _pad2; /* offsetof(blkif_req..,u.discard.id)==8*/ 133 uint32_t _pad2; /* offsetof(blkif_req..,u.discard.id)==8*/