aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/block/xen-blkfront.c
diff options
context:
space:
mode:
authorKonrad Rzeszutek Wilk <konrad.wilk@oracle.com>2011-10-12 16:23:30 -0400
committerKonrad Rzeszutek Wilk <konrad.wilk@oracle.com>2011-11-18 13:28:01 -0500
commit5ea42986694a96542644f9cae8b122d3a00c508f (patch)
treedd74685d8cd41e39ad14e708cb6e00e0ed016fb1 /drivers/block/xen-blkfront.c
parent97e36834f5a106459ab1b290e663a4eb6264639e (diff)
xen/blk[front|back]: Enhance discard support with secure erasing support.
Part of the blkdev_issue_discard(xx) operation is that it can also issue a secure discard operation that will permanently remove the sectors in question. We advertise that we can support that via the 'discard-secure' attribute and on the request, if the 'secure' bit is set, we will attempt to pass in REQ_DISCARD | REQ_SECURE. CC: Li Dongyang <lidongyang@novell.com> [v1: Used 'flag' instead of 'secure:1' bit] [v2: Use 'reserved' uint8_t instead of adding a new value] [v3: Check for nseg when mapping instead of operation] Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Diffstat (limited to 'drivers/block/xen-blkfront.c')
-rw-r--r--drivers/block/xen-blkfront.c41
1 file changed, 31 insertions, 10 deletions
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 2c2c4be7d9d6..351ddeffd430 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -98,7 +98,8 @@ struct blkfront_info
98 unsigned long shadow_free; 98 unsigned long shadow_free;
99 unsigned int feature_flush; 99 unsigned int feature_flush;
100 unsigned int flush_op; 100 unsigned int flush_op;
101 unsigned int feature_discard; 101 unsigned int feature_discard:1;
102 unsigned int feature_secdiscard:1;
102 unsigned int discard_granularity; 103 unsigned int discard_granularity;
103 unsigned int discard_alignment; 104 unsigned int discard_alignment;
104 int is_ready; 105 int is_ready;
@@ -305,11 +306,14 @@ static int blkif_queue_request(struct request *req)
305 ring_req->operation = info->flush_op; 306 ring_req->operation = info->flush_op;
306 } 307 }
307 308
308 if (unlikely(req->cmd_flags & REQ_DISCARD)) { 309 if (unlikely(req->cmd_flags & (REQ_DISCARD | REQ_SECURE))) {
309 /* id, sector_number and handle are set above. */ 310 /* id, sector_number and handle are set above. */
310 ring_req->operation = BLKIF_OP_DISCARD; 311 ring_req->operation = BLKIF_OP_DISCARD;
311 ring_req->u.discard.nr_segments = 0;
312 ring_req->u.discard.nr_sectors = blk_rq_sectors(req); 312 ring_req->u.discard.nr_sectors = blk_rq_sectors(req);
313 if ((req->cmd_flags & REQ_SECURE) && info->feature_secdiscard)
314 ring_req->u.discard.flag = BLKIF_DISCARD_SECURE;
315 else
316 ring_req->u.discard.flag = 0;
313 } else { 317 } else {
314 ring_req->u.rw.nr_segments = blk_rq_map_sg(req->q, req, 318 ring_req->u.rw.nr_segments = blk_rq_map_sg(req->q, req,
315 info->sg); 319 info->sg);
@@ -426,6 +430,8 @@ static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size)
426 blk_queue_max_discard_sectors(rq, get_capacity(gd)); 430 blk_queue_max_discard_sectors(rq, get_capacity(gd));
427 rq->limits.discard_granularity = info->discard_granularity; 431 rq->limits.discard_granularity = info->discard_granularity;
428 rq->limits.discard_alignment = info->discard_alignment; 432 rq->limits.discard_alignment = info->discard_alignment;
433 if (info->feature_secdiscard)
434 queue_flag_set_unlocked(QUEUE_FLAG_SECDISCARD, rq);
429 } 435 }
430 436
431 /* Hard sector size and max sectors impersonate the equiv. hardware. */ 437 /* Hard sector size and max sectors impersonate the equiv. hardware. */
@@ -707,6 +713,8 @@ static void blkif_free(struct blkfront_info *info, int suspend)
707static void blkif_completion(struct blk_shadow *s) 713static void blkif_completion(struct blk_shadow *s)
708{ 714{
709 int i; 715 int i;
716 /* Do not let BLKIF_OP_DISCARD as nr_segment is in the same place
717 * flag. */
710 for (i = 0; i < s->req.u.rw.nr_segments; i++) 718 for (i = 0; i < s->req.u.rw.nr_segments; i++)
711 gnttab_end_foreign_access(s->req.u.rw.seg[i].gref, 0, 0UL); 719 gnttab_end_foreign_access(s->req.u.rw.seg[i].gref, 0, 0UL);
712} 720}
@@ -738,7 +746,8 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
738 id = bret->id; 746 id = bret->id;
739 req = info->shadow[id].request; 747 req = info->shadow[id].request;
740 748
741 blkif_completion(&info->shadow[id]); 749 if (bret->operation != BLKIF_OP_DISCARD)
750 blkif_completion(&info->shadow[id]);
742 751
743 add_id_to_freelist(info, id); 752 add_id_to_freelist(info, id);
744 753
@@ -751,7 +760,9 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
751 info->gd->disk_name); 760 info->gd->disk_name);
752 error = -EOPNOTSUPP; 761 error = -EOPNOTSUPP;
753 info->feature_discard = 0; 762 info->feature_discard = 0;
763 info->feature_secdiscard = 0;
754 queue_flag_clear(QUEUE_FLAG_DISCARD, rq); 764 queue_flag_clear(QUEUE_FLAG_DISCARD, rq);
765 queue_flag_clear(QUEUE_FLAG_SECDISCARD, rq);
755 } 766 }
756 __blk_end_request_all(req, error); 767 __blk_end_request_all(req, error);
757 break; 768 break;
@@ -1039,13 +1050,15 @@ static int blkif_recover(struct blkfront_info *info)
1039 req->u.rw.id = get_id_from_freelist(info); 1050 req->u.rw.id = get_id_from_freelist(info);
1040 memcpy(&info->shadow[req->u.rw.id], &copy[i], sizeof(copy[i])); 1051 memcpy(&info->shadow[req->u.rw.id], &copy[i], sizeof(copy[i]));
1041 1052
1053 if (req->operation != BLKIF_OP_DISCARD) {
1042 /* Rewrite any grant references invalidated by susp/resume. */ 1054 /* Rewrite any grant references invalidated by susp/resume. */
1043 for (j = 0; j < req->u.rw.nr_segments; j++) 1055 for (j = 0; j < req->u.rw.nr_segments; j++)
1044 gnttab_grant_foreign_access_ref( 1056 gnttab_grant_foreign_access_ref(
1045 req->u.rw.seg[j].gref, 1057 req->u.rw.seg[j].gref,
1046 info->xbdev->otherend_id, 1058 info->xbdev->otherend_id,
1047 pfn_to_mfn(info->shadow[req->u.rw.id].frame[j]), 1059 pfn_to_mfn(info->shadow[req->u.rw.id].frame[j]),
1048 rq_data_dir(info->shadow[req->u.rw.id].request)); 1060 rq_data_dir(info->shadow[req->u.rw.id].request));
1061 }
1049 info->shadow[req->u.rw.id].req = *req; 1062 info->shadow[req->u.rw.id].req = *req;
1050 1063
1051 info->ring.req_prod_pvt++; 1064 info->ring.req_prod_pvt++;
@@ -1137,11 +1150,13 @@ static void blkfront_setup_discard(struct blkfront_info *info)
1137 char *type; 1150 char *type;
1138 unsigned int discard_granularity; 1151 unsigned int discard_granularity;
1139 unsigned int discard_alignment; 1152 unsigned int discard_alignment;
1153 unsigned int discard_secure;
1140 1154
1141 type = xenbus_read(XBT_NIL, info->xbdev->otherend, "type", NULL); 1155 type = xenbus_read(XBT_NIL, info->xbdev->otherend, "type", NULL);
1142 if (IS_ERR(type)) 1156 if (IS_ERR(type))
1143 return; 1157 return;
1144 1158
1159 info->feature_secdiscard = 0;
1145 if (strncmp(type, "phy", 3) == 0) { 1160 if (strncmp(type, "phy", 3) == 0) {
1146 err = xenbus_gather(XBT_NIL, info->xbdev->otherend, 1161 err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
1147 "discard-granularity", "%u", &discard_granularity, 1162 "discard-granularity", "%u", &discard_granularity,
@@ -1152,6 +1167,12 @@ static void blkfront_setup_discard(struct blkfront_info *info)
1152 info->discard_granularity = discard_granularity; 1167 info->discard_granularity = discard_granularity;
1153 info->discard_alignment = discard_alignment; 1168 info->discard_alignment = discard_alignment;
1154 } 1169 }
1170 err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
1171 "discard-secure", "%d", &discard_secure,
1172 NULL);
1173 if (!err)
1174 info->feature_secdiscard = discard_secure;
1175
1155 } else if (strncmp(type, "file", 4) == 0) 1176 } else if (strncmp(type, "file", 4) == 0)
1156 info->feature_discard = 1; 1177 info->feature_discard = 1;
1157 1178