about summary refs log tree commit diff stats
path: root/drivers/block/skd_main.c
diff options
context:
space:
mode:
authorJeff Moyer <jmoyer@redhat.com>2016-04-25 21:12:38 -0400
committerJens Axboe <axboe@fb.com>2016-04-25 21:12:38 -0400
commit49bdedb36271fe6259dd251bb63c5879fa7834e1 (patch)
treede1024f9c3f9607a08169ea0c8732c8152efeca8 /drivers/block/skd_main.c
parentc888a8f95ae5b1067855235b3b71c1ebccf504f5 (diff)
skd: remove broken discard support
Simply creating a file system on an skd device, followed by mount and fstrim will result in errors in the logs and then a BUG(). Let's remove discard support from that driver. As far as I can tell, it hasn't worked right since it was merged. This patch also has a side-effect of cleaning up an unintentional shadowed declaration inside of skd_end_request. I tested to ensure that I can still do I/O to the device using xfstests ./check -g quick. I didn't do anything more extensive than that, though. Signed-off-by: Jeff Moyer <jmoyer@redhat.com> Signed-off-by: Jens Axboe <axboe@fb.com>
Diffstat (limited to 'drivers/block/skd_main.c')
-rw-r--r--drivers/block/skd_main.c59
1 file changed, 1 insertion, 58 deletions
diff --git a/drivers/block/skd_main.c b/drivers/block/skd_main.c
index 41aaae30c005..910e065918af 100644
--- a/drivers/block/skd_main.c
+++ b/drivers/block/skd_main.c
@@ -133,7 +133,6 @@ MODULE_VERSION(DRV_VERSION "-" DRV_BUILD_ID);
133#define SKD_TIMER_MINUTES(minutes) ((minutes) * (60)) 133#define SKD_TIMER_MINUTES(minutes) ((minutes) * (60))
134 134
135#define INQ_STD_NBYTES 36 135#define INQ_STD_NBYTES 36
136#define SKD_DISCARD_CDB_LENGTH 24
137 136
138enum skd_drvr_state { 137enum skd_drvr_state {
139 SKD_DRVR_STATE_LOAD, 138 SKD_DRVR_STATE_LOAD,
@@ -212,7 +211,6 @@ struct skd_request_context {
212 211
213 struct request *req; 212 struct request *req;
214 u8 flush_cmd; 213 u8 flush_cmd;
215 u8 discard_page;
216 214
217 u32 timeout_stamp; 215 u32 timeout_stamp;
218 u8 sg_data_dir; 216 u8 sg_data_dir;
@@ -230,7 +228,6 @@ struct skd_request_context {
230}; 228};
231#define SKD_DATA_DIR_HOST_TO_CARD 1 229#define SKD_DATA_DIR_HOST_TO_CARD 1
232#define SKD_DATA_DIR_CARD_TO_HOST 2 230#define SKD_DATA_DIR_CARD_TO_HOST 2
233#define SKD_DATA_DIR_NONE 3 /* especially for DISCARD requests. */
234 231
235struct skd_special_context { 232struct skd_special_context {
236 struct skd_request_context req; 233 struct skd_request_context req;
@@ -540,31 +537,6 @@ skd_prep_zerosize_flush_cdb(struct skd_scsi_request *scsi_req,
540 scsi_req->cdb[9] = 0; 537 scsi_req->cdb[9] = 0;
541} 538}
542 539
543static void
544skd_prep_discard_cdb(struct skd_scsi_request *scsi_req,
545 struct skd_request_context *skreq,
546 struct page *page,
547 u32 lba, u32 count)
548{
549 char *buf;
550 unsigned long len;
551 struct request *req;
552
553 buf = page_address(page);
554 len = SKD_DISCARD_CDB_LENGTH;
555
556 scsi_req->cdb[0] = UNMAP;
557 scsi_req->cdb[8] = len;
558
559 put_unaligned_be16(6 + 16, &buf[0]);
560 put_unaligned_be16(16, &buf[2]);
561 put_unaligned_be64(lba, &buf[8]);
562 put_unaligned_be32(count, &buf[16]);
563
564 req = skreq->req;
565 blk_add_request_payload(req, page, 0, len);
566}
567
568static void skd_request_fn_not_online(struct request_queue *q); 540static void skd_request_fn_not_online(struct request_queue *q);
569 541
570static void skd_request_fn(struct request_queue *q) 542static void skd_request_fn(struct request_queue *q)
@@ -575,7 +547,6 @@ static void skd_request_fn(struct request_queue *q)
575 struct skd_request_context *skreq; 547 struct skd_request_context *skreq;
576 struct request *req = NULL; 548 struct request *req = NULL;
577 struct skd_scsi_request *scsi_req; 549 struct skd_scsi_request *scsi_req;
578 struct page *page;
579 unsigned long io_flags; 550 unsigned long io_flags;
580 int error; 551 int error;
581 u32 lba; 552 u32 lba;
@@ -669,7 +640,6 @@ static void skd_request_fn(struct request_queue *q)
669 skreq->flush_cmd = 0; 640 skreq->flush_cmd = 0;
670 skreq->n_sg = 0; 641 skreq->n_sg = 0;
671 skreq->sg_byte_count = 0; 642 skreq->sg_byte_count = 0;
672 skreq->discard_page = 0;
673 643
674 /* 644 /*
675 * OK to now dequeue request from q. 645 * OK to now dequeue request from q.
@@ -735,18 +705,7 @@ static void skd_request_fn(struct request_queue *q)
735 else 705 else
736 skreq->sg_data_dir = SKD_DATA_DIR_HOST_TO_CARD; 706 skreq->sg_data_dir = SKD_DATA_DIR_HOST_TO_CARD;
737 707
738 if (io_flags & REQ_DISCARD) { 708 if (flush == SKD_FLUSH_ZERO_SIZE_FIRST) {
739 page = alloc_page(GFP_ATOMIC | __GFP_ZERO);
740 if (!page) {
741 pr_err("request_fn:Page allocation failed.\n");
742 skd_end_request(skdev, skreq, -ENOMEM);
743 break;
744 }
745 skreq->discard_page = 1;
746 req->completion_data = page;
747 skd_prep_discard_cdb(scsi_req, skreq, page, lba, count);
748
749 } else if (flush == SKD_FLUSH_ZERO_SIZE_FIRST) {
750 skd_prep_zerosize_flush_cdb(scsi_req, skreq); 709 skd_prep_zerosize_flush_cdb(scsi_req, skreq);
751 SKD_ASSERT(skreq->flush_cmd == 1); 710 SKD_ASSERT(skreq->flush_cmd == 1);
752 711
@@ -851,16 +810,6 @@ skip_sg:
851static void skd_end_request(struct skd_device *skdev, 810static void skd_end_request(struct skd_device *skdev,
852 struct skd_request_context *skreq, int error) 811 struct skd_request_context *skreq, int error)
853{ 812{
854 struct request *req = skreq->req;
855 unsigned int io_flags = req->cmd_flags;
856
857 if ((io_flags & REQ_DISCARD) &&
858 (skreq->discard_page == 1)) {
859 pr_debug("%s:%s:%d, free the page!",
860 skdev->name, __func__, __LINE__);
861 __free_page(req->completion_data);
862 }
863
864 if (unlikely(error)) { 813 if (unlikely(error)) {
865 struct request *req = skreq->req; 814 struct request *req = skreq->req;
866 char *cmd = (rq_data_dir(req) == READ) ? "read" : "write"; 815 char *cmd = (rq_data_dir(req) == READ) ? "read" : "write";
@@ -4419,12 +4368,6 @@ static int skd_cons_disk(struct skd_device *skdev)
4419 /* set sysfs ptimal_io_size to 8K */ 4368 /* set sysfs ptimal_io_size to 8K */
4420 blk_queue_io_opt(q, 8192); 4369 blk_queue_io_opt(q, 8192);
4421 4370
4422 /* DISCARD Flag initialization. */
4423 q->limits.discard_granularity = 8192;
4424 q->limits.discard_alignment = 0;
4425 blk_queue_max_discard_sectors(q, UINT_MAX >> 9);
4426 q->limits.discard_zeroes_data = 1;
4427 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
4428 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q); 4371 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
4429 queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, q); 4372 queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, q);
4430 4373