author     Jens Axboe <axboe@kernel.dk>    2013-11-01 12:14:56 -0400
committer  Jens Axboe <axboe@kernel.dk>    2013-11-08 11:10:29 -0500
commit     fcd37eb3c1347193935d07a82b84dfc7d418dd05 (patch)
tree       38a0933d5bd43f2d5d43b98c00a6c1bfb8811788 /drivers/block
parent     1762b57fcbe365c2e3f79769a7fe77942ea3165f (diff)
skd: rip out bio path
The skd driver has a selectable rq or bio based queueing model. For
3.14, we want to turn this into a single blk-mq interface instead.
With the immutable biovecs being merged in 3.13, the bio model would
need patches to even work. So rip it out, with a conversion pending
for blk-mq in the next release.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
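For context on what the skd_bio switch removed below actually selected between, here is a minimal sketch of the two legacy (pre-blk-mq) queueing models. It is not code from skd_main.c; the mydev_* names are made up for illustration, but the block-layer calls are the same ones visible in the patch (blk_init_queue / blk_peek_request / blk_start_request on the request path, blk_alloc_queue / blk_queue_make_request / bio_endio on the bio path), using their ~3.12-era signatures.

/*
 * Illustrative sketch only: the rq-vs-bio split that skd_bio used to toggle.
 * mydev_* identifiers are hypothetical.
 */
#include <linux/blkdev.h>
#include <linux/bio.h>

static DEFINE_SPINLOCK(mydev_lock);

/* Request model: the block core builds struct request, the driver drains the queue. */
static void mydev_request_fn(struct request_queue *q)
{
        struct request *req;

        while ((req = blk_peek_request(q)) != NULL) {
                blk_start_request(req);  /* dequeue; the driver now owns it */
                /* issue to hardware; complete later via __blk_end_request_all() */
        }
}

/* bio model: the driver is handed raw bios and does its own queueing. */
static void mydev_make_request(struct request_queue *q, struct bio *bio)
{
        /* normally: add to an internal bio_list and kick the hardware */
        bio_endio(bio, 0);  /* two-argument form used in this era */
}

static struct request_queue *mydev_alloc_queue(bool use_bio)
{
        struct request_queue *q;

        if (!use_bio)
                return blk_init_queue(mydev_request_fn, &mydev_lock);

        q = blk_alloc_queue(GFP_KERNEL);
        if (q)
                blk_queue_make_request(q, mydev_make_request);
        return q;
}

The patch below deletes everything on the use_bio side of this fork; the blk-mq conversion mentioned above later replaces the request_fn path as well.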
Diffstat (limited to 'drivers/block')
-rw-r--r--  drivers/block/skd_main.c  529
1 file changed, 62 insertions(+), 467 deletions(-)
diff --git a/drivers/block/skd_main.c b/drivers/block/skd_main.c
index 1a8717fce41d..49e1e8b48422 100644
--- a/drivers/block/skd_main.c
+++ b/drivers/block/skd_main.c
@@ -354,13 +354,7 @@ struct skd_device {
 
         u32 timo_slot;
 
-
         struct work_struct completion_worker;
-
-        struct bio_list bio_queue;
-        int queue_stopped;
-
-        struct list_head flush_list;
 };
 
 #define SKD_FLUSH_JOB "skd-flush-jobs"
@@ -470,11 +464,6 @@ MODULE_PARM_DESC(skd_dbg_level, "s1120 debug level (0,1,2)");
 module_param(skd_isr_comp_limit, int, 0444);
 MODULE_PARM_DESC(skd_isr_comp_limit, "s1120 isr comp limit (0=none) default=4");
 
-static int skd_bio;
-module_param(skd_bio, int, 0444);
-MODULE_PARM_DESC(skd_bio,
-                 "Register as a bio device instead of block (0, 1) default=0");
-
 /* Major device number dynamically assigned. */
 static u32 skd_major;
 
@@ -512,11 +501,6 @@ static void skd_log_skmsg(struct skd_device *skdev,
 static void skd_log_skreq(struct skd_device *skdev,
                           struct skd_request_context *skreq, const char *event);
 
-/* FLUSH FUA flag handling. */
-static int skd_flush_cmd_enqueue(struct skd_device *, void *);
-static void *skd_flush_cmd_dequeue(struct skd_device *);
-
-
 /*
  *****************************************************************************
  * READ/WRITE REQUESTS
@@ -524,40 +508,25 @@ static void *skd_flush_cmd_dequeue(struct skd_device *);
  */
 static void skd_stop_queue(struct skd_device *skdev)
 {
-        if (!skd_bio)
-                blk_stop_queue(skdev->queue);
-        else
-                skdev->queue_stopped = 1;
+        blk_stop_queue(skdev->queue);
 }
 
 static void skd_unstop_queue(struct skd_device *skdev)
 {
-        if (!skd_bio)
-                queue_flag_clear(QUEUE_FLAG_STOPPED, skdev->queue);
-        else
-                skdev->queue_stopped = 0;
+        queue_flag_clear(QUEUE_FLAG_STOPPED, skdev->queue);
 }
 
 static void skd_start_queue(struct skd_device *skdev)
 {
-        if (!skd_bio) {
-                blk_start_queue(skdev->queue);
-        } else {
-                pr_err("(%s): Starting queue\n", skd_name(skdev));
-                skdev->queue_stopped = 0;
-                skd_request_fn(skdev->queue);
-        }
+        blk_start_queue(skdev->queue);
 }
 
 static int skd_queue_stopped(struct skd_device *skdev)
 {
-        if (!skd_bio)
-                return blk_queue_stopped(skdev->queue);
-        else
-                return skdev->queue_stopped;
+        return blk_queue_stopped(skdev->queue);
 }
 
-static void skd_fail_all_pending_blk(struct skd_device *skdev)
+static void skd_fail_all_pending(struct skd_device *skdev)
 {
         struct request_queue *q = skdev->queue;
         struct request *req;
@@ -571,42 +540,6 @@ static void skd_fail_all_pending_blk(struct skd_device *skdev)
         }
 }
 
-static void skd_fail_all_pending_bio(struct skd_device *skdev)
-{
-        struct bio *bio;
-        int error = -EIO;
-
-        for (;; ) {
-                bio = bio_list_pop(&skdev->bio_queue);
-
-                if (bio == NULL)
-                        break;
-
-                bio_endio(bio, error);
-        }
-}
-
-static void skd_fail_all_pending(struct skd_device *skdev)
-{
-        if (!skd_bio)
-                skd_fail_all_pending_blk(skdev);
-        else
-                skd_fail_all_pending_bio(skdev);
-}
-
-static void skd_make_request(struct request_queue *q, struct bio *bio)
-{
-        struct skd_device *skdev = q->queuedata;
-        unsigned long flags;
-
-        spin_lock_irqsave(&skdev->lock, flags);
-
-        bio_list_add(&skdev->bio_queue, bio);
-        skd_request_fn(skdev->queue);
-
-        spin_unlock_irqrestore(&skdev->lock, flags);
-}
-
 static void
 skd_prep_rw_cdb(struct skd_scsi_request *scsi_req,
                 int data_dir, unsigned lba,
@@ -667,18 +600,9 @@ skd_prep_discard_cdb(struct skd_scsi_request *scsi_req,
         put_unaligned_be64(lba, &buf[8]);
         put_unaligned_be32(count, &buf[16]);
 
-        if (!skd_bio) {
-                req = skreq->req;
-                blk_add_request_payload(req, page, len);
-                req->buffer = buf;
-        } else {
-                skreq->bio->bi_io_vec->bv_page = page;
-                skreq->bio->bi_io_vec->bv_offset = 0;
-                skreq->bio->bi_io_vec->bv_len = len;
-
-                skreq->bio->bi_vcnt = 1;
-                skreq->bio->bi_phys_segments = 1;
-        }
+        req = skreq->req;
+        blk_add_request_payload(req, page, len);
+        req->buffer = buf;
 }
 
 static void skd_request_fn_not_online(struct request_queue *q);
@@ -690,7 +614,6 @@ static void skd_request_fn(struct request_queue *q)
         struct fit_msg_hdr *fmh = NULL;
         struct skd_request_context *skreq;
         struct request *req = NULL;
-        struct bio *bio = NULL;
         struct skd_scsi_request *scsi_req;
         struct page *page;
         unsigned long io_flags;
@@ -732,60 +655,27 @@ static void skd_request_fn(struct request_queue *q)
 
                 flush = fua = 0;
 
-                if (!skd_bio) {
-                        req = blk_peek_request(q);
-
-                        /* Are there any native requests to start? */
-                        if (req == NULL)
-                                break;
-
-                        lba = (u32)blk_rq_pos(req);
-                        count = blk_rq_sectors(req);
-                        data_dir = rq_data_dir(req);
-                        io_flags = req->cmd_flags;
-
-                        if (io_flags & REQ_FLUSH)
-                                flush++;
-
-                        if (io_flags & REQ_FUA)
-                                fua++;
+                req = blk_peek_request(q);
 
-                        pr_debug("%s:%s:%d new req=%p lba=%u(0x%x) "
-                                 "count=%u(0x%x) dir=%d\n",
-                                 skdev->name, __func__, __LINE__,
-                                 req, lba, lba, count, count, data_dir);
-                } else {
-                        if (!list_empty(&skdev->flush_list)) {
-                                /* Process data part of FLUSH request. */
-                                bio = (struct bio *)skd_flush_cmd_dequeue(skdev);
-                                flush++;
-                                pr_debug("%s:%s:%d processing FLUSH request with data.\n",
-                                         skdev->name, __func__, __LINE__);
-                        } else {
-                                /* peek at our bio queue */
-                                bio = bio_list_peek(&skdev->bio_queue);
-                        }
+                /* Are there any native requests to start? */
+                if (req == NULL)
+                        break;
 
-                        /* Are there any native requests to start? */
-                        if (bio == NULL)
-                                break;
+                lba = (u32)blk_rq_pos(req);
+                count = blk_rq_sectors(req);
+                data_dir = rq_data_dir(req);
+                io_flags = req->cmd_flags;
 
-                        lba = (u32)bio->bi_sector;
-                        count = bio_sectors(bio);
-                        data_dir = bio_data_dir(bio);
-                        io_flags = bio->bi_rw;
+                if (io_flags & REQ_FLUSH)
+                        flush++;
 
-                        pr_debug("%s:%s:%d new bio=%p lba=%u(0x%x) "
-                                 "count=%u(0x%x) dir=%d\n",
-                                 skdev->name, __func__, __LINE__,
-                                 bio, lba, lba, count, count, data_dir);
+                if (io_flags & REQ_FUA)
+                        fua++;
 
-                        if (io_flags & REQ_FLUSH)
-                                flush++;
-
-                        if (io_flags & REQ_FUA)
-                                fua++;
-                }
+                pr_debug("%s:%s:%d new req=%p lba=%u(0x%x) "
+                         "count=%u(0x%x) dir=%d\n",
+                         skdev->name, __func__, __LINE__,
+                         req, lba, lba, count, count, data_dir);
 
                 /* At this point we know there is a request
                  * (from our bio q or req q depending on the way
@@ -831,23 +721,9 @@ static void skd_request_fn(struct request_queue *q)
                  * the native request. Note that skd_request_context is
                  * available but is still at the head of the free list.
                  */
-                if (!skd_bio) {
-                        blk_start_request(req);
-                        skreq->req = req;
-                        skreq->fitmsg_id = 0;
-                } else {
-                        if (unlikely(flush == SKD_FLUSH_DATA_SECOND)) {
-                                skreq->bio = bio;
-                        } else {
-                                skreq->bio = bio_list_pop(&skdev->bio_queue);
-                                SKD_ASSERT(skreq->bio == bio);
-                                skreq->start_time = jiffies;
-                                part_inc_in_flight(&skdev->disk->part0,
-                                                   bio_data_dir(bio));
-                        }
-
-                        skreq->fitmsg_id = 0;
-                }
+                blk_start_request(req);
+                skreq->req = req;
+                skreq->fitmsg_id = 0;
 
                 /* Either a FIT msg is in progress or we have to start one. */
                 if (skmsg == NULL) {
@@ -923,8 +799,7 @@ static void skd_request_fn(struct request_queue *q)
                 if (fua)
                         scsi_req->cdb[1] |= SKD_FUA_NV;
 
-                if ((!skd_bio && !req->bio) ||
-                        (skd_bio && flush == SKD_FLUSH_ZERO_SIZE_FIRST))
+                if (!req->bio)
                         goto skip_sg;
 
                 error = skd_preop_sg_list(skdev, skreq);
@@ -1011,8 +886,7 @@ skip_sg:
          * If req is non-NULL it means there is something to do but
          * we are out of a resource.
          */
-        if (((!skd_bio) && req) ||
-            ((skd_bio) && bio_list_peek(&skdev->bio_queue)))
+        if (req)
                 skd_stop_queue(skdev);
 }
 
@@ -1045,7 +919,7 @@ static void skd_end_request_blk(struct skd_device *skdev,
         __blk_end_request_all(skreq->req, error);
 }
 
-static int skd_preop_sg_list_blk(struct skd_device *skdev,
+static int skd_preop_sg_list(struct skd_device *skdev,
                                  struct skd_request_context *skreq)
 {
         struct request *req = skreq->req;
@@ -1108,7 +982,7 @@ static int skd_preop_sg_list_blk(struct skd_device *skdev,
         return 0;
 }
 
-static void skd_postop_sg_list_blk(struct skd_device *skdev,
+static void skd_postop_sg_list(struct skd_device *skdev,
                                    struct skd_request_context *skreq)
 {
         int writing = skreq->sg_data_dir == SKD_DATA_DIR_HOST_TO_CARD;
@@ -1124,184 +998,10 @@ static void skd_postop_sg_list_blk(struct skd_device *skdev,
         pci_unmap_sg(skdev->pdev, &skreq->sg[0], skreq->n_sg, pci_dir);
 }
 
-static void skd_end_request_bio(struct skd_device *skdev,
-                                struct skd_request_context *skreq, int error)
-{
-        struct bio *bio = skreq->bio;
-        int rw = bio_data_dir(bio);
-        unsigned long io_flags = bio->bi_rw;
-
-        if ((io_flags & REQ_DISCARD) &&
-                (skreq->discard_page == 1)) {
-                pr_debug("%s:%s:%d biomode: skd_end_request: freeing DISCARD page.\n",
-                         skdev->name, __func__, __LINE__);
-                free_page((unsigned long)page_address(bio->bi_io_vec->bv_page));
-        }
-
-        if (unlikely(error)) {
-                u32 lba = (u32)skreq->bio->bi_sector;
-                u32 count = bio_sectors(skreq->bio);
-                char *cmd = (rw == WRITE) ? "write" : "read";
-                pr_err("(%s): Error cmd=%s sect=%u count=%u id=0x%x\n",
-                       skd_name(skdev), cmd, lba, count, skreq->id);
-        }
-        {
-                int cpu = part_stat_lock();
-
-                if (likely(!error)) {
-                        part_stat_inc(cpu, &skdev->disk->part0, ios[rw]);
-                        part_stat_add(cpu, &skdev->disk->part0, sectors[rw],
-                                      bio_sectors(bio));
-                }
-                part_stat_add(cpu, &skdev->disk->part0, ticks[rw],
-                              jiffies - skreq->start_time);
-                part_dec_in_flight(&skdev->disk->part0, rw);
-                part_stat_unlock();
-        }
-
-        pr_debug("%s:%s:%d id=0x%x error=%d\n",
-                 skdev->name, __func__, __LINE__, skreq->id, error);
-
-        bio_endio(skreq->bio, error);
-}
-
-static int skd_preop_sg_list_bio(struct skd_device *skdev,
-                                 struct skd_request_context *skreq)
-{
-        struct bio *bio = skreq->bio;
-        int writing = skreq->sg_data_dir == SKD_DATA_DIR_HOST_TO_CARD;
-        int pci_dir = writing ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE;
-        int n_sg;
-        int i;
-        struct bio_vec *vec;
-        struct fit_sg_descriptor *sgd;
-        u64 dma_addr;
-        u32 count;
-        int errs = 0;
-        unsigned int io_flags = 0;
-        io_flags |= bio->bi_rw;
-
-        skreq->sg_byte_count = 0;
-        n_sg = skreq->n_sg = skreq->bio->bi_vcnt;
-
-        if (n_sg <= 0)
-                return -EINVAL;
-
-        if (n_sg > skdev->sgs_per_request) {
-                pr_err("(%s): sg overflow n=%d\n",
-                       skd_name(skdev), n_sg);
-                skreq->n_sg = 0;
-                return -EIO;
-        }
-
-        for (i = 0; i < skreq->n_sg; i++) {
-                vec = bio_iovec_idx(bio, i);
-                dma_addr = pci_map_page(skdev->pdev,
-                                        vec->bv_page,
-                                        vec->bv_offset, vec->bv_len, pci_dir);
-                count = vec->bv_len;
-
-                if (count == 0 || count > 64u * 1024u || (count & 3) != 0
-                    || (dma_addr & 3) != 0) {
-                        pr_err(
-                                "(%s): Bad sg ix=%d count=%d addr=0x%llx\n",
-                                skd_name(skdev), i, count, dma_addr);
-                        errs++;
-                }
-
-                sgd = &skreq->sksg_list[i];
-
-                sgd->control = FIT_SGD_CONTROL_NOT_LAST;
-                sgd->byte_count = vec->bv_len;
-                skreq->sg_byte_count += vec->bv_len;
-                sgd->host_side_addr = dma_addr;
-                sgd->dev_side_addr = 0; /* not used */
-        }
-
-        skreq->sksg_list[n_sg - 1].next_desc_ptr = 0LL;
-        skreq->sksg_list[n_sg - 1].control = FIT_SGD_CONTROL_LAST;
-
-
-        if (!(io_flags & REQ_DISCARD)) {
-                count = bio_sectors(bio) << 9u;
-                if (count != skreq->sg_byte_count) {
-                        pr_err("(%s): mismatch count sg=%d req=%d\n",
-                               skd_name(skdev), skreq->sg_byte_count, count);
-                        errs++;
-                }
-        }
-
-        if (unlikely(skdev->dbg_level > 1)) {
-                pr_debug("%s:%s:%d skreq=%x sksg_list=%p sksg_dma=%llx\n",
-                         skdev->name, __func__, __LINE__,
-                         skreq->id, skreq->sksg_list, skreq->sksg_dma_address);
-                for (i = 0; i < n_sg; i++) {
-                        struct fit_sg_descriptor *sgd = &skreq->sksg_list[i];
-                        pr_debug("%s:%s:%d sg[%d] count=%u ctrl=0x%x "
-                                 "addr=0x%llx next=0x%llx\n",
-                                 skdev->name, __func__, __LINE__,
-                                 i, sgd->byte_count, sgd->control,
-                                 sgd->host_side_addr, sgd->next_desc_ptr);
-                }
-        }
-
-        if (errs != 0) {
-                skd_postop_sg_list(skdev, skreq);
-                skreq->n_sg = 0;
-                return -EIO;
-        }
-
-        return 0;
-}
-
-static int skd_preop_sg_list(struct skd_device *skdev,
-                             struct skd_request_context *skreq)
-{
-        if (!skd_bio)
-                return skd_preop_sg_list_blk(skdev, skreq);
-        else
-                return skd_preop_sg_list_bio(skdev, skreq);
-}
-
-static void skd_postop_sg_list_bio(struct skd_device *skdev,
-                                   struct skd_request_context *skreq)
-{
-        int writing = skreq->sg_data_dir == SKD_DATA_DIR_HOST_TO_CARD;
-        int pci_dir = writing ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE;
-        int i;
-        struct fit_sg_descriptor *sgd;
-
-        /*
-         * restore the next ptr for next IO request so we
-         * don't have to set it every time.
-         */
-        skreq->sksg_list[skreq->n_sg - 1].next_desc_ptr =
-                skreq->sksg_dma_address +
-                ((skreq->n_sg) * sizeof(struct fit_sg_descriptor));
-
-        for (i = 0; i < skreq->n_sg; i++) {
-                sgd = &skreq->sksg_list[i];
-                pci_unmap_page(skdev->pdev, sgd->host_side_addr,
-                               sgd->byte_count, pci_dir);
-        }
-}
-
-static void skd_postop_sg_list(struct skd_device *skdev,
-                               struct skd_request_context *skreq)
-{
-        if (!skd_bio)
-                skd_postop_sg_list_blk(skdev, skreq);
-        else
-                skd_postop_sg_list_bio(skdev, skreq);
-}
-
 static void skd_end_request(struct skd_device *skdev,
                             struct skd_request_context *skreq, int error)
 {
-        if (likely(!skd_bio))
-                skd_end_request_blk(skdev, skreq, error);
-        else
-                skd_end_request_bio(skdev, skreq, error);
+        skd_end_request_blk(skdev, skreq, error);
 }
 
 static void skd_request_fn_not_online(struct request_queue *q)
@@ -2754,13 +2454,10 @@ static void skd_resolve_req_exception(struct skd_device *skdev,
                 break;
 
         case SKD_CHECK_STATUS_REQUEUE_REQUEST:
-                if (!skd_bio) {
-                        if ((unsigned long) ++skreq->req->special <
-                            SKD_MAX_RETRIES) {
-                                skd_log_skreq(skdev, skreq, "retry");
-                                skd_requeue_request(skdev, skreq);
-                                break;
-                        }
+                if ((unsigned long) ++skreq->req->special < SKD_MAX_RETRIES) {
+                        skd_log_skreq(skdev, skreq, "retry");
+                        skd_requeue_request(skdev, skreq);
+                        break;
                 }
                 /* fall through to report error */
 
@@ -2774,12 +2471,7 @@ static void skd_resolve_req_exception(struct skd_device *skdev,
 static void skd_requeue_request(struct skd_device *skdev,
                                 struct skd_request_context *skreq)
 {
-        if (!skd_bio) {
-                blk_requeue_request(skdev->queue, skreq->req);
-        } else {
-                bio_list_add_head(&skdev->bio_queue, skreq->bio);
-                skreq->bio = NULL;
-        }
+        blk_requeue_request(skdev->queue, skreq->req);
 }
 
 
@@ -2840,11 +2532,7 @@ static void skd_release_skreq(struct skd_device *skdev,
         /*
          * Reset backpointer
          */
-        if (likely(!skd_bio))
-                skreq->req = NULL;
-        else
-                skreq->bio = NULL;
-
+        skreq->req = NULL;
 
         /*
          * Reclaim the skd_request_context
@@ -3084,8 +2772,6 @@ static int skd_isr_completion_posted(struct skd_device *skdev,
         u32 cmp_bytes = 0;
         int rc = 0;
         int processed = 0;
-        int ret;
-
 
         for (;; ) {
                 SKD_ASSERT(skdev->skcomp_ix < SKD_N_COMPLETION_ENTRY);
@@ -3180,8 +2866,7 @@ static int skd_isr_completion_posted(struct skd_device *skdev,
                 if (skreq->n_sg > 0)
                         skd_postop_sg_list(skdev, skreq);
 
-                if (((!skd_bio) && !skreq->req) ||
-                    ((skd_bio) && !skreq->bio)) {
+                if (!skreq->req) {
                         pr_debug("%s:%s:%d NULL backptr skdreq %p, "
                                  "req=0x%x req_id=0x%x\n",
                                  skdev->name, __func__, __LINE__,
@@ -3191,30 +2876,10 @@ static int skd_isr_completion_posted(struct skd_device *skdev,
                  * Capture the outcome and post it back to the
                  * native request.
                  */
-                if (likely(cmp_status == SAM_STAT_GOOD)) {
-                        if (unlikely(skreq->flush_cmd)) {
-                                if (skd_bio) {
-                                        /* if empty size bio, we are all done */
-                                        if (bio_sectors(skreq->bio) == 0) {
-                                                skd_end_request(skdev, skreq, 0);
-                                        } else {
-                                                ret = skd_flush_cmd_enqueue(skdev, (void *)skreq->bio);
-                                                if (ret != 0) {
-                                                        pr_err("Failed to enqueue flush bio with Data. Err=%d.\n", ret);
-                                                        skd_end_request(skdev, skreq, ret);
-                                                } else {
-                                                        ((*enqueued)++);
-                                                }
-                                        }
-                                } else {
-                                        skd_end_request(skdev, skreq, 0);
-                                }
-                        } else {
-                                skd_end_request(skdev, skreq, 0);
-                        }
-                } else {
+                if (likely(cmp_status == SAM_STAT_GOOD))
+                        skd_end_request(skdev, skreq, 0);
+                else
                         skd_resolve_req_exception(skdev, skreq);
-                }
         }
 
         /*
@@ -3645,29 +3310,20 @@ static void skd_recover_requests(struct skd_device *skdev, int requeue)
                 skd_log_skreq(skdev, skreq, "recover");
 
                 SKD_ASSERT((skreq->id & SKD_ID_INCR) != 0);
-                if (!skd_bio)
-                        SKD_ASSERT(skreq->req != NULL);
-                else
-                        SKD_ASSERT(skreq->bio != NULL);
+                SKD_ASSERT(skreq->req != NULL);
 
                 /* Release DMA resources for the request. */
                 if (skreq->n_sg > 0)
                         skd_postop_sg_list(skdev, skreq);
 
-                if (!skd_bio) {
-                        if (requeue &&
-                            (unsigned long) ++skreq->req->special <
-                            SKD_MAX_RETRIES)
-                                skd_requeue_request(skdev, skreq);
-                        else
-                                skd_end_request(skdev, skreq, -EIO);
-                } else
+                if (requeue &&
+                    (unsigned long) ++skreq->req->special <
+                    SKD_MAX_RETRIES)
+                        skd_requeue_request(skdev, skreq);
+                else
                         skd_end_request(skdev, skreq, -EIO);
 
-                if (!skd_bio)
-                        skreq->req = NULL;
-                else
-                        skreq->bio = NULL;
+                skreq->req = NULL;
 
                 skreq->state = SKD_REQ_STATE_IDLE;
                 skreq->id += SKD_ID_INCR;
@@ -4580,16 +4236,11 @@ static struct skd_device *skd_construct(struct pci_dev *pdev)
         skdev->sgs_per_request = skd_sgs_per_request;
         skdev->dbg_level = skd_dbg_level;
 
-        if (skd_bio)
-                bio_list_init(&skdev->bio_queue);
-
-
         atomic_set(&skdev->device_count, 0);
 
         spin_lock_init(&skdev->lock);
 
         INIT_WORK(&skdev->completion_worker, skd_completion_worker);
-        INIT_LIST_HEAD(&skdev->flush_list);
 
         pr_debug("%s:%s:%d skcomp\n", skdev->name, __func__, __LINE__);
         rc = skd_cons_skcomp(skdev);
@@ -4941,13 +4592,7 @@ static int skd_cons_disk(struct skd_device *skdev)
         disk->fops = &skd_blockdev_ops;
         disk->private_data = skdev;
 
-        if (!skd_bio) {
-                q = blk_init_queue(skd_request_fn, &skdev->lock);
-        } else {
-                q = blk_alloc_queue(GFP_KERNEL);
-                q->queue_flags = QUEUE_FLAG_IO_STAT | QUEUE_FLAG_STACKABLE;
-        }
-
+        q = blk_init_queue(skd_request_fn, &skdev->lock);
         if (!q) {
                 rc = -ENOMEM;
                 goto err_out;
@@ -4957,11 +4602,6 @@ static int skd_cons_disk(struct skd_device *skdev)
         disk->queue = q;
         q->queuedata = skdev;
 
-        if (skd_bio) {
-                q->queue_lock = &skdev->lock;
-                blk_queue_make_request(q, skd_make_request);
-        }
-
         blk_queue_flush(q, REQ_FLUSH | REQ_FUA);
         blk_queue_max_segments(q, skdev->sgs_per_request);
         blk_queue_max_hw_sectors(q, SKD_N_MAX_SECTORS);
@@ -5794,35 +5434,19 @@ static void skd_log_skreq(struct skd_device *skdev,
                  skdev->name, __func__, __LINE__,
                  skreq->timeout_stamp, skreq->sg_data_dir, skreq->n_sg);
 
-        if (!skd_bio) {
-                if (skreq->req != NULL) {
-                        struct request *req = skreq->req;
-                        u32 lba = (u32)blk_rq_pos(req);
-                        u32 count = blk_rq_sectors(req);
-
-                        pr_debug("%s:%s:%d "
-                                 "req=%p lba=%u(0x%x) count=%u(0x%x) dir=%d\n",
-                                 skdev->name, __func__, __LINE__,
-                                 req, lba, lba, count, count,
-                                 (int)rq_data_dir(req));
-                } else
-                        pr_debug("%s:%s:%d req=NULL\n",
-                                 skdev->name, __func__, __LINE__);
-        } else {
-                if (skreq->bio != NULL) {
-                        struct bio *bio = skreq->bio;
-                        u32 lba = (u32)bio->bi_sector;
-                        u32 count = bio_sectors(bio);
+        if (skreq->req != NULL) {
+                struct request *req = skreq->req;
+                u32 lba = (u32)blk_rq_pos(req);
+                u32 count = blk_rq_sectors(req);
 
-                        pr_debug("%s:%s:%d "
-                                 "bio=%p lba=%u(0x%x) count=%u(0x%x) dir=%d\n",
-                                 skdev->name, __func__, __LINE__,
-                                 bio, lba, lba, count, count,
-                                 (int)bio_data_dir(bio));
-                } else
-                        pr_debug("%s:%s:%d req=NULL\n",
-                                 skdev->name, __func__, __LINE__);
-        }
+                pr_debug("%s:%s:%d "
+                         "req=%p lba=%u(0x%x) count=%u(0x%x) dir=%d\n",
+                         skdev->name, __func__, __LINE__,
+                         req, lba, lba, count, count,
+                         (int)rq_data_dir(req));
+        } else
+                pr_debug("%s:%s:%d req=NULL\n",
+                         skdev->name, __func__, __LINE__);
 }
 
 /*
@@ -5918,34 +5542,5 @@ static void __exit skd_exit(void)
         kmem_cache_destroy(skd_flush_slab);
 }
 
-static int
-skd_flush_cmd_enqueue(struct skd_device *skdev, void *cmd)
-{
-        struct skd_flush_cmd *item;
-
-        item = kmem_cache_zalloc(skd_flush_slab, GFP_ATOMIC);
-        if (!item) {
-                pr_err("skd_flush_cmd_enqueue: Failed to allocated item.\n");
-                return -ENOMEM;
-        }
-
-        item->cmd = cmd;
-        list_add_tail(&item->flist, &skdev->flush_list);
-        return 0;
-}
-
-static void *
-skd_flush_cmd_dequeue(struct skd_device *skdev)
-{
-        void *cmd;
-        struct skd_flush_cmd *item;
-
-        item = list_entry(skdev->flush_list.next, struct skd_flush_cmd, flist);
-        list_del_init(&item->flist);
-        cmd = item->cmd;
-        kmem_cache_free(skd_flush_slab, item);
-        return cmd;
-}
-
 module_init(skd_init);
 module_exit(skd_exit);