aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/lightnvm/rrpc.c
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2015-11-10 20:23:49 -0500
committerLinus Torvalds <torvalds@linux-foundation.org>2015-11-10 20:23:49 -0500
commit3419b45039c6b799c974a8019361c045e7ca232c (patch)
tree36a63602036cc50f34fadcbd5d5d8fca94e44297 /drivers/lightnvm/rrpc.c
parent01504f5e9e071f1dde1062e3be15f54d4555308f (diff)
parentc1c534609fe8a859f9c8108a5591e6e8a97e34d1 (diff)
Merge branch 'for-4.4/io-poll' of git://git.kernel.dk/linux-block
Pull block IO poll support from Jens Axboe:
 "Various groups have been doing experimentation around IO polling for
  (really) fast devices. The code has been reviewed and has been
  sitting on the side for a few releases, but this is now good enough
  for coordinated benchmarking and further experimentation.

  Currently O_DIRECT sync read/write are supported. A framework is in
  the works that allows scalable stats tracking so we can auto-tune
  this. And we'll add libaio support as well soon.

  For now, it's an opt-in feature for test purposes"

* 'for-4.4/io-poll' of git://git.kernel.dk/linux-block:
  direct-io: be sure to assign dio->bio_bdev for both paths
  directio: add block polling support
  NVMe: add blk polling support
  block: add block polling support
  blk-mq: return tag/queue combo in the make_request_fn handlers
  block: change ->make_request_fn() and users to return a queue cookie
Diffstat (limited to 'drivers/lightnvm/rrpc.c')
-rw-r--r--  drivers/lightnvm/rrpc.c | 9 +++++----
1 file changed, 5 insertions(+), 4 deletions(-)
diff --git a/drivers/lightnvm/rrpc.c b/drivers/lightnvm/rrpc.c
index 64a888a5e9b3..7ba64c87ba1c 100644
--- a/drivers/lightnvm/rrpc.c
+++ b/drivers/lightnvm/rrpc.c
@@ -803,7 +803,7 @@ static int rrpc_submit_io(struct rrpc *rrpc, struct bio *bio,
 	return NVM_IO_OK;
 }
 
-static void rrpc_make_rq(struct request_queue *q, struct bio *bio)
+static blk_qc_t rrpc_make_rq(struct request_queue *q, struct bio *bio)
 {
 	struct rrpc *rrpc = q->queuedata;
 	struct nvm_rq *rqd;
@@ -811,21 +811,21 @@ static void rrpc_make_rq(struct request_queue *q, struct bio *bio)
 
 	if (bio->bi_rw & REQ_DISCARD) {
 		rrpc_discard(rrpc, bio);
-		return;
+		return BLK_QC_T_NONE;
 	}
 
 	rqd = mempool_alloc(rrpc->rq_pool, GFP_KERNEL);
 	if (!rqd) {
 		pr_err_ratelimited("rrpc: not able to queue bio.");
 		bio_io_error(bio);
-		return;
+		return BLK_QC_T_NONE;
 	}
 	memset(rqd, 0, sizeof(struct nvm_rq));
 
 	err = rrpc_submit_io(rrpc, bio, rqd, NVM_IOTYPE_NONE);
 	switch (err) {
 	case NVM_IO_OK:
-		return;
+		return BLK_QC_T_NONE;
 	case NVM_IO_ERR:
 		bio_io_error(bio);
 		break;
@@ -841,6 +841,7 @@ static void rrpc_make_rq(struct request_queue *q, struct bio *bio)
 	}
 
 	mempool_free(rqd, rrpc->rq_pool);
+	return BLK_QC_T_NONE;
 }
 
 static void rrpc_requeue(struct work_struct *work)