author		Max Gurtovoy <maxg@mellanox.com>	2017-08-14 08:29:26 -0400
committer	Sagi Grimberg <sagi@grimberg.me>	2017-08-28 16:00:43 -0400
commit		a7b7c7a105a528e6c2a0a2581b814a5acacb4c38 (patch)
tree		7d5746077429a3ff673e7992eb07ea4ddc6f3f33
parent		17c39d053a46b300fee786857458857086a4844e (diff)
nvme-rdma: Use unlikely macro in the fast path
This patch slightly improves performance (mainly for small block sizes).

Signed-off-by: Max Gurtovoy <maxg@mellanox.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
 drivers/nvme/host/rdma.c | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)
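For context on the annotation this patch adds: unlikely() hints to the compiler that a condition is expected to be false, so the error-handling branch can be laid out off the hot path and the common case falls straight through. Below is a minimal userspace sketch of that idiom, assuming a GCC/Clang toolchain that provides __builtin_expect; post_work() is a hypothetical helper used only for illustration and is not part of the driver (the kernel's own definition lives in include/linux/compiler.h).

/* Minimal stand-in for the kernel's unlikely(); illustrative only. */
#include <stdio.h>

#define unlikely(x)	__builtin_expect(!!(x), 0)

/* Hypothetical submit helper: the failure branch is marked unlikely so
 * the compiler optimizes code layout for the success (fast) path. */
static int post_work(int queue_full)
{
	if (unlikely(queue_full)) {
		fprintf(stderr, "queue full, dropping request\n");
		return -1;
	}
	return 0;	/* fast path: falls through on success */
}

int main(void)
{
	return post_work(0);
}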
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index b51e7df63df5..6a7682620d87 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -1047,7 +1047,7 @@ static void nvme_rdma_unmap_data(struct nvme_rdma_queue *queue,
 
 	if (req->mr->need_inval) {
 		res = nvme_rdma_inv_rkey(queue, req);
-		if (res < 0) {
+		if (unlikely(res < 0)) {
 			dev_err(ctrl->ctrl.device,
 				"Queueing INV WR for rkey %#x failed (%d)\n",
 				req->mr->rkey, res);
@@ -1112,7 +1112,7 @@ static int nvme_rdma_map_sg_fr(struct nvme_rdma_queue *queue,
 	int nr;
 
 	nr = ib_map_mr_sg(req->mr, req->sg_table.sgl, count, NULL, PAGE_SIZE);
-	if (nr < count) {
+	if (unlikely(nr < count)) {
 		if (nr < 0)
 			return nr;
 		return -EINVAL;
@@ -1248,7 +1248,7 @@ static int nvme_rdma_post_send(struct nvme_rdma_queue *queue,
 		first = &wr;
 
 	ret = ib_post_send(queue->qp, first, &bad_wr);
-	if (ret) {
+	if (unlikely(ret)) {
 		dev_err(queue->ctrl->ctrl.device,
 			"%s failed with error code %d\n", __func__, ret);
 	}
@@ -1274,7 +1274,7 @@ static int nvme_rdma_post_recv(struct nvme_rdma_queue *queue,
 	wr.num_sge = 1;
 
 	ret = ib_post_recv(queue->qp, &wr, &bad_wr);
-	if (ret) {
+	if (unlikely(ret)) {
 		dev_err(queue->ctrl->ctrl.device,
 			"%s failed with error code %d\n", __func__, ret);
 	}
@@ -1634,7 +1634,7 @@ static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
 	blk_mq_start_request(rq);
 
 	err = nvme_rdma_map_data(queue, rq, c);
-	if (err < 0) {
+	if (unlikely(err < 0)) {
 		dev_err(queue->ctrl->ctrl.device,
 			"Failed to map data (%d)\n", err);
 		nvme_cleanup_cmd(rq);
@@ -1648,7 +1648,7 @@ static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
 		flush = true;
 	err = nvme_rdma_post_send(queue, sqe, req->sge, req->num_sge,
 			req->mr->need_inval ? &req->reg_wr.wr : NULL, flush);
-	if (err) {
+	if (unlikely(err)) {
 		nvme_rdma_unmap_data(queue, rq);
 		goto err;
 	}