author     Linus Torvalds <torvalds@linux-foundation.org>  2019-02-22 13:32:26 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>  2019-02-22 13:32:26 -0500
commit     168bd29830e8ebbffcd70d2af50249dca088e1a8 (patch)
tree       9630030974a99bf50f3e4598605988b321c1c005
parent     a3504f7a38233030def726fcfe692e786ab162df (diff)
parent     f09ef134a7ca3f0d2ce485a757f5b79809ebb803 (diff)
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma
Pull rdma fixes from Jason Gunthorpe:
 "Small set of three regression fixing patches, things are looking
  pretty good here.

   - Fix cxgb4 to work again with non-4k page sizes

   - NULL pointer oops in SRP during sg_reset"

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma:
  iw_cxgb4: cq/qp mask depends on bar2 pages in a host page
  cxgb4: Export sge_host_page_size to ulds
  RDMA/srp: Rework SCSI device reset handling
-rw-r--r--  drivers/infiniband/hw/cxgb4/device.c            15
-rw-r--r--  drivers/infiniband/ulp/srp/ib_srp.c             10
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c   1
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h   1
4 files changed, 15 insertions(+), 12 deletions(-)
diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c
index c13c0ba30f63..d499cd61c0e8 100644
--- a/drivers/infiniband/hw/cxgb4/device.c
+++ b/drivers/infiniband/hw/cxgb4/device.c
@@ -783,6 +783,7 @@ void c4iw_init_dev_ucontext(struct c4iw_rdev *rdev,
 static int c4iw_rdev_open(struct c4iw_rdev *rdev)
 {
         int err;
+        unsigned int factor;
 
         c4iw_init_dev_ucontext(rdev, &rdev->uctx);
 
@@ -806,8 +807,18 @@ static int c4iw_rdev_open(struct c4iw_rdev *rdev)
                 return -EINVAL;
         }
 
-        rdev->qpmask = rdev->lldi.udb_density - 1;
-        rdev->cqmask = rdev->lldi.ucq_density - 1;
+        /* This implementation requires a sge_host_page_size <= PAGE_SIZE. */
+        if (rdev->lldi.sge_host_page_size > PAGE_SIZE) {
+                pr_err("%s: unsupported sge host page size %u\n",
+                       pci_name(rdev->lldi.pdev),
+                       rdev->lldi.sge_host_page_size);
+                return -EINVAL;
+        }
+
+        factor = PAGE_SIZE / rdev->lldi.sge_host_page_size;
+        rdev->qpmask = (rdev->lldi.udb_density * factor) - 1;
+        rdev->cqmask = (rdev->lldi.ucq_density * factor) - 1;
+
         pr_debug("dev %s stag start 0x%0x size 0x%0x num stags %d pbl start 0x%0x size 0x%0x rq start 0x%0x size 0x%0x qp qid start %u size %u cq qid start %u size %u srq size %u\n",
                  pci_name(rdev->lldi.pdev), rdev->lldi.vr->stag.start,
                  rdev->lldi.vr->stag.size, c4iw_num_stags(rdev),
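
For illustration only, here is a minimal user-space sketch of the mask arithmetic the hunk above introduces. The page sizes and densities are hypothetical example values, not taken from the patch; the point is that when the kernel PAGE_SIZE exceeds the SGE host page size reported by cxgb4, the qp/cq masks are scaled up by the ratio of the two.

/*
 * Standalone sketch (not kernel code) of the scaled qp/cq mask calculation.
 * All values below are hypothetical examples chosen to illustrate the math.
 */
#include <stdio.h>

int main(void)
{
        unsigned int page_size = 65536;         /* e.g. a 64 KiB kernel PAGE_SIZE */
        unsigned int sge_host_page_size = 4096; /* e.g. a 4 KiB SGE host page */
        unsigned int udb_density = 2;           /* hypothetical user DBs per SGE page */
        unsigned int ucq_density = 2;           /* hypothetical user CQs per SGE page */

        /* Same guard as the patch: only sge_host_page_size <= PAGE_SIZE is supported. */
        if (sge_host_page_size > page_size) {
                fprintf(stderr, "unsupported sge host page size %u\n",
                        sge_host_page_size);
                return 1;
        }

        unsigned int factor = page_size / sge_host_page_size; /* 16 with these values */
        unsigned int qpmask = (udb_density * factor) - 1;     /* 0x1f instead of 0x1 */
        unsigned int cqmask = (ucq_density * factor) - 1;     /* 0x1f instead of 0x1 */

        printf("factor=%u qpmask=0x%x cqmask=0x%x\n", factor, qpmask, cqmask);
        return 0;
}

With these example values the factor is 16, so a density of 2 yields a mask of 0x1f rather than the pre-patch 0x1, which is the scaling the patch relies on for hosts with non-4k page sizes.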
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 31d91538bbf4..694324b37480 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -3032,7 +3032,6 @@ static int srp_reset_device(struct scsi_cmnd *scmnd)
 {
         struct srp_target_port *target = host_to_target(scmnd->device->host);
         struct srp_rdma_ch *ch;
-        int i, j;
         u8 status;
 
         shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n");
@@ -3044,15 +3043,6 @@ static int srp_reset_device(struct scsi_cmnd *scmnd)
         if (status)
                 return FAILED;
 
-        for (i = 0; i < target->ch_count; i++) {
-                ch = &target->ch[i];
-                for (j = 0; j < target->req_ring_size; ++j) {
-                        struct srp_request *req = &ch->req_ring[j];
-
-                        srp_finish_req(ch, req, scmnd->device, DID_RESET << 16);
-                }
-        }
-
         return SUCCESS;
 }
 
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
index c041f44324db..b3654598a2d5 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
@@ -660,6 +660,7 @@ static void uld_init(struct adapter *adap, struct cxgb4_lld_info *lld)
         lld->cclk_ps = 1000000000 / adap->params.vpd.cclk;
         lld->udb_density = 1 << adap->params.sge.eq_qpp;
         lld->ucq_density = 1 << adap->params.sge.iq_qpp;
+        lld->sge_host_page_size = 1 << (adap->params.sge.hps + 10);
         lld->filt_mode = adap->params.tp.vlan_pri_map;
         /* MODQ_REQ_MAP sets queues 0-3 to chan 0-3 */
         for (i = 0; i < NCHAN; i++)
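
As a side note, the value exported above is simply 2^(hps + 10) bytes, so the hps field effectively encodes log2(page size) - 10. A standalone decoding sketch follows; the hps values iterated here are illustrative, not read from hardware.

/* Decoding sketch for the 1 << (hps + 10) expression in uld_init() above. */
#include <stdio.h>

int main(void)
{
        for (unsigned int hps = 0; hps <= 6; hps++)
                printf("hps=%u -> sge_host_page_size=%u bytes\n",
                       hps, 1u << (hps + 10));
        /* e.g. hps=2 -> 4096 (4 KiB), hps=6 -> 65536 (64 KiB) */
        return 0;
}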
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
index 5fa9a2d5fc4b..21da34a4ca24 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
@@ -336,6 +336,7 @@ struct cxgb4_lld_info {
         unsigned int cclk_ps;            /* Core clock period in psec */
         unsigned short udb_density;      /* # of user DB/page */
         unsigned short ucq_density;      /* # of user CQs/page */
+        unsigned int sge_host_page_size; /* SGE host page size */
         unsigned short filt_mode;        /* filter optional components */
         unsigned short tx_modq[NCHAN];   /* maps each tx channel to a */
                                          /* scheduler queue */