about summary refs log tree commit diff stats
path: root/drivers/net/ethernet/chelsio
diff options
context:
space:
mode:
authorHariprasad Shenai <hariprasad@chelsio.com>2014-06-06 12:10:42 -0400
committerDavid S. Miller <davem@davemloft.net>2014-06-11 01:49:54 -0400
commitcf38be6d61001b234d5b980d6e98702587638190 (patch)
tree97f370cf0284a42bcea1abf271487c5807cd7d24 /drivers/net/ethernet/chelsio
parentf8c1b7ce00254a5bb75d5b5e5ef1601326a0e08e (diff)
iw_cxgb4: Allocate and use IQs specifically for indirect interrupts
Currently indirect interrupts for RDMA CQs funnel through the LLD's RDMA RXQs, which also handle direct interrupts for offload CPLs during RDMA connection setup/teardown. The intended T4 usage model, however, is to have indirect interrupts flow through dedicated IQs; i.e., not to mix indirect interrupts with CPL messages in an IQ. This patch adds the concept of RDMA concentrator IQs, or CIQs, setup and maintained by the LLD and exported to iw_cxgb4 for use when creating CQs. RDMA CPLs will flow through the LLD's RDMA RXQs, and CQ interrupts flow through the CIQs. Design: cxgb4 creates and exports an array of CIQs for the RDMA ULD. These IQs are sized according to the max available CQs available at adapter init. In addition, these IQs don't need FL buffers since they only service indirect interrupts. One CIQ is setup per RX channel similar to the RDMA RXQs. iw_cxgb4 will utilize these CIQs based on the vector value passed into create_cq(). The num_comp_vectors advertised by iw_cxgb4 will be the number of CIQs configured, and thus the vector value will be the index into the array of CIQs. Based on original work by Steve Wise <swise@opengridcomputing.com> Signed-off-by: Steve Wise <swise@opengridcomputing.com> Signed-off-by: Hariprasad Shenai <hariprasad@chelsio.com> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/ethernet/chelsio')
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4.h14
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c62
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sge.c4
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_hw.h1
5 files changed, 76 insertions, 7 deletions
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index 32db37709263..f503dce4ab17 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -357,11 +357,17 @@ enum {
357 MAX_OFLD_QSETS = 16, /* # of offload Tx/Rx queue sets */ 357 MAX_OFLD_QSETS = 16, /* # of offload Tx/Rx queue sets */
358 MAX_CTRL_QUEUES = NCHAN, /* # of control Tx queues */ 358 MAX_CTRL_QUEUES = NCHAN, /* # of control Tx queues */
359 MAX_RDMA_QUEUES = NCHAN, /* # of streaming RDMA Rx queues */ 359 MAX_RDMA_QUEUES = NCHAN, /* # of streaming RDMA Rx queues */
360 MAX_RDMA_CIQS = NCHAN, /* # of RDMA concentrator IQs */
361 MAX_ISCSI_QUEUES = NCHAN, /* # of streaming iSCSI Rx queues */
360}; 362};
361 363
362enum { 364enum {
363 MAX_EGRQ = 128, /* max # of egress queues, including FLs */ 365 INGQ_EXTRAS = 2, /* firmware event queue and */
364 MAX_INGQ = 64 /* max # of interrupt-capable ingress queues */ 366 /* forwarded interrupts */
367 MAX_EGRQ = MAX_ETH_QSETS*2 + MAX_OFLD_QSETS*2
368 + MAX_CTRL_QUEUES + MAX_RDMA_QUEUES + MAX_ISCSI_QUEUES,
369 MAX_INGQ = MAX_ETH_QSETS + MAX_OFLD_QSETS + MAX_RDMA_QUEUES
370 + MAX_RDMA_CIQS + MAX_ISCSI_QUEUES + INGQ_EXTRAS,
365}; 371};
366 372
367struct adapter; 373struct adapter;
@@ -538,6 +544,7 @@ struct sge {
538 struct sge_eth_rxq ethrxq[MAX_ETH_QSETS]; 544 struct sge_eth_rxq ethrxq[MAX_ETH_QSETS];
539 struct sge_ofld_rxq ofldrxq[MAX_OFLD_QSETS]; 545 struct sge_ofld_rxq ofldrxq[MAX_OFLD_QSETS];
540 struct sge_ofld_rxq rdmarxq[MAX_RDMA_QUEUES]; 546 struct sge_ofld_rxq rdmarxq[MAX_RDMA_QUEUES];
547 struct sge_ofld_rxq rdmaciq[MAX_RDMA_CIQS];
541 struct sge_rspq fw_evtq ____cacheline_aligned_in_smp; 548 struct sge_rspq fw_evtq ____cacheline_aligned_in_smp;
542 549
543 struct sge_rspq intrq ____cacheline_aligned_in_smp; 550 struct sge_rspq intrq ____cacheline_aligned_in_smp;
@@ -548,8 +555,10 @@ struct sge {
548 u16 ethtxq_rover; /* Tx queue to clean up next */ 555 u16 ethtxq_rover; /* Tx queue to clean up next */
549 u16 ofldqsets; /* # of active offload queue sets */ 556 u16 ofldqsets; /* # of active offload queue sets */
550 u16 rdmaqs; /* # of available RDMA Rx queues */ 557 u16 rdmaqs; /* # of available RDMA Rx queues */
558 u16 rdmaciqs; /* # of available RDMA concentrator IQs */
551 u16 ofld_rxq[MAX_OFLD_QSETS]; 559 u16 ofld_rxq[MAX_OFLD_QSETS];
552 u16 rdma_rxq[NCHAN]; 560 u16 rdma_rxq[NCHAN];
561 u16 rdma_ciq[NCHAN];
553 u16 timer_val[SGE_NTIMERS]; 562 u16 timer_val[SGE_NTIMERS];
554 u8 counter_val[SGE_NCOUNTERS]; 563 u8 counter_val[SGE_NCOUNTERS];
555 u32 fl_pg_order; /* large page allocation size */ 564 u32 fl_pg_order; /* large page allocation size */
@@ -577,6 +586,7 @@ struct sge {
577#define for_each_ethrxq(sge, i) for (i = 0; i < (sge)->ethqsets; i++) 586#define for_each_ethrxq(sge, i) for (i = 0; i < (sge)->ethqsets; i++)
578#define for_each_ofldrxq(sge, i) for (i = 0; i < (sge)->ofldqsets; i++) 587#define for_each_ofldrxq(sge, i) for (i = 0; i < (sge)->ofldqsets; i++)
579#define for_each_rdmarxq(sge, i) for (i = 0; i < (sge)->rdmaqs; i++) 588#define for_each_rdmarxq(sge, i) for (i = 0; i < (sge)->rdmaqs; i++)
589#define for_each_rdmaciq(sge, i) for (i = 0; i < (sge)->rdmaciqs; i++)
580 590
581struct l2t_data; 591struct l2t_data;
582 592
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 8cf6be93f491..c26c3f8e2795 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -818,12 +818,17 @@ static void name_msix_vecs(struct adapter *adap)
818 for_each_rdmarxq(&adap->sge, i) 818 for_each_rdmarxq(&adap->sge, i)
819 snprintf(adap->msix_info[msi_idx++].desc, n, "%s-rdma%d", 819 snprintf(adap->msix_info[msi_idx++].desc, n, "%s-rdma%d",
820 adap->port[0]->name, i); 820 adap->port[0]->name, i);
821
822 for_each_rdmaciq(&adap->sge, i)
823 snprintf(adap->msix_info[msi_idx++].desc, n, "%s-rdma-ciq%d",
824 adap->port[0]->name, i);
821} 825}
822 826
823static int request_msix_queue_irqs(struct adapter *adap) 827static int request_msix_queue_irqs(struct adapter *adap)
824{ 828{
825 struct sge *s = &adap->sge; 829 struct sge *s = &adap->sge;
826 int err, ethqidx, ofldqidx = 0, rdmaqidx = 0, msi_index = 2; 830 int err, ethqidx, ofldqidx = 0, rdmaqidx = 0, rdmaciqqidx = 0;
831 int msi_index = 2;
827 832
828 err = request_irq(adap->msix_info[1].vec, t4_sge_intr_msix, 0, 833 err = request_irq(adap->msix_info[1].vec, t4_sge_intr_msix, 0,
829 adap->msix_info[1].desc, &s->fw_evtq); 834 adap->msix_info[1].desc, &s->fw_evtq);
@@ -857,9 +862,21 @@ static int request_msix_queue_irqs(struct adapter *adap)
857 goto unwind; 862 goto unwind;
858 msi_index++; 863 msi_index++;
859 } 864 }
865 for_each_rdmaciq(s, rdmaciqqidx) {
866 err = request_irq(adap->msix_info[msi_index].vec,
867 t4_sge_intr_msix, 0,
868 adap->msix_info[msi_index].desc,
869 &s->rdmaciq[rdmaciqqidx].rspq);
870 if (err)
871 goto unwind;
872 msi_index++;
873 }
860 return 0; 874 return 0;
861 875
862unwind: 876unwind:
877 while (--rdmaciqqidx >= 0)
878 free_irq(adap->msix_info[--msi_index].vec,
879 &s->rdmaciq[rdmaciqqidx].rspq);
863 while (--rdmaqidx >= 0) 880 while (--rdmaqidx >= 0)
864 free_irq(adap->msix_info[--msi_index].vec, 881 free_irq(adap->msix_info[--msi_index].vec,
865 &s->rdmarxq[rdmaqidx].rspq); 882 &s->rdmarxq[rdmaqidx].rspq);
@@ -885,6 +902,8 @@ static void free_msix_queue_irqs(struct adapter *adap)
885 free_irq(adap->msix_info[msi_index++].vec, &s->ofldrxq[i].rspq); 902 free_irq(adap->msix_info[msi_index++].vec, &s->ofldrxq[i].rspq);
886 for_each_rdmarxq(s, i) 903 for_each_rdmarxq(s, i)
887 free_irq(adap->msix_info[msi_index++].vec, &s->rdmarxq[i].rspq); 904 free_irq(adap->msix_info[msi_index++].vec, &s->rdmarxq[i].rspq);
905 for_each_rdmaciq(s, i)
906 free_irq(adap->msix_info[msi_index++].vec, &s->rdmaciq[i].rspq);
888} 907}
889 908
890/** 909/**
@@ -1047,7 +1066,8 @@ freeout: t4_free_sge_resources(adap);
1047 if (msi_idx > 0) 1066 if (msi_idx > 0)
1048 msi_idx++; 1067 msi_idx++;
1049 err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev, msi_idx, 1068 err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev, msi_idx,
1050 &q->fl, uldrx_handler); 1069 q->fl.size ? &q->fl : NULL,
1070 uldrx_handler);
1051 if (err) 1071 if (err)
1052 goto freeout; 1072 goto freeout;
1053 memset(&q->stats, 0, sizeof(q->stats)); 1073 memset(&q->stats, 0, sizeof(q->stats));
@@ -1064,13 +1084,28 @@ freeout: t4_free_sge_resources(adap);
1064 if (msi_idx > 0) 1084 if (msi_idx > 0)
1065 msi_idx++; 1085 msi_idx++;
1066 err = t4_sge_alloc_rxq(adap, &q->rspq, false, adap->port[i], 1086 err = t4_sge_alloc_rxq(adap, &q->rspq, false, adap->port[i],
1067 msi_idx, &q->fl, uldrx_handler); 1087 msi_idx, q->fl.size ? &q->fl : NULL,
1088 uldrx_handler);
1068 if (err) 1089 if (err)
1069 goto freeout; 1090 goto freeout;
1070 memset(&q->stats, 0, sizeof(q->stats)); 1091 memset(&q->stats, 0, sizeof(q->stats));
1071 s->rdma_rxq[i] = q->rspq.abs_id; 1092 s->rdma_rxq[i] = q->rspq.abs_id;
1072 } 1093 }
1073 1094
1095 for_each_rdmaciq(s, i) {
1096 struct sge_ofld_rxq *q = &s->rdmaciq[i];
1097
1098 if (msi_idx > 0)
1099 msi_idx++;
1100 err = t4_sge_alloc_rxq(adap, &q->rspq, false, adap->port[i],
1101 msi_idx, q->fl.size ? &q->fl : NULL,
1102 uldrx_handler);
1103 if (err)
1104 goto freeout;
1105 memset(&q->stats, 0, sizeof(q->stats));
1106 s->rdma_ciq[i] = q->rspq.abs_id;
1107 }
1108
1074 for_each_port(adap, i) { 1109 for_each_port(adap, i) {
1075 /* 1110 /*
1076 * Note that ->rdmarxq[i].rspq.cntxt_id below is 0 if we don't 1111 * Note that ->rdmarxq[i].rspq.cntxt_id below is 0 if we don't
@@ -3789,7 +3824,9 @@ static void uld_attach(struct adapter *adap, unsigned int uld)
3789 lli.mtus = adap->params.mtus; 3824 lli.mtus = adap->params.mtus;
3790 if (uld == CXGB4_ULD_RDMA) { 3825 if (uld == CXGB4_ULD_RDMA) {
3791 lli.rxq_ids = adap->sge.rdma_rxq; 3826 lli.rxq_ids = adap->sge.rdma_rxq;
3827 lli.ciq_ids = adap->sge.rdma_ciq;
3792 lli.nrxq = adap->sge.rdmaqs; 3828 lli.nrxq = adap->sge.rdmaqs;
3829 lli.nciq = adap->sge.rdmaciqs;
3793 } else if (uld == CXGB4_ULD_ISCSI) { 3830 } else if (uld == CXGB4_ULD_ISCSI) {
3794 lli.rxq_ids = adap->sge.ofld_rxq; 3831 lli.rxq_ids = adap->sge.ofld_rxq;
3795 lli.nrxq = adap->sge.ofldqsets; 3832 lli.nrxq = adap->sge.ofldqsets;
@@ -5695,6 +5732,7 @@ static void cfg_queues(struct adapter *adap)
5695{ 5732{
5696 struct sge *s = &adap->sge; 5733 struct sge *s = &adap->sge;
5697 int i, q10g = 0, n10g = 0, qidx = 0; 5734 int i, q10g = 0, n10g = 0, qidx = 0;
5735 int ciq_size;
5698 5736
5699 for_each_port(adap, i) 5737 for_each_port(adap, i)
5700 n10g += is_x_10g_port(&adap2pinfo(adap, i)->link_cfg); 5738 n10g += is_x_10g_port(&adap2pinfo(adap, i)->link_cfg);
@@ -5733,6 +5771,7 @@ static void cfg_queues(struct adapter *adap)
5733 s->ofldqsets = adap->params.nports; 5771 s->ofldqsets = adap->params.nports;
5734 /* For RDMA one Rx queue per channel suffices */ 5772 /* For RDMA one Rx queue per channel suffices */
5735 s->rdmaqs = adap->params.nports; 5773 s->rdmaqs = adap->params.nports;
5774 s->rdmaciqs = adap->params.nports;
5736 } 5775 }
5737 5776
5738 for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) { 5777 for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) {
@@ -5767,6 +5806,19 @@ static void cfg_queues(struct adapter *adap)
5767 r->fl.size = 72; 5806 r->fl.size = 72;
5768 } 5807 }
5769 5808
5809 ciq_size = 64 + adap->vres.cq.size + adap->tids.nftids;
5810 if (ciq_size > SGE_MAX_IQ_SIZE) {
5811 CH_WARN(adap, "CIQ size too small for available IQs\n");
5812 ciq_size = SGE_MAX_IQ_SIZE;
5813 }
5814
5815 for (i = 0; i < ARRAY_SIZE(s->rdmaciq); i++) {
5816 struct sge_ofld_rxq *r = &s->rdmaciq[i];
5817
5818 init_rspq(&r->rspq, 0, 0, ciq_size, 64);
5819 r->rspq.uld = CXGB4_ULD_RDMA;
5820 }
5821
5770 init_rspq(&s->fw_evtq, 6, 0, 512, 64); 5822 init_rspq(&s->fw_evtq, 6, 0, 512, 64);
5771 init_rspq(&s->intrq, 6, 0, 2 * MAX_INGQ, 64); 5823 init_rspq(&s->intrq, 6, 0, 2 * MAX_INGQ, 64);
5772} 5824}
@@ -5815,9 +5867,9 @@ static int enable_msix(struct adapter *adap)
5815 5867
5816 want = s->max_ethqsets + EXTRA_VECS; 5868 want = s->max_ethqsets + EXTRA_VECS;
5817 if (is_offload(adap)) { 5869 if (is_offload(adap)) {
5818 want += s->rdmaqs + s->ofldqsets; 5870 want += s->rdmaqs + s->rdmaciqs + s->ofldqsets;
5819 /* need nchan for each possible ULD */ 5871 /* need nchan for each possible ULD */
5820 ofld_need = 2 * nchan; 5872 ofld_need = 3 * nchan;
5821 } 5873 }
5822 need = adap->params.nports + EXTRA_VECS + ofld_need; 5874 need = adap->params.nports + EXTRA_VECS + ofld_need;
5823 5875
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
index e274a047528f..87af314518a4 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
@@ -232,8 +232,10 @@ struct cxgb4_lld_info {
232 const struct cxgb4_virt_res *vr; /* assorted HW resources */ 232 const struct cxgb4_virt_res *vr; /* assorted HW resources */
233 const unsigned short *mtus; /* MTU table */ 233 const unsigned short *mtus; /* MTU table */
234 const unsigned short *rxq_ids; /* the ULD's Rx queue ids */ 234 const unsigned short *rxq_ids; /* the ULD's Rx queue ids */
235 const unsigned short *ciq_ids; /* the ULD's concentrator IQ ids */
235 unsigned short nrxq; /* # of Rx queues */ 236 unsigned short nrxq; /* # of Rx queues */
236 unsigned short ntxq; /* # of Tx queues */ 237 unsigned short ntxq; /* # of Tx queues */
238 unsigned short nciq; /* # of concentrator IQ */
237 unsigned char nchan:4; /* # of channels */ 239 unsigned char nchan:4; /* # of channels */
238 unsigned char nports:4; /* # of ports */ 240 unsigned char nports:4; /* # of ports */
239 unsigned char wr_cred; /* WR 16-byte credits */ 241 unsigned char wr_cred; /* WR 16-byte credits */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index cced1a3d5181..bd82939ae8f4 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -2515,6 +2515,10 @@ void t4_free_sge_resources(struct adapter *adap)
2515 if (oq->rspq.desc) 2515 if (oq->rspq.desc)
2516 free_rspq_fl(adap, &oq->rspq, &oq->fl); 2516 free_rspq_fl(adap, &oq->rspq, &oq->fl);
2517 } 2517 }
2518 for (i = 0, oq = adap->sge.rdmaciq; i < adap->sge.rdmaciqs; i++, oq++) {
2519 if (oq->rspq.desc)
2520 free_rspq_fl(adap, &oq->rspq, &oq->fl);
2521 }
2518 2522
2519 /* clean up offload Tx queues */ 2523 /* clean up offload Tx queues */
2520 for (i = 0; i < ARRAY_SIZE(adap->sge.ofldtxq); i++) { 2524 for (i = 0; i < ARRAY_SIZE(adap->sge.ofldtxq); i++) {
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
index 1d1623be9f1e..71b799b5b0f4 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
@@ -68,6 +68,7 @@ enum {
68 SGE_MAX_WR_LEN = 512, /* max WR size in bytes */ 68 SGE_MAX_WR_LEN = 512, /* max WR size in bytes */
69 SGE_NTIMERS = 6, /* # of interrupt holdoff timer values */ 69 SGE_NTIMERS = 6, /* # of interrupt holdoff timer values */
70 SGE_NCOUNTERS = 4, /* # of interrupt packet counter values */ 70 SGE_NCOUNTERS = 4, /* # of interrupt packet counter values */
71 SGE_MAX_IQ_SIZE = 65520,
71 72
72 SGE_TIMER_RSTRT_CNTR = 6, /* restart RX packet threshold counter */ 73 SGE_TIMER_RSTRT_CNTR = 6, /* restart RX packet threshold counter */
73 SGE_TIMER_UPD_CIDX = 7, /* update cidx only */ 74 SGE_TIMER_UPD_CIDX = 7, /* update cidx only */