author		Linus Torvalds <torvalds@linux-foundation.org>	2016-03-23 18:57:39 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2016-03-23 18:57:39 -0400
commit		ed7d6bc23b6dd8f5272431ce930c84d1e537be49 (patch)
tree		e8854bc72e9cfebb7c1570729443c9ca2f9082ca
parent		c130423620331a104492bbbcc49f25125e26a21a (diff)
parent		2994a75183173defc21f0d26ce23b63232211b88 (diff)
Merge branch 'for-next-merge' of git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending
Pull more SCSI target updates from Nicholas Bellinger:
 "This series contains cxgb4 driver prerequisites for supporting iscsi
  segmentation offload (ISO), that will be utilized for a number of
  future v4.7 developments in iscsi-target for supporting generic hw
  offloads"

* 'for-next-merge' of git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending:
  cxgb4: update Kconfig and Makefile
  cxgb4: add iSCSI DDP page pod manager
  cxgb4, iw_cxgb4: move delayed ack macro definitions
  cxgb4: move VLAN_NONE macro definition
  cxgb4: update struct cxgb4_lld_info definition
  cxgb4: add definitions for iSCSI target ULD
  cxgb4, cxgb4i: move struct cpl_rx_data_ddp definition
  cxgb4, iw_cxgb4, cxgb4i: remove duplicate definitions
  cxgb4, iw_cxgb4: move definitions to common header file
  cxgb4: large receive offload support
  cxgb4: allocate resources for CXGB4_ULD_ISCSIT
  cxgb4: add new ULD type CXGB4_ULD_ISCSIT
-rw-r--r--	drivers/infiniband/hw/cxgb4/t4fw_ri_api.h	|  99
-rw-r--r--	drivers/net/ethernet/chelsio/Kconfig	|  11
-rw-r--r--	drivers/net/ethernet/chelsio/cxgb4/Makefile	|   1
-rw-r--r--	drivers/net/ethernet/chelsio/cxgb4/cxgb4.h	|  27
-rw-r--r--	drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c	|  34
-rw-r--r--	drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c	|  97
-rw-r--r--	drivers/net/ethernet/chelsio/cxgb4/cxgb4_ppm.c	| 464
-rw-r--r--	drivers/net/ethernet/chelsio/cxgb4/cxgb4_ppm.h	| 310
-rw-r--r--	drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h	|  11
-rw-r--r--	drivers/net/ethernet/chelsio/cxgb4/l2t.c	|   2
-rw-r--r--	drivers/net/ethernet/chelsio/cxgb4/l2t.h	|   2
-rw-r--r--	drivers/net/ethernet/chelsio/cxgb4/sge.c	|  13
-rw-r--r--	drivers/net/ethernet/chelsio/cxgb4/t4_msg.h	| 217
-rw-r--r--	drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h	|   8
-rw-r--r--	drivers/scsi/cxgbi/cxgb4i/cxgb4i.h	|  17
15 files changed, 1174 insertions(+), 139 deletions(-)
diff --git a/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h b/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h
index 343e8daf2270..1e26669793c3 100644
--- a/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h
+++ b/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h
@@ -753,103 +753,4 @@ struct fw_ri_wr {
 #define FW_RI_WR_P2PTYPE_G(x)	\
 	(((x) >> FW_RI_WR_P2PTYPE_S) & FW_RI_WR_P2PTYPE_M)
 
-struct tcp_options {
-	__be16 mss;
-	__u8 wsf;
-#if defined(__LITTLE_ENDIAN_BITFIELD)
-	__u8:4;
-	__u8 unknown:1;
-	__u8:1;
-	__u8 sack:1;
-	__u8 tstamp:1;
-#else
-	__u8 tstamp:1;
-	__u8 sack:1;
-	__u8:1;
-	__u8 unknown:1;
-	__u8:4;
-#endif
-};
-
-struct cpl_pass_accept_req {
-	union opcode_tid ot;
-	__be16 rsvd;
-	__be16 len;
-	__be32 hdr_len;
-	__be16 vlan;
-	__be16 l2info;
-	__be32 tos_stid;
-	struct tcp_options tcpopt;
-};
-
-/* cpl_pass_accept_req.hdr_len fields */
-#define SYN_RX_CHAN_S    0
-#define SYN_RX_CHAN_M    0xF
-#define SYN_RX_CHAN_V(x) ((x) << SYN_RX_CHAN_S)
-#define SYN_RX_CHAN_G(x) (((x) >> SYN_RX_CHAN_S) & SYN_RX_CHAN_M)
-
-#define TCP_HDR_LEN_S    10
-#define TCP_HDR_LEN_M    0x3F
-#define TCP_HDR_LEN_V(x) ((x) << TCP_HDR_LEN_S)
-#define TCP_HDR_LEN_G(x) (((x) >> TCP_HDR_LEN_S) & TCP_HDR_LEN_M)
-
-#define IP_HDR_LEN_S    16
-#define IP_HDR_LEN_M    0x3FF
-#define IP_HDR_LEN_V(x) ((x) << IP_HDR_LEN_S)
-#define IP_HDR_LEN_G(x) (((x) >> IP_HDR_LEN_S) & IP_HDR_LEN_M)
-
-#define ETH_HDR_LEN_S    26
-#define ETH_HDR_LEN_M    0x1F
-#define ETH_HDR_LEN_V(x) ((x) << ETH_HDR_LEN_S)
-#define ETH_HDR_LEN_G(x) (((x) >> ETH_HDR_LEN_S) & ETH_HDR_LEN_M)
-
-/* cpl_pass_accept_req.l2info fields */
-#define SYN_MAC_IDX_S    0
-#define SYN_MAC_IDX_M    0x1FF
-#define SYN_MAC_IDX_V(x) ((x) << SYN_MAC_IDX_S)
-#define SYN_MAC_IDX_G(x) (((x) >> SYN_MAC_IDX_S) & SYN_MAC_IDX_M)
-
-#define SYN_XACT_MATCH_S    9
-#define SYN_XACT_MATCH_V(x) ((x) << SYN_XACT_MATCH_S)
-#define SYN_XACT_MATCH_F    SYN_XACT_MATCH_V(1U)
-
-#define SYN_INTF_S    12
-#define SYN_INTF_M    0xF
-#define SYN_INTF_V(x) ((x) << SYN_INTF_S)
-#define SYN_INTF_G(x) (((x) >> SYN_INTF_S) & SYN_INTF_M)
-
-struct ulptx_idata {
-	__be32 cmd_more;
-	__be32 len;
-};
-
-#define ULPTX_NSGE_S    0
-#define ULPTX_NSGE_M    0xFFFF
-#define ULPTX_NSGE_V(x) ((x) << ULPTX_NSGE_S)
-
-#define RX_DACK_MODE_S    29
-#define RX_DACK_MODE_M    0x3
-#define RX_DACK_MODE_V(x) ((x) << RX_DACK_MODE_S)
-#define RX_DACK_MODE_G(x) (((x) >> RX_DACK_MODE_S) & RX_DACK_MODE_M)
-
-#define RX_DACK_CHANGE_S    31
-#define RX_DACK_CHANGE_V(x) ((x) << RX_DACK_CHANGE_S)
-#define RX_DACK_CHANGE_F    RX_DACK_CHANGE_V(1U)
-
-enum {                     /* TCP congestion control algorithms */
-	CONG_ALG_RENO,
-	CONG_ALG_TAHOE,
-	CONG_ALG_NEWRENO,
-	CONG_ALG_HIGHSPEED
-};
-
-#define CONG_CNTRL_S    14
-#define CONG_CNTRL_M    0x3
-#define CONG_CNTRL_V(x) ((x) << CONG_CNTRL_S)
-#define CONG_CNTRL_G(x) (((x) >> CONG_CNTRL_S) & CONG_CNTRL_M)
-
-#define T5_ISS_S    18
-#define T5_ISS_V(x) ((x) << T5_ISS_S)
-#define T5_ISS_F    T5_ISS_V(1U)
-
 #endif /* _T4FW_RI_API_H_ */
diff --git a/drivers/net/ethernet/chelsio/Kconfig b/drivers/net/ethernet/chelsio/Kconfig
index 4d187f22c48b..4686a85a8a22 100644
--- a/drivers/net/ethernet/chelsio/Kconfig
+++ b/drivers/net/ethernet/chelsio/Kconfig
@@ -96,6 +96,17 @@ config CHELSIO_T4_DCB
 
 	  If unsure, say N.
 
+config CHELSIO_T4_UWIRE
+	bool "Unified Wire Support for Chelsio T5 cards"
+	default n
+	depends on CHELSIO_T4
+	---help---
+	  Enable unified-wire offload features.
+	  Say Y here if you want to enable unified-wire over Ethernet
+	  in the driver.
+
+	  If unsure, say N.
+
 config CHELSIO_T4_FCOE
 	bool "Fibre Channel over Ethernet (FCoE) Support for Chelsio T5 cards"
 	default n
diff --git a/drivers/net/ethernet/chelsio/cxgb4/Makefile b/drivers/net/ethernet/chelsio/cxgb4/Makefile
index ace0ab98d0f1..85c92821b239 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/Makefile
+++ b/drivers/net/ethernet/chelsio/cxgb4/Makefile
@@ -7,4 +7,5 @@ obj-$(CONFIG_CHELSIO_T4) += cxgb4.o
 cxgb4-objs := cxgb4_main.o l2t.o t4_hw.o sge.o clip_tbl.o cxgb4_ethtool.o
 cxgb4-$(CONFIG_CHELSIO_T4_DCB) +=  cxgb4_dcb.o
 cxgb4-$(CONFIG_CHELSIO_T4_FCOE) +=  cxgb4_fcoe.o
+cxgb4-$(CONFIG_CHELSIO_T4_UWIRE) +=  cxgb4_ppm.o
 cxgb4-$(CONFIG_DEBUG_FS) += cxgb4_debugfs.o
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index 1dac6c6111bf..984a3cc26f86 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -404,6 +404,9 @@ enum {
 	MAX_CTRL_QUEUES = NCHAN,      /* # of control Tx queues */
 	MAX_RDMA_QUEUES = NCHAN,      /* # of streaming RDMA Rx queues */
 	MAX_RDMA_CIQS = 32,           /* # of RDMA concentrator IQs */
+
+	/* # of streaming iSCSIT Rx queues */
+	MAX_ISCSIT_QUEUES = MAX_OFLD_QSETS,
 };
 
 enum {
@@ -420,8 +423,8 @@ enum {
 enum {
 	INGQ_EXTRAS = 2,        /* firmware event queue and */
 				/*   forwarded interrupts */
-	MAX_INGQ = MAX_ETH_QSETS + MAX_OFLD_QSETS + MAX_RDMA_QUEUES
-		   + MAX_RDMA_CIQS + INGQ_EXTRAS,
+	MAX_INGQ = MAX_ETH_QSETS + MAX_OFLD_QSETS + MAX_RDMA_QUEUES +
+		   MAX_RDMA_CIQS + MAX_ISCSIT_QUEUES + INGQ_EXTRAS,
 };
 
 struct adapter;
@@ -508,6 +511,15 @@ struct pkt_gl {
 
 typedef int (*rspq_handler_t)(struct sge_rspq *q, const __be64 *rsp,
 			      const struct pkt_gl *gl);
+typedef void (*rspq_flush_handler_t)(struct sge_rspq *q);
+/* LRO related declarations for ULD */
+struct t4_lro_mgr {
+#define MAX_LRO_SESSIONS 64
+	u8 lro_session_cnt;         /* # of sessions to aggregate */
+	unsigned long lro_pkts;     /* # of LRO super packets */
+	unsigned long lro_merged;   /* # of wire packets merged by LRO */
+	struct sk_buff_head lroq;   /* list of aggregated sessions */
+};
 
 struct sge_rspq {                   /* state for an SGE response queue */
 	struct napi_struct napi;
@@ -532,6 +544,8 @@ struct sge_rspq { /* state for an SGE response queue */
 	struct adapter *adap;
 	struct net_device *netdev;  /* associated net device */
 	rspq_handler_t handler;
+	rspq_flush_handler_t flush_handler;
+	struct t4_lro_mgr lro_mgr;
 #ifdef CONFIG_NET_RX_BUSY_POLL
 #define CXGB_POLL_STATE_IDLE		0
 #define CXGB_POLL_STATE_NAPI		BIT(0) /* NAPI owns this poll */
@@ -641,6 +655,7 @@ struct sge {
 
 	struct sge_eth_rxq ethrxq[MAX_ETH_QSETS];
 	struct sge_ofld_rxq iscsirxq[MAX_OFLD_QSETS];
+	struct sge_ofld_rxq iscsitrxq[MAX_ISCSIT_QUEUES];
 	struct sge_ofld_rxq rdmarxq[MAX_RDMA_QUEUES];
 	struct sge_ofld_rxq rdmaciq[MAX_RDMA_CIQS];
 	struct sge_rspq fw_evtq ____cacheline_aligned_in_smp;
@@ -652,9 +667,11 @@ struct sge {
 	u16 ethqsets;               /* # of active Ethernet queue sets */
 	u16 ethtxq_rover;           /* Tx queue to clean up next */
 	u16 iscsiqsets;             /* # of active iSCSI queue sets */
+	u16 niscsitq;               /* # of available iSCST Rx queues */
 	u16 rdmaqs;                 /* # of available RDMA Rx queues */
 	u16 rdmaciqs;               /* # of available RDMA concentrator IQs */
 	u16 iscsi_rxq[MAX_OFLD_QSETS];
+	u16 iscsit_rxq[MAX_ISCSIT_QUEUES];
 	u16 rdma_rxq[MAX_RDMA_QUEUES];
 	u16 rdma_ciq[MAX_RDMA_CIQS];
 	u16 timer_val[SGE_NTIMERS];
@@ -681,6 +698,7 @@ struct sge {
 
 #define for_each_ethrxq(sge, i) for (i = 0; i < (sge)->ethqsets; i++)
 #define for_each_iscsirxq(sge, i) for (i = 0; i < (sge)->iscsiqsets; i++)
+#define for_each_iscsitrxq(sge, i) for (i = 0; i < (sge)->niscsitq; i++)
 #define for_each_rdmarxq(sge, i) for (i = 0; i < (sge)->rdmaqs; i++)
 #define for_each_rdmaciq(sge, i) for (i = 0; i < (sge)->rdmaciqs; i++)
 
@@ -747,6 +765,8 @@ struct adapter {
 	struct list_head rcu_node;
 	struct list_head mac_hlist; /* list of MAC addresses in MPS Hash */
 
+	void *iscsi_ppm;
+
 	struct tid_info tids;
 	void **tid_release_head;
 	spinlock_t tid_release_lock;
@@ -1113,7 +1133,8 @@ int t4_mgmt_tx(struct adapter *adap, struct sk_buff *skb);
 int t4_ofld_send(struct adapter *adap, struct sk_buff *skb);
 int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
 		     struct net_device *dev, int intr_idx,
-		     struct sge_fl *fl, rspq_handler_t hnd, int cong);
+		     struct sge_fl *fl, rspq_handler_t hnd,
+		     rspq_flush_handler_t flush_handler, int cong);
 int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
 			 struct net_device *dev, struct netdev_queue *netdevq,
 			 unsigned int iqid);
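The t4_lro_mgr added above is a per-response-queue staging area: the ULD parks
partially aggregated sessions on lroq, and the driver drains them through the
new flush handler once the queue has no more work. As a rough, hypothetical
sketch (not part of this series; the delivery call is an assumption), a
ULD-side flush callback could look like this:

/* Hypothetical sketch only: drain the per-queue t4_lro_mgr when cxgb4
 * invokes the ULD's lro_flush callback.  t4_lro_mgr comes from cxgb4.h
 * above; using netif_receive_skb() for delivery is an assumption -- a
 * real ULD may feed the super-packets to its own PDU processing instead.
 */
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include "cxgb4.h"

static void example_lro_flush(struct t4_lro_mgr *lro_mgr)
{
	struct sk_buff *skb;

	/* lroq holds the aggregated sessions queued by the rx path */
	while ((skb = skb_dequeue(&lro_mgr->lroq)) != NULL)
		netif_receive_skb(skb);

	lro_mgr->lro_session_cnt = 0;
}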
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
index e6a4072b494b..0bb41e9b9b1c 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
@@ -2334,12 +2334,14 @@ static int sge_qinfo_show(struct seq_file *seq, void *v)
 	struct adapter *adap = seq->private;
 	int eth_entries = DIV_ROUND_UP(adap->sge.ethqsets, 4);
 	int iscsi_entries = DIV_ROUND_UP(adap->sge.iscsiqsets, 4);
+	int iscsit_entries = DIV_ROUND_UP(adap->sge.niscsitq, 4);
 	int rdma_entries = DIV_ROUND_UP(adap->sge.rdmaqs, 4);
 	int ciq_entries = DIV_ROUND_UP(adap->sge.rdmaciqs, 4);
 	int ctrl_entries = DIV_ROUND_UP(MAX_CTRL_QUEUES, 4);
 	int i, r = (uintptr_t)v - 1;
 	int iscsi_idx = r - eth_entries;
-	int rdma_idx = iscsi_idx - iscsi_entries;
+	int iscsit_idx = iscsi_idx - iscsi_entries;
+	int rdma_idx = iscsit_idx - iscsit_entries;
 	int ciq_idx = rdma_idx - rdma_entries;
 	int ctrl_idx = ciq_idx - ciq_entries;
 	int fq_idx = ctrl_idx - ctrl_entries;
@@ -2453,6 +2455,35 @@ do { \
 		RL("FLLow:", fl.low);
 		RL("FLStarving:", fl.starving);
 
+	} else if (iscsit_idx < iscsit_entries) {
+		const struct sge_ofld_rxq *rx =
+			&adap->sge.iscsitrxq[iscsit_idx * 4];
+		int n = min(4, adap->sge.niscsitq - 4 * iscsit_idx);
+
+		S("QType:", "iSCSIT");
+		R("RspQ ID:", rspq.abs_id);
+		R("RspQ size:", rspq.size);
+		R("RspQE size:", rspq.iqe_len);
+		R("RspQ CIDX:", rspq.cidx);
+		R("RspQ Gen:", rspq.gen);
+		S3("u", "Intr delay:", qtimer_val(adap, &rx[i].rspq));
+		S3("u", "Intr pktcnt:",
+		   adap->sge.counter_val[rx[i].rspq.pktcnt_idx]);
+		R("FL ID:", fl.cntxt_id);
+		R("FL size:", fl.size - 8);
+		R("FL pend:", fl.pend_cred);
+		R("FL avail:", fl.avail);
+		R("FL PIDX:", fl.pidx);
+		R("FL CIDX:", fl.cidx);
+		RL("RxPackets:", stats.pkts);
+		RL("RxImmPkts:", stats.imm);
+		RL("RxNoMem:", stats.nomem);
+		RL("FLAllocErr:", fl.alloc_failed);
+		RL("FLLrgAlcErr:", fl.large_alloc_failed);
+		RL("FLMapErr:", fl.mapping_err);
+		RL("FLLow:", fl.low);
+		RL("FLStarving:", fl.starving);
+
 	} else if (rdma_idx < rdma_entries) {
 		const struct sge_ofld_rxq *rx =
 			&adap->sge.rdmarxq[rdma_idx * 4];
@@ -2543,6 +2574,7 @@ static int sge_queue_entries(const struct adapter *adap)
 {
 	return DIV_ROUND_UP(adap->sge.ethqsets, 4) +
 	       DIV_ROUND_UP(adap->sge.iscsiqsets, 4) +
+	       DIV_ROUND_UP(adap->sge.niscsitq, 4) +
 	       DIV_ROUND_UP(adap->sge.rdmaqs, 4) +
 	       DIV_ROUND_UP(adap->sge.rdmaciqs, 4) +
 	       DIV_ROUND_UP(MAX_CTRL_QUEUES, 4) + 1;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index adad73f7c8cd..d1e3f0997d6b 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -227,7 +227,7 @@ static DEFINE_MUTEX(uld_mutex);
 static LIST_HEAD(adap_rcu_list);
 static DEFINE_SPINLOCK(adap_rcu_lock);
 static struct cxgb4_uld_info ulds[CXGB4_ULD_MAX];
-static const char *uld_str[] = { "RDMA", "iSCSI" };
+static const char *const uld_str[] = { "RDMA", "iSCSI", "iSCSIT" };
 
 static void link_report(struct net_device *dev)
 {
@@ -664,6 +664,13 @@ out:
 	return 0;
 }
 
+/* Flush the aggregated lro sessions */
+static void uldrx_flush_handler(struct sge_rspq *q)
+{
+	if (ulds[q->uld].lro_flush)
+		ulds[q->uld].lro_flush(&q->lro_mgr);
+}
+
 /**
  *	uldrx_handler - response queue handler for ULD queues
  *	@q: the response queue that received the packet
@@ -677,6 +684,7 @@ static int uldrx_handler(struct sge_rspq *q, const __be64 *rsp,
 			 const struct pkt_gl *gl)
 {
 	struct sge_ofld_rxq *rxq = container_of(q, struct sge_ofld_rxq, rspq);
+	int ret;
 
 	/* FW can send CPLs encapsulated in a CPL_FW4_MSG.
 	 */
@@ -684,10 +692,19 @@ static int uldrx_handler(struct sge_rspq *q, const __be64 *rsp,
 	    ((const struct cpl_fw4_msg *)(rsp + 1))->type == FW_TYPE_RSSCPL)
 		rsp += 2;
 
-	if (ulds[q->uld].rx_handler(q->adap->uld_handle[q->uld], rsp, gl)) {
+	if (q->flush_handler)
+		ret = ulds[q->uld].lro_rx_handler(q->adap->uld_handle[q->uld],
+						  rsp, gl, &q->lro_mgr,
+						  &q->napi);
+	else
+		ret = ulds[q->uld].rx_handler(q->adap->uld_handle[q->uld],
+					      rsp, gl);
+
+	if (ret) {
 		rxq->stats.nomem++;
 		return -1;
 	}
+
 	if (gl == NULL)
 		rxq->stats.imm++;
 	else if (gl == CXGB4_MSG_AN)
@@ -754,6 +771,10 @@ static void name_msix_vecs(struct adapter *adap)
 		snprintf(adap->msix_info[msi_idx++].desc, n, "%s-iscsi%d",
 			 adap->port[0]->name, i);
 
+	for_each_iscsitrxq(&adap->sge, i)
+		snprintf(adap->msix_info[msi_idx++].desc, n, "%s-iSCSIT%d",
+			 adap->port[0]->name, i);
+
 	for_each_rdmarxq(&adap->sge, i)
 		snprintf(adap->msix_info[msi_idx++].desc, n, "%s-rdma%d",
 			 adap->port[0]->name, i);
@@ -767,6 +788,7 @@ static int request_msix_queue_irqs(struct adapter *adap)
 {
 	struct sge *s = &adap->sge;
 	int err, ethqidx, iscsiqidx = 0, rdmaqidx = 0, rdmaciqqidx = 0;
+	int iscsitqidx = 0;
 	int msi_index = 2;
 
 	err = request_irq(adap->msix_info[1].vec, t4_sge_intr_msix, 0,
@@ -792,6 +814,15 @@ static int request_msix_queue_irqs(struct adapter *adap)
 			goto unwind;
 		msi_index++;
 	}
+	for_each_iscsitrxq(s, iscsitqidx) {
+		err = request_irq(adap->msix_info[msi_index].vec,
+				  t4_sge_intr_msix, 0,
+				  adap->msix_info[msi_index].desc,
+				  &s->iscsitrxq[iscsitqidx].rspq);
+		if (err)
+			goto unwind;
+		msi_index++;
+	}
 	for_each_rdmarxq(s, rdmaqidx) {
 		err = request_irq(adap->msix_info[msi_index].vec,
 				  t4_sge_intr_msix, 0,
@@ -819,6 +850,9 @@ unwind:
 	while (--rdmaqidx >= 0)
 		free_irq(adap->msix_info[--msi_index].vec,
 			 &s->rdmarxq[rdmaqidx].rspq);
+	while (--iscsitqidx >= 0)
+		free_irq(adap->msix_info[--msi_index].vec,
+			 &s->iscsitrxq[iscsitqidx].rspq);
 	while (--iscsiqidx >= 0)
 		free_irq(adap->msix_info[--msi_index].vec,
 			 &s->iscsirxq[iscsiqidx].rspq);
@@ -840,6 +874,9 @@ static void free_msix_queue_irqs(struct adapter *adap)
 	for_each_iscsirxq(s, i)
 		free_irq(adap->msix_info[msi_index++].vec,
 			 &s->iscsirxq[i].rspq);
+	for_each_iscsitrxq(s, i)
+		free_irq(adap->msix_info[msi_index++].vec,
+			 &s->iscsitrxq[i].rspq);
 	for_each_rdmarxq(s, i)
 		free_irq(adap->msix_info[msi_index++].vec, &s->rdmarxq[i].rspq);
 	for_each_rdmaciq(s, i)
@@ -984,7 +1021,7 @@ static void enable_rx(struct adapter *adap)
 
 static int alloc_ofld_rxqs(struct adapter *adap, struct sge_ofld_rxq *q,
 			   unsigned int nq, unsigned int per_chan, int msi_idx,
-			   u16 *ids)
+			   u16 *ids, bool lro)
 {
 	int i, err;
 
@@ -994,7 +1031,9 @@ static int alloc_ofld_rxqs(struct adapter *adap, struct sge_ofld_rxq *q,
 		err = t4_sge_alloc_rxq(adap, &q->rspq, false,
 				       adap->port[i / per_chan],
 				       msi_idx, q->fl.size ? &q->fl : NULL,
-				       uldrx_handler, 0);
+				       uldrx_handler,
+				       lro ? uldrx_flush_handler : NULL,
+				       0);
 		if (err)
 			return err;
 		memset(&q->stats, 0, sizeof(q->stats));
@@ -1024,7 +1063,7 @@ static int setup_sge_queues(struct adapter *adap)
 		msi_idx = 1;         /* vector 0 is for non-queue interrupts */
 	else {
 		err = t4_sge_alloc_rxq(adap, &s->intrq, false, adap->port[0], 0,
-				       NULL, NULL, -1);
+				       NULL, NULL, NULL, -1);
 		if (err)
 			return err;
 		msi_idx = -((int)s->intrq.abs_id + 1);
@@ -1044,7 +1083,7 @@ static int setup_sge_queues(struct adapter *adap)
 	 * new/deleted queues.
 	 */
 	err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0],
-			       msi_idx, NULL, fwevtq_handler, -1);
+			       msi_idx, NULL, fwevtq_handler, NULL, -1);
 	if (err) {
 freeout:	t4_free_sge_resources(adap);
 		return err;
@@ -1062,6 +1101,7 @@ freeout: t4_free_sge_resources(adap);
 		err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev,
 				       msi_idx, &q->fl,
 				       t4_ethrx_handler,
+				       NULL,
 				       t4_get_mps_bg_map(adap,
 							 pi->tx_chan));
 		if (err)
@@ -1087,18 +1127,19 @@ freeout: t4_free_sge_resources(adap);
 		goto freeout;
 	}
 
-#define ALLOC_OFLD_RXQS(firstq, nq, per_chan, ids) do { \
-	err = alloc_ofld_rxqs(adap, firstq, nq, per_chan, msi_idx, ids); \
+#define ALLOC_OFLD_RXQS(firstq, nq, per_chan, ids, lro) do { \
+	err = alloc_ofld_rxqs(adap, firstq, nq, per_chan, msi_idx, ids, lro); \
 	if (err) \
 		goto freeout; \
 	if (msi_idx > 0) \
 		msi_idx += nq; \
 } while (0)
 
-	ALLOC_OFLD_RXQS(s->iscsirxq, s->iscsiqsets, j, s->iscsi_rxq);
-	ALLOC_OFLD_RXQS(s->rdmarxq, s->rdmaqs, 1, s->rdma_rxq);
+	ALLOC_OFLD_RXQS(s->iscsirxq, s->iscsiqsets, j, s->iscsi_rxq, false);
+	ALLOC_OFLD_RXQS(s->iscsitrxq, s->niscsitq, j, s->iscsit_rxq, true);
+	ALLOC_OFLD_RXQS(s->rdmarxq, s->rdmaqs, 1, s->rdma_rxq, false);
 	j = s->rdmaciqs / adap->params.nports; /* rdmaq queues per channel */
-	ALLOC_OFLD_RXQS(s->rdmaciq, s->rdmaciqs, j, s->rdma_ciq);
+	ALLOC_OFLD_RXQS(s->rdmaciq, s->rdmaciqs, j, s->rdma_ciq, false);
 
 #undef ALLOC_OFLD_RXQS
 
@@ -2430,6 +2471,9 @@ static void uld_attach(struct adapter *adap, unsigned int uld)
 	} else if (uld == CXGB4_ULD_ISCSI) {
 		lli.rxq_ids = adap->sge.iscsi_rxq;
 		lli.nrxq = adap->sge.iscsiqsets;
+	} else if (uld == CXGB4_ULD_ISCSIT) {
+		lli.rxq_ids = adap->sge.iscsit_rxq;
+		lli.nrxq = adap->sge.niscsitq;
 	}
 	lli.ntxq = adap->sge.iscsiqsets;
 	lli.nchan = adap->params.nports;
@@ -2437,6 +2481,10 @@ static void uld_attach(struct adapter *adap, unsigned int uld)
 	lli.wr_cred = adap->params.ofldq_wr_cred;
 	lli.adapter_type = adap->params.chip;
 	lli.iscsi_iolen = MAXRXDATA_G(t4_read_reg(adap, TP_PARA_REG2_A));
+	lli.iscsi_tagmask = t4_read_reg(adap, ULP_RX_ISCSI_TAGMASK_A);
+	lli.iscsi_pgsz_order = t4_read_reg(adap, ULP_RX_ISCSI_PSZ_A);
+	lli.iscsi_llimit = t4_read_reg(adap, ULP_RX_ISCSI_LLIMIT_A);
+	lli.iscsi_ppm = &adap->iscsi_ppm;
 	lli.cclk_ps = 1000000000 / adap->params.vpd.cclk;
 	lli.udb_density = 1 << adap->params.sge.eq_qpp;
 	lli.ucq_density = 1 << adap->params.sge.iq_qpp;
@@ -4336,6 +4384,9 @@ static void cfg_queues(struct adapter *adap)
 		s->rdmaciqs = (s->rdmaciqs / adap->params.nports) *
 				adap->params.nports;
 		s->rdmaciqs = max_t(int, s->rdmaciqs, adap->params.nports);
+
+		if (!is_t4(adap->params.chip))
+			s->niscsitq = s->iscsiqsets;
 	}
 
 	for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) {
@@ -4362,6 +4413,16 @@ static void cfg_queues(struct adapter *adap)
 		r->fl.size = 72;
 	}
 
+	if (!is_t4(adap->params.chip)) {
+		for (i = 0; i < ARRAY_SIZE(s->iscsitrxq); i++) {
+			struct sge_ofld_rxq *r = &s->iscsitrxq[i];
+
+			init_rspq(adap, &r->rspq, 5, 1, 1024, 64);
+			r->rspq.uld = CXGB4_ULD_ISCSIT;
+			r->fl.size = 72;
+		}
+	}
+
 	for (i = 0; i < ARRAY_SIZE(s->rdmarxq); i++) {
 		struct sge_ofld_rxq *r = &s->rdmarxq[i];
 
@@ -4436,9 +4497,13 @@ static int enable_msix(struct adapter *adap)
 
 	want = s->max_ethqsets + EXTRA_VECS;
 	if (is_offload(adap)) {
-		want += s->rdmaqs + s->rdmaciqs + s->iscsiqsets;
+		want += s->rdmaqs + s->rdmaciqs + s->iscsiqsets +
+			s->niscsitq;
 		/* need nchan for each possible ULD */
-		ofld_need = 3 * nchan;
+		if (is_t4(adap->params.chip))
+			ofld_need = 3 * nchan;
+		else
+			ofld_need = 4 * nchan;
 	}
 #ifdef CONFIG_CHELSIO_T4_DCB
 	/* For Data Center Bridging we need 8 Ethernet TX Priority Queues for
@@ -4470,12 +4535,16 @@ static int enable_msix(struct adapter *adap)
 		if (allocated < want) {
 			s->rdmaqs = nchan;
 			s->rdmaciqs = nchan;
+
+			if (!is_t4(adap->params.chip))
+				s->niscsitq = nchan;
 		}
 
 		/* leftovers go to OFLD */
 		i = allocated - EXTRA_VECS - s->max_ethqsets -
-		    s->rdmaqs - s->rdmaciqs;
+		    s->rdmaqs - s->rdmaciqs - s->niscsitq;
 		s->iscsiqsets = (i / nchan) * nchan;  /* round down */
+
 	}
 	for (i = 0; i < allocated; ++i)
 		adap->msix_info[i].vec = entries[i].vector;
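One consequence of the new iSCSIT queues worth calling out: enable_msix() above
now asks for s->niscsitq extra vectors up front, and on T5 the minimum offload
reserve grows from three to four groups of nchan. A small self-contained
arithmetic sketch (illustrative values only, not taken from real hardware):

#include <stdio.h>

int main(void)
{
	/* hypothetical 2-port T5 adapter */
	int nchan = 2, extra_vecs = 2, max_ethqsets = 32;
	int rdmaqs = nchan, rdmaciqs = nchan;
	int iscsiqsets = 2 * nchan;
	int niscsitq = iscsiqsets;	/* T5: niscsitq tracks iscsiqsets */

	int want = max_ethqsets + extra_vecs + rdmaqs + rdmaciqs +
		   iscsiqsets + niscsitq;
	int ofld_need = 4 * nchan;	/* T5: nchan per possible ULD */

	/* prints: want 46 MSI-X vectors, offload minimum 8 */
	printf("want %d MSI-X vectors, offload minimum %d\n", want, ofld_need);
	return 0;
}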
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ppm.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ppm.c
new file mode 100644
index 000000000000..d88a7a7b2400
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ppm.c
@@ -0,0 +1,464 @@
+/*
+ * cxgb4_ppm.c: Chelsio common library for T4/T5 iSCSI PagePod Manager
+ *
+ * Copyright (c) 2016 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Written by: Karen Xie (kxie@chelsio.com)
+ */
+
+#include <linux/kernel.h>
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/debugfs.h>
+#include <linux/export.h>
+#include <linux/list.h>
+#include <linux/skbuff.h>
+#include <linux/pci.h>
+#include <linux/scatterlist.h>
+
+#include "cxgb4_ppm.h"
+
+/* Direct Data Placement -
+ * Directly place the iSCSI Data-In or Data-Out PDU's payload into
+ * pre-posted final destination host-memory buffers based on the
+ * Initiator Task Tag (ITT) in Data-In or Target Task Tag (TTT)
+ * in Data-Out PDUs. The host memory address is programmed into
+ * h/w in the format of pagepod entries. The location of the
+ * pagepod entry is encoded into ddp tag which is used as the base
+ * for ITT/TTT.
+ */
+
+/* Direct-Data Placement page size adjustment
+ */
+int cxgbi_ppm_find_page_index(struct cxgbi_ppm *ppm, unsigned long pgsz)
+{
+	struct cxgbi_tag_format *tformat = &ppm->tformat;
+	int i;
+
+	for (i = 0; i < DDP_PGIDX_MAX; i++) {
+		if (pgsz == 1UL << (DDP_PGSZ_BASE_SHIFT +
+					 tformat->pgsz_order[i])) {
+			pr_debug("%s: %s ppm, pgsz %lu -> idx %d.\n",
+				 __func__, ppm->ndev->name, pgsz, i);
+			return i;
+		}
+	}
+	pr_info("ippm: ddp page size %lu not supported.\n", pgsz);
+	return DDP_PGIDX_MAX;
+}
+
+/* DDP setup & teardown
+ */
+static int ppm_find_unused_entries(unsigned long *bmap,
+				   unsigned int max_ppods,
+				   unsigned int start,
+				   unsigned int nr,
+				   unsigned int align_mask)
+{
+	unsigned long i;
+
+	i = bitmap_find_next_zero_area(bmap, max_ppods, start, nr, align_mask);
+
+	if (unlikely(i >= max_ppods) && (start > nr))
+		i = bitmap_find_next_zero_area(bmap, max_ppods, 0, start - 1,
+					       align_mask);
+	if (unlikely(i >= max_ppods))
+		return -ENOSPC;
+
+	bitmap_set(bmap, i, nr);
+	return (int)i;
+}
+
+static void ppm_mark_entries(struct cxgbi_ppm *ppm, int i, int count,
+			     unsigned long caller_data)
+{
+	struct cxgbi_ppod_data *pdata = ppm->ppod_data + i;
+
+	pdata->caller_data = caller_data;
+	pdata->npods = count;
+
+	if (pdata->color == ((1 << PPOD_IDX_SHIFT) - 1))
+		pdata->color = 0;
+	else
+		pdata->color++;
+}
+
+static int ppm_get_cpu_entries(struct cxgbi_ppm *ppm, unsigned int count,
+			       unsigned long caller_data)
+{
+	struct cxgbi_ppm_pool *pool;
+	unsigned int cpu;
+	int i;
+
+	cpu = get_cpu();
+	pool = per_cpu_ptr(ppm->pool, cpu);
+	spin_lock_bh(&pool->lock);
+	put_cpu();
+
+	i = ppm_find_unused_entries(pool->bmap, ppm->pool_index_max,
+				    pool->next, count, 0);
+	if (i < 0) {
+		pool->next = 0;
+		spin_unlock_bh(&pool->lock);
+		return -ENOSPC;
+	}
+
+	pool->next = i + count;
+	if (pool->next >= ppm->pool_index_max)
+		pool->next = 0;
+
+	spin_unlock_bh(&pool->lock);
+
+	pr_debug("%s: cpu %u, idx %d + %d (%d), next %u.\n",
+		 __func__, cpu, i, count, i + cpu * ppm->pool_index_max,
+		 pool->next);
+
+	i += cpu * ppm->pool_index_max;
+	ppm_mark_entries(ppm, i, count, caller_data);
+
+	return i;
+}
+
+static int ppm_get_entries(struct cxgbi_ppm *ppm, unsigned int count,
+			   unsigned long caller_data)
+{
+	int i;
+
+	spin_lock_bh(&ppm->map_lock);
+	i = ppm_find_unused_entries(ppm->ppod_bmap, ppm->bmap_index_max,
+				    ppm->next, count, 0);
+	if (i < 0) {
+		ppm->next = 0;
+		spin_unlock_bh(&ppm->map_lock);
+		pr_debug("ippm: NO suitable entries %u available.\n",
+			 count);
+		return -ENOSPC;
+	}
+
+	ppm->next = i + count;
+	if (ppm->next >= ppm->bmap_index_max)
+		ppm->next = 0;
+
+	spin_unlock_bh(&ppm->map_lock);
+
+	pr_debug("%s: idx %d + %d (%d), next %u, caller_data 0x%lx.\n",
+		 __func__, i, count, i + ppm->pool_rsvd, ppm->next,
+		 caller_data);
+
+	i += ppm->pool_rsvd;
+	ppm_mark_entries(ppm, i, count, caller_data);
+
+	return i;
+}
+
+static void ppm_unmark_entries(struct cxgbi_ppm *ppm, int i, int count)
+{
+	pr_debug("%s: idx %d + %d.\n", __func__, i, count);
+
+	if (i < ppm->pool_rsvd) {
+		unsigned int cpu;
+		struct cxgbi_ppm_pool *pool;
+
+		cpu = i / ppm->pool_index_max;
+		i %= ppm->pool_index_max;
+
+		pool = per_cpu_ptr(ppm->pool, cpu);
+		spin_lock_bh(&pool->lock);
+		bitmap_clear(pool->bmap, i, count);
+
+		if (i < pool->next)
+			pool->next = i;
+		spin_unlock_bh(&pool->lock);
+
+		pr_debug("%s: cpu %u, idx %d, next %u.\n",
+			 __func__, cpu, i, pool->next);
+	} else {
+		spin_lock_bh(&ppm->map_lock);
+
+		i -= ppm->pool_rsvd;
+		bitmap_clear(ppm->ppod_bmap, i, count);
+
+		if (i < ppm->next)
+			ppm->next = i;
+		spin_unlock_bh(&ppm->map_lock);
+
+		pr_debug("%s: idx %d, next %u.\n", __func__, i, ppm->next);
+	}
+}
+
+void cxgbi_ppm_ppod_release(struct cxgbi_ppm *ppm, u32 idx)
+{
+	struct cxgbi_ppod_data *pdata;
+
+	if (idx >= ppm->ppmax) {
+		pr_warn("ippm: idx too big %u > %u.\n", idx, ppm->ppmax);
+		return;
+	}
+
+	pdata = ppm->ppod_data + idx;
+	if (!pdata->npods) {
+		pr_warn("ippm: idx %u, npods 0.\n", idx);
+		return;
+	}
+
+	pr_debug("release idx %u, npods %u.\n", idx, pdata->npods);
+	ppm_unmark_entries(ppm, idx, pdata->npods);
+}
+EXPORT_SYMBOL(cxgbi_ppm_ppod_release);
+
+int cxgbi_ppm_ppods_reserve(struct cxgbi_ppm *ppm, unsigned short nr_pages,
+			    u32 per_tag_pg_idx, u32 *ppod_idx,
+			    u32 *ddp_tag, unsigned long caller_data)
+{
+	struct cxgbi_ppod_data *pdata;
+	unsigned int npods;
+	int idx = -1;
+	unsigned int hwidx;
+	u32 tag;
+
+	npods = (nr_pages + PPOD_PAGES_MAX - 1) >> PPOD_PAGES_SHIFT;
+	if (!npods) {
+		pr_warn("%s: pages %u -> npods %u, full.\n",
+			__func__, nr_pages, npods);
+		return -EINVAL;
+	}
+
+	/* grab from cpu pool first */
+	idx = ppm_get_cpu_entries(ppm, npods, caller_data);
+	/* try the general pool */
+	if (idx < 0)
+		idx = ppm_get_entries(ppm, npods, caller_data);
+	if (idx < 0) {
+		pr_debug("ippm: pages %u, nospc %u, nxt %u, 0x%lx.\n",
+			 nr_pages, npods, ppm->next, caller_data);
+		return idx;
+	}
+
+	pdata = ppm->ppod_data + idx;
+	hwidx = ppm->base_idx + idx;
+
+	tag = cxgbi_ppm_make_ddp_tag(hwidx, pdata->color);
+
+	if (per_tag_pg_idx)
+		tag |= (per_tag_pg_idx << 30) & 0xC0000000;
+
+	*ppod_idx = idx;
+	*ddp_tag = tag;
+
+	pr_debug("ippm: sg %u, tag 0x%x(%u,%u), data 0x%lx.\n",
+		 nr_pages, tag, idx, npods, caller_data);
+
+	return npods;
+}
+EXPORT_SYMBOL(cxgbi_ppm_ppods_reserve);
+
+void cxgbi_ppm_make_ppod_hdr(struct cxgbi_ppm *ppm, u32 tag,
+			     unsigned int tid, unsigned int offset,
+			     unsigned int length,
+			     struct cxgbi_pagepod_hdr *hdr)
+{
+	/* The ddp tag in pagepod should be with bit 31:30 set to 0.
+	 * The ddp Tag on the wire should be with non-zero 31:30 to the peer
+	 */
+	tag &= 0x3FFFFFFF;
+
+	hdr->vld_tid = htonl(PPOD_VALID_FLAG | PPOD_TID(tid));
+
+	hdr->rsvd = 0;
+	hdr->pgsz_tag_clr = htonl(tag & ppm->tformat.idx_clr_mask);
+	hdr->max_offset = htonl(length);
+	hdr->page_offset = htonl(offset);
+
+	pr_debug("ippm: tag 0x%x, tid 0x%x, xfer %u, off %u.\n",
+		 tag, tid, length, offset);
+}
+EXPORT_SYMBOL(cxgbi_ppm_make_ppod_hdr);
+
+static void ppm_free(struct cxgbi_ppm *ppm)
+{
+	vfree(ppm);
+}
+
+static void ppm_destroy(struct kref *kref)
+{
+	struct cxgbi_ppm *ppm = container_of(kref,
+					     struct cxgbi_ppm,
+					     refcnt);
+	pr_info("ippm: kref 0, destroy %s ppm 0x%p.\n",
+		ppm->ndev->name, ppm);
+
+	*ppm->ppm_pp = NULL;
+
+	free_percpu(ppm->pool);
+	ppm_free(ppm);
+}
+
+int cxgbi_ppm_release(struct cxgbi_ppm *ppm)
+{
+	if (ppm) {
+		int rv;
+
+		rv = kref_put(&ppm->refcnt, ppm_destroy);
+		return rv;
+	}
+	return 1;
+}
+
+static struct cxgbi_ppm_pool *ppm_alloc_cpu_pool(unsigned int *total,
+						 unsigned int *pcpu_ppmax)
+{
+	struct cxgbi_ppm_pool *pools;
+	unsigned int ppmax = (*total) / num_possible_cpus();
+	unsigned int max = (PCPU_MIN_UNIT_SIZE - sizeof(*pools)) << 3;
+	unsigned int bmap;
+	unsigned int alloc_sz;
+	unsigned int count = 0;
+	unsigned int cpu;
+
+	/* make sure per cpu pool fits into PCPU_MIN_UNIT_SIZE */
+	if (ppmax > max)
+		ppmax = max;
+
+	/* pool size must be multiple of unsigned long */
+	bmap = BITS_TO_LONGS(ppmax);
+	ppmax = (bmap * sizeof(unsigned long)) << 3;
+
+	alloc_sz = sizeof(*pools) + sizeof(unsigned long) * bmap;
+	pools = __alloc_percpu(alloc_sz, __alignof__(struct cxgbi_ppm_pool));
+
+	if (!pools)
+		return NULL;
+
+	for_each_possible_cpu(cpu) {
+		struct cxgbi_ppm_pool *ppool = per_cpu_ptr(pools, cpu);
+
+		memset(ppool, 0, alloc_sz);
+		spin_lock_init(&ppool->lock);
+		count += ppmax;
+	}
+
+	*total = count;
+	*pcpu_ppmax = ppmax;
+
+	return pools;
+}
+
+int cxgbi_ppm_init(void **ppm_pp, struct net_device *ndev,
+		   struct pci_dev *pdev, void *lldev,
+		   struct cxgbi_tag_format *tformat,
+		   unsigned int ppmax,
+		   unsigned int llimit,
+		   unsigned int start,
+		   unsigned int reserve_factor)
+{
+	struct cxgbi_ppm *ppm = (struct cxgbi_ppm *)(*ppm_pp);
+	struct cxgbi_ppm_pool *pool = NULL;
+	unsigned int ppmax_pool = 0;
+	unsigned int pool_index_max = 0;
+	unsigned int alloc_sz;
+	unsigned int ppod_bmap_size;
+
+	if (ppm) {
+		pr_info("ippm: %s, ppm 0x%p,0x%p already initialized, %u/%u.\n",
+			ndev->name, ppm_pp, ppm, ppm->ppmax, ppmax);
+		kref_get(&ppm->refcnt);
+		return 1;
+	}
+
+	if (reserve_factor) {
+		ppmax_pool = ppmax / reserve_factor;
+		pool = ppm_alloc_cpu_pool(&ppmax_pool, &pool_index_max);
+
+		pr_debug("%s: ppmax %u, cpu total %u, per cpu %u.\n",
+			 ndev->name, ppmax, ppmax_pool, pool_index_max);
+	}
+
+	ppod_bmap_size = BITS_TO_LONGS(ppmax - ppmax_pool);
+	alloc_sz = sizeof(struct cxgbi_ppm) +
+			ppmax * (sizeof(struct cxgbi_ppod_data)) +
+			ppod_bmap_size * sizeof(unsigned long);
+
+	ppm = vmalloc(alloc_sz);
+	if (!ppm)
+		goto release_ppm_pool;
+
+	memset(ppm, 0, alloc_sz);
+
+	ppm->ppod_bmap = (unsigned long *)(&ppm->ppod_data[ppmax]);
+
+	if ((ppod_bmap_size >> 3) > (ppmax - ppmax_pool)) {
+		unsigned int start = ppmax - ppmax_pool;
+		unsigned int end = ppod_bmap_size >> 3;
+
+		bitmap_set(ppm->ppod_bmap, ppmax, end - start);
+		pr_info("%s: %u - %u < %u * 8, mask extra bits %u, %u.\n",
+			__func__, ppmax, ppmax_pool, ppod_bmap_size, start,
+			end);
+	}
+
+	spin_lock_init(&ppm->map_lock);
+	kref_init(&ppm->refcnt);
+
+	memcpy(&ppm->tformat, tformat, sizeof(struct cxgbi_tag_format));
+
+	ppm->ppm_pp = ppm_pp;
+	ppm->ndev = ndev;
+	ppm->pdev = pdev;
+	ppm->lldev = lldev;
+	ppm->ppmax = ppmax;
+	ppm->next = 0;
+	ppm->llimit = llimit;
+	ppm->base_idx = start > llimit ?
+			(start - llimit + 1) >> PPOD_SIZE_SHIFT : 0;
+	ppm->bmap_index_max = ppmax - ppmax_pool;
+
+	ppm->pool = pool;
+	ppm->pool_rsvd = ppmax_pool;
+	ppm->pool_index_max = pool_index_max;
+
+	/* check one more time */
+	if (*ppm_pp) {
+		ppm_free(ppm);
+		ppm = (struct cxgbi_ppm *)(*ppm_pp);
+
+		pr_info("ippm: %s, ppm 0x%p,0x%p already initialized, %u/%u.\n",
+			ndev->name, ppm_pp, *ppm_pp, ppm->ppmax, ppmax);
+
+		kref_get(&ppm->refcnt);
+		return 1;
+	}
+	*ppm_pp = ppm;
+
+	ppm->tformat.pgsz_idx_dflt = cxgbi_ppm_find_page_index(ppm, PAGE_SIZE);
+
+	pr_info("ippm %s: ppm 0x%p, 0x%p, base %u/%u, pg %lu,%u, rsvd %u,%u.\n",
+		ndev->name, ppm_pp, ppm, ppm->base_idx, ppm->ppmax, PAGE_SIZE,
+		ppm->tformat.pgsz_idx_dflt, ppm->pool_rsvd,
+		ppm->pool_index_max);
+
+	return 0;
+
+release_ppm_pool:
+	free_percpu(pool);
+	return -ENOMEM;
+}
+EXPORT_SYMBOL(cxgbi_ppm_init);
+
+unsigned int cxgbi_tagmask_set(unsigned int ppmax)
+{
+	unsigned int bits = fls(ppmax);
+
+	if (bits > PPOD_IDX_MAX_SIZE)
+		bits = PPOD_IDX_MAX_SIZE;
+
+	pr_info("ippm: ppmax %u/0x%x -> bits %u, tagmask 0x%x.\n",
+		ppmax, ppmax, bits, 1 << (bits + PPOD_IDX_SHIFT));
+
+	return 1 << (bits + PPOD_IDX_SHIFT);
+}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ppm.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ppm.h
new file mode 100644
index 000000000000..d48732673b75
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ppm.h
@@ -0,0 +1,310 @@
+/*
+ * cxgb4_ppm.h: Chelsio common library for T4/T5 iSCSI ddp operation
+ *
+ * Copyright (c) 2016 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Written by: Karen Xie (kxie@chelsio.com)
+ */
+
+#ifndef __CXGB4PPM_H__
+#define __CXGB4PPM_H__
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/debugfs.h>
+#include <linux/list.h>
+#include <linux/netdevice.h>
+#include <linux/scatterlist.h>
+#include <linux/skbuff.h>
+#include <linux/vmalloc.h>
+#include <linux/bitmap.h>
+
+struct cxgbi_pagepod_hdr {
+	u32 vld_tid;
+	u32 pgsz_tag_clr;
+	u32 max_offset;
+	u32 page_offset;
+	u64 rsvd;
+};
+
+#define PPOD_PAGES_MAX			4
+struct cxgbi_pagepod {
+	struct cxgbi_pagepod_hdr hdr;
+	u64 addr[PPOD_PAGES_MAX + 1];
+};
+
+/* ddp tag format
+ * for a 32-bit tag:
+ * bit #
+ * 31 .....   .....  0
+ *     X   Y...Y  Z...Z, where
+ *     ^   ^^^^^  ^^^^
+ *     |   |      |____ when ddp bit = 0: color bits
+ *     |   |
+ *     |   |____ when ddp bit = 0: idx into the ddp memory region
+ *     |
+ *     |____ ddp bit: 0 - ddp tag, 1 - non-ddp tag
+ *
+ *  [page selector:2] [sw/free bits] [0] [idx] [color:6]
+ */
+
+#define DDP_PGIDX_MAX		4
+#define DDP_PGSZ_BASE_SHIFT	12	/* base page 4K */
+
+struct cxgbi_task_tag_info {
+	unsigned char flags;
+#define CXGBI_PPOD_INFO_FLAG_VALID	0x1
+#define CXGBI_PPOD_INFO_FLAG_MAPPED	0x2
+	unsigned char cid;
+	unsigned short pg_shift;
+	unsigned int npods;
+	unsigned int idx;
+	unsigned int tag;
+	struct cxgbi_pagepod_hdr hdr;
+	int nents;
+	int nr_pages;
+	struct scatterlist *sgl;
+};
+
+struct cxgbi_tag_format {
+	unsigned char pgsz_order[DDP_PGIDX_MAX];
+	unsigned char pgsz_idx_dflt;
+	unsigned char free_bits:4;
+	unsigned char color_bits:4;
+	unsigned char idx_bits;
+	unsigned char rsvd_bits;
+	unsigned int  no_ddp_mask;
+	unsigned int  idx_mask;
+	unsigned int  color_mask;
+	unsigned int  idx_clr_mask;
+	unsigned int  rsvd_mask;
+};
+
+struct cxgbi_ppod_data {
+	unsigned char pg_idx:2;
+	unsigned char color:6;
+	unsigned char chan_id;
+	unsigned short npods;
+	unsigned long caller_data;
+};
+
+/* per cpu ppm pool */
+struct cxgbi_ppm_pool {
+	unsigned int base;		/* base index */
+	unsigned int next;		/* next possible free index */
+	spinlock_t lock;		/* ppm pool lock */
+	unsigned long bmap[0];
+} ____cacheline_aligned_in_smp;
+
+struct cxgbi_ppm {
+	struct kref refcnt;
+	struct net_device *ndev;	/* net_device, 1st port */
+	struct pci_dev *pdev;
+	void *lldev;
+	void **ppm_pp;
+	struct cxgbi_tag_format tformat;
+	unsigned int ppmax;
+	unsigned int llimit;
+	unsigned int base_idx;
+
+	unsigned int pool_rsvd;
+	unsigned int pool_index_max;
+	struct cxgbi_ppm_pool __percpu *pool;
+	/* map lock */
+	spinlock_t map_lock;		/* ppm map lock */
+	unsigned int bmap_index_max;
+	unsigned int next;
+	unsigned long *ppod_bmap;
+	struct cxgbi_ppod_data ppod_data[0];
+};
+
+#define DDP_THRESHOLD	512
+
+#define PPOD_PAGES_SHIFT	2       /*  4 pages per pod */
+
+#define IPPOD_SIZE		sizeof(struct cxgbi_pagepod)  /*  64 */
+#define PPOD_SIZE_SHIFT		6
+
+/* page pods are allocated in groups of this size (must be power of 2) */
+#define PPOD_CLUSTER_SIZE	16U
+
+#define ULPMEM_DSGL_MAX_NPPODS	16	/*  1024/PPOD_SIZE */
+#define ULPMEM_IDATA_MAX_NPPODS	3	/* (PPOD_SIZE * 3 + ulptx hdr) < 256B */
+#define PCIE_MEMWIN_MAX_NPPODS	16	/*  1024/PPOD_SIZE */
+
+#define PPOD_COLOR_SHIFT	0
+#define PPOD_COLOR(x)		((x) << PPOD_COLOR_SHIFT)
+
+#define PPOD_IDX_SHIFT		6
+#define PPOD_IDX_MAX_SIZE	24
+
+#define PPOD_TID_SHIFT		0
+#define PPOD_TID(x)		((x) << PPOD_TID_SHIFT)
+
+#define PPOD_TAG_SHIFT		6
+#define PPOD_TAG(x)		((x) << PPOD_TAG_SHIFT)
+
+#define PPOD_VALID_SHIFT	24
+#define PPOD_VALID(x)		((x) << PPOD_VALID_SHIFT)
+#define PPOD_VALID_FLAG		PPOD_VALID(1U)
+
+#define PPOD_PI_EXTRACT_CTL_SHIFT	31
+#define PPOD_PI_EXTRACT_CTL(x)		((x) << PPOD_PI_EXTRACT_CTL_SHIFT)
+#define PPOD_PI_EXTRACT_CTL_FLAG	V_PPOD_PI_EXTRACT_CTL(1U)
+
+#define PPOD_PI_TYPE_SHIFT		29
+#define PPOD_PI_TYPE_MASK		0x3
+#define PPOD_PI_TYPE(x)			((x) << PPOD_PI_TYPE_SHIFT)
+
+#define PPOD_PI_CHECK_CTL_SHIFT		27
+#define PPOD_PI_CHECK_CTL_MASK		0x3
+#define PPOD_PI_CHECK_CTL(x)		((x) << PPOD_PI_CHECK_CTL_SHIFT)
+
+#define PPOD_PI_REPORT_CTL_SHIFT	25
+#define PPOD_PI_REPORT_CTL_MASK		0x3
+#define PPOD_PI_REPORT_CTL(x)		((x) << PPOD_PI_REPORT_CTL_SHIFT)
+
+static inline int cxgbi_ppm_is_ddp_tag(struct cxgbi_ppm *ppm, u32 tag)
+{
+	return !(tag & ppm->tformat.no_ddp_mask);
+}
+
+static inline int cxgbi_ppm_sw_tag_is_usable(struct cxgbi_ppm *ppm,
+					     u32 tag)
+{
+	/* the sw tag must be using <= 31 bits */
+	return !(tag & 0x80000000U);
+}
+
+static inline int cxgbi_ppm_make_non_ddp_tag(struct cxgbi_ppm *ppm,
+					     u32 sw_tag,
+					     u32 *final_tag)
+{
+	struct cxgbi_tag_format *tformat = &ppm->tformat;
+
+	if (!cxgbi_ppm_sw_tag_is_usable(ppm, sw_tag)) {
+		pr_info("sw_tag 0x%x NOT usable.\n", sw_tag);
+		return -EINVAL;
+	}
+
+	if (!sw_tag) {
+		*final_tag = tformat->no_ddp_mask;
+	} else {
+		unsigned int shift = tformat->idx_bits + tformat->color_bits;
+		u32 lower = sw_tag & tformat->idx_clr_mask;
+		u32 upper = (sw_tag >> shift) << (shift + 1);
+
+		*final_tag = upper | tformat->no_ddp_mask | lower;
+	}
+	return 0;
+}
+
+static inline u32 cxgbi_ppm_decode_non_ddp_tag(struct cxgbi_ppm *ppm,
+					       u32 tag)
+{
+	struct cxgbi_tag_format *tformat = &ppm->tformat;
+	unsigned int shift = tformat->idx_bits + tformat->color_bits;
+	u32 lower = tag & tformat->idx_clr_mask;
+	u32 upper = (tag >> tformat->rsvd_bits) << shift;
+
+	return upper | lower;
+}
+
+static inline u32 cxgbi_ppm_ddp_tag_get_idx(struct cxgbi_ppm *ppm,
+					    u32 ddp_tag)
+{
+	u32 hw_idx = (ddp_tag >> PPOD_IDX_SHIFT) &
+		     ppm->tformat.idx_mask;
+
+	return hw_idx - ppm->base_idx;
+}
+
+static inline u32 cxgbi_ppm_make_ddp_tag(unsigned int hw_idx,
+					 unsigned char color)
+{
+	return (hw_idx << PPOD_IDX_SHIFT) | ((u32)color);
+}
+
+static inline unsigned long
+cxgbi_ppm_get_tag_caller_data(struct cxgbi_ppm *ppm,
+			      u32 ddp_tag)
+{
+	u32 idx = cxgbi_ppm_ddp_tag_get_idx(ppm, ddp_tag);
+
+	return ppm->ppod_data[idx].caller_data;
+}
+
+/* sw bits are the free bits */
+static inline int cxgbi_ppm_ddp_tag_update_sw_bits(struct cxgbi_ppm *ppm,
+						   u32 val, u32 orig_tag,
+						   u32 *final_tag)
+{
+	struct cxgbi_tag_format *tformat = &ppm->tformat;
+	u32 v = val >> tformat->free_bits;
+
+	if (v) {
+		pr_info("sw_bits 0x%x too large, avail bits %u.\n",
+			val, tformat->free_bits);
+		return -EINVAL;
+	}
+	if (!cxgbi_ppm_is_ddp_tag(ppm, orig_tag))
+		return -EINVAL;
+
+	*final_tag = (val << tformat->rsvd_bits) |
+		     (orig_tag & ppm->tformat.rsvd_mask);
+	return 0;
+}
+
+static inline void cxgbi_ppm_ppod_clear(struct cxgbi_pagepod *ppod)
+{
+	ppod->hdr.vld_tid = 0U;
+}
+
+static inline void cxgbi_tagmask_check(unsigned int tagmask,
+				       struct cxgbi_tag_format *tformat)
+{
+	unsigned int bits = fls(tagmask);
+
+	/* reserve top most 2 bits for page selector */
+	tformat->free_bits = 32 - 2 - bits;
+	tformat->rsvd_bits = bits;
+	tformat->color_bits = PPOD_IDX_SHIFT;
+	tformat->idx_bits = bits - 1 - PPOD_IDX_SHIFT;
+	tformat->no_ddp_mask = 1 << (bits - 1);
+	tformat->idx_mask = (1 << tformat->idx_bits) - 1;
+	tformat->color_mask = (1 << PPOD_IDX_SHIFT) - 1;
+	tformat->idx_clr_mask = (1 << (bits - 1)) - 1;
+	tformat->rsvd_mask = (1 << bits) - 1;
+
+	pr_info("ippm: tagmask 0x%x, rsvd %u=%u+%u+1, mask 0x%x,0x%x, "
+		"pg %u,%u,%u,%u.\n",
+		tagmask, tformat->rsvd_bits, tformat->idx_bits,
+		tformat->color_bits, tformat->no_ddp_mask, tformat->rsvd_mask,
+		tformat->pgsz_order[0], tformat->pgsz_order[1],
+		tformat->pgsz_order[2], tformat->pgsz_order[3]);
+}
+
+int cxgbi_ppm_find_page_index(struct cxgbi_ppm *ppm, unsigned long pgsz);
+void cxgbi_ppm_make_ppod_hdr(struct cxgbi_ppm *ppm, u32 tag,
+			     unsigned int tid, unsigned int offset,
+			     unsigned int length,
+			     struct cxgbi_pagepod_hdr *hdr);
+void cxgbi_ppm_ppod_release(struct cxgbi_ppm *, u32 idx);
+int cxgbi_ppm_ppods_reserve(struct cxgbi_ppm *, unsigned short nr_pages,
+			    u32 per_tag_pg_idx, u32 *ppod_idx, u32 *ddp_tag,
+			    unsigned long caller_data);
+int cxgbi_ppm_init(void **ppm_pp, struct net_device *, struct pci_dev *,
+		   void *lldev, struct cxgbi_tag_format *,
+		   unsigned int ppmax, unsigned int llimit,
+		   unsigned int start,
+		   unsigned int reserve_factor);
+int cxgbi_ppm_release(struct cxgbi_ppm *ppm);
+void cxgbi_tagmask_check(unsigned int tagmask, struct cxgbi_tag_format *);
+unsigned int cxgbi_tagmask_set(unsigned int ppmax);
+
+#endif	/*__CXGB4PPM_H__*/
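Taken together, the header gives a ULD a short reserve/describe/release
lifecycle for each DDP mapping. A hypothetical caller might use it as below;
everything except the cxgbi_ppm_* calls (the function names and the
tid/xferlen parameters) is a placeholder:

#include "cxgb4_ppm.h"

/* Hypothetical sketch: set up one DDP mapping covering nr_pages pages. */
static int example_ddp_setup(struct cxgbi_ppm *ppm, unsigned int tid,
			     unsigned int xferlen, unsigned int nr_pages)
{
	struct cxgbi_pagepod_hdr hdr;
	u32 ppod_idx, ddp_tag;
	int npods;

	/* reserve enough pods; the returned tag has idx + color encoded */
	npods = cxgbi_ppm_ppods_reserve(ppm, nr_pages, 0, &ppod_idx,
					&ddp_tag, 0UL);
	if (npods < 0)
		return npods;	/* caller falls back to a non-ddp tag */

	/* build the pagepod header the hardware will consume */
	cxgbi_ppm_make_ppod_hdr(ppm, ddp_tag, tid, 0, xferlen, &hdr);

	/* ... write hdr plus the page addresses to adapter memory ... */

	return 0;
}

/* and on task completion the pods go back to the bitmap: */
static void example_ddp_teardown(struct cxgbi_ppm *ppm, u32 ppod_idx)
{
	cxgbi_ppm_ppod_release(ppm, ppod_idx);
}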
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
index cf711d5f15be..f3c58aaa932d 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
@@ -191,6 +191,7 @@ static inline void set_wr_txq(struct sk_buff *skb, int prio, int queue)
 enum cxgb4_uld {
 	CXGB4_ULD_RDMA,
 	CXGB4_ULD_ISCSI,
+	CXGB4_ULD_ISCSIT,
 	CXGB4_ULD_MAX
 };
 
@@ -212,6 +213,7 @@ struct l2t_data;
 struct net_device;
 struct pkt_gl;
 struct tp_tcp_stats;
+struct t4_lro_mgr;
 
 struct cxgb4_range {
 	unsigned int start;
@@ -273,6 +275,10 @@ struct cxgb4_lld_info {
 	unsigned int max_ordird_qp;          /* Max ORD/IRD depth per RDMA QP */
 	unsigned int max_ird_adapter;        /* Max IRD memory per adapter */
 	bool ulptx_memwrite_dsgl;            /* use of T5 DSGL allowed */
+	unsigned int iscsi_tagmask;          /* iscsi ddp tag mask */
+	unsigned int iscsi_pgsz_order;       /* iscsi ddp page size orders */
+	unsigned int iscsi_llimit;           /* chip's iscsi region llimit */
+	void **iscsi_ppm;                    /* iscsi page pod manager */
 	int nodeid;                          /* device numa node id */
 };
 
@@ -283,6 +289,11 @@ struct cxgb4_uld_info {
 			  const struct pkt_gl *gl);
 	int (*state_change)(void *handle, enum cxgb4_state new_state);
 	int (*control)(void *handle, enum cxgb4_control control, ...);
+	int (*lro_rx_handler)(void *handle, const __be64 *rsp,
+			      const struct pkt_gl *gl,
+			      struct t4_lro_mgr *lro_mgr,
+			      struct napi_struct *napi);
+	void (*lro_flush)(struct t4_lro_mgr *);
 };
 
 int cxgb4_register_uld(enum cxgb4_uld type, const struct cxgb4_uld_info *p);
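The two new callbacks are what the rx path keys off: a queue allocated with a
flush handler dispatches completions to lro_rx_handler, everything else stays
on rx_handler. A hypothetical registration for the new ULD slot (the stub
names and bodies are placeholders, and other cxgb4_uld_info callbacks are
omitted for brevity; a flush body could use the drain loop sketched after the
cxgb4.h diff above):

#include "cxgb4_uld.h"

static int example_rx(void *handle, const __be64 *rsp,
		      const struct pkt_gl *gl)
{
	return 0;	/* placeholder: non-LRO completion path */
}

static int example_lro_rx(void *handle, const __be64 *rsp,
			  const struct pkt_gl *gl,
			  struct t4_lro_mgr *lro_mgr,
			  struct napi_struct *napi)
{
	return 0;	/* placeholder: coalesce or deliver the PDU here */
}

static void example_lro_flush_cb(struct t4_lro_mgr *lro_mgr)
{
	/* placeholder: push out sessions still queued on lro_mgr->lroq */
}

static struct cxgb4_uld_info example_iscsit_uld = {
	.rx_handler	= example_rx,
	.lro_rx_handler	= example_lro_rx,
	.lro_flush	= example_lro_flush_cb,
};

/* registration would then be:
 *	cxgb4_register_uld(CXGB4_ULD_ISCSIT, &example_iscsit_uld);
 */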
diff --git a/drivers/net/ethernet/chelsio/cxgb4/l2t.c b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
index 5b0f3ef348e9..60a26037a1c6 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/l2t.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
@@ -48,8 +48,6 @@
48#include "t4_regs.h" 48#include "t4_regs.h"
49#include "t4_values.h" 49#include "t4_values.h"
50 50
51#define VLAN_NONE 0xfff
52
53/* identifies sync vs async L2T_WRITE_REQs */ 51/* identifies sync vs async L2T_WRITE_REQs */
54#define SYNC_WR_S 12 52#define SYNC_WR_S 12
55#define SYNC_WR_V(x) ((x) << SYNC_WR_S) 53#define SYNC_WR_V(x) ((x) << SYNC_WR_S)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/l2t.h b/drivers/net/ethernet/chelsio/cxgb4/l2t.h
index 4e2d47ac102b..79665bd8f881 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/l2t.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/l2t.h
@@ -39,6 +39,8 @@
39#include <linux/if_ether.h> 39#include <linux/if_ether.h>
40#include <linux/atomic.h> 40#include <linux/atomic.h>
41 41
42#define VLAN_NONE 0xfff
43
42enum { L2T_SIZE = 4096 }; /* # of L2T entries */ 44enum { L2T_SIZE = 4096 }; /* # of L2T entries */
43 45
44enum { 46enum {
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index deca4a2956cc..13b144bcf725 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -2157,8 +2157,11 @@ static int process_responses(struct sge_rspq *q, int budget)
2157 2157
2158 while (likely(budget_left)) { 2158 while (likely(budget_left)) {
2159 rc = (void *)q->cur_desc + (q->iqe_len - sizeof(*rc)); 2159 rc = (void *)q->cur_desc + (q->iqe_len - sizeof(*rc));
2160 if (!is_new_response(rc, q)) 2160 if (!is_new_response(rc, q)) {
2161 if (q->flush_handler)
2162 q->flush_handler(q);
2161 break; 2163 break;
2164 }
2162 2165
2163 dma_rmb(); 2166 dma_rmb();
2164 rsp_type = RSPD_TYPE_G(rc->type_gen); 2167 rsp_type = RSPD_TYPE_G(rc->type_gen);
@@ -2544,7 +2547,8 @@ static void __iomem *bar2_address(struct adapter *adapter,
2544 */ 2547 */
2545int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq, 2548int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
2546 struct net_device *dev, int intr_idx, 2549 struct net_device *dev, int intr_idx,
2547 struct sge_fl *fl, rspq_handler_t hnd, int cong) 2550 struct sge_fl *fl, rspq_handler_t hnd,
2551 rspq_flush_handler_t flush_hnd, int cong)
2548{ 2552{
2549 int ret, flsz = 0; 2553 int ret, flsz = 0;
2550 struct fw_iq_cmd c; 2554 struct fw_iq_cmd c;
@@ -2648,6 +2652,10 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
2648 iq->size--; /* subtract status entry */ 2652 iq->size--; /* subtract status entry */
2649 iq->netdev = dev; 2653 iq->netdev = dev;
2650 iq->handler = hnd; 2654 iq->handler = hnd;
2655 iq->flush_handler = flush_hnd;
2656
2657 memset(&iq->lro_mgr, 0, sizeof(struct t4_lro_mgr));
2658 skb_queue_head_init(&iq->lro_mgr.lroq);
2651 2659
2652 /* set offset to -1 to distinguish ingress queues without FL */ 2660 /* set offset to -1 to distinguish ingress queues without FL */
2653 iq->offset = fl ? 0 : -1; 2661 iq->offset = fl ? 0 : -1;
@@ -2992,6 +3000,7 @@ void t4_free_sge_resources(struct adapter *adap)
2992 3000
2993 /* clean up RDMA and iSCSI Rx queues */ 3001 /* clean up RDMA and iSCSI Rx queues */
2994 t4_free_ofld_rxqs(adap, adap->sge.iscsiqsets, adap->sge.iscsirxq); 3002 t4_free_ofld_rxqs(adap, adap->sge.iscsiqsets, adap->sge.iscsirxq);
3003 t4_free_ofld_rxqs(adap, adap->sge.niscsitq, adap->sge.iscsitrxq);
2995 t4_free_ofld_rxqs(adap, adap->sge.rdmaqs, adap->sge.rdmarxq); 3004 t4_free_ofld_rxqs(adap, adap->sge.rdmaqs, adap->sge.rdmarxq);
2996 t4_free_ofld_rxqs(adap, adap->sge.rdmaciqs, adap->sge.rdmaciq); 3005 t4_free_ofld_rxqs(adap, adap->sge.rdmaciqs, adap->sge.rdmaciq);
2997 3006
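On the driver side, the process_responses() hunk above invokes q->flush_handler(q) as soon as the queue runs out of new responses, so traffic coalesced during the poll is never stranded until the next interrupt; the handler itself is installed through the new flush_hnd argument of t4_sge_alloc_rxq(). A rough sketch of such a handler, assuming only that the driver can map a queue back to its owning ULD (the uld_lookup() helper is hypothetical):

/* Hypothetical flush handler: fan the per-queue flush out to the
 * owning ULD's new lro_flush callback.
 */
static void uldrx_flush_handler(struct sge_rspq *q)
{
	const struct cxgb4_uld_info *uld = uld_lookup(q);	/* hypothetical */

	if (uld && uld->lro_flush)
		uld->lro_flush(&q->lro_mgr);
}

This is the function a caller would then pass as flush_hnd when allocating the ULD's Rx queues.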
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
index 1d2d1da40c80..80417fc564d4 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
@@ -51,6 +51,7 @@ enum {
51 CPL_TX_PKT = 0xE, 51 CPL_TX_PKT = 0xE,
52 CPL_L2T_WRITE_REQ = 0x12, 52 CPL_L2T_WRITE_REQ = 0x12,
53 CPL_TID_RELEASE = 0x1A, 53 CPL_TID_RELEASE = 0x1A,
54 CPL_TX_DATA_ISO = 0x1F,
54 55
55 CPL_CLOSE_LISTSRV_RPL = 0x20, 56 CPL_CLOSE_LISTSRV_RPL = 0x20,
56 CPL_L2T_WRITE_RPL = 0x23, 57 CPL_L2T_WRITE_RPL = 0x23,
@@ -344,6 +345,87 @@ struct cpl_pass_open_rpl {
344 u8 status; 345 u8 status;
345}; 346};
346 347
348struct tcp_options {
349 __be16 mss;
350 __u8 wsf;
351#if defined(__LITTLE_ENDIAN_BITFIELD)
352 __u8:4;
353 __u8 unknown:1;
354 __u8:1;
355 __u8 sack:1;
356 __u8 tstamp:1;
357#else
358 __u8 tstamp:1;
359 __u8 sack:1;
360 __u8:1;
361 __u8 unknown:1;
362 __u8:4;
363#endif
364};
365
366struct cpl_pass_accept_req {
367 union opcode_tid ot;
368 __be16 rsvd;
369 __be16 len;
370 __be32 hdr_len;
371 __be16 vlan;
372 __be16 l2info;
373 __be32 tos_stid;
374 struct tcp_options tcpopt;
375};
376
377/* cpl_pass_accept_req.hdr_len fields */
378#define SYN_RX_CHAN_S 0
379#define SYN_RX_CHAN_M 0xF
380#define SYN_RX_CHAN_V(x) ((x) << SYN_RX_CHAN_S)
381#define SYN_RX_CHAN_G(x) (((x) >> SYN_RX_CHAN_S) & SYN_RX_CHAN_M)
382
383#define TCP_HDR_LEN_S 10
384#define TCP_HDR_LEN_M 0x3F
385#define TCP_HDR_LEN_V(x) ((x) << TCP_HDR_LEN_S)
386#define TCP_HDR_LEN_G(x) (((x) >> TCP_HDR_LEN_S) & TCP_HDR_LEN_M)
387
388#define IP_HDR_LEN_S 16
389#define IP_HDR_LEN_M 0x3FF
390#define IP_HDR_LEN_V(x) ((x) << IP_HDR_LEN_S)
391#define IP_HDR_LEN_G(x) (((x) >> IP_HDR_LEN_S) & IP_HDR_LEN_M)
392
393#define ETH_HDR_LEN_S 26
394#define ETH_HDR_LEN_M 0x1F
395#define ETH_HDR_LEN_V(x) ((x) << ETH_HDR_LEN_S)
396#define ETH_HDR_LEN_G(x) (((x) >> ETH_HDR_LEN_S) & ETH_HDR_LEN_M)
397
398/* cpl_pass_accept_req.l2info fields */
399#define SYN_MAC_IDX_S 0
400#define SYN_MAC_IDX_M 0x1FF
401#define SYN_MAC_IDX_V(x) ((x) << SYN_MAC_IDX_S)
402#define SYN_MAC_IDX_G(x) (((x) >> SYN_MAC_IDX_S) & SYN_MAC_IDX_M)
403
404#define SYN_XACT_MATCH_S 9
405#define SYN_XACT_MATCH_V(x) ((x) << SYN_XACT_MATCH_S)
406#define SYN_XACT_MATCH_F SYN_XACT_MATCH_V(1U)
407
408#define SYN_INTF_S 12
409#define SYN_INTF_M 0xF
410#define SYN_INTF_V(x) ((x) << SYN_INTF_S)
411#define SYN_INTF_G(x) (((x) >> SYN_INTF_S) & SYN_INTF_M)
412
413enum { /* TCP congestion control algorithms */
414 CONG_ALG_RENO,
415 CONG_ALG_TAHOE,
416 CONG_ALG_NEWRENO,
417 CONG_ALG_HIGHSPEED
418};
419
420#define CONG_CNTRL_S 14
421#define CONG_CNTRL_M 0x3
422#define CONG_CNTRL_V(x) ((x) << CONG_CNTRL_S)
423#define CONG_CNTRL_G(x) (((x) >> CONG_CNTRL_S) & CONG_CNTRL_M)
424
425#define T5_ISS_S 18
426#define T5_ISS_V(x) ((x) << T5_ISS_S)
427#define T5_ISS_F T5_ISS_V(1U)
428
347struct cpl_pass_accept_rpl { 429struct cpl_pass_accept_rpl {
348 WR_HDR; 430 WR_HDR;
349 union opcode_tid ot; 431 union opcode_tid ot;
@@ -818,6 +900,110 @@ struct cpl_iscsi_hdr {
818#define ISCSI_DDP_V(x) ((x) << ISCSI_DDP_S) 900#define ISCSI_DDP_V(x) ((x) << ISCSI_DDP_S)
819#define ISCSI_DDP_F ISCSI_DDP_V(1U) 901#define ISCSI_DDP_F ISCSI_DDP_V(1U)
820 902
903struct cpl_rx_data_ddp {
904 union opcode_tid ot;
905 __be16 urg;
906 __be16 len;
907 __be32 seq;
908 union {
909 __be32 nxt_seq;
910 __be32 ddp_report;
911 };
912 __be32 ulp_crc;
913 __be32 ddpvld;
914};
915
916#define cpl_rx_iscsi_ddp cpl_rx_data_ddp
917
918struct cpl_iscsi_data {
919 union opcode_tid ot;
920 __u8 rsvd0[2];
921 __be16 len;
922 __be32 seq;
923 __be16 urg;
924 __u8 rsvd1;
925 __u8 status;
926};
927
928struct cpl_tx_data_iso {
929 __be32 op_to_scsi;
930 __u8 reserved1;
931 __u8 ahs_len;
932 __be16 mpdu;
933 __be32 burst_size;
934 __be32 len;
935 __be32 reserved2_seglen_offset;
936 __be32 datasn_offset;
937 __be32 buffer_offset;
938 __be32 reserved3;
939
940 /* encapsulated CPL_TX_DATA follows here */
941};
942
943/* cpl_tx_data_iso.op_to_scsi fields */
944#define CPL_TX_DATA_ISO_OP_S 24
945#define CPL_TX_DATA_ISO_OP_M 0xff
946#define CPL_TX_DATA_ISO_OP_V(x) ((x) << CPL_TX_DATA_ISO_OP_S)
947#define CPL_TX_DATA_ISO_OP_G(x) \
948 (((x) >> CPL_TX_DATA_ISO_OP_S) & CPL_TX_DATA_ISO_OP_M)
949
950#define CPL_TX_DATA_ISO_FIRST_S 23
951#define CPL_TX_DATA_ISO_FIRST_M 0x1
952#define CPL_TX_DATA_ISO_FIRST_V(x) ((x) << CPL_TX_DATA_ISO_FIRST_S)
953#define CPL_TX_DATA_ISO_FIRST_G(x) \
954 (((x) >> CPL_TX_DATA_ISO_FIRST_S) & CPL_TX_DATA_ISO_FIRST_M)
955#define CPL_TX_DATA_ISO_FIRST_F CPL_TX_DATA_ISO_FIRST_V(1U)
956
957#define CPL_TX_DATA_ISO_LAST_S 22
958#define CPL_TX_DATA_ISO_LAST_M 0x1
959#define CPL_TX_DATA_ISO_LAST_V(x) ((x) << CPL_TX_DATA_ISO_LAST_S)
960#define CPL_TX_DATA_ISO_LAST_G(x) \
961 (((x) >> CPL_TX_DATA_ISO_LAST_S) & CPL_TX_DATA_ISO_LAST_M)
962#define CPL_TX_DATA_ISO_LAST_F CPL_TX_DATA_ISO_LAST_V(1U)
963
964#define CPL_TX_DATA_ISO_CPLHDRLEN_S 21
965#define CPL_TX_DATA_ISO_CPLHDRLEN_M 0x1
966#define CPL_TX_DATA_ISO_CPLHDRLEN_V(x) ((x) << CPL_TX_DATA_ISO_CPLHDRLEN_S)
967#define CPL_TX_DATA_ISO_CPLHDRLEN_G(x) \
968 (((x) >> CPL_TX_DATA_ISO_CPLHDRLEN_S) & CPL_TX_DATA_ISO_CPLHDRLEN_M)
969#define CPL_TX_DATA_ISO_CPLHDRLEN_F CPL_TX_DATA_ISO_CPLHDRLEN_V(1U)
970
971#define CPL_TX_DATA_ISO_HDRCRC_S 20
972#define CPL_TX_DATA_ISO_HDRCRC_M 0x1
973#define CPL_TX_DATA_ISO_HDRCRC_V(x) ((x) << CPL_TX_DATA_ISO_HDRCRC_S)
974#define CPL_TX_DATA_ISO_HDRCRC_G(x) \
975 (((x) >> CPL_TX_DATA_ISO_HDRCRC_S) & CPL_TX_DATA_ISO_HDRCRC_M)
976#define CPL_TX_DATA_ISO_HDRCRC_F CPL_TX_DATA_ISO_HDRCRC_V(1U)
977
978#define CPL_TX_DATA_ISO_PLDCRC_S 19
979#define CPL_TX_DATA_ISO_PLDCRC_M 0x1
980#define CPL_TX_DATA_ISO_PLDCRC_V(x) ((x) << CPL_TX_DATA_ISO_PLDCRC_S)
981#define CPL_TX_DATA_ISO_PLDCRC_G(x) \
982 (((x) >> CPL_TX_DATA_ISO_PLDCRC_S) & CPL_TX_DATA_ISO_PLDCRC_M)
983#define CPL_TX_DATA_ISO_PLDCRC_F CPL_TX_DATA_ISO_PLDCRC_V(1U)
984
985#define CPL_TX_DATA_ISO_IMMEDIATE_S 18
986#define CPL_TX_DATA_ISO_IMMEDIATE_M 0x1
987#define CPL_TX_DATA_ISO_IMMEDIATE_V(x) ((x) << CPL_TX_DATA_ISO_IMMEDIATE_S)
988#define CPL_TX_DATA_ISO_IMMEDIATE_G(x) \
989 (((x) >> CPL_TX_DATA_ISO_IMMEDIATE_S) & CPL_TX_DATA_ISO_IMMEDIATE_M)
990#define CPL_TX_DATA_ISO_IMMEDIATE_F CPL_TX_DATA_ISO_IMMEDIATE_V(1U)
991
992#define CPL_TX_DATA_ISO_SCSI_S 16
993#define CPL_TX_DATA_ISO_SCSI_M 0x3
994#define CPL_TX_DATA_ISO_SCSI_V(x) ((x) << CPL_TX_DATA_ISO_SCSI_S)
995#define CPL_TX_DATA_ISO_SCSI_G(x) \
996 (((x) >> CPL_TX_DATA_ISO_SCSI_S) & CPL_TX_DATA_ISO_SCSI_M)
997
998/* cpl_tx_data_iso.reserved2_seglen_offset fields */
999#define CPL_TX_DATA_ISO_SEGLEN_OFFSET_S 0
1000#define CPL_TX_DATA_ISO_SEGLEN_OFFSET_M 0xffffff
1001#define CPL_TX_DATA_ISO_SEGLEN_OFFSET_V(x) \
1002 ((x) << CPL_TX_DATA_ISO_SEGLEN_OFFSET_S)
1003#define CPL_TX_DATA_ISO_SEGLEN_OFFSET_G(x) \
1004 (((x) >> CPL_TX_DATA_ISO_SEGLEN_OFFSET_S) & \
1005 CPL_TX_DATA_ISO_SEGLEN_OFFSET_M)
1006
821struct cpl_rx_data { 1007struct cpl_rx_data {
822 union opcode_tid ot; 1008 union opcode_tid ot;
823 __be16 rsvd; 1009 __be16 rsvd;
@@ -854,6 +1040,15 @@ struct cpl_rx_data_ack {
854#define RX_FORCE_ACK_V(x) ((x) << RX_FORCE_ACK_S) 1040#define RX_FORCE_ACK_V(x) ((x) << RX_FORCE_ACK_S)
855#define RX_FORCE_ACK_F RX_FORCE_ACK_V(1U) 1041#define RX_FORCE_ACK_F RX_FORCE_ACK_V(1U)
856 1042
1043#define RX_DACK_MODE_S 29
1044#define RX_DACK_MODE_M 0x3
1045#define RX_DACK_MODE_V(x) ((x) << RX_DACK_MODE_S)
1046#define RX_DACK_MODE_G(x) (((x) >> RX_DACK_MODE_S) & RX_DACK_MODE_M)
1047
1048#define RX_DACK_CHANGE_S 31
1049#define RX_DACK_CHANGE_V(x) ((x) << RX_DACK_CHANGE_S)
1050#define RX_DACK_CHANGE_F RX_DACK_CHANGE_V(1U)
1051
857struct cpl_rx_pkt { 1052struct cpl_rx_pkt {
858 struct rss_header rsshdr; 1053 struct rss_header rsshdr;
859 u8 opcode; 1054 u8 opcode;
@@ -1090,6 +1285,12 @@ struct cpl_fw4_ack {
1090 __be64 rsvd1; 1285 __be64 rsvd1;
1091}; 1286};
1092 1287
1288enum {
1289 CPL_FW4_ACK_FLAGS_SEQVAL = 0x1, /* seqn valid */
1290 CPL_FW4_ACK_FLAGS_CH = 0x2, /* channel change complete */
1291 CPL_FW4_ACK_FLAGS_FLOWC = 0x4, /* fw_flowc_wr complete */
1292};
1293
1093struct cpl_fw6_msg { 1294struct cpl_fw6_msg {
1094 u8 opcode; 1295 u8 opcode;
1095 u8 type; 1296 u8 type;
@@ -1115,6 +1316,17 @@ struct cpl_fw6_msg_ofld_connection_wr_rpl {
1115 __u8 rsvd[2]; 1316 __u8 rsvd[2];
1116}; 1317};
1117 1318
1319struct cpl_tx_data {
1320 union opcode_tid ot;
1321 __be32 len;
1322 __be32 rsvd;
1323 __be32 flags;
1324};
1325
1326/* cpl_tx_data.flags field */
1327#define TX_FORCE_S 13
1328#define TX_FORCE_V(x) ((x) << TX_FORCE_S)
1329
1118enum { 1330enum {
1119 ULP_TX_MEM_READ = 2, 1331 ULP_TX_MEM_READ = 2,
1120 ULP_TX_MEM_WRITE = 3, 1332 ULP_TX_MEM_WRITE = 3,
@@ -1143,6 +1355,11 @@ struct ulptx_sgl {
1143 struct ulptx_sge_pair sge[0]; 1355 struct ulptx_sge_pair sge[0];
1144}; 1356};
1145 1357
1358struct ulptx_idata {
1359 __be32 cmd_more;
1360 __be32 len;
1361};
1362
1146#define ULPTX_NSGE_S 0 1363#define ULPTX_NSGE_S 0
1147#define ULPTX_NSGE_V(x) ((x) << ULPTX_NSGE_S) 1364#define ULPTX_NSGE_V(x) ((x) << ULPTX_NSGE_S)
1148 1365
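As a worked example of the new ISO field macros, a hedged sketch that composes the CPL_TX_DATA_ISO control word for a burst small enough to be both the first and last segment, with header and payload CRC offload enabled; the function and its parameters are illustrative, and only struct cpl_tx_data_iso and the macros come from the hunk above.

/* Illustrative only: fill a single-segment ISO header. */
static void fill_tx_data_iso(struct cpl_tx_data_iso *iso, u16 mss,
			     u32 burst_len, u32 seg_len, u8 scsi_type)
{
	iso->op_to_scsi = cpu_to_be32(CPL_TX_DATA_ISO_OP_V(CPL_TX_DATA_ISO) |
				      CPL_TX_DATA_ISO_FIRST_F |
				      CPL_TX_DATA_ISO_LAST_F |
				      CPL_TX_DATA_ISO_HDRCRC_F |
				      CPL_TX_DATA_ISO_PLDCRC_F |
				      CPL_TX_DATA_ISO_SCSI_V(scsi_type));
	iso->mpdu = cpu_to_be16(mss);		/* largest PDU in the burst */
	iso->burst_size = cpu_to_be32(burst_len);
	iso->len = cpu_to_be32(seg_len);	/* payload carried by this WR */
	iso->reserved2_seglen_offset =
		cpu_to_be32(CPL_TX_DATA_ISO_SEGLEN_OFFSET_V(seg_len));
}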
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
index c8661c77b4e3..7ad6d4e75b2a 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
@@ -101,6 +101,7 @@ enum fw_wr_opcodes {
101 FW_RI_BIND_MW_WR = 0x18, 101 FW_RI_BIND_MW_WR = 0x18,
102 FW_RI_FR_NSMR_WR = 0x19, 102 FW_RI_FR_NSMR_WR = 0x19,
103 FW_RI_INV_LSTAG_WR = 0x1a, 103 FW_RI_INV_LSTAG_WR = 0x1a,
104 FW_ISCSI_TX_DATA_WR = 0x45,
104 FW_LASTC2E_WR = 0x70 105 FW_LASTC2E_WR = 0x70
105}; 106};
106 107
@@ -561,7 +562,12 @@ enum fw_flowc_mnem {
561 FW_FLOWC_MNEM_SNDBUF, 562 FW_FLOWC_MNEM_SNDBUF,
562 FW_FLOWC_MNEM_MSS, 563 FW_FLOWC_MNEM_MSS,
563 FW_FLOWC_MNEM_TXDATAPLEN_MAX, 564 FW_FLOWC_MNEM_TXDATAPLEN_MAX,
564 FW_FLOWC_MNEM_SCHEDCLASS = 11, 565 FW_FLOWC_MNEM_TCPSTATE,
566 FW_FLOWC_MNEM_EOSTATE,
567 FW_FLOWC_MNEM_SCHEDCLASS,
568 FW_FLOWC_MNEM_DCBPRIO,
569 FW_FLOWC_MNEM_SND_SCALE,
570 FW_FLOWC_MNEM_RCV_SCALE,
565}; 571};
566 572
567struct fw_flowc_mnemval { 573struct fw_flowc_mnemval {
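A note on the fw_flowc_mnem hunk: dropping the explicit "= 11" on FW_FLOWC_MNEM_SCHEDCLASS works because the two entries inserted ahead of it (TCPSTATE, EOSTATE) fill the numbering gap the initializer used to jump over, so the firmware-visible value of SCHEDCLASS should remain 11. A minimal sketch of using the new mnemonic, assuming the usual fw_flowc_mnemval layout (a mnemonic byte plus a big-endian 32-bit value):

/* Sketch: program the connection's TCP state into one FLOWC entry.
 * Field names assume the fw_flowc_mnemval definition in this header.
 */
static void flowc_set_tcpstate(struct fw_flowc_mnemval *mv, u32 tcp_state)
{
	mv->mnemonic = FW_FLOWC_MNEM_TCPSTATE;
	mv->val = cpu_to_be32(tcp_state);
}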
diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.h b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.h
index 22dd8d670e4a..2fd9c76fc21c 100644
--- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.h
+++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.h
@@ -25,21 +25,4 @@
25 25
26#define T5_ISS_VALID (1 << 18) 26#define T5_ISS_VALID (1 << 18)
27 27
28struct ulptx_idata {
29 __be32 cmd_more;
30 __be32 len;
31};
32
33struct cpl_rx_data_ddp {
34 union opcode_tid ot;
35 __be16 urg;
36 __be16 len;
37 __be32 seq;
38 union {
39 __be32 nxt_seq;
40 __be32 ddp_report;
41 };
42 __be32 ulp_crc;
43 __be32 ddpvld;
44};
45#endif /* __CXGB4I_H__ */ 28#endif /* __CXGB4I_H__ */