author    Linus Torvalds <torvalds@linux-foundation.org>  2012-12-21 19:40:26 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>  2012-12-21 19:40:26 -0500
commit    184e2516614f7055d4c3a2e63fd8a3eb95fff6d6 (patch)
tree      9822dd3cc97f8cfed3cbda6167818b60355cc7ec /drivers
parent    0264405b84505f60ae00625f261e75a32c7ddf56 (diff)
parent    d72623b665d84b1e07fe43854e83387fce8dd134 (diff)
Merge tag 'rdma-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband
Pull more infiniband changes from Roland Dreier:
 "Second batch of InfiniBand/RDMA changes for 3.8:

  - cxgb4 changes to fix lookup engine hash collisions

  - mlx4 changes to make flow steering usable

  - fix to IPoIB to avoid pinning dst reference for too long"

* tag 'rdma-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband:
  RDMA/cxgb4: Fix bug for active and passive LE hash collision path
  RDMA/cxgb4: Fix LE hash collision bug for passive open connection
  RDMA/cxgb4: Fix LE hash collision bug for active open connection
  mlx4_core: Allow choosing flow steering mode
  mlx4_core: Adjustments to Flow Steering activation logic for SR-IOV
  mlx4_core: Fix error flow in the flow steering wrapper
  mlx4_core: Add QPN enforcement for flow steering rules set by VFs
  cxgb4: Add LE hash collision bug fix path in LLD driver
  cxgb4: Add T4 filter support
  IPoIB: Call skb_dst_drop() once skb is enqueued for sending
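Of the items above, the IPoIB change is the simplest to summarize: once an skb
has been handed to the hardware send queue, keeping its dst reference alive
until the TX completion fires only pins the route (and its neighbour entry)
for no benefit. A minimal sketch of the pattern follows; the function name and
surroundings are illustrative, not the verbatim ipoib_ib.c/ipoib_cm.c hunks
(those files appear in the diffstat below but their hunks are outside this
excerpt):

/*
 * Sketch of the dst-drop-on-enqueue pattern described above.
 * Illustrative only: example_post_send() is a made-up helper, not
 * code from the patch.
 */
#include <linux/skbuff.h>
#include <net/dst.h>

static void example_post_send(struct sk_buff *skb)
{
        /*
         * The skb may sit on the hardware send queue for a long time
         * before its TX completion arrives. Dropping the socket and
         * dst references here, rather than at kfree_skb() time,
         * avoids pinning the route for that whole window.
         */
        skb_orphan(skb);        /* release the owning socket's reference */
        skb_dst_drop(skb);      /* release the cached route entry */

        /* ... post the skb to the hardware send queue ... */
}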
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/infiniband/hw/cxgb4/cm.c                       | 791
-rw-r--r--  drivers/infiniband/hw/cxgb4/device.c                   | 210
-rw-r--r--  drivers/infiniband/hw/cxgb4/iw_cxgb4.h                 |  33
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_cm.c                |   3
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_ib.c                |   3
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4.h             | 136
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c        | 459
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h         |  23
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/l2t.c               |  32
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/l2t.h               |   3
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_hw.c             |  22
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_msg.h            |  66
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_regs.h           |  37
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h          | 418
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/fw.c                |  15
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/fw.h                |   1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/main.c              | 115
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mcg.c               |   7
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mlx4.h              |   6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/resource_tracker.c  |  28
-rw-r--r--  drivers/scsi/csiostor/t4fw_api_stor.h                  |  39
21 files changed, 2233 insertions(+), 214 deletions(-)
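For readers skimming the cm.c diff that follows, the LE (lookup engine) hash
collision workaround reduces to two flows; the names below are all functions
that appear in the patch itself, summarized here for orientation:

/*
 * Editorial summary of the cm.c control flow below -- not code from
 * the patch.
 *
 * passive side:
 *   cxgb4_create_server_filter()  - listen via a filter instead of a
 *                                   server TID
 *   rx_pkt()                      - the filter redirects the SYN to the
 *                                   offload queue as a CPL_RX_PKT
 *   build_cpl_pass_accept_req()   - synthesize a cpl_pass_accept_req
 *                                   from the raw SYN
 *   send_fw_pass_open_req()       - FW_OFLD_CONNECTION_WR asks firmware
 *                                   for a collision-free TID
 *
 * active side:
 *   act_open_rpl()                - on CPL_ERR_TCAM_FULL, fall back to
 *   send_fw_act_open_req()        - retry the open via
 *                                   FW_OFLD_CONNECTION_WR
 */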
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index 5de86968379d..c13745cde7fa 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -38,10 +38,12 @@
 #include <linux/inetdevice.h>
 #include <linux/ip.h>
 #include <linux/tcp.h>
+#include <linux/if_vlan.h>
 
 #include <net/neighbour.h>
 #include <net/netevent.h>
 #include <net/route.h>
+#include <net/tcp.h>
 
 #include "iw_cxgb4.h"
 
@@ -61,6 +63,14 @@ static char *states[] = {
 	NULL,
 };
 
+static int nocong;
+module_param(nocong, int, 0644);
+MODULE_PARM_DESC(nocong, "Turn of congestion control (default=0)");
+
+static int enable_ecn;
+module_param(enable_ecn, int, 0644);
+MODULE_PARM_DESC(enable_ecn, "Enable ECN (default=0/disabled)");
+
 static int dack_mode = 1;
 module_param(dack_mode, int, 0644);
 MODULE_PARM_DESC(dack_mode, "Delayed ack mode (default=1)");
@@ -265,6 +275,7 @@ void _c4iw_free_ep(struct kref *kref)
 		cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, ep->hwtid);
 		dst_release(ep->dst);
 		cxgb4_l2t_release(ep->l2t);
+		remove_handle(ep->com.dev, &ep->com.dev->hwtid_idr, ep->hwtid);
 	}
 	kfree(ep);
 }
@@ -441,6 +452,50 @@ static int send_abort(struct c4iw_ep *ep, struct sk_buff *skb, gfp_t gfp)
 	return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
 }
 
+#define VLAN_NONE 0xfff
+#define FILTER_SEL_VLAN_NONE 0xffff
+#define FILTER_SEL_WIDTH_P_FC (3+1) /* port uses 3 bits, FCoE one bit */
+#define FILTER_SEL_WIDTH_VIN_P_FC \
+	(6 + 7 + FILTER_SEL_WIDTH_P_FC) /* 6 bits are unused, VF uses 7 bits*/
+#define FILTER_SEL_WIDTH_TAG_P_FC \
+	(3 + FILTER_SEL_WIDTH_VIN_P_FC) /* PF uses 3 bits */
+#define FILTER_SEL_WIDTH_VLD_TAG_P_FC (1 + FILTER_SEL_WIDTH_TAG_P_FC)
+
+static unsigned int select_ntuple(struct c4iw_dev *dev, struct dst_entry *dst,
+				  struct l2t_entry *l2t)
+{
+	unsigned int ntuple = 0;
+	u32 viid;
+
+	switch (dev->rdev.lldi.filt_mode) {
+
+	/* default filter mode */
+	case HW_TPL_FR_MT_PR_IV_P_FC:
+		if (l2t->vlan == VLAN_NONE)
+			ntuple |= FILTER_SEL_VLAN_NONE << FILTER_SEL_WIDTH_P_FC;
+		else {
+			ntuple |= l2t->vlan << FILTER_SEL_WIDTH_P_FC;
+			ntuple |= 1 << FILTER_SEL_WIDTH_VLD_TAG_P_FC;
+		}
+		ntuple |= l2t->lport << S_PORT | IPPROTO_TCP <<
+			  FILTER_SEL_WIDTH_VLD_TAG_P_FC;
+		break;
+	case HW_TPL_FR_MT_PR_OV_P_FC: {
+		viid = cxgb4_port_viid(l2t->neigh->dev);
+
+		ntuple |= FW_VIID_VIN_GET(viid) << FILTER_SEL_WIDTH_P_FC;
+		ntuple |= FW_VIID_PFN_GET(viid) << FILTER_SEL_WIDTH_VIN_P_FC;
+		ntuple |= FW_VIID_VIVLD_GET(viid) << FILTER_SEL_WIDTH_TAG_P_FC;
+		ntuple |= l2t->lport << S_PORT | IPPROTO_TCP <<
+			  FILTER_SEL_WIDTH_VLD_TAG_P_FC;
+		break;
+	}
+	default:
+		break;
+	}
+	return ntuple;
+}
+
 static int send_connect(struct c4iw_ep *ep)
 {
 	struct cpl_act_open_req *req;
@@ -463,7 +518,8 @@ static int send_connect(struct c4iw_ep *ep)
 
 	cxgb4_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx);
 	wscale = compute_wscale(rcv_win);
-	opt0 = KEEP_ALIVE(1) |
+	opt0 = (nocong ? NO_CONG(1) : 0) |
+	       KEEP_ALIVE(1) |
 	       DELACK(1) |
 	       WND_SCALE(wscale) |
 	       MSS_IDX(mtu_idx) |
@@ -474,6 +530,7 @@ static int send_connect(struct c4iw_ep *ep)
 	       ULP_MODE(ULP_MODE_TCPDDP) |
 	       RCV_BUFSIZ(rcv_win>>10);
 	opt2 = RX_CHANNEL(0) |
+	       CCTRL_ECN(enable_ecn) |
 	       RSS_QUEUE_VALID | RSS_QUEUE(ep->rss_qid);
 	if (enable_tcp_timestamps)
 		opt2 |= TSTAMPS_EN(1);
@@ -492,8 +549,9 @@ static int send_connect(struct c4iw_ep *ep)
 	req->local_ip = ep->com.local_addr.sin_addr.s_addr;
 	req->peer_ip = ep->com.remote_addr.sin_addr.s_addr;
 	req->opt0 = cpu_to_be64(opt0);
-	req->params = 0;
+	req->params = cpu_to_be32(select_ntuple(ep->com.dev, ep->dst, ep->l2t));
 	req->opt2 = cpu_to_be32(opt2);
+	set_bit(ACT_OPEN_REQ, &ep->com.history);
 	return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
 }
 
@@ -770,6 +828,7 @@ static int act_establish(struct c4iw_dev *dev, struct sk_buff *skb)
 	/* setup the hwtid for this connection */
 	ep->hwtid = tid;
 	cxgb4_insert_tid(t, ep, tid);
+	insert_handle(dev, &dev->hwtid_idr, ep, ep->hwtid);
 
 	ep->snd_seq = be32_to_cpu(req->snd_isn);
 	ep->rcv_seq = be32_to_cpu(req->rcv_isn);
@@ -777,7 +836,9 @@ static int act_establish(struct c4iw_dev *dev, struct sk_buff *skb)
 	set_emss(ep, ntohs(req->tcp_opt));
 
 	/* dealloc the atid */
+	remove_handle(ep->com.dev, &ep->com.dev->atid_idr, atid);
 	cxgb4_free_atid(t, atid);
+	set_bit(ACT_ESTAB, &ep->com.history);
 
 	/* start MPA negotiation */
 	send_flowc(ep, NULL);
@@ -803,6 +864,7 @@ static void close_complete_upcall(struct c4iw_ep *ep)
 		ep->com.cm_id->rem_ref(ep->com.cm_id);
 		ep->com.cm_id = NULL;
 		ep->com.qp = NULL;
+		set_bit(CLOSE_UPCALL, &ep->com.history);
 	}
 }
 
@@ -811,6 +873,7 @@ static int abort_connection(struct c4iw_ep *ep, struct sk_buff *skb, gfp_t gfp)
 	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
 	close_complete_upcall(ep);
 	state_set(&ep->com, ABORTING);
+	set_bit(ABORT_CONN, &ep->com.history);
 	return send_abort(ep, skb, gfp);
 }
 
@@ -825,6 +888,7 @@ static void peer_close_upcall(struct c4iw_ep *ep)
 		PDBG("peer close delivered ep %p cm_id %p tid %u\n",
 		     ep, ep->com.cm_id, ep->hwtid);
 		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
+		set_bit(DISCONN_UPCALL, &ep->com.history);
 	}
 }
 
@@ -843,6 +907,7 @@ static void peer_abort_upcall(struct c4iw_ep *ep)
 		ep->com.cm_id->rem_ref(ep->com.cm_id);
 		ep->com.cm_id = NULL;
 		ep->com.qp = NULL;
+		set_bit(ABORT_UPCALL, &ep->com.history);
 	}
 }
 
@@ -875,6 +940,7 @@ static void connect_reply_upcall(struct c4iw_ep *ep, int status)
 
 	PDBG("%s ep %p tid %u status %d\n", __func__, ep,
 	     ep->hwtid, status);
+	set_bit(CONN_RPL_UPCALL, &ep->com.history);
 	ep->com.cm_id->event_handler(ep->com.cm_id, &event);
 
 	if (status < 0) {
@@ -915,6 +981,7 @@ static void connect_request_upcall(struct c4iw_ep *ep)
 				    ep->parent_ep->com.cm_id,
 				    &event);
 	}
+	set_bit(CONNREQ_UPCALL, &ep->com.history);
 	c4iw_put_ep(&ep->parent_ep->com);
 	ep->parent_ep = NULL;
 }
@@ -931,6 +998,7 @@ static void established_upcall(struct c4iw_ep *ep)
 	if (ep->com.cm_id) {
 		PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
 		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
+		set_bit(ESTAB_UPCALL, &ep->com.history);
 	}
 }
 
@@ -1316,6 +1384,7 @@ static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb)
 	unsigned int dlen = ntohs(hdr->len);
 	unsigned int tid = GET_TID(hdr);
 	struct tid_info *t = dev->rdev.lldi.tids;
+	__u8 status = hdr->status;
 
 	ep = lookup_tid(t, tid);
 	PDBG("%s ep %p tid %u dlen %u\n", __func__, ep, ep->hwtid, dlen);
@@ -1338,9 +1407,9 @@ static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb)
 	case MPA_REP_SENT:
 		break;
 	default:
-		printk(KERN_ERR MOD "%s Unexpected streaming data."
-		       " ep %p state %d tid %u\n",
-		       __func__, ep, state_read(&ep->com), ep->hwtid);
+		pr_err("%s Unexpected streaming data." \
+		       " ep %p state %d tid %u status %d\n",
+		       __func__, ep, state_read(&ep->com), ep->hwtid, status);
 
 	/*
 	 * The ep will timeout and inform the ULP of the failure.
@@ -1383,6 +1452,63 @@ static int abort_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
 	return 0;
 }
 
+static void send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid)
+{
+	struct sk_buff *skb;
+	struct fw_ofld_connection_wr *req;
+	unsigned int mtu_idx;
+	int wscale;
+
+	skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
+	req = (struct fw_ofld_connection_wr *)__skb_put(skb, sizeof(*req));
+	memset(req, 0, sizeof(*req));
+	req->op_compl = htonl(V_WR_OP(FW_OFLD_CONNECTION_WR));
+	req->len16_pkd = htonl(FW_WR_LEN16(DIV_ROUND_UP(sizeof(*req), 16)));
+	req->le.filter = cpu_to_be32(select_ntuple(ep->com.dev, ep->dst,
+				     ep->l2t));
+	req->le.lport = ep->com.local_addr.sin_port;
+	req->le.pport = ep->com.remote_addr.sin_port;
+	req->le.u.ipv4.lip = ep->com.local_addr.sin_addr.s_addr;
+	req->le.u.ipv4.pip = ep->com.remote_addr.sin_addr.s_addr;
+	req->tcb.t_state_to_astid =
+			htonl(V_FW_OFLD_CONNECTION_WR_T_STATE(TCP_SYN_SENT) |
+			V_FW_OFLD_CONNECTION_WR_ASTID(atid));
+	req->tcb.cplrxdataack_cplpassacceptrpl =
+			htons(F_FW_OFLD_CONNECTION_WR_CPLRXDATAACK);
+	req->tcb.tx_max = jiffies;
+	req->tcb.rcv_adv = htons(1);
+	cxgb4_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx);
+	wscale = compute_wscale(rcv_win);
+	req->tcb.opt0 = TCAM_BYPASS(1) |
+		(nocong ? NO_CONG(1) : 0) |
+		KEEP_ALIVE(1) |
+		DELACK(1) |
+		WND_SCALE(wscale) |
+		MSS_IDX(mtu_idx) |
+		L2T_IDX(ep->l2t->idx) |
+		TX_CHAN(ep->tx_chan) |
+		SMAC_SEL(ep->smac_idx) |
+		DSCP(ep->tos) |
+		ULP_MODE(ULP_MODE_TCPDDP) |
+		RCV_BUFSIZ(rcv_win >> 10);
+	req->tcb.opt2 = PACE(1) |
+		TX_QUEUE(ep->com.dev->rdev.lldi.tx_modq[ep->tx_chan]) |
+		RX_CHANNEL(0) |
+		CCTRL_ECN(enable_ecn) |
+		RSS_QUEUE_VALID | RSS_QUEUE(ep->rss_qid);
+	if (enable_tcp_timestamps)
+		req->tcb.opt2 |= TSTAMPS_EN(1);
+	if (enable_tcp_sack)
+		req->tcb.opt2 |= SACK_EN(1);
+	if (wscale && enable_tcp_window_scaling)
+		req->tcb.opt2 |= WND_SCALE_EN(1);
+	req->tcb.opt0 = cpu_to_be64(req->tcb.opt0);
+	req->tcb.opt2 = cpu_to_be32(req->tcb.opt2);
+	set_wr_txq(skb, CPL_PRIORITY_CONTROL, ep->ctrlq_idx);
+	set_bit(ACT_OFLD_CONN, &ep->com.history);
+	c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
+}
+
 /*
  * Return whether a failed active open has allocated a TID
  */
@@ -1392,6 +1518,111 @@ static inline int act_open_has_tid(int status)
 	       status != CPL_ERR_ARP_MISS;
 }
 
+#define ACT_OPEN_RETRY_COUNT 2
+
+static int c4iw_reconnect(struct c4iw_ep *ep)
+{
+	int err = 0;
+	struct rtable *rt;
+	struct port_info *pi;
+	struct net_device *pdev;
+	int step;
+	struct neighbour *neigh;
+
+	PDBG("%s qp %p cm_id %p\n", __func__, ep->com.qp, ep->com.cm_id);
+	init_timer(&ep->timer);
+
+	/*
+	 * Allocate an active TID to initiate a TCP connection.
+	 */
+	ep->atid = cxgb4_alloc_atid(ep->com.dev->rdev.lldi.tids, ep);
+	if (ep->atid == -1) {
+		pr_err("%s - cannot alloc atid.\n", __func__);
+		err = -ENOMEM;
+		goto fail2;
+	}
+	insert_handle(ep->com.dev, &ep->com.dev->atid_idr, ep, ep->atid);
+
+	/* find a route */
+	rt = find_route(ep->com.dev,
+			ep->com.cm_id->local_addr.sin_addr.s_addr,
+			ep->com.cm_id->remote_addr.sin_addr.s_addr,
+			ep->com.cm_id->local_addr.sin_port,
+			ep->com.cm_id->remote_addr.sin_port, 0);
+	if (!rt) {
+		pr_err("%s - cannot find route.\n", __func__);
+		err = -EHOSTUNREACH;
+		goto fail3;
+	}
+	ep->dst = &rt->dst;
+
+	neigh = dst_neigh_lookup(ep->dst,
+			&ep->com.cm_id->remote_addr.sin_addr.s_addr);
+	/* get a l2t entry */
+	if (neigh->dev->flags & IFF_LOOPBACK) {
+		PDBG("%s LOOPBACK\n", __func__);
+		pdev = ip_dev_find(&init_net,
+				   ep->com.cm_id->remote_addr.sin_addr.s_addr);
+		ep->l2t = cxgb4_l2t_get(ep->com.dev->rdev.lldi.l2t,
+					neigh, pdev, 0);
+		pi = (struct port_info *)netdev_priv(pdev);
+		ep->mtu = pdev->mtu;
+		ep->tx_chan = cxgb4_port_chan(pdev);
+		ep->smac_idx = (cxgb4_port_viid(pdev) & 0x7F) << 1;
+		dev_put(pdev);
+	} else {
+		ep->l2t = cxgb4_l2t_get(ep->com.dev->rdev.lldi.l2t,
+					neigh, neigh->dev, 0);
+		pi = (struct port_info *)netdev_priv(neigh->dev);
+		ep->mtu = dst_mtu(ep->dst);
+		ep->tx_chan = cxgb4_port_chan(neigh->dev);
+		ep->smac_idx = (cxgb4_port_viid(neigh->dev) &
+				0x7F) << 1;
+	}
+
+	step = ep->com.dev->rdev.lldi.ntxq / ep->com.dev->rdev.lldi.nchan;
+	ep->txq_idx = pi->port_id * step;
+	ep->ctrlq_idx = pi->port_id;
+	step = ep->com.dev->rdev.lldi.nrxq / ep->com.dev->rdev.lldi.nchan;
+	ep->rss_qid = ep->com.dev->rdev.lldi.rxq_ids[pi->port_id * step];
+
+	if (!ep->l2t) {
+		pr_err("%s - cannot alloc l2e.\n", __func__);
+		err = -ENOMEM;
+		goto fail4;
+	}
+
+	PDBG("%s txq_idx %u tx_chan %u smac_idx %u rss_qid %u l2t_idx %u\n",
+	     __func__, ep->txq_idx, ep->tx_chan, ep->smac_idx, ep->rss_qid,
+	     ep->l2t->idx);
+
+	state_set(&ep->com, CONNECTING);
+	ep->tos = 0;
+
+	/* send connect request to rnic */
+	err = send_connect(ep);
+	if (!err)
+		goto out;
+
+	cxgb4_l2t_release(ep->l2t);
+fail4:
+	dst_release(ep->dst);
+fail3:
+	remove_handle(ep->com.dev, &ep->com.dev->atid_idr, ep->atid);
+	cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid);
+fail2:
+	/*
+	 * remember to send notification to upper layer.
+	 * We are in here so the upper layer is not aware that this is
+	 * re-connect attempt and so, upper layer is still waiting for
+	 * response of 1st connect request.
+	 */
+	connect_reply_upcall(ep, -ECONNRESET);
+	c4iw_put_ep(&ep->com);
+out:
+	return err;
+}
+
 static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
 {
 	struct c4iw_ep *ep;
@@ -1412,6 +1643,8 @@ static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
 		return 0;
 	}
 
+	set_bit(ACT_OPEN_RPL, &ep->com.history);
+
 	/*
 	 * Log interesting failures.
 	 */
@@ -1419,6 +1652,29 @@ static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
 	case CPL_ERR_CONN_RESET:
 	case CPL_ERR_CONN_TIMEDOUT:
 		break;
+	case CPL_ERR_TCAM_FULL:
+		if (dev->rdev.lldi.enable_fw_ofld_conn) {
+			mutex_lock(&dev->rdev.stats.lock);
+			dev->rdev.stats.tcam_full++;
+			mutex_unlock(&dev->rdev.stats.lock);
+			send_fw_act_open_req(ep,
+					     GET_TID_TID(GET_AOPEN_ATID(
+					     ntohl(rpl->atid_status))));
+			return 0;
+		}
+		break;
+	case CPL_ERR_CONN_EXIST:
+		if (ep->retry_count++ < ACT_OPEN_RETRY_COUNT) {
+			set_bit(ACT_RETRY_INUSE, &ep->com.history);
+			remove_handle(ep->com.dev, &ep->com.dev->atid_idr,
+				      atid);
+			cxgb4_free_atid(t, atid);
+			dst_release(ep->dst);
+			cxgb4_l2t_release(ep->l2t);
+			c4iw_reconnect(ep);
+			return 0;
+		}
+		break;
 	default:
 		printk(KERN_INFO MOD "Active open failure - "
 		       "atid %u status %u errno %d %pI4:%u->%pI4:%u\n",
@@ -1436,6 +1692,7 @@ static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
 	if (status && act_open_has_tid(status))
 		cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, GET_TID(rpl));
 
+	remove_handle(ep->com.dev, &ep->com.dev->atid_idr, atid);
 	cxgb4_free_atid(t, atid);
 	dst_release(ep->dst);
 	cxgb4_l2t_release(ep->l2t);
@@ -1452,13 +1709,14 @@ static int pass_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
 	struct c4iw_listen_ep *ep = lookup_stid(t, stid);
 
 	if (!ep) {
-		printk(KERN_ERR MOD "stid %d lookup failure!\n", stid);
-		return 0;
+		PDBG("%s stid %d lookup failure!\n", __func__, stid);
+		goto out;
 	}
 	PDBG("%s ep %p status %d error %d\n", __func__, ep,
 	     rpl->status, status2errno(rpl->status));
 	c4iw_wake_up(&ep->com.wr_wait, status2errno(rpl->status));
 
+out:
 	return 0;
 }
 
@@ -1510,14 +1768,15 @@ static void accept_cr(struct c4iw_ep *ep, __be32 peer_ip, struct sk_buff *skb,
 	skb_get(skb);
 	cxgb4_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx);
 	wscale = compute_wscale(rcv_win);
-	opt0 = KEEP_ALIVE(1) |
+	opt0 = (nocong ? NO_CONG(1) : 0) |
+	       KEEP_ALIVE(1) |
 	       DELACK(1) |
 	       WND_SCALE(wscale) |
 	       MSS_IDX(mtu_idx) |
 	       L2T_IDX(ep->l2t->idx) |
 	       TX_CHAN(ep->tx_chan) |
 	       SMAC_SEL(ep->smac_idx) |
-	       DSCP(ep->tos) |
+	       DSCP(ep->tos >> 2) |
 	       ULP_MODE(ULP_MODE_TCPDDP) |
 	       RCV_BUFSIZ(rcv_win>>10);
 	opt2 = RX_CHANNEL(0) |
@@ -1529,6 +1788,15 @@ static void accept_cr(struct c4iw_ep *ep, __be32 peer_ip, struct sk_buff *skb,
 		opt2 |= SACK_EN(1);
 	if (wscale && enable_tcp_window_scaling)
 		opt2 |= WND_SCALE_EN(1);
+	if (enable_ecn) {
+		const struct tcphdr *tcph;
+		u32 hlen = ntohl(req->hdr_len);
+
+		tcph = (const void *)(req + 1) + G_ETH_HDR_LEN(hlen) +
+			G_IP_HDR_LEN(hlen);
+		if (tcph->ece && tcph->cwr)
+			opt2 |= CCTRL_ECN(1);
+	}
 
 	rpl = cplhdr(skb);
 	INIT_TP_WR(rpl, ep->hwtid);
@@ -1645,22 +1913,30 @@ out:
 
 static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
 {
-	struct c4iw_ep *child_ep, *parent_ep;
+	struct c4iw_ep *child_ep = NULL, *parent_ep;
 	struct cpl_pass_accept_req *req = cplhdr(skb);
 	unsigned int stid = GET_POPEN_TID(ntohl(req->tos_stid));
 	struct tid_info *t = dev->rdev.lldi.tids;
 	unsigned int hwtid = GET_TID(req);
 	struct dst_entry *dst;
 	struct rtable *rt;
-	__be32 local_ip, peer_ip;
+	__be32 local_ip, peer_ip = 0;
 	__be16 local_port, peer_port;
 	int err;
+	u16 peer_mss = ntohs(req->tcpopt.mss);
 
 	parent_ep = lookup_stid(t, stid);
-	PDBG("%s parent ep %p tid %u\n", __func__, parent_ep, hwtid);
-
+	if (!parent_ep) {
+		PDBG("%s connect request on invalid stid %d\n", __func__, stid);
+		goto reject;
+	}
 	get_4tuple(req, &local_ip, &peer_ip, &local_port, &peer_port);
 
+	PDBG("%s parent ep %p hwtid %u laddr 0x%x raddr 0x%x lport %d " \
+	     "rport %d peer_mss %d\n", __func__, parent_ep, hwtid,
+	     ntohl(local_ip), ntohl(peer_ip), ntohs(local_port),
+	     ntohs(peer_port), peer_mss);
+
 	if (state_read(&parent_ep->com) != LISTEN) {
 		printk(KERN_ERR "%s - listening ep not in LISTEN\n",
 		       __func__);
@@ -1694,6 +1970,9 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
 		goto reject;
 	}
 
+	if (peer_mss && child_ep->mtu > (peer_mss + 40))
+		child_ep->mtu = peer_mss + 40;
+
 	state_set(&child_ep->com, CONNECTING);
 	child_ep->com.dev = dev;
 	child_ep->com.cm_id = NULL;
@@ -1715,6 +1994,7 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
 	init_timer(&child_ep->timer);
 	cxgb4_insert_tid(t, child_ep, hwtid);
 	accept_cr(child_ep, peer_ip, skb, req);
+	set_bit(PASS_ACCEPT_REQ, &child_ep->com.history);
 	goto out;
 reject:
 	reject_cr(dev, hwtid, peer_ip, skb);
@@ -1734,12 +2014,17 @@ static int pass_establish(struct c4iw_dev *dev, struct sk_buff *skb)
 	ep->snd_seq = be32_to_cpu(req->snd_isn);
 	ep->rcv_seq = be32_to_cpu(req->rcv_isn);
 
+	PDBG("%s ep %p hwtid %u tcp_opt 0x%02x\n", __func__, ep, tid,
+	     ntohs(req->tcp_opt));
+
 	set_emss(ep, ntohs(req->tcp_opt));
+	insert_handle(dev, &dev->hwtid_idr, ep, ep->hwtid);
 
 	dst_confirm(ep->dst);
 	state_set(&ep->com, MPA_REQ_WAIT);
 	start_ep_timer(ep);
 	send_flowc(ep, skb);
+	set_bit(PASS_ESTAB, &ep->com.history);
 
 	return 0;
 }
@@ -1759,6 +2044,7 @@ static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb)
 	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
 	dst_confirm(ep->dst);
 
+	set_bit(PEER_CLOSE, &ep->com.history);
 	mutex_lock(&ep->com.mutex);
 	switch (ep->com.state) {
 	case MPA_REQ_WAIT:
@@ -1838,74 +2124,6 @@ static int is_neg_adv_abort(unsigned int status)
 	       status == CPL_ERR_PERSIST_NEG_ADVICE;
 }
 
-static int c4iw_reconnect(struct c4iw_ep *ep)
-{
-	struct rtable *rt;
-	int err = 0;
-
-	PDBG("%s qp %p cm_id %p\n", __func__, ep->com.qp, ep->com.cm_id);
-	init_timer(&ep->timer);
-
-	/*
-	 * Allocate an active TID to initiate a TCP connection.
-	 */
-	ep->atid = cxgb4_alloc_atid(ep->com.dev->rdev.lldi.tids, ep);
-	if (ep->atid == -1) {
-		printk(KERN_ERR MOD "%s - cannot alloc atid.\n", __func__);
-		err = -ENOMEM;
-		goto fail2;
-	}
-
-	/* find a route */
-	rt = find_route(ep->com.dev,
-			ep->com.cm_id->local_addr.sin_addr.s_addr,
-			ep->com.cm_id->remote_addr.sin_addr.s_addr,
-			ep->com.cm_id->local_addr.sin_port,
-			ep->com.cm_id->remote_addr.sin_port, 0);
-	if (!rt) {
-		printk(KERN_ERR MOD "%s - cannot find route.\n", __func__);
-		err = -EHOSTUNREACH;
-		goto fail3;
-	}
-	ep->dst = &rt->dst;
-
-	err = import_ep(ep, ep->com.cm_id->remote_addr.sin_addr.s_addr,
-			ep->dst, ep->com.dev, false);
-	if (err) {
-		printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__);
-		goto fail4;
-	}
-
-	PDBG("%s txq_idx %u tx_chan %u smac_idx %u rss_qid %u l2t_idx %u\n",
-	     __func__, ep->txq_idx, ep->tx_chan, ep->smac_idx, ep->rss_qid,
-	     ep->l2t->idx);
-
-	state_set(&ep->com, CONNECTING);
-	ep->tos = 0;
-
-	/* send connect request to rnic */
-	err = send_connect(ep);
-	if (!err)
-		goto out;
-
-	cxgb4_l2t_release(ep->l2t);
-fail4:
-	dst_release(ep->dst);
-fail3:
-	cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid);
-fail2:
-	/*
-	 * remember to send notification to upper layer.
-	 * We are in here so the upper layer is not aware that this is
-	 * re-connect attempt and so, upper layer is still waiting for
-	 * response of 1st connect request.
-	 */
-	connect_reply_upcall(ep, -ECONNRESET);
-	c4iw_put_ep(&ep->com);
-out:
-	return err;
-}
-
 static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
 {
 	struct cpl_abort_req_rss *req = cplhdr(skb);
@@ -1926,6 +2144,7 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
 	}
 	PDBG("%s ep %p tid %u state %u\n", __func__, ep, ep->hwtid,
 	     ep->com.state);
+	set_bit(PEER_ABORT, &ep->com.history);
 
 	/*
 	 * Wake up any threads in rdma_init() or rdma_fini().
@@ -2140,6 +2359,7 @@ int c4iw_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
 		c4iw_put_ep(&ep->com);
 		return -ECONNRESET;
 	}
+	set_bit(ULP_REJECT, &ep->com.history);
 	BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD);
 	if (mpa_rev == 0)
 		abort_connection(ep, NULL, GFP_KERNEL);
@@ -2169,6 +2389,7 @@ int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
 	BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD);
 	BUG_ON(!qp);
 
+	set_bit(ULP_ACCEPT, &ep->com.history);
 	if ((conn_param->ord > c4iw_max_read_depth) ||
 	    (conn_param->ird > c4iw_max_read_depth)) {
 		abort_connection(ep, NULL, GFP_KERNEL);
@@ -2292,6 +2513,7 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
 		err = -ENOMEM;
 		goto fail2;
 	}
+	insert_handle(dev, &dev->atid_idr, ep, ep->atid);
 
 	PDBG("%s saddr 0x%x sport 0x%x raddr 0x%x rport 0x%x\n", __func__,
 	     ntohl(cm_id->local_addr.sin_addr.s_addr),
@@ -2337,6 +2559,7 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
 fail4:
 	dst_release(ep->dst);
 fail3:
+	remove_handle(ep->com.dev, &ep->com.dev->atid_idr, ep->atid);
 	cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid);
 fail2:
 	cm_id->rem_ref(cm_id);
@@ -2351,7 +2574,6 @@ int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog)
 	struct c4iw_dev *dev = to_c4iw_dev(cm_id->device);
 	struct c4iw_listen_ep *ep;
 
-
 	might_sleep();
 
 	ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
@@ -2370,30 +2592,54 @@ int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog)
 	/*
 	 * Allocate a server TID.
 	 */
-	ep->stid = cxgb4_alloc_stid(dev->rdev.lldi.tids, PF_INET, ep);
+	if (dev->rdev.lldi.enable_fw_ofld_conn)
+		ep->stid = cxgb4_alloc_sftid(dev->rdev.lldi.tids, PF_INET, ep);
+	else
+		ep->stid = cxgb4_alloc_stid(dev->rdev.lldi.tids, PF_INET, ep);
+
 	if (ep->stid == -1) {
 		printk(KERN_ERR MOD "%s - cannot alloc stid.\n", __func__);
 		err = -ENOMEM;
 		goto fail2;
 	}
-
+	insert_handle(dev, &dev->stid_idr, ep, ep->stid);
 	state_set(&ep->com, LISTEN);
-	c4iw_init_wr_wait(&ep->com.wr_wait);
-	err = cxgb4_create_server(ep->com.dev->rdev.lldi.ports[0], ep->stid,
-				  ep->com.local_addr.sin_addr.s_addr,
-				  ep->com.local_addr.sin_port,
-				  ep->com.dev->rdev.lldi.rxq_ids[0]);
-	if (err)
-		goto fail3;
-
-	/* wait for pass_open_rpl */
-	err = c4iw_wait_for_reply(&ep->com.dev->rdev, &ep->com.wr_wait, 0, 0,
-				  __func__);
+	if (dev->rdev.lldi.enable_fw_ofld_conn) {
+		do {
+			err = cxgb4_create_server_filter(
+				ep->com.dev->rdev.lldi.ports[0], ep->stid,
+				ep->com.local_addr.sin_addr.s_addr,
+				ep->com.local_addr.sin_port,
+				0,
+				ep->com.dev->rdev.lldi.rxq_ids[0],
+				0,
+				0);
+			if (err == -EBUSY) {
+				set_current_state(TASK_UNINTERRUPTIBLE);
+				schedule_timeout(usecs_to_jiffies(100));
+			}
+		} while (err == -EBUSY);
+	} else {
+		c4iw_init_wr_wait(&ep->com.wr_wait);
+		err = cxgb4_create_server(ep->com.dev->rdev.lldi.ports[0],
+				ep->stid, ep->com.local_addr.sin_addr.s_addr,
+				ep->com.local_addr.sin_port,
+				0,
+				ep->com.dev->rdev.lldi.rxq_ids[0]);
+		if (!err)
+			err = c4iw_wait_for_reply(&ep->com.dev->rdev,
+						  &ep->com.wr_wait,
+						  0, 0, __func__);
+	}
 	if (!err) {
 		cm_id->provider_data = ep;
 		goto out;
 	}
-fail3:
+	pr_err("%s cxgb4_create_server/filter failed err %d " \
+	       "stid %d laddr %08x lport %d\n", \
+	       __func__, err, ep->stid,
+	       ntohl(ep->com.local_addr.sin_addr.s_addr),
+	       ntohs(ep->com.local_addr.sin_port));
 	cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid, PF_INET);
 fail2:
 	cm_id->rem_ref(cm_id);
@@ -2412,12 +2658,19 @@ int c4iw_destroy_listen(struct iw_cm_id *cm_id)
 
 	might_sleep();
 	state_set(&ep->com, DEAD);
-	c4iw_init_wr_wait(&ep->com.wr_wait);
-	err = listen_stop(ep);
-	if (err)
-		goto done;
-	err = c4iw_wait_for_reply(&ep->com.dev->rdev, &ep->com.wr_wait, 0, 0,
-				  __func__);
+	if (ep->com.dev->rdev.lldi.enable_fw_ofld_conn) {
+		err = cxgb4_remove_server_filter(
+			ep->com.dev->rdev.lldi.ports[0], ep->stid,
+			ep->com.dev->rdev.lldi.rxq_ids[0], 0);
+	} else {
+		c4iw_init_wr_wait(&ep->com.wr_wait);
+		err = listen_stop(ep);
+		if (err)
+			goto done;
+		err = c4iw_wait_for_reply(&ep->com.dev->rdev, &ep->com.wr_wait,
+					  0, 0, __func__);
+	}
+	remove_handle(ep->com.dev, &ep->com.dev->stid_idr, ep->stid);
 	cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid, PF_INET);
 done:
 	cm_id->rem_ref(cm_id);
@@ -2481,10 +2734,13 @@ int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
 
 	if (close) {
 		if (abrupt) {
+			set_bit(EP_DISC_ABORT, &ep->com.history);
 			close_complete_upcall(ep);
 			ret = send_abort(ep, NULL, gfp);
-		} else
+		} else {
+			set_bit(EP_DISC_CLOSE, &ep->com.history);
 			ret = send_halfclose(ep, gfp);
+		}
 		if (ret)
 			fatal = 1;
 	}
@@ -2494,10 +2750,323 @@ int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
 	return ret;
 }
 
-static int async_event(struct c4iw_dev *dev, struct sk_buff *skb)
+static void active_ofld_conn_reply(struct c4iw_dev *dev, struct sk_buff *skb,
+			struct cpl_fw6_msg_ofld_connection_wr_rpl *req)
+{
+	struct c4iw_ep *ep;
+	int atid = be32_to_cpu(req->tid);
+
+	ep = (struct c4iw_ep *)lookup_atid(dev->rdev.lldi.tids, req->tid);
+	if (!ep)
+		return;
+
+	switch (req->retval) {
+	case FW_ENOMEM:
+		set_bit(ACT_RETRY_NOMEM, &ep->com.history);
+		if (ep->retry_count++ < ACT_OPEN_RETRY_COUNT) {
+			send_fw_act_open_req(ep, atid);
+			return;
+		}
+	case FW_EADDRINUSE:
+		set_bit(ACT_RETRY_INUSE, &ep->com.history);
+		if (ep->retry_count++ < ACT_OPEN_RETRY_COUNT) {
+			send_fw_act_open_req(ep, atid);
+			return;
+		}
+		break;
+	default:
+		pr_info("%s unexpected ofld conn wr retval %d\n",
+			__func__, req->retval);
+		break;
+	}
+	pr_err("active ofld_connect_wr failure %d atid %d\n",
+	       req->retval, atid);
+	mutex_lock(&dev->rdev.stats.lock);
+	dev->rdev.stats.act_ofld_conn_fails++;
+	mutex_unlock(&dev->rdev.stats.lock);
+	connect_reply_upcall(ep, status2errno(req->retval));
+	state_set(&ep->com, DEAD);
+	remove_handle(dev, &dev->atid_idr, atid);
+	cxgb4_free_atid(dev->rdev.lldi.tids, atid);
+	dst_release(ep->dst);
+	cxgb4_l2t_release(ep->l2t);
+	c4iw_put_ep(&ep->com);
+}
+
+static void passive_ofld_conn_reply(struct c4iw_dev *dev, struct sk_buff *skb,
+			struct cpl_fw6_msg_ofld_connection_wr_rpl *req)
+{
+	struct sk_buff *rpl_skb;
+	struct cpl_pass_accept_req *cpl;
+	int ret;
+
+	rpl_skb = (struct sk_buff *)cpu_to_be64(req->cookie);
+	BUG_ON(!rpl_skb);
+	if (req->retval) {
+		PDBG("%s passive open failure %d\n", __func__, req->retval);
+		mutex_lock(&dev->rdev.stats.lock);
+		dev->rdev.stats.pas_ofld_conn_fails++;
+		mutex_unlock(&dev->rdev.stats.lock);
+		kfree_skb(rpl_skb);
+	} else {
+		cpl = (struct cpl_pass_accept_req *)cplhdr(rpl_skb);
+		OPCODE_TID(cpl) = htonl(MK_OPCODE_TID(CPL_PASS_ACCEPT_REQ,
+					htonl(req->tid)));
+		ret = pass_accept_req(dev, rpl_skb);
+		if (!ret)
+			kfree_skb(rpl_skb);
+	}
+	return;
+}
+
+static int deferred_fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb)
 {
 	struct cpl_fw6_msg *rpl = cplhdr(skb);
-	c4iw_ev_dispatch(dev, (struct t4_cqe *)&rpl->data[0]);
+	struct cpl_fw6_msg_ofld_connection_wr_rpl *req;
+
+	switch (rpl->type) {
+	case FW6_TYPE_CQE:
+		c4iw_ev_dispatch(dev, (struct t4_cqe *)&rpl->data[0]);
+		break;
+	case FW6_TYPE_OFLD_CONNECTION_WR_RPL:
+		req = (struct cpl_fw6_msg_ofld_connection_wr_rpl *)rpl->data;
+		switch (req->t_state) {
+		case TCP_SYN_SENT:
+			active_ofld_conn_reply(dev, skb, req);
+			break;
+		case TCP_SYN_RECV:
+			passive_ofld_conn_reply(dev, skb, req);
+			break;
+		default:
+			pr_err("%s unexpected ofld conn wr state %d\n",
+			       __func__, req->t_state);
+			break;
+		}
+		break;
+	}
+	return 0;
+}
+
+static void build_cpl_pass_accept_req(struct sk_buff *skb, int stid , u8 tos)
+{
+	u32 l2info;
+	u16 vlantag, len, hdr_len;
+	u8 intf;
+	struct cpl_rx_pkt *cpl = cplhdr(skb);
+	struct cpl_pass_accept_req *req;
+	struct tcp_options_received tmp_opt;
+
+	/* Store values from cpl_rx_pkt in temporary location. */
+	vlantag = cpl->vlan;
+	len = cpl->len;
+	l2info = cpl->l2info;
+	hdr_len = cpl->hdr_len;
+	intf = cpl->iff;
+
+	__skb_pull(skb, sizeof(*req) + sizeof(struct rss_header));
+
+	/*
+	 * We need to parse the TCP options from SYN packet.
+	 * to generate cpl_pass_accept_req.
+	 */
+	memset(&tmp_opt, 0, sizeof(tmp_opt));
+	tcp_clear_options(&tmp_opt);
+	tcp_parse_options(skb, &tmp_opt, 0, 0, NULL);
+
+	req = (struct cpl_pass_accept_req *)__skb_push(skb, sizeof(*req));
+	memset(req, 0, sizeof(*req));
+	req->l2info = cpu_to_be16(V_SYN_INTF(intf) |
+			 V_SYN_MAC_IDX(G_RX_MACIDX(htonl(l2info))) |
+			 F_SYN_XACT_MATCH);
+	req->hdr_len = cpu_to_be32(V_SYN_RX_CHAN(G_RX_CHAN(htonl(l2info))) |
+			V_TCP_HDR_LEN(G_RX_TCPHDR_LEN(htons(hdr_len))) |
+			V_IP_HDR_LEN(G_RX_IPHDR_LEN(htons(hdr_len))) |
+			V_ETH_HDR_LEN(G_RX_ETHHDR_LEN(htonl(l2info))));
+	req->vlan = vlantag;
+	req->len = len;
+	req->tos_stid = cpu_to_be32(PASS_OPEN_TID(stid) |
+				    PASS_OPEN_TOS(tos));
+	req->tcpopt.mss = htons(tmp_opt.mss_clamp);
+	if (tmp_opt.wscale_ok)
+		req->tcpopt.wsf = tmp_opt.snd_wscale;
+	req->tcpopt.tstamp = tmp_opt.saw_tstamp;
+	if (tmp_opt.sack_ok)
+		req->tcpopt.sack = 1;
+	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_ACCEPT_REQ, 0));
+	return;
+}
+
+static void send_fw_pass_open_req(struct c4iw_dev *dev, struct sk_buff *skb,
+				  __be32 laddr, __be16 lport,
+				  __be32 raddr, __be16 rport,
+				  u32 rcv_isn, u32 filter, u16 window,
+				  u32 rss_qid, u8 port_id)
+{
+	struct sk_buff *req_skb;
+	struct fw_ofld_connection_wr *req;
+	struct cpl_pass_accept_req *cpl = cplhdr(skb);
+
+	req_skb = alloc_skb(sizeof(struct fw_ofld_connection_wr), GFP_KERNEL);
+	req = (struct fw_ofld_connection_wr *)__skb_put(req_skb, sizeof(*req));
+	memset(req, 0, sizeof(*req));
+	req->op_compl = htonl(V_WR_OP(FW_OFLD_CONNECTION_WR) | FW_WR_COMPL(1));
+	req->len16_pkd = htonl(FW_WR_LEN16(DIV_ROUND_UP(sizeof(*req), 16)));
+	req->le.version_cpl = htonl(F_FW_OFLD_CONNECTION_WR_CPL);
+	req->le.filter = filter;
+	req->le.lport = lport;
+	req->le.pport = rport;
+	req->le.u.ipv4.lip = laddr;
+	req->le.u.ipv4.pip = raddr;
+	req->tcb.rcv_nxt = htonl(rcv_isn + 1);
+	req->tcb.rcv_adv = htons(window);
+	req->tcb.t_state_to_astid =
+		 htonl(V_FW_OFLD_CONNECTION_WR_T_STATE(TCP_SYN_RECV) |
+			V_FW_OFLD_CONNECTION_WR_RCV_SCALE(cpl->tcpopt.wsf) |
+			V_FW_OFLD_CONNECTION_WR_ASTID(
+			GET_PASS_OPEN_TID(ntohl(cpl->tos_stid))));
+
+	/*
+	 * We store the qid in opt2 which will be used by the firmware
+	 * to send us the wr response.
+	 */
+	req->tcb.opt2 = htonl(V_RSS_QUEUE(rss_qid));
+
+	/*
+	 * We initialize the MSS index in TCB to 0xF.
+	 * So that when driver sends cpl_pass_accept_rpl
+	 * TCB picks up the correct value. If this was 0
+	 * TP will ignore any value > 0 for MSS index.
+	 */
+	req->tcb.opt0 = cpu_to_be64(V_MSS_IDX(0xF));
+	req->cookie = cpu_to_be64((u64)skb);
+
+	set_wr_txq(req_skb, CPL_PRIORITY_CONTROL, port_id);
+	cxgb4_ofld_send(dev->rdev.lldi.ports[0], req_skb);
+}
+
+/*
+ * Handler for CPL_RX_PKT message. Need to handle cpl_rx_pkt
+ * messages when a filter is being used instead of server to
+ * redirect a syn packet. When packets hit filter they are redirected
+ * to the offload queue and driver tries to establish the connection
+ * using firmware work request.
+ */
+static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb)
+{
+	int stid;
+	unsigned int filter;
+	struct ethhdr *eh = NULL;
+	struct vlan_ethhdr *vlan_eh = NULL;
+	struct iphdr *iph;
+	struct tcphdr *tcph;
+	struct rss_header *rss = (void *)skb->data;
+	struct cpl_rx_pkt *cpl = (void *)skb->data;
+	struct cpl_pass_accept_req *req = (void *)(rss + 1);
+	struct l2t_entry *e;
+	struct dst_entry *dst;
+	struct rtable *rt;
+	struct c4iw_ep *lep;
+	u16 window;
+	struct port_info *pi;
+	struct net_device *pdev;
+	u16 rss_qid;
+	int step;
+	u32 tx_chan;
+	struct neighbour *neigh;
+
+	/* Drop all non-SYN packets */
+	if (!(cpl->l2info & cpu_to_be32(F_RXF_SYN)))
+		goto reject;
+
+	/*
+	 * Drop all packets which did not hit the filter.
+	 * Unlikely to happen.
+	 */
+	if (!(rss->filter_hit && rss->filter_tid))
+		goto reject;
+
+	/*
+	 * Calculate the server tid from filter hit index from cpl_rx_pkt.
+	 */
+	stid = cpu_to_be32(rss->hash_val) - dev->rdev.lldi.tids->sftid_base
+	       + dev->rdev.lldi.tids->nstids;
+
+	lep = (struct c4iw_ep *)lookup_stid(dev->rdev.lldi.tids, stid);
+	if (!lep) {
+		PDBG("%s connect request on invalid stid %d\n", __func__, stid);
+		goto reject;
+	}
+
+	if (G_RX_ETHHDR_LEN(ntohl(cpl->l2info)) == ETH_HLEN) {
+		eh = (struct ethhdr *)(req + 1);
+		iph = (struct iphdr *)(eh + 1);
+	} else {
+		vlan_eh = (struct vlan_ethhdr *)(req + 1);
+		iph = (struct iphdr *)(vlan_eh + 1);
+		skb->vlan_tci = ntohs(cpl->vlan);
+	}
+
+	if (iph->version != 0x4)
+		goto reject;
+
+	tcph = (struct tcphdr *)(iph + 1);
+	skb_set_network_header(skb, (void *)iph - (void *)rss);
+	skb_set_transport_header(skb, (void *)tcph - (void *)rss);
+	skb_get(skb);
+
+	PDBG("%s lip 0x%x lport %u pip 0x%x pport %u tos %d\n", __func__,
+	     ntohl(iph->daddr), ntohs(tcph->dest), ntohl(iph->saddr),
+	     ntohs(tcph->source), iph->tos);
+
+	rt = find_route(dev, iph->daddr, iph->saddr, tcph->dest, tcph->source,
+			iph->tos);
+	if (!rt) {
+		pr_err("%s - failed to find dst entry!\n",
+		       __func__);
+		goto reject;
+	}
+	dst = &rt->dst;
+	neigh = dst_neigh_lookup_skb(dst, skb);
+
+	if (neigh->dev->flags & IFF_LOOPBACK) {
+		pdev = ip_dev_find(&init_net, iph->daddr);
+		e = cxgb4_l2t_get(dev->rdev.lldi.l2t, neigh,
+				  pdev, 0);
+		pi = (struct port_info *)netdev_priv(pdev);
+		tx_chan = cxgb4_port_chan(pdev);
+		dev_put(pdev);
+	} else {
+		e = cxgb4_l2t_get(dev->rdev.lldi.l2t, neigh,
+				  neigh->dev, 0);
+		pi = (struct port_info *)netdev_priv(neigh->dev);
+		tx_chan = cxgb4_port_chan(neigh->dev);
+	}
+	if (!e) {
+		pr_err("%s - failed to allocate l2t entry!\n",
+		       __func__);
+		goto free_dst;
+	}
+
+	step = dev->rdev.lldi.nrxq / dev->rdev.lldi.nchan;
+	rss_qid = dev->rdev.lldi.rxq_ids[pi->port_id * step];
+	window = htons(tcph->window);
+
+	/* Calcuate filter portion for LE region. */
+	filter = cpu_to_be32(select_ntuple(dev, dst, e));
+
+	/*
+	 * Synthesize the cpl_pass_accept_req. We have everything except the
+	 * TID. Once firmware sends a reply with TID we update the TID field
+	 * in cpl and pass it through the regular cpl_pass_accept_req path.
+	 */
+	build_cpl_pass_accept_req(skb, stid, iph->tos);
+	send_fw_pass_open_req(dev, skb, iph->daddr, tcph->dest, iph->saddr,
+			      tcph->source, ntohl(tcph->seq), filter, window,
+			      rss_qid, pi->port_id);
+	cxgb4_l2t_release(e);
+free_dst:
+	dst_release(dst);
+reject:
 	return 0;
 }
 
@@ -2520,7 +3089,8 @@ static c4iw_handler_func work_handlers[NUM_CPL_CMDS] = {
 	[CPL_CLOSE_CON_RPL] = close_con_rpl,
 	[CPL_RDMA_TERMINATE] = terminate,
 	[CPL_FW4_ACK] = fw4_ack,
-	[CPL_FW6_MSG] = async_event
+	[CPL_FW6_MSG] = deferred_fw6_msg,
+	[CPL_RX_PKT] = rx_pkt
 };
 
 static void process_timeout(struct c4iw_ep *ep)
@@ -2531,6 +3101,7 @@ static void process_timeout(struct c4iw_ep *ep)
 	mutex_lock(&ep->com.mutex);
 	PDBG("%s ep %p tid %u state %d\n", __func__, ep, ep->hwtid,
 	     ep->com.state);
+	set_bit(TIMEDOUT, &ep->com.history);
 	switch (ep->com.state) {
 	case MPA_REQ_SENT:
 		__state_set(&ep->com, ABORTING);
@@ -2651,7 +3222,7 @@ static int fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb)
 	PDBG("%s type %u\n", __func__, rpl->type);
 
 	switch (rpl->type) {
-	case 1:
+	case FW6_TYPE_WR_RPL:
 		ret = (int)((be64_to_cpu(rpl->data[0]) >> 8) & 0xff);
 		wr_waitp = (struct c4iw_wr_wait *)(__force unsigned long) rpl->data[1];
 		PDBG("%s wr_waitp %p ret %u\n", __func__, wr_waitp, ret);
@@ -2659,7 +3230,8 @@ static int fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb)
 		c4iw_wake_up(wr_waitp, ret ? -ret : 0);
 		kfree_skb(skb);
 		break;
-	case 2:
+	case FW6_TYPE_CQE:
+	case FW6_TYPE_OFLD_CONNECTION_WR_RPL:
 		sched(dev, skb);
 		break;
 	default:
@@ -2722,7 +3294,8 @@ c4iw_handler_func c4iw_handlers[NUM_CPL_CMDS] = {
 	[CPL_RDMA_TERMINATE] = sched,
 	[CPL_FW4_ACK] = sched,
 	[CPL_SET_TCB_RPL] = set_tcb_rpl,
-	[CPL_FW6_MSG] = fw6_msg
+	[CPL_FW6_MSG] = fw6_msg,
+	[CPL_RX_PKT] = sched
 };
 
 int __init c4iw_cm_init(void)
diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c
index cb4ecd783700..ba11c76c0b5a 100644
--- a/drivers/infiniband/hw/cxgb4/device.c
+++ b/drivers/infiniband/hw/cxgb4/device.c
@@ -279,6 +279,11 @@ static int stats_show(struct seq_file *seq, void *v)
 	seq_printf(seq, " DB State: %s Transitions %llu\n",
 		   db_state_str[dev->db_state],
 		   dev->rdev.stats.db_state_transitions);
+	seq_printf(seq, "TCAM_FULL: %10llu\n", dev->rdev.stats.tcam_full);
+	seq_printf(seq, "ACT_OFLD_CONN_FAILS: %10llu\n",
+		   dev->rdev.stats.act_ofld_conn_fails);
+	seq_printf(seq, "PAS_OFLD_CONN_FAILS: %10llu\n",
+		   dev->rdev.stats.pas_ofld_conn_fails);
 	return 0;
 }
 
@@ -309,6 +314,9 @@ static ssize_t stats_clear(struct file *file, const char __user *buf,
 	dev->rdev.stats.db_empty = 0;
 	dev->rdev.stats.db_drop = 0;
 	dev->rdev.stats.db_state_transitions = 0;
+	dev->rdev.stats.tcam_full = 0;
+	dev->rdev.stats.act_ofld_conn_fails = 0;
+	dev->rdev.stats.pas_ofld_conn_fails = 0;
 	mutex_unlock(&dev->rdev.stats.lock);
 	return count;
 }
@@ -322,6 +330,113 @@ static const struct file_operations stats_debugfs_fops = {
 	.write   = stats_clear,
 };
 
+static int dump_ep(int id, void *p, void *data)
+{
+	struct c4iw_ep *ep = p;
+	struct c4iw_debugfs_data *epd = data;
+	int space;
+	int cc;
+
+	space = epd->bufsize - epd->pos - 1;
+	if (space == 0)
+		return 1;
+
+	cc = snprintf(epd->buf + epd->pos, space,
+			"ep %p cm_id %p qp %p state %d flags 0x%lx history 0x%lx "
+			"hwtid %d atid %d %pI4:%d <-> %pI4:%d\n",
+			ep, ep->com.cm_id, ep->com.qp, (int)ep->com.state,
+			ep->com.flags, ep->com.history, ep->hwtid, ep->atid,
+			&ep->com.local_addr.sin_addr.s_addr,
+			ntohs(ep->com.local_addr.sin_port),
+			&ep->com.remote_addr.sin_addr.s_addr,
+			ntohs(ep->com.remote_addr.sin_port));
+	if (cc < space)
+		epd->pos += cc;
+	return 0;
+}
+
+static int dump_listen_ep(int id, void *p, void *data)
+{
+	struct c4iw_listen_ep *ep = p;
+	struct c4iw_debugfs_data *epd = data;
+	int space;
+	int cc;
+
+	space = epd->bufsize - epd->pos - 1;
+	if (space == 0)
+		return 1;
+
+	cc = snprintf(epd->buf + epd->pos, space,
+			"ep %p cm_id %p state %d flags 0x%lx stid %d backlog %d "
+			"%pI4:%d\n", ep, ep->com.cm_id, (int)ep->com.state,
+			ep->com.flags, ep->stid, ep->backlog,
+			&ep->com.local_addr.sin_addr.s_addr,
+			ntohs(ep->com.local_addr.sin_port));
+	if (cc < space)
+		epd->pos += cc;
+	return 0;
+}
+
+static int ep_release(struct inode *inode, struct file *file)
+{
+	struct c4iw_debugfs_data *epd = file->private_data;
+	if (!epd) {
+		pr_info("%s null qpd?\n", __func__);
+		return 0;
+	}
+	vfree(epd->buf);
+	kfree(epd);
+	return 0;
+}
+
+static int ep_open(struct inode *inode, struct file *file)
+{
+	struct c4iw_debugfs_data *epd;
+	int ret = 0;
+	int count = 1;
+
+	epd = kmalloc(sizeof(*epd), GFP_KERNEL);
+	if (!epd) {
+		ret = -ENOMEM;
+		goto out;
+	}
+	epd->devp = inode->i_private;
+	epd->pos = 0;
+
+	spin_lock_irq(&epd->devp->lock);
+	idr_for_each(&epd->devp->hwtid_idr, count_idrs, &count);
+	idr_for_each(&epd->devp->atid_idr, count_idrs, &count);
+	idr_for_each(&epd->devp->stid_idr, count_idrs, &count);
+	spin_unlock_irq(&epd->devp->lock);
+
+	epd->bufsize = count * 160;
+	epd->buf = vmalloc(epd->bufsize);
+	if (!epd->buf) {
+		ret = -ENOMEM;
+		goto err1;
+	}
+
+	spin_lock_irq(&epd->devp->lock);
+	idr_for_each(&epd->devp->hwtid_idr, dump_ep, epd);
+	idr_for_each(&epd->devp->atid_idr, dump_ep, epd);
+	idr_for_each(&epd->devp->stid_idr, dump_listen_ep, epd);
+	spin_unlock_irq(&epd->devp->lock);
+
+	file->private_data = epd;
+	goto out;
+err1:
+	kfree(epd);
+out:
+	return ret;
+}
+
+static const struct file_operations ep_debugfs_fops = {
+	.owner   = THIS_MODULE,
+	.open    = ep_open,
+	.release = ep_release,
+	.read    = debugfs_read,
+};
+
 static int setup_debugfs(struct c4iw_dev *devp)
 {
 	struct dentry *de;
@@ -344,6 +459,11 @@ static int setup_debugfs(struct c4iw_dev *devp)
 	if (de && de->d_inode)
 		de->d_inode->i_size = 4096;
 
+	de = debugfs_create_file("eps", S_IWUSR, devp->debugfs_root,
+				 (void *)devp, &ep_debugfs_fops);
+	if (de && de->d_inode)
+		de->d_inode->i_size = 4096;
+
 	return 0;
 }
 
@@ -475,6 +595,9 @@ static void c4iw_dealloc(struct uld_ctx *ctx)
 	idr_destroy(&ctx->dev->cqidr);
 	idr_destroy(&ctx->dev->qpidr);
 	idr_destroy(&ctx->dev->mmidr);
+	idr_destroy(&ctx->dev->hwtid_idr);
+	idr_destroy(&ctx->dev->stid_idr);
+	idr_destroy(&ctx->dev->atid_idr);
 	iounmap(ctx->dev->rdev.oc_mw_kva);
 	ib_dealloc_device(&ctx->dev->ibdev);
 	ctx->dev = NULL;
@@ -532,6 +655,9 @@ static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)
 	idr_init(&devp->cqidr);
 	idr_init(&devp->qpidr);
 	idr_init(&devp->mmidr);
+	idr_init(&devp->hwtid_idr);
+	idr_init(&devp->stid_idr);
+	idr_init(&devp->atid_idr);
 	spin_lock_init(&devp->lock);
 	mutex_init(&devp->rdev.stats.lock);
 	mutex_init(&devp->db_mutex);
@@ -577,14 +703,76 @@ out:
577 return ctx; 703 return ctx;
578} 704}
579 705
706static inline struct sk_buff *copy_gl_to_skb_pkt(const struct pkt_gl *gl,
707 const __be64 *rsp,
708 u32 pktshift)
709{
710 struct sk_buff *skb;
711
712 /*
713	 * Allocate space for the cpl_pass_accept_req that will be synthesized by
714	 * the driver. Once the driver synthesizes the request, the skb will go
715 * through the regular cpl_pass_accept_req processing.
716 * The math here assumes sizeof cpl_pass_accept_req >= sizeof
717 * cpl_rx_pkt.
718 */
719 skb = alloc_skb(gl->tot_len + sizeof(struct cpl_pass_accept_req) +
720 sizeof(struct rss_header) - pktshift, GFP_ATOMIC);
721 if (unlikely(!skb))
722 return NULL;
723
724 __skb_put(skb, gl->tot_len + sizeof(struct cpl_pass_accept_req) +
725 sizeof(struct rss_header) - pktshift);
726
727 /*
728 * This skb will contain:
729 * rss_header from the rspq descriptor (1 flit)
730 * cpl_rx_pkt struct from the rspq descriptor (2 flits)
731 * space for the difference between the size of an
732 * rx_pkt and pass_accept_req cpl (1 flit)
733 * the packet data from the gl
734 */
735 skb_copy_to_linear_data(skb, rsp, sizeof(struct cpl_pass_accept_req) +
736 sizeof(struct rss_header));
737 skb_copy_to_linear_data_offset(skb, sizeof(struct rss_header) +
738 sizeof(struct cpl_pass_accept_req),
739 gl->va + pktshift,
740 gl->tot_len - pktshift);
741 return skb;
742}
743
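The layout math above leans on sizeof(struct cpl_pass_accept_req) >= sizeof(struct cpl_rx_pkt), but nothing enforces it at build time. A minimal sketch of a compile-time guard (illustrative only, not part of this patch; it assumes the CPL definitions from t4_msg.h are already in scope):

	/* Hypothetical guard for the size assumption documented above. */
	static inline void check_accept_req_fits(void)
	{
		BUILD_BUG_ON(sizeof(struct cpl_pass_accept_req) <
			     sizeof(struct cpl_rx_pkt));
	}
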
744static inline int recv_rx_pkt(struct c4iw_dev *dev, const struct pkt_gl *gl,
745 const __be64 *rsp)
746{
747 unsigned int opcode = *(u8 *)rsp;
748 struct sk_buff *skb;
749
750 if (opcode != CPL_RX_PKT)
751 goto out;
752
753	skb = copy_gl_to_skb_pkt(gl, rsp, dev->rdev.lldi.sge_pktshift);
754 if (skb == NULL)
755 goto out;
756
757 if (c4iw_handlers[opcode] == NULL) {
758 pr_info("%s no handler opcode 0x%x...\n", __func__,
759 opcode);
760 kfree_skb(skb);
761 goto out;
762 }
763 c4iw_handlers[opcode](dev, skb);
764 return 1;
765out:
766 return 0;
767}
768
580static int c4iw_uld_rx_handler(void *handle, const __be64 *rsp, 769static int c4iw_uld_rx_handler(void *handle, const __be64 *rsp,
581 const struct pkt_gl *gl) 770 const struct pkt_gl *gl)
582{ 771{
583 struct uld_ctx *ctx = handle; 772 struct uld_ctx *ctx = handle;
584 struct c4iw_dev *dev = ctx->dev; 773 struct c4iw_dev *dev = ctx->dev;
585 struct sk_buff *skb; 774 struct sk_buff *skb;
586 const struct cpl_act_establish *rpl; 775 u8 opcode;
587 unsigned int opcode;
588 776
589 if (gl == NULL) { 777 if (gl == NULL) {
590 /* omit RSS and rsp_ctrl at end of descriptor */ 778 /* omit RSS and rsp_ctrl at end of descriptor */
@@ -601,19 +789,29 @@ static int c4iw_uld_rx_handler(void *handle, const __be64 *rsp,
601 u32 qid = be32_to_cpu(rc->pldbuflen_qid); 789 u32 qid = be32_to_cpu(rc->pldbuflen_qid);
602 c4iw_ev_handler(dev, qid); 790 c4iw_ev_handler(dev, qid);
603 return 0; 791 return 0;
792 } else if (unlikely(*(u8 *)rsp != *(u8 *)gl->va)) {
793 if (recv_rx_pkt(dev, gl, rsp))
794 return 0;
795
796		pr_info("%s: unexpected FL contents at %p, "
797			"RSS %#llx, FL %#llx, len %u\n",
798 pci_name(ctx->lldi.pdev), gl->va,
799 (unsigned long long)be64_to_cpu(*rsp),
800 (unsigned long long)be64_to_cpu(*(u64 *)gl->va),
801 gl->tot_len);
802
803 return 0;
604 } else { 804 } else {
605 skb = cxgb4_pktgl_to_skb(gl, 128, 128); 805 skb = cxgb4_pktgl_to_skb(gl, 128, 128);
606 if (unlikely(!skb)) 806 if (unlikely(!skb))
607 goto nomem; 807 goto nomem;
608 } 808 }
609 809
610 rpl = cplhdr(skb); 810 opcode = *(u8 *)rsp;
611 opcode = rpl->ot.opcode;
612
613 if (c4iw_handlers[opcode]) 811 if (c4iw_handlers[opcode])
614 c4iw_handlers[opcode](dev, skb); 812 c4iw_handlers[opcode](dev, skb);
615 else 813 else
616 printk(KERN_INFO "%s no handler opcode 0x%x...\n", __func__, 814 pr_info("%s no handler opcode 0x%x...\n", __func__,
617 opcode); 815 opcode);
618 816
619 return 0; 817 return 0;
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
index 9beb3a9f0336..9c1644fb0259 100644
--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -130,6 +130,9 @@ struct c4iw_stats {
130 u64 db_empty; 130 u64 db_empty;
131 u64 db_drop; 131 u64 db_drop;
132 u64 db_state_transitions; 132 u64 db_state_transitions;
133 u64 tcam_full;
134 u64 act_ofld_conn_fails;
135 u64 pas_ofld_conn_fails;
133}; 136};
134 137
135struct c4iw_rdev { 138struct c4iw_rdev {
@@ -223,6 +226,9 @@ struct c4iw_dev {
223 struct dentry *debugfs_root; 226 struct dentry *debugfs_root;
224 enum db_state db_state; 227 enum db_state db_state;
225 int qpcnt; 228 int qpcnt;
229 struct idr hwtid_idr;
230 struct idr atid_idr;
231 struct idr stid_idr;
226}; 232};
227 233
228static inline struct c4iw_dev *to_c4iw_dev(struct ib_device *ibdev) 234static inline struct c4iw_dev *to_c4iw_dev(struct ib_device *ibdev)
@@ -712,6 +718,31 @@ enum c4iw_ep_flags {
712 CLOSE_SENT = 3, 718 CLOSE_SENT = 3,
713}; 719};
714 720
721enum c4iw_ep_history {
722 ACT_OPEN_REQ = 0,
723 ACT_OFLD_CONN = 1,
724 ACT_OPEN_RPL = 2,
725 ACT_ESTAB = 3,
726 PASS_ACCEPT_REQ = 4,
727 PASS_ESTAB = 5,
728 ABORT_UPCALL = 6,
729 ESTAB_UPCALL = 7,
730 CLOSE_UPCALL = 8,
731 ULP_ACCEPT = 9,
732 ULP_REJECT = 10,
733 TIMEDOUT = 11,
734 PEER_ABORT = 12,
735 PEER_CLOSE = 13,
736 CONNREQ_UPCALL = 14,
737 ABORT_CONN = 15,
738 DISCONN_UPCALL = 16,
739 EP_DISC_CLOSE = 17,
740 EP_DISC_ABORT = 18,
741 CONN_RPL_UPCALL = 19,
742 ACT_RETRY_NOMEM = 20,
743 ACT_RETRY_INUSE = 21
744};
745
715struct c4iw_ep_common { 746struct c4iw_ep_common {
716 struct iw_cm_id *cm_id; 747 struct iw_cm_id *cm_id;
717 struct c4iw_qp *qp; 748 struct c4iw_qp *qp;
@@ -723,6 +754,7 @@ struct c4iw_ep_common {
723 struct sockaddr_in remote_addr; 754 struct sockaddr_in remote_addr;
724 struct c4iw_wr_wait wr_wait; 755 struct c4iw_wr_wait wr_wait;
725 unsigned long flags; 756 unsigned long flags;
757 unsigned long history;
726}; 758};
727 759
728struct c4iw_listen_ep { 760struct c4iw_listen_ep {
@@ -760,6 +792,7 @@ struct c4iw_ep {
760 u8 tos; 792 u8 tos;
761 u8 retry_with_mpa_v1; 793 u8 retry_with_mpa_v1;
762 u8 tried_with_mpa_v1; 794 u8 tried_with_mpa_v1;
795 unsigned int retry_count;
763}; 796};
764 797
765static inline struct c4iw_ep *to_ep(struct iw_cm_id *cm_id) 798static inline struct c4iw_ep *to_ep(struct iw_cm_id *cm_id)
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index 72ae63f0072d..03103d2bd641 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -752,6 +752,9 @@ void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_
752 dev->trans_start = jiffies; 752 dev->trans_start = jiffies;
753 ++tx->tx_head; 753 ++tx->tx_head;
754 754
755 skb_orphan(skb);
756 skb_dst_drop(skb);
757
755 if (++priv->tx_outstanding == ipoib_sendq_size) { 758 if (++priv->tx_outstanding == ipoib_sendq_size) {
756 ipoib_dbg(priv, "TX ring 0x%x full, stopping kernel net queue\n", 759 ipoib_dbg(priv, "TX ring 0x%x full, stopping kernel net queue\n",
757 tx->qp->qp_num); 760 tx->qp->qp_num);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index f10221f40803..a1bca70e20aa 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -615,8 +615,9 @@ void ipoib_send(struct net_device *dev, struct sk_buff *skb,
615 615
616 address->last_send = priv->tx_head; 616 address->last_send = priv->tx_head;
617 ++priv->tx_head; 617 ++priv->tx_head;
618 skb_orphan(skb);
619 618
619 skb_orphan(skb);
620 skb_dst_drop(skb);
620 } 621 }
621 622
622 if (unlikely(priv->tx_outstanding > MAX_SEND_CQE)) 623 if (unlikely(priv->tx_outstanding > MAX_SEND_CQE))
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index 378988b5709a..6db997c78a5f 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -35,6 +35,8 @@
35#ifndef __CXGB4_H__ 35#ifndef __CXGB4_H__
36#define __CXGB4_H__ 36#define __CXGB4_H__
37 37
38#include "t4_hw.h"
39
38#include <linux/bitops.h> 40#include <linux/bitops.h>
39#include <linux/cache.h> 41#include <linux/cache.h>
40#include <linux/interrupt.h> 42#include <linux/interrupt.h>
@@ -212,6 +214,8 @@ struct tp_err_stats {
212struct tp_params { 214struct tp_params {
213 unsigned int ntxchan; /* # of Tx channels */ 215 unsigned int ntxchan; /* # of Tx channels */
214 unsigned int tre; /* log2 of core clocks per TP tick */ 216 unsigned int tre; /* log2 of core clocks per TP tick */
217 unsigned short tx_modq_map; /* TX modulation scheduler queue to */
218 /* channel map */
215 219
216 uint32_t dack_re; /* DACK timer resolution */ 220 uint32_t dack_re; /* DACK timer resolution */
217 unsigned short tx_modq[NCHAN]; /* channel to modulation queue map */ 221 unsigned short tx_modq[NCHAN]; /* channel to modulation queue map */
@@ -526,6 +530,7 @@ struct adapter {
526 struct net_device *port[MAX_NPORTS]; 530 struct net_device *port[MAX_NPORTS];
527 u8 chan_map[NCHAN]; /* channel -> port map */ 531 u8 chan_map[NCHAN]; /* channel -> port map */
528 532
533 u32 filter_mode;
529 unsigned int l2t_start; 534 unsigned int l2t_start;
530 unsigned int l2t_end; 535 unsigned int l2t_end;
531 struct l2t_data *l2t; 536 struct l2t_data *l2t;
@@ -545,6 +550,129 @@ struct adapter {
545 spinlock_t stats_lock; 550 spinlock_t stats_lock;
546}; 551};
547 552
553/* Defined bit width of user definable filter tuples
554 */
555#define ETHTYPE_BITWIDTH 16
556#define FRAG_BITWIDTH 1
557#define MACIDX_BITWIDTH 9
558#define FCOE_BITWIDTH 1
559#define IPORT_BITWIDTH 3
560#define MATCHTYPE_BITWIDTH 3
561#define PROTO_BITWIDTH 8
562#define TOS_BITWIDTH 8
563#define PF_BITWIDTH 8
564#define VF_BITWIDTH 8
565#define IVLAN_BITWIDTH 16
566#define OVLAN_BITWIDTH 16
567
568/* Filter matching rules. These consist of a set of ingress packet field
569 * (value, mask) tuples. The associated ingress packet field matches the
570 * tuple when ((field & mask) == value). (Thus a wildcard "don't care" field
571 * rule can be constructed by specifying a tuple of (0, 0).) A filter rule
572 * matches an ingress packet when all of the individual field
573 * matching rules are true.
574 *
575 * Partial field masks are always valid; however, while it may be easy to
576 * understand their meanings for some fields (e.g. IP address to match a
577 * subnet), for others making sensible partial masks is less intuitive (e.g.
578 * MPS match type) ...
579 *
580 * Most of the following data structures are modeled on T4 capabilities.
581 * Drivers for earlier chips use the subsets which make sense for those chips.
582 * We really need to come up with a hardware-independent mechanism to
583 * represent hardware filter capabilities ...
584 */
585struct ch_filter_tuple {
586 /* Compressed header matching field rules. The TP_VLAN_PRI_MAP
587 * register selects which of these fields will participate in the
588 * filter match rules -- up to a maximum of 36 bits. Because
589 * TP_VLAN_PRI_MAP is a global register, all filters must use the same
590 * set of fields.
591 */
592 uint32_t ethtype:ETHTYPE_BITWIDTH; /* Ethernet type */
593 uint32_t frag:FRAG_BITWIDTH; /* IP fragmentation header */
594 uint32_t ivlan_vld:1; /* inner VLAN valid */
595 uint32_t ovlan_vld:1; /* outer VLAN valid */
596 uint32_t pfvf_vld:1; /* PF/VF valid */
597 uint32_t macidx:MACIDX_BITWIDTH; /* exact match MAC index */
598 uint32_t fcoe:FCOE_BITWIDTH; /* FCoE packet */
599 uint32_t iport:IPORT_BITWIDTH; /* ingress port */
600 uint32_t matchtype:MATCHTYPE_BITWIDTH; /* MPS match type */
601 uint32_t proto:PROTO_BITWIDTH; /* protocol type */
602 uint32_t tos:TOS_BITWIDTH; /* TOS/Traffic Type */
603 uint32_t pf:PF_BITWIDTH; /* PCI-E PF ID */
604 uint32_t vf:VF_BITWIDTH; /* PCI-E VF ID */
605 uint32_t ivlan:IVLAN_BITWIDTH; /* inner VLAN */
606 uint32_t ovlan:OVLAN_BITWIDTH; /* outer VLAN */
607
608 /* Uncompressed header matching field rules. These are always
609 * available for field rules.
610 */
611 uint8_t lip[16]; /* local IP address (IPv4 in [3:0]) */
612 uint8_t fip[16]; /* foreign IP address (IPv4 in [3:0]) */
613 uint16_t lport; /* local port */
614 uint16_t fport; /* foreign port */
615};
616
617/* A filter ioctl command.
618 */
619struct ch_filter_specification {
620 /* Administrative fields for filter.
621 */
622 uint32_t hitcnts:1; /* count filter hits in TCB */
623 uint32_t prio:1; /* filter has priority over active/server */
624
625 /* Fundamental filter typing. This is the one element of filter
626 * matching that doesn't exist as a (value, mask) tuple.
627 */
628 uint32_t type:1; /* 0 => IPv4, 1 => IPv6 */
629
630 /* Packet dispatch information. Ingress packets which match the
631 * filter rules will be dropped, passed to the host or switched back
632 * out as egress packets.
633 */
634 uint32_t action:2; /* drop, pass, switch */
635
636 uint32_t rpttid:1; /* report TID in RSS hash field */
637
638 uint32_t dirsteer:1; /* 0 => RSS, 1 => steer to iq */
639 uint32_t iq:10; /* ingress queue */
640
641 uint32_t maskhash:1; /* dirsteer=0: store RSS hash in TCB */
642 uint32_t dirsteerhash:1;/* dirsteer=1: 0 => TCB contains RSS hash */
643 /* 1 => TCB contains IQ ID */
644
645 /* Switch proxy/rewrite fields. An ingress packet which matches a
646 * filter with "switch" set will be looped back out as an egress
647 * packet -- potentially with some Ethernet header rewriting.
648 */
649 uint32_t eport:2; /* egress port to switch packet out */
650 uint32_t newdmac:1; /* rewrite destination MAC address */
651 uint32_t newsmac:1; /* rewrite source MAC address */
652 uint32_t newvlan:2; /* rewrite VLAN Tag */
653 uint8_t dmac[ETH_ALEN]; /* new destination MAC address */
654 uint8_t smac[ETH_ALEN]; /* new source MAC address */
655 uint16_t vlan; /* VLAN Tag to insert */
656
657 /* Filter rule value/mask pairs.
658 */
659 struct ch_filter_tuple val;
660 struct ch_filter_tuple mask;
661};
662
663enum {
664 FILTER_PASS = 0, /* default */
665 FILTER_DROP,
666 FILTER_SWITCH
667};
668
669enum {
670 VLAN_NOCHANGE = 0, /* default */
671 VLAN_REMOVE,
672 VLAN_INSERT,
673 VLAN_REWRITE
674};
675
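To make the (value, mask) semantics concrete, here is a hedged sketch (illustrative only; IPPROTO_TCP comes from <linux/in.h>) that fills a ch_filter_specification to drop IPv4 TCP traffic from 192.168.1.0/24. Every tuple left at (0, 0) remains a wildcard, and per the comment above the IPv4 address occupies fip[0..3]:

	/* Hypothetical example: drop TCP from 192.168.1.0/24. */
	static void example_drop_subnet(struct ch_filter_specification *fs)
	{
		memset(fs, 0, sizeof(*fs));		/* all tuples wildcarded */
		fs->type = 0;				/* IPv4 */
		fs->action = FILTER_DROP;
		fs->val.proto = IPPROTO_TCP;		/* protocol fully specified */
		fs->mask.proto = 0xff;
		fs->val.fip[0] = 192;			/* foreign IP, network order */
		fs->val.fip[1] = 168;
		fs->val.fip[2] = 1;
		fs->mask.fip[0] = 0xff;			/* /24: top three octets match */
		fs->mask.fip[1] = 0xff;
		fs->mask.fip[2] = 0xff;
	}
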
548static inline u32 t4_read_reg(struct adapter *adap, u32 reg_addr) 676static inline u32 t4_read_reg(struct adapter *adap, u32 reg_addr)
549{ 677{
550 return readl(adap->regs + reg_addr); 678 return readl(adap->regs + reg_addr);
@@ -701,6 +829,12 @@ static inline int t4_wr_mbox_ns(struct adapter *adap, int mbox, const void *cmd,
701void t4_write_indirect(struct adapter *adap, unsigned int addr_reg, 829void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
702 unsigned int data_reg, const u32 *vals, 830 unsigned int data_reg, const u32 *vals,
703 unsigned int nregs, unsigned int start_idx); 831 unsigned int nregs, unsigned int start_idx);
832void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
833 unsigned int data_reg, u32 *vals, unsigned int nregs,
834 unsigned int start_idx);
835
836struct fw_filter_wr;
837
704void t4_intr_enable(struct adapter *adapter); 838void t4_intr_enable(struct adapter *adapter);
705void t4_intr_disable(struct adapter *adapter); 839void t4_intr_disable(struct adapter *adapter);
706int t4_slow_intr_handler(struct adapter *adapter); 840int t4_slow_intr_handler(struct adapter *adapter);
@@ -737,6 +871,8 @@ void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
737void t4_load_mtus(struct adapter *adap, const unsigned short *mtus, 871void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
738 const unsigned short *alpha, const unsigned short *beta); 872 const unsigned short *alpha, const unsigned short *beta);
739 873
874void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr, int qid);
875
740void t4_wol_magic_enable(struct adapter *adap, unsigned int port, 876void t4_wol_magic_enable(struct adapter *adap, unsigned int port,
741 const u8 *addr); 877 const u8 *addr);
742int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map, 878int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map,
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index a27b4ae20f43..f0718e1a8369 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -175,6 +175,30 @@ enum {
175 MIN_FL_ENTRIES = 16 175 MIN_FL_ENTRIES = 16
176}; 176};
177 177
178/* Host shadow copy of ingress filter entry. This is in host native format
179 * and doesn't match the ordering or bit order, etc. of the hardware or the
180 * firmware command. The use of bit-field structure elements is purely to
181 * remind ourselves of the field size limitations and save memory in the case
182 * where the filter table is large.
183 */
184struct filter_entry {
185 /* Administrative fields for filter.
186 */
187 u32 valid:1; /* filter allocated and valid */
188 u32 locked:1; /* filter is administratively locked */
189
190 u32 pending:1; /* filter action is pending firmware reply */
191 u32 smtidx:8; /* Source MAC Table index for smac */
192 struct l2t_entry *l2t; /* Layer Two Table entry for dmac */
193
194 /* The filter itself. Most of this is a straight copy of information
195 * provided by the extended ioctl(). Some fields are translated to
196 * internal forms -- for instance the Ingress Queue ID passed in from
197 * the ioctl() is translated into the Absolute Ingress Queue ID.
198 */
199 struct ch_filter_specification fs;
200};
201
178#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \ 202#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
179 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\ 203 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
180 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR) 204 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
@@ -325,6 +349,9 @@ enum {
325 349
326static unsigned int tp_vlan_pri_map = TP_VLAN_PRI_MAP_DEFAULT; 350static unsigned int tp_vlan_pri_map = TP_VLAN_PRI_MAP_DEFAULT;
327 351
352module_param(tp_vlan_pri_map, uint, 0644);
353MODULE_PARM_DESC(tp_vlan_pri_map, "global compressed filter configuration");
354
328static struct dentry *cxgb4_debugfs_root; 355static struct dentry *cxgb4_debugfs_root;
329 356
330static LIST_HEAD(adapter_list); 357static LIST_HEAD(adapter_list);
@@ -506,8 +533,67 @@ static int link_start(struct net_device *dev)
506 return ret; 533 return ret;
507} 534}
508 535
509/* 536/* Clear a filter and release any of its resources that we own. This also
510 * Response queue handler for the FW event queue. 537 * clears the filter's "pending" status.
538 */
539static void clear_filter(struct adapter *adap, struct filter_entry *f)
540{
541	/* If the new or old filter has loopback rewriting rules then we'll
542 * need to free any existing Layer Two Table (L2T) entries of the old
543 * filter rule. The firmware will handle freeing up any Source MAC
544 * Table (SMT) entries used for rewriting Source MAC Addresses in
545 * loopback rules.
546 */
547 if (f->l2t)
548 cxgb4_l2t_release(f->l2t);
549
550 /* The zeroing of the filter rule below clears the filter valid,
551 * pending, locked flags, l2t pointer, etc. so it's all we need for
552 * this operation.
553 */
554 memset(f, 0, sizeof(*f));
555}
556
557/* Handle a filter write/deletion reply.
558 */
559static void filter_rpl(struct adapter *adap, const struct cpl_set_tcb_rpl *rpl)
560{
561 unsigned int idx = GET_TID(rpl);
562 unsigned int nidx = idx - adap->tids.ftid_base;
563 unsigned int ret;
564 struct filter_entry *f;
565
566 if (idx >= adap->tids.ftid_base && nidx <
567 (adap->tids.nftids + adap->tids.nsftids)) {
568 idx = nidx;
569 ret = GET_TCB_COOKIE(rpl->cookie);
570 f = &adap->tids.ftid_tab[idx];
571
572 if (ret == FW_FILTER_WR_FLT_DELETED) {
573 /* Clear the filter when we get confirmation from the
574 * hardware that the filter has been deleted.
575 */
576 clear_filter(adap, f);
577 } else if (ret == FW_FILTER_WR_SMT_TBL_FULL) {
578 dev_err(adap->pdev_dev, "filter %u setup failed due to full SMT\n",
579 idx);
580 clear_filter(adap, f);
581 } else if (ret == FW_FILTER_WR_FLT_ADDED) {
582 f->smtidx = (be64_to_cpu(rpl->oldval) >> 24) & 0xff;
583 f->pending = 0; /* asynchronous setup completed */
584 f->valid = 1;
585 } else {
586 /* Something went wrong. Issue a warning about the
587 * problem and clear everything out.
588 */
589 dev_err(adap->pdev_dev, "filter %u setup failed with error %u\n",
590 idx, ret);
591 clear_filter(adap, f);
592 }
593 }
594}
595
596/* Response queue handler for the FW event queue.
511 */ 597 */
512static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp, 598static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
513 const struct pkt_gl *gl) 599 const struct pkt_gl *gl)
@@ -542,6 +628,10 @@ static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
542 const struct cpl_l2t_write_rpl *p = (void *)rsp; 628 const struct cpl_l2t_write_rpl *p = (void *)rsp;
543 629
544 do_l2t_write_rpl(q->adap, p); 630 do_l2t_write_rpl(q->adap, p);
631 } else if (opcode == CPL_SET_TCB_RPL) {
632 const struct cpl_set_tcb_rpl *p = (void *)rsp;
633
634 filter_rpl(q->adap, p);
545 } else 635 } else
546 dev_err(q->adap->pdev_dev, 636 dev_err(q->adap->pdev_dev,
547 "unexpected CPL %#x on FW event queue\n", opcode); 637 "unexpected CPL %#x on FW event queue\n", opcode);
@@ -983,6 +1073,148 @@ static void t4_free_mem(void *addr)
983 kfree(addr); 1073 kfree(addr);
984} 1074}
985 1075
1076/* Send a Work Request to write the filter at a specified index. We construct
1077 * a Firmware Filter Work Request to have the work done and put the indicated
1078 * filter into "pending" mode, which will prevent any further actions against
1079 * it until we get a reply from the firmware on the completion status of the
1080 * request.
1081 */
1082static int set_filter_wr(struct adapter *adapter, int fidx)
1083{
1084 struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
1085 struct sk_buff *skb;
1086 struct fw_filter_wr *fwr;
1087 unsigned int ftid;
1088
1089 /* If the new filter requires loopback Destination MAC and/or VLAN
1090 * rewriting then we need to allocate a Layer 2 Table (L2T) entry for
1091 * the filter.
1092 */
1093 if (f->fs.newdmac || f->fs.newvlan) {
1094 /* allocate L2T entry for new filter */
1095 f->l2t = t4_l2t_alloc_switching(adapter->l2t);
1096 if (f->l2t == NULL)
1097 return -EAGAIN;
1098 if (t4_l2t_set_switching(adapter, f->l2t, f->fs.vlan,
1099 f->fs.eport, f->fs.dmac)) {
1100 cxgb4_l2t_release(f->l2t);
1101 f->l2t = NULL;
1102 return -ENOMEM;
1103 }
1104 }
1105
1106 ftid = adapter->tids.ftid_base + fidx;
1107
1108 skb = alloc_skb(sizeof(*fwr), GFP_KERNEL | __GFP_NOFAIL);
1109 fwr = (struct fw_filter_wr *)__skb_put(skb, sizeof(*fwr));
1110 memset(fwr, 0, sizeof(*fwr));
1111
1112 /* It would be nice to put most of the following in t4_hw.c but most
1113 * of the work is translating the cxgbtool ch_filter_specification
1114 * into the Work Request and the definition of that structure is
1115 * currently in cxgbtool.h which isn't appropriate to pull into the
1116 * common code. We may eventually try to come up with a more neutral
1117 * filter specification structure but for now it's easiest to simply
1118 * put this fairly direct code in line ...
1119 */
1120 fwr->op_pkd = htonl(FW_WR_OP(FW_FILTER_WR));
1121 fwr->len16_pkd = htonl(FW_WR_LEN16(sizeof(*fwr)/16));
1122 fwr->tid_to_iq =
1123 htonl(V_FW_FILTER_WR_TID(ftid) |
1124 V_FW_FILTER_WR_RQTYPE(f->fs.type) |
1125 V_FW_FILTER_WR_NOREPLY(0) |
1126 V_FW_FILTER_WR_IQ(f->fs.iq));
1127 fwr->del_filter_to_l2tix =
1128 htonl(V_FW_FILTER_WR_RPTTID(f->fs.rpttid) |
1129 V_FW_FILTER_WR_DROP(f->fs.action == FILTER_DROP) |
1130 V_FW_FILTER_WR_DIRSTEER(f->fs.dirsteer) |
1131 V_FW_FILTER_WR_MASKHASH(f->fs.maskhash) |
1132 V_FW_FILTER_WR_DIRSTEERHASH(f->fs.dirsteerhash) |
1133 V_FW_FILTER_WR_LPBK(f->fs.action == FILTER_SWITCH) |
1134 V_FW_FILTER_WR_DMAC(f->fs.newdmac) |
1135 V_FW_FILTER_WR_SMAC(f->fs.newsmac) |
1136 V_FW_FILTER_WR_INSVLAN(f->fs.newvlan == VLAN_INSERT ||
1137 f->fs.newvlan == VLAN_REWRITE) |
1138 V_FW_FILTER_WR_RMVLAN(f->fs.newvlan == VLAN_REMOVE ||
1139 f->fs.newvlan == VLAN_REWRITE) |
1140 V_FW_FILTER_WR_HITCNTS(f->fs.hitcnts) |
1141 V_FW_FILTER_WR_TXCHAN(f->fs.eport) |
1142 V_FW_FILTER_WR_PRIO(f->fs.prio) |
1143 V_FW_FILTER_WR_L2TIX(f->l2t ? f->l2t->idx : 0));
1144 fwr->ethtype = htons(f->fs.val.ethtype);
1145 fwr->ethtypem = htons(f->fs.mask.ethtype);
1146 fwr->frag_to_ovlan_vldm =
1147 (V_FW_FILTER_WR_FRAG(f->fs.val.frag) |
1148 V_FW_FILTER_WR_FRAGM(f->fs.mask.frag) |
1149 V_FW_FILTER_WR_IVLAN_VLD(f->fs.val.ivlan_vld) |
1150 V_FW_FILTER_WR_OVLAN_VLD(f->fs.val.ovlan_vld) |
1151 V_FW_FILTER_WR_IVLAN_VLDM(f->fs.mask.ivlan_vld) |
1152 V_FW_FILTER_WR_OVLAN_VLDM(f->fs.mask.ovlan_vld));
1153 fwr->smac_sel = 0;
1154 fwr->rx_chan_rx_rpl_iq =
1155 htons(V_FW_FILTER_WR_RX_CHAN(0) |
1156 V_FW_FILTER_WR_RX_RPL_IQ(adapter->sge.fw_evtq.abs_id));
1157 fwr->maci_to_matchtypem =
1158 htonl(V_FW_FILTER_WR_MACI(f->fs.val.macidx) |
1159 V_FW_FILTER_WR_MACIM(f->fs.mask.macidx) |
1160 V_FW_FILTER_WR_FCOE(f->fs.val.fcoe) |
1161 V_FW_FILTER_WR_FCOEM(f->fs.mask.fcoe) |
1162 V_FW_FILTER_WR_PORT(f->fs.val.iport) |
1163 V_FW_FILTER_WR_PORTM(f->fs.mask.iport) |
1164 V_FW_FILTER_WR_MATCHTYPE(f->fs.val.matchtype) |
1165 V_FW_FILTER_WR_MATCHTYPEM(f->fs.mask.matchtype));
1166 fwr->ptcl = f->fs.val.proto;
1167 fwr->ptclm = f->fs.mask.proto;
1168 fwr->ttyp = f->fs.val.tos;
1169 fwr->ttypm = f->fs.mask.tos;
1170 fwr->ivlan = htons(f->fs.val.ivlan);
1171 fwr->ivlanm = htons(f->fs.mask.ivlan);
1172 fwr->ovlan = htons(f->fs.val.ovlan);
1173 fwr->ovlanm = htons(f->fs.mask.ovlan);
1174 memcpy(fwr->lip, f->fs.val.lip, sizeof(fwr->lip));
1175 memcpy(fwr->lipm, f->fs.mask.lip, sizeof(fwr->lipm));
1176 memcpy(fwr->fip, f->fs.val.fip, sizeof(fwr->fip));
1177 memcpy(fwr->fipm, f->fs.mask.fip, sizeof(fwr->fipm));
1178 fwr->lp = htons(f->fs.val.lport);
1179 fwr->lpm = htons(f->fs.mask.lport);
1180 fwr->fp = htons(f->fs.val.fport);
1181 fwr->fpm = htons(f->fs.mask.fport);
1182 if (f->fs.newsmac)
1183 memcpy(fwr->sma, f->fs.smac, sizeof(fwr->sma));
1184
1185 /* Mark the filter as "pending" and ship off the Filter Work Request.
1186 * When we get the Work Request Reply we'll clear the pending status.
1187 */
1188 f->pending = 1;
1189 set_wr_txq(skb, CPL_PRIORITY_CONTROL, f->fs.val.iport & 0x3);
1190 t4_ofld_send(adapter, skb);
1191 return 0;
1192}
1193
1194/* Delete the filter at a specified index.
1195 */
1196static int del_filter_wr(struct adapter *adapter, int fidx)
1197{
1198 struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
1199 struct sk_buff *skb;
1200 struct fw_filter_wr *fwr;
1201 unsigned int len, ftid;
1202
1203 len = sizeof(*fwr);
1204 ftid = adapter->tids.ftid_base + fidx;
1205
1206 skb = alloc_skb(len, GFP_KERNEL | __GFP_NOFAIL);
1207 fwr = (struct fw_filter_wr *)__skb_put(skb, len);
1208 t4_mk_filtdelwr(ftid, fwr, adapter->sge.fw_evtq.abs_id);
1209
1210 /* Mark the filter as "pending" and ship off the Filter Work Request.
1211 * When we get the Work Request Reply we'll clear the pending status.
1212 */
1213 f->pending = 1;
1214 t4_mgmt_tx(adapter, skb);
1215 return 0;
1216}
1217
986static inline int is_offload(const struct adapter *adap) 1218static inline int is_offload(const struct adapter *adap)
987{ 1219{
988 return adap->params.offload; 1220 return adap->params.offload;
@@ -2195,7 +2427,7 @@ int cxgb4_alloc_atid(struct tid_info *t, void *data)
2195 if (t->afree) { 2427 if (t->afree) {
2196 union aopen_entry *p = t->afree; 2428 union aopen_entry *p = t->afree;
2197 2429
2198 atid = p - t->atid_tab; 2430 atid = (p - t->atid_tab) + t->atid_base;
2199 t->afree = p->next; 2431 t->afree = p->next;
2200 p->data = data; 2432 p->data = data;
2201 t->atids_in_use++; 2433 t->atids_in_use++;
@@ -2210,7 +2442,7 @@ EXPORT_SYMBOL(cxgb4_alloc_atid);
2210 */ 2442 */
2211void cxgb4_free_atid(struct tid_info *t, unsigned int atid) 2443void cxgb4_free_atid(struct tid_info *t, unsigned int atid)
2212{ 2444{
2213 union aopen_entry *p = &t->atid_tab[atid]; 2445 union aopen_entry *p = &t->atid_tab[atid - t->atid_base];
2214 2446
2215 spin_lock_bh(&t->atid_lock); 2447 spin_lock_bh(&t->atid_lock);
2216 p->next = t->afree; 2448 p->next = t->afree;
@@ -2249,8 +2481,34 @@ int cxgb4_alloc_stid(struct tid_info *t, int family, void *data)
2249} 2481}
2250EXPORT_SYMBOL(cxgb4_alloc_stid); 2482EXPORT_SYMBOL(cxgb4_alloc_stid);
2251 2483
2252/* 2484/* Allocate a server filter TID and set it to the supplied value.
2253 * Release a server TID. 2485 */
2486int cxgb4_alloc_sftid(struct tid_info *t, int family, void *data)
2487{
2488 int stid;
2489
2490 spin_lock_bh(&t->stid_lock);
2491 if (family == PF_INET) {
2492 stid = find_next_zero_bit(t->stid_bmap,
2493 t->nstids + t->nsftids, t->nstids);
2494 if (stid < (t->nstids + t->nsftids))
2495 __set_bit(stid, t->stid_bmap);
2496 else
2497 stid = -1;
2498 } else {
2499 stid = -1;
2500 }
2501 if (stid >= 0) {
2502 t->stid_tab[stid].data = data;
2503 stid += t->stid_base;
2504 t->stids_in_use++;
2505 }
2506 spin_unlock_bh(&t->stid_lock);
2507 return stid;
2508}
2509EXPORT_SYMBOL(cxgb4_alloc_sftid);
2510
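Server filter TIDs are handed out from the top nsftids slots of the shared stid bitmap, so a successful return is always at least nstids. A hedged sketch (the helper itself is hypothetical; the arithmetic mirrors cxgb4_create_server_filter() and cxgb4_remove_server_filter() below) of how such an stid maps onto the ftid_tab entry that shadows the filter:

	/* Hypothetical helper mirroring "stid -= nstids; stid += nftids". */
	static inline unsigned int sftid_to_fidx(const struct tid_info *t,
						 unsigned int stid)
	{
		return (stid - t->nstids) + t->nftids;
	}
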
2511/* Release a server TID.
2254 */ 2512 */
2255void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family) 2513void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family)
2256{ 2514{
@@ -2362,18 +2620,26 @@ EXPORT_SYMBOL(cxgb4_remove_tid);
2362static int tid_init(struct tid_info *t) 2620static int tid_init(struct tid_info *t)
2363{ 2621{
2364 size_t size; 2622 size_t size;
2623 unsigned int stid_bmap_size;
2365 unsigned int natids = t->natids; 2624 unsigned int natids = t->natids;
2366 2625
2367 size = t->ntids * sizeof(*t->tid_tab) + natids * sizeof(*t->atid_tab) + 2626 stid_bmap_size = BITS_TO_LONGS(t->nstids + t->nsftids);
2627 size = t->ntids * sizeof(*t->tid_tab) +
2628 natids * sizeof(*t->atid_tab) +
2368 t->nstids * sizeof(*t->stid_tab) + 2629 t->nstids * sizeof(*t->stid_tab) +
2369 BITS_TO_LONGS(t->nstids) * sizeof(long); 2630 t->nsftids * sizeof(*t->stid_tab) +
2631 stid_bmap_size * sizeof(long) +
2632 t->nftids * sizeof(*t->ftid_tab) +
2633 t->nsftids * sizeof(*t->ftid_tab);
2634
2370 t->tid_tab = t4_alloc_mem(size); 2635 t->tid_tab = t4_alloc_mem(size);
2371 if (!t->tid_tab) 2636 if (!t->tid_tab)
2372 return -ENOMEM; 2637 return -ENOMEM;
2373 2638
2374 t->atid_tab = (union aopen_entry *)&t->tid_tab[t->ntids]; 2639 t->atid_tab = (union aopen_entry *)&t->tid_tab[t->ntids];
2375 t->stid_tab = (struct serv_entry *)&t->atid_tab[natids]; 2640 t->stid_tab = (struct serv_entry *)&t->atid_tab[natids];
2376 t->stid_bmap = (unsigned long *)&t->stid_tab[t->nstids]; 2641 t->stid_bmap = (unsigned long *)&t->stid_tab[t->nstids + t->nsftids];
2642 t->ftid_tab = (struct filter_entry *)&t->stid_bmap[stid_bmap_size];
2377 spin_lock_init(&t->stid_lock); 2643 spin_lock_init(&t->stid_lock);
2378 spin_lock_init(&t->atid_lock); 2644 spin_lock_init(&t->atid_lock);
2379 2645
@@ -2388,7 +2654,7 @@ static int tid_init(struct tid_info *t)
2388 t->atid_tab[natids - 1].next = &t->atid_tab[natids]; 2654 t->atid_tab[natids - 1].next = &t->atid_tab[natids];
2389 t->afree = t->atid_tab; 2655 t->afree = t->atid_tab;
2390 } 2656 }
2391 bitmap_zero(t->stid_bmap, t->nstids); 2657 bitmap_zero(t->stid_bmap, t->nstids + t->nsftids);
2392 return 0; 2658 return 0;
2393} 2659}
2394 2660
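For reference, tid_init() carves all of the tables above out of the single t4_alloc_mem() allocation sized by the computation in this hunk, laid out back to back:

	/*
	 * tid_tab[ntids]
	 * atid_tab[natids]
	 * stid_tab[nstids + nsftids]	(server filter entries appended)
	 * stid_bmap[BITS_TO_LONGS(nstids + nsftids) longs]
	 * ftid_tab[nftids + nsftids]	(new: host shadow filter entries)
	 */
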
@@ -2404,7 +2670,8 @@ static int tid_init(struct tid_info *t)
2404 * Returns <0 on error and one of the %NET_XMIT_* values on success. 2670 * Returns <0 on error and one of the %NET_XMIT_* values on success.
2405 */ 2671 */
2406int cxgb4_create_server(const struct net_device *dev, unsigned int stid, 2672int cxgb4_create_server(const struct net_device *dev, unsigned int stid,
2407 __be32 sip, __be16 sport, unsigned int queue) 2673 __be32 sip, __be16 sport, __be16 vlan,
2674 unsigned int queue)
2408{ 2675{
2409 unsigned int chan; 2676 unsigned int chan;
2410 struct sk_buff *skb; 2677 struct sk_buff *skb;
@@ -2750,6 +3017,7 @@ static void uld_attach(struct adapter *adap, unsigned int uld)
2750{ 3017{
2751 void *handle; 3018 void *handle;
2752 struct cxgb4_lld_info lli; 3019 struct cxgb4_lld_info lli;
3020 unsigned short i;
2753 3021
2754 lli.pdev = adap->pdev; 3022 lli.pdev = adap->pdev;
2755 lli.l2t = adap->l2t; 3023 lli.l2t = adap->l2t;
@@ -2776,10 +3044,16 @@ static void uld_attach(struct adapter *adap, unsigned int uld)
2776 lli.ucq_density = 1 << QUEUESPERPAGEPF0_GET( 3044 lli.ucq_density = 1 << QUEUESPERPAGEPF0_GET(
2777 t4_read_reg(adap, SGE_INGRESS_QUEUES_PER_PAGE_PF) >> 3045 t4_read_reg(adap, SGE_INGRESS_QUEUES_PER_PAGE_PF) >>
2778 (adap->fn * 4)); 3046 (adap->fn * 4));
3047 lli.filt_mode = adap->filter_mode;
3048 /* MODQ_REQ_MAP sets queues 0-3 to chan 0-3 */
3049 for (i = 0; i < NCHAN; i++)
3050 lli.tx_modq[i] = i;
2779 lli.gts_reg = adap->regs + MYPF_REG(SGE_PF_GTS); 3051 lli.gts_reg = adap->regs + MYPF_REG(SGE_PF_GTS);
2780 lli.db_reg = adap->regs + MYPF_REG(SGE_PF_KDOORBELL); 3052 lli.db_reg = adap->regs + MYPF_REG(SGE_PF_KDOORBELL);
2781 lli.fw_vers = adap->params.fw_vers; 3053 lli.fw_vers = adap->params.fw_vers;
2782 lli.dbfifo_int_thresh = dbfifo_int_thresh; 3054 lli.dbfifo_int_thresh = dbfifo_int_thresh;
3055 lli.sge_pktshift = adap->sge.pktshift;
3056 lli.enable_fw_ofld_conn = adap->flags & FW_OFLD_CONN;
2783 3057
2784 handle = ulds[uld].add(&lli); 3058 handle = ulds[uld].add(&lli);
2785 if (IS_ERR(handle)) { 3059 if (IS_ERR(handle)) {
@@ -2999,6 +3273,126 @@ static int cxgb_close(struct net_device *dev)
2999 return t4_enable_vi(adapter, adapter->fn, pi->viid, false, false); 3273 return t4_enable_vi(adapter, adapter->fn, pi->viid, false, false);
3000} 3274}
3001 3275
3276/* Return an error number if the indicated filter isn't writable ...
3277 */
3278static int writable_filter(struct filter_entry *f)
3279{
3280 if (f->locked)
3281 return -EPERM;
3282 if (f->pending)
3283 return -EBUSY;
3284
3285 return 0;
3286}
3287
3288/* Delete the filter at the specified index (if valid). This checks for all
3289 * the common problems with doing this, like the filter being locked or
3290 * currently pending in another operation.
3291 */
3292static int delete_filter(struct adapter *adapter, unsigned int fidx)
3293{
3294 struct filter_entry *f;
3295 int ret;
3296
3297 if (fidx >= adapter->tids.nftids + adapter->tids.nsftids)
3298 return -EINVAL;
3299
3300 f = &adapter->tids.ftid_tab[fidx];
3301 ret = writable_filter(f);
3302 if (ret)
3303 return ret;
3304 if (f->valid)
3305 return del_filter_wr(adapter, fidx);
3306
3307 return 0;
3308}
3309
3310int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid,
3311 __be32 sip, __be16 sport, __be16 vlan,
3312 unsigned int queue, unsigned char port, unsigned char mask)
3313{
3314 int ret;
3315 struct filter_entry *f;
3316 struct adapter *adap;
3317 int i;
3318 u8 *val;
3319
3320 adap = netdev2adap(dev);
3321
3322 /* Adjust stid to correct filter index */
3323 stid -= adap->tids.nstids;
3324 stid += adap->tids.nftids;
3325
3326 /* Check to make sure the filter requested is writable ...
3327 */
3328 f = &adap->tids.ftid_tab[stid];
3329 ret = writable_filter(f);
3330 if (ret)
3331 return ret;
3332
3333 /* Clear out any old resources being used by the filter before
3334 * we start constructing the new filter.
3335 */
3336 if (f->valid)
3337 clear_filter(adap, f);
3338
3339 /* Clear out filter specifications */
3340 memset(&f->fs, 0, sizeof(struct ch_filter_specification));
3341 f->fs.val.lport = cpu_to_be16(sport);
3342 f->fs.mask.lport = ~0;
3343 val = (u8 *)&sip;
3344 if ((val[0] | val[1] | val[2] | val[3]) != 0) {
3345 for (i = 0; i < 4; i++) {
3346 f->fs.val.lip[i] = val[i];
3347 f->fs.mask.lip[i] = ~0;
3348 }
3349 if (adap->filter_mode & F_PORT) {
3350 f->fs.val.iport = port;
3351 f->fs.mask.iport = mask;
3352 }
3353 }
3354
3355 f->fs.dirsteer = 1;
3356 f->fs.iq = queue;
3357 /* Mark filter as locked */
3358 f->locked = 1;
3359 f->fs.rpttid = 1;
3360
3361 ret = set_filter_wr(adap, stid);
3362 if (ret) {
3363 clear_filter(adap, f);
3364 return ret;
3365 }
3366
3367 return 0;
3368}
3369EXPORT_SYMBOL(cxgb4_create_server_filter);
3370
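A hedged caller sketch (the helper and its error handling are hypothetical; only the two exported entry points are from this patch): allocate a server filter TID, then install a filter that steers matching SYNs to an ingress queue:

	static int example_install_sfilter(struct net_device *dev,
					   struct tid_info *t, __be32 sip,
					   __be16 sport, unsigned int rss_qid,
					   void *data)
	{
		int stid = cxgb4_alloc_sftid(t, PF_INET, data);

		if (stid < 0)
			return -ENOMEM;
		/* vlan 0; port/mask 0 leaves the ingress port wildcarded */
		return cxgb4_create_server_filter(dev, stid, sip, sport, 0,
						  rss_qid, 0, 0);
	}
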
3371int cxgb4_remove_server_filter(const struct net_device *dev, unsigned int stid,
3372 unsigned int queue, bool ipv6)
3373{
3374 int ret;
3375 struct filter_entry *f;
3376 struct adapter *adap;
3377
3378 adap = netdev2adap(dev);
3379
3380 /* Adjust stid to correct filter index */
3381 stid -= adap->tids.nstids;
3382 stid += adap->tids.nftids;
3383
3384 f = &adap->tids.ftid_tab[stid];
3385 /* Unlock the filter */
3386 f->locked = 0;
3387
3388 ret = delete_filter(adap, stid);
3389 if (ret)
3390 return ret;
3391
3392 return 0;
3393}
3394EXPORT_SYMBOL(cxgb4_remove_server_filter);
3395
3002static struct rtnl_link_stats64 *cxgb_get_stats(struct net_device *dev, 3396static struct rtnl_link_stats64 *cxgb_get_stats(struct net_device *dev,
3003 struct rtnl_link_stats64 *ns) 3397 struct rtnl_link_stats64 *ns)
3004{ 3398{
@@ -3245,6 +3639,34 @@ static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c)
3245 v = t4_read_reg(adap, TP_PIO_DATA); 3639 v = t4_read_reg(adap, TP_PIO_DATA);
3246 t4_write_reg(adap, TP_PIO_DATA, v & ~CSUM_HAS_PSEUDO_HDR); 3640 t4_write_reg(adap, TP_PIO_DATA, v & ~CSUM_HAS_PSEUDO_HDR);
3247 3641
3642 /* first 4 Tx modulation queues point to consecutive Tx channels */
3643 adap->params.tp.tx_modq_map = 0xE4;
3644 t4_write_reg(adap, A_TP_TX_MOD_QUEUE_REQ_MAP,
3645 V_TX_MOD_QUEUE_REQ_MAP(adap->params.tp.tx_modq_map));
3646
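The 0xE4 constant encodes the identity map at two bits per queue: 0xE4 = 0b11100100, i.e. queue 0 -> channel 0, queue 1 -> channel 1, queue 2 -> channel 2 and queue 3 -> channel 3, which is exactly the "consecutive Tx channels" behavior the comment promises.
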
3647 /* associate each Tx modulation queue with consecutive Tx channels */
3648 v = 0x84218421;
3649 t4_write_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
3650 &v, 1, A_TP_TX_SCHED_HDR);
3651 t4_write_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
3652 &v, 1, A_TP_TX_SCHED_FIFO);
3653 t4_write_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
3654 &v, 1, A_TP_TX_SCHED_PCMD);
3655
3656#define T4_TX_MODQ_10G_WEIGHT_DEFAULT 16 /* in KB units */
3657 if (is_offload(adap)) {
3658 t4_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT0,
3659 V_TX_MODQ_WEIGHT0(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
3660 V_TX_MODQ_WEIGHT1(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
3661 V_TX_MODQ_WEIGHT2(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
3662 V_TX_MODQ_WEIGHT3(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
3663 t4_write_reg(adap, A_TP_TX_MOD_CHANNEL_WEIGHT,
3664 V_TX_MODQ_WEIGHT0(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
3665 V_TX_MODQ_WEIGHT1(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
3666 V_TX_MODQ_WEIGHT2(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
3667 V_TX_MODQ_WEIGHT3(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
3668 }
3669
3248 /* get basic stuff going */ 3670 /* get basic stuff going */
3249 return t4_early_init(adap, adap->fn); 3671 return t4_early_init(adap, adap->fn);
3250} 3672}
@@ -4035,6 +4457,10 @@ static int adap_init0(struct adapter *adap)
4035 for (j = 0; j < NCHAN; j++) 4457 for (j = 0; j < NCHAN; j++)
4036 adap->params.tp.tx_modq[j] = j; 4458 adap->params.tp.tx_modq[j] = j;
4037 4459
4460 t4_read_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
4461 &adap->filter_mode, 1,
4462 TP_VLAN_PRI_MAP);
4463
4038 adap->flags |= FW_OK; 4464 adap->flags |= FW_OK;
4039 return 0; 4465 return 0;
4040 4466
@@ -4661,6 +5087,17 @@ static void remove_one(struct pci_dev *pdev)
4661 if (adapter->debugfs_root) 5087 if (adapter->debugfs_root)
4662 debugfs_remove_recursive(adapter->debugfs_root); 5088 debugfs_remove_recursive(adapter->debugfs_root);
4663 5089
5090 /* If we allocated filters, free up state associated with any
5091 * valid filters ...
5092 */
5093 if (adapter->tids.ftid_tab) {
5094 struct filter_entry *f = &adapter->tids.ftid_tab[0];
5095 for (i = 0; i < (adapter->tids.nftids +
5096 adapter->tids.nsftids); i++, f++)
5097 if (f->valid)
5098 clear_filter(adapter, f);
5099 }
5100
4664 if (adapter->flags & FULL_INIT_DONE) 5101 if (adapter->flags & FULL_INIT_DONE)
4665 cxgb_down(adapter); 5102 cxgb_down(adapter);
4666 5103
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
index 39bec73ff87c..e2bbc7f3e2de 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
@@ -38,6 +38,7 @@
38#include <linux/cache.h> 38#include <linux/cache.h>
39#include <linux/spinlock.h> 39#include <linux/spinlock.h>
40#include <linux/skbuff.h> 40#include <linux/skbuff.h>
41#include <linux/inetdevice.h>
41#include <linux/atomic.h> 42#include <linux/atomic.h>
42 43
43/* CPL message priority levels */ 44/* CPL message priority levels */
@@ -97,7 +98,9 @@ struct tid_info {
97 98
98 union aopen_entry *atid_tab; 99 union aopen_entry *atid_tab;
99 unsigned int natids; 100 unsigned int natids;
101 unsigned int atid_base;
100 102
103 struct filter_entry *ftid_tab;
101 unsigned int nftids; 104 unsigned int nftids;
102 unsigned int ftid_base; 105 unsigned int ftid_base;
103 unsigned int aftid_base; 106 unsigned int aftid_base;
@@ -129,7 +132,7 @@ static inline void *lookup_atid(const struct tid_info *t, unsigned int atid)
129static inline void *lookup_stid(const struct tid_info *t, unsigned int stid) 132static inline void *lookup_stid(const struct tid_info *t, unsigned int stid)
130{ 133{
131 stid -= t->stid_base; 134 stid -= t->stid_base;
132 return stid < t->nstids ? t->stid_tab[stid].data : NULL; 135 return stid < (t->nstids + t->nsftids) ? t->stid_tab[stid].data : NULL;
133} 136}
134 137
135static inline void cxgb4_insert_tid(struct tid_info *t, void *data, 138static inline void cxgb4_insert_tid(struct tid_info *t, void *data,
@@ -141,6 +144,7 @@ static inline void cxgb4_insert_tid(struct tid_info *t, void *data,
141 144
142int cxgb4_alloc_atid(struct tid_info *t, void *data); 145int cxgb4_alloc_atid(struct tid_info *t, void *data);
143int cxgb4_alloc_stid(struct tid_info *t, int family, void *data); 146int cxgb4_alloc_stid(struct tid_info *t, int family, void *data);
147int cxgb4_alloc_sftid(struct tid_info *t, int family, void *data);
144void cxgb4_free_atid(struct tid_info *t, unsigned int atid); 148void cxgb4_free_atid(struct tid_info *t, unsigned int atid);
145void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family); 149void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family);
146void cxgb4_remove_tid(struct tid_info *t, unsigned int qid, unsigned int tid); 150void cxgb4_remove_tid(struct tid_info *t, unsigned int qid, unsigned int tid);
@@ -148,8 +152,14 @@ void cxgb4_remove_tid(struct tid_info *t, unsigned int qid, unsigned int tid);
148struct in6_addr; 152struct in6_addr;
149 153
150int cxgb4_create_server(const struct net_device *dev, unsigned int stid, 154int cxgb4_create_server(const struct net_device *dev, unsigned int stid,
151 __be32 sip, __be16 sport, unsigned int queue); 155 __be32 sip, __be16 sport, __be16 vlan,
152 156 unsigned int queue);
157int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid,
158 __be32 sip, __be16 sport, __be16 vlan,
159 unsigned int queue,
160 unsigned char port, unsigned char mask);
161int cxgb4_remove_server_filter(const struct net_device *dev, unsigned int stid,
162 unsigned int queue, bool ipv6);
153static inline void set_wr_txq(struct sk_buff *skb, int prio, int queue) 163static inline void set_wr_txq(struct sk_buff *skb, int prio, int queue)
154{ 164{
155 skb_set_queue_mapping(skb, (queue << 1) | prio); 165 skb_set_queue_mapping(skb, (queue << 1) | prio);
@@ -221,9 +231,16 @@ struct cxgb4_lld_info {
221 unsigned int iscsi_iolen; /* iSCSI max I/O length */ 231 unsigned int iscsi_iolen; /* iSCSI max I/O length */
222 unsigned short udb_density; /* # of user DB/page */ 232 unsigned short udb_density; /* # of user DB/page */
223 unsigned short ucq_density; /* # of user CQs/page */ 233 unsigned short ucq_density; /* # of user CQs/page */
234 unsigned short filt_mode; /* filter optional components */
235 unsigned short tx_modq[NCHAN]; /* maps each tx channel to a */
236 /* scheduler queue */
224 void __iomem *gts_reg; /* address of GTS register */ 237 void __iomem *gts_reg; /* address of GTS register */
225 void __iomem *db_reg; /* address of kernel doorbell */ 238 void __iomem *db_reg; /* address of kernel doorbell */
226 int dbfifo_int_thresh; /* doorbell fifo int threshold */ 239 int dbfifo_int_thresh; /* doorbell fifo int threshold */
240 unsigned int sge_pktshift; /* Padding between CPL and */
241 /* packet data */
242 bool enable_fw_ofld_conn; /* Enable connection through fw */
243 /* WR */
227}; 244};
228 245
229struct cxgb4_uld_info { 246struct cxgb4_uld_info {
diff --git a/drivers/net/ethernet/chelsio/cxgb4/l2t.c b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
index 6ac77a62f361..29878098101e 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/l2t.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
@@ -484,6 +484,38 @@ void t4_l2t_update(struct adapter *adap, struct neighbour *neigh)
484 handle_failed_resolution(adap, arpq); 484 handle_failed_resolution(adap, arpq);
485} 485}
486 486
487/* Allocate an L2T entry for use by a switching rule. Such entries need to be
488 * explicitly freed, and while busy they are not on any hash chain, so normal
489 * address resolution updates do not see them.
490 */
491struct l2t_entry *t4_l2t_alloc_switching(struct l2t_data *d)
492{
493 struct l2t_entry *e;
494
495 write_lock_bh(&d->lock);
496 e = alloc_l2e(d);
497 if (e) {
498 spin_lock(&e->lock); /* avoid race with t4_l2t_free */
499 e->state = L2T_STATE_SWITCHING;
500 atomic_set(&e->refcnt, 1);
501 spin_unlock(&e->lock);
502 }
503 write_unlock_bh(&d->lock);
504 return e;
505}
506
507/* Sets/updates the contents of a switching L2T entry that has been allocated
508 * with an earlier call to @t4_l2t_alloc_switching.
509 */
510int t4_l2t_set_switching(struct adapter *adap, struct l2t_entry *e, u16 vlan,
511 u8 port, u8 *eth_addr)
512{
513 e->vlan = vlan;
514 e->lport = port;
515 memcpy(e->dmac, eth_addr, ETH_ALEN);
516 return write_l2e(adap, e, 0);
517}
518
487struct l2t_data *t4_init_l2t(void) 519struct l2t_data *t4_init_l2t(void)
488{ 520{
489 int i; 521 int i;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/l2t.h b/drivers/net/ethernet/chelsio/cxgb4/l2t.h
index 02b31d0c6410..108c0f1fce1c 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/l2t.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/l2t.h
@@ -100,6 +100,9 @@ struct l2t_entry *cxgb4_l2t_get(struct l2t_data *d, struct neighbour *neigh,
100 unsigned int priority); 100 unsigned int priority);
101 101
102void t4_l2t_update(struct adapter *adap, struct neighbour *neigh); 102void t4_l2t_update(struct adapter *adap, struct neighbour *neigh);
103struct l2t_entry *t4_l2t_alloc_switching(struct l2t_data *d);
104int t4_l2t_set_switching(struct adapter *adap, struct l2t_entry *e, u16 vlan,
105 u8 port, u8 *eth_addr);
103struct l2t_data *t4_init_l2t(void); 106struct l2t_data *t4_init_l2t(void);
104void do_l2t_write_rpl(struct adapter *p, const struct cpl_l2t_write_rpl *rpl); 107void do_l2t_write_rpl(struct adapter *p, const struct cpl_l2t_write_rpl *rpl);
105 108
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index 8d9c7547b070..22f3af5166bf 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -109,7 +109,7 @@ void t4_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
109 * Reads registers that are accessed indirectly through an address/data 109 * Reads registers that are accessed indirectly through an address/data
110 * register pair. 110 * register pair.
111 */ 111 */
112static void t4_read_indirect(struct adapter *adap, unsigned int addr_reg, 112void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
113 unsigned int data_reg, u32 *vals, 113 unsigned int data_reg, u32 *vals,
114 unsigned int nregs, unsigned int start_idx) 114 unsigned int nregs, unsigned int start_idx)
115{ 115{
@@ -2268,6 +2268,26 @@ int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map,
2268 return 0; 2268 return 0;
2269} 2269}
2270 2270
2271/* t4_mk_filtdelwr - create a delete filter WR
2272 * @ftid: the filter ID
2273 * @wr: the filter work request to populate
2274 * @qid: ingress queue to receive the delete notification
2275 *
2276 * Creates a filter work request to delete the supplied filter. If @qid is
2277 * negative the delete notification is suppressed.
2278 */
2279void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr, int qid)
2280{
2281 memset(wr, 0, sizeof(*wr));
2282 wr->op_pkd = htonl(FW_WR_OP(FW_FILTER_WR));
2283 wr->len16_pkd = htonl(FW_WR_LEN16(sizeof(*wr) / 16));
2284 wr->tid_to_iq = htonl(V_FW_FILTER_WR_TID(ftid) |
2285 V_FW_FILTER_WR_NOREPLY(qid < 0));
2286 wr->del_filter_to_l2tix = htonl(F_FW_FILTER_WR_DEL_FILTER);
2287 if (qid >= 0)
2288 wr->rx_chan_rx_rpl_iq = htons(V_FW_FILTER_WR_RX_RPL_IQ(qid));
2289}
2290
2271#define INIT_CMD(var, cmd, rd_wr) do { \ 2291#define INIT_CMD(var, cmd, rd_wr) do { \
2272 (var).op_to_write = htonl(FW_CMD_OP(FW_##cmd##_CMD) | \ 2292 (var).op_to_write = htonl(FW_CMD_OP(FW_##cmd##_CMD) | \
2273 FW_CMD_REQUEST | FW_CMD_##rd_wr); \ 2293 FW_CMD_REQUEST | FW_CMD_##rd_wr); \
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
index b760808fd6d9..261d17703adc 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
@@ -193,8 +193,24 @@ struct work_request_hdr {
193 __be64 wr_lo; 193 __be64 wr_lo;
194}; 194};
195 195
196/* wr_hi fields */
197#define S_WR_OP 24
198#define V_WR_OP(x) ((__u64)(x) << S_WR_OP)
199
196#define WR_HDR struct work_request_hdr wr 200#define WR_HDR struct work_request_hdr wr
197 201
202/* option 0 fields */
203#define S_MSS_IDX 60
204#define M_MSS_IDX 0xF
205#define V_MSS_IDX(x) ((__u64)(x) << S_MSS_IDX)
206#define G_MSS_IDX(x) (((x) >> S_MSS_IDX) & M_MSS_IDX)
207
208/* option 2 fields */
209#define S_RSS_QUEUE 0
210#define M_RSS_QUEUE 0x3FF
211#define V_RSS_QUEUE(x) ((x) << S_RSS_QUEUE)
212#define G_RSS_QUEUE(x) (((x) >> S_RSS_QUEUE) & M_RSS_QUEUE)
213
198struct cpl_pass_open_req { 214struct cpl_pass_open_req {
199 WR_HDR; 215 WR_HDR;
200 union opcode_tid ot; 216 union opcode_tid ot;
@@ -204,12 +220,14 @@ struct cpl_pass_open_req {
204 __be32 peer_ip; 220 __be32 peer_ip;
205 __be64 opt0; 221 __be64 opt0;
206#define TX_CHAN(x) ((x) << 2) 222#define TX_CHAN(x) ((x) << 2)
223#define NO_CONG(x) ((x) << 4)
207#define DELACK(x) ((x) << 5) 224#define DELACK(x) ((x) << 5)
208#define ULP_MODE(x) ((x) << 8) 225#define ULP_MODE(x) ((x) << 8)
209#define RCV_BUFSIZ(x) ((x) << 12) 226#define RCV_BUFSIZ(x) ((x) << 12)
210#define DSCP(x) ((x) << 22) 227#define DSCP(x) ((x) << 22)
211#define SMAC_SEL(x) ((u64)(x) << 28) 228#define SMAC_SEL(x) ((u64)(x) << 28)
212#define L2T_IDX(x) ((u64)(x) << 36) 229#define L2T_IDX(x) ((u64)(x) << 36)
230#define TCAM_BYPASS(x) ((u64)(x) << 48)
213#define NAGLE(x) ((u64)(x) << 49) 231#define NAGLE(x) ((u64)(x) << 49)
214#define WND_SCALE(x) ((u64)(x) << 50) 232#define WND_SCALE(x) ((u64)(x) << 50)
215#define KEEP_ALIVE(x) ((u64)(x) << 54) 233#define KEEP_ALIVE(x) ((u64)(x) << 54)
@@ -247,8 +265,10 @@ struct cpl_pass_accept_rpl {
247#define RSS_QUEUE_VALID (1 << 10) 265#define RSS_QUEUE_VALID (1 << 10)
248#define RX_COALESCE_VALID(x) ((x) << 11) 266#define RX_COALESCE_VALID(x) ((x) << 11)
249#define RX_COALESCE(x) ((x) << 12) 267#define RX_COALESCE(x) ((x) << 12)
268#define PACE(x) ((x) << 16)
250#define TX_QUEUE(x) ((x) << 23) 269#define TX_QUEUE(x) ((x) << 23)
251#define RX_CHANNEL(x) ((x) << 26) 270#define RX_CHANNEL(x) ((x) << 26)
271#define CCTRL_ECN(x) ((x) << 27)
252#define WND_SCALE_EN(x) ((x) << 28) 272#define WND_SCALE_EN(x) ((x) << 28)
253#define TSTAMPS_EN(x) ((x) << 29) 273#define TSTAMPS_EN(x) ((x) << 29)
254#define SACK_EN(x) ((x) << 30) 274#define SACK_EN(x) ((x) << 30)
@@ -292,6 +312,9 @@ struct cpl_pass_establish {
292 union opcode_tid ot; 312 union opcode_tid ot;
293 __be32 rsvd; 313 __be32 rsvd;
294 __be32 tos_stid; 314 __be32 tos_stid;
315#define PASS_OPEN_TID(x) ((x) << 0)
316#define PASS_OPEN_TOS(x) ((x) << 24)
317#define GET_PASS_OPEN_TID(x) (((x) >> 0) & 0xFFFFFF)
295#define GET_POPEN_TID(x) ((x) & 0xffffff) 318#define GET_POPEN_TID(x) ((x) & 0xffffff)
296#define GET_POPEN_TOS(x) (((x) >> 24) & 0xff) 319#define GET_POPEN_TOS(x) (((x) >> 24) & 0xff)
297 __be16 mac_idx; 320 __be16 mac_idx;
@@ -332,6 +355,7 @@ struct cpl_set_tcb_field {
332 __be16 word_cookie; 355 __be16 word_cookie;
333#define TCB_WORD(x) ((x) << 0) 356#define TCB_WORD(x) ((x) << 0)
334#define TCB_COOKIE(x) ((x) << 5) 357#define TCB_COOKIE(x) ((x) << 5)
358#define GET_TCB_COOKIE(x) (((x) >> 5) & 7)
335 __be64 mask; 359 __be64 mask;
336 __be64 val; 360 __be64 val;
337}; 361};
@@ -536,6 +560,37 @@ struct cpl_rx_pkt {
536 __be16 err_vec; 560 __be16 err_vec;
537}; 561};
538 562
563/* rx_pkt.l2info fields */
564#define S_RX_ETHHDR_LEN 0
565#define M_RX_ETHHDR_LEN 0x1F
566#define V_RX_ETHHDR_LEN(x) ((x) << S_RX_ETHHDR_LEN)
567#define G_RX_ETHHDR_LEN(x) (((x) >> S_RX_ETHHDR_LEN) & M_RX_ETHHDR_LEN)
568
569#define S_RX_MACIDX 8
570#define M_RX_MACIDX 0x1FF
571#define V_RX_MACIDX(x) ((x) << S_RX_MACIDX)
572#define G_RX_MACIDX(x) (((x) >> S_RX_MACIDX) & M_RX_MACIDX)
573
574#define S_RXF_SYN 21
575#define V_RXF_SYN(x) ((x) << S_RXF_SYN)
576#define F_RXF_SYN V_RXF_SYN(1U)
577
578#define S_RX_CHAN 28
579#define M_RX_CHAN 0xF
580#define V_RX_CHAN(x) ((x) << S_RX_CHAN)
581#define G_RX_CHAN(x) (((x) >> S_RX_CHAN) & M_RX_CHAN)
582
583/* rx_pkt.hdr_len fields */
584#define S_RX_TCPHDR_LEN 0
585#define M_RX_TCPHDR_LEN 0x3F
586#define V_RX_TCPHDR_LEN(x) ((x) << S_RX_TCPHDR_LEN)
587#define G_RX_TCPHDR_LEN(x) (((x) >> S_RX_TCPHDR_LEN) & M_RX_TCPHDR_LEN)
588
589#define S_RX_IPHDR_LEN 6
590#define M_RX_IPHDR_LEN 0x3FF
591#define V_RX_IPHDR_LEN(x) ((x) << S_RX_IPHDR_LEN)
592#define G_RX_IPHDR_LEN(x) (((x) >> S_RX_IPHDR_LEN) & M_RX_IPHDR_LEN)
593
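These follow the driver's S_ (shift), M_ (mask), V_ (insert) and G_ (extract) naming convention. A hedged decode sketch, assuming the __be32 l2info and __be16 hdr_len fields of struct cpl_rx_pkt as defined in this header:

	/* Illustrative only: pull header lengths out of a received CPL. */
	static void example_decode_rx_pkt(const struct cpl_rx_pkt *cpl)
	{
		unsigned int eth_len = G_RX_ETHHDR_LEN(ntohl(cpl->l2info));
		unsigned int ip_len = G_RX_IPHDR_LEN(ntohs(cpl->hdr_len));
		unsigned int tcp_len = G_RX_TCPHDR_LEN(ntohs(cpl->hdr_len));

		pr_debug("eth %u ip %u tcp %u syn %d\n", eth_len, ip_len,
			 tcp_len, !!(ntohl(cpl->l2info) & F_RXF_SYN));
	}
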
539struct cpl_trace_pkt { 594struct cpl_trace_pkt {
540 u8 opcode; 595 u8 opcode;
541 u8 intf; 596 u8 intf;
@@ -634,6 +689,17 @@ struct cpl_fw6_msg {
634/* cpl_fw6_msg.type values */ 689/* cpl_fw6_msg.type values */
635enum { 690enum {
636 FW6_TYPE_CMD_RPL = 0, 691 FW6_TYPE_CMD_RPL = 0,
692 FW6_TYPE_WR_RPL = 1,
693 FW6_TYPE_CQE = 2,
694 FW6_TYPE_OFLD_CONNECTION_WR_RPL = 3,
695};
696
697struct cpl_fw6_msg_ofld_connection_wr_rpl {
698 __u64 cookie;
699 __be32 tid; /* or atid in case of active failure */
700 __u8 t_state;
701 __u8 retval;
702 __u8 rsvd[2];
637}; 703};
638 704
639enum { 705enum {
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
index 75393f5cff41..83ec5f7844ac 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
@@ -1064,4 +1064,41 @@
1064#define ADDRESS(x) ((x) << ADDRESS_SHIFT) 1064#define ADDRESS(x) ((x) << ADDRESS_SHIFT)
1065 1065
1066#define XGMAC_PORT_INT_CAUSE 0x10dc 1066#define XGMAC_PORT_INT_CAUSE 0x10dc
1067
1068#define A_TP_TX_MOD_QUEUE_REQ_MAP 0x7e28
1069
1070#define A_TP_TX_MOD_CHANNEL_WEIGHT 0x7e34
1071
1072#define S_TX_MOD_QUEUE_REQ_MAP 0
1073#define M_TX_MOD_QUEUE_REQ_MAP 0xffffU
1074#define V_TX_MOD_QUEUE_REQ_MAP(x) ((x) << S_TX_MOD_QUEUE_REQ_MAP)
1075
1076#define A_TP_TX_MOD_QUEUE_WEIGHT0 0x7e30
1077
1078#define S_TX_MODQ_WEIGHT3 24
1079#define M_TX_MODQ_WEIGHT3 0xffU
1080#define V_TX_MODQ_WEIGHT3(x) ((x) << S_TX_MODQ_WEIGHT3)
1081
1082#define S_TX_MODQ_WEIGHT2 16
1083#define M_TX_MODQ_WEIGHT2 0xffU
1084#define V_TX_MODQ_WEIGHT2(x) ((x) << S_TX_MODQ_WEIGHT2)
1085
1086#define S_TX_MODQ_WEIGHT1 8
1087#define M_TX_MODQ_WEIGHT1 0xffU
1088#define V_TX_MODQ_WEIGHT1(x) ((x) << S_TX_MODQ_WEIGHT1)
1089
1090#define S_TX_MODQ_WEIGHT0 0
1091#define M_TX_MODQ_WEIGHT0 0xffU
1092#define V_TX_MODQ_WEIGHT0(x) ((x) << S_TX_MODQ_WEIGHT0)
1093
1094#define A_TP_TX_SCHED_HDR 0x23
1095
1096#define A_TP_TX_SCHED_FIFO 0x24
1097
1098#define A_TP_TX_SCHED_PCMD 0x25
1099
1100#define S_PORT 1
1101#define V_PORT(x) ((x) << S_PORT)
1102#define F_PORT V_PORT(1U)
1103
1067#endif /* __T4_REGS_H */ 1104#endif /* __T4_REGS_H */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
index 0abc864cdd3a..a0dcccd846c9 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
@@ -35,6 +35,45 @@
35#ifndef _T4FW_INTERFACE_H_ 35#ifndef _T4FW_INTERFACE_H_
36#define _T4FW_INTERFACE_H_ 36#define _T4FW_INTERFACE_H_
37 37
38enum fw_retval {
39	FW_SUCCESS = 0, /* completed successfully */
40 FW_EPERM = 1, /* operation not permitted */
41 FW_ENOENT = 2, /* no such file or directory */
42 FW_EIO = 5, /* input/output error; hw bad */
43 FW_ENOEXEC = 8, /* exec format error; inv microcode */
44 FW_EAGAIN = 11, /* try again */
45 FW_ENOMEM = 12, /* out of memory */
46 FW_EFAULT = 14, /* bad address; fw bad */
47 FW_EBUSY = 16, /* resource busy */
48 FW_EEXIST = 17, /* file exists */
49 FW_EINVAL = 22, /* invalid argument */
50 FW_ENOSPC = 28, /* no space left on device */
51 FW_ENOSYS = 38, /* functionality not implemented */
52 FW_EPROTO = 71, /* protocol error */
53 FW_EADDRINUSE = 98, /* address already in use */
 54 FW_EADDRNOTAVAIL = 99, /* cannot assign requested address */
55 FW_ENETDOWN = 100, /* network is down */
56 FW_ENETUNREACH = 101, /* network is unreachable */
57 FW_ENOBUFS = 105, /* no buffer space available */
58 FW_ETIMEDOUT = 110, /* timeout */
59 FW_EINPROGRESS = 115, /* fw internal */
60 FW_SCSI_ABORT_REQUESTED = 128, /* */
61 FW_SCSI_ABORT_TIMEDOUT = 129, /* */
62 FW_SCSI_ABORTED = 130, /* */
63 FW_SCSI_CLOSE_REQUESTED = 131, /* */
64 FW_ERR_LINK_DOWN = 132, /* */
65 FW_RDEV_NOT_READY = 133, /* */
66 FW_ERR_RDEV_LOST = 134, /* */
67 FW_ERR_RDEV_LOGO = 135, /* */
68 FW_FCOE_NO_XCHG = 136, /* */
69 FW_SCSI_RSP_ERR = 137, /* */
70 FW_ERR_RDEV_IMPL_LOGO = 138, /* */
71 FW_SCSI_UNDER_FLOW_ERR = 139, /* */
72 FW_SCSI_OVER_FLOW_ERR = 140, /* */
73 FW_SCSI_DDP_ERR = 141, /* DDP error*/
74 FW_SCSI_TASK_ERR = 142, /* No SCSI tasks available */
75};
76
38#define FW_T4VF_SGE_BASE_ADDR 0x0000 77#define FW_T4VF_SGE_BASE_ADDR 0x0000
39#define FW_T4VF_MPS_BASE_ADDR 0x0100 78#define FW_T4VF_MPS_BASE_ADDR 0x0100
40#define FW_T4VF_PL_BASE_ADDR 0x0200 79#define FW_T4VF_PL_BASE_ADDR 0x0200
@@ -46,6 +85,7 @@ enum fw_wr_opcodes {
46 FW_ULPTX_WR = 0x04, 85 FW_ULPTX_WR = 0x04,
47 FW_TP_WR = 0x05, 86 FW_TP_WR = 0x05,
48 FW_ETH_TX_PKT_WR = 0x08, 87 FW_ETH_TX_PKT_WR = 0x08,
88 FW_OFLD_CONNECTION_WR = 0x2f,
49 FW_FLOWC_WR = 0x0a, 89 FW_FLOWC_WR = 0x0a,
50 FW_OFLD_TX_DATA_WR = 0x0b, 90 FW_OFLD_TX_DATA_WR = 0x0b,
51 FW_CMD_WR = 0x10, 91 FW_CMD_WR = 0x10,
@@ -81,6 +121,282 @@ struct fw_wr_hdr {
81#define FW_WR_LEN16(x) ((x) << 0) 121#define FW_WR_LEN16(x) ((x) << 0)
82 122
83#define HW_TPL_FR_MT_PR_IV_P_FC 0X32B 123#define HW_TPL_FR_MT_PR_IV_P_FC 0X32B
124#define HW_TPL_FR_MT_PR_OV_P_FC 0X327
125
126/* filter wr reply code in cookie in CPL_SET_TCB_RPL */
127enum fw_filter_wr_cookie {
128 FW_FILTER_WR_SUCCESS,
129 FW_FILTER_WR_FLT_ADDED,
130 FW_FILTER_WR_FLT_DELETED,
131 FW_FILTER_WR_SMT_TBL_FULL,
132 FW_FILTER_WR_EINVAL,
133};
134
135struct fw_filter_wr {
136 __be32 op_pkd;
137 __be32 len16_pkd;
138 __be64 r3;
139 __be32 tid_to_iq;
140 __be32 del_filter_to_l2tix;
141 __be16 ethtype;
142 __be16 ethtypem;
143 __u8 frag_to_ovlan_vldm;
144 __u8 smac_sel;
145 __be16 rx_chan_rx_rpl_iq;
146 __be32 maci_to_matchtypem;
147 __u8 ptcl;
148 __u8 ptclm;
149 __u8 ttyp;
150 __u8 ttypm;
151 __be16 ivlan;
152 __be16 ivlanm;
153 __be16 ovlan;
154 __be16 ovlanm;
155 __u8 lip[16];
156 __u8 lipm[16];
157 __u8 fip[16];
158 __u8 fipm[16];
159 __be16 lp;
160 __be16 lpm;
161 __be16 fp;
162 __be16 fpm;
163 __be16 r7;
164 __u8 sma[6];
165};
166
167#define S_FW_FILTER_WR_TID 12
168#define M_FW_FILTER_WR_TID 0xfffff
169#define V_FW_FILTER_WR_TID(x) ((x) << S_FW_FILTER_WR_TID)
170#define G_FW_FILTER_WR_TID(x) \
171 (((x) >> S_FW_FILTER_WR_TID) & M_FW_FILTER_WR_TID)
172
173#define S_FW_FILTER_WR_RQTYPE 11
174#define M_FW_FILTER_WR_RQTYPE 0x1
175#define V_FW_FILTER_WR_RQTYPE(x) ((x) << S_FW_FILTER_WR_RQTYPE)
176#define G_FW_FILTER_WR_RQTYPE(x) \
177 (((x) >> S_FW_FILTER_WR_RQTYPE) & M_FW_FILTER_WR_RQTYPE)
178#define F_FW_FILTER_WR_RQTYPE V_FW_FILTER_WR_RQTYPE(1U)
179
180#define S_FW_FILTER_WR_NOREPLY 10
181#define M_FW_FILTER_WR_NOREPLY 0x1
182#define V_FW_FILTER_WR_NOREPLY(x) ((x) << S_FW_FILTER_WR_NOREPLY)
183#define G_FW_FILTER_WR_NOREPLY(x) \
184 (((x) >> S_FW_FILTER_WR_NOREPLY) & M_FW_FILTER_WR_NOREPLY)
185#define F_FW_FILTER_WR_NOREPLY V_FW_FILTER_WR_NOREPLY(1U)
186
187#define S_FW_FILTER_WR_IQ 0
188#define M_FW_FILTER_WR_IQ 0x3ff
189#define V_FW_FILTER_WR_IQ(x) ((x) << S_FW_FILTER_WR_IQ)
190#define G_FW_FILTER_WR_IQ(x) \
191 (((x) >> S_FW_FILTER_WR_IQ) & M_FW_FILTER_WR_IQ)
192
193#define S_FW_FILTER_WR_DEL_FILTER 31
194#define M_FW_FILTER_WR_DEL_FILTER 0x1
195#define V_FW_FILTER_WR_DEL_FILTER(x) ((x) << S_FW_FILTER_WR_DEL_FILTER)
196#define G_FW_FILTER_WR_DEL_FILTER(x) \
197 (((x) >> S_FW_FILTER_WR_DEL_FILTER) & M_FW_FILTER_WR_DEL_FILTER)
198#define F_FW_FILTER_WR_DEL_FILTER V_FW_FILTER_WR_DEL_FILTER(1U)
199
200#define S_FW_FILTER_WR_RPTTID 25
201#define M_FW_FILTER_WR_RPTTID 0x1
202#define V_FW_FILTER_WR_RPTTID(x) ((x) << S_FW_FILTER_WR_RPTTID)
203#define G_FW_FILTER_WR_RPTTID(x) \
204 (((x) >> S_FW_FILTER_WR_RPTTID) & M_FW_FILTER_WR_RPTTID)
205#define F_FW_FILTER_WR_RPTTID V_FW_FILTER_WR_RPTTID(1U)
206
207#define S_FW_FILTER_WR_DROP 24
208#define M_FW_FILTER_WR_DROP 0x1
209#define V_FW_FILTER_WR_DROP(x) ((x) << S_FW_FILTER_WR_DROP)
210#define G_FW_FILTER_WR_DROP(x) \
211 (((x) >> S_FW_FILTER_WR_DROP) & M_FW_FILTER_WR_DROP)
212#define F_FW_FILTER_WR_DROP V_FW_FILTER_WR_DROP(1U)
213
214#define S_FW_FILTER_WR_DIRSTEER 23
215#define M_FW_FILTER_WR_DIRSTEER 0x1
216#define V_FW_FILTER_WR_DIRSTEER(x) ((x) << S_FW_FILTER_WR_DIRSTEER)
217#define G_FW_FILTER_WR_DIRSTEER(x) \
218 (((x) >> S_FW_FILTER_WR_DIRSTEER) & M_FW_FILTER_WR_DIRSTEER)
219#define F_FW_FILTER_WR_DIRSTEER V_FW_FILTER_WR_DIRSTEER(1U)
220
221#define S_FW_FILTER_WR_MASKHASH 22
222#define M_FW_FILTER_WR_MASKHASH 0x1
223#define V_FW_FILTER_WR_MASKHASH(x) ((x) << S_FW_FILTER_WR_MASKHASH)
224#define G_FW_FILTER_WR_MASKHASH(x) \
225 (((x) >> S_FW_FILTER_WR_MASKHASH) & M_FW_FILTER_WR_MASKHASH)
226#define F_FW_FILTER_WR_MASKHASH V_FW_FILTER_WR_MASKHASH(1U)
227
228#define S_FW_FILTER_WR_DIRSTEERHASH 21
229#define M_FW_FILTER_WR_DIRSTEERHASH 0x1
230#define V_FW_FILTER_WR_DIRSTEERHASH(x) ((x) << S_FW_FILTER_WR_DIRSTEERHASH)
231#define G_FW_FILTER_WR_DIRSTEERHASH(x) \
232 (((x) >> S_FW_FILTER_WR_DIRSTEERHASH) & M_FW_FILTER_WR_DIRSTEERHASH)
233#define F_FW_FILTER_WR_DIRSTEERHASH V_FW_FILTER_WR_DIRSTEERHASH(1U)
234
235#define S_FW_FILTER_WR_LPBK 20
236#define M_FW_FILTER_WR_LPBK 0x1
237#define V_FW_FILTER_WR_LPBK(x) ((x) << S_FW_FILTER_WR_LPBK)
238#define G_FW_FILTER_WR_LPBK(x) \
239 (((x) >> S_FW_FILTER_WR_LPBK) & M_FW_FILTER_WR_LPBK)
240#define F_FW_FILTER_WR_LPBK V_FW_FILTER_WR_LPBK(1U)
241
242#define S_FW_FILTER_WR_DMAC 19
243#define M_FW_FILTER_WR_DMAC 0x1
244#define V_FW_FILTER_WR_DMAC(x) ((x) << S_FW_FILTER_WR_DMAC)
245#define G_FW_FILTER_WR_DMAC(x) \
246 (((x) >> S_FW_FILTER_WR_DMAC) & M_FW_FILTER_WR_DMAC)
247#define F_FW_FILTER_WR_DMAC V_FW_FILTER_WR_DMAC(1U)
248
249#define S_FW_FILTER_WR_SMAC 18
250#define M_FW_FILTER_WR_SMAC 0x1
251#define V_FW_FILTER_WR_SMAC(x) ((x) << S_FW_FILTER_WR_SMAC)
252#define G_FW_FILTER_WR_SMAC(x) \
253 (((x) >> S_FW_FILTER_WR_SMAC) & M_FW_FILTER_WR_SMAC)
254#define F_FW_FILTER_WR_SMAC V_FW_FILTER_WR_SMAC(1U)
255
256#define S_FW_FILTER_WR_INSVLAN 17
257#define M_FW_FILTER_WR_INSVLAN 0x1
258#define V_FW_FILTER_WR_INSVLAN(x) ((x) << S_FW_FILTER_WR_INSVLAN)
259#define G_FW_FILTER_WR_INSVLAN(x) \
260 (((x) >> S_FW_FILTER_WR_INSVLAN) & M_FW_FILTER_WR_INSVLAN)
261#define F_FW_FILTER_WR_INSVLAN V_FW_FILTER_WR_INSVLAN(1U)
262
263#define S_FW_FILTER_WR_RMVLAN 16
264#define M_FW_FILTER_WR_RMVLAN 0x1
265#define V_FW_FILTER_WR_RMVLAN(x) ((x) << S_FW_FILTER_WR_RMVLAN)
266#define G_FW_FILTER_WR_RMVLAN(x) \
267 (((x) >> S_FW_FILTER_WR_RMVLAN) & M_FW_FILTER_WR_RMVLAN)
268#define F_FW_FILTER_WR_RMVLAN V_FW_FILTER_WR_RMVLAN(1U)
269
270#define S_FW_FILTER_WR_HITCNTS 15
271#define M_FW_FILTER_WR_HITCNTS 0x1
272#define V_FW_FILTER_WR_HITCNTS(x) ((x) << S_FW_FILTER_WR_HITCNTS)
273#define G_FW_FILTER_WR_HITCNTS(x) \
274 (((x) >> S_FW_FILTER_WR_HITCNTS) & M_FW_FILTER_WR_HITCNTS)
275#define F_FW_FILTER_WR_HITCNTS V_FW_FILTER_WR_HITCNTS(1U)
276
277#define S_FW_FILTER_WR_TXCHAN 13
278#define M_FW_FILTER_WR_TXCHAN 0x3
279#define V_FW_FILTER_WR_TXCHAN(x) ((x) << S_FW_FILTER_WR_TXCHAN)
280#define G_FW_FILTER_WR_TXCHAN(x) \
281 (((x) >> S_FW_FILTER_WR_TXCHAN) & M_FW_FILTER_WR_TXCHAN)
282
283#define S_FW_FILTER_WR_PRIO 12
284#define M_FW_FILTER_WR_PRIO 0x1
285#define V_FW_FILTER_WR_PRIO(x) ((x) << S_FW_FILTER_WR_PRIO)
286#define G_FW_FILTER_WR_PRIO(x) \
287 (((x) >> S_FW_FILTER_WR_PRIO) & M_FW_FILTER_WR_PRIO)
288#define F_FW_FILTER_WR_PRIO V_FW_FILTER_WR_PRIO(1U)
289
290#define S_FW_FILTER_WR_L2TIX 0
291#define M_FW_FILTER_WR_L2TIX 0xfff
292#define V_FW_FILTER_WR_L2TIX(x) ((x) << S_FW_FILTER_WR_L2TIX)
293#define G_FW_FILTER_WR_L2TIX(x) \
294 (((x) >> S_FW_FILTER_WR_L2TIX) & M_FW_FILTER_WR_L2TIX)
295
296#define S_FW_FILTER_WR_FRAG 7
297#define M_FW_FILTER_WR_FRAG 0x1
298#define V_FW_FILTER_WR_FRAG(x) ((x) << S_FW_FILTER_WR_FRAG)
299#define G_FW_FILTER_WR_FRAG(x) \
300 (((x) >> S_FW_FILTER_WR_FRAG) & M_FW_FILTER_WR_FRAG)
301#define F_FW_FILTER_WR_FRAG V_FW_FILTER_WR_FRAG(1U)
302
303#define S_FW_FILTER_WR_FRAGM 6
304#define M_FW_FILTER_WR_FRAGM 0x1
305#define V_FW_FILTER_WR_FRAGM(x) ((x) << S_FW_FILTER_WR_FRAGM)
306#define G_FW_FILTER_WR_FRAGM(x) \
307 (((x) >> S_FW_FILTER_WR_FRAGM) & M_FW_FILTER_WR_FRAGM)
308#define F_FW_FILTER_WR_FRAGM V_FW_FILTER_WR_FRAGM(1U)
309
310#define S_FW_FILTER_WR_IVLAN_VLD 5
311#define M_FW_FILTER_WR_IVLAN_VLD 0x1
312#define V_FW_FILTER_WR_IVLAN_VLD(x) ((x) << S_FW_FILTER_WR_IVLAN_VLD)
313#define G_FW_FILTER_WR_IVLAN_VLD(x) \
314 (((x) >> S_FW_FILTER_WR_IVLAN_VLD) & M_FW_FILTER_WR_IVLAN_VLD)
315#define F_FW_FILTER_WR_IVLAN_VLD V_FW_FILTER_WR_IVLAN_VLD(1U)
316
317#define S_FW_FILTER_WR_OVLAN_VLD 4
318#define M_FW_FILTER_WR_OVLAN_VLD 0x1
319#define V_FW_FILTER_WR_OVLAN_VLD(x) ((x) << S_FW_FILTER_WR_OVLAN_VLD)
320#define G_FW_FILTER_WR_OVLAN_VLD(x) \
321 (((x) >> S_FW_FILTER_WR_OVLAN_VLD) & M_FW_FILTER_WR_OVLAN_VLD)
322#define F_FW_FILTER_WR_OVLAN_VLD V_FW_FILTER_WR_OVLAN_VLD(1U)
323
324#define S_FW_FILTER_WR_IVLAN_VLDM 3
325#define M_FW_FILTER_WR_IVLAN_VLDM 0x1
326#define V_FW_FILTER_WR_IVLAN_VLDM(x) ((x) << S_FW_FILTER_WR_IVLAN_VLDM)
327#define G_FW_FILTER_WR_IVLAN_VLDM(x) \
328 (((x) >> S_FW_FILTER_WR_IVLAN_VLDM) & M_FW_FILTER_WR_IVLAN_VLDM)
329#define F_FW_FILTER_WR_IVLAN_VLDM V_FW_FILTER_WR_IVLAN_VLDM(1U)
330
331#define S_FW_FILTER_WR_OVLAN_VLDM 2
332#define M_FW_FILTER_WR_OVLAN_VLDM 0x1
333#define V_FW_FILTER_WR_OVLAN_VLDM(x) ((x) << S_FW_FILTER_WR_OVLAN_VLDM)
334#define G_FW_FILTER_WR_OVLAN_VLDM(x) \
335 (((x) >> S_FW_FILTER_WR_OVLAN_VLDM) & M_FW_FILTER_WR_OVLAN_VLDM)
336#define F_FW_FILTER_WR_OVLAN_VLDM V_FW_FILTER_WR_OVLAN_VLDM(1U)
337
338#define S_FW_FILTER_WR_RX_CHAN 15
339#define M_FW_FILTER_WR_RX_CHAN 0x1
340#define V_FW_FILTER_WR_RX_CHAN(x) ((x) << S_FW_FILTER_WR_RX_CHAN)
341#define G_FW_FILTER_WR_RX_CHAN(x) \
342 (((x) >> S_FW_FILTER_WR_RX_CHAN) & M_FW_FILTER_WR_RX_CHAN)
343#define F_FW_FILTER_WR_RX_CHAN V_FW_FILTER_WR_RX_CHAN(1U)
344
345#define S_FW_FILTER_WR_RX_RPL_IQ 0
346#define M_FW_FILTER_WR_RX_RPL_IQ 0x3ff
347#define V_FW_FILTER_WR_RX_RPL_IQ(x) ((x) << S_FW_FILTER_WR_RX_RPL_IQ)
348#define G_FW_FILTER_WR_RX_RPL_IQ(x) \
349 (((x) >> S_FW_FILTER_WR_RX_RPL_IQ) & M_FW_FILTER_WR_RX_RPL_IQ)
350
351#define S_FW_FILTER_WR_MACI 23
352#define M_FW_FILTER_WR_MACI 0x1ff
353#define V_FW_FILTER_WR_MACI(x) ((x) << S_FW_FILTER_WR_MACI)
354#define G_FW_FILTER_WR_MACI(x) \
355 (((x) >> S_FW_FILTER_WR_MACI) & M_FW_FILTER_WR_MACI)
356
357#define S_FW_FILTER_WR_MACIM 14
358#define M_FW_FILTER_WR_MACIM 0x1ff
359#define V_FW_FILTER_WR_MACIM(x) ((x) << S_FW_FILTER_WR_MACIM)
360#define G_FW_FILTER_WR_MACIM(x) \
361 (((x) >> S_FW_FILTER_WR_MACIM) & M_FW_FILTER_WR_MACIM)
362
363#define S_FW_FILTER_WR_FCOE 13
364#define M_FW_FILTER_WR_FCOE 0x1
365#define V_FW_FILTER_WR_FCOE(x) ((x) << S_FW_FILTER_WR_FCOE)
366#define G_FW_FILTER_WR_FCOE(x) \
367 (((x) >> S_FW_FILTER_WR_FCOE) & M_FW_FILTER_WR_FCOE)
368#define F_FW_FILTER_WR_FCOE V_FW_FILTER_WR_FCOE(1U)
369
370#define S_FW_FILTER_WR_FCOEM 12
371#define M_FW_FILTER_WR_FCOEM 0x1
372#define V_FW_FILTER_WR_FCOEM(x) ((x) << S_FW_FILTER_WR_FCOEM)
373#define G_FW_FILTER_WR_FCOEM(x) \
374 (((x) >> S_FW_FILTER_WR_FCOEM) & M_FW_FILTER_WR_FCOEM)
375#define F_FW_FILTER_WR_FCOEM V_FW_FILTER_WR_FCOEM(1U)
376
377#define S_FW_FILTER_WR_PORT 9
378#define M_FW_FILTER_WR_PORT 0x7
379#define V_FW_FILTER_WR_PORT(x) ((x) << S_FW_FILTER_WR_PORT)
380#define G_FW_FILTER_WR_PORT(x) \
381 (((x) >> S_FW_FILTER_WR_PORT) & M_FW_FILTER_WR_PORT)
382
383#define S_FW_FILTER_WR_PORTM 6
384#define M_FW_FILTER_WR_PORTM 0x7
385#define V_FW_FILTER_WR_PORTM(x) ((x) << S_FW_FILTER_WR_PORTM)
386#define G_FW_FILTER_WR_PORTM(x) \
387 (((x) >> S_FW_FILTER_WR_PORTM) & M_FW_FILTER_WR_PORTM)
388
389#define S_FW_FILTER_WR_MATCHTYPE 3
390#define M_FW_FILTER_WR_MATCHTYPE 0x7
391#define V_FW_FILTER_WR_MATCHTYPE(x) ((x) << S_FW_FILTER_WR_MATCHTYPE)
392#define G_FW_FILTER_WR_MATCHTYPE(x) \
393 (((x) >> S_FW_FILTER_WR_MATCHTYPE) & M_FW_FILTER_WR_MATCHTYPE)
394
395#define S_FW_FILTER_WR_MATCHTYPEM 0
396#define M_FW_FILTER_WR_MATCHTYPEM 0x7
397#define V_FW_FILTER_WR_MATCHTYPEM(x) ((x) << S_FW_FILTER_WR_MATCHTYPEM)
398#define G_FW_FILTER_WR_MATCHTYPEM(x) \
399 (((x) >> S_FW_FILTER_WR_MATCHTYPEM) & M_FW_FILTER_WR_MATCHTYPEM)
84 400
85struct fw_ulptx_wr { 401struct fw_ulptx_wr {
86 __be32 op_to_compl; 402 __be32 op_to_compl;
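
Drivers build a fw_filter_wr by OR-ing the V_*/F_* macros into the packed fields and converting to big-endian before handing the work request to hardware. A hedged userspace sketch using a few of the macros above; htonl() stands in for the kernel's cpu_to_be32(), and the filter settings are invented:

        #include <arpa/inet.h>  /* htonl(): userspace stand-in for cpu_to_be32() */
        #include <stdio.h>

        #define S_FW_FILTER_WR_DROP 24
        #define V_FW_FILTER_WR_DROP(x) ((x) << S_FW_FILTER_WR_DROP)
        #define F_FW_FILTER_WR_DROP V_FW_FILTER_WR_DROP(1U)

        #define S_FW_FILTER_WR_HITCNTS 15
        #define V_FW_FILTER_WR_HITCNTS(x) ((x) << S_FW_FILTER_WR_HITCNTS)
        #define F_FW_FILTER_WR_HITCNTS V_FW_FILTER_WR_HITCNTS(1U)

        #define S_FW_FILTER_WR_L2TIX 0
        #define V_FW_FILTER_WR_L2TIX(x) ((x) << S_FW_FILTER_WR_L2TIX)

        int main(void)
        {
                /* Invented filter: drop matches, keep hit counters, L2T index 5. */
                unsigned int host = F_FW_FILTER_WR_DROP | F_FW_FILTER_WR_HITCNTS |
                                    V_FW_FILTER_WR_L2TIX(5);

                printf("del_filter_to_l2tix: host=0x%08x wire=0x%08x\n",
                       host, htonl(host));
                return 0;
        }
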
@@ -100,6 +416,108 @@ struct fw_eth_tx_pkt_wr {
100 __be64 r3; 416 __be64 r3;
101}; 417};
102 418
419struct fw_ofld_connection_wr {
420 __be32 op_compl;
421 __be32 len16_pkd;
422 __u64 cookie;
423 __be64 r2;
424 __be64 r3;
425 struct fw_ofld_connection_le {
426 __be32 version_cpl;
427 __be32 filter;
428 __be32 r1;
429 __be16 lport;
430 __be16 pport;
431 union fw_ofld_connection_leip {
432 struct fw_ofld_connection_le_ipv4 {
433 __be32 pip;
434 __be32 lip;
435 __be64 r0;
436 __be64 r1;
437 __be64 r2;
438 } ipv4;
439 struct fw_ofld_connection_le_ipv6 {
440 __be64 pip_hi;
441 __be64 pip_lo;
442 __be64 lip_hi;
443 __be64 lip_lo;
444 } ipv6;
445 } u;
446 } le;
447 struct fw_ofld_connection_tcb {
448 __be32 t_state_to_astid;
449 __be16 cplrxdataack_cplpassacceptrpl;
450 __be16 rcv_adv;
451 __be32 rcv_nxt;
452 __be32 tx_max;
453 __be64 opt0;
454 __be32 opt2;
455 __be32 r1;
456 __be64 r2;
457 __be64 r3;
458 } tcb;
459};
460
461#define S_FW_OFLD_CONNECTION_WR_VERSION 31
462#define M_FW_OFLD_CONNECTION_WR_VERSION 0x1
463#define V_FW_OFLD_CONNECTION_WR_VERSION(x) \
464 ((x) << S_FW_OFLD_CONNECTION_WR_VERSION)
465#define G_FW_OFLD_CONNECTION_WR_VERSION(x) \
466 (((x) >> S_FW_OFLD_CONNECTION_WR_VERSION) & \
467 M_FW_OFLD_CONNECTION_WR_VERSION)
468#define F_FW_OFLD_CONNECTION_WR_VERSION \
469 V_FW_OFLD_CONNECTION_WR_VERSION(1U)
470
471#define S_FW_OFLD_CONNECTION_WR_CPL 30
472#define M_FW_OFLD_CONNECTION_WR_CPL 0x1
473#define V_FW_OFLD_CONNECTION_WR_CPL(x) ((x) << S_FW_OFLD_CONNECTION_WR_CPL)
474#define G_FW_OFLD_CONNECTION_WR_CPL(x) \
475 (((x) >> S_FW_OFLD_CONNECTION_WR_CPL) & M_FW_OFLD_CONNECTION_WR_CPL)
476#define F_FW_OFLD_CONNECTION_WR_CPL V_FW_OFLD_CONNECTION_WR_CPL(1U)
477
478#define S_FW_OFLD_CONNECTION_WR_T_STATE 28
479#define M_FW_OFLD_CONNECTION_WR_T_STATE 0xf
480#define V_FW_OFLD_CONNECTION_WR_T_STATE(x) \
481 ((x) << S_FW_OFLD_CONNECTION_WR_T_STATE)
482#define G_FW_OFLD_CONNECTION_WR_T_STATE(x) \
483 (((x) >> S_FW_OFLD_CONNECTION_WR_T_STATE) & \
484 M_FW_OFLD_CONNECTION_WR_T_STATE)
485
486#define S_FW_OFLD_CONNECTION_WR_RCV_SCALE 24
487#define M_FW_OFLD_CONNECTION_WR_RCV_SCALE 0xf
488#define V_FW_OFLD_CONNECTION_WR_RCV_SCALE(x) \
489 ((x) << S_FW_OFLD_CONNECTION_WR_RCV_SCALE)
490#define G_FW_OFLD_CONNECTION_WR_RCV_SCALE(x) \
491 (((x) >> S_FW_OFLD_CONNECTION_WR_RCV_SCALE) & \
492 M_FW_OFLD_CONNECTION_WR_RCV_SCALE)
493
494#define S_FW_OFLD_CONNECTION_WR_ASTID 0
495#define M_FW_OFLD_CONNECTION_WR_ASTID 0xffffff
496#define V_FW_OFLD_CONNECTION_WR_ASTID(x) \
497 ((x) << S_FW_OFLD_CONNECTION_WR_ASTID)
498#define G_FW_OFLD_CONNECTION_WR_ASTID(x) \
499 (((x) >> S_FW_OFLD_CONNECTION_WR_ASTID) & M_FW_OFLD_CONNECTION_WR_ASTID)
500
501#define S_FW_OFLD_CONNECTION_WR_CPLRXDATAACK 15
502#define M_FW_OFLD_CONNECTION_WR_CPLRXDATAACK 0x1
503#define V_FW_OFLD_CONNECTION_WR_CPLRXDATAACK(x) \
504 ((x) << S_FW_OFLD_CONNECTION_WR_CPLRXDATAACK)
505#define G_FW_OFLD_CONNECTION_WR_CPLRXDATAACK(x) \
506 (((x) >> S_FW_OFLD_CONNECTION_WR_CPLRXDATAACK) & \
507 M_FW_OFLD_CONNECTION_WR_CPLRXDATAACK)
508#define F_FW_OFLD_CONNECTION_WR_CPLRXDATAACK \
509 V_FW_OFLD_CONNECTION_WR_CPLRXDATAACK(1U)
510
511#define S_FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL 14
512#define M_FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL 0x1
513#define V_FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL(x) \
514 ((x) << S_FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL)
515#define G_FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL(x) \
516 (((x) >> S_FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL) & \
517 M_FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL)
518#define F_FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL \
519 V_FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL(1U)
520
103enum fw_flowc_mnem { 521enum fw_flowc_mnem {
104 FW_FLOWC_MNEM_PFNVFN, /* PFN [15:8] VFN [7:0] */ 522 FW_FLOWC_MNEM_PFNVFN, /* PFN [15:8] VFN [7:0] */
105 FW_FLOWC_MNEM_CH, 523 FW_FLOWC_MNEM_CH,
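
The fw_ofld_connection_wr TCB block carries the software TCP state needed to (re)offload a connection; t_state_to_astid packs the TCP state, receive window scale, and active-open tid into one word. A small sketch of that packing using the macros above (the state, scale, and atid values are invented):

        #include <stdio.h>

        #define S_FW_OFLD_CONNECTION_WR_T_STATE 28
        #define V_FW_OFLD_CONNECTION_WR_T_STATE(x) \
                ((x) << S_FW_OFLD_CONNECTION_WR_T_STATE)

        #define S_FW_OFLD_CONNECTION_WR_RCV_SCALE 24
        #define V_FW_OFLD_CONNECTION_WR_RCV_SCALE(x) \
                ((x) << S_FW_OFLD_CONNECTION_WR_RCV_SCALE)

        #define S_FW_OFLD_CONNECTION_WR_ASTID 0
        #define V_FW_OFLD_CONNECTION_WR_ASTID(x) \
                ((x) << S_FW_OFLD_CONNECTION_WR_ASTID)

        int main(void)
        {
                /* Invented values: TCP state 4, window scale 2, atid 0x123. */
                unsigned int v = V_FW_OFLD_CONNECTION_WR_T_STATE(4U) |
                                 V_FW_OFLD_CONNECTION_WR_RCV_SCALE(2U) |
                                 V_FW_OFLD_CONNECTION_WR_ASTID(0x123U);

                printf("t_state_to_astid = 0x%08x\n", v); /* 0x42000123 */
                return 0;
        }
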
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index 9a9de51ecc91..8b3d0512a46b 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -1338,6 +1338,7 @@ int mlx4_QUERY_HCA(struct mlx4_dev *dev,
1338{ 1338{
1339 struct mlx4_cmd_mailbox *mailbox; 1339 struct mlx4_cmd_mailbox *mailbox;
1340 __be32 *outbox; 1340 __be32 *outbox;
1341 u32 dword_field;
1341 int err; 1342 int err;
1342 u8 byte_field; 1343 u8 byte_field;
1343 1344
@@ -1372,10 +1373,18 @@ int mlx4_QUERY_HCA(struct mlx4_dev *dev,
1372 MLX4_GET(param->rdmarc_base, outbox, INIT_HCA_RDMARC_BASE_OFFSET); 1373 MLX4_GET(param->rdmarc_base, outbox, INIT_HCA_RDMARC_BASE_OFFSET);
1373 MLX4_GET(param->log_rd_per_qp, outbox, INIT_HCA_LOG_RD_OFFSET); 1374 MLX4_GET(param->log_rd_per_qp, outbox, INIT_HCA_LOG_RD_OFFSET);
1374 1375
1376 MLX4_GET(dword_field, outbox, INIT_HCA_FLAGS_OFFSET);
1377 if (dword_field & (1 << INIT_HCA_DEVICE_MANAGED_FLOW_STEERING_EN)) {
1378 param->steering_mode = MLX4_STEERING_MODE_DEVICE_MANAGED;
1379 } else {
1380 MLX4_GET(byte_field, outbox, INIT_HCA_UC_STEERING_OFFSET);
1381 if (byte_field & 0x8)
1382 param->steering_mode = MLX4_STEERING_MODE_B0;
1383 else
1384 param->steering_mode = MLX4_STEERING_MODE_A0;
1385 }
1375 /* steering attributes */ 1386 /* steering attributes */
1376 if (dev->caps.steering_mode == 1387 if (param->steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) {
1377 MLX4_STEERING_MODE_DEVICE_MANAGED) {
1378
1379 MLX4_GET(param->mc_base, outbox, INIT_HCA_FS_BASE_OFFSET); 1388 MLX4_GET(param->mc_base, outbox, INIT_HCA_FS_BASE_OFFSET);
1380 MLX4_GET(param->log_mc_entry_sz, outbox, 1389 MLX4_GET(param->log_mc_entry_sz, outbox,
1381 INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET); 1390 INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET);
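
The QUERY_HCA change above derives the steering mode the firmware is actually running instead of trusting dev->caps, so a slave can learn the mode the master chose: device-managed if the INIT_HCA flag bit is set, otherwise B0 when the UC-steering byte has 0x8 set, otherwise A0. A standalone restatement of just that decision; the flag bit position is a stand-in, since the real INIT_HCA_DEVICE_MANAGED_FLOW_STEERING_EN value lives elsewhere in fw.c:

        #include <stdio.h>

        enum { STEERING_A0, STEERING_B0, STEERING_DEVICE_MANAGED };

        /* Stand-in for INIT_HCA_DEVICE_MANAGED_FLOW_STEERING_EN; the actual
         * bit position is defined in fw.c and not shown in this hunk. */
        #define DMFS_EN_BIT 4

        /* Mirrors the decode order in the hunk: the DMFS flag wins, then the
         * 0x8 bit of the UC-steering byte selects B0 over A0. */
        static int decode_steering(unsigned int flags, unsigned char uc_byte)
        {
                if (flags & (1U << DMFS_EN_BIT))
                        return STEERING_DEVICE_MANAGED;
                return (uc_byte & 0x8) ? STEERING_B0 : STEERING_A0;
        }

        int main(void)
        {
                printf("%d %d %d\n",
                       decode_steering(1U << DMFS_EN_BIT, 0), /* device-managed */
                       decode_steering(0, 0x8),               /* B0 */
                       decode_steering(0, 0));                /* A0 */
                return 0;
        }
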
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.h b/drivers/net/ethernet/mellanox/mlx4/fw.h
index 2c2e7ade2a34..dbf2f69cc59f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.h
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.h
@@ -172,6 +172,7 @@ struct mlx4_init_hca_param {
172 u8 log_uar_sz; 172 u8 log_uar_sz;
173 u8 uar_page_sz; /* log pg sz in 4k chunks */ 173 u8 uar_page_sz; /* log pg sz in 4k chunks */
174 u8 fs_hash_enable_bits; 174 u8 fs_hash_enable_bits;
175 u8 steering_mode; /* for QUERY_HCA */
175 u64 dev_cap_enabled; 176 u64 dev_cap_enabled;
176}; 177};
177 178
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index b2acbe7706a3..e1bafffbc3b1 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -85,15 +85,15 @@ static int probe_vf;
85module_param(probe_vf, int, 0644); 85module_param(probe_vf, int, 0644);
86MODULE_PARM_DESC(probe_vf, "number of vfs to probe by pf driver (num_vfs > 0)"); 86MODULE_PARM_DESC(probe_vf, "number of vfs to probe by pf driver (num_vfs > 0)");
87 87
88int mlx4_log_num_mgm_entry_size = 10; 88int mlx4_log_num_mgm_entry_size = MLX4_DEFAULT_MGM_LOG_ENTRY_SIZE;
89module_param_named(log_num_mgm_entry_size, 89module_param_named(log_num_mgm_entry_size,
90 mlx4_log_num_mgm_entry_size, int, 0444); 90 mlx4_log_num_mgm_entry_size, int, 0444);
91MODULE_PARM_DESC(log_num_mgm_entry_size, "log mgm size, that defines the num" 91MODULE_PARM_DESC(log_num_mgm_entry_size, "log mgm size, that defines the num"
92 " of qp per mcg, for example:" 92 " of qp per mcg, for example:"
93 " 10 gives 248.range: 9<=" 93 " 10 gives 248.range: 7 <="
94 " log_num_mgm_entry_size <= 12." 94 " log_num_mgm_entry_size <= 12."
95 " Not in use with device managed" 95 " To activate device managed"
96 " flow steering"); 96 " flow steering when available, set to -1");
97 97
98static bool enable_64b_cqe_eqe; 98static bool enable_64b_cqe_eqe;
99module_param(enable_64b_cqe_eqe, bool, 0444); 99module_param(enable_64b_cqe_eqe, bool, 0444);
@@ -281,28 +281,6 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
281 dev->caps.max_gso_sz = dev_cap->max_gso_sz; 281 dev->caps.max_gso_sz = dev_cap->max_gso_sz;
282 dev->caps.max_rss_tbl_sz = dev_cap->max_rss_tbl_sz; 282 dev->caps.max_rss_tbl_sz = dev_cap->max_rss_tbl_sz;
283 283
284 if (dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_FS_EN) {
285 dev->caps.steering_mode = MLX4_STEERING_MODE_DEVICE_MANAGED;
286 dev->caps.num_qp_per_mgm = dev_cap->fs_max_num_qp_per_entry;
287 dev->caps.fs_log_max_ucast_qp_range_size =
288 dev_cap->fs_log_max_ucast_qp_range_size;
289 } else {
290 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER &&
291 dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER) {
292 dev->caps.steering_mode = MLX4_STEERING_MODE_B0;
293 } else {
294 dev->caps.steering_mode = MLX4_STEERING_MODE_A0;
295
296 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER ||
297 dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER)
298 mlx4_warn(dev, "Must have UC_STEER and MC_STEER flags "
299 "set to use B0 steering. Falling back to A0 steering mode.\n");
300 }
301 dev->caps.num_qp_per_mgm = mlx4_get_qp_per_mgm(dev);
302 }
303 mlx4_dbg(dev, "Steering mode is: %s\n",
304 mlx4_steering_mode_str(dev->caps.steering_mode));
305
306 /* Sense port always allowed on supported devices for ConnectX-1 and -2 */ 284 /* Sense port always allowed on supported devices for ConnectX-1 and -2 */
307 if (mlx4_priv(dev)->pci_dev_data & MLX4_PCI_DEV_FORCE_SENSE_PORT) 285 if (mlx4_priv(dev)->pci_dev_data & MLX4_PCI_DEV_FORCE_SENSE_PORT)
308 dev->caps.flags |= MLX4_DEV_CAP_FLAG_SENSE_SUPPORT; 286 dev->caps.flags |= MLX4_DEV_CAP_FLAG_SENSE_SUPPORT;
@@ -493,6 +471,23 @@ int mlx4_is_slave_active(struct mlx4_dev *dev, int slave)
493} 471}
494EXPORT_SYMBOL(mlx4_is_slave_active); 472EXPORT_SYMBOL(mlx4_is_slave_active);
495 473
474static void slave_adjust_steering_mode(struct mlx4_dev *dev,
475 struct mlx4_dev_cap *dev_cap,
476 struct mlx4_init_hca_param *hca_param)
477{
478 dev->caps.steering_mode = hca_param->steering_mode;
479 if (dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) {
480 dev->caps.num_qp_per_mgm = dev_cap->fs_max_num_qp_per_entry;
481 dev->caps.fs_log_max_ucast_qp_range_size =
482 dev_cap->fs_log_max_ucast_qp_range_size;
483 } else
484 dev->caps.num_qp_per_mgm =
485 4 * ((1 << hca_param->log_mc_entry_sz)/16 - 2);
486
487 mlx4_dbg(dev, "Steering mode is: %s\n",
488 mlx4_steering_mode_str(dev->caps.steering_mode));
489}
490
496static int mlx4_slave_cap(struct mlx4_dev *dev) 491static int mlx4_slave_cap(struct mlx4_dev *dev)
497{ 492{
498 int err; 493 int err;
@@ -635,6 +630,8 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
635 dev->caps.cqe_size = 32; 630 dev->caps.cqe_size = 32;
636 } 631 }
637 632
633 slave_adjust_steering_mode(dev, &dev_cap, &hca_param);
634
638 return 0; 635 return 0;
639 636
640err_mem: 637err_mem:
@@ -1321,6 +1318,59 @@ static void mlx4_parav_master_pf_caps(struct mlx4_dev *dev)
1321 } 1318 }
1322} 1319}
1323 1320
1321static int choose_log_fs_mgm_entry_size(int qp_per_entry)
1322{
1323 int i = MLX4_MIN_MGM_LOG_ENTRY_SIZE;
1324
1325 for (i = MLX4_MIN_MGM_LOG_ENTRY_SIZE; i <= MLX4_MAX_MGM_LOG_ENTRY_SIZE;
1326 i++) {
1327 if (qp_per_entry <= 4 * ((1 << i) / 16 - 2))
1328 break;
1329 }
1330
1331 return (i <= MLX4_MAX_MGM_LOG_ENTRY_SIZE) ? i : -1;
1332}
1333
1334static void choose_steering_mode(struct mlx4_dev *dev,
1335 struct mlx4_dev_cap *dev_cap)
1336{
1337 if (mlx4_log_num_mgm_entry_size == -1 &&
1338 dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_FS_EN &&
1339 (!mlx4_is_mfunc(dev) ||
1340 (dev_cap->fs_max_num_qp_per_entry >= (num_vfs + 1))) &&
1341 choose_log_fs_mgm_entry_size(dev_cap->fs_max_num_qp_per_entry) >=
1342 MLX4_MIN_MGM_LOG_ENTRY_SIZE) {
1343 dev->oper_log_mgm_entry_size =
1344 choose_log_fs_mgm_entry_size(dev_cap->fs_max_num_qp_per_entry);
1345 dev->caps.steering_mode = MLX4_STEERING_MODE_DEVICE_MANAGED;
1346 dev->caps.num_qp_per_mgm = dev_cap->fs_max_num_qp_per_entry;
1347 dev->caps.fs_log_max_ucast_qp_range_size =
1348 dev_cap->fs_log_max_ucast_qp_range_size;
1349 } else {
1350 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER &&
1351 dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER)
1352 dev->caps.steering_mode = MLX4_STEERING_MODE_B0;
1353 else {
1354 dev->caps.steering_mode = MLX4_STEERING_MODE_A0;
1355
1356 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER ||
1357 dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER)
1358 mlx4_warn(dev, "Must have both UC_STEER and MC_STEER flags "
1359 "set to use B0 steering. Falling back to A0 steering mode.\n");
1360 }
1361 dev->oper_log_mgm_entry_size =
1362 mlx4_log_num_mgm_entry_size > 0 ?
1363 mlx4_log_num_mgm_entry_size :
1364 MLX4_DEFAULT_MGM_LOG_ENTRY_SIZE;
1365 dev->caps.num_qp_per_mgm = mlx4_get_qp_per_mgm(dev);
1366 }
1367 mlx4_dbg(dev, "Steering mode is: %s, oper_log_mgm_entry_size = %d, "
1368 "modparam log_num_mgm_entry_size = %d\n",
1369 mlx4_steering_mode_str(dev->caps.steering_mode),
1370 dev->oper_log_mgm_entry_size,
1371 mlx4_log_num_mgm_entry_size);
1372}
1373
1324static int mlx4_init_hca(struct mlx4_dev *dev) 1374static int mlx4_init_hca(struct mlx4_dev *dev)
1325{ 1375{
1326 struct mlx4_priv *priv = mlx4_priv(dev); 1376 struct mlx4_priv *priv = mlx4_priv(dev);
@@ -1360,6 +1410,8 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
1360 goto err_stop_fw; 1410 goto err_stop_fw;
1361 } 1411 }
1362 1412
1413 choose_steering_mode(dev, &dev_cap);
1414
1363 if (mlx4_is_master(dev)) 1415 if (mlx4_is_master(dev))
1364 mlx4_parav_master_pf_caps(dev); 1416 mlx4_parav_master_pf_caps(dev);
1365 1417
@@ -2452,6 +2504,17 @@ static int __init mlx4_verify_params(void)
2452 port_type_array[0] = true; 2504 port_type_array[0] = true;
2453 } 2505 }
2454 2506
2507 if (mlx4_log_num_mgm_entry_size != -1 &&
2508 (mlx4_log_num_mgm_entry_size < MLX4_MIN_MGM_LOG_ENTRY_SIZE ||
2509 mlx4_log_num_mgm_entry_size > MLX4_MAX_MGM_LOG_ENTRY_SIZE)) {
2510 pr_warning("mlx4_core: mlx4_log_num_mgm_entry_size (%d) not "
2511 "in legal range (-1 or %d..%d)\n",
2512 mlx4_log_num_mgm_entry_size,
2513 MLX4_MIN_MGM_LOG_ENTRY_SIZE,
2514 MLX4_MAX_MGM_LOG_ENTRY_SIZE);
2515 return -1;
2516 }
2517
2455 return 0; 2518 return 0;
2456} 2519}
2457 2520
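
choose_log_fs_mgm_entry_size() above searches for the smallest log entry size whose capacity, 4 * ((1 << i) / 16 - 2) QPs, covers what the firmware reports per flow-steering entry; the same formula is behind the module parameter description ("10 gives 248"). A sketch that tabulates the capacities over the legal 7..12 range:

        #include <stdio.h>

        #define MLX4_MIN_MGM_LOG_ENTRY_SIZE 7
        #define MLX4_MAX_MGM_LOG_ENTRY_SIZE 12

        /* Same capacity formula as choose_log_fs_mgm_entry_size() above. */
        static int qp_capacity(int log_entry_size)
        {
                return 4 * ((1 << log_entry_size) / 16 - 2);
        }

        int main(void)
        {
                int i;

                for (i = MLX4_MIN_MGM_LOG_ENTRY_SIZE;
                     i <= MLX4_MAX_MGM_LOG_ENTRY_SIZE; i++)
                        printf("log_num_mgm_entry_size=%2d -> %4d QPs per entry\n",
                               i, qp_capacity(i));
                /* 10 -> 248, matching the parm desc; 12 -> 1016, which is
                 * MLX4_MAX_QP_PER_MGM in mlx4.h. */
                return 0;
        }
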
diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c
index e151c21baf2b..1ee4db3c6400 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mcg.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c
@@ -54,12 +54,7 @@ struct mlx4_mgm {
54 54
55int mlx4_get_mgm_entry_size(struct mlx4_dev *dev) 55int mlx4_get_mgm_entry_size(struct mlx4_dev *dev)
56{ 56{
57 if (dev->caps.steering_mode == 57 return 1 << dev->oper_log_mgm_entry_size;
58 MLX4_STEERING_MODE_DEVICE_MANAGED)
59 return 1 << MLX4_FS_MGM_LOG_ENTRY_SIZE;
60 else
61 return min((1 << mlx4_log_num_mgm_entry_size),
62 MLX4_MAX_MGM_ENTRY_SIZE);
63} 58}
64 59
65int mlx4_get_qp_per_mgm(struct mlx4_dev *dev) 60int mlx4_get_qp_per_mgm(struct mlx4_dev *dev)
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index 1cf42036d7bb..116c5c29d2d1 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -94,8 +94,10 @@ enum {
94}; 94};
95 95
96enum { 96enum {
97 MLX4_MAX_MGM_ENTRY_SIZE = 0x1000, 97 MLX4_DEFAULT_MGM_LOG_ENTRY_SIZE = 10,
98 MLX4_MAX_QP_PER_MGM = 4 * (MLX4_MAX_MGM_ENTRY_SIZE / 16 - 2), 98 MLX4_MIN_MGM_LOG_ENTRY_SIZE = 7,
99 MLX4_MAX_MGM_LOG_ENTRY_SIZE = 12,
100 MLX4_MAX_QP_PER_MGM = 4 * ((1 << MLX4_MAX_MGM_LOG_ENTRY_SIZE) / 16 - 2),
99 MLX4_MTT_ENTRY_PER_SEG = 8, 101 MLX4_MTT_ENTRY_PER_SEG = 8,
100}; 102};
101 103
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index b05705f50f0f..561ed2a22a17 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -3071,6 +3071,7 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
3071 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; 3071 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3072 struct list_head *rlist = &tracker->slave_list[slave].res_list[RES_MAC]; 3072 struct list_head *rlist = &tracker->slave_list[slave].res_list[RES_MAC];
3073 int err; 3073 int err;
3074 int qpn;
3074 struct mlx4_net_trans_rule_hw_ctrl *ctrl; 3075 struct mlx4_net_trans_rule_hw_ctrl *ctrl;
3075 struct _rule_hw *rule_header; 3076 struct _rule_hw *rule_header;
3076 int header_id; 3077 int header_id;
@@ -3080,13 +3081,21 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
3080 return -EOPNOTSUPP; 3081 return -EOPNOTSUPP;
3081 3082
3082 ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf; 3083 ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
3084 qpn = be32_to_cpu(ctrl->qpn) & 0xffffff;
3085 err = get_res(dev, slave, qpn, RES_QP, NULL);
3086 if (err) {
3087 pr_err("Steering rule with qpn 0x%x rejected.\n", qpn);
3088 return err;
3089 }
3083 rule_header = (struct _rule_hw *)(ctrl + 1); 3090 rule_header = (struct _rule_hw *)(ctrl + 1);
3084 header_id = map_hw_to_sw_id(be16_to_cpu(rule_header->id)); 3091 header_id = map_hw_to_sw_id(be16_to_cpu(rule_header->id));
3085 3092
3086 switch (header_id) { 3093 switch (header_id) {
3087 case MLX4_NET_TRANS_RULE_ID_ETH: 3094 case MLX4_NET_TRANS_RULE_ID_ETH:
3088 if (validate_eth_header_mac(slave, rule_header, rlist)) 3095 if (validate_eth_header_mac(slave, rule_header, rlist)) {
3089 return -EINVAL; 3096 err = -EINVAL;
3097 goto err_put;
3098 }
3090 break; 3099 break;
3091 case MLX4_NET_TRANS_RULE_ID_IB: 3100 case MLX4_NET_TRANS_RULE_ID_IB:
3092 break; 3101 break;
@@ -3094,14 +3103,17 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
3094 case MLX4_NET_TRANS_RULE_ID_TCP: 3103 case MLX4_NET_TRANS_RULE_ID_TCP:
3095 case MLX4_NET_TRANS_RULE_ID_UDP: 3104 case MLX4_NET_TRANS_RULE_ID_UDP:
3096 pr_warn("Can't attach FS rule without L2 headers, adding L2 header.\n"); 3105 pr_warn("Can't attach FS rule without L2 headers, adding L2 header.\n");
3097 if (add_eth_header(dev, slave, inbox, rlist, header_id)) 3106 if (add_eth_header(dev, slave, inbox, rlist, header_id)) {
3098 return -EINVAL; 3107 err = -EINVAL;
3108 goto err_put;
3109 }
3099 vhcr->in_modifier += 3110 vhcr->in_modifier +=
3100 sizeof(struct mlx4_net_trans_rule_hw_eth) >> 2; 3111 sizeof(struct mlx4_net_trans_rule_hw_eth) >> 2;
3101 break; 3112 break;
3102 default: 3113 default:
3103 pr_err("Corrupted mailbox.\n"); 3114 pr_err("Corrupted mailbox.\n");
3104 return -EINVAL; 3115 err = -EINVAL;
3116 goto err_put;
3105 } 3117 }
3106 3118
3107 err = mlx4_cmd_imm(dev, inbox->dma, &vhcr->out_param, 3119 err = mlx4_cmd_imm(dev, inbox->dma, &vhcr->out_param,
@@ -3109,16 +3121,18 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
3109 MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A, 3121 MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
3110 MLX4_CMD_NATIVE); 3122 MLX4_CMD_NATIVE);
3111 if (err) 3123 if (err)
3112 return err; 3124 goto err_put;
3113 3125
3114 err = add_res_range(dev, slave, vhcr->out_param, 1, RES_FS_RULE, 0); 3126 err = add_res_range(dev, slave, vhcr->out_param, 1, RES_FS_RULE, 0);
3115 if (err) { 3127 if (err) {
3116 mlx4_err(dev, "Fail to add flow steering resources.\n "); 3128 mlx4_err(dev, "Fail to add flow steering resources.\n ");
3117 /* detach rule*/ 3129 /* detach rule*/
3118 mlx4_cmd(dev, vhcr->out_param, 0, 0, 3130 mlx4_cmd(dev, vhcr->out_param, 0, 0,
3119 MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A, 3131 MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
3120 MLX4_CMD_NATIVE); 3132 MLX4_CMD_NATIVE);
3121 } 3133 }
3134err_put:
3135 put_res(dev, slave, qpn, RES_QP);
3122 return err; 3136 return err;
3123} 3137}
3124 3138
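
The resource_tracker fix above pins the rule's QP with get_res() before any validation and funnels every failure through the err_put label, so put_res() runs on success and error alike (note it also corrects the cleanup path to issue MLX4_QP_FLOW_STEERING_DETACH rather than re-attaching). A generic sketch of that acquire/validate/unwind idiom, with toy stand-ins for get_res()/put_res():

        #include <errno.h>
        #include <stdio.h>

        /* Toy stand-ins for get_res()/put_res(): pin and unpin a resource. */
        static int get_res(int id)  { printf("pin %d\n", id); return 0; }
        static void put_res(int id) { printf("unpin %d\n", id); }

        static int attach_rule(int qpn, int valid_header)
        {
                int err;

                err = get_res(qpn);     /* pin the QP first */
                if (err)
                        return err;

                if (!valid_header) {    /* any later failure... */
                        err = -EINVAL;
                        goto err_put;   /* ...must still unpin */
                }

                err = 0;                /* firmware command would go here */
        err_put:
                put_res(qpn);           /* runs on success and error alike */
                return err;
        }

        int main(void)
        {
                printf("ok=%d bad=%d\n", attach_rule(7, 1), attach_rule(7, 0));
                return 0;
        }
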
diff --git a/drivers/scsi/csiostor/t4fw_api_stor.h b/drivers/scsi/csiostor/t4fw_api_stor.h
index 1223e0d5fc07..097e52c0f8e1 100644
--- a/drivers/scsi/csiostor/t4fw_api_stor.h
+++ b/drivers/scsi/csiostor/t4fw_api_stor.h
@@ -40,45 +40,6 @@
40 * R E T U R N V A L U E S 40 * R E T U R N V A L U E S
41 ********************************/ 41 ********************************/
42 42
43enum fw_retval {
 44 FW_SUCCESS = 0, /* completed successfully */
45 FW_EPERM = 1, /* operation not permitted */
46 FW_ENOENT = 2, /* no such file or directory */
47 FW_EIO = 5, /* input/output error; hw bad */
48 FW_ENOEXEC = 8, /* exec format error; inv microcode */
49 FW_EAGAIN = 11, /* try again */
50 FW_ENOMEM = 12, /* out of memory */
51 FW_EFAULT = 14, /* bad address; fw bad */
52 FW_EBUSY = 16, /* resource busy */
53 FW_EEXIST = 17, /* file exists */
54 FW_EINVAL = 22, /* invalid argument */
55 FW_ENOSPC = 28, /* no space left on device */
56 FW_ENOSYS = 38, /* functionality not implemented */
57 FW_EPROTO = 71, /* protocol error */
58 FW_EADDRINUSE = 98, /* address already in use */
 59 FW_EADDRNOTAVAIL = 99, /* cannot assign requested address */
60 FW_ENETDOWN = 100, /* network is down */
61 FW_ENETUNREACH = 101, /* network is unreachable */
62 FW_ENOBUFS = 105, /* no buffer space available */
63 FW_ETIMEDOUT = 110, /* timeout */
64 FW_EINPROGRESS = 115, /* fw internal */
65 FW_SCSI_ABORT_REQUESTED = 128, /* */
66 FW_SCSI_ABORT_TIMEDOUT = 129, /* */
67 FW_SCSI_ABORTED = 130, /* */
68 FW_SCSI_CLOSE_REQUESTED = 131, /* */
69 FW_ERR_LINK_DOWN = 132, /* */
70 FW_RDEV_NOT_READY = 133, /* */
71 FW_ERR_RDEV_LOST = 134, /* */
72 FW_ERR_RDEV_LOGO = 135, /* */
73 FW_FCOE_NO_XCHG = 136, /* */
74 FW_SCSI_RSP_ERR = 137, /* */
75 FW_ERR_RDEV_IMPL_LOGO = 138, /* */
76 FW_SCSI_UNDER_FLOW_ERR = 139, /* */
77 FW_SCSI_OVER_FLOW_ERR = 140, /* */
78 FW_SCSI_DDP_ERR = 141, /* DDP error*/
79 FW_SCSI_TASK_ERR = 142, /* No SCSI tasks available */
80};
81
82enum fw_fcoe_link_sub_op { 43enum fw_fcoe_link_sub_op {
83 FCOE_LINK_DOWN = 0x0, 44 FCOE_LINK_DOWN = 0x0,
84 FCOE_LINK_UP = 0x1, 45 FCOE_LINK_UP = 0x1,