author		Anish Bhatt <anish@chelsio.com>	2014-06-20 00:37:13 -0400
committer	David S. Miller <davem@davemloft.net>	2014-06-23 00:13:33 -0400
commit		688848b1493a0a55059041dcc1ea332dabd1c75d (patch)
tree		91ea595455c2efe148768edd40db211a57868f47 /drivers
parent		76bcb31efc0685574fb123f7aaa92f8a50c14fd9 (diff)
cxgb4: Integrate DCBx support into cxgb4 module. Register dcbnl_ops to give access to DCBx functions
Signed-off-by: Anish Bhatt <anish@chelsio.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
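
For context on the dcbnl_ops registration mentioned in the subject: hooking a driver into the kernel's DCB netlink layer amounts to filling in a struct dcbnl_rtnl_ops and pointing netdev->dcbnl_ops at it before the netdev is registered, which is exactly what init_one() does below with cxgb4_dcb_ops. The sketch that follows only illustrates that shape; the real cxgb4 callbacks live in cxgb4_dcb.c (not part of this diff), and every example_* name here is a placeholder, not Chelsio code.

/* Minimal, hypothetical sketch of a dcbnl registration; not the cxgb4
 * implementation.  Only netdev->dcbnl_ops and cxgb4_dcb_state_init()
 * appear in this patch; everything else below is a placeholder.
 */
#include <linux/netdevice.h>
#include <net/dcbnl.h>

static u8 example_getstate(struct net_device *dev)
{
	return 1;		/* report DCB as enabled */
}

static u8 example_setstate(struct net_device *dev, u8 state)
{
	return 0;		/* 0 = success for the CEE set ops */
}

static const struct dcbnl_rtnl_ops example_dcb_ops = {
	.getstate = example_getstate,
	.setstate = example_setstate,
};

static void example_attach_dcb(struct net_device *netdev)
{
	/* mirrors what init_one() does with cxgb4_dcb_ops in the hunk below */
	netdev->dcbnl_ops = &example_dcb_ops;
}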
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/net/ethernet/chelsio/cxgb4/cxgb4.h	 11
-rw-r--r--	drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c	199
-rw-r--r--	drivers/net/ethernet/chelsio/cxgb4/t4_hw.c	 76
3 files changed, 272 insertions, 14 deletions
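
Of the changes below, the new cxgb_select_queue() carries the least obvious arithmetic: once DCBx has been negotiated, the TX queue index is simply the frame's VLAN Priority Code Point, i.e. (vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT. Here is a standalone sketch of that mapping, assuming the usual if_vlan.h values (mask 0xe000, shift 13); the sample TCI value is made up for illustration.

#include <stdint.h>
#include <stdio.h>

#define VLAN_PRIO_MASK  0xe000	/* top 3 bits of the VLAN TCI */
#define VLAN_PRIO_SHIFT 13

int main(void)
{
	/* Example TCI: PCP = 5, DEI = 0, VID = 100 */
	uint16_t vlan_tci = (5 << VLAN_PRIO_SHIFT) | 100;
	unsigned int txq = (vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;

	/* With 8 queue sets per port under DCB, priority 5 lands on TX queue 5 */
	printf("tci=0x%04x -> txq=%u\n", vlan_tci, txq);
	return 0;
}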
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index f503dce4ab17..9d69c3ebbf00 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -373,6 +373,8 @@ enum {
 struct adapter;
 struct sge_rspq;
 
+#include "cxgb4_dcb.h"
+
 struct port_info {
 	struct adapter *adapter;
 	u16 viid;
@@ -389,6 +391,9 @@ struct port_info {
 	u8 rss_mode;
 	struct link_config link_cfg;
 	u16 *rss;
+#ifdef CONFIG_CHELSIO_T4_DCB
+	struct port_dcb_info dcb;	/* Data Center Bridging support */
+#endif
 };
 
 struct dentry;
@@ -1007,6 +1012,10 @@ int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
 int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
 		  unsigned int vf, unsigned int nparams, const u32 *params,
 		  const u32 *val);
+int t4_set_params_nosleep(struct adapter *adap, unsigned int mbox,
+			  unsigned int pf, unsigned int vf,
+			  unsigned int nparams, const u32 *params,
+			  const u32 *val);
 int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
 		unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl,
 		unsigned int rxqi, unsigned int rxq, unsigned int tc,
@@ -1025,6 +1034,8 @@ int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
 		  int idx, const u8 *addr, bool persist, bool add_smt);
 int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid,
 		     bool ucast, u64 vec, bool sleep_ok);
+int t4_enable_vi_params(struct adapter *adap, unsigned int mbox,
+			unsigned int viid, bool rx_en, bool tx_en, bool dcb_en);
 int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid,
 		 bool rx_en, bool tx_en);
 int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 2f8d6b910383..74b0ce50a8ef 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -67,6 +67,7 @@
67#include "t4_regs.h" 67#include "t4_regs.h"
68#include "t4_msg.h" 68#include "t4_msg.h"
69#include "t4fw_api.h" 69#include "t4fw_api.h"
70#include "cxgb4_dcb.h"
70#include "l2t.h" 71#include "l2t.h"
71 72
72#include <../drivers/net/bonding/bonding.h> 73#include <../drivers/net/bonding/bonding.h>
@@ -391,6 +392,17 @@ module_param_array(num_vf, uint, NULL, 0644);
 MODULE_PARM_DESC(num_vf, "number of VFs for each of PFs 0-3");
 #endif
 
+/* TX Queue select used to determine what algorithm to use for selecting TX
+ * queue. Select between the kernel provided function (select_queue=0) or user
+ * cxgb_select_queue function (select_queue=1)
+ *
+ * Default: select_queue=0
+ */
+static int select_queue;
+module_param(select_queue, int, 0644);
+MODULE_PARM_DESC(select_queue,
+		 "Select between kernel provided method of selecting or driver method of selecting TX queue. Default is kernel method.");
+
 /*
  * The filter TCAM has a fixed portion and a variable portion.  The fixed
  * portion can match on source/destination IP IPv4/IPv6 addresses and TCP/UDP
@@ -458,6 +470,42 @@ static void link_report(struct net_device *dev)
 	}
 }
 
+#ifdef CONFIG_CHELSIO_T4_DCB
+/* Set up/tear down Data Center Bridging Priority mapping for a net device. */
+static void dcb_tx_queue_prio_enable(struct net_device *dev, int enable)
+{
+	struct port_info *pi = netdev_priv(dev);
+	struct adapter *adap = pi->adapter;
+	struct sge_eth_txq *txq = &adap->sge.ethtxq[pi->first_qset];
+	int i;
+
+	/* We use a simple mapping of Port TX Queue Index to DCB
+	 * Priority when we're enabling DCB.
+	 */
+	for (i = 0; i < pi->nqsets; i++, txq++) {
+		u32 name, value;
+		int err;
+
+		name = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
+			FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_EQ_DCBPRIO_ETH) |
+			FW_PARAMS_PARAM_YZ(txq->q.cntxt_id));
+		value = enable ? i : 0xffffffff;
+
+		/* Since we can be called while atomic (from "interrupt
+		 * level") we need to issue the Set Parameters Command
+		 * without sleeping (timeout < 0).
+		 */
+		err = t4_set_params_nosleep(adap, adap->mbox, adap->fn, 0, 1,
+					    &name, &value);
+
+		if (err)
+			dev_err(adap->pdev_dev,
+				"Can't %s DCB Priority on port %d, TX Queue %d: err=%d\n",
+				enable ? "set" : "unset", pi->port_id, i, -err);
+	}
+}
+#endif /* CONFIG_CHELSIO_T4_DCB */
+
 void t4_os_link_changed(struct adapter *adapter, int port_id, int link_stat)
 {
 	struct net_device *dev = adapter->port[port_id];
@@ -466,8 +514,13 @@ void t4_os_link_changed(struct adapter *adapter, int port_id, int link_stat)
 	if (netif_running(dev) && link_stat != netif_carrier_ok(dev)) {
 		if (link_stat)
 			netif_carrier_on(dev);
-		else
+		else {
+#ifdef CONFIG_CHELSIO_T4_DCB
+			cxgb4_dcb_state_init(dev);
+			dcb_tx_queue_prio_enable(dev, false);
+#endif /* CONFIG_CHELSIO_T4_DCB */
 			netif_carrier_off(dev);
+		}
 
 		link_report(dev);
 	}
@@ -601,10 +654,45 @@ static int link_start(struct net_device *dev)
 		ret = t4_link_start(pi->adapter, mb, pi->tx_chan,
 				    &pi->link_cfg);
 	if (ret == 0)
-		ret = t4_enable_vi(pi->adapter, mb, pi->viid, true, true);
+		ret = t4_enable_vi_params(pi->adapter, mb, pi->viid, true,
+					  true, CXGB4_DCB_ENABLED);
+
 	return ret;
 }
 
+int cxgb4_dcb_enabled(const struct net_device *dev)
+{
+#ifdef CONFIG_CHELSIO_T4_DCB
+	struct port_info *pi = netdev_priv(dev);
+
+	return pi->dcb.state == CXGB4_DCB_STATE_FW_ALLSYNCED;
+#else
+	return 0;
+#endif
+}
+EXPORT_SYMBOL(cxgb4_dcb_enabled);
+
+#ifdef CONFIG_CHELSIO_T4_DCB
+/* Handle a Data Center Bridging update message from the firmware. */
+static void dcb_rpl(struct adapter *adap, const struct fw_port_cmd *pcmd)
+{
+	int port = FW_PORT_CMD_PORTID_GET(ntohl(pcmd->op_to_portid));
+	struct net_device *dev = adap->port[port];
+	int old_dcb_enabled = cxgb4_dcb_enabled(dev);
+	int new_dcb_enabled;
+
+	cxgb4_dcb_handle_fw_update(adap, pcmd);
+	new_dcb_enabled = cxgb4_dcb_enabled(dev);
+
+	/* If the DCB has become enabled or disabled on the port then we're
+	 * going to need to set up/tear down DCB Priority parameters for the
+	 * TX Queues associated with the port.
+	 */
+	if (new_dcb_enabled != old_dcb_enabled)
+		dcb_tx_queue_prio_enable(dev, new_dcb_enabled);
+}
+#endif /* CONFIG_CHELSIO_T4_DCB */
+
 /* Clear a filter and release any of its resources that we own.  This also
  * clears the filter's "pending" status.
  */
@@ -709,8 +797,32 @@ static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
 	} else if (opcode == CPL_FW6_MSG || opcode == CPL_FW4_MSG) {
 		const struct cpl_fw6_msg *p = (void *)rsp;
 
-		if (p->type == 0)
-			t4_handle_fw_rpl(q->adap, p->data);
+#ifdef CONFIG_CHELSIO_T4_DCB
+		const struct fw_port_cmd *pcmd = (const void *)p->data;
+		unsigned int cmd = FW_CMD_OP_GET(ntohl(pcmd->op_to_portid));
+		unsigned int action =
+			FW_PORT_CMD_ACTION_GET(ntohl(pcmd->action_to_len16));
+
+		if (cmd == FW_PORT_CMD &&
+		    action == FW_PORT_ACTION_GET_PORT_INFO) {
+			int port = FW_PORT_CMD_PORTID_GET(
+					be32_to_cpu(pcmd->op_to_portid));
+			struct net_device *dev = q->adap->port[port];
+			int state_input = ((pcmd->u.info.dcbxdis_pkd &
+					    FW_PORT_CMD_DCBXDIS)
+					   ? CXGB4_DCB_INPUT_FW_DISABLED
+					   : CXGB4_DCB_INPUT_FW_ENABLED);
+
+			cxgb4_dcb_state_fsm(dev, state_input);
+		}
+
+		if (cmd == FW_PORT_CMD &&
+		    action == FW_PORT_ACTION_L2_DCB_CFG)
+			dcb_rpl(q->adap, pcmd);
+		else
+#endif
+		if (p->type == 0)
+			t4_handle_fw_rpl(q->adap, p->data);
 	} else if (opcode == CPL_L2T_WRITE_RPL) {
 		const struct cpl_l2t_write_rpl *p = (void *)rsp;
 
@@ -1290,6 +1402,48 @@ static int del_filter_wr(struct adapter *adapter, int fidx)
 	return 0;
 }
 
+static u16 cxgb_select_queue(struct net_device *dev, struct sk_buff *skb,
+			     void *accel_priv, select_queue_fallback_t fallback)
+{
+	int txq;
+
+#ifdef CONFIG_CHELSIO_T4_DCB
+	/* If a Data Center Bridging has been successfully negotiated on this
+	 * link then we'll use the skb's priority to map it to a TX Queue.
+	 * The skb's priority is determined via the VLAN Tag Priority Code
+	 * Point field.
+	 */
+	if (cxgb4_dcb_enabled(dev)) {
+		u16 vlan_tci;
+		int err;
+
+		err = vlan_get_tag(skb, &vlan_tci);
+		if (unlikely(err)) {
+			if (net_ratelimit())
+				netdev_warn(dev,
+					    "TX Packet without VLAN Tag on DCB Link\n");
+			txq = 0;
+		} else {
+			txq = (vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
+		}
+		return txq;
+	}
+#endif /* CONFIG_CHELSIO_T4_DCB */
+
+	if (select_queue) {
+		txq = (skb_rx_queue_recorded(skb)
+			? skb_get_rx_queue(skb)
+			: smp_processor_id());
+
+		while (unlikely(txq >= dev->real_num_tx_queues))
+			txq -= dev->real_num_tx_queues;
+
+		return txq;
+	}
+
+	return fallback(dev, skb) % dev->real_num_tx_queues;
+}
+
 static inline int is_offload(const struct adapter *adap)
 {
 	return adap->params.offload;
@@ -4601,6 +4755,7 @@ static const struct net_device_ops cxgb4_netdev_ops = {
 	.ndo_open             = cxgb_open,
 	.ndo_stop             = cxgb_close,
 	.ndo_start_xmit       = t4_eth_xmit,
+	.ndo_select_queue     = cxgb_select_queue,
 	.ndo_get_stats64      = cxgb_get_stats,
 	.ndo_set_rx_mode      = cxgb_set_rxmode,
 	.ndo_set_mac_address  = cxgb_set_mac_addr,
@@ -5841,12 +5996,33 @@ static inline void init_rspq(struct adapter *adap, struct sge_rspq *q,
 static void cfg_queues(struct adapter *adap)
 {
 	struct sge *s = &adap->sge;
-	int i, q10g = 0, n10g = 0, qidx = 0;
+	int i, n10g = 0, qidx = 0;
+#ifndef CONFIG_CHELSIO_T4_DCB
+	int q10g = 0;
+#endif
 	int ciq_size;
 
 	for_each_port(adap, i)
 		n10g += is_x_10g_port(&adap2pinfo(adap, i)->link_cfg);
+#ifdef CONFIG_CHELSIO_T4_DCB
+	/* For Data Center Bridging support we need to be able to support up
+	 * to 8 Traffic Priorities; each of which will be assigned to its
+	 * own TX Queue in order to prevent Head-Of-Line Blocking.
+	 */
+	if (adap->params.nports * 8 > MAX_ETH_QSETS) {
+		dev_err(adap->pdev_dev, "MAX_ETH_QSETS=%d < %d!\n",
+			MAX_ETH_QSETS, adap->params.nports * 8);
+		BUG_ON(1);
+	}
 
+	for_each_port(adap, i) {
+		struct port_info *pi = adap2pinfo(adap, i);
+
+		pi->first_qset = qidx;
+		pi->nqsets = 8;
+		qidx += pi->nqsets;
+	}
+#else /* !CONFIG_CHELSIO_T4_DCB */
 	/*
 	 * We default to 1 queue per non-10G port and up to # of cores queues
 	 * per 10G port.
@@ -5863,6 +6039,7 @@ static void cfg_queues(struct adapter *adap)
 		pi->nqsets = is_x_10g_port(&pi->link_cfg) ? q10g : 1;
 		qidx += pi->nqsets;
 	}
+#endif /* !CONFIG_CHELSIO_T4_DCB */
 
 	s->ethqsets = qidx;
 	s->max_ethqsets = qidx;   /* MSI-X may lower it later */
@@ -5981,8 +6158,14 @@ static int enable_msix(struct adapter *adap)
 		/* need nchan for each possible ULD */
 		ofld_need = 3 * nchan;
 	}
+#ifdef CONFIG_CHELSIO_T4_DCB
+	/* For Data Center Bridging we need 8 Ethernet TX Priority Queues for
+	 * each port.
+	 */
+	need = 8 * adap->params.nports + EXTRA_VECS + ofld_need;
+#else
 	need = adap->params.nports + EXTRA_VECS + ofld_need;
-
+#endif
 	want = pci_enable_msix_range(adap->pdev, entries, need, want);
 	if (want < 0)
 		return want;
@@ -6245,6 +6428,10 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 		netdev->priv_flags |= IFF_UNICAST_FLT;
 
 		netdev->netdev_ops = &cxgb4_netdev_ops;
+#ifdef CONFIG_CHELSIO_T4_DCB
+		netdev->dcbnl_ops = &cxgb4_dcb_ops;
+		cxgb4_dcb_state_init(netdev);
+#endif
 		netdev->ethtool_ops = &cxgb_ethtool_ops;
 	}
 
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index bba67681aeaa..2a9da077c806 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -3175,6 +3175,46 @@ int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
 }
 
 /**
+ *	t4_set_params_nosleep - sets FW or device parameters
+ *	@adap: the adapter
+ *	@mbox: mailbox to use for the FW command
+ *	@pf: the PF
+ *	@vf: the VF
+ *	@nparams: the number of parameters
+ *	@params: the parameter names
+ *	@val: the parameter values
+ *
+ *	Does not ever sleep
+ *	Sets the value of FW or device parameters.  Up to 7 parameters can be
+ *	specified at once.
+ */
+int t4_set_params_nosleep(struct adapter *adap, unsigned int mbox,
+			  unsigned int pf, unsigned int vf,
+			  unsigned int nparams, const u32 *params,
+			  const u32 *val)
+{
+	struct fw_params_cmd c;
+	__be32 *p = &c.param[0].mnem;
+
+	if (nparams > 7)
+		return -EINVAL;
+
+	memset(&c, 0, sizeof(c));
+	c.op_to_vfn = cpu_to_be32(FW_CMD_OP(FW_PARAMS_CMD) |
+				FW_CMD_REQUEST | FW_CMD_WRITE |
+				FW_PARAMS_CMD_PFN(pf) |
+				FW_PARAMS_CMD_VFN(vf));
+	c.retval_len16 = cpu_to_be32(FW_LEN16(c));
+
+	while (nparams--) {
+		*p++ = cpu_to_be32(*params++);
+		*p++ = cpu_to_be32(*val++);
+	}
+
+	return t4_wr_mbox_ns(adap, mbox, &c, sizeof(c), NULL);
+}
+
+/**
  *	t4_set_params - sets FW or device parameters
  *	@adap: the adapter
  *	@mbox: mailbox to use for the FW command
@@ -3499,6 +3539,33 @@ int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid,
 }
 
 /**
+ *	t4_enable_vi_params - enable/disable a virtual interface
+ *	@adap: the adapter
+ *	@mbox: mailbox to use for the FW command
+ *	@viid: the VI id
+ *	@rx_en: 1=enable Rx, 0=disable Rx
+ *	@tx_en: 1=enable Tx, 0=disable Tx
+ *	@dcb_en: 1=enable delivery of Data Center Bridging messages.
+ *
+ *	Enables/disables a virtual interface.  Note that setting DCB Enable
+ *	only makes sense when enabling a Virtual Interface ...
+ */
+int t4_enable_vi_params(struct adapter *adap, unsigned int mbox,
+			unsigned int viid, bool rx_en, bool tx_en, bool dcb_en)
+{
+	struct fw_vi_enable_cmd c;
+
+	memset(&c, 0, sizeof(c));
+	c.op_to_viid = htonl(FW_CMD_OP(FW_VI_ENABLE_CMD) | FW_CMD_REQUEST |
+			     FW_CMD_EXEC | FW_VI_ENABLE_CMD_VIID(viid));
+
+	c.ien_to_len16 = htonl(FW_VI_ENABLE_CMD_IEN(rx_en) |
+			       FW_VI_ENABLE_CMD_EEN(tx_en) | FW_LEN16(c) |
+			       FW_VI_ENABLE_CMD_DCB_INFO(dcb_en));
+	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+}
+
+/**
  *	t4_enable_vi - enable/disable a virtual interface
  *	@adap: the adapter
  *	@mbox: mailbox to use for the FW command
@@ -3511,14 +3578,7 @@ int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid,
 int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid,
 		 bool rx_en, bool tx_en)
 {
-	struct fw_vi_enable_cmd c;
-
-	memset(&c, 0, sizeof(c));
-	c.op_to_viid = htonl(FW_CMD_OP(FW_VI_ENABLE_CMD) | FW_CMD_REQUEST |
-			     FW_CMD_EXEC | FW_VI_ENABLE_CMD_VIID(viid));
-	c.ien_to_len16 = htonl(FW_VI_ENABLE_CMD_IEN(rx_en) |
-			       FW_VI_ENABLE_CMD_EEN(tx_en) | FW_LEN16(c));
-	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+	return t4_enable_vi_params(adap, mbox, viid, rx_en, tx_en, 0);
 }
 
 /**
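
As a rough illustration of the queue/vector budget the DCB paths above impose: cfg_queues() gives each port 8 queue sets so that every 802.1p priority gets its own TX queue, and enable_msix() therefore asks for 8 vectors per port on top of EXTRA_VECS and the offload vectors. The helper below just mirrors that arithmetic; the values passed in main() are made up for the example and are not from the patch.

/* Back-of-the-envelope helper mirroring the DCB branch of enable_msix();
 * the extra_vecs and ofld_need values below are invented for illustration.
 */
#include <stdio.h>

static unsigned int dcb_msix_need(unsigned int nports, unsigned int extra_vecs,
				  unsigned int ofld_need)
{
	return 8 * nports + extra_vecs + ofld_need;	/* 8 priority queues per port */
}

int main(void)
{
	/* e.g. 4 ports, 2 extra vectors, 3 * nchan = 12 offload vectors */
	printf("need = %u MSI-X vectors\n", dcb_msix_need(4, 2, 12));
	return 0;
}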