author		Ariel Elior <ariele@broadcom.com>	2013-09-04 07:09:21 -0400
committer	David S. Miller <davem@davemloft.net>	2013-09-05 12:44:31 -0400
commit		b9871bcfd211d316adee317608dab44c58d6ea2d (patch)
tree		973ef35c4836cf981c7b8c849eaceb2e9d29c6ce
parent		53cf527513eed6e7170e9dceacd198f9267171b0 (diff)
bnx2x: VF RSS support - PF side
This patch adds support for Receive Side Scaling for queues of Virtual
Functions on the PF side. This includes support for requests for multiple
queues from VF drivers, configuration of the HW for multiple queues per
VF, and support for RSS configuration of said queues.

Signed-off-by: Ariel Elior <ariele@broadcom.com>
Signed-off-by: Eilon Greenstein <eilong@broadcom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
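The mechanism at the heart of the patch: once a VF owns several rx queues, the PF programs an RSS indirection table so a packet's hash picks one of those queues. A minimal standalone sketch of that mapping follows; the names IND_TABLE_SIZE and vf_queue_count are illustrative stand-ins, not the driver's identifiers (the driver uses T_ETH_INDIRECTION_TABLE_SIZE and per-queue client IDs):

#include <stdio.h>

#define IND_TABLE_SIZE 128  /* entries in the indirection table (illustrative) */

int main(void)
{
	unsigned char ind_table[IND_TABLE_SIZE];
	unsigned int vf_queue_count = 4;  /* queues granted to this VF */
	unsigned int i;

	/* each table slot names the rx queue that receives matching flows */
	for (i = 0; i < IND_TABLE_SIZE; i++)
		ind_table[i] = i % vf_queue_count;

	/* a packet's RSS hash indexes the table to select its rx queue */
	unsigned int rss_hash = 0x1a2b3c4d;
	printf("hash 0x%x -> queue %u\n", rss_hash,
	       (unsigned)ind_table[rss_hash % IND_TABLE_SIZE]);
	return 0;
}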
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x.h       |  32
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c   |   5
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c  |   2
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h   |   1
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c    |  10
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h    |   2
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c | 386
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h |  32
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c  | 146
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h  |  41
10 files changed, 513 insertions(+), 144 deletions(-)
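One arithmetic point worth seeing before the diff: the doorbell stride change below (BNX2X_DB_SHIFT 7 -> 3) is what lets 16 CIDs per VF fit the fixed 512-byte VF doorbell BAR. A compile-time sketch of that check, using the constants as they appear in the patch (the asserts mirror the driver's #error guard; the second assert is an added observation, not driver code):

#include <assert.h>

#define BNX2X_VF_CID_WND   4                        /* log2 of queues per VF */
#define BNX2X_CIDS_PER_VF  (1 << BNX2X_VF_CID_WND)  /* 16 */
#define BNX2X_DB_SHIFT     3                        /* 8-byte doorbell stride */
#define BNX2X_VF_BAR_SIZE  512

int main(void)
{
	/* 16 CIDs * 8 bytes = 128 bytes, fits the 512-byte VF doorbell BAR */
	assert(BNX2X_CIDS_PER_VF * (1 << BNX2X_DB_SHIFT) <= BNX2X_VF_BAR_SIZE);
	/* the old 128-byte stride (shift 7) would have needed 2048 bytes */
	assert(BNX2X_CIDS_PER_VF * (1 << 7) > BNX2X_VF_BAR_SIZE);
	return 0;
}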
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index 3e77a1b1a44a..0c338026ce01 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -825,15 +825,13 @@ static inline bool bnx2x_fp_ll_polling(struct bnx2x_fastpath *fp)
 #define BD_UNMAP_LEN(bd) (le16_to_cpu((bd)->nbytes))
 
 #define BNX2X_DB_MIN_SHIFT 3 /* 8 bytes */
-#define BNX2X_DB_SHIFT 7 /* 128 bytes*/
+#define BNX2X_DB_SHIFT 3 /* 8 bytes*/
 #if (BNX2X_DB_SHIFT < BNX2X_DB_MIN_SHIFT)
 #error "Min DB doorbell stride is 8"
 #endif
-#define DPM_TRIGER_TYPE 0x40
 #define DOORBELL(bp, cid, val) \
 	do { \
-		writel((u32)(val), bp->doorbells + (bp->db_size * (cid)) + \
-		       DPM_TRIGER_TYPE); \
+		writel((u32)(val), bp->doorbells + (bp->db_size * (cid))); \
 	} while (0)
 
 /* TX CSUM helpers */
@@ -1100,13 +1098,27 @@ struct bnx2x_port {
 extern struct workqueue_struct *bnx2x_wq;
 
 #define BNX2X_MAX_NUM_OF_VFS	64
-#define BNX2X_VF_CID_WND	0
+#define BNX2X_VF_CID_WND	4 /* log num of queues per VF. HW config. */
 #define BNX2X_CIDS_PER_VF	(1 << BNX2X_VF_CID_WND)
-#define BNX2X_CLIENTS_PER_VF	1
-#define BNX2X_FIRST_VF_CID	256
+
+/* We need to reserve doorbell addresses for all VF and queue combinations */
 #define BNX2X_VF_CIDS	(BNX2X_MAX_NUM_OF_VFS * BNX2X_CIDS_PER_VF)
+
+/* The doorbell is configured to have the same number of CIDs for PFs and for
+ * VFs. For this reason the PF CID zone is as large as the VF zone.
+ */
+#define BNX2X_FIRST_VF_CID	BNX2X_VF_CIDS
+#define BNX2X_MAX_NUM_VF_QUEUES	64
 #define BNX2X_VF_ID_INVALID	0xFF
 
+/* the number of VF CIDS multiplied by the amount of bytes reserved for each
+ * cid must not exceed the size of the VF doorbell
+ */
+#define BNX2X_VF_BAR_SIZE	512
+#if (BNX2X_VF_BAR_SIZE < BNX2X_CIDS_PER_VF * (1 << BNX2X_DB_SHIFT))
+#error "VF doorbell bar size is 512"
+#endif
+
 /*
  * The total number of L2 queues, MSIX vectors and HW contexts (CIDs) is
  * control by the number of fast-path status blocks supported by the
@@ -1650,10 +1662,10 @@ struct bnx2x {
 	dma_addr_t		fw_stats_data_mapping;
 	int			fw_stats_data_sz;
 
-	/* For max 196 cids (64*3 + non-eth), 32KB ILT page size and 1KB
+	/* For max 1024 cids (VF RSS), 32KB ILT page size and 1KB
 	 * context size we need 8 ILT entries.
 	 */
-#define ILT_MAX_L2_LINES	8
+#define ILT_MAX_L2_LINES	32
 	struct hw_context	context[ILT_MAX_L2_LINES];
 
 	struct bnx2x_ilt	*ilt;
@@ -1869,7 +1881,7 @@ extern int num_queues;
 #define FUNC_FLG_TPA		0x0008
 #define FUNC_FLG_SPQ		0x0010
 #define FUNC_FLG_LEADING	0x0020	/* PF only */
-
+#define FUNC_FLG_LEADING_STATS	0x0040
 struct bnx2x_func_init_params {
 	/* dma */
 	dma_addr_t	fw_stat_map;	/* valid iff FUNC_FLG_STATS */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 2e90868a9276..e7400d9d60c4 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -4784,6 +4784,11 @@ int bnx2x_resume(struct pci_dev *pdev)
 void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
 			      u32 cid)
 {
+	if (!cxt) {
+		BNX2X_ERR("bad context pointer %p\n", cxt);
+		return;
+	}
+
 	/* ustorm cxt validation */
 	cxt->ustorm_ag_context.cdu_usage =
 		CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 17f117c1d8d2..5729aa7be1d0 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -6893,7 +6893,7 @@ static int bnx2x_init_hw_common(struct bnx2x *bp)
 	bnx2x_init_block(bp, BLOCK_TM, PHASE_COMMON);
 
 	bnx2x_init_block(bp, BLOCK_DORQ, PHASE_COMMON);
-	REG_WR(bp, DORQ_REG_DPM_CID_OFST, BNX2X_DB_SHIFT);
+
 	if (!CHIP_REV_IS_SLOW(bp))
 		/* enable hw interrupt from doorbell Q */
 		REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
index 8e627b886d7b..5ecf267dc4cc 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
@@ -6335,6 +6335,7 @@
 #define PCI_ID_VAL2	0x438
 #define PCI_ID_VAL3	0x43c
 
+#define GRC_CONFIG_REG_VF_MSIX_CONTROL	0x61C
 #define GRC_CONFIG_REG_PF_INIT_VF	0x624
 #define GRC_CR_PF_INIT_VF_PF_FIRST_VF_NUM_MASK	0xf
 /* First VF_NUM for PF is encoded in this register.
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
index 1d46b68fb766..9fbeee522d2c 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
@@ -4416,6 +4416,16 @@ void bnx2x_init_rss_config_obj(struct bnx2x *bp,
 	rss_obj->config_rss = bnx2x_setup_rss;
 }
 
+int validate_vlan_mac(struct bnx2x *bp,
+		      struct bnx2x_vlan_mac_obj *vlan_mac)
+{
+	if (!vlan_mac->get_n_elements) {
+		BNX2X_ERR("vlan mac object was not initialized\n");
+		return -EINVAL;
+	}
+	return 0;
+}
+
 /********************** Queue state object ***********************************/
 
 /**
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
index 533a3abd8c82..658f4e33abf9 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
@@ -1407,4 +1407,6 @@ int bnx2x_config_rss(struct bnx2x *bp,
 void bnx2x_get_rss_ind_table(struct bnx2x_rss_config_obj *rss_obj,
 			     u8 *ind_table);
 
+int validate_vlan_mac(struct bnx2x *bp,
+		      struct bnx2x_vlan_mac_obj *vlan_mac);
 #endif /* BNX2X_SP_VERBS */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index fbc026c4cab2..73731eb68f2a 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -170,6 +170,11 @@ enum bnx2x_vfop_qteardown_state {
 	BNX2X_VFOP_QTEARDOWN_DONE
 };
 
+enum bnx2x_vfop_rss_state {
+	BNX2X_VFOP_RSS_CONFIG,
+	BNX2X_VFOP_RSS_DONE
+};
+
 #define bnx2x_vfop_reset_wq(vf) atomic_set(&vf->op_in_progress, 0)
 
 void bnx2x_vfop_qctor_dump_tx(struct bnx2x *bp, struct bnx2x_virtf *vf,
@@ -265,11 +270,6 @@ void bnx2x_vfop_qctor_prep(struct bnx2x *bp,
 	__set_bit(BNX2X_Q_FLG_TX_SEC, &setup_p->flags);
 	__set_bit(BNX2X_Q_FLG_ANTI_SPOOF, &setup_p->flags);
 
-	if (vfq_is_leading(q)) {
-		__set_bit(BNX2X_Q_FLG_LEADING_RSS, &setup_p->flags);
-		__set_bit(BNX2X_Q_FLG_MCAST, &setup_p->flags);
-	}
-
 	/* Setup-op rx parameters */
 	if (test_bit(BNX2X_Q_TYPE_HAS_RX, &q_type)) {
 		struct bnx2x_rxq_setup_params *rxq_p = &setup_p->rxq_params;
@@ -398,7 +398,11 @@ static void bnx2x_vfop_qdtor(struct bnx2x *bp, struct bnx2x_virtf *vf)
 	    BNX2X_Q_LOGICAL_STATE_STOPPED) {
 		DP(BNX2X_MSG_IOV,
 		   "Entered qdtor but queue was already stopped. Aborting gracefully\n");
-		goto op_done;
+
+		/* next state */
+		vfop->state = BNX2X_VFOP_QDTOR_DONE;
+
+		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
 	}
 
 	/* next state */
@@ -432,8 +436,10 @@ op_err:
 op_done:
 	case BNX2X_VFOP_QDTOR_DONE:
 		/* invalidate the context */
-		qdtor->cxt->ustorm_ag_context.cdu_usage = 0;
-		qdtor->cxt->xstorm_ag_context.cdu_reserved = 0;
+		if (qdtor->cxt) {
+			qdtor->cxt->ustorm_ag_context.cdu_usage = 0;
+			qdtor->cxt->xstorm_ag_context.cdu_reserved = 0;
+		}
 		bnx2x_vfop_end(bp, vf, vfop);
 		return;
 	default:
@@ -465,7 +471,8 @@ static int bnx2x_vfop_qdtor_cmd(struct bnx2x *bp,
 		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qdtor,
 					     cmd->block);
 	}
-	DP(BNX2X_MSG_IOV, "VF[%d] failed to add a vfop.\n", vf->abs_vfid);
+	DP(BNX2X_MSG_IOV, "VF[%d] failed to add a vfop. rc %d\n",
+	   vf->abs_vfid, vfop->rc);
 	return -ENOMEM;
 }
 
@@ -474,10 +481,18 @@ bnx2x_vf_set_igu_info(struct bnx2x *bp, u8 igu_sb_id, u8 abs_vfid)
 {
 	struct bnx2x_virtf *vf = bnx2x_vf_by_abs_fid(bp, abs_vfid);
 	if (vf) {
+		/* the first igu entry belonging to VFs of this PF */
+		if (!BP_VFDB(bp)->first_vf_igu_entry)
+			BP_VFDB(bp)->first_vf_igu_entry = igu_sb_id;
+
+		/* the first igu entry belonging to this VF */
 		if (!vf_sb_count(vf))
 			vf->igu_base_id = igu_sb_id;
+
 		++vf_sb_count(vf);
+		++vf->sb_count;
 	}
+	BP_VFDB(bp)->vf_sbs_pool++;
 }
 
 /* VFOP MAC/VLAN helpers */
@@ -733,6 +748,7 @@ static int bnx2x_vfop_mac_delall_cmd(struct bnx2x *bp,
 				     int qid, bool drv_only)
 {
 	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
+	int rc;
 
 	if (vfop) {
 		struct bnx2x_vfop_args_filters filters = {
@@ -752,6 +768,9 @@ static int bnx2x_vfop_mac_delall_cmd(struct bnx2x *bp,
 		bnx2x_vfop_mac_prep_ramrod(ramrod, &flags);
 
 		/* set object */
+		rc = validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, mac_obj));
+		if (rc)
+			return rc;
 		ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj);
 
 		/* set extra args */
@@ -772,6 +791,7 @@ int bnx2x_vfop_mac_list_cmd(struct bnx2x *bp,
 			    int qid, bool drv_only)
 {
 	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
+	int rc;
 
 	if (vfop) {
 		struct bnx2x_vfop_args_filters filters = {
@@ -794,6 +814,9 @@ int bnx2x_vfop_mac_list_cmd(struct bnx2x *bp,
 		bnx2x_vfop_mac_prep_ramrod(ramrod, &flags);
 
 		/* set object */
+		rc = validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, mac_obj));
+		if (rc)
+			return rc;
 		ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj);
 
 		/* set extra args */
@@ -814,6 +837,7 @@ int bnx2x_vfop_vlan_set_cmd(struct bnx2x *bp,
 			    int qid, u16 vid, bool add)
 {
 	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
+	int rc;
 
 	if (vfop) {
 		struct bnx2x_vfop_args_filters filters = {
@@ -834,6 +858,9 @@ int bnx2x_vfop_vlan_set_cmd(struct bnx2x *bp,
 		ramrod->user_req.u.vlan.vlan = vid;
 
 		/* set object */
+		rc = validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, vlan_obj));
+		if (rc)
+			return rc;
 		ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);
 
 		/* set extra args */
@@ -853,6 +880,7 @@ static int bnx2x_vfop_vlan_delall_cmd(struct bnx2x *bp,
 					   int qid, bool drv_only)
 {
 	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
+	int rc;
 
 	if (vfop) {
 		struct bnx2x_vfop_args_filters filters = {
@@ -872,6 +900,9 @@ static int bnx2x_vfop_vlan_delall_cmd(struct bnx2x *bp,
 		bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, &flags);
 
 		/* set object */
+		rc = validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, vlan_obj));
+		if (rc)
+			return rc;
 		ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);
 
 		/* set extra args */
@@ -892,6 +923,7 @@ int bnx2x_vfop_vlan_list_cmd(struct bnx2x *bp,
 			     int qid, bool drv_only)
 {
 	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
+	int rc;
 
 	if (vfop) {
 		struct bnx2x_vfop_args_filters filters = {
@@ -911,6 +943,9 @@ int bnx2x_vfop_vlan_list_cmd(struct bnx2x *bp,
 		bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, &flags);
 
 		/* set object */
+		rc = validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, vlan_obj));
+		if (rc)
+			return rc;
 		ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);
 
 		/* set extra args */
@@ -1021,21 +1056,25 @@ static void bnx2x_vfop_qflr(struct bnx2x *bp, struct bnx2x_virtf *vf)
 	case BNX2X_VFOP_QFLR_CLR_VLAN:
 		/* vlan-clear-all: driver-only, don't consume credit */
 		vfop->state = BNX2X_VFOP_QFLR_CLR_MAC;
-		vfop->rc = bnx2x_vfop_vlan_delall_cmd(bp, vf, &cmd, qid, true);
+		if (!validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, vlan_obj)))
+			vfop->rc = bnx2x_vfop_vlan_delall_cmd(bp, vf, &cmd, qid,
+							      true);
 		if (vfop->rc)
 			goto op_err;
-		return;
+		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
 
 	case BNX2X_VFOP_QFLR_CLR_MAC:
 		/* mac-clear-all: driver only consume credit */
 		vfop->state = BNX2X_VFOP_QFLR_TERMINATE;
-		vfop->rc = bnx2x_vfop_mac_delall_cmd(bp, vf, &cmd, qid, true);
+		if (!validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, mac_obj)))
+			vfop->rc = bnx2x_vfop_mac_delall_cmd(bp, vf, &cmd, qid,
+							     true);
 		DP(BNX2X_MSG_IOV,
 		   "VF[%d] vfop->rc after bnx2x_vfop_mac_delall_cmd was %d",
 		   vf->abs_vfid, vfop->rc);
 		if (vfop->rc)
 			goto op_err;
-		return;
+		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
 
 	case BNX2X_VFOP_QFLR_TERMINATE:
 		qstate = &vfop->op_p->qctor.qstate;
@@ -1332,10 +1371,13 @@ int bnx2x_vfop_qdown_cmd(struct bnx2x *bp,
 {
 	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
 
+	/* for non leading queues skip directly to qdown state */
 	if (vfop) {
 		vfop->args.qx.qid = qid;
-		bnx2x_vfop_opset(BNX2X_VFOP_QTEARDOWN_RXMODE,
-				 bnx2x_vfop_qdown, cmd->done);
+		bnx2x_vfop_opset(qid == LEADING_IDX ?
+				 BNX2X_VFOP_QTEARDOWN_RXMODE :
+				 BNX2X_VFOP_QTEARDOWN_QDTOR, bnx2x_vfop_qdown,
+				 cmd->done);
 		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qdown,
 					     cmd->block);
 	}
@@ -1488,15 +1530,16 @@ int bnx2x_vf_flr_clnup_epilog(struct bnx2x *bp, u8 abs_vfid)
  * both known
  */
 static void
-bnx2x_iov_static_resc(struct bnx2x *bp, struct vf_pf_resc_request *resc)
+bnx2x_iov_static_resc(struct bnx2x *bp, struct bnx2x_virtf *vf)
 {
+	struct vf_pf_resc_request *resc = &vf->alloc_resc;
 	u16 vlan_count = 0;
 
 	/* will be set only during VF-ACQUIRE */
 	resc->num_rxqs = 0;
 	resc->num_txqs = 0;
 
-	/* no credit calculcis for macs (just yet) */
+	/* no credit calculations for macs (just yet) */
 	resc->num_mac_filters = 1;
 
 	/* divvy up vlan rules */
@@ -1508,13 +1551,14 @@ bnx2x_iov_static_resc(struct bnx2x *bp, struct vf_pf_resc_request *resc)
 	resc->num_mc_filters = 0;
 
 	/* num_sbs already set */
+	resc->num_sbs = vf->sb_count;
 }
 
 /* FLR routines: */
 static void bnx2x_vf_free_resc(struct bnx2x *bp, struct bnx2x_virtf *vf)
 {
 	/* reset the state variables */
-	bnx2x_iov_static_resc(bp, &vf->alloc_resc);
+	bnx2x_iov_static_resc(bp, vf);
 	vf->state = VF_FREE;
 }
 
@@ -1734,8 +1778,7 @@ void bnx2x_iov_init_dq(struct bnx2x *bp)
 	/* The VF doorbell size  0 - *B, 4 - 128B. We set it here to match
 	 * the Pf doorbell size although the 2 are independent.
 	 */
-	REG_WR(bp, DORQ_REG_VF_NORM_CID_OFST,
-	       BNX2X_DB_SHIFT - BNX2X_DB_MIN_SHIFT);
+	REG_WR(bp, DORQ_REG_VF_NORM_CID_OFST, 3);
 
 	/* No security checks for now -
 	 * configure single rule (out of 16) mask = 0x1, value = 0x0,
@@ -1802,7 +1845,7 @@ bnx2x_get_vf_igu_cam_info(struct bnx2x *bp)
 {
 	int sb_id;
 	u32 val;
-	u8 fid;
+	u8 fid, current_pf = 0;
 
 	/* IGU in normal mode - read CAM */
 	for (sb_id = 0; sb_id < IGU_REG_MAPPING_MEMORY_SIZE; sb_id++) {
@@ -1810,16 +1853,18 @@ bnx2x_get_vf_igu_cam_info(struct bnx2x *bp)
 		if (!(val & IGU_REG_MAPPING_MEMORY_VALID))
 			continue;
 		fid = GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID);
-		if (!(fid & IGU_FID_ENCODE_IS_PF))
+		if (fid & IGU_FID_ENCODE_IS_PF)
+			current_pf = fid & IGU_FID_PF_NUM_MASK;
+		else if (current_pf == BP_ABS_FUNC(bp))
 			bnx2x_vf_set_igu_info(bp, sb_id,
 					      (fid & IGU_FID_VF_NUM_MASK));
-
 		DP(BNX2X_MSG_IOV, "%s[%d], igu_sb_id=%d, msix=%d\n",
 		   ((fid & IGU_FID_ENCODE_IS_PF) ? "PF" : "VF"),
 		   ((fid & IGU_FID_ENCODE_IS_PF) ? (fid & IGU_FID_PF_NUM_MASK) :
 		    (fid & IGU_FID_VF_NUM_MASK)), sb_id,
 		   GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR));
 	}
+	DP(BNX2X_MSG_IOV, "vf_sbs_pool is %d\n", BP_VFDB(bp)->vf_sbs_pool);
 }
 
 static void __bnx2x_iov_free_vfdb(struct bnx2x *bp)
@@ -1885,23 +1930,11 @@ static int bnx2x_sriov_info(struct bnx2x *bp, struct bnx2x_sriov *iov)
 	return 0;
 }
 
-static u8 bnx2x_iov_get_max_queue_count(struct bnx2x *bp)
-{
-	int i;
-	u8 queue_count = 0;
-
-	if (IS_SRIOV(bp))
-		for_each_vf(bp, i)
-			queue_count += bnx2x_vf(bp, i, alloc_resc.num_sbs);
-
-	return queue_count;
-}
-
 /* must be called after PF bars are mapped */
 int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param,
 		       int num_vfs_param)
 {
-	int err, i, qcount;
+	int err, i;
 	struct bnx2x_sriov *iov;
 	struct pci_dev *dev = bp->pdev;
 
@@ -1999,12 +2032,13 @@ int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param,
 	/* re-read the IGU CAM for VFs - index and abs_vfid must be set */
 	bnx2x_get_vf_igu_cam_info(bp);
 
-	/* get the total queue count and allocate the global queue arrays */
-	qcount = bnx2x_iov_get_max_queue_count(bp);
-
 	/* allocate the queue arrays for all VFs */
-	bp->vfdb->vfqs = kzalloc(qcount * sizeof(struct bnx2x_vf_queue),
-				 GFP_KERNEL);
+	bp->vfdb->vfqs = kzalloc(
+		BNX2X_MAX_NUM_VF_QUEUES * sizeof(struct bnx2x_vf_queue),
+		GFP_KERNEL);
+
+	DP(BNX2X_MSG_IOV, "bp->vfdb->vfqs was %p\n", bp->vfdb->vfqs);
+
 	if (!bp->vfdb->vfqs) {
 		BNX2X_ERR("failed to allocate vf queue array\n");
 		err = -ENOMEM;
@@ -2125,49 +2159,14 @@ static void bnx2x_vfq_init(struct bnx2x *bp, struct bnx2x_virtf *vf,
 			     q_type);
 
 	DP(BNX2X_MSG_IOV,
-	   "initialized vf %d's queue object. func id set to %d\n",
-	   vf->abs_vfid, q->sp_obj.func_id);
-
-	/* mac/vlan objects are per queue, but only those
-	 * that belong to the leading queue are initialized
-	 */
-	if (vfq_is_leading(q)) {
-		/* mac */
-		bnx2x_init_mac_obj(bp, &q->mac_obj,
-				   cl_id, q->cid, func_id,
-				   bnx2x_vf_sp(bp, vf, mac_rdata),
-				   bnx2x_vf_sp_map(bp, vf, mac_rdata),
-				   BNX2X_FILTER_MAC_PENDING,
-				   &vf->filter_state,
-				   BNX2X_OBJ_TYPE_RX_TX,
-				   &bp->macs_pool);
-		/* vlan */
-		bnx2x_init_vlan_obj(bp, &q->vlan_obj,
-				    cl_id, q->cid, func_id,
-				    bnx2x_vf_sp(bp, vf, vlan_rdata),
-				    bnx2x_vf_sp_map(bp, vf, vlan_rdata),
-				    BNX2X_FILTER_VLAN_PENDING,
-				    &vf->filter_state,
-				    BNX2X_OBJ_TYPE_RX_TX,
-				    &bp->vlans_pool);
-
-		/* mcast */
-		bnx2x_init_mcast_obj(bp, &vf->mcast_obj, cl_id,
-				     q->cid, func_id, func_id,
-				     bnx2x_vf_sp(bp, vf, mcast_rdata),
-				     bnx2x_vf_sp_map(bp, vf, mcast_rdata),
-				     BNX2X_FILTER_MCAST_PENDING,
-				     &vf->filter_state,
-				     BNX2X_OBJ_TYPE_RX_TX);
-
-		vf->leading_rss = cl_id;
-	}
+	   "initialized vf %d's queue object. func id set to %d. cid set to 0x%x\n",
+	   vf->abs_vfid, q->sp_obj.func_id, q->cid);
 }
 
 /* called by bnx2x_nic_load */
 int bnx2x_iov_nic_init(struct bnx2x *bp)
 {
-	int vfid, qcount, i;
+	int vfid;
 
 	if (!IS_SRIOV(bp)) {
 		DP(BNX2X_MSG_IOV, "vfdb was not allocated\n");
@@ -2196,7 +2195,7 @@ int bnx2x_iov_nic_init(struct bnx2x *bp)
 			  BNX2X_FIRST_VF_CID + base_vf_cid, base_cxt);
 
 		/* init statically provisioned resources */
-		bnx2x_iov_static_resc(bp, &vf->alloc_resc);
+		bnx2x_iov_static_resc(bp, vf);
 
 		/* queues are initialized during VF-ACQUIRE */
 
@@ -2232,13 +2231,12 @@ int bnx2x_iov_nic_init(struct bnx2x *bp)
 	}
 
 	/* Final VF init */
-	qcount = 0;
-	for_each_vf(bp, i) {
-		struct bnx2x_virtf *vf = BP_VF(bp, i);
+	for_each_vf(bp, vfid) {
+		struct bnx2x_virtf *vf = BP_VF(bp, vfid);
 
 		/* fill in the BDF and bars */
-		vf->bus = bnx2x_vf_bus(bp, i);
-		vf->devfn = bnx2x_vf_devfn(bp, i);
+		vf->bus = bnx2x_vf_bus(bp, vfid);
+		vf->devfn = bnx2x_vf_devfn(bp, vfid);
 		bnx2x_vf_set_bars(bp, vf);
 
 		DP(BNX2X_MSG_IOV,
@@ -2247,10 +2245,6 @@ int bnx2x_iov_nic_init(struct bnx2x *bp)
 		   (unsigned)vf->bars[0].bar, vf->bars[0].size,
 		   (unsigned)vf->bars[1].bar, vf->bars[1].size,
 		   (unsigned)vf->bars[2].bar, vf->bars[2].size);
-
-		/* set local queue arrays */
-		vf->vfqs = &bp->vfdb->vfqs[qcount];
-		qcount += bnx2x_vf(bp, i, alloc_resc.num_sbs);
 	}
 
 	return 0;
@@ -2556,6 +2550,9 @@ void bnx2x_iov_adjust_stats_req(struct bnx2x *bp)
 		for_each_vfq(vf, j) {
 			struct bnx2x_vf_queue *rxq = vfq_get(vf, j);
 
+			dma_addr_t q_stats_addr =
+				vf->fw_stat_map + j * vf->stats_stride;
+
 			/* collect stats for active queues only */
 			if (bnx2x_get_q_logical_state(bp, &rxq->sp_obj) ==
 			    BNX2X_Q_LOGICAL_STATE_STOPPED)
@@ -2563,13 +2560,13 @@ void bnx2x_iov_adjust_stats_req(struct bnx2x *bp)
 
 			/* create stats query entry for this queue */
 			cur_query_entry->kind = STATS_TYPE_QUEUE;
-			cur_query_entry->index = vfq_cl_id(vf, rxq);
+			cur_query_entry->index = vfq_stat_id(vf, rxq);
 			cur_query_entry->funcID =
 				cpu_to_le16(FW_VF_HANDLE(vf->abs_vfid));
 			cur_query_entry->address.hi =
-				cpu_to_le32(U64_HI(vf->fw_stat_map));
+				cpu_to_le32(U64_HI(q_stats_addr));
 			cur_query_entry->address.lo =
-				cpu_to_le32(U64_LO(vf->fw_stat_map));
+				cpu_to_le32(U64_LO(q_stats_addr));
 			DP(BNX2X_MSG_IOV,
 			   "added address %x %x for vf %d queue %d client %d\n",
 			   cur_query_entry->address.hi,
@@ -2578,6 +2575,10 @@ void bnx2x_iov_adjust_stats_req(struct bnx2x *bp)
 			cur_query_entry++;
 			cur_data_offset += sizeof(struct per_queue_stats);
 			stats_count++;
+
+			/* all stats are coalesced to the leading queue */
+			if (vf->cfg_flags & VF_CFG_STATS_COALESCE)
+				break;
 		}
 	}
 	bp->fw_stats_req->hdr.cmd_num = bp->fw_stats_num + stats_count;
@@ -2596,6 +2597,11 @@ void bnx2x_iov_sp_task(struct bnx2x *bp)
 	for_each_vf(bp, i) {
 		struct bnx2x_virtf *vf = BP_VF(bp, i);
 
+		if (!vf) {
+			BNX2X_ERR("VF was null! skipping...\n");
+			continue;
+		}
+
 		if (!list_empty(&vf->op_list_head) &&
 		    atomic_read(&vf->op_in_progress)) {
 			DP(BNX2X_MSG_IOV, "running pending op for vf %d\n", i);
@@ -2743,7 +2749,7 @@ int bnx2x_vf_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf,
 		struct bnx2x_vf_queue *q = vfq_get(vf, i);
 
 		if (!q) {
-			DP(BNX2X_MSG_IOV, "q number %d was not allocated\n", i);
+			BNX2X_ERR("q number %d was not allocated\n", i);
 			return -EINVAL;
 		}
 
@@ -2947,6 +2953,43 @@ op_done:
 	bnx2x_vfop_end(bp, vf, vfop);
 }
 
+static void bnx2x_vfop_rss(struct bnx2x *bp, struct bnx2x_virtf *vf)
+{
+	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
+	enum bnx2x_vfop_rss_state state;
+
+	if (!vfop) {
+		BNX2X_ERR("vfop was null\n");
+		return;
+	}
+
+	state = vfop->state;
+	bnx2x_vfop_reset_wq(vf);
+
+	if (vfop->rc < 0)
+		goto op_err;
+
+	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);
+
+	switch (state) {
+	case BNX2X_VFOP_RSS_CONFIG:
+		/* next state */
+		vfop->state = BNX2X_VFOP_RSS_DONE;
+		bnx2x_config_rss(bp, &vfop->op_p->rss);
+		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
+op_err:
+		BNX2X_ERR("RSS error: rc %d\n", vfop->rc);
+op_done:
+	case BNX2X_VFOP_RSS_DONE:
+		bnx2x_vfop_end(bp, vf, vfop);
+		return;
+	default:
+		bnx2x_vfop_default(state);
+	}
+op_pending:
+	return;
+}
+
 int bnx2x_vfop_release_cmd(struct bnx2x *bp,
 			   struct bnx2x_virtf *vf,
 			   struct bnx2x_vfop_cmd *cmd)
@@ -2961,6 +3004,21 @@ int bnx2x_vfop_release_cmd(struct bnx2x *bp,
 	return -ENOMEM;
 }
 
+int bnx2x_vfop_rss_cmd(struct bnx2x *bp,
+		       struct bnx2x_virtf *vf,
+		       struct bnx2x_vfop_cmd *cmd)
+{
+	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
+
+	if (vfop) {
+		bnx2x_vfop_opset(BNX2X_VFOP_RSS_CONFIG, bnx2x_vfop_rss,
+				 cmd->done);
+		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_rss,
+					     cmd->block);
+	}
+	return -ENOMEM;
+}
+
 /* VF release ~ VF close + VF release-resources
  * Release is the ultimate SW shutdown and is called whenever an
  * irrecoverable error is encountered.
@@ -2972,6 +3030,8 @@ void bnx2x_vf_release(struct bnx2x *bp, struct bnx2x_virtf *vf, bool block)
 		.block = block,
 	};
 	int rc;
+
+	DP(BNX2X_MSG_IOV, "PF releasing vf %d\n", vf->abs_vfid);
 	bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_RELEASE_VF);
 
 	rc = bnx2x_vfop_release_cmd(bp, vf, &cmd);
@@ -3000,6 +3060,12 @@ static inline void bnx2x_vf_get_bars(struct bnx2x *bp, struct bnx2x_virtf *vf,
 void bnx2x_lock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,
 			      enum channel_tlvs tlv)
 {
+	/* we don't lock the channel for unsupported tlvs */
+	if (!bnx2x_tlv_supported(tlv)) {
+		BNX2X_ERR("attempting to lock with unsupported tlv. Aborting\n");
+		return;
+	}
+
 	/* lock the channel */
 	mutex_lock(&vf->op_mutex);
 
@@ -3014,19 +3080,32 @@ void bnx2x_lock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,
 void bnx2x_unlock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,
 				enum channel_tlvs expected_tlv)
 {
+	enum channel_tlvs current_tlv;
+
+	if (!vf) {
+		BNX2X_ERR("VF was %p\n", vf);
+		return;
+	}
+
+	current_tlv = vf->op_current;
+
+	/* we don't unlock the channel for unsupported tlvs */
+	if (!bnx2x_tlv_supported(expected_tlv))
+		return;
+
 	WARN(expected_tlv != vf->op_current,
 	     "lock mismatch: expected %d found %d", expected_tlv,
 	     vf->op_current);
 
+	/* record the locking op */
+	vf->op_current = CHANNEL_TLV_NONE;
+
 	/* lock the channel */
 	mutex_unlock(&vf->op_mutex);
 
 	/* log the unlock */
 	DP(BNX2X_MSG_IOV, "VF[%d]: vf pf channel unlocked by %d\n",
 	   vf->abs_vfid, vf->op_current);
-
-	/* record the locking op */
-	vf->op_current = CHANNEL_TLV_NONE;
 }
 
 int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs_param)
@@ -3057,11 +3136,77 @@ int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs_param)
 		return bnx2x_enable_sriov(bp);
 	}
 }
+#define IGU_ENTRY_SIZE 4
 
 int bnx2x_enable_sriov(struct bnx2x *bp)
 {
 	int rc = 0, req_vfs = bp->requested_nr_virtfn;
+	int vf_idx, sb_idx, vfq_idx, qcount, first_vf;
+	u32 igu_entry, address;
+	u16 num_vf_queues;
+
+	if (req_vfs == 0)
+		return 0;
+
+	first_vf = bp->vfdb->sriov.first_vf_in_pf;
+
+	/* statically distribute vf sb pool between VFs */
+	num_vf_queues = min_t(u16, BNX2X_VF_MAX_QUEUES,
+			      BP_VFDB(bp)->vf_sbs_pool / req_vfs);
+
+	/* zero previous values learned from igu cam */
+	for (vf_idx = 0; vf_idx < req_vfs; vf_idx++) {
+		struct bnx2x_virtf *vf = BP_VF(bp, vf_idx);
+
+		vf->sb_count = 0;
+		vf_sb_count(BP_VF(bp, vf_idx)) = 0;
+	}
+	bp->vfdb->vf_sbs_pool = 0;
+
+	/* prepare IGU cam */
+	sb_idx = BP_VFDB(bp)->first_vf_igu_entry;
+	address = IGU_REG_MAPPING_MEMORY + sb_idx * IGU_ENTRY_SIZE;
+	for (vf_idx = first_vf; vf_idx < first_vf + req_vfs; vf_idx++) {
+		for (vfq_idx = 0; vfq_idx < num_vf_queues; vfq_idx++) {
+			igu_entry = vf_idx << IGU_REG_MAPPING_MEMORY_FID_SHIFT |
+				vfq_idx << IGU_REG_MAPPING_MEMORY_VECTOR_SHIFT |
+				IGU_REG_MAPPING_MEMORY_VALID;
+			DP(BNX2X_MSG_IOV, "assigning sb %d to vf %d\n",
+			   sb_idx, vf_idx);
+			REG_WR(bp, address, igu_entry);
+			sb_idx++;
+			address += IGU_ENTRY_SIZE;
+		}
+	}
+
+	/* Reinitialize vf database according to igu cam */
+	bnx2x_get_vf_igu_cam_info(bp);
+
+	DP(BNX2X_MSG_IOV, "vf_sbs_pool %d, num_vf_queues %d\n",
+	   BP_VFDB(bp)->vf_sbs_pool, num_vf_queues);
+
+	qcount = 0;
+	for_each_vf(bp, vf_idx) {
+		struct bnx2x_virtf *vf = BP_VF(bp, vf_idx);
 
+		/* set local queue arrays */
+		vf->vfqs = &bp->vfdb->vfqs[qcount];
+		qcount += vf_sb_count(vf);
+	}
+
+	/* prepare msix vectors in VF configuration space */
+	for (vf_idx = first_vf; vf_idx < first_vf + req_vfs; vf_idx++) {
+		bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf_idx));
+		REG_WR(bp, PCICFG_OFFSET + GRC_CONFIG_REG_VF_MSIX_CONTROL,
+		       num_vf_queues);
+	}
+	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
+
+	/* enable sriov. This will probe all the VFs, and consequentially cause
+	 * the "acquire" messages to appear on the VF PF channel.
+	 */
+	DP(BNX2X_MSG_IOV, "about to call enable sriov\n");
+	pci_disable_sriov(bp->pdev);
 	rc = pci_enable_sriov(bp->pdev, req_vfs);
 	if (rc) {
 		BNX2X_ERR("pci_enable_sriov failed with %d\n", rc);
@@ -3089,9 +3234,8 @@ void bnx2x_disable_sriov(struct bnx2x *bp)
 	pci_disable_sriov(bp->pdev);
 }
 
-static int bnx2x_vf_ndo_prep(struct bnx2x *bp, int vfidx,
-			     struct bnx2x_virtf **vf,
-			     struct pf_vf_bulletin_content **bulletin)
+int bnx2x_vf_ndo_prep(struct bnx2x *bp, int vfidx, struct bnx2x_virtf **vf,
+		      struct pf_vf_bulletin_content **bulletin)
 {
 	if (bp->state != BNX2X_STATE_OPEN) {
 		BNX2X_ERR("vf ndo called though PF is down\n");
@@ -3114,7 +3258,13 @@ static int bnx2x_vf_ndo_prep(struct bnx2x *bp, int vfidx,
 	*bulletin = BP_VF_BULLETIN(bp, vfidx);
 
 	if (!*vf) {
-		BNX2X_ERR("vf ndo called but vf was null. vfidx was %d\n",
+		BNX2X_ERR("vf ndo called but vf struct is null. vfidx was %d\n",
+			  vfidx);
+		return -EINVAL;
+	}
+
+	if (!(*vf)->vfqs) {
+		BNX2X_ERR("vf ndo called but vfqs struct is null. Was ndo invoked before dynamically enabling SR-IOV? vfidx was %d\n",
 			  vfidx);
 		return -EINVAL;
 	}
@@ -3142,8 +3292,8 @@ int bnx2x_get_vf_config(struct net_device *dev, int vfidx,
 	rc = bnx2x_vf_ndo_prep(bp, vfidx, &vf, &bulletin);
 	if (rc)
 		return rc;
-	mac_obj = &bnx2x_vfq(vf, 0, mac_obj);
-	vlan_obj = &bnx2x_vfq(vf, 0, vlan_obj);
+	mac_obj = &bnx2x_leading_vfq(vf, mac_obj);
+	vlan_obj = &bnx2x_leading_vfq(vf, vlan_obj);
 	if (!mac_obj || !vlan_obj) {
 		BNX2X_ERR("VF partially initialized\n");
 		return -EINVAL;
@@ -3155,10 +3305,13 @@ int bnx2x_get_vf_config(struct net_device *dev, int vfidx,
 	ivi->spoofchk = 1; /*always enabled */
 	if (vf->state == VF_ENABLED) {
 		/* mac and vlan are in vlan_mac objects */
-		mac_obj->get_n_elements(bp, mac_obj, 1, (u8 *)&ivi->mac,
-					0, ETH_ALEN);
-		vlan_obj->get_n_elements(bp, vlan_obj, 1, (u8 *)&ivi->vlan,
-					 0, VLAN_HLEN);
+		if (validate_vlan_mac(bp, &bnx2x_leading_vfq(vf, mac_obj)))
+			mac_obj->get_n_elements(bp, mac_obj, 1, (u8 *)&ivi->mac,
+						0, ETH_ALEN);
+		if (validate_vlan_mac(bp, &bnx2x_leading_vfq(vf, vlan_obj)))
+			vlan_obj->get_n_elements(bp, vlan_obj, 1,
+						 (u8 *)&ivi->vlan, 0,
+						 VLAN_HLEN);
 	} else {
 		/* mac */
 		if (bulletin->valid_bitmap & (1 << MAC_ADDR_VALID))
@@ -3226,14 +3379,18 @@ int bnx2x_set_vf_mac(struct net_device *dev, int vfidx, u8 *mac)
 		return rc;
 	}
 
-	/* is vf initialized and queue set up? */
 	q_logical_state =
-		bnx2x_get_q_logical_state(bp, &bnx2x_vfq(vf, 0, sp_obj));
+		bnx2x_get_q_logical_state(bp, &bnx2x_leading_vfq(vf, sp_obj));
 	if (vf->state == VF_ENABLED &&
 	    q_logical_state == BNX2X_Q_LOGICAL_STATE_ACTIVE) {
 		/* configure the mac in device on this vf's queue */
 		unsigned long ramrod_flags = 0;
-		struct bnx2x_vlan_mac_obj *mac_obj = &bnx2x_vfq(vf, 0, mac_obj);
+		struct bnx2x_vlan_mac_obj *mac_obj =
+			&bnx2x_leading_vfq(vf, mac_obj);
+
+		rc = validate_vlan_mac(bp, &bnx2x_leading_vfq(vf, mac_obj));
+		if (rc)
+			return rc;
 
 		/* must lock vfpf channel to protect against vf flows */
 		bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC);
@@ -3293,18 +3450,21 @@ int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos)
 
 	/* is vf initialized and queue set up? */
 	q_logical_state =
-		bnx2x_get_q_logical_state(bp, &bnx2x_vfq(vf, 0, sp_obj));
+		bnx2x_get_q_logical_state(bp, &bnx2x_leading_vfq(vf, sp_obj));
 	if (vf->state == VF_ENABLED &&
 	    q_logical_state == BNX2X_Q_LOGICAL_STATE_ACTIVE) {
 		/* configure the vlan in device on this vf's queue */
 		unsigned long ramrod_flags = 0;
 		unsigned long vlan_mac_flags = 0;
 		struct bnx2x_vlan_mac_obj *vlan_obj =
-			&bnx2x_vfq(vf, 0, vlan_obj);
+			&bnx2x_leading_vfq(vf, vlan_obj);
 		struct bnx2x_vlan_mac_ramrod_params ramrod_param;
 		struct bnx2x_queue_state_params q_params = {NULL};
 		struct bnx2x_queue_update_params *update_params;
 
+		rc = validate_vlan_mac(bp, &bnx2x_leading_vfq(vf, mac_obj));
+		if (rc)
+			return rc;
 		memset(&ramrod_param, 0, sizeof(ramrod_param));
 
 		/* must lock vfpf channel to protect against vf flows */
@@ -3324,7 +3484,7 @@ int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos)
 		 */
 		__set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
 		q_params.cmd = BNX2X_Q_CMD_UPDATE;
-		q_params.q_obj = &bnx2x_vfq(vf, 0, sp_obj);
+		q_params.q_obj = &bnx2x_leading_vfq(vf, sp_obj);
 		update_params = &q_params.params.update;
 		__set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG,
 			  &update_params->update_flags);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
index d143a7cdbbbe..8e9847fef861 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
@@ -81,6 +81,7 @@ struct bnx2x_vf_queue {
 	u32 cid;
 	u16 index;
 	u16 sb_idx;
+	bool is_leading;
 };
 
 /* struct bnx2x_vfop_qctor_params - prepare queue construction parameters:
@@ -194,6 +195,7 @@ struct bnx2x_virtf {
 #define VF_CFG_INT_SIMD		0x0008
 #define VF_CACHE_LINE		0x0010
 #define VF_CFG_VLAN		0x0020
+#define VF_CFG_STATS_COALESCE	0x0040
 
 	u8 state;
 #define VF_FREE		0	/* VF ready to be acquired holds no resc */
@@ -213,6 +215,7 @@ struct bnx2x_virtf {
 
 	/* dma */
 	dma_addr_t fw_stat_map;		/* valid iff VF_CFG_STATS */
+	u16 stats_stride;
 	dma_addr_t spq_map;
 	dma_addr_t bulletin_map;
 
@@ -239,7 +242,10 @@ struct bnx2x_virtf {
 	u8 igu_base_id;	/* base igu status block id */
 
 	struct bnx2x_vf_queue	*vfqs;
-#define bnx2x_vfq(vf, nr, var)	((vf)->vfqs[(nr)].var)
+#define LEADING_IDX			0
+#define bnx2x_vfq_is_leading(vfq)	((vfq)->index == LEADING_IDX)
+#define bnx2x_vfq(vf, nr, var)		((vf)->vfqs[(nr)].var)
+#define bnx2x_leading_vfq(vf, var)	((vf)->vfqs[LEADING_IDX].var)
 
 	u8 index; /* index in the vf array */
 	u8 abs_vfid;
@@ -358,6 +364,10 @@ struct bnx2x_vf_sp {
 		struct client_init_ramrod_data	init_data;
 		struct client_update_ramrod_data update_data;
 	} q_data;
+
+	union {
+		struct eth_rss_update_ramrod_data e2;
+	} rss_rdata;
 };
 
 struct hw_dma {
@@ -403,6 +413,10 @@ struct bnx2x_vfdb {
 
 #define FLRD_VFS_DWORDS (BNX2X_MAX_NUM_OF_VFS / 32)
 	u32 flrd_vfs[FLRD_VFS_DWORDS];
+
+	/* the number of msix vectors belonging to this PF designated for VFs */
+	u16 vf_sbs_pool;
+	u16 first_vf_igu_entry;
 };
 
 /* queue access */
@@ -411,11 +425,6 @@ static inline struct bnx2x_vf_queue *vfq_get(struct bnx2x_virtf *vf, u8 index)
 	return &(vf->vfqs[index]);
 }
 
-static inline bool vfq_is_leading(struct bnx2x_vf_queue *vfq)
-{
-	return (vfq->index == 0);
-}
-
 /* FW ids */
 static inline u8 vf_igu_sb(struct bnx2x_virtf *vf, u16 sb_idx)
 {
@@ -434,7 +443,10 @@ static u8 vfq_cl_id(struct bnx2x_virtf *vf, struct bnx2x_vf_queue *q)
 
 static inline u8 vfq_stat_id(struct bnx2x_virtf *vf, struct bnx2x_vf_queue *q)
 {
-	return vfq_cl_id(vf, q);
+	if (vf->cfg_flags & VF_CFG_STATS_COALESCE)
+		return vf->leading_rss;
+	else
+		return vfq_cl_id(vf, q);
 }
 
 static inline u8 vfq_qzone_id(struct bnx2x_virtf *vf, struct bnx2x_vf_queue *q)
@@ -691,6 +703,10 @@ int bnx2x_vfop_release_cmd(struct bnx2x *bp,
 			   struct bnx2x_virtf *vf,
 			   struct bnx2x_vfop_cmd *cmd);
 
+int bnx2x_vfop_rss_cmd(struct bnx2x *bp,
+		       struct bnx2x_virtf *vf,
+		       struct bnx2x_vfop_cmd *cmd);
+
 /* VF release ~ VF close + VF release-resources
  *
  * Release is the ultimate SW shutdown and is called whenever an
@@ -758,7 +774,7 @@ int bnx2x_enable_sriov(struct bnx2x *bp);
 void bnx2x_disable_sriov(struct bnx2x *bp);
 static inline int bnx2x_vf_headroom(struct bnx2x *bp)
 {
-	return bp->vfdb->sriov.nr_virtfn * BNX2X_CLIENTS_PER_VF;
+	return bp->vfdb->sriov.nr_virtfn * BNX2X_CIDS_PER_VF;
 }
 void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp);
 int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
index 2088063151d6..a7e88a405a43 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
@@ -257,17 +257,23 @@ int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count)
 
 		/* humble our request */
 		req->resc_request.num_txqs =
-			bp->acquire_resp.resc.num_txqs;
+			min(req->resc_request.num_txqs,
+			    bp->acquire_resp.resc.num_txqs);
 		req->resc_request.num_rxqs =
-			bp->acquire_resp.resc.num_rxqs;
+			min(req->resc_request.num_rxqs,
+			    bp->acquire_resp.resc.num_rxqs);
 		req->resc_request.num_sbs =
-			bp->acquire_resp.resc.num_sbs;
+			min(req->resc_request.num_sbs,
+			    bp->acquire_resp.resc.num_sbs);
 		req->resc_request.num_mac_filters =
-			bp->acquire_resp.resc.num_mac_filters;
+			min(req->resc_request.num_mac_filters,
+			    bp->acquire_resp.resc.num_mac_filters);
 		req->resc_request.num_vlan_filters =
-			bp->acquire_resp.resc.num_vlan_filters;
+			min(req->resc_request.num_vlan_filters,
+			    bp->acquire_resp.resc.num_vlan_filters);
 		req->resc_request.num_mc_filters =
-			bp->acquire_resp.resc.num_mc_filters;
+			min(req->resc_request.num_mc_filters,
+			    bp->acquire_resp.resc.num_mc_filters);
 
 		/* Clear response buffer */
 		memset(&bp->vf2pf_mbox->resp, 0,
@@ -293,7 +299,7 @@ int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count)
 	bp->common.flash_size = 0;
 	bp->flags |=
 		NO_WOL_FLAG | NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG | NO_FCOE_FLAG;
-	bp->igu_sb_cnt = 1;
+	bp->igu_sb_cnt = bp->acquire_resp.resc.num_sbs;
 	bp->igu_base_sb = bp->acquire_resp.resc.hw_sbs[0].hw_sb_id;
 	strlcpy(bp->fw_ver, bp->acquire_resp.pfdev_info.fw_ver,
 		sizeof(bp->fw_ver));
@@ -452,6 +458,53 @@ free_irq:
 	bnx2x_free_irq(bp);
 }
 
+static void bnx2x_leading_vfq_init(struct bnx2x *bp, struct bnx2x_virtf *vf,
+				   struct bnx2x_vf_queue *q)
+{
+	u8 cl_id = vfq_cl_id(vf, q);
+	u8 func_id = FW_VF_HANDLE(vf->abs_vfid);
+
+	/* mac */
+	bnx2x_init_mac_obj(bp, &q->mac_obj,
+			   cl_id, q->cid, func_id,
+			   bnx2x_vf_sp(bp, vf, mac_rdata),
+			   bnx2x_vf_sp_map(bp, vf, mac_rdata),
+			   BNX2X_FILTER_MAC_PENDING,
+			   &vf->filter_state,
+			   BNX2X_OBJ_TYPE_RX_TX,
+			   &bp->macs_pool);
+	/* vlan */
+	bnx2x_init_vlan_obj(bp, &q->vlan_obj,
+			    cl_id, q->cid, func_id,
+			    bnx2x_vf_sp(bp, vf, vlan_rdata),
+			    bnx2x_vf_sp_map(bp, vf, vlan_rdata),
+			    BNX2X_FILTER_VLAN_PENDING,
+			    &vf->filter_state,
+			    BNX2X_OBJ_TYPE_RX_TX,
+			    &bp->vlans_pool);
+
+	/* mcast */
+	bnx2x_init_mcast_obj(bp, &vf->mcast_obj, cl_id,
+			     q->cid, func_id, func_id,
+			     bnx2x_vf_sp(bp, vf, mcast_rdata),
+			     bnx2x_vf_sp_map(bp, vf, mcast_rdata),
+			     BNX2X_FILTER_MCAST_PENDING,
+			     &vf->filter_state,
+			     BNX2X_OBJ_TYPE_RX_TX);
+
+	/* rss */
+	bnx2x_init_rss_config_obj(bp, &vf->rss_conf_obj, cl_id, q->cid,
+				  func_id, func_id,
+				  bnx2x_vf_sp(bp, vf, rss_rdata),
+				  bnx2x_vf_sp_map(bp, vf, rss_rdata),
+				  BNX2X_FILTER_RSS_CONF_PENDING,
+				  &vf->filter_state,
+				  BNX2X_OBJ_TYPE_RX_TX);
+
+	vf->leading_rss = cl_id;
+	q->is_leading = true;
+}
+
 /* ask the pf to open a queue for the vf */
 int bnx2x_vfpf_setup_q(struct bnx2x *bp, int fp_idx)
 {
@@ -948,7 +1001,7 @@ static void bnx2x_vf_mbx_acquire_resp(struct bnx2x *bp, struct bnx2x_virtf *vf,
 
 	/* fill in pfdev info */
 	resp->pfdev_info.chip_num = bp->common.chip_id;
-	resp->pfdev_info.db_size = (1 << BNX2X_DB_SHIFT);
+	resp->pfdev_info.db_size = bp->db_size;
 	resp->pfdev_info.indices_per_sb = HC_SB_MAX_INDICES_E2;
 	resp->pfdev_info.pf_cap = (PFVF_CAP_RSS |
 				   /* PFVF_CAP_DHC |*/ PFVF_CAP_TPA);
@@ -1054,8 +1107,13 @@ static void bnx2x_vf_mbx_init_vf(struct bnx2x *bp, struct bnx2x_virtf *vf,
1054 /* record ghost addresses from vf message */ 1107 /* record ghost addresses from vf message */
1055 vf->spq_map = init->spq_addr; 1108 vf->spq_map = init->spq_addr;
1056 vf->fw_stat_map = init->stats_addr; 1109 vf->fw_stat_map = init->stats_addr;
1110 vf->stats_stride = init->stats_stride;
1057 vf->op_rc = bnx2x_vf_init(bp, vf, (dma_addr_t *)init->sb_addr); 1111 vf->op_rc = bnx2x_vf_init(bp, vf, (dma_addr_t *)init->sb_addr);
1058 1112
1113 /* set VF multiqueue statistics collection mode */
1114 if (init->flags & VFPF_INIT_FLG_STATS_COALESCE)
1115 vf->cfg_flags |= VF_CFG_STATS_COALESCE;
1116
1059 /* response */ 1117 /* response */
1060 bnx2x_vf_mbx_resp(bp, vf); 1118 bnx2x_vf_mbx_resp(bp, vf);
1061} 1119}
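
Recording stats_stride alongside stats_addr lets the PF place each queue's counters at a fixed stride from the base the VF supplied, which is what makes coalesced multiqueue statistics possible. A sketch of that strided layout, assuming the coalesced mode described by VFPF_INIT_FLG_STATS_COALESCE (addresses are illustrative):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t stats_addr = 0x100000; /* base the VF passed in its INIT tlv */
	uint16_t stats_stride = 0x80;   /* bytes between per-queue blocks */
	unsigned q;

	/* queue q's counters land at base + q * stride */
	for (q = 0; q < 4; q++)
		printf("queue %u stats at 0x%llx\n", q,
		       (unsigned long long)(stats_addr + (uint64_t)q * stats_stride));
	return 0;
}
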
@@ -1080,6 +1138,8 @@ static void bnx2x_vf_mbx_set_q_flags(struct bnx2x *bp, u32 mbx_q_flags,
1080 __set_bit(BNX2X_Q_FLG_HC, sp_q_flags); 1138 __set_bit(BNX2X_Q_FLG_HC, sp_q_flags);
1081 if (mbx_q_flags & VFPF_QUEUE_FLG_DHC) 1139 if (mbx_q_flags & VFPF_QUEUE_FLG_DHC)
1082 __set_bit(BNX2X_Q_FLG_DHC, sp_q_flags); 1140 __set_bit(BNX2X_Q_FLG_DHC, sp_q_flags);
1141 if (mbx_q_flags & VFPF_QUEUE_FLG_LEADING_RSS)
1142 __set_bit(BNX2X_Q_FLG_LEADING_RSS, sp_q_flags);
1083 1143
1084 /* outer vlan removal is set according to PF's multi function mode */ 1144 /* outer vlan removal is set according to PF's multi function mode */
1085 if (IS_MF_SD(bp)) 1145 if (IS_MF_SD(bp))
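
Each mailbox queue flag is translated bit by bit into the PF's internal flag bitmap, so an older VF that never sets VFPF_QUEUE_FLG_LEADING_RSS simply gets no leading-RSS role and nothing else changes. A sketch of that translation pattern (the flag values here are made up):

#include <stdio.h>

/* made-up flag values for the example */
#define MBX_FLG_HC          0x0100
#define MBX_FLG_LEADING_RSS 0x0400
#define SP_FLG_HC           (1u << 0)
#define SP_FLG_LEADING_RSS  (1u << 1)

static unsigned translate_q_flags(unsigned mbx_flags)
{
	unsigned sp_flags = 0;

	/* unknown bits from older/newer peers are simply ignored */
	if (mbx_flags & MBX_FLG_HC)
		sp_flags |= SP_FLG_HC;
	if (mbx_flags & MBX_FLG_LEADING_RSS)
		sp_flags |= SP_FLG_LEADING_RSS;
	return sp_flags;
}

int main(void)
{
	printf("sp flags = 0x%x\n",
	       translate_q_flags(MBX_FLG_HC | MBX_FLG_LEADING_RSS));
	return 0;
}
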
@@ -1113,6 +1173,9 @@ static void bnx2x_vf_mbx_setup_q(struct bnx2x *bp, struct bnx2x_virtf *vf,
1113 struct bnx2x_queue_init_params *init_p; 1173 struct bnx2x_queue_init_params *init_p;
1114 struct bnx2x_queue_setup_params *setup_p; 1174 struct bnx2x_queue_setup_params *setup_p;
1115 1175
1176 if (bnx2x_vfq_is_leading(q))
1177 bnx2x_leading_vfq_init(bp, vf, q);
1178
1116 /* re-init the VF operation context */ 1179 /* re-init the VF operation context */
1117 memset(&vf->op_params.qctor, 0, sizeof(vf->op_params.qctor)); 1180 memset(&vf->op_params.qctor, 0, sizeof(vf->op_params.qctor));
1118 setup_p = &vf->op_params.qctor.prep_qsetup; 1181 setup_p = &vf->op_params.qctor.prep_qsetup;
@@ -1552,6 +1615,68 @@ static void bnx2x_vf_mbx_release_vf(struct bnx2x *bp, struct bnx2x_virtf *vf,
1552 bnx2x_vf_mbx_resp(bp, vf); 1615 bnx2x_vf_mbx_resp(bp, vf);
1553} 1616}
1554 1617
1618static void bnx2x_vf_mbx_update_rss(struct bnx2x *bp, struct bnx2x_virtf *vf,
1619 struct bnx2x_vf_mbx *mbx)
1620{
1621 struct bnx2x_vfop_cmd cmd = {
1622 .done = bnx2x_vf_mbx_resp,
1623 .block = false,
1624 };
1625 struct bnx2x_config_rss_params *vf_op_params = &vf->op_params.rss;
1626 struct vfpf_rss_tlv *rss_tlv = &mbx->msg->req.update_rss;
1627
1628 if (rss_tlv->ind_table_size != T_ETH_INDIRECTION_TABLE_SIZE ||
1629 rss_tlv->rss_key_size != T_ETH_RSS_KEY) {
1630 BNX2X_ERR("failing rss configuration of vf %d due to size mismatch\n",
1631 vf->index);
1632 vf->op_rc = -EINVAL;
1633 goto mbx_resp;
1634 }
1635
1636 /* set vfop params according to rss tlv */
1637 memcpy(vf_op_params->ind_table, rss_tlv->ind_table,
1638 T_ETH_INDIRECTION_TABLE_SIZE);
1639 memcpy(vf_op_params->rss_key, rss_tlv->rss_key,
1640 sizeof(rss_tlv->rss_key));
1641 vf_op_params->rss_obj = &vf->rss_conf_obj;
1642 vf_op_params->rss_result_mask = rss_tlv->rss_result_mask;
1643
 1644 /* flags handled individually for backward/forward compatibility */
1645 if (rss_tlv->rss_flags & VFPF_RSS_MODE_DISABLED)
1646 __set_bit(BNX2X_RSS_MODE_DISABLED, &vf_op_params->rss_flags);
1647 if (rss_tlv->rss_flags & VFPF_RSS_MODE_REGULAR)
1648 __set_bit(BNX2X_RSS_MODE_REGULAR, &vf_op_params->rss_flags);
1649 if (rss_tlv->rss_flags & VFPF_RSS_SET_SRCH)
1650 __set_bit(BNX2X_RSS_SET_SRCH, &vf_op_params->rss_flags);
1651 if (rss_tlv->rss_flags & VFPF_RSS_IPV4)
1652 __set_bit(BNX2X_RSS_IPV4, &vf_op_params->rss_flags);
1653 if (rss_tlv->rss_flags & VFPF_RSS_IPV4_TCP)
1654 __set_bit(BNX2X_RSS_IPV4_TCP, &vf_op_params->rss_flags);
1655 if (rss_tlv->rss_flags & VFPF_RSS_IPV4_UDP)
1656 __set_bit(BNX2X_RSS_IPV4_UDP, &vf_op_params->rss_flags);
1657 if (rss_tlv->rss_flags & VFPF_RSS_IPV6)
1658 __set_bit(BNX2X_RSS_IPV6, &vf_op_params->rss_flags);
1659 if (rss_tlv->rss_flags & VFPF_RSS_IPV6_TCP)
1660 __set_bit(BNX2X_RSS_IPV6_TCP, &vf_op_params->rss_flags);
1661 if (rss_tlv->rss_flags & VFPF_RSS_IPV6_UDP)
1662 __set_bit(BNX2X_RSS_IPV6_UDP, &vf_op_params->rss_flags);
1663
1664 if ((!(rss_tlv->rss_flags & VFPF_RSS_IPV4_TCP) &&
1665 rss_tlv->rss_flags & VFPF_RSS_IPV4_UDP) ||
1666 (!(rss_tlv->rss_flags & VFPF_RSS_IPV6_TCP) &&
1667 rss_tlv->rss_flags & VFPF_RSS_IPV6_UDP)) {
1668 BNX2X_ERR("about to hit a FW assert. aborting...\n");
1669 vf->op_rc = -EINVAL;
1670 goto mbx_resp;
1671 }
1672
1673 vf->op_rc = bnx2x_vfop_rss_cmd(bp, vf, &cmd);
1674
1675mbx_resp:
1676 if (vf->op_rc)
1677 bnx2x_vf_mbx_resp(bp, vf);
1678}
1679
1555/* dispatch request */ 1680/* dispatch request */
1556static void bnx2x_vf_mbx_request(struct bnx2x *bp, struct bnx2x_virtf *vf, 1681static void bnx2x_vf_mbx_request(struct bnx2x *bp, struct bnx2x_virtf *vf,
1557 struct bnx2x_vf_mbx *mbx) 1682 struct bnx2x_vf_mbx *mbx)
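
bnx2x_vf_mbx_update_rss refuses a TLV whose table or key size disagrees with the firmware's, and refuses UDP hashing without the matching TCP bit, since that combination trips a firmware assert. A standalone sketch of the same checks (sizes and flag values are illustrative constants, not the firmware's):

#include <stdio.h>

#define IND_TABLE_SIZE 128
#define RSS_KEY_WORDS  10
#define F_IPV4_TCP (1u << 4)
#define F_IPV4_UDP (1u << 5)
#define F_IPV6_TCP (1u << 7)
#define F_IPV6_UDP (1u << 8)

static int rss_tlv_valid(unsigned tbl_size, unsigned key_size, unsigned flags)
{
	if (tbl_size != IND_TABLE_SIZE || key_size != RSS_KEY_WORDS)
		return 0; /* size mismatch with what the FW expects */
	if ((flags & F_IPV4_UDP) && !(flags & F_IPV4_TCP))
		return 0; /* UDP hashing without TCP trips a FW assert */
	if ((flags & F_IPV6_UDP) && !(flags & F_IPV6_TCP))
		return 0;
	return 1;
}

int main(void)
{
	printf("%d\n", rss_tlv_valid(IND_TABLE_SIZE, RSS_KEY_WORDS,
				     F_IPV4_TCP | F_IPV4_UDP)); /* 1: ok */
	printf("%d\n", rss_tlv_valid(IND_TABLE_SIZE, RSS_KEY_WORDS,
				     F_IPV4_UDP));              /* 0: rejected */
	return 0;
}
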
@@ -1588,6 +1713,9 @@ static void bnx2x_vf_mbx_request(struct bnx2x *bp, struct bnx2x_virtf *vf,
1588 case CHANNEL_TLV_RELEASE: 1713 case CHANNEL_TLV_RELEASE:
1589 bnx2x_vf_mbx_release_vf(bp, vf, mbx); 1714 bnx2x_vf_mbx_release_vf(bp, vf, mbx);
1590 break; 1715 break;
1716 case CHANNEL_TLV_UPDATE_RSS:
1717 bnx2x_vf_mbx_update_rss(bp, vf, mbx);
1718 break;
1591 } 1719 }
1592 1720
1593 } else { 1721 } else {
@@ -1607,7 +1735,7 @@ static void bnx2x_vf_mbx_request(struct bnx2x *bp, struct bnx2x_virtf *vf,
1607 /* test whether we can respond to the VF (do we have an address 1735 /* test whether we can respond to the VF (do we have an address
1608 * for it?) 1736 * for it?)
1609 */ 1737 */
1610 if (vf->state == VF_ACQUIRED) { 1738 if (vf->state == VF_ACQUIRED || vf->state == VF_ENABLED) {
1611 /* mbx_resp uses the op_rc of the VF */ 1739 /* mbx_resp uses the op_rc of the VF */
1612 vf->op_rc = PFVF_STATUS_NOT_SUPPORTED; 1740 vf->op_rc = PFVF_STATUS_NOT_SUPPORTED;
1613 1741
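
The widened state test means a VF that is fully up (VF_ENABLED), not just acquired, also gets an explicit PFVF_STATUS_NOT_SUPPORTED reply to an unknown TLV instead of silence. A sketch of that gating (states and names are illustrative):

#include <stdio.h>

enum vf_state { VF_FREE, VF_ACQUIRED, VF_ENABLED }; /* illustrative states */

static void handle_unknown_tlv(enum vf_state state)
{
	/* only VFs past ACQUIRE have given the PF a response address */
	if (state == VF_ACQUIRED || state == VF_ENABLED)
		printf("reply PFVF_STATUS_NOT_SUPPORTED\n");
	else
		printf("no mailbox address known; drop silently\n");
}

int main(void)
{
	handle_unknown_tlv(VF_FREE);
	handle_unknown_tlv(VF_ENABLED);
	return 0;
}
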
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h
index f3ad174a3a63..1179fe06d0c7 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h
@@ -51,6 +51,7 @@ struct hw_sb_info {
51#define VFPF_QUEUE_FLG_COS 0x0080 51#define VFPF_QUEUE_FLG_COS 0x0080
52#define VFPF_QUEUE_FLG_HC 0x0100 52#define VFPF_QUEUE_FLG_HC 0x0100
53#define VFPF_QUEUE_FLG_DHC 0x0200 53#define VFPF_QUEUE_FLG_DHC 0x0200
54#define VFPF_QUEUE_FLG_LEADING_RSS 0x0400
54 55
55#define VFPF_QUEUE_DROP_IP_CS_ERR (1 << 0) 56#define VFPF_QUEUE_DROP_IP_CS_ERR (1 << 0)
56#define VFPF_QUEUE_DROP_TCP_CS_ERR (1 << 1) 57#define VFPF_QUEUE_DROP_TCP_CS_ERR (1 << 1)
@@ -131,6 +132,27 @@ struct vfpf_q_op_tlv {
131 u8 padding[3]; 132 u8 padding[3];
132}; 133};
133 134
135/* receive side scaling tlv */
136struct vfpf_rss_tlv {
137 struct vfpf_first_tlv first_tlv;
138 u32 rss_flags;
139#define VFPF_RSS_MODE_DISABLED (1 << 0)
140#define VFPF_RSS_MODE_REGULAR (1 << 1)
141#define VFPF_RSS_SET_SRCH (1 << 2)
142#define VFPF_RSS_IPV4 (1 << 3)
143#define VFPF_RSS_IPV4_TCP (1 << 4)
144#define VFPF_RSS_IPV4_UDP (1 << 5)
145#define VFPF_RSS_IPV6 (1 << 6)
146#define VFPF_RSS_IPV6_TCP (1 << 7)
147#define VFPF_RSS_IPV6_UDP (1 << 8)
148 u8 rss_result_mask;
149 u8 ind_table_size;
150 u8 rss_key_size;
151 u8 padding;
152 u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE];
153 u32 rss_key[T_ETH_RSS_KEY]; /* hash values */
154};
155
134/* acquire response tlv - carries the allocated resources */ 156/* acquire response tlv - carries the allocated resources */
135struct pfvf_acquire_resp_tlv { 157struct pfvf_acquire_resp_tlv {
136 struct pfvf_tlv hdr; 158 struct pfvf_tlv hdr;
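
A VF fills vfpf_rss_tlv with its hash key and an indirection table mapping each hash bucket to one of its queues' client ids. A hedged sketch of populating such a structure; the struct, sizes, and helper below are local stand-ins, not the driver's wire format:

#include <string.h>

#define IND_TABLE_SIZE 128
#define RSS_KEY_WORDS  10

struct rss_req {
	unsigned rss_flags;
	unsigned char rss_result_mask;
	unsigned char ind_table_size, rss_key_size;
	unsigned char ind_table[IND_TABLE_SIZE];
	unsigned rss_key[RSS_KEY_WORDS];
};

static void fill_rss_req(struct rss_req *t, const unsigned *key,
			 unsigned char first_cl_id, unsigned num_queues)
{
	unsigned i;

	memset(t, 0, sizeof(*t));
	t->ind_table_size = IND_TABLE_SIZE;
	t->rss_key_size = RSS_KEY_WORDS;
	t->rss_result_mask = 0x7f; /* hash & 0x7f indexes the 128-entry table */
	memcpy(t->rss_key, key, sizeof(t->rss_key));

	/* spread hash buckets round-robin across the VF's queues */
	for (i = 0; i < IND_TABLE_SIZE; i++)
		t->ind_table[i] = first_cl_id + i % num_queues;
}

int main(void)
{
	static const unsigned key[RSS_KEY_WORDS] = { 0x6d5a6d5a };
	struct rss_req req;

	fill_rss_req(&req, key, 8, 4); /* 4 queues, client ids 8..11 */
	return req.ind_table[5] == 9 ? 0 : 1;
}
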
@@ -166,12 +188,20 @@ struct pfvf_acquire_resp_tlv {
166 } resc; 188 } resc;
167}; 189};
168 190
 191#define VFPF_INIT_FLG_STATS_COALESCE (1 << 0) /* when set, the VF queues'
 192 * stats will be coalesced on
 193 * the leading RSS queue
 194 */
195
169/* Init VF */ 196/* Init VF */
170struct vfpf_init_tlv { 197struct vfpf_init_tlv {
171 struct vfpf_first_tlv first_tlv; 198 struct vfpf_first_tlv first_tlv;
172 aligned_u64 sb_addr[PFVF_MAX_SBS_PER_VF]; /* vf_sb based */ 199 aligned_u64 sb_addr[PFVF_MAX_SBS_PER_VF]; /* vf_sb based */
173 aligned_u64 spq_addr; 200 aligned_u64 spq_addr;
174 aligned_u64 stats_addr; 201 aligned_u64 stats_addr;
202 u16 stats_stride;
203 u32 flags;
204 u32 padding[2];
175}; 205};
176 206
177/* Setup Queue */ 207/* Setup Queue */
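
The fields appended to vfpf_init_tlv come with explicit padding so the structure keeps a stable, 8-byte-multiple size on the wire. A sketch of guarding that invariant at compile time, using a local stand-in struct (the exact alignment requirement is this sketch's assumption):

#include <stdint.h>
#include <assert.h>

/* local stand-in for the tail of the INIT tlv */
struct init_tlv_tail {
	uint64_t spq_addr;
	uint64_t stats_addr;
	uint16_t stats_stride;
	uint32_t flags;
	uint32_t padding[2];
};

/* wire structs should keep an 8-byte-multiple size when fields are added */
static_assert(sizeof(struct init_tlv_tail) % 8 == 0,
	      "init tlv tail must stay 8-byte aligned");

int main(void) { return 0; }
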
@@ -293,13 +323,14 @@ union vfpf_tlvs {
293 struct vfpf_q_op_tlv q_op; 323 struct vfpf_q_op_tlv q_op;
294 struct vfpf_setup_q_tlv setup_q; 324 struct vfpf_setup_q_tlv setup_q;
295 struct vfpf_set_q_filters_tlv set_q_filters; 325 struct vfpf_set_q_filters_tlv set_q_filters;
296 struct vfpf_release_tlv release; 326 struct vfpf_release_tlv release;
297 struct channel_list_end_tlv list_end; 327 struct vfpf_rss_tlv update_rss;
328 struct channel_list_end_tlv list_end;
298 struct tlv_buffer_size tlv_buf_size; 329 struct tlv_buffer_size tlv_buf_size;
299}; 330};
300 331
301union pfvf_tlvs { 332union pfvf_tlvs {
302 struct pfvf_general_resp_tlv general_resp; 333 struct pfvf_general_resp_tlv general_resp;
303 struct pfvf_acquire_resp_tlv acquire_resp; 334 struct pfvf_acquire_resp_tlv acquire_resp;
304 struct channel_list_end_tlv list_end; 335 struct channel_list_end_tlv list_end;
305 struct tlv_buffer_size tlv_buf_size; 336 struct tlv_buffer_size tlv_buf_size;
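
Because every request is a member of union vfpf_tlvs, the mailbox buffer can be sized once as sizeof(union vfpf_tlvs), and adding update_rss automatically grows that bound. A sketch of the pattern with placeholder message types:

#include <stdio.h>

/* placeholder message types standing in for the tlv structs */
struct msg_small { unsigned char body[64]; };
struct msg_large { unsigned char body[300]; }; /* e.g. an RSS request */

union all_requests {
	struct msg_small small;
	struct msg_large large;
};

int main(void)
{
	/* one buffer sized for the largest request, present or future */
	printf("mailbox buffer needs %zu bytes\n", sizeof(union all_requests));
	return 0;
}
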
@@ -355,14 +386,18 @@ enum channel_tlvs {
355 CHANNEL_TLV_INIT, 386 CHANNEL_TLV_INIT,
356 CHANNEL_TLV_SETUP_Q, 387 CHANNEL_TLV_SETUP_Q,
357 CHANNEL_TLV_SET_Q_FILTERS, 388 CHANNEL_TLV_SET_Q_FILTERS,
389 CHANNEL_TLV_ACTIVATE_Q,
390 CHANNEL_TLV_DEACTIVATE_Q,
358 CHANNEL_TLV_TEARDOWN_Q, 391 CHANNEL_TLV_TEARDOWN_Q,
359 CHANNEL_TLV_CLOSE, 392 CHANNEL_TLV_CLOSE,
360 CHANNEL_TLV_RELEASE, 393 CHANNEL_TLV_RELEASE,
394 CHANNEL_TLV_UPDATE_RSS_DEPRECATED,
361 CHANNEL_TLV_PF_RELEASE_VF, 395 CHANNEL_TLV_PF_RELEASE_VF,
362 CHANNEL_TLV_LIST_END, 396 CHANNEL_TLV_LIST_END,
363 CHANNEL_TLV_FLR, 397 CHANNEL_TLV_FLR,
364 CHANNEL_TLV_PF_SET_MAC, 398 CHANNEL_TLV_PF_SET_MAC,
365 CHANNEL_TLV_PF_SET_VLAN, 399 CHANNEL_TLV_PF_SET_VLAN,
400 CHANNEL_TLV_UPDATE_RSS,
366 CHANNEL_TLV_MAX 401 CHANNEL_TLV_MAX
367}; 402};
368 403
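
These enum values travel on the VF-PF channel, so the stale RSS slot is parked as CHANNEL_TLV_UPDATE_RSS_DEPRECATED and the live opcode is appended before CHANNEL_TLV_MAX; renumbering existing entries would break mixed old/new driver pairs. A sketch of that wire-safe evolution with illustrative opcodes:

/* illustrative opcodes mirroring the deprecation pattern above */
enum chan_opcodes {
	OP_ACQUIRE,               /* 0: existing values must never move */
	OP_RELEASE,               /* 1 */
	OP_UPDATE_RSS_DEPRECATED, /* 2: slot parked so later values stay put */
	OP_LIST_END,              /* 3 */
	OP_UPDATE_RSS,            /* 4: new opcodes only ever append */
	OP_MAX
};

int main(void)
{
	return OP_UPDATE_RSS == 4 ? 0 : 1;
}
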