about summary refs log tree commit diff stats
path: root/drivers/net
diff options
context:
space:
mode:
authorMichal Kalderon <michals@broadcom.com>2014-02-12 11:19:53 -0500
committerDavid S. Miller <davem@davemloft.net>2014-02-12 19:15:41 -0500
commit14a94ebd48c12f1aee7495c1a1518c33efd3647c (patch)
treea4d58048d32c0f771b4ead463adf24e55144653c /drivers/net
parentba72f32cb8ad1963ba55b30860971f4e2b0ec5e8 (diff)
bnx2x: Add support in PF driver for RSC
This provides PF-side support for VFs assigned to a VM running windows 2012 with the RSC feature enabled. Signed-off-by: Michal Kalderon <michals@broadcom.com> Signed-off-by: Yuval Mintz <yuvalmin@broadcom.com> Signed-off-by: Ariel Elior <ariele@broadcom.com> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net')
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x.h1
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c21
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c145
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h19
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c82
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h12
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c75
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h22
8 files changed, 329 insertions, 48 deletions
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index ae91e8f43622..c871d19ab6e1 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -1270,6 +1270,7 @@ struct bnx2x_slowpath {
1270 union { 1270 union {
1271 struct client_init_ramrod_data init_data; 1271 struct client_init_ramrod_data init_data;
1272 struct client_update_ramrod_data update_data; 1272 struct client_update_ramrod_data update_data;
1273 struct tpa_update_ramrod_data tpa_data;
1273 } q_rdata; 1274 } q_rdata;
1274 1275
1275 union { 1276 union {
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 38f04018110b..56a7d3f2128a 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -1814,6 +1814,11 @@ void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe)
1814 drv_cmd = BNX2X_Q_CMD_EMPTY; 1814 drv_cmd = BNX2X_Q_CMD_EMPTY;
1815 break; 1815 break;
1816 1816
1817 case (RAMROD_CMD_ID_ETH_TPA_UPDATE):
1818 DP(BNX2X_MSG_SP, "got tpa update ramrod CID=%d\n", cid);
1819 drv_cmd = BNX2X_Q_CMD_UPDATE_TPA;
1820 break;
1821
1817 default: 1822 default:
1818 BNX2X_ERR("unexpected MC reply (%d) on fp[%d]\n", 1823 BNX2X_ERR("unexpected MC reply (%d) on fp[%d]\n",
1819 command, fp->index); 1824 command, fp->index);
@@ -3644,10 +3649,18 @@ int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
3644 cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) | 3649 cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) |
3645 HW_CID(bp, cid)); 3650 HW_CID(bp, cid));
3646 3651
3647 type = (cmd_type << SPE_HDR_CONN_TYPE_SHIFT) & SPE_HDR_CONN_TYPE; 3652 /* In some cases, type may already contain the func-id
3648 3653 * mainly in SRIOV related use cases, so we add it here only
3649 type |= ((BP_FUNC(bp) << SPE_HDR_FUNCTION_ID_SHIFT) & 3654 * if it's not already set.
3650 SPE_HDR_FUNCTION_ID); 3655 */
3656 if (!(cmd_type & SPE_HDR_FUNCTION_ID)) {
3657 type = (cmd_type << SPE_HDR_CONN_TYPE_SHIFT) &
3658 SPE_HDR_CONN_TYPE;
3659 type |= ((BP_FUNC(bp) << SPE_HDR_FUNCTION_ID_SHIFT) &
3660 SPE_HDR_FUNCTION_ID);
3661 } else {
3662 type = cmd_type;
3663 }
3651 3664
3652 spe->hdr.type = cpu_to_le16(type); 3665 spe->hdr.type = cpu_to_le16(type);
3653 3666
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
index 0fb6ff2ac8e3..270ba195a56b 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
@@ -2277,11 +2277,11 @@ static int bnx2x_set_rx_mode_e2(struct bnx2x *bp,
2277 data->header.rule_cnt, p->rx_accept_flags, 2277 data->header.rule_cnt, p->rx_accept_flags,
2278 p->tx_accept_flags); 2278 p->tx_accept_flags);
2279 2279
2280 /* No need for an explicit memory barrier here as long we would 2280 /* No need for an explicit memory barrier here as long as we
2281 * need to ensure the ordering of writing to the SPQ element 2281 * ensure the ordering of writing to the SPQ element
2282 * and updating of the SPQ producer which involves a memory 2282 * and updating of the SPQ producer which involves a memory
2283 * read and we will have to put a full memory barrier there 2283 * read. If the memory read is removed we will have to put a
2284 * (inside bnx2x_sp_post()). 2284 * full memory barrier there (inside bnx2x_sp_post()).
2285 */ 2285 */
2286 2286
2287 /* Send a ramrod */ 2287 /* Send a ramrod */
@@ -2982,11 +2982,11 @@ static int bnx2x_mcast_setup_e2(struct bnx2x *bp,
2982 raw->clear_pending(raw); 2982 raw->clear_pending(raw);
2983 return 0; 2983 return 0;
2984 } else { 2984 } else {
2985 /* No need for an explicit memory barrier here as long we would 2985 /* No need for an explicit memory barrier here as long as we
2986 * need to ensure the ordering of writing to the SPQ element 2986 * ensure the ordering of writing to the SPQ element
2987 * and updating of the SPQ producer which involves a memory 2987 * and updating of the SPQ producer which involves a memory
2988 * read and we will have to put a full memory barrier there 2988 * read. If the memory read is removed we will have to put a
2989 * (inside bnx2x_sp_post()). 2989 * full memory barrier there (inside bnx2x_sp_post()).
2990 */ 2990 */
2991 2991
2992 /* Send a ramrod */ 2992 /* Send a ramrod */
@@ -3466,11 +3466,11 @@ static int bnx2x_mcast_setup_e1(struct bnx2x *bp,
3466 raw->clear_pending(raw); 3466 raw->clear_pending(raw);
3467 return 0; 3467 return 0;
3468 } else { 3468 } else {
3469 /* No need for an explicit memory barrier here as long we would 3469 /* No need for an explicit memory barrier here as long as we
3470 * need to ensure the ordering of writing to the SPQ element 3470 * ensure the ordering of writing to the SPQ element
3471 * and updating of the SPQ producer which involves a memory 3471 * and updating of the SPQ producer which involves a memory
3472 * read and we will have to put a full memory barrier there 3472 * read. If the memory read is removed we will have to put a
3473 * (inside bnx2x_sp_post()). 3473 * full memory barrier there (inside bnx2x_sp_post()).
3474 */ 3474 */
3475 3475
3476 /* Send a ramrod */ 3476 /* Send a ramrod */
@@ -4091,11 +4091,11 @@ static int bnx2x_setup_rss(struct bnx2x *bp,
4091 data->capabilities |= ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY; 4091 data->capabilities |= ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY;
4092 } 4092 }
4093 4093
4094 /* No need for an explicit memory barrier here as long we would 4094 /* No need for an explicit memory barrier here as long as we
4095 * need to ensure the ordering of writing to the SPQ element 4095 * ensure the ordering of writing to the SPQ element
4096 * and updating of the SPQ producer which involves a memory 4096 * and updating of the SPQ producer which involves a memory
4097 * read and we will have to put a full memory barrier there 4097 * read. If the memory read is removed we will have to put a
4098 * (inside bnx2x_sp_post()). 4098 * full memory barrier there (inside bnx2x_sp_post()).
4099 */ 4099 */
4100 4100
4101 /* Send a ramrod */ 4101 /* Send a ramrod */
@@ -4587,13 +4587,12 @@ static inline int bnx2x_q_send_setup_e1x(struct bnx2x *bp,
4587 /* Fill the ramrod data */ 4587 /* Fill the ramrod data */
4588 bnx2x_q_fill_setup_data_cmn(bp, params, rdata); 4588 bnx2x_q_fill_setup_data_cmn(bp, params, rdata);
4589 4589
4590 /* No need for an explicit memory barrier here as long we would 4590 /* No need for an explicit memory barrier here as long as we
4591 * need to ensure the ordering of writing to the SPQ element 4591 * ensure the ordering of writing to the SPQ element
4592 * and updating of the SPQ producer which involves a memory 4592 * and updating of the SPQ producer which involves a memory
4593 * read and we will have to put a full memory barrier there 4593 * read. If the memory read is removed we will have to put a
4594 * (inside bnx2x_sp_post()). 4594 * full memory barrier there (inside bnx2x_sp_post()).
4595 */ 4595 */
4596
4597 return bnx2x_sp_post(bp, ramrod, o->cids[BNX2X_PRIMARY_CID_INDEX], 4596 return bnx2x_sp_post(bp, ramrod, o->cids[BNX2X_PRIMARY_CID_INDEX],
4598 U64_HI(data_mapping), 4597 U64_HI(data_mapping),
4599 U64_LO(data_mapping), ETH_CONNECTION_TYPE); 4598 U64_LO(data_mapping), ETH_CONNECTION_TYPE);
@@ -4615,13 +4614,12 @@ static inline int bnx2x_q_send_setup_e2(struct bnx2x *bp,
4615 bnx2x_q_fill_setup_data_cmn(bp, params, rdata); 4614 bnx2x_q_fill_setup_data_cmn(bp, params, rdata);
4616 bnx2x_q_fill_setup_data_e2(bp, params, rdata); 4615 bnx2x_q_fill_setup_data_e2(bp, params, rdata);
4617 4616
4618 /* No need for an explicit memory barrier here as long we would 4617 /* No need for an explicit memory barrier here as long as we
4619 * need to ensure the ordering of writing to the SPQ element 4618 * ensure the ordering of writing to the SPQ element
4620 * and updating of the SPQ producer which involves a memory 4619 * and updating of the SPQ producer which involves a memory
4621 * read and we will have to put a full memory barrier there 4620 * read. If the memory read is removed we will have to put a
4622 * (inside bnx2x_sp_post()). 4621 * full memory barrier there (inside bnx2x_sp_post()).
4623 */ 4622 */
4624
4625 return bnx2x_sp_post(bp, ramrod, o->cids[BNX2X_PRIMARY_CID_INDEX], 4623 return bnx2x_sp_post(bp, ramrod, o->cids[BNX2X_PRIMARY_CID_INDEX],
4626 U64_HI(data_mapping), 4624 U64_HI(data_mapping),
4627 U64_LO(data_mapping), ETH_CONNECTION_TYPE); 4625 U64_LO(data_mapping), ETH_CONNECTION_TYPE);
@@ -4659,13 +4657,12 @@ static inline int bnx2x_q_send_setup_tx_only(struct bnx2x *bp,
4659 o->cids[cid_index], rdata->general.client_id, 4657 o->cids[cid_index], rdata->general.client_id,
4660 rdata->general.sp_client_id, rdata->general.cos); 4658 rdata->general.sp_client_id, rdata->general.cos);
4661 4659
4662 /* No need for an explicit memory barrier here as long we would 4660 /* No need for an explicit memory barrier here as long as we
4663 * need to ensure the ordering of writing to the SPQ element 4661 * ensure the ordering of writing to the SPQ element
4664 * and updating of the SPQ producer which involves a memory 4662 * and updating of the SPQ producer which involves a memory
4665 * read and we will have to put a full memory barrier there 4663 * read. If the memory read is removed we will have to put a
4666 * (inside bnx2x_sp_post()). 4664 * full memory barrier there (inside bnx2x_sp_post()).
4667 */ 4665 */
4668
4669 return bnx2x_sp_post(bp, ramrod, o->cids[cid_index], 4666 return bnx2x_sp_post(bp, ramrod, o->cids[cid_index],
4670 U64_HI(data_mapping), 4667 U64_HI(data_mapping),
4671 U64_LO(data_mapping), ETH_CONNECTION_TYPE); 4668 U64_LO(data_mapping), ETH_CONNECTION_TYPE);
@@ -4760,13 +4757,12 @@ static inline int bnx2x_q_send_update(struct bnx2x *bp,
4760 /* Fill the ramrod data */ 4757 /* Fill the ramrod data */
4761 bnx2x_q_fill_update_data(bp, o, update_params, rdata); 4758 bnx2x_q_fill_update_data(bp, o, update_params, rdata);
4762 4759
4763 /* No need for an explicit memory barrier here as long we would 4760 /* No need for an explicit memory barrier here as long as we
4764 * need to ensure the ordering of writing to the SPQ element 4761 * ensure the ordering of writing to the SPQ element
4765 * and updating of the SPQ producer which involves a memory 4762 * and updating of the SPQ producer which involves a memory
4766 * read and we will have to put a full memory barrier there 4763 * read. If the memory read is removed we will have to put a
4767 * (inside bnx2x_sp_post()). 4764 * full memory barrier there (inside bnx2x_sp_post()).
4768 */ 4765 */
4769
4770 return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_UPDATE, 4766 return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_UPDATE,
4771 o->cids[cid_index], U64_HI(data_mapping), 4767 o->cids[cid_index], U64_HI(data_mapping),
4772 U64_LO(data_mapping), ETH_CONNECTION_TYPE); 4768 U64_LO(data_mapping), ETH_CONNECTION_TYPE);
@@ -4813,11 +4809,62 @@ static inline int bnx2x_q_send_activate(struct bnx2x *bp,
4813 return bnx2x_q_send_update(bp, params); 4809 return bnx2x_q_send_update(bp, params);
4814} 4810}
4815 4811
4812static void bnx2x_q_fill_update_tpa_data(struct bnx2x *bp,
4813 struct bnx2x_queue_sp_obj *obj,
4814 struct bnx2x_queue_update_tpa_params *params,
4815 struct tpa_update_ramrod_data *data)
4816{
4817 data->client_id = obj->cl_id;
4818 data->complete_on_both_clients = params->complete_on_both_clients;
4819 data->dont_verify_rings_pause_thr_flg =
4820 params->dont_verify_thr;
4821 data->max_agg_size = cpu_to_le16(params->max_agg_sz);
4822 data->max_sges_for_packet = params->max_sges_pkt;
4823 data->max_tpa_queues = params->max_tpa_queues;
4824 data->sge_buff_size = cpu_to_le16(params->sge_buff_sz);
4825 data->sge_page_base_hi = cpu_to_le32(U64_HI(params->sge_map));
4826 data->sge_page_base_lo = cpu_to_le32(U64_LO(params->sge_map));
4827 data->sge_pause_thr_high = cpu_to_le16(params->sge_pause_thr_high);
4828 data->sge_pause_thr_low = cpu_to_le16(params->sge_pause_thr_low);
4829 data->tpa_mode = params->tpa_mode;
4830 data->update_ipv4 = params->update_ipv4;
4831 data->update_ipv6 = params->update_ipv6;
4832}
4833
4816static inline int bnx2x_q_send_update_tpa(struct bnx2x *bp, 4834static inline int bnx2x_q_send_update_tpa(struct bnx2x *bp,
4817 struct bnx2x_queue_state_params *params) 4835 struct bnx2x_queue_state_params *params)
4818{ 4836{
4819 /* TODO: Not implemented yet. */ 4837 struct bnx2x_queue_sp_obj *o = params->q_obj;
4820 return -1; 4838 struct tpa_update_ramrod_data *rdata =
4839 (struct tpa_update_ramrod_data *)o->rdata;
4840 dma_addr_t data_mapping = o->rdata_mapping;
4841 struct bnx2x_queue_update_tpa_params *update_tpa_params =
4842 &params->params.update_tpa;
4843 u16 type;
4844
4845 /* Clear the ramrod data */
4846 memset(rdata, 0, sizeof(*rdata));
4847
4848 /* Fill the ramrod data */
4849 bnx2x_q_fill_update_tpa_data(bp, o, update_tpa_params, rdata);
4850
4851 /* Add the function id inside the type, so that sp post function
4852 * doesn't automatically add the PF func-id, this is required
4853 * for operations done by PFs on behalf of their VFs
4854 */
4855 type = ETH_CONNECTION_TYPE |
4856 ((o->func_id) << SPE_HDR_FUNCTION_ID_SHIFT);
4857
4858 /* No need for an explicit memory barrier here as long as we
4859 * ensure the ordering of writing to the SPQ element
4860 * and updating of the SPQ producer which involves a memory
4861 * read. If the memory read is removed we will have to put a
4862 * full memory barrier there (inside bnx2x_sp_post()).
4863 */
4864 return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_TPA_UPDATE,
4865 o->cids[BNX2X_PRIMARY_CID_INDEX],
4866 U64_HI(data_mapping),
4867 U64_LO(data_mapping), type);
4821} 4868}
4822 4869
4823static inline int bnx2x_q_send_halt(struct bnx2x *bp, 4870static inline int bnx2x_q_send_halt(struct bnx2x *bp,
@@ -5647,6 +5694,12 @@ static inline int bnx2x_func_send_switch_update(struct bnx2x *bp,
5647 rdata->tx_switch_suspend = switch_update_params->suspend; 5694 rdata->tx_switch_suspend = switch_update_params->suspend;
5648 rdata->echo = SWITCH_UPDATE; 5695 rdata->echo = SWITCH_UPDATE;
5649 5696
5697 /* No need for an explicit memory barrier here as long as we
5698 * ensure the ordering of writing to the SPQ element
5699 * and updating of the SPQ producer which involves a memory
5700 * read. If the memory read is removed we will have to put a
5701 * full memory barrier there (inside bnx2x_sp_post()).
5702 */
5650 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_UPDATE, 0, 5703 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_UPDATE, 0,
5651 U64_HI(data_mapping), 5704 U64_HI(data_mapping),
5652 U64_LO(data_mapping), NONE_CONNECTION_TYPE); 5705 U64_LO(data_mapping), NONE_CONNECTION_TYPE);
@@ -5674,11 +5727,11 @@ static inline int bnx2x_func_send_afex_update(struct bnx2x *bp,
5674 rdata->allowed_priorities = afex_update_params->allowed_priorities; 5727 rdata->allowed_priorities = afex_update_params->allowed_priorities;
5675 rdata->echo = AFEX_UPDATE; 5728 rdata->echo = AFEX_UPDATE;
5676 5729
5677 /* No need for an explicit memory barrier here as long we would 5730 /* No need for an explicit memory barrier here as long as we
5678 * need to ensure the ordering of writing to the SPQ element 5731 * ensure the ordering of writing to the SPQ element
5679 * and updating of the SPQ producer which involves a memory 5732 * and updating of the SPQ producer which involves a memory
5680 * read and we will have to put a full memory barrier there 5733 * read. If the memory read is removed we will have to put a
5681 * (inside bnx2x_sp_post()). 5734 * full memory barrier there (inside bnx2x_sp_post()).
5682 */ 5735 */
5683 DP(BNX2X_MSG_SP, 5736 DP(BNX2X_MSG_SP,
5684 "afex: sending func_update vif_id 0x%x dvlan 0x%x prio 0x%x\n", 5737 "afex: sending func_update vif_id 0x%x dvlan 0x%x prio 0x%x\n",
@@ -5763,6 +5816,12 @@ static inline int bnx2x_func_send_tx_start(struct bnx2x *bp,
5763 rdata->traffic_type_to_priority_cos[i] = 5816 rdata->traffic_type_to_priority_cos[i] =
5764 tx_start_params->traffic_type_to_priority_cos[i]; 5817 tx_start_params->traffic_type_to_priority_cos[i];
5765 5818
5819 /* No need for an explicit memory barrier here as long as we
5820 * ensure the ordering of writing to the SPQ element
5821 * and updating of the SPQ producer which involves a memory
5822 * read. If the memory read is removed we will have to put a
5823 * full memory barrier there (inside bnx2x_sp_post()).
5824 */
5766 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_START_TRAFFIC, 0, 5825 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_START_TRAFFIC, 0,
5767 U64_HI(data_mapping), 5826 U64_HI(data_mapping),
5768 U64_LO(data_mapping), NONE_CONNECTION_TYPE); 5827 U64_LO(data_mapping), NONE_CONNECTION_TYPE);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
index 00d7f214a40a..f7af21fc8ecc 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
@@ -893,6 +893,24 @@ struct bnx2x_queue_update_params {
893 u8 cid_index; 893 u8 cid_index;
894}; 894};
895 895
896struct bnx2x_queue_update_tpa_params {
897 dma_addr_t sge_map;
898 u8 update_ipv4;
899 u8 update_ipv6;
900 u8 max_tpa_queues;
901 u8 max_sges_pkt;
902 u8 complete_on_both_clients;
903 u8 dont_verify_thr;
904 u8 tpa_mode;
905 u8 _pad;
906
907 u16 sge_buff_sz;
908 u16 max_agg_sz;
909
910 u16 sge_pause_thr_low;
911 u16 sge_pause_thr_high;
912};
913
896struct rxq_pause_params { 914struct rxq_pause_params {
897 u16 bd_th_lo; 915 u16 bd_th_lo;
898 u16 bd_th_hi; 916 u16 bd_th_hi;
@@ -987,6 +1005,7 @@ struct bnx2x_queue_state_params {
987 /* Params according to the current command */ 1005 /* Params according to the current command */
988 union { 1006 union {
989 struct bnx2x_queue_update_params update; 1007 struct bnx2x_queue_update_params update;
1008 struct bnx2x_queue_update_tpa_params update_tpa;
990 struct bnx2x_queue_setup_params setup; 1009 struct bnx2x_queue_setup_params setup;
991 struct bnx2x_queue_init_params init; 1010 struct bnx2x_queue_init_params init;
992 struct bnx2x_queue_setup_tx_only_params tx_only; 1011 struct bnx2x_queue_setup_tx_only_params tx_only;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index 5c4980c66417..a4a3d7e04df9 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -176,6 +176,11 @@ enum bnx2x_vfop_rss_state {
176 BNX2X_VFOP_RSS_DONE 176 BNX2X_VFOP_RSS_DONE
177}; 177};
178 178
179enum bnx2x_vfop_tpa_state {
180 BNX2X_VFOP_TPA_CONFIG,
181 BNX2X_VFOP_TPA_DONE
182};
183
179#define bnx2x_vfop_reset_wq(vf) atomic_set(&vf->op_in_progress, 0) 184#define bnx2x_vfop_reset_wq(vf) atomic_set(&vf->op_in_progress, 0)
180 185
181void bnx2x_vfop_qctor_dump_tx(struct bnx2x *bp, struct bnx2x_virtf *vf, 186void bnx2x_vfop_qctor_dump_tx(struct bnx2x *bp, struct bnx2x_virtf *vf,
@@ -3047,6 +3052,83 @@ int bnx2x_vfop_rss_cmd(struct bnx2x *bp,
3047 return -ENOMEM; 3052 return -ENOMEM;
3048} 3053}
3049 3054
3055/* VFOP tpa update, send update on all queues */
3056static void bnx2x_vfop_tpa(struct bnx2x *bp, struct bnx2x_virtf *vf)
3057{
3058 struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
3059 struct bnx2x_vfop_args_tpa *tpa_args = &vfop->args.tpa;
3060 enum bnx2x_vfop_tpa_state state = vfop->state;
3061
3062 bnx2x_vfop_reset_wq(vf);
3063
3064 if (vfop->rc < 0)
3065 goto op_err;
3066
3067 DP(BNX2X_MSG_IOV, "vf[%d:%d] STATE: %d\n",
3068 vf->abs_vfid, tpa_args->qid,
3069 state);
3070
3071 switch (state) {
3072 case BNX2X_VFOP_TPA_CONFIG:
3073
3074 if (tpa_args->qid < vf_rxq_count(vf)) {
3075 struct bnx2x_queue_state_params *qstate =
3076 &vf->op_params.qstate;
3077
3078 qstate->q_obj = &bnx2x_vfq(vf, tpa_args->qid, sp_obj);
3079
3080 /* The only thing that changes for the ramrod params
3081 * between calls is the sge_map
3082 */
3083 qstate->params.update_tpa.sge_map =
3084 tpa_args->sge_map[tpa_args->qid];
3085
3086 DP(BNX2X_MSG_IOV, "sge_addr[%d] %08x:%08x\n",
3087 tpa_args->qid,
3088 U64_HI(qstate->params.update_tpa.sge_map),
3089 U64_LO(qstate->params.update_tpa.sge_map));
3090 qstate->cmd = BNX2X_Q_CMD_UPDATE_TPA;
3091 vfop->rc = bnx2x_queue_state_change(bp, qstate);
3092
3093 tpa_args->qid++;
3094 bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
3095 }
3096 vfop->state = BNX2X_VFOP_TPA_DONE;
3097 vfop->rc = 0;
3098 bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
3099op_err:
3100 BNX2X_ERR("TPA update error: rc %d\n", vfop->rc);
3101op_done:
3102 case BNX2X_VFOP_TPA_DONE:
3103 bnx2x_vfop_end(bp, vf, vfop);
3104 return;
3105 default:
3106 bnx2x_vfop_default(state);
3107 }
3108op_pending:
3109 return;
3110}
3111
3112int bnx2x_vfop_tpa_cmd(struct bnx2x *bp,
3113 struct bnx2x_virtf *vf,
3114 struct bnx2x_vfop_cmd *cmd,
3115 struct vfpf_tpa_tlv *tpa_tlv)
3116{
3117 struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
3118
3119 if (vfop) {
3120 vfop->args.qx.qid = 0; /* loop */
3121 memcpy(&vfop->args.tpa.sge_map,
3122 tpa_tlv->tpa_client_info.sge_addr,
3123 sizeof(vfop->args.tpa.sge_map));
3124 bnx2x_vfop_opset(BNX2X_VFOP_TPA_CONFIG,
3125 bnx2x_vfop_tpa, cmd->done);
3126 return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_tpa,
3127 cmd->block);
3128 }
3129 return -ENOMEM;
3130}
3131
3050/* VF release ~ VF close + VF release-resources 3132/* VF release ~ VF close + VF release-resources
3051 * Release is the ultimate SW shutdown and is called whenever an 3133 * Release is the ultimate SW shutdown and is called whenever an
3052 * irrecoverable error is encountered. 3134 * irrecoverable error is encountered.
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
index d9fcca1b5a9d..9b60e80c89fe 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
@@ -100,6 +100,7 @@ union bnx2x_vfop_params {
100 struct bnx2x_mcast_ramrod_params mcast; 100 struct bnx2x_mcast_ramrod_params mcast;
101 struct bnx2x_config_rss_params rss; 101 struct bnx2x_config_rss_params rss;
102 struct bnx2x_vfop_qctor_params qctor; 102 struct bnx2x_vfop_qctor_params qctor;
103 struct bnx2x_queue_state_params qstate;
103}; 104};
104 105
105/* forward */ 106/* forward */
@@ -166,6 +167,11 @@ struct bnx2x_vfop_args_filters {
166 atomic_t *credit; /* non NULL means 'don't consume credit' */ 167 atomic_t *credit; /* non NULL means 'don't consume credit' */
167}; 168};
168 169
170struct bnx2x_vfop_args_tpa {
171 int qid;
172 dma_addr_t sge_map[PFVF_MAX_QUEUES_PER_VF];
173};
174
169union bnx2x_vfop_args { 175union bnx2x_vfop_args {
170 struct bnx2x_vfop_args_mcast mc_list; 176 struct bnx2x_vfop_args_mcast mc_list;
171 struct bnx2x_vfop_args_qctor qctor; 177 struct bnx2x_vfop_args_qctor qctor;
@@ -173,6 +179,7 @@ union bnx2x_vfop_args {
173 struct bnx2x_vfop_args_defvlan defvlan; 179 struct bnx2x_vfop_args_defvlan defvlan;
174 struct bnx2x_vfop_args_qx qx; 180 struct bnx2x_vfop_args_qx qx;
175 struct bnx2x_vfop_args_filters filters; 181 struct bnx2x_vfop_args_filters filters;
182 struct bnx2x_vfop_args_tpa tpa;
176}; 183};
177 184
178struct bnx2x_vfop { 185struct bnx2x_vfop {
@@ -704,6 +711,11 @@ int bnx2x_vfop_rss_cmd(struct bnx2x *bp,
704 struct bnx2x_virtf *vf, 711 struct bnx2x_virtf *vf,
705 struct bnx2x_vfop_cmd *cmd); 712 struct bnx2x_vfop_cmd *cmd);
706 713
714int bnx2x_vfop_tpa_cmd(struct bnx2x *bp,
715 struct bnx2x_virtf *vf,
716 struct bnx2x_vfop_cmd *cmd,
717 struct vfpf_tpa_tlv *tpa_tlv);
718
707/* VF release ~ VF close + VF release-resources 719/* VF release ~ VF close + VF release-resources
708 * 720 *
709 * Release is the ultimate SW shutdown and is called whenever an 721 * Release is the ultimate SW shutdown and is called whenever an
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
index ebad48a330e7..dfaed288becd 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
@@ -1159,7 +1159,8 @@ static void bnx2x_vf_mbx_acquire_resp(struct bnx2x *bp, struct bnx2x_virtf *vf,
1159 resp->pfdev_info.db_size = bp->db_size; 1159 resp->pfdev_info.db_size = bp->db_size;
1160 resp->pfdev_info.indices_per_sb = HC_SB_MAX_INDICES_E2; 1160 resp->pfdev_info.indices_per_sb = HC_SB_MAX_INDICES_E2;
1161 resp->pfdev_info.pf_cap = (PFVF_CAP_RSS | 1161 resp->pfdev_info.pf_cap = (PFVF_CAP_RSS |
1162 /* PFVF_CAP_DHC |*/ PFVF_CAP_TPA); 1162 PFVF_CAP_TPA |
1163 PFVF_CAP_TPA_UPDATE);
1163 bnx2x_fill_fw_str(bp, resp->pfdev_info.fw_ver, 1164 bnx2x_fill_fw_str(bp, resp->pfdev_info.fw_ver,
1164 sizeof(resp->pfdev_info.fw_ver)); 1165 sizeof(resp->pfdev_info.fw_ver));
1165 1166
@@ -1910,6 +1911,75 @@ mbx_resp:
1910 bnx2x_vf_mbx_resp(bp, vf); 1911 bnx2x_vf_mbx_resp(bp, vf);
1911} 1912}
1912 1913
1914static int bnx2x_validate_tpa_params(struct bnx2x *bp,
1915 struct vfpf_tpa_tlv *tpa_tlv)
1916{
1917 int rc = 0;
1918
1919 if (tpa_tlv->tpa_client_info.max_sges_for_packet >
1920 U_ETH_MAX_SGES_FOR_PACKET) {
1921 rc = -EINVAL;
1922 BNX2X_ERR("TPA update: max_sges received %d, max is %d\n",
1923 tpa_tlv->tpa_client_info.max_sges_for_packet,
1924 U_ETH_MAX_SGES_FOR_PACKET);
1925 }
1926
1927 if (tpa_tlv->tpa_client_info.max_tpa_queues > MAX_AGG_QS(bp)) {
1928 rc = -EINVAL;
1929 BNX2X_ERR("TPA update: max_tpa_queues received %d, max is %d\n",
1930 tpa_tlv->tpa_client_info.max_tpa_queues,
1931 MAX_AGG_QS(bp));
1932 }
1933
1934 return rc;
1935}
1936
1937static void bnx2x_vf_mbx_update_tpa(struct bnx2x *bp, struct bnx2x_virtf *vf,
1938 struct bnx2x_vf_mbx *mbx)
1939{
1940 struct bnx2x_vfop_cmd cmd = {
1941 .done = bnx2x_vf_mbx_resp,
1942 .block = false,
1943 };
1944 struct bnx2x_queue_update_tpa_params *vf_op_params =
1945 &vf->op_params.qstate.params.update_tpa;
1946 struct vfpf_tpa_tlv *tpa_tlv = &mbx->msg->req.update_tpa;
1947
1948 memset(vf_op_params, 0, sizeof(*vf_op_params));
1949
1950 if (bnx2x_validate_tpa_params(bp, tpa_tlv))
1951 goto mbx_resp;
1952
1953 vf_op_params->complete_on_both_clients =
1954 tpa_tlv->tpa_client_info.complete_on_both_clients;
1955 vf_op_params->dont_verify_thr =
1956 tpa_tlv->tpa_client_info.dont_verify_thr;
1957 vf_op_params->max_agg_sz =
1958 tpa_tlv->tpa_client_info.max_agg_size;
1959 vf_op_params->max_sges_pkt =
1960 tpa_tlv->tpa_client_info.max_sges_for_packet;
1961 vf_op_params->max_tpa_queues =
1962 tpa_tlv->tpa_client_info.max_tpa_queues;
1963 vf_op_params->sge_buff_sz =
1964 tpa_tlv->tpa_client_info.sge_buff_size;
1965 vf_op_params->sge_pause_thr_high =
1966 tpa_tlv->tpa_client_info.sge_pause_thr_high;
1967 vf_op_params->sge_pause_thr_low =
1968 tpa_tlv->tpa_client_info.sge_pause_thr_low;
1969 vf_op_params->tpa_mode =
1970 tpa_tlv->tpa_client_info.tpa_mode;
1971 vf_op_params->update_ipv4 =
1972 tpa_tlv->tpa_client_info.update_ipv4;
1973 vf_op_params->update_ipv6 =
1974 tpa_tlv->tpa_client_info.update_ipv6;
1975
1976 vf->op_rc = bnx2x_vfop_tpa_cmd(bp, vf, &cmd, tpa_tlv);
1977
1978mbx_resp:
1979 if (vf->op_rc)
1980 bnx2x_vf_mbx_resp(bp, vf);
1981}
1982
1913/* dispatch request */ 1983/* dispatch request */
1914static void bnx2x_vf_mbx_request(struct bnx2x *bp, struct bnx2x_virtf *vf, 1984static void bnx2x_vf_mbx_request(struct bnx2x *bp, struct bnx2x_virtf *vf,
1915 struct bnx2x_vf_mbx *mbx) 1985 struct bnx2x_vf_mbx *mbx)
@@ -1949,6 +2019,9 @@ static void bnx2x_vf_mbx_request(struct bnx2x *bp, struct bnx2x_virtf *vf,
1949 case CHANNEL_TLV_UPDATE_RSS: 2019 case CHANNEL_TLV_UPDATE_RSS:
1950 bnx2x_vf_mbx_update_rss(bp, vf, mbx); 2020 bnx2x_vf_mbx_update_rss(bp, vf, mbx);
1951 return; 2021 return;
2022 case CHANNEL_TLV_UPDATE_TPA:
2023 bnx2x_vf_mbx_update_tpa(bp, vf, mbx);
2024 return;
1952 } 2025 }
1953 2026
1954 } else { 2027 } else {
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h
index 208568bc7a71..c922b81170e5 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h
@@ -162,6 +162,7 @@ struct pfvf_acquire_resp_tlv {
162#define PFVF_CAP_RSS 0x00000001 162#define PFVF_CAP_RSS 0x00000001
163#define PFVF_CAP_DHC 0x00000002 163#define PFVF_CAP_DHC 0x00000002
164#define PFVF_CAP_TPA 0x00000004 164#define PFVF_CAP_TPA 0x00000004
165#define PFVF_CAP_TPA_UPDATE 0x00000008
165 char fw_ver[32]; 166 char fw_ver[32];
166 u16 db_size; 167 u16 db_size;
167 u8 indices_per_sb; 168 u8 indices_per_sb;
@@ -303,6 +304,25 @@ struct vfpf_set_q_filters_tlv {
303 u32 rx_mask; /* see mask constants at the top of the file */ 304 u32 rx_mask; /* see mask constants at the top of the file */
304}; 305};
305 306
307struct vfpf_tpa_tlv {
308 struct vfpf_first_tlv first_tlv;
309
310 struct vf_pf_tpa_client_info {
311 aligned_u64 sge_addr[PFVF_MAX_QUEUES_PER_VF];
312 u8 update_ipv4;
313 u8 update_ipv6;
314 u8 max_tpa_queues;
315 u8 max_sges_for_packet;
316 u8 complete_on_both_clients;
317 u8 dont_verify_thr;
318 u8 tpa_mode;
319 u16 sge_buff_size;
320 u16 max_agg_size;
321 u16 sge_pause_thr_low;
322 u16 sge_pause_thr_high;
323 } tpa_client_info;
324};
325
306/* close VF (disable VF) */ 326/* close VF (disable VF) */
307struct vfpf_close_tlv { 327struct vfpf_close_tlv {
308 struct vfpf_first_tlv first_tlv; 328 struct vfpf_first_tlv first_tlv;
@@ -331,6 +351,7 @@ union vfpf_tlvs {
331 struct vfpf_set_q_filters_tlv set_q_filters; 351 struct vfpf_set_q_filters_tlv set_q_filters;
332 struct vfpf_release_tlv release; 352 struct vfpf_release_tlv release;
333 struct vfpf_rss_tlv update_rss; 353 struct vfpf_rss_tlv update_rss;
354 struct vfpf_tpa_tlv update_tpa;
334 struct channel_list_end_tlv list_end; 355 struct channel_list_end_tlv list_end;
335 struct tlv_buffer_size tlv_buf_size; 356 struct tlv_buffer_size tlv_buf_size;
336}; 357};
@@ -405,6 +426,7 @@ enum channel_tlvs {
405 CHANNEL_TLV_PF_SET_VLAN, 426 CHANNEL_TLV_PF_SET_VLAN,
406 CHANNEL_TLV_UPDATE_RSS, 427 CHANNEL_TLV_UPDATE_RSS,
407 CHANNEL_TLV_PHYS_PORT_ID, 428 CHANNEL_TLV_PHYS_PORT_ID,
429 CHANNEL_TLV_UPDATE_TPA,
408 CHANNEL_TLV_MAX 430 CHANNEL_TLV_MAX
409}; 431};
410 432