diff options
author | David S. Miller <davem@davemloft.net> | 2018-12-20 11:26:16 -0500 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2018-12-20 11:26:16 -0500 |
commit | 44a7b3b6e3a458f9549c2cc28e74ecdc470e42f1 (patch) | |
tree | f4f4cf87803b23e633daa18265d73693d8d44bf8 | |
parent | ac68a3d3c3eba61c693d63a89223e1df8fe1f0c6 (diff) | |
parent | 0c2ff8d796895448b3a23c9cf7f37e721daeea48 (diff) |
Merge branch 'bnxt_en-next'
Michael Chan says:
====================
bnxt_en: Update for net-next.
Three main changes in this series, besides the usual firmware spec
update:
1. Add support for a new firmware communication channel direct to the
firmware processor that handles flow offloads. This speeds up
flow offload operations.
2. Use 64-bit internal flow handles to increase the number of flows
that can be offloaded.
3. Add level-2 context memory paging so that we can configure more
context memory for RDMA on the 57500 chips. Allocate more context
memory if RDMA is enabled on the 57500 chips.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r-- | drivers/net/ethernet/broadcom/bnxt/bnxt.c | 311 | ||||
-rw-r--r-- | drivers/net/ethernet/broadcom/bnxt/bnxt.h | 102 | ||||
-rw-r--r-- | drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h | 614 | ||||
-rw-r--r-- | drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c | 108 | ||||
-rw-r--r-- | drivers/net/ethernet/broadcom/bnxt/bnxt_tc.h | 5 |
5 files changed, 936 insertions(+), 204 deletions(-)
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 218a6dff3efc..3aa80da973d7 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c | |||
@@ -1812,7 +1812,7 @@ static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp) | |||
1812 | case CMPL_BASE_TYPE_HWRM_DONE: | 1812 | case CMPL_BASE_TYPE_HWRM_DONE: |
1813 | seq_id = le16_to_cpu(h_cmpl->sequence_id); | 1813 | seq_id = le16_to_cpu(h_cmpl->sequence_id); |
1814 | if (seq_id == bp->hwrm_intr_seq_id) | 1814 | if (seq_id == bp->hwrm_intr_seq_id) |
1815 | bp->hwrm_intr_seq_id = HWRM_SEQ_ID_INVALID; | 1815 | bp->hwrm_intr_seq_id = (u16)~bp->hwrm_intr_seq_id; |
1816 | else | 1816 | else |
1817 | netdev_err(bp->dev, "Invalid hwrm seq id %d\n", seq_id); | 1817 | netdev_err(bp->dev, "Invalid hwrm seq id %d\n", seq_id); |
1818 | break; | 1818 | break; |
@@ -2375,7 +2375,11 @@ static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem) | |||
2375 | rmem->pg_arr[i] = NULL; | 2375 | rmem->pg_arr[i] = NULL; |
2376 | } | 2376 | } |
2377 | if (rmem->pg_tbl) { | 2377 | if (rmem->pg_tbl) { |
2378 | dma_free_coherent(&pdev->dev, rmem->nr_pages * 8, | 2378 | size_t pg_tbl_size = rmem->nr_pages * 8; |
2379 | |||
2380 | if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG) | ||
2381 | pg_tbl_size = rmem->page_size; | ||
2382 | dma_free_coherent(&pdev->dev, pg_tbl_size, | ||
2379 | rmem->pg_tbl, rmem->pg_tbl_map); | 2383 | rmem->pg_tbl, rmem->pg_tbl_map); |
2380 | rmem->pg_tbl = NULL; | 2384 | rmem->pg_tbl = NULL; |
2381 | } | 2385 | } |
@@ -2393,9 +2397,12 @@ static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem) | |||
2393 | 2397 | ||
2394 | if (rmem->flags & (BNXT_RMEM_VALID_PTE_FLAG | BNXT_RMEM_RING_PTE_FLAG)) | 2398 | if (rmem->flags & (BNXT_RMEM_VALID_PTE_FLAG | BNXT_RMEM_RING_PTE_FLAG)) |
2395 | valid_bit = PTU_PTE_VALID; | 2399 | valid_bit = PTU_PTE_VALID; |
2396 | if (rmem->nr_pages > 1) { | 2400 | if ((rmem->nr_pages > 1 || rmem->depth > 0) && !rmem->pg_tbl) { |
2397 | rmem->pg_tbl = dma_alloc_coherent(&pdev->dev, | 2401 | size_t pg_tbl_size = rmem->nr_pages * 8; |
2398 | rmem->nr_pages * 8, | 2402 | |
2403 | if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG) | ||
2404 | pg_tbl_size = rmem->page_size; | ||
2405 | rmem->pg_tbl = dma_alloc_coherent(&pdev->dev, pg_tbl_size, | ||
2399 | &rmem->pg_tbl_map, | 2406 | &rmem->pg_tbl_map, |
2400 | GFP_KERNEL); | 2407 | GFP_KERNEL); |
2401 | if (!rmem->pg_tbl) | 2408 | if (!rmem->pg_tbl) |
@@ -2412,7 +2419,7 @@ static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem) | |||
2412 | if (!rmem->pg_arr[i]) | 2419 | if (!rmem->pg_arr[i]) |
2413 | return -ENOMEM; | 2420 | return -ENOMEM; |
2414 | 2421 | ||
2415 | if (rmem->nr_pages > 1) { | 2422 | if (rmem->nr_pages > 1 || rmem->depth > 0) { |
2416 | if (i == rmem->nr_pages - 2 && | 2423 | if (i == rmem->nr_pages - 2 && |
2417 | (rmem->flags & BNXT_RMEM_RING_PTE_FLAG)) | 2424 | (rmem->flags & BNXT_RMEM_RING_PTE_FLAG)) |
2418 | extra_bits |= PTU_PTE_NEXT_TO_LAST; | 2425 | extra_bits |= PTU_PTE_NEXT_TO_LAST; |
@@ -3279,6 +3286,27 @@ static void bnxt_free_hwrm_resources(struct bnxt *bp) | |||
3279 | bp->hwrm_cmd_resp_dma_addr); | 3286 | bp->hwrm_cmd_resp_dma_addr); |
3280 | bp->hwrm_cmd_resp_addr = NULL; | 3287 | bp->hwrm_cmd_resp_addr = NULL; |
3281 | } | 3288 | } |
3289 | |||
3290 | if (bp->hwrm_cmd_kong_resp_addr) { | ||
3291 | dma_free_coherent(&pdev->dev, PAGE_SIZE, | ||
3292 | bp->hwrm_cmd_kong_resp_addr, | ||
3293 | bp->hwrm_cmd_kong_resp_dma_addr); | ||
3294 | bp->hwrm_cmd_kong_resp_addr = NULL; | ||
3295 | } | ||
3296 | } | ||
3297 | |||
3298 | static int bnxt_alloc_kong_hwrm_resources(struct bnxt *bp) | ||
3299 | { | ||
3300 | struct pci_dev *pdev = bp->pdev; | ||
3301 | |||
3302 | bp->hwrm_cmd_kong_resp_addr = | ||
3303 | dma_alloc_coherent(&pdev->dev, PAGE_SIZE, | ||
3304 | &bp->hwrm_cmd_kong_resp_dma_addr, | ||
3305 | GFP_KERNEL); | ||
3306 | if (!bp->hwrm_cmd_kong_resp_addr) | ||
3307 | return -ENOMEM; | ||
3308 | |||
3309 | return 0; | ||
3282 | } | 3310 | } |
3283 | 3311 | ||
3284 | static int bnxt_alloc_hwrm_resources(struct bnxt *bp) | 3312 | static int bnxt_alloc_hwrm_resources(struct bnxt *bp) |
@@ -3740,7 +3768,10 @@ void bnxt_hwrm_cmd_hdr_init(struct bnxt *bp, void *request, u16 req_type, | |||
3740 | req->req_type = cpu_to_le16(req_type); | 3768 | req->req_type = cpu_to_le16(req_type); |
3741 | req->cmpl_ring = cpu_to_le16(cmpl_ring); | 3769 | req->cmpl_ring = cpu_to_le16(cmpl_ring); |
3742 | req->target_id = cpu_to_le16(target_id); | 3770 | req->target_id = cpu_to_le16(target_id); |
3743 | req->resp_addr = cpu_to_le64(bp->hwrm_cmd_resp_dma_addr); | 3771 | if (bnxt_kong_hwrm_message(bp, req)) |
3772 | req->resp_addr = cpu_to_le64(bp->hwrm_cmd_kong_resp_dma_addr); | ||
3773 | else | ||
3774 | req->resp_addr = cpu_to_le64(bp->hwrm_cmd_resp_dma_addr); | ||
3744 | } | 3775 | } |
3745 | 3776 | ||
3746 | static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len, | 3777 | static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len, |
@@ -3755,11 +3786,10 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len, | |||
3755 | struct hwrm_err_output *resp = bp->hwrm_cmd_resp_addr; | 3786 | struct hwrm_err_output *resp = bp->hwrm_cmd_resp_addr; |
3756 | u16 max_req_len = BNXT_HWRM_MAX_REQ_LEN; | 3787 | u16 max_req_len = BNXT_HWRM_MAX_REQ_LEN; |
3757 | struct hwrm_short_input short_input = {0}; | 3788 | struct hwrm_short_input short_input = {0}; |
3758 | 3789 | u32 doorbell_offset = BNXT_GRCPF_REG_CHIMP_COMM_TRIGGER; | |
3759 | req->seq_id = cpu_to_le16(bp->hwrm_cmd_seq++); | 3790 | u8 *resp_addr = (u8 *)bp->hwrm_cmd_resp_addr; |
3760 | memset(resp, 0, PAGE_SIZE); | 3791 | u32 bar_offset = BNXT_GRCPF_REG_CHIMP_COMM; |
3761 | cp_ring_id = le16_to_cpu(req->cmpl_ring); | 3792 | u16 dst = BNXT_HWRM_CHNL_CHIMP; |
3762 | intr_process = (cp_ring_id == INVALID_HW_RING_ID) ? 0 : 1; | ||
3763 | 3793 | ||
3764 | if (msg_len > BNXT_HWRM_MAX_REQ_LEN) { | 3794 | if (msg_len > BNXT_HWRM_MAX_REQ_LEN) { |
3765 | if (msg_len > bp->hwrm_max_ext_req_len || | 3795 | if (msg_len > bp->hwrm_max_ext_req_len || |
@@ -3767,6 +3797,23 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len, | |||
3767 | return -EINVAL; | 3797 | return -EINVAL; |
3768 | } | 3798 | } |
3769 | 3799 | ||
3800 | if (bnxt_hwrm_kong_chnl(bp, req)) { | ||
3801 | dst = BNXT_HWRM_CHNL_KONG; | ||
3802 | bar_offset = BNXT_GRCPF_REG_KONG_COMM; | ||
3803 | doorbell_offset = BNXT_GRCPF_REG_KONG_COMM_TRIGGER; | ||
3804 | resp = bp->hwrm_cmd_kong_resp_addr; | ||
3805 | resp_addr = (u8 *)bp->hwrm_cmd_kong_resp_addr; | ||
3806 | } | ||
3807 | |||
3808 | memset(resp, 0, PAGE_SIZE); | ||
3809 | cp_ring_id = le16_to_cpu(req->cmpl_ring); | ||
3810 | intr_process = (cp_ring_id == INVALID_HW_RING_ID) ? 0 : 1; | ||
3811 | |||
3812 | req->seq_id = cpu_to_le16(bnxt_get_hwrm_seq_id(bp, dst)); | ||
3813 | /* currently supports only one outstanding message */ | ||
3814 | if (intr_process) | ||
3815 | bp->hwrm_intr_seq_id = le16_to_cpu(req->seq_id); | ||
3816 | |||
3770 | if ((bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) || | 3817 | if ((bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) || |
3771 | msg_len > BNXT_HWRM_MAX_REQ_LEN) { | 3818 | msg_len > BNXT_HWRM_MAX_REQ_LEN) { |
3772 | void *short_cmd_req = bp->hwrm_short_cmd_req_addr; | 3819 | void *short_cmd_req = bp->hwrm_short_cmd_req_addr; |
@@ -3800,17 +3847,13 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len, | |||
3800 | } | 3847 | } |
3801 | 3848 | ||
3802 | /* Write request msg to hwrm channel */ | 3849 | /* Write request msg to hwrm channel */ |
3803 | __iowrite32_copy(bp->bar0, data, msg_len / 4); | 3850 | __iowrite32_copy(bp->bar0 + bar_offset, data, msg_len / 4); |
3804 | 3851 | ||
3805 | for (i = msg_len; i < max_req_len; i += 4) | 3852 | for (i = msg_len; i < max_req_len; i += 4) |
3806 | writel(0, bp->bar0 + i); | 3853 | writel(0, bp->bar0 + bar_offset + i); |
3807 | |||
3808 | /* currently supports only one outstanding message */ | ||
3809 | if (intr_process) | ||
3810 | bp->hwrm_intr_seq_id = le16_to_cpu(req->seq_id); | ||
3811 | 3854 | ||
3812 | /* Ring channel doorbell */ | 3855 | /* Ring channel doorbell */ |
3813 | writel(1, bp->bar0 + 0x100); | 3856 | writel(1, bp->bar0 + doorbell_offset); |
3814 | 3857 | ||
3815 | if (!timeout) | 3858 | if (!timeout) |
3816 | timeout = DFLT_HWRM_CMD_TIMEOUT; | 3859 | timeout = DFLT_HWRM_CMD_TIMEOUT; |
@@ -3825,10 +3868,13 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len, | |||
3825 | tmo_count = HWRM_SHORT_TIMEOUT_COUNTER; | 3868 | tmo_count = HWRM_SHORT_TIMEOUT_COUNTER; |
3826 | timeout = timeout - HWRM_SHORT_MIN_TIMEOUT * HWRM_SHORT_TIMEOUT_COUNTER; | 3869 | timeout = timeout - HWRM_SHORT_MIN_TIMEOUT * HWRM_SHORT_TIMEOUT_COUNTER; |
3827 | tmo_count += DIV_ROUND_UP(timeout, HWRM_MIN_TIMEOUT); | 3870 | tmo_count += DIV_ROUND_UP(timeout, HWRM_MIN_TIMEOUT); |
3828 | resp_len = bp->hwrm_cmd_resp_addr + HWRM_RESP_LEN_OFFSET; | 3871 | resp_len = (__le32 *)(resp_addr + HWRM_RESP_LEN_OFFSET); |
3872 | |||
3829 | if (intr_process) { | 3873 | if (intr_process) { |
3874 | u16 seq_id = bp->hwrm_intr_seq_id; | ||
3875 | |||
3830 | /* Wait until hwrm response cmpl interrupt is processed */ | 3876 | /* Wait until hwrm response cmpl interrupt is processed */ |
3831 | while (bp->hwrm_intr_seq_id != HWRM_SEQ_ID_INVALID && | 3877 | while (bp->hwrm_intr_seq_id != (u16)~seq_id && |
3832 | i++ < tmo_count) { | 3878 | i++ < tmo_count) { |
3833 | /* on first few passes, just barely sleep */ | 3879 | /* on first few passes, just barely sleep */ |
3834 | if (i < HWRM_SHORT_TIMEOUT_COUNTER) | 3880 | if (i < HWRM_SHORT_TIMEOUT_COUNTER) |
@@ -3839,14 +3885,14 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len, | |||
3839 | HWRM_MAX_TIMEOUT); | 3885 | HWRM_MAX_TIMEOUT); |
3840 | } | 3886 | } |
3841 | 3887 | ||
3842 | if (bp->hwrm_intr_seq_id != HWRM_SEQ_ID_INVALID) { | 3888 | if (bp->hwrm_intr_seq_id != (u16)~seq_id) { |
3843 | netdev_err(bp->dev, "Resp cmpl intr err msg: 0x%x\n", | 3889 | netdev_err(bp->dev, "Resp cmpl intr err msg: 0x%x\n", |
3844 | le16_to_cpu(req->req_type)); | 3890 | le16_to_cpu(req->req_type)); |
3845 | return -1; | 3891 | return -1; |
3846 | } | 3892 | } |
3847 | len = (le32_to_cpu(*resp_len) & HWRM_RESP_LEN_MASK) >> | 3893 | len = (le32_to_cpu(*resp_len) & HWRM_RESP_LEN_MASK) >> |
3848 | HWRM_RESP_LEN_SFT; | 3894 | HWRM_RESP_LEN_SFT; |
3849 | valid = bp->hwrm_cmd_resp_addr + len - 1; | 3895 | valid = resp_addr + len - 1; |
3850 | } else { | 3896 | } else { |
3851 | int j; | 3897 | int j; |
3852 | 3898 | ||
@@ -3874,7 +3920,7 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len, | |||
3874 | } | 3920 | } |
3875 | 3921 | ||
3876 | /* Last byte of resp contains valid bit */ | 3922 | /* Last byte of resp contains valid bit */ |
3877 | valid = bp->hwrm_cmd_resp_addr + len - 1; | 3923 | valid = resp_addr + len - 1; |
3878 | for (j = 0; j < HWRM_VALID_BIT_DELAY_USEC; j++) { | 3924 | for (j = 0; j < HWRM_VALID_BIT_DELAY_USEC; j++) { |
3879 | /* make sure we read from updated DMA memory */ | 3925 | /* make sure we read from updated DMA memory */ |
3880 | dma_rmb(); | 3926 | dma_rmb(); |
@@ -4009,6 +4055,10 @@ static int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp) | |||
4009 | cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD); | 4055 | cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD); |
4010 | } | 4056 | } |
4011 | 4057 | ||
4058 | if (bp->fw_cap & BNXT_FW_CAP_OVS_64BIT_HANDLE) | ||
4059 | req.flags |= cpu_to_le32( | ||
4060 | FUNC_DRV_RGTR_REQ_FLAGS_FLOW_HANDLE_64BIT_MODE); | ||
4061 | |||
4012 | mutex_lock(&bp->hwrm_cmd_lock); | 4062 | mutex_lock(&bp->hwrm_cmd_lock); |
4013 | rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); | 4063 | rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); |
4014 | if (rc) | 4064 | if (rc) |
@@ -4137,12 +4187,11 @@ static int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp, | |||
4137 | static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp, | 4187 | static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp, |
4138 | struct bnxt_ntuple_filter *fltr) | 4188 | struct bnxt_ntuple_filter *fltr) |
4139 | { | 4189 | { |
4140 | int rc = 0; | 4190 | struct bnxt_vnic_info *vnic = &bp->vnic_info[fltr->rxq + 1]; |
4141 | struct hwrm_cfa_ntuple_filter_alloc_input req = {0}; | 4191 | struct hwrm_cfa_ntuple_filter_alloc_input req = {0}; |
4142 | struct hwrm_cfa_ntuple_filter_alloc_output *resp = | 4192 | struct hwrm_cfa_ntuple_filter_alloc_output *resp; |
4143 | bp->hwrm_cmd_resp_addr; | ||
4144 | struct flow_keys *keys = &fltr->fkeys; | 4193 | struct flow_keys *keys = &fltr->fkeys; |
4145 | struct bnxt_vnic_info *vnic = &bp->vnic_info[fltr->rxq + 1]; | 4194 | int rc = 0; |
4146 | 4195 | ||
4147 | bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_ALLOC, -1, -1); | 4196 | bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_ALLOC, -1, -1); |
4148 | req.l2_filter_id = bp->vnic_info[0].fw_l2_filter_id[fltr->l2_fltr_idx]; | 4197 | req.l2_filter_id = bp->vnic_info[0].fw_l2_filter_id[fltr->l2_fltr_idx]; |
@@ -4188,8 +4237,10 @@ static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp, | |||
4188 | req.dst_id = cpu_to_le16(vnic->fw_vnic_id); | 4237 | req.dst_id = cpu_to_le16(vnic->fw_vnic_id); |
4189 | mutex_lock(&bp->hwrm_cmd_lock); | 4238 | mutex_lock(&bp->hwrm_cmd_lock); |
4190 | rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); | 4239 | rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); |
4191 | if (!rc) | 4240 | if (!rc) { |
4241 | resp = bnxt_get_hwrm_resp_addr(bp, &req); | ||
4192 | fltr->filter_id = resp->ntuple_filter_id; | 4242 | fltr->filter_id = resp->ntuple_filter_id; |
4243 | } | ||
4193 | mutex_unlock(&bp->hwrm_cmd_lock); | 4244 | mutex_unlock(&bp->hwrm_cmd_lock); |
4194 | return rc; | 4245 | return rc; |
4195 | } | 4246 | } |
@@ -6000,8 +6051,11 @@ static void bnxt_hwrm_set_pg_attr(struct bnxt_ring_mem_info *rmem, u8 *pg_attr, | |||
6000 | pg_size = 2 << 4; | 6051 | pg_size = 2 << 4; |
6001 | 6052 | ||
6002 | *pg_attr = pg_size; | 6053 | *pg_attr = pg_size; |
6003 | if (rmem->nr_pages > 1) { | 6054 | if (rmem->depth >= 1) { |
6004 | *pg_attr |= 1; | 6055 | if (rmem->depth == 2) |
6056 | *pg_attr |= 2; | ||
6057 | else | ||
6058 | *pg_attr |= 1; | ||
6005 | *pg_dir = cpu_to_le64(rmem->pg_tbl_map); | 6059 | *pg_dir = cpu_to_le64(rmem->pg_tbl_map); |
6006 | } else { | 6060 | } else { |
6007 | *pg_dir = cpu_to_le64(rmem->dma_arr[0]); | 6061 | *pg_dir = cpu_to_le64(rmem->dma_arr[0]); |
@@ -6078,6 +6132,22 @@ static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables) | |||
6078 | &req.stat_pg_size_stat_lvl, | 6132 | &req.stat_pg_size_stat_lvl, |
6079 | &req.stat_page_dir); | 6133 | &req.stat_page_dir); |
6080 | } | 6134 | } |
6135 | if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV) { | ||
6136 | ctx_pg = &ctx->mrav_mem; | ||
6137 | req.mrav_num_entries = cpu_to_le32(ctx_pg->entries); | ||
6138 | req.mrav_entry_size = cpu_to_le16(ctx->mrav_entry_size); | ||
6139 | bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, | ||
6140 | &req.mrav_pg_size_mrav_lvl, | ||
6141 | &req.mrav_page_dir); | ||
6142 | } | ||
6143 | if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM) { | ||
6144 | ctx_pg = &ctx->tim_mem; | ||
6145 | req.tim_num_entries = cpu_to_le32(ctx_pg->entries); | ||
6146 | req.tim_entry_size = cpu_to_le16(ctx->tim_entry_size); | ||
6147 | bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, | ||
6148 | &req.tim_pg_size_tim_lvl, | ||
6149 | &req.tim_page_dir); | ||
6150 | } | ||
6081 | for (i = 0, num_entries = &req.tqm_sp_num_entries, | 6151 | for (i = 0, num_entries = &req.tqm_sp_num_entries, |
6082 | pg_attr = &req.tqm_sp_pg_size_tqm_sp_lvl, | 6152 | pg_attr = &req.tqm_sp_pg_size_tqm_sp_lvl, |
6083 | pg_dir = &req.tqm_sp_page_dir, | 6153 | pg_dir = &req.tqm_sp_page_dir, |
@@ -6098,25 +6168,104 @@ static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables) | |||
6098 | } | 6168 | } |
6099 | 6169 | ||
6100 | static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp, | 6170 | static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp, |
6101 | struct bnxt_ctx_pg_info *ctx_pg, u32 mem_size) | 6171 | struct bnxt_ctx_pg_info *ctx_pg) |
6102 | { | 6172 | { |
6103 | struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem; | 6173 | struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem; |
6104 | 6174 | ||
6105 | if (!mem_size) | ||
6106 | return 0; | ||
6107 | |||
6108 | rmem->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE); | ||
6109 | if (rmem->nr_pages > MAX_CTX_PAGES) { | ||
6110 | rmem->nr_pages = 0; | ||
6111 | return -EINVAL; | ||
6112 | } | ||
6113 | rmem->page_size = BNXT_PAGE_SIZE; | 6175 | rmem->page_size = BNXT_PAGE_SIZE; |
6114 | rmem->pg_arr = ctx_pg->ctx_pg_arr; | 6176 | rmem->pg_arr = ctx_pg->ctx_pg_arr; |
6115 | rmem->dma_arr = ctx_pg->ctx_dma_arr; | 6177 | rmem->dma_arr = ctx_pg->ctx_dma_arr; |
6116 | rmem->flags = BNXT_RMEM_VALID_PTE_FLAG; | 6178 | rmem->flags = BNXT_RMEM_VALID_PTE_FLAG; |
6179 | if (rmem->depth >= 1) | ||
6180 | rmem->flags |= BNXT_RMEM_USE_FULL_PAGE_FLAG; | ||
6117 | return bnxt_alloc_ring(bp, rmem); | 6181 | return bnxt_alloc_ring(bp, rmem); |
6118 | } | 6182 | } |
6119 | 6183 | ||
6184 | static int bnxt_alloc_ctx_pg_tbls(struct bnxt *bp, | ||
6185 | struct bnxt_ctx_pg_info *ctx_pg, u32 mem_size, | ||
6186 | u8 depth) | ||
6187 | { | ||
6188 | struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem; | ||
6189 | int rc; | ||
6190 | |||
6191 | if (!mem_size) | ||
6192 | return 0; | ||
6193 | |||
6194 | ctx_pg->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE); | ||
6195 | if (ctx_pg->nr_pages > MAX_CTX_TOTAL_PAGES) { | ||
6196 | ctx_pg->nr_pages = 0; | ||
6197 | return -EINVAL; | ||
6198 | } | ||
6199 | if (ctx_pg->nr_pages > MAX_CTX_PAGES || depth > 1) { | ||
6200 | int nr_tbls, i; | ||
6201 | |||
6202 | rmem->depth = 2; | ||
6203 | ctx_pg->ctx_pg_tbl = kcalloc(MAX_CTX_PAGES, sizeof(ctx_pg), | ||
6204 | GFP_KERNEL); | ||
6205 | if (!ctx_pg->ctx_pg_tbl) | ||
6206 | return -ENOMEM; | ||
6207 | nr_tbls = DIV_ROUND_UP(ctx_pg->nr_pages, MAX_CTX_PAGES); | ||
6208 | rmem->nr_pages = nr_tbls; | ||
6209 | rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg); | ||
6210 | if (rc) | ||
6211 | return rc; | ||
6212 | for (i = 0; i < nr_tbls; i++) { | ||
6213 | struct bnxt_ctx_pg_info *pg_tbl; | ||
6214 | |||
6215 | pg_tbl = kzalloc(sizeof(*pg_tbl), GFP_KERNEL); | ||
6216 | if (!pg_tbl) | ||
6217 | return -ENOMEM; | ||
6218 | ctx_pg->ctx_pg_tbl[i] = pg_tbl; | ||
6219 | rmem = &pg_tbl->ring_mem; | ||
6220 | rmem->pg_tbl = ctx_pg->ctx_pg_arr[i]; | ||
6221 | rmem->pg_tbl_map = ctx_pg->ctx_dma_arr[i]; | ||
6222 | rmem->depth = 1; | ||
6223 | rmem->nr_pages = MAX_CTX_PAGES; | ||
6224 | if (i == (nr_tbls - 1)) | ||
6225 | rmem->nr_pages = ctx_pg->nr_pages % | ||
6226 | MAX_CTX_PAGES; | ||
6227 | rc = bnxt_alloc_ctx_mem_blk(bp, pg_tbl); | ||
6228 | if (rc) | ||
6229 | break; | ||
6230 | } | ||
6231 | } else { | ||
6232 | rmem->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE); | ||
6233 | if (rmem->nr_pages > 1 || depth) | ||
6234 | rmem->depth = 1; | ||
6235 | rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg); | ||
6236 | } | ||
6237 | return rc; | ||
6238 | } | ||
6239 | |||
6240 | static void bnxt_free_ctx_pg_tbls(struct bnxt *bp, | ||
6241 | struct bnxt_ctx_pg_info *ctx_pg) | ||
6242 | { | ||
6243 | struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem; | ||
6244 | |||
6245 | if (rmem->depth > 1 || ctx_pg->nr_pages > MAX_CTX_PAGES || | ||
6246 | ctx_pg->ctx_pg_tbl) { | ||
6247 | int i, nr_tbls = rmem->nr_pages; | ||
6248 | |||
6249 | for (i = 0; i < nr_tbls; i++) { | ||
6250 | struct bnxt_ctx_pg_info *pg_tbl; | ||
6251 | struct bnxt_ring_mem_info *rmem2; | ||
6252 | |||
6253 | pg_tbl = ctx_pg->ctx_pg_tbl[i]; | ||
6254 | if (!pg_tbl) | ||
6255 | continue; | ||
6256 | rmem2 = &pg_tbl->ring_mem; | ||
6257 | bnxt_free_ring(bp, rmem2); | ||
6258 | ctx_pg->ctx_pg_arr[i] = NULL; | ||
6259 | kfree(pg_tbl); | ||
6260 | ctx_pg->ctx_pg_tbl[i] = NULL; | ||
6261 | } | ||
6262 | kfree(ctx_pg->ctx_pg_tbl); | ||
6263 | ctx_pg->ctx_pg_tbl = NULL; | ||
6264 | } | ||
6265 | bnxt_free_ring(bp, rmem); | ||
6266 | ctx_pg->nr_pages = 0; | ||
6267 | } | ||
6268 | |||
6120 | static void bnxt_free_ctx_mem(struct bnxt *bp) | 6269 | static void bnxt_free_ctx_mem(struct bnxt *bp) |
6121 | { | 6270 | { |
6122 | struct bnxt_ctx_mem_info *ctx = bp->ctx; | 6271 | struct bnxt_ctx_mem_info *ctx = bp->ctx; |
@@ -6127,16 +6276,18 @@ static void bnxt_free_ctx_mem(struct bnxt *bp) | |||
6127 | 6276 | ||
6128 | if (ctx->tqm_mem[0]) { | 6277 | if (ctx->tqm_mem[0]) { |
6129 | for (i = 0; i < bp->max_q + 1; i++) | 6278 | for (i = 0; i < bp->max_q + 1; i++) |
6130 | bnxt_free_ring(bp, &ctx->tqm_mem[i]->ring_mem); | 6279 | bnxt_free_ctx_pg_tbls(bp, ctx->tqm_mem[i]); |
6131 | kfree(ctx->tqm_mem[0]); | 6280 | kfree(ctx->tqm_mem[0]); |
6132 | ctx->tqm_mem[0] = NULL; | 6281 | ctx->tqm_mem[0] = NULL; |
6133 | } | 6282 | } |
6134 | 6283 | ||
6135 | bnxt_free_ring(bp, &ctx->stat_mem.ring_mem); | 6284 | bnxt_free_ctx_pg_tbls(bp, &ctx->tim_mem); |
6136 | bnxt_free_ring(bp, &ctx->vnic_mem.ring_mem); | 6285 | bnxt_free_ctx_pg_tbls(bp, &ctx->mrav_mem); |
6137 | bnxt_free_ring(bp, &ctx->cq_mem.ring_mem); | 6286 | bnxt_free_ctx_pg_tbls(bp, &ctx->stat_mem); |
6138 | bnxt_free_ring(bp, &ctx->srq_mem.ring_mem); | 6287 | bnxt_free_ctx_pg_tbls(bp, &ctx->vnic_mem); |
6139 | bnxt_free_ring(bp, &ctx->qp_mem.ring_mem); | 6288 | bnxt_free_ctx_pg_tbls(bp, &ctx->cq_mem); |
6289 | bnxt_free_ctx_pg_tbls(bp, &ctx->srq_mem); | ||
6290 | bnxt_free_ctx_pg_tbls(bp, &ctx->qp_mem); | ||
6140 | ctx->flags &= ~BNXT_CTX_FLAG_INITED; | 6291 | ctx->flags &= ~BNXT_CTX_FLAG_INITED; |
6141 | } | 6292 | } |
6142 | 6293 | ||
@@ -6145,6 +6296,9 @@ static int bnxt_alloc_ctx_mem(struct bnxt *bp) | |||
6145 | struct bnxt_ctx_pg_info *ctx_pg; | 6296 | struct bnxt_ctx_pg_info *ctx_pg; |
6146 | struct bnxt_ctx_mem_info *ctx; | 6297 | struct bnxt_ctx_mem_info *ctx; |
6147 | u32 mem_size, ena, entries; | 6298 | u32 mem_size, ena, entries; |
6299 | u32 extra_srqs = 0; | ||
6300 | u32 extra_qps = 0; | ||
6301 | u8 pg_lvl = 1; | ||
6148 | int i, rc; | 6302 | int i, rc; |
6149 | 6303 | ||
6150 | rc = bnxt_hwrm_func_backing_store_qcaps(bp); | 6304 | rc = bnxt_hwrm_func_backing_store_qcaps(bp); |
@@ -6157,24 +6311,31 @@ static int bnxt_alloc_ctx_mem(struct bnxt *bp) | |||
6157 | if (!ctx || (ctx->flags & BNXT_CTX_FLAG_INITED)) | 6311 | if (!ctx || (ctx->flags & BNXT_CTX_FLAG_INITED)) |
6158 | return 0; | 6312 | return 0; |
6159 | 6313 | ||
6314 | if (bp->flags & BNXT_FLAG_ROCE_CAP) { | ||
6315 | pg_lvl = 2; | ||
6316 | extra_qps = 65536; | ||
6317 | extra_srqs = 8192; | ||
6318 | } | ||
6319 | |||
6160 | ctx_pg = &ctx->qp_mem; | 6320 | ctx_pg = &ctx->qp_mem; |
6161 | ctx_pg->entries = ctx->qp_min_qp1_entries + ctx->qp_max_l2_entries; | 6321 | ctx_pg->entries = ctx->qp_min_qp1_entries + ctx->qp_max_l2_entries + |
6322 | extra_qps; | ||
6162 | mem_size = ctx->qp_entry_size * ctx_pg->entries; | 6323 | mem_size = ctx->qp_entry_size * ctx_pg->entries; |
6163 | rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size); | 6324 | rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl); |
6164 | if (rc) | 6325 | if (rc) |
6165 | return rc; | 6326 | return rc; |
6166 | 6327 | ||
6167 | ctx_pg = &ctx->srq_mem; | 6328 | ctx_pg = &ctx->srq_mem; |
6168 | ctx_pg->entries = ctx->srq_max_l2_entries; | 6329 | ctx_pg->entries = ctx->srq_max_l2_entries + extra_srqs; |
6169 | mem_size = ctx->srq_entry_size * ctx_pg->entries; | 6330 | mem_size = ctx->srq_entry_size * ctx_pg->entries; |
6170 | rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size); | 6331 | rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl); |
6171 | if (rc) | 6332 | if (rc) |
6172 | return rc; | 6333 | return rc; |
6173 | 6334 | ||
6174 | ctx_pg = &ctx->cq_mem; | 6335 | ctx_pg = &ctx->cq_mem; |
6175 | ctx_pg->entries = ctx->cq_max_l2_entries; | 6336 | ctx_pg->entries = ctx->cq_max_l2_entries + extra_qps * 2; |
6176 | mem_size = ctx->cq_entry_size * ctx_pg->entries; | 6337 | mem_size = ctx->cq_entry_size * ctx_pg->entries; |
6177 | rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size); | 6338 | rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl); |
6178 | if (rc) | 6339 | if (rc) |
6179 | return rc; | 6340 | return rc; |
6180 | 6341 | ||
@@ -6182,26 +6343,47 @@ static int bnxt_alloc_ctx_mem(struct bnxt *bp) | |||
6182 | ctx_pg->entries = ctx->vnic_max_vnic_entries + | 6343 | ctx_pg->entries = ctx->vnic_max_vnic_entries + |
6183 | ctx->vnic_max_ring_table_entries; | 6344 | ctx->vnic_max_ring_table_entries; |
6184 | mem_size = ctx->vnic_entry_size * ctx_pg->entries; | 6345 | mem_size = ctx->vnic_entry_size * ctx_pg->entries; |
6185 | rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size); | 6346 | rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1); |
6186 | if (rc) | 6347 | if (rc) |
6187 | return rc; | 6348 | return rc; |
6188 | 6349 | ||
6189 | ctx_pg = &ctx->stat_mem; | 6350 | ctx_pg = &ctx->stat_mem; |
6190 | ctx_pg->entries = ctx->stat_max_entries; | 6351 | ctx_pg->entries = ctx->stat_max_entries; |
6191 | mem_size = ctx->stat_entry_size * ctx_pg->entries; | 6352 | mem_size = ctx->stat_entry_size * ctx_pg->entries; |
6192 | rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size); | 6353 | rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1); |
6193 | if (rc) | 6354 | if (rc) |
6194 | return rc; | 6355 | return rc; |
6195 | 6356 | ||
6196 | entries = ctx->qp_max_l2_entries; | 6357 | ena = 0; |
6358 | if (!(bp->flags & BNXT_FLAG_ROCE_CAP)) | ||
6359 | goto skip_rdma; | ||
6360 | |||
6361 | ctx_pg = &ctx->mrav_mem; | ||
6362 | ctx_pg->entries = extra_qps * 4; | ||
6363 | mem_size = ctx->mrav_entry_size * ctx_pg->entries; | ||
6364 | rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 2); | ||
6365 | if (rc) | ||
6366 | return rc; | ||
6367 | ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV; | ||
6368 | |||
6369 | ctx_pg = &ctx->tim_mem; | ||
6370 | ctx_pg->entries = ctx->qp_mem.entries; | ||
6371 | mem_size = ctx->tim_entry_size * ctx_pg->entries; | ||
6372 | rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1); | ||
6373 | if (rc) | ||
6374 | return rc; | ||
6375 | ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM; | ||
6376 | |||
6377 | skip_rdma: | ||
6378 | entries = ctx->qp_max_l2_entries + extra_qps; | ||
6197 | entries = roundup(entries, ctx->tqm_entries_multiple); | 6379 | entries = roundup(entries, ctx->tqm_entries_multiple); |
6198 | entries = clamp_t(u32, entries, ctx->tqm_min_entries_per_ring, | 6380 | entries = clamp_t(u32, entries, ctx->tqm_min_entries_per_ring, |
6199 | ctx->tqm_max_entries_per_ring); | 6381 | ctx->tqm_max_entries_per_ring); |
6200 | for (i = 0, ena = 0; i < bp->max_q + 1; i++) { | 6382 | for (i = 0; i < bp->max_q + 1; i++) { |
6201 | ctx_pg = ctx->tqm_mem[i]; | 6383 | ctx_pg = ctx->tqm_mem[i]; |
6202 | ctx_pg->entries = entries; | 6384 | ctx_pg->entries = entries; |
6203 | mem_size = ctx->tqm_entry_size * entries; | 6385 | mem_size = ctx->tqm_entry_size * entries; |
6204 | rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size); | 6386 | rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1); |
6205 | if (rc) | 6387 | if (rc) |
6206 | return rc; | 6388 | return rc; |
6207 | ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP << i; | 6389 | ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP << i; |
@@ -6481,6 +6663,13 @@ static int bnxt_hwrm_ver_get(struct bnxt *bp) | |||
6481 | (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_REQUIRED)) | 6663 | (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_REQUIRED)) |
6482 | bp->fw_cap |= BNXT_FW_CAP_SHORT_CMD; | 6664 | bp->fw_cap |= BNXT_FW_CAP_SHORT_CMD; |
6483 | 6665 | ||
6666 | if (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_KONG_MB_CHNL_SUPPORTED) | ||
6667 | bp->fw_cap |= BNXT_FW_CAP_KONG_MB_CHNL; | ||
6668 | |||
6669 | if (dev_caps_cfg & | ||
6670 | VER_GET_RESP_DEV_CAPS_CFG_FLOW_HANDLE_64BIT_SUPPORTED) | ||
6671 | bp->fw_cap |= BNXT_FW_CAP_OVS_64BIT_HANDLE; | ||
6672 | |||
6484 | hwrm_ver_get_exit: | 6673 | hwrm_ver_get_exit: |
6485 | mutex_unlock(&bp->hwrm_cmd_lock); | 6674 | mutex_unlock(&bp->hwrm_cmd_lock); |
6486 | return rc; | 6675 | return rc; |
@@ -9227,7 +9416,7 @@ static void bnxt_init_dflt_coal(struct bnxt *bp) | |||
9227 | * 1 coal_buf x bufs_per_record = 1 completion record. | 9416 | * 1 coal_buf x bufs_per_record = 1 completion record. |
9228 | */ | 9417 | */ |
9229 | coal = &bp->rx_coal; | 9418 | coal = &bp->rx_coal; |
9230 | coal->coal_ticks = 14; | 9419 | coal->coal_ticks = 10; |
9231 | coal->coal_bufs = 30; | 9420 | coal->coal_bufs = 30; |
9232 | coal->coal_ticks_irq = 1; | 9421 | coal->coal_ticks_irq = 1; |
9233 | coal->coal_bufs_irq = 2; | 9422 | coal->coal_bufs_irq = 2; |
@@ -10219,6 +10408,12 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
10219 | if (rc) | 10408 | if (rc) |
10220 | goto init_err_pci_clean; | 10409 | goto init_err_pci_clean; |
10221 | 10410 | ||
10411 | if (bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL) { | ||
10412 | rc = bnxt_alloc_kong_hwrm_resources(bp); | ||
10413 | if (rc) | ||
10414 | bp->fw_cap &= ~BNXT_FW_CAP_KONG_MB_CHNL; | ||
10415 | } | ||
10416 | |||
10222 | if ((bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) || | 10417 | if ((bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) || |
10223 | bp->hwrm_max_ext_req_len > BNXT_HWRM_MAX_REQ_LEN) { | 10418 | bp->hwrm_max_ext_req_len > BNXT_HWRM_MAX_REQ_LEN) { |
10224 | rc = bnxt_alloc_hwrm_short_cmd_req(bp); | 10419 | rc = bnxt_alloc_hwrm_short_cmd_req(bp); |
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index 4fdfd7a87805..a451796deefe 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h | |||
@@ -567,7 +567,6 @@ struct nqe_cn { | |||
567 | #define HWRM_RESP_LEN_MASK 0xffff0000 | 567 | #define HWRM_RESP_LEN_MASK 0xffff0000 |
568 | #define HWRM_RESP_LEN_SFT 16 | 568 | #define HWRM_RESP_LEN_SFT 16 |
569 | #define HWRM_RESP_VALID_MASK 0xff000000 | 569 | #define HWRM_RESP_VALID_MASK 0xff000000 |
570 | #define HWRM_SEQ_ID_INVALID -1 | ||
571 | #define BNXT_HWRM_REQ_MAX_SIZE 128 | 570 | #define BNXT_HWRM_REQ_MAX_SIZE 128 |
572 | #define BNXT_HWRM_REQS_PER_PAGE (BNXT_PAGE_SIZE / \ | 571 | #define BNXT_HWRM_REQS_PER_PAGE (BNXT_PAGE_SIZE / \ |
573 | BNXT_HWRM_REQ_MAX_SIZE) | 572 | BNXT_HWRM_REQ_MAX_SIZE) |
@@ -585,6 +584,9 @@ struct nqe_cn { | |||
585 | 584 | ||
586 | #define HWRM_VALID_BIT_DELAY_USEC 20 | 585 | #define HWRM_VALID_BIT_DELAY_USEC 20 |
587 | 586 | ||
587 | #define BNXT_HWRM_CHNL_CHIMP 0 | ||
588 | #define BNXT_HWRM_CHNL_KONG 1 | ||
589 | |||
588 | #define BNXT_RX_EVENT 1 | 590 | #define BNXT_RX_EVENT 1 |
589 | #define BNXT_AGG_EVENT 2 | 591 | #define BNXT_AGG_EVENT 2 |
590 | #define BNXT_TX_EVENT 4 | 592 | #define BNXT_TX_EVENT 4 |
@@ -615,9 +617,12 @@ struct bnxt_sw_rx_agg_bd { | |||
615 | struct bnxt_ring_mem_info { | 617 | struct bnxt_ring_mem_info { |
616 | int nr_pages; | 618 | int nr_pages; |
617 | int page_size; | 619 | int page_size; |
618 | u32 flags; | 620 | u16 flags; |
619 | #define BNXT_RMEM_VALID_PTE_FLAG 1 | 621 | #define BNXT_RMEM_VALID_PTE_FLAG 1 |
620 | #define BNXT_RMEM_RING_PTE_FLAG 2 | 622 | #define BNXT_RMEM_RING_PTE_FLAG 2 |
623 | #define BNXT_RMEM_USE_FULL_PAGE_FLAG 4 | ||
624 | |||
625 | u16 depth; | ||
621 | 626 | ||
622 | void **pg_arr; | 627 | void **pg_arr; |
623 | dma_addr_t *dma_arr; | 628 | dma_addr_t *dma_arr; |
@@ -1113,9 +1118,14 @@ struct bnxt_test_info { | |||
1113 | char string[BNXT_MAX_TEST][ETH_GSTRING_LEN]; | 1118 | char string[BNXT_MAX_TEST][ETH_GSTRING_LEN]; |
1114 | }; | 1119 | }; |
1115 | 1120 | ||
1116 | #define BNXT_GRCPF_REG_WINDOW_BASE_OUT 0x400 | 1121 | #define BNXT_GRCPF_REG_CHIMP_COMM 0x0 |
1117 | #define BNXT_CAG_REG_LEGACY_INT_STATUS 0x4014 | 1122 | #define BNXT_GRCPF_REG_CHIMP_COMM_TRIGGER 0x100 |
1118 | #define BNXT_CAG_REG_BASE 0x300000 | 1123 | #define BNXT_GRCPF_REG_WINDOW_BASE_OUT 0x400 |
1124 | #define BNXT_CAG_REG_LEGACY_INT_STATUS 0x4014 | ||
1125 | #define BNXT_CAG_REG_BASE 0x300000 | ||
1126 | |||
1127 | #define BNXT_GRCPF_REG_KONG_COMM 0xA00 | ||
1128 | #define BNXT_GRCPF_REG_KONG_COMM_TRIGGER 0xB00 | ||
1119 | 1129 | ||
1120 | struct bnxt_tc_flow_stats { | 1130 | struct bnxt_tc_flow_stats { |
1121 | u64 packets; | 1131 | u64 packets; |
@@ -1183,12 +1193,15 @@ struct bnxt_vf_rep { | |||
1183 | #define PTU_PTE_NEXT_TO_LAST 0x4UL | 1193 | #define PTU_PTE_NEXT_TO_LAST 0x4UL |
1184 | 1194 | ||
1185 | #define MAX_CTX_PAGES (BNXT_PAGE_SIZE / 8) | 1195 | #define MAX_CTX_PAGES (BNXT_PAGE_SIZE / 8) |
1196 | #define MAX_CTX_TOTAL_PAGES (MAX_CTX_PAGES * MAX_CTX_PAGES) | ||
1186 | 1197 | ||
1187 | struct bnxt_ctx_pg_info { | 1198 | struct bnxt_ctx_pg_info { |
1188 | u32 entries; | 1199 | u32 entries; |
1200 | u32 nr_pages; | ||
1189 | void *ctx_pg_arr[MAX_CTX_PAGES]; | 1201 | void *ctx_pg_arr[MAX_CTX_PAGES]; |
1190 | dma_addr_t ctx_dma_arr[MAX_CTX_PAGES]; | 1202 | dma_addr_t ctx_dma_arr[MAX_CTX_PAGES]; |
1191 | struct bnxt_ring_mem_info ring_mem; | 1203 | struct bnxt_ring_mem_info ring_mem; |
1204 | struct bnxt_ctx_pg_info **ctx_pg_tbl; | ||
1192 | }; | 1205 | }; |
1193 | 1206 | ||
1194 | struct bnxt_ctx_mem_info { | 1207 | struct bnxt_ctx_mem_info { |
@@ -1224,6 +1237,8 @@ struct bnxt_ctx_mem_info { | |||
1224 | struct bnxt_ctx_pg_info cq_mem; | 1237 | struct bnxt_ctx_pg_info cq_mem; |
1225 | struct bnxt_ctx_pg_info vnic_mem; | 1238 | struct bnxt_ctx_pg_info vnic_mem; |
1226 | struct bnxt_ctx_pg_info stat_mem; | 1239 | struct bnxt_ctx_pg_info stat_mem; |
1240 | struct bnxt_ctx_pg_info mrav_mem; | ||
1241 | struct bnxt_ctx_pg_info tim_mem; | ||
1227 | struct bnxt_ctx_pg_info *tqm_mem[9]; | 1242 | struct bnxt_ctx_pg_info *tqm_mem[9]; |
1228 | }; | 1243 | }; |
1229 | 1244 | ||
@@ -1457,20 +1472,25 @@ struct bnxt { | |||
1457 | u32 msg_enable; | 1472 | u32 msg_enable; |
1458 | 1473 | ||
1459 | u32 fw_cap; | 1474 | u32 fw_cap; |
1460 | #define BNXT_FW_CAP_SHORT_CMD 0x00000001 | 1475 | #define BNXT_FW_CAP_SHORT_CMD 0x00000001 |
1461 | #define BNXT_FW_CAP_LLDP_AGENT 0x00000002 | 1476 | #define BNXT_FW_CAP_LLDP_AGENT 0x00000002 |
1462 | #define BNXT_FW_CAP_DCBX_AGENT 0x00000004 | 1477 | #define BNXT_FW_CAP_DCBX_AGENT 0x00000004 |
1463 | #define BNXT_FW_CAP_NEW_RM 0x00000008 | 1478 | #define BNXT_FW_CAP_NEW_RM 0x00000008 |
1464 | #define BNXT_FW_CAP_IF_CHANGE 0x00000010 | 1479 | #define BNXT_FW_CAP_IF_CHANGE 0x00000010 |
1480 | #define BNXT_FW_CAP_KONG_MB_CHNL 0x00000080 | ||
1481 | #define BNXT_FW_CAP_OVS_64BIT_HANDLE 0x00000400 | ||
1465 | 1482 | ||
1466 | #define BNXT_NEW_RM(bp) ((bp)->fw_cap & BNXT_FW_CAP_NEW_RM) | 1483 | #define BNXT_NEW_RM(bp) ((bp)->fw_cap & BNXT_FW_CAP_NEW_RM) |
1467 | u32 hwrm_spec_code; | 1484 | u32 hwrm_spec_code; |
1468 | u16 hwrm_cmd_seq; | 1485 | u16 hwrm_cmd_seq; |
1469 | u32 hwrm_intr_seq_id; | 1486 | u16 hwrm_cmd_kong_seq; |
1487 | u16 hwrm_intr_seq_id; | ||
1470 | void *hwrm_short_cmd_req_addr; | 1488 | void *hwrm_short_cmd_req_addr; |
1471 | dma_addr_t hwrm_short_cmd_req_dma_addr; | 1489 | dma_addr_t hwrm_short_cmd_req_dma_addr; |
1472 | void *hwrm_cmd_resp_addr; | 1490 | void *hwrm_cmd_resp_addr; |
1473 | dma_addr_t hwrm_cmd_resp_dma_addr; | 1491 | dma_addr_t hwrm_cmd_resp_dma_addr; |
1492 | void *hwrm_cmd_kong_resp_addr; | ||
1493 | dma_addr_t hwrm_cmd_kong_resp_dma_addr; | ||
1474 | 1494 | ||
1475 | struct rtnl_link_stats64 net_stats_prev; | 1495 | struct rtnl_link_stats64 net_stats_prev; |
1476 | struct rx_port_stats *hw_rx_port_stats; | 1496 | struct rx_port_stats *hw_rx_port_stats; |
@@ -1672,6 +1692,66 @@ static inline void bnxt_db_write(struct bnxt *bp, struct bnxt_db_info *db, | |||
1672 | } | 1692 | } |
1673 | } | 1693 | } |
1674 | 1694 | ||
1695 | static inline bool bnxt_cfa_hwrm_message(u16 req_type) | ||
1696 | { | ||
1697 | switch (req_type) { | ||
1698 | case HWRM_CFA_ENCAP_RECORD_ALLOC: | ||
1699 | case HWRM_CFA_ENCAP_RECORD_FREE: | ||
1700 | case HWRM_CFA_DECAP_FILTER_ALLOC: | ||
1701 | case HWRM_CFA_DECAP_FILTER_FREE: | ||
1702 | case HWRM_CFA_NTUPLE_FILTER_ALLOC: | ||
1703 | case HWRM_CFA_NTUPLE_FILTER_FREE: | ||
1704 | case HWRM_CFA_NTUPLE_FILTER_CFG: | ||
1705 | case HWRM_CFA_EM_FLOW_ALLOC: | ||
1706 | case HWRM_CFA_EM_FLOW_FREE: | ||
1707 | case HWRM_CFA_EM_FLOW_CFG: | ||
1708 | case HWRM_CFA_FLOW_ALLOC: | ||
1709 | case HWRM_CFA_FLOW_FREE: | ||
1710 | case HWRM_CFA_FLOW_INFO: | ||
1711 | case HWRM_CFA_FLOW_FLUSH: | ||
1712 | case HWRM_CFA_FLOW_STATS: | ||
1713 | case HWRM_CFA_METER_PROFILE_ALLOC: | ||
1714 | case HWRM_CFA_METER_PROFILE_FREE: | ||
1715 | case HWRM_CFA_METER_PROFILE_CFG: | ||
1716 | case HWRM_CFA_METER_INSTANCE_ALLOC: | ||
1717 | case HWRM_CFA_METER_INSTANCE_FREE: | ||
1718 | return true; | ||
1719 | default: | ||
1720 | return false; | ||
1721 | } | ||
1722 | } | ||
1723 | |||
1724 | static inline bool bnxt_kong_hwrm_message(struct bnxt *bp, struct input *req) | ||
1725 | { | ||
1726 | return (bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL && | ||
1727 | bnxt_cfa_hwrm_message(le16_to_cpu(req->req_type))); | ||
1728 | } | ||
1729 | |||
1730 | static inline bool bnxt_hwrm_kong_chnl(struct bnxt *bp, struct input *req) | ||
1731 | { | ||
1732 | return (bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL && | ||
1733 | req->resp_addr == cpu_to_le64(bp->hwrm_cmd_kong_resp_dma_addr)); | ||
1734 | } | ||
1735 | |||
1736 | static inline void *bnxt_get_hwrm_resp_addr(struct bnxt *bp, void *req) | ||
1737 | { | ||
1738 | if (bnxt_hwrm_kong_chnl(bp, (struct input *)req)) | ||
1739 | return bp->hwrm_cmd_kong_resp_addr; | ||
1740 | else | ||
1741 | return bp->hwrm_cmd_resp_addr; | ||
1742 | } | ||
1743 | |||
1744 | static inline u16 bnxt_get_hwrm_seq_id(struct bnxt *bp, u16 dst) | ||
1745 | { | ||
1746 | u16 seq_id; | ||
1747 | |||
1748 | if (dst == BNXT_HWRM_CHNL_CHIMP) | ||
1749 | seq_id = bp->hwrm_cmd_seq++; | ||
1750 | else | ||
1751 | seq_id = bp->hwrm_cmd_kong_seq++; | ||
1752 | return seq_id; | ||
1753 | } | ||
1754 | |||
1675 | extern const u16 bnxt_lhint_arr[]; | 1755 | extern const u16 bnxt_lhint_arr[]; |
1676 | 1756 | ||
1677 | int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, | 1757 | int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, |
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h index 5dd086059568..f1aaac8e6268 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h | |||
@@ -194,6 +194,8 @@ struct cmd_nums { | |||
194 | #define HWRM_STAT_CTX_QUERY 0xb2UL | 194 | #define HWRM_STAT_CTX_QUERY 0xb2UL |
195 | #define HWRM_STAT_CTX_CLR_STATS 0xb3UL | 195 | #define HWRM_STAT_CTX_CLR_STATS 0xb3UL |
196 | #define HWRM_PORT_QSTATS_EXT 0xb4UL | 196 | #define HWRM_PORT_QSTATS_EXT 0xb4UL |
197 | #define HWRM_PORT_PHY_MDIO_WRITE 0xb5UL | ||
198 | #define HWRM_PORT_PHY_MDIO_READ 0xb6UL | ||
197 | #define HWRM_FW_RESET 0xc0UL | 199 | #define HWRM_FW_RESET 0xc0UL |
198 | #define HWRM_FW_QSTATUS 0xc1UL | 200 | #define HWRM_FW_QSTATUS 0xc1UL |
199 | #define HWRM_FW_HEALTH_CHECK 0xc2UL | 201 | #define HWRM_FW_HEALTH_CHECK 0xc2UL |
@@ -213,6 +215,7 @@ struct cmd_nums { | |||
213 | #define HWRM_WOL_FILTER_FREE 0xf1UL | 215 | #define HWRM_WOL_FILTER_FREE 0xf1UL |
214 | #define HWRM_WOL_FILTER_QCFG 0xf2UL | 216 | #define HWRM_WOL_FILTER_QCFG 0xf2UL |
215 | #define HWRM_WOL_REASON_QCFG 0xf3UL | 217 | #define HWRM_WOL_REASON_QCFG 0xf3UL |
218 | #define HWRM_CFA_METER_QCAPS 0xf4UL | ||
216 | #define HWRM_CFA_METER_PROFILE_ALLOC 0xf5UL | 219 | #define HWRM_CFA_METER_PROFILE_ALLOC 0xf5UL |
217 | #define HWRM_CFA_METER_PROFILE_FREE 0xf6UL | 220 | #define HWRM_CFA_METER_PROFILE_FREE 0xf6UL |
218 | #define HWRM_CFA_METER_PROFILE_CFG 0xf7UL | 221 | #define HWRM_CFA_METER_PROFILE_CFG 0xf7UL |
@@ -239,6 +242,24 @@ struct cmd_nums { | |||
239 | #define HWRM_FW_IPC_MSG 0x110UL | 242 | #define HWRM_FW_IPC_MSG 0x110UL |
240 | #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_INFO 0x111UL | 243 | #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_INFO 0x111UL |
241 | #define HWRM_CFA_REDIRECT_QUERY_TUNNEL_TYPE 0x112UL | 244 | #define HWRM_CFA_REDIRECT_QUERY_TUNNEL_TYPE 0x112UL |
245 | #define HWRM_CFA_FLOW_AGING_TIMER_RESET 0x113UL | ||
246 | #define HWRM_CFA_FLOW_AGING_CFG 0x114UL | ||
247 | #define HWRM_CFA_FLOW_AGING_QCFG 0x115UL | ||
248 | #define HWRM_CFA_FLOW_AGING_QCAPS 0x116UL | ||
249 | #define HWRM_CFA_CTX_MEM_RGTR 0x117UL | ||
250 | #define HWRM_CFA_CTX_MEM_UNRGTR 0x118UL | ||
251 | #define HWRM_CFA_CTX_MEM_QCTX 0x119UL | ||
252 | #define HWRM_CFA_CTX_MEM_QCAPS 0x11aUL | ||
253 | #define HWRM_CFA_COUNTER_QCAPS 0x11bUL | ||
254 | #define HWRM_CFA_COUNTER_CFG 0x11cUL | ||
255 | #define HWRM_CFA_COUNTER_QCFG 0x11dUL | ||
256 | #define HWRM_CFA_COUNTER_QSTATS 0x11eUL | ||
257 | #define HWRM_CFA_TCP_FLAG_PROCESS_QCFG 0x11fUL | ||
258 | #define HWRM_CFA_EEM_QCAPS 0x120UL | ||
259 | #define HWRM_CFA_EEM_CFG 0x121UL | ||
260 | #define HWRM_CFA_EEM_QCFG 0x122UL | ||
261 | #define HWRM_CFA_EEM_OP 0x123UL | ||
262 | #define HWRM_CFA_ADV_FLOW_MGNT_QCAPS 0x124UL | ||
242 | #define HWRM_ENGINE_CKV_HELLO 0x12dUL | 263 | #define HWRM_ENGINE_CKV_HELLO 0x12dUL |
243 | #define HWRM_ENGINE_CKV_STATUS 0x12eUL | 264 | #define HWRM_ENGINE_CKV_STATUS 0x12eUL |
244 | #define HWRM_ENGINE_CKV_CKEK_ADD 0x12fUL | 265 | #define HWRM_ENGINE_CKV_CKEK_ADD 0x12fUL |
@@ -335,6 +356,8 @@ struct ret_codes { | |||
335 | #define HWRM_ERR_CODE_UNSUPPORTED_TLV 0x7UL | 356 | #define HWRM_ERR_CODE_UNSUPPORTED_TLV 0x7UL |
336 | #define HWRM_ERR_CODE_NO_BUFFER 0x8UL | 357 | #define HWRM_ERR_CODE_NO_BUFFER 0x8UL |
337 | #define HWRM_ERR_CODE_UNSUPPORTED_OPTION_ERR 0x9UL | 358 | #define HWRM_ERR_CODE_UNSUPPORTED_OPTION_ERR 0x9UL |
359 | #define HWRM_ERR_CODE_HOT_RESET_PROGRESS 0xaUL | ||
360 | #define HWRM_ERR_CODE_HOT_RESET_FAIL 0xbUL | ||
338 | #define HWRM_ERR_CODE_HWRM_ERROR 0xfUL | 361 | #define HWRM_ERR_CODE_HWRM_ERROR 0xfUL |
339 | #define HWRM_ERR_CODE_TLV_ENCAPSULATED_RESPONSE 0x8000UL | 362 | #define HWRM_ERR_CODE_TLV_ENCAPSULATED_RESPONSE 0x8000UL |
340 | #define HWRM_ERR_CODE_UNKNOWN_ERR 0xfffeUL | 363 | #define HWRM_ERR_CODE_UNKNOWN_ERR 0xfffeUL |
@@ -363,8 +386,8 @@ struct hwrm_err_output { | |||
363 | #define HWRM_VERSION_MAJOR 1 | 386 | #define HWRM_VERSION_MAJOR 1 |
364 | #define HWRM_VERSION_MINOR 10 | 387 | #define HWRM_VERSION_MINOR 10 |
365 | #define HWRM_VERSION_UPDATE 0 | 388 | #define HWRM_VERSION_UPDATE 0 |
366 | #define HWRM_VERSION_RSVD 3 | 389 | #define HWRM_VERSION_RSVD 33 |
367 | #define HWRM_VERSION_STR "1.10.0.3" | 390 | #define HWRM_VERSION_STR "1.10.0.33" |
368 | 391 | ||
369 | /* hwrm_ver_get_input (size:192b/24B) */ | 392 | /* hwrm_ver_get_input (size:192b/24B) */ |
370 | struct hwrm_ver_get_input { | 393 | struct hwrm_ver_get_input { |
@@ -411,6 +434,10 @@ struct hwrm_ver_get_output { | |||
411 | #define VER_GET_RESP_DEV_CAPS_CFG_L2_FILTER_TYPES_ROCE_OR_L2_SUPPORTED 0x40UL | 434 | #define VER_GET_RESP_DEV_CAPS_CFG_L2_FILTER_TYPES_ROCE_OR_L2_SUPPORTED 0x40UL |
412 | #define VER_GET_RESP_DEV_CAPS_CFG_VIRTIO_VSWITCH_OFFLOAD_SUPPORTED 0x80UL | 435 | #define VER_GET_RESP_DEV_CAPS_CFG_VIRTIO_VSWITCH_OFFLOAD_SUPPORTED 0x80UL |
413 | #define VER_GET_RESP_DEV_CAPS_CFG_TRUSTED_VF_SUPPORTED 0x100UL | 436 | #define VER_GET_RESP_DEV_CAPS_CFG_TRUSTED_VF_SUPPORTED 0x100UL |
437 | #define VER_GET_RESP_DEV_CAPS_CFG_FLOW_AGING_SUPPORTED 0x200UL | ||
438 | #define VER_GET_RESP_DEV_CAPS_CFG_ADV_FLOW_COUNTERS_SUPPORTED 0x400UL | ||
439 | #define VER_GET_RESP_DEV_CAPS_CFG_CFA_EEM_SUPPORTED 0x800UL | ||
440 | #define VER_GET_RESP_DEV_CAPS_CFG_CFA_ADV_FLOW_MGNT_SUPPORTED 0x1000UL | ||
414 | u8 roce_fw_maj_8b; | 441 | u8 roce_fw_maj_8b; |
415 | u8 roce_fw_min_8b; | 442 | u8 roce_fw_min_8b; |
416 | u8 roce_fw_bld_8b; | 443 | u8 roce_fw_bld_8b; |
@@ -465,14 +492,27 @@ struct hwrm_ver_get_output { | |||
465 | /* eject_cmpl (size:128b/16B) */ | 492 | /* eject_cmpl (size:128b/16B) */ |
466 | struct eject_cmpl { | 493 | struct eject_cmpl { |
467 | __le16 type; | 494 | __le16 type; |
468 | #define EJECT_CMPL_TYPE_MASK 0x3fUL | 495 | #define EJECT_CMPL_TYPE_MASK 0x3fUL |
469 | #define EJECT_CMPL_TYPE_SFT 0 | 496 | #define EJECT_CMPL_TYPE_SFT 0 |
470 | #define EJECT_CMPL_TYPE_STAT_EJECT 0x1aUL | 497 | #define EJECT_CMPL_TYPE_STAT_EJECT 0x1aUL |
471 | #define EJECT_CMPL_TYPE_LAST EJECT_CMPL_TYPE_STAT_EJECT | 498 | #define EJECT_CMPL_TYPE_LAST EJECT_CMPL_TYPE_STAT_EJECT |
499 | #define EJECT_CMPL_FLAGS_MASK 0xffc0UL | ||
500 | #define EJECT_CMPL_FLAGS_SFT 6 | ||
501 | #define EJECT_CMPL_FLAGS_ERROR 0x40UL | ||
472 | __le16 len; | 502 | __le16 len; |
473 | __le32 opaque; | 503 | __le32 opaque; |
474 | __le32 v; | 504 | __le16 v; |
475 | #define EJECT_CMPL_V 0x1UL | 505 | #define EJECT_CMPL_V 0x1UL |
506 | #define EJECT_CMPL_ERRORS_MASK 0xfffeUL | ||
507 | #define EJECT_CMPL_ERRORS_SFT 1 | ||
508 | #define EJECT_CMPL_ERRORS_BUFFER_ERROR_MASK 0xeUL | ||
509 | #define EJECT_CMPL_ERRORS_BUFFER_ERROR_SFT 1 | ||
510 | #define EJECT_CMPL_ERRORS_BUFFER_ERROR_NO_BUFFER (0x0UL << 1) | ||
511 | #define EJECT_CMPL_ERRORS_BUFFER_ERROR_DID_NOT_FIT (0x1UL << 1) | ||
512 | #define EJECT_CMPL_ERRORS_BUFFER_ERROR_BAD_FORMAT (0x3UL << 1) | ||
513 | #define EJECT_CMPL_ERRORS_BUFFER_ERROR_FLUSH (0x5UL << 1) | ||
514 | #define EJECT_CMPL_ERRORS_BUFFER_ERROR_LAST EJECT_CMPL_ERRORS_BUFFER_ERROR_FLUSH | ||
515 | __le16 reserved16; | ||
476 | __le32 unused_2; | 516 | __le32 unused_2; |
477 | }; | 517 | }; |
478 | 518 | ||
@@ -552,6 +592,10 @@ struct hwrm_async_event_cmpl { | |||
552 | #define ASYNC_EVENT_CMPL_EVENT_ID_LLFC_PFC_CHANGE 0x34UL | 592 | #define ASYNC_EVENT_CMPL_EVENT_ID_LLFC_PFC_CHANGE 0x34UL |
553 | #define ASYNC_EVENT_CMPL_EVENT_ID_DEFAULT_VNIC_CHANGE 0x35UL | 593 | #define ASYNC_EVENT_CMPL_EVENT_ID_DEFAULT_VNIC_CHANGE 0x35UL |
554 | #define ASYNC_EVENT_CMPL_EVENT_ID_HW_FLOW_AGED 0x36UL | 594 | #define ASYNC_EVENT_CMPL_EVENT_ID_HW_FLOW_AGED 0x36UL |
595 | #define ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION 0x37UL | ||
596 | #define ASYNC_EVENT_CMPL_EVENT_ID_EEM_CACHE_FLUSH_REQ 0x38UL | ||
597 | #define ASYNC_EVENT_CMPL_EVENT_ID_EEM_CACHE_FLUSH_DONE 0x39UL | ||
598 | #define ASYNC_EVENT_CMPL_EVENT_ID_FW_TRACE_MSG 0xfeUL | ||
555 | #define ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR 0xffUL | 599 | #define ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR 0xffUL |
556 | #define ASYNC_EVENT_CMPL_EVENT_ID_LAST ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR | 600 | #define ASYNC_EVENT_CMPL_EVENT_ID_LAST ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR |
557 | __le32 event_data2; | 601 | __le32 event_data2; |
@@ -647,6 +691,39 @@ struct hwrm_async_event_cmpl_link_speed_cfg_change { | |||
647 | #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_ILLEGAL_LINK_SPEED_CFG 0x20000UL | 691 | #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_ILLEGAL_LINK_SPEED_CFG 0x20000UL |
648 | }; | 692 | }; |
649 | 693 | ||
694 | /* hwrm_async_event_cmpl_reset_notify (size:128b/16B) */ | ||
695 | struct hwrm_async_event_cmpl_reset_notify { | ||
696 | __le16 type; | ||
697 | #define ASYNC_EVENT_CMPL_RESET_NOTIFY_TYPE_MASK 0x3fUL | ||
698 | #define ASYNC_EVENT_CMPL_RESET_NOTIFY_TYPE_SFT 0 | ||
699 | #define ASYNC_EVENT_CMPL_RESET_NOTIFY_TYPE_HWRM_ASYNC_EVENT 0x2eUL | ||
700 | #define ASYNC_EVENT_CMPL_RESET_NOTIFY_TYPE_LAST ASYNC_EVENT_CMPL_RESET_NOTIFY_TYPE_HWRM_ASYNC_EVENT | ||
701 | __le16 event_id; | ||
702 | #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_ID_RESET_NOTIFY 0x8UL | ||
703 | #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_ID_LAST ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_ID_RESET_NOTIFY | ||
704 | __le32 event_data2; | ||
705 | u8 opaque_v; | ||
706 | #define ASYNC_EVENT_CMPL_RESET_NOTIFY_V 0x1UL | ||
707 | #define ASYNC_EVENT_CMPL_RESET_NOTIFY_OPAQUE_MASK 0xfeUL | ||
708 | #define ASYNC_EVENT_CMPL_RESET_NOTIFY_OPAQUE_SFT 1 | ||
709 | u8 timestamp_lo; | ||
710 | __le16 timestamp_hi; | ||
711 | __le32 event_data1; | ||
712 | #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DRIVER_ACTION_MASK 0xffUL | ||
713 | #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DRIVER_ACTION_SFT 0 | ||
714 | #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DRIVER_ACTION_DRIVER_STOP_TX_QUEUE 0x1UL | ||
715 | #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DRIVER_ACTION_DRIVER_IFDOWN 0x2UL | ||
716 | #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DRIVER_ACTION_LAST ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DRIVER_ACTION_DRIVER_IFDOWN | ||
717 | #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_MASK 0xff00UL | ||
718 | #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_SFT 8 | ||
719 | #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_MANAGEMENT_RESET_REQUEST (0x1UL << 8) | ||
720 | #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FW_EXCEPTION_FATAL (0x2UL << 8) | ||
721 | #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FW_EXCEPTION_NON_FATAL (0x3UL << 8) | ||
722 | #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_LAST ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FW_EXCEPTION_NON_FATAL | ||
723 | #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DELAY_IN_100MS_TICKS_MASK 0xffff0000UL | ||
724 | #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DELAY_IN_100MS_TICKS_SFT 16 | ||
725 | }; | ||
726 | |||
650 | /* hwrm_async_event_cmpl_vf_cfg_change (size:128b/16B) */ | 727 | /* hwrm_async_event_cmpl_vf_cfg_change (size:128b/16B) */ |
651 | struct hwrm_async_event_cmpl_vf_cfg_change { | 728 | struct hwrm_async_event_cmpl_vf_cfg_change { |
652 | __le16 type; | 729 | __le16 type; |
@@ -672,6 +749,74 @@ struct hwrm_async_event_cmpl_vf_cfg_change { | |||
672 | #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_TRUSTED_VF_CFG_CHANGE 0x10UL | 749 | #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_TRUSTED_VF_CFG_CHANGE 0x10UL |
673 | }; | 750 | }; |
674 | 751 | ||
752 | /* hwrm_async_event_cmpl_hw_flow_aged (size:128b/16B) */ | ||
753 | struct hwrm_async_event_cmpl_hw_flow_aged { | ||
754 | __le16 type; | ||
755 | #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_TYPE_MASK 0x3fUL | ||
756 | #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_TYPE_SFT 0 | ||
757 | #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_TYPE_HWRM_ASYNC_EVENT 0x2eUL | ||
758 | #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_TYPE_LAST ASYNC_EVENT_CMPL_HW_FLOW_AGED_TYPE_HWRM_ASYNC_EVENT | ||
759 | __le16 event_id; | ||
760 | #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_ID_HW_FLOW_AGED 0x36UL | ||
761 | #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_ID_LAST ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_ID_HW_FLOW_AGED | ||
762 | __le32 event_data2; | ||
763 | u8 opaque_v; | ||
764 | #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_V 0x1UL | ||
765 | #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_OPAQUE_MASK 0xfeUL | ||
766 | #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_OPAQUE_SFT 1 | ||
767 | u8 timestamp_lo; | ||
768 | __le16 timestamp_hi; | ||
769 | __le32 event_data1; | ||
770 | #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_DATA1_FLOW_ID_MASK 0x7fffffffUL | ||
771 | #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_DATA1_FLOW_ID_SFT 0 | ||
772 | #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_DATA1_FLOW_DIRECTION 0x80000000UL | ||
773 | #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_DATA1_FLOW_DIRECTION_RX (0x0UL << 31) | ||
774 | #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_DATA1_FLOW_DIRECTION_TX (0x1UL << 31) | ||
775 | #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_DATA1_FLOW_DIRECTION_LAST ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_DATA1_FLOW_DIRECTION_TX | ||
776 | }; | ||
777 | |||
778 | /* hwrm_async_event_cmpl_eem_cache_flush_req (size:128b/16B) */ | ||
779 | struct hwrm_async_event_cmpl_eem_cache_flush_req { | ||
780 | __le16 type; | ||
781 | #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_TYPE_MASK 0x3fUL | ||
782 | #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_TYPE_SFT 0 | ||
783 | #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_TYPE_HWRM_ASYNC_EVENT 0x2eUL | ||
784 | #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_TYPE_LAST ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_TYPE_HWRM_ASYNC_EVENT | ||
785 | __le16 event_id; | ||
786 | #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_EVENT_ID_EEM_CACHE_FLUSH_REQ 0x38UL | ||
787 | #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_EVENT_ID_LAST ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_EVENT_ID_EEM_CACHE_FLUSH_REQ | ||
788 | __le32 event_data2; | ||
789 | u8 opaque_v; | ||
790 | #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_V 0x1UL | ||
791 | #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_OPAQUE_MASK 0xfeUL | ||
792 | #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_OPAQUE_SFT 1 | ||
793 | u8 timestamp_lo; | ||
794 | __le16 timestamp_hi; | ||
795 | __le32 event_data1; | ||
796 | }; | ||
797 | |||
798 | /* hwrm_async_event_cmpl_eem_cache_flush_done (size:128b/16B) */ | ||
799 | struct hwrm_async_event_cmpl_eem_cache_flush_done { | ||
800 | __le16 type; | ||
801 | #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_TYPE_MASK 0x3fUL | ||
802 | #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_TYPE_SFT 0 | ||
803 | #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_TYPE_HWRM_ASYNC_EVENT 0x2eUL | ||
804 | #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_TYPE_LAST ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_TYPE_HWRM_ASYNC_EVENT | ||
805 | __le16 event_id; | ||
806 | #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_EVENT_ID_EEM_CACHE_FLUSH_DONE 0x39UL | ||
807 | #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_EVENT_ID_LAST ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_EVENT_ID_EEM_CACHE_FLUSH_DONE | ||
808 | __le32 event_data2; | ||
809 | u8 opaque_v; | ||
810 | #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_V 0x1UL | ||
811 | #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_OPAQUE_MASK 0xfeUL | ||
812 | #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_OPAQUE_SFT 1 | ||
813 | u8 timestamp_lo; | ||
814 | __le16 timestamp_hi; | ||
815 | __le32 event_data1; | ||
816 | #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_EVENT_DATA1_FID_MASK 0xffffUL | ||
817 | #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_EVENT_DATA1_FID_SFT 0 | ||
818 | }; | ||
819 | |||
675 | /* hwrm_func_reset_input (size:192b/24B) */ | 820 | /* hwrm_func_reset_input (size:192b/24B) */ |
676 | struct hwrm_func_reset_input { | 821 | struct hwrm_func_reset_input { |
677 | __le16 req_type; | 822 | __le16 req_type; |
@@ -867,6 +1012,8 @@ struct hwrm_func_qcaps_output { | |||
867 | #define FUNC_QCAPS_RESP_FLAGS_ADMIN_PF_SUPPORTED 0x40000UL | 1012 | #define FUNC_QCAPS_RESP_FLAGS_ADMIN_PF_SUPPORTED 0x40000UL |
868 | #define FUNC_QCAPS_RESP_FLAGS_LINK_ADMIN_STATUS_SUPPORTED 0x80000UL | 1013 | #define FUNC_QCAPS_RESP_FLAGS_LINK_ADMIN_STATUS_SUPPORTED 0x80000UL |
869 | #define FUNC_QCAPS_RESP_FLAGS_WCB_PUSH_MODE 0x100000UL | 1014 | #define FUNC_QCAPS_RESP_FLAGS_WCB_PUSH_MODE 0x100000UL |
1015 | #define FUNC_QCAPS_RESP_FLAGS_DYNAMIC_TX_RING_ALLOC 0x200000UL | ||
1016 | #define FUNC_QCAPS_RESP_FLAGS_HOT_RESET_CAPABLE 0x400000UL | ||
870 | u8 mac_address[6]; | 1017 | u8 mac_address[6]; |
871 | __le16 max_rsscos_ctx; | 1018 | __le16 max_rsscos_ctx; |
872 | __le16 max_cmpl_rings; | 1019 | __le16 max_cmpl_rings; |
@@ -902,7 +1049,7 @@ struct hwrm_func_qcfg_input { | |||
902 | u8 unused_0[6]; | 1049 | u8 unused_0[6]; |
903 | }; | 1050 | }; |
904 | 1051 | ||
905 | /* hwrm_func_qcfg_output (size:640b/80B) */ | 1052 | /* hwrm_func_qcfg_output (size:704b/88B) */ |
906 | struct hwrm_func_qcfg_output { | 1053 | struct hwrm_func_qcfg_output { |
907 | __le16 error_code; | 1054 | __le16 error_code; |
908 | __le16 req_type; | 1055 | __le16 req_type; |
@@ -919,6 +1066,7 @@ struct hwrm_func_qcfg_output { | |||
919 | #define FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED 0x10UL | 1066 | #define FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED 0x10UL |
920 | #define FUNC_QCFG_RESP_FLAGS_MULTI_HOST 0x20UL | 1067 | #define FUNC_QCFG_RESP_FLAGS_MULTI_HOST 0x20UL |
921 | #define FUNC_QCFG_RESP_FLAGS_TRUSTED_VF 0x40UL | 1068 | #define FUNC_QCFG_RESP_FLAGS_TRUSTED_VF 0x40UL |
1069 | #define FUNC_QCFG_RESP_FLAGS_SECURE_MODE_ENABLED 0x80UL | ||
922 | u8 mac_address[6]; | 1070 | u8 mac_address[6]; |
923 | __le16 pci_id; | 1071 | __le16 pci_id; |
924 | __le16 alloc_rsscos_ctx; | 1072 | __le16 alloc_rsscos_ctx; |
@@ -1000,7 +1148,11 @@ struct hwrm_func_qcfg_output { | |||
1000 | __le16 alloc_sp_tx_rings; | 1148 | __le16 alloc_sp_tx_rings; |
1001 | __le16 alloc_stat_ctx; | 1149 | __le16 alloc_stat_ctx; |
1002 | __le16 alloc_msix; | 1150 | __le16 alloc_msix; |
1003 | u8 unused_2[5]; | 1151 | __le16 registered_vfs; |
1152 | u8 unused_1[3]; | ||
1153 | u8 always_1; | ||
1154 | __le32 reset_addr_poll; | ||
1155 | u8 unused_2[3]; | ||
1004 | u8 valid; | 1156 | u8 valid; |
1005 | }; | 1157 | }; |
1006 | 1158 | ||
@@ -1031,6 +1183,7 @@ struct hwrm_func_cfg_input { | |||
1031 | #define FUNC_CFG_REQ_FLAGS_VNIC_ASSETS_TEST 0x80000UL | 1183 | #define FUNC_CFG_REQ_FLAGS_VNIC_ASSETS_TEST 0x80000UL |
1032 | #define FUNC_CFG_REQ_FLAGS_L2_CTX_ASSETS_TEST 0x100000UL | 1184 | #define FUNC_CFG_REQ_FLAGS_L2_CTX_ASSETS_TEST 0x100000UL |
1033 | #define FUNC_CFG_REQ_FLAGS_TRUSTED_VF_ENABLE 0x200000UL | 1185 | #define FUNC_CFG_REQ_FLAGS_TRUSTED_VF_ENABLE 0x200000UL |
1186 | #define FUNC_CFG_REQ_FLAGS_DYNAMIC_TX_RING_ALLOC 0x400000UL | ||
1034 | __le32 enables; | 1187 | __le32 enables; |
1035 | #define FUNC_CFG_REQ_ENABLES_MTU 0x1UL | 1188 | #define FUNC_CFG_REQ_ENABLES_MTU 0x1UL |
1036 | #define FUNC_CFG_REQ_ENABLES_MRU 0x2UL | 1189 | #define FUNC_CFG_REQ_ENABLES_MRU 0x2UL |
@@ -1235,6 +1388,7 @@ struct hwrm_func_drv_rgtr_input { | |||
1235 | #define FUNC_DRV_RGTR_REQ_FLAGS_FWD_NONE_MODE 0x2UL | 1388 | #define FUNC_DRV_RGTR_REQ_FLAGS_FWD_NONE_MODE 0x2UL |
1236 | #define FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE 0x4UL | 1389 | #define FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE 0x4UL |
1237 | #define FUNC_DRV_RGTR_REQ_FLAGS_FLOW_HANDLE_64BIT_MODE 0x8UL | 1390 | #define FUNC_DRV_RGTR_REQ_FLAGS_FLOW_HANDLE_64BIT_MODE 0x8UL |
1391 | #define FUNC_DRV_RGTR_REQ_FLAGS_HOT_RESET_SUPPORT 0x10UL | ||
1238 | __le32 enables; | 1392 | __le32 enables; |
1239 | #define FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE 0x1UL | 1393 | #define FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE 0x1UL |
1240 | #define FUNC_DRV_RGTR_REQ_ENABLES_VER 0x2UL | 1394 | #define FUNC_DRV_RGTR_REQ_ENABLES_VER 0x2UL |
@@ -1888,7 +2042,8 @@ struct hwrm_func_drv_if_change_output { | |||
1888 | __le16 seq_id; | 2042 | __le16 seq_id; |
1889 | __le16 resp_len; | 2043 | __le16 resp_len; |
1890 | __le32 flags; | 2044 | __le32 flags; |
1891 | #define FUNC_DRV_IF_CHANGE_RESP_FLAGS_RESC_CHANGE 0x1UL | 2045 | #define FUNC_DRV_IF_CHANGE_RESP_FLAGS_RESC_CHANGE 0x1UL |
2046 | #define FUNC_DRV_IF_CHANGE_RESP_FLAGS_HOT_FW_RESET_DONE 0x2UL | ||
1892 | u8 unused_0[3]; | 2047 | u8 unused_0[3]; |
1893 | u8 valid; | 2048 | u8 valid; |
1894 | }; | 2049 | }; |
@@ -2864,6 +3019,60 @@ struct hwrm_port_phy_i2c_read_output { | |||
2864 | u8 valid; | 3019 | u8 valid; |
2865 | }; | 3020 | }; |
2866 | 3021 | ||
3022 | /* hwrm_port_phy_mdio_write_input (size:320b/40B) */ | ||
3023 | struct hwrm_port_phy_mdio_write_input { | ||
3024 | __le16 req_type; | ||
3025 | __le16 cmpl_ring; | ||
3026 | __le16 seq_id; | ||
3027 | __le16 target_id; | ||
3028 | __le64 resp_addr; | ||
3029 | __le32 unused_0[2]; | ||
3030 | __le16 port_id; | ||
3031 | u8 phy_addr; | ||
3032 | u8 dev_addr; | ||
3033 | __le16 reg_addr; | ||
3034 | __le16 reg_data; | ||
3035 | u8 cl45_mdio; | ||
3036 | u8 unused_1[7]; | ||
3037 | }; | ||
3038 | |||
3039 | /* hwrm_port_phy_mdio_write_output (size:128b/16B) */ | ||
3040 | struct hwrm_port_phy_mdio_write_output { | ||
3041 | __le16 error_code; | ||
3042 | __le16 req_type; | ||
3043 | __le16 seq_id; | ||
3044 | __le16 resp_len; | ||
3045 | u8 unused_0[7]; | ||
3046 | u8 valid; | ||
3047 | }; | ||
3048 | |||
3049 | /* hwrm_port_phy_mdio_read_input (size:256b/32B) */ | ||
3050 | struct hwrm_port_phy_mdio_read_input { | ||
3051 | __le16 req_type; | ||
3052 | __le16 cmpl_ring; | ||
3053 | __le16 seq_id; | ||
3054 | __le16 target_id; | ||
3055 | __le64 resp_addr; | ||
3056 | __le32 unused_0[2]; | ||
3057 | __le16 port_id; | ||
3058 | u8 phy_addr; | ||
3059 | u8 dev_addr; | ||
3060 | __le16 reg_addr; | ||
3061 | u8 cl45_mdio; | ||
3062 | u8 unused_1; | ||
3063 | }; | ||
3064 | |||
3065 | /* hwrm_port_phy_mdio_read_output (size:128b/16B) */ | ||
3066 | struct hwrm_port_phy_mdio_read_output { | ||
3067 | __le16 error_code; | ||
3068 | __le16 req_type; | ||
3069 | __le16 seq_id; | ||
3070 | __le16 resp_len; | ||
3071 | __le16 reg_data; | ||
3072 | u8 unused_0[5]; | ||
3073 | u8 valid; | ||
3074 | }; | ||
3075 | |||
2867 | /* hwrm_port_led_cfg_input (size:512b/64B) */ | 3076 | /* hwrm_port_led_cfg_input (size:512b/64B) */ |
2868 | struct hwrm_port_led_cfg_input { | 3077 | struct hwrm_port_led_cfg_input { |
2869 | __le16 req_type; | 3078 | __le16 req_type; |
@@ -4869,6 +5078,10 @@ struct hwrm_ring_grp_free_output { | |||
4869 | u8 unused_0[7]; | 5078 | u8 unused_0[7]; |
4870 | u8 valid; | 5079 | u8 valid; |
4871 | }; | 5080 | }; |
5081 | #define DEFAULT_FLOW_ID 0xFFFFFFFFUL | ||
5082 | #define ROCEV1_FLOW_ID 0xFFFFFFFEUL | ||
5083 | #define ROCEV2_FLOW_ID 0xFFFFFFFDUL | ||
5084 | #define ROCEV2_CNP_FLOW_ID 0xFFFFFFFCUL | ||
4872 | 5085 | ||
4873 | /* hwrm_cfa_l2_filter_alloc_input (size:768b/96B) */ | 5086 | /* hwrm_cfa_l2_filter_alloc_input (size:768b/96B) */ |
4874 | struct hwrm_cfa_l2_filter_alloc_input { | 5087 | struct hwrm_cfa_l2_filter_alloc_input { |
@@ -4937,20 +5150,21 @@ struct hwrm_cfa_l2_filter_alloc_input { | |||
4937 | u8 unused_3; | 5150 | u8 unused_3; |
4938 | __le32 src_id; | 5151 | __le32 src_id; |
4939 | u8 tunnel_type; | 5152 | u8 tunnel_type; |
4940 | #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL | 5153 | #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL |
4941 | #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL | 5154 | #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL |
4942 | #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE 0x2UL | 5155 | #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE 0x2UL |
4943 | #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE 0x3UL | 5156 | #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE 0x3UL |
4944 | #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP 0x4UL | 5157 | #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP 0x4UL |
4945 | #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL | 5158 | #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL |
4946 | #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS 0x6UL | 5159 | #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS 0x6UL |
4947 | #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL | 5160 | #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL |
4948 | #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL | 5161 | #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL |
4949 | #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL | 5162 | #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL |
4950 | #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL | 5163 | #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL |
4951 | #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL | 5164 | #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL |
4952 | #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL | 5165 | #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL |
4953 | #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL | 5166 | #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL |
5167 | #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL | ||
4954 | u8 unused_4; | 5168 | u8 unused_4; |
4955 | __le16 dst_id; | 5169 | __le16 dst_id; |
4956 | __le16 mirror_vnic_id; | 5170 | __le16 mirror_vnic_id; |
@@ -5108,20 +5322,21 @@ struct hwrm_cfa_tunnel_filter_alloc_input { | |||
5108 | u8 l3_addr_type; | 5322 | u8 l3_addr_type; |
5109 | u8 t_l3_addr_type; | 5323 | u8 t_l3_addr_type; |
5110 | u8 tunnel_type; | 5324 | u8 tunnel_type; |
5111 | #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL | 5325 | #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL |
5112 | #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL | 5326 | #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL |
5113 | #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE 0x2UL | 5327 | #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE 0x2UL |
5114 | #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE 0x3UL | 5328 | #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE 0x3UL |
5115 | #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP 0x4UL | 5329 | #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP 0x4UL |
5116 | #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL | 5330 | #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL |
5117 | #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS 0x6UL | 5331 | #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS 0x6UL |
5118 | #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL | 5332 | #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL |
5119 | #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL | 5333 | #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL |
5120 | #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL | 5334 | #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL |
5121 | #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL | 5335 | #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL |
5122 | #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL | 5336 | #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL |
5123 | #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL | 5337 | #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL |
5124 | #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL | 5338 | #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL |
5339 | #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL | ||
5125 | u8 tunnel_flags; | 5340 | u8 tunnel_flags; |
5126 | #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_FLAGS_TUN_FLAGS_OAM_CHECKSUM_EXPLHDR 0x1UL | 5341 | #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_FLAGS_TUN_FLAGS_OAM_CHECKSUM_EXPLHDR 0x1UL |
5127 | #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_FLAGS_TUN_FLAGS_CRITICAL_OPT_S1 0x2UL | 5342 | #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_FLAGS_TUN_FLAGS_CRITICAL_OPT_S1 0x2UL |
@@ -5326,20 +5541,21 @@ struct hwrm_cfa_ntuple_filter_alloc_input { | |||
5326 | __le16 dst_id; | 5541 | __le16 dst_id; |
5327 | __le16 mirror_vnic_id; | 5542 | __le16 mirror_vnic_id; |
5328 | u8 tunnel_type; | 5543 | u8 tunnel_type; |
5329 | #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL | 5544 | #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL |
5330 | #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL | 5545 | #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL |
5331 | #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE 0x2UL | 5546 | #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE 0x2UL |
5332 | #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE 0x3UL | 5547 | #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE 0x3UL |
5333 | #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP 0x4UL | 5548 | #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP 0x4UL |
5334 | #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL | 5549 | #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL |
5335 | #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS 0x6UL | 5550 | #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS 0x6UL |
5336 | #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL | 5551 | #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL |
5337 | #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL | 5552 | #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL |
5338 | #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL | 5553 | #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL |
5339 | #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL | 5554 | #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL |
5340 | #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL | 5555 | #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL |
5341 | #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL | 5556 | #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL |
5342 | #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL | 5557 | #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL |
5558 | #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL | ||
5343 | u8 pri_hint; | 5559 | u8 pri_hint; |
5344 | #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_NO_PREFER 0x0UL | 5560 | #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_NO_PREFER 0x0UL |
5345 | #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_ABOVE 0x1UL | 5561 | #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_ABOVE 0x1UL |
@@ -5459,20 +5675,21 @@ struct hwrm_cfa_decap_filter_alloc_input { | |||
5459 | #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_MIRROR_VNIC_ID 0x10000UL | 5675 | #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_MIRROR_VNIC_ID 0x10000UL |
5460 | __be32 tunnel_id; | 5676 | __be32 tunnel_id; |
5461 | u8 tunnel_type; | 5677 | u8 tunnel_type; |
5462 | #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL | 5678 | #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL |
5463 | #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL | 5679 | #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL |
5464 | #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE 0x2UL | 5680 | #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE 0x2UL |
5465 | #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE 0x3UL | 5681 | #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE 0x3UL |
5466 | #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP 0x4UL | 5682 | #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP 0x4UL |
5467 | #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL | 5683 | #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL |
5468 | #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS 0x6UL | 5684 | #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS 0x6UL |
5469 | #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL | 5685 | #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL |
5470 | #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL | 5686 | #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL |
5471 | #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL | 5687 | #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL |
5472 | #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL | 5688 | #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL |
5473 | #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL | 5689 | #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL |
5474 | #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL | 5690 | #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL |
5475 | #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL | 5691 | #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL |
5692 | #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL | ||
5476 | u8 unused_0; | 5693 | u8 unused_0; |
5477 | __le16 unused_1; | 5694 | __le16 unused_1; |
5478 | u8 src_macaddr[6]; | 5695 | u8 src_macaddr[6]; |
@@ -5559,20 +5776,23 @@ struct hwrm_cfa_flow_alloc_input { | |||
5559 | #define CFA_FLOW_ALLOC_REQ_FLAGS_PATH_TX 0x40UL | 5776 | #define CFA_FLOW_ALLOC_REQ_FLAGS_PATH_TX 0x40UL |
5560 | #define CFA_FLOW_ALLOC_REQ_FLAGS_PATH_RX 0x80UL | 5777 | #define CFA_FLOW_ALLOC_REQ_FLAGS_PATH_RX 0x80UL |
5561 | #define CFA_FLOW_ALLOC_REQ_FLAGS_MATCH_VXLAN_IP_VNI 0x100UL | 5778 | #define CFA_FLOW_ALLOC_REQ_FLAGS_MATCH_VXLAN_IP_VNI 0x100UL |
5779 | #define CFA_FLOW_ALLOC_REQ_FLAGS_VHOST_ID_USE_VLAN 0x200UL | ||
5562 | __le16 src_fid; | 5780 | __le16 src_fid; |
5563 | __le32 tunnel_handle; | 5781 | __le32 tunnel_handle; |
5564 | __le16 action_flags; | 5782 | __le16 action_flags; |
5565 | #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_FWD 0x1UL | 5783 | #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_FWD 0x1UL |
5566 | #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_RECYCLE 0x2UL | 5784 | #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_RECYCLE 0x2UL |
5567 | #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_DROP 0x4UL | 5785 | #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_DROP 0x4UL |
5568 | #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_METER 0x8UL | 5786 | #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_METER 0x8UL |
5569 | #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_TUNNEL 0x10UL | 5787 | #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_TUNNEL 0x10UL |
5570 | #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_SRC 0x20UL | 5788 | #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_SRC 0x20UL |
5571 | #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_DEST 0x40UL | 5789 | #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_DEST 0x40UL |
5572 | #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_IPV4_ADDRESS 0x80UL | 5790 | #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_IPV4_ADDRESS 0x80UL |
5573 | #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_L2_HEADER_REWRITE 0x100UL | 5791 | #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_L2_HEADER_REWRITE 0x100UL |
5574 | #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_TTL_DECREMENT 0x200UL | 5792 | #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_TTL_DECREMENT 0x200UL |
5575 | #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_TUNNEL_IP 0x400UL | 5793 | #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_TUNNEL_IP 0x400UL |
5794 | #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_FLOW_AGING_ENABLED 0x800UL | ||
5795 | #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_PRI_HINT 0x1000UL | ||
5576 | __le16 dst_fid; | 5796 | __le16 dst_fid; |
5577 | __be16 l2_rewrite_vlan_tpid; | 5797 | __be16 l2_rewrite_vlan_tpid; |
5578 | __be16 l2_rewrite_vlan_tci; | 5798 | __be16 l2_rewrite_vlan_tci; |
@@ -5597,20 +5817,21 @@ struct hwrm_cfa_flow_alloc_input { | |||
5597 | __be16 l2_rewrite_smac[3]; | 5817 | __be16 l2_rewrite_smac[3]; |
5598 | u8 ip_proto; | 5818 | u8 ip_proto; |
5599 | u8 tunnel_type; | 5819 | u8 tunnel_type; |
5600 | #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL | 5820 | #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL |
5601 | #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL | 5821 | #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL |
5602 | #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_NVGRE 0x2UL | 5822 | #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_NVGRE 0x2UL |
5603 | #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_L2GRE 0x3UL | 5823 | #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_L2GRE 0x3UL |
5604 | #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_IPIP 0x4UL | 5824 | #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_IPIP 0x4UL |
5605 | #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL | 5825 | #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL |
5606 | #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_MPLS 0x6UL | 5826 | #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_MPLS 0x6UL |
5607 | #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL | 5827 | #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL |
5608 | #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL | 5828 | #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL |
5609 | #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL | 5829 | #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL |
5610 | #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL | 5830 | #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL |
5611 | #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL | 5831 | #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL |
5612 | #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL | 5832 | #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL |
5613 | #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL | 5833 | #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL |
5834 | #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL | ||
5614 | }; | 5835 | }; |
5615 | 5836 | ||
5616 | /* hwrm_cfa_flow_alloc_output (size:256b/32B) */ | 5837 | /* hwrm_cfa_flow_alloc_output (size:256b/32B) */ |
@@ -5623,7 +5844,8 @@ struct hwrm_cfa_flow_alloc_output { | |||
5623 | u8 unused_0[2]; | 5844 | u8 unused_0[2]; |
5624 | __le32 flow_id; | 5845 | __le32 flow_id; |
5625 | __le64 ext_flow_handle; | 5846 | __le64 ext_flow_handle; |
5626 | u8 unused_1[7]; | 5847 | __le32 flow_counter_id; |
5848 | u8 unused_1[3]; | ||
5627 | u8 valid; | 5849 | u8 valid; |
5628 | }; | 5850 | }; |
5629 | 5851 | ||
@@ -5651,6 +5873,46 @@ struct hwrm_cfa_flow_free_output { | |||
5651 | u8 valid; | 5873 | u8 valid; |
5652 | }; | 5874 | }; |
5653 | 5875 | ||
5876 | /* hwrm_cfa_flow_info_input (size:256b/32B) */ | ||
5877 | struct hwrm_cfa_flow_info_input { | ||
5878 | __le16 req_type; | ||
5879 | __le16 cmpl_ring; | ||
5880 | __le16 seq_id; | ||
5881 | __le16 target_id; | ||
5882 | __le64 resp_addr; | ||
5883 | __le16 flow_handle; | ||
5884 | #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_MAX_MASK 0xfffUL | ||
5885 | #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_MAX_SFT 0 | ||
5886 | #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_CNP_CNT 0x1000UL | ||
5887 | #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_ROCEV1_CNT 0x2000UL | ||
5888 | #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_ROCEV2_CNT 0x4000UL | ||
5889 | #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_DIR_RX 0x8000UL | ||
5890 | u8 unused_0[6]; | ||
5891 | __le64 ext_flow_handle; | ||
5892 | }; | ||
5893 | |||
5894 | /* hwrm_cfa_flow_info_output (size:448b/56B) */ | ||
5895 | struct hwrm_cfa_flow_info_output { | ||
5896 | __le16 error_code; | ||
5897 | __le16 req_type; | ||
5898 | __le16 seq_id; | ||
5899 | __le16 resp_len; | ||
5900 | u8 flags; | ||
5901 | u8 profile; | ||
5902 | __le16 src_fid; | ||
5903 | __le16 dst_fid; | ||
5904 | __le16 l2_ctxt_id; | ||
5905 | __le64 em_info; | ||
5906 | __le64 tcam_info; | ||
5907 | __le64 vfp_tcam_info; | ||
5908 | __le16 ar_id; | ||
5909 | __le16 flow_handle; | ||
5910 | __le32 tunnel_handle; | ||
5911 | __le16 flow_timer; | ||
5912 | u8 unused_0[5]; | ||
5913 | u8 valid; | ||
5914 | }; | ||
5915 | |||
5654 | /* hwrm_cfa_flow_stats_input (size:640b/80B) */ | 5916 | /* hwrm_cfa_flow_stats_input (size:640b/80B) */ |
5655 | struct hwrm_cfa_flow_stats_input { | 5917 | struct hwrm_cfa_flow_stats_input { |
5656 | __le16 req_type; | 5918 | __le16 req_type; |
@@ -5757,6 +6019,128 @@ struct hwrm_cfa_vfr_free_output { | |||
5757 | u8 valid; | 6019 | u8 valid; |
5758 | }; | 6020 | }; |
5759 | 6021 | ||
6022 | /* hwrm_cfa_eem_qcaps_input (size:192b/24B) */ | ||
6023 | struct hwrm_cfa_eem_qcaps_input { | ||
6024 | __le16 req_type; | ||
6025 | __le16 cmpl_ring; | ||
6026 | __le16 seq_id; | ||
6027 | __le16 target_id; | ||
6028 | __le64 resp_addr; | ||
6029 | __le32 flags; | ||
6030 | #define CFA_EEM_QCAPS_REQ_FLAGS_PATH_TX 0x1UL | ||
6031 | #define CFA_EEM_QCAPS_REQ_FLAGS_PATH_RX 0x2UL | ||
6032 | #define CFA_EEM_QCAPS_REQ_FLAGS_PREFERRED_OFFLOAD 0x4UL | ||
6033 | __le32 unused_0; | ||
6034 | }; | ||
6035 | |||
6036 | /* hwrm_cfa_eem_qcaps_output (size:256b/32B) */ | ||
6037 | struct hwrm_cfa_eem_qcaps_output { | ||
6038 | __le16 error_code; | ||
6039 | __le16 req_type; | ||
6040 | __le16 seq_id; | ||
6041 | __le16 resp_len; | ||
6042 | __le32 flags; | ||
6043 | #define CFA_EEM_QCAPS_RESP_FLAGS_PATH_TX 0x1UL | ||
6044 | #define CFA_EEM_QCAPS_RESP_FLAGS_PATH_RX 0x2UL | ||
6045 | __le32 unused_0; | ||
6046 | __le32 supported; | ||
6047 | #define CFA_EEM_QCAPS_RESP_SUPPORTED_KEY0_TABLE 0x1UL | ||
6048 | #define CFA_EEM_QCAPS_RESP_SUPPORTED_KEY1_TABLE 0x2UL | ||
6049 | #define CFA_EEM_QCAPS_RESP_SUPPORTED_EXTERNAL_RECORD_TABLE 0x4UL | ||
6050 | #define CFA_EEM_QCAPS_RESP_SUPPORTED_EXTERNAL_FLOW_COUNTERS_TABLE 0x8UL | ||
6051 | __le32 max_entries_supported; | ||
6052 | __le16 key_entry_size; | ||
6053 | __le16 record_entry_size; | ||
6054 | __le16 efc_entry_size; | ||
6055 | u8 unused_1; | ||
6056 | u8 valid; | ||
6057 | }; | ||
6058 | |||
6059 | /* hwrm_cfa_eem_cfg_input (size:320b/40B) */ | ||
6060 | struct hwrm_cfa_eem_cfg_input { | ||
6061 | __le16 req_type; | ||
6062 | __le16 cmpl_ring; | ||
6063 | __le16 seq_id; | ||
6064 | __le16 target_id; | ||
6065 | __le64 resp_addr; | ||
6066 | __le32 flags; | ||
6067 | #define CFA_EEM_CFG_REQ_FLAGS_PATH_TX 0x1UL | ||
6068 | #define CFA_EEM_CFG_REQ_FLAGS_PATH_RX 0x2UL | ||
6069 | #define CFA_EEM_CFG_REQ_FLAGS_PREFERRED_OFFLOAD 0x4UL | ||
6070 | __le32 unused_0; | ||
6071 | __le32 num_entries; | ||
6072 | __le32 unused_1; | ||
6073 | __le16 key0_ctx_id; | ||
6074 | __le16 key1_ctx_id; | ||
6075 | __le16 record_ctx_id; | ||
6076 | __le16 efc_ctx_id; | ||
6077 | }; | ||
6078 | |||
6079 | /* hwrm_cfa_eem_cfg_output (size:128b/16B) */ | ||
6080 | struct hwrm_cfa_eem_cfg_output { | ||
6081 | __le16 error_code; | ||
6082 | __le16 req_type; | ||
6083 | __le16 seq_id; | ||
6084 | __le16 resp_len; | ||
6085 | u8 unused_0[7]; | ||
6086 | u8 valid; | ||
6087 | }; | ||
6088 | |||
6089 | /* hwrm_cfa_eem_qcfg_input (size:192b/24B) */ | ||
6090 | struct hwrm_cfa_eem_qcfg_input { | ||
6091 | __le16 req_type; | ||
6092 | __le16 cmpl_ring; | ||
6093 | __le16 seq_id; | ||
6094 | __le16 target_id; | ||
6095 | __le64 resp_addr; | ||
6096 | __le32 flags; | ||
6097 | #define CFA_EEM_QCFG_REQ_FLAGS_PATH_TX 0x1UL | ||
6098 | #define CFA_EEM_QCFG_REQ_FLAGS_PATH_RX 0x2UL | ||
6099 | __le32 unused_0; | ||
6100 | }; | ||
6101 | |||
6102 | /* hwrm_cfa_eem_qcfg_output (size:128b/16B) */ | ||
6103 | struct hwrm_cfa_eem_qcfg_output { | ||
6104 | __le16 error_code; | ||
6105 | __le16 req_type; | ||
6106 | __le16 seq_id; | ||
6107 | __le16 resp_len; | ||
6108 | __le32 flags; | ||
6109 | #define CFA_EEM_QCFG_RESP_FLAGS_PATH_TX 0x1UL | ||
6110 | #define CFA_EEM_QCFG_RESP_FLAGS_PATH_RX 0x2UL | ||
6111 | #define CFA_EEM_QCFG_RESP_FLAGS_PREFERRED_OFFLOAD 0x4UL | ||
6112 | __le32 num_entries; | ||
6113 | }; | ||
6114 | |||
6115 | /* hwrm_cfa_eem_op_input (size:192b/24B) */ | ||
6116 | struct hwrm_cfa_eem_op_input { | ||
6117 | __le16 req_type; | ||
6118 | __le16 cmpl_ring; | ||
6119 | __le16 seq_id; | ||
6120 | __le16 target_id; | ||
6121 | __le64 resp_addr; | ||
6122 | __le32 flags; | ||
6123 | #define CFA_EEM_OP_REQ_FLAGS_PATH_TX 0x1UL | ||
6124 | #define CFA_EEM_OP_REQ_FLAGS_PATH_RX 0x2UL | ||
6125 | __le16 unused_0; | ||
6126 | __le16 op; | ||
6127 | #define CFA_EEM_OP_REQ_OP_RESERVED 0x0UL | ||
6128 | #define CFA_EEM_OP_REQ_OP_EEM_DISABLE 0x1UL | ||
6129 | #define CFA_EEM_OP_REQ_OP_EEM_ENABLE 0x2UL | ||
6130 | #define CFA_EEM_OP_REQ_OP_EEM_CLEANUP 0x3UL | ||
6131 | #define CFA_EEM_OP_REQ_OP_LAST CFA_EEM_OP_REQ_OP_EEM_CLEANUP | ||
6132 | }; | ||
6133 | |||
6134 | /* hwrm_cfa_eem_op_output (size:128b/16B) */ | ||
6135 | struct hwrm_cfa_eem_op_output { | ||
6136 | __le16 error_code; | ||
6137 | __le16 req_type; | ||
6138 | __le16 seq_id; | ||
6139 | __le16 resp_len; | ||
6140 | u8 unused_0[7]; | ||
6141 | u8 valid; | ||
6142 | }; | ||
6143 | |||
5760 | /* hwrm_tunnel_dst_port_query_input (size:192b/24B) */ | 6144 | /* hwrm_tunnel_dst_port_query_input (size:192b/24B) */ |
5761 | struct hwrm_tunnel_dst_port_query_input { | 6145 | struct hwrm_tunnel_dst_port_query_input { |
5762 | __le16 req_type; | 6146 | __le16 req_type; |
@@ -5765,12 +6149,13 @@ struct hwrm_tunnel_dst_port_query_input { | |||
5765 | __le16 target_id; | 6149 | __le16 target_id; |
5766 | __le64 resp_addr; | 6150 | __le64 resp_addr; |
5767 | u8 tunnel_type; | 6151 | u8 tunnel_type; |
5768 | #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN 0x1UL | 6152 | #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN 0x1UL |
5769 | #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_GENEVE 0x5UL | 6153 | #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_GENEVE 0x5UL |
5770 | #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL | 6154 | #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL |
5771 | #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL | 6155 | #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL |
5772 | #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL | 6156 | #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL |
5773 | #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_L2_ETYPE | 6157 | #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL |
6158 | #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 | ||
5774 | u8 unused_0[7]; | 6159 | u8 unused_0[7]; |
5775 | }; | 6160 | }; |
5776 | 6161 | ||
@@ -5794,12 +6179,13 @@ struct hwrm_tunnel_dst_port_alloc_input { | |||
5794 | __le16 target_id; | 6179 | __le16 target_id; |
5795 | __le64 resp_addr; | 6180 | __le64 resp_addr; |
5796 | u8 tunnel_type; | 6181 | u8 tunnel_type; |
5797 | #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL | 6182 | #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL |
5798 | #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL | 6183 | #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL |
5799 | #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL | 6184 | #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL |
5800 | #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL | 6185 | #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL |
5801 | #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL | 6186 | #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL |
5802 | #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE | 6187 | #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL |
6188 | #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 | ||
5803 | u8 unused_0; | 6189 | u8 unused_0; |
5804 | __be16 tunnel_dst_port_val; | 6190 | __be16 tunnel_dst_port_val; |
5805 | u8 unused_1[4]; | 6191 | u8 unused_1[4]; |
@@ -5824,12 +6210,13 @@ struct hwrm_tunnel_dst_port_free_input { | |||
5824 | __le16 target_id; | 6210 | __le16 target_id; |
5825 | __le64 resp_addr; | 6211 | __le64 resp_addr; |
5826 | u8 tunnel_type; | 6212 | u8 tunnel_type; |
5827 | #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN 0x1UL | 6213 | #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN 0x1UL |
5828 | #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE 0x5UL | 6214 | #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE 0x5UL |
5829 | #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL | 6215 | #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL |
5830 | #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL | 6216 | #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL |
5831 | #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL | 6217 | #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL |
5832 | #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_L2_ETYPE | 6218 | #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL |
6219 | #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 | ||
5833 | u8 unused_0; | 6220 | u8 unused_0; |
5834 | __le16 tunnel_dst_port_id; | 6221 | __le16 tunnel_dst_port_id; |
5835 | u8 unused_1[4]; | 6222 | u8 unused_1[4]; |
@@ -6040,7 +6427,9 @@ struct hwrm_fw_reset_input { | |||
6040 | #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTIMMEDIATE 0x3UL | 6427 | #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTIMMEDIATE 0x3UL |
6041 | #define FW_RESET_REQ_SELFRST_STATUS_LAST FW_RESET_REQ_SELFRST_STATUS_SELFRSTIMMEDIATE | 6428 | #define FW_RESET_REQ_SELFRST_STATUS_LAST FW_RESET_REQ_SELFRST_STATUS_SELFRSTIMMEDIATE |
6042 | u8 host_idx; | 6429 | u8 host_idx; |
6043 | u8 unused_0[5]; | 6430 | u8 flags; |
6431 | #define FW_RESET_REQ_FLAGS_RESET_GRACEFUL 0x1UL | ||
6432 | u8 unused_0[4]; | ||
6044 | }; | 6433 | }; |
6045 | 6434 | ||
6046 | /* hwrm_fw_reset_output (size:128b/16B) */ | 6435 | /* hwrm_fw_reset_output (size:128b/16B) */ |
@@ -6137,6 +6526,7 @@ struct hwrm_struct_hdr { | |||
6137 | #define STRUCT_HDR_STRUCT_ID_DCBX_FEATURE_STATE 0x422UL | 6526 | #define STRUCT_HDR_STRUCT_ID_DCBX_FEATURE_STATE 0x422UL |
6138 | #define STRUCT_HDR_STRUCT_ID_LLDP_GENERIC 0x424UL | 6527 | #define STRUCT_HDR_STRUCT_ID_LLDP_GENERIC 0x424UL |
6139 | #define STRUCT_HDR_STRUCT_ID_LLDP_DEVICE 0x426UL | 6528 | #define STRUCT_HDR_STRUCT_ID_LLDP_DEVICE 0x426UL |
6529 | #define STRUCT_HDR_STRUCT_ID_POWER_BKUP 0x427UL | ||
6140 | #define STRUCT_HDR_STRUCT_ID_AFM_OPAQUE 0x1UL | 6530 | #define STRUCT_HDR_STRUCT_ID_AFM_OPAQUE 0x1UL |
6141 | #define STRUCT_HDR_STRUCT_ID_PORT_DESCRIPTION 0xaUL | 6531 | #define STRUCT_HDR_STRUCT_ID_PORT_DESCRIPTION 0xaUL |
6142 | #define STRUCT_HDR_STRUCT_ID_RSS_V2 0x64UL | 6532 | #define STRUCT_HDR_STRUCT_ID_RSS_V2 0x64UL |
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c index 749f63beddd8..c683b5e96b1d 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c | |||
@@ -337,18 +337,21 @@ static int bnxt_tc_parse_flow(struct bnxt *bp, | |||
337 | return bnxt_tc_parse_actions(bp, &flow->actions, tc_flow_cmd->exts); | 337 | return bnxt_tc_parse_actions(bp, &flow->actions, tc_flow_cmd->exts); |
338 | } | 338 | } |
339 | 339 | ||
340 | static int bnxt_hwrm_cfa_flow_free(struct bnxt *bp, __le16 flow_handle) | 340 | static int bnxt_hwrm_cfa_flow_free(struct bnxt *bp, |
341 | struct bnxt_tc_flow_node *flow_node) | ||
341 | { | 342 | { |
342 | struct hwrm_cfa_flow_free_input req = { 0 }; | 343 | struct hwrm_cfa_flow_free_input req = { 0 }; |
343 | int rc; | 344 | int rc; |
344 | 345 | ||
345 | bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_FLOW_FREE, -1, -1); | 346 | bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_FLOW_FREE, -1, -1); |
346 | req.flow_handle = flow_handle; | 347 | if (bp->fw_cap & BNXT_FW_CAP_OVS_64BIT_HANDLE) |
348 | req.ext_flow_handle = flow_node->ext_flow_handle; | ||
349 | else | ||
350 | req.flow_handle = flow_node->flow_handle; | ||
347 | 351 | ||
348 | rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); | 352 | rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); |
349 | if (rc) | 353 | if (rc) |
350 | netdev_info(bp->dev, "Error: %s: flow_handle=0x%x rc=%d", | 354 | netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc); |
351 | __func__, flow_handle, rc); | ||
352 | 355 | ||
353 | if (rc) | 356 | if (rc) |
354 | rc = -EIO; | 357 | rc = -EIO; |
@@ -418,13 +421,14 @@ static bool bits_set(void *key, int len) | |||
418 | 421 | ||
419 | static int bnxt_hwrm_cfa_flow_alloc(struct bnxt *bp, struct bnxt_tc_flow *flow, | 422 | static int bnxt_hwrm_cfa_flow_alloc(struct bnxt *bp, struct bnxt_tc_flow *flow, |
420 | __le16 ref_flow_handle, | 423 | __le16 ref_flow_handle, |
421 | __le32 tunnel_handle, __le16 *flow_handle) | 424 | __le32 tunnel_handle, |
425 | struct bnxt_tc_flow_node *flow_node) | ||
422 | { | 426 | { |
423 | struct hwrm_cfa_flow_alloc_output *resp = bp->hwrm_cmd_resp_addr; | ||
424 | struct bnxt_tc_actions *actions = &flow->actions; | 427 | struct bnxt_tc_actions *actions = &flow->actions; |
425 | struct bnxt_tc_l3_key *l3_mask = &flow->l3_mask; | 428 | struct bnxt_tc_l3_key *l3_mask = &flow->l3_mask; |
426 | struct bnxt_tc_l3_key *l3_key = &flow->l3_key; | 429 | struct bnxt_tc_l3_key *l3_key = &flow->l3_key; |
427 | struct hwrm_cfa_flow_alloc_input req = { 0 }; | 430 | struct hwrm_cfa_flow_alloc_input req = { 0 }; |
431 | struct hwrm_cfa_flow_alloc_output *resp; | ||
428 | u16 flow_flags = 0, action_flags = 0; | 432 | u16 flow_flags = 0, action_flags = 0; |
429 | int rc; | 433 | int rc; |
430 | 434 | ||
@@ -527,8 +531,23 @@ static int bnxt_hwrm_cfa_flow_alloc(struct bnxt *bp, struct bnxt_tc_flow *flow, | |||
527 | 531 | ||
528 | mutex_lock(&bp->hwrm_cmd_lock); | 532 | mutex_lock(&bp->hwrm_cmd_lock); |
529 | rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); | 533 | rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); |
530 | if (!rc) | 534 | if (!rc) { |
531 | *flow_handle = resp->flow_handle; | 535 | resp = bnxt_get_hwrm_resp_addr(bp, &req); |
536 | /* CFA_FLOW_ALLOC response interpretation: | ||
537 | * fw with fw with | ||
538 | * 16-bit 64-bit | ||
539 | * flow handle flow handle | ||
540 | * =========== =========== | ||
541 | * flow_handle flow handle flow context id | ||
542 | * ext_flow_handle INVALID flow handle | ||
543 | * flow_id INVALID flow counter id | ||
544 | */ | ||
545 | flow_node->flow_handle = resp->flow_handle; | ||
546 | if (bp->fw_cap & BNXT_FW_CAP_OVS_64BIT_HANDLE) { | ||
547 | flow_node->ext_flow_handle = resp->ext_flow_handle; | ||
548 | flow_node->flow_id = resp->flow_id; | ||
549 | } | ||
550 | } | ||
532 | mutex_unlock(&bp->hwrm_cmd_lock); | 551 | mutex_unlock(&bp->hwrm_cmd_lock); |
533 | 552 | ||
534 | if (rc == HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR) | 553 | if (rc == HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR) |
@@ -544,9 +563,8 @@ static int hwrm_cfa_decap_filter_alloc(struct bnxt *bp, | |||
544 | __le32 ref_decap_handle, | 563 | __le32 ref_decap_handle, |
545 | __le32 *decap_filter_handle) | 564 | __le32 *decap_filter_handle) |
546 | { | 565 | { |
547 | struct hwrm_cfa_decap_filter_alloc_output *resp = | ||
548 | bp->hwrm_cmd_resp_addr; | ||
549 | struct hwrm_cfa_decap_filter_alloc_input req = { 0 }; | 566 | struct hwrm_cfa_decap_filter_alloc_input req = { 0 }; |
567 | struct hwrm_cfa_decap_filter_alloc_output *resp; | ||
550 | struct ip_tunnel_key *tun_key = &flow->tun_key; | 568 | struct ip_tunnel_key *tun_key = &flow->tun_key; |
551 | u32 enables = 0; | 569 | u32 enables = 0; |
552 | int rc; | 570 | int rc; |
@@ -599,10 +617,12 @@ static int hwrm_cfa_decap_filter_alloc(struct bnxt *bp, | |||
599 | 617 | ||
600 | mutex_lock(&bp->hwrm_cmd_lock); | 618 | mutex_lock(&bp->hwrm_cmd_lock); |
601 | rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); | 619 | rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); |
602 | if (!rc) | 620 | if (!rc) { |
621 | resp = bnxt_get_hwrm_resp_addr(bp, &req); | ||
603 | *decap_filter_handle = resp->decap_filter_id; | 622 | *decap_filter_handle = resp->decap_filter_id; |
604 | else | 623 | } else { |
605 | netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc); | 624 | netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc); |
625 | } | ||
606 | mutex_unlock(&bp->hwrm_cmd_lock); | 626 | mutex_unlock(&bp->hwrm_cmd_lock); |
607 | 627 | ||
608 | if (rc) | 628 | if (rc) |
@@ -633,9 +653,8 @@ static int hwrm_cfa_encap_record_alloc(struct bnxt *bp, | |||
633 | struct bnxt_tc_l2_key *l2_info, | 653 | struct bnxt_tc_l2_key *l2_info, |
634 | __le32 *encap_record_handle) | 654 | __le32 *encap_record_handle) |
635 | { | 655 | { |
636 | struct hwrm_cfa_encap_record_alloc_output *resp = | ||
637 | bp->hwrm_cmd_resp_addr; | ||
638 | struct hwrm_cfa_encap_record_alloc_input req = { 0 }; | 656 | struct hwrm_cfa_encap_record_alloc_input req = { 0 }; |
657 | struct hwrm_cfa_encap_record_alloc_output *resp; | ||
639 | struct hwrm_cfa_encap_data_vxlan *encap = | 658 | struct hwrm_cfa_encap_data_vxlan *encap = |
640 | (struct hwrm_cfa_encap_data_vxlan *)&req.encap_data; | 659 | (struct hwrm_cfa_encap_data_vxlan *)&req.encap_data; |
641 | struct hwrm_vxlan_ipv4_hdr *encap_ipv4 = | 660 | struct hwrm_vxlan_ipv4_hdr *encap_ipv4 = |
@@ -667,10 +686,12 @@ static int hwrm_cfa_encap_record_alloc(struct bnxt *bp, | |||
667 | 686 | ||
668 | mutex_lock(&bp->hwrm_cmd_lock); | 687 | mutex_lock(&bp->hwrm_cmd_lock); |
669 | rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); | 688 | rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); |
670 | if (!rc) | 689 | if (!rc) { |
690 | resp = bnxt_get_hwrm_resp_addr(bp, &req); | ||
671 | *encap_record_handle = resp->encap_record_id; | 691 | *encap_record_handle = resp->encap_record_id; |
672 | else | 692 | } else { |
673 | netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc); | 693 | netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc); |
694 | } | ||
674 | mutex_unlock(&bp->hwrm_cmd_lock); | 695 | mutex_unlock(&bp->hwrm_cmd_lock); |
675 | 696 | ||
676 | if (rc) | 697 | if (rc) |
@@ -1224,7 +1245,7 @@ static int __bnxt_tc_del_flow(struct bnxt *bp, | |||
1224 | int rc; | 1245 | int rc; |
1225 | 1246 | ||
1226 | /* send HWRM cmd to free the flow-id */ | 1247 | /* send HWRM cmd to free the flow-id */ |
1227 | bnxt_hwrm_cfa_flow_free(bp, flow_node->flow_handle); | 1248 | bnxt_hwrm_cfa_flow_free(bp, flow_node); |
1228 | 1249 | ||
1229 | mutex_lock(&tc_info->lock); | 1250 | mutex_lock(&tc_info->lock); |
1230 | 1251 | ||
@@ -1246,6 +1267,12 @@ static int __bnxt_tc_del_flow(struct bnxt *bp, | |||
1246 | return 0; | 1267 | return 0; |
1247 | } | 1268 | } |
1248 | 1269 | ||
1270 | static void bnxt_tc_set_flow_dir(struct bnxt *bp, struct bnxt_tc_flow *flow, | ||
1271 | u16 src_fid) | ||
1272 | { | ||
1273 | flow->dir = (bp->pf.fw_fid == src_fid) ? BNXT_DIR_RX : BNXT_DIR_TX; | ||
1274 | } | ||
1275 | |||
1249 | static void bnxt_tc_set_src_fid(struct bnxt *bp, struct bnxt_tc_flow *flow, | 1276 | static void bnxt_tc_set_src_fid(struct bnxt *bp, struct bnxt_tc_flow *flow, |
1250 | u16 src_fid) | 1277 | u16 src_fid) |
1251 | { | 1278 | { |
@@ -1293,6 +1320,9 @@ static int bnxt_tc_add_flow(struct bnxt *bp, u16 src_fid, | |||
1293 | 1320 | ||
1294 | bnxt_tc_set_src_fid(bp, flow, src_fid); | 1321 | bnxt_tc_set_src_fid(bp, flow, src_fid); |
1295 | 1322 | ||
1323 | if (bp->fw_cap & BNXT_FW_CAP_OVS_64BIT_HANDLE) | ||
1324 | bnxt_tc_set_flow_dir(bp, flow, src_fid); | ||
1325 | |||
1296 | if (!bnxt_tc_can_offload(bp, flow)) { | 1326 | if (!bnxt_tc_can_offload(bp, flow)) { |
1297 | rc = -ENOSPC; | 1327 | rc = -ENOSPC; |
1298 | goto free_node; | 1328 | goto free_node; |
@@ -1320,7 +1350,7 @@ static int bnxt_tc_add_flow(struct bnxt *bp, u16 src_fid, | |||
1320 | 1350 | ||
1321 | /* send HWRM cmd to alloc the flow */ | 1351 | /* send HWRM cmd to alloc the flow */ |
1322 | rc = bnxt_hwrm_cfa_flow_alloc(bp, flow, ref_flow_handle, | 1352 | rc = bnxt_hwrm_cfa_flow_alloc(bp, flow, ref_flow_handle, |
1323 | tunnel_handle, &new_node->flow_handle); | 1353 | tunnel_handle, new_node); |
1324 | if (rc) | 1354 | if (rc) |
1325 | goto put_tunnel; | 1355 | goto put_tunnel; |
1326 | 1356 | ||
@@ -1336,7 +1366,7 @@ static int bnxt_tc_add_flow(struct bnxt *bp, u16 src_fid, | |||
1336 | return 0; | 1366 | return 0; |
1337 | 1367 | ||
1338 | hwrm_flow_free: | 1368 | hwrm_flow_free: |
1339 | bnxt_hwrm_cfa_flow_free(bp, new_node->flow_handle); | 1369 | bnxt_hwrm_cfa_flow_free(bp, new_node); |
1340 | put_tunnel: | 1370 | put_tunnel: |
1341 | bnxt_tc_put_tunnel_handle(bp, flow, new_node); | 1371 | bnxt_tc_put_tunnel_handle(bp, flow, new_node); |
1342 | put_l2: | 1372 | put_l2: |
@@ -1397,13 +1427,40 @@ static int bnxt_tc_get_flow_stats(struct bnxt *bp, | |||
1397 | return 0; | 1427 | return 0; |
1398 | } | 1428 | } |
1399 | 1429 | ||
1430 | static void bnxt_fill_cfa_stats_req(struct bnxt *bp, | ||
1431 | struct bnxt_tc_flow_node *flow_node, | ||
1432 | __le16 *flow_handle, __le32 *flow_id) | ||
1433 | { | ||
1434 | u16 handle; | ||
1435 | |||
1436 | if (bp->fw_cap & BNXT_FW_CAP_OVS_64BIT_HANDLE) { | ||
1437 | *flow_id = flow_node->flow_id; | ||
1438 | |||
1439 | /* If flow_id is used to fetch flow stats then: | ||
1440 | * 1. lower 12 bits of flow_handle must be set to all 1s. | ||
1441 | * 2. 15th bit of flow_handle must specify the flow | ||
1442 | * direction (TX/RX). | ||
1443 | */ | ||
1444 | if (flow_node->flow.dir == BNXT_DIR_RX) | ||
1445 | handle = CFA_FLOW_INFO_REQ_FLOW_HANDLE_DIR_RX | | ||
1446 | CFA_FLOW_INFO_REQ_FLOW_HANDLE_MAX_MASK; | ||
1447 | else | ||
1448 | handle = CFA_FLOW_INFO_REQ_FLOW_HANDLE_MAX_MASK; | ||
1449 | |||
1450 | *flow_handle = cpu_to_le16(handle); | ||
1451 | } else { | ||
1452 | *flow_handle = flow_node->flow_handle; | ||
1453 | } | ||
1454 | } | ||
1455 | |||
1400 | static int | 1456 | static int |
1401 | bnxt_hwrm_cfa_flow_stats_get(struct bnxt *bp, int num_flows, | 1457 | bnxt_hwrm_cfa_flow_stats_get(struct bnxt *bp, int num_flows, |
1402 | struct bnxt_tc_stats_batch stats_batch[]) | 1458 | struct bnxt_tc_stats_batch stats_batch[]) |
1403 | { | 1459 | { |
1404 | struct hwrm_cfa_flow_stats_output *resp = bp->hwrm_cmd_resp_addr; | ||
1405 | struct hwrm_cfa_flow_stats_input req = { 0 }; | 1460 | struct hwrm_cfa_flow_stats_input req = { 0 }; |
1461 | struct hwrm_cfa_flow_stats_output *resp; | ||
1406 | __le16 *req_flow_handles = &req.flow_handle_0; | 1462 | __le16 *req_flow_handles = &req.flow_handle_0; |
1463 | __le32 *req_flow_ids = &req.flow_id_0; | ||
1407 | int rc, i; | 1464 | int rc, i; |
1408 | 1465 | ||
1409 | bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_FLOW_STATS, -1, -1); | 1466 | bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_FLOW_STATS, -1, -1); |
@@ -1411,14 +1468,19 @@ bnxt_hwrm_cfa_flow_stats_get(struct bnxt *bp, int num_flows, | |||
1411 | for (i = 0; i < num_flows; i++) { | 1468 | for (i = 0; i < num_flows; i++) { |
1412 | struct bnxt_tc_flow_node *flow_node = stats_batch[i].flow_node; | 1469 | struct bnxt_tc_flow_node *flow_node = stats_batch[i].flow_node; |
1413 | 1470 | ||
1414 | req_flow_handles[i] = flow_node->flow_handle; | 1471 | bnxt_fill_cfa_stats_req(bp, flow_node, |
1472 | &req_flow_handles[i], &req_flow_ids[i]); | ||
1415 | } | 1473 | } |
1416 | 1474 | ||
1417 | mutex_lock(&bp->hwrm_cmd_lock); | 1475 | mutex_lock(&bp->hwrm_cmd_lock); |
1418 | rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); | 1476 | rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); |
1419 | if (!rc) { | 1477 | if (!rc) { |
1420 | __le64 *resp_packets = &resp->packet_0; | 1478 | __le64 *resp_packets; |
1421 | __le64 *resp_bytes = &resp->byte_0; | 1479 | __le64 *resp_bytes; |
1480 | |||
1481 | resp = bnxt_get_hwrm_resp_addr(bp, &req); | ||
1482 | resp_packets = &resp->packet_0; | ||
1483 | resp_bytes = &resp->byte_0; | ||
1422 | 1484 | ||
1423 | for (i = 0; i < num_flows; i++) { | 1485 | for (i = 0; i < num_flows; i++) { |
1424 | stats_batch[i].hw_stats.packets = | 1486 | stats_batch[i].hw_stats.packets = |
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.h index 97e09a880693..8a0968967bc5 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.h | |||
@@ -98,6 +98,9 @@ struct bnxt_tc_flow { | |||
98 | 98 | ||
99 | /* flow applicable to pkts ingressing on this fid */ | 99 | /* flow applicable to pkts ingressing on this fid */ |
100 | u16 src_fid; | 100 | u16 src_fid; |
101 | u8 dir; | ||
102 | #define BNXT_DIR_RX 1 | ||
103 | #define BNXT_DIR_TX 0 | ||
101 | struct bnxt_tc_l2_key l2_key; | 104 | struct bnxt_tc_l2_key l2_key; |
102 | struct bnxt_tc_l2_key l2_mask; | 105 | struct bnxt_tc_l2_key l2_mask; |
103 | struct bnxt_tc_l3_key l3_key; | 106 | struct bnxt_tc_l3_key l3_key; |
@@ -170,7 +173,9 @@ struct bnxt_tc_flow_node { | |||
170 | 173 | ||
171 | struct bnxt_tc_flow flow; | 174 | struct bnxt_tc_flow flow; |
172 | 175 | ||
176 | __le64 ext_flow_handle; | ||
173 | __le16 flow_handle; | 177 | __le16 flow_handle; |
178 | __le32 flow_id; | ||
174 | 179 | ||
175 | /* L2 node in l2 hashtable that shares flow's l2 key */ | 180 | /* L2 node in l2 hashtable that shares flow's l2 key */ |
176 | struct bnxt_tc_l2_node *l2_node; | 181 | struct bnxt_tc_l2_node *l2_node; |