diff options
author | David S. Miller <davem@davemloft.net> | 2015-10-16 02:28:03 -0400 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2015-10-16 02:28:03 -0400 |
commit | 181e4246b4666bc3af148c1dacb330c9be2acf76 (patch) | |
tree | cec72dd135a148ae1f1588299011a1d5334d954d | |
parent | 96aec911482246a319bf457f39fa848ce436c8fd (diff) | |
parent | 5cd16d8c78fd17520fff437256f0c3a4e960fd5d (diff) |
Merge branch 'mlxsw-cleanups'
Jiri Pirko says:
====================
mlxsw: Driver update, cleanups
This patchset contains various cleanups and improvements in mlxsw driver.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r-- | drivers/net/ethernet/mellanox/mlxsw/cmd.h | 26 | ||||
-rw-r--r-- | drivers/net/ethernet/mellanox/mlxsw/core.c | 3 | ||||
-rw-r--r-- | drivers/net/ethernet/mellanox/mlxsw/pci.c | 49 | ||||
-rw-r--r-- | drivers/net/ethernet/mellanox/mlxsw/pci.h | 4 | ||||
-rw-r--r-- | drivers/net/ethernet/mellanox/mlxsw/reg.h | 121 | ||||
-rw-r--r-- | drivers/net/ethernet/mellanox/mlxsw/switchx2.c | 34 |
6 files changed, 103 insertions, 134 deletions
diff --git a/drivers/net/ethernet/mellanox/mlxsw/cmd.h b/drivers/net/ethernet/mellanox/mlxsw/cmd.h index 770db17eb03f..c7889f4fa0d7 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/cmd.h +++ b/drivers/net/ethernet/mellanox/mlxsw/cmd.h | |||
@@ -464,6 +464,8 @@ MLXSW_ITEM32(cmd_mbox, query_aq_cap, max_sg_rq, 0x10, 0, 8); | |||
464 | * passed in this command must be pinned. | 464 | * passed in this command must be pinned. |
465 | */ | 465 | */ |
466 | 466 | ||
467 | #define MLXSW_CMD_MAP_FA_VPM_ENTRIES_MAX 32 | ||
468 | |||
467 | static inline int mlxsw_cmd_map_fa(struct mlxsw_core *mlxsw_core, | 469 | static inline int mlxsw_cmd_map_fa(struct mlxsw_core *mlxsw_core, |
468 | char *in_mbox, u32 vpm_entries_count) | 470 | char *in_mbox, u32 vpm_entries_count) |
469 | { | 471 | { |
@@ -568,7 +570,7 @@ MLXSW_ITEM32(cmd_mbox, config_profile, set_max_vlan_groups, 0x0C, 6, 1); | |||
568 | */ | 570 | */ |
569 | MLXSW_ITEM32(cmd_mbox, config_profile, set_max_regions, 0x0C, 7, 1); | 571 | MLXSW_ITEM32(cmd_mbox, config_profile, set_max_regions, 0x0C, 7, 1); |
570 | 572 | ||
571 | /* cmd_mbox_config_profile_set_fid_based | 573 | /* cmd_mbox_config_profile_set_flood_mode |
572 | * Capability bit. Setting a bit to 1 configures the profile | 574 | * Capability bit. Setting a bit to 1 configures the profile |
573 | * according to the mailbox contents. | 575 | * according to the mailbox contents. |
574 | */ | 576 | */ |
@@ -649,12 +651,8 @@ MLXSW_ITEM32(cmd_mbox, config_profile, max_vlan_groups, 0x28, 0, 12); | |||
649 | MLXSW_ITEM32(cmd_mbox, config_profile, max_regions, 0x2C, 0, 16); | 651 | MLXSW_ITEM32(cmd_mbox, config_profile, max_regions, 0x2C, 0, 16); |
650 | 652 | ||
651 | /* cmd_mbox_config_profile_max_flood_tables | 653 | /* cmd_mbox_config_profile_max_flood_tables |
652 | * Maximum number of Flooding Tables. Flooding Tables are associated to | 654 | * Maximum number of single-entry flooding tables. Different flooding tables |
653 | * the different packet types for the different switch partitions. | 655 | * can be associated with different packet types. |
654 | * Note that the table size depends on the fid_based mode. | ||
655 | * In SwitchX silicon, tables are split equally between the switch | ||
656 | * partitions. e.g. for 2 swids and 8 tables, the first 4 are associated | ||
657 | * with swid-1 and the last 4 are associated with swid-2. | ||
658 | */ | 656 | */ |
659 | MLXSW_ITEM32(cmd_mbox, config_profile, max_flood_tables, 0x30, 16, 4); | 657 | MLXSW_ITEM32(cmd_mbox, config_profile, max_flood_tables, 0x30, 16, 4); |
660 | 658 | ||
@@ -665,12 +663,14 @@ MLXSW_ITEM32(cmd_mbox, config_profile, max_flood_tables, 0x30, 16, 4); | |||
665 | */ | 663 | */ |
666 | MLXSW_ITEM32(cmd_mbox, config_profile, max_vid_flood_tables, 0x30, 8, 4); | 664 | MLXSW_ITEM32(cmd_mbox, config_profile, max_vid_flood_tables, 0x30, 8, 4); |
667 | 665 | ||
668 | /* cmd_mbox_config_profile_fid_based | 666 | /* cmd_mbox_config_profile_flood_mode |
669 | * FID Based Flood Mode | 667 | * Flooding mode to use. |
670 | * 00 Do not use FID to offset the index into the Port Group Table/Multicast ID | 668 | * 0-2 - Backward compatible modes for SwitchX devices. |
671 | * 01 Use FID to offset the index to the Port Group Table (pgi) | 669 | * 3 - Mixed mode, where: |
672 | * 10 Use FID to offset the index to the Port Group Table (pgi) and | 670 | * max_flood_tables indicates the number of single-entry tables. |
673 | * the Multicast ID | 671 | * max_vid_flood_tables indicates the number of per-VID tables. |
672 | * max_fid_offset_flood_tables indicates the number of FID-offset tables. | ||
673 | * max_fid_flood_tables indicates the number of per-FID tables. | ||
674 | */ | 674 | */ |
675 | MLXSW_ITEM32(cmd_mbox, config_profile, flood_mode, 0x30, 0, 2); | 675 | MLXSW_ITEM32(cmd_mbox, config_profile, flood_mode, 0x30, 0, 2); |
676 | 676 | ||
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c index dbcaf5df8967..9f4a0bf01336 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core.c | |||
@@ -506,7 +506,6 @@ static int mlxsw_emad_traps_set(struct mlxsw_core *mlxsw_core) | |||
506 | return err; | 506 | return err; |
507 | 507 | ||
508 | mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_TRAP_TO_CPU, | 508 | mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_TRAP_TO_CPU, |
509 | MLXSW_REG_HTGT_TRAP_GROUP_EMAD, | ||
510 | MLXSW_TRAP_ID_ETHEMAD); | 509 | MLXSW_TRAP_ID_ETHEMAD); |
511 | return mlxsw_reg_write(mlxsw_core, MLXSW_REG(hpkt), hpkt_pl); | 510 | return mlxsw_reg_write(mlxsw_core, MLXSW_REG(hpkt), hpkt_pl); |
512 | } | 511 | } |
@@ -551,8 +550,8 @@ static void mlxsw_emad_fini(struct mlxsw_core *mlxsw_core) | |||
551 | { | 550 | { |
552 | char hpkt_pl[MLXSW_REG_HPKT_LEN]; | 551 | char hpkt_pl[MLXSW_REG_HPKT_LEN]; |
553 | 552 | ||
553 | mlxsw_core->emad.use_emad = false; | ||
554 | mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_DISCARD, | 554 | mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_DISCARD, |
555 | MLXSW_REG_HTGT_TRAP_GROUP_EMAD, | ||
556 | MLXSW_TRAP_ID_ETHEMAD); | 555 | MLXSW_TRAP_ID_ETHEMAD); |
557 | mlxsw_reg_write(mlxsw_core, MLXSW_REG(hpkt), hpkt_pl); | 556 | mlxsw_reg_write(mlxsw_core, MLXSW_REG(hpkt), hpkt_pl); |
558 | 557 | ||
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c index 462cea31ecbb..974ce47cec05 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/pci.c +++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c | |||
@@ -171,8 +171,8 @@ struct mlxsw_pci { | |||
171 | struct msix_entry msix_entry; | 171 | struct msix_entry msix_entry; |
172 | struct mlxsw_core *core; | 172 | struct mlxsw_core *core; |
173 | struct { | 173 | struct { |
174 | u16 num_pages; | ||
175 | struct mlxsw_pci_mem_item *items; | 174 | struct mlxsw_pci_mem_item *items; |
175 | unsigned int count; | ||
176 | } fw_area; | 176 | } fw_area; |
177 | struct { | 177 | struct { |
178 | struct mlxsw_pci_mem_item out_mbox; | 178 | struct mlxsw_pci_mem_item out_mbox; |
@@ -431,8 +431,7 @@ static int mlxsw_pci_wqe_frag_map(struct mlxsw_pci *mlxsw_pci, char *wqe, | |||
431 | 431 | ||
432 | mapaddr = pci_map_single(pdev, frag_data, frag_len, direction); | 432 | mapaddr = pci_map_single(pdev, frag_data, frag_len, direction); |
433 | if (unlikely(pci_dma_mapping_error(pdev, mapaddr))) { | 433 | if (unlikely(pci_dma_mapping_error(pdev, mapaddr))) { |
434 | if (net_ratelimit()) | 434 | dev_err_ratelimited(&pdev->dev, "failed to dma map tx frag\n"); |
435 | dev_err(&pdev->dev, "failed to dma map tx frag\n"); | ||
436 | return -EIO; | 435 | return -EIO; |
437 | } | 436 | } |
438 | mlxsw_pci_wqe_address_set(wqe, index, mapaddr); | 437 | mlxsw_pci_wqe_address_set(wqe, index, mapaddr); |
@@ -497,6 +496,7 @@ static int mlxsw_pci_rdq_init(struct mlxsw_pci *mlxsw_pci, char *mbox, | |||
497 | struct mlxsw_pci_queue *q) | 496 | struct mlxsw_pci_queue *q) |
498 | { | 497 | { |
499 | struct mlxsw_pci_queue_elem_info *elem_info; | 498 | struct mlxsw_pci_queue_elem_info *elem_info; |
499 | u8 sdq_count = mlxsw_pci_sdq_count(mlxsw_pci); | ||
500 | int i; | 500 | int i; |
501 | int err; | 501 | int err; |
502 | 502 | ||
@@ -504,9 +504,9 @@ static int mlxsw_pci_rdq_init(struct mlxsw_pci *mlxsw_pci, char *mbox, | |||
504 | q->consumer_counter = 0; | 504 | q->consumer_counter = 0; |
505 | 505 | ||
506 | /* Set CQ of same number of this RDQ with base | 506 | /* Set CQ of same number of this RDQ with base |
507 | * above MLXSW_PCI_SDQS_MAX as the lower ones are assigned to SDQs. | 507 | * above SDQ count as the lower ones are assigned to SDQs. |
508 | */ | 508 | */ |
509 | mlxsw_cmd_mbox_sw2hw_dq_cq_set(mbox, q->num + MLXSW_PCI_SDQS_COUNT); | 509 | mlxsw_cmd_mbox_sw2hw_dq_cq_set(mbox, sdq_count + q->num); |
510 | mlxsw_cmd_mbox_sw2hw_dq_log2_dq_sz_set(mbox, 3); /* 8 pages */ | 510 | mlxsw_cmd_mbox_sw2hw_dq_log2_dq_sz_set(mbox, 3); /* 8 pages */ |
511 | for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) { | 511 | for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) { |
512 | dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i); | 512 | dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i); |
@@ -699,8 +699,8 @@ static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci, | |||
699 | put_new_skb: | 699 | put_new_skb: |
700 | memset(wqe, 0, q->elem_size); | 700 | memset(wqe, 0, q->elem_size); |
701 | err = mlxsw_pci_rdq_skb_alloc(mlxsw_pci, elem_info); | 701 | err = mlxsw_pci_rdq_skb_alloc(mlxsw_pci, elem_info); |
702 | if (err && net_ratelimit()) | 702 | if (err) |
703 | dev_dbg(&pdev->dev, "Failed to alloc skb for RDQ\n"); | 703 | dev_dbg_ratelimited(&pdev->dev, "Failed to alloc skb for RDQ\n"); |
704 | /* Everything is set up, ring doorbell to pass elem to HW */ | 704 | /* Everything is set up, ring doorbell to pass elem to HW */ |
705 | q->producer_counter++; | 705 | q->producer_counter++; |
706 | mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q); | 706 | mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q); |
@@ -830,7 +830,8 @@ static void mlxsw_pci_eq_tasklet(unsigned long data) | |||
830 | { | 830 | { |
831 | struct mlxsw_pci_queue *q = (struct mlxsw_pci_queue *) data; | 831 | struct mlxsw_pci_queue *q = (struct mlxsw_pci_queue *) data; |
832 | struct mlxsw_pci *mlxsw_pci = q->pci; | 832 | struct mlxsw_pci *mlxsw_pci = q->pci; |
833 | unsigned long active_cqns[BITS_TO_LONGS(MLXSW_PCI_CQS_COUNT)]; | 833 | u8 cq_count = mlxsw_pci_cq_count(mlxsw_pci); |
834 | unsigned long active_cqns[BITS_TO_LONGS(MLXSW_PCI_CQS_MAX)]; | ||
834 | char *eqe; | 835 | char *eqe; |
835 | u8 cqn; | 836 | u8 cqn; |
836 | bool cq_handle = false; | 837 | bool cq_handle = false; |
@@ -866,7 +867,7 @@ static void mlxsw_pci_eq_tasklet(unsigned long data) | |||
866 | 867 | ||
867 | if (!cq_handle) | 868 | if (!cq_handle) |
868 | return; | 869 | return; |
869 | for_each_set_bit(cqn, active_cqns, MLXSW_PCI_CQS_COUNT) { | 870 | for_each_set_bit(cqn, active_cqns, cq_count) { |
870 | q = mlxsw_pci_cq_get(mlxsw_pci, cqn); | 871 | q = mlxsw_pci_cq_get(mlxsw_pci, cqn); |
871 | mlxsw_pci_queue_tasklet_schedule(q); | 872 | mlxsw_pci_queue_tasklet_schedule(q); |
872 | } | 873 | } |
@@ -1067,10 +1068,8 @@ static int mlxsw_pci_aqs_init(struct mlxsw_pci *mlxsw_pci, char *mbox) | |||
1067 | num_eqs = mlxsw_cmd_mbox_query_aq_cap_max_num_eqs_get(mbox); | 1068 | num_eqs = mlxsw_cmd_mbox_query_aq_cap_max_num_eqs_get(mbox); |
1068 | eq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_eq_sz_get(mbox); | 1069 | eq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_eq_sz_get(mbox); |
1069 | 1070 | ||
1070 | if ((num_sdqs != MLXSW_PCI_SDQS_COUNT) || | 1071 | if (num_sdqs + num_rdqs > num_cqs || |
1071 | (num_rdqs != MLXSW_PCI_RDQS_COUNT) || | 1072 | num_cqs > MLXSW_PCI_CQS_MAX || num_eqs != MLXSW_PCI_EQS_COUNT) { |
1072 | (num_cqs != MLXSW_PCI_CQS_COUNT) || | ||
1073 | (num_eqs != MLXSW_PCI_EQS_COUNT)) { | ||
1074 | dev_err(&pdev->dev, "Unsupported number of queues\n"); | 1073 | dev_err(&pdev->dev, "Unsupported number of queues\n"); |
1075 | return -EINVAL; | 1074 | return -EINVAL; |
1076 | } | 1075 | } |
@@ -1272,6 +1271,7 @@ static int mlxsw_pci_fw_area_init(struct mlxsw_pci *mlxsw_pci, char *mbox, | |||
1272 | u16 num_pages) | 1271 | u16 num_pages) |
1273 | { | 1272 | { |
1274 | struct mlxsw_pci_mem_item *mem_item; | 1273 | struct mlxsw_pci_mem_item *mem_item; |
1274 | int nent = 0; | ||
1275 | int i; | 1275 | int i; |
1276 | int err; | 1276 | int err; |
1277 | 1277 | ||
@@ -1279,7 +1279,7 @@ static int mlxsw_pci_fw_area_init(struct mlxsw_pci *mlxsw_pci, char *mbox, | |||
1279 | GFP_KERNEL); | 1279 | GFP_KERNEL); |
1280 | if (!mlxsw_pci->fw_area.items) | 1280 | if (!mlxsw_pci->fw_area.items) |
1281 | return -ENOMEM; | 1281 | return -ENOMEM; |
1282 | mlxsw_pci->fw_area.num_pages = num_pages; | 1282 | mlxsw_pci->fw_area.count = num_pages; |
1283 | 1283 | ||
1284 | mlxsw_cmd_mbox_zero(mbox); | 1284 | mlxsw_cmd_mbox_zero(mbox); |
1285 | for (i = 0; i < num_pages; i++) { | 1285 | for (i = 0; i < num_pages; i++) { |
@@ -1293,13 +1293,22 @@ static int mlxsw_pci_fw_area_init(struct mlxsw_pci *mlxsw_pci, char *mbox, | |||
1293 | err = -ENOMEM; | 1293 | err = -ENOMEM; |
1294 | goto err_alloc; | 1294 | goto err_alloc; |
1295 | } | 1295 | } |
1296 | mlxsw_cmd_mbox_map_fa_pa_set(mbox, i, mem_item->mapaddr); | 1296 | mlxsw_cmd_mbox_map_fa_pa_set(mbox, nent, mem_item->mapaddr); |
1297 | mlxsw_cmd_mbox_map_fa_log2size_set(mbox, i, 0); /* 1 page */ | 1297 | mlxsw_cmd_mbox_map_fa_log2size_set(mbox, nent, 0); /* 1 page */ |
1298 | if (++nent == MLXSW_CMD_MAP_FA_VPM_ENTRIES_MAX) { | ||
1299 | err = mlxsw_cmd_map_fa(mlxsw_pci->core, mbox, nent); | ||
1300 | if (err) | ||
1301 | goto err_cmd_map_fa; | ||
1302 | nent = 0; | ||
1303 | mlxsw_cmd_mbox_zero(mbox); | ||
1304 | } | ||
1298 | } | 1305 | } |
1299 | 1306 | ||
1300 | err = mlxsw_cmd_map_fa(mlxsw_pci->core, mbox, num_pages); | 1307 | if (nent) { |
1301 | if (err) | 1308 | err = mlxsw_cmd_map_fa(mlxsw_pci->core, mbox, nent); |
1302 | goto err_cmd_map_fa; | 1309 | if (err) |
1310 | goto err_cmd_map_fa; | ||
1311 | } | ||
1303 | 1312 | ||
1304 | return 0; | 1313 | return 0; |
1305 | 1314 | ||
@@ -1322,7 +1331,7 @@ static void mlxsw_pci_fw_area_fini(struct mlxsw_pci *mlxsw_pci) | |||
1322 | 1331 | ||
1323 | mlxsw_cmd_unmap_fa(mlxsw_pci->core); | 1332 | mlxsw_cmd_unmap_fa(mlxsw_pci->core); |
1324 | 1333 | ||
1325 | for (i = 0; i < mlxsw_pci->fw_area.num_pages; i++) { | 1334 | for (i = 0; i < mlxsw_pci->fw_area.count; i++) { |
1326 | mem_item = &mlxsw_pci->fw_area.items[i]; | 1335 | mem_item = &mlxsw_pci->fw_area.items[i]; |
1327 | 1336 | ||
1328 | pci_free_consistent(mlxsw_pci->pdev, mem_item->size, | 1337 | pci_free_consistent(mlxsw_pci->pdev, mem_item->size, |
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.h b/drivers/net/ethernet/mellanox/mlxsw/pci.h index 1ef9664b4512..5b3453b6cf5d 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/pci.h +++ b/drivers/net/ethernet/mellanox/mlxsw/pci.h | |||
@@ -71,9 +71,7 @@ | |||
71 | #define MLXSW_PCI_DOORBELL(offset, type_offset, num) \ | 71 | #define MLXSW_PCI_DOORBELL(offset, type_offset, num) \ |
72 | ((offset) + (type_offset) + (num) * 4) | 72 | ((offset) + (type_offset) + (num) * 4) |
73 | 73 | ||
74 | #define MLXSW_PCI_RDQS_COUNT 24 | 74 | #define MLXSW_PCI_CQS_MAX 96 |
75 | #define MLXSW_PCI_SDQS_COUNT 24 | ||
76 | #define MLXSW_PCI_CQS_COUNT (MLXSW_PCI_RDQS_COUNT + MLXSW_PCI_SDQS_COUNT) | ||
77 | #define MLXSW_PCI_EQS_COUNT 2 | 75 | #define MLXSW_PCI_EQS_COUNT 2 |
78 | #define MLXSW_PCI_EQ_ASYNC_NUM 0 | 76 | #define MLXSW_PCI_EQ_ASYNC_NUM 0 |
79 | #define MLXSW_PCI_EQ_COMP_NUM 1 | 77 | #define MLXSW_PCI_EQ_COMP_NUM 1 |
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index 096e1c12175a..7b245af10e42 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h | |||
@@ -99,57 +99,6 @@ static const struct mlxsw_reg_info mlxsw_reg_spad = { | |||
99 | */ | 99 | */ |
100 | MLXSW_ITEM_BUF(reg, spad, base_mac, 0x02, 6); | 100 | MLXSW_ITEM_BUF(reg, spad, base_mac, 0x02, 6); |
101 | 101 | ||
102 | /* SMID - Switch Multicast ID | ||
103 | * -------------------------- | ||
104 | * In multi-chip configuration, each device should maintain mapping between | ||
105 | * Multicast ID (MID) into a list of local ports. This mapping is used in all | ||
106 | * the devices other than the ingress device, and is implemented as part of the | ||
107 | * FDB. The MID record maps from a MID, which is a unique identi- fier of the | ||
108 | * multicast group within the stacking domain, into a list of local ports into | ||
109 | * which the packet is replicated. | ||
110 | */ | ||
111 | #define MLXSW_REG_SMID_ID 0x2007 | ||
112 | #define MLXSW_REG_SMID_LEN 0x420 | ||
113 | |||
114 | static const struct mlxsw_reg_info mlxsw_reg_smid = { | ||
115 | .id = MLXSW_REG_SMID_ID, | ||
116 | .len = MLXSW_REG_SMID_LEN, | ||
117 | }; | ||
118 | |||
119 | /* reg_smid_swid | ||
120 | * Switch partition ID. | ||
121 | * Access: Index | ||
122 | */ | ||
123 | MLXSW_ITEM32(reg, smid, swid, 0x00, 24, 8); | ||
124 | |||
125 | /* reg_smid_mid | ||
126 | * Multicast identifier - global identifier that represents the multicast group | ||
127 | * across all devices | ||
128 | * Access: Index | ||
129 | */ | ||
130 | MLXSW_ITEM32(reg, smid, mid, 0x00, 0, 16); | ||
131 | |||
132 | /* reg_smid_port | ||
133 | * Local port memebership (1 bit per port). | ||
134 | * Access: RW | ||
135 | */ | ||
136 | MLXSW_ITEM_BIT_ARRAY(reg, smid, port, 0x20, 0x20, 1); | ||
137 | |||
138 | /* reg_smid_port_mask | ||
139 | * Local port mask (1 bit per port). | ||
140 | * Access: W | ||
141 | */ | ||
142 | MLXSW_ITEM_BIT_ARRAY(reg, smid, port_mask, 0x220, 0x20, 1); | ||
143 | |||
144 | static inline void mlxsw_reg_smid_pack(char *payload, u16 mid) | ||
145 | { | ||
146 | MLXSW_REG_ZERO(smid, payload); | ||
147 | mlxsw_reg_smid_swid_set(payload, 0); | ||
148 | mlxsw_reg_smid_mid_set(payload, mid); | ||
149 | mlxsw_reg_smid_port_set(payload, MLXSW_PORT_CPU_PORT, 1); | ||
150 | mlxsw_reg_smid_port_mask_set(payload, MLXSW_PORT_CPU_PORT, 1); | ||
151 | } | ||
152 | |||
153 | /* SSPR - Switch System Port Record Register | 102 | /* SSPR - Switch System Port Record Register |
154 | * ----------------------------------------- | 103 | * ----------------------------------------- |
155 | * Configures the system port to local port mapping. | 104 | * Configures the system port to local port mapping. |
@@ -212,7 +161,7 @@ static inline void mlxsw_reg_sspr_pack(char *payload, u8 local_port) | |||
212 | * ------------------------------------------- | 161 | * ------------------------------------------- |
213 | * Configures the spanning tree state of a physical port. | 162 | * Configures the spanning tree state of a physical port. |
214 | */ | 163 | */ |
215 | #define MLXSW_REG_SPMS_ID 0x200d | 164 | #define MLXSW_REG_SPMS_ID 0x200D |
216 | #define MLXSW_REG_SPMS_LEN 0x404 | 165 | #define MLXSW_REG_SPMS_LEN 0x404 |
217 | 166 | ||
218 | static const struct mlxsw_reg_info mlxsw_reg_spms = { | 167 | static const struct mlxsw_reg_info mlxsw_reg_spms = { |
@@ -243,11 +192,15 @@ enum mlxsw_reg_spms_state { | |||
243 | */ | 192 | */ |
244 | MLXSW_ITEM_BIT_ARRAY(reg, spms, state, 0x04, 0x400, 2); | 193 | MLXSW_ITEM_BIT_ARRAY(reg, spms, state, 0x04, 0x400, 2); |
245 | 194 | ||
246 | static inline void mlxsw_reg_spms_pack(char *payload, u8 local_port, u16 vid, | 195 | static inline void mlxsw_reg_spms_pack(char *payload, u8 local_port) |
247 | enum mlxsw_reg_spms_state state) | ||
248 | { | 196 | { |
249 | MLXSW_REG_ZERO(spms, payload); | 197 | MLXSW_REG_ZERO(spms, payload); |
250 | mlxsw_reg_spms_local_port_set(payload, local_port); | 198 | mlxsw_reg_spms_local_port_set(payload, local_port); |
199 | } | ||
200 | |||
201 | static inline void mlxsw_reg_spms_vid_pack(char *payload, u16 vid, | ||
202 | enum mlxsw_reg_spms_state state) | ||
203 | { | ||
251 | mlxsw_reg_spms_state_set(payload, vid, state); | 204 | mlxsw_reg_spms_state_set(payload, vid, state); |
252 | } | 205 | } |
253 | 206 | ||
@@ -256,7 +209,7 @@ static inline void mlxsw_reg_spms_pack(char *payload, u8 local_port, u16 vid, | |||
256 | * The following register controls the association of flooding tables and MIDs | 209 | * The following register controls the association of flooding tables and MIDs |
257 | * to packet types used for flooding. | 210 | * to packet types used for flooding. |
258 | */ | 211 | */ |
259 | #define MLXSW_REG_SFGC_ID 0x2011 | 212 | #define MLXSW_REG_SFGC_ID 0x2011 |
260 | #define MLXSW_REG_SFGC_LEN 0x10 | 213 | #define MLXSW_REG_SFGC_LEN 0x10 |
261 | 214 | ||
262 | static const struct mlxsw_reg_info mlxsw_reg_sfgc = { | 215 | static const struct mlxsw_reg_info mlxsw_reg_sfgc = { |
@@ -265,13 +218,15 @@ static const struct mlxsw_reg_info mlxsw_reg_sfgc = { | |||
265 | }; | 218 | }; |
266 | 219 | ||
267 | enum mlxsw_reg_sfgc_type { | 220 | enum mlxsw_reg_sfgc_type { |
268 | MLXSW_REG_SFGC_TYPE_BROADCAST = 0, | 221 | MLXSW_REG_SFGC_TYPE_BROADCAST, |
269 | MLXSW_REG_SFGC_TYPE_UNKNOWN_UNICAST = 1, | 222 | MLXSW_REG_SFGC_TYPE_UNKNOWN_UNICAST, |
270 | MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV4 = 2, | 223 | MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV4, |
271 | MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV6 = 3, | 224 | MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV6, |
272 | MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_NON_IP = 5, | 225 | MLXSW_REG_SFGC_TYPE_RESERVED, |
273 | MLXSW_REG_SFGC_TYPE_IPV4_LINK_LOCAL = 6, | 226 | MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_NON_IP, |
274 | MLXSW_REG_SFGC_TYPE_IPV6_ALL_HOST = 7, | 227 | MLXSW_REG_SFGC_TYPE_IPV4_LINK_LOCAL, |
228 | MLXSW_REG_SFGC_TYPE_IPV6_ALL_HOST, | ||
229 | MLXSW_REG_SFGC_TYPE_MAX, | ||
275 | }; | 230 | }; |
276 | 231 | ||
277 | /* reg_sfgc_type | 232 | /* reg_sfgc_type |
@@ -1013,7 +968,7 @@ static inline void mlxsw_reg_ppcnt_pack(char *payload, u8 local_port) | |||
1013 | * Controls the association of a port with a switch partition and enables | 968 | * Controls the association of a port with a switch partition and enables |
1014 | * configuring ports as stacking ports. | 969 | * configuring ports as stacking ports. |
1015 | */ | 970 | */ |
1016 | #define MLXSW_REG_PSPA_ID 0x500d | 971 | #define MLXSW_REG_PSPA_ID 0x500D |
1017 | #define MLXSW_REG_PSPA_LEN 0x8 | 972 | #define MLXSW_REG_PSPA_LEN 0x8 |
1018 | 973 | ||
1019 | static const struct mlxsw_reg_info mlxsw_reg_pspa = { | 974 | static const struct mlxsw_reg_info mlxsw_reg_pspa = { |
@@ -1074,8 +1029,11 @@ MLXSW_ITEM32(reg, htgt, swid, 0x00, 24, 8); | |||
1074 | */ | 1029 | */ |
1075 | MLXSW_ITEM32(reg, htgt, type, 0x00, 8, 4); | 1030 | MLXSW_ITEM32(reg, htgt, type, 0x00, 8, 4); |
1076 | 1031 | ||
1077 | #define MLXSW_REG_HTGT_TRAP_GROUP_EMAD 0x0 | 1032 | enum mlxsw_reg_htgt_trap_group { |
1078 | #define MLXSW_REG_HTGT_TRAP_GROUP_RX 0x1 | 1033 | MLXSW_REG_HTGT_TRAP_GROUP_EMAD, |
1034 | MLXSW_REG_HTGT_TRAP_GROUP_RX, | ||
1035 | MLXSW_REG_HTGT_TRAP_GROUP_CTRL, | ||
1036 | }; | ||
1079 | 1037 | ||
1080 | /* reg_htgt_trap_group | 1038 | /* reg_htgt_trap_group |
1081 | * Trap group number. User defined number specifying which trap groups | 1039 | * Trap group number. User defined number specifying which trap groups |
@@ -1142,6 +1100,7 @@ MLXSW_ITEM32(reg, htgt, local_path_cpu_tclass, 0x10, 16, 6); | |||
1142 | 1100 | ||
1143 | #define MLXSW_REG_HTGT_LOCAL_PATH_RDQ_EMAD 0x15 | 1101 | #define MLXSW_REG_HTGT_LOCAL_PATH_RDQ_EMAD 0x15 |
1144 | #define MLXSW_REG_HTGT_LOCAL_PATH_RDQ_RX 0x14 | 1102 | #define MLXSW_REG_HTGT_LOCAL_PATH_RDQ_RX 0x14 |
1103 | #define MLXSW_REG_HTGT_LOCAL_PATH_RDQ_CTRL 0x13 | ||
1145 | 1104 | ||
1146 | /* reg_htgt_local_path_rdq | 1105 | /* reg_htgt_local_path_rdq |
1147 | * Receive descriptor queue (RDQ) to use for the trap group. | 1106 | * Receive descriptor queue (RDQ) to use for the trap group. |
@@ -1149,21 +1108,29 @@ MLXSW_ITEM32(reg, htgt, local_path_cpu_tclass, 0x10, 16, 6); | |||
1149 | */ | 1108 | */ |
1150 | MLXSW_ITEM32(reg, htgt, local_path_rdq, 0x10, 0, 6); | 1109 | MLXSW_ITEM32(reg, htgt, local_path_rdq, 0x10, 0, 6); |
1151 | 1110 | ||
1152 | static inline void mlxsw_reg_htgt_pack(char *payload, u8 trap_group) | 1111 | static inline void mlxsw_reg_htgt_pack(char *payload, |
1112 | enum mlxsw_reg_htgt_trap_group group) | ||
1153 | { | 1113 | { |
1154 | u8 swid, rdq; | 1114 | u8 swid, rdq; |
1155 | 1115 | ||
1156 | MLXSW_REG_ZERO(htgt, payload); | 1116 | MLXSW_REG_ZERO(htgt, payload); |
1157 | if (MLXSW_REG_HTGT_TRAP_GROUP_EMAD == trap_group) { | 1117 | switch (group) { |
1118 | case MLXSW_REG_HTGT_TRAP_GROUP_EMAD: | ||
1158 | swid = MLXSW_PORT_SWID_ALL_SWIDS; | 1119 | swid = MLXSW_PORT_SWID_ALL_SWIDS; |
1159 | rdq = MLXSW_REG_HTGT_LOCAL_PATH_RDQ_EMAD; | 1120 | rdq = MLXSW_REG_HTGT_LOCAL_PATH_RDQ_EMAD; |
1160 | } else { | 1121 | break; |
1122 | case MLXSW_REG_HTGT_TRAP_GROUP_RX: | ||
1161 | swid = 0; | 1123 | swid = 0; |
1162 | rdq = MLXSW_REG_HTGT_LOCAL_PATH_RDQ_RX; | 1124 | rdq = MLXSW_REG_HTGT_LOCAL_PATH_RDQ_RX; |
1125 | break; | ||
1126 | case MLXSW_REG_HTGT_TRAP_GROUP_CTRL: | ||
1127 | swid = 0; | ||
1128 | rdq = MLXSW_REG_HTGT_LOCAL_PATH_RDQ_CTRL; | ||
1129 | break; | ||
1163 | } | 1130 | } |
1164 | mlxsw_reg_htgt_swid_set(payload, swid); | 1131 | mlxsw_reg_htgt_swid_set(payload, swid); |
1165 | mlxsw_reg_htgt_type_set(payload, MLXSW_REG_HTGT_PATH_TYPE_LOCAL); | 1132 | mlxsw_reg_htgt_type_set(payload, MLXSW_REG_HTGT_PATH_TYPE_LOCAL); |
1166 | mlxsw_reg_htgt_trap_group_set(payload, trap_group); | 1133 | mlxsw_reg_htgt_trap_group_set(payload, group); |
1167 | mlxsw_reg_htgt_pide_set(payload, MLXSW_REG_HTGT_POLICER_DISABLE); | 1134 | mlxsw_reg_htgt_pide_set(payload, MLXSW_REG_HTGT_POLICER_DISABLE); |
1168 | mlxsw_reg_htgt_pid_set(payload, 0); | 1135 | mlxsw_reg_htgt_pid_set(payload, 0); |
1169 | mlxsw_reg_htgt_mirror_action_set(payload, MLXSW_REG_HTGT_TRAP_TO_CPU); | 1136 | mlxsw_reg_htgt_mirror_action_set(payload, MLXSW_REG_HTGT_TRAP_TO_CPU); |
@@ -1254,12 +1221,22 @@ enum { | |||
1254 | */ | 1221 | */ |
1255 | MLXSW_ITEM32(reg, hpkt, ctrl, 0x04, 16, 2); | 1222 | MLXSW_ITEM32(reg, hpkt, ctrl, 0x04, 16, 2); |
1256 | 1223 | ||
1257 | static inline void mlxsw_reg_hpkt_pack(char *payload, u8 action, | 1224 | static inline void mlxsw_reg_hpkt_pack(char *payload, u8 action, u16 trap_id) |
1258 | u8 trap_group, u16 trap_id) | ||
1259 | { | 1225 | { |
1226 | enum mlxsw_reg_htgt_trap_group trap_group; | ||
1227 | |||
1260 | MLXSW_REG_ZERO(hpkt, payload); | 1228 | MLXSW_REG_ZERO(hpkt, payload); |
1261 | mlxsw_reg_hpkt_ack_set(payload, MLXSW_REG_HPKT_ACK_NOT_REQUIRED); | 1229 | mlxsw_reg_hpkt_ack_set(payload, MLXSW_REG_HPKT_ACK_NOT_REQUIRED); |
1262 | mlxsw_reg_hpkt_action_set(payload, action); | 1230 | mlxsw_reg_hpkt_action_set(payload, action); |
1231 | switch (trap_id) { | ||
1232 | case MLXSW_TRAP_ID_ETHEMAD: | ||
1233 | case MLXSW_TRAP_ID_PUDE: | ||
1234 | trap_group = MLXSW_REG_HTGT_TRAP_GROUP_EMAD; | ||
1235 | break; | ||
1236 | default: | ||
1237 | trap_group = MLXSW_REG_HTGT_TRAP_GROUP_RX; | ||
1238 | break; | ||
1239 | } | ||
1263 | mlxsw_reg_hpkt_trap_group_set(payload, trap_group); | 1240 | mlxsw_reg_hpkt_trap_group_set(payload, trap_group); |
1264 | mlxsw_reg_hpkt_trap_id_set(payload, trap_id); | 1241 | mlxsw_reg_hpkt_trap_id_set(payload, trap_id); |
1265 | mlxsw_reg_hpkt_ctrl_set(payload, MLXSW_REG_HPKT_CTRL_PACKET_DEFAULT); | 1242 | mlxsw_reg_hpkt_ctrl_set(payload, MLXSW_REG_HPKT_CTRL_PACKET_DEFAULT); |
@@ -1272,8 +1249,6 @@ static inline const char *mlxsw_reg_id_str(u16 reg_id) | |||
1272 | return "SGCR"; | 1249 | return "SGCR"; |
1273 | case MLXSW_REG_SPAD_ID: | 1250 | case MLXSW_REG_SPAD_ID: |
1274 | return "SPAD"; | 1251 | return "SPAD"; |
1275 | case MLXSW_REG_SMID_ID: | ||
1276 | return "SMID"; | ||
1277 | case MLXSW_REG_SSPR_ID: | 1252 | case MLXSW_REG_SSPR_ID: |
1278 | return "SSPR"; | 1253 | return "SSPR"; |
1279 | case MLXSW_REG_SPMS_ID: | 1254 | case MLXSW_REG_SPMS_ID: |
diff --git a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c index d448431bbc83..4f72e0a423d9 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c +++ b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c | |||
@@ -57,13 +57,11 @@ static const char mlxsw_sx_driver_version[] = "1.0"; | |||
57 | 57 | ||
58 | struct mlxsw_sx_port; | 58 | struct mlxsw_sx_port; |
59 | 59 | ||
60 | #define MLXSW_SW_HW_ID_LEN 6 | ||
61 | |||
62 | struct mlxsw_sx { | 60 | struct mlxsw_sx { |
63 | struct mlxsw_sx_port **ports; | 61 | struct mlxsw_sx_port **ports; |
64 | struct mlxsw_core *core; | 62 | struct mlxsw_core *core; |
65 | const struct mlxsw_bus_info *bus_info; | 63 | const struct mlxsw_bus_info *bus_info; |
66 | u8 hw_id[MLXSW_SW_HW_ID_LEN]; | 64 | u8 hw_id[ETH_ALEN]; |
67 | }; | 65 | }; |
68 | 66 | ||
69 | struct mlxsw_sx_port_pcpu_stats { | 67 | struct mlxsw_sx_port_pcpu_stats { |
@@ -925,7 +923,8 @@ static int mlxsw_sx_port_stp_state_set(struct mlxsw_sx_port *mlxsw_sx_port, | |||
925 | spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL); | 923 | spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL); |
926 | if (!spms_pl) | 924 | if (!spms_pl) |
927 | return -ENOMEM; | 925 | return -ENOMEM; |
928 | mlxsw_reg_spms_pack(spms_pl, mlxsw_sx_port->local_port, vid, state); | 926 | mlxsw_reg_spms_pack(spms_pl, mlxsw_sx_port->local_port); |
927 | mlxsw_reg_spms_vid_pack(spms_pl, vid, state); | ||
929 | err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(spms), spms_pl); | 928 | err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(spms), spms_pl); |
930 | kfree(spms_pl); | 929 | kfree(spms_pl); |
931 | return err; | 930 | return err; |
@@ -1178,8 +1177,7 @@ static int mlxsw_sx_event_register(struct mlxsw_sx *mlxsw_sx, | |||
1178 | if (err) | 1177 | if (err) |
1179 | return err; | 1178 | return err; |
1180 | 1179 | ||
1181 | mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD, | 1180 | mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD, trap_id); |
1182 | MLXSW_REG_HTGT_TRAP_GROUP_EMAD, trap_id); | ||
1183 | err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(hpkt), hpkt_pl); | 1181 | err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(hpkt), hpkt_pl); |
1184 | if (err) | 1182 | if (err) |
1185 | goto err_event_trap_set; | 1183 | goto err_event_trap_set; |
@@ -1212,9 +1210,8 @@ static void mlxsw_sx_rx_listener_func(struct sk_buff *skb, u8 local_port, | |||
1212 | struct mlxsw_sx_port_pcpu_stats *pcpu_stats; | 1210 | struct mlxsw_sx_port_pcpu_stats *pcpu_stats; |
1213 | 1211 | ||
1214 | if (unlikely(!mlxsw_sx_port)) { | 1212 | if (unlikely(!mlxsw_sx_port)) { |
1215 | if (net_ratelimit()) | 1213 | dev_warn_ratelimited(mlxsw_sx->bus_info->dev, "Port %d: skb received for non-existent port\n", |
1216 | dev_warn(mlxsw_sx->bus_info->dev, "Port %d: skb received for non-existent port\n", | 1214 | local_port); |
1217 | local_port); | ||
1218 | return; | 1215 | return; |
1219 | } | 1216 | } |
1220 | 1217 | ||
@@ -1316,6 +1313,11 @@ static int mlxsw_sx_traps_init(struct mlxsw_sx *mlxsw_sx) | |||
1316 | if (err) | 1313 | if (err) |
1317 | return err; | 1314 | return err; |
1318 | 1315 | ||
1316 | mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_CTRL); | ||
1317 | err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(htgt), htgt_pl); | ||
1318 | if (err) | ||
1319 | return err; | ||
1320 | |||
1319 | for (i = 0; i < ARRAY_SIZE(mlxsw_sx_rx_listener); i++) { | 1321 | for (i = 0; i < ARRAY_SIZE(mlxsw_sx_rx_listener); i++) { |
1320 | err = mlxsw_core_rx_listener_register(mlxsw_sx->core, | 1322 | err = mlxsw_core_rx_listener_register(mlxsw_sx->core, |
1321 | &mlxsw_sx_rx_listener[i], | 1323 | &mlxsw_sx_rx_listener[i], |
@@ -1324,7 +1326,6 @@ static int mlxsw_sx_traps_init(struct mlxsw_sx *mlxsw_sx) | |||
1324 | goto err_rx_listener_register; | 1326 | goto err_rx_listener_register; |
1325 | 1327 | ||
1326 | mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_TRAP_TO_CPU, | 1328 | mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_TRAP_TO_CPU, |
1327 | MLXSW_REG_HTGT_TRAP_GROUP_RX, | ||
1328 | mlxsw_sx_rx_listener[i].trap_id); | 1329 | mlxsw_sx_rx_listener[i].trap_id); |
1329 | err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(hpkt), hpkt_pl); | 1330 | err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(hpkt), hpkt_pl); |
1330 | if (err) | 1331 | if (err) |
@@ -1339,7 +1340,6 @@ err_rx_trap_set: | |||
1339 | err_rx_listener_register: | 1340 | err_rx_listener_register: |
1340 | for (i--; i >= 0; i--) { | 1341 | for (i--; i >= 0; i--) { |
1341 | mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD, | 1342 | mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD, |
1342 | MLXSW_REG_HTGT_TRAP_GROUP_RX, | ||
1343 | mlxsw_sx_rx_listener[i].trap_id); | 1343 | mlxsw_sx_rx_listener[i].trap_id); |
1344 | mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(hpkt), hpkt_pl); | 1344 | mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(hpkt), hpkt_pl); |
1345 | 1345 | ||
@@ -1357,7 +1357,6 @@ static void mlxsw_sx_traps_fini(struct mlxsw_sx *mlxsw_sx) | |||
1357 | 1357 | ||
1358 | for (i = 0; i < ARRAY_SIZE(mlxsw_sx_rx_listener); i++) { | 1358 | for (i = 0; i < ARRAY_SIZE(mlxsw_sx_rx_listener); i++) { |
1359 | mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD, | 1359 | mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD, |
1360 | MLXSW_REG_HTGT_TRAP_GROUP_RX, | ||
1361 | mlxsw_sx_rx_listener[i].trap_id); | 1360 | mlxsw_sx_rx_listener[i].trap_id); |
1362 | mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(hpkt), hpkt_pl); | 1361 | mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(hpkt), hpkt_pl); |
1363 | 1362 | ||
@@ -1371,20 +1370,9 @@ static int mlxsw_sx_flood_init(struct mlxsw_sx *mlxsw_sx) | |||
1371 | { | 1370 | { |
1372 | char sfgc_pl[MLXSW_REG_SFGC_LEN]; | 1371 | char sfgc_pl[MLXSW_REG_SFGC_LEN]; |
1373 | char sgcr_pl[MLXSW_REG_SGCR_LEN]; | 1372 | char sgcr_pl[MLXSW_REG_SGCR_LEN]; |
1374 | char *smid_pl; | ||
1375 | char *sftr_pl; | 1373 | char *sftr_pl; |
1376 | int err; | 1374 | int err; |
1377 | 1375 | ||
1378 | /* Due to FW bug, we must configure SMID. */ | ||
1379 | smid_pl = kmalloc(MLXSW_REG_SMID_LEN, GFP_KERNEL); | ||
1380 | if (!smid_pl) | ||
1381 | return -ENOMEM; | ||
1382 | mlxsw_reg_smid_pack(smid_pl, MLXSW_PORT_MID); | ||
1383 | err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(smid), smid_pl); | ||
1384 | kfree(smid_pl); | ||
1385 | if (err) | ||
1386 | return err; | ||
1387 | |||
1388 | /* Configure a flooding table, which includes only CPU port. */ | 1376 | /* Configure a flooding table, which includes only CPU port. */ |
1389 | sftr_pl = kmalloc(MLXSW_REG_SFTR_LEN, GFP_KERNEL); | 1377 | sftr_pl = kmalloc(MLXSW_REG_SFTR_LEN, GFP_KERNEL); |
1390 | if (!sftr_pl) | 1378 | if (!sftr_pl) |