-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/reg.h                 440
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum.c            923
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum.h            118
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c     748
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c   70
-rw-r--r--  include/linux/netdevice.h                                  18
-rw-r--r--  net/core/dev.c                                             46
7 files changed, 2079 insertions(+), 284 deletions(-)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
index 5ddc1d3c7e25..9280d96bb291 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
@@ -3,7 +3,7 @@
  * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
  * Copyright (c) 2015-2016 Ido Schimmel <idosch@mellanox.com>
  * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
- * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
+ * Copyright (c) 2015-2016 Jiri Pirko <jiri@mellanox.com>
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions are met:
@@ -3454,6 +3454,436 @@ static inline void mlxsw_reg_ritr_pack(char *payload, bool enable,
 	mlxsw_reg_ritr_if_mac_memcpy_to(payload, mac);
 }
 
+/* RALTA - Router Algorithmic LPM Tree Allocation Register
+ * -------------------------------------------------------
+ * RALTA is used to allocate the LPM trees of the SHSPM method.
+ */
+#define MLXSW_REG_RALTA_ID 0x8010
+#define MLXSW_REG_RALTA_LEN 0x04
+
+static const struct mlxsw_reg_info mlxsw_reg_ralta = {
+	.id = MLXSW_REG_RALTA_ID,
+	.len = MLXSW_REG_RALTA_LEN,
+};
+
+/* reg_ralta_op
+ * opcode (valid for Write, must be 0 on Read)
+ * 0 - allocate a tree
+ * 1 - deallocate a tree
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, ralta, op, 0x00, 28, 2);
+
+enum mlxsw_reg_ralxx_protocol {
+	MLXSW_REG_RALXX_PROTOCOL_IPV4,
+	MLXSW_REG_RALXX_PROTOCOL_IPV6,
+};
+
+/* reg_ralta_protocol
+ * Protocol.
+ * Deallocation opcode: Reserved.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ralta, protocol, 0x00, 24, 4);
+
+/* reg_ralta_tree_id
+ * An identifier (numbered from 1..cap_shspm_max_trees-1) representing
+ * the tree identifier (managed by software).
+ * Note that tree_id 0 is allocated for a default-route tree.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, ralta, tree_id, 0x00, 0, 8);
+
+static inline void mlxsw_reg_ralta_pack(char *payload, bool alloc,
+					enum mlxsw_reg_ralxx_protocol protocol,
+					u8 tree_id)
+{
+	MLXSW_REG_ZERO(ralta, payload);
+	mlxsw_reg_ralta_op_set(payload, !alloc);
+	mlxsw_reg_ralta_protocol_set(payload, protocol);
+	mlxsw_reg_ralta_tree_id_set(payload, tree_id);
+}
+
+/* RALST - Router Algorithmic LPM Structure Tree Register
+ * ------------------------------------------------------
+ * RALST is used to set and query the structure of an LPM tree.
+ * The structure of the tree must be a sorted binary tree, where each
+ * node is a bin tagged with the length of the prefixes the lookup
+ * will refer to. Therefore, bin X refers to a set of entries with prefixes
+ * of X bits to match with the destination address. Bin 0 indicates
+ * the default action, when there is no match of any prefix.
+ */
+#define MLXSW_REG_RALST_ID 0x8011
+#define MLXSW_REG_RALST_LEN 0x104
+
+static const struct mlxsw_reg_info mlxsw_reg_ralst = {
+	.id = MLXSW_REG_RALST_ID,
+	.len = MLXSW_REG_RALST_LEN,
+};
+
+/* reg_ralst_root_bin
+ * The bin number of the root bin.
+ * 0 < root_bin <= (length of IP address)
+ * For a default-route tree configure 0xff
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ralst, root_bin, 0x00, 16, 8);
+
+/* reg_ralst_tree_id
+ * Tree identifier numbered from 1..(cap_shspm_max_trees-1).
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, ralst, tree_id, 0x00, 0, 8);
+
+#define MLXSW_REG_RALST_BIN_NO_CHILD 0xff
+#define MLXSW_REG_RALST_BIN_OFFSET 0x04
+#define MLXSW_REG_RALST_BIN_COUNT 128
+
+/* reg_ralst_left_child_bin
+ * Holding the children of the bin according to the stored tree's structure.
+ * For trees composed of less than 4 blocks, the bins in excess are reserved.
+ * Note that tree_id 0 is allocated for a default-route tree, bins are 0xff
+ * Access: RW
+ */
+MLXSW_ITEM16_INDEXED(reg, ralst, left_child_bin, 0x04, 8, 8, 0x02, 0x00, false);
+
+/* reg_ralst_right_child_bin
+ * Holding the children of the bin according to the stored tree's structure.
+ * For trees composed of less than 4 blocks, the bins in excess are reserved.
+ * Note that tree_id 0 is allocated for a default-route tree, bins are 0xff
+ * Access: RW
+ */
+MLXSW_ITEM16_INDEXED(reg, ralst, right_child_bin, 0x04, 0, 8, 0x02, 0x00,
+		     false);
+
+static inline void mlxsw_reg_ralst_pack(char *payload, u8 root_bin, u8 tree_id)
+{
+	MLXSW_REG_ZERO(ralst, payload);
+
+	/* Initialize all bins to have no left or right child */
+	memset(payload + MLXSW_REG_RALST_BIN_OFFSET,
+	       MLXSW_REG_RALST_BIN_NO_CHILD, MLXSW_REG_RALST_BIN_COUNT * 2);
+
+	mlxsw_reg_ralst_root_bin_set(payload, root_bin);
+	mlxsw_reg_ralst_tree_id_set(payload, tree_id);
+}
+
+static inline void mlxsw_reg_ralst_bin_pack(char *payload, u8 bin_number,
+					    u8 left_child_bin,
+					    u8 right_child_bin)
+{
+	int bin_index = bin_number - 1;
+
+	mlxsw_reg_ralst_left_child_bin_set(payload, bin_index, left_child_bin);
+	mlxsw_reg_ralst_right_child_bin_set(payload, bin_index,
+					    right_child_bin);
+}
+
+/* RALTB - Router Algorithmic LPM Tree Binding Register
+ * ----------------------------------------------------
+ * RALTB is used to bind virtual router and protocol to an allocated LPM tree.
+ */
+#define MLXSW_REG_RALTB_ID 0x8012
+#define MLXSW_REG_RALTB_LEN 0x04
+
+static const struct mlxsw_reg_info mlxsw_reg_raltb = {
+	.id = MLXSW_REG_RALTB_ID,
+	.len = MLXSW_REG_RALTB_LEN,
+};
+
+/* reg_raltb_virtual_router
+ * Virtual Router ID
+ * Range is 0..cap_max_virtual_routers-1
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, raltb, virtual_router, 0x00, 16, 16);
+
+/* reg_raltb_protocol
+ * Protocol.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, raltb, protocol, 0x00, 12, 4);
+
+/* reg_raltb_tree_id
+ * Tree to be used for the {virtual_router, protocol}
+ * Tree identifier numbered from 1..(cap_shspm_max_trees-1).
+ * By default, all Unicast IPv4 and IPv6 are bound to tree_id 0.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, raltb, tree_id, 0x00, 0, 8);
+
+static inline void mlxsw_reg_raltb_pack(char *payload, u16 virtual_router,
+					enum mlxsw_reg_ralxx_protocol protocol,
+					u8 tree_id)
+{
+	MLXSW_REG_ZERO(raltb, payload);
+	mlxsw_reg_raltb_virtual_router_set(payload, virtual_router);
+	mlxsw_reg_raltb_protocol_set(payload, protocol);
+	mlxsw_reg_raltb_tree_id_set(payload, tree_id);
+}
+
+/* RALUE - Router Algorithmic LPM Unicast Entry Register
+ * -----------------------------------------------------
+ * RALUE is used to configure and query LPM entries that serve
+ * the Unicast protocols.
+ */
+#define MLXSW_REG_RALUE_ID 0x8013
+#define MLXSW_REG_RALUE_LEN 0x38
+
+static const struct mlxsw_reg_info mlxsw_reg_ralue = {
+	.id = MLXSW_REG_RALUE_ID,
+	.len = MLXSW_REG_RALUE_LEN,
+};
+
+/* reg_ralue_protocol
+ * Protocol.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, ralue, protocol, 0x00, 24, 4);
+
+enum mlxsw_reg_ralue_op {
+	/* Read operation. If entry doesn't exist, the operation fails. */
+	MLXSW_REG_RALUE_OP_QUERY_READ = 0,
+	/* Clear on read operation. Used to read entry and
+	 * clear Activity bit.
+	 */
+	MLXSW_REG_RALUE_OP_QUERY_CLEAR = 1,
+	/* Write operation. Used to write a new entry to the table. All RW
+	 * fields are written for new entry. Activity bit is set
+	 * for new entries.
+	 */
+	MLXSW_REG_RALUE_OP_WRITE_WRITE = 0,
+	/* Update operation. Used to update an existing route entry and
+	 * only update the RW fields that are detailed in the field
+	 * op_u_mask. If entry doesn't exist, the operation fails.
+	 */
+	MLXSW_REG_RALUE_OP_WRITE_UPDATE = 1,
+	/* Clear activity. The Activity bit (the field a) is cleared
+	 * for the entry.
+	 */
+	MLXSW_REG_RALUE_OP_WRITE_CLEAR = 2,
+	/* Delete operation. Used to delete an existing entry. If entry
+	 * doesn't exist, the operation fails.
+	 */
+	MLXSW_REG_RALUE_OP_WRITE_DELETE = 3,
+};
+
+/* reg_ralue_op
+ * Operation.
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, ralue, op, 0x00, 20, 3);
+
+/* reg_ralue_a
+ * Activity. Set for new entries. Set if a packet lookup has hit on the
+ * specific entry, only if the entry is a route. To clear the a bit, use
+ * "clear activity" op.
+ * Enabled by activity_dis in RGCR
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, ralue, a, 0x00, 16, 1);
+
+/* reg_ralue_virtual_router
+ * Virtual Router ID
+ * Range is 0..cap_max_virtual_routers-1
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, ralue, virtual_router, 0x04, 16, 16);
+
+#define MLXSW_REG_RALUE_OP_U_MASK_ENTRY_TYPE	BIT(0)
+#define MLXSW_REG_RALUE_OP_U_MASK_BMP_LEN	BIT(1)
+#define MLXSW_REG_RALUE_OP_U_MASK_ACTION	BIT(2)
+
+/* reg_ralue_op_u_mask
+ * opcode update mask.
+ * On read operation, this field is reserved.
+ * This field is valid for update opcode, otherwise - reserved.
+ * This field is a bitmask of the fields that should be updated.
+ * Access: WO
+ */
+MLXSW_ITEM32(reg, ralue, op_u_mask, 0x04, 8, 3);
+
+/* reg_ralue_prefix_len
+ * Number of bits in the prefix of the LPM route.
+ * Note that for IPv6 prefixes, if prefix_len > 64 the entry consumes
+ * two entries in the physical HW table.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, ralue, prefix_len, 0x08, 0, 8);
+
+/* reg_ralue_dip*
+ * The prefix of the route or of the marker that the object of the LPM
+ * is compared with. The most significant bits of the dip are the prefix.
+ * The least significant bits must be '0' if the prefix_len is smaller
+ * than 128 for IPv6 or smaller than 32 for IPv4.
+ * IPv4 address uses bits dip[31:0] and bits dip[127:32] are reserved.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, ralue, dip4, 0x18, 0, 32);
+
+enum mlxsw_reg_ralue_entry_type {
+	MLXSW_REG_RALUE_ENTRY_TYPE_MARKER_ENTRY = 1,
+	MLXSW_REG_RALUE_ENTRY_TYPE_ROUTE_ENTRY = 2,
+	MLXSW_REG_RALUE_ENTRY_TYPE_MARKER_AND_ROUTE_ENTRY = 3,
+};
+
+/* reg_ralue_entry_type
+ * Entry type.
+ * Note - for Marker entries, the action_type and action fields are reserved.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ralue, entry_type, 0x1C, 30, 2);
+
+/* reg_ralue_bmp_len
+ * The best match prefix length in the case that there is no match for
+ * longer prefixes.
+ * If (entry_type != MARKER_ENTRY), bmp_len must be equal to prefix_len
+ * Note for any update operation with entry_type modification this
+ * field must be set.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ralue, bmp_len, 0x1C, 16, 8);
+
+enum mlxsw_reg_ralue_action_type {
+	MLXSW_REG_RALUE_ACTION_TYPE_REMOTE,
+	MLXSW_REG_RALUE_ACTION_TYPE_LOCAL,
+	MLXSW_REG_RALUE_ACTION_TYPE_IP2ME,
+};
+
+/* reg_ralue_action_type
+ * Action Type
+ * Indicates how the IP address is connected.
+ * It can be connected to a local subnet through local_erif or can be
+ * on a remote subnet connected through a next-hop router,
+ * or transmitted to the CPU.
+ * Reserved when entry_type = MARKER_ENTRY
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ralue, action_type, 0x1C, 0, 2);
+
+enum mlxsw_reg_ralue_trap_action {
+	MLXSW_REG_RALUE_TRAP_ACTION_NOP,
+	MLXSW_REG_RALUE_TRAP_ACTION_TRAP,
+	MLXSW_REG_RALUE_TRAP_ACTION_MIRROR_TO_CPU,
+	MLXSW_REG_RALUE_TRAP_ACTION_MIRROR,
+	MLXSW_REG_RALUE_TRAP_ACTION_DISCARD_ERROR,
+};
+
+/* reg_ralue_trap_action
+ * Trap action.
+ * For IP2ME action, only NOP and MIRROR are possible.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ralue, trap_action, 0x20, 28, 4);
+
+/* reg_ralue_trap_id
+ * Trap ID to be reported to CPU.
+ * Trap ID is RTR_INGRESS0 or RTR_INGRESS1.
+ * For trap_action of NOP, MIRROR and DISCARD_ERROR, trap_id is reserved.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ralue, trap_id, 0x20, 0, 9);
+
+/* reg_ralue_adjacency_index
+ * Points to the first entry of the group-based ECMP.
+ * Only relevant in case of REMOTE action.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ralue, adjacency_index, 0x24, 0, 24);
+
+/* reg_ralue_ecmp_size
+ * Amount of sequential entries starting
+ * from the adjacency_index (the number of ECMPs).
+ * The valid range is 1-64, 512, 1024, 2048 and 4096.
+ * Reserved when trap_action is TRAP or DISCARD_ERROR.
+ * Only relevant in case of REMOTE action.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ralue, ecmp_size, 0x28, 0, 13);
+
+/* reg_ralue_local_erif
+ * Egress Router Interface.
+ * Only relevant in case of LOCAL action.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ralue, local_erif, 0x24, 0, 16);
+
+/* reg_ralue_v
+ * Valid bit for the tunnel_ptr field.
+ * If valid = 0 then trap to CPU as IP2ME trap ID.
+ * If valid = 1 and the packet format allows NVE or IPinIP tunnel
+ * decapsulation then tunnel decapsulation is done.
+ * If valid = 1 and packet format does not allow NVE or IPinIP tunnel
+ * decapsulation then trap as IP2ME trap ID.
+ * Only relevant in case of IP2ME action.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ralue, v, 0x24, 31, 1);
+
+/* reg_ralue_tunnel_ptr
+ * Tunnel Pointer for NVE or IPinIP tunnel decapsulation.
+ * For Spectrum, pointer to KVD Linear.
+ * Only relevant in case of IP2ME action.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ralue, tunnel_ptr, 0x24, 0, 24);
+
+static inline void mlxsw_reg_ralue_pack(char *payload,
+					enum mlxsw_reg_ralxx_protocol protocol,
+					enum mlxsw_reg_ralue_op op,
+					u16 virtual_router, u8 prefix_len)
+{
+	MLXSW_REG_ZERO(ralue, payload);
+	mlxsw_reg_ralue_protocol_set(payload, protocol);
+	mlxsw_reg_ralue_op_set(payload, op);
+	mlxsw_reg_ralue_virtual_router_set(payload, virtual_router);
+	mlxsw_reg_ralue_prefix_len_set(payload, prefix_len);
+	mlxsw_reg_ralue_entry_type_set(payload,
+				       MLXSW_REG_RALUE_ENTRY_TYPE_ROUTE_ENTRY);
+	mlxsw_reg_ralue_bmp_len_set(payload, prefix_len);
+}
+
+static inline void mlxsw_reg_ralue_pack4(char *payload,
+					 enum mlxsw_reg_ralxx_protocol protocol,
+					 enum mlxsw_reg_ralue_op op,
+					 u16 virtual_router, u8 prefix_len,
+					 u32 dip)
+{
+	mlxsw_reg_ralue_pack(payload, protocol, op, virtual_router, prefix_len);
+	mlxsw_reg_ralue_dip4_set(payload, dip);
+}
+
+static inline void
+mlxsw_reg_ralue_act_remote_pack(char *payload,
+				enum mlxsw_reg_ralue_trap_action trap_action,
+				u16 trap_id, u32 adjacency_index, u16 ecmp_size)
+{
+	mlxsw_reg_ralue_action_type_set(payload,
+					MLXSW_REG_RALUE_ACTION_TYPE_REMOTE);
+	mlxsw_reg_ralue_trap_action_set(payload, trap_action);
+	mlxsw_reg_ralue_trap_id_set(payload, trap_id);
+	mlxsw_reg_ralue_adjacency_index_set(payload, adjacency_index);
+	mlxsw_reg_ralue_ecmp_size_set(payload, ecmp_size);
+}
+
+static inline void
+mlxsw_reg_ralue_act_local_pack(char *payload,
+			       enum mlxsw_reg_ralue_trap_action trap_action,
+			       u16 trap_id, u16 local_erif)
+{
+	mlxsw_reg_ralue_action_type_set(payload,
+					MLXSW_REG_RALUE_ACTION_TYPE_LOCAL);
+	mlxsw_reg_ralue_trap_action_set(payload, trap_action);
+	mlxsw_reg_ralue_trap_id_set(payload, trap_id);
+	mlxsw_reg_ralue_local_erif_set(payload, local_erif);
+}
+
+static inline void
+mlxsw_reg_ralue_act_ip2me_pack(char *payload)
+{
+	mlxsw_reg_ralue_action_type_set(payload,
+					MLXSW_REG_RALUE_ACTION_TYPE_IP2ME);
+}
+
 /* MFCR - Management Fan Control Register
  * --------------------------------------
  * This register controls the settings of the Fan Speed PWM mechanism.
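Taken together, these registers program the LPM pipeline: RALTA allocates a tree, RALST lays out its bins, RALTB binds a {virtual router, protocol} pair to it, and RALUE holds the individual routes. A minimal sketch of the allocate/structure/bind sequence using only the helpers added in this hunk; the tree ID, bin layout, and virtual router are illustrative, not taken from the patch:

	/* Hypothetical example - not part of this patch */
	static int example_lpm_tree_setup(struct mlxsw_core *core)
	{
		char ralta_pl[MLXSW_REG_RALTA_LEN];
		char ralst_pl[MLXSW_REG_RALST_LEN];
		char raltb_pl[MLXSW_REG_RALTB_LEN];
		int err;

		/* Allocate IPv4 LPM tree 2 */
		mlxsw_reg_ralta_pack(ralta_pl, true,
				     MLXSW_REG_RALXX_PROTOCOL_IPV4, 2);
		err = mlxsw_reg_write(core, MLXSW_REG(ralta), ralta_pl);
		if (err)
			return err;

		/* Lookup starts at bin 32; its left child falls back to
		 * bin 24, which keeps the NO_CHILD default from the pack.
		 */
		mlxsw_reg_ralst_pack(ralst_pl, 32, 2);
		mlxsw_reg_ralst_bin_pack(ralst_pl, 32, 24,
					 MLXSW_REG_RALST_BIN_NO_CHILD);
		err = mlxsw_reg_write(core, MLXSW_REG(ralst), ralst_pl);
		if (err)
			return err;

		/* Bind virtual router 0 / IPv4 to the new tree */
		mlxsw_reg_raltb_pack(raltb_pl, 0,
				     MLXSW_REG_RALXX_PROTOCOL_IPV4, 2);
		return mlxsw_reg_write(core, MLXSW_REG(raltb), raltb_pl);
	}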
@@ -4196,6 +4626,14 @@ static inline const char *mlxsw_reg_id_str(u16 reg_id)
 		return "RGCR";
 	case MLXSW_REG_RITR_ID:
 		return "RITR";
+	case MLXSW_REG_RALTA_ID:
+		return "RALTA";
+	case MLXSW_REG_RALST_ID:
+		return "RALST";
+	case MLXSW_REG_RALTB_ID:
+		return "RALTB";
+	case MLXSW_REG_RALUE_ID:
+		return "RALUE";
 	case MLXSW_REG_MFCR_ID:
 		return "MFCR";
 	case MLXSW_REG_MFSC_ID:
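With a tree allocated and bound, individual routes are written through RALUE. A sketch of adding and then deleting 192.0.2.0/24 (0xc0000200) in virtual router 0, assuming an already-created egress RIF 1; the example_* functions are hypothetical:

	/* Hypothetical example - not part of this patch */
	static int example_route_write(struct mlxsw_core *core)
	{
		char ralue_pl[MLXSW_REG_RALUE_LEN];

		/* 192.0.2.0/24 in virtual router 0: forward to the local
		 * subnet behind egress RIF 1, no trap (trap_id reserved).
		 */
		mlxsw_reg_ralue_pack4(ralue_pl, MLXSW_REG_RALXX_PROTOCOL_IPV4,
				      MLXSW_REG_RALUE_OP_WRITE_WRITE, 0, 24,
				      0xc0000200);
		mlxsw_reg_ralue_act_local_pack(ralue_pl,
					       MLXSW_REG_RALUE_TRAP_ACTION_NOP,
					       0, 1);
		return mlxsw_reg_write(core, MLXSW_REG(ralue), ralue_pl);
	}

	/* Deleting the same entry only needs the key fields */
	static int example_route_delete(struct mlxsw_core *core)
	{
		char ralue_pl[MLXSW_REG_RALUE_LEN];

		mlxsw_reg_ralue_pack4(ralue_pl, MLXSW_REG_RALXX_PROTOCOL_IPV4,
				      MLXSW_REG_RALUE_OP_WRITE_DELETE, 0, 24,
				      0xc0000200);
		return mlxsw_reg_write(core, MLXSW_REG(ralue), ralue_pl);
	}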
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index f0799898817d..7b2b741b2a23 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -51,6 +51,7 @@
 #include <linux/list.h>
 #include <linux/notifier.h>
 #include <linux/dcbnl.h>
+#include <linux/inetdevice.h>
 #include <net/switchdev.h>
 #include <generated/utsrelease.h>
 
@@ -210,23 +211,6 @@ static int mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port *mlxsw_sp_port)
 	return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr);
 }
 
-static int mlxsw_sp_port_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
-				       u16 vid, enum mlxsw_reg_spms_state state)
-{
-	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
-	char *spms_pl;
-	int err;
-
-	spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
-	if (!spms_pl)
-		return -ENOMEM;
-	mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);
-	mlxsw_reg_spms_vid_pack(spms_pl, vid, state);
-	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
-	kfree(spms_pl);
-	return err;
-}
-
 static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
 {
 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
@@ -637,87 +621,6 @@ static int mlxsw_sp_port_vlan_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port)
 	return 0;
 }
 
-static struct mlxsw_sp_fid *
-mlxsw_sp_vfid_find(const struct mlxsw_sp *mlxsw_sp, u16 vid)
-{
-	struct mlxsw_sp_fid *f;
-
-	list_for_each_entry(f, &mlxsw_sp->port_vfids.list, list) {
-		if (f->vid == vid)
-			return f;
-	}
-
-	return NULL;
-}
-
-static u16 mlxsw_sp_avail_vfid_get(const struct mlxsw_sp *mlxsw_sp)
-{
-	return find_first_zero_bit(mlxsw_sp->port_vfids.mapped,
-				   MLXSW_SP_VFID_PORT_MAX);
-}
-
-static int mlxsw_sp_vfid_op(struct mlxsw_sp *mlxsw_sp, u16 fid, bool create)
-{
-	char sfmr_pl[MLXSW_REG_SFMR_LEN];
-
-	mlxsw_reg_sfmr_pack(sfmr_pl, !create, fid, 0);
-	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
-}
-
-static void mlxsw_sp_vport_vfid_leave(struct mlxsw_sp_port *mlxsw_sp_vport);
-
-static struct mlxsw_sp_fid *mlxsw_sp_vfid_create(struct mlxsw_sp *mlxsw_sp,
-						 u16 vid)
-{
-	struct device *dev = mlxsw_sp->bus_info->dev;
-	struct mlxsw_sp_fid *f;
-	u16 vfid, fid;
-	int err;
-
-	vfid = mlxsw_sp_avail_vfid_get(mlxsw_sp);
-	if (vfid == MLXSW_SP_VFID_PORT_MAX) {
-		dev_err(dev, "No available vFIDs\n");
-		return ERR_PTR(-ERANGE);
-	}
-
-	fid = mlxsw_sp_vfid_to_fid(vfid);
-	err = mlxsw_sp_vfid_op(mlxsw_sp, fid, true);
-	if (err) {
-		dev_err(dev, "Failed to create FID=%d\n", fid);
-		return ERR_PTR(err);
-	}
-
-	f = kzalloc(sizeof(*f), GFP_KERNEL);
-	if (!f)
-		goto err_allocate_vfid;
-
-	f->leave = mlxsw_sp_vport_vfid_leave;
-	f->fid = fid;
-	f->vid = vid;
-
-	list_add(&f->list, &mlxsw_sp->port_vfids.list);
-	set_bit(vfid, mlxsw_sp->port_vfids.mapped);
-
-	return f;
-
-err_allocate_vfid:
-	mlxsw_sp_vfid_op(mlxsw_sp, fid, false);
-	return ERR_PTR(-ENOMEM);
-}
-
-static void mlxsw_sp_vfid_destroy(struct mlxsw_sp *mlxsw_sp,
-				  struct mlxsw_sp_fid *f)
-{
-	u16 vfid = mlxsw_sp_fid_to_vfid(f->fid);
-
-	clear_bit(vfid, mlxsw_sp->port_vfids.mapped);
-	list_del(&f->list);
-
-	mlxsw_sp_vfid_op(mlxsw_sp, f->fid, false);
-
-	kfree(f);
-}
-
 static struct mlxsw_sp_port *
 mlxsw_sp_port_vport_create(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
 {
@@ -750,67 +653,6 @@ static void mlxsw_sp_port_vport_destroy(struct mlxsw_sp_port *mlxsw_sp_vport)
 	kfree(mlxsw_sp_vport);
 }
 
-static int mlxsw_sp_vport_fid_map(struct mlxsw_sp_port *mlxsw_sp_vport, u16 fid,
-				  bool valid)
-{
-	enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
-	u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
-
-	return mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport, mt, valid, fid,
-					    vid);
-}
-
-static int mlxsw_sp_vport_vfid_join(struct mlxsw_sp_port *mlxsw_sp_vport)
-{
-	u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
-	struct mlxsw_sp_fid *f;
-	int err;
-
-	f = mlxsw_sp_vfid_find(mlxsw_sp_vport->mlxsw_sp, vid);
-	if (!f) {
-		f = mlxsw_sp_vfid_create(mlxsw_sp_vport->mlxsw_sp, vid);
-		if (IS_ERR(f))
-			return PTR_ERR(f);
-	}
-
-	if (!f->ref_count) {
-		err = mlxsw_sp_vport_flood_set(mlxsw_sp_vport, f->fid, true);
-		if (err)
-			goto err_vport_flood_set;
-	}
-
-	err = mlxsw_sp_vport_fid_map(mlxsw_sp_vport, f->fid, true);
-	if (err)
-		goto err_vport_fid_map;
-
-	mlxsw_sp_vport_fid_set(mlxsw_sp_vport, f);
-	f->ref_count++;
-
-	return 0;
-
-err_vport_fid_map:
-	if (!f->ref_count)
-		mlxsw_sp_vport_flood_set(mlxsw_sp_vport, f->fid, false);
-err_vport_flood_set:
-	if (!f->ref_count)
-		mlxsw_sp_vfid_destroy(mlxsw_sp_vport->mlxsw_sp, f);
-	return err;
-}
-
-static void mlxsw_sp_vport_vfid_leave(struct mlxsw_sp_port *mlxsw_sp_vport)
-{
-	struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
-
-	mlxsw_sp_vport_fid_set(mlxsw_sp_vport, NULL);
-
-	mlxsw_sp_vport_fid_map(mlxsw_sp_vport, f->fid, false);
-
-	if (--f->ref_count == 0) {
-		mlxsw_sp_vport_flood_set(mlxsw_sp_vport, f->fid, false);
-		mlxsw_sp_vfid_destroy(mlxsw_sp_vport->mlxsw_sp, f);
-	}
-}
-
 int mlxsw_sp_port_add_vid(struct net_device *dev, __be16 __always_unused proto,
 			  u16 vid)
 {
@@ -848,12 +690,6 @@ int mlxsw_sp_port_add_vid(struct net_device *dev, __be16 __always_unused proto,
 		}
 	}
 
-	err = mlxsw_sp_vport_vfid_join(mlxsw_sp_vport);
-	if (err) {
-		netdev_err(dev, "Failed to join vFID\n");
-		goto err_vport_vfid_join;
-	}
-
 	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false);
 	if (err) {
 		netdev_err(dev, "Failed to disable learning for VID=%d\n", vid);
@@ -867,22 +703,11 @@ int mlxsw_sp_port_add_vid(struct net_device *dev, __be16 __always_unused proto,
 		goto err_port_add_vid;
 	}
 
-	err = mlxsw_sp_port_stp_state_set(mlxsw_sp_vport, vid,
-					  MLXSW_REG_SPMS_STATE_FORWARDING);
-	if (err) {
-		netdev_err(dev, "Failed to set STP state for VID=%d\n", vid);
-		goto err_port_stp_state_set;
-	}
-
 	return 0;
 
-err_port_stp_state_set:
-	mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, false, false);
 err_port_add_vid:
 	mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true);
 err_port_vid_learning_set:
-	mlxsw_sp_vport_vfid_leave(mlxsw_sp_vport);
-err_vport_vfid_join:
 	if (list_is_singular(&mlxsw_sp_port->vports_list))
 		mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);
 err_port_vp_mode_trans:
@@ -910,13 +735,6 @@ static int mlxsw_sp_port_kill_vid(struct net_device *dev,
 		return 0;
 	}
 
-	err = mlxsw_sp_port_stp_state_set(mlxsw_sp_vport, vid,
-					  MLXSW_REG_SPMS_STATE_DISCARDING);
-	if (err) {
-		netdev_err(dev, "Failed to set STP state for VID=%d\n", vid);
-		return err;
-	}
-
 	err = mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, false, false);
 	if (err) {
 		netdev_err(dev, "Failed to set VLAN membership for VID=%d\n",
@@ -2417,8 +2235,7 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
 	mlxsw_sp->core = mlxsw_core;
 	mlxsw_sp->bus_info = mlxsw_bus_info;
 	INIT_LIST_HEAD(&mlxsw_sp->fids);
-	INIT_LIST_HEAD(&mlxsw_sp->port_vfids.list);
-	INIT_LIST_HEAD(&mlxsw_sp->br_vfids.list);
+	INIT_LIST_HEAD(&mlxsw_sp->vfids.list);
 	INIT_LIST_HEAD(&mlxsw_sp->br_mids.list);
 
 	err = mlxsw_sp_base_mac_get(mlxsw_sp);
@@ -2503,6 +2320,7 @@ static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
 	mlxsw_sp_buffers_fini(mlxsw_sp);
 	mlxsw_sp_traps_fini(mlxsw_sp);
 	mlxsw_sp_event_unregister(mlxsw_sp, MLXSW_TRAP_ID_PUDE);
+	WARN_ON(!list_empty(&mlxsw_sp->vfids.list));
 	WARN_ON(!list_empty(&mlxsw_sp->fids));
 	for (i = 0; i < MLXSW_SP_RIF_MAX; i++)
 		WARN_ON_ONCE(mlxsw_sp->rifs[i]);
@@ -2567,6 +2385,559 @@ static struct mlxsw_driver mlxsw_sp_driver = {
 	.profile = &mlxsw_sp_config_profile,
 };
 
+static bool mlxsw_sp_port_dev_check(const struct net_device *dev)
+{
+	return dev->netdev_ops == &mlxsw_sp_port_netdev_ops;
+}
+
+static struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev)
+{
+	struct net_device *lower_dev;
+	struct list_head *iter;
+
+	if (mlxsw_sp_port_dev_check(dev))
+		return netdev_priv(dev);
+
+	netdev_for_each_all_lower_dev(dev, lower_dev, iter) {
+		if (mlxsw_sp_port_dev_check(lower_dev))
+			return netdev_priv(lower_dev);
+	}
+	return NULL;
+}
+
+static struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev)
+{
+	struct mlxsw_sp_port *mlxsw_sp_port;
+
+	mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(dev);
+	return mlxsw_sp_port ? mlxsw_sp_port->mlxsw_sp : NULL;
+}
+
+static struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find_rcu(struct net_device *dev)
+{
+	struct net_device *lower_dev;
+	struct list_head *iter;
+
+	if (mlxsw_sp_port_dev_check(dev))
+		return netdev_priv(dev);
+
+	netdev_for_each_all_lower_dev_rcu(dev, lower_dev, iter) {
+		if (mlxsw_sp_port_dev_check(lower_dev))
+			return netdev_priv(lower_dev);
+	}
+	return NULL;
+}
+
+struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev)
+{
+	struct mlxsw_sp_port *mlxsw_sp_port;
+
+	rcu_read_lock();
+	mlxsw_sp_port = mlxsw_sp_port_dev_lower_find_rcu(dev);
+	if (mlxsw_sp_port)
+		dev_hold(mlxsw_sp_port->dev);
+	rcu_read_unlock();
+	return mlxsw_sp_port;
+}
+
+void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+	dev_put(mlxsw_sp_port->dev);
+}
+
+static bool mlxsw_sp_rif_should_config(struct mlxsw_sp_rif *r,
+				       unsigned long event)
+{
+	switch (event) {
+	case NETDEV_UP:
+		if (!r)
+			return true;
+		r->ref_count++;
+		return false;
+	case NETDEV_DOWN:
+		if (r && --r->ref_count == 0)
+			return true;
+		/* It is possible we already removed the RIF ourselves
+		 * if it was assigned to a netdev that is now a bridge
+		 * or LAG slave.
+		 */
+		return false;
+	}
+
+	return false;
+}
+
+static int mlxsw_sp_avail_rif_get(struct mlxsw_sp *mlxsw_sp)
+{
+	int i;
+
+	for (i = 0; i < MLXSW_SP_RIF_MAX; i++)
+		if (!mlxsw_sp->rifs[i])
+			return i;
+
+	return MLXSW_SP_RIF_MAX;
+}
+
+static void mlxsw_sp_vport_rif_sp_attr_get(struct mlxsw_sp_port *mlxsw_sp_vport,
+					   bool *p_lagged, u16 *p_system_port)
+{
+	u8 local_port = mlxsw_sp_vport->local_port;
+
+	*p_lagged = mlxsw_sp_vport->lagged;
+	*p_system_port = *p_lagged ? mlxsw_sp_vport->lag_id : local_port;
+}
+
+static int mlxsw_sp_vport_rif_sp_op(struct mlxsw_sp_port *mlxsw_sp_vport,
+				    struct net_device *l3_dev, u16 rif,
+				    bool create)
+{
+	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
+	bool lagged = mlxsw_sp_vport->lagged;
+	char ritr_pl[MLXSW_REG_RITR_LEN];
+	u16 system_port;
+
+	mlxsw_reg_ritr_pack(ritr_pl, create, MLXSW_REG_RITR_SP_IF, rif,
+			    l3_dev->mtu, l3_dev->dev_addr);
+
+	mlxsw_sp_vport_rif_sp_attr_get(mlxsw_sp_vport, &lagged, &system_port);
+	mlxsw_reg_ritr_sp_if_pack(ritr_pl, lagged, system_port,
+				  mlxsw_sp_vport_vid_get(mlxsw_sp_vport));
+
+	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
+}
+
+static void mlxsw_sp_vport_rif_sp_leave(struct mlxsw_sp_port *mlxsw_sp_vport);
+
+static struct mlxsw_sp_fid *
+mlxsw_sp_rfid_alloc(u16 fid, struct net_device *l3_dev)
+{
+	struct mlxsw_sp_fid *f;
+
+	f = kzalloc(sizeof(*f), GFP_KERNEL);
+	if (!f)
+		return NULL;
+
+	f->leave = mlxsw_sp_vport_rif_sp_leave;
+	f->ref_count = 0;
+	f->dev = l3_dev;
+	f->fid = fid;
+
+	return f;
+}
+
+static struct mlxsw_sp_rif *
+mlxsw_sp_rif_alloc(u16 rif, struct net_device *l3_dev, struct mlxsw_sp_fid *f)
+{
+	struct mlxsw_sp_rif *r;
+
+	r = kzalloc(sizeof(*r), GFP_KERNEL);
+	if (!r)
+		return NULL;
+
+	ether_addr_copy(r->addr, l3_dev->dev_addr);
+	r->mtu = l3_dev->mtu;
+	r->ref_count = 1;
+	r->dev = l3_dev;
+	r->rif = rif;
+	r->f = f;
+
+	return r;
+}
+
+static struct mlxsw_sp_rif *
+mlxsw_sp_vport_rif_sp_create(struct mlxsw_sp_port *mlxsw_sp_vport,
+			     struct net_device *l3_dev)
+{
+	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
+	struct mlxsw_sp_fid *f;
+	struct mlxsw_sp_rif *r;
+	u16 fid, rif;
+	int err;
+
+	rif = mlxsw_sp_avail_rif_get(mlxsw_sp);
+	if (rif == MLXSW_SP_RIF_MAX)
+		return ERR_PTR(-ERANGE);
+
+	err = mlxsw_sp_vport_rif_sp_op(mlxsw_sp_vport, l3_dev, rif, true);
+	if (err)
+		return ERR_PTR(err);
+
+	fid = mlxsw_sp_rif_sp_to_fid(rif);
+	err = mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, fid, true);
+	if (err)
+		goto err_rif_fdb_op;
+
+	f = mlxsw_sp_rfid_alloc(fid, l3_dev);
+	if (!f) {
+		err = -ENOMEM;
+		goto err_rfid_alloc;
+	}
+
+	r = mlxsw_sp_rif_alloc(rif, l3_dev, f);
+	if (!r) {
+		err = -ENOMEM;
+		goto err_rif_alloc;
+	}
+
+	f->r = r;
+	mlxsw_sp->rifs[rif] = r;
+
+	return r;
+
+err_rif_alloc:
+	kfree(f);
+err_rfid_alloc:
+	mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, fid, false);
+err_rif_fdb_op:
+	mlxsw_sp_vport_rif_sp_op(mlxsw_sp_vport, l3_dev, rif, false);
+	return ERR_PTR(err);
+}
+
+static void mlxsw_sp_vport_rif_sp_destroy(struct mlxsw_sp_port *mlxsw_sp_vport,
+					  struct mlxsw_sp_rif *r)
+{
+	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
+	struct net_device *l3_dev = r->dev;
+	struct mlxsw_sp_fid *f = r->f;
+	u16 fid = f->fid;
+	u16 rif = r->rif;
+
+	mlxsw_sp->rifs[rif] = NULL;
+	f->r = NULL;
+
+	kfree(r);
+
+	kfree(f);
+
+	mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, fid, false);
+
+	mlxsw_sp_vport_rif_sp_op(mlxsw_sp_vport, l3_dev, rif, false);
+}
+
+static int mlxsw_sp_vport_rif_sp_join(struct mlxsw_sp_port *mlxsw_sp_vport,
+				      struct net_device *l3_dev)
+{
+	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
+	struct mlxsw_sp_rif *r;
+
+	r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
+	if (!r) {
+		r = mlxsw_sp_vport_rif_sp_create(mlxsw_sp_vport, l3_dev);
+		if (IS_ERR(r))
+			return PTR_ERR(r);
+	}
+
+	mlxsw_sp_vport_fid_set(mlxsw_sp_vport, r->f);
+	r->f->ref_count++;
+
+	netdev_dbg(mlxsw_sp_vport->dev, "Joined FID=%d\n", r->f->fid);
+
+	return 0;
+}
+
+static void mlxsw_sp_vport_rif_sp_leave(struct mlxsw_sp_port *mlxsw_sp_vport)
+{
+	struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
+
+	netdev_dbg(mlxsw_sp_vport->dev, "Left FID=%d\n", f->fid);
+
+	mlxsw_sp_vport_fid_set(mlxsw_sp_vport, NULL);
+	if (--f->ref_count == 0)
+		mlxsw_sp_vport_rif_sp_destroy(mlxsw_sp_vport, f->r);
+}
+
+static int mlxsw_sp_inetaddr_vport_event(struct net_device *l3_dev,
+					 struct net_device *port_dev,
+					 unsigned long event, u16 vid)
+{
+	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(port_dev);
+	struct mlxsw_sp_port *mlxsw_sp_vport;
+
+	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
+	if (WARN_ON(!mlxsw_sp_vport))
+		return -EINVAL;
+
+	switch (event) {
+	case NETDEV_UP:
+		return mlxsw_sp_vport_rif_sp_join(mlxsw_sp_vport, l3_dev);
+	case NETDEV_DOWN:
+		mlxsw_sp_vport_rif_sp_leave(mlxsw_sp_vport);
+		break;
+	}
+
+	return 0;
+}
+
+static int mlxsw_sp_inetaddr_port_event(struct net_device *port_dev,
+					unsigned long event)
+{
+	if (netif_is_bridge_port(port_dev) || netif_is_lag_port(port_dev))
+		return 0;
+
+	return mlxsw_sp_inetaddr_vport_event(port_dev, port_dev, event, 1);
+}
+
+static int __mlxsw_sp_inetaddr_lag_event(struct net_device *l3_dev,
+					 struct net_device *lag_dev,
+					 unsigned long event, u16 vid)
+{
+	struct net_device *port_dev;
+	struct list_head *iter;
+	int err;
+
+	netdev_for_each_lower_dev(lag_dev, port_dev, iter) {
+		if (mlxsw_sp_port_dev_check(port_dev)) {
+			err = mlxsw_sp_inetaddr_vport_event(l3_dev, port_dev,
+							    event, vid);
+			if (err)
+				return err;
+		}
+	}
+
+	return 0;
+}
+
+static int mlxsw_sp_inetaddr_lag_event(struct net_device *lag_dev,
+				       unsigned long event)
+{
+	if (netif_is_bridge_port(lag_dev))
+		return 0;
+
+	return __mlxsw_sp_inetaddr_lag_event(lag_dev, lag_dev, event, 1);
+}
+
+static struct mlxsw_sp_fid *mlxsw_sp_bridge_fid_get(struct mlxsw_sp *mlxsw_sp,
+						    struct net_device *l3_dev)
+{
+	u16 fid;
+
+	if (is_vlan_dev(l3_dev))
+		fid = vlan_dev_vlan_id(l3_dev);
+	else if (mlxsw_sp->master_bridge.dev == l3_dev)
+		fid = 1;
+	else
+		return mlxsw_sp_vfid_find(mlxsw_sp, l3_dev);
+
+	return mlxsw_sp_fid_find(mlxsw_sp, fid);
+}
+
+static enum mlxsw_reg_ritr_if_type mlxsw_sp_rif_type_get(u16 fid)
+{
+	if (mlxsw_sp_fid_is_vfid(fid))
+		return MLXSW_REG_RITR_FID_IF;
+	else
+		return MLXSW_REG_RITR_VLAN_IF;
+}
+
+static int mlxsw_sp_rif_bridge_op(struct mlxsw_sp *mlxsw_sp,
+				  struct net_device *l3_dev,
+				  u16 fid, u16 rif,
+				  bool create)
+{
+	enum mlxsw_reg_ritr_if_type rif_type;
+	char ritr_pl[MLXSW_REG_RITR_LEN];
+
+	rif_type = mlxsw_sp_rif_type_get(fid);
+	mlxsw_reg_ritr_pack(ritr_pl, create, rif_type, rif, l3_dev->mtu,
+			    l3_dev->dev_addr);
+	mlxsw_reg_ritr_fid_set(ritr_pl, rif_type, fid);
+
+	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
+}
+
+static int mlxsw_sp_rif_bridge_create(struct mlxsw_sp *mlxsw_sp,
+				      struct net_device *l3_dev,
+				      struct mlxsw_sp_fid *f)
+{
+	struct mlxsw_sp_rif *r;
+	u16 rif;
+	int err;
+
+	rif = mlxsw_sp_avail_rif_get(mlxsw_sp);
+	if (rif == MLXSW_SP_RIF_MAX)
+		return -ERANGE;
+
+	err = mlxsw_sp_rif_bridge_op(mlxsw_sp, l3_dev, f->fid, rif, true);
+	if (err)
+		return err;
+
+	err = mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, f->fid, true);
+	if (err)
+		goto err_rif_fdb_op;
+
+	r = mlxsw_sp_rif_alloc(rif, l3_dev, f);
+	if (!r) {
+		err = -ENOMEM;
+		goto err_rif_alloc;
+	}
+
+	f->r = r;
+	mlxsw_sp->rifs[rif] = r;
+
+	netdev_dbg(l3_dev, "RIF=%d created\n", rif);
+
+	return 0;
+
+err_rif_alloc:
+	mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, f->fid, false);
+err_rif_fdb_op:
+	mlxsw_sp_rif_bridge_op(mlxsw_sp, l3_dev, f->fid, rif, false);
+	return err;
+}
+
+void mlxsw_sp_rif_bridge_destroy(struct mlxsw_sp *mlxsw_sp,
+				 struct mlxsw_sp_rif *r)
+{
+	struct net_device *l3_dev = r->dev;
+	struct mlxsw_sp_fid *f = r->f;
+	u16 rif = r->rif;
+
+	mlxsw_sp->rifs[rif] = NULL;
+	f->r = NULL;
+
+	kfree(r);
+
+	mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, f->fid, false);
+
+	mlxsw_sp_rif_bridge_op(mlxsw_sp, l3_dev, f->fid, rif, false);
+
+	netdev_dbg(l3_dev, "RIF=%d destroyed\n", rif);
+}
+
+static int mlxsw_sp_inetaddr_bridge_event(struct net_device *l3_dev,
+					  struct net_device *br_dev,
+					  unsigned long event)
+{
+	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(l3_dev);
+	struct mlxsw_sp_fid *f;
+
+	/* FID can either be an actual FID if the L3 device is the
+	 * VLAN-aware bridge or a VLAN device on top. Otherwise, the
+	 * L3 device is a VLAN-unaware bridge and we get a vFID.
+	 */
+	f = mlxsw_sp_bridge_fid_get(mlxsw_sp, l3_dev);
+	if (WARN_ON(!f))
+		return -EINVAL;
+
+	switch (event) {
+	case NETDEV_UP:
+		return mlxsw_sp_rif_bridge_create(mlxsw_sp, l3_dev, f);
+	case NETDEV_DOWN:
+		mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->r);
+		break;
+	}
+
+	return 0;
+}
+
+static int mlxsw_sp_inetaddr_vlan_event(struct net_device *vlan_dev,
+					unsigned long event)
+{
+	struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
+	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(vlan_dev);
+	u16 vid = vlan_dev_vlan_id(vlan_dev);
+
+	if (mlxsw_sp_port_dev_check(real_dev))
+		return mlxsw_sp_inetaddr_vport_event(vlan_dev, real_dev, event,
+						     vid);
+	else if (netif_is_lag_master(real_dev))
+		return __mlxsw_sp_inetaddr_lag_event(vlan_dev, real_dev, event,
+						     vid);
+	else if (netif_is_bridge_master(real_dev) &&
+		 mlxsw_sp->master_bridge.dev == real_dev)
+		return mlxsw_sp_inetaddr_bridge_event(vlan_dev, real_dev,
+						      event);
+
+	return 0;
+}
+
+static int mlxsw_sp_inetaddr_event(struct notifier_block *unused,
+				   unsigned long event, void *ptr)
+{
+	struct in_ifaddr *ifa = (struct in_ifaddr *) ptr;
+	struct net_device *dev = ifa->ifa_dev->dev;
+	struct mlxsw_sp *mlxsw_sp;
+	struct mlxsw_sp_rif *r;
+	int err = 0;
+
+	mlxsw_sp = mlxsw_sp_lower_get(dev);
+	if (!mlxsw_sp)
+		goto out;
+
+	r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
+	if (!mlxsw_sp_rif_should_config(r, event))
+		goto out;
+
+	if (mlxsw_sp_port_dev_check(dev))
+		err = mlxsw_sp_inetaddr_port_event(dev, event);
+	else if (netif_is_lag_master(dev))
+		err = mlxsw_sp_inetaddr_lag_event(dev, event);
+	else if (netif_is_bridge_master(dev))
+		err = mlxsw_sp_inetaddr_bridge_event(dev, dev, event);
+	else if (is_vlan_dev(dev))
+		err = mlxsw_sp_inetaddr_vlan_event(dev, event);
+
+out:
+	return notifier_from_errno(err);
+}
+
+static int mlxsw_sp_rif_edit(struct mlxsw_sp *mlxsw_sp, u16 rif,
+			     const char *mac, int mtu)
+{
+	char ritr_pl[MLXSW_REG_RITR_LEN];
+	int err;
+
+	mlxsw_reg_ritr_rif_pack(ritr_pl, rif);
+	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
+	if (err)
+		return err;
+
+	mlxsw_reg_ritr_mtu_set(ritr_pl, mtu);
+	mlxsw_reg_ritr_if_mac_memcpy_to(ritr_pl, mac);
+	mlxsw_reg_ritr_op_set(ritr_pl, MLXSW_REG_RITR_RIF_CREATE);
+	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
+}
+
+static int mlxsw_sp_netdevice_router_port_event(struct net_device *dev)
+{
+	struct mlxsw_sp *mlxsw_sp;
+	struct mlxsw_sp_rif *r;
+	int err;
+
+	mlxsw_sp = mlxsw_sp_lower_get(dev);
+	if (!mlxsw_sp)
+		return 0;
+
+	r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
+	if (!r)
+		return 0;
+
+	err = mlxsw_sp_rif_fdb_op(mlxsw_sp, r->addr, r->f->fid, false);
+	if (err)
+		return err;
+
+	err = mlxsw_sp_rif_edit(mlxsw_sp, r->rif, dev->dev_addr, dev->mtu);
+	if (err)
+		goto err_rif_edit;
+
+	err = mlxsw_sp_rif_fdb_op(mlxsw_sp, dev->dev_addr, r->f->fid, true);
+	if (err)
+		goto err_rif_fdb_op;
+
+	ether_addr_copy(r->addr, dev->dev_addr);
+	r->mtu = dev->mtu;
+
+	netdev_dbg(dev, "Updated RIF=%d\n", r->rif);
+
+	return 0;
+
+err_rif_fdb_op:
+	mlxsw_sp_rif_edit(mlxsw_sp, r->rif, r->addr, r->mtu);
+err_rif_edit:
+	mlxsw_sp_rif_fdb_op(mlxsw_sp, r->addr, r->f->fid, true);
+	return err;
+}
+
 static bool mlxsw_sp_lag_port_fid_member(struct mlxsw_sp_port *lag_port,
 					 u16 fid)
 {
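The ref-counting rule in mlxsw_sp_rif_should_config() above is easy to mis-read: a RIF is created on the first IP address of an L3 netdevice and destroyed together with the last one. A small user-space mock of just that rule (hypothetical test code, not part of the driver):

	#include <assert.h>
	#include <stdbool.h>
	#include <stddef.h>

	struct mock_rif { unsigned int ref_count; };

	/* Mirrors mlxsw_sp_rif_should_config(): "up" stands for NETDEV_UP */
	static bool mock_should_config(struct mock_rif *r, bool up)
	{
		if (up) {
			if (!r)
				return true;	/* first address: create RIF */
			r->ref_count++;		/* another address, same RIF */
			return false;
		}
		return r && --r->ref_count == 0;	/* last address gone */
	}

	int main(void)
	{
		struct mock_rif r = { .ref_count = 1 };	/* as after creation */

		assert(mock_should_config(NULL, true));	/* triggers creation */
		assert(!mock_should_config(&r, true));	/* second address */
		assert(!mock_should_config(&r, false));	/* one user remains */
		assert(mock_should_config(&r, false));	/* tear down */
		return 0;
	}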
@@ -2647,9 +3018,15 @@ int mlxsw_sp_port_fdb_flush(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid)
 	return mlxsw_sp_port_fdb_flush_by_port_fid(mlxsw_sp_port, fid);
 }
 
-static bool mlxsw_sp_port_dev_check(const struct net_device *dev)
+static void mlxsw_sp_master_bridge_gone_sync(struct mlxsw_sp *mlxsw_sp)
 {
-	return dev->netdev_ops == &mlxsw_sp_port_netdev_ops;
+	struct mlxsw_sp_fid *f, *tmp;
+
+	list_for_each_entry_safe(f, tmp, &mlxsw_sp->fids, list)
+		if (--f->ref_count == 0)
+			mlxsw_sp_fid_destroy(mlxsw_sp, f);
+		else
+			WARN_ON_ONCE(1);
 }
 
 static bool mlxsw_sp_master_bridge_check(struct mlxsw_sp *mlxsw_sp,
@@ -2668,8 +3045,15 @@ static void mlxsw_sp_master_bridge_inc(struct mlxsw_sp *mlxsw_sp,
 
 static void mlxsw_sp_master_bridge_dec(struct mlxsw_sp *mlxsw_sp)
 {
-	if (--mlxsw_sp->master_bridge.ref_count == 0)
+	if (--mlxsw_sp->master_bridge.ref_count == 0) {
 		mlxsw_sp->master_bridge.dev = NULL;
+		/* It's possible upper VLAN devices are still holding
+		 * references to underlying FIDs. Drop the reference
+		 * and release the resources if it was the last one.
+		 * If it wasn't, then something bad happened.
+		 */
+		mlxsw_sp_master_bridge_gone_sync(mlxsw_sp);
+	}
 }
 
 static int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port,
@@ -3138,47 +3522,97 @@
 	return 0;
 }
 
-static struct mlxsw_sp_fid *
-mlxsw_sp_br_vfid_find(const struct mlxsw_sp *mlxsw_sp,
-		      const struct net_device *br_dev)
-{
-	struct mlxsw_sp_fid *f;
-
-	list_for_each_entry(f, &mlxsw_sp->br_vfids.list, list) {
-		if (f->dev == br_dev)
-			return f;
-	}
-
-	return NULL;
-}
-
-static u16 mlxsw_sp_vfid_to_br_vfid(u16 vfid)
-{
-	return vfid - MLXSW_SP_VFID_PORT_MAX;
-}
-
-static u16 mlxsw_sp_br_vfid_to_vfid(u16 br_vfid)
-{
-	return MLXSW_SP_VFID_PORT_MAX + br_vfid;
-}
-
-static u16 mlxsw_sp_avail_br_vfid_get(const struct mlxsw_sp *mlxsw_sp)
-{
-	return find_first_zero_bit(mlxsw_sp->br_vfids.mapped,
-				   MLXSW_SP_VFID_BR_MAX);
-}
-
-static void mlxsw_sp_vport_br_vfid_leave(struct mlxsw_sp_port *mlxsw_sp_vport);
-
-static struct mlxsw_sp_fid *mlxsw_sp_br_vfid_create(struct mlxsw_sp *mlxsw_sp,
-						    struct net_device *br_dev)
+static int mlxsw_sp_master_bridge_vlan_link(struct mlxsw_sp *mlxsw_sp,
+					    struct net_device *vlan_dev)
+{
+	u16 fid = vlan_dev_vlan_id(vlan_dev);
+	struct mlxsw_sp_fid *f;
+
+	f = mlxsw_sp_fid_find(mlxsw_sp, fid);
+	if (!f) {
+		f = mlxsw_sp_fid_create(mlxsw_sp, fid);
+		if (IS_ERR(f))
+			return PTR_ERR(f);
+	}
+
+	f->ref_count++;
+
+	return 0;
+}
+
+static void mlxsw_sp_master_bridge_vlan_unlink(struct mlxsw_sp *mlxsw_sp,
+					       struct net_device *vlan_dev)
+{
+	u16 fid = vlan_dev_vlan_id(vlan_dev);
+	struct mlxsw_sp_fid *f;
+
+	f = mlxsw_sp_fid_find(mlxsw_sp, fid);
+	if (f && f->r)
+		mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->r);
+	if (f && --f->ref_count == 0)
+		mlxsw_sp_fid_destroy(mlxsw_sp, f);
+}
+
+static int mlxsw_sp_netdevice_bridge_event(struct net_device *br_dev,
+					   unsigned long event, void *ptr)
+{
+	struct netdev_notifier_changeupper_info *info;
+	struct net_device *upper_dev;
+	struct mlxsw_sp *mlxsw_sp;
+	int err;
+
+	mlxsw_sp = mlxsw_sp_lower_get(br_dev);
+	if (!mlxsw_sp)
+		return 0;
+	if (br_dev != mlxsw_sp->master_bridge.dev)
+		return 0;
+
+	info = ptr;
+
+	switch (event) {
+	case NETDEV_CHANGEUPPER:
+		upper_dev = info->upper_dev;
+		if (!is_vlan_dev(upper_dev))
+			break;
+		if (info->linking) {
+			err = mlxsw_sp_master_bridge_vlan_link(mlxsw_sp,
+							       upper_dev);
+			if (err)
+				return err;
+		} else {
+			mlxsw_sp_master_bridge_vlan_unlink(mlxsw_sp, upper_dev);
+		}
+		break;
+	}
+
+	return 0;
+}
+
+static u16 mlxsw_sp_avail_vfid_get(const struct mlxsw_sp *mlxsw_sp)
+{
+	return find_first_zero_bit(mlxsw_sp->vfids.mapped,
+				   MLXSW_SP_VFID_MAX);
+}
+
+static int mlxsw_sp_vfid_op(struct mlxsw_sp *mlxsw_sp, u16 fid, bool create)
+{
+	char sfmr_pl[MLXSW_REG_SFMR_LEN];
+
+	mlxsw_reg_sfmr_pack(sfmr_pl, !create, fid, 0);
+	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
+}
+
+static void mlxsw_sp_vport_vfid_leave(struct mlxsw_sp_port *mlxsw_sp_vport);
+
+static struct mlxsw_sp_fid *mlxsw_sp_vfid_create(struct mlxsw_sp *mlxsw_sp,
+						 struct net_device *br_dev)
 {
 	struct device *dev = mlxsw_sp->bus_info->dev;
 	struct mlxsw_sp_fid *f;
 	u16 vfid, fid;
 	int err;
 
-	vfid = mlxsw_sp_br_vfid_to_vfid(mlxsw_sp_avail_br_vfid_get(mlxsw_sp));
+	vfid = mlxsw_sp_avail_vfid_get(mlxsw_sp);
 	if (vfid == MLXSW_SP_VFID_MAX) {
 		dev_err(dev, "No available vFIDs\n");
 		return ERR_PTR(-ERANGE);
@@ -3195,12 +3629,12 @@ static struct mlxsw_sp_fid *mlxsw_sp_br_vfid_create(struct mlxsw_sp *mlxsw_sp,
 	if (!f)
 		goto err_allocate_vfid;
 
-	f->leave = mlxsw_sp_vport_br_vfid_leave;
+	f->leave = mlxsw_sp_vport_vfid_leave;
 	f->fid = fid;
 	f->dev = br_dev;
 
-	list_add(&f->list, &mlxsw_sp->br_vfids.list);
-	set_bit(mlxsw_sp_vfid_to_br_vfid(vfid), mlxsw_sp->br_vfids.mapped);
+	list_add(&f->list, &mlxsw_sp->vfids.list);
+	set_bit(vfid, mlxsw_sp->vfids.mapped);
 
 	return f;
 
@@ -3209,29 +3643,42 @@ err_allocate_vfid:
 	return ERR_PTR(-ENOMEM);
 }
 
-static void mlxsw_sp_br_vfid_destroy(struct mlxsw_sp *mlxsw_sp,
-				     struct mlxsw_sp_fid *f)
+static void mlxsw_sp_vfid_destroy(struct mlxsw_sp *mlxsw_sp,
+				  struct mlxsw_sp_fid *f)
 {
 	u16 vfid = mlxsw_sp_fid_to_vfid(f->fid);
-	u16 br_vfid = mlxsw_sp_vfid_to_br_vfid(vfid);
+	u16 fid = f->fid;
 
-	clear_bit(br_vfid, mlxsw_sp->br_vfids.mapped);
+	clear_bit(vfid, mlxsw_sp->vfids.mapped);
 	list_del(&f->list);
 
-	mlxsw_sp_vfid_op(mlxsw_sp, f->fid, false);
+	if (f->r)
+		mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->r);
 
 	kfree(f);
+
+	mlxsw_sp_vfid_op(mlxsw_sp, fid, false);
 }
 
-static int mlxsw_sp_vport_br_vfid_join(struct mlxsw_sp_port *mlxsw_sp_vport,
-				       struct net_device *br_dev)
+static int mlxsw_sp_vport_fid_map(struct mlxsw_sp_port *mlxsw_sp_vport, u16 fid,
+				  bool valid)
+{
+	enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
+	u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
+
+	return mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport, mt, valid, fid,
+					    vid);
+}
+
+static int mlxsw_sp_vport_vfid_join(struct mlxsw_sp_port *mlxsw_sp_vport,
+				    struct net_device *br_dev)
 {
 	struct mlxsw_sp_fid *f;
 	int err;
 
-	f = mlxsw_sp_br_vfid_find(mlxsw_sp_vport->mlxsw_sp, br_dev);
+	f = mlxsw_sp_vfid_find(mlxsw_sp_vport->mlxsw_sp, br_dev);
 	if (!f) {
-		f = mlxsw_sp_br_vfid_create(mlxsw_sp_vport->mlxsw_sp, br_dev);
+		f = mlxsw_sp_vfid_create(mlxsw_sp_vport->mlxsw_sp, br_dev);
 		if (IS_ERR(f))
 			return PTR_ERR(f);
 	}
@@ -3255,11 +3702,11 @@ err_vport_fid_map:
 	mlxsw_sp_vport_flood_set(mlxsw_sp_vport, f->fid, false);
 err_vport_flood_set:
 	if (!f->ref_count)
-		mlxsw_sp_br_vfid_destroy(mlxsw_sp_vport->mlxsw_sp, f);
+		mlxsw_sp_vfid_destroy(mlxsw_sp_vport->mlxsw_sp, f);
 	return err;
 }
 
-static void mlxsw_sp_vport_br_vfid_leave(struct mlxsw_sp_port *mlxsw_sp_vport)
+static void mlxsw_sp_vport_vfid_leave(struct mlxsw_sp_port *mlxsw_sp_vport)
 {
 	struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
 
@@ -3273,22 +3720,24 @@ static void mlxsw_sp_vport_br_vfid_leave(struct mlxsw_sp_port *mlxsw_sp_vport)
 
 	mlxsw_sp_vport_fid_set(mlxsw_sp_vport, NULL);
 	if (--f->ref_count == 0)
-		mlxsw_sp_br_vfid_destroy(mlxsw_sp_vport->mlxsw_sp, f);
+		mlxsw_sp_vfid_destroy(mlxsw_sp_vport->mlxsw_sp, f);
 }
 
 static int mlxsw_sp_vport_bridge_join(struct mlxsw_sp_port *mlxsw_sp_vport,
 				      struct net_device *br_dev)
 {
+	struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
 	u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
 	struct net_device *dev = mlxsw_sp_vport->dev;
 	int err;
 
-	mlxsw_sp_vport_vfid_leave(mlxsw_sp_vport);
+	if (f && !WARN_ON(!f->leave))
+		f->leave(mlxsw_sp_vport);
 
-	err = mlxsw_sp_vport_br_vfid_join(mlxsw_sp_vport, br_dev);
+	err = mlxsw_sp_vport_vfid_join(mlxsw_sp_vport, br_dev);
 	if (err) {
 		netdev_err(dev, "Failed to join vFID\n");
-		goto err_vport_br_vfid_join;
+		return err;
 	}
 
 	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true);
@@ -3305,9 +3754,7 @@ static int mlxsw_sp_vport_bridge_join(struct mlxsw_sp_port *mlxsw_sp_vport,
 	return 0;
 
 err_port_vid_learning_set:
-	mlxsw_sp_vport_br_vfid_leave(mlxsw_sp_vport);
-err_vport_br_vfid_join:
-	mlxsw_sp_vport_vfid_join(mlxsw_sp_vport);
+	mlxsw_sp_vport_vfid_leave(mlxsw_sp_vport);
 	return err;
 }
 
@@ -3317,12 +3764,7 @@ static void mlxsw_sp_vport_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_vport)
 
 	mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false);
 
-	mlxsw_sp_vport_br_vfid_leave(mlxsw_sp_vport);
-
-	mlxsw_sp_vport_vfid_join(mlxsw_sp_vport);
-
-	mlxsw_sp_port_stp_state_set(mlxsw_sp_vport, vid,
-				    MLXSW_REG_SPMS_STATE_FORWARDING);
+	mlxsw_sp_vport_vfid_leave(mlxsw_sp_vport);
 
 	mlxsw_sp_vport->learning = 0;
 	mlxsw_sp_vport->learning_sync = 0;
@@ -3338,7 +3780,7 @@ mlxsw_sp_port_master_bridge_check(const struct mlxsw_sp_port *mlxsw_sp_port,
 
 	list_for_each_entry(mlxsw_sp_vport, &mlxsw_sp_port->vports_list,
 			    vport.list) {
-		struct net_device *dev = mlxsw_sp_vport_br_get(mlxsw_sp_vport);
+		struct net_device *dev = mlxsw_sp_vport_dev_get(mlxsw_sp_vport);
 
 		if (dev && dev == br_dev)
 			return false;
@@ -3432,10 +3874,14 @@ static int mlxsw_sp_netdevice_event(struct notifier_block *unused,
 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
 	int err = 0;
 
-	if (mlxsw_sp_port_dev_check(dev))
+	if (event == NETDEV_CHANGEADDR || event == NETDEV_CHANGEMTU)
+		err = mlxsw_sp_netdevice_router_port_event(dev);
+	else if (mlxsw_sp_port_dev_check(dev))
 		err = mlxsw_sp_netdevice_port_event(dev, event, ptr);
 	else if (netif_is_lag_master(dev))
 		err = mlxsw_sp_netdevice_lag_event(dev, event, ptr);
+	else if (netif_is_bridge_master(dev))
+		err = mlxsw_sp_netdevice_bridge_event(dev, event, ptr);
 	else if (is_vlan_dev(dev))
 		err = mlxsw_sp_netdevice_vlan_event(dev, event, ptr);
 
@@ -3446,11 +3892,17 @@ static struct notifier_block mlxsw_sp_netdevice_nb __read_mostly = {
 	.notifier_call = mlxsw_sp_netdevice_event,
 };
 
+static struct notifier_block mlxsw_sp_inetaddr_nb __read_mostly = {
+	.notifier_call = mlxsw_sp_inetaddr_event,
+	.priority = 10,	/* Must be called before FIB notifier block */
+};
+
 static int __init mlxsw_sp_module_init(void)
 {
 	int err;
 
 	register_netdevice_notifier(&mlxsw_sp_netdevice_nb);
+	register_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
 	err = mlxsw_core_driver_register(&mlxsw_sp_driver);
 	if (err)
 		goto err_core_driver_register;
@@ -3464,6 +3916,7 @@ err_core_driver_register:
3464static void __exit mlxsw_sp_module_exit(void) 3916static void __exit mlxsw_sp_module_exit(void)
3465{ 3917{
3466 mlxsw_core_driver_unregister(&mlxsw_sp_driver); 3918 mlxsw_core_driver_unregister(&mlxsw_sp_driver);
3919 unregister_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
3467 unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb); 3920 unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb);
3468} 3921}
3469 3922
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index 83d5807832a0..958e821ce845 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -43,15 +43,17 @@
43#include <linux/if_vlan.h> 43#include <linux/if_vlan.h>
44#include <linux/list.h> 44#include <linux/list.h>
45#include <linux/dcbnl.h> 45#include <linux/dcbnl.h>
46#include <linux/in6.h>
46#include <net/switchdev.h> 47#include <net/switchdev.h>
47 48
48#include "port.h" 49#include "port.h"
49#include "core.h" 50#include "core.h"
50 51
51#define MLXSW_SP_VFID_BASE VLAN_N_VID 52#define MLXSW_SP_VFID_BASE VLAN_N_VID
52#define MLXSW_SP_VFID_PORT_MAX 512 /* Non-bridged VLAN interfaces */ 53#define MLXSW_SP_VFID_MAX 6656 /* Bridged VLAN interfaces */
53#define MLXSW_SP_VFID_BR_MAX 6144 /* Bridged VLAN interfaces */ 54
54#define MLXSW_SP_VFID_MAX (MLXSW_SP_VFID_PORT_MAX + MLXSW_SP_VFID_BR_MAX) 55#define MLXSW_SP_RFID_BASE 15360
56#define MLXSW_SP_RIF_MAX 800
55 57
56#define MLXSW_SP_LAG_MAX 64 58#define MLXSW_SP_LAG_MAX 64
57#define MLXSW_SP_PORT_PER_LAG_MAX 16 59#define MLXSW_SP_PORT_PER_LAG_MAX 16
@@ -60,6 +62,12 @@
60 62
61#define MLXSW_SP_PORTS_PER_CLUSTER_MAX 4 63#define MLXSW_SP_PORTS_PER_CLUSTER_MAX 4
62 64
65#define MLXSW_SP_LPM_TREE_MIN 2 /* trees 0 and 1 are reserved */
66#define MLXSW_SP_LPM_TREE_MAX 22
67#define MLXSW_SP_LPM_TREE_COUNT (MLXSW_SP_LPM_TREE_MAX - MLXSW_SP_LPM_TREE_MIN)
68
69#define MLXSW_SP_VIRTUAL_ROUTER_MAX 256
70
63#define MLXSW_SP_PORT_BASE_SPEED 25000 /* Mb/s */ 71#define MLXSW_SP_PORT_BASE_SPEED 25000 /* Mb/s */
64 72
65#define MLXSW_SP_BYTES_PER_CELL 96 73#define MLXSW_SP_BYTES_PER_CELL 96
@@ -74,8 +82,6 @@
74 82
75#define MLXSW_SP_CELL_FACTOR 2 /* 2 * cell_size / (IPG + cell_size + 1) */ 83#define MLXSW_SP_CELL_FACTOR 2 /* 2 * cell_size / (IPG + cell_size + 1) */
76 84
77#define MLXSW_SP_RIF_MAX 800
78
79static inline u16 mlxsw_sp_pfc_delay_get(int mtu, u16 delay) 85static inline u16 mlxsw_sp_pfc_delay_get(int mtu, u16 delay)
80{ 86{
81 delay = MLXSW_SP_BYTES_TO_CELLS(DIV_ROUND_UP(delay, BITS_PER_BYTE)); 87 delay = MLXSW_SP_BYTES_TO_CELLS(DIV_ROUND_UP(delay, BITS_PER_BYTE));
@@ -94,12 +100,16 @@ struct mlxsw_sp_fid {
94 struct list_head list; 100 struct list_head list;
95 unsigned int ref_count; 101 unsigned int ref_count;
96 struct net_device *dev; 102 struct net_device *dev;
103 struct mlxsw_sp_rif *r;
97 u16 fid; 104 u16 fid;
98 u16 vid;
99}; 105};
100 106
101struct mlxsw_sp_rif { 107struct mlxsw_sp_rif {
102 struct net_device *dev; 108 struct net_device *dev;
109 unsigned int ref_count;
110 struct mlxsw_sp_fid *f;
111 unsigned char addr[ETH_ALEN];
112 int mtu;
103 u16 rif; 113 u16 rif;
104}; 114};
105 115
@@ -123,7 +133,17 @@ static inline u16 mlxsw_sp_fid_to_vfid(u16 fid)
123 133
124static inline bool mlxsw_sp_fid_is_vfid(u16 fid) 134static inline bool mlxsw_sp_fid_is_vfid(u16 fid)
125{ 135{
126 return fid >= MLXSW_SP_VFID_BASE; 136 return fid >= MLXSW_SP_VFID_BASE && fid < MLXSW_SP_RFID_BASE;
137}
138
139static inline bool mlxsw_sp_fid_is_rfid(u16 fid)
140{
141 return fid >= MLXSW_SP_RFID_BASE;
142}
143
144static inline u16 mlxsw_sp_rif_sp_to_fid(u16 rif)
145{
146 return MLXSW_SP_RFID_BASE + rif;
127} 147}
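
For orientation, these helpers partition the FID space: FIDs below MLXSW_SP_VFID_BASE are plain VLAN FIDs, anything from MLXSW_SP_VFID_BASE up to (but not including) MLXSW_SP_RFID_BASE is treated as a vFID, and everything from MLXSW_SP_RFID_BASE up is a router FID, one per Sub-port RIF. A minimal userspace sketch of the same range checks; the constants mirror this header, the rest is illustrative:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define VFID_BASE 4096   /* MLXSW_SP_VFID_BASE (VLAN_N_VID) */
#define RFID_BASE 15360  /* MLXSW_SP_RFID_BASE */

static bool fid_is_vfid(uint16_t fid)
{
        return fid >= VFID_BASE && fid < RFID_BASE;
}

static bool fid_is_rfid(uint16_t fid)
{
        return fid >= RFID_BASE;
}

int main(void)
{
        uint16_t fid = RFID_BASE + 5; /* rFID backing Sub-port RIF 5 */

        printf("vfid=%d rfid=%d\n", fid_is_vfid(fid), fid_is_rfid(fid));
        return 0;
}
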
128 148
129struct mlxsw_sp_sb_pr { 149struct mlxsw_sp_sb_pr {
@@ -160,15 +180,45 @@ struct mlxsw_sp_sb {
160 } ports[MLXSW_PORT_MAX_PORTS]; 180 } ports[MLXSW_PORT_MAX_PORTS];
161}; 181};
162 182
183#define MLXSW_SP_PREFIX_COUNT (sizeof(struct in6_addr) * BITS_PER_BYTE)
184
185struct mlxsw_sp_prefix_usage {
186 DECLARE_BITMAP(b, MLXSW_SP_PREFIX_COUNT);
187};
188
189enum mlxsw_sp_l3proto {
190 MLXSW_SP_L3_PROTO_IPV4,
191 MLXSW_SP_L3_PROTO_IPV6,
192};
193
194struct mlxsw_sp_lpm_tree {
195 u8 id; /* tree ID */
196 unsigned int ref_count;
197 enum mlxsw_sp_l3proto proto;
198 struct mlxsw_sp_prefix_usage prefix_usage;
199};
200
201struct mlxsw_sp_fib;
202
203struct mlxsw_sp_vr {
204 u16 id; /* virtual router ID */
205 bool used;
206 enum mlxsw_sp_l3proto proto;
207 u32 tb_id; /* kernel fib table id */
208 struct mlxsw_sp_lpm_tree *lpm_tree;
209 struct mlxsw_sp_fib *fib;
210};
211
212struct mlxsw_sp_router {
213 struct mlxsw_sp_lpm_tree lpm_trees[MLXSW_SP_LPM_TREE_COUNT];
214 struct mlxsw_sp_vr vrs[MLXSW_SP_VIRTUAL_ROUTER_MAX];
215};
216
163struct mlxsw_sp { 217struct mlxsw_sp {
164 struct { 218 struct {
165 struct list_head list; 219 struct list_head list;
166 DECLARE_BITMAP(mapped, MLXSW_SP_VFID_PORT_MAX); 220 DECLARE_BITMAP(mapped, MLXSW_SP_VFID_MAX);
167 } port_vfids; 221 } vfids;
168 struct {
169 struct list_head list;
170 DECLARE_BITMAP(mapped, MLXSW_SP_VFID_BR_MAX);
171 } br_vfids;
172 struct { 222 struct {
173 struct list_head list; 223 struct list_head list;
174 DECLARE_BITMAP(mapped, MLXSW_SP_MID_MAX); 224 DECLARE_BITMAP(mapped, MLXSW_SP_MID_MAX);
@@ -192,6 +242,7 @@ struct mlxsw_sp {
192 struct mlxsw_sp_upper lags[MLXSW_SP_LAG_MAX]; 242 struct mlxsw_sp_upper lags[MLXSW_SP_LAG_MAX];
193 u8 port_to_module[MLXSW_PORT_MAX_PORTS]; 243 u8 port_to_module[MLXSW_PORT_MAX_PORTS];
194 struct mlxsw_sp_sb sb; 244 struct mlxsw_sp_sb sb;
245 struct mlxsw_sp_router router;
195}; 246};
196 247
197static inline struct mlxsw_sp_upper * 248static inline struct mlxsw_sp_upper *
@@ -250,6 +301,9 @@ struct mlxsw_sp_port {
250 struct list_head vports_list; 301 struct list_head vports_list;
251}; 302};
252 303
304struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev);
305void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port);
306
253static inline bool 307static inline bool
254mlxsw_sp_port_is_pause_en(const struct mlxsw_sp_port *mlxsw_sp_port) 308mlxsw_sp_port_is_pause_en(const struct mlxsw_sp_port *mlxsw_sp_port)
255{ 309{
@@ -295,7 +349,7 @@ mlxsw_sp_vport_fid_get(const struct mlxsw_sp_port *mlxsw_sp_vport)
295} 349}
296 350
297static inline struct net_device * 351static inline struct net_device *
298mlxsw_sp_vport_br_get(const struct mlxsw_sp_port *mlxsw_sp_vport) 352mlxsw_sp_vport_dev_get(const struct mlxsw_sp_port *mlxsw_sp_vport)
299{ 353{
300 struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport); 354 struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
301 355
@@ -333,6 +387,31 @@ mlxsw_sp_port_vport_find_by_fid(const struct mlxsw_sp_port *mlxsw_sp_port,
333 return NULL; 387 return NULL;
334} 388}
335 389
390static inline struct mlxsw_sp_fid *mlxsw_sp_fid_find(struct mlxsw_sp *mlxsw_sp,
391 u16 fid)
392{
393 struct mlxsw_sp_fid *f;
394
395 list_for_each_entry(f, &mlxsw_sp->fids, list)
396 if (f->fid == fid)
397 return f;
398
399 return NULL;
400}
401
402static inline struct mlxsw_sp_fid *
403mlxsw_sp_vfid_find(const struct mlxsw_sp *mlxsw_sp,
404 const struct net_device *br_dev)
405{
406 struct mlxsw_sp_fid *f;
407
408 list_for_each_entry(f, &mlxsw_sp->vfids.list, list)
409 if (f->dev == br_dev)
410 return f;
411
412 return NULL;
413}
414
336static inline struct mlxsw_sp_rif * 415static inline struct mlxsw_sp_rif *
337mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp, 416mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
338 const struct net_device *dev) 417 const struct net_device *dev)
@@ -403,6 +482,12 @@ int mlxsw_sp_vport_flood_set(struct mlxsw_sp_port *mlxsw_sp_vport, u16 fid,
403void mlxsw_sp_port_active_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port); 482void mlxsw_sp_port_active_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port);
404int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid); 483int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid);
405int mlxsw_sp_port_fdb_flush(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid); 484int mlxsw_sp_port_fdb_flush(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid);
485int mlxsw_sp_rif_fdb_op(struct mlxsw_sp *mlxsw_sp, const char *mac, u16 fid,
486 bool adding);
487struct mlxsw_sp_fid *mlxsw_sp_fid_create(struct mlxsw_sp *mlxsw_sp, u16 fid);
488void mlxsw_sp_fid_destroy(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fid *f);
489void mlxsw_sp_rif_bridge_destroy(struct mlxsw_sp *mlxsw_sp,
490 struct mlxsw_sp_rif *r);
406int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port, 491int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
407 enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index, 492 enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index,
408 bool dwrr, u8 dwrr_weight); 493 bool dwrr, u8 dwrr_weight);
@@ -434,5 +519,10 @@ static inline void mlxsw_sp_port_dcb_fini(struct mlxsw_sp_port *mlxsw_sp_port)
434 519
435int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp); 520int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp);
436void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp); 521void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp);
522int mlxsw_sp_router_fib4_add(struct mlxsw_sp_port *mlxsw_sp_port,
523 const struct switchdev_obj_ipv4_fib *fib4,
524 struct switchdev_trans *trans);
525int mlxsw_sp_router_fib4_del(struct mlxsw_sp_port *mlxsw_sp_port,
526 const struct switchdev_obj_ipv4_fib *fib4);
437 527
438#endif 528#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index 8d70496ca396..7e3992a681b3 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -35,11 +35,515 @@
35 35
36#include <linux/kernel.h> 36#include <linux/kernel.h>
37#include <linux/types.h> 37#include <linux/types.h>
38#include <linux/rhashtable.h>
39#include <linux/bitops.h>
40#include <linux/in6.h>
38 41
39#include "spectrum.h" 42#include "spectrum.h"
40#include "core.h" 43#include "core.h"
41#include "reg.h" 44#include "reg.h"
42 45
46#define mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) \
47 for_each_set_bit(prefix, (prefix_usage)->b, MLXSW_SP_PREFIX_COUNT)
48
49static bool
50mlxsw_sp_prefix_usage_subset(struct mlxsw_sp_prefix_usage *prefix_usage1,
51 struct mlxsw_sp_prefix_usage *prefix_usage2)
52{
53 unsigned char prefix;
54
55 mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage1) {
56 if (!test_bit(prefix, prefix_usage2->b))
57 return false;
58 }
59 return true;
60}
61
62static bool
63mlxsw_sp_prefix_usage_eq(struct mlxsw_sp_prefix_usage *prefix_usage1,
64 struct mlxsw_sp_prefix_usage *prefix_usage2)
65{
66 return !memcmp(prefix_usage1, prefix_usage2, sizeof(*prefix_usage1));
67}
68
69static bool
70mlxsw_sp_prefix_usage_none(struct mlxsw_sp_prefix_usage *prefix_usage)
71{
 72	struct mlxsw_sp_prefix_usage prefix_usage_none = {{ 0 }};
73
74 return mlxsw_sp_prefix_usage_eq(prefix_usage, &prefix_usage_none);
75}
76
77static void
78mlxsw_sp_prefix_usage_cpy(struct mlxsw_sp_prefix_usage *prefix_usage1,
79 struct mlxsw_sp_prefix_usage *prefix_usage2)
80{
81 memcpy(prefix_usage1, prefix_usage2, sizeof(*prefix_usage1));
82}
83
84static void
85mlxsw_sp_prefix_usage_zero(struct mlxsw_sp_prefix_usage *prefix_usage)
86{
87 memset(prefix_usage, 0, sizeof(*prefix_usage));
88}
89
90static void
91mlxsw_sp_prefix_usage_set(struct mlxsw_sp_prefix_usage *prefix_usage,
92 unsigned char prefix_len)
93{
94 set_bit(prefix_len, prefix_usage->b);
95}
96
97static void
98mlxsw_sp_prefix_usage_clear(struct mlxsw_sp_prefix_usage *prefix_usage,
99 unsigned char prefix_len)
100{
101 clear_bit(prefix_len, prefix_usage->b);
102}
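
The prefix-usage helpers implement plain set algebra over prefix lengths: a router can keep its current LPM tree as long as the prefix lengths its FIB needs form a subset of the lengths the tree was structured for. A standalone userspace model of the subset test (types and constants are simplified stand-ins for the driver's bitmap):

#include <stdbool.h>
#include <stdio.h>

#define PREFIX_COUNT 128 /* as in MLXSW_SP_PREFIX_COUNT */

struct prefix_usage {
        bool b[PREFIX_COUNT];
};

/* True if every prefix length set in p1 is also set in p2. */
static bool prefix_usage_subset(const struct prefix_usage *p1,
                                const struct prefix_usage *p2)
{
        int i;

        for (i = 0; i < PREFIX_COUNT; i++)
                if (p1->b[i] && !p2->b[i])
                        return false;
        return true;
}

int main(void)
{
        struct prefix_usage fib = { .b = { [24] = true } };
        struct prefix_usage tree = { .b = { [16] = true, [24] = true } };

        /* The tree already covers /24, so the FIB can keep using it. */
        printf("subset: %d\n", prefix_usage_subset(&fib, &tree));
        return 0;
}
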
103
104struct mlxsw_sp_fib_key {
105 unsigned char addr[sizeof(struct in6_addr)];
106 unsigned char prefix_len;
107};
108
109enum mlxsw_sp_fib_entry_type {
110 MLXSW_SP_FIB_ENTRY_TYPE_REMOTE,
111 MLXSW_SP_FIB_ENTRY_TYPE_LOCAL,
112 MLXSW_SP_FIB_ENTRY_TYPE_TRAP,
113};
114
115struct mlxsw_sp_fib_entry {
116 struct rhash_head ht_node;
117 struct mlxsw_sp_fib_key key;
118 enum mlxsw_sp_fib_entry_type type;
119 u8 added:1;
120 u16 rif; /* used for action local */
121 struct mlxsw_sp_vr *vr;
122};
123
124struct mlxsw_sp_fib {
125 struct rhashtable ht;
126 unsigned long prefix_ref_count[MLXSW_SP_PREFIX_COUNT];
127 struct mlxsw_sp_prefix_usage prefix_usage;
128};
129
130static const struct rhashtable_params mlxsw_sp_fib_ht_params = {
131 .key_offset = offsetof(struct mlxsw_sp_fib_entry, key),
132 .head_offset = offsetof(struct mlxsw_sp_fib_entry, ht_node),
133 .key_len = sizeof(struct mlxsw_sp_fib_key),
134 .automatic_shrinking = true,
135};
136
137static int mlxsw_sp_fib_entry_insert(struct mlxsw_sp_fib *fib,
138 struct mlxsw_sp_fib_entry *fib_entry)
139{
140 unsigned char prefix_len = fib_entry->key.prefix_len;
141 int err;
142
143 err = rhashtable_insert_fast(&fib->ht, &fib_entry->ht_node,
144 mlxsw_sp_fib_ht_params);
145 if (err)
146 return err;
147 if (fib->prefix_ref_count[prefix_len]++ == 0)
148 mlxsw_sp_prefix_usage_set(&fib->prefix_usage, prefix_len);
149 return 0;
150}
151
152static void mlxsw_sp_fib_entry_remove(struct mlxsw_sp_fib *fib,
153 struct mlxsw_sp_fib_entry *fib_entry)
154{
155 unsigned char prefix_len = fib_entry->key.prefix_len;
156
157 if (--fib->prefix_ref_count[prefix_len] == 0)
158 mlxsw_sp_prefix_usage_clear(&fib->prefix_usage, prefix_len);
159 rhashtable_remove_fast(&fib->ht, &fib_entry->ht_node,
160 mlxsw_sp_fib_ht_params);
161}
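
The per-prefix-length reference counts keep fib->prefix_usage in sync with the table contents: the usage bit only flips on the 0 -> 1 and 1 -> 0 edges, so adding a second /24 route changes nothing, while removing the last one clears the bit (and may later allow a smaller tree). A small userspace model of that edge-triggered bookkeeping, with illustrative names:

#include <stdbool.h>
#include <stdio.h>

#define PREFIX_COUNT 128

struct fib_usage {
        unsigned long ref[PREFIX_COUNT]; /* routes per prefix length */
        bool used[PREFIX_COUNT];         /* mirrors the usage bitmap */
};

static void fib_usage_inc(struct fib_usage *u, unsigned char plen)
{
        if (u->ref[plen]++ == 0)
                u->used[plen] = true;  /* 0 -> 1 edge: set the bit */
}

static void fib_usage_dec(struct fib_usage *u, unsigned char plen)
{
        if (--u->ref[plen] == 0)
                u->used[plen] = false; /* 1 -> 0 edge: clear the bit */
}

int main(void)
{
        struct fib_usage u = { .ref = { 0 } };

        fib_usage_inc(&u, 24);
        fib_usage_inc(&u, 24); /* second /24: bit already set */
        fib_usage_dec(&u, 24);
        printf("/24 used: %d\n", u.used[24]); /* still 1 */
        return 0;
}
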
162
163static struct mlxsw_sp_fib_entry *
164mlxsw_sp_fib_entry_create(struct mlxsw_sp_fib *fib, const void *addr,
165 size_t addr_len, unsigned char prefix_len)
166{
167 struct mlxsw_sp_fib_entry *fib_entry;
168
169 fib_entry = kzalloc(sizeof(*fib_entry), GFP_KERNEL);
170 if (!fib_entry)
171 return NULL;
172 memcpy(fib_entry->key.addr, addr, addr_len);
173 fib_entry->key.prefix_len = prefix_len;
174 return fib_entry;
175}
176
177static void mlxsw_sp_fib_entry_destroy(struct mlxsw_sp_fib_entry *fib_entry)
178{
179 kfree(fib_entry);
180}
181
182static struct mlxsw_sp_fib_entry *
183mlxsw_sp_fib_entry_lookup(struct mlxsw_sp_fib *fib, const void *addr,
184 size_t addr_len, unsigned char prefix_len)
185{
 186	struct mlxsw_sp_fib_key key = {{ 0 }};
187
188 memcpy(key.addr, addr, addr_len);
189 key.prefix_len = prefix_len;
190 return rhashtable_lookup_fast(&fib->ht, &key, mlxsw_sp_fib_ht_params);
191}
192
193static struct mlxsw_sp_fib *mlxsw_sp_fib_create(void)
194{
195 struct mlxsw_sp_fib *fib;
196 int err;
197
198 fib = kzalloc(sizeof(*fib), GFP_KERNEL);
199 if (!fib)
200 return ERR_PTR(-ENOMEM);
201 err = rhashtable_init(&fib->ht, &mlxsw_sp_fib_ht_params);
202 if (err)
203 goto err_rhashtable_init;
204 return fib;
205
206err_rhashtable_init:
207 kfree(fib);
208 return ERR_PTR(err);
209}
210
211static void mlxsw_sp_fib_destroy(struct mlxsw_sp_fib *fib)
212{
213 rhashtable_destroy(&fib->ht);
214 kfree(fib);
215}
216
217static struct mlxsw_sp_lpm_tree *
218mlxsw_sp_lpm_tree_find_unused(struct mlxsw_sp *mlxsw_sp, bool one_reserved)
219{
 220	struct mlxsw_sp_lpm_tree *lpm_tree;
221 int i;
222
223 for (i = 0; i < MLXSW_SP_LPM_TREE_COUNT; i++) {
224 lpm_tree = &mlxsw_sp->router.lpm_trees[i];
225 if (lpm_tree->ref_count == 0) {
226 if (one_reserved)
227 one_reserved = false;
228 else
229 return lpm_tree;
230 }
231 }
232 return NULL;
233}
234
235static int mlxsw_sp_lpm_tree_alloc(struct mlxsw_sp *mlxsw_sp,
236 struct mlxsw_sp_lpm_tree *lpm_tree)
237{
238 char ralta_pl[MLXSW_REG_RALTA_LEN];
239
240 mlxsw_reg_ralta_pack(ralta_pl, true, lpm_tree->proto, lpm_tree->id);
241 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
242}
243
244static int mlxsw_sp_lpm_tree_free(struct mlxsw_sp *mlxsw_sp,
245 struct mlxsw_sp_lpm_tree *lpm_tree)
246{
247 char ralta_pl[MLXSW_REG_RALTA_LEN];
248
249 mlxsw_reg_ralta_pack(ralta_pl, false, lpm_tree->proto, lpm_tree->id);
250 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
251}
252
253static int
254mlxsw_sp_lpm_tree_left_struct_set(struct mlxsw_sp *mlxsw_sp,
255 struct mlxsw_sp_prefix_usage *prefix_usage,
256 struct mlxsw_sp_lpm_tree *lpm_tree)
257{
258 char ralst_pl[MLXSW_REG_RALST_LEN];
259 u8 root_bin = 0;
260 u8 prefix;
261 u8 last_prefix = MLXSW_REG_RALST_BIN_NO_CHILD;
262
263 mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage)
264 root_bin = prefix;
265
266 mlxsw_reg_ralst_pack(ralst_pl, root_bin, lpm_tree->id);
267 mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) {
268 if (prefix == 0)
269 continue;
270 mlxsw_reg_ralst_bin_pack(ralst_pl, prefix, last_prefix,
271 MLXSW_REG_RALST_BIN_NO_CHILD);
272 last_prefix = prefix;
273 }
274 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralst), ralst_pl);
275}
276
277static struct mlxsw_sp_lpm_tree *
278mlxsw_sp_lpm_tree_create(struct mlxsw_sp *mlxsw_sp,
279 struct mlxsw_sp_prefix_usage *prefix_usage,
280 enum mlxsw_sp_l3proto proto, bool one_reserved)
281{
282 struct mlxsw_sp_lpm_tree *lpm_tree;
283 int err;
284
285 lpm_tree = mlxsw_sp_lpm_tree_find_unused(mlxsw_sp, one_reserved);
286 if (!lpm_tree)
287 return ERR_PTR(-EBUSY);
288 lpm_tree->proto = proto;
289 err = mlxsw_sp_lpm_tree_alloc(mlxsw_sp, lpm_tree);
290 if (err)
291 return ERR_PTR(err);
292
293 err = mlxsw_sp_lpm_tree_left_struct_set(mlxsw_sp, prefix_usage,
294 lpm_tree);
295 if (err)
296 goto err_left_struct_set;
 297	memcpy(&lpm_tree->prefix_usage, prefix_usage,
 298	       sizeof(lpm_tree->prefix_usage));
 299	return lpm_tree;
298
299err_left_struct_set:
300 mlxsw_sp_lpm_tree_free(mlxsw_sp, lpm_tree);
301 return ERR_PTR(err);
302}
303
304static int mlxsw_sp_lpm_tree_destroy(struct mlxsw_sp *mlxsw_sp,
305 struct mlxsw_sp_lpm_tree *lpm_tree)
306{
307 return mlxsw_sp_lpm_tree_free(mlxsw_sp, lpm_tree);
308}
309
310static struct mlxsw_sp_lpm_tree *
311mlxsw_sp_lpm_tree_get(struct mlxsw_sp *mlxsw_sp,
312 struct mlxsw_sp_prefix_usage *prefix_usage,
313 enum mlxsw_sp_l3proto proto, bool one_reserved)
314{
315 struct mlxsw_sp_lpm_tree *lpm_tree;
316 int i;
317
318 for (i = 0; i < MLXSW_SP_LPM_TREE_COUNT; i++) {
319 lpm_tree = &mlxsw_sp->router.lpm_trees[i];
320 if (lpm_tree->proto == proto &&
321 mlxsw_sp_prefix_usage_eq(&lpm_tree->prefix_usage,
322 prefix_usage))
323 goto inc_ref_count;
324 }
325 lpm_tree = mlxsw_sp_lpm_tree_create(mlxsw_sp, prefix_usage,
326 proto, one_reserved);
327 if (IS_ERR(lpm_tree))
328 return lpm_tree;
329
330inc_ref_count:
331 lpm_tree->ref_count++;
332 return lpm_tree;
333}
334
335static int mlxsw_sp_lpm_tree_put(struct mlxsw_sp *mlxsw_sp,
336 struct mlxsw_sp_lpm_tree *lpm_tree)
337{
338 if (--lpm_tree->ref_count == 0)
339 return mlxsw_sp_lpm_tree_destroy(mlxsw_sp, lpm_tree);
340 return 0;
341}
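
Trees are shared by usage pattern: get() first looks for an already-allocated tree with identical prefix usage for the protocol and just bumps its reference count; only otherwise does it claim a free slot and program the device through RALTA/RALST. put() releases the hardware tree on the last reference. A hedged kernel-context sketch of a caller; the function name is hypothetical and assumes it lives in this file, since the helpers are static:

static int example_lpm_tree_cycle(struct mlxsw_sp *mlxsw_sp)
{
        struct mlxsw_sp_prefix_usage req_prefix_usage;
        struct mlxsw_sp_lpm_tree *lpm_tree;

        mlxsw_sp_prefix_usage_zero(&req_prefix_usage);
        mlxsw_sp_prefix_usage_set(&req_prefix_usage, 24);

        /* Reuses an identical tree if one exists, else creates one. */
        lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
                                         MLXSW_SP_L3_PROTO_IPV4, false);
        if (IS_ERR(lpm_tree))
                return PTR_ERR(lpm_tree);

        /* ... bind a virtual router to lpm_tree->id via RALTB ... */

        /* Drops the reference; frees the tree if this was the last. */
        return mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
}
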
342
343static void mlxsw_sp_lpm_init(struct mlxsw_sp *mlxsw_sp)
344{
345 struct mlxsw_sp_lpm_tree *lpm_tree;
346 int i;
347
348 for (i = 0; i < MLXSW_SP_LPM_TREE_COUNT; i++) {
349 lpm_tree = &mlxsw_sp->router.lpm_trees[i];
350 lpm_tree->id = i + MLXSW_SP_LPM_TREE_MIN;
351 }
352}
353
354static struct mlxsw_sp_vr *mlxsw_sp_vr_find_unused(struct mlxsw_sp *mlxsw_sp)
355{
356 struct mlxsw_sp_vr *vr;
357 int i;
358
359 for (i = 0; i < MLXSW_SP_VIRTUAL_ROUTER_MAX; i++) {
360 vr = &mlxsw_sp->router.vrs[i];
361 if (!vr->used)
362 return vr;
363 }
364 return NULL;
365}
366
367static int mlxsw_sp_vr_lpm_tree_bind(struct mlxsw_sp *mlxsw_sp,
368 struct mlxsw_sp_vr *vr)
369{
370 char raltb_pl[MLXSW_REG_RALTB_LEN];
371
372 mlxsw_reg_raltb_pack(raltb_pl, vr->id, vr->proto, vr->lpm_tree->id);
373 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
374}
375
376static int mlxsw_sp_vr_lpm_tree_unbind(struct mlxsw_sp *mlxsw_sp,
377 struct mlxsw_sp_vr *vr)
378{
379 char raltb_pl[MLXSW_REG_RALTB_LEN];
380
381 /* Bind to tree 0 which is default */
382 mlxsw_reg_raltb_pack(raltb_pl, vr->id, vr->proto, 0);
383 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
384}
385
386static u32 mlxsw_sp_fix_tb_id(u32 tb_id)
387{
388 /* For our purpose, squash main and local table into one */
389 if (tb_id == RT_TABLE_LOCAL)
390 tb_id = RT_TABLE_MAIN;
391 return tb_id;
392}
393
394static struct mlxsw_sp_vr *mlxsw_sp_vr_find(struct mlxsw_sp *mlxsw_sp,
395 u32 tb_id,
396 enum mlxsw_sp_l3proto proto)
397{
398 struct mlxsw_sp_vr *vr;
399 int i;
400
401 tb_id = mlxsw_sp_fix_tb_id(tb_id);
402 for (i = 0; i < MLXSW_SP_VIRTUAL_ROUTER_MAX; i++) {
403 vr = &mlxsw_sp->router.vrs[i];
404 if (vr->used && vr->proto == proto && vr->tb_id == tb_id)
405 return vr;
406 }
407 return NULL;
408}
409
410static struct mlxsw_sp_vr *mlxsw_sp_vr_create(struct mlxsw_sp *mlxsw_sp,
411 unsigned char prefix_len,
412 u32 tb_id,
413 enum mlxsw_sp_l3proto proto)
414{
415 struct mlxsw_sp_prefix_usage req_prefix_usage;
416 struct mlxsw_sp_lpm_tree *lpm_tree;
417 struct mlxsw_sp_vr *vr;
418 int err;
419
420 vr = mlxsw_sp_vr_find_unused(mlxsw_sp);
421 if (!vr)
422 return ERR_PTR(-EBUSY);
423 vr->fib = mlxsw_sp_fib_create();
424 if (IS_ERR(vr->fib))
425 return ERR_CAST(vr->fib);
426
427 vr->proto = proto;
428 vr->tb_id = tb_id;
429 mlxsw_sp_prefix_usage_zero(&req_prefix_usage);
430 mlxsw_sp_prefix_usage_set(&req_prefix_usage, prefix_len);
431 lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
432 proto, true);
433 if (IS_ERR(lpm_tree)) {
434 err = PTR_ERR(lpm_tree);
435 goto err_tree_get;
436 }
437 vr->lpm_tree = lpm_tree;
438 err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, vr);
439 if (err)
440 goto err_tree_bind;
441
442 vr->used = true;
443 return vr;
444
445err_tree_bind:
446 mlxsw_sp_lpm_tree_put(mlxsw_sp, vr->lpm_tree);
447err_tree_get:
448 mlxsw_sp_fib_destroy(vr->fib);
449
450 return ERR_PTR(err);
451}
452
453static void mlxsw_sp_vr_destroy(struct mlxsw_sp *mlxsw_sp,
454 struct mlxsw_sp_vr *vr)
455{
456 mlxsw_sp_vr_lpm_tree_unbind(mlxsw_sp, vr);
457 mlxsw_sp_lpm_tree_put(mlxsw_sp, vr->lpm_tree);
458 mlxsw_sp_fib_destroy(vr->fib);
459 vr->used = false;
460}
461
462static int
463mlxsw_sp_vr_lpm_tree_check(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr,
464 struct mlxsw_sp_prefix_usage *req_prefix_usage)
465{
466 struct mlxsw_sp_lpm_tree *lpm_tree;
467
468 if (mlxsw_sp_prefix_usage_eq(req_prefix_usage,
469 &vr->lpm_tree->prefix_usage))
470 return 0;
471
472 lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, req_prefix_usage,
473 vr->proto, false);
474 if (IS_ERR(lpm_tree)) {
475 /* We failed to get a tree according to the required
 476	 * prefix usage. However, the current tree might still be good
 477	 * for us if our requirement is a subset of the prefixes used
 478	 * in the tree.
479 */
480 if (mlxsw_sp_prefix_usage_subset(req_prefix_usage,
481 &vr->lpm_tree->prefix_usage))
482 return 0;
483 return PTR_ERR(lpm_tree);
484 }
485
486 mlxsw_sp_vr_lpm_tree_unbind(mlxsw_sp, vr);
487 mlxsw_sp_lpm_tree_put(mlxsw_sp, vr->lpm_tree);
488 vr->lpm_tree = lpm_tree;
489 return mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, vr);
490}
491
492static struct mlxsw_sp_vr *mlxsw_sp_vr_get(struct mlxsw_sp *mlxsw_sp,
493 unsigned char prefix_len,
494 u32 tb_id,
495 enum mlxsw_sp_l3proto proto)
496{
497 struct mlxsw_sp_vr *vr;
498 int err;
499
500 tb_id = mlxsw_sp_fix_tb_id(tb_id);
501 vr = mlxsw_sp_vr_find(mlxsw_sp, tb_id, proto);
502 if (!vr) {
503 vr = mlxsw_sp_vr_create(mlxsw_sp, prefix_len, tb_id, proto);
504 if (IS_ERR(vr))
505 return vr;
506 } else {
507 struct mlxsw_sp_prefix_usage req_prefix_usage;
508
509 mlxsw_sp_prefix_usage_cpy(&req_prefix_usage,
510 &vr->fib->prefix_usage);
511 mlxsw_sp_prefix_usage_set(&req_prefix_usage, prefix_len);
512 /* Need to replace LPM tree in case new prefix is required. */
513 err = mlxsw_sp_vr_lpm_tree_check(mlxsw_sp, vr,
514 &req_prefix_usage);
515 if (err)
516 return ERR_PTR(err);
517 }
518 return vr;
519}
520
521static void mlxsw_sp_vr_put(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr)
522{
 523	/* Destroy the virtual router entity in case the associated FIB is
 524	 * empty, allowing it to be reused for other tables in the future.
 525	 * Otherwise, check whether some prefix usage disappeared and switch
 526	 * to a smaller tree if that is the case. Note that if a new, smaller
 527	 * tree cannot be allocated, the original one is kept in use.
528 */
529 if (mlxsw_sp_prefix_usage_none(&vr->fib->prefix_usage))
530 mlxsw_sp_vr_destroy(mlxsw_sp, vr);
531 else
532 mlxsw_sp_vr_lpm_tree_check(mlxsw_sp, vr,
533 &vr->fib->prefix_usage);
534}
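
Together, mlxsw_sp_vr_get() and mlxsw_sp_vr_put() give the FIB code acquire/release semantics over virtual routers: the kernel table id is squashed first (so RT_TABLE_LOCAL and RT_TABLE_MAIN share one VR), an existing VR is revalidated against the newly required prefix length, and a VR whose FIB went empty is torn down on release. A hedged kernel-context sketch of a caller; the function is hypothetical and assumed to live in this file:

static int example_vr_cycle(struct mlxsw_sp *mlxsw_sp)
{
        struct mlxsw_sp_vr *vr;

        /* RT_TABLE_LOCAL is squashed into RT_TABLE_MAIN internally. */
        vr = mlxsw_sp_vr_get(mlxsw_sp, 24, RT_TABLE_LOCAL,
                             MLXSW_SP_L3_PROTO_IPV4);
        if (IS_ERR(vr))
                return PTR_ERR(vr);

        /* With no FIB entries inserted, put() destroys the VR again. */
        mlxsw_sp_vr_put(mlxsw_sp, vr);
        return 0;
}
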
535
536static void mlxsw_sp_vrs_init(struct mlxsw_sp *mlxsw_sp)
537{
538 struct mlxsw_sp_vr *vr;
539 int i;
540
541 for (i = 0; i < MLXSW_SP_VIRTUAL_ROUTER_MAX; i++) {
542 vr = &mlxsw_sp->router.vrs[i];
543 vr->id = i;
544 }
545}
546
43static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp) 547static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
44{ 548{
45 char rgcr_pl[MLXSW_REG_RGCR_LEN]; 549 char rgcr_pl[MLXSW_REG_RGCR_LEN];
@@ -59,10 +563,252 @@ static void __mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
59 563
60int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp) 564int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
61{ 565{
62 return __mlxsw_sp_router_init(mlxsw_sp); 566 int err;
567
568 err = __mlxsw_sp_router_init(mlxsw_sp);
569 if (err)
570 return err;
571 mlxsw_sp_lpm_init(mlxsw_sp);
572 mlxsw_sp_vrs_init(mlxsw_sp);
573 return 0;
63} 574}
64 575
65void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp) 576void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
66{ 577{
67 __mlxsw_sp_router_fini(mlxsw_sp); 578 __mlxsw_sp_router_fini(mlxsw_sp);
68} 579}
580
581static int mlxsw_sp_fib_entry_op4_local(struct mlxsw_sp *mlxsw_sp,
582 struct mlxsw_sp_fib_entry *fib_entry,
583 enum mlxsw_reg_ralue_op op)
584{
585 char ralue_pl[MLXSW_REG_RALUE_LEN];
586 u32 *p_dip = (u32 *) fib_entry->key.addr;
587 struct mlxsw_sp_vr *vr = fib_entry->vr;
588
589 mlxsw_reg_ralue_pack4(ralue_pl, vr->proto, op, vr->id,
590 fib_entry->key.prefix_len, *p_dip);
591 mlxsw_reg_ralue_act_local_pack(ralue_pl,
592 MLXSW_REG_RALUE_TRAP_ACTION_NOP, 0,
593 fib_entry->rif);
594 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
595}
596
597static int mlxsw_sp_fib_entry_op4_trap(struct mlxsw_sp *mlxsw_sp,
598 struct mlxsw_sp_fib_entry *fib_entry,
599 enum mlxsw_reg_ralue_op op)
600{
601 char ralue_pl[MLXSW_REG_RALUE_LEN];
602 u32 *p_dip = (u32 *) fib_entry->key.addr;
603 struct mlxsw_sp_vr *vr = fib_entry->vr;
604
605 mlxsw_reg_ralue_pack4(ralue_pl, vr->proto, op, vr->id,
606 fib_entry->key.prefix_len, *p_dip);
607 mlxsw_reg_ralue_act_ip2me_pack(ralue_pl);
608 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
609}
610
611static int mlxsw_sp_fib_entry_op4(struct mlxsw_sp *mlxsw_sp,
612 struct mlxsw_sp_fib_entry *fib_entry,
613 enum mlxsw_reg_ralue_op op)
614{
615 switch (fib_entry->type) {
616 case MLXSW_SP_FIB_ENTRY_TYPE_REMOTE:
617 return -EINVAL;
618 case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL:
619 return mlxsw_sp_fib_entry_op4_local(mlxsw_sp, fib_entry, op);
620 case MLXSW_SP_FIB_ENTRY_TYPE_TRAP:
621 return mlxsw_sp_fib_entry_op4_trap(mlxsw_sp, fib_entry, op);
622 }
623 return -EINVAL;
624}
625
626static int mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
627 struct mlxsw_sp_fib_entry *fib_entry,
628 enum mlxsw_reg_ralue_op op)
629{
630 switch (fib_entry->vr->proto) {
631 case MLXSW_SP_L3_PROTO_IPV4:
632 return mlxsw_sp_fib_entry_op4(mlxsw_sp, fib_entry, op);
633 case MLXSW_SP_L3_PROTO_IPV6:
634 return -EINVAL;
635 }
636 return -EINVAL;
637}
638
639static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
640 struct mlxsw_sp_fib_entry *fib_entry)
641{
642 enum mlxsw_reg_ralue_op op;
643
644 op = !fib_entry->added ? MLXSW_REG_RALUE_OP_WRITE_WRITE :
645 MLXSW_REG_RALUE_OP_WRITE_UPDATE;
646 return mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry, op);
647}
648
649static int mlxsw_sp_fib_entry_del(struct mlxsw_sp *mlxsw_sp,
650 struct mlxsw_sp_fib_entry *fib_entry)
651{
652 return mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry,
653 MLXSW_REG_RALUE_OP_WRITE_DELETE);
654}
655
656struct mlxsw_sp_router_fib4_add_info {
657 struct switchdev_trans_item tritem;
658 struct mlxsw_sp *mlxsw_sp;
659 struct mlxsw_sp_fib_entry *fib_entry;
660};
661
662static void mlxsw_sp_router_fib4_add_info_destroy(void const *data)
663{
664 const struct mlxsw_sp_router_fib4_add_info *info = data;
665 struct mlxsw_sp_fib_entry *fib_entry = info->fib_entry;
666 struct mlxsw_sp *mlxsw_sp = info->mlxsw_sp;
667
 668	mlxsw_sp_vr_put(mlxsw_sp, fib_entry->vr);
 669	mlxsw_sp_fib_entry_destroy(fib_entry);
670 kfree(info);
671}
672
673static int
674mlxsw_sp_router_fib4_entry_init(struct mlxsw_sp *mlxsw_sp,
675 const struct switchdev_obj_ipv4_fib *fib4,
676 struct mlxsw_sp_fib_entry *fib_entry)
677{
678 struct fib_info *fi = fib4->fi;
679
680 if (fib4->type == RTN_LOCAL || fib4->type == RTN_BROADCAST) {
681 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
682 return 0;
683 }
684 if (fib4->type != RTN_UNICAST)
685 return -EINVAL;
686
687 if (fi->fib_scope != RT_SCOPE_UNIVERSE) {
688 struct mlxsw_sp_rif *r;
689
690 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
691 r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, fi->fib_dev);
692 if (!r)
693 return -EINVAL;
694 fib_entry->rif = r->rif;
695 return 0;
696 }
697 return -EINVAL;
698}
699
700static int
701mlxsw_sp_router_fib4_add_prepare(struct mlxsw_sp_port *mlxsw_sp_port,
702 const struct switchdev_obj_ipv4_fib *fib4,
703 struct switchdev_trans *trans)
704{
705 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
706 struct mlxsw_sp_router_fib4_add_info *info;
707 struct mlxsw_sp_fib_entry *fib_entry;
708 struct mlxsw_sp_vr *vr;
709 int err;
710
711 vr = mlxsw_sp_vr_get(mlxsw_sp, fib4->dst_len, fib4->tb_id,
712 MLXSW_SP_L3_PROTO_IPV4);
713 if (IS_ERR(vr))
714 return PTR_ERR(vr);
715
716 fib_entry = mlxsw_sp_fib_entry_create(vr->fib, &fib4->dst,
717 sizeof(fib4->dst), fib4->dst_len);
718 if (!fib_entry) {
719 err = -ENOMEM;
720 goto err_fib_entry_create;
721 }
722 fib_entry->vr = vr;
723
724 err = mlxsw_sp_router_fib4_entry_init(mlxsw_sp, fib4, fib_entry);
725 if (err)
726 goto err_fib4_entry_init;
727
728 info = kmalloc(sizeof(*info), GFP_KERNEL);
729 if (!info) {
730 err = -ENOMEM;
731 goto err_alloc_info;
732 }
733 info->mlxsw_sp = mlxsw_sp;
734 info->fib_entry = fib_entry;
735 switchdev_trans_item_enqueue(trans, info,
736 mlxsw_sp_router_fib4_add_info_destroy,
737 &info->tritem);
738 return 0;
739
740err_alloc_info:
741err_fib4_entry_init:
742 mlxsw_sp_fib_entry_destroy(fib_entry);
743err_fib_entry_create:
744 mlxsw_sp_vr_put(mlxsw_sp, vr);
745 return err;
746}
747
748static int
749mlxsw_sp_router_fib4_add_commit(struct mlxsw_sp_port *mlxsw_sp_port,
750 const struct switchdev_obj_ipv4_fib *fib4,
751 struct switchdev_trans *trans)
752{
753 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
754 struct mlxsw_sp_router_fib4_add_info *info;
755 struct mlxsw_sp_fib_entry *fib_entry;
756 struct mlxsw_sp_vr *vr;
757 int err;
758
759 info = switchdev_trans_item_dequeue(trans);
760 fib_entry = info->fib_entry;
761 kfree(info);
762
763 vr = fib_entry->vr;
 764	err = mlxsw_sp_fib_entry_insert(vr->fib, fib_entry);
765 if (err)
766 goto err_fib_entry_insert;
767 err = mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
768 if (err)
769 goto err_fib_entry_add;
770 return 0;
771
772err_fib_entry_add:
773 mlxsw_sp_fib_entry_remove(vr->fib, fib_entry);
774err_fib_entry_insert:
775 mlxsw_sp_fib_entry_destroy(fib_entry);
776 mlxsw_sp_vr_put(mlxsw_sp, vr);
777 return err;
778}
779
780int mlxsw_sp_router_fib4_add(struct mlxsw_sp_port *mlxsw_sp_port,
781 const struct switchdev_obj_ipv4_fib *fib4,
782 struct switchdev_trans *trans)
783{
784 if (switchdev_trans_ph_prepare(trans))
785 return mlxsw_sp_router_fib4_add_prepare(mlxsw_sp_port,
786 fib4, trans);
787 return mlxsw_sp_router_fib4_add_commit(mlxsw_sp_port,
788 fib4, trans);
789}
790
791int mlxsw_sp_router_fib4_del(struct mlxsw_sp_port *mlxsw_sp_port,
792 const struct switchdev_obj_ipv4_fib *fib4)
793{
794 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
795 struct mlxsw_sp_fib_entry *fib_entry;
796 struct mlxsw_sp_vr *vr;
797
798 vr = mlxsw_sp_vr_find(mlxsw_sp, fib4->tb_id, MLXSW_SP_L3_PROTO_IPV4);
799 if (!vr) {
800 dev_warn(mlxsw_sp->bus_info->dev, "Failed to find virtual router for FIB4 entry being removed.\n");
801 return -ENOENT;
802 }
803 fib_entry = mlxsw_sp_fib_entry_lookup(vr->fib, &fib4->dst,
804 sizeof(fib4->dst), fib4->dst_len);
805 if (!fib_entry) {
806 dev_warn(mlxsw_sp->bus_info->dev, "Failed to find FIB4 entry being removed.\n");
 807		return -ENOENT;
 808	}
 809	mlxsw_sp_fib_entry_del(mlxsw_sp, fib_entry);
810 mlxsw_sp_fib_entry_remove(vr->fib, fib_entry);
811 mlxsw_sp_fib_entry_destroy(fib_entry);
812 mlxsw_sp_vr_put(mlxsw_sp, vr);
813 return 0;
814}
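
The add path above follows switchdev's two-phase transaction model: everything that can fail (VR lookup, allocation, validation) happens in the prepare phase, and the result is parked on the transaction together with a destructor so the core can unwind an aborted transaction; commit merely dequeues and applies it. A hedged sketch of the pattern with hypothetical names; only the switchdev_trans_* helpers are the real API:

struct example_add_info {
        struct switchdev_trans_item tritem;
        int prepared_value; /* whatever prepare computed */
};

static void example_add_info_destroy(const void *data)
{
        const struct example_add_info *info = data;

        /* Run by the switchdev core if the transaction is aborted
         * and commit never happens.
         */
        kfree(info);
}

static int example_obj_add(struct switchdev_trans *trans)
{
        struct example_add_info *info;

        if (switchdev_trans_ph_prepare(trans)) {
                /* Prepare phase: all fallible work goes here. */
                info = kzalloc(sizeof(*info), GFP_KERNEL);
                if (!info)
                        return -ENOMEM;
                info->prepared_value = 42;
                switchdev_trans_item_enqueue(trans, info,
                                             example_add_info_destroy,
                                             &info->tritem);
                return 0;
        }

        /* Commit phase: must not fail; apply the prepared state. */
        info = switchdev_trans_item_dequeue(trans);
        kfree(info);
        return 0;
}
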
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
index 927117e2bcd8..a1ad5e6bdfa8 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -166,11 +166,6 @@ static int mlxsw_sp_port_attr_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
166 return mlxsw_sp_port_stp_state_set(mlxsw_sp_port, state); 166 return mlxsw_sp_port_stp_state_set(mlxsw_sp_port, state);
167} 167}
168 168
169static bool mlxsw_sp_vfid_is_vport_br(u16 vfid)
170{
171 return vfid >= MLXSW_SP_VFID_PORT_MAX;
172}
173
174static int __mlxsw_sp_port_flood_set(struct mlxsw_sp_port *mlxsw_sp_port, 169static int __mlxsw_sp_port_flood_set(struct mlxsw_sp_port *mlxsw_sp_port,
175 u16 idx_begin, u16 idx_end, bool set, 170 u16 idx_begin, u16 idx_end, bool set,
176 bool only_uc) 171 bool only_uc)
@@ -182,15 +177,10 @@ static int __mlxsw_sp_port_flood_set(struct mlxsw_sp_port *mlxsw_sp_port,
182 char *sftr_pl; 177 char *sftr_pl;
183 int err; 178 int err;
184 179
185 if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) { 180 if (mlxsw_sp_port_is_vport(mlxsw_sp_port))
186 table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID; 181 table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID;
187 if (mlxsw_sp_vfid_is_vport_br(idx_begin)) 182 else
188 local_port = mlxsw_sp_port->local_port;
189 else
190 local_port = MLXSW_PORT_CPU_PORT;
191 } else {
192 table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST; 183 table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST;
193 }
194 184
195 sftr_pl = kmalloc(MLXSW_REG_SFTR_LEN, GFP_KERNEL); 185 sftr_pl = kmalloc(MLXSW_REG_SFTR_LEN, GFP_KERNEL);
196 if (!sftr_pl) 186 if (!sftr_pl)
@@ -384,18 +374,6 @@ static int mlxsw_sp_port_attr_set(struct net_device *dev,
384 return err; 374 return err;
385} 375}
386 376
387static struct mlxsw_sp_fid *mlxsw_sp_fid_find(struct mlxsw_sp *mlxsw_sp,
388 u16 fid)
389{
390 struct mlxsw_sp_fid *f;
391
392 list_for_each_entry(f, &mlxsw_sp->fids, list)
393 if (f->fid == fid)
394 return f;
395
396 return NULL;
397}
398
399static int mlxsw_sp_fid_op(struct mlxsw_sp *mlxsw_sp, u16 fid, bool create) 377static int mlxsw_sp_fid_op(struct mlxsw_sp *mlxsw_sp, u16 fid, bool create)
400{ 378{
401 char sfmr_pl[MLXSW_REG_SFMR_LEN]; 379 char sfmr_pl[MLXSW_REG_SFMR_LEN];
@@ -426,8 +404,7 @@ static struct mlxsw_sp_fid *mlxsw_sp_fid_alloc(u16 fid)
426 return f; 404 return f;
427} 405}
428 406
429static struct mlxsw_sp_fid *mlxsw_sp_fid_create(struct mlxsw_sp *mlxsw_sp, 407struct mlxsw_sp_fid *mlxsw_sp_fid_create(struct mlxsw_sp *mlxsw_sp, u16 fid)
430 u16 fid)
431{ 408{
432 struct mlxsw_sp_fid *f; 409 struct mlxsw_sp_fid *f;
433 int err; 410 int err;
@@ -462,13 +439,15 @@ err_fid_map:
462 return ERR_PTR(err); 439 return ERR_PTR(err);
463} 440}
464 441
465static void mlxsw_sp_fid_destroy(struct mlxsw_sp *mlxsw_sp, 442void mlxsw_sp_fid_destroy(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fid *f)
466 struct mlxsw_sp_fid *f)
467{ 443{
468 u16 fid = f->fid; 444 u16 fid = f->fid;
469 445
470 list_del(&f->list); 446 list_del(&f->list);
471 447
448 if (f->r)
449 mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->r);
450
472 kfree(f); 451 kfree(f);
473 452
474 mlxsw_sp_fid_op(mlxsw_sp, fid, false); 453 mlxsw_sp_fid_op(mlxsw_sp, fid, false);
@@ -753,9 +732,10 @@ static enum mlxsw_reg_sfd_op mlxsw_sp_sfd_op(bool adding)
753 MLXSW_REG_SFD_OP_WRITE_REMOVE; 732 MLXSW_REG_SFD_OP_WRITE_REMOVE;
754} 733}
755 734
756static int mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port, 735static int __mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
757 const char *mac, u16 fid, bool adding, 736 const char *mac, u16 fid, bool adding,
758 bool dynamic) 737 enum mlxsw_reg_sfd_rec_action action,
738 bool dynamic)
759{ 739{
760 char *sfd_pl; 740 char *sfd_pl;
761 int err; 741 int err;
@@ -766,14 +746,29 @@ static int mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
766 746
767 mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0); 747 mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
768 mlxsw_reg_sfd_uc_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic), 748 mlxsw_reg_sfd_uc_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic),
769 mac, fid, MLXSW_REG_SFD_REC_ACTION_NOP, 749 mac, fid, action, local_port);
770 local_port);
771 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl); 750 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
772 kfree(sfd_pl); 751 kfree(sfd_pl);
773 752
774 return err; 753 return err;
775} 754}
776 755
756static int mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
757 const char *mac, u16 fid, bool adding,
758 bool dynamic)
759{
760 return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid, adding,
761 MLXSW_REG_SFD_REC_ACTION_NOP, dynamic);
762}
763
764int mlxsw_sp_rif_fdb_op(struct mlxsw_sp *mlxsw_sp, const char *mac, u16 fid,
765 bool adding)
766{
767 return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, 0, mac, fid, adding,
768 MLXSW_REG_SFD_REC_ACTION_FORWARD_IP_ROUTER,
769 false);
770}
771
777static int mlxsw_sp_port_fdb_uc_lag_op(struct mlxsw_sp *mlxsw_sp, u16 lag_id, 772static int mlxsw_sp_port_fdb_uc_lag_op(struct mlxsw_sp *mlxsw_sp, u16 lag_id,
778 const char *mac, u16 fid, u16 lag_vid, 773 const char *mac, u16 fid, u16 lag_vid,
779 bool adding, bool dynamic) 774 bool adding, bool dynamic)
@@ -978,6 +973,11 @@ static int mlxsw_sp_port_obj_add(struct net_device *dev,
978 SWITCHDEV_OBJ_PORT_VLAN(obj), 973 SWITCHDEV_OBJ_PORT_VLAN(obj),
979 trans); 974 trans);
980 break; 975 break;
976 case SWITCHDEV_OBJ_ID_IPV4_FIB:
977 err = mlxsw_sp_router_fib4_add(mlxsw_sp_port,
978 SWITCHDEV_OBJ_IPV4_FIB(obj),
979 trans);
980 break;
981 case SWITCHDEV_OBJ_ID_PORT_FDB: 981 case SWITCHDEV_OBJ_ID_PORT_FDB:
982 err = mlxsw_sp_port_fdb_static_add(mlxsw_sp_port, 982 err = mlxsw_sp_port_fdb_static_add(mlxsw_sp_port,
983 SWITCHDEV_OBJ_PORT_FDB(obj), 983 SWITCHDEV_OBJ_PORT_FDB(obj),
@@ -1123,6 +1123,10 @@ static int mlxsw_sp_port_obj_del(struct net_device *dev,
1123 err = mlxsw_sp_port_vlans_del(mlxsw_sp_port, 1123 err = mlxsw_sp_port_vlans_del(mlxsw_sp_port,
1124 SWITCHDEV_OBJ_PORT_VLAN(obj)); 1124 SWITCHDEV_OBJ_PORT_VLAN(obj));
1125 break; 1125 break;
1126 case SWITCHDEV_OBJ_ID_IPV4_FIB:
1127 err = mlxsw_sp_router_fib4_del(mlxsw_sp_port,
1128 SWITCHDEV_OBJ_IPV4_FIB(obj));
1129 break;
1126 case SWITCHDEV_OBJ_ID_PORT_FDB: 1130 case SWITCHDEV_OBJ_ID_PORT_FDB:
1127 err = mlxsw_sp_port_fdb_static_del(mlxsw_sp_port, 1131 err = mlxsw_sp_port_fdb_static_del(mlxsw_sp_port,
1128 SWITCHDEV_OBJ_PORT_FDB(obj)); 1132 SWITCHDEV_OBJ_PORT_FDB(obj));
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 7dc2ec74122a..0c6ee2c5099f 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -3804,12 +3804,30 @@ void *netdev_lower_get_next_private_rcu(struct net_device *dev,
3804 3804
3805void *netdev_lower_get_next(struct net_device *dev, 3805void *netdev_lower_get_next(struct net_device *dev,
3806 struct list_head **iter); 3806 struct list_head **iter);
3807
3807#define netdev_for_each_lower_dev(dev, ldev, iter) \ 3808#define netdev_for_each_lower_dev(dev, ldev, iter) \
3808 for (iter = (dev)->adj_list.lower.next, \ 3809 for (iter = (dev)->adj_list.lower.next, \
3809 ldev = netdev_lower_get_next(dev, &(iter)); \ 3810 ldev = netdev_lower_get_next(dev, &(iter)); \
3810 ldev; \ 3811 ldev; \
3811 ldev = netdev_lower_get_next(dev, &(iter))) 3812 ldev = netdev_lower_get_next(dev, &(iter)))
3812 3813
3814struct net_device *netdev_all_lower_get_next(struct net_device *dev,
3815 struct list_head **iter);
3816struct net_device *netdev_all_lower_get_next_rcu(struct net_device *dev,
3817 struct list_head **iter);
3818
3819#define netdev_for_each_all_lower_dev(dev, ldev, iter) \
3820 for (iter = (dev)->all_adj_list.lower.next, \
3821 ldev = netdev_all_lower_get_next(dev, &(iter)); \
3822 ldev; \
3823 ldev = netdev_all_lower_get_next(dev, &(iter)))
3824
3825#define netdev_for_each_all_lower_dev_rcu(dev, ldev, iter) \
3826 for (iter = &(dev)->all_adj_list.lower, \
3827 ldev = netdev_all_lower_get_next_rcu(dev, &(iter)); \
3828 ldev; \
3829 ldev = netdev_all_lower_get_next_rcu(dev, &(iter)))
3830
3813void *netdev_adjacent_get_private(struct list_head *adj_list); 3831void *netdev_adjacent_get_private(struct list_head *adj_list);
3814void *netdev_lower_get_first_private_rcu(struct net_device *dev); 3832void *netdev_lower_get_first_private_rcu(struct net_device *dev);
3815struct net_device *netdev_master_upper_dev_get(struct net_device *dev); 3833struct net_device *netdev_master_upper_dev_get(struct net_device *dev);
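
The new iterators walk all_adj_list, i.e. the transitive closure of lower devices rather than only direct ones, which is what lets a driver find its own ports underneath stacked bridge/bond/VLAN topologies. A hedged kernel-context sketch using the non-RCU variant; the function is hypothetical and RTNL must be held:

static bool example_dev_has_lower(struct net_device *dev,
                                  struct net_device *target)
{
        struct net_device *ldev;
        struct list_head *iter;

        ASSERT_RTNL();

        /* Also visits indirect lower devices, e.g. a port under a
         * bond that is itself enslaved to 'dev'.
         */
        netdev_for_each_all_lower_dev(dev, ldev, iter)
                if (ldev == target)
                        return true;
        return false;
}
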
diff --git a/net/core/dev.c b/net/core/dev.c
index aba10d2a8bc3..a4f3b0a9aeaf 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -5445,6 +5445,52 @@ void *netdev_lower_get_next(struct net_device *dev, struct list_head **iter)
5445EXPORT_SYMBOL(netdev_lower_get_next); 5445EXPORT_SYMBOL(netdev_lower_get_next);
5446 5446
5447/** 5447/**
5448 * netdev_all_lower_get_next - Get the next device from all lower neighbour list
5449 * @dev: device
5450 * @iter: list_head ** of the current position
5451 *
5452 * Gets the next netdev_adjacent from the dev's all lower neighbour
5453 * list, starting from iter position. The caller must hold RTNL lock or
5454 * its own locking that guarantees that the neighbour all lower
5455 * list will remain unchanged.
5456 */
5457struct net_device *netdev_all_lower_get_next(struct net_device *dev, struct list_head **iter)
5458{
5459 struct netdev_adjacent *lower;
5460
5461 lower = list_entry(*iter, struct netdev_adjacent, list);
5462
5463 if (&lower->list == &dev->all_adj_list.lower)
5464 return NULL;
5465
5466 *iter = lower->list.next;
5467
5468 return lower->dev;
5469}
5470EXPORT_SYMBOL(netdev_all_lower_get_next);
5471
5472/**
5473 * netdev_all_lower_get_next_rcu - Get the next device from all
5474 * lower neighbour list, RCU variant
5475 * @dev: device
5476 * @iter: list_head ** of the current position
5477 *
5478 * Gets the next netdev_adjacent from the dev's all lower neighbour
5479 * list, starting from iter position. The caller must hold RCU read lock.
5480 */
5481struct net_device *netdev_all_lower_get_next_rcu(struct net_device *dev,
5482 struct list_head **iter)
5483{
5484 struct netdev_adjacent *lower;
5485
5486 lower = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
5487
5488 if (&lower->list == &dev->all_adj_list.lower)
5489 return NULL;
5490
5491 *iter = &lower->list;
5492
5493 return lower->dev;
5494}
5491EXPORT_SYMBOL(netdev_all_lower_get_next_rcu);
5492
5493/**
5448 * netdev_lower_get_first_private_rcu - Get the first ->private from the 5494 * netdev_lower_get_first_private_rcu - Get the first ->private from the
5449 * lower neighbour list, RCU 5495 * lower neighbour list, RCU
5450 * variant 5496 * variant