diff options
author | David S. Miller <davem@davemloft.net> | 2017-04-24 15:58:03 -0400 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2017-04-24 15:58:03 -0400 |
commit | 38baf3a68bd2a8bca346abf0ba4b98a691543e7e (patch) | |
tree | b5d4b695efb171176e4990e5d19b96b6f2da00cd | |
parent | fc1f8f4f310ac65b1337e2d7ba52ae4ff2b7c849 (diff) | |
parent | 5e82c9e4ed60beba83f46a1a5a8307b99a23e982 (diff) |
Merge tag 'mlx5-fixes-2017-04-22' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux
Saeed Mahameed says:
====================
Mellanox, mlx5 fixes 2017-04-22
This series contains some mlx5 fixes for net.
For your convenience, this series does not introduce any conflicts with
the ongoing net-next pull request.
Please pull and let me know if there's any problem.
For -stable:
("net/mlx5: E-Switch, Correctly deal with inline mode on ConnectX-5") kernels >= 4.10
("net/mlx5e: Fix ETHTOOL_GRXCLSRLALL handling") kernels >= 4.8
("net/mlx5e: Fix small packet threshold") kernels >= 4.7
("net/mlx5: Fix driver load bad flow when having fw initializing timeout") kernels >= 4.4
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
6 files changed, 76 insertions, 53 deletions
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index dc52053128bc..3d9490cd2db1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h | |||
@@ -90,7 +90,7 @@ | |||
90 | #define MLX5E_VALID_NUM_MTTS(num_mtts) (MLX5_MTT_OCTW(num_mtts) - 1 <= U16_MAX) | 90 | #define MLX5E_VALID_NUM_MTTS(num_mtts) (MLX5_MTT_OCTW(num_mtts) - 1 <= U16_MAX) |
91 | 91 | ||
92 | #define MLX5_UMR_ALIGN (2048) | 92 | #define MLX5_UMR_ALIGN (2048) |
93 | #define MLX5_MPWRQ_SMALL_PACKET_THRESHOLD (128) | 93 | #define MLX5_MPWRQ_SMALL_PACKET_THRESHOLD (256) |
94 | 94 | ||
95 | #define MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ (64 * 1024) | 95 | #define MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ (64 * 1024) |
96 | #define MLX5E_DEFAULT_LRO_TIMEOUT 32 | 96 | #define MLX5E_DEFAULT_LRO_TIMEOUT 32 |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c index d55fff0ba388..26fc77e80f7b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c | |||
@@ -564,6 +564,7 @@ int mlx5e_ethtool_get_all_flows(struct mlx5e_priv *priv, struct ethtool_rxnfc *i | |||
564 | int idx = 0; | 564 | int idx = 0; |
565 | int err = 0; | 565 | int err = 0; |
566 | 566 | ||
567 | info->data = MAX_NUM_OF_ETHTOOL_RULES; | ||
567 | while ((!err || err == -ENOENT) && idx < info->rule_cnt) { | 568 | while ((!err || err == -ENOENT) && idx < info->rule_cnt) { |
568 | err = mlx5e_ethtool_get_flow(priv, info, location); | 569 | err = mlx5e_ethtool_get_flow(priv, info, location); |
569 | if (!err) | 570 | if (!err) |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index fade7233dac5..5436866798f4 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | |||
@@ -639,7 +639,8 @@ static int parse_cls_flower(struct mlx5e_priv *priv, | |||
639 | 639 | ||
640 | if (!err && (flow->flags & MLX5E_TC_FLOW_ESWITCH) && | 640 | if (!err && (flow->flags & MLX5E_TC_FLOW_ESWITCH) && |
641 | rep->vport != FDB_UPLINK_VPORT) { | 641 | rep->vport != FDB_UPLINK_VPORT) { |
642 | if (min_inline > esw->offloads.inline_mode) { | 642 | if (esw->offloads.inline_mode != MLX5_INLINE_MODE_NONE && |
643 | esw->offloads.inline_mode < min_inline) { | ||
643 | netdev_warn(priv->netdev, | 644 | netdev_warn(priv->netdev, |
644 | "Flow is not offloaded due to min inline setting, required %d actual %d\n", | 645 | "Flow is not offloaded due to min inline setting, required %d actual %d\n", |
645 | min_inline, esw->offloads.inline_mode); | 646 | min_inline, esw->offloads.inline_mode); |
@@ -785,16 +786,15 @@ static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv, | |||
785 | return 0; | 786 | return 0; |
786 | } | 787 | } |
787 | 788 | ||
788 | static int gen_vxlan_header_ipv4(struct net_device *out_dev, | 789 | static void gen_vxlan_header_ipv4(struct net_device *out_dev, |
789 | char buf[], | 790 | char buf[], int encap_size, |
790 | unsigned char h_dest[ETH_ALEN], | 791 | unsigned char h_dest[ETH_ALEN], |
791 | int ttl, | 792 | int ttl, |
792 | __be32 daddr, | 793 | __be32 daddr, |
793 | __be32 saddr, | 794 | __be32 saddr, |
794 | __be16 udp_dst_port, | 795 | __be16 udp_dst_port, |
795 | __be32 vx_vni) | 796 | __be32 vx_vni) |
796 | { | 797 | { |
797 | int encap_size = VXLAN_HLEN + sizeof(struct iphdr) + ETH_HLEN; | ||
798 | struct ethhdr *eth = (struct ethhdr *)buf; | 798 | struct ethhdr *eth = (struct ethhdr *)buf; |
799 | struct iphdr *ip = (struct iphdr *)((char *)eth + sizeof(struct ethhdr)); | 799 | struct iphdr *ip = (struct iphdr *)((char *)eth + sizeof(struct ethhdr)); |
800 | struct udphdr *udp = (struct udphdr *)((char *)ip + sizeof(struct iphdr)); | 800 | struct udphdr *udp = (struct udphdr *)((char *)ip + sizeof(struct iphdr)); |
@@ -817,20 +817,17 @@ static int gen_vxlan_header_ipv4(struct net_device *out_dev, | |||
817 | udp->dest = udp_dst_port; | 817 | udp->dest = udp_dst_port; |
818 | vxh->vx_flags = VXLAN_HF_VNI; | 818 | vxh->vx_flags = VXLAN_HF_VNI; |
819 | vxh->vx_vni = vxlan_vni_field(vx_vni); | 819 | vxh->vx_vni = vxlan_vni_field(vx_vni); |
820 | |||
821 | return encap_size; | ||
822 | } | 820 | } |
823 | 821 | ||
824 | static int gen_vxlan_header_ipv6(struct net_device *out_dev, | 822 | static void gen_vxlan_header_ipv6(struct net_device *out_dev, |
825 | char buf[], | 823 | char buf[], int encap_size, |
826 | unsigned char h_dest[ETH_ALEN], | 824 | unsigned char h_dest[ETH_ALEN], |
827 | int ttl, | 825 | int ttl, |
828 | struct in6_addr *daddr, | 826 | struct in6_addr *daddr, |
829 | struct in6_addr *saddr, | 827 | struct in6_addr *saddr, |
830 | __be16 udp_dst_port, | 828 | __be16 udp_dst_port, |
831 | __be32 vx_vni) | 829 | __be32 vx_vni) |
832 | { | 830 | { |
833 | int encap_size = VXLAN_HLEN + sizeof(struct ipv6hdr) + ETH_HLEN; | ||
834 | struct ethhdr *eth = (struct ethhdr *)buf; | 831 | struct ethhdr *eth = (struct ethhdr *)buf; |
835 | struct ipv6hdr *ip6h = (struct ipv6hdr *)((char *)eth + sizeof(struct ethhdr)); | 832 | struct ipv6hdr *ip6h = (struct ipv6hdr *)((char *)eth + sizeof(struct ethhdr)); |
836 | struct udphdr *udp = (struct udphdr *)((char *)ip6h + sizeof(struct ipv6hdr)); | 833 | struct udphdr *udp = (struct udphdr *)((char *)ip6h + sizeof(struct ipv6hdr)); |
@@ -852,8 +849,6 @@ static int gen_vxlan_header_ipv6(struct net_device *out_dev, | |||
852 | udp->dest = udp_dst_port; | 849 | udp->dest = udp_dst_port; |
853 | vxh->vx_flags = VXLAN_HF_VNI; | 850 | vxh->vx_flags = VXLAN_HF_VNI; |
854 | vxh->vx_vni = vxlan_vni_field(vx_vni); | 851 | vxh->vx_vni = vxlan_vni_field(vx_vni); |
855 | |||
856 | return encap_size; | ||
857 | } | 852 | } |
858 | 853 | ||
859 | static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv, | 854 | static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv, |
@@ -862,13 +857,20 @@ static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv, | |||
862 | struct net_device **out_dev) | 857 | struct net_device **out_dev) |
863 | { | 858 | { |
864 | int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size); | 859 | int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size); |
860 | int ipv4_encap_size = ETH_HLEN + sizeof(struct iphdr) + VXLAN_HLEN; | ||
865 | struct ip_tunnel_key *tun_key = &e->tun_info.key; | 861 | struct ip_tunnel_key *tun_key = &e->tun_info.key; |
866 | int encap_size, ttl, err; | ||
867 | struct neighbour *n = NULL; | 862 | struct neighbour *n = NULL; |
868 | struct flowi4 fl4 = {}; | 863 | struct flowi4 fl4 = {}; |
869 | char *encap_header; | 864 | char *encap_header; |
865 | int ttl, err; | ||
866 | |||
867 | if (max_encap_size < ipv4_encap_size) { | ||
868 | mlx5_core_warn(priv->mdev, "encap size %d too big, max supported is %d\n", | ||
869 | ipv4_encap_size, max_encap_size); | ||
870 | return -EOPNOTSUPP; | ||
871 | } | ||
870 | 872 | ||
871 | encap_header = kzalloc(max_encap_size, GFP_KERNEL); | 873 | encap_header = kzalloc(ipv4_encap_size, GFP_KERNEL); |
872 | if (!encap_header) | 874 | if (!encap_header) |
873 | return -ENOMEM; | 875 | return -ENOMEM; |
874 | 876 | ||
@@ -903,11 +905,11 @@ static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv, | |||
903 | 905 | ||
904 | switch (e->tunnel_type) { | 906 | switch (e->tunnel_type) { |
905 | case MLX5_HEADER_TYPE_VXLAN: | 907 | case MLX5_HEADER_TYPE_VXLAN: |
906 | encap_size = gen_vxlan_header_ipv4(*out_dev, encap_header, | 908 | gen_vxlan_header_ipv4(*out_dev, encap_header, |
907 | e->h_dest, ttl, | 909 | ipv4_encap_size, e->h_dest, ttl, |
908 | fl4.daddr, | 910 | fl4.daddr, |
909 | fl4.saddr, tun_key->tp_dst, | 911 | fl4.saddr, tun_key->tp_dst, |
910 | tunnel_id_to_key32(tun_key->tun_id)); | 912 | tunnel_id_to_key32(tun_key->tun_id)); |
911 | break; | 913 | break; |
912 | default: | 914 | default: |
913 | err = -EOPNOTSUPP; | 915 | err = -EOPNOTSUPP; |
@@ -915,7 +917,7 @@ static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv, | |||
915 | } | 917 | } |
916 | 918 | ||
917 | err = mlx5_encap_alloc(priv->mdev, e->tunnel_type, | 919 | err = mlx5_encap_alloc(priv->mdev, e->tunnel_type, |
918 | encap_size, encap_header, &e->encap_id); | 920 | ipv4_encap_size, encap_header, &e->encap_id); |
919 | out: | 921 | out: |
920 | if (err && n) | 922 | if (err && n) |
921 | neigh_release(n); | 923 | neigh_release(n); |
@@ -930,13 +932,20 @@ static int mlx5e_create_encap_header_ipv6(struct mlx5e_priv *priv, | |||
930 | 932 | ||
931 | { | 933 | { |
932 | int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size); | 934 | int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size); |
935 | int ipv6_encap_size = ETH_HLEN + sizeof(struct ipv6hdr) + VXLAN_HLEN; | ||
933 | struct ip_tunnel_key *tun_key = &e->tun_info.key; | 936 | struct ip_tunnel_key *tun_key = &e->tun_info.key; |
934 | int encap_size, err, ttl = 0; | ||
935 | struct neighbour *n = NULL; | 937 | struct neighbour *n = NULL; |
936 | struct flowi6 fl6 = {}; | 938 | struct flowi6 fl6 = {}; |
937 | char *encap_header; | 939 | char *encap_header; |
940 | int err, ttl = 0; | ||
941 | |||
942 | if (max_encap_size < ipv6_encap_size) { | ||
943 | mlx5_core_warn(priv->mdev, "encap size %d too big, max supported is %d\n", | ||
944 | ipv6_encap_size, max_encap_size); | ||
945 | return -EOPNOTSUPP; | ||
946 | } | ||
938 | 947 | ||
939 | encap_header = kzalloc(max_encap_size, GFP_KERNEL); | 948 | encap_header = kzalloc(ipv6_encap_size, GFP_KERNEL); |
940 | if (!encap_header) | 949 | if (!encap_header) |
941 | return -ENOMEM; | 950 | return -ENOMEM; |
942 | 951 | ||
@@ -972,11 +981,11 @@ static int mlx5e_create_encap_header_ipv6(struct mlx5e_priv *priv, | |||
972 | 981 | ||
973 | switch (e->tunnel_type) { | 982 | switch (e->tunnel_type) { |
974 | case MLX5_HEADER_TYPE_VXLAN: | 983 | case MLX5_HEADER_TYPE_VXLAN: |
975 | encap_size = gen_vxlan_header_ipv6(*out_dev, encap_header, | 984 | gen_vxlan_header_ipv6(*out_dev, encap_header, |
976 | e->h_dest, ttl, | 985 | ipv6_encap_size, e->h_dest, ttl, |
977 | &fl6.daddr, | 986 | &fl6.daddr, |
978 | &fl6.saddr, tun_key->tp_dst, | 987 | &fl6.saddr, tun_key->tp_dst, |
979 | tunnel_id_to_key32(tun_key->tun_id)); | 988 | tunnel_id_to_key32(tun_key->tun_id)); |
980 | break; | 989 | break; |
981 | default: | 990 | default: |
982 | err = -EOPNOTSUPP; | 991 | err = -EOPNOTSUPP; |
@@ -984,7 +993,7 @@ static int mlx5e_create_encap_header_ipv6(struct mlx5e_priv *priv, | |||
984 | } | 993 | } |
985 | 994 | ||
986 | err = mlx5_encap_alloc(priv->mdev, e->tunnel_type, | 995 | err = mlx5_encap_alloc(priv->mdev, e->tunnel_type, |
987 | encap_size, encap_header, &e->encap_id); | 996 | ipv6_encap_size, encap_header, &e->encap_id); |
988 | out: | 997 | out: |
989 | if (err && n) | 998 | if (err && n) |
990 | neigh_release(n); | 999 | neigh_release(n); |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index 307ec6c5fd3b..d111cebca9f1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | |||
@@ -911,8 +911,7 @@ int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode) | |||
911 | struct mlx5_core_dev *dev = devlink_priv(devlink); | 911 | struct mlx5_core_dev *dev = devlink_priv(devlink); |
912 | struct mlx5_eswitch *esw = dev->priv.eswitch; | 912 | struct mlx5_eswitch *esw = dev->priv.eswitch; |
913 | int num_vports = esw->enabled_vports; | 913 | int num_vports = esw->enabled_vports; |
914 | int err; | 914 | int err, vport; |
915 | int vport; | ||
916 | u8 mlx5_mode; | 915 | u8 mlx5_mode; |
917 | 916 | ||
918 | if (!MLX5_CAP_GEN(dev, vport_group_manager)) | 917 | if (!MLX5_CAP_GEN(dev, vport_group_manager)) |
@@ -921,9 +920,17 @@ int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode) | |||
921 | if (esw->mode == SRIOV_NONE) | 920 | if (esw->mode == SRIOV_NONE) |
922 | return -EOPNOTSUPP; | 921 | return -EOPNOTSUPP; |
923 | 922 | ||
924 | if (MLX5_CAP_ETH(dev, wqe_inline_mode) != | 923 | switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) { |
925 | MLX5_CAP_INLINE_MODE_VPORT_CONTEXT) | 924 | case MLX5_CAP_INLINE_MODE_NOT_REQUIRED: |
925 | if (mode == DEVLINK_ESWITCH_INLINE_MODE_NONE) | ||
926 | return 0; | ||
927 | /* fall through */ | ||
928 | case MLX5_CAP_INLINE_MODE_L2: | ||
929 | esw_warn(dev, "Inline mode can't be set\n"); | ||
926 | return -EOPNOTSUPP; | 930 | return -EOPNOTSUPP; |
931 | case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT: | ||
932 | break; | ||
933 | } | ||
927 | 934 | ||
928 | if (esw->offloads.num_flows > 0) { | 935 | if (esw->offloads.num_flows > 0) { |
929 | esw_warn(dev, "Can't set inline mode when flows are configured\n"); | 936 | esw_warn(dev, "Can't set inline mode when flows are configured\n"); |
@@ -966,18 +973,14 @@ int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode) | |||
966 | if (esw->mode == SRIOV_NONE) | 973 | if (esw->mode == SRIOV_NONE) |
967 | return -EOPNOTSUPP; | 974 | return -EOPNOTSUPP; |
968 | 975 | ||
969 | if (MLX5_CAP_ETH(dev, wqe_inline_mode) != | ||
970 | MLX5_CAP_INLINE_MODE_VPORT_CONTEXT) | ||
971 | return -EOPNOTSUPP; | ||
972 | |||
973 | return esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode); | 976 | return esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode); |
974 | } | 977 | } |
975 | 978 | ||
976 | int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, int nvfs, u8 *mode) | 979 | int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, int nvfs, u8 *mode) |
977 | { | 980 | { |
981 | u8 prev_mlx5_mode, mlx5_mode = MLX5_INLINE_MODE_L2; | ||
978 | struct mlx5_core_dev *dev = esw->dev; | 982 | struct mlx5_core_dev *dev = esw->dev; |
979 | int vport; | 983 | int vport; |
980 | u8 prev_mlx5_mode, mlx5_mode = MLX5_INLINE_MODE_L2; | ||
981 | 984 | ||
982 | if (!MLX5_CAP_GEN(dev, vport_group_manager)) | 985 | if (!MLX5_CAP_GEN(dev, vport_group_manager)) |
983 | return -EOPNOTSUPP; | 986 | return -EOPNOTSUPP; |
@@ -985,10 +988,18 @@ int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, int nvfs, u8 *mode) | |||
985 | if (esw->mode == SRIOV_NONE) | 988 | if (esw->mode == SRIOV_NONE) |
986 | return -EOPNOTSUPP; | 989 | return -EOPNOTSUPP; |
987 | 990 | ||
988 | if (MLX5_CAP_ETH(dev, wqe_inline_mode) != | 991 | switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) { |
989 | MLX5_CAP_INLINE_MODE_VPORT_CONTEXT) | 992 | case MLX5_CAP_INLINE_MODE_NOT_REQUIRED: |
990 | return -EOPNOTSUPP; | 993 | mlx5_mode = MLX5_INLINE_MODE_NONE; |
994 | goto out; | ||
995 | case MLX5_CAP_INLINE_MODE_L2: | ||
996 | mlx5_mode = MLX5_INLINE_MODE_L2; | ||
997 | goto out; | ||
998 | case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT: | ||
999 | goto query_vports; | ||
1000 | } | ||
991 | 1001 | ||
1002 | query_vports: | ||
992 | for (vport = 1; vport <= nvfs; vport++) { | 1003 | for (vport = 1; vport <= nvfs; vport++) { |
993 | mlx5_query_nic_vport_min_inline(dev, vport, &mlx5_mode); | 1004 | mlx5_query_nic_vport_min_inline(dev, vport, &mlx5_mode); |
994 | if (vport > 1 && prev_mlx5_mode != mlx5_mode) | 1005 | if (vport > 1 && prev_mlx5_mode != mlx5_mode) |
@@ -996,6 +1007,7 @@ int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, int nvfs, u8 *mode) | |||
996 | prev_mlx5_mode = mlx5_mode; | 1007 | prev_mlx5_mode = mlx5_mode; |
997 | } | 1008 | } |
998 | 1009 | ||
1010 | out: | ||
999 | *mode = mlx5_mode; | 1011 | *mode = mlx5_mode; |
1000 | return 0; | 1012 | return 0; |
1001 | } | 1013 | } |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index 60154a175bd3..0ad66324247f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c | |||
@@ -1029,7 +1029,7 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv, | |||
1029 | if (err) { | 1029 | if (err) { |
1030 | dev_err(&dev->pdev->dev, "Firmware over %d MS in initializing state, aborting\n", | 1030 | dev_err(&dev->pdev->dev, "Firmware over %d MS in initializing state, aborting\n", |
1031 | FW_INIT_TIMEOUT_MILI); | 1031 | FW_INIT_TIMEOUT_MILI); |
1032 | goto out_err; | 1032 | goto err_cmd_cleanup; |
1033 | } | 1033 | } |
1034 | 1034 | ||
1035 | err = mlx5_core_enable_hca(dev, 0); | 1035 | err = mlx5_core_enable_hca(dev, 0); |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/uar.c b/drivers/net/ethernet/mellanox/mlx5/core/uar.c index 2e6b0f290ddc..222b25908d01 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/uar.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/uar.c | |||
@@ -87,6 +87,7 @@ static void up_rel_func(struct kref *kref) | |||
87 | struct mlx5_uars_page *up = container_of(kref, struct mlx5_uars_page, ref_count); | 87 | struct mlx5_uars_page *up = container_of(kref, struct mlx5_uars_page, ref_count); |
88 | 88 | ||
89 | list_del(&up->list); | 89 | list_del(&up->list); |
90 | iounmap(up->map); | ||
90 | if (mlx5_cmd_free_uar(up->mdev, up->index)) | 91 | if (mlx5_cmd_free_uar(up->mdev, up->index)) |
91 | mlx5_core_warn(up->mdev, "failed to free uar index %d\n", up->index); | 92 | mlx5_core_warn(up->mdev, "failed to free uar index %d\n", up->index); |
92 | kfree(up->reg_bitmap); | 93 | kfree(up->reg_bitmap); |