aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/net/ethernet/sfc/rx.c
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/net/ethernet/sfc/rx.c')
-rw-r--r--drivers/net/ethernet/sfc/rx.c102
1 file changed, 41 insertions, 61 deletions
diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c
index 8956995b2fe7..02b0b5272c14 100644
--- a/drivers/net/ethernet/sfc/rx.c
+++ b/drivers/net/ethernet/sfc/rx.c
@@ -842,33 +842,18 @@ int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
842 struct efx_nic *efx = netdev_priv(net_dev); 842 struct efx_nic *efx = netdev_priv(net_dev);
843 struct efx_channel *channel; 843 struct efx_channel *channel;
844 struct efx_filter_spec spec; 844 struct efx_filter_spec spec;
845 const __be16 *ports; 845 struct flow_keys fk;
846 __be16 ether_type;
847 int nhoff;
848 int rc; 846 int rc;
849 847
850 /* The core RPS/RFS code has already parsed and validated 848 if (flow_id == RPS_FLOW_ID_INVALID)
851 * VLAN, IP and transport headers. We assume they are in the 849 return -EINVAL;
852 * header area.
853 */
854
855 if (skb->protocol == htons(ETH_P_8021Q)) {
856 const struct vlan_hdr *vh =
857 (const struct vlan_hdr *)skb->data;
858 850
859 /* We can't filter on the IP 5-tuple and the vlan 851 if (!skb_flow_dissect_flow_keys(skb, &fk, 0))
860 * together, so just strip the vlan header and filter 852 return -EPROTONOSUPPORT;
861 * on the IP part.
862 */
863 EFX_BUG_ON_PARANOID(skb_headlen(skb) < sizeof(*vh));
864 ether_type = vh->h_vlan_encapsulated_proto;
865 nhoff = sizeof(struct vlan_hdr);
866 } else {
867 ether_type = skb->protocol;
868 nhoff = 0;
869 }
870 853
871 if (ether_type != htons(ETH_P_IP) && ether_type != htons(ETH_P_IPV6)) 854 if (fk.basic.n_proto != htons(ETH_P_IP) && fk.basic.n_proto != htons(ETH_P_IPV6))
855 return -EPROTONOSUPPORT;
856 if (fk.control.flags & FLOW_DIS_IS_FRAGMENT)
872 return -EPROTONOSUPPORT; 857 return -EPROTONOSUPPORT;
873 858
874 efx_filter_init_rx(&spec, EFX_FILTER_PRI_HINT, 859 efx_filter_init_rx(&spec, EFX_FILTER_PRI_HINT,
@@ -878,56 +863,41 @@ int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
878 EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO | 863 EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO |
879 EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT | 864 EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT |
880 EFX_FILTER_MATCH_REM_HOST | EFX_FILTER_MATCH_REM_PORT; 865 EFX_FILTER_MATCH_REM_HOST | EFX_FILTER_MATCH_REM_PORT;
881 spec.ether_type = ether_type; 866 spec.ether_type = fk.basic.n_proto;
882 867 spec.ip_proto = fk.basic.ip_proto;
883 if (ether_type == htons(ETH_P_IP)) { 868
884 const struct iphdr *ip = 869 if (fk.basic.n_proto == htons(ETH_P_IP)) {
885 (const struct iphdr *)(skb->data + nhoff); 870 spec.rem_host[0] = fk.addrs.v4addrs.src;
886 871 spec.loc_host[0] = fk.addrs.v4addrs.dst;
887 EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + sizeof(*ip));
888 if (ip_is_fragment(ip))
889 return -EPROTONOSUPPORT;
890 spec.ip_proto = ip->protocol;
891 spec.rem_host[0] = ip->saddr;
892 spec.loc_host[0] = ip->daddr;
893 EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + 4 * ip->ihl + 4);
894 ports = (const __be16 *)(skb->data + nhoff + 4 * ip->ihl);
895 } else { 872 } else {
896 const struct ipv6hdr *ip6 = 873 memcpy(spec.rem_host, &fk.addrs.v6addrs.src, sizeof(struct in6_addr));
897 (const struct ipv6hdr *)(skb->data + nhoff); 874 memcpy(spec.loc_host, &fk.addrs.v6addrs.dst, sizeof(struct in6_addr));
898
899 EFX_BUG_ON_PARANOID(skb_headlen(skb) <
900 nhoff + sizeof(*ip6) + 4);
901 spec.ip_proto = ip6->nexthdr;
902 memcpy(spec.rem_host, &ip6->saddr, sizeof(ip6->saddr));
903 memcpy(spec.loc_host, &ip6->daddr, sizeof(ip6->daddr));
904 ports = (const __be16 *)(ip6 + 1);
905 } 875 }
906 876
907 spec.rem_port = ports[0]; 877 spec.rem_port = fk.ports.src;
908 spec.loc_port = ports[1]; 878 spec.loc_port = fk.ports.dst;
909 879
910 rc = efx->type->filter_rfs_insert(efx, &spec); 880 rc = efx->type->filter_rfs_insert(efx, &spec);
911 if (rc < 0) 881 if (rc < 0)
912 return rc; 882 return rc;
913 883
914 /* Remember this so we can check whether to expire the filter later */ 884 /* Remember this so we can check whether to expire the filter later */
915 efx->rps_flow_id[rc] = flow_id; 885 channel = efx_get_channel(efx, rxq_index);
916 channel = efx_get_channel(efx, skb_get_rx_queue(skb)); 886 channel->rps_flow_id[rc] = flow_id;
917 ++channel->rfs_filters_added; 887 ++channel->rfs_filters_added;
918 888
919 if (ether_type == htons(ETH_P_IP)) 889 if (spec.ether_type == htons(ETH_P_IP))
920 netif_info(efx, rx_status, efx->net_dev, 890 netif_info(efx, rx_status, efx->net_dev,
921 "steering %s %pI4:%u:%pI4:%u to queue %u [flow %u filter %d]\n", 891 "steering %s %pI4:%u:%pI4:%u to queue %u [flow %u filter %d]\n",
922 (spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP", 892 (spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
923 spec.rem_host, ntohs(ports[0]), spec.loc_host, 893 spec.rem_host, ntohs(spec.rem_port), spec.loc_host,
924 ntohs(ports[1]), rxq_index, flow_id, rc); 894 ntohs(spec.loc_port), rxq_index, flow_id, rc);
925 else 895 else
926 netif_info(efx, rx_status, efx->net_dev, 896 netif_info(efx, rx_status, efx->net_dev,
927 "steering %s [%pI6]:%u:[%pI6]:%u to queue %u [flow %u filter %d]\n", 897 "steering %s [%pI6]:%u:[%pI6]:%u to queue %u [flow %u filter %d]\n",
928 (spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP", 898 (spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
929 spec.rem_host, ntohs(ports[0]), spec.loc_host, 899 spec.rem_host, ntohs(spec.rem_port), spec.loc_host,
930 ntohs(ports[1]), rxq_index, flow_id, rc); 900 ntohs(spec.loc_port), rxq_index, flow_id, rc);
931 901
932 return rc; 902 return rc;
933} 903}
@@ -935,24 +905,34 @@ int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
935bool __efx_filter_rfs_expire(struct efx_nic *efx, unsigned int quota) 905bool __efx_filter_rfs_expire(struct efx_nic *efx, unsigned int quota)
936{ 906{
937 bool (*expire_one)(struct efx_nic *efx, u32 flow_id, unsigned int index); 907 bool (*expire_one)(struct efx_nic *efx, u32 flow_id, unsigned int index);
938 unsigned int index, size; 908 unsigned int channel_idx, index, size;
939 u32 flow_id; 909 u32 flow_id;
940 910
941 if (!spin_trylock_bh(&efx->filter_lock)) 911 if (!spin_trylock_bh(&efx->filter_lock))
942 return false; 912 return false;
943 913
944 expire_one = efx->type->filter_rfs_expire_one; 914 expire_one = efx->type->filter_rfs_expire_one;
915 channel_idx = efx->rps_expire_channel;
945 index = efx->rps_expire_index; 916 index = efx->rps_expire_index;
946 size = efx->type->max_rx_ip_filters; 917 size = efx->type->max_rx_ip_filters;
947 while (quota--) { 918 while (quota--) {
948 flow_id = efx->rps_flow_id[index]; 919 struct efx_channel *channel = efx_get_channel(efx, channel_idx);
949 if (expire_one(efx, flow_id, index)) 920 flow_id = channel->rps_flow_id[index];
921
922 if (flow_id != RPS_FLOW_ID_INVALID &&
923 expire_one(efx, flow_id, index)) {
950 netif_info(efx, rx_status, efx->net_dev, 924 netif_info(efx, rx_status, efx->net_dev,
951 "expired filter %d [flow %u]\n", 925 "expired filter %d [queue %u flow %u]\n",
952 index, flow_id); 926 index, channel_idx, flow_id);
953 if (++index == size) 927 channel->rps_flow_id[index] = RPS_FLOW_ID_INVALID;
928 }
929 if (++index == size) {
930 if (++channel_idx == efx->n_channels)
931 channel_idx = 0;
954 index = 0; 932 index = 0;
933 }
955 } 934 }
935 efx->rps_expire_channel = channel_idx;
956 efx->rps_expire_index = index; 936 efx->rps_expire_index = index;
957 937
958 spin_unlock_bh(&efx->filter_lock); 938 spin_unlock_bh(&efx->filter_lock);