author     Peter Waskiewicz <peter.p.waskiewicz.jr@intel.com>   2010-02-10 11:07:54 -0500
committer  David S. Miller <davem@davemloft.net>                2010-02-10 23:03:14 -0500
commit     9a713e7c7cca2f31c89367bb7b48310ab8a3e630
tree       5403bff53e13f4c41afeb50ef5f22aed6be13117 /drivers/net/ixgbe/ixgbe_ethtool.c
parent     15682bc488d4af8c9bb998844a94281025e0a333

ixgbe: Add support for the new ethtool n-tuple programming interface

This patch adds n-tuple filter programming to the 82599.

Signed-off-by: Peter P Waskiewicz Jr <peter.p.waskiewicz.jr@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/ixgbe/ixgbe_ethtool.c')
 -rw-r--r--  drivers/net/ixgbe/ixgbe_ethtool.c | 111
 1 file changed, 108 insertions(+), 3 deletions(-)
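A hypothetical userspace sketch (not part of this patch) of how the new .set_rx_ntuple hook gets exercised: fill a struct ethtool_rx_ntuple and hand it to the driver with the ETHTOOL_SRXNTUPLE command over the SIOCETHTOOL ioctl. Struct layout and constants are assumed to match include/linux/ethtool.h from this kernel series; the interface name, addresses, and queue number are placeholders.

/* Hypothetical example only: interface name, addresses and queue index are made up. */
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>
#include <net/if.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <linux/sockios.h>
#include <linux/ethtool.h>

int main(void)
{
        struct ethtool_rx_ntuple ntuple;
        struct ifreq ifr;
        int fd = socket(AF_INET, SOCK_DGRAM, 0);

        if (fd < 0) {
                perror("socket");
                return 1;
        }

        memset(&ntuple, 0, sizeof(ntuple));
        ntuple.cmd = ETHTOOL_SRXNTUPLE;
        ntuple.fs.flow_type = TCP_V4_FLOW;

        /* Match TCP/IPv4 traffic destined to 192.168.1.10:80. */
        ntuple.fs.h_u.tcp_ip4_spec.ip4dst = inet_addr("192.168.1.10");
        ntuple.fs.h_u.tcp_ip4_spec.pdst = htons(80);

        /* A set mask bit means "don't care": ignore source IP and port. */
        ntuple.fs.m_u.tcp_ip4_spec.ip4src = 0xffffffff;
        ntuple.fs.m_u.tcp_ip4_spec.psrc = 0xffff;

        /* Fully mask VLAN and flex data so the driver skips them. */
        ntuple.fs.vlan_tag_mask = 0xffff;
        ntuple.fs.data_mask = 0xffff;

        /* Steer matches to RX queue 4; ETHTOOL_RXNTUPLE_ACTION_DROP would drop instead. */
        ntuple.fs.action = 4;

        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
        ifr.ifr_data = (char *)&ntuple;

        if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
                perror("ETHTOOL_SRXNTUPLE");

        return 0;
}

Mask semantics follow from the driver code in the diff below: each header field is programmed as spec & ~mask, the VLAN tag is programmed only when vlan_tag_mask is zero, only the low 16 bits of the flex data are used, and ETHTOOL_RXNTUPLE_ACTION_DROP is mapped to the last RX queue (MAX_RX_QUEUES - 1).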
diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c
index 07a9410c08d4..0d234346a4ea 100644
--- a/drivers/net/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ixgbe/ixgbe_ethtool.c
@@ -979,6 +979,9 @@ static int ixgbe_get_sset_count(struct net_device *netdev, int sset)
                 return IXGBE_TEST_LEN;
         case ETH_SS_STATS:
                 return IXGBE_STATS_LEN;
+        case ETH_SS_NTUPLE_FILTERS:
+                return (ETHTOOL_MAX_NTUPLE_LIST_ENTRY *
+                        ETHTOOL_MAX_NTUPLE_STRING_PER_ENTRY);
         default:
                 return -EOPNOTSUPP;
         }
@@ -2150,23 +2153,124 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
 static int ixgbe_set_flags(struct net_device *netdev, u32 data)
 {
         struct ixgbe_adapter *adapter = netdev_priv(netdev);
+        bool need_reset = false;
 
         ethtool_op_set_flags(netdev, data);
 
-        if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE))
-                return 0;
-
         /* if state changes we need to update adapter->flags and reset */
         if ((!!(data & ETH_FLAG_LRO)) !=
             (!!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED))) {
                 adapter->flags2 ^= IXGBE_FLAG2_RSC_ENABLED;
+                need_reset = true;
+        }
+
+        /*
+         * Check if Flow Director n-tuple support was enabled or disabled. If
+         * the state changed, we need to reset.
+         */
+        if ((adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) &&
+            (!(data & ETH_FLAG_NTUPLE))) {
+                /* turn off Flow Director perfect, set hash and reset */
+                adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
+                adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
+                need_reset = true;
+        } else if ((!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) &&
+                   (data & ETH_FLAG_NTUPLE)) {
+                /* turn off Flow Director hash, enable perfect and reset */
+                adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
+                adapter->flags |= IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
+                need_reset = true;
+        } else {
+                /* no state change */
+        }
+
+        if (need_reset) {
                 if (netif_running(netdev))
                         ixgbe_reinit_locked(adapter);
                 else
                         ixgbe_reset(adapter);
         }
+
         return 0;
+}
+
+static int ixgbe_set_rx_ntuple(struct net_device *dev,
+                               struct ethtool_rx_ntuple *cmd)
+{
+        struct ixgbe_adapter *adapter = netdev_priv(dev);
+        struct ethtool_rx_ntuple_flow_spec fs = cmd->fs;
+        struct ixgbe_atr_input input_struct;
+        struct ixgbe_atr_input_masks input_masks;
+        int target_queue;
+
+        if (adapter->hw.mac.type == ixgbe_mac_82598EB)
+                return -EOPNOTSUPP;
+
+        /*
+         * Don't allow programming if the action is a queue greater than
+         * the number of online Tx queues.
+         */
+        if ((fs.action >= adapter->num_tx_queues) ||
+            (fs.action < ETHTOOL_RXNTUPLE_ACTION_DROP))
+                return -EINVAL;
+
+        memset(&input_struct, 0, sizeof(struct ixgbe_atr_input));
+        memset(&input_masks, 0, sizeof(struct ixgbe_atr_input_masks));
+
+        input_masks.src_ip_mask = fs.m_u.tcp_ip4_spec.ip4src;
+        input_masks.dst_ip_mask = fs.m_u.tcp_ip4_spec.ip4dst;
+        input_masks.src_port_mask = fs.m_u.tcp_ip4_spec.psrc;
+        input_masks.dst_port_mask = fs.m_u.tcp_ip4_spec.pdst;
+        input_masks.vlan_id_mask = fs.vlan_tag_mask;
+        /* only use the lowest 2 bytes for flex bytes */
+        input_masks.data_mask = (fs.data_mask & 0xffff);
+
+        switch (fs.flow_type) {
+        case TCP_V4_FLOW:
+                ixgbe_atr_set_l4type_82599(&input_struct, IXGBE_ATR_L4TYPE_TCP);
+                break;
+        case UDP_V4_FLOW:
+                ixgbe_atr_set_l4type_82599(&input_struct, IXGBE_ATR_L4TYPE_UDP);
+                break;
+        case SCTP_V4_FLOW:
+                ixgbe_atr_set_l4type_82599(&input_struct, IXGBE_ATR_L4TYPE_SCTP);
+                break;
+        default:
+                return -1;
+        }
 
+        /* Mask bits from the inputs based on user-supplied mask */
+        ixgbe_atr_set_src_ipv4_82599(&input_struct,
+                    (fs.h_u.tcp_ip4_spec.ip4src & ~fs.m_u.tcp_ip4_spec.ip4src));
+        ixgbe_atr_set_dst_ipv4_82599(&input_struct,
+                    (fs.h_u.tcp_ip4_spec.ip4dst & ~fs.m_u.tcp_ip4_spec.ip4dst));
+        /* 82599 expects these to be byte-swapped for perfect filtering */
+        ixgbe_atr_set_src_port_82599(&input_struct,
+               ((ntohs(fs.h_u.tcp_ip4_spec.psrc)) & ~fs.m_u.tcp_ip4_spec.psrc));
+        ixgbe_atr_set_dst_port_82599(&input_struct,
+               ((ntohs(fs.h_u.tcp_ip4_spec.pdst)) & ~fs.m_u.tcp_ip4_spec.pdst));
+
+        /* VLAN and Flex bytes are either completely masked or not */
+        if (!fs.vlan_tag_mask)
+                ixgbe_atr_set_vlan_id_82599(&input_struct, fs.vlan_tag);
+
+        if (!input_masks.data_mask)
+                /* make sure we only use the first 2 bytes of user data */
+                ixgbe_atr_set_flex_byte_82599(&input_struct,
+                                              (fs.data & 0xffff));
+
+        /* determine if we need to drop or route the packet */
+        if (fs.action == ETHTOOL_RXNTUPLE_ACTION_DROP)
+                target_queue = MAX_RX_QUEUES - 1;
+        else
+                target_queue = fs.action;
+
+        spin_lock(&adapter->fdir_perfect_lock);
+        ixgbe_fdir_add_perfect_filter_82599(&adapter->hw, &input_struct,
+                                            &input_masks, 0, target_queue);
+        spin_unlock(&adapter->fdir_perfect_lock);
+
+        return 0;
 }
 
 static const struct ethtool_ops ixgbe_ethtool_ops = {
@@ -2204,6 +2308,7 @@ static const struct ethtool_ops ixgbe_ethtool_ops = {
         .set_coalesce = ixgbe_set_coalesce,
         .get_flags = ethtool_op_get_flags,
         .set_flags = ixgbe_set_flags,
+        .set_rx_ntuple = ixgbe_set_rx_ntuple,
 };
 
 void ixgbe_set_ethtool_ops(struct net_device *netdev)