author     John Fastabend <john.r.fastabend@intel.com>    2012-10-24 04:13:09 -0400
committer  David S. Miller <davem@davemloft.net>           2012-10-31 13:18:29 -0400
commit     815cccbf10b27115fb3e5827bef26768616e5e27
tree       c8c4a2a2bc95bc89926c91c67c98b020be1efaa7
parent     2469ffd723f76ac2d3ce3d4f31ee31ee0a06cd38
ixgbe: add setlink, getlink support to ixgbe and ixgbevf
This adds net device ops support for managing the embedded hardware bridge on ixgbe devices. With this patch the bridge mode can be toggled between VEB and VEPA, either to support stacking macvlan devices or to use the embedded switch without any software component in 802.1Qbg/br environments.

Additionally, this adds source address pruning to the ixgbevf driver to drop any frames sent back from a reflective relay on the switch. This is required because the existing hardware does not do the pruning itself. Without it, frames carrying the VF's own source MAC get pushed into the stack, which is invalid per the 802.1Qbg VEPA definition.

Signed-off-by: John Fastabend <john.r.fastabend@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
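For context, the new ndo_bridge_setlink hook is reached through an RTM_SETLINK message whose ifi_family is AF_BRIDGE and whose IFLA_AF_SPEC nest carries IFLA_BRIDGE_FLAGS and IFLA_BRIDGE_MODE, the same layout that ndo_dflt_bridge_getlink emits below. The following userspace sketch is not part of the patch; it is a hypothetical, minimal example (interface name taken from argv[1], run as root on a kernel that has this series applied) of switching the embedded bridge of a PF into VEB mode.

/* Hypothetical example, not part of this patch: set the embedded bridge
 * of the PF named in argv[1] to VEB via RTM_SETLINK with AF_BRIDGE.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <net/if.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>
#include <linux/if_bridge.h>

int main(int argc, char **argv)
{
        struct {
                struct nlmsghdr  nlh;
                struct ifinfomsg ifm;
                char             buf[64];
        } req;
        struct rtattr *afspec, *rta;
        __u16 flags = BRIDGE_FLAGS_SELF;        /* address the port itself (the PF) */
        __u16 mode  = BRIDGE_MODE_VEB;          /* or BRIDGE_MODE_VEPA */
        int fd;

        if (argc < 2) {
                fprintf(stderr, "usage: %s <ifname>\n", argv[0]);
                return 1;
        }

        memset(&req, 0, sizeof(req));
        req.nlh.nlmsg_len   = NLMSG_LENGTH(sizeof(struct ifinfomsg));
        req.nlh.nlmsg_type  = RTM_SETLINK;
        req.nlh.nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
        req.ifm.ifi_family  = AF_BRIDGE;        /* routed to the PF_BRIDGE setlink handler */
        req.ifm.ifi_index   = if_nametoindex(argv[1]);

        /* Build the IFLA_AF_SPEC nest holding IFLA_BRIDGE_FLAGS and
         * IFLA_BRIDGE_MODE, mirroring what ixgbe_ndo_bridge_setlink()
         * walks with nla_for_each_nested(). */
        afspec = (struct rtattr *)((char *)&req + NLMSG_ALIGN(req.nlh.nlmsg_len));
        afspec->rta_type = IFLA_AF_SPEC;
        afspec->rta_len  = RTA_LENGTH(0);

        rta = (struct rtattr *)((char *)afspec + RTA_ALIGN(afspec->rta_len));
        rta->rta_type = IFLA_BRIDGE_FLAGS;
        rta->rta_len  = RTA_LENGTH(sizeof(flags));
        memcpy(RTA_DATA(rta), &flags, sizeof(flags));
        afspec->rta_len += RTA_ALIGN(rta->rta_len);

        rta = (struct rtattr *)((char *)afspec + RTA_ALIGN(afspec->rta_len));
        rta->rta_type = IFLA_BRIDGE_MODE;
        rta->rta_len  = RTA_LENGTH(sizeof(mode));
        memcpy(RTA_DATA(rta), &mode, sizeof(mode));
        afspec->rta_len += RTA_ALIGN(rta->rta_len);

        req.nlh.nlmsg_len = NLMSG_ALIGN(req.nlh.nlmsg_len) + RTA_ALIGN(afspec->rta_len);

        fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
        if (fd < 0 || send(fd, &req, req.nlh.nlmsg_len, 0) < 0) {
                perror("rtnetlink");
                return 1;
        }
        close(fd);              /* the kernel ACK is ignored in this sketch */
        return 0;
}

With BRIDGE_FLAGS_SELF the request is delivered to the device's own ndo_bridge_setlink rather than to a software bridge master; reading the mode back should go through RTM_GETLINK, which lands in ndo_dflt_bridge_getlink as added below.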
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_main.c      |  59
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c     |   3
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c  |  10
-rw-r--r--  include/linux/rtnetlink.h                          |   3
-rw-r--r--  net/core/rtnetlink.c                               |  50
5 files changed, 122 insertions(+), 3 deletions(-)
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 88d636a7459c..9a88e01216bb 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -44,6 +44,7 @@
 #include <linux/ethtool.h>
 #include <linux/if.h>
 #include <linux/if_vlan.h>
+#include <linux/if_bridge.h>
 #include <linux/prefetch.h>
 #include <scsi/fc/fc_fcoe.h>
 
@@ -3224,7 +3225,6 @@ static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter)
         IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset ^ 1), reg_offset - 1);
         IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), (~0) << vf_shift);
         IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset ^ 1), reg_offset - 1);
-        IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
 
         /* Map PF MAC address in RAR Entry 0 to first pool following VFs */
         hw->mac.ops.set_vmdq(hw, 0, VMDQ_P(0));
@@ -3247,8 +3247,6 @@ static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter)
 
         IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
 
-        /* enable Tx loopback for VF/PF communication */
-        IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
 
         /* Enable MAC Anti-Spoofing */
         hw->mac.ops.set_mac_anti_spoofing(hw, (adapter->num_vfs != 0),
@@ -7025,6 +7023,59 @@ static int ixgbe_ndo_fdb_dump(struct sk_buff *skb,
         return idx;
 }
 
+static int ixgbe_ndo_bridge_setlink(struct net_device *dev,
+                                    struct nlmsghdr *nlh)
+{
+        struct ixgbe_adapter *adapter = netdev_priv(dev);
+        struct nlattr *attr, *br_spec;
+        int rem;
+
+        if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
+                return -EOPNOTSUPP;
+
+        br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
+
+        nla_for_each_nested(attr, br_spec, rem) {
+                __u16 mode;
+                u32 reg = 0;
+
+                if (nla_type(attr) != IFLA_BRIDGE_MODE)
+                        continue;
+
+                mode = nla_get_u16(attr);
+                if (mode == BRIDGE_MODE_VEPA)
+                        reg = 0;
+                else if (mode == BRIDGE_MODE_VEB)
+                        reg = IXGBE_PFDTXGSWC_VT_LBEN;
+                else
+                        return -EINVAL;
+
+                IXGBE_WRITE_REG(&adapter->hw, IXGBE_PFDTXGSWC, reg);
+
+                e_info(drv, "enabling bridge mode: %s\n",
+                       mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
+        }
+
+        return 0;
+}
+
+static int ixgbe_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
+                                    struct net_device *dev)
+{
+        struct ixgbe_adapter *adapter = netdev_priv(dev);
+        u16 mode;
+
+        if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
+                return 0;
+
+        if (IXGBE_READ_REG(&adapter->hw, IXGBE_PFDTXGSWC) & 1)
+                mode = BRIDGE_MODE_VEB;
+        else
+                mode = BRIDGE_MODE_VEPA;
+
+        return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode);
+}
+
 static const struct net_device_ops ixgbe_netdev_ops = {
         .ndo_open               = ixgbe_open,
         .ndo_stop               = ixgbe_close,
@@ -7064,6 +7115,8 @@ static const struct net_device_ops ixgbe_netdev_ops = {
         .ndo_fdb_add            = ixgbe_ndo_fdb_add,
         .ndo_fdb_del            = ixgbe_ndo_fdb_del,
         .ndo_fdb_dump           = ixgbe_ndo_fdb_dump,
+        .ndo_bridge_setlink     = ixgbe_ndo_bridge_setlink,
+        .ndo_bridge_getlink     = ixgbe_ndo_bridge_getlink,
 };
 
 /**
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index 96876b7442b1..7e3ac28ffba8 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -117,6 +117,9 @@ void ixgbe_enable_sriov(struct ixgbe_adapter *adapter,
                 }
         }
 
+        /* Initialize default switching mode VEB */
+        IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
+
         /* If call to enable VFs succeeded then allocate memory
          * for per VF control structures.
          */
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 07d7eaba6f1b..ac6a76deb01d 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -478,6 +478,16 @@ static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
                 }
                 skb->protocol = eth_type_trans(skb, rx_ring->netdev);
 
+                /* Workaround hardware that can't do proper VEPA multicast
+                 * source pruning.
+                 */
+                if ((skb->pkt_type & (PACKET_BROADCAST | PACKET_MULTICAST)) &&
+                    !(compare_ether_addr(adapter->netdev->dev_addr,
+                                         eth_hdr(skb)->h_source))) {
+                        dev_kfree_skb_irq(skb);
+                        goto next_desc;
+                }
+
                 ixgbevf_receive_skb(q_vector, skb, staterr, rx_desc);
 
 next_desc:
diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h
index 7002bbfd5d4a..489dd7bb28ec 100644
--- a/include/linux/rtnetlink.h
+++ b/include/linux/rtnetlink.h
@@ -69,4 +69,7 @@ extern int ndo_dflt_fdb_dump(struct sk_buff *skb,
                              struct netlink_callback *cb,
                              struct net_device *dev,
                              int idx);
+
+extern int ndo_dflt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
+                                   struct net_device *dev, u16 mode);
 #endif  /* __LINUX_RTNETLINK_H */
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 8d2af0f77d36..51dc58ff0091 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -2252,6 +2252,56 @@ static int rtnl_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb)
         return skb->len;
 }
 
+int ndo_dflt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
+                            struct net_device *dev, u16 mode)
+{
+        struct nlmsghdr *nlh;
+        struct ifinfomsg *ifm;
+        struct nlattr *br_afspec;
+        u8 operstate = netif_running(dev) ? dev->operstate : IF_OPER_DOWN;
+
+        nlh = nlmsg_put(skb, pid, seq, RTM_NEWLINK, sizeof(*ifm), NLM_F_MULTI);
+        if (nlh == NULL)
+                return -EMSGSIZE;
+
+        ifm = nlmsg_data(nlh);
+        ifm->ifi_family = AF_BRIDGE;
+        ifm->__ifi_pad = 0;
+        ifm->ifi_type = dev->type;
+        ifm->ifi_index = dev->ifindex;
+        ifm->ifi_flags = dev_get_flags(dev);
+        ifm->ifi_change = 0;
+
+
+        if (nla_put_string(skb, IFLA_IFNAME, dev->name) ||
+            nla_put_u32(skb, IFLA_MTU, dev->mtu) ||
+            nla_put_u8(skb, IFLA_OPERSTATE, operstate) ||
+            (dev->master &&
+             nla_put_u32(skb, IFLA_MASTER, dev->master->ifindex)) ||
+            (dev->addr_len &&
+             nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr)) ||
+            (dev->ifindex != dev->iflink &&
+             nla_put_u32(skb, IFLA_LINK, dev->iflink)))
+                goto nla_put_failure;
+
+        br_afspec = nla_nest_start(skb, IFLA_AF_SPEC);
+        if (!br_afspec)
+                goto nla_put_failure;
+
+        if (nla_put_u16(skb, IFLA_BRIDGE_FLAGS, BRIDGE_FLAGS_SELF) ||
+            nla_put_u16(skb, IFLA_BRIDGE_MODE, mode)) {
+                nla_nest_cancel(skb, br_afspec);
+                goto nla_put_failure;
+        }
+        nla_nest_end(skb, br_afspec);
+
+        return nlmsg_end(skb, nlh);
+nla_put_failure:
+        nlmsg_cancel(skb, nlh);
+        return -EMSGSIZE;
+}
+EXPORT_SYMBOL(ndo_dflt_bridge_getlink);
+
 static int rtnl_bridge_getlink(struct sk_buff *skb, struct netlink_callback *cb)
 {
         struct net *net = sock_net(skb->sk);