author	Scott Feldman <sfeldma@gmail.com>	2015-03-06 00:21:20 -0500
committer	David S. Miller <davem@davemloft.net>	2015-03-06 00:24:58 -0500
commit	c1beeef7a32a791a60e2adcc217d4461cd1e25d1 (patch)
tree	fa5bf3aaa17cb488284366d10a314e8abfb66583 /drivers/net/ethernet
parent	8e05fd7166c6123334b7a739a697d677747aa462 (diff)
rocker: implement IPv4 fib offloading
The driver implements the ndo_switch_fib_ipv4_add/del ops to add/del/mod IPv4 routes to/from the switchdev device. Once a route is added to the device and the route's nexthops are resolved to neighbor MAC addresses, the device, rather than the kernel, forwards matching pkts. This offloads the L3 forwarding path from the kernel to the device. Note that the control and management planes are still managed by Linux; only the data plane is offloaded. Standard routing control protocols such as OSPF and BGP run on Linux and manage the kernel's FIB via standard rtm netlink msgs...nothing changes here.

A new hash table is added to rocker to track neighbors. The driver listens for neighbor update events using the netevent notifier NETEVENT_NEIGH_UPDATE. Any ARP table updates for ports on this device are recorded in this table. Routes installed to the device with nexthops that reference neighbors in this table are "qualified". For a route with nexthops not resolved in the table, the kernel is asked to resolve the nexthop.

The driver uses fib_info->fib_priority for the priority field in rocker's unicast routing table. The device can only forward pkts matching a route dst to resolved nexthops.

Currently, the device only supports single-path routes (i.e. routes with one nexthop). Equal Cost Multipath (ECMP) route support will be added in followup patches.

This patch adds driver support for unicast IPv4 routing only. Followup patches will add driver and infrastructure support for IPv6 routing and multicast routing.

Signed-off-by: Scott Feldman <sfeldma@gmail.com>
Signed-off-by: Jiri Pirko <jiri@resnulli.us>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/ethernet')
 drivers/net/ethernet/rocker/rocker.c | 483
 1 file changed, 436 insertions(+), 47 deletions(-)
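Before the diff, a quick orientation on the two new ndo ops described in the commit message: they are ordinary per-netdev callbacks, so a switchdev driver only needs to translate them into its own route-programming path. The sketch below assumes that; the ndo signatures match the patch, while my_fib4_offload() and MY_FLAG_REMOVE are hypothetical stand-ins for the driver's own helper and remove flag (rocker_port_fib_ipv4() and ROCKER_OP_FLAG_REMOVE in this patch).

#include <linux/bitops.h>
#include <linux/netdevice.h>
#include <net/ip_fib.h>

#define MY_FLAG_REMOVE	BIT(0)	/* hypothetical, mirrors ROCKER_OP_FLAG_REMOVE */

/* Hypothetical stub: a real driver programs (or removes) the route in its
 * unicast routing table here, as rocker_port_fib_ipv4() does in the diff.
 */
static int my_fib4_offload(void *port_priv, __be32 dst, int dst_len,
			   struct fib_info *fi, u32 tb_id, int flags)
{
	return 0;
}

static int my_switch_fib_ipv4_add(struct net_device *dev,
				  __be32 dst, int dst_len,
				  struct fib_info *fi,
				  u8 tos, u8 type, u32 tb_id)
{
	return my_fib4_offload(netdev_priv(dev), dst, dst_len, fi, tb_id, 0);
}

static int my_switch_fib_ipv4_del(struct net_device *dev,
				  __be32 dst, int dst_len,
				  struct fib_info *fi,
				  u8 tos, u8 type, u32 tb_id)
{
	return my_fib4_offload(netdev_priv(dev), dst, dst_len, fi, tb_id,
			       MY_FLAG_REMOVE);
}

static const struct net_device_ops my_netdev_ops = {
	.ndo_switch_fib_ipv4_add	= my_switch_fib_ipv4_add,
	.ndo_switch_fib_ipv4_del	= my_switch_fib_ipv4_del,
};

Because the FIB itself is still owned by the kernel, routes added with iproute2 or learned by OSPF/BGP reach these callbacks with no userspace changes.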
diff --git a/drivers/net/ethernet/rocker/rocker.c b/drivers/net/ethernet/rocker/rocker.c
index a5d1e6ea7d58..d04d3b374e31 100644
--- a/drivers/net/ethernet/rocker/rocker.c
+++ b/drivers/net/ethernet/rocker/rocker.c
@@ -32,6 +32,9 @@
 #include <linux/bitops.h>
 #include <net/switchdev.h>
 #include <net/rtnetlink.h>
+#include <net/ip_fib.h>
+#include <net/netevent.h>
+#include <net/arp.h>
 #include <asm-generic/io-64-nonatomic-lo-hi.h>
 #include <generated/utsrelease.h>
 
@@ -111,9 +114,10 @@ struct rocker_flow_tbl_key {
 
 struct rocker_flow_tbl_entry {
 	struct hlist_node entry;
-	u32 ref_count;
+	u32 cmd;
 	u64 cookie;
 	struct rocker_flow_tbl_key key;
+	size_t key_len;
 	u32 key_crc32; /* key */
 };
 
@@ -161,6 +165,16 @@ struct rocker_internal_vlan_tbl_entry {
 	__be16 vlan_id;
 };
 
+struct rocker_neigh_tbl_entry {
+	struct hlist_node entry;
+	__be32 ip_addr; /* key */
+	struct net_device *dev;
+	u32 ref_count;
+	u32 index;
+	u8 eth_dst[ETH_ALEN];
+	bool ttl_check;
+};
+
 struct rocker_desc_info {
 	char *data; /* mapped */
 	size_t data_size;
@@ -234,6 +248,9 @@ struct rocker {
 	unsigned long internal_vlan_bitmap[ROCKER_INTERNAL_VLAN_BITMAP_LEN];
 	DECLARE_HASHTABLE(internal_vlan_tbl, 8);
 	spinlock_t internal_vlan_tbl_lock;
+	DECLARE_HASHTABLE(neigh_tbl, 16);
+	spinlock_t neigh_tbl_lock;
+	u32 neigh_tbl_next_index;
 };
 
 static const u8 zero_mac[ETH_ALEN] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
@@ -256,7 +273,6 @@ enum {
 	ROCKER_PRIORITY_VLAN = 1,
 	ROCKER_PRIORITY_TERM_MAC_UCAST = 0,
 	ROCKER_PRIORITY_TERM_MAC_MCAST = 1,
-	ROCKER_PRIORITY_UNICAST_ROUTING = 1,
 	ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_EXACT = 1,
 	ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_WILD = 2,
 	ROCKER_PRIORITY_BRIDGING_VLAN = 3,
@@ -1940,8 +1956,7 @@ static int rocker_cmd_flow_tbl_add(struct rocker *rocker,
 	struct rocker_tlv *cmd_info;
 	int err = 0;
 
-	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
-			       ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_ADD))
+	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
 		return -EMSGSIZE;
 	cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
 	if (!cmd_info)
@@ -1998,8 +2013,7 @@ static int rocker_cmd_flow_tbl_del(struct rocker *rocker,
 	const struct rocker_flow_tbl_entry *entry = priv;
 	struct rocker_tlv *cmd_info;
 
-	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
-			       ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_DEL))
+	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
 		return -EMSGSIZE;
 	cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
 	if (!cmd_info)
@@ -2168,9 +2182,9 @@ static int rocker_cmd_group_tbl_del(struct rocker *rocker,
 	return 0;
 }
 
-/*****************************************
- * Flow, group, FDB, internal VLAN tables
- *****************************************/
+/***************************************************
+ * Flow, group, FDB, internal VLAN and neigh tables
+ ***************************************************/
 
 static int rocker_init_tbls(struct rocker *rocker)
 {
@@ -2186,6 +2200,9 @@ static int rocker_init_tbls(struct rocker *rocker)
 	hash_init(rocker->internal_vlan_tbl);
 	spin_lock_init(&rocker->internal_vlan_tbl_lock);
 
+	hash_init(rocker->neigh_tbl);
+	spin_lock_init(&rocker->neigh_tbl_lock);
+
 	return 0;
 }
 
@@ -2196,6 +2213,7 @@ static void rocker_free_tbls(struct rocker *rocker)
 	struct rocker_group_tbl_entry *group_entry;
 	struct rocker_fdb_tbl_entry *fdb_entry;
 	struct rocker_internal_vlan_tbl_entry *internal_vlan_entry;
+	struct rocker_neigh_tbl_entry *neigh_entry;
 	struct hlist_node *tmp;
 	int bkt;
 
@@ -2219,16 +2237,22 @@ static void rocker_free_tbls(struct rocker *rocker)
 			   tmp, internal_vlan_entry, entry)
 		hash_del(&internal_vlan_entry->entry);
 	spin_unlock_irqrestore(&rocker->internal_vlan_tbl_lock, flags);
+
+	spin_lock_irqsave(&rocker->neigh_tbl_lock, flags);
+	hash_for_each_safe(rocker->neigh_tbl, bkt, tmp, neigh_entry, entry)
+		hash_del(&neigh_entry->entry);
+	spin_unlock_irqrestore(&rocker->neigh_tbl_lock, flags);
 }
 
 static struct rocker_flow_tbl_entry *
 rocker_flow_tbl_find(struct rocker *rocker, struct rocker_flow_tbl_entry *match)
 {
 	struct rocker_flow_tbl_entry *found;
+	size_t key_len = match->key_len ? match->key_len : sizeof(found->key);
 
 	hash_for_each_possible(rocker->flow_tbl, found,
 			       entry, match->key_crc32) {
-		if (memcmp(&found->key, &match->key, sizeof(found->key)) == 0)
+		if (memcmp(&found->key, &match->key, key_len) == 0)
 			return found;
 	}
 
@@ -2241,42 +2265,34 @@ static int rocker_flow_tbl_add(struct rocker_port *rocker_port,
 {
 	struct rocker *rocker = rocker_port->rocker;
 	struct rocker_flow_tbl_entry *found;
+	size_t key_len = match->key_len ? match->key_len : sizeof(found->key);
 	unsigned long flags;
-	bool add_to_hw = false;
-	int err = 0;
 
-	match->key_crc32 = crc32(~0, &match->key, sizeof(match->key));
+	match->key_crc32 = crc32(~0, &match->key, key_len);
 
 	spin_lock_irqsave(&rocker->flow_tbl_lock, flags);
 
 	found = rocker_flow_tbl_find(rocker, match);
 
 	if (found) {
-		kfree(match);
+		match->cookie = found->cookie;
+		hash_del(&found->entry);
+		kfree(found);
+		found = match;
+		found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_MOD;
 	} else {
 		found = match;
 		found->cookie = rocker->flow_tbl_next_cookie++;
-		hash_add(rocker->flow_tbl, &found->entry, found->key_crc32);
-		add_to_hw = true;
+		found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_ADD;
 	}
 
-	found->ref_count++;
+	hash_add(rocker->flow_tbl, &found->entry, found->key_crc32);
 
 	spin_unlock_irqrestore(&rocker->flow_tbl_lock, flags);
 
-	if (add_to_hw) {
-		err = rocker_cmd_exec(rocker, rocker_port,
-				      rocker_cmd_flow_tbl_add,
-				      found, NULL, NULL, nowait);
-		if (err) {
-			spin_lock_irqsave(&rocker->flow_tbl_lock, flags);
-			hash_del(&found->entry);
-			spin_unlock_irqrestore(&rocker->flow_tbl_lock, flags);
-			kfree(found);
-		}
-	}
-
-	return err;
+	return rocker_cmd_exec(rocker, rocker_port,
+			       rocker_cmd_flow_tbl_add,
+			       found, NULL, NULL, nowait);
 }
 
 static int rocker_flow_tbl_del(struct rocker_port *rocker_port,
@@ -2285,29 +2301,26 @@ static int rocker_flow_tbl_del(struct rocker_port *rocker_port,
 {
 	struct rocker *rocker = rocker_port->rocker;
 	struct rocker_flow_tbl_entry *found;
+	size_t key_len = match->key_len ? match->key_len : sizeof(found->key);
 	unsigned long flags;
-	bool del_from_hw = false;
 	int err = 0;
 
-	match->key_crc32 = crc32(~0, &match->key, sizeof(match->key));
+	match->key_crc32 = crc32(~0, &match->key, key_len);
 
 	spin_lock_irqsave(&rocker->flow_tbl_lock, flags);
 
 	found = rocker_flow_tbl_find(rocker, match);
 
 	if (found) {
-		found->ref_count--;
-		if (found->ref_count == 0) {
-			hash_del(&found->entry);
-			del_from_hw = true;
-		}
+		hash_del(&found->entry);
+		found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_DEL;
 	}
 
 	spin_unlock_irqrestore(&rocker->flow_tbl_lock, flags);
 
 	kfree(match);
 
-	if (del_from_hw) {
+	if (found) {
 		err = rocker_cmd_exec(rocker, rocker_port,
 				      rocker_cmd_flow_tbl_del,
 				      found, NULL, NULL, nowait);
@@ -2467,6 +2480,31 @@ static int rocker_flow_tbl_bridge(struct rocker_port *rocker_port,
 	return rocker_flow_tbl_do(rocker_port, flags, entry);
 }
 
+static int rocker_flow_tbl_ucast4_routing(struct rocker_port *rocker_port,
+					  __be16 eth_type, __be32 dst,
+					  __be32 dst_mask, u32 priority,
+					  enum rocker_of_dpa_table_id goto_tbl,
+					  u32 group_id, int flags)
+{
+	struct rocker_flow_tbl_entry *entry;
+
+	entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags));
+	if (!entry)
+		return -ENOMEM;
+
+	entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING;
+	entry->key.priority = priority;
+	entry->key.ucast_routing.eth_type = eth_type;
+	entry->key.ucast_routing.dst4 = dst;
+	entry->key.ucast_routing.dst4_mask = dst_mask;
+	entry->key.ucast_routing.goto_tbl = goto_tbl;
+	entry->key.ucast_routing.group_id = group_id;
+	entry->key_len = offsetof(struct rocker_flow_tbl_key,
+				  ucast_routing.group_id);
+
+	return rocker_flow_tbl_do(rocker_port, flags, entry);
+}
+
 static int rocker_flow_tbl_acl(struct rocker_port *rocker_port,
 			       int flags, u32 in_pport,
 			       u32 in_pport_mask,
@@ -2554,7 +2592,6 @@ static int rocker_group_tbl_add(struct rocker_port *rocker_port,
 	struct rocker *rocker = rocker_port->rocker;
 	struct rocker_group_tbl_entry *found;
 	unsigned long flags;
-	int err = 0;
 
 	spin_lock_irqsave(&rocker->group_tbl_lock, flags);
 
@@ -2574,12 +2611,9 @@ static int rocker_group_tbl_add(struct rocker_port *rocker_port,
 
 	spin_unlock_irqrestore(&rocker->group_tbl_lock, flags);
 
-	if (found->cmd)
-		err = rocker_cmd_exec(rocker, rocker_port,
-				      rocker_cmd_group_tbl_add,
-				      found, NULL, NULL, nowait);
-
-	return err;
+	return rocker_cmd_exec(rocker, rocker_port,
+			       rocker_cmd_group_tbl_add,
+			       found, NULL, NULL, nowait);
 }
 
 static int rocker_group_tbl_del(struct rocker_port *rocker_port,
@@ -2675,6 +2709,244 @@ static int rocker_group_l2_flood(struct rocker_port *rocker_port,
 				 group_id);
 }
 
+static int rocker_group_l3_unicast(struct rocker_port *rocker_port,
+				   int flags, u32 index, u8 *src_mac,
+				   u8 *dst_mac, __be16 vlan_id,
+				   bool ttl_check, u32 pport)
+{
+	struct rocker_group_tbl_entry *entry;
+
+	entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags));
+	if (!entry)
+		return -ENOMEM;
+
+	entry->group_id = ROCKER_GROUP_L3_UNICAST(index);
+	if (src_mac)
+		ether_addr_copy(entry->l3_unicast.eth_src, src_mac);
+	if (dst_mac)
+		ether_addr_copy(entry->l3_unicast.eth_dst, dst_mac);
+	entry->l3_unicast.vlan_id = vlan_id;
+	entry->l3_unicast.ttl_check = ttl_check;
+	entry->l3_unicast.group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, pport);
+
+	return rocker_group_tbl_do(rocker_port, flags, entry);
+}
+
+static struct rocker_neigh_tbl_entry *
+	rocker_neigh_tbl_find(struct rocker *rocker, __be32 ip_addr)
+{
+	struct rocker_neigh_tbl_entry *found;
+
+	hash_for_each_possible(rocker->neigh_tbl, found, entry, ip_addr)
+		if (found->ip_addr == ip_addr)
+			return found;
+
+	return NULL;
+}
+
+static void _rocker_neigh_add(struct rocker *rocker,
+			      struct rocker_neigh_tbl_entry *entry)
+{
+	entry->index = rocker->neigh_tbl_next_index++;
+	entry->ref_count++;
+	hash_add(rocker->neigh_tbl, &entry->entry, entry->ip_addr);
+}
+
+static void _rocker_neigh_del(struct rocker *rocker,
+			      struct rocker_neigh_tbl_entry *entry)
+{
+	if (--entry->ref_count == 0) {
+		hash_del(&entry->entry);
+		kfree(entry);
+	}
+}
+
+static void _rocker_neigh_update(struct rocker *rocker,
+				 struct rocker_neigh_tbl_entry *entry,
+				 u8 *eth_dst, bool ttl_check)
+{
+	if (eth_dst) {
+		ether_addr_copy(entry->eth_dst, eth_dst);
+		entry->ttl_check = ttl_check;
+	} else {
+		entry->ref_count++;
+	}
+}
+
+static int rocker_port_ipv4_neigh(struct rocker_port *rocker_port,
+				  int flags, __be32 ip_addr, u8 *eth_dst)
+{
+	struct rocker *rocker = rocker_port->rocker;
+	struct rocker_neigh_tbl_entry *entry;
+	struct rocker_neigh_tbl_entry *found;
+	unsigned long lock_flags;
+	__be16 eth_type = htons(ETH_P_IP);
+	enum rocker_of_dpa_table_id goto_tbl =
+		ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
+	u32 group_id;
+	u32 priority = 0;
+	bool adding = !(flags & ROCKER_OP_FLAG_REMOVE);
+	bool updating;
+	bool removing;
+	int err = 0;
+
+	entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags));
+	if (!entry)
+		return -ENOMEM;
+
+	spin_lock_irqsave(&rocker->neigh_tbl_lock, lock_flags);
+
+	found = rocker_neigh_tbl_find(rocker, ip_addr);
+
+	updating = found && adding;
+	removing = found && !adding;
+	adding = !found && adding;
+
+	if (adding) {
+		entry->ip_addr = ip_addr;
+		entry->dev = rocker_port->dev;
+		ether_addr_copy(entry->eth_dst, eth_dst);
+		entry->ttl_check = true;
+		_rocker_neigh_add(rocker, entry);
+	} else if (removing) {
+		memcpy(entry, found, sizeof(*entry));
+		_rocker_neigh_del(rocker, found);
+	} else if (updating) {
+		_rocker_neigh_update(rocker, found, eth_dst, true);
+		memcpy(entry, found, sizeof(*entry));
+	} else {
+		err = -ENOENT;
+	}
+
+	spin_unlock_irqrestore(&rocker->neigh_tbl_lock, lock_flags);
+
+	if (err)
+		goto err_out;
+
+	/* For each active neighbor, we have an L3 unicast group and
+	 * a /32 route to the neighbor, which uses the L3 unicast
+	 * group.  The L3 unicast group can also be referred to by
+	 * other routes' nexthops.
+	 */
+
+	err = rocker_group_l3_unicast(rocker_port, flags,
+				      entry->index,
+				      rocker_port->dev->dev_addr,
+				      entry->eth_dst,
+				      rocker_port->internal_vlan_id,
+				      entry->ttl_check,
+				      rocker_port->pport);
+	if (err) {
+		netdev_err(rocker_port->dev,
+			   "Error (%d) L3 unicast group index %d\n",
+			   err, entry->index);
+		goto err_out;
+	}
+
+	if (adding || removing) {
+		group_id = ROCKER_GROUP_L3_UNICAST(entry->index);
+		err = rocker_flow_tbl_ucast4_routing(rocker_port,
+						     eth_type, ip_addr,
+						     inet_make_mask(32),
+						     priority, goto_tbl,
+						     group_id, flags);
+
+		if (err)
+			netdev_err(rocker_port->dev,
+				   "Error (%d) /32 unicast route %pI4 group 0x%08x\n",
+				   err, &entry->ip_addr, group_id);
+	}
+
+err_out:
+	if (!adding)
+		kfree(entry);
+
+	return err;
+}
+
+static int rocker_port_ipv4_resolve(struct rocker_port *rocker_port,
+				    __be32 ip_addr)
+{
+	struct net_device *dev = rocker_port->dev;
+	struct neighbour *n = __ipv4_neigh_lookup(dev, ip_addr);
+	int err = 0;
+
+	if (!n)
+		n = neigh_create(&arp_tbl, &ip_addr, dev);
+	if (!n)
+		return -ENOMEM;
+
+	/* If the neigh is already resolved, then go ahead and
+	 * install the entry, otherwise start the ARP process to
+	 * resolve the neigh.
+	 */
+
+	if (n->nud_state & NUD_VALID)
+		err = rocker_port_ipv4_neigh(rocker_port, 0, ip_addr, n->ha);
+	else
+		neigh_event_send(n, NULL);
+
+	return err;
+}
+
+static int rocker_port_ipv4_nh(struct rocker_port *rocker_port, int flags,
+			       __be32 ip_addr, u32 *index)
+{
+	struct rocker *rocker = rocker_port->rocker;
+	struct rocker_neigh_tbl_entry *entry;
+	struct rocker_neigh_tbl_entry *found;
+	unsigned long lock_flags;
+	bool adding = !(flags & ROCKER_OP_FLAG_REMOVE);
+	bool updating;
+	bool removing;
+	bool resolved = true;
+	int err = 0;
+
+	entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags));
+	if (!entry)
+		return -ENOMEM;
+
+	spin_lock_irqsave(&rocker->neigh_tbl_lock, lock_flags);
+
+	found = rocker_neigh_tbl_find(rocker, ip_addr);
+	if (found)
+		*index = found->index;
+
+	updating = found && adding;
+	removing = found && !adding;
+	adding = !found && adding;
+
+	if (adding) {
+		entry->ip_addr = ip_addr;
+		entry->dev = rocker_port->dev;
+		_rocker_neigh_add(rocker, entry);
+		*index = entry->index;
+		resolved = false;
+	} else if (removing) {
+		_rocker_neigh_del(rocker, found);
+	} else if (updating) {
+		_rocker_neigh_update(rocker, found, NULL, false);
+		resolved = !is_zero_ether_addr(found->eth_dst);
+	} else {
+		err = -ENOENT;
+	}
+
+	spin_unlock_irqrestore(&rocker->neigh_tbl_lock, lock_flags);
+
+	if (!adding)
+		kfree(entry);
+
+	if (err)
+		return err;
+
+	/* Resolved means neigh ip_addr is resolved to neigh mac. */
+
+	if (!resolved)
+		err = rocker_port_ipv4_resolve(rocker_port, ip_addr);
+
+	return err;
+}
+
 static int rocker_port_vlan_flood_group(struct rocker_port *rocker_port,
 					int flags, __be16 vlan_id)
 {
@@ -3429,6 +3701,51 @@ not_found:
 	spin_unlock_irqrestore(&rocker->internal_vlan_tbl_lock, lock_flags);
 }
 
+static int rocker_port_fib_ipv4(struct rocker_port *rocker_port, __be32 dst,
+				int dst_len, struct fib_info *fi, u32 tb_id,
+				int flags)
+{
+	struct fib_nh *nh;
+	__be16 eth_type = htons(ETH_P_IP);
+	__be32 dst_mask = inet_make_mask(dst_len);
+	__be16 internal_vlan_id = rocker_port->internal_vlan_id;
+	u32 priority = fi->fib_priority;
+	enum rocker_of_dpa_table_id goto_tbl =
+		ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
+	u32 group_id;
+	bool nh_on_port;
+	bool has_gw;
+	u32 index;
+	int err;
+
+	/* XXX support ECMP */
+
+	nh = fi->fib_nh;
+	nh_on_port = (fi->fib_dev == rocker_port->dev);
+	has_gw = !!nh->nh_gw;
+
+	if (has_gw && nh_on_port) {
+		err = rocker_port_ipv4_nh(rocker_port, flags,
+					  nh->nh_gw, &index);
+		if (err)
+			return err;
+
+		group_id = ROCKER_GROUP_L3_UNICAST(index);
+	} else {
+		/* Send to CPU for processing */
+		group_id = ROCKER_GROUP_L2_INTERFACE(internal_vlan_id, 0);
+	}
+
+	err = rocker_flow_tbl_ucast4_routing(rocker_port, eth_type, dst,
+					     dst_mask, priority, goto_tbl,
+					     group_id, flags);
+	if (err)
+		netdev_err(rocker_port->dev, "Error (%d) IPv4 route %pI4\n",
+			   err, &dst);
+
+	return err;
+}
+
 /*****************
  * Net device ops
  *****************/
@@ -3830,6 +4147,30 @@ static int rocker_port_switch_port_stp_update(struct net_device *dev, u8 state)
 	return rocker_port_stp_update(rocker_port, state);
 }
 
+static int rocker_port_switch_fib_ipv4_add(struct net_device *dev,
+					   __be32 dst, int dst_len,
+					   struct fib_info *fi,
+					   u8 tos, u8 type, u32 tb_id)
+{
+	struct rocker_port *rocker_port = netdev_priv(dev);
+	int flags = 0;
+
+	return rocker_port_fib_ipv4(rocker_port, dst, dst_len,
+				    fi, tb_id, flags);
+}
+
+static int rocker_port_switch_fib_ipv4_del(struct net_device *dev,
+					   __be32 dst, int dst_len,
+					   struct fib_info *fi,
+					   u8 tos, u8 type, u32 tb_id)
+{
+	struct rocker_port *rocker_port = netdev_priv(dev);
+	int flags = ROCKER_OP_FLAG_REMOVE;
+
+	return rocker_port_fib_ipv4(rocker_port, dst, dst_len,
+				    fi, tb_id, flags);
+}
+
 static const struct net_device_ops rocker_port_netdev_ops = {
 	.ndo_open			= rocker_port_open,
 	.ndo_stop			= rocker_port_stop,
@@ -3844,6 +4185,8 @@ static const struct net_device_ops rocker_port_netdev_ops = {
 	.ndo_bridge_getlink		= rocker_port_bridge_getlink,
 	.ndo_switch_parent_id_get	= rocker_port_switch_parent_id_get,
 	.ndo_switch_port_stp_update	= rocker_port_switch_port_stp_update,
+	.ndo_switch_fib_ipv4_add	= rocker_port_switch_fib_ipv4_add,
+	.ndo_switch_fib_ipv4_del	= rocker_port_switch_fib_ipv4_del,
 };
 
 /********************
@@ -4204,8 +4547,9 @@ static int rocker_probe_port(struct rocker *rocker, unsigned int port_number)
 		       NAPI_POLL_WEIGHT);
 	rocker_carrier_init(rocker_port);
 
-	dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER |
-			 NETIF_F_HW_SWITCH_OFFLOAD;
+	dev->features |= NETIF_F_NETNS_LOCAL |
+			 NETIF_F_HW_VLAN_CTAG_FILTER |
+			 NETIF_F_HW_SWITCH_OFFLOAD;
 
 	err = register_netdev(dev);
 	if (err) {
@@ -4546,6 +4890,48 @@ static struct notifier_block rocker_netdevice_nb __read_mostly = {
 	.notifier_call = rocker_netdevice_event,
 };
 
+/************************************
+ * Net event notifier event handler
+ ************************************/
+
+static int rocker_neigh_update(struct net_device *dev, struct neighbour *n)
+{
+	struct rocker_port *rocker_port = netdev_priv(dev);
+	int flags = (n->nud_state & NUD_VALID) ? 0 : ROCKER_OP_FLAG_REMOVE;
+	__be32 ip_addr = *(__be32 *)n->primary_key;
+
+	return rocker_port_ipv4_neigh(rocker_port, flags, ip_addr, n->ha);
+}
+
+static int rocker_netevent_event(struct notifier_block *unused,
+				 unsigned long event, void *ptr)
+{
+	struct net_device *dev;
+	struct neighbour *n = ptr;
+	int err;
+
+	switch (event) {
+	case NETEVENT_NEIGH_UPDATE:
+		if (n->tbl != &arp_tbl)
+			return NOTIFY_DONE;
+		dev = n->dev;
+		if (!rocker_port_dev_check(dev))
+			return NOTIFY_DONE;
+		err = rocker_neigh_update(dev, n);
+		if (err)
+			netdev_warn(dev,
+				    "failed to handle neigh update (err %d)\n",
+				    err);
+		break;
+	}
+
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block rocker_netevent_nb __read_mostly = {
+	.notifier_call = rocker_netevent_event,
+};
+
 /***********************
  * Module init and exit
  ***********************/
@@ -4555,18 +4941,21 @@ static int __init rocker_module_init(void)
 	int err;
 
 	register_netdevice_notifier(&rocker_netdevice_nb);
+	register_netevent_notifier(&rocker_netevent_nb);
 	err = pci_register_driver(&rocker_pci_driver);
 	if (err)
 		goto err_pci_register_driver;
 	return 0;
 
 err_pci_register_driver:
+	unregister_netdevice_notifier(&rocker_netevent_nb);
 	unregister_netdevice_notifier(&rocker_netdevice_nb);
 	return err;
 }
 
 static void __exit rocker_module_exit(void)
 {
+	unregister_netevent_notifier(&rocker_netevent_nb);
 	unregister_netdevice_notifier(&rocker_netdevice_nb);
 	pci_unregister_driver(&rocker_pci_driver);
 }