Diffstat

 Documentation/networking/team.txt         |    2
 MAINTAINERS                               |    7
 drivers/net/Kconfig                       |    2
 drivers/net/Makefile                      |    1
 drivers/net/team/Kconfig                  |   43
 drivers/net/team/Makefile                 |    7
 drivers/net/team/team.c                   | 1583
 drivers/net/team/team_mode_activebackup.c |  137
 drivers/net/team/team_mode_roundrobin.c   |  107
 include/linux/Kbuild                      |    1
 include/linux/if.h                        |    1
 include/linux/if_team.h                   |  242
 12 files changed, 2133 insertions(+), 0 deletions(-)
diff --git a/Documentation/networking/team.txt b/Documentation/networking/team.txt
new file mode 100644
index 000000000000..5a013686b9ea
--- /dev/null
+++ b/Documentation/networking/team.txt
@@ -0,0 +1,2 @@
1 | Team devices are driven from userspace via the libteam library, which is available here: | ||
2 | https://github.com/jpirko/libteam | ||
diff --git a/MAINTAINERS b/MAINTAINERS
index 4808256446f2..8d941692c394 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -6484,6 +6484,13 @@ W: http://tcp-lp-mod.sourceforge.net/
6484 | S: Maintained | 6484 | S: Maintained |
6485 | F: net/ipv4/tcp_lp.c | 6485 | F: net/ipv4/tcp_lp.c |
6486 | 6486 | ||
6487 | TEAM DRIVER | ||
6488 | M: Jiri Pirko <jpirko@redhat.com> | ||
6489 | L: netdev@vger.kernel.org | ||
6490 | S: Supported | ||
6491 | F: drivers/net/team/ | ||
6492 | F: include/linux/if_team.h | ||
6493 | |||
6487 | TEGRA SUPPORT | 6494 | TEGRA SUPPORT |
6488 | M: Colin Cross <ccross@android.com> | 6495 | M: Colin Cross <ccross@android.com> |
6489 | M: Olof Johansson <olof@lixom.net> | 6496 | M: Olof Johansson <olof@lixom.net> |
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 583f66cd5bbd..b3020bea39e4 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -125,6 +125,8 @@ config IFB
125 | 'ifb1' etc. | 125 | 'ifb1' etc. |
126 | Look at the iproute2 documentation directory for usage etc | 126 | Look at the iproute2 documentation directory for usage etc |
127 | 127 | ||
128 | source "drivers/net/team/Kconfig" | ||
129 | |||
128 | config MACVLAN | 130 | config MACVLAN |
129 | tristate "MAC-VLAN support (EXPERIMENTAL)" | 131 | tristate "MAC-VLAN support (EXPERIMENTAL)" |
130 | depends on EXPERIMENTAL | 132 | depends on EXPERIMENTAL |
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index fa877cd2b139..4e4ebfe1aa53 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -17,6 +17,7 @@ obj-$(CONFIG_NET) += Space.o loopback.o
17 | obj-$(CONFIG_NETCONSOLE) += netconsole.o | 17 | obj-$(CONFIG_NETCONSOLE) += netconsole.o |
18 | obj-$(CONFIG_PHYLIB) += phy/ | 18 | obj-$(CONFIG_PHYLIB) += phy/ |
19 | obj-$(CONFIG_RIONET) += rionet.o | 19 | obj-$(CONFIG_RIONET) += rionet.o |
20 | obj-$(CONFIG_NET_TEAM) += team/ | ||
20 | obj-$(CONFIG_TUN) += tun.o | 21 | obj-$(CONFIG_TUN) += tun.o |
21 | obj-$(CONFIG_VETH) += veth.o | 22 | obj-$(CONFIG_VETH) += veth.o |
22 | obj-$(CONFIG_VIRTIO_NET) += virtio_net.o | 23 | obj-$(CONFIG_VIRTIO_NET) += virtio_net.o |
diff --git a/drivers/net/team/Kconfig b/drivers/net/team/Kconfig
new file mode 100644
index 000000000000..248a144033ca
--- /dev/null
+++ b/drivers/net/team/Kconfig
@@ -0,0 +1,43 @@
1 | menuconfig NET_TEAM | ||
2 | tristate "Ethernet team driver support (EXPERIMENTAL)" | ||
3 | depends on EXPERIMENTAL | ||
4 | ---help--- | ||
5 | This allows one to create virtual interfaces that team together | ||
6 | multiple Ethernet devices. | ||
7 | |||
8 | Team devices can be added using the "ip" command from the | ||
9 | iproute2 package: | ||
10 | |||
11 | "ip link add link [ address MAC ] [ NAME ] type team" | ||
12 | |||
13 | To compile this driver as a module, choose M here: the module | ||
14 | will be called team. | ||
15 | |||
16 | if NET_TEAM | ||
17 | |||
18 | config NET_TEAM_MODE_ROUNDROBIN | ||
19 | tristate "Round-robin mode support" | ||
20 | depends on NET_TEAM | ||
21 | ---help--- | ||
22 | Basic mode where port used for transmitting packets is selected in | ||
23 | round-robin fashion using packet counter. | ||
24 | |||
25 | All added ports are set up to use the team device's MAC address. | ||
26 | |||
27 | To compile this team mode as a module, choose M here: the module | ||
28 | will be called team_mode_roundrobin. | ||
29 | |||
30 | config NET_TEAM_MODE_ACTIVEBACKUP | ||
31 | tristate "Active-backup mode support" | ||
32 | depends on NET_TEAM | ||
33 | ---help--- | ||
34 | Only one port is active at a time and the rest of ports are used | ||
35 | for backup. | ||
36 | |||
37 | MAC addresses of ports are not modified. Userspace is responsible | ||
38 | for doing so. | ||
39 | |||
40 | To compile this team mode as a module, choose M here: the module | ||
41 | will be called team_mode_activebackup. | ||
42 | |||
43 | endif # NET_TEAM | ||
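
The round-robin selection described in the NET_TEAM_MODE_ROUNDROBIN help text above amounts to picking the outgoing port by a packet counter modulo the current port count. The full team_mode_roundrobin.c is part of this patch but is not reproduced on this page; the following is only a rough sketch of such a transmit hook, built on the struct team fields (port_count, mode_priv) and mode ops introduced in team.c below. The names rr_priv, rr_transmit and the RCU port-lookup helper team_get_port_by_index_rcu are illustrative and need not match the upstream file exactly.

struct rr_priv {
	unsigned int sent_packets;
};

static struct rr_priv *rr_priv(struct team *team)
{
	/* per-mode private storage lives in team->mode_priv */
	return (struct rr_priv *) &team->mode_priv;
}

static bool rr_transmit(struct team *team, struct sk_buff *skb)
{
	struct team_port *port;
	int port_index;

	/* pick the next port in turn using a simple packet counter */
	port_index = rr_priv(team)->sent_packets++ % team->port_count;
	port = team_get_port_by_index_rcu(team, port_index); /* assumed helper from if_team.h */
	if (unlikely(!port))
		goto drop;
	skb->dev = port->dev;
	if (dev_queue_xmit(skb))
		return false;	/* counted as tx_dropped by team_xmit() */
	return true;

drop:
	dev_kfree_skb_any(skb);
	return false;
}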
diff --git a/drivers/net/team/Makefile b/drivers/net/team/Makefile
new file mode 100644
index 000000000000..85f2028a87af
--- /dev/null
+++ b/drivers/net/team/Makefile
@@ -0,0 +1,7 @@
1 | # | ||
2 | # Makefile for the network team driver | ||
3 | # | ||
4 | |||
5 | obj-$(CONFIG_NET_TEAM) += team.o | ||
6 | obj-$(CONFIG_NET_TEAM_MODE_ROUNDROBIN) += team_mode_roundrobin.o | ||
7 | obj-$(CONFIG_NET_TEAM_MODE_ACTIVEBACKUP) += team_mode_activebackup.o | ||
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
new file mode 100644
index 000000000000..60672bb09960
--- /dev/null
+++ b/drivers/net/team/team.c
@@ -0,0 +1,1583 @@
1 | /* | ||
2 | * drivers/net/team/team.c - Network team device driver | ||
3 | * Copyright (c) 2011 Jiri Pirko <jpirko@redhat.com> | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License as published by | ||
7 | * the Free Software Foundation; either version 2 of the License, or | ||
8 | * (at your option) any later version. | ||
9 | */ | ||
10 | |||
11 | #include <linux/kernel.h> | ||
12 | #include <linux/types.h> | ||
13 | #include <linux/module.h> | ||
14 | #include <linux/init.h> | ||
15 | #include <linux/slab.h> | ||
16 | #include <linux/rcupdate.h> | ||
17 | #include <linux/errno.h> | ||
18 | #include <linux/ctype.h> | ||
19 | #include <linux/notifier.h> | ||
20 | #include <linux/netdevice.h> | ||
21 | #include <linux/if_arp.h> | ||
22 | #include <linux/socket.h> | ||
23 | #include <linux/etherdevice.h> | ||
24 | #include <linux/rtnetlink.h> | ||
25 | #include <net/rtnetlink.h> | ||
26 | #include <net/genetlink.h> | ||
27 | #include <net/netlink.h> | ||
28 | #include <linux/if_team.h> | ||
29 | |||
30 | #define DRV_NAME "team" | ||
31 | |||
32 | |||
33 | /********** | ||
34 | * Helpers | ||
35 | **********/ | ||
36 | |||
37 | #define team_port_exists(dev) (dev->priv_flags & IFF_TEAM_PORT) | ||
38 | |||
39 | static struct team_port *team_port_get_rcu(const struct net_device *dev) | ||
40 | { | ||
41 | struct team_port *port = rcu_dereference(dev->rx_handler_data); | ||
42 | |||
43 | return team_port_exists(dev) ? port : NULL; | ||
44 | } | ||
45 | |||
46 | static struct team_port *team_port_get_rtnl(const struct net_device *dev) | ||
47 | { | ||
48 | struct team_port *port = rtnl_dereference(dev->rx_handler_data); | ||
49 | |||
50 | return team_port_exists(dev) ? port : NULL; | ||
51 | } | ||
52 | |||
53 | /* | ||
54 | * Since the ability to change the MAC address of an open port device is tested | ||
55 | * in team_port_add, this function can be called without checking the return value | ||
56 | */ | ||
57 | static int __set_port_mac(struct net_device *port_dev, | ||
58 | const unsigned char *dev_addr) | ||
59 | { | ||
60 | struct sockaddr addr; | ||
61 | |||
62 | memcpy(addr.sa_data, dev_addr, ETH_ALEN); | ||
63 | addr.sa_family = ARPHRD_ETHER; | ||
64 | return dev_set_mac_address(port_dev, &addr); | ||
65 | } | ||
66 | |||
67 | int team_port_set_orig_mac(struct team_port *port) | ||
68 | { | ||
69 | return __set_port_mac(port->dev, port->orig.dev_addr); | ||
70 | } | ||
71 | |||
72 | int team_port_set_team_mac(struct team_port *port) | ||
73 | { | ||
74 | return __set_port_mac(port->dev, port->team->dev->dev_addr); | ||
75 | } | ||
76 | EXPORT_SYMBOL(team_port_set_team_mac); | ||
77 | |||
78 | |||
79 | /******************* | ||
80 | * Options handling | ||
81 | *******************/ | ||
82 | |||
83 | void team_options_register(struct team *team, struct team_option *option, | ||
84 | size_t option_count) | ||
85 | { | ||
86 | int i; | ||
87 | |||
88 | for (i = 0; i < option_count; i++, option++) | ||
89 | list_add_tail(&option->list, &team->option_list); | ||
90 | } | ||
91 | EXPORT_SYMBOL(team_options_register); | ||
92 | |||
93 | static void __team_options_change_check(struct team *team, | ||
94 | struct team_option *changed_option); | ||
95 | |||
96 | static void __team_options_unregister(struct team *team, | ||
97 | struct team_option *option, | ||
98 | size_t option_count) | ||
99 | { | ||
100 | int i; | ||
101 | |||
102 | for (i = 0; i < option_count; i++, option++) | ||
103 | list_del(&option->list); | ||
104 | } | ||
105 | |||
106 | void team_options_unregister(struct team *team, struct team_option *option, | ||
107 | size_t option_count) | ||
108 | { | ||
109 | __team_options_unregister(team, option, option_count); | ||
110 | __team_options_change_check(team, NULL); | ||
111 | } | ||
112 | EXPORT_SYMBOL(team_options_unregister); | ||
113 | |||
114 | static int team_option_get(struct team *team, struct team_option *option, | ||
115 | void *arg) | ||
116 | { | ||
117 | return option->getter(team, arg); | ||
118 | } | ||
119 | |||
120 | static int team_option_set(struct team *team, struct team_option *option, | ||
121 | void *arg) | ||
122 | { | ||
123 | int err; | ||
124 | |||
125 | err = option->setter(team, arg); | ||
126 | if (err) | ||
127 | return err; | ||
128 | |||
129 | __team_options_change_check(team, option); | ||
130 | return err; | ||
131 | } | ||
132 | |||
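/*
 * Illustrative sketch, not part of this patch: a mode (or other in-kernel
 * consumer) describes its tunables with an array of struct team_option and
 * hands them to team_options_register(), typically from its init callback.
 * The "example_*" names below are hypothetical; only struct team_option,
 * TEAM_OPTION_TYPE_U32 and team_options_register() come from this patch.
 */
static int example_count_get(struct team *team, void *arg)
{
	u32 *count = arg;

	*count = 0;	/* would report some mode-private state here */
	return 0;
}

static int example_count_set(struct team *team, void *arg)
{
	u32 *count = arg;

	pr_info("example option \"count\" set to %u\n", *count);
	return 0;
}

static struct team_option example_options[] = {
	{
		.name	= "count",
		.type	= TEAM_OPTION_TYPE_U32,
		.getter	= example_count_get,
		.setter	= example_count_set,
	},
};

/* from a mode's init callback:
 *	team_options_register(team, example_options, ARRAY_SIZE(example_options));
 */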
133 | /**************** | ||
134 | * Mode handling | ||
135 | ****************/ | ||
136 | |||
137 | static LIST_HEAD(mode_list); | ||
138 | static DEFINE_SPINLOCK(mode_list_lock); | ||
139 | |||
140 | static struct team_mode *__find_mode(const char *kind) | ||
141 | { | ||
142 | struct team_mode *mode; | ||
143 | |||
144 | list_for_each_entry(mode, &mode_list, list) { | ||
145 | if (strcmp(mode->kind, kind) == 0) | ||
146 | return mode; | ||
147 | } | ||
148 | return NULL; | ||
149 | } | ||
150 | |||
151 | static bool is_good_mode_name(const char *name) | ||
152 | { | ||
153 | while (*name != '\0') { | ||
154 | if (!isalpha(*name) && !isdigit(*name) && *name != '_') | ||
155 | return false; | ||
156 | name++; | ||
157 | } | ||
158 | return true; | ||
159 | } | ||
160 | |||
161 | int team_mode_register(struct team_mode *mode) | ||
162 | { | ||
163 | int err = 0; | ||
164 | |||
165 | if (!is_good_mode_name(mode->kind) || | ||
166 | mode->priv_size > TEAM_MODE_PRIV_SIZE) | ||
167 | return -EINVAL; | ||
168 | spin_lock(&mode_list_lock); | ||
169 | if (__find_mode(mode->kind)) { | ||
170 | err = -EEXIST; | ||
171 | goto unlock; | ||
172 | } | ||
173 | list_add_tail(&mode->list, &mode_list); | ||
174 | unlock: | ||
175 | spin_unlock(&mode_list_lock); | ||
176 | return err; | ||
177 | } | ||
178 | EXPORT_SYMBOL(team_mode_register); | ||
179 | |||
180 | int team_mode_unregister(struct team_mode *mode) | ||
181 | { | ||
182 | spin_lock(&mode_list_lock); | ||
183 | list_del_init(&mode->list); | ||
184 | spin_unlock(&mode_list_lock); | ||
185 | return 0; | ||
186 | } | ||
187 | EXPORT_SYMBOL(team_mode_unregister); | ||
188 | |||
189 | static struct team_mode *team_mode_get(const char *kind) | ||
190 | { | ||
191 | struct team_mode *mode; | ||
192 | |||
193 | spin_lock(&mode_list_lock); | ||
194 | mode = __find_mode(kind); | ||
195 | if (!mode) { | ||
196 | spin_unlock(&mode_list_lock); | ||
197 | request_module("team-mode-%s", kind); | ||
198 | spin_lock(&mode_list_lock); | ||
199 | mode = __find_mode(kind); | ||
200 | } | ||
201 | if (mode) | ||
202 | if (!try_module_get(mode->owner)) | ||
203 | mode = NULL; | ||
204 | |||
205 | spin_unlock(&mode_list_lock); | ||
206 | return mode; | ||
207 | } | ||
208 | |||
209 | static void team_mode_put(const struct team_mode *mode) | ||
210 | { | ||
211 | module_put(mode->owner); | ||
212 | } | ||
213 | |||
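/*
 * Illustrative sketch, not part of this patch: a mode module registers a
 * struct team_mode (declared in include/linux/if_team.h) and advertises a
 * "team-mode-<kind>" module alias so the request_module() call in
 * team_mode_get() above can autoload it.  All "example_*" names are
 * hypothetical; the real roundrobin and activebackup modules in this patch
 * follow the same pattern.
 */
static const struct team_mode_ops example_mode_ops = {
	.transmit	= example_transmit,	/* hypothetical tx/rx callbacks */
	.receive	= example_receive,
};

static struct team_mode example_mode = {
	.kind		= "example",
	.owner		= THIS_MODULE,
	.priv_size	= sizeof(struct example_priv),
	.ops		= &example_mode_ops,
};

static int __init example_module_init(void)
{
	return team_mode_register(&example_mode);
}

static void __exit example_module_exit(void)
{
	team_mode_unregister(&example_mode);
}

module_init(example_module_init);
module_exit(example_module_exit);
MODULE_ALIAS("team-mode-example");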
214 | static bool team_dummy_transmit(struct team *team, struct sk_buff *skb) | ||
215 | { | ||
216 | dev_kfree_skb_any(skb); | ||
217 | return false; | ||
218 | } | ||
219 | |||
220 | rx_handler_result_t team_dummy_receive(struct team *team, | ||
221 | struct team_port *port, | ||
222 | struct sk_buff *skb) | ||
223 | { | ||
224 | return RX_HANDLER_ANOTHER; | ||
225 | } | ||
226 | |||
227 | static void team_adjust_ops(struct team *team) | ||
228 | { | ||
229 | /* | ||
230 | * To avoid checks in rx/tx skb paths, ensure here that non-null and | ||
231 | * correct ops are always set. | ||
232 | */ | ||
233 | |||
234 | if (list_empty(&team->port_list) || | ||
235 | !team->mode || !team->mode->ops->transmit) | ||
236 | team->ops.transmit = team_dummy_transmit; | ||
237 | else | ||
238 | team->ops.transmit = team->mode->ops->transmit; | ||
239 | |||
240 | if (list_empty(&team->port_list) || | ||
241 | !team->mode || !team->mode->ops->receive) | ||
242 | team->ops.receive = team_dummy_receive; | ||
243 | else | ||
244 | team->ops.receive = team->mode->ops->receive; | ||
245 | } | ||
246 | |||
247 | /* | ||
248 | * We can benefit from the fact that it is ensured that no port is present | ||
249 | * at the time of mode change. Therefore no packets are in flight, so there is | ||
250 | * no need to set mode operations in any special way. | ||
251 | */ | ||
252 | static int __team_change_mode(struct team *team, | ||
253 | const struct team_mode *new_mode) | ||
254 | { | ||
255 | /* Check if mode was previously set and do cleanup if so */ | ||
256 | if (team->mode) { | ||
257 | void (*exit_op)(struct team *team) = team->ops.exit; | ||
258 | |||
259 | /* Clear ops area so no callback is called any longer */ | ||
260 | memset(&team->ops, 0, sizeof(struct team_mode_ops)); | ||
261 | team_adjust_ops(team); | ||
262 | |||
263 | if (exit_op) | ||
264 | exit_op(team); | ||
265 | team_mode_put(team->mode); | ||
266 | team->mode = NULL; | ||
267 | /* zero private data area */ | ||
268 | memset(&team->mode_priv, 0, | ||
269 | sizeof(struct team) - offsetof(struct team, mode_priv)); | ||
270 | } | ||
271 | |||
272 | if (!new_mode) | ||
273 | return 0; | ||
274 | |||
275 | if (new_mode->ops->init) { | ||
276 | int err; | ||
277 | |||
278 | err = new_mode->ops->init(team); | ||
279 | if (err) | ||
280 | return err; | ||
281 | } | ||
282 | |||
283 | team->mode = new_mode; | ||
284 | memcpy(&team->ops, new_mode->ops, sizeof(struct team_mode_ops)); | ||
285 | team_adjust_ops(team); | ||
286 | |||
287 | return 0; | ||
288 | } | ||
289 | |||
290 | static int team_change_mode(struct team *team, const char *kind) | ||
291 | { | ||
292 | struct team_mode *new_mode; | ||
293 | struct net_device *dev = team->dev; | ||
294 | int err; | ||
295 | |||
296 | if (!list_empty(&team->port_list)) { | ||
297 | netdev_err(dev, "No ports can be present during mode change\n"); | ||
298 | return -EBUSY; | ||
299 | } | ||
300 | |||
301 | if (team->mode && strcmp(team->mode->kind, kind) == 0) { | ||
302 | netdev_err(dev, "Unable to change to the same mode the team is in\n"); | ||
303 | return -EINVAL; | ||
304 | } | ||
305 | |||
306 | new_mode = team_mode_get(kind); | ||
307 | if (!new_mode) { | ||
308 | netdev_err(dev, "Mode \"%s\" not found\n", kind); | ||
309 | return -EINVAL; | ||
310 | } | ||
311 | |||
312 | err = __team_change_mode(team, new_mode); | ||
313 | if (err) { | ||
314 | netdev_err(dev, "Failed to change to mode \"%s\"\n", kind); | ||
315 | team_mode_put(new_mode); | ||
316 | return err; | ||
317 | } | ||
318 | |||
319 | netdev_info(dev, "Mode changed to \"%s\"\n", kind); | ||
320 | return 0; | ||
321 | } | ||
322 | |||
323 | |||
324 | /************************ | ||
325 | * Rx path frame handler | ||
326 | ************************/ | ||
327 | |||
328 | /* note: already called with rcu_read_lock */ | ||
329 | static rx_handler_result_t team_handle_frame(struct sk_buff **pskb) | ||
330 | { | ||
331 | struct sk_buff *skb = *pskb; | ||
332 | struct team_port *port; | ||
333 | struct team *team; | ||
334 | rx_handler_result_t res; | ||
335 | |||
336 | skb = skb_share_check(skb, GFP_ATOMIC); | ||
337 | if (!skb) | ||
338 | return RX_HANDLER_CONSUMED; | ||
339 | |||
340 | *pskb = skb; | ||
341 | |||
342 | port = team_port_get_rcu(skb->dev); | ||
343 | team = port->team; | ||
344 | |||
345 | res = team->ops.receive(team, port, skb); | ||
346 | if (res == RX_HANDLER_ANOTHER) { | ||
347 | struct team_pcpu_stats *pcpu_stats; | ||
348 | |||
349 | pcpu_stats = this_cpu_ptr(team->pcpu_stats); | ||
350 | u64_stats_update_begin(&pcpu_stats->syncp); | ||
351 | pcpu_stats->rx_packets++; | ||
352 | pcpu_stats->rx_bytes += skb->len; | ||
353 | if (skb->pkt_type == PACKET_MULTICAST) | ||
354 | pcpu_stats->rx_multicast++; | ||
355 | u64_stats_update_end(&pcpu_stats->syncp); | ||
356 | |||
357 | skb->dev = team->dev; | ||
358 | } else { | ||
359 | this_cpu_inc(team->pcpu_stats->rx_dropped); | ||
360 | } | ||
361 | |||
362 | return res; | ||
363 | } | ||
364 | |||
365 | |||
366 | /**************** | ||
367 | * Port handling | ||
368 | ****************/ | ||
369 | |||
370 | static bool team_port_find(const struct team *team, | ||
371 | const struct team_port *port) | ||
372 | { | ||
373 | struct team_port *cur; | ||
374 | |||
375 | list_for_each_entry(cur, &team->port_list, list) | ||
376 | if (cur == port) | ||
377 | return true; | ||
378 | return false; | ||
379 | } | ||
380 | |||
381 | /* | ||
382 | * Add/delete port to the team port list. Write guarded by rtnl_lock. | ||
383 | * Takes care of correct port->index setup (might be racy). | ||
384 | */ | ||
385 | static void team_port_list_add_port(struct team *team, | ||
386 | struct team_port *port) | ||
387 | { | ||
388 | port->index = team->port_count++; | ||
389 | hlist_add_head_rcu(&port->hlist, | ||
390 | team_port_index_hash(team, port->index)); | ||
391 | list_add_tail_rcu(&port->list, &team->port_list); | ||
392 | } | ||
393 | |||
394 | static void __reconstruct_port_hlist(struct team *team, int rm_index) | ||
395 | { | ||
396 | int i; | ||
397 | struct team_port *port; | ||
398 | |||
399 | for (i = rm_index + 1; i < team->port_count; i++) { | ||
400 | port = team_get_port_by_index(team, i); | ||
401 | hlist_del_rcu(&port->hlist); | ||
402 | port->index--; | ||
403 | hlist_add_head_rcu(&port->hlist, | ||
404 | team_port_index_hash(team, port->index)); | ||
405 | } | ||
406 | } | ||
407 | |||
408 | static void team_port_list_del_port(struct team *team, | ||
409 | struct team_port *port) | ||
410 | { | ||
411 | int rm_index = port->index; | ||
412 | |||
413 | hlist_del_rcu(&port->hlist); | ||
414 | list_del_rcu(&port->list); | ||
415 | __reconstruct_port_hlist(team, rm_index); | ||
416 | team->port_count--; | ||
417 | } | ||
418 | |||
419 | #define TEAM_VLAN_FEATURES (NETIF_F_ALL_CSUM | NETIF_F_SG | \ | ||
420 | NETIF_F_FRAGLIST | NETIF_F_ALL_TSO | \ | ||
421 | NETIF_F_HIGHDMA | NETIF_F_LRO) | ||
422 | |||
423 | static void __team_compute_features(struct team *team) | ||
424 | { | ||
425 | struct team_port *port; | ||
426 | u32 vlan_features = TEAM_VLAN_FEATURES; | ||
427 | unsigned short max_hard_header_len = ETH_HLEN; | ||
428 | |||
429 | list_for_each_entry(port, &team->port_list, list) { | ||
430 | vlan_features = netdev_increment_features(vlan_features, | ||
431 | port->dev->vlan_features, | ||
432 | TEAM_VLAN_FEATURES); | ||
433 | |||
434 | if (port->dev->hard_header_len > max_hard_header_len) | ||
435 | max_hard_header_len = port->dev->hard_header_len; | ||
436 | } | ||
437 | |||
438 | team->dev->vlan_features = vlan_features; | ||
439 | team->dev->hard_header_len = max_hard_header_len; | ||
440 | |||
441 | netdev_change_features(team->dev); | ||
442 | } | ||
443 | |||
444 | static void team_compute_features(struct team *team) | ||
445 | { | ||
446 | spin_lock(&team->lock); | ||
447 | __team_compute_features(team); | ||
448 | spin_unlock(&team->lock); | ||
449 | } | ||
450 | |||
451 | static int team_port_enter(struct team *team, struct team_port *port) | ||
452 | { | ||
453 | int err = 0; | ||
454 | |||
455 | dev_hold(team->dev); | ||
456 | port->dev->priv_flags |= IFF_TEAM_PORT; | ||
457 | if (team->ops.port_enter) { | ||
458 | err = team->ops.port_enter(team, port); | ||
459 | if (err) { | ||
460 | netdev_err(team->dev, "Device %s failed to enter team mode\n", | ||
461 | port->dev->name); | ||
462 | goto err_port_enter; | ||
463 | } | ||
464 | } | ||
465 | |||
466 | return 0; | ||
467 | |||
468 | err_port_enter: | ||
469 | port->dev->priv_flags &= ~IFF_TEAM_PORT; | ||
470 | dev_put(team->dev); | ||
471 | |||
472 | return err; | ||
473 | } | ||
474 | |||
475 | static void team_port_leave(struct team *team, struct team_port *port) | ||
476 | { | ||
477 | if (team->ops.port_leave) | ||
478 | team->ops.port_leave(team, port); | ||
479 | port->dev->priv_flags &= ~IFF_TEAM_PORT; | ||
480 | dev_put(team->dev); | ||
481 | } | ||
482 | |||
483 | static void __team_port_change_check(struct team_port *port, bool linkup); | ||
484 | |||
485 | static int team_port_add(struct team *team, struct net_device *port_dev) | ||
486 | { | ||
487 | struct net_device *dev = team->dev; | ||
488 | struct team_port *port; | ||
489 | char *portname = port_dev->name; | ||
490 | int err; | ||
491 | |||
492 | if (port_dev->flags & IFF_LOOPBACK || | ||
493 | port_dev->type != ARPHRD_ETHER) { | ||
494 | netdev_err(dev, "Device %s is of an unsupported type\n", | ||
495 | portname); | ||
496 | return -EINVAL; | ||
497 | } | ||
498 | |||
499 | if (team_port_exists(port_dev)) { | ||
500 | netdev_err(dev, "Device %s is already a port " | ||
501 | "of a team device\n", portname); | ||
502 | return -EBUSY; | ||
503 | } | ||
504 | |||
505 | if (port_dev->flags & IFF_UP) { | ||
506 | netdev_err(dev, "Device %s is up. Set it down before adding it as a team port\n", | ||
507 | portname); | ||
508 | return -EBUSY; | ||
509 | } | ||
510 | |||
511 | port = kzalloc(sizeof(struct team_port), GFP_KERNEL); | ||
512 | if (!port) | ||
513 | return -ENOMEM; | ||
514 | |||
515 | port->dev = port_dev; | ||
516 | port->team = team; | ||
517 | |||
518 | port->orig.mtu = port_dev->mtu; | ||
519 | err = dev_set_mtu(port_dev, dev->mtu); | ||
520 | if (err) { | ||
521 | netdev_dbg(dev, "Error %d calling dev_set_mtu\n", err); | ||
522 | goto err_set_mtu; | ||
523 | } | ||
524 | |||
525 | memcpy(port->orig.dev_addr, port_dev->dev_addr, ETH_ALEN); | ||
526 | |||
527 | err = team_port_enter(team, port); | ||
528 | if (err) { | ||
529 | netdev_err(dev, "Device %s failed to enter team mode\n", | ||
530 | portname); | ||
531 | goto err_port_enter; | ||
532 | } | ||
533 | |||
534 | err = dev_open(port_dev); | ||
535 | if (err) { | ||
536 | netdev_dbg(dev, "Device %s opening failed\n", | ||
537 | portname); | ||
538 | goto err_dev_open; | ||
539 | } | ||
540 | |||
541 | err = netdev_set_master(port_dev, dev); | ||
542 | if (err) { | ||
543 | netdev_err(dev, "Device %s failed to set master\n", portname); | ||
544 | goto err_set_master; | ||
545 | } | ||
546 | |||
547 | err = netdev_rx_handler_register(port_dev, team_handle_frame, | ||
548 | port); | ||
549 | if (err) { | ||
550 | netdev_err(dev, "Device %s failed to register rx_handler\n", | ||
551 | portname); | ||
552 | goto err_handler_register; | ||
553 | } | ||
554 | |||
555 | team_port_list_add_port(team, port); | ||
556 | team_adjust_ops(team); | ||
557 | __team_compute_features(team); | ||
558 | __team_port_change_check(port, !!netif_carrier_ok(port_dev)); | ||
559 | |||
560 | netdev_info(dev, "Port device %s added\n", portname); | ||
561 | |||
562 | return 0; | ||
563 | |||
564 | err_handler_register: | ||
565 | netdev_set_master(port_dev, NULL); | ||
566 | |||
567 | err_set_master: | ||
568 | dev_close(port_dev); | ||
569 | |||
570 | err_dev_open: | ||
571 | team_port_leave(team, port); | ||
572 | team_port_set_orig_mac(port); | ||
573 | |||
574 | err_port_enter: | ||
575 | dev_set_mtu(port_dev, port->orig.mtu); | ||
576 | |||
577 | err_set_mtu: | ||
578 | kfree(port); | ||
579 | |||
580 | return err; | ||
581 | } | ||
582 | |||
583 | static int team_port_del(struct team *team, struct net_device *port_dev) | ||
584 | { | ||
585 | struct net_device *dev = team->dev; | ||
586 | struct team_port *port; | ||
587 | char *portname = port_dev->name; | ||
588 | |||
589 | port = team_port_get_rtnl(port_dev); | ||
590 | if (!port || !team_port_find(team, port)) { | ||
591 | netdev_err(dev, "Device %s does not act as a port of this team\n", | ||
592 | portname); | ||
593 | return -ENOENT; | ||
594 | } | ||
595 | |||
596 | __team_port_change_check(port, false); | ||
597 | team_port_list_del_port(team, port); | ||
598 | team_adjust_ops(team); | ||
599 | netdev_rx_handler_unregister(port_dev); | ||
600 | netdev_set_master(port_dev, NULL); | ||
601 | dev_close(port_dev); | ||
602 | team_port_leave(team, port); | ||
603 | team_port_set_orig_mac(port); | ||
604 | dev_set_mtu(port_dev, port->orig.mtu); | ||
605 | synchronize_rcu(); | ||
606 | kfree(port); | ||
607 | netdev_info(dev, "Port device %s removed\n", portname); | ||
608 | __team_compute_features(team); | ||
609 | |||
610 | return 0; | ||
611 | } | ||
612 | |||
613 | |||
614 | /***************** | ||
615 | * Net device ops | ||
616 | *****************/ | ||
617 | |||
618 | static const char team_no_mode_kind[] = "*NOMODE*"; | ||
619 | |||
620 | static int team_mode_option_get(struct team *team, void *arg) | ||
621 | { | ||
622 | const char **str = arg; | ||
623 | |||
624 | *str = team->mode ? team->mode->kind : team_no_mode_kind; | ||
625 | return 0; | ||
626 | } | ||
627 | |||
628 | static int team_mode_option_set(struct team *team, void *arg) | ||
629 | { | ||
630 | const char **str = arg; | ||
631 | |||
632 | return team_change_mode(team, *str); | ||
633 | } | ||
634 | |||
635 | static struct team_option team_options[] = { | ||
636 | { | ||
637 | .name = "mode", | ||
638 | .type = TEAM_OPTION_TYPE_STRING, | ||
639 | .getter = team_mode_option_get, | ||
640 | .setter = team_mode_option_set, | ||
641 | }, | ||
642 | }; | ||
643 | |||
644 | static int team_init(struct net_device *dev) | ||
645 | { | ||
646 | struct team *team = netdev_priv(dev); | ||
647 | int i; | ||
648 | |||
649 | team->dev = dev; | ||
650 | spin_lock_init(&team->lock); | ||
651 | |||
652 | team->pcpu_stats = alloc_percpu(struct team_pcpu_stats); | ||
653 | if (!team->pcpu_stats) | ||
654 | return -ENOMEM; | ||
655 | |||
656 | for (i = 0; i < TEAM_PORT_HASHENTRIES; i++) | ||
657 | INIT_HLIST_HEAD(&team->port_hlist[i]); | ||
658 | INIT_LIST_HEAD(&team->port_list); | ||
659 | |||
660 | team_adjust_ops(team); | ||
661 | |||
662 | INIT_LIST_HEAD(&team->option_list); | ||
663 | team_options_register(team, team_options, ARRAY_SIZE(team_options)); | ||
664 | netif_carrier_off(dev); | ||
665 | |||
666 | return 0; | ||
667 | } | ||
668 | |||
669 | static void team_uninit(struct net_device *dev) | ||
670 | { | ||
671 | struct team *team = netdev_priv(dev); | ||
672 | struct team_port *port; | ||
673 | struct team_port *tmp; | ||
674 | |||
675 | spin_lock(&team->lock); | ||
676 | list_for_each_entry_safe(port, tmp, &team->port_list, list) | ||
677 | team_port_del(team, port->dev); | ||
678 | |||
679 | __team_change_mode(team, NULL); /* cleanup */ | ||
680 | __team_options_unregister(team, team_options, ARRAY_SIZE(team_options)); | ||
681 | spin_unlock(&team->lock); | ||
682 | } | ||
683 | |||
684 | static void team_destructor(struct net_device *dev) | ||
685 | { | ||
686 | struct team *team = netdev_priv(dev); | ||
687 | |||
688 | free_percpu(team->pcpu_stats); | ||
689 | free_netdev(dev); | ||
690 | } | ||
691 | |||
692 | static int team_open(struct net_device *dev) | ||
693 | { | ||
694 | netif_carrier_on(dev); | ||
695 | return 0; | ||
696 | } | ||
697 | |||
698 | static int team_close(struct net_device *dev) | ||
699 | { | ||
700 | netif_carrier_off(dev); | ||
701 | return 0; | ||
702 | } | ||
703 | |||
704 | /* | ||
705 | * note: already called with rcu_read_lock | ||
706 | */ | ||
707 | static netdev_tx_t team_xmit(struct sk_buff *skb, struct net_device *dev) | ||
708 | { | ||
709 | struct team *team = netdev_priv(dev); | ||
710 | bool tx_success = false; | ||
711 | unsigned int len = skb->len; | ||
712 | |||
713 | tx_success = team->ops.transmit(team, skb); | ||
714 | if (tx_success) { | ||
715 | struct team_pcpu_stats *pcpu_stats; | ||
716 | |||
717 | pcpu_stats = this_cpu_ptr(team->pcpu_stats); | ||
718 | u64_stats_update_begin(&pcpu_stats->syncp); | ||
719 | pcpu_stats->tx_packets++; | ||
720 | pcpu_stats->tx_bytes += len; | ||
721 | u64_stats_update_end(&pcpu_stats->syncp); | ||
722 | } else { | ||
723 | this_cpu_inc(team->pcpu_stats->tx_dropped); | ||
724 | } | ||
725 | |||
726 | return NETDEV_TX_OK; | ||
727 | } | ||
728 | |||
729 | static void team_change_rx_flags(struct net_device *dev, int change) | ||
730 | { | ||
731 | struct team *team = netdev_priv(dev); | ||
732 | struct team_port *port; | ||
733 | int inc; | ||
734 | |||
735 | rcu_read_lock(); | ||
736 | list_for_each_entry_rcu(port, &team->port_list, list) { | ||
737 | if (change & IFF_PROMISC) { | ||
738 | inc = dev->flags & IFF_PROMISC ? 1 : -1; | ||
739 | dev_set_promiscuity(port->dev, inc); | ||
740 | } | ||
741 | if (change & IFF_ALLMULTI) { | ||
742 | inc = dev->flags & IFF_ALLMULTI ? 1 : -1; | ||
743 | dev_set_allmulti(port->dev, inc); | ||
744 | } | ||
745 | } | ||
746 | rcu_read_unlock(); | ||
747 | } | ||
748 | |||
749 | static void team_set_rx_mode(struct net_device *dev) | ||
750 | { | ||
751 | struct team *team = netdev_priv(dev); | ||
752 | struct team_port *port; | ||
753 | |||
754 | rcu_read_lock(); | ||
755 | list_for_each_entry_rcu(port, &team->port_list, list) { | ||
756 | dev_uc_sync(port->dev, dev); | ||
757 | dev_mc_sync(port->dev, dev); | ||
758 | } | ||
759 | rcu_read_unlock(); | ||
760 | } | ||
761 | |||
762 | static int team_set_mac_address(struct net_device *dev, void *p) | ||
763 | { | ||
764 | struct team *team = netdev_priv(dev); | ||
765 | struct team_port *port; | ||
766 | struct sockaddr *addr = p; | ||
767 | |||
768 | memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN); | ||
769 | rcu_read_lock(); | ||
770 | list_for_each_entry_rcu(port, &team->port_list, list) | ||
771 | if (team->ops.port_change_mac) | ||
772 | team->ops.port_change_mac(team, port); | ||
773 | rcu_read_unlock(); | ||
774 | return 0; | ||
775 | } | ||
776 | |||
777 | static int team_change_mtu(struct net_device *dev, int new_mtu) | ||
778 | { | ||
779 | struct team *team = netdev_priv(dev); | ||
780 | struct team_port *port; | ||
781 | int err; | ||
782 | |||
783 | /* | ||
784 | * Although this is a reader, it's guarded by the team lock. It's not possible | ||
785 | * to traverse the list in reverse under rcu_read_lock. | ||
786 | */ | ||
787 | spin_lock(&team->lock); | ||
788 | list_for_each_entry(port, &team->port_list, list) { | ||
789 | err = dev_set_mtu(port->dev, new_mtu); | ||
790 | if (err) { | ||
791 | netdev_err(dev, "Device %s failed to change mtu", | ||
792 | port->dev->name); | ||
793 | goto unwind; | ||
794 | } | ||
795 | } | ||
796 | spin_unlock(&team->lock); | ||
797 | |||
798 | dev->mtu = new_mtu; | ||
799 | |||
800 | return 0; | ||
801 | |||
802 | unwind: | ||
803 | list_for_each_entry_continue_reverse(port, &team->port_list, list) | ||
804 | dev_set_mtu(port->dev, dev->mtu); | ||
805 | spin_unlock(&team->lock); | ||
806 | |||
807 | return err; | ||
808 | } | ||
809 | |||
810 | static struct rtnl_link_stats64 * | ||
811 | team_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) | ||
812 | { | ||
813 | struct team *team = netdev_priv(dev); | ||
814 | struct team_pcpu_stats *p; | ||
815 | u64 rx_packets, rx_bytes, rx_multicast, tx_packets, tx_bytes; | ||
816 | u32 rx_dropped = 0, tx_dropped = 0; | ||
817 | unsigned int start; | ||
818 | int i; | ||
819 | |||
820 | for_each_possible_cpu(i) { | ||
821 | p = per_cpu_ptr(team->pcpu_stats, i); | ||
822 | do { | ||
823 | start = u64_stats_fetch_begin_bh(&p->syncp); | ||
824 | rx_packets = p->rx_packets; | ||
825 | rx_bytes = p->rx_bytes; | ||
826 | rx_multicast = p->rx_multicast; | ||
827 | tx_packets = p->tx_packets; | ||
828 | tx_bytes = p->tx_bytes; | ||
829 | } while (u64_stats_fetch_retry_bh(&p->syncp, start)); | ||
830 | |||
831 | stats->rx_packets += rx_packets; | ||
832 | stats->rx_bytes += rx_bytes; | ||
833 | stats->multicast += rx_multicast; | ||
834 | stats->tx_packets += tx_packets; | ||
835 | stats->tx_bytes += tx_bytes; | ||
836 | /* | ||
837 | * rx_dropped & tx_dropped are u32, updated | ||
838 | * without syncp protection. | ||
839 | */ | ||
840 | rx_dropped += p->rx_dropped; | ||
841 | tx_dropped += p->tx_dropped; | ||
842 | } | ||
843 | stats->rx_dropped = rx_dropped; | ||
844 | stats->tx_dropped = tx_dropped; | ||
845 | return stats; | ||
846 | } | ||
847 | |||
848 | static void team_vlan_rx_add_vid(struct net_device *dev, uint16_t vid) | ||
849 | { | ||
850 | struct team *team = netdev_priv(dev); | ||
851 | struct team_port *port; | ||
852 | |||
853 | rcu_read_lock(); | ||
854 | list_for_each_entry_rcu(port, &team->port_list, list) { | ||
855 | const struct net_device_ops *ops = port->dev->netdev_ops; | ||
856 | |||
857 | if (ops->ndo_vlan_rx_add_vid) | ||
858 | ops->ndo_vlan_rx_add_vid(port->dev, vid); | ||
859 | } | ||
860 | rcu_read_unlock(); | ||
861 | } | ||
862 | |||
863 | static void team_vlan_rx_kill_vid(struct net_device *dev, uint16_t vid) | ||
864 | { | ||
865 | struct team *team = netdev_priv(dev); | ||
866 | struct team_port *port; | ||
867 | |||
868 | rcu_read_lock(); | ||
869 | list_for_each_entry_rcu(port, &team->port_list, list) { | ||
870 | const struct net_device_ops *ops = port->dev->netdev_ops; | ||
871 | |||
872 | if (ops->ndo_vlan_rx_kill_vid) | ||
873 | ops->ndo_vlan_rx_kill_vid(port->dev, vid); | ||
874 | } | ||
875 | rcu_read_unlock(); | ||
876 | } | ||
877 | |||
878 | static int team_add_slave(struct net_device *dev, struct net_device *port_dev) | ||
879 | { | ||
880 | struct team *team = netdev_priv(dev); | ||
881 | int err; | ||
882 | |||
883 | spin_lock(&team->lock); | ||
884 | err = team_port_add(team, port_dev); | ||
885 | spin_unlock(&team->lock); | ||
886 | return err; | ||
887 | } | ||
888 | |||
889 | static int team_del_slave(struct net_device *dev, struct net_device *port_dev) | ||
890 | { | ||
891 | struct team *team = netdev_priv(dev); | ||
892 | int err; | ||
893 | |||
894 | spin_lock(&team->lock); | ||
895 | err = team_port_del(team, port_dev); | ||
896 | spin_unlock(&team->lock); | ||
897 | return err; | ||
898 | } | ||
899 | |||
900 | static const struct net_device_ops team_netdev_ops = { | ||
901 | .ndo_init = team_init, | ||
902 | .ndo_uninit = team_uninit, | ||
903 | .ndo_open = team_open, | ||
904 | .ndo_stop = team_close, | ||
905 | .ndo_start_xmit = team_xmit, | ||
906 | .ndo_change_rx_flags = team_change_rx_flags, | ||
907 | .ndo_set_rx_mode = team_set_rx_mode, | ||
908 | .ndo_set_mac_address = team_set_mac_address, | ||
909 | .ndo_change_mtu = team_change_mtu, | ||
910 | .ndo_get_stats64 = team_get_stats64, | ||
911 | .ndo_vlan_rx_add_vid = team_vlan_rx_add_vid, | ||
912 | .ndo_vlan_rx_kill_vid = team_vlan_rx_kill_vid, | ||
913 | .ndo_add_slave = team_add_slave, | ||
914 | .ndo_del_slave = team_del_slave, | ||
915 | }; | ||
916 | |||
917 | |||
918 | /*********************** | ||
919 | * rt netlink interface | ||
920 | ***********************/ | ||
921 | |||
922 | static void team_setup(struct net_device *dev) | ||
923 | { | ||
924 | ether_setup(dev); | ||
925 | |||
926 | dev->netdev_ops = &team_netdev_ops; | ||
927 | dev->destructor = team_destructor; | ||
928 | dev->tx_queue_len = 0; | ||
929 | dev->flags |= IFF_MULTICAST; | ||
930 | dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING); | ||
931 | |||
932 | /* | ||
933 | * Indicate we support unicast address filtering. That way the core won't | ||
934 | * switch us to promiscuous mode in case a unicast address is added. | ||
935 | * Leave this up to the underlying drivers. | ||
936 | */ | ||
937 | dev->priv_flags |= IFF_UNICAST_FLT; | ||
938 | |||
939 | dev->features |= NETIF_F_LLTX; | ||
940 | dev->features |= NETIF_F_GRO; | ||
941 | dev->hw_features = NETIF_F_HW_VLAN_TX | | ||
942 | NETIF_F_HW_VLAN_RX | | ||
943 | NETIF_F_HW_VLAN_FILTER; | ||
944 | |||
945 | dev->features |= dev->hw_features; | ||
946 | } | ||
947 | |||
948 | static int team_newlink(struct net *src_net, struct net_device *dev, | ||
949 | struct nlattr *tb[], struct nlattr *data[]) | ||
950 | { | ||
951 | int err; | ||
952 | |||
953 | if (tb[IFLA_ADDRESS] == NULL) | ||
954 | random_ether_addr(dev->dev_addr); | ||
955 | |||
956 | err = register_netdevice(dev); | ||
957 | if (err) | ||
958 | return err; | ||
959 | |||
960 | return 0; | ||
961 | } | ||
962 | |||
963 | static int team_validate(struct nlattr *tb[], struct nlattr *data[]) | ||
964 | { | ||
965 | if (tb[IFLA_ADDRESS]) { | ||
966 | if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN) | ||
967 | return -EINVAL; | ||
968 | if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS]))) | ||
969 | return -EADDRNOTAVAIL; | ||
970 | } | ||
971 | return 0; | ||
972 | } | ||
973 | |||
974 | static struct rtnl_link_ops team_link_ops __read_mostly = { | ||
975 | .kind = DRV_NAME, | ||
976 | .priv_size = sizeof(struct team), | ||
977 | .setup = team_setup, | ||
978 | .newlink = team_newlink, | ||
979 | .validate = team_validate, | ||
980 | }; | ||
981 | |||
982 | |||
983 | /*********************************** | ||
984 | * Generic netlink custom interface | ||
985 | ***********************************/ | ||
986 | |||
987 | static struct genl_family team_nl_family = { | ||
988 | .id = GENL_ID_GENERATE, | ||
989 | .name = TEAM_GENL_NAME, | ||
990 | .version = TEAM_GENL_VERSION, | ||
991 | .maxattr = TEAM_ATTR_MAX, | ||
992 | .netnsok = true, | ||
993 | }; | ||
994 | |||
995 | static const struct nla_policy team_nl_policy[TEAM_ATTR_MAX + 1] = { | ||
996 | [TEAM_ATTR_UNSPEC] = { .type = NLA_UNSPEC, }, | ||
997 | [TEAM_ATTR_TEAM_IFINDEX] = { .type = NLA_U32 }, | ||
998 | [TEAM_ATTR_LIST_OPTION] = { .type = NLA_NESTED }, | ||
999 | [TEAM_ATTR_LIST_PORT] = { .type = NLA_NESTED }, | ||
1000 | }; | ||
1001 | |||
1002 | static const struct nla_policy | ||
1003 | team_nl_option_policy[TEAM_ATTR_OPTION_MAX + 1] = { | ||
1004 | [TEAM_ATTR_OPTION_UNSPEC] = { .type = NLA_UNSPEC, }, | ||
1005 | [TEAM_ATTR_OPTION_NAME] = { | ||
1006 | .type = NLA_STRING, | ||
1007 | .len = TEAM_STRING_MAX_LEN, | ||
1008 | }, | ||
1009 | [TEAM_ATTR_OPTION_CHANGED] = { .type = NLA_FLAG }, | ||
1010 | [TEAM_ATTR_OPTION_TYPE] = { .type = NLA_U8 }, | ||
1011 | [TEAM_ATTR_OPTION_DATA] = { | ||
1012 | .type = NLA_BINARY, | ||
1013 | .len = TEAM_STRING_MAX_LEN, | ||
1014 | }, | ||
1015 | }; | ||
1016 | |||
1017 | static int team_nl_cmd_noop(struct sk_buff *skb, struct genl_info *info) | ||
1018 | { | ||
1019 | struct sk_buff *msg; | ||
1020 | void *hdr; | ||
1021 | int err; | ||
1022 | |||
1023 | msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); | ||
1024 | if (!msg) | ||
1025 | return -ENOMEM; | ||
1026 | |||
1027 | hdr = genlmsg_put(msg, info->snd_pid, info->snd_seq, | ||
1028 | &team_nl_family, 0, TEAM_CMD_NOOP); | ||
1029 | if (IS_ERR(hdr)) { | ||
1030 | err = PTR_ERR(hdr); | ||
1031 | goto err_msg_put; | ||
1032 | } | ||
1033 | |||
1034 | genlmsg_end(msg, hdr); | ||
1035 | |||
1036 | return genlmsg_unicast(genl_info_net(info), msg, info->snd_pid); | ||
1037 | |||
1038 | err_msg_put: | ||
1039 | nlmsg_free(msg); | ||
1040 | |||
1041 | return err; | ||
1042 | } | ||
1043 | |||
1044 | /* | ||
1045 | * Netlink cmd functions should be locked by the following two functions. | ||
1046 | * To ensure team_uninit is not called in between, rcu_read_lock is held | ||
1047 | * the whole time. | ||
1048 | */ | ||
1049 | static struct team *team_nl_team_get(struct genl_info *info) | ||
1050 | { | ||
1051 | struct net *net = genl_info_net(info); | ||
1052 | int ifindex; | ||
1053 | struct net_device *dev; | ||
1054 | struct team *team; | ||
1055 | |||
1056 | if (!info->attrs[TEAM_ATTR_TEAM_IFINDEX]) | ||
1057 | return NULL; | ||
1058 | |||
1059 | ifindex = nla_get_u32(info->attrs[TEAM_ATTR_TEAM_IFINDEX]); | ||
1060 | rcu_read_lock(); | ||
1061 | dev = dev_get_by_index_rcu(net, ifindex); | ||
1062 | if (!dev || dev->netdev_ops != &team_netdev_ops) { | ||
1063 | rcu_read_unlock(); | ||
1064 | return NULL; | ||
1065 | } | ||
1066 | |||
1067 | team = netdev_priv(dev); | ||
1068 | spin_lock(&team->lock); | ||
1069 | return team; | ||
1070 | } | ||
1071 | |||
1072 | static void team_nl_team_put(struct team *team) | ||
1073 | { | ||
1074 | spin_unlock(&team->lock); | ||
1075 | rcu_read_unlock(); | ||
1076 | } | ||
1077 | |||
1078 | static int team_nl_send_generic(struct genl_info *info, struct team *team, | ||
1079 | int (*fill_func)(struct sk_buff *skb, | ||
1080 | struct genl_info *info, | ||
1081 | int flags, struct team *team)) | ||
1082 | { | ||
1083 | struct sk_buff *skb; | ||
1084 | int err; | ||
1085 | |||
1086 | skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); | ||
1087 | if (!skb) | ||
1088 | return -ENOMEM; | ||
1089 | |||
1090 | err = fill_func(skb, info, NLM_F_ACK, team); | ||
1091 | if (err < 0) | ||
1092 | goto err_fill; | ||
1093 | |||
1094 | err = genlmsg_unicast(genl_info_net(info), skb, info->snd_pid); | ||
1095 | return err; | ||
1096 | |||
1097 | err_fill: | ||
1098 | nlmsg_free(skb); | ||
1099 | return err; | ||
1100 | } | ||
1101 | |||
1102 | static int team_nl_fill_options_get_changed(struct sk_buff *skb, | ||
1103 | u32 pid, u32 seq, int flags, | ||
1104 | struct team *team, | ||
1105 | struct team_option *changed_option) | ||
1106 | { | ||
1107 | struct nlattr *option_list; | ||
1108 | void *hdr; | ||
1109 | struct team_option *option; | ||
1110 | |||
1111 | hdr = genlmsg_put(skb, pid, seq, &team_nl_family, flags, | ||
1112 | TEAM_CMD_OPTIONS_GET); | ||
1113 | if (IS_ERR(hdr)) | ||
1114 | return PTR_ERR(hdr); | ||
1115 | |||
1116 | NLA_PUT_U32(skb, TEAM_ATTR_TEAM_IFINDEX, team->dev->ifindex); | ||
1117 | option_list = nla_nest_start(skb, TEAM_ATTR_LIST_OPTION); | ||
1118 | if (!option_list) | ||
1119 | return -EMSGSIZE; | ||
1120 | |||
1121 | list_for_each_entry(option, &team->option_list, list) { | ||
1122 | struct nlattr *option_item; | ||
1123 | long arg; | ||
1124 | |||
1125 | option_item = nla_nest_start(skb, TEAM_ATTR_ITEM_OPTION); | ||
1126 | if (!option_item) | ||
1127 | goto nla_put_failure; | ||
1128 | NLA_PUT_STRING(skb, TEAM_ATTR_OPTION_NAME, option->name); | ||
1129 | if (option == changed_option) | ||
1130 | NLA_PUT_FLAG(skb, TEAM_ATTR_OPTION_CHANGED); | ||
1131 | switch (option->type) { | ||
1132 | case TEAM_OPTION_TYPE_U32: | ||
1133 | NLA_PUT_U8(skb, TEAM_ATTR_OPTION_TYPE, NLA_U32); | ||
1134 | team_option_get(team, option, &arg); | ||
1135 | NLA_PUT_U32(skb, TEAM_ATTR_OPTION_DATA, arg); | ||
1136 | break; | ||
1137 | case TEAM_OPTION_TYPE_STRING: | ||
1138 | NLA_PUT_U8(skb, TEAM_ATTR_OPTION_TYPE, NLA_STRING); | ||
1139 | team_option_get(team, option, &arg); | ||
1140 | NLA_PUT_STRING(skb, TEAM_ATTR_OPTION_DATA, | ||
1141 | (char *) arg); | ||
1142 | break; | ||
1143 | default: | ||
1144 | BUG(); | ||
1145 | } | ||
1146 | nla_nest_end(skb, option_item); | ||
1147 | } | ||
1148 | |||
1149 | nla_nest_end(skb, option_list); | ||
1150 | return genlmsg_end(skb, hdr); | ||
1151 | |||
1152 | nla_put_failure: | ||
1153 | genlmsg_cancel(skb, hdr); | ||
1154 | return -EMSGSIZE; | ||
1155 | } | ||
1156 | |||
1157 | static int team_nl_fill_options_get(struct sk_buff *skb, | ||
1158 | struct genl_info *info, int flags, | ||
1159 | struct team *team) | ||
1160 | { | ||
1161 | return team_nl_fill_options_get_changed(skb, info->snd_pid, | ||
1162 | info->snd_seq, NLM_F_ACK, | ||
1163 | team, NULL); | ||
1164 | } | ||
1165 | |||
1166 | static int team_nl_cmd_options_get(struct sk_buff *skb, struct genl_info *info) | ||
1167 | { | ||
1168 | struct team *team; | ||
1169 | int err; | ||
1170 | |||
1171 | team = team_nl_team_get(info); | ||
1172 | if (!team) | ||
1173 | return -EINVAL; | ||
1174 | |||
1175 | err = team_nl_send_generic(info, team, team_nl_fill_options_get); | ||
1176 | |||
1177 | team_nl_team_put(team); | ||
1178 | |||
1179 | return err; | ||
1180 | } | ||
1181 | |||
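/*
 * Illustrative note, not part of this patch: the attribute layout that
 * team_nl_cmd_options_set() below expects from userspace.  For example,
 * switching the team to the "roundrobin" mode nests roughly as:
 *
 *	TEAM_ATTR_TEAM_IFINDEX (u32)   = <ifindex of the team device>
 *	TEAM_ATTR_LIST_OPTION (nested)
 *	    TEAM_ATTR_ITEM_OPTION (nested)
 *	        TEAM_ATTR_OPTION_NAME (string) = "mode"
 *	        TEAM_ATTR_OPTION_TYPE (u8)     = NLA_STRING
 *	        TEAM_ATTR_OPTION_DATA (string) = "roundrobin"
 *
 * This mirrors the parsing done with team_nl_policy and
 * team_nl_option_policy above; it is how userspace (e.g. libteam) is
 * expected to drive the "mode" option registered in team_options[].
 */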
1182 | static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info) | ||
1183 | { | ||
1184 | struct team *team; | ||
1185 | int err = 0; | ||
1186 | int i; | ||
1187 | struct nlattr *nl_option; | ||
1188 | |||
1189 | team = team_nl_team_get(info); | ||
1190 | if (!team) | ||
1191 | return -EINVAL; | ||
1192 | |||
1193 | err = -EINVAL; | ||
1194 | if (!info->attrs[TEAM_ATTR_LIST_OPTION]) { | ||
1195 | err = -EINVAL; | ||
1196 | goto team_put; | ||
1197 | } | ||
1198 | |||
1199 | nla_for_each_nested(nl_option, info->attrs[TEAM_ATTR_LIST_OPTION], i) { | ||
1200 | struct nlattr *mode_attrs[TEAM_ATTR_OPTION_MAX + 1]; | ||
1201 | enum team_option_type opt_type; | ||
1202 | struct team_option *option; | ||
1203 | char *opt_name; | ||
1204 | bool opt_found = false; | ||
1205 | |||
1206 | if (nla_type(nl_option) != TEAM_ATTR_ITEM_OPTION) { | ||
1207 | err = -EINVAL; | ||
1208 | goto team_put; | ||
1209 | } | ||
1210 | err = nla_parse_nested(mode_attrs, TEAM_ATTR_OPTION_MAX, | ||
1211 | nl_option, team_nl_option_policy); | ||
1212 | if (err) | ||
1213 | goto team_put; | ||
1214 | if (!mode_attrs[TEAM_ATTR_OPTION_NAME] || | ||
1215 | !mode_attrs[TEAM_ATTR_OPTION_TYPE] || | ||
1216 | !mode_attrs[TEAM_ATTR_OPTION_DATA]) { | ||
1217 | err = -EINVAL; | ||
1218 | goto team_put; | ||
1219 | } | ||
1220 | switch (nla_get_u8(mode_attrs[TEAM_ATTR_OPTION_TYPE])) { | ||
1221 | case NLA_U32: | ||
1222 | opt_type = TEAM_OPTION_TYPE_U32; | ||
1223 | break; | ||
1224 | case NLA_STRING: | ||
1225 | opt_type = TEAM_OPTION_TYPE_STRING; | ||
1226 | break; | ||
1227 | default: | ||
1228 | goto team_put; | ||
1229 | } | ||
1230 | |||
1231 | opt_name = nla_data(mode_attrs[TEAM_ATTR_OPTION_NAME]); | ||
1232 | list_for_each_entry(option, &team->option_list, list) { | ||
1233 | long arg; | ||
1234 | struct nlattr *opt_data_attr; | ||
1235 | |||
1236 | if (option->type != opt_type || | ||
1237 | strcmp(option->name, opt_name)) | ||
1238 | continue; | ||
1239 | opt_found = true; | ||
1240 | opt_data_attr = mode_attrs[TEAM_ATTR_OPTION_DATA]; | ||
1241 | switch (opt_type) { | ||
1242 | case TEAM_OPTION_TYPE_U32: | ||
1243 | arg = nla_get_u32(opt_data_attr); | ||
1244 | break; | ||
1245 | case TEAM_OPTION_TYPE_STRING: | ||
1246 | arg = (long) nla_data(opt_data_attr); | ||
1247 | break; | ||
1248 | default: | ||
1249 | BUG(); | ||
1250 | } | ||
1251 | err = team_option_set(team, option, &arg); | ||
1252 | if (err) | ||
1253 | goto team_put; | ||
1254 | } | ||
1255 | if (!opt_found) { | ||
1256 | err = -ENOENT; | ||
1257 | goto team_put; | ||
1258 | } | ||
1259 | } | ||
1260 | |||
1261 | team_put: | ||
1262 | team_nl_team_put(team); | ||
1263 | |||
1264 | return err; | ||
1265 | } | ||
1266 | |||
1267 | static int team_nl_fill_port_list_get_changed(struct sk_buff *skb, | ||
1268 | u32 pid, u32 seq, int flags, | ||
1269 | struct team *team, | ||
1270 | struct team_port *changed_port) | ||
1271 | { | ||
1272 | struct nlattr *port_list; | ||
1273 | void *hdr; | ||
1274 | struct team_port *port; | ||
1275 | |||
1276 | hdr = genlmsg_put(skb, pid, seq, &team_nl_family, flags, | ||
1277 | TEAM_CMD_PORT_LIST_GET); | ||
1278 | if (IS_ERR(hdr)) | ||
1279 | return PTR_ERR(hdr); | ||
1280 | |||
1281 | NLA_PUT_U32(skb, TEAM_ATTR_TEAM_IFINDEX, team->dev->ifindex); | ||
1282 | port_list = nla_nest_start(skb, TEAM_ATTR_LIST_PORT); | ||
1283 | if (!port_list) | ||
1284 | return -EMSGSIZE; | ||
1285 | |||
1286 | list_for_each_entry(port, &team->port_list, list) { | ||
1287 | struct nlattr *port_item; | ||
1288 | |||
1289 | port_item = nla_nest_start(skb, TEAM_ATTR_ITEM_PORT); | ||
1290 | if (!port_item) | ||
1291 | goto nla_put_failure; | ||
1292 | NLA_PUT_U32(skb, TEAM_ATTR_PORT_IFINDEX, port->dev->ifindex); | ||
1293 | if (port == changed_port) | ||
1294 | NLA_PUT_FLAG(skb, TEAM_ATTR_PORT_CHANGED); | ||
1295 | if (port->linkup) | ||
1296 | NLA_PUT_FLAG(skb, TEAM_ATTR_PORT_LINKUP); | ||
1297 | NLA_PUT_U32(skb, TEAM_ATTR_PORT_SPEED, port->speed); | ||
1298 | NLA_PUT_U8(skb, TEAM_ATTR_PORT_DUPLEX, port->duplex); | ||
1299 | nla_nest_end(skb, port_item); | ||
1300 | } | ||
1301 | |||
1302 | nla_nest_end(skb, port_list); | ||
1303 | return genlmsg_end(skb, hdr); | ||
1304 | |||
1305 | nla_put_failure: | ||
1306 | genlmsg_cancel(skb, hdr); | ||
1307 | return -EMSGSIZE; | ||
1308 | } | ||
1309 | |||
1310 | static int team_nl_fill_port_list_get(struct sk_buff *skb, | ||
1311 | struct genl_info *info, int flags, | ||
1312 | struct team *team) | ||
1313 | { | ||
1314 | return team_nl_fill_port_list_get_changed(skb, info->snd_pid, | ||
1315 | info->snd_seq, NLM_F_ACK, | ||
1316 | team, NULL); | ||
1317 | } | ||
1318 | |||
1319 | static int team_nl_cmd_port_list_get(struct sk_buff *skb, | ||
1320 | struct genl_info *info) | ||
1321 | { | ||
1322 | struct team *team; | ||
1323 | int err; | ||
1324 | |||
1325 | team = team_nl_team_get(info); | ||
1326 | if (!team) | ||
1327 | return -EINVAL; | ||
1328 | |||
1329 | err = team_nl_send_generic(info, team, team_nl_fill_port_list_get); | ||
1330 | |||
1331 | team_nl_team_put(team); | ||
1332 | |||
1333 | return err; | ||
1334 | } | ||
1335 | |||
1336 | static struct genl_ops team_nl_ops[] = { | ||
1337 | { | ||
1338 | .cmd = TEAM_CMD_NOOP, | ||
1339 | .doit = team_nl_cmd_noop, | ||
1340 | .policy = team_nl_policy, | ||
1341 | }, | ||
1342 | { | ||
1343 | .cmd = TEAM_CMD_OPTIONS_SET, | ||
1344 | .doit = team_nl_cmd_options_set, | ||
1345 | .policy = team_nl_policy, | ||
1346 | .flags = GENL_ADMIN_PERM, | ||
1347 | }, | ||
1348 | { | ||
1349 | .cmd = TEAM_CMD_OPTIONS_GET, | ||
1350 | .doit = team_nl_cmd_options_get, | ||
1351 | .policy = team_nl_policy, | ||
1352 | .flags = GENL_ADMIN_PERM, | ||
1353 | }, | ||
1354 | { | ||
1355 | .cmd = TEAM_CMD_PORT_LIST_GET, | ||
1356 | .doit = team_nl_cmd_port_list_get, | ||
1357 | .policy = team_nl_policy, | ||
1358 | .flags = GENL_ADMIN_PERM, | ||
1359 | }, | ||
1360 | }; | ||
1361 | |||
1362 | static struct genl_multicast_group team_change_event_mcgrp = { | ||
1363 | .name = TEAM_GENL_CHANGE_EVENT_MC_GRP_NAME, | ||
1364 | }; | ||
1365 | |||
1366 | static int team_nl_send_event_options_get(struct team *team, | ||
1367 | struct team_option *changed_option) | ||
1368 | { | ||
1369 | struct sk_buff *skb; | ||
1370 | int err; | ||
1371 | struct net *net = dev_net(team->dev); | ||
1372 | |||
1373 | skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); | ||
1374 | if (!skb) | ||
1375 | return -ENOMEM; | ||
1376 | |||
1377 | err = team_nl_fill_options_get_changed(skb, 0, 0, 0, team, | ||
1378 | changed_option); | ||
1379 | if (err < 0) | ||
1380 | goto err_fill; | ||
1381 | |||
1382 | err = genlmsg_multicast_netns(net, skb, 0, team_change_event_mcgrp.id, | ||
1383 | GFP_KERNEL); | ||
1384 | return err; | ||
1385 | |||
1386 | err_fill: | ||
1387 | nlmsg_free(skb); | ||
1388 | return err; | ||
1389 | } | ||
1390 | |||
1391 | static int team_nl_send_event_port_list_get(struct team_port *port) | ||
1392 | { | ||
1393 | struct sk_buff *skb; | ||
1394 | int err; | ||
1395 | struct net *net = dev_net(port->team->dev); | ||
1396 | |||
1397 | skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); | ||
1398 | if (!skb) | ||
1399 | return -ENOMEM; | ||
1400 | |||
1401 | err = team_nl_fill_port_list_get_changed(skb, 0, 0, 0, | ||
1402 | port->team, port); | ||
1403 | if (err < 0) | ||
1404 | goto err_fill; | ||
1405 | |||
1406 | err = genlmsg_multicast_netns(net, skb, 0, team_change_event_mcgrp.id, | ||
1407 | GFP_KERNEL); | ||
1408 | return err; | ||
1409 | |||
1410 | err_fill: | ||
1411 | nlmsg_free(skb); | ||
1412 | return err; | ||
1413 | } | ||
1414 | |||
1415 | static int team_nl_init(void) | ||
1416 | { | ||
1417 | int err; | ||
1418 | |||
1419 | err = genl_register_family_with_ops(&team_nl_family, team_nl_ops, | ||
1420 | ARRAY_SIZE(team_nl_ops)); | ||
1421 | if (err) | ||
1422 | return err; | ||
1423 | |||
1424 | err = genl_register_mc_group(&team_nl_family, &team_change_event_mcgrp); | ||
1425 | if (err) | ||
1426 | goto err_change_event_grp_reg; | ||
1427 | |||
1428 | return 0; | ||
1429 | |||
1430 | err_change_event_grp_reg: | ||
1431 | genl_unregister_family(&team_nl_family); | ||
1432 | |||
1433 | return err; | ||
1434 | } | ||
1435 | |||
1436 | static void team_nl_fini(void) | ||
1437 | { | ||
1438 | genl_unregister_family(&team_nl_family); | ||
1439 | } | ||
1440 | |||
1441 | |||
1442 | /****************** | ||
1443 | * Change checkers | ||
1444 | ******************/ | ||
1445 | |||
1446 | static void __team_options_change_check(struct team *team, | ||
1447 | struct team_option *changed_option) | ||
1448 | { | ||
1449 | int err; | ||
1450 | |||
1451 | err = team_nl_send_event_options_get(team, changed_option); | ||
1452 | if (err) | ||
1453 | netdev_warn(team->dev, "Failed to send options change via netlink\n"); | ||
1454 | } | ||
1455 | |||
1456 | /* rtnl lock is held */ | ||
1457 | static void __team_port_change_check(struct team_port *port, bool linkup) | ||
1458 | { | ||
1459 | int err; | ||
1460 | |||
1461 | if (port->linkup == linkup) | ||
1462 | return; | ||
1463 | |||
1464 | port->linkup = linkup; | ||
1465 | if (linkup) { | ||
1466 | struct ethtool_cmd ecmd; | ||
1467 | |||
1468 | err = __ethtool_get_settings(port->dev, &ecmd); | ||
1469 | if (!err) { | ||
1470 | port->speed = ethtool_cmd_speed(&ecmd); | ||
1471 | port->duplex = ecmd.duplex; | ||
1472 | goto send_event; | ||
1473 | } | ||
1474 | } | ||
1475 | port->speed = 0; | ||
1476 | port->duplex = 0; | ||
1477 | |||
1478 | send_event: | ||
1479 | err = team_nl_send_event_port_list_get(port); | ||
1480 | if (err) | ||
1481 | netdev_warn(port->team->dev, "Failed to send port change of device %s via netlink\n", | ||
1482 | port->dev->name); | ||
1483 | |||
1484 | } | ||
1485 | |||
1486 | static void team_port_change_check(struct team_port *port, bool linkup) | ||
1487 | { | ||
1488 | struct team *team = port->team; | ||
1489 | |||
1490 | spin_lock(&team->lock); | ||
1491 | __team_port_change_check(port, linkup); | ||
1492 | spin_unlock(&team->lock); | ||
1493 | } | ||
1494 | |||
1495 | /************************************ | ||
1496 | * Net device notifier event handler | ||
1497 | ************************************/ | ||
1498 | |||
1499 | static int team_device_event(struct notifier_block *unused, | ||
1500 | unsigned long event, void *ptr) | ||
1501 | { | ||
1502 | struct net_device *dev = (struct net_device *) ptr; | ||
1503 | struct team_port *port; | ||
1504 | |||
1505 | port = team_port_get_rtnl(dev); | ||
1506 | if (!port) | ||
1507 | return NOTIFY_DONE; | ||
1508 | |||
1509 | switch (event) { | ||
1510 | case NETDEV_UP: | ||
1511 | if (netif_carrier_ok(dev)) | ||
1512 | team_port_change_check(port, true); | ||
1513 | case NETDEV_DOWN: | ||
1514 | team_port_change_check(port, false); | ||
1515 | case NETDEV_CHANGE: | ||
1516 | if (netif_running(port->dev)) | ||
1517 | team_port_change_check(port, | ||
1518 | !!netif_carrier_ok(port->dev)); | ||
1519 | break; | ||
1520 | case NETDEV_UNREGISTER: | ||
1521 | team_del_slave(port->team->dev, dev); | ||
1522 | break; | ||
1523 | case NETDEV_FEAT_CHANGE: | ||
1524 | team_compute_features(port->team); | ||
1525 | break; | ||
1526 | case NETDEV_CHANGEMTU: | ||
1527 | /* Forbid changing the MTU of the underlying device */ | ||
1528 | return NOTIFY_BAD; | ||
1529 | case NETDEV_PRE_TYPE_CHANGE: | ||
1530 | /* Forbid changing the type of the underlying device */ | ||
1531 | return NOTIFY_BAD; | ||
1532 | } | ||
1533 | return NOTIFY_DONE; | ||
1534 | } | ||
1535 | |||
1536 | static struct notifier_block team_notifier_block __read_mostly = { | ||
1537 | .notifier_call = team_device_event, | ||
1538 | }; | ||
1539 | |||
1540 | |||
1541 | /*********************** | ||
1542 | * Module init and exit | ||
1543 | ***********************/ | ||
1544 | |||
1545 | static int __init team_module_init(void) | ||
1546 | { | ||
1547 | int err; | ||
1548 | |||
1549 | register_netdevice_notifier(&team_notifier_block); | ||
1550 | |||
1551 | err = rtnl_link_register(&team_link_ops); | ||
1552 | if (err) | ||
1553 | goto err_rtnl_reg; | ||
1554 | |||
1555 | err = team_nl_init(); | ||
1556 | if (err) | ||
1557 | goto err_nl_init; | ||
1558 | |||
1559 | return 0; | ||
1560 | |||
1561 | err_nl_init: | ||
1562 | rtnl_link_unregister(&team_link_ops); | ||
1563 | |||
1564 | err_rtnl_reg: | ||
1565 | unregister_netdevice_notifier(&team_notifier_block); | ||
1566 | |||
1567 | return err; | ||
1568 | } | ||
1569 | |||
1570 | static void __exit team_module_exit(void) | ||
1571 | { | ||
1572 | team_nl_fini(); | ||
1573 | rtnl_link_unregister(&team_link_ops); | ||
1574 | unregister_netdevice_notifier(&team_notifier_block); | ||
1575 | } | ||
1576 | |||
1577 | module_init(team_module_init); | ||
1578 | module_exit(team_module_exit); | ||
1579 | |||
1580 | MODULE_LICENSE("GPL v2"); | ||
1581 | MODULE_AUTHOR("Jiri Pirko <jpirko@redhat.com>"); | ||
1582 | MODULE_DESCRIPTION("Ethernet team device driver"); | ||
1583 | MODULE_ALIAS_RTNL_LINK(DRV_NAME); | ||
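
Before the two mode modules, a minimal sketch of the plug-in pattern they both follow: a mode fills in struct team_mode and team_mode_ops (declared in include/linux/if_team.h later in this patch) and registers itself at module init with team_mode_register(). The hypothetical "null" mode below is not part of the patch; it simply drops every frame and exists only to show the registration boilerplate in isolation.

/* Hypothetical illustration only -- not part of this patch.
 * A do-nothing team mode: every transmitted frame is dropped.  It shows
 * the minimal team_mode/team_mode_ops registration pattern that the
 * activebackup and roundrobin modes below both follow. */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/if_team.h>

static bool null_transmit(struct team *team, struct sk_buff *skb)
{
	dev_kfree_skb_any(skb);		/* no port selection at all */
	return false;
}

static const struct team_mode_ops null_mode_ops = {
	.transmit	= null_transmit,
};

static struct team_mode null_mode = {
	.kind		= "null",
	.owner		= THIS_MODULE,
	.ops		= &null_mode_ops,
};

static int __init null_init_module(void)
{
	return team_mode_register(&null_mode);
}

static void __exit null_cleanup_module(void)
{
	team_mode_unregister(&null_mode);
}

module_init(null_init_module);
module_exit(null_cleanup_module);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Do-nothing example mode for team (illustration only)");
MODULE_ALIAS("team-mode-null");
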
diff --git a/drivers/net/team/team_mode_activebackup.c b/drivers/net/team/team_mode_activebackup.c new file mode 100644 index 000000000000..6fe920c440b3 --- /dev/null +++ b/drivers/net/team/team_mode_activebackup.c | |||
@@ -0,0 +1,137 @@ | |||
1 | /* | ||
2 | * drivers/net/team/team_mode_activebackup.c - Active-backup mode for team | ||
3 | * Copyright (c) 2011 Jiri Pirko <jpirko@redhat.com> | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License as published by | ||
7 | * the Free Software Foundation; either version 2 of the License, or | ||
8 | * (at your option) any later version. | ||
9 | */ | ||
10 | |||
11 | #include <linux/kernel.h> | ||
12 | #include <linux/types.h> | ||
13 | #include <linux/module.h> | ||
14 | #include <linux/init.h> | ||
15 | #include <linux/errno.h> | ||
16 | #include <linux/netdevice.h> | ||
17 | #include <net/rtnetlink.h> | ||
18 | #include <linux/if_team.h> | ||
19 | |||
20 | struct ab_priv { | ||
21 | struct team_port __rcu *active_port; | ||
22 | }; | ||
23 | |||
24 | static struct ab_priv *ab_priv(struct team *team) | ||
25 | { | ||
26 | return (struct ab_priv *) &team->mode_priv; | ||
27 | } | ||
28 | |||
29 | static rx_handler_result_t ab_receive(struct team *team, struct team_port *port, | ||
30 | struct sk_buff *skb) { | ||
31 | struct team_port *active_port; | ||
32 | |||
33 | active_port = rcu_dereference(ab_priv(team)->active_port); | ||
34 | if (active_port != port) | ||
35 | return RX_HANDLER_EXACT; | ||
36 | return RX_HANDLER_ANOTHER; | ||
37 | } | ||
38 | |||
39 | static bool ab_transmit(struct team *team, struct sk_buff *skb) | ||
40 | { | ||
41 | struct team_port *active_port; | ||
42 | |||
43 | active_port = rcu_dereference(ab_priv(team)->active_port); | ||
44 | if (unlikely(!active_port)) | ||
45 | goto drop; | ||
46 | skb->dev = active_port->dev; | ||
47 | if (dev_queue_xmit(skb)) | ||
48 | return false; | ||
49 | return true; | ||
50 | |||
51 | drop: | ||
52 | dev_kfree_skb_any(skb); | ||
53 | return false; | ||
54 | } | ||
55 | |||
56 | static void ab_port_leave(struct team *team, struct team_port *port) | ||
57 | { | ||
58 | if (ab_priv(team)->active_port == port) | ||
59 | rcu_assign_pointer(ab_priv(team)->active_port, NULL); | ||
60 | } | ||
61 | |||
62 | static int ab_active_port_get(struct team *team, void *arg) | ||
63 | { | ||
64 | u32 *ifindex = arg; | ||
65 | |||
66 | *ifindex = 0; | ||
67 | if (ab_priv(team)->active_port) | ||
68 | *ifindex = ab_priv(team)->active_port->dev->ifindex; | ||
69 | return 0; | ||
70 | } | ||
71 | |||
72 | static int ab_active_port_set(struct team *team, void *arg) | ||
73 | { | ||
74 | u32 *ifindex = arg; | ||
75 | struct team_port *port; | ||
76 | |||
77 | list_for_each_entry_rcu(port, &team->port_list, list) { | ||
78 | if (port->dev->ifindex == *ifindex) { | ||
79 | rcu_assign_pointer(ab_priv(team)->active_port, port); | ||
80 | return 0; | ||
81 | } | ||
82 | } | ||
83 | return -ENOENT; | ||
84 | } | ||
85 | |||
86 | static struct team_option ab_options[] = { | ||
87 | { | ||
88 | .name = "activeport", | ||
89 | .type = TEAM_OPTION_TYPE_U32, | ||
90 | .getter = ab_active_port_get, | ||
91 | .setter = ab_active_port_set, | ||
92 | }, | ||
93 | }; | ||
94 | |||
95 | int ab_init(struct team *team) | ||
96 | { | ||
97 | team_options_register(team, ab_options, ARRAY_SIZE(ab_options)); | ||
98 | return 0; | ||
99 | } | ||
100 | |||
101 | void ab_exit(struct team *team) | ||
102 | { | ||
103 | team_options_unregister(team, ab_options, ARRAY_SIZE(ab_options)); | ||
104 | } | ||
105 | |||
106 | static const struct team_mode_ops ab_mode_ops = { | ||
107 | .init = ab_init, | ||
108 | .exit = ab_exit, | ||
109 | .receive = ab_receive, | ||
110 | .transmit = ab_transmit, | ||
111 | .port_leave = ab_port_leave, | ||
112 | }; | ||
113 | |||
114 | static struct team_mode ab_mode = { | ||
115 | .kind = "activebackup", | ||
116 | .owner = THIS_MODULE, | ||
117 | .priv_size = sizeof(struct ab_priv), | ||
118 | .ops = &ab_mode_ops, | ||
119 | }; | ||
120 | |||
121 | static int __init ab_init_module(void) | ||
122 | { | ||
123 | return team_mode_register(&ab_mode); | ||
124 | } | ||
125 | |||
126 | static void __exit ab_cleanup_module(void) | ||
127 | { | ||
128 | team_mode_unregister(&ab_mode); | ||
129 | } | ||
130 | |||
131 | module_init(ab_init_module); | ||
132 | module_exit(ab_cleanup_module); | ||
133 | |||
134 | MODULE_LICENSE("GPL v2"); | ||
135 | MODULE_AUTHOR("Jiri Pirko <jpirko@redhat.com>"); | ||
136 | MODULE_DESCRIPTION("Active-backup mode for team"); | ||
137 | MODULE_ALIAS("team-mode-activebackup"); | ||
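
The "activeport" option above is what userspace flips to trigger a failover. Below is a hypothetical userspace sketch, not part of this patch and not the libteam API, of setting it over the "team" generic netlink family by following the nested message layout documented in include/linux/if_team.h further down. It assumes libnl-3, an installed sanitized linux/if_team.h, and that TEAM_ATTR_OPTION_TYPE carries the netlink attribute type constant (NLA_U32 for a u32 option); the authoritative expectations are in team.c's netlink handlers.

/* Hypothetical sketch, assuming libnl-3; error handling kept minimal. */
#include <net/if.h>
#include <netlink/netlink.h>
#include <netlink/msg.h>
#include <netlink/attr.h>
#include <netlink/genl/genl.h>
#include <netlink/genl/ctrl.h>
#include <linux/if_team.h>

static int team_set_active_port(const char *team_name, const char *port_name)
{
	struct nl_sock *sk;
	struct nl_msg *msg;
	struct nlattr *list, *item;
	int family, err = -1;

	sk = nl_socket_alloc();
	if (!sk)
		return -1;
	if (genl_connect(sk) < 0)
		goto out_sock;
	family = genl_ctrl_resolve(sk, TEAM_GENL_NAME);
	if (family < 0)
		goto out_sock;

	msg = nlmsg_alloc();
	if (!msg)
		goto out_sock;
	genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0, 0,
		    TEAM_CMD_OPTIONS_SET, TEAM_GENL_VERSION);
	nla_put_u32(msg, TEAM_ATTR_TEAM_IFINDEX, if_nametoindex(team_name));

	/* TEAM_ATTR_LIST_OPTION -> TEAM_ATTR_ITEM_OPTION -> TEAM_ATTR_OPTION_* */
	list = nla_nest_start(msg, TEAM_ATTR_LIST_OPTION);
	item = nla_nest_start(msg, TEAM_ATTR_ITEM_OPTION);
	nla_put_string(msg, TEAM_ATTR_OPTION_NAME, "activeport");
	nla_put_u8(msg, TEAM_ATTR_OPTION_TYPE, NLA_U32);	/* assumed encoding */
	nla_put_u32(msg, TEAM_ATTR_OPTION_DATA, if_nametoindex(port_name));
	nla_nest_end(msg, item);
	nla_nest_end(msg, list);

	err = nl_send_auto(sk, msg) < 0 ? -1 : 0;
	nlmsg_free(msg);
out_sock:
	nl_socket_free(sk);
	return err;
}
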
diff --git a/drivers/net/team/team_mode_roundrobin.c b/drivers/net/team/team_mode_roundrobin.c new file mode 100644 index 000000000000..a0e8f806331a --- /dev/null +++ b/drivers/net/team/team_mode_roundrobin.c | |||
@@ -0,0 +1,107 @@ | |||
1 | /* | ||
2 | * drivers/net/team/team_mode_roundrobin.c - Round-robin mode for team | ||
3 | * Copyright (c) 2011 Jiri Pirko <jpirko@redhat.com> | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License as published by | ||
7 | * the Free Software Foundation; either version 2 of the License, or | ||
8 | * (at your option) any later version. | ||
9 | */ | ||
10 | |||
11 | #include <linux/kernel.h> | ||
12 | #include <linux/types.h> | ||
13 | #include <linux/module.h> | ||
14 | #include <linux/init.h> | ||
15 | #include <linux/errno.h> | ||
16 | #include <linux/netdevice.h> | ||
17 | #include <linux/if_team.h> | ||
18 | |||
19 | struct rr_priv { | ||
20 | unsigned int sent_packets; | ||
21 | }; | ||
22 | |||
23 | static struct rr_priv *rr_priv(struct team *team) | ||
24 | { | ||
25 | return (struct rr_priv *) &team->mode_priv; | ||
26 | } | ||
27 | |||
28 | static struct team_port *__get_first_port_up(struct team *team, | ||
29 | struct team_port *port) | ||
30 | { | ||
31 | struct team_port *cur; | ||
32 | |||
33 | if (port->linkup) | ||
34 | return port; | ||
35 | cur = port; | ||
36 | list_for_each_entry_continue_rcu(cur, &team->port_list, list) | ||
37 | if (cur->linkup) | ||
38 | return cur; | ||
39 | list_for_each_entry_rcu(cur, &team->port_list, list) { | ||
40 | if (cur == port) | ||
41 | break; | ||
42 | if (cur->linkup) | ||
43 | return cur; | ||
44 | } | ||
45 | return NULL; | ||
46 | } | ||
47 | |||
48 | static bool rr_transmit(struct team *team, struct sk_buff *skb) | ||
49 | { | ||
50 | struct team_port *port; | ||
51 | int port_index; | ||
52 | |||
53 | port_index = rr_priv(team)->sent_packets++ % team->port_count; | ||
54 | port = team_get_port_by_index_rcu(team, port_index); | ||
55 | port = __get_first_port_up(team, port); | ||
56 | if (unlikely(!port)) | ||
57 | goto drop; | ||
58 | skb->dev = port->dev; | ||
59 | if (dev_queue_xmit(skb)) | ||
60 | return false; | ||
61 | return true; | ||
62 | |||
63 | drop: | ||
64 | dev_kfree_skb_any(skb); | ||
65 | return false; | ||
66 | } | ||
67 | |||
68 | static int rr_port_enter(struct team *team, struct team_port *port) | ||
69 | { | ||
70 | return team_port_set_team_mac(port); | ||
71 | } | ||
72 | |||
73 | static void rr_port_change_mac(struct team *team, struct team_port *port) | ||
74 | { | ||
75 | team_port_set_team_mac(port); | ||
76 | } | ||
77 | |||
78 | static const struct team_mode_ops rr_mode_ops = { | ||
79 | .transmit = rr_transmit, | ||
80 | .port_enter = rr_port_enter, | ||
81 | .port_change_mac = rr_port_change_mac, | ||
82 | }; | ||
83 | |||
84 | static struct team_mode rr_mode = { | ||
85 | .kind = "roundrobin", | ||
86 | .owner = THIS_MODULE, | ||
87 | .priv_size = sizeof(struct rr_priv), | ||
88 | .ops = &rr_mode_ops, | ||
89 | }; | ||
90 | |||
91 | static int __init rr_init_module(void) | ||
92 | { | ||
93 | return team_mode_register(&rr_mode); | ||
94 | } | ||
95 | |||
96 | static void __exit rr_cleanup_module(void) | ||
97 | { | ||
98 | team_mode_unregister(&rr_mode); | ||
99 | } | ||
100 | |||
101 | module_init(rr_init_module); | ||
102 | module_exit(rr_cleanup_module); | ||
103 | |||
104 | MODULE_LICENSE("GPL v2"); | ||
105 | MODULE_AUTHOR("Jiri Pirko <jpirko@redhat.com>"); | ||
106 | MODULE_DESCRIPTION("Round-robin mode for team"); | ||
107 | MODULE_ALIAS("team-mode-roundrobin"); | ||
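
A small userspace toy model (not kernel code, not part of this patch) of the transmit path above: sent_packets++ modulo port_count picks a starting index, and the search then walks forward, wrapping once, until it finds a port whose link is up, mirroring __get_first_port_up(). With three ports and port 1 down, traffic alternates between ports 0 and 2.

/* Hypothetical userspace toy, for illustration only. */
#include <stdio.h>
#include <stdbool.h>

static int pick_port(unsigned int *sent_packets, const bool *linkup,
		     int port_count)
{
	int start = (*sent_packets)++ % port_count;
	int i;

	/* Start at the hashed index, walk forward with one wrap-around,
	 * and take the first link-up port, as __get_first_port_up() does. */
	for (i = 0; i < port_count; i++) {
		int idx = (start + i) % port_count;

		if (linkup[idx])
			return idx;
	}
	return -1;	/* no usable port: the skb would be dropped */
}

int main(void)
{
	bool linkup[3] = { true, false, true };	/* port 1 is down */
	unsigned int sent = 0;
	int i;

	for (i = 0; i < 6; i++)
		printf("packet %d -> port %d\n", i, pick_port(&sent, linkup, 3));
	return 0;
}
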
diff --git a/include/linux/Kbuild b/include/linux/Kbuild index 619b5657af77..0b091b32267d 100644 --- a/include/linux/Kbuild +++ b/include/linux/Kbuild | |||
@@ -185,6 +185,7 @@ header-y += if_pppol2tp.h | |||
185 | header-y += if_pppox.h | 185 | header-y += if_pppox.h |
186 | header-y += if_slip.h | 186 | header-y += if_slip.h |
187 | header-y += if_strip.h | 187 | header-y += if_strip.h |
188 | header-y += if_team.h | ||
188 | header-y += if_tr.h | 189 | header-y += if_tr.h |
189 | header-y += if_tun.h | 190 | header-y += if_tun.h |
190 | header-y += if_tunnel.h | 191 | header-y += if_tunnel.h |
diff --git a/include/linux/if.h b/include/linux/if.h index db20bd4fd16b..06b6ef60c821 100644 --- a/include/linux/if.h +++ b/include/linux/if.h | |||
@@ -79,6 +79,7 @@ | |||
79 | #define IFF_TX_SKB_SHARING 0x10000 /* The interface supports sharing | 79 | #define IFF_TX_SKB_SHARING 0x10000 /* The interface supports sharing |
80 | * skbs on transmit */ | 80 | * skbs on transmit */ |
81 | #define IFF_UNICAST_FLT 0x20000 /* Supports unicast filtering */ | 81 | #define IFF_UNICAST_FLT 0x20000 /* Supports unicast filtering */ |
82 | #define IFF_TEAM_PORT 0x40000 /* device used as team port */ | ||
82 | 83 | ||
83 | #define IF_GET_IFACE 0x0001 /* for querying only */ | 84 | #define IF_GET_IFACE 0x0001 /* for querying only */ |
84 | #define IF_GET_PROTO 0x0002 | 85 | #define IF_GET_PROTO 0x0002 |
diff --git a/include/linux/if_team.h b/include/linux/if_team.h new file mode 100644 index 000000000000..14f6388f5460 --- /dev/null +++ b/include/linux/if_team.h | |||
@@ -0,0 +1,242 @@ | |||
1 | /* | ||
2 | * include/linux/if_team.h - Network team device driver header | ||
3 | * Copyright (c) 2011 Jiri Pirko <jpirko@redhat.com> | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License as published by | ||
7 | * the Free Software Foundation; either version 2 of the License, or | ||
8 | * (at your option) any later version. | ||
9 | */ | ||
10 | |||
11 | #ifndef _LINUX_IF_TEAM_H_ | ||
12 | #define _LINUX_IF_TEAM_H_ | ||
13 | |||
14 | #ifdef __KERNEL__ | ||
15 | |||
16 | struct team_pcpu_stats { | ||
17 | u64 rx_packets; | ||
18 | u64 rx_bytes; | ||
19 | u64 rx_multicast; | ||
20 | u64 tx_packets; | ||
21 | u64 tx_bytes; | ||
22 | struct u64_stats_sync syncp; | ||
23 | u32 rx_dropped; | ||
24 | u32 tx_dropped; | ||
25 | }; | ||
26 | |||
27 | struct team; | ||
28 | |||
29 | struct team_port { | ||
30 | struct net_device *dev; | ||
31 | struct hlist_node hlist; /* node in hash list */ | ||
32 | struct list_head list; /* node in ordinary list */ | ||
33 | struct team *team; | ||
34 | int index; | ||
35 | |||
36 | /* | ||
37 | * A place for storing original values of the device before it | ||
38 | * becomes a port. | ||
39 | */ | ||
40 | struct { | ||
41 | unsigned char dev_addr[MAX_ADDR_LEN]; | ||
42 | unsigned int mtu; | ||
43 | } orig; | ||
44 | |||
45 | bool linkup; | ||
46 | u32 speed; | ||
47 | u8 duplex; | ||
48 | |||
49 | struct rcu_head rcu; | ||
50 | }; | ||
51 | |||
52 | struct team_mode_ops { | ||
53 | int (*init)(struct team *team); | ||
54 | void (*exit)(struct team *team); | ||
55 | rx_handler_result_t (*receive)(struct team *team, | ||
56 | struct team_port *port, | ||
57 | struct sk_buff *skb); | ||
58 | bool (*transmit)(struct team *team, struct sk_buff *skb); | ||
59 | int (*port_enter)(struct team *team, struct team_port *port); | ||
60 | void (*port_leave)(struct team *team, struct team_port *port); | ||
61 | void (*port_change_mac)(struct team *team, struct team_port *port); | ||
62 | }; | ||
63 | |||
64 | enum team_option_type { | ||
65 | TEAM_OPTION_TYPE_U32, | ||
66 | TEAM_OPTION_TYPE_STRING, | ||
67 | }; | ||
68 | |||
69 | struct team_option { | ||
70 | struct list_head list; | ||
71 | const char *name; | ||
72 | enum team_option_type type; | ||
73 | int (*getter)(struct team *team, void *arg); | ||
74 | int (*setter)(struct team *team, void *arg); | ||
75 | }; | ||
76 | |||
77 | struct team_mode { | ||
78 | struct list_head list; | ||
79 | const char *kind; | ||
80 | struct module *owner; | ||
81 | size_t priv_size; | ||
82 | const struct team_mode_ops *ops; | ||
83 | }; | ||
84 | |||
85 | #define TEAM_PORT_HASHBITS 4 | ||
86 | #define TEAM_PORT_HASHENTRIES (1 << TEAM_PORT_HASHBITS) | ||
87 | |||
88 | #define TEAM_MODE_PRIV_LONGS 4 | ||
89 | #define TEAM_MODE_PRIV_SIZE (sizeof(long) * TEAM_MODE_PRIV_LONGS) | ||
90 | |||
91 | struct team { | ||
92 | struct net_device *dev; /* associated netdevice */ | ||
93 | struct team_pcpu_stats __percpu *pcpu_stats; | ||
94 | |||
95 | spinlock_t lock; /* used for overall locking, e.g. port lists write */ | ||
96 | |||
97 | /* | ||
98 | * port lists with port count | ||
99 | */ | ||
100 | int port_count; | ||
101 | struct hlist_head port_hlist[TEAM_PORT_HASHENTRIES]; | ||
102 | struct list_head port_list; | ||
103 | |||
104 | struct list_head option_list; | ||
105 | |||
106 | const struct team_mode *mode; | ||
107 | struct team_mode_ops ops; | ||
108 | long mode_priv[TEAM_MODE_PRIV_LONGS]; | ||
109 | }; | ||
110 | |||
111 | static inline struct hlist_head *team_port_index_hash(struct team *team, | ||
112 | int port_index) | ||
113 | { | ||
114 | return &team->port_hlist[port_index & (TEAM_PORT_HASHENTRIES - 1)]; | ||
115 | } | ||
116 | |||
117 | static inline struct team_port *team_get_port_by_index(struct team *team, | ||
118 | int port_index) | ||
119 | { | ||
120 | struct hlist_node *p; | ||
121 | struct team_port *port; | ||
122 | struct hlist_head *head = team_port_index_hash(team, port_index); | ||
123 | |||
124 | hlist_for_each_entry(port, p, head, hlist) | ||
125 | if (port->index == port_index) | ||
126 | return port; | ||
127 | return NULL; | ||
128 | } | ||
129 | static inline struct team_port *team_get_port_by_index_rcu(struct team *team, | ||
130 | int port_index) | ||
131 | { | ||
132 | struct hlist_node *p; | ||
133 | struct team_port *port; | ||
134 | struct hlist_head *head = team_port_index_hash(team, port_index); | ||
135 | |||
136 | hlist_for_each_entry_rcu(port, p, head, hlist) | ||
137 | if (port->index == port_index) | ||
138 | return port; | ||
139 | return NULL; | ||
140 | } | ||
141 | |||
142 | extern int team_port_set_team_mac(struct team_port *port); | ||
143 | extern void team_options_register(struct team *team, | ||
144 | struct team_option *option, | ||
145 | size_t option_count); | ||
146 | extern void team_options_unregister(struct team *team, | ||
147 | struct team_option *option, | ||
148 | size_t option_count); | ||
149 | extern int team_mode_register(struct team_mode *mode); | ||
150 | extern int team_mode_unregister(struct team_mode *mode); | ||
151 | |||
152 | #endif /* __KERNEL__ */ | ||
153 | |||
154 | #define TEAM_STRING_MAX_LEN 32 | ||
155 | |||
156 | /********************************** | ||
157 | * NETLINK_GENERIC netlink family. | ||
158 | **********************************/ | ||
159 | |||
160 | enum { | ||
161 | TEAM_CMD_NOOP, | ||
162 | TEAM_CMD_OPTIONS_SET, | ||
163 | TEAM_CMD_OPTIONS_GET, | ||
164 | TEAM_CMD_PORT_LIST_GET, | ||
165 | |||
166 | __TEAM_CMD_MAX, | ||
167 | TEAM_CMD_MAX = (__TEAM_CMD_MAX - 1), | ||
168 | }; | ||
169 | |||
170 | enum { | ||
171 | TEAM_ATTR_UNSPEC, | ||
172 | TEAM_ATTR_TEAM_IFINDEX, /* u32 */ | ||
173 | TEAM_ATTR_LIST_OPTION, /* nest */ | ||
174 | TEAM_ATTR_LIST_PORT, /* nest */ | ||
175 | |||
176 | __TEAM_ATTR_MAX, | ||
177 | TEAM_ATTR_MAX = __TEAM_ATTR_MAX - 1, | ||
178 | }; | ||
179 | |||
180 | /* Nested layout of get/set msg: | ||
181 | * | ||
182 | * [TEAM_ATTR_LIST_OPTION] | ||
183 | * [TEAM_ATTR_ITEM_OPTION] | ||
184 | * [TEAM_ATTR_OPTION_*], ... | ||
185 | * [TEAM_ATTR_ITEM_OPTION] | ||
186 | * [TEAM_ATTR_OPTION_*], ... | ||
187 | * ... | ||
188 | * [TEAM_ATTR_LIST_PORT] | ||
189 | * [TEAM_ATTR_ITEM_PORT] | ||
190 | * [TEAM_ATTR_PORT_*], ... | ||
191 | * [TEAM_ATTR_ITEM_PORT] | ||
192 | * [TEAM_ATTR_PORT_*], ... | ||
193 | * ... | ||
194 | */ | ||
195 | |||
196 | enum { | ||
197 | TEAM_ATTR_ITEM_OPTION_UNSPEC, | ||
198 | TEAM_ATTR_ITEM_OPTION, /* nest */ | ||
199 | |||
200 | __TEAM_ATTR_ITEM_OPTION_MAX, | ||
201 | TEAM_ATTR_ITEM_OPTION_MAX = __TEAM_ATTR_ITEM_OPTION_MAX - 1, | ||
202 | }; | ||
203 | |||
204 | enum { | ||
205 | TEAM_ATTR_OPTION_UNSPEC, | ||
206 | TEAM_ATTR_OPTION_NAME, /* string */ | ||
207 | TEAM_ATTR_OPTION_CHANGED, /* flag */ | ||
208 | TEAM_ATTR_OPTION_TYPE, /* u8 */ | ||
209 | TEAM_ATTR_OPTION_DATA, /* dynamic */ | ||
210 | |||
211 | __TEAM_ATTR_OPTION_MAX, | ||
212 | TEAM_ATTR_OPTION_MAX = __TEAM_ATTR_OPTION_MAX - 1, | ||
213 | }; | ||
214 | |||
215 | enum { | ||
216 | TEAM_ATTR_ITEM_PORT_UNSPEC, | ||
217 | TEAM_ATTR_ITEM_PORT, /* nest */ | ||
218 | |||
219 | __TEAM_ATTR_ITEM_PORT_MAX, | ||
220 | TEAM_ATTR_ITEM_PORT_MAX = __TEAM_ATTR_ITEM_PORT_MAX - 1, | ||
221 | }; | ||
222 | |||
223 | enum { | ||
224 | TEAM_ATTR_PORT_UNSPEC, | ||
225 | TEAM_ATTR_PORT_IFINDEX, /* u32 */ | ||
226 | TEAM_ATTR_PORT_CHANGED, /* flag */ | ||
227 | TEAM_ATTR_PORT_LINKUP, /* flag */ | ||
228 | TEAM_ATTR_PORT_SPEED, /* u32 */ | ||
229 | TEAM_ATTR_PORT_DUPLEX, /* u8 */ | ||
230 | |||
231 | __TEAM_ATTR_PORT_MAX, | ||
232 | TEAM_ATTR_PORT_MAX = __TEAM_ATTR_PORT_MAX - 1, | ||
233 | }; | ||
234 | |||
235 | /* | ||
236 | * NETLINK_GENERIC related info | ||
237 | */ | ||
238 | #define TEAM_GENL_NAME "team" | ||
239 | #define TEAM_GENL_VERSION 0x1 | ||
240 | #define TEAM_GENL_CHANGE_EVENT_MC_GRP_NAME "change_event" | ||
241 | |||
242 | #endif /* _LINUX_IF_TEAM_H_ */ | ||
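
The constants above are everything a userspace consumer needs in order to watch the driver: TEAM_GENL_NAME identifies the generic netlink family, and TEAM_GENL_CHANGE_EVENT_MC_GRP_NAME names the multicast group that team.c notifies (via team_nl_send_event_port_list_get() and friends) whenever options or port state change. A hypothetical listener sketch follows; it is not part of this patch and not the libteam API, and assumes libnl-3 plus an installed sanitized linux/if_team.h.

/* Hypothetical sketch, assuming libnl-3; a failover daemon could react to
 * these events by re-reading the port list and re-setting "activeport". */
#include <stdio.h>
#include <netlink/netlink.h>
#include <netlink/genl/genl.h>
#include <netlink/genl/ctrl.h>
#include <linux/if_team.h>

static int change_event_cb(struct nl_msg *msg, void *arg)
{
	/* A real consumer would parse TEAM_ATTR_LIST_PORT /
	 * TEAM_ATTR_LIST_OPTION here per the layout documented above. */
	printf("team change event received\n");
	return NL_OK;
}

int main(void)
{
	struct nl_sock *sk = nl_socket_alloc();
	int grp;

	if (!sk || genl_connect(sk) < 0)
		return 1;
	nl_socket_disable_seq_check(sk);	/* multicast events are unsolicited */
	nl_socket_modify_cb(sk, NL_CB_VALID, NL_CB_CUSTOM, change_event_cb, NULL);

	grp = genl_ctrl_resolve_grp(sk, TEAM_GENL_NAME,
				    TEAM_GENL_CHANGE_EVENT_MC_GRP_NAME);
	if (grp < 0 || nl_socket_add_membership(sk, grp) < 0)
		return 1;

	for (;;)
		nl_recvmsgs_default(sk);	/* dispatches to change_event_cb */
}
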