Diffstat (limited to 'net/batman-adv/hard-interface.c')
-rw-r--r--	net/batman-adv/hard-interface.c	651
1 files changed, 651 insertions, 0 deletions

diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
new file mode 100644
index 000000000000..4f95777ce080
--- /dev/null
+++ b/net/batman-adv/hard-interface.c
@@ -0,0 +1,651 @@
/*
 * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 *
 */

#include "main.h"
#include "hard-interface.h"
#include "soft-interface.h"
#include "send.h"
#include "translation-table.h"
#include "routing.h"
#include "bat_sysfs.h"
#include "originator.h"
#include "hash.h"

#include <linux/if_arp.h>

/* protect update critical side of if_list - but not the content */
static DEFINE_SPINLOCK(if_list_lock);

static void hardif_free_rcu(struct rcu_head *rcu)
{
	struct batman_if *batman_if;

	batman_if = container_of(rcu, struct batman_if, rcu);
	dev_put(batman_if->net_dev);
	kref_put(&batman_if->refcount, hardif_free_ref);
}

struct batman_if *get_batman_if_by_netdev(struct net_device *net_dev)
{
	struct batman_if *batman_if;

	rcu_read_lock();
	list_for_each_entry_rcu(batman_if, &if_list, list) {
		if (batman_if->net_dev == net_dev)
			goto out;
	}

	batman_if = NULL;

out:
	if (batman_if)
		kref_get(&batman_if->refcount);

	rcu_read_unlock();
	return batman_if;
}

static int is_valid_iface(struct net_device *net_dev)
{
	if (net_dev->flags & IFF_LOOPBACK)
		return 0;

	if (net_dev->type != ARPHRD_ETHER)
		return 0;

	if (net_dev->addr_len != ETH_ALEN)
		return 0;

	/* no batman over batman */
#ifdef HAVE_NET_DEVICE_OPS
	if (net_dev->netdev_ops->ndo_start_xmit == interface_tx)
		return 0;
#else
	if (net_dev->hard_start_xmit == interface_tx)
		return 0;
#endif

	/* Device is being bridged */
	/* if (net_dev->priv_flags & IFF_BRIDGE_PORT)
		return 0; */

	return 1;
}

static struct batman_if *get_active_batman_if(struct net_device *soft_iface)
{
	struct batman_if *batman_if;

	rcu_read_lock();
	list_for_each_entry_rcu(batman_if, &if_list, list) {
		if (batman_if->soft_iface != soft_iface)
			continue;

		if (batman_if->if_status == IF_ACTIVE)
			goto out;
	}

	batman_if = NULL;

out:
	if (batman_if)
		kref_get(&batman_if->refcount);

	rcu_read_unlock();
	return batman_if;
}

static void update_primary_addr(struct bat_priv *bat_priv)
{
	struct vis_packet *vis_packet;

	vis_packet = (struct vis_packet *)
				bat_priv->my_vis_info->skb_packet->data;
	memcpy(vis_packet->vis_orig,
	       bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN);
	memcpy(vis_packet->sender_orig,
	       bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN);
}

static void set_primary_if(struct bat_priv *bat_priv,
			   struct batman_if *batman_if)
{
	struct batman_packet *batman_packet;
	struct batman_if *old_if;

	if (batman_if)
		kref_get(&batman_if->refcount);

	old_if = bat_priv->primary_if;
	bat_priv->primary_if = batman_if;

	if (old_if)
		kref_put(&old_if->refcount, hardif_free_ref);

	if (!bat_priv->primary_if)
		return;

	batman_packet = (struct batman_packet *)(batman_if->packet_buff);
	batman_packet->flags = PRIMARIES_FIRST_HOP;
	batman_packet->ttl = TTL;

	update_primary_addr(bat_priv);

	/***
	 * hacky trick to make sure that we send the HNA information via
	 * our new primary interface
	 */
	atomic_set(&bat_priv->hna_local_changed, 1);
}

static bool hardif_is_iface_up(struct batman_if *batman_if)
{
	if (batman_if->net_dev->flags & IFF_UP)
		return true;

	return false;
}

static void update_mac_addresses(struct batman_if *batman_if)
{
	memcpy(((struct batman_packet *)(batman_if->packet_buff))->orig,
	       batman_if->net_dev->dev_addr, ETH_ALEN);
	memcpy(((struct batman_packet *)(batman_if->packet_buff))->prev_sender,
	       batman_if->net_dev->dev_addr, ETH_ALEN);
}

static void check_known_mac_addr(struct net_device *net_dev)
{
	struct batman_if *batman_if;

	rcu_read_lock();
	list_for_each_entry_rcu(batman_if, &if_list, list) {
		if ((batman_if->if_status != IF_ACTIVE) &&
		    (batman_if->if_status != IF_TO_BE_ACTIVATED))
			continue;

		if (batman_if->net_dev == net_dev)
			continue;

		if (!compare_orig(batman_if->net_dev->dev_addr,
				  net_dev->dev_addr))
			continue;

		pr_warning("The newly added mac address (%pM) already exists "
			   "on: %s\n", net_dev->dev_addr,
			   batman_if->net_dev->name);
		pr_warning("It is strongly recommended to keep mac addresses "
			   "unique to avoid problems!\n");
	}
	rcu_read_unlock();
}

int hardif_min_mtu(struct net_device *soft_iface)
{
	struct bat_priv *bat_priv = netdev_priv(soft_iface);
	struct batman_if *batman_if;
	/* allow big frames if all devices are capable to do so
	 * (have MTU > 1500 + BAT_HEADER_LEN) */
	int min_mtu = ETH_DATA_LEN;

	if (atomic_read(&bat_priv->fragmentation))
		goto out;

	rcu_read_lock();
	list_for_each_entry_rcu(batman_if, &if_list, list) {
		if ((batman_if->if_status != IF_ACTIVE) &&
		    (batman_if->if_status != IF_TO_BE_ACTIVATED))
			continue;

		if (batman_if->soft_iface != soft_iface)
			continue;

		min_mtu = min_t(int, batman_if->net_dev->mtu - BAT_HEADER_LEN,
				min_mtu);
	}
	rcu_read_unlock();
out:
	return min_mtu;
}

/* adjusts the MTU if a new interface with a smaller MTU appeared. */
void update_min_mtu(struct net_device *soft_iface)
{
	int min_mtu;

	min_mtu = hardif_min_mtu(soft_iface);
	if (soft_iface->mtu != min_mtu)
		soft_iface->mtu = min_mtu;
}

static void hardif_activate_interface(struct batman_if *batman_if)
{
	struct bat_priv *bat_priv;

	if (batman_if->if_status != IF_INACTIVE)
		return;

	bat_priv = netdev_priv(batman_if->soft_iface);

	update_mac_addresses(batman_if);
	batman_if->if_status = IF_TO_BE_ACTIVATED;

	/**
	 * the first active interface becomes our primary interface or
	 * the next active interface after the old primary interface was removed
	 */
	if (!bat_priv->primary_if)
		set_primary_if(bat_priv, batman_if);

	bat_info(batman_if->soft_iface, "Interface activated: %s\n",
		 batman_if->net_dev->name);

	update_min_mtu(batman_if->soft_iface);
	return;
}

static void hardif_deactivate_interface(struct batman_if *batman_if)
{
	if ((batman_if->if_status != IF_ACTIVE) &&
	    (batman_if->if_status != IF_TO_BE_ACTIVATED))
		return;

	batman_if->if_status = IF_INACTIVE;

	bat_info(batman_if->soft_iface, "Interface deactivated: %s\n",
		 batman_if->net_dev->name);

	update_min_mtu(batman_if->soft_iface);
}

int hardif_enable_interface(struct batman_if *batman_if, char *iface_name)
{
	struct bat_priv *bat_priv;
	struct batman_packet *batman_packet;

	if (batman_if->if_status != IF_NOT_IN_USE)
		goto out;

	batman_if->soft_iface = dev_get_by_name(&init_net, iface_name);

	if (!batman_if->soft_iface) {
		batman_if->soft_iface = softif_create(iface_name);

		if (!batman_if->soft_iface)
			goto err;

		/* dev_get_by_name() increases the reference counter for us */
		dev_hold(batman_if->soft_iface);
	}

	bat_priv = netdev_priv(batman_if->soft_iface);
	batman_if->packet_len = BAT_PACKET_LEN;
	batman_if->packet_buff = kmalloc(batman_if->packet_len, GFP_ATOMIC);

	if (!batman_if->packet_buff) {
		bat_err(batman_if->soft_iface, "Can't add interface packet "
			"(%s): out of memory\n", batman_if->net_dev->name);
		goto err;
	}

	batman_packet = (struct batman_packet *)(batman_if->packet_buff);
	batman_packet->packet_type = BAT_PACKET;
	batman_packet->version = COMPAT_VERSION;
	batman_packet->flags = 0;
	batman_packet->ttl = 2;
	batman_packet->tq = TQ_MAX_VALUE;
	batman_packet->num_hna = 0;

	batman_if->if_num = bat_priv->num_ifaces;
	bat_priv->num_ifaces++;
	batman_if->if_status = IF_INACTIVE;
	orig_hash_add_if(batman_if, bat_priv->num_ifaces);

	batman_if->batman_adv_ptype.type = __constant_htons(ETH_P_BATMAN);
	batman_if->batman_adv_ptype.func = batman_skb_recv;
	batman_if->batman_adv_ptype.dev = batman_if->net_dev;
	kref_get(&batman_if->refcount);
	dev_add_pack(&batman_if->batman_adv_ptype);

	atomic_set(&batman_if->seqno, 1);
	atomic_set(&batman_if->frag_seqno, 1);
	bat_info(batman_if->soft_iface, "Adding interface: %s\n",
		 batman_if->net_dev->name);

	if (atomic_read(&bat_priv->fragmentation) && batman_if->net_dev->mtu <
		ETH_DATA_LEN + BAT_HEADER_LEN)
		bat_info(batman_if->soft_iface,
			 "The MTU of interface %s is too small (%i) to handle "
			 "the transport of batman-adv packets. Packets going "
			 "over this interface will be fragmented on layer2 "
			 "which could impact the performance. Setting the MTU "
			 "to %zi would solve the problem.\n",
			 batman_if->net_dev->name, batman_if->net_dev->mtu,
			 ETH_DATA_LEN + BAT_HEADER_LEN);

	if (!atomic_read(&bat_priv->fragmentation) && batman_if->net_dev->mtu <
		ETH_DATA_LEN + BAT_HEADER_LEN)
		bat_info(batman_if->soft_iface,
			 "The MTU of interface %s is too small (%i) to handle "
			 "the transport of batman-adv packets. If you experience"
			 " problems getting traffic through try increasing the "
			 "MTU to %zi.\n",
			 batman_if->net_dev->name, batman_if->net_dev->mtu,
			 ETH_DATA_LEN + BAT_HEADER_LEN);

	if (hardif_is_iface_up(batman_if))
		hardif_activate_interface(batman_if);
	else
		bat_err(batman_if->soft_iface, "Not using interface %s "
			"(retrying later): interface not active\n",
			batman_if->net_dev->name);

	/* begin scheduling originator messages on that interface */
	schedule_own_packet(batman_if);

out:
	return 0;

err:
	return -ENOMEM;
}

void hardif_disable_interface(struct batman_if *batman_if)
{
	struct bat_priv *bat_priv = netdev_priv(batman_if->soft_iface);

	if (batman_if->if_status == IF_ACTIVE)
		hardif_deactivate_interface(batman_if);

	if (batman_if->if_status != IF_INACTIVE)
		return;

	bat_info(batman_if->soft_iface, "Removing interface: %s\n",
		 batman_if->net_dev->name);
	dev_remove_pack(&batman_if->batman_adv_ptype);
	kref_put(&batman_if->refcount, hardif_free_ref);

	bat_priv->num_ifaces--;
	orig_hash_del_if(batman_if, bat_priv->num_ifaces);

	if (batman_if == bat_priv->primary_if) {
		struct batman_if *new_if;

		new_if = get_active_batman_if(batman_if->soft_iface);
		set_primary_if(bat_priv, new_if);

		if (new_if)
			kref_put(&new_if->refcount, hardif_free_ref);
	}

	kfree(batman_if->packet_buff);
	batman_if->packet_buff = NULL;
	batman_if->if_status = IF_NOT_IN_USE;

	/* delete all references to this batman_if */
	purge_orig_ref(bat_priv);
	purge_outstanding_packets(bat_priv, batman_if);
	dev_put(batman_if->soft_iface);

	/* nobody uses this interface anymore */
	if (!bat_priv->num_ifaces)
		softif_destroy(batman_if->soft_iface);

	batman_if->soft_iface = NULL;
}

static struct batman_if *hardif_add_interface(struct net_device *net_dev)
{
	struct batman_if *batman_if;
	int ret;

	ret = is_valid_iface(net_dev);
	if (ret != 1)
		goto out;

	dev_hold(net_dev);

	batman_if = kmalloc(sizeof(struct batman_if), GFP_ATOMIC);
	if (!batman_if) {
		pr_err("Can't add interface (%s): out of memory\n",
		       net_dev->name);
		goto release_dev;
	}

	ret = sysfs_add_hardif(&batman_if->hardif_obj, net_dev);
	if (ret)
		goto free_if;

	batman_if->if_num = -1;
	batman_if->net_dev = net_dev;
	batman_if->soft_iface = NULL;
	batman_if->if_status = IF_NOT_IN_USE;
	INIT_LIST_HEAD(&batman_if->list);
	kref_init(&batman_if->refcount);

	check_known_mac_addr(batman_if->net_dev);

	spin_lock(&if_list_lock);
	list_add_tail_rcu(&batman_if->list, &if_list);
	spin_unlock(&if_list_lock);

	/* extra reference for return */
	kref_get(&batman_if->refcount);
	return batman_if;

free_if:
	kfree(batman_if);
release_dev:
	dev_put(net_dev);
out:
	return NULL;
}

static void hardif_remove_interface(struct batman_if *batman_if)
{
	/* first deactivate interface */
	if (batman_if->if_status != IF_NOT_IN_USE)
		hardif_disable_interface(batman_if);

	if (batman_if->if_status != IF_NOT_IN_USE)
		return;

	batman_if->if_status = IF_TO_BE_REMOVED;
	sysfs_del_hardif(&batman_if->hardif_obj);
	call_rcu(&batman_if->rcu, hardif_free_rcu);
}

void hardif_remove_interfaces(void)
{
	struct batman_if *batman_if, *batman_if_tmp;
	struct list_head if_queue;

	INIT_LIST_HEAD(&if_queue);

	spin_lock(&if_list_lock);
	list_for_each_entry_safe(batman_if, batman_if_tmp, &if_list, list) {
		list_del_rcu(&batman_if->list);
		list_add_tail(&batman_if->list, &if_queue);
	}
	spin_unlock(&if_list_lock);

	rtnl_lock();
	list_for_each_entry_safe(batman_if, batman_if_tmp, &if_queue, list) {
		hardif_remove_interface(batman_if);
	}
	rtnl_unlock();
}

static int hard_if_event(struct notifier_block *this,
			 unsigned long event, void *ptr)
{
	struct net_device *net_dev = (struct net_device *)ptr;
	struct batman_if *batman_if = get_batman_if_by_netdev(net_dev);
	struct bat_priv *bat_priv;

	if (!batman_if && event == NETDEV_REGISTER)
		batman_if = hardif_add_interface(net_dev);

	if (!batman_if)
		goto out;

	switch (event) {
	case NETDEV_UP:
		hardif_activate_interface(batman_if);
		break;
	case NETDEV_GOING_DOWN:
	case NETDEV_DOWN:
		hardif_deactivate_interface(batman_if);
		break;
	case NETDEV_UNREGISTER:
		spin_lock(&if_list_lock);
		list_del_rcu(&batman_if->list);
		spin_unlock(&if_list_lock);

		hardif_remove_interface(batman_if);
		break;
	case NETDEV_CHANGEMTU:
		if (batman_if->soft_iface)
			update_min_mtu(batman_if->soft_iface);
		break;
	case NETDEV_CHANGEADDR:
		if (batman_if->if_status == IF_NOT_IN_USE)
			goto hardif_put;

		check_known_mac_addr(batman_if->net_dev);
		update_mac_addresses(batman_if);

		bat_priv = netdev_priv(batman_if->soft_iface);
		if (batman_if == bat_priv->primary_if)
			update_primary_addr(bat_priv);
		break;
	default:
		break;
	};

hardif_put:
	kref_put(&batman_if->refcount, hardif_free_ref);
out:
	return NOTIFY_DONE;
}

/* receive a packet with the batman ethertype coming on a hard
 * interface */
int batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
		    struct packet_type *ptype, struct net_device *orig_dev)
{
	struct bat_priv *bat_priv;
	struct batman_packet *batman_packet;
	struct batman_if *batman_if;
	int ret;

	batman_if = container_of(ptype, struct batman_if, batman_adv_ptype);
	skb = skb_share_check(skb, GFP_ATOMIC);

	/* skb was released by skb_share_check() */
	if (!skb)
		goto err_out;

	/* packet should hold at least type and version */
	if (unlikely(!pskb_may_pull(skb, 2)))
		goto err_free;

	/* expect a valid ethernet header here. */
	if (unlikely(skb->mac_len != sizeof(struct ethhdr)
				|| !skb_mac_header(skb)))
		goto err_free;

	if (!batman_if->soft_iface)
		goto err_free;

	bat_priv = netdev_priv(batman_if->soft_iface);

	if (atomic_read(&bat_priv->mesh_state) != MESH_ACTIVE)
		goto err_free;

	/* discard frames on not active interfaces */
	if (batman_if->if_status != IF_ACTIVE)
		goto err_free;

	batman_packet = (struct batman_packet *)skb->data;

	if (batman_packet->version != COMPAT_VERSION) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"Drop packet: incompatible batman version (%i)\n",
			batman_packet->version);
		goto err_free;
	}

	/* all receive handlers return whether they received or reused
	 * the supplied skb. if not, we have to free the skb. */

	switch (batman_packet->packet_type) {
		/* batman originator packet */
	case BAT_PACKET:
		ret = recv_bat_packet(skb, batman_if);
		break;

		/* batman icmp packet */
	case BAT_ICMP:
		ret = recv_icmp_packet(skb, batman_if);
		break;

		/* unicast packet */
	case BAT_UNICAST:
		ret = recv_unicast_packet(skb, batman_if);
		break;

		/* fragmented unicast packet */
	case BAT_UNICAST_FRAG:
		ret = recv_ucast_frag_packet(skb, batman_if);
		break;

		/* broadcast packet */
	case BAT_BCAST:
		ret = recv_bcast_packet(skb, batman_if);
		break;

		/* vis packet */
	case BAT_VIS:
		ret = recv_vis_packet(skb, batman_if);
		break;
	default:
		ret = NET_RX_DROP;
	}

	if (ret == NET_RX_DROP)
		kfree_skb(skb);

	/* return NET_RX_SUCCESS in any case as we
	 * most probably dropped the packet for
	 * routing-logical reasons. */

	return NET_RX_SUCCESS;

err_free:
	kfree_skb(skb);
err_out:
	return NET_RX_DROP;
}

struct notifier_block hard_if_notifier = {
	.notifier_call = hard_if_event,
};
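
For context, a minimal sketch (not part of this diff) of how a notifier block such as hard_if_notifier is hooked into the kernel's netdevice event chain so that hard_if_event() receives NETDEV_* events; in batman-adv this registration presumably happens in the module init path in main.c, and the init/exit function names below are hypothetical placeholders:

/* illustrative sketch only: registering the notifier declared above */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/notifier.h>

extern struct notifier_block hard_if_notifier;

static int __init example_init(void)
{
	/* from now on hard_if_event() is called for every NETDEV_* event */
	return register_netdevice_notifier(&hard_if_notifier);
}

static void __exit example_exit(void)
{
	unregister_netdevice_notifier(&hard_if_notifier);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");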