Diffstat (limited to 'net/batman-adv/send.c')

 -rw-r--r--  net/batman-adv/send.c | 110
 1 file changed, 55 insertions(+), 55 deletions(-)
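In short, the commit below renames struct batman_if to struct hard_iface throughout send.c (and the global interface list from if_list to hardif_list), drops the redundant types.h include, switches compare_orig() to compare_eth(), removes the unused bat_priv argument from forward_send_time(), and bumps the copyright year. The recurring signature change, taken directly from the hunks that follow:

	/* before: the old type and parameter name */
	int send_skb_packet(struct sk_buff *skb,
			    struct batman_if *batman_if,
			    uint8_t *dst_addr);

	/* after: same behavior, renamed type and parameter */
	int send_skb_packet(struct sk_buff *skb,
			    struct hard_iface *hard_iface,
			    uint8_t *dst_addr);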
diff --git a/net/batman-adv/send.c b/net/batman-adv/send.c
index b89b9f7709ae..d49e54d932af 100644
--- a/net/batman-adv/send.c
+++ b/net/batman-adv/send.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
  *
@@ -25,7 +25,6 @@
 #include "translation-table.h"
 #include "soft-interface.h"
 #include "hard-interface.h"
-#include "types.h"
 #include "vis.h"
 #include "aggregation.h"
 #include "gateway_common.h"
@@ -49,7 +48,7 @@ static unsigned long own_send_time(struct bat_priv *bat_priv)
 }
 
 /* when do we schedule a forwarded packet to be sent */
-static unsigned long forward_send_time(struct bat_priv *bat_priv)
+static unsigned long forward_send_time(void)
 {
 	return jiffies + msecs_to_jiffies(random32() % (JITTER/2));
 }
@@ -57,20 +56,20 @@ static unsigned long forward_send_time(struct bat_priv *bat_priv)
 /* send out an already prepared packet to the given address via the
  * specified batman interface */
 int send_skb_packet(struct sk_buff *skb,
-		    struct batman_if *batman_if,
+		    struct hard_iface *hard_iface,
 		    uint8_t *dst_addr)
 {
 	struct ethhdr *ethhdr;
 
-	if (batman_if->if_status != IF_ACTIVE)
+	if (hard_iface->if_status != IF_ACTIVE)
 		goto send_skb_err;
 
-	if (unlikely(!batman_if->net_dev))
+	if (unlikely(!hard_iface->net_dev))
 		goto send_skb_err;
 
-	if (!(batman_if->net_dev->flags & IFF_UP)) {
+	if (!(hard_iface->net_dev->flags & IFF_UP)) {
 		pr_warning("Interface %s is not up - can't send packet via "
-			   "that interface!\n", batman_if->net_dev->name);
+			   "that interface!\n", hard_iface->net_dev->name);
 		goto send_skb_err;
 	}
 
@@ -81,7 +80,7 @@ int send_skb_packet(struct sk_buff *skb,
 	skb_reset_mac_header(skb);
 
 	ethhdr = (struct ethhdr *) skb_mac_header(skb);
-	memcpy(ethhdr->h_source, batman_if->net_dev->dev_addr, ETH_ALEN);
+	memcpy(ethhdr->h_source, hard_iface->net_dev->dev_addr, ETH_ALEN);
 	memcpy(ethhdr->h_dest, dst_addr, ETH_ALEN);
 	ethhdr->h_proto = __constant_htons(ETH_P_BATMAN);
 
@@ -89,7 +88,7 @@ int send_skb_packet(struct sk_buff *skb,
 	skb->priority = TC_PRIO_CONTROL;
 	skb->protocol = __constant_htons(ETH_P_BATMAN);
 
-	skb->dev = batman_if->net_dev;
+	skb->dev = hard_iface->net_dev;
 
 	/* dev_queue_xmit() returns a negative result on error. However on
 	 * congestion and traffic shaping, it drops and returns NET_XMIT_DROP
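The comment in this hunk captures a real subtlety of dev_queue_xmit(): it returns a negative value on hard errors, but the positive NET_XMIT_DROP on congestion or traffic-shaping drops, and the latter is deliberately not treated as an error. A minimal sketch of a caller following that rule; the wrapper name is illustrative, not part of this file:

	#include <linux/netdevice.h>

	/* illustrative helper: propagate hard errors (< 0) only; a positive
	 * NET_XMIT_DROP from congestion is reported as success */
	static int xmit_ignore_congestion(struct sk_buff *skb)
	{
		int ret = dev_queue_xmit(skb);

		return (ret < 0) ? ret : NET_XMIT_SUCCESS;
	}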
@@ -103,16 +102,16 @@ send_skb_err:
 
 /* Send a packet to a given interface */
 static void send_packet_to_if(struct forw_packet *forw_packet,
-			      struct batman_if *batman_if)
+			      struct hard_iface *hard_iface)
 {
-	struct bat_priv *bat_priv = netdev_priv(batman_if->soft_iface);
+	struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
 	char *fwd_str;
 	uint8_t packet_num;
 	int16_t buff_pos;
 	struct batman_packet *batman_packet;
 	struct sk_buff *skb;
 
-	if (batman_if->if_status != IF_ACTIVE)
+	if (hard_iface->if_status != IF_ACTIVE)
 		return;
 
 	packet_num = 0;
@@ -127,7 +126,7 @@ static void send_packet_to_if(struct forw_packet *forw_packet,
 	/* we might have aggregated direct link packets with an
 	 * ordinary base packet */
 	if ((forw_packet->direct_link_flags & (1 << packet_num)) &&
-	    (forw_packet->if_incoming == batman_if))
+	    (forw_packet->if_incoming == hard_iface))
 		batman_packet->flags |= DIRECTLINK;
 	else
 		batman_packet->flags &= ~DIRECTLINK;
@@ -143,7 +142,8 @@ static void send_packet_to_if(struct forw_packet *forw_packet,
 		batman_packet->tq, batman_packet->ttl,
 		(batman_packet->flags & DIRECTLINK ?
 		 "on" : "off"),
-		batman_if->net_dev->name, batman_if->net_dev->dev_addr);
+		hard_iface->net_dev->name,
+		hard_iface->net_dev->dev_addr);
 
 	buff_pos += sizeof(struct batman_packet) +
 		(batman_packet->num_hna * ETH_ALEN);
@@ -155,13 +155,13 @@ static void send_packet_to_if(struct forw_packet *forw_packet,
 	/* create clone because function is called more than once */
 	skb = skb_clone(forw_packet->skb, GFP_ATOMIC);
 	if (skb)
-		send_skb_packet(skb, batman_if, broadcast_addr);
+		send_skb_packet(skb, hard_iface, broadcast_addr);
 }
 
 /* send a batman packet */
 static void send_packet(struct forw_packet *forw_packet)
 {
-	struct batman_if *batman_if;
+	struct hard_iface *hard_iface;
 	struct net_device *soft_iface;
 	struct bat_priv *bat_priv;
 	struct batman_packet *batman_packet =
@@ -205,17 +205,17 @@ static void send_packet(struct forw_packet *forw_packet)
 
 	/* broadcast on every interface */
 	rcu_read_lock();
-	list_for_each_entry_rcu(batman_if, &if_list, list) {
-		if (batman_if->soft_iface != soft_iface)
+	list_for_each_entry_rcu(hard_iface, &hardif_list, list) {
+		if (hard_iface->soft_iface != soft_iface)
 			continue;
 
-		send_packet_to_if(forw_packet, batman_if);
+		send_packet_to_if(forw_packet, hard_iface);
 	}
 	rcu_read_unlock();
 }
 
 static void rebuild_batman_packet(struct bat_priv *bat_priv,
-				  struct batman_if *batman_if)
+				  struct hard_iface *hard_iface)
 {
 	int new_len;
 	unsigned char *new_buff;
@@ -227,7 +227,7 @@ static void rebuild_batman_packet(struct bat_priv *bat_priv,
 
 	/* keep old buffer if kmalloc should fail */
 	if (new_buff) {
-		memcpy(new_buff, batman_if->packet_buff,
+		memcpy(new_buff, hard_iface->packet_buff,
 		       sizeof(struct batman_packet));
 		batman_packet = (struct batman_packet *)new_buff;
 
@@ -235,21 +235,21 @@ static void rebuild_batman_packet(struct bat_priv *bat_priv,
 		       new_buff + sizeof(struct batman_packet),
 		       new_len - sizeof(struct batman_packet));
 
-		kfree(batman_if->packet_buff);
-		batman_if->packet_buff = new_buff;
-		batman_if->packet_len = new_len;
+		kfree(hard_iface->packet_buff);
+		hard_iface->packet_buff = new_buff;
+		hard_iface->packet_len = new_len;
 	}
 }
 
-void schedule_own_packet(struct batman_if *batman_if)
+void schedule_own_packet(struct hard_iface *hard_iface)
 {
-	struct bat_priv *bat_priv = netdev_priv(batman_if->soft_iface);
+	struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
 	unsigned long send_time;
 	struct batman_packet *batman_packet;
 	int vis_server;
 
-	if ((batman_if->if_status == IF_NOT_IN_USE) ||
-	    (batman_if->if_status == IF_TO_BE_REMOVED))
+	if ((hard_iface->if_status == IF_NOT_IN_USE) ||
+	    (hard_iface->if_status == IF_TO_BE_REMOVED))
 		return;
 
 	vis_server = atomic_read(&bat_priv->vis_mode);
@@ -261,51 +261,51 @@ void schedule_own_packet(struct batman_if *batman_if)
 	 * outdated packets (especially uninitialized mac addresses) in the
 	 * packet queue
 	 */
-	if (batman_if->if_status == IF_TO_BE_ACTIVATED)
-		batman_if->if_status = IF_ACTIVE;
+	if (hard_iface->if_status == IF_TO_BE_ACTIVATED)
+		hard_iface->if_status = IF_ACTIVE;
 
 	/* if local hna has changed and interface is a primary interface */
 	if ((atomic_read(&bat_priv->hna_local_changed)) &&
-	    (batman_if == bat_priv->primary_if))
-		rebuild_batman_packet(bat_priv, batman_if);
+	    (hard_iface == bat_priv->primary_if))
+		rebuild_batman_packet(bat_priv, hard_iface);
 
 	/**
 	 * NOTE: packet_buff might just have been re-allocated in
 	 * rebuild_batman_packet()
 	 */
-	batman_packet = (struct batman_packet *)batman_if->packet_buff;
+	batman_packet = (struct batman_packet *)hard_iface->packet_buff;
 
 	/* change sequence number to network order */
 	batman_packet->seqno =
-		htonl((uint32_t)atomic_read(&batman_if->seqno));
+		htonl((uint32_t)atomic_read(&hard_iface->seqno));
 
 	if (vis_server == VIS_TYPE_SERVER_SYNC)
 		batman_packet->flags |= VIS_SERVER;
 	else
 		batman_packet->flags &= ~VIS_SERVER;
 
-	if ((batman_if == bat_priv->primary_if) &&
+	if ((hard_iface == bat_priv->primary_if) &&
 	    (atomic_read(&bat_priv->gw_mode) == GW_MODE_SERVER))
 		batman_packet->gw_flags =
 			(uint8_t)atomic_read(&bat_priv->gw_bandwidth);
 	else
 		batman_packet->gw_flags = 0;
 
-	atomic_inc(&batman_if->seqno);
+	atomic_inc(&hard_iface->seqno);
 
-	slide_own_bcast_window(batman_if);
+	slide_own_bcast_window(hard_iface);
 	send_time = own_send_time(bat_priv);
 	add_bat_packet_to_list(bat_priv,
-			       batman_if->packet_buff,
-			       batman_if->packet_len,
-			       batman_if, 1, send_time);
+			       hard_iface->packet_buff,
+			       hard_iface->packet_len,
+			       hard_iface, 1, send_time);
 }
 
 void schedule_forward_packet(struct orig_node *orig_node,
 			     struct ethhdr *ethhdr,
 			     struct batman_packet *batman_packet,
 			     uint8_t directlink, int hna_buff_len,
-			     struct batman_if *if_incoming)
+			     struct hard_iface *if_incoming)
 {
 	struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
 	unsigned char in_tq, in_ttl, tq_avg = 0;
@@ -327,7 +327,7 @@ void schedule_forward_packet(struct orig_node *orig_node,
 	if ((orig_node->router) && (orig_node->router->tq_avg != 0)) {
 
 		/* rebroadcast ogm of best ranking neighbor as is */
-		if (!compare_orig(orig_node->router->addr, ethhdr->h_source)) {
+		if (!compare_eth(orig_node->router->addr, ethhdr->h_source)) {
 			batman_packet->tq = orig_node->router->tq_avg;
 
 			if (orig_node->router->last_ttl)
@@ -356,7 +356,7 @@ void schedule_forward_packet(struct orig_node *orig_node,
 	else
 		batman_packet->flags &= ~DIRECTLINK;
 
-	send_time = forward_send_time(bat_priv);
+	send_time = forward_send_time();
 	add_bat_packet_to_list(bat_priv,
 			       (unsigned char *)batman_packet,
 			       sizeof(struct batman_packet) + hna_buff_len,
@@ -444,7 +444,7 @@ out:
 
 static void send_outstanding_bcast_packet(struct work_struct *work)
 {
-	struct batman_if *batman_if;
+	struct hard_iface *hard_iface;
 	struct delayed_work *delayed_work =
 		container_of(work, struct delayed_work, work);
 	struct forw_packet *forw_packet =
@@ -462,14 +462,14 @@ static void send_outstanding_bcast_packet(struct work_struct *work)
 
 	/* rebroadcast packet */
 	rcu_read_lock();
-	list_for_each_entry_rcu(batman_if, &if_list, list) {
-		if (batman_if->soft_iface != soft_iface)
+	list_for_each_entry_rcu(hard_iface, &hardif_list, list) {
+		if (hard_iface->soft_iface != soft_iface)
 			continue;
 
 		/* send a copy of the saved skb */
 		skb1 = skb_clone(forw_packet->skb, GFP_ATOMIC);
 		if (skb1)
-			send_skb_packet(skb1, batman_if, broadcast_addr);
+			send_skb_packet(skb1, hard_iface, broadcast_addr);
 	}
 	rcu_read_unlock();
 
@@ -522,15 +522,15 @@ out:
 }
 
 void purge_outstanding_packets(struct bat_priv *bat_priv,
-			       struct batman_if *batman_if)
+			       struct hard_iface *hard_iface)
 {
 	struct forw_packet *forw_packet;
 	struct hlist_node *tmp_node, *safe_tmp_node;
 
-	if (batman_if)
+	if (hard_iface)
 		bat_dbg(DBG_BATMAN, bat_priv,
 			"purge_outstanding_packets(): %s\n",
-			batman_if->net_dev->name);
+			hard_iface->net_dev->name);
 	else
 		bat_dbg(DBG_BATMAN, bat_priv,
 			"purge_outstanding_packets()\n");
@@ -544,8 +544,8 @@ void purge_outstanding_packets(struct bat_priv *bat_priv,
 		 * if purge_outstanding_packets() was called with an argument
 		 * we delete only packets belonging to the given interface
 		 */
-		if ((batman_if) &&
-		    (forw_packet->if_incoming != batman_if))
+		if ((hard_iface) &&
+		    (forw_packet->if_incoming != hard_iface))
 			continue;
 
 		spin_unlock_bh(&bat_priv->forw_bcast_list_lock);
@@ -568,8 +568,8 @@ void purge_outstanding_packets(struct bat_priv *bat_priv,
 		 * if purge_outstanding_packets() was called with an argument
 		 * we delete only packets belonging to the given interface
 		 */
-		if ((batman_if) &&
-		    (forw_packet->if_incoming != batman_if))
+		if ((hard_iface) &&
+		    (forw_packet->if_incoming != hard_iface))
 			continue;
 
 		spin_unlock_bh(&bat_priv->forw_bat_list_lock);
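For reference, the two rebroadcast loops rewritten above (in send_packet() and send_outstanding_bcast_packet()) follow the kernel's standard RCU-protected list walk over hardif_list. A minimal sketch of the pattern, with an abbreviated struct hard_iface (the real definition carries many more members) and an illustrative helper name:

	#include <linux/list.h>
	#include <linux/netdevice.h>
	#include <linux/rcupdate.h>

	/* abbreviated for illustration; not the full definition */
	struct hard_iface {
		struct list_head list;
		struct net_device *net_dev;
		struct net_device *soft_iface;
	};

	static LIST_HEAD(hardif_list);

	/* illustrative helper: visit each hard interface attached to the
	 * given soft interface; readers never block concurrent list writers */
	static void for_each_matching_hardif(struct net_device *soft_iface)
	{
		struct hard_iface *hard_iface;

		rcu_read_lock();
		list_for_each_entry_rcu(hard_iface, &hardif_list, list) {
			if (hard_iface->soft_iface != soft_iface)
				continue;
			/* clone and transmit a packet here, as the diff does */
		}
		rcu_read_unlock();
	}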