author    Marek Lindner <lindner_marek@yahoo.de>   2011-02-18 07:33:20 -0500
committer Marek Lindner <lindner_marek@yahoo.de>   2011-03-05 06:52:06 -0500
commit    e6c10f433af9c98994c94a10ae862c152fcfb2a9 (patch)
tree      56b4a82b83da44f7c3657a283c92c5cc8e248b9f /net/batman-adv/send.c
parent    4389e47af856635eb17d03b2572a50576c12db24 (diff)
batman-adv: rename batman_if struct to hard_iface
Signed-off-by: Marek Lindner <lindner_marek@yahoo.de>
Diffstat (limited to 'net/batman-adv/send.c')
 -rw-r--r--  net/batman-adv/send.c | 101
 1 file changed, 51 insertions(+), 50 deletions(-)
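The patch is a purely mechanical rename: every struct batman_if and every batman_if variable in this file becomes struct hard_iface / hard_iface, with no change in behavior. (The single extra line in the diffstat comes from re-wrapping one debug-output argument list that grew past 80 columns with the longer name.) For orientation, a sketch of the renamed struct follows, limited to the members this file dereferences; the full definition lives in net/batman-adv/types.h and is not part of this diff, so the field types shown are inferred rather than authoritative.

struct hard_iface {			/* formerly: struct batman_if */
	struct list_head list;		/* node in the global hardif_list */
	char if_status;			/* IF_ACTIVE, IF_NOT_IN_USE, ... (type inferred) */
	struct net_device *net_dev;	/* the underlying hardware interface */
	struct net_device *soft_iface;	/* the batX mesh device on top of it */
	unsigned char *packet_buff;	/* own OGM buffer, resized in rebuild_batman_packet() */
	int packet_len;			/* length of packet_buff (type inferred) */
	atomic_t seqno;			/* own OGM sequence number */
};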
diff --git a/net/batman-adv/send.c b/net/batman-adv/send.c
index c4f3e4988b63..d49e54d932af 100644
--- a/net/batman-adv/send.c
+++ b/net/batman-adv/send.c
@@ -56,20 +56,20 @@ static unsigned long forward_send_time(void)
 /* send out an already prepared packet to the given address via the
  * specified batman interface */
 int send_skb_packet(struct sk_buff *skb,
-		    struct batman_if *batman_if,
+		    struct hard_iface *hard_iface,
 		    uint8_t *dst_addr)
 {
 	struct ethhdr *ethhdr;
 
-	if (batman_if->if_status != IF_ACTIVE)
+	if (hard_iface->if_status != IF_ACTIVE)
 		goto send_skb_err;
 
-	if (unlikely(!batman_if->net_dev))
+	if (unlikely(!hard_iface->net_dev))
 		goto send_skb_err;
 
-	if (!(batman_if->net_dev->flags & IFF_UP)) {
+	if (!(hard_iface->net_dev->flags & IFF_UP)) {
 		pr_warning("Interface %s is not up - can't send packet via "
-			   "that interface!\n", batman_if->net_dev->name);
+			   "that interface!\n", hard_iface->net_dev->name);
 		goto send_skb_err;
 	}
 
@@ -80,7 +80,7 @@ int send_skb_packet(struct sk_buff *skb,
 	skb_reset_mac_header(skb);
 
 	ethhdr = (struct ethhdr *) skb_mac_header(skb);
-	memcpy(ethhdr->h_source, batman_if->net_dev->dev_addr, ETH_ALEN);
+	memcpy(ethhdr->h_source, hard_iface->net_dev->dev_addr, ETH_ALEN);
 	memcpy(ethhdr->h_dest, dst_addr, ETH_ALEN);
 	ethhdr->h_proto = __constant_htons(ETH_P_BATMAN);
 
@@ -88,7 +88,7 @@ int send_skb_packet(struct sk_buff *skb,
 	skb->priority = TC_PRIO_CONTROL;
 	skb->protocol = __constant_htons(ETH_P_BATMAN);
 
-	skb->dev = batman_if->net_dev;
+	skb->dev = hard_iface->net_dev;
 
 	/* dev_queue_xmit() returns a negative result on error. However on
 	 * congestion and traffic shaping, it drops and returns NET_XMIT_DROP
@@ -102,16 +102,16 @@ send_skb_err:
 
 /* Send a packet to a given interface */
 static void send_packet_to_if(struct forw_packet *forw_packet,
-			      struct batman_if *batman_if)
+			      struct hard_iface *hard_iface)
 {
-	struct bat_priv *bat_priv = netdev_priv(batman_if->soft_iface);
+	struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
 	char *fwd_str;
 	uint8_t packet_num;
 	int16_t buff_pos;
 	struct batman_packet *batman_packet;
 	struct sk_buff *skb;
 
-	if (batman_if->if_status != IF_ACTIVE)
+	if (hard_iface->if_status != IF_ACTIVE)
 		return;
 
 	packet_num = 0;
@@ -126,7 +126,7 @@ static void send_packet_to_if(struct forw_packet *forw_packet,
 		/* we might have aggregated direct link packets with an
 		 * ordinary base packet */
 		if ((forw_packet->direct_link_flags & (1 << packet_num)) &&
-		    (forw_packet->if_incoming == batman_if))
+		    (forw_packet->if_incoming == hard_iface))
 			batman_packet->flags |= DIRECTLINK;
 		else
 			batman_packet->flags &= ~DIRECTLINK;
@@ -142,7 +142,8 @@ static void send_packet_to_if(struct forw_packet *forw_packet,
 			batman_packet->tq, batman_packet->ttl,
 			(batman_packet->flags & DIRECTLINK ?
 			 "on" : "off"),
-			batman_if->net_dev->name, batman_if->net_dev->dev_addr);
+			hard_iface->net_dev->name,
+			hard_iface->net_dev->dev_addr);
 
 		buff_pos += sizeof(struct batman_packet) +
 			(batman_packet->num_hna * ETH_ALEN);
@@ -154,13 +155,13 @@ static void send_packet_to_if(struct forw_packet *forw_packet,
 	/* create clone because function is called more than once */
 	skb = skb_clone(forw_packet->skb, GFP_ATOMIC);
 	if (skb)
-		send_skb_packet(skb, batman_if, broadcast_addr);
+		send_skb_packet(skb, hard_iface, broadcast_addr);
 }
 
 /* send a batman packet */
 static void send_packet(struct forw_packet *forw_packet)
 {
-	struct batman_if *batman_if;
+	struct hard_iface *hard_iface;
 	struct net_device *soft_iface;
 	struct bat_priv *bat_priv;
 	struct batman_packet *batman_packet =
@@ -204,17 +205,17 @@ static void send_packet(struct forw_packet *forw_packet)
 
 	/* broadcast on every interface */
 	rcu_read_lock();
-	list_for_each_entry_rcu(batman_if, &hardif_list, list) {
-		if (batman_if->soft_iface != soft_iface)
+	list_for_each_entry_rcu(hard_iface, &hardif_list, list) {
+		if (hard_iface->soft_iface != soft_iface)
 			continue;
 
-		send_packet_to_if(forw_packet, batman_if);
+		send_packet_to_if(forw_packet, hard_iface);
 	}
 	rcu_read_unlock();
 }
 
 static void rebuild_batman_packet(struct bat_priv *bat_priv,
-				  struct batman_if *batman_if)
+				  struct hard_iface *hard_iface)
 {
 	int new_len;
 	unsigned char *new_buff;
@@ -226,7 +227,7 @@ static void rebuild_batman_packet(struct bat_priv *bat_priv,
 
 	/* keep old buffer if kmalloc should fail */
 	if (new_buff) {
-		memcpy(new_buff, batman_if->packet_buff,
+		memcpy(new_buff, hard_iface->packet_buff,
 		       sizeof(struct batman_packet));
 		batman_packet = (struct batman_packet *)new_buff;
 
@@ -234,21 +235,21 @@ static void rebuild_batman_packet(struct bat_priv *bat_priv,
 		       new_buff + sizeof(struct batman_packet),
 		       new_len - sizeof(struct batman_packet));
 
-		kfree(batman_if->packet_buff);
-		batman_if->packet_buff = new_buff;
-		batman_if->packet_len = new_len;
+		kfree(hard_iface->packet_buff);
+		hard_iface->packet_buff = new_buff;
+		hard_iface->packet_len = new_len;
 	}
 }
 
-void schedule_own_packet(struct batman_if *batman_if)
+void schedule_own_packet(struct hard_iface *hard_iface)
 {
-	struct bat_priv *bat_priv = netdev_priv(batman_if->soft_iface);
+	struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
 	unsigned long send_time;
 	struct batman_packet *batman_packet;
 	int vis_server;
 
-	if ((batman_if->if_status == IF_NOT_IN_USE) ||
-	    (batman_if->if_status == IF_TO_BE_REMOVED))
+	if ((hard_iface->if_status == IF_NOT_IN_USE) ||
+	    (hard_iface->if_status == IF_TO_BE_REMOVED))
 		return;
 
 	vis_server = atomic_read(&bat_priv->vis_mode);
@@ -260,51 +261,51 @@ void schedule_own_packet(struct batman_if *batman_if)
 	 * outdated packets (especially uninitialized mac addresses) in the
 	 * packet queue
 	 */
-	if (batman_if->if_status == IF_TO_BE_ACTIVATED)
-		batman_if->if_status = IF_ACTIVE;
+	if (hard_iface->if_status == IF_TO_BE_ACTIVATED)
+		hard_iface->if_status = IF_ACTIVE;
 
 	/* if local hna has changed and interface is a primary interface */
 	if ((atomic_read(&bat_priv->hna_local_changed)) &&
-	    (batman_if == bat_priv->primary_if))
-		rebuild_batman_packet(bat_priv, batman_if);
+	    (hard_iface == bat_priv->primary_if))
+		rebuild_batman_packet(bat_priv, hard_iface);
 
 	/**
 	 * NOTE: packet_buff might just have been re-allocated in
 	 * rebuild_batman_packet()
 	 */
-	batman_packet = (struct batman_packet *)batman_if->packet_buff;
+	batman_packet = (struct batman_packet *)hard_iface->packet_buff;
 
 	/* change sequence number to network order */
 	batman_packet->seqno =
-		htonl((uint32_t)atomic_read(&batman_if->seqno));
+		htonl((uint32_t)atomic_read(&hard_iface->seqno));
 
 	if (vis_server == VIS_TYPE_SERVER_SYNC)
 		batman_packet->flags |= VIS_SERVER;
 	else
 		batman_packet->flags &= ~VIS_SERVER;
 
-	if ((batman_if == bat_priv->primary_if) &&
+	if ((hard_iface == bat_priv->primary_if) &&
 	    (atomic_read(&bat_priv->gw_mode) == GW_MODE_SERVER))
 		batman_packet->gw_flags =
 			(uint8_t)atomic_read(&bat_priv->gw_bandwidth);
 	else
 		batman_packet->gw_flags = 0;
 
-	atomic_inc(&batman_if->seqno);
+	atomic_inc(&hard_iface->seqno);
 
-	slide_own_bcast_window(batman_if);
+	slide_own_bcast_window(hard_iface);
 	send_time = own_send_time(bat_priv);
 	add_bat_packet_to_list(bat_priv,
-			       batman_if->packet_buff,
-			       batman_if->packet_len,
-			       batman_if, 1, send_time);
+			       hard_iface->packet_buff,
+			       hard_iface->packet_len,
+			       hard_iface, 1, send_time);
 }
 
 void schedule_forward_packet(struct orig_node *orig_node,
 			     struct ethhdr *ethhdr,
 			     struct batman_packet *batman_packet,
 			     uint8_t directlink, int hna_buff_len,
-			     struct batman_if *if_incoming)
+			     struct hard_iface *if_incoming)
 {
 	struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
 	unsigned char in_tq, in_ttl, tq_avg = 0;
@@ -443,7 +444,7 @@ out:
 
 static void send_outstanding_bcast_packet(struct work_struct *work)
 {
-	struct batman_if *batman_if;
+	struct hard_iface *hard_iface;
 	struct delayed_work *delayed_work =
 		container_of(work, struct delayed_work, work);
 	struct forw_packet *forw_packet =
@@ -461,14 +462,14 @@ static void send_outstanding_bcast_packet(struct work_struct *work)
 
 	/* rebroadcast packet */
 	rcu_read_lock();
-	list_for_each_entry_rcu(batman_if, &hardif_list, list) {
-		if (batman_if->soft_iface != soft_iface)
+	list_for_each_entry_rcu(hard_iface, &hardif_list, list) {
+		if (hard_iface->soft_iface != soft_iface)
 			continue;
 
 		/* send a copy of the saved skb */
 		skb1 = skb_clone(forw_packet->skb, GFP_ATOMIC);
 		if (skb1)
-			send_skb_packet(skb1, batman_if, broadcast_addr);
+			send_skb_packet(skb1, hard_iface, broadcast_addr);
 	}
 	rcu_read_unlock();
 
@@ -521,15 +522,15 @@ out:
 }
 
 void purge_outstanding_packets(struct bat_priv *bat_priv,
-			       struct batman_if *batman_if)
+			       struct hard_iface *hard_iface)
 {
 	struct forw_packet *forw_packet;
 	struct hlist_node *tmp_node, *safe_tmp_node;
 
-	if (batman_if)
+	if (hard_iface)
 		bat_dbg(DBG_BATMAN, bat_priv,
 			"purge_outstanding_packets(): %s\n",
-			batman_if->net_dev->name);
+			hard_iface->net_dev->name);
 	else
 		bat_dbg(DBG_BATMAN, bat_priv,
 			"purge_outstanding_packets()\n");
@@ -543,8 +544,8 @@ void purge_outstanding_packets(struct bat_priv *bat_priv,
 		 * if purge_outstanding_packets() was called with an argmument
 		 * we delete only packets belonging to the given interface
 		 */
-		if ((batman_if) &&
-		    (forw_packet->if_incoming != batman_if))
+		if ((hard_iface) &&
+		    (forw_packet->if_incoming != hard_iface))
 			continue;
 
 		spin_unlock_bh(&bat_priv->forw_bcast_list_lock);
@@ -567,8 +568,8 @@ void purge_outstanding_packets(struct bat_priv *bat_priv,
 		 * if purge_outstanding_packets() was called with an argmument
 		 * we delete only packets belonging to the given interface
 		 */
-		if ((batman_if) &&
-		    (forw_packet->if_incoming != batman_if))
+		if ((hard_iface) &&
+		    (forw_packet->if_incoming != hard_iface))
 			continue;
 
 		spin_unlock_bh(&bat_priv->forw_bat_list_lock);
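Usage note on the NULL check retained in purge_outstanding_packets() above: a caller passing a specific hard_iface purges only the forwarded and broadcast packets queued on that interface, while passing NULL flushes both queues entirely. The call sites below are hypothetical, shown only to illustrate the two modes; they are not part of this diff.

	/* hypothetical call sites, for illustration only */
	purge_outstanding_packets(bat_priv, hard_iface);	/* one interface */
	purge_outstanding_packets(bat_priv, NULL);		/* all interfaces */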