path: root/drivers/staging/batman-adv/send.c
author	Simon Wunderlich <siwu@hrz.tu-chemnitz.de>	2010-01-02 05:30:48 -0500
committer	Greg Kroah-Hartman <gregkh@suse.de>	2010-03-03 19:42:36 -0500
commit	e70171957a3ac67fd62af0c66efe7b7749121899 (patch)
tree	ec02d2965afac6384ab0fd29607c4062f93bf134 /drivers/staging/batman-adv/send.c
parent	c4bf05d3960981a4291bcc9580f3d73eb4dcbe84 (diff)
Staging: batman-adv: receive packets directly using skbs
This patch removes the (ugly and racy) packet receiving thread and the
kernel socket usage. Instead, packets are received directly by registering
the ethernet type and handling skbs instead of self-allocated buffers.

Some consequences and comments:

 * we don't copy the payload data when forwarding/sending/receiving data
   anymore. This should boost performance.
 * packets from/to different interfaces can be (theoretically) processed
   simultaneously. Only the big originator hash lock might be in the way.
 * no more polling or sleeping/wakeup/scheduling issues when receiving
   packets
 * this might introduce new race conditions.
 * aggregation and vis code still use packet buffers and are not (yet)
   converted.
 * all spinlocks were converted to irqsave/restore versions to solve some
   livelock issues when preempted. This might be overkill; some of these
   locks might be reverted later.
 * skb copies are only done if necessary to avoid overhead

performance differences:

 * we made some "benchmarks" with intel laptops.
 * bandwidth on Gigabit Ethernet increased from ~500 MBit/s to ~920 MBit/s
 * ping latency decreased from ~2 ms to ~0.2 ms

I did some tests on my 9 node qemu environment and could confirm that the
usual sending/receiving, forwarding, vis, batctl ping etc. work.

Signed-off-by: Simon Wunderlich <siwu@hrz.tu-chemnitz.de>
Acked-by: Sven Eckelmann <sven.eckelmann@gmx.de>
Acked-by: Marek Lindner <lindner_marek@yahoo.de>
Acked-by: Linus Lüssing <linus.luessing@web.de>
Signed-off-by: Andrew Lunn <andrew@lunn.ch>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
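The receive-side change the commit message describes (dropping the receiving thread and kernel socket in favour of an ethernet-type handler) does not appear in send.c itself; the registration lives elsewhere in the driver. As a rough illustration only, a handler of that kind is normally hooked up through struct packet_type and dev_add_pack(). Everything named example_* below is invented for the sketch and is not the driver's actual code; the driver's own packet.h is assumed to provide ETH_P_BATMAN.

/*
 * Illustrative sketch only -- not the actual batman-adv registration code.
 * It shows the generic mechanism (struct packet_type + dev_add_pack()) the
 * commit message refers to when it says packets are received by registering
 * the ethernet type.
 */
#include <linux/netdevice.h>
#include <linux/skbuff.h>

#include "packet.h"	/* assumed to provide ETH_P_BATMAN */

/* called in softirq context for every received frame of our ethertype */
static int example_batman_recv(struct sk_buff *skb, struct net_device *dev,
			       struct packet_type *ptype,
			       struct net_device *orig_dev)
{
	/* the payload arrives as an skb: no private buffer, no copy,
	 * no receiving thread */
	kfree_skb(skb);		/* a real handler would process it here */
	return NET_RX_SUCCESS;
}

static struct packet_type example_batman_packet_type __read_mostly = {
	.type = __constant_htons(ETH_P_BATMAN),
	.func = example_batman_recv,
};

static void example_register_handler(void)
{
	dev_add_pack(&example_batman_packet_type);
}

static void example_unregister_handler(void)
{
	dev_remove_pack(&example_batman_packet_type);
}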
Diffstat (limited to 'drivers/staging/batman-adv/send.c')
-rw-r--r--	drivers/staging/batman-adv/send.c	93
1 files changed, 60 insertions, 33 deletions
diff --git a/drivers/staging/batman-adv/send.c b/drivers/staging/batman-adv/send.c
index 49b1534b8f7..fd48f3fa2d8 100644
--- a/drivers/staging/batman-adv/send.c
+++ b/drivers/staging/batman-adv/send.c
@@ -23,6 +23,7 @@
 #include "send.h"
 #include "routing.h"
 #include "translation-table.h"
+#include "soft-interface.h"
 #include "hard-interface.h"
 #include "types.h"
 #include "vis.h"
@@ -58,51 +59,69 @@ static unsigned long forward_send_time(void)
 	return send_time;
 }
 
-/* sends a raw packet. */
-void send_raw_packet(unsigned char *pack_buff, int pack_buff_len,
-		     struct batman_if *batman_if, uint8_t *dst_addr)
+/* send out an already prepared packet to the given address via the
+ * specified batman interface */
+int send_skb_packet(struct sk_buff *skb,
+				struct batman_if *batman_if,
+				uint8_t *dst_addr)
 {
 	struct ethhdr *ethhdr;
-	struct sk_buff *skb;
-	int retval;
-	char *data;
 
 	if (batman_if->if_active != IF_ACTIVE)
-		return;
+		goto send_skb_err;
+
+	if (unlikely(!batman_if->net_dev))
+		goto send_skb_err;
 
 	if (!(batman_if->net_dev->flags & IFF_UP)) {
 		printk(KERN_WARNING
 		       "batman-adv:Interface %s is not up - can't send packet via that interface!\n",
 		       batman_if->dev);
-		return;
+		goto send_skb_err;
 	}
 
-	skb = dev_alloc_skb(pack_buff_len + sizeof(struct ethhdr));
-	if (!skb)
-		return;
-	data = skb_put(skb, pack_buff_len + sizeof(struct ethhdr));
+	/* push to the ethernet header. */
+	if (my_skb_push(skb, sizeof(struct ethhdr)) < 0)
+		goto send_skb_err;
 
-	memcpy(data + sizeof(struct ethhdr), pack_buff, pack_buff_len);
+	skb_reset_mac_header(skb);
 
-	ethhdr = (struct ethhdr *) data;
+	ethhdr = (struct ethhdr *) skb_mac_header(skb);
 	memcpy(ethhdr->h_source, batman_if->net_dev->dev_addr, ETH_ALEN);
 	memcpy(ethhdr->h_dest, dst_addr, ETH_ALEN);
 	ethhdr->h_proto = __constant_htons(ETH_P_BATMAN);
 
-	skb_reset_mac_header(skb);
 	skb_set_network_header(skb, ETH_HLEN);
 	skb->priority = TC_PRIO_CONTROL;
 	skb->protocol = __constant_htons(ETH_P_BATMAN);
+
 	skb->dev = batman_if->net_dev;
 
 	/* dev_queue_xmit() returns a negative result on error. However on
 	 * congestion and traffic shaping, it drops and returns NET_XMIT_DROP
 	 * (which is > 0). This will not be treated as an error. */
-	retval = dev_queue_xmit(skb);
-	if (retval < 0)
-		printk(KERN_WARNING
-		       "batman-adv:Can't write to raw socket: %i\n",
-		       retval);
+
+	return dev_queue_xmit(skb);
+send_skb_err:
+	kfree_skb(skb);
+	return NET_XMIT_DROP;
+}
+
+/* sends a raw packet. */
+void send_raw_packet(unsigned char *pack_buff, int pack_buff_len,
+		     struct batman_if *batman_if, uint8_t *dst_addr)
+{
+	struct sk_buff *skb;
+	char *data;
+
+	skb = dev_alloc_skb(pack_buff_len + sizeof(struct ethhdr));
+	if (!skb)
+		return;
+	data = skb_put(skb, pack_buff_len + sizeof(struct ethhdr));
+	memcpy(data + sizeof(struct ethhdr), pack_buff, pack_buff_len);
+	/* pull back to the batman "network header" */
+	skb_pull(skb, sizeof(struct ethhdr));
+	send_skb_packet(skb, batman_if, dst_addr);
 }
 
 /* Send a packet to a given interface */
@@ -331,6 +350,8 @@ void schedule_forward_packet(struct orig_node *orig_node,
 
 static void forw_packet_free(struct forw_packet *forw_packet)
 {
+	if (forw_packet->skb)
+		kfree_skb(forw_packet->skb);
 	kfree(forw_packet->packet_buff);
 	kfree(forw_packet);
 }
@@ -353,7 +374,7 @@ static void _add_bcast_packet_to_list(struct forw_packet *forw_packet,
 			   send_time);
 }
 
-void add_bcast_packet_to_list(unsigned char *packet_buff, int packet_len)
+void add_bcast_packet_to_list(struct sk_buff *skb)
 {
 	struct forw_packet *forw_packet;
 
@@ -361,14 +382,16 @@ void add_bcast_packet_to_list(unsigned char *packet_buff, int packet_len)
 	if (!forw_packet)
 		return;
 
-	forw_packet->packet_buff = kmalloc(packet_len, GFP_ATOMIC);
-	if (!forw_packet->packet_buff) {
+	skb = skb_copy(skb, GFP_ATOMIC);
+	if (!skb) {
 		kfree(forw_packet);
 		return;
 	}
 
-	forw_packet->packet_len = packet_len;
-	memcpy(forw_packet->packet_buff, packet_buff, forw_packet->packet_len);
+	skb_reset_mac_header(skb);
+
+	forw_packet->skb = skb;
+	forw_packet->packet_buff = NULL;
 
 	/* how often did we send the bcast packet ? */
 	forw_packet->num_packets = 0;
@@ -384,6 +407,7 @@ void send_outstanding_bcast_packet(struct work_struct *work)
 	struct forw_packet *forw_packet =
 		container_of(delayed_work, struct forw_packet, delayed_work);
 	unsigned long flags;
+	struct sk_buff *skb1;
 
 	spin_lock_irqsave(&forw_bcast_list_lock, flags);
 	hlist_del(&forw_packet->list);
@@ -392,8 +416,10 @@ void send_outstanding_bcast_packet(struct work_struct *work)
 	/* rebroadcast packet */
 	rcu_read_lock();
 	list_for_each_entry_rcu(batman_if, &if_list, list) {
-		send_raw_packet(forw_packet->packet_buff,
-				forw_packet->packet_len,
+		/* send a copy of the saved skb */
+		skb1 = skb_copy(forw_packet->skb, GFP_ATOMIC);
+		if (skb1)
+			send_skb_packet(skb1,
 				batman_if, broadcastAddr);
 	}
 	rcu_read_unlock();
@@ -415,10 +441,11 @@ void send_outstanding_bat_packet(struct work_struct *work)
 		container_of(work, struct delayed_work, work);
 	struct forw_packet *forw_packet =
 		container_of(delayed_work, struct forw_packet, delayed_work);
+	unsigned long flags;
 
-	spin_lock(&forw_bat_list_lock);
+	spin_lock_irqsave(&forw_bat_list_lock, flags);
 	hlist_del(&forw_packet->list);
-	spin_unlock(&forw_bat_list_lock);
+	spin_unlock_irqrestore(&forw_bat_list_lock, flags);
 
 	send_packet(forw_packet);
 
@@ -459,18 +486,18 @@ void purge_outstanding_packets(void)
 	spin_unlock_irqrestore(&forw_bcast_list_lock, flags);
 
 	/* free batman packet list */
-	spin_lock(&forw_bat_list_lock);
+	spin_lock_irqsave(&forw_bat_list_lock, flags);
 	hlist_for_each_entry_safe(forw_packet, tmp_node, safe_tmp_node,
 				  &forw_bat_list, list) {
 
-		spin_unlock(&forw_bat_list_lock);
+		spin_unlock_irqrestore(&forw_bat_list_lock, flags);
 
 		/**
 		 * send_outstanding_bat_packet() will lock the list to
 		 * delete the item from the list
 		 */
 		cancel_delayed_work_sync(&forw_packet->delayed_work);
-		spin_lock(&forw_bat_list_lock);
+		spin_lock_irqsave(&forw_bat_list_lock, flags);
 	}
-	spin_unlock(&forw_bat_list_lock);
+	spin_unlock_irqrestore(&forw_bat_list_lock, flags);
 }
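One of the commit-message points, converting the remaining plain spin_lock()/spin_unlock() calls to the irqsave/irqrestore variants (visible in the last two hunks above), follows the usual kernel pattern for a lock that may also be taken while interrupts are disabled. The fragment below is only a generic sketch of that pattern, not part of this patch; all example_* names are invented.

/*
 * Generic irqsave/irqrestore sketch -- not part of this patch. It shows why
 * an 'unsigned long flags' variable is needed: the local interrupt state is
 * saved before the lock is taken and restored after it is released.
 */
#include <linux/spinlock.h>
#include <linux/list.h>

struct example_entry {
	struct list_head list;
};

static LIST_HEAD(example_list);
static DEFINE_SPINLOCK(example_lock);

static void example_add(struct example_entry *entry)
{
	unsigned long flags;

	spin_lock_irqsave(&example_lock, flags);	/* disables local irqs */
	list_add_tail(&entry->list, &example_list);
	spin_unlock_irqrestore(&example_lock, flags);	/* restores irq state */
}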