author     Simon Wunderlich <siwu@hrz.tu-chemnitz.de>    2010-01-02 05:30:48 -0500
committer  Greg Kroah-Hartman <gregkh@suse.de>           2010-03-03 19:42:36 -0500
commit     e70171957a3ac67fd62af0c66efe7b7749121899
tree       ec02d2965afac6384ab0fd29607c4062f93bf134
parent     c4bf05d3960981a4291bcc9580f3d73eb4dcbe84
Staging: batman-adv: receive packets directly using skbs
This patch removes the (ugly and racy) packet receiving thread and the
kernel socket usage. Instead, packets are received directly by
registering the ethernet type and handling skbs instead of
self-allocated buffers.

Some consequences and comments:

* We don't copy the payload data when forwarding/sending/receiving
  data anymore. This should boost performance.
* Packets from/to different interfaces can (theoretically) be
  processed simultaneously. Only the big originator hash lock might be
  in the way.
* No more polling or sleeping/wakeup/scheduling issues when receiving
  packets.
* This might introduce new race conditions.
* Aggregation and vis code still use packet buffers and are not (yet)
  converted.
* All spinlocks were converted to irqsave/restore versions to solve
  some livelock issues when preempted. This might be overkill; some of
  these locks might be reverted later.
* skb copies are only done if necessary, to avoid overhead.

Performance differences:

* We made some "benchmarks" with Intel laptops.
* Bandwidth on Gigabit Ethernet increased from ~500 MBit/s to
  ~920 MBit/s.
* Ping latency decreased from ~2 ms to ~0.2 ms.

I did some tests on my 9-node qemu environment and could confirm that
the usual sending/receiving, forwarding, vis, batctl ping etc. work.

Signed-off-by: Simon Wunderlich <siwu@hrz.tu-chemnitz.de>
Acked-by: Sven Eckelmann <sven.eckelmann@gmx.de>
Acked-by: Marek Lindner <lindner_marek@yahoo.de>
Acked-by: Linus Lüssing <linus.luessing@web.de>
Signed-off-by: Andrew Lunn <andrew@lunn.ch>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
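The core of the change is the registration pattern visible in main.c and
hard-interface.c below: the driver declares a struct packet_type for the
batman ethertype and lets the stack hand matching skbs straight to
batman_skb_recv(). A minimal, self-contained sketch of that kernel
pattern follows; the module boilerplate, the example_* names, and the
handler body are illustrative stand-ins, not the driver's exact code:

#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* illustrative stand-in for the batman ethertype (ETH_P_BATMAN, 0x4305) */
#define EXAMPLE_ETH_P	0x4305

/* Called by the stack for every frame carrying our ethertype. This runs
 * in softirq context, which is why the patch converts the shared locks
 * to the irqsave/irqrestore variants. */
static int example_skb_recv(struct sk_buff *skb, struct net_device *dev,
			    struct packet_type *ptype,
			    struct net_device *orig_dev)
{
	/* the skb may be shared with other protocol taps */
	skb = skb_share_check(skb, GFP_ATOMIC);
	if (!skb)
		return NET_RX_DROP;

	/* ... dispatch on the packet type, as batman_skb_recv() does ... */

	kfree_skb(skb);
	return NET_RX_SUCCESS;
}

static struct packet_type example_packet_type __read_mostly = {
	.type	= cpu_to_be16(EXAMPLE_ETH_P),
	.func	= example_skb_recv,
};

static int __init example_init(void)
{
	dev_add_pack(&example_packet_type);	/* frames start arriving now */
	return 0;
}

static void __exit example_exit(void)
{
	dev_remove_pack(&example_packet_type);	/* must precede teardown */
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");

Note that dev_add_pack() makes the handler live immediately, so it should
only be registered once everything the handler touches is initialized; the
patch correspondingly calls dev_remove_pack() in cleanup_module() before
the rest of the teardown.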
-rw-r--r--  drivers/staging/batman-adv/aggregation.c     13
-rw-r--r--  drivers/staging/batman-adv/device.c           24
-rw-r--r--  drivers/staging/batman-adv/hard-interface.c  154
-rw-r--r--  drivers/staging/batman-adv/hard-interface.h    4
-rw-r--r--  drivers/staging/batman-adv/main.c             29
-rw-r--r--  drivers/staging/batman-adv/originator.c       18
-rw-r--r--  drivers/staging/batman-adv/proc.c             10
-rw-r--r--  drivers/staging/batman-adv/routing.c         563
-rw-r--r--  drivers/staging/batman-adv/routing.h           8
-rw-r--r--  drivers/staging/batman-adv/send.c             93
-rw-r--r--  drivers/staging/batman-adv/send.h              5
-rw-r--r--  drivers/staging/batman-adv/soft-interface.c   76
-rw-r--r--  drivers/staging/batman-adv/soft-interface.h    3
-rw-r--r--  drivers/staging/batman-adv/types.h             2
-rw-r--r--  drivers/staging/batman-adv/vis.c              55
15 files changed, 548 insertions(+), 509 deletions(-)
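Because the receive handler registered above runs in softirq context, the
commit also converts every spinlock it shares with process-context code to
the irqsave variants, as the message notes. A brief sketch of that
conversion pattern under illustrative names (the lock and list here stand
in for forw_bat_list_lock and its queue):

#include <linux/spinlock.h>
#include <linux/list.h>

static DEFINE_SPINLOCK(example_lock);	/* stand-in for forw_bat_list_lock */
static LIST_HEAD(example_queue);

struct example_entry {
	struct list_head list;
};

/* Before: fine while every lock holder runs in process context. */
static void enqueue_before(struct example_entry *e)
{
	spin_lock(&example_lock);
	list_add(&e->list, &example_queue);
	spin_unlock(&example_lock);
}

/* After: disables local interrupts while held, so the softirq receive
 * path cannot interrupt a holder on this CPU and deadlock on the lock. */
static void enqueue_after(struct example_entry *e)
{
	unsigned long flags;

	spin_lock_irqsave(&example_lock, flags);
	list_add(&e->list, &example_queue);
	spin_unlock_irqrestore(&example_lock, flags);
}

As the commit message itself concedes, spin_lock_bh() would often suffice
for softirq-only contention; the irqsave form is the conservative choice,
which is why some of these conversions might be reverted later.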
diff --git a/drivers/staging/batman-adv/aggregation.c b/drivers/staging/batman-adv/aggregation.c
index 9c6e681f6fb..7917322a7e2 100644
--- a/drivers/staging/batman-adv/aggregation.c
+++ b/drivers/staging/batman-adv/aggregation.c
@@ -96,6 +96,7 @@ static void new_aggregated_packet(unsigned char *packet_buff,
 			       int own_packet)
 {
 	struct forw_packet *forw_packet_aggr;
+	unsigned long flags;
 
 	forw_packet_aggr = kmalloc(sizeof(struct forw_packet), GFP_ATOMIC);
 	if (!forw_packet_aggr)
@@ -115,6 +116,7 @@ static void new_aggregated_packet(unsigned char *packet_buff,
 	       packet_buff,
 	       forw_packet_aggr->packet_len);
 
+	forw_packet_aggr->skb = NULL;
 	forw_packet_aggr->own = own_packet;
 	forw_packet_aggr->if_incoming = if_incoming;
 	forw_packet_aggr->num_packets = 0;
@@ -126,9 +128,9 @@ static void new_aggregated_packet(unsigned char *packet_buff,
 		forw_packet_aggr->direct_link_flags |= 1;
 
 	/* add new packet to packet list */
-	spin_lock(&forw_bat_list_lock);
+	spin_lock_irqsave(&forw_bat_list_lock, flags);
 	hlist_add_head(&forw_packet_aggr->list, &forw_bat_list);
-	spin_unlock(&forw_bat_list_lock);
+	spin_unlock_irqrestore(&forw_bat_list_lock, flags);
 
 	/* start timer for this packet */
 	INIT_DELAYED_WORK(&forw_packet_aggr->delayed_work,
@@ -168,9 +170,10 @@ void add_bat_packet_to_list(unsigned char *packet_buff, int packet_len,
 	struct batman_packet *batman_packet =
 		(struct batman_packet *)packet_buff;
 	bool direct_link = batman_packet->flags & DIRECTLINK ? 1 : 0;
+	unsigned long flags;
 
 	/* find position for the packet in the forward queue */
-	spin_lock(&forw_bat_list_lock);
+	spin_lock_irqsave(&forw_bat_list_lock, flags);
 	/* own packets are not to be aggregated */
 	if ((atomic_read(&aggregation_enabled)) && (!own_packet)) {
 		hlist_for_each_entry(forw_packet_pos, tmp_node, &forw_bat_list,
@@ -191,7 +194,7 @@ void add_bat_packet_to_list(unsigned char *packet_buff, int packet_len,
 	 * suitable aggregation packet found */
 	if (forw_packet_aggr == NULL) {
 		/* the following section can run without the lock */
-		spin_unlock(&forw_bat_list_lock);
+		spin_unlock_irqrestore(&forw_bat_list_lock, flags);
 		new_aggregated_packet(packet_buff, packet_len,
 				      send_time, direct_link,
 				      if_incoming, own_packet);
@@ -199,7 +202,7 @@ void add_bat_packet_to_list(unsigned char *packet_buff, int packet_len,
 		aggregate(forw_packet_aggr,
 			  packet_buff, packet_len,
 			  direct_link);
-		spin_unlock(&forw_bat_list_lock);
+		spin_unlock_irqrestore(&forw_bat_list_lock, flags);
 	}
 }
 
diff --git a/drivers/staging/batman-adv/device.c b/drivers/staging/batman-adv/device.c
index 92cf8d56a5f..ab2b0b17d9e 100644
--- a/drivers/staging/batman-adv/device.c
+++ b/drivers/staging/batman-adv/device.c
@@ -133,8 +133,9 @@ int bat_device_release(struct inode *inode, struct file *file)
 		(struct device_client *)file->private_data;
 	struct device_packet *device_packet;
 	struct list_head *list_pos, *list_pos_tmp;
+	unsigned long flags;
 
-	spin_lock(&device_client->lock);
+	spin_lock_irqsave(&device_client->lock, flags);
 
 	/* for all packets in the queue ... */
 	list_for_each_safe(list_pos, list_pos_tmp, &device_client->queue_list) {
@@ -146,7 +147,7 @@ int bat_device_release(struct inode *inode, struct file *file)
 	}
 
 	device_client_hash[device_client->index] = NULL;
-	spin_unlock(&device_client->lock);
+	spin_unlock_irqrestore(&device_client->lock, flags);
 
 	kfree(device_client);
 	dec_module_count();
@@ -161,6 +162,7 @@ ssize_t bat_device_read(struct file *file, char __user *buf, size_t count,
 		(struct device_client *)file->private_data;
 	struct device_packet *device_packet;
 	int error;
+	unsigned long flags;
 
 	if ((file->f_flags & O_NONBLOCK) && (device_client->queue_len == 0))
 		return -EAGAIN;
@@ -177,14 +179,14 @@ ssize_t bat_device_read(struct file *file, char __user *buf, size_t count,
 	if (error)
 		return error;
 
-	spin_lock(&device_client->lock);
+	spin_lock_irqsave(&device_client->lock, flags);
 
 	device_packet = list_first_entry(&device_client->queue_list,
 					 struct device_packet, list);
 	list_del(&device_packet->list);
 	device_client->queue_len--;
 
-	spin_unlock(&device_client->lock);
+	spin_unlock_irqrestore(&device_client->lock, flags);
 
 	error = __copy_to_user(buf, &device_packet->icmp_packet,
 			       sizeof(struct icmp_packet));
@@ -205,6 +207,7 @@ ssize_t bat_device_write(struct file *file, const char __user *buff,
 	struct icmp_packet icmp_packet;
 	struct orig_node *orig_node;
 	struct batman_if *batman_if;
+	unsigned long flags;
 
 	if (len < sizeof(struct icmp_packet)) {
 		bat_dbg(DBG_BATMAN, "batman-adv:Error - can't send packet from char device: invalid packet size\n");
@@ -239,7 +242,7 @@ ssize_t bat_device_write(struct file *file, const char __user *buff,
 	if (atomic_read(&module_state) != MODULE_ACTIVE)
 		goto dst_unreach;
 
-	spin_lock(&orig_hash_lock);
+	spin_lock_irqsave(&orig_hash_lock, flags);
 	orig_node = ((struct orig_node *)hash_find(orig_hash, icmp_packet.dst));
 
 	if (!orig_node)
@@ -261,11 +264,11 @@ ssize_t bat_device_write(struct file *file, const char __user *buff,
 			sizeof(struct icmp_packet),
 			batman_if, orig_node->router->addr);
 
-	spin_unlock(&orig_hash_lock);
+	spin_unlock_irqrestore(&orig_hash_lock, flags);
 	goto out;
 
 unlock:
-	spin_unlock(&orig_hash_lock);
+	spin_unlock_irqrestore(&orig_hash_lock, flags);
 dst_unreach:
 	icmp_packet.msg_type = DESTINATION_UNREACHABLE;
 	bat_device_add_packet(device_client, &icmp_packet);
@@ -290,6 +293,7 @@ void bat_device_add_packet(struct device_client *device_client,
 			   struct icmp_packet *icmp_packet)
 {
 	struct device_packet *device_packet;
+	unsigned long flags;
 
 	device_packet = kmalloc(sizeof(struct device_packet), GFP_KERNEL);
 
@@ -300,12 +304,12 @@ void bat_device_add_packet(struct device_client *device_client,
 	memcpy(&device_packet->icmp_packet, icmp_packet,
 	       sizeof(struct icmp_packet));
 
-	spin_lock(&device_client->lock);
+	spin_lock_irqsave(&device_client->lock, flags);
 
 	/* while waiting for the lock the device_client could have been
 	 * deleted */
 	if (!device_client_hash[icmp_packet->uid]) {
-		spin_unlock(&device_client->lock);
+		spin_unlock_irqrestore(&device_client->lock, flags);
 		kfree(device_packet);
 		return;
 	}
@@ -322,7 +326,7 @@ void bat_device_add_packet(struct device_client *device_client,
 		device_client->queue_len--;
 	}
 
-	spin_unlock(&device_client->lock);
+	spin_unlock_irqrestore(&device_client->lock, flags);
 
 	wake_up(&device_client->queue_wait);
 }
diff --git a/drivers/staging/batman-adv/hard-interface.c b/drivers/staging/batman-adv/hard-interface.c
index 7c885926b16..cc59c307a42 100644
--- a/drivers/staging/batman-adv/hard-interface.c
+++ b/drivers/staging/batman-adv/hard-interface.c
@@ -153,9 +153,6 @@ void hardif_deactivate_interface(struct batman_if *batman_if)
 	if (batman_if->if_active != IF_ACTIVE)
 		return;
 
-	if (batman_if->raw_sock)
-		sock_release(batman_if->raw_sock);
-
 	/**
 	 * batman_if->net_dev has been acquired by dev_get_by_name() in
 	 * proc_interfaces_write() and has to be unreferenced.
@@ -164,9 +161,6 @@ void hardif_deactivate_interface(struct batman_if *batman_if)
 	if (batman_if->net_dev)
 		dev_put(batman_if->net_dev);
 
-	batman_if->raw_sock = NULL;
-	batman_if->net_dev = NULL;
-
 	batman_if->if_active = IF_INACTIVE;
 	active_ifs--;
 
@@ -177,9 +171,6 @@ void hardif_deactivate_interface(struct batman_if *batman_if)
 /* (re)activate given interface. */
 static void hardif_activate_interface(struct batman_if *batman_if)
 {
-	struct sockaddr_ll bind_addr;
-	int retval;
-
 	if (batman_if->if_active != IF_INACTIVE)
 		return;
 
@@ -191,35 +182,8 @@ static void hardif_activate_interface(struct batman_if *batman_if)
 	if (!batman_if->net_dev)
 		goto dev_err;
 
-	retval = sock_create_kern(PF_PACKET, SOCK_RAW,
-				  __constant_htons(ETH_P_BATMAN),
-				  &batman_if->raw_sock);
-
-	if (retval < 0) {
-		printk(KERN_ERR "batman-adv:Can't create raw socket: %i\n",
-		       retval);
-		goto sock_err;
-	}
-
-	bind_addr.sll_family = AF_PACKET;
-	bind_addr.sll_ifindex = batman_if->net_dev->ifindex;
-	bind_addr.sll_protocol = 0; /* is set by the kernel */
-
-	retval = kernel_bind(batman_if->raw_sock,
-			     (struct sockaddr *)&bind_addr, sizeof(bind_addr));
-
-	if (retval < 0) {
-		printk(KERN_ERR "batman-adv:Can't create bind raw socket: %i\n",
-		       retval);
-		goto bind_err;
-	}
-
 	check_known_mac_addr(batman_if->net_dev->dev_addr);
 
-	batman_if->raw_sock->sk->sk_user_data =
-		batman_if->raw_sock->sk->sk_data_ready;
-	batman_if->raw_sock->sk->sk_data_ready = batman_data_ready;
-
 	addr_to_string(batman_if->addr_str, batman_if->net_dev->dev_addr);
 
 	memcpy(((struct batman_packet *)(batman_if->packet_buff))->orig,
@@ -239,12 +203,7 @@ static void hardif_activate_interface(struct batman_if *batman_if)
 
 	return;
 
-bind_err:
-	sock_release(batman_if->raw_sock);
-sock_err:
-	dev_put(batman_if->net_dev);
 dev_err:
-	batman_if->raw_sock = NULL;
 	batman_if->net_dev = NULL;
 }
 
@@ -318,6 +277,7 @@ int hardif_add_interface(char *dev, int if_num)
 	struct batman_if *batman_if;
 	struct batman_packet *batman_packet;
 	struct orig_node *orig_node;
+	unsigned long flags;
 	HASHIT(hashit);
 
 	batman_if = kmalloc(sizeof(struct batman_if), GFP_KERNEL);
@@ -327,7 +287,6 @@ int hardif_add_interface(char *dev, int if_num)
 		return -1;
 	}
 
-	batman_if->raw_sock = NULL;
 	batman_if->net_dev = NULL;
 
 	if ((if_num == 0) && (num_hna > 0))
@@ -375,17 +334,17 @@ int hardif_add_interface(char *dev, int if_num)
 
 	/* resize all orig nodes because orig_node->bcast_own(_sum) depend on
 	 * if_num */
-	spin_lock(&orig_hash_lock);
+	spin_lock_irqsave(&orig_hash_lock, flags);
 
 	while (hash_iterate(orig_hash, &hashit)) {
 		orig_node = hashit.bucket->data;
 		if (resize_orig(orig_node, if_num) == -1) {
-			spin_unlock(&orig_hash_lock);
+			spin_unlock_irqrestore(&orig_hash_lock, flags);
 			goto out;
 		}
 	}
 
-	spin_unlock(&orig_hash_lock);
+	spin_unlock_irqrestore(&orig_hash_lock, flags);
 
 	if (!hardif_is_interface_up(batman_if->dev))
 		printk(KERN_ERR "batman-adv:Not using interface %s (retrying later): interface not active\n", batman_if->dev);
@@ -443,6 +402,111 @@ out:
 	return NOTIFY_DONE;
 }
 
+/* find batman interface by netdev. assumes rcu_read_lock on */
+static struct batman_if *find_batman_if(struct net_device *dev)
+{
+	struct batman_if *batman_if;
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(batman_if, &if_list, list) {
+		if (batman_if->net_dev == dev) {
+			rcu_read_unlock();
+			return batman_if;
+		}
+	}
+	rcu_read_unlock();
+	return NULL;
+}
+
+
+/* receive a packet with the batman ethertype coming on a hard
+ * interface */
+int batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
+		    struct packet_type *ptype, struct net_device *orig_dev)
+{
+	struct batman_packet *batman_packet;
+	struct batman_if *batman_if;
+	struct net_device_stats *stats;
+	int ret;
+
+	skb = skb_share_check(skb, GFP_ATOMIC);
+
+	if (skb == NULL)
+		goto err_free;
+
+	/* packet should hold at least type and version */
+	if (unlikely(skb_headlen(skb) < 2))
+		goto err_free;
+
+	/* expect a valid ethernet header here. */
+	if (unlikely(skb->mac_len != sizeof(struct ethhdr)
+		     || !skb_mac_header(skb)))
+		goto err_free;
+
+	batman_if = find_batman_if(skb->dev);
+	if (!batman_if)
+		goto err_free;
+
+	stats = &skb->dev->stats;
+	stats->rx_packets++;
+	stats->rx_bytes += skb->len;
+
+	batman_packet = (struct batman_packet *)skb->data;
+
+	if (batman_packet->version != COMPAT_VERSION) {
+		bat_dbg(DBG_BATMAN,
+			"Drop packet: incompatible batman version (%i)\n",
+			batman_packet->version);
+		goto err_free;
+	}
+
+	/* all receive handlers return whether they received or reused
+	 * the supplied skb. if not, we have to free the skb. */
+
+	switch (batman_packet->packet_type) {
+	/* batman originator packet */
+	case BAT_PACKET:
+		ret = recv_bat_packet(skb, batman_if);
+		break;
+
+	/* batman icmp packet */
+	case BAT_ICMP:
+		ret = recv_icmp_packet(skb);
+		break;
+
+	/* unicast packet */
+	case BAT_UNICAST:
+		ret = recv_unicast_packet(skb);
+		break;
+
+	/* broadcast packet */
+	case BAT_BCAST:
+		ret = recv_bcast_packet(skb);
+		break;
+
+	/* vis packet */
+	case BAT_VIS:
+		ret = recv_vis_packet(skb);
+		break;
+	default:
+		ret = NET_RX_DROP;
+	}
+	if (ret == NET_RX_DROP)
+		kfree_skb(skb);
+
+	/* return NET_RX_SUCCESS in any case as we
+	 * most probably dropped the packet for
+	 * routing-logical reasons. */
+
+	return NET_RX_SUCCESS;
+
+err_free:
+	kfree_skb(skb);
+	return NET_RX_DROP;
+
+}
+
+
 struct notifier_block hard_if_notifier = {
 	.notifier_call = hard_if_event,
 };
diff --git a/drivers/staging/batman-adv/hard-interface.h b/drivers/staging/batman-adv/hard-interface.h
index 742358c00c0..97c6ecb9e08 100644
--- a/drivers/staging/batman-adv/hard-interface.h
+++ b/drivers/staging/batman-adv/hard-interface.h
@@ -32,5 +32,9 @@ void hardif_deactivate_interface(struct batman_if *batman_if);
 char hardif_get_active_if_num(void);
 void hardif_check_interfaces_status(void);
 void hardif_check_interfaces_status_wq(struct work_struct *work);
+int batman_skb_recv(struct sk_buff *skb,
+		    struct net_device *dev,
+		    struct packet_type *ptype,
+		    struct net_device *orig_dev);
 int hardif_min_mtu(void);
 void update_min_mtu(void);
diff --git a/drivers/staging/batman-adv/main.c b/drivers/staging/batman-adv/main.c
index 434c600a6a4..c7335041569 100644
--- a/drivers/staging/batman-adv/main.c
+++ b/drivers/staging/batman-adv/main.c
@@ -50,11 +50,14 @@ int16_t num_ifs;
 
 struct net_device *soft_device;
 
-static struct task_struct *kthread_task;
-
 unsigned char broadcastAddr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
 atomic_t module_state;
 
+static struct packet_type batman_adv_packet_type __read_mostly = {
+	.type = cpu_to_be16(ETH_P_BATMAN),
+	.func = batman_skb_recv,
+};
+
 struct workqueue_struct *bat_event_workqueue;
 
 #ifdef CONFIG_BATMAN_ADV_DEBUG
@@ -113,6 +116,7 @@ int init_module(void)
 	}
 
 	register_netdevice_notifier(&hard_if_notifier);
+	dev_add_pack(&batman_adv_packet_type);
 
 	printk(KERN_INFO "batman-adv:B.A.T.M.A.N. advanced %s%s (compatibility version %i) loaded \n",
 	       SOURCE_VERSION, REVISION_VERSION_STR, COMPAT_VERSION);
@@ -135,6 +139,8 @@ void cleanup_module(void)
 		soft_device = NULL;
 	}
 
+	dev_remove_pack(&batman_adv_packet_type);
+
 	unregister_netdevice_notifier(&hard_if_notifier);
 	cleanup_procfs();
 
@@ -162,16 +168,6 @@ void activate_module(void)
 	if (vis_init() < 1)
 		goto err;
 
-	/* (re)start kernel thread for packet processing */
-	if (!kthread_task) {
-		kthread_task = kthread_run(packet_recv_thread, NULL, "batman-adv");
-
-		if (IS_ERR(kthread_task)) {
-			printk(KERN_ERR "batman-adv:Unable to start packet receive thread\n");
-			kthread_task = NULL;
-		}
-	}
-
 	update_min_mtu();
 	atomic_set(&module_state, MODULE_ACTIVE);
 	goto end;
@@ -193,14 +189,7 @@ void shutdown_module(void)
 
 	vis_quit();
 
-	/* deactivate kernel thread for packet processing (if running) */
-	if (kthread_task) {
-		atomic_set(&exit_cond, 1);
-		wake_up_interruptible(&thread_wait);
-		kthread_stop(kthread_task);
-
-		kthread_task = NULL;
-	}
+	/* TODO: unregister BATMAN pack */
 
 	originator_free();
 
diff --git a/drivers/staging/batman-adv/originator.c b/drivers/staging/batman-adv/originator.c
index 71bd2cf421c..1c0a3cec846 100644
--- a/drivers/staging/batman-adv/originator.c
+++ b/drivers/staging/batman-adv/originator.c
@@ -37,35 +37,38 @@ static void start_purge_timer(void)
 
 int originator_init(void)
 {
+	unsigned long flags;
 	if (orig_hash)
 		return 1;
 
-	spin_lock(&orig_hash_lock);
+	spin_lock_irqsave(&orig_hash_lock, flags);
 	orig_hash = hash_new(128, compare_orig, choose_orig);
 
 	if (!orig_hash)
 		goto err;
 
-	spin_unlock(&orig_hash_lock);
+	spin_unlock_irqrestore(&orig_hash_lock, flags);
 	start_purge_timer();
 	return 1;
 
 err:
-	spin_unlock(&orig_hash_lock);
+	spin_unlock_irqrestore(&orig_hash_lock, flags);
 	return 0;
 }
 
 void originator_free(void)
 {
+	unsigned long flags;
+
 	if (!orig_hash)
 		return;
 
 	cancel_delayed_work_sync(&purge_orig_wq);
 
-	spin_lock(&orig_hash_lock);
+	spin_lock_irqsave(&orig_hash_lock, flags);
 	hash_delete(orig_hash, free_orig_node);
 	orig_hash = NULL;
-	spin_unlock(&orig_hash_lock);
+	spin_unlock_irqrestore(&orig_hash_lock, flags);
 }
 
 struct neigh_node *
@@ -243,8 +246,9 @@ void purge_orig(struct work_struct *work)
 {
 	HASHIT(hashit);
 	struct orig_node *orig_node;
+	unsigned long flags;
 
-	spin_lock(&orig_hash_lock);
+	spin_lock_irqsave(&orig_hash_lock, flags);
 
 	/* for all origins... */
 	while (hash_iterate(orig_hash, &hashit)) {
@@ -255,7 +259,7 @@ void purge_orig(struct work_struct *work)
 		}
 	}
 
-	spin_unlock(&orig_hash_lock);
+	spin_unlock_irqrestore(&orig_hash_lock, flags);
 
 	start_purge_timer();
 }
diff --git a/drivers/staging/batman-adv/proc.c b/drivers/staging/batman-adv/proc.c
index a4f19e1dfc7..33dae941a5c 100644
--- a/drivers/staging/batman-adv/proc.c
+++ b/drivers/staging/batman-adv/proc.c
@@ -189,6 +189,7 @@ static int proc_originators_read(struct seq_file *seq, void *offset)
 	struct neigh_node *neigh_node;
 	int batman_count = 0;
 	char orig_str[ETH_STR_LEN], router_str[ETH_STR_LEN];
+	unsigned long flags;
 
 	rcu_read_lock();
 	if (list_empty(&if_list)) {
@@ -211,7 +212,7 @@ static int proc_originators_read(struct seq_file *seq, void *offset)
 		   ((struct batman_if *)if_list.next)->addr_str);
 
 	rcu_read_unlock();
-	spin_lock(&orig_hash_lock);
+	spin_lock_irqsave(&orig_hash_lock, flags);
 
 	while (hash_iterate(orig_hash, &hashit)) {
 
@@ -242,7 +243,7 @@ static int proc_originators_read(struct seq_file *seq, void *offset)
 
 	}
 
-	spin_unlock(&orig_hash_lock);
+	spin_unlock_irqrestore(&orig_hash_lock, flags);
 
 	if (batman_count == 0)
 		seq_printf(seq, "No batman nodes in range ... \n");
@@ -376,6 +377,7 @@ static int proc_vis_data_read(struct seq_file *seq, void *offset)
 	HLIST_HEAD(vis_if_list);
 	int i;
 	char tmp_addr_str[ETH_STR_LEN];
+	unsigned long flags;
 
 	rcu_read_lock();
 	if (list_empty(&if_list) || (!is_vis_server())) {
@@ -385,7 +387,7 @@ static int proc_vis_data_read(struct seq_file *seq, void *offset)
 
 	rcu_read_unlock();
 
-	spin_lock(&vis_hash_lock);
+	spin_lock_irqsave(&vis_hash_lock, flags);
 	while (hash_iterate(vis_hash, &hashit)) {
 		info = hashit.bucket->data;
 		entries = (struct vis_info_entry *)
@@ -402,7 +404,7 @@ static int proc_vis_data_read(struct seq_file *seq, void *offset)
 		proc_vis_read_prim_sec(seq, &vis_if_list);
 		seq_printf(seq, "\n");
 	}
-	spin_unlock(&vis_hash_lock);
+	spin_unlock_irqrestore(&vis_hash_lock, flags);
 
 end:
 	return 0;
diff --git a/drivers/staging/batman-adv/routing.c b/drivers/staging/batman-adv/routing.c
index f8464cad30b..e0d093f5d52 100644
--- a/drivers/staging/batman-adv/routing.c
+++ b/drivers/staging/batman-adv/routing.c
@@ -36,15 +36,16 @@
36 36
37DECLARE_WAIT_QUEUE_HEAD(thread_wait); 37DECLARE_WAIT_QUEUE_HEAD(thread_wait);
38 38
39static atomic_t data_ready_cond;
40atomic_t exit_cond; 39atomic_t exit_cond;
40
41void slide_own_bcast_window(struct batman_if *batman_if) 41void slide_own_bcast_window(struct batman_if *batman_if)
42{ 42{
43 HASHIT(hashit); 43 HASHIT(hashit);
44 struct orig_node *orig_node; 44 struct orig_node *orig_node;
45 TYPE_OF_WORD *word; 45 TYPE_OF_WORD *word;
46 unsigned long flags;
46 47
47 spin_lock(&orig_hash_lock); 48 spin_lock_irqsave(&orig_hash_lock, flags);
48 49
49 while (hash_iterate(orig_hash, &hashit)) { 50 while (hash_iterate(orig_hash, &hashit)) {
50 orig_node = hashit.bucket->data; 51 orig_node = hashit.bucket->data;
@@ -55,7 +56,7 @@ void slide_own_bcast_window(struct batman_if *batman_if)
55 bit_packet_count(word); 56 bit_packet_count(word);
56 } 57 }
57 58
58 spin_unlock(&orig_hash_lock); 59 spin_unlock_irqrestore(&orig_hash_lock, flags);
59} 60}
60 61
61static void update_HNA(struct orig_node *orig_node, 62static void update_HNA(struct orig_node *orig_node,
@@ -365,10 +366,9 @@ static char count_real_packets(struct ethhdr *ethhdr,
365} 366}
366 367
367void receive_bat_packet(struct ethhdr *ethhdr, 368void receive_bat_packet(struct ethhdr *ethhdr,
368 struct batman_packet *batman_packet, 369 struct batman_packet *batman_packet,
369 unsigned char *hna_buff, 370 unsigned char *hna_buff, int hna_buff_len,
370 int hna_buff_len, 371 struct batman_if *if_incoming)
371 struct batman_if *if_incoming)
372{ 372{
373 struct batman_if *batman_if; 373 struct batman_if *batman_if;
374 struct orig_node *orig_neigh_node, *orig_node; 374 struct orig_node *orig_neigh_node, *orig_node;
@@ -566,95 +566,118 @@ void receive_bat_packet(struct ethhdr *ethhdr,
566 0, hna_buff_len, if_incoming); 566 0, hna_buff_len, if_incoming);
567} 567}
568 568
569 569int recv_bat_packet(struct sk_buff *skb,
570static int receive_raw_packet(struct socket *raw_sock, 570 struct batman_if *batman_if)
571 unsigned char *packet_buff, int packet_buff_len)
572{ 571{
573 struct kvec iov; 572 struct ethhdr *ethhdr;
574 struct msghdr msg; 573 unsigned long flags;
575 574
576 iov.iov_base = packet_buff; 575 /* drop packet if it has not necessary minimum size */
577 iov.iov_len = packet_buff_len; 576 if (skb_headlen(skb) < sizeof(struct batman_packet))
577 return NET_RX_DROP;
578 578
579 msg.msg_flags = MSG_DONTWAIT; /* non-blocking */ 579 ethhdr = (struct ethhdr *)skb_mac_header(skb);
580 msg.msg_name = NULL;
581 msg.msg_namelen = 0;
582 msg.msg_control = NULL;
583 580
584 return kernel_recvmsg(raw_sock, &msg, &iov, 1, packet_buff_len,
585 MSG_DONTWAIT);
586}
587
588static void recv_bat_packet(struct ethhdr *ethhdr,
589 unsigned char *packet_buff,
590 int result,
591 struct batman_if *batman_if)
592{
593 /* packet with broadcast indication but unicast recipient */ 581 /* packet with broadcast indication but unicast recipient */
594 if (!is_bcast(ethhdr->h_dest)) 582 if (!is_bcast(ethhdr->h_dest))
595 return; 583 return NET_RX_DROP;
596 584
597 /* packet with broadcast sender address */ 585 /* packet with broadcast sender address */
598 if (is_bcast(ethhdr->h_source)) 586 if (is_bcast(ethhdr->h_source))
599 return; 587 return NET_RX_DROP;
600 588
601 /* drop packet if it has not at least one batman packet as payload */ 589 spin_lock_irqsave(&orig_hash_lock, flags);
602 if (result < sizeof(struct ethhdr) + sizeof(struct batman_packet)) 590 /* TODO: we use headlen instead of "length", because
603 return; 591 * only this data is paged in. */
604 592 /* TODO: is another skb_copy needed here? there will be
605 spin_lock(&orig_hash_lock); 593 * written on the data, but nobody (?) should further use
594 * this data */
606 receive_aggr_bat_packet(ethhdr, 595 receive_aggr_bat_packet(ethhdr,
607 packet_buff + sizeof(struct ethhdr), 596 skb->data,
608 result - sizeof(struct ethhdr), 597 skb_headlen(skb),
609 batman_if); 598 batman_if);
610 spin_unlock(&orig_hash_lock); 599 spin_unlock_irqrestore(&orig_hash_lock, flags);
600
601 kfree_skb(skb);
602 return NET_RX_SUCCESS;
611} 603}
612 604
613static void recv_my_icmp_packet(struct ethhdr *ethhdr, 605static int recv_my_icmp_packet(struct sk_buff *skb)
614 struct icmp_packet *icmp_packet,
615 unsigned char *packet_buff,
616 int result)
617{ 606{
618 struct orig_node *orig_node; 607 struct orig_node *orig_node;
608 struct icmp_packet *icmp_packet;
609 struct ethhdr *ethhdr;
610 struct sk_buff *skb_old;
611 struct batman_if *batman_if;
612 int ret;
613 unsigned long flags;
614 uint8_t dstaddr[ETH_ALEN];
615
616 icmp_packet = (struct icmp_packet *) skb->data;
617 ethhdr = (struct ethhdr *) skb_mac_header(skb);
619 618
620 /* add data to device queue */ 619 /* add data to device queue */
621 if (icmp_packet->msg_type != ECHO_REQUEST) { 620 if (icmp_packet->msg_type != ECHO_REQUEST) {
622 bat_device_receive_packet(icmp_packet); 621 bat_device_receive_packet(icmp_packet);
623 return; 622 return NET_RX_DROP;
624 } 623 }
625 624
626 /* answer echo request (ping) */ 625 /* answer echo request (ping) */
627 /* get routing information */ 626 /* get routing information */
628 spin_lock(&orig_hash_lock); 627 spin_lock_irqsave(&orig_hash_lock, flags);
629 orig_node = ((struct orig_node *)hash_find(orig_hash, 628 orig_node = ((struct orig_node *)hash_find(orig_hash,
630 icmp_packet->orig)); 629 icmp_packet->orig));
630 ret = NET_RX_DROP;
631 631
632 if ((orig_node != NULL) && 632 if ((orig_node != NULL) &&
633 (orig_node->batman_if != NULL) && 633 (orig_node->batman_if != NULL) &&
634 (orig_node->router != NULL)) { 634 (orig_node->router != NULL)) {
635
636 /* don't lock while sending the packets ... we therefore
637 * copy the required data before sending */
638 batman_if = orig_node->batman_if;
639 memcpy(dstaddr, orig_node->router->addr, ETH_ALEN);
640 spin_unlock_irqrestore(&orig_hash_lock, flags);
641
642 /* create a copy of the skb, if needed, to modify it. */
643 skb_old = NULL;
644 if (!skb_clone_writable(skb, sizeof(struct icmp_packet))) {
645 skb_old = skb;
646 skb = skb_copy(skb, GFP_ATOMIC);
647 if (!skb)
648 return NET_RX_DROP;
649 icmp_packet = (struct icmp_packet *) skb->data;
650 kfree_skb(skb_old);
651 }
652
635 memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN); 653 memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN);
636 memcpy(icmp_packet->orig, ethhdr->h_dest, ETH_ALEN); 654 memcpy(icmp_packet->orig, ethhdr->h_dest, ETH_ALEN);
637 icmp_packet->msg_type = ECHO_REPLY; 655 icmp_packet->msg_type = ECHO_REPLY;
638 icmp_packet->ttl = TTL; 656 icmp_packet->ttl = TTL;
639 657
640 send_raw_packet(packet_buff + sizeof(struct ethhdr), 658 send_skb_packet(skb, batman_if, dstaddr);
641 result - sizeof(struct ethhdr), 659 ret = NET_RX_SUCCESS;
642 orig_node->batman_if,
643 orig_node->router->addr);
644 }
645 660
646 spin_unlock(&orig_hash_lock); 661 } else
647 return; 662 spin_unlock_irqrestore(&orig_hash_lock, flags);
663
664 return ret;
648} 665}
649 666
650static void recv_icmp_ttl_exceeded(struct icmp_packet *icmp_packet, 667static int recv_icmp_ttl_exceeded(struct sk_buff *skb)
651 struct ethhdr *ethhdr,
652 unsigned char *packet_buff,
653 int result,
654 struct batman_if *batman_if)
655{ 668{
656 unsigned char src_str[ETH_STR_LEN], dst_str[ETH_STR_LEN]; 669 unsigned char src_str[ETH_STR_LEN], dst_str[ETH_STR_LEN];
657 struct orig_node *orig_node; 670 struct orig_node *orig_node;
671 struct icmp_packet *icmp_packet;
672 struct ethhdr *ethhdr;
673 struct sk_buff *skb_old;
674 struct batman_if *batman_if;
675 int ret;
676 unsigned long flags;
677 uint8_t dstaddr[ETH_ALEN];
678
679 icmp_packet = (struct icmp_packet *) skb->data;
680 ethhdr = (struct ethhdr *) skb_mac_header(skb);
658 681
659 addr_to_string(src_str, icmp_packet->orig); 682 addr_to_string(src_str, icmp_packet->orig);
660 addr_to_string(dst_str, icmp_packet->dst); 683 addr_to_string(dst_str, icmp_packet->dst);
@@ -663,74 +686,93 @@ static void recv_icmp_ttl_exceeded(struct icmp_packet *icmp_packet,
663 686
664 /* send TTL exceeded if packet is an echo request (traceroute) */ 687 /* send TTL exceeded if packet is an echo request (traceroute) */
665 if (icmp_packet->msg_type != ECHO_REQUEST) 688 if (icmp_packet->msg_type != ECHO_REQUEST)
666 return; 689 return NET_RX_DROP;
667 690
668 /* get routing information */ 691 /* get routing information */
669 spin_lock(&orig_hash_lock); 692 spin_lock_irqsave(&orig_hash_lock, flags);
670 orig_node = ((struct orig_node *) 693 orig_node = ((struct orig_node *)
671 hash_find(orig_hash, icmp_packet->orig)); 694 hash_find(orig_hash, icmp_packet->orig));
695 ret = NET_RX_DROP;
672 696
673 if ((orig_node != NULL) && 697 if ((orig_node != NULL) &&
674 (orig_node->batman_if != NULL) && 698 (orig_node->batman_if != NULL) &&
675 (orig_node->router != NULL)) { 699 (orig_node->router != NULL)) {
700
701 /* don't lock while sending the packets ... we therefore
702 * copy the required data before sending */
703 batman_if = orig_node->batman_if;
704 memcpy(dstaddr, orig_node->router->addr, ETH_ALEN);
705 spin_unlock_irqrestore(&orig_hash_lock, flags);
706
707 /* create a copy of the skb, if needed, to modify it. */
708 if (!skb_clone_writable(skb, sizeof(struct icmp_packet))) {
709 skb_old = skb;
710 skb = skb_copy(skb, GFP_ATOMIC);
711 if (!skb)
712 return NET_RX_DROP;
713 icmp_packet = (struct icmp_packet *) skb->data;
714 kfree_skb(skb_old);
715 }
716
676 memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN); 717 memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN);
677 memcpy(icmp_packet->orig, ethhdr->h_dest, ETH_ALEN); 718 memcpy(icmp_packet->orig, ethhdr->h_dest, ETH_ALEN);
678 icmp_packet->msg_type = TTL_EXCEEDED; 719 icmp_packet->msg_type = TTL_EXCEEDED;
679 icmp_packet->ttl = TTL; 720 icmp_packet->ttl = TTL;
680 721
681 send_raw_packet(packet_buff + sizeof(struct ethhdr), 722 send_skb_packet(skb, batman_if, dstaddr);
682 result - sizeof(struct ethhdr), 723 ret = NET_RX_SUCCESS;
683 orig_node->batman_if,
684 orig_node->router->addr);
685 724
686 } 725 } else
726 spin_unlock_irqrestore(&orig_hash_lock, flags);
687 727
688 spin_unlock(&orig_hash_lock); 728 return ret;
689} 729}
690 730
691 731
692 732int recv_icmp_packet(struct sk_buff *skb)
693static void recv_icmp_packet(struct ethhdr *ethhdr,
694 unsigned char *packet_buff,
695 int result,
696 struct batman_if *batman_if)
697{ 733{
698 struct icmp_packet *icmp_packet; 734 struct icmp_packet *icmp_packet;
735 struct ethhdr *ethhdr;
699 struct orig_node *orig_node; 736 struct orig_node *orig_node;
737 struct sk_buff *skb_old;
738 struct batman_if *batman_if;
739 int hdr_size = sizeof(struct icmp_packet);
740 int ret;
741 unsigned long flags;
742 uint8_t dstaddr[ETH_ALEN];
743
744 /* drop packet if it has not necessary minimum size */
745 if (skb_headlen(skb) < hdr_size)
746 return NET_RX_DROP;
747
748 ethhdr = (struct ethhdr *)skb_mac_header(skb);
700 749
701 /* packet with unicast indication but broadcast recipient */ 750 /* packet with unicast indication but broadcast recipient */
702 if (is_bcast(ethhdr->h_dest)) 751 if (is_bcast(ethhdr->h_dest))
703 return; 752 return NET_RX_DROP;
704 753
705 /* packet with broadcast sender address */ 754 /* packet with broadcast sender address */
706 if (is_bcast(ethhdr->h_source)) 755 if (is_bcast(ethhdr->h_source))
707 return; 756 return NET_RX_DROP;
708 757
709 /* not for me */ 758 /* not for me */
710 if (!is_my_mac(ethhdr->h_dest)) 759 if (!is_my_mac(ethhdr->h_dest))
711 return; 760 return NET_RX_DROP;
712 761
713 /* drop packet if it has not necessary minimum size */ 762 icmp_packet = (struct icmp_packet *) skb->data;
714 if (result < sizeof(struct ethhdr) + sizeof(struct icmp_packet))
715 return;
716
717 icmp_packet = (struct icmp_packet *)
718 (packet_buff + sizeof(struct ethhdr));
719 763
720 /* packet for me */ 764 /* packet for me */
721 if (is_my_mac(icmp_packet->dst)) 765 if (is_my_mac(icmp_packet->dst))
722 recv_my_icmp_packet(ethhdr, icmp_packet, packet_buff, result); 766 return recv_my_icmp_packet(skb);
723 767
724 /* TTL exceeded */ 768 /* TTL exceeded */
725 if (icmp_packet->ttl < 2) { 769 if (icmp_packet->ttl < 2)
726 recv_icmp_ttl_exceeded(icmp_packet, ethhdr, packet_buff, result, 770 return recv_icmp_ttl_exceeded(skb);
727 batman_if);
728 return;
729 771
730 } 772 ret = NET_RX_DROP;
731 773
732 /* get routing information */ 774 /* get routing information */
733 spin_lock(&orig_hash_lock); 775 spin_lock_irqsave(&orig_hash_lock, flags);
734 orig_node = ((struct orig_node *) 776 orig_node = ((struct orig_node *)
735 hash_find(orig_hash, icmp_packet->dst)); 777 hash_find(orig_hash, icmp_packet->dst));
736 778
@@ -738,133 +780,169 @@ static void recv_icmp_packet(struct ethhdr *ethhdr,
738 (orig_node->batman_if != NULL) && 780 (orig_node->batman_if != NULL) &&
739 (orig_node->router != NULL)) { 781 (orig_node->router != NULL)) {
740 782
783 /* don't lock while sending the packets ... we therefore
784 * copy the required data before sending */
785 batman_if = orig_node->batman_if;
786 memcpy(dstaddr, orig_node->router->addr, ETH_ALEN);
787 spin_unlock_irqrestore(&orig_hash_lock, flags);
788
789 /* create a copy of the skb, if needed, to modify it. */
790 if (!skb_clone_writable(skb, sizeof(struct icmp_packet))) {
791 skb_old = skb;
792 skb = skb_copy(skb, GFP_ATOMIC);
793 if (!skb)
794 return NET_RX_DROP;
795 icmp_packet = (struct icmp_packet *) skb->data;
796 kfree_skb(skb_old);
797 }
798
741 /* decrement ttl */ 799 /* decrement ttl */
742 icmp_packet->ttl--; 800 icmp_packet->ttl--;
743 801
744 /* route it */ 802 /* route it */
745 send_raw_packet(packet_buff + sizeof(struct ethhdr), 803 send_skb_packet(skb, batman_if, dstaddr);
746 result - sizeof(struct ethhdr), 804 ret = NET_RX_SUCCESS;
747 orig_node->batman_if, 805
748 orig_node->router->addr); 806 } else
749 } 807 spin_unlock_irqrestore(&orig_hash_lock, flags);
750 spin_unlock(&orig_hash_lock); 808
809 return ret;
751} 810}
752 811
753static void recv_unicast_packet(struct ethhdr *ethhdr, 812int recv_unicast_packet(struct sk_buff *skb)
754 unsigned char *packet_buff,
755 int result,
756 struct batman_if *batman_if)
757{ 813{
758 struct unicast_packet *unicast_packet; 814 struct unicast_packet *unicast_packet;
759 unsigned char src_str[ETH_STR_LEN], dst_str[ETH_STR_LEN]; 815 unsigned char src_str[ETH_STR_LEN], dst_str[ETH_STR_LEN];
760 struct orig_node *orig_node; 816 struct orig_node *orig_node;
761 int hdr_size = sizeof(struct ethhdr) + sizeof(struct unicast_packet); 817 struct ethhdr *ethhdr;
818 struct batman_if *batman_if;
819 struct sk_buff *skb_old;
820 uint8_t dstaddr[ETH_ALEN];
821 int hdr_size = sizeof(struct unicast_packet);
822 int ret;
823 unsigned long flags;
824
825 /* drop packet if it has not necessary minimum size */
826 if (skb_headlen(skb) < hdr_size)
827 return NET_RX_DROP;
828
829 ethhdr = (struct ethhdr *) skb_mac_header(skb);
762 830
763 /* packet with unicast indication but broadcast recipient */ 831 /* packet with unicast indication but broadcast recipient */
764 if (is_bcast(ethhdr->h_dest)) 832 if (is_bcast(ethhdr->h_dest))
765 return; 833 return NET_RX_DROP;
766 834
767 /* packet with broadcast sender address */ 835 /* packet with broadcast sender address */
768 if (is_bcast(ethhdr->h_source)) 836 if (is_bcast(ethhdr->h_source))
769 return; 837 return NET_RX_DROP;
770 838
771 /* not for me */ 839 /* not for me */
772 if (!is_my_mac(ethhdr->h_dest)) 840 if (!is_my_mac(ethhdr->h_dest))
773 return; 841 return NET_RX_DROP;
774
775 /* drop packet if it has not necessary minimum size */
776 if (result < hdr_size)
777 return;
778 842
779 unicast_packet = (struct unicast_packet *) 843 unicast_packet = (struct unicast_packet *) skb->data;
780 (packet_buff + sizeof(struct ethhdr));
781 844
782 /* packet for me */ 845 /* packet for me */
783 if (is_my_mac(unicast_packet->dest)) { 846 if (is_my_mac(unicast_packet->dest)) {
784 interface_rx(soft_device, packet_buff + hdr_size, 847 interface_rx(skb, hdr_size);
785 result - hdr_size); 848 return NET_RX_SUCCESS;
786 return;
787
788 } 849 }
789 850
790 /* TTL exceeded */ 851 /* TTL exceeded */
791 if (unicast_packet->ttl < 2) { 852 if (unicast_packet->ttl < 2) {
792 addr_to_string(src_str, ((struct ethhdr *) 853 addr_to_string(src_str, ethhdr->h_source);
793 (unicast_packet + 1))->h_source); 854 addr_to_string(dst_str, ethhdr->h_dest);
794 addr_to_string(dst_str, unicast_packet->dest);
795 855
796 printk(KERN_WARNING "batman-adv:Warning - can't send packet from %s to %s: ttl exceeded\n", src_str, dst_str); 856 printk(KERN_WARNING "batman-adv:Warning - can't send packet from %s to %s: ttl exceeded\n", src_str, dst_str);
797 return; 857 return NET_RX_DROP;
798 } 858 }
799 859
860 ret = NET_RX_DROP;
800 /* get routing information */ 861 /* get routing information */
801 spin_lock(&orig_hash_lock); 862 spin_lock_irqsave(&orig_hash_lock, flags);
802 orig_node = ((struct orig_node *) 863 orig_node = ((struct orig_node *)
803 hash_find(orig_hash, unicast_packet->dest)); 864 hash_find(orig_hash, unicast_packet->dest));
804 865
805 if ((orig_node != NULL) && 866 if ((orig_node != NULL) &&
806 (orig_node->batman_if != NULL) && 867 (orig_node->batman_if != NULL) &&
807 (orig_node->router != NULL)) { 868 (orig_node->router != NULL)) {
869
870 /* don't lock while sending the packets ... we therefore
871 * copy the required data before sending */
872 batman_if = orig_node->batman_if;
873 memcpy(dstaddr, orig_node->router->addr, ETH_ALEN);
874 spin_unlock_irqrestore(&orig_hash_lock, flags);
875
876 /* create a copy of the skb, if needed, to modify it. */
877 if (!skb_clone_writable(skb, sizeof(struct unicast_packet))) {
878 skb_old = skb;
879 skb = skb_copy(skb, GFP_ATOMIC);
880 if (!skb)
881 return NET_RX_DROP;
882 unicast_packet = (struct unicast_packet *) skb->data;
883 kfree_skb(skb_old);
884 }
808 /* decrement ttl */ 885 /* decrement ttl */
809 unicast_packet->ttl--; 886 unicast_packet->ttl--;
810 887
811 /* route it */ 888 /* route it */
812 send_raw_packet(packet_buff + sizeof(struct ethhdr), 889 send_skb_packet(skb, batman_if, dstaddr);
813 result - sizeof(struct ethhdr), 890 ret = NET_RX_SUCCESS;
814 orig_node->batman_if, 891
815 orig_node->router->addr); 892 } else
816 } 893 spin_unlock_irqrestore(&orig_hash_lock, flags);
817 spin_unlock(&orig_hash_lock); 894
895 return ret;
818} 896}
819 897
820 898
821static void recv_bcast_packet(struct ethhdr *ethhdr, 899int recv_bcast_packet(struct sk_buff *skb)
822 unsigned char *packet_buff,
823 int result,
824 struct batman_if *batman_if)
825{ 900{
826 struct orig_node *orig_node; 901 struct orig_node *orig_node;
827 struct bcast_packet *bcast_packet; 902 struct bcast_packet *bcast_packet;
828 int hdr_size = sizeof(struct ethhdr) + sizeof(struct bcast_packet); 903 struct ethhdr *ethhdr;
904 int hdr_size = sizeof(struct bcast_packet);
905 unsigned long flags;
906
907 /* drop packet if it has not necessary minimum size */
908 if (skb_headlen(skb) < hdr_size)
909 return NET_RX_DROP;
910
911 ethhdr = (struct ethhdr *)skb_mac_header(skb);
829 912
830 /* packet with broadcast indication but unicast recipient */ 913 /* packet with broadcast indication but unicast recipient */
831 if (!is_bcast(ethhdr->h_dest)) 914 if (!is_bcast(ethhdr->h_dest))
832 return; 915 return NET_RX_DROP;
833 916
834 /* packet with broadcast sender address */ 917 /* packet with broadcast sender address */
835 if (is_bcast(ethhdr->h_source)) 918 if (is_bcast(ethhdr->h_source))
836 return; 919 return NET_RX_DROP;
837
838 /* drop packet if it has not necessary minimum size */
839 if (result < hdr_size)
840 return;
841 920
842 /* ignore broadcasts sent by myself */ 921 /* ignore broadcasts sent by myself */
843 if (is_my_mac(ethhdr->h_source)) 922 if (is_my_mac(ethhdr->h_source))
844 return; 923 return NET_RX_DROP;
845 924
846 bcast_packet = (struct bcast_packet *) 925 bcast_packet = (struct bcast_packet *) skb->data;
847 (packet_buff + sizeof(struct ethhdr));
848 926
849 /* ignore broadcasts originated by myself */ 927 /* ignore broadcasts originated by myself */
850 if (is_my_mac(bcast_packet->orig)) 928 if (is_my_mac(bcast_packet->orig))
851 return; 929 return NET_RX_DROP;
852 930
853 spin_lock(&orig_hash_lock); 931 spin_lock_irqsave(&orig_hash_lock, flags);
854 orig_node = ((struct orig_node *) 932 orig_node = ((struct orig_node *)
855 hash_find(orig_hash, bcast_packet->orig)); 933 hash_find(orig_hash, bcast_packet->orig));
856 934
857 if (orig_node == NULL) { 935 if (orig_node == NULL) {
858 spin_unlock(&orig_hash_lock); 936 spin_unlock_irqrestore(&orig_hash_lock, flags);
859 return; 937 return NET_RX_DROP;
860 } 938 }
861 939
862 /* check flood history */ 940 /* check flood history */
863 if (get_bit_status(orig_node->bcast_bits, 941 if (get_bit_status(orig_node->bcast_bits,
864 orig_node->last_bcast_seqno, 942 orig_node->last_bcast_seqno,
865 ntohs(bcast_packet->seqno))) { 943 ntohs(bcast_packet->seqno))) {
866 spin_unlock(&orig_hash_lock); 944 spin_unlock_irqrestore(&orig_hash_lock, flags);
867 return; 945 return NET_RX_DROP;
868 } 946 }
869 947
870 /* mark broadcast in flood history */ 948 /* mark broadcast in flood history */
@@ -873,211 +951,58 @@ static void recv_bcast_packet(struct ethhdr *ethhdr,
873 orig_node->last_bcast_seqno, 1)) 951 orig_node->last_bcast_seqno, 1))
874 orig_node->last_bcast_seqno = ntohs(bcast_packet->seqno); 952 orig_node->last_bcast_seqno = ntohs(bcast_packet->seqno);
875 953
876 spin_unlock(&orig_hash_lock); 954 spin_unlock_irqrestore(&orig_hash_lock, flags);
955
956 /* rebroadcast packet */
957 add_bcast_packet_to_list(skb);
877 958
878 /* broadcast for me */ 959 /* broadcast for me */
879 interface_rx(soft_device, packet_buff + hdr_size, result - hdr_size); 960 interface_rx(skb, hdr_size);
880 961
881 /* rebroadcast packet */ 962 return NET_RX_SUCCESS;
882 add_bcast_packet_to_list(packet_buff + sizeof(struct ethhdr),
883 result - sizeof(struct ethhdr));
884} 963}
885 964
886static void recv_vis_packet(struct ethhdr *ethhdr, 965int recv_vis_packet(struct sk_buff *skb)
887 unsigned char *packet_buff,
888 int result)
889{ 966{
890 struct vis_packet *vis_packet; 967 struct vis_packet *vis_packet;
891 int hdr_size = sizeof(struct ethhdr) + sizeof(struct vis_packet); 968 struct ethhdr *ethhdr;
892 int vis_info_len; 969 int hdr_size = sizeof(struct vis_packet);
970 int ret;
893 971
894 /* drop if too short. */ 972 if (skb_headlen(skb) < hdr_size)
895 if (result < hdr_size) 973 return NET_RX_DROP;
896 return; 974
975 vis_packet = (struct vis_packet *) skb->data;
976 ethhdr = (struct ethhdr *)skb_mac_header(skb);
897 977
898 /* not for me */ 978 /* not for me */
899 if (!is_my_mac(ethhdr->h_dest)) 979 if (!is_my_mac(ethhdr->h_dest))
900 return; 980 return NET_RX_DROP;
901
902 vis_packet = (struct vis_packet *)(packet_buff + sizeof(struct ethhdr));
903 vis_info_len = result - hdr_size;
904 981
905 /* ignore own packets */ 982 /* ignore own packets */
906 if (is_my_mac(vis_packet->vis_orig)) 983 if (is_my_mac(vis_packet->vis_orig))
907 return; 984 return NET_RX_DROP;
908 985
909 if (is_my_mac(vis_packet->sender_orig)) 986 if (is_my_mac(vis_packet->sender_orig))
910 return; 987 return NET_RX_DROP;
911 988
912 switch (vis_packet->vis_type) { 989 switch (vis_packet->vis_type) {
913 case VIS_TYPE_SERVER_SYNC: 990 case VIS_TYPE_SERVER_SYNC:
914 receive_server_sync_packet(vis_packet, vis_info_len); 991 /* TODO: handle fragmented skbs properly */
992 receive_server_sync_packet(vis_packet, skb_headlen(skb));
993 ret = NET_RX_SUCCESS;
915 break; 994 break;
916 995
917 case VIS_TYPE_CLIENT_UPDATE: 996 case VIS_TYPE_CLIENT_UPDATE:
918 receive_client_update_packet(vis_packet, vis_info_len); 997 /* TODO: handle fragmented skbs properly */
998 receive_client_update_packet(vis_packet, skb_headlen(skb));
999 ret = NET_RX_SUCCESS;
919 break; 1000 break;
920 1001
921 default: /* ignore unknown packet */ 1002 default: /* ignore unknown packet */
1003 ret = NET_RX_DROP;
922 break; 1004 break;
923 } 1005 }
924} 1006 return ret;
925
926static int recv_one_packet(struct batman_if *batman_if,
927 unsigned char *packet_buff)
928{
929 int result;
930 struct ethhdr *ethhdr;
931 struct batman_packet *batman_packet;
932
933 result = receive_raw_packet(batman_if->raw_sock, packet_buff,
934 PACKBUFF_SIZE);
935 if (result <= 0)
936 return result;
937
938 if (result < sizeof(struct ethhdr) + 2)
939 return 0;
940
941 ethhdr = (struct ethhdr *)packet_buff;
942 batman_packet = (struct batman_packet *)
943 (packet_buff + sizeof(struct ethhdr));
944
945 if (batman_packet->version != COMPAT_VERSION) {
946 bat_dbg(DBG_BATMAN,
947 "Drop packet: incompatible batman version (%i)\n",
948 batman_packet->version);
949 return 0;
950 }
951
952 switch (batman_packet->packet_type) {
953 /* batman originator packet */
954 case BAT_PACKET:
955 recv_bat_packet(ethhdr, packet_buff, result, batman_if);
956 break;
957
958 /* batman icmp packet */
959 case BAT_ICMP:
960 recv_icmp_packet(ethhdr, packet_buff, result, batman_if);
961 break;
962
963 /* unicast packet */
964 case BAT_UNICAST:
965 recv_unicast_packet(ethhdr, packet_buff, result, batman_if);
966 break;
967
968 /* broadcast packet */
969 case BAT_BCAST:
970 recv_bcast_packet(ethhdr,
971 packet_buff, result, batman_if);
972 break;
973
974 /* vis packet */
975 case BAT_VIS:
976 recv_vis_packet(ethhdr, packet_buff, result);
977 break;
978 }
979 return 0;
980}
981
982
983static int discard_one_packet(struct batman_if *batman_if,
984 unsigned char *packet_buff)
985{
986 int result = -EAGAIN;
987
988 if (batman_if->raw_sock) {
989 result = receive_raw_packet(batman_if->raw_sock,
990 packet_buff,
991 PACKBUFF_SIZE);
992 }
993 return result;
994}
995
996
997static bool is_interface_active(struct batman_if *batman_if)
998{
999 if (batman_if->if_active != IF_ACTIVE)
1000 return false;
1001
1002 return true;
1003}
1004
1005static void service_interface(struct batman_if *batman_if,
1006 unsigned char *packet_buff)
1007
1008{
1009 int result;
1010
1011 do {
1012 if (is_interface_active(batman_if))
1013 result = recv_one_packet(batman_if, packet_buff);
1014 else
1015 result = discard_one_packet(batman_if, packet_buff);
1016 } while (result >= 0);
1017
1018 /* we perform none blocking reads, so EAGAIN indicates there
1019 are no more packets to read. Anything else is a real
1020 error.*/
1021
1022 if ((result < 0) && (result != -EAGAIN))
1023 printk(KERN_ERR "batman-adv:Could not receive packet from interface %s: %i\n", batman_if->dev, result);
1024}
1025
1026static void service_interfaces(unsigned char *packet_buffer)
1027{
1028 struct batman_if *batman_if;
1029 rcu_read_lock();
1030 list_for_each_entry_rcu(batman_if, &if_list, list) {
1031 rcu_read_unlock();
1032 service_interface(batman_if, packet_buffer);
1033 rcu_read_lock();
1034 }
1035 rcu_read_unlock();
1036}
1037
1038
1039int packet_recv_thread(void *data)
1040{
1041 unsigned char *packet_buff;
1042
1043 atomic_set(&data_ready_cond, 0);
1044 atomic_set(&exit_cond, 0);
1045 packet_buff = kmalloc(PACKBUFF_SIZE, GFP_KERNEL);
1046 if (!packet_buff) {
1047 printk(KERN_ERR "batman-adv:Could not allocate memory for the packet buffer. :(\n");
1048 return -1;
1049 }
1050
1051 while ((!kthread_should_stop()) && (!atomic_read(&exit_cond))) {
1052
1053 wait_event_interruptible(thread_wait,
1054 (atomic_read(&data_ready_cond) ||
1055 atomic_read(&exit_cond)));
1056
1057 atomic_set(&data_ready_cond, 0);
1058
1059 if (kthread_should_stop() || atomic_read(&exit_cond))
1060 break;
1061
1062 service_interfaces(packet_buff);
1063 }
1064 kfree(packet_buff);
1065
1066 /* do not exit until kthread_stop() is actually called,
1067 * otherwise it will wait for us forever. */
1068 while (!kthread_should_stop())
1069 schedule();
1070
1071 return 0;
1072}
1073
1074void batman_data_ready(struct sock *sk, int len)
1075{
1076 void (*data_ready)(struct sock *, int) = sk->sk_user_data;
1077
1078 data_ready(sk, len);
1079
1080 atomic_set(&data_ready_cond, 1);
1081 wake_up_interruptible(&thread_wait);
1082} 1007}
1083 1008
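The registration half of the new receive path lives in main.c, whose hunk is not shown in this section. As a rough sketch only -- the handler name and the init/exit wrappers are assumptions, not code from this patch -- an ethertype handler is wired into the core like this:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* ETH_P_BATMAN is the driver's private ethertype define;
 * batman_skb_recv() is an assumed handler name. */
static int batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
			   struct packet_type *ptype,
			   struct net_device *orig_dev);

static struct packet_type batman_adv_packet_type __read_mostly = {
	.type = __constant_htons(ETH_P_BATMAN),
	.func = batman_skb_recv,
};

static int __init batman_sketch_init(void)
{
	/* from now on the stack delivers matching frames directly,
	 * in softirq context, instead of through a raw socket */
	dev_add_pack(&batman_adv_packet_type);
	return 0;
}

static void __exit batman_sketch_exit(void)
{
	dev_remove_pack(&batman_adv_packet_type);
}

Because such a handler runs in softirq context it must never sleep, and any lock it shares with process-context code must at least disable bottom halves on the process side; the blanket irqsave/irqrestore conversions seen throughout this patch cover that requirement conservatively.
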
diff --git a/drivers/staging/batman-adv/routing.h b/drivers/staging/batman-adv/routing.h
index 890a4f53b55..c217241d0ef 100644
--- a/drivers/staging/batman-adv/routing.h
+++ b/drivers/staging/batman-adv/routing.h
@@ -25,8 +25,6 @@ extern wait_queue_head_t thread_wait;
25extern atomic_t exit_cond; 25extern atomic_t exit_cond;
26 26
27void slide_own_bcast_window(struct batman_if *batman_if); 27void slide_own_bcast_window(struct batman_if *batman_if);
28void batman_data_ready(struct sock *sk, int len);
29int packet_recv_thread(void *data);
30void receive_bat_packet(struct ethhdr *ethhdr, 28void receive_bat_packet(struct ethhdr *ethhdr,
31 struct batman_packet *batman_packet, 29 struct batman_packet *batman_packet,
32 unsigned char *hna_buff, int hna_buff_len, 30 unsigned char *hna_buff, int hna_buff_len,
@@ -34,3 +32,9 @@ void receive_bat_packet(struct ethhdr *ethhdr,
34void update_routes(struct orig_node *orig_node, 32void update_routes(struct orig_node *orig_node,
35 struct neigh_node *neigh_node, 33 struct neigh_node *neigh_node,
36 unsigned char *hna_buff, int hna_buff_len); 34 unsigned char *hna_buff, int hna_buff_len);
35int recv_icmp_packet(struct sk_buff *skb);
36int recv_unicast_packet(struct sk_buff *skb);
37int recv_bcast_packet(struct sk_buff *skb);
38int recv_vis_packet(struct sk_buff *skb);
39int recv_bat_packet(struct sk_buff *skb,
40 struct batman_if *batman_if);
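The prototypes above replace the buffer-based handlers and each take the skb directly, which implies one dispatch function in place of the removed recv_one_packet(). The following is a speculative sketch of such a dispatcher: get_batman_if_by_netdev() is a hypothetical lookup helper, and each recv_*() routine is assumed to consume the skb it is given.

static int batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
			   struct packet_type *ptype,
			   struct net_device *orig_dev)
{
	struct batman_packet *batman_packet;
	struct batman_if *batman_if;
	int ret = NET_RX_DROP;

	/* other protocol handlers may hold a reference to this skb */
	skb = skb_share_check(skb, GFP_ATOMIC);
	if (!skb)
		return NET_RX_DROP;

	/* the version/type fields must be in the linear data area */
	if (!pskb_may_pull(skb, sizeof(struct batman_packet)))
		goto err_free;

	/* hypothetical helper: map the receiving net_device to its
	 * batman_if */
	batman_if = get_batman_if_by_netdev(dev);
	if (!batman_if)
		goto err_free;

	batman_packet = (struct batman_packet *)skb->data;
	if (batman_packet->version != COMPAT_VERSION)
		goto err_free;

	switch (batman_packet->packet_type) {
	case BAT_PACKET:
		ret = recv_bat_packet(skb, batman_if);
		break;
	case BAT_ICMP:
		ret = recv_icmp_packet(skb);
		break;
	case BAT_UNICAST:
		ret = recv_unicast_packet(skb);
		break;
	case BAT_BCAST:
		ret = recv_bcast_packet(skb);
		break;
	case BAT_VIS:
		ret = recv_vis_packet(skb);
		break;
	}
	return ret;

err_free:
	kfree_skb(skb);
	return NET_RX_DROP;
}
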
diff --git a/drivers/staging/batman-adv/send.c b/drivers/staging/batman-adv/send.c
index 49b1534b8f7..fd48f3fa2d8 100644
--- a/drivers/staging/batman-adv/send.c
+++ b/drivers/staging/batman-adv/send.c
@@ -23,6 +23,7 @@
23#include "send.h" 23#include "send.h"
24#include "routing.h" 24#include "routing.h"
25#include "translation-table.h" 25#include "translation-table.h"
26#include "soft-interface.h"
26#include "hard-interface.h" 27#include "hard-interface.h"
27#include "types.h" 28#include "types.h"
28#include "vis.h" 29#include "vis.h"
@@ -58,51 +59,69 @@ static unsigned long forward_send_time(void)
58 return send_time; 59 return send_time;
59} 60}
60 61
61/* sends a raw packet. */ 62/* send out an already prepared packet to the given address via the
62void send_raw_packet(unsigned char *pack_buff, int pack_buff_len, 63 * specified batman interface */
63 struct batman_if *batman_if, uint8_t *dst_addr) 64int send_skb_packet(struct sk_buff *skb,
65 struct batman_if *batman_if,
66 uint8_t *dst_addr)
64{ 67{
65 struct ethhdr *ethhdr; 68 struct ethhdr *ethhdr;
66 struct sk_buff *skb;
67 int retval;
68 char *data;
69 69
70 if (batman_if->if_active != IF_ACTIVE) 70 if (batman_if->if_active != IF_ACTIVE)
71 return; 71 goto send_skb_err;
72
73 if (unlikely(!batman_if->net_dev))
74 goto send_skb_err;
72 75
73 if (!(batman_if->net_dev->flags & IFF_UP)) { 76 if (!(batman_if->net_dev->flags & IFF_UP)) {
74 printk(KERN_WARNING 77 printk(KERN_WARNING
75 "batman-adv:Interface %s is not up - can't send packet via that interface!\n", 78 "batman-adv:Interface %s is not up - can't send packet via that interface!\n",
76 batman_if->dev); 79 batman_if->dev);
77 return; 80 goto send_skb_err;
78 } 81 }
79 82
80 skb = dev_alloc_skb(pack_buff_len + sizeof(struct ethhdr)); 83 /* push to the ethernet header. */
81 if (!skb) 84 if (my_skb_push(skb, sizeof(struct ethhdr)) < 0)
82 return; 85 goto send_skb_err;
83 data = skb_put(skb, pack_buff_len + sizeof(struct ethhdr));
84 86
85 memcpy(data + sizeof(struct ethhdr), pack_buff, pack_buff_len); 87 skb_reset_mac_header(skb);
86 88
87 ethhdr = (struct ethhdr *) data; 89 ethhdr = (struct ethhdr *) skb_mac_header(skb);
88 memcpy(ethhdr->h_source, batman_if->net_dev->dev_addr, ETH_ALEN); 90 memcpy(ethhdr->h_source, batman_if->net_dev->dev_addr, ETH_ALEN);
89 memcpy(ethhdr->h_dest, dst_addr, ETH_ALEN); 91 memcpy(ethhdr->h_dest, dst_addr, ETH_ALEN);
90 ethhdr->h_proto = __constant_htons(ETH_P_BATMAN); 92 ethhdr->h_proto = __constant_htons(ETH_P_BATMAN);
91 93
92 skb_reset_mac_header(skb);
93 skb_set_network_header(skb, ETH_HLEN); 94 skb_set_network_header(skb, ETH_HLEN);
94 skb->priority = TC_PRIO_CONTROL; 95 skb->priority = TC_PRIO_CONTROL;
95 skb->protocol = __constant_htons(ETH_P_BATMAN); 96 skb->protocol = __constant_htons(ETH_P_BATMAN);
97
96 skb->dev = batman_if->net_dev; 98 skb->dev = batman_if->net_dev;
97 99
98 /* dev_queue_xmit() returns a negative result on error. However on 100 /* dev_queue_xmit() returns a negative result on error. However on
99 * congestion and traffic shaping, it drops and returns NET_XMIT_DROP 101 * congestion and traffic shaping, it drops and returns NET_XMIT_DROP
100 * (which is > 0). This will not be treated as an error. */ 102 * (which is > 0). This will not be treated as an error. */
101 retval = dev_queue_xmit(skb); 103
102 if (retval < 0) 104 return dev_queue_xmit(skb);
103 printk(KERN_WARNING 105send_skb_err:
104 "batman-adv:Can't write to raw socket: %i\n", 106 kfree_skb(skb);
105 retval); 107 return NET_XMIT_DROP;
108}
109
110/* sends a raw packet. */
111void send_raw_packet(unsigned char *pack_buff, int pack_buff_len,
112 struct batman_if *batman_if, uint8_t *dst_addr)
113{
114 struct sk_buff *skb;
115 char *data;
116
117 skb = dev_alloc_skb(pack_buff_len + sizeof(struct ethhdr));
118 if (!skb)
119 return;
120 data = skb_put(skb, pack_buff_len + sizeof(struct ethhdr));
121 memcpy(data + sizeof(struct ethhdr), pack_buff, pack_buff_len);
122 /* pull back to the batman "network header" */
123 skb_pull(skb, sizeof(struct ethhdr));
124 send_skb_packet(skb, batman_if, dst_addr);
106} 125}
107 126
108/* Send a packet to a given interface */ 127/* Send a packet to a given interface */
@@ -331,6 +350,8 @@ void schedule_forward_packet(struct orig_node *orig_node,
331 350
332static void forw_packet_free(struct forw_packet *forw_packet) 351static void forw_packet_free(struct forw_packet *forw_packet)
333{ 352{
353 if (forw_packet->skb)
354 kfree_skb(forw_packet->skb);
334 kfree(forw_packet->packet_buff); 355 kfree(forw_packet->packet_buff);
335 kfree(forw_packet); 356 kfree(forw_packet);
336} 357}
@@ -353,7 +374,7 @@ static void _add_bcast_packet_to_list(struct forw_packet *forw_packet,
353 send_time); 374 send_time);
354} 375}
355 376
356void add_bcast_packet_to_list(unsigned char *packet_buff, int packet_len) 377void add_bcast_packet_to_list(struct sk_buff *skb)
357{ 378{
358 struct forw_packet *forw_packet; 379 struct forw_packet *forw_packet;
359 380
@@ -361,14 +382,16 @@ void add_bcast_packet_to_list(unsigned char *packet_buff, int packet_len)
361 if (!forw_packet) 382 if (!forw_packet)
362 return; 383 return;
363 384
364 forw_packet->packet_buff = kmalloc(packet_len, GFP_ATOMIC); 385 skb = skb_copy(skb, GFP_ATOMIC);
365 if (!forw_packet->packet_buff) { 386 if (!skb) {
366 kfree(forw_packet); 387 kfree(forw_packet);
367 return; 388 return;
368 } 389 }
369 390
370 forw_packet->packet_len = packet_len; 391 skb_reset_mac_header(skb);
371 memcpy(forw_packet->packet_buff, packet_buff, forw_packet->packet_len); 392
393 forw_packet->skb = skb;
394 forw_packet->packet_buff = NULL;
372 395
373 /* how often did we send the bcast packet ? */ 396 /* how often did we send the bcast packet ? */
374 forw_packet->num_packets = 0; 397 forw_packet->num_packets = 0;
@@ -384,6 +407,7 @@ void send_outstanding_bcast_packet(struct work_struct *work)
384 struct forw_packet *forw_packet = 407 struct forw_packet *forw_packet =
385 container_of(delayed_work, struct forw_packet, delayed_work); 408 container_of(delayed_work, struct forw_packet, delayed_work);
386 unsigned long flags; 409 unsigned long flags;
410 struct sk_buff *skb1;
387 411
388 spin_lock_irqsave(&forw_bcast_list_lock, flags); 412 spin_lock_irqsave(&forw_bcast_list_lock, flags);
389 hlist_del(&forw_packet->list); 413 hlist_del(&forw_packet->list);
@@ -392,8 +416,10 @@ void send_outstanding_bcast_packet(struct work_struct *work)
392 /* rebroadcast packet */ 416 /* rebroadcast packet */
393 rcu_read_lock(); 417 rcu_read_lock();
394 list_for_each_entry_rcu(batman_if, &if_list, list) { 418 list_for_each_entry_rcu(batman_if, &if_list, list) {
395 send_raw_packet(forw_packet->packet_buff, 419 /* send a copy of the saved skb */
396 forw_packet->packet_len, 420 skb1 = skb_copy(forw_packet->skb, GFP_ATOMIC);
421 if (skb1)
422 send_skb_packet(skb1,
397 batman_if, broadcastAddr); 423 batman_if, broadcastAddr);
398 } 424 }
399 rcu_read_unlock(); 425 rcu_read_unlock();
@@ -415,10 +441,11 @@ void send_outstanding_bat_packet(struct work_struct *work)
415 container_of(work, struct delayed_work, work); 441 container_of(work, struct delayed_work, work);
416 struct forw_packet *forw_packet = 442 struct forw_packet *forw_packet =
417 container_of(delayed_work, struct forw_packet, delayed_work); 443 container_of(delayed_work, struct forw_packet, delayed_work);
444 unsigned long flags;
418 445
419 spin_lock(&forw_bat_list_lock); 446 spin_lock_irqsave(&forw_bat_list_lock, flags);
420 hlist_del(&forw_packet->list); 447 hlist_del(&forw_packet->list);
421 spin_unlock(&forw_bat_list_lock); 448 spin_unlock_irqrestore(&forw_bat_list_lock, flags);
422 449
423 send_packet(forw_packet); 450 send_packet(forw_packet);
424 451
@@ -459,18 +486,18 @@ void purge_outstanding_packets(void)
459 spin_unlock_irqrestore(&forw_bcast_list_lock, flags); 486 spin_unlock_irqrestore(&forw_bcast_list_lock, flags);
460 487
461 /* free batman packet list */ 488 /* free batman packet list */
462 spin_lock(&forw_bat_list_lock); 489 spin_lock_irqsave(&forw_bat_list_lock, flags);
463 hlist_for_each_entry_safe(forw_packet, tmp_node, safe_tmp_node, 490 hlist_for_each_entry_safe(forw_packet, tmp_node, safe_tmp_node,
464 &forw_bat_list, list) { 491 &forw_bat_list, list) {
465 492
466 spin_unlock(&forw_bat_list_lock); 493 spin_unlock_irqrestore(&forw_bat_list_lock, flags);
467 494
468 /** 495 /**
469 * send_outstanding_bat_packet() will lock the list to 496 * send_outstanding_bat_packet() will lock the list to
470 * delete the item from the list 497 * delete the item from the list
471 */ 498 */
472 cancel_delayed_work_sync(&forw_packet->delayed_work); 499 cancel_delayed_work_sync(&forw_packet->delayed_work);
473 spin_lock(&forw_bat_list_lock); 500 spin_lock_irqsave(&forw_bat_list_lock, flags);
474 } 501 }
475 spin_unlock(&forw_bat_list_lock); 502 spin_unlock_irqrestore(&forw_bat_list_lock, flags);
476} 503}
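Note that send_skb_packet() consumes the skb on every path: success hands it to dev_queue_xmit(), which takes ownership, and every failure branch ends in kfree_skb(). Callers must not touch the skb afterwards and should allocate it with headroom for the ethernet header that my_skb_push() prepends. A usage sketch (payload, len, batman_if and dst_addr stand in for caller state):

	struct sk_buff *skb;

	skb = dev_alloc_skb(len + sizeof(struct ethhdr));
	if (!skb)
		return;

	/* leave room for the header send_skb_packet() pushes */
	skb_reserve(skb, sizeof(struct ethhdr));
	memcpy(skb_put(skb, len), payload, len);

	/* ownership of skb passes here, even on error */
	send_skb_packet(skb, batman_if, dst_addr);

The compatibility wrapper send_raw_packet() above gets the same layout by skb_put()ing over the full frame and then skb_pull()ing the header size; skb_reserve() is the more common idiom.
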
diff --git a/drivers/staging/batman-adv/send.h b/drivers/staging/batman-adv/send.h
index 59d500917a3..5fc6f3417cb 100644
--- a/drivers/staging/batman-adv/send.h
+++ b/drivers/staging/batman-adv/send.h
@@ -22,6 +22,9 @@
22#include "types.h" 22#include "types.h"
23 23
24void send_own_packet_work(struct work_struct *work); 24void send_own_packet_work(struct work_struct *work);
25int send_skb_packet(struct sk_buff *skb,
26 struct batman_if *batman_if,
27 uint8_t *dst_addr);
25void send_raw_packet(unsigned char *pack_buff, int pack_buff_len, 28void send_raw_packet(unsigned char *pack_buff, int pack_buff_len,
26 struct batman_if *batman_if, uint8_t *dst_addr); 29 struct batman_if *batman_if, uint8_t *dst_addr);
27void schedule_own_packet(struct batman_if *batman_if); 30void schedule_own_packet(struct batman_if *batman_if);
@@ -30,7 +33,7 @@ void schedule_forward_packet(struct orig_node *orig_node,
30 struct batman_packet *batman_packet, 33 struct batman_packet *batman_packet,
31 uint8_t directlink, int hna_buff_len, 34 uint8_t directlink, int hna_buff_len,
32 struct batman_if *if_outgoing); 35 struct batman_if *if_outgoing);
33void add_bcast_packet_to_list(unsigned char *packet_buff, int packet_len); 36void add_bcast_packet_to_list(struct sk_buff *skb);
34void send_outstanding_bcast_packet(struct work_struct *work); 37void send_outstanding_bcast_packet(struct work_struct *work);
35void send_outstanding_bat_packet(struct work_struct *work); 38void send_outstanding_bat_packet(struct work_struct *work);
36void purge_outstanding_packets(void); 39void purge_outstanding_packets(void);
diff --git a/drivers/staging/batman-adv/soft-interface.c b/drivers/staging/batman-adv/soft-interface.c
index 168a4e195a1..8ae3483a625 100644
--- a/drivers/staging/batman-adv/soft-interface.c
+++ b/drivers/staging/batman-adv/soft-interface.c
@@ -34,7 +34,6 @@ static uint16_t bcast_seqno = 1; /* give own bcast messages seq numbers to avoid
34 * broadcast storms */ 34 * broadcast storms */
35static int32_t skb_packets; 35static int32_t skb_packets;
36static int32_t skb_bad_packets; 36static int32_t skb_bad_packets;
37static int32_t lock_dropped;
38 37
39unsigned char mainIfAddr[ETH_ALEN]; 38unsigned char mainIfAddr[ETH_ALEN];
40static unsigned char mainIfAddr_default[ETH_ALEN]; 39static unsigned char mainIfAddr_default[ETH_ALEN];
@@ -67,12 +66,12 @@ int main_if_was_up(void)
67 return (memcmp(mainIfAddr, mainIfAddr_default, ETH_ALEN) != 0 ? 1 : 0); 66 return (memcmp(mainIfAddr, mainIfAddr_default, ETH_ALEN) != 0 ? 1 : 0);
68} 67}
69 68
70static int my_skb_push(struct sk_buff *skb, unsigned int len) 69int my_skb_push(struct sk_buff *skb, unsigned int len)
71{ 70{
72 int result = 0; 71 int result = 0;
73 72
74 skb_packets++; 73 skb_packets++;
75 if (skb->data - len < skb->head) { 74 if (skb_headroom(skb) < len) {
76 skb_bad_packets++; 75 skb_bad_packets++;
77 result = pskb_expand_head(skb, len, 0, GFP_ATOMIC); 76 result = pskb_expand_head(skb, len, 0, GFP_ATOMIC);
78 77
@@ -169,7 +168,10 @@ int interface_tx(struct sk_buff *skb, struct net_device *dev)
169 struct orig_node *orig_node; 168 struct orig_node *orig_node;
170 struct ethhdr *ethhdr = (struct ethhdr *)skb->data; 169 struct ethhdr *ethhdr = (struct ethhdr *)skb->data;
171 struct bat_priv *priv = netdev_priv(dev); 170 struct bat_priv *priv = netdev_priv(dev);
171 struct batman_if *batman_if;
172 uint8_t dstaddr[6];
172 int data_len = skb->len; 173 int data_len = skb->len;
174 unsigned long flags;
173 175
174 if (atomic_read(&module_state) != MODULE_ACTIVE) 176 if (atomic_read(&module_state) != MODULE_ACTIVE)
175 goto dropped; 177 goto dropped;
@@ -185,7 +187,6 @@ int interface_tx(struct sk_buff *skb, struct net_device *dev)
185 goto dropped; 187 goto dropped;
186 188
187 bcast_packet = (struct bcast_packet *)skb->data; 189 bcast_packet = (struct bcast_packet *)skb->data;
188
189 bcast_packet->version = COMPAT_VERSION; 190 bcast_packet->version = COMPAT_VERSION;
190 191
191 /* batman packet type: broadcast */ 192 /* batman packet type: broadcast */
@@ -194,27 +195,21 @@ int interface_tx(struct sk_buff *skb, struct net_device *dev)
194 /* hw address of first interface is the orig mac because only 195 /* hw address of first interface is the orig mac because only
195 * this mac is known throughout the mesh */ 196 * this mac is known throughout the mesh */
196 memcpy(bcast_packet->orig, mainIfAddr, ETH_ALEN); 197 memcpy(bcast_packet->orig, mainIfAddr, ETH_ALEN);
198
197 /* set broadcast sequence number */ 199 /* set broadcast sequence number */
198 bcast_packet->seqno = htons(bcast_seqno); 200 bcast_packet->seqno = htons(bcast_seqno);
199 201
200 bcast_seqno++; 202 bcast_seqno++;
201 203
202 /* broadcast packet */ 204 /* broadcast packet */
203 add_bcast_packet_to_list(skb->data, skb->len); 205 add_bcast_packet_to_list(skb);
206 /* a copy is stored in the bcast list, therefore free
207 * the original skb. */
208 kfree_skb(skb);
204 209
205 /* unicast packet */ 210 /* unicast packet */
206 } else { 211 } else {
207 212 spin_lock_irqsave(&orig_hash_lock, flags);
208 /* simply spin_lock()ing can deadlock when the lock is already
209 * held. */
210 /* TODO: defer the work in a working queue instead of
211 * dropping */
212 if (!spin_trylock(&orig_hash_lock)) {
213 lock_dropped++;
214 printk(KERN_WARNING "batman-adv:%d packets dropped because lock was held\n", lock_dropped);
215 goto dropped;
216 }
217
218 /* get routing information */ 213 /* get routing information */
219 orig_node = ((struct orig_node *)hash_find(orig_hash, 214 orig_node = ((struct orig_node *)hash_find(orig_hash,
220 ethhdr->h_dest)); 215 ethhdr->h_dest));
@@ -243,14 +238,17 @@ int interface_tx(struct sk_buff *skb, struct net_device *dev)
243 if (orig_node->batman_if->if_active != IF_ACTIVE) 238 if (orig_node->batman_if->if_active != IF_ACTIVE)
244 goto unlock; 239 goto unlock;
245 240
246 send_raw_packet(skb->data, skb->len, 241 /* don't lock while sending the packets ... we therefore
247 orig_node->batman_if, 242 * copy the required data before sending */
248 orig_node->router->addr); 243
244 batman_if = orig_node->batman_if;
245 memcpy(dstaddr, orig_node->router->addr, ETH_ALEN);
246 spin_unlock_irqrestore(&orig_hash_lock, flags);
247
248 send_skb_packet(skb, batman_if, dstaddr);
249 } else { 249 } else {
250 goto unlock; 250 goto unlock;
251 } 251 }
252
253 spin_unlock(&orig_hash_lock);
254 } 252 }
255 253
256 priv->stats.tx_packets++; 254 priv->stats.tx_packets++;
@@ -258,42 +256,44 @@ int interface_tx(struct sk_buff *skb, struct net_device *dev)
258 goto end; 256 goto end;
259 257
260unlock: 258unlock:
261 spin_unlock(&orig_hash_lock); 259 spin_unlock_irqrestore(&orig_hash_lock, flags);
262dropped: 260dropped:
263 priv->stats.tx_dropped++; 261 priv->stats.tx_dropped++;
264end: 262end:
265 kfree_skb(skb);
266 return 0; 263 return 0;
267} 264}
268 265
269void interface_rx(struct net_device *dev, void *packet, int packet_len) 266void interface_rx(struct sk_buff *skb, int hdr_size)
270{ 267{
271 struct sk_buff *skb; 268 struct net_device *dev = soft_device;
272 struct bat_priv *priv = netdev_priv(dev); 269 struct bat_priv *priv = netdev_priv(dev);
273 270
274 skb = dev_alloc_skb(packet_len); 271 /* check if enough space is available for pulling, and pull */
275 272 if (!pskb_may_pull(skb, hdr_size)) {
276 if (!skb) { 273 kfree_skb(skb);
277 priv->stats.rx_dropped++; 274 return;
278 goto out;
279 } 275 }
276 skb_pull_rcsum(skb, hdr_size);
277/* skb_set_mac_header(skb, -sizeof(struct ethhdr));*/
280 278
281 memcpy(skb_put(skb, packet_len), packet, packet_len);
282
283 /* Write metadata, and then pass to the receive level */
284 skb->dev = dev; 279 skb->dev = dev;
285 skb->protocol = eth_type_trans(skb, dev); 280 skb->protocol = eth_type_trans(skb, dev);
286 skb->ip_summed = CHECKSUM_UNNECESSARY; 281
282 /* should not be necessary anymore as we use skb_pull_rcsum()
283 * TODO: please verify this and remove this TODO
284 * -- Dec 21st 2009, Simon Wunderlich */
285
286/* skb->ip_summed = CHECKSUM_UNNECESSARY;*/
287
288 /* TODO: set skb->pkt_type to PACKET_BROADCAST, PACKET_MULTICAST,
289 * PACKET_OTHERHOST or PACKET_HOST */
287 290
288 priv->stats.rx_packets++; 291 priv->stats.rx_packets++;
289 priv->stats.rx_bytes += packet_len; 292 priv->stats.rx_bytes += skb->len;
290 293
291 dev->last_rx = jiffies; 294 dev->last_rx = jiffies;
292 295
293 netif_rx(skb); 296 netif_rx(skb);
294
295out:
296 return;
297} 297}
298 298
299/* ethtool */ 299/* ethtool */
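The headroom check in my_skb_push() now uses the skb_headroom() accessor, which is equivalent to the old open-coded skb->data - len < skb->head comparison. The rest of the helper falls outside the hunk context; based on the visible half it plausibly completes like this (a reconstruction, not verbatim patch code):

int my_skb_push(struct sk_buff *skb, unsigned int len)
{
	int result = 0;

	skb_packets++;
	if (skb_headroom(skb) < len) {
		skb_bad_packets++;
		/* reallocate the head so at least 'len' bytes of
		 * headroom are available */
		result = pskb_expand_head(skb, len, 0, GFP_ATOMIC);
		if (result < 0)
			return result;
	}

	skb_push(skb, len);
	return 0;
}
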
diff --git a/drivers/staging/batman-adv/soft-interface.h b/drivers/staging/batman-adv/soft-interface.h
index 515e276ef53..c0cad8134b2 100644
--- a/drivers/staging/batman-adv/soft-interface.h
+++ b/drivers/staging/batman-adv/soft-interface.h
@@ -28,6 +28,7 @@ struct net_device_stats *interface_stats(struct net_device *dev);
28int interface_set_mac_addr(struct net_device *dev, void *addr); 28int interface_set_mac_addr(struct net_device *dev, void *addr);
29int interface_change_mtu(struct net_device *dev, int new_mtu); 29int interface_change_mtu(struct net_device *dev, int new_mtu);
30int interface_tx(struct sk_buff *skb, struct net_device *dev); 30int interface_tx(struct sk_buff *skb, struct net_device *dev);
31void interface_rx(struct net_device *dev, void *packet, int packet_len); 31void interface_rx(struct sk_buff *skb, int hdr_size);
32int my_skb_push(struct sk_buff *skb, unsigned int len);
32 33
33extern unsigned char mainIfAddr[]; 34extern unsigned char mainIfAddr[];
diff --git a/drivers/staging/batman-adv/types.h b/drivers/staging/batman-adv/types.h
index d708e6f15b3..dec1b54031b 100644
--- a/drivers/staging/batman-adv/types.h
+++ b/drivers/staging/batman-adv/types.h
@@ -39,7 +39,6 @@ struct batman_if {
39 char if_active; 39 char if_active;
40 char addr_str[ETH_STR_LEN]; 40 char addr_str[ETH_STR_LEN];
41 struct net_device *net_dev; 41 struct net_device *net_dev;
42 struct socket *raw_sock;
43 atomic_t seqno; 42 atomic_t seqno;
44 unsigned char *packet_buff; 43 unsigned char *packet_buff;
45 int packet_len; 44 int packet_len;
@@ -113,6 +112,7 @@ struct forw_packet { /* structure for forw_list maintaining packet
113 struct hlist_node list; 112 struct hlist_node list;
114 unsigned long send_time; 113 unsigned long send_time;
115 uint8_t own; 114 uint8_t own;
115 struct sk_buff *skb;
116 unsigned char *packet_buff; 116 unsigned char *packet_buff;
117 uint16_t packet_len; 117 uint16_t packet_len;
118 uint32_t direct_link_flags; 118 uint32_t direct_link_flags;
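With both skb and packet_buff present, a forw_packet temporarily carries one of two payload representations, and forw_packet_free() in send.c frees whichever is set. A sketch of the invariant on the skb path, mirroring the add_bcast_packet_to_list() hunk above (skb is the caller's packet):

	/* keep a private copy; the caller still owns its skb */
	forw_packet->skb = skb_copy(skb, GFP_ATOMIC);
	if (!forw_packet->skb) {
		kfree(forw_packet);
		return;
	}
	/* exactly one representation per forw_packet */
	forw_packet->packet_buff = NULL;
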
diff --git a/drivers/staging/batman-adv/vis.c b/drivers/staging/batman-adv/vis.c
index 62ee7739641..191297926b3 100644
--- a/drivers/staging/batman-adv/vis.c
+++ b/drivers/staging/batman-adv/vis.c
@@ -52,12 +52,13 @@ static void free_info(void *data)
52/* set the mode of the visualization to client or server */ 52/* set the mode of the visualization to client or server */
53void vis_set_mode(int mode) 53void vis_set_mode(int mode)
54{ 54{
55 spin_lock(&vis_hash_lock); 55 unsigned long flags;
56 spin_lock_irqsave(&vis_hash_lock, flags);
56 57
57 if (my_vis_info != NULL) 58 if (my_vis_info != NULL)
58 my_vis_info->packet.vis_type = mode; 59 my_vis_info->packet.vis_type = mode;
59 60
60 spin_unlock(&vis_hash_lock); 61 spin_unlock_irqrestore(&vis_hash_lock, flags);
61} 62}
62 63
63/* is_vis_server(), locked outside */ 64/* is_vis_server(), locked outside */
@@ -74,10 +75,11 @@ static int is_vis_server_locked(void)
74int is_vis_server(void) 75int is_vis_server(void)
75{ 76{
76 int ret = 0; 77 int ret = 0;
78 unsigned long flags;
77 79
78 spin_lock(&vis_hash_lock); 80 spin_lock_irqsave(&vis_hash_lock, flags);
79 ret = is_vis_server_locked(); 81 ret = is_vis_server_locked();
80 spin_unlock(&vis_hash_lock); 82 spin_unlock_irqrestore(&vis_hash_lock, flags);
81 83
82 return ret; 84 return ret;
83} 85}
@@ -269,8 +271,9 @@ void receive_server_sync_packet(struct vis_packet *vis_packet, int vis_info_len)
269{ 271{
270 struct vis_info *info; 272 struct vis_info *info;
271 int is_new; 273 int is_new;
274 unsigned long flags;
272 275
273 spin_lock(&vis_hash_lock); 276 spin_lock_irqsave(&vis_hash_lock, flags);
274 info = add_packet(vis_packet, vis_info_len, &is_new); 277 info = add_packet(vis_packet, vis_info_len, &is_new);
275 if (info == NULL) 278 if (info == NULL)
276 goto end; 279 goto end;
@@ -283,7 +286,7 @@ void receive_server_sync_packet(struct vis_packet *vis_packet, int vis_info_len)
283 list_add_tail(&info->send_list, &send_list); 286 list_add_tail(&info->send_list, &send_list);
284 } 287 }
285end: 288end:
286 spin_unlock(&vis_hash_lock); 289 spin_unlock_irqrestore(&vis_hash_lock, flags);
287} 290}
288 291
289/* handle an incoming client update packet and schedule forward if needed. */ 292/* handle an incoming client update packet and schedule forward if needed. */
@@ -292,12 +295,13 @@ void receive_client_update_packet(struct vis_packet *vis_packet,
292{ 295{
293 struct vis_info *info; 296 struct vis_info *info;
294 int is_new; 297 int is_new;
298 unsigned long flags;
295 299
296 /* clients shall not broadcast. */ 300 /* clients shall not broadcast. */
297 if (is_bcast(vis_packet->target_orig)) 301 if (is_bcast(vis_packet->target_orig))
298 return; 302 return;
299 303
300 spin_lock(&vis_hash_lock); 304 spin_lock_irqsave(&vis_hash_lock, flags);
301 info = add_packet(vis_packet, vis_info_len, &is_new); 305 info = add_packet(vis_packet, vis_info_len, &is_new);
302 if (info == NULL) 306 if (info == NULL)
303 goto end; 307 goto end;
@@ -319,7 +323,7 @@ void receive_client_update_packet(struct vis_packet *vis_packet,
319 list_add_tail(&info->send_list, &send_list); 323 list_add_tail(&info->send_list, &send_list);
320 } 324 }
321end: 325end:
322 spin_unlock(&vis_hash_lock); 326 spin_unlock_irqrestore(&vis_hash_lock, flags);
323} 327}
324 328
325/* Walk the originators and find the VIS server with the best tq. Set the packet 329/* Walk the originators and find the VIS server with the best tq. Set the packet
@@ -370,7 +374,7 @@ static int generate_vis_packet(void)
370 374
371 info->first_seen = jiffies; 375 info->first_seen = jiffies;
372 376
373 spin_lock(&orig_hash_lock); 377 spin_lock_irqsave(&orig_hash_lock, flags);
374 memcpy(info->packet.target_orig, broadcastAddr, ETH_ALEN); 378 memcpy(info->packet.target_orig, broadcastAddr, ETH_ALEN);
375 info->packet.ttl = TTL; 379 info->packet.ttl = TTL;
376 info->packet.seqno++; 380 info->packet.seqno++;
@@ -379,7 +383,7 @@ static int generate_vis_packet(void)
379 if (!is_vis_server_locked()) { 383 if (!is_vis_server_locked()) {
380 best_tq = find_best_vis_server(info); 384 best_tq = find_best_vis_server(info);
381 if (best_tq < 0) { 385 if (best_tq < 0) {
382 spin_unlock(&orig_hash_lock); 386 spin_unlock_irqrestore(&orig_hash_lock, flags);
383 return -1; 387 return -1;
384 } 388 }
385 } 389 }
@@ -403,13 +407,13 @@ static int generate_vis_packet(void)
403 info->packet.entries++; 407 info->packet.entries++;
404 408
405 if (vis_packet_full(info)) { 409 if (vis_packet_full(info)) {
406 spin_unlock(&orig_hash_lock); 410 spin_unlock_irqrestore(&orig_hash_lock, flags);
407 return 0; 411 return 0;
408 } 412 }
409 } 413 }
410 } 414 }
411 415
412 spin_unlock(&orig_hash_lock); 416 spin_unlock_irqrestore(&orig_hash_lock, flags);
413 417
414 spin_lock_irqsave(&hna_local_hash_lock, flags); 418 spin_lock_irqsave(&hna_local_hash_lock, flags);
415 while (hash_iterate(hna_local_hash, &hashit_local)) { 419 while (hash_iterate(hna_local_hash, &hashit_local)) {
@@ -450,8 +454,9 @@ static void broadcast_vis_packet(struct vis_info *info, int packet_length)
450{ 454{
451 HASHIT(hashit); 455 HASHIT(hashit);
452 struct orig_node *orig_node; 456 struct orig_node *orig_node;
457 unsigned long flags;
453 458
454 spin_lock(&orig_hash_lock); 459 spin_lock_irqsave(&orig_hash_lock, flags);
455 460
456 /* send to all routers in range. */ 461 /* send to all routers in range. */
457 while (hash_iterate(orig_hash, &hashit)) { 462 while (hash_iterate(orig_hash, &hashit)) {
@@ -478,14 +483,15 @@ static void broadcast_vis_packet(struct vis_info *info, int packet_length)
478 } 483 }
479 } 484 }
480 memcpy(info->packet.target_orig, broadcastAddr, ETH_ALEN); 485 memcpy(info->packet.target_orig, broadcastAddr, ETH_ALEN);
481 spin_unlock(&orig_hash_lock); 486 spin_unlock_irqrestore(&orig_hash_lock, flags);
482} 487}
483 488
484static void unicast_vis_packet(struct vis_info *info, int packet_length) 489static void unicast_vis_packet(struct vis_info *info, int packet_length)
485{ 490{
486 struct orig_node *orig_node; 491 struct orig_node *orig_node;
492 unsigned long flags;
487 493
488 spin_lock(&orig_hash_lock); 494 spin_lock_irqsave(&orig_hash_lock, flags);
489 orig_node = ((struct orig_node *) 495 orig_node = ((struct orig_node *)
490 hash_find(orig_hash, info->packet.target_orig)); 496 hash_find(orig_hash, info->packet.target_orig));
491 497
@@ -496,7 +502,7 @@ static void unicast_vis_packet(struct vis_info *info, int packet_length)
496 orig_node->batman_if, 502 orig_node->batman_if,
497 orig_node->router->addr); 503 orig_node->router->addr);
498 } 504 }
499 spin_unlock(&orig_hash_lock); 505 spin_unlock_irqrestore(&orig_hash_lock, flags);
500} 506}
501 507
502/* only send one vis packet. called from send_vis_packets() */ 508/* only send one vis packet. called from send_vis_packets() */
@@ -526,8 +532,9 @@ static void send_vis_packet(struct vis_info *info)
526static void send_vis_packets(struct work_struct *work) 532static void send_vis_packets(struct work_struct *work)
527{ 533{
528 struct vis_info *info, *temp; 534 struct vis_info *info, *temp;
535 unsigned long flags;
529 536
530 spin_lock(&vis_hash_lock); 537 spin_lock_irqsave(&vis_hash_lock, flags);
531 purge_vis_packets(); 538 purge_vis_packets();
532 539
533 if (generate_vis_packet() == 0) 540 if (generate_vis_packet() == 0)
@@ -538,7 +545,7 @@ static void send_vis_packets(struct work_struct *work)
538 list_del_init(&info->send_list); 545 list_del_init(&info->send_list);
539 send_vis_packet(info); 546 send_vis_packet(info);
540 } 547 }
541 spin_unlock(&vis_hash_lock); 548 spin_unlock_irqrestore(&vis_hash_lock, flags);
542 start_vis_timer(); 549 start_vis_timer();
543} 550}
544static DECLARE_DELAYED_WORK(vis_timer_wq, send_vis_packets); 551static DECLARE_DELAYED_WORK(vis_timer_wq, send_vis_packets);
@@ -547,10 +554,11 @@ static DECLARE_DELAYED_WORK(vis_timer_wq, send_vis_packets);
547 * initialized (e.g. bat0 is initialized, interfaces have been added) */ 554 * initialized (e.g. bat0 is initialized, interfaces have been added) */
548int vis_init(void) 555int vis_init(void)
549{ 556{
557 unsigned long flags;
550 if (vis_hash) 558 if (vis_hash)
551 return 1; 559 return 1;
552 560
553 spin_lock(&vis_hash_lock); 561 spin_lock_irqsave(&vis_hash_lock, flags);
554 562
555 vis_hash = hash_new(256, vis_info_cmp, vis_info_choose); 563 vis_hash = hash_new(256, vis_info_cmp, vis_info_choose);
556 if (!vis_hash) { 564 if (!vis_hash) {
@@ -588,12 +596,12 @@ int vis_init(void)
588 goto err; 596 goto err;
589 } 597 }
590 598
591 spin_unlock(&vis_hash_lock); 599 spin_unlock_irqrestore(&vis_hash_lock, flags);
592 start_vis_timer(); 600 start_vis_timer();
593 return 1; 601 return 1;
594 602
595err: 603err:
596 spin_unlock(&vis_hash_lock); 604 spin_unlock_irqrestore(&vis_hash_lock, flags);
597 vis_quit(); 605 vis_quit();
598 return 0; 606 return 0;
599} 607}
@@ -601,17 +609,18 @@ err:
601/* shutdown vis-server */ 609/* shutdown vis-server */
602void vis_quit(void) 610void vis_quit(void)
603{ 611{
612 unsigned long flags;
604 if (!vis_hash) 613 if (!vis_hash)
605 return; 614 return;
606 615
607 cancel_delayed_work_sync(&vis_timer_wq); 616 cancel_delayed_work_sync(&vis_timer_wq);
608 617
609 spin_lock(&vis_hash_lock); 618 spin_lock_irqsave(&vis_hash_lock, flags);
610 /* properly remove, kill timers ... */ 619 /* properly remove, kill timers ... */
611 hash_delete(vis_hash, free_info); 620 hash_delete(vis_hash, free_info);
612 vis_hash = NULL; 621 vis_hash = NULL;
613 my_vis_info = NULL; 622 my_vis_info = NULL;
614 spin_unlock(&vis_hash_lock); 623 spin_unlock_irqrestore(&vis_hash_lock, flags);
615} 624}
616 625
617/* schedule packets for (re)transmission */ 626/* schedule packets for (re)transmission */
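
Every vis lock in this file follows the same mechanical conversion: a caller-local flags word plus the irqsave/irqrestore pair. For reference, the pattern is:

	unsigned long flags;

	/* disable local interrupts and remember their prior state */
	spin_lock_irqsave(&vis_hash_lock, flags);
	/* critical section: safe even if the same data is also
	 * touched from the packet-receive path on this CPU */
	spin_unlock_irqrestore(&vis_hash_lock, flags);

Since registered packet handlers run in softirq rather than hardirq context, spin_lock_bh() would likely suffice at most of these sites; irqsave is the strictly stronger variant and keeps the conversion uniform.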