path: root/net/mac80211/mesh_pathtbl.c
author	Linus Torvalds <torvalds@linux-foundation.org>	2012-10-02 16:38:27 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-10-02 16:38:27 -0400
commit	aecdc33e111b2c447b622e287c6003726daa1426 (patch)
tree	3e7657eae4b785e1a1fb5dfb225dbae0b2f0cfc6 /net/mac80211/mesh_pathtbl.c
parent	a20acf99f75e49271381d65db097c9763060a1e8 (diff)
parent	a3a6cab5ea10cca64d036851fe0d932448f2fe4f (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next
Pull networking changes from David Miller:

 1) GRE now works over ipv6, from Dmitry Kozlov.

 2) Make SCTP more network namespace aware, from Eric Biederman.

 3) TEAM driver now works with non-ethernet devices, from Jiri Pirko.

 4) Make openvswitch network namespace aware, from Pravin B Shelar.

 5) IPV6 NAT implementation, from Patrick McHardy.

 6) Server side support for TCP Fast Open, from Jerry Chu and others.

 7) Packet BPF filter supports MOD and XOR, from Eric Dumazet and Daniel
    Borkmann.

 8) Increase the loopback default MTU to 64K, from Eric Dumazet.

 9) Use a per-task rather than per-socket page fragment allocator for
    outgoing networking traffic.  This benefits processes that have very
    many mostly idle sockets, which is quite common.  From Eric Dumazet.

10) Use up to 32K for page fragment allocations, with fallbacks to
    smaller sizes when higher order page allocations fail.  Benefits are
    a) less segments for driver to process b) less calls to page
    allocator c) less waste of space.  From Eric Dumazet.

11) Allow GRO to be used on GRE tunnels, from Eric Dumazet.

12) VXLAN device driver, one way to handle VLAN issues such as the
    limitation of 4096 VLAN IDs yet still have some level of isolation.
    From Stephen Hemminger.

13) As usual there is a large boatload of driver changes, with the scale
    perhaps tilted towards the wireless side this time around.

Fix up various fairly trivial conflicts, mostly caused by the user
namespace changes.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next: (1012 commits)
  hyperv: Add buffer for extended info after the RNDIS response message.
  hyperv: Report actual status in receive completion packet
  hyperv: Remove extra allocated space for recv_pkt_list elements
  hyperv: Fix page buffer handling in rndis_filter_send_request()
  hyperv: Fix the missing return value in rndis_filter_set_packet_filter()
  hyperv: Fix the max_xfer_size in RNDIS initialization
  vxlan: put UDP socket in correct namespace
  vxlan: Depend on CONFIG_INET
  sfc: Fix the reported priorities of different filter types
  sfc: Remove EFX_FILTER_FLAG_RX_OVERRIDE_IP
  sfc: Fix loopback self-test with separate_tx_channels=1
  sfc: Fix MCDI structure field lookup
  sfc: Add parentheses around use of bitfield macro arguments
  sfc: Fix null function pointer in efx_sriov_channel_type
  vxlan: virtual extensible lan
  igmp: export symbol ip_mc_leave_group
  netlink: add attributes to fdb interface
  tg3: unconditionally select HWMON support when tg3 is enabled.
  Revert "net: ti cpsw ethernet: allow reading phy interface mode from DT"
  gre: fix sparse warning
  ...
Diffstat (limited to 'net/mac80211/mesh_pathtbl.c')
-rw-r--r--	net/mac80211/mesh_pathtbl.c	44
1 file changed, 20 insertions(+), 24 deletions(-)
diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
index 075bc535c601..aa749818860e 100644
--- a/net/mac80211/mesh_pathtbl.c
+++ b/net/mac80211/mesh_pathtbl.c
@@ -203,23 +203,17 @@ void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta)
 {
 	struct sk_buff *skb;
 	struct ieee80211_hdr *hdr;
-	struct sk_buff_head tmpq;
 	unsigned long flags;
 
 	rcu_assign_pointer(mpath->next_hop, sta);
 
-	__skb_queue_head_init(&tmpq);
-
 	spin_lock_irqsave(&mpath->frame_queue.lock, flags);
-
-	while ((skb = __skb_dequeue(&mpath->frame_queue)) != NULL) {
+	skb_queue_walk(&mpath->frame_queue, skb) {
 		hdr = (struct ieee80211_hdr *) skb->data;
 		memcpy(hdr->addr1, sta->sta.addr, ETH_ALEN);
 		memcpy(hdr->addr2, mpath->sdata->vif.addr, ETH_ALEN);
-		__skb_queue_tail(&tmpq, skb);
 	}
 
-	skb_queue_splice(&tmpq, &mpath->frame_queue);
 	spin_unlock_irqrestore(&mpath->frame_queue.lock, flags);
 }
 
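This first hunk drops the temporary queue: mesh_path_assign_nexthop() only rewrites the 802.11 addresses of each queued frame in place and never changes queue membership, so walking the queue under its lock with skb_queue_walk() is enough. A minimal sketch of the pattern, assuming (as the hunk does) that the caller holds the frame_queue lock across the walk; rewrite_queued_frames() is a hypothetical name:

	#include <linux/skbuff.h>

	/* Visit every queued skb in place. Nothing is unlinked, so the
	 * plain (non-_safe) iterator is sufficient; the queue's spinlock
	 * must be held by the caller, as in the hunk above. */
	static void rewrite_queued_frames(struct sk_buff_head *q)
	{
		struct sk_buff *skb;

		skb_queue_walk(q, skb) {
			/* mutate skb->data headers here, e.g. the
			 * addr1/addr2 memcpy()s in the hunk */
		}
	}

The old code dequeued every frame into tmpq and spliced it back just to get a cursor; the iterator removes both the extra queue head and the splice.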
@@ -285,40 +279,42 @@ static void mesh_path_move_to_queue(struct mesh_path *gate_mpath,
 				    struct mesh_path *from_mpath,
 				    bool copy)
 {
-	struct sk_buff *skb, *cp_skb = NULL;
-	struct sk_buff_head gateq, failq;
+	struct sk_buff *skb, *fskb, *tmp;
+	struct sk_buff_head failq;
 	unsigned long flags;
-	int num_skbs;
 
 	BUG_ON(gate_mpath == from_mpath);
 	BUG_ON(!gate_mpath->next_hop);
 
-	__skb_queue_head_init(&gateq);
 	__skb_queue_head_init(&failq);
 
 	spin_lock_irqsave(&from_mpath->frame_queue.lock, flags);
 	skb_queue_splice_init(&from_mpath->frame_queue, &failq);
 	spin_unlock_irqrestore(&from_mpath->frame_queue.lock, flags);
 
-	num_skbs = skb_queue_len(&failq);
-
-	while (num_skbs--) {
-		skb = __skb_dequeue(&failq);
-		if (copy) {
-			cp_skb = skb_copy(skb, GFP_ATOMIC);
-			if (cp_skb)
-				__skb_queue_tail(&failq, cp_skb);
+	skb_queue_walk_safe(&failq, fskb, tmp) {
+		if (skb_queue_len(&gate_mpath->frame_queue) >=
+		    MESH_FRAME_QUEUE_LEN) {
+			mpath_dbg(gate_mpath->sdata, "mpath queue full!\n");
+			break;
 		}
 
+		skb = skb_copy(fskb, GFP_ATOMIC);
+		if (WARN_ON(!skb))
+			break;
+
 		prepare_for_gate(skb, gate_mpath->dst, gate_mpath);
-		__skb_queue_tail(&gateq, skb);
+		skb_queue_tail(&gate_mpath->frame_queue, skb);
+
+		if (copy)
+			continue;
+
+		__skb_unlink(fskb, &failq);
+		kfree_skb(fskb);
 	}
 
-	spin_lock_irqsave(&gate_mpath->frame_queue.lock, flags);
-	skb_queue_splice(&gateq, &gate_mpath->frame_queue);
 	mpath_dbg(gate_mpath->sdata, "Mpath queue for gate %pM has %d frames\n",
 		  gate_mpath->dst, skb_queue_len(&gate_mpath->frame_queue));
-	spin_unlock_irqrestore(&gate_mpath->frame_queue.lock, flags);
 
 	if (!copy)
 		return;
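The second hunk switches mesh_path_move_to_queue() to skb_queue_walk_safe(), which caches a lookahead pointer (tmp; fskb is the cursor) so the current entry can be unlinked and freed mid-walk; the plain iterator would step through a just-freed node. It also bounds the gate queue at MESH_FRAME_QUEUE_LEN, always queues a copy, and unlinks the original only in the move (!copy) case. A sketch of the safe-unlink idiom in isolation, with drop_all() as a hypothetical name:

	#include <linux/skbuff.h>

	/* Drain a private queue entry by entry. skb_queue_walk_safe()
	 * saves the next node in 'tmp' before the body runs, so
	 * __skb_unlink() on the current skb cannot break iteration. */
	static void drop_all(struct sk_buff_head *q)
	{
		struct sk_buff *skb, *tmp;

		skb_queue_walk_safe(q, skb, tmp) {
			__skb_unlink(skb, q);
			kfree_skb(skb);
		}
	}

Note the new code appends copies with skb_queue_tail(), the locked variant, instead of building a private gateq and splicing it in afterwards, which is why the explicit lock/splice/unlock block around the final mpath_dbg() disappears; failq itself stays private to this function, so its unlocked __skb_unlink() is fine.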
@@ -531,7 +527,7 @@ int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata)
 
 	read_lock_bh(&pathtbl_resize_lock);
 	memcpy(new_mpath->dst, dst, ETH_ALEN);
-	memset(new_mpath->rann_snd_addr, 0xff, ETH_ALEN);
+	eth_broadcast_addr(new_mpath->rann_snd_addr);
 	new_mpath->is_root = false;
 	new_mpath->sdata = sdata;
 	new_mpath->flags = 0;
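The last hunk is a straight conversion to a helper: eth_broadcast_addr() from <linux/etherdevice.h> performs the same byte fill the open-coded memset did. A sketch of the equivalence, assuming the helper's definition in kernels of this era; set_broadcast() is a hypothetical name:

	#include <linux/etherdevice.h>	/* eth_broadcast_addr(), ETH_ALEN */
	#include <linux/string.h>	/* memset() */
	#include <linux/types.h>	/* u8 */

	/* eth_broadcast_addr(addr) is equivalent to the line it replaced: */
	static inline void set_broadcast(u8 *addr)
	{
		memset(addr, 0xff, ETH_ALEN);	/* ff:ff:ff:ff:ff:ff */
	}

The helper states intent ("this is the Ethernet broadcast address") rather than leaving a raw 0xff fill for the reader to decode.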