author		David S. Miller <davem@davemloft.net>	2012-07-11 02:56:33 -0400
committer	David S. Miller <davem@davemloft.net>	2012-07-11 02:56:33 -0400
commit		04c9f416e371cff076a8b3279fb213628915d059 (patch)
tree		2b64cb835cbc9d19d2d06f1e7618615d40ada0af /net
parent		c278fa53c123282f753b2264fc62c0e9502a32fa (diff)
parent		c1f5163de417dab01fa9daaf09a74bbb19303f3c (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Conflicts:
net/batman-adv/bridge_loop_avoidance.c
net/batman-adv/bridge_loop_avoidance.h
net/batman-adv/soft-interface.c
net/mac80211/mlme.c
With merge help from Antonio Quartulli (batman-adv) and
Stephen Rothwell (drivers/net/usb/qmi_wwan.c).
The net/mac80211/mlme.c conflict seemed easy enough, accounting for a
conversion to some new tracing macros.
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net')
-rw-r--r--  net/8021q/vlan.c                         |  3
-rw-r--r--  net/batman-adv/bridge_loop_avoidance.c   | 15
-rw-r--r--  net/batman-adv/bridge_loop_avoidance.h   |  6
-rw-r--r--  net/batman-adv/soft-interface.c          |  6
-rw-r--r--  net/core/dev.c                           |  8
-rw-r--r--  net/core/netprio_cgroup.c                |  7
-rw-r--r--  net/ieee802154/dgram.c                   | 12
-rw-r--r--  net/mac80211/mlme.c                      |  6
-rw-r--r--  net/mac80211/rc80211_minstrel_ht.c       |  2
-rw-r--r--  net/netfilter/xt_set.c                   |  4
-rw-r--r--  net/nfc/llcp/sock.c                      |  2
-rw-r--r--  net/rxrpc/ar-peer.c                      |  2
-rw-r--r--  net/sched/sch_netem.c                    | 42
13 files changed, 62 insertions, 53 deletions
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index 6089f0cf23b4..9096bcb08132 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -403,6 +403,9 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
 		break;
 
 	case NETDEV_DOWN:
+		if (dev->features & NETIF_F_HW_VLAN_FILTER)
+			vlan_vid_del(dev, 0);
+
 		/* Put all VLANs for this dev in the down state too. */
 		for (i = 0; i < VLAN_N_VID; i++) {
 			vlandev = vlan_group_get_device(grp, i);
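
This presumably mirrors the NETDEV_UP path, which takes a VID-0 reference for devices advertising NETIF_F_HW_VLAN_FILTER; without the matching delete on NETDEV_DOWN that reference would leak. A minimal userspace sketch of the symmetric add/del pattern, with illustrative names (not the kernel's):

#include <assert.h>
#include <stdbool.h>

static int vid0_refcount;
static bool hw_vlan_filter = true;

static void on_up(void)
{
	if (hw_vlan_filter)
		vid0_refcount++;	/* stands in for vlan_vid_add(dev, 0) */
}

static void on_down(void)
{
	if (hw_vlan_filter)
		vid0_refcount--;	/* the added vlan_vid_del(dev, 0) call */
}

int main(void)
{
	on_up();
	on_down();
	assert(vid0_refcount == 0);	/* no leaked VID-0 reference */
	return 0;
}
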
diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c
index 3483e4035cbe..6705d35b17ce 100644
--- a/net/batman-adv/bridge_loop_avoidance.c
+++ b/net/batman-adv/bridge_loop_avoidance.c
@@ -1381,6 +1381,7 @@ void batadv_bla_free(struct batadv_priv *bat_priv)
  * @bat_priv: the bat priv with all the soft interface information
  * @skb: the frame to be checked
  * @vid: the VLAN ID of the frame
+ * @is_bcast: the packet came in a broadcast packet type.
  *
  * bla_rx avoidance checks if:
  * * we have to race for a claim
@@ -1390,7 +1391,8 @@ void batadv_bla_free(struct batadv_priv *bat_priv)
  * returns 1, otherwise it returns 0 and the caller shall further
  * process the skb.
  */
-int batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb, short vid)
+int batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb, short vid,
+		  bool is_bcast)
 {
 	struct ethhdr *ethhdr;
 	struct batadv_claim search_claim, *claim = NULL;
@@ -1409,7 +1411,7 @@ int batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb, short vid)
 
 	if (unlikely(atomic_read(&bat_priv->bla_num_requests)))
 		/* don't allow broadcasts while requests are in flight */
-		if (is_multicast_ether_addr(ethhdr->h_dest))
+		if (is_multicast_ether_addr(ethhdr->h_dest) && is_bcast)
 			goto handled;
 
 	memcpy(search_claim.addr, ethhdr->h_source, ETH_ALEN);
@@ -1435,8 +1437,13 @@ int batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb, short vid)
 	}
 
 	/* if it is a broadcast ... */
-	if (is_multicast_ether_addr(ethhdr->h_dest)) {
-		/* ... drop it. the responsible gateway is in charge. */
+	if (is_multicast_ether_addr(ethhdr->h_dest) && is_bcast) {
+		/* ... drop it. the responsible gateway is in charge.
+		 *
+		 * We need to check is_bcast because with the gateway
+		 * feature, broadcasts (like DHCP requests) may be sent
+		 * using a unicast packet type.
+		 */
 		goto handled;
 	} else {
 		/* seems the client considers us as its best gateway.
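
The point of the new flag: with the gateway feature, a frame whose Ethernet destination is broadcast (a DHCP request, say) can arrive carried in a unicast batman-adv packet, and only frames that actually travelled as BATADV_BCAST should be dropped here. A sketch of the resulting decision, as a hypothetical predicate (not a kernel function):

#include <stdbool.h>
#include <stdio.h>

static bool bla_drops_as_broadcast(bool mcast_dest, bool is_bcast)
{
	/* both conditions must hold after this change */
	return mcast_dest && is_bcast;
}

int main(void)
{
	/* gateway-forwarded DHCP request: broadcast destination,
	 * unicast carrier -- must NOT be dropped */
	printf("%d\n", bla_drops_as_broadcast(true, false));	/* 0 */
	/* genuinely flooded broadcast -- the gateway is in charge */
	printf("%d\n", bla_drops_as_broadcast(true, true));	/* 1 */
	return 0;
}
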
diff --git a/net/batman-adv/bridge_loop_avoidance.h b/net/batman-adv/bridge_loop_avoidance.h
index 08d13cb1e3df..563cfbf94a7f 100644
--- a/net/batman-adv/bridge_loop_avoidance.h
+++ b/net/batman-adv/bridge_loop_avoidance.h
@@ -21,7 +21,8 @@
 #define _NET_BATMAN_ADV_BLA_H_
 
 #ifdef CONFIG_BATMAN_ADV_BLA
-int batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb, short vid);
+int batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb, short vid,
+		  bool is_bcast);
 int batadv_bla_tx(struct batadv_priv *bat_priv, struct sk_buff *skb, short vid);
 int batadv_bla_is_backbone_gw(struct sk_buff *skb,
 			      struct batadv_orig_node *orig_node, int hdr_size);
@@ -40,7 +41,8 @@ void batadv_bla_free(struct batadv_priv *bat_priv);
 #else /* ifdef CONFIG_BATMAN_ADV_BLA */
 
 static inline int batadv_bla_rx(struct batadv_priv *bat_priv,
-				struct sk_buff *skb, short vid)
+				struct sk_buff *skb, short vid,
+				bool is_bcast)
 {
 	return 0;
 }
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
index 9e4bb61301ec..109ea2aae96c 100644
--- a/net/batman-adv/soft-interface.c
+++ b/net/batman-adv/soft-interface.c
@@ -267,8 +267,12 @@ void batadv_interface_rx(struct net_device *soft_iface,
 	struct batadv_priv *bat_priv = netdev_priv(soft_iface);
 	struct ethhdr *ethhdr;
 	struct vlan_ethhdr *vhdr;
+	struct batadv_header *batadv_header = (struct batadv_header *)skb->data;
 	short vid __maybe_unused = -1;
 	__be16 ethertype = __constant_htons(BATADV_ETH_P_BATMAN);
+	bool is_bcast;
+
+	is_bcast = (batadv_header->packet_type == BATADV_BCAST);
 
 	/* check if enough space is available for pulling, and pull */
 	if (!pskb_may_pull(skb, hdr_size))
@@ -315,7 +319,7 @@ void batadv_interface_rx(struct net_device *soft_iface,
 	/* Let the bridge loop avoidance check the packet. If will
 	 * not handle it, we can safely push it up.
 	 */
-	if (batadv_bla_rx(bat_priv, skb, vid))
+	if (batadv_bla_rx(bat_priv, skb, vid, is_bcast))
 		goto out;
 
 	netif_rx(skb);
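
Note that is_bcast is computed at the top of the function: once the batman-adv header has been pulled off the skb, its packet_type is no longer in front of skb->data. A tiny plain-buffer sketch of that capture-before-pull ordering (illustrative types, not the kernel's):

#include <stdbool.h>
#include <stdio.h>

struct hdr_sketch { unsigned char packet_type; };
enum { SKETCH_BCAST = 0x04 };	/* stand-in for BATADV_BCAST */

int main(void)
{
	unsigned char frame[] = { SKETCH_BCAST, 0x00 /* payload */ };
	unsigned char *data = frame;
	bool is_bcast =
		((struct hdr_sketch *)data)->packet_type == SKETCH_BCAST;

	data += sizeof(struct hdr_sketch);	/* "pull": header gone from view */
	printf("is_bcast = %d\n", is_bcast);	/* 1, captured in time */
	return 0;
}
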
diff --git a/net/core/dev.c b/net/core/dev.c
index 5ab6f4b37c0c..73e87c7b4377 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2457,8 +2457,12 @@ static void skb_update_prio(struct sk_buff *skb)
 {
 	struct netprio_map *map = rcu_dereference_bh(skb->dev->priomap);
 
-	if ((!skb->priority) && (skb->sk) && map)
-		skb->priority = map->priomap[skb->sk->sk_cgrp_prioidx];
+	if (!skb->priority && skb->sk && map) {
+		unsigned int prioidx = skb->sk->sk_cgrp_prioidx;
+
+		if (prioidx < map->priomap_len)
+			skb->priority = map->priomap[prioidx];
+	}
 }
 #else
 #define skb_update_prio(skb)
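
The added check matters because a socket's cgroup prio index can exceed the length of a priomap that was sized earlier; indexing without the bound is an out-of-bounds read. A minimal userspace sketch of the bounds-checked lookup, with illustrative names:

#include <stdio.h>

struct prio_map_sketch {
	unsigned int priomap_len;
	unsigned int priomap[4];
};

static unsigned int lookup_prio(const struct prio_map_sketch *map,
				unsigned int idx)
{
	if (map && idx < map->priomap_len)
		return map->priomap[idx];
	return 0;	/* default priority when unmapped */
}

int main(void)
{
	struct prio_map_sketch map = {
		.priomap_len = 4,
		.priomap = { 0, 7, 3, 1 },
	};

	printf("%u\n", lookup_prio(&map, 2));	/* 3 */
	printf("%u\n", lookup_prio(&map, 9));	/* 0, not a wild read */
	return 0;
}
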
diff --git a/net/core/netprio_cgroup.c b/net/core/netprio_cgroup.c
index 5b8aa2fae48b..3e953eaddbfc 100644
--- a/net/core/netprio_cgroup.c
+++ b/net/core/netprio_cgroup.c
@@ -49,8 +49,9 @@ static int get_prioidx(u32 *prio)
 		return -ENOSPC;
 	}
 	set_bit(prioidx, prioidx_map);
+	if (atomic_read(&max_prioidx) < prioidx)
+		atomic_set(&max_prioidx, prioidx);
 	spin_unlock_irqrestore(&prioidx_map_lock, flags);
-	atomic_set(&max_prioidx, prioidx);
 	*prio = prioidx;
 	return 0;
 }
@@ -141,7 +142,7 @@ static void cgrp_destroy(struct cgroup *cgrp)
 	rtnl_lock();
 	for_each_netdev(&init_net, dev) {
 		map = rtnl_dereference(dev->priomap);
-		if (map)
+		if (map && cs->prioidx < map->priomap_len)
 			map->priomap[cs->prioidx] = 0;
 	}
 	rtnl_unlock();
@@ -165,7 +166,7 @@ static int read_priomap(struct cgroup *cont, struct cftype *cft,
 	rcu_read_lock();
 	for_each_netdev_rcu(&init_net, dev) {
 		map = rcu_dereference(dev->priomap);
-		priority = (map && prioidx < map->priomap_len) ? map->priomap[prioidx] : 0;
+		priority = (map && prioidx < map->priomap_len) ? map->priomap[prioidx] : 0;
 		cb->fill(cb, dev->name, priority);
 	}
 	rcu_read_unlock();
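
Two related fixes here: max_prioidx is now raised (never blindly overwritten) while prioidx_map_lock is still held, so a racing allocator can't clobber a larger max with its own smaller index, and every priomap dereference is bounds-checked as in net/core/dev.c above. A sketch of the update-under-the-same-lock pattern, using pthread stand-ins for the kernel spinlock and atomics:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t idx_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int next_idx, max_idx;

static unsigned int alloc_idx(void)
{
	unsigned int idx;

	pthread_mutex_lock(&idx_lock);
	idx = next_idx++;
	if (max_idx < idx)	/* monotonic raise, inside the lock */
		max_idx = idx;
	pthread_mutex_unlock(&idx_lock);
	return idx;
}

int main(void)
{
	alloc_idx();
	alloc_idx();
	printf("max_idx = %u\n", max_idx);	/* 1 */
	return 0;
}
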
diff --git a/net/ieee802154/dgram.c b/net/ieee802154/dgram.c
index 6fbb2ad7bb6d..16705611589a 100644
--- a/net/ieee802154/dgram.c
+++ b/net/ieee802154/dgram.c
@@ -230,6 +230,12 @@ static int dgram_sendmsg(struct kiocb *iocb, struct sock *sk,
 	mtu = dev->mtu;
 	pr_debug("name = %s, mtu = %u\n", dev->name, mtu);
 
+	if (size > mtu) {
+		pr_debug("size = %Zu, mtu = %u\n", size, mtu);
+		err = -EINVAL;
+		goto out_dev;
+	}
+
 	hlen = LL_RESERVED_SPACE(dev);
 	tlen = dev->needed_tailroom;
 	skb = sock_alloc_send_skb(sk, hlen + tlen + size,
@@ -258,12 +264,6 @@ static int dgram_sendmsg(struct kiocb *iocb, struct sock *sk,
 	if (err < 0)
 		goto out_skb;
 
-	if (size > mtu) {
-		pr_debug("size = %Zu, mtu = %u\n", size, mtu);
-		err = -EINVAL;
-		goto out_skb;
-	}
-
 	skb->dev = dev;
 	skb->sk = sk;
 	skb->protocol = htons(ETH_P_IEEE802154);
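
Moving the size > mtu test ahead of sock_alloc_send_skb() rejects an oversized datagram before an skb is allocated, copied into, and freed again; the error path also changes from out_skb to out_dev since there is no skb to unwind yet. A userspace sketch of the validate-before-allocate ordering (names are illustrative):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

static int send_dgram(size_t size, size_t mtu)
{
	void *buf;

	if (size > mtu)		/* reject before paying for an allocation */
		return -EINVAL;

	buf = malloc(size);
	if (!buf)
		return -ENOMEM;
	/* ... build and transmit ... */
	free(buf);
	return 0;
}

int main(void)
{
	printf("%d\n", send_dgram(2000, 127));	/* -22, nothing allocated */
	printf("%d\n", send_dgram(64, 127));	/* 0 */
	return 0;
}
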
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index e6fe84a08443..aa69a331f374 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -2152,15 +2152,13 @@ ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
 			   mgmt->sa, status_code);
 		ieee80211_destroy_assoc_data(sdata, false);
 	} else {
-		sdata_info(sdata, "associated\n");
-
 		if (!ieee80211_assoc_success(sdata, *bss, mgmt, len)) {
 			/* oops -- internal error -- send timeout for now */
-			ieee80211_destroy_assoc_data(sdata, true);
-			sta_info_destroy_addr(sdata, mgmt->bssid);
+			ieee80211_destroy_assoc_data(sdata, false);
 			cfg80211_put_bss(*bss);
 			return RX_MGMT_CFG80211_ASSOC_TIMEOUT;
 		}
+		sdata_info(sdata, "associated\n");
 
 		/*
 		 * destroy assoc_data afterwards, as otherwise an idle
diff --git a/net/mac80211/rc80211_minstrel_ht.c b/net/mac80211/rc80211_minstrel_ht.c
index 2d1acc6c5445..f9e51ef8dfa2 100644
--- a/net/mac80211/rc80211_minstrel_ht.c
+++ b/net/mac80211/rc80211_minstrel_ht.c
@@ -809,7 +809,7 @@ minstrel_ht_alloc_sta(void *priv, struct ieee80211_sta *sta, gfp_t gfp)
 		max_rates = sband->n_bitrates;
 	}
 
-	msp = kzalloc(sizeof(struct minstrel_ht_sta), gfp);
+	msp = kzalloc(sizeof(*msp), gfp);
 	if (!msp)
 		return NULL;
 
diff --git a/net/netfilter/xt_set.c b/net/netfilter/xt_set.c
index 035960ec5cb9..c6f7db720d84 100644
--- a/net/netfilter/xt_set.c
+++ b/net/netfilter/xt_set.c
@@ -16,6 +16,7 @@
 
 #include <linux/netfilter/x_tables.h>
 #include <linux/netfilter/xt_set.h>
+#include <linux/netfilter/ipset/ip_set_timeout.h>
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
@@ -310,7 +311,8 @@ set_target_v2(struct sk_buff *skb, const struct xt_action_param *par)
 			  info->del_set.flags, 0, UINT_MAX);
 
 	/* Normalize to fit into jiffies */
-	if (add_opt.timeout > UINT_MAX/MSEC_PER_SEC)
+	if (add_opt.timeout != IPSET_NO_TIMEOUT &&
+	    add_opt.timeout > UINT_MAX/MSEC_PER_SEC)
 		add_opt.timeout = UINT_MAX/MSEC_PER_SEC;
 	if (info->add_set.index != IPSET_INVALID_ID)
 		ip_set_add(info->add_set.index, skb, par, &add_opt);
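
The clamp must not touch the IPSET_NO_TIMEOUT sentinel: since the sentinel is larger than UINT_MAX/MSEC_PER_SEC, the old code silently turned "no timeout" into a finite one. A sketch of sentinel-aware clamping, with UINT_MAX standing in for IPSET_NO_TIMEOUT:

#include <limits.h>
#include <stdio.h>

#define NO_TIMEOUT	UINT_MAX	/* stand-in for IPSET_NO_TIMEOUT */
#define MSEC_PER_SEC	1000U

static unsigned int normalize_timeout(unsigned int t)
{
	/* clamp to fit jiffies math, but let the sentinel through */
	if (t != NO_TIMEOUT && t > UINT_MAX / MSEC_PER_SEC)
		t = UINT_MAX / MSEC_PER_SEC;
	return t;
}

int main(void)
{
	printf("%u\n", normalize_timeout(NO_TIMEOUT));	 /* unchanged */
	printf("%u\n", normalize_timeout(UINT_MAX - 1)); /* clamped */
	return 0;
}
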
diff --git a/net/nfc/llcp/sock.c b/net/nfc/llcp/sock.c
index 2c0b317344b7..05ca5a680071 100644
--- a/net/nfc/llcp/sock.c
+++ b/net/nfc/llcp/sock.c
@@ -292,7 +292,7 @@ static int llcp_sock_getname(struct socket *sock, struct sockaddr *addr,
 
 	pr_debug("%p\n", sk);
 
-	if (llcp_sock == NULL)
+	if (llcp_sock == NULL || llcp_sock->dev == NULL)
 		return -EBADFD;
 
 	addr->sa_family = AF_NFC;
diff --git a/net/rxrpc/ar-peer.c b/net/rxrpc/ar-peer.c
index 2754f098d436..bebaa43484bc 100644
--- a/net/rxrpc/ar-peer.c
+++ b/net/rxrpc/ar-peer.c
@@ -229,7 +229,7 @@ found_UDP_peer:
 	return peer;
 
 new_UDP_peer:
-	_net("Rx UDP DGRAM from NEW peer %d", peer->debug_id);
+	_net("Rx UDP DGRAM from NEW peer");
 	read_unlock_bh(&rxrpc_peer_lock);
 	_leave(" = -EBUSY [new]");
 	return ERR_PTR(-EBUSY);
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index a2a95aabf9c2..c412ad0d0308 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -331,29 +331,22 @@ static psched_time_t packet_len_2_sched_time(unsigned int len, struct netem_sche
 	return PSCHED_NS2TICKS(ticks);
 }
 
-static int tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
+static void tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
 {
 	struct sk_buff_head *list = &sch->q;
 	psched_time_t tnext = netem_skb_cb(nskb)->time_to_send;
-	struct sk_buff *skb;
-
-	if (likely(skb_queue_len(list) < sch->limit)) {
-		skb = skb_peek_tail(list);
-		/* Optimize for add at tail */
-		if (likely(!skb || tnext >= netem_skb_cb(skb)->time_to_send))
-			return qdisc_enqueue_tail(nskb, sch);
+	struct sk_buff *skb = skb_peek_tail(list);
 
-		skb_queue_reverse_walk(list, skb) {
-			if (tnext >= netem_skb_cb(skb)->time_to_send)
-				break;
-		}
+	/* Optimize for add at tail */
+	if (likely(!skb || tnext >= netem_skb_cb(skb)->time_to_send))
+		return __skb_queue_tail(list, nskb);
 
-		__skb_queue_after(list, skb, nskb);
-		sch->qstats.backlog += qdisc_pkt_len(nskb);
-		return NET_XMIT_SUCCESS;
-	}
+	skb_queue_reverse_walk(list, skb) {
+		if (tnext >= netem_skb_cb(skb)->time_to_send)
+			break;
+	}
 
-	return qdisc_reshape_fail(nskb, sch);
+	__skb_queue_after(list, skb, nskb);
 }
 
 /*
@@ -368,7 +361,6 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	/* We don't fill cb now as skb_unshare() may invalidate it */
 	struct netem_skb_cb *cb;
 	struct sk_buff *skb2;
-	int ret;
 	int count = 1;
 
 	/* Random duplication */
@@ -419,6 +411,11 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		skb->data[net_random() % skb_headlen(skb)] ^= 1<<(net_random() % 8);
 	}
 
+	if (unlikely(skb_queue_len(&sch->q) >= sch->limit))
+		return qdisc_reshape_fail(skb, sch);
+
+	sch->qstats.backlog += qdisc_pkt_len(skb);
+
 	cb = netem_skb_cb(skb);
 	if (q->gap == 0 ||		/* not doing reordering */
 	    q->counter < q->gap - 1 ||	/* inside last reordering gap */
@@ -450,7 +447,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 
 		cb->time_to_send = now + delay;
 		++q->counter;
-		ret = tfifo_enqueue(skb, sch);
+		tfifo_enqueue(skb, sch);
 	} else {
 		/*
 		 * Do re-ordering by putting one out of N packets at the front
@@ -460,16 +457,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		q->counter = 0;
 
 		__skb_queue_head(&sch->q, skb);
-		sch->qstats.backlog += qdisc_pkt_len(skb);
 		sch->qstats.requeues++;
-		ret = NET_XMIT_SUCCESS;
-	}
-
-	if (ret != NET_XMIT_SUCCESS) {
-		if (net_xmit_drop_count(ret)) {
-			sch->qstats.drops++;
-			return ret;
-		}
 	}
 
 	return NET_XMIT_SUCCESS;
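
The refactor hoists the queue-limit check and backlog accounting into netem_enqueue(), so by the time tfifo_enqueue() runs the packet is already accepted and the function can become void: all it does is find the insertion point, trying the tail first (timestamps mostly increase) and otherwise walking backwards until it finds the first entry not later than the new one, keeping the queue sorted by time_to_send. A userspace sketch of that insertion strategy over a circular doubly-linked list (analogous to sk_buff_head; names are illustrative):

#include <stdio.h>

struct node { long t; struct node *prev, *next; };
struct list { struct node head; };	/* circular, like sk_buff_head */

static void list_init(struct list *l)
{
	l->head.prev = l->head.next = &l->head;
}

static void insert_after(struct node *pos, struct node *n)
{
	n->prev = pos;
	n->next = pos->next;
	pos->next->prev = n;
	pos->next = n;
}

static void tfifo_insert(struct list *l, struct node *n)
{
	struct node *pos = l->head.prev;	/* tail: common fast path */

	/* reverse walk to the first entry not later than n */
	while (pos != &l->head && pos->t > n->t)
		pos = pos->prev;
	insert_after(pos, n);
}

int main(void)
{
	struct list l;
	struct node a = { 10 }, b = { 30 }, c = { 20 };
	struct node *p;

	list_init(&l);
	tfifo_insert(&l, &a);
	tfifo_insert(&l, &b);
	tfifo_insert(&l, &c);	/* lands between a and b */
	for (p = l.head.next; p != &l.head; p = p->next)
		printf("%ld ", p->t);	/* 10 20 30 */
	printf("\n");
	return 0;
}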