Diffstat (limited to 'net')
41 files changed, 341 insertions, 118 deletions
diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
index bd537fc10254..50f58f5f1c34 100644
--- a/net/8021q/vlan_core.c
+++ b/net/8021q/vlan_core.c
@@ -12,7 +12,7 @@ int __vlan_hwaccel_rx(struct sk_buff *skb, struct vlan_group *grp,
 		return NET_RX_DROP;
 
 	if (skb_bond_should_drop(skb, ACCESS_ONCE(skb->dev->master)))
-		goto drop;
+		skb->deliver_no_wcard = 1;
 
 	skb->skb_iif = skb->dev->ifindex;
 	__vlan_hwaccel_put_tag(skb, vlan_tci);
@@ -84,7 +84,7 @@ vlan_gro_common(struct napi_struct *napi, struct vlan_group *grp,
 	struct sk_buff *p;
 
 	if (skb_bond_should_drop(skb, ACCESS_ONCE(skb->dev->master)))
-		goto drop;
+		skb->deliver_no_wcard = 1;
 
 	skb->skb_iif = skb->dev->ifindex;
 	__vlan_hwaccel_put_tag(skb, vlan_tci);
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 55be90826f5f..529842677817 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -708,7 +708,8 @@ static int vlan_dev_init(struct net_device *dev)
 	netif_carrier_off(dev);
 
 	/* IFF_BROADCAST|IFF_MULTICAST; ??? */
-	dev->flags = real_dev->flags & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI);
+	dev->flags = real_dev->flags & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI |
+					 IFF_MASTER | IFF_SLAVE);
 	dev->iflink = real_dev->ifindex;
 	dev->state = (real_dev->state & ((1<<__LINK_STATE_NOCARRIER) |
 					 (1<<__LINK_STATE_DORMANT))) |
diff --git a/net/bluetooth/bnep/netdev.c b/net/bluetooth/bnep/netdev.c
index 0faad5ce6dc4..8c100c9dae28 100644
--- a/net/bluetooth/bnep/netdev.c
+++ b/net/bluetooth/bnep/netdev.c
@@ -104,6 +104,8 @@ static void bnep_net_set_mc_list(struct net_device *dev)
 				break;
 			memcpy(__skb_put(skb, ETH_ALEN), ha->addr, ETH_ALEN);
 			memcpy(__skb_put(skb, ETH_ALEN), ha->addr, ETH_ALEN);
+
+			i++;
 		}
 		r->len = htons(skb->len - len);
 	}
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
index 26637439965b..b01dde35a69e 100644
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -128,7 +128,7 @@ void br_fdb_cleanup(unsigned long _data)
 {
 	struct net_bridge *br = (struct net_bridge *)_data;
 	unsigned long delay = hold_time(br);
-	unsigned long next_timer = jiffies + br->forward_delay;
+	unsigned long next_timer = jiffies + br->ageing_time;
 	int i;
 
 	spin_lock_bh(&br->hash_lock);
@@ -149,9 +149,7 @@ void br_fdb_cleanup(unsigned long _data)
 	}
 	spin_unlock_bh(&br->hash_lock);
 
-	/* Add HZ/4 to ensure we round the jiffies upwards to be after the next
-	 * timer, otherwise we might round down and will have no-op run. */
-	mod_timer(&br->gc_timer, round_jiffies(next_timer + HZ/4));
+	mod_timer(&br->gc_timer, round_jiffies_up(next_timer));
 }
 
 /* Completely flush all dynamic entries in forwarding database.*/
diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c
index a98ef1393097..a4e72a89e4ff 100644
--- a/net/bridge/br_forward.c
+++ b/net/bridge/br_forward.c
@@ -140,10 +140,10 @@ static int deliver_clone(const struct net_bridge_port *prev,
 			 void (*__packet_hook)(const struct net_bridge_port *p,
 					       struct sk_buff *skb))
 {
+	struct net_device *dev = BR_INPUT_SKB_CB(skb)->brdev;
+
 	skb = skb_clone(skb, GFP_ATOMIC);
 	if (!skb) {
-		struct net_device *dev = BR_INPUT_SKB_CB(skb)->brdev;
-
 		dev->stats.tx_dropped++;
 		return -ENOMEM;
 	}
diff --git a/net/caif/cfrfml.c b/net/caif/cfrfml.c
index cd2830fec935..fd27b172fb5d 100644
--- a/net/caif/cfrfml.c
+++ b/net/caif/cfrfml.c
@@ -83,7 +83,7 @@ static int cfrfml_transmit(struct cflayer *layr, struct cfpkt *pkt)
 	if (!cfsrvl_ready(service, &ret))
 		return ret;
 
-	if (!cfpkt_getlen(pkt) > CAIF_MAX_PAYLOAD_SIZE) {
+	if (cfpkt_getlen(pkt) > CAIF_MAX_PAYLOAD_SIZE) {
 		pr_err("CAIF: %s():Packet too large - size=%d\n",
 			__func__, cfpkt_getlen(pkt));
 		return -EOVERFLOW;
diff --git a/net/caif/cfserl.c b/net/caif/cfserl.c
index cb4325a3dc83..965c5baace40 100644
--- a/net/caif/cfserl.c
+++ b/net/caif/cfserl.c
@@ -59,16 +59,18 @@ static int cfserl_receive(struct cflayer *l, struct cfpkt *newpkt)
 	u8 stx = CFSERL_STX;
 	int ret;
 	u16 expectlen = 0;
+
 	caif_assert(newpkt != NULL);
 	spin_lock(&layr->sync);
 
 	if (layr->incomplete_frm != NULL) {
-
 		layr->incomplete_frm =
 		    cfpkt_append(layr->incomplete_frm, newpkt, expectlen);
 		pkt = layr->incomplete_frm;
-		if (pkt == NULL)
+		if (pkt == NULL) {
+			spin_unlock(&layr->sync);
 			return -ENOMEM;
+		}
 	} else {
 		pkt = newpkt;
 	}
diff --git a/net/caif/cfveil.c b/net/caif/cfveil.c
index 0fd827f49491..e04f7d964e83 100644
--- a/net/caif/cfveil.c
+++ b/net/caif/cfveil.c
@@ -84,7 +84,7 @@ static int cfvei_transmit(struct cflayer *layr, struct cfpkt *pkt)
 		return ret;
 	caif_assert(layr->dn != NULL);
 	caif_assert(layr->dn->transmit != NULL);
-	if (!cfpkt_getlen(pkt) > CAIF_MAX_PAYLOAD_SIZE) {
+	if (cfpkt_getlen(pkt) > CAIF_MAX_PAYLOAD_SIZE) {
 		pr_warning("CAIF: %s(): Packet too large - size=%d\n",
 			   __func__, cfpkt_getlen(pkt));
 		return -EOVERFLOW;
diff --git a/net/core/dev.c b/net/core/dev.c
index 1845b08c624e..723a34710ad4 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1553,6 +1553,24 @@ static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
 	rcu_read_unlock();
 }
 
+/*
+ * Routine to help set real_num_tx_queues. To avoid skbs mapped to queues
+ * greater then real_num_tx_queues stale skbs on the qdisc must be flushed.
+ */
+void netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
+{
+	unsigned int real_num = dev->real_num_tx_queues;
+
+	if (unlikely(txq > dev->num_tx_queues))
+		;
+	else if (txq > real_num)
+		dev->real_num_tx_queues = txq;
+	else if (txq < real_num) {
+		dev->real_num_tx_queues = txq;
+		qdisc_reset_all_tx_gt(dev, txq);
+	}
+}
+EXPORT_SYMBOL(netif_set_real_num_tx_queues);
 
 static inline void __netif_reschedule(struct Qdisc *q)
 {
@@ -2253,11 +2271,9 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
 	if (skb_rx_queue_recorded(skb)) {
 		u16 index = skb_get_rx_queue(skb);
 		if (unlikely(index >= dev->num_rx_queues)) {
-			if (net_ratelimit()) {
-				pr_warning("%s received packet on queue "
-					"%u, but number of RX queues is %u\n",
-					dev->name, index, dev->num_rx_queues);
-			}
+			WARN_ONCE(dev->num_rx_queues > 1, "%s received packet "
+				"on queue %u, but number of RX queues is %u\n",
+				dev->name, index, dev->num_rx_queues);
 			goto done;
 		}
 		rxqueue = dev->_rx + index;
@@ -2795,7 +2811,7 @@ static int __netif_receive_skb(struct sk_buff *skb)
 	struct net_device *orig_dev;
 	struct net_device *master;
 	struct net_device *null_or_orig;
-	struct net_device *null_or_bond;
+	struct net_device *orig_or_bond;
 	int ret = NET_RX_DROP;
 	__be16 type;
 
@@ -2812,13 +2828,24 @@ static int __netif_receive_skb(struct sk_buff *skb)
 	if (!skb->skb_iif)
 		skb->skb_iif = skb->dev->ifindex;
 
+	/*
+	 * bonding note: skbs received on inactive slaves should only
+	 * be delivered to pkt handlers that are exact matches. Also
+	 * the deliver_no_wcard flag will be set. If packet handlers
+	 * are sensitive to duplicate packets these skbs will need to
+	 * be dropped at the handler. The vlan accel path may have
+	 * already set the deliver_no_wcard flag.
+	 */
 	null_or_orig = NULL;
 	orig_dev = skb->dev;
 	master = ACCESS_ONCE(orig_dev->master);
-	if (master) {
-		if (skb_bond_should_drop(skb, master))
+	if (skb->deliver_no_wcard)
+		null_or_orig = orig_dev;
+	else if (master) {
+		if (skb_bond_should_drop(skb, master)) {
+			skb->deliver_no_wcard = 1;
 			null_or_orig = orig_dev; /* deliver only exact match */
-		else
+		} else
 			skb->dev = master;
 	}
 
@@ -2868,10 +2895,10 @@ ncls:
 	 * device that may have registered for a specific ptype.  The
 	 * handler may have to adjust skb->dev and orig_dev.
 	 */
-	null_or_bond = NULL;
+	orig_or_bond = orig_dev;
 	if ((skb->dev->priv_flags & IFF_802_1Q_VLAN) &&
 	    (vlan_dev_real_dev(skb->dev)->priv_flags & IFF_BONDING)) {
-		null_or_bond = vlan_dev_real_dev(skb->dev);
+		orig_or_bond = vlan_dev_real_dev(skb->dev);
 	}
 
@@ -2879,7 +2906,7 @@ ncls:
 			&ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
 		if (ptype->type == type && (ptype->dev == null_or_orig ||
 		     ptype->dev == skb->dev || ptype->dev == orig_dev ||
-		     ptype->dev == null_or_bond)) {
+		     ptype->dev == orig_or_bond)) {
 			if (pt_prev)
 				ret = deliver_skb(skb, pt_prev, orig_dev);
 			pt_prev = ptype;
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index a0f4964033d2..75e4ffeb8cc9 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -318,23 +318,33 @@ out:
 }
 
 static noinline_for_stack int ethtool_set_rxnfc(struct net_device *dev,
-						void __user *useraddr)
+						u32 cmd, void __user *useraddr)
 {
-	struct ethtool_rxnfc cmd;
+	struct ethtool_rxnfc info;
+	size_t info_size = sizeof(info);
 
 	if (!dev->ethtool_ops->set_rxnfc)
 		return -EOPNOTSUPP;
 
-	if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
+	/* struct ethtool_rxnfc was originally defined for
+	 * ETHTOOL_{G,S}RXFH with only the cmd, flow_type and data
+	 * members.  User-space might still be using that
+	 * definition. */
+	if (cmd == ETHTOOL_SRXFH)
+		info_size = (offsetof(struct ethtool_rxnfc, data) +
+			     sizeof(info.data));
+
+	if (copy_from_user(&info, useraddr, info_size))
 		return -EFAULT;
 
-	return dev->ethtool_ops->set_rxnfc(dev, &cmd);
+	return dev->ethtool_ops->set_rxnfc(dev, &info);
 }
 
 static noinline_for_stack int ethtool_get_rxnfc(struct net_device *dev,
-					void __user *useraddr)
+						u32 cmd, void __user *useraddr)
 {
 	struct ethtool_rxnfc info;
+	size_t info_size = sizeof(info);
 	const struct ethtool_ops *ops = dev->ethtool_ops;
 	int ret;
 	void *rule_buf = NULL;
@@ -342,13 +352,22 @@ static noinline_for_stack int ethtool_get_rxnfc(struct net_device *dev,
 	if (!ops->get_rxnfc)
 		return -EOPNOTSUPP;
 
-	if (copy_from_user(&info, useraddr, sizeof(info)))
+	/* struct ethtool_rxnfc was originally defined for
+	 * ETHTOOL_{G,S}RXFH with only the cmd, flow_type and data
+	 * members.  User-space might still be using that
+	 * definition. */
+	if (cmd == ETHTOOL_GRXFH)
+		info_size = (offsetof(struct ethtool_rxnfc, data) +
+			     sizeof(info.data));
+
+	if (copy_from_user(&info, useraddr, info_size))
 		return -EFAULT;
 
 	if (info.cmd == ETHTOOL_GRXCLSRLALL) {
 		if (info.rule_cnt > 0) {
-			rule_buf = kmalloc(info.rule_cnt * sizeof(u32),
-					   GFP_USER);
+			if (info.rule_cnt <= KMALLOC_MAX_SIZE / sizeof(u32))
+				rule_buf = kmalloc(info.rule_cnt * sizeof(u32),
+						   GFP_USER);
 			if (!rule_buf)
 				return -ENOMEM;
 		}
@@ -359,7 +378,7 @@ static noinline_for_stack int ethtool_get_rxnfc(struct net_device *dev,
 		goto err_out;
 
 	ret = -EFAULT;
-	if (copy_to_user(useraddr, &info, sizeof(info)))
+	if (copy_to_user(useraddr, &info, info_size))
 		goto err_out;
 
 	if (rule_buf) {
@@ -1516,12 +1535,12 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
 	case ETHTOOL_GRXCLSRLCNT:
 	case ETHTOOL_GRXCLSRULE:
 	case ETHTOOL_GRXCLSRLALL:
-		rc = ethtool_get_rxnfc(dev, useraddr);
+		rc = ethtool_get_rxnfc(dev, ethcmd, useraddr);
 		break;
 	case ETHTOOL_SRXFH:
 	case ETHTOOL_SRXCLSRLDEL:
 	case ETHTOOL_SRXCLSRLINS:
-		rc = ethtool_set_rxnfc(dev, useraddr);
+		rc = ethtool_set_rxnfc(dev, ethcmd, useraddr);
 		break;
 	case ETHTOOL_GGRO:
 		rc = ethtool_get_gro(dev, useraddr);
diff --git a/net/core/gen_estimator.c b/net/core/gen_estimator.c
index cf8e70392fe0..785e5276a300 100644
--- a/net/core/gen_estimator.c
+++ b/net/core/gen_estimator.c
@@ -107,6 +107,7 @@ static DEFINE_RWLOCK(est_lock);
 
 /* Protects against soft lockup during large deletion */
 static struct rb_root est_root = RB_ROOT;
+static DEFINE_SPINLOCK(est_tree_lock);
 
 static void est_timer(unsigned long arg)
 {
@@ -201,7 +202,6 @@ struct gen_estimator *gen_find_node(const struct gnet_stats_basic_packed *bstats
  *
  * Returns 0 on success or a negative error code.
  *
- * NOTE: Called under rtnl_mutex
  */
 int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
		      struct gnet_stats_rate_est *rate_est,
@@ -232,6 +232,7 @@ int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
 	est->last_packets = bstats->packets;
 	est->avpps = rate_est->pps<<10;
 
+	spin_lock(&est_tree_lock);
 	if (!elist[idx].timer.function) {
 		INIT_LIST_HEAD(&elist[idx].list);
 		setup_timer(&elist[idx].timer, est_timer, idx);
@@ -242,6 +243,7 @@ int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
 
 	list_add_rcu(&est->list, &elist[idx].list);
 	gen_add_node(est);
+	spin_unlock(&est_tree_lock);
 
 	return 0;
 }
@@ -261,13 +263,13 @@ static void __gen_kill_estimator(struct rcu_head *head)
  *
 * Removes the rate estimator specified by &bstats and &rate_est.
  *
- * NOTE: Called under rtnl_mutex
  */
 void gen_kill_estimator(struct gnet_stats_basic_packed *bstats,
			struct gnet_stats_rate_est *rate_est)
 {
 	struct gen_estimator *e;
 
+	spin_lock(&est_tree_lock);
 	while ((e = gen_find_node(bstats, rate_est))) {
 		rb_erase(&e->node, &est_root);
 
@@ -278,6 +280,7 @@ void gen_kill_estimator(struct gnet_stats_basic_packed *bstats,
 		list_del_rcu(&e->list);
 		call_rcu(&e->e_rcu, __gen_kill_estimator);
 	}
+	spin_unlock(&est_tree_lock);
 }
 EXPORT_SYMBOL(gen_kill_estimator);
 
@@ -312,8 +315,14 @@ EXPORT_SYMBOL(gen_replace_estimator);
 bool gen_estimator_active(const struct gnet_stats_basic_packed *bstats,
			  const struct gnet_stats_rate_est *rate_est)
 {
+	bool res;
+
 	ASSERT_RTNL();
 
-	return gen_find_node(bstats, rate_est) != NULL;
+	spin_lock(&est_tree_lock);
+	res = gen_find_node(bstats, rate_est) != NULL;
+	spin_unlock(&est_tree_lock);
+
+	return res;
 }
 EXPORT_SYMBOL(gen_estimator_active);
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 2ad68da418df..1dacd7ba8dbb 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -2170,7 +2170,7 @@ static void spin(struct pktgen_dev *pkt_dev, ktime_t spin_until)
 	end_time = ktime_now();
 
 	pkt_dev->idle_acc += ktime_to_ns(ktime_sub(end_time, start_time));
-	pkt_dev->next_tx = ktime_add_ns(end_time, pkt_dev->delay);
+	pkt_dev->next_tx = ktime_add_ns(spin_until, pkt_dev->delay);
 }
 
 static inline void set_pkt_overhead(struct pktgen_dev *pkt_dev)
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index f8abf68e3988..34432b4e96bb 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -482,22 +482,22 @@ EXPORT_SYMBOL(consume_skb);
  *	reference count dropping and cleans up the skbuff as if it
  *	just came from __alloc_skb().
  */
-int skb_recycle_check(struct sk_buff *skb, int skb_size)
+bool skb_recycle_check(struct sk_buff *skb, int skb_size)
 {
 	struct skb_shared_info *shinfo;
 
 	if (irqs_disabled())
-		return 0;
+		return false;
 
 	if (skb_is_nonlinear(skb) || skb->fclone != SKB_FCLONE_UNAVAILABLE)
-		return 0;
+		return false;
 
 	skb_size = SKB_DATA_ALIGN(skb_size + NET_SKB_PAD);
 	if (skb_end_pointer(skb) - skb->head < skb_size)
-		return 0;
+		return false;
 
 	if (skb_shared(skb) || skb_cloned(skb))
-		return 0;
+		return false;
 
 	skb_release_head_state(skb);
 
@@ -509,7 +509,7 @@ int skb_recycle_check(struct sk_buff *skb, int skb_size)
 	skb->data = skb->head + NET_SKB_PAD;
 	skb_reset_tail_pointer(skb);
 
-	return 1;
+	return true;
 }
 EXPORT_SYMBOL(skb_recycle_check);
 
@@ -532,6 +532,7 @@ static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
 	new->ip_summed = old->ip_summed;
 	skb_copy_queue_mapping(new, old);
 	new->priority = old->priority;
+	new->deliver_no_wcard = old->deliver_no_wcard;
 #if defined(CONFIG_IP_VS) || defined(CONFIG_IP_VS_MODULE)
 	new->ipvs_property = old->ipvs_property;
 #endif
@@ -569,7 +570,6 @@ static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb)
 	C(len);
 	C(data_len);
 	C(mac_len);
-	C(rxhash);
 	n->hdr_len = skb->nohdr ? skb_headroom(skb) : skb->hdr_len;
 	n->cloned = 1;
 	n->nohdr = 0;
@@ -2965,6 +2965,34 @@ int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer)
 }
 EXPORT_SYMBOL_GPL(skb_cow_data);
 
+static void sock_rmem_free(struct sk_buff *skb)
+{
+	struct sock *sk = skb->sk;
+
+	atomic_sub(skb->truesize, &sk->sk_rmem_alloc);
+}
+
+/*
+ * Note: We dont mem charge error packets (no sk_forward_alloc changes)
+ */
+int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
+{
+	if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
+	    (unsigned)sk->sk_rcvbuf)
+		return -ENOMEM;
+
+	skb_orphan(skb);
+	skb->sk = sk;
+	skb->destructor = sock_rmem_free;
+	atomic_add(skb->truesize, &sk->sk_rmem_alloc);
+
+	skb_queue_tail(&sk->sk_error_queue, skb);
+	if (!sock_flag(sk, SOCK_DEAD))
+		sk->sk_data_ready(sk, skb->len);
+	return 0;
+}
+EXPORT_SYMBOL(sock_queue_err_skb);
+
 void skb_tstamp_tx(struct sk_buff *orig_skb,
		   struct skb_shared_hwtstamps *hwtstamps)
 {
@@ -2996,7 +3024,9 @@ void skb_tstamp_tx(struct sk_buff *orig_skb,
 	memset(serr, 0, sizeof(*serr));
 	serr->ee.ee_errno = ENOMSG;
 	serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING;
+
 	err = sock_queue_err_skb(sk, skb);
+
 	if (err)
 		kfree_skb(skb);
 }
diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig
index 8e3a1fd938ab..7c3a7d191249 100644
--- a/net/ipv4/Kconfig
+++ b/net/ipv4/Kconfig
@@ -303,7 +303,7 @@ config ARPD
	  If unsure, say N.
 
 config SYN_COOKIES
-	bool "IP: TCP syncookie support (disabled per default)"
+	bool "IP: TCP syncookie support"
	---help---
	  Normal TCP/IP networking is open to an attack known as "SYN
	  flooding". This denial-of-service attack prevents legitimate remote
@@ -328,13 +328,13 @@ config SYN_COOKIES
	  server is really overloaded. If this happens frequently better turn
	  them off.
 
-	  If you say Y here, note that SYN cookies aren't enabled by default;
-	  you can enable them by saying Y to "/proc file system support" and
+	  If you say Y here, you can disable SYN cookies at run time by
+	  saying Y to "/proc file system support" and
	  "Sysctl support" below and executing the command
 
-	  echo 1 >/proc/sys/net/ipv4/tcp_syncookies
+	  echo 0 > /proc/sys/net/ipv4/tcp_syncookies
 
-	  at boot time after the /proc file system has been mounted.
+	  after the /proc file system has been mounted.
 
	  If unsure, say N.
 
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 9a4a6c96cb0d..041d41df1224 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -873,8 +873,10 @@ int ip_append_data(struct sock *sk,
	    !exthdrlen)
		csummode = CHECKSUM_PARTIAL;
 
+	skb = skb_peek_tail(&sk->sk_write_queue);
+
	inet->cork.length += length;
-	if (((length> mtu) || !skb_queue_empty(&sk->sk_write_queue)) &&
+	if (((length > mtu) || (skb && skb_is_gso(skb))) &&
	    (sk->sk_protocol == IPPROTO_UDP) &&
	    (rt->u.dst.dev->features & NETIF_F_UFO)) {
		err = ip_ufo_append_data(sk, getfrag, from, length, hh_len,
@@ -892,7 +894,7 @@
	 * adding appropriate IP header.
	 */
 
-	if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL)
+	if (!skb)
		goto alloc_new_skb;
 
	while (length > 0) {
@@ -1121,7 +1123,8 @@ ssize_t ip_append_page(struct sock *sk, struct page *page,
		return -EINVAL;
 
	inet->cork.length += size;
-	if ((sk->sk_protocol == IPPROTO_UDP) &&
+	if ((size + skb->len > mtu) &&
+	    (sk->sk_protocol == IPPROTO_UDP) &&
	    (rt->u.dst.dev->features & NETIF_F_UFO)) {
		skb_shinfo(skb)->gso_size = mtu - fragheaderlen;
		skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 856123fe32f9..757f25eb9b4b 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -267,8 +267,10 @@ static void __net_exit ipmr_rules_exit(struct net *net)
 {
	struct mr_table *mrt, *next;
 
-	list_for_each_entry_safe(mrt, next, &net->ipv4.mr_tables, list)
+	list_for_each_entry_safe(mrt, next, &net->ipv4.mr_tables, list) {
+		list_del(&mrt->list);
		kfree(mrt);
+	}
	fib_rules_unregister(net->ipv4.mr_rules_ops);
 }
 #else
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
index 5c24db4a3c91..9f6b22206c52 100644
--- a/net/ipv4/syncookies.c
+++ b/net/ipv4/syncookies.c
@@ -347,7 +347,7 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
			    { .sport = th->dest,
			      .dport = th->source } } };
	security_req_classify_flow(req, &fl);
-	if (ip_route_output_key(&init_net, &rt, &fl)) {
+	if (ip_route_output_key(sock_net(sk), &rt, &fl)) {
		reqsk_free(req);
		goto out;
	}
diff --git a/net/ipv4/tcp_hybla.c b/net/ipv4/tcp_hybla.c
index c209e054a634..377bc9349371 100644
--- a/net/ipv4/tcp_hybla.c
+++ b/net/ipv4/tcp_hybla.c
@@ -126,8 +126,8 @@ static void hybla_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
		 *     calculate 2^fract in a <<7 value.
		 */
		is_slowstart = 1;
-		increment = ((1 << ca->rho) * hybla_fraction(rho_fractions))
-			- 128;
+		increment = ((1 << min(ca->rho, 16U)) *
+			hybla_fraction(rho_fractions)) - 128;
	} else {
		/*
		 * congestion avoidance
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 3e6dafcb1071..548d575e6cc6 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -2639,7 +2639,7 @@ static void DBGUNDO(struct sock *sk, const char *msg)
	if (sk->sk_family == AF_INET) {
		printk(KERN_DEBUG "Undo %s %pI4/%u c%u l%u ss%u/%u p%u\n",
		       msg,
-		       &inet->daddr, ntohs(inet->dport),
+		       &inet->inet_daddr, ntohs(inet->inet_dport),
		       tp->snd_cwnd, tcp_left_out(tp),
		       tp->snd_ssthresh, tp->prior_ssthresh,
		       tp->packets_out);
@@ -2649,7 +2649,7 @@ static void DBGUNDO(struct sock *sk, const char *msg)
		struct ipv6_pinfo *np = inet6_sk(sk);
		printk(KERN_DEBUG "Undo %s %pI6/%u c%u l%u ss%u/%u p%u\n",
		       msg,
-		       &np->daddr, ntohs(inet->dport),
+		       &np->daddr, ntohs(inet->inet_dport),
		       tp->snd_cwnd, tcp_left_out(tp),
		       tp->snd_ssthresh, tp->prior_ssthresh,
		       tp->packets_out);
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 202cf09c4cd4..fe193e53af44 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1555,6 +1555,7 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
 #endif
 
	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
+		sock_rps_save_rxhash(sk, skb->rxhash);
		TCP_CHECK_TIMER(sk);
		if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len)) {
			rsk = sk;
@@ -1579,7 +1580,9 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
			}
			return 0;
		}
-	}
+	} else
+		sock_rps_save_rxhash(sk, skb->rxhash);
+
 
	TCP_CHECK_TIMER(sk);
	if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len)) {
@@ -1672,8 +1675,6 @@ process:
 
	skb->dev = NULL;
 
-	sock_rps_save_rxhash(sk, skb->rxhash);
-
	bh_lock_sock_nested(sk);
	ret = 0;
	if (!sock_owned_by_user(sk)) {
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 58585748bdac..eec4ff456e33 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -633,9 +633,9 @@ void __udp4_lib_err(struct sk_buff *skb, u32 info, struct udp_table *udptable)
	if (!inet->recverr) {
		if (!harderr || sk->sk_state != TCP_ESTABLISHED)
			goto out;
-	} else {
+	} else
		ip_icmp_error(sk, skb, err, uh->dest, info, (u8 *)(uh+1));
-	}
+
	sk->sk_err = err;
	sk->sk_error_report(sk);
 out:
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index ce7992982557..03e62f94ff8e 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -483,7 +483,7 @@ route_done:
		      np->tclass, NULL, &fl, (struct rt6_info*)dst,
		      MSG_DONTWAIT, np->dontfrag);
	if (err) {
-		ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTMSGS);
+		ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTERRORS);
		ip6_flush_pending_frames(sk);
		goto out_put;
	}
@@ -565,7 +565,7 @@ static void icmpv6_echo_reply(struct sk_buff *skb)
			np->dontfrag);
 
	if (err) {
-		ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTMSGS);
+		ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTERRORS);
		ip6_flush_pending_frames(sk);
		goto out_put;
	}
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index 073071f2b75b..66078dad7fe8 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -120,7 +120,7 @@ static void mroute_clean_tables(struct mr6_table *mrt);
 static void ipmr_expire_process(unsigned long arg);
 
 #ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
-#define ip6mr_for_each_table(mrt, met) \
+#define ip6mr_for_each_table(mrt, net) \
	list_for_each_entry_rcu(mrt, &net->ipv6.mr6_tables, list)
 
 static struct mr6_table *ip6mr_get_table(struct net *net, u32 id)
@@ -254,8 +254,10 @@ static void __net_exit ip6mr_rules_exit(struct net *net)
 {
	struct mr6_table *mrt, *next;
 
-	list_for_each_entry_safe(mrt, next, &net->ipv6.mr6_tables, list)
+	list_for_each_entry_safe(mrt, next, &net->ipv6.mr6_tables, list) {
+		list_del(&mrt->list);
		ip6mr_free_table(mrt);
+	}
	fib_rules_unregister(net->ipv6.mr6_rules_ops);
 }
 #else
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index 59f1881968c7..ab1622d7d409 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -1356,7 +1356,10 @@ static struct sk_buff *mld_newpack(struct net_device *dev, int size)
		     IPV6_TLV_PADN, 0 };
 
	/* we assume size > sizeof(ra) here */
-	skb = sock_alloc_send_skb(sk, size + LL_ALLOCATED_SPACE(dev), 1, &err);
+	size += LL_ALLOCATED_SPACE(dev);
+	/* limit our allocations to order-0 page */
+	size = min_t(int, size, SKB_MAX_ORDER(0, 0));
+	skb = sock_alloc_send_skb(sk, size, 1, &err);
 
	if (!skb)
		return NULL;
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index 0abdc242ddb7..2efef52fb461 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -586,6 +586,7 @@ static void ndisc_send_na(struct net_device *dev, struct neighbour *neigh,
			src_addr = solicited_addr;
		if (ifp->flags & IFA_F_OPTIMISTIC)
			override = 0;
+		inc_opt |= ifp->idev->cnf.force_tllao;
		in6_ifa_put(ifp);
	} else {
		if (ipv6_dev_get_saddr(dev_net(dev), dev, daddr,
@@ -599,7 +600,6 @@ static void ndisc_send_na(struct net_device *dev, struct neighbour *neigh,
	icmp6h.icmp6_solicited = solicited;
	icmp6h.icmp6_override = override;
 
-	inc_opt |= ifp->idev->cnf.force_tllao;
	__ndisc_send(dev, neigh, daddr, src_addr,
		     &icmp6h, solicited_addr,
		     inc_opt ? ND_OPT_TARGET_LL_ADDR : 0);
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 294cbe8b0725..252d76199c41 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -814,7 +814,7 @@ struct dst_entry * ip6_route_output(struct net *net, struct sock *sk,
 {
	int flags = 0;
 
-	if (fl->oif || rt6_need_strict(&fl->fl6_dst))
+	if ((sk && sk->sk_bound_dev_if) || rt6_need_strict(&fl->fl6_dst))
		flags |= RT6_LOOKUP_F_IFACE;
 
	if (!ipv6_addr_any(&fl->fl6_src))
diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c
index c163d0a149f4..98258b7341e3 100644
--- a/net/mac80211/agg-tx.c
+++ b/net/mac80211/agg-tx.c
@@ -332,14 +332,16 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid)
					IEEE80211_QUEUE_STOP_REASON_AGGREGATION);
 
	spin_unlock(&local->ampdu_lock);
-	spin_unlock_bh(&sta->lock);
 
-	/* send an addBA request */
+	/* prepare tid data */
	sta->ampdu_mlme.dialog_token_allocator++;
	sta->ampdu_mlme.tid_tx[tid]->dialog_token =
			sta->ampdu_mlme.dialog_token_allocator;
	sta->ampdu_mlme.tid_tx[tid]->ssn = start_seq_num;
 
+	spin_unlock_bh(&sta->lock);
+
+	/* send AddBA request */
	ieee80211_send_addba_request(sdata, pubsta->addr, tid,
				     sta->ampdu_mlme.tid_tx[tid]->dialog_token,
				     sta->ampdu_mlme.tid_tx[tid]->ssn,
diff --git a/net/mac80211/chan.c b/net/mac80211/chan.c
index 5d218c530a4e..32be11e4c4d9 100644
--- a/net/mac80211/chan.c
+++ b/net/mac80211/chan.c
@@ -5,7 +5,7 @@
 #include <linux/nl80211.h>
 #include "ieee80211_i.h"
 
-enum ieee80211_chan_mode
+static enum ieee80211_chan_mode
 __ieee80211_get_channel_mode(struct ieee80211_local *local,
			     struct ieee80211_sub_if_data *ignore)
 {
diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h
index 4f2271316650..9c1da0809160 100644
--- a/net/mac80211/driver-ops.h
+++ b/net/mac80211/driver-ops.h
@@ -349,7 +349,7 @@ static inline int drv_get_survey(struct ieee80211_local *local, int idx,
				 struct survey_info *survey)
 {
	int ret = -EOPNOTSUPP;
-	if (local->ops->conf_tx)
+	if (local->ops->get_survey)
		ret = local->ops->get_survey(&local->hw, idx, survey);
	/* trace_drv_get_survey(local, idx, survey, ret); */
	return ret;
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 0839c4e8fd2e..f803f8b72a93 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -1692,14 +1692,52 @@ static void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
		rma = ieee80211_rx_mgmt_disassoc(sdata, mgmt, skb->len);
		break;
	case IEEE80211_STYPE_ACTION:
-		if (mgmt->u.action.category != WLAN_CATEGORY_SPECTRUM_MGMT)
+		switch (mgmt->u.action.category) {
+		case WLAN_CATEGORY_BACK: {
+			struct ieee80211_local *local = sdata->local;
+			int len = skb->len;
+			struct sta_info *sta;
+
+			rcu_read_lock();
+			sta = sta_info_get(sdata, mgmt->sa);
+			if (!sta) {
+				rcu_read_unlock();
+				break;
+			}
+
+			local_bh_disable();
+
+			switch (mgmt->u.action.u.addba_req.action_code) {
+			case WLAN_ACTION_ADDBA_REQ:
+				if (len < (IEEE80211_MIN_ACTION_SIZE +
+					   sizeof(mgmt->u.action.u.addba_req)))
+					break;
+				ieee80211_process_addba_request(local, sta, mgmt, len);
+				break;
+			case WLAN_ACTION_ADDBA_RESP:
+				if (len < (IEEE80211_MIN_ACTION_SIZE +
+					   sizeof(mgmt->u.action.u.addba_resp)))
+					break;
+				ieee80211_process_addba_resp(local, sta, mgmt, len);
+				break;
+			case WLAN_ACTION_DELBA:
+				if (len < (IEEE80211_MIN_ACTION_SIZE +
+					   sizeof(mgmt->u.action.u.delba)))
+					break;
+				ieee80211_process_delba(sdata, sta, mgmt, len);
+				break;
+			}
+			local_bh_enable();
+			rcu_read_unlock();
			break;
-
-		ieee80211_sta_process_chanswitch(sdata,
-				&mgmt->u.action.u.chan_switch.sw_elem,
-				(void *)ifmgd->associated->priv,
-				rx_status->mactime);
-		break;
+		}
+		case WLAN_CATEGORY_SPECTRUM_MGMT:
+			ieee80211_sta_process_chanswitch(sdata,
+					&mgmt->u.action.u.chan_switch.sw_elem,
+					(void *)ifmgd->associated->priv,
+					rx_status->mactime);
+			break;
+		}
	}
	mutex_unlock(&ifmgd->mtx);
 
@@ -1722,9 +1760,45 @@ static void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
	mutex_unlock(&ifmgd->mtx);
 
	if (skb->len >= 24 + 2 /* mgmt + deauth reason */ &&
-	    (fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_DEAUTH)
-		cfg80211_send_deauth(sdata->dev, (u8 *)mgmt, skb->len);
+	    (fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_DEAUTH) {
+		struct ieee80211_local *local = sdata->local;
+		struct ieee80211_work *wk;
+
+		mutex_lock(&local->work_mtx);
+		list_for_each_entry(wk, &local->work_list, list) {
+			if (wk->sdata != sdata)
+				continue;
+
+			if (wk->type != IEEE80211_WORK_ASSOC)
+				continue;
+
+			if (memcmp(mgmt->bssid, wk->filter_ta, ETH_ALEN))
+				continue;
+			if (memcmp(mgmt->sa, wk->filter_ta, ETH_ALEN))
+				continue;
 
+			/*
+			 * Printing the message only here means we can't
+			 * spuriously print it, but it also means that it
+			 * won't be printed when the frame comes in before
+			 * we even tried to associate or in similar cases.
+			 *
+			 * Ultimately, I suspect cfg80211 should print the
+			 * messages instead.
+			 */
+			printk(KERN_DEBUG
+			       "%s: deauthenticated from %pM (Reason: %u)\n",
+			       sdata->name, mgmt->bssid,
+			       le16_to_cpu(mgmt->u.deauth.reason_code));
+
+			list_del_rcu(&wk->list);
+			free_work(wk);
+			break;
+		}
+		mutex_unlock(&local->work_mtx);
+
+		cfg80211_send_deauth(sdata->dev, (u8 *)mgmt, skb->len);
+	}
 out:
	kfree_skb(skb);
 }
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 6e2a7bcd8cb8..be9abc2e6348 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -1818,17 +1818,26 @@ ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx, struct sk_buff_head *frames)
		return RX_CONTINUE;
 
	if (ieee80211_is_back_req(bar->frame_control)) {
+		struct {
+			__le16 control, start_seq_num;
+		} __packed bar_data;
+
		if (!rx->sta)
			return RX_DROP_MONITOR;
+
+		if (skb_copy_bits(skb, offsetof(struct ieee80211_bar, control),
+				  &bar_data, sizeof(bar_data)))
+			return RX_DROP_MONITOR;
+
		spin_lock(&rx->sta->lock);
-		tid = le16_to_cpu(bar->control) >> 12;
+		tid = le16_to_cpu(bar_data.control) >> 12;
		if (!rx->sta->ampdu_mlme.tid_active_rx[tid]) {
			spin_unlock(&rx->sta->lock);
			return RX_DROP_MONITOR;
		}
		tid_agg_rx = rx->sta->ampdu_mlme.tid_rx[tid];
 
-		start_seq_num = le16_to_cpu(bar->start_seq_num) >> 4;
+		start_seq_num = le16_to_cpu(bar_data.start_seq_num) >> 4;
 
		/* reset session timer */
		if (tid_agg_rx->timeout)
@@ -1935,6 +1944,9 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
		if (len < IEEE80211_MIN_ACTION_SIZE + 1)
			break;
 
+		if (sdata->vif.type == NL80211_IFTYPE_STATION)
+			return ieee80211_sta_rx_mgmt(sdata, rx->skb);
+
		switch (mgmt->u.action.u.addba_req.action_code) {
		case WLAN_ACTION_ADDBA_REQ:
			if (len < (IEEE80211_MIN_ACTION_SIZE +
diff --git a/net/mac80211/work.c b/net/mac80211/work.c
index be3d4a698692..b025dc7bb0fd 100644
--- a/net/mac80211/work.c
+++ b/net/mac80211/work.c
@@ -715,7 +715,7 @@ static void ieee80211_work_rx_queued_mgmt(struct ieee80211_local *local,
	struct ieee80211_rx_status *rx_status;
	struct ieee80211_mgmt *mgmt;
	struct ieee80211_work *wk;
-	enum work_action rma;
+	enum work_action rma = WORK_ACT_NONE;
	u16 fc;
 
	rx_status = (struct ieee80211_rx_status *) skb->cb;
diff --git a/net/phonet/pep.c b/net/phonet/pep.c
index 7b048a35ca58..94d72e85a475 100644
--- a/net/phonet/pep.c
+++ b/net/phonet/pep.c
@@ -1045,12 +1045,12 @@ static void pep_sock_unhash(struct sock *sk)
	lock_sock(sk);
	if ((1 << sk->sk_state) & ~(TCPF_CLOSE|TCPF_LISTEN)) {
		skparent = pn->listener;
-		sk_del_node_init(sk);
		release_sock(sk);
 
-		sk = skparent;
		pn = pep_sk(skparent);
-		lock_sock(sk);
+		lock_sock(skparent);
+		sk_del_node_init(sk);
+		sk = skparent;
	}
	/* Unhash a listening sock only when it is closed
	 * and all of its active connected pipes are closed. */
diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c
index 10ed0d55f759..f68832798db2 100644
--- a/net/rds/ib_cm.c
+++ b/net/rds/ib_cm.c
@@ -475,6 +475,7 @@ int rds_ib_cm_handle_connect(struct rdma_cm_id *cm_id,
	err = rds_ib_setup_qp(conn);
	if (err) {
		rds_ib_conn_error(conn, "rds_ib_setup_qp failed (%d)\n", err);
+		mutex_unlock(&conn->c_cm_lock);
		goto out;
	}
 
diff --git a/net/rds/iw_cm.c b/net/rds/iw_cm.c
index a9d951b4fbae..b5dd6ac39be8 100644
--- a/net/rds/iw_cm.c
+++ b/net/rds/iw_cm.c
@@ -452,6 +452,7 @@ int rds_iw_cm_handle_connect(struct rdma_cm_id *cm_id,
	err = rds_iw_setup_qp(conn);
	if (err) {
		rds_iw_conn_error(conn, "rds_iw_setup_qp failed (%d)\n", err);
+		mutex_unlock(&conn->c_cm_lock);
		goto out;
	}
 
diff --git a/net/sched/act_nat.c b/net/sched/act_nat.c
index d885ba311564..570949417f38 100644
--- a/net/sched/act_nat.c
+++ b/net/sched/act_nat.c
@@ -159,6 +159,9 @@ static int tcf_nat(struct sk_buff *skb, struct tc_action *a,
			iph->daddr = new_addr;
 
		csum_replace4(&iph->check, addr, new_addr);
+	} else if ((iph->frag_off & htons(IP_OFFSET)) ||
+		   iph->protocol != IPPROTO_ICMP) {
+		goto out;
	}
 
	ihl = iph->ihl * 4;
@@ -247,6 +250,7 @@ static int tcf_nat(struct sk_buff *skb, struct tc_action *a,
		break;
	}
 
+out:
	return action;
 
 drop:
diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c index fdbd0b7bd840..50e3d945e1f4 100644 --- a/net/sched/act_pedit.c +++ b/net/sched/act_pedit.c | |||
@@ -125,7 +125,7 @@ static int tcf_pedit(struct sk_buff *skb, struct tc_action *a, | |||
125 | { | 125 | { |
126 | struct tcf_pedit *p = a->priv; | 126 | struct tcf_pedit *p = a->priv; |
127 | int i, munged = 0; | 127 | int i, munged = 0; |
128 | u8 *pptr; | 128 | unsigned int off; |
129 | 129 | ||
130 | if (!(skb->tc_verd & TC_OK2MUNGE)) { | 130 | if (!(skb->tc_verd & TC_OK2MUNGE)) { |
131 | /* should we set skb->cloned? */ | 131 | /* should we set skb->cloned? */ |
@@ -134,7 +134,7 @@ static int tcf_pedit(struct sk_buff *skb, struct tc_action *a, | |||
134 | } | 134 | } |
135 | } | 135 | } |
136 | 136 | ||
137 | pptr = skb_network_header(skb); | 137 | off = skb_network_offset(skb); |
138 | 138 | ||
139 | spin_lock(&p->tcf_lock); | 139 | spin_lock(&p->tcf_lock); |
140 | 140 | ||
@@ -144,17 +144,17 @@ static int tcf_pedit(struct sk_buff *skb, struct tc_action *a, | |||
144 | struct tc_pedit_key *tkey = p->tcfp_keys; | 144 | struct tc_pedit_key *tkey = p->tcfp_keys; |
145 | 145 | ||
146 | for (i = p->tcfp_nkeys; i > 0; i--, tkey++) { | 146 | for (i = p->tcfp_nkeys; i > 0; i--, tkey++) { |
147 | u32 *ptr; | 147 | u32 *ptr, _data; |
148 | int offset = tkey->off; | 148 | int offset = tkey->off; |
149 | 149 | ||
150 | if (tkey->offmask) { | 150 | if (tkey->offmask) { |
151 | if (skb->len > tkey->at) { | 151 | char *d, _d; |
152 | char *j = pptr + tkey->at; | 152 | |
153 | offset += ((*j & tkey->offmask) >> | 153 | d = skb_header_pointer(skb, off + tkey->at, 1, |
154 | tkey->shift); | 154 | &_d); |
155 | } else { | 155 | if (!d) |
156 | goto bad; | 156 | goto bad; |
157 | } | 157 | offset += (*d & tkey->offmask) >> tkey->shift; |
158 | } | 158 | } |
159 | 159 | ||
160 | if (offset % 4) { | 160 | if (offset % 4) { |
@@ -169,9 +169,13 @@ static int tcf_pedit(struct sk_buff *skb, struct tc_action *a, | |||
169 | goto bad; | 169 | goto bad; |
170 | } | 170 | } |
171 | 171 | ||
172 | ptr = (u32 *)(pptr+offset); | 172 | ptr = skb_header_pointer(skb, off + offset, 4, &_data); |
173 | if (!ptr) | ||
174 | goto bad; | ||
173 | /* just do it, baby */ | 175 | /* just do it, baby */ |
174 | *ptr = ((*ptr & tkey->mask) ^ tkey->val); | 176 | *ptr = ((*ptr & tkey->mask) ^ tkey->val); |
177 | if (ptr == &_data) | ||
178 | skb_store_bits(skb, off + offset, ptr, 4); | ||
175 | munged++; | 179 | munged++; |
176 | } | 180 | } |
177 | 181 | ||
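Review note: the conversion from pptr arithmetic to offsets makes pedit safe on non-linear skbs. skb_header_pointer() returns a pointer into the linear data when the requested bytes are there, and otherwise copies them into the caller's stack buffer; skb_store_bits() writes the edit back in the copy case. The read-modify-write pattern, reduced to its core:

        u32 *ptr, _data;

        ptr = skb_header_pointer(skb, off + offset, sizeof(_data), &_data);
        if (!ptr)
                goto bad;               /* offset beyond the packet */

        *ptr = (*ptr & tkey->mask) ^ tkey->val;

        if (ptr == &_data)              /* bytes were not linear: */
                skb_store_bits(skb, off + offset, ptr, sizeof(_data));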
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c index 96275422c619..4f522143811e 100644 --- a/net/sched/cls_u32.c +++ b/net/sched/cls_u32.c | |||
@@ -98,11 +98,11 @@ static int u32_classify(struct sk_buff *skb, struct tcf_proto *tp, struct tcf_re | |||
98 | { | 98 | { |
99 | struct { | 99 | struct { |
100 | struct tc_u_knode *knode; | 100 | struct tc_u_knode *knode; |
101 | u8 *ptr; | 101 | unsigned int off; |
102 | } stack[TC_U32_MAXDEPTH]; | 102 | } stack[TC_U32_MAXDEPTH]; |
103 | 103 | ||
104 | struct tc_u_hnode *ht = (struct tc_u_hnode*)tp->root; | 104 | struct tc_u_hnode *ht = (struct tc_u_hnode*)tp->root; |
105 | u8 *ptr = skb_network_header(skb); | 105 | unsigned int off = skb_network_offset(skb); |
106 | struct tc_u_knode *n; | 106 | struct tc_u_knode *n; |
107 | int sdepth = 0; | 107 | int sdepth = 0; |
108 | int off2 = 0; | 108 | int off2 = 0; |
@@ -134,8 +134,14 @@ next_knode: | |||
134 | #endif | 134 | #endif |
135 | 135 | ||
136 | for (i = n->sel.nkeys; i>0; i--, key++) { | 136 | for (i = n->sel.nkeys; i>0; i--, key++) { |
137 | 137 | unsigned int toff; | |
138 | if ((*(__be32*)(ptr+key->off+(off2&key->offmask))^key->val)&key->mask) { | 138 | __be32 *data, _data; |
139 | |||
140 | toff = off + key->off + (off2 & key->offmask); | ||
141 | data = skb_header_pointer(skb, toff, 4, &_data); | ||
142 | if (!data) | ||
143 | goto out; | ||
144 | if ((*data ^ key->val) & key->mask) { | ||
139 | n = n->next; | 145 | n = n->next; |
140 | goto next_knode; | 146 | goto next_knode; |
141 | } | 147 | } |
@@ -174,29 +180,45 @@ check_terminal: | |||
174 | if (sdepth >= TC_U32_MAXDEPTH) | 180 | if (sdepth >= TC_U32_MAXDEPTH) |
175 | goto deadloop; | 181 | goto deadloop; |
176 | stack[sdepth].knode = n; | 182 | stack[sdepth].knode = n; |
177 | stack[sdepth].ptr = ptr; | 183 | stack[sdepth].off = off; |
178 | sdepth++; | 184 | sdepth++; |
179 | 185 | ||
180 | ht = n->ht_down; | 186 | ht = n->ht_down; |
181 | sel = 0; | 187 | sel = 0; |
182 | if (ht->divisor) | 188 | if (ht->divisor) { |
183 | sel = ht->divisor&u32_hash_fold(*(__be32*)(ptr+n->sel.hoff), &n->sel,n->fshift); | 189 | __be32 *data, _data; |
184 | 190 | ||
191 | data = skb_header_pointer(skb, off + n->sel.hoff, 4, | ||
192 | &_data); | ||
193 | if (!data) | ||
194 | goto out; | ||
195 | sel = ht->divisor & u32_hash_fold(*data, &n->sel, | ||
196 | n->fshift); | ||
197 | } | ||
185 | if (!(n->sel.flags&(TC_U32_VAROFFSET|TC_U32_OFFSET|TC_U32_EAT))) | 198 | if (!(n->sel.flags&(TC_U32_VAROFFSET|TC_U32_OFFSET|TC_U32_EAT))) |
186 | goto next_ht; | 199 | goto next_ht; |
187 | 200 | ||
188 | if (n->sel.flags&(TC_U32_OFFSET|TC_U32_VAROFFSET)) { | 201 | if (n->sel.flags&(TC_U32_OFFSET|TC_U32_VAROFFSET)) { |
189 | off2 = n->sel.off + 3; | 202 | off2 = n->sel.off + 3; |
190 | if (n->sel.flags&TC_U32_VAROFFSET) | 203 | if (n->sel.flags & TC_U32_VAROFFSET) { |
191 | off2 += ntohs(n->sel.offmask & *(__be16*)(ptr+n->sel.offoff)) >>n->sel.offshift; | 204 | __be16 *data, _data; |
205 | |||
206 | data = skb_header_pointer(skb, | ||
207 | off + n->sel.offoff, | ||
208 | 2, &_data); | ||
209 | if (!data) | ||
210 | goto out; | ||
211 | off2 += ntohs(n->sel.offmask & *data) >> | ||
212 | n->sel.offshift; | ||
213 | } | ||
192 | off2 &= ~3; | 214 | off2 &= ~3; |
193 | } | 215 | } |
194 | if (n->sel.flags&TC_U32_EAT) { | 216 | if (n->sel.flags&TC_U32_EAT) { |
195 | ptr += off2; | 217 | off += off2; |
196 | off2 = 0; | 218 | off2 = 0; |
197 | } | 219 | } |
198 | 220 | ||
199 | if (ptr < skb_tail_pointer(skb)) | 221 | if (off < skb->len) |
200 | goto next_ht; | 222 | goto next_ht; |
201 | } | 223 | } |
202 | 224 | ||
@@ -204,9 +226,10 @@ check_terminal: | |||
204 | if (sdepth--) { | 226 | if (sdepth--) { |
205 | n = stack[sdepth].knode; | 227 | n = stack[sdepth].knode; |
206 | ht = n->ht_up; | 228 | ht = n->ht_up; |
207 | ptr = stack[sdepth].ptr; | 229 | off = stack[sdepth].off; |
208 | goto check_terminal; | 230 | goto check_terminal; |
209 | } | 231 | } |
232 | out: | ||
210 | return -1; | 233 | return -1; |
211 | 234 | ||
212 | deadloop: | 235 | deadloop: |
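Review note: same skb_header_pointer() conversion as in act_pedit, applied to all three places u32_classify() reads packet bytes: the key comparison, the hash fold, and the variable-offset fetch. Two consequences worth flagging: an offset falling outside the packet now fails the walk through the new out: label instead of reading past the linear buffer, and the end-of-loop bounds test moves from pointers (ptr < skb_tail_pointer(skb)) to offsets (off < skb->len), which also accounts for paged data beyond the linear tail. One key comparison in the new form:

        __be32 *data, _data;

        data = skb_header_pointer(skb, off + key->off + (off2 & key->offmask),
                                  sizeof(_data), &_data);
        if (!data)
                goto out;               /* key lies outside the packet */
        if ((*data ^ key->val) & key->mask) {
                n = n->next;            /* mismatch: try the next knode */
                goto next_knode;
        }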
diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c index 3415b6ce1c0a..807643bdcbac 100644 --- a/net/sched/sch_teql.c +++ b/net/sched/sch_teql.c | |||
@@ -449,6 +449,7 @@ static __init void teql_master_setup(struct net_device *dev) | |||
449 | dev->tx_queue_len = 100; | 449 | dev->tx_queue_len = 100; |
450 | dev->flags = IFF_NOARP; | 450 | dev->flags = IFF_NOARP; |
451 | dev->hard_header_len = LL_MAX_HEADER; | 451 | dev->hard_header_len = LL_MAX_HEADER; |
452 | dev->priv_flags &= ~IFF_XMIT_DST_RELEASE; | ||
452 | } | 453 | } |
453 | 454 | ||
454 | static LIST_HEAD(master_dev_list); | 455 | static LIST_HEAD(master_dev_list); |
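Review note: as I understand the flag, IFF_XMIT_DST_RELEASE lets the core drop the skb's dst before calling ->ndo_start_xmit(). teql's transmit path resolves the slave's neighbour through skb_dst(), so the master has to opt out; clearing the flag in the setup callback keeps the dst attached across the xmit call:

        dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;  /* xmit still needs skb_dst() */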
diff --git a/net/xfrm/xfrm_output.c b/net/xfrm/xfrm_output.c index 6a329158bdfa..a3cca0a94346 100644 --- a/net/xfrm/xfrm_output.c +++ b/net/xfrm/xfrm_output.c | |||
@@ -95,13 +95,13 @@ resume: | |||
95 | goto error_nolock; | 95 | goto error_nolock; |
96 | } | 96 | } |
97 | 97 | ||
98 | dst = dst_pop(dst); | 98 | dst = skb_dst_pop(skb); |
99 | if (!dst) { | 99 | if (!dst) { |
100 | XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTERROR); | 100 | XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTERROR); |
101 | err = -EHOSTUNREACH; | 101 | err = -EHOSTUNREACH; |
102 | goto error_nolock; | 102 | goto error_nolock; |
103 | } | 103 | } |
104 | skb_dst_set(skb, dst); | 104 | skb_dst_set_noref(skb, dst); |
105 | x = dst->xfrm; | 105 | x = dst->xfrm; |
106 | } while (x && !(x->outer_mode->flags & XFRM_MODE_FLAG_TUNNEL)); | 106 | } while (x && !(x->outer_mode->flags & XFRM_MODE_FLAG_TUNNEL)); |
107 | 107 | ||
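Review note: this pairs with the noref-dst work. skb_dst_pop() replaces dst_pop() so the skb's (possibly refcount-less) dst is detached through the skb helpers, and skb_dst_set_noref() attaches the child without bumping its refcount, skipping an atomic operation per transform in this hot path. The cost is a rule the caller must honor: a noref dst is only valid while its owner keeps it alive, in practice inside the current RCU/softirq section. The loop step as the hunk leaves it:

        dst = skb_dst_pop(skb);          /* detach, noref-aware */
        if (!dst) {
                XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTERROR);
                err = -EHOSTUNREACH;
                goto error_nolock;
        }
        skb_dst_set_noref(skb, dst);     /* attach without a refcount */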
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index d965a2bad8d3..af1c173be4ad 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c | |||
@@ -2153,6 +2153,7 @@ int __xfrm_route_forward(struct sk_buff *skb, unsigned short family) | |||
2153 | return 0; | 2153 | return 0; |
2154 | } | 2154 | } |
2155 | 2155 | ||
2156 | skb_dst_force(skb); | ||
2156 | dst = skb_dst(skb); | 2157 | dst = skb_dst(skb); |
2157 | 2158 | ||
2158 | res = xfrm_lookup(net, &dst, &fl, NULL, 0) == 0; | 2159 | res = xfrm_lookup(net, &dst, &fl, NULL, 0) == 0; |
@@ -2299,7 +2300,8 @@ int xfrm_bundle_ok(struct xfrm_policy *pol, struct xfrm_dst *first, | |||
2299 | return 0; | 2300 | return 0; |
2300 | if (xdst->xfrm_genid != dst->xfrm->genid) | 2301 | if (xdst->xfrm_genid != dst->xfrm->genid) |
2301 | return 0; | 2302 | return 0; |
2302 | if (xdst->policy_genid != atomic_read(&xdst->pols[0]->genid)) | 2303 | if (xdst->num_pols > 0 && |
2304 | xdst->policy_genid != atomic_read(&xdst->pols[0]->genid)) | ||
2303 | return 0; | 2305 | return 0; |
2304 | 2306 | ||
2305 | if (strict && fl && | 2307 | if (strict && fl && |
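Review note: two independent guards here. First, __xfrm_route_forward() may be holding a noref dst (see the xfrm_output change above); skb_dst_force() upgrades it to a real reference before xfrm_lookup() can stash it somewhere that outlives the RCU section, and it is a no-op when the dst is already refcounted. Second, xfrm_bundle_ok() now checks num_pols > 0 before reading xdst->pols[0], so a bundle with an empty policy array can no longer feed a bogus pointer into the genid comparison. The forward-path guard in isolation:

        skb_dst_force(skb);              /* noref -> refcounted, if needed */
        dst = skb_dst(skb);

        res = xfrm_lookup(net, &dst, &fl, NULL, 0) == 0;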