Diffstat (limited to 'net')
44 files changed, 477 insertions, 316 deletions
diff --git a/net/bluetooth/hci_sysfs.c b/net/bluetooth/hci_sysfs.c
index 1a79a6c7e30e..cafb55b0cea5 100644
--- a/net/bluetooth/hci_sysfs.c
+++ b/net/bluetooth/hci_sysfs.c
@@ -3,6 +3,7 @@
 #include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/debugfs.h>
+#include <linux/seq_file.h>

 #include <net/bluetooth/bluetooth.h>
 #include <net/bluetooth/hci_core.h>
@@ -405,20 +406,11 @@ static struct device_type bt_host = {
     .release = bt_host_release,
 };

-static int inquiry_cache_open(struct inode *inode, struct file *file)
-{
-    file->private_data = inode->i_private;
-    return 0;
-}
-
-static ssize_t inquiry_cache_read(struct file *file, char __user *userbuf,
-                                  size_t count, loff_t *ppos)
+static int inquiry_cache_show(struct seq_file *f, void *p)
 {
-    struct hci_dev *hdev = file->private_data;
+    struct hci_dev *hdev = f->private;
     struct inquiry_cache *cache = &hdev->inq_cache;
     struct inquiry_entry *e;
-    char buf[4096];
-    int n = 0;

     hci_dev_lock_bh(hdev);

@@ -426,23 +418,30 @@ static ssize_t inquiry_cache_read(struct file *file, char __user *userbuf,
         struct inquiry_data *data = &e->data;
         bdaddr_t bdaddr;
         baswap(&bdaddr, &data->bdaddr);
-        n += sprintf(buf + n, "%s %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
+        seq_printf(f, "%s %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
                    batostr(&bdaddr),
                    data->pscan_rep_mode, data->pscan_period_mode,
                    data->pscan_mode, data->dev_class[2],
                    data->dev_class[1], data->dev_class[0],
                    __le16_to_cpu(data->clock_offset),
                    data->rssi, data->ssp_mode, e->timestamp);
     }

     hci_dev_unlock_bh(hdev);

-    return simple_read_from_buffer(userbuf, count, ppos, buf, n);
+    return 0;
+}
+
+static int inquiry_cache_open(struct inode *inode, struct file *file)
+{
+    return single_open(file, inquiry_cache_show, inode->i_private);
 }

 static const struct file_operations inquiry_cache_fops = {
     .open = inquiry_cache_open,
-    .read = inquiry_cache_read,
+    .read = seq_read,
+    .llseek = seq_lseek,
+    .release = single_release,
 };

 int hci_register_sysfs(struct hci_dev *hdev)
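Note: the hci_sysfs.c hunks above replace a fixed 4096-byte sprintf() buffer with the seq_file single_open() helpers, which handle buffering and partial reads for debugfs files. A minimal sketch of that pattern follows; the names (example_dev, example_show, example_fops) are hypothetical and not taken from this patch.

#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/fs.h>

struct example_dev {            /* hypothetical per-device private data */
    int id;
};

/* single_open() stores its third argument in f->private for the show op */
static int example_show(struct seq_file *f, void *p)
{
    struct example_dev *edev = f->private;

    seq_printf(f, "dev %d\n", edev->id);    /* no fixed-size buffer needed */
    return 0;
}

static int example_open(struct inode *inode, struct file *file)
{
    return single_open(file, example_show, inode->i_private);
}

static const struct file_operations example_fops = {
    .open    = example_open,
    .read    = seq_read,
    .llseek  = seq_lseek,
    .release = single_release,
};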
diff --git a/net/bridge/Kconfig b/net/bridge/Kconfig
index 19a6b9629c51..d115d5cea5b6 100644
--- a/net/bridge/Kconfig
+++ b/net/bridge/Kconfig
@@ -35,6 +35,7 @@ config BRIDGE
 config BRIDGE_IGMP_SNOOPING
     bool "IGMP snooping"
     depends on BRIDGE
+    depends on INET
     default y
     ---help---
       If you say Y here, then the Ethernet bridge will be able selectively
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index 2559fb539836..fd96a8dc97f4 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -38,7 +38,7 @@ static struct net_bridge_mdb_entry *__br_mdb_ip_get(
     struct net_bridge_mdb_entry *mp;
     struct hlist_node *p;

-    hlist_for_each_entry(mp, p, &mdb->mhash[hash], hlist[mdb->ver]) {
+    hlist_for_each_entry_rcu(mp, p, &mdb->mhash[hash], hlist[mdb->ver]) {
         if (dst == mp->addr)
             return mp;
     }
@@ -627,8 +627,8 @@ static void br_multicast_port_query_expired(unsigned long data)
     struct net_bridge *br = port->br;

     spin_lock(&br->multicast_lock);
-    if (port && (port->state == BR_STATE_DISABLED ||
-                 port->state == BR_STATE_BLOCKING))
+    if (port->state == BR_STATE_DISABLED ||
+        port->state == BR_STATE_BLOCKING)
         goto out;

     if (port->multicast_startup_queries_sent <
@@ -823,6 +823,7 @@ static int br_multicast_query(struct net_bridge *br,
     unsigned long max_delay;
     unsigned long now = jiffies;
     __be32 group;
+    int err = 0;

     spin_lock(&br->multicast_lock);
     if (!netif_running(br->dev) ||
@@ -841,12 +842,14 @@ static int br_multicast_query(struct net_bridge *br,
             group = 0;
         }
     } else {
-        if (!pskb_may_pull(skb, sizeof(struct igmpv3_query)))
-            return -EINVAL;
+        if (!pskb_may_pull(skb, sizeof(struct igmpv3_query))) {
+            err = -EINVAL;
+            goto out;
+        }

         ih3 = igmpv3_query_hdr(skb);
         if (ih3->nsrcs)
-            return 0;
+            goto out;

         max_delay = ih3->code ? 1 :
             IGMPV3_MRC(ih3->code) * (HZ / IGMP_TIMER_SCALE);
@@ -876,7 +879,7 @@ static int br_multicast_query(struct net_bridge *br,

 out:
     spin_unlock(&br->multicast_lock);
-    return 0;
+    return err;
 }

 static void br_multicast_leave_group(struct net_bridge *br,
@@ -1135,7 +1138,7 @@ void br_multicast_stop(struct net_bridge *br)

     if (mdb->old) {
         spin_unlock_bh(&br->multicast_lock);
-        synchronize_rcu_bh();
+        rcu_barrier_bh();
         spin_lock_bh(&br->multicast_lock);
         WARN_ON(mdb->old);
     }
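The synchronize_rcu_bh() to rcu_barrier_bh() change in br_multicast_stop() matters when the old hash table is released from an RCU callback (which the WARN_ON(mdb->old) check here suggests): synchronize_rcu_bh() only waits for readers to finish, while rcu_barrier_bh() also waits for already-queued callbacks to run. A conceptual sketch of that pattern, with hypothetical names rather than the actual bridge code:

struct example_table {
    struct rcu_head rcu;
    /* ... payload ... */
};

static void example_free_cb(struct rcu_head *head)
{
    kfree(container_of(head, struct example_table, rcu));
}

static void example_replace(struct example_table **slot, struct example_table *new)
{
    struct example_table *old = *slot;

    rcu_assign_pointer(*slot, new);
    if (old)
        call_rcu_bh(&old->rcu, example_free_cb);   /* deferred free */
}

static void example_teardown(struct example_table **slot)
{
    example_replace(slot, NULL);
    rcu_barrier_bh();   /* wait for example_free_cb() itself to have run */
}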
diff --git a/net/core/dev_mcast.c b/net/core/dev_mcast.c
index fd91569e2394..3dc295beb483 100644
--- a/net/core/dev_mcast.c
+++ b/net/core/dev_mcast.c
@@ -97,8 +97,9 @@ int dev_mc_add(struct net_device *dev, void *addr, int alen, int glbl)

     netif_addr_lock_bh(dev);
     if (alen != dev->addr_len)
-        return -EINVAL;
-    err = __dev_addr_add(&dev->mc_list, &dev->mc_count, addr, alen, glbl);
+        err = -EINVAL;
+    else
+        err = __dev_addr_add(&dev->mc_list, &dev->mc_count, addr, alen, glbl);
     if (!err)
         __dev_set_rx_mode(dev);
     netif_addr_unlock_bh(dev);
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index 0f2f82185ec4..f4cb6b6299d9 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -17,6 +17,7 @@
 #include <linux/errno.h>
 #include <linux/ethtool.h>
 #include <linux/netdevice.h>
+#include <linux/bitops.h>
 #include <asm/uaccess.h>

 /*
@@ -199,10 +200,7 @@ static int ethtool_set_settings(struct net_device *dev, void __user *useraddr)
     return dev->ethtool_ops->set_settings(dev, &cmd);
 }

-/*
- * noinline attribute so that gcc doesnt use too much stack in dev_ethtool()
- */
-static noinline int ethtool_get_drvinfo(struct net_device *dev, void __user *useraddr)
+static noinline_for_stack int ethtool_get_drvinfo(struct net_device *dev, void __user *useraddr)
 {
     struct ethtool_drvinfo info;
     const struct ethtool_ops *ops = dev->ethtool_ops;
@@ -214,6 +212,10 @@ static noinline int ethtool_get_drvinfo(struct net_device *dev, void __user *use
     info.cmd = ETHTOOL_GDRVINFO;
     ops->get_drvinfo(dev, &info);

+    /*
+     * this method of obtaining string set info is deprecated;
+     * Use ETHTOOL_GSSET_INFO instead.
+     */
     if (ops->get_sset_count) {
         int rc;

@@ -237,10 +239,67 @@ static noinline int ethtool_get_drvinfo(struct net_device *dev, void __user *use
     return 0;
 }

-/*
- * noinline attribute so that gcc doesnt use too much stack in dev_ethtool()
- */
-static noinline int ethtool_set_rxnfc(struct net_device *dev, void __user *useraddr)
+static noinline_for_stack int ethtool_get_sset_info(struct net_device *dev,
+                                                    void __user *useraddr)
+{
+    struct ethtool_sset_info info;
+    const struct ethtool_ops *ops = dev->ethtool_ops;
+    u64 sset_mask;
+    int i, idx = 0, n_bits = 0, ret, rc;
+    u32 *info_buf = NULL;
+
+    if (!ops->get_sset_count)
+        return -EOPNOTSUPP;
+
+    if (copy_from_user(&info, useraddr, sizeof(info)))
+        return -EFAULT;
+
+    /* store copy of mask, because we zero struct later on */
+    sset_mask = info.sset_mask;
+    if (!sset_mask)
+        return 0;
+
+    /* calculate size of return buffer */
+    n_bits = hweight64(sset_mask);
+
+    memset(&info, 0, sizeof(info));
+    info.cmd = ETHTOOL_GSSET_INFO;
+
+    info_buf = kzalloc(n_bits * sizeof(u32), GFP_USER);
+    if (!info_buf)
+        return -ENOMEM;
+
+    /*
+     * fill return buffer based on input bitmask and successful
+     * get_sset_count return
+     */
+    for (i = 0; i < 64; i++) {
+        if (!(sset_mask & (1ULL << i)))
+            continue;
+
+        rc = ops->get_sset_count(dev, i);
+        if (rc >= 0) {
+            info.sset_mask |= (1ULL << i);
+            info_buf[idx++] = rc;
+        }
+    }
+
+    ret = -EFAULT;
+    if (copy_to_user(useraddr, &info, sizeof(info)))
+        goto out;
+
+    useraddr += offsetof(struct ethtool_sset_info, data);
+    if (copy_to_user(useraddr, info_buf, idx * sizeof(u32)))
+        goto out;
+
+    ret = 0;
+
+out:
+    kfree(info_buf);
+    return ret;
+}
+
+static noinline_for_stack int ethtool_set_rxnfc(struct net_device *dev, void __user *useraddr)
 {
     struct ethtool_rxnfc cmd;

@@ -253,10 +312,7 @@ static noinline int ethtool_set_rxnfc(struct net_device *dev, void __user *usera
     return dev->ethtool_ops->set_rxnfc(dev, &cmd);
 }

-/*
- * noinline attribute so that gcc doesnt use too much stack in dev_ethtool()
- */
-static noinline int ethtool_get_rxnfc(struct net_device *dev, void __user *useraddr)
+static noinline_for_stack int ethtool_get_rxnfc(struct net_device *dev, void __user *useraddr)
 {
     struct ethtool_rxnfc info;
     const struct ethtool_ops *ops = dev->ethtool_ops;
@@ -328,10 +384,7 @@ static void __rx_ntuple_filter_add(struct ethtool_rx_ntuple_list *list,
     list->count++;
 }

-/*
- * noinline attribute so that gcc doesnt use too much stack in dev_ethtool()
- */
-static noinline int ethtool_set_rx_ntuple(struct net_device *dev, void __user *useraddr)
+static noinline_for_stack int ethtool_set_rx_ntuple(struct net_device *dev, void __user *useraddr)
 {
     struct ethtool_rx_ntuple cmd;
     const struct ethtool_ops *ops = dev->ethtool_ops;
@@ -799,10 +852,7 @@ static int ethtool_set_eeprom(struct net_device *dev, void __user *useraddr)
     return ret;
 }

-/*
- * noinline attribute so that gcc doesnt use too much stack in dev_ethtool()
- */
-static noinline int ethtool_get_coalesce(struct net_device *dev, void __user *useraddr)
+static noinline_for_stack int ethtool_get_coalesce(struct net_device *dev, void __user *useraddr)
 {
     struct ethtool_coalesce coalesce = { .cmd = ETHTOOL_GCOALESCE };

@@ -816,10 +866,7 @@ static noinline int ethtool_get_coalesce(struct net_device *dev, void __user *us
     return 0;
 }

-/*
- * noinline attribute so that gcc doesnt use too much stack in dev_ethtool()
- */
-static noinline int ethtool_set_coalesce(struct net_device *dev, void __user *useraddr)
+static noinline_for_stack int ethtool_set_coalesce(struct net_device *dev, void __user *useraddr)
 {
     struct ethtool_coalesce coalesce;

@@ -1229,10 +1276,7 @@ static int ethtool_set_value(struct net_device *dev, char __user *useraddr,
     return actor(dev, edata.data);
 }

-/*
- * noinline attribute so that gcc doesnt use too much stack in dev_ethtool()
- */
-static noinline int ethtool_flash_device(struct net_device *dev, char __user *useraddr)
+static noinline_for_stack int ethtool_flash_device(struct net_device *dev, char __user *useraddr)
 {
     struct ethtool_flash efl;

@@ -1471,6 +1515,9 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
     case ETHTOOL_GRXNTUPLE:
         rc = ethtool_get_rx_ntuple(dev, useraddr);
         break;
+    case ETHTOOL_GSSET_INFO:
+        rc = ethtool_get_sset_info(dev, useraddr);
+        break;
     default:
         rc = -EOPNOTSUPP;
     }
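Two things happen in the ethtool.c hunks: the open-coded "noinline so dev_ethtool() stays small" comments are replaced by the self-documenting noinline_for_stack annotation, and a new ETHTOOL_GSSET_INFO command is added that sizes its reply with hweight64() of the requested bitmask and returns one u32 count per requested string set, taken from the driver's ->get_sset_count(). An illustrative driver-side counterpart that this ioctl would query might look like the following (a hypothetical driver, not code from this patch):

static int example_get_sset_count(struct net_device *dev, int sset)
{
    switch (sset) {
    case ETH_SS_STATS:
        return 12;              /* hypothetical number of stat strings */
    case ETH_SS_TEST:
        return 4;               /* hypothetical number of self-test names */
    default:
        return -EOPNOTSUPP;     /* sets this driver does not provide */
    }
}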
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index d102f6d9abdc..6cee6434da67 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -771,6 +771,8 @@ static __inline__ int neigh_max_probes(struct neighbour *n)
 }

 static void neigh_invalidate(struct neighbour *neigh)
+    __releases(neigh->lock)
+    __acquires(neigh->lock)
 {
     struct sk_buff *skb;

diff --git a/net/core/sock.c b/net/core/sock.c
index fcd397a762ff..c5812bbc2cc9 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -340,8 +340,12 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
         rc = sk_backlog_rcv(sk, skb);

         mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
-    } else
-        sk_add_backlog(sk, skb);
+    } else if (sk_add_backlog(sk, skb)) {
+        bh_unlock_sock(sk);
+        atomic_inc(&sk->sk_drops);
+        goto discard_and_relse;
+    }
+
     bh_unlock_sock(sk);
 out:
     sock_put(sk);
@@ -1139,6 +1143,7 @@ struct sock *sk_clone(const struct sock *sk, const gfp_t priority)
     sock_lock_init(newsk);
     bh_lock_sock(newsk);
     newsk->sk_backlog.head = newsk->sk_backlog.tail = NULL;
+    newsk->sk_backlog.len = 0;

     atomic_set(&newsk->sk_rmem_alloc, 0);
     /*
@@ -1542,6 +1547,12 @@ static void __release_sock(struct sock *sk)

         bh_lock_sock(sk);
     } while ((skb = sk->sk_backlog.head) != NULL);
+
+    /*
+     * Doing the zeroing here guarantee we can not loop forever
+     * while a wild producer attempts to flood us.
+     */
+    sk->sk_backlog.len = 0;
 }

 /**
@@ -1874,6 +1885,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
     sk->sk_allocation = GFP_KERNEL;
     sk->sk_rcvbuf = sysctl_rmem_default;
     sk->sk_sndbuf = sysctl_wmem_default;
+    sk->sk_backlog.limit = sk->sk_rcvbuf << 1;
     sk->sk_state = TCP_CLOSE;
     sk_set_socket(sk, sock);

@@ -2276,7 +2288,8 @@ out_free_request_sock_slab:
         prot->rsk_prot->slab = NULL;
     }
 out_free_request_sock_slab_name:
-    kfree(prot->rsk_prot->slab_name);
+    if (prot->rsk_prot)
+        kfree(prot->rsk_prot->slab_name);
 out_free_sock_slab:
     kmem_cache_destroy(prot->slab);
     prot->slab = NULL;
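The sock.c hunks above introduce sk->sk_backlog.len and sk->sk_backlog.limit, and the protocol handlers in this diff start checking sk_add_backlog() for failure and counting the resulting drops. The include/net/sock.h half of the series is not part of this 'net' diff; the following is only a conceptual sketch of a bounded backlog add, using the field names visible in these hunks, not the actual header change:

/* Conceptual sketch: fail instead of queueing without bound. */
static inline int example_add_backlog(struct sock *sk, struct sk_buff *skb)
{
    if (sk->sk_backlog.len + skb->truesize > sk->sk_backlog.limit)
        return -ENOBUFS;            /* caller drops the skb and counts it */

    __sk_add_backlog(sk, skb);      /* the renamed, unlimited enqueue */
    sk->sk_backlog.len += skb->truesize;
    return 0;
}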
diff --git a/net/dccp/minisocks.c b/net/dccp/minisocks.c
index af226a063141..0d508c359fa9 100644
--- a/net/dccp/minisocks.c
+++ b/net/dccp/minisocks.c
@@ -254,7 +254,7 @@ int dccp_child_process(struct sock *parent, struct sock *child,
          * in main socket hash table and lock on listening
          * socket does not protect us more.
          */
-        sk_add_backlog(child, skb);
+        __sk_add_backlog(child, skb);
     }

     bh_unlock_sock(child);
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index c0c5274d0271..f47c9f76754b 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -1144,12 +1144,9 @@ static int ipgre_header(struct sk_buff *skb, struct net_device *dev,

     if (saddr)
         memcpy(&iph->saddr, saddr, 4);
-
-    if (daddr) {
+    if (daddr)
         memcpy(&iph->daddr, daddr, 4);
-        return t->hlen;
-    }
-    if (iph->daddr && !ipv4_is_multicast(iph->daddr))
+    if (iph->daddr)
         return t->hlen;

     return -t->hlen;
diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
index 10a6a604bf32..678909281648 100644
--- a/net/ipv4/ipconfig.c
+++ b/net/ipv4/ipconfig.c
@@ -187,6 +187,16 @@ struct ic_device {
 static struct ic_device *ic_first_dev __initdata = NULL;/* List of open device */
 static struct net_device *ic_dev __initdata = NULL;     /* Selected device */

+static bool __init ic_device_match(struct net_device *dev)
+{
+    if (user_dev_name[0] ? !strcmp(dev->name, user_dev_name) :
+        (!(dev->flags & IFF_LOOPBACK) &&
+         (dev->flags & (IFF_POINTOPOINT|IFF_BROADCAST)) &&
+         strncmp(dev->name, "dummy", 5)))
+        return true;
+    return false;
+}
+
 static int __init ic_open_devs(void)
 {
     struct ic_device *d, **last;
@@ -207,10 +217,7 @@ static int __init ic_open_devs(void)
     for_each_netdev(&init_net, dev) {
         if (dev->flags & IFF_LOOPBACK)
             continue;
-        if (user_dev_name[0] ? !strcmp(dev->name, user_dev_name) :
-            (!(dev->flags & IFF_LOOPBACK) &&
-             (dev->flags & (IFF_POINTOPOINT|IFF_BROADCAST)) &&
-             strncmp(dev->name, "dummy", 5))) {
+        if (ic_device_match(dev)) {
             int able = 0;
             if (dev->mtu >= 364)
                 able |= IC_BOOTP;
@@ -228,7 +235,7 @@ static int __init ic_open_devs(void)
         }
         if (!(d = kmalloc(sizeof(struct ic_device), GFP_KERNEL))) {
             rtnl_unlock();
-            return -1;
+            return -ENOMEM;
         }
         d->dev = dev;
         *last = d;
@@ -253,7 +260,7 @@ static int __init ic_open_devs(void)
             printk(KERN_ERR "IP-Config: Device `%s' not found.\n", user_dev_name);
         else
             printk(KERN_ERR "IP-Config: No network devices available.\n");
-        return -1;
+        return -ENODEV;
     }
     return 0;
 }
@@ -1303,6 +1310,32 @@ __be32 __init root_nfs_parse_addr(char *name)
     return addr;
 }

+#define DEVICE_WAIT_MAX     12 /* 12 seconds */
+
+static int __init wait_for_devices(void)
+{
+    int i;
+
+    msleep(CONF_PRE_OPEN);
+    for (i = 0; i < DEVICE_WAIT_MAX; i++) {
+        struct net_device *dev;
+        int found = 0;
+
+        rtnl_lock();
+        for_each_netdev(&init_net, dev) {
+            if (ic_device_match(dev)) {
+                found = 1;
+                break;
+            }
+        }
+        rtnl_unlock();
+        if (found)
+            return 0;
+        ssleep(1);
+    }
+    return -ENODEV;
+}
+
 /*
  *  IP Autoconfig dispatcher.
  */
@@ -1313,6 +1346,7 @@ static int __init ip_auto_config(void)
 #ifdef IPCONFIG_DYNAMIC
     int retries = CONF_OPEN_RETRIES;
 #endif
+    int err;

 #ifdef CONFIG_PROC_FS
     proc_net_fops_create(&init_net, "pnp", S_IRUGO, &pnp_seq_fops);
@@ -1325,12 +1359,15 @@ static int __init ip_auto_config(void)
 #ifdef IPCONFIG_DYNAMIC
  try_try_again:
 #endif
-    /* Give hardware a chance to settle */
-    msleep(CONF_PRE_OPEN);
+    /* Wait for devices to appear */
+    err = wait_for_devices();
+    if (err)
+        return err;

     /* Setup all network devices */
-    if (ic_open_devs() < 0)
-        return -1;
+    err = ic_open_devs();
+    if (err)
+        return err;

     /* Give drivers a chance to settle */
     ssleep(CONF_POST_OPEN);
diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c
index 242ed2307370..4f1f337f4337 100644
--- a/net/ipv4/proc.c
+++ b/net/ipv4/proc.c
@@ -249,6 +249,8 @@ static const struct snmp_mib snmp4_net_list[] = {
     SNMP_MIB_ITEM("TCPSackShifted", LINUX_MIB_SACKSHIFTED),
     SNMP_MIB_ITEM("TCPSackMerged", LINUX_MIB_SACKMERGED),
     SNMP_MIB_ITEM("TCPSackShiftFallback", LINUX_MIB_SACKSHIFTFALLBACK),
+    SNMP_MIB_ITEM("TCPBacklogDrop", LINUX_MIB_TCPBACKLOGDROP),
+    SNMP_MIB_ITEM("TCPMinTTLDrop", LINUX_MIB_TCPMINTTLDROP),
     SNMP_MIB_SENTINEL
 };

diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index b2ba5581d2ae..d9b40248b97f 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -146,7 +146,6 @@ static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst);
 static void ipv4_link_failure(struct sk_buff *skb);
 static void ip_rt_update_pmtu(struct dst_entry *dst, u32 mtu);
 static int rt_garbage_collect(struct dst_ops *ops);
-static void rt_emergency_hash_rebuild(struct net *net);


 static struct dst_ops ipv4_dst_ops = {
@@ -780,11 +779,30 @@ static void rt_do_flush(int process_context)
 #define FRACT_BITS 3
 #define ONE (1UL << FRACT_BITS)

+/*
+ * Given a hash chain and an item in this hash chain,
+ * find if a previous entry has the same hash_inputs
+ * (but differs on tos, mark or oif)
+ * Returns 0 if an alias is found.
+ * Returns ONE if rth has no alias before itself.
+ */
+static int has_noalias(const struct rtable *head, const struct rtable *rth)
+{
+    const struct rtable *aux = head;
+
+    while (aux != rth) {
+        if (compare_hash_inputs(&aux->fl, &rth->fl))
+            return 0;
+        aux = aux->u.dst.rt_next;
+    }
+    return ONE;
+}
+
 static void rt_check_expire(void)
 {
     static unsigned int rover;
     unsigned int i = rover, goal;
-    struct rtable *rth, *aux, **rthp;
+    struct rtable *rth, **rthp;
     unsigned long samples = 0;
     unsigned long sum = 0, sum2 = 0;
     unsigned long delta;
@@ -835,15 +853,7 @@ nofree:
                      * attributes don't unfairly skew
                      * the length computation
                      */
-                    for (aux = rt_hash_table[i].chain;;) {
-                        if (aux == rth) {
-                            length += ONE;
-                            break;
-                        }
-                        if (compare_hash_inputs(&aux->fl, &rth->fl))
-                            break;
-                        aux = aux->u.dst.rt_next;
-                    }
+                    length += has_noalias(rt_hash_table[i].chain, rth);
                     continue;
                 }
             } else if (!rt_may_expire(rth, tmo, ip_rt_gc_timeout))
@@ -1073,6 +1083,21 @@ work_done:
 out:    return 0;
 }

+/*
+ * Returns number of entries in a hash chain that have different hash_inputs
+ */
+static int slow_chain_length(const struct rtable *head)
+{
+    int length = 0;
+    const struct rtable *rth = head;
+
+    while (rth) {
+        length += has_noalias(head, rth);
+        rth = rth->u.dst.rt_next;
+    }
+    return length >> FRACT_BITS;
+}
+
 static int rt_intern_hash(unsigned hash, struct rtable *rt,
                           struct rtable **rp, struct sk_buff *skb)
 {
@@ -1185,7 +1210,8 @@ restart:
                 rt_free(cand);
             }
         } else {
-            if (chain_length > rt_chain_length_max) {
+            if (chain_length > rt_chain_length_max &&
+                slow_chain_length(rt_hash_table[hash].chain) > rt_chain_length_max) {
                 struct net *net = dev_net(rt->u.dst.dev);
                 int num = ++net->ipv4.current_rt_cache_rebuild_count;
                 if (!rt_caching(dev_net(rt->u.dst.dev))) {
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index c3588b4fd979..70df40980a87 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1651,13 +1651,15 @@ int tcp_v4_rcv(struct sk_buff *skb)
     if (!sk)
         goto no_tcp_socket;

-    if (iph->ttl < inet_sk(sk)->min_ttl)
-        goto discard_and_relse;
-
 process:
     if (sk->sk_state == TCP_TIME_WAIT)
         goto do_time_wait;

+    if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
+        NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
+        goto discard_and_relse;
+    }
+
     if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
         goto discard_and_relse;
     nf_reset(skb);
@@ -1682,8 +1684,11 @@ process:
             if (!tcp_prequeue(sk, skb))
                 ret = tcp_v4_do_rcv(sk, skb);
         }
-    } else
-        sk_add_backlog(sk, skb);
+    } else if (unlikely(sk_add_backlog(sk, skb))) {
+        bh_unlock_sock(sk);
+        NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
+        goto discard_and_relse;
+    }
     bh_unlock_sock(sk);

     sock_put(sk);
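The min_ttl comparison moved above (and the TCPMinTTLDrop counter it now feeds) is driven by a per-socket value that userspace configures with the IP_MINTTL socket option, typically for GTSM-style peering where packets must arrive with TTL 255. An illustrative userspace snippet, assuming the constant value from linux/in.h:

#include <sys/socket.h>
#include <netinet/in.h>

#ifndef IP_MINTTL
#define IP_MINTTL 21        /* value used by linux/in.h */
#endif

/* Only accept packets on this socket whose TTL is at least 255. */
int example_set_minttl(int fd)
{
    int minttl = 255;

    return setsockopt(fd, IPPROTO_IP, IP_MINTTL, &minttl, sizeof(minttl));
}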
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index f206ee5dda80..4199bc6915c5 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -728,7 +728,7 @@ int tcp_child_process(struct sock *parent, struct sock *child,
          * in main socket hash table and lock on listening
          * socket does not protect us more.
          */
-        sk_add_backlog(child, skb);
+        __sk_add_backlog(child, skb);
     }

     bh_unlock_sock(child);
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 4a1605d3f909..f181b78f2385 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -2395,13 +2395,17 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
     struct tcp_extend_values *xvp = tcp_xv(rvp);
     struct inet_request_sock *ireq = inet_rsk(req);
     struct tcp_sock *tp = tcp_sk(sk);
+    const struct tcp_cookie_values *cvp = tp->cookie_values;
     struct tcphdr *th;
     struct sk_buff *skb;
     struct tcp_md5sig_key *md5;
     int tcp_header_size;
     int mss;
+    int s_data_desired = 0;

-    skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15, 1, GFP_ATOMIC);
+    if (cvp != NULL && cvp->s_data_constant && cvp->s_data_desired)
+        s_data_desired = cvp->s_data_desired;
+    skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15 + s_data_desired, 1, GFP_ATOMIC);
     if (skb == NULL)
         return NULL;

@@ -2457,16 +2461,12 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
                  TCPCB_FLAG_SYN | TCPCB_FLAG_ACK);

     if (OPTION_COOKIE_EXTENSION & opts.options) {
-        const struct tcp_cookie_values *cvp = tp->cookie_values;
-
-        if (cvp != NULL &&
-            cvp->s_data_constant &&
-            cvp->s_data_desired > 0) {
-            u8 *buf = skb_put(skb, cvp->s_data_desired);
+        if (s_data_desired) {
+            u8 *buf = skb_put(skb, s_data_desired);

             /* copy data directly from the listening socket. */
-            memcpy(buf, cvp->s_data_payload, cvp->s_data_desired);
-            TCP_SKB_CB(skb)->end_seq += cvp->s_data_desired;
+            memcpy(buf, cvp->s_data_payload, s_data_desired);
+            TCP_SKB_CB(skb)->end_seq += s_data_desired;
         }

         if (opts.hash_size > 0) {
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 608a5446d05b..7af756d0f931 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1371,8 +1371,10 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
     bh_lock_sock(sk);
     if (!sock_owned_by_user(sk))
         rc = __udp_queue_rcv_skb(sk, skb);
-    else
-        sk_add_backlog(sk, skb);
+    else if (sk_add_backlog(sk, skb)) {
+        bh_unlock_sock(sk);
+        goto drop;
+    }
     bh_unlock_sock(sk);

     return rc;
diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
index 67107d63c1cd..e4a1483fba77 100644
--- a/net/ipv4/xfrm4_policy.c
+++ b/net/ipv4/xfrm4_policy.c
@@ -91,11 +91,12 @@ static int xfrm4_init_path(struct xfrm_dst *path, struct dst_entry *dst,
     return 0;
 }

-static int xfrm4_fill_dst(struct xfrm_dst *xdst, struct net_device *dev)
+static int xfrm4_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
+                          struct flowi *fl)
 {
     struct rtable *rt = (struct rtable *)xdst->route;

-    xdst->u.rt.fl = rt->fl;
+    xdst->u.rt.fl = *fl;

     xdst->u.dst.dev = dev;
     dev_hold(dev);
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 88fd8c5877ee..3381b4317c27 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -1380,6 +1380,8 @@ static void addrconf_dad_stop(struct inet6_ifaddr *ifp, int dad_failed)
         if (dad_failed)
             ifp->flags |= IFA_F_DADFAILED;
         spin_unlock_bh(&ifp->lock);
+        if (dad_failed)
+            ipv6_ifa_notify(0, ifp);
         in6_ifa_put(ifp);
 #ifdef CONFIG_IPV6_PRIVACY
     } else if (ifp->flags&IFA_F_TEMPORARY) {
@@ -2615,7 +2617,7 @@ static void addrconf_bonding_change(struct net_device *dev, unsigned long event)
 static int addrconf_ifdown(struct net_device *dev, int how)
 {
     struct inet6_dev *idev;
-    struct inet6_ifaddr *ifa, **bifa;
+    struct inet6_ifaddr *ifa, *keep_list, **bifa;
     struct net *net = dev_net(dev);
     int i;

@@ -2649,11 +2651,11 @@ static int addrconf_ifdown(struct net_device *dev, int how)
     write_lock_bh(&addrconf_hash_lock);
     while ((ifa = *bifa) != NULL) {
         if (ifa->idev == idev &&
-            (how || !(ifa->flags&IFA_F_PERMANENT))) {
+            (how || !(ifa->flags&IFA_F_PERMANENT) ||
+             ipv6_addr_type(&ifa->addr) & IPV6_ADDR_LINKLOCAL)) {
             *bifa = ifa->lst_next;
             ifa->lst_next = NULL;
-            addrconf_del_timer(ifa);
-            in6_ifa_put(ifa);
+            __in6_ifa_put(ifa);
             continue;
         }
         bifa = &ifa->lst_next;
@@ -2689,31 +2691,51 @@ static int addrconf_ifdown(struct net_device *dev, int how)
         write_lock_bh(&idev->lock);
     }
 #endif
-    bifa = &idev->addr_list;
-    while ((ifa = *bifa) != NULL) {
-        if (how == 0 && (ifa->flags&IFA_F_PERMANENT)) {
-            /* Retain permanent address on admin down */
+    keep_list = NULL;
+    bifa = &keep_list;
+    while ((ifa = idev->addr_list) != NULL) {
+        idev->addr_list = ifa->if_next;
+        ifa->if_next = NULL;
+
+        addrconf_del_timer(ifa);
+
+        /* If just doing link down, and address is permanent
+           and not link-local, then retain it. */
+        if (how == 0 &&
+            (ifa->flags&IFA_F_PERMANENT) &&
+            !(ipv6_addr_type(&ifa->addr) & IPV6_ADDR_LINKLOCAL)) {
+
+            /* Move to holding list */
+            *bifa = ifa;
             bifa = &ifa->if_next;

-            /* Restart DAD if needed when link comes back up */
-            if ( !((dev->flags&(IFF_NOARP|IFF_LOOPBACK)) ||
+            /* If not doing DAD on this address, just keep it. */
+            if ((dev->flags&(IFF_NOARP|IFF_LOOPBACK)) ||
                 idev->cnf.accept_dad <= 0 ||
-                   (ifa->flags & IFA_F_NODAD)))
-                ifa->flags |= IFA_F_TENTATIVE;
-        } else {
-            *bifa = ifa->if_next;
-            ifa->if_next = NULL;
+                (ifa->flags & IFA_F_NODAD))
+                continue;

+            /* If it was tentative already, no need to notify */
+            if (ifa->flags & IFA_F_TENTATIVE)
+                continue;
+
+            /* Flag it for later restoration when link comes up */
+            ifa->flags |= IFA_F_TENTATIVE;
+            in6_ifa_hold(ifa);
+        } else {
             ifa->dead = 1;
-            write_unlock_bh(&idev->lock);
+        }
+        write_unlock_bh(&idev->lock);

         __ipv6_ifa_notify(RTM_DELADDR, ifa);
         atomic_notifier_call_chain(&inet6addr_chain, NETDEV_DOWN, ifa);
         in6_ifa_put(ifa);

         write_lock_bh(&idev->lock);
-        }
     }
+
+    idev->addr_list = keep_list;
+
     write_unlock_bh(&idev->lock);

     /* Step 5: Discard multicast list */
@@ -2739,28 +2761,29 @@ static int addrconf_ifdown(struct net_device *dev, int how)
 static void addrconf_rs_timer(unsigned long data)
 {
     struct inet6_ifaddr *ifp = (struct inet6_ifaddr *) data;
+    struct inet6_dev *idev = ifp->idev;

-    if (ifp->idev->cnf.forwarding)
+    read_lock(&idev->lock);
+    if (idev->dead || !(idev->if_flags & IF_READY))
         goto out;

-    if (ifp->idev->if_flags & IF_RA_RCVD) {
-        /*
-         * Announcement received after solicitation
-         * was sent
-         */
+    if (idev->cnf.forwarding)
+        goto out;
+
+    /* Announcement received after solicitation was sent */
+    if (idev->if_flags & IF_RA_RCVD)
         goto out;
-    }

     spin_lock(&ifp->lock);
-    if (ifp->probes++ < ifp->idev->cnf.rtr_solicits) {
+    if (ifp->probes++ < idev->cnf.rtr_solicits) {
         /* The wait after the last probe can be shorter */
         addrconf_mod_timer(ifp, AC_RS,
-                           (ifp->probes == ifp->idev->cnf.rtr_solicits) ?
-                           ifp->idev->cnf.rtr_solicit_delay :
-                           ifp->idev->cnf.rtr_solicit_interval);
+                           (ifp->probes == idev->cnf.rtr_solicits) ?
+                           idev->cnf.rtr_solicit_delay :
+                           idev->cnf.rtr_solicit_interval);
         spin_unlock(&ifp->lock);

-        ndisc_send_rs(ifp->idev->dev, &ifp->addr, &in6addr_linklocal_allrouters);
+        ndisc_send_rs(idev->dev, &ifp->addr, &in6addr_linklocal_allrouters);
     } else {
         spin_unlock(&ifp->lock);
         /*
@@ -2768,10 +2791,11 @@ static void addrconf_rs_timer(unsigned long data)
          * assumption any longer.
          */
         printk(KERN_DEBUG "%s: no IPv6 routers present\n",
-               ifp->idev->dev->name);
+               idev->dev->name);
     }

 out:
+    read_unlock(&idev->lock);
     in6_ifa_put(ifp);
 }

@@ -2850,9 +2874,9 @@ static void addrconf_dad_timer(unsigned long data)
     struct inet6_dev *idev = ifp->idev;
     struct in6_addr mcaddr;

-    read_lock_bh(&idev->lock);
-    if (idev->dead) {
-        read_unlock_bh(&idev->lock);
+    read_lock(&idev->lock);
+    if (idev->dead || !(idev->if_flags & IF_READY)) {
+        read_unlock(&idev->lock);
         goto out;
     }

@@ -2864,7 +2888,7 @@ static void addrconf_dad_timer(unsigned long data)

     ifp->flags &= ~(IFA_F_TENTATIVE|IFA_F_OPTIMISTIC|IFA_F_DADFAILED);
     spin_unlock(&ifp->lock);
-    read_unlock_bh(&idev->lock);
+    read_unlock(&idev->lock);

     addrconf_dad_completed(ifp);

@@ -2874,7 +2898,7 @@ static void addrconf_dad_timer(unsigned long data)
     ifp->probes--;
     addrconf_mod_timer(ifp, AC_DAD, ifp->idev->nd_parms->retrans_time);
     spin_unlock(&ifp->lock);
-    read_unlock_bh(&idev->lock);
+    read_unlock(&idev->lock);

     /* send a neighbour solicitation for our addr */
     addrconf_addr_solict_mult(&ifp->addr, &mcaddr);
diff --git a/net/ipv6/fib6_rules.c b/net/ipv6/fib6_rules.c
index 551882b9dfd6..5e463c43fcc2 100644
--- a/net/ipv6/fib6_rules.c
+++ b/net/ipv6/fib6_rules.c
@@ -84,18 +84,11 @@ static int fib6_rule_action(struct fib_rule *rule, struct flowi *flp,
         if ((rule->flags & FIB_RULE_FIND_SADDR) &&
             r->src.plen && !(flags & RT6_LOOKUP_F_HAS_SADDR)) {
             struct in6_addr saddr;
-            unsigned int srcprefs = 0;
-
-            if (flags & RT6_LOOKUP_F_SRCPREF_TMP)
-                srcprefs |= IPV6_PREFER_SRC_TMP;
-            if (flags & RT6_LOOKUP_F_SRCPREF_PUBLIC)
-                srcprefs |= IPV6_PREFER_SRC_PUBLIC;
-            if (flags & RT6_LOOKUP_F_SRCPREF_COA)
-                srcprefs |= IPV6_PREFER_SRC_COA;

             if (ipv6_dev_get_saddr(net,
                                    ip6_dst_idev(&rt->u.dst)->dev,
-                                   &flp->fl6_dst, srcprefs,
+                                   &flp->fl6_dst,
+                                   rt6_flags2srcprefs(flags),
                                    &saddr))
                 goto again;
             if (!ipv6_prefix_equal(&saddr, &r->src.addr,
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index b08879e97f22..52cd3eff31dc 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -819,15 +819,8 @@ struct dst_entry * ip6_route_output(struct net *net, struct sock *sk,

     if (!ipv6_addr_any(&fl->fl6_src))
         flags |= RT6_LOOKUP_F_HAS_SADDR;
-    else if (sk) {
-        unsigned int prefs = inet6_sk(sk)->srcprefs;
-        if (prefs & IPV6_PREFER_SRC_TMP)
-            flags |= RT6_LOOKUP_F_SRCPREF_TMP;
-        if (prefs & IPV6_PREFER_SRC_PUBLIC)
-            flags |= RT6_LOOKUP_F_SRCPREF_PUBLIC;
-        if (prefs & IPV6_PREFER_SRC_COA)
-            flags |= RT6_LOOKUP_F_SRCPREF_COA;
-    }
+    else if (sk)
+        flags |= rt6_srcprefs2flags(inet6_sk(sk)->srcprefs);

     return fib6_rule_lookup(net, fl, flags, ip6_pol_route_output);
 }
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 6963a6b6763e..9b6dbba80d31 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -1740,8 +1740,11 @@ process:
             if (!tcp_prequeue(sk, skb))
                 ret = tcp_v6_do_rcv(sk, skb);
         }
-    } else
-        sk_add_backlog(sk, skb);
+    } else if (unlikely(sk_add_backlog(sk, skb))) {
+        bh_unlock_sock(sk);
+        NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
+        goto discard_and_relse;
+    }
     bh_unlock_sock(sk);

     sock_put(sk);
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 52b8347ae3b2..3c0c9c755c92 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -583,16 +583,20 @@ static void flush_stack(struct sock **stack, unsigned int count,
             bh_lock_sock(sk);
             if (!sock_owned_by_user(sk))
                 udpv6_queue_rcv_skb(sk, skb1);
-            else
-                sk_add_backlog(sk, skb1);
+            else if (sk_add_backlog(sk, skb1)) {
+                kfree_skb(skb1);
+                bh_unlock_sock(sk);
+                goto drop;
+            }
             bh_unlock_sock(sk);
-        } else {
-            atomic_inc(&sk->sk_drops);
-            UDP6_INC_STATS_BH(sock_net(sk),
-                              UDP_MIB_RCVBUFERRORS, IS_UDPLITE(sk));
-            UDP6_INC_STATS_BH(sock_net(sk),
-                              UDP_MIB_INERRORS, IS_UDPLITE(sk));
+            continue;
         }
+drop:
+        atomic_inc(&sk->sk_drops);
+        UDP6_INC_STATS_BH(sock_net(sk),
+                          UDP_MIB_RCVBUFERRORS, IS_UDPLITE(sk));
+        UDP6_INC_STATS_BH(sock_net(sk),
+                          UDP_MIB_INERRORS, IS_UDPLITE(sk));
     }
 }
 /*
@@ -754,8 +758,12 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
     bh_lock_sock(sk);
     if (!sock_owned_by_user(sk))
         udpv6_queue_rcv_skb(sk, skb);
-    else
-        sk_add_backlog(sk, skb);
+    else if (sk_add_backlog(sk, skb)) {
+        atomic_inc(&sk->sk_drops);
+        bh_unlock_sock(sk);
+        sock_put(sk);
+        goto discard;
+    }
     bh_unlock_sock(sk);
     sock_put(sk);
     return 0;
diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
index dbdc696f5fc5..ae181651c75a 100644
--- a/net/ipv6/xfrm6_policy.c
+++ b/net/ipv6/xfrm6_policy.c
@@ -116,7 +116,8 @@ static int xfrm6_init_path(struct xfrm_dst *path, struct dst_entry *dst,
     return 0;
 }

-static int xfrm6_fill_dst(struct xfrm_dst *xdst, struct net_device *dev)
+static int xfrm6_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
+                          struct flowi *fl)
 {
     struct rt6_info *rt = (struct rt6_info*)xdst->route;

diff --git a/net/llc/llc_c_ac.c b/net/llc/llc_c_ac.c
index 019c780512e8..86d6985b9d49 100644
--- a/net/llc/llc_c_ac.c
+++ b/net/llc/llc_c_ac.c
@@ -1437,7 +1437,7 @@ static void llc_process_tmr_ev(struct sock *sk, struct sk_buff *skb)
             llc_conn_state_process(sk, skb);
         else {
             llc_set_backlog_type(skb, LLC_EVENT);
-            sk_add_backlog(sk, skb);
+            __sk_add_backlog(sk, skb);
         }
     }
 }
diff --git a/net/llc/llc_conn.c b/net/llc/llc_conn.c
index a8dde9b010da..a12144da7974 100644
--- a/net/llc/llc_conn.c
+++ b/net/llc/llc_conn.c
@@ -827,7 +827,8 @@ void llc_conn_handler(struct llc_sap *sap, struct sk_buff *skb)
     else {
         dprintk("%s: adding to backlog...\n", __func__);
         llc_set_backlog_type(skb, LLC_PACKET);
-        sk_add_backlog(sk, skb);
+        if (sk_add_backlog(sk, skb))
+            goto drop_unlock;
     }
 out:
     bh_unlock_sock(sk);
diff --git a/net/mac80211/debugfs_netdev.c b/net/mac80211/debugfs_netdev.c
index 9affe2cd185f..b4ddb2f83914 100644
--- a/net/mac80211/debugfs_netdev.c
+++ b/net/mac80211/debugfs_netdev.c
@@ -48,20 +48,24 @@ static ssize_t ieee80211_if_write(
     ssize_t (*write)(struct ieee80211_sub_if_data *, const char *, int))
 {
     u8 *buf;
-    ssize_t ret = -ENODEV;
+    ssize_t ret;

-    buf = kzalloc(count, GFP_KERNEL);
+    buf = kmalloc(count, GFP_KERNEL);
     if (!buf)
         return -ENOMEM;

+    ret = -EFAULT;
     if (copy_from_user(buf, userbuf, count))
-        return -EFAULT;
+        goto freebuf;

+    ret = -ENODEV;
     rtnl_lock();
     if (sdata->dev->reg_state == NETREG_REGISTERED)
         ret = (*write)(sdata, buf, count);
     rtnl_unlock();

+freebuf:
+    kfree(buf);
     return ret;
 }

diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index 41812a15eea0..be5f723d643a 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c | |||
@@ -177,7 +177,8 @@ static u32 ieee80211_enable_ht(struct ieee80211_sub_if_data *sdata, | |||
177 | sta = sta_info_get(sdata, bssid); | 177 | sta = sta_info_get(sdata, bssid); |
178 | if (sta) | 178 | if (sta) |
179 | rate_control_rate_update(local, sband, sta, | 179 | rate_control_rate_update(local, sband, sta, |
180 | IEEE80211_RC_HT_CHANGED); | 180 | IEEE80211_RC_HT_CHANGED, |
181 | local->oper_channel_type); | ||
181 | rcu_read_unlock(); | 182 | rcu_read_unlock(); |
182 | } | 183 | } |
183 | 184 | ||
@@ -435,10 +436,12 @@ static void ieee80211_enable_ps(struct ieee80211_local *local, | |||
435 | if (local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK) | 436 | if (local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK) |
436 | ieee80211_send_nullfunc(local, sdata, 1); | 437 | ieee80211_send_nullfunc(local, sdata, 1); |
437 | 438 | ||
438 | if (!(local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS)) { | 439 | if ((local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK) && |
439 | conf->flags |= IEEE80211_CONF_PS; | 440 | (local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS)) |
440 | ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS); | 441 | return; |
441 | } | 442 | |
443 | conf->flags |= IEEE80211_CONF_PS; | ||
444 | ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS); | ||
442 | } | 445 | } |
443 | } | 446 | } |
444 | 447 | ||
@@ -557,7 +560,8 @@ void ieee80211_dynamic_ps_enable_work(struct work_struct *work) | |||
557 | (!(ifmgd->flags & IEEE80211_STA_NULLFUNC_ACKED))) | 560 | (!(ifmgd->flags & IEEE80211_STA_NULLFUNC_ACKED))) |
558 | ieee80211_send_nullfunc(local, sdata, 1); | 561 | ieee80211_send_nullfunc(local, sdata, 1); |
559 | 562 | ||
560 | if (!(local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS) || | 563 | if (!((local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS) && |
564 | (local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK)) || | ||
561 | (ifmgd->flags & IEEE80211_STA_NULLFUNC_ACKED)) { | 565 | (ifmgd->flags & IEEE80211_STA_NULLFUNC_ACKED)) { |
562 | ifmgd->flags &= ~IEEE80211_STA_NULLFUNC_ACKED; | 566 | ifmgd->flags &= ~IEEE80211_STA_NULLFUNC_ACKED; |
563 | local->hw.conf.flags |= IEEE80211_CONF_PS; | 567 | local->hw.conf.flags |= IEEE80211_CONF_PS; |
@@ -1893,8 +1897,20 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata, | |||
1893 | 1897 | ||
1894 | mutex_lock(&ifmgd->mtx); | 1898 | mutex_lock(&ifmgd->mtx); |
1895 | if (ifmgd->associated) { | 1899 | if (ifmgd->associated) { |
1896 | mutex_unlock(&ifmgd->mtx); | 1900 | if (!req->prev_bssid || |
1897 | return -EALREADY; | 1901 | memcmp(req->prev_bssid, ifmgd->associated->bssid, |
1902 | ETH_ALEN)) { | ||
1903 | /* | ||
1904 | * We are already associated and the request was not a | ||
1905 | * reassociation request from the current BSS, so | ||
1906 | * reject it. | ||
1907 | */ | ||
1908 | mutex_unlock(&ifmgd->mtx); | ||
1909 | return -EALREADY; | ||
1910 | } | ||
1911 | |||
1912 | /* Trying to reassociate - clear previous association state */ | ||
1913 | ieee80211_set_disassoc(sdata); | ||
1898 | } | 1914 | } |
1899 | mutex_unlock(&ifmgd->mtx); | 1915 | mutex_unlock(&ifmgd->mtx); |
1900 | 1916 | ||
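The power-save hunks above gate immediate PS entry on a pair of hardware capability flags. A driver that builds nullfunc frames in the stack and reports TX ACK status would advertise both at registration time, roughly as below; example_setup_hw() is an invented helper, not code from this series.

#include <net/mac80211.h>

static void example_setup_hw(struct ieee80211_hw *hw)
{
        /* with both flags set, mac80211 defers IEEE80211_CONF_PS until
         * the nullfunc frame has actually been ACKed by the AP */
        hw->flags |= IEEE80211_HW_PS_NULLFUNC_STACK |
                     IEEE80211_HW_REPORTS_TX_ACK_STATUS;
}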
diff --git a/net/mac80211/rate.h b/net/mac80211/rate.h index b6108bca96d4..065a96190e32 100644 --- a/net/mac80211/rate.h +++ b/net/mac80211/rate.h | |||
@@ -66,7 +66,8 @@ static inline void rate_control_rate_init(struct sta_info *sta) | |||
66 | 66 | ||
67 | static inline void rate_control_rate_update(struct ieee80211_local *local, | 67 | static inline void rate_control_rate_update(struct ieee80211_local *local, |
68 | struct ieee80211_supported_band *sband, | 68 | struct ieee80211_supported_band *sband, |
69 | struct sta_info *sta, u32 changed) | 69 | struct sta_info *sta, u32 changed, |
70 | enum nl80211_channel_type oper_chan_type) | ||
70 | { | 71 | { |
71 | struct rate_control_ref *ref = local->rate_ctrl; | 72 | struct rate_control_ref *ref = local->rate_ctrl; |
72 | struct ieee80211_sta *ista = &sta->sta; | 73 | struct ieee80211_sta *ista = &sta->sta; |
@@ -74,7 +75,7 @@ static inline void rate_control_rate_update(struct ieee80211_local *local, | |||
74 | 75 | ||
75 | if (ref && ref->ops->rate_update) | 76 | if (ref && ref->ops->rate_update) |
76 | ref->ops->rate_update(ref->priv, sband, ista, | 77 | ref->ops->rate_update(ref->priv, sband, ista, |
77 | priv_sta, changed); | 78 | priv_sta, changed, oper_chan_type); |
78 | } | 79 | } |
79 | 80 | ||
80 | static inline void *rate_control_alloc_sta(struct rate_control_ref *ref, | 81 | static inline void *rate_control_alloc_sta(struct rate_control_ref *ref, |
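With the rate.h change, a rate-control algorithm's rate_update hook receives the operating channel type as an extra argument. A sketch of how such a hook might use it; example_rate_update() and struct example_sta_priv are invented for illustration.

#include <net/mac80211.h>

struct example_sta_priv {
        bool use_40mhz;
};

static void example_rate_update(void *priv,
                                struct ieee80211_supported_band *sband,
                                struct ieee80211_sta *sta, void *priv_sta,
                                u32 changed,
                                enum nl80211_channel_type oper_chan_type)
{
        struct example_sta_priv *esp = priv_sta;

        /* re-evaluate 40 MHz use when the HT configuration changes */
        if (changed & IEEE80211_RC_HT_CHANGED)
                esp->use_40mhz = (oper_chan_type == NL80211_CHAN_HT40MINUS ||
                                  oper_chan_type == NL80211_CHAN_HT40PLUS);
}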
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c index 211c475f73c6..56422d894351 100644 --- a/net/mac80211/sta_info.c +++ b/net/mac80211/sta_info.c | |||
@@ -434,6 +434,7 @@ int sta_info_insert_rcu(struct sta_info *sta) __acquires(RCU) | |||
434 | /* check if STA exists already */ | 434 | /* check if STA exists already */ |
435 | if (sta_info_get_bss(sdata, sta->sta.addr)) { | 435 | if (sta_info_get_bss(sdata, sta->sta.addr)) { |
436 | spin_unlock_irqrestore(&local->sta_lock, flags); | 436 | spin_unlock_irqrestore(&local->sta_lock, flags); |
437 | mutex_unlock(&local->sta_mtx); | ||
437 | rcu_read_lock(); | 438 | rcu_read_lock(); |
438 | err = -EEXIST; | 439 | err = -EEXIST; |
439 | goto out_free; | 440 | goto out_free; |
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index 031a5e6fb4aa..1612d417d10c 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c | |||
@@ -1688,6 +1688,8 @@ static int packet_dev_mc(struct net_device *dev, struct packet_mclist *i, | |||
1688 | { | 1688 | { |
1689 | switch (i->type) { | 1689 | switch (i->type) { |
1690 | case PACKET_MR_MULTICAST: | 1690 | case PACKET_MR_MULTICAST: |
1691 | if (i->alen != dev->addr_len) | ||
1692 | return -EINVAL; | ||
1691 | if (what > 0) | 1693 | if (what > 0) |
1692 | return dev_mc_add(dev, i->addr, i->alen, 0); | 1694 | return dev_mc_add(dev, i->addr, i->alen, 0); |
1693 | else | 1695 | else |
@@ -1700,6 +1702,8 @@ static int packet_dev_mc(struct net_device *dev, struct packet_mclist *i, | |||
1700 | return dev_set_allmulti(dev, what); | 1702 | return dev_set_allmulti(dev, what); |
1701 | break; | 1703 | break; |
1702 | case PACKET_MR_UNICAST: | 1704 | case PACKET_MR_UNICAST: |
1705 | if (i->alen != dev->addr_len) | ||
1706 | return -EINVAL; | ||
1703 | if (what > 0) | 1707 | if (what > 0) |
1704 | return dev_unicast_add(dev, i->addr); | 1708 | return dev_unicast_add(dev, i->addr); |
1705 | else | 1709 | else |
@@ -1734,7 +1738,7 @@ static int packet_mc_add(struct sock *sk, struct packet_mreq_max *mreq) | |||
1734 | goto done; | 1738 | goto done; |
1735 | 1739 | ||
1736 | err = -EINVAL; | 1740 | err = -EINVAL; |
1737 | if (mreq->mr_alen != dev->addr_len) | 1741 | if (mreq->mr_alen > dev->addr_len) |
1738 | goto done; | 1742 | goto done; |
1739 | 1743 | ||
1740 | err = -ENOBUFS; | 1744 | err = -ENOBUFS; |
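Seen from userspace, the af_packet.c checks above mean a membership request must carry a hardware address of exactly the device's address length, i.e. ETH_ALEN bytes on Ethernet. An illustrative, well-formed PACKET_ADD_MEMBERSHIP call; join_hw_multicast() is an invented helper.

#include <string.h>
#include <sys/socket.h>
#include <linux/if_packet.h>
#include <linux/if_ether.h>
#include <net/if.h>

/* fd must be an AF_PACKET socket */
static int join_hw_multicast(int fd, const char *ifname,
                             const unsigned char mac[ETH_ALEN])
{
        struct packet_mreq mreq;

        memset(&mreq, 0, sizeof(mreq));
        mreq.mr_ifindex = if_nametoindex(ifname);
        mreq.mr_type = PACKET_MR_MULTICAST;
        mreq.mr_alen = ETH_ALEN;        /* must match the device address length */
        memcpy(mreq.mr_address, mac, ETH_ALEN);

        return setsockopt(fd, SOL_PACKET, PACKET_ADD_MEMBERSHIP,
                          &mreq, sizeof(mreq));
}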
diff --git a/net/rfkill/input.c b/net/rfkill/input.c index a7295ad5f9cb..3713d7ecab96 100644 --- a/net/rfkill/input.c +++ b/net/rfkill/input.c | |||
@@ -212,6 +212,9 @@ static void rfkill_event(struct input_handle *handle, unsigned int type, | |||
212 | case KEY_WIMAX: | 212 | case KEY_WIMAX: |
213 | rfkill_schedule_toggle(RFKILL_TYPE_WIMAX); | 213 | rfkill_schedule_toggle(RFKILL_TYPE_WIMAX); |
214 | break; | 214 | break; |
215 | case KEY_RFKILL: | ||
216 | rfkill_schedule_toggle(RFKILL_TYPE_ALL); | ||
217 | break; | ||
215 | } | 218 | } |
216 | } else if (type == EV_SW && code == SW_RFKILL_ALL) | 219 | } else if (type == EV_SW && code == SW_RFKILL_ALL) |
217 | rfkill_schedule_evsw_rfkillall(data); | 220 | rfkill_schedule_evsw_rfkillall(data); |
@@ -295,6 +298,11 @@ static const struct input_device_id rfkill_ids[] = { | |||
295 | .keybit = { [BIT_WORD(KEY_WIMAX)] = BIT_MASK(KEY_WIMAX) }, | 298 | .keybit = { [BIT_WORD(KEY_WIMAX)] = BIT_MASK(KEY_WIMAX) }, |
296 | }, | 299 | }, |
297 | { | 300 | { |
301 | .flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_KEYBIT, | ||
302 | .evbit = { BIT_MASK(EV_KEY) }, | ||
303 | .keybit = { [BIT_WORD(KEY_RFKILL)] = BIT_MASK(KEY_RFKILL) }, | ||
304 | }, | ||
305 | { | ||
298 | .flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_SWBIT, | 306 | .flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_SWBIT, |
299 | .evbit = { BIT(EV_SW) }, | 307 | .evbit = { BIT(EV_SW) }, |
300 | .swbit = { [BIT_WORD(SW_RFKILL_ALL)] = BIT_MASK(SW_RFKILL_ALL) }, | 308 | .swbit = { [BIT_WORD(SW_RFKILL_ALL)] = BIT_MASK(SW_RFKILL_ALL) }, |
diff --git a/net/sctp/input.c b/net/sctp/input.c index c0c973e67add..3d74b264ea22 100644 --- a/net/sctp/input.c +++ b/net/sctp/input.c | |||
@@ -75,7 +75,7 @@ static struct sctp_association *__sctp_lookup_association( | |||
75 | const union sctp_addr *peer, | 75 | const union sctp_addr *peer, |
76 | struct sctp_transport **pt); | 76 | struct sctp_transport **pt); |
77 | 77 | ||
78 | static void sctp_add_backlog(struct sock *sk, struct sk_buff *skb); | 78 | static int sctp_add_backlog(struct sock *sk, struct sk_buff *skb); |
79 | 79 | ||
80 | 80 | ||
81 | /* Calculate the SCTP checksum of an SCTP packet. */ | 81 | /* Calculate the SCTP checksum of an SCTP packet. */ |
@@ -265,8 +265,13 @@ int sctp_rcv(struct sk_buff *skb) | |||
265 | } | 265 | } |
266 | 266 | ||
267 | if (sock_owned_by_user(sk)) { | 267 | if (sock_owned_by_user(sk)) { |
268 | if (sctp_add_backlog(sk, skb)) { | ||
269 | sctp_bh_unlock_sock(sk); | ||
270 | sctp_chunk_free(chunk); | ||
271 | skb = NULL; /* sctp_chunk_free already freed the skb */ | ||
272 | goto discard_release; | ||
273 | } | ||
268 | SCTP_INC_STATS_BH(SCTP_MIB_IN_PKT_BACKLOG); | 274 | SCTP_INC_STATS_BH(SCTP_MIB_IN_PKT_BACKLOG); |
269 | sctp_add_backlog(sk, skb); | ||
270 | } else { | 275 | } else { |
271 | SCTP_INC_STATS_BH(SCTP_MIB_IN_PKT_SOFTIRQ); | 276 | SCTP_INC_STATS_BH(SCTP_MIB_IN_PKT_SOFTIRQ); |
272 | sctp_inq_push(&chunk->rcvr->inqueue, chunk); | 277 | sctp_inq_push(&chunk->rcvr->inqueue, chunk); |
@@ -336,8 +341,10 @@ int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb) | |||
336 | sctp_bh_lock_sock(sk); | 341 | sctp_bh_lock_sock(sk); |
337 | 342 | ||
338 | if (sock_owned_by_user(sk)) { | 343 | if (sock_owned_by_user(sk)) { |
339 | sk_add_backlog(sk, skb); | 344 | if (sk_add_backlog(sk, skb)) |
340 | backloged = 1; | 345 | sctp_chunk_free(chunk); |
346 | else | ||
347 | backloged = 1; | ||
341 | } else | 348 | } else |
342 | sctp_inq_push(inqueue, chunk); | 349 | sctp_inq_push(inqueue, chunk); |
343 | 350 | ||
@@ -362,22 +369,27 @@ done: | |||
362 | return 0; | 369 | return 0; |
363 | } | 370 | } |
364 | 371 | ||
365 | static void sctp_add_backlog(struct sock *sk, struct sk_buff *skb) | 372 | static int sctp_add_backlog(struct sock *sk, struct sk_buff *skb) |
366 | { | 373 | { |
367 | struct sctp_chunk *chunk = SCTP_INPUT_CB(skb)->chunk; | 374 | struct sctp_chunk *chunk = SCTP_INPUT_CB(skb)->chunk; |
368 | struct sctp_ep_common *rcvr = chunk->rcvr; | 375 | struct sctp_ep_common *rcvr = chunk->rcvr; |
376 | int ret; | ||
369 | 377 | ||
370 | /* Hold the assoc/ep while hanging on the backlog queue. | 378 | ret = sk_add_backlog(sk, skb); |
371 | * This way, we know structures we need will not disappear from us | 379 | if (!ret) { |
372 | */ | 380 | /* Hold the assoc/ep while hanging on the backlog queue. |
373 | if (SCTP_EP_TYPE_ASSOCIATION == rcvr->type) | 381 | * This way, we know structures we need will not disappear |
374 | sctp_association_hold(sctp_assoc(rcvr)); | 382 | * from us |
375 | else if (SCTP_EP_TYPE_SOCKET == rcvr->type) | 383 | */ |
376 | sctp_endpoint_hold(sctp_ep(rcvr)); | 384 | if (SCTP_EP_TYPE_ASSOCIATION == rcvr->type) |
377 | else | 385 | sctp_association_hold(sctp_assoc(rcvr)); |
378 | BUG(); | 386 | else if (SCTP_EP_TYPE_SOCKET == rcvr->type) |
387 | sctp_endpoint_hold(sctp_ep(rcvr)); | ||
388 | else | ||
389 | BUG(); | ||
390 | } | ||
391 | return ret; | ||
379 | 392 | ||
380 | sk_add_backlog(sk, skb); | ||
381 | } | 393 | } |
382 | 394 | ||
383 | /* Handle icmp frag needed error. */ | 395 | /* Handle icmp frag needed error. */ |
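The sctp_add_backlog() rewrite above ties the assoc/endpoint reference to a successful queueing, so the backlog handler later releases exactly one reference per packet it dequeues. The general shape reduced to a sketch with invented names (struct example_rcvr, example_hold(), example_queue_for_backlog()).

#include <net/sock.h>

struct example_rcvr {
        atomic_t refcnt;
};

static void example_hold(struct example_rcvr *rcvr)
{
        atomic_inc(&rcvr->refcnt);
}

static int example_queue_for_backlog(struct sock *sk, struct sk_buff *skb,
                                     struct example_rcvr *rcvr)
{
        int ret = sk_add_backlog(sk, skb);

        if (!ret)
                example_hold(rcvr);     /* dropped again by the backlog handler */

        return ret;                     /* non-zero: caller must free the skb */
}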
diff --git a/net/sctp/socket.c b/net/sctp/socket.c index f6d1e59c4151..dfc5c127efd4 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c | |||
@@ -3720,6 +3720,9 @@ SCTP_STATIC int sctp_init_sock(struct sock *sk) | |||
3720 | SCTP_DBG_OBJCNT_INC(sock); | 3720 | SCTP_DBG_OBJCNT_INC(sock); |
3721 | percpu_counter_inc(&sctp_sockets_allocated); | 3721 | percpu_counter_inc(&sctp_sockets_allocated); |
3722 | 3722 | ||
3723 | /* Set socket backlog limit. */ | ||
3724 | sk->sk_backlog.limit = sysctl_sctp_rmem[1]; | ||
3725 | |||
3723 | local_bh_disable(); | 3726 | local_bh_disable(); |
3724 | sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1); | 3727 | sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1); |
3725 | local_bh_enable(); | 3728 | local_bh_enable(); |
diff --git a/net/sunrpc/xprtrdma/transport.c b/net/sunrpc/xprtrdma/transport.c index 7018eef1dcdd..f96c2fe6137b 100644 --- a/net/sunrpc/xprtrdma/transport.c +++ b/net/sunrpc/xprtrdma/transport.c | |||
@@ -160,16 +160,15 @@ xprt_rdma_format_addresses(struct rpc_xprt *xprt) | |||
160 | (void)rpc_ntop(sap, buf, sizeof(buf)); | 160 | (void)rpc_ntop(sap, buf, sizeof(buf)); |
161 | xprt->address_strings[RPC_DISPLAY_ADDR] = kstrdup(buf, GFP_KERNEL); | 161 | xprt->address_strings[RPC_DISPLAY_ADDR] = kstrdup(buf, GFP_KERNEL); |
162 | 162 | ||
163 | (void)snprintf(buf, sizeof(buf), "%u", rpc_get_port(sap)); | 163 | snprintf(buf, sizeof(buf), "%u", rpc_get_port(sap)); |
164 | xprt->address_strings[RPC_DISPLAY_PORT] = kstrdup(buf, GFP_KERNEL); | 164 | xprt->address_strings[RPC_DISPLAY_PORT] = kstrdup(buf, GFP_KERNEL); |
165 | 165 | ||
166 | xprt->address_strings[RPC_DISPLAY_PROTO] = "rdma"; | 166 | xprt->address_strings[RPC_DISPLAY_PROTO] = "rdma"; |
167 | 167 | ||
168 | (void)snprintf(buf, sizeof(buf), "%02x%02x%02x%02x", | 168 | snprintf(buf, sizeof(buf), "%08x", ntohl(sin->sin_addr.s_addr)); |
169 | NIPQUAD(sin->sin_addr.s_addr)); | ||
170 | xprt->address_strings[RPC_DISPLAY_HEX_ADDR] = kstrdup(buf, GFP_KERNEL); | 169 | xprt->address_strings[RPC_DISPLAY_HEX_ADDR] = kstrdup(buf, GFP_KERNEL); |
171 | 170 | ||
172 | (void)snprintf(buf, sizeof(buf), "%4hx", rpc_get_port(sap)); | 171 | snprintf(buf, sizeof(buf), "%4hx", rpc_get_port(sap)); |
173 | xprt->address_strings[RPC_DISPLAY_HEX_PORT] = kstrdup(buf, GFP_KERNEL); | 172 | xprt->address_strings[RPC_DISPLAY_HEX_PORT] = kstrdup(buf, GFP_KERNEL); |
174 | 173 | ||
175 | /* netid */ | 174 | /* netid */ |
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index 712412982cee..75ab08eac66b 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c | |||
@@ -297,12 +297,11 @@ static void xs_format_common_peer_addresses(struct rpc_xprt *xprt) | |||
297 | switch (sap->sa_family) { | 297 | switch (sap->sa_family) { |
298 | case AF_INET: | 298 | case AF_INET: |
299 | sin = xs_addr_in(xprt); | 299 | sin = xs_addr_in(xprt); |
300 | (void)snprintf(buf, sizeof(buf), "%02x%02x%02x%02x", | 300 | snprintf(buf, sizeof(buf), "%08x", ntohl(sin->sin_addr.s_addr)); |
301 | NIPQUAD(sin->sin_addr.s_addr)); | ||
302 | break; | 301 | break; |
303 | case AF_INET6: | 302 | case AF_INET6: |
304 | sin6 = xs_addr_in6(xprt); | 303 | sin6 = xs_addr_in6(xprt); |
305 | (void)snprintf(buf, sizeof(buf), "%pi6", &sin6->sin6_addr); | 304 | snprintf(buf, sizeof(buf), "%pi6", &sin6->sin6_addr); |
306 | break; | 305 | break; |
307 | default: | 306 | default: |
308 | BUG(); | 307 | BUG(); |
@@ -315,10 +314,10 @@ static void xs_format_common_peer_ports(struct rpc_xprt *xprt) | |||
315 | struct sockaddr *sap = xs_addr(xprt); | 314 | struct sockaddr *sap = xs_addr(xprt); |
316 | char buf[128]; | 315 | char buf[128]; |
317 | 316 | ||
318 | (void)snprintf(buf, sizeof(buf), "%u", rpc_get_port(sap)); | 317 | snprintf(buf, sizeof(buf), "%u", rpc_get_port(sap)); |
319 | xprt->address_strings[RPC_DISPLAY_PORT] = kstrdup(buf, GFP_KERNEL); | 318 | xprt->address_strings[RPC_DISPLAY_PORT] = kstrdup(buf, GFP_KERNEL); |
320 | 319 | ||
321 | (void)snprintf(buf, sizeof(buf), "%4hx", rpc_get_port(sap)); | 320 | snprintf(buf, sizeof(buf), "%4hx", rpc_get_port(sap)); |
322 | xprt->address_strings[RPC_DISPLAY_HEX_PORT] = kstrdup(buf, GFP_KERNEL); | 321 | xprt->address_strings[RPC_DISPLAY_HEX_PORT] = kstrdup(buf, GFP_KERNEL); |
323 | } | 322 | } |
324 | 323 | ||
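Both sunrpc hunks replace the per-byte NIPQUAD formatting of the hex address string with a single "%08x" of the host-order value; the two produce the same output for IPv4, as this plain userspace snippet illustrates.

#include <stdio.h>
#include <arpa/inet.h>

int main(void)
{
        struct in_addr addr;
        char buf[16];

        if (inet_pton(AF_INET, "192.168.1.10", &addr) != 1)
                return 1;

        snprintf(buf, sizeof(buf), "%08x", ntohl(addr.s_addr));
        printf("%s\n", buf);    /* prints "c0a8010a" */
        return 0;
}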
diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c index 327011fcc407..78091375ca12 100644 --- a/net/tipc/bearer.c +++ b/net/tipc/bearer.c | |||
@@ -45,10 +45,10 @@ | |||
45 | 45 | ||
46 | #define MAX_ADDR_STR 32 | 46 | #define MAX_ADDR_STR 32 |
47 | 47 | ||
48 | static struct media *media_list = NULL; | 48 | static struct media media_list[MAX_MEDIA]; |
49 | static u32 media_count = 0; | 49 | static u32 media_count = 0; |
50 | 50 | ||
51 | struct bearer *tipc_bearers = NULL; | 51 | struct bearer tipc_bearers[MAX_BEARERS]; |
52 | 52 | ||
53 | /** | 53 | /** |
54 | * media_name_valid - validate media name | 54 | * media_name_valid - validate media name |
@@ -108,9 +108,11 @@ int tipc_register_media(u32 media_type, | |||
108 | int res = -EINVAL; | 108 | int res = -EINVAL; |
109 | 109 | ||
110 | write_lock_bh(&tipc_net_lock); | 110 | write_lock_bh(&tipc_net_lock); |
111 | if (!media_list) | ||
112 | goto exit; | ||
113 | 111 | ||
112 | if (tipc_mode != TIPC_NET_MODE) { | ||
113 | warn("Media <%s> rejected, not in networked mode yet\n", name); | ||
114 | goto exit; | ||
115 | } | ||
114 | if (!media_name_valid(name)) { | 116 | if (!media_name_valid(name)) { |
115 | warn("Media <%s> rejected, illegal name\n", name); | 117 | warn("Media <%s> rejected, illegal name\n", name); |
116 | goto exit; | 118 | goto exit; |
@@ -660,33 +662,10 @@ int tipc_disable_bearer(const char *name) | |||
660 | 662 | ||
661 | 663 | ||
662 | 664 | ||
663 | int tipc_bearer_init(void) | ||
664 | { | ||
665 | int res; | ||
666 | |||
667 | write_lock_bh(&tipc_net_lock); | ||
668 | tipc_bearers = kcalloc(MAX_BEARERS, sizeof(struct bearer), GFP_ATOMIC); | ||
669 | media_list = kcalloc(MAX_MEDIA, sizeof(struct media), GFP_ATOMIC); | ||
670 | if (tipc_bearers && media_list) { | ||
671 | res = 0; | ||
672 | } else { | ||
673 | kfree(tipc_bearers); | ||
674 | kfree(media_list); | ||
675 | tipc_bearers = NULL; | ||
676 | media_list = NULL; | ||
677 | res = -ENOMEM; | ||
678 | } | ||
679 | write_unlock_bh(&tipc_net_lock); | ||
680 | return res; | ||
681 | } | ||
682 | |||
683 | void tipc_bearer_stop(void) | 665 | void tipc_bearer_stop(void) |
684 | { | 666 | { |
685 | u32 i; | 667 | u32 i; |
686 | 668 | ||
687 | if (!tipc_bearers) | ||
688 | return; | ||
689 | |||
690 | for (i = 0; i < MAX_BEARERS; i++) { | 669 | for (i = 0; i < MAX_BEARERS; i++) { |
691 | if (tipc_bearers[i].active) | 670 | if (tipc_bearers[i].active) |
692 | tipc_bearers[i].publ.blocked = 1; | 671 | tipc_bearers[i].publ.blocked = 1; |
@@ -695,10 +674,6 @@ void tipc_bearer_stop(void) | |||
695 | if (tipc_bearers[i].active) | 674 | if (tipc_bearers[i].active) |
696 | bearer_disable(tipc_bearers[i].publ.name); | 675 | bearer_disable(tipc_bearers[i].publ.name); |
697 | } | 676 | } |
698 | kfree(tipc_bearers); | ||
699 | kfree(media_list); | ||
700 | tipc_bearers = NULL; | ||
701 | media_list = NULL; | ||
702 | media_count = 0; | 677 | media_count = 0; |
703 | } | 678 | } |
704 | 679 | ||
diff --git a/net/tipc/bearer.h b/net/tipc/bearer.h index ca5734892713..000228e93f9e 100644 --- a/net/tipc/bearer.h +++ b/net/tipc/bearer.h | |||
@@ -114,7 +114,7 @@ struct bearer_name { | |||
114 | 114 | ||
115 | struct link; | 115 | struct link; |
116 | 116 | ||
117 | extern struct bearer *tipc_bearers; | 117 | extern struct bearer tipc_bearers[]; |
118 | 118 | ||
119 | void tipc_media_addr_printf(struct print_buf *pb, struct tipc_media_addr *a); | 119 | void tipc_media_addr_printf(struct print_buf *pb, struct tipc_media_addr *a); |
120 | struct sk_buff *tipc_media_get_names(void); | 120 | struct sk_buff *tipc_media_get_names(void); |
diff --git a/net/tipc/link.c b/net/tipc/link.c index 6f50f6423f63..1a7e4665af80 100644 --- a/net/tipc/link.c +++ b/net/tipc/link.c | |||
@@ -1882,6 +1882,15 @@ void tipc_recv_msg(struct sk_buff *head, struct tipc_bearer *tb_ptr) | |||
1882 | (msg_destnode(msg) != tipc_own_addr))) | 1882 | (msg_destnode(msg) != tipc_own_addr))) |
1883 | goto cont; | 1883 | goto cont; |
1884 | 1884 | ||
1885 | /* Discard non-routeable messages destined for another node */ | ||
1886 | |||
1887 | if (unlikely(!msg_isdata(msg) && | ||
1888 | (msg_destnode(msg) != tipc_own_addr))) { | ||
1889 | if ((msg_user(msg) != CONN_MANAGER) && | ||
1890 | (msg_user(msg) != MSG_FRAGMENTER)) | ||
1891 | goto cont; | ||
1892 | } | ||
1893 | |||
1885 | /* Locate unicast link endpoint that should handle message */ | 1894 | /* Locate unicast link endpoint that should handle message */ |
1886 | 1895 | ||
1887 | n_ptr = tipc_node_find(msg_prevnode(msg)); | 1896 | n_ptr = tipc_node_find(msg_prevnode(msg)); |
diff --git a/net/tipc/net.c b/net/tipc/net.c index 7906608bf510..f25b1cdb64eb 100644 --- a/net/tipc/net.c +++ b/net/tipc/net.c | |||
@@ -116,7 +116,8 @@ | |||
116 | */ | 116 | */ |
117 | 117 | ||
118 | DEFINE_RWLOCK(tipc_net_lock); | 118 | DEFINE_RWLOCK(tipc_net_lock); |
119 | struct network tipc_net = { NULL }; | 119 | struct _zone *tipc_zones[256] = { NULL, }; |
120 | struct network tipc_net = { tipc_zones }; | ||
120 | 121 | ||
121 | struct tipc_node *tipc_net_select_remote_node(u32 addr, u32 ref) | 122 | struct tipc_node *tipc_net_select_remote_node(u32 addr, u32 ref) |
122 | { | 123 | { |
@@ -158,28 +159,12 @@ void tipc_net_send_external_routes(u32 dest) | |||
158 | } | 159 | } |
159 | } | 160 | } |
160 | 161 | ||
161 | static int net_init(void) | ||
162 | { | ||
163 | memset(&tipc_net, 0, sizeof(tipc_net)); | ||
164 | tipc_net.zones = kcalloc(tipc_max_zones + 1, sizeof(struct _zone *), GFP_ATOMIC); | ||
165 | if (!tipc_net.zones) { | ||
166 | return -ENOMEM; | ||
167 | } | ||
168 | return 0; | ||
169 | } | ||
170 | |||
171 | static void net_stop(void) | 162 | static void net_stop(void) |
172 | { | 163 | { |
173 | u32 z_num; | 164 | u32 z_num; |
174 | 165 | ||
175 | if (!tipc_net.zones) | 166 | for (z_num = 1; z_num <= tipc_max_zones; z_num++) |
176 | return; | ||
177 | |||
178 | for (z_num = 1; z_num <= tipc_max_zones; z_num++) { | ||
179 | tipc_zone_delete(tipc_net.zones[z_num]); | 167 | tipc_zone_delete(tipc_net.zones[z_num]); |
180 | } | ||
181 | kfree(tipc_net.zones); | ||
182 | tipc_net.zones = NULL; | ||
183 | } | 168 | } |
184 | 169 | ||
185 | static void net_route_named_msg(struct sk_buff *buf) | 170 | static void net_route_named_msg(struct sk_buff *buf) |
@@ -282,9 +267,7 @@ int tipc_net_start(u32 addr) | |||
282 | tipc_named_reinit(); | 267 | tipc_named_reinit(); |
283 | tipc_port_reinit(); | 268 | tipc_port_reinit(); |
284 | 269 | ||
285 | if ((res = tipc_bearer_init()) || | 270 | if ((res = tipc_cltr_init()) || |
286 | (res = net_init()) || | ||
287 | (res = tipc_cltr_init()) || | ||
288 | (res = tipc_bclink_init())) { | 271 | (res = tipc_bclink_init())) { |
289 | return res; | 272 | return res; |
290 | } | 273 | } |
diff --git a/net/tipc/socket.c b/net/tipc/socket.c index 1ea64f09cc45..4b235fc1c70f 100644 --- a/net/tipc/socket.c +++ b/net/tipc/socket.c | |||
@@ -1322,8 +1322,10 @@ static u32 dispatch(struct tipc_port *tport, struct sk_buff *buf) | |||
1322 | if (!sock_owned_by_user(sk)) { | 1322 | if (!sock_owned_by_user(sk)) { |
1323 | res = filter_rcv(sk, buf); | 1323 | res = filter_rcv(sk, buf); |
1324 | } else { | 1324 | } else { |
1325 | sk_add_backlog(sk, buf); | 1325 | if (sk_add_backlog(sk, buf)) |
1326 | res = TIPC_OK; | 1326 | res = TIPC_ERR_OVERLOAD; |
1327 | else | ||
1328 | res = TIPC_OK; | ||
1327 | } | 1329 | } |
1328 | bh_unlock_sock(sk); | 1330 | bh_unlock_sock(sk); |
1329 | 1331 | ||
diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c index ac91f0dfa144..ff123e56114a 100644 --- a/net/tipc/subscr.c +++ b/net/tipc/subscr.c | |||
@@ -76,19 +76,6 @@ struct top_srv { | |||
76 | static struct top_srv topsrv = { 0 }; | 76 | static struct top_srv topsrv = { 0 }; |
77 | 77 | ||
78 | /** | 78 | /** |
79 | * htohl - convert value to endianness used by destination | ||
80 | * @in: value to convert | ||
81 | * @swap: non-zero if endianness must be reversed | ||
82 | * | ||
83 | * Returns converted value | ||
84 | */ | ||
85 | |||
86 | static u32 htohl(u32 in, int swap) | ||
87 | { | ||
88 | return swap ? swab32(in) : in; | ||
89 | } | ||
90 | |||
91 | /** | ||
92 | * subscr_send_event - send a message containing a tipc_event to the subscriber | 79 | * subscr_send_event - send a message containing a tipc_event to the subscriber |
93 | * | 80 | * |
94 | * Note: Must not hold subscriber's server port lock, since tipc_send() will | 81 | * Note: Must not hold subscriber's server port lock, since tipc_send() will |
@@ -107,11 +94,11 @@ static void subscr_send_event(struct subscription *sub, | |||
107 | msg_sect.iov_base = (void *)&sub->evt; | 94 | msg_sect.iov_base = (void *)&sub->evt; |
108 | msg_sect.iov_len = sizeof(struct tipc_event); | 95 | msg_sect.iov_len = sizeof(struct tipc_event); |
109 | 96 | ||
110 | sub->evt.event = htohl(event, sub->swap); | 97 | sub->evt.event = htonl(event); |
111 | sub->evt.found_lower = htohl(found_lower, sub->swap); | 98 | sub->evt.found_lower = htonl(found_lower); |
112 | sub->evt.found_upper = htohl(found_upper, sub->swap); | 99 | sub->evt.found_upper = htonl(found_upper); |
113 | sub->evt.port.ref = htohl(port_ref, sub->swap); | 100 | sub->evt.port.ref = htonl(port_ref); |
114 | sub->evt.port.node = htohl(node, sub->swap); | 101 | sub->evt.port.node = htonl(node); |
115 | tipc_send(sub->server_ref, 1, &msg_sect); | 102 | tipc_send(sub->server_ref, 1, &msg_sect); |
116 | } | 103 | } |
117 | 104 | ||
@@ -287,16 +274,23 @@ static void subscr_cancel(struct tipc_subscr *s, | |||
287 | { | 274 | { |
288 | struct subscription *sub; | 275 | struct subscription *sub; |
289 | struct subscription *sub_temp; | 276 | struct subscription *sub_temp; |
277 | __u32 type, lower, upper; | ||
290 | int found = 0; | 278 | int found = 0; |
291 | 279 | ||
292 | /* Find first matching subscription, exit if not found */ | 280 | /* Find first matching subscription, exit if not found */ |
293 | 281 | ||
282 | type = ntohl(s->seq.type); | ||
283 | lower = ntohl(s->seq.lower); | ||
284 | upper = ntohl(s->seq.upper); | ||
285 | |||
294 | list_for_each_entry_safe(sub, sub_temp, &subscriber->subscription_list, | 286 | list_for_each_entry_safe(sub, sub_temp, &subscriber->subscription_list, |
295 | subscription_list) { | 287 | subscription_list) { |
296 | if (!memcmp(s, &sub->evt.s, sizeof(struct tipc_subscr))) { | 288 | if ((type == sub->seq.type) && |
297 | found = 1; | 289 | (lower == sub->seq.lower) && |
298 | break; | 290 | (upper == sub->seq.upper)) { |
299 | } | 291 | found = 1; |
292 | break; | ||
293 | } | ||
300 | } | 294 | } |
301 | if (!found) | 295 | if (!found) |
302 | return; | 296 | return; |
@@ -325,16 +319,10 @@ static struct subscription *subscr_subscribe(struct tipc_subscr *s, | |||
325 | struct subscriber *subscriber) | 319 | struct subscriber *subscriber) |
326 | { | 320 | { |
327 | struct subscription *sub; | 321 | struct subscription *sub; |
328 | int swap; | ||
329 | |||
330 | /* Determine subscriber's endianness */ | ||
331 | |||
332 | swap = !(s->filter & (TIPC_SUB_PORTS | TIPC_SUB_SERVICE)); | ||
333 | 322 | ||
334 | /* Detect & process a subscription cancellation request */ | 323 | /* Detect & process a subscription cancellation request */ |
335 | 324 | ||
336 | if (s->filter & htohl(TIPC_SUB_CANCEL, swap)) { | 325 | if (ntohl(s->filter) & TIPC_SUB_CANCEL) { |
337 | s->filter &= ~htohl(TIPC_SUB_CANCEL, swap); | ||
338 | subscr_cancel(s, subscriber); | 326 | subscr_cancel(s, subscriber); |
339 | return NULL; | 327 | return NULL; |
340 | } | 328 | } |
@@ -359,11 +347,11 @@ static struct subscription *subscr_subscribe(struct tipc_subscr *s, | |||
359 | 347 | ||
360 | /* Initialize subscription object */ | 348 | /* Initialize subscription object */ |
361 | 349 | ||
362 | sub->seq.type = htohl(s->seq.type, swap); | 350 | sub->seq.type = ntohl(s->seq.type); |
363 | sub->seq.lower = htohl(s->seq.lower, swap); | 351 | sub->seq.lower = ntohl(s->seq.lower); |
364 | sub->seq.upper = htohl(s->seq.upper, swap); | 352 | sub->seq.upper = ntohl(s->seq.upper); |
365 | sub->timeout = htohl(s->timeout, swap); | 353 | sub->timeout = ntohl(s->timeout); |
366 | sub->filter = htohl(s->filter, swap); | 354 | sub->filter = ntohl(s->filter); |
367 | if ((!(sub->filter & TIPC_SUB_PORTS) == | 355 | if ((!(sub->filter & TIPC_SUB_PORTS) == |
368 | !(sub->filter & TIPC_SUB_SERVICE)) || | 356 | !(sub->filter & TIPC_SUB_SERVICE)) || |
369 | (sub->seq.lower > sub->seq.upper)) { | 357 | (sub->seq.lower > sub->seq.upper)) { |
@@ -376,7 +364,6 @@ static struct subscription *subscr_subscribe(struct tipc_subscr *s, | |||
376 | INIT_LIST_HEAD(&sub->nameseq_list); | 364 | INIT_LIST_HEAD(&sub->nameseq_list); |
377 | list_add(&sub->subscription_list, &subscriber->subscription_list); | 365 | list_add(&sub->subscription_list, &subscriber->subscription_list); |
378 | sub->server_ref = subscriber->port_ref; | 366 | sub->server_ref = subscriber->port_ref; |
379 | sub->swap = swap; | ||
380 | memcpy(&sub->evt.s, s, sizeof(struct tipc_subscr)); | 367 | memcpy(&sub->evt.s, s, sizeof(struct tipc_subscr)); |
381 | atomic_inc(&topsrv.subscription_count); | 368 | atomic_inc(&topsrv.subscription_count); |
382 | if (sub->timeout != TIPC_WAIT_FOREVER) { | 369 | if (sub->timeout != TIPC_WAIT_FOREVER) { |
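With htohl() and the per-subscriber swap flag removed, subscr.c now expects subscription requests in network byte order throughout. A subscriber would fill struct tipc_subscr with htonl(), roughly as in this illustrative userspace sketch; field names follow linux/tipc.h, the values are arbitrary.

#include <string.h>
#include <arpa/inet.h>
#include <linux/tipc.h>

static void fill_subscription(struct tipc_subscr *s, __u32 type)
{
        memset(s, 0, sizeof(*s));
        s->seq.type  = htonl(type);
        s->seq.lower = htonl(0);
        s->seq.upper = htonl(~0U);
        s->timeout   = htonl(TIPC_WAIT_FOREVER);
        s->filter    = htonl(TIPC_SUB_SERVICE);
}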
diff --git a/net/tipc/subscr.h b/net/tipc/subscr.h index 45d89bf4d202..c20f496d95b2 100644 --- a/net/tipc/subscr.h +++ b/net/tipc/subscr.h | |||
@@ -53,7 +53,6 @@ typedef void (*tipc_subscr_event) (struct subscription *sub, | |||
53 | * @nameseq_list: adjacent subscriptions in name sequence's subscription list | 53 | * @nameseq_list: adjacent subscriptions in name sequence's subscription list |
54 | * @subscription_list: adjacent subscriptions in subscriber's subscription list | 54 | * @subscription_list: adjacent subscriptions in subscriber's subscription list |
55 | * @server_ref: object reference of server port associated with subscription | 55 | * @server_ref: object reference of server port associated with subscription |
56 | * @swap: indicates if subscriber uses opposite endianness in its messages | ||
57 | * @evt: template for events generated by subscription | 56 | * @evt: template for events generated by subscription |
58 | */ | 57 | */ |
59 | 58 | ||
@@ -66,7 +65,6 @@ struct subscription { | |||
66 | struct list_head nameseq_list; | 65 | struct list_head nameseq_list; |
67 | struct list_head subscription_list; | 66 | struct list_head subscription_list; |
68 | u32 server_ref; | 67 | u32 server_ref; |
69 | int swap; | ||
70 | struct tipc_event evt; | 68 | struct tipc_event evt; |
71 | }; | 69 | }; |
72 | 70 | ||
diff --git a/net/x25/x25_dev.c b/net/x25/x25_dev.c index 3e1efe534645..52e304212241 100644 --- a/net/x25/x25_dev.c +++ b/net/x25/x25_dev.c | |||
@@ -53,7 +53,7 @@ static int x25_receive_data(struct sk_buff *skb, struct x25_neigh *nb) | |||
53 | if (!sock_owned_by_user(sk)) { | 53 | if (!sock_owned_by_user(sk)) { |
54 | queued = x25_process_rx_frame(sk, skb); | 54 | queued = x25_process_rx_frame(sk, skb); |
55 | } else { | 55 | } else { |
56 | sk_add_backlog(sk, skb); | 56 | queued = !sk_add_backlog(sk, skb); |
57 | } | 57 | } |
58 | bh_unlock_sock(sk); | 58 | bh_unlock_sock(sk); |
59 | sock_put(sk); | 59 | sock_put(sk); |
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index 34a5ef8316e7..843e066649cb 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c | |||
@@ -1372,7 +1372,8 @@ static inline int xfrm_init_path(struct xfrm_dst *path, struct dst_entry *dst, | |||
1372 | return err; | 1372 | return err; |
1373 | } | 1373 | } |
1374 | 1374 | ||
1375 | static inline int xfrm_fill_dst(struct xfrm_dst *xdst, struct net_device *dev) | 1375 | static inline int xfrm_fill_dst(struct xfrm_dst *xdst, struct net_device *dev, |
1376 | struct flowi *fl) | ||
1376 | { | 1377 | { |
1377 | struct xfrm_policy_afinfo *afinfo = | 1378 | struct xfrm_policy_afinfo *afinfo = |
1378 | xfrm_policy_get_afinfo(xdst->u.dst.ops->family); | 1379 | xfrm_policy_get_afinfo(xdst->u.dst.ops->family); |
@@ -1381,7 +1382,7 @@ static inline int xfrm_fill_dst(struct xfrm_dst *xdst, struct net_device *dev) | |||
1381 | if (!afinfo) | 1382 | if (!afinfo) |
1382 | return -EINVAL; | 1383 | return -EINVAL; |
1383 | 1384 | ||
1384 | err = afinfo->fill_dst(xdst, dev); | 1385 | err = afinfo->fill_dst(xdst, dev, fl); |
1385 | 1386 | ||
1386 | xfrm_policy_put_afinfo(afinfo); | 1387 | xfrm_policy_put_afinfo(afinfo); |
1387 | 1388 | ||
@@ -1486,7 +1487,7 @@ static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy, | |||
1486 | for (dst_prev = dst0; dst_prev != dst; dst_prev = dst_prev->child) { | 1487 | for (dst_prev = dst0; dst_prev != dst; dst_prev = dst_prev->child) { |
1487 | struct xfrm_dst *xdst = (struct xfrm_dst *)dst_prev; | 1488 | struct xfrm_dst *xdst = (struct xfrm_dst *)dst_prev; |
1488 | 1489 | ||
1489 | err = xfrm_fill_dst(xdst, dev); | 1490 | err = xfrm_fill_dst(xdst, dev, fl); |
1490 | if (err) | 1491 | if (err) |
1491 | goto free_dst; | 1492 | goto free_dst; |
1492 | 1493 | ||
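After the xfrm_policy.c change, each address family's fill_dst hook also receives the flow the bundle is being built for. The new hook shape, shown as a deliberately minimal stub with an invented name; the real implementations live in the per-family xfrm policy code.

#include <net/xfrm.h>

static int example_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
                            struct flowi *fl)
{
        /* per-family code copies route, metric and neighbour state here;
         * the added fl argument lets it also record the resolved flow */
        return 0;
}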