Diffstat (limited to 'net')
46 files changed, 432 insertions, 320 deletions
diff --git a/net/bluetooth/hci_sysfs.c b/net/bluetooth/hci_sysfs.c
index 1a79a6c7e30e..cafb55b0cea5 100644
--- a/net/bluetooth/hci_sysfs.c
+++ b/net/bluetooth/hci_sysfs.c
@@ -3,6 +3,7 @@
 #include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/debugfs.h>
+#include <linux/seq_file.h>
 
 #include <net/bluetooth/bluetooth.h>
 #include <net/bluetooth/hci_core.h>
@@ -405,20 +406,11 @@ static struct device_type bt_host = {
     .release = bt_host_release,
 };
 
-static int inquiry_cache_open(struct inode *inode, struct file *file)
-{
-    file->private_data = inode->i_private;
-    return 0;
-}
-
-static ssize_t inquiry_cache_read(struct file *file, char __user *userbuf,
-                                  size_t count, loff_t *ppos)
+static int inquiry_cache_show(struct seq_file *f, void *p)
 {
-    struct hci_dev *hdev = file->private_data;
+    struct hci_dev *hdev = f->private;
     struct inquiry_cache *cache = &hdev->inq_cache;
     struct inquiry_entry *e;
-    char buf[4096];
-    int n = 0;
 
     hci_dev_lock_bh(hdev);
@@ -426,23 +418,30 @@ static ssize_t inquiry_cache_read(struct file *file, char __user *userbuf,
         struct inquiry_data *data = &e->data;
         bdaddr_t bdaddr;
         baswap(&bdaddr, &data->bdaddr);
-        n += sprintf(buf + n, "%s %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
+        seq_printf(f, "%s %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
                    batostr(&bdaddr),
                    data->pscan_rep_mode, data->pscan_period_mode,
                    data->pscan_mode, data->dev_class[2],
                    data->dev_class[1], data->dev_class[0],
                    __le16_to_cpu(data->clock_offset),
                    data->rssi, data->ssp_mode, e->timestamp);
     }
 
     hci_dev_unlock_bh(hdev);
 
-    return simple_read_from_buffer(userbuf, count, ppos, buf, n);
+    return 0;
+}
+
+static int inquiry_cache_open(struct inode *inode, struct file *file)
+{
+    return single_open(file, inquiry_cache_show, inode->i_private);
 }
 
 static const struct file_operations inquiry_cache_fops = {
     .open = inquiry_cache_open,
-    .read = inquiry_cache_read,
+    .read = seq_read,
+    .llseek = seq_lseek,
+    .release = single_release,
 };
 
 int hci_register_sysfs(struct hci_dev *hdev)
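The hci_sysfs.c hunks above replace a hand-rolled debugfs read() (a fixed char buf[4096] filled with sprintf() and copied out with simple_read_from_buffer()) with the seq_file single_open() helpers, which handle buffering and the copy to user space and cannot truncate a long inquiry cache. For reference, a minimal sketch of that idiom; my_show, my_open, my_fops and struct my_dev are hypothetical placeholder names, not part of this patch:

#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/seq_file.h>

/* show() callback: seq_printf() appends to a buffer that the seq_file core
 * grows and hands out to readers as needed. */
static int my_show(struct seq_file *f, void *p)
{
    struct my_dev *mdev = f->private;   /* whatever was passed to single_open() */

    seq_printf(f, "state: %d\n", mdev->state);
    return 0;
}

static int my_open(struct inode *inode, struct file *file)
{
    /* inode->i_private carries the per-device pointer, as in the hunk above */
    return single_open(file, my_show, inode->i_private);
}

static const struct file_operations my_fops = {
    .open = my_open,
    .read = seq_read,          /* seq_file does the user-space copying */
    .llseek = seq_lseek,
    .release = single_release,
};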
diff --git a/net/bridge/Kconfig b/net/bridge/Kconfig
index 19a6b9629c51..d115d5cea5b6 100644
--- a/net/bridge/Kconfig
+++ b/net/bridge/Kconfig
@@ -35,6 +35,7 @@ config BRIDGE
 config BRIDGE_IGMP_SNOOPING
     bool "IGMP snooping"
     depends on BRIDGE
+    depends on INET
     default y
     ---help---
       If you say Y here, then the Ethernet bridge will be able selectively
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index 2559fb539836..12ce1eaa4f3e 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -38,7 +38,7 @@ static struct net_bridge_mdb_entry *__br_mdb_ip_get(
     struct net_bridge_mdb_entry *mp;
     struct hlist_node *p;
 
-    hlist_for_each_entry(mp, p, &mdb->mhash[hash], hlist[mdb->ver]) {
+    hlist_for_each_entry_rcu(mp, p, &mdb->mhash[hash], hlist[mdb->ver]) {
         if (dst == mp->addr)
             return mp;
     }
@@ -627,8 +627,8 @@ static void br_multicast_port_query_expired(unsigned long data)
     struct net_bridge *br = port->br;
 
     spin_lock(&br->multicast_lock);
-    if (port && (port->state == BR_STATE_DISABLED ||
-                 port->state == BR_STATE_BLOCKING))
+    if (port->state == BR_STATE_DISABLED ||
+        port->state == BR_STATE_BLOCKING)
         goto out;
 
     if (port->multicast_startup_queries_sent <
@@ -1135,7 +1135,7 @@ void br_multicast_stop(struct net_bridge *br)
 
     if (mdb->old) {
         spin_unlock_bh(&br->multicast_lock);
-        synchronize_rcu_bh();
+        rcu_barrier_bh();
         spin_lock_bh(&br->multicast_lock);
         WARN_ON(mdb->old);
     }
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index 0f2f82185ec4..f4cb6b6299d9 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -17,6 +17,7 @@
 #include <linux/errno.h>
 #include <linux/ethtool.h>
 #include <linux/netdevice.h>
+#include <linux/bitops.h>
 #include <asm/uaccess.h>
 
 /*
@@ -199,10 +200,7 @@ static int ethtool_set_settings(struct net_device *dev, void __user *useraddr)
     return dev->ethtool_ops->set_settings(dev, &cmd);
 }
 
-/*
- * noinline attribute so that gcc doesnt use too much stack in dev_ethtool()
- */
-static noinline int ethtool_get_drvinfo(struct net_device *dev, void __user *useraddr)
+static noinline_for_stack int ethtool_get_drvinfo(struct net_device *dev, void __user *useraddr)
 {
     struct ethtool_drvinfo info;
     const struct ethtool_ops *ops = dev->ethtool_ops;
@@ -214,6 +212,10 @@ static noinline int ethtool_get_drvinfo(struct net_device *dev, void __user *use
     info.cmd = ETHTOOL_GDRVINFO;
     ops->get_drvinfo(dev, &info);
 
+    /*
+     * this method of obtaining string set info is deprecated;
+     * Use ETHTOOL_GSSET_INFO instead.
+     */
     if (ops->get_sset_count) {
         int rc;
 
@@ -237,10 +239,67 @@ static noinline int ethtool_get_drvinfo(struct net_device *dev, void __user *use
     return 0;
 }
 
-/*
- * noinline attribute so that gcc doesnt use too much stack in dev_ethtool()
- */
-static noinline int ethtool_set_rxnfc(struct net_device *dev, void __user *useraddr)
+static noinline_for_stack int ethtool_get_sset_info(struct net_device *dev,
+                                                    void __user *useraddr)
+{
+    struct ethtool_sset_info info;
+    const struct ethtool_ops *ops = dev->ethtool_ops;
+    u64 sset_mask;
+    int i, idx = 0, n_bits = 0, ret, rc;
+    u32 *info_buf = NULL;
+
+    if (!ops->get_sset_count)
+        return -EOPNOTSUPP;
+
+    if (copy_from_user(&info, useraddr, sizeof(info)))
+        return -EFAULT;
+
+    /* store copy of mask, because we zero struct later on */
+    sset_mask = info.sset_mask;
+    if (!sset_mask)
+        return 0;
+
+    /* calculate size of return buffer */
+    n_bits = hweight64(sset_mask);
+
+    memset(&info, 0, sizeof(info));
+    info.cmd = ETHTOOL_GSSET_INFO;
+
+    info_buf = kzalloc(n_bits * sizeof(u32), GFP_USER);
+    if (!info_buf)
+        return -ENOMEM;
+
+    /*
+     * fill return buffer based on input bitmask and successful
+     * get_sset_count return
+     */
+    for (i = 0; i < 64; i++) {
+        if (!(sset_mask & (1ULL << i)))
+            continue;
+
+        rc = ops->get_sset_count(dev, i);
+        if (rc >= 0) {
+            info.sset_mask |= (1ULL << i);
+            info_buf[idx++] = rc;
+        }
+    }
+
+    ret = -EFAULT;
+    if (copy_to_user(useraddr, &info, sizeof(info)))
+        goto out;
+
+    useraddr += offsetof(struct ethtool_sset_info, data);
+    if (copy_to_user(useraddr, info_buf, idx * sizeof(u32)))
+        goto out;
+
+    ret = 0;
+
+out:
+    kfree(info_buf);
+    return ret;
+}
+
+static noinline_for_stack int ethtool_set_rxnfc(struct net_device *dev, void __user *useraddr)
 {
     struct ethtool_rxnfc cmd;
 
@@ -253,10 +312,7 @@ static noinline int ethtool_set_rxnfc(struct net_device *dev, void __user *usera
     return dev->ethtool_ops->set_rxnfc(dev, &cmd);
 }
 
-/*
- * noinline attribute so that gcc doesnt use too much stack in dev_ethtool()
- */
-static noinline int ethtool_get_rxnfc(struct net_device *dev, void __user *useraddr)
+static noinline_for_stack int ethtool_get_rxnfc(struct net_device *dev, void __user *useraddr)
 {
     struct ethtool_rxnfc info;
     const struct ethtool_ops *ops = dev->ethtool_ops;
@@ -328,10 +384,7 @@ static void __rx_ntuple_filter_add(struct ethtool_rx_ntuple_list *list,
     list->count++;
 }
 
-/*
- * noinline attribute so that gcc doesnt use too much stack in dev_ethtool()
- */
-static noinline int ethtool_set_rx_ntuple(struct net_device *dev, void __user *useraddr)
+static noinline_for_stack int ethtool_set_rx_ntuple(struct net_device *dev, void __user *useraddr)
 {
     struct ethtool_rx_ntuple cmd;
     const struct ethtool_ops *ops = dev->ethtool_ops;
@@ -799,10 +852,7 @@ static int ethtool_set_eeprom(struct net_device *dev, void __user *useraddr)
     return ret;
 }
 
-/*
- * noinline attribute so that gcc doesnt use too much stack in dev_ethtool()
- */
-static noinline int ethtool_get_coalesce(struct net_device *dev, void __user *useraddr)
+static noinline_for_stack int ethtool_get_coalesce(struct net_device *dev, void __user *useraddr)
 {
     struct ethtool_coalesce coalesce = { .cmd = ETHTOOL_GCOALESCE };
 
@@ -816,10 +866,7 @@ static noinline int ethtool_get_coalesce(struct net_device *dev, void __user *us
     return 0;
 }
 
-/*
- * noinline attribute so that gcc doesnt use too much stack in dev_ethtool()
- */
-static noinline int ethtool_set_coalesce(struct net_device *dev, void __user *useraddr)
+static noinline_for_stack int ethtool_set_coalesce(struct net_device *dev, void __user *useraddr)
 {
     struct ethtool_coalesce coalesce;
 
@@ -1229,10 +1276,7 @@ static int ethtool_set_value(struct net_device *dev, char __user *useraddr,
     return actor(dev, edata.data);
 }
 
-/*
- * noinline attribute so that gcc doesnt use too much stack in dev_ethtool()
- */
-static noinline int ethtool_flash_device(struct net_device *dev, char __user *useraddr)
+static noinline_for_stack int ethtool_flash_device(struct net_device *dev, char __user *useraddr)
 {
     struct ethtool_flash efl;
 
@@ -1471,6 +1515,9 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
     case ETHTOOL_GRXNTUPLE:
         rc = ethtool_get_rx_ntuple(dev, useraddr);
         break;
+    case ETHTOOL_GSSET_INFO:
+        rc = ethtool_get_sset_info(dev, useraddr);
+        break;
     default:
         rc = -EOPNOTSUPP;
     }
diff --git a/net/core/sock.c b/net/core/sock.c
index fcd397a762ff..c5812bbc2cc9 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -340,8 +340,12 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
         rc = sk_backlog_rcv(sk, skb);
 
         mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
-    } else
-        sk_add_backlog(sk, skb);
+    } else if (sk_add_backlog(sk, skb)) {
+        bh_unlock_sock(sk);
+        atomic_inc(&sk->sk_drops);
+        goto discard_and_relse;
+    }
+
     bh_unlock_sock(sk);
 out:
     sock_put(sk);
@@ -1139,6 +1143,7 @@ struct sock *sk_clone(const struct sock *sk, const gfp_t priority)
         sock_lock_init(newsk);
         bh_lock_sock(newsk);
         newsk->sk_backlog.head = newsk->sk_backlog.tail = NULL;
+        newsk->sk_backlog.len = 0;
 
         atomic_set(&newsk->sk_rmem_alloc, 0);
         /*
@@ -1542,6 +1547,12 @@ static void __release_sock(struct sock *sk)
 
         bh_lock_sock(sk);
     } while ((skb = sk->sk_backlog.head) != NULL);
+
+    /*
+     * Doing the zeroing here guarantee we can not loop forever
+     * while a wild producer attempts to flood us.
+     */
+    sk->sk_backlog.len = 0;
 }
 
 /**
@@ -1874,6 +1885,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
     sk->sk_allocation = GFP_KERNEL;
     sk->sk_rcvbuf = sysctl_rmem_default;
     sk->sk_sndbuf = sysctl_wmem_default;
+    sk->sk_backlog.limit = sk->sk_rcvbuf << 1;
     sk->sk_state = TCP_CLOSE;
     sk_set_socket(sk, sock);
 
@@ -2276,7 +2288,8 @@ out_free_request_sock_slab:
         prot->rsk_prot->slab = NULL;
     }
 out_free_request_sock_slab_name:
-    kfree(prot->rsk_prot->slab_name);
+    if (prot->rsk_prot)
+        kfree(prot->rsk_prot->slab_name);
 out_free_sock_slab:
     kmem_cache_destroy(prot->slab);
     prot->slab = NULL;
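Several hunks in this series (TCP, UDP, DCCP, LLC, SCTP) repeat the same conversion: sk_add_backlog() can now fail once sk_backlog.len reaches sk_backlog.limit, which sock_init_data() above sets to twice sk_rcvbuf, so every caller has to unlock, account the drop and bail out. A schematic sketch of the converted receive path; proto_rcv() and the exact drop accounting are illustrative, not any one protocol's code:

/* Schematic only: mirrors the pattern tcp_v4_rcv()/udp_queue_rcv_skb() adopt
 * above, now that sk_add_backlog() reports a full backlog. */
static int proto_rcv(struct sock *sk, struct sk_buff *skb)
{
    int rc = 0;

    bh_lock_sock(sk);
    if (!sock_owned_by_user(sk)) {
        /* no process owns the socket: process in softirq context right away */
        rc = sk_backlog_rcv(sk, skb);
    } else if (sk_add_backlog(sk, skb)) {
        /* backlog already holds about 2 * sk_rcvbuf bytes: refuse to queue
         * more so a flooding sender cannot grow the backlog without bound */
        bh_unlock_sock(sk);
        atomic_inc(&sk->sk_drops);
        kfree_skb(skb);
        return -ENOBUFS;
    }
    bh_unlock_sock(sk);
    return rc;
}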
diff --git a/net/dccp/minisocks.c b/net/dccp/minisocks.c
index af226a063141..0d508c359fa9 100644
--- a/net/dccp/minisocks.c
+++ b/net/dccp/minisocks.c
@@ -254,7 +254,7 @@ int dccp_child_process(struct sock *parent, struct sock *child,
      * in main socket hash table and lock on listening
      * socket does not protect us more.
      */
-        sk_add_backlog(child, skb);
+        __sk_add_backlog(child, skb);
     }
 
     bh_unlock_sock(child);
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index c0c5274d0271..f47c9f76754b 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -1144,12 +1144,9 @@ static int ipgre_header(struct sk_buff *skb, struct net_device *dev,
 
     if (saddr)
         memcpy(&iph->saddr, saddr, 4);
-
-    if (daddr) {
+    if (daddr)
         memcpy(&iph->daddr, daddr, 4);
-        return t->hlen;
-    }
-    if (iph->daddr && !ipv4_is_multicast(iph->daddr))
+    if (iph->daddr)
         return t->hlen;
 
     return -t->hlen;
diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c
index 242ed2307370..4f1f337f4337 100644
--- a/net/ipv4/proc.c
+++ b/net/ipv4/proc.c
@@ -249,6 +249,8 @@ static const struct snmp_mib snmp4_net_list[] = {
     SNMP_MIB_ITEM("TCPSackShifted", LINUX_MIB_SACKSHIFTED),
     SNMP_MIB_ITEM("TCPSackMerged", LINUX_MIB_SACKMERGED),
     SNMP_MIB_ITEM("TCPSackShiftFallback", LINUX_MIB_SACKSHIFTFALLBACK),
+    SNMP_MIB_ITEM("TCPBacklogDrop", LINUX_MIB_TCPBACKLOGDROP),
+    SNMP_MIB_ITEM("TCPMinTTLDrop", LINUX_MIB_TCPMINTTLDROP),
     SNMP_MIB_SENTINEL
 };
 
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index b2ba5581d2ae..d9b40248b97f 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -146,7 +146,6 @@ static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst);
 static void ipv4_link_failure(struct sk_buff *skb);
 static void ip_rt_update_pmtu(struct dst_entry *dst, u32 mtu);
 static int rt_garbage_collect(struct dst_ops *ops);
-static void rt_emergency_hash_rebuild(struct net *net);
 
 
 static struct dst_ops ipv4_dst_ops = {
@@ -780,11 +779,30 @@ static void rt_do_flush(int process_context)
 #define FRACT_BITS 3
 #define ONE (1UL << FRACT_BITS)
 
+/*
+ * Given a hash chain and an item in this hash chain,
+ * find if a previous entry has the same hash_inputs
+ * (but differs on tos, mark or oif)
+ * Returns 0 if an alias is found.
+ * Returns ONE if rth has no alias before itself.
+ */
+static int has_noalias(const struct rtable *head, const struct rtable *rth)
+{
+    const struct rtable *aux = head;
+
+    while (aux != rth) {
+        if (compare_hash_inputs(&aux->fl, &rth->fl))
+            return 0;
+        aux = aux->u.dst.rt_next;
+    }
+    return ONE;
+}
+
 static void rt_check_expire(void)
 {
     static unsigned int rover;
     unsigned int i = rover, goal;
-    struct rtable *rth, *aux, **rthp;
+    struct rtable *rth, **rthp;
     unsigned long samples = 0;
     unsigned long sum = 0, sum2 = 0;
     unsigned long delta;
@@ -835,15 +853,7 @@ nofree:
                  * attributes don't unfairly skew
                  * the length computation
                  */
-                for (aux = rt_hash_table[i].chain;;) {
-                    if (aux == rth) {
-                        length += ONE;
-                        break;
-                    }
-                    if (compare_hash_inputs(&aux->fl, &rth->fl))
-                        break;
-                    aux = aux->u.dst.rt_next;
-                }
+                length += has_noalias(rt_hash_table[i].chain, rth);
                 continue;
             }
         } else if (!rt_may_expire(rth, tmo, ip_rt_gc_timeout))
@@ -1073,6 +1083,21 @@ work_done:
 out: return 0;
 }
 
+/*
+ * Returns number of entries in a hash chain that have different hash_inputs
+ */
+static int slow_chain_length(const struct rtable *head)
+{
+    int length = 0;
+    const struct rtable *rth = head;
+
+    while (rth) {
+        length += has_noalias(head, rth);
+        rth = rth->u.dst.rt_next;
+    }
+    return length >> FRACT_BITS;
+}
+
 static int rt_intern_hash(unsigned hash, struct rtable *rt,
                           struct rtable **rp, struct sk_buff *skb)
 {
@@ -1185,7 +1210,8 @@ restart:
             rt_free(cand);
         }
     } else {
-        if (chain_length > rt_chain_length_max) {
+        if (chain_length > rt_chain_length_max &&
+            slow_chain_length(rt_hash_table[hash].chain) > rt_chain_length_max) {
             struct net *net = dev_net(rt->u.dst.dev);
             int num = ++net->ipv4.current_rt_cache_rebuild_count;
             if (!rt_caching(dev_net(rt->u.dst.dev))) {
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index c3588b4fd979..8d51d39ad1bb 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1651,8 +1651,10 @@ int tcp_v4_rcv(struct sk_buff *skb)
     if (!sk)
         goto no_tcp_socket;
 
-    if (iph->ttl < inet_sk(sk)->min_ttl)
+    if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
+        NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
         goto discard_and_relse;
+    }
 
 process:
     if (sk->sk_state == TCP_TIME_WAIT)
@@ -1682,8 +1684,11 @@ process:
             if (!tcp_prequeue(sk, skb))
                 ret = tcp_v4_do_rcv(sk, skb);
         }
-    } else
-        sk_add_backlog(sk, skb);
+    } else if (unlikely(sk_add_backlog(sk, skb))) {
+        bh_unlock_sock(sk);
+        NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
+        goto discard_and_relse;
+    }
     bh_unlock_sock(sk);
 
     sock_put(sk);
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index f206ee5dda80..4199bc6915c5 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -728,7 +728,7 @@ int tcp_child_process(struct sock *parent, struct sock *child,
      * in main socket hash table and lock on listening
      * socket does not protect us more.
      */
-        sk_add_backlog(child, skb);
+        __sk_add_backlog(child, skb);
     }
 
     bh_unlock_sock(child);
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 4a1605d3f909..f181b78f2385 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -2395,13 +2395,17 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
     struct tcp_extend_values *xvp = tcp_xv(rvp);
     struct inet_request_sock *ireq = inet_rsk(req);
     struct tcp_sock *tp = tcp_sk(sk);
+    const struct tcp_cookie_values *cvp = tp->cookie_values;
     struct tcphdr *th;
     struct sk_buff *skb;
     struct tcp_md5sig_key *md5;
     int tcp_header_size;
     int mss;
+    int s_data_desired = 0;
 
-    skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15, 1, GFP_ATOMIC);
+    if (cvp != NULL && cvp->s_data_constant && cvp->s_data_desired)
+        s_data_desired = cvp->s_data_desired;
+    skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15 + s_data_desired, 1, GFP_ATOMIC);
     if (skb == NULL)
         return NULL;
 
@@ -2457,16 +2461,12 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
                              TCPCB_FLAG_SYN | TCPCB_FLAG_ACK);
 
     if (OPTION_COOKIE_EXTENSION & opts.options) {
-        const struct tcp_cookie_values *cvp = tp->cookie_values;
-
-        if (cvp != NULL &&
-            cvp->s_data_constant &&
-            cvp->s_data_desired > 0) {
-            u8 *buf = skb_put(skb, cvp->s_data_desired);
+        if (s_data_desired) {
+            u8 *buf = skb_put(skb, s_data_desired);
 
             /* copy data directly from the listening socket. */
-            memcpy(buf, cvp->s_data_payload, cvp->s_data_desired);
-            TCP_SKB_CB(skb)->end_seq += cvp->s_data_desired;
+            memcpy(buf, cvp->s_data_payload, s_data_desired);
+            TCP_SKB_CB(skb)->end_seq += s_data_desired;
         }
 
         if (opts.hash_size > 0) {
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 608a5446d05b..7af756d0f931 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1371,8 +1371,10 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
     bh_lock_sock(sk);
     if (!sock_owned_by_user(sk))
         rc = __udp_queue_rcv_skb(sk, skb);
-    else
-        sk_add_backlog(sk, skb);
+    else if (sk_add_backlog(sk, skb)) {
+        bh_unlock_sock(sk);
+        goto drop;
+    }
     bh_unlock_sock(sk);
 
     return rc;
diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
index 67107d63c1cd..e4a1483fba77 100644
--- a/net/ipv4/xfrm4_policy.c
+++ b/net/ipv4/xfrm4_policy.c
@@ -91,11 +91,12 @@ static int xfrm4_init_path(struct xfrm_dst *path, struct dst_entry *dst,
     return 0;
 }
 
-static int xfrm4_fill_dst(struct xfrm_dst *xdst, struct net_device *dev)
+static int xfrm4_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
+                          struct flowi *fl)
 {
     struct rtable *rt = (struct rtable *)xdst->route;
 
-    xdst->u.rt.fl = rt->fl;
+    xdst->u.rt.fl = *fl;
 
     xdst->u.dst.dev = dev;
     dev_hold(dev);
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 88fd8c5877ee..6cf3ee14ace3 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -2615,7 +2615,7 @@ static void addrconf_bonding_change(struct net_device *dev, unsigned long event)
 static int addrconf_ifdown(struct net_device *dev, int how)
 {
     struct inet6_dev *idev;
-    struct inet6_ifaddr *ifa, **bifa;
+    struct inet6_ifaddr *ifa, *keep_list, **bifa;
     struct net *net = dev_net(dev);
     int i;
 
@@ -2649,11 +2649,11 @@ static int addrconf_ifdown(struct net_device *dev, int how)
     write_lock_bh(&addrconf_hash_lock);
     while ((ifa = *bifa) != NULL) {
         if (ifa->idev == idev &&
-            (how || !(ifa->flags&IFA_F_PERMANENT))) {
+            (how || !(ifa->flags&IFA_F_PERMANENT) ||
+             ipv6_addr_type(&ifa->addr) & IPV6_ADDR_LINKLOCAL)) {
             *bifa = ifa->lst_next;
             ifa->lst_next = NULL;
-            addrconf_del_timer(ifa);
-            in6_ifa_put(ifa);
+            __in6_ifa_put(ifa);
             continue;
         }
         bifa = &ifa->lst_next;
@@ -2689,31 +2689,51 @@ static int addrconf_ifdown(struct net_device *dev, int how)
         write_lock_bh(&idev->lock);
     }
 #endif
-    bifa = &idev->addr_list;
-    while ((ifa = *bifa) != NULL) {
-        if (how == 0 && (ifa->flags&IFA_F_PERMANENT)) {
-            /* Retain permanent address on admin down */
+    keep_list = NULL;
+    bifa = &keep_list;
+    while ((ifa = idev->addr_list) != NULL) {
+        idev->addr_list = ifa->if_next;
+        ifa->if_next = NULL;
+
+        addrconf_del_timer(ifa);
+
+        /* If just doing link down, and address is permanent
+           and not link-local, then retain it. */
+        if (how == 0 &&
+            (ifa->flags&IFA_F_PERMANENT) &&
+            !(ipv6_addr_type(&ifa->addr) & IPV6_ADDR_LINKLOCAL)) {
+
+            /* Move to holding list */
+            *bifa = ifa;
             bifa = &ifa->if_next;
 
-            /* Restart DAD if needed when link comes back up */
-            if ( !((dev->flags&(IFF_NOARP|IFF_LOOPBACK)) ||
+            /* If not doing DAD on this address, just keep it. */
+            if ((dev->flags&(IFF_NOARP|IFF_LOOPBACK)) ||
                 idev->cnf.accept_dad <= 0 ||
-                (ifa->flags & IFA_F_NODAD)))
-                ifa->flags |= IFA_F_TENTATIVE;
-        } else {
-            *bifa = ifa->if_next;
-            ifa->if_next = NULL;
+                (ifa->flags & IFA_F_NODAD))
+                continue;
 
+            /* If it was tentative already, no need to notify */
+            if (ifa->flags & IFA_F_TENTATIVE)
+                continue;
+
+            /* Flag it for later restoration when link comes up */
+            ifa->flags |= IFA_F_TENTATIVE;
+            in6_ifa_hold(ifa);
+        } else {
             ifa->dead = 1;
-            write_unlock_bh(&idev->lock);
+        }
+        write_unlock_bh(&idev->lock);
 
         __ipv6_ifa_notify(RTM_DELADDR, ifa);
         atomic_notifier_call_chain(&inet6addr_chain, NETDEV_DOWN, ifa);
         in6_ifa_put(ifa);
 
         write_lock_bh(&idev->lock);
-        }
     }
+
+    idev->addr_list = keep_list;
+
     write_unlock_bh(&idev->lock);
 
     /* Step 5: Discard multicast list */
@@ -2739,28 +2759,29 @@ static int addrconf_ifdown(struct net_device *dev, int how)
 static void addrconf_rs_timer(unsigned long data)
 {
     struct inet6_ifaddr *ifp = (struct inet6_ifaddr *) data;
+    struct inet6_dev *idev = ifp->idev;
 
-    if (ifp->idev->cnf.forwarding)
+    read_lock(&idev->lock);
+    if (idev->dead || !(idev->if_flags & IF_READY))
         goto out;
 
-    if (ifp->idev->if_flags & IF_RA_RCVD) {
-        /*
-         * Announcement received after solicitation
-         * was sent
-         */
+    if (idev->cnf.forwarding)
+        goto out;
+
+    /* Announcement received after solicitation was sent */
+    if (idev->if_flags & IF_RA_RCVD)
         goto out;
-    }
 
     spin_lock(&ifp->lock);
-    if (ifp->probes++ < ifp->idev->cnf.rtr_solicits) {
+    if (ifp->probes++ < idev->cnf.rtr_solicits) {
         /* The wait after the last probe can be shorter */
         addrconf_mod_timer(ifp, AC_RS,
-                           (ifp->probes == ifp->idev->cnf.rtr_solicits) ?
-                           ifp->idev->cnf.rtr_solicit_delay :
-                           ifp->idev->cnf.rtr_solicit_interval);
+                           (ifp->probes == idev->cnf.rtr_solicits) ?
+                           idev->cnf.rtr_solicit_delay :
+                           idev->cnf.rtr_solicit_interval);
         spin_unlock(&ifp->lock);
 
-        ndisc_send_rs(ifp->idev->dev, &ifp->addr, &in6addr_linklocal_allrouters);
+        ndisc_send_rs(idev->dev, &ifp->addr, &in6addr_linklocal_allrouters);
     } else {
         spin_unlock(&ifp->lock);
         /*
@@ -2768,10 +2789,11 @@ static void addrconf_rs_timer(unsigned long data)
          * assumption any longer.
          */
         printk(KERN_DEBUG "%s: no IPv6 routers present\n",
-               ifp->idev->dev->name);
+               idev->dev->name);
     }
 
 out:
+    read_unlock(&idev->lock);
     in6_ifa_put(ifp);
 }
 
@@ -2850,9 +2872,9 @@ static void addrconf_dad_timer(unsigned long data)
     struct inet6_dev *idev = ifp->idev;
     struct in6_addr mcaddr;
 
-    read_lock_bh(&idev->lock);
-    if (idev->dead) {
-        read_unlock_bh(&idev->lock);
+    read_lock(&idev->lock);
+    if (idev->dead || !(idev->if_flags & IF_READY)) {
+        read_unlock(&idev->lock);
         goto out;
     }
 
@@ -2864,7 +2886,7 @@ static void addrconf_dad_timer(unsigned long data)
 
     ifp->flags &= ~(IFA_F_TENTATIVE|IFA_F_OPTIMISTIC|IFA_F_DADFAILED);
     spin_unlock(&ifp->lock);
-    read_unlock_bh(&idev->lock);
+    read_unlock(&idev->lock);
 
     addrconf_dad_completed(ifp);
 
@@ -2874,7 +2896,7 @@ static void addrconf_dad_timer(unsigned long data)
     ifp->probes--;
     addrconf_mod_timer(ifp, AC_DAD, ifp->idev->nd_parms->retrans_time);
     spin_unlock(&ifp->lock);
-    read_unlock_bh(&idev->lock);
+    read_unlock(&idev->lock);
 
     /* send a neighbour solicitation for our addr */
     addrconf_addr_solict_mult(&ifp->addr, &mcaddr);
diff --git a/net/ipv6/fib6_rules.c b/net/ipv6/fib6_rules.c
index 551882b9dfd6..5e463c43fcc2 100644
--- a/net/ipv6/fib6_rules.c
+++ b/net/ipv6/fib6_rules.c
@@ -84,18 +84,11 @@ static int fib6_rule_action(struct fib_rule *rule, struct flowi *flp,
         if ((rule->flags & FIB_RULE_FIND_SADDR) &&
             r->src.plen && !(flags & RT6_LOOKUP_F_HAS_SADDR)) {
             struct in6_addr saddr;
-            unsigned int srcprefs = 0;
-
-            if (flags & RT6_LOOKUP_F_SRCPREF_TMP)
-                srcprefs |= IPV6_PREFER_SRC_TMP;
-            if (flags & RT6_LOOKUP_F_SRCPREF_PUBLIC)
-                srcprefs |= IPV6_PREFER_SRC_PUBLIC;
-            if (flags & RT6_LOOKUP_F_SRCPREF_COA)
-                srcprefs |= IPV6_PREFER_SRC_COA;
 
             if (ipv6_dev_get_saddr(net,
                                    ip6_dst_idev(&rt->u.dst)->dev,
-                                   &flp->fl6_dst, srcprefs,
+                                   &flp->fl6_dst,
+                                   rt6_flags2srcprefs(flags),
                                    &saddr))
                 goto again;
             if (!ipv6_prefix_equal(&saddr, &r->src.addr,
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index b08879e97f22..52cd3eff31dc 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -819,15 +819,8 @@ struct dst_entry * ip6_route_output(struct net *net, struct sock *sk,
 
     if (!ipv6_addr_any(&fl->fl6_src))
         flags |= RT6_LOOKUP_F_HAS_SADDR;
-    else if (sk) {
-        unsigned int prefs = inet6_sk(sk)->srcprefs;
-        if (prefs & IPV6_PREFER_SRC_TMP)
-            flags |= RT6_LOOKUP_F_SRCPREF_TMP;
-        if (prefs & IPV6_PREFER_SRC_PUBLIC)
-            flags |= RT6_LOOKUP_F_SRCPREF_PUBLIC;
-        if (prefs & IPV6_PREFER_SRC_COA)
-            flags |= RT6_LOOKUP_F_SRCPREF_COA;
-    }
+    else if (sk)
+        flags |= rt6_srcprefs2flags(inet6_sk(sk)->srcprefs);
 
     return fib6_rule_lookup(net, fl, flags, ip6_pol_route_output);
 }
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 6963a6b6763e..9b6dbba80d31 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -1740,8 +1740,11 @@ process:
             if (!tcp_prequeue(sk, skb))
                 ret = tcp_v6_do_rcv(sk, skb);
         }
-    } else
-        sk_add_backlog(sk, skb);
+    } else if (unlikely(sk_add_backlog(sk, skb))) {
+        bh_unlock_sock(sk);
+        NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
+        goto discard_and_relse;
+    }
     bh_unlock_sock(sk);
 
     sock_put(sk);
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 52b8347ae3b2..3c0c9c755c92 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -583,16 +583,20 @@ static void flush_stack(struct sock **stack, unsigned int count,
         bh_lock_sock(sk);
         if (!sock_owned_by_user(sk))
             udpv6_queue_rcv_skb(sk, skb1);
-        else
-            sk_add_backlog(sk, skb1);
+        else if (sk_add_backlog(sk, skb1)) {
+            kfree_skb(skb1);
+            bh_unlock_sock(sk);
+            goto drop;
+        }
         bh_unlock_sock(sk);
-    } else {
-        atomic_inc(&sk->sk_drops);
-        UDP6_INC_STATS_BH(sock_net(sk),
-                          UDP_MIB_RCVBUFERRORS, IS_UDPLITE(sk));
-        UDP6_INC_STATS_BH(sock_net(sk),
-                          UDP_MIB_INERRORS, IS_UDPLITE(sk));
+        continue;
     }
+drop:
+    atomic_inc(&sk->sk_drops);
+    UDP6_INC_STATS_BH(sock_net(sk),
+                      UDP_MIB_RCVBUFERRORS, IS_UDPLITE(sk));
+    UDP6_INC_STATS_BH(sock_net(sk),
+                      UDP_MIB_INERRORS, IS_UDPLITE(sk));
     }
 }
 /*
@@ -754,8 +758,12 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
     bh_lock_sock(sk);
     if (!sock_owned_by_user(sk))
         udpv6_queue_rcv_skb(sk, skb);
-    else
-        sk_add_backlog(sk, skb);
+    else if (sk_add_backlog(sk, skb)) {
+        atomic_inc(&sk->sk_drops);
+        bh_unlock_sock(sk);
+        sock_put(sk);
+        goto discard;
+    }
     bh_unlock_sock(sk);
     sock_put(sk);
     return 0;
diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
index dbdc696f5fc5..ae181651c75a 100644
--- a/net/ipv6/xfrm6_policy.c
+++ b/net/ipv6/xfrm6_policy.c
@@ -116,7 +116,8 @@ static int xfrm6_init_path(struct xfrm_dst *path, struct dst_entry *dst,
     return 0;
 }
 
-static int xfrm6_fill_dst(struct xfrm_dst *xdst, struct net_device *dev)
+static int xfrm6_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
+                          struct flowi *fl)
 {
     struct rt6_info *rt = (struct rt6_info*)xdst->route;
 
diff --git a/net/llc/llc_c_ac.c b/net/llc/llc_c_ac.c
index 019c780512e8..86d6985b9d49 100644
--- a/net/llc/llc_c_ac.c
+++ b/net/llc/llc_c_ac.c
@@ -1437,7 +1437,7 @@ static void llc_process_tmr_ev(struct sock *sk, struct sk_buff *skb)
             llc_conn_state_process(sk, skb);
         else {
             llc_set_backlog_type(skb, LLC_EVENT);
-            sk_add_backlog(sk, skb);
+            __sk_add_backlog(sk, skb);
         }
     }
 }
diff --git a/net/llc/llc_conn.c b/net/llc/llc_conn.c
index a8dde9b010da..a12144da7974 100644
--- a/net/llc/llc_conn.c
+++ b/net/llc/llc_conn.c
@@ -827,7 +827,8 @@ void llc_conn_handler(struct llc_sap *sap, struct sk_buff *skb)
     else {
         dprintk("%s: adding to backlog...\n", __func__);
         llc_set_backlog_type(skb, LLC_PACKET);
-        sk_add_backlog(sk, skb);
+        if (sk_add_backlog(sk, skb))
+            goto drop_unlock;
     }
 out:
     bh_unlock_sock(sk);
diff --git a/net/mac80211/debugfs_netdev.c b/net/mac80211/debugfs_netdev.c
index ee61a9f6fabc..623e6644b80c 100644
--- a/net/mac80211/debugfs_netdev.c
+++ b/net/mac80211/debugfs_netdev.c
@@ -48,20 +48,24 @@ static ssize_t ieee80211_if_write(
     ssize_t (*write)(struct ieee80211_sub_if_data *, const char *, int))
 {
     u8 *buf;
-    ssize_t ret = -ENODEV;
+    ssize_t ret;
 
-    buf = kzalloc(count, GFP_KERNEL);
+    buf = kmalloc(count, GFP_KERNEL);
     if (!buf)
         return -ENOMEM;
 
+    ret = -EFAULT;
     if (copy_from_user(buf, userbuf, count))
-        return -EFAULT;
+        goto freebuf;
 
+    ret = -ENODEV;
     rtnl_lock();
     if (sdata->dev->reg_state == NETREG_REGISTERED)
         ret = (*write)(sdata, buf, count);
     rtnl_unlock();
 
+freebuf:
+    kfree(buf);
     return ret;
 }
 
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index 06c33b68d8e5..b887e484ae04 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -225,11 +225,11 @@ void ieee80211_bss_info_change_notify(struct ieee80211_sub_if_data *sdata,
         switch (sdata->vif.type) {
         case NL80211_IFTYPE_AP:
             sdata->vif.bss_conf.enable_beacon =
-                !!rcu_dereference(sdata->u.ap.beacon);
+                !!sdata->u.ap.beacon;
             break;
         case NL80211_IFTYPE_ADHOC:
             sdata->vif.bss_conf.enable_beacon =
-                !!rcu_dereference(sdata->u.ibss.presp);
+                !!sdata->u.ibss.presp;
             break;
         case NL80211_IFTYPE_MESH_POINT:
             sdata->vif.bss_conf.enable_beacon = true;
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
index 61080c5fad50..7a6bebce7f2f 100644
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -749,9 +749,6 @@ ieee80211_mesh_rx_mgmt(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb)
 
     switch (fc & IEEE80211_FCTL_STYPE) {
     case IEEE80211_STYPE_ACTION:
-        if (skb->len < IEEE80211_MIN_ACTION_SIZE)
-            return RX_DROP_MONITOR;
-        /* fall through */
     case IEEE80211_STYPE_PROBE_RESP:
     case IEEE80211_STYPE_BEACON:
         skb_queue_tail(&ifmsh->skb_queue, skb);
diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c
index ce84237ebad3..ccff6133e19a 100644
--- a/net/mac80211/mesh_hwmp.c
+++ b/net/mac80211/mesh_hwmp.c
@@ -391,7 +391,7 @@ static u32 hwmp_route_info_get(struct ieee80211_sub_if_data *sdata,
             if (SN_GT(mpath->sn, orig_sn) ||
                 (mpath->sn == orig_sn &&
                  action == MPATH_PREQ &&
-                 new_metric > mpath->metric)) {
+                 new_metric >= mpath->metric)) {
                 process = false;
                 fresh_info = false;
             }
@@ -611,7 +611,7 @@ static void hwmp_prep_frame_process(struct ieee80211_sub_if_data *sdata,
 
     mesh_path_sel_frame_tx(MPATH_PREP, flags, orig_addr,
                            cpu_to_le32(orig_sn), 0, target_addr,
-                           cpu_to_le32(target_sn), mpath->next_hop->sta.addr, hopcount,
+                           cpu_to_le32(target_sn), next_hop, hopcount,
                            ttl, cpu_to_le32(lifetime), cpu_to_le32(metric),
                            0, sdata);
     rcu_read_unlock();
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 4c189d0be4a3..461167dfa42c 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -447,10 +447,12 @@ static void ieee80211_enable_ps(struct ieee80211_local *local,
         if (local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK)
             ieee80211_send_nullfunc(local, sdata, 1);
 
-        if (!(local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS)) {
-            conf->flags |= IEEE80211_CONF_PS;
-            ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
-        }
+        if ((local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK) &&
+            (local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS))
+            return;
+
+        conf->flags |= IEEE80211_CONF_PS;
+        ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
     }
 }
 
@@ -569,7 +571,8 @@ void ieee80211_dynamic_ps_enable_work(struct work_struct *work)
         (!(ifmgd->flags & IEEE80211_STA_NULLFUNC_ACKED)))
         ieee80211_send_nullfunc(local, sdata, 1);
 
-    if (!(local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS) ||
+    if (!((local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS) &&
+          (local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK)) ||
         (ifmgd->flags & IEEE80211_STA_NULLFUNC_ACKED)) {
         ifmgd->flags &= ~IEEE80211_STA_NULLFUNC_ACKED;
         local->hw.conf.flags |= IEEE80211_CONF_PS;
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index a33f865807f9..c0ad7e879a6e 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -1994,6 +1994,11 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
             goto handled;
         }
         break;
+    case MESH_PLINK_CATEGORY:
+    case MESH_PATH_SEL_CATEGORY:
+        if (ieee80211_vif_is_mesh(&sdata->vif))
+            return ieee80211_mesh_rx_mgmt(sdata, rx->skb);
+        break;
     }
 
     /*
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index 5bf044b92dca..4de987cbda1c 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -93,12 +93,18 @@ struct sta_info *sta_info_get(struct ieee80211_sub_if_data *sdata,
     struct ieee80211_local *local = sdata->local;
     struct sta_info *sta;
 
-    sta = rcu_dereference(local->sta_hash[STA_HASH(addr)]);
+    sta = rcu_dereference_check(local->sta_hash[STA_HASH(addr)],
+                                rcu_read_lock_held() ||
+                                lockdep_is_held(&local->sta_lock) ||
+                                lockdep_is_held(&local->sta_mtx));
     while (sta) {
         if (sta->sdata == sdata &&
             memcmp(sta->sta.addr, addr, ETH_ALEN) == 0)
             break;
-        sta = rcu_dereference(sta->hnext);
+        sta = rcu_dereference_check(sta->hnext,
+                                    rcu_read_lock_held() ||
+                                    lockdep_is_held(&local->sta_lock) ||
+                                    lockdep_is_held(&local->sta_mtx));
     }
     return sta;
 }
@@ -113,13 +119,19 @@ struct sta_info *sta_info_get_bss(struct ieee80211_sub_if_data *sdata,
     struct ieee80211_local *local = sdata->local;
     struct sta_info *sta;
 
-    sta = rcu_dereference(local->sta_hash[STA_HASH(addr)]);
+    sta = rcu_dereference_check(local->sta_hash[STA_HASH(addr)],
+                                rcu_read_lock_held() ||
+                                lockdep_is_held(&local->sta_lock) ||
+                                lockdep_is_held(&local->sta_mtx));
     while (sta) {
         if ((sta->sdata == sdata ||
              sta->sdata->bss == sdata->bss) &&
             memcmp(sta->sta.addr, addr, ETH_ALEN) == 0)
             break;
-        sta = rcu_dereference(sta->hnext);
+        sta = rcu_dereference_check(sta->hnext,
+                                    rcu_read_lock_held() ||
+                                    lockdep_is_held(&local->sta_lock) ||
+                                    lockdep_is_held(&local->sta_mtx));
     }
     return sta;
 }
@@ -431,6 +443,7 @@ int sta_info_insert_rcu(struct sta_info *sta) __acquires(RCU)
     /* check if STA exists already */
     if (sta_info_get_bss(sdata, sta->sta.addr)) {
         spin_unlock_irqrestore(&local->sta_lock, flags);
+        mutex_unlock(&local->sta_mtx);
         rcu_read_lock();
         err = -EEXIST;
         goto out_free;
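The sta_info.c hunks rely on rcu_dereference_check(), whose second argument is a lockdep expression listing every context allowed to walk the hash chain without rcu_read_lock(); CONFIG_PROVE_RCU only warns when none of the listed conditions holds. A minimal sketch of that idiom with made-up names:

/* Made-up structures, shown only to illustrate the shape of the
 * rcu_dereference_check() calls used in sta_info_get() above. */
struct bucket {
    struct item *head;       /* RCU-protected list head */
    spinlock_t lock;         /* writers hold this instead of rcu_read_lock() */
};

static struct item *first_item(struct bucket *b)
{
    return rcu_dereference_check(b->head,
                                 rcu_read_lock_held() ||
                                 lockdep_is_held(&b->lock));
}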
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index f7209d691c35..2cb77267f733 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -1992,6 +1992,7 @@ static bool ieee80211_tx_pending_skb(struct ieee80211_local *local,
 void ieee80211_tx_pending(unsigned long data)
 {
     struct ieee80211_local *local = (struct ieee80211_local *)data;
+    struct ieee80211_sub_if_data *sdata;
     unsigned long flags;
     int i;
     bool txok;
@@ -2028,6 +2029,11 @@ void ieee80211_tx_pending(unsigned long data)
             if (!txok)
                 break;
         }
+
+        if (skb_queue_empty(&local->pending[i]))
+            list_for_each_entry_rcu(sdata, &local->interfaces, list)
+                netif_tx_wake_queue(
+                    netdev_get_tx_queue(sdata->dev, i));
     }
     spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
 
diff --git a/net/mac80211/util.c b/net/mac80211/util.c index 7614821caed5..ad9009f717ed 100644 --- a/net/mac80211/util.c +++ b/net/mac80211/util.c | |||
@@ -279,13 +279,13 @@ static void __ieee80211_wake_queue(struct ieee80211_hw *hw, int queue, | |||
279 | /* someone still has this queue stopped */ | 279 | /* someone still has this queue stopped */ |
280 | return; | 280 | return; |
281 | 281 | ||
282 | if (!skb_queue_empty(&local->pending[queue])) | 282 | if (skb_queue_empty(&local->pending[queue])) { |
283 | rcu_read_lock(); | ||
284 | list_for_each_entry_rcu(sdata, &local->interfaces, list) | ||
285 | netif_tx_wake_queue(netdev_get_tx_queue(sdata->dev, queue)); | ||
286 | rcu_read_unlock(); | ||
287 | } else | ||
283 | tasklet_schedule(&local->tx_pending_tasklet); | 288 | tasklet_schedule(&local->tx_pending_tasklet); |
284 | |||
285 | rcu_read_lock(); | ||
286 | list_for_each_entry_rcu(sdata, &local->interfaces, list) | ||
287 | netif_tx_wake_queue(netdev_get_tx_queue(sdata->dev, queue)); | ||
288 | rcu_read_unlock(); | ||
289 | } | 289 | } |
290 | 290 | ||
291 | void ieee80211_wake_queue_by_reason(struct ieee80211_hw *hw, int queue, | 291 | void ieee80211_wake_queue_by_reason(struct ieee80211_hw *hw, int queue, |
@@ -1102,9 +1102,9 @@ int ieee80211_reconfig(struct ieee80211_local *local) | |||
1102 | */ | 1102 | */ |
1103 | res = drv_start(local); | 1103 | res = drv_start(local); |
1104 | if (res) { | 1104 | if (res) { |
1105 | WARN(local->suspended, "Harware became unavailable " | 1105 | WARN(local->suspended, "Hardware became unavailable " |
1106 | "upon resume. This is could be a software issue" | 1106 | "upon resume. This could be a software issue " |
1107 | "prior to suspend or a hardware issue\n"); | 1107 | "prior to suspend or a hardware issue.\n"); |
1108 | return res; | 1108 | return res; |
1109 | } | 1109 | } |
1110 | 1110 | ||
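
The tx.c change and the first util.c hunk above enforce the same ordering rule: the netdev queues are woken only once the corresponding software pending queue has drained; if frames are still pending, the tasklet is (re)scheduled and performs the wake itself after flushing. A loose userspace sketch of that rule, with invented names (pending ring, drain_one, wake_producers, reschedule_drainer), for illustration only:

/* Sketch of "wake only after the pending queue drained, else reschedule
 * the drainer". Invented names; not mac80211 code. */
#include <stdbool.h>
#include <stdio.h>

#define QLEN 8

static int pending[QLEN];
static int head, tail;			/* tiny ring of pending "frames" */

static bool queue_empty(void) { return head == tail; }

static bool drain_one(void)		/* "transmit" one pending frame */
{
	if (queue_empty())
		return false;
	printf("tx frame %d\n", pending[head]);
	head = (head + 1) % QLEN;
	return true;
}

static void wake_producers(void)      { puts("netif queues woken"); }
static void reschedule_drainer(void)  { puts("tasklet rescheduled"); }

/* like __ieee80211_wake_queue(): wake directly only if nothing is pending */
static void wake_queue(void)
{
	if (queue_empty())
		wake_producers();
	else
		reschedule_drainer();
}

/* like ieee80211_tx_pending(): flush, then wake once the queue is empty */
static void tx_pending(void)
{
	while (drain_one())
		;
	if (queue_empty())
		wake_producers();
}

int main(void)
{
	pending[tail] = 1;
	tail = (tail + 1) % QLEN;

	wake_queue();			/* frames pending -> reschedule */
	tx_pending();			/* drain, then wake */
	return 0;
}
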
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index 031a5e6fb4aa..1612d417d10c 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c | |||
@@ -1688,6 +1688,8 @@ static int packet_dev_mc(struct net_device *dev, struct packet_mclist *i, | |||
1688 | { | 1688 | { |
1689 | switch (i->type) { | 1689 | switch (i->type) { |
1690 | case PACKET_MR_MULTICAST: | 1690 | case PACKET_MR_MULTICAST: |
1691 | if (i->alen != dev->addr_len) | ||
1692 | return -EINVAL; | ||
1691 | if (what > 0) | 1693 | if (what > 0) |
1692 | return dev_mc_add(dev, i->addr, i->alen, 0); | 1694 | return dev_mc_add(dev, i->addr, i->alen, 0); |
1693 | else | 1695 | else |
@@ -1700,6 +1702,8 @@ static int packet_dev_mc(struct net_device *dev, struct packet_mclist *i, | |||
1700 | return dev_set_allmulti(dev, what); | 1702 | return dev_set_allmulti(dev, what); |
1701 | break; | 1703 | break; |
1702 | case PACKET_MR_UNICAST: | 1704 | case PACKET_MR_UNICAST: |
1705 | if (i->alen != dev->addr_len) | ||
1706 | return -EINVAL; | ||
1703 | if (what > 0) | 1707 | if (what > 0) |
1704 | return dev_unicast_add(dev, i->addr); | 1708 | return dev_unicast_add(dev, i->addr); |
1705 | else | 1709 | else |
@@ -1734,7 +1738,7 @@ static int packet_mc_add(struct sock *sk, struct packet_mreq_max *mreq) | |||
1734 | goto done; | 1738 | goto done; |
1735 | 1739 | ||
1736 | err = -EINVAL; | 1740 | err = -EINVAL; |
1737 | if (mreq->mr_alen != dev->addr_len) | 1741 | if (mreq->mr_alen > dev->addr_len) |
1738 | goto done; | 1742 | goto done; |
1739 | 1743 | ||
1740 | err = -ENOBUFS; | 1744 | err = -ENOBUFS; |
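
The af_packet.c hunks move the exact-length check to the point of use: packet_dev_mc() validates the stored membership address against the device's current addr_len every time it is applied, while packet_mc_add() only rejects addresses longer than what the device could need. A minimal sketch of the use-time check, with invented names (fake_dev, apply_membership):

/* Sketch of the use-time length check packet_dev_mc() now performs.
 * fake_dev and apply_membership are invented for the example. */
#include <errno.h>
#include <stdio.h>

struct fake_dev {
	unsigned char addr_len;			/* e.g. 6 for Ethernet */
};

struct membership {
	unsigned char alen;
	unsigned char addr[32];
};

static int apply_membership(const struct fake_dev *dev,
			    const struct membership *m)
{
	if (m->alen != dev->addr_len)		/* stale/mismatched length */
		return -EINVAL;
	/* dev_mc_add()/dev_mc_delete() would run here in the kernel */
	return 0;
}

int main(void)
{
	struct fake_dev eth = { .addr_len = 6 };
	struct membership ok = { .alen = 6 };
	struct membership bad = { .alen = 8 };

	printf("ok:  %d\n", apply_membership(&eth, &ok));	/* 0 */
	printf("bad: %d\n", apply_membership(&eth, &bad));	/* -EINVAL */
	return 0;
}
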
diff --git a/net/sctp/input.c b/net/sctp/input.c index c0c973e67add..3d74b264ea22 100644 --- a/net/sctp/input.c +++ b/net/sctp/input.c | |||
@@ -75,7 +75,7 @@ static struct sctp_association *__sctp_lookup_association( | |||
75 | const union sctp_addr *peer, | 75 | const union sctp_addr *peer, |
76 | struct sctp_transport **pt); | 76 | struct sctp_transport **pt); |
77 | 77 | ||
78 | static void sctp_add_backlog(struct sock *sk, struct sk_buff *skb); | 78 | static int sctp_add_backlog(struct sock *sk, struct sk_buff *skb); |
79 | 79 | ||
80 | 80 | ||
81 | /* Calculate the SCTP checksum of an SCTP packet. */ | 81 | /* Calculate the SCTP checksum of an SCTP packet. */ |
@@ -265,8 +265,13 @@ int sctp_rcv(struct sk_buff *skb) | |||
265 | } | 265 | } |
266 | 266 | ||
267 | if (sock_owned_by_user(sk)) { | 267 | if (sock_owned_by_user(sk)) { |
268 | if (sctp_add_backlog(sk, skb)) { | ||
269 | sctp_bh_unlock_sock(sk); | ||
270 | sctp_chunk_free(chunk); | ||
271 | skb = NULL; /* sctp_chunk_free already freed the skb */ | ||
272 | goto discard_release; | ||
273 | } | ||
268 | SCTP_INC_STATS_BH(SCTP_MIB_IN_PKT_BACKLOG); | 274 | SCTP_INC_STATS_BH(SCTP_MIB_IN_PKT_BACKLOG); |
269 | sctp_add_backlog(sk, skb); | ||
270 | } else { | 275 | } else { |
271 | SCTP_INC_STATS_BH(SCTP_MIB_IN_PKT_SOFTIRQ); | 276 | SCTP_INC_STATS_BH(SCTP_MIB_IN_PKT_SOFTIRQ); |
272 | sctp_inq_push(&chunk->rcvr->inqueue, chunk); | 277 | sctp_inq_push(&chunk->rcvr->inqueue, chunk); |
@@ -336,8 +341,10 @@ int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb) | |||
336 | sctp_bh_lock_sock(sk); | 341 | sctp_bh_lock_sock(sk); |
337 | 342 | ||
338 | if (sock_owned_by_user(sk)) { | 343 | if (sock_owned_by_user(sk)) { |
339 | sk_add_backlog(sk, skb); | 344 | if (sk_add_backlog(sk, skb)) |
340 | backloged = 1; | 345 | sctp_chunk_free(chunk); |
346 | else | ||
347 | backloged = 1; | ||
341 | } else | 348 | } else |
342 | sctp_inq_push(inqueue, chunk); | 349 | sctp_inq_push(inqueue, chunk); |
343 | 350 | ||
@@ -362,22 +369,27 @@ done: | |||
362 | return 0; | 369 | return 0; |
363 | } | 370 | } |
364 | 371 | ||
365 | static void sctp_add_backlog(struct sock *sk, struct sk_buff *skb) | 372 | static int sctp_add_backlog(struct sock *sk, struct sk_buff *skb) |
366 | { | 373 | { |
367 | struct sctp_chunk *chunk = SCTP_INPUT_CB(skb)->chunk; | 374 | struct sctp_chunk *chunk = SCTP_INPUT_CB(skb)->chunk; |
368 | struct sctp_ep_common *rcvr = chunk->rcvr; | 375 | struct sctp_ep_common *rcvr = chunk->rcvr; |
376 | int ret; | ||
369 | 377 | ||
370 | /* Hold the assoc/ep while hanging on the backlog queue. | 378 | ret = sk_add_backlog(sk, skb); |
371 | * This way, we know structures we need will not disappear from us | 379 | if (!ret) { |
372 | */ | 380 | /* Hold the assoc/ep while hanging on the backlog queue. |
373 | if (SCTP_EP_TYPE_ASSOCIATION == rcvr->type) | 381 | * This way, we know structures we need will not disappear |
374 | sctp_association_hold(sctp_assoc(rcvr)); | 382 | * from us |
375 | else if (SCTP_EP_TYPE_SOCKET == rcvr->type) | 383 | */ |
376 | sctp_endpoint_hold(sctp_ep(rcvr)); | 384 | if (SCTP_EP_TYPE_ASSOCIATION == rcvr->type) |
377 | else | 385 | sctp_association_hold(sctp_assoc(rcvr)); |
378 | BUG(); | 386 | else if (SCTP_EP_TYPE_SOCKET == rcvr->type) |
387 | sctp_endpoint_hold(sctp_ep(rcvr)); | ||
388 | else | ||
389 | BUG(); | ||
390 | } | ||
391 | return ret; | ||
379 | 392 | ||
380 | sk_add_backlog(sk, skb); | ||
381 | } | 393 | } |
382 | 394 | ||
383 | /* Handle icmp frag needed error. */ | 395 | /* Handle icmp frag needed error. */ |
diff --git a/net/sctp/socket.c b/net/sctp/socket.c index f6d1e59c4151..dfc5c127efd4 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c | |||
@@ -3720,6 +3720,9 @@ SCTP_STATIC int sctp_init_sock(struct sock *sk) | |||
3720 | SCTP_DBG_OBJCNT_INC(sock); | 3720 | SCTP_DBG_OBJCNT_INC(sock); |
3721 | percpu_counter_inc(&sctp_sockets_allocated); | 3721 | percpu_counter_inc(&sctp_sockets_allocated); |
3722 | 3722 | ||
3723 | /* Set socket backlog limit. */ | ||
3724 | sk->sk_backlog.limit = sysctl_sctp_rmem[1]; | ||
3725 | |||
3723 | local_bh_disable(); | 3726 | local_bh_disable(); |
3724 | sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1); | 3727 | sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1); |
3725 | local_bh_enable(); | 3728 | local_bh_enable(); |
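
The sctp hunks above lean on a bounded-backlog contract: sk_add_backlog() is expected to fail once the socket backlog is over its limit, the callers then free the chunk instead of queueing it, and sctp_init_sock() seeds the limit from sysctl_sctp_rmem[1]. The sketch below shows only that contract, with invented names (backlog, backlog_add); it is not the kernel implementation.

/* Bounded-backlog contract: enqueue fails once the accounted bytes exceed
 * the limit, and the rejected buffer stays owned by the caller, who frees
 * it. backlog/backlog_add are invented stand-ins for sk_add_backlog(). */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct buf {
	size_t len;
	struct buf *next;
};

struct backlog {
	size_t used, limit;
	struct buf *head, *tail;
};

static int backlog_add(struct backlog *bl, struct buf *b)
{
	if (bl->used + b->len > bl->limit)
		return -ENOBUFS;		/* caller keeps ownership */
	b->next = NULL;
	if (bl->tail)
		bl->tail->next = b;
	else
		bl->head = b;
	bl->tail = b;
	bl->used += b->len;
	return 0;
}

int main(void)
{
	struct backlog bl = { .limit = 1000 };	/* think sysctl_sctp_rmem[1] */
	struct buf *b = malloc(sizeof(*b));

	if (!b)
		return 1;
	b->len = 1500;
	if (backlog_add(&bl, b)) {		/* over limit: drop and free */
		fprintf(stderr, "backlog full, dropping\n");
		free(b);
	}
	return 0;
}
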
diff --git a/net/sunrpc/xprtrdma/transport.c b/net/sunrpc/xprtrdma/transport.c index 7018eef1dcdd..f96c2fe6137b 100644 --- a/net/sunrpc/xprtrdma/transport.c +++ b/net/sunrpc/xprtrdma/transport.c | |||
@@ -160,16 +160,15 @@ xprt_rdma_format_addresses(struct rpc_xprt *xprt) | |||
160 | (void)rpc_ntop(sap, buf, sizeof(buf)); | 160 | (void)rpc_ntop(sap, buf, sizeof(buf)); |
161 | xprt->address_strings[RPC_DISPLAY_ADDR] = kstrdup(buf, GFP_KERNEL); | 161 | xprt->address_strings[RPC_DISPLAY_ADDR] = kstrdup(buf, GFP_KERNEL); |
162 | 162 | ||
163 | (void)snprintf(buf, sizeof(buf), "%u", rpc_get_port(sap)); | 163 | snprintf(buf, sizeof(buf), "%u", rpc_get_port(sap)); |
164 | xprt->address_strings[RPC_DISPLAY_PORT] = kstrdup(buf, GFP_KERNEL); | 164 | xprt->address_strings[RPC_DISPLAY_PORT] = kstrdup(buf, GFP_KERNEL); |
165 | 165 | ||
166 | xprt->address_strings[RPC_DISPLAY_PROTO] = "rdma"; | 166 | xprt->address_strings[RPC_DISPLAY_PROTO] = "rdma"; |
167 | 167 | ||
168 | (void)snprintf(buf, sizeof(buf), "%02x%02x%02x%02x", | 168 | snprintf(buf, sizeof(buf), "%08x", ntohl(sin->sin_addr.s_addr)); |
169 | NIPQUAD(sin->sin_addr.s_addr)); | ||
170 | xprt->address_strings[RPC_DISPLAY_HEX_ADDR] = kstrdup(buf, GFP_KERNEL); | 169 | xprt->address_strings[RPC_DISPLAY_HEX_ADDR] = kstrdup(buf, GFP_KERNEL); |
171 | 170 | ||
172 | (void)snprintf(buf, sizeof(buf), "%4hx", rpc_get_port(sap)); | 171 | snprintf(buf, sizeof(buf), "%4hx", rpc_get_port(sap)); |
173 | xprt->address_strings[RPC_DISPLAY_HEX_PORT] = kstrdup(buf, GFP_KERNEL); | 172 | xprt->address_strings[RPC_DISPLAY_HEX_PORT] = kstrdup(buf, GFP_KERNEL); |
174 | 173 | ||
175 | /* netid */ | 174 | /* netid */ |
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index 3d739e5d15d8..4f55ab7ec1b1 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c | |||
@@ -297,12 +297,11 @@ static void xs_format_common_peer_addresses(struct rpc_xprt *xprt) | |||
297 | switch (sap->sa_family) { | 297 | switch (sap->sa_family) { |
298 | case AF_INET: | 298 | case AF_INET: |
299 | sin = xs_addr_in(xprt); | 299 | sin = xs_addr_in(xprt); |
300 | (void)snprintf(buf, sizeof(buf), "%02x%02x%02x%02x", | 300 | snprintf(buf, sizeof(buf), "%08x", ntohl(sin->sin_addr.s_addr)); |
301 | NIPQUAD(sin->sin_addr.s_addr)); | ||
302 | break; | 301 | break; |
303 | case AF_INET6: | 302 | case AF_INET6: |
304 | sin6 = xs_addr_in6(xprt); | 303 | sin6 = xs_addr_in6(xprt); |
305 | (void)snprintf(buf, sizeof(buf), "%pi6", &sin6->sin6_addr); | 304 | snprintf(buf, sizeof(buf), "%pi6", &sin6->sin6_addr); |
306 | break; | 305 | break; |
307 | default: | 306 | default: |
308 | BUG(); | 307 | BUG(); |
@@ -315,10 +314,10 @@ static void xs_format_common_peer_ports(struct rpc_xprt *xprt) | |||
315 | struct sockaddr *sap = xs_addr(xprt); | 314 | struct sockaddr *sap = xs_addr(xprt); |
316 | char buf[128]; | 315 | char buf[128]; |
317 | 316 | ||
318 | (void)snprintf(buf, sizeof(buf), "%u", rpc_get_port(sap)); | 317 | snprintf(buf, sizeof(buf), "%u", rpc_get_port(sap)); |
319 | xprt->address_strings[RPC_DISPLAY_PORT] = kstrdup(buf, GFP_KERNEL); | 318 | xprt->address_strings[RPC_DISPLAY_PORT] = kstrdup(buf, GFP_KERNEL); |
320 | 319 | ||
321 | (void)snprintf(buf, sizeof(buf), "%4hx", rpc_get_port(sap)); | 320 | snprintf(buf, sizeof(buf), "%4hx", rpc_get_port(sap)); |
322 | xprt->address_strings[RPC_DISPLAY_HEX_PORT] = kstrdup(buf, GFP_KERNEL); | 321 | xprt->address_strings[RPC_DISPLAY_HEX_PORT] = kstrdup(buf, GFP_KERNEL); |
323 | } | 322 | } |
324 | 323 | ||
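
The sunrpc hunks replace the byte-at-a-time NIPQUAD formatting of the hex address with a single "%08x" of the ntohl()'d value; since NIPQUAD printed the four octets in network order, the resulting string is the same. A quick userspace check of that equivalence (192.0.2.33 is just an example address):

#include <arpa/inet.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	struct in_addr a;
	unsigned char *p;
	char old_style[16], new_style[16];

	inet_pton(AF_INET, "192.0.2.33", &a);
	p = (unsigned char *)&a.s_addr;		/* octets in network order */

	/* what the NIPQUAD-based format string expanded to */
	snprintf(old_style, sizeof(old_style), "%02x%02x%02x%02x",
		 p[0], p[1], p[2], p[3]);
	/* the replacement */
	snprintf(new_style, sizeof(new_style), "%08x",
		 (unsigned int)ntohl(a.s_addr));

	printf("%s %s -> %s\n", old_style, new_style,
	       strcmp(old_style, new_style) ? "differ" : "match");
	return 0;
}
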
diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c index 327011fcc407..78091375ca12 100644 --- a/net/tipc/bearer.c +++ b/net/tipc/bearer.c | |||
@@ -45,10 +45,10 @@ | |||
45 | 45 | ||
46 | #define MAX_ADDR_STR 32 | 46 | #define MAX_ADDR_STR 32 |
47 | 47 | ||
48 | static struct media *media_list = NULL; | 48 | static struct media media_list[MAX_MEDIA]; |
49 | static u32 media_count = 0; | 49 | static u32 media_count = 0; |
50 | 50 | ||
51 | struct bearer *tipc_bearers = NULL; | 51 | struct bearer tipc_bearers[MAX_BEARERS]; |
52 | 52 | ||
53 | /** | 53 | /** |
54 | * media_name_valid - validate media name | 54 | * media_name_valid - validate media name |
@@ -108,9 +108,11 @@ int tipc_register_media(u32 media_type, | |||
108 | int res = -EINVAL; | 108 | int res = -EINVAL; |
109 | 109 | ||
110 | write_lock_bh(&tipc_net_lock); | 110 | write_lock_bh(&tipc_net_lock); |
111 | if (!media_list) | ||
112 | goto exit; | ||
113 | 111 | ||
112 | if (tipc_mode != TIPC_NET_MODE) { | ||
113 | warn("Media <%s> rejected, not in networked mode yet\n", name); | ||
114 | goto exit; | ||
115 | } | ||
114 | if (!media_name_valid(name)) { | 116 | if (!media_name_valid(name)) { |
115 | warn("Media <%s> rejected, illegal name\n", name); | 117 | warn("Media <%s> rejected, illegal name\n", name); |
116 | goto exit; | 118 | goto exit; |
@@ -660,33 +662,10 @@ int tipc_disable_bearer(const char *name) | |||
660 | 662 | ||
661 | 663 | ||
662 | 664 | ||
663 | int tipc_bearer_init(void) | ||
664 | { | ||
665 | int res; | ||
666 | |||
667 | write_lock_bh(&tipc_net_lock); | ||
668 | tipc_bearers = kcalloc(MAX_BEARERS, sizeof(struct bearer), GFP_ATOMIC); | ||
669 | media_list = kcalloc(MAX_MEDIA, sizeof(struct media), GFP_ATOMIC); | ||
670 | if (tipc_bearers && media_list) { | ||
671 | res = 0; | ||
672 | } else { | ||
673 | kfree(tipc_bearers); | ||
674 | kfree(media_list); | ||
675 | tipc_bearers = NULL; | ||
676 | media_list = NULL; | ||
677 | res = -ENOMEM; | ||
678 | } | ||
679 | write_unlock_bh(&tipc_net_lock); | ||
680 | return res; | ||
681 | } | ||
682 | |||
683 | void tipc_bearer_stop(void) | 665 | void tipc_bearer_stop(void) |
684 | { | 666 | { |
685 | u32 i; | 667 | u32 i; |
686 | 668 | ||
687 | if (!tipc_bearers) | ||
688 | return; | ||
689 | |||
690 | for (i = 0; i < MAX_BEARERS; i++) { | 669 | for (i = 0; i < MAX_BEARERS; i++) { |
691 | if (tipc_bearers[i].active) | 670 | if (tipc_bearers[i].active) |
692 | tipc_bearers[i].publ.blocked = 1; | 671 | tipc_bearers[i].publ.blocked = 1; |
@@ -695,10 +674,6 @@ void tipc_bearer_stop(void) | |||
695 | if (tipc_bearers[i].active) | 674 | if (tipc_bearers[i].active) |
696 | bearer_disable(tipc_bearers[i].publ.name); | 675 | bearer_disable(tipc_bearers[i].publ.name); |
697 | } | 676 | } |
698 | kfree(tipc_bearers); | ||
699 | kfree(media_list); | ||
700 | tipc_bearers = NULL; | ||
701 | media_list = NULL; | ||
702 | media_count = 0; | 677 | media_count = 0; |
703 | } | 678 | } |
704 | 679 | ||
diff --git a/net/tipc/bearer.h b/net/tipc/bearer.h index ca5734892713..000228e93f9e 100644 --- a/net/tipc/bearer.h +++ b/net/tipc/bearer.h | |||
@@ -114,7 +114,7 @@ struct bearer_name { | |||
114 | 114 | ||
115 | struct link; | 115 | struct link; |
116 | 116 | ||
117 | extern struct bearer *tipc_bearers; | 117 | extern struct bearer tipc_bearers[]; |
118 | 118 | ||
119 | void tipc_media_addr_printf(struct print_buf *pb, struct tipc_media_addr *a); | 119 | void tipc_media_addr_printf(struct print_buf *pb, struct tipc_media_addr *a); |
120 | struct sk_buff *tipc_media_get_names(void); | 120 | struct sk_buff *tipc_media_get_names(void); |
diff --git a/net/tipc/link.c b/net/tipc/link.c index 6f50f6423f63..1a7e4665af80 100644 --- a/net/tipc/link.c +++ b/net/tipc/link.c | |||
@@ -1882,6 +1882,15 @@ void tipc_recv_msg(struct sk_buff *head, struct tipc_bearer *tb_ptr) | |||
1882 | (msg_destnode(msg) != tipc_own_addr))) | 1882 | (msg_destnode(msg) != tipc_own_addr))) |
1883 | goto cont; | 1883 | goto cont; |
1884 | 1884 | ||
1885 | /* Discard non-routeable messages destined for another node */ | ||
1886 | |||
1887 | if (unlikely(!msg_isdata(msg) && | ||
1888 | (msg_destnode(msg) != tipc_own_addr))) { | ||
1889 | if ((msg_user(msg) != CONN_MANAGER) && | ||
1890 | (msg_user(msg) != MSG_FRAGMENTER)) | ||
1891 | goto cont; | ||
1892 | } | ||
1893 | |||
1885 | /* Locate unicast link endpoint that should handle message */ | 1894 | /* Locate unicast link endpoint that should handle message */ |
1886 | 1895 | ||
1887 | n_ptr = tipc_node_find(msg_prevnode(msg)); | 1896 | n_ptr = tipc_node_find(msg_prevnode(msg)); |
diff --git a/net/tipc/net.c b/net/tipc/net.c index 7906608bf510..f25b1cdb64eb 100644 --- a/net/tipc/net.c +++ b/net/tipc/net.c | |||
@@ -116,7 +116,8 @@ | |||
116 | */ | 116 | */ |
117 | 117 | ||
118 | DEFINE_RWLOCK(tipc_net_lock); | 118 | DEFINE_RWLOCK(tipc_net_lock); |
119 | struct network tipc_net = { NULL }; | 119 | struct _zone *tipc_zones[256] = { NULL, }; |
120 | struct network tipc_net = { tipc_zones }; | ||
120 | 121 | ||
121 | struct tipc_node *tipc_net_select_remote_node(u32 addr, u32 ref) | 122 | struct tipc_node *tipc_net_select_remote_node(u32 addr, u32 ref) |
122 | { | 123 | { |
@@ -158,28 +159,12 @@ void tipc_net_send_external_routes(u32 dest) | |||
158 | } | 159 | } |
159 | } | 160 | } |
160 | 161 | ||
161 | static int net_init(void) | ||
162 | { | ||
163 | memset(&tipc_net, 0, sizeof(tipc_net)); | ||
164 | tipc_net.zones = kcalloc(tipc_max_zones + 1, sizeof(struct _zone *), GFP_ATOMIC); | ||
165 | if (!tipc_net.zones) { | ||
166 | return -ENOMEM; | ||
167 | } | ||
168 | return 0; | ||
169 | } | ||
170 | |||
171 | static void net_stop(void) | 162 | static void net_stop(void) |
172 | { | 163 | { |
173 | u32 z_num; | 164 | u32 z_num; |
174 | 165 | ||
175 | if (!tipc_net.zones) | 166 | for (z_num = 1; z_num <= tipc_max_zones; z_num++) |
176 | return; | ||
177 | |||
178 | for (z_num = 1; z_num <= tipc_max_zones; z_num++) { | ||
179 | tipc_zone_delete(tipc_net.zones[z_num]); | 167 | tipc_zone_delete(tipc_net.zones[z_num]); |
180 | } | ||
181 | kfree(tipc_net.zones); | ||
182 | tipc_net.zones = NULL; | ||
183 | } | 168 | } |
184 | 169 | ||
185 | static void net_route_named_msg(struct sk_buff *buf) | 170 | static void net_route_named_msg(struct sk_buff *buf) |
@@ -282,9 +267,7 @@ int tipc_net_start(u32 addr) | |||
282 | tipc_named_reinit(); | 267 | tipc_named_reinit(); |
283 | tipc_port_reinit(); | 268 | tipc_port_reinit(); |
284 | 269 | ||
285 | if ((res = tipc_bearer_init()) || | 270 | if ((res = tipc_cltr_init()) || |
286 | (res = net_init()) || | ||
287 | (res = tipc_cltr_init()) || | ||
288 | (res = tipc_bclink_init())) { | 271 | (res = tipc_bclink_init())) { |
289 | return res; | 272 | return res; |
290 | } | 273 | } |
diff --git a/net/tipc/socket.c b/net/tipc/socket.c index 1ea64f09cc45..4b235fc1c70f 100644 --- a/net/tipc/socket.c +++ b/net/tipc/socket.c | |||
@@ -1322,8 +1322,10 @@ static u32 dispatch(struct tipc_port *tport, struct sk_buff *buf) | |||
1322 | if (!sock_owned_by_user(sk)) { | 1322 | if (!sock_owned_by_user(sk)) { |
1323 | res = filter_rcv(sk, buf); | 1323 | res = filter_rcv(sk, buf); |
1324 | } else { | 1324 | } else { |
1325 | sk_add_backlog(sk, buf); | 1325 | if (sk_add_backlog(sk, buf)) |
1326 | res = TIPC_OK; | 1326 | res = TIPC_ERR_OVERLOAD; |
1327 | else | ||
1328 | res = TIPC_OK; | ||
1327 | } | 1329 | } |
1328 | bh_unlock_sock(sk); | 1330 | bh_unlock_sock(sk); |
1329 | 1331 | ||
diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c index ac91f0dfa144..ff123e56114a 100644 --- a/net/tipc/subscr.c +++ b/net/tipc/subscr.c | |||
@@ -76,19 +76,6 @@ struct top_srv { | |||
76 | static struct top_srv topsrv = { 0 }; | 76 | static struct top_srv topsrv = { 0 }; |
77 | 77 | ||
78 | /** | 78 | /** |
79 | * htohl - convert value to endianness used by destination | ||
80 | * @in: value to convert | ||
81 | * @swap: non-zero if endianness must be reversed | ||
82 | * | ||
83 | * Returns converted value | ||
84 | */ | ||
85 | |||
86 | static u32 htohl(u32 in, int swap) | ||
87 | { | ||
88 | return swap ? swab32(in) : in; | ||
89 | } | ||
90 | |||
91 | /** | ||
92 | * subscr_send_event - send a message containing a tipc_event to the subscriber | 79 | * subscr_send_event - send a message containing a tipc_event to the subscriber |
93 | * | 80 | * |
94 | * Note: Must not hold subscriber's server port lock, since tipc_send() will | 81 | * Note: Must not hold subscriber's server port lock, since tipc_send() will |
@@ -107,11 +94,11 @@ static void subscr_send_event(struct subscription *sub, | |||
107 | msg_sect.iov_base = (void *)&sub->evt; | 94 | msg_sect.iov_base = (void *)&sub->evt; |
108 | msg_sect.iov_len = sizeof(struct tipc_event); | 95 | msg_sect.iov_len = sizeof(struct tipc_event); |
109 | 96 | ||
110 | sub->evt.event = htohl(event, sub->swap); | 97 | sub->evt.event = htonl(event); |
111 | sub->evt.found_lower = htohl(found_lower, sub->swap); | 98 | sub->evt.found_lower = htonl(found_lower); |
112 | sub->evt.found_upper = htohl(found_upper, sub->swap); | 99 | sub->evt.found_upper = htonl(found_upper); |
113 | sub->evt.port.ref = htohl(port_ref, sub->swap); | 100 | sub->evt.port.ref = htonl(port_ref); |
114 | sub->evt.port.node = htohl(node, sub->swap); | 101 | sub->evt.port.node = htonl(node); |
115 | tipc_send(sub->server_ref, 1, &msg_sect); | 102 | tipc_send(sub->server_ref, 1, &msg_sect); |
116 | } | 103 | } |
117 | 104 | ||
@@ -287,16 +274,23 @@ static void subscr_cancel(struct tipc_subscr *s, | |||
287 | { | 274 | { |
288 | struct subscription *sub; | 275 | struct subscription *sub; |
289 | struct subscription *sub_temp; | 276 | struct subscription *sub_temp; |
277 | __u32 type, lower, upper; | ||
290 | int found = 0; | 278 | int found = 0; |
291 | 279 | ||
292 | /* Find first matching subscription, exit if not found */ | 280 | /* Find first matching subscription, exit if not found */ |
293 | 281 | ||
282 | type = ntohl(s->seq.type); | ||
283 | lower = ntohl(s->seq.lower); | ||
284 | upper = ntohl(s->seq.upper); | ||
285 | |||
294 | list_for_each_entry_safe(sub, sub_temp, &subscriber->subscription_list, | 286 | list_for_each_entry_safe(sub, sub_temp, &subscriber->subscription_list, |
295 | subscription_list) { | 287 | subscription_list) { |
296 | if (!memcmp(s, &sub->evt.s, sizeof(struct tipc_subscr))) { | 288 | if ((type == sub->seq.type) && |
297 | found = 1; | 289 | (lower == sub->seq.lower) && |
298 | break; | 290 | (upper == sub->seq.upper)) { |
299 | } | 291 | found = 1; |
292 | break; | ||
293 | } | ||
300 | } | 294 | } |
301 | if (!found) | 295 | if (!found) |
302 | return; | 296 | return; |
@@ -325,16 +319,10 @@ static struct subscription *subscr_subscribe(struct tipc_subscr *s, | |||
325 | struct subscriber *subscriber) | 319 | struct subscriber *subscriber) |
326 | { | 320 | { |
327 | struct subscription *sub; | 321 | struct subscription *sub; |
328 | int swap; | ||
329 | |||
330 | /* Determine subscriber's endianness */ | ||
331 | |||
332 | swap = !(s->filter & (TIPC_SUB_PORTS | TIPC_SUB_SERVICE)); | ||
333 | 322 | ||
334 | /* Detect & process a subscription cancellation request */ | 323 | /* Detect & process a subscription cancellation request */ |
335 | 324 | ||
336 | if (s->filter & htohl(TIPC_SUB_CANCEL, swap)) { | 325 | if (ntohl(s->filter) & TIPC_SUB_CANCEL) { |
337 | s->filter &= ~htohl(TIPC_SUB_CANCEL, swap); | ||
338 | subscr_cancel(s, subscriber); | 326 | subscr_cancel(s, subscriber); |
339 | return NULL; | 327 | return NULL; |
340 | } | 328 | } |
@@ -359,11 +347,11 @@ static struct subscription *subscr_subscribe(struct tipc_subscr *s, | |||
359 | 347 | ||
360 | /* Initialize subscription object */ | 348 | /* Initialize subscription object */ |
361 | 349 | ||
362 | sub->seq.type = htohl(s->seq.type, swap); | 350 | sub->seq.type = ntohl(s->seq.type); |
363 | sub->seq.lower = htohl(s->seq.lower, swap); | 351 | sub->seq.lower = ntohl(s->seq.lower); |
364 | sub->seq.upper = htohl(s->seq.upper, swap); | 352 | sub->seq.upper = ntohl(s->seq.upper); |
365 | sub->timeout = htohl(s->timeout, swap); | 353 | sub->timeout = ntohl(s->timeout); |
366 | sub->filter = htohl(s->filter, swap); | 354 | sub->filter = ntohl(s->filter); |
367 | if ((!(sub->filter & TIPC_SUB_PORTS) == | 355 | if ((!(sub->filter & TIPC_SUB_PORTS) == |
368 | !(sub->filter & TIPC_SUB_SERVICE)) || | 356 | !(sub->filter & TIPC_SUB_SERVICE)) || |
369 | (sub->seq.lower > sub->seq.upper)) { | 357 | (sub->seq.lower > sub->seq.upper)) { |
@@ -376,7 +364,6 @@ static struct subscription *subscr_subscribe(struct tipc_subscr *s, | |||
376 | INIT_LIST_HEAD(&sub->nameseq_list); | 364 | INIT_LIST_HEAD(&sub->nameseq_list); |
377 | list_add(&sub->subscription_list, &subscriber->subscription_list); | 365 | list_add(&sub->subscription_list, &subscriber->subscription_list); |
378 | sub->server_ref = subscriber->port_ref; | 366 | sub->server_ref = subscriber->port_ref; |
379 | sub->swap = swap; | ||
380 | memcpy(&sub->evt.s, s, sizeof(struct tipc_subscr)); | 367 | memcpy(&sub->evt.s, s, sizeof(struct tipc_subscr)); |
381 | atomic_inc(&topsrv.subscription_count); | 368 | atomic_inc(&topsrv.subscription_count); |
382 | if (sub->timeout != TIPC_WAIT_FOREVER) { | 369 | if (sub->timeout != TIPC_WAIT_FOREVER) { |
diff --git a/net/tipc/subscr.h b/net/tipc/subscr.h index 45d89bf4d202..c20f496d95b2 100644 --- a/net/tipc/subscr.h +++ b/net/tipc/subscr.h | |||
@@ -53,7 +53,6 @@ typedef void (*tipc_subscr_event) (struct subscription *sub, | |||
53 | * @nameseq_list: adjacent subscriptions in name sequence's subscription list | 53 | * @nameseq_list: adjacent subscriptions in name sequence's subscription list |
54 | * @subscription_list: adjacent subscriptions in subscriber's subscription list | 54 | * @subscription_list: adjacent subscriptions in subscriber's subscription list |
55 | * @server_ref: object reference of server port associated with subscription | 55 | * @server_ref: object reference of server port associated with subscription |
56 | * @swap: indicates if subscriber uses opposite endianness in its messages | ||
57 | * @evt: template for events generated by subscription | 56 | * @evt: template for events generated by subscription |
58 | */ | 57 | */ |
59 | 58 | ||
@@ -66,7 +65,6 @@ struct subscription { | |||
66 | struct list_head nameseq_list; | 65 | struct list_head nameseq_list; |
67 | struct list_head subscription_list; | 66 | struct list_head subscription_list; |
68 | u32 server_ref; | 67 | u32 server_ref; |
69 | int swap; | ||
70 | struct tipc_event evt; | 68 | struct tipc_event evt; |
71 | }; | 69 | }; |
72 | 70 | ||
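
With the swap heuristic gone, the TIPC subscription fields are treated as plain network byte order on the wire and converted with ntohl()/htonl() at the boundary, which is also what makes the field-by-field compare in subscr_cancel() possible. A minimal sketch of that convention; wire_subscr, host_subscr and decode_subscr are invented stand-ins for struct tipc_subscr and the real conversion sites:

/* Always-network-byte-order convention for the subscription fields.
 * Invented types; not the TIPC structures. */
#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

struct wire_subscr {			/* as received from the subscriber */
	uint32_t type, lower, upper, timeout, filter;
};

struct host_subscr {			/* as used internally */
	uint32_t type, lower, upper, timeout, filter;
};

static void decode_subscr(const struct wire_subscr *w, struct host_subscr *h)
{
	h->type    = ntohl(w->type);	/* network order regardless of peer */
	h->lower   = ntohl(w->lower);
	h->upper   = ntohl(w->upper);
	h->timeout = ntohl(w->timeout);
	h->filter  = ntohl(w->filter);
}

int main(void)
{
	struct wire_subscr w = { .type = htonl(42), .lower = htonl(1),
				 .upper = htonl(100) };
	struct host_subscr h;

	decode_subscr(&w, &h);
	printf("type %u range [%u,%u]\n", h.type, h.lower, h.upper);
	return 0;
}
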
diff --git a/net/wireless/reg.c b/net/wireless/reg.c index e857d72c7e8c..496348c48506 100644 --- a/net/wireless/reg.c +++ b/net/wireless/reg.c | |||
@@ -324,7 +324,7 @@ struct reg_regdb_search_request { | |||
324 | }; | 324 | }; |
325 | 325 | ||
326 | static LIST_HEAD(reg_regdb_search_list); | 326 | static LIST_HEAD(reg_regdb_search_list); |
327 | static DEFINE_SPINLOCK(reg_regdb_search_lock); | 327 | static DEFINE_MUTEX(reg_regdb_search_mutex); |
328 | 328 | ||
329 | static void reg_regdb_search(struct work_struct *work) | 329 | static void reg_regdb_search(struct work_struct *work) |
330 | { | 330 | { |
@@ -332,7 +332,7 @@ static void reg_regdb_search(struct work_struct *work) | |||
332 | const struct ieee80211_regdomain *curdom, *regdom; | 332 | const struct ieee80211_regdomain *curdom, *regdom; |
333 | int i, r; | 333 | int i, r; |
334 | 334 | ||
335 | spin_lock(®_regdb_search_lock); | 335 | mutex_lock(®_regdb_search_mutex); |
336 | while (!list_empty(®_regdb_search_list)) { | 336 | while (!list_empty(®_regdb_search_list)) { |
337 | request = list_first_entry(®_regdb_search_list, | 337 | request = list_first_entry(®_regdb_search_list, |
338 | struct reg_regdb_search_request, | 338 | struct reg_regdb_search_request, |
@@ -346,18 +346,16 @@ static void reg_regdb_search(struct work_struct *work) | |||
346 | r = reg_copy_regd(®dom, curdom); | 346 | r = reg_copy_regd(®dom, curdom); |
347 | if (r) | 347 | if (r) |
348 | break; | 348 | break; |
349 | spin_unlock(®_regdb_search_lock); | ||
350 | mutex_lock(&cfg80211_mutex); | 349 | mutex_lock(&cfg80211_mutex); |
351 | set_regdom(regdom); | 350 | set_regdom(regdom); |
352 | mutex_unlock(&cfg80211_mutex); | 351 | mutex_unlock(&cfg80211_mutex); |
353 | spin_lock(®_regdb_search_lock); | ||
354 | break; | 352 | break; |
355 | } | 353 | } |
356 | } | 354 | } |
357 | 355 | ||
358 | kfree(request); | 356 | kfree(request); |
359 | } | 357 | } |
360 | spin_unlock(®_regdb_search_lock); | 358 | mutex_unlock(®_regdb_search_mutex); |
361 | } | 359 | } |
362 | 360 | ||
363 | static DECLARE_WORK(reg_regdb_work, reg_regdb_search); | 361 | static DECLARE_WORK(reg_regdb_work, reg_regdb_search); |
@@ -375,9 +373,9 @@ static void reg_regdb_query(const char *alpha2) | |||
375 | 373 | ||
376 | memcpy(request->alpha2, alpha2, 2); | 374 | memcpy(request->alpha2, alpha2, 2); |
377 | 375 | ||
378 | spin_lock(®_regdb_search_lock); | 376 | mutex_lock(®_regdb_search_mutex); |
379 | list_add_tail(&request->list, ®_regdb_search_list); | 377 | list_add_tail(&request->list, ®_regdb_search_list); |
380 | spin_unlock(®_regdb_search_lock); | 378 | mutex_unlock(®_regdb_search_mutex); |
381 | 379 | ||
382 | schedule_work(®_regdb_work); | 380 | schedule_work(®_regdb_work); |
383 | } | 381 | } |
diff --git a/net/x25/x25_dev.c b/net/x25/x25_dev.c index 3e1efe534645..52e304212241 100644 --- a/net/x25/x25_dev.c +++ b/net/x25/x25_dev.c | |||
@@ -53,7 +53,7 @@ static int x25_receive_data(struct sk_buff *skb, struct x25_neigh *nb) | |||
53 | if (!sock_owned_by_user(sk)) { | 53 | if (!sock_owned_by_user(sk)) { |
54 | queued = x25_process_rx_frame(sk, skb); | 54 | queued = x25_process_rx_frame(sk, skb); |
55 | } else { | 55 | } else { |
56 | sk_add_backlog(sk, skb); | 56 | queued = !sk_add_backlog(sk, skb); |
57 | } | 57 | } |
58 | bh_unlock_sock(sk); | 58 | bh_unlock_sock(sk); |
59 | sock_put(sk); | 59 | sock_put(sk); |
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index 34a5ef8316e7..843e066649cb 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c | |||
@@ -1372,7 +1372,8 @@ static inline int xfrm_init_path(struct xfrm_dst *path, struct dst_entry *dst, | |||
1372 | return err; | 1372 | return err; |
1373 | } | 1373 | } |
1374 | 1374 | ||
1375 | static inline int xfrm_fill_dst(struct xfrm_dst *xdst, struct net_device *dev) | 1375 | static inline int xfrm_fill_dst(struct xfrm_dst *xdst, struct net_device *dev, |
1376 | struct flowi *fl) | ||
1376 | { | 1377 | { |
1377 | struct xfrm_policy_afinfo *afinfo = | 1378 | struct xfrm_policy_afinfo *afinfo = |
1378 | xfrm_policy_get_afinfo(xdst->u.dst.ops->family); | 1379 | xfrm_policy_get_afinfo(xdst->u.dst.ops->family); |
@@ -1381,7 +1382,7 @@ static inline int xfrm_fill_dst(struct xfrm_dst *xdst, struct net_device *dev) | |||
1381 | if (!afinfo) | 1382 | if (!afinfo) |
1382 | return -EINVAL; | 1383 | return -EINVAL; |
1383 | 1384 | ||
1384 | err = afinfo->fill_dst(xdst, dev); | 1385 | err = afinfo->fill_dst(xdst, dev, fl); |
1385 | 1386 | ||
1386 | xfrm_policy_put_afinfo(afinfo); | 1387 | xfrm_policy_put_afinfo(afinfo); |
1387 | 1388 | ||
@@ -1486,7 +1487,7 @@ static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy, | |||
1486 | for (dst_prev = dst0; dst_prev != dst; dst_prev = dst_prev->child) { | 1487 | for (dst_prev = dst0; dst_prev != dst; dst_prev = dst_prev->child) { |
1487 | struct xfrm_dst *xdst = (struct xfrm_dst *)dst_prev; | 1488 | struct xfrm_dst *xdst = (struct xfrm_dst *)dst_prev; |
1488 | 1489 | ||
1489 | err = xfrm_fill_dst(xdst, dev); | 1490 | err = xfrm_fill_dst(xdst, dev, fl); |
1490 | if (err) | 1491 | if (err) |
1491 | goto free_dst; | 1492 | goto free_dst; |
1492 | 1493 | ||