Diffstat (limited to 'net')
-rw-r--r--  net/bridge/netfilter/ebtables.c  |  5
-rw-r--r--  net/core/dev.c                   | 14
-rw-r--r--  net/core/ethtool.c               |  1
-rw-r--r--  net/core/filter.c                | 32
-rw-r--r--  net/core/pktgen.c                |  2
-rw-r--r--  net/ipv4/netfilter/arp_tables.c  |  6
-rw-r--r--  net/ipv4/netfilter/ip_tables.c   |  6
-rw-r--r--  net/ipv6/netfilter/ip6_tables.c  |  6
-rw-r--r--  net/mac802154/mib.c              |  1
-rw-r--r--  net/netfilter/nf_tables_api.c    |  7
-rw-r--r--  net/netfilter/xt_cgroup.c        |  3
-rw-r--r--  net/netfilter/xt_connlimit.c     | 25
-rw-r--r--  net/netfilter/xt_osf.c           |  2
-rw-r--r--  net/packet/af_packet.c           |  3
-rw-r--r--  net/sctp/socket.c                | 36
-rw-r--r--  net/tipc/net.c                   |  3
-rw-r--r--  net/tipc/socket.c                |  3
17 files changed, 107 insertions, 48 deletions
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
index 0e474b13463b..1059ed3bc255 100644
--- a/net/bridge/netfilter/ebtables.c
+++ b/net/bridge/netfilter/ebtables.c
@@ -1044,10 +1044,9 @@ static int do_replace_finish(struct net *net, struct ebt_replace *repl,
         if (repl->num_counters &&
             copy_to_user(repl->counters, counterstmp,
             repl->num_counters * sizeof(struct ebt_counter))) {
-                ret = -EFAULT;
+                /* Silent error, can't fail, new table is already in place */
+                net_warn_ratelimited("ebtables: counters copy to user failed while replacing table\n");
         }
-        else
-                ret = 0;
 
         /* decrease module count and free resources */
         EBT_ENTRY_ITERATE(table->entries, table->entries_size,
diff --git a/net/core/dev.c b/net/core/dev.c
index 757063420ce0..14dac0654f28 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -4043,6 +4043,7 @@ static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
         skb->vlan_tci = 0;
         skb->dev = napi->dev;
         skb->skb_iif = 0;
+        skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));
 
         napi->skb = skb;
 }
@@ -4588,8 +4589,7 @@ void *netdev_lower_get_next_private(struct net_device *dev,
         if (&lower->list == &dev->adj_list.lower)
                 return NULL;
 
-        if (iter)
-                *iter = lower->list.next;
+        *iter = lower->list.next;
 
         return lower->private;
 }
@@ -4617,8 +4617,7 @@ void *netdev_lower_get_next_private_rcu(struct net_device *dev,
         if (&lower->list == &dev->adj_list.lower)
                 return NULL;
 
-        if (iter)
-                *iter = &lower->list;
+        *iter = &lower->list;
 
         return lower->private;
 }
@@ -5696,6 +5695,13 @@ static netdev_features_t netdev_fix_features(struct net_device *dev,
                 }
         }
 
+#ifdef CONFIG_NET_RX_BUSY_POLL
+        if (dev->netdev_ops->ndo_busy_poll)
+                features |= NETIF_F_BUSY_POLL;
+        else
+#endif
+                features &= ~NETIF_F_BUSY_POLL;
+
         return features;
 }
 
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index 30071dec287a..640ba0e5831c 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -97,6 +97,7 @@ static const char netdev_features_strings[NETDEV_FEATURE_COUNT][ETH_GSTRING_LEN]
         [NETIF_F_RXFCS_BIT] =            "rx-fcs",
         [NETIF_F_RXALL_BIT] =            "rx-all",
         [NETIF_F_HW_L2FW_DOFFLOAD_BIT] = "l2-fwd-offload",
+        [NETIF_F_BUSY_POLL_BIT] =        "busy-poll",
 };
 
 static int ethtool_get_features(struct net_device *dev, void __user *useraddr)
diff --git a/net/core/filter.c b/net/core/filter.c
index 765556ba32ef..e08b3822c72a 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -295,43 +295,43 @@ select_insn:
                 (*(s64 *) &A) >>= K;
                 CONT;
         BPF_ALU64_BPF_MOD_BPF_X:
+                if (unlikely(X == 0))
+                        return 0;
                 tmp = A;
-                if (X)
-                        A = do_div(tmp, X);
+                A = do_div(tmp, X);
                 CONT;
         BPF_ALU_BPF_MOD_BPF_X:
+                if (unlikely(X == 0))
+                        return 0;
                 tmp = (u32) A;
-                if (X)
-                        A = do_div(tmp, (u32) X);
+                A = do_div(tmp, (u32) X);
                 CONT;
         BPF_ALU64_BPF_MOD_BPF_K:
                 tmp = A;
-                if (K)
-                        A = do_div(tmp, K);
+                A = do_div(tmp, K);
                 CONT;
         BPF_ALU_BPF_MOD_BPF_K:
                 tmp = (u32) A;
-                if (K)
-                        A = do_div(tmp, (u32) K);
+                A = do_div(tmp, (u32) K);
                 CONT;
         BPF_ALU64_BPF_DIV_BPF_X:
-                if (X)
-                        do_div(A, X);
+                if (unlikely(X == 0))
+                        return 0;
+                do_div(A, X);
                 CONT;
         BPF_ALU_BPF_DIV_BPF_X:
+                if (unlikely(X == 0))
+                        return 0;
                 tmp = (u32) A;
-                if (X)
-                        do_div(tmp, (u32) X);
+                do_div(tmp, (u32) X);
                 A = (u32) tmp;
                 CONT;
         BPF_ALU64_BPF_DIV_BPF_K:
-                if (K)
-                        do_div(A, K);
+                do_div(A, K);
                 CONT;
         BPF_ALU_BPF_DIV_BPF_K:
                 tmp = (u32) A;
-                if (K)
-                        do_div(tmp, (u32) K);
+                do_div(tmp, (u32) K);
                 A = (u32) tmp;
                 CONT;
         BPF_ALU_BPF_END_BPF_TO_BE:
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index d0dac57291af..d068ec25db1e 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -3340,7 +3340,7 @@ static void pktgen_xmit(struct pktgen_dev *pkt_dev)
 
         __netif_tx_lock_bh(txq);
 
-        if (unlikely(netif_xmit_frozen_or_stopped(txq))) {
+        if (unlikely(netif_xmit_frozen_or_drv_stopped(txq))) {
                 ret = NETDEV_TX_BUSY;
                 pkt_dev->last_ok = 0;
                 goto unlock;
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index 59da7cde0724..f95b6f93814b 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -1044,8 +1044,10 @@ static int __do_replace(struct net *net, const char *name,
 
         xt_free_table_info(oldinfo);
         if (copy_to_user(counters_ptr, counters,
-                         sizeof(struct xt_counters) * num_counters) != 0)
-                ret = -EFAULT;
+                         sizeof(struct xt_counters) * num_counters) != 0) {
+                /* Silent error, can't fail, new table is already in place */
+                net_warn_ratelimited("arptables: counters copy to user failed while replacing table\n");
+        }
         vfree(counters);
         xt_table_unlock(t);
         return ret;
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index 718dfbd30cbe..99e810f84671 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -1231,8 +1231,10 @@ __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
 
         xt_free_table_info(oldinfo);
         if (copy_to_user(counters_ptr, counters,
-                         sizeof(struct xt_counters) * num_counters) != 0)
-                ret = -EFAULT;
+                         sizeof(struct xt_counters) * num_counters) != 0) {
+                /* Silent error, can't fail, new table is already in place */
+                net_warn_ratelimited("iptables: counters copy to user failed while replacing table\n");
+        }
         vfree(counters);
         xt_table_unlock(t);
         return ret;
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index 710238f58aa9..e080fbbbc0e5 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -1241,8 +1241,10 @@ __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
 
         xt_free_table_info(oldinfo);
         if (copy_to_user(counters_ptr, counters,
-                         sizeof(struct xt_counters) * num_counters) != 0)
-                ret = -EFAULT;
+                         sizeof(struct xt_counters) * num_counters) != 0) {
+                /* Silent error, can't fail, new table is already in place */
+                net_warn_ratelimited("ip6tables: counters copy to user failed while replacing table\n");
+        }
         vfree(counters);
         xt_table_unlock(t);
         return ret;
diff --git a/net/mac802154/mib.c b/net/mac802154/mib.c
index 153bd1ddbfbb..f0991f2344d4 100644
--- a/net/mac802154/mib.c
+++ b/net/mac802154/mib.c
@@ -26,7 +26,6 @@
 #include <net/mac802154.h>
 #include <net/ieee802154_netdev.h>
 #include <net/wpan-phy.h>
-#include <net/ieee802154_netdev.h>
 
 #include "mac802154.h"
 
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 33045a562297..3fd159db9f06 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -152,8 +152,8 @@ nf_tables_chain_type_lookup(const struct nft_af_info *afi,
 #ifdef CONFIG_MODULES
         if (autoload) {
                 nfnl_unlock(NFNL_SUBSYS_NFTABLES);
-                request_module("nft-chain-%u-%*.s", afi->family,
-                               nla_len(nla)-1, (const char *)nla_data(nla));
+                request_module("nft-chain-%u-%.*s", afi->family,
+                               nla_len(nla), (const char *)nla_data(nla));
                 nfnl_lock(NFNL_SUBSYS_NFTABLES);
                 type = __nf_tables_chain_type_lookup(afi->family, nla);
                 if (type != NULL)
@@ -1946,7 +1946,8 @@ static const struct nft_set_ops *nft_select_set_ops(const struct nlattr * const
 
 static const struct nla_policy nft_set_policy[NFTA_SET_MAX + 1] = {
         [NFTA_SET_TABLE]        = { .type = NLA_STRING },
-        [NFTA_SET_NAME]         = { .type = NLA_STRING },
+        [NFTA_SET_NAME]         = { .type = NLA_STRING,
+                                    .len = IFNAMSIZ - 1 },
         [NFTA_SET_FLAGS]        = { .type = NLA_U32 },
         [NFTA_SET_KEY_TYPE]     = { .type = NLA_U32 },
         [NFTA_SET_KEY_LEN]      = { .type = NLA_U32 },
diff --git a/net/netfilter/xt_cgroup.c b/net/netfilter/xt_cgroup.c
index 9a8e77e7f8d4..f4e833005320 100644
--- a/net/netfilter/xt_cgroup.c
+++ b/net/netfilter/xt_cgroup.c
@@ -54,7 +54,8 @@ static struct xt_match cgroup_mt_reg __read_mostly = {
         .matchsize  = sizeof(struct xt_cgroup_info),
         .me         = THIS_MODULE,
         .hooks      = (1 << NF_INET_LOCAL_OUT) |
-                      (1 << NF_INET_POST_ROUTING),
+                      (1 << NF_INET_POST_ROUTING) |
+                      (1 << NF_INET_LOCAL_IN),
 };
 
 static int __init cgroup_mt_init(void)
diff --git a/net/netfilter/xt_connlimit.c b/net/netfilter/xt_connlimit.c
index 458464e7bd7a..fbc66bb250d5 100644
--- a/net/netfilter/xt_connlimit.c
+++ b/net/netfilter/xt_connlimit.c
@@ -32,8 +32,14 @@
 #include <net/netfilter/nf_conntrack_tuple.h>
 #include <net/netfilter/nf_conntrack_zones.h>
 
-#define CONNLIMIT_SLOTS         32
-#define CONNLIMIT_LOCK_SLOTS    32
+#define CONNLIMIT_SLOTS         256U
+
+#ifdef CONFIG_LOCKDEP
+#define CONNLIMIT_LOCK_SLOTS    8U
+#else
+#define CONNLIMIT_LOCK_SLOTS    256U
+#endif
+
 #define CONNLIMIT_GC_MAX_NODES  8
 
 /* we will save the tuples of all connections we care about */
@@ -49,10 +55,11 @@ struct xt_connlimit_rb {
         union nf_inet_addr addr; /* search key */
 };
 
+static spinlock_t xt_connlimit_locks[CONNLIMIT_LOCK_SLOTS] __cacheline_aligned_in_smp;
+
 struct xt_connlimit_data {
         struct rb_root climit_root4[CONNLIMIT_SLOTS];
         struct rb_root climit_root6[CONNLIMIT_SLOTS];
-        spinlock_t locks[CONNLIMIT_LOCK_SLOTS];
 };
 
 static u_int32_t connlimit_rnd __read_mostly;
@@ -297,11 +304,11 @@ static int count_them(struct net *net,
                 root = &data->climit_root4[hash];
         }
 
-        spin_lock_bh(&data->locks[hash % CONNLIMIT_LOCK_SLOTS]);
+        spin_lock_bh(&xt_connlimit_locks[hash % CONNLIMIT_LOCK_SLOTS]);
 
         count = count_tree(net, root, tuple, addr, mask, family);
 
-        spin_unlock_bh(&data->locks[hash % CONNLIMIT_LOCK_SLOTS]);
+        spin_unlock_bh(&xt_connlimit_locks[hash % CONNLIMIT_LOCK_SLOTS]);
 
         return count;
 }
@@ -377,9 +384,6 @@ static int connlimit_mt_check(const struct xt_mtchk_param *par)
                 return -ENOMEM;
         }
 
-        for (i = 0; i < ARRAY_SIZE(info->data->locks); ++i)
-                spin_lock_init(&info->data->locks[i]);
-
         for (i = 0; i < ARRAY_SIZE(info->data->climit_root4); ++i)
                 info->data->climit_root4[i] = RB_ROOT;
         for (i = 0; i < ARRAY_SIZE(info->data->climit_root6); ++i)
@@ -435,11 +439,14 @@ static struct xt_match connlimit_mt_reg __read_mostly = {
 
 static int __init connlimit_mt_init(void)
 {
-        int ret;
+        int ret, i;
 
         BUILD_BUG_ON(CONNLIMIT_LOCK_SLOTS > CONNLIMIT_SLOTS);
         BUILD_BUG_ON((CONNLIMIT_SLOTS % CONNLIMIT_LOCK_SLOTS) != 0);
 
+        for (i = 0; i < CONNLIMIT_LOCK_SLOTS; ++i)
+                spin_lock_init(&xt_connlimit_locks[i]);
+
         connlimit_conn_cachep = kmem_cache_create("xt_connlimit_conn",
                                            sizeof(struct xt_connlimit_conn),
                                            0, 0, NULL);
diff --git a/net/netfilter/xt_osf.c b/net/netfilter/xt_osf.c
index 7174611bd672..c529161cdbf8 100644
--- a/net/netfilter/xt_osf.c
+++ b/net/netfilter/xt_osf.c
@@ -422,4 +422,6 @@ module_exit(xt_osf_fini);
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Evgeniy Polyakov <zbr@ioremap.net>");
 MODULE_DESCRIPTION("Passive OS fingerprint matching.");
+MODULE_ALIAS("ipt_osf");
+MODULE_ALIAS("ip6t_osf");
 MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_OSF);
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 01039d2b1695..72e0c71fb01d 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -261,7 +261,7 @@ static int packet_direct_xmit(struct sk_buff *skb)
         local_bh_disable();
 
         HARD_TX_LOCK(dev, txq, smp_processor_id());
-        if (!netif_xmit_frozen_or_stopped(txq)) {
+        if (!netif_xmit_frozen_or_drv_stopped(txq)) {
                 ret = ops->ndo_start_xmit(skb, dev);
                 if (ret == NETDEV_TX_OK)
                         txq_trans_update(txq);
@@ -275,6 +275,7 @@ static int packet_direct_xmit(struct sk_buff *skb)
 
         return ret;
 drop:
+        atomic_long_inc(&dev->tx_dropped);
         kfree_skb(skb);
         return NET_XMIT_DROP;
 }
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 981aaf8b6ace..5f83a6a2fa67 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -6593,6 +6593,40 @@ static void __sctp_write_space(struct sctp_association *asoc)
         }
 }
 
+static void sctp_wake_up_waiters(struct sock *sk,
+                                 struct sctp_association *asoc)
+{
+        struct sctp_association *tmp = asoc;
+
+        /* We do accounting for the sndbuf space per association,
+         * so we only need to wake our own association.
+         */
+        if (asoc->ep->sndbuf_policy)
+                return __sctp_write_space(asoc);
+
+        /* Accounting for the sndbuf space is per socket, so we
+         * need to wake up others, try to be fair and in case of
+         * other associations, let them have a go first instead
+         * of just doing a sctp_write_space() call.
+         *
+         * Note that we reach sctp_wake_up_waiters() only when
+         * associations free up queued chunks, thus we are under
+         * lock and the list of associations on a socket is
+         * guaranteed not to change.
+         */
+        for (tmp = list_next_entry(tmp, asocs); 1;
+             tmp = list_next_entry(tmp, asocs)) {
+                /* Manually skip the head element. */
+                if (&tmp->asocs == &((sctp_sk(sk))->ep->asocs))
+                        continue;
+                /* Wake up association. */
+                __sctp_write_space(tmp);
+                /* We've reached the end. */
+                if (tmp == asoc)
+                        break;
+        }
+}
+
 /* Do accounting for the sndbuf space.
  * Decrement the used sndbuf space of the corresponding association by the
  * data size which was just transmitted(freed).
@@ -6620,7 +6654,7 @@ static void sctp_wfree(struct sk_buff *skb)
         sk_mem_uncharge(sk, skb->truesize);
 
         sock_wfree(skb);
-        __sctp_write_space(asoc);
+        sctp_wake_up_waiters(sk, asoc);
 
         sctp_association_put(asoc);
 }
diff --git a/net/tipc/net.c b/net/tipc/net.c
index 0374a817631e..4c564eb69e1a 100644
--- a/net/tipc/net.c
+++ b/net/tipc/net.c
@@ -182,6 +182,8 @@ void tipc_net_start(u32 addr)
         tipc_bclink_init();
         write_unlock_bh(&tipc_net_lock);
 
+        tipc_nametbl_publish(TIPC_CFG_SRV, tipc_own_addr, tipc_own_addr,
+                             TIPC_ZONE_SCOPE, 0, tipc_own_addr);
         pr_info("Started in network mode\n");
         pr_info("Own node address %s, network identity %u\n",
                 tipc_addr_string_fill(addr_string, tipc_own_addr), tipc_net_id);
@@ -192,6 +194,7 @@ void tipc_net_stop(void)
         if (!tipc_own_addr)
                 return;
 
+        tipc_nametbl_withdraw(TIPC_CFG_SRV, tipc_own_addr, 0, tipc_own_addr);
         write_lock_bh(&tipc_net_lock);
         tipc_bearer_stop();
         tipc_bclink_stop();
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index 29b7f26a12cf..adc12e227303 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -301,7 +301,6 @@ static int tipc_release(struct socket *sock)
         struct tipc_sock *tsk;
         struct tipc_port *port;
         struct sk_buff *buf;
-        int res;
 
         /*
          * Exit if socket isn't fully initialized (occurs when a failed accept()
@@ -349,7 +348,7 @@ static int tipc_release(struct socket *sock)
         sock_put(sk);
         sock->sk = NULL;
 
-        return res;
+        return 0;
 }
 
 /**