Diffstat (limited to 'net/netlink/af_netlink.c')
-rw-r--r--	net/netlink/af_netlink.c	37
1 file changed, 25 insertions, 12 deletions
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 7a186e74b1b3..f1de72de273e 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -96,6 +96,14 @@ static DECLARE_WAIT_QUEUE_HEAD(nl_table_wait);
 static int netlink_dump(struct sock *sk);
 static void netlink_skb_destructor(struct sk_buff *skb);
 
+/* nl_table locking explained:
+ * Lookup and traversal are protected with nl_sk_hash_lock or nl_table_lock
+ * combined with an RCU read-side lock. Insertion and removal are protected
+ * with nl_sk_hash_lock while using RCU list modification primitives and may
+ * run in parallel to nl_table_lock protected lookups. Destruction of the
+ * Netlink socket may only occur *after* nl_table_lock has been acquired
+ * either during or after the socket has been removed from the list.
+ */
 DEFINE_RWLOCK(nl_table_lock);
 EXPORT_SYMBOL_GPL(nl_table_lock);
 static atomic_t nl_table_users = ATOMIC_INIT(0);
@@ -109,10 +117,10 @@ EXPORT_SYMBOL_GPL(nl_sk_hash_lock);
 static int lockdep_nl_sk_hash_is_held(void)
 {
 #ifdef CONFIG_LOCKDEP
-	return (debug_locks) ? lockdep_is_held(&nl_sk_hash_lock) : 1;
-#else
-	return 1;
+	if (debug_locks)
+		return lockdep_is_held(&nl_sk_hash_lock) || lockdep_is_held(&nl_table_lock);
 #endif
+	return 1;
 }
 
 static ATOMIC_NOTIFIER_HEAD(netlink_chain);
@@ -1028,11 +1036,13 @@ static struct sock *netlink_lookup(struct net *net, int protocol, u32 portid)
 	struct netlink_table *table = &nl_table[protocol];
 	struct sock *sk;
 
+	read_lock(&nl_table_lock);
 	rcu_read_lock();
 	sk = __netlink_lookup(table, portid, net);
 	if (sk)
 		sock_hold(sk);
 	rcu_read_unlock();
+	read_unlock(&nl_table_lock);
 
 	return sk;
 }
@@ -1257,9 +1267,6 @@ static int netlink_release(struct socket *sock)
 	}
 	netlink_table_ungrab();
 
-	/* Wait for readers to complete */
-	synchronize_net();
-
 	kfree(nlk->groups);
 	nlk->groups = NULL;
 
@@ -1281,6 +1288,7 @@ static int netlink_autobind(struct socket *sock)
 
 retry:
 	cond_resched();
+	netlink_table_grab();
 	rcu_read_lock();
 	if (__netlink_lookup(table, portid, net)) {
 		/* Bind collision, search negative portid values. */
@@ -1288,9 +1296,11 @@ retry:
 		if (rover > -4097)
 			rover = -4097;
 		rcu_read_unlock();
+		netlink_table_ungrab();
 		goto retry;
 	}
 	rcu_read_unlock();
+	netlink_table_ungrab();
 
 	err = netlink_insert(sk, net, portid);
 	if (err == -EADDRINUSE)
@@ -2921,14 +2931,16 @@ static struct sock *netlink_seq_socket_idx(struct seq_file *seq, loff_t pos)
 }
 
 static void *netlink_seq_start(struct seq_file *seq, loff_t *pos)
-	__acquires(RCU)
+	__acquires(nl_table_lock) __acquires(RCU)
 {
+	read_lock(&nl_table_lock);
 	rcu_read_lock();
 	return *pos ? netlink_seq_socket_idx(seq, *pos - 1) : SEQ_START_TOKEN;
 }
 
 static void *netlink_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 {
+	struct rhashtable *ht;
 	struct netlink_sock *nlk;
 	struct nl_seq_iter *iter;
 	struct net *net;
@@ -2943,19 +2955,19 @@ static void *netlink_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 	iter = seq->private;
 	nlk = v;
 
-	rht_for_each_entry_rcu(nlk, nlk->node.next, node)
+	i = iter->link;
+	ht = &nl_table[i].hash;
+	rht_for_each_entry(nlk, nlk->node.next, ht, node)
 		if (net_eq(sock_net((struct sock *)nlk), net))
 			return nlk;
 
-	i = iter->link;
 	j = iter->hash_idx + 1;
 
 	do {
-		struct rhashtable *ht = &nl_table[i].hash;
 		const struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht);
 
 		for (; j < tbl->size; j++) {
-			rht_for_each_entry_rcu(nlk, tbl->buckets[j], node) {
+			rht_for_each_entry(nlk, tbl->buckets[j], ht, node) {
 				if (net_eq(sock_net((struct sock *)nlk), net)) {
 					iter->link = i;
 					iter->hash_idx = j;
@@ -2971,9 +2983,10 @@ static void *netlink_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 }
 
 static void netlink_seq_stop(struct seq_file *seq, void *v)
-	__releases(RCU) __releases(nl_table_lock)
+	__releases(RCU) __releases(nl_table_lock)
 {
 	rcu_read_unlock();
+	read_unlock(&nl_table_lock);
 }
 
 
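
The locking comment added at the top of the patch describes the scheme the remaining hunks implement: lookups and the /proc seq walker take nl_table_lock for reading plus an RCU read-side section, insertions and removals take nl_sk_hash_lock and use RCU-safe list primitives, and a socket may only be destroyed once nl_table_lock has been acquired during or after its removal from the table. Below is a minimal userspace sketch of that protocol, not kernel code: the names are illustrative, pthread locks stand in for nl_table_lock and nl_sk_hash_lock, and a plain pointer store stands in for the kernel's RCU publication primitives.

	/* Userspace analogue of the nl_table locking scheme.
	 * table_lock ~ nl_table_lock, hash_lock ~ nl_sk_hash_lock.
	 * Real RCU is omitted; this is a sketch, not a faithful port.
	 * Build: cc sketch.c -lpthread
	 */
	#include <pthread.h>
	#include <stdio.h>
	#include <stdlib.h>

	struct entry {
		int portid;
		struct entry *next;
	};

	static pthread_rwlock_t table_lock = PTHREAD_RWLOCK_INITIALIZER;
	static pthread_mutex_t hash_lock = PTHREAD_MUTEX_INITIALIZER;
	static struct entry *head;

	/* Lookup side: read lock only, so lookups run in parallel with
	 * each other. The kernel version also takes a reference
	 * (sock_hold()) before dropping the lock. */
	static struct entry *lookup(int portid)
	{
		struct entry *e;

		pthread_rwlock_rdlock(&table_lock);
		for (e = head; e; e = e->next)
			if (e->portid == portid)
				break;
		pthread_rwlock_unlock(&table_lock);
		return e;
	}

	/* Mutation side: serialized against other mutations by hash_lock
	 * only. The kernel version may run concurrently with lookups
	 * because it publishes with RCU primitives; here the single
	 * pointer store merely approximates that. */
	static void insert(int portid)
	{
		struct entry *e = malloc(sizeof(*e));

		e->portid = portid;
		pthread_mutex_lock(&hash_lock);
		e->next = head;
		head = e;	/* kernel: rcu_assign_pointer() */
		pthread_mutex_unlock(&hash_lock);
	}

	/* Destruction: unlink under hash_lock, then acquire table_lock
	 * for writing so every lookup that could still see the entry has
	 * finished before the memory is freed. */
	static void destroy(int portid)
	{
		struct entry **pp, *e = NULL;

		pthread_mutex_lock(&hash_lock);
		for (pp = &head; *pp; pp = &(*pp)->next)
			if ((*pp)->portid == portid) {
				e = *pp;
				*pp = e->next;
				break;
			}
		pthread_mutex_unlock(&hash_lock);

		pthread_rwlock_wrlock(&table_lock);	/* wait out readers */
		pthread_rwlock_unlock(&table_lock);
		free(e);
	}

	int main(void)
	{
		insert(42);
		printf("lookup(42) -> %s\n", lookup(42) ? "found" : "missing");
		destroy(42);
		printf("lookup(42) -> %s\n", lookup(42) ? "found" : "missing");
		return 0;
	}

Acquiring the write side after the unlink is the same guarantee that lets the patch drop the synchronize_net() call in netlink_release(): since netlink_lookup() now holds nl_table_lock for reading, the netlink_table_grab()/ungrab() pair performed around the removal already ensures no reader can still hold a stale pointer.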