diff options
author | Parthasarathy Bhuvaragan <parthasarathy.bhuvaragan@ericsson.com> | 2017-01-24 07:00:45 -0500 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2017-01-24 16:14:57 -0500 |
commit | fc0adfc8fd18b61b6f7a3f28b429e134d6f3a008 (patch) | |
tree | 9080d3fca5d3fd7f654b16e290466c1ceb9b8a3f /net/tipc | |
parent | d094c4d5f5c7e1b225e94227ca3f007be3adc4e8 (diff) |
tipc: fix connection refcount error
Until now, the generic server framework maintains the connection
id's per subscriber in server's conn_idr. At tipc_close_conn, we
remove the connection id from the server list, but the connection is
valid until we call the refcount cleanup. Hence we have a window
where the server allocates the same connection to a new subscriber
leading to an inconsistent reference count. We have another refcount
warning when we grab the refcount in tipc_conn_lookup() for connections
with the CF_CONNECTED flag not set. This usually occurs at shutdown
when we stop the topology server and withdraw the TIPC_CFG_SRV
publication thereby triggering a withdraw message to subscribers.
In this commit, we:
1. remove the connection from the server list at refcount cleanup.
2. grab the refcount for a connection only if CF_CONNECTED is set.
Tested-by: John Thompson <thompa.atl@gmail.com>
Acked-by: Ying Xue <ying.xue@windriver.com>
Acked-by: Jon Maloy <jon.maloy@ericsson.com>
Signed-off-by: Parthasarathy Bhuvaragan <parthasarathy.bhuvaragan@ericsson.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/tipc')
-rw-r--r-- | net/tipc/server.c | 19 |
1 files changed, 10 insertions, 9 deletions
diff --git a/net/tipc/server.c b/net/tipc/server.c index 215849ce453d..2e803601aa99 100644 --- a/net/tipc/server.c +++ b/net/tipc/server.c | |||
@@ -91,7 +91,8 @@ static void tipc_sock_release(struct tipc_conn *con); | |||
91 | static void tipc_conn_kref_release(struct kref *kref) | 91 | static void tipc_conn_kref_release(struct kref *kref) |
92 | { | 92 | { |
93 | struct tipc_conn *con = container_of(kref, struct tipc_conn, kref); | 93 | struct tipc_conn *con = container_of(kref, struct tipc_conn, kref); |
94 | struct sockaddr_tipc *saddr = con->server->saddr; | 94 | struct tipc_server *s = con->server; |
95 | struct sockaddr_tipc *saddr = s->saddr; | ||
95 | struct socket *sock = con->sock; | 96 | struct socket *sock = con->sock; |
96 | struct sock *sk; | 97 | struct sock *sk; |
97 | 98 | ||
@@ -106,6 +107,11 @@ static void tipc_conn_kref_release(struct kref *kref) | |||
106 | tipc_sock_release(con); | 107 | tipc_sock_release(con); |
107 | sock_release(sock); | 108 | sock_release(sock); |
108 | con->sock = NULL; | 109 | con->sock = NULL; |
110 | |||
111 | spin_lock_bh(&s->idr_lock); | ||
112 | idr_remove(&s->conn_idr, con->conid); | ||
113 | s->idr_in_use--; | ||
114 | spin_unlock_bh(&s->idr_lock); | ||
109 | } | 115 | } |
110 | 116 | ||
111 | tipc_clean_outqueues(con); | 117 | tipc_clean_outqueues(con); |
@@ -128,8 +134,10 @@ static struct tipc_conn *tipc_conn_lookup(struct tipc_server *s, int conid) | |||
128 | 134 | ||
129 | spin_lock_bh(&s->idr_lock); | 135 | spin_lock_bh(&s->idr_lock); |
130 | con = idr_find(&s->conn_idr, conid); | 136 | con = idr_find(&s->conn_idr, conid); |
131 | if (con) | 137 | if (con && test_bit(CF_CONNECTED, &con->flags)) |
132 | conn_get(con); | 138 | conn_get(con); |
139 | else | ||
140 | con = NULL; | ||
133 | spin_unlock_bh(&s->idr_lock); | 141 | spin_unlock_bh(&s->idr_lock); |
134 | return con; | 142 | return con; |
135 | } | 143 | } |
@@ -198,15 +206,8 @@ static void tipc_sock_release(struct tipc_conn *con) | |||
198 | 206 | ||
199 | static void tipc_close_conn(struct tipc_conn *con) | 207 | static void tipc_close_conn(struct tipc_conn *con) |
200 | { | 208 | { |
201 | struct tipc_server *s = con->server; | ||
202 | |||
203 | if (test_and_clear_bit(CF_CONNECTED, &con->flags)) { | 209 | if (test_and_clear_bit(CF_CONNECTED, &con->flags)) { |
204 | 210 | ||
205 | spin_lock_bh(&s->idr_lock); | ||
206 | idr_remove(&s->conn_idr, con->conid); | ||
207 | s->idr_in_use--; | ||
208 | spin_unlock_bh(&s->idr_lock); | ||
209 | |||
210 | /* We shouldn't flush pending works as we may be in the | 211 | /* We shouldn't flush pending works as we may be in the |
211 | * thread. In fact the races with pending rx/tx work structs | 212 | * thread. In fact the races with pending rx/tx work structs |
212 | * are harmless for us here as we have already deleted this | 213 | * are harmless for us here as we have already deleted this |