Diffstat (limited to 'net/rxrpc/peer_object.c')

 -rw-r--r--  net/rxrpc/peer_object.c | 52
 1 file changed, 11 insertions, 41 deletions
diff --git a/net/rxrpc/peer_object.c b/net/rxrpc/peer_object.c
index 1dc7648e3eff..01a9febfa367 100644
--- a/net/rxrpc/peer_object.c
+++ b/net/rxrpc/peer_object.c
@@ -124,11 +124,9 @@ static struct rxrpc_peer *__rxrpc_lookup_peer_rcu(
 	struct rxrpc_net *rxnet = local->rxnet;
 
 	hash_for_each_possible_rcu(rxnet->peer_hash, peer, hash_link, hash_key) {
-		if (rxrpc_peer_cmp_key(peer, local, srx, hash_key) == 0) {
-			if (atomic_read(&peer->usage) == 0)
-				return NULL;
+		if (rxrpc_peer_cmp_key(peer, local, srx, hash_key) == 0 &&
+		    atomic_read(&peer->usage) > 0)
 			return peer;
-		}
 	}
 
 	return NULL;
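This hunk folds the usage check into the match condition: a peer whose usage count has already fallen to zero is committed to destruction and is now treated as absent rather than returned. A caller on the RCU read side must still take its own reference before leaving the read-side critical section. A minimal sketch of that lookup-and-tryget pattern follows, with illustrative names (struct obj, obj_table, lookup_and_get) standing in for the rxrpc types; in rxrpc itself, rxrpc_get_peer_maybe() plays the atomic_add_unless() role:

#include <linux/atomic.h>
#include <linux/hashtable.h>
#include <linux/rcupdate.h>

/* Illustrative stand-in for a refcounted, RCU-hashed object. */
struct obj {
	struct hlist_node hash_link;
	atomic_t usage;
	unsigned long key;
};

static DEFINE_HASHTABLE(obj_table, 8);

/* RCU lookup that refuses to return an object already being torn down,
 * the invariant the rewritten __rxrpc_lookup_peer_rcu() relies on. */
static struct obj *lookup_and_get(unsigned long key)
{
	struct obj *o;

	rcu_read_lock();
	hash_for_each_possible_rcu(obj_table, o, hash_link, key) {
		/* Take a reference only if usage is still non-zero; a
		 * zero count means destruction has already begun. */
		if (o->key == key && atomic_add_unless(&o->usage, 1, 0)) {
			rcu_read_unlock();
			return o;
		}
	}
	rcu_read_unlock();
	return NULL;
}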
@@ -222,8 +220,6 @@ struct rxrpc_peer *rxrpc_alloc_peer(struct rxrpc_local *local, gfp_t gfp)
 	atomic_set(&peer->usage, 1);
 	peer->local = local;
 	INIT_HLIST_HEAD(&peer->error_targets);
-	INIT_WORK(&peer->error_distributor,
-		  &rxrpc_peer_error_distributor);
 	peer->service_conns = RB_ROOT;
 	seqlock_init(&peer->service_conn_lock);
 	spin_lock_init(&peer->lock);
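The two deleted lines are the initialisation half of a work-item pattern whose queueing half, __rxrpc_queue_peer_error(), is removed in the final hunk below. As a reminder of the shape being retired, here is a minimal generic sketch; struct obj, obj_error_worker and the field names are illustrative stand-ins, not the rxrpc definitions:

#include <linux/kernel.h>
#include <linux/workqueue.h>

/* Illustrative object embedding a work item. */
struct obj {
	struct work_struct error_work;
	/* ... other fields ... */
};

/* Worker: recover the owning object from the embedded work_struct. */
static void obj_error_worker(struct work_struct *work)
{
	struct obj *o = container_of(work, struct obj, error_work);

	/* ... distribute the error to users of o ... */
	(void)o;
}

/* Initialisation half of the pattern, as INIT_WORK() was used above. */
static void obj_init(struct obj *o)
{
	INIT_WORK(&o->error_work, obj_error_worker);
}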
@@ -299,34 +295,23 @@ static struct rxrpc_peer *rxrpc_create_peer(struct rxrpc_local *local,
 }
 
 /*
- * Set up a new incoming peer. The address is prestored in the preallocated
- * peer.
+ * Set up a new incoming peer. There shouldn't be any other matching peers
+ * since we've already done a search in the list from the non-reentrant context
+ * (the data_ready handler) that is the only place we can add new peers.
  */
-struct rxrpc_peer *rxrpc_lookup_incoming_peer(struct rxrpc_local *local,
-					      struct rxrpc_peer *prealloc)
+void rxrpc_new_incoming_peer(struct rxrpc_local *local, struct rxrpc_peer *peer)
 {
-	struct rxrpc_peer *peer;
 	struct rxrpc_net *rxnet = local->rxnet;
 	unsigned long hash_key;
 
-	hash_key = rxrpc_peer_hash_key(local, &prealloc->srx);
-	prealloc->local = local;
-	rxrpc_init_peer(prealloc, hash_key);
+	hash_key = rxrpc_peer_hash_key(local, &peer->srx);
+	peer->local = local;
+	rxrpc_init_peer(peer, hash_key);
 
 	spin_lock(&rxnet->peer_hash_lock);
-
-	/* Need to check that we aren't racing with someone else */
-	peer = __rxrpc_lookup_peer_rcu(local, &prealloc->srx, hash_key);
-	if (peer && !rxrpc_get_peer_maybe(peer))
-		peer = NULL;
-	if (!peer) {
-		peer = prealloc;
-		hash_add_rcu(rxnet->peer_hash, &peer->hash_link, hash_key);
-		list_add_tail(&peer->keepalive_link, &rxnet->peer_keepalive_new);
-	}
-
+	hash_add_rcu(rxnet->peer_hash, &peer->hash_link, hash_key);
+	list_add_tail(&peer->keepalive_link, &rxnet->peer_keepalive_new);
 	spin_unlock(&rxnet->peer_hash_lock);
-	return peer;
 }
 
 /*
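With insertion now confined to one non-reentrant path, the deleted re-lookup under peer_hash_lock becomes unnecessary and the preallocated peer can be hashed unconditionally. A hedged sketch of what a caller under the new contract looks like; find_or_create_peer is a hypothetical name and this is not the actual rxrpc call chain, though rxrpc_lookup_peer_rcu() and rxrpc_get_peer_maybe() are real helpers:

/* Hypothetical caller under the new contract: because this runs only
 * from the single non-reentrant context that creates incoming peers,
 * the lookup-then-insert sequence cannot race with another insert. */
static struct rxrpc_peer *find_or_create_peer(struct rxrpc_local *local,
					      struct sockaddr_rxrpc *srx,
					      struct rxrpc_peer *prealloc)
{
	struct rxrpc_peer *peer;

	rcu_read_lock();
	peer = rxrpc_lookup_peer_rcu(local, srx);
	if (peer && !rxrpc_get_peer_maybe(peer))
		peer = NULL;	/* found, but already being destroyed */
	rcu_read_unlock();

	if (!peer) {
		/* No match, and nobody else can be inserting one:
		 * hash the preallocated peer unconditionally. */
		prealloc->srx = *srx;
		rxrpc_new_incoming_peer(local, prealloc);
		peer = prealloc;
	}
	return peer;
}

This is essentially the fast path the deleted code had to guard with a second lookup under the lock; serialising creation in one context makes that guard redundant.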
@@ -416,21 +401,6 @@ struct rxrpc_peer *rxrpc_get_peer_maybe(struct rxrpc_peer *peer)
 }
 
 /*
- * Queue a peer record. This passes the caller's ref to the workqueue.
- */
-void __rxrpc_queue_peer_error(struct rxrpc_peer *peer)
-{
-	const void *here = __builtin_return_address(0);
-	int n;
-
-	n = atomic_read(&peer->usage);
-	if (rxrpc_queue_work(&peer->error_distributor))
-		trace_rxrpc_peer(peer, rxrpc_peer_queued_error, n, here);
-	else
-		rxrpc_put_peer(peer);
-}
-
-/*
  * Discard a peer record.
  */
 static void __rxrpc_put_peer(struct rxrpc_peer *peer)
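The deleted helper was an instance of the ref-handoff idiom its comment names: the caller's reference is passed to the work item when queueing succeeds, and a false return from the queue_work()-style call (the work was already pending, so a reference is already held on its behalf) means the surplus reference must be dropped. A minimal generic sketch of the idiom, with struct obj and obj_put() as stand-ins for the rxrpc peer and rxrpc_put_peer():

#include <linux/atomic.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

/* Illustrative refcounted object embedding a work item. */
struct obj {
	atomic_t usage;
	struct work_struct error_work;
};

static void obj_put(struct obj *o)
{
	if (atomic_dec_and_test(&o->usage))
		kfree(o);
}

/* Generic form of the deleted helper: the caller's reference is handed
 * to the work item on successful queueing. */
static void queue_obj_error(struct obj *o)
{
	/* queue_work() returns false if the work item was already
	 * pending; that pending instance already owns a reference, so
	 * the caller's extra reference must be released here. */
	if (!queue_work(system_wq, &o->error_work))
		obj_put(o);
}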