Diffstat (limited to 'net/rxrpc/local_object.c')
-rw-r--r--  net/rxrpc/local_object.c | 104
1 file changed, 66 insertions(+), 38 deletions(-)
diff --git a/net/rxrpc/local_object.c b/net/rxrpc/local_object.c
index b1c71bad510b..36587260cabd 100644
--- a/net/rxrpc/local_object.c
+++ b/net/rxrpc/local_object.c
@@ -79,6 +79,7 @@ static struct rxrpc_local *rxrpc_alloc_local(struct rxrpc_net *rxnet,
 	local = kzalloc(sizeof(struct rxrpc_local), GFP_KERNEL);
 	if (local) {
 		atomic_set(&local->usage, 1);
+		atomic_set(&local->active_users, 1);
 		local->rxnet = rxnet;
 		INIT_LIST_HEAD(&local->link);
 		INIT_WORK(&local->processor, rxrpc_local_processor);
@@ -92,7 +93,7 @@ static struct rxrpc_local *rxrpc_alloc_local(struct rxrpc_net *rxnet,
 		local->debug_id = atomic_inc_return(&rxrpc_debug_id);
 		memcpy(&local->srx, srx, sizeof(*srx));
 		local->srx.srx_service = 0;
-		trace_rxrpc_local(local, rxrpc_local_new, 1, NULL);
+		trace_rxrpc_local(local->debug_id, rxrpc_local_new, 1, NULL);
 	}
 
 	_leave(" = %p", local);
@@ -266,11 +267,8 @@ struct rxrpc_local *rxrpc_lookup_local(struct net *net,
 		 * bind the transport socket may still fail if we're attempting
 		 * to use a local address that the dying object is still using.
 		 */
-		if (!rxrpc_get_local_maybe(local)) {
-			cursor = cursor->next;
-			list_del_init(&local->link);
+		if (!rxrpc_use_local(local))
 			break;
-		}
 
 		age = "old";
 		goto found;
@@ -284,7 +282,10 @@ struct rxrpc_local *rxrpc_lookup_local(struct net *net,
 	if (ret < 0)
 		goto sock_error;
 
-	list_add_tail(&local->link, cursor);
+	if (cursor != &rxnet->local_endpoints)
+		list_replace_init(cursor, &local->link);
+	else
+		list_add_tail(&local->link, cursor);
 	age = "new";
 
 found:
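The hunk above makes the new endpoint take over the list slot of a dying endpoint that the lookup loop stopped on at cursor. A minimal userspace re-implementation of list_replace_init() (illustrative only, not the <linux/list.h> code, though it performs the same pointer moves) shows the effect: the replacement is spliced in place, and the old entry's link is reinitialised, presumably so that the destroyer's later list_del_init() on the dying endpoint is a harmless no-op.

#include <stdio.h>

struct list_head {
	struct list_head *next, *prev;
};

static void INIT_LIST_HEAD(struct list_head *list)
{
	list->next = list;
	list->prev = list;
}

/* Splice "new" into the position "old" occupied, then empty "old". */
static void list_replace_init(struct list_head *old, struct list_head *new)
{
	new->next = old->next;
	new->next->prev = new;
	new->prev = old->prev;
	new->prev->next = new;
	INIT_LIST_HEAD(old);
}

int main(void)
{
	struct list_head head, dying, fresh;

	/* Build a one-element list: head <-> dying. */
	INIT_LIST_HEAD(&head);
	dying.next = &head;
	dying.prev = &head;
	head.next = &dying;
	head.prev = &dying;

	list_replace_init(&dying, &fresh);

	printf("head.next == &fresh: %d\n", head.next == &fresh);  /* 1 */
	printf("dying is empty:      %d\n", dying.next == &dying); /* 1 */
	return 0;
}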
@@ -320,7 +321,7 @@ struct rxrpc_local *rxrpc_get_local(struct rxrpc_local *local)
 	int n;
 
 	n = atomic_inc_return(&local->usage);
-	trace_rxrpc_local(local, rxrpc_local_got, n, here);
+	trace_rxrpc_local(local->debug_id, rxrpc_local_got, n, here);
 	return local;
 }
 
@@ -334,7 +335,8 @@ struct rxrpc_local *rxrpc_get_local_maybe(struct rxrpc_local *local)
 	if (local) {
 		int n = atomic_fetch_add_unless(&local->usage, 1, 0);
 		if (n > 0)
-			trace_rxrpc_local(local, rxrpc_local_got, n + 1, here);
+			trace_rxrpc_local(local->debug_id, rxrpc_local_got,
+					  n + 1, here);
 		else
 			local = NULL;
 	}
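rxrpc_get_local_maybe() only takes a reference if the usage count is not already zero: atomic_fetch_add_unless(&local->usage, 1, 0) returns the old value, so a return of 0 means the endpoint is already on its way to destruction. A minimal userspace sketch of that "increment unless zero" pattern, using C11 atomics rather than the kernel's atomic_t (names are illustrative):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static bool get_ref_unless_zero(atomic_int *usage)
{
	int old = atomic_load(usage);

	/* Retry until we either observe zero (object dying, refuse the
	 * reference) or successfully move old -> old + 1.
	 */
	while (old != 0) {
		if (atomic_compare_exchange_weak(usage, &old, old + 1))
			return true;
	}
	return false;
}

int main(void)
{
	atomic_int usage = 1;

	printf("got ref: %d\n", get_ref_unless_zero(&usage)); /* 1 */
	atomic_store(&usage, 0);
	printf("got ref: %d\n", get_ref_unless_zero(&usage)); /* 0 */
	return 0;
}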
@@ -342,24 +344,18 @@ struct rxrpc_local *rxrpc_get_local_maybe(struct rxrpc_local *local)
 }
 
 /*
- * Queue a local endpoint.
+ * Queue a local endpoint and pass the caller's reference to the work item.
  */
 void rxrpc_queue_local(struct rxrpc_local *local)
 {
 	const void *here = __builtin_return_address(0);
+	unsigned int debug_id = local->debug_id;
+	int n = atomic_read(&local->usage);
 
 	if (rxrpc_queue_work(&local->processor))
-		trace_rxrpc_local(local, rxrpc_local_queued,
-				  atomic_read(&local->usage), here);
-}
-
-/*
- * A local endpoint reached its end of life.
- */
-static void __rxrpc_put_local(struct rxrpc_local *local)
-{
-	_enter("%d", local->debug_id);
-	rxrpc_queue_work(&local->processor);
+		trace_rxrpc_local(debug_id, rxrpc_local_queued, n, here);
+	else
+		rxrpc_put_local(local);
 }
 
 /*
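Two things change in rxrpc_queue_local(). First, the caller's reference is transferred to the work item when queueing succeeds; if the work item was already pending, the surplus reference is dropped instead, so the already-queued path can no longer leak a reference. Second, debug_id and the usage count are sampled before rxrpc_queue_work(), because once the work item owns the reference the endpoint may be freed at any moment, and the tracepoint must not touch it afterwards. A userspace sketch of the hand-off convention (illustrative names; the pending flag stands in for the workqueue's pending bit):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct object {
	atomic_int usage;
	atomic_bool pending;	/* stands in for WORK_STRUCT_PENDING */
};

static void put_ref(struct object *obj)
{
	if (atomic_fetch_sub(&obj->usage, 1) == 1)
		printf("object destroyed\n");
}

/* Caller passes in one reference; on return it no longer owns it. */
static void queue_object(struct object *obj)
{
	bool expected = false;

	if (atomic_compare_exchange_strong(&obj->pending, &expected, true))
		printf("queued; ref now owned by the work item\n");
	else
		put_ref(obj);	/* already queued: drop the extra ref */
}

int main(void)
{
	struct object obj = { .usage = 2, .pending = false };

	queue_object(&obj);	/* first call queues */
	queue_object(&obj);	/* second call finds it pending, drops ref */
	return 0;
}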
@@ -372,10 +368,47 @@ void rxrpc_put_local(struct rxrpc_local *local)
 
 	if (local) {
 		n = atomic_dec_return(&local->usage);
-		trace_rxrpc_local(local, rxrpc_local_put, n, here);
+		trace_rxrpc_local(local->debug_id, rxrpc_local_put, n, here);
 
 		if (n == 0)
-			__rxrpc_put_local(local);
+			call_rcu(&local->rcu, rxrpc_local_rcu);
+	}
+}
+
+/*
+ * Start using a local endpoint.
+ */
+struct rxrpc_local *rxrpc_use_local(struct rxrpc_local *local)
+{
+	unsigned int au;
+
+	local = rxrpc_get_local_maybe(local);
+	if (!local)
+		return NULL;
+
+	au = atomic_fetch_add_unless(&local->active_users, 1, 0);
+	if (au == 0) {
+		rxrpc_put_local(local);
+		return NULL;
+	}
+
+	return local;
+}
+
+/*
+ * Cease using a local endpoint.  Once the number of active users reaches 0, we
+ * start the closure of the transport in the work processor.
+ */
+void rxrpc_unuse_local(struct rxrpc_local *local)
+{
+	unsigned int au;
+
+	if (local) {
+		au = atomic_dec_return(&local->active_users);
+		if (au == 0)
+			rxrpc_queue_local(local);
+		else
+			rxrpc_put_local(local);
 	}
 }
 
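This is the heart of the patch: the single usage count is split in two. usage pins the memory (the final put now frees it via call_rcu() directly), while the new active_users count pins the open transport socket; the last rxrpc_unuse_local() queues the processor, handing it the reference, and the processor tears the socket down. A simplified userspace sketch of the assumed two-counter scheme (illustrative names, not the kernel API):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct endpoint {
	atomic_int usage;		/* pins the memory */
	atomic_int active_users;	/* pins the transport socket */
};

static bool inc_unless_zero(atomic_int *v)
{
	int old = atomic_load(v);

	while (old != 0)
		if (atomic_compare_exchange_weak(v, &old, old + 1))
			return true;
	return false;
}

static void put_ref(struct endpoint *ep)
{
	if (atomic_fetch_sub(&ep->usage, 1) == 1)
		printf("last put: free the memory\n");
}

static struct endpoint *use_endpoint(struct endpoint *ep)
{
	if (!inc_unless_zero(&ep->usage))
		return NULL;		/* memory already dying */
	if (!inc_unless_zero(&ep->active_users)) {
		put_ref(ep);		/* socket already closing */
		return NULL;
	}
	return ep;
}

static void unuse_endpoint(struct endpoint *ep)
{
	if (atomic_fetch_sub(&ep->active_users, 1) == 1) {
		printf("last unuse: close the socket\n");
		put_ref(ep);	/* kernel: ref handed to the work item */
	} else {
		put_ref(ep);
	}
}

int main(void)
{
	struct endpoint ep = { .usage = 1, .active_users = 1 };

	if (use_endpoint(&ep))		/* a second user appears... */
		unuse_endpoint(&ep);	/* ...and goes away again */
	unuse_endpoint(&ep);		/* last user: socket closes, memory freed */
	return 0;
}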
@@ -393,21 +426,14 @@ static void rxrpc_local_destroyer(struct rxrpc_local *local)
 
 	_enter("%d", local->debug_id);
 
-	/* We can get a race between an incoming call packet queueing the
-	 * processor again and the work processor starting the destruction
-	 * process which will shut down the UDP socket.
-	 */
-	if (local->dead) {
-		_leave(" [already dead]");
-		return;
-	}
 	local->dead = true;
 
 	mutex_lock(&rxnet->local_mutex);
 	list_del_init(&local->link);
 	mutex_unlock(&rxnet->local_mutex);
 
-	ASSERT(RB_EMPTY_ROOT(&local->client_conns));
+	rxrpc_clean_up_local_conns(local);
+	rxrpc_service_connection_reaper(&rxnet->service_conn_reaper);
 	ASSERT(!local->service);
 
 	if (socket) {
@@ -422,13 +448,11 @@ static void rxrpc_local_destroyer(struct rxrpc_local *local)
 	 */
 	rxrpc_purge_queue(&local->reject_queue);
 	rxrpc_purge_queue(&local->event_queue);
-
-	_debug("rcu local %d", local->debug_id);
-	call_rcu(&local->rcu, rxrpc_local_rcu);
 }
 
 /*
- * Process events on an endpoint
+ * Process events on an endpoint.  The work item carries a ref which
+ * we must release.
  */
 static void rxrpc_local_processor(struct work_struct *work)
 {
@@ -436,13 +460,15 @@ static void rxrpc_local_processor(struct work_struct *work)
 		container_of(work, struct rxrpc_local, processor);
 	bool again;
 
-	trace_rxrpc_local(local, rxrpc_local_processing,
+	trace_rxrpc_local(local->debug_id, rxrpc_local_processing,
 			  atomic_read(&local->usage), NULL);
 
 	do {
 		again = false;
-		if (atomic_read(&local->usage) == 0)
-			return rxrpc_local_destroyer(local);
+		if (atomic_read(&local->active_users) == 0) {
+			rxrpc_local_destroyer(local);
+			break;
+		}
 
 		if (!skb_queue_empty(&local->reject_queue)) {
 			rxrpc_reject_packets(local);
@@ -454,6 +480,8 @@ static void rxrpc_local_processor(struct work_struct *work)
 			again = true;
 		}
 	} while (again);
+
+	rxrpc_put_local(local);
 }
 
 /*
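With the counters split, rxrpc_local_processor() becomes the place where the endpoint dies: it now keys destruction off active_users rather than usage, and since it was queued with a reference, it must drop that reference on every exit path, including the one where the transport has just been destroyed. A short worker-side sketch of that convention (userspace, illustrative names):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct endpoint {
	atomic_int usage;	/* the work item holds one of these */
	atomic_int active_users;
};

static void put_ref(struct endpoint *ep)
{
	if (atomic_fetch_sub(&ep->usage, 1) == 1)
		printf("free memory (kernel: call_rcu in rxrpc_put_local)\n");
}

/* Runs with the reference that the queueing site handed over. */
static void processor(struct endpoint *ep)
{
	bool again;

	do {
		again = false;
		if (atomic_load(&ep->active_users) == 0) {
			printf("destroy transport socket\n");
			break;	/* still falls through to the put below */
		}
		/* drain reject/event queues; set again if more arrived */
	} while (again);

	put_ref(ep);	/* release the queueing site's reference */
}

int main(void)
{
	struct endpoint ep = { .usage = 1, .active_users = 0 };

	processor(&ep);	/* no active users left: destroy, then final put */
	return 0;
}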