summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--include/trace/events/rxrpc.h6
-rw-r--r--net/rxrpc/call_accept.c2
-rw-r--r--net/rxrpc/call_object.c28
3 files changed, 21 insertions, 15 deletions
diff --git a/include/trace/events/rxrpc.h b/include/trace/events/rxrpc.h
index 38a97e890cb6..191fe447f990 100644
--- a/include/trace/events/rxrpc.h
+++ b/include/trace/events/rxrpc.h
@@ -606,10 +606,10 @@ TRACE_EVENT(rxrpc_client,
 	    );
 
 TRACE_EVENT(rxrpc_call,
-	    TP_PROTO(struct rxrpc_call *call, enum rxrpc_call_trace op,
+	    TP_PROTO(unsigned int call_debug_id, enum rxrpc_call_trace op,
		     int usage, const void *where, const void *aux),
 
-	    TP_ARGS(call, op, usage, where, aux),
+	    TP_ARGS(call_debug_id, op, usage, where, aux),
 
	    TP_STRUCT__entry(
		    __field(unsigned int,		call		)
@@ -620,7 +620,7 @@ TRACE_EVENT(rxrpc_call,
		    ),
 
	    TP_fast_assign(
-		    __entry->call = call->debug_id;
+		    __entry->call = call_debug_id;
		    __entry->op = op;
		    __entry->usage = usage;
		    __entry->where = where;
diff --git a/net/rxrpc/call_accept.c b/net/rxrpc/call_accept.c
index c1b1b7dd2924..1f778102ed8d 100644
--- a/net/rxrpc/call_accept.c
+++ b/net/rxrpc/call_accept.c
@@ -97,7 +97,7 @@ static int rxrpc_service_prealloc_one(struct rxrpc_sock *rx,
	call->flags |= (1 << RXRPC_CALL_IS_SERVICE);
	call->state = RXRPC_CALL_SERVER_PREALLOC;
 
-	trace_rxrpc_call(call, rxrpc_call_new_service,
+	trace_rxrpc_call(call->debug_id, rxrpc_call_new_service,
			 atomic_read(&call->usage),
			 here, (const void *)user_call_ID);
 
diff --git a/net/rxrpc/call_object.c b/net/rxrpc/call_object.c
index 32d8dc677142..6dace078971a 100644
--- a/net/rxrpc/call_object.c
+++ b/net/rxrpc/call_object.c
@@ -240,7 +240,8 @@ struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx,
	if (p->intr)
		__set_bit(RXRPC_CALL_IS_INTR, &call->flags);
	call->tx_total_len = p->tx_total_len;
-	trace_rxrpc_call(call, rxrpc_call_new_client, atomic_read(&call->usage),
+	trace_rxrpc_call(call->debug_id, rxrpc_call_new_client,
+			 atomic_read(&call->usage),
			 here, (const void *)p->user_call_ID);
 
	/* We need to protect a partially set up call against the user as we
@@ -290,8 +291,8 @@ struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx,
	if (ret < 0)
		goto error;
 
-	trace_rxrpc_call(call, rxrpc_call_connected, atomic_read(&call->usage),
-			 here, NULL);
+	trace_rxrpc_call(call->debug_id, rxrpc_call_connected,
+			 atomic_read(&call->usage), here, NULL);
 
	rxrpc_start_call_timer(call);
 
@@ -313,8 +314,8 @@ error_dup_user_ID:
 error:
	__rxrpc_set_call_completion(call, RXRPC_CALL_LOCAL_ERROR,
				    RX_CALL_DEAD, ret);
-	trace_rxrpc_call(call, rxrpc_call_error, atomic_read(&call->usage),
-			 here, ERR_PTR(ret));
+	trace_rxrpc_call(call->debug_id, rxrpc_call_error,
+			 atomic_read(&call->usage), here, ERR_PTR(ret));
	rxrpc_release_call(rx, call);
	mutex_unlock(&call->user_mutex);
	rxrpc_put_call(call, rxrpc_call_put);
@@ -376,7 +377,8 @@ bool rxrpc_queue_call(struct rxrpc_call *call)
	if (n == 0)
		return false;
	if (rxrpc_queue_work(&call->processor))
-		trace_rxrpc_call(call, rxrpc_call_queued, n + 1, here, NULL);
+		trace_rxrpc_call(call->debug_id, rxrpc_call_queued, n + 1,
+				 here, NULL);
	else
		rxrpc_put_call(call, rxrpc_call_put_noqueue);
	return true;
@@ -391,7 +393,8 @@ bool __rxrpc_queue_call(struct rxrpc_call *call)
	int n = atomic_read(&call->usage);
	ASSERTCMP(n, >=, 1);
	if (rxrpc_queue_work(&call->processor))
-		trace_rxrpc_call(call, rxrpc_call_queued_ref, n, here, NULL);
+		trace_rxrpc_call(call->debug_id, rxrpc_call_queued_ref, n,
+				 here, NULL);
	else
		rxrpc_put_call(call, rxrpc_call_put_noqueue);
	return true;
@@ -406,7 +409,8 @@ void rxrpc_see_call(struct rxrpc_call *call)
	if (call) {
		int n = atomic_read(&call->usage);
 
-		trace_rxrpc_call(call, rxrpc_call_seen, n, here, NULL);
+		trace_rxrpc_call(call->debug_id, rxrpc_call_seen, n,
+				 here, NULL);
	}
 }
 
@@ -418,7 +422,7 @@ void rxrpc_get_call(struct rxrpc_call *call, enum rxrpc_call_trace op)
	const void *here = __builtin_return_address(0);
	int n = atomic_inc_return(&call->usage);
 
-	trace_rxrpc_call(call, op, n, here, NULL);
+	trace_rxrpc_call(call->debug_id, op, n, here, NULL);
 }
 
 /*
@@ -445,7 +449,8 @@ void rxrpc_release_call(struct rxrpc_sock *rx, struct rxrpc_call *call)
 
	_enter("{%d,%d}", call->debug_id, atomic_read(&call->usage));
 
-	trace_rxrpc_call(call, rxrpc_call_release, atomic_read(&call->usage),
+	trace_rxrpc_call(call->debug_id, rxrpc_call_release,
+			 atomic_read(&call->usage),
			 here, (const void *)call->flags);
 
	ASSERTCMP(call->state, ==, RXRPC_CALL_COMPLETE);
@@ -534,12 +539,13 @@ void rxrpc_put_call(struct rxrpc_call *call, enum rxrpc_call_trace op)
 {
	struct rxrpc_net *rxnet = call->rxnet;
	const void *here = __builtin_return_address(0);
+	unsigned int debug_id = call->debug_id;
	int n;
 
	ASSERT(call != NULL);
 
	n = atomic_dec_return(&call->usage);
-	trace_rxrpc_call(call, op, n, here, NULL);
+	trace_rxrpc_call(debug_id, op, n, here, NULL);
	ASSERTCMP(n, >=, 0);
	if (n == 0) {
		_debug("call %d dead", call->debug_id);