author		David S. Miller <davem@davemloft.net>	2018-03-31 22:29:12 -0400
committer	David S. Miller <davem@davemloft.net>	2018-03-31 22:29:12 -0400
commit		e2e80c027f5adab3cc44c3d07c4484291384d278 (patch)
tree		86f44a75699112a8771261b8da193a265345c262
parent		3be9b5fdc6379faf6f23cd8539ef9a6235396c5f (diff)
parent		17226f1240381812c3a4927dc9da2814fb71c8ac (diff)

Merge tag 'rxrpc-next-20180330' of git://git.kernel.org/pub/scm/linux/kernel/git/dhowells/linux-fs
David Howells says:

====================
rxrpc: Fixes and more traces

Here are some patches that add some more tracepoints to AF_RXRPC and fix
some issues therein:

 (1) Fix the use of VERSION packets to keep firewall routes open.

 (2) Fix the incorrect current time usage in a tracepoint.

 (3) Fix Tx ring annotation corruption.

 (4) Fix accidental conversion of call-level abort into connection-level
     abort.

 (5) Fix calculation of resend time.

 (6) Remove a couple of unused variables.

 (7) Fix a bunch of checker warnings and an error.  Note that not all
     warnings can be quashed as checker doesn't seem to correctly handle
     seqlocks.

 (8) Fix a potential race between call destruction and socket/net
     destruction.

 (9) Add a tracepoint to track rxrpc_local refcounting.

(10) Fix an apparent leak of rxrpc_local objects.

(11) Add a tracepoint to track rxrpc_peer refcounting.

(12) Fix a leak of rxrpc_peer objects.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--	include/trace/events/rxrpc.h	85
-rw-r--r--	net/rxrpc/af_rxrpc.c		 6
-rw-r--r--	net/rxrpc/ar-internal.h		68
-rw-r--r--	net/rxrpc/call_accept.c		 9
-rw-r--r--	net/rxrpc/call_event.c		 4
-rw-r--r--	net/rxrpc/call_object.c		17
-rw-r--r--	net/rxrpc/conn_client.c		 3
-rw-r--r--	net/rxrpc/conn_event.c		 3
-rw-r--r--	net/rxrpc/conn_object.c		10
-rw-r--r--	net/rxrpc/conn_service.c	 1
-rw-r--r--	net/rxrpc/input.c		17
-rw-r--r--	net/rxrpc/local_object.c	65
-rw-r--r--	net/rxrpc/net_ns.c		24
-rw-r--r--	net/rxrpc/output.c		59
-rw-r--r--	net/rxrpc/peer_event.c		98
-rw-r--r--	net/rxrpc/peer_object.c		93
-rw-r--r--	net/rxrpc/proc.c		 6
-rw-r--r--	net/rxrpc/rxkad.c		 2
-rw-r--r--	net/rxrpc/security.c		 3
-rw-r--r--	net/rxrpc/sendmsg.c		 7
20 files changed, 509 insertions(+), 71 deletions(-)
diff --git a/include/trace/events/rxrpc.h b/include/trace/events/rxrpc.h
index 2ea788f6f95d..9e96c2fe2793 100644
--- a/include/trace/events/rxrpc.h
+++ b/include/trace/events/rxrpc.h
@@ -42,6 +42,22 @@ enum rxrpc_skb_trace {
 	rxrpc_skb_tx_seen,
 };
 
+enum rxrpc_local_trace {
+	rxrpc_local_got,
+	rxrpc_local_new,
+	rxrpc_local_processing,
+	rxrpc_local_put,
+	rxrpc_local_queued,
+};
+
+enum rxrpc_peer_trace {
+	rxrpc_peer_got,
+	rxrpc_peer_new,
+	rxrpc_peer_processing,
+	rxrpc_peer_put,
+	rxrpc_peer_queued_error,
+};
+
 enum rxrpc_conn_trace {
 	rxrpc_conn_got,
 	rxrpc_conn_new_client,
@@ -215,6 +231,20 @@ enum rxrpc_congest_change {
 	EM(rxrpc_skb_tx_rotated,	"Tx ROT") \
 	E_(rxrpc_skb_tx_seen,		"Tx SEE")
 
+#define rxrpc_local_traces \
+	EM(rxrpc_local_got,		"GOT") \
+	EM(rxrpc_local_new,		"NEW") \
+	EM(rxrpc_local_processing,	"PRO") \
+	EM(rxrpc_local_put,		"PUT") \
+	E_(rxrpc_local_queued,		"QUE")
+
+#define rxrpc_peer_traces \
+	EM(rxrpc_peer_got,		"GOT") \
+	EM(rxrpc_peer_new,		"NEW") \
+	EM(rxrpc_peer_processing,	"PRO") \
+	EM(rxrpc_peer_put,		"PUT") \
+	E_(rxrpc_peer_queued_error,	"QER")
+
 #define rxrpc_conn_traces \
 	EM(rxrpc_conn_got,		"GOT") \
 	EM(rxrpc_conn_new_client,	"NWc") \
@@ -416,6 +446,7 @@ enum rxrpc_congest_change {
 #define E_(a, b) TRACE_DEFINE_ENUM(a);
 
 rxrpc_skb_traces;
+rxrpc_local_traces;
 rxrpc_conn_traces;
 rxrpc_client_traces;
 rxrpc_call_traces;
@@ -439,6 +470,60 @@ rxrpc_congest_changes;
 #define EM(a, b)	{ a, b },
 #define E_(a, b)	{ a, b }
 
+TRACE_EVENT(rxrpc_local,
+	    TP_PROTO(struct rxrpc_local *local, enum rxrpc_local_trace op,
+		     int usage, const void *where),
+
+	    TP_ARGS(local, op, usage, where),
+
+	    TP_STRUCT__entry(
+		    __field(unsigned int,	local	)
+		    __field(int,		op	)
+		    __field(int,		usage	)
+		    __field(const void *,	where	)
+			     ),
+
+	    TP_fast_assign(
+		    __entry->local = local->debug_id;
+		    __entry->op = op;
+		    __entry->usage = usage;
+		    __entry->where = where;
+			   ),
+
+	    TP_printk("L=%08x %s u=%d sp=%pSR",
+		      __entry->local,
+		      __print_symbolic(__entry->op, rxrpc_local_traces),
+		      __entry->usage,
+		      __entry->where)
+	    );
+
+TRACE_EVENT(rxrpc_peer,
+	    TP_PROTO(struct rxrpc_peer *peer, enum rxrpc_peer_trace op,
+		     int usage, const void *where),
+
+	    TP_ARGS(peer, op, usage, where),
+
+	    TP_STRUCT__entry(
+		    __field(unsigned int,	peer	)
+		    __field(int,		op	)
+		    __field(int,		usage	)
+		    __field(const void *,	where	)
+			     ),
+
+	    TP_fast_assign(
+		    __entry->peer = peer->debug_id;
+		    __entry->op = op;
+		    __entry->usage = usage;
+		    __entry->where = where;
+			   ),
+
+	    TP_printk("P=%08x %s u=%d sp=%pSR",
+		      __entry->peer,
+		      __print_symbolic(__entry->op, rxrpc_peer_traces),
+		      __entry->usage,
+		      __entry->where)
+	    );
+
 TRACE_EVENT(rxrpc_conn,
 	    TP_PROTO(struct rxrpc_connection *conn, enum rxrpc_conn_trace op,
		     int usage, const void *where),
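The tracepoints above lean on the EM()/E_() two-pass macro trick: the same list is expanded once as TRACE_DEFINE_ENUM() (so userspace tooling can resolve the enum values) and once as { value, "name" } pairs consumed by __print_symbolic(). A stand-alone C sketch of that second expansion (illustrative only, not the kernel header):

/* Sketch: one EM()/E_() list yields both an enum and a value->string
 * table, which is what __print_symbolic() consumes at print time.
 */
#include <stdio.h>

enum rxrpc_local_trace {
	rxrpc_local_got,
	rxrpc_local_new,
	rxrpc_local_queued,
};

#define rxrpc_local_traces \
	EM(rxrpc_local_got,    "GOT") \
	EM(rxrpc_local_new,    "NEW") \
	E_(rxrpc_local_queued, "QUE")

/* Expansion pass: turn each entry into a { value, string } initialiser. */
#define EM(a, b) { a, b },
#define E_(a, b) { a, b }

static const struct trace_sym { int val; const char *name; } local_syms[] = {
	rxrpc_local_traces
};
#undef EM
#undef E_

int main(void)
{
	for (unsigned int i = 0; i < sizeof(local_syms) / sizeof(local_syms[0]); i++)
		printf("%d -> %s\n", local_syms[i].val, local_syms[i].name);
	return 0;
}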
diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
index ec5ec68be1aa..9a2c8e7c000e 100644
--- a/net/rxrpc/af_rxrpc.c
+++ b/net/rxrpc/af_rxrpc.c
@@ -324,6 +324,7 @@ struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *sock,
 		mutex_unlock(&call->user_mutex);
 	}
 
+	rxrpc_put_peer(cp.peer);
 	_leave(" = %p", call);
 	return call;
 }
@@ -447,6 +448,7 @@ int rxrpc_kernel_retry_call(struct socket *sock, struct rxrpc_call *call,
 	ret = rxrpc_retry_client_call(rx, call, &cp, srx, GFP_KERNEL);
 
 	mutex_unlock(&call->user_mutex);
+	rxrpc_put_peer(cp.peer);
 	_leave(" = %d", ret);
 	return ret;
 }
@@ -762,6 +764,7 @@ static __poll_t rxrpc_poll(struct file *file, struct socket *sock,
 static int rxrpc_create(struct net *net, struct socket *sock, int protocol,
 			int kern)
 {
+	struct rxrpc_net *rxnet;
 	struct rxrpc_sock *rx;
 	struct sock *sk;
 
@@ -801,6 +804,9 @@ static int rxrpc_create(struct net *net, struct socket *sock, int protocol,
 	rwlock_init(&rx->call_lock);
 	memset(&rx->srx, 0, sizeof(rx->srx));
 
+	rxnet = rxrpc_net(sock_net(&rx->sk));
+	timer_reduce(&rxnet->peer_keepalive_timer, jiffies + 1);
+
 	_leave(" = 0 [%p]", rx);
 	return 0;
 }
diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
index 21cf164b6d85..90d7079e0aa9 100644
--- a/net/rxrpc/ar-internal.h
+++ b/net/rxrpc/ar-internal.h
@@ -75,7 +75,9 @@ struct rxrpc_net {
 	u32			epoch;		/* Local epoch for detecting local-end reset */
 	struct list_head	calls;		/* List of calls active in this namespace */
 	rwlock_t		call_lock;	/* Lock for ->calls */
+	atomic_t		nr_calls;	/* Count of allocated calls */
 
+	atomic_t		nr_conns;
 	struct list_head	conn_proc_list;	/* List of conns in this namespace for proc */
 	struct list_head	service_conns;	/* Service conns in this namespace */
 	rwlock_t		conn_lock;	/* Lock for ->conn_proc_list, ->service_conns */
@@ -97,8 +99,16 @@ struct rxrpc_net {
 	struct list_head	local_endpoints;
 	struct mutex		local_mutex;	/* Lock for ->local_endpoints */
 
-	spinlock_t		peer_hash_lock;	/* Lock for ->peer_hash */
 	DECLARE_HASHTABLE	(peer_hash, 10);
+	spinlock_t		peer_hash_lock;	/* Lock for ->peer_hash */
+
+#define RXRPC_KEEPALIVE_TIME 20 /* NAT keepalive time in seconds */
+	u8			peer_keepalive_cursor;
+	ktime_t			peer_keepalive_base;
+	struct hlist_head	peer_keepalive[RXRPC_KEEPALIVE_TIME + 1];
+	struct hlist_head	peer_keepalive_new;
+	struct timer_list	peer_keepalive_timer;
+	struct work_struct	peer_keepalive_work;
 };
 
 /*
@@ -285,6 +295,8 @@ struct rxrpc_peer {
 	struct hlist_head	error_targets;	/* targets for net error distribution */
 	struct work_struct	error_distributor;
 	struct rb_root		service_conns;	/* Service connections */
+	struct hlist_node	keepalive_link;	/* Link in net->peer_keepalive[] */
+	time64_t		last_tx_at;	/* Last time packet sent here */
 	seqlock_t		service_conn_lock;
 	spinlock_t		lock;		/* access lock */
 	unsigned int		if_mtu;		/* interface MTU for this peer */
@@ -518,6 +530,7 @@ struct rxrpc_call {
 	struct rxrpc_connection	*conn;		/* connection carrying call */
 	struct rxrpc_peer	*peer;		/* Peer record for remote address */
 	struct rxrpc_sock __rcu	*socket;	/* socket responsible */
+	struct rxrpc_net	*rxnet;		/* Network namespace to which call belongs */
 	struct mutex		user_mutex;	/* User access mutex */
 	unsigned long		ack_at;		/* When deferred ACK needs to happen */
 	unsigned long		ack_lost_at;	/* When ACK is figured as lost */
@@ -969,31 +982,12 @@ extern void rxrpc_process_local_events(struct rxrpc_local *);
  * local_object.c
  */
 struct rxrpc_local *rxrpc_lookup_local(struct net *, const struct sockaddr_rxrpc *);
-void __rxrpc_put_local(struct rxrpc_local *);
+struct rxrpc_local *rxrpc_get_local(struct rxrpc_local *);
+struct rxrpc_local *rxrpc_get_local_maybe(struct rxrpc_local *);
+void rxrpc_put_local(struct rxrpc_local *);
+void rxrpc_queue_local(struct rxrpc_local *);
 void rxrpc_destroy_all_locals(struct rxrpc_net *);
 
-static inline void rxrpc_get_local(struct rxrpc_local *local)
-{
-	atomic_inc(&local->usage);
-}
-
-static inline
-struct rxrpc_local *rxrpc_get_local_maybe(struct rxrpc_local *local)
-{
-	return atomic_inc_not_zero(&local->usage) ? local : NULL;
-}
-
-static inline void rxrpc_put_local(struct rxrpc_local *local)
-{
-	if (local && atomic_dec_and_test(&local->usage))
-		__rxrpc_put_local(local);
-}
-
-static inline void rxrpc_queue_local(struct rxrpc_local *local)
-{
-	rxrpc_queue_work(&local->processor);
-}
-
 /*
  * misc.c
  */
@@ -1026,6 +1020,7 @@ int rxrpc_send_ack_packet(struct rxrpc_call *, bool, rxrpc_serial_t *);
 int rxrpc_send_abort_packet(struct rxrpc_call *);
 int rxrpc_send_data_packet(struct rxrpc_call *, struct sk_buff *, bool);
 void rxrpc_reject_packets(struct rxrpc_local *);
+void rxrpc_send_keepalive(struct rxrpc_peer *);
 
 /*
  * peer_event.c
@@ -1034,6 +1029,7 @@ void rxrpc_error_report(struct sock *);
 void rxrpc_peer_error_distributor(struct work_struct *);
 void rxrpc_peer_add_rtt(struct rxrpc_call *, enum rxrpc_rtt_rx_trace,
 			rxrpc_serial_t, rxrpc_serial_t, ktime_t, ktime_t);
+void rxrpc_peer_keepalive_worker(struct work_struct *);
 
 /*
  * peer_object.c
@@ -1045,25 +1041,11 @@ struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_local *,
 struct rxrpc_peer *rxrpc_alloc_peer(struct rxrpc_local *, gfp_t);
 struct rxrpc_peer *rxrpc_lookup_incoming_peer(struct rxrpc_local *,
 					      struct rxrpc_peer *);
-
-static inline struct rxrpc_peer *rxrpc_get_peer(struct rxrpc_peer *peer)
-{
-	atomic_inc(&peer->usage);
-	return peer;
-}
-
-static inline
-struct rxrpc_peer *rxrpc_get_peer_maybe(struct rxrpc_peer *peer)
-{
-	return atomic_inc_not_zero(&peer->usage) ? peer : NULL;
-}
-
-extern void __rxrpc_put_peer(struct rxrpc_peer *peer);
-static inline void rxrpc_put_peer(struct rxrpc_peer *peer)
-{
-	if (peer && atomic_dec_and_test(&peer->usage))
-		__rxrpc_put_peer(peer);
-}
+void rxrpc_destroy_all_peers(struct rxrpc_net *);
+struct rxrpc_peer *rxrpc_get_peer(struct rxrpc_peer *);
+struct rxrpc_peer *rxrpc_get_peer_maybe(struct rxrpc_peer *);
+void rxrpc_put_peer(struct rxrpc_peer *);
+void __rxrpc_queue_peer_error(struct rxrpc_peer *);
 
 /*
  * proc.c
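The peer_keepalive[] array added above is a time wheel: one bucket per second of the keepalive period plus a spare, with peer_keepalive_cursor marking the bucket whose second is "now". A userspace C sketch of the slot arithmetic these fields imply (values hypothetical):

/* Illustrative sketch of the keepalive bucket arithmetic: a peer that
 * transmitted recently is filed into the bucket for the second in which
 * its keepalive next falls due.
 */
#include <stdio.h>

#define RXRPC_KEEPALIVE_TIME 20
#define NBUCKETS (RXRPC_KEEPALIVE_TIME + 1)

static unsigned int keepalive_slot(long secs_since_last_tx, unsigned int cursor)
{
	long remaining = RXRPC_KEEPALIVE_TIME - 1 - secs_since_last_tx;

	if (remaining <= 0)
		return cursor;	/* due now: ping from the current bucket */
	if (remaining > RXRPC_KEEPALIVE_TIME - 1)
		remaining = RXRPC_KEEPALIVE_TIME - 1;
	return (cursor + remaining) % NBUCKETS;
}

int main(void)
{
	/* A peer that transmitted 5s ago, with the cursor at bucket 3, is
	 * re-filed 14 buckets ahead: (3 + (20 - 1 - 5)) % 21 = 17. */
	printf("slot=%u\n", keepalive_slot(5, 3));
	return 0;
}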
diff --git a/net/rxrpc/call_accept.c b/net/rxrpc/call_accept.c
index 92ebd1d7e0bb..f67017dcb25e 100644
--- a/net/rxrpc/call_accept.c
+++ b/net/rxrpc/call_accept.c
@@ -138,6 +138,7 @@ static int rxrpc_service_prealloc_one(struct rxrpc_sock *rx,
 
 	write_unlock(&rx->call_lock);
 
+	rxnet = call->rxnet;
 	write_lock(&rxnet->call_lock);
 	list_add_tail(&call->link, &rxnet->calls);
 	write_unlock(&rxnet->call_lock);
@@ -218,6 +219,8 @@ void rxrpc_discard_prealloc(struct rxrpc_sock *rx)
 		list_del(&conn->proc_link);
 		write_unlock(&rxnet->conn_lock);
 		kfree(conn);
+		if (atomic_dec_and_test(&rxnet->nr_conns))
+			wake_up_atomic_t(&rxnet->nr_conns);
 		tail = (tail + 1) & (size - 1);
 	}
 
@@ -225,7 +228,7 @@ void rxrpc_discard_prealloc(struct rxrpc_sock *rx)
 	tail = b->call_backlog_tail;
 	while (CIRC_CNT(head, tail, size) > 0) {
 		struct rxrpc_call *call = b->call_backlog[tail];
-		call->socket = rx;
+		rcu_assign_pointer(call->socket, rx);
 		if (rx->discard_new_call) {
 			_debug("discard %lx", call->user_call_ID);
 			rx->discard_new_call(call, call->user_call_ID);
@@ -295,8 +298,7 @@ static struct rxrpc_call *rxrpc_alloc_incoming_call(struct rxrpc_sock *rx,
 		b->conn_backlog[conn_tail] = NULL;
 		smp_store_release(&b->conn_backlog_tail,
 				  (conn_tail + 1) & (RXRPC_BACKLOG_MAX - 1));
-		rxrpc_get_local(local);
-		conn->params.local = local;
+		conn->params.local = rxrpc_get_local(local);
 		conn->params.peer = peer;
 		rxrpc_see_connection(conn);
 		rxrpc_new_incoming_connection(rx, conn, skb);
@@ -456,6 +458,7 @@ struct rxrpc_call *rxrpc_accept_call(struct rxrpc_sock *rx,
 				     unsigned long user_call_ID,
 				     rxrpc_notify_rx_t notify_rx)
 	__releases(&rx->sk.sk_lock.slock)
+	__acquires(call->user_mutex)
 {
 	struct rxrpc_call *call;
 	struct rb_node *parent, **pp;
diff --git a/net/rxrpc/call_event.c b/net/rxrpc/call_event.c
index 6a62e42e1d8d..6e0d788b4dc4 100644
--- a/net/rxrpc/call_event.c
+++ b/net/rxrpc/call_event.c
@@ -226,7 +226,7 @@ static void rxrpc_resend(struct rxrpc_call *call, unsigned long now_j)
 					       ktime_to_ns(ktime_sub(skb->tstamp, max_age)));
 	}
 
-	resend_at = nsecs_to_jiffies(ktime_to_ns(ktime_sub(oldest, now)));
+	resend_at = nsecs_to_jiffies(ktime_to_ns(ktime_sub(now, oldest)));
 	resend_at += jiffies + rxrpc_resend_timeout;
 	WRITE_ONCE(call->resend_at, resend_at);
 
@@ -238,7 +238,7 @@ static void rxrpc_resend(struct rxrpc_call *call, unsigned long now_j)
 	 * retransmitting data.
 	 */
 	if (!retrans) {
-		rxrpc_reduce_call_timer(call, resend_at, now,
+		rxrpc_reduce_call_timer(call, resend_at, now_j,
					rxrpc_timer_set_for_resend);
 		spin_unlock_bh(&call->lock);
 		ack_ts = ktime_sub(now, call->acks_latest_ts);
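The one-line resend fix swaps the operands of ktime_sub(): oldest is the timestamp of the oldest resendable packet and lies in the past, so the old expression produced a negative ktime that nsecs_to_jiffies() then mangled. A tiny worked example with hypothetical numbers:

/* Worked example of the sign error fixed above (illustrative values). */
#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000LL

int main(void)
{
	int64_t now = 100 * NSEC_PER_SEC;	/* hypothetical clock reading */
	int64_t oldest = 98 * NSEC_PER_SEC;	/* packet sent 2s ago */

	printf("broken: %lld ns\n", (long long)(oldest - now)); /* -2s, bogus */
	printf("fixed:  %lld ns\n", (long long)(now - oldest)); /* +2s, the age */
	return 0;
}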
diff --git a/net/rxrpc/call_object.c b/net/rxrpc/call_object.c
index 147657dfe757..f721c2b7e234 100644
--- a/net/rxrpc/call_object.c
+++ b/net/rxrpc/call_object.c
@@ -103,6 +103,7 @@ struct rxrpc_call *rxrpc_alloc_call(struct rxrpc_sock *rx, gfp_t gfp,
 				    unsigned int debug_id)
 {
 	struct rxrpc_call *call;
+	struct rxrpc_net *rxnet = rxrpc_net(sock_net(&rx->sk));
 
 	call = kmem_cache_zalloc(rxrpc_call_jar, gfp);
 	if (!call)
@@ -153,6 +154,9 @@ struct rxrpc_call *rxrpc_alloc_call(struct rxrpc_sock *rx, gfp_t gfp,
 
 	call->cong_cwnd = 2;
 	call->cong_ssthresh = RXRPC_RXTX_BUFF_SIZE - 1;
+
+	call->rxnet = rxnet;
+	atomic_inc(&rxnet->nr_calls);
 	return call;
 
 nomem_2:
@@ -219,9 +223,10 @@ struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx,
 					 gfp_t gfp,
 					 unsigned int debug_id)
 	__releases(&rx->sk.sk_lock.slock)
+	__acquires(&call->user_mutex)
 {
 	struct rxrpc_call *call, *xcall;
-	struct rxrpc_net *rxnet = rxrpc_net(sock_net(&rx->sk));
+	struct rxrpc_net *rxnet;
 	struct rb_node *parent, **pp;
 	const void *here = __builtin_return_address(0);
 	int ret;
@@ -271,6 +276,7 @@ struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx,
 
 	write_unlock(&rx->call_lock);
 
+	rxnet = call->rxnet;
 	write_lock(&rxnet->call_lock);
 	list_add_tail(&call->link, &rxnet->calls);
 	write_unlock(&rxnet->call_lock);
@@ -616,7 +622,7 @@ void rxrpc_release_calls_on_socket(struct rxrpc_sock *rx)
  */
 void rxrpc_put_call(struct rxrpc_call *call, enum rxrpc_call_trace op)
 {
-	struct rxrpc_net *rxnet;
+	struct rxrpc_net *rxnet = call->rxnet;
 	const void *here = __builtin_return_address(0);
 	int n;
 
@@ -630,7 +636,6 @@ void rxrpc_put_call(struct rxrpc_call *call, enum rxrpc_call_trace op)
 		ASSERTCMP(call->state, ==, RXRPC_CALL_COMPLETE);
 
 		if (!list_empty(&call->link)) {
-			rxnet = rxrpc_net(sock_net(&call->socket->sk));
 			write_lock(&rxnet->call_lock);
 			list_del_init(&call->link);
 			write_unlock(&rxnet->call_lock);
@@ -646,11 +651,14 @@ void rxrpc_put_call(struct rxrpc_call *call, enum rxrpc_call_trace op)
 static void rxrpc_rcu_destroy_call(struct rcu_head *rcu)
 {
 	struct rxrpc_call *call = container_of(rcu, struct rxrpc_call, rcu);
+	struct rxrpc_net *rxnet = call->rxnet;
 
 	rxrpc_put_peer(call->peer);
 	kfree(call->rxtx_buffer);
 	kfree(call->rxtx_annotations);
 	kmem_cache_free(rxrpc_call_jar, call);
+	if (atomic_dec_and_test(&rxnet->nr_calls))
+		wake_up_atomic_t(&rxnet->nr_calls);
 }
 
 /*
@@ -715,4 +723,7 @@ void rxrpc_destroy_all_calls(struct rxrpc_net *rxnet)
 	}
 
 	write_unlock(&rxnet->call_lock);
+
+	atomic_dec(&rxnet->nr_calls);
+	wait_on_atomic_t(&rxnet->nr_calls, atomic_t_wait, TASK_UNINTERRUPTIBLE);
 }
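nr_calls starts at 1 when the namespace is initialised, so the wait above can only complete after rxrpc_destroy_all_calls() drops that bias and the last RCU call-free takes the counter to zero. A userspace analogue of this biased-counter teardown pattern (a sketch with illustrative names, not kernel code):

/* Sketch: a counter biased to 1 cannot reach 0 until teardown drops the
 * bias, so the waiter is only woken once every object is truly freed.
 */
#include <stdatomic.h>
#include <stdio.h>

static atomic_int nr_objects = 1;	/* bias: the namespace itself */

static void object_alloc(void) { atomic_fetch_add(&nr_objects, 1); }

static void object_free(void)
{
	if (atomic_fetch_sub(&nr_objects, 1) == 1)
		printf("count hit zero: wake the teardown waiter\n");
}

int main(void)
{
	object_alloc();
	object_alloc();
	object_free();
	atomic_fetch_sub(&nr_objects, 1);	/* teardown: drop the bias */
	object_free();				/* final free reaches 0 */
	return 0;
}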
diff --git a/net/rxrpc/conn_client.c b/net/rxrpc/conn_client.c
index 064175068059..5736f643c516 100644
--- a/net/rxrpc/conn_client.c
+++ b/net/rxrpc/conn_client.c
@@ -207,6 +207,7 @@ rxrpc_alloc_client_connection(struct rxrpc_conn_parameters *cp, gfp_t gfp)
 	if (ret < 0)
 		goto error_2;
 
+	atomic_inc(&rxnet->nr_conns);
 	write_lock(&rxnet->conn_lock);
 	list_add_tail(&conn->proc_link, &rxnet->conn_proc_list);
 	write_unlock(&rxnet->conn_lock);
@@ -776,7 +777,7 @@ void rxrpc_disconnect_client_call(struct rxrpc_call *call)
 	unsigned int channel = call->cid & RXRPC_CHANNELMASK;
 	struct rxrpc_connection *conn = call->conn;
 	struct rxrpc_channel *chan = &conn->channels[channel];
-	struct rxrpc_net *rxnet = rxrpc_net(sock_net(&call->socket->sk));
+	struct rxrpc_net *rxnet = conn->params.local->rxnet;
 
 	trace_rxrpc_client(conn, channel, rxrpc_client_chan_disconnect);
 	call->conn = NULL;
diff --git a/net/rxrpc/conn_event.c b/net/rxrpc/conn_event.c
index d2ec3fd593e8..c717152070df 100644
--- a/net/rxrpc/conn_event.c
+++ b/net/rxrpc/conn_event.c
@@ -136,6 +136,7 @@ static void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn,
 	}
 
 	kernel_sendmsg(conn->params.local->socket, &msg, iov, ioc, len);
+	conn->params.peer->last_tx_at = ktime_get_real();
 	_leave("");
 	return;
 }
@@ -239,6 +240,8 @@ static int rxrpc_abort_connection(struct rxrpc_connection *conn,
 		return -EAGAIN;
 	}
 
+	conn->params.peer->last_tx_at = ktime_get_real();
+
 	_leave(" = 0");
 	return 0;
 }
diff --git a/net/rxrpc/conn_object.c b/net/rxrpc/conn_object.c
index ccbac190add1..0950ee3d26f5 100644
--- a/net/rxrpc/conn_object.c
+++ b/net/rxrpc/conn_object.c
@@ -365,6 +365,9 @@ static void rxrpc_destroy_connection(struct rcu_head *rcu)
 	key_put(conn->params.key);
 	key_put(conn->server_key);
 	rxrpc_put_peer(conn->params.peer);
+
+	if (atomic_dec_and_test(&conn->params.local->rxnet->nr_conns))
+		wake_up_atomic_t(&conn->params.local->rxnet->nr_conns);
 	rxrpc_put_local(conn->params.local);
 
 	kfree(conn);
@@ -418,7 +421,7 @@ void rxrpc_service_connection_reaper(struct work_struct *work)
 		 */
 		if (atomic_cmpxchg(&conn->usage, 1, 0) != 1)
 			continue;
-		trace_rxrpc_conn(conn, rxrpc_conn_reap_service, 0, 0);
+		trace_rxrpc_conn(conn, rxrpc_conn_reap_service, 0, NULL);
 
 		if (rxrpc_conn_is_client(conn))
 			BUG();
@@ -458,6 +461,7 @@ void rxrpc_destroy_all_connections(struct rxrpc_net *rxnet)
 
 	_enter("");
 
+	atomic_dec(&rxnet->nr_conns);
 	rxrpc_destroy_all_client_connections(rxnet);
 
 	del_timer_sync(&rxnet->service_conn_reap_timer);
@@ -475,5 +479,9 @@ void rxrpc_destroy_all_connections(struct rxrpc_net *rxnet)
 
 	ASSERT(list_empty(&rxnet->conn_proc_list));
 
+	/* We need to wait for the connections to be destroyed by RCU as they
+	 * pin things that we still need to get rid of.
+	 */
+	wait_on_atomic_t(&rxnet->nr_conns, atomic_t_wait, TASK_UNINTERRUPTIBLE);
 	_leave("");
 }
diff --git a/net/rxrpc/conn_service.c b/net/rxrpc/conn_service.c
index f6fcdb3130a1..80773a50c755 100644
--- a/net/rxrpc/conn_service.c
+++ b/net/rxrpc/conn_service.c
@@ -132,6 +132,7 @@ struct rxrpc_connection *rxrpc_prealloc_service_connection(struct rxrpc_net *rxn
 		conn->state = RXRPC_CONN_SERVICE_PREALLOC;
 		atomic_set(&conn->usage, 2);
 
+		atomic_inc(&rxnet->nr_conns);
 		write_lock(&rxnet->conn_lock);
 		list_add_tail(&conn->link, &rxnet->service_conns);
 		list_add_tail(&conn->proc_link, &rxnet->conn_proc_list);
diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c
index 2a868fdab0ae..21800e6f5019 100644
--- a/net/rxrpc/input.c
+++ b/net/rxrpc/input.c
@@ -1183,6 +1183,8 @@ void rxrpc_data_ready(struct sock *udp_sk)
 
 	switch (sp->hdr.type) {
 	case RXRPC_PACKET_TYPE_VERSION:
+		if (!(sp->hdr.flags & RXRPC_CLIENT_INITIATED))
+			goto discard;
 		rxrpc_post_packet_to_local(local, skb);
 		goto out;
 
@@ -1240,16 +1242,19 @@ void rxrpc_data_ready(struct sock *udp_sk)
 			goto discard_unlock;
 
 		if (sp->hdr.callNumber == chan->last_call) {
-			/* For the previous service call, if completed successfully, we
-			 * discard all further packets.
+			if (chan->call ||
+			    sp->hdr.type == RXRPC_PACKET_TYPE_ABORT)
+				goto discard_unlock;
+
+			/* For the previous service call, if completed
+			 * successfully, we discard all further packets.
 			 */
 			if (rxrpc_conn_is_service(conn) &&
-			    (chan->last_type == RXRPC_PACKET_TYPE_ACK ||
-			     sp->hdr.type == RXRPC_PACKET_TYPE_ABORT))
+			    chan->last_type == RXRPC_PACKET_TYPE_ACK)
 				goto discard_unlock;
 
-			/* But otherwise we need to retransmit the final packet from
-			 * data cached in the connection record.
+			/* But otherwise we need to retransmit the final packet
+			 * from data cached in the connection record.
 			 */
 			rxrpc_post_packet_to_conn(conn, skb);
 			goto out_unlock;
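The VERSION filter above keys off the client-initiated flag: a VERSION packet sent as a keepalive reply has that flag clear, so reflecting it back would leave two endpoints answering each other indefinitely. A C sketch of the test (the 0x01 flag value is taken from the rx wire protocol and should be treated as an assumption here):

/* Sketch: only client-initiated VERSION packets deserve a reply. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define RXRPC_CLIENT_INITIATED 0x01	/* assumed wire-protocol flag value */

static bool version_wants_reply(uint8_t flags)
{
	/* Keepalive VERSION replies have the flag clear: do not answer. */
	return (flags & RXRPC_CLIENT_INITIATED) != 0;
}

int main(void)
{
	printf("client ping: %d, keepalive reply: %d\n",
	       version_wants_reply(0x01), version_wants_reply(0x00));
	return 0;
}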
diff --git a/net/rxrpc/local_object.c b/net/rxrpc/local_object.c
index 38b99db30e54..8b54e9531d52 100644
--- a/net/rxrpc/local_object.c
+++ b/net/rxrpc/local_object.c
@@ -95,6 +95,7 @@ static struct rxrpc_local *rxrpc_alloc_local(struct rxrpc_net *rxnet,
 		local->debug_id = atomic_inc_return(&rxrpc_debug_id);
 		memcpy(&local->srx, srx, sizeof(*srx));
 		local->srx.srx_service = 0;
+		trace_rxrpc_local(local, rxrpc_local_new, 1, NULL);
 	}
 
 	_leave(" = %p", local);
@@ -257,15 +258,74 @@ addr_in_use:
 }
 
 /*
+ * Get a ref on a local endpoint.
+ */
+struct rxrpc_local *rxrpc_get_local(struct rxrpc_local *local)
+{
+	const void *here = __builtin_return_address(0);
+	int n;
+
+	n = atomic_inc_return(&local->usage);
+	trace_rxrpc_local(local, rxrpc_local_got, n, here);
+	return local;
+}
+
+/*
+ * Get a ref on a local endpoint unless its usage has already reached 0.
+ */
+struct rxrpc_local *rxrpc_get_local_maybe(struct rxrpc_local *local)
+{
+	const void *here = __builtin_return_address(0);
+
+	if (local) {
+		int n = __atomic_add_unless(&local->usage, 1, 0);
+		if (n > 0)
+			trace_rxrpc_local(local, rxrpc_local_got, n + 1, here);
+		else
+			local = NULL;
+	}
+	return local;
+}
+
+/*
+ * Queue a local endpoint.
+ */
+void rxrpc_queue_local(struct rxrpc_local *local)
+{
+	const void *here = __builtin_return_address(0);
+
+	if (rxrpc_queue_work(&local->processor))
+		trace_rxrpc_local(local, rxrpc_local_queued,
+				  atomic_read(&local->usage), here);
+}
+
+/*
  * A local endpoint reached its end of life.
  */
-void __rxrpc_put_local(struct rxrpc_local *local)
+static void __rxrpc_put_local(struct rxrpc_local *local)
 {
 	_enter("%d", local->debug_id);
 	rxrpc_queue_work(&local->processor);
 }
 
 /*
+ * Drop a ref on a local endpoint.
+ */
+void rxrpc_put_local(struct rxrpc_local *local)
+{
+	const void *here = __builtin_return_address(0);
+	int n;
+
+	if (local) {
+		n = atomic_dec_return(&local->usage);
+		trace_rxrpc_local(local, rxrpc_local_put, n, here);
+
+		if (n == 0)
+			__rxrpc_put_local(local);
+	}
+}
+
+/*
  * Destroy a local endpoint's socket and then hand the record to RCU to dispose
 * of.
 *
@@ -322,7 +382,8 @@ static void rxrpc_local_processor(struct work_struct *work)
 		container_of(work, struct rxrpc_local, processor);
 	bool again;
 
-	_enter("%d", local->debug_id);
+	trace_rxrpc_local(local, rxrpc_local_processing,
+			  atomic_read(&local->usage), NULL);
 
 	do {
 		again = false;
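Moving the get/put helpers out of line is what makes the new rxrpc_local tracepoint useful: every reference change logs the post-operation count plus the caller captured with __builtin_return_address(0), which is what makes a refcount leak bisectable from a trace. A userspace sketch of the same traced-refcount idea (illustrative names; the builtin is a GCC/Clang extension):

/* Sketch: log who took and who dropped each reference. */
#include <stdatomic.h>
#include <stdio.h>

struct obj { atomic_int usage; };

static struct obj *obj_get(struct obj *o)
{
	int n = atomic_fetch_add(&o->usage, 1) + 1;

	fprintf(stderr, "GOT u=%d sp=%p\n", n, __builtin_return_address(0));
	return o;
}

static void obj_put(struct obj *o)
{
	int n = atomic_fetch_sub(&o->usage, 1) - 1;

	fprintf(stderr, "PUT u=%d sp=%p\n", n, __builtin_return_address(0));
	if (n == 0)
		fprintf(stderr, "last ref gone: free the object\n");
}

int main(void)
{
	struct obj o = { .usage = 1 };

	obj_get(&o);
	obj_put(&o);
	obj_put(&o);
	return 0;
}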
diff --git a/net/rxrpc/net_ns.c b/net/rxrpc/net_ns.c
index f18c9248e0d4..c7a023fb22d0 100644
--- a/net/rxrpc/net_ns.c
+++ b/net/rxrpc/net_ns.c
@@ -32,13 +32,22 @@ static void rxrpc_service_conn_reap_timeout(struct timer_list *timer)
 	rxrpc_queue_work(&rxnet->service_conn_reaper);
 }
 
+static void rxrpc_peer_keepalive_timeout(struct timer_list *timer)
+{
+	struct rxrpc_net *rxnet =
+		container_of(timer, struct rxrpc_net, peer_keepalive_timer);
+
+	if (rxnet->live)
+		rxrpc_queue_work(&rxnet->peer_keepalive_work);
+}
+
 /*
  * Initialise a per-network namespace record.
  */
 static __net_init int rxrpc_init_net(struct net *net)
 {
 	struct rxrpc_net *rxnet = rxrpc_net(net);
-	int ret;
+	int ret, i;
 
 	rxnet->live = true;
 	get_random_bytes(&rxnet->epoch, sizeof(rxnet->epoch));
@@ -46,7 +55,9 @@ static __net_init int rxrpc_init_net(struct net *net)
 
 	INIT_LIST_HEAD(&rxnet->calls);
 	rwlock_init(&rxnet->call_lock);
+	atomic_set(&rxnet->nr_calls, 1);
 
+	atomic_set(&rxnet->nr_conns, 1);
 	INIT_LIST_HEAD(&rxnet->conn_proc_list);
 	INIT_LIST_HEAD(&rxnet->service_conns);
 	rwlock_init(&rxnet->conn_lock);
@@ -70,8 +81,16 @@ static __net_init int rxrpc_init_net(struct net *net)
 
 	INIT_LIST_HEAD(&rxnet->local_endpoints);
 	mutex_init(&rxnet->local_mutex);
+
 	hash_init(rxnet->peer_hash);
 	spin_lock_init(&rxnet->peer_hash_lock);
+	for (i = 0; i < ARRAY_SIZE(rxnet->peer_keepalive); i++)
+		INIT_HLIST_HEAD(&rxnet->peer_keepalive[i]);
+	INIT_HLIST_HEAD(&rxnet->peer_keepalive_new);
+	timer_setup(&rxnet->peer_keepalive_timer,
+		    rxrpc_peer_keepalive_timeout, 0);
+	INIT_WORK(&rxnet->peer_keepalive_work, rxrpc_peer_keepalive_worker);
+	rxnet->peer_keepalive_base = ktime_add(ktime_get_real(), NSEC_PER_SEC);
 
 	ret = -ENOMEM;
 	rxnet->proc_net = proc_net_mkdir(net, "rxrpc", net->proc_net);
@@ -95,8 +114,11 @@ static __net_exit void rxrpc_exit_net(struct net *net)
 	struct rxrpc_net *rxnet = rxrpc_net(net);
 
 	rxnet->live = false;
+	del_timer_sync(&rxnet->peer_keepalive_timer);
+	cancel_work_sync(&rxnet->peer_keepalive_work);
 	rxrpc_destroy_all_calls(rxnet);
 	rxrpc_destroy_all_connections(rxnet);
+	rxrpc_destroy_all_peers(rxnet);
 	rxrpc_destroy_all_locals(rxnet);
 	proc_remove(rxnet->proc_net);
 }
diff --git a/net/rxrpc/output.c b/net/rxrpc/output.c
index cf73dc006c3b..7f1fc04775b3 100644
--- a/net/rxrpc/output.c
+++ b/net/rxrpc/output.c
@@ -32,6 +32,8 @@ struct rxrpc_abort_buffer {
 	__be32 abort_code;
 };
 
+static const char rxrpc_keepalive_string[] = "";
+
 /*
  * Arrange for a keepalive ping a certain time after we last transmitted.  This
  * lets the far side know we're still interested in this call and helps keep
@@ -122,6 +124,7 @@ int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping,
 	struct kvec iov[2];
 	rxrpc_serial_t serial;
 	rxrpc_seq_t hard_ack, top;
+	ktime_t now;
 	size_t len, n;
 	int ret;
 	u8 reason;
@@ -203,8 +206,10 @@ int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping,
 	}
 
 	ret = kernel_sendmsg(conn->params.local->socket, &msg, iov, 2, len);
+	now = ktime_get_real();
 	if (ping)
-		call->ping_time = ktime_get_real();
+		call->ping_time = now;
+	conn->params.peer->last_tx_at = ktime_get_real();
 
 	if (call->state < RXRPC_CALL_COMPLETE) {
 		if (ret < 0) {
@@ -288,6 +293,7 @@ int rxrpc_send_abort_packet(struct rxrpc_call *call)
 
 	ret = kernel_sendmsg(conn->params.local->socket,
 			     &msg, iov, 1, sizeof(pkt));
+	conn->params.peer->last_tx_at = ktime_get_real();
 
 	rxrpc_put_connection(conn);
 	return ret;
@@ -378,6 +384,7 @@ int rxrpc_send_data_packet(struct rxrpc_call *call, struct sk_buff *skb,
 	 *   message and update the peer record
 	 */
 	ret = kernel_sendmsg(conn->params.local->socket, &msg, iov, 2, len);
+	conn->params.peer->last_tx_at = ktime_get_real();
 
 	up_read(&conn->params.local->defrag_sem);
 	if (ret == -EMSGSIZE)
@@ -429,6 +436,7 @@ send_fragmentable:
 		if (ret == 0) {
 			ret = kernel_sendmsg(conn->params.local->socket, &msg,
 					     iov, 2, len);
+			conn->params.peer->last_tx_at = ktime_get_real();
 
 			opt = IP_PMTUDISC_DO;
 			kernel_setsockopt(conn->params.local->socket, SOL_IP,
@@ -446,6 +454,7 @@ send_fragmentable:
 		if (ret == 0) {
 			ret = kernel_sendmsg(conn->params.local->socket, &msg,
 					     iov, 2, len);
+			conn->params.peer->last_tx_at = ktime_get_real();
 
 			opt = IPV6_PMTUDISC_DO;
 			kernel_setsockopt(conn->params.local->socket,
@@ -515,3 +524,51 @@ void rxrpc_reject_packets(struct rxrpc_local *local)
 
 	_leave("");
 }
+
+/*
+ * Send a VERSION reply to a peer as a keepalive.
+ */
+void rxrpc_send_keepalive(struct rxrpc_peer *peer)
+{
+	struct rxrpc_wire_header whdr;
+	struct msghdr msg;
+	struct kvec iov[2];
+	size_t len;
+	int ret;
+
+	_enter("");
+
+	msg.msg_name	= &peer->srx.transport;
+	msg.msg_namelen	= peer->srx.transport_len;
+	msg.msg_control	= NULL;
+	msg.msg_controllen = 0;
+	msg.msg_flags	= 0;
+
+	whdr.epoch	= htonl(peer->local->rxnet->epoch);
+	whdr.cid	= 0;
+	whdr.callNumber	= 0;
+	whdr.seq	= 0;
+	whdr.serial	= 0;
+	whdr.type	= RXRPC_PACKET_TYPE_VERSION; /* Not client-initiated */
+	whdr.flags	= RXRPC_LAST_PACKET;
+	whdr.userStatus	= 0;
+	whdr.securityIndex = 0;
+	whdr._rsvd	= 0;
+	whdr.serviceId	= 0;
+
+	iov[0].iov_base	= &whdr;
+	iov[0].iov_len	= sizeof(whdr);
+	iov[1].iov_base	= (char *)rxrpc_keepalive_string;
+	iov[1].iov_len	= sizeof(rxrpc_keepalive_string);
+
+	len = iov[0].iov_len + iov[1].iov_len;
+
+	_proto("Tx VERSION (keepalive)");
+
+	ret = kernel_sendmsg(peer->local->socket, &msg, iov, 2, len);
+	if (ret < 0)
+		_debug("sendmsg failed: %d", ret);
+
+	peer->last_tx_at = ktime_get_real();
+	_leave("");
+}
diff --git a/net/rxrpc/peer_event.c b/net/rxrpc/peer_event.c
index 7f749505e699..78c2f95d1f22 100644
--- a/net/rxrpc/peer_event.c
+++ b/net/rxrpc/peer_event.c
@@ -192,7 +192,7 @@ void rxrpc_error_report(struct sock *sk)
 	rxrpc_free_skb(skb, rxrpc_skb_rx_freed);
 
 	/* The ref we obtained is passed off to the work item */
-	rxrpc_queue_work(&peer->error_distributor);
+	__rxrpc_queue_peer_error(peer);
 	_leave("");
 }
 
@@ -348,3 +348,99 @@ void rxrpc_peer_add_rtt(struct rxrpc_call *call, enum rxrpc_rtt_rx_trace why,
 	trace_rxrpc_rtt_rx(call, why, send_serial, resp_serial, rtt,
 			   usage, avg);
 }
+
+/*
+ * Perform keep-alive pings with VERSION packets to keep any NAT alive.
+ */
+void rxrpc_peer_keepalive_worker(struct work_struct *work)
+{
+	struct rxrpc_net *rxnet =
+		container_of(work, struct rxrpc_net, peer_keepalive_work);
+	struct rxrpc_peer *peer;
+	unsigned long delay;
+	ktime_t base, now = ktime_get_real();
+	s64 diff;
+	u8 cursor, slot;
+
+	base = rxnet->peer_keepalive_base;
+	cursor = rxnet->peer_keepalive_cursor;
+
+	_enter("%u,%lld", cursor, ktime_sub(now, base));
+
+next_bucket:
+	diff = ktime_to_ns(ktime_sub(now, base));
+	if (diff < 0)
+		goto resched;
+
+	_debug("at %u", cursor);
+	spin_lock_bh(&rxnet->peer_hash_lock);
+next_peer:
+	if (!rxnet->live) {
+		spin_unlock_bh(&rxnet->peer_hash_lock);
+		goto out;
+	}
+
+	/* Everything in the bucket at the cursor is processed this second; the
+	 * bucket at cursor + 1 goes now + 1s and so on...
+	 */
+	if (hlist_empty(&rxnet->peer_keepalive[cursor])) {
+		if (hlist_empty(&rxnet->peer_keepalive_new)) {
+			spin_unlock_bh(&rxnet->peer_hash_lock);
+			goto emptied_bucket;
+		}
+
+		hlist_move_list(&rxnet->peer_keepalive_new,
+				&rxnet->peer_keepalive[cursor]);
+	}
+
+	peer = hlist_entry(rxnet->peer_keepalive[cursor].first,
+			   struct rxrpc_peer, keepalive_link);
+	hlist_del_init(&peer->keepalive_link);
+	if (!rxrpc_get_peer_maybe(peer))
+		goto next_peer;
+
+	spin_unlock_bh(&rxnet->peer_hash_lock);
+
+	_debug("peer %u {%pISp}", peer->debug_id, &peer->srx.transport);
+
+recalc:
+	diff = ktime_divns(ktime_sub(peer->last_tx_at, base), NSEC_PER_SEC);
+	if (diff < -30 || diff > 30)
+		goto send; /* LSW of 64-bit time probably wrapped on 32-bit */
+	diff += RXRPC_KEEPALIVE_TIME - 1;
+	if (diff < 0)
+		goto send;
+
+	slot = (diff > RXRPC_KEEPALIVE_TIME - 1) ? RXRPC_KEEPALIVE_TIME - 1 : diff;
+	if (slot == 0)
+		goto send;
+
+	/* A transmission to this peer occurred since last we examined it so
+	 * put it into the appropriate future bucket.
+	 */
+	slot = (slot + cursor) % ARRAY_SIZE(rxnet->peer_keepalive);
+	spin_lock_bh(&rxnet->peer_hash_lock);
+	hlist_add_head(&peer->keepalive_link, &rxnet->peer_keepalive[slot]);
+	rxrpc_put_peer(peer);
+	goto next_peer;
+
+send:
+	rxrpc_send_keepalive(peer);
+	now = ktime_get_real();
+	goto recalc;
+
+emptied_bucket:
+	cursor++;
+	if (cursor >= ARRAY_SIZE(rxnet->peer_keepalive))
+		cursor = 0;
+	base = ktime_add_ns(base, NSEC_PER_SEC);
+	goto next_bucket;
+
+resched:
+	rxnet->peer_keepalive_base = base;
+	rxnet->peer_keepalive_cursor = cursor;
+	delay = nsecs_to_jiffies(-diff) + 1;
+	timer_reduce(&rxnet->peer_keepalive_timer, jiffies + delay);
+out:
+	_leave("");
+}
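The worker drains one bucket per second of accumulated backlog, re-filing recently-active peers into a future bucket and pinging the rest, then re-arms the timer once the wheel catches up with real time. A loose userspace simulation of that outer loop (hypothetical bucket contents, not kernel code):

/* Sketch: drain one bucket per elapsed second until the wheel catches
 * up with real time, then stop and re-arm.
 */
#include <stdio.h>

#define KEEPALIVE 20
#define NBUCKETS (KEEPALIVE + 1)

int main(void)
{
	int buckets[NBUCKETS] = { [0] = 2, [1] = 1 };	/* peers per bucket */
	unsigned int cursor = 0;
	long long base = 0, now = 2;	/* seconds; 2s of backlog to work off */

	while (now - base >= 0) {
		printf("second %lld: ping %d peer(s) from bucket %u\n",
		       base, buckets[cursor], cursor);
		buckets[cursor] = 0;
		cursor = (cursor + 1) % NBUCKETS;
		base++;		/* the bucket at cursor + 1 is for base + 1s */
	}
	printf("caught up; re-arm the keepalive timer\n");
	return 0;
}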
diff --git a/net/rxrpc/peer_object.c b/net/rxrpc/peer_object.c
index d02a99f37f5f..1b7e8107b3ae 100644
--- a/net/rxrpc/peer_object.c
+++ b/net/rxrpc/peer_object.c
@@ -322,6 +322,7 @@ struct rxrpc_peer *rxrpc_lookup_incoming_peer(struct rxrpc_local *local,
 	if (!peer) {
 		peer = prealloc;
 		hash_add_rcu(rxnet->peer_hash, &peer->hash_link, hash_key);
+		hlist_add_head(&peer->keepalive_link, &rxnet->peer_keepalive_new);
 	}
 
 	spin_unlock(&rxnet->peer_hash_lock);
@@ -363,9 +364,12 @@ struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_local *local,
 		peer = __rxrpc_lookup_peer_rcu(local, srx, hash_key);
 		if (peer && !rxrpc_get_peer_maybe(peer))
 			peer = NULL;
-		if (!peer)
+		if (!peer) {
 			hash_add_rcu(rxnet->peer_hash,
 				     &candidate->hash_link, hash_key);
+			hlist_add_head(&candidate->keepalive_link,
+				       &rxnet->peer_keepalive_new);
+		}
 
 		spin_unlock_bh(&rxnet->peer_hash_lock);
 
@@ -382,9 +386,54 @@ struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_local *local,
 }
 
 /*
- * Discard a ref on a remote peer record.
+ * Get a ref on a peer record.
+ */
+struct rxrpc_peer *rxrpc_get_peer(struct rxrpc_peer *peer)
+{
+	const void *here = __builtin_return_address(0);
+	int n;
+
+	n = atomic_inc_return(&peer->usage);
+	trace_rxrpc_peer(peer, rxrpc_peer_got, n, here);
+	return peer;
+}
+
+/*
+ * Get a ref on a peer record unless its usage has already reached 0.
+ */
+struct rxrpc_peer *rxrpc_get_peer_maybe(struct rxrpc_peer *peer)
+{
+	const void *here = __builtin_return_address(0);
+
+	if (peer) {
+		int n = __atomic_add_unless(&peer->usage, 1, 0);
+		if (n > 0)
+			trace_rxrpc_peer(peer, rxrpc_peer_got, n + 1, here);
+		else
+			peer = NULL;
+	}
+	return peer;
+}
+
+/*
+ * Queue a peer record.  This passes the caller's ref to the workqueue.
+ */
+void __rxrpc_queue_peer_error(struct rxrpc_peer *peer)
+{
+	const void *here = __builtin_return_address(0);
+	int n;
+
+	n = atomic_read(&peer->usage);
+	if (rxrpc_queue_work(&peer->error_distributor))
+		trace_rxrpc_peer(peer, rxrpc_peer_queued_error, n, here);
+	else
+		rxrpc_put_peer(peer);
+}
+
+/*
+ * Discard a peer record.
  */
-void __rxrpc_put_peer(struct rxrpc_peer *peer)
+static void __rxrpc_put_peer(struct rxrpc_peer *peer)
 {
 	struct rxrpc_net *rxnet = peer->local->rxnet;
 
@@ -392,11 +441,49 @@ void __rxrpc_put_peer(struct rxrpc_peer *peer)
 
 	spin_lock_bh(&rxnet->peer_hash_lock);
 	hash_del_rcu(&peer->hash_link);
+	hlist_del_init(&peer->keepalive_link);
 	spin_unlock_bh(&rxnet->peer_hash_lock);
 
 	kfree_rcu(peer, rcu);
 }
 
+/*
+ * Drop a ref on a peer record.
+ */
+void rxrpc_put_peer(struct rxrpc_peer *peer)
+{
+	const void *here = __builtin_return_address(0);
+	int n;
+
+	if (peer) {
+		n = atomic_dec_return(&peer->usage);
+		trace_rxrpc_peer(peer, rxrpc_peer_put, n, here);
+		if (n == 0)
+			__rxrpc_put_peer(peer);
+	}
+}
+
+/*
+ * Make sure all peer records have been discarded.
+ */
+void rxrpc_destroy_all_peers(struct rxrpc_net *rxnet)
+{
+	struct rxrpc_peer *peer;
+	int i;
+
+	for (i = 0; i < HASH_SIZE(rxnet->peer_hash); i++) {
+		if (hlist_empty(&rxnet->peer_hash[i]))
+			continue;
+
+		hlist_for_each_entry(peer, &rxnet->peer_hash[i], hash_link) {
+			pr_err("Leaked peer %u {%u} %pISp\n",
+			       peer->debug_id,
+			       atomic_read(&peer->usage),
+			       &peer->srx.transport);
+		}
+	}
+}
+
 /**
  * rxrpc_kernel_get_peer - Get the peer address of a call
  * @sock: The socket on which the call is in progress.
diff --git a/net/rxrpc/proc.c b/net/rxrpc/proc.c
index f79f260c6ddc..7e45db058823 100644
--- a/net/rxrpc/proc.c
+++ b/net/rxrpc/proc.c
@@ -29,6 +29,8 @@ static const char *const rxrpc_conn_states[RXRPC_CONN__NR_STATES] = {
  * generate a list of extant and dead calls in /proc/net/rxrpc_calls
  */
 static void *rxrpc_call_seq_start(struct seq_file *seq, loff_t *_pos)
+	__acquires(rcu)
+	__acquires(rxnet->call_lock)
 {
 	struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));
 
@@ -45,6 +47,8 @@ static void *rxrpc_call_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 }
 
 static void rxrpc_call_seq_stop(struct seq_file *seq, void *v)
+	__releases(rxnet->call_lock)
+	__releases(rcu)
 {
 	struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));
 
@@ -135,6 +139,7 @@ const struct file_operations rxrpc_call_seq_fops = {
  * generate a list of extant virtual connections in /proc/net/rxrpc_conns
  */
 static void *rxrpc_connection_seq_start(struct seq_file *seq, loff_t *_pos)
+	__acquires(rxnet->conn_lock)
 {
 	struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));
 
@@ -151,6 +156,7 @@ static void *rxrpc_connection_seq_next(struct seq_file *seq, void *v,
 }
 
 static void rxrpc_connection_seq_stop(struct seq_file *seq, void *v)
+	__releases(rxnet->conn_lock)
 {
 	struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));
 
diff --git a/net/rxrpc/rxkad.c b/net/rxrpc/rxkad.c
index 77cb23c7bd0a..588fea0dd362 100644
--- a/net/rxrpc/rxkad.c
+++ b/net/rxrpc/rxkad.c
@@ -668,6 +668,7 @@ static int rxkad_issue_challenge(struct rxrpc_connection *conn)
 		return -EAGAIN;
 	}
 
+	conn->params.peer->last_tx_at = ktime_get_real();
 	_leave(" = 0");
 	return 0;
 }
@@ -722,6 +723,7 @@ static int rxkad_send_response(struct rxrpc_connection *conn,
 		return -EAGAIN;
 	}
 
+	conn->params.peer->last_tx_at = ktime_get_real();
 	_leave(" = 0");
 	return 0;
 }
diff --git a/net/rxrpc/security.c b/net/rxrpc/security.c
index e9f428351293..c4479afe8ae7 100644
--- a/net/rxrpc/security.c
+++ b/net/rxrpc/security.c
@@ -19,9 +19,6 @@
 #include <keys/rxrpc-type.h>
 #include "ar-internal.h"
 
-static LIST_HEAD(rxrpc_security_methods);
-static DECLARE_RWSEM(rxrpc_security_sem);
-
 static const struct rxrpc_security *rxrpc_security_types[] = {
 	[RXRPC_SECURITY_NONE]	= &rxrpc_no_security,
 #ifdef CONFIG_RXKAD
diff --git a/net/rxrpc/sendmsg.c b/net/rxrpc/sendmsg.c
index 8503f279b467..206e802ccbdc 100644
--- a/net/rxrpc/sendmsg.c
+++ b/net/rxrpc/sendmsg.c
@@ -130,7 +130,9 @@ static inline void rxrpc_instant_resend(struct rxrpc_call *call, int ix)
 	spin_lock_bh(&call->lock);
 
 	if (call->state < RXRPC_CALL_COMPLETE) {
-		call->rxtx_annotations[ix] = RXRPC_TX_ANNO_RETRANS;
+		call->rxtx_annotations[ix] =
+			(call->rxtx_annotations[ix] & RXRPC_TX_ANNO_LAST) |
+			RXRPC_TX_ANNO_RETRANS;
 		if (!test_and_set_bit(RXRPC_CALL_EV_RESEND, &call->events))
 			rxrpc_queue_call(call);
 	}
@@ -554,6 +556,7 @@ static struct rxrpc_call *
 rxrpc_new_client_call_for_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg,
 				  struct rxrpc_send_params *p)
 	__releases(&rx->sk.sk_lock.slock)
+	__acquires(&call->user_mutex)
 {
 	struct rxrpc_conn_parameters cp;
 	struct rxrpc_call *call;
@@ -583,6 +586,7 @@ rxrpc_new_client_call_for_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg,
 				  atomic_inc_return(&rxrpc_debug_id));
 	/* The socket is now unlocked */
 
+	rxrpc_put_peer(cp.peer);
 	_leave(" = %p\n", call);
 	return call;
 }
@@ -594,6 +598,7 @@ rxrpc_new_client_call_for_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg,
 */
 int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len)
 	__releases(&rx->sk.sk_lock.slock)
+	__releases(&call->user_mutex)
 {
 	enum rxrpc_call_state state;
 	struct rxrpc_call *call;
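The rxrpc_instant_resend() change above is the Tx ring annotation fix from the cover letter: the LAST flag shares the annotation byte with the state bits, so a plain assignment of ANNO_RETRANS wiped it. A minimal C sketch with illustrative bit values (the real constants live in net/rxrpc/ar-internal.h):

/* Sketch: replace the state with a masked OR so the flag bit survives. */
#include <assert.h>
#include <stdint.h>

#define ANNO_RETRANS	0x03	/* hypothetical state value */
#define ANNO_LAST	0x40	/* hypothetical flag bit */

int main(void)
{
	uint8_t anno = 0x01 | ANNO_LAST;	/* some state + LAST flag */

	/* Broken: anno = ANNO_RETRANS;  -- this drops ANNO_LAST. */
	anno = (anno & ANNO_LAST) | ANNO_RETRANS;
	assert(anno & ANNO_LAST);	/* the flag survives the retransmit mark */
	return 0;
}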