 Documentation/networking/rxrpc.txt |  81
 net/rxrpc/Makefile                 |   5
 net/rxrpc/af_rxrpc.c               |   9
 net/rxrpc/ar-ack.c                 |  61
 net/rxrpc/ar-call.c                | 213
 net/rxrpc/ar-connection.c          |  10
 net/rxrpc/ar-error.c               |   1
 net/rxrpc/ar-input.c               | 190
 net/rxrpc/ar-internal.h            |  40
 net/rxrpc/ar-output.c              |  15
 net/rxrpc/ar-recvmsg.c             |  25
 net/rxrpc/ar-skbuff.c              |   7
 net/rxrpc/ar-transport.c           |  10
 net/rxrpc/sysctl.c                 | 146
 14 files changed, 645 insertions(+), 168 deletions(-)
diff --git a/Documentation/networking/rxrpc.txt b/Documentation/networking/rxrpc.txt
index b89bc82eed46..16a924c486bf 100644
--- a/Documentation/networking/rxrpc.txt
+++ b/Documentation/networking/rxrpc.txt
@@ -27,6 +27,8 @@ Contents of this document:
27 27
28 (*) AF_RXRPC kernel interface. 28 (*) AF_RXRPC kernel interface.
29 29
30 (*) Configurable parameters.
31
30 32
31======== 33========
32OVERVIEW 34OVERVIEW
@@ -864,3 +866,82 @@ The kernel interface functions are as follows:
864 866
865 This is used to allocate a null RxRPC key that can be used to indicate 867 This is used to allocate a null RxRPC key that can be used to indicate
866 anonymous security for a particular domain. 868 anonymous security for a particular domain.
869
870
871=======================
872CONFIGURABLE PARAMETERS
873=======================
874
875The RxRPC protocol driver has a number of configurable parameters that can be
 876adjusted through sysctls in /proc/sys/net/rxrpc/:
877
878 (*) req_ack_delay
879
880 The amount of time in milliseconds after receiving a packet with the
881 request-ack flag set before we honour the flag and actually send the
882 requested ack.
883
884 Usually the other side won't stop sending packets until the advertised
885 reception window is full (to a maximum of 255 packets), so delaying the
886 ACK permits several packets to be ACK'd in one go.
887
888 (*) soft_ack_delay
889
890 The amount of time in milliseconds after receiving a new packet before we
891 generate a soft-ACK to tell the sender that it doesn't need to resend.
892
893 (*) idle_ack_delay
894
895 The amount of time in milliseconds after all the packets currently in the
896 received queue have been consumed before we generate a hard-ACK to tell
 897 the sender it can free its buffers, assuming no other reason arises for
 898 us to send an ACK.
899
900 (*) resend_timeout
901
902 The amount of time in milliseconds after transmitting a packet before we
903 transmit it again, assuming no ACK is received from the receiver telling
904 us they got it.
905
906 (*) max_call_lifetime
907
908 The maximum amount of time in seconds that a call may be in progress
909 before we preemptively kill it.
910
911 (*) dead_call_expiry
912
913 The amount of time in seconds before we remove a dead call from the call
914 list. Dead calls are kept around for a little while for the purpose of
915 repeating ACK and ABORT packets.
916
917 (*) connection_expiry
918
919 The amount of time in seconds after a connection was last used before we
920 remove it from the connection list. Whilst a connection is in existence,
921 it serves as a placeholder for negotiated security; when it is deleted,
922 the security must be renegotiated.
923
924 (*) transport_expiry
925
926 The amount of time in seconds after a transport was last used before we
927 remove it from the transport list. Whilst a transport is in existence, it
928 serves to anchor the peer data and keeps the connection ID counter.
929
 930 (*) rx_window_size
931
932 The size of the receive window in packets. This is the maximum number of
933 unconsumed received packets we're willing to hold in memory for any
934 particular call.
935
 936 (*) rx_mtu
937
938 The maximum packet MTU size that we're willing to receive in bytes. This
939 indicates to the peer whether we're willing to accept jumbo packets.
940
 941 (*) rx_jumbo_max
942
943 The maximum number of packets that we're willing to accept in a jumbo
944 packet. Non-terminal packets in a jumbo packet must contain a four byte
945 header plus exactly 1412 bytes of data. The terminal packet must contain
946 a four byte header plus any amount of data. In any event, a jumbo packet
 947 may not exceed rx_mtu in size.
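
Following the rx_jumbo_max and rx_mtu descriptions above, the defaults work out
as: the three non-terminal packets of a four-packet jumbo occupy
3 * (4 + 1412) = 4248 bytes, so the terminal packet may carry at most
5692 - 4248 - 4 = 1440 bytes of data before the default rx_mtu would be
exceeded.

As an illustration only (not part of this patch), the sketch below shows how an
administrator-side program might query and adjust these tunables.  It assumes
the sysctls appear under /proc/sys/net/rxrpc/, which is where the table
registered by net/rxrpc/sysctl.c further down lands; the value written is an
arbitrary example, and resend_timeout is expressed in milliseconds.

/* Hypothetical userspace sketch: read rx_window_size, then raise
 * resend_timeout to 6000 ms.  Requires privilege to write the sysctl.
 */
#include <stdio.h>

int main(void)
{
        unsigned int rwind;
        FILE *f = fopen("/proc/sys/net/rxrpc/rx_window_size", "r");

        if (!f || fscanf(f, "%u", &rwind) != 1) {
                perror("rx_window_size");
                return 1;
        }
        fclose(f);
        printf("receive window: %u packets\n", rwind);

        f = fopen("/proc/sys/net/rxrpc/resend_timeout", "w");
        if (!f || fprintf(f, "6000\n") < 0) {
                perror("resend_timeout");
                return 1;
        }
        fclose(f);
        return 0;
}
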
diff --git a/net/rxrpc/Makefile b/net/rxrpc/Makefile
index d1c3429b69ed..ec126f91276b 100644
--- a/net/rxrpc/Makefile
+++ b/net/rxrpc/Makefile
@@ -20,9 +20,8 @@ af-rxrpc-y := \
20 ar-skbuff.o \ 20 ar-skbuff.o \
21 ar-transport.o 21 ar-transport.o
22 22
23ifeq ($(CONFIG_PROC_FS),y) 23af-rxrpc-$(CONFIG_PROC_FS) += ar-proc.o
24af-rxrpc-y += ar-proc.o 24af-rxrpc-$(CONFIG_SYSCTL) += sysctl.o
25endif
26 25
27obj-$(CONFIG_AF_RXRPC) += af-rxrpc.o 26obj-$(CONFIG_AF_RXRPC) += af-rxrpc.o
28 27
diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
index e61aa6001c65..7b1670489638 100644
--- a/net/rxrpc/af_rxrpc.c
+++ b/net/rxrpc/af_rxrpc.c
@@ -838,6 +838,12 @@ static int __init af_rxrpc_init(void)
838 goto error_key_type_s; 838 goto error_key_type_s;
839 } 839 }
840 840
841 ret = rxrpc_sysctl_init();
842 if (ret < 0) {
843 printk(KERN_CRIT "RxRPC: Cannot register sysctls\n");
844 goto error_sysctls;
845 }
846
841#ifdef CONFIG_PROC_FS 847#ifdef CONFIG_PROC_FS
842 proc_create("rxrpc_calls", 0, init_net.proc_net, &rxrpc_call_seq_fops); 848 proc_create("rxrpc_calls", 0, init_net.proc_net, &rxrpc_call_seq_fops);
843 proc_create("rxrpc_conns", 0, init_net.proc_net, 849 proc_create("rxrpc_conns", 0, init_net.proc_net,
@@ -845,6 +851,8 @@ static int __init af_rxrpc_init(void)
845#endif 851#endif
846 return 0; 852 return 0;
847 853
854error_sysctls:
855 unregister_key_type(&key_type_rxrpc_s);
848error_key_type_s: 856error_key_type_s:
849 unregister_key_type(&key_type_rxrpc); 857 unregister_key_type(&key_type_rxrpc);
850error_key_type: 858error_key_type:
@@ -865,6 +873,7 @@ error_call_jar:
865static void __exit af_rxrpc_exit(void) 873static void __exit af_rxrpc_exit(void)
866{ 874{
867 _enter(""); 875 _enter("");
876 rxrpc_sysctl_exit();
868 unregister_key_type(&key_type_rxrpc_s); 877 unregister_key_type(&key_type_rxrpc_s);
869 unregister_key_type(&key_type_rxrpc); 878 unregister_key_type(&key_type_rxrpc);
870 sock_unregister(PF_RXRPC); 879 sock_unregister(PF_RXRPC);
diff --git a/net/rxrpc/ar-ack.c b/net/rxrpc/ar-ack.c
index cd97a0ce48d8..c6be17a959a6 100644
--- a/net/rxrpc/ar-ack.c
+++ b/net/rxrpc/ar-ack.c
@@ -19,7 +19,49 @@
19#include <net/af_rxrpc.h> 19#include <net/af_rxrpc.h>
20#include "ar-internal.h" 20#include "ar-internal.h"
21 21
22static unsigned int rxrpc_ack_defer = 1; 22/*
23 * How long to wait before scheduling ACK generation after seeing a
24 * packet with RXRPC_REQUEST_ACK set (in jiffies).
25 */
26unsigned rxrpc_requested_ack_delay = 1;
27
28/*
29 * How long to wait before scheduling an ACK with subtype DELAY (in jiffies).
30 *
31 * We use this when we've received new data packets. If those packets aren't
32 * all consumed within this time we will send a DELAY ACK if an ACK was not
33 * requested to let the sender know it doesn't need to resend.
34 */
35unsigned rxrpc_soft_ack_delay = 1 * HZ;
36
37/*
38 * How long to wait before scheduling an ACK with subtype IDLE (in jiffies).
39 *
 40 * We use this when we've consumed some previously soft-ACK'd packets and
 41 * further packets aren't immediately received, to decide when to send an IDLE
 42 * ACK to let the other end know that it can free up its Tx buffer space.
43 */
44unsigned rxrpc_idle_ack_delay = 0.5 * HZ;
45
46/*
47 * Receive window size in packets. This indicates the maximum number of
48 * unconsumed received packets we're willing to retain in memory. Once this
49 * limit is hit, we should generate an EXCEEDS_WINDOW ACK and discard further
50 * packets.
51 */
52unsigned rxrpc_rx_window_size = 32;
53
54/*
55 * Maximum Rx MTU size. This indicates to the sender the size of jumbo packet
56 * made by gluing normal packets together that we're willing to handle.
57 */
58unsigned rxrpc_rx_mtu = 5692;
59
60/*
61 * The maximum number of fragments in a received jumbo packet that we tell the
62 * sender that we're willing to handle.
63 */
64unsigned rxrpc_rx_jumbo_max = 4;
23 65
24static const char *rxrpc_acks(u8 reason) 66static const char *rxrpc_acks(u8 reason)
25{ 67{
@@ -82,24 +124,23 @@ void __rxrpc_propose_ACK(struct rxrpc_call *call, u8 ack_reason,
82 switch (ack_reason) { 124 switch (ack_reason) {
83 case RXRPC_ACK_DELAY: 125 case RXRPC_ACK_DELAY:
84 _debug("run delay timer"); 126 _debug("run delay timer");
85 call->ack_timer.expires = jiffies + rxrpc_ack_timeout * HZ; 127 expiry = rxrpc_soft_ack_delay;
86 add_timer(&call->ack_timer); 128 goto run_timer;
87 return;
88 129
89 case RXRPC_ACK_IDLE: 130 case RXRPC_ACK_IDLE:
90 if (!immediate) { 131 if (!immediate) {
91 _debug("run defer timer"); 132 _debug("run defer timer");
92 expiry = 1; 133 expiry = rxrpc_idle_ack_delay;
93 goto run_timer; 134 goto run_timer;
94 } 135 }
95 goto cancel_timer; 136 goto cancel_timer;
96 137
97 case RXRPC_ACK_REQUESTED: 138 case RXRPC_ACK_REQUESTED:
98 if (!rxrpc_ack_defer) 139 expiry = rxrpc_requested_ack_delay;
140 if (!expiry)
99 goto cancel_timer; 141 goto cancel_timer;
100 if (!immediate || serial == cpu_to_be32(1)) { 142 if (!immediate || serial == cpu_to_be32(1)) {
101 _debug("run defer timer"); 143 _debug("run defer timer");
102 expiry = rxrpc_ack_defer;
103 goto run_timer; 144 goto run_timer;
104 } 145 }
105 146
@@ -1174,11 +1215,11 @@ send_ACK:
1174 mtu = call->conn->trans->peer->if_mtu; 1215 mtu = call->conn->trans->peer->if_mtu;
1175 mtu -= call->conn->trans->peer->hdrsize; 1216 mtu -= call->conn->trans->peer->hdrsize;
1176 ackinfo.maxMTU = htonl(mtu); 1217 ackinfo.maxMTU = htonl(mtu);
1177 ackinfo.rwind = htonl(32); 1218 ackinfo.rwind = htonl(rxrpc_rx_window_size);
1178 1219
1179 /* permit the peer to send us jumbo packets if it wants to */ 1220 /* permit the peer to send us jumbo packets if it wants to */
1180 ackinfo.rxMTU = htonl(5692); 1221 ackinfo.rxMTU = htonl(rxrpc_rx_mtu);
1181 ackinfo.jumbo_max = htonl(4); 1222 ackinfo.jumbo_max = htonl(rxrpc_rx_jumbo_max);
1182 1223
1183 hdr.serial = htonl(atomic_inc_return(&call->conn->serial)); 1224 hdr.serial = htonl(atomic_inc_return(&call->conn->serial));
1184 _proto("Tx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }", 1225 _proto("Tx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
diff --git a/net/rxrpc/ar-call.c b/net/rxrpc/ar-call.c
index a3bbb360a3f9..a9e05db0f5d5 100644
--- a/net/rxrpc/ar-call.c
+++ b/net/rxrpc/ar-call.c
@@ -12,10 +12,22 @@
12#include <linux/slab.h> 12#include <linux/slab.h>
13#include <linux/module.h> 13#include <linux/module.h>
14#include <linux/circ_buf.h> 14#include <linux/circ_buf.h>
15#include <linux/hashtable.h>
16#include <linux/spinlock_types.h>
15#include <net/sock.h> 17#include <net/sock.h>
16#include <net/af_rxrpc.h> 18#include <net/af_rxrpc.h>
17#include "ar-internal.h" 19#include "ar-internal.h"
18 20
21/*
22 * Maximum lifetime of a call (in jiffies).
23 */
24unsigned rxrpc_max_call_lifetime = 60 * HZ;
25
26/*
27 * Time till dead call expires after last use (in jiffies).
28 */
29unsigned rxrpc_dead_call_expiry = 2 * HZ;
30
19const char *const rxrpc_call_states[] = { 31const char *const rxrpc_call_states[] = {
20 [RXRPC_CALL_CLIENT_SEND_REQUEST] = "ClSndReq", 32 [RXRPC_CALL_CLIENT_SEND_REQUEST] = "ClSndReq",
21 [RXRPC_CALL_CLIENT_AWAIT_REPLY] = "ClAwtRpl", 33 [RXRPC_CALL_CLIENT_AWAIT_REPLY] = "ClAwtRpl",
@@ -38,8 +50,6 @@ const char *const rxrpc_call_states[] = {
38struct kmem_cache *rxrpc_call_jar; 50struct kmem_cache *rxrpc_call_jar;
39LIST_HEAD(rxrpc_calls); 51LIST_HEAD(rxrpc_calls);
40DEFINE_RWLOCK(rxrpc_call_lock); 52DEFINE_RWLOCK(rxrpc_call_lock);
41static unsigned int rxrpc_call_max_lifetime = 60;
42static unsigned int rxrpc_dead_call_timeout = 2;
43 53
44static void rxrpc_destroy_call(struct work_struct *work); 54static void rxrpc_destroy_call(struct work_struct *work);
45static void rxrpc_call_life_expired(unsigned long _call); 55static void rxrpc_call_life_expired(unsigned long _call);
@@ -47,6 +57,145 @@ static void rxrpc_dead_call_expired(unsigned long _call);
47static void rxrpc_ack_time_expired(unsigned long _call); 57static void rxrpc_ack_time_expired(unsigned long _call);
48static void rxrpc_resend_time_expired(unsigned long _call); 58static void rxrpc_resend_time_expired(unsigned long _call);
49 59
60static DEFINE_SPINLOCK(rxrpc_call_hash_lock);
61static DEFINE_HASHTABLE(rxrpc_call_hash, 10);
62
63/*
64 * Hash function for rxrpc_call_hash
65 */
66static unsigned long rxrpc_call_hashfunc(
67 u8 clientflag,
68 __be32 cid,
69 __be32 call_id,
70 __be32 epoch,
71 __be16 service_id,
72 sa_family_t proto,
73 void *localptr,
74 unsigned int addr_size,
75 const u8 *peer_addr)
76{
77 const u16 *p;
78 unsigned int i;
79 unsigned long key;
80 u32 hcid = ntohl(cid);
81
82 _enter("");
83
84 key = (unsigned long)localptr;
85 /* We just want to add up the __be32 values, so forcing the
86 * cast should be okay.
87 */
88 key += (__force u32)epoch;
89 key += (__force u16)service_id;
90 key += (__force u32)call_id;
91 key += (hcid & RXRPC_CIDMASK) >> RXRPC_CIDSHIFT;
92 key += hcid & RXRPC_CHANNELMASK;
93 key += clientflag;
94 key += proto;
95 /* Step through the peer address in 16-bit portions for speed */
96 for (i = 0, p = (const u16 *)peer_addr; i < addr_size >> 1; i++, p++)
97 key += *p;
98 _leave(" key = 0x%lx", key);
99 return key;
100}
101
102/*
103 * Add a call to the hashtable
104 */
105static void rxrpc_call_hash_add(struct rxrpc_call *call)
106{
107 unsigned long key;
108 unsigned int addr_size = 0;
109
110 _enter("");
111 switch (call->proto) {
112 case AF_INET:
113 addr_size = sizeof(call->peer_ip.ipv4_addr);
114 break;
115 case AF_INET6:
116 addr_size = sizeof(call->peer_ip.ipv6_addr);
117 break;
118 default:
119 break;
120 }
121 key = rxrpc_call_hashfunc(call->in_clientflag, call->cid,
122 call->call_id, call->epoch,
123 call->service_id, call->proto,
124 call->conn->trans->local, addr_size,
125 call->peer_ip.ipv6_addr);
126 /* Store the full key in the call */
127 call->hash_key = key;
128 spin_lock(&rxrpc_call_hash_lock);
129 hash_add_rcu(rxrpc_call_hash, &call->hash_node, key);
130 spin_unlock(&rxrpc_call_hash_lock);
131 _leave("");
132}
133
134/*
135 * Remove a call from the hashtable
136 */
137static void rxrpc_call_hash_del(struct rxrpc_call *call)
138{
139 _enter("");
140 spin_lock(&rxrpc_call_hash_lock);
141 hash_del_rcu(&call->hash_node);
142 spin_unlock(&rxrpc_call_hash_lock);
143 _leave("");
144}
145
146/*
147 * Find a call in the hashtable and return it, or NULL if it
148 * isn't there.
149 */
150struct rxrpc_call *rxrpc_find_call_hash(
151 u8 clientflag,
152 __be32 cid,
153 __be32 call_id,
154 __be32 epoch,
155 __be16 service_id,
156 void *localptr,
157 sa_family_t proto,
158 const u8 *peer_addr)
159{
160 unsigned long key;
161 unsigned int addr_size = 0;
162 struct rxrpc_call *call = NULL;
163 struct rxrpc_call *ret = NULL;
164
165 _enter("");
166 switch (proto) {
167 case AF_INET:
168 addr_size = sizeof(call->peer_ip.ipv4_addr);
169 break;
170 case AF_INET6:
171 addr_size = sizeof(call->peer_ip.ipv6_addr);
172 break;
173 default:
174 break;
175 }
176
177 key = rxrpc_call_hashfunc(clientflag, cid, call_id, epoch,
178 service_id, proto, localptr, addr_size,
179 peer_addr);
180 hash_for_each_possible_rcu(rxrpc_call_hash, call, hash_node, key) {
181 if (call->hash_key == key &&
182 call->call_id == call_id &&
183 call->cid == cid &&
184 call->in_clientflag == clientflag &&
185 call->service_id == service_id &&
186 call->proto == proto &&
187 call->local == localptr &&
188 memcmp(call->peer_ip.ipv6_addr, peer_addr,
189 addr_size) == 0 &&
190 call->epoch == epoch) {
191 ret = call;
192 break;
193 }
194 }
195 _leave(" = %p", ret);
196 return ret;
197}
198
50/* 199/*
51 * allocate a new call 200 * allocate a new call
52 */ 201 */
@@ -91,7 +240,7 @@ static struct rxrpc_call *rxrpc_alloc_call(gfp_t gfp)
91 call->rx_data_expect = 1; 240 call->rx_data_expect = 1;
92 call->rx_data_eaten = 0; 241 call->rx_data_eaten = 0;
93 call->rx_first_oos = 0; 242 call->rx_first_oos = 0;
94 call->ackr_win_top = call->rx_data_eaten + 1 + RXRPC_MAXACKS; 243 call->ackr_win_top = call->rx_data_eaten + 1 + rxrpc_rx_window_size;
95 call->creation_jif = jiffies; 244 call->creation_jif = jiffies;
96 return call; 245 return call;
97} 246}
@@ -128,11 +277,31 @@ static struct rxrpc_call *rxrpc_alloc_client_call(
128 return ERR_PTR(ret); 277 return ERR_PTR(ret);
129 } 278 }
130 279
280 /* Record copies of information for hashtable lookup */
281 call->proto = rx->proto;
282 call->local = trans->local;
283 switch (call->proto) {
284 case AF_INET:
285 call->peer_ip.ipv4_addr =
286 trans->peer->srx.transport.sin.sin_addr.s_addr;
287 break;
288 case AF_INET6:
289 memcpy(call->peer_ip.ipv6_addr,
290 trans->peer->srx.transport.sin6.sin6_addr.in6_u.u6_addr8,
291 sizeof(call->peer_ip.ipv6_addr));
292 break;
293 }
294 call->epoch = call->conn->epoch;
295 call->service_id = call->conn->service_id;
296 call->in_clientflag = call->conn->in_clientflag;
297 /* Add the new call to the hashtable */
298 rxrpc_call_hash_add(call);
299
131 spin_lock(&call->conn->trans->peer->lock); 300 spin_lock(&call->conn->trans->peer->lock);
132 list_add(&call->error_link, &call->conn->trans->peer->error_targets); 301 list_add(&call->error_link, &call->conn->trans->peer->error_targets);
133 spin_unlock(&call->conn->trans->peer->lock); 302 spin_unlock(&call->conn->trans->peer->lock);
134 303
135 call->lifetimer.expires = jiffies + rxrpc_call_max_lifetime * HZ; 304 call->lifetimer.expires = jiffies + rxrpc_max_call_lifetime;
136 add_timer(&call->lifetimer); 305 add_timer(&call->lifetimer);
137 306
138 _leave(" = %p", call); 307 _leave(" = %p", call);
@@ -320,9 +489,12 @@ struct rxrpc_call *rxrpc_incoming_call(struct rxrpc_sock *rx,
320 parent = *p; 489 parent = *p;
321 call = rb_entry(parent, struct rxrpc_call, conn_node); 490 call = rb_entry(parent, struct rxrpc_call, conn_node);
322 491
323 if (call_id < call->call_id) 492 /* The tree is sorted in order of the __be32 value without
493 * turning it into host order.
494 */
495 if ((__force u32)call_id < (__force u32)call->call_id)
324 p = &(*p)->rb_left; 496 p = &(*p)->rb_left;
325 else if (call_id > call->call_id) 497 else if ((__force u32)call_id > (__force u32)call->call_id)
326 p = &(*p)->rb_right; 498 p = &(*p)->rb_right;
327 else 499 else
328 goto old_call; 500 goto old_call;
@@ -347,9 +519,31 @@ struct rxrpc_call *rxrpc_incoming_call(struct rxrpc_sock *rx,
347 list_add_tail(&call->link, &rxrpc_calls); 519 list_add_tail(&call->link, &rxrpc_calls);
348 write_unlock_bh(&rxrpc_call_lock); 520 write_unlock_bh(&rxrpc_call_lock);
349 521
522 /* Record copies of information for hashtable lookup */
523 call->proto = rx->proto;
524 call->local = conn->trans->local;
525 switch (call->proto) {
526 case AF_INET:
527 call->peer_ip.ipv4_addr =
528 conn->trans->peer->srx.transport.sin.sin_addr.s_addr;
529 break;
530 case AF_INET6:
531 memcpy(call->peer_ip.ipv6_addr,
532 conn->trans->peer->srx.transport.sin6.sin6_addr.in6_u.u6_addr8,
533 sizeof(call->peer_ip.ipv6_addr));
534 break;
535 default:
536 break;
537 }
538 call->epoch = conn->epoch;
539 call->service_id = conn->service_id;
540 call->in_clientflag = conn->in_clientflag;
541 /* Add the new call to the hashtable */
542 rxrpc_call_hash_add(call);
543
350 _net("CALL incoming %d on CONN %d", call->debug_id, call->conn->debug_id); 544 _net("CALL incoming %d on CONN %d", call->debug_id, call->conn->debug_id);
351 545
352 call->lifetimer.expires = jiffies + rxrpc_call_max_lifetime * HZ; 546 call->lifetimer.expires = jiffies + rxrpc_max_call_lifetime;
353 add_timer(&call->lifetimer); 547 add_timer(&call->lifetimer);
354 _leave(" = %p {%d} [new]", call, call->debug_id); 548 _leave(" = %p {%d} [new]", call, call->debug_id);
355 return call; 549 return call;
@@ -533,7 +727,7 @@ void rxrpc_release_call(struct rxrpc_call *call)
533 del_timer_sync(&call->resend_timer); 727 del_timer_sync(&call->resend_timer);
534 del_timer_sync(&call->ack_timer); 728 del_timer_sync(&call->ack_timer);
535 del_timer_sync(&call->lifetimer); 729 del_timer_sync(&call->lifetimer);
536 call->deadspan.expires = jiffies + rxrpc_dead_call_timeout * HZ; 730 call->deadspan.expires = jiffies + rxrpc_dead_call_expiry;
537 add_timer(&call->deadspan); 731 add_timer(&call->deadspan);
538 732
539 _leave(""); 733 _leave("");
@@ -665,6 +859,9 @@ static void rxrpc_cleanup_call(struct rxrpc_call *call)
665 rxrpc_put_connection(call->conn); 859 rxrpc_put_connection(call->conn);
666 } 860 }
667 861
862 /* Remove the call from the hash */
863 rxrpc_call_hash_del(call);
864
668 if (call->acks_window) { 865 if (call->acks_window) {
669 _debug("kill Tx window %d", 866 _debug("kill Tx window %d",
670 CIRC_CNT(call->acks_head, call->acks_tail, 867 CIRC_CNT(call->acks_head, call->acks_tail,
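
To make the hash scheme added to ar-call.c above concrete, here is a small
standalone sketch (not from the patch) that mirrors rxrpc_call_hashfunc() for
an IPv4 peer.  The mask and shift values are assumptions matching the usual
RxRPC layout (channel number in the low two bits of the connection ID), the
inputs in main() are made up, and the kernel version's byte-order casts are
ignored.

/* Illustration only: accumulate a call hash key the way
 * rxrpc_call_hashfunc() does.  Mask/shift values are assumed; all
 * inputs are made-up examples.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>

#define RXRPC_CHANNELMASK 3UL   /* assumed: channel in low two bits of CID */
#define RXRPC_CIDMASK     (~RXRPC_CHANNELMASK)
#define RXRPC_CIDSHIFT    2

static unsigned long call_hash_key(uint8_t clientflag, uint32_t cid,
                                   uint32_t call_id, uint32_t epoch,
                                   uint16_t service_id, int proto,
                                   const void *localptr,
                                   const uint8_t *peer_addr, size_t addr_size)
{
        unsigned long key = (unsigned long)localptr;
        uint16_t chunk;
        size_t i;

        /* Sum the identifying fields, as the kernel function does. */
        key += epoch;
        key += service_id;
        key += call_id;
        key += (cid & RXRPC_CIDMASK) >> RXRPC_CIDSHIFT;
        key += cid & RXRPC_CHANNELMASK;
        key += clientflag;
        key += proto;

        /* Step through the peer address in 16-bit portions. */
        for (i = 0; i + 1 < addr_size; i += 2) {
                memcpy(&chunk, peer_addr + i, sizeof(chunk));
                key += chunk;
        }
        return key;
}

int main(void)
{
        const uint8_t peer[4] = { 192, 168, 0, 1 };     /* made-up IPv4 peer */

        printf("key = 0x%lx\n",
               call_hash_key(0, 0x12345678, 42, 0x55aa55aa, 2, AF_INET,
                             NULL, peer, sizeof(peer)));
        return 0;
}

The key only selects a hash bucket; rxrpc_find_call_hash() still compares every
field, so colliding keys are tolerated.
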
diff --git a/net/rxrpc/ar-connection.c b/net/rxrpc/ar-connection.c
index 7bf5b5b9e8b9..6631f4f1e39b 100644
--- a/net/rxrpc/ar-connection.c
+++ b/net/rxrpc/ar-connection.c
@@ -18,11 +18,15 @@
18#include <net/af_rxrpc.h> 18#include <net/af_rxrpc.h>
19#include "ar-internal.h" 19#include "ar-internal.h"
20 20
21/*
22 * Time till a connection expires after last use (in seconds).
23 */
24unsigned rxrpc_connection_expiry = 10 * 60;
25
21static void rxrpc_connection_reaper(struct work_struct *work); 26static void rxrpc_connection_reaper(struct work_struct *work);
22 27
23LIST_HEAD(rxrpc_connections); 28LIST_HEAD(rxrpc_connections);
24DEFINE_RWLOCK(rxrpc_connection_lock); 29DEFINE_RWLOCK(rxrpc_connection_lock);
25static unsigned long rxrpc_connection_timeout = 10 * 60;
26static DECLARE_DELAYED_WORK(rxrpc_connection_reap, rxrpc_connection_reaper); 30static DECLARE_DELAYED_WORK(rxrpc_connection_reap, rxrpc_connection_reaper);
27 31
28/* 32/*
@@ -862,7 +866,7 @@ static void rxrpc_connection_reaper(struct work_struct *work)
862 866
863 spin_lock(&conn->trans->client_lock); 867 spin_lock(&conn->trans->client_lock);
864 write_lock(&conn->trans->conn_lock); 868 write_lock(&conn->trans->conn_lock);
865 reap_time = conn->put_time + rxrpc_connection_timeout; 869 reap_time = conn->put_time + rxrpc_connection_expiry;
866 870
867 if (atomic_read(&conn->usage) > 0) { 871 if (atomic_read(&conn->usage) > 0) {
868 ; 872 ;
@@ -916,7 +920,7 @@ void __exit rxrpc_destroy_all_connections(void)
916{ 920{
917 _enter(""); 921 _enter("");
918 922
919 rxrpc_connection_timeout = 0; 923 rxrpc_connection_expiry = 0;
920 cancel_delayed_work(&rxrpc_connection_reap); 924 cancel_delayed_work(&rxrpc_connection_reap);
921 rxrpc_queue_delayed_work(&rxrpc_connection_reap, 0); 925 rxrpc_queue_delayed_work(&rxrpc_connection_reap, 0);
922 926
diff --git a/net/rxrpc/ar-error.c b/net/rxrpc/ar-error.c
index a9206087b4d7..db57458c824c 100644
--- a/net/rxrpc/ar-error.c
+++ b/net/rxrpc/ar-error.c
@@ -83,6 +83,7 @@ void rxrpc_UDP_error_report(struct sock *sk)
83 83
84 if (mtu == 0) { 84 if (mtu == 0) {
85 /* they didn't give us a size, estimate one */ 85 /* they didn't give us a size, estimate one */
86 mtu = peer->if_mtu;
86 if (mtu > 1500) { 87 if (mtu > 1500) {
87 mtu >>= 1; 88 mtu >>= 1;
88 if (mtu < 1500) 89 if (mtu < 1500)
diff --git a/net/rxrpc/ar-input.c b/net/rxrpc/ar-input.c
index 529572f18d1f..73742647c135 100644
--- a/net/rxrpc/ar-input.c
+++ b/net/rxrpc/ar-input.c
@@ -25,8 +25,6 @@
25#include <net/net_namespace.h> 25#include <net/net_namespace.h>
26#include "ar-internal.h" 26#include "ar-internal.h"
27 27
28unsigned long rxrpc_ack_timeout = 1;
29
30const char *rxrpc_pkts[] = { 28const char *rxrpc_pkts[] = {
31 "?00", 29 "?00",
32 "DATA", "ACK", "BUSY", "ABORT", "ACKALL", "CHALL", "RESP", "DEBUG", 30 "DATA", "ACK", "BUSY", "ABORT", "ACKALL", "CHALL", "RESP", "DEBUG",
@@ -349,8 +347,7 @@ void rxrpc_fast_process_packet(struct rxrpc_call *call, struct sk_buff *skb)
349 * it */ 347 * it */
350 if (sp->hdr.flags & RXRPC_REQUEST_ACK) { 348 if (sp->hdr.flags & RXRPC_REQUEST_ACK) {
351 _proto("ACK Requested on %%%u", serial); 349 _proto("ACK Requested on %%%u", serial);
352 rxrpc_propose_ACK(call, RXRPC_ACK_REQUESTED, sp->hdr.serial, 350 rxrpc_propose_ACK(call, RXRPC_ACK_REQUESTED, sp->hdr.serial, false);
353 !(sp->hdr.flags & RXRPC_MORE_PACKETS));
354 } 351 }
355 352
356 switch (sp->hdr.type) { 353 switch (sp->hdr.type) {
@@ -526,36 +523,38 @@ protocol_error:
526 * post an incoming packet to the appropriate call/socket to deal with 523 * post an incoming packet to the appropriate call/socket to deal with
527 * - must get rid of the sk_buff, either by freeing it or by queuing it 524 * - must get rid of the sk_buff, either by freeing it or by queuing it
528 */ 525 */
529static void rxrpc_post_packet_to_call(struct rxrpc_connection *conn, 526static void rxrpc_post_packet_to_call(struct rxrpc_call *call,
530 struct sk_buff *skb) 527 struct sk_buff *skb)
531{ 528{
532 struct rxrpc_skb_priv *sp; 529 struct rxrpc_skb_priv *sp;
533 struct rxrpc_call *call;
534 struct rb_node *p;
535 __be32 call_id;
536
537 _enter("%p,%p", conn, skb);
538 530
539 read_lock_bh(&conn->lock); 531 _enter("%p,%p", call, skb);
540 532
541 sp = rxrpc_skb(skb); 533 sp = rxrpc_skb(skb);
542 534
543 /* look at extant calls by channel number first */
544 call = conn->channels[ntohl(sp->hdr.cid) & RXRPC_CHANNELMASK];
545 if (!call || call->call_id != sp->hdr.callNumber)
546 goto call_not_extant;
547
548 _debug("extant call [%d]", call->state); 535 _debug("extant call [%d]", call->state);
549 ASSERTCMP(call->conn, ==, conn);
550 536
551 read_lock(&call->state_lock); 537 read_lock(&call->state_lock);
552 switch (call->state) { 538 switch (call->state) {
553 case RXRPC_CALL_LOCALLY_ABORTED: 539 case RXRPC_CALL_LOCALLY_ABORTED:
554 if (!test_and_set_bit(RXRPC_CALL_ABORT, &call->events)) 540 if (!test_and_set_bit(RXRPC_CALL_ABORT, &call->events)) {
555 rxrpc_queue_call(call); 541 rxrpc_queue_call(call);
542 goto free_unlock;
543 }
556 case RXRPC_CALL_REMOTELY_ABORTED: 544 case RXRPC_CALL_REMOTELY_ABORTED:
557 case RXRPC_CALL_NETWORK_ERROR: 545 case RXRPC_CALL_NETWORK_ERROR:
558 case RXRPC_CALL_DEAD: 546 case RXRPC_CALL_DEAD:
547 goto dead_call;
548 case RXRPC_CALL_COMPLETE:
549 case RXRPC_CALL_CLIENT_FINAL_ACK:
550 /* complete server call */
551 if (call->conn->in_clientflag)
552 goto dead_call;
553 /* resend last packet of a completed call */
554 _debug("final ack again");
555 rxrpc_get_call(call);
556 set_bit(RXRPC_CALL_ACK_FINAL, &call->events);
557 rxrpc_queue_call(call);
559 goto free_unlock; 558 goto free_unlock;
560 default: 559 default:
561 break; 560 break;
@@ -563,7 +562,6 @@ static void rxrpc_post_packet_to_call(struct rxrpc_connection *conn,
563 562
564 read_unlock(&call->state_lock); 563 read_unlock(&call->state_lock);
565 rxrpc_get_call(call); 564 rxrpc_get_call(call);
566 read_unlock_bh(&conn->lock);
567 565
568 if (sp->hdr.type == RXRPC_PACKET_TYPE_DATA && 566 if (sp->hdr.type == RXRPC_PACKET_TYPE_DATA &&
569 sp->hdr.flags & RXRPC_JUMBO_PACKET) 567 sp->hdr.flags & RXRPC_JUMBO_PACKET)
@@ -574,78 +572,16 @@ static void rxrpc_post_packet_to_call(struct rxrpc_connection *conn,
574 rxrpc_put_call(call); 572 rxrpc_put_call(call);
575 goto done; 573 goto done;
576 574
577call_not_extant:
578 /* search the completed calls in case what we're dealing with is
579 * there */
580 _debug("call not extant");
581
582 call_id = sp->hdr.callNumber;
583 p = conn->calls.rb_node;
584 while (p) {
585 call = rb_entry(p, struct rxrpc_call, conn_node);
586
587 if (call_id < call->call_id)
588 p = p->rb_left;
589 else if (call_id > call->call_id)
590 p = p->rb_right;
591 else
592 goto found_completed_call;
593 }
594
595dead_call: 575dead_call:
596 /* it's a either a really old call that we no longer remember or its a 576 if (sp->hdr.type != RXRPC_PACKET_TYPE_ABORT) {
597 * new incoming call */ 577 skb->priority = RX_CALL_DEAD;
598 read_unlock_bh(&conn->lock); 578 rxrpc_reject_packet(call->conn->trans->local, skb);
599 579 goto unlock;
600 if (sp->hdr.flags & RXRPC_CLIENT_INITIATED &&
601 sp->hdr.seq == cpu_to_be32(1)) {
602 _debug("incoming call");
603 skb_queue_tail(&conn->trans->local->accept_queue, skb);
604 rxrpc_queue_work(&conn->trans->local->acceptor);
605 goto done;
606 }
607
608 _debug("dead call");
609 skb->priority = RX_CALL_DEAD;
610 rxrpc_reject_packet(conn->trans->local, skb);
611 goto done;
612
613 /* resend last packet of a completed call
614 * - client calls may have been aborted or ACK'd
615 * - server calls may have been aborted
616 */
617found_completed_call:
618 _debug("completed call");
619
620 if (atomic_read(&call->usage) == 0)
621 goto dead_call;
622
623 /* synchronise any state changes */
624 read_lock(&call->state_lock);
625 ASSERTIFCMP(call->state != RXRPC_CALL_CLIENT_FINAL_ACK,
626 call->state, >=, RXRPC_CALL_COMPLETE);
627
628 if (call->state == RXRPC_CALL_LOCALLY_ABORTED ||
629 call->state == RXRPC_CALL_REMOTELY_ABORTED ||
630 call->state == RXRPC_CALL_DEAD) {
631 read_unlock(&call->state_lock);
632 goto dead_call;
633 }
634
635 if (call->conn->in_clientflag) {
636 read_unlock(&call->state_lock);
637 goto dead_call; /* complete server call */
638 } 580 }
639
640 _debug("final ack again");
641 rxrpc_get_call(call);
642 set_bit(RXRPC_CALL_ACK_FINAL, &call->events);
643 rxrpc_queue_call(call);
644
645free_unlock: 581free_unlock:
646 read_unlock(&call->state_lock);
647 read_unlock_bh(&conn->lock);
648 rxrpc_free_skb(skb); 582 rxrpc_free_skb(skb);
583unlock:
584 read_unlock(&call->state_lock);
649done: 585done:
650 _leave(""); 586 _leave("");
651} 587}
@@ -664,17 +600,42 @@ static void rxrpc_post_packet_to_conn(struct rxrpc_connection *conn,
664 rxrpc_queue_conn(conn); 600 rxrpc_queue_conn(conn);
665} 601}
666 602
603static struct rxrpc_connection *rxrpc_conn_from_local(struct rxrpc_local *local,
604 struct sk_buff *skb,
605 struct rxrpc_skb_priv *sp)
606{
607 struct rxrpc_peer *peer;
608 struct rxrpc_transport *trans;
609 struct rxrpc_connection *conn;
610
611 peer = rxrpc_find_peer(local, ip_hdr(skb)->saddr,
612 udp_hdr(skb)->source);
613 if (IS_ERR(peer))
614 goto cant_find_conn;
615
616 trans = rxrpc_find_transport(local, peer);
617 rxrpc_put_peer(peer);
618 if (!trans)
619 goto cant_find_conn;
620
621 conn = rxrpc_find_connection(trans, &sp->hdr);
622 rxrpc_put_transport(trans);
623 if (!conn)
624 goto cant_find_conn;
625
626 return conn;
627cant_find_conn:
628 return NULL;
629}
630
667/* 631/*
668 * handle data received on the local endpoint 632 * handle data received on the local endpoint
669 * - may be called in interrupt context 633 * - may be called in interrupt context
670 */ 634 */
671void rxrpc_data_ready(struct sock *sk, int count) 635void rxrpc_data_ready(struct sock *sk, int count)
672{ 636{
673 struct rxrpc_connection *conn;
674 struct rxrpc_transport *trans;
675 struct rxrpc_skb_priv *sp; 637 struct rxrpc_skb_priv *sp;
676 struct rxrpc_local *local; 638 struct rxrpc_local *local;
677 struct rxrpc_peer *peer;
678 struct sk_buff *skb; 639 struct sk_buff *skb;
679 int ret; 640 int ret;
680 641
@@ -749,27 +710,34 @@ void rxrpc_data_ready(struct sock *sk, int count)
749 (sp->hdr.callNumber == 0 || sp->hdr.seq == 0)) 710 (sp->hdr.callNumber == 0 || sp->hdr.seq == 0))
750 goto bad_message; 711 goto bad_message;
751 712
752 peer = rxrpc_find_peer(local, ip_hdr(skb)->saddr, udp_hdr(skb)->source); 713 if (sp->hdr.callNumber == 0) {
753 if (IS_ERR(peer)) 714 /* This is a connection-level packet. These should be
754 goto cant_route_call; 715 * fairly rare, so the extra overhead of looking them up the
716 * old-fashioned way doesn't really hurt */
717 struct rxrpc_connection *conn;
755 718
756 trans = rxrpc_find_transport(local, peer); 719 conn = rxrpc_conn_from_local(local, skb, sp);
757 rxrpc_put_peer(peer); 720 if (!conn)
758 if (!trans) 721 goto cant_route_call;
759 goto cant_route_call;
760 722
761 conn = rxrpc_find_connection(trans, &sp->hdr); 723 _debug("CONN %p {%d}", conn, conn->debug_id);
762 rxrpc_put_transport(trans);
763 if (!conn)
764 goto cant_route_call;
765
766 _debug("CONN %p {%d}", conn, conn->debug_id);
767
768 if (sp->hdr.callNumber == 0)
769 rxrpc_post_packet_to_conn(conn, skb); 724 rxrpc_post_packet_to_conn(conn, skb);
770 else 725 rxrpc_put_connection(conn);
771 rxrpc_post_packet_to_call(conn, skb); 726 } else {
772 rxrpc_put_connection(conn); 727 struct rxrpc_call *call;
728 u8 in_clientflag = 0;
729
730 if (sp->hdr.flags & RXRPC_CLIENT_INITIATED)
731 in_clientflag = RXRPC_CLIENT_INITIATED;
732 call = rxrpc_find_call_hash(in_clientflag, sp->hdr.cid,
733 sp->hdr.callNumber, sp->hdr.epoch,
734 sp->hdr.serviceId, local, AF_INET,
735 (u8 *)&ip_hdr(skb)->saddr);
736 if (call)
737 rxrpc_post_packet_to_call(call, skb);
738 else
739 goto cant_route_call;
740 }
773 rxrpc_put_local(local); 741 rxrpc_put_local(local);
774 return; 742 return;
775 743
@@ -790,8 +758,10 @@ cant_route_call:
790 skb->priority = RX_CALL_DEAD; 758 skb->priority = RX_CALL_DEAD;
791 } 759 }
792 760
793 _debug("reject"); 761 if (sp->hdr.type != RXRPC_PACKET_TYPE_ABORT) {
 794 _debug("reject"); rxrpc_reject_packet(local, skb); 762 _debug("reject type %d", sp->hdr.type);
763 rxrpc_reject_packet(local, skb);
764 }
795 rxrpc_put_local(local); 765 rxrpc_put_local(local);
796 _leave(" [no call]"); 766 _leave(" [no call]");
797 return; 767 return;
diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
index 5f43675ee1df..c831d44b0841 100644
--- a/net/rxrpc/ar-internal.h
+++ b/net/rxrpc/ar-internal.h
@@ -396,9 +396,20 @@ struct rxrpc_call {
396#define RXRPC_ACKR_WINDOW_ASZ DIV_ROUND_UP(RXRPC_MAXACKS, BITS_PER_LONG) 396#define RXRPC_ACKR_WINDOW_ASZ DIV_ROUND_UP(RXRPC_MAXACKS, BITS_PER_LONG)
397 unsigned long ackr_window[RXRPC_ACKR_WINDOW_ASZ + 1]; 397 unsigned long ackr_window[RXRPC_ACKR_WINDOW_ASZ + 1];
398 398
399 struct hlist_node hash_node;
400 unsigned long hash_key; /* Full hash key */
401 u8 in_clientflag; /* Copy of conn->in_clientflag for hashing */
402 struct rxrpc_local *local; /* Local endpoint. Used for hashing. */
403 sa_family_t proto; /* Frame protocol */
399 /* the following should all be in net order */ 404 /* the following should all be in net order */
400 __be32 cid; /* connection ID + channel index */ 405 __be32 cid; /* connection ID + channel index */
401 __be32 call_id; /* call ID on connection */ 406 __be32 call_id; /* call ID on connection */
407 __be32 epoch; /* epoch of this connection */
408 __be16 service_id; /* service ID */
409 union { /* Peer IP address for hashing */
410 __be32 ipv4_addr;
411 __u8 ipv6_addr[16]; /* Anticipates eventual IPv6 support */
412 } peer_ip;
402}; 413};
403 414
404/* 415/*
@@ -433,6 +444,13 @@ int rxrpc_reject_call(struct rxrpc_sock *);
433/* 444/*
434 * ar-ack.c 445 * ar-ack.c
435 */ 446 */
447extern unsigned rxrpc_requested_ack_delay;
448extern unsigned rxrpc_soft_ack_delay;
449extern unsigned rxrpc_idle_ack_delay;
450extern unsigned rxrpc_rx_window_size;
451extern unsigned rxrpc_rx_mtu;
452extern unsigned rxrpc_rx_jumbo_max;
453
436void __rxrpc_propose_ACK(struct rxrpc_call *, u8, __be32, bool); 454void __rxrpc_propose_ACK(struct rxrpc_call *, u8, __be32, bool);
437void rxrpc_propose_ACK(struct rxrpc_call *, u8, __be32, bool); 455void rxrpc_propose_ACK(struct rxrpc_call *, u8, __be32, bool);
438void rxrpc_process_call(struct work_struct *); 456void rxrpc_process_call(struct work_struct *);
@@ -440,10 +458,14 @@ void rxrpc_process_call(struct work_struct *);
440/* 458/*
441 * ar-call.c 459 * ar-call.c
442 */ 460 */
461extern unsigned rxrpc_max_call_lifetime;
462extern unsigned rxrpc_dead_call_expiry;
443extern struct kmem_cache *rxrpc_call_jar; 463extern struct kmem_cache *rxrpc_call_jar;
444extern struct list_head rxrpc_calls; 464extern struct list_head rxrpc_calls;
445extern rwlock_t rxrpc_call_lock; 465extern rwlock_t rxrpc_call_lock;
446 466
467struct rxrpc_call *rxrpc_find_call_hash(u8, __be32, __be32, __be32,
468 __be16, void *, sa_family_t, const u8 *);
447struct rxrpc_call *rxrpc_get_client_call(struct rxrpc_sock *, 469struct rxrpc_call *rxrpc_get_client_call(struct rxrpc_sock *,
448 struct rxrpc_transport *, 470 struct rxrpc_transport *,
449 struct rxrpc_conn_bundle *, 471 struct rxrpc_conn_bundle *,
@@ -460,6 +482,7 @@ void __exit rxrpc_destroy_all_calls(void);
460/* 482/*
461 * ar-connection.c 483 * ar-connection.c
462 */ 484 */
485extern unsigned rxrpc_connection_expiry;
463extern struct list_head rxrpc_connections; 486extern struct list_head rxrpc_connections;
464extern rwlock_t rxrpc_connection_lock; 487extern rwlock_t rxrpc_connection_lock;
465 488
@@ -493,7 +516,6 @@ void rxrpc_UDP_error_handler(struct work_struct *);
493/* 516/*
494 * ar-input.c 517 * ar-input.c
495 */ 518 */
496extern unsigned long rxrpc_ack_timeout;
497extern const char *rxrpc_pkts[]; 519extern const char *rxrpc_pkts[];
498 520
499void rxrpc_data_ready(struct sock *, int); 521void rxrpc_data_ready(struct sock *, int);
@@ -504,6 +526,7 @@ void rxrpc_fast_process_packet(struct rxrpc_call *, struct sk_buff *);
504 * ar-local.c 526 * ar-local.c
505 */ 527 */
506extern rwlock_t rxrpc_local_lock; 528extern rwlock_t rxrpc_local_lock;
529
507struct rxrpc_local *rxrpc_lookup_local(struct sockaddr_rxrpc *); 530struct rxrpc_local *rxrpc_lookup_local(struct sockaddr_rxrpc *);
508void rxrpc_put_local(struct rxrpc_local *); 531void rxrpc_put_local(struct rxrpc_local *);
509void __exit rxrpc_destroy_all_locals(void); 532void __exit rxrpc_destroy_all_locals(void);
@@ -522,7 +545,7 @@ int rxrpc_get_server_data_key(struct rxrpc_connection *, const void *, time_t,
522/* 545/*
523 * ar-output.c 546 * ar-output.c
524 */ 547 */
525extern int rxrpc_resend_timeout; 548extern unsigned rxrpc_resend_timeout;
526 549
527int rxrpc_send_packet(struct rxrpc_transport *, struct sk_buff *); 550int rxrpc_send_packet(struct rxrpc_transport *, struct sk_buff *);
528int rxrpc_client_sendmsg(struct kiocb *, struct rxrpc_sock *, 551int rxrpc_client_sendmsg(struct kiocb *, struct rxrpc_sock *,
@@ -572,6 +595,8 @@ void rxrpc_packet_destructor(struct sk_buff *);
572/* 595/*
573 * ar-transport.c 596 * ar-transport.c
574 */ 597 */
598extern unsigned rxrpc_transport_expiry;
599
575struct rxrpc_transport *rxrpc_get_transport(struct rxrpc_local *, 600struct rxrpc_transport *rxrpc_get_transport(struct rxrpc_local *,
576 struct rxrpc_peer *, gfp_t); 601 struct rxrpc_peer *, gfp_t);
577void rxrpc_put_transport(struct rxrpc_transport *); 602void rxrpc_put_transport(struct rxrpc_transport *);
@@ -580,6 +605,17 @@ struct rxrpc_transport *rxrpc_find_transport(struct rxrpc_local *,
580 struct rxrpc_peer *); 605 struct rxrpc_peer *);
581 606
582/* 607/*
608 * sysctl.c
609 */
610#ifdef CONFIG_SYSCTL
611extern int __init rxrpc_sysctl_init(void);
612extern void rxrpc_sysctl_exit(void);
613#else
614static inline int __init rxrpc_sysctl_init(void) { return 0; }
615static inline void rxrpc_sysctl_exit(void) {}
616#endif
617
618/*
583 * debug tracing 619 * debug tracing
584 */ 620 */
585extern unsigned int rxrpc_debug; 621extern unsigned int rxrpc_debug;
diff --git a/net/rxrpc/ar-output.c b/net/rxrpc/ar-output.c
index d0e8f1c1898a..0b4b9a79f5ab 100644
--- a/net/rxrpc/ar-output.c
+++ b/net/rxrpc/ar-output.c
@@ -18,7 +18,10 @@
18#include <net/af_rxrpc.h> 18#include <net/af_rxrpc.h>
19#include "ar-internal.h" 19#include "ar-internal.h"
20 20
21int rxrpc_resend_timeout = 4; 21/*
22 * Time till packet resend (in jiffies).
23 */
24unsigned rxrpc_resend_timeout = 4 * HZ;
22 25
23static int rxrpc_send_data(struct kiocb *iocb, 26static int rxrpc_send_data(struct kiocb *iocb,
24 struct rxrpc_sock *rx, 27 struct rxrpc_sock *rx,
@@ -487,7 +490,7 @@ static void rxrpc_queue_packet(struct rxrpc_call *call, struct sk_buff *skb,
487 ntohl(sp->hdr.serial), ntohl(sp->hdr.seq)); 490 ntohl(sp->hdr.serial), ntohl(sp->hdr.seq));
488 491
489 sp->need_resend = false; 492 sp->need_resend = false;
490 sp->resend_at = jiffies + rxrpc_resend_timeout * HZ; 493 sp->resend_at = jiffies + rxrpc_resend_timeout;
491 if (!test_and_set_bit(RXRPC_CALL_RUN_RTIMER, &call->flags)) { 494 if (!test_and_set_bit(RXRPC_CALL_RUN_RTIMER, &call->flags)) {
492 _debug("run timer"); 495 _debug("run timer");
493 call->resend_timer.expires = sp->resend_at; 496 call->resend_timer.expires = sp->resend_at;
@@ -666,6 +669,7 @@ static int rxrpc_send_data(struct kiocb *iocb,
666 /* add the packet to the send queue if it's now full */ 669 /* add the packet to the send queue if it's now full */
667 if (sp->remain <= 0 || (segment == 0 && !more)) { 670 if (sp->remain <= 0 || (segment == 0 && !more)) {
668 struct rxrpc_connection *conn = call->conn; 671 struct rxrpc_connection *conn = call->conn;
672 uint32_t seq;
669 size_t pad; 673 size_t pad;
670 674
671 /* pad out if we're using security */ 675 /* pad out if we're using security */
@@ -678,11 +682,12 @@ static int rxrpc_send_data(struct kiocb *iocb,
678 memset(skb_put(skb, pad), 0, pad); 682 memset(skb_put(skb, pad), 0, pad);
679 } 683 }
680 684
685 seq = atomic_inc_return(&call->sequence);
686
681 sp->hdr.epoch = conn->epoch; 687 sp->hdr.epoch = conn->epoch;
682 sp->hdr.cid = call->cid; 688 sp->hdr.cid = call->cid;
683 sp->hdr.callNumber = call->call_id; 689 sp->hdr.callNumber = call->call_id;
684 sp->hdr.seq = 690 sp->hdr.seq = htonl(seq);
685 htonl(atomic_inc_return(&call->sequence));
686 sp->hdr.serial = 691 sp->hdr.serial =
687 htonl(atomic_inc_return(&conn->serial)); 692 htonl(atomic_inc_return(&conn->serial));
688 sp->hdr.type = RXRPC_PACKET_TYPE_DATA; 693 sp->hdr.type = RXRPC_PACKET_TYPE_DATA;
@@ -697,6 +702,8 @@ static int rxrpc_send_data(struct kiocb *iocb,
697 else if (CIRC_SPACE(call->acks_head, call->acks_tail, 702 else if (CIRC_SPACE(call->acks_head, call->acks_tail,
698 call->acks_winsz) > 1) 703 call->acks_winsz) > 1)
699 sp->hdr.flags |= RXRPC_MORE_PACKETS; 704 sp->hdr.flags |= RXRPC_MORE_PACKETS;
705 if (more && seq & 1)
706 sp->hdr.flags |= RXRPC_REQUEST_ACK;
700 707
701 ret = rxrpc_secure_packet( 708 ret = rxrpc_secure_packet(
702 call, skb, skb->mark, 709 call, skb, skb->mark,
diff --git a/net/rxrpc/ar-recvmsg.c b/net/rxrpc/ar-recvmsg.c
index 34b5490dde65..e9aaa65c0778 100644
--- a/net/rxrpc/ar-recvmsg.c
+++ b/net/rxrpc/ar-recvmsg.c
@@ -180,16 +180,7 @@ int rxrpc_recvmsg(struct kiocb *iocb, struct socket *sock,
180 if (copy > len - copied) 180 if (copy > len - copied)
181 copy = len - copied; 181 copy = len - copied;
182 182
183 if (skb->ip_summed == CHECKSUM_UNNECESSARY || 183 ret = skb_copy_datagram_iovec(skb, offset, msg->msg_iov, copy);
184 skb->ip_summed == CHECKSUM_PARTIAL) {
185 ret = skb_copy_datagram_iovec(skb, offset,
186 msg->msg_iov, copy);
187 } else {
188 ret = skb_copy_and_csum_datagram_iovec(skb, offset,
189 msg->msg_iov);
190 if (ret == -EINVAL)
191 goto csum_copy_error;
192 }
193 184
194 if (ret < 0) 185 if (ret < 0)
195 goto copy_error; 186 goto copy_error;
@@ -348,20 +339,6 @@ copy_error:
348 _leave(" = %d", ret); 339 _leave(" = %d", ret);
349 return ret; 340 return ret;
350 341
351csum_copy_error:
352 _debug("csum error");
353 release_sock(&rx->sk);
354 if (continue_call)
355 rxrpc_put_call(continue_call);
356 rxrpc_kill_skb(skb);
357 if (!(flags & MSG_PEEK)) {
358 if (skb_dequeue(&rx->sk.sk_receive_queue) != skb)
359 BUG();
360 }
361 skb_kill_datagram(&rx->sk, skb, flags);
362 rxrpc_put_call(call);
363 return -EAGAIN;
364
365wait_interrupted: 342wait_interrupted:
366 ret = sock_intr_errno(timeo); 343 ret = sock_intr_errno(timeo);
367wait_error: 344wait_error:
diff --git a/net/rxrpc/ar-skbuff.c b/net/rxrpc/ar-skbuff.c
index de755e04d29c..4cfab49e329d 100644
--- a/net/rxrpc/ar-skbuff.c
+++ b/net/rxrpc/ar-skbuff.c
@@ -83,9 +83,14 @@ static void rxrpc_hard_ACK_data(struct rxrpc_call *call,
83 rxrpc_request_final_ACK(call); 83 rxrpc_request_final_ACK(call);
84 } else if (atomic_dec_and_test(&call->ackr_not_idle) && 84 } else if (atomic_dec_and_test(&call->ackr_not_idle) &&
85 test_and_clear_bit(RXRPC_CALL_TX_SOFT_ACK, &call->flags)) { 85 test_and_clear_bit(RXRPC_CALL_TX_SOFT_ACK, &call->flags)) {
86 /* We previously soft-ACK'd some received packets that have now
87 * been consumed, so send a hard-ACK if no more packets are
88 * immediately forthcoming to allow the transmitter to free up
89 * its Tx bufferage.
90 */
86 _debug("send Rx idle ACK"); 91 _debug("send Rx idle ACK");
87 __rxrpc_propose_ACK(call, RXRPC_ACK_IDLE, sp->hdr.serial, 92 __rxrpc_propose_ACK(call, RXRPC_ACK_IDLE, sp->hdr.serial,
88 true); 93 false);
89 } 94 }
90 95
91 spin_unlock_bh(&call->lock); 96 spin_unlock_bh(&call->lock);
diff --git a/net/rxrpc/ar-transport.c b/net/rxrpc/ar-transport.c
index 92df566930b9..1976dec84f29 100644
--- a/net/rxrpc/ar-transport.c
+++ b/net/rxrpc/ar-transport.c
@@ -17,11 +17,15 @@
17#include <net/af_rxrpc.h> 17#include <net/af_rxrpc.h>
18#include "ar-internal.h" 18#include "ar-internal.h"
19 19
20/*
21 * Time after last use at which transport record is cleaned up.
22 */
23unsigned rxrpc_transport_expiry = 3600 * 24;
24
20static void rxrpc_transport_reaper(struct work_struct *work); 25static void rxrpc_transport_reaper(struct work_struct *work);
21 26
22static LIST_HEAD(rxrpc_transports); 27static LIST_HEAD(rxrpc_transports);
23static DEFINE_RWLOCK(rxrpc_transport_lock); 28static DEFINE_RWLOCK(rxrpc_transport_lock);
24static unsigned long rxrpc_transport_timeout = 3600 * 24;
25static DECLARE_DELAYED_WORK(rxrpc_transport_reap, rxrpc_transport_reaper); 29static DECLARE_DELAYED_WORK(rxrpc_transport_reap, rxrpc_transport_reaper);
26 30
27/* 31/*
@@ -235,7 +239,7 @@ static void rxrpc_transport_reaper(struct work_struct *work)
235 if (likely(atomic_read(&trans->usage) > 0)) 239 if (likely(atomic_read(&trans->usage) > 0))
236 continue; 240 continue;
237 241
238 reap_time = trans->put_time + rxrpc_transport_timeout; 242 reap_time = trans->put_time + rxrpc_transport_expiry;
239 if (reap_time <= now) 243 if (reap_time <= now)
240 list_move_tail(&trans->link, &graveyard); 244 list_move_tail(&trans->link, &graveyard);
241 else if (reap_time < earliest) 245 else if (reap_time < earliest)
@@ -271,7 +275,7 @@ void __exit rxrpc_destroy_all_transports(void)
271{ 275{
272 _enter(""); 276 _enter("");
273 277
274 rxrpc_transport_timeout = 0; 278 rxrpc_transport_expiry = 0;
275 cancel_delayed_work(&rxrpc_transport_reap); 279 cancel_delayed_work(&rxrpc_transport_reap);
276 rxrpc_queue_delayed_work(&rxrpc_transport_reap, 0); 280 rxrpc_queue_delayed_work(&rxrpc_transport_reap, 0);
277 281
diff --git a/net/rxrpc/sysctl.c b/net/rxrpc/sysctl.c
new file mode 100644
index 000000000000..50a98a910eb1
--- /dev/null
+++ b/net/rxrpc/sysctl.c
@@ -0,0 +1,146 @@
1/* sysctls for configuring RxRPC operating parameters
2 *
3 * Copyright (C) 2014 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public Licence
8 * as published by the Free Software Foundation; either version
9 * 2 of the Licence, or (at your option) any later version.
10 */
11
12#include <linux/sysctl.h>
13#include <net/sock.h>
14#include <net/af_rxrpc.h>
15#include "ar-internal.h"
16
17static struct ctl_table_header *rxrpc_sysctl_reg_table;
18static const unsigned zero = 0;
19static const unsigned one = 1;
20static const unsigned four = 4;
21static const unsigned n_65535 = 65535;
22static const unsigned n_max_acks = RXRPC_MAXACKS;
23
24/*
25 * RxRPC operating parameters.
26 *
27 * See Documentation/networking/rxrpc.txt and the variable definitions for more
28 * information on the individual parameters.
29 */
30static struct ctl_table rxrpc_sysctl_table[] = {
31 /* Values measured in milliseconds */
32 {
33 .procname = "req_ack_delay",
34 .data = &rxrpc_requested_ack_delay,
35 .maxlen = sizeof(unsigned),
36 .mode = 0644,
37 .proc_handler = proc_dointvec_ms_jiffies,
38 .extra1 = (void *)&zero,
39 },
40 {
41 .procname = "soft_ack_delay",
42 .data = &rxrpc_soft_ack_delay,
43 .maxlen = sizeof(unsigned),
44 .mode = 0644,
45 .proc_handler = proc_dointvec_ms_jiffies,
46 .extra1 = (void *)&one,
47 },
48 {
49 .procname = "idle_ack_delay",
50 .data = &rxrpc_idle_ack_delay,
51 .maxlen = sizeof(unsigned),
52 .mode = 0644,
53 .proc_handler = proc_dointvec_ms_jiffies,
54 .extra1 = (void *)&one,
55 },
56 {
57 .procname = "resend_timeout",
58 .data = &rxrpc_resend_timeout,
59 .maxlen = sizeof(unsigned),
60 .mode = 0644,
61 .proc_handler = proc_dointvec_ms_jiffies,
62 .extra1 = (void *)&one,
63 },
64
65 /* Values measured in seconds but used in jiffies */
66 {
67 .procname = "max_call_lifetime",
68 .data = &rxrpc_max_call_lifetime,
69 .maxlen = sizeof(unsigned),
70 .mode = 0644,
71 .proc_handler = proc_dointvec_jiffies,
72 .extra1 = (void *)&one,
73 },
74 {
75 .procname = "dead_call_expiry",
76 .data = &rxrpc_dead_call_expiry,
77 .maxlen = sizeof(unsigned),
78 .mode = 0644,
79 .proc_handler = proc_dointvec_jiffies,
80 .extra1 = (void *)&one,
81 },
82
83 /* Values measured in seconds */
84 {
85 .procname = "connection_expiry",
86 .data = &rxrpc_connection_expiry,
87 .maxlen = sizeof(unsigned),
88 .mode = 0644,
89 .proc_handler = proc_dointvec_minmax,
90 .extra1 = (void *)&one,
91 },
92 {
93 .procname = "transport_expiry",
94 .data = &rxrpc_transport_expiry,
95 .maxlen = sizeof(unsigned),
96 .mode = 0644,
97 .proc_handler = proc_dointvec_minmax,
98 .extra1 = (void *)&one,
99 },
100
101 /* Non-time values */
102 {
103 .procname = "rx_window_size",
104 .data = &rxrpc_rx_window_size,
105 .maxlen = sizeof(unsigned),
106 .mode = 0644,
107 .proc_handler = proc_dointvec_minmax,
108 .extra1 = (void *)&one,
109 .extra2 = (void *)&n_max_acks,
110 },
111 {
112 .procname = "rx_mtu",
113 .data = &rxrpc_rx_mtu,
114 .maxlen = sizeof(unsigned),
115 .mode = 0644,
116 .proc_handler = proc_dointvec_minmax,
117 .extra1 = (void *)&one,
118		.extra2		= (void *)&n_65535,
119 },
120 {
121 .procname = "rx_jumbo_max",
122 .data = &rxrpc_rx_jumbo_max,
123 .maxlen = sizeof(unsigned),
124 .mode = 0644,
125 .proc_handler = proc_dointvec_minmax,
126 .extra1 = (void *)&one,
127 .extra2 = (void *)&four,
128 },
129
130 { }
131};
132
133int __init rxrpc_sysctl_init(void)
134{
135 rxrpc_sysctl_reg_table = register_net_sysctl(&init_net, "net/rxrpc",
136 rxrpc_sysctl_table);
137 if (!rxrpc_sysctl_reg_table)
138 return -ENOMEM;
139 return 0;
140}
141
142void rxrpc_sysctl_exit(void)
143{
144 if (rxrpc_sysctl_reg_table)
145 unregister_net_sysctl_table(rxrpc_sysctl_reg_table);
146}