author		Gerrit Renker <gerrit@erg.abdn.ac.uk>		2006-11-13 10:26:51 -0500
committer	David S. Miller <davem@sunset.davemloft.net>	2006-12-03 00:22:20 -0500
commit		4ed800d02cfb639b4f8375a0f0f04f0efea64e7f (patch)
tree		5aeefac68f625bd446276d6d37b15d7ed2bccfa6 /net/dccp
parent		afb0a34dd3e20b3f534de19993271b8664cf10bb (diff)
[DCCP]: Remove forward declarations in timer.c
This removes 3 forward declarations by reordering 2 functions. No code
change at all.

Signed-off-by: Gerrit Renker <gerrit@erg.abdn.ac.uk>
Signed-off-by: Arnaldo Carvalho de Melo <acme@mandriva.com>
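The reordering works because a static function needs no forward declaration once its definition appears before its first use: with dccp_delack_timer() defined ahead of dccp_init_xmit_timers(), which takes its address, the declarations at the top of timer.c become redundant. A minimal stand-alone sketch of the same pattern, using hypothetical names rather than the real DCCP handlers:

#include <stdio.h>

/* Define the callee first ... */
static void delack_timer(unsigned long data)
{
	printf("delack timer fired, data=%lu\n", data);
}

/* ... so the caller that takes its address needs no forward declaration. */
static void init_xmit_timers(void (*handler)(unsigned long))
{
	handler(0);	/* stand-in for registering the timer callback */
}

int main(void)
{
	init_xmit_timers(&delack_timer);
	return 0;
}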
Diffstat (limited to 'net/dccp')
-rw-r--r--	net/dccp/timer.c	110
1 file changed, 53 insertions(+), 57 deletions(-)
diff --git a/net/dccp/timer.c b/net/dccp/timer.c
index 7b3f16e29a97..e8f519e7f481 100644
--- a/net/dccp/timer.c
+++ b/net/dccp/timer.c
@@ -20,16 +20,6 @@ int sysctl_dccp_request_retries __read_mostly = TCP_SYN_RETRIES;
 int sysctl_dccp_retries1 __read_mostly = TCP_RETR1;
 int sysctl_dccp_retries2 __read_mostly = TCP_RETR2;
 
-static void dccp_write_timer(unsigned long data);
-static void dccp_keepalive_timer(unsigned long data);
-static void dccp_delack_timer(unsigned long data);
-
-void dccp_init_xmit_timers(struct sock *sk)
-{
-	inet_csk_init_xmit_timers(sk, &dccp_write_timer, &dccp_delack_timer,
-				  &dccp_keepalive_timer);
-}
-
 static void dccp_write_err(struct sock *sk)
 {
 	sk->sk_err = sk->sk_err_soft ? : ETIMEDOUT;
@@ -90,53 +80,6 @@ static int dccp_write_timeout(struct sock *sk)
 	return 0;
 }
 
-/* This is the same as tcp_delack_timer, sans prequeue & mem_reclaim stuff */
-static void dccp_delack_timer(unsigned long data)
-{
-	struct sock *sk = (struct sock *)data;
-	struct inet_connection_sock *icsk = inet_csk(sk);
-
-	bh_lock_sock(sk);
-	if (sock_owned_by_user(sk)) {
-		/* Try again later. */
-		icsk->icsk_ack.blocked = 1;
-		NET_INC_STATS_BH(LINUX_MIB_DELAYEDACKLOCKED);
-		sk_reset_timer(sk, &icsk->icsk_delack_timer,
-			       jiffies + TCP_DELACK_MIN);
-		goto out;
-	}
-
-	if (sk->sk_state == DCCP_CLOSED ||
-	    !(icsk->icsk_ack.pending & ICSK_ACK_TIMER))
-		goto out;
-	if (time_after(icsk->icsk_ack.timeout, jiffies)) {
-		sk_reset_timer(sk, &icsk->icsk_delack_timer,
-			       icsk->icsk_ack.timeout);
-		goto out;
-	}
-
-	icsk->icsk_ack.pending &= ~ICSK_ACK_TIMER;
-
-	if (inet_csk_ack_scheduled(sk)) {
-		if (!icsk->icsk_ack.pingpong) {
-			/* Delayed ACK missed: inflate ATO. */
-			icsk->icsk_ack.ato = min(icsk->icsk_ack.ato << 1,
-						 icsk->icsk_rto);
-		} else {
-			/* Delayed ACK missed: leave pingpong mode and
-			 * deflate ATO.
-			 */
-			icsk->icsk_ack.pingpong = 0;
-			icsk->icsk_ack.ato = TCP_ATO_MIN;
-		}
-		dccp_send_ack(sk);
-		NET_INC_STATS_BH(LINUX_MIB_DELAYEDACKS);
-	}
-out:
-	bh_unlock_sock(sk);
-	sock_put(sk);
-}
-
 /*
  * The DCCP retransmit timer.
  */
@@ -270,3 +213,56 @@ out:
 	bh_unlock_sock(sk);
 	sock_put(sk);
 }
+
+/* This is the same as tcp_delack_timer, sans prequeue & mem_reclaim stuff */
+static void dccp_delack_timer(unsigned long data)
+{
+	struct sock *sk = (struct sock *)data;
+	struct inet_connection_sock *icsk = inet_csk(sk);
+
+	bh_lock_sock(sk);
+	if (sock_owned_by_user(sk)) {
+		/* Try again later. */
+		icsk->icsk_ack.blocked = 1;
+		NET_INC_STATS_BH(LINUX_MIB_DELAYEDACKLOCKED);
+		sk_reset_timer(sk, &icsk->icsk_delack_timer,
+			       jiffies + TCP_DELACK_MIN);
+		goto out;
+	}
+
+	if (sk->sk_state == DCCP_CLOSED ||
+	    !(icsk->icsk_ack.pending & ICSK_ACK_TIMER))
+		goto out;
+	if (time_after(icsk->icsk_ack.timeout, jiffies)) {
+		sk_reset_timer(sk, &icsk->icsk_delack_timer,
+			       icsk->icsk_ack.timeout);
+		goto out;
+	}
+
+	icsk->icsk_ack.pending &= ~ICSK_ACK_TIMER;
+
+	if (inet_csk_ack_scheduled(sk)) {
+		if (!icsk->icsk_ack.pingpong) {
+			/* Delayed ACK missed: inflate ATO. */
+			icsk->icsk_ack.ato = min(icsk->icsk_ack.ato << 1,
+						 icsk->icsk_rto);
+		} else {
+			/* Delayed ACK missed: leave pingpong mode and
+			 * deflate ATO.
+			 */
+			icsk->icsk_ack.pingpong = 0;
+			icsk->icsk_ack.ato = TCP_ATO_MIN;
+		}
+		dccp_send_ack(sk);
+		NET_INC_STATS_BH(LINUX_MIB_DELAYEDACKS);
+	}
+out:
+	bh_unlock_sock(sk);
+	sock_put(sk);
+}
+
+void dccp_init_xmit_timers(struct sock *sk)
+{
+	inet_csk_init_xmit_timers(sk, &dccp_write_timer, &dccp_delack_timer,
+				  &dccp_keepalive_timer);
+}