author     Arnaldo Carvalho de Melo <acme@mandriva.com>    2005-08-13 19:34:54 -0400
committer  David S. Miller <davem@sunset.davemloft.net>    2005-08-29 18:59:26 -0400
commit     7690af3fff7633e40b1b9950eb8489129251d074 (patch)
tree       92fa07234a7547c4a7dd74877972b5a291673fcf /net/dccp/output.c
parent     c173437669967301facff151bfeb7bae67354e4c (diff)
[DCCP]: Just reflow the source code to fit in 80 columns
Andrew Morton should be happy now 8)
Signed-off-by: Arnaldo Carvalho de Melo <acme@mandriva.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/dccp/output.c')
-rw-r--r--  net/dccp/output.c | 44
1 files changed, 29 insertions, 15 deletions
diff --git a/net/dccp/output.c b/net/dccp/output.c
index 50292c0605fb..dcc061bed924 100644
--- a/net/dccp/output.c
+++ b/net/dccp/output.c
@@ -40,13 +40,13 @@ int dccp_transmit_skb(struct sock *sk, struct sk_buff *skb)
         /* XXX For now we're using only 48 bits sequence numbers */
         const int dccp_header_size = sizeof(*dh) +
                                      sizeof(struct dccp_hdr_ext) +
                                      dccp_packet_hdr_len(dcb->dccpd_type);
         int err, set_ack = 1;
         u64 ackno = dp->dccps_gsr;
 
         /*
-         * FIXME: study DCCP_PKT_SYNC[ACK] to see what is the right thing
-         * to do here...
+         * FIXME: study DCCP_PKT_SYNC[ACK] to see what is the right
+         * thing to do here...
          */
         dccp_inc_seqno(&dp->dccps_gss);
 
@@ -65,7 +65,9 @@ int dccp_transmit_skb(struct sock *sk, struct sk_buff *skb)
 
         skb->h.raw = skb_push(skb, dccp_header_size);
         dh = dccp_hdr(skb);
-        /* Data packets are not cloned as they are never retransmitted */
+        /*
+         * Data packets are not cloned as they are never retransmitted
+         */
         if (skb_cloned(skb))
                 skb_set_owner_w(skb, sk);
 
@@ -86,10 +88,12 @@ int dccp_transmit_skb(struct sock *sk, struct sk_buff *skb)
 
         switch (dcb->dccpd_type) {
         case DCCP_PKT_REQUEST:
-                dccp_hdr_request(skb)->dccph_req_service = dcb->dccpd_service;
+                dccp_hdr_request(skb)->dccph_req_service =
+                                                        dcb->dccpd_service;
                 break;
         case DCCP_PKT_RESET:
-                dccp_hdr_reset(skb)->dccph_reset_code = dcb->dccpd_reset_code;
+                dccp_hdr_reset(skb)->dccph_reset_code =
+                                                dcb->dccpd_reset_code;
                 break;
         }
 
@@ -123,10 +127,13 @@ unsigned int dccp_sync_mss(struct sock *sk, u32 pmtu)
         int mss_now;
 
         /*
-         * FIXME: we really should be using the af_specific thing to support IPv6.
-         * mss_now = pmtu - tp->af_specific->net_header_len - sizeof(struct dccp_hdr) - sizeof(struct dccp_hdr_ext);
+         * FIXME: we really should be using the af_specific thing to support
+         * IPv6.
+         * mss_now = pmtu - tp->af_specific->net_header_len -
+         *           sizeof(struct dccp_hdr) - sizeof(struct dccp_hdr_ext);
          */
-        mss_now = pmtu - sizeof(struct iphdr) - sizeof(struct dccp_hdr) - sizeof(struct dccp_hdr_ext);
+        mss_now = pmtu - sizeof(struct iphdr) - sizeof(struct dccp_hdr) -
+                  sizeof(struct dccp_hdr_ext);
 
         /* Now subtract optional transport overhead */
         mss_now -= dp->dccps_ext_header_len;
@@ -223,7 +230,8 @@ struct sk_buff *dccp_make_response(struct sock *sk, struct dst_entry *dst,
 
         dh->dccph_sport = inet_sk(sk)->sport;
         dh->dccph_dport = inet_rsk(req)->rmt_port;
-        dh->dccph_doff  = (dccp_header_size + DCCP_SKB_CB(skb)->dccpd_opt_len) / 4;
+        dh->dccph_doff  = (dccp_header_size +
+                           DCCP_SKB_CB(skb)->dccpd_opt_len) / 4;
         dh->dccph_type  = DCCP_PKT_RESPONSE;
         dh->dccph_x     = 1;
         dccp_hdr_set_seq(dh, dccp_rsk(req)->dreq_iss);
@@ -271,7 +279,8 @@ struct sk_buff *dccp_make_reset(struct sock *sk, struct dst_entry *dst,
 
         dh->dccph_sport = inet_sk(sk)->sport;
         dh->dccph_dport = inet_sk(sk)->dport;
-        dh->dccph_doff  = (dccp_header_size + DCCP_SKB_CB(skb)->dccpd_opt_len) / 4;
+        dh->dccph_doff  = (dccp_header_size +
+                           DCCP_SKB_CB(skb)->dccpd_opt_len) / 4;
         dh->dccph_type  = DCCP_PKT_RESET;
         dh->dccph_x     = 1;
         dccp_hdr_set_seq(dh, dp->dccps_gss);
@@ -348,7 +357,9 @@ void dccp_send_ack(struct sock *sk)
         if (skb == NULL) {
                 inet_csk_schedule_ack(sk);
                 inet_csk(sk)->icsk_ack.ato = TCP_ATO_MIN;
-                inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK, TCP_DELACK_MAX, TCP_RTO_MAX);
+                inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
+                                          TCP_DELACK_MAX,
+                                          DCCP_RTO_MAX);
                 return;
         }
 
@@ -416,8 +427,10 @@ void dccp_send_sync(struct sock *sk, u64 seq)
         dccp_transmit_skb(sk, skb);
 }
 
-/* Send a DCCP_PKT_CLOSE/CLOSEREQ. The caller locks the socket for us. This cannot be
- * allowed to fail queueing a DCCP_PKT_CLOSE/CLOSEREQ frame under any circumstances.
+/*
+ * Send a DCCP_PKT_CLOSE/CLOSEREQ. The caller locks the socket for us. This
+ * cannot be allowed to fail queueing a DCCP_PKT_CLOSE/CLOSEREQ frame under
+ * any circumstances.
  */
 void dccp_send_close(struct sock *sk)
 {
@@ -435,7 +448,8 @@ void dccp_send_close(struct sock *sk)
         /* Reserve space for headers and prepare control bits. */
         skb_reserve(skb, sk->sk_prot->max_header);
         skb->csum = 0;
-        DCCP_SKB_CB(skb)->dccpd_type = dp->dccps_role == DCCP_ROLE_CLIENT ? DCCP_PKT_CLOSE : DCCP_PKT_CLOSEREQ;
+        DCCP_SKB_CB(skb)->dccpd_type = dp->dccps_role == DCCP_ROLE_CLIENT ?
+                                        DCCP_PKT_CLOSE : DCCP_PKT_CLOSEREQ;
 
         skb_set_owner_w(skb, sk);
         dccp_transmit_skb(sk, skb);
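
Note for readers of the dccp_sync_mss() hunk above: the re-wrapped expression is simply "path MTU minus network and DCCP header overhead, minus optional transport overhead". Below is a minimal standalone sketch of that arithmetic, not part of this patch; the header sizes are illustrative constants chosen here, whereas the kernel code uses sizeof(struct iphdr), sizeof(struct dccp_hdr) and sizeof(struct dccp_hdr_ext).

#include <stdio.h>

/*
 * Illustrative header sizes (hypothetical values, not taken from the
 * kernel headers): 20-byte IPv4 header, 12-byte generic DCCP header,
 * 4-byte extended (48-bit) sequence number word.
 */
#define IPV4_HDR_LEN      20
#define DCCP_HDR_LEN      12
#define DCCP_HDR_EXT_LEN   4

/*
 * Sketch of the computation in dccp_sync_mss(): start from the path MTU,
 * subtract the network and DCCP header overhead, then subtract any
 * optional transport overhead (dp->dccps_ext_header_len in the patch).
 */
static unsigned int dccp_mss_sketch(unsigned int pmtu,
                                    unsigned int ext_header_len)
{
        unsigned int mss_now = pmtu - IPV4_HDR_LEN - DCCP_HDR_LEN -
                               DCCP_HDR_EXT_LEN;

        mss_now -= ext_header_len;      /* optional transport overhead */
        return mss_now;
}

int main(void)
{
        /* e.g. a 1500-byte Ethernet MTU with no IP options: 1500 - 36 = 1464 */
        printf("mss_now = %u\n", dccp_mss_sketch(1500, 0));
        return 0;
}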