about summary refs log tree commit diff stats
path: root/net/dccp
diff options
context:
space:
mode:
authorGerrit Renker <gerrit@erg.abdn.ac.uk>2006-11-14 08:21:36 -0500
committerDavid S. Miller <davem@sunset.davemloft.net>2006-12-03 00:22:27 -0500
commitb9df3cb8cf9a96e63dfdcd3056a9cbc71f2459e7 (patch)
tree46d19124b1bbfd9eaa26af3d6ba2293b4e8f326d /net/dccp
parent1ed176a801b83915b7c8ab80e0a2a6376a2d6051 (diff)
[TCP/DCCP]: Introduce net_xmit_eval
Throughout the TCP/DCCP (and tunnelling) code, it often happens that the return code of a transmit function needs to be tested against NET_XMIT_CN, which is a value that does not indicate a strict error condition. This patch uses a macro for these recurring situations which is consistent with the already existing macro net_xmit_errno, saving on duplicated code.
Signed-off-by: Gerrit Renker <gerrit@erg.abdn.ac.uk>
Signed-off-by: Arnaldo Carvalho de Melo <acme@mandriva.com>
Diffstat (limited to 'net/dccp')
-rw-r--r--net/dccp/ipv4.c5
-rw-r--r--net/dccp/ipv6.c3
-rw-r--r--net/dccp/output.c14
3 files changed, 5 insertions, 17 deletions
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index bc400b2ba25e..61c09014dade 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -501,8 +501,7 @@ static int dccp_v4_send_response(struct sock *sk, struct request_sock *req,
501 err = ip_build_and_send_pkt(skb, sk, ireq->loc_addr, 501 err = ip_build_and_send_pkt(skb, sk, ireq->loc_addr,
502 ireq->rmt_addr, 502 ireq->rmt_addr,
503 ireq->opt); 503 ireq->opt);
504 if (err == NET_XMIT_CN) 504 err = net_xmit_eval(err);
505 err = 0;
506 } 505 }
507 506
508out: 507out:
@@ -571,7 +570,7 @@ static void dccp_v4_ctl_send_reset(struct sk_buff *rxskb)
571 rxskb->nh.iph->saddr, NULL); 570 rxskb->nh.iph->saddr, NULL);
572 bh_unlock_sock(dccp_v4_ctl_socket->sk); 571 bh_unlock_sock(dccp_v4_ctl_socket->sk);
573 572
574 if (err == NET_XMIT_CN || err == 0) { 573 if (net_xmit_eval(err) == 0) {
575 DCCP_INC_STATS_BH(DCCP_MIB_OUTSEGS); 574 DCCP_INC_STATS_BH(DCCP_MIB_OUTSEGS);
576 DCCP_INC_STATS_BH(DCCP_MIB_OUTRSTS); 575 DCCP_INC_STATS_BH(DCCP_MIB_OUTRSTS);
577 } 576 }
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index 8d6ddb6389a7..2165b1740c7c 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -294,8 +294,7 @@ static int dccp_v6_send_response(struct sock *sk, struct request_sock *req,
294 &ireq6->rmt_addr); 294 &ireq6->rmt_addr);
295 ipv6_addr_copy(&fl.fl6_dst, &ireq6->rmt_addr); 295 ipv6_addr_copy(&fl.fl6_dst, &ireq6->rmt_addr);
296 err = ip6_xmit(sk, skb, &fl, opt, 0); 296 err = ip6_xmit(sk, skb, &fl, opt, 0);
297 if (err == NET_XMIT_CN) 297 err = net_xmit_eval(err);
298 err = 0;
299 } 298 }
300 299
301done: 300done:
diff --git a/net/dccp/output.c b/net/dccp/output.c
index 0994b13f0f15..ef22f3cc791a 100644
--- a/net/dccp/output.c
+++ b/net/dccp/output.c
@@ -125,16 +125,7 @@ static int dccp_transmit_skb(struct sock *sk, struct sk_buff *skb)
125 125
126 memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt)); 126 memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
127 err = icsk->icsk_af_ops->queue_xmit(skb, sk, 0); 127 err = icsk->icsk_af_ops->queue_xmit(skb, sk, 0);
128 if (err <= 0) 128 return net_xmit_eval(err);
129 return err;
130
131 /* NET_XMIT_CN is special. It does not guarantee,
132 * that this packet is lost. It tells that device
133 * is about to start to drop packets or already
134 * drops some packets of the same priority and
135 * invokes us to send less aggressively.
136 */
137 return err == NET_XMIT_CN ? 0 : err;
138 } 129 }
139 return -ENOBUFS; 130 return -ENOBUFS;
140} 131}
@@ -426,8 +417,7 @@ int dccp_send_reset(struct sock *sk, enum dccp_reset_codes code)
426 if (skb != NULL) { 417 if (skb != NULL) {
427 memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt)); 418 memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
428 err = inet_csk(sk)->icsk_af_ops->queue_xmit(skb, sk, 0); 419 err = inet_csk(sk)->icsk_af_ops->queue_xmit(skb, sk, 0);
429 if (err == NET_XMIT_CN) 420 return net_xmit_eval(err);
430 err = 0;
431 } 421 }
432 } 422 }
433 423