diff options
author | Gerrit Renker <gerrit@erg.abdn.ac.uk> | 2006-11-14 08:21:36 -0500 |
---|---|---|
committer | David S. Miller <davem@sunset.davemloft.net> | 2006-12-03 00:22:27 -0500 |
commit | b9df3cb8cf9a96e63dfdcd3056a9cbc71f2459e7 (patch) | |
tree | 46d19124b1bbfd9eaa26af3d6ba2293b4e8f326d /net | |
parent | 1ed176a801b83915b7c8ab80e0a2a6376a2d6051 (diff) |
[TCP/DCCP]: Introduce net_xmit_eval
Throughout the TCP/DCCP (and tunnelling) code, it often happens that the
return code of a transmit function needs to be tested against NET_XMIT_CN
which is a value that does not indicate a strict error condition.
This patch uses a macro for these recurring situations, consistent
with the already existing macro net_xmit_errno, saving duplicated code.
Signed-off-by: Gerrit Renker <gerrit@erg.abdn.ac.uk>
Signed-off-by: Arnaldo Carvalho de Melo <acme@mandriva.com>
Diffstat (limited to 'net')
-rw-r--r-- | net/dccp/ipv4.c | 5 | ||||
-rw-r--r-- | net/dccp/ipv6.c | 3 | ||||
-rw-r--r-- | net/dccp/output.c | 14 | ||||
-rw-r--r-- | net/ipv4/tcp_ipv4.c | 3 | ||||
-rw-r--r-- | net/ipv4/tcp_output.c | 8 | ||||
-rw-r--r-- | net/ipv6/ip6_tunnel.c | 2 | ||||
-rw-r--r-- | net/ipv6/tcp_ipv6.c | 3 |
7 files changed, 9 insertions, 29 deletions
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c index bc400b2ba25e..61c09014dade 100644 --- a/net/dccp/ipv4.c +++ b/net/dccp/ipv4.c | |||
@@ -501,8 +501,7 @@ static int dccp_v4_send_response(struct sock *sk, struct request_sock *req, | |||
501 | err = ip_build_and_send_pkt(skb, sk, ireq->loc_addr, | 501 | err = ip_build_and_send_pkt(skb, sk, ireq->loc_addr, |
502 | ireq->rmt_addr, | 502 | ireq->rmt_addr, |
503 | ireq->opt); | 503 | ireq->opt); |
504 | if (err == NET_XMIT_CN) | 504 | err = net_xmit_eval(err); |
505 | err = 0; | ||
506 | } | 505 | } |
507 | 506 | ||
508 | out: | 507 | out: |
@@ -571,7 +570,7 @@ static void dccp_v4_ctl_send_reset(struct sk_buff *rxskb) | |||
571 | rxskb->nh.iph->saddr, NULL); | 570 | rxskb->nh.iph->saddr, NULL); |
572 | bh_unlock_sock(dccp_v4_ctl_socket->sk); | 571 | bh_unlock_sock(dccp_v4_ctl_socket->sk); |
573 | 572 | ||
574 | if (err == NET_XMIT_CN || err == 0) { | 573 | if (net_xmit_eval(err) == 0) { |
575 | DCCP_INC_STATS_BH(DCCP_MIB_OUTSEGS); | 574 | DCCP_INC_STATS_BH(DCCP_MIB_OUTSEGS); |
576 | DCCP_INC_STATS_BH(DCCP_MIB_OUTRSTS); | 575 | DCCP_INC_STATS_BH(DCCP_MIB_OUTRSTS); |
577 | } | 576 | } |
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c index 8d6ddb6389a7..2165b1740c7c 100644 --- a/net/dccp/ipv6.c +++ b/net/dccp/ipv6.c | |||
@@ -294,8 +294,7 @@ static int dccp_v6_send_response(struct sock *sk, struct request_sock *req, | |||
294 | &ireq6->rmt_addr); | 294 | &ireq6->rmt_addr); |
295 | ipv6_addr_copy(&fl.fl6_dst, &ireq6->rmt_addr); | 295 | ipv6_addr_copy(&fl.fl6_dst, &ireq6->rmt_addr); |
296 | err = ip6_xmit(sk, skb, &fl, opt, 0); | 296 | err = ip6_xmit(sk, skb, &fl, opt, 0); |
297 | if (err == NET_XMIT_CN) | 297 | err = net_xmit_eval(err); |
298 | err = 0; | ||
299 | } | 298 | } |
300 | 299 | ||
301 | done: | 300 | done: |
diff --git a/net/dccp/output.c b/net/dccp/output.c index 0994b13f0f15..ef22f3cc791a 100644 --- a/net/dccp/output.c +++ b/net/dccp/output.c | |||
@@ -125,16 +125,7 @@ static int dccp_transmit_skb(struct sock *sk, struct sk_buff *skb) | |||
125 | 125 | ||
126 | memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt)); | 126 | memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt)); |
127 | err = icsk->icsk_af_ops->queue_xmit(skb, sk, 0); | 127 | err = icsk->icsk_af_ops->queue_xmit(skb, sk, 0); |
128 | if (err <= 0) | 128 | return net_xmit_eval(err); |
129 | return err; | ||
130 | |||
131 | /* NET_XMIT_CN is special. It does not guarantee, | ||
132 | * that this packet is lost. It tells that device | ||
133 | * is about to start to drop packets or already | ||
134 | * drops some packets of the same priority and | ||
135 | * invokes us to send less aggressively. | ||
136 | */ | ||
137 | return err == NET_XMIT_CN ? 0 : err; | ||
138 | } | 129 | } |
139 | return -ENOBUFS; | 130 | return -ENOBUFS; |
140 | } | 131 | } |
@@ -426,8 +417,7 @@ int dccp_send_reset(struct sock *sk, enum dccp_reset_codes code) | |||
426 | if (skb != NULL) { | 417 | if (skb != NULL) { |
427 | memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt)); | 418 | memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt)); |
428 | err = inet_csk(sk)->icsk_af_ops->queue_xmit(skb, sk, 0); | 419 | err = inet_csk(sk)->icsk_af_ops->queue_xmit(skb, sk, 0); |
429 | if (err == NET_XMIT_CN) | 420 | return net_xmit_eval(err); |
430 | err = 0; | ||
431 | } | 421 | } |
432 | } | 422 | } |
433 | 423 | ||
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 2eb58844403c..0ad0904bf56c 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c | |||
@@ -662,8 +662,7 @@ static int tcp_v4_send_synack(struct sock *sk, struct request_sock *req, | |||
662 | err = ip_build_and_send_pkt(skb, sk, ireq->loc_addr, | 662 | err = ip_build_and_send_pkt(skb, sk, ireq->loc_addr, |
663 | ireq->rmt_addr, | 663 | ireq->rmt_addr, |
664 | ireq->opt); | 664 | ireq->opt); |
665 | if (err == NET_XMIT_CN) | 665 | err = net_xmit_eval(err); |
666 | err = 0; | ||
667 | } | 666 | } |
668 | 667 | ||
669 | out: | 668 | out: |
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index f63e99aac2d5..6a8581ab9a23 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c | |||
@@ -484,13 +484,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it, | |||
484 | 484 | ||
485 | tcp_enter_cwr(sk); | 485 | tcp_enter_cwr(sk); |
486 | 486 | ||
487 | /* NET_XMIT_CN is special. It does not guarantee, | 487 | return net_xmit_eval(err); |
488 | * that this packet is lost. It tells that device | ||
489 | * is about to start to drop packets or already | ||
490 | * drops some packets of the same priority and | ||
491 | * invokes us to send less aggressively. | ||
492 | */ | ||
493 | return err == NET_XMIT_CN ? 0 : err; | ||
494 | 488 | ||
495 | #undef SYSCTL_FLAG_TSTAMPS | 489 | #undef SYSCTL_FLAG_TSTAMPS |
496 | #undef SYSCTL_FLAG_WSCALE | 490 | #undef SYSCTL_FLAG_WSCALE |
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c index 4919f9294e2a..80a11909159d 100644 --- a/net/ipv6/ip6_tunnel.c +++ b/net/ipv6/ip6_tunnel.c | |||
@@ -748,7 +748,7 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev) | |||
748 | err = NF_HOOK(PF_INET6, NF_IP6_LOCAL_OUT, skb, NULL, | 748 | err = NF_HOOK(PF_INET6, NF_IP6_LOCAL_OUT, skb, NULL, |
749 | skb->dst->dev, dst_output); | 749 | skb->dst->dev, dst_output); |
750 | 750 | ||
751 | if (err == NET_XMIT_SUCCESS || err == NET_XMIT_CN) { | 751 | if (net_xmit_eval(err) == 0) { |
752 | stats->tx_bytes += pkt_len; | 752 | stats->tx_bytes += pkt_len; |
753 | stats->tx_packets++; | 753 | stats->tx_packets++; |
754 | } else { | 754 | } else { |
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 9a8e690fdf7c..9a88395a7629 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c | |||
@@ -502,8 +502,7 @@ static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req, | |||
502 | 502 | ||
503 | ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr); | 503 | ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr); |
504 | err = ip6_xmit(sk, skb, &fl, opt, 0); | 504 | err = ip6_xmit(sk, skb, &fl, opt, 0); |
505 | if (err == NET_XMIT_CN) | 505 | err = net_xmit_eval(err); |
506 | err = 0; | ||
507 | } | 506 | } |
508 | 507 | ||
509 | done: | 508 | done: |