author	Pavel Emelyanov <xemul@openvz.org>	2008-07-03 04:05:41 -0400
committer	David S. Miller <davem@davemloft.net>	2008-07-03 04:05:41 -0400
commit	40b215e594b65a3488576c9d24b367548e18902a (patch)
tree	251b8247f97494b7461acc431044e81c33470254 /net/ipv4/tcp_input.c
parent	b4653e99450693b75a3c6c8ff4f070164f12815e (diff)
tcp: de-bloat a bit with factoring NET_INC_STATS_BH out
There are some places in TCP that select one MIB index to bump snmp
statistics like this:

	if (<something>)
		NET_INC_STATS_BH(<some_id>);
	else if (<something_else>)
		NET_INC_STATS_BH(<some_other_id>);
	...
	else
		NET_INC_STATS_BH(<default_id>);

or in a trickier but still similar way.

On the other hand, this NET_INC_STATS_BH is a camouflaged increment of
a percpu variable, which is not that small. Factoring those cases out
de-bloats 235 bytes on a non-preemptible i386 config and brings parts
of the code back within 80 columns:

add/remove: 0/0 grow/shrink: 0/7 up/down: 0/-235 (-235)
function                                     old     new   delta
tcp_fastretrans_alert                       1437    1424     -13
tcp_dsack_set                                137     124     -13
tcp_xmit_retransmit_queue                    690     676     -14
tcp_try_undo_recovery                        283     265     -18
tcp_sacktag_write_queue                     1550    1515     -35
tcp_update_reordering                        162     106     -56
tcp_retransmit_timer                         990     904     -86

Signed-off-by: Pavel Emelyanov <xemul@openvz.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
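For a self-contained illustration of the transformation, here is a
minimal userspace sketch in plain C; the enum values, the stats[]
table, inc_stat() and update_reordering() are hypothetical stand-ins
for the kernel's MIB machinery, not its actual API:

	#include <stdio.h>

	/* Hypothetical stand-ins for the LINUX_MIB_* indices and the
	 * SNMP table; none of these names are the kernel's real API. */
	enum { MIB_TS_REORDER, MIB_RENO_REORDER, MIB_SACK_REORDER, MIB_MAX };

	static unsigned long stats[MIB_MAX];

	/* Stand-in for NET_INC_STATS_BH(): in the kernel this is a
	 * per-cpu counter increment whose expansion is not as small
	 * as it looks. */
	static void inc_stat(int mib_idx)
	{
		stats[mib_idx]++;
	}

	/* The refactored shape: pick the MIB index in the branch
	 * chain, then emit the (comparatively heavy) increment
	 * exactly once. */
	static void update_reordering(int ts, int is_reno)
	{
		int mib_idx;

		if (ts)
			mib_idx = MIB_TS_REORDER;
		else if (is_reno)
			mib_idx = MIB_RENO_REORDER;
		else
			mib_idx = MIB_SACK_REORDER;

		inc_stat(mib_idx);
	}

	int main(void)
	{
		update_reordering(1, 0);	/* timestamp-based reordering */
		update_reordering(0, 1);	/* reno */
		printf("ts=%lu reno=%lu sack=%lu\n",
		       stats[MIB_TS_REORDER], stats[MIB_RENO_REORDER],
		       stats[MIB_SACK_REORDER]);
		return 0;
	}

Before the change, each branch carried its own NET_INC_STATS_BH()
call, duplicating the increment expansion per branch; selecting
mib_idx first and incrementing once is where the 235 bytes of text
go away.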
Diffstat (limited to 'net/ipv4/tcp_input.c')
-rw-r--r--  net/ipv4/tcp_input.c  46
1 file changed, 32 insertions(+), 14 deletions(-)
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index de30e70ff25..d6ea970a151 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -947,17 +947,21 @@ static void tcp_update_reordering(struct sock *sk, const int metric,
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	if (metric > tp->reordering) {
+		int mib_idx;
+
 		tp->reordering = min(TCP_MAX_REORDERING, metric);
 
 		/* This exciting event is worth to be remembered. 8) */
 		if (ts)
-			NET_INC_STATS_BH(LINUX_MIB_TCPTSREORDER);
+			mib_idx = LINUX_MIB_TCPTSREORDER;
 		else if (tcp_is_reno(tp))
-			NET_INC_STATS_BH(LINUX_MIB_TCPRENOREORDER);
+			mib_idx = LINUX_MIB_TCPRENOREORDER;
 		else if (tcp_is_fack(tp))
-			NET_INC_STATS_BH(LINUX_MIB_TCPFACKREORDER);
+			mib_idx = LINUX_MIB_TCPFACKREORDER;
 		else
-			NET_INC_STATS_BH(LINUX_MIB_TCPSACKREORDER);
+			mib_idx = LINUX_MIB_TCPSACKREORDER;
+
+		NET_INC_STATS_BH(mib_idx);
 #if FASTRETRANS_DEBUG > 1
 		printk(KERN_DEBUG "Disorder%d %d %u f%u s%u rr%d\n",
 		       tp->rx_opt.sack_ok, inet_csk(sk)->icsk_ca_state,
@@ -1456,18 +1460,22 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb,
 		if (!tcp_is_sackblock_valid(tp, dup_sack,
 					    sp[used_sacks].start_seq,
 					    sp[used_sacks].end_seq)) {
+			int mib_idx;
+
 			if (dup_sack) {
 				if (!tp->undo_marker)
-					NET_INC_STATS_BH(LINUX_MIB_TCPDSACKIGNOREDNOUNDO);
+					mib_idx = LINUX_MIB_TCPDSACKIGNOREDNOUNDO;
 				else
-					NET_INC_STATS_BH(LINUX_MIB_TCPDSACKIGNOREDOLD);
+					mib_idx = LINUX_MIB_TCPDSACKIGNOREDOLD;
 			} else {
 				/* Don't count olds caused by ACK reordering */
 				if ((TCP_SKB_CB(ack_skb)->ack_seq != tp->snd_una) &&
 				    !after(sp[used_sacks].end_seq, tp->snd_una))
 					continue;
-				NET_INC_STATS_BH(LINUX_MIB_TCPSACKDISCARD);
+				mib_idx = LINUX_MIB_TCPSACKDISCARD;
 			}
+
+			NET_INC_STATS_BH(mib_idx);
 			if (i == 0)
 				first_sack_index = -1;
 			continue;
@@ -2380,15 +2388,19 @@ static int tcp_try_undo_recovery(struct sock *sk)
 	struct tcp_sock *tp = tcp_sk(sk);
 
 	if (tcp_may_undo(tp)) {
+		int mib_idx;
+
 		/* Happy end! We did not retransmit anything
 		 * or our original transmission succeeded.
 		 */
 		DBGUNDO(sk, inet_csk(sk)->icsk_ca_state == TCP_CA_Loss ? "loss" : "retrans");
 		tcp_undo_cwr(sk, 1);
 		if (inet_csk(sk)->icsk_ca_state == TCP_CA_Loss)
-			NET_INC_STATS_BH(LINUX_MIB_TCPLOSSUNDO);
+			mib_idx = LINUX_MIB_TCPLOSSUNDO;
 		else
-			NET_INC_STATS_BH(LINUX_MIB_TCPFULLUNDO);
+			mib_idx = LINUX_MIB_TCPFULLUNDO;
+
+		NET_INC_STATS_BH(mib_idx);
 		tp->undo_marker = 0;
 	}
 	if (tp->snd_una == tp->high_seq && tcp_is_reno(tp)) {
@@ -2560,7 +2572,7 @@ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked, int flag)
 	int is_dupack = !(flag & (FLAG_SND_UNA_ADVANCED | FLAG_NOT_DUP));
 	int do_lost = is_dupack || ((flag & FLAG_DATA_SACKED) &&
 				    (tcp_fackets_out(tp) > tp->reordering));
-	int fast_rexmit = 0;
+	int fast_rexmit = 0, mib_idx;
 
 	if (WARN_ON(!tp->packets_out && tp->sacked_out))
 		tp->sacked_out = 0;
@@ -2683,9 +2695,11 @@ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked, int flag)
 		/* Otherwise enter Recovery state */
 
 		if (tcp_is_reno(tp))
-			NET_INC_STATS_BH(LINUX_MIB_TCPRENORECOVERY);
+			mib_idx = LINUX_MIB_TCPRENORECOVERY;
 		else
-			NET_INC_STATS_BH(LINUX_MIB_TCPSACKRECOVERY);
+			mib_idx = LINUX_MIB_TCPSACKRECOVERY;
+
+		NET_INC_STATS_BH(mib_idx);
 
 		tp->high_seq = tp->snd_nxt;
 		tp->prior_ssthresh = 0;
@@ -3700,10 +3714,14 @@ static inline int tcp_sack_extend(struct tcp_sack_block *sp, u32 seq,
 static void tcp_dsack_set(struct tcp_sock *tp, u32 seq, u32 end_seq)
 {
 	if (tcp_is_sack(tp) && sysctl_tcp_dsack) {
+		int mib_idx;
+
 		if (before(seq, tp->rcv_nxt))
-			NET_INC_STATS_BH(LINUX_MIB_TCPDSACKOLDSENT);
+			mib_idx = LINUX_MIB_TCPDSACKOLDSENT;
 		else
-			NET_INC_STATS_BH(LINUX_MIB_TCPDSACKOFOSENT);
+			mib_idx = LINUX_MIB_TCPDSACKOFOSENT;
+
+		NET_INC_STATS_BH(mib_idx);
 
 		tp->rx_opt.dsack = 1;
 		tp->duplicate_sack[0].start_seq = seq;