aboutsummaryrefslogtreecommitdiffstats
path: root/net/ipv4/tcp_timer.c
diff options
context:
space:
mode:
authorPavel Emelyanov <xemul@openvz.org>2008-07-03 04:05:41 -0400
committerDavid S. Miller <davem@davemloft.net>2008-07-03 04:05:41 -0400
commit40b215e594b65a3488576c9d24b367548e18902a (patch)
tree251b8247f97494b7461acc431044e81c33470254 /net/ipv4/tcp_timer.c
parentb4653e99450693b75a3c6c8ff4f070164f12815e (diff)
tcp: de-bloat a bit with factoring NET_INC_STATS_BH out
There are some places in TCP that select one MIB index to bump snmp statistics like this: if (<something>) NET_INC_STATS_BH(<some_id>); else if (<something_else>) NET_INC_STATS_BH(<some_other_id>); ... else NET_INC_STATS_BH(<default_id>); or in a more tricky but still similar way. On the other hand, this NET_INC_STATS_BH is a camouflaged increment of percpu variable, which is not that small. Factoring those cases out de-bloats 235 bytes on non-preemptible i386 config and drives parts of the code into 80 columns. add/remove: 0/0 grow/shrink: 0/7 up/down: 0/-235 (-235) function old new delta tcp_fastretrans_alert 1437 1424 -13 tcp_dsack_set 137 124 -13 tcp_xmit_retransmit_queue 690 676 -14 tcp_try_undo_recovery 283 265 -18 tcp_sacktag_write_queue 1550 1515 -35 tcp_update_reordering 162 106 -56 tcp_retransmit_timer 990 904 -86 Signed-off-by: Pavel Emelyanov <xemul@openvz.org> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/ipv4/tcp_timer.c')
-rw-r--r--net/ipv4/tcp_timer.c15
1 file changed, 9 insertions, 6 deletions
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index 3e358cbb1247..6a480d1fd8f6 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -326,24 +326,27 @@ static void tcp_retransmit_timer(struct sock *sk)
326 goto out; 326 goto out;
327 327
328 if (icsk->icsk_retransmits == 0) { 328 if (icsk->icsk_retransmits == 0) {
329 int mib_idx;
330
329 if (icsk->icsk_ca_state == TCP_CA_Disorder || 331 if (icsk->icsk_ca_state == TCP_CA_Disorder ||
330 icsk->icsk_ca_state == TCP_CA_Recovery) { 332 icsk->icsk_ca_state == TCP_CA_Recovery) {
331 if (tcp_is_sack(tp)) { 333 if (tcp_is_sack(tp)) {
332 if (icsk->icsk_ca_state == TCP_CA_Recovery) 334 if (icsk->icsk_ca_state == TCP_CA_Recovery)
333 NET_INC_STATS_BH(LINUX_MIB_TCPSACKRECOVERYFAIL); 335 mib_idx = LINUX_MIB_TCPSACKRECOVERYFAIL;
334 else 336 else
335 NET_INC_STATS_BH(LINUX_MIB_TCPSACKFAILURES); 337 mib_idx = LINUX_MIB_TCPSACKFAILURES;
336 } else { 338 } else {
337 if (icsk->icsk_ca_state == TCP_CA_Recovery) 339 if (icsk->icsk_ca_state == TCP_CA_Recovery)
338 NET_INC_STATS_BH(LINUX_MIB_TCPRENORECOVERYFAIL); 340 mib_idx = LINUX_MIB_TCPRENORECOVERYFAIL;
339 else 341 else
340 NET_INC_STATS_BH(LINUX_MIB_TCPRENOFAILURES); 342 mib_idx = LINUX_MIB_TCPRENOFAILURES;
341 } 343 }
342 } else if (icsk->icsk_ca_state == TCP_CA_Loss) { 344 } else if (icsk->icsk_ca_state == TCP_CA_Loss) {
343 NET_INC_STATS_BH(LINUX_MIB_TCPLOSSFAILURES); 345 mib_idx = LINUX_MIB_TCPLOSSFAILURES;
344 } else { 346 } else {
345 NET_INC_STATS_BH(LINUX_MIB_TCPTIMEOUTS); 347 mib_idx = LINUX_MIB_TCPTIMEOUTS;
346 } 348 }
349 NET_INC_STATS_BH(mib_idx);
347 } 350 }
348 351
349 if (tcp_use_frto(sk)) { 352 if (tcp_use_frto(sk)) {