| author | Yuvaraja Mariappan <ymariappan@gmail.com> | 2015-10-06 13:53:29 -0400 |
|---|---|---|
| committer | David S. Miller <davem@davemloft.net> | 2015-10-07 08:01:04 -0400 |
| commit | 686a562449af96a0e8c18c6f1b87b47ff8c36de8 (patch) | |
| tree | 48308fd9e59fb9947c50abd4d69f8977ec334e4d /net/ipv4/tcp.c | |
| parent | 729ecbc77a1fe763de409be0307c61c1fa32cc06 (diff) | |
net: ipv4: tcp.c Fixed an assignment coding style issue
Fixed an assignment coding style issue
Signed-off-by: Yuvaraja Mariappan <ymariappan@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
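For readers unfamiliar with the style rule being applied here: the kernel's checkpatch.pl script flags assignments embedded inside `if` conditions ("do not use assignment in if condition"), and this patch splits each such assignment onto its own line before the test. The sketch below is a minimal, standalone illustration of the before/after pattern; `do_work()` is a hypothetical stand-in for calls such as sk_stream_wait_connect(), not code from tcp.c.

```c
#include <stdio.h>

/* Hypothetical helper standing in for a kernel call that returns 0 on
 * success or a negative error code on failure.
 */
static int do_work(void)
{
	return 0;
}

int main(void)
{
	int err;

	/* Old style, flagged by checkpatch.pl: assignment inside the condition.
	 *
	 *	if ((err = do_work()) != 0)
	 *		goto out_err;
	 */

	/* New style used throughout this patch: assign first, then test. */
	err = do_work();
	if (err != 0) {
		fprintf(stderr, "do_work failed: %d\n", err);
		return 1;
	}

	printf("ok\n");
	return 0;
}
```

Both forms are functionally equivalent; the split version simply makes the assignment explicit and keeps the condition free of side effects, which is what the checkpatch rule asks for.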
Diffstat (limited to 'net/ipv4/tcp.c')
| -rw-r--r-- | net/ipv4/tcp.c | 24 |
1 file changed, 16 insertions, 8 deletions
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 3c96fa87ff9e..ac1bdbb50352 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -900,7 +900,8 @@ static ssize_t do_tcp_sendpages(struct sock *sk, struct page *page, int offset,
 	 */
 	if (((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) &&
 	    !tcp_passive_fastopen(sk)) {
-		if ((err = sk_stream_wait_connect(sk, &timeo)) != 0)
+		err = sk_stream_wait_connect(sk, &timeo);
+		if (err != 0)
 			goto out_err;
 	}
 
@@ -967,7 +968,8 @@ new_segment:
 
 		copied += copy;
 		offset += copy;
-		if (!(size -= copy)) {
+		size -= copy;
+		if (!size) {
 			tcp_tx_timestamp(sk, skb);
 			goto out;
 		}
@@ -988,7 +990,8 @@ wait_for_memory:
 		tcp_push(sk, flags & ~MSG_MORE, mss_now,
 			 TCP_NAGLE_PUSH, size_goal);
 
-		if ((err = sk_stream_wait_memory(sk, &timeo)) != 0)
+		err = sk_stream_wait_memory(sk, &timeo);
+		if (err != 0)
 			goto do_error;
 
 		mss_now = tcp_send_mss(sk, &size_goal, flags);
@@ -1111,7 +1114,8 @@ int tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
 	 */
 	if (((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) &&
 	    !tcp_passive_fastopen(sk)) {
-		if ((err = sk_stream_wait_connect(sk, &timeo)) != 0)
+		err = sk_stream_wait_connect(sk, &timeo);
+		if (err != 0)
 			goto do_error;
 	}
 
@@ -1267,7 +1271,8 @@ wait_for_memory:
 			tcp_push(sk, flags & ~MSG_MORE, mss_now,
 				 TCP_NAGLE_PUSH, size_goal);
 
-			if ((err = sk_stream_wait_memory(sk, &timeo)) != 0)
+			err = sk_stream_wait_memory(sk, &timeo);
+			if (err != 0)
 				goto do_error;
 
 			mss_now = tcp_send_mss(sk, &size_goal, flags);
@@ -1767,7 +1772,8 @@ int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock,
 
 			/* __ Restore normal policy in scheduler __ */
 
-			if ((chunk = len - tp->ucopy.len) != 0) {
+			chunk = len - tp->ucopy.len;
+			if (chunk != 0) {
 				NET_ADD_STATS_USER(sock_net(sk), LINUX_MIB_TCPDIRECTCOPYFROMBACKLOG, chunk);
 				len -= chunk;
 				copied += chunk;
@@ -1778,7 +1784,8 @@ int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock,
 do_prequeue:
 				tcp_prequeue_process(sk);
 
-				if ((chunk = len - tp->ucopy.len) != 0) {
+				chunk = len - tp->ucopy.len;
+				if (chunk != 0) {
 					NET_ADD_STATS_USER(sock_net(sk), LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk);
 					len -= chunk;
 					copied += chunk;
@@ -2230,7 +2237,8 @@ int tcp_disconnect(struct sock *sk, int flags)
 	sk->sk_shutdown = 0;
 	sock_reset_flag(sk, SOCK_DONE);
 	tp->srtt_us = 0;
-	if ((tp->write_seq += tp->max_window + 2) == 0)
+	tp->write_seq += tp->max_window + 2;
+	if (tp->write_seq == 0)
 		tp->write_seq = 1;
 	icsk->icsk_backoff = 0;
 	tp->snd_cwnd = 2;
