about summary refs log tree commit diff stats
path: root/net/ipv4
diff options
context:
space:
mode:
authorDavid S. Miller <davem@davemloft.net>2012-05-07 23:35:40 -0400
committerDavid S. Miller <davem@davemloft.net>2012-05-07 23:35:40 -0400
commit0d6c4a2e4641bbc556dd74d3aa158c413a972492 (patch)
treeda944af17682659bb433dc2282dcb48380c14cd1 /net/ipv4
parent6e06c0e2347ec79d0bd5702b2438fe883f784545 (diff)
parent1c430a727fa512500a422ffe4712166c550ea06a (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Conflicts: drivers/net/ethernet/intel/e1000e/param.c drivers/net/wireless/iwlwifi/iwl-agn-rx.c drivers/net/wireless/iwlwifi/iwl-trans-pcie-rx.c drivers/net/wireless/iwlwifi/iwl-trans.h Resolved the iwlwifi conflict with mainline using 3-way diff posted by John Linville and Stephen Rothwell. In 'net' we added a bug fix to make iwlwifi report a more accurate skb->truesize but this conflicted with RX path changes that happened meanwhile in net-next. In e1000e a conflict arose in the validation code for settings of adapter->itr. 'net-next' had more sophisticated logic so that logic was used. Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/ipv4')
-rw-r--r--net/ipv4/inet_diag.c2
-rw-r--r--net/ipv4/tcp.c9
-rw-r--r--net/ipv4/tcp_input.c13
-rw-r--r--net/ipv4/udp_diag.c9
4 files changed, 23 insertions, 10 deletions
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index 9f24028a3ba..46d1e7199a8 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -141,7 +141,7 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
141 goto rtattr_failure; 141 goto rtattr_failure;
142 142
143 if (icsk == NULL) { 143 if (icsk == NULL) {
144 r->idiag_rqueue = r->idiag_wqueue = 0; 144 handler->idiag_get_info(sk, r, NULL);
145 goto out; 145 goto out;
146 } 146 }
147 147
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index c2cff8b6277..565406287f6 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -3515,7 +3515,7 @@ void __init tcp_init(void)
3515{ 3515{
3516 struct sk_buff *skb = NULL; 3516 struct sk_buff *skb = NULL;
3517 unsigned long limit; 3517 unsigned long limit;
3518 int max_share, cnt; 3518 int max_rshare, max_wshare, cnt;
3519 unsigned int i; 3519 unsigned int i;
3520 unsigned long jiffy = jiffies; 3520 unsigned long jiffy = jiffies;
3521 3521
@@ -3575,15 +3575,16 @@ void __init tcp_init(void)
3575 tcp_init_mem(&init_net); 3575 tcp_init_mem(&init_net);
3576 /* Set per-socket limits to no more than 1/128 the pressure threshold */ 3576 /* Set per-socket limits to no more than 1/128 the pressure threshold */
3577 limit = nr_free_buffer_pages() << (PAGE_SHIFT - 7); 3577 limit = nr_free_buffer_pages() << (PAGE_SHIFT - 7);
3578 max_share = min(4UL*1024*1024, limit); 3578 max_wshare = min(4UL*1024*1024, limit);
3579 max_rshare = min(6UL*1024*1024, limit);
3579 3580
3580 sysctl_tcp_wmem[0] = SK_MEM_QUANTUM; 3581 sysctl_tcp_wmem[0] = SK_MEM_QUANTUM;
3581 sysctl_tcp_wmem[1] = 16*1024; 3582 sysctl_tcp_wmem[1] = 16*1024;
3582 sysctl_tcp_wmem[2] = max(64*1024, max_share); 3583 sysctl_tcp_wmem[2] = max(64*1024, max_wshare);
3583 3584
3584 sysctl_tcp_rmem[0] = SK_MEM_QUANTUM; 3585 sysctl_tcp_rmem[0] = SK_MEM_QUANTUM;
3585 sysctl_tcp_rmem[1] = 87380; 3586 sysctl_tcp_rmem[1] = 87380;
3586 sysctl_tcp_rmem[2] = max(87380, max_share); 3587 sysctl_tcp_rmem[2] = max(87380, max_rshare);
3587 3588
3588 pr_info("Hash tables configured (established %u bind %u)\n", 3589 pr_info("Hash tables configured (established %u bind %u)\n",
3589 tcp_hashinfo.ehash_mask + 1, tcp_hashinfo.bhash_size); 3590 tcp_hashinfo.ehash_mask + 1, tcp_hashinfo.bhash_size);
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 7b2d351f24d..eb58b94301e 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -85,7 +85,7 @@ int sysctl_tcp_ecn __read_mostly = 2;
85EXPORT_SYMBOL(sysctl_tcp_ecn); 85EXPORT_SYMBOL(sysctl_tcp_ecn);
86int sysctl_tcp_dsack __read_mostly = 1; 86int sysctl_tcp_dsack __read_mostly = 1;
87int sysctl_tcp_app_win __read_mostly = 31; 87int sysctl_tcp_app_win __read_mostly = 31;
88int sysctl_tcp_adv_win_scale __read_mostly = 2; 88int sysctl_tcp_adv_win_scale __read_mostly = 1;
89EXPORT_SYMBOL(sysctl_tcp_adv_win_scale); 89EXPORT_SYMBOL(sysctl_tcp_adv_win_scale);
90 90
91int sysctl_tcp_stdurg __read_mostly; 91int sysctl_tcp_stdurg __read_mostly;
@@ -496,7 +496,7 @@ static inline void tcp_rcv_rtt_measure(struct tcp_sock *tp)
496 goto new_measure; 496 goto new_measure;
497 if (before(tp->rcv_nxt, tp->rcv_rtt_est.seq)) 497 if (before(tp->rcv_nxt, tp->rcv_rtt_est.seq))
498 return; 498 return;
499 tcp_rcv_rtt_update(tp, jiffies - tp->rcv_rtt_est.time, 1); 499 tcp_rcv_rtt_update(tp, tcp_time_stamp - tp->rcv_rtt_est.time, 1);
500 500
501new_measure: 501new_measure:
502 tp->rcv_rtt_est.seq = tp->rcv_nxt + tp->rcv_wnd; 502 tp->rcv_rtt_est.seq = tp->rcv_nxt + tp->rcv_wnd;
@@ -2904,11 +2904,14 @@ static inline void tcp_complete_cwr(struct sock *sk)
2904 2904
2905 /* Do not moderate cwnd if it's already undone in cwr or recovery. */ 2905 /* Do not moderate cwnd if it's already undone in cwr or recovery. */
2906 if (tp->undo_marker) { 2906 if (tp->undo_marker) {
2907 if (inet_csk(sk)->icsk_ca_state == TCP_CA_CWR) 2907 if (inet_csk(sk)->icsk_ca_state == TCP_CA_CWR) {
2908 tp->snd_cwnd = min(tp->snd_cwnd, tp->snd_ssthresh); 2908 tp->snd_cwnd = min(tp->snd_cwnd, tp->snd_ssthresh);
2909 else /* PRR */ 2909 tp->snd_cwnd_stamp = tcp_time_stamp;
2910 } else if (tp->snd_ssthresh < TCP_INFINITE_SSTHRESH) {
2911 /* PRR algorithm. */
2910 tp->snd_cwnd = tp->snd_ssthresh; 2912 tp->snd_cwnd = tp->snd_ssthresh;
2911 tp->snd_cwnd_stamp = tcp_time_stamp; 2913 tp->snd_cwnd_stamp = tcp_time_stamp;
2914 }
2912 } 2915 }
2913 tcp_ca_event(sk, CA_EVENT_COMPLETE_CWR); 2916 tcp_ca_event(sk, CA_EVENT_COMPLETE_CWR);
2914} 2917}
diff --git a/net/ipv4/udp_diag.c b/net/ipv4/udp_diag.c
index 8a949f19deb..a7f86a3cd50 100644
--- a/net/ipv4/udp_diag.c
+++ b/net/ipv4/udp_diag.c
@@ -146,9 +146,17 @@ static int udp_diag_dump_one(struct sk_buff *in_skb, const struct nlmsghdr *nlh,
146 return udp_dump_one(&udp_table, in_skb, nlh, req); 146 return udp_dump_one(&udp_table, in_skb, nlh, req);
147} 147}
148 148
149static void udp_diag_get_info(struct sock *sk, struct inet_diag_msg *r,
150 void *info)
151{
152 r->idiag_rqueue = sk_rmem_alloc_get(sk);
153 r->idiag_wqueue = sk_wmem_alloc_get(sk);
154}
155
149static const struct inet_diag_handler udp_diag_handler = { 156static const struct inet_diag_handler udp_diag_handler = {
150 .dump = udp_diag_dump, 157 .dump = udp_diag_dump,
151 .dump_one = udp_diag_dump_one, 158 .dump_one = udp_diag_dump_one,
159 .idiag_get_info = udp_diag_get_info,
152 .idiag_type = IPPROTO_UDP, 160 .idiag_type = IPPROTO_UDP,
153}; 161};
154 162
@@ -167,6 +175,7 @@ static int udplite_diag_dump_one(struct sk_buff *in_skb, const struct nlmsghdr *
167static const struct inet_diag_handler udplite_diag_handler = { 175static const struct inet_diag_handler udplite_diag_handler = {
168 .dump = udplite_diag_dump, 176 .dump = udplite_diag_dump,
169 .dump_one = udplite_diag_dump_one, 177 .dump_one = udplite_diag_dump_one,
178 .idiag_get_info = udp_diag_get_info,
170 .idiag_type = IPPROTO_UDPLITE, 179 .idiag_type = IPPROTO_UDPLITE,
171}; 180};
172 181