author    David S. Miller <davem@davemloft.net>  2013-11-04 13:48:30 -0500
committer David S. Miller <davem@davemloft.net>  2013-11-04 13:48:30 -0500
commit    394efd19d5fcae936261bd48e5b33b21897aacf8 (patch)
tree      c48cf3ddbb07fd87309f1abdf31a27c71330e587 /net/ipv4
parent    f421436a591d34fa5279b54a96ac07d70250cc8d (diff)
parent    be408cd3e1fef73e9408b196a79b9934697fe3b1 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Conflicts:
	drivers/net/ethernet/emulex/benet/be.h
	drivers/net/netconsole.c
	net/bridge/br_private.h

Three mostly trivial conflicts.

The net/bridge/br_private.h conflict was a function signature (argument
addition) change overlapping with the extern removals from Joe Perches.

In drivers/net/netconsole.c we had one change adjusting a printk message
whilst another changed "printk(KERN_INFO" into "pr_info(".

Lastly, the emulex change was a new inline function addition overlapping
with Joe Perches's extern removals.

Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/ipv4')
-rw-r--r--  net/ipv4/netfilter/arp_tables.c |  5 +++++
-rw-r--r--  net/ipv4/netfilter/ip_tables.c  |  5 +++++
-rw-r--r--  net/ipv4/netfilter/ipt_ULOG.c   |  7 +------
-rw-r--r--  net/ipv4/tcp_input.c            | 34 +++++++++++++++++++++++++---------
-rw-r--r--  net/ipv4/tcp_offload.c          | 13 +++++--------
-rw-r--r--  net/ipv4/xfrm4_policy.c         |  8 ++++++--
6 files changed, 47 insertions(+), 25 deletions(-)
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index 85a4f21aac1a..59da7cde0724 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -271,6 +271,11 @@ unsigned int arpt_do_table(struct sk_buff *skb,
 	local_bh_disable();
 	addend = xt_write_recseq_begin();
 	private = table->private;
+	/*
+	 * Ensure we load private-> members after we've fetched the base
+	 * pointer.
+	 */
+	smp_read_barrier_depends();
 	table_base = private->entries[smp_processor_id()];
 
 	e = get_entry(table_base, private->hook_entry[hook]);
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index d23118d95ff9..718dfbd30cbe 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -327,6 +327,11 @@ ipt_do_table(struct sk_buff *skb,
 	addend = xt_write_recseq_begin();
 	private = table->private;
 	cpu        = smp_processor_id();
+	/*
+	 * Ensure we load private-> members after we've fetched the base
+	 * pointer.
+	 */
+	smp_read_barrier_depends();
 	table_base = private->entries[cpu];
 	jumpstack  = (struct ipt_entry **)private->jumpstack[cpu];
 	stackptr   = per_cpu_ptr(private->stackptr, cpu);
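
The two xtables hunks above are the same fix: a dependent-load barrier goes between fetching table->private and dereferencing it, so a CPU cannot use the newly published private pointer while still loading stale members through it. The barrier pairs with the write side in the table-replacement path. As a rough userspace analogue (hypothetical demo types; in C11 atomics, memory_order_consume gives the dependency ordering that smp_read_barrier_depends() asserts):

#include <stdatomic.h>
#include <stddef.h>

struct table_info {                     /* stand-in for struct xt_table_info */
	void *entries;                  /* rule blob, initialised before publish */
};

static _Atomic(struct table_info *) table_private;

/* Writer side (analogue of xt_replace_table()): the release store makes
 * the initialisation of *newinfo visible before the pointer itself. */
static void publish_table(struct table_info *newinfo)
{
	atomic_store_explicit(&table_private, newinfo, memory_order_release);
}

/* Reader side (analogue of arpt_do_table()/ipt_do_table()): the consume
 * load orders later loads that depend on the pointer value, which is
 * what the added smp_read_barrier_depends() provides in the kernel. */
static void *lookup_entries(void)
{
	struct table_info *private =
		atomic_load_explicit(&table_private, memory_order_consume);

	return private ? private->entries : NULL;
}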
diff --git a/net/ipv4/netfilter/ipt_ULOG.c b/net/ipv4/netfilter/ipt_ULOG.c
index cbc22158af49..9cb993cd224b 100644
--- a/net/ipv4/netfilter/ipt_ULOG.c
+++ b/net/ipv4/netfilter/ipt_ULOG.c
@@ -220,6 +220,7 @@ static void ipt_ulog_packet(struct net *net,
 	ub->qlen++;
 
 	pm = nlmsg_data(nlh);
+	memset(pm, 0, sizeof(*pm));
 
 	/* We might not have a timestamp, get one */
 	if (skb->tstamp.tv64 == 0)
@@ -238,8 +239,6 @@ static void ipt_ulog_packet(struct net *net,
 	}
 	else if (loginfo->prefix[0] != '\0')
 		strncpy(pm->prefix, loginfo->prefix, sizeof(pm->prefix));
-	else
-		*(pm->prefix) = '\0';
 
 	if (in && in->hard_header_len > 0 &&
 	    skb->mac_header != skb->network_header &&
@@ -251,13 +250,9 @@ static void ipt_ulog_packet(struct net *net,
 
 	if (in)
 		strncpy(pm->indev_name, in->name, sizeof(pm->indev_name));
-	else
-		pm->indev_name[0] = '\0';
 
 	if (out)
 		strncpy(pm->outdev_name, out->name, sizeof(pm->outdev_name));
-	else
-		pm->outdev_name[0] = '\0';
 
 	/* copy_len <= skb->len, so can't fail. */
 	if (skb_copy_bits(skb, 0, pm->payload, copy_len) < 0)
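
The ULOG change replaces the per-field '\0' terminators with one memset() over the whole netlink message. Besides being shorter, zeroing the entire struct also clears compiler-inserted padding and the tails of the fixed-size name buffers, so no uninitialised kernel bytes can reach userspace through the netlink copy. A minimal userspace sketch of the pattern (hypothetical struct layout):

#include <string.h>

struct ulog_msg {                       /* hypothetical, shaped like ulog_packet_msg_t */
	char prefix[32];
	char indev_name[16];
	char outdev_name[16];
	/* padding and unwritten buffer tails are missed by field-by-field
	 * assignment, but a whole-struct memset() clears them too */
};

static void fill_msg(struct ulog_msg *pm, const char *prefix,
		     const char *in, const char *out)
{
	memset(pm, 0, sizeof(*pm));     /* one clear replaces every else-branch */

	if (prefix)
		strncpy(pm->prefix, prefix, sizeof(pm->prefix) - 1);
	if (in)
		strncpy(pm->indev_name, in, sizeof(pm->indev_name) - 1);
	if (out)
		strncpy(pm->outdev_name, out, sizeof(pm->outdev_name) - 1);
}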
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index b935397c703c..63095b218b4a 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -2903,7 +2903,8 @@ static inline bool tcp_ack_update_rtt(struct sock *sk, const int flag,
 	 * left edge of the send window.
 	 * See draft-ietf-tcplw-high-performance-00, section 3.3.
 	 */
-	if (seq_rtt < 0 && tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr)
+	if (seq_rtt < 0 && tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr &&
+	    flag & FLAG_ACKED)
 		seq_rtt = tcp_time_stamp - tp->rx_opt.rcv_tsecr;
 
 	if (seq_rtt < 0)
@@ -2918,14 +2919,19 @@ static inline bool tcp_ack_update_rtt(struct sock *sk, const int flag,
 }
 
 /* Compute time elapsed between (last) SYNACK and the ACK completing 3WHS. */
-static void tcp_synack_rtt_meas(struct sock *sk, struct request_sock *req)
+static void tcp_synack_rtt_meas(struct sock *sk, const u32 synack_stamp)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	s32 seq_rtt = -1;
 
-	if (tp->lsndtime && !tp->total_retrans)
-		seq_rtt = tcp_time_stamp - tp->lsndtime;
-	tcp_ack_update_rtt(sk, FLAG_SYN_ACKED, seq_rtt, -1);
+	if (synack_stamp && !tp->total_retrans)
+		seq_rtt = tcp_time_stamp - synack_stamp;
+
+	/* If the ACK acks both the SYNACK and the (Fast Open'd) data packets
+	 * sent in SYN_RECV, SYNACK RTT is the smooth RTT computed in tcp_ack()
+	 */
+	if (!tp->srtt)
+		tcp_ack_update_rtt(sk, FLAG_SYN_ACKED, seq_rtt, -1);
 }
 
 static void tcp_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
@@ -3028,6 +3034,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
 	s32 seq_rtt = -1;
 	s32 ca_seq_rtt = -1;
 	ktime_t last_ackt = net_invalid_timestamp();
+	bool rtt_update;
 
 	while ((skb = tcp_write_queue_head(sk)) && skb != tcp_send_head(sk)) {
 		struct tcp_skb_cb *scb = TCP_SKB_CB(skb);
@@ -3104,14 +3111,13 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
 	if (skb && (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED))
 		flag |= FLAG_SACK_RENEGING;
 
-	if (tcp_ack_update_rtt(sk, flag, seq_rtt, sack_rtt) ||
-	    (flag & FLAG_ACKED))
-		tcp_rearm_rto(sk);
+	rtt_update = tcp_ack_update_rtt(sk, flag, seq_rtt, sack_rtt);
 
 	if (flag & FLAG_ACKED) {
 		const struct tcp_congestion_ops *ca_ops
 			= inet_csk(sk)->icsk_ca_ops;
 
+		tcp_rearm_rto(sk);
 		if (unlikely(icsk->icsk_mtup.probe_size &&
 			     !after(tp->mtu_probe.probe_seq_end, tp->snd_una))) {
 			tcp_mtup_probe_success(sk);
@@ -3150,6 +3156,13 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
 
 			ca_ops->pkts_acked(sk, pkts_acked, rtt_us);
 		}
+	} else if (skb && rtt_update && sack_rtt >= 0 &&
+		   sack_rtt > (s32)(now - TCP_SKB_CB(skb)->when)) {
+		/* Do not re-arm RTO if the sack RTT is measured from data sent
+		 * after when the head was last (re)transmitted. Otherwise the
+		 * timeout may continue to extend in loss recovery.
+		 */
+		tcp_rearm_rto(sk);
 	}
 
 #if FASTRETRANS_DEBUG > 0
@@ -5626,6 +5639,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
 	struct request_sock *req;
 	int queued = 0;
 	bool acceptable;
+	u32 synack_stamp;
 
 	tp->rx_opt.saw_tstamp = 0;
 
@@ -5708,9 +5722,11 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
 		 * so release it.
 		 */
 		if (req) {
+			synack_stamp = tcp_rsk(req)->snt_synack;
 			tp->total_retrans = req->num_retrans;
 			reqsk_fastopen_remove(sk, req, false);
 		} else {
+			synack_stamp = tp->lsndtime;
 			/* Make sure socket is routed, for correct metrics. */
 			icsk->icsk_af_ops->rebuild_header(sk);
 			tcp_init_congestion_control(sk);
@@ -5733,7 +5749,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
 		tp->snd_una = TCP_SKB_CB(skb)->ack_seq;
 		tp->snd_wnd = ntohs(th->window) << tp->rx_opt.snd_wscale;
 		tcp_init_wl(tp, TCP_SKB_CB(skb)->seq);
-		tcp_synack_rtt_meas(sk, req);
+		tcp_synack_rtt_meas(sk, synack_stamp);
 
 		if (tp->rx_opt.tstamp_ok)
 			tp->advmss -= TCPOLEN_TSTAMP_ALIGNED;
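
Taken together, the tcp_input.c hunks tighten when an RTT sample may be taken and when the retransmission timer is re-armed: the timestamp-based sample now requires FLAG_ACKED, the SYNACK timestamp is captured into synack_stamp before reqsk_fastopen_remove() can free the request sock, and the RTO is no longer re-armed from a SACK-derived RTT measured on data sent after the head's last (re)transmission. A sketch of the SYNACK RTT rule on the jiffies clock (hypothetical standalone helper, not the kernel's):

#include <stdint.h>

/* tcp_time_stamp is jiffies-based, so the RTT is the unsigned 32-bit
 * difference between "now" and the stored SYNACK stamp; the subtraction
 * stays correct across wraparound. */
static int32_t synack_rtt_sample(uint32_t now, uint32_t synack_stamp,
				 uint32_t total_retrans)
{
	/* No stamp, or a retransmitted SYNACK: the sample would be
	 * ambiguous, so report "no measurement" (-1), matching the
	 * seq_rtt = -1 convention in tcp_synack_rtt_meas(). */
	if (!synack_stamp || total_retrans)
		return -1;

	return (int32_t)(now - synack_stamp);
}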
diff --git a/net/ipv4/tcp_offload.c b/net/ipv4/tcp_offload.c
index a7a5583eab04..a2b68a108eae 100644
--- a/net/ipv4/tcp_offload.c
+++ b/net/ipv4/tcp_offload.c
@@ -18,6 +18,7 @@ struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
 				       netdev_features_t features)
 {
 	struct sk_buff *segs = ERR_PTR(-EINVAL);
+	unsigned int sum_truesize = 0;
 	struct tcphdr *th;
 	unsigned int thlen;
 	unsigned int seq;
@@ -104,13 +105,7 @@ struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
 		if (copy_destructor) {
 			skb->destructor = gso_skb->destructor;
 			skb->sk = gso_skb->sk;
-			/* {tcp|sock}_wfree() use exact truesize accounting :
-			 * sum(skb->truesize) MUST be exactly be gso_skb->truesize
-			 * So we account mss bytes of 'true size' for each segment.
-			 * The last segment will contain the remaining.
-			 */
-			skb->truesize = mss;
-			gso_skb->truesize -= mss;
+			sum_truesize += skb->truesize;
 		}
 		skb = skb->next;
 		th = tcp_hdr(skb);
@@ -127,7 +122,9 @@ struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
 	if (copy_destructor) {
 		swap(gso_skb->sk, skb->sk);
 		swap(gso_skb->destructor, skb->destructor);
-		swap(gso_skb->truesize, skb->truesize);
+		sum_truesize += skb->truesize;
+		atomic_add(sum_truesize - gso_skb->truesize,
+			   &skb->sk->sk_wmem_alloc);
 	}
 
 	delta = htonl(oldlen + (skb_tail_pointer(skb) -
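
The GSO change stops pretending each segment's truesize is exactly mss; instead it sums the real truesize of every segment and settles the difference against the parent skb's charge on sk_wmem_alloc with a single atomic_add(). The arithmetic below illustrates why the socket's write-memory accounting still balances (all numbers hypothetical):

#include <stdio.h>

int main(void)
{
	unsigned int gso_truesize = 65856;   /* parent gso_skb->truesize */
	unsigned int seg_truesize[4] = { 17000, 17000, 17000, 15000 };
	unsigned int sum_truesize = 0;
	long wmem_alloc = gso_truesize;      /* stand-in for sk->sk_wmem_alloc */

	for (int i = 0; i < 4; i++)
		sum_truesize += seg_truesize[i];

	/* atomic_add(sum_truesize - gso_skb->truesize, &sk->sk_wmem_alloc):
	 * the socket ends up charged for exactly the segments it now owns. */
	wmem_alloc += (long)sum_truesize - (long)gso_truesize;

	printf("wmem_alloc = %ld, sum of segment truesizes = %u\n",
	       wmem_alloc, sum_truesize);
	return 0;
}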
diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
index ccde54248c8c..e1a63930a967 100644
--- a/net/ipv4/xfrm4_policy.c
+++ b/net/ipv4/xfrm4_policy.c
@@ -104,10 +104,14 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse)
 	const struct iphdr *iph = ip_hdr(skb);
 	u8 *xprth = skb_network_header(skb) + iph->ihl * 4;
 	struct flowi4 *fl4 = &fl->u.ip4;
+	int oif = 0;
+
+	if (skb_dst(skb))
+		oif = skb_dst(skb)->dev->ifindex;
 
 	memset(fl4, 0, sizeof(struct flowi4));
 	fl4->flowi4_mark = skb->mark;
-	fl4->flowi4_oif = skb_dst(skb)->dev->ifindex;
+	fl4->flowi4_oif = reverse ? skb->skb_iif : oif;
 
 	if (!ip_is_fragment(iph)) {
 		switch (iph->protocol) {
@@ -236,7 +240,7 @@ static struct dst_ops xfrm4_dst_ops = {
 	.destroy =		xfrm4_dst_destroy,
 	.ifdown =		xfrm4_dst_ifdown,
 	.local_out =		__ip_local_out,
-	.gc_thresh =		1024,
+	.gc_thresh =		32768,
 };
 
 static struct xfrm_policy_afinfo xfrm4_policy_afinfo = {
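
The _decode_session4() hunk guards against a NULL skb_dst(): previously the forward direction dereferenced skb_dst(skb)->dev unconditionally, which can crash for packets without a dst attached, and the reverse direction now takes its interface index from skb->skb_iif. The raised gc_thresh (1024 to 32768) lets far more xfrm4 dst entries accumulate before the garbage collector kicks in. A minimal sketch of the guarded interface lookup (hypothetical demo types, not the kernel structs):

struct net_device_demo { int ifindex; };
struct dst_entry_demo  { struct net_device_demo *dev; };
struct sk_buff_demo {
	struct dst_entry_demo *dst;     /* skb_dst(): may legitimately be NULL */
	int skb_iif;                    /* input interface index */
};

static int decode_oif(const struct sk_buff_demo *skb, int reverse)
{
	int oif = 0;

	if (skb->dst)                   /* the guard the patch adds */
		oif = skb->dst->dev->ifindex;

	return reverse ? skb->skb_iif : oif;
}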