author     Linus Torvalds <torvalds@linux-foundation.org>  2012-06-02 19:22:51 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2012-06-02 19:22:51 -0400
commit     4fc3acf2918fa158dc651a0c824a23944e956919 (patch)
tree       3258f1037ee45c3799d12805d78679e41fdc98c9 /net
parent     63004afa718b1506fe9a286075b3b2d8c6ca2b9b (diff)
parent     9ca3cc6f3026946ba655e863ca2096339e667639 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking updates from David Miller:

 1) Make syn floods consume significantly less resources by
    a) Not pre-COW'ing routing metrics for SYN/ACKs
    b) Mirroring the device queue mapping of the SYN for the SYN/ACK reply.
    Both from Eric Dumazet.

 2) Fix calculation errors in Byte Queue Limiting, from Hiroaki SHIMODA.

 3) Validate the length requested when building a paged SKB for a socket,
    so we don't overrun the page vector accidentally.  From Jason Wang.

 4) When netlabel is disabled, we abort all IP option processing when we
    see a CIPSO option.  This isn't the right thing to do; we should simply
    skip over it and continue processing the remaining options (if any).
    Fix from Paul Moore.

 5) SRIOV fixes for the Mellanox driver from Jack Morgenstein and Marcel
    Apfelbaum.

 6) 8139cp enables the receiver before the ring address is properly
    programmed, which potentially lets the device crap over random memory.
    Fix from Jason Wang.

 7) e1000/e1000e fixes for i217 RST handling, and an improper buffer
    address reference in jumbo RX frame processing, from Bruce Allan and
    Sebastian Andrzej Siewior, respectively.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net:
  fec_mpc52xx: fix timestamp filtering
  mcs7830: Implement link state detection
  e1000e: fix Rapid Start Technology support for i217
  e1000: look into the page instead of skb->data for e1000_tbi_adjust_stats()
  r8169: call netif_napi_del at errpaths and at driver unload
  tcp: reflect SYN queue_mapping into SYNACK packets
  tcp: do not create inetpeer on SYNACK message
  8139cp/8139too: terminate the eeprom access with the right opmode
  8139cp: set ring address before enabling receiver
  cipso: handle CIPSO options correctly when NetLabel is disabled
  net: sock: validate data_len before allocating skb in sock_alloc_send_pskb()
  bql: Avoid possible inconsistent calculation.
  bql: Avoid unneeded limit decrement.
  bql: Fix POSDIFF() to integer overflow aware.
  net/mlx4_core: Fix obscure mlx4_cmd_box parameter in QUERY_DEV_CAP
  net/mlx4_core: Check port out-of-range before using in mlx4_slave_cap
  net/mlx4_core: Fixes for VF / Guest startup flow
  net/mlx4_en: Fix improper use of "port" parameter in mlx4_en_event
  net/mlx4_core: Fix number of EQs used in ICM initialisation
  net/mlx4_core: Fix the slave_id out-of-range test in mlx4_eq_int
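Item 4 of the pull message is about stepping over an unhandled CIPSO option rather than rejecting the whole IP options block. Below is a minimal, standalone sketch of that skip-over idea; it is not the kernel's actual ip_options_compile()/cipso_v4_validate() code. The type/length walk and the CIPSO option type (134) follow the standard IPv4 option encoding, and the sample bytes in main() are made up for illustration.

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

#define IPOPT_END   0
#define IPOPT_NOOP  1
#define IPOPT_CIPSO 134		/* standard CIPSO option type */

/* Walk an IPv4 options block as type/length/value records and skip
 * options we do not interpret (including CIPSO when no labeling engine
 * is configured) instead of rejecting the packet.  Returns 0 if the
 * block is well formed, -1 otherwise. */
static int walk_ip_options(const uint8_t *opts, size_t len)
{
	size_t off = 0;

	while (off < len) {
		uint8_t type = opts[off];

		if (type == IPOPT_END)
			break;
		if (type == IPOPT_NOOP) {
			off++;
			continue;
		}
		if (off + 1 >= len)
			return -1;		/* truncated TLV header */

		uint8_t optlen = opts[off + 1];

		if (optlen < 2 || off + optlen > len)
			return -1;		/* malformed option length */

		if (type == IPOPT_CIPSO)
			printf("CIPSO option (%u bytes): skipped, not rejected\n",
			       (unsigned int)optlen);

		off += optlen;			/* step over it and keep parsing */
	}
	return 0;
}

int main(void)
{
	/* NOOP, then a CIPSO-typed option of 6 bytes, then End-of-options. */
	const uint8_t opts[] = { 1, 134, 6, 0, 0, 1, 1, 0 };

	return walk_ip_options(opts, sizeof(opts)) ? 1 : 0;
}

The point is the last branch: an option we do not interpret is stepped over by its own length byte, and parsing continues with whatever follows.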
Diffstat (limited to 'net')
-rw-r--r--  net/core/sock.c                  | 7
-rw-r--r--  net/ipv4/inet_connection_sock.c  | 3
-rw-r--r--  net/ipv4/tcp_ipv4.c              | 9
-rw-r--r--  net/ipv6/tcp_ipv6.c              | 9
4 files changed, 19 insertions(+), 9 deletions(-)
diff --git a/net/core/sock.c b/net/core/sock.c
index 653f8c0aedc5..9e5b71fda6ec 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1592,6 +1592,11 @@ struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
 	gfp_t gfp_mask;
 	long timeo;
 	int err;
+	int npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
+
+	err = -EMSGSIZE;
+	if (npages > MAX_SKB_FRAGS)
+		goto failure;
 
 	gfp_mask = sk->sk_allocation;
 	if (gfp_mask & __GFP_WAIT)
@@ -1610,14 +1615,12 @@ struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
 		if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
 			skb = alloc_skb(header_len, gfp_mask);
 			if (skb) {
-				int npages;
 				int i;
 
 				/* No pages, we're done... */
 				if (!data_len)
 					break;
 
-				npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
 				skb->truesize += data_len;
 				skb_shinfo(skb)->nr_frags = npages;
 				for (i = 0; i < npages; i++) {
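The two sock_alloc_send_pskb() hunks above hoist the npages computation and the MAX_SKB_FRAGS bound check ahead of the allocation loop, so an oversized data_len is rejected with -EMSGSIZE before any skb or pages are allocated. A standalone model of just that arithmetic, assuming 4 KiB pages and an illustrative MAX_SKB_FRAGS of 18 (the real constant is defined in include/linux/skbuff.h and depends on PAGE_SIZE):

#include <stdio.h>
#include <errno.h>

/* Model of the new bound check: a request for data_len bytes of paged
 * data needs ceil(data_len / PAGE_SIZE) page fragments, and an skb can
 * only carry MAX_SKB_FRAGS of them.  4 KiB pages and 18 fragments are
 * illustrative values, not taken from a particular kernel config. */
#define MODEL_PAGE_SHIFT	12
#define MODEL_PAGE_SIZE		(1UL << MODEL_PAGE_SHIFT)
#define MODEL_MAX_SKB_FRAGS	18UL

static int check_data_len(unsigned long data_len)
{
	unsigned long npages = (data_len + MODEL_PAGE_SIZE - 1) >> MODEL_PAGE_SHIFT;

	if (npages > MODEL_MAX_SKB_FRAGS)
		return -EMSGSIZE;	/* reject before allocating anything */
	return 0;
}

int main(void)
{
	printf("64 KiB -> %d\n", check_data_len(64UL * 1024));   /* 16 pages, fits */
	printf(" 1 MiB -> %d\n", check_data_len(1024UL * 1024)); /* 256 pages, -EMSGSIZE */
	return 0;
}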
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 95e61596e605..f9ee7417f6a0 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -377,7 +377,8 @@ struct dst_entry *inet_csk_route_req(struct sock *sk,
 
 	flowi4_init_output(fl4, sk->sk_bound_dev_if, sk->sk_mark,
 			   RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
-			   sk->sk_protocol, inet_sk_flowi_flags(sk),
+			   sk->sk_protocol,
+			   inet_sk_flowi_flags(sk) & ~FLOWI_FLAG_PRECOW_METRICS,
 			   (opt && opt->opt.srr) ? opt->opt.faddr : ireq->rmt_addr,
 			   ireq->loc_addr, ireq->rmt_port, inet_sk(sk)->inet_sport);
 	security_req_classify_flow(req, flowi4_to_flowi(fl4));
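This hunk clears FLOWI_FLAG_PRECOW_METRICS from the flow flags used to route the SYN/ACK, so a SYN flood no longer forces routing metrics to be pre-COW'ed for connections that may never complete (item 1a of the pull message). The change itself is the usual clear-one-bit idiom; a tiny sketch with made-up flag values (the real FLOWI_FLAG_* constants live in include/net/flow.h):

#include <stdio.h>

/* Made-up flag values for illustration; the real constants may differ. */
#define FLOWI_FLAG_ANYSRC		0x01u
#define FLOWI_FLAG_PRECOW_METRICS	0x02u

int main(void)
{
	unsigned int flags = FLOWI_FLAG_ANYSRC | FLOWI_FLAG_PRECOW_METRICS;

	/* Same idiom as the patch: keep every flag except PRECOW_METRICS. */
	unsigned int synack_flags = flags & ~FLOWI_FLAG_PRECOW_METRICS;

	printf("before: %#x  after: %#x\n", flags, synack_flags);
	return 0;
}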
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index a43b87dfe800..c8d28c433b2b 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -824,7 +824,8 @@ static void tcp_v4_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
  */
 static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
 			      struct request_sock *req,
-			      struct request_values *rvp)
+			      struct request_values *rvp,
+			      u16 queue_mapping)
 {
 	const struct inet_request_sock *ireq = inet_rsk(req);
 	struct flowi4 fl4;
@@ -840,6 +841,7 @@ static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
 	if (skb) {
 		__tcp_v4_send_check(skb, ireq->loc_addr, ireq->rmt_addr);
 
+		skb_set_queue_mapping(skb, queue_mapping);
 		err = ip_build_and_send_pkt(skb, sk, ireq->loc_addr,
 					    ireq->rmt_addr,
 					    ireq->opt);
@@ -854,7 +856,7 @@ static int tcp_v4_rtx_synack(struct sock *sk, struct request_sock *req,
 			     struct request_values *rvp)
 {
 	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
-	return tcp_v4_send_synack(sk, NULL, req, rvp);
+	return tcp_v4_send_synack(sk, NULL, req, rvp, 0);
 }
 
 /*
@@ -1422,7 +1424,8 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
 	tcp_rsk(req)->snt_synack = tcp_time_stamp;
 
 	if (tcp_v4_send_synack(sk, dst, req,
-			       (struct request_values *)&tmp_ext) ||
+			       (struct request_values *)&tmp_ext,
+			       skb_get_queue_mapping(skb)) ||
 	    want_cookie)
 		goto drop_and_free;
 
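Taken together, the tcp_ipv4.c hunks thread the queue mapping of the incoming SYN, read with skb_get_queue_mapping() in tcp_v4_conn_request(), into tcp_v4_send_synack(), which stamps it onto the reply with skb_set_queue_mapping() (item 1b of the pull message). A toy userspace model of that mirroring, with a stand-in struct instead of the real struct sk_buff:

#include <stdio.h>
#include <stdint.h>

/* Toy stand-in for struct sk_buff; only the queue mapping matters here. */
struct toy_skb {
	uint16_t queue_mapping;
};

static uint16_t toy_skb_get_queue_mapping(const struct toy_skb *skb)
{
	return skb->queue_mapping;
}

static void toy_skb_set_queue_mapping(struct toy_skb *skb, uint16_t queue)
{
	skb->queue_mapping = queue;
}

/* Mirrors the patched flow: the queue the SYN arrived on is handed down
 * to SYN/ACK construction and copied onto the reply. */
static void send_synack(struct toy_skb *synack, uint16_t queue_mapping)
{
	toy_skb_set_queue_mapping(synack, queue_mapping);
	printf("SYN/ACK goes out on queue %u\n", synack->queue_mapping);
}

int main(void)
{
	struct toy_skb syn = { .queue_mapping = 3 };	/* queue the SYN came in on */
	struct toy_skb synack = { 0 };

	send_synack(&synack, toy_skb_get_queue_mapping(&syn));
	return 0;
}

On a multiqueue NIC this keeps both directions of the handshake on the same queue pair, which is part of what makes SYN/ACK generation cheaper under a flood.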
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 554d5999abc4..3a9aec29581a 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -476,7 +476,8 @@ out:
 
 
 static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req,
-			      struct request_values *rvp)
+			      struct request_values *rvp,
+			      u16 queue_mapping)
 {
 	struct inet6_request_sock *treq = inet6_rsk(req);
 	struct ipv6_pinfo *np = inet6_sk(sk);
@@ -513,6 +514,7 @@ static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req,
 		__tcp_v6_send_check(skb, &treq->loc_addr, &treq->rmt_addr);
 
 		fl6.daddr = treq->rmt_addr;
+		skb_set_queue_mapping(skb, queue_mapping);
 		err = ip6_xmit(sk, skb, &fl6, opt, np->tclass);
 		err = net_xmit_eval(err);
 	}
@@ -528,7 +530,7 @@ static int tcp_v6_rtx_synack(struct sock *sk, struct request_sock *req,
 			     struct request_values *rvp)
 {
 	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
-	return tcp_v6_send_synack(sk, req, rvp);
+	return tcp_v6_send_synack(sk, req, rvp, 0);
 }
 
 static void tcp_v6_reqsk_destructor(struct request_sock *req)
@@ -1213,7 +1215,8 @@ have_isn:
 	security_inet_conn_request(sk, skb, req);
 
 	if (tcp_v6_send_synack(sk, req,
-			       (struct request_values *)&tmp_ext) ||
+			       (struct request_values *)&tmp_ext,
+			       skb_get_queue_mapping(skb)) ||
 	    want_cookie)
 		goto drop_and_free;
 
1219 1222