diff options
| author | David S. Miller <davem@davemloft.net> | 2017-08-01 13:07:50 -0400 |
|---|---|---|
| committer | David S. Miller <davem@davemloft.net> | 2017-08-01 13:07:50 -0400 |
| commit | 29fda25a2d31098044f8dfa177c4d2834071828e (patch) | |
| tree | 9e4be11c49a4405c19ece8f81fbb1db478da1055 /net | |
| parent | bb1182bc3e5956a93ab3ef8a3cbfb7966c42a94a (diff) | |
| parent | bc78d646e708dabd1744ca98744dea316f459497 (diff) | |
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Two minor conflicts in virtio_net driver (bug fix overlapping addition
of a helper) and MAINTAINERS (new driver edit overlapping revamp of
PHY entry).
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net')
| -rw-r--r-- | net/core/dev_ioctl.c | 2 | ||||
| -rw-r--r-- | net/core/netpoll.c | 2 | ||||
| -rw-r--r-- | net/dccp/feat.c | 7 | ||||
| -rw-r--r-- | net/dccp/ipv4.c | 1 | ||||
| -rw-r--r-- | net/dccp/ipv6.c | 1 | ||||
| -rw-r--r-- | net/dsa/dsa2.c | 13 | ||||
| -rw-r--r-- | net/ipv4/fib_semantics.c | 2 | ||||
| -rw-r--r-- | net/ipv4/tcp_output.c | 5 | ||||
| -rw-r--r-- | net/ipv4/udp.c | 41 | ||||
| -rw-r--r-- | net/ipv6/exthdrs.c | 1 | ||||
| -rw-r--r-- | net/ipv6/ip6_output.c | 4 | ||||
| -rw-r--r-- | net/ipv6/udp.c | 38 | ||||
| -rw-r--r-- | net/openvswitch/conntrack.c | 7 | ||||
| -rw-r--r-- | net/packet/af_packet.c | 2 | ||||
| -rw-r--r-- | net/socket.c | 5 | ||||
| -rw-r--r-- | net/sunrpc/xprtsock.c | 2 |
16 files changed, 81 insertions, 52 deletions
diff --git a/net/core/dev_ioctl.c b/net/core/dev_ioctl.c index 06b147d7d9e2..709a4e6fb447 100644 --- a/net/core/dev_ioctl.c +++ b/net/core/dev_ioctl.c | |||
| @@ -263,6 +263,8 @@ static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd) | |||
| 263 | return dev_set_mtu(dev, ifr->ifr_mtu); | 263 | return dev_set_mtu(dev, ifr->ifr_mtu); |
| 264 | 264 | ||
| 265 | case SIOCSIFHWADDR: | 265 | case SIOCSIFHWADDR: |
| 266 | if (dev->addr_len > sizeof(struct sockaddr)) | ||
| 267 | return -EINVAL; | ||
| 266 | return dev_set_mac_address(dev, &ifr->ifr_hwaddr); | 268 | return dev_set_mac_address(dev, &ifr->ifr_hwaddr); |
| 267 | 269 | ||
| 268 | case SIOCSIFHWBROADCAST: | 270 | case SIOCSIFHWBROADCAST: |
diff --git a/net/core/netpoll.c b/net/core/netpoll.c index 8357f164c660..912731bed7b7 100644 --- a/net/core/netpoll.c +++ b/net/core/netpoll.c | |||
| @@ -666,7 +666,7 @@ int netpoll_setup(struct netpoll *np) | |||
| 666 | int err; | 666 | int err; |
| 667 | 667 | ||
| 668 | rtnl_lock(); | 668 | rtnl_lock(); |
| 669 | if (np->dev_name) { | 669 | if (np->dev_name[0]) { |
| 670 | struct net *net = current->nsproxy->net_ns; | 670 | struct net *net = current->nsproxy->net_ns; |
| 671 | ndev = __dev_get_by_name(net, np->dev_name); | 671 | ndev = __dev_get_by_name(net, np->dev_name); |
| 672 | } | 672 | } |
diff --git a/net/dccp/feat.c b/net/dccp/feat.c index 1704948e6a12..f227f002c73d 100644 --- a/net/dccp/feat.c +++ b/net/dccp/feat.c | |||
| @@ -1471,9 +1471,12 @@ int dccp_feat_init(struct sock *sk) | |||
| 1471 | * singleton values (which always leads to failure). | 1471 | * singleton values (which always leads to failure). |
| 1472 | * These settings can still (later) be overridden via sockopts. | 1472 | * These settings can still (later) be overridden via sockopts. |
| 1473 | */ | 1473 | */ |
| 1474 | if (ccid_get_builtin_ccids(&tx.val, &tx.len) || | 1474 | if (ccid_get_builtin_ccids(&tx.val, &tx.len)) |
| 1475 | ccid_get_builtin_ccids(&rx.val, &rx.len)) | ||
| 1476 | return -ENOBUFS; | 1475 | return -ENOBUFS; |
| 1476 | if (ccid_get_builtin_ccids(&rx.val, &rx.len)) { | ||
| 1477 | kfree(tx.val); | ||
| 1478 | return -ENOBUFS; | ||
| 1479 | } | ||
| 1477 | 1480 | ||
| 1478 | if (!dccp_feat_prefer(sysctl_dccp_tx_ccid, tx.val, tx.len) || | 1481 | if (!dccp_feat_prefer(sysctl_dccp_tx_ccid, tx.val, tx.len) || |
| 1479 | !dccp_feat_prefer(sysctl_dccp_rx_ccid, rx.val, rx.len)) | 1482 | !dccp_feat_prefer(sysctl_dccp_rx_ccid, rx.val, rx.len)) |
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c index f85d901f4e3f..1b202f16531f 100644 --- a/net/dccp/ipv4.c +++ b/net/dccp/ipv4.c | |||
| @@ -631,6 +631,7 @@ int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb) | |||
| 631 | goto drop_and_free; | 631 | goto drop_and_free; |
| 632 | 632 | ||
| 633 | inet_csk_reqsk_queue_hash_add(sk, req, DCCP_TIMEOUT_INIT); | 633 | inet_csk_reqsk_queue_hash_add(sk, req, DCCP_TIMEOUT_INIT); |
| 634 | reqsk_put(req); | ||
| 634 | return 0; | 635 | return 0; |
| 635 | 636 | ||
| 636 | drop_and_free: | 637 | drop_and_free: |
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c index c376af5bfdfb..1b58eac8aad3 100644 --- a/net/dccp/ipv6.c +++ b/net/dccp/ipv6.c | |||
| @@ -380,6 +380,7 @@ static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb) | |||
| 380 | goto drop_and_free; | 380 | goto drop_and_free; |
| 381 | 381 | ||
| 382 | inet_csk_reqsk_queue_hash_add(sk, req, DCCP_TIMEOUT_INIT); | 382 | inet_csk_reqsk_queue_hash_add(sk, req, DCCP_TIMEOUT_INIT); |
| 383 | reqsk_put(req); | ||
| 383 | return 0; | 384 | return 0; |
| 384 | 385 | ||
| 385 | drop_and_free: | 386 | drop_and_free: |
diff --git a/net/dsa/dsa2.c b/net/dsa/dsa2.c index 56e46090526b..c442051d5a55 100644 --- a/net/dsa/dsa2.c +++ b/net/dsa/dsa2.c | |||
| @@ -509,21 +509,22 @@ static int dsa_cpu_parse(struct dsa_port *port, u32 index, | |||
| 509 | dst->cpu_dp->netdev = ethernet_dev; | 509 | dst->cpu_dp->netdev = ethernet_dev; |
| 510 | } | 510 | } |
| 511 | 511 | ||
| 512 | /* Initialize cpu_port_mask now for drv->setup() | ||
| 513 | * to have access to a correct value, just like what | ||
| 514 | * net/dsa/dsa.c::dsa_switch_setup_one does. | ||
| 515 | */ | ||
| 516 | ds->cpu_port_mask |= BIT(index); | ||
| 517 | |||
| 512 | tag_protocol = ds->ops->get_tag_protocol(ds); | 518 | tag_protocol = ds->ops->get_tag_protocol(ds); |
| 513 | dst->tag_ops = dsa_resolve_tag_protocol(tag_protocol); | 519 | dst->tag_ops = dsa_resolve_tag_protocol(tag_protocol); |
| 514 | if (IS_ERR(dst->tag_ops)) { | 520 | if (IS_ERR(dst->tag_ops)) { |
| 515 | dev_warn(ds->dev, "No tagger for this switch\n"); | 521 | dev_warn(ds->dev, "No tagger for this switch\n"); |
| 522 | ds->cpu_port_mask &= ~BIT(index); | ||
| 516 | return PTR_ERR(dst->tag_ops); | 523 | return PTR_ERR(dst->tag_ops); |
| 517 | } | 524 | } |
| 518 | 525 | ||
| 519 | dst->rcv = dst->tag_ops->rcv; | 526 | dst->rcv = dst->tag_ops->rcv; |
| 520 | 527 | ||
| 521 | /* Initialize cpu_port_mask now for drv->setup() | ||
| 522 | * to have access to a correct value, just like what | ||
| 523 | * net/dsa/dsa.c::dsa_switch_setup_one does. | ||
| 524 | */ | ||
| 525 | ds->cpu_port_mask |= BIT(index); | ||
| 526 | |||
| 527 | return 0; | 528 | return 0; |
| 528 | } | 529 | } |
| 529 | 530 | ||
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c index 222100103808..b8d18171cca3 100644 --- a/net/ipv4/fib_semantics.c +++ b/net/ipv4/fib_semantics.c | |||
| @@ -1452,7 +1452,7 @@ static int call_fib_nh_notifiers(struct fib_nh *fib_nh, | |||
| 1452 | return call_fib_notifiers(dev_net(fib_nh->nh_dev), event_type, | 1452 | return call_fib_notifiers(dev_net(fib_nh->nh_dev), event_type, |
| 1453 | &info.info); | 1453 | &info.info); |
| 1454 | case FIB_EVENT_NH_DEL: | 1454 | case FIB_EVENT_NH_DEL: |
| 1455 | if ((IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev) && | 1455 | if ((in_dev && IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev) && |
| 1456 | fib_nh->nh_flags & RTNH_F_LINKDOWN) || | 1456 | fib_nh->nh_flags & RTNH_F_LINKDOWN) || |
| 1457 | (fib_nh->nh_flags & RTNH_F_DEAD)) | 1457 | (fib_nh->nh_flags & RTNH_F_DEAD)) |
| 1458 | return call_fib_notifiers(dev_net(fib_nh->nh_dev), | 1458 | return call_fib_notifiers(dev_net(fib_nh->nh_dev), |
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 8380464aead1..d49bff51bdb7 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c | |||
| @@ -2200,9 +2200,10 @@ static bool tcp_small_queue_check(struct sock *sk, const struct sk_buff *skb, | |||
| 2200 | static void tcp_chrono_set(struct tcp_sock *tp, const enum tcp_chrono new) | 2200 | static void tcp_chrono_set(struct tcp_sock *tp, const enum tcp_chrono new) |
| 2201 | { | 2201 | { |
| 2202 | const u32 now = tcp_jiffies32; | 2202 | const u32 now = tcp_jiffies32; |
| 2203 | enum tcp_chrono old = tp->chrono_type; | ||
| 2203 | 2204 | ||
| 2204 | if (tp->chrono_type > TCP_CHRONO_UNSPEC) | 2205 | if (old > TCP_CHRONO_UNSPEC) |
| 2205 | tp->chrono_stat[tp->chrono_type - 1] += now - tp->chrono_start; | 2206 | tp->chrono_stat[old - 1] += now - tp->chrono_start; |
| 2206 | tp->chrono_start = now; | 2207 | tp->chrono_start = now; |
| 2207 | tp->chrono_type = new; | 2208 | tp->chrono_type = new; |
| 2208 | } | 2209 | } |
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index b057653ceca9..e6276fa3750b 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c | |||
| @@ -1163,34 +1163,32 @@ out: | |||
| 1163 | return ret; | 1163 | return ret; |
| 1164 | } | 1164 | } |
| 1165 | 1165 | ||
| 1166 | #if BITS_PER_LONG == 64 | 1166 | #define UDP_SKB_IS_STATELESS 0x80000000 |
| 1167 | |||
| 1167 | static void udp_set_dev_scratch(struct sk_buff *skb) | 1168 | static void udp_set_dev_scratch(struct sk_buff *skb) |
| 1168 | { | 1169 | { |
| 1169 | struct udp_dev_scratch *scratch; | 1170 | struct udp_dev_scratch *scratch = udp_skb_scratch(skb); |
| 1170 | 1171 | ||
| 1171 | BUILD_BUG_ON(sizeof(struct udp_dev_scratch) > sizeof(long)); | 1172 | BUILD_BUG_ON(sizeof(struct udp_dev_scratch) > sizeof(long)); |
| 1172 | scratch = (struct udp_dev_scratch *)&skb->dev_scratch; | 1173 | scratch->_tsize_state = skb->truesize; |
| 1173 | scratch->truesize = skb->truesize; | 1174 | #if BITS_PER_LONG == 64 |
| 1174 | scratch->len = skb->len; | 1175 | scratch->len = skb->len; |
| 1175 | scratch->csum_unnecessary = !!skb_csum_unnecessary(skb); | 1176 | scratch->csum_unnecessary = !!skb_csum_unnecessary(skb); |
| 1176 | scratch->is_linear = !skb_is_nonlinear(skb); | 1177 | scratch->is_linear = !skb_is_nonlinear(skb); |
| 1178 | #endif | ||
| 1179 | if (likely(!skb->_skb_refdst)) | ||
| 1180 | scratch->_tsize_state |= UDP_SKB_IS_STATELESS; | ||
| 1177 | } | 1181 | } |
| 1178 | 1182 | ||
| 1179 | static int udp_skb_truesize(struct sk_buff *skb) | 1183 | static int udp_skb_truesize(struct sk_buff *skb) |
| 1180 | { | 1184 | { |
| 1181 | return ((struct udp_dev_scratch *)&skb->dev_scratch)->truesize; | 1185 | return udp_skb_scratch(skb)->_tsize_state & ~UDP_SKB_IS_STATELESS; |
| 1182 | } | ||
| 1183 | #else | ||
| 1184 | static void udp_set_dev_scratch(struct sk_buff *skb) | ||
| 1185 | { | ||
| 1186 | skb->dev_scratch = skb->truesize; | ||
| 1187 | } | 1186 | } |
| 1188 | 1187 | ||
| 1189 | static int udp_skb_truesize(struct sk_buff *skb) | 1188 | static bool udp_skb_has_head_state(struct sk_buff *skb) |
| 1190 | { | 1189 | { |
| 1191 | return skb->dev_scratch; | 1190 | return !(udp_skb_scratch(skb)->_tsize_state & UDP_SKB_IS_STATELESS); |
| 1192 | } | 1191 | } |
| 1193 | #endif | ||
| 1194 | 1192 | ||
| 1195 | /* fully reclaim rmem/fwd memory allocated for skb */ | 1193 | /* fully reclaim rmem/fwd memory allocated for skb */ |
| 1196 | static void udp_rmem_release(struct sock *sk, int size, int partial, | 1194 | static void udp_rmem_release(struct sock *sk, int size, int partial, |
| @@ -1388,10 +1386,10 @@ void skb_consume_udp(struct sock *sk, struct sk_buff *skb, int len) | |||
| 1388 | unlock_sock_fast(sk, slow); | 1386 | unlock_sock_fast(sk, slow); |
| 1389 | } | 1387 | } |
| 1390 | 1388 | ||
| 1391 | /* we cleared the head states previously only if the skb lacks any IP | 1389 | /* In the more common cases we cleared the head states previously, |
| 1392 | * options, see __udp_queue_rcv_skb(). | 1390 | * see __udp_queue_rcv_skb(). |
| 1393 | */ | 1391 | */ |
| 1394 | if (unlikely(IPCB(skb)->opt.optlen > 0)) | 1392 | if (unlikely(udp_skb_has_head_state(skb))) |
| 1395 | skb_release_head_state(skb); | 1393 | skb_release_head_state(skb); |
| 1396 | consume_stateless_skb(skb); | 1394 | consume_stateless_skb(skb); |
| 1397 | } | 1395 | } |
| @@ -1784,11 +1782,11 @@ static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) | |||
| 1784 | sk_mark_napi_id_once(sk, skb); | 1782 | sk_mark_napi_id_once(sk, skb); |
| 1785 | } | 1783 | } |
| 1786 | 1784 | ||
| 1787 | /* At recvmsg() time we need skb->dst to process IP options-related | 1785 | /* At recvmsg() time we may access skb->dst or skb->sp depending on |
| 1788 | * cmsg, elsewhere can we clear all pending head states while they are | 1786 | * the IP options and the cmsg flags, elsewhere can we clear all |
| 1789 | * hot in the cache | 1787 | * pending head states while they are hot in the cache |
| 1790 | */ | 1788 | */ |
| 1791 | if (likely(IPCB(skb)->opt.optlen == 0)) | 1789 | if (likely(IPCB(skb)->opt.optlen == 0 && !skb_sec_path(skb))) |
| 1792 | skb_release_head_state(skb); | 1790 | skb_release_head_state(skb); |
| 1793 | 1791 | ||
| 1794 | rc = __udp_enqueue_schedule_skb(sk, skb); | 1792 | rc = __udp_enqueue_schedule_skb(sk, skb); |
| @@ -1930,7 +1928,7 @@ drop: | |||
| 1930 | /* For TCP sockets, sk_rx_dst is protected by socket lock | 1928 | /* For TCP sockets, sk_rx_dst is protected by socket lock |
| 1931 | * For UDP, we use xchg() to guard against concurrent changes. | 1929 | * For UDP, we use xchg() to guard against concurrent changes. |
| 1932 | */ | 1930 | */ |
| 1933 | static void udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst) | 1931 | void udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst) |
| 1934 | { | 1932 | { |
| 1935 | struct dst_entry *old; | 1933 | struct dst_entry *old; |
| 1936 | 1934 | ||
| @@ -1939,6 +1937,7 @@ static void udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst) | |||
| 1939 | dst_release(old); | 1937 | dst_release(old); |
| 1940 | } | 1938 | } |
| 1941 | } | 1939 | } |
| 1940 | EXPORT_SYMBOL(udp_sk_rx_dst_set); | ||
| 1942 | 1941 | ||
| 1943 | /* | 1942 | /* |
| 1944 | * Multicasts and broadcasts go to each listener. | 1943 | * Multicasts and broadcasts go to each listener. |
diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c index 4996d734f1d2..3cec529c6113 100644 --- a/net/ipv6/exthdrs.c +++ b/net/ipv6/exthdrs.c | |||
| @@ -756,6 +756,7 @@ static bool ipv6_hop_jumbo(struct sk_buff *skb, int optoff) | |||
| 756 | if (pskb_trim_rcsum(skb, pkt_len + sizeof(struct ipv6hdr))) | 756 | if (pskb_trim_rcsum(skb, pkt_len + sizeof(struct ipv6hdr))) |
| 757 | goto drop; | 757 | goto drop; |
| 758 | 758 | ||
| 759 | IP6CB(skb)->flags |= IP6SKB_JUMBOGRAM; | ||
| 759 | return true; | 760 | return true; |
| 760 | 761 | ||
| 761 | drop: | 762 | drop: |
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index c6ec06465ce0..43ca864327c7 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c | |||
| @@ -673,8 +673,6 @@ int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb, | |||
| 673 | *prevhdr = NEXTHDR_FRAGMENT; | 673 | *prevhdr = NEXTHDR_FRAGMENT; |
| 674 | tmp_hdr = kmemdup(skb_network_header(skb), hlen, GFP_ATOMIC); | 674 | tmp_hdr = kmemdup(skb_network_header(skb), hlen, GFP_ATOMIC); |
| 675 | if (!tmp_hdr) { | 675 | if (!tmp_hdr) { |
| 676 | IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), | ||
| 677 | IPSTATS_MIB_FRAGFAILS); | ||
| 678 | err = -ENOMEM; | 676 | err = -ENOMEM; |
| 679 | goto fail; | 677 | goto fail; |
| 680 | } | 678 | } |
| @@ -789,8 +787,6 @@ slow_path: | |||
| 789 | frag = alloc_skb(len + hlen + sizeof(struct frag_hdr) + | 787 | frag = alloc_skb(len + hlen + sizeof(struct frag_hdr) + |
| 790 | hroom + troom, GFP_ATOMIC); | 788 | hroom + troom, GFP_ATOMIC); |
| 791 | if (!frag) { | 789 | if (!frag) { |
| 792 | IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), | ||
| 793 | IPSTATS_MIB_FRAGFAILS); | ||
| 794 | err = -ENOMEM; | 790 | err = -ENOMEM; |
| 795 | goto fail; | 791 | goto fail; |
| 796 | } | 792 | } |
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index 4a3e65626e8b..578142b7ca3e 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c | |||
| @@ -291,11 +291,7 @@ static struct sock *__udp6_lib_lookup_skb(struct sk_buff *skb, | |||
| 291 | struct udp_table *udptable) | 291 | struct udp_table *udptable) |
| 292 | { | 292 | { |
| 293 | const struct ipv6hdr *iph = ipv6_hdr(skb); | 293 | const struct ipv6hdr *iph = ipv6_hdr(skb); |
| 294 | struct sock *sk; | ||
| 295 | 294 | ||
| 296 | sk = skb_steal_sock(skb); | ||
| 297 | if (unlikely(sk)) | ||
| 298 | return sk; | ||
| 299 | return __udp6_lib_lookup(dev_net(skb->dev), &iph->saddr, sport, | 295 | return __udp6_lib_lookup(dev_net(skb->dev), &iph->saddr, sport, |
| 300 | &iph->daddr, dport, inet6_iif(skb), | 296 | &iph->daddr, dport, inet6_iif(skb), |
| 301 | udptable, skb); | 297 | udptable, skb); |
| @@ -332,6 +328,15 @@ struct sock *udp6_lib_lookup(struct net *net, const struct in6_addr *saddr, __be | |||
| 332 | EXPORT_SYMBOL_GPL(udp6_lib_lookup); | 328 | EXPORT_SYMBOL_GPL(udp6_lib_lookup); |
| 333 | #endif | 329 | #endif |
| 334 | 330 | ||
| 331 | /* do not use the scratch area len for jumbogram: their length exceeds the | ||
| 332 | * scratch area space; note that the IP6CB flags is still in the first | ||
| 333 | * cacheline, so checking for jumbograms is cheap | ||
| 334 | */ | ||
| 335 | static int udp6_skb_len(struct sk_buff *skb) | ||
| 336 | { | ||
| 337 | return unlikely(inet6_is_jumbogram(skb)) ? skb->len : udp_skb_len(skb); | ||
| 338 | } | ||
| 339 | |||
| 335 | /* | 340 | /* |
| 336 | * This should be easy, if there is something there we | 341 | * This should be easy, if there is something there we |
| 337 | * return it, otherwise we block. | 342 | * return it, otherwise we block. |
| @@ -362,7 +367,7 @@ try_again: | |||
| 362 | if (!skb) | 367 | if (!skb) |
| 363 | return err; | 368 | return err; |
| 364 | 369 | ||
| 365 | ulen = udp_skb_len(skb); | 370 | ulen = udp6_skb_len(skb); |
| 366 | copied = len; | 371 | copied = len; |
| 367 | if (copied > ulen - off) | 372 | if (copied > ulen - off) |
| 368 | copied = ulen - off; | 373 | copied = ulen - off; |
| @@ -804,6 +809,24 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable, | |||
| 804 | if (udp6_csum_init(skb, uh, proto)) | 809 | if (udp6_csum_init(skb, uh, proto)) |
| 805 | goto csum_error; | 810 | goto csum_error; |
| 806 | 811 | ||
| 812 | /* Check if the socket is already available, e.g. due to early demux */ | ||
| 813 | sk = skb_steal_sock(skb); | ||
| 814 | if (sk) { | ||
| 815 | struct dst_entry *dst = skb_dst(skb); | ||
| 816 | int ret; | ||
| 817 | |||
| 818 | if (unlikely(sk->sk_rx_dst != dst)) | ||
| 819 | udp_sk_rx_dst_set(sk, dst); | ||
| 820 | |||
| 821 | ret = udpv6_queue_rcv_skb(sk, skb); | ||
| 822 | sock_put(sk); | ||
| 823 | |||
| 824 | /* a return value > 0 means to resubmit the input */ | ||
| 825 | if (ret > 0) | ||
| 826 | return ret; | ||
| 827 | return 0; | ||
| 828 | } | ||
| 829 | |||
| 807 | /* | 830 | /* |
| 808 | * Multicast receive code | 831 | * Multicast receive code |
| 809 | */ | 832 | */ |
| @@ -812,11 +835,6 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable, | |||
| 812 | saddr, daddr, udptable, proto); | 835 | saddr, daddr, udptable, proto); |
| 813 | 836 | ||
| 814 | /* Unicast */ | 837 | /* Unicast */ |
| 815 | |||
| 816 | /* | ||
| 817 | * check socket cache ... must talk to Alan about his plans | ||
| 818 | * for sock caches... i'll skip this for now. | ||
| 819 | */ | ||
| 820 | sk = __udp6_lib_lookup_skb(skb, uh->source, uh->dest, udptable); | 838 | sk = __udp6_lib_lookup_skb(skb, uh->source, uh->dest, udptable); |
| 821 | if (sk) { | 839 | if (sk) { |
| 822 | int ret; | 840 | int ret; |
diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c index e3c4c6c3fef7..03859e386b47 100644 --- a/net/openvswitch/conntrack.c +++ b/net/openvswitch/conntrack.c | |||
| @@ -1310,8 +1310,8 @@ static int parse_ct(const struct nlattr *attr, struct ovs_conntrack_info *info, | |||
| 1310 | 1310 | ||
| 1311 | nla_for_each_nested(a, attr, rem) { | 1311 | nla_for_each_nested(a, attr, rem) { |
| 1312 | int type = nla_type(a); | 1312 | int type = nla_type(a); |
| 1313 | int maxlen = ovs_ct_attr_lens[type].maxlen; | 1313 | int maxlen; |
| 1314 | int minlen = ovs_ct_attr_lens[type].minlen; | 1314 | int minlen; |
| 1315 | 1315 | ||
| 1316 | if (type > OVS_CT_ATTR_MAX) { | 1316 | if (type > OVS_CT_ATTR_MAX) { |
| 1317 | OVS_NLERR(log, | 1317 | OVS_NLERR(log, |
| @@ -1319,6 +1319,9 @@ static int parse_ct(const struct nlattr *attr, struct ovs_conntrack_info *info, | |||
| 1319 | type, OVS_CT_ATTR_MAX); | 1319 | type, OVS_CT_ATTR_MAX); |
| 1320 | return -EINVAL; | 1320 | return -EINVAL; |
| 1321 | } | 1321 | } |
| 1322 | |||
| 1323 | maxlen = ovs_ct_attr_lens[type].maxlen; | ||
| 1324 | minlen = ovs_ct_attr_lens[type].minlen; | ||
| 1322 | if (nla_len(a) < minlen || nla_len(a) > maxlen) { | 1325 | if (nla_len(a) < minlen || nla_len(a) > maxlen) { |
| 1323 | OVS_NLERR(log, | 1326 | OVS_NLERR(log, |
| 1324 | "Conntrack attr type has unexpected length (type=%d, length=%d, expected=%d)", | 1327 | "Conntrack attr type has unexpected length (type=%d, length=%d, expected=%d)", |
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index e7303f68972d..5a178047a7ce 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c | |||
| @@ -4327,7 +4327,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u, | |||
| 4327 | register_prot_hook(sk); | 4327 | register_prot_hook(sk); |
| 4328 | } | 4328 | } |
| 4329 | spin_unlock(&po->bind_lock); | 4329 | spin_unlock(&po->bind_lock); |
| 4330 | if (closing && (po->tp_version > TPACKET_V2)) { | 4330 | if (pg_vec && (po->tp_version > TPACKET_V2)) { |
| 4331 | /* Because we don't support block-based V3 on tx-ring */ | 4331 | /* Because we don't support block-based V3 on tx-ring */ |
| 4332 | if (!tx_ring) | 4332 | if (!tx_ring) |
| 4333 | prb_shutdown_retire_blk_timer(po, rb_queue); | 4333 | prb_shutdown_retire_blk_timer(po, rb_queue); |
diff --git a/net/socket.c b/net/socket.c index 79d9bb964cd8..cb0fdf799f40 100644 --- a/net/socket.c +++ b/net/socket.c | |||
| @@ -1916,7 +1916,7 @@ static int copy_msghdr_from_user(struct msghdr *kmsg, | |||
| 1916 | if (copy_from_user(&msg, umsg, sizeof(*umsg))) | 1916 | if (copy_from_user(&msg, umsg, sizeof(*umsg))) |
| 1917 | return -EFAULT; | 1917 | return -EFAULT; |
| 1918 | 1918 | ||
| 1919 | kmsg->msg_control = msg.msg_control; | 1919 | kmsg->msg_control = (void __force *)msg.msg_control; |
| 1920 | kmsg->msg_controllen = msg.msg_controllen; | 1920 | kmsg->msg_controllen = msg.msg_controllen; |
| 1921 | kmsg->msg_flags = msg.msg_flags; | 1921 | kmsg->msg_flags = msg.msg_flags; |
| 1922 | 1922 | ||
| @@ -1935,7 +1935,8 @@ static int copy_msghdr_from_user(struct msghdr *kmsg, | |||
| 1935 | 1935 | ||
| 1936 | if (msg.msg_name && kmsg->msg_namelen) { | 1936 | if (msg.msg_name && kmsg->msg_namelen) { |
| 1937 | if (!save_addr) { | 1937 | if (!save_addr) { |
| 1938 | err = move_addr_to_kernel(msg.msg_name, kmsg->msg_namelen, | 1938 | err = move_addr_to_kernel(msg.msg_name, |
| 1939 | kmsg->msg_namelen, | ||
| 1939 | kmsg->msg_name); | 1940 | kmsg->msg_name); |
| 1940 | if (err < 0) | 1941 | if (err < 0) |
| 1941 | return err; | 1942 | return err; |
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index d5b54c020dec..4f154d388748 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c | |||
| @@ -1624,6 +1624,8 @@ static void xs_tcp_state_change(struct sock *sk) | |||
| 1624 | if (test_and_clear_bit(XPRT_SOCK_CONNECTING, | 1624 | if (test_and_clear_bit(XPRT_SOCK_CONNECTING, |
| 1625 | &transport->sock_state)) | 1625 | &transport->sock_state)) |
| 1626 | xprt_clear_connecting(xprt); | 1626 | xprt_clear_connecting(xprt); |
| 1627 | if (sk->sk_err) | ||
| 1628 | xprt_wake_pending_tasks(xprt, -sk->sk_err); | ||
| 1627 | xs_sock_mark_closed(xprt); | 1629 | xs_sock_mark_closed(xprt); |
| 1628 | } | 1630 | } |
| 1629 | out: | 1631 | out: |
