author     David S. Miller <davem@davemloft.net>  2018-03-23 11:24:57 -0400
committer  David S. Miller <davem@davemloft.net>  2018-03-23 11:31:58 -0400
commit     03fe2debbb2771fb90881e4ce8109b09cf772a5c
tree       fbaf8738296b2e9dcba81c6daef2d515b6c4948c /net/core
parent     6686c459e1449a3ee5f3fd313b0a559ace7a700e
parent     f36b7534b83357cf52e747905de6d65b4f7c2512
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Fun set of conflict resolutions here...
For the mac80211 stuff, these were fortunately just parallel
adds. Trivially resolved.
In drivers/net/phy/phy.c we had a bug fix in 'net' that moved the
function phy_disable_interrupts() earlier in the file, whilst in
'net-next' the phy_error() call from this function was removed.
In net/ipv4/xfrm4_policy.c, David Ahern's changes to remove the
'rt_table_id' member of rtable collided with a bug fix in 'net' that
added a new struct member "rt_mtu_locked" which needs to be copied
over here.
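For illustration, the resolution keeps the new member in the field-copy block of xfrm4_fill_dst() while the rt_table_id copy from net-next goes away; roughly (a sketch from memory, not the exact resolved hunk):

	xdst->u.rt.rt_pmtu = rt->rt_pmtu;
	xdst->u.rt.rt_mtu_locked = rt->rt_mtu_locked;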
The mlxsw driver conflict consisted of net-next separating
the span code and definitions into separate files, whilst
a 'net' bug fix made some changes to that moved code.
The mlx5 infiniband conflict resolution was quite non-trivial; the RDMA
tree's merge commit was used as a guide here, and here are their notes:
====================
Because bug fixes found by the syzkaller bot were taken into the for-rc
branch after development for the 4.17 merge window had already started
landing in the for-next branch, there were fairly non-trivial merge
issues that needed to be resolved between the for-rc branch and the
for-next branch. This merge resolves those conflicts and provides a
unified base upon which ongoing development for 4.17 can be based.
Conflicts:
drivers/infiniband/hw/mlx5/main.c - Commit 42cea83f9524
(IB/mlx5: Fix cleanup order on unload) added to for-rc and
commit b5ca15ad7e61 (IB/mlx5: Add proper representors support)
added as part of the devel cycle both needed to modify the
init/de-init functions used by mlx5. To support the new
representors, the new functions added by the cleanup patch
needed to be made non-static, and the init/de-init list
added by the representors patch needed to be modified to
match the init/de-init list changes made by the cleanup
patch.
Updates:
drivers/infiniband/hw/mlx5/mlx5_ib.h - Update function
prototypes added by representors patch to reflect new function
names as changed by cleanup patch
drivers/infiniband/hw/mlx5/ib_rep.c - Update init/de-init
stage list to match new order from cleanup patch
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/core')
-rw-r--r--	net/core/dev.c	22
-rw-r--r--	net/core/dev_ioctl.c	7
-rw-r--r--	net/core/devlink.c	16
-rw-r--r--	net/core/filter.c	60
-rw-r--r--	net/core/skbuff.c	11
-rw-r--r--	net/core/sock.c	21
-rw-r--r--	net/core/sock_diag.c	12
7 files changed, 98 insertions, 51 deletions
diff --git a/net/core/dev.c b/net/core/dev.c
index d8887cc38e7b..f9c28f44286c 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -3278,15 +3278,23 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
 #if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
 static void skb_update_prio(struct sk_buff *skb)
 {
-	struct netprio_map *map = rcu_dereference_bh(skb->dev->priomap);
+	const struct netprio_map *map;
+	const struct sock *sk;
+	unsigned int prioidx;
 
-	if (!skb->priority && skb->sk && map) {
-		unsigned int prioidx =
-			sock_cgroup_prioidx(&skb->sk->sk_cgrp_data);
+	if (skb->priority)
+		return;
+	map = rcu_dereference_bh(skb->dev->priomap);
+	if (!map)
+		return;
+	sk = skb_to_full_sk(skb);
+	if (!sk)
+		return;
 
-		if (prioidx < map->priomap_len)
-			skb->priority = map->priomap[prioidx];
-	}
+	prioidx = sock_cgroup_prioidx(&sk->sk_cgrp_data);
+
+	if (prioidx < map->priomap_len)
+		skb->priority = map->priomap[prioidx];
 }
 #else
 #define skb_update_prio(skb)
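The switch from skb->sk to skb_to_full_sk() above matters because skb->sk may point at a request (mini) socket, which carries no cgroup data; skb_to_full_sk() maps it back to the owning full socket. A rough, illustrative sketch of what such a helper does (not the exact in-tree definition, which lives in the socket headers):

	/* Illustrative sketch only -- the in-tree helper may differ in detail. */
	static inline struct sock *skb_to_full_sk_sketch(const struct sk_buff *skb)
	{
		struct sock *sk = skb->sk;

		/* A SYN_RECV request socket is not a full socket; fall back to
		 * the listener that owns it, which does carry sk_cgrp_data.
		 */
		if (sk && sk->sk_state == TCP_NEW_SYN_RECV)
			sk = inet_reqsk(sk)->rsk_listener;
		return sk;
	}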
diff --git a/net/core/dev_ioctl.c b/net/core/dev_ioctl.c
index 0ab1af04296c..a04e1e88bf3a 100644
--- a/net/core/dev_ioctl.c
+++ b/net/core/dev_ioctl.c
@@ -402,8 +402,6 @@ int dev_ioctl(struct net *net, unsigned int cmd, struct ifreq *ifr, bool *need_c
 	if (colon)
 		*colon = 0;
 
-	dev_load(net, ifr->ifr_name);
-
 	/*
 	 * See which interface the caller is talking about.
 	 */
@@ -423,6 +421,7 @@ int dev_ioctl(struct net *net, unsigned int cmd, struct ifreq *ifr, bool *need_c
 	case SIOCGIFMAP:
 	case SIOCGIFINDEX:
 	case SIOCGIFTXQLEN:
+		dev_load(net, ifr->ifr_name);
 		rcu_read_lock();
 		ret = dev_ifsioc_locked(net, ifr, cmd);
 		rcu_read_unlock();
@@ -431,6 +430,7 @@ int dev_ioctl(struct net *net, unsigned int cmd, struct ifreq *ifr, bool *need_c
 		return ret;
 
 	case SIOCETHTOOL:
+		dev_load(net, ifr->ifr_name);
 		rtnl_lock();
 		ret = dev_ethtool(net, ifr);
 		rtnl_unlock();
@@ -447,6 +447,7 @@ int dev_ioctl(struct net *net, unsigned int cmd, struct ifreq *ifr, bool *need_c
 	case SIOCGMIIPHY:
 	case SIOCGMIIREG:
 	case SIOCSIFNAME:
+		dev_load(net, ifr->ifr_name);
 		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
 			return -EPERM;
 		rtnl_lock();
@@ -494,6 +495,7 @@ int dev_ioctl(struct net *net, unsigned int cmd, struct ifreq *ifr, bool *need_c
 		/* fall through */
 	case SIOCBONDSLAVEINFOQUERY:
 	case SIOCBONDINFOQUERY:
+		dev_load(net, ifr->ifr_name);
 		rtnl_lock();
 		ret = dev_ifsioc(net, ifr, cmd);
 		rtnl_unlock();
@@ -518,6 +520,7 @@ int dev_ioctl(struct net *net, unsigned int cmd, struct ifreq *ifr, bool *need_c
 		    cmd == SIOCGHWTSTAMP ||
 		    (cmd >= SIOCDEVPRIVATE &&
 		     cmd <= SIOCDEVPRIVATE + 15)) {
+			dev_load(net, ifr->ifr_name);
 			rtnl_lock();
 			ret = dev_ifsioc(net, ifr, cmd);
 			rtnl_unlock();
diff --git a/net/core/devlink.c b/net/core/devlink.c
index d03b96f87c25..9236e421bd62 100644
--- a/net/core/devlink.c
+++ b/net/core/devlink.c
@@ -1798,7 +1798,7 @@ send_done:
 	if (!nlh) {
 		err = devlink_dpipe_send_and_alloc_skb(&skb, info);
 		if (err)
-			goto err_skb_send_alloc;
+			return err;
 		goto send_done;
 	}
 
@@ -1807,7 +1807,6 @@ send_done:
 nla_put_failure:
 	err = -EMSGSIZE;
 err_table_put:
-err_skb_send_alloc:
 	genlmsg_cancel(skb, hdr);
 	nlmsg_free(skb);
 	return err;
@@ -2073,7 +2072,7 @@ static int devlink_dpipe_entries_fill(struct genl_info *info,
 					   table->counters_enabled,
 					   &dump_ctx);
 	if (err)
-		goto err_entries_dump;
+		return err;
 
 send_done:
 	nlh = nlmsg_put(dump_ctx.skb, info->snd_portid, info->snd_seq,
@@ -2081,16 +2080,10 @@ send_done:
 	if (!nlh) {
 		err = devlink_dpipe_send_and_alloc_skb(&dump_ctx.skb, info);
 		if (err)
-			goto err_skb_send_alloc;
+			return err;
 		goto send_done;
 	}
 	return genlmsg_reply(dump_ctx.skb, info);
-
-err_entries_dump:
-err_skb_send_alloc:
-	genlmsg_cancel(dump_ctx.skb, dump_ctx.hdr);
-	nlmsg_free(dump_ctx.skb);
-	return err;
 }
 
 static int devlink_nl_cmd_dpipe_entries_get(struct sk_buff *skb,
@@ -2229,7 +2222,7 @@ send_done:
 	if (!nlh) {
 		err = devlink_dpipe_send_and_alloc_skb(&skb, info);
 		if (err)
-			goto err_skb_send_alloc;
+			return err;
 		goto send_done;
 	}
 	return genlmsg_reply(skb, info);
@@ -2237,7 +2230,6 @@ send_done:
 nla_put_failure:
 	err = -EMSGSIZE;
 err_table_put:
-err_skb_send_alloc:
 	genlmsg_cancel(skb, hdr);
 	nlmsg_free(skb);
 	return err;
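The error paths above can simply return err because devlink_dpipe_send_and_alloc_skb() either consumes the skb (via genlmsg_reply(), which frees it even on failure) or never allocates a new one, so the removed labels would have freed it a second time. Roughly, that helper looks like the following (a sketch from memory, for context only; the in-tree version may differ):

	static int devlink_dpipe_send_and_alloc_skb(struct sk_buff **pskb,
						    struct genl_info *info)
	{
		int err;

		if (*pskb) {
			/* genlmsg_reply() consumes the skb even on error */
			err = genlmsg_reply(*pskb, info);
			if (err)
				return err;
		}
		*pskb = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
		if (!*pskb)
			return -ENOMEM;
		return 0;
	}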
diff --git a/net/core/filter.c b/net/core/filter.c
index c86f03fd9ea5..00c711c5f1a2 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -2283,6 +2283,10 @@ static int bpf_skb_proto_4_to_6(struct sk_buff *skb)
 	u32 off = skb_mac_header_len(skb);
 	int ret;
 
+	/* SCTP uses GSO_BY_FRAGS, thus cannot adjust it. */
+	if (skb_is_gso(skb) && unlikely(skb_is_gso_sctp(skb)))
+		return -ENOTSUPP;
+
 	ret = skb_cow(skb, len_diff);
 	if (unlikely(ret < 0))
 		return ret;
@@ -2292,19 +2296,21 @@ static int bpf_skb_proto_4_to_6(struct sk_buff *skb)
 		return ret;
 
 	if (skb_is_gso(skb)) {
+		struct skb_shared_info *shinfo = skb_shinfo(skb);
+
 		/* SKB_GSO_TCPV4 needs to be changed into
 		 * SKB_GSO_TCPV6.
 		 */
-		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) {
-			skb_shinfo(skb)->gso_type &= ~SKB_GSO_TCPV4;
-			skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV6;
+		if (shinfo->gso_type & SKB_GSO_TCPV4) {
+			shinfo->gso_type &= ~SKB_GSO_TCPV4;
+			shinfo->gso_type |= SKB_GSO_TCPV6;
 		}
 
 		/* Due to IPv6 header, MSS needs to be downgraded. */
-		skb_shinfo(skb)->gso_size -= len_diff;
+		skb_decrease_gso_size(shinfo, len_diff);
 		/* Header must be checked, and gso_segs recomputed. */
-		skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
-		skb_shinfo(skb)->gso_segs = 0;
+		shinfo->gso_type |= SKB_GSO_DODGY;
+		shinfo->gso_segs = 0;
 	}
 
 	skb->protocol = htons(ETH_P_IPV6);
@@ -2319,6 +2325,10 @@ static int bpf_skb_proto_6_to_4(struct sk_buff *skb)
 	u32 off = skb_mac_header_len(skb);
 	int ret;
 
+	/* SCTP uses GSO_BY_FRAGS, thus cannot adjust it. */
+	if (skb_is_gso(skb) && unlikely(skb_is_gso_sctp(skb)))
+		return -ENOTSUPP;
+
 	ret = skb_unclone(skb, GFP_ATOMIC);
 	if (unlikely(ret < 0))
 		return ret;
@@ -2328,19 +2338,21 @@ static int bpf_skb_proto_6_to_4(struct sk_buff *skb)
 		return ret;
 
 	if (skb_is_gso(skb)) {
+		struct skb_shared_info *shinfo = skb_shinfo(skb);
+
 		/* SKB_GSO_TCPV6 needs to be changed into
 		 * SKB_GSO_TCPV4.
 		 */
-		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
-			skb_shinfo(skb)->gso_type &= ~SKB_GSO_TCPV6;
-			skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV4;
+		if (shinfo->gso_type & SKB_GSO_TCPV6) {
+			shinfo->gso_type &= ~SKB_GSO_TCPV6;
+			shinfo->gso_type |= SKB_GSO_TCPV4;
 		}
 
 		/* Due to IPv4 header, MSS can be upgraded. */
-		skb_shinfo(skb)->gso_size += len_diff;
+		skb_increase_gso_size(shinfo, len_diff);
 		/* Header must be checked, and gso_segs recomputed. */
-		skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
-		skb_shinfo(skb)->gso_segs = 0;
+		shinfo->gso_type |= SKB_GSO_DODGY;
+		shinfo->gso_segs = 0;
 	}
 
 	skb->protocol = htons(ETH_P_IP);
@@ -2439,6 +2451,10 @@ static int bpf_skb_net_grow(struct sk_buff *skb, u32 len_diff)
 	u32 off = skb_mac_header_len(skb) + bpf_skb_net_base_len(skb);
 	int ret;
 
+	/* SCTP uses GSO_BY_FRAGS, thus cannot adjust it. */
+	if (skb_is_gso(skb) && unlikely(skb_is_gso_sctp(skb)))
+		return -ENOTSUPP;
+
 	ret = skb_cow(skb, len_diff);
 	if (unlikely(ret < 0))
 		return ret;
@@ -2448,11 +2464,13 @@ static int bpf_skb_net_grow(struct sk_buff *skb, u32 len_diff)
 		return ret;
 
 	if (skb_is_gso(skb)) {
+		struct skb_shared_info *shinfo = skb_shinfo(skb);
+
 		/* Due to header grow, MSS needs to be downgraded. */
-		skb_shinfo(skb)->gso_size -= len_diff;
+		skb_decrease_gso_size(shinfo, len_diff);
 		/* Header must be checked, and gso_segs recomputed. */
-		skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
-		skb_shinfo(skb)->gso_segs = 0;
+		shinfo->gso_type |= SKB_GSO_DODGY;
+		shinfo->gso_segs = 0;
 	}
 
 	return 0;
@@ -2463,6 +2481,10 @@ static int bpf_skb_net_shrink(struct sk_buff *skb, u32 len_diff)
 	u32 off = skb_mac_header_len(skb) + bpf_skb_net_base_len(skb);
 	int ret;
 
+	/* SCTP uses GSO_BY_FRAGS, thus cannot adjust it. */
+	if (skb_is_gso(skb) && unlikely(skb_is_gso_sctp(skb)))
+		return -ENOTSUPP;
+
 	ret = skb_unclone(skb, GFP_ATOMIC);
 	if (unlikely(ret < 0))
 		return ret;
@@ -2472,11 +2494,13 @@ static int bpf_skb_net_shrink(struct sk_buff *skb, u32 len_diff)
 		return ret;
 
 	if (skb_is_gso(skb)) {
+		struct skb_shared_info *shinfo = skb_shinfo(skb);
+
 		/* Due to header shrink, MSS can be upgraded. */
-		skb_shinfo(skb)->gso_size += len_diff;
+		skb_increase_gso_size(shinfo, len_diff);
 		/* Header must be checked, and gso_segs recomputed. */
-		skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
-		skb_shinfo(skb)->gso_segs = 0;
+		shinfo->gso_type |= SKB_GSO_DODGY;
+		shinfo->gso_segs = 0;
 	}
 
 	return 0;
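The skb_is_gso_sctp() guard and the skb_{decrease,increase}_gso_size() calls adopted above come from the SCTP GSO fixes merged in from 'net'. Roughly, those helpers look like this (a sketch of the skbuff.h additions as recalled, for context only; the exact in-tree code may differ):

	/* Illustrative sketch only -- not the exact in-tree definitions. */
	static inline bool skb_is_gso_sctp(const struct sk_buff *skb)
	{
		return skb_shinfo(skb)->gso_type & SKB_GSO_SCTP;
	}

	static inline void skb_increase_gso_size(struct skb_shared_info *shinfo,
						 u16 increment)
	{
		/* GSO_BY_FRAGS is a magic gso_size value used by SCTP; it must
		 * never be adjusted, hence the -ENOTSUPP guards above.
		 */
		if (WARN_ON_ONCE(shinfo->gso_size == GSO_BY_FRAGS))
			return;
		shinfo->gso_size += increment;
	}

	static inline void skb_decrease_gso_size(struct skb_shared_info *shinfo,
						 u16 decrement)
	{
		if (WARN_ON_ONCE(shinfo->gso_size == GSO_BY_FRAGS))
			return;
		shinfo->gso_size -= decrement;
	}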
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 715c13495ba6..46cb22215ff4 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -4181,7 +4181,7 @@ int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
 
 	skb_queue_tail(&sk->sk_error_queue, skb);
 	if (!sock_flag(sk, SOCK_DEAD))
-		sk->sk_data_ready(sk);
+		sk->sk_error_report(sk);
 	return 0;
 }
 EXPORT_SYMBOL(sock_queue_err_skb);
@@ -4906,7 +4906,7 @@ static unsigned int skb_gso_transport_seglen(const struct sk_buff *skb)
 			thlen += inner_tcp_hdrlen(skb);
 	} else if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) {
 		thlen = tcp_hdrlen(skb);
-	} else if (unlikely(shinfo->gso_type & SKB_GSO_SCTP)) {
+	} else if (unlikely(skb_is_gso_sctp(skb))) {
 		thlen = sizeof(struct sctphdr);
 	}
 	/* UFO sets gso_size to the size of the fragmentation
@@ -5022,13 +5022,16 @@ EXPORT_SYMBOL_GPL(skb_gso_validate_mac_len);
 
 static struct sk_buff *skb_reorder_vlan_header(struct sk_buff *skb)
 {
+	int mac_len;
+
 	if (skb_cow(skb, skb_headroom(skb)) < 0) {
 		kfree_skb(skb);
 		return NULL;
 	}
 
-	memmove(skb->data - ETH_HLEN, skb->data - skb->mac_len - VLAN_HLEN,
-		2 * ETH_ALEN);
+	mac_len = skb->data - skb_mac_header(skb);
+	memmove(skb_mac_header(skb) + VLAN_HLEN, skb_mac_header(skb),
+		mac_len - VLAN_HLEN - ETH_TLEN);
 	skb->mac_header += VLAN_HLEN;
 	return skb;
 }
diff --git a/net/core/sock.c b/net/core/sock.c
index f704324d1219..e689496dfd8a 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -3326,6 +3326,27 @@ void proto_unregister(struct proto *prot)
 }
 EXPORT_SYMBOL(proto_unregister);
 
+int sock_load_diag_module(int family, int protocol)
+{
+	if (!protocol) {
+		if (!sock_is_registered(family))
+			return -ENOENT;
+
+		return request_module("net-pf-%d-proto-%d-type-%d", PF_NETLINK,
+				      NETLINK_SOCK_DIAG, family);
+	}
+
+#ifdef CONFIG_INET
+	if (family == AF_INET &&
+	    !rcu_access_pointer(inet_protos[protocol]))
+		return -ENOENT;
+#endif
+
+	return request_module("net-pf-%d-proto-%d-type-%d-%d", PF_NETLINK,
+			      NETLINK_SOCK_DIAG, family, protocol);
+}
+EXPORT_SYMBOL(sock_load_diag_module);
+
 #ifdef CONFIG_PROC_FS
 static void *proto_seq_start(struct seq_file *seq, loff_t *pos)
 	__acquires(proto_list_mutex)
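As a worked example of the module alias strings the new helper requests (constant values assumed from the usual uapi headers: PF_NETLINK = 16, NETLINK_SOCK_DIAG = 4, AF_INET = 2, IPPROTO_TCP = 6):

	sock_load_diag_module(AF_INET, 0);
	/* -> request_module("net-pf-16-proto-4-type-2"),
	 *    the family-level alias typically provided by inet_diag
	 */

	sock_load_diag_module(AF_INET, IPPROTO_TCP);
	/* -> request_module("net-pf-16-proto-4-type-2-6"),
	 *    the protocol-specific alias typically provided by tcp_diag
	 */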
diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c
index aee5642affd9..a3392a8f9276 100644
--- a/net/core/sock_diag.c
+++ b/net/core/sock_diag.c
@@ -220,8 +220,7 @@ static int __sock_diag_cmd(struct sk_buff *skb, struct nlmsghdr *nlh)
 		return -EINVAL;
 
 	if (sock_diag_handlers[req->sdiag_family] == NULL)
-		request_module("net-pf-%d-proto-%d-type-%d", PF_NETLINK,
-				NETLINK_SOCK_DIAG, req->sdiag_family);
+		sock_load_diag_module(req->sdiag_family, 0);
 
 	mutex_lock(&sock_diag_table_mutex);
 	hndl = sock_diag_handlers[req->sdiag_family];
@@ -247,8 +246,7 @@ static int sock_diag_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
 	case TCPDIAG_GETSOCK:
 	case DCCPDIAG_GETSOCK:
 		if (inet_rcv_compat == NULL)
-			request_module("net-pf-%d-proto-%d-type-%d", PF_NETLINK,
-					NETLINK_SOCK_DIAG, AF_INET);
+			sock_load_diag_module(AF_INET, 0);
 
 		mutex_lock(&sock_diag_table_mutex);
 		if (inet_rcv_compat != NULL)
@@ -281,14 +279,12 @@ static int sock_diag_bind(struct net *net, int group)
 	case SKNLGRP_INET_TCP_DESTROY:
 	case SKNLGRP_INET_UDP_DESTROY:
 		if (!sock_diag_handlers[AF_INET])
-			request_module("net-pf-%d-proto-%d-type-%d", PF_NETLINK,
-					NETLINK_SOCK_DIAG, AF_INET);
+			sock_load_diag_module(AF_INET, 0);
 		break;
 	case SKNLGRP_INET6_TCP_DESTROY:
 	case SKNLGRP_INET6_UDP_DESTROY:
 		if (!sock_diag_handlers[AF_INET6])
-			request_module("net-pf-%d-proto-%d-type-%d", PF_NETLINK,
-					NETLINK_SOCK_DIAG, AF_INET6);
+			sock_load_diag_module(AF_INET6, 0);
 		break;
 	}
 	return 0;