author:    Takashi Iwai <tiwai@suse.de>	2015-10-07 14:11:21 -0400
committer: Takashi Iwai <tiwai@suse.de>	2015-10-07 14:11:21 -0400
commit:    601d62959d08a450d4666c728ddd2f47c5ba1cfe (patch)
tree:      52d9f3c1a2528a9de405d5a19014f3f94b16b35d /net
parent:    225db5762dc1a35b26850477ffa06e5cd0097243 (diff)
parent:    e4fc141d2a022a63c87a4851b3c688eca6a1647b (diff)
Merge tag 'asoc-fix-v4.3-rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/broonie/sound into for-linus
ASoC: Fixes for v4.3
Quite a few fixes here, but they're all very small and driver specific. None of them really stands out if you aren't using the relevant hardware, but they're all useful if you do happen to have an affected device.
Diffstat (limited to 'net')
63 files changed, 744 insertions, 431 deletions
diff --git a/net/atm/clip.c b/net/atm/clip.c
index 17e55dfecbe2..e07f551a863c 100644
--- a/net/atm/clip.c
+++ b/net/atm/clip.c
@@ -317,6 +317,9 @@ static int clip_constructor(struct neighbour *neigh)
 
 static int clip_encap(struct atm_vcc *vcc, int mode)
 {
+	if (!CLIP_VCC(vcc))
+		return -EBADFD;
+
 	CLIP_VCC(vcc)->encap = mode;
 	return 0;
 }
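The guard matters because CLIP_VCC() is just a cast of vcc->user_back (per net/atm/clip.h), so calling clip_encap() on a VCC that was never attached to CLIP wrote through a NULL pointer. A minimal userspace model of the fixed function, with the kernel types stubbed out for illustration:

	#include <errno.h>
	#include <stddef.h>

	struct clip_vcc { int encap; };
	struct atm_vcc  { void *user_back; };

	#define CLIP_VCC(vcc) ((struct clip_vcc *)((vcc)->user_back))

	static int clip_encap(struct atm_vcc *vcc, int mode)
	{
		if (!CLIP_VCC(vcc))		/* VCC never attached to CLIP */
			return -EBADFD;		/* old code wrote through NULL here */
		CLIP_VCC(vcc)->encap = mode;
		return 0;
	}

	int main(void)
	{
		struct atm_vcc vcc = { .user_back = NULL };
		return clip_encap(&vcc, 1) == -EBADFD ? 0 : 1;
	}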
diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
index ad82324f710f..0510a577a7b5 100644
--- a/net/bluetooth/smp.c
+++ b/net/bluetooth/smp.c
@@ -2311,12 +2311,6 @@ int smp_conn_security(struct hci_conn *hcon, __u8 sec_level)
 	if (!conn)
 		return 1;
 
-	chan = conn->smp;
-	if (!chan) {
-		BT_ERR("SMP security requested but not available");
-		return 1;
-	}
-
 	if (!hci_dev_test_flag(hcon->hdev, HCI_LE_ENABLED))
 		return 1;
 
@@ -2330,6 +2324,12 @@ int smp_conn_security(struct hci_conn *hcon, __u8 sec_level)
 	if (smp_ltk_encrypt(conn, hcon->pending_sec_level))
 		return 0;
 
+	chan = conn->smp;
+	if (!chan) {
+		BT_ERR("SMP security requested but not available");
+		return 1;
+	}
+
 	l2cap_chan_lock(chan);
 
 	/* If SMP is already in progress ignore this request */
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index 66efdc21f548..480b3de1a0e3 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -1006,7 +1006,7 @@ static int br_ip4_multicast_igmp3_report(struct net_bridge *br,
 
 	ih = igmpv3_report_hdr(skb);
 	num = ntohs(ih->ngrec);
-	len = sizeof(*ih);
+	len = skb_transport_offset(skb) + sizeof(*ih);
 
 	for (i = 0; i < num; i++) {
 		len += sizeof(*grec);
@@ -1067,7 +1067,7 @@ static int br_ip6_multicast_mld2_report(struct net_bridge *br,
 
 	icmp6h = icmp6_hdr(skb);
 	num = ntohs(icmp6h->icmp6_dataun.un_data16[1]);
-	len = sizeof(*icmp6h);
+	len = skb_transport_offset(skb) + sizeof(*icmp6h);
 
 	for (i = 0; i < num; i++) {
 		__be16 *nsrcs, _nsrcs;
diff --git a/net/core/dev.c b/net/core/dev.c
index 877c84834d81..6bb6470f5b7b 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -4713,6 +4713,8 @@ void napi_disable(struct napi_struct *n)
 
 	while (test_and_set_bit(NAPI_STATE_SCHED, &n->state))
 		msleep(1);
+	while (test_and_set_bit(NAPI_STATE_NPSVC, &n->state))
+		msleep(1);
 
 	hrtimer_cancel(&n->timer);
 
diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c
index bf77e3639ce0..365de66436ac 100644
--- a/net/core/fib_rules.c
+++ b/net/core/fib_rules.c
@@ -631,15 +631,17 @@ static int dump_rules(struct sk_buff *skb, struct netlink_callback *cb,
 {
 	int idx = 0;
 	struct fib_rule *rule;
+	int err = 0;
 
 	rcu_read_lock();
 	list_for_each_entry_rcu(rule, &ops->rules_list, list) {
 		if (idx < cb->args[1])
 			goto skip;
 
-		if (fib_nl_fill_rule(skb, rule, NETLINK_CB(cb->skb).portid,
-				     cb->nlh->nlmsg_seq, RTM_NEWRULE,
-				     NLM_F_MULTI, ops) < 0)
+		err = fib_nl_fill_rule(skb, rule, NETLINK_CB(cb->skb).portid,
+				       cb->nlh->nlmsg_seq, RTM_NEWRULE,
+				       NLM_F_MULTI, ops);
+		if (err)
 			break;
 skip:
 		idx++;
@@ -648,7 +650,7 @@ skip:
 	cb->args[1] = idx;
 	rules_ops_put(ops);
 
-	return skb->len;
+	return err;
 }
 
 static int fib_nl_dumprule(struct sk_buff *skb, struct netlink_callback *cb)
@@ -664,7 +666,9 @@ static int fib_nl_dumprule(struct sk_buff *skb, struct netlink_callback *cb)
 		if (ops == NULL)
 			return -EAFNOSUPPORT;
 
-		return dump_rules(skb, cb, ops);
+		dump_rules(skb, cb, ops);
+
+		return skb->len;
 	}
 
 	rcu_read_lock();
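For context, netlink dump callbacks follow a resumable contract: the core calls them repeatedly, a positive return means "buffer full, call me again with a fresh skb", and 0 ends the dump. That is why the single-family branch above now returns skb->len itself instead of forwarding dump_rules()'s new error-style return. A compilable userspace model of that contract (names and sizes are invented):

	#include <stdio.h>

	#define NRULES	10
	#define PER_SKB	4		/* records that fit in one "skb" */

	static int dump_rules(int *resume_idx, char *buf)
	{
		int idx, filled = 0;

		for (idx = *resume_idx; idx < NRULES; idx++) {
			if (filled == PER_SKB)	/* "skb" full: the -EMSGSIZE case */
				break;
			buf[filled++] = 'A' + idx;	/* fill one record */
		}
		*resume_idx = idx;
		return filled;		/* >0: call again; 0: dump complete */
	}

	int main(void)
	{
		char buf[PER_SKB];
		int resume = 0, n;

		while ((n = dump_rules(&resume, buf)) > 0)
			printf("batch of %d: %.*s\n", n, n, buf);
		return 0;
	}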
diff --git a/net/core/filter.c b/net/core/filter.c
index 13079f03902e..05a04ea87172 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -478,9 +478,9 @@ do_pass:
 				bpf_src = BPF_X;
 			} else {
 				insn->dst_reg = BPF_REG_A;
-				insn->src_reg = BPF_REG_X;
 				insn->imm = fp->k;
 				bpf_src = BPF_SRC(fp->code);
+				insn->src_reg = bpf_src == BPF_X ? BPF_REG_X : 0;
 			}
 
 			/* Common case where 'jump_false' is next insn. */
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index b279077c3089..830f8a7c1cb1 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -31,7 +31,6 @@
 static const char fmt_hex[] = "%#x\n";
 static const char fmt_long_hex[] = "%#lx\n";
 static const char fmt_dec[] = "%d\n";
-static const char fmt_udec[] = "%u\n";
 static const char fmt_ulong[] = "%lu\n";
 static const char fmt_u64[] = "%llu\n";
 
@@ -202,7 +201,7 @@ static ssize_t speed_show(struct device *dev,
 	if (netif_running(netdev)) {
 		struct ethtool_cmd cmd;
 		if (!__ethtool_get_settings(netdev, &cmd))
-			ret = sprintf(buf, fmt_udec, ethtool_cmd_speed(&cmd));
+			ret = sprintf(buf, fmt_dec, ethtool_cmd_speed(&cmd));
 	}
 	rtnl_unlock();
 	return ret;
@@ -1481,6 +1480,15 @@ static int of_dev_node_match(struct device *dev, const void *data)
 	return ret == 0 ? dev->of_node == data : ret;
 }
 
+/*
+ * of_find_net_device_by_node - lookup the net device for the device node
+ * @np: OF device node
+ *
+ * Looks up the net_device structure corresponding with the device node.
+ * If successful, returns a pointer to the net_device with the embedded
+ * struct device refcount incremented by one, or NULL on failure. The
+ * refcount must be dropped when done with the net_device.
+ */
 struct net_device *of_find_net_device_by_node(struct device_node *np)
 {
 	struct device *dev;
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 6aa3db8dfc3b..8bdada242a7d 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -142,7 +142,7 @@ static void queue_process(struct work_struct *work)
  */
 static int poll_one_napi(struct napi_struct *napi, int budget)
 {
-	int work;
+	int work = 0;
 
 	/* net_rx_action's ->poll() invocations and our's are
 	 * synchronized by this test which is only made while
@@ -151,7 +151,12 @@ static int poll_one_napi(struct napi_struct *napi, int budget)
 	if (!test_bit(NAPI_STATE_SCHED, &napi->state))
 		return budget;
 
-	set_bit(NAPI_STATE_NPSVC, &napi->state);
+	/* If we set this bit but see that it has already been set,
+	 * that indicates that napi has been disabled and we need
+	 * to abort this operation
+	 */
+	if (test_and_set_bit(NAPI_STATE_NPSVC, &napi->state))
+		goto out;
 
 	work = napi->poll(napi, budget);
 	WARN_ONCE(work > budget, "%pF exceeded budget in poll\n", napi->poll);
@@ -159,6 +164,7 @@ static int poll_one_napi(struct napi_struct *napi, int budget)
 
 	clear_bit(NAPI_STATE_NPSVC, &napi->state);
 
+out:
 	return budget - work;
 }
 
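Together with the napi_disable() hunk in net/core/dev.c above, this turns NAPI_STATE_NPSVC into a handshake: netpoll aborts if the disable path already owns the bit, and the disable path waits until any in-flight netpoll service drops it. A userspace sketch of the same handshake, with C11 atomic_flag standing in for the kernel's test_and_set_bit()/clear_bit() and the msleep(1) backoff reduced to a spin:

	#include <stdatomic.h>
	#include <stdbool.h>

	static atomic_flag npsvc = ATOMIC_FLAG_INIT;

	static bool poll_one_napi(void)		/* netpoll side */
	{
		if (atomic_flag_test_and_set(&npsvc))
			return false;		/* napi_disable() owns the bit: abort */
		/* ... napi->poll() would run here ... */
		atomic_flag_clear(&npsvc);
		return true;
	}

	static void napi_disable(void)		/* disable side */
	{
		while (atomic_flag_test_and_set(&npsvc))
			;			/* wait for in-flight netpoll service */
		/* bit held: no new netpoll service can start */
	}

	int main(void)
	{
		napi_disable();
		return poll_one_napi() ? 1 : 0;	/* must abort once disabled */
	}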
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index a466821d1441..0ec48403ed68 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -3047,6 +3047,7 @@ static int rtnl_bridge_getlink(struct sk_buff *skb, struct netlink_callback *cb)
 	u32 portid = NETLINK_CB(cb->skb).portid;
 	u32 seq = cb->nlh->nlmsg_seq;
 	u32 filter_mask = 0;
+	int err;
 
 	if (nlmsg_len(cb->nlh) > sizeof(struct ifinfomsg)) {
 		struct nlattr *extfilt;
@@ -3067,20 +3068,25 @@ static int rtnl_bridge_getlink(struct sk_buff *skb, struct netlink_callback *cb)
 		struct net_device *br_dev = netdev_master_upper_dev_get(dev);
 
 		if (br_dev && br_dev->netdev_ops->ndo_bridge_getlink) {
-			if (idx >= cb->args[0] &&
-			    br_dev->netdev_ops->ndo_bridge_getlink(
-				    skb, portid, seq, dev, filter_mask,
-				    NLM_F_MULTI) < 0)
-				break;
+			if (idx >= cb->args[0]) {
+				err = br_dev->netdev_ops->ndo_bridge_getlink(
+					skb, portid, seq, dev,
+					filter_mask, NLM_F_MULTI);
+				if (err < 0 && err != -EOPNOTSUPP)
+					break;
+			}
 			idx++;
 		}
 
 		if (ops->ndo_bridge_getlink) {
-			if (idx >= cb->args[0] &&
-			    ops->ndo_bridge_getlink(skb, portid, seq, dev,
-						    filter_mask,
-						    NLM_F_MULTI) < 0)
-				break;
+			if (idx >= cb->args[0]) {
+				err = ops->ndo_bridge_getlink(skb, portid,
+							      seq, dev,
+							      filter_mask,
+							      NLM_F_MULTI);
+				if (err < 0 && err != -EOPNOTSUPP)
+					break;
+			}
 			idx++;
 		}
 	}
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index dad4dd37e2aa..fab4599ba8b2 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -2958,11 +2958,12 @@ EXPORT_SYMBOL_GPL(skb_append_pagefrags);
  */
 unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len)
 {
+	unsigned char *data = skb->data;
+
 	BUG_ON(len > skb->len);
-	skb->len -= len;
-	BUG_ON(skb->len < skb->data_len);
-	skb_postpull_rcsum(skb, skb->data, len);
-	return skb->data += len;
+	__skb_pull(skb, len);
+	skb_postpull_rcsum(skb, data, len);
+	return skb->data;
 }
 EXPORT_SYMBOL_GPL(skb_pull_rcsum);
 
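The snapshot of skb->data is the point of the rewrite: the checksum adjustment must cover exactly the bytes being pulled, starting at the pre-pull data pointer, while the pull itself goes through __skb_pull(). A userspace model of the invariant being maintained (plain additive sums stand in for the kernel's ones'-complement csum helpers):

	#include <assert.h>
	#include <stddef.h>
	#include <stdint.h>

	struct skb { const uint8_t *data; size_t len; uint32_t csum; };

	static uint32_t sum(const uint8_t *p, size_t n)
	{
		uint32_t s = 0;
		while (n--)
			s += *p++;
		return s;
	}

	/* pull 'len' bytes, keeping csum covering exactly data..data+len */
	static void pull_rcsum(struct skb *skb, size_t len)
	{
		const uint8_t *data = skb->data;	/* snapshot, as in the patch */

		assert(len <= skb->len);
		skb->data += len;			/* __skb_pull() */
		skb->len -= len;
		skb->csum -= sum(data, len);		/* skb_postpull_rcsum() */
	}

	int main(void)
	{
		uint8_t buf[8] = {1, 2, 3, 4, 5, 6, 7, 8};
		struct skb skb = { buf, 8, sum(buf, 8) };

		pull_rcsum(&skb, 3);
		assert(skb.csum == sum(skb.data, skb.len));	/* invariant holds */
		return 0;
	}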
diff --git a/net/core/sock.c b/net/core/sock.c
index ca2984afe16e..3307c02244d3 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -2740,10 +2740,8 @@ static void req_prot_cleanup(struct request_sock_ops *rsk_prot)
 		return;
 	kfree(rsk_prot->slab_name);
 	rsk_prot->slab_name = NULL;
-	if (rsk_prot->slab) {
-		kmem_cache_destroy(rsk_prot->slab);
-		rsk_prot->slab = NULL;
-	}
+	kmem_cache_destroy(rsk_prot->slab);
+	rsk_prot->slab = NULL;
 }
 
 static int req_prot_init(const struct proto *prot)
@@ -2828,10 +2826,8 @@ void proto_unregister(struct proto *prot)
 	list_del(&prot->node);
 	mutex_unlock(&proto_list_mutex);
 
-	if (prot->slab != NULL) {
-		kmem_cache_destroy(prot->slab);
-		prot->slab = NULL;
-	}
+	kmem_cache_destroy(prot->slab);
+	prot->slab = NULL;
 
 	req_prot_cleanup(prot->rsk_prot);
 
diff --git a/net/dccp/ackvec.c b/net/dccp/ackvec.c
index bd9e718c2a20..3de0d0362d7f 100644
--- a/net/dccp/ackvec.c
+++ b/net/dccp/ackvec.c
@@ -398,12 +398,8 @@ out_err:
 
 void dccp_ackvec_exit(void)
 {
-	if (dccp_ackvec_slab != NULL) {
-		kmem_cache_destroy(dccp_ackvec_slab);
-		dccp_ackvec_slab = NULL;
-	}
-	if (dccp_ackvec_record_slab != NULL) {
-		kmem_cache_destroy(dccp_ackvec_record_slab);
-		dccp_ackvec_record_slab = NULL;
-	}
+	kmem_cache_destroy(dccp_ackvec_slab);
+	dccp_ackvec_slab = NULL;
+	kmem_cache_destroy(dccp_ackvec_record_slab);
+	dccp_ackvec_record_slab = NULL;
 }
diff --git a/net/dccp/ccid.c b/net/dccp/ccid.c
index 83498975165f..90f77d08cc37 100644
--- a/net/dccp/ccid.c
+++ b/net/dccp/ccid.c
@@ -95,8 +95,7 @@ static struct kmem_cache *ccid_kmem_cache_create(int obj_size, char *slab_name_f
 
 static void ccid_kmem_cache_destroy(struct kmem_cache *slab)
 {
-	if (slab != NULL)
-		kmem_cache_destroy(slab);
+	kmem_cache_destroy(slab);
 }
 
 static int __init ccid_activate(struct ccid_operations *ccid_ops)
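All three slab cleanups above (net/core/sock.c, net/dccp/ackvec.c, net/dccp/ccid.c) lean on kmem_cache_destroy() being a no-op for NULL, like kfree(), so the open-coded NULL checks were dead weight. The idiom, modeled in plain C with free() playing the NULL-safe role:

	#include <stdlib.h>

	struct proto_state { void *slab; };

	static void proto_cleanup(struct proto_state *p)
	{
		free(p->slab);	/* no 'if (p->slab)' needed: free(NULL) is a no-op */
		p->slab = NULL;
	}

	int main(void)
	{
		struct proto_state p = { .slab = NULL };
		proto_cleanup(&p);	/* safe even though slab was never created */
		return 0;
	}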
diff --git a/net/dccp/minisocks.c b/net/dccp/minisocks.c
index 30addee2dd03..838f524cf11a 100644
--- a/net/dccp/minisocks.c
+++ b/net/dccp/minisocks.c
@@ -48,8 +48,6 @@ void dccp_time_wait(struct sock *sk, int state, int timeo)
 			tw->tw_ipv6only = sk->sk_ipv6only;
 		}
 #endif
-		/* Linkage updates. */
-		__inet_twsk_hashdance(tw, sk, &dccp_hashinfo);
 
 		/* Get the TIME_WAIT timeout firing. */
 		if (timeo < rto)
@@ -60,6 +58,8 @@ void dccp_time_wait(struct sock *sk, int state, int timeo)
 			timeo = DCCP_TIMEWAIT_LEN;
 
 		inet_twsk_schedule(tw, timeo);
+		/* Linkage updates. */
+		__inet_twsk_hashdance(tw, sk, &dccp_hashinfo);
 		inet_twsk_put(tw);
 	} else {
 		/* Sorry, if we're out of memory, just CLOSE this
diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c
index 76e3800765f8..c59fa5d9c22c 100644
--- a/net/dsa/dsa.c
+++ b/net/dsa/dsa.c
@@ -634,6 +634,10 @@ static void dsa_of_free_platform_data(struct dsa_platform_data *pd)
 			port_index++;
 		}
 		kfree(pd->chip[i].rtable);
+
+		/* Drop our reference to the MDIO bus device */
+		if (pd->chip[i].host_dev)
+			put_device(pd->chip[i].host_dev);
 	}
 	kfree(pd->chip);
 }
@@ -661,16 +665,22 @@ static int dsa_of_probe(struct device *dev)
 		return -EPROBE_DEFER;
 
 	ethernet = of_parse_phandle(np, "dsa,ethernet", 0);
-	if (!ethernet)
-		return -EINVAL;
+	if (!ethernet) {
+		ret = -EINVAL;
+		goto out_put_mdio;
+	}
 
 	ethernet_dev = of_find_net_device_by_node(ethernet);
-	if (!ethernet_dev)
-		return -EPROBE_DEFER;
+	if (!ethernet_dev) {
+		ret = -EPROBE_DEFER;
+		goto out_put_mdio;
+	}
 
 	pd = kzalloc(sizeof(*pd), GFP_KERNEL);
-	if (!pd)
-		return -ENOMEM;
+	if (!pd) {
+		ret = -ENOMEM;
+		goto out_put_ethernet;
+	}
 
 	dev->platform_data = pd;
 	pd->of_netdev = ethernet_dev;
@@ -691,7 +701,9 @@ static int dsa_of_probe(struct device *dev)
 		cd = &pd->chip[chip_index];
 
 		cd->of_node = child;
-		cd->host_dev = &mdio_bus->dev;
+
+		/* When assigning the host device, increment its refcount */
+		cd->host_dev = get_device(&mdio_bus->dev);
 
 		sw_addr = of_get_property(child, "reg", NULL);
 		if (!sw_addr)
@@ -711,6 +723,12 @@ static int dsa_of_probe(struct device *dev)
 				ret = -EPROBE_DEFER;
 				goto out_free_chip;
 			}
+
+			/* Drop the mdio_bus device ref, replacing the host
+			 * device with the mdio_bus_switch device, keeping
+			 * the refcount from of_mdio_find_bus() above.
+			 */
+			put_device(cd->host_dev);
 			cd->host_dev = &mdio_bus_switch->dev;
 		}
 
@@ -744,6 +762,10 @@ static int dsa_of_probe(struct device *dev)
 		}
 	}
 
+	/* The individual chips hold their own refcount on the mdio bus,
+	 * so drop ours */
+	put_device(&mdio_bus->dev);
+
 	return 0;
 
 out_free_chip:
@@ -751,6 +773,10 @@ out_free_chip:
 out_free:
 	kfree(pd);
 	dev->platform_data = NULL;
+out_put_ethernet:
+	put_device(&ethernet_dev->dev);
+out_put_mdio:
+	put_device(&mdio_bus->dev);
 	return ret;
 }
 
@@ -762,6 +788,7 @@ static void dsa_of_remove(struct device *dev)
 		return;
 
 	dsa_of_free_platform_data(pd);
+	put_device(&pd->of_netdev->dev);
 	kfree(pd);
 }
 #else
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index cce97385f743..7d91f4612ac0 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -458,12 +458,17 @@ static int dsa_slave_stp_update(struct net_device *dev, u8 state)
 static int dsa_slave_port_attr_set(struct net_device *dev,
 				   struct switchdev_attr *attr)
 {
-	int ret = 0;
+	struct dsa_slave_priv *p = netdev_priv(dev);
+	struct dsa_switch *ds = p->parent;
+	int ret;
 
 	switch (attr->id) {
 	case SWITCHDEV_ATTR_PORT_STP_STATE:
-		if (attr->trans == SWITCHDEV_TRANS_COMMIT)
-			ret = dsa_slave_stp_update(dev, attr->u.stp_state);
+		if (attr->trans == SWITCHDEV_TRANS_PREPARE)
+			ret = ds->drv->port_stp_update ? 0 : -EOPNOTSUPP;
+		else
+			ret = ds->drv->port_stp_update(ds, p->port,
+						       attr->u.stp_state);
 		break;
 	default:
 		ret = -EOPNOTSUPP;
diff --git a/net/dsa/tag_trailer.c b/net/dsa/tag_trailer.c
index d25efc93d8f1..b6ca0890d018 100644
--- a/net/dsa/tag_trailer.c
+++ b/net/dsa/tag_trailer.c
@@ -78,7 +78,7 @@ static int trailer_rcv(struct sk_buff *skb, struct net_device *dev,
 
 	trailer = skb_tail_pointer(skb) - 4;
 	if (trailer[0] != 0x80 || (trailer[1] & 0xf8) != 0x00 ||
-	    (trailer[3] & 0xef) != 0x00 || trailer[3] != 0x00)
+	    (trailer[2] & 0xef) != 0x00 || trailer[3] != 0x00)
 		goto out_drop;
 
 	source_port = trailer[1] & 7;
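The old condition tested trailer[3] twice, so the masked check was dead code (subsumed by the stricter trailer[3] != 0x00) and the third trailer byte was never validated at all. A small compilable demonstration, where a nonzero return means "drop the frame" (the frame bytes are invented):

	#include <assert.h>
	#include <stdint.h>

	static int old_check(const uint8_t *t)
	{
		return t[0] != 0x80 || (t[1] & 0xf8) != 0x00 ||
		       (t[3] & 0xef) != 0x00 || t[3] != 0x00;	/* t[2] never read */
	}

	static int new_check(const uint8_t *t)
	{
		return t[0] != 0x80 || (t[1] & 0xf8) != 0x00 ||
		       (t[2] & 0xef) != 0x00 || t[3] != 0x00;
	}

	int main(void)
	{
		uint8_t trailer[4] = { 0x80, 0x05, 0x4f, 0x00 };

		assert(!old_check(trailer));	/* old code accepted this frame */
		assert(new_check(trailer));	/* fixed code drops it */
		return 0;
	}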
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index 30409b75e925..f03db8b7abee 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -113,6 +113,8 @@
 #include <net/arp.h>
 #include <net/ax25.h>
 #include <net/netrom.h>
+#include <net/dst_metadata.h>
+#include <net/ip_tunnels.h>
 
 #include <linux/uaccess.h>
 
@@ -296,7 +298,8 @@ static void arp_send_dst(int type, int ptype, __be32 dest_ip,
 			 struct net_device *dev, __be32 src_ip,
 			 const unsigned char *dest_hw,
 			 const unsigned char *src_hw,
-			 const unsigned char *target_hw, struct sk_buff *oskb)
+			 const unsigned char *target_hw,
+			 struct dst_entry *dst)
 {
 	struct sk_buff *skb;
 
@@ -309,9 +312,7 @@ static void arp_send_dst(int type, int ptype, __be32 dest_ip,
 	if (!skb)
 		return;
 
-	if (oskb)
-		skb_dst_copy(skb, oskb);
-
+	skb_dst_set(skb, dst);
 	arp_xmit(skb);
 }
 
@@ -333,6 +334,7 @@ static void arp_solicit(struct neighbour *neigh, struct sk_buff *skb)
 	__be32 target = *(__be32 *)neigh->primary_key;
 	int probes = atomic_read(&neigh->probes);
 	struct in_device *in_dev;
+	struct dst_entry *dst = NULL;
 
 	rcu_read_lock();
 	in_dev = __in_dev_get_rcu(dev);
@@ -381,9 +383,10 @@ static void arp_solicit(struct neighbour *neigh, struct sk_buff *skb)
 		}
 	}
 
+	if (skb && !(dev->priv_flags & IFF_XMIT_DST_RELEASE))
+		dst = dst_clone(skb_dst(skb));
 	arp_send_dst(ARPOP_REQUEST, ETH_P_ARP, target, dev, saddr,
-		     dst_hw, dev->dev_addr, NULL,
-		     dev->priv_flags & IFF_XMIT_DST_RELEASE ? NULL : skb);
+		     dst_hw, dev->dev_addr, NULL, dst);
 }
 
 static int arp_ignore(struct in_device *in_dev, __be32 sip, __be32 tip)
@@ -649,6 +652,7 @@ static int arp_process(struct sock *sk, struct sk_buff *skb)
 	int addr_type;
 	struct neighbour *n;
 	struct net *net = dev_net(dev);
+	struct dst_entry *reply_dst = NULL;
 	bool is_garp = false;
 
 	/* arp_rcv below verifies the ARP header and verifies the device
@@ -749,13 +753,18 @@ static int arp_process(struct sock *sk, struct sk_buff *skb)
 	 *  cache.
 	 */
 
+	if (arp->ar_op == htons(ARPOP_REQUEST) && skb_metadata_dst(skb))
+		reply_dst = (struct dst_entry *)
+			    iptunnel_metadata_reply(skb_metadata_dst(skb),
+						    GFP_ATOMIC);
+
 	/* Special case: IPv4 duplicate address detection packet (RFC2131) */
 	if (sip == 0) {
 		if (arp->ar_op == htons(ARPOP_REQUEST) &&
 		    inet_addr_type_dev_table(net, dev, tip) == RTN_LOCAL &&
 		    !arp_ignore(in_dev, sip, tip))
-			arp_send(ARPOP_REPLY, ETH_P_ARP, sip, dev, tip, sha,
-				 dev->dev_addr, sha);
+			arp_send_dst(ARPOP_REPLY, ETH_P_ARP, sip, dev, tip,
+				     sha, dev->dev_addr, sha, reply_dst);
 		goto out;
 	}
 
@@ -774,9 +783,10 @@ static int arp_process(struct sock *sk, struct sk_buff *skb)
 			if (!dont_send) {
 				n = neigh_event_ns(&arp_tbl, sha, &sip, dev);
 				if (n) {
-					arp_send(ARPOP_REPLY, ETH_P_ARP, sip,
-						 dev, tip, sha, dev->dev_addr,
-						 sha);
+					arp_send_dst(ARPOP_REPLY, ETH_P_ARP,
+						     sip, dev, tip, sha,
+						     dev->dev_addr, sha,
+						     reply_dst);
 					neigh_release(n);
 				}
 			}
@@ -794,9 +804,10 @@ static int arp_process(struct sock *sk, struct sk_buff *skb)
 			if (NEIGH_CB(skb)->flags & LOCALLY_ENQUEUED ||
 			    skb->pkt_type == PACKET_HOST ||
 			    NEIGH_VAR(in_dev->arp_parms, PROXY_DELAY) == 0) {
-				arp_send(ARPOP_REPLY, ETH_P_ARP, sip,
-					 dev, tip, sha, dev->dev_addr,
-					 sha);
+				arp_send_dst(ARPOP_REPLY, ETH_P_ARP,
+					     sip, dev, tip, sha,
+					     dev->dev_addr, sha,
+					     reply_dst);
 			} else {
 				pneigh_enqueue(&arp_tbl,
 					       in_dev->arp_parms, skb);
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index 6fcbd215cdbc..690bcbc59f26 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -340,6 +340,7 @@ static int __fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst,
 	fl4.flowi4_tos = tos;
 	fl4.flowi4_scope = RT_SCOPE_UNIVERSE;
 	fl4.flowi4_tun_key.tun_id = 0;
+	fl4.flowi4_flags = 0;
 
 	no_addr = idev->ifa_list == NULL;
 
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index 26d6ffb6d23c..6c2af797f2f9 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -1426,7 +1426,7 @@ found:
 		    nh->nh_flags & RTNH_F_LINKDOWN &&
 		    !(fib_flags & FIB_LOOKUP_IGNORE_LINKSTATE))
 			continue;
-		if (!(flp->flowi4_flags & FLOWI_FLAG_VRFSRC)) {
+		if (!(flp->flowi4_flags & FLOWI_FLAG_SKIP_NH_OIF)) {
 			if (flp->flowi4_oif &&
 			    flp->flowi4_oif != nh->nh_oif)
 				continue;
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index 79fe05befcae..e5eb8ac4089d 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -427,7 +427,7 @@ static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb)
 	fl4.flowi4_mark = mark;
 	fl4.flowi4_tos = RT_TOS(ip_hdr(skb)->tos);
 	fl4.flowi4_proto = IPPROTO_ICMP;
-	fl4.flowi4_oif = vrf_master_ifindex(skb->dev) ? : skb->dev->ifindex;
+	fl4.flowi4_oif = vrf_master_ifindex(skb->dev);
 	security_skb_classify_flow(skb, flowi4_to_flowi(&fl4));
 	rt = ip_route_output_key(net, &fl4);
 	if (IS_ERR(rt))
@@ -461,7 +461,7 @@ static struct rtable *icmp_route_lookup(struct net *net,
 	fl4->flowi4_proto = IPPROTO_ICMP;
 	fl4->fl4_icmp_type = type;
 	fl4->fl4_icmp_code = code;
-	fl4->flowi4_oif = vrf_master_ifindex(skb_in->dev) ? : skb_in->dev->ifindex;
+	fl4->flowi4_oif = vrf_master_ifindex(skb_in->dev);
 
 	security_skb_classify_flow(skb_in, flowi4_to_flowi(fl4));
 	rt = __ip_route_output_key(net, fl4);
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 134957159c27..7bb9c39e0a4d 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -685,20 +685,20 @@ void reqsk_queue_hash_req(struct request_sock_queue *queue,
 	req->num_timeout = 0;
 	req->sk = NULL;
 
+	setup_timer(&req->rsk_timer, reqsk_timer_handler, (unsigned long)req);
+	mod_timer_pinned(&req->rsk_timer, jiffies + timeout);
+	req->rsk_hash = hash;
+
 	/* before letting lookups find us, make sure all req fields
 	 * are committed to memory and refcnt initialized.
 	 */
 	smp_wmb();
 	atomic_set(&req->rsk_refcnt, 2);
-	setup_timer(&req->rsk_timer, reqsk_timer_handler, (unsigned long)req);
-	req->rsk_hash = hash;
 
 	spin_lock(&queue->syn_wait_lock);
 	req->dl_next = lopt->syn_table[hash];
 	lopt->syn_table[hash] = req;
 	spin_unlock(&queue->syn_wait_lock);
-
-	mod_timer_pinned(&req->rsk_timer, jiffies + timeout);
 }
 EXPORT_SYMBOL(reqsk_queue_hash_req);
 
diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c
index ae22cc24fbe8..c67f9bd7699c 100644
--- a/net/ipv4/inet_timewait_sock.c
+++ b/net/ipv4/inet_timewait_sock.c
@@ -123,13 +123,15 @@ void __inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
 	/*
 	 * Step 2: Hash TW into tcp ehash chain.
 	 * Notes :
-	 * - tw_refcnt is set to 3 because :
+	 * - tw_refcnt is set to 4 because :
 	 * - We have one reference from bhash chain.
 	 * - We have one reference from ehash chain.
+	 * - We have one reference from timer.
+	 * - One reference for ourself (our caller will release it).
 	 * We can use atomic_set() because prior spin_lock()/spin_unlock()
 	 * committed into memory all tw fields.
 	 */
-	atomic_set(&tw->tw_refcnt, 1 + 1 + 1);
+	atomic_set(&tw->tw_refcnt, 4);
 	inet_twsk_add_node_rcu(tw, &ehead->chain);
 
 	/* Step 3: Remove SK from hash chain */
@@ -217,7 +219,7 @@ void inet_twsk_deschedule_put(struct inet_timewait_sock *tw)
 }
 EXPORT_SYMBOL(inet_twsk_deschedule_put);
 
-void inet_twsk_schedule(struct inet_timewait_sock *tw, const int timeo)
+void __inet_twsk_schedule(struct inet_timewait_sock *tw, int timeo, bool rearm)
 {
 	/* timeout := RTO * 3.5
 	 *
@@ -245,12 +247,14 @@ void inet_twsk_schedule(struct inet_timewait_sock *tw, const int timeo)
 	 */
 
 	tw->tw_kill = timeo <= 4*HZ;
-	if (!mod_timer_pinned(&tw->tw_timer, jiffies + timeo)) {
-		atomic_inc(&tw->tw_refcnt);
+	if (!rearm) {
+		BUG_ON(mod_timer_pinned(&tw->tw_timer, jiffies + timeo));
 		atomic_inc(&tw->tw_dr->tw_count);
+	} else {
+		mod_timer_pending(&tw->tw_timer, jiffies + timeo);
 	}
 }
-EXPORT_SYMBOL_GPL(inet_twsk_schedule);
+EXPORT_SYMBOL_GPL(__inet_twsk_schedule);
 
 void inet_twsk_purge(struct inet_hashinfo *hashinfo,
 		     struct inet_timewait_death_row *twdr, int family)
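The tcp_minisocks.c hunks below call inet_twsk_reschedule(), which does not appear in this net/-limited diffstat; presumably the companion header change (outside net/, likely include/net/inet_timewait_sock.h) wraps the new __inet_twsk_schedule() along these lines (a sketch, assuming that split):

	static inline void inet_twsk_schedule(struct inet_timewait_sock *tw, int timeo)
	{
		__inet_twsk_schedule(tw, timeo, false);	/* first arming: takes refs */
	}

	static inline void inet_twsk_reschedule(struct inet_timewait_sock *tw, int timeo)
	{
		__inet_twsk_schedule(tw, timeo, true);	/* rearm a pending timer */
	}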
diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c
index 29ed6c5a5185..84dce6a92f93 100644
--- a/net/ipv4/ip_tunnel_core.c
+++ b/net/ipv4/ip_tunnel_core.c
@@ -46,12 +46,13 @@
 #include <net/net_namespace.h>
 #include <net/netns/generic.h>
 #include <net/rtnetlink.h>
+#include <net/dst_metadata.h>
 
 int iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb,
 		  __be32 src, __be32 dst, __u8 proto,
 		  __u8 tos, __u8 ttl, __be16 df, bool xnet)
 {
-	int pkt_len = skb->len;
+	int pkt_len = skb->len - skb_inner_network_offset(skb);
 	struct iphdr *iph;
 	int err;
 
@@ -119,6 +120,33 @@ int iptunnel_pull_header(struct sk_buff *skb, int hdr_len, __be16 inner_proto)
 }
 EXPORT_SYMBOL_GPL(iptunnel_pull_header);
 
+struct metadata_dst *iptunnel_metadata_reply(struct metadata_dst *md,
+					     gfp_t flags)
+{
+	struct metadata_dst *res;
+	struct ip_tunnel_info *dst, *src;
+
+	if (!md || md->u.tun_info.mode & IP_TUNNEL_INFO_TX)
+		return NULL;
+
+	res = metadata_dst_alloc(0, flags);
+	if (!res)
+		return NULL;
+
+	dst = &res->u.tun_info;
+	src = &md->u.tun_info;
+	dst->key.tun_id = src->key.tun_id;
+	if (src->mode & IP_TUNNEL_INFO_IPV6)
+		memcpy(&dst->key.u.ipv6.dst, &src->key.u.ipv6.src,
+		       sizeof(struct in6_addr));
+	else
+		dst->key.u.ipv4.dst = src->key.u.ipv4.src;
+	dst->mode = src->mode | IP_TUNNEL_INFO_TX;
+
+	return res;
+}
+EXPORT_SYMBOL_GPL(iptunnel_metadata_reply);
+
 struct sk_buff *iptunnel_handle_offloads(struct sk_buff *skb,
 					 bool csum_help,
 					 int gso_type_mask)
@@ -198,8 +226,6 @@ static const struct nla_policy ip_tun_policy[LWTUNNEL_IP_MAX + 1] = {
 	[LWTUNNEL_IP_SRC]	= { .type = NLA_U32 },
 	[LWTUNNEL_IP_TTL]	= { .type = NLA_U8 },
 	[LWTUNNEL_IP_TOS]	= { .type = NLA_U8 },
-	[LWTUNNEL_IP_SPORT]	= { .type = NLA_U16 },
-	[LWTUNNEL_IP_DPORT]	= { .type = NLA_U16 },
 	[LWTUNNEL_IP_FLAGS]	= { .type = NLA_U16 },
 };
 
@@ -239,12 +265,6 @@ static int ip_tun_build_state(struct net_device *dev, struct nlattr *attr,
 	if (tb[LWTUNNEL_IP_TOS])
 		tun_info->key.tos = nla_get_u8(tb[LWTUNNEL_IP_TOS]);
 
-	if (tb[LWTUNNEL_IP_SPORT])
-		tun_info->key.tp_src = nla_get_be16(tb[LWTUNNEL_IP_SPORT]);
-
-	if (tb[LWTUNNEL_IP_DPORT])
-		tun_info->key.tp_dst = nla_get_be16(tb[LWTUNNEL_IP_DPORT]);
-
 	if (tb[LWTUNNEL_IP_FLAGS])
 		tun_info->key.tun_flags = nla_get_u16(tb[LWTUNNEL_IP_FLAGS]);
 
@@ -266,8 +286,6 @@ static int ip_tun_fill_encap_info(struct sk_buff *skb,
 	    nla_put_be32(skb, LWTUNNEL_IP_SRC, tun_info->key.u.ipv4.src) ||
 	    nla_put_u8(skb, LWTUNNEL_IP_TOS, tun_info->key.tos) ||
 	    nla_put_u8(skb, LWTUNNEL_IP_TTL, tun_info->key.ttl) ||
-	    nla_put_u16(skb, LWTUNNEL_IP_SPORT, tun_info->key.tp_src) ||
-	    nla_put_u16(skb, LWTUNNEL_IP_DPORT, tun_info->key.tp_dst) ||
 	    nla_put_u16(skb, LWTUNNEL_IP_FLAGS, tun_info->key.tun_flags))
 		return -ENOMEM;
 
@@ -281,8 +299,6 @@ static int ip_tun_encap_nlsize(struct lwtunnel_state *lwtstate)
 		+ nla_total_size(4)	/* LWTUNNEL_IP_SRC */
 		+ nla_total_size(1)	/* LWTUNNEL_IP_TOS */
 		+ nla_total_size(1)	/* LWTUNNEL_IP_TTL */
-		+ nla_total_size(2)	/* LWTUNNEL_IP_SPORT */
-		+ nla_total_size(2)	/* LWTUNNEL_IP_DPORT */
 		+ nla_total_size(2);	/* LWTUNNEL_IP_FLAGS */
 }
 
@@ -305,8 +321,6 @@ static const struct nla_policy ip6_tun_policy[LWTUNNEL_IP6_MAX + 1] = {
 	[LWTUNNEL_IP6_SRC]	= { .len = sizeof(struct in6_addr) },
 	[LWTUNNEL_IP6_HOPLIMIT]	= { .type = NLA_U8 },
 	[LWTUNNEL_IP6_TC]	= { .type = NLA_U8 },
-	[LWTUNNEL_IP6_SPORT]	= { .type = NLA_U16 },
-	[LWTUNNEL_IP6_DPORT]	= { .type = NLA_U16 },
 	[LWTUNNEL_IP6_FLAGS]	= { .type = NLA_U16 },
 };
 
@@ -346,12 +360,6 @@ static int ip6_tun_build_state(struct net_device *dev, struct nlattr *attr,
 	if (tb[LWTUNNEL_IP6_TC])
 		tun_info->key.tos = nla_get_u8(tb[LWTUNNEL_IP6_TC]);
 
-	if (tb[LWTUNNEL_IP6_SPORT])
-		tun_info->key.tp_src = nla_get_be16(tb[LWTUNNEL_IP6_SPORT]);
-
-	if (tb[LWTUNNEL_IP6_DPORT])
-		tun_info->key.tp_dst = nla_get_be16(tb[LWTUNNEL_IP6_DPORT]);
-
 	if (tb[LWTUNNEL_IP6_FLAGS])
 		tun_info->key.tun_flags = nla_get_u16(tb[LWTUNNEL_IP6_FLAGS]);
 
@@ -373,8 +381,6 @@ static int ip6_tun_fill_encap_info(struct sk_buff *skb,
 	    nla_put_in6_addr(skb, LWTUNNEL_IP6_SRC, &tun_info->key.u.ipv6.src) ||
 	    nla_put_u8(skb, LWTUNNEL_IP6_HOPLIMIT, tun_info->key.tos) ||
 	    nla_put_u8(skb, LWTUNNEL_IP6_TC, tun_info->key.ttl) ||
-	    nla_put_u16(skb, LWTUNNEL_IP6_SPORT, tun_info->key.tp_src) ||
-	    nla_put_u16(skb, LWTUNNEL_IP6_DPORT, tun_info->key.tp_dst) ||
 	    nla_put_u16(skb, LWTUNNEL_IP6_FLAGS, tun_info->key.tun_flags))
 		return -ENOMEM;
 
@@ -388,8 +394,6 @@ static int ip6_tun_encap_nlsize(struct lwtunnel_state *lwtstate)
 		+ nla_total_size(16)	/* LWTUNNEL_IP6_SRC */
 		+ nla_total_size(1)	/* LWTUNNEL_IP6_HOPLIMIT */
 		+ nla_total_size(1)	/* LWTUNNEL_IP6_TC */
-		+ nla_total_size(2)	/* LWTUNNEL_IP6_SPORT */
-		+ nla_total_size(2)	/* LWTUNNEL_IP6_DPORT */
 		+ nla_total_size(2);	/* LWTUNNEL_IP6_FLAGS */
 }
 
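iptunnel_metadata_reply() is what lets arp_process() in the net/ipv4/arp.c hunk above answer an ARP request that arrived over a metadata-based tunnel: it flips the received RX tunnel metadata into TX metadata whose destination is the original source, so the reply is routed back through the tunnel with the same tun_id. A userspace model of the swap (field names only loosely mirror ip_tunnel_info):

	#include <stdint.h>
	#include <stdio.h>

	struct tun_key { uint64_t tun_id; uint32_t src, dst; };

	static struct tun_key metadata_reply(const struct tun_key *rx)
	{
		struct tun_key tx = { .tun_id = rx->tun_id };

		tx.dst = rx->src;	/* reply goes back to where it came from */
		tx.src = 0;		/* let the tunnel pick its own source */
		return tx;
	}

	int main(void)
	{
		struct tun_key rx = { 42, 0x0a000001, 0x0a000002 };
		struct tun_key tx = metadata_reply(&rx);

		printf("reply tun_id %llu -> dst %#x\n",
		       (unsigned long long)tx.tun_id, (unsigned)tx.dst);
		return 0;
	}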
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 5f4a5565ad8b..c81deb85acb4 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -1737,6 +1737,7 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
 	fl4.flowi4_mark = skb->mark;
 	fl4.flowi4_tos = tos;
 	fl4.flowi4_scope = RT_SCOPE_UNIVERSE;
+	fl4.flowi4_flags = 0;
 	fl4.daddr = daddr;
 	fl4.saddr = saddr;
 	err = fib_lookup(net, &fl4, &res, 0);
@@ -2045,6 +2046,7 @@ struct rtable *__ip_route_output_key(struct net *net, struct flowi4 *fl4)
 	struct fib_result res;
 	struct rtable *rth;
 	int orig_oif;
+	int err = -ENETUNREACH;
 
 	res.tclassid = 0;
 	res.fi = NULL;
@@ -2153,7 +2155,8 @@ struct rtable *__ip_route_output_key(struct net *net, struct flowi4 *fl4)
 		goto make_route;
 	}
 
-	if (fib_lookup(net, fl4, &res, 0)) {
+	err = fib_lookup(net, fl4, &res, 0);
+	if (err) {
 		res.fi = NULL;
 		res.table = NULL;
 		if (fl4->flowi4_oif) {
@@ -2181,7 +2184,7 @@ struct rtable *__ip_route_output_key(struct net *net, struct flowi4 *fl4)
 			res.type = RTN_UNICAST;
 			goto make_route;
 		}
-		rth = ERR_PTR(-ENETUNREACH);
+		rth = ERR_PTR(err);
 		goto out;
 	}
 
diff --git a/net/ipv4/tcp_cubic.c b/net/ipv4/tcp_cubic.c
index c6ded6b2a79f..448c2615fece 100644
--- a/net/ipv4/tcp_cubic.c
+++ b/net/ipv4/tcp_cubic.c
@@ -154,14 +154,20 @@ static void bictcp_init(struct sock *sk)
 static void bictcp_cwnd_event(struct sock *sk, enum tcp_ca_event event)
 {
 	if (event == CA_EVENT_TX_START) {
-		s32 delta = tcp_time_stamp - tcp_sk(sk)->lsndtime;
 		struct bictcp *ca = inet_csk_ca(sk);
+		u32 now = tcp_time_stamp;
+		s32 delta;
+
+		delta = now - tcp_sk(sk)->lsndtime;
 
 		/* We were application limited (idle) for a while.
 		 * Shift epoch_start to keep cwnd growth to cubic curve.
 		 */
-		if (ca->epoch_start && delta > 0)
+		if (ca->epoch_start && delta > 0) {
 			ca->epoch_start += delta;
+			if (after(ca->epoch_start, now))
+				ca->epoch_start = now;
+		}
 		return;
 	}
 }
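The added clamp matters because the timestamp arithmetic is unsigned: if epoch_start drifts past now, the elapsed-time term the cubic curve computes from it wraps to an enormous value instead of going negative. A two-line demonstration (tick values are invented):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t now = 1000;
		uint32_t epoch_start = 1040;	/* drifted 40 ticks into the future */
		uint32_t t = now - epoch_start;	/* elapsed time used by the curve */

		printf("%u\n", t);	/* 4294967256: ~497 days of "elapsed" time */
		return 0;
	}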
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 6d8795b066ac..def765911ff8 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -162,9 +162,9 @@ kill_with_rst:
 		if (tcp_death_row.sysctl_tw_recycle &&
 		    tcptw->tw_ts_recent_stamp &&
 		    tcp_tw_remember_stamp(tw))
-			inet_twsk_schedule(tw, tw->tw_timeout);
+			inet_twsk_reschedule(tw, tw->tw_timeout);
 		else
-			inet_twsk_schedule(tw, TCP_TIMEWAIT_LEN);
+			inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);
 		return TCP_TW_ACK;
 	}
 
@@ -201,7 +201,7 @@ kill:
 			return TCP_TW_SUCCESS;
 		}
 	}
-	inet_twsk_schedule(tw, TCP_TIMEWAIT_LEN);
+	inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);
 
 	if (tmp_opt.saw_tstamp) {
 		tcptw->tw_ts_recent = tmp_opt.rcv_tsval;
@@ -251,7 +251,7 @@ kill:
 	 * Do not reschedule in the last case.
 	 */
 	if (paws_reject || th->ack)
-		inet_twsk_schedule(tw, TCP_TIMEWAIT_LEN);
+		inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);
 
 	return tcp_timewait_check_oow_rate_limit(
 		tw, skb, LINUX_MIB_TCPACKSKIPPEDTIMEWAIT);
@@ -322,9 +322,6 @@ void tcp_time_wait(struct sock *sk, int state, int timeo)
 		} while (0);
 #endif
 
-		/* Linkage updates. */
-		__inet_twsk_hashdance(tw, sk, &tcp_hashinfo);
-
 		/* Get the TIME_WAIT timeout firing. */
 		if (timeo < rto)
 			timeo = rto;
@@ -338,6 +335,8 @@ void tcp_time_wait(struct sock *sk, int state, int timeo)
 		}
 
 		inet_twsk_schedule(tw, timeo);
+		/* Linkage updates. */
+		__inet_twsk_hashdance(tw, sk, &tcp_hashinfo);
 		inet_twsk_put(tw);
 	} else {
 		/* Sorry, if we're out of memory, just CLOSE this
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index f9a8a12b62ee..1100ffe4a722 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -2897,6 +2897,7 @@ void tcp_send_active_reset(struct sock *sk, gfp_t priority)
 	skb_reserve(skb, MAX_TCP_HEADER);
 	tcp_init_nondata_skb(skb, tcp_acceptable_seq(sk),
 			     TCPHDR_ACK | TCPHDR_RST);
+	skb_mstamp_get(&skb->skb_mstamp);
 	/* Send it off. */
 	if (tcp_transmit_skb(sk, skb, 0, priority))
 		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTFAILED);
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index c0a15e7f359f..f7d1d5e19e95 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1024,7 +1024,8 @@ int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
 	if (netif_index_is_vrf(net, ipc.oif)) {
 		flowi4_init_output(fl4, ipc.oif, sk->sk_mark, tos,
 				   RT_SCOPE_UNIVERSE, sk->sk_protocol,
-				   (flow_flags | FLOWI_FLAG_VRFSRC),
+				   (flow_flags | FLOWI_FLAG_VRFSRC |
+				    FLOWI_FLAG_SKIP_NH_OIF),
 				   faddr, saddr, dport,
 				   inet->inet_sport);
 
diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
index bb919b28619f..c10a9ee68433 100644
--- a/net/ipv4/xfrm4_policy.c
+++ b/net/ipv4/xfrm4_policy.c
@@ -33,6 +33,8 @@ static struct dst_entry *__xfrm4_dst_lookup(struct net *net, struct flowi4 *fl4,
 	if (saddr)
 		fl4->saddr = saddr->a4;
 
+	fl4->flowi4_flags = FLOWI_FLAG_SKIP_NH_OIF;
+
 	rt = __ip_route_output_key(net, fl4);
 	if (!IS_ERR(rt))
 		return &rt->dst;
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 030fefdc9aed..900113376d4e 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -5127,13 +5127,12 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
 
 			rt = addrconf_get_prefix_route(&ifp->peer_addr, 128,
 						       ifp->idev->dev, 0, 0);
-			if (rt && ip6_del_rt(rt))
-				dst_free(&rt->dst);
+			if (rt)
+				ip6_del_rt(rt);
 		}
 		dst_hold(&ifp->rt->dst);
 
-		if (ip6_del_rt(ifp->rt))
-			dst_free(&ifp->rt->dst);
+		ip6_del_rt(ifp->rt);
 
 		rt_genid_bump_ipv6(net);
 		break;
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c index 418d9823692b..7d2e0023c72d 100644 --- a/net/ipv6/ip6_fib.c +++ b/net/ipv6/ip6_fib.c | |||
@@ -155,6 +155,11 @@ static void node_free(struct fib6_node *fn) | |||
155 | kmem_cache_free(fib6_node_kmem, fn); | 155 | kmem_cache_free(fib6_node_kmem, fn); |
156 | } | 156 | } |
157 | 157 | ||
158 | static void rt6_rcu_free(struct rt6_info *rt) | ||
159 | { | ||
160 | call_rcu(&rt->dst.rcu_head, dst_rcu_free); | ||
161 | } | ||
162 | |||
158 | static void rt6_free_pcpu(struct rt6_info *non_pcpu_rt) | 163 | static void rt6_free_pcpu(struct rt6_info *non_pcpu_rt) |
159 | { | 164 | { |
160 | int cpu; | 165 | int cpu; |
@@ -169,7 +174,7 @@ static void rt6_free_pcpu(struct rt6_info *non_pcpu_rt) | |||
169 | ppcpu_rt = per_cpu_ptr(non_pcpu_rt->rt6i_pcpu, cpu); | 174 | ppcpu_rt = per_cpu_ptr(non_pcpu_rt->rt6i_pcpu, cpu); |
170 | pcpu_rt = *ppcpu_rt; | 175 | pcpu_rt = *ppcpu_rt; |
171 | if (pcpu_rt) { | 176 | if (pcpu_rt) { |
172 | dst_free(&pcpu_rt->dst); | 177 | rt6_rcu_free(pcpu_rt); |
173 | *ppcpu_rt = NULL; | 178 | *ppcpu_rt = NULL; |
174 | } | 179 | } |
175 | } | 180 | } |
@@ -181,7 +186,7 @@ static void rt6_release(struct rt6_info *rt) | |||
181 | { | 186 | { |
182 | if (atomic_dec_and_test(&rt->rt6i_ref)) { | 187 | if (atomic_dec_and_test(&rt->rt6i_ref)) { |
183 | rt6_free_pcpu(rt); | 188 | rt6_free_pcpu(rt); |
184 | dst_free(&rt->dst); | 189 | rt6_rcu_free(rt); |
185 | } | 190 | } |
186 | } | 191 | } |
187 | 192 | ||
@@ -846,7 +851,7 @@ add: | |||
846 | *ins = rt; | 851 | *ins = rt; |
847 | rt->rt6i_node = fn; | 852 | rt->rt6i_node = fn; |
848 | atomic_inc(&rt->rt6i_ref); | 853 | atomic_inc(&rt->rt6i_ref); |
849 | inet6_rt_notify(RTM_NEWROUTE, rt, info); | 854 | inet6_rt_notify(RTM_NEWROUTE, rt, info, 0); |
850 | info->nl_net->ipv6.rt6_stats->fib_rt_entries++; | 855 | info->nl_net->ipv6.rt6_stats->fib_rt_entries++; |
851 | 856 | ||
852 | if (!(fn->fn_flags & RTN_RTINFO)) { | 857 | if (!(fn->fn_flags & RTN_RTINFO)) { |
@@ -872,7 +877,7 @@ add: | |||
872 | rt->rt6i_node = fn; | 877 | rt->rt6i_node = fn; |
873 | rt->dst.rt6_next = iter->dst.rt6_next; | 878 | rt->dst.rt6_next = iter->dst.rt6_next; |
874 | atomic_inc(&rt->rt6i_ref); | 879 | atomic_inc(&rt->rt6i_ref); |
875 | inet6_rt_notify(RTM_NEWROUTE, rt, info); | 880 | inet6_rt_notify(RTM_NEWROUTE, rt, info, NLM_F_REPLACE); |
876 | if (!(fn->fn_flags & RTN_RTINFO)) { | 881 | if (!(fn->fn_flags & RTN_RTINFO)) { |
877 | info->nl_net->ipv6.rt6_stats->fib_route_nodes++; | 882 | info->nl_net->ipv6.rt6_stats->fib_route_nodes++; |
878 | fn->fn_flags |= RTN_RTINFO; | 883 | fn->fn_flags |= RTN_RTINFO; |
@@ -933,6 +938,10 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt, | |||
933 | int replace_required = 0; | 938 | int replace_required = 0; |
934 | int sernum = fib6_new_sernum(info->nl_net); | 939 | int sernum = fib6_new_sernum(info->nl_net); |
935 | 940 | ||
941 | if (WARN_ON_ONCE((rt->dst.flags & DST_NOCACHE) && | ||
942 | !atomic_read(&rt->dst.__refcnt))) | ||
943 | return -EINVAL; | ||
944 | |||
936 | if (info->nlh) { | 945 | if (info->nlh) { |
937 | if (!(info->nlh->nlmsg_flags & NLM_F_CREATE)) | 946 | if (!(info->nlh->nlmsg_flags & NLM_F_CREATE)) |
938 | allow_create = 0; | 947 | allow_create = 0; |
@@ -1025,6 +1034,7 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt, | |||
1025 | fib6_start_gc(info->nl_net, rt); | 1034 | fib6_start_gc(info->nl_net, rt); |
1026 | if (!(rt->rt6i_flags & RTF_CACHE)) | 1035 | if (!(rt->rt6i_flags & RTF_CACHE)) |
1027 | fib6_prune_clones(info->nl_net, pn); | 1036 | fib6_prune_clones(info->nl_net, pn); |
1037 | rt->dst.flags &= ~DST_NOCACHE; | ||
1028 | } | 1038 | } |
1029 | 1039 | ||
1030 | out: | 1040 | out: |
@@ -1049,7 +1059,8 @@ out: | |||
1049 | atomic_inc(&pn->leaf->rt6i_ref); | 1059 | atomic_inc(&pn->leaf->rt6i_ref); |
1050 | } | 1060 | } |
1051 | #endif | 1061 | #endif |
1052 | dst_free(&rt->dst); | 1062 | if (!(rt->dst.flags & DST_NOCACHE)) |
1063 | dst_free(&rt->dst); | ||
1053 | } | 1064 | } |
1054 | return err; | 1065 | return err; |
1055 | 1066 | ||
@@ -1060,7 +1071,8 @@ out: | |||
1060 | st_failure: | 1071 | st_failure: |
1061 | if (fn && !(fn->fn_flags & (RTN_RTINFO|RTN_ROOT))) | 1072 | if (fn && !(fn->fn_flags & (RTN_RTINFO|RTN_ROOT))) |
1062 | fib6_repair_tree(info->nl_net, fn); | 1073 | fib6_repair_tree(info->nl_net, fn); |
1063 | dst_free(&rt->dst); | 1074 | if (!(rt->dst.flags & DST_NOCACHE)) |
1075 | dst_free(&rt->dst); | ||
1064 | return err; | 1076 | return err; |
1065 | #endif | 1077 | #endif |
1066 | } | 1078 | } |
@@ -1410,7 +1422,7 @@ static void fib6_del_route(struct fib6_node *fn, struct rt6_info **rtp, | |||
1410 | 1422 | ||
1411 | fib6_purge_rt(rt, fn, net); | 1423 | fib6_purge_rt(rt, fn, net); |
1412 | 1424 | ||
1413 | inet6_rt_notify(RTM_DELROUTE, rt, info); | 1425 | inet6_rt_notify(RTM_DELROUTE, rt, info, 0); |
1414 | rt6_release(rt); | 1426 | rt6_release(rt); |
1415 | } | 1427 | } |
1416 | 1428 | ||
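
The switch from dst_free() to the new rt6_rcu_free() defers reclamation to an RCU grace period, since fib6 lookups can walk the tree without a lock. Below is a deliberately crude single-threaded model of the rule being enforced — memory is reclaimed only after pre-existing readers are done; the reader counter is a stand-in for RCU, not how the kernel implements it:

    #include <stdio.h>
    #include <stdlib.h>
    #include <stdatomic.h>

    static atomic_int readers;

    struct route { int dead; };

    static void reader_enter(void) { atomic_fetch_add(&readers, 1); }
    static void reader_exit(void)  { atomic_fetch_sub(&readers, 1); }

    /* Stand-in for call_rcu(): wait out a "grace period", then free. */
    static void route_free_deferred(struct route *rt)
    {
        while (atomic_load(&readers))   /* no pre-existing reader left */
            ;
        printf("grace period over, freeing\n");
        free(rt);
    }

    int main(void)
    {
        struct route *rt = calloc(1, sizeof(*rt));

        reader_enter();          /* a lockless lookup is in flight */
        rt->dead = 1;            /* unlink from the tree           */
        reader_exit();           /* reader finishes                */
        route_free_deferred(rt); /* only now is reclaiming safe    */
        return 0;
    }
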
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c index 4038c694ec03..3c7b9310b33f 100644 --- a/net/ipv6/ip6_gre.c +++ b/net/ipv6/ip6_gre.c | |||
@@ -404,13 +404,13 @@ static void ip6gre_err(struct sk_buff *skb, struct inet6_skb_parm *opt, | |||
404 | struct ipv6_tlv_tnl_enc_lim *tel; | 404 | struct ipv6_tlv_tnl_enc_lim *tel; |
405 | __u32 mtu; | 405 | __u32 mtu; |
406 | case ICMPV6_DEST_UNREACH: | 406 | case ICMPV6_DEST_UNREACH: |
407 | net_warn_ratelimited("%s: Path to destination invalid or inactive!\n", | 407 | net_dbg_ratelimited("%s: Path to destination invalid or inactive!\n", |
408 | t->parms.name); | 408 | t->parms.name); |
409 | break; | 409 | break; |
410 | case ICMPV6_TIME_EXCEED: | 410 | case ICMPV6_TIME_EXCEED: |
411 | if (code == ICMPV6_EXC_HOPLIMIT) { | 411 | if (code == ICMPV6_EXC_HOPLIMIT) { |
412 | net_warn_ratelimited("%s: Too small hop limit or routing loop in tunnel!\n", | 412 | net_dbg_ratelimited("%s: Too small hop limit or routing loop in tunnel!\n", |
413 | t->parms.name); | 413 | t->parms.name); |
414 | } | 414 | } |
415 | break; | 415 | break; |
416 | case ICMPV6_PARAMPROB: | 416 | case ICMPV6_PARAMPROB: |
@@ -421,12 +421,12 @@ static void ip6gre_err(struct sk_buff *skb, struct inet6_skb_parm *opt, | |||
421 | if (teli && teli == be32_to_cpu(info) - 2) { | 421 | if (teli && teli == be32_to_cpu(info) - 2) { |
422 | tel = (struct ipv6_tlv_tnl_enc_lim *) &skb->data[teli]; | 422 | tel = (struct ipv6_tlv_tnl_enc_lim *) &skb->data[teli]; |
423 | if (tel->encap_limit == 0) { | 423 | if (tel->encap_limit == 0) { |
424 | net_warn_ratelimited("%s: Too small encapsulation limit or routing loop in tunnel!\n", | 424 | net_dbg_ratelimited("%s: Too small encapsulation limit or routing loop in tunnel!\n", |
425 | t->parms.name); | 425 | t->parms.name); |
426 | } | 426 | } |
427 | } else { | 427 | } else { |
428 | net_warn_ratelimited("%s: Recipient unable to parse tunneled packet!\n", | 428 | net_dbg_ratelimited("%s: Recipient unable to parse tunneled packet!\n", |
429 | t->parms.name); | 429 | t->parms.name); |
430 | } | 430 | } |
431 | break; | 431 | break; |
432 | case ICMPV6_PKT_TOOBIG: | 432 | case ICMPV6_PKT_TOOBIG: |
@@ -634,20 +634,20 @@ static netdev_tx_t ip6gre_xmit2(struct sk_buff *skb, | |||
634 | } | 634 | } |
635 | 635 | ||
636 | if (!fl6->flowi6_mark) | 636 | if (!fl6->flowi6_mark) |
637 | dst = ip6_tnl_dst_check(tunnel); | 637 | dst = ip6_tnl_dst_get(tunnel); |
638 | 638 | ||
639 | if (!dst) { | 639 | if (!dst) { |
640 | ndst = ip6_route_output(net, NULL, fl6); | 640 | dst = ip6_route_output(net, NULL, fl6); |
641 | 641 | ||
642 | if (ndst->error) | 642 | if (dst->error) |
643 | goto tx_err_link_failure; | 643 | goto tx_err_link_failure; |
644 | ndst = xfrm_lookup(net, ndst, flowi6_to_flowi(fl6), NULL, 0); | 644 | dst = xfrm_lookup(net, dst, flowi6_to_flowi(fl6), NULL, 0); |
645 | if (IS_ERR(ndst)) { | 645 | if (IS_ERR(dst)) { |
646 | err = PTR_ERR(ndst); | 646 | err = PTR_ERR(dst); |
647 | ndst = NULL; | 647 | dst = NULL; |
648 | goto tx_err_link_failure; | 648 | goto tx_err_link_failure; |
649 | } | 649 | } |
650 | dst = ndst; | 650 | ndst = dst; |
651 | } | 651 | } |
652 | 652 | ||
653 | tdev = dst->dev; | 653 | tdev = dst->dev; |
@@ -702,12 +702,9 @@ static netdev_tx_t ip6gre_xmit2(struct sk_buff *skb, | |||
702 | skb = new_skb; | 702 | skb = new_skb; |
703 | } | 703 | } |
704 | 704 | ||
705 | if (fl6->flowi6_mark) { | 705 | if (!fl6->flowi6_mark && ndst) |
706 | skb_dst_set(skb, dst); | 706 | ip6_tnl_dst_set(tunnel, ndst); |
707 | ndst = NULL; | 707 | skb_dst_set(skb, dst); |
708 | } else { | ||
709 | skb_dst_set_noref(skb, dst); | ||
710 | } | ||
711 | 708 | ||
712 | proto = NEXTHDR_GRE; | 709 | proto = NEXTHDR_GRE; |
713 | if (encap_limit >= 0) { | 710 | if (encap_limit >= 0) { |
@@ -762,14 +759,12 @@ static netdev_tx_t ip6gre_xmit2(struct sk_buff *skb, | |||
762 | skb_set_inner_protocol(skb, protocol); | 759 | skb_set_inner_protocol(skb, protocol); |
763 | 760 | ||
764 | ip6tunnel_xmit(NULL, skb, dev); | 761 | ip6tunnel_xmit(NULL, skb, dev); |
765 | if (ndst) | ||
766 | ip6_tnl_dst_store(tunnel, ndst); | ||
767 | return 0; | 762 | return 0; |
768 | tx_err_link_failure: | 763 | tx_err_link_failure: |
769 | stats->tx_carrier_errors++; | 764 | stats->tx_carrier_errors++; |
770 | dst_link_failure(skb); | 765 | dst_link_failure(skb); |
771 | tx_err_dst_release: | 766 | tx_err_dst_release: |
772 | dst_release(ndst); | 767 | dst_release(dst); |
773 | return err; | 768 | return err; |
774 | } | 769 | } |
775 | 770 | ||
@@ -1223,6 +1218,9 @@ static const struct net_device_ops ip6gre_netdev_ops = { | |||
1223 | 1218 | ||
1224 | static void ip6gre_dev_free(struct net_device *dev) | 1219 | static void ip6gre_dev_free(struct net_device *dev) |
1225 | { | 1220 | { |
1221 | struct ip6_tnl *t = netdev_priv(dev); | ||
1222 | |||
1223 | ip6_tnl_dst_destroy(t); | ||
1226 | free_percpu(dev->tstats); | 1224 | free_percpu(dev->tstats); |
1227 | free_netdev(dev); | 1225 | free_netdev(dev); |
1228 | } | 1226 | } |
@@ -1245,9 +1243,10 @@ static void ip6gre_tunnel_setup(struct net_device *dev) | |||
1245 | netif_keep_dst(dev); | 1243 | netif_keep_dst(dev); |
1246 | } | 1244 | } |
1247 | 1245 | ||
1248 | static int ip6gre_tunnel_init(struct net_device *dev) | 1246 | static int ip6gre_tunnel_init_common(struct net_device *dev) |
1249 | { | 1247 | { |
1250 | struct ip6_tnl *tunnel; | 1248 | struct ip6_tnl *tunnel; |
1249 | int ret; | ||
1251 | 1250 | ||
1252 | tunnel = netdev_priv(dev); | 1251 | tunnel = netdev_priv(dev); |
1253 | 1252 | ||
@@ -1255,16 +1254,37 @@ static int ip6gre_tunnel_init(struct net_device *dev) | |||
1255 | tunnel->net = dev_net(dev); | 1254 | tunnel->net = dev_net(dev); |
1256 | strcpy(tunnel->parms.name, dev->name); | 1255 | strcpy(tunnel->parms.name, dev->name); |
1257 | 1256 | ||
1257 | dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats); | ||
1258 | if (!dev->tstats) | ||
1259 | return -ENOMEM; | ||
1260 | |||
1261 | ret = ip6_tnl_dst_init(tunnel); | ||
1262 | if (ret) { | ||
1263 | free_percpu(dev->tstats); | ||
1264 | dev->tstats = NULL; | ||
1265 | return ret; | ||
1266 | } | ||
1267 | |||
1268 | return 0; | ||
1269 | } | ||
1270 | |||
1271 | static int ip6gre_tunnel_init(struct net_device *dev) | ||
1272 | { | ||
1273 | struct ip6_tnl *tunnel; | ||
1274 | int ret; | ||
1275 | |||
1276 | ret = ip6gre_tunnel_init_common(dev); | ||
1277 | if (ret) | ||
1278 | return ret; | ||
1279 | |||
1280 | tunnel = netdev_priv(dev); | ||
1281 | |||
1258 | memcpy(dev->dev_addr, &tunnel->parms.laddr, sizeof(struct in6_addr)); | 1282 | memcpy(dev->dev_addr, &tunnel->parms.laddr, sizeof(struct in6_addr)); |
1259 | memcpy(dev->broadcast, &tunnel->parms.raddr, sizeof(struct in6_addr)); | 1283 | memcpy(dev->broadcast, &tunnel->parms.raddr, sizeof(struct in6_addr)); |
1260 | 1284 | ||
1261 | if (ipv6_addr_any(&tunnel->parms.raddr)) | 1285 | if (ipv6_addr_any(&tunnel->parms.raddr)) |
1262 | dev->header_ops = &ip6gre_header_ops; | 1286 | dev->header_ops = &ip6gre_header_ops; |
1263 | 1287 | ||
1264 | dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats); | ||
1265 | if (!dev->tstats) | ||
1266 | return -ENOMEM; | ||
1267 | |||
1268 | return 0; | 1288 | return 0; |
1269 | } | 1289 | } |
1270 | 1290 | ||
@@ -1460,19 +1480,16 @@ static void ip6gre_netlink_parms(struct nlattr *data[], | |||
1460 | static int ip6gre_tap_init(struct net_device *dev) | 1480 | static int ip6gre_tap_init(struct net_device *dev) |
1461 | { | 1481 | { |
1462 | struct ip6_tnl *tunnel; | 1482 | struct ip6_tnl *tunnel; |
1483 | int ret; | ||
1463 | 1484 | ||
1464 | tunnel = netdev_priv(dev); | 1485 | ret = ip6gre_tunnel_init_common(dev); |
1486 | if (ret) | ||
1487 | return ret; | ||
1465 | 1488 | ||
1466 | tunnel->dev = dev; | 1489 | tunnel = netdev_priv(dev); |
1467 | tunnel->net = dev_net(dev); | ||
1468 | strcpy(tunnel->parms.name, dev->name); | ||
1469 | 1490 | ||
1470 | ip6gre_tnl_link_config(tunnel, 1); | 1491 | ip6gre_tnl_link_config(tunnel, 1); |
1471 | 1492 | ||
1472 | dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats); | ||
1473 | if (!dev->tstats) | ||
1474 | return -ENOMEM; | ||
1475 | |||
1476 | return 0; | 1493 | return 0; |
1477 | } | 1494 | } |
1478 | 1495 | ||
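
ip6gre_tunnel_init() and ip6gre_tap_init() previously duplicated their setup; the new ip6gre_tunnel_init_common() also has to unwind the tstats allocation when ip6_tnl_dst_init() fails. A compact sketch of that factor-out-plus-rollback shape, with placeholder allocations standing in for the kernel ones:

    #include <stdio.h>
    #include <stdlib.h>

    struct dev {
        void *tstats;
        void *dst_cache;
    };

    static int dev_init_common(struct dev *d)
    {
        d->tstats = calloc(1, 64);
        if (!d->tstats)
            return -12;                 /* -ENOMEM */

        d->dst_cache = calloc(1, 64);
        if (!d->dst_cache) {
            free(d->tstats);            /* unwind the earlier step */
            d->tstats = NULL;
            return -12;
        }
        return 0;
    }

    static int tunnel_init(struct dev *d)
    {
        int ret = dev_init_common(d);

        if (ret)
            return ret;
        /* tunnel-only setup (addresses, header ops) would go here */
        return 0;
    }

    int main(void)
    {
        struct dev d = { 0 };

        printf("init: %d\n", tunnel_init(&d));
        free(d.tstats);
        free(d.dst_cache);
        return 0;
    }
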
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index 26ea47930740..92b1aa38f121 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c | |||
@@ -586,20 +586,22 @@ int ip6_fragment(struct sock *sk, struct sk_buff *skb, | |||
586 | frag_id = ipv6_select_ident(net, &ipv6_hdr(skb)->daddr, | 586 | frag_id = ipv6_select_ident(net, &ipv6_hdr(skb)->daddr, |
587 | &ipv6_hdr(skb)->saddr); | 587 | &ipv6_hdr(skb)->saddr); |
588 | 588 | ||
589 | hroom = LL_RESERVED_SPACE(rt->dst.dev); | ||
589 | if (skb_has_frag_list(skb)) { | 590 | if (skb_has_frag_list(skb)) { |
590 | int first_len = skb_pagelen(skb); | 591 | int first_len = skb_pagelen(skb); |
591 | struct sk_buff *frag2; | 592 | struct sk_buff *frag2; |
592 | 593 | ||
593 | if (first_len - hlen > mtu || | 594 | if (first_len - hlen > mtu || |
594 | ((first_len - hlen) & 7) || | 595 | ((first_len - hlen) & 7) || |
595 | skb_cloned(skb)) | 596 | skb_cloned(skb) || |
597 | skb_headroom(skb) < (hroom + sizeof(struct frag_hdr))) | ||
596 | goto slow_path; | 598 | goto slow_path; |
597 | 599 | ||
598 | skb_walk_frags(skb, frag) { | 600 | skb_walk_frags(skb, frag) { |
599 | /* Correct geometry. */ | 601 | /* Correct geometry. */ |
600 | if (frag->len > mtu || | 602 | if (frag->len > mtu || |
601 | ((frag->len & 7) && frag->next) || | 603 | ((frag->len & 7) && frag->next) || |
602 | skb_headroom(frag) < hlen) | 604 | skb_headroom(frag) < (hlen + hroom + sizeof(struct frag_hdr))) |
603 | goto slow_path_clean; | 605 | goto slow_path_clean; |
604 | 606 | ||
605 | /* Partially cloned skb? */ | 607 | /* Partially cloned skb? */ |
@@ -616,8 +618,6 @@ int ip6_fragment(struct sock *sk, struct sk_buff *skb, | |||
616 | 618 | ||
617 | err = 0; | 619 | err = 0; |
618 | offset = 0; | 620 | offset = 0; |
619 | frag = skb_shinfo(skb)->frag_list; | ||
620 | skb_frag_list_init(skb); | ||
621 | /* BUILD HEADER */ | 621 | /* BUILD HEADER */ |
622 | 622 | ||
623 | *prevhdr = NEXTHDR_FRAGMENT; | 623 | *prevhdr = NEXTHDR_FRAGMENT; |
@@ -625,8 +625,11 @@ int ip6_fragment(struct sock *sk, struct sk_buff *skb, | |||
625 | if (!tmp_hdr) { | 625 | if (!tmp_hdr) { |
626 | IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), | 626 | IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), |
627 | IPSTATS_MIB_FRAGFAILS); | 627 | IPSTATS_MIB_FRAGFAILS); |
628 | return -ENOMEM; | 628 | err = -ENOMEM; |
629 | goto fail; | ||
629 | } | 630 | } |
631 | frag = skb_shinfo(skb)->frag_list; | ||
632 | skb_frag_list_init(skb); | ||
630 | 633 | ||
631 | __skb_pull(skb, hlen); | 634 | __skb_pull(skb, hlen); |
632 | fh = (struct frag_hdr *)__skb_push(skb, sizeof(struct frag_hdr)); | 635 | fh = (struct frag_hdr *)__skb_push(skb, sizeof(struct frag_hdr)); |
@@ -723,7 +726,6 @@ slow_path: | |||
723 | */ | 726 | */ |
724 | 727 | ||
725 | *prevhdr = NEXTHDR_FRAGMENT; | 728 | *prevhdr = NEXTHDR_FRAGMENT; |
726 | hroom = LL_RESERVED_SPACE(rt->dst.dev); | ||
727 | troom = rt->dst.dev->needed_tailroom; | 729 | troom = rt->dst.dev->needed_tailroom; |
728 | 730 | ||
729 | /* | 731 | /* |
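
Two distinct fixes in the ip6_fragment hunk: the -ENOMEM path now goes through "fail" instead of returning with the frag list already detached, and the fast path is taken only when the head skb (and each fragment) has headroom for the link-layer header plus the 8-byte fragment header. A sketch of that guard with made-up sizes:

    #include <stdio.h>
    #include <stdbool.h>

    #define FRAG_HDR_LEN 8   /* sizeof(struct frag_hdr) in the kernel */

    static bool fast_path_ok(int headroom, int ll_reserved)
    {
        return headroom >= ll_reserved + FRAG_HDR_LEN;
    }

    int main(void)
    {
        /* 16 bytes of headroom is not enough once a 14-byte Ethernet
         * header plus the fragment header must be pushed in front. */
        printf("headroom 16: %s\n", fast_path_ok(16, 14) ? "fast" : "slow");
        printf("headroom 32: %s\n", fast_path_ok(32, 14) ? "fast" : "slow");
        return 0;
    }
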
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c index b0ab420612bc..eabffbb89795 100644 --- a/net/ipv6/ip6_tunnel.c +++ b/net/ipv6/ip6_tunnel.c | |||
@@ -126,36 +126,92 @@ static struct net_device_stats *ip6_get_stats(struct net_device *dev) | |||
126 | * Locking : hash tables are protected by RCU and RTNL | 126 | * Locking : hash tables are protected by RCU and RTNL |
127 | */ | 127 | */ |
128 | 128 | ||
129 | struct dst_entry *ip6_tnl_dst_check(struct ip6_tnl *t) | 129 | static void ip6_tnl_per_cpu_dst_set(struct ip6_tnl_dst *idst, |
130 | struct dst_entry *dst) | ||
130 | { | 131 | { |
131 | struct dst_entry *dst = t->dst_cache; | 132 | write_seqlock_bh(&idst->lock); |
133 | dst_release(rcu_dereference_protected( | ||
134 | idst->dst, | ||
135 | lockdep_is_held(&idst->lock.lock))); | ||
136 | if (dst) { | ||
137 | dst_hold(dst); | ||
138 | idst->cookie = rt6_get_cookie((struct rt6_info *)dst); | ||
139 | } else { | ||
140 | idst->cookie = 0; | ||
141 | } | ||
142 | rcu_assign_pointer(idst->dst, dst); | ||
143 | write_sequnlock_bh(&idst->lock); | ||
144 | } | ||
145 | |||
146 | struct dst_entry *ip6_tnl_dst_get(struct ip6_tnl *t) | ||
147 | { | ||
148 | struct ip6_tnl_dst *idst; | ||
149 | struct dst_entry *dst; | ||
150 | unsigned int seq; | ||
151 | u32 cookie; | ||
132 | 152 | ||
133 | if (dst && dst->obsolete && | 153 | idst = raw_cpu_ptr(t->dst_cache); |
134 | !dst->ops->check(dst, t->dst_cookie)) { | 154 | |
135 | t->dst_cache = NULL; | 155 | rcu_read_lock(); |
156 | do { | ||
157 | seq = read_seqbegin(&idst->lock); | ||
158 | dst = rcu_dereference(idst->dst); | ||
159 | cookie = idst->cookie; | ||
160 | } while (read_seqretry(&idst->lock, seq)); | ||
161 | |||
162 | if (dst && !atomic_inc_not_zero(&dst->__refcnt)) | ||
163 | dst = NULL; | ||
164 | rcu_read_unlock(); | ||
165 | |||
166 | if (dst && dst->obsolete && !dst->ops->check(dst, cookie)) { | ||
167 | ip6_tnl_per_cpu_dst_set(idst, NULL); | ||
136 | dst_release(dst); | 168 | dst_release(dst); |
137 | return NULL; | 169 | dst = NULL; |
138 | } | 170 | } |
139 | |||
140 | return dst; | 171 | return dst; |
141 | } | 172 | } |
142 | EXPORT_SYMBOL_GPL(ip6_tnl_dst_check); | 173 | EXPORT_SYMBOL_GPL(ip6_tnl_dst_get); |
143 | 174 | ||
144 | void ip6_tnl_dst_reset(struct ip6_tnl *t) | 175 | void ip6_tnl_dst_reset(struct ip6_tnl *t) |
145 | { | 176 | { |
146 | dst_release(t->dst_cache); | 177 | int i; |
147 | t->dst_cache = NULL; | 178 | |
179 | for_each_possible_cpu(i) | ||
180 | ip6_tnl_per_cpu_dst_set(raw_cpu_ptr(t->dst_cache), NULL); | ||
148 | } | 181 | } |
149 | EXPORT_SYMBOL_GPL(ip6_tnl_dst_reset); | 182 | EXPORT_SYMBOL_GPL(ip6_tnl_dst_reset); |
150 | 183 | ||
151 | void ip6_tnl_dst_store(struct ip6_tnl *t, struct dst_entry *dst) | 184 | void ip6_tnl_dst_set(struct ip6_tnl *t, struct dst_entry *dst) |
185 | { | ||
186 | ip6_tnl_per_cpu_dst_set(raw_cpu_ptr(t->dst_cache), dst); | ||
187 | |||
188 | } | ||
189 | EXPORT_SYMBOL_GPL(ip6_tnl_dst_set); | ||
190 | |||
191 | void ip6_tnl_dst_destroy(struct ip6_tnl *t) | ||
152 | { | 192 | { |
153 | struct rt6_info *rt = (struct rt6_info *) dst; | 193 | if (!t->dst_cache) |
154 | t->dst_cookie = rt6_get_cookie(rt); | 194 | return; |
155 | dst_release(t->dst_cache); | 195 | |
156 | t->dst_cache = dst; | 196 | ip6_tnl_dst_reset(t); |
197 | free_percpu(t->dst_cache); | ||
157 | } | 198 | } |
158 | EXPORT_SYMBOL_GPL(ip6_tnl_dst_store); | 199 | EXPORT_SYMBOL_GPL(ip6_tnl_dst_destroy); |
200 | |||
201 | int ip6_tnl_dst_init(struct ip6_tnl *t) | ||
202 | { | ||
203 | int i; | ||
204 | |||
205 | t->dst_cache = alloc_percpu(struct ip6_tnl_dst); | ||
206 | if (!t->dst_cache) | ||
207 | return -ENOMEM; | ||
208 | |||
209 | for_each_possible_cpu(i) | ||
210 | seqlock_init(&per_cpu_ptr(t->dst_cache, i)->lock); | ||
211 | |||
212 | return 0; | ||
213 | } | ||
214 | EXPORT_SYMBOL_GPL(ip6_tnl_dst_init); | ||
159 | 215 | ||
160 | /** | 216 | /** |
161 | * ip6_tnl_lookup - fetch tunnel matching the end-point addresses | 217 | * ip6_tnl_lookup - fetch tunnel matching the end-point addresses |
@@ -271,6 +327,9 @@ ip6_tnl_unlink(struct ip6_tnl_net *ip6n, struct ip6_tnl *t) | |||
271 | 327 | ||
272 | static void ip6_dev_free(struct net_device *dev) | 328 | static void ip6_dev_free(struct net_device *dev) |
273 | { | 329 | { |
330 | struct ip6_tnl *t = netdev_priv(dev); | ||
331 | |||
332 | ip6_tnl_dst_destroy(t); | ||
274 | free_percpu(dev->tstats); | 333 | free_percpu(dev->tstats); |
275 | free_netdev(dev); | 334 | free_netdev(dev); |
276 | } | 335 | } |
@@ -510,14 +569,14 @@ ip6_tnl_err(struct sk_buff *skb, __u8 ipproto, struct inet6_skb_parm *opt, | |||
510 | struct ipv6_tlv_tnl_enc_lim *tel; | 569 | struct ipv6_tlv_tnl_enc_lim *tel; |
511 | __u32 mtu; | 570 | __u32 mtu; |
512 | case ICMPV6_DEST_UNREACH: | 571 | case ICMPV6_DEST_UNREACH: |
513 | net_warn_ratelimited("%s: Path to destination invalid or inactive!\n", | 572 | net_dbg_ratelimited("%s: Path to destination invalid or inactive!\n", |
514 | t->parms.name); | 573 | t->parms.name); |
515 | rel_msg = 1; | 574 | rel_msg = 1; |
516 | break; | 575 | break; |
517 | case ICMPV6_TIME_EXCEED: | 576 | case ICMPV6_TIME_EXCEED: |
518 | if ((*code) == ICMPV6_EXC_HOPLIMIT) { | 577 | if ((*code) == ICMPV6_EXC_HOPLIMIT) { |
519 | net_warn_ratelimited("%s: Too small hop limit or routing loop in tunnel!\n", | 578 | net_dbg_ratelimited("%s: Too small hop limit or routing loop in tunnel!\n", |
520 | t->parms.name); | 579 | t->parms.name); |
521 | rel_msg = 1; | 580 | rel_msg = 1; |
522 | } | 581 | } |
523 | break; | 582 | break; |
@@ -529,13 +588,13 @@ ip6_tnl_err(struct sk_buff *skb, __u8 ipproto, struct inet6_skb_parm *opt, | |||
529 | if (teli && teli == *info - 2) { | 588 | if (teli && teli == *info - 2) { |
530 | tel = (struct ipv6_tlv_tnl_enc_lim *) &skb->data[teli]; | 589 | tel = (struct ipv6_tlv_tnl_enc_lim *) &skb->data[teli]; |
531 | if (tel->encap_limit == 0) { | 590 | if (tel->encap_limit == 0) { |
532 | net_warn_ratelimited("%s: Too small encapsulation limit or routing loop in tunnel!\n", | 591 | net_dbg_ratelimited("%s: Too small encapsulation limit or routing loop in tunnel!\n", |
533 | t->parms.name); | 592 | t->parms.name); |
534 | rel_msg = 1; | 593 | rel_msg = 1; |
535 | } | 594 | } |
536 | } else { | 595 | } else { |
537 | net_warn_ratelimited("%s: Recipient unable to parse tunneled packet!\n", | 596 | net_dbg_ratelimited("%s: Recipient unable to parse tunneled packet!\n", |
538 | t->parms.name); | 597 | t->parms.name); |
539 | } | 598 | } |
540 | break; | 599 | break; |
541 | case ICMPV6_PKT_TOOBIG: | 600 | case ICMPV6_PKT_TOOBIG: |
@@ -1010,23 +1069,23 @@ static int ip6_tnl_xmit2(struct sk_buff *skb, | |||
1010 | memcpy(&fl6->daddr, addr6, sizeof(fl6->daddr)); | 1069 | memcpy(&fl6->daddr, addr6, sizeof(fl6->daddr)); |
1011 | neigh_release(neigh); | 1070 | neigh_release(neigh); |
1012 | } else if (!fl6->flowi6_mark) | 1071 | } else if (!fl6->flowi6_mark) |
1013 | dst = ip6_tnl_dst_check(t); | 1072 | dst = ip6_tnl_dst_get(t); |
1014 | 1073 | ||
1015 | if (!ip6_tnl_xmit_ctl(t, &fl6->saddr, &fl6->daddr)) | 1074 | if (!ip6_tnl_xmit_ctl(t, &fl6->saddr, &fl6->daddr)) |
1016 | goto tx_err_link_failure; | 1075 | goto tx_err_link_failure; |
1017 | 1076 | ||
1018 | if (!dst) { | 1077 | if (!dst) { |
1019 | ndst = ip6_route_output(net, NULL, fl6); | 1078 | dst = ip6_route_output(net, NULL, fl6); |
1020 | 1079 | ||
1021 | if (ndst->error) | 1080 | if (dst->error) |
1022 | goto tx_err_link_failure; | 1081 | goto tx_err_link_failure; |
1023 | ndst = xfrm_lookup(net, ndst, flowi6_to_flowi(fl6), NULL, 0); | 1082 | dst = xfrm_lookup(net, dst, flowi6_to_flowi(fl6), NULL, 0); |
1024 | if (IS_ERR(ndst)) { | 1083 | if (IS_ERR(dst)) { |
1025 | err = PTR_ERR(ndst); | 1084 | err = PTR_ERR(dst); |
1026 | ndst = NULL; | 1085 | dst = NULL; |
1027 | goto tx_err_link_failure; | 1086 | goto tx_err_link_failure; |
1028 | } | 1087 | } |
1029 | dst = ndst; | 1088 | ndst = dst; |
1030 | } | 1089 | } |
1031 | 1090 | ||
1032 | tdev = dst->dev; | 1091 | tdev = dst->dev; |
@@ -1072,12 +1131,11 @@ static int ip6_tnl_xmit2(struct sk_buff *skb, | |||
1072 | consume_skb(skb); | 1131 | consume_skb(skb); |
1073 | skb = new_skb; | 1132 | skb = new_skb; |
1074 | } | 1133 | } |
1075 | if (fl6->flowi6_mark) { | 1134 | |
1076 | skb_dst_set(skb, dst); | 1135 | if (!fl6->flowi6_mark && ndst) |
1077 | ndst = NULL; | 1136 | ip6_tnl_dst_set(t, ndst); |
1078 | } else { | 1137 | skb_dst_set(skb, dst); |
1079 | skb_dst_set_noref(skb, dst); | 1138 | |
1080 | } | ||
1081 | skb->transport_header = skb->network_header; | 1139 | skb->transport_header = skb->network_header; |
1082 | 1140 | ||
1083 | proto = fl6->flowi6_proto; | 1141 | proto = fl6->flowi6_proto; |
@@ -1101,14 +1159,12 @@ static int ip6_tnl_xmit2(struct sk_buff *skb, | |||
1101 | ipv6h->saddr = fl6->saddr; | 1159 | ipv6h->saddr = fl6->saddr; |
1102 | ipv6h->daddr = fl6->daddr; | 1160 | ipv6h->daddr = fl6->daddr; |
1103 | ip6tunnel_xmit(NULL, skb, dev); | 1161 | ip6tunnel_xmit(NULL, skb, dev); |
1104 | if (ndst) | ||
1105 | ip6_tnl_dst_store(t, ndst); | ||
1106 | return 0; | 1162 | return 0; |
1107 | tx_err_link_failure: | 1163 | tx_err_link_failure: |
1108 | stats->tx_carrier_errors++; | 1164 | stats->tx_carrier_errors++; |
1109 | dst_link_failure(skb); | 1165 | dst_link_failure(skb); |
1110 | tx_err_dst_release: | 1166 | tx_err_dst_release: |
1111 | dst_release(ndst); | 1167 | dst_release(dst); |
1112 | return err; | 1168 | return err; |
1113 | } | 1169 | } |
1114 | 1170 | ||
@@ -1573,12 +1629,21 @@ static inline int | |||
1573 | ip6_tnl_dev_init_gen(struct net_device *dev) | 1629 | ip6_tnl_dev_init_gen(struct net_device *dev) |
1574 | { | 1630 | { |
1575 | struct ip6_tnl *t = netdev_priv(dev); | 1631 | struct ip6_tnl *t = netdev_priv(dev); |
1632 | int ret; | ||
1576 | 1633 | ||
1577 | t->dev = dev; | 1634 | t->dev = dev; |
1578 | t->net = dev_net(dev); | 1635 | t->net = dev_net(dev); |
1579 | dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats); | 1636 | dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats); |
1580 | if (!dev->tstats) | 1637 | if (!dev->tstats) |
1581 | return -ENOMEM; | 1638 | return -ENOMEM; |
1639 | |||
1640 | ret = ip6_tnl_dst_init(t); | ||
1641 | if (ret) { | ||
1642 | free_percpu(dev->tstats); | ||
1643 | dev->tstats = NULL; | ||
1644 | return ret; | ||
1645 | } | ||
1646 | |||
1582 | return 0; | 1647 | return 0; |
1583 | } | 1648 | } |
1584 | 1649 | ||
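
The tunnel dst cache becomes per-cpu and seqlock-protected: writers bump the sequence counter around (dst, cookie) updates, and readers retry until they observe a stable pair before attempting atomic_inc_not_zero() on the dst refcount. The userspace sketch below mimics only the seqlock retry with C11 atomics (ordering is simplified and the reference-grab and RCU parts are omitted); it is a model, not the kernel implementation:

    #include <stdio.h>
    #include <stdatomic.h>

    struct cache {
        atomic_uint seq;        /* even = stable, odd = write in flight */
        void *dst;
        unsigned int cookie;
    };

    static void cache_set(struct cache *c, void *dst, unsigned int cookie)
    {
        atomic_fetch_add_explicit(&c->seq, 1, memory_order_acquire);
        c->dst = dst;
        c->cookie = cookie;
        atomic_fetch_add_explicit(&c->seq, 1, memory_order_release);
    }

    static void *cache_get(const struct cache *c, unsigned int *cookie)
    {
        unsigned int s1, s2;
        void *dst;

        do {
            s1 = atomic_load_explicit(&c->seq, memory_order_acquire);
            dst = c->dst;
            *cookie = c->cookie;
            s2 = atomic_load_explicit(&c->seq, memory_order_acquire);
        } while (s1 != s2 || (s1 & 1));   /* retry on a torn read */
        return dst;
    }

    int main(void)
    {
        struct cache c = { 0 };
        unsigned int cookie;
        int route = 42;

        cache_set(&c, &route, 7);
        printf("dst=%p cookie=%u\n", cache_get(&c, &cookie), cookie);
        return 0;
    }
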
diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 53617d715188..cb32ce250db0 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c | |||
@@ -1193,7 +1193,8 @@ struct dst_entry *ip6_route_output(struct net *net, const struct sock *sk, | |||
1193 | 1193 | ||
1194 | fl6->flowi6_iif = LOOPBACK_IFINDEX; | 1194 | fl6->flowi6_iif = LOOPBACK_IFINDEX; |
1195 | 1195 | ||
1196 | if ((sk && sk->sk_bound_dev_if) || rt6_need_strict(&fl6->daddr)) | 1196 | if ((sk && sk->sk_bound_dev_if) || rt6_need_strict(&fl6->daddr) || |
1197 | fl6->flowi6_oif) | ||
1197 | flags |= RT6_LOOKUP_F_IFACE; | 1198 | flags |= RT6_LOOKUP_F_IFACE; |
1198 | 1199 | ||
1199 | if (!ipv6_addr_any(&fl6->saddr)) | 1200 | if (!ipv6_addr_any(&fl6->saddr)) |
@@ -1322,8 +1323,7 @@ static void ip6_link_failure(struct sk_buff *skb) | |||
1322 | if (rt) { | 1323 | if (rt) { |
1323 | if (rt->rt6i_flags & RTF_CACHE) { | 1324 | if (rt->rt6i_flags & RTF_CACHE) { |
1324 | dst_hold(&rt->dst); | 1325 | dst_hold(&rt->dst); |
1325 | if (ip6_del_rt(rt)) | 1326 | ip6_del_rt(rt); |
1326 | dst_free(&rt->dst); | ||
1327 | } else if (rt->rt6i_node && (rt->rt6i_flags & RTF_DEFAULT)) { | 1327 | } else if (rt->rt6i_node && (rt->rt6i_flags & RTF_DEFAULT)) { |
1328 | rt->rt6i_node->fn_sernum = -1; | 1328 | rt->rt6i_node->fn_sernum = -1; |
1329 | } | 1329 | } |
@@ -1886,9 +1886,11 @@ int ip6_route_info_create(struct fib6_config *cfg, struct rt6_info **rt_ret) | |||
1886 | rt->dst.input = ip6_pkt_prohibit; | 1886 | rt->dst.input = ip6_pkt_prohibit; |
1887 | break; | 1887 | break; |
1888 | case RTN_THROW: | 1888 | case RTN_THROW: |
1889 | case RTN_UNREACHABLE: | ||
1889 | default: | 1890 | default: |
1890 | rt->dst.error = (cfg->fc_type == RTN_THROW) ? -EAGAIN | 1891 | rt->dst.error = (cfg->fc_type == RTN_THROW) ? -EAGAIN |
1891 | : -ENETUNREACH; | 1892 | : (cfg->fc_type == RTN_UNREACHABLE) |
1893 | ? -EHOSTUNREACH : -ENETUNREACH; | ||
1892 | rt->dst.output = ip6_pkt_discard_out; | 1894 | rt->dst.output = ip6_pkt_discard_out; |
1893 | rt->dst.input = ip6_pkt_discard; | 1895 | rt->dst.input = ip6_pkt_discard; |
1894 | break; | 1896 | break; |
@@ -2028,7 +2030,8 @@ static int __ip6_del_rt(struct rt6_info *rt, struct nl_info *info) | |||
2028 | struct fib6_table *table; | 2030 | struct fib6_table *table; |
2029 | struct net *net = dev_net(rt->dst.dev); | 2031 | struct net *net = dev_net(rt->dst.dev); |
2030 | 2032 | ||
2031 | if (rt == net->ipv6.ip6_null_entry) { | 2033 | if (rt == net->ipv6.ip6_null_entry || |
2034 | rt->dst.flags & DST_NOCACHE) { | ||
2032 | err = -ENOENT; | 2035 | err = -ENOENT; |
2033 | goto out; | 2036 | goto out; |
2034 | } | 2037 | } |
@@ -2515,6 +2518,7 @@ struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev, | |||
2515 | rt->rt6i_dst.addr = *addr; | 2518 | rt->rt6i_dst.addr = *addr; |
2516 | rt->rt6i_dst.plen = 128; | 2519 | rt->rt6i_dst.plen = 128; |
2517 | rt->rt6i_table = fib6_get_table(net, RT6_TABLE_LOCAL); | 2520 | rt->rt6i_table = fib6_get_table(net, RT6_TABLE_LOCAL); |
2521 | rt->dst.flags |= DST_NOCACHE; | ||
2518 | 2522 | ||
2519 | atomic_set(&rt->dst.__refcnt, 1); | 2523 | atomic_set(&rt->dst.__refcnt, 1); |
2520 | 2524 | ||
@@ -3303,7 +3307,8 @@ errout: | |||
3303 | return err; | 3307 | return err; |
3304 | } | 3308 | } |
3305 | 3309 | ||
3306 | void inet6_rt_notify(int event, struct rt6_info *rt, struct nl_info *info) | 3310 | void inet6_rt_notify(int event, struct rt6_info *rt, struct nl_info *info, |
3311 | unsigned int nlm_flags) | ||
3307 | { | 3312 | { |
3308 | struct sk_buff *skb; | 3313 | struct sk_buff *skb; |
3309 | struct net *net = info->nl_net; | 3314 | struct net *net = info->nl_net; |
@@ -3318,7 +3323,7 @@ void inet6_rt_notify(int event, struct rt6_info *rt, struct nl_info *info) | |||
3318 | goto errout; | 3323 | goto errout; |
3319 | 3324 | ||
3320 | err = rt6_fill_node(net, skb, rt, NULL, NULL, 0, | 3325 | err = rt6_fill_node(net, skb, rt, NULL, NULL, 0, |
3321 | event, info->portid, seq, 0, 0, 0); | 3326 | event, info->portid, seq, 0, 0, nlm_flags); |
3322 | if (err < 0) { | 3327 | if (err < 0) { |
3323 | /* -EMSGSIZE implies BUG in rt6_nlmsg_size() */ | 3328 | /* -EMSGSIZE implies BUG in rt6_nlmsg_size() */ |
3324 | WARN_ON(err == -EMSGSIZE); | 3329 | WARN_ON(err == -EMSGSIZE); |
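
With RTN_UNREACHABLE handled explicitly in ip6_route_info_create(), userspace now gets -EHOSTUNREACH for unreachable routes instead of the catch-all -ENETUNREACH. The same selection restated as a plain function (the RTN_* values here are illustrative, not the real uapi numbers):

    #include <stdio.h>
    #include <errno.h>

    enum { RTN_THROW = 1, RTN_UNREACHABLE, RTN_PROHIBIT };

    static int route_error(int fc_type)
    {
        switch (fc_type) {
        case RTN_THROW:       return -EAGAIN;
        case RTN_UNREACHABLE: return -EHOSTUNREACH;  /* was -ENETUNREACH */
        default:              return -ENETUNREACH;
        }
    }

    int main(void)
    {
        printf("throw=%d unreachable=%d other=%d\n",
               route_error(RTN_THROW), route_error(RTN_UNREACHABLE),
               route_error(0));
        return 0;
    }
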
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c index f6b090df3930..afca2eb4dfa7 100644 --- a/net/l2tp/l2tp_core.c +++ b/net/l2tp/l2tp_core.c | |||
@@ -1319,7 +1319,7 @@ static void l2tp_tunnel_del_work(struct work_struct *work) | |||
1319 | tunnel = container_of(work, struct l2tp_tunnel, del_work); | 1319 | tunnel = container_of(work, struct l2tp_tunnel, del_work); |
1320 | sk = l2tp_tunnel_sock_lookup(tunnel); | 1320 | sk = l2tp_tunnel_sock_lookup(tunnel); |
1321 | if (!sk) | 1321 | if (!sk) |
1322 | return; | 1322 | goto out; |
1323 | 1323 | ||
1324 | sock = sk->sk_socket; | 1324 | sock = sk->sk_socket; |
1325 | 1325 | ||
@@ -1341,6 +1341,8 @@ static void l2tp_tunnel_del_work(struct work_struct *work) | |||
1341 | } | 1341 | } |
1342 | 1342 | ||
1343 | l2tp_tunnel_sock_put(sk); | 1343 | l2tp_tunnel_sock_put(sk); |
1344 | out: | ||
1345 | l2tp_tunnel_dec_refcount(tunnel); | ||
1344 | } | 1346 | } |
1345 | 1347 | ||
1346 | /* Create a socket for the tunnel, if one isn't set up by | 1348 | /* Create a socket for the tunnel, if one isn't set up by |
@@ -1636,8 +1638,13 @@ EXPORT_SYMBOL_GPL(l2tp_tunnel_create); | |||
1636 | */ | 1638 | */ |
1637 | int l2tp_tunnel_delete(struct l2tp_tunnel *tunnel) | 1639 | int l2tp_tunnel_delete(struct l2tp_tunnel *tunnel) |
1638 | { | 1640 | { |
1641 | l2tp_tunnel_inc_refcount(tunnel); | ||
1639 | l2tp_tunnel_closeall(tunnel); | 1642 | l2tp_tunnel_closeall(tunnel); |
1640 | return (false == queue_work(l2tp_wq, &tunnel->del_work)); | 1643 | if (false == queue_work(l2tp_wq, &tunnel->del_work)) { |
1644 | l2tp_tunnel_dec_refcount(tunnel); | ||
1645 | return 1; | ||
1646 | } | ||
1647 | return 0; | ||
1641 | } | 1648 | } |
1642 | EXPORT_SYMBOL_GPL(l2tp_tunnel_delete); | 1649 | EXPORT_SYMBOL_GPL(l2tp_tunnel_delete); |
1643 | 1650 | ||
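
The l2tp fix pins the tunnel across the deferred work: l2tp_tunnel_delete() takes a reference before queueing, the work function drops it on every exit path (including the early !sk return via the new "out" label), and a failed queue_work() drops it immediately. A small single-threaded model of that invariant; the names are illustrative, not the l2tp API:

    #include <stdio.h>
    #include <stdlib.h>
    #include <stdatomic.h>
    #include <stdbool.h>

    struct tunnel { atomic_int ref; };

    static void tunnel_put(struct tunnel *t)
    {
        if (atomic_fetch_sub(&t->ref, 1) == 1) {
            printf("tunnel freed\n");
            free(t);
        }
    }

    static void del_work(struct tunnel *t)
    {
        bool have_sock = false;       /* mimic the !sk early-return path */

        if (!have_sock)
            goto out;
        /* ... close the tunnel socket ... */
    out:
        tunnel_put(t);                /* balanced on every path */
    }

    static int tunnel_delete(struct tunnel *t, bool queue_ok)
    {
        atomic_fetch_add(&t->ref, 1); /* pin across the deferred work */
        if (!queue_ok) {
            tunnel_put(t);            /* work never runs: undo the pin */
            return 1;
        }
        del_work(t);                  /* normally runs from a workqueue */
        return 0;
    }

    int main(void)
    {
        struct tunnel *t = malloc(sizeof(*t));

        atomic_init(&t->ref, 1);
        tunnel_delete(t, true);
        tunnel_put(t);                /* drop the creation reference */
        return 0;
    }
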
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index 17b1fe961c5d..7a77a1470f25 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c | |||
@@ -2474,6 +2474,7 @@ static int ieee80211_set_cqm_rssi_config(struct wiphy *wiphy, | |||
2474 | 2474 | ||
2475 | bss_conf->cqm_rssi_thold = rssi_thold; | 2475 | bss_conf->cqm_rssi_thold = rssi_thold; |
2476 | bss_conf->cqm_rssi_hyst = rssi_hyst; | 2476 | bss_conf->cqm_rssi_hyst = rssi_hyst; |
2477 | sdata->u.mgd.last_cqm_event_signal = 0; | ||
2477 | 2478 | ||
2478 | /* tell the driver upon association, unless already associated */ | 2479 | /* tell the driver upon association, unless already associated */ |
2479 | if (sdata->u.mgd.associated && | 2480 | if (sdata->u.mgd.associated && |
@@ -2518,15 +2519,17 @@ static int ieee80211_set_bitrate_mask(struct wiphy *wiphy, | |||
2518 | continue; | 2519 | continue; |
2519 | 2520 | ||
2520 | for (j = 0; j < IEEE80211_HT_MCS_MASK_LEN; j++) { | 2521 | for (j = 0; j < IEEE80211_HT_MCS_MASK_LEN; j++) { |
2521 | if (~sdata->rc_rateidx_mcs_mask[i][j]) | 2522 | if (~sdata->rc_rateidx_mcs_mask[i][j]) { |
2522 | sdata->rc_has_mcs_mask[i] = true; | 2523 | sdata->rc_has_mcs_mask[i] = true; |
2524 | break; | ||
2525 | } | ||
2526 | } | ||
2523 | 2527 | ||
2524 | if (~sdata->rc_rateidx_vht_mcs_mask[i][j]) | 2528 | for (j = 0; j < NL80211_VHT_NSS_MAX; j++) { |
2529 | if (~sdata->rc_rateidx_vht_mcs_mask[i][j]) { | ||
2525 | sdata->rc_has_vht_mcs_mask[i] = true; | 2530 | sdata->rc_has_vht_mcs_mask[i] = true; |
2526 | |||
2527 | if (sdata->rc_has_mcs_mask[i] && | ||
2528 | sdata->rc_has_vht_mcs_mask[i]) | ||
2529 | break; | 2531 | break; |
2532 | } | ||
2530 | } | 2533 | } |
2531 | } | 2534 | } |
2532 | 2535 | ||
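
The second mac80211 hunk fixes an index-reuse bug: the VHT mask was tested with j left at IEEE80211_HT_MCS_MASK_LEN from the HT loop, indexing past the smaller VHT array, so each table now gets its own loop and break. A minimal illustration of the trap:

    #include <stdio.h>

    #define HT_LEN  10
    #define VHT_LEN 8

    int main(void)
    {
        unsigned char vht[VHT_LEN] = { 0 };
        int j, has_vht = 0;

        vht[3] = 0xff;                    /* a VHT mask entry is set */

        for (j = 0; j < HT_LEN; j++)
            ;                             /* ... per-index HT handling ... */

        /* Buggy form: testing vht[j] here reads vht[10], past the end. */

        for (j = 0; j < VHT_LEN; j++) {   /* fixed: VHT gets its own loop */
            if (vht[j]) {
                has_vht = 1;
                break;
            }
        }
        printf("has_vht=%d\n", has_vht);
        return 0;
    }
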
diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c index 675d12c69e32..a5d41dfa9f05 100644 --- a/net/netfilter/nf_log.c +++ b/net/netfilter/nf_log.c | |||
@@ -107,12 +107,17 @@ EXPORT_SYMBOL(nf_log_register); | |||
107 | 107 | ||
108 | void nf_log_unregister(struct nf_logger *logger) | 108 | void nf_log_unregister(struct nf_logger *logger) |
109 | { | 109 | { |
110 | const struct nf_logger *log; | ||
110 | int i; | 111 | int i; |
111 | 112 | ||
112 | mutex_lock(&nf_log_mutex); | 113 | mutex_lock(&nf_log_mutex); |
113 | for (i = 0; i < NFPROTO_NUMPROTO; i++) | 114 | for (i = 0; i < NFPROTO_NUMPROTO; i++) { |
114 | RCU_INIT_POINTER(loggers[i][logger->type], NULL); | 115 | log = nft_log_dereference(loggers[i][logger->type]); |
116 | if (log == logger) | ||
117 | RCU_INIT_POINTER(loggers[i][logger->type], NULL); | ||
118 | } | ||
115 | mutex_unlock(&nf_log_mutex); | 119 | mutex_unlock(&nf_log_mutex); |
120 | synchronize_rcu(); | ||
116 | } | 121 | } |
117 | EXPORT_SYMBOL(nf_log_unregister); | 122 | EXPORT_SYMBOL(nf_log_unregister); |
118 | 123 | ||
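
nf_log_unregister() used to clear every slot of its logger type unconditionally, which could unhook a different logger registered for the same type; it now compares before clearing, and the added synchronize_rcu() lets the caller free the logger safely afterwards. A userspace restatement in which the grace-period wait is a no-op stub:

    #include <stdio.h>

    #define NPROTO 4

    static const char *loggers[NPROTO];

    static void synchronize_readers(void) { /* stand-in for synchronize_rcu() */ }

    static void log_unregister(const char *logger)
    {
        int i;

        for (i = 0; i < NPROTO; i++)
            if (loggers[i] == logger)   /* don't clobber someone else's slot */
                loggers[i] = NULL;
        synchronize_readers();
    }

    int main(void)
    {
        const char *a = "logger-a", *b = "logger-b";

        loggers[0] = a;
        loggers[1] = b;
        log_unregister(a);
        printf("slot0=%s slot1=%s\n",
               loggers[0] ? loggers[0] : "(null)",
               loggers[1] ? loggers[1] : "(null)");
        return 0;
    }
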
diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c index 66def315eb56..9c8fab00164b 100644 --- a/net/netfilter/nft_compat.c +++ b/net/netfilter/nft_compat.c | |||
@@ -619,6 +619,13 @@ struct nft_xt { | |||
619 | 619 | ||
620 | static struct nft_expr_type nft_match_type; | 620 | static struct nft_expr_type nft_match_type; |
621 | 621 | ||
622 | static bool nft_match_cmp(const struct xt_match *match, | ||
623 | const char *name, u32 rev, u32 family) | ||
624 | { | ||
625 | return strcmp(match->name, name) == 0 && match->revision == rev && | ||
626 | (match->family == NFPROTO_UNSPEC || match->family == family); | ||
627 | } | ||
628 | |||
622 | static const struct nft_expr_ops * | 629 | static const struct nft_expr_ops * |
623 | nft_match_select_ops(const struct nft_ctx *ctx, | 630 | nft_match_select_ops(const struct nft_ctx *ctx, |
624 | const struct nlattr * const tb[]) | 631 | const struct nlattr * const tb[]) |
@@ -626,7 +633,7 @@ nft_match_select_ops(const struct nft_ctx *ctx, | |||
626 | struct nft_xt *nft_match; | 633 | struct nft_xt *nft_match; |
627 | struct xt_match *match; | 634 | struct xt_match *match; |
628 | char *mt_name; | 635 | char *mt_name; |
629 | __u32 rev, family; | 636 | u32 rev, family; |
630 | 637 | ||
631 | if (tb[NFTA_MATCH_NAME] == NULL || | 638 | if (tb[NFTA_MATCH_NAME] == NULL || |
632 | tb[NFTA_MATCH_REV] == NULL || | 639 | tb[NFTA_MATCH_REV] == NULL || |
@@ -641,8 +648,7 @@ nft_match_select_ops(const struct nft_ctx *ctx, | |||
641 | list_for_each_entry(nft_match, &nft_match_list, head) { | 648 | list_for_each_entry(nft_match, &nft_match_list, head) { |
642 | struct xt_match *match = nft_match->ops.data; | 649 | struct xt_match *match = nft_match->ops.data; |
643 | 650 | ||
644 | if (strcmp(match->name, mt_name) == 0 && | 651 | if (nft_match_cmp(match, mt_name, rev, family)) { |
645 | match->revision == rev && match->family == family) { | ||
646 | if (!try_module_get(match->me)) | 652 | if (!try_module_get(match->me)) |
647 | return ERR_PTR(-ENOENT); | 653 | return ERR_PTR(-ENOENT); |
648 | 654 | ||
@@ -693,6 +699,13 @@ static LIST_HEAD(nft_target_list); | |||
693 | 699 | ||
694 | static struct nft_expr_type nft_target_type; | 700 | static struct nft_expr_type nft_target_type; |
695 | 701 | ||
702 | static bool nft_target_cmp(const struct xt_target *tg, | ||
703 | const char *name, u32 rev, u32 family) | ||
704 | { | ||
705 | return strcmp(tg->name, name) == 0 && tg->revision == rev && | ||
706 | (tg->family == NFPROTO_UNSPEC || tg->family == family); | ||
707 | } | ||
708 | |||
696 | static const struct nft_expr_ops * | 709 | static const struct nft_expr_ops * |
697 | nft_target_select_ops(const struct nft_ctx *ctx, | 710 | nft_target_select_ops(const struct nft_ctx *ctx, |
698 | const struct nlattr * const tb[]) | 711 | const struct nlattr * const tb[]) |
@@ -700,7 +713,7 @@ nft_target_select_ops(const struct nft_ctx *ctx, | |||
700 | struct nft_xt *nft_target; | 713 | struct nft_xt *nft_target; |
701 | struct xt_target *target; | 714 | struct xt_target *target; |
702 | char *tg_name; | 715 | char *tg_name; |
703 | __u32 rev, family; | 716 | u32 rev, family; |
704 | 717 | ||
705 | if (tb[NFTA_TARGET_NAME] == NULL || | 718 | if (tb[NFTA_TARGET_NAME] == NULL || |
706 | tb[NFTA_TARGET_REV] == NULL || | 719 | tb[NFTA_TARGET_REV] == NULL || |
@@ -715,8 +728,7 @@ nft_target_select_ops(const struct nft_ctx *ctx, | |||
715 | list_for_each_entry(nft_target, &nft_target_list, head) { | 728 | list_for_each_entry(nft_target, &nft_target_list, head) { |
716 | struct xt_target *target = nft_target->ops.data; | 729 | struct xt_target *target = nft_target->ops.data; |
717 | 730 | ||
718 | if (strcmp(target->name, tg_name) == 0 && | 731 | if (nft_target_cmp(target, tg_name, rev, family)) { |
719 | target->revision == rev && target->family == family) { | ||
720 | if (!try_module_get(target->me)) | 732 | if (!try_module_get(target->me)) |
721 | return ERR_PTR(-ENOENT); | 733 | return ERR_PTR(-ENOENT); |
722 | 734 | ||
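
The new nft_match_cmp()/nft_target_cmp() helpers let family-agnostic (NFPROTO_UNSPEC) extensions satisfy per-family requests, which the old exact family test rejected. The same predicate, restated self-contained so the wildcard behavior is easy to see:

    #include <stdio.h>
    #include <string.h>

    #define NFPROTO_UNSPEC 0

    struct xt_match_like {
        const char *name;
        unsigned int revision;
        unsigned int family;
    };

    static int match_cmp(const struct xt_match_like *m, const char *name,
                         unsigned int rev, unsigned int family)
    {
        return strcmp(m->name, name) == 0 && m->revision == rev &&
               (m->family == NFPROTO_UNSPEC || m->family == family);
    }

    int main(void)
    {
        struct xt_match_like m = { "limit", 0, NFPROTO_UNSPEC };

        /* Before the fix, an UNSPEC match never compared equal to a
         * per-family request; now it matches any requested family. */
        printf("ipv4 request matches: %d\n", match_cmp(&m, "limit", 0, 2));
        return 0;
    }
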
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index 7f86d3b55060..8f060d7f9a0e 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c | |||
@@ -125,6 +125,24 @@ static inline u32 netlink_group_mask(u32 group) | |||
125 | return group ? 1 << (group - 1) : 0; | 125 | return group ? 1 << (group - 1) : 0; |
126 | } | 126 | } |
127 | 127 | ||
128 | static struct sk_buff *netlink_to_full_skb(const struct sk_buff *skb, | ||
129 | gfp_t gfp_mask) | ||
130 | { | ||
131 | unsigned int len = skb_end_offset(skb); | ||
132 | struct sk_buff *new; | ||
133 | |||
134 | new = alloc_skb(len, gfp_mask); | ||
135 | if (new == NULL) | ||
136 | return NULL; | ||
137 | |||
138 | NETLINK_CB(new).portid = NETLINK_CB(skb).portid; | ||
139 | NETLINK_CB(new).dst_group = NETLINK_CB(skb).dst_group; | ||
140 | NETLINK_CB(new).creds = NETLINK_CB(skb).creds; | ||
141 | |||
142 | memcpy(skb_put(new, len), skb->data, len); | ||
143 | return new; | ||
144 | } | ||
145 | |||
128 | int netlink_add_tap(struct netlink_tap *nt) | 146 | int netlink_add_tap(struct netlink_tap *nt) |
129 | { | 147 | { |
130 | if (unlikely(nt->dev->type != ARPHRD_NETLINK)) | 148 | if (unlikely(nt->dev->type != ARPHRD_NETLINK)) |
@@ -206,7 +224,11 @@ static int __netlink_deliver_tap_skb(struct sk_buff *skb, | |||
206 | int ret = -ENOMEM; | 224 | int ret = -ENOMEM; |
207 | 225 | ||
208 | dev_hold(dev); | 226 | dev_hold(dev); |
209 | nskb = skb_clone(skb, GFP_ATOMIC); | 227 | |
228 | if (netlink_skb_is_mmaped(skb) || is_vmalloc_addr(skb->head)) | ||
229 | nskb = netlink_to_full_skb(skb, GFP_ATOMIC); | ||
230 | else | ||
231 | nskb = skb_clone(skb, GFP_ATOMIC); | ||
210 | if (nskb) { | 232 | if (nskb) { |
211 | nskb->dev = dev; | 233 | nskb->dev = dev; |
212 | nskb->protocol = htons((u16) sk->sk_protocol); | 234 | nskb->protocol = htons((u16) sk->sk_protocol); |
@@ -279,11 +301,6 @@ static void netlink_rcv_wake(struct sock *sk) | |||
279 | } | 301 | } |
280 | 302 | ||
281 | #ifdef CONFIG_NETLINK_MMAP | 303 | #ifdef CONFIG_NETLINK_MMAP |
282 | static bool netlink_skb_is_mmaped(const struct sk_buff *skb) | ||
283 | { | ||
284 | return NETLINK_CB(skb).flags & NETLINK_SKB_MMAPED; | ||
285 | } | ||
286 | |||
287 | static bool netlink_rx_is_mmaped(struct sock *sk) | 304 | static bool netlink_rx_is_mmaped(struct sock *sk) |
288 | { | 305 | { |
289 | return nlk_sk(sk)->rx_ring.pg_vec != NULL; | 306 | return nlk_sk(sk)->rx_ring.pg_vec != NULL; |
@@ -846,7 +863,6 @@ static void netlink_ring_set_copied(struct sock *sk, struct sk_buff *skb) | |||
846 | } | 863 | } |
847 | 864 | ||
848 | #else /* CONFIG_NETLINK_MMAP */ | 865 | #else /* CONFIG_NETLINK_MMAP */ |
849 | #define netlink_skb_is_mmaped(skb) false | ||
850 | #define netlink_rx_is_mmaped(sk) false | 866 | #define netlink_rx_is_mmaped(sk) false |
851 | #define netlink_tx_is_mmaped(sk) false | 867 | #define netlink_tx_is_mmaped(sk) false |
852 | #define netlink_mmap sock_no_mmap | 868 | #define netlink_mmap sock_no_mmap |
@@ -1094,8 +1110,8 @@ static int netlink_insert(struct sock *sk, u32 portid) | |||
1094 | 1110 | ||
1095 | lock_sock(sk); | 1111 | lock_sock(sk); |
1096 | 1112 | ||
1097 | err = -EBUSY; | 1113 | err = nlk_sk(sk)->portid == portid ? 0 : -EBUSY; |
1098 | if (nlk_sk(sk)->portid) | 1114 | if (nlk_sk(sk)->bound) |
1099 | goto err; | 1115 | goto err; |
1100 | 1116 | ||
1101 | err = -ENOMEM; | 1117 | err = -ENOMEM; |
@@ -1115,10 +1131,14 @@ static int netlink_insert(struct sock *sk, u32 portid) | |||
1115 | err = -EOVERFLOW; | 1131 | err = -EOVERFLOW; |
1116 | if (err == -EEXIST) | 1132 | if (err == -EEXIST) |
1117 | err = -EADDRINUSE; | 1133 | err = -EADDRINUSE; |
1118 | nlk_sk(sk)->portid = 0; | ||
1119 | sock_put(sk); | 1134 | sock_put(sk); |
1135 | goto err; | ||
1120 | } | 1136 | } |
1121 | 1137 | ||
1138 | /* We need to ensure that the socket is hashed and visible. */ | ||
1139 | smp_wmb(); | ||
1140 | nlk_sk(sk)->bound = portid; | ||
1141 | |||
1122 | err: | 1142 | err: |
1123 | release_sock(sk); | 1143 | release_sock(sk); |
1124 | return err; | 1144 | return err; |
@@ -1503,6 +1523,7 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr, | |||
1503 | struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr; | 1523 | struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr; |
1504 | int err; | 1524 | int err; |
1505 | long unsigned int groups = nladdr->nl_groups; | 1525 | long unsigned int groups = nladdr->nl_groups; |
1526 | bool bound; | ||
1506 | 1527 | ||
1507 | if (addr_len < sizeof(struct sockaddr_nl)) | 1528 | if (addr_len < sizeof(struct sockaddr_nl)) |
1508 | return -EINVAL; | 1529 | return -EINVAL; |
@@ -1519,9 +1540,14 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr, | |||
1519 | return err; | 1540 | return err; |
1520 | } | 1541 | } |
1521 | 1542 | ||
1522 | if (nlk->portid) | 1543 | bound = nlk->bound; |
1544 | if (bound) { | ||
1545 | /* Ensure nlk->portid is up-to-date. */ | ||
1546 | smp_rmb(); | ||
1547 | |||
1523 | if (nladdr->nl_pid != nlk->portid) | 1548 | if (nladdr->nl_pid != nlk->portid) |
1524 | return -EINVAL; | 1549 | return -EINVAL; |
1550 | } | ||
1525 | 1551 | ||
1526 | if (nlk->netlink_bind && groups) { | 1552 | if (nlk->netlink_bind && groups) { |
1527 | int group; | 1553 | int group; |
@@ -1537,7 +1563,10 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr, | |||
1537 | } | 1563 | } |
1538 | } | 1564 | } |
1539 | 1565 | ||
1540 | if (!nlk->portid) { | 1566 | /* No need for barriers here as we return to user-space without |
1567 | * using any of the bound attributes. | ||
1568 | */ | ||
1569 | if (!bound) { | ||
1541 | err = nladdr->nl_pid ? | 1570 | err = nladdr->nl_pid ? |
1542 | netlink_insert(sk, nladdr->nl_pid) : | 1571 | netlink_insert(sk, nladdr->nl_pid) : |
1543 | netlink_autobind(sock); | 1572 | netlink_autobind(sock); |
@@ -1585,7 +1614,10 @@ static int netlink_connect(struct socket *sock, struct sockaddr *addr, | |||
1585 | !netlink_allowed(sock, NL_CFG_F_NONROOT_SEND)) | 1614 | !netlink_allowed(sock, NL_CFG_F_NONROOT_SEND)) |
1586 | return -EPERM; | 1615 | return -EPERM; |
1587 | 1616 | ||
1588 | if (!nlk->portid) | 1617 | /* No need for barriers here as we return to user-space without |
1618 | * using any of the bound attributes. | ||
1619 | */ | ||
1620 | if (!nlk->bound) | ||
1589 | err = netlink_autobind(sock); | 1621 | err = netlink_autobind(sock); |
1590 | 1622 | ||
1591 | if (err == 0) { | 1623 | if (err == 0) { |
@@ -2426,10 +2458,13 @@ static int netlink_sendmsg(struct socket *sock, struct msghdr *msg, size_t len) | |||
2426 | dst_group = nlk->dst_group; | 2458 | dst_group = nlk->dst_group; |
2427 | } | 2459 | } |
2428 | 2460 | ||
2429 | if (!nlk->portid) { | 2461 | if (!nlk->bound) { |
2430 | err = netlink_autobind(sock); | 2462 | err = netlink_autobind(sock); |
2431 | if (err) | 2463 | if (err) |
2432 | goto out; | 2464 | goto out; |
2465 | } else { | ||
2466 | /* Ensure nlk is hashed and visible. */ | ||
2467 | smp_rmb(); | ||
2433 | } | 2468 | } |
2434 | 2469 | ||
2435 | /* It's a really convoluted way for userland to ask for mmaped | 2470 | /* It's a really convoluted way for userland to ask for mmaped |
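
The new "bound" flag decouples "socket is hashed" from the portid value: netlink_insert() publishes the table state, issues smp_wmb(), then sets bound, and readers pair that with smp_rmb() before trusting portid. A userspace model of that publication, using C11 release/acquire in place of the kernel's explicit barriers; it is a sketch of the ordering, not the netlink code:

    #include <stdio.h>
    #include <stdatomic.h>

    struct nlk {
        unsigned int portid;
        atomic_bool bound;
    };

    static void insert(struct nlk *n, unsigned int portid)
    {
        n->portid = portid;             /* hash the socket, set portid */
        atomic_store_explicit(&n->bound, true, memory_order_release);
    }

    static int check_bind(struct nlk *n, unsigned int requested)
    {
        if (atomic_load_explicit(&n->bound, memory_order_acquire) &&
            n->portid != requested)
            return -22;                 /* -EINVAL: bound to another portid */
        return 0;
    }

    int main(void)
    {
        struct nlk n = { 0 };

        insert(&n, 1234);
        printf("rebind 1234: %d, rebind 99: %d\n",
               check_bind(&n, 1234), check_bind(&n, 99));
        return 0;
    }
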
diff --git a/net/netlink/af_netlink.h b/net/netlink/af_netlink.h index 89008405d6b4..14437d9b1965 100644 --- a/net/netlink/af_netlink.h +++ b/net/netlink/af_netlink.h | |||
@@ -35,6 +35,7 @@ struct netlink_sock { | |||
35 | unsigned long state; | 35 | unsigned long state; |
36 | size_t max_recvmsg_len; | 36 | size_t max_recvmsg_len; |
37 | wait_queue_head_t wait; | 37 | wait_queue_head_t wait; |
38 | bool bound; | ||
38 | bool cb_running; | 39 | bool cb_running; |
39 | struct netlink_callback cb; | 40 | struct netlink_callback cb; |
40 | struct mutex *cb_mutex; | 41 | struct mutex *cb_mutex; |
@@ -59,6 +60,15 @@ static inline struct netlink_sock *nlk_sk(struct sock *sk) | |||
59 | return container_of(sk, struct netlink_sock, sk); | 60 | return container_of(sk, struct netlink_sock, sk); |
60 | } | 61 | } |
61 | 62 | ||
63 | static inline bool netlink_skb_is_mmaped(const struct sk_buff *skb) | ||
64 | { | ||
65 | #ifdef CONFIG_NETLINK_MMAP | ||
66 | return NETLINK_CB(skb).flags & NETLINK_SKB_MMAPED; | ||
67 | #else | ||
68 | return false; | ||
69 | #endif /* CONFIG_NETLINK_MMAP */ | ||
70 | } | ||
71 | |||
62 | struct netlink_table { | 72 | struct netlink_table { |
63 | struct rhashtable hash; | 73 | struct rhashtable hash; |
64 | struct hlist_head mc_list; | 74 | struct hlist_head mc_list; |
diff --git a/net/openvswitch/Kconfig b/net/openvswitch/Kconfig index 2a071f470d57..d143aa9f6654 100644 --- a/net/openvswitch/Kconfig +++ b/net/openvswitch/Kconfig | |||
@@ -5,7 +5,8 @@ | |||
5 | config OPENVSWITCH | 5 | config OPENVSWITCH |
6 | tristate "Open vSwitch" | 6 | tristate "Open vSwitch" |
7 | depends on INET | 7 | depends on INET |
8 | depends on (!NF_CONNTRACK || NF_CONNTRACK) | 8 | depends on !NF_CONNTRACK || \ |
9 | (NF_CONNTRACK && (!NF_DEFRAG_IPV6 || NF_DEFRAG_IPV6)) | ||
9 | select LIBCRC32C | 10 | select LIBCRC32C |
10 | select MPLS | 11 | select MPLS |
11 | select NET_MPLS_GSO | 12 | select NET_MPLS_GSO |
diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c index e8e524ad8a01..002a755fa07e 100644 --- a/net/openvswitch/conntrack.c +++ b/net/openvswitch/conntrack.c | |||
@@ -275,13 +275,15 @@ static int ovs_ct_helper(struct sk_buff *skb, u16 proto) | |||
275 | case NFPROTO_IPV6: { | 275 | case NFPROTO_IPV6: { |
276 | u8 nexthdr = ipv6_hdr(skb)->nexthdr; | 276 | u8 nexthdr = ipv6_hdr(skb)->nexthdr; |
277 | __be16 frag_off; | 277 | __be16 frag_off; |
278 | int ofs; | ||
278 | 279 | ||
279 | protoff = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), | 280 | ofs = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &nexthdr, |
280 | &nexthdr, &frag_off); | 281 | &frag_off); |
281 | if (protoff < 0 || (frag_off & htons(~0x7)) != 0) { | 282 | if (ofs < 0 || (frag_off & htons(~0x7)) != 0) { |
282 | pr_debug("proto header not found\n"); | 283 | pr_debug("proto header not found\n"); |
283 | return NF_ACCEPT; | 284 | return NF_ACCEPT; |
284 | } | 285 | } |
286 | protoff = ofs; | ||
285 | break; | 287 | break; |
286 | } | 288 | } |
287 | default: | 289 | default: |
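
The temporary "int ofs" exists because ipv6_skip_exthdr() reports errors as a negative int; if that result lands directly in an unsigned offset variable, a "< 0" check is dead code, which is exactly what the signed local guards against. Minimal reproduction of the trap:

    #include <stdio.h>

    static int skip_exthdr_stub(void) { return -1; }   /* error return */

    int main(void)
    {
        unsigned int protoff = skip_exthdr_stub();  /* unsigned sink */
        int ofs = skip_exthdr_stub();               /* signed local  */

        /* "protoff < 0" can never be true for an unsigned type, so the
         * error check silently passed; the signed local preserves it. */
        printf("unsigned sees %u, signed sees %d\n", protoff, ofs);
        return 0;
    }
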
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c index 6fbd2decb19e..b816ff871528 100644 --- a/net/openvswitch/datapath.c +++ b/net/openvswitch/datapath.c | |||
@@ -952,7 +952,7 @@ static int ovs_flow_cmd_new(struct sk_buff *skb, struct genl_info *info) | |||
952 | if (error) | 952 | if (error) |
953 | goto err_kfree_flow; | 953 | goto err_kfree_flow; |
954 | 954 | ||
955 | ovs_flow_mask_key(&new_flow->key, &key, &mask); | 955 | ovs_flow_mask_key(&new_flow->key, &key, true, &mask); |
956 | 956 | ||
957 | /* Extract flow identifier. */ | 957 | /* Extract flow identifier. */ |
958 | error = ovs_nla_get_identifier(&new_flow->id, a[OVS_FLOW_ATTR_UFID], | 958 | error = ovs_nla_get_identifier(&new_flow->id, a[OVS_FLOW_ATTR_UFID], |
@@ -1080,7 +1080,7 @@ static struct sw_flow_actions *get_flow_actions(struct net *net, | |||
1080 | struct sw_flow_key masked_key; | 1080 | struct sw_flow_key masked_key; |
1081 | int error; | 1081 | int error; |
1082 | 1082 | ||
1083 | ovs_flow_mask_key(&masked_key, key, mask); | 1083 | ovs_flow_mask_key(&masked_key, key, true, mask); |
1084 | error = ovs_nla_copy_actions(net, a, &masked_key, &acts, log); | 1084 | error = ovs_nla_copy_actions(net, a, &masked_key, &acts, log); |
1085 | if (error) { | 1085 | if (error) { |
1086 | OVS_NLERR(log, | 1086 | OVS_NLERR(log, |
diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c index c92d6a262bc5..5c030a4d7338 100644 --- a/net/openvswitch/flow_netlink.c +++ b/net/openvswitch/flow_netlink.c | |||
@@ -57,6 +57,7 @@ struct ovs_len_tbl { | |||
57 | }; | 57 | }; |
58 | 58 | ||
59 | #define OVS_ATTR_NESTED -1 | 59 | #define OVS_ATTR_NESTED -1 |
60 | #define OVS_ATTR_VARIABLE -2 | ||
60 | 61 | ||
61 | static void update_range(struct sw_flow_match *match, | 62 | static void update_range(struct sw_flow_match *match, |
62 | size_t offset, size_t size, bool is_mask) | 63 | size_t offset, size_t size, bool is_mask) |
@@ -304,6 +305,10 @@ size_t ovs_key_attr_size(void) | |||
304 | + nla_total_size(28); /* OVS_KEY_ATTR_ND */ | 305 | + nla_total_size(28); /* OVS_KEY_ATTR_ND */ |
305 | } | 306 | } |
306 | 307 | ||
308 | static const struct ovs_len_tbl ovs_vxlan_ext_key_lens[OVS_VXLAN_EXT_MAX + 1] = { | ||
309 | [OVS_VXLAN_EXT_GBP] = { .len = sizeof(u32) }, | ||
310 | }; | ||
311 | |||
307 | static const struct ovs_len_tbl ovs_tunnel_key_lens[OVS_TUNNEL_KEY_ATTR_MAX + 1] = { | 312 | static const struct ovs_len_tbl ovs_tunnel_key_lens[OVS_TUNNEL_KEY_ATTR_MAX + 1] = { |
308 | [OVS_TUNNEL_KEY_ATTR_ID] = { .len = sizeof(u64) }, | 313 | [OVS_TUNNEL_KEY_ATTR_ID] = { .len = sizeof(u64) }, |
309 | [OVS_TUNNEL_KEY_ATTR_IPV4_SRC] = { .len = sizeof(u32) }, | 314 | [OVS_TUNNEL_KEY_ATTR_IPV4_SRC] = { .len = sizeof(u32) }, |
@@ -315,8 +320,9 @@ static const struct ovs_len_tbl ovs_tunnel_key_lens[OVS_TUNNEL_KEY_ATTR_MAX + 1] | |||
315 | [OVS_TUNNEL_KEY_ATTR_TP_SRC] = { .len = sizeof(u16) }, | 320 | [OVS_TUNNEL_KEY_ATTR_TP_SRC] = { .len = sizeof(u16) }, |
316 | [OVS_TUNNEL_KEY_ATTR_TP_DST] = { .len = sizeof(u16) }, | 321 | [OVS_TUNNEL_KEY_ATTR_TP_DST] = { .len = sizeof(u16) }, |
317 | [OVS_TUNNEL_KEY_ATTR_OAM] = { .len = 0 }, | 322 | [OVS_TUNNEL_KEY_ATTR_OAM] = { .len = 0 }, |
318 | [OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS] = { .len = OVS_ATTR_NESTED }, | 323 | [OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS] = { .len = OVS_ATTR_VARIABLE }, |
319 | [OVS_TUNNEL_KEY_ATTR_VXLAN_OPTS] = { .len = OVS_ATTR_NESTED }, | 324 | [OVS_TUNNEL_KEY_ATTR_VXLAN_OPTS] = { .len = OVS_ATTR_NESTED, |
325 | .next = ovs_vxlan_ext_key_lens }, | ||
320 | }; | 326 | }; |
321 | 327 | ||
322 | /* The size of the argument for each %OVS_KEY_ATTR_* Netlink attribute. */ | 328 | /* The size of the argument for each %OVS_KEY_ATTR_* Netlink attribute. */ |
@@ -349,6 +355,13 @@ static const struct ovs_len_tbl ovs_key_lens[OVS_KEY_ATTR_MAX + 1] = { | |||
349 | [OVS_KEY_ATTR_CT_LABEL] = { .len = sizeof(struct ovs_key_ct_label) }, | 355 | [OVS_KEY_ATTR_CT_LABEL] = { .len = sizeof(struct ovs_key_ct_label) }, |
350 | }; | 356 | }; |
351 | 357 | ||
358 | static bool check_attr_len(unsigned int attr_len, unsigned int expected_len) | ||
359 | { | ||
360 | return expected_len == attr_len || | ||
361 | expected_len == OVS_ATTR_NESTED || | ||
362 | expected_len == OVS_ATTR_VARIABLE; | ||
363 | } | ||
364 | |||
352 | static bool is_all_zero(const u8 *fp, size_t size) | 365 | static bool is_all_zero(const u8 *fp, size_t size) |
353 | { | 366 | { |
354 | int i; | 367 | int i; |
@@ -388,7 +401,7 @@ static int __parse_flow_nlattrs(const struct nlattr *attr, | |||
388 | } | 401 | } |
389 | 402 | ||
390 | expected_len = ovs_key_lens[type].len; | 403 | expected_len = ovs_key_lens[type].len; |
391 | if (nla_len(nla) != expected_len && expected_len != OVS_ATTR_NESTED) { | 404 | if (!check_attr_len(nla_len(nla), expected_len)) { |
392 | OVS_NLERR(log, "Key %d has unexpected len %d expected %d", | 405 | OVS_NLERR(log, "Key %d has unexpected len %d expected %d", |
393 | type, nla_len(nla), expected_len); | 406 | type, nla_len(nla), expected_len); |
394 | return -EINVAL; | 407 | return -EINVAL; |
@@ -473,29 +486,50 @@ static int genev_tun_opt_from_nlattr(const struct nlattr *a, | |||
473 | return 0; | 486 | return 0; |
474 | } | 487 | } |
475 | 488 | ||
476 | static const struct nla_policy vxlan_opt_policy[OVS_VXLAN_EXT_MAX + 1] = { | 489 | static int vxlan_tun_opt_from_nlattr(const struct nlattr *attr, |
477 | [OVS_VXLAN_EXT_GBP] = { .type = NLA_U32 }, | ||
478 | }; | ||
479 | |||
480 | static int vxlan_tun_opt_from_nlattr(const struct nlattr *a, | ||
481 | struct sw_flow_match *match, bool is_mask, | 490 | struct sw_flow_match *match, bool is_mask, |
482 | bool log) | 491 | bool log) |
483 | { | 492 | { |
484 | struct nlattr *tb[OVS_VXLAN_EXT_MAX+1]; | 493 | struct nlattr *a; |
494 | int rem; | ||
485 | unsigned long opt_key_offset; | 495 | unsigned long opt_key_offset; |
486 | struct vxlan_metadata opts; | 496 | struct vxlan_metadata opts; |
487 | int err; | ||
488 | 497 | ||
489 | BUILD_BUG_ON(sizeof(opts) > sizeof(match->key->tun_opts)); | 498 | BUILD_BUG_ON(sizeof(opts) > sizeof(match->key->tun_opts)); |
490 | 499 | ||
491 | err = nla_parse_nested(tb, OVS_VXLAN_EXT_MAX, a, vxlan_opt_policy); | ||
492 | if (err < 0) | ||
493 | return err; | ||
494 | |||
495 | memset(&opts, 0, sizeof(opts)); | 500 | memset(&opts, 0, sizeof(opts)); |
501 | nla_for_each_nested(a, attr, rem) { | ||
502 | int type = nla_type(a); | ||
496 | 503 | ||
497 | if (tb[OVS_VXLAN_EXT_GBP]) | 504 | if (type > OVS_VXLAN_EXT_MAX) { |
498 | opts.gbp = nla_get_u32(tb[OVS_VXLAN_EXT_GBP]); | 505 | OVS_NLERR(log, "VXLAN extension %d out of range max %d", |
506 | type, OVS_VXLAN_EXT_MAX); | ||
507 | return -EINVAL; | ||
508 | } | ||
509 | |||
510 | if (!check_attr_len(nla_len(a), | ||
511 | ovs_vxlan_ext_key_lens[type].len)) { | ||
512 | OVS_NLERR(log, "VXLAN extension %d has unexpected len %d expected %d", | ||
513 | type, nla_len(a), | ||
514 | ovs_vxlan_ext_key_lens[type].len); | ||
515 | return -EINVAL; | ||
516 | } | ||
517 | |||
518 | switch (type) { | ||
519 | case OVS_VXLAN_EXT_GBP: | ||
520 | opts.gbp = nla_get_u32(a); | ||
521 | break; | ||
522 | default: | ||
523 | OVS_NLERR(log, "Unknown VXLAN extension attribute %d", | ||
524 | type); | ||
525 | return -EINVAL; | ||
526 | } | ||
527 | } | ||
528 | if (rem) { | ||
529 | OVS_NLERR(log, "VXLAN extension message has %d unknown bytes.", | ||
530 | rem); | ||
531 | return -EINVAL; | ||
532 | } | ||
499 | 533 | ||
500 | if (!is_mask) | 534 | if (!is_mask) |
501 | SW_FLOW_KEY_PUT(match, tun_opts_len, sizeof(opts), false); | 535 | SW_FLOW_KEY_PUT(match, tun_opts_len, sizeof(opts), false); |
@@ -528,8 +562,8 @@ static int ipv4_tun_from_nlattr(const struct nlattr *attr, | |||
528 | return -EINVAL; | 562 | return -EINVAL; |
529 | } | 563 | } |
530 | 564 | ||
531 | if (ovs_tunnel_key_lens[type].len != nla_len(a) && | 565 | if (!check_attr_len(nla_len(a), |
532 | ovs_tunnel_key_lens[type].len != OVS_ATTR_NESTED) { | 566 | ovs_tunnel_key_lens[type].len)) { |
533 | OVS_NLERR(log, "Tunnel attr %d has unexpected len %d expected %d", | 567 | OVS_NLERR(log, "Tunnel attr %d has unexpected len %d expected %d", |
534 | type, nla_len(a), ovs_tunnel_key_lens[type].len); | 568 | type, nla_len(a), ovs_tunnel_key_lens[type].len); |
535 | return -EINVAL; | 569 | return -EINVAL; |
@@ -1052,10 +1086,13 @@ static void nlattr_set(struct nlattr *attr, u8 val, | |||
1052 | 1086 | ||
1053 | /* The nlattr stream should already have been validated */ | 1087 | /* The nlattr stream should already have been validated */ |
1054 | nla_for_each_nested(nla, attr, rem) { | 1088 | nla_for_each_nested(nla, attr, rem) { |
1055 | if (tbl && tbl[nla_type(nla)].len == OVS_ATTR_NESTED) | 1089 | if (tbl[nla_type(nla)].len == OVS_ATTR_NESTED) { |
1056 | nlattr_set(nla, val, tbl[nla_type(nla)].next); | 1090 | if (tbl[nla_type(nla)].next) |
1057 | else | 1091 | tbl = tbl[nla_type(nla)].next; |
1092 | nlattr_set(nla, val, tbl); | ||
1093 | } else { | ||
1058 | memset(nla_data(nla), val, nla_len(nla)); | 1094 | memset(nla_data(nla), val, nla_len(nla)); |
1095 | } | ||
1059 | } | 1096 | } |
1060 | } | 1097 | } |
1061 | 1098 | ||
@@ -1922,8 +1959,7 @@ static int validate_set(const struct nlattr *a, | |||
1922 | key_len /= 2; | 1959 | key_len /= 2; |
1923 | 1960 | ||
1924 | if (key_type > OVS_KEY_ATTR_MAX || | 1961 | if (key_type > OVS_KEY_ATTR_MAX || |
1925 | (ovs_key_lens[key_type].len != key_len && | 1962 | !check_attr_len(key_len, ovs_key_lens[key_type].len)) |
1926 | ovs_key_lens[key_type].len != OVS_ATTR_NESTED)) | ||
1927 | return -EINVAL; | 1963 | return -EINVAL; |
1928 | 1964 | ||
1929 | if (masked && !validate_masked(nla_data(ovs_key), key_len)) | 1965 | if (masked && !validate_masked(nla_data(ovs_key), key_len)) |
diff --git a/net/openvswitch/flow_table.c b/net/openvswitch/flow_table.c index d22d8e948d0f..f2ea83ba4763 100644 --- a/net/openvswitch/flow_table.c +++ b/net/openvswitch/flow_table.c | |||
@@ -57,20 +57,21 @@ static u16 range_n_bytes(const struct sw_flow_key_range *range) | |||
57 | } | 57 | } |
58 | 58 | ||
59 | void ovs_flow_mask_key(struct sw_flow_key *dst, const struct sw_flow_key *src, | 59 | void ovs_flow_mask_key(struct sw_flow_key *dst, const struct sw_flow_key *src, |
60 | const struct sw_flow_mask *mask) | 60 | bool full, const struct sw_flow_mask *mask) |
61 | { | 61 | { |
62 | const long *m = (const long *)((const u8 *)&mask->key + | 62 | int start = full ? 0 : mask->range.start; |
63 | mask->range.start); | 63 | int len = full ? sizeof *dst : range_n_bytes(&mask->range); |
64 | const long *s = (const long *)((const u8 *)src + | 64 | const long *m = (const long *)((const u8 *)&mask->key + start); |
65 | mask->range.start); | 65 | const long *s = (const long *)((const u8 *)src + start); |
66 | long *d = (long *)((u8 *)dst + mask->range.start); | 66 | long *d = (long *)((u8 *)dst + start); |
67 | int i; | 67 | int i; |
68 | 68 | ||
69 | /* The memory outside of the 'mask->range' are not set since | 69 | /* If 'full' is true then all of 'dst' is fully initialized. Otherwise, |
70 | * further operations on 'dst' only uses contents within | 70 | * if 'full' is false the memory outside of the 'mask->range' is left |
71 | * 'mask->range'. | 71 | * uninitialized. This can be used as an optimization when further |
72 | * operations on 'dst' only use contents within 'mask->range'. | ||
72 | */ | 73 | */ |
73 | for (i = 0; i < range_n_bytes(&mask->range); i += sizeof(long)) | 74 | for (i = 0; i < len; i += sizeof(long)) |
74 | *d++ = *s++ & *m++; | 75 | *d++ = *s++ & *m++; |
75 | } | 76 | } |
76 | 77 | ||
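ovs_flow_mask_key() now either masks the whole key (full) or, as before, only the bytes inside mask->range, ANDing source and mask one long at a time. A standalone demonstration with a flat byte array standing in for struct sw_flow_key (a word-aligned range is assumed, as in the kernel):

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    #define KEY_WORDS 4
    #define KEY_BYTES ((int)(KEY_WORDS * sizeof(long)))

    static void mask_key(unsigned char *dst, const unsigned char *src,
                         const unsigned char *mask, bool full,
                         int start, int end)
    {
        int off = full ? 0 : start;
        int len = full ? KEY_BYTES : end - start;
        const long *s = (const long *)(src + off);
        const long *m = (const long *)(mask + off);
        long *d = (long *)(dst + off);

        for (int i = 0; i < len; i += (int)sizeof(long))
            *d++ = *s++ & *m++;     /* dst = src & mask, long-sized strides */
    }

    int main(void)
    {
        long srcw[KEY_WORDS], maskw[KEY_WORDS] = { 0 }, dstw[KEY_WORDS] = { 0 };
        unsigned char *src = (unsigned char *)srcw;
        unsigned char *mask = (unsigned char *)maskw;
        unsigned char *dst = (unsigned char *)dstw;

        memset(src, 0xab, KEY_BYTES);
        memset(mask + sizeof(long), 0xff, sizeof(long));    /* range = one word */
        mask_key(dst, src, mask, false,
                 (int)sizeof(long), (int)(2 * sizeof(long)));
        printf("%02x %02x\n", (unsigned)dst[0],
               (unsigned)dst[sizeof(long)]);                /* 00 ab */
        return 0;
    }
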
@@ -475,7 +476,7 @@ static struct sw_flow *masked_flow_lookup(struct table_instance *ti, | |||
475 | u32 hash; | 476 | u32 hash; |
476 | struct sw_flow_key masked_key; | 477 | struct sw_flow_key masked_key; |
477 | 478 | ||
478 | ovs_flow_mask_key(&masked_key, unmasked, mask); | 479 | ovs_flow_mask_key(&masked_key, unmasked, false, mask); |
479 | hash = flow_hash(&masked_key, &mask->range); | 480 | hash = flow_hash(&masked_key, &mask->range); |
480 | head = find_bucket(ti, hash); | 481 | head = find_bucket(ti, hash); |
481 | hlist_for_each_entry_rcu(flow, head, flow_table.node[ti->node_ver]) { | 482 | hlist_for_each_entry_rcu(flow, head, flow_table.node[ti->node_ver]) { |
diff --git a/net/openvswitch/flow_table.h b/net/openvswitch/flow_table.h index 616eda10d955..2dd9900f533d 100644 --- a/net/openvswitch/flow_table.h +++ b/net/openvswitch/flow_table.h | |||
@@ -86,5 +86,5 @@ struct sw_flow *ovs_flow_tbl_lookup_ufid(struct flow_table *, | |||
86 | bool ovs_flow_cmp(const struct sw_flow *, const struct sw_flow_match *); | 86 | bool ovs_flow_cmp(const struct sw_flow *, const struct sw_flow_match *); |
87 | 87 | ||
88 | void ovs_flow_mask_key(struct sw_flow_key *dst, const struct sw_flow_key *src, | 88 | void ovs_flow_mask_key(struct sw_flow_key *dst, const struct sw_flow_key *src, |
89 | const struct sw_flow_mask *mask); | 89 | bool full, const struct sw_flow_mask *mask); |
90 | #endif /* flow_table.h */ | 90 | #endif /* flow_table.h */ |
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index 7b8e39a22387..aa4b15c35884 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c | |||
@@ -230,6 +230,8 @@ struct packet_skb_cb { | |||
230 | } sa; | 230 | } sa; |
231 | }; | 231 | }; |
232 | 232 | ||
233 | #define vio_le() virtio_legacy_is_little_endian() | ||
234 | |||
233 | #define PACKET_SKB_CB(__skb) ((struct packet_skb_cb *)((__skb)->cb)) | 235 | #define PACKET_SKB_CB(__skb) ((struct packet_skb_cb *)((__skb)->cb)) |
234 | 236 | ||
235 | #define GET_PBDQC_FROM_RB(x) ((struct tpacket_kbdq_core *)(&(x)->prb_bdqc)) | 237 | #define GET_PBDQC_FROM_RB(x) ((struct tpacket_kbdq_core *)(&(x)->prb_bdqc)) |
@@ -2680,15 +2682,15 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len) | |||
2680 | goto out_unlock; | 2682 | goto out_unlock; |
2681 | 2683 | ||
2682 | if ((vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) && | 2684 | if ((vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) && |
2683 | (__virtio16_to_cpu(false, vnet_hdr.csum_start) + | 2685 | (__virtio16_to_cpu(vio_le(), vnet_hdr.csum_start) + |
2684 | __virtio16_to_cpu(false, vnet_hdr.csum_offset) + 2 > | 2686 | __virtio16_to_cpu(vio_le(), vnet_hdr.csum_offset) + 2 > |
2685 | __virtio16_to_cpu(false, vnet_hdr.hdr_len))) | 2687 | __virtio16_to_cpu(vio_le(), vnet_hdr.hdr_len))) |
2686 | vnet_hdr.hdr_len = __cpu_to_virtio16(false, | 2688 | vnet_hdr.hdr_len = __cpu_to_virtio16(vio_le(), |
2687 | __virtio16_to_cpu(false, vnet_hdr.csum_start) + | 2689 | __virtio16_to_cpu(vio_le(), vnet_hdr.csum_start) + |
2688 | __virtio16_to_cpu(false, vnet_hdr.csum_offset) + 2); | 2690 | __virtio16_to_cpu(vio_le(), vnet_hdr.csum_offset) + 2); |
2689 | 2691 | ||
2690 | err = -EINVAL; | 2692 | err = -EINVAL; |
2691 | if (__virtio16_to_cpu(false, vnet_hdr.hdr_len) > len) | 2693 | if (__virtio16_to_cpu(vio_le(), vnet_hdr.hdr_len) > len) |
2692 | goto out_unlock; | 2694 | goto out_unlock; |
2693 | 2695 | ||
2694 | if (vnet_hdr.gso_type != VIRTIO_NET_HDR_GSO_NONE) { | 2696 | if (vnet_hdr.gso_type != VIRTIO_NET_HDR_GSO_NONE) { |
@@ -2731,7 +2733,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len) | |||
2731 | hlen = LL_RESERVED_SPACE(dev); | 2733 | hlen = LL_RESERVED_SPACE(dev); |
2732 | tlen = dev->needed_tailroom; | 2734 | tlen = dev->needed_tailroom; |
2733 | skb = packet_alloc_skb(sk, hlen + tlen, hlen, len, | 2735 | skb = packet_alloc_skb(sk, hlen + tlen, hlen, len, |
2734 | __virtio16_to_cpu(false, vnet_hdr.hdr_len), | 2736 | __virtio16_to_cpu(vio_le(), vnet_hdr.hdr_len), |
2735 | msg->msg_flags & MSG_DONTWAIT, &err); | 2737 | msg->msg_flags & MSG_DONTWAIT, &err); |
2736 | if (skb == NULL) | 2738 | if (skb == NULL) |
2737 | goto out_unlock; | 2739 | goto out_unlock; |
@@ -2778,8 +2780,8 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len) | |||
2778 | 2780 | ||
2779 | if (po->has_vnet_hdr) { | 2781 | if (po->has_vnet_hdr) { |
2780 | if (vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) { | 2782 | if (vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) { |
2781 | u16 s = __virtio16_to_cpu(false, vnet_hdr.csum_start); | 2783 | u16 s = __virtio16_to_cpu(vio_le(), vnet_hdr.csum_start); |
2782 | u16 o = __virtio16_to_cpu(false, vnet_hdr.csum_offset); | 2784 | u16 o = __virtio16_to_cpu(vio_le(), vnet_hdr.csum_offset); |
2783 | if (!skb_partial_csum_set(skb, s, o)) { | 2785 | if (!skb_partial_csum_set(skb, s, o)) { |
2784 | err = -EINVAL; | 2786 | err = -EINVAL; |
2785 | goto out_free; | 2787 | goto out_free; |
@@ -2787,7 +2789,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len) | |||
2787 | } | 2789 | } |
2788 | 2790 | ||
2789 | skb_shinfo(skb)->gso_size = | 2791 | skb_shinfo(skb)->gso_size = |
2790 | __virtio16_to_cpu(false, vnet_hdr.gso_size); | 2792 | __virtio16_to_cpu(vio_le(), vnet_hdr.gso_size); |
2791 | skb_shinfo(skb)->gso_type = gso_type; | 2793 | skb_shinfo(skb)->gso_type = gso_type; |
2792 | 2794 | ||
2793 | /* Header must be checked, and gso_segs computed. */ | 2795 | /* Header must be checked, and gso_segs computed. */ |
@@ -3161,9 +3163,9 @@ static int packet_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, | |||
3161 | 3163 | ||
3162 | /* This is a hint as to how much should be linear. */ | 3164 | /* This is a hint as to how much should be linear. */ |
3163 | vnet_hdr.hdr_len = | 3165 | vnet_hdr.hdr_len = |
3164 | __cpu_to_virtio16(false, skb_headlen(skb)); | 3166 | __cpu_to_virtio16(vio_le(), skb_headlen(skb)); |
3165 | vnet_hdr.gso_size = | 3167 | vnet_hdr.gso_size = |
3166 | __cpu_to_virtio16(false, sinfo->gso_size); | 3168 | __cpu_to_virtio16(vio_le(), sinfo->gso_size); |
3167 | if (sinfo->gso_type & SKB_GSO_TCPV4) | 3169 | if (sinfo->gso_type & SKB_GSO_TCPV4) |
3168 | vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV4; | 3170 | vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV4; |
3169 | else if (sinfo->gso_type & SKB_GSO_TCPV6) | 3171 | else if (sinfo->gso_type & SKB_GSO_TCPV6) |
@@ -3181,9 +3183,9 @@ static int packet_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, | |||
3181 | 3183 | ||
3182 | if (skb->ip_summed == CHECKSUM_PARTIAL) { | 3184 | if (skb->ip_summed == CHECKSUM_PARTIAL) { |
3183 | vnet_hdr.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM; | 3185 | vnet_hdr.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM; |
3184 | vnet_hdr.csum_start = __cpu_to_virtio16(false, | 3186 | vnet_hdr.csum_start = __cpu_to_virtio16(vio_le(), |
3185 | skb_checksum_start_offset(skb)); | 3187 | skb_checksum_start_offset(skb)); |
3186 | vnet_hdr.csum_offset = __cpu_to_virtio16(false, | 3188 | vnet_hdr.csum_offset = __cpu_to_virtio16(vio_le(), |
3187 | skb->csum_offset); | 3189 | skb->csum_offset); |
3188 | } else if (skb->ip_summed == CHECKSUM_UNNECESSARY) { | 3190 | } else if (skb->ip_summed == CHECKSUM_UNNECESSARY) { |
3189 | vnet_hdr.flags = VIRTIO_NET_HDR_F_DATA_VALID; | 3191 | vnet_hdr.flags = VIRTIO_NET_HDR_F_DATA_VALID; |
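Every conversion in af_packet now takes vio_le() instead of a hard-coded false, pinning the vnet header to legacy-virtio byte order. A userspace model, with glibc's <endian.h> standing in for the kernel byteorder helpers and vio_le() mirroring virtio_legacy_is_little_endian() (legacy devices use guest-native order, so it is false only on big-endian builds):

    #include <endian.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    static inline bool vio_le(void)
    {
    #if __BYTE_ORDER == __BIG_ENDIAN
        return false;   /* legacy virtio follows guest-native ordering */
    #else
        return true;
    #endif
    }

    static inline uint16_t virtio16_to_cpu(bool little_endian, uint16_t v)
    {
        return little_endian ? le16toh(v) : be16toh(v);
    }

    int main(void)
    {
        /* A field as a legacy device would have written it: */
        uint16_t wire = vio_le() ? htole16(0x1234) : htobe16(0x1234);

        printf("0x%04x\n", virtio16_to_cpu(vio_le(), wire));   /* 0x1234 */
        return 0;
    }
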
diff --git a/net/sched/cls_fw.c b/net/sched/cls_fw.c index 715e01e5910a..f23a3b68bba6 100644 --- a/net/sched/cls_fw.c +++ b/net/sched/cls_fw.c | |||
@@ -33,7 +33,6 @@ | |||
33 | 33 | ||
34 | struct fw_head { | 34 | struct fw_head { |
35 | u32 mask; | 35 | u32 mask; |
36 | bool mask_set; | ||
37 | struct fw_filter __rcu *ht[HTSIZE]; | 36 | struct fw_filter __rcu *ht[HTSIZE]; |
38 | struct rcu_head rcu; | 37 | struct rcu_head rcu; |
39 | }; | 38 | }; |
@@ -84,7 +83,7 @@ static int fw_classify(struct sk_buff *skb, const struct tcf_proto *tp, | |||
84 | } | 83 | } |
85 | } | 84 | } |
86 | } else { | 85 | } else { |
87 | /* old method */ | 86 | /* Old method: classify the packet using its skb mark. */ |
88 | if (id && (TC_H_MAJ(id) == 0 || | 87 | if (id && (TC_H_MAJ(id) == 0 || |
89 | !(TC_H_MAJ(id ^ tp->q->handle)))) { | 88 | !(TC_H_MAJ(id ^ tp->q->handle)))) { |
90 | res->classid = id; | 89 | res->classid = id; |
@@ -114,14 +113,9 @@ static unsigned long fw_get(struct tcf_proto *tp, u32 handle) | |||
114 | 113 | ||
115 | static int fw_init(struct tcf_proto *tp) | 114 | static int fw_init(struct tcf_proto *tp) |
116 | { | 115 | { |
117 | struct fw_head *head; | 116 | /* We don't allocate fw_head here, because in the old method |
118 | | 117 | * we don't need it at all. |
119 | head = kzalloc(sizeof(struct fw_head), GFP_KERNEL); | 118 | */ |
120 | if (head == NULL) | ||
121 | return -ENOBUFS; | ||
122 | |||
123 | head->mask_set = false; | ||
124 | rcu_assign_pointer(tp->root, head); | ||
125 | return 0; | 119 | return 0; |
126 | } | 120 | } |
127 | 121 | ||
@@ -252,7 +246,7 @@ static int fw_change(struct net *net, struct sk_buff *in_skb, | |||
252 | int err; | 246 | int err; |
253 | 247 | ||
254 | if (!opt) | 248 | if (!opt) |
255 | return handle ? -EINVAL : 0; | 249 | return handle ? -EINVAL : 0; /* Succeed if it is old method. */ |
256 | 250 | ||
257 | err = nla_parse_nested(tb, TCA_FW_MAX, opt, fw_policy); | 251 | err = nla_parse_nested(tb, TCA_FW_MAX, opt, fw_policy); |
258 | if (err < 0) | 252 | if (err < 0) |
@@ -302,11 +296,17 @@ static int fw_change(struct net *net, struct sk_buff *in_skb, | |||
302 | if (!handle) | 296 | if (!handle) |
303 | return -EINVAL; | 297 | return -EINVAL; |
304 | 298 | ||
305 | if (!head->mask_set) { | 299 | if (!head) { |
306 | head->mask = 0xFFFFFFFF; | 300 | u32 mask = 0xFFFFFFFF; |
307 | if (tb[TCA_FW_MASK]) | 301 | if (tb[TCA_FW_MASK]) |
308 | head->mask = nla_get_u32(tb[TCA_FW_MASK]); | 302 | mask = nla_get_u32(tb[TCA_FW_MASK]); |
309 | head->mask_set = true; | 303 | |
304 | head = kzalloc(sizeof(*head), GFP_KERNEL); | ||
305 | if (!head) | ||
306 | return -ENOBUFS; | ||
307 | head->mask = mask; | ||
308 | |||
309 | rcu_assign_pointer(tp->root, head); | ||
310 | } | 310 | } |
311 | 311 | ||
312 | f = kzalloc(sizeof(struct fw_filter), GFP_KERNEL); | 312 | f = kzalloc(sizeof(struct fw_filter), GFP_KERNEL); |
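fw_init() no longer allocates the head; the first fw_change() builds it, fills in the mask, and only then publishes it with rcu_assign_pointer(), which also retires the mask_set flag. A runnable userspace analogue of that initialize-then-publish idiom (a C11 release store stands in for rcu_assign_pointer(); like the kernel path under RTNL, a single writer is assumed):

    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct head { unsigned int mask; };

    static _Atomic(struct head *) root;

    static struct head *get_or_create(unsigned int mask)
    {
        struct head *h = atomic_load_explicit(&root, memory_order_acquire);

        if (h)
            return h;
        h = calloc(1, sizeof(*h));
        if (!h)
            return NULL;
        h->mask = mask;                          /* initialize fully... */
        atomic_store_explicit(&root, h,          /* ...then publish */
                              memory_order_release);
        return h;
    }

    int main(void)
    {
        struct head *h = get_or_create(0xFFFFFFFFu);

        printf("mask=%08x\n", h ? h->mask : 0u);
        free(h);
        return 0;
    }
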
diff --git a/net/sctp/associola.c b/net/sctp/associola.c index 197c3f59ecbf..b00f1f9611d6 100644 --- a/net/sctp/associola.c +++ b/net/sctp/associola.c | |||
@@ -1208,20 +1208,22 @@ void sctp_assoc_update(struct sctp_association *asoc, | |||
1208 | * within this document. | 1208 | * within this document. |
1209 | * | 1209 | * |
1210 | * Our basic strategy is to round-robin transports in priorities | 1210 | * Our basic strategy is to round-robin transports in priorities |
1211 | * according to sctp_state_prio_map[] e.g., if no such | 1211 | * according to sctp_trans_score() e.g., if no such |
1212 | * transport with state SCTP_ACTIVE exists, round-robin through | 1212 | * transport with state SCTP_ACTIVE exists, round-robin through |
1213 | * SCTP_UNKNOWN, etc. You get the picture. | 1213 | * SCTP_UNKNOWN, etc. You get the picture. |
1214 | */ | 1214 | */ |
1215 | static const u8 sctp_trans_state_to_prio_map[] = { | ||
1216 | [SCTP_ACTIVE] = 3, /* best case */ | ||
1217 | [SCTP_UNKNOWN] = 2, | ||
1218 | [SCTP_PF] = 1, | ||
1219 | [SCTP_INACTIVE] = 0, /* worst case */ | ||
1220 | }; | ||
1221 | |||
1222 | static u8 sctp_trans_score(const struct sctp_transport *trans) | 1215 | static u8 sctp_trans_score(const struct sctp_transport *trans) |
1223 | { | 1216 | { |
1224 | return sctp_trans_state_to_prio_map[trans->state]; | 1217 | switch (trans->state) { |
1218 | case SCTP_ACTIVE: | ||
1219 | return 3; /* best case */ | ||
1220 | case SCTP_UNKNOWN: | ||
1221 | return 2; | ||
1222 | case SCTP_PF: | ||
1223 | return 1; | ||
1224 | default: /* case SCTP_INACTIVE */ | ||
1225 | return 0; /* worst case */ | ||
1226 | } | ||
1225 | } | 1227 | } |
1226 | 1228 | ||
1227 | static struct sctp_transport *sctp_trans_elect_tie(struct sctp_transport *trans1, | 1229 | static struct sctp_transport *sctp_trans_elect_tie(struct sctp_transport *trans1, |
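Replacing the state-indexed array with a switch means an unexpected trans->state value lands in the default (worst) bucket instead of reading past the end of the table. The same shape, standalone (enum values illustrative):

    #include <stdio.h>

    enum trans_state { T_INACTIVE, T_PF, T_UNKNOWN, T_ACTIVE, T_FUTURE_STATE };

    static unsigned char trans_score(enum trans_state s)
    {
        switch (s) {
        case T_ACTIVE:  return 3;   /* best case */
        case T_UNKNOWN: return 2;
        case T_PF:      return 1;
        default:        return 0;   /* worst case, incl. unknown values */
        }
    }

    int main(void)
    {
        printf("%u %u\n", trans_score(T_ACTIVE), trans_score(T_FUTURE_STATE));
        return 0;
    }
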
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c index b7143337e4fa..3d9ea9a48289 100644 --- a/net/sctp/protocol.c +++ b/net/sctp/protocol.c | |||
@@ -1186,7 +1186,7 @@ static void sctp_v4_del_protocol(void) | |||
1186 | unregister_inetaddr_notifier(&sctp_inetaddr_notifier); | 1186 | unregister_inetaddr_notifier(&sctp_inetaddr_notifier); |
1187 | } | 1187 | } |
1188 | 1188 | ||
1189 | static int __net_init sctp_net_init(struct net *net) | 1189 | static int __net_init sctp_defaults_init(struct net *net) |
1190 | { | 1190 | { |
1191 | int status; | 1191 | int status; |
1192 | 1192 | ||
@@ -1279,12 +1279,6 @@ static int __net_init sctp_net_init(struct net *net) | |||
1279 | 1279 | ||
1280 | sctp_dbg_objcnt_init(net); | 1280 | sctp_dbg_objcnt_init(net); |
1281 | 1281 | ||
1282 | /* Initialize the control inode/socket for handling OOTB packets. */ | ||
1283 | if ((status = sctp_ctl_sock_init(net))) { | ||
1284 | pr_err("Failed to initialize the SCTP control sock\n"); | ||
1285 | goto err_ctl_sock_init; | ||
1286 | } | ||
1287 | |||
1288 | /* Initialize the local address list. */ | 1282 | /* Initialize the local address list. */ |
1289 | INIT_LIST_HEAD(&net->sctp.local_addr_list); | 1283 | INIT_LIST_HEAD(&net->sctp.local_addr_list); |
1290 | spin_lock_init(&net->sctp.local_addr_lock); | 1284 | spin_lock_init(&net->sctp.local_addr_lock); |
@@ -1300,9 +1294,6 @@ static int __net_init sctp_net_init(struct net *net) | |||
1300 | 1294 | ||
1301 | return 0; | 1295 | return 0; |
1302 | 1296 | ||
1303 | err_ctl_sock_init: | ||
1304 | sctp_dbg_objcnt_exit(net); | ||
1305 | sctp_proc_exit(net); | ||
1306 | err_init_proc: | 1297 | err_init_proc: |
1307 | cleanup_sctp_mibs(net); | 1298 | cleanup_sctp_mibs(net); |
1308 | err_init_mibs: | 1299 | err_init_mibs: |
@@ -1311,15 +1302,12 @@ err_sysctl_register: | |||
1311 | return status; | 1302 | return status; |
1312 | } | 1303 | } |
1313 | 1304 | ||
1314 | static void __net_exit sctp_net_exit(struct net *net) | 1305 | static void __net_exit sctp_defaults_exit(struct net *net) |
1315 | { | 1306 | { |
1316 | /* Free the local address list */ | 1307 | /* Free the local address list */ |
1317 | sctp_free_addr_wq(net); | 1308 | sctp_free_addr_wq(net); |
1318 | sctp_free_local_addr_list(net); | 1309 | sctp_free_local_addr_list(net); |
1319 | 1310 | ||
1320 | /* Free the control endpoint. */ | ||
1321 | inet_ctl_sock_destroy(net->sctp.ctl_sock); | ||
1322 | |||
1323 | sctp_dbg_objcnt_exit(net); | 1311 | sctp_dbg_objcnt_exit(net); |
1324 | 1312 | ||
1325 | sctp_proc_exit(net); | 1313 | sctp_proc_exit(net); |
@@ -1327,9 +1315,32 @@ static void __net_exit sctp_net_exit(struct net *net) | |||
1327 | sctp_sysctl_net_unregister(net); | 1315 | sctp_sysctl_net_unregister(net); |
1328 | } | 1316 | } |
1329 | 1317 | ||
1330 | static struct pernet_operations sctp_net_ops = { | 1318 | static struct pernet_operations sctp_defaults_ops = { |
1331 | .init = sctp_net_init, | 1319 | .init = sctp_defaults_init, |
1332 | .exit = sctp_net_exit, | 1320 | .exit = sctp_defaults_exit, |
1321 | }; | ||
1322 | |||
1323 | static int __net_init sctp_ctrlsock_init(struct net *net) | ||
1324 | { | ||
1325 | int status; | ||
1326 | |||
1327 | /* Initialize the control inode/socket for handling OOTB packets. */ | ||
1328 | status = sctp_ctl_sock_init(net); | ||
1329 | if (status) | ||
1330 | pr_err("Failed to initialize the SCTP control sock\n"); | ||
1331 | |||
1332 | return status; | ||
1333 | } | ||
1334 | |||
1335 | static void __net_init sctp_ctrlsock_exit(struct net *net) | ||
1336 | { | ||
1337 | /* Free the control endpoint. */ | ||
1338 | inet_ctl_sock_destroy(net->sctp.ctl_sock); | ||
1339 | } | ||
1340 | |||
1341 | static struct pernet_operations sctp_ctrlsock_ops = { | ||
1342 | .init = sctp_ctrlsock_init, | ||
1343 | .exit = sctp_ctrlsock_exit, | ||
1333 | }; | 1344 | }; |
1334 | 1345 | ||
1335 | /* Initialize the universe into something sensible. */ | 1346 | /* Initialize the universe into something sensible. */ |
@@ -1462,8 +1473,11 @@ static __init int sctp_init(void) | |||
1462 | sctp_v4_pf_init(); | 1473 | sctp_v4_pf_init(); |
1463 | sctp_v6_pf_init(); | 1474 | sctp_v6_pf_init(); |
1464 | 1475 | ||
1465 | status = sctp_v4_protosw_init(); | 1476 | status = register_pernet_subsys(&sctp_defaults_ops); |
1477 | if (status) | ||
1478 | goto err_register_defaults; | ||
1466 | 1479 | ||
1480 | status = sctp_v4_protosw_init(); | ||
1467 | if (status) | 1481 | if (status) |
1468 | goto err_protosw_init; | 1482 | goto err_protosw_init; |
1469 | 1483 | ||
@@ -1471,9 +1485,9 @@ static __init int sctp_init(void) | |||
1471 | if (status) | 1485 | if (status) |
1472 | goto err_v6_protosw_init; | 1486 | goto err_v6_protosw_init; |
1473 | 1487 | ||
1474 | status = register_pernet_subsys(&sctp_net_ops); | 1488 | status = register_pernet_subsys(&sctp_ctrlsock_ops); |
1475 | if (status) | 1489 | if (status) |
1476 | goto err_register_pernet_subsys; | 1490 | goto err_register_ctrlsock; |
1477 | 1491 | ||
1478 | status = sctp_v4_add_protocol(); | 1492 | status = sctp_v4_add_protocol(); |
1479 | if (status) | 1493 | if (status) |
@@ -1489,12 +1503,14 @@ out: | |||
1489 | err_v6_add_protocol: | 1503 | err_v6_add_protocol: |
1490 | sctp_v4_del_protocol(); | 1504 | sctp_v4_del_protocol(); |
1491 | err_add_protocol: | 1505 | err_add_protocol: |
1492 | unregister_pernet_subsys(&sctp_net_ops); | 1506 | unregister_pernet_subsys(&sctp_ctrlsock_ops); |
1493 | err_register_pernet_subsys: | 1507 | err_register_ctrlsock: |
1494 | sctp_v6_protosw_exit(); | 1508 | sctp_v6_protosw_exit(); |
1495 | err_v6_protosw_init: | 1509 | err_v6_protosw_init: |
1496 | sctp_v4_protosw_exit(); | 1510 | sctp_v4_protosw_exit(); |
1497 | err_protosw_init: | 1511 | err_protosw_init: |
1512 | unregister_pernet_subsys(&sctp_defaults_ops); | ||
1513 | err_register_defaults: | ||
1498 | sctp_v4_pf_exit(); | 1514 | sctp_v4_pf_exit(); |
1499 | sctp_v6_pf_exit(); | 1515 | sctp_v6_pf_exit(); |
1500 | sctp_sysctl_unregister(); | 1516 | sctp_sysctl_unregister(); |
@@ -1527,12 +1543,14 @@ static __exit void sctp_exit(void) | |||
1527 | sctp_v6_del_protocol(); | 1543 | sctp_v6_del_protocol(); |
1528 | sctp_v4_del_protocol(); | 1544 | sctp_v4_del_protocol(); |
1529 | 1545 | ||
1530 | unregister_pernet_subsys(&sctp_net_ops); | 1546 | unregister_pernet_subsys(&sctp_ctrlsock_ops); |
1531 | 1547 | ||
1532 | /* Free protosw registrations */ | 1548 | /* Free protosw registrations */ |
1533 | sctp_v6_protosw_exit(); | 1549 | sctp_v6_protosw_exit(); |
1534 | sctp_v4_protosw_exit(); | 1550 | sctp_v4_protosw_exit(); |
1535 | 1551 | ||
1552 | unregister_pernet_subsys(&sctp_defaults_ops); | ||
1553 | |||
1536 | /* Unregister with socket layer. */ | 1554 | /* Unregister with socket layer. */ |
1537 | sctp_v6_pf_exit(); | 1555 | sctp_v6_pf_exit(); |
1538 | sctp_v4_pf_exit(); | 1556 | sctp_v4_pf_exit(); |
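Splitting sctp_net_ops in two lets init register per-net defaults before the protosws and the control socket after them, with teardown in strict reverse order. A compile-anywhere skeleton of that goto-unwind ordering (the stubs stand in for the SCTP registration calls):

    #include <stdio.h>

    static int reg_defaults(void)  { puts("defaults");  return 0; }
    static int reg_protosw(void)   { puts("protosw");   return 0; }
    static int reg_ctrlsock(void)  { puts("ctrlsock");  return 0; }
    static void unreg_defaults(void) { puts("-defaults"); }
    static void unreg_protosw(void)  { puts("-protosw"); }

    static int sctp_like_init(void)
    {
        int status;

        status = reg_defaults();
        if (status)
            goto err_defaults;
        status = reg_protosw();
        if (status)
            goto err_protosw;
        status = reg_ctrlsock();
        if (status)
            goto err_ctrlsock;
        return 0;

    err_ctrlsock:                /* unwind mirrors registration order */
        unreg_protosw();
    err_protosw:
        unreg_defaults();
    err_defaults:
        return status;
    }

    int main(void) { return sctp_like_init(); }
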
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c index 35df1266bf07..6098d4c42fa9 100644 --- a/net/sctp/sm_sideeffect.c +++ b/net/sctp/sm_sideeffect.c | |||
@@ -244,12 +244,13 @@ void sctp_generate_t3_rtx_event(unsigned long peer) | |||
244 | int error; | 244 | int error; |
245 | struct sctp_transport *transport = (struct sctp_transport *) peer; | 245 | struct sctp_transport *transport = (struct sctp_transport *) peer; |
246 | struct sctp_association *asoc = transport->asoc; | 246 | struct sctp_association *asoc = transport->asoc; |
247 | struct net *net = sock_net(asoc->base.sk); | 247 | struct sock *sk = asoc->base.sk; |
248 | struct net *net = sock_net(sk); | ||
248 | 249 | ||
249 | /* Check whether a task is in the sock. */ | 250 | /* Check whether a task is in the sock. */ |
250 | 251 | ||
251 | bh_lock_sock(asoc->base.sk); | 252 | bh_lock_sock(sk); |
252 | if (sock_owned_by_user(asoc->base.sk)) { | 253 | if (sock_owned_by_user(sk)) { |
253 | pr_debug("%s: sock is busy\n", __func__); | 254 | pr_debug("%s: sock is busy\n", __func__); |
254 | 255 | ||
255 | /* Try again later. */ | 256 | /* Try again later. */ |
@@ -272,10 +273,10 @@ void sctp_generate_t3_rtx_event(unsigned long peer) | |||
272 | transport, GFP_ATOMIC); | 273 | transport, GFP_ATOMIC); |
273 | 274 | ||
274 | if (error) | 275 | if (error) |
275 | asoc->base.sk->sk_err = -error; | 276 | sk->sk_err = -error; |
276 | 277 | ||
277 | out_unlock: | 278 | out_unlock: |
278 | bh_unlock_sock(asoc->base.sk); | 279 | bh_unlock_sock(sk); |
279 | sctp_transport_put(transport); | 280 | sctp_transport_put(transport); |
280 | } | 281 | } |
281 | 282 | ||
@@ -285,11 +286,12 @@ out_unlock: | |||
285 | static void sctp_generate_timeout_event(struct sctp_association *asoc, | 286 | static void sctp_generate_timeout_event(struct sctp_association *asoc, |
286 | sctp_event_timeout_t timeout_type) | 287 | sctp_event_timeout_t timeout_type) |
287 | { | 288 | { |
288 | struct net *net = sock_net(asoc->base.sk); | 289 | struct sock *sk = asoc->base.sk; |
290 | struct net *net = sock_net(sk); | ||
289 | int error = 0; | 291 | int error = 0; |
290 | 292 | ||
291 | bh_lock_sock(asoc->base.sk); | 293 | bh_lock_sock(sk); |
292 | if (sock_owned_by_user(asoc->base.sk)) { | 294 | if (sock_owned_by_user(sk)) { |
293 | pr_debug("%s: sock is busy: timer %d\n", __func__, | 295 | pr_debug("%s: sock is busy: timer %d\n", __func__, |
294 | timeout_type); | 296 | timeout_type); |
295 | 297 | ||
@@ -312,10 +314,10 @@ static void sctp_generate_timeout_event(struct sctp_association *asoc, | |||
312 | (void *)timeout_type, GFP_ATOMIC); | 314 | (void *)timeout_type, GFP_ATOMIC); |
313 | 315 | ||
314 | if (error) | 316 | if (error) |
315 | asoc->base.sk->sk_err = -error; | 317 | sk->sk_err = -error; |
316 | 318 | ||
317 | out_unlock: | 319 | out_unlock: |
318 | bh_unlock_sock(asoc->base.sk); | 320 | bh_unlock_sock(sk); |
319 | sctp_association_put(asoc); | 321 | sctp_association_put(asoc); |
320 | } | 322 | } |
321 | 323 | ||
@@ -365,10 +367,11 @@ void sctp_generate_heartbeat_event(unsigned long data) | |||
365 | int error = 0; | 367 | int error = 0; |
366 | struct sctp_transport *transport = (struct sctp_transport *) data; | 368 | struct sctp_transport *transport = (struct sctp_transport *) data; |
367 | struct sctp_association *asoc = transport->asoc; | 369 | struct sctp_association *asoc = transport->asoc; |
368 | struct net *net = sock_net(asoc->base.sk); | 370 | struct sock *sk = asoc->base.sk; |
371 | struct net *net = sock_net(sk); | ||
369 | 372 | ||
370 | bh_lock_sock(asoc->base.sk); | 373 | bh_lock_sock(sk); |
371 | if (sock_owned_by_user(asoc->base.sk)) { | 374 | if (sock_owned_by_user(sk)) { |
372 | pr_debug("%s: sock is busy\n", __func__); | 375 | pr_debug("%s: sock is busy\n", __func__); |
373 | 376 | ||
374 | /* Try again later. */ | 377 | /* Try again later. */ |
@@ -388,11 +391,11 @@ void sctp_generate_heartbeat_event(unsigned long data) | |||
388 | asoc->state, asoc->ep, asoc, | 391 | asoc->state, asoc->ep, asoc, |
389 | transport, GFP_ATOMIC); | 392 | transport, GFP_ATOMIC); |
390 | 393 | ||
391 | if (error) | 394 | if (error) |
392 | asoc->base.sk->sk_err = -error; | 395 | sk->sk_err = -error; |
393 | 396 | ||
394 | out_unlock: | 397 | out_unlock: |
395 | bh_unlock_sock(asoc->base.sk); | 398 | bh_unlock_sock(sk); |
396 | sctp_transport_put(transport); | 399 | sctp_transport_put(transport); |
397 | } | 400 | } |
398 | 401 | ||
@@ -403,10 +406,11 @@ void sctp_generate_proto_unreach_event(unsigned long data) | |||
403 | { | 406 | { |
404 | struct sctp_transport *transport = (struct sctp_transport *) data; | 407 | struct sctp_transport *transport = (struct sctp_transport *) data; |
405 | struct sctp_association *asoc = transport->asoc; | 408 | struct sctp_association *asoc = transport->asoc; |
406 | struct net *net = sock_net(asoc->base.sk); | 409 | struct sock *sk = asoc->base.sk; |
410 | struct net *net = sock_net(sk); | ||
407 | 411 | ||
408 | bh_lock_sock(asoc->base.sk); | 412 | bh_lock_sock(sk); |
409 | if (sock_owned_by_user(asoc->base.sk)) { | 413 | if (sock_owned_by_user(sk)) { |
410 | pr_debug("%s: sock is busy\n", __func__); | 414 | pr_debug("%s: sock is busy\n", __func__); |
411 | 415 | ||
412 | /* Try again later. */ | 416 | /* Try again later. */ |
@@ -427,7 +431,7 @@ void sctp_generate_proto_unreach_event(unsigned long data) | |||
427 | asoc->state, asoc->ep, asoc, transport, GFP_ATOMIC); | 431 | asoc->state, asoc->ep, asoc, transport, GFP_ATOMIC); |
428 | 432 | ||
429 | out_unlock: | 433 | out_unlock: |
430 | bh_unlock_sock(asoc->base.sk); | 434 | bh_unlock_sock(sk); |
431 | sctp_association_put(asoc); | 435 | sctp_association_put(asoc); |
432 | } | 436 | } |
433 | 437 | ||
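Each handler above snapshots asoc->base.sk once and uses the local for the whole window, so bh_unlock_sock() is guaranteed to act on the very sock that bh_lock_sock() took, even if the association is migrated to another socket in between. A pthread illustration of why the snapshot matters (types hypothetical):

    #include <pthread.h>

    struct obj { pthread_mutex_t m; };

    static struct obj *shared;            /* may be re-pointed concurrently */

    static void work_buggy(void)
    {
        pthread_mutex_lock(&shared->m);   /* re-reads the shared pointer... */
        pthread_mutex_unlock(&shared->m); /* ...may unlock a different mutex */
    }

    static void work_fixed(void)
    {
        struct obj *o = shared;           /* snapshot, like sk = asoc->base.sk */

        pthread_mutex_lock(&o->m);
        pthread_mutex_unlock(&o->m);      /* provably the mutex we locked */
    }

    int main(void)
    {
        struct obj tmp = { PTHREAD_MUTEX_INITIALIZER };

        shared = &tmp;
        work_buggy();    /* harmless single-threaded; racy with re-pointing */
        work_fixed();
        return 0;
    }
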
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c index b140c092d226..f14f24ee9983 100644 --- a/net/sunrpc/sched.c +++ b/net/sunrpc/sched.c | |||
@@ -297,7 +297,7 @@ static int rpc_complete_task(struct rpc_task *task) | |||
297 | clear_bit(RPC_TASK_ACTIVE, &task->tk_runstate); | 297 | clear_bit(RPC_TASK_ACTIVE, &task->tk_runstate); |
298 | ret = atomic_dec_and_test(&task->tk_count); | 298 | ret = atomic_dec_and_test(&task->tk_count); |
299 | if (waitqueue_active(wq)) | 299 | if (waitqueue_active(wq)) |
300 | __wake_up_locked_key(wq, TASK_NORMAL, 1, &k); | 300 | __wake_up_locked_key(wq, TASK_NORMAL, &k); |
301 | spin_unlock_irqrestore(&wq->lock, flags); | 301 | spin_unlock_irqrestore(&wq->lock, flags); |
302 | return ret; | 302 | return ret; |
303 | } | 303 | } |
@@ -1092,14 +1092,10 @@ void | |||
1092 | rpc_destroy_mempool(void) | 1092 | rpc_destroy_mempool(void) |
1093 | { | 1093 | { |
1094 | rpciod_stop(); | 1094 | rpciod_stop(); |
1095 | if (rpc_buffer_mempool) | 1095 | mempool_destroy(rpc_buffer_mempool); |
1096 | mempool_destroy(rpc_buffer_mempool); | 1096 | mempool_destroy(rpc_task_mempool); |
1097 | if (rpc_task_mempool) | 1097 | kmem_cache_destroy(rpc_task_slabp); |
1098 | mempool_destroy(rpc_task_mempool); | 1098 | kmem_cache_destroy(rpc_buffer_slabp); |
1099 | if (rpc_task_slabp) | ||
1100 | kmem_cache_destroy(rpc_task_slabp); | ||
1101 | if (rpc_buffer_slabp) | ||
1102 | kmem_cache_destroy(rpc_buffer_slabp); | ||
1103 | rpc_destroy_wait_queue(&delay_queue); | 1099 | rpc_destroy_wait_queue(&delay_queue); |
1104 | } | 1100 | } |
1105 | 1101 | ||
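The NULL checks can go because mempool_destroy() and kmem_cache_destroy() accept NULL, the same convention free() has in userspace:

    #include <stdlib.h>

    struct pool { int x; };

    static void pool_destroy(struct pool *p)
    {
        free(p);                 /* free(NULL) is defined to be a no-op */
    }

    int main(void)
    {
        struct pool *a = malloc(sizeof(*a));
        struct pool *b = NULL;   /* e.g. an allocation that never happened */

        pool_destroy(a);
        pool_destroy(b);         /* safe without a NULL check at the call site */
        return 0;
    }
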
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c index ab5dd621ae0c..2e98f4a243e5 100644 --- a/net/sunrpc/xprt.c +++ b/net/sunrpc/xprt.c | |||
@@ -614,6 +614,7 @@ static void xprt_autoclose(struct work_struct *work) | |||
614 | clear_bit(XPRT_CLOSE_WAIT, &xprt->state); | 614 | clear_bit(XPRT_CLOSE_WAIT, &xprt->state); |
615 | xprt->ops->close(xprt); | 615 | xprt->ops->close(xprt); |
616 | xprt_release_write(xprt, NULL); | 616 | xprt_release_write(xprt, NULL); |
617 | wake_up_bit(&xprt->state, XPRT_LOCKED); | ||
617 | } | 618 | } |
618 | 619 | ||
619 | /** | 620 | /** |
@@ -723,6 +724,7 @@ void xprt_unlock_connect(struct rpc_xprt *xprt, void *cookie) | |||
723 | xprt->ops->release_xprt(xprt, NULL); | 724 | xprt->ops->release_xprt(xprt, NULL); |
724 | out: | 725 | out: |
725 | spin_unlock_bh(&xprt->transport_lock); | 726 | spin_unlock_bh(&xprt->transport_lock); |
727 | wake_up_bit(&xprt->state, XPRT_LOCKED); | ||
726 | } | 728 | } |
727 | 729 | ||
728 | /** | 730 | /** |
@@ -1394,6 +1396,10 @@ out: | |||
1394 | static void xprt_destroy(struct rpc_xprt *xprt) | 1396 | static void xprt_destroy(struct rpc_xprt *xprt) |
1395 | { | 1397 | { |
1396 | dprintk("RPC: destroying transport %p\n", xprt); | 1398 | dprintk("RPC: destroying transport %p\n", xprt); |
1399 | |||
1400 | /* Exclude transport connect/disconnect handlers */ | ||
1401 | wait_on_bit_lock(&xprt->state, XPRT_LOCKED, TASK_UNINTERRUPTIBLE); | ||
1402 | |||
1397 | del_timer_sync(&xprt->timer); | 1403 | del_timer_sync(&xprt->timer); |
1398 | 1404 | ||
1399 | rpc_xprt_debugfs_unregister(xprt); | 1405 | rpc_xprt_debugfs_unregister(xprt); |
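xprt_destroy() now takes the XPRT_LOCKED bit with wait_on_bit_lock() to exclude connect/disconnect handlers, and the paths that drop the lock wake waiters with wake_up_bit(). A pthread analogue of that bit-lock pairing (the kernel waits on a bit in xprt->state; a mutex/condvar pair stands in here):

    #include <pthread.h>
    #include <stdbool.h>

    static pthread_mutex_t lk = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  cv = PTHREAD_COND_INITIALIZER;
    static bool locked;

    static void bit_lock(void)              /* ~ wait_on_bit_lock() */
    {
        pthread_mutex_lock(&lk);
        while (locked)
            pthread_cond_wait(&cv, &lk);
        locked = true;
        pthread_mutex_unlock(&lk);
    }

    static void bit_unlock(void)            /* ~ clear bit + wake_up_bit() */
    {
        pthread_mutex_lock(&lk);
        locked = false;
        pthread_cond_broadcast(&cv);
        pthread_mutex_unlock(&lk);
    }

    int main(void)
    {
        bit_lock();     /* destroy: exclude connect/disconnect handlers */
        bit_unlock();
        return 0;
    }
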
diff --git a/net/sunrpc/xprtrdma/fmr_ops.c b/net/sunrpc/xprtrdma/fmr_ops.c index cb25c89da623..f1e8dafbd507 100644 --- a/net/sunrpc/xprtrdma/fmr_ops.c +++ b/net/sunrpc/xprtrdma/fmr_ops.c | |||
@@ -39,25 +39,6 @@ static int | |||
39 | fmr_op_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep, | 39 | fmr_op_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep, |
40 | struct rpcrdma_create_data_internal *cdata) | 40 | struct rpcrdma_create_data_internal *cdata) |
41 | { | 41 | { |
42 | struct ib_device_attr *devattr = &ia->ri_devattr; | ||
43 | struct ib_mr *mr; | ||
44 | |||
45 | /* Obtain an lkey to use for the regbufs, which are | ||
46 | * protected from remote access. | ||
47 | */ | ||
48 | if (devattr->device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY) { | ||
49 | ia->ri_dma_lkey = ia->ri_device->local_dma_lkey; | ||
50 | } else { | ||
51 | mr = ib_get_dma_mr(ia->ri_pd, IB_ACCESS_LOCAL_WRITE); | ||
52 | if (IS_ERR(mr)) { | ||
53 | pr_err("%s: ib_get_dma_mr for failed with %lX\n", | ||
54 | __func__, PTR_ERR(mr)); | ||
55 | return -ENOMEM; | ||
56 | } | ||
57 | ia->ri_dma_lkey = ia->ri_dma_mr->lkey; | ||
58 | ia->ri_dma_mr = mr; | ||
59 | } | ||
60 | |||
61 | return 0; | 42 | return 0; |
62 | } | 43 | } |
63 | 44 | ||
diff --git a/net/sunrpc/xprtrdma/frwr_ops.c b/net/sunrpc/xprtrdma/frwr_ops.c index d6653f5d0830..5318951b3b53 100644 --- a/net/sunrpc/xprtrdma/frwr_ops.c +++ b/net/sunrpc/xprtrdma/frwr_ops.c | |||
@@ -189,11 +189,6 @@ frwr_op_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep, | |||
189 | struct ib_device_attr *devattr = &ia->ri_devattr; | 189 | struct ib_device_attr *devattr = &ia->ri_devattr; |
190 | int depth, delta; | 190 | int depth, delta; |
191 | 191 | ||
192 | /* Obtain an lkey to use for the regbufs, which are | ||
193 | * protected from remote access. | ||
194 | */ | ||
195 | ia->ri_dma_lkey = ia->ri_device->local_dma_lkey; | ||
196 | |||
197 | ia->ri_max_frmr_depth = | 192 | ia->ri_max_frmr_depth = |
198 | min_t(unsigned int, RPCRDMA_MAX_DATA_SEGS, | 193 | min_t(unsigned int, RPCRDMA_MAX_DATA_SEGS, |
199 | devattr->max_fast_reg_page_list_len); | 194 | devattr->max_fast_reg_page_list_len); |
diff --git a/net/sunrpc/xprtrdma/physical_ops.c b/net/sunrpc/xprtrdma/physical_ops.c index 72cf8b15bbb4..617b76f22154 100644 --- a/net/sunrpc/xprtrdma/physical_ops.c +++ b/net/sunrpc/xprtrdma/physical_ops.c | |||
@@ -23,7 +23,6 @@ static int | |||
23 | physical_op_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep, | 23 | physical_op_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep, |
24 | struct rpcrdma_create_data_internal *cdata) | 24 | struct rpcrdma_create_data_internal *cdata) |
25 | { | 25 | { |
26 | struct ib_device_attr *devattr = &ia->ri_devattr; | ||
27 | struct ib_mr *mr; | 26 | struct ib_mr *mr; |
28 | 27 | ||
29 | /* Obtain an rkey to use for RPC data payloads. | 28 | /* Obtain an rkey to use for RPC data payloads. |
@@ -37,15 +36,8 @@ physical_op_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep, | |||
37 | __func__, PTR_ERR(mr)); | 36 | __func__, PTR_ERR(mr)); |
38 | return -ENOMEM; | 37 | return -ENOMEM; |
39 | } | 38 | } |
40 | ia->ri_dma_mr = mr; | ||
41 | |||
42 | /* Obtain an lkey to use for regbufs. | ||
43 | */ | ||
44 | if (devattr->device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY) | ||
45 | ia->ri_dma_lkey = ia->ri_device->local_dma_lkey; | ||
46 | else | ||
47 | ia->ri_dma_lkey = ia->ri_dma_mr->lkey; | ||
48 | 39 | ||
40 | ia->ri_dma_mr = mr; | ||
49 | return 0; | 41 | return 0; |
50 | } | 42 | } |
51 | 43 | ||
diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c index 682996779970..eb081ad05e33 100644 --- a/net/sunrpc/xprtrdma/verbs.c +++ b/net/sunrpc/xprtrdma/verbs.c | |||
@@ -1252,7 +1252,7 @@ rpcrdma_alloc_regbuf(struct rpcrdma_ia *ia, size_t size, gfp_t flags) | |||
1252 | goto out_free; | 1252 | goto out_free; |
1253 | 1253 | ||
1254 | iov->length = size; | 1254 | iov->length = size; |
1255 | iov->lkey = ia->ri_dma_lkey; | 1255 | iov->lkey = ia->ri_pd->local_dma_lkey; |
1256 | rb->rg_size = size; | 1256 | rb->rg_size = size; |
1257 | rb->rg_owner = NULL; | 1257 | rb->rg_owner = NULL; |
1258 | return rb; | 1258 | return rb; |
diff --git a/net/sunrpc/xprtrdma/xprt_rdma.h b/net/sunrpc/xprtrdma/xprt_rdma.h index 02512221b8bc..c09414e6f91b 100644 --- a/net/sunrpc/xprtrdma/xprt_rdma.h +++ b/net/sunrpc/xprtrdma/xprt_rdma.h | |||
@@ -65,7 +65,6 @@ struct rpcrdma_ia { | |||
65 | struct rdma_cm_id *ri_id; | 65 | struct rdma_cm_id *ri_id; |
66 | struct ib_pd *ri_pd; | 66 | struct ib_pd *ri_pd; |
67 | struct ib_mr *ri_dma_mr; | 67 | struct ib_mr *ri_dma_mr; |
68 | u32 ri_dma_lkey; | ||
69 | struct completion ri_done; | 68 | struct completion ri_done; |
70 | int ri_async_rc; | 69 | int ri_async_rc; |
71 | unsigned int ri_max_frmr_depth; | 70 | unsigned int ri_max_frmr_depth; |
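Across fmr_ops, frwr_ops, physical_ops and verbs.c the cached ri_dma_lkey is dropped in favour of reading the protection domain's local_dma_lkey at the point of use. The shape of the cleanup, with hypothetical stand-in types:

    struct pd { unsigned int local_dma_lkey; };
    struct ia { struct pd *ri_pd; /* u32 ri_dma_lkey: removed */ };

    /* Read the lkey where it is needed instead of caching a copy. */
    static unsigned int regbuf_lkey(const struct ia *ia)
    {
        return ia->ri_pd->local_dma_lkey;
    }

    int main(void)
    {
        struct pd pd = { 42 };
        struct ia ia = { &pd };

        return regbuf_lkey(&ia) == 42 ? 0 : 1;
    }
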
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index 7be90bc1a7c2..1a85e0ed0b48 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c | |||
@@ -777,7 +777,6 @@ static void xs_sock_mark_closed(struct rpc_xprt *xprt) | |||
777 | xs_sock_reset_connection_flags(xprt); | 777 | xs_sock_reset_connection_flags(xprt); |
778 | /* Mark transport as closed and wake up all pending tasks */ | 778 | /* Mark transport as closed and wake up all pending tasks */ |
779 | xprt_disconnect_done(xprt); | 779 | xprt_disconnect_done(xprt); |
780 | xprt_force_disconnect(xprt); | ||
781 | } | 780 | } |
782 | 781 | ||
783 | /** | 782 | /** |
@@ -881,8 +880,11 @@ static void xs_xprt_free(struct rpc_xprt *xprt) | |||
881 | */ | 880 | */ |
882 | static void xs_destroy(struct rpc_xprt *xprt) | 881 | static void xs_destroy(struct rpc_xprt *xprt) |
883 | { | 882 | { |
883 | struct sock_xprt *transport = container_of(xprt, | ||
884 | struct sock_xprt, xprt); | ||
884 | dprintk("RPC: xs_destroy xprt %p\n", xprt); | 885 | dprintk("RPC: xs_destroy xprt %p\n", xprt); |
885 | 886 | ||
887 | cancel_delayed_work_sync(&transport->connect_worker); | ||
886 | xs_close(xprt); | 888 | xs_close(xprt); |
887 | xs_xprt_free(xprt); | 889 | xs_xprt_free(xprt); |
888 | module_put(THIS_MODULE); | 890 | module_put(THIS_MODULE); |
@@ -1435,6 +1437,7 @@ out: | |||
1435 | static void xs_tcp_state_change(struct sock *sk) | 1437 | static void xs_tcp_state_change(struct sock *sk) |
1436 | { | 1438 | { |
1437 | struct rpc_xprt *xprt; | 1439 | struct rpc_xprt *xprt; |
1440 | struct sock_xprt *transport; | ||
1438 | 1441 | ||
1439 | read_lock_bh(&sk->sk_callback_lock); | 1442 | read_lock_bh(&sk->sk_callback_lock); |
1440 | if (!(xprt = xprt_from_sock(sk))) | 1443 | if (!(xprt = xprt_from_sock(sk))) |
@@ -1446,13 +1449,12 @@ static void xs_tcp_state_change(struct sock *sk) | |||
1446 | sock_flag(sk, SOCK_ZAPPED), | 1449 | sock_flag(sk, SOCK_ZAPPED), |
1447 | sk->sk_shutdown); | 1450 | sk->sk_shutdown); |
1448 | 1451 | ||
1452 | transport = container_of(xprt, struct sock_xprt, xprt); | ||
1449 | trace_rpc_socket_state_change(xprt, sk->sk_socket); | 1453 | trace_rpc_socket_state_change(xprt, sk->sk_socket); |
1450 | switch (sk->sk_state) { | 1454 | switch (sk->sk_state) { |
1451 | case TCP_ESTABLISHED: | 1455 | case TCP_ESTABLISHED: |
1452 | spin_lock(&xprt->transport_lock); | 1456 | spin_lock(&xprt->transport_lock); |
1453 | if (!xprt_test_and_set_connected(xprt)) { | 1457 | if (!xprt_test_and_set_connected(xprt)) { |
1454 | struct sock_xprt *transport = container_of(xprt, | ||
1455 | struct sock_xprt, xprt); | ||
1456 | 1458 | ||
1457 | /* Reset TCP record info */ | 1459 | /* Reset TCP record info */ |
1458 | transport->tcp_offset = 0; | 1460 | transport->tcp_offset = 0; |
@@ -1461,6 +1463,8 @@ static void xs_tcp_state_change(struct sock *sk) | |||
1461 | transport->tcp_flags = | 1463 | transport->tcp_flags = |
1462 | TCP_RCV_COPY_FRAGHDR | TCP_RCV_COPY_XID; | 1464 | TCP_RCV_COPY_FRAGHDR | TCP_RCV_COPY_XID; |
1463 | xprt->connect_cookie++; | 1465 | xprt->connect_cookie++; |
1466 | clear_bit(XPRT_SOCK_CONNECTING, &transport->sock_state); | ||
1467 | xprt_clear_connecting(xprt); | ||
1464 | 1468 | ||
1465 | xprt_wake_pending_tasks(xprt, -EAGAIN); | 1469 | xprt_wake_pending_tasks(xprt, -EAGAIN); |
1466 | } | 1470 | } |
@@ -1496,6 +1500,9 @@ static void xs_tcp_state_change(struct sock *sk) | |||
1496 | smp_mb__after_atomic(); | 1500 | smp_mb__after_atomic(); |
1497 | break; | 1501 | break; |
1498 | case TCP_CLOSE: | 1502 | case TCP_CLOSE: |
1503 | if (test_and_clear_bit(XPRT_SOCK_CONNECTING, | ||
1504 | &transport->sock_state)) | ||
1505 | xprt_clear_connecting(xprt); | ||
1499 | xs_sock_mark_closed(xprt); | 1506 | xs_sock_mark_closed(xprt); |
1500 | } | 1507 | } |
1501 | out: | 1508 | out: |
@@ -2179,6 +2186,7 @@ static int xs_tcp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock) | |||
2179 | /* Tell the socket layer to start connecting... */ | 2186 | /* Tell the socket layer to start connecting... */ |
2180 | xprt->stat.connect_count++; | 2187 | xprt->stat.connect_count++; |
2181 | xprt->stat.connect_start = jiffies; | 2188 | xprt->stat.connect_start = jiffies; |
2189 | set_bit(XPRT_SOCK_CONNECTING, &transport->sock_state); | ||
2182 | ret = kernel_connect(sock, xs_addr(xprt), xprt->addrlen, O_NONBLOCK); | 2190 | ret = kernel_connect(sock, xs_addr(xprt), xprt->addrlen, O_NONBLOCK); |
2183 | switch (ret) { | 2191 | switch (ret) { |
2184 | case 0: | 2192 | case 0: |
@@ -2240,7 +2248,6 @@ static void xs_tcp_setup_socket(struct work_struct *work) | |||
2240 | case -EINPROGRESS: | 2248 | case -EINPROGRESS: |
2241 | case -EALREADY: | 2249 | case -EALREADY: |
2242 | xprt_unlock_connect(xprt, transport); | 2250 | xprt_unlock_connect(xprt, transport); |
2243 | xprt_clear_connecting(xprt); | ||
2244 | return; | 2251 | return; |
2245 | case -EINVAL: | 2252 | case -EINVAL: |
2246 | /* Happens, for instance, if the user specified a link | 2253 | /* Happens, for instance, if the user specified a link |
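The new XPRT_SOCK_CONNECTING bit is set when the connect is kicked off and cleared exactly once by whichever event resolves it (TCP_ESTABLISHED or TCP_CLOSE), via test_and_clear_bit(). A standalone model using a C11 atomic exchange in place of the kernel bit ops:

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int connecting;

    static void connect_start(void)
    {
        atomic_store(&connecting, 1);      /* ~ set_bit(XPRT_SOCK_CONNECTING) */
    }

    static int connect_done(void)
    {
        /* ~ test_and_clear_bit(): only the first observer wins */
        return atomic_exchange(&connecting, 0);
    }

    int main(void)
    {
        connect_start();
        printf("%d %d\n", connect_done(), connect_done());   /* 1 0 */
        return 0;
    }
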
diff --git a/net/tipc/msg.c b/net/tipc/msg.c index 562c926a51cc..c5ac436235e0 100644 --- a/net/tipc/msg.c +++ b/net/tipc/msg.c | |||
@@ -539,6 +539,7 @@ bool tipc_msg_lookup_dest(struct net *net, struct sk_buff *skb, int *err) | |||
539 | *err = -TIPC_ERR_NO_NAME; | 539 | *err = -TIPC_ERR_NO_NAME; |
540 | if (skb_linearize(skb)) | 540 | if (skb_linearize(skb)) |
541 | return false; | 541 | return false; |
542 | msg = buf_msg(skb); | ||
542 | if (msg_reroute_cnt(msg)) | 543 | if (msg_reroute_cnt(msg)) |
543 | return false; | 544 | return false; |
544 | dnode = addr_domain(net, msg_lookup_scope(msg)); | 545 | dnode = addr_domain(net, msg_lookup_scope(msg)); |
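skb_linearize() may reallocate the skb data, so the cached msg pointer has to be re-derived with buf_msg(skb) before it is used again; this is the kernel flavour of the classic stale-pointer-after-realloc() bug:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    int main(void)
    {
        char *buf = malloc(16);
        char *hdr, *grown;

        if (!buf)
            return 1;
        strcpy(buf, "hdr");
        hdr = buf;                       /* cached pointer into the buffer */

        grown = realloc(buf, 1 << 20);   /* may move the data, like skb_linearize() */
        if (!grown)
            return 1;
        buf = grown;
        hdr = buf;                       /* the fix: re-derive, like msg = buf_msg(skb) */

        printf("%s\n", hdr);
        free(buf);
        return 0;
    }
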
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index 03ee4d359f6a..ef31b40ad550 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c | |||
@@ -2179,8 +2179,21 @@ unlock: | |||
2179 | if (UNIXCB(skb).fp) | 2179 | if (UNIXCB(skb).fp) |
2180 | scm.fp = scm_fp_dup(UNIXCB(skb).fp); | 2180 | scm.fp = scm_fp_dup(UNIXCB(skb).fp); |
2181 | 2181 | ||
2182 | sk_peek_offset_fwd(sk, chunk); | 2182 | if (skip) { |
2183 | sk_peek_offset_fwd(sk, chunk); | ||
2184 | skip -= chunk; | ||
2185 | } | ||
2183 | 2186 | ||
2187 | if (UNIXCB(skb).fp) | ||
2188 | break; | ||
2189 | |||
2190 | last = skb; | ||
2191 | last_len = skb->len; | ||
2192 | unix_state_lock(sk); | ||
2193 | skb = skb_peek_next(skb, &sk->sk_receive_queue); | ||
2194 | if (skb) | ||
2195 | goto again; | ||
2196 | unix_state_unlock(sk); | ||
2184 | break; | 2197 | break; |
2185 | } | 2198 | } |
2186 | } while (size); | 2199 | } while (size); |
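With the af_unix change, a MSG_PEEK read on a stream socket walks the receive queue across multiple skbs (stopping early only when fds are attached) instead of returning just the first one. A small userspace check of the behaviour (prints 6 bytes on kernels with this fix, 3 on older ones):

    #include <stdio.h>
    #include <string.h>
    #include <sys/socket.h>
    #include <unistd.h>

    int main(void)
    {
        int sv[2];
        char buf[16] = { 0 };
        ssize_t n;

        if (socketpair(AF_UNIX, SOCK_STREAM, 0, sv))
            return 1;
        if (write(sv[0], "abc", 3) != 3 || write(sv[0], "def", 3) != 3)
            return 1;                          /* two writes, two queued skbs */

        n = recv(sv[1], buf, sizeof(buf) - 1, MSG_PEEK);
        printf("peeked %zd: %s\n", n, buf);    /* 6: abcdef with this fix */

        close(sv[0]);
        close(sv[1]);
        return 0;
    }
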