Diffstat (limited to 'net')
40 files changed, 465 insertions, 272 deletions
diff --git a/net/bridge/br_mdb.c b/net/bridge/br_mdb.c
index 253bc77eda3b..7dbc80d01eb0 100644
--- a/net/bridge/br_mdb.c
+++ b/net/bridge/br_mdb.c
@@ -61,6 +61,19 @@ static void __mdb_entry_fill_flags(struct br_mdb_entry *e, unsigned char flags)
 		e->flags |= MDB_FLAGS_OFFLOAD;
 }
 
+static void __mdb_entry_to_br_ip(struct br_mdb_entry *entry, struct br_ip *ip)
+{
+	memset(ip, 0, sizeof(struct br_ip));
+	ip->vid = entry->vid;
+	ip->proto = entry->addr.proto;
+	if (ip->proto == htons(ETH_P_IP))
+		ip->u.ip4 = entry->addr.u.ip4;
+#if IS_ENABLED(CONFIG_IPV6)
+	else
+		ip->u.ip6 = entry->addr.u.ip6;
+#endif
+}
+
 static int br_mdb_fill_info(struct sk_buff *skb, struct netlink_callback *cb,
 			    struct net_device *dev)
 {
@@ -243,9 +256,45 @@ static inline size_t rtnl_mdb_nlmsg_size(void)
 		+ nla_total_size(sizeof(struct br_mdb_entry));
 }
 
-static void __br_mdb_notify(struct net_device *dev, struct br_mdb_entry *entry,
-			    int type, struct net_bridge_port_group *pg)
+struct br_mdb_complete_info {
+	struct net_bridge_port *port;
+	struct br_ip ip;
+};
+
+static void br_mdb_complete(struct net_device *dev, int err, void *priv)
 {
+	struct br_mdb_complete_info *data = priv;
+	struct net_bridge_port_group __rcu **pp;
+	struct net_bridge_port_group *p;
+	struct net_bridge_mdb_htable *mdb;
+	struct net_bridge_mdb_entry *mp;
+	struct net_bridge_port *port = data->port;
+	struct net_bridge *br = port->br;
+
+	if (err)
+		goto err;
+
+	spin_lock_bh(&br->multicast_lock);
+	mdb = mlock_dereference(br->mdb, br);
+	mp = br_mdb_ip_get(mdb, &data->ip);
+	if (!mp)
+		goto out;
+	for (pp = &mp->ports; (p = mlock_dereference(*pp, br)) != NULL;
+	     pp = &p->next) {
+		if (p->port != port)
+			continue;
+		p->flags |= MDB_PG_FLAGS_OFFLOAD;
+	}
+out:
+	spin_unlock_bh(&br->multicast_lock);
+err:
+	kfree(priv);
+}
+
+static void __br_mdb_notify(struct net_device *dev, struct net_bridge_port *p,
+			    struct br_mdb_entry *entry, int type)
+{
+	struct br_mdb_complete_info *complete_info;
 	struct switchdev_obj_port_mdb mdb = {
 		.obj = {
 			.id = SWITCHDEV_OBJ_ID_PORT_MDB,
@@ -268,9 +317,14 @@ static void __br_mdb_notify(struct net_device *dev, struct br_mdb_entry *entry,
 
 	mdb.obj.orig_dev = port_dev;
 	if (port_dev && type == RTM_NEWMDB) {
-		err = switchdev_port_obj_add(port_dev, &mdb.obj);
-		if (!err && pg)
-			pg->flags |= MDB_PG_FLAGS_OFFLOAD;
+		complete_info = kmalloc(sizeof(*complete_info), GFP_ATOMIC);
+		if (complete_info) {
+			complete_info->port = p;
+			__mdb_entry_to_br_ip(entry, &complete_info->ip);
+			mdb.obj.complete_priv = complete_info;
+			mdb.obj.complete = br_mdb_complete;
+			switchdev_port_obj_add(port_dev, &mdb.obj);
+		}
 	} else if (port_dev && type == RTM_DELMDB) {
 		switchdev_port_obj_del(port_dev, &mdb.obj);
 	}
@@ -291,21 +345,21 @@ errout:
 	rtnl_set_sk_err(net, RTNLGRP_MDB, err);
 }
 
-void br_mdb_notify(struct net_device *dev, struct net_bridge_port_group *pg,
-		   int type)
+void br_mdb_notify(struct net_device *dev, struct net_bridge_port *port,
+		   struct br_ip *group, int type, u8 flags)
 {
 	struct br_mdb_entry entry;
 
 	memset(&entry, 0, sizeof(entry));
-	entry.ifindex = pg->port->dev->ifindex;
-	entry.addr.proto = pg->addr.proto;
-	entry.addr.u.ip4 = pg->addr.u.ip4;
+	entry.ifindex = port->dev->ifindex;
+	entry.addr.proto = group->proto;
+	entry.addr.u.ip4 = group->u.ip4;
 #if IS_ENABLED(CONFIG_IPV6)
-	entry.addr.u.ip6 = pg->addr.u.ip6;
+	entry.addr.u.ip6 = group->u.ip6;
 #endif
-	entry.vid = pg->addr.vid;
-	__mdb_entry_fill_flags(&entry, pg->flags);
-	__br_mdb_notify(dev, &entry, type, pg);
+	entry.vid = group->vid;
+	__mdb_entry_fill_flags(&entry, flags);
+	__br_mdb_notify(dev, port, &entry, type);
 }
 
 static int nlmsg_populate_rtr_fill(struct sk_buff *skb,
@@ -450,8 +504,7 @@ static int br_mdb_parse(struct sk_buff *skb, struct nlmsghdr *nlh,
 }
 
 static int br_mdb_add_group(struct net_bridge *br, struct net_bridge_port *port,
-			    struct br_ip *group, unsigned char state,
-			    struct net_bridge_port_group **pg)
+			    struct br_ip *group, unsigned char state)
 {
 	struct net_bridge_mdb_entry *mp;
 	struct net_bridge_port_group *p;
@@ -482,7 +535,6 @@ static int br_mdb_add_group(struct net_bridge *br, struct net_bridge_port *port,
 	if (unlikely(!p))
 		return -ENOMEM;
 	rcu_assign_pointer(*pp, p);
-	*pg = p;
 	if (state == MDB_TEMPORARY)
 		mod_timer(&p->timer, now + br->multicast_membership_interval);
 
@@ -490,8 +542,7 @@ static int br_mdb_add_group(struct net_bridge *br, struct net_bridge_port *port,
 }
 
 static int __br_mdb_add(struct net *net, struct net_bridge *br,
-			struct br_mdb_entry *entry,
-			struct net_bridge_port_group **pg)
+			struct br_mdb_entry *entry)
 {
 	struct br_ip ip;
 	struct net_device *dev;
@@ -509,18 +560,10 @@ static int __br_mdb_add(struct net *net, struct net_bridge *br,
 	if (!p || p->br != br || p->state == BR_STATE_DISABLED)
 		return -EINVAL;
 
-	memset(&ip, 0, sizeof(ip));
-	ip.vid = entry->vid;
-	ip.proto = entry->addr.proto;
-	if (ip.proto == htons(ETH_P_IP))
-		ip.u.ip4 = entry->addr.u.ip4;
-#if IS_ENABLED(CONFIG_IPV6)
-	else
-		ip.u.ip6 = entry->addr.u.ip6;
-#endif
+	__mdb_entry_to_br_ip(entry, &ip);
 
 	spin_lock_bh(&br->multicast_lock);
-	ret = br_mdb_add_group(br, p, &ip, entry->state, pg);
+	ret = br_mdb_add_group(br, p, &ip, entry->state);
 	spin_unlock_bh(&br->multicast_lock);
 	return ret;
 }
@@ -528,7 +571,6 @@ static int __br_mdb_add(struct net *net, struct net_bridge *br,
 static int br_mdb_add(struct sk_buff *skb, struct nlmsghdr *nlh)
 {
 	struct net *net = sock_net(skb->sk);
-	struct net_bridge_port_group *pg;
 	struct net_bridge_vlan_group *vg;
 	struct net_device *dev, *pdev;
 	struct br_mdb_entry *entry;
@@ -558,15 +600,15 @@ static int br_mdb_add(struct sk_buff *skb, struct nlmsghdr *nlh)
 	if (br_vlan_enabled(br) && vg && entry->vid == 0) {
 		list_for_each_entry(v, &vg->vlan_list, vlist) {
 			entry->vid = v->vid;
-			err = __br_mdb_add(net, br, entry, &pg);
+			err = __br_mdb_add(net, br, entry);
 			if (err)
 				break;
-			__br_mdb_notify(dev, entry, RTM_NEWMDB, pg);
+			__br_mdb_notify(dev, p, entry, RTM_NEWMDB);
 		}
 	} else {
-		err = __br_mdb_add(net, br, entry, &pg);
+		err = __br_mdb_add(net, br, entry);
 		if (!err)
-			__br_mdb_notify(dev, entry, RTM_NEWMDB, pg);
+			__br_mdb_notify(dev, p, entry, RTM_NEWMDB);
 	}
 
 	return err;
@@ -584,15 +626,7 @@ static int __br_mdb_del(struct net_bridge *br, struct br_mdb_entry *entry)
 	if (!netif_running(br->dev) || br->multicast_disabled)
 		return -EINVAL;
 
-	memset(&ip, 0, sizeof(ip));
-	ip.vid = entry->vid;
-	ip.proto = entry->addr.proto;
-	if (ip.proto == htons(ETH_P_IP))
-		ip.u.ip4 = entry->addr.u.ip4;
-#if IS_ENABLED(CONFIG_IPV6)
-	else
-		ip.u.ip6 = entry->addr.u.ip6;
-#endif
+	__mdb_entry_to_br_ip(entry, &ip);
 
 	spin_lock_bh(&br->multicast_lock);
 	mdb = mlock_dereference(br->mdb, br);
@@ -662,12 +696,12 @@ static int br_mdb_del(struct sk_buff *skb, struct nlmsghdr *nlh)
 			entry->vid = v->vid;
 			err = __br_mdb_del(br, entry);
 			if (!err)
-				__br_mdb_notify(dev, entry, RTM_DELMDB, NULL);
+				__br_mdb_notify(dev, p, entry, RTM_DELMDB);
 		}
 	} else {
 		err = __br_mdb_del(br, entry);
 		if (!err)
-			__br_mdb_notify(dev, entry, RTM_DELMDB, NULL);
+			__br_mdb_notify(dev, p, entry, RTM_DELMDB);
 	}
 
 	return err;
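The br_mdb.c change above stops setting MDB_PG_FLAGS_OFFLOAD directly from the return value of switchdev_port_obj_add(). Instead, __br_mdb_notify() hands the switchdev layer a heap-allocated br_mdb_complete_info holding the port and a copy of the group address, and br_mdb_complete() later re-looks the entry up by that key under the multicast lock before marking it offloaded, freeing the private data on both the success and error paths. A rough standalone illustration of that "complete by copied key, not by raw pointer" pattern (hypothetical names, not the kernel API):

	/* Simplified sketch: the completion callback gets a copied key, re-resolves
	 * the object, and frees its private data on every path. Illustrative only. */
	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	struct entry { char key[16]; int offloaded; };
	static struct entry table[4];		/* stand-in for the MDB hash table */

	static struct entry *lookup(const char *key)
	{
		for (int i = 0; i < 4; i++)
			if (!strcmp(table[i].key, key))
				return &table[i];
		return NULL;
	}

	struct complete_info { char key[16]; };	/* copy of the key, no raw pointer */

	static void add_complete(int err, void *priv)
	{
		struct complete_info *data = priv;
		struct entry *e;

		if (err)
			goto out;
		e = lookup(data->key);		/* the entry may have vanished meanwhile */
		if (e)
			e->offloaded = 1;
	out:
		free(priv);			/* freed on success and error alike */
	}

	int main(void)
	{
		struct complete_info *ci = malloc(sizeof(*ci));

		strcpy(table[0].key, "239.1.1.1");
		if (!ci)
			return 1;
		strcpy(ci->key, "239.1.1.1");
		add_complete(0, ci);		/* a driver would call this when done */
		printf("offloaded=%d\n", table[0].offloaded);
		return 0;
	}

Passing a copied key keeps the completion safe even if the original port-group object was removed while the hardware operation was still in flight.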
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index a4c15df2b792..191ea66e4d92 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -283,7 +283,8 @@ static void br_multicast_del_pg(struct net_bridge *br,
 		rcu_assign_pointer(*pp, p->next);
 		hlist_del_init(&p->mglist);
 		del_timer(&p->timer);
-		br_mdb_notify(br->dev, p, RTM_DELMDB);
+		br_mdb_notify(br->dev, p->port, &pg->addr, RTM_DELMDB,
+			      p->flags);
 		call_rcu_bh(&p->rcu, br_multicast_free_pg);
 
 		if (!mp->ports && !mp->mglist &&
@@ -705,7 +706,7 @@ static int br_multicast_add_group(struct net_bridge *br,
 	if (unlikely(!p))
 		goto err;
 	rcu_assign_pointer(*pp, p);
-	br_mdb_notify(br->dev, p, RTM_NEWMDB);
+	br_mdb_notify(br->dev, port, group, RTM_NEWMDB, 0);
 
 found:
 	mod_timer(&p->timer, now + br->multicast_membership_interval);
@@ -1461,7 +1462,8 @@ br_multicast_leave_group(struct net_bridge *br,
 		hlist_del_init(&p->mglist);
 		del_timer(&p->timer);
 		call_rcu_bh(&p->rcu, br_multicast_free_pg);
-		br_mdb_notify(br->dev, p, RTM_DELMDB);
+		br_mdb_notify(br->dev, port, group, RTM_DELMDB,
+			      p->flags);
 
 		if (!mp->ports && !mp->mglist &&
 		    netif_running(br->dev))
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index 1b5d145dfcbf..d9da857182ef 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -560,8 +560,8 @@ br_multicast_new_port_group(struct net_bridge_port *port, struct br_ip *group,
 			    unsigned char flags);
 void br_mdb_init(void);
 void br_mdb_uninit(void);
-void br_mdb_notify(struct net_device *dev, struct net_bridge_port_group *pg,
-		   int type);
+void br_mdb_notify(struct net_device *dev, struct net_bridge_port *port,
+		   struct br_ip *group, int type, u8 flags);
 void br_rtr_notify(struct net_device *dev, struct net_bridge_port *port,
 		   int type);
 
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
index 8570bc7744c2..5a61f35412a0 100644
--- a/net/bridge/netfilter/ebtables.c
+++ b/net/bridge/netfilter/ebtables.c
@@ -370,7 +370,11 @@ ebt_check_match(struct ebt_entry_match *m, struct xt_mtchk_param *par,
 	    left - sizeof(struct ebt_entry_match) < m->match_size)
 		return -EINVAL;
 
-	match = xt_request_find_match(NFPROTO_BRIDGE, m->u.name, 0);
+	match = xt_find_match(NFPROTO_BRIDGE, m->u.name, 0);
+	if (IS_ERR(match) || match->family != NFPROTO_BRIDGE) {
+		request_module("ebt_%s", m->u.name);
+		match = xt_find_match(NFPROTO_BRIDGE, m->u.name, 0);
+	}
 	if (IS_ERR(match))
 		return PTR_ERR(match);
 	m->u.match = match;
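In the ebtables hunk, the single xt_request_find_match() call is replaced by an explicit lookup that, when it misses or returns an extension belonging to a different family, loads the "ebt_"-prefixed module and searches again, so only a genuine NFPROTO_BRIDGE match is accepted. A generic sketch of that look-up / load-provider / retry shape (find_ext() and load_module() are invented stand-ins, not kernel functions):

	/* Look up, load the provider on a miss, then look up once more. */
	#include <stdio.h>
	#include <string.h>

	static const char *loaded_ext;			/* simulated module registry */

	static int load_module(const char *name)	/* stand-in for request_module() */
	{
		printf("loading %s\n", name);
		loaded_ext = "ip6";			/* pretend the module registered */
		return 0;
	}

	static const char *find_ext(const char *name)	/* stand-in for xt_find_match() */
	{
		return (loaded_ext && !strcmp(loaded_ext, name)) ? loaded_ext : NULL;
	}

	static const char *get_ext(const char *name)
	{
		const char *ext = find_ext(name);

		if (!ext) {				/* miss: load, then retry once */
			char modname[64];

			snprintf(modname, sizeof(modname), "ebt_%s", name);
			load_module(modname);
			ext = find_ext(name);
		}
		return ext;
	}

	int main(void)
	{
		printf("%s\n", get_ext("ip6") ? "found" : "not found");
		return 0;
	}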
diff --git a/net/ceph/auth.c b/net/ceph/auth.c
index 6b923bcaa2a4..2bc5965fdd1e 100644
--- a/net/ceph/auth.c
+++ b/net/ceph/auth.c
@@ -293,13 +293,9 @@ int ceph_auth_create_authorizer(struct ceph_auth_client *ac,
 }
 EXPORT_SYMBOL(ceph_auth_create_authorizer);
 
-void ceph_auth_destroy_authorizer(struct ceph_auth_client *ac,
-				  struct ceph_authorizer *a)
+void ceph_auth_destroy_authorizer(struct ceph_authorizer *a)
 {
-	mutex_lock(&ac->mutex);
-	if (ac->ops && ac->ops->destroy_authorizer)
-		ac->ops->destroy_authorizer(ac, a);
-	mutex_unlock(&ac->mutex);
+	a->destroy(a);
 }
 EXPORT_SYMBOL(ceph_auth_destroy_authorizer);
 
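The ceph patches replace the destroy_authorizer client operation with a destroy function pointer stored in the authorizer itself, so ceph_auth_destroy_authorizer() needs neither the ceph_auth_client nor its mutex. A minimal standalone sketch of that embedded-base-with-destructor idiom (illustrative types, not the ceph structures):

	/* Embedded "base object with a destroy hook" idiom. */
	#include <stdio.h>
	#include <stdlib.h>

	struct authorizer {
		void (*destroy)(struct authorizer *a);
	};

	struct none_authorizer {
		struct authorizer base;		/* first member on purpose */
		char buf[128];
	};

	static void none_destroy(struct authorizer *a)
	{
		/* valid downcast because base is the first member */
		free((struct none_authorizer *)a);
	}

	static struct authorizer *none_create(void)
	{
		struct none_authorizer *au = calloc(1, sizeof(*au));

		if (!au)
			return NULL;
		au->base.destroy = none_destroy;
		return &au->base;
	}

	int main(void)
	{
		struct authorizer *a = none_create();

		if (a)
			a->destroy(a);		/* caller needs no auth-client context */
		puts("destroyed");
		return 0;
	}

Because the base struct is the first member, a pointer to it and a pointer to the containing authorizer refer to the same address, which is what lets a->destroy(a) free the whole object.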
diff --git a/net/ceph/auth_none.c b/net/ceph/auth_none.c
index 8c93fa8d81bc..5f836f02ae36 100644
--- a/net/ceph/auth_none.c
+++ b/net/ceph/auth_none.c
@@ -16,7 +16,6 @@ static void reset(struct ceph_auth_client *ac)
 	struct ceph_auth_none_info *xi = ac->private;
 
 	xi->starting = true;
-	xi->built_authorizer = false;
 }
 
 static void destroy(struct ceph_auth_client *ac)
@@ -39,6 +38,27 @@ static int should_authenticate(struct ceph_auth_client *ac)
 	return xi->starting;
 }
 
+static int ceph_auth_none_build_authorizer(struct ceph_auth_client *ac,
+					   struct ceph_none_authorizer *au)
+{
+	void *p = au->buf;
+	void *const end = p + sizeof(au->buf);
+	int ret;
+
+	ceph_encode_8_safe(&p, end, 1, e_range);
+	ret = ceph_entity_name_encode(ac->name, &p, end);
+	if (ret < 0)
+		return ret;
+
+	ceph_encode_64_safe(&p, end, ac->global_id, e_range);
+	au->buf_len = p - (void *)au->buf;
+	dout("%s built authorizer len %d\n", __func__, au->buf_len);
+	return 0;
+
+e_range:
+	return -ERANGE;
+}
+
 static int build_request(struct ceph_auth_client *ac, void *buf, void *end)
 {
 	return 0;
@@ -57,32 +77,32 @@ static int handle_reply(struct ceph_auth_client *ac, int result,
 	return result;
 }
 
+static void ceph_auth_none_destroy_authorizer(struct ceph_authorizer *a)
+{
+	kfree(a);
+}
+
 /*
- * build an 'authorizer' with our entity_name and global_id. we can
- * reuse a single static copy since it is identical for all services
- * we connect to.
+ * build an 'authorizer' with our entity_name and global_id. it is
+ * identical for all services we connect to.
  */
 static int ceph_auth_none_create_authorizer(
 	struct ceph_auth_client *ac, int peer_type,
 	struct ceph_auth_handshake *auth)
 {
-	struct ceph_auth_none_info *ai = ac->private;
-	struct ceph_none_authorizer *au = &ai->au;
-	void *p, *end;
+	struct ceph_none_authorizer *au;
 	int ret;
 
-	if (!ai->built_authorizer) {
-		p = au->buf;
-		end = p + sizeof(au->buf);
-		ceph_encode_8(&p, 1);
-		ret = ceph_entity_name_encode(ac->name, &p, end - 8);
-		if (ret < 0)
-			goto bad;
-		ceph_decode_need(&p, end, sizeof(u64), bad2);
-		ceph_encode_64(&p, ac->global_id);
-		au->buf_len = p - (void *)au->buf;
-		ai->built_authorizer = true;
-		dout("built authorizer len %d\n", au->buf_len);
+	au = kmalloc(sizeof(*au), GFP_NOFS);
+	if (!au)
+		return -ENOMEM;
+
+	au->base.destroy = ceph_auth_none_destroy_authorizer;
+
+	ret = ceph_auth_none_build_authorizer(ac, au);
+	if (ret) {
+		kfree(au);
+		return ret;
 	}
 
 	auth->authorizer = (struct ceph_authorizer *) au;
@@ -92,17 +112,6 @@ static int ceph_auth_none_create_authorizer(
 	auth->authorizer_reply_buf_len = sizeof (au->reply_buf);
 
 	return 0;
-
-bad2:
-	ret = -ERANGE;
-bad:
-	return ret;
-}
-
-static void ceph_auth_none_destroy_authorizer(struct ceph_auth_client *ac,
-				      struct ceph_authorizer *a)
-{
-	/* nothing to do */
 }
 
 static const struct ceph_auth_client_ops ceph_auth_none_ops = {
@@ -114,7 +123,6 @@ static const struct ceph_auth_client_ops ceph_auth_none_ops = {
 	.build_request = build_request,
 	.handle_reply = handle_reply,
 	.create_authorizer = ceph_auth_none_create_authorizer,
-	.destroy_authorizer = ceph_auth_none_destroy_authorizer,
 };
 
 int ceph_auth_none_init(struct ceph_auth_client *ac)
@@ -127,7 +135,6 @@ int ceph_auth_none_init(struct ceph_auth_client *ac)
 		return -ENOMEM;
 
 	xi->starting = true;
-	xi->built_authorizer = false;
 
 	ac->protocol = CEPH_AUTH_NONE;
 	ac->private = xi;
diff --git a/net/ceph/auth_none.h b/net/ceph/auth_none.h
index 059a3ce4b53f..62021535ae4a 100644
--- a/net/ceph/auth_none.h
+++ b/net/ceph/auth_none.h
@@ -12,6 +12,7 @@
  */
 
 struct ceph_none_authorizer {
+	struct ceph_authorizer base;
 	char buf[128];
 	int buf_len;
 	char reply_buf[0];
@@ -19,8 +20,6 @@ struct ceph_none_authorizer {
 
 struct ceph_auth_none_info {
 	bool starting;
-	bool built_authorizer;
-	struct ceph_none_authorizer au; /* we only need one; it's static */
 };
 
 int ceph_auth_none_init(struct ceph_auth_client *ac);
diff --git a/net/ceph/auth_x.c b/net/ceph/auth_x.c
index 9e43a315e662..a0905f04bd13 100644
--- a/net/ceph/auth_x.c
+++ b/net/ceph/auth_x.c
@@ -565,6 +565,14 @@ static int ceph_x_handle_reply(struct ceph_auth_client *ac, int result,
 	return -EAGAIN;
 }
 
+static void ceph_x_destroy_authorizer(struct ceph_authorizer *a)
+{
+	struct ceph_x_authorizer *au = (void *)a;
+
+	ceph_x_authorizer_cleanup(au);
+	kfree(au);
+}
+
 static int ceph_x_create_authorizer(
 	struct ceph_auth_client *ac, int peer_type,
 	struct ceph_auth_handshake *auth)
@@ -581,6 +589,8 @@ static int ceph_x_create_authorizer(
 	if (!au)
 		return -ENOMEM;
 
+	au->base.destroy = ceph_x_destroy_authorizer;
+
 	ret = ceph_x_build_authorizer(ac, th, au);
 	if (ret) {
 		kfree(au);
@@ -643,16 +653,6 @@ static int ceph_x_verify_authorizer_reply(struct ceph_auth_client *ac,
 	return ret;
 }
 
-static void ceph_x_destroy_authorizer(struct ceph_auth_client *ac,
-				      struct ceph_authorizer *a)
-{
-	struct ceph_x_authorizer *au = (void *)a;
-
-	ceph_x_authorizer_cleanup(au);
-	kfree(au);
-}
-
-
 static void ceph_x_reset(struct ceph_auth_client *ac)
 {
 	struct ceph_x_info *xi = ac->private;
@@ -770,7 +770,6 @@ static const struct ceph_auth_client_ops ceph_x_ops = {
 	.create_authorizer = ceph_x_create_authorizer,
 	.update_authorizer = ceph_x_update_authorizer,
 	.verify_authorizer_reply = ceph_x_verify_authorizer_reply,
-	.destroy_authorizer = ceph_x_destroy_authorizer,
 	.invalidate_authorizer = ceph_x_invalidate_authorizer,
 	.reset = ceph_x_reset,
 	.destroy = ceph_x_destroy,
diff --git a/net/ceph/auth_x.h b/net/ceph/auth_x.h
index 40b1a3cf7397..21a5af904bae 100644
--- a/net/ceph/auth_x.h
+++ b/net/ceph/auth_x.h
@@ -26,6 +26,7 @@ struct ceph_x_ticket_handler {
 
 
 struct ceph_x_authorizer {
+	struct ceph_authorizer base;
 	struct ceph_crypto_key session_key;
 	struct ceph_buffer *buf;
 	unsigned int service;
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index 32355d9d0103..40a53a70efdf 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -1087,10 +1087,8 @@ static void put_osd(struct ceph_osd *osd)
 	dout("put_osd %p %d -> %d\n", osd, atomic_read(&osd->o_ref),
 	     atomic_read(&osd->o_ref) - 1);
 	if (atomic_dec_and_test(&osd->o_ref)) {
-		struct ceph_auth_client *ac = osd->o_osdc->client->monc.auth;
-
 		if (osd->o_auth.authorizer)
-			ceph_auth_destroy_authorizer(ac, osd->o_auth.authorizer);
+			ceph_auth_destroy_authorizer(osd->o_auth.authorizer);
 		kfree(osd);
 	}
 }
@@ -2984,7 +2982,7 @@ static struct ceph_auth_handshake *get_authorizer(struct ceph_connection *con,
 	struct ceph_auth_handshake *auth = &o->o_auth;
 
 	if (force_new && auth->authorizer) {
-		ceph_auth_destroy_authorizer(ac, auth->authorizer);
+		ceph_auth_destroy_authorizer(auth->authorizer);
 		auth->authorizer = NULL;
 	}
 	if (!auth->authorizer) {
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index d04c2d1c8c87..e561f9f07d6d 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -4502,13 +4502,16 @@ int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci)
 		__skb_push(skb, offset);
 		err = __vlan_insert_tag(skb, skb->vlan_proto,
 					skb_vlan_tag_get(skb));
-		if (err)
+		if (err) {
+			__skb_pull(skb, offset);
 			return err;
+		}
+
 		skb->protocol = skb->vlan_proto;
 		skb->mac_len += VLAN_HLEN;
-		__skb_pull(skb, offset);
 
 		skb_postpush_rcsum(skb, skb->data + (2 * ETH_ALEN), VLAN_HLEN);
+		__skb_pull(skb, offset);
 	}
 	__vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci);
 	return 0;
diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c
index 607a14f20d88..b1dc096d22f8 100644
--- a/net/decnet/dn_route.c
+++ b/net/decnet/dn_route.c
@@ -1034,10 +1034,13 @@ source_ok:
 		if (!fld.daddr) {
 			fld.daddr = fld.saddr;
 
-			err = -EADDRNOTAVAIL;
 			if (dev_out)
 				dev_put(dev_out);
+			err = -EINVAL;
 			dev_out = init_net.loopback_dev;
+			if (!dev_out->dn_ptr)
+				goto out;
+			err = -EADDRNOTAVAIL;
 			dev_hold(dev_out);
 			if (!fld.daddr) {
 				fld.daddr =
@@ -1110,6 +1113,8 @@ source_ok:
 		if (dev_out == NULL)
 			goto out;
 		dn_db = rcu_dereference_raw(dev_out->dn_ptr);
+		if (!dn_db)
+			goto e_inval;
 		/* Possible improvement - check all devices for local addr */
 		if (dn_dev_islocal(dev_out, fld.daddr)) {
 			dev_put(dev_out);
@@ -1151,6 +1156,8 @@ select_source:
 		dev_put(dev_out);
 		dev_out = init_net.loopback_dev;
 		dev_hold(dev_out);
+		if (!dev_out->dn_ptr)
+			goto e_inval;
 		fld.flowidn_oif = dev_out->ifindex;
 		if (res.fi)
 			dn_fib_info_put(res.fi);
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index 8a9246deccfe..63566ec54794 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -904,7 +904,11 @@ void fib_del_ifaddr(struct in_ifaddr *ifa, struct in_ifaddr *iprim)
 	if (ifa->ifa_flags & IFA_F_SECONDARY) {
 		prim = inet_ifa_byprefix(in_dev, any, ifa->ifa_mask);
 		if (!prim) {
-			pr_warn("%s: bug: prim == NULL\n", __func__);
+			/* if the device has been deleted, we don't perform
+			 * address promotion
+			 */
+			if (!in_dev->dead)
+				pr_warn("%s: bug: prim == NULL\n", __func__);
 			return;
 		}
 		if (iprim && iprim != prim) {
diff --git a/net/ipv4/netfilter/arptable_filter.c b/net/ipv4/netfilter/arptable_filter.c
index dd8c80dc32a2..8f8713b4388f 100644
--- a/net/ipv4/netfilter/arptable_filter.c
+++ b/net/ipv4/netfilter/arptable_filter.c
@@ -81,6 +81,12 @@ static int __init arptable_filter_init(void)
 		return ret;
 	}
 
+	ret = arptable_filter_table_init(&init_net);
+	if (ret) {
+		unregister_pernet_subsys(&arptable_filter_net_ops);
+		kfree(arpfilter_ops);
+	}
+
 	return ret;
 }
 
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 02c62299d717..60398a9370e7 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -1438,9 +1438,9 @@ static void rt_set_nexthop(struct rtable *rt, __be32 daddr,
 #endif
 }
 
-static struct rtable *rt_dst_alloc(struct net_device *dev,
-				   unsigned int flags, u16 type,
-				   bool nopolicy, bool noxfrm, bool will_cache)
+struct rtable *rt_dst_alloc(struct net_device *dev,
+			    unsigned int flags, u16 type,
+			    bool nopolicy, bool noxfrm, bool will_cache)
 {
 	struct rtable *rt;
 
@@ -1468,6 +1468,7 @@ static struct rtable *rt_dst_alloc(struct net_device *dev,
 
 	return rt;
 }
+EXPORT_SYMBOL(rt_dst_alloc);
 
 /* called in rcu_read_lock() section */
 static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
@@ -2045,6 +2046,18 @@ static struct rtable *__mkroute_output(const struct fib_result *res,
 		 */
 		if (fi && res->prefixlen < 4)
 			fi = NULL;
+	} else if ((type == RTN_LOCAL) && (orig_oif != 0) &&
+		   (orig_oif != dev_out->ifindex)) {
+		/* For local routes that require a particular output interface
+		 * we do not want to cache the result.  Caching the result
+		 * causes incorrect behaviour when there are multiple source
+		 * addresses on the interface, the end result being that if the
+		 * intended recipient is waiting on that interface for the
+		 * packet he won't receive it because it will be delivered on
+		 * the loopback interface and the IP_PKTINFO ipi_ifindex will
+		 * be set to the loopback interface as well.
+		 */
+		fi = NULL;
 	}
 
 	fnhe = NULL;
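The new __mkroute_output() branch above declines to cache local routes that were looked up with a specific output interface, precisely so that the interface a receiver later sees in IP_PKTINFO's ipi_ifindex is the one the sender addressed rather than loopback. For reference, this is how a receiver reads that ipi_ifindex value (ordinary socket API; the port number is arbitrary):

	/* Minimal UDP receiver printing the interface index reported by IP_PKTINFO. */
	#define _GNU_SOURCE
	#include <stdio.h>
	#include <arpa/inet.h>
	#include <netinet/in.h>
	#include <sys/socket.h>

	int main(void)
	{
		int fd = socket(AF_INET, SOCK_DGRAM, 0), one = 1;
		struct sockaddr_in sin = { .sin_family = AF_INET,
					   .sin_port = htons(5000),
					   .sin_addr.s_addr = htonl(INADDR_ANY) };
		char data[2048], cbuf[256];
		struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
		struct msghdr msg = { .msg_iov = &iov, .msg_iovlen = 1,
				      .msg_control = cbuf, .msg_controllen = sizeof(cbuf) };
		struct cmsghdr *cmsg;

		setsockopt(fd, IPPROTO_IP, IP_PKTINFO, &one, sizeof(one));
		if (bind(fd, (struct sockaddr *)&sin, sizeof(sin)) < 0)
			return 1;
		if (recvmsg(fd, &msg, 0) < 0)
			return 1;
		for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg)) {
			if (cmsg->cmsg_level == IPPROTO_IP && cmsg->cmsg_type == IP_PKTINFO) {
				struct in_pktinfo *pi = (struct in_pktinfo *)CMSG_DATA(cmsg);

				/* the index discussed in the route.c comment above */
				printf("received on ifindex %d\n", pi->ipi_ifindex);
			}
		}
		return 0;
	}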
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index e6e65f79ade8..c124c3c12f7c 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1309,6 +1309,7 @@ static bool tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
 	if (skb == tcp_highest_sack(sk))
 		tcp_advance_highest_sack(sk, skb);
 
+	tcp_skb_collapse_tstamp(prev, skb);
 	tcp_unlink_write_queue(skb, sk);
 	sk_wmem_free_skb(sk, skb);
 
@@ -3098,7 +3099,8 @@ static void tcp_ack_tstamp(struct sock *sk, struct sk_buff *skb,
 
 	shinfo = skb_shinfo(skb);
 	if ((shinfo->tx_flags & SKBTX_ACK_TSTAMP) &&
-	    between(shinfo->tskey, prior_snd_una, tcp_sk(sk)->snd_una - 1))
+	    !before(shinfo->tskey, prior_snd_una) &&
+	    before(shinfo->tskey, tcp_sk(sk)->snd_una))
 		__skb_tstamp_tx(skb, NULL, sk, SCM_TSTAMP_ACK);
 }
 
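In tcp_ack_tstamp(), the closed-interval test between(tskey, prior_snd_una, snd_una - 1) becomes the half-open check !before(tskey, prior_snd_una) && before(tskey, snd_una). With the old form, an ACK that advances nothing (snd_una == prior_snd_una) makes the upper bound wrap to prior_snd_una - 1 and the interval match any key; the new form is simply empty in that case. The before()-style comparison itself is ordinary serial-number arithmetic on 32-bit sequence numbers, shown here as a small userspace demo (same logic as the kernel macros, rewritten for illustration):

	/* Wraparound-safe 32-bit sequence comparison, mirroring TCP's before(). */
	#include <stdint.h>
	#include <stdio.h>
	#include <stdbool.h>

	static bool seq_before(uint32_t a, uint32_t b)
	{
		return (int32_t)(a - b) < 0;	/* signed distance handles wraparound */
	}

	static bool in_window(uint32_t key, uint32_t lo, uint32_t hi)
	{
		/* half-open interval [lo, hi), valid even when it straddles 2^32 */
		return !seq_before(key, lo) && seq_before(key, hi);
	}

	int main(void)
	{
		uint32_t lo = 0xfffffff0u, hi = 0x00000010u;	/* window across the wrap */

		printf("%d\n", in_window(0xfffffff8u, lo, hi));	/* 1: inside */
		printf("%d\n", in_window(0x00000008u, lo, hi));	/* 1: inside, after wrap */
		printf("%d\n", in_window(0x00000020u, lo, hi));	/* 0: outside */
		printf("%d\n", in_window(5, 5, 5));		/* 0: empty window stays empty */
		return 0;
	}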
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 7d2dc015cd19..441ae9da3a23 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -2441,6 +2441,20 @@ u32 __tcp_select_window(struct sock *sk)
 	return window;
 }
 
+void tcp_skb_collapse_tstamp(struct sk_buff *skb,
+			     const struct sk_buff *next_skb)
+{
+	const struct skb_shared_info *next_shinfo = skb_shinfo(next_skb);
+	u8 tsflags = next_shinfo->tx_flags & SKBTX_ANY_TSTAMP;
+
+	if (unlikely(tsflags)) {
+		struct skb_shared_info *shinfo = skb_shinfo(skb);
+
+		shinfo->tx_flags |= tsflags;
+		shinfo->tskey = next_shinfo->tskey;
+	}
+}
+
 /* Collapses two adjacent SKB's during retransmission. */
 static void tcp_collapse_retrans(struct sock *sk, struct sk_buff *skb)
 {
@@ -2484,6 +2498,8 @@ static void tcp_collapse_retrans(struct sock *sk, struct sk_buff *skb)
 
 	tcp_adjust_pcount(sk, next_skb, tcp_skb_pcount(next_skb));
 
+	tcp_skb_collapse_tstamp(skb, next_skb);
+
 	sk_wmem_free_skb(sk, next_skb);
 }
 
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 08eed5e16df0..a2e7f55a1f61 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -339,8 +339,13 @@ found:
 
 		hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash);
 		spin_lock(&hslot2->lock);
-		hlist_nulls_add_head_rcu(&udp_sk(sk)->udp_portaddr_node,
-					 &hslot2->head);
+		if (IS_ENABLED(CONFIG_IPV6) && sk->sk_reuseport &&
+		    sk->sk_family == AF_INET6)
+			hlist_nulls_add_tail_rcu(&udp_sk(sk)->udp_portaddr_node,
+						 &hslot2->head);
+		else
+			hlist_nulls_add_head_rcu(&udp_sk(sk)->udp_portaddr_node,
+						 &hslot2->head);
 		hslot2->count++;
 		spin_unlock(&hslot2->lock);
 	}
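The udp.c hunk only changes where a newly bound socket is linked into the secondary (address+port) hash chain: an IPv6 socket with SO_REUSEPORT set is appended at the tail instead of the head, so IPv4 sockets sharing the port are encountered first during a chain walk; keeping mixed v4/v6 reuseport groups consistent appears to be the intent, though the hunk itself does not say so. For context, SO_REUSEPORT is the userspace feature involved, two UDP sockets sharing one port:

	/* Two UDP sockets bound to the same port via SO_REUSEPORT; the kernel
	 * load-balances incoming datagrams between them. */
	#include <stdio.h>
	#include <stdint.h>
	#include <arpa/inet.h>
	#include <netinet/in.h>
	#include <sys/socket.h>

	static int bind_reuseport(uint16_t port)
	{
		int fd = socket(AF_INET, SOCK_DGRAM, 0), one = 1;
		struct sockaddr_in sin = { .sin_family = AF_INET,
					   .sin_port = htons(port),
					   .sin_addr.s_addr = htonl(INADDR_ANY) };

		setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &one, sizeof(one));
		return bind(fd, (struct sockaddr *)&sin, sizeof(sin)) ? -1 : fd;
	}

	int main(void)
	{
		int a = bind_reuseport(5000);
		int b = bind_reuseport(5000);	/* succeeds because both set SO_REUSEPORT */

		printf("fd a=%d, fd b=%d\n", a, b);
		return 0;
	}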
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 27aed1afcf81..8ec4b3089e20 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -3176,35 +3176,9 @@ static void addrconf_gre_config(struct net_device *dev)
 }
 #endif
 
-#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
-/* If the host route is cached on the addr struct make sure it is associated
- * with the proper table. e.g., enslavement can change and if so the cached
- * host route needs to move to the new table.
- */
-static void l3mdev_check_host_rt(struct inet6_dev *idev,
-				 struct inet6_ifaddr *ifp)
-{
-	if (ifp->rt) {
-		u32 tb_id = l3mdev_fib_table(idev->dev) ? : RT6_TABLE_LOCAL;
-
-		if (tb_id != ifp->rt->rt6i_table->tb6_id) {
-			ip6_del_rt(ifp->rt);
-			ifp->rt = NULL;
-		}
-	}
-}
-#else
-static void l3mdev_check_host_rt(struct inet6_dev *idev,
-				 struct inet6_ifaddr *ifp)
-{
-}
-#endif
-
 static int fixup_permanent_addr(struct inet6_dev *idev,
 				struct inet6_ifaddr *ifp)
 {
-	l3mdev_check_host_rt(idev, ifp);
-
 	if (!ifp->rt) {
 		struct rt6_info *rt;
 
@@ -3255,6 +3229,7 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
 			   void *ptr)
 {
 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+	struct netdev_notifier_changeupper_info *info;
 	struct inet6_dev *idev = __in6_dev_get(dev);
 	int run_pending = 0;
 	int err;
@@ -3303,6 +3278,9 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
 			break;
 
 		if (event == NETDEV_UP) {
+			/* restore routes for permanent addresses */
+			addrconf_permanent_addr(dev);
+
 			if (!addrconf_qdisc_ok(dev)) {
 				/* device is not ready yet. */
 				pr_info("ADDRCONF(NETDEV_UP): %s: link is not ready\n",
@@ -3336,9 +3314,6 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
 			run_pending = 1;
 		}
 
-		/* restore routes for permanent addresses */
-		addrconf_permanent_addr(dev);
-
 		switch (dev->type) {
 #if IS_ENABLED(CONFIG_IPV6_SIT)
 		case ARPHRD_SIT:
@@ -3413,6 +3388,15 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
 		if (idev)
 			addrconf_type_change(dev, event);
 		break;
+
+	case NETDEV_CHANGEUPPER:
+		info = ptr;
+
+		/* flush all routes if dev is linked to or unlinked from
+		 * an L3 master device (e.g., VRF)
+		 */
+		if (info->upper_dev && netif_is_l3_master(info->upper_dev))
+			addrconf_ifdown(dev, 0);
 	}
 
 	return NOTIFY_OK;
@@ -3438,6 +3422,12 @@ static void addrconf_type_change(struct net_device *dev, unsigned long event)
 		ipv6_mc_unmap(idev);
 }
 
+static bool addr_is_local(const struct in6_addr *addr)
+{
+	return ipv6_addr_type(addr) &
+		(IPV6_ADDR_LINKLOCAL | IPV6_ADDR_LOOPBACK);
+}
+
 static int addrconf_ifdown(struct net_device *dev, int how)
 {
 	struct net *net = dev_net(dev);
@@ -3495,7 +3485,8 @@ restart:
 		 * address is retained on a down event
 		 */
 		if (!keep_addr ||
-		    !(ifa->flags & IFA_F_PERMANENT)) {
+		    !(ifa->flags & IFA_F_PERMANENT) ||
+		    addr_is_local(&ifa->addr)) {
 			hlist_del_init_rcu(&ifa->addr_lst);
 			goto restart;
 		}
@@ -3539,17 +3530,23 @@ restart:
 
 	INIT_LIST_HEAD(&del_list);
 	list_for_each_entry_safe(ifa, tmp, &idev->addr_list, if_list) {
+		struct rt6_info *rt = NULL;
+
 		addrconf_del_dad_work(ifa);
 
 		write_unlock_bh(&idev->lock);
 		spin_lock_bh(&ifa->lock);
 
-		if (keep_addr && (ifa->flags & IFA_F_PERMANENT)) {
+		if (keep_addr && (ifa->flags & IFA_F_PERMANENT) &&
+		    !addr_is_local(&ifa->addr)) {
 			/* set state to skip the notifier below */
 			state = INET6_IFADDR_STATE_DEAD;
 			ifa->state = 0;
 			if (!(ifa->flags & IFA_F_NODAD))
 				ifa->flags |= IFA_F_TENTATIVE;
+
+			rt = ifa->rt;
+			ifa->rt = NULL;
 		} else {
 			state = ifa->state;
 			ifa->state = INET6_IFADDR_STATE_DEAD;
@@ -3560,6 +3557,9 @@ restart:
 
 		spin_unlock_bh(&ifa->lock);
 
+		if (rt)
+			ip6_del_rt(rt);
+
 		if (state != INET6_IFADDR_STATE_DEAD) {
 			__ipv6_ifa_notify(RTM_DELADDR, ifa);
 			inet6addr_notifier_call_chain(NETDEV_DOWN, ifa);
@@ -5325,10 +5325,10 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
 			if (rt)
 				ip6_del_rt(rt);
 		}
-		dst_hold(&ifp->rt->dst);
-
-		ip6_del_rt(ifp->rt);
-
+		if (ifp->rt) {
+			dst_hold(&ifp->rt->dst);
+			ip6_del_rt(ifp->rt);
+		}
 		rt_genid_bump_ipv6(net);
 		break;
 	}
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index 428162155280..9dd3882fe6bf 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -40,18 +40,114 @@ static bool ipv6_mapped_addr_any(const struct in6_addr *a)
 	return ipv6_addr_v4mapped(a) && (a->s6_addr32[3] == 0);
 }
 
+static void ip6_datagram_flow_key_init(struct flowi6 *fl6, struct sock *sk)
+{
+	struct inet_sock *inet = inet_sk(sk);
+	struct ipv6_pinfo *np = inet6_sk(sk);
+
+	memset(fl6, 0, sizeof(*fl6));
+	fl6->flowi6_proto = sk->sk_protocol;
+	fl6->daddr = sk->sk_v6_daddr;
+	fl6->saddr = np->saddr;
+	fl6->flowi6_oif = sk->sk_bound_dev_if;
+	fl6->flowi6_mark = sk->sk_mark;
+	fl6->fl6_dport = inet->inet_dport;
+	fl6->fl6_sport = inet->inet_sport;
+	fl6->flowlabel = np->flow_label;
+
+	if (!fl6->flowi6_oif)
+		fl6->flowi6_oif = np->sticky_pktinfo.ipi6_ifindex;
+
+	if (!fl6->flowi6_oif && ipv6_addr_is_multicast(&fl6->daddr))
+		fl6->flowi6_oif = np->mcast_oif;
+
+	security_sk_classify_flow(sk, flowi6_to_flowi(fl6));
+}
+
+int ip6_datagram_dst_update(struct sock *sk, bool fix_sk_saddr)
+{
+	struct ip6_flowlabel *flowlabel = NULL;
+	struct in6_addr *final_p, final;
+	struct ipv6_txoptions *opt;
+	struct dst_entry *dst;
+	struct inet_sock *inet = inet_sk(sk);
+	struct ipv6_pinfo *np = inet6_sk(sk);
+	struct flowi6 fl6;
+	int err = 0;
+
+	if (np->sndflow && (np->flow_label & IPV6_FLOWLABEL_MASK)) {
+		flowlabel = fl6_sock_lookup(sk, np->flow_label);
+		if (!flowlabel)
+			return -EINVAL;
+	}
+	ip6_datagram_flow_key_init(&fl6, sk);
+
+	rcu_read_lock();
+	opt = flowlabel ? flowlabel->opt : rcu_dereference(np->opt);
+	final_p = fl6_update_dst(&fl6, opt, &final);
+	rcu_read_unlock();
+
+	dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
+	if (IS_ERR(dst)) {
+		err = PTR_ERR(dst);
+		goto out;
+	}
+
+	if (fix_sk_saddr) {
+		if (ipv6_addr_any(&np->saddr))
+			np->saddr = fl6.saddr;
+
+		if (ipv6_addr_any(&sk->sk_v6_rcv_saddr)) {
+			sk->sk_v6_rcv_saddr = fl6.saddr;
+			inet->inet_rcv_saddr = LOOPBACK4_IPV6;
+			if (sk->sk_prot->rehash)
+				sk->sk_prot->rehash(sk);
+		}
+	}
+
+	ip6_dst_store(sk, dst,
+		      ipv6_addr_equal(&fl6.daddr, &sk->sk_v6_daddr) ?
+		      &sk->sk_v6_daddr : NULL,
+#ifdef CONFIG_IPV6_SUBTREES
+		      ipv6_addr_equal(&fl6.saddr, &np->saddr) ?
+		      &np->saddr :
+#endif
+		      NULL);
+
+out:
+	fl6_sock_release(flowlabel);
+	return err;
+}
+
+void ip6_datagram_release_cb(struct sock *sk)
+{
+	struct dst_entry *dst;
+
+	if (ipv6_addr_v4mapped(&sk->sk_v6_daddr))
+		return;
+
+	rcu_read_lock();
+	dst = __sk_dst_get(sk);
+	if (!dst || !dst->obsolete ||
+	    dst->ops->check(dst, inet6_sk(sk)->dst_cookie)) {
+		rcu_read_unlock();
+		return;
+	}
+	rcu_read_unlock();
+
+	ip6_datagram_dst_update(sk, false);
+}
+EXPORT_SYMBOL_GPL(ip6_datagram_release_cb);
+
 static int __ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
 {
 	struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
 	struct inet_sock *inet = inet_sk(sk);
 	struct ipv6_pinfo *np = inet6_sk(sk);
-	struct in6_addr *daddr, *final_p, final;
-	struct dst_entry *dst;
-	struct flowi6 fl6;
-	struct ip6_flowlabel *flowlabel = NULL;
-	struct ipv6_txoptions *opt;
+	struct in6_addr *daddr;
 	int addr_type;
 	int err;
+	__be32 fl6_flowlabel = 0;
 
 	if (usin->sin6_family == AF_INET) {
 		if (__ipv6_only_sock(sk))
@@ -66,15 +162,8 @@ static int __ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
 	if (usin->sin6_family != AF_INET6)
 		return -EAFNOSUPPORT;
 
-	memset(&fl6, 0, sizeof(fl6));
-	if (np->sndflow) {
-		fl6.flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
-		if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
-			flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
-			if (!flowlabel)
-				return -EINVAL;
-		}
-	}
+	if (np->sndflow)
+		fl6_flowlabel = usin->sin6_flowinfo & IPV6_FLOWINFO_MASK;
 
 	addr_type = ipv6_addr_type(&usin->sin6_addr);
 
@@ -145,7 +234,7 @@ ipv4_connected:
 	}
 
 	sk->sk_v6_daddr = *daddr;
-	np->flow_label = fl6.flowlabel;
+	np->flow_label = fl6_flowlabel;
 
 	inet->inet_dport = usin->sin6_port;
 
@@ -154,59 +243,13 @@ ipv4_connected:
 	 *	destination cache for it.
 	 */
 
-	fl6.flowi6_proto = sk->sk_protocol;
-	fl6.daddr = sk->sk_v6_daddr;
-	fl6.saddr = np->saddr;
-	fl6.flowi6_oif = sk->sk_bound_dev_if;
-	fl6.flowi6_mark = sk->sk_mark;
-	fl6.fl6_dport = inet->inet_dport;
-	fl6.fl6_sport = inet->inet_sport;
-
-	if (!fl6.flowi6_oif)
-		fl6.flowi6_oif = np->sticky_pktinfo.ipi6_ifindex;
-
-	if (!fl6.flowi6_oif && (addr_type&IPV6_ADDR_MULTICAST))
-		fl6.flowi6_oif = np->mcast_oif;
-
-	security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
-
-	rcu_read_lock();
-	opt = flowlabel ? flowlabel->opt : rcu_dereference(np->opt);
-	final_p = fl6_update_dst(&fl6, opt, &final);
-	rcu_read_unlock();
-
-	dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
-	err = 0;
-	if (IS_ERR(dst)) {
-		err = PTR_ERR(dst);
+	err = ip6_datagram_dst_update(sk, true);
+	if (err)
 		goto out;
-	}
 
-	/* source address lookup done in ip6_dst_lookup */
-
-	if (ipv6_addr_any(&np->saddr))
-		np->saddr = fl6.saddr;
-
-	if (ipv6_addr_any(&sk->sk_v6_rcv_saddr)) {
-		sk->sk_v6_rcv_saddr = fl6.saddr;
-		inet->inet_rcv_saddr = LOOPBACK4_IPV6;
-		if (sk->sk_prot->rehash)
-			sk->sk_prot->rehash(sk);
-	}
-
-	ip6_dst_store(sk, dst,
-		      ipv6_addr_equal(&fl6.daddr, &sk->sk_v6_daddr) ?
-		      &sk->sk_v6_daddr : NULL,
-#ifdef CONFIG_IPV6_SUBTREES
-		      ipv6_addr_equal(&fl6.saddr, &np->saddr) ?
-		      &np->saddr :
-#endif
-		      NULL);
-
 	sk->sk_state = TCP_ESTABLISHED;
 	sk_set_txhash(sk);
 out:
-	fl6_sock_release(flowlabel);
 	return err;
 }
 
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index ed446639219c..d916d6ab9ad2 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -338,9 +338,9 @@ static struct rt6_info *__ip6_dst_alloc(struct net *net,
 	return rt;
 }
 
-static struct rt6_info *ip6_dst_alloc(struct net *net,
-				      struct net_device *dev,
-				      int flags)
+struct rt6_info *ip6_dst_alloc(struct net *net,
+			       struct net_device *dev,
+			       int flags)
 {
 	struct rt6_info *rt = __ip6_dst_alloc(net, dev, flags);
 
@@ -364,6 +364,7 @@ static struct rt6_info *ip6_dst_alloc(struct net *net,
 
 	return rt;
 }
+EXPORT_SYMBOL(ip6_dst_alloc);
 
 static void ip6_dst_destroy(struct dst_entry *dst)
 {
@@ -1417,8 +1418,20 @@ EXPORT_SYMBOL_GPL(ip6_update_pmtu);
 
 void ip6_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, __be32 mtu)
 {
+	struct dst_entry *dst;
+
 	ip6_update_pmtu(skb, sock_net(sk), mtu,
 			sk->sk_bound_dev_if, sk->sk_mark);
+
+	dst = __sk_dst_get(sk);
+	if (!dst || !dst->obsolete ||
+	    dst->ops->check(dst, inet6_sk(sk)->dst_cookie))
+		return;
+
+	bh_lock_sock(sk);
+	if (!sock_owned_by_user(sk) && !ipv6_addr_v4mapped(&sk->sk_v6_daddr))
+		ip6_datagram_dst_update(sk, false);
+	bh_unlock_sock(sk);
 }
 EXPORT_SYMBOL_GPL(ip6_sk_update_pmtu);
 
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 8125931106be..6bc5c664fa46 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -1539,6 +1539,7 @@ struct proto udpv6_prot = {
 	.sendmsg = udpv6_sendmsg,
 	.recvmsg = udpv6_recvmsg,
 	.backlog_rcv = __udpv6_queue_rcv_skb,
+	.release_cb = ip6_datagram_release_cb,
 	.hash = udp_lib_hash,
 	.unhash = udp_lib_unhash,
 	.rehash = udp_v6_rehash,
diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c index 278f3b9356ef..7cc1d9c22a9f 100644 --- a/net/netfilter/nf_conntrack_proto_tcp.c +++ b/net/netfilter/nf_conntrack_proto_tcp.c | |||
@@ -410,6 +410,8 @@ static void tcp_options(const struct sk_buff *skb, | |||
410 | length--; | 410 | length--; |
411 | continue; | 411 | continue; |
412 | default: | 412 | default: |
413 | if (length < 2) | ||
414 | return; | ||
413 | opsize=*ptr++; | 415 | opsize=*ptr++; |
414 | if (opsize < 2) /* "silly options" */ | 416 | if (opsize < 2) /* "silly options" */ |
415 | return; | 417 | return; |
@@ -470,6 +472,8 @@ static void tcp_sack(const struct sk_buff *skb, unsigned int dataoff, | |||
470 | length--; | 472 | length--; |
471 | continue; | 473 | continue; |
472 | default: | 474 | default: |
475 | if (length < 2) | ||
476 | return; | ||
473 | opsize = *ptr++; | 477 | opsize = *ptr++; |
474 | if (opsize < 2) /* "silly options" */ | 478 | if (opsize < 2) /* "silly options" */ |
475 | return; | 479 | return; |
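Both conntrack hunks add the same guard to the option walker's default case: when fewer than two bytes of the options block remain, bail out instead of reading the length byte, closing a one-byte over-read on truncated TCP options. A self-contained sketch of the hardened walk (userspace C, not the conntrack code itself):

    #include <stdint.h>
    #include <stdio.h>

    #define TCPOPT_EOL 0
    #define TCPOPT_NOP 1

    /* Walk TCP options without ever reading past ptr[length - 1]. */
    static void walk_tcp_options(const uint8_t *ptr, int length)
    {
        while (length > 0) {
            uint8_t opcode = *ptr++;
            uint8_t opsize;

            if (opcode == TCPOPT_EOL)
                return;
            if (opcode == TCPOPT_NOP) {
                length--;
                continue;
            }
            if (length < 2)              /* the added check: no room for a length byte */
                return;
            opsize = *ptr++;
            if (opsize < 2 || opsize > length)
                return;                  /* "silly" or truncated option */
            printf("option %u, %u bytes\n", opcode, opsize);
            ptr += opsize - 2;
            length -= opsize;
        }
    }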
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index 215fc08c02ab..330ebd600f25 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c | |||
@@ -688,7 +688,7 @@ static int netlink_release(struct socket *sock) | |||
688 | 688 | ||
689 | skb_queue_purge(&sk->sk_write_queue); | 689 | skb_queue_purge(&sk->sk_write_queue); |
690 | 690 | ||
691 | if (nlk->portid) { | 691 | if (nlk->portid && nlk->bound) { |
692 | struct netlink_notify n = { | 692 | struct netlink_notify n = { |
693 | .net = sock_net(sk), | 693 | .net = sock_net(sk), |
694 | .protocol = sk->sk_protocol, | 694 | .protocol = sk->sk_protocol, |
diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c index e9dd47b2a85b..879185fe183f 100644 --- a/net/openvswitch/actions.c +++ b/net/openvswitch/actions.c | |||
@@ -461,7 +461,7 @@ static int set_ipv6(struct sk_buff *skb, struct sw_flow_key *flow_key, | |||
461 | mask_ipv6_addr(saddr, key->ipv6_src, mask->ipv6_src, masked); | 461 | mask_ipv6_addr(saddr, key->ipv6_src, mask->ipv6_src, masked); |
462 | 462 | ||
463 | if (unlikely(memcmp(saddr, masked, sizeof(masked)))) { | 463 | if (unlikely(memcmp(saddr, masked, sizeof(masked)))) { |
464 | set_ipv6_addr(skb, key->ipv6_proto, saddr, masked, | 464 | set_ipv6_addr(skb, flow_key->ip.proto, saddr, masked, |
465 | true); | 465 | true); |
466 | memcpy(&flow_key->ipv6.addr.src, masked, | 466 | memcpy(&flow_key->ipv6.addr.src, masked, |
467 | sizeof(flow_key->ipv6.addr.src)); | 467 | sizeof(flow_key->ipv6.addr.src)); |
@@ -483,7 +483,7 @@ static int set_ipv6(struct sk_buff *skb, struct sw_flow_key *flow_key, | |||
483 | NULL, &flags) | 483 | NULL, &flags) |
484 | != NEXTHDR_ROUTING); | 484 | != NEXTHDR_ROUTING); |
485 | 485 | ||
486 | set_ipv6_addr(skb, key->ipv6_proto, daddr, masked, | 486 | set_ipv6_addr(skb, flow_key->ip.proto, daddr, masked, |
487 | recalc_csum); | 487 | recalc_csum); |
488 | memcpy(&flow_key->ipv6.addr.dst, masked, | 488 | memcpy(&flow_key->ipv6.addr.dst, masked, |
489 | sizeof(flow_key->ipv6.addr.dst)); | 489 | sizeof(flow_key->ipv6.addr.dst)); |
diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c index 1b9d286756be..b5fea1101faa 100644 --- a/net/openvswitch/conntrack.c +++ b/net/openvswitch/conntrack.c | |||
@@ -367,6 +367,7 @@ static int handle_fragments(struct net *net, struct sw_flow_key *key, | |||
367 | } else if (key->eth.type == htons(ETH_P_IPV6)) { | 367 | } else if (key->eth.type == htons(ETH_P_IPV6)) { |
368 | enum ip6_defrag_users user = IP6_DEFRAG_CONNTRACK_IN + zone; | 368 | enum ip6_defrag_users user = IP6_DEFRAG_CONNTRACK_IN + zone; |
369 | 369 | ||
370 | skb_orphan(skb); | ||
370 | memset(IP6CB(skb), 0, sizeof(struct inet6_skb_parm)); | 371 | memset(IP6CB(skb), 0, sizeof(struct inet6_skb_parm)); |
371 | err = nf_ct_frag6_gather(net, skb, user); | 372 | err = nf_ct_frag6_gather(net, skb, user); |
372 | if (err) | 373 | if (err) |
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index f12c17f355d9..18d0becbc46d 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c | |||
@@ -3521,6 +3521,7 @@ static int packet_mc_add(struct sock *sk, struct packet_mreq_max *mreq) | |||
3521 | i->ifindex = mreq->mr_ifindex; | 3521 | i->ifindex = mreq->mr_ifindex; |
3522 | i->alen = mreq->mr_alen; | 3522 | i->alen = mreq->mr_alen; |
3523 | memcpy(i->addr, mreq->mr_address, i->alen); | 3523 | memcpy(i->addr, mreq->mr_address, i->alen); |
3524 | memset(i->addr + i->alen, 0, sizeof(i->addr) - i->alen); | ||
3524 | i->count = 1; | 3525 | i->count = 1; |
3525 | i->next = po->mclist; | 3526 | i->next = po->mclist; |
3526 | po->mclist = i; | 3527 | po->mclist = i; |
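The af_packet hunk zero-fills the unused tail of the fixed-size i->addr after copying the mr_alen-byte multicast address, so later comparisons and copies covering the whole field never touch uninitialized memory. A small sketch of that pattern with illustrative names (alen is assumed to have been validated against the field size by the caller):

    #include <string.h>

    #define MAX_ADDR_LEN 32

    struct mc_entry {
        unsigned short alen;
        unsigned char  addr[MAX_ADDR_LEN];
    };

    /* Store a variable-length address in a fixed field; zero the tail so
     * memcmp() over sizeof(addr) gives deterministic results. */
    static void mc_entry_set_addr(struct mc_entry *e,
                                  const unsigned char *addr, unsigned short alen)
    {
        e->alen = alen;
        memcpy(e->addr, addr, alen);
        memset(e->addr + alen, 0, sizeof(e->addr) - alen);
    }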
diff --git a/net/rds/cong.c b/net/rds/cong.c index e6144b8246fd..6641bcf7c185 100644 --- a/net/rds/cong.c +++ b/net/rds/cong.c | |||
@@ -299,7 +299,7 @@ void rds_cong_set_bit(struct rds_cong_map *map, __be16 port) | |||
299 | i = be16_to_cpu(port) / RDS_CONG_MAP_PAGE_BITS; | 299 | i = be16_to_cpu(port) / RDS_CONG_MAP_PAGE_BITS; |
300 | off = be16_to_cpu(port) % RDS_CONG_MAP_PAGE_BITS; | 300 | off = be16_to_cpu(port) % RDS_CONG_MAP_PAGE_BITS; |
301 | 301 | ||
302 | __set_bit_le(off, (void *)map->m_page_addrs[i]); | 302 | set_bit_le(off, (void *)map->m_page_addrs[i]); |
303 | } | 303 | } |
304 | 304 | ||
305 | void rds_cong_clear_bit(struct rds_cong_map *map, __be16 port) | 305 | void rds_cong_clear_bit(struct rds_cong_map *map, __be16 port) |
@@ -313,7 +313,7 @@ void rds_cong_clear_bit(struct rds_cong_map *map, __be16 port) | |||
313 | i = be16_to_cpu(port) / RDS_CONG_MAP_PAGE_BITS; | 313 | i = be16_to_cpu(port) / RDS_CONG_MAP_PAGE_BITS; |
314 | off = be16_to_cpu(port) % RDS_CONG_MAP_PAGE_BITS; | 314 | off = be16_to_cpu(port) % RDS_CONG_MAP_PAGE_BITS; |
315 | 315 | ||
316 | __clear_bit_le(off, (void *)map->m_page_addrs[i]); | 316 | clear_bit_le(off, (void *)map->m_page_addrs[i]); |
317 | } | 317 | } |
318 | 318 | ||
319 | static int rds_cong_test_bit(struct rds_cong_map *map, __be16 port) | 319 | static int rds_cong_test_bit(struct rds_cong_map *map, __be16 port) |
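The rds/cong.c hunks swap the non-atomic __set_bit_le()/__clear_bit_le() for their atomic counterparts, so concurrent updates of the congestion map cannot lose bits to a racing read-modify-write. A rough userspace analogue using compiler atomic builtins on a byte-addressed bitmap (byte addressing matches little-endian bit numbering on either endianness; names are illustrative):

    #include <stdint.h>

    static void bitmap_set_bit_atomic(uint8_t *map, unsigned int nr)
    {
        __atomic_fetch_or(&map[nr / 8], (uint8_t)(1u << (nr % 8)),
                          __ATOMIC_RELAXED);
    }

    static void bitmap_clear_bit_atomic(uint8_t *map, unsigned int nr)
    {
        __atomic_fetch_and(&map[nr / 8], (uint8_t)~(1u << (nr % 8)),
                           __ATOMIC_RELAXED);
    }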
diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c index 8764970f0c24..310cabce2311 100644 --- a/net/rds/ib_cm.c +++ b/net/rds/ib_cm.c | |||
@@ -194,7 +194,7 @@ static void rds_ib_cm_fill_conn_param(struct rds_connection *conn, | |||
194 | dp->dp_protocol_major = RDS_PROTOCOL_MAJOR(protocol_version); | 194 | dp->dp_protocol_major = RDS_PROTOCOL_MAJOR(protocol_version); |
195 | dp->dp_protocol_minor = RDS_PROTOCOL_MINOR(protocol_version); | 195 | dp->dp_protocol_minor = RDS_PROTOCOL_MINOR(protocol_version); |
196 | dp->dp_protocol_minor_mask = cpu_to_be16(RDS_IB_SUPPORTED_PROTOCOLS); | 196 | dp->dp_protocol_minor_mask = cpu_to_be16(RDS_IB_SUPPORTED_PROTOCOLS); |
197 | dp->dp_ack_seq = rds_ib_piggyb_ack(ic); | 197 | dp->dp_ack_seq = cpu_to_be64(rds_ib_piggyb_ack(ic)); |
198 | 198 | ||
199 | /* Advertise flow control */ | 199 | /* Advertise flow control */ |
200 | if (ic->i_flowctl) { | 200 | if (ic->i_flowctl) { |
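dp_ack_seq is declared as a big-endian wire field, so the piggybacked ACK sequence must be converted from host order before it is sent; the hunk wraps it in cpu_to_be64(). The same conversion in plain userspace C, where htobe64() from <endian.h> stands in for cpu_to_be64():

    #include <endian.h>
    #include <stdint.h>

    struct conn_params_wire {
        uint64_t ack_seq_be;              /* 64-bit sequence, big-endian on the wire */
    };

    static void fill_conn_params(struct conn_params_wire *dp, uint64_t ack_seq)
    {
        dp->ack_seq_be = htobe64(ack_seq);   /* host order -> big endian */
    }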
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c index f18c35024207..80742edea96f 100644 --- a/net/sched/sch_generic.c +++ b/net/sched/sch_generic.c | |||
@@ -159,12 +159,15 @@ int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q, | |||
159 | if (validate) | 159 | if (validate) |
160 | skb = validate_xmit_skb_list(skb, dev); | 160 | skb = validate_xmit_skb_list(skb, dev); |
161 | 161 | ||
162 | if (skb) { | 162 | if (likely(skb)) { |
163 | HARD_TX_LOCK(dev, txq, smp_processor_id()); | 163 | HARD_TX_LOCK(dev, txq, smp_processor_id()); |
164 | if (!netif_xmit_frozen_or_stopped(txq)) | 164 | if (!netif_xmit_frozen_or_stopped(txq)) |
165 | skb = dev_hard_start_xmit(skb, dev, txq, &ret); | 165 | skb = dev_hard_start_xmit(skb, dev, txq, &ret); |
166 | 166 | ||
167 | HARD_TX_UNLOCK(dev, txq); | 167 | HARD_TX_UNLOCK(dev, txq); |
168 | } else { | ||
169 | spin_lock(root_lock); | ||
170 | return qdisc_qlen(q); | ||
168 | } | 171 | } |
169 | spin_lock(root_lock); | 172 | spin_lock(root_lock); |
170 | 173 | ||
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c index 8d3d3625130e..084718f9b3da 100644 --- a/net/sctp/outqueue.c +++ b/net/sctp/outqueue.c | |||
@@ -866,8 +866,10 @@ static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp) | |||
866 | * sender MUST assure that at least one T3-rtx | 866 | * sender MUST assure that at least one T3-rtx |
867 | * timer is running. | 867 | * timer is running. |
868 | */ | 868 | */ |
869 | if (chunk->chunk_hdr->type == SCTP_CID_FWD_TSN) | 869 | if (chunk->chunk_hdr->type == SCTP_CID_FWD_TSN) { |
870 | sctp_transport_reset_timers(transport); | 870 | sctp_transport_reset_t3_rtx(transport); |
871 | transport->last_time_sent = jiffies; | ||
872 | } | ||
871 | } | 873 | } |
872 | break; | 874 | break; |
873 | 875 | ||
@@ -924,8 +926,10 @@ static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp) | |||
924 | error = sctp_outq_flush_rtx(q, packet, | 926 | error = sctp_outq_flush_rtx(q, packet, |
925 | rtx_timeout, &start_timer); | 927 | rtx_timeout, &start_timer); |
926 | 928 | ||
927 | if (start_timer) | 929 | if (start_timer) { |
928 | sctp_transport_reset_timers(transport); | 930 | sctp_transport_reset_t3_rtx(transport); |
931 | transport->last_time_sent = jiffies; | ||
932 | } | ||
929 | 933 | ||
930 | /* This can happen on COOKIE-ECHO resend. Only | 934 | /* This can happen on COOKIE-ECHO resend. Only |
931 | * one chunk can get bundled with a COOKIE-ECHO. | 935 | * one chunk can get bundled with a COOKIE-ECHO. |
@@ -1062,7 +1066,8 @@ static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp) | |||
1062 | list_add_tail(&chunk->transmitted_list, | 1066 | list_add_tail(&chunk->transmitted_list, |
1063 | &transport->transmitted); | 1067 | &transport->transmitted); |
1064 | 1068 | ||
1065 | sctp_transport_reset_timers(transport); | 1069 | sctp_transport_reset_t3_rtx(transport); |
1070 | transport->last_time_sent = jiffies; | ||
1066 | 1071 | ||
1067 | /* Only let one DATA chunk get bundled with a | 1072 | /* Only let one DATA chunk get bundled with a |
1068 | * COOKIE-ECHO chunk. | 1073 | * COOKIE-ECHO chunk. |
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c index 7f0bf798205b..56f364d8f932 100644 --- a/net/sctp/sm_make_chunk.c +++ b/net/sctp/sm_make_chunk.c | |||
@@ -3080,8 +3080,7 @@ static __be16 sctp_process_asconf_param(struct sctp_association *asoc, | |||
3080 | return SCTP_ERROR_RSRC_LOW; | 3080 | return SCTP_ERROR_RSRC_LOW; |
3081 | 3081 | ||
3082 | /* Start the heartbeat timer. */ | 3082 | /* Start the heartbeat timer. */ |
3083 | if (!mod_timer(&peer->hb_timer, sctp_transport_timeout(peer))) | 3083 | sctp_transport_reset_hb_timer(peer); |
3084 | sctp_transport_hold(peer); | ||
3085 | asoc->new_transport = peer; | 3084 | asoc->new_transport = peer; |
3086 | break; | 3085 | break; |
3087 | case SCTP_PARAM_DEL_IP: | 3086 | case SCTP_PARAM_DEL_IP: |
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c index 7fe56d0acabf..41b081a64752 100644 --- a/net/sctp/sm_sideeffect.c +++ b/net/sctp/sm_sideeffect.c | |||
@@ -69,8 +69,6 @@ static int sctp_side_effects(sctp_event_t event_type, sctp_subtype_t subtype, | |||
69 | sctp_cmd_seq_t *commands, | 69 | sctp_cmd_seq_t *commands, |
70 | gfp_t gfp); | 70 | gfp_t gfp); |
71 | 71 | ||
72 | static void sctp_cmd_hb_timer_update(sctp_cmd_seq_t *cmds, | ||
73 | struct sctp_transport *t); | ||
74 | /******************************************************************** | 72 | /******************************************************************** |
75 | * Helper functions | 73 | * Helper functions |
76 | ********************************************************************/ | 74 | ********************************************************************/ |
@@ -367,6 +365,7 @@ void sctp_generate_heartbeat_event(unsigned long data) | |||
367 | struct sctp_association *asoc = transport->asoc; | 365 | struct sctp_association *asoc = transport->asoc; |
368 | struct sock *sk = asoc->base.sk; | 366 | struct sock *sk = asoc->base.sk; |
369 | struct net *net = sock_net(sk); | 367 | struct net *net = sock_net(sk); |
368 | u32 elapsed, timeout; | ||
370 | 369 | ||
371 | bh_lock_sock(sk); | 370 | bh_lock_sock(sk); |
372 | if (sock_owned_by_user(sk)) { | 371 | if (sock_owned_by_user(sk)) { |
@@ -378,6 +377,16 @@ void sctp_generate_heartbeat_event(unsigned long data) | |||
378 | goto out_unlock; | 377 | goto out_unlock; |
379 | } | 378 | } |
380 | 379 | ||
380 | /* Check if we should still send the heartbeat or reschedule */ | ||
381 | elapsed = jiffies - transport->last_time_sent; | ||
382 | timeout = sctp_transport_timeout(transport); | ||
383 | if (elapsed < timeout) { | ||
384 | elapsed = timeout - elapsed; | ||
385 | if (!mod_timer(&transport->hb_timer, jiffies + elapsed)) | ||
386 | sctp_transport_hold(transport); | ||
387 | goto out_unlock; | ||
388 | } | ||
389 | |||
381 | error = sctp_do_sm(net, SCTP_EVENT_T_TIMEOUT, | 390 | error = sctp_do_sm(net, SCTP_EVENT_T_TIMEOUT, |
382 | SCTP_ST_TIMEOUT(SCTP_EVENT_TIMEOUT_HEARTBEAT), | 391 | SCTP_ST_TIMEOUT(SCTP_EVENT_TIMEOUT_HEARTBEAT), |
383 | asoc->state, asoc->ep, asoc, | 392 | asoc->state, asoc->ep, asoc, |
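This block uses transport->last_time_sent, which the sctp_outq_flush() hunks above now record at every DATA transmission, to decide whether a heartbeat is really due: if less than one full heartbeat timeout has elapsed since the last send, the timer is pushed out by the remainder and no HEARTBEAT is generated. A sketch of the arithmetic with plain unsigned tick counters (unsigned subtraction stays correct across counter wraparound, the same property jiffies relies on):

    #include <stdbool.h>

    /* Returns true if the heartbeat should fire now; otherwise *resched_in
     * holds how many ticks to push the timer out by. */
    static bool heartbeat_due(unsigned long now, unsigned long last_sent,
                              unsigned long timeout, unsigned long *resched_in)
    {
        unsigned long elapsed = now - last_sent;

        if (elapsed < timeout) {
            *resched_in = timeout - elapsed;   /* traffic seen recently, defer */
            return false;
        }
        return true;
    }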
@@ -507,7 +516,7 @@ static void sctp_do_8_2_transport_strike(sctp_cmd_seq_t *commands, | |||
507 | 0); | 516 | 0); |
508 | 517 | ||
509 | /* Update the hb timer to resend a heartbeat every rto */ | 518 | /* Update the hb timer to resend a heartbeat every rto */ |
510 | sctp_cmd_hb_timer_update(commands, transport); | 519 | sctp_transport_reset_hb_timer(transport); |
511 | } | 520 | } |
512 | 521 | ||
513 | if (transport->state != SCTP_INACTIVE && | 522 | if (transport->state != SCTP_INACTIVE && |
@@ -634,11 +643,8 @@ static void sctp_cmd_hb_timers_start(sctp_cmd_seq_t *cmds, | |||
634 | * hold a reference on the transport to make sure none of | 643 | * hold a reference on the transport to make sure none of |
635 | * the needed data structures go away. | 644 | * the needed data structures go away. |
636 | */ | 645 | */ |
637 | list_for_each_entry(t, &asoc->peer.transport_addr_list, transports) { | 646 | list_for_each_entry(t, &asoc->peer.transport_addr_list, transports) |
638 | 647 | sctp_transport_reset_hb_timer(t); | |
639 | if (!mod_timer(&t->hb_timer, sctp_transport_timeout(t))) | ||
640 | sctp_transport_hold(t); | ||
641 | } | ||
642 | } | 648 | } |
643 | 649 | ||
644 | static void sctp_cmd_hb_timers_stop(sctp_cmd_seq_t *cmds, | 650 | static void sctp_cmd_hb_timers_stop(sctp_cmd_seq_t *cmds, |
@@ -669,15 +675,6 @@ static void sctp_cmd_t3_rtx_timers_stop(sctp_cmd_seq_t *cmds, | |||
669 | } | 675 | } |
670 | 676 | ||
671 | 677 | ||
672 | /* Helper function to update the heartbeat timer. */ | ||
673 | static void sctp_cmd_hb_timer_update(sctp_cmd_seq_t *cmds, | ||
674 | struct sctp_transport *t) | ||
675 | { | ||
676 | /* Update the heartbeat timer. */ | ||
677 | if (!mod_timer(&t->hb_timer, sctp_transport_timeout(t))) | ||
678 | sctp_transport_hold(t); | ||
679 | } | ||
680 | |||
681 | /* Helper function to handle the reception of an HEARTBEAT ACK. */ | 678 | /* Helper function to handle the reception of an HEARTBEAT ACK. */ |
682 | static void sctp_cmd_transport_on(sctp_cmd_seq_t *cmds, | 679 | static void sctp_cmd_transport_on(sctp_cmd_seq_t *cmds, |
683 | struct sctp_association *asoc, | 680 | struct sctp_association *asoc, |
@@ -742,8 +739,7 @@ static void sctp_cmd_transport_on(sctp_cmd_seq_t *cmds, | |||
742 | sctp_transport_update_rto(t, (jiffies - hbinfo->sent_at)); | 739 | sctp_transport_update_rto(t, (jiffies - hbinfo->sent_at)); |
743 | 740 | ||
744 | /* Update the heartbeat timer. */ | 741 | /* Update the heartbeat timer. */ |
745 | if (!mod_timer(&t->hb_timer, sctp_transport_timeout(t))) | 742 | sctp_transport_reset_hb_timer(t); |
746 | sctp_transport_hold(t); | ||
747 | 743 | ||
748 | if (was_unconfirmed && asoc->peer.transport_count == 1) | 744 | if (was_unconfirmed && asoc->peer.transport_count == 1) |
749 | sctp_transport_immediate_rtx(t); | 745 | sctp_transport_immediate_rtx(t); |
@@ -1614,7 +1610,7 @@ static int sctp_cmd_interpreter(sctp_event_t event_type, | |||
1614 | 1610 | ||
1615 | case SCTP_CMD_HB_TIMER_UPDATE: | 1611 | case SCTP_CMD_HB_TIMER_UPDATE: |
1616 | t = cmd->obj.transport; | 1612 | t = cmd->obj.transport; |
1617 | sctp_cmd_hb_timer_update(commands, t); | 1613 | sctp_transport_reset_hb_timer(t); |
1618 | break; | 1614 | break; |
1619 | 1615 | ||
1620 | case SCTP_CMD_HB_TIMERS_STOP: | 1616 | case SCTP_CMD_HB_TIMERS_STOP: |
diff --git a/net/sctp/transport.c b/net/sctp/transport.c index 9b6b48c7524e..81b86678be4d 100644 --- a/net/sctp/transport.c +++ b/net/sctp/transport.c | |||
@@ -183,7 +183,7 @@ static void sctp_transport_destroy(struct sctp_transport *transport) | |||
183 | /* Start T3_rtx timer if it is not already running and update the heartbeat | 183 | /* Start T3_rtx timer if it is not already running and update the heartbeat |
184 | * timer. This routine is called every time a DATA chunk is sent. | 184 | * timer. This routine is called every time a DATA chunk is sent. |
185 | */ | 185 | */ |
186 | void sctp_transport_reset_timers(struct sctp_transport *transport) | 186 | void sctp_transport_reset_t3_rtx(struct sctp_transport *transport) |
187 | { | 187 | { |
188 | /* RFC 2960 6.3.2 Retransmission Timer Rules | 188 | /* RFC 2960 6.3.2 Retransmission Timer Rules |
189 | * | 189 | * |
@@ -197,11 +197,18 @@ void sctp_transport_reset_timers(struct sctp_transport *transport) | |||
197 | if (!mod_timer(&transport->T3_rtx_timer, | 197 | if (!mod_timer(&transport->T3_rtx_timer, |
198 | jiffies + transport->rto)) | 198 | jiffies + transport->rto)) |
199 | sctp_transport_hold(transport); | 199 | sctp_transport_hold(transport); |
200 | } | ||
201 | |||
202 | void sctp_transport_reset_hb_timer(struct sctp_transport *transport) | ||
203 | { | ||
204 | unsigned long expires; | ||
200 | 205 | ||
201 | /* When a data chunk is sent, reset the heartbeat interval. */ | 206 | /* When a data chunk is sent, reset the heartbeat interval. */ |
202 | if (!mod_timer(&transport->hb_timer, | 207 | expires = jiffies + sctp_transport_timeout(transport); |
203 | sctp_transport_timeout(transport))) | 208 | if (time_before(transport->hb_timer.expires, expires) && |
204 | sctp_transport_hold(transport); | 209 | !mod_timer(&transport->hb_timer, |
210 | expires + prandom_u32_max(transport->rto))) | ||
211 | sctp_transport_hold(transport); | ||
205 | } | 212 | } |
206 | 213 | ||
207 | /* This transport has been assigned to an association. | 214 | /* This transport has been assigned to an association. |
@@ -595,13 +602,13 @@ void sctp_transport_burst_reset(struct sctp_transport *t) | |||
595 | unsigned long sctp_transport_timeout(struct sctp_transport *trans) | 602 | unsigned long sctp_transport_timeout(struct sctp_transport *trans) |
596 | { | 603 | { |
597 | /* RTO + timer slack +/- 50% of RTO */ | 604 | /* RTO + timer slack +/- 50% of RTO */ |
598 | unsigned long timeout = (trans->rto >> 1) + prandom_u32_max(trans->rto); | 605 | unsigned long timeout = trans->rto >> 1; |
599 | 606 | ||
600 | if (trans->state != SCTP_UNCONFIRMED && | 607 | if (trans->state != SCTP_UNCONFIRMED && |
601 | trans->state != SCTP_PF) | 608 | trans->state != SCTP_PF) |
602 | timeout += trans->hbinterval; | 609 | timeout += trans->hbinterval; |
603 | 610 | ||
604 | return timeout + jiffies; | 611 | return timeout; |
605 | } | 612 | } |
606 | 613 | ||
607 | /* Reset transport variables to their initial values */ | 614 | /* Reset transport variables to their initial values */ |
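With these transport.c hunks, sctp_transport_timeout() returns a relative interval (half the RTO, plus the heartbeat interval outside the UNCONFIRMED/PF states) and the randomization moves into the new sctp_transport_reset_hb_timer(): the timer is rearmed only when the freshly computed expiry lies after the currently programmed one, so outgoing DATA can push the next heartbeat further out but never pull it earlier. A compact userspace sketch of that policy (rand() stands in for prandom_u32_max(); rto is assumed non-zero; names are illustrative):

    #include <stdlib.h>

    struct hb_timer {
        unsigned long expires;            /* absolute tick of the next heartbeat */
    };

    static unsigned long hb_timeout(unsigned long rto, unsigned long hbinterval)
    {
        return (rto >> 1) + hbinterval;   /* relative, like sctp_transport_timeout() */
    }

    /* Rearm only if the new expiry is later than the programmed one. */
    static void hb_timer_reset(struct hb_timer *t, unsigned long now,
                               unsigned long rto, unsigned long hbinterval)
    {
        unsigned long expires = now + hb_timeout(rto, hbinterval);

        if ((long)(t->expires - expires) < 0)      /* wrap-safe time_before() */
            t->expires = expires + (unsigned long)rand() % rto;
    }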
diff --git a/net/switchdev/switchdev.c b/net/switchdev/switchdev.c index 2b9b98f1c2ff..b7e01d88bdc5 100644 --- a/net/switchdev/switchdev.c +++ b/net/switchdev/switchdev.c | |||
@@ -305,6 +305,8 @@ static void switchdev_port_attr_set_deferred(struct net_device *dev, | |||
305 | if (err && err != -EOPNOTSUPP) | 305 | if (err && err != -EOPNOTSUPP) |
306 | netdev_err(dev, "failed (err=%d) to set attribute (id=%d)\n", | 306 | netdev_err(dev, "failed (err=%d) to set attribute (id=%d)\n", |
307 | err, attr->id); | 307 | err, attr->id); |
308 | if (attr->complete) | ||
309 | attr->complete(dev, err, attr->complete_priv); | ||
308 | } | 310 | } |
309 | 311 | ||
310 | static int switchdev_port_attr_set_defer(struct net_device *dev, | 312 | static int switchdev_port_attr_set_defer(struct net_device *dev, |
@@ -434,6 +436,8 @@ static void switchdev_port_obj_add_deferred(struct net_device *dev, | |||
434 | if (err && err != -EOPNOTSUPP) | 436 | if (err && err != -EOPNOTSUPP) |
435 | netdev_err(dev, "failed (err=%d) to add object (id=%d)\n", | 437 | netdev_err(dev, "failed (err=%d) to add object (id=%d)\n", |
436 | err, obj->id); | 438 | err, obj->id); |
439 | if (obj->complete) | ||
440 | obj->complete(dev, err, obj->complete_priv); | ||
437 | } | 441 | } |
438 | 442 | ||
439 | static int switchdev_port_obj_add_defer(struct net_device *dev, | 443 | static int switchdev_port_obj_add_defer(struct net_device *dev, |
@@ -502,6 +506,8 @@ static void switchdev_port_obj_del_deferred(struct net_device *dev, | |||
502 | if (err && err != -EOPNOTSUPP) | 506 | if (err && err != -EOPNOTSUPP) |
503 | netdev_err(dev, "failed (err=%d) to del object (id=%d)\n", | 507 | netdev_err(dev, "failed (err=%d) to del object (id=%d)\n", |
504 | err, obj->id); | 508 | err, obj->id); |
509 | if (obj->complete) | ||
510 | obj->complete(dev, err, obj->complete_priv); | ||
505 | } | 511 | } |
506 | 512 | ||
507 | static int switchdev_port_obj_del_defer(struct net_device *dev, | 513 | static int switchdev_port_obj_del_defer(struct net_device *dev, |
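All three switchdev deferral paths now invoke an optional complete() callback with the operation's final status once the deferred set/add/del has actually run, which lets the caller (for instance the bridge MDB code earlier in this diff) learn whether the hardware offload really succeeded. A bare-bones sketch of the completion-callback pattern with illustrative types, not the switchdev API itself:

    #include <stdio.h>

    struct deferred_op {
        int  (*do_op)(void *arg);
        void  *arg;
        /* optional, mirrors obj->complete / obj->complete_priv */
        void (*complete)(int err, void *priv);
        void  *priv;
    };

    static void run_deferred(const struct deferred_op *op)
    {
        int err = op->do_op(op->arg);

        if (err)
            fprintf(stderr, "deferred op failed: %d\n", err);
        if (op->complete)
            op->complete(err, op->priv);   /* called on success and on failure */
    }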
diff --git a/net/tipc/core.c b/net/tipc/core.c index 03a842870c52..e2bdb07a49a2 100644 --- a/net/tipc/core.c +++ b/net/tipc/core.c | |||
@@ -69,6 +69,7 @@ static int __net_init tipc_init_net(struct net *net) | |||
69 | if (err) | 69 | if (err) |
70 | goto out_nametbl; | 70 | goto out_nametbl; |
71 | 71 | ||
72 | INIT_LIST_HEAD(&tn->dist_queue); | ||
72 | err = tipc_topsrv_start(net); | 73 | err = tipc_topsrv_start(net); |
73 | if (err) | 74 | if (err) |
74 | goto out_subscr; | 75 | goto out_subscr; |
diff --git a/net/tipc/core.h b/net/tipc/core.h index 5504d63503df..eff58dc53aa1 100644 --- a/net/tipc/core.h +++ b/net/tipc/core.h | |||
@@ -103,6 +103,9 @@ struct tipc_net { | |||
103 | spinlock_t nametbl_lock; | 103 | spinlock_t nametbl_lock; |
104 | struct name_table *nametbl; | 104 | struct name_table *nametbl; |
105 | 105 | ||
106 | /* Name dist queue */ | ||
107 | struct list_head dist_queue; | ||
108 | |||
106 | /* Topology subscription server */ | 109 | /* Topology subscription server */ |
107 | struct tipc_server *topsrv; | 110 | struct tipc_server *topsrv; |
108 | atomic_t subscription_count; | 111 | atomic_t subscription_count; |
diff --git a/net/tipc/name_distr.c b/net/tipc/name_distr.c index ebe9d0ff6e9e..6b626a64b517 100644 --- a/net/tipc/name_distr.c +++ b/net/tipc/name_distr.c | |||
@@ -40,11 +40,6 @@ | |||
40 | 40 | ||
41 | int sysctl_tipc_named_timeout __read_mostly = 2000; | 41 | int sysctl_tipc_named_timeout __read_mostly = 2000; |
42 | 42 | ||
43 | /** | ||
44 | * struct tipc_dist_queue - queue holding deferred name table updates | ||
45 | */ | ||
46 | static struct list_head tipc_dist_queue = LIST_HEAD_INIT(tipc_dist_queue); | ||
47 | |||
48 | struct distr_queue_item { | 43 | struct distr_queue_item { |
49 | struct distr_item i; | 44 | struct distr_item i; |
50 | u32 dtype; | 45 | u32 dtype; |
@@ -229,12 +224,31 @@ static void tipc_publ_purge(struct net *net, struct publication *publ, u32 addr) | |||
229 | kfree_rcu(p, rcu); | 224 | kfree_rcu(p, rcu); |
230 | } | 225 | } |
231 | 226 | ||
227 | /** | ||
228 | * tipc_dist_queue_purge - remove deferred updates from a node that went down | ||
229 | */ | ||
230 | static void tipc_dist_queue_purge(struct net *net, u32 addr) | ||
231 | { | ||
232 | struct tipc_net *tn = net_generic(net, tipc_net_id); | ||
233 | struct distr_queue_item *e, *tmp; | ||
234 | |||
235 | spin_lock_bh(&tn->nametbl_lock); | ||
236 | list_for_each_entry_safe(e, tmp, &tn->dist_queue, next) { | ||
237 | if (e->node != addr) | ||
238 | continue; | ||
239 | list_del(&e->next); | ||
240 | kfree(e); | ||
241 | } | ||
242 | spin_unlock_bh(&tn->nametbl_lock); | ||
243 | } | ||
244 | |||
232 | void tipc_publ_notify(struct net *net, struct list_head *nsub_list, u32 addr) | 245 | void tipc_publ_notify(struct net *net, struct list_head *nsub_list, u32 addr) |
233 | { | 246 | { |
234 | struct publication *publ, *tmp; | 247 | struct publication *publ, *tmp; |
235 | 248 | ||
236 | list_for_each_entry_safe(publ, tmp, nsub_list, nodesub_list) | 249 | list_for_each_entry_safe(publ, tmp, nsub_list, nodesub_list) |
237 | tipc_publ_purge(net, publ, addr); | 250 | tipc_publ_purge(net, publ, addr); |
251 | tipc_dist_queue_purge(net, addr); | ||
238 | } | 252 | } |
239 | 253 | ||
240 | /** | 254 | /** |
@@ -279,9 +293,11 @@ static bool tipc_update_nametbl(struct net *net, struct distr_item *i, | |||
279 | * tipc_named_add_backlog - add a failed name table update to the backlog | 293 | * tipc_named_add_backlog - add a failed name table update to the backlog |
280 | * | 294 | * |
281 | */ | 295 | */ |
282 | static void tipc_named_add_backlog(struct distr_item *i, u32 type, u32 node) | 296 | static void tipc_named_add_backlog(struct net *net, struct distr_item *i, |
297 | u32 type, u32 node) | ||
283 | { | 298 | { |
284 | struct distr_queue_item *e; | 299 | struct distr_queue_item *e; |
300 | struct tipc_net *tn = net_generic(net, tipc_net_id); | ||
285 | unsigned long now = get_jiffies_64(); | 301 | unsigned long now = get_jiffies_64(); |
286 | 302 | ||
287 | e = kzalloc(sizeof(*e), GFP_ATOMIC); | 303 | e = kzalloc(sizeof(*e), GFP_ATOMIC); |
@@ -291,7 +307,7 @@ static void tipc_named_add_backlog(struct distr_item *i, u32 type, u32 node) | |||
291 | e->node = node; | 307 | e->node = node; |
292 | e->expires = now + msecs_to_jiffies(sysctl_tipc_named_timeout); | 308 | e->expires = now + msecs_to_jiffies(sysctl_tipc_named_timeout); |
293 | memcpy(e, i, sizeof(*i)); | 309 | memcpy(e, i, sizeof(*i)); |
294 | list_add_tail(&e->next, &tipc_dist_queue); | 310 | list_add_tail(&e->next, &tn->dist_queue); |
295 | } | 311 | } |
296 | 312 | ||
297 | /** | 313 | /** |
@@ -301,10 +317,11 @@ static void tipc_named_add_backlog(struct distr_item *i, u32 type, u32 node) | |||
301 | void tipc_named_process_backlog(struct net *net) | 317 | void tipc_named_process_backlog(struct net *net) |
302 | { | 318 | { |
303 | struct distr_queue_item *e, *tmp; | 319 | struct distr_queue_item *e, *tmp; |
320 | struct tipc_net *tn = net_generic(net, tipc_net_id); | ||
304 | char addr[16]; | 321 | char addr[16]; |
305 | unsigned long now = get_jiffies_64(); | 322 | unsigned long now = get_jiffies_64(); |
306 | 323 | ||
307 | list_for_each_entry_safe(e, tmp, &tipc_dist_queue, next) { | 324 | list_for_each_entry_safe(e, tmp, &tn->dist_queue, next) { |
308 | if (time_after(e->expires, now)) { | 325 | if (time_after(e->expires, now)) { |
309 | if (!tipc_update_nametbl(net, &e->i, e->node, e->dtype)) | 326 | if (!tipc_update_nametbl(net, &e->i, e->node, e->dtype)) |
310 | continue; | 327 | continue; |
@@ -344,7 +361,7 @@ void tipc_named_rcv(struct net *net, struct sk_buff_head *inputq) | |||
344 | node = msg_orignode(msg); | 361 | node = msg_orignode(msg); |
345 | while (count--) { | 362 | while (count--) { |
346 | if (!tipc_update_nametbl(net, item, node, mtype)) | 363 | if (!tipc_update_nametbl(net, item, node, mtype)) |
347 | tipc_named_add_backlog(item, mtype, node); | 364 | tipc_named_add_backlog(net, item, mtype, node); |
348 | item++; | 365 | item++; |
349 | } | 366 | } |
350 | kfree_skb(skb); | 367 | kfree_skb(skb); |
diff --git a/net/vmw_vsock/vmci_transport.c b/net/vmw_vsock/vmci_transport.c index 662bdd20a748..56214736fe88 100644 --- a/net/vmw_vsock/vmci_transport.c +++ b/net/vmw_vsock/vmci_transport.c | |||
@@ -1735,11 +1735,8 @@ static int vmci_transport_dgram_dequeue(struct vsock_sock *vsk, | |||
1735 | /* Retrieve the head sk_buff from the socket's receive queue. */ | 1735 | /* Retrieve the head sk_buff from the socket's receive queue. */ |
1736 | err = 0; | 1736 | err = 0; |
1737 | skb = skb_recv_datagram(&vsk->sk, flags, noblock, &err); | 1737 | skb = skb_recv_datagram(&vsk->sk, flags, noblock, &err); |
1738 | if (err) | ||
1739 | return err; | ||
1740 | |||
1741 | if (!skb) | 1738 | if (!skb) |
1742 | return -EAGAIN; | 1739 | return err; |
1743 | 1740 | ||
1744 | dg = (struct vmci_datagram *)skb->data; | 1741 | dg = (struct vmci_datagram *)skb->data; |
1745 | if (!dg) | 1742 | if (!dg) |
@@ -2154,7 +2151,7 @@ module_exit(vmci_transport_exit); | |||
2154 | 2151 | ||
2155 | MODULE_AUTHOR("VMware, Inc."); | 2152 | MODULE_AUTHOR("VMware, Inc."); |
2156 | MODULE_DESCRIPTION("VMCI transport for Virtual Sockets"); | 2153 | MODULE_DESCRIPTION("VMCI transport for Virtual Sockets"); |
2157 | MODULE_VERSION("1.0.3.0-k"); | 2154 | MODULE_VERSION("1.0.4.0-k"); |
2158 | MODULE_LICENSE("GPL v2"); | 2155 | MODULE_LICENSE("GPL v2"); |
2159 | MODULE_ALIAS("vmware_vsock"); | 2156 | MODULE_ALIAS("vmware_vsock"); |
2160 | MODULE_ALIAS_NETPROTO(PF_VSOCK); | 2157 | MODULE_ALIAS_NETPROTO(PF_VSOCK); |
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 98c924260b3d..056a7307862b 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c | |||
@@ -13216,7 +13216,7 @@ static int nl80211_netlink_notify(struct notifier_block * nb, | |||
13216 | struct wireless_dev *wdev; | 13216 | struct wireless_dev *wdev; |
13217 | struct cfg80211_beacon_registration *reg, *tmp; | 13217 | struct cfg80211_beacon_registration *reg, *tmp; |
13218 | 13218 | ||
13219 | if (state != NETLINK_URELEASE) | 13219 | if (state != NETLINK_URELEASE || notify->protocol != NETLINK_GENERIC) |
13220 | return NOTIFY_DONE; | 13220 | return NOTIFY_DONE; |
13221 | 13221 | ||
13222 | rcu_read_lock(); | 13222 | rcu_read_lock(); |