Diffstat (limited to 'net/core/dev.c')
 -rw-r--r--   net/core/dev.c   152
 1 file changed, 99 insertions, 53 deletions
diff --git a/net/core/dev.c b/net/core/dev.c
index 45109b70664e..1796cef55ab5 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -660,6 +660,27 @@ __setup("netdev=", netdev_boot_setup);
  *******************************************************************************/
 
 /**
+ * dev_get_iflink - get 'iflink' value of a interface
+ * @dev: targeted interface
+ *
+ * Indicates the ifindex the interface is linked to.
+ * Physical interfaces have the same 'ifindex' and 'iflink' values.
+ */
+
+int dev_get_iflink(const struct net_device *dev)
+{
+	if (dev->netdev_ops && dev->netdev_ops->ndo_get_iflink)
+		return dev->netdev_ops->ndo_get_iflink(dev);
+
+	/* If dev->rtnl_link_ops is set, it's a virtual interface. */
+	if (dev->rtnl_link_ops)
+		return 0;
+
+	return dev->ifindex;
+}
+EXPORT_SYMBOL(dev_get_iflink);
+
+/**
  * __dev_get_by_name - find a device by its name
  * @net: the applicable net namespace
  * @name: name to find
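With this hook, a stacked device reports the ifindex of its lower device through ndo_get_iflink instead of writing dev->iflink directly. A minimal sketch of how a virtual driver might wire this up (the foo_* names and foo_priv layout are hypothetical, not part of this patch):

#include <linux/netdevice.h>

struct foo_priv {
	struct net_device *lowerdev;	/* device we are stacked on */
};

static int foo_get_iflink(const struct net_device *dev)
{
	const struct foo_priv *priv = netdev_priv(dev);

	/* Report the ifindex of the lower device. */
	return priv->lowerdev->ifindex;
}

static const struct net_device_ops foo_netdev_ops = {
	.ndo_get_iflink	= foo_get_iflink,
};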
@@ -1385,7 +1406,7 @@ static int __dev_close(struct net_device *dev)
 	return retval;
 }
 
-static int dev_close_many(struct list_head *head)
+int dev_close_many(struct list_head *head, bool unlink)
 {
 	struct net_device *dev, *tmp;
 
@@ -1399,11 +1420,13 @@ static int dev_close_many(struct list_head *head)
 	list_for_each_entry_safe(dev, tmp, head, close_list) {
 		rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL);
 		call_netdevice_notifiers(NETDEV_DOWN, dev);
-		list_del_init(&dev->close_list);
+		if (unlink)
+			list_del_init(&dev->close_list);
 	}
 
 	return 0;
 }
+EXPORT_SYMBOL(dev_close_many);
 
 /**
  * dev_close - shutdown an interface.
@@ -1420,7 +1443,7 @@ int dev_close(struct net_device *dev)
 		LIST_HEAD(single);
 
 		list_add(&dev->close_list, &single);
-		dev_close_many(&single);
+		dev_close_many(&single, true);
 		list_del(&single);
 	}
 	return 0;
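The new unlink flag lets an external caller close a batch of devices while keeping them on its own close_list for further processing. An illustrative sketch only (foo_close_pair and the two devices are hypothetical; caller holds RTNL):

static void foo_close_pair(struct net_device *dev_a, struct net_device *dev_b)
{
	struct net_device *dev, *tmp;
	LIST_HEAD(close_list);

	list_add_tail(&dev_a->close_list, &close_list);
	list_add_tail(&dev_b->close_list, &close_list);

	/* unlink == false: entries stay on close_list for the caller */
	dev_close_many(&close_list, false);

	list_for_each_entry_safe(dev, tmp, &close_list, close_list)
		list_del_init(&dev->close_list);
}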
@@ -1607,6 +1630,22 @@ int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
 }
 EXPORT_SYMBOL(call_netdevice_notifiers);
 
+#ifdef CONFIG_NET_CLS_ACT
+static struct static_key ingress_needed __read_mostly;
+
+void net_inc_ingress_queue(void)
+{
+	static_key_slow_inc(&ingress_needed);
+}
+EXPORT_SYMBOL_GPL(net_inc_ingress_queue);
+
+void net_dec_ingress_queue(void)
+{
+	static_key_slow_dec(&ingress_needed);
+}
+EXPORT_SYMBOL_GPL(net_dec_ingress_queue);
+#endif
+
 static struct static_key netstamp_needed __read_mostly;
 #ifdef HAVE_JUMP_LABEL
 /* We are not allowed to call static_key_slow_dec() from irq context
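The idea is that the ingress qdisc bumps this key only while an ingress queue is actually configured, so the receive hot path (see the static_key_false() check added further down) stays behind a patched-out branch otherwise. A hedged sketch of the attach/detach side, with illustrative function names rather than the exact qdisc code:

static int ingress_init(struct Qdisc *sch, struct nlattr *opt)
{
	net_inc_ingress_queue();	/* enable the static branch */
	return 0;
}

static void ingress_destroy(struct Qdisc *sch)
{
	net_dec_ingress_queue();	/* disable it when the last user goes away */
}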
@@ -1694,6 +1733,7 @@ int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
 	}
 
 	skb_scrub_packet(skb, true);
+	skb->priority = 0;
 	skb->protocol = eth_type_trans(skb, dev);
 	skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
 
@@ -1737,7 +1777,8 @@ static inline int deliver_skb(struct sk_buff *skb,
 
 static inline void deliver_ptype_list_skb(struct sk_buff *skb,
 					  struct packet_type **pt,
-					  struct net_device *dev, __be16 type,
+					  struct net_device *orig_dev,
+					  __be16 type,
 					  struct list_head *ptype_list)
 {
 	struct packet_type *ptype, *pt_prev = *pt;
@@ -1746,7 +1787,7 @@ static inline void deliver_ptype_list_skb(struct sk_buff *skb,
 		if (ptype->type != type)
 			continue;
 		if (pt_prev)
-			deliver_skb(skb, pt_prev, dev);
+			deliver_skb(skb, pt_prev, orig_dev);
 		pt_prev = ptype;
 	}
 	*pt = pt_prev;
@@ -2559,12 +2600,26 @@ static netdev_features_t harmonize_features(struct sk_buff *skb,
 	return features;
 }
 
+netdev_features_t passthru_features_check(struct sk_buff *skb,
+					  struct net_device *dev,
+					  netdev_features_t features)
+{
+	return features;
+}
+EXPORT_SYMBOL(passthru_features_check);
+
+static netdev_features_t dflt_features_check(const struct sk_buff *skb,
+					     struct net_device *dev,
+					     netdev_features_t features)
+{
+	return vlan_features_check(skb, features);
+}
+
 netdev_features_t netif_skb_features(struct sk_buff *skb)
 {
 	struct net_device *dev = skb->dev;
 	netdev_features_t features = dev->features;
 	u16 gso_segs = skb_shinfo(skb)->gso_segs;
-	__be16 protocol = skb->protocol;
 
 	if (gso_segs > dev->gso_max_segs || gso_segs < dev->gso_min_segs)
 		features &= ~NETIF_F_GSO_MASK;
@@ -2576,34 +2631,17 @@ netdev_features_t netif_skb_features(struct sk_buff *skb)
 	if (skb->encapsulation)
 		features &= dev->hw_enc_features;
 
-	if (!skb_vlan_tag_present(skb)) {
-		if (unlikely(protocol == htons(ETH_P_8021Q) ||
-			     protocol == htons(ETH_P_8021AD))) {
-			struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
-			protocol = veh->h_vlan_encapsulated_proto;
-		} else {
-			goto finalize;
-		}
-	}
-
-	features = netdev_intersect_features(features,
-					     dev->vlan_features |
-					     NETIF_F_HW_VLAN_CTAG_TX |
-					     NETIF_F_HW_VLAN_STAG_TX);
-
-	if (protocol == htons(ETH_P_8021Q) || protocol == htons(ETH_P_8021AD))
+	if (skb_vlan_tagged(skb))
 		features = netdev_intersect_features(features,
-						     NETIF_F_SG |
-						     NETIF_F_HIGHDMA |
-						     NETIF_F_FRAGLIST |
-						     NETIF_F_GEN_CSUM |
+						     dev->vlan_features |
 						     NETIF_F_HW_VLAN_CTAG_TX |
 						     NETIF_F_HW_VLAN_STAG_TX);
 
-finalize:
 	if (dev->netdev_ops->ndo_features_check)
 		features &= dev->netdev_ops->ndo_features_check(skb, dev,
 								 features);
+	else
+		features &= dflt_features_check(skb, dev, features);
 
 	return harmonize_features(skb, features);
 }
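passthru_features_check() is exported so a device that needs no per-skb feature restriction can plug it straight into its ops instead of open-coding a no-op ndo_features_check. A hedged sketch of how a driver might use it (foo_netdev_ops is hypothetical):

static const struct net_device_ops foo_netdev_ops = {
	/* No per-packet feature restrictions: pass features through as-is. */
	.ndo_features_check	= passthru_features_check,
};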
@@ -2675,7 +2713,7 @@ static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device
 	if (unlikely(!skb))
 		goto out_null;
 
-	if (netif_needs_gso(dev, skb, features)) {
+	if (netif_needs_gso(skb, features)) {
 		struct sk_buff *segs;
 
 		segs = skb_gso_segment(skb, features);
@@ -2857,7 +2895,7 @@ EXPORT_SYMBOL(xmit_recursion);
  * dev_loopback_xmit - loop back @skb
  * @skb: buffer to transmit
  */
-int dev_loopback_xmit(struct sk_buff *skb)
+int dev_loopback_xmit(struct sock *sk, struct sk_buff *skb)
 {
 	skb_reset_mac_header(skb);
 	__skb_pull(skb, skb_network_offset(skb));
@@ -2995,11 +3033,11 @@ out:
 	return rc;
 }
 
-int dev_queue_xmit(struct sk_buff *skb)
+int dev_queue_xmit_sk(struct sock *sk, struct sk_buff *skb)
 {
 	return __dev_queue_xmit(skb, NULL);
 }
-EXPORT_SYMBOL(dev_queue_xmit);
+EXPORT_SYMBOL(dev_queue_xmit_sk);
 
 int dev_queue_xmit_accel(struct sk_buff *skb, void *accel_priv)
 {
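Existing callers that do not care about the originating socket presumably keep a thin wrapper that forwards skb->sk; the companion header change is outside this file, but it would look roughly like the sketch below (an assumption, not shown by this diff):

static inline int dev_queue_xmit(struct sk_buff *skb)
{
	/* Forward the skb's own socket as the new first argument. */
	return dev_queue_xmit_sk(skb->sk, skb);
}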
@@ -3525,7 +3563,7 @@ static inline struct sk_buff *handle_ing(struct sk_buff *skb,
 	struct netdev_queue *rxq = rcu_dereference(skb->dev->ingress_queue);
 
 	if (!rxq || rcu_access_pointer(rxq->qdisc) == &noop_qdisc)
-		goto out;
+		return skb;
 
 	if (*pt_prev) {
 		*ret = deliver_skb(skb, *pt_prev, orig_dev);
@@ -3539,8 +3577,6 @@ static inline struct sk_buff *handle_ing(struct sk_buff *skb,
 		return NULL;
 	}
 
-out:
-	skb->tc_verd = 0;
 	return skb;
 }
 #endif
@@ -3676,12 +3712,15 @@ another_round:
 
 skip_taps:
 #ifdef CONFIG_NET_CLS_ACT
-	skb = handle_ing(skb, &pt_prev, &ret, orig_dev);
-	if (!skb)
-		goto unlock;
+	if (static_key_false(&ingress_needed)) {
+		skb = handle_ing(skb, &pt_prev, &ret, orig_dev);
+		if (!skb)
+			goto unlock;
+	}
+
+	skb->tc_verd = 0;
 ncls:
 #endif
-
 	if (pfmemalloc && !skb_pfmemalloc_protocol(skb))
 		goto drop;
 
@@ -3831,13 +3870,13 @@ static int netif_receive_skb_internal(struct sk_buff *skb)
  *	NET_RX_SUCCESS: no congestion
  *	NET_RX_DROP: packet was dropped
  */
-int netif_receive_skb(struct sk_buff *skb)
+int netif_receive_skb_sk(struct sock *sk, struct sk_buff *skb)
 {
 	trace_netif_receive_skb_entry(skb);
 
 	return netif_receive_skb_internal(skb);
 }
-EXPORT_SYMBOL(netif_receive_skb);
+EXPORT_SYMBOL(netif_receive_skb_sk);
 
 /* Network device is going away, flush any packets still pending
  * Called with irqs disabled.
@@ -5914,6 +5953,24 @@ int dev_get_phys_port_id(struct net_device *dev,
 EXPORT_SYMBOL(dev_get_phys_port_id);
 
 /**
+ *	dev_get_phys_port_name - Get device physical port name
+ *	@dev: device
+ *	@name: port name
+ *
+ *	Get device physical port name
+ */
+int dev_get_phys_port_name(struct net_device *dev,
+			   char *name, size_t len)
+{
+	const struct net_device_ops *ops = dev->netdev_ops;
+
+	if (!ops->ndo_get_phys_port_name)
+		return -EOPNOTSUPP;
+	return ops->ndo_get_phys_port_name(dev, name, len);
+}
+EXPORT_SYMBOL(dev_get_phys_port_name);
+
+/**
  *	dev_new_index - allocate an ifindex
  *	@net: the applicable net namespace
  *
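A driver exposes a port name by filling the caller's buffer from its ndo_get_phys_port_name hook. A minimal sketch under assumed names (struct foo_port and the "p%u" naming scheme are illustrative, not from this patch):

struct foo_port {
	u32 pport;	/* hypothetical physical port number */
};

static int foo_get_phys_port_name(struct net_device *dev,
				  char *name, size_t len)
{
	struct foo_port *port = netdev_priv(dev);
	int n;

	n = snprintf(name, len, "p%u", port->pport);
	if (n < 0 || n >= len)
		return -EINVAL;	/* buffer too small for the name */
	return 0;
}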
@@ -5970,7 +6027,7 @@ static void rollback_registered_many(struct list_head *head)
 	/* If device is running, close it first. */
 	list_for_each_entry(dev, head, unreg_list)
 		list_add_tail(&dev->close_list, &close_head);
-	dev_close_many(&close_head);
+	dev_close_many(&close_head, true);
 
 	list_for_each_entry(dev, head, unreg_list) {
 		/* And unlink it from device chain. */
@@ -6297,8 +6354,6 @@ int register_netdevice(struct net_device *dev)
 	spin_lock_init(&dev->addr_list_lock);
 	netdev_set_addr_lockdep_class(dev);
 
-	dev->iflink = -1;
-
 	ret = dev_get_valid_name(net, dev, dev->name);
 	if (ret < 0)
 		goto out;
@@ -6328,9 +6383,6 @@ int register_netdevice(struct net_device *dev)
 	else if (__dev_get_by_index(net, dev->ifindex))
 		goto err_uninit;
 
-	if (dev->iflink == -1)
-		dev->iflink = dev->ifindex;
-
 	/* Transfer changeable features to wanted_features and enable
 	 * software offloads (GSO and GRO).
 	 */
@@ -6843,8 +6895,6 @@ void free_netdev(struct net_device *dev)
 {
 	struct napi_struct *p, *n;
 
-	release_net(dev_net(dev));
-
 	netif_free_tx_queues(dev);
 #ifdef CONFIG_SYSFS
 	kvfree(dev->_rx);
@@ -7045,12 +7095,8 @@ int dev_change_net_namespace(struct net_device *dev, struct net *net, const char
 	dev_net_set(dev, net);
 
 	/* If there is an ifindex conflict assign a new one */
-	if (__dev_get_by_index(net, dev->ifindex)) {
-		int iflink = (dev->iflink == dev->ifindex);
+	if (__dev_get_by_index(net, dev->ifindex))
 		dev->ifindex = dev_new_index(net);
-		if (iflink)
-			dev->iflink = dev->ifindex;
-	}
 
 	/* Send a netdev-add uevent to the new namespace */
 	kobject_uevent(&dev->dev.kobj, KOBJ_ADD);