author		Takashi Iwai <tiwai@suse.de>	2010-03-22 12:05:48 -0400
committer	Takashi Iwai <tiwai@suse.de>	2010-03-22 12:05:48 -0400
commit		2fb20b61550d3c5335e59819ed22734900d4d6e3 (patch)
tree		5ac7690306a0230b51e79afe5cfd3e6575b98cb1 /net/core/dev.c
parent		23caaf19b11eda7054348452e1618d4512a86907 (diff)
parent		6da7a2aa899f75116e1a62cef78c358ada9878b7 (diff)

Merge branch 'topic/misc' into topic/usb

Diffstat (limited to 'net/core/dev.c')

-rw-r--r--	net/core/dev.c	288
1 file changed, 198 insertions(+), 90 deletions(-)
diff --git a/net/core/dev.c b/net/core/dev.c
index be9924f60ec..bcc490cc945 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1113,19 +1113,7 @@ void dev_load(struct net *net, const char *name)
 }
 EXPORT_SYMBOL(dev_load);
 
-/**
- * dev_open - prepare an interface for use.
- * @dev: device to open
- *
- * Takes a device from down to up state. The device's private open
- * function is invoked and then the multicast lists are loaded. Finally
- * the device is moved into the up state and a %NETDEV_UP message is
- * sent to the netdev notifier chain.
- *
- * Calling this function on an active interface is a nop. On a failure
- * a negative errno code is returned.
- */
-int dev_open(struct net_device *dev)
+static int __dev_open(struct net_device *dev)
 {
 	const struct net_device_ops *ops = dev->netdev_ops;
 	int ret;
@@ -1133,13 +1121,6 @@ int dev_open(struct net_device *dev)
 	ASSERT_RTNL();
 
 	/*
-	 * Is it already up?
-	 */
-
-	if (dev->flags & IFF_UP)
-		return 0;
-
-	/*
 	 * Is it even present?
 	 */
 	if (!netif_device_present(dev))
@@ -1187,36 +1168,57 @@ int dev_open(struct net_device *dev)
 	 * Wakeup transmit queue engine
 	 */
 	dev_activate(dev);
-
-	/*
-	 * ... and announce new interface.
-	 */
-	call_netdevice_notifiers(NETDEV_UP, dev);
 	}
 
 	return ret;
 }
-EXPORT_SYMBOL(dev_open);
 
 /**
- * dev_close - shutdown an interface.
- * @dev: device to shutdown
+ * dev_open - prepare an interface for use.
+ * @dev: device to open
  *
- * This function moves an active device into down state. A
- * %NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
- * is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
- * chain.
+ * Takes a device from down to up state. The device's private open
+ * function is invoked and then the multicast lists are loaded. Finally
+ * the device is moved into the up state and a %NETDEV_UP message is
+ * sent to the netdev notifier chain.
+ *
+ * Calling this function on an active interface is a nop. On a failure
+ * a negative errno code is returned.
  */
-int dev_close(struct net_device *dev)
+int dev_open(struct net_device *dev)
+{
+	int ret;
+
+	/*
+	 * Is it already up?
+	 */
+	if (dev->flags & IFF_UP)
+		return 0;
+
+	/*
+	 * Open device
+	 */
+	ret = __dev_open(dev);
+	if (ret < 0)
+		return ret;
+
+	/*
+	 * ... and announce new interface.
+	 */
+	rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING);
+	call_netdevice_notifiers(NETDEV_UP, dev);
+
+	return ret;
+}
+EXPORT_SYMBOL(dev_open);
+
+static int __dev_close(struct net_device *dev)
 {
 	const struct net_device_ops *ops = dev->netdev_ops;
-	ASSERT_RTNL();
 
+	ASSERT_RTNL();
 	might_sleep();
 
-	if (!(dev->flags & IFF_UP))
-		return 0;
-
 	/*
 	 * Tell people we are going down, so that they can
 	 * prepare to death, when device is still operating.
@@ -1252,14 +1254,34 @@ int dev_close(struct net_device *dev)
 	dev->flags &= ~IFF_UP;
 
 	/*
-	 * Tell people we are down
+	 * Shutdown NET_DMA
 	 */
-	call_netdevice_notifiers(NETDEV_DOWN, dev);
+	net_dmaengine_put();
+
+	return 0;
+}
+
+/**
+ * dev_close - shutdown an interface.
+ * @dev: device to shutdown
+ *
+ * This function moves an active device into down state. A
+ * %NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
+ * is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
+ * chain.
+ */
+int dev_close(struct net_device *dev)
+{
+	if (!(dev->flags & IFF_UP))
+		return 0;
+
+	__dev_close(dev);
 
 	/*
-	 * Shutdown NET_DMA
+	 * Tell people we are down
 	 */
-	net_dmaengine_put();
+	rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING);
+	call_netdevice_notifiers(NETDEV_DOWN, dev);
 
 	return 0;
 }
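Annotation: the two hunks above complete the open/close refactoring. dev_open() and dev_close() are split into locked workers (__dev_open()/__dev_close()) and thin public wrappers that keep the IFF_UP short-circuit and now also send the RTM_NEWLINK netlink message themselves. A minimal sketch of the resulting calling convention, with bring_up() being a hypothetical helper:

```c
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>

/* Hypothetical: bring an interface up from kernel code. dev_open()
 * must run under the RTNL (__dev_open() has an ASSERT_RTNL()); it
 * returns 0 if the device is already up, a negative errno on failure,
 * and after this change emits both NETDEV_UP and RTM_NEWLINK itself. */
static int bring_up(struct net_device *dev)
{
	int err;

	rtnl_lock();
	err = dev_open(dev);
	rtnl_unlock();
	return err;
}
```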
@@ -1448,13 +1470,10 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
 	if (skb->len > (dev->mtu + dev->hard_header_len))
 		return NET_RX_DROP;
 
-	skb_dst_drop(skb);
+	skb_set_dev(skb, dev);
 	skb->tstamp.tv64 = 0;
 	skb->pkt_type = PACKET_HOST;
 	skb->protocol = eth_type_trans(skb, dev);
-	skb->mark = 0;
-	secpath_reset(skb);
-	nf_reset(skb);
 	return netif_rx(skb);
 }
 EXPORT_SYMBOL_GPL(dev_forward_skb);
@@ -1614,6 +1633,36 @@ static bool dev_can_checksum(struct net_device *dev, struct sk_buff *skb)
 	return false;
 }
 
+/**
+ * skb_dev_set -- assign a new device to a buffer
+ * @skb: buffer for the new device
+ * @dev: network device
+ *
+ * If an skb is owned by a device already, we have to reset
+ * all data private to the namespace a device belongs to
+ * before assigning it a new device.
+ */
+#ifdef CONFIG_NET_NS
+void skb_set_dev(struct sk_buff *skb, struct net_device *dev)
+{
+	skb_dst_drop(skb);
+	if (skb->dev && !net_eq(dev_net(skb->dev), dev_net(dev))) {
+		secpath_reset(skb);
+		nf_reset(skb);
+		skb_init_secmark(skb);
+		skb->mark = 0;
+		skb->priority = 0;
+		skb->nf_trace = 0;
+		skb->ipvs_property = 0;
+#ifdef CONFIG_NET_SCHED
+		skb->tc_index = 0;
+#endif
+	}
+	skb->dev = dev;
+}
+EXPORT_SYMBOL(skb_set_dev);
+#endif /* CONFIG_NET_NS */
+
 /*
  * Invalidate hardware checksum when packet is to be mangled, and
  * complete checksum manually on outgoing path.
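Annotation: skb_set_dev() (compiled only with CONFIG_NET_NS) centralizes the scrubbing that dev_forward_skb() used to do inline. The dst is always dropped, while namespace-private state (secpath, conntrack, secmark, mark, priority, tc_index, ...) is cleared only when the old and new devices live in different namespaces. A hedged sketch of a caller, with retarget_skb() being a hypothetical name:

```c
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>

/* Hypothetical: re-home an skb onto @dev (possibly in another netns)
 * before injecting it into the receive path, mirroring what
 * dev_forward_skb() does after this change. */
static int retarget_skb(struct sk_buff *skb, struct net_device *dev)
{
	skb_set_dev(skb, dev);	/* drops dst, scrubs cross-netns state */
	skb->tstamp.tv64 = 0;
	skb->pkt_type = PACKET_HOST;
	skb->protocol = eth_type_trans(skb, dev);
	return netif_rx(skb);
}
```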
@@ -1853,6 +1902,14 @@ gso:
 
 		skb->next = nskb->next;
 		nskb->next = NULL;
+
+		/*
+		 * If device doesnt need nskb->dst, release it right now while
+		 * its hot in this cpu cache
+		 */
+		if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
+			skb_dst_drop(nskb);
+
 		rc = ops->ndo_start_xmit(nskb, dev);
 		if (unlikely(rc != NETDEV_TX_OK)) {
 			if (rc & ~NETDEV_TX_MASK)
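Annotation: the hunk above drops nskb's dst reference before ndo_start_xmit() when the driver has opted in, while the entry is still hot in this CPU's cache. A sketch of the opt-in for a made-up virtual driver whose transmit path never reads skb_dst():

```c
#include <linux/netdevice.h>
#include <linux/etherdevice.h>

/* Hypothetical setup routine: a device that never consults skb->dst
 * on transmit can set IFF_XMIT_DST_RELEASE so the core releases the
 * dst early in dev_hard_start_xmit(). */
static void myvirt_setup(struct net_device *dev)
{
	ether_setup(dev);
	dev->priv_flags |= IFF_XMIT_DST_RELEASE;
}
```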
@@ -1974,6 +2031,21 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
 	return rc;
 }
 
+/*
+ * Returns true if either:
+ *	1. skb has frag_list and the device doesn't support FRAGLIST, or
+ *	2. skb is fragmented and the device does not support SG, or if
+ *	   at least one of fragments is in highmem and device does not
+ *	   support DMA from it.
+ */
+static inline int skb_needs_linearize(struct sk_buff *skb,
+				      struct net_device *dev)
+{
+	return (skb_has_frags(skb) && !(dev->features & NETIF_F_FRAGLIST)) ||
+	       (skb_shinfo(skb)->nr_frags && (!(dev->features & NETIF_F_SG) ||
+					      illegal_highdma(dev, skb)));
+}
+
 /**
  * dev_queue_xmit - transmit a buffer
  * @skb: buffer to transmit
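Annotation: skb_needs_linearize() folds the two linearization tests that used to sit inline in dev_queue_xmit() (see the next hunk) into one predicate over the device feature flags. For illustration, a driver advertising the matching features never triggers it; the flag names are real, the setup function is hypothetical:

```c
#include <linux/netdevice.h>

/* Hypothetical: with scatter/gather, frag-list and highmem-DMA
 * support advertised, skb_needs_linearize() is always false and
 * dev_queue_xmit() never has to call __skb_linearize(). */
static void mynic_set_features(struct net_device *dev)
{
	dev->features |= NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA;
}
```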
@@ -2010,18 +2082,8 @@ int dev_queue_xmit(struct sk_buff *skb)
 	if (netif_needs_gso(dev, skb))
 		goto gso;
 
-	if (skb_has_frags(skb) &&
-	    !(dev->features & NETIF_F_FRAGLIST) &&
-	    __skb_linearize(skb))
-		goto out_kfree_skb;
-
-	/* Fragmented skb is linearized if device does not support SG,
-	 * or if at least one of fragments is in highmem and device
-	 * does not support DMA from it.
-	 */
-	if (skb_shinfo(skb)->nr_frags &&
-	    (!(dev->features & NETIF_F_SG) || illegal_highdma(dev, skb)) &&
-	    __skb_linearize(skb))
+	/* Convert a paged skb to linear, if required */
+	if (skb_needs_linearize(skb, dev) && __skb_linearize(skb))
 		goto out_kfree_skb;
 
 	/* If packet is not checksummed and device does not support
@@ -2041,7 +2103,7 @@ gso:
 	rcu_read_lock_bh();
 
 	txq = dev_pick_tx(dev, skb);
-	q = rcu_dereference(txq->qdisc);
+	q = rcu_dereference_bh(txq->qdisc);
 
 #ifdef CONFIG_NET_CLS_ACT
 	skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_EGRESS);
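Annotation: dev_queue_xmit() enters its read-side critical section with rcu_read_lock_bh(), so under CONFIG_PROVE_RCU the qdisc pointer must be fetched with rcu_dereference_bh(); plain rcu_dereference() would trip the lockdep-RCU checks because no plain rcu_read_lock() is held. A generic sketch of the pairing, with my_cfg being a hypothetical type:

```c
#include <linux/rcupdate.h>

struct my_cfg { int mtu; };

static struct my_cfg *cfg_slot;	/* hypothetical RCU-protected pointer */

static int read_mtu(void)
{
	struct my_cfg *cfg;
	int mtu;

	rcu_read_lock_bh();
	cfg = rcu_dereference_bh(cfg_slot);	/* matches the _bh lock */
	mtu = cfg ? cfg->mtu : 0;
	rcu_read_unlock_bh();
	return mtu;
}
```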
@@ -2422,6 +2484,7 @@ int netif_receive_skb(struct sk_buff *skb)
 	struct packet_type *ptype, *pt_prev;
 	struct net_device *orig_dev;
 	struct net_device *null_or_orig;
+	struct net_device *null_or_bond;
 	int ret = NET_RX_DROP;
 	__be16 type;
 
@@ -2487,12 +2550,24 @@ ncls:
 	if (!skb)
 		goto out;
 
+	/*
+	 * Make sure frames received on VLAN interfaces stacked on
+	 * bonding interfaces still make their way to any base bonding
+	 * device that may have registered for a specific ptype. The
+	 * handler may have to adjust skb->dev and orig_dev.
+	 */
+	null_or_bond = NULL;
+	if ((skb->dev->priv_flags & IFF_802_1Q_VLAN) &&
+	    (vlan_dev_real_dev(skb->dev)->priv_flags & IFF_BONDING)) {
+		null_or_bond = vlan_dev_real_dev(skb->dev);
+	}
+
 	type = skb->protocol;
 	list_for_each_entry_rcu(ptype,
 			&ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
-		if (ptype->type == type &&
-		    (ptype->dev == null_or_orig || ptype->dev == skb->dev ||
-		     ptype->dev == orig_dev)) {
+		if (ptype->type == type && (ptype->dev == null_or_orig ||
+		    ptype->dev == skb->dev || ptype->dev == orig_dev ||
+		    ptype->dev == null_or_bond)) {
 			if (pt_prev)
 				ret = deliver_skb(skb, pt_prev, orig_dev);
 			pt_prev = ptype;
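Annotation: the new null_or_bond case lets a packet_type handler registered on a bonding master still see frames that arrived on a VLAN stacked on top of that bond. The test itself is reusable; a hedged sketch with a hypothetical helper name:

```c
#include <linux/netdevice.h>
#include <linux/if_vlan.h>

/* Hypothetical: return the bonding master hiding under a VLAN
 * device, or NULL when @dev is not a VLAN-on-bond stack. */
static struct net_device *bond_under_vlan(struct net_device *dev)
{
	if ((dev->priv_flags & IFF_802_1Q_VLAN) &&
	    (vlan_dev_real_dev(dev)->priv_flags & IFF_BONDING))
		return vlan_dev_real_dev(dev);
	return NULL;
}
```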
@@ -2561,7 +2636,7 @@ out:
 	return netif_receive_skb(skb);
 }
 
-void napi_gro_flush(struct napi_struct *napi)
+static void napi_gro_flush(struct napi_struct *napi)
 {
 	struct sk_buff *skb, *next;
 
@@ -2574,7 +2649,6 @@ void napi_gro_flush(struct napi_struct *napi)
 	napi->gro_count = 0;
 	napi->gro_list = NULL;
 }
-EXPORT_SYMBOL(napi_gro_flush);
 
 enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
 {
@@ -2761,7 +2835,7 @@ gro_result_t napi_frags_finish(struct napi_struct *napi, struct sk_buff *skb,
 	switch (ret) {
 	case GRO_NORMAL:
 	case GRO_HELD:
-		skb->protocol = eth_type_trans(skb, napi->dev);
+		skb->protocol = eth_type_trans(skb, skb->dev);
 
 		if (ret == GRO_HELD)
 			skb_gro_pull(skb, -ETH_HLEN);
@@ -2966,7 +3040,7 @@ static void net_rx_action(struct softirq_action *h)
 		 * entries to the tail of this list, and only ->poll()
 		 * calls can remove this head entry from the list.
 		 */
-		n = list_entry(list->next, struct napi_struct, poll_list);
+		n = list_first_entry(list, struct napi_struct, poll_list);
 
 		have = netpoll_poll_lock(n);
 
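Annotation: several hunks in this merge (here, in rollback_registered_many() and in netdev_run_todo()) replace open-coded list_entry(head->next, ...) with list_first_entry(), which expands to exactly that but states the intent. Equivalence sketch with a made-up struct:

```c
#include <linux/list.h>

struct item {
	int val;
	struct list_head node;
};

/* Both lines name the same element; list_first_entry() just makes
 * the "head of a known non-empty list" intent explicit. */
static struct item *first(struct list_head *head)
{
	struct item *a = list_entry(head->next, struct item, node);
	struct item *b = list_first_entry(head, struct item, node);

	return a == b ? a : NULL;	/* always 'a' */
}
```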
@@ -3185,7 +3259,7 @@ static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
 {
 	const struct net_device_stats *stats = dev_get_stats(dev);
 
-	seq_printf(seq, "%6s:%8lu %7lu %4lu %4lu %4lu %5lu %10lu %9lu "
+	seq_printf(seq, "%6s: %7lu %7lu %4lu %4lu %4lu %5lu %10lu %9lu "
 		   "%8lu %7lu %4lu %4lu %4lu %5lu %7lu %10lu\n",
 		   dev->name, stats->rx_bytes, stats->rx_packets,
 		   stats->rx_errors,
@@ -3640,10 +3714,10 @@ void __dev_set_rx_mode(struct net_device *dev)
 	/* Unicast addresses changes may only happen under the rtnl,
 	 * therefore calling __dev_set_promiscuity here is safe.
 	 */
-	if (dev->uc.count > 0 && !dev->uc_promisc) {
+	if (!netdev_uc_empty(dev) && !dev->uc_promisc) {
 		__dev_set_promiscuity(dev, 1);
 		dev->uc_promisc = 1;
-	} else if (dev->uc.count == 0 && dev->uc_promisc) {
+	} else if (netdev_uc_empty(dev) && dev->uc_promisc) {
 		__dev_set_promiscuity(dev, -1);
 		dev->uc_promisc = 0;
 	}
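Annotation: direct pokes at dev->uc.count give way to the netdev_uc_empty() accessor, insulating callers from the address-list representation (which was being reworked at the time). A minimal hedged sketch of the accessor style this hunk adopts:

```c
#include <linux/netdevice.h>

/* Hypothetical: report whether @dev has secondary unicast addresses
 * without touching the dev->uc internals directly. */
static bool has_secondary_uc(struct net_device *dev)
{
	return !netdev_uc_empty(dev);
}
```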
@@ -4211,7 +4285,7 @@ static void dev_addr_discard(struct net_device *dev)
 	netif_addr_lock_bh(dev);
 
 	__dev_addr_discard(&dev->mc_list);
-	dev->mc_count = 0;
+	netdev_mc_count(dev) = 0;
 
 	netif_addr_unlock_bh(dev);
 }
@@ -4247,18 +4321,10 @@ unsigned dev_get_flags(const struct net_device *dev)
 }
 EXPORT_SYMBOL(dev_get_flags);
 
-/**
- * dev_change_flags - change device settings
- * @dev: device
- * @flags: device state flags
- *
- * Change settings on device based state flags. The flags are
- * in the userspace exported format.
- */
-int dev_change_flags(struct net_device *dev, unsigned flags)
+int __dev_change_flags(struct net_device *dev, unsigned int flags)
 {
-	int ret, changes;
 	int old_flags = dev->flags;
+	int ret;
 
 	ASSERT_RTNL();
 
@@ -4289,17 +4355,12 @@ int dev_change_flags(struct net_device *dev, unsigned flags)
 
 	ret = 0;
 	if ((old_flags ^ flags) & IFF_UP) {	/* Bit is different ? */
-		ret = ((old_flags & IFF_UP) ? dev_close : dev_open)(dev);
+		ret = ((old_flags & IFF_UP) ? __dev_close : __dev_open)(dev);
 
 		if (!ret)
 			dev_set_rx_mode(dev);
 	}
 
-	if (dev->flags & IFF_UP &&
-	    ((old_flags ^ dev->flags) & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI |
-					  IFF_VOLATILE)))
-		call_netdevice_notifiers(NETDEV_CHANGE, dev);
-
 	if ((flags ^ dev->gflags) & IFF_PROMISC) {
 		int inc = (flags & IFF_PROMISC) ? 1 : -1;
 
@@ -4318,11 +4379,47 @@ int dev_change_flags(struct net_device *dev, unsigned flags)
 		dev_set_allmulti(dev, inc);
 	}
 
-	/* Exclude state transition flags, already notified */
-	changes = (old_flags ^ dev->flags) & ~(IFF_UP | IFF_RUNNING);
+	return ret;
+}
+
+void __dev_notify_flags(struct net_device *dev, unsigned int old_flags)
+{
+	unsigned int changes = dev->flags ^ old_flags;
+
+	if (changes & IFF_UP) {
+		if (dev->flags & IFF_UP)
+			call_netdevice_notifiers(NETDEV_UP, dev);
+		else
+			call_netdevice_notifiers(NETDEV_DOWN, dev);
+	}
+
+	if (dev->flags & IFF_UP &&
+	    (changes & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI | IFF_VOLATILE)))
+		call_netdevice_notifiers(NETDEV_CHANGE, dev);
+}
+
+/**
+ * dev_change_flags - change device settings
+ * @dev: device
+ * @flags: device state flags
+ *
+ * Change settings on device based state flags. The flags are
+ * in the userspace exported format.
+ */
+int dev_change_flags(struct net_device *dev, unsigned flags)
+{
+	int ret, changes;
+	int old_flags = dev->flags;
+
+	ret = __dev_change_flags(dev, flags);
+	if (ret < 0)
+		return ret;
+
+	changes = old_flags ^ dev->flags;
 	if (changes)
 		rtmsg_ifinfo(RTM_NEWLINK, dev, changes);
 
+	__dev_notify_flags(dev, old_flags);
 	return ret;
 }
 EXPORT_SYMBOL(dev_change_flags);
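Annotation: this is the core of the refactor. __dev_change_flags() mutates state without any netlink traffic, __dev_notify_flags() replays the deferred notifier calls, and dev_change_flags() glues them together with a single rtmsg_ifinfo(). That lets a caller such as rtnetlink batch a flags change with other attribute changes and announce everything once; a sketch of that pattern (the function name is hypothetical):

```c
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>

/* Hypothetical rtnetlink-style caller, RTNL held: change flags and
 * other attributes, then send exactly one RTM_NEWLINK at the end. */
static int set_flags_one_notification(struct net_device *dev,
				      unsigned int flags)
{
	unsigned int old_flags = dev->flags;
	int err;

	err = __dev_change_flags(dev, flags);
	if (err < 0)
		return err;

	/* ... adjust MTU, address, etc. here, still silently ... */

	rtmsg_ifinfo(RTM_NEWLINK, dev, old_flags ^ dev->flags);
	__dev_notify_flags(dev, old_flags);
	return 0;
}
```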
@@ -4813,6 +4910,10 @@ static void rollback_registered_many(struct list_head *head)
 		 */
 		call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
 
+		if (!dev->rtnl_link_ops ||
+		    dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
+			rtmsg_ifinfo(RTM_DELLINK, dev, ~0U);
+
 		/*
 		 * Flush the unicast and multicast chains
 		 */
@@ -4830,7 +4931,7 @@ static void rollback_registered_many(struct list_head *head)
 	}
 
 	/* Process any work delayed until the end of the batch */
-	dev = list_entry(head->next, struct net_device, unreg_list);
+	dev = list_first_entry(head, struct net_device, unreg_list);
 	call_netdevice_notifiers(NETDEV_UNREGISTER_BATCH, dev);
 
 	synchronize_net();
@@ -5039,7 +5140,9 @@ int register_netdevice(struct net_device *dev)
 	 * Prevent userspace races by waiting until the network
 	 * device is fully setup before sending notifications.
 	 */
-	rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U);
+	if (!dev->rtnl_link_ops ||
+	    dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
+		rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U);
 
 out:
 	return ret;
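Annotation: together with the matching hunk in rollback_registered_many(), this makes RTM_NEWLINK/RTM_DELLINK conditional on rtnl_link_state: devices created through rtnl_link ops stay silent at register_netdevice() time and are announced later, once all their attributes are applied, so userspace never observes a half-configured link. A rough, heavily hedged sketch of the create side (not the literal rtnetlink code):

```c
#include <linux/netdevice.h>
#include <net/rtnetlink.h>

/* Sketch: a device created via "ip link add" is registered in
 * RTNL_LINK_INITIALIZING state, so the rtmsg_ifinfo() above is
 * skipped; rtnl_configure_link() flips the state and sends the one
 * RTM_NEWLINK when configuration is complete. */
static int create_quiet(struct net_device *dev, struct ifinfomsg *ifm)
{
	int err;

	dev->rtnl_link_state = RTNL_LINK_INITIALIZING; /* normally done by rtnl_create_link() */
	err = register_netdevice(dev);	/* quiet: RTM_NEWLINK skipped */
	if (err < 0)
		return err;
	return rtnl_configure_link(dev, ifm);	/* announce once configured */
}
```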
@@ -5216,7 +5319,7 @@ void netdev_run_todo(void)
 
 	while (!list_empty(&list)) {
 		struct net_device *dev
-			= list_entry(list.next, struct net_device, todo_list);
+			= list_first_entry(&list, struct net_device, todo_list);
 		list_del(&dev->todo_list);
 
 		if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
@@ -5367,6 +5470,8 @@ struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
 
 	netdev_init_queues(dev);
 
+	INIT_LIST_HEAD(&dev->ethtool_ntuple_list.list);
+	dev->ethtool_ntuple_list.count = 0;
 	INIT_LIST_HEAD(&dev->napi_list);
 	INIT_LIST_HEAD(&dev->unreg_list);
 	INIT_LIST_HEAD(&dev->link_watch_list);
@@ -5403,6 +5508,9 @@ void free_netdev(struct net_device *dev)
 	/* Flush device addresses */
 	dev_addr_flush(dev);
 
+	/* Clear ethtool n-tuple list */
+	ethtool_ntuple_flush(dev);
+
 	list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
 		netif_napi_del(p);
 