Diffstat (limited to 'net/core/dev.c')

-rw-r--r--  net/core/dev.c  294
1 files changed, 202 insertions, 92 deletions

diff --git a/net/core/dev.c b/net/core/dev.c
index ec874218b206..59d4394d2ce8 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1113,19 +1113,7 @@ void dev_load(struct net *net, const char *name)
 }
 EXPORT_SYMBOL(dev_load);
 
-/**
- * dev_open - prepare an interface for use.
- * @dev: device to open
- *
- * Takes a device from down to up state. The device's private open
- * function is invoked and then the multicast lists are loaded. Finally
- * the device is moved into the up state and a %NETDEV_UP message is
- * sent to the netdev notifier chain.
- *
- * Calling this function on an active interface is a nop. On a failure
- * a negative errno code is returned.
- */
-int dev_open(struct net_device *dev)
+static int __dev_open(struct net_device *dev)
 {
         const struct net_device_ops *ops = dev->netdev_ops;
         int ret;
@@ -1133,13 +1121,6 @@ int dev_open(struct net_device *dev)
         ASSERT_RTNL();
 
         /*
-         * Is it already up?
-         */
-
-        if (dev->flags & IFF_UP)
-                return 0;
-
-        /*
          * Is it even present?
          */
         if (!netif_device_present(dev))
@@ -1187,36 +1168,57 @@ int dev_open(struct net_device *dev)
                  * Wakeup transmit queue engine
                  */
                 dev_activate(dev);
-
-                /*
-                 * ... and announce new interface.
-                 */
-                call_netdevice_notifiers(NETDEV_UP, dev);
         }
 
         return ret;
 }
-EXPORT_SYMBOL(dev_open);
 
 /**
- * dev_close - shutdown an interface.
- * @dev: device to shutdown
+ * dev_open - prepare an interface for use.
+ * @dev: device to open
  *
- * This function moves an active device into down state. A
- * %NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
- * is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
- * chain.
+ * Takes a device from down to up state. The device's private open
+ * function is invoked and then the multicast lists are loaded. Finally
+ * the device is moved into the up state and a %NETDEV_UP message is
+ * sent to the netdev notifier chain.
+ *
+ * Calling this function on an active interface is a nop. On a failure
+ * a negative errno code is returned.
  */
-int dev_close(struct net_device *dev)
+int dev_open(struct net_device *dev)
+{
+        int ret;
+
+        /*
+         * Is it already up?
+         */
+        if (dev->flags & IFF_UP)
+                return 0;
+
+        /*
+         * Open device
+         */
+        ret = __dev_open(dev);
+        if (ret < 0)
+                return ret;
+
+        /*
+         * ... and announce new interface.
+         */
+        rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING);
+        call_netdevice_notifiers(NETDEV_UP, dev);
+
+        return ret;
+}
+EXPORT_SYMBOL(dev_open);
+
+static int __dev_close(struct net_device *dev)
 {
         const struct net_device_ops *ops = dev->netdev_ops;
-        ASSERT_RTNL();
 
+        ASSERT_RTNL();
         might_sleep();
 
-        if (!(dev->flags & IFF_UP))
-                return 0;
-
         /*
          * Tell people we are going down, so that they can
          * prepare to death, when device is still operating.
@@ -1252,14 +1254,34 @@ int dev_close(struct net_device *dev)
         dev->flags &= ~IFF_UP;
 
         /*
-         * Tell people we are down
+         * Shutdown NET_DMA
          */
-        call_netdevice_notifiers(NETDEV_DOWN, dev);
+        net_dmaengine_put();
+
+        return 0;
+}
+
+/**
+ * dev_close - shutdown an interface.
+ * @dev: device to shutdown
+ *
+ * This function moves an active device into down state. A
+ * %NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
+ * is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
+ * chain.
+ */
+int dev_close(struct net_device *dev)
+{
+        if (!(dev->flags & IFF_UP))
+                return 0;
+
+        __dev_close(dev);
 
         /*
-         * Shutdown NET_DMA
+         * Tell people we are down
          */
-        net_dmaengine_put();
+        rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING);
+        call_netdevice_notifiers(NETDEV_DOWN, dev);
 
         return 0;
 }
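The two hunks above split dev_open() and dev_close() into static __dev_open()/__dev_close() cores plus thin exported wrappers: the core performs only the state transition, while the wrapper keeps the idempotence check and emits the netlink message and notifier events. That lets __dev_change_flags() (later in this diff) toggle the interface through the cores without duplicate announcements. A minimal userspace sketch of the same pattern; the names and the toy announce() stand-in for rtmsg_ifinfo()/call_netdevice_notifiers() are illustrative, not kernel API:

    #include <stdio.h>

    struct device { int up; };

    /* Core: state transition only, no announcements (role of __dev_open). */
    static int core_open(struct device *d)
    {
            d->up = 1;
            return 0;
    }

    /* Stand-in for rtmsg_ifinfo() + call_netdevice_notifiers(). */
    static void announce(const char *event)
    {
            printf("event: %s\n", event);
    }

    /* Wrapper: idempotence check plus announcements (role of dev_open). */
    int device_open(struct device *d)
    {
            int ret;

            if (d->up)
                    return 0;       /* nop on an already-up device */

            ret = core_open(d);
            if (ret < 0)
                    return ret;

            announce("UP");         /* announced once, on the public path only */
            return ret;
    }

    int main(void)
    {
            struct device d = { 0 };

            device_open(&d);        /* prints "event: UP" */
            device_open(&d);        /* nop: no duplicate event */
            return 0;
    }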
@@ -1448,13 +1470,10 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
         if (skb->len > (dev->mtu + dev->hard_header_len))
                 return NET_RX_DROP;
 
-        skb_dst_drop(skb);
+        skb_set_dev(skb, dev);
         skb->tstamp.tv64 = 0;
         skb->pkt_type = PACKET_HOST;
         skb->protocol = eth_type_trans(skb, dev);
-        skb->mark = 0;
-        secpath_reset(skb);
-        nf_reset(skb);
         return netif_rx(skb);
 }
 EXPORT_SYMBOL_GPL(dev_forward_skb);
@@ -1614,6 +1633,36 @@ static bool dev_can_checksum(struct net_device *dev, struct sk_buff *skb)
         return false;
 }
 
+/**
+ * skb_dev_set -- assign a new device to a buffer
+ * @skb: buffer for the new device
+ * @dev: network device
+ *
+ * If an skb is owned by a device already, we have to reset
+ * all data private to the namespace a device belongs to
+ * before assigning it a new device.
+ */
+#ifdef CONFIG_NET_NS
+void skb_set_dev(struct sk_buff *skb, struct net_device *dev)
+{
+        skb_dst_drop(skb);
+        if (skb->dev && !net_eq(dev_net(skb->dev), dev_net(dev))) {
+                secpath_reset(skb);
+                nf_reset(skb);
+                skb_init_secmark(skb);
+                skb->mark = 0;
+                skb->priority = 0;
+                skb->nf_trace = 0;
+                skb->ipvs_property = 0;
+#ifdef CONFIG_NET_SCHED
+                skb->tc_index = 0;
+#endif
+        }
+        skb->dev = dev;
+}
+EXPORT_SYMBOL(skb_set_dev);
+#endif /* CONFIG_NET_NS */
+
 /*
  * Invalidate hardware checksum when packet is to be mangled, and
  * complete checksum manually on outgoing path.
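skb_set_dev() centralizes the scrubbing that dev_forward_skb() previously open-coded: metadata private to a network namespace (mark, security path, netfilter state, and so on) is reset only when the buffer actually crosses from one namespace to another, while same-namespace moves keep it. A compressed sketch of that ownership-transfer rule; types and field names are illustrative, not the kernel's:

    struct ns  { int id; };
    struct dev { struct ns *ns; };

    struct buf {
            struct dev *dev;
            int mark;               /* namespace-private metadata */
            int priority;
    };

    /* Role of skb_set_dev(): scrub private state only across namespaces. */
    void buf_set_dev(struct buf *b, struct dev *d)
    {
            if (b->dev && b->dev->ns != d->ns) {
                    b->mark = 0;    /* crossing namespaces: reset private state */
                    b->priority = 0;
            }
            b->dev = d;             /* same-namespace moves keep their state */
    }

    int main(void)
    {
            struct ns  ns_a = { 1 }, ns_b = { 2 };
            struct dev da = { &ns_a }, db = { &ns_b };
            struct buf b = { &da, 42, 7 };

            buf_set_dev(&b, &db);   /* crosses ns_a -> ns_b: metadata cleared */
            return b.mark;          /* 0 */
    }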
@@ -1853,6 +1902,14 @@ gso:
 
                 skb->next = nskb->next;
                 nskb->next = NULL;
+
+                /*
+                 * If device doesnt need nskb->dst, release it right now while
+                 * its hot in this cpu cache
+                 */
+                if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
+                        skb_dst_drop(nskb);
+
                 rc = ops->ndo_start_xmit(nskb, dev);
                 if (unlikely(rc != NETDEV_TX_OK)) {
                         if (rc & ~NETDEV_TX_MASK)
@@ -1974,6 +2031,21 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
         return rc;
 }
 
+/*
+ * Returns true if either:
+ *      1. skb has frag_list and the device doesn't support FRAGLIST, or
+ *      2. skb is fragmented and the device does not support SG, or if
+ *         at least one of fragments is in highmem and device does not
+ *         support DMA from it.
+ */
+static inline int skb_needs_linearize(struct sk_buff *skb,
+                                      struct net_device *dev)
+{
+        return (skb_has_frags(skb) && !(dev->features & NETIF_F_FRAGLIST)) ||
+               (skb_shinfo(skb)->nr_frags && (!(dev->features & NETIF_F_SG) ||
+                                              illegal_highdma(dev, skb)));
+}
+
 /**
  * dev_queue_xmit - transmit a buffer
  * @skb: buffer to transmit
@@ -2010,18 +2082,8 @@ int dev_queue_xmit(struct sk_buff *skb)
         if (netif_needs_gso(dev, skb))
                 goto gso;
 
-        if (skb_has_frags(skb) &&
-            !(dev->features & NETIF_F_FRAGLIST) &&
-            __skb_linearize(skb))
-                goto out_kfree_skb;
-
-        /* Fragmented skb is linearized if device does not support SG,
-         * or if at least one of fragments is in highmem and device
-         * does not support DMA from it.
-         */
-        if (skb_shinfo(skb)->nr_frags &&
-            (!(dev->features & NETIF_F_SG) || illegal_highdma(dev, skb)) &&
-            __skb_linearize(skb))
+        /* Convert a paged skb to linear, if required */
+        if (skb_needs_linearize(skb, dev) && __skb_linearize(skb))
                 goto out_kfree_skb;
 
         /* If packet is not checksummed and device does not support
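The previous two hunks fold a pair of open-coded linearization tests in dev_queue_xmit() into one skb_needs_linearize() predicate, so the call site reduces to a single test-and-convert: the (possibly failing) __skb_linearize() runs only when the cheap check says it is needed, and a failure takes the drop path. A small sketch of that short-circuit idiom, with illustrative names:

    #include <stdbool.h>
    #include <stdio.h>

    static bool needs_linearize(int nr_frags)
    {
            return nr_frags > 0;    /* cheap predicate, like skb_needs_linearize() */
    }

    static int linearize(int *nr_frags)
    {
            *nr_frags = 0;          /* pretend the pages were coalesced */
            return 0;               /* nonzero would mean allocation failure */
    }

    static int xmit(int nr_frags)
    {
            /* conversion runs only when required; failure drops the packet */
            if (needs_linearize(nr_frags) && linearize(&nr_frags))
                    return -1;      /* the out_kfree_skb path in the real code */

            printf("sending linear buffer\n");
            return 0;
    }

    int main(void)
    {
            return xmit(3);
    }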
@@ -2041,7 +2103,7 @@ gso:
         rcu_read_lock_bh();
 
         txq = dev_pick_tx(dev, skb);
-        q = rcu_dereference(txq->qdisc);
+        q = rcu_dereference_bh(txq->qdisc);
 
 #ifdef CONFIG_NET_CLS_ACT
         skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_EGRESS);
@@ -2421,7 +2483,9 @@ int netif_receive_skb(struct sk_buff *skb)
 {
         struct packet_type *ptype, *pt_prev;
         struct net_device *orig_dev;
+        struct net_device *master;
         struct net_device *null_or_orig;
+        struct net_device *null_or_bond;
         int ret = NET_RX_DROP;
         __be16 type;
 
@@ -2440,11 +2504,12 @@ int netif_receive_skb(struct sk_buff *skb)
 
         null_or_orig = NULL;
         orig_dev = skb->dev;
-        if (orig_dev->master) {
-                if (skb_bond_should_drop(skb))
+        master = ACCESS_ONCE(orig_dev->master);
+        if (master) {
+                if (skb_bond_should_drop(skb, master))
                         null_or_orig = orig_dev; /* deliver only exact match */
                 else
-                        skb->dev = orig_dev->master;
+                        skb->dev = master;
         }
 
         __get_cpu_var(netdev_rx_stat).total++;
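The ACCESS_ONCE() change above is about reading orig_dev->master exactly once: the old code tested the pointer and then dereferenced it again, so a concurrent bonding change could make the later reads see a different (possibly NULL) value. Snapshotting the read into a local makes the test and every use operate on the same value. A sketch using the kernel's classic ACCESS_ONCE() definition (GNU C typeof; the surrounding types are illustrative):

    #include <stdio.h>

    #define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))

    struct dev { const char *name; };

    struct dev *shared_master;      /* may be rewritten by another thread */

    void deliver(void)
    {
            /* one load: the test and the use see the same snapshot */
            struct dev *master = ACCESS_ONCE(shared_master);

            if (master)
                    printf("deliver via %s\n", master->name);
    }

    int main(void)
    {
            static struct dev bond0 = { "bond0" };

            shared_master = &bond0;
            deliver();              /* prints "deliver via bond0" */
            return 0;
    }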
@@ -2487,12 +2552,24 @@ ncls:
         if (!skb)
                 goto out;
 
+        /*
+         * Make sure frames received on VLAN interfaces stacked on
+         * bonding interfaces still make their way to any base bonding
+         * device that may have registered for a specific ptype. The
+         * handler may have to adjust skb->dev and orig_dev.
+         */
+        null_or_bond = NULL;
+        if ((skb->dev->priv_flags & IFF_802_1Q_VLAN) &&
+            (vlan_dev_real_dev(skb->dev)->priv_flags & IFF_BONDING)) {
+                null_or_bond = vlan_dev_real_dev(skb->dev);
+        }
+
         type = skb->protocol;
         list_for_each_entry_rcu(ptype,
                         &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
-                if (ptype->type == type &&
-                    (ptype->dev == null_or_orig || ptype->dev == skb->dev ||
-                     ptype->dev == orig_dev)) {
+                if (ptype->type == type && (ptype->dev == null_or_orig ||
+                    ptype->dev == skb->dev || ptype->dev == orig_dev ||
+                    ptype->dev == null_or_bond)) {
                         if (pt_prev)
                                 ret = deliver_skb(skb, pt_prev, orig_dev);
                         pt_prev = ptype;
@@ -2561,7 +2638,7 @@ out:
         return netif_receive_skb(skb);
 }
 
-void napi_gro_flush(struct napi_struct *napi)
+static void napi_gro_flush(struct napi_struct *napi)
 {
         struct sk_buff *skb, *next;
 
@@ -2574,7 +2651,6 @@ void napi_gro_flush(struct napi_struct *napi)
         napi->gro_count = 0;
         napi->gro_list = NULL;
 }
-EXPORT_SYMBOL(napi_gro_flush);
 
 enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
 {
@@ -2966,7 +3042,7 @@ static void net_rx_action(struct softirq_action *h)
                  * entries to the tail of this list, and only ->poll()
                  * calls can remove this head entry from the list.
                  */
-                n = list_entry(list->next, struct napi_struct, poll_list);
+                n = list_first_entry(list, struct napi_struct, poll_list);
 
                 have = netpoll_poll_lock(n);
 
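Several hunks in this diff (here, and in rollback_registered_many() and netdev_run_todo() below) replace list_entry(head->next, ...) with list_first_entry(head, ...). The two expand to the same container_of() arithmetic; the new spelling simply names the intent of taking the head element. A self-contained demo with a minimal intrusive list; the macro bodies match the kernel's, the rest is illustrative:

    #include <stdio.h>
    #include <stddef.h>

    struct list_head { struct list_head *next, *prev; };

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))
    #define list_entry(ptr, type, member) container_of(ptr, type, member)
    #define list_first_entry(head, type, member) \
            list_entry((head)->next, type, member)

    struct item { int value; struct list_head node; };

    int main(void)
    {
            struct item a = { .value = 42 };
            struct list_head head = { &a.node, &a.node };

            a.node.next = a.node.prev = &head;

            /* same pointer either way; the second names the intent */
            struct item *first = list_first_entry(&head, struct item, node);

            printf("%d\n", first->value);   /* 42 */
            return 0;
    }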
@@ -3185,7 +3261,7 @@ static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
 {
         const struct net_device_stats *stats = dev_get_stats(dev);
 
-        seq_printf(seq, "%6s:%8lu %7lu %4lu %4lu %4lu %5lu %10lu %9lu "
+        seq_printf(seq, "%6s: %7lu %7lu %4lu %4lu %4lu %5lu %10lu %9lu "
                    "%8lu %7lu %4lu %4lu %4lu %5lu %7lu %10lu\n",
                    dev->name, stats->rx_bytes, stats->rx_packets,
                    stats->rx_errors,
@@ -3640,10 +3716,10 @@ void __dev_set_rx_mode(struct net_device *dev)
         /* Unicast addresses changes may only happen under the rtnl,
          * therefore calling __dev_set_promiscuity here is safe.
          */
-        if (dev->uc.count > 0 && !dev->uc_promisc) {
+        if (!netdev_uc_empty(dev) && !dev->uc_promisc) {
                 __dev_set_promiscuity(dev, 1);
                 dev->uc_promisc = 1;
-        } else if (dev->uc.count == 0 && dev->uc_promisc) {
+        } else if (netdev_uc_empty(dev) && dev->uc_promisc) {
                 __dev_set_promiscuity(dev, -1);
                 dev->uc_promisc = 0;
         }
@@ -4211,7 +4287,7 @@ static void dev_addr_discard(struct net_device *dev)
         netif_addr_lock_bh(dev);
 
         __dev_addr_discard(&dev->mc_list);
-        dev->mc_count = 0;
+        netdev_mc_count(dev) = 0;
 
         netif_addr_unlock_bh(dev);
 }
@@ -4247,18 +4323,10 @@ unsigned dev_get_flags(const struct net_device *dev)
 }
 EXPORT_SYMBOL(dev_get_flags);
 
-/**
- * dev_change_flags - change device settings
- * @dev: device
- * @flags: device state flags
- *
- * Change settings on device based state flags. The flags are
- * in the userspace exported format.
- */
-int dev_change_flags(struct net_device *dev, unsigned flags)
+int __dev_change_flags(struct net_device *dev, unsigned int flags)
 {
-        int ret, changes;
         int old_flags = dev->flags;
+        int ret;
 
         ASSERT_RTNL();
 
@@ -4289,17 +4357,12 @@ int dev_change_flags(struct net_device *dev, unsigned flags)
 
         ret = 0;
         if ((old_flags ^ flags) & IFF_UP) {     /* Bit is different  ? */
-                ret = ((old_flags & IFF_UP) ? dev_close : dev_open)(dev);
+                ret = ((old_flags & IFF_UP) ? __dev_close : __dev_open)(dev);
 
                 if (!ret)
                         dev_set_rx_mode(dev);
         }
 
-        if (dev->flags & IFF_UP &&
-            ((old_flags ^ dev->flags) & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI |
-                                          IFF_VOLATILE)))
-                call_netdevice_notifiers(NETDEV_CHANGE, dev);
-
         if ((flags ^ dev->gflags) & IFF_PROMISC) {
                 int inc = (flags & IFF_PROMISC) ? 1 : -1;
 
@@ -4318,11 +4381,47 @@ int dev_change_flags(struct net_device *dev, unsigned flags)
                 dev_set_allmulti(dev, inc);
         }
 
-        /* Exclude state transition flags, already notified */
-        changes = (old_flags ^ dev->flags) & ~(IFF_UP | IFF_RUNNING);
+        return ret;
+}
+
+void __dev_notify_flags(struct net_device *dev, unsigned int old_flags)
+{
+        unsigned int changes = dev->flags ^ old_flags;
+
+        if (changes & IFF_UP) {
+                if (dev->flags & IFF_UP)
+                        call_netdevice_notifiers(NETDEV_UP, dev);
+                else
+                        call_netdevice_notifiers(NETDEV_DOWN, dev);
+        }
+
+        if (dev->flags & IFF_UP &&
+            (changes & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI | IFF_VOLATILE)))
+                call_netdevice_notifiers(NETDEV_CHANGE, dev);
+}
+
+/**
+ * dev_change_flags - change device settings
+ * @dev: device
+ * @flags: device state flags
+ *
+ * Change settings on device based state flags. The flags are
+ * in the userspace exported format.
+ */
+int dev_change_flags(struct net_device *dev, unsigned flags)
+{
+        int ret, changes;
+        int old_flags = dev->flags;
+
+        ret = __dev_change_flags(dev, flags);
+        if (ret < 0)
+                return ret;
+
+        changes = old_flags ^ dev->flags;
         if (changes)
                 rtmsg_ifinfo(RTM_NEWLINK, dev, changes);
 
+        __dev_notify_flags(dev, old_flags);
         return ret;
 }
 EXPORT_SYMBOL(dev_change_flags);
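dev_change_flags() is now a composition: __dev_change_flags() applies the flags, the caller XORs old and new flags to emit one netlink message for exactly the changed bits, and __dev_notify_flags() turns the same XOR into the NETDEV_UP/NETDEV_DOWN/NETDEV_CHANGE notifier calls that the open/close cores no longer send themselves. A sketch of that changed-bits dispatch; the flag set is trimmed and the names are illustrative:

    #include <stdio.h>

    #define IFF_UP      0x1
    #define IFF_DEBUG   0x4
    #define IFF_PROMISC 0x100

    /* Role of __dev_notify_flags(): notify only for bits that changed. */
    static void notify_flags(unsigned int flags, unsigned int old_flags)
    {
            unsigned int changes = flags ^ old_flags;

            if (changes & IFF_UP)
                    puts(flags & IFF_UP ? "NETDEV_UP" : "NETDEV_DOWN");

            /* up/down and promisc transitions are announced separately */
            if ((flags & IFF_UP) && (changes & ~(IFF_UP | IFF_PROMISC)))
                    puts("NETDEV_CHANGE");
    }

    int main(void)
    {
            notify_flags(IFF_UP, 0);                  /* NETDEV_UP */
            notify_flags(IFF_UP | IFF_DEBUG, IFF_UP); /* NETDEV_CHANGE */
            return 0;
    }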
@@ -4813,6 +4912,10 @@ static void rollback_registered_many(struct list_head *head)
                  */
                 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
 
+                if (!dev->rtnl_link_ops ||
+                    dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
+                        rtmsg_ifinfo(RTM_DELLINK, dev, ~0U);
+
                 /*
                  * Flush the unicast and multicast chains
                  */
@@ -4830,7 +4933,7 @@ static void rollback_registered_many(struct list_head *head)
         }
 
         /* Process any work delayed until the end of the batch */
-        dev = list_entry(head->next, struct net_device, unreg_list);
+        dev = list_first_entry(head, struct net_device, unreg_list);
         call_netdevice_notifiers(NETDEV_UNREGISTER_BATCH, dev);
 
         synchronize_net();
@@ -5039,7 +5142,9 @@ int register_netdevice(struct net_device *dev)
          * Prevent userspace races by waiting until the network
          * device is fully setup before sending notifications.
          */
-        rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U);
+        if (!dev->rtnl_link_ops ||
+            dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
+                rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U);
 
 out:
         return ret;
@@ -5216,7 +5321,7 @@ void netdev_run_todo(void)
 
         while (!list_empty(&list)) {
                 struct net_device *dev
-                        = list_entry(list.next, struct net_device, todo_list);
+                        = list_first_entry(&list, struct net_device, todo_list);
                 list_del(&dev->todo_list);
 
                 if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
@@ -5367,6 +5472,8 @@ struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
 
         netdev_init_queues(dev);
 
+        INIT_LIST_HEAD(&dev->ethtool_ntuple_list.list);
+        dev->ethtool_ntuple_list.count = 0;
         INIT_LIST_HEAD(&dev->napi_list);
         INIT_LIST_HEAD(&dev->unreg_list);
         INIT_LIST_HEAD(&dev->link_watch_list);
@@ -5403,6 +5510,9 @@ void free_netdev(struct net_device *dev)
         /* Flush device addresses */
         dev_addr_flush(dev);
 
+        /* Clear ethtool n-tuple list */
+        ethtool_ntuple_flush(dev);
+
         list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
                 netif_napi_del(p);
 
