Diffstat (limited to 'net/core')
-rw-r--r--  net/core/datagram.c         |    1
-rw-r--r--  net/core/dev.c              |  301
-rw-r--r--  net/core/dev_mcast.c        |    5
-rw-r--r--  net/core/drop_monitor.c     |    2
-rw-r--r--  net/core/dst.c              |    1
-rw-r--r--  net/core/ethtool.c          |  434
-rw-r--r--  net/core/fib_rules.c        |    3
-rw-r--r--  net/core/filter.c           |    3
-rw-r--r--  net/core/gen_estimator.c    |    1
-rw-r--r--  net/core/iovec.c            |    1
-rw-r--r--  net/core/link_watch.c       |    1
-rw-r--r--  net/core/neighbour.c        |   21
-rw-r--r--  net/core/net-sysfs.c        |    1
-rw-r--r--  net/core/net-traces.c       |    1
-rw-r--r--  net/core/netpoll.c          |  179
-rw-r--r--  net/core/pktgen.c           |    3
-rw-r--r--  net/core/rtnetlink.c        |  126
-rw-r--r--  net/core/scm.c              |    3
-rw-r--r--  net/core/sock.c             |   35
-rw-r--r--  net/core/sysctl_net_core.c  |    1
20 files changed, 905 insertions(+), 218 deletions(-)
diff --git a/net/core/datagram.c b/net/core/datagram.c
index 95c2e0840d0d..2dccd4ee591b 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -48,6 +48,7 @@
 #include <linux/poll.h>
 #include <linux/highmem.h>
 #include <linux/spinlock.h>
+#include <linux/slab.h>
 
 #include <net/protocol.h>
 #include <linux/skbuff.h>
diff --git a/net/core/dev.c b/net/core/dev.c
index bb1f1da2b8a7..f769098774b7 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -80,6 +80,7 @@
 #include <linux/types.h>
 #include <linux/kernel.h>
 #include <linux/hash.h>
+#include <linux/slab.h>
 #include <linux/sched.h>
 #include <linux/mutex.h>
 #include <linux/string.h>
@@ -1113,19 +1114,7 @@ void dev_load(struct net *net, const char *name)
 }
 EXPORT_SYMBOL(dev_load);
 
-/**
- * dev_open - prepare an interface for use.
- * @dev: device to open
- *
- * Takes a device from down to up state. The device's private open
- * function is invoked and then the multicast lists are loaded. Finally
- * the device is moved into the up state and a %NETDEV_UP message is
- * sent to the netdev notifier chain.
- *
- * Calling this function on an active interface is a nop. On a failure
- * a negative errno code is returned.
- */
-int dev_open(struct net_device *dev)
+static int __dev_open(struct net_device *dev)
 {
 	const struct net_device_ops *ops = dev->netdev_ops;
 	int ret;
@@ -1133,13 +1122,6 @@ int dev_open(struct net_device *dev)
 	ASSERT_RTNL();
 
 	/*
-	 * Is it already up?
-	 */
-
-	if (dev->flags & IFF_UP)
-		return 0;
-
-	/*
 	 * Is it even present?
 	 */
 	if (!netif_device_present(dev))
@@ -1187,36 +1169,57 @@ int dev_open(struct net_device *dev)
 	 * Wakeup transmit queue engine
 	 */
 	dev_activate(dev);
-
-	/*
-	 * ... and announce new interface.
-	 */
-	call_netdevice_notifiers(NETDEV_UP, dev);
 	}
 
 	return ret;
 }
-EXPORT_SYMBOL(dev_open);
 
 /**
- * dev_close - shutdown an interface.
- * @dev: device to shutdown
+ * dev_open - prepare an interface for use.
+ * @dev: device to open
  *
- * This function moves an active device into down state. A
- * %NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
- * is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
- * chain.
+ * Takes a device from down to up state. The device's private open
+ * function is invoked and then the multicast lists are loaded. Finally
+ * the device is moved into the up state and a %NETDEV_UP message is
+ * sent to the netdev notifier chain.
+ *
+ * Calling this function on an active interface is a nop. On a failure
+ * a negative errno code is returned.
  */
-int dev_close(struct net_device *dev)
+int dev_open(struct net_device *dev)
+{
+	int ret;
+
+	/*
+	 * Is it already up?
+	 */
+	if (dev->flags & IFF_UP)
+		return 0;
+
+	/*
+	 * Open device
+	 */
+	ret = __dev_open(dev);
+	if (ret < 0)
+		return ret;
+
+	/*
+	 * ... and announce new interface.
+	 */
+	rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING);
+	call_netdevice_notifiers(NETDEV_UP, dev);
+
+	return ret;
+}
+EXPORT_SYMBOL(dev_open);
+
+static int __dev_close(struct net_device *dev)
 {
 	const struct net_device_ops *ops = dev->netdev_ops;
-	ASSERT_RTNL();
 
+	ASSERT_RTNL();
 	might_sleep();
 
-	if (!(dev->flags & IFF_UP))
-		return 0;
-
 	/*
 	 * Tell people we are going down, so that they can
 	 * prepare to death, when device is still operating.
@@ -1252,14 +1255,34 @@ int dev_close(struct net_device *dev)
 	dev->flags &= ~IFF_UP;
 
 	/*
-	 * Tell people we are down
+	 * Shutdown NET_DMA
 	 */
-	call_netdevice_notifiers(NETDEV_DOWN, dev);
+	net_dmaengine_put();
+
+	return 0;
+}
+
+/**
+ * dev_close - shutdown an interface.
+ * @dev: device to shutdown
+ *
+ * This function moves an active device into down state. A
+ * %NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
+ * is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
+ * chain.
+ */
+int dev_close(struct net_device *dev)
+{
+	if (!(dev->flags & IFF_UP))
+		return 0;
+
+	__dev_close(dev);
 
 	/*
-	 * Shutdown NET_DMA
+	 * Tell people we are down
 	 */
-	net_dmaengine_put();
+	rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING);
+	call_netdevice_notifiers(NETDEV_DOWN, dev);
 
 	return 0;
 }
@@ -1448,13 +1471,10 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
 	if (skb->len > (dev->mtu + dev->hard_header_len))
 		return NET_RX_DROP;
 
-	skb_dst_drop(skb);
+	skb_set_dev(skb, dev);
 	skb->tstamp.tv64 = 0;
 	skb->pkt_type = PACKET_HOST;
 	skb->protocol = eth_type_trans(skb, dev);
-	skb->mark = 0;
-	secpath_reset(skb);
-	nf_reset(skb);
 	return netif_rx(skb);
 }
 EXPORT_SYMBOL_GPL(dev_forward_skb);
@@ -1614,6 +1634,36 @@ static bool dev_can_checksum(struct net_device *dev, struct sk_buff *skb)
 	return false;
 }
 
+/**
+ * skb_dev_set -- assign a new device to a buffer
+ * @skb: buffer for the new device
+ * @dev: network device
+ *
+ * If an skb is owned by a device already, we have to reset
+ * all data private to the namespace a device belongs to
+ * before assigning it a new device.
+ */
+#ifdef CONFIG_NET_NS
+void skb_set_dev(struct sk_buff *skb, struct net_device *dev)
+{
+	skb_dst_drop(skb);
+	if (skb->dev && !net_eq(dev_net(skb->dev), dev_net(dev))) {
+		secpath_reset(skb);
+		nf_reset(skb);
+		skb_init_secmark(skb);
+		skb->mark = 0;
+		skb->priority = 0;
+		skb->nf_trace = 0;
+		skb->ipvs_property = 0;
+#ifdef CONFIG_NET_SCHED
+		skb->tc_index = 0;
+#endif
+	}
+	skb->dev = dev;
+}
+EXPORT_SYMBOL(skb_set_dev);
+#endif /* CONFIG_NET_NS */
+
 /*
  * Invalidate hardware checksum when packet is to be mangled, and
  * complete checksum manually on outgoing path.
@@ -1853,6 +1903,14 @@ gso:
 
 		skb->next = nskb->next;
 		nskb->next = NULL;
+
+		/*
+		 * If device doesnt need nskb->dst, release it right now while
+		 * its hot in this cpu cache
+		 */
+		if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
+			skb_dst_drop(nskb);
+
 		rc = ops->ndo_start_xmit(nskb, dev);
 		if (unlikely(rc != NETDEV_TX_OK)) {
 			if (rc & ~NETDEV_TX_MASK)
@@ -1931,8 +1989,12 @@ static struct netdev_queue *dev_pick_tx(struct net_device *dev,
 		if (dev->real_num_tx_queues > 1)
 			queue_index = skb_tx_hash(dev, skb);
 
-		if (sk && sk->sk_dst_cache)
-			sk_tx_queue_set(sk, queue_index);
+		if (sk) {
+			struct dst_entry *dst = rcu_dereference_bh(sk->sk_dst_cache);
+
+			if (dst && skb_dst(skb) == dst)
+				sk_tx_queue_set(sk, queue_index);
+		}
 	}
 }
 
@@ -1974,6 +2036,21 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
 	return rc;
 }
 
+/*
+ * Returns true if either:
+ *	1. skb has frag_list and the device doesn't support FRAGLIST, or
+ *	2. skb is fragmented and the device does not support SG, or if
+ *	   at least one of fragments is in highmem and device does not
+ *	   support DMA from it.
+ */
+static inline int skb_needs_linearize(struct sk_buff *skb,
+				      struct net_device *dev)
+{
+	return (skb_has_frags(skb) && !(dev->features & NETIF_F_FRAGLIST)) ||
+	       (skb_shinfo(skb)->nr_frags && (!(dev->features & NETIF_F_SG) ||
+					      illegal_highdma(dev, skb)));
+}
+
 /**
  * dev_queue_xmit - transmit a buffer
  * @skb: buffer to transmit
@@ -2010,18 +2087,8 @@ int dev_queue_xmit(struct sk_buff *skb)
 	if (netif_needs_gso(dev, skb))
 		goto gso;
 
-	if (skb_has_frags(skb) &&
-	    !(dev->features & NETIF_F_FRAGLIST) &&
-	    __skb_linearize(skb))
-		goto out_kfree_skb;
-
-	/* Fragmented skb is linearized if device does not support SG,
-	 * or if at least one of fragments is in highmem and device
-	 * does not support DMA from it.
-	 */
-	if (skb_shinfo(skb)->nr_frags &&
-	    (!(dev->features & NETIF_F_SG) || illegal_highdma(dev, skb)) &&
-	    __skb_linearize(skb))
+	/* Convert a paged skb to linear, if required */
+	if (skb_needs_linearize(skb, dev) && __skb_linearize(skb))
 		goto out_kfree_skb;
 
 	/* If packet is not checksummed and device does not support
@@ -2421,7 +2488,9 @@ int netif_receive_skb(struct sk_buff *skb)
 {
 	struct packet_type *ptype, *pt_prev;
 	struct net_device *orig_dev;
+	struct net_device *master;
 	struct net_device *null_or_orig;
+	struct net_device *null_or_bond;
 	int ret = NET_RX_DROP;
 	__be16 type;
 
@@ -2440,11 +2509,12 @@ int netif_receive_skb(struct sk_buff *skb)
 
 	null_or_orig = NULL;
 	orig_dev = skb->dev;
-	if (orig_dev->master) {
-		if (skb_bond_should_drop(skb))
+	master = ACCESS_ONCE(orig_dev->master);
+	if (master) {
+		if (skb_bond_should_drop(skb, master))
 			null_or_orig = orig_dev; /* deliver only exact match */
 		else
-			skb->dev = orig_dev->master;
+			skb->dev = master;
 	}
 
 	__get_cpu_var(netdev_rx_stat).total++;
@@ -2487,12 +2557,24 @@ ncls:
 	if (!skb)
 		goto out;
 
+	/*
+	 * Make sure frames received on VLAN interfaces stacked on
+	 * bonding interfaces still make their way to any base bonding
+	 * device that may have registered for a specific ptype.  The
+	 * handler may have to adjust skb->dev and orig_dev.
+	 */
+	null_or_bond = NULL;
+	if ((skb->dev->priv_flags & IFF_802_1Q_VLAN) &&
+	    (vlan_dev_real_dev(skb->dev)->priv_flags & IFF_BONDING)) {
+		null_or_bond = vlan_dev_real_dev(skb->dev);
+	}
+
 	type = skb->protocol;
 	list_for_each_entry_rcu(ptype,
 			&ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
-		if (ptype->type == type &&
-		    (ptype->dev == null_or_orig || ptype->dev == skb->dev ||
-		     ptype->dev == orig_dev)) {
+		if (ptype->type == type && (ptype->dev == null_or_orig ||
+		    ptype->dev == skb->dev || ptype->dev == orig_dev ||
+		    ptype->dev == null_or_bond)) {
 			if (pt_prev)
 				ret = deliver_skb(skb, pt_prev, orig_dev);
 			pt_prev = ptype;
@@ -2561,7 +2643,7 @@ out:
 	return netif_receive_skb(skb);
 }
 
-void napi_gro_flush(struct napi_struct *napi)
+static void napi_gro_flush(struct napi_struct *napi)
 {
 	struct sk_buff *skb, *next;
 
@@ -2574,7 +2656,6 @@ void napi_gro_flush(struct napi_struct *napi)
 	napi->gro_count = 0;
 	napi->gro_list = NULL;
 }
-EXPORT_SYMBOL(napi_gro_flush);
 
 enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
 {
@@ -2966,7 +3047,7 @@ static void net_rx_action(struct softirq_action *h)
 		 * entries to the tail of this list, and only ->poll()
 		 * calls can remove this head entry from the list.
 		 */
-		n = list_entry(list->next, struct napi_struct, poll_list);
+		n = list_first_entry(list, struct napi_struct, poll_list);
 
 		have = netpoll_poll_lock(n);
 
@@ -3185,7 +3266,7 @@ static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
 {
 	const struct net_device_stats *stats = dev_get_stats(dev);
 
-	seq_printf(seq, "%6s:%8lu %7lu %4lu %4lu %4lu %5lu %10lu %9lu "
+	seq_printf(seq, "%6s: %7lu %7lu %4lu %4lu %4lu %5lu %10lu %9lu "
 		   "%8lu %7lu %4lu %4lu %4lu %5lu %7lu %10lu\n",
 		   dev->name, stats->rx_bytes, stats->rx_packets,
 		   stats->rx_errors,
@@ -3640,10 +3721,10 @@ void __dev_set_rx_mode(struct net_device *dev)
 	/* Unicast addresses changes may only happen under the rtnl,
 	 * therefore calling __dev_set_promiscuity here is safe.
 	 */
-	if (dev->uc.count > 0 && !dev->uc_promisc) {
+	if (!netdev_uc_empty(dev) && !dev->uc_promisc) {
 		__dev_set_promiscuity(dev, 1);
 		dev->uc_promisc = 1;
-	} else if (dev->uc.count == 0 && dev->uc_promisc) {
+	} else if (netdev_uc_empty(dev) && dev->uc_promisc) {
 		__dev_set_promiscuity(dev, -1);
 		dev->uc_promisc = 0;
 	}
@@ -4211,7 +4292,7 @@ static void dev_addr_discard(struct net_device *dev)
 	netif_addr_lock_bh(dev);
 
 	__dev_addr_discard(&dev->mc_list);
-	dev->mc_count = 0;
+	netdev_mc_count(dev) = 0;
 
 	netif_addr_unlock_bh(dev);
 }
@@ -4247,18 +4328,10 @@ unsigned dev_get_flags(const struct net_device *dev)
 }
 EXPORT_SYMBOL(dev_get_flags);
 
-/**
- * dev_change_flags - change device settings
- * @dev: device
- * @flags: device state flags
- *
- * Change settings on device based state flags. The flags are
- * in the userspace exported format.
- */
-int dev_change_flags(struct net_device *dev, unsigned flags)
+int __dev_change_flags(struct net_device *dev, unsigned int flags)
 {
-	int ret, changes;
 	int old_flags = dev->flags;
+	int ret;
 
 	ASSERT_RTNL();
 
@@ -4289,17 +4362,12 @@ int dev_change_flags(struct net_device *dev, unsigned flags)
 
 	ret = 0;
 	if ((old_flags ^ flags) & IFF_UP) {	/* Bit is different  ? */
-		ret = ((old_flags & IFF_UP) ? dev_close : dev_open)(dev);
+		ret = ((old_flags & IFF_UP) ? __dev_close : __dev_open)(dev);
 
 		if (!ret)
 			dev_set_rx_mode(dev);
 	}
 
-	if (dev->flags & IFF_UP &&
-	    ((old_flags ^ dev->flags) & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI |
-					  IFF_VOLATILE)))
-		call_netdevice_notifiers(NETDEV_CHANGE, dev);
-
 	if ((flags ^ dev->gflags) & IFF_PROMISC) {
 		int inc = (flags & IFF_PROMISC) ? 1 : -1;
 
@@ -4318,11 +4386,47 @@ int dev_change_flags(struct net_device *dev, unsigned flags)
 			dev_set_allmulti(dev, inc);
 	}
 
-	/* Exclude state transition flags, already notified */
-	changes = (old_flags ^ dev->flags) & ~(IFF_UP | IFF_RUNNING);
+	return ret;
+}
+
+void __dev_notify_flags(struct net_device *dev, unsigned int old_flags)
+{
+	unsigned int changes = dev->flags ^ old_flags;
+
+	if (changes & IFF_UP) {
+		if (dev->flags & IFF_UP)
+			call_netdevice_notifiers(NETDEV_UP, dev);
+		else
+			call_netdevice_notifiers(NETDEV_DOWN, dev);
+	}
+
+	if (dev->flags & IFF_UP &&
+	    (changes & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI | IFF_VOLATILE)))
+		call_netdevice_notifiers(NETDEV_CHANGE, dev);
+}
+
+/**
+ * dev_change_flags - change device settings
+ * @dev: device
+ * @flags: device state flags
+ *
+ * Change settings on device based state flags. The flags are
+ * in the userspace exported format.
+ */
+int dev_change_flags(struct net_device *dev, unsigned flags)
+{
+	int ret, changes;
+	int old_flags = dev->flags;
+
+	ret = __dev_change_flags(dev, flags);
+	if (ret < 0)
+		return ret;
+
+	changes = old_flags ^ dev->flags;
 	if (changes)
 		rtmsg_ifinfo(RTM_NEWLINK, dev, changes);
 
+	__dev_notify_flags(dev, old_flags);
 	return ret;
 }
 EXPORT_SYMBOL(dev_change_flags);
@@ -4813,6 +4917,10 @@ static void rollback_registered_many(struct list_head *head)
 		 */
 		call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
 
+		if (!dev->rtnl_link_ops ||
+		    dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
+			rtmsg_ifinfo(RTM_DELLINK, dev, ~0U);
+
 		/*
 		 * Flush the unicast and multicast chains
 		 */
@@ -4830,7 +4938,7 @@ static void rollback_registered_many(struct list_head *head)
 	}
 
 	/* Process any work delayed until the end of the batch */
-	dev = list_entry(head->next, struct net_device, unreg_list);
+	dev = list_first_entry(head, struct net_device, unreg_list);
 	call_netdevice_notifiers(NETDEV_UNREGISTER_BATCH, dev);
 
 	synchronize_net();
@@ -5039,7 +5147,9 @@ int register_netdevice(struct net_device *dev)
 	 * Prevent userspace races by waiting until the network
 	 * device is fully setup before sending notifications.
 	 */
-	rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U);
+	if (!dev->rtnl_link_ops ||
+	    dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
+		rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U);
 
 out:
 	return ret;
@@ -5216,7 +5326,7 @@ void netdev_run_todo(void)
 
 	while (!list_empty(&list)) {
 		struct net_device *dev
-			= list_entry(list.next, struct net_device, todo_list);
+			= list_first_entry(&list, struct net_device, todo_list);
 		list_del(&dev->todo_list);
 
 		if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
@@ -5367,6 +5477,8 @@ struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
 
 	netdev_init_queues(dev);
 
+	INIT_LIST_HEAD(&dev->ethtool_ntuple_list.list);
+	dev->ethtool_ntuple_list.count = 0;
 	INIT_LIST_HEAD(&dev->napi_list);
 	INIT_LIST_HEAD(&dev->unreg_list);
 	INIT_LIST_HEAD(&dev->link_watch_list);
@@ -5403,6 +5515,9 @@ void free_netdev(struct net_device *dev)
 	/* Flush device addresses */
 	dev_addr_flush(dev);
 
+	/* Clear ethtool n-tuple list */
+	ethtool_ntuple_flush(dev);
+
 	list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
 		netif_napi_del(p);
 
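Note, illustrative only (not part of the patch): dev.c now separates the "do the work" helpers (__dev_open(), __dev_close(), __dev_change_flags()) from the announcement step (rtmsg_ifinfo() plus __dev_notify_flags() and the notifier calls), so a caller can apply several changes and notify once. A rough sketch of a hypothetical caller using that split; only the __dev_change_flags()/__dev_notify_flags()/rtmsg_ifinfo() calls come from the hunks above, the function itself is invented for illustration:

static int example_change_flags_deferred(struct net_device *dev,
					 unsigned int flags)
{
	unsigned int old_flags = dev->flags;
	int err;

	/* apply the flag change without sending notifier events or RTM_NEWLINK yet */
	err = __dev_change_flags(dev, flags);
	if (err < 0)
		return err;

	/* ... apply further attribute changes here ... */

	/* announce once, the same way dev_change_flags() now does */
	if (old_flags ^ dev->flags)
		rtmsg_ifinfo(RTM_NEWLINK, dev, old_flags ^ dev->flags);
	__dev_notify_flags(dev, old_flags);
	return 0;
}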
diff --git a/net/core/dev_mcast.c b/net/core/dev_mcast.c
index 9e2fa39f22a3..3dc295beb483 100644
--- a/net/core/dev_mcast.c
+++ b/net/core/dev_mcast.c
@@ -96,7 +96,10 @@ int dev_mc_add(struct net_device *dev, void *addr, int alen, int glbl)
 	int err;
 
 	netif_addr_lock_bh(dev);
-	err = __dev_addr_add(&dev->mc_list, &dev->mc_count, addr, alen, glbl);
+	if (alen != dev->addr_len)
+		err = -EINVAL;
+	else
+		err = __dev_addr_add(&dev->mc_list, &dev->mc_count, addr, alen, glbl);
 	if (!err)
 		__dev_set_rx_mode(dev);
 	netif_addr_unlock_bh(dev);
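Illustrative only: with the added length check, dev_mc_add() now rejects addresses whose length does not match dev->addr_len. A hypothetical in-kernel caller (the function and the example multicast address are invented; for Ethernet, dev->addr_len equals ETH_ALEN):

static int example_add_mcast(struct net_device *dev)
{
	static u8 mc_addr[ETH_ALEN] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };

	/* alen must equal dev->addr_len or the call fails with -EINVAL */
	return dev_mc_add(dev, mc_addr, dev->addr_len, 0);
}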
diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c
index b8e9d3a86887..cf208d8042b1 100644
--- a/net/core/drop_monitor.c
+++ b/net/core/drop_monitor.c
@@ -21,6 +21,7 @@
 #include <linux/percpu.h>
 #include <linux/timer.h>
 #include <linux/bitops.h>
+#include <linux/slab.h>
 #include <net/genetlink.h>
 #include <net/netevent.h>
 
@@ -296,7 +297,6 @@ static int dropmon_net_event(struct notifier_block *ev_block,
 
 		new_stat->dev = dev;
 		new_stat->last_rx = jiffies;
-		INIT_RCU_HEAD(&new_stat->rcu);
 		spin_lock(&trace_state_lock);
 		list_add_rcu(&new_stat->list, &hw_stats_list);
 		spin_unlock(&trace_state_lock);
diff --git a/net/core/dst.c b/net/core/dst.c
index cb1b3488b739..f307bc18f6a0 100644
--- a/net/core/dst.c
+++ b/net/core/dst.c
@@ -12,6 +12,7 @@
 #include <linux/workqueue.h>
 #include <linux/mm.h>
 #include <linux/module.h>
+#include <linux/slab.h>
 #include <linux/netdevice.h>
 #include <linux/skbuff.h>
 #include <linux/string.h>
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index 236a9988ea91..9d55c57f318a 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -17,6 +17,8 @@
 #include <linux/errno.h>
 #include <linux/ethtool.h>
 #include <linux/netdevice.h>
+#include <linux/bitops.h>
+#include <linux/slab.h>
 #include <asm/uaccess.h>
 
 /*
@@ -120,7 +122,7 @@ int ethtool_op_set_ufo(struct net_device *dev, u32 data)
  * NETIF_F_xxx values in include/linux/netdevice.h
  */
 static const u32 flags_dup_features =
-	ETH_FLAG_LRO;
+	(ETH_FLAG_LRO | ETH_FLAG_NTUPLE);
 
 u32 ethtool_op_get_flags(struct net_device *dev)
 {
@@ -134,19 +136,44 @@ u32 ethtool_op_get_flags(struct net_device *dev)
 
 int ethtool_op_set_flags(struct net_device *dev, u32 data)
 {
+	const struct ethtool_ops *ops = dev->ethtool_ops;
+	unsigned long features = dev->features;
+
 	if (data & ETH_FLAG_LRO)
-		dev->features |= NETIF_F_LRO;
+		features |= NETIF_F_LRO;
 	else
-		dev->features &= ~NETIF_F_LRO;
+		features &= ~NETIF_F_LRO;
+
+	if (data & ETH_FLAG_NTUPLE) {
+		if (!ops->set_rx_ntuple)
+			return -EOPNOTSUPP;
+		features |= NETIF_F_NTUPLE;
+	} else {
+		/* safe to clear regardless */
+		features &= ~NETIF_F_NTUPLE;
+	}
 
+	dev->features = features;
 	return 0;
 }
 
+void ethtool_ntuple_flush(struct net_device *dev)
+{
+	struct ethtool_rx_ntuple_flow_spec_container *fsc, *f;
+
+	list_for_each_entry_safe(fsc, f, &dev->ethtool_ntuple_list.list, list) {
+		list_del(&fsc->list);
+		kfree(fsc);
+	}
+	dev->ethtool_ntuple_list.count = 0;
+}
+EXPORT_SYMBOL(ethtool_ntuple_flush);
+
 /* Handlers for each ethtool command */
 
 static int ethtool_get_settings(struct net_device *dev, void __user *useraddr)
 {
-	struct ethtool_cmd cmd = { ETHTOOL_GSET };
+	struct ethtool_cmd cmd = { .cmd = ETHTOOL_GSET };
 	int err;
 
 	if (!dev->ethtool_ops->get_settings)
@@ -174,7 +201,7 @@ static int ethtool_set_settings(struct net_device *dev, void __user *useraddr)
 	return dev->ethtool_ops->set_settings(dev, &cmd);
 }
 
-static int ethtool_get_drvinfo(struct net_device *dev, void __user *useraddr)
+static noinline_for_stack int ethtool_get_drvinfo(struct net_device *dev, void __user *useraddr)
 {
 	struct ethtool_drvinfo info;
 	const struct ethtool_ops *ops = dev->ethtool_ops;
@@ -186,6 +213,10 @@ static int ethtool_get_drvinfo(struct net_device *dev, void __user *useraddr)
 	info.cmd = ETHTOOL_GDRVINFO;
 	ops->get_drvinfo(dev, &info);
 
+	/*
+	 * this method of obtaining string set info is deprecated;
+	 * Use ETHTOOL_GSSET_INFO instead.
+	 */
 	if (ops->get_sset_count) {
 		int rc;
 
@@ -209,7 +240,67 @@ static noinline_for_stack int ethtool_get_drvinfo(struct net_device *dev, void __user *useraddr)
 	return 0;
 }
 
-static int ethtool_set_rxnfc(struct net_device *dev, void __user *useraddr)
+static noinline_for_stack int ethtool_get_sset_info(struct net_device *dev,
+						    void __user *useraddr)
+{
+	struct ethtool_sset_info info;
+	const struct ethtool_ops *ops = dev->ethtool_ops;
+	u64 sset_mask;
+	int i, idx = 0, n_bits = 0, ret, rc;
+	u32 *info_buf = NULL;
+
+	if (!ops->get_sset_count)
+		return -EOPNOTSUPP;
+
+	if (copy_from_user(&info, useraddr, sizeof(info)))
+		return -EFAULT;
+
+	/* store copy of mask, because we zero struct later on */
+	sset_mask = info.sset_mask;
+	if (!sset_mask)
+		return 0;
+
+	/* calculate size of return buffer */
+	n_bits = hweight64(sset_mask);
+
+	memset(&info, 0, sizeof(info));
+	info.cmd = ETHTOOL_GSSET_INFO;
+
+	info_buf = kzalloc(n_bits * sizeof(u32), GFP_USER);
+	if (!info_buf)
+		return -ENOMEM;
+
+	/*
+	 * fill return buffer based on input bitmask and successful
+	 * get_sset_count return
+	 */
+	for (i = 0; i < 64; i++) {
+		if (!(sset_mask & (1ULL << i)))
+			continue;
+
+		rc = ops->get_sset_count(dev, i);
+		if (rc >= 0) {
+			info.sset_mask |= (1ULL << i);
+			info_buf[idx++] = rc;
+		}
+	}
+
+	ret = -EFAULT;
+	if (copy_to_user(useraddr, &info, sizeof(info)))
+		goto out;
+
+	useraddr += offsetof(struct ethtool_sset_info, data);
+	if (copy_to_user(useraddr, info_buf, idx * sizeof(u32)))
+		goto out;
+
+	ret = 0;
+
+out:
+	kfree(info_buf);
+	return ret;
+}
+
+static noinline_for_stack int ethtool_set_rxnfc(struct net_device *dev, void __user *useraddr)
 {
 	struct ethtool_rxnfc cmd;
 
@@ -222,7 +313,7 @@ static int ethtool_set_rxnfc(struct net_device *dev, void __user *useraddr)
 	return dev->ethtool_ops->set_rxnfc(dev, &cmd);
 }
 
-static int ethtool_get_rxnfc(struct net_device *dev, void __user *useraddr)
+static noinline_for_stack int ethtool_get_rxnfc(struct net_device *dev, void __user *useraddr)
 {
 	struct ethtool_rxnfc info;
 	const struct ethtool_ops *ops = dev->ethtool_ops;
@@ -266,6 +357,312 @@ err_out:
 	return ret;
 }
 
+static void __rx_ntuple_filter_add(struct ethtool_rx_ntuple_list *list,
+				   struct ethtool_rx_ntuple_flow_spec *spec,
+				   struct ethtool_rx_ntuple_flow_spec_container *fsc)
+{
+
+	/* don't add filters forever */
+	if (list->count >= ETHTOOL_MAX_NTUPLE_LIST_ENTRY) {
+		/* free the container */
+		kfree(fsc);
+		return;
+	}
+
+	/* Copy the whole filter over */
+	fsc->fs.flow_type = spec->flow_type;
+	memcpy(&fsc->fs.h_u, &spec->h_u, sizeof(spec->h_u));
+	memcpy(&fsc->fs.m_u, &spec->m_u, sizeof(spec->m_u));
+
+	fsc->fs.vlan_tag = spec->vlan_tag;
+	fsc->fs.vlan_tag_mask = spec->vlan_tag_mask;
+	fsc->fs.data = spec->data;
+	fsc->fs.data_mask = spec->data_mask;
+	fsc->fs.action = spec->action;
+
+	/* add to the list */
+	list_add_tail_rcu(&fsc->list, &list->list);
+	list->count++;
+}
+
+static noinline_for_stack int ethtool_set_rx_ntuple(struct net_device *dev, void __user *useraddr)
+{
+	struct ethtool_rx_ntuple cmd;
+	const struct ethtool_ops *ops = dev->ethtool_ops;
+	struct ethtool_rx_ntuple_flow_spec_container *fsc = NULL;
+	int ret;
+
+	if (!(dev->features & NETIF_F_NTUPLE))
+		return -EINVAL;
+
+	if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
+		return -EFAULT;
+
+	/*
+	 * Cache filter in dev struct for GET operation only if
+	 * the underlying driver doesn't have its own GET operation, and
+	 * only if the filter was added successfully.  First make sure we
+	 * can allocate the filter, then continue if successful.
+	 */
+	if (!ops->get_rx_ntuple) {
+		fsc = kmalloc(sizeof(*fsc), GFP_ATOMIC);
+		if (!fsc)
+			return -ENOMEM;
+	}
+
+	ret = ops->set_rx_ntuple(dev, &cmd);
+	if (ret) {
+		kfree(fsc);
+		return ret;
+	}
+
+	if (!ops->get_rx_ntuple)
+		__rx_ntuple_filter_add(&dev->ethtool_ntuple_list, &cmd.fs, fsc);
+
+	return ret;
+}
+
+static int ethtool_get_rx_ntuple(struct net_device *dev, void __user *useraddr)
+{
+	struct ethtool_gstrings gstrings;
+	const struct ethtool_ops *ops = dev->ethtool_ops;
+	struct ethtool_rx_ntuple_flow_spec_container *fsc;
+	u8 *data;
+	char *p;
+	int ret, i, num_strings = 0;
+
+	if (!ops->get_sset_count)
+		return -EOPNOTSUPP;
+
+	if (copy_from_user(&gstrings, useraddr, sizeof(gstrings)))
+		return -EFAULT;
+
+	ret = ops->get_sset_count(dev, gstrings.string_set);
+	if (ret < 0)
+		return ret;
+
+	gstrings.len = ret;
+
+	data = kmalloc(gstrings.len * ETH_GSTRING_LEN, GFP_USER);
+	if (!data)
+		return -ENOMEM;
+
+	if (ops->get_rx_ntuple) {
+		/* driver-specific filter grab */
+		ret = ops->get_rx_ntuple(dev, gstrings.string_set, data);
+		goto copy;
+	}
+
+	/* default ethtool filter grab */
+	i = 0;
+	p = (char *)data;
+	list_for_each_entry(fsc, &dev->ethtool_ntuple_list.list, list) {
+		sprintf(p, "Filter %d:\n", i);
+		p += ETH_GSTRING_LEN;
+		num_strings++;
+
+		switch (fsc->fs.flow_type) {
+		case TCP_V4_FLOW:
+			sprintf(p, "\tFlow Type: TCP\n");
+			p += ETH_GSTRING_LEN;
+			num_strings++;
+			break;
+		case UDP_V4_FLOW:
+			sprintf(p, "\tFlow Type: UDP\n");
+			p += ETH_GSTRING_LEN;
+			num_strings++;
+			break;
+		case SCTP_V4_FLOW:
+			sprintf(p, "\tFlow Type: SCTP\n");
+			p += ETH_GSTRING_LEN;
+			num_strings++;
+			break;
+		case AH_ESP_V4_FLOW:
+			sprintf(p, "\tFlow Type: AH ESP\n");
+			p += ETH_GSTRING_LEN;
+			num_strings++;
+			break;
+		case ESP_V4_FLOW:
+			sprintf(p, "\tFlow Type: ESP\n");
+			p += ETH_GSTRING_LEN;
+			num_strings++;
+			break;
+		case IP_USER_FLOW:
+			sprintf(p, "\tFlow Type: Raw IP\n");
+			p += ETH_GSTRING_LEN;
+			num_strings++;
+			break;
+		case IPV4_FLOW:
+			sprintf(p, "\tFlow Type: IPv4\n");
+			p += ETH_GSTRING_LEN;
+			num_strings++;
+			break;
+		default:
+			sprintf(p, "\tFlow Type: Unknown\n");
+			p += ETH_GSTRING_LEN;
+			num_strings++;
+			goto unknown_filter;
+		};
+
+		/* now the rest of the filters */
+		switch (fsc->fs.flow_type) {
+		case TCP_V4_FLOW:
+		case UDP_V4_FLOW:
+		case SCTP_V4_FLOW:
+			sprintf(p, "\tSrc IP addr: 0x%x\n",
+				fsc->fs.h_u.tcp_ip4_spec.ip4src);
+			p += ETH_GSTRING_LEN;
+			num_strings++;
+			sprintf(p, "\tSrc IP mask: 0x%x\n",
+				fsc->fs.m_u.tcp_ip4_spec.ip4src);
+			p += ETH_GSTRING_LEN;
+			num_strings++;
+			sprintf(p, "\tDest IP addr: 0x%x\n",
+				fsc->fs.h_u.tcp_ip4_spec.ip4dst);
+			p += ETH_GSTRING_LEN;
+			num_strings++;
+			sprintf(p, "\tDest IP mask: 0x%x\n",
+				fsc->fs.m_u.tcp_ip4_spec.ip4dst);
+			p += ETH_GSTRING_LEN;
+			num_strings++;
+			sprintf(p, "\tSrc Port: %d, mask: 0x%x\n",
+				fsc->fs.h_u.tcp_ip4_spec.psrc,
+				fsc->fs.m_u.tcp_ip4_spec.psrc);
+			p += ETH_GSTRING_LEN;
+			num_strings++;
+			sprintf(p, "\tDest Port: %d, mask: 0x%x\n",
+				fsc->fs.h_u.tcp_ip4_spec.pdst,
+				fsc->fs.m_u.tcp_ip4_spec.pdst);
+			p += ETH_GSTRING_LEN;
+			num_strings++;
+			sprintf(p, "\tTOS: %d, mask: 0x%x\n",
+				fsc->fs.h_u.tcp_ip4_spec.tos,
+				fsc->fs.m_u.tcp_ip4_spec.tos);
+			p += ETH_GSTRING_LEN;
+			num_strings++;
+			break;
+		case AH_ESP_V4_FLOW:
+		case ESP_V4_FLOW:
+			sprintf(p, "\tSrc IP addr: 0x%x\n",
+				fsc->fs.h_u.ah_ip4_spec.ip4src);
+			p += ETH_GSTRING_LEN;
+			num_strings++;
+			sprintf(p, "\tSrc IP mask: 0x%x\n",
+				fsc->fs.m_u.ah_ip4_spec.ip4src);
+			p += ETH_GSTRING_LEN;
+			num_strings++;
+			sprintf(p, "\tDest IP addr: 0x%x\n",
+				fsc->fs.h_u.ah_ip4_spec.ip4dst);
+			p += ETH_GSTRING_LEN;
+			num_strings++;
+			sprintf(p, "\tDest IP mask: 0x%x\n",
+				fsc->fs.m_u.ah_ip4_spec.ip4dst);
+			p += ETH_GSTRING_LEN;
+			num_strings++;
+			sprintf(p, "\tSPI: %d, mask: 0x%x\n",
+				fsc->fs.h_u.ah_ip4_spec.spi,
+				fsc->fs.m_u.ah_ip4_spec.spi);
+			p += ETH_GSTRING_LEN;
+			num_strings++;
+			sprintf(p, "\tTOS: %d, mask: 0x%x\n",
+				fsc->fs.h_u.ah_ip4_spec.tos,
+				fsc->fs.m_u.ah_ip4_spec.tos);
+			p += ETH_GSTRING_LEN;
+			num_strings++;
+			break;
+		case IP_USER_FLOW:
+			sprintf(p, "\tSrc IP addr: 0x%x\n",
+				fsc->fs.h_u.raw_ip4_spec.ip4src);
+			p += ETH_GSTRING_LEN;
+			num_strings++;
+			sprintf(p, "\tSrc IP mask: 0x%x\n",
+				fsc->fs.m_u.raw_ip4_spec.ip4src);
+			p += ETH_GSTRING_LEN;
+			num_strings++;
+			sprintf(p, "\tDest IP addr: 0x%x\n",
+				fsc->fs.h_u.raw_ip4_spec.ip4dst);
+			p += ETH_GSTRING_LEN;
+			num_strings++;
+			sprintf(p, "\tDest IP mask: 0x%x\n",
+				fsc->fs.m_u.raw_ip4_spec.ip4dst);
+			p += ETH_GSTRING_LEN;
+			num_strings++;
+			break;
+		case IPV4_FLOW:
+			sprintf(p, "\tSrc IP addr: 0x%x\n",
+				fsc->fs.h_u.usr_ip4_spec.ip4src);
+			p += ETH_GSTRING_LEN;
+			num_strings++;
+			sprintf(p, "\tSrc IP mask: 0x%x\n",
+				fsc->fs.m_u.usr_ip4_spec.ip4src);
+			p += ETH_GSTRING_LEN;
+			num_strings++;
+			sprintf(p, "\tDest IP addr: 0x%x\n",
+				fsc->fs.h_u.usr_ip4_spec.ip4dst);
+			p += ETH_GSTRING_LEN;
+			num_strings++;
+			sprintf(p, "\tDest IP mask: 0x%x\n",
+				fsc->fs.m_u.usr_ip4_spec.ip4dst);
+			p += ETH_GSTRING_LEN;
+			num_strings++;
+			sprintf(p, "\tL4 bytes: 0x%x, mask: 0x%x\n",
+				fsc->fs.h_u.usr_ip4_spec.l4_4_bytes,
+				fsc->fs.m_u.usr_ip4_spec.l4_4_bytes);
+			p += ETH_GSTRING_LEN;
+			num_strings++;
+			sprintf(p, "\tTOS: %d, mask: 0x%x\n",
+				fsc->fs.h_u.usr_ip4_spec.tos,
+				fsc->fs.m_u.usr_ip4_spec.tos);
+			p += ETH_GSTRING_LEN;
+			num_strings++;
+			sprintf(p, "\tIP Version: %d, mask: 0x%x\n",
+				fsc->fs.h_u.usr_ip4_spec.ip_ver,
+				fsc->fs.m_u.usr_ip4_spec.ip_ver);
+			p += ETH_GSTRING_LEN;
+			num_strings++;
+			sprintf(p, "\tProtocol: %d, mask: 0x%x\n",
+				fsc->fs.h_u.usr_ip4_spec.proto,
+				fsc->fs.m_u.usr_ip4_spec.proto);
+			p += ETH_GSTRING_LEN;
+			num_strings++;
+			break;
+		};
+		sprintf(p, "\tVLAN: %d, mask: 0x%x\n",
+			fsc->fs.vlan_tag, fsc->fs.vlan_tag_mask);
+		p += ETH_GSTRING_LEN;
+		num_strings++;
+		sprintf(p, "\tUser-defined: 0x%Lx\n", fsc->fs.data);
+		p += ETH_GSTRING_LEN;
+		num_strings++;
+		sprintf(p, "\tUser-defined mask: 0x%Lx\n", fsc->fs.data_mask);
+		p += ETH_GSTRING_LEN;
+		num_strings++;
+		if (fsc->fs.action == ETHTOOL_RXNTUPLE_ACTION_DROP)
+			sprintf(p, "\tAction: Drop\n");
+		else
+			sprintf(p, "\tAction: Direct to queue %d\n",
+				fsc->fs.action);
+		p += ETH_GSTRING_LEN;
+		num_strings++;
+unknown_filter:
+		i++;
+	}
+copy:
+	/* indicate to userspace how many strings we actually have */
+	gstrings.len = num_strings;
+	ret = -EFAULT;
+	if (copy_to_user(useraddr, &gstrings, sizeof(gstrings)))
+		goto out;
+	useraddr += sizeof(gstrings);
+	if (copy_to_user(useraddr, data, gstrings.len * ETH_GSTRING_LEN))
+		goto out;
+	ret = 0;
+
+out:
+	kfree(data);
+	return ret;
+}
+
 static int ethtool_get_regs(struct net_device *dev, char __user *useraddr)
 {
 	struct ethtool_regs regs;
@@ -324,7 +721,7 @@ static int ethtool_reset(struct net_device *dev, char __user *useraddr)
 
 static int ethtool_get_wol(struct net_device *dev, char __user *useraddr)
 {
-	struct ethtool_wolinfo wol = { ETHTOOL_GWOL };
+	struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
 
 	if (!dev->ethtool_ops->get_wol)
 		return -EOPNOTSUPP;
@@ -456,9 +853,9 @@ static int ethtool_set_eeprom(struct net_device *dev, void __user *useraddr)
 	return ret;
 }
 
-static int ethtool_get_coalesce(struct net_device *dev, void __user *useraddr)
+static noinline_for_stack int ethtool_get_coalesce(struct net_device *dev, void __user *useraddr)
 {
-	struct ethtool_coalesce coalesce = { ETHTOOL_GCOALESCE };
+	struct ethtool_coalesce coalesce = { .cmd = ETHTOOL_GCOALESCE };
 
 	if (!dev->ethtool_ops->get_coalesce)
 		return -EOPNOTSUPP;
@@ -470,7 +867,7 @@ static int ethtool_get_coalesce(struct net_device *dev, void __user *useraddr)
 	return 0;
 }
 
-static int ethtool_set_coalesce(struct net_device *dev, void __user *useraddr)
+static noinline_for_stack int ethtool_set_coalesce(struct net_device *dev, void __user *useraddr)
 {
 	struct ethtool_coalesce coalesce;
 
@@ -485,7 +882,7 @@ static int ethtool_set_coalesce(struct net_device *dev, void __user *useraddr)
 
 static int ethtool_get_ringparam(struct net_device *dev, void __user *useraddr)
 {
-	struct ethtool_ringparam ringparam = { ETHTOOL_GRINGPARAM };
+	struct ethtool_ringparam ringparam = { .cmd = ETHTOOL_GRINGPARAM };
 
 	if (!dev->ethtool_ops->get_ringparam)
 		return -EOPNOTSUPP;
@@ -839,7 +1236,7 @@ static int ethtool_get_perm_addr(struct net_device *dev, void __user *useraddr)
 static int ethtool_get_value(struct net_device *dev, char __user *useraddr,
 			     u32 cmd, u32 (*actor)(struct net_device *))
 {
-	struct ethtool_value edata = { cmd };
+	struct ethtool_value edata = { .cmd = cmd };
 
 	if (!actor)
 		return -EOPNOTSUPP;
@@ -880,7 +1277,7 @@ static int ethtool_set_value(struct net_device *dev, char __user *useraddr,
 	return actor(dev, edata.data);
 }
 
-static int ethtool_flash_device(struct net_device *dev, char __user *useraddr)
+static noinline_for_stack int ethtool_flash_device(struct net_device *dev, char __user *useraddr)
 {
 	struct ethtool_flash efl;
 
@@ -1113,6 +1510,15 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
 	case ETHTOOL_RESET:
 		rc = ethtool_reset(dev, useraddr);
 		break;
+	case ETHTOOL_SRXNTUPLE:
+		rc = ethtool_set_rx_ntuple(dev, useraddr);
+		break;
+	case ETHTOOL_GRXNTUPLE:
+		rc = ethtool_get_rx_ntuple(dev, useraddr);
+		break;
+	case ETHTOOL_GSSET_INFO:
+		rc = ethtool_get_sset_info(dev, useraddr);
+		break;
 	default:
 		rc = -EOPNOTSUPP;
 	}
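Illustrative only: a minimal user-space sketch of the new ETHTOOL_GSSET_INFO command serviced by ethtool_get_sset_info() above. Error handling is trimmed and the interface name "eth0" is an assumption; the SIOCETHTOOL/ifreq usage follows the standard ethtool ioctl pattern rather than anything specific to this patch.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ethtool_sset_info *info;
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;

	/* one __u32 of reply space: we ask about a single string set */
	info = calloc(1, sizeof(*info) + sizeof(__u32));
	if (!info)
		return 1;
	info->cmd = ETHTOOL_GSSET_INFO;
	info->sset_mask = 1ULL << ETH_SS_STATS;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (void *)info;

	/* the kernel clears unsupported bits in sset_mask and fills data[] */
	if (ioctl(fd, SIOCETHTOOL, &ifr) == 0 && info->sset_mask)
		printf("ETH_SS_STATS: %u strings\n", info->data[0]);

	free(info);
	return 0;
}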
diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c
index 02a3b2c69c1e..d2c3e7dc2e5f 100644
--- a/net/core/fib_rules.c
+++ b/net/core/fib_rules.c
@@ -10,6 +10,7 @@
 
 #include <linux/types.h>
 #include <linux/kernel.h>
+#include <linux/slab.h>
 #include <linux/list.h>
 #include <net/net_namespace.h>
 #include <net/sock.h>
@@ -708,7 +709,7 @@ static struct notifier_block fib_rules_notifier = {
 	.notifier_call = fib_rules_event,
 };
 
-static int fib_rules_net_init(struct net *net)
+static int __net_init fib_rules_net_init(struct net *net)
 {
 	INIT_LIST_HEAD(&net->rules_ops);
 	spin_lock_init(&net->rules_mod_lock);
diff --git a/net/core/filter.c b/net/core/filter.c
index 3541aa48d21d..ff943bed21af 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -25,6 +25,7 @@
 #include <linux/inet.h>
 #include <linux/netdevice.h>
 #include <linux/if_packet.h>
+#include <linux/gfp.h>
 #include <net/ip.h>
 #include <net/protocol.h>
 #include <net/netlink.h>
@@ -529,6 +530,7 @@ int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
 	sk_filter_delayed_uncharge(sk, old_fp);
 	return 0;
 }
+EXPORT_SYMBOL_GPL(sk_attach_filter);
 
 int sk_detach_filter(struct sock *sk)
 {
@@ -545,3 +547,4 @@ int sk_detach_filter(struct sock *sk)
 	rcu_read_unlock_bh();
 	return ret;
 }
+EXPORT_SYMBOL_GPL(sk_detach_filter);
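Note, illustrative only: sk_attach_filter(), exported above for in-kernel users, is the same routine that services the long-standing SO_ATTACH_FILTER socket option; that connection is background knowledge, not something this hunk adds. A minimal user-space sketch of attaching a classic BPF "accept everything" filter:

#include <sys/socket.h>
#include <linux/filter.h>

static int attach_accept_all(int fd)
{
	/* single classic-BPF instruction: return the whole packet */
	struct sock_filter code[] = {
		BPF_STMT(BPF_RET | BPF_K, 0xffffffff),
	};
	struct sock_fprog prog = {
		.len = sizeof(code) / sizeof(code[0]),
		.filter = code,
	};

	return setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER,
			  &prog, sizeof(prog));
}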
diff --git a/net/core/gen_estimator.c b/net/core/gen_estimator.c
index 493775f4f2f1..cf8e70392fe0 100644
--- a/net/core/gen_estimator.c
+++ b/net/core/gen_estimator.c
@@ -32,6 +32,7 @@
 #include <linux/rtnetlink.h>
 #include <linux/init.h>
 #include <linux/rbtree.h>
+#include <linux/slab.h>
 #include <net/sock.h>
 #include <net/gen_stats.h>
 
diff --git a/net/core/iovec.c b/net/core/iovec.c index 16ad45d4882b..1e7f4e91a935 100644 --- a/net/core/iovec.c +++ b/net/core/iovec.c | |||
| @@ -20,7 +20,6 @@ | |||
| 20 | #include <linux/module.h> | 20 | #include <linux/module.h> |
| 21 | #include <linux/kernel.h> | 21 | #include <linux/kernel.h> |
| 22 | #include <linux/mm.h> | 22 | #include <linux/mm.h> |
| 23 | #include <linux/slab.h> | ||
| 24 | #include <linux/net.h> | 23 | #include <linux/net.h> |
| 25 | #include <linux/in6.h> | 24 | #include <linux/in6.h> |
| 26 | #include <asm/uaccess.h> | 25 | #include <asm/uaccess.h> |
diff --git a/net/core/link_watch.c b/net/core/link_watch.c index 5910b555a54a..bdbce2f5875b 100644 --- a/net/core/link_watch.c +++ b/net/core/link_watch.c | |||
| @@ -19,7 +19,6 @@ | |||
| 19 | #include <linux/rtnetlink.h> | 19 | #include <linux/rtnetlink.h> |
| 20 | #include <linux/jiffies.h> | 20 | #include <linux/jiffies.h> |
| 21 | #include <linux/spinlock.h> | 21 | #include <linux/spinlock.h> |
| 22 | #include <linux/slab.h> | ||
| 23 | #include <linux/workqueue.h> | 22 | #include <linux/workqueue.h> |
| 24 | #include <linux/bitops.h> | 23 | #include <linux/bitops.h> |
| 25 | #include <asm/types.h> | 24 | #include <asm/types.h> |
diff --git a/net/core/neighbour.c b/net/core/neighbour.c index f35377b643e4..bff37908bd55 100644 --- a/net/core/neighbour.c +++ b/net/core/neighbour.c | |||
| @@ -15,6 +15,7 @@ | |||
| 15 | * Harald Welte Add neighbour cache statistics like rtstat | 15 | * Harald Welte Add neighbour cache statistics like rtstat |
| 16 | */ | 16 | */ |
| 17 | 17 | ||
| 18 | #include <linux/slab.h> | ||
| 18 | #include <linux/types.h> | 19 | #include <linux/types.h> |
| 19 | #include <linux/kernel.h> | 20 | #include <linux/kernel.h> |
| 20 | #include <linux/module.h> | 21 | #include <linux/module.h> |
| @@ -771,6 +772,8 @@ static __inline__ int neigh_max_probes(struct neighbour *n) | |||
| 771 | } | 772 | } |
| 772 | 773 | ||
| 773 | static void neigh_invalidate(struct neighbour *neigh) | 774 | static void neigh_invalidate(struct neighbour *neigh) |
| 775 | __releases(neigh->lock) | ||
| 776 | __acquires(neigh->lock) | ||
| 774 | { | 777 | { |
| 775 | struct sk_buff *skb; | 778 | struct sk_buff *skb; |
| 776 | 779 | ||
| @@ -2417,8 +2420,7 @@ EXPORT_SYMBOL(neigh_seq_stop); | |||
| 2417 | 2420 | ||
| 2418 | static void *neigh_stat_seq_start(struct seq_file *seq, loff_t *pos) | 2421 | static void *neigh_stat_seq_start(struct seq_file *seq, loff_t *pos) |
| 2419 | { | 2422 | { |
| 2420 | struct proc_dir_entry *pde = seq->private; | 2423 | struct neigh_table *tbl = seq->private; |
| 2421 | struct neigh_table *tbl = pde->data; | ||
| 2422 | int cpu; | 2424 | int cpu; |
| 2423 | 2425 | ||
| 2424 | if (*pos == 0) | 2426 | if (*pos == 0) |
| @@ -2435,8 +2437,7 @@ static void *neigh_stat_seq_start(struct seq_file *seq, loff_t *pos) | |||
| 2435 | 2437 | ||
| 2436 | static void *neigh_stat_seq_next(struct seq_file *seq, void *v, loff_t *pos) | 2438 | static void *neigh_stat_seq_next(struct seq_file *seq, void *v, loff_t *pos) |
| 2437 | { | 2439 | { |
| 2438 | struct proc_dir_entry *pde = seq->private; | 2440 | struct neigh_table *tbl = seq->private; |
| 2439 | struct neigh_table *tbl = pde->data; | ||
| 2440 | int cpu; | 2441 | int cpu; |
| 2441 | 2442 | ||
| 2442 | for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) { | 2443 | for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) { |
| @@ -2455,8 +2456,7 @@ static void neigh_stat_seq_stop(struct seq_file *seq, void *v) | |||
| 2455 | 2456 | ||
| 2456 | static int neigh_stat_seq_show(struct seq_file *seq, void *v) | 2457 | static int neigh_stat_seq_show(struct seq_file *seq, void *v) |
| 2457 | { | 2458 | { |
| 2458 | struct proc_dir_entry *pde = seq->private; | 2459 | struct neigh_table *tbl = seq->private; |
| 2459 | struct neigh_table *tbl = pde->data; | ||
| 2460 | struct neigh_statistics *st = v; | 2460 | struct neigh_statistics *st = v; |
| 2461 | 2461 | ||
| 2462 | if (v == SEQ_START_TOKEN) { | 2462 | if (v == SEQ_START_TOKEN) { |
| @@ -2501,7 +2501,7 @@ static int neigh_stat_seq_open(struct inode *inode, struct file *file) | |||
| 2501 | 2501 | ||
| 2502 | if (!ret) { | 2502 | if (!ret) { |
| 2503 | struct seq_file *sf = file->private_data; | 2503 | struct seq_file *sf = file->private_data; |
| 2504 | sf->private = PDE(inode); | 2504 | sf->private = PDE(inode)->data; |
| 2505 | } | 2505 | } |
| 2506 | return ret; | 2506 | return ret; |
| 2507 | }; | 2507 | }; |
| @@ -2559,9 +2559,11 @@ EXPORT_SYMBOL(neigh_app_ns); | |||
| 2559 | 2559 | ||
| 2560 | #ifdef CONFIG_SYSCTL | 2560 | #ifdef CONFIG_SYSCTL |
| 2561 | 2561 | ||
| 2562 | #define NEIGH_VARS_MAX 19 | ||
| 2563 | |||
| 2562 | static struct neigh_sysctl_table { | 2564 | static struct neigh_sysctl_table { |
| 2563 | struct ctl_table_header *sysctl_header; | 2565 | struct ctl_table_header *sysctl_header; |
| 2564 | struct ctl_table neigh_vars[__NET_NEIGH_MAX]; | 2566 | struct ctl_table neigh_vars[NEIGH_VARS_MAX]; |
| 2565 | char *dev_name; | 2567 | char *dev_name; |
| 2566 | } neigh_sysctl_template __read_mostly = { | 2568 | } neigh_sysctl_template __read_mostly = { |
| 2567 | .neigh_vars = { | 2569 | .neigh_vars = { |
| @@ -2678,8 +2680,7 @@ static struct neigh_sysctl_table { | |||
| 2678 | }; | 2680 | }; |
| 2679 | 2681 | ||
| 2680 | int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p, | 2682 | int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p, |
| 2681 | int p_id, int pdev_id, char *p_name, | 2683 | char *p_name, proc_handler *handler) |
| 2682 | proc_handler *handler) | ||
| 2683 | { | 2684 | { |
| 2684 | struct neigh_sysctl_table *t; | 2685 | struct neigh_sysctl_table *t; |
| 2685 | const char *dev_name_source = NULL; | 2686 | const char *dev_name_source = NULL; |
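The neigh_stat_* handlers now keep the neigh_table pointer directly in seq->private, filled in from PDE(inode)->data at open time, instead of dereferencing the proc_dir_entry in every callback. A hedged sketch of that seq_file pattern with illustrative "example_*" names:

```c
/* Hedged sketch of the pattern used above: stash the object pointer
 * (PDE(inode)->data) in seq->private at open time so the iterator
 * callbacks can use it directly.  Names are illustrative. */
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

static void *example_stat_seq_start(struct seq_file *seq, loff_t *pos)
{
	return *pos == 0 ? seq->private : NULL;	/* single record */
}

static void *example_stat_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	(*pos)++;
	return NULL;
}

static void example_stat_seq_stop(struct seq_file *seq, void *v)
{
}

static int example_stat_seq_show(struct seq_file *seq, void *v)
{
	seq_printf(seq, "object at %p\n", seq->private);
	return 0;
}

static const struct seq_operations example_stat_seq_ops = {
	.start = example_stat_seq_start,
	.next  = example_stat_seq_next,
	.stop  = example_stat_seq_stop,
	.show  = example_stat_seq_show,
};

static int example_stat_seq_open(struct inode *inode, struct file *file)
{
	int ret = seq_open(file, &example_stat_seq_ops);

	if (!ret) {
		struct seq_file *sf = file->private_data;

		/* hand the table straight to the iterator */
		sf->private = PDE(inode)->data;
	}
	return ret;
}
```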
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c index 099c753c4213..59cfc7d8fc45 100644 --- a/net/core/net-sysfs.c +++ b/net/core/net-sysfs.c | |||
| @@ -13,6 +13,7 @@ | |||
| 13 | #include <linux/kernel.h> | 13 | #include <linux/kernel.h> |
| 14 | #include <linux/netdevice.h> | 14 | #include <linux/netdevice.h> |
| 15 | #include <linux/if_arp.h> | 15 | #include <linux/if_arp.h> |
| 16 | #include <linux/slab.h> | ||
| 16 | #include <net/sock.h> | 17 | #include <net/sock.h> |
| 17 | #include <linux/rtnetlink.h> | 18 | #include <linux/rtnetlink.h> |
| 18 | #include <linux/wireless.h> | 19 | #include <linux/wireless.h> |
diff --git a/net/core/net-traces.c b/net/core/net-traces.c index f1e982c508bb..afa6380ed88a 100644 --- a/net/core/net-traces.c +++ b/net/core/net-traces.c | |||
| @@ -19,6 +19,7 @@ | |||
| 19 | #include <linux/workqueue.h> | 19 | #include <linux/workqueue.h> |
| 20 | #include <linux/netlink.h> | 20 | #include <linux/netlink.h> |
| 21 | #include <linux/net_dropmon.h> | 21 | #include <linux/net_dropmon.h> |
| 22 | #include <linux/slab.h> | ||
| 22 | 23 | ||
| 23 | #include <asm/unaligned.h> | 24 | #include <asm/unaligned.h> |
| 24 | #include <asm/bitops.h> | 25 | #include <asm/bitops.h> |
diff --git a/net/core/netpoll.c b/net/core/netpoll.c index 0b4d0d35ef40..a58f59b97597 100644 --- a/net/core/netpoll.c +++ b/net/core/netpoll.c | |||
| @@ -22,6 +22,7 @@ | |||
| 22 | #include <linux/delay.h> | 22 | #include <linux/delay.h> |
| 23 | #include <linux/rcupdate.h> | 23 | #include <linux/rcupdate.h> |
| 24 | #include <linux/workqueue.h> | 24 | #include <linux/workqueue.h> |
| 25 | #include <linux/slab.h> | ||
| 25 | #include <net/tcp.h> | 26 | #include <net/tcp.h> |
| 26 | #include <net/udp.h> | 27 | #include <net/udp.h> |
| 27 | #include <asm/unaligned.h> | 28 | #include <asm/unaligned.h> |
| @@ -407,11 +408,24 @@ static void arp_reply(struct sk_buff *skb) | |||
| 407 | __be32 sip, tip; | 408 | __be32 sip, tip; |
| 408 | unsigned char *sha; | 409 | unsigned char *sha; |
| 409 | struct sk_buff *send_skb; | 410 | struct sk_buff *send_skb; |
| 410 | struct netpoll *np = NULL; | 411 | struct netpoll *np, *tmp; |
| 412 | unsigned long flags; | ||
| 413 | int hits = 0; | ||
| 414 | |||
| 415 | if (list_empty(&npinfo->rx_np)) | ||
| 416 | return; | ||
| 417 | |||
| 418 | /* Before checking the packet, we do some early | ||
| 419 | inspection of whether this is interesting at all */ | ||
| 420 | spin_lock_irqsave(&npinfo->rx_lock, flags); | ||
| 421 | list_for_each_entry_safe(np, tmp, &npinfo->rx_np, rx) { | ||
| 422 | if (np->dev == skb->dev) | ||
| 423 | hits++; | ||
| 424 | } | ||
| 425 | spin_unlock_irqrestore(&npinfo->rx_lock, flags); | ||
| 411 | 426 | ||
| 412 | if (npinfo->rx_np && npinfo->rx_np->dev == skb->dev) | 427 | /* No netpoll struct is using this dev */ |
| 413 | np = npinfo->rx_np; | 428 | if (!hits) |
| 414 | if (!np) | ||
| 415 | return; | 429 | return; |
| 416 | 430 | ||
| 417 | /* No arp on this interface */ | 431 | /* No arp on this interface */ |
| @@ -437,77 +451,91 @@ static void arp_reply(struct sk_buff *skb) | |||
| 437 | arp_ptr += skb->dev->addr_len; | 451 | arp_ptr += skb->dev->addr_len; |
| 438 | memcpy(&sip, arp_ptr, 4); | 452 | memcpy(&sip, arp_ptr, 4); |
| 439 | arp_ptr += 4; | 453 | arp_ptr += 4; |
| 440 | /* if we actually cared about dst hw addr, it would get copied here */ | 454 | /* If we actually cared about dst hw addr, |
| 455 | it would get copied here */ | ||
| 441 | arp_ptr += skb->dev->addr_len; | 456 | arp_ptr += skb->dev->addr_len; |
| 442 | memcpy(&tip, arp_ptr, 4); | 457 | memcpy(&tip, arp_ptr, 4); |
| 443 | 458 | ||
| 444 | /* Should we ignore arp? */ | 459 | /* Should we ignore arp? */ |
| 445 | if (tip != np->local_ip || | 460 | if (ipv4_is_loopback(tip) || ipv4_is_multicast(tip)) |
| 446 | ipv4_is_loopback(tip) || ipv4_is_multicast(tip)) | ||
| 447 | return; | 461 | return; |
| 448 | 462 | ||
| 449 | size = arp_hdr_len(skb->dev); | 463 | size = arp_hdr_len(skb->dev); |
| 450 | send_skb = find_skb(np, size + LL_ALLOCATED_SPACE(np->dev), | ||
| 451 | LL_RESERVED_SPACE(np->dev)); | ||
| 452 | 464 | ||
| 453 | if (!send_skb) | 465 | spin_lock_irqsave(&npinfo->rx_lock, flags); |
| 454 | return; | 466 | list_for_each_entry_safe(np, tmp, &npinfo->rx_np, rx) { |
| 455 | 467 | if (tip != np->local_ip) | |
| 456 | skb_reset_network_header(send_skb); | 468 | continue; |
| 457 | arp = (struct arphdr *) skb_put(send_skb, size); | ||
| 458 | send_skb->dev = skb->dev; | ||
| 459 | send_skb->protocol = htons(ETH_P_ARP); | ||
| 460 | 469 | ||
| 461 | /* Fill the device header for the ARP frame */ | 470 | send_skb = find_skb(np, size + LL_ALLOCATED_SPACE(np->dev), |
| 462 | if (dev_hard_header(send_skb, skb->dev, ptype, | 471 | LL_RESERVED_SPACE(np->dev)); |
| 463 | sha, np->dev->dev_addr, | 472 | if (!send_skb) |
| 464 | send_skb->len) < 0) { | 473 | continue; |
| 465 | kfree_skb(send_skb); | ||
| 466 | return; | ||
| 467 | } | ||
| 468 | 474 | ||
| 469 | /* | 475 | skb_reset_network_header(send_skb); |
| 470 | * Fill out the arp protocol part. | 476 | arp = (struct arphdr *) skb_put(send_skb, size); |
| 471 | * | 477 | send_skb->dev = skb->dev; |
| 472 | * we only support ethernet device type, | 478 | send_skb->protocol = htons(ETH_P_ARP); |
| 473 | * which (according to RFC 1390) should always equal 1 (Ethernet). | ||
| 474 | */ | ||
| 475 | 479 | ||
| 476 | arp->ar_hrd = htons(np->dev->type); | 480 | /* Fill the device header for the ARP frame */ |
| 477 | arp->ar_pro = htons(ETH_P_IP); | 481 | if (dev_hard_header(send_skb, skb->dev, ptype, |
| 478 | arp->ar_hln = np->dev->addr_len; | 482 | sha, np->dev->dev_addr, |
| 479 | arp->ar_pln = 4; | 483 | send_skb->len) < 0) { |
| 480 | arp->ar_op = htons(type); | 484 | kfree_skb(send_skb); |
| 485 | continue; | ||
| 486 | } | ||
| 481 | 487 | ||
| 482 | arp_ptr=(unsigned char *)(arp + 1); | 488 | /* |
| 483 | memcpy(arp_ptr, np->dev->dev_addr, np->dev->addr_len); | 489 | * Fill out the arp protocol part. |
| 484 | arp_ptr += np->dev->addr_len; | 490 | * |
| 485 | memcpy(arp_ptr, &tip, 4); | 491 | * we only support ethernet device type, |
| 486 | arp_ptr += 4; | 492 | * which (according to RFC 1390) should |
| 487 | memcpy(arp_ptr, sha, np->dev->addr_len); | 493 | * always equal 1 (Ethernet). |
| 488 | arp_ptr += np->dev->addr_len; | 494 | */ |
| 489 | memcpy(arp_ptr, &sip, 4); | ||
| 490 | 495 | ||
| 491 | netpoll_send_skb(np, send_skb); | 496 | arp->ar_hrd = htons(np->dev->type); |
| 497 | arp->ar_pro = htons(ETH_P_IP); | ||
| 498 | arp->ar_hln = np->dev->addr_len; | ||
| 499 | arp->ar_pln = 4; | ||
| 500 | arp->ar_op = htons(type); | ||
| 501 | |||
| 502 | arp_ptr = (unsigned char *)(arp + 1); | ||
| 503 | memcpy(arp_ptr, np->dev->dev_addr, np->dev->addr_len); | ||
| 504 | arp_ptr += np->dev->addr_len; | ||
| 505 | memcpy(arp_ptr, &tip, 4); | ||
| 506 | arp_ptr += 4; | ||
| 507 | memcpy(arp_ptr, sha, np->dev->addr_len); | ||
| 508 | arp_ptr += np->dev->addr_len; | ||
| 509 | memcpy(arp_ptr, &sip, 4); | ||
| 510 | |||
| 511 | netpoll_send_skb(np, send_skb); | ||
| 512 | |||
| 513 | /* If there are several rx_hooks for the same address, | ||
| 514 | sending a single reply is enough */ | ||
| 515 | break; | ||
| 516 | } | ||
| 517 | spin_unlock_irqrestore(&npinfo->rx_lock, flags); | ||
| 492 | } | 518 | } |
| 493 | 519 | ||
| 494 | int __netpoll_rx(struct sk_buff *skb) | 520 | int __netpoll_rx(struct sk_buff *skb) |
| 495 | { | 521 | { |
| 496 | int proto, len, ulen; | 522 | int proto, len, ulen; |
| 523 | int hits = 0; | ||
| 497 | struct iphdr *iph; | 524 | struct iphdr *iph; |
| 498 | struct udphdr *uh; | 525 | struct udphdr *uh; |
| 499 | struct netpoll_info *npi = skb->dev->npinfo; | 526 | struct netpoll_info *npinfo = skb->dev->npinfo; |
| 500 | struct netpoll *np = npi->rx_np; | 527 | struct netpoll *np, *tmp; |
| 501 | 528 | ||
| 502 | if (!np) | 529 | if (list_empty(&npinfo->rx_np)) |
| 503 | goto out; | 530 | goto out; |
| 531 | |||
| 504 | if (skb->dev->type != ARPHRD_ETHER) | 532 | if (skb->dev->type != ARPHRD_ETHER) |
| 505 | goto out; | 533 | goto out; |
| 506 | 534 | ||
| 507 | /* check if netpoll clients need ARP */ | 535 | /* check if netpoll clients need ARP */ |
| 508 | if (skb->protocol == htons(ETH_P_ARP) && | 536 | if (skb->protocol == htons(ETH_P_ARP) && |
| 509 | atomic_read(&trapped)) { | 537 | atomic_read(&trapped)) { |
| 510 | skb_queue_tail(&npi->arp_tx, skb); | 538 | skb_queue_tail(&npinfo->arp_tx, skb); |
| 511 | return 1; | 539 | return 1; |
| 512 | } | 540 | } |
| 513 | 541 | ||
| @@ -551,16 +579,23 @@ int __netpoll_rx(struct sk_buff *skb) | |||
| 551 | goto out; | 579 | goto out; |
| 552 | if (checksum_udp(skb, uh, ulen, iph->saddr, iph->daddr)) | 580 | if (checksum_udp(skb, uh, ulen, iph->saddr, iph->daddr)) |
| 553 | goto out; | 581 | goto out; |
| 554 | if (np->local_ip && np->local_ip != iph->daddr) | ||
| 555 | goto out; | ||
| 556 | if (np->remote_ip && np->remote_ip != iph->saddr) | ||
| 557 | goto out; | ||
| 558 | if (np->local_port && np->local_port != ntohs(uh->dest)) | ||
| 559 | goto out; | ||
| 560 | 582 | ||
| 561 | np->rx_hook(np, ntohs(uh->source), | 583 | list_for_each_entry_safe(np, tmp, &npinfo->rx_np, rx) { |
| 562 | (char *)(uh+1), | 584 | if (np->local_ip && np->local_ip != iph->daddr) |
| 563 | ulen - sizeof(struct udphdr)); | 585 | continue; |
| 586 | if (np->remote_ip && np->remote_ip != iph->saddr) | ||
| 587 | continue; | ||
| 588 | if (np->local_port && np->local_port != ntohs(uh->dest)) | ||
| 589 | continue; | ||
| 590 | |||
| 591 | np->rx_hook(np, ntohs(uh->source), | ||
| 592 | (char *)(uh+1), | ||
| 593 | ulen - sizeof(struct udphdr)); | ||
| 594 | hits++; | ||
| 595 | } | ||
| 596 | |||
| 597 | if (!hits) | ||
| 598 | goto out; | ||
| 564 | 599 | ||
| 565 | kfree_skb(skb); | 600 | kfree_skb(skb); |
| 566 | return 1; | 601 | return 1; |
| @@ -580,7 +615,7 @@ void netpoll_print_options(struct netpoll *np) | |||
| 580 | np->name, np->local_port); | 615 | np->name, np->local_port); |
| 581 | printk(KERN_INFO "%s: local IP %pI4\n", | 616 | printk(KERN_INFO "%s: local IP %pI4\n", |
| 582 | np->name, &np->local_ip); | 617 | np->name, &np->local_ip); |
| 583 | printk(KERN_INFO "%s: interface %s\n", | 618 | printk(KERN_INFO "%s: interface '%s'\n", |
| 584 | np->name, np->dev_name); | 619 | np->name, np->dev_name); |
| 585 | printk(KERN_INFO "%s: remote port %d\n", | 620 | printk(KERN_INFO "%s: remote port %d\n", |
| 586 | np->name, np->remote_port); | 621 | np->name, np->remote_port); |
| @@ -627,6 +662,9 @@ int netpoll_parse_options(struct netpoll *np, char *opt) | |||
| 627 | if ((delim = strchr(cur, '@')) == NULL) | 662 | if ((delim = strchr(cur, '@')) == NULL) |
| 628 | goto parse_failed; | 663 | goto parse_failed; |
| 629 | *delim = 0; | 664 | *delim = 0; |
| 665 | if (*cur == ' ' || *cur == '\t') | ||
| 666 | printk(KERN_INFO "%s: warning: whitespace " | ||
| 667 | "is not allowed\n", np->name); | ||
| 630 | np->remote_port = simple_strtol(cur, NULL, 10); | 668 | np->remote_port = simple_strtol(cur, NULL, 10); |
| 631 | cur = delim; | 669 | cur = delim; |
| 632 | } | 670 | } |
| @@ -674,7 +712,7 @@ int netpoll_parse_options(struct netpoll *np, char *opt) | |||
| 674 | return 0; | 712 | return 0; |
| 675 | 713 | ||
| 676 | parse_failed: | 714 | parse_failed: |
| 677 | printk(KERN_INFO "%s: couldn't parse config at %s!\n", | 715 | printk(KERN_INFO "%s: couldn't parse config at '%s'!\n", |
| 678 | np->name, cur); | 716 | np->name, cur); |
| 679 | return -1; | 717 | return -1; |
| 680 | } | 718 | } |
| @@ -684,6 +722,7 @@ int netpoll_setup(struct netpoll *np) | |||
| 684 | struct net_device *ndev = NULL; | 722 | struct net_device *ndev = NULL; |
| 685 | struct in_device *in_dev; | 723 | struct in_device *in_dev; |
| 686 | struct netpoll_info *npinfo; | 724 | struct netpoll_info *npinfo; |
| 725 | struct netpoll *npe, *tmp; | ||
| 687 | unsigned long flags; | 726 | unsigned long flags; |
| 688 | int err; | 727 | int err; |
| 689 | 728 | ||
| @@ -700,11 +739,11 @@ int netpoll_setup(struct netpoll *np) | |||
| 700 | npinfo = kmalloc(sizeof(*npinfo), GFP_KERNEL); | 739 | npinfo = kmalloc(sizeof(*npinfo), GFP_KERNEL); |
| 701 | if (!npinfo) { | 740 | if (!npinfo) { |
| 702 | err = -ENOMEM; | 741 | err = -ENOMEM; |
| 703 | goto release; | 742 | goto put; |
| 704 | } | 743 | } |
| 705 | 744 | ||
| 706 | npinfo->rx_flags = 0; | 745 | npinfo->rx_flags = 0; |
| 707 | npinfo->rx_np = NULL; | 746 | INIT_LIST_HEAD(&npinfo->rx_np); |
| 708 | 747 | ||
| 709 | spin_lock_init(&npinfo->rx_lock); | 748 | spin_lock_init(&npinfo->rx_lock); |
| 710 | skb_queue_head_init(&npinfo->arp_tx); | 749 | skb_queue_head_init(&npinfo->arp_tx); |
| @@ -785,7 +824,7 @@ int netpoll_setup(struct netpoll *np) | |||
| 785 | if (np->rx_hook) { | 824 | if (np->rx_hook) { |
| 786 | spin_lock_irqsave(&npinfo->rx_lock, flags); | 825 | spin_lock_irqsave(&npinfo->rx_lock, flags); |
| 787 | npinfo->rx_flags |= NETPOLL_RX_ENABLED; | 826 | npinfo->rx_flags |= NETPOLL_RX_ENABLED; |
| 788 | npinfo->rx_np = np; | 827 | list_add_tail(&np->rx, &npinfo->rx_np); |
| 789 | spin_unlock_irqrestore(&npinfo->rx_lock, flags); | 828 | spin_unlock_irqrestore(&npinfo->rx_lock, flags); |
| 790 | } | 829 | } |
| 791 | 830 | ||
| @@ -801,9 +840,16 @@ int netpoll_setup(struct netpoll *np) | |||
| 801 | return 0; | 840 | return 0; |
| 802 | 841 | ||
| 803 | release: | 842 | release: |
| 804 | if (!ndev->npinfo) | 843 | if (!ndev->npinfo) { |
| 844 | spin_lock_irqsave(&npinfo->rx_lock, flags); | ||
| 845 | list_for_each_entry_safe(npe, tmp, &npinfo->rx_np, rx) { | ||
| 846 | npe->dev = NULL; | ||
| 847 | } | ||
| 848 | spin_unlock_irqrestore(&npinfo->rx_lock, flags); | ||
| 849 | |||
| 805 | kfree(npinfo); | 850 | kfree(npinfo); |
| 806 | np->dev = NULL; | 851 | } |
| 852 | put: | ||
| 807 | dev_put(ndev); | 853 | dev_put(ndev); |
| 808 | return err; | 854 | return err; |
| 809 | } | 855 | } |
| @@ -823,10 +869,11 @@ void netpoll_cleanup(struct netpoll *np) | |||
| 823 | if (np->dev) { | 869 | if (np->dev) { |
| 824 | npinfo = np->dev->npinfo; | 870 | npinfo = np->dev->npinfo; |
| 825 | if (npinfo) { | 871 | if (npinfo) { |
| 826 | if (npinfo->rx_np == np) { | 872 | if (!list_empty(&npinfo->rx_np)) { |
| 827 | spin_lock_irqsave(&npinfo->rx_lock, flags); | 873 | spin_lock_irqsave(&npinfo->rx_lock, flags); |
| 828 | npinfo->rx_np = NULL; | 874 | list_del(&np->rx); |
| 829 | npinfo->rx_flags &= ~NETPOLL_RX_ENABLED; | 875 | if (list_empty(&npinfo->rx_np)) |
| 876 | npinfo->rx_flags &= ~NETPOLL_RX_ENABLED; | ||
| 830 | spin_unlock_irqrestore(&npinfo->rx_lock, flags); | 877 | spin_unlock_irqrestore(&npinfo->rx_lock, flags); |
| 831 | } | 878 | } |
| 832 | 879 | ||
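The netpoll rx path now keeps a list of clients per device (npinfo->rx_np) and walks it under rx_lock with list_for_each_entry_safe(). A hedged, self-contained sketch of that iterate-under-IRQ-safe-spinlock pattern; struct example_client and deliver_to_clients() are made up for illustration:

```c
/* Hedged sketch of the locking/iteration pattern the netpoll changes
 * rely on: walk a list of clients under an IRQ-safe spinlock and
 * match on per-client fields.  All names here are illustrative. */
#include <linux/types.h>
#include <linux/list.h>
#include <linux/spinlock.h>

struct example_client {
	__be32 local_ip;
	struct list_head list;
};

static LIST_HEAD(client_list);
static DEFINE_SPINLOCK(client_lock);

/* Deliver to every client whose local_ip matches @daddr (0 = wildcard). */
static int deliver_to_clients(__be32 daddr)
{
	struct example_client *c, *tmp;
	unsigned long flags;
	int hits = 0;

	spin_lock_irqsave(&client_lock, flags);
	list_for_each_entry_safe(c, tmp, &client_list, list) {
		if (c->local_ip && c->local_ip != daddr)
			continue;
		/* ... hand the packet to this client ... */
		hits++;
	}
	spin_unlock_irqrestore(&client_lock, flags);

	return hits;	/* 0 means nobody claimed the packet */
}
```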
diff --git a/net/core/pktgen.c b/net/core/pktgen.c index 2e692afdc55d..43923811bd6a 100644 --- a/net/core/pktgen.c +++ b/net/core/pktgen.c | |||
| @@ -2188,12 +2188,13 @@ static inline int f_pick(struct pktgen_dev *pkt_dev) | |||
| 2188 | /* If there was already an IPSEC SA, we keep it as is, else | 2188 | /* If there was already an IPSEC SA, we keep it as is, else |
| 2189 | * we go look for it ... | 2189 | * we go look for it ... |
| 2190 | */ | 2190 | */ |
| 2191 | #define DUMMY_MARK 0 | ||
| 2191 | static void get_ipsec_sa(struct pktgen_dev *pkt_dev, int flow) | 2192 | static void get_ipsec_sa(struct pktgen_dev *pkt_dev, int flow) |
| 2192 | { | 2193 | { |
| 2193 | struct xfrm_state *x = pkt_dev->flows[flow].x; | 2194 | struct xfrm_state *x = pkt_dev->flows[flow].x; |
| 2194 | if (!x) { | 2195 | if (!x) { |
| 2195 | /*slow path: we dont already have xfrm_state*/ | 2196 | /*slow path: we dont already have xfrm_state*/ |
| 2196 | x = xfrm_stateonly_find(&init_net, | 2197 | x = xfrm_stateonly_find(&init_net, DUMMY_MARK, |
| 2197 | (xfrm_address_t *)&pkt_dev->cur_daddr, | 2198 | (xfrm_address_t *)&pkt_dev->cur_daddr, |
| 2198 | (xfrm_address_t *)&pkt_dev->cur_saddr, | 2199 | (xfrm_address_t *)&pkt_dev->cur_saddr, |
| 2199 | AF_INET, | 2200 | AF_INET, |
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index 4c7d3f635ba7..fe776c9ddeca 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c | |||
| @@ -35,6 +35,7 @@ | |||
| 35 | #include <linux/security.h> | 35 | #include <linux/security.h> |
| 36 | #include <linux/mutex.h> | 36 | #include <linux/mutex.h> |
| 37 | #include <linux/if_addr.h> | 37 | #include <linux/if_addr.h> |
| 38 | #include <linux/pci.h> | ||
| 38 | 39 | ||
| 39 | #include <asm/uaccess.h> | 40 | #include <asm/uaccess.h> |
| 40 | #include <asm/system.h> | 41 | #include <asm/system.h> |
| @@ -556,6 +557,19 @@ static void set_operstate(struct net_device *dev, unsigned char transition) | |||
| 556 | } | 557 | } |
| 557 | } | 558 | } |
| 558 | 559 | ||
| 560 | static unsigned int rtnl_dev_combine_flags(const struct net_device *dev, | ||
| 561 | const struct ifinfomsg *ifm) | ||
| 562 | { | ||
| 563 | unsigned int flags = ifm->ifi_flags; | ||
| 564 | |||
| 565 | /* bugwards compatibility: ifi_change == 0 is treated as ~0 */ | ||
| 566 | if (ifm->ifi_change) | ||
| 567 | flags = (flags & ifm->ifi_change) | | ||
| 568 | (dev->flags & ~ifm->ifi_change); | ||
| 569 | |||
| 570 | return flags; | ||
| 571 | } | ||
| 572 | |||
| 559 | static void copy_rtnl_link_stats(struct rtnl_link_stats *a, | 573 | static void copy_rtnl_link_stats(struct rtnl_link_stats *a, |
| 560 | const struct net_device_stats *b) | 574 | const struct net_device_stats *b) |
| 561 | { | 575 | { |
| @@ -588,6 +602,15 @@ static void copy_rtnl_link_stats(struct rtnl_link_stats *a, | |||
| 588 | a->tx_compressed = b->tx_compressed; | 602 | a->tx_compressed = b->tx_compressed; |
| 589 | }; | 603 | }; |
| 590 | 604 | ||
| 605 | static inline int rtnl_vfinfo_size(const struct net_device *dev) | ||
| 606 | { | ||
| 607 | if (dev->dev.parent && dev_is_pci(dev->dev.parent)) | ||
| 608 | return dev_num_vf(dev->dev.parent) * | ||
| 609 | sizeof(struct ifla_vf_info); | ||
| 610 | else | ||
| 611 | return 0; | ||
| 612 | } | ||
| 613 | |||
| 591 | static inline size_t if_nlmsg_size(const struct net_device *dev) | 614 | static inline size_t if_nlmsg_size(const struct net_device *dev) |
| 592 | { | 615 | { |
| 593 | return NLMSG_ALIGN(sizeof(struct ifinfomsg)) | 616 | return NLMSG_ALIGN(sizeof(struct ifinfomsg)) |
| @@ -605,6 +628,8 @@ static inline size_t if_nlmsg_size(const struct net_device *dev) | |||
| 605 | + nla_total_size(4) /* IFLA_MASTER */ | 628 | + nla_total_size(4) /* IFLA_MASTER */ |
| 606 | + nla_total_size(1) /* IFLA_OPERSTATE */ | 629 | + nla_total_size(1) /* IFLA_OPERSTATE */ |
| 607 | + nla_total_size(1) /* IFLA_LINKMODE */ | 630 | + nla_total_size(1) /* IFLA_LINKMODE */ |
| 631 | + nla_total_size(4) /* IFLA_NUM_VF */ | ||
| 632 | + nla_total_size(rtnl_vfinfo_size(dev)) /* IFLA_VFINFO */ | ||
| 608 | + rtnl_link_get_size(dev); /* IFLA_LINKINFO */ | 633 | + rtnl_link_get_size(dev); /* IFLA_LINKINFO */ |
| 609 | } | 634 | } |
| 610 | 635 | ||
| @@ -673,6 +698,17 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev, | |||
| 673 | stats = dev_get_stats(dev); | 698 | stats = dev_get_stats(dev); |
| 674 | copy_rtnl_link_stats(nla_data(attr), stats); | 699 | copy_rtnl_link_stats(nla_data(attr), stats); |
| 675 | 700 | ||
| 701 | if (dev->netdev_ops->ndo_get_vf_config && dev->dev.parent) { | ||
| 702 | int i; | ||
| 703 | struct ifla_vf_info ivi; | ||
| 704 | |||
| 705 | NLA_PUT_U32(skb, IFLA_NUM_VF, dev_num_vf(dev->dev.parent)); | ||
| 706 | for (i = 0; i < dev_num_vf(dev->dev.parent); i++) { | ||
| 707 | if (dev->netdev_ops->ndo_get_vf_config(dev, i, &ivi)) | ||
| 708 | break; | ||
| 709 | NLA_PUT(skb, IFLA_VFINFO, sizeof(ivi), &ivi); | ||
| 710 | } | ||
| 711 | } | ||
| 676 | if (dev->rtnl_link_ops) { | 712 | if (dev->rtnl_link_ops) { |
| 677 | if (rtnl_link_fill(skb, dev) < 0) | 713 | if (rtnl_link_fill(skb, dev) < 0) |
| 678 | goto nla_put_failure; | 714 | goto nla_put_failure; |
| @@ -733,6 +769,12 @@ const struct nla_policy ifla_policy[IFLA_MAX+1] = { | |||
| 733 | [IFLA_LINKINFO] = { .type = NLA_NESTED }, | 769 | [IFLA_LINKINFO] = { .type = NLA_NESTED }, |
| 734 | [IFLA_NET_NS_PID] = { .type = NLA_U32 }, | 770 | [IFLA_NET_NS_PID] = { .type = NLA_U32 }, |
| 735 | [IFLA_IFALIAS] = { .type = NLA_STRING, .len = IFALIASZ-1 }, | 771 | [IFLA_IFALIAS] = { .type = NLA_STRING, .len = IFALIASZ-1 }, |
| 772 | [IFLA_VF_MAC] = { .type = NLA_BINARY, | ||
| 773 | .len = sizeof(struct ifla_vf_mac) }, | ||
| 774 | [IFLA_VF_VLAN] = { .type = NLA_BINARY, | ||
| 775 | .len = sizeof(struct ifla_vf_vlan) }, | ||
| 776 | [IFLA_VF_TX_RATE] = { .type = NLA_BINARY, | ||
| 777 | .len = sizeof(struct ifla_vf_tx_rate) }, | ||
| 736 | }; | 778 | }; |
| 737 | EXPORT_SYMBOL(ifla_policy); | 779 | EXPORT_SYMBOL(ifla_policy); |
| 738 | 780 | ||
| @@ -883,13 +925,7 @@ static int do_setlink(struct net_device *dev, struct ifinfomsg *ifm, | |||
| 883 | } | 925 | } |
| 884 | 926 | ||
| 885 | if (ifm->ifi_flags || ifm->ifi_change) { | 927 | if (ifm->ifi_flags || ifm->ifi_change) { |
| 886 | unsigned int flags = ifm->ifi_flags; | 928 | err = dev_change_flags(dev, rtnl_dev_combine_flags(dev, ifm)); |
| 887 | |||
| 888 | /* bugwards compatibility: ifi_change == 0 is treated as ~0 */ | ||
| 889 | if (ifm->ifi_change) | ||
| 890 | flags = (flags & ifm->ifi_change) | | ||
| 891 | (dev->flags & ~ifm->ifi_change); | ||
| 892 | err = dev_change_flags(dev, flags); | ||
| 893 | if (err < 0) | 929 | if (err < 0) |
| 894 | goto errout; | 930 | goto errout; |
| 895 | } | 931 | } |
| @@ -906,6 +942,41 @@ static int do_setlink(struct net_device *dev, struct ifinfomsg *ifm, | |||
| 906 | write_unlock_bh(&dev_base_lock); | 942 | write_unlock_bh(&dev_base_lock); |
| 907 | } | 943 | } |
| 908 | 944 | ||
| 945 | if (tb[IFLA_VF_MAC]) { | ||
| 946 | struct ifla_vf_mac *ivm; | ||
| 947 | ivm = nla_data(tb[IFLA_VF_MAC]); | ||
| 948 | err = -EOPNOTSUPP; | ||
| 949 | if (ops->ndo_set_vf_mac) | ||
| 950 | err = ops->ndo_set_vf_mac(dev, ivm->vf, ivm->mac); | ||
| 951 | if (err < 0) | ||
| 952 | goto errout; | ||
| 953 | modified = 1; | ||
| 954 | } | ||
| 955 | |||
| 956 | if (tb[IFLA_VF_VLAN]) { | ||
| 957 | struct ifla_vf_vlan *ivv; | ||
| 958 | ivv = nla_data(tb[IFLA_VF_VLAN]); | ||
| 959 | err = -EOPNOTSUPP; | ||
| 960 | if (ops->ndo_set_vf_vlan) | ||
| 961 | err = ops->ndo_set_vf_vlan(dev, ivv->vf, | ||
| 962 | ivv->vlan, | ||
| 963 | ivv->qos); | ||
| 964 | if (err < 0) | ||
| 965 | goto errout; | ||
| 966 | modified = 1; | ||
| 967 | } | ||
| 968 | err = 0; | ||
| 969 | |||
| 970 | if (tb[IFLA_VF_TX_RATE]) { | ||
| 971 | struct ifla_vf_tx_rate *ivt; | ||
| 972 | ivt = nla_data(tb[IFLA_VF_TX_RATE]); | ||
| 973 | err = -EOPNOTSUPP; | ||
| 974 | if (ops->ndo_set_vf_tx_rate) | ||
| 975 | err = ops->ndo_set_vf_tx_rate(dev, ivt->vf, ivt->rate); | ||
| 976 | if (err < 0) | ||
| 977 | goto errout; | ||
| 978 | modified = 1; | ||
| 979 | } | ||
| 909 | err = 0; | 980 | err = 0; |
| 910 | 981 | ||
| 911 | errout: | 982 | errout: |
| @@ -997,6 +1068,26 @@ static int rtnl_dellink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) | |||
| 997 | return 0; | 1068 | return 0; |
| 998 | } | 1069 | } |
| 999 | 1070 | ||
| 1071 | int rtnl_configure_link(struct net_device *dev, const struct ifinfomsg *ifm) | ||
| 1072 | { | ||
| 1073 | unsigned int old_flags; | ||
| 1074 | int err; | ||
| 1075 | |||
| 1076 | old_flags = dev->flags; | ||
| 1077 | if (ifm && (ifm->ifi_flags || ifm->ifi_change)) { | ||
| 1078 | err = __dev_change_flags(dev, rtnl_dev_combine_flags(dev, ifm)); | ||
| 1079 | if (err < 0) | ||
| 1080 | return err; | ||
| 1081 | } | ||
| 1082 | |||
| 1083 | dev->rtnl_link_state = RTNL_LINK_INITIALIZED; | ||
| 1084 | rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U); | ||
| 1085 | |||
| 1086 | __dev_notify_flags(dev, old_flags); | ||
| 1087 | return 0; | ||
| 1088 | } | ||
| 1089 | EXPORT_SYMBOL(rtnl_configure_link); | ||
| 1090 | |||
| 1000 | struct net_device *rtnl_create_link(struct net *src_net, struct net *net, | 1091 | struct net_device *rtnl_create_link(struct net *src_net, struct net *net, |
| 1001 | char *ifname, const struct rtnl_link_ops *ops, struct nlattr *tb[]) | 1092 | char *ifname, const struct rtnl_link_ops *ops, struct nlattr *tb[]) |
| 1002 | { | 1093 | { |
| @@ -1018,6 +1109,7 @@ struct net_device *rtnl_create_link(struct net *src_net, struct net *net, | |||
| 1018 | 1109 | ||
| 1019 | dev_net_set(dev, net); | 1110 | dev_net_set(dev, net); |
| 1020 | dev->rtnl_link_ops = ops; | 1111 | dev->rtnl_link_ops = ops; |
| 1112 | dev->rtnl_link_state = RTNL_LINK_INITIALIZING; | ||
| 1021 | dev->real_num_tx_queues = real_num_queues; | 1113 | dev->real_num_tx_queues = real_num_queues; |
| 1022 | 1114 | ||
| 1023 | if (strchr(dev->name, '%')) { | 1115 | if (strchr(dev->name, '%')) { |
| @@ -1147,7 +1239,7 @@ replay: | |||
| 1147 | if (!(nlh->nlmsg_flags & NLM_F_CREATE)) | 1239 | if (!(nlh->nlmsg_flags & NLM_F_CREATE)) |
| 1148 | return -ENODEV; | 1240 | return -ENODEV; |
| 1149 | 1241 | ||
| 1150 | if (ifm->ifi_index || ifm->ifi_flags || ifm->ifi_change) | 1242 | if (ifm->ifi_index) |
| 1151 | return -EOPNOTSUPP; | 1243 | return -EOPNOTSUPP; |
| 1152 | if (tb[IFLA_MAP] || tb[IFLA_MASTER] || tb[IFLA_PROTINFO]) | 1244 | if (tb[IFLA_MAP] || tb[IFLA_MASTER] || tb[IFLA_PROTINFO]) |
| 1153 | return -EOPNOTSUPP; | 1245 | return -EOPNOTSUPP; |
| @@ -1178,9 +1270,16 @@ replay: | |||
| 1178 | err = ops->newlink(net, dev, tb, data); | 1270 | err = ops->newlink(net, dev, tb, data); |
| 1179 | else | 1271 | else |
| 1180 | err = register_netdevice(dev); | 1272 | err = register_netdevice(dev); |
| 1273 | |||
| 1181 | if (err < 0 && !IS_ERR(dev)) | 1274 | if (err < 0 && !IS_ERR(dev)) |
| 1182 | free_netdev(dev); | 1275 | free_netdev(dev); |
| 1276 | if (err < 0) | ||
| 1277 | goto out; | ||
| 1183 | 1278 | ||
| 1279 | err = rtnl_configure_link(dev, ifm); | ||
| 1280 | if (err < 0) | ||
| 1281 | unregister_netdevice(dev); | ||
| 1282 | out: | ||
| 1184 | put_net(dest_net); | 1283 | put_net(dest_net); |
| 1185 | return err; | 1284 | return err; |
| 1186 | } | 1285 | } |
| @@ -1369,17 +1468,14 @@ static int rtnetlink_event(struct notifier_block *this, unsigned long event, voi | |||
| 1369 | struct net_device *dev = ptr; | 1468 | struct net_device *dev = ptr; |
| 1370 | 1469 | ||
| 1371 | switch (event) { | 1470 | switch (event) { |
| 1372 | case NETDEV_UNREGISTER: | ||
| 1373 | rtmsg_ifinfo(RTM_DELLINK, dev, ~0U); | ||
| 1374 | break; | ||
| 1375 | case NETDEV_UP: | 1471 | case NETDEV_UP: |
| 1376 | case NETDEV_DOWN: | 1472 | case NETDEV_DOWN: |
| 1377 | rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING); | 1473 | case NETDEV_PRE_UP: |
| 1378 | break; | ||
| 1379 | case NETDEV_POST_INIT: | 1474 | case NETDEV_POST_INIT: |
| 1380 | case NETDEV_REGISTER: | 1475 | case NETDEV_REGISTER: |
| 1381 | case NETDEV_CHANGE: | 1476 | case NETDEV_CHANGE: |
| 1382 | case NETDEV_GOING_DOWN: | 1477 | case NETDEV_GOING_DOWN: |
| 1478 | case NETDEV_UNREGISTER: | ||
| 1383 | case NETDEV_UNREGISTER_BATCH: | 1479 | case NETDEV_UNREGISTER_BATCH: |
| 1384 | break; | 1480 | break; |
| 1385 | default: | 1481 | default: |
| @@ -1394,7 +1490,7 @@ static struct notifier_block rtnetlink_dev_notifier = { | |||
| 1394 | }; | 1490 | }; |
| 1395 | 1491 | ||
| 1396 | 1492 | ||
| 1397 | static int rtnetlink_net_init(struct net *net) | 1493 | static int __net_init rtnetlink_net_init(struct net *net) |
| 1398 | { | 1494 | { |
| 1399 | struct sock *sk; | 1495 | struct sock *sk; |
| 1400 | sk = netlink_kernel_create(net, NETLINK_ROUTE, RTNLGRP_MAX, | 1496 | sk = netlink_kernel_create(net, NETLINK_ROUTE, RTNLGRP_MAX, |
| @@ -1405,7 +1501,7 @@ static int rtnetlink_net_init(struct net *net) | |||
| 1405 | return 0; | 1501 | return 0; |
| 1406 | } | 1502 | } |
| 1407 | 1503 | ||
| 1408 | static void rtnetlink_net_exit(struct net *net) | 1504 | static void __net_exit rtnetlink_net_exit(struct net *net) |
| 1409 | { | 1505 | { |
| 1410 | netlink_kernel_release(net->rtnl); | 1506 | netlink_kernel_release(net->rtnl); |
| 1411 | net->rtnl = NULL; | 1507 | net->rtnl = NULL; |
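rtnl_dev_combine_flags() factors the flag-merge rule out of do_setlink() so rtnl_configure_link() can reuse it: bits covered by ifi_change come from the request, the rest keep the device's current value, and ifi_change == 0 retains its historical "change everything" meaning. A hedged stand-alone demonstration of that arithmetic with made-up flag values:

```c
/* Hedged illustration of the flag-combining rule factored out into
 * rtnl_dev_combine_flags() above.  The flag values are invented for
 * the demonstration. */
#include <stdio.h>

static unsigned int combine_flags(unsigned int dev_flags,
				  unsigned int ifi_flags,
				  unsigned int ifi_change)
{
	unsigned int flags = ifi_flags;

	if (ifi_change)
		flags = (flags & ifi_change) | (dev_flags & ~ifi_change);

	return flags;
}

int main(void)
{
	/* device currently 0x1003; request sets bit 0x1 and clears 0x2 */
	printf("%#x\n", combine_flags(0x1003, 0x0001, 0x0003)); /* 0x1001 */
	/* ifi_change == 0: the request replaces all flags */
	printf("%#x\n", combine_flags(0x1003, 0x0001, 0x0000)); /* 0x1 */
	return 0;
}
```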
diff --git a/net/core/scm.c b/net/core/scm.c index b7ba91b074b3..b88f6f9d0b97 100644 --- a/net/core/scm.c +++ b/net/core/scm.c | |||
| @@ -26,6 +26,7 @@ | |||
| 26 | #include <linux/security.h> | 26 | #include <linux/security.h> |
| 27 | #include <linux/pid.h> | 27 | #include <linux/pid.h> |
| 28 | #include <linux/nsproxy.h> | 28 | #include <linux/nsproxy.h> |
| 29 | #include <linux/slab.h> | ||
| 29 | 30 | ||
| 30 | #include <asm/system.h> | 31 | #include <asm/system.h> |
| 31 | #include <asm/uaccess.h> | 32 | #include <asm/uaccess.h> |
| @@ -156,6 +157,8 @@ int __scm_send(struct socket *sock, struct msghdr *msg, struct scm_cookie *p) | |||
| 156 | switch (cmsg->cmsg_type) | 157 | switch (cmsg->cmsg_type) |
| 157 | { | 158 | { |
| 158 | case SCM_RIGHTS: | 159 | case SCM_RIGHTS: |
| 160 | if (!sock->ops || sock->ops->family != PF_UNIX) | ||
| 161 | goto error; | ||
| 159 | err=scm_fp_copy(cmsg, &p->fp); | 162 | err=scm_fp_copy(cmsg, &p->fp); |
| 160 | if (err<0) | 163 | if (err<0) |
| 161 | goto error; | 164 | goto error; |
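The scm.c hunk refuses SCM_RIGHTS control messages on non-PF_UNIX sockets, since descriptor passing is only meaningful there. For context, a hedged userspace sketch of the legitimate use: sending a descriptor over an AF_UNIX socketpair (STDOUT_FILENO is used purely as an example):

```c
/* Hedged sketch: SCM_RIGHTS descriptor passing, which the hunk above
 * confines to PF_UNIX sockets. */
#include <string.h>
#include <unistd.h>
#include <sys/uio.h>
#include <sys/socket.h>

static int send_fd(int sock, int fd)
{
	char data = 'x';
	char cbuf[CMSG_SPACE(sizeof(int))];
	struct iovec iov = { .iov_base = &data, .iov_len = 1 };
	struct msghdr msg;
	struct cmsghdr *cmsg;

	memset(&msg, 0, sizeof(msg));
	memset(cbuf, 0, sizeof(cbuf));
	msg.msg_iov = &iov;
	msg.msg_iovlen = 1;
	msg.msg_control = cbuf;
	msg.msg_controllen = sizeof(cbuf);

	cmsg = CMSG_FIRSTHDR(&msg);
	cmsg->cmsg_level = SOL_SOCKET;
	cmsg->cmsg_type = SCM_RIGHTS;	/* only honoured on AF_UNIX sockets */
	cmsg->cmsg_len = CMSG_LEN(sizeof(int));
	memcpy(CMSG_DATA(cmsg), &fd, sizeof(int));

	return sendmsg(sock, &msg, 0) < 0 ? -1 : 0;
}

int main(void)
{
	int sv[2];

	if (socketpair(AF_UNIX, SOCK_STREAM, 0, sv) < 0)
		return 1;

	return send_fd(sv[0], STDOUT_FILENO) ? 1 : 0;
}
```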
diff --git a/net/core/sock.c b/net/core/sock.c index 305cba401ae6..c5812bbc2cc9 100644 --- a/net/core/sock.c +++ b/net/core/sock.c | |||
| @@ -340,8 +340,12 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested) | |||
| 340 | rc = sk_backlog_rcv(sk, skb); | 340 | rc = sk_backlog_rcv(sk, skb); |
| 341 | 341 | ||
| 342 | mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_); | 342 | mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_); |
| 343 | } else | 343 | } else if (sk_add_backlog(sk, skb)) { |
| 344 | sk_add_backlog(sk, skb); | 344 | bh_unlock_sock(sk); |
| 345 | atomic_inc(&sk->sk_drops); | ||
| 346 | goto discard_and_relse; | ||
| 347 | } | ||
| 348 | |||
| 345 | bh_unlock_sock(sk); | 349 | bh_unlock_sock(sk); |
| 346 | out: | 350 | out: |
| 347 | sock_put(sk); | 351 | sock_put(sk); |
| @@ -741,7 +745,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname, | |||
| 741 | struct timeval tm; | 745 | struct timeval tm; |
| 742 | } v; | 746 | } v; |
| 743 | 747 | ||
| 744 | unsigned int lv = sizeof(int); | 748 | int lv = sizeof(int); |
| 745 | int len; | 749 | int len; |
| 746 | 750 | ||
| 747 | if (get_user(len, optlen)) | 751 | if (get_user(len, optlen)) |
| @@ -1139,6 +1143,7 @@ struct sock *sk_clone(const struct sock *sk, const gfp_t priority) | |||
| 1139 | sock_lock_init(newsk); | 1143 | sock_lock_init(newsk); |
| 1140 | bh_lock_sock(newsk); | 1144 | bh_lock_sock(newsk); |
| 1141 | newsk->sk_backlog.head = newsk->sk_backlog.tail = NULL; | 1145 | newsk->sk_backlog.head = newsk->sk_backlog.tail = NULL; |
| 1146 | newsk->sk_backlog.len = 0; | ||
| 1142 | 1147 | ||
| 1143 | atomic_set(&newsk->sk_rmem_alloc, 0); | 1148 | atomic_set(&newsk->sk_rmem_alloc, 0); |
| 1144 | /* | 1149 | /* |
| @@ -1542,6 +1547,12 @@ static void __release_sock(struct sock *sk) | |||
| 1542 | 1547 | ||
| 1543 | bh_lock_sock(sk); | 1548 | bh_lock_sock(sk); |
| 1544 | } while ((skb = sk->sk_backlog.head) != NULL); | 1549 | } while ((skb = sk->sk_backlog.head) != NULL); |
| 1550 | |||
| 1551 | /* | ||
| 1552 | * Doing the zeroing here guarantees we cannot loop forever | ||
| 1553 | * while a wild producer attempts to flood us. | ||
| 1554 | */ | ||
| 1555 | sk->sk_backlog.len = 0; | ||
| 1545 | } | 1556 | } |
| 1546 | 1557 | ||
| 1547 | /** | 1558 | /** |
| @@ -1874,6 +1885,7 @@ void sock_init_data(struct socket *sock, struct sock *sk) | |||
| 1874 | sk->sk_allocation = GFP_KERNEL; | 1885 | sk->sk_allocation = GFP_KERNEL; |
| 1875 | sk->sk_rcvbuf = sysctl_rmem_default; | 1886 | sk->sk_rcvbuf = sysctl_rmem_default; |
| 1876 | sk->sk_sndbuf = sysctl_wmem_default; | 1887 | sk->sk_sndbuf = sysctl_wmem_default; |
| 1888 | sk->sk_backlog.limit = sk->sk_rcvbuf << 1; | ||
| 1877 | sk->sk_state = TCP_CLOSE; | 1889 | sk->sk_state = TCP_CLOSE; |
| 1878 | sk_set_socket(sk, sock); | 1890 | sk_set_socket(sk, sock); |
| 1879 | 1891 | ||
| @@ -2141,13 +2153,13 @@ int sock_prot_inuse_get(struct net *net, struct proto *prot) | |||
| 2141 | } | 2153 | } |
| 2142 | EXPORT_SYMBOL_GPL(sock_prot_inuse_get); | 2154 | EXPORT_SYMBOL_GPL(sock_prot_inuse_get); |
| 2143 | 2155 | ||
| 2144 | static int sock_inuse_init_net(struct net *net) | 2156 | static int __net_init sock_inuse_init_net(struct net *net) |
| 2145 | { | 2157 | { |
| 2146 | net->core.inuse = alloc_percpu(struct prot_inuse); | 2158 | net->core.inuse = alloc_percpu(struct prot_inuse); |
| 2147 | return net->core.inuse ? 0 : -ENOMEM; | 2159 | return net->core.inuse ? 0 : -ENOMEM; |
| 2148 | } | 2160 | } |
| 2149 | 2161 | ||
| 2150 | static void sock_inuse_exit_net(struct net *net) | 2162 | static void __net_exit sock_inuse_exit_net(struct net *net) |
| 2151 | { | 2163 | { |
| 2152 | free_percpu(net->core.inuse); | 2164 | free_percpu(net->core.inuse); |
| 2153 | } | 2165 | } |
| @@ -2229,13 +2241,10 @@ int proto_register(struct proto *prot, int alloc_slab) | |||
| 2229 | } | 2241 | } |
| 2230 | 2242 | ||
| 2231 | if (prot->rsk_prot != NULL) { | 2243 | if (prot->rsk_prot != NULL) { |
| 2232 | static const char mask[] = "request_sock_%s"; | 2244 | prot->rsk_prot->slab_name = kasprintf(GFP_KERNEL, "request_sock_%s", prot->name); |
| 2233 | |||
| 2234 | prot->rsk_prot->slab_name = kmalloc(strlen(prot->name) + sizeof(mask) - 1, GFP_KERNEL); | ||
| 2235 | if (prot->rsk_prot->slab_name == NULL) | 2245 | if (prot->rsk_prot->slab_name == NULL) |
| 2236 | goto out_free_sock_slab; | 2246 | goto out_free_sock_slab; |
| 2237 | 2247 | ||
| 2238 | sprintf(prot->rsk_prot->slab_name, mask, prot->name); | ||
| 2239 | prot->rsk_prot->slab = kmem_cache_create(prot->rsk_prot->slab_name, | 2248 | prot->rsk_prot->slab = kmem_cache_create(prot->rsk_prot->slab_name, |
| 2240 | prot->rsk_prot->obj_size, 0, | 2249 | prot->rsk_prot->obj_size, 0, |
| 2241 | SLAB_HWCACHE_ALIGN, NULL); | 2250 | SLAB_HWCACHE_ALIGN, NULL); |
| @@ -2248,14 +2257,11 @@ int proto_register(struct proto *prot, int alloc_slab) | |||
| 2248 | } | 2257 | } |
| 2249 | 2258 | ||
| 2250 | if (prot->twsk_prot != NULL) { | 2259 | if (prot->twsk_prot != NULL) { |
| 2251 | static const char mask[] = "tw_sock_%s"; | 2260 | prot->twsk_prot->twsk_slab_name = kasprintf(GFP_KERNEL, "tw_sock_%s", prot->name); |
| 2252 | |||
| 2253 | prot->twsk_prot->twsk_slab_name = kmalloc(strlen(prot->name) + sizeof(mask) - 1, GFP_KERNEL); | ||
| 2254 | 2261 | ||
| 2255 | if (prot->twsk_prot->twsk_slab_name == NULL) | 2262 | if (prot->twsk_prot->twsk_slab_name == NULL) |
| 2256 | goto out_free_request_sock_slab; | 2263 | goto out_free_request_sock_slab; |
| 2257 | 2264 | ||
| 2258 | sprintf(prot->twsk_prot->twsk_slab_name, mask, prot->name); | ||
| 2259 | prot->twsk_prot->twsk_slab = | 2265 | prot->twsk_prot->twsk_slab = |
| 2260 | kmem_cache_create(prot->twsk_prot->twsk_slab_name, | 2266 | kmem_cache_create(prot->twsk_prot->twsk_slab_name, |
| 2261 | prot->twsk_prot->twsk_obj_size, | 2267 | prot->twsk_prot->twsk_obj_size, |
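The two hunks above replace the kmalloc()+sprintf() pair with kasprintf(), which formats and allocates in one call. A hedged sketch of the idiom; make_slab_name() is illustrative, and the caller still checks for NULL and eventually kfree()s the result:

```c
/* Hedged sketch of the kasprintf() idiom switched to above. */
#include <linux/kernel.h>
#include <linux/slab.h>

static char *make_slab_name(const char *proto_name)
{
	/* format-and-allocate in one step; NULL on allocation failure */
	return kasprintf(GFP_KERNEL, "request_sock_%s", proto_name);
}
```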
| @@ -2282,7 +2288,8 @@ out_free_request_sock_slab: | |||
| 2282 | prot->rsk_prot->slab = NULL; | 2288 | prot->rsk_prot->slab = NULL; |
| 2283 | } | 2289 | } |
| 2284 | out_free_request_sock_slab_name: | 2290 | out_free_request_sock_slab_name: |
| 2285 | kfree(prot->rsk_prot->slab_name); | 2291 | if (prot->rsk_prot) |
| 2292 | kfree(prot->rsk_prot->slab_name); | ||
| 2286 | out_free_sock_slab: | 2293 | out_free_sock_slab: |
| 2287 | kmem_cache_destroy(prot->slab); | 2294 | kmem_cache_destroy(prot->slab); |
| 2288 | prot->slab = NULL; | 2295 | prot->slab = NULL; |
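The sock.c changes bound the socket backlog: sk_add_backlog() now fails once sk_backlog.len exceeds sk_backlog.limit (initialised to twice sk_rcvbuf), and __release_sock() zeroes the length when it drains the queue. A hedged sketch of how a protocol receive path is expected to use the new return value, mirroring the sk_receive_skb() hunk; example_rcv() is illustrative:

```c
/* Hedged sketch of the bounded-backlog pattern introduced above: queue
 * to the backlog while the socket is owned by a process, and treat a
 * non-zero sk_add_backlog() return as a drop. */
#include <net/sock.h>

static int example_rcv(struct sock *sk, struct sk_buff *skb)
{
	int rc = 0;

	bh_lock_sock(sk);
	if (!sock_owned_by_user(sk)) {
		rc = sk_backlog_rcv(sk, skb);		/* process immediately */
	} else if (sk_add_backlog(sk, skb)) {		/* backlog is full */
		bh_unlock_sock(sk);
		atomic_inc(&sk->sk_drops);
		kfree_skb(skb);
		return -ENOMEM;
	}
	bh_unlock_sock(sk);

	return rc;
}
```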
diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c index 06124872af5b..b7b6b8208f75 100644 --- a/net/core/sysctl_net_core.c +++ b/net/core/sysctl_net_core.c | |||
| @@ -12,6 +12,7 @@ | |||
| 12 | #include <linux/netdevice.h> | 12 | #include <linux/netdevice.h> |
| 13 | #include <linux/ratelimit.h> | 13 | #include <linux/ratelimit.h> |
| 14 | #include <linux/init.h> | 14 | #include <linux/init.h> |
| 15 | #include <linux/slab.h> | ||
| 15 | 16 | ||
| 16 | #include <net/ip.h> | 17 | #include <net/ip.h> |
| 17 | #include <net/sock.h> | 18 | #include <net/sock.h> |
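Most of the one-line hunks in this series add or drop an explicit <linux/slab.h> include, matching whether the file actually calls the slab allocator once the header is no longer pulled in indirectly. A hedged minimal illustration of the dependency; example_alloc()/example_free() are made up:

```c
/* Hedged illustration: any file that calls kmalloc()/kfree() must
 * include <linux/slab.h> itself. */
#include <linux/slab.h>
#include <linux/types.h>

static void *example_alloc(size_t len)
{
	/* fails to compile without <linux/slab.h> providing kmalloc() */
	return kmalloc(len, GFP_KERNEL);
}

static void example_free(void *p)
{
	kfree(p);
}
```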
