diff options

Diffstat (limited to 'net/core')
 -rw-r--r--  net/core/dev.c           | 294
 -rw-r--r--  net/core/dev_mcast.c     |   5
 -rw-r--r--  net/core/drop_monitor.c  |   1
 -rw-r--r--  net/core/ethtool.c       | 433
 -rw-r--r--  net/core/fib_rules.c     |   2
 -rw-r--r--  net/core/filter.c        |   8
 -rw-r--r--  net/core/neighbour.c     |  20
 -rw-r--r--  net/core/netpoll.c       | 178
 -rw-r--r--  net/core/pktgen.c        |   3
 -rw-r--r--  net/core/rtnetlink.c     | 135
 -rw-r--r--  net/core/scm.c           |   2
 -rw-r--r--  net/core/sock.c          |  38
12 files changed, 899 insertions, 220 deletions
diff --git a/net/core/dev.c b/net/core/dev.c
index ec874218b206..59d4394d2ce8 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1113,19 +1113,7 @@ void dev_load(struct net *net, const char *name)
 }
 EXPORT_SYMBOL(dev_load);
 
-/**
- *	dev_open	- prepare an interface for use.
- *	@dev: device to open
- *
- *	Takes a device from down to up state. The device's private open
- *	function is invoked and then the multicast lists are loaded. Finally
- *	the device is moved into the up state and a %NETDEV_UP message is
- *	sent to the netdev notifier chain.
- *
- *	Calling this function on an active interface is a nop. On a failure
- *	a negative errno code is returned.
- */
-int dev_open(struct net_device *dev)
+static int __dev_open(struct net_device *dev)
 {
 	const struct net_device_ops *ops = dev->netdev_ops;
 	int ret;
@@ -1133,13 +1121,6 @@ int dev_open(struct net_device *dev)
 	ASSERT_RTNL();
 
 	/*
-	 *	Is it already up?
-	 */
-
-	if (dev->flags & IFF_UP)
-		return 0;
-
-	/*
 	 *	Is it even present?
 	 */
 	if (!netif_device_present(dev))
@@ -1187,36 +1168,57 @@ int dev_open(struct net_device *dev)
 		 *	Wakeup transmit queue engine
 		 */
 		dev_activate(dev);
-
-		/*
-		 *	... and announce new interface.
-		 */
-		call_netdevice_notifiers(NETDEV_UP, dev);
 	}
 
 	return ret;
 }
-EXPORT_SYMBOL(dev_open);
 
 /**
- *	dev_close - shutdown an interface.
- *	@dev: device to shutdown
+ *	dev_open	- prepare an interface for use.
+ *	@dev: device to open
  *
- *	This function moves an active device into down state. A
- *	%NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
- *	is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
- *	chain.
+ *	Takes a device from down to up state. The device's private open
+ *	function is invoked and then the multicast lists are loaded. Finally
+ *	the device is moved into the up state and a %NETDEV_UP message is
+ *	sent to the netdev notifier chain.
+ *
+ *	Calling this function on an active interface is a nop. On a failure
+ *	a negative errno code is returned.
  */
-int dev_close(struct net_device *dev)
+int dev_open(struct net_device *dev)
+{
+	int ret;
+
+	/*
+	 *	Is it already up?
+	 */
+	if (dev->flags & IFF_UP)
+		return 0;
+
+	/*
+	 *	Open device
+	 */
+	ret = __dev_open(dev);
+	if (ret < 0)
+		return ret;
+
+	/*
+	 *	... and announce new interface.
+	 */
+	rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING);
+	call_netdevice_notifiers(NETDEV_UP, dev);
+
+	return ret;
+}
+EXPORT_SYMBOL(dev_open);
+
+static int __dev_close(struct net_device *dev)
 {
 	const struct net_device_ops *ops = dev->netdev_ops;
-	ASSERT_RTNL();
 
+	ASSERT_RTNL();
 	might_sleep();
 
-	if (!(dev->flags & IFF_UP))
-		return 0;
-
 	/*
 	 *	Tell people we are going down, so that they can
 	 *	prepare to death, when device is still operating.
@@ -1252,14 +1254,34 @@ int dev_close(struct net_device *dev)
 	dev->flags &= ~IFF_UP;
 
 	/*
-	 *	Tell people we are down
+	 *	Shutdown NET_DMA
 	 */
-	call_netdevice_notifiers(NETDEV_DOWN, dev);
+	net_dmaengine_put();
+
+	return 0;
+}
+
+/**
+ *	dev_close - shutdown an interface.
+ *	@dev: device to shutdown
+ *
+ *	This function moves an active device into down state. A
+ *	%NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
+ *	is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
+ *	chain.
+ */
+int dev_close(struct net_device *dev)
+{
+	if (!(dev->flags & IFF_UP))
+		return 0;
+
+	__dev_close(dev);
 
 	/*
-	 *	Shutdown NET_DMA
+	 *	Tell people we are down
 	 */
-	net_dmaengine_put();
+	rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING);
+	call_netdevice_notifiers(NETDEV_DOWN, dev);
 
 	return 0;
 }
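Note (not part of the patch): a minimal sketch of how a caller drives the refactored open path. dev_open() keeps the IFF_UP short-circuit and now also sends the RTM_NEWLINK/NETDEV_UP announcements, while __dev_open() does the actual work and is what internal flag changes call; the interface name below is a placeholder.

    #include <linux/netdevice.h>
    #include <linux/rtnetlink.h>

    static int example_bring_up(struct net *net, const char *name)
    {
    	struct net_device *dev;
    	int err = -ENODEV;

    	rtnl_lock();			/* __dev_open() asserts RTNL */
    	dev = __dev_get_by_name(net, name);
    	if (dev)
    		err = dev_open(dev);	/* nop if IFF_UP is already set */
    	rtnl_unlock();
    	return err;
    }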
| @@ -1448,13 +1470,10 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb) | |||
| 1448 | if (skb->len > (dev->mtu + dev->hard_header_len)) | 1470 | if (skb->len > (dev->mtu + dev->hard_header_len)) |
| 1449 | return NET_RX_DROP; | 1471 | return NET_RX_DROP; |
| 1450 | 1472 | ||
| 1451 | skb_dst_drop(skb); | 1473 | skb_set_dev(skb, dev); |
| 1452 | skb->tstamp.tv64 = 0; | 1474 | skb->tstamp.tv64 = 0; |
| 1453 | skb->pkt_type = PACKET_HOST; | 1475 | skb->pkt_type = PACKET_HOST; |
| 1454 | skb->protocol = eth_type_trans(skb, dev); | 1476 | skb->protocol = eth_type_trans(skb, dev); |
| 1455 | skb->mark = 0; | ||
| 1456 | secpath_reset(skb); | ||
| 1457 | nf_reset(skb); | ||
| 1458 | return netif_rx(skb); | 1477 | return netif_rx(skb); |
| 1459 | } | 1478 | } |
| 1460 | EXPORT_SYMBOL_GPL(dev_forward_skb); | 1479 | EXPORT_SYMBOL_GPL(dev_forward_skb); |
| @@ -1614,6 +1633,36 @@ static bool dev_can_checksum(struct net_device *dev, struct sk_buff *skb) | |||
| 1614 | return false; | 1633 | return false; |
| 1615 | } | 1634 | } |
| 1616 | 1635 | ||
| 1636 | /** | ||
| 1637 | * skb_dev_set -- assign a new device to a buffer | ||
| 1638 | * @skb: buffer for the new device | ||
| 1639 | * @dev: network device | ||
| 1640 | * | ||
| 1641 | * If an skb is owned by a device already, we have to reset | ||
| 1642 | * all data private to the namespace a device belongs to | ||
| 1643 | * before assigning it a new device. | ||
| 1644 | */ | ||
| 1645 | #ifdef CONFIG_NET_NS | ||
| 1646 | void skb_set_dev(struct sk_buff *skb, struct net_device *dev) | ||
| 1647 | { | ||
| 1648 | skb_dst_drop(skb); | ||
| 1649 | if (skb->dev && !net_eq(dev_net(skb->dev), dev_net(dev))) { | ||
| 1650 | secpath_reset(skb); | ||
| 1651 | nf_reset(skb); | ||
| 1652 | skb_init_secmark(skb); | ||
| 1653 | skb->mark = 0; | ||
| 1654 | skb->priority = 0; | ||
| 1655 | skb->nf_trace = 0; | ||
| 1656 | skb->ipvs_property = 0; | ||
| 1657 | #ifdef CONFIG_NET_SCHED | ||
| 1658 | skb->tc_index = 0; | ||
| 1659 | #endif | ||
| 1660 | } | ||
| 1661 | skb->dev = dev; | ||
| 1662 | } | ||
| 1663 | EXPORT_SYMBOL(skb_set_dev); | ||
| 1664 | #endif /* CONFIG_NET_NS */ | ||
| 1665 | |||
| 1617 | /* | 1666 | /* |
| 1618 | * Invalidate hardware checksum when packet is to be mangled, and | 1667 | * Invalidate hardware checksum when packet is to be mangled, and |
| 1619 | * complete checksum manually on outgoing path. | 1668 | * complete checksum manually on outgoing path. |
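Note: the main consumer of this pair is a virtual-pair driver. A hedged sketch of a veth-style transmit hook follows; example_get_peer() is hypothetical and stands in for however such a driver finds its peer device. skb_set_dev() inside dev_forward_skb() scrubs namespace-private state only when the skb really crosses a netns boundary, which is why the unconditional mark/secpath/nf resets above could be dropped.

    static netdev_tx_t example_peer_xmit(struct sk_buff *skb,
    				     struct net_device *dev)
    {
    	struct net_device *peer = example_get_peer(dev);	/* hypothetical */

    	/* dev_forward_skb() calls skb_set_dev(skb, peer) internally */
    	dev_forward_skb(peer, skb);
    	return NETDEV_TX_OK;
    }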
@@ -1853,6 +1902,14 @@ gso:
 
 		skb->next = nskb->next;
 		nskb->next = NULL;
+
+		/*
+		 * If device doesnt need nskb->dst, release it right now while
+		 * its hot in this cpu cache
+		 */
+		if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
+			skb_dst_drop(nskb);
+
 		rc = ops->ndo_start_xmit(nskb, dev);
 		if (unlikely(rc != NETDEV_TX_OK)) {
 			if (rc & ~NETDEV_TX_MASK)
@@ -1974,6 +2031,21 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
 	return rc;
 }
 
+/*
+ * Returns true if either:
+ *	1. skb has frag_list and the device doesn't support FRAGLIST, or
+ *	2. skb is fragmented and the device does not support SG, or if
+ *	   at least one of fragments is in highmem and device does not
+ *	   support DMA from it.
+ */
+static inline int skb_needs_linearize(struct sk_buff *skb,
+				      struct net_device *dev)
+{
+	return (skb_has_frags(skb) && !(dev->features & NETIF_F_FRAGLIST)) ||
+	       (skb_shinfo(skb)->nr_frags && (!(dev->features & NETIF_F_SG) ||
+					      illegal_highdma(dev, skb)));
+}
+
 /**
  *	dev_queue_xmit - transmit a buffer
  *	@skb: buffer to transmit
@@ -2010,18 +2082,8 @@ int dev_queue_xmit(struct sk_buff *skb)
 	if (netif_needs_gso(dev, skb))
 		goto gso;
 
-	if (skb_has_frags(skb) &&
-	    !(dev->features & NETIF_F_FRAGLIST) &&
-	    __skb_linearize(skb))
-		goto out_kfree_skb;
-
-	/* Fragmented skb is linearized if device does not support SG,
-	 * or if at least one of fragments is in highmem and device
-	 * does not support DMA from it.
-	 */
-	if (skb_shinfo(skb)->nr_frags &&
-	    (!(dev->features & NETIF_F_SG) || illegal_highdma(dev, skb)) &&
-	    __skb_linearize(skb))
+	/* Convert a paged skb to linear, if required */
+	if (skb_needs_linearize(skb, dev) && __skb_linearize(skb))
 		goto out_kfree_skb;
 
 	/* If packet is not checksummed and device does not support
@@ -2041,7 +2103,7 @@ gso:
 	rcu_read_lock_bh();
 
 	txq = dev_pick_tx(dev, skb);
-	q = rcu_dereference(txq->qdisc);
+	q = rcu_dereference_bh(txq->qdisc);
 
 #ifdef CONFIG_NET_CLS_ACT
 	skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_EGRESS);
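Note on the rcu_dereference_bh() conversion: the _bh flavor tells RCU lockdep that this read side is protected by rcu_read_lock_bh() rather than rcu_read_lock(), so the right lock flavor is checked. A minimal sketch of the pairing, illustrative rather than taken from the patch:

    #include <net/sch_generic.h>

    static int example_qdisc_qlen(struct netdev_queue *txq)
    {
    	struct Qdisc *q;
    	int qlen;

    	rcu_read_lock_bh();	/* matches the _bh dereference below */
    	q = rcu_dereference_bh(txq->qdisc);
    	qlen = q ? q->q.qlen : 0;
    	rcu_read_unlock_bh();
    	return qlen;
    }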
| @@ -2421,7 +2483,9 @@ int netif_receive_skb(struct sk_buff *skb) | |||
| 2421 | { | 2483 | { |
| 2422 | struct packet_type *ptype, *pt_prev; | 2484 | struct packet_type *ptype, *pt_prev; |
| 2423 | struct net_device *orig_dev; | 2485 | struct net_device *orig_dev; |
| 2486 | struct net_device *master; | ||
| 2424 | struct net_device *null_or_orig; | 2487 | struct net_device *null_or_orig; |
| 2488 | struct net_device *null_or_bond; | ||
| 2425 | int ret = NET_RX_DROP; | 2489 | int ret = NET_RX_DROP; |
| 2426 | __be16 type; | 2490 | __be16 type; |
| 2427 | 2491 | ||
| @@ -2440,11 +2504,12 @@ int netif_receive_skb(struct sk_buff *skb) | |||
| 2440 | 2504 | ||
| 2441 | null_or_orig = NULL; | 2505 | null_or_orig = NULL; |
| 2442 | orig_dev = skb->dev; | 2506 | orig_dev = skb->dev; |
| 2443 | if (orig_dev->master) { | 2507 | master = ACCESS_ONCE(orig_dev->master); |
| 2444 | if (skb_bond_should_drop(skb)) | 2508 | if (master) { |
| 2509 | if (skb_bond_should_drop(skb, master)) | ||
| 2445 | null_or_orig = orig_dev; /* deliver only exact match */ | 2510 | null_or_orig = orig_dev; /* deliver only exact match */ |
| 2446 | else | 2511 | else |
| 2447 | skb->dev = orig_dev->master; | 2512 | skb->dev = master; |
| 2448 | } | 2513 | } |
| 2449 | 2514 | ||
| 2450 | __get_cpu_var(netdev_rx_stat).total++; | 2515 | __get_cpu_var(netdev_rx_stat).total++; |
| @@ -2487,12 +2552,24 @@ ncls: | |||
| 2487 | if (!skb) | 2552 | if (!skb) |
| 2488 | goto out; | 2553 | goto out; |
| 2489 | 2554 | ||
| 2555 | /* | ||
| 2556 | * Make sure frames received on VLAN interfaces stacked on | ||
| 2557 | * bonding interfaces still make their way to any base bonding | ||
| 2558 | * device that may have registered for a specific ptype. The | ||
| 2559 | * handler may have to adjust skb->dev and orig_dev. | ||
| 2560 | */ | ||
| 2561 | null_or_bond = NULL; | ||
| 2562 | if ((skb->dev->priv_flags & IFF_802_1Q_VLAN) && | ||
| 2563 | (vlan_dev_real_dev(skb->dev)->priv_flags & IFF_BONDING)) { | ||
| 2564 | null_or_bond = vlan_dev_real_dev(skb->dev); | ||
| 2565 | } | ||
| 2566 | |||
| 2490 | type = skb->protocol; | 2567 | type = skb->protocol; |
| 2491 | list_for_each_entry_rcu(ptype, | 2568 | list_for_each_entry_rcu(ptype, |
| 2492 | &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) { | 2569 | &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) { |
| 2493 | if (ptype->type == type && | 2570 | if (ptype->type == type && (ptype->dev == null_or_orig || |
| 2494 | (ptype->dev == null_or_orig || ptype->dev == skb->dev || | 2571 | ptype->dev == skb->dev || ptype->dev == orig_dev || |
| 2495 | ptype->dev == orig_dev)) { | 2572 | ptype->dev == null_or_bond)) { |
| 2496 | if (pt_prev) | 2573 | if (pt_prev) |
| 2497 | ret = deliver_skb(skb, pt_prev, orig_dev); | 2574 | ret = deliver_skb(skb, pt_prev, orig_dev); |
| 2498 | pt_prev = ptype; | 2575 | pt_prev = ptype; |
| @@ -2561,7 +2638,7 @@ out: | |||
| 2561 | return netif_receive_skb(skb); | 2638 | return netif_receive_skb(skb); |
| 2562 | } | 2639 | } |
| 2563 | 2640 | ||
| 2564 | void napi_gro_flush(struct napi_struct *napi) | 2641 | static void napi_gro_flush(struct napi_struct *napi) |
| 2565 | { | 2642 | { |
| 2566 | struct sk_buff *skb, *next; | 2643 | struct sk_buff *skb, *next; |
| 2567 | 2644 | ||
| @@ -2574,7 +2651,6 @@ void napi_gro_flush(struct napi_struct *napi) | |||
| 2574 | napi->gro_count = 0; | 2651 | napi->gro_count = 0; |
| 2575 | napi->gro_list = NULL; | 2652 | napi->gro_list = NULL; |
| 2576 | } | 2653 | } |
| 2577 | EXPORT_SYMBOL(napi_gro_flush); | ||
| 2578 | 2654 | ||
| 2579 | enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb) | 2655 | enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb) |
| 2580 | { | 2656 | { |
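Note: the null_or_bond match exists for device-bound handlers such as bonding's LACP receiver, which registers a packet_type with ptype->dev set to the bond device. A hedged sketch of such a registration (not from the patch):

    static int example_rcv(struct sk_buff *skb, struct net_device *dev,
    		       struct packet_type *pt, struct net_device *orig_dev)
    {
    	/* skb->dev may be a stacked VLAN; orig_dev the receiving slave */
    	kfree_skb(skb);
    	return 0;
    }

    static struct packet_type example_pt = {
    	.type = cpu_to_be16(ETH_P_SLOW),	/* e.g. LACPDUs */
    	.func = example_rcv,
    };

    /* example_pt.dev = bond_dev; dev_add_pack(&example_pt); */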
| @@ -2966,7 +3042,7 @@ static void net_rx_action(struct softirq_action *h) | |||
| 2966 | * entries to the tail of this list, and only ->poll() | 3042 | * entries to the tail of this list, and only ->poll() |
| 2967 | * calls can remove this head entry from the list. | 3043 | * calls can remove this head entry from the list. |
| 2968 | */ | 3044 | */ |
| 2969 | n = list_entry(list->next, struct napi_struct, poll_list); | 3045 | n = list_first_entry(list, struct napi_struct, poll_list); |
| 2970 | 3046 | ||
| 2971 | have = netpoll_poll_lock(n); | 3047 | have = netpoll_poll_lock(n); |
| 2972 | 3048 | ||
| @@ -3185,7 +3261,7 @@ static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev) | |||
| 3185 | { | 3261 | { |
| 3186 | const struct net_device_stats *stats = dev_get_stats(dev); | 3262 | const struct net_device_stats *stats = dev_get_stats(dev); |
| 3187 | 3263 | ||
| 3188 | seq_printf(seq, "%6s:%8lu %7lu %4lu %4lu %4lu %5lu %10lu %9lu " | 3264 | seq_printf(seq, "%6s: %7lu %7lu %4lu %4lu %4lu %5lu %10lu %9lu " |
| 3189 | "%8lu %7lu %4lu %4lu %4lu %5lu %7lu %10lu\n", | 3265 | "%8lu %7lu %4lu %4lu %4lu %5lu %7lu %10lu\n", |
| 3190 | dev->name, stats->rx_bytes, stats->rx_packets, | 3266 | dev->name, stats->rx_bytes, stats->rx_packets, |
| 3191 | stats->rx_errors, | 3267 | stats->rx_errors, |
| @@ -3640,10 +3716,10 @@ void __dev_set_rx_mode(struct net_device *dev) | |||
| 3640 | /* Unicast addresses changes may only happen under the rtnl, | 3716 | /* Unicast addresses changes may only happen under the rtnl, |
| 3641 | * therefore calling __dev_set_promiscuity here is safe. | 3717 | * therefore calling __dev_set_promiscuity here is safe. |
| 3642 | */ | 3718 | */ |
| 3643 | if (dev->uc.count > 0 && !dev->uc_promisc) { | 3719 | if (!netdev_uc_empty(dev) && !dev->uc_promisc) { |
| 3644 | __dev_set_promiscuity(dev, 1); | 3720 | __dev_set_promiscuity(dev, 1); |
| 3645 | dev->uc_promisc = 1; | 3721 | dev->uc_promisc = 1; |
| 3646 | } else if (dev->uc.count == 0 && dev->uc_promisc) { | 3722 | } else if (netdev_uc_empty(dev) && dev->uc_promisc) { |
| 3647 | __dev_set_promiscuity(dev, -1); | 3723 | __dev_set_promiscuity(dev, -1); |
| 3648 | dev->uc_promisc = 0; | 3724 | dev->uc_promisc = 0; |
| 3649 | } | 3725 | } |
| @@ -4211,7 +4287,7 @@ static void dev_addr_discard(struct net_device *dev) | |||
| 4211 | netif_addr_lock_bh(dev); | 4287 | netif_addr_lock_bh(dev); |
| 4212 | 4288 | ||
| 4213 | __dev_addr_discard(&dev->mc_list); | 4289 | __dev_addr_discard(&dev->mc_list); |
| 4214 | dev->mc_count = 0; | 4290 | netdev_mc_count(dev) = 0; |
| 4215 | 4291 | ||
| 4216 | netif_addr_unlock_bh(dev); | 4292 | netif_addr_unlock_bh(dev); |
| 4217 | } | 4293 | } |
| @@ -4247,18 +4323,10 @@ unsigned dev_get_flags(const struct net_device *dev) | |||
| 4247 | } | 4323 | } |
| 4248 | EXPORT_SYMBOL(dev_get_flags); | 4324 | EXPORT_SYMBOL(dev_get_flags); |
| 4249 | 4325 | ||
| 4250 | /** | 4326 | int __dev_change_flags(struct net_device *dev, unsigned int flags) |
| 4251 | * dev_change_flags - change device settings | ||
| 4252 | * @dev: device | ||
| 4253 | * @flags: device state flags | ||
| 4254 | * | ||
| 4255 | * Change settings on device based state flags. The flags are | ||
| 4256 | * in the userspace exported format. | ||
| 4257 | */ | ||
| 4258 | int dev_change_flags(struct net_device *dev, unsigned flags) | ||
| 4259 | { | 4327 | { |
| 4260 | int ret, changes; | ||
| 4261 | int old_flags = dev->flags; | 4328 | int old_flags = dev->flags; |
| 4329 | int ret; | ||
| 4262 | 4330 | ||
| 4263 | ASSERT_RTNL(); | 4331 | ASSERT_RTNL(); |
| 4264 | 4332 | ||
| @@ -4289,17 +4357,12 @@ int dev_change_flags(struct net_device *dev, unsigned flags) | |||
| 4289 | 4357 | ||
| 4290 | ret = 0; | 4358 | ret = 0; |
| 4291 | if ((old_flags ^ flags) & IFF_UP) { /* Bit is different ? */ | 4359 | if ((old_flags ^ flags) & IFF_UP) { /* Bit is different ? */ |
| 4292 | ret = ((old_flags & IFF_UP) ? dev_close : dev_open)(dev); | 4360 | ret = ((old_flags & IFF_UP) ? __dev_close : __dev_open)(dev); |
| 4293 | 4361 | ||
| 4294 | if (!ret) | 4362 | if (!ret) |
| 4295 | dev_set_rx_mode(dev); | 4363 | dev_set_rx_mode(dev); |
| 4296 | } | 4364 | } |
| 4297 | 4365 | ||
| 4298 | if (dev->flags & IFF_UP && | ||
| 4299 | ((old_flags ^ dev->flags) & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI | | ||
| 4300 | IFF_VOLATILE))) | ||
| 4301 | call_netdevice_notifiers(NETDEV_CHANGE, dev); | ||
| 4302 | |||
| 4303 | if ((flags ^ dev->gflags) & IFF_PROMISC) { | 4366 | if ((flags ^ dev->gflags) & IFF_PROMISC) { |
| 4304 | int inc = (flags & IFF_PROMISC) ? 1 : -1; | 4367 | int inc = (flags & IFF_PROMISC) ? 1 : -1; |
| 4305 | 4368 | ||
| @@ -4318,11 +4381,47 @@ int dev_change_flags(struct net_device *dev, unsigned flags) | |||
| 4318 | dev_set_allmulti(dev, inc); | 4381 | dev_set_allmulti(dev, inc); |
| 4319 | } | 4382 | } |
| 4320 | 4383 | ||
| 4321 | /* Exclude state transition flags, already notified */ | 4384 | return ret; |
| 4322 | changes = (old_flags ^ dev->flags) & ~(IFF_UP | IFF_RUNNING); | 4385 | } |
| 4386 | |||
| 4387 | void __dev_notify_flags(struct net_device *dev, unsigned int old_flags) | ||
| 4388 | { | ||
| 4389 | unsigned int changes = dev->flags ^ old_flags; | ||
| 4390 | |||
| 4391 | if (changes & IFF_UP) { | ||
| 4392 | if (dev->flags & IFF_UP) | ||
| 4393 | call_netdevice_notifiers(NETDEV_UP, dev); | ||
| 4394 | else | ||
| 4395 | call_netdevice_notifiers(NETDEV_DOWN, dev); | ||
| 4396 | } | ||
| 4397 | |||
| 4398 | if (dev->flags & IFF_UP && | ||
| 4399 | (changes & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI | IFF_VOLATILE))) | ||
| 4400 | call_netdevice_notifiers(NETDEV_CHANGE, dev); | ||
| 4401 | } | ||
| 4402 | |||
| 4403 | /** | ||
| 4404 | * dev_change_flags - change device settings | ||
| 4405 | * @dev: device | ||
| 4406 | * @flags: device state flags | ||
| 4407 | * | ||
| 4408 | * Change settings on device based state flags. The flags are | ||
| 4409 | * in the userspace exported format. | ||
| 4410 | */ | ||
| 4411 | int dev_change_flags(struct net_device *dev, unsigned flags) | ||
| 4412 | { | ||
| 4413 | int ret, changes; | ||
| 4414 | int old_flags = dev->flags; | ||
| 4415 | |||
| 4416 | ret = __dev_change_flags(dev, flags); | ||
| 4417 | if (ret < 0) | ||
| 4418 | return ret; | ||
| 4419 | |||
| 4420 | changes = old_flags ^ dev->flags; | ||
| 4323 | if (changes) | 4421 | if (changes) |
| 4324 | rtmsg_ifinfo(RTM_NEWLINK, dev, changes); | 4422 | rtmsg_ifinfo(RTM_NEWLINK, dev, changes); |
| 4325 | 4423 | ||
| 4424 | __dev_notify_flags(dev, old_flags); | ||
| 4326 | return ret; | 4425 | return ret; |
| 4327 | } | 4426 | } |
| 4328 | EXPORT_SYMBOL(dev_change_flags); | 4427 | EXPORT_SYMBOL(dev_change_flags); |
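Note: the point of the split, sketched below (illustrative, not from this diff), is that a caller such as the rtnl_link code can apply the flags, finish the rest of the configuration, and only then publish one consistent set of events. The caller is assumed to hold RTNL.

    static int example_configure(struct net_device *dev, unsigned int flags)
    {
    	unsigned int old_flags = dev->flags;
    	int err;

    	err = __dev_change_flags(dev, flags);	/* no notifications yet */
    	if (err < 0)
    		return err;

    	/* ... apply further attributes before anyone is told ... */

    	__dev_notify_flags(dev, old_flags);	/* NETDEV_UP/DOWN/CHANGE */
    	return 0;
    }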
| @@ -4813,6 +4912,10 @@ static void rollback_registered_many(struct list_head *head) | |||
| 4813 | */ | 4912 | */ |
| 4814 | call_netdevice_notifiers(NETDEV_UNREGISTER, dev); | 4913 | call_netdevice_notifiers(NETDEV_UNREGISTER, dev); |
| 4815 | 4914 | ||
| 4915 | if (!dev->rtnl_link_ops || | ||
| 4916 | dev->rtnl_link_state == RTNL_LINK_INITIALIZED) | ||
| 4917 | rtmsg_ifinfo(RTM_DELLINK, dev, ~0U); | ||
| 4918 | |||
| 4816 | /* | 4919 | /* |
| 4817 | * Flush the unicast and multicast chains | 4920 | * Flush the unicast and multicast chains |
| 4818 | */ | 4921 | */ |
| @@ -4830,7 +4933,7 @@ static void rollback_registered_many(struct list_head *head) | |||
| 4830 | } | 4933 | } |
| 4831 | 4934 | ||
| 4832 | /* Process any work delayed until the end of the batch */ | 4935 | /* Process any work delayed until the end of the batch */ |
| 4833 | dev = list_entry(head->next, struct net_device, unreg_list); | 4936 | dev = list_first_entry(head, struct net_device, unreg_list); |
| 4834 | call_netdevice_notifiers(NETDEV_UNREGISTER_BATCH, dev); | 4937 | call_netdevice_notifiers(NETDEV_UNREGISTER_BATCH, dev); |
| 4835 | 4938 | ||
| 4836 | synchronize_net(); | 4939 | synchronize_net(); |
| @@ -5039,7 +5142,9 @@ int register_netdevice(struct net_device *dev) | |||
| 5039 | * Prevent userspace races by waiting until the network | 5142 | * Prevent userspace races by waiting until the network |
| 5040 | * device is fully setup before sending notifications. | 5143 | * device is fully setup before sending notifications. |
| 5041 | */ | 5144 | */ |
| 5042 | rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U); | 5145 | if (!dev->rtnl_link_ops || |
| 5146 | dev->rtnl_link_state == RTNL_LINK_INITIALIZED) | ||
| 5147 | rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U); | ||
| 5043 | 5148 | ||
| 5044 | out: | 5149 | out: |
| 5045 | return ret; | 5150 | return ret; |
| @@ -5216,7 +5321,7 @@ void netdev_run_todo(void) | |||
| 5216 | 5321 | ||
| 5217 | while (!list_empty(&list)) { | 5322 | while (!list_empty(&list)) { |
| 5218 | struct net_device *dev | 5323 | struct net_device *dev |
| 5219 | = list_entry(list.next, struct net_device, todo_list); | 5324 | = list_first_entry(&list, struct net_device, todo_list); |
| 5220 | list_del(&dev->todo_list); | 5325 | list_del(&dev->todo_list); |
| 5221 | 5326 | ||
| 5222 | if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) { | 5327 | if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) { |
| @@ -5367,6 +5472,8 @@ struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name, | |||
| 5367 | 5472 | ||
| 5368 | netdev_init_queues(dev); | 5473 | netdev_init_queues(dev); |
| 5369 | 5474 | ||
| 5475 | INIT_LIST_HEAD(&dev->ethtool_ntuple_list.list); | ||
| 5476 | dev->ethtool_ntuple_list.count = 0; | ||
| 5370 | INIT_LIST_HEAD(&dev->napi_list); | 5477 | INIT_LIST_HEAD(&dev->napi_list); |
| 5371 | INIT_LIST_HEAD(&dev->unreg_list); | 5478 | INIT_LIST_HEAD(&dev->unreg_list); |
| 5372 | INIT_LIST_HEAD(&dev->link_watch_list); | 5479 | INIT_LIST_HEAD(&dev->link_watch_list); |
| @@ -5403,6 +5510,9 @@ void free_netdev(struct net_device *dev) | |||
| 5403 | /* Flush device addresses */ | 5510 | /* Flush device addresses */ |
| 5404 | dev_addr_flush(dev); | 5511 | dev_addr_flush(dev); |
| 5405 | 5512 | ||
| 5513 | /* Clear ethtool n-tuple list */ | ||
| 5514 | ethtool_ntuple_flush(dev); | ||
| 5515 | |||
| 5406 | list_for_each_entry_safe(p, n, &dev->napi_list, dev_list) | 5516 | list_for_each_entry_safe(p, n, &dev->napi_list, dev_list) |
| 5407 | netif_napi_del(p); | 5517 | netif_napi_del(p); |
| 5408 | 5518 | ||
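Note on the list_entry() to list_first_entry() conversions in this file: they are behaviorally identical; the helper from <linux/list.h> merely names the intent:

    #define list_first_entry(ptr, type, member) \
    	list_entry((ptr)->next, type, member)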
diff --git a/net/core/dev_mcast.c b/net/core/dev_mcast.c
index 9e2fa39f22a3..3dc295beb483 100644
--- a/net/core/dev_mcast.c
+++ b/net/core/dev_mcast.c
@@ -96,7 +96,10 @@ int dev_mc_add(struct net_device *dev, void *addr, int alen, int glbl)
 	int err;
 
 	netif_addr_lock_bh(dev);
-	err = __dev_addr_add(&dev->mc_list, &dev->mc_count, addr, alen, glbl);
+	if (alen != dev->addr_len)
+		err = -EINVAL;
+	else
+		err = __dev_addr_add(&dev->mc_list, &dev->mc_count, addr, alen, glbl);
 	if (!err)
 		__dev_set_rx_mode(dev);
 	netif_addr_unlock_bh(dev);
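Note: with the new length check, a caller passing an address that does not match dev->addr_len now gets -EINVAL instead of a silently mismatched list entry. Illustrative only:

    /* Join the IPv4 all-hosts group on an Ethernet-like device */
    static int example_join_allhosts(struct net_device *dev)
    {
    	static const u8 mac[ETH_ALEN] =
    		{ 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };

    	/* now fails with -EINVAL if dev->addr_len != ETH_ALEN */
    	return dev_mc_add(dev, (void *)mac, ETH_ALEN, 0);
    }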
diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c
index b8e9d3a86887..f8c874975350 100644
--- a/net/core/drop_monitor.c
+++ b/net/core/drop_monitor.c
@@ -296,7 +296,6 @@ static int dropmon_net_event(struct notifier_block *ev_block,
 
 		new_stat->dev = dev;
 		new_stat->last_rx = jiffies;
-		INIT_RCU_HEAD(&new_stat->rcu);
 		spin_lock(&trace_state_lock);
 		list_add_rcu(&new_stat->list, &hw_stats_list);
 		spin_unlock(&trace_state_lock);
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index 236a9988ea91..f4cb6b6299d9 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -17,6 +17,7 @@
 #include <linux/errno.h>
 #include <linux/ethtool.h>
 #include <linux/netdevice.h>
+#include <linux/bitops.h>
 #include <asm/uaccess.h>
 
 /*
@@ -120,7 +121,7 @@ int ethtool_op_set_ufo(struct net_device *dev, u32 data)
  * NETIF_F_xxx values in include/linux/netdevice.h
  */
 static const u32 flags_dup_features =
-	ETH_FLAG_LRO;
+	(ETH_FLAG_LRO | ETH_FLAG_NTUPLE);
 
 u32 ethtool_op_get_flags(struct net_device *dev)
 {
@@ -134,19 +135,44 @@ u32 ethtool_op_get_flags(struct net_device *dev)
 
 int ethtool_op_set_flags(struct net_device *dev, u32 data)
 {
+	const struct ethtool_ops *ops = dev->ethtool_ops;
+	unsigned long features = dev->features;
+
 	if (data & ETH_FLAG_LRO)
-		dev->features |= NETIF_F_LRO;
+		features |= NETIF_F_LRO;
 	else
-		dev->features &= ~NETIF_F_LRO;
+		features &= ~NETIF_F_LRO;
+
+	if (data & ETH_FLAG_NTUPLE) {
+		if (!ops->set_rx_ntuple)
+			return -EOPNOTSUPP;
+		features |= NETIF_F_NTUPLE;
+	} else {
+		/* safe to clear regardless */
+		features &= ~NETIF_F_NTUPLE;
+	}
 
+	dev->features = features;
 	return 0;
 }
 
+void ethtool_ntuple_flush(struct net_device *dev)
+{
+	struct ethtool_rx_ntuple_flow_spec_container *fsc, *f;
+
+	list_for_each_entry_safe(fsc, f, &dev->ethtool_ntuple_list.list, list) {
+		list_del(&fsc->list);
+		kfree(fsc);
+	}
+	dev->ethtool_ntuple_list.count = 0;
+}
+EXPORT_SYMBOL(ethtool_ntuple_flush);
+
 /* Handlers for each ethtool command */
 
 static int ethtool_get_settings(struct net_device *dev, void __user *useraddr)
 {
-	struct ethtool_cmd cmd = { ETHTOOL_GSET };
+	struct ethtool_cmd cmd = { .cmd = ETHTOOL_GSET };
 	int err;
 
 	if (!dev->ethtool_ops->get_settings)
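Note: a userspace sketch (not from the patch) of how ETH_FLAG_NTUPLE reaches ethtool_op_set_flags(). ETHTOOL_SFLAGS replaces the whole flag word, so the current flags are read first; fd is assumed to be an open AF_INET datagram socket and the interface name is a placeholder.

    #include <string.h>
    #include <sys/ioctl.h>
    #include <net/if.h>
    #include <linux/ethtool.h>
    #include <linux/sockios.h>

    static int example_enable_ntuple(int fd, const char *ifname)
    {
    	struct ethtool_value ev = { .cmd = ETHTOOL_GFLAGS };
    	struct ifreq ifr;

    	memset(&ifr, 0, sizeof(ifr));
    	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
    	ifr.ifr_data = (void *)&ev;
    	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
    		return -1;

    	ev.cmd = ETHTOOL_SFLAGS;
    	ev.data |= ETH_FLAG_NTUPLE;	/* EOPNOTSUPP without ->set_rx_ntuple */
    	return ioctl(fd, SIOCETHTOOL, &ifr);
    }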
| @@ -174,7 +200,7 @@ static int ethtool_set_settings(struct net_device *dev, void __user *useraddr) | |||
| 174 | return dev->ethtool_ops->set_settings(dev, &cmd); | 200 | return dev->ethtool_ops->set_settings(dev, &cmd); |
| 175 | } | 201 | } |
| 176 | 202 | ||
| 177 | static int ethtool_get_drvinfo(struct net_device *dev, void __user *useraddr) | 203 | static noinline_for_stack int ethtool_get_drvinfo(struct net_device *dev, void __user *useraddr) |
| 178 | { | 204 | { |
| 179 | struct ethtool_drvinfo info; | 205 | struct ethtool_drvinfo info; |
| 180 | const struct ethtool_ops *ops = dev->ethtool_ops; | 206 | const struct ethtool_ops *ops = dev->ethtool_ops; |
| @@ -186,6 +212,10 @@ static int ethtool_get_drvinfo(struct net_device *dev, void __user *useraddr) | |||
| 186 | info.cmd = ETHTOOL_GDRVINFO; | 212 | info.cmd = ETHTOOL_GDRVINFO; |
| 187 | ops->get_drvinfo(dev, &info); | 213 | ops->get_drvinfo(dev, &info); |
| 188 | 214 | ||
| 215 | /* | ||
| 216 | * this method of obtaining string set info is deprecated; | ||
| 217 | * Use ETHTOOL_GSSET_INFO instead. | ||
| 218 | */ | ||
| 189 | if (ops->get_sset_count) { | 219 | if (ops->get_sset_count) { |
| 190 | int rc; | 220 | int rc; |
| 191 | 221 | ||
| @@ -209,7 +239,67 @@ static int ethtool_get_drvinfo(struct net_device *dev, void __user *useraddr) | |||
| 209 | return 0; | 239 | return 0; |
| 210 | } | 240 | } |
| 211 | 241 | ||
| 212 | static int ethtool_set_rxnfc(struct net_device *dev, void __user *useraddr) | 242 | static noinline_for_stack int ethtool_get_sset_info(struct net_device *dev, |
| 243 | void __user *useraddr) | ||
| 244 | { | ||
| 245 | struct ethtool_sset_info info; | ||
| 246 | const struct ethtool_ops *ops = dev->ethtool_ops; | ||
| 247 | u64 sset_mask; | ||
| 248 | int i, idx = 0, n_bits = 0, ret, rc; | ||
| 249 | u32 *info_buf = NULL; | ||
| 250 | |||
| 251 | if (!ops->get_sset_count) | ||
| 252 | return -EOPNOTSUPP; | ||
| 253 | |||
| 254 | if (copy_from_user(&info, useraddr, sizeof(info))) | ||
| 255 | return -EFAULT; | ||
| 256 | |||
| 257 | /* store copy of mask, because we zero struct later on */ | ||
| 258 | sset_mask = info.sset_mask; | ||
| 259 | if (!sset_mask) | ||
| 260 | return 0; | ||
| 261 | |||
| 262 | /* calculate size of return buffer */ | ||
| 263 | n_bits = hweight64(sset_mask); | ||
| 264 | |||
| 265 | memset(&info, 0, sizeof(info)); | ||
| 266 | info.cmd = ETHTOOL_GSSET_INFO; | ||
| 267 | |||
| 268 | info_buf = kzalloc(n_bits * sizeof(u32), GFP_USER); | ||
| 269 | if (!info_buf) | ||
| 270 | return -ENOMEM; | ||
| 271 | |||
| 272 | /* | ||
| 273 | * fill return buffer based on input bitmask and successful | ||
| 274 | * get_sset_count return | ||
| 275 | */ | ||
| 276 | for (i = 0; i < 64; i++) { | ||
| 277 | if (!(sset_mask & (1ULL << i))) | ||
| 278 | continue; | ||
| 279 | |||
| 280 | rc = ops->get_sset_count(dev, i); | ||
| 281 | if (rc >= 0) { | ||
| 282 | info.sset_mask |= (1ULL << i); | ||
| 283 | info_buf[idx++] = rc; | ||
| 284 | } | ||
| 285 | } | ||
| 286 | |||
| 287 | ret = -EFAULT; | ||
| 288 | if (copy_to_user(useraddr, &info, sizeof(info))) | ||
| 289 | goto out; | ||
| 290 | |||
| 291 | useraddr += offsetof(struct ethtool_sset_info, data); | ||
| 292 | if (copy_to_user(useraddr, info_buf, idx * sizeof(u32))) | ||
| 293 | goto out; | ||
| 294 | |||
| 295 | ret = 0; | ||
| 296 | |||
| 297 | out: | ||
| 298 | kfree(info_buf); | ||
| 299 | return ret; | ||
| 300 | } | ||
| 301 | |||
| 302 | static noinline_for_stack int ethtool_set_rxnfc(struct net_device *dev, void __user *useraddr) | ||
| 213 | { | 303 | { |
| 214 | struct ethtool_rxnfc cmd; | 304 | struct ethtool_rxnfc cmd; |
| 215 | 305 | ||
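Note: the matching userspace side of ETHTOOL_GSSET_INFO, sketched under the assumption that struct ethtool_sset_info ends in a flexible data[] array (the kernel code above copies the counts to offsetof(..., data)). Here a single set, ETH_SS_STATS, is queried:

    struct example_sset_req {
    	struct ethtool_sset_info hdr;
    	__u32 buf[1];			/* room for one count */
    };

    static int example_n_stats(int fd, struct ifreq *ifr)
    {
    	struct example_sset_req req;

    	memset(&req, 0, sizeof(req));
    	req.hdr.cmd = ETHTOOL_GSSET_INFO;
    	req.hdr.sset_mask = 1ULL << ETH_SS_STATS;
    	ifr->ifr_data = (void *)&req;
    	if (ioctl(fd, SIOCETHTOOL, ifr) < 0)
    		return -1;
    	/* a cleared bit means the driver has no such string set */
    	if (!(req.hdr.sset_mask & (1ULL << ETH_SS_STATS)))
    		return 0;
    	return req.buf[0];
    }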
| @@ -222,7 +312,7 @@ static int ethtool_set_rxnfc(struct net_device *dev, void __user *useraddr) | |||
| 222 | return dev->ethtool_ops->set_rxnfc(dev, &cmd); | 312 | return dev->ethtool_ops->set_rxnfc(dev, &cmd); |
| 223 | } | 313 | } |
| 224 | 314 | ||
| 225 | static int ethtool_get_rxnfc(struct net_device *dev, void __user *useraddr) | 315 | static noinline_for_stack int ethtool_get_rxnfc(struct net_device *dev, void __user *useraddr) |
| 226 | { | 316 | { |
| 227 | struct ethtool_rxnfc info; | 317 | struct ethtool_rxnfc info; |
| 228 | const struct ethtool_ops *ops = dev->ethtool_ops; | 318 | const struct ethtool_ops *ops = dev->ethtool_ops; |
| @@ -266,6 +356,312 @@ err_out: | |||
| 266 | return ret; | 356 | return ret; |
| 267 | } | 357 | } |
| 268 | 358 | ||
| 359 | static void __rx_ntuple_filter_add(struct ethtool_rx_ntuple_list *list, | ||
| 360 | struct ethtool_rx_ntuple_flow_spec *spec, | ||
| 361 | struct ethtool_rx_ntuple_flow_spec_container *fsc) | ||
| 362 | { | ||
| 363 | |||
| 364 | /* don't add filters forever */ | ||
| 365 | if (list->count >= ETHTOOL_MAX_NTUPLE_LIST_ENTRY) { | ||
| 366 | /* free the container */ | ||
| 367 | kfree(fsc); | ||
| 368 | return; | ||
| 369 | } | ||
| 370 | |||
| 371 | /* Copy the whole filter over */ | ||
| 372 | fsc->fs.flow_type = spec->flow_type; | ||
| 373 | memcpy(&fsc->fs.h_u, &spec->h_u, sizeof(spec->h_u)); | ||
| 374 | memcpy(&fsc->fs.m_u, &spec->m_u, sizeof(spec->m_u)); | ||
| 375 | |||
| 376 | fsc->fs.vlan_tag = spec->vlan_tag; | ||
| 377 | fsc->fs.vlan_tag_mask = spec->vlan_tag_mask; | ||
| 378 | fsc->fs.data = spec->data; | ||
| 379 | fsc->fs.data_mask = spec->data_mask; | ||
| 380 | fsc->fs.action = spec->action; | ||
| 381 | |||
| 382 | /* add to the list */ | ||
| 383 | list_add_tail_rcu(&fsc->list, &list->list); | ||
| 384 | list->count++; | ||
| 385 | } | ||
| 386 | |||
| 387 | static noinline_for_stack int ethtool_set_rx_ntuple(struct net_device *dev, void __user *useraddr) | ||
| 388 | { | ||
| 389 | struct ethtool_rx_ntuple cmd; | ||
| 390 | const struct ethtool_ops *ops = dev->ethtool_ops; | ||
| 391 | struct ethtool_rx_ntuple_flow_spec_container *fsc = NULL; | ||
| 392 | int ret; | ||
| 393 | |||
| 394 | if (!(dev->features & NETIF_F_NTUPLE)) | ||
| 395 | return -EINVAL; | ||
| 396 | |||
| 397 | if (copy_from_user(&cmd, useraddr, sizeof(cmd))) | ||
| 398 | return -EFAULT; | ||
| 399 | |||
| 400 | /* | ||
| 401 | * Cache filter in dev struct for GET operation only if | ||
| 402 | * the underlying driver doesn't have its own GET operation, and | ||
| 403 | * only if the filter was added successfully. First make sure we | ||
| 404 | * can allocate the filter, then continue if successful. | ||
| 405 | */ | ||
| 406 | if (!ops->get_rx_ntuple) { | ||
| 407 | fsc = kmalloc(sizeof(*fsc), GFP_ATOMIC); | ||
| 408 | if (!fsc) | ||
| 409 | return -ENOMEM; | ||
| 410 | } | ||
| 411 | |||
| 412 | ret = ops->set_rx_ntuple(dev, &cmd); | ||
| 413 | if (ret) { | ||
| 414 | kfree(fsc); | ||
| 415 | return ret; | ||
| 416 | } | ||
| 417 | |||
| 418 | if (!ops->get_rx_ntuple) | ||
| 419 | __rx_ntuple_filter_add(&dev->ethtool_ntuple_list, &cmd.fs, fsc); | ||
| 420 | |||
| 421 | return ret; | ||
| 422 | } | ||
| 423 | |||
| 424 | static int ethtool_get_rx_ntuple(struct net_device *dev, void __user *useraddr) | ||
| 425 | { | ||
| 426 | struct ethtool_gstrings gstrings; | ||
| 427 | const struct ethtool_ops *ops = dev->ethtool_ops; | ||
| 428 | struct ethtool_rx_ntuple_flow_spec_container *fsc; | ||
| 429 | u8 *data; | ||
| 430 | char *p; | ||
| 431 | int ret, i, num_strings = 0; | ||
| 432 | |||
| 433 | if (!ops->get_sset_count) | ||
| 434 | return -EOPNOTSUPP; | ||
| 435 | |||
| 436 | if (copy_from_user(&gstrings, useraddr, sizeof(gstrings))) | ||
| 437 | return -EFAULT; | ||
| 438 | |||
| 439 | ret = ops->get_sset_count(dev, gstrings.string_set); | ||
| 440 | if (ret < 0) | ||
| 441 | return ret; | ||
| 442 | |||
| 443 | gstrings.len = ret; | ||
| 444 | |||
| 445 | data = kmalloc(gstrings.len * ETH_GSTRING_LEN, GFP_USER); | ||
| 446 | if (!data) | ||
| 447 | return -ENOMEM; | ||
| 448 | |||
| 449 | if (ops->get_rx_ntuple) { | ||
| 450 | /* driver-specific filter grab */ | ||
| 451 | ret = ops->get_rx_ntuple(dev, gstrings.string_set, data); | ||
| 452 | goto copy; | ||
| 453 | } | ||
| 454 | |||
| 455 | /* default ethtool filter grab */ | ||
| 456 | i = 0; | ||
| 457 | p = (char *)data; | ||
| 458 | list_for_each_entry(fsc, &dev->ethtool_ntuple_list.list, list) { | ||
| 459 | sprintf(p, "Filter %d:\n", i); | ||
| 460 | p += ETH_GSTRING_LEN; | ||
| 461 | num_strings++; | ||
| 462 | |||
| 463 | switch (fsc->fs.flow_type) { | ||
| 464 | case TCP_V4_FLOW: | ||
| 465 | sprintf(p, "\tFlow Type: TCP\n"); | ||
| 466 | p += ETH_GSTRING_LEN; | ||
| 467 | num_strings++; | ||
| 468 | break; | ||
| 469 | case UDP_V4_FLOW: | ||
| 470 | sprintf(p, "\tFlow Type: UDP\n"); | ||
| 471 | p += ETH_GSTRING_LEN; | ||
| 472 | num_strings++; | ||
| 473 | break; | ||
| 474 | case SCTP_V4_FLOW: | ||
| 475 | sprintf(p, "\tFlow Type: SCTP\n"); | ||
| 476 | p += ETH_GSTRING_LEN; | ||
| 477 | num_strings++; | ||
| 478 | break; | ||
| 479 | case AH_ESP_V4_FLOW: | ||
| 480 | sprintf(p, "\tFlow Type: AH ESP\n"); | ||
| 481 | p += ETH_GSTRING_LEN; | ||
| 482 | num_strings++; | ||
| 483 | break; | ||
| 484 | case ESP_V4_FLOW: | ||
| 485 | sprintf(p, "\tFlow Type: ESP\n"); | ||
| 486 | p += ETH_GSTRING_LEN; | ||
| 487 | num_strings++; | ||
| 488 | break; | ||
| 489 | case IP_USER_FLOW: | ||
| 490 | sprintf(p, "\tFlow Type: Raw IP\n"); | ||
| 491 | p += ETH_GSTRING_LEN; | ||
| 492 | num_strings++; | ||
| 493 | break; | ||
| 494 | case IPV4_FLOW: | ||
| 495 | sprintf(p, "\tFlow Type: IPv4\n"); | ||
| 496 | p += ETH_GSTRING_LEN; | ||
| 497 | num_strings++; | ||
| 498 | break; | ||
| 499 | default: | ||
| 500 | sprintf(p, "\tFlow Type: Unknown\n"); | ||
| 501 | p += ETH_GSTRING_LEN; | ||
| 502 | num_strings++; | ||
| 503 | goto unknown_filter; | ||
| 504 | }; | ||
| 505 | |||
| 506 | /* now the rest of the filters */ | ||
| 507 | switch (fsc->fs.flow_type) { | ||
| 508 | case TCP_V4_FLOW: | ||
| 509 | case UDP_V4_FLOW: | ||
| 510 | case SCTP_V4_FLOW: | ||
| 511 | sprintf(p, "\tSrc IP addr: 0x%x\n", | ||
| 512 | fsc->fs.h_u.tcp_ip4_spec.ip4src); | ||
| 513 | p += ETH_GSTRING_LEN; | ||
| 514 | num_strings++; | ||
| 515 | sprintf(p, "\tSrc IP mask: 0x%x\n", | ||
| 516 | fsc->fs.m_u.tcp_ip4_spec.ip4src); | ||
| 517 | p += ETH_GSTRING_LEN; | ||
| 518 | num_strings++; | ||
| 519 | sprintf(p, "\tDest IP addr: 0x%x\n", | ||
| 520 | fsc->fs.h_u.tcp_ip4_spec.ip4dst); | ||
| 521 | p += ETH_GSTRING_LEN; | ||
| 522 | num_strings++; | ||
| 523 | sprintf(p, "\tDest IP mask: 0x%x\n", | ||
| 524 | fsc->fs.m_u.tcp_ip4_spec.ip4dst); | ||
| 525 | p += ETH_GSTRING_LEN; | ||
| 526 | num_strings++; | ||
| 527 | sprintf(p, "\tSrc Port: %d, mask: 0x%x\n", | ||
| 528 | fsc->fs.h_u.tcp_ip4_spec.psrc, | ||
| 529 | fsc->fs.m_u.tcp_ip4_spec.psrc); | ||
| 530 | p += ETH_GSTRING_LEN; | ||
| 531 | num_strings++; | ||
| 532 | sprintf(p, "\tDest Port: %d, mask: 0x%x\n", | ||
| 533 | fsc->fs.h_u.tcp_ip4_spec.pdst, | ||
| 534 | fsc->fs.m_u.tcp_ip4_spec.pdst); | ||
| 535 | p += ETH_GSTRING_LEN; | ||
| 536 | num_strings++; | ||
| 537 | sprintf(p, "\tTOS: %d, mask: 0x%x\n", | ||
| 538 | fsc->fs.h_u.tcp_ip4_spec.tos, | ||
| 539 | fsc->fs.m_u.tcp_ip4_spec.tos); | ||
| 540 | p += ETH_GSTRING_LEN; | ||
| 541 | num_strings++; | ||
| 542 | break; | ||
| 543 | case AH_ESP_V4_FLOW: | ||
| 544 | case ESP_V4_FLOW: | ||
| 545 | sprintf(p, "\tSrc IP addr: 0x%x\n", | ||
| 546 | fsc->fs.h_u.ah_ip4_spec.ip4src); | ||
| 547 | p += ETH_GSTRING_LEN; | ||
| 548 | num_strings++; | ||
| 549 | sprintf(p, "\tSrc IP mask: 0x%x\n", | ||
| 550 | fsc->fs.m_u.ah_ip4_spec.ip4src); | ||
| 551 | p += ETH_GSTRING_LEN; | ||
| 552 | num_strings++; | ||
| 553 | sprintf(p, "\tDest IP addr: 0x%x\n", | ||
| 554 | fsc->fs.h_u.ah_ip4_spec.ip4dst); | ||
| 555 | p += ETH_GSTRING_LEN; | ||
| 556 | num_strings++; | ||
| 557 | sprintf(p, "\tDest IP mask: 0x%x\n", | ||
| 558 | fsc->fs.m_u.ah_ip4_spec.ip4dst); | ||
| 559 | p += ETH_GSTRING_LEN; | ||
| 560 | num_strings++; | ||
| 561 | sprintf(p, "\tSPI: %d, mask: 0x%x\n", | ||
| 562 | fsc->fs.h_u.ah_ip4_spec.spi, | ||
| 563 | fsc->fs.m_u.ah_ip4_spec.spi); | ||
| 564 | p += ETH_GSTRING_LEN; | ||
| 565 | num_strings++; | ||
| 566 | sprintf(p, "\tTOS: %d, mask: 0x%x\n", | ||
| 567 | fsc->fs.h_u.ah_ip4_spec.tos, | ||
| 568 | fsc->fs.m_u.ah_ip4_spec.tos); | ||
| 569 | p += ETH_GSTRING_LEN; | ||
| 570 | num_strings++; | ||
| 571 | break; | ||
| 572 | case IP_USER_FLOW: | ||
| 573 | sprintf(p, "\tSrc IP addr: 0x%x\n", | ||
| 574 | fsc->fs.h_u.raw_ip4_spec.ip4src); | ||
| 575 | p += ETH_GSTRING_LEN; | ||
| 576 | num_strings++; | ||
| 577 | sprintf(p, "\tSrc IP mask: 0x%x\n", | ||
| 578 | fsc->fs.m_u.raw_ip4_spec.ip4src); | ||
| 579 | p += ETH_GSTRING_LEN; | ||
| 580 | num_strings++; | ||
| 581 | sprintf(p, "\tDest IP addr: 0x%x\n", | ||
| 582 | fsc->fs.h_u.raw_ip4_spec.ip4dst); | ||
| 583 | p += ETH_GSTRING_LEN; | ||
| 584 | num_strings++; | ||
| 585 | sprintf(p, "\tDest IP mask: 0x%x\n", | ||
| 586 | fsc->fs.m_u.raw_ip4_spec.ip4dst); | ||
| 587 | p += ETH_GSTRING_LEN; | ||
| 588 | num_strings++; | ||
| 589 | break; | ||
| 590 | case IPV4_FLOW: | ||
| 591 | sprintf(p, "\tSrc IP addr: 0x%x\n", | ||
| 592 | fsc->fs.h_u.usr_ip4_spec.ip4src); | ||
| 593 | p += ETH_GSTRING_LEN; | ||
| 594 | num_strings++; | ||
| 595 | sprintf(p, "\tSrc IP mask: 0x%x\n", | ||
| 596 | fsc->fs.m_u.usr_ip4_spec.ip4src); | ||
| 597 | p += ETH_GSTRING_LEN; | ||
| 598 | num_strings++; | ||
| 599 | sprintf(p, "\tDest IP addr: 0x%x\n", | ||
| 600 | fsc->fs.h_u.usr_ip4_spec.ip4dst); | ||
| 601 | p += ETH_GSTRING_LEN; | ||
| 602 | num_strings++; | ||
| 603 | sprintf(p, "\tDest IP mask: 0x%x\n", | ||
| 604 | fsc->fs.m_u.usr_ip4_spec.ip4dst); | ||
| 605 | p += ETH_GSTRING_LEN; | ||
| 606 | num_strings++; | ||
| 607 | sprintf(p, "\tL4 bytes: 0x%x, mask: 0x%x\n", | ||
| 608 | fsc->fs.h_u.usr_ip4_spec.l4_4_bytes, | ||
| 609 | fsc->fs.m_u.usr_ip4_spec.l4_4_bytes); | ||
| 610 | p += ETH_GSTRING_LEN; | ||
| 611 | num_strings++; | ||
| 612 | sprintf(p, "\tTOS: %d, mask: 0x%x\n", | ||
| 613 | fsc->fs.h_u.usr_ip4_spec.tos, | ||
| 614 | fsc->fs.m_u.usr_ip4_spec.tos); | ||
| 615 | p += ETH_GSTRING_LEN; | ||
| 616 | num_strings++; | ||
| 617 | sprintf(p, "\tIP Version: %d, mask: 0x%x\n", | ||
| 618 | fsc->fs.h_u.usr_ip4_spec.ip_ver, | ||
| 619 | fsc->fs.m_u.usr_ip4_spec.ip_ver); | ||
| 620 | p += ETH_GSTRING_LEN; | ||
| 621 | num_strings++; | ||
| 622 | sprintf(p, "\tProtocol: %d, mask: 0x%x\n", | ||
| 623 | fsc->fs.h_u.usr_ip4_spec.proto, | ||
| 624 | fsc->fs.m_u.usr_ip4_spec.proto); | ||
| 625 | p += ETH_GSTRING_LEN; | ||
| 626 | num_strings++; | ||
| 627 | break; | ||
| 628 | }; | ||
| 629 | sprintf(p, "\tVLAN: %d, mask: 0x%x\n", | ||
| 630 | fsc->fs.vlan_tag, fsc->fs.vlan_tag_mask); | ||
| 631 | p += ETH_GSTRING_LEN; | ||
| 632 | num_strings++; | ||
| 633 | sprintf(p, "\tUser-defined: 0x%Lx\n", fsc->fs.data); | ||
| 634 | p += ETH_GSTRING_LEN; | ||
| 635 | num_strings++; | ||
| 636 | sprintf(p, "\tUser-defined mask: 0x%Lx\n", fsc->fs.data_mask); | ||
| 637 | p += ETH_GSTRING_LEN; | ||
| 638 | num_strings++; | ||
| 639 | if (fsc->fs.action == ETHTOOL_RXNTUPLE_ACTION_DROP) | ||
| 640 | sprintf(p, "\tAction: Drop\n"); | ||
| 641 | else | ||
| 642 | sprintf(p, "\tAction: Direct to queue %d\n", | ||
| 643 | fsc->fs.action); | ||
| 644 | p += ETH_GSTRING_LEN; | ||
| 645 | num_strings++; | ||
| 646 | unknown_filter: | ||
| 647 | i++; | ||
| 648 | } | ||
| 649 | copy: | ||
| 650 | /* indicate to userspace how many strings we actually have */ | ||
| 651 | gstrings.len = num_strings; | ||
| 652 | ret = -EFAULT; | ||
| 653 | if (copy_to_user(useraddr, &gstrings, sizeof(gstrings))) | ||
| 654 | goto out; | ||
| 655 | useraddr += sizeof(gstrings); | ||
| 656 | if (copy_to_user(useraddr, data, gstrings.len * ETH_GSTRING_LEN)) | ||
| 657 | goto out; | ||
| 658 | ret = 0; | ||
| 659 | |||
| 660 | out: | ||
| 661 | kfree(data); | ||
| 662 | return ret; | ||
| 663 | } | ||
| 664 | |||
| 269 | static int ethtool_get_regs(struct net_device *dev, char __user *useraddr) | 665 | static int ethtool_get_regs(struct net_device *dev, char __user *useraddr) |
| 270 | { | 666 | { |
| 271 | struct ethtool_regs regs; | 667 | struct ethtool_regs regs; |
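Note: a userspace sketch of installing one n-tuple filter through the new ETHTOOL_SRXNTUPLE command. Field names follow the flow-spec structure visible above; the mask convention is driver-specific. This example asks the hardware to drop IPv4 TCP traffic to port 80:

    static int example_drop_http(int fd, struct ifreq *ifr)
    {
    	struct ethtool_rx_ntuple nt;

    	memset(&nt, 0, sizeof(nt));
    	nt.cmd = ETHTOOL_SRXNTUPLE;
    	nt.fs.flow_type = TCP_V4_FLOW;
    	nt.fs.h_u.tcp_ip4_spec.pdst = htons(80);
    	nt.fs.action = ETHTOOL_RXNTUPLE_ACTION_DROP;
    	ifr->ifr_data = (void *)&nt;
    	return ioctl(fd, SIOCETHTOOL, ifr);	/* EINVAL unless NETIF_F_NTUPLE is set */
    }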
| @@ -324,7 +720,7 @@ static int ethtool_reset(struct net_device *dev, char __user *useraddr) | |||
| 324 | 720 | ||
| 325 | static int ethtool_get_wol(struct net_device *dev, char __user *useraddr) | 721 | static int ethtool_get_wol(struct net_device *dev, char __user *useraddr) |
| 326 | { | 722 | { |
| 327 | struct ethtool_wolinfo wol = { ETHTOOL_GWOL }; | 723 | struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL }; |
| 328 | 724 | ||
| 329 | if (!dev->ethtool_ops->get_wol) | 725 | if (!dev->ethtool_ops->get_wol) |
| 330 | return -EOPNOTSUPP; | 726 | return -EOPNOTSUPP; |
| @@ -456,9 +852,9 @@ static int ethtool_set_eeprom(struct net_device *dev, void __user *useraddr) | |||
| 456 | return ret; | 852 | return ret; |
| 457 | } | 853 | } |
| 458 | 854 | ||
| 459 | static int ethtool_get_coalesce(struct net_device *dev, void __user *useraddr) | 855 | static noinline_for_stack int ethtool_get_coalesce(struct net_device *dev, void __user *useraddr) |
| 460 | { | 856 | { |
| 461 | struct ethtool_coalesce coalesce = { ETHTOOL_GCOALESCE }; | 857 | struct ethtool_coalesce coalesce = { .cmd = ETHTOOL_GCOALESCE }; |
| 462 | 858 | ||
| 463 | if (!dev->ethtool_ops->get_coalesce) | 859 | if (!dev->ethtool_ops->get_coalesce) |
| 464 | return -EOPNOTSUPP; | 860 | return -EOPNOTSUPP; |
| @@ -470,7 +866,7 @@ static int ethtool_get_coalesce(struct net_device *dev, void __user *useraddr) | |||
| 470 | return 0; | 866 | return 0; |
| 471 | } | 867 | } |
| 472 | 868 | ||
| 473 | static int ethtool_set_coalesce(struct net_device *dev, void __user *useraddr) | 869 | static noinline_for_stack int ethtool_set_coalesce(struct net_device *dev, void __user *useraddr) |
| 474 | { | 870 | { |
| 475 | struct ethtool_coalesce coalesce; | 871 | struct ethtool_coalesce coalesce; |
| 476 | 872 | ||
| @@ -485,7 +881,7 @@ static int ethtool_set_coalesce(struct net_device *dev, void __user *useraddr) | |||
| 485 | 881 | ||
| 486 | static int ethtool_get_ringparam(struct net_device *dev, void __user *useraddr) | 882 | static int ethtool_get_ringparam(struct net_device *dev, void __user *useraddr) |
| 487 | { | 883 | { |
| 488 | struct ethtool_ringparam ringparam = { ETHTOOL_GRINGPARAM }; | 884 | struct ethtool_ringparam ringparam = { .cmd = ETHTOOL_GRINGPARAM }; |
| 489 | 885 | ||
| 490 | if (!dev->ethtool_ops->get_ringparam) | 886 | if (!dev->ethtool_ops->get_ringparam) |
| 491 | return -EOPNOTSUPP; | 887 | return -EOPNOTSUPP; |
| @@ -839,7 +1235,7 @@ static int ethtool_get_perm_addr(struct net_device *dev, void __user *useraddr) | |||
| 839 | static int ethtool_get_value(struct net_device *dev, char __user *useraddr, | 1235 | static int ethtool_get_value(struct net_device *dev, char __user *useraddr, |
| 840 | u32 cmd, u32 (*actor)(struct net_device *)) | 1236 | u32 cmd, u32 (*actor)(struct net_device *)) |
| 841 | { | 1237 | { |
| 842 | struct ethtool_value edata = { cmd }; | 1238 | struct ethtool_value edata = { .cmd = cmd }; |
| 843 | 1239 | ||
| 844 | if (!actor) | 1240 | if (!actor) |
| 845 | return -EOPNOTSUPP; | 1241 | return -EOPNOTSUPP; |
| @@ -880,7 +1276,7 @@ static int ethtool_set_value(struct net_device *dev, char __user *useraddr, | |||
| 880 | return actor(dev, edata.data); | 1276 | return actor(dev, edata.data); |
| 881 | } | 1277 | } |
| 882 | 1278 | ||
| 883 | static int ethtool_flash_device(struct net_device *dev, char __user *useraddr) | 1279 | static noinline_for_stack int ethtool_flash_device(struct net_device *dev, char __user *useraddr) |
| 884 | { | 1280 | { |
| 885 | struct ethtool_flash efl; | 1281 | struct ethtool_flash efl; |
| 886 | 1282 | ||
| @@ -1113,6 +1509,15 @@ int dev_ethtool(struct net *net, struct ifreq *ifr) | |||
| 1113 | case ETHTOOL_RESET: | 1509 | case ETHTOOL_RESET: |
| 1114 | rc = ethtool_reset(dev, useraddr); | 1510 | rc = ethtool_reset(dev, useraddr); |
| 1115 | break; | 1511 | break; |
| 1512 | case ETHTOOL_SRXNTUPLE: | ||
| 1513 | rc = ethtool_set_rx_ntuple(dev, useraddr); | ||
| 1514 | break; | ||
| 1515 | case ETHTOOL_GRXNTUPLE: | ||
| 1516 | rc = ethtool_get_rx_ntuple(dev, useraddr); | ||
| 1517 | break; | ||
| 1518 | case ETHTOOL_GSSET_INFO: | ||
| 1519 | rc = ethtool_get_sset_info(dev, useraddr); | ||
| 1520 | break; | ||
| 1116 | default: | 1521 | default: |
| 1117 | rc = -EOPNOTSUPP; | 1522 | rc = -EOPNOTSUPP; |
| 1118 | } | 1523 | } |
diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c
index 02a3b2c69c1e..9a24377146bf 100644
--- a/net/core/fib_rules.c
+++ b/net/core/fib_rules.c
@@ -708,7 +708,7 @@ static struct notifier_block fib_rules_notifier = {
 	.notifier_call = fib_rules_event,
 };
 
-static int fib_rules_net_init(struct net *net)
+static int __net_init fib_rules_net_init(struct net *net)
 {
 	INIT_LIST_HEAD(&net->rules_ops);
 	spin_lock_init(&net->rules_mod_lock);
diff --git a/net/core/filter.c b/net/core/filter.c
index 08db7b9143a3..d38ef7fd50f0 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -86,7 +86,7 @@ int sk_filter(struct sock *sk, struct sk_buff *skb)
 		return err;
 
 	rcu_read_lock_bh();
-	filter = rcu_dereference(sk->sk_filter);
+	filter = rcu_dereference_bh(sk->sk_filter);
 	if (filter) {
 		unsigned int pkt_len = sk_run_filter(skb, filter->insns,
 				filter->len);
@@ -521,7 +521,7 @@ int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
 	}
 
 	rcu_read_lock_bh();
-	old_fp = rcu_dereference(sk->sk_filter);
+	old_fp = rcu_dereference_bh(sk->sk_filter);
 	rcu_assign_pointer(sk->sk_filter, fp);
 	rcu_read_unlock_bh();
 
@@ -529,6 +529,7 @@ int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
 		sk_filter_delayed_uncharge(sk, old_fp);
 	return 0;
 }
+EXPORT_SYMBOL_GPL(sk_attach_filter);
 
 int sk_detach_filter(struct sock *sk)
 {
@@ -536,7 +537,7 @@ int sk_detach_filter(struct sock *sk)
 	struct sk_filter *filter;
 
 	rcu_read_lock_bh();
-	filter = rcu_dereference(sk->sk_filter);
+	filter = rcu_dereference_bh(sk->sk_filter);
 	if (filter) {
 		rcu_assign_pointer(sk->sk_filter, NULL);
 		sk_filter_delayed_uncharge(sk, filter);
@@ -545,3 +546,4 @@ int sk_detach_filter(struct sock *sk)
 	rcu_read_unlock_bh();
 	return ret;
 }
+EXPORT_SYMBOL_GPL(sk_detach_filter);
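Note: the new exports let modules attach classic BPF socket filters to sockets they own. A hedged sketch with the smallest valid program, a single BPF_RET instruction that accepts every packet:

    static struct sock_filter example_accept_all[] = {
    	{ 0x06, 0, 0, 0x0000ffff },	/* BPF_RET|BPF_K, accept up to 0xffff bytes */
    };

    static int example_attach(struct sock *sk)
    {
    	struct sock_fprog fprog = {
    		.len	= ARRAY_SIZE(example_accept_all),
    		.filter	= example_accept_all,
    	};

    	return sk_attach_filter(&fprog, sk);	/* paired with sk_detach_filter(sk) */
    }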
diff --git a/net/core/neighbour.c b/net/core/neighbour.c index f35377b643e4..6cee6434da67 100644 --- a/net/core/neighbour.c +++ b/net/core/neighbour.c | |||
| @@ -771,6 +771,8 @@ static __inline__ int neigh_max_probes(struct neighbour *n) | |||
| 771 | } | 771 | } |
| 772 | 772 | ||
| 773 | static void neigh_invalidate(struct neighbour *neigh) | 773 | static void neigh_invalidate(struct neighbour *neigh) |
| 774 | __releases(neigh->lock) | ||
| 775 | __acquires(neigh->lock) | ||
| 774 | { | 776 | { |
| 775 | struct sk_buff *skb; | 777 | struct sk_buff *skb; |
| 776 | 778 | ||
| @@ -2417,8 +2419,7 @@ EXPORT_SYMBOL(neigh_seq_stop); | |||
| 2417 | 2419 | ||
| 2418 | static void *neigh_stat_seq_start(struct seq_file *seq, loff_t *pos) | 2420 | static void *neigh_stat_seq_start(struct seq_file *seq, loff_t *pos) |
| 2419 | { | 2421 | { |
| 2420 | struct proc_dir_entry *pde = seq->private; | 2422 | struct neigh_table *tbl = seq->private; |
| 2421 | struct neigh_table *tbl = pde->data; | ||
| 2422 | int cpu; | 2423 | int cpu; |
| 2423 | 2424 | ||
| 2424 | if (*pos == 0) | 2425 | if (*pos == 0) |
| @@ -2435,8 +2436,7 @@ static void *neigh_stat_seq_start(struct seq_file *seq, loff_t *pos) | |||
| 2435 | 2436 | ||
| 2436 | static void *neigh_stat_seq_next(struct seq_file *seq, void *v, loff_t *pos) | 2437 | static void *neigh_stat_seq_next(struct seq_file *seq, void *v, loff_t *pos) |
| 2437 | { | 2438 | { |
| 2438 | struct proc_dir_entry *pde = seq->private; | 2439 | struct neigh_table *tbl = seq->private; |
| 2439 | struct neigh_table *tbl = pde->data; | ||
| 2440 | int cpu; | 2440 | int cpu; |
| 2441 | 2441 | ||
| 2442 | for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) { | 2442 | for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) { |
| @@ -2455,8 +2455,7 @@ static void neigh_stat_seq_stop(struct seq_file *seq, void *v) | |||
| 2455 | 2455 | ||
| 2456 | static int neigh_stat_seq_show(struct seq_file *seq, void *v) | 2456 | static int neigh_stat_seq_show(struct seq_file *seq, void *v) |
| 2457 | { | 2457 | { |
| 2458 | struct proc_dir_entry *pde = seq->private; | 2458 | struct neigh_table *tbl = seq->private; |
| 2459 | struct neigh_table *tbl = pde->data; | ||
| 2460 | struct neigh_statistics *st = v; | 2459 | struct neigh_statistics *st = v; |
| 2461 | 2460 | ||
| 2462 | if (v == SEQ_START_TOKEN) { | 2461 | if (v == SEQ_START_TOKEN) { |
| @@ -2501,7 +2500,7 @@ static int neigh_stat_seq_open(struct inode *inode, struct file *file) | |||
| 2501 | 2500 | ||
| 2502 | if (!ret) { | 2501 | if (!ret) { |
| 2503 | struct seq_file *sf = file->private_data; | 2502 | struct seq_file *sf = file->private_data; |
| 2504 | sf->private = PDE(inode); | 2503 | sf->private = PDE(inode)->data; |
| 2505 | } | 2504 | } |
| 2506 | return ret; | 2505 | return ret; |
| 2507 | }; | 2506 | }; |
| @@ -2559,9 +2558,11 @@ EXPORT_SYMBOL(neigh_app_ns); | |||
| 2559 | 2558 | ||
| 2560 | #ifdef CONFIG_SYSCTL | 2559 | #ifdef CONFIG_SYSCTL |
| 2561 | 2560 | ||
| 2561 | #define NEIGH_VARS_MAX 19 | ||
| 2562 | |||
| 2562 | static struct neigh_sysctl_table { | 2563 | static struct neigh_sysctl_table { |
| 2563 | struct ctl_table_header *sysctl_header; | 2564 | struct ctl_table_header *sysctl_header; |
| 2564 | struct ctl_table neigh_vars[__NET_NEIGH_MAX]; | 2565 | struct ctl_table neigh_vars[NEIGH_VARS_MAX]; |
| 2565 | char *dev_name; | 2566 | char *dev_name; |
| 2566 | } neigh_sysctl_template __read_mostly = { | 2567 | } neigh_sysctl_template __read_mostly = { |
| 2567 | .neigh_vars = { | 2568 | .neigh_vars = { |
| @@ -2678,8 +2679,7 @@ static struct neigh_sysctl_table { | |||
| 2678 | }; | 2679 | }; |
| 2679 | 2680 | ||
| 2680 | int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p, | 2681 | int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p, |
| 2681 | int p_id, int pdev_id, char *p_name, | 2682 | char *p_name, proc_handler *handler) |
| 2682 | proc_handler *handler) | ||
| 2683 | { | 2683 | { |
| 2684 | struct neigh_sysctl_table *t; | 2684 | struct neigh_sysctl_table *t; |
| 2685 | const char *dev_name_source = NULL; | 2685 | const char *dev_name_source = NULL; |
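In neighbour.c, the /proc stat handlers now keep the struct neigh_table pointer itself in seq->private rather than the proc_dir_entry, resolving the table once at open time from PDE(inode)->data; NEIGH_VARS_MAX likewise replaces the stale __NET_NEIGH_MAX bound on the sysctl array. The same open-time pattern for a generic table-backed stat file (foo_* names hypothetical):

        static int foo_stat_seq_open(struct inode *inode, struct file *file)
        {
                int ret = seq_open(file, &foo_stat_seq_ops);

                if (!ret) {
                        struct seq_file *sf = file->private_data;

                        /* stash the payload, not the proc entry */
                        sf->private = PDE(inode)->data;
                }
                return ret;
        }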
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 0b4d0d35ef40..6f9206b36dc2 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
| @@ -407,11 +407,24 @@ static void arp_reply(struct sk_buff *skb) | |||
| 407 | __be32 sip, tip; | 407 | __be32 sip, tip; |
| 408 | unsigned char *sha; | 408 | unsigned char *sha; |
| 409 | struct sk_buff *send_skb; | 409 | struct sk_buff *send_skb; |
| 410 | struct netpoll *np = NULL; | 410 | struct netpoll *np, *tmp; |
| 411 | unsigned long flags; | ||
| 412 | int hits = 0; | ||
| 413 | |||
| 414 | if (list_empty(&npinfo->rx_np)) | ||
| 415 | return; | ||
| 416 | |||
| 417 | /* Before checking the packet, we do some early | ||
| 418 | inspection of whether this is interesting at all */ | ||
| 419 | spin_lock_irqsave(&npinfo->rx_lock, flags); | ||
| 420 | list_for_each_entry_safe(np, tmp, &npinfo->rx_np, rx) { | ||
| 421 | if (np->dev == skb->dev) | ||
| 422 | hits++; | ||
| 423 | } | ||
| 424 | spin_unlock_irqrestore(&npinfo->rx_lock, flags); | ||
| 411 | 425 | ||
| 412 | if (npinfo->rx_np && npinfo->rx_np->dev == skb->dev) | 426 | /* No netpoll struct is using this dev */ |
| 413 | np = npinfo->rx_np; | 427 | if (!hits) |
| 414 | if (!np) | ||
| 415 | return; | 428 | return; |
| 416 | 429 | ||
| 417 | /* No arp on this interface */ | 430 | /* No arp on this interface */ |
| @@ -437,77 +450,91 @@ static void arp_reply(struct sk_buff *skb) | |||
| 437 | arp_ptr += skb->dev->addr_len; | 450 | arp_ptr += skb->dev->addr_len; |
| 438 | memcpy(&sip, arp_ptr, 4); | 451 | memcpy(&sip, arp_ptr, 4); |
| 439 | arp_ptr += 4; | 452 | arp_ptr += 4; |
| 440 | /* if we actually cared about dst hw addr, it would get copied here */ | 453 | /* If we actually cared about dst hw addr, |
| 454 | it would get copied here */ | ||
| 441 | arp_ptr += skb->dev->addr_len; | 455 | arp_ptr += skb->dev->addr_len; |
| 442 | memcpy(&tip, arp_ptr, 4); | 456 | memcpy(&tip, arp_ptr, 4); |
| 443 | 457 | ||
| 444 | /* Should we ignore arp? */ | 458 | /* Should we ignore arp? */ |
| 445 | if (tip != np->local_ip || | 459 | if (ipv4_is_loopback(tip) || ipv4_is_multicast(tip)) |
| 446 | ipv4_is_loopback(tip) || ipv4_is_multicast(tip)) | ||
| 447 | return; | 460 | return; |
| 448 | 461 | ||
| 449 | size = arp_hdr_len(skb->dev); | 462 | size = arp_hdr_len(skb->dev); |
| 450 | send_skb = find_skb(np, size + LL_ALLOCATED_SPACE(np->dev), | ||
| 451 | LL_RESERVED_SPACE(np->dev)); | ||
| 452 | 463 | ||
| 453 | if (!send_skb) | 464 | spin_lock_irqsave(&npinfo->rx_lock, flags); |
| 454 | return; | 465 | list_for_each_entry_safe(np, tmp, &npinfo->rx_np, rx) { |
| 455 | 466 | if (tip != np->local_ip) | |
| 456 | skb_reset_network_header(send_skb); | 467 | continue; |
| 457 | arp = (struct arphdr *) skb_put(send_skb, size); | ||
| 458 | send_skb->dev = skb->dev; | ||
| 459 | send_skb->protocol = htons(ETH_P_ARP); | ||
| 460 | 468 | ||
| 461 | /* Fill the device header for the ARP frame */ | 469 | send_skb = find_skb(np, size + LL_ALLOCATED_SPACE(np->dev), |
| 462 | if (dev_hard_header(send_skb, skb->dev, ptype, | 470 | LL_RESERVED_SPACE(np->dev)); |
| 463 | sha, np->dev->dev_addr, | 471 | if (!send_skb) |
| 464 | send_skb->len) < 0) { | 472 | continue; |
| 465 | kfree_skb(send_skb); | ||
| 466 | return; | ||
| 467 | } | ||
| 468 | 473 | ||
| 469 | /* | 474 | skb_reset_network_header(send_skb); |
| 470 | * Fill out the arp protocol part. | 475 | arp = (struct arphdr *) skb_put(send_skb, size); |
| 471 | * | 476 | send_skb->dev = skb->dev; |
| 472 | * we only support ethernet device type, | 477 | send_skb->protocol = htons(ETH_P_ARP); |
| 473 | * which (according to RFC 1390) should always equal 1 (Ethernet). | ||
| 474 | */ | ||
| 475 | 478 | ||
| 476 | arp->ar_hrd = htons(np->dev->type); | 479 | /* Fill the device header for the ARP frame */ |
| 477 | arp->ar_pro = htons(ETH_P_IP); | 480 | if (dev_hard_header(send_skb, skb->dev, ptype, |
| 478 | arp->ar_hln = np->dev->addr_len; | 481 | sha, np->dev->dev_addr, |
| 479 | arp->ar_pln = 4; | 482 | send_skb->len) < 0) { |
| 480 | arp->ar_op = htons(type); | 483 | kfree_skb(send_skb); |
| 484 | continue; | ||
| 485 | } | ||
| 481 | 486 | ||
| 482 | arp_ptr=(unsigned char *)(arp + 1); | 487 | /* |
| 483 | memcpy(arp_ptr, np->dev->dev_addr, np->dev->addr_len); | 488 | * Fill out the arp protocol part. |
| 484 | arp_ptr += np->dev->addr_len; | 489 | * |
| 485 | memcpy(arp_ptr, &tip, 4); | 490 | * we only support ethernet device type, |
| 486 | arp_ptr += 4; | 491 | * which (according to RFC 1390) should |
| 487 | memcpy(arp_ptr, sha, np->dev->addr_len); | 492 | * always equal 1 (Ethernet). |
| 488 | arp_ptr += np->dev->addr_len; | 493 | */ |
| 489 | memcpy(arp_ptr, &sip, 4); | ||
| 490 | 494 | ||
| 491 | netpoll_send_skb(np, send_skb); | 495 | arp->ar_hrd = htons(np->dev->type); |
| 496 | arp->ar_pro = htons(ETH_P_IP); | ||
| 497 | arp->ar_hln = np->dev->addr_len; | ||
| 498 | arp->ar_pln = 4; | ||
| 499 | arp->ar_op = htons(type); | ||
| 500 | |||
| 501 | arp_ptr = (unsigned char *)(arp + 1); | ||
| 502 | memcpy(arp_ptr, np->dev->dev_addr, np->dev->addr_len); | ||
| 503 | arp_ptr += np->dev->addr_len; | ||
| 504 | memcpy(arp_ptr, &tip, 4); | ||
| 505 | arp_ptr += 4; | ||
| 506 | memcpy(arp_ptr, sha, np->dev->addr_len); | ||
| 507 | arp_ptr += np->dev->addr_len; | ||
| 508 | memcpy(arp_ptr, &sip, 4); | ||
| 509 | |||
| 510 | netpoll_send_skb(np, send_skb); | ||
| 511 | |||
| 512 | /* If there are several rx_hooks for the same address, | ||
| 513 | we're fine with sending a single reply */ | ||
| 514 | break; | ||
| 515 | } | ||
| 516 | spin_unlock_irqrestore(&npinfo->rx_lock, flags); | ||
| 492 | } | 517 | } |
| 493 | 518 | ||
| 494 | int __netpoll_rx(struct sk_buff *skb) | 519 | int __netpoll_rx(struct sk_buff *skb) |
| 495 | { | 520 | { |
| 496 | int proto, len, ulen; | 521 | int proto, len, ulen; |
| 522 | int hits = 0; | ||
| 497 | struct iphdr *iph; | 523 | struct iphdr *iph; |
| 498 | struct udphdr *uh; | 524 | struct udphdr *uh; |
| 499 | struct netpoll_info *npi = skb->dev->npinfo; | 525 | struct netpoll_info *npinfo = skb->dev->npinfo; |
| 500 | struct netpoll *np = npi->rx_np; | 526 | struct netpoll *np, *tmp; |
| 501 | 527 | ||
| 502 | if (!np) | 528 | if (list_empty(&npinfo->rx_np)) |
| 503 | goto out; | 529 | goto out; |
| 530 | |||
| 504 | if (skb->dev->type != ARPHRD_ETHER) | 531 | if (skb->dev->type != ARPHRD_ETHER) |
| 505 | goto out; | 532 | goto out; |
| 506 | 533 | ||
| 507 | /* check if netpoll clients need ARP */ | 534 | /* check if netpoll clients need ARP */ |
| 508 | if (skb->protocol == htons(ETH_P_ARP) && | 535 | if (skb->protocol == htons(ETH_P_ARP) && |
| 509 | atomic_read(&trapped)) { | 536 | atomic_read(&trapped)) { |
| 510 | skb_queue_tail(&npi->arp_tx, skb); | 537 | skb_queue_tail(&npinfo->arp_tx, skb); |
| 511 | return 1; | 538 | return 1; |
| 512 | } | 539 | } |
| 513 | 540 | ||
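The netpoll hunks turn npinfo->rx_np from a single pointer into a list (each client chained via np->rx), so several netpoll clients can receive on one device; arp_reply() and __netpoll_rx() walk the list under rx_lock and match each client by address and port, with the hits counter deciding whether the skb was consumed. The traversal shape, as a sketch of the arp_reply() case:

        spin_lock_irqsave(&npinfo->rx_lock, flags);
        list_for_each_entry_safe(np, tmp, &npinfo->rx_np, rx) {
                if (tip != np->local_ip)
                        continue;
                /* build and send one reply for this client ... */
                break;  /* one reply per address suffices */
        }
        spin_unlock_irqrestore(&npinfo->rx_lock, flags);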
| @@ -551,16 +578,23 @@ int __netpoll_rx(struct sk_buff *skb) | |||
| 551 | goto out; | 578 | goto out; |
| 552 | if (checksum_udp(skb, uh, ulen, iph->saddr, iph->daddr)) | 579 | if (checksum_udp(skb, uh, ulen, iph->saddr, iph->daddr)) |
| 553 | goto out; | 580 | goto out; |
| 554 | if (np->local_ip && np->local_ip != iph->daddr) | ||
| 555 | goto out; | ||
| 556 | if (np->remote_ip && np->remote_ip != iph->saddr) | ||
| 557 | goto out; | ||
| 558 | if (np->local_port && np->local_port != ntohs(uh->dest)) | ||
| 559 | goto out; | ||
| 560 | 581 | ||
| 561 | np->rx_hook(np, ntohs(uh->source), | 582 | list_for_each_entry_safe(np, tmp, &npinfo->rx_np, rx) { |
| 562 | (char *)(uh+1), | 583 | if (np->local_ip && np->local_ip != iph->daddr) |
| 563 | ulen - sizeof(struct udphdr)); | 584 | continue; |
| 585 | if (np->remote_ip && np->remote_ip != iph->saddr) | ||
| 586 | continue; | ||
| 587 | if (np->local_port && np->local_port != ntohs(uh->dest)) | ||
| 588 | continue; | ||
| 589 | |||
| 590 | np->rx_hook(np, ntohs(uh->source), | ||
| 591 | (char *)(uh+1), | ||
| 592 | ulen - sizeof(struct udphdr)); | ||
| 593 | hits++; | ||
| 594 | } | ||
| 595 | |||
| 596 | if (!hits) | ||
| 597 | goto out; | ||
| 564 | 598 | ||
| 565 | kfree_skb(skb); | 599 | kfree_skb(skb); |
| 566 | return 1; | 600 | return 1; |
| @@ -580,7 +614,7 @@ void netpoll_print_options(struct netpoll *np) | |||
| 580 | np->name, np->local_port); | 614 | np->name, np->local_port); |
| 581 | printk(KERN_INFO "%s: local IP %pI4\n", | 615 | printk(KERN_INFO "%s: local IP %pI4\n", |
| 582 | np->name, &np->local_ip); | 616 | np->name, &np->local_ip); |
| 583 | printk(KERN_INFO "%s: interface %s\n", | 617 | printk(KERN_INFO "%s: interface '%s'\n", |
| 584 | np->name, np->dev_name); | 618 | np->name, np->dev_name); |
| 585 | printk(KERN_INFO "%s: remote port %d\n", | 619 | printk(KERN_INFO "%s: remote port %d\n", |
| 586 | np->name, np->remote_port); | 620 | np->name, np->remote_port); |
| @@ -627,6 +661,9 @@ int netpoll_parse_options(struct netpoll *np, char *opt) | |||
| 627 | if ((delim = strchr(cur, '@')) == NULL) | 661 | if ((delim = strchr(cur, '@')) == NULL) |
| 628 | goto parse_failed; | 662 | goto parse_failed; |
| 629 | *delim = 0; | 663 | *delim = 0; |
| 664 | if (*cur == ' ' || *cur == '\t') | ||
| 665 | printk(KERN_INFO "%s: warning: whitespace" | ||
| 666 | "is not allowed\n", np->name); | ||
| 630 | np->remote_port = simple_strtol(cur, NULL, 10); | 667 | np->remote_port = simple_strtol(cur, NULL, 10); |
| 631 | cur = delim; | 668 | cur = delim; |
| 632 | } | 669 | } |
| @@ -674,7 +711,7 @@ int netpoll_parse_options(struct netpoll *np, char *opt) | |||
| 674 | return 0; | 711 | return 0; |
| 675 | 712 | ||
| 676 | parse_failed: | 713 | parse_failed: |
| 677 | printk(KERN_INFO "%s: couldn't parse config at %s!\n", | 714 | printk(KERN_INFO "%s: couldn't parse config at '%s'!\n", |
| 678 | np->name, cur); | 715 | np->name, cur); |
| 679 | return -1; | 716 | return -1; |
| 680 | } | 717 | } |
| @@ -684,6 +721,7 @@ int netpoll_setup(struct netpoll *np) | |||
| 684 | struct net_device *ndev = NULL; | 721 | struct net_device *ndev = NULL; |
| 685 | struct in_device *in_dev; | 722 | struct in_device *in_dev; |
| 686 | struct netpoll_info *npinfo; | 723 | struct netpoll_info *npinfo; |
| 724 | struct netpoll *npe, *tmp; | ||
| 687 | unsigned long flags; | 725 | unsigned long flags; |
| 688 | int err; | 726 | int err; |
| 689 | 727 | ||
| @@ -700,11 +738,11 @@ int netpoll_setup(struct netpoll *np) | |||
| 700 | npinfo = kmalloc(sizeof(*npinfo), GFP_KERNEL); | 738 | npinfo = kmalloc(sizeof(*npinfo), GFP_KERNEL); |
| 701 | if (!npinfo) { | 739 | if (!npinfo) { |
| 702 | err = -ENOMEM; | 740 | err = -ENOMEM; |
| 703 | goto release; | 741 | goto put; |
| 704 | } | 742 | } |
| 705 | 743 | ||
| 706 | npinfo->rx_flags = 0; | 744 | npinfo->rx_flags = 0; |
| 707 | npinfo->rx_np = NULL; | 745 | INIT_LIST_HEAD(&npinfo->rx_np); |
| 708 | 746 | ||
| 709 | spin_lock_init(&npinfo->rx_lock); | 747 | spin_lock_init(&npinfo->rx_lock); |
| 710 | skb_queue_head_init(&npinfo->arp_tx); | 748 | skb_queue_head_init(&npinfo->arp_tx); |
| @@ -785,7 +823,7 @@ int netpoll_setup(struct netpoll *np) | |||
| 785 | if (np->rx_hook) { | 823 | if (np->rx_hook) { |
| 786 | spin_lock_irqsave(&npinfo->rx_lock, flags); | 824 | spin_lock_irqsave(&npinfo->rx_lock, flags); |
| 787 | npinfo->rx_flags |= NETPOLL_RX_ENABLED; | 825 | npinfo->rx_flags |= NETPOLL_RX_ENABLED; |
| 788 | npinfo->rx_np = np; | 826 | list_add_tail(&np->rx, &npinfo->rx_np); |
| 789 | spin_unlock_irqrestore(&npinfo->rx_lock, flags); | 827 | spin_unlock_irqrestore(&npinfo->rx_lock, flags); |
| 790 | } | 828 | } |
| 791 | 829 | ||
| @@ -801,9 +839,16 @@ int netpoll_setup(struct netpoll *np) | |||
| 801 | return 0; | 839 | return 0; |
| 802 | 840 | ||
| 803 | release: | 841 | release: |
| 804 | if (!ndev->npinfo) | 842 | if (!ndev->npinfo) { |
| 843 | spin_lock_irqsave(&npinfo->rx_lock, flags); | ||
| 844 | list_for_each_entry_safe(npe, tmp, &npinfo->rx_np, rx) { | ||
| 845 | npe->dev = NULL; | ||
| 846 | } | ||
| 847 | spin_unlock_irqrestore(&npinfo->rx_lock, flags); | ||
| 848 | |||
| 805 | kfree(npinfo); | 849 | kfree(npinfo); |
| 806 | np->dev = NULL; | 850 | } |
| 851 | put: | ||
| 807 | dev_put(ndev); | 852 | dev_put(ndev); |
| 808 | return err; | 853 | return err; |
| 809 | } | 854 | } |
| @@ -823,10 +868,11 @@ void netpoll_cleanup(struct netpoll *np) | |||
| 823 | if (np->dev) { | 868 | if (np->dev) { |
| 824 | npinfo = np->dev->npinfo; | 869 | npinfo = np->dev->npinfo; |
| 825 | if (npinfo) { | 870 | if (npinfo) { |
| 826 | if (npinfo->rx_np == np) { | 871 | if (!list_empty(&npinfo->rx_np)) { |
| 827 | spin_lock_irqsave(&npinfo->rx_lock, flags); | 872 | spin_lock_irqsave(&npinfo->rx_lock, flags); |
| 828 | npinfo->rx_np = NULL; | 873 | list_del(&np->rx); |
| 829 | npinfo->rx_flags &= ~NETPOLL_RX_ENABLED; | 874 | if (list_empty(&npinfo->rx_np)) |
| 875 | npinfo->rx_flags &= ~NETPOLL_RX_ENABLED; | ||
| 830 | spin_unlock_irqrestore(&npinfo->rx_lock, flags); | 876 | spin_unlock_irqrestore(&npinfo->rx_lock, flags); |
| 831 | } | 877 | } |
| 832 | 878 | ||
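On teardown, netpoll_cleanup() removes only its own entry and clears NETPOLL_RX_ENABLED solely when the last listener is gone, the usual last-one-out idiom:

        spin_lock_irqsave(&npinfo->rx_lock, flags);
        list_del(&np->rx);
        if (list_empty(&npinfo->rx_np))
                npinfo->rx_flags &= ~NETPOLL_RX_ENABLED;
        spin_unlock_irqrestore(&npinfo->rx_lock, flags);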
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 2e692afdc55d..43923811bd6a 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
| @@ -2188,12 +2188,13 @@ static inline int f_pick(struct pktgen_dev *pkt_dev) | |||
| 2188 | /* If there was already an IPSEC SA, we keep it as is, else | 2188 | /* If there was already an IPSEC SA, we keep it as is, else |
| 2189 | * we go look for it ... | 2189 | * we go look for it ... |
| 2190 | */ | 2190 | */ |
| 2191 | #define DUMMY_MARK 0 | ||
| 2191 | static void get_ipsec_sa(struct pktgen_dev *pkt_dev, int flow) | 2192 | static void get_ipsec_sa(struct pktgen_dev *pkt_dev, int flow) |
| 2192 | { | 2193 | { |
| 2193 | struct xfrm_state *x = pkt_dev->flows[flow].x; | 2194 | struct xfrm_state *x = pkt_dev->flows[flow].x; |
| 2194 | if (!x) { | 2195 | if (!x) { |
| 2195 | /*slow path: we dont already have xfrm_state*/ | 2196 | /*slow path: we dont already have xfrm_state*/ |
| 2196 | x = xfrm_stateonly_find(&init_net, | 2197 | x = xfrm_stateonly_find(&init_net, DUMMY_MARK, |
| 2197 | (xfrm_address_t *)&pkt_dev->cur_daddr, | 2198 | (xfrm_address_t *)&pkt_dev->cur_daddr, |
| 2198 | (xfrm_address_t *)&pkt_dev->cur_saddr, | 2199 | (xfrm_address_t *)&pkt_dev->cur_saddr, |
| 2199 | AF_INET, | 2200 | AF_INET, |
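The pktgen hunk tracks an xfrm API change: xfrm_stateonly_find() grew a mark argument, and pktgen, which does not use marks, passes 0 via DUMMY_MARK. The full call presumably keeps pktgen's existing trailing mode/proto arguments, which this hunk cuts off (sketch):

        x = xfrm_stateonly_find(&init_net, DUMMY_MARK,
                                (xfrm_address_t *)&pkt_dev->cur_daddr,
                                (xfrm_address_t *)&pkt_dev->cur_saddr,
                                AF_INET,
                                pkt_dev->ipsmode,
                                pkt_dev->ipsproto);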
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 794bcb897ff0..4568120d8533 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
| @@ -35,6 +35,7 @@ | |||
| 35 | #include <linux/security.h> | 35 | #include <linux/security.h> |
| 36 | #include <linux/mutex.h> | 36 | #include <linux/mutex.h> |
| 37 | #include <linux/if_addr.h> | 37 | #include <linux/if_addr.h> |
| 38 | #include <linux/pci.h> | ||
| 38 | 39 | ||
| 39 | #include <asm/uaccess.h> | 40 | #include <asm/uaccess.h> |
| 40 | #include <asm/system.h> | 41 | #include <asm/system.h> |
| @@ -89,6 +90,14 @@ int rtnl_is_locked(void) | |||
| 89 | } | 90 | } |
| 90 | EXPORT_SYMBOL(rtnl_is_locked); | 91 | EXPORT_SYMBOL(rtnl_is_locked); |
| 91 | 92 | ||
| 93 | #ifdef CONFIG_PROVE_LOCKING | ||
| 94 | int lockdep_rtnl_is_held(void) | ||
| 95 | { | ||
| 96 | return lockdep_is_held(&rtnl_mutex); | ||
| 97 | } | ||
| 98 | EXPORT_SYMBOL(lockdep_rtnl_is_held); | ||
| 99 | #endif /* #ifdef CONFIG_PROVE_LOCKING */ | ||
| 100 | |||
| 92 | static struct rtnl_link *rtnl_msg_handlers[NPROTO]; | 101 | static struct rtnl_link *rtnl_msg_handlers[NPROTO]; |
| 93 | 102 | ||
| 94 | static inline int rtm_msgindex(int msgtype) | 103 | static inline int rtm_msgindex(int msgtype) |
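lockdep_rtnl_is_held() gives the RCU debug checks a way to accept either protection regime for RTNL-guarded pointers. A typical consumer would look like this sketch (the field name is hypothetical):

        ptr = rcu_dereference_check(dev->some_rtnl_protected,
                                    rcu_read_lock_held() ||
                                    lockdep_rtnl_is_held());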
| @@ -548,6 +557,19 @@ static void set_operstate(struct net_device *dev, unsigned char transition) | |||
| 548 | } | 557 | } |
| 549 | } | 558 | } |
| 550 | 559 | ||
| 560 | static unsigned int rtnl_dev_combine_flags(const struct net_device *dev, | ||
| 561 | const struct ifinfomsg *ifm) | ||
| 562 | { | ||
| 563 | unsigned int flags = ifm->ifi_flags; | ||
| 564 | |||
| 565 | /* bugwards compatibility: ifi_change == 0 is treated as ~0 */ | ||
| 566 | if (ifm->ifi_change) | ||
| 567 | flags = (flags & ifm->ifi_change) | | ||
| 568 | (dev->flags & ~ifm->ifi_change); | ||
| 569 | |||
| 570 | return flags; | ||
| 571 | } | ||
| 572 | |||
| 551 | static void copy_rtnl_link_stats(struct rtnl_link_stats *a, | 573 | static void copy_rtnl_link_stats(struct rtnl_link_stats *a, |
| 552 | const struct net_device_stats *b) | 574 | const struct net_device_stats *b) |
| 553 | { | 575 | { |
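rtnl_dev_combine_flags() factors out the flag-merge rule so do_setlink() and the new rtnl_configure_link() share it: bits selected by ifi_change come from the request, the rest keep the device's current value, and ifi_change == 0 retains its historical meaning of "change everything". A standalone model that compiles as plain C:

        #include <stdio.h>

        static unsigned int combine(unsigned int dev_flags,
                                    unsigned int ifi_flags,
                                    unsigned int ifi_change)
        {
                if (!ifi_change)        /* legacy: 0 selects all bits */
                        return ifi_flags;
                return (ifi_flags & ifi_change) | (dev_flags & ~ifi_change);
        }

        int main(void)
        {
                /* set only IFF_UP (0x1); the device's other bits survive */
                printf("%#x\n", combine(0x1002, 0x1, 0x1));     /* 0x1003 */
                return 0;
        }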
| @@ -580,6 +602,15 @@ static void copy_rtnl_link_stats(struct rtnl_link_stats *a, | |||
| 580 | a->tx_compressed = b->tx_compressed; | 602 | a->tx_compressed = b->tx_compressed; |
| 581 | }; | 603 | }; |
| 582 | 604 | ||
| 605 | static inline int rtnl_vfinfo_size(const struct net_device *dev) | ||
| 606 | { | ||
| 607 | if (dev->dev.parent && dev_is_pci(dev->dev.parent)) | ||
| 608 | return dev_num_vf(dev->dev.parent) * | ||
| 609 | sizeof(struct ifla_vf_info); | ||
| 610 | else | ||
| 611 | return 0; | ||
| 612 | } | ||
| 613 | |||
| 583 | static inline size_t if_nlmsg_size(const struct net_device *dev) | 614 | static inline size_t if_nlmsg_size(const struct net_device *dev) |
| 584 | { | 615 | { |
| 585 | return NLMSG_ALIGN(sizeof(struct ifinfomsg)) | 616 | return NLMSG_ALIGN(sizeof(struct ifinfomsg)) |
| @@ -597,6 +628,8 @@ static inline size_t if_nlmsg_size(const struct net_device *dev) | |||
| 597 | + nla_total_size(4) /* IFLA_MASTER */ | 628 | + nla_total_size(4) /* IFLA_MASTER */ |
| 598 | + nla_total_size(1) /* IFLA_OPERSTATE */ | 629 | + nla_total_size(1) /* IFLA_OPERSTATE */ |
| 599 | + nla_total_size(1) /* IFLA_LINKMODE */ | 630 | + nla_total_size(1) /* IFLA_LINKMODE */ |
| 631 | + nla_total_size(4) /* IFLA_NUM_VF */ | ||
| 632 | + nla_total_size(rtnl_vfinfo_size(dev)) /* IFLA_VFINFO */ | ||
| 600 | + rtnl_link_get_size(dev); /* IFLA_LINKINFO */ | 633 | + rtnl_link_get_size(dev); /* IFLA_LINKINFO */ |
| 601 | } | 634 | } |
| 602 | 635 | ||
| @@ -665,6 +698,17 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev, | |||
| 665 | stats = dev_get_stats(dev); | 698 | stats = dev_get_stats(dev); |
| 666 | copy_rtnl_link_stats(nla_data(attr), stats); | 699 | copy_rtnl_link_stats(nla_data(attr), stats); |
| 667 | 700 | ||
| 701 | if (dev->netdev_ops->ndo_get_vf_config && dev->dev.parent) { | ||
| 702 | int i; | ||
| 703 | struct ifla_vf_info ivi; | ||
| 704 | |||
| 705 | NLA_PUT_U32(skb, IFLA_NUM_VF, dev_num_vf(dev->dev.parent)); | ||
| 706 | for (i = 0; i < dev_num_vf(dev->dev.parent); i++) { | ||
| 707 | if (dev->netdev_ops->ndo_get_vf_config(dev, i, &ivi)) | ||
| 708 | break; | ||
| 709 | NLA_PUT(skb, IFLA_VFINFO, sizeof(ivi), &ivi); | ||
| 710 | } | ||
| 711 | } | ||
| 668 | if (dev->rtnl_link_ops) { | 712 | if (dev->rtnl_link_ops) { |
| 669 | if (rtnl_link_fill(skb, dev) < 0) | 713 | if (rtnl_link_fill(skb, dev) < 0) |
| 670 | goto nla_put_failure; | 714 | goto nla_put_failure; |
| @@ -725,6 +769,12 @@ const struct nla_policy ifla_policy[IFLA_MAX+1] = { | |||
| 725 | [IFLA_LINKINFO] = { .type = NLA_NESTED }, | 769 | [IFLA_LINKINFO] = { .type = NLA_NESTED }, |
| 726 | [IFLA_NET_NS_PID] = { .type = NLA_U32 }, | 770 | [IFLA_NET_NS_PID] = { .type = NLA_U32 }, |
| 727 | [IFLA_IFALIAS] = { .type = NLA_STRING, .len = IFALIASZ-1 }, | 771 | [IFLA_IFALIAS] = { .type = NLA_STRING, .len = IFALIASZ-1 }, |
| 772 | [IFLA_VF_MAC] = { .type = NLA_BINARY, | ||
| 773 | .len = sizeof(struct ifla_vf_mac) }, | ||
| 774 | [IFLA_VF_VLAN] = { .type = NLA_BINARY, | ||
| 775 | .len = sizeof(struct ifla_vf_vlan) }, | ||
| 776 | [IFLA_VF_TX_RATE] = { .type = NLA_BINARY, | ||
| 777 | .len = sizeof(struct ifla_vf_tx_rate) }, | ||
| 728 | }; | 778 | }; |
| 729 | EXPORT_SYMBOL(ifla_policy); | 779 | EXPORT_SYMBOL(ifla_policy); |
| 730 | 780 | ||
| @@ -875,13 +925,7 @@ static int do_setlink(struct net_device *dev, struct ifinfomsg *ifm, | |||
| 875 | } | 925 | } |
| 876 | 926 | ||
| 877 | if (ifm->ifi_flags || ifm->ifi_change) { | 927 | if (ifm->ifi_flags || ifm->ifi_change) { |
| 878 | unsigned int flags = ifm->ifi_flags; | 928 | err = dev_change_flags(dev, rtnl_dev_combine_flags(dev, ifm)); |
| 879 | |||
| 880 | /* bugwards compatibility: ifi_change == 0 is treated as ~0 */ | ||
| 881 | if (ifm->ifi_change) | ||
| 882 | flags = (flags & ifm->ifi_change) | | ||
| 883 | (dev->flags & ~ifm->ifi_change); | ||
| 884 | err = dev_change_flags(dev, flags); | ||
| 885 | if (err < 0) | 929 | if (err < 0) |
| 886 | goto errout; | 930 | goto errout; |
| 887 | } | 931 | } |
| @@ -898,6 +942,41 @@ static int do_setlink(struct net_device *dev, struct ifinfomsg *ifm, | |||
| 898 | write_unlock_bh(&dev_base_lock); | 942 | write_unlock_bh(&dev_base_lock); |
| 899 | } | 943 | } |
| 900 | 944 | ||
| 945 | if (tb[IFLA_VF_MAC]) { | ||
| 946 | struct ifla_vf_mac *ivm; | ||
| 947 | ivm = nla_data(tb[IFLA_VF_MAC]); | ||
| 948 | err = -EOPNOTSUPP; | ||
| 949 | if (ops->ndo_set_vf_mac) | ||
| 950 | err = ops->ndo_set_vf_mac(dev, ivm->vf, ivm->mac); | ||
| 951 | if (err < 0) | ||
| 952 | goto errout; | ||
| 953 | modified = 1; | ||
| 954 | } | ||
| 955 | |||
| 956 | if (tb[IFLA_VF_VLAN]) { | ||
| 957 | struct ifla_vf_vlan *ivv; | ||
| 958 | ivv = nla_data(tb[IFLA_VF_VLAN]); | ||
| 959 | err = -EOPNOTSUPP; | ||
| 960 | if (ops->ndo_set_vf_vlan) | ||
| 961 | err = ops->ndo_set_vf_vlan(dev, ivv->vf, | ||
| 962 | ivv->vlan, | ||
| 963 | ivv->qos); | ||
| 964 | if (err < 0) | ||
| 965 | goto errout; | ||
| 966 | modified = 1; | ||
| 967 | } | ||
| 968 | err = 0; | ||
| 969 | |||
| 970 | if (tb[IFLA_VF_TX_RATE]) { | ||
| 971 | struct ifla_vf_tx_rate *ivt; | ||
| 972 | ivt = nla_data(tb[IFLA_VF_TX_RATE]); | ||
| 973 | err = -EOPNOTSUPP; | ||
| 974 | if (ops->ndo_set_vf_tx_rate) | ||
| 975 | err = ops->ndo_set_vf_tx_rate(dev, ivt->vf, ivt->rate); | ||
| 976 | if (err < 0) | ||
| 977 | goto errout; | ||
| 978 | modified = 1; | ||
| 979 | } | ||
| 901 | err = 0; | 980 | err = 0; |
| 902 | 981 | ||
| 903 | errout: | 982 | errout: |
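The IFLA_VF_* attributes carry fixed-size binary structs (see the ifla_policy entries above) that do_setlink() forwards to the new ndo_set_vf_mac/vlan/tx_rate hooks, defaulting to -EOPNOTSUPP when a driver lacks one. A hedged sketch of a driver-side hook (foo_* names and helpers hypothetical):

        static int foo_ndo_set_vf_mac(struct net_device *dev, int vf, u8 *mac)
        {
                struct foo_priv *priv = netdev_priv(dev);

                if (vf >= priv->num_vfs || !is_valid_ether_addr(mac))
                        return -EINVAL;
                memcpy(priv->vf_mac[vf], mac, ETH_ALEN);
                return foo_write_vf_mac_to_hw(priv, vf);        /* hypothetical */
        }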
| @@ -989,6 +1068,26 @@ static int rtnl_dellink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) | |||
| 989 | return 0; | 1068 | return 0; |
| 990 | } | 1069 | } |
| 991 | 1070 | ||
| 1071 | int rtnl_configure_link(struct net_device *dev, const struct ifinfomsg *ifm) | ||
| 1072 | { | ||
| 1073 | unsigned int old_flags; | ||
| 1074 | int err; | ||
| 1075 | |||
| 1076 | old_flags = dev->flags; | ||
| 1077 | if (ifm && (ifm->ifi_flags || ifm->ifi_change)) { | ||
| 1078 | err = __dev_change_flags(dev, rtnl_dev_combine_flags(dev, ifm)); | ||
| 1079 | if (err < 0) | ||
| 1080 | return err; | ||
| 1081 | } | ||
| 1082 | |||
| 1083 | dev->rtnl_link_state = RTNL_LINK_INITIALIZED; | ||
| 1084 | rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U); | ||
| 1085 | |||
| 1086 | __dev_notify_flags(dev, old_flags); | ||
| 1087 | return 0; | ||
| 1088 | } | ||
| 1089 | EXPORT_SYMBOL(rtnl_configure_link); | ||
| 1090 | |||
| 992 | struct net_device *rtnl_create_link(struct net *src_net, struct net *net, | 1091 | struct net_device *rtnl_create_link(struct net *src_net, struct net *net, |
| 993 | char *ifname, const struct rtnl_link_ops *ops, struct nlattr *tb[]) | 1092 | char *ifname, const struct rtnl_link_ops *ops, struct nlattr *tb[]) |
| 994 | { | 1093 | { |
| @@ -1010,6 +1109,7 @@ struct net_device *rtnl_create_link(struct net *src_net, struct net *net, | |||
| 1010 | 1109 | ||
| 1011 | dev_net_set(dev, net); | 1110 | dev_net_set(dev, net); |
| 1012 | dev->rtnl_link_ops = ops; | 1111 | dev->rtnl_link_ops = ops; |
| 1112 | dev->rtnl_link_state = RTNL_LINK_INITIALIZING; | ||
| 1013 | dev->real_num_tx_queues = real_num_queues; | 1113 | dev->real_num_tx_queues = real_num_queues; |
| 1014 | 1114 | ||
| 1015 | if (strchr(dev->name, '%')) { | 1115 | if (strchr(dev->name, '%')) { |
| @@ -1139,7 +1239,7 @@ replay: | |||
| 1139 | if (!(nlh->nlmsg_flags & NLM_F_CREATE)) | 1239 | if (!(nlh->nlmsg_flags & NLM_F_CREATE)) |
| 1140 | return -ENODEV; | 1240 | return -ENODEV; |
| 1141 | 1241 | ||
| 1142 | if (ifm->ifi_index || ifm->ifi_flags || ifm->ifi_change) | 1242 | if (ifm->ifi_index) |
| 1143 | return -EOPNOTSUPP; | 1243 | return -EOPNOTSUPP; |
| 1144 | if (tb[IFLA_MAP] || tb[IFLA_MASTER] || tb[IFLA_PROTINFO]) | 1244 | if (tb[IFLA_MAP] || tb[IFLA_MASTER] || tb[IFLA_PROTINFO]) |
| 1145 | return -EOPNOTSUPP; | 1245 | return -EOPNOTSUPP; |
| @@ -1170,9 +1270,15 @@ replay: | |||
| 1170 | err = ops->newlink(net, dev, tb, data); | 1270 | err = ops->newlink(net, dev, tb, data); |
| 1171 | else | 1271 | else |
| 1172 | err = register_netdevice(dev); | 1272 | err = register_netdevice(dev); |
| 1173 | if (err < 0 && !IS_ERR(dev)) | 1273 | if (err < 0 && !IS_ERR(dev)) { |
| 1174 | free_netdev(dev); | 1274 | free_netdev(dev); |
| 1275 | goto out; | ||
| 1276 | } | ||
| 1175 | 1277 | ||
| 1278 | err = rtnl_configure_link(dev, ifm); | ||
| 1279 | if (err < 0) | ||
| 1280 | unregister_netdevice(dev); | ||
| 1281 | out: | ||
| 1176 | put_net(dest_net); | 1282 | put_net(dest_net); |
| 1177 | return err; | 1283 | return err; |
| 1178 | } | 1284 | } |
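rtnl_configure_link() lets the newlink path register the device first and only afterwards apply the requested flags and announce RTM_NEWLINK; the RTNL_LINK_INITIALIZING/INITIALIZED pair keeps notifications quiet until setup succeeds, and a failing configuration unregisters the half-created device. The resulting shape of the create path, lightly simplified:

        err = ops->newlink ? ops->newlink(net, dev, tb, data)
                           : register_netdevice(dev);
        if (err < 0) {
                free_netdev(dev);
                goto out;
        }
        err = rtnl_configure_link(dev, ifm);
        if (err < 0)
                unregister_netdevice(dev);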
| @@ -1361,17 +1467,14 @@ static int rtnetlink_event(struct notifier_block *this, unsigned long event, voi | |||
| 1361 | struct net_device *dev = ptr; | 1467 | struct net_device *dev = ptr; |
| 1362 | 1468 | ||
| 1363 | switch (event) { | 1469 | switch (event) { |
| 1364 | case NETDEV_UNREGISTER: | ||
| 1365 | rtmsg_ifinfo(RTM_DELLINK, dev, ~0U); | ||
| 1366 | break; | ||
| 1367 | case NETDEV_UP: | 1470 | case NETDEV_UP: |
| 1368 | case NETDEV_DOWN: | 1471 | case NETDEV_DOWN: |
| 1369 | rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING); | 1472 | case NETDEV_PRE_UP: |
| 1370 | break; | ||
| 1371 | case NETDEV_POST_INIT: | 1473 | case NETDEV_POST_INIT: |
| 1372 | case NETDEV_REGISTER: | 1474 | case NETDEV_REGISTER: |
| 1373 | case NETDEV_CHANGE: | 1475 | case NETDEV_CHANGE: |
| 1374 | case NETDEV_GOING_DOWN: | 1476 | case NETDEV_GOING_DOWN: |
| 1477 | case NETDEV_UNREGISTER: | ||
| 1375 | case NETDEV_UNREGISTER_BATCH: | 1478 | case NETDEV_UNREGISTER_BATCH: |
| 1376 | break; | 1479 | break; |
| 1377 | default: | 1480 | default: |
| @@ -1386,7 +1489,7 @@ static struct notifier_block rtnetlink_dev_notifier = { | |||
| 1386 | }; | 1489 | }; |
| 1387 | 1490 | ||
| 1388 | 1491 | ||
| 1389 | static int rtnetlink_net_init(struct net *net) | 1492 | static int __net_init rtnetlink_net_init(struct net *net) |
| 1390 | { | 1493 | { |
| 1391 | struct sock *sk; | 1494 | struct sock *sk; |
| 1392 | sk = netlink_kernel_create(net, NETLINK_ROUTE, RTNLGRP_MAX, | 1495 | sk = netlink_kernel_create(net, NETLINK_ROUTE, RTNLGRP_MAX, |
| @@ -1397,7 +1500,7 @@ static int rtnetlink_net_init(struct net *net) | |||
| 1397 | return 0; | 1500 | return 0; |
| 1398 | } | 1501 | } |
| 1399 | 1502 | ||
| 1400 | static void rtnetlink_net_exit(struct net *net) | 1503 | static void __net_exit rtnetlink_net_exit(struct net *net) |
| 1401 | { | 1504 | { |
| 1402 | netlink_kernel_release(net->rtnl); | 1505 | netlink_kernel_release(net->rtnl); |
| 1403 | net->rtnl = NULL; | 1506 | net->rtnl = NULL; |
diff --git a/net/core/scm.c b/net/core/scm.c
index b7ba91b074b3..9b264634acfd 100644
--- a/net/core/scm.c
+++ b/net/core/scm.c
| @@ -156,6 +156,8 @@ int __scm_send(struct socket *sock, struct msghdr *msg, struct scm_cookie *p) | |||
| 156 | switch (cmsg->cmsg_type) | 156 | switch (cmsg->cmsg_type) |
| 157 | { | 157 | { |
| 158 | case SCM_RIGHTS: | 158 | case SCM_RIGHTS: |
| 159 | if (!sock->ops || sock->ops->family != PF_UNIX) | ||
| 160 | goto error; | ||
| 159 | err=scm_fp_copy(cmsg, &p->fp); | 161 | err=scm_fp_copy(cmsg, &p->fp); |
| 160 | if (err<0) | 162 | if (err<0) |
| 161 | goto error; | 163 | goto error; |
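The scm.c hunk confines SCM_RIGHTS to PF_UNIX sockets, so descriptor passing fails early (with __scm_send()'s default -EINVAL) anywhere else. For reference, the legitimate sender side builds the control message as in this userspace sketch (build_fd_cmsg is a hypothetical helper; buf must be CMSG_SPACE(sizeof(int)) bytes):

        #include <string.h>
        #include <sys/socket.h>

        static void build_fd_cmsg(struct msghdr *msg, char *buf,
                                  size_t buflen, int fd_to_pass)
        {
                struct cmsghdr *cmsg;

                memset(msg, 0, sizeof(*msg));
                msg->msg_control = buf;
                msg->msg_controllen = buflen;
                cmsg = CMSG_FIRSTHDR(msg);
                cmsg->cmsg_level = SOL_SOCKET;
                cmsg->cmsg_type = SCM_RIGHTS;
                cmsg->cmsg_len = CMSG_LEN(sizeof(int));
                memcpy(CMSG_DATA(cmsg), &fd_to_pass, sizeof(int));
        }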
diff --git a/net/core/sock.c b/net/core/sock.c
index e1f6f225f012..c5812bbc2cc9 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
| @@ -340,8 +340,12 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested) | |||
| 340 | rc = sk_backlog_rcv(sk, skb); | 340 | rc = sk_backlog_rcv(sk, skb); |
| 341 | 341 | ||
| 342 | mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_); | 342 | mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_); |
| 343 | } else | 343 | } else if (sk_add_backlog(sk, skb)) { |
| 344 | sk_add_backlog(sk, skb); | 344 | bh_unlock_sock(sk); |
| 345 | atomic_inc(&sk->sk_drops); | ||
| 346 | goto discard_and_relse; | ||
| 347 | } | ||
| 348 | |||
| 345 | bh_unlock_sock(sk); | 349 | bh_unlock_sock(sk); |
| 346 | out: | 350 | out: |
| 347 | sock_put(sk); | 351 | sock_put(sk); |
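sk_add_backlog() can now refuse an skb once the backlog passes sk->sk_backlog.limit (initialized further down to twice sk_rcvbuf), and sk_receive_skb() turns that into an accounted drop rather than queueing without bound; __release_sock() zeroes the length afterwards so a flood cannot keep the loop alive forever. A standalone model of the admission rule:

        #include <stdbool.h>

        struct backlog { unsigned int len, limit; };

        /* Admit an skb of the given truesize, or signal a drop. */
        static bool backlog_admit(struct backlog *b, unsigned int truesize)
        {
                if (b->len + truesize > b->limit)
                        return false;   /* caller bumps sk_drops */
                b->len += truesize;
                return true;
        }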
| @@ -741,7 +745,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname, | |||
| 741 | struct timeval tm; | 745 | struct timeval tm; |
| 742 | } v; | 746 | } v; |
| 743 | 747 | ||
| 744 | unsigned int lv = sizeof(int); | 748 | int lv = sizeof(int); |
| 745 | int len; | 749 | int len; |
| 746 | 750 | ||
| 747 | if (get_user(len, optlen)) | 751 | if (get_user(len, optlen)) |
| @@ -1073,7 +1077,8 @@ static void __sk_free(struct sock *sk) | |||
| 1073 | if (sk->sk_destruct) | 1077 | if (sk->sk_destruct) |
| 1074 | sk->sk_destruct(sk); | 1078 | sk->sk_destruct(sk); |
| 1075 | 1079 | ||
| 1076 | filter = rcu_dereference(sk->sk_filter); | 1080 | filter = rcu_dereference_check(sk->sk_filter, |
| 1081 | atomic_read(&sk->sk_wmem_alloc) == 0); | ||
| 1077 | if (filter) { | 1082 | if (filter) { |
| 1078 | sk_filter_uncharge(sk, filter); | 1083 | sk_filter_uncharge(sk, filter); |
| 1079 | rcu_assign_pointer(sk->sk_filter, NULL); | 1084 | rcu_assign_pointer(sk->sk_filter, NULL); |
| @@ -1138,6 +1143,7 @@ struct sock *sk_clone(const struct sock *sk, const gfp_t priority) | |||
| 1138 | sock_lock_init(newsk); | 1143 | sock_lock_init(newsk); |
| 1139 | bh_lock_sock(newsk); | 1144 | bh_lock_sock(newsk); |
| 1140 | newsk->sk_backlog.head = newsk->sk_backlog.tail = NULL; | 1145 | newsk->sk_backlog.head = newsk->sk_backlog.tail = NULL; |
| 1146 | newsk->sk_backlog.len = 0; | ||
| 1141 | 1147 | ||
| 1142 | atomic_set(&newsk->sk_rmem_alloc, 0); | 1148 | atomic_set(&newsk->sk_rmem_alloc, 0); |
| 1143 | /* | 1149 | /* |
| @@ -1541,6 +1547,12 @@ static void __release_sock(struct sock *sk) | |||
| 1541 | 1547 | ||
| 1542 | bh_lock_sock(sk); | 1548 | bh_lock_sock(sk); |
| 1543 | } while ((skb = sk->sk_backlog.head) != NULL); | 1549 | } while ((skb = sk->sk_backlog.head) != NULL); |
| 1550 | |||
| 1551 | /* | ||
| 1552 | * Doing the zeroing here guarantees we cannot loop forever | ||
| 1553 | * while a wild producer attempts to flood us. | ||
| 1554 | */ | ||
| 1555 | sk->sk_backlog.len = 0; | ||
| 1544 | } | 1556 | } |
| 1545 | 1557 | ||
| 1546 | /** | 1558 | /** |
| @@ -1873,6 +1885,7 @@ void sock_init_data(struct socket *sock, struct sock *sk) | |||
| 1873 | sk->sk_allocation = GFP_KERNEL; | 1885 | sk->sk_allocation = GFP_KERNEL; |
| 1874 | sk->sk_rcvbuf = sysctl_rmem_default; | 1886 | sk->sk_rcvbuf = sysctl_rmem_default; |
| 1875 | sk->sk_sndbuf = sysctl_wmem_default; | 1887 | sk->sk_sndbuf = sysctl_wmem_default; |
| 1888 | sk->sk_backlog.limit = sk->sk_rcvbuf << 1; | ||
| 1876 | sk->sk_state = TCP_CLOSE; | 1889 | sk->sk_state = TCP_CLOSE; |
| 1877 | sk_set_socket(sk, sock); | 1890 | sk_set_socket(sk, sock); |
| 1878 | 1891 | ||
| @@ -2140,13 +2153,13 @@ int sock_prot_inuse_get(struct net *net, struct proto *prot) | |||
| 2140 | } | 2153 | } |
| 2141 | EXPORT_SYMBOL_GPL(sock_prot_inuse_get); | 2154 | EXPORT_SYMBOL_GPL(sock_prot_inuse_get); |
| 2142 | 2155 | ||
| 2143 | static int sock_inuse_init_net(struct net *net) | 2156 | static int __net_init sock_inuse_init_net(struct net *net) |
| 2144 | { | 2157 | { |
| 2145 | net->core.inuse = alloc_percpu(struct prot_inuse); | 2158 | net->core.inuse = alloc_percpu(struct prot_inuse); |
| 2146 | return net->core.inuse ? 0 : -ENOMEM; | 2159 | return net->core.inuse ? 0 : -ENOMEM; |
| 2147 | } | 2160 | } |
| 2148 | 2161 | ||
| 2149 | static void sock_inuse_exit_net(struct net *net) | 2162 | static void __net_exit sock_inuse_exit_net(struct net *net) |
| 2150 | { | 2163 | { |
| 2151 | free_percpu(net->core.inuse); | 2164 | free_percpu(net->core.inuse); |
| 2152 | } | 2165 | } |
| @@ -2228,13 +2241,10 @@ int proto_register(struct proto *prot, int alloc_slab) | |||
| 2228 | } | 2241 | } |
| 2229 | 2242 | ||
| 2230 | if (prot->rsk_prot != NULL) { | 2243 | if (prot->rsk_prot != NULL) { |
| 2231 | static const char mask[] = "request_sock_%s"; | 2244 | prot->rsk_prot->slab_name = kasprintf(GFP_KERNEL, "request_sock_%s", prot->name); |
| 2232 | |||
| 2233 | prot->rsk_prot->slab_name = kmalloc(strlen(prot->name) + sizeof(mask) - 1, GFP_KERNEL); | ||
| 2234 | if (prot->rsk_prot->slab_name == NULL) | 2245 | if (prot->rsk_prot->slab_name == NULL) |
| 2235 | goto out_free_sock_slab; | 2246 | goto out_free_sock_slab; |
| 2236 | 2247 | ||
| 2237 | sprintf(prot->rsk_prot->slab_name, mask, prot->name); | ||
| 2238 | prot->rsk_prot->slab = kmem_cache_create(prot->rsk_prot->slab_name, | 2248 | prot->rsk_prot->slab = kmem_cache_create(prot->rsk_prot->slab_name, |
| 2239 | prot->rsk_prot->obj_size, 0, | 2249 | prot->rsk_prot->obj_size, 0, |
| 2240 | SLAB_HWCACHE_ALIGN, NULL); | 2250 | SLAB_HWCACHE_ALIGN, NULL); |
| @@ -2247,14 +2257,11 @@ int proto_register(struct proto *prot, int alloc_slab) | |||
| 2247 | } | 2257 | } |
| 2248 | 2258 | ||
| 2249 | if (prot->twsk_prot != NULL) { | 2259 | if (prot->twsk_prot != NULL) { |
| 2250 | static const char mask[] = "tw_sock_%s"; | 2260 | prot->twsk_prot->twsk_slab_name = kasprintf(GFP_KERNEL, "tw_sock_%s", prot->name); |
| 2251 | |||
| 2252 | prot->twsk_prot->twsk_slab_name = kmalloc(strlen(prot->name) + sizeof(mask) - 1, GFP_KERNEL); | ||
| 2253 | 2261 | ||
| 2254 | if (prot->twsk_prot->twsk_slab_name == NULL) | 2262 | if (prot->twsk_prot->twsk_slab_name == NULL) |
| 2255 | goto out_free_request_sock_slab; | 2263 | goto out_free_request_sock_slab; |
| 2256 | 2264 | ||
| 2257 | sprintf(prot->twsk_prot->twsk_slab_name, mask, prot->name); | ||
| 2258 | prot->twsk_prot->twsk_slab = | 2265 | prot->twsk_prot->twsk_slab = |
| 2259 | kmem_cache_create(prot->twsk_prot->twsk_slab_name, | 2266 | kmem_cache_create(prot->twsk_prot->twsk_slab_name, |
| 2260 | prot->twsk_prot->twsk_obj_size, | 2267 | prot->twsk_prot->twsk_obj_size, |
| @@ -2281,7 +2288,8 @@ out_free_request_sock_slab: | |||
| 2281 | prot->rsk_prot->slab = NULL; | 2288 | prot->rsk_prot->slab = NULL; |
| 2282 | } | 2289 | } |
| 2283 | out_free_request_sock_slab_name: | 2290 | out_free_request_sock_slab_name: |
| 2284 | kfree(prot->rsk_prot->slab_name); | 2291 | if (prot->rsk_prot) |
| 2292 | kfree(prot->rsk_prot->slab_name); | ||
| 2285 | out_free_sock_slab: | 2293 | out_free_sock_slab: |
| 2286 | kmem_cache_destroy(prot->slab); | 2294 | kmem_cache_destroy(prot->slab); |
| 2287 | prot->slab = NULL; | 2295 | prot->slab = NULL; |
