author     Dmitry Torokhov <dmitry.torokhov@gmail.com>   2011-07-27 03:54:47 -0400
committer  Dmitry Torokhov <dmitry.torokhov@gmail.com>   2011-07-27 03:54:47 -0400
commit     aa7eb8e78d8ecd6cd0475d86ea8385ff9cb47ece (patch)
tree       3f9e98fadd5124fb05e8f6f9b06aa23698d4f215 /net/core
parent     cca8edfd2ec2a34d9f50f593bc753bb11e1bc1f5 (diff)
parent     3c6b50141ef9f0a8844bf1357b80c0cdf518bf05 (diff)
Merge branch 'next' into for-linus
Diffstat (limited to 'net/core')
 net/core/dev.c             | 275
 net/core/dev_addr_lists.c  |  12
 net/core/drop_monitor.c    |  12
 net/core/dst.c             |  57
 net/core/ethtool.c         | 221
 net/core/fib_rules.c       |   4
 net/core/filter.c          |  70
 net/core/gen_estimator.c   |   9
 net/core/net-sysfs.c       |  83
 net/core/net_namespace.c   |  97
 net/core/netpoll.c         |  35
 net/core/pktgen.c          | 222
 net/core/rtnetlink.c       |  25
 net/core/skbuff.c          |   4
 net/core/sysctl_net_core.c |  10
 net/core/utils.c           |  25
 16 files changed, 626 insertions, 535 deletions
diff --git a/net/core/dev.c b/net/core/dev.c
index 956d3b006e8b..9c58c1ec41a9 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -948,7 +948,7 @@ int dev_alloc_name(struct net_device *dev, const char *name)
948} 948}
949EXPORT_SYMBOL(dev_alloc_name); 949EXPORT_SYMBOL(dev_alloc_name);
950 950
951static int dev_get_valid_name(struct net_device *dev, const char *name, bool fmt) 951static int dev_get_valid_name(struct net_device *dev, const char *name)
952{ 952{
953 struct net *net; 953 struct net *net;
954 954
@@ -958,7 +958,7 @@ static int dev_get_valid_name(struct net_device *dev, const char *name, bool fmt
958 if (!dev_valid_name(name)) 958 if (!dev_valid_name(name))
959 return -EINVAL; 959 return -EINVAL;
960 960
961 if (fmt && strchr(name, '%')) 961 if (strchr(name, '%'))
962 return dev_alloc_name(dev, name); 962 return dev_alloc_name(dev, name);
963 else if (__dev_get_by_name(net, name)) 963 else if (__dev_get_by_name(net, name))
964 return -EEXIST; 964 return -EEXIST;
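
A note on the calling convention after this change: any '%' in the requested name is now unconditionally treated as a printf-style template and handed to dev_alloc_name(), which picks the lowest free unit number. A minimal sketch (the "vlan%d" template is only an example):

	int err;

	strlcpy(dev->name, "vlan%d", IFNAMSIZ);
	err = dev_alloc_name(dev, dev->name);	/* returns the chosen unit number or -errno */
	if (err < 0)
		return err;
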
@@ -995,7 +995,7 @@ int dev_change_name(struct net_device *dev, const char *newname)
995 995
996 memcpy(oldname, dev->name, IFNAMSIZ); 996 memcpy(oldname, dev->name, IFNAMSIZ);
997 997
998 err = dev_get_valid_name(dev, newname, 1); 998 err = dev_get_valid_name(dev, newname);
999 if (err < 0) 999 if (err < 0)
1000 return err; 1000 return err;
1001 1001
@@ -1007,7 +1007,7 @@ rollback:
1007 } 1007 }
1008 1008
1009 write_lock_bh(&dev_base_lock); 1009 write_lock_bh(&dev_base_lock);
1010 hlist_del(&dev->name_hlist); 1010 hlist_del_rcu(&dev->name_hlist);
1011 write_unlock_bh(&dev_base_lock); 1011 write_unlock_bh(&dev_base_lock);
1012 1012
1013 synchronize_rcu(); 1013 synchronize_rcu();
@@ -1284,11 +1284,13 @@ static int dev_close_many(struct list_head *head)
1284 */ 1284 */
1285int dev_close(struct net_device *dev) 1285int dev_close(struct net_device *dev)
1286{ 1286{
1287 LIST_HEAD(single); 1287 if (dev->flags & IFF_UP) {
1288 LIST_HEAD(single);
1288 1289
1289 list_add(&dev->unreg_list, &single); 1290 list_add(&dev->unreg_list, &single);
1290 dev_close_many(&single); 1291 dev_close_many(&single);
1291 list_del(&single); 1292 list_del(&single);
1293 }
1292 return 0; 1294 return 0;
1293} 1295}
1294EXPORT_SYMBOL(dev_close); 1296EXPORT_SYMBOL(dev_close);
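
dev_close() is now just the one-device case of dev_close_many() and becomes a no-op for interfaces that are already down. Inside net/core/dev.c the batching pattern looks roughly like this (dev_close_many() is file-local; dev_a/dev_b stand for devices the caller already manages):

	LIST_HEAD(head);

	list_add(&dev_a->unreg_list, &head);
	list_add(&dev_b->unreg_list, &head);
	dev_close_many(&head);		/* one NETDEV_GOING_DOWN / NETDEV_DOWN sweep */
	list_del(&head);		/* unhook the local list head again */
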
@@ -1306,6 +1308,13 @@ void dev_disable_lro(struct net_device *dev)
1306{ 1308{
1307 u32 flags; 1309 u32 flags;
1308 1310
1311 /*
1312 * If we're trying to disable lro on a vlan device
1313 * use the underlying physical device instead
1314 */
1315 if (is_vlan_dev(dev))
1316 dev = vlan_dev_real_dev(dev);
1317
1309 if (dev->ethtool_ops && dev->ethtool_ops->get_flags) 1318 if (dev->ethtool_ops && dev->ethtool_ops->get_flags)
1310 flags = dev->ethtool_ops->get_flags(dev); 1319 flags = dev->ethtool_ops->get_flags(dev);
1311 else 1320 else
@@ -1315,7 +1324,8 @@ void dev_disable_lro(struct net_device *dev)
1315 return; 1324 return;
1316 1325
1317 __ethtool_set_flags(dev, flags & ~ETH_FLAG_LRO); 1326 __ethtool_set_flags(dev, flags & ~ETH_FLAG_LRO);
1318 WARN_ON(dev->features & NETIF_F_LRO); 1327 if (unlikely(dev->features & NETIF_F_LRO))
1328 netdev_WARN(dev, "failed to disable LRO!\n");
1319} 1329}
1320EXPORT_SYMBOL(dev_disable_lro); 1330EXPORT_SYMBOL(dev_disable_lro);
1321 1331
@@ -2086,6 +2096,7 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
2086{ 2096{
2087 const struct net_device_ops *ops = dev->netdev_ops; 2097 const struct net_device_ops *ops = dev->netdev_ops;
2088 int rc = NETDEV_TX_OK; 2098 int rc = NETDEV_TX_OK;
2099 unsigned int skb_len;
2089 2100
2090 if (likely(!skb->next)) { 2101 if (likely(!skb->next)) {
2091 u32 features; 2102 u32 features;
@@ -2136,8 +2147,9 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
2136 } 2147 }
2137 } 2148 }
2138 2149
2150 skb_len = skb->len;
2139 rc = ops->ndo_start_xmit(skb, dev); 2151 rc = ops->ndo_start_xmit(skb, dev);
2140 trace_net_dev_xmit(skb, rc); 2152 trace_net_dev_xmit(skb, rc, dev, skb_len);
2141 if (rc == NETDEV_TX_OK) 2153 if (rc == NETDEV_TX_OK)
2142 txq_trans_update(txq); 2154 txq_trans_update(txq);
2143 return rc; 2155 return rc;
@@ -2157,8 +2169,9 @@ gso:
2157 if (dev->priv_flags & IFF_XMIT_DST_RELEASE) 2169 if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
2158 skb_dst_drop(nskb); 2170 skb_dst_drop(nskb);
2159 2171
2172 skb_len = nskb->len;
2160 rc = ops->ndo_start_xmit(nskb, dev); 2173 rc = ops->ndo_start_xmit(nskb, dev);
2161 trace_net_dev_xmit(nskb, rc); 2174 trace_net_dev_xmit(nskb, rc, dev, skb_len);
2162 if (unlikely(rc != NETDEV_TX_OK)) { 2175 if (unlikely(rc != NETDEV_TX_OK)) {
2163 if (rc & ~NETDEV_TX_MASK) 2176 if (rc & ~NETDEV_TX_MASK)
2164 goto out_kfree_gso_skb; 2177 goto out_kfree_gso_skb;
@@ -2502,8 +2515,8 @@ static inline void ____napi_schedule(struct softnet_data *sd,
2502__u32 __skb_get_rxhash(struct sk_buff *skb) 2515__u32 __skb_get_rxhash(struct sk_buff *skb)
2503{ 2516{
2504 int nhoff, hash = 0, poff; 2517 int nhoff, hash = 0, poff;
2505 struct ipv6hdr *ip6; 2518 const struct ipv6hdr *ip6;
2506 struct iphdr *ip; 2519 const struct iphdr *ip;
2507 u8 ip_proto; 2520 u8 ip_proto;
2508 u32 addr1, addr2, ihl; 2521 u32 addr1, addr2, ihl;
2509 union { 2522 union {
@@ -2518,7 +2531,7 @@ __u32 __skb_get_rxhash(struct sk_buff *skb)
2518 if (!pskb_may_pull(skb, sizeof(*ip) + nhoff)) 2531 if (!pskb_may_pull(skb, sizeof(*ip) + nhoff))
2519 goto done; 2532 goto done;
2520 2533
2521 ip = (struct iphdr *) (skb->data + nhoff); 2534 ip = (const struct iphdr *) (skb->data + nhoff);
2522 if (ip->frag_off & htons(IP_MF | IP_OFFSET)) 2535 if (ip->frag_off & htons(IP_MF | IP_OFFSET))
2523 ip_proto = 0; 2536 ip_proto = 0;
2524 else 2537 else
@@ -2531,7 +2544,7 @@ __u32 __skb_get_rxhash(struct sk_buff *skb)
2531 if (!pskb_may_pull(skb, sizeof(*ip6) + nhoff)) 2544 if (!pskb_may_pull(skb, sizeof(*ip6) + nhoff))
2532 goto done; 2545 goto done;
2533 2546
2534 ip6 = (struct ipv6hdr *) (skb->data + nhoff); 2547 ip6 = (const struct ipv6hdr *) (skb->data + nhoff);
2535 ip_proto = ip6->nexthdr; 2548 ip_proto = ip6->nexthdr;
2536 addr1 = (__force u32) ip6->saddr.s6_addr32[3]; 2549 addr1 = (__force u32) ip6->saddr.s6_addr32[3];
2537 addr2 = (__force u32) ip6->daddr.s6_addr32[3]; 2550 addr2 = (__force u32) ip6->daddr.s6_addr32[3];
@@ -3076,25 +3089,6 @@ void netdev_rx_handler_unregister(struct net_device *dev)
3076} 3089}
3077EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister); 3090EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister);
3078 3091
3079static void vlan_on_bond_hook(struct sk_buff *skb)
3080{
3081 /*
3082 * Make sure ARP frames received on VLAN interfaces stacked on
3083 * bonding interfaces still make their way to any base bonding
3084 * device that may have registered for a specific ptype.
3085 */
3086 if (skb->dev->priv_flags & IFF_802_1Q_VLAN &&
3087 vlan_dev_real_dev(skb->dev)->priv_flags & IFF_BONDING &&
3088 skb->protocol == htons(ETH_P_ARP)) {
3089 struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
3090
3091 if (!skb2)
3092 return;
3093 skb2->dev = vlan_dev_real_dev(skb->dev);
3094 netif_rx(skb2);
3095 }
3096}
3097
3098static int __netif_receive_skb(struct sk_buff *skb) 3092static int __netif_receive_skb(struct sk_buff *skb)
3099{ 3093{
3100 struct packet_type *ptype, *pt_prev; 3094 struct packet_type *ptype, *pt_prev;
@@ -3120,7 +3114,7 @@ static int __netif_receive_skb(struct sk_buff *skb)
3120 3114
3121 skb_reset_network_header(skb); 3115 skb_reset_network_header(skb);
3122 skb_reset_transport_header(skb); 3116 skb_reset_transport_header(skb);
3123 skb->mac_len = skb->network_header - skb->mac_header; 3117 skb_reset_mac_len(skb);
3124 3118
3125 pt_prev = NULL; 3119 pt_prev = NULL;
3126 3120
@@ -3130,6 +3124,12 @@ another_round:
3130 3124
3131 __this_cpu_inc(softnet_data.processed); 3125 __this_cpu_inc(softnet_data.processed);
3132 3126
3127 if (skb->protocol == cpu_to_be16(ETH_P_8021Q)) {
3128 skb = vlan_untag(skb);
3129 if (unlikely(!skb))
3130 goto out;
3131 }
3132
3133#ifdef CONFIG_NET_CLS_ACT 3133#ifdef CONFIG_NET_CLS_ACT
3134 if (skb->tc_verd & TC_NCLS) { 3134 if (skb->tc_verd & TC_NCLS) {
3135 skb->tc_verd = CLR_TC_NCLS(skb->tc_verd); 3135 skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
@@ -3177,15 +3177,13 @@ ncls:
3177 ret = deliver_skb(skb, pt_prev, orig_dev); 3177 ret = deliver_skb(skb, pt_prev, orig_dev);
3178 pt_prev = NULL; 3178 pt_prev = NULL;
3179 } 3179 }
3180 if (vlan_hwaccel_do_receive(&skb)) { 3180 if (vlan_do_receive(&skb)) {
3181 ret = __netif_receive_skb(skb); 3181 ret = __netif_receive_skb(skb);
3182 goto out; 3182 goto out;
3183 } else if (unlikely(!skb)) 3183 } else if (unlikely(!skb))
3184 goto out; 3184 goto out;
3185 } 3185 }
3186 3186
3187 vlan_on_bond_hook(skb);
3188
3189 /* deliver only exact match when indicated */ 3187 /* deliver only exact match when indicated */
3190 null_or_dev = deliver_exact ? skb->dev : NULL; 3188 null_or_dev = deliver_exact ? skb->dev : NULL;
3191 3189
@@ -4306,10 +4304,8 @@ int netdev_set_master(struct net_device *slave, struct net_device *master)
4306 4304
4307 slave->master = master; 4305 slave->master = master;
4308 4306
4309 if (old) { 4307 if (old)
4310 synchronize_net();
4311 dev_put(old); 4308 dev_put(old);
4312 }
4313 return 0; 4309 return 0;
4314} 4310}
4315EXPORT_SYMBOL(netdev_set_master); 4311EXPORT_SYMBOL(netdev_set_master);
@@ -4510,6 +4506,30 @@ void dev_set_rx_mode(struct net_device *dev)
4510} 4506}
4511 4507
4512/** 4508/**
4509 * dev_ethtool_get_settings - call device's ethtool_ops::get_settings()
4510 * @dev: device
4511 * @cmd: memory area for ethtool_ops::get_settings() result
4512 *
4513 * The cmd arg is initialized properly (cleared and
4514 * ethtool_cmd::cmd field set to ETHTOOL_GSET).
4515 *
4516 * Return device's ethtool_ops::get_settings() result value or
4517 * -EOPNOTSUPP when device doesn't expose
4518 * ethtool_ops::get_settings() operation.
4519 */
4520int dev_ethtool_get_settings(struct net_device *dev,
4521 struct ethtool_cmd *cmd)
4522{
4523 if (!dev->ethtool_ops || !dev->ethtool_ops->get_settings)
4524 return -EOPNOTSUPP;
4525
4526 memset(cmd, 0, sizeof(struct ethtool_cmd));
4527 cmd->cmd = ETHTOOL_GSET;
4528 return dev->ethtool_ops->get_settings(dev, cmd);
4529}
4530EXPORT_SYMBOL(dev_ethtool_get_settings);
4531
4532/**
4513 * dev_get_flags - get flags reported to userspace 4533 * dev_get_flags - get flags reported to userspace
4514 * @dev: device 4534 * @dev: device
4515 * 4535 *
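
The new helper folds the ethtool_ops NULL checks and the ethtool_cmd initialization into one place (the net-sysfs.c hunks below are converted to it). A caller sketch, printing the negotiated link speed:

	struct ethtool_cmd cmd;

	if (!dev_ethtool_get_settings(netdev, &cmd))	/* cmd is cleared and cmd.cmd set internally */
		pr_info("%s: %u Mb/s, %s duplex\n", netdev->name,
			ethtool_cmd_speed(&cmd),
			cmd.duplex == DUPLEX_FULL ? "full" : "half");
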
@@ -4773,7 +4793,7 @@ static int dev_ifsioc_locked(struct net *net, struct ifreq *ifr, unsigned int cm
4773 * is never reached 4793 * is never reached
4774 */ 4794 */
4775 WARN_ON(1); 4795 WARN_ON(1);
4776 err = -EINVAL; 4796 err = -ENOTTY;
4777 break; 4797 break;
4778 4798
4779 } 4799 }
@@ -5041,7 +5061,7 @@ int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg)
5041 /* Set the per device memory buffer space. 5061 /* Set the per device memory buffer space.
5042 * Not applicable in our case */ 5062 * Not applicable in our case */
5043 case SIOCSIFLINK: 5063 case SIOCSIFLINK:
5044 return -EINVAL; 5064 return -ENOTTY;
5045 5065
5046 /* 5066 /*
5047 * Unknown or private ioctl. 5067 * Unknown or private ioctl.
@@ -5062,7 +5082,7 @@ int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg)
5062 /* Take care of Wireless Extensions */ 5082 /* Take care of Wireless Extensions */
5063 if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST) 5083 if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST)
5064 return wext_handle_ioctl(net, &ifr, cmd, arg); 5084 return wext_handle_ioctl(net, &ifr, cmd, arg);
5065 return -EINVAL; 5085 return -ENOTTY;
5066 } 5086 }
5067} 5087}
5068 5088
@@ -5114,7 +5134,7 @@ static void rollback_registered_many(struct list_head *head)
5114 list_del(&dev->unreg_list); 5134 list_del(&dev->unreg_list);
5115 continue; 5135 continue;
5116 } 5136 }
5117 5137 dev->dismantle = true;
5118 BUG_ON(dev->reg_state != NETREG_REGISTERED); 5138 BUG_ON(dev->reg_state != NETREG_REGISTERED);
5119 } 5139 }
5120 5140
@@ -5184,33 +5204,37 @@ u32 netdev_fix_features(struct net_device *dev, u32 features)
5184 /* Fix illegal checksum combinations */ 5204 /* Fix illegal checksum combinations */
5185 if ((features & NETIF_F_HW_CSUM) && 5205 if ((features & NETIF_F_HW_CSUM) &&
5186 (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) { 5206 (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
5187 netdev_info(dev, "mixed HW and IP checksum settings.\n"); 5207 netdev_warn(dev, "mixed HW and IP checksum settings.\n");
5188 features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM); 5208 features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
5189 } 5209 }
5190 5210
5191 if ((features & NETIF_F_NO_CSUM) && 5211 if ((features & NETIF_F_NO_CSUM) &&
5192 (features & (NETIF_F_HW_CSUM|NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) { 5212 (features & (NETIF_F_HW_CSUM|NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
5193 netdev_info(dev, "mixed no checksumming and other settings.\n"); 5213 netdev_warn(dev, "mixed no checksumming and other settings.\n");
5194 features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM|NETIF_F_HW_CSUM); 5214 features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM|NETIF_F_HW_CSUM);
5195 } 5215 }
5196 5216
5197 /* Fix illegal SG+CSUM combinations. */ 5217 /* Fix illegal SG+CSUM combinations. */
5198 if ((features & NETIF_F_SG) && 5218 if ((features & NETIF_F_SG) &&
5199 !(features & NETIF_F_ALL_CSUM)) { 5219 !(features & NETIF_F_ALL_CSUM)) {
5200 netdev_info(dev, 5220 netdev_dbg(dev,
5201 "Dropping NETIF_F_SG since no checksum feature.\n"); 5221 "Dropping NETIF_F_SG since no checksum feature.\n");
5202 features &= ~NETIF_F_SG; 5222 features &= ~NETIF_F_SG;
5203 } 5223 }
5204 5224
5205 /* TSO requires that SG is present as well. */ 5225 /* TSO requires that SG is present as well. */
5206 if ((features & NETIF_F_TSO) && !(features & NETIF_F_SG)) { 5226 if ((features & NETIF_F_ALL_TSO) && !(features & NETIF_F_SG)) {
5207 netdev_info(dev, "Dropping NETIF_F_TSO since no SG feature.\n"); 5227 netdev_dbg(dev, "Dropping TSO features since no SG feature.\n");
5208 features &= ~NETIF_F_TSO; 5228 features &= ~NETIF_F_ALL_TSO;
5209 } 5229 }
5210 5230
5231 /* TSO ECN requires that TSO is present as well. */
5232 if ((features & NETIF_F_ALL_TSO) == NETIF_F_TSO_ECN)
5233 features &= ~NETIF_F_TSO_ECN;
5234
5211 /* Software GSO depends on SG. */ 5235 /* Software GSO depends on SG. */
5212 if ((features & NETIF_F_GSO) && !(features & NETIF_F_SG)) { 5236 if ((features & NETIF_F_GSO) && !(features & NETIF_F_SG)) {
5213 netdev_info(dev, "Dropping NETIF_F_GSO since no SG feature.\n"); 5237 netdev_dbg(dev, "Dropping NETIF_F_GSO since no SG feature.\n");
5214 features &= ~NETIF_F_GSO; 5238 features &= ~NETIF_F_GSO;
5215 } 5239 }
5216 5240
@@ -5220,13 +5244,13 @@ u32 netdev_fix_features(struct net_device *dev, u32 features)
5220 if (!((features & NETIF_F_GEN_CSUM) || 5244 if (!((features & NETIF_F_GEN_CSUM) ||
5221 (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM)) 5245 (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))
5222 == (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) { 5246 == (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
5223 netdev_info(dev, 5247 netdev_dbg(dev,
5224 "Dropping NETIF_F_UFO since no checksum offload features.\n"); 5248 "Dropping NETIF_F_UFO since no checksum offload features.\n");
5225 features &= ~NETIF_F_UFO; 5249 features &= ~NETIF_F_UFO;
5226 } 5250 }
5227 5251
5228 if (!(features & NETIF_F_SG)) { 5252 if (!(features & NETIF_F_SG)) {
5229 netdev_info(dev, 5253 netdev_dbg(dev,
5230 "Dropping NETIF_F_UFO since no NETIF_F_SG feature.\n"); 5254 "Dropping NETIF_F_UFO since no NETIF_F_SG feature.\n");
5231 features &= ~NETIF_F_UFO; 5255 features &= ~NETIF_F_UFO;
5232 } 5256 }
@@ -5236,11 +5260,13 @@ u32 netdev_fix_features(struct net_device *dev, u32 features)
5236} 5260}
5237EXPORT_SYMBOL(netdev_fix_features); 5261EXPORT_SYMBOL(netdev_fix_features);
5238 5262
5239void netdev_update_features(struct net_device *dev) 5263int __netdev_update_features(struct net_device *dev)
5240{ 5264{
5241 u32 features; 5265 u32 features;
5242 int err = 0; 5266 int err = 0;
5243 5267
5268 ASSERT_RTNL();
5269
5244 features = netdev_get_wanted_features(dev); 5270 features = netdev_get_wanted_features(dev);
5245 5271
5246 if (dev->netdev_ops->ndo_fix_features) 5272 if (dev->netdev_ops->ndo_fix_features)
@@ -5250,24 +5276,60 @@ void netdev_update_features(struct net_device *dev)
5250 features = netdev_fix_features(dev, features); 5276 features = netdev_fix_features(dev, features);
5251 5277
5252 if (dev->features == features) 5278 if (dev->features == features)
5253 return; 5279 return 0;
5254 5280
5255 netdev_info(dev, "Features changed: 0x%08x -> 0x%08x\n", 5281 netdev_dbg(dev, "Features changed: 0x%08x -> 0x%08x\n",
5256 dev->features, features); 5282 dev->features, features);
5257 5283
5258 if (dev->netdev_ops->ndo_set_features) 5284 if (dev->netdev_ops->ndo_set_features)
5259 err = dev->netdev_ops->ndo_set_features(dev, features); 5285 err = dev->netdev_ops->ndo_set_features(dev, features);
5260 5286
5261 if (!err) 5287 if (unlikely(err < 0)) {
5262 dev->features = features;
5263 else if (err < 0)
5264 netdev_err(dev, 5288 netdev_err(dev,
5265 "set_features() failed (%d); wanted 0x%08x, left 0x%08x\n", 5289 "set_features() failed (%d); wanted 0x%08x, left 0x%08x\n",
5266 err, features, dev->features); 5290 err, features, dev->features);
5291 return -1;
5292 }
5293
5294 if (!err)
5295 dev->features = features;
5296
5297 return 1;
5298}
5299
5300/**
5301 * netdev_update_features - recalculate device features
5302 * @dev: the device to check
5303 *
5304 * Recalculate dev->features set and send notifications if it
5305 * has changed. Should be called after driver or hardware dependent
5306 * conditions might have changed that influence the features.
5307 */
5308void netdev_update_features(struct net_device *dev)
5309{
5310 if (__netdev_update_features(dev))
5311 netdev_features_change(dev);
5267} 5312}
5268EXPORT_SYMBOL(netdev_update_features); 5313EXPORT_SYMBOL(netdev_update_features);
5269 5314
5270/** 5315/**
5316 * netdev_change_features - recalculate device features
5317 * @dev: the device to check
5318 *
5319 * Recalculate dev->features set and send notifications even
5320 * if they have not changed. Should be called instead of
5321 * netdev_update_features() if also dev->vlan_features might
5322 * have changed to allow the changes to be propagated to stacked
5323 * VLAN devices.
5324 */
5325void netdev_change_features(struct net_device *dev)
5326{
5327 __netdev_update_features(dev);
5328 netdev_features_change(dev);
5329}
5330EXPORT_SYMBOL(netdev_change_features);
5331
5332/**
5271 * netif_stacked_transfer_operstate - transfer operstate 5333 * netif_stacked_transfer_operstate - transfer operstate
5272 * @rootdev: the root or lower level device to transfer state from 5334 * @rootdev: the root or lower level device to transfer state from
5273 * @dev: the device to transfer operstate to 5335 * @dev: the device to transfer operstate to
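
__netdev_update_features() is the RTNL-only worker and reports back whether a notification is due; netdev_update_features() and netdev_change_features() wrap it for the two common cases. A driver that gates features on runtime state would typically hook in like this (the foo_* names are illustrative, wired into the driver's net_device_ops):

	static u32 foo_fix_features(struct net_device *dev, u32 features)
	{
		/* e.g. the hardware cannot do LRO above the standard MTU */
		if (dev->mtu > 1500)
			features &= ~NETIF_F_LRO;
		return features;
	}

	static int foo_change_mtu(struct net_device *dev, int new_mtu)
	{
		dev->mtu = new_mtu;
		netdev_update_features(dev);	/* re-runs foo_fix_features(), notifies on change */
		return 0;
	}
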
@@ -5383,6 +5445,10 @@ int register_netdevice(struct net_device *dev)
5383 5445
5384 dev->iflink = -1; 5446 dev->iflink = -1;
5385 5447
5448 ret = dev_get_valid_name(dev, dev->name);
5449 if (ret < 0)
5450 goto out;
5451
5386 /* Init, if this function is available */ 5452 /* Init, if this function is available */
5387 if (dev->netdev_ops->ndo_init) { 5453 if (dev->netdev_ops->ndo_init) {
5388 ret = dev->netdev_ops->ndo_init(dev); 5454 ret = dev->netdev_ops->ndo_init(dev);
@@ -5393,10 +5459,6 @@ int register_netdevice(struct net_device *dev)
5393 } 5459 }
5394 } 5460 }
5395 5461
5396 ret = dev_get_valid_name(dev, dev->name, 0);
5397 if (ret)
5398 goto err_uninit;
5399
5400 dev->ifindex = dev_new_index(net); 5462 dev->ifindex = dev_new_index(net);
5401 if (dev->iflink == -1) 5463 if (dev->iflink == -1)
5402 dev->iflink = dev->ifindex; 5464 dev->iflink = dev->ifindex;
@@ -5408,10 +5470,12 @@ int register_netdevice(struct net_device *dev)
5408 dev->features |= NETIF_F_SOFT_FEATURES; 5470 dev->features |= NETIF_F_SOFT_FEATURES;
5409 dev->wanted_features = dev->features & dev->hw_features; 5471 dev->wanted_features = dev->features & dev->hw_features;
5410 5472
5411 /* Avoid warning from netdev_fix_features() for GSO without SG */ 5473 /* Turn on no cache copy if HW is doing checksum */
5412 if (!(dev->wanted_features & NETIF_F_SG)) { 5474 dev->hw_features |= NETIF_F_NOCACHE_COPY;
5413 dev->wanted_features &= ~NETIF_F_GSO; 5475 if ((dev->features & NETIF_F_ALL_CSUM) &&
5414 dev->features &= ~NETIF_F_GSO; 5476 !(dev->features & NETIF_F_NO_CSUM)) {
5477 dev->wanted_features |= NETIF_F_NOCACHE_COPY;
5478 dev->features |= NETIF_F_NOCACHE_COPY;
5415 } 5479 }
5416 5480
5417 /* Enable GRO and NETIF_F_HIGHDMA for vlans by default, 5481 /* Enable GRO and NETIF_F_HIGHDMA for vlans by default,
@@ -5430,7 +5494,7 @@ int register_netdevice(struct net_device *dev)
5430 goto err_uninit; 5494 goto err_uninit;
5431 dev->reg_state = NETREG_REGISTERED; 5495 dev->reg_state = NETREG_REGISTERED;
5432 5496
5433 netdev_update_features(dev); 5497 __netdev_update_features(dev);
5434 5498
5435 /* 5499 /*
5436 * Default initial state at registry is that the 5500 * Default initial state at registry is that the
@@ -5527,19 +5591,7 @@ int register_netdev(struct net_device *dev)
5527 int err; 5591 int err;
5528 5592
5529 rtnl_lock(); 5593 rtnl_lock();
5530
5531 /*
5532 * If the name is a format string the caller wants us to do a
5533 * name allocation.
5534 */
5535 if (strchr(dev->name, '%')) {
5536 err = dev_alloc_name(dev, dev->name);
5537 if (err < 0)
5538 goto out;
5539 }
5540
5541 err = register_netdevice(dev); 5594 err = register_netdevice(dev);
5542out:
5543 rtnl_unlock(); 5595 rtnl_unlock();
5544 return err; 5596 return err;
5545} 5597}
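
With dev_get_valid_name() moved into register_netdevice(), register_netdev() no longer needs its own '%' handling and both entry points resolve name templates identically. Registration stays the usual three-step dance, sketched with hypothetical foo names:

	struct net_device *dev;
	int err;

	dev = alloc_netdev(sizeof(struct foo_priv), "foo%d", foo_setup);
	if (!dev)
		return -ENOMEM;

	err = register_netdev(dev);	/* rtnl_lock + register_netdevice(); '%d' resolved inside */
	if (err)
		free_netdev(dev);
	return err;
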
@@ -5912,7 +5964,10 @@ EXPORT_SYMBOL(free_netdev);
5912void synchronize_net(void) 5964void synchronize_net(void)
5913{ 5965{
5914 might_sleep(); 5966 might_sleep();
5915 synchronize_rcu(); 5967 if (rtnl_is_locked())
5968 synchronize_rcu_expedited();
5969 else
5970 synchronize_rcu();
5916} 5971}
5917EXPORT_SYMBOL(synchronize_net); 5972EXPORT_SYMBOL(synchronize_net);
5918 5973
@@ -6021,7 +6076,7 @@ int dev_change_net_namespace(struct net_device *dev, struct net *net, const char
6021 /* We get here if we can't use the current device name */ 6076 /* We get here if we can't use the current device name */
6022 if (!pat) 6077 if (!pat)
6023 goto out; 6078 goto out;
6024 if (dev_get_valid_name(dev, pat, 1)) 6079 if (dev_get_valid_name(dev, pat) < 0)
6025 goto out; 6080 goto out;
6026 } 6081 }
6027 6082
@@ -6123,6 +6178,11 @@ static int dev_cpu_callback(struct notifier_block *nfb,
6123 oldsd->output_queue = NULL; 6178 oldsd->output_queue = NULL;
6124 oldsd->output_queue_tailp = &oldsd->output_queue; 6179 oldsd->output_queue_tailp = &oldsd->output_queue;
6125 } 6180 }
6181 /* Append NAPI poll list from offline CPU. */
6182 if (!list_empty(&oldsd->poll_list)) {
6183 list_splice_init(&oldsd->poll_list, &sd->poll_list);
6184 raise_softirq_irqoff(NET_RX_SOFTIRQ);
6185 }
6126 6186
6127 raise_softirq_irqoff(NET_TX_SOFTIRQ); 6187 raise_softirq_irqoff(NET_TX_SOFTIRQ);
6128 local_irq_enable(); 6188 local_irq_enable();
@@ -6153,29 +6213,20 @@ static int dev_cpu_callback(struct notifier_block *nfb,
6153 */ 6213 */
6154u32 netdev_increment_features(u32 all, u32 one, u32 mask) 6214u32 netdev_increment_features(u32 all, u32 one, u32 mask)
6155{ 6215{
6156 /* If device needs checksumming, downgrade to it. */ 6216 if (mask & NETIF_F_GEN_CSUM)
6157 if (all & NETIF_F_NO_CSUM && !(one & NETIF_F_NO_CSUM)) 6217 mask |= NETIF_F_ALL_CSUM;
6158 all ^= NETIF_F_NO_CSUM | (one & NETIF_F_ALL_CSUM); 6218 mask |= NETIF_F_VLAN_CHALLENGED;
6159 else if (mask & NETIF_F_ALL_CSUM) {
6160 /* If one device supports v4/v6 checksumming, set for all. */
6161 if (one & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM) &&
6162 !(all & NETIF_F_GEN_CSUM)) {
6163 all &= ~NETIF_F_ALL_CSUM;
6164 all |= one & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
6165 }
6166 6219
6167 /* If one device supports hw checksumming, set for all. */ 6220 all |= one & (NETIF_F_ONE_FOR_ALL|NETIF_F_ALL_CSUM) & mask;
6168 if (one & NETIF_F_GEN_CSUM && !(all & NETIF_F_GEN_CSUM)) { 6221 all &= one | ~NETIF_F_ALL_FOR_ALL;
6169 all &= ~NETIF_F_ALL_CSUM;
6170 all |= NETIF_F_HW_CSUM;
6171 }
6172 }
6173 6222
6174 one |= NETIF_F_ALL_CSUM; 6223 /* If device needs checksumming, downgrade to it. */
6224 if (all & (NETIF_F_ALL_CSUM & ~NETIF_F_NO_CSUM))
6225 all &= ~NETIF_F_NO_CSUM;
6175 6226
6176 one |= all & NETIF_F_ONE_FOR_ALL; 6227 /* If one device supports hw checksumming, set for all. */
6177 all &= one | NETIF_F_LLTX | NETIF_F_GSO | NETIF_F_UFO; 6228 if (all & NETIF_F_GEN_CSUM)
6178 all |= one & mask & NETIF_F_ONE_FOR_ALL; 6229 all &= ~(NETIF_F_ALL_CSUM & ~NETIF_F_GEN_CSUM);
6179 6230
6180 return all; 6231 return all;
6181} 6232}
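
The rewritten helper expresses the old special cases through the NETIF_F_ONE_FOR_ALL / NETIF_F_ALL_FOR_ALL groups plus an explicit checksum downgrade, and it is meant to be folded over the members of an aggregate device. A bonding/bridge-style sketch (the member array, count and mask are illustrative):

	u32 all = mask;			/* start from the bits the aggregate cares about */
	int i;

	for (i = 0; i < n_members; i++)
		all = netdev_increment_features(all, members[i]->features, mask);

	agg_dev->features = netdev_fix_features(agg_dev, all);
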
@@ -6218,29 +6269,23 @@ err_name:
6218/** 6269/**
6219 * netdev_drivername - network driver for the device 6270 * netdev_drivername - network driver for the device
6220 * @dev: network device 6271 * @dev: network device
6221 * @buffer: buffer for resulting name
6222 * @len: size of buffer
6223 * 6272 *
6224 * Determine network driver for device. 6273 * Determine network driver for device.
6225 */ 6274 */
6226char *netdev_drivername(const struct net_device *dev, char *buffer, int len) 6275const char *netdev_drivername(const struct net_device *dev)
6227{ 6276{
6228 const struct device_driver *driver; 6277 const struct device_driver *driver;
6229 const struct device *parent; 6278 const struct device *parent;
6230 6279 const char *empty = "";
6231 if (len <= 0 || !buffer)
6232 return buffer;
6233 buffer[0] = 0;
6234 6280
6235 parent = dev->dev.parent; 6281 parent = dev->dev.parent;
6236
6237 if (!parent) 6282 if (!parent)
6238 return buffer; 6283 return empty;
6239 6284
6240 driver = parent->driver; 6285 driver = parent->driver;
6241 if (driver && driver->name) 6286 if (driver && driver->name)
6242 strlcpy(buffer, driver->name, len); 6287 return driver->name;
6243 return buffer; 6288 return empty;
6244} 6289}
6245 6290
6246static int __netdev_printk(const char *level, const struct net_device *dev, 6291static int __netdev_printk(const char *level, const struct net_device *dev,
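
netdev_drivername() now hands back the driver name (or an empty string) instead of filling a caller-supplied buffer, so call sites shrink to a plain format argument, in the spirit of the qdisc watchdog message (sketch; 'txq' is a placeholder index):

	WARN_ONCE(1, KERN_INFO "NETDEV WATCHDOG: %s (%s): transmit queue %u timed out\n",
		  dev->name, netdev_drivername(dev), txq);
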
diff --git a/net/core/dev_addr_lists.c b/net/core/dev_addr_lists.c
index 7b39f3ed2fda..e2e66939ed00 100644
--- a/net/core/dev_addr_lists.c
+++ b/net/core/dev_addr_lists.c
@@ -68,14 +68,6 @@ static int __hw_addr_add(struct netdev_hw_addr_list *list, unsigned char *addr,
68 return __hw_addr_add_ex(list, addr, addr_len, addr_type, false); 68 return __hw_addr_add_ex(list, addr, addr_len, addr_type, false);
69} 69}
70 70
71static void ha_rcu_free(struct rcu_head *head)
72{
73 struct netdev_hw_addr *ha;
74
75 ha = container_of(head, struct netdev_hw_addr, rcu_head);
76 kfree(ha);
77}
78
79static int __hw_addr_del_ex(struct netdev_hw_addr_list *list, 71static int __hw_addr_del_ex(struct netdev_hw_addr_list *list,
80 unsigned char *addr, int addr_len, 72 unsigned char *addr, int addr_len,
81 unsigned char addr_type, bool global) 73 unsigned char addr_type, bool global)
@@ -94,7 +86,7 @@ static int __hw_addr_del_ex(struct netdev_hw_addr_list *list,
94 if (--ha->refcount) 86 if (--ha->refcount)
95 return 0; 87 return 0;
96 list_del_rcu(&ha->list); 88 list_del_rcu(&ha->list);
97 call_rcu(&ha->rcu_head, ha_rcu_free); 89 kfree_rcu(ha, rcu_head);
98 list->count--; 90 list->count--;
99 return 0; 91 return 0;
100 } 92 }
@@ -197,7 +189,7 @@ void __hw_addr_flush(struct netdev_hw_addr_list *list)
197 189
198 list_for_each_entry_safe(ha, tmp, &list->list, list) { 190 list_for_each_entry_safe(ha, tmp, &list->list, list) {
199 list_del_rcu(&ha->list); 191 list_del_rcu(&ha->list);
200 call_rcu(&ha->rcu_head, ha_rcu_free); 192 kfree_rcu(ha, rcu_head);
201 } 193 }
202 list->count = 0; 194 list->count = 0;
203} 195}
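
This conversion, and the matching ones in drop_monitor.c, gen_estimator.c and net-sysfs.c below, replaces a call_rcu() callback whose only job was kfree() with kfree_rcu(), which just needs a struct rcu_head embedded in the object. The general before/after shape, for an illustrative struct:

	struct item {
		struct list_head list;
		struct rcu_head rcu;
	};

	/* before: a dedicated callback just to free the container */
	static void item_free_rcu(struct rcu_head *head)
	{
		kfree(container_of(head, struct item, rcu));
	}
	/* ... list_del_rcu(&it->list); call_rcu(&it->rcu, item_free_rcu); */

	/* after: name the rcu_head member and let the core do the rest */
	list_del_rcu(&it->list);
	kfree_rcu(it, rcu);
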
diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c
index 706502ff64aa..7f36b38e060f 100644
--- a/net/core/drop_monitor.c
+++ b/net/core/drop_monitor.c
@@ -207,14 +207,6 @@ static void trace_napi_poll_hit(void *ignore, struct napi_struct *napi)
207 rcu_read_unlock(); 207 rcu_read_unlock();
208} 208}
209 209
210
211static void free_dm_hw_stat(struct rcu_head *head)
212{
213 struct dm_hw_stat_delta *n;
214 n = container_of(head, struct dm_hw_stat_delta, rcu);
215 kfree(n);
216}
217
218static int set_all_monitor_traces(int state) 210static int set_all_monitor_traces(int state)
219{ 211{
220 int rc = 0; 212 int rc = 0;
@@ -245,7 +237,7 @@ static int set_all_monitor_traces(int state)
245 list_for_each_entry_safe(new_stat, temp, &hw_stats_list, list) { 237 list_for_each_entry_safe(new_stat, temp, &hw_stats_list, list) {
246 if (new_stat->dev == NULL) { 238 if (new_stat->dev == NULL) {
247 list_del_rcu(&new_stat->list); 239 list_del_rcu(&new_stat->list);
248 call_rcu(&new_stat->rcu, free_dm_hw_stat); 240 kfree_rcu(new_stat, rcu);
249 } 241 }
250 } 242 }
251 break; 243 break;
@@ -314,7 +306,7 @@ static int dropmon_net_event(struct notifier_block *ev_block,
314 new_stat->dev = NULL; 306 new_stat->dev = NULL;
315 if (trace_state == TRACE_OFF) { 307 if (trace_state == TRACE_OFF) {
316 list_del_rcu(&new_stat->list); 308 list_del_rcu(&new_stat->list);
317 call_rcu(&new_stat->rcu, free_dm_hw_stat); 309 kfree_rcu(new_stat, rcu);
318 break; 310 break;
319 } 311 }
320 } 312 }
diff --git a/net/core/dst.c b/net/core/dst.c
index 91104d35de7d..9ccca038444f 100644
--- a/net/core/dst.c
+++ b/net/core/dst.c
@@ -19,6 +19,7 @@
19#include <linux/types.h> 19#include <linux/types.h>
20#include <net/net_namespace.h> 20#include <net/net_namespace.h>
21#include <linux/sched.h> 21#include <linux/sched.h>
22#include <linux/prefetch.h>
22 23
23#include <net/dst.h> 24#include <net/dst.h>
24 25
@@ -33,9 +34,6 @@
33 * 3) This list is guarded by a mutex, 34 * 3) This list is guarded by a mutex,
34 * so that the gc_task and dst_dev_event() can be synchronized. 35 * so that the gc_task and dst_dev_event() can be synchronized.
35 */ 36 */
36#if RT_CACHE_DEBUG >= 2
37static atomic_t dst_total = ATOMIC_INIT(0);
38#endif
39 37
40/* 38/*
41 * We want to keep lock & list close together 39 * We want to keep lock & list close together
@@ -69,10 +67,6 @@ static void dst_gc_task(struct work_struct *work)
69 unsigned long expires = ~0L; 67 unsigned long expires = ~0L;
70 struct dst_entry *dst, *next, head; 68 struct dst_entry *dst, *next, head;
71 struct dst_entry *last = &head; 69 struct dst_entry *last = &head;
72#if RT_CACHE_DEBUG >= 2
73 ktime_t time_start = ktime_get();
74 struct timespec elapsed;
75#endif
76 70
77 mutex_lock(&dst_gc_mutex); 71 mutex_lock(&dst_gc_mutex);
78 next = dst_busy_list; 72 next = dst_busy_list;
@@ -146,15 +140,6 @@ loop:
146 140
147 spin_unlock_bh(&dst_garbage.lock); 141 spin_unlock_bh(&dst_garbage.lock);
148 mutex_unlock(&dst_gc_mutex); 142 mutex_unlock(&dst_gc_mutex);
149#if RT_CACHE_DEBUG >= 2
150 elapsed = ktime_to_timespec(ktime_sub(ktime_get(), time_start));
151 printk(KERN_DEBUG "dst_total: %d delayed: %d work_perf: %d"
152 " expires: %lu elapsed: %lu us\n",
153 atomic_read(&dst_total), delayed, work_performed,
154 expires,
155 elapsed.tv_sec * USEC_PER_SEC +
156 elapsed.tv_nsec / NSEC_PER_USEC);
157#endif
158} 143}
159 144
160int dst_discard(struct sk_buff *skb) 145int dst_discard(struct sk_buff *skb)
@@ -166,7 +151,8 @@ EXPORT_SYMBOL(dst_discard);
166 151
167const u32 dst_default_metrics[RTAX_MAX]; 152const u32 dst_default_metrics[RTAX_MAX];
168 153
169void *dst_alloc(struct dst_ops *ops, int initial_ref) 154void *dst_alloc(struct dst_ops *ops, struct net_device *dev,
155 int initial_ref, int initial_obsolete, int flags)
170{ 156{
171 struct dst_entry *dst; 157 struct dst_entry *dst;
172 158
@@ -174,18 +160,36 @@ void *dst_alloc(struct dst_ops *ops, int initial_ref)
174 if (ops->gc(ops)) 160 if (ops->gc(ops))
175 return NULL; 161 return NULL;
176 } 162 }
177 dst = kmem_cache_zalloc(ops->kmem_cachep, GFP_ATOMIC); 163 dst = kmem_cache_alloc(ops->kmem_cachep, GFP_ATOMIC);
178 if (!dst) 164 if (!dst)
179 return NULL; 165 return NULL;
180 atomic_set(&dst->__refcnt, initial_ref); 166 dst->child = NULL;
167 dst->dev = dev;
168 if (dev)
169 dev_hold(dev);
181 dst->ops = ops; 170 dst->ops = ops;
182 dst->lastuse = jiffies;
183 dst->path = dst;
184 dst->input = dst->output = dst_discard;
185 dst_init_metrics(dst, dst_default_metrics, true); 171 dst_init_metrics(dst, dst_default_metrics, true);
186#if RT_CACHE_DEBUG >= 2 172 dst->expires = 0UL;
187 atomic_inc(&dst_total); 173 dst->path = dst;
174 dst->neighbour = NULL;
175 dst->hh = NULL;
176#ifdef CONFIG_XFRM
177 dst->xfrm = NULL;
178#endif
179 dst->input = dst_discard;
180 dst->output = dst_discard;
181 dst->error = 0;
182 dst->obsolete = initial_obsolete;
183 dst->header_len = 0;
184 dst->trailer_len = 0;
185#ifdef CONFIG_IP_ROUTE_CLASSID
186 dst->tclassid = 0;
188#endif 187#endif
188 atomic_set(&dst->__refcnt, initial_ref);
189 dst->__use = 0;
190 dst->lastuse = jiffies;
191 dst->flags = flags;
192 dst->next = NULL;
189 dst_entries_add(ops, 1); 193 dst_entries_add(ops, 1);
190 return dst; 194 return dst;
191} 195}
@@ -245,9 +249,6 @@ again:
245 dst->ops->destroy(dst); 249 dst->ops->destroy(dst);
246 if (dst->dev) 250 if (dst->dev)
247 dev_put(dst->dev); 251 dev_put(dst->dev);
248#if RT_CACHE_DEBUG >= 2
249 atomic_dec(&dst_total);
250#endif
251 kmem_cache_free(dst->ops->kmem_cachep, dst); 252 kmem_cache_free(dst->ops->kmem_cachep, dst);
252 253
253 dst = child; 254 dst = child;
@@ -314,7 +315,7 @@ void __dst_destroy_metrics_generic(struct dst_entry *dst, unsigned long old)
314{ 315{
315 unsigned long prev, new; 316 unsigned long prev, new;
316 317
317 new = (unsigned long) dst_default_metrics; 318 new = ((unsigned long) dst_default_metrics) | DST_METRICS_READ_ONLY;
318 prev = cmpxchg(&dst->_metrics, old, new); 319 prev = cmpxchg(&dst->_metrics, old, new);
319 if (prev == old) 320 if (prev == old)
320 kfree(__DST_METRICS_PTR(old)); 321 kfree(__DST_METRICS_PTR(old));
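
dst_alloc() now takes the device, initial refcount, initial obsolete value and flags, holds the device reference itself, and initializes every field explicitly rather than relying on kmem_cache_zalloc(). A protocol caller moves its setup into the call, roughly (the obsolete/flag values here are illustrative, not ipv4's actual ones):

	struct rtable *rt;

	rt = dst_alloc(&ipv4_dst_ops, dev, 1, 0, DST_HOST);
	if (!rt)
		return NULL;
	/* rt->dst.dev is already set and referenced; no separate dev_hold() needed */
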
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index 74ead9eca126..fd14116ad7f0 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -21,6 +21,8 @@
21#include <linux/uaccess.h> 21#include <linux/uaccess.h>
22#include <linux/vmalloc.h> 22#include <linux/vmalloc.h>
23#include <linux/slab.h> 23#include <linux/slab.h>
24#include <linux/rtnetlink.h>
25#include <linux/sched.h>
24 26
25/* 27/*
26 * Some useful ethtool_ops methods that're device independent. 28 * Some useful ethtool_ops methods that're device independent.
@@ -231,6 +233,29 @@ static int ethtool_set_feature_compat(struct net_device *dev,
231 return 1; 233 return 1;
232} 234}
233 235
236static int ethtool_set_flags_compat(struct net_device *dev,
237 int (*legacy_set)(struct net_device *, u32),
238 struct ethtool_set_features_block *features, u32 mask)
239{
240 u32 value;
241
242 if (!legacy_set)
243 return 0;
244
245 if (!(features[0].valid & mask))
246 return 0;
247
248 value = dev->features & ~features[0].valid;
249 value |= features[0].requested;
250
251 features[0].valid &= ~mask;
252
253 if (legacy_set(dev, value & mask) < 0)
254 netdev_info(dev, "Legacy flags change failed\n");
255
256 return 1;
257}
258
234static int ethtool_set_features_compat(struct net_device *dev, 259static int ethtool_set_features_compat(struct net_device *dev,
235 struct ethtool_set_features_block *features) 260 struct ethtool_set_features_block *features)
236{ 261{
@@ -247,7 +272,7 @@ static int ethtool_set_features_compat(struct net_device *dev,
247 features, NETIF_F_ALL_TSO); 272 features, NETIF_F_ALL_TSO);
248 compat |= ethtool_set_feature_compat(dev, dev->ethtool_ops->set_rx_csum, 273 compat |= ethtool_set_feature_compat(dev, dev->ethtool_ops->set_rx_csum,
249 features, NETIF_F_RXCSUM); 274 features, NETIF_F_RXCSUM);
250 compat |= ethtool_set_feature_compat(dev, dev->ethtool_ops->set_flags, 275 compat |= ethtool_set_flags_compat(dev, dev->ethtool_ops->set_flags,
251 features, flags_dup_features); 276 features, flags_dup_features);
252 277
253 return compat; 278 return compat;
@@ -317,7 +342,7 @@ static int ethtool_set_features(struct net_device *dev, void __user *useraddr)
317 342
318 dev->wanted_features &= ~features[0].valid; 343 dev->wanted_features &= ~features[0].valid;
319 dev->wanted_features |= features[0].valid & features[0].requested; 344 dev->wanted_features |= features[0].valid & features[0].requested;
320 netdev_update_features(dev); 345 __netdev_update_features(dev);
321 346
322 if ((dev->wanted_features ^ dev->features) & features[0].valid) 347 if ((dev->wanted_features ^ dev->features) & features[0].valid)
323 ret |= ETHTOOL_F_WISH; 348 ret |= ETHTOOL_F_WISH;
@@ -330,7 +355,7 @@ static const char netdev_features_strings[ETHTOOL_DEV_FEATURE_WORDS * 32][ETH_GS
330 /* NETIF_F_IP_CSUM */ "tx-checksum-ipv4", 355 /* NETIF_F_IP_CSUM */ "tx-checksum-ipv4",
331 /* NETIF_F_NO_CSUM */ "tx-checksum-unneeded", 356 /* NETIF_F_NO_CSUM */ "tx-checksum-unneeded",
332 /* NETIF_F_HW_CSUM */ "tx-checksum-ip-generic", 357 /* NETIF_F_HW_CSUM */ "tx-checksum-ip-generic",
333 /* NETIF_F_IPV6_CSUM */ "tx_checksum-ipv6", 358 /* NETIF_F_IPV6_CSUM */ "tx-checksum-ipv6",
334 /* NETIF_F_HIGHDMA */ "highdma", 359 /* NETIF_F_HIGHDMA */ "highdma",
335 /* NETIF_F_FRAGLIST */ "tx-scatter-gather-fraglist", 360 /* NETIF_F_FRAGLIST */ "tx-scatter-gather-fraglist",
336 /* NETIF_F_HW_VLAN_TX */ "tx-vlan-hw-insert", 361 /* NETIF_F_HW_VLAN_TX */ "tx-vlan-hw-insert",
@@ -359,8 +384,8 @@ static const char netdev_features_strings[ETHTOOL_DEV_FEATURE_WORDS * 32][ETH_GS
359 /* NETIF_F_NTUPLE */ "rx-ntuple-filter", 384 /* NETIF_F_NTUPLE */ "rx-ntuple-filter",
360 /* NETIF_F_RXHASH */ "rx-hashing", 385 /* NETIF_F_RXHASH */ "rx-hashing",
361 /* NETIF_F_RXCSUM */ "rx-checksum", 386 /* NETIF_F_RXCSUM */ "rx-checksum",
362 "", 387 /* NETIF_F_NOCACHE_COPY */ "tx-nocache-copy",
363 "", 388 /* NETIF_F_LOOPBACK */ "loopback",
364}; 389};
365 390
366static int __ethtool_get_sset_count(struct net_device *dev, int sset) 391static int __ethtool_get_sset_count(struct net_device *dev, int sset)
@@ -499,7 +524,7 @@ static int ethtool_set_one_feature(struct net_device *dev,
499 else 524 else
500 dev->wanted_features &= ~mask; 525 dev->wanted_features &= ~mask;
501 526
502 netdev_update_features(dev); 527 __netdev_update_features(dev);
503 return 0; 528 return 0;
504 } 529 }
505 530
@@ -544,14 +569,14 @@ int __ethtool_set_flags(struct net_device *dev, u32 data)
544 } 569 }
545 570
546 /* allow changing only bits set in hw_features */ 571 /* allow changing only bits set in hw_features */
547 changed = (data ^ dev->wanted_features) & flags_dup_features; 572 changed = (data ^ dev->features) & flags_dup_features;
548 if (changed & ~dev->hw_features) 573 if (changed & ~dev->hw_features)
549 return (changed & dev->hw_features) ? -EINVAL : -EOPNOTSUPP; 574 return (changed & dev->hw_features) ? -EINVAL : -EOPNOTSUPP;
550 575
551 dev->wanted_features = 576 dev->wanted_features =
552 (dev->wanted_features & ~changed) | data; 577 (dev->wanted_features & ~changed) | (data & dev->hw_features);
553 578
554 netdev_update_features(dev); 579 __netdev_update_features(dev);
555 580
556 return 0; 581 return 0;
557} 582}
@@ -908,6 +933,9 @@ static noinline_for_stack int ethtool_set_rx_ntuple(struct net_device *dev,
908 struct ethtool_rx_ntuple_flow_spec_container *fsc = NULL; 933 struct ethtool_rx_ntuple_flow_spec_container *fsc = NULL;
909 int ret; 934 int ret;
910 935
936 if (!ops->set_rx_ntuple)
937 return -EOPNOTSUPP;
938
911 if (!(dev->features & NETIF_F_NTUPLE)) 939 if (!(dev->features & NETIF_F_NTUPLE))
912 return -EINVAL; 940 return -EINVAL;
913 941
@@ -1441,6 +1469,35 @@ static int ethtool_set_ringparam(struct net_device *dev, void __user *useraddr)
1441 return dev->ethtool_ops->set_ringparam(dev, &ringparam); 1469 return dev->ethtool_ops->set_ringparam(dev, &ringparam);
1442} 1470}
1443 1471
1472static noinline_for_stack int ethtool_get_channels(struct net_device *dev,
1473 void __user *useraddr)
1474{
1475 struct ethtool_channels channels = { .cmd = ETHTOOL_GCHANNELS };
1476
1477 if (!dev->ethtool_ops->get_channels)
1478 return -EOPNOTSUPP;
1479
1480 dev->ethtool_ops->get_channels(dev, &channels);
1481
1482 if (copy_to_user(useraddr, &channels, sizeof(channels)))
1483 return -EFAULT;
1484 return 0;
1485}
1486
1487static noinline_for_stack int ethtool_set_channels(struct net_device *dev,
1488 void __user *useraddr)
1489{
1490 struct ethtool_channels channels;
1491
1492 if (!dev->ethtool_ops->set_channels)
1493 return -EOPNOTSUPP;
1494
1495 if (copy_from_user(&channels, useraddr, sizeof(channels)))
1496 return -EFAULT;
1497
1498 return dev->ethtool_ops->set_channels(dev, &channels);
1499}
1500
1444static int ethtool_get_pauseparam(struct net_device *dev, void __user *useraddr) 1501static int ethtool_get_pauseparam(struct net_device *dev, void __user *useraddr)
1445{ 1502{
1446 struct ethtool_pauseparam pauseparam = { ETHTOOL_GPAUSEPARAM }; 1503 struct ethtool_pauseparam pauseparam = { ETHTOOL_GPAUSEPARAM };
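
ETHTOOL_GCHANNELS / ETHTOOL_SCHANNELS map directly onto the two new ethtool_ops. A driver-side sketch (the foo_* names, FOO_MAX_QUEUES limit and private fields are hypothetical):

	static void foo_get_channels(struct net_device *dev,
				     struct ethtool_channels *ch)
	{
		struct foo_priv *priv = netdev_priv(dev);

		ch->max_combined = FOO_MAX_QUEUES;
		ch->combined_count = priv->num_queues;
	}

	static int foo_set_channels(struct net_device *dev,
				    struct ethtool_channels *ch)
	{
		if (!ch->combined_count || ch->combined_count > FOO_MAX_QUEUES)
			return -EINVAL;
		return foo_reconfigure_queues(dev, ch->combined_count);
	}
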
@@ -1618,14 +1675,60 @@ out:
1618static int ethtool_phys_id(struct net_device *dev, void __user *useraddr) 1675static int ethtool_phys_id(struct net_device *dev, void __user *useraddr)
1619{ 1676{
1620 struct ethtool_value id; 1677 struct ethtool_value id;
1678 static bool busy;
1679 int rc;
1621 1680
1622 if (!dev->ethtool_ops->phys_id) 1681 if (!dev->ethtool_ops->set_phys_id)
1623 return -EOPNOTSUPP; 1682 return -EOPNOTSUPP;
1624 1683
1684 if (busy)
1685 return -EBUSY;
1686
1625 if (copy_from_user(&id, useraddr, sizeof(id))) 1687 if (copy_from_user(&id, useraddr, sizeof(id)))
1626 return -EFAULT; 1688 return -EFAULT;
1627 1689
1628 return dev->ethtool_ops->phys_id(dev, id.data); 1690 rc = dev->ethtool_ops->set_phys_id(dev, ETHTOOL_ID_ACTIVE);
1691 if (rc < 0)
1692 return rc;
1693
1694 /* Drop the RTNL lock while waiting, but prevent reentry or
1695 * removal of the device.
1696 */
1697 busy = true;
1698 dev_hold(dev);
1699 rtnl_unlock();
1700
1701 if (rc == 0) {
1702 /* Driver will handle this itself */
1703 schedule_timeout_interruptible(
1704 id.data ? (id.data * HZ) : MAX_SCHEDULE_TIMEOUT);
1705 } else {
1706 /* Driver expects to be called at twice the frequency in rc */
1707 int n = rc * 2, i, interval = HZ / n;
1708
1709 /* Count down seconds */
1710 do {
1711 /* Count down iterations per second */
1712 i = n;
1713 do {
1714 rtnl_lock();
1715 rc = dev->ethtool_ops->set_phys_id(dev,
1716 (i & 1) ? ETHTOOL_ID_OFF : ETHTOOL_ID_ON);
1717 rtnl_unlock();
1718 if (rc)
1719 break;
1720 schedule_timeout_interruptible(interval);
1721 } while (!signal_pending(current) && --i != 0);
1722 } while (!signal_pending(current) &&
1723 (id.data == 0 || --id.data != 0));
1724 }
1725
1726 rtnl_lock();
1727 dev_put(dev);
1728 busy = false;
1729
1730 (void)dev->ethtool_ops->set_phys_id(dev, ETHTOOL_ID_INACTIVE);
1731 return rc;
1629} 1732}
1630 1733
1631static int ethtool_get_stats(struct net_device *dev, void __user *useraddr) 1734static int ethtool_get_stats(struct net_device *dev, void __user *useraddr)
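
ethtool_phys_id() now drives identify-blinking from the core: set_phys_id(dev, ETHTOOL_ID_ACTIVE) returns 0 if the driver blinks on its own, or the blink frequency, in which case the core calls back with ETHTOOL_ID_ON/ETHTOOL_ID_OFF at twice that rate and finishes with ETHTOOL_ID_INACTIVE. A minimal callback could look like this (the LED register helpers are placeholders):

	static int foo_set_phys_id(struct net_device *dev,
				   enum ethtool_phys_id_state state)
	{
		switch (state) {
		case ETHTOOL_ID_ACTIVE:
			return 2;			/* ask the core to blink at 2 Hz */
		case ETHTOOL_ID_ON:
			foo_led_write(dev, true);
			break;
		case ETHTOOL_ID_OFF:
			foo_led_write(dev, false);
			break;
		case ETHTOOL_ID_INACTIVE:
			foo_led_restore(dev);		/* back to normal operation */
			break;
		}
		return 0;
	}
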
@@ -1743,6 +1846,87 @@ static noinline_for_stack int ethtool_flash_device(struct net_device *dev,
1743 return dev->ethtool_ops->flash_device(dev, &efl); 1846 return dev->ethtool_ops->flash_device(dev, &efl);
1744} 1847}
1745 1848
1849static int ethtool_set_dump(struct net_device *dev,
1850 void __user *useraddr)
1851{
1852 struct ethtool_dump dump;
1853
1854 if (!dev->ethtool_ops->set_dump)
1855 return -EOPNOTSUPP;
1856
1857 if (copy_from_user(&dump, useraddr, sizeof(dump)))
1858 return -EFAULT;
1859
1860 return dev->ethtool_ops->set_dump(dev, &dump);
1861}
1862
1863static int ethtool_get_dump_flag(struct net_device *dev,
1864 void __user *useraddr)
1865{
1866 int ret;
1867 struct ethtool_dump dump;
1868 const struct ethtool_ops *ops = dev->ethtool_ops;
1869
1870 if (!dev->ethtool_ops->get_dump_flag)
1871 return -EOPNOTSUPP;
1872
1873 if (copy_from_user(&dump, useraddr, sizeof(dump)))
1874 return -EFAULT;
1875
1876 ret = ops->get_dump_flag(dev, &dump);
1877 if (ret)
1878 return ret;
1879
1880 if (copy_to_user(useraddr, &dump, sizeof(dump)))
1881 return -EFAULT;
1882 return 0;
1883}
1884
1885static int ethtool_get_dump_data(struct net_device *dev,
1886 void __user *useraddr)
1887{
1888 int ret;
1889 __u32 len;
1890 struct ethtool_dump dump, tmp;
1891 const struct ethtool_ops *ops = dev->ethtool_ops;
1892 void *data = NULL;
1893
1894 if (!dev->ethtool_ops->get_dump_data ||
1895 !dev->ethtool_ops->get_dump_flag)
1896 return -EOPNOTSUPP;
1897
1898 if (copy_from_user(&dump, useraddr, sizeof(dump)))
1899 return -EFAULT;
1900
1901 memset(&tmp, 0, sizeof(tmp));
1902 tmp.cmd = ETHTOOL_GET_DUMP_FLAG;
1903 ret = ops->get_dump_flag(dev, &tmp);
1904 if (ret)
1905 return ret;
1906
1907 len = (tmp.len > dump.len) ? dump.len : tmp.len;
1908 if (!len)
1909 return -EFAULT;
1910
1911 data = vzalloc(tmp.len);
1912 if (!data)
1913 return -ENOMEM;
1914 ret = ops->get_dump_data(dev, &dump, data);
1915 if (ret)
1916 goto out;
1917
1918 if (copy_to_user(useraddr, &dump, sizeof(dump))) {
1919 ret = -EFAULT;
1920 goto out;
1921 }
1922 useraddr += offsetof(struct ethtool_dump, data);
1923 if (copy_to_user(useraddr, data, len))
1924 ret = -EFAULT;
1925out:
1926 vfree(data);
1927 return ret;
1928}
1929
1746/* The main entry point in this file. Called from net/core/dev.c */ 1930/* The main entry point in this file. Called from net/core/dev.c */
1747 1931
1748int dev_ethtool(struct net *net, struct ifreq *ifr) 1932int dev_ethtool(struct net *net, struct ifreq *ifr)
@@ -1953,6 +2137,21 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
1953 case ETHTOOL_SGRO: 2137 case ETHTOOL_SGRO:
1954 rc = ethtool_set_one_feature(dev, useraddr, ethcmd); 2138 rc = ethtool_set_one_feature(dev, useraddr, ethcmd);
1955 break; 2139 break;
2140 case ETHTOOL_GCHANNELS:
2141 rc = ethtool_get_channels(dev, useraddr);
2142 break;
2143 case ETHTOOL_SCHANNELS:
2144 rc = ethtool_set_channels(dev, useraddr);
2145 break;
2146 case ETHTOOL_SET_DUMP:
2147 rc = ethtool_set_dump(dev, useraddr);
2148 break;
2149 case ETHTOOL_GET_DUMP_FLAG:
2150 rc = ethtool_get_dump_flag(dev, useraddr);
2151 break;
2152 case ETHTOOL_GET_DUMP_DATA:
2153 rc = ethtool_get_dump_data(dev, useraddr);
2154 break;
1956 default: 2155 default:
1957 rc = -EOPNOTSUPP; 2156 rc = -EOPNOTSUPP;
1958 } 2157 }
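
For the new firmware-dump ioctls the core sizes its buffer from get_dump_flag() and copies back at most the length the user asked for, so a driver only has to report its dump size and fill the buffer. A sketch with hypothetical foo_* helpers:

	static int foo_get_dump_flag(struct net_device *dev, struct ethtool_dump *dump)
	{
		struct foo_priv *priv = netdev_priv(dev);

		dump->version = FOO_DUMP_VERSION;
		dump->flag = priv->dump_flag;
		dump->len = foo_dump_size(priv);
		return 0;
	}

	static int foo_get_dump_data(struct net_device *dev,
				     struct ethtool_dump *dump, void *buf)
	{
		/* 'buf' was allocated with the length reported by get_dump_flag() */
		return foo_dump_copy(netdev_priv(dev), buf, dump->len);
	}
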
diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c
index 8248ebb5891d..008dc70b064b 100644
--- a/net/core/fib_rules.c
+++ b/net/core/fib_rules.c
@@ -590,7 +590,8 @@ static int dump_rules(struct sk_buff *skb, struct netlink_callback *cb,
590 int idx = 0; 590 int idx = 0;
591 struct fib_rule *rule; 591 struct fib_rule *rule;
592 592
593 list_for_each_entry(rule, &ops->rules_list, list) { 593 rcu_read_lock();
594 list_for_each_entry_rcu(rule, &ops->rules_list, list) {
594 if (idx < cb->args[1]) 595 if (idx < cb->args[1])
595 goto skip; 596 goto skip;
596 597
@@ -601,6 +602,7 @@ static int dump_rules(struct sk_buff *skb, struct netlink_callback *cb,
601skip: 602skip:
602 idx++; 603 idx++;
603 } 604 }
605 rcu_read_unlock();
604 cb->args[1] = idx; 606 cb->args[1] = idx;
605 rules_ops_put(ops); 607 rules_ops_put(ops);
606 608
diff --git a/net/core/filter.c b/net/core/filter.c
index afb8afb066bb..36f975fa87cb 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -38,65 +38,7 @@
38#include <asm/unaligned.h> 38#include <asm/unaligned.h>
39#include <linux/filter.h> 39#include <linux/filter.h>
40#include <linux/reciprocal_div.h> 40#include <linux/reciprocal_div.h>
41 41#include <linux/ratelimit.h>
42enum {
43 BPF_S_RET_K = 1,
44 BPF_S_RET_A,
45 BPF_S_ALU_ADD_K,
46 BPF_S_ALU_ADD_X,
47 BPF_S_ALU_SUB_K,
48 BPF_S_ALU_SUB_X,
49 BPF_S_ALU_MUL_K,
50 BPF_S_ALU_MUL_X,
51 BPF_S_ALU_DIV_X,
52 BPF_S_ALU_AND_K,
53 BPF_S_ALU_AND_X,
54 BPF_S_ALU_OR_K,
55 BPF_S_ALU_OR_X,
56 BPF_S_ALU_LSH_K,
57 BPF_S_ALU_LSH_X,
58 BPF_S_ALU_RSH_K,
59 BPF_S_ALU_RSH_X,
60 BPF_S_ALU_NEG,
61 BPF_S_LD_W_ABS,
62 BPF_S_LD_H_ABS,
63 BPF_S_LD_B_ABS,
64 BPF_S_LD_W_LEN,
65 BPF_S_LD_W_IND,
66 BPF_S_LD_H_IND,
67 BPF_S_LD_B_IND,
68 BPF_S_LD_IMM,
69 BPF_S_LDX_W_LEN,
70 BPF_S_LDX_B_MSH,
71 BPF_S_LDX_IMM,
72 BPF_S_MISC_TAX,
73 BPF_S_MISC_TXA,
74 BPF_S_ALU_DIV_K,
75 BPF_S_LD_MEM,
76 BPF_S_LDX_MEM,
77 BPF_S_ST,
78 BPF_S_STX,
79 BPF_S_JMP_JA,
80 BPF_S_JMP_JEQ_K,
81 BPF_S_JMP_JEQ_X,
82 BPF_S_JMP_JGE_K,
83 BPF_S_JMP_JGE_X,
84 BPF_S_JMP_JGT_K,
85 BPF_S_JMP_JGT_X,
86 BPF_S_JMP_JSET_K,
87 BPF_S_JMP_JSET_X,
88 /* Ancillary data */
89 BPF_S_ANC_PROTOCOL,
90 BPF_S_ANC_PKTTYPE,
91 BPF_S_ANC_IFINDEX,
92 BPF_S_ANC_NLATTR,
93 BPF_S_ANC_NLATTR_NEST,
94 BPF_S_ANC_MARK,
95 BPF_S_ANC_QUEUE,
96 BPF_S_ANC_HATYPE,
97 BPF_S_ANC_RXHASH,
98 BPF_S_ANC_CPU,
99};
100 42
101/* No hurry in this branch */ 43/* No hurry in this branch */
102static void *__load_pointer(const struct sk_buff *skb, int k, unsigned int size) 44static void *__load_pointer(const struct sk_buff *skb, int k, unsigned int size)
@@ -145,7 +87,7 @@ int sk_filter(struct sock *sk, struct sk_buff *skb)
145 rcu_read_lock(); 87 rcu_read_lock();
146 filter = rcu_dereference(sk->sk_filter); 88 filter = rcu_dereference(sk->sk_filter);
147 if (filter) { 89 if (filter) {
148 unsigned int pkt_len = sk_run_filter(skb, filter->insns); 90 unsigned int pkt_len = SK_RUN_FILTER(filter, skb);
149 91
150 err = pkt_len ? pskb_trim(skb, pkt_len) : -EPERM; 92 err = pkt_len ? pskb_trim(skb, pkt_len) : -EPERM;
151 } 93 }
@@ -409,7 +351,9 @@ load_b:
409 continue; 351 continue;
410 } 352 }
411 default: 353 default:
412 WARN_ON(1); 354 WARN_RATELIMIT(1, "Unknown code:%u jt:%u tf:%u k:%u\n",
355 fentry->code, fentry->jt,
356 fentry->jf, fentry->k);
413 return 0; 357 return 0;
414 } 358 }
415 } 359 }
@@ -638,6 +582,7 @@ void sk_filter_release_rcu(struct rcu_head *rcu)
638{ 582{
639 struct sk_filter *fp = container_of(rcu, struct sk_filter, rcu); 583 struct sk_filter *fp = container_of(rcu, struct sk_filter, rcu);
640 584
585 bpf_jit_free(fp);
641 kfree(fp); 586 kfree(fp);
642} 587}
643EXPORT_SYMBOL(sk_filter_release_rcu); 588EXPORT_SYMBOL(sk_filter_release_rcu);
@@ -672,6 +617,7 @@ int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
672 617
673 atomic_set(&fp->refcnt, 1); 618 atomic_set(&fp->refcnt, 1);
674 fp->len = fprog->len; 619 fp->len = fprog->len;
620 fp->bpf_func = sk_run_filter;
675 621
676 err = sk_chk_filter(fp->insns, fp->len); 622 err = sk_chk_filter(fp->insns, fp->len);
677 if (err) { 623 if (err) {
@@ -679,6 +625,8 @@ int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
679 return err; 625 return err;
680 } 626 }
681 627
628 bpf_jit_compile(fp);
629
682 old_fp = rcu_dereference_protected(sk->sk_filter, 630 old_fp = rcu_dereference_protected(sk->sk_filter,
683 sock_owned_by_user(sk)); 631 sock_owned_by_user(sk));
684 rcu_assign_pointer(sk->sk_filter, fp); 632 rcu_assign_pointer(sk->sk_filter, fp);
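
sk_run_filter() is no longer hard-wired into the fast path: each sk_filter carries a bpf_func pointer, initialized to sk_run_filter and possibly repointed at generated code by bpf_jit_compile(), and SK_RUN_FILTER() dispatches through it. Conceptually (the real definitions live in <linux/filter.h>; architectures without a JIT keep no-op hooks):

	#define SK_RUN_FILTER(fp, skb)	(*(fp)->bpf_func)(skb, (fp)->insns)

	/* no CONFIG_BPF_JIT: compile/free collapse to nothing and
	 * fp->bpf_func stays at sk_run_filter */
	static inline void bpf_jit_compile(struct sk_filter *fp) { }
	static inline void bpf_jit_free(struct sk_filter *fp) { }
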
diff --git a/net/core/gen_estimator.c b/net/core/gen_estimator.c
index 7c2373321b74..43b03dd71e85 100644
--- a/net/core/gen_estimator.c
+++ b/net/core/gen_estimator.c
@@ -249,13 +249,6 @@ int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
249} 249}
250EXPORT_SYMBOL(gen_new_estimator); 250EXPORT_SYMBOL(gen_new_estimator);
251 251
252static void __gen_kill_estimator(struct rcu_head *head)
253{
254 struct gen_estimator *e = container_of(head,
255 struct gen_estimator, e_rcu);
256 kfree(e);
257}
258
259/** 252/**
260 * gen_kill_estimator - remove a rate estimator 253 * gen_kill_estimator - remove a rate estimator
261 * @bstats: basic statistics 254 * @bstats: basic statistics
@@ -279,7 +272,7 @@ void gen_kill_estimator(struct gnet_stats_basic_packed *bstats,
279 write_unlock(&est_lock); 272 write_unlock(&est_lock);
280 273
281 list_del_rcu(&e->list); 274 list_del_rcu(&e->list);
282 call_rcu(&e->e_rcu, __gen_kill_estimator); 275 kfree_rcu(e, e_rcu);
283 } 276 }
284 spin_unlock_bh(&est_tree_lock); 277 spin_unlock_bh(&est_tree_lock);
285} 278}
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index 5ceb257e860c..33d2a1fba131 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -28,6 +28,7 @@
28static const char fmt_hex[] = "%#x\n"; 28static const char fmt_hex[] = "%#x\n";
29static const char fmt_long_hex[] = "%#lx\n"; 29static const char fmt_long_hex[] = "%#lx\n";
30static const char fmt_dec[] = "%d\n"; 30static const char fmt_dec[] = "%d\n";
31static const char fmt_udec[] = "%u\n";
31static const char fmt_ulong[] = "%lu\n"; 32static const char fmt_ulong[] = "%lu\n";
32static const char fmt_u64[] = "%llu\n"; 33static const char fmt_u64[] = "%llu\n";
33 34
@@ -145,13 +146,10 @@ static ssize_t show_speed(struct device *dev,
145 if (!rtnl_trylock()) 146 if (!rtnl_trylock())
146 return restart_syscall(); 147 return restart_syscall();
147 148
148 if (netif_running(netdev) && 149 if (netif_running(netdev)) {
149 netdev->ethtool_ops && 150 struct ethtool_cmd cmd;
150 netdev->ethtool_ops->get_settings) { 151 if (!dev_ethtool_get_settings(netdev, &cmd))
151 struct ethtool_cmd cmd = { ETHTOOL_GSET }; 152 ret = sprintf(buf, fmt_udec, ethtool_cmd_speed(&cmd));
152
153 if (!netdev->ethtool_ops->get_settings(netdev, &cmd))
154 ret = sprintf(buf, fmt_dec, ethtool_cmd_speed(&cmd));
155 } 153 }
156 rtnl_unlock(); 154 rtnl_unlock();
157 return ret; 155 return ret;
@@ -166,13 +164,11 @@ static ssize_t show_duplex(struct device *dev,
166 if (!rtnl_trylock()) 164 if (!rtnl_trylock())
167 return restart_syscall(); 165 return restart_syscall();
168 166
169 if (netif_running(netdev) && 167 if (netif_running(netdev)) {
170 netdev->ethtool_ops && 168 struct ethtool_cmd cmd;
171 netdev->ethtool_ops->get_settings) { 169 if (!dev_ethtool_get_settings(netdev, &cmd))
172 struct ethtool_cmd cmd = { ETHTOOL_GSET }; 170 ret = sprintf(buf, "%s\n",
173 171 cmd.duplex ? "full" : "half");
174 if (!netdev->ethtool_ops->get_settings(netdev, &cmd))
175 ret = sprintf(buf, "%s\n", cmd.duplex ? "full" : "half");
176 } 172 }
177 rtnl_unlock(); 173 rtnl_unlock();
178 return ret; 174 return ret;
@@ -565,13 +561,6 @@ static ssize_t show_rps_map(struct netdev_rx_queue *queue,
565 return len; 561 return len;
566} 562}
567 563
568static void rps_map_release(struct rcu_head *rcu)
569{
570 struct rps_map *map = container_of(rcu, struct rps_map, rcu);
571
572 kfree(map);
573}
574
575static ssize_t store_rps_map(struct netdev_rx_queue *queue, 564static ssize_t store_rps_map(struct netdev_rx_queue *queue,
576 struct rx_queue_attribute *attribute, 565 struct rx_queue_attribute *attribute,
577 const char *buf, size_t len) 566 const char *buf, size_t len)
@@ -619,7 +608,7 @@ static ssize_t store_rps_map(struct netdev_rx_queue *queue,
619 spin_unlock(&rps_map_lock); 608 spin_unlock(&rps_map_lock);
620 609
621 if (old_map) 610 if (old_map)
622 call_rcu(&old_map->rcu, rps_map_release); 611 kfree_rcu(old_map, rcu);
623 612
624 free_cpumask_var(mask); 613 free_cpumask_var(mask);
625 return len; 614 return len;
@@ -728,7 +717,7 @@ static void rx_queue_release(struct kobject *kobj)
728 map = rcu_dereference_raw(queue->rps_map); 717 map = rcu_dereference_raw(queue->rps_map);
729 if (map) { 718 if (map) {
730 RCU_INIT_POINTER(queue->rps_map, NULL); 719 RCU_INIT_POINTER(queue->rps_map, NULL);
731 call_rcu(&map->rcu, rps_map_release); 720 kfree_rcu(map, rcu);
732 } 721 }
733 722
734 flow_table = rcu_dereference_raw(queue->rps_flow_table); 723 flow_table = rcu_dereference_raw(queue->rps_flow_table);
@@ -898,21 +887,6 @@ static ssize_t show_xps_map(struct netdev_queue *queue,
898 return len; 887 return len;
899} 888}
900 889
901static void xps_map_release(struct rcu_head *rcu)
902{
903 struct xps_map *map = container_of(rcu, struct xps_map, rcu);
904
905 kfree(map);
906}
907
908static void xps_dev_maps_release(struct rcu_head *rcu)
909{
910 struct xps_dev_maps *dev_maps =
911 container_of(rcu, struct xps_dev_maps, rcu);
912
913 kfree(dev_maps);
914}
915
916static DEFINE_MUTEX(xps_map_mutex); 890static DEFINE_MUTEX(xps_map_mutex);
917#define xmap_dereference(P) \ 891#define xmap_dereference(P) \
918 rcu_dereference_protected((P), lockdep_is_held(&xps_map_mutex)) 892 rcu_dereference_protected((P), lockdep_is_held(&xps_map_mutex))
@@ -968,7 +942,7 @@ static ssize_t store_xps_map(struct netdev_queue *queue,
968 } else 942 } else
969 pos = map_len = alloc_len = 0; 943 pos = map_len = alloc_len = 0;
970 944
971 need_set = cpu_isset(cpu, *mask) && cpu_online(cpu); 945 need_set = cpumask_test_cpu(cpu, mask) && cpu_online(cpu);
972#ifdef CONFIG_NUMA 946#ifdef CONFIG_NUMA
973 if (need_set) { 947 if (need_set) {
974 if (numa_node == -2) 948 if (numa_node == -2)
@@ -1009,7 +983,7 @@ static ssize_t store_xps_map(struct netdev_queue *queue,
1009 map = dev_maps ? 983 map = dev_maps ?
1010 xmap_dereference(dev_maps->cpu_map[cpu]) : NULL; 984 xmap_dereference(dev_maps->cpu_map[cpu]) : NULL;
1011 if (map && xmap_dereference(new_dev_maps->cpu_map[cpu]) != map) 985 if (map && xmap_dereference(new_dev_maps->cpu_map[cpu]) != map)
1012 call_rcu(&map->rcu, xps_map_release); 986 kfree_rcu(map, rcu);
1013 if (new_dev_maps->cpu_map[cpu]) 987 if (new_dev_maps->cpu_map[cpu])
1014 nonempty = 1; 988 nonempty = 1;
1015 } 989 }
@@ -1022,7 +996,7 @@ static ssize_t store_xps_map(struct netdev_queue *queue,
1022 } 996 }
1023 997
1024 if (dev_maps) 998 if (dev_maps)
1025 call_rcu(&dev_maps->rcu, xps_dev_maps_release); 999 kfree_rcu(dev_maps, rcu);
1026 1000
1027 netdev_queue_numa_node_write(queue, (numa_node >= 0) ? numa_node : 1001 netdev_queue_numa_node_write(queue, (numa_node >= 0) ? numa_node :
1028 NUMA_NO_NODE); 1002 NUMA_NO_NODE);
@@ -1084,7 +1058,7 @@ static void netdev_queue_release(struct kobject *kobj)
1084 else { 1058 else {
1085 RCU_INIT_POINTER(dev_maps->cpu_map[i], 1059 RCU_INIT_POINTER(dev_maps->cpu_map[i],
1086 NULL); 1060 NULL);
1087 call_rcu(&map->rcu, xps_map_release); 1061 kfree_rcu(map, rcu);
1088 map = NULL; 1062 map = NULL;
1089 } 1063 }
1090 } 1064 }
@@ -1094,7 +1068,7 @@ static void netdev_queue_release(struct kobject *kobj)
1094 1068
1095 if (!nonempty) { 1069 if (!nonempty) {
1096 RCU_INIT_POINTER(dev->xps_maps, NULL); 1070 RCU_INIT_POINTER(dev->xps_maps, NULL);
1097 call_rcu(&dev_maps->rcu, xps_dev_maps_release); 1071 kfree_rcu(dev_maps, rcu);
1098 } 1072 }
1099 } 1073 }
1100 1074
@@ -1205,9 +1179,14 @@ static void remove_queue_kobjects(struct net_device *net)
1205#endif 1179#endif
1206} 1180}
1207 1181
1208static const void *net_current_ns(void) 1182static void *net_grab_current_ns(void)
1209{ 1183{
1210 return current->nsproxy->net_ns; 1184 struct net *ns = current->nsproxy->net_ns;
1185#ifdef CONFIG_NET_NS
1186 if (ns)
1187 atomic_inc(&ns->passive);
1188#endif
1189 return ns;
1211} 1190}
1212 1191
1213static const void *net_initial_ns(void) 1192static const void *net_initial_ns(void)
@@ -1222,22 +1201,13 @@ static const void *net_netlink_ns(struct sock *sk)
1222 1201
1223struct kobj_ns_type_operations net_ns_type_operations = { 1202struct kobj_ns_type_operations net_ns_type_operations = {
1224 .type = KOBJ_NS_TYPE_NET, 1203 .type = KOBJ_NS_TYPE_NET,
1225 .current_ns = net_current_ns, 1204 .grab_current_ns = net_grab_current_ns,
1226 .netlink_ns = net_netlink_ns, 1205 .netlink_ns = net_netlink_ns,
1227 .initial_ns = net_initial_ns, 1206 .initial_ns = net_initial_ns,
1207 .drop_ns = net_drop_ns,
1228}; 1208};
1229EXPORT_SYMBOL_GPL(net_ns_type_operations); 1209EXPORT_SYMBOL_GPL(net_ns_type_operations);
1230 1210
1231static void net_kobj_ns_exit(struct net *net)
1232{
1233 kobj_ns_exit(KOBJ_NS_TYPE_NET, net);
1234}
1235
1236static struct pernet_operations kobj_net_ops = {
1237 .exit = net_kobj_ns_exit,
1238};
1239
1240
1241#ifdef CONFIG_HOTPLUG 1211#ifdef CONFIG_HOTPLUG
1242static int netdev_uevent(struct device *d, struct kobj_uevent_env *env) 1212static int netdev_uevent(struct device *d, struct kobj_uevent_env *env)
1243{ 1213{
@@ -1365,6 +1335,5 @@ EXPORT_SYMBOL(netdev_class_remove_file);
1365int netdev_kobject_init(void) 1335int netdev_kobject_init(void)
1366{ 1336{
1367 kobj_ns_type_register(&net_ns_type_operations); 1337 kobj_ns_type_register(&net_ns_type_operations);
1368 register_pernet_subsys(&kobj_net_ops);
1369 return class_register(&net_class); 1338 return class_register(&net_class);
1370} 1339}
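The net-sysfs.c hunks above convert RCU callbacks whose only job was to kfree() the object (rps_map_release, xps_map_release, xps_dev_maps_release) into kfree_rcu() calls. A minimal sketch of that conversion, using a hypothetical struct foo that is not a symbol from this diff:

	#include <linux/kernel.h>
	#include <linux/rcupdate.h>
	#include <linux/slab.h>

	struct foo {
		int data;
		struct rcu_head rcu;
	};

	/* Old pattern: a callback that exists only to free the object. */
	static void foo_release(struct rcu_head *head)
	{
		kfree(container_of(head, struct foo, rcu));
	}
	/* ... call_rcu(&old->rcu, foo_release); */

	/* New pattern: kfree_rcu() takes the object and the name of its
	 * struct rcu_head member, so the callback above can be deleted. */
	/* ... kfree_rcu(old, rcu); */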
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index 3f860261c5ee..ea489db1bc23 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -8,6 +8,8 @@
8#include <linux/idr.h> 8#include <linux/idr.h>
9#include <linux/rculist.h> 9#include <linux/rculist.h>
10#include <linux/nsproxy.h> 10#include <linux/nsproxy.h>
11#include <linux/proc_fs.h>
12#include <linux/file.h>
11#include <net/net_namespace.h> 13#include <net/net_namespace.h>
12#include <net/netns/generic.h> 14#include <net/netns/generic.h>
13 15
@@ -27,14 +29,6 @@ EXPORT_SYMBOL(init_net);
27 29
28#define INITIAL_NET_GEN_PTRS 13 /* +1 for len +2 for rcu_head */ 30#define INITIAL_NET_GEN_PTRS 13 /* +1 for len +2 for rcu_head */
29 31
30static void net_generic_release(struct rcu_head *rcu)
31{
32 struct net_generic *ng;
33
34 ng = container_of(rcu, struct net_generic, rcu);
35 kfree(ng);
36}
37
38static int net_assign_generic(struct net *net, int id, void *data) 32static int net_assign_generic(struct net *net, int id, void *data)
39{ 33{
40 struct net_generic *ng, *old_ng; 34 struct net_generic *ng, *old_ng;
@@ -68,7 +62,7 @@ static int net_assign_generic(struct net *net, int id, void *data)
68 memcpy(&ng->ptr, &old_ng->ptr, old_ng->len * sizeof(void*)); 62 memcpy(&ng->ptr, &old_ng->ptr, old_ng->len * sizeof(void*));
69 63
70 rcu_assign_pointer(net->gen, ng); 64 rcu_assign_pointer(net->gen, ng);
71 call_rcu(&old_ng->rcu, net_generic_release); 65 kfree_rcu(old_ng, rcu);
72assign: 66assign:
73 ng->ptr[id - 1] = data; 67 ng->ptr[id - 1] = data;
74 return 0; 68 return 0;
@@ -134,6 +128,7 @@ static __net_init int setup_net(struct net *net)
134 LIST_HEAD(net_exit_list); 128 LIST_HEAD(net_exit_list);
135 129
136 atomic_set(&net->count, 1); 130 atomic_set(&net->count, 1);
131 atomic_set(&net->passive, 1);
137 132
138#ifdef NETNS_REFCNT_DEBUG 133#ifdef NETNS_REFCNT_DEBUG
139 atomic_set(&net->use_count, 0); 134 atomic_set(&net->use_count, 0);
@@ -216,11 +211,21 @@ static void net_free(struct net *net)
216 kmem_cache_free(net_cachep, net); 211 kmem_cache_free(net_cachep, net);
217} 212}
218 213
219static struct net *net_create(void) 214void net_drop_ns(void *p)
215{
216 struct net *ns = p;
217 if (ns && atomic_dec_and_test(&ns->passive))
218 net_free(ns);
219}
220
221struct net *copy_net_ns(unsigned long flags, struct net *old_net)
220{ 222{
221 struct net *net; 223 struct net *net;
222 int rv; 224 int rv;
223 225
226 if (!(flags & CLONE_NEWNET))
227 return get_net(old_net);
228
224 net = net_alloc(); 229 net = net_alloc();
225 if (!net) 230 if (!net)
226 return ERR_PTR(-ENOMEM); 231 return ERR_PTR(-ENOMEM);
@@ -233,19 +238,12 @@ static struct net *net_create(void)
233 } 238 }
234 mutex_unlock(&net_mutex); 239 mutex_unlock(&net_mutex);
235 if (rv < 0) { 240 if (rv < 0) {
236 net_free(net); 241 net_drop_ns(net);
237 return ERR_PTR(rv); 242 return ERR_PTR(rv);
238 } 243 }
239 return net; 244 return net;
240} 245}
241 246
242struct net *copy_net_ns(unsigned long flags, struct net *old_net)
243{
244 if (!(flags & CLONE_NEWNET))
245 return get_net(old_net);
246 return net_create();
247}
248
249static DEFINE_SPINLOCK(cleanup_list_lock); 247static DEFINE_SPINLOCK(cleanup_list_lock);
250static LIST_HEAD(cleanup_list); /* Must hold cleanup_list_lock to touch */ 248static LIST_HEAD(cleanup_list); /* Must hold cleanup_list_lock to touch */
251 249
@@ -296,7 +294,7 @@ static void cleanup_net(struct work_struct *work)
296 /* Finally it is safe to free my network namespace structure */ 294 /* Finally it is safe to free my network namespace structure */
297 list_for_each_entry_safe(net, tmp, &net_exit_list, exit_list) { 295 list_for_each_entry_safe(net, tmp, &net_exit_list, exit_list) {
298 list_del_init(&net->exit_list); 296 list_del_init(&net->exit_list);
299 net_free(net); 297 net_drop_ns(net);
300 } 298 }
301} 299}
302static DECLARE_WORK(net_cleanup_work, cleanup_net); 300static DECLARE_WORK(net_cleanup_work, cleanup_net);
@@ -314,6 +312,26 @@ void __put_net(struct net *net)
314} 312}
315EXPORT_SYMBOL_GPL(__put_net); 313EXPORT_SYMBOL_GPL(__put_net);
316 314
315struct net *get_net_ns_by_fd(int fd)
316{
317 struct proc_inode *ei;
318 struct file *file;
319 struct net *net;
320
321 file = proc_ns_fget(fd);
322 if (IS_ERR(file))
323 return ERR_CAST(file);
324
325 ei = PROC_I(file->f_dentry->d_inode);
326 if (ei->ns_ops == &netns_operations)
327 net = get_net(ei->ns);
328 else
329 net = ERR_PTR(-EINVAL);
330
331 fput(file);
332 return net;
333}
334
317#else 335#else
318struct net *copy_net_ns(unsigned long flags, struct net *old_net) 336struct net *copy_net_ns(unsigned long flags, struct net *old_net)
319{ 337{
@@ -321,6 +339,11 @@ struct net *copy_net_ns(unsigned long flags, struct net *old_net)
321 return ERR_PTR(-EINVAL); 339 return ERR_PTR(-EINVAL);
322 return old_net; 340 return old_net;
323} 341}
342
343struct net *get_net_ns_by_fd(int fd)
344{
345 return ERR_PTR(-EINVAL);
346}
324#endif 347#endif
325 348
326struct net *get_net_ns_by_pid(pid_t pid) 349struct net *get_net_ns_by_pid(pid_t pid)
@@ -573,3 +596,39 @@ void unregister_pernet_device(struct pernet_operations *ops)
573 mutex_unlock(&net_mutex); 596 mutex_unlock(&net_mutex);
574} 597}
575EXPORT_SYMBOL_GPL(unregister_pernet_device); 598EXPORT_SYMBOL_GPL(unregister_pernet_device);
599
600#ifdef CONFIG_NET_NS
601static void *netns_get(struct task_struct *task)
602{
603 struct net *net = NULL;
604 struct nsproxy *nsproxy;
605
606 rcu_read_lock();
607 nsproxy = task_nsproxy(task);
608 if (nsproxy)
609 net = get_net(nsproxy->net_ns);
610 rcu_read_unlock();
611
612 return net;
613}
614
615static void netns_put(void *ns)
616{
617 put_net(ns);
618}
619
620static int netns_install(struct nsproxy *nsproxy, void *ns)
621{
622 put_net(nsproxy->net_ns);
623 nsproxy->net_ns = get_net(ns);
624 return 0;
625}
626
627const struct proc_ns_operations netns_operations = {
628 .name = "net",
629 .type = CLONE_NEWNET,
630 .get = netns_get,
631 .put = netns_put,
632 .install = netns_install,
633};
634#endif
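The net_namespace.c changes introduce a "passive" reference count released through net_drop_ns(), and add get_net_ns_by_fd() plus the netns_operations table behind /proc/<pid>/ns/net. A hedged sketch of how a caller uses the new helper; fd is assumed to be an open descriptor on a /proc/<pid>/ns/net file:

	struct net *net;

	net = get_net_ns_by_fd(fd);	/* ERR_PTR() on failure, else a counted reference */
	if (IS_ERR(net))
		return PTR_ERR(net);
	/* ... operate within "net" ... */
	put_net(net);			/* release the reference taken by get_net_ns_by_fd() */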
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 06be2431753e..18d9cbda3a39 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -539,7 +539,7 @@ int __netpoll_rx(struct sk_buff *skb)
539{ 539{
540 int proto, len, ulen; 540 int proto, len, ulen;
541 int hits = 0; 541 int hits = 0;
542 struct iphdr *iph; 542 const struct iphdr *iph;
543 struct udphdr *uh; 543 struct udphdr *uh;
544 struct netpoll_info *npinfo = skb->dev->npinfo; 544 struct netpoll_info *npinfo = skb->dev->npinfo;
545 struct netpoll *np, *tmp; 545 struct netpoll *np, *tmp;
@@ -698,32 +698,8 @@ int netpoll_parse_options(struct netpoll *np, char *opt)
698 698
699 if (*cur != 0) { 699 if (*cur != 0) {
700 /* MAC address */ 700 /* MAC address */
701 if ((delim = strchr(cur, ':')) == NULL) 701 if (!mac_pton(cur, np->remote_mac))
702 goto parse_failed;
703 *delim = 0;
704 np->remote_mac[0] = simple_strtol(cur, NULL, 16);
705 cur = delim + 1;
706 if ((delim = strchr(cur, ':')) == NULL)
707 goto parse_failed;
708 *delim = 0;
709 np->remote_mac[1] = simple_strtol(cur, NULL, 16);
710 cur = delim + 1;
711 if ((delim = strchr(cur, ':')) == NULL)
712 goto parse_failed;
713 *delim = 0;
714 np->remote_mac[2] = simple_strtol(cur, NULL, 16);
715 cur = delim + 1;
716 if ((delim = strchr(cur, ':')) == NULL)
717 goto parse_failed; 702 goto parse_failed;
718 *delim = 0;
719 np->remote_mac[3] = simple_strtol(cur, NULL, 16);
720 cur = delim + 1;
721 if ((delim = strchr(cur, ':')) == NULL)
722 goto parse_failed;
723 *delim = 0;
724 np->remote_mac[4] = simple_strtol(cur, NULL, 16);
725 cur = delim + 1;
726 np->remote_mac[5] = simple_strtol(cur, NULL, 16);
727 } 703 }
728 704
729 netpoll_print_options(np); 705 netpoll_print_options(np);
@@ -816,6 +792,13 @@ int netpoll_setup(struct netpoll *np)
816 return -ENODEV; 792 return -ENODEV;
817 } 793 }
818 794
795 if (ndev->master) {
796 printk(KERN_ERR "%s: %s is a slave device, aborting.\n",
797 np->name, np->dev_name);
798 err = -EBUSY;
799 goto put;
800 }
801
819 if (!netif_running(ndev)) { 802 if (!netif_running(ndev)) {
820 unsigned long atmost, atleast; 803 unsigned long atmost, atleast;
821 804
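The netpoll.c hunk drops the hand-rolled colon-splitting of the remote MAC in favour of mac_pton(). For context, a hedged sketch of the option string netpoll_parse_options() typically consumes (addresses and ports are illustrative); the trailing MAC field is what now goes through mac_pton():

	/*
	 * "<src_port>@<src_ip>/<dev>,<dst_port>@<dst_ip>/<dst_mac>"
	 * e.g. "6665@10.0.0.1/eth0,6666@10.0.0.2/00:11:22:33:44:55"
	 */
	if (*cur != 0 && !mac_pton(cur, np->remote_mac))
		goto parse_failed;	/* MAC field present but malformed */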
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index aeeece72b72f..f76079cd750c 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -156,6 +156,7 @@
156#include <linux/wait.h> 156#include <linux/wait.h>
157#include <linux/etherdevice.h> 157#include <linux/etherdevice.h>
158#include <linux/kthread.h> 158#include <linux/kthread.h>
159#include <linux/prefetch.h>
159#include <net/net_namespace.h> 160#include <net/net_namespace.h>
160#include <net/checksum.h> 161#include <net/checksum.h>
161#include <net/ipv6.h> 162#include <net/ipv6.h>
@@ -449,7 +450,6 @@ static void pktgen_stop(struct pktgen_thread *t);
449static void pktgen_clear_counters(struct pktgen_dev *pkt_dev); 450static void pktgen_clear_counters(struct pktgen_dev *pkt_dev);
450 451
451static unsigned int scan_ip6(const char *s, char ip[16]); 452static unsigned int scan_ip6(const char *s, char ip[16]);
452static unsigned int fmt_ip6(char *s, const char ip[16]);
453 453
454/* Module parameters, defaults. */ 454/* Module parameters, defaults. */
455static int pg_count_d __read_mostly = 1000; 455static int pg_count_d __read_mostly = 1000;
@@ -556,21 +556,13 @@ static int pktgen_if_show(struct seq_file *seq, void *v)
556 pkt_dev->skb_priority); 556 pkt_dev->skb_priority);
557 557
558 if (pkt_dev->flags & F_IPV6) { 558 if (pkt_dev->flags & F_IPV6) {
559 char b1[128], b2[128], b3[128];
560 fmt_ip6(b1, pkt_dev->in6_saddr.s6_addr);
561 fmt_ip6(b2, pkt_dev->min_in6_saddr.s6_addr);
562 fmt_ip6(b3, pkt_dev->max_in6_saddr.s6_addr);
563 seq_printf(seq, 559 seq_printf(seq,
564 " saddr: %s min_saddr: %s max_saddr: %s\n", b1, 560 " saddr: %pI6c min_saddr: %pI6c max_saddr: %pI6c\n"
565 b2, b3); 561 " daddr: %pI6c min_daddr: %pI6c max_daddr: %pI6c\n",
566 562 &pkt_dev->in6_saddr,
567 fmt_ip6(b1, pkt_dev->in6_daddr.s6_addr); 563 &pkt_dev->min_in6_saddr, &pkt_dev->max_in6_saddr,
568 fmt_ip6(b2, pkt_dev->min_in6_daddr.s6_addr); 564 &pkt_dev->in6_daddr,
569 fmt_ip6(b3, pkt_dev->max_in6_daddr.s6_addr); 565 &pkt_dev->min_in6_daddr, &pkt_dev->max_in6_daddr);
570 seq_printf(seq,
571 " daddr: %s min_daddr: %s max_daddr: %s\n", b1,
572 b2, b3);
573
574 } else { 566 } else {
575 seq_printf(seq, 567 seq_printf(seq,
576 " dst_min: %s dst_max: %s\n", 568 " dst_min: %s dst_max: %s\n",
@@ -706,10 +698,9 @@ static int pktgen_if_show(struct seq_file *seq, void *v)
706 pkt_dev->cur_src_mac_offset); 698 pkt_dev->cur_src_mac_offset);
707 699
708 if (pkt_dev->flags & F_IPV6) { 700 if (pkt_dev->flags & F_IPV6) {
709 char b1[128], b2[128]; 701 seq_printf(seq, " cur_saddr: %pI6c cur_daddr: %pI6c\n",
710 fmt_ip6(b1, pkt_dev->cur_in6_daddr.s6_addr); 702 &pkt_dev->cur_in6_saddr,
711 fmt_ip6(b2, pkt_dev->cur_in6_saddr.s6_addr); 703 &pkt_dev->cur_in6_daddr);
712 seq_printf(seq, " cur_saddr: %s cur_daddr: %s\n", b2, b1);
713 } else 704 } else
714 seq_printf(seq, " cur_saddr: 0x%x cur_daddr: 0x%x\n", 705 seq_printf(seq, " cur_saddr: 0x%x cur_daddr: 0x%x\n",
715 pkt_dev->cur_saddr, pkt_dev->cur_daddr); 706 pkt_dev->cur_saddr, pkt_dev->cur_daddr);
@@ -1309,7 +1300,7 @@ static ssize_t pktgen_if_write(struct file *file,
1309 buf[len] = 0; 1300 buf[len] = 0;
1310 1301
1311 scan_ip6(buf, pkt_dev->in6_daddr.s6_addr); 1302 scan_ip6(buf, pkt_dev->in6_daddr.s6_addr);
1312 fmt_ip6(buf, pkt_dev->in6_daddr.s6_addr); 1303 snprintf(buf, sizeof(buf), "%pI6c", &pkt_dev->in6_daddr);
1313 1304
1314 ipv6_addr_copy(&pkt_dev->cur_in6_daddr, &pkt_dev->in6_daddr); 1305 ipv6_addr_copy(&pkt_dev->cur_in6_daddr, &pkt_dev->in6_daddr);
1315 1306
@@ -1332,7 +1323,7 @@ static ssize_t pktgen_if_write(struct file *file,
1332 buf[len] = 0; 1323 buf[len] = 0;
1333 1324
1334 scan_ip6(buf, pkt_dev->min_in6_daddr.s6_addr); 1325 scan_ip6(buf, pkt_dev->min_in6_daddr.s6_addr);
1335 fmt_ip6(buf, pkt_dev->min_in6_daddr.s6_addr); 1326 snprintf(buf, sizeof(buf), "%pI6c", &pkt_dev->min_in6_daddr);
1336 1327
1337 ipv6_addr_copy(&pkt_dev->cur_in6_daddr, 1328 ipv6_addr_copy(&pkt_dev->cur_in6_daddr,
1338 &pkt_dev->min_in6_daddr); 1329 &pkt_dev->min_in6_daddr);
@@ -1355,7 +1346,7 @@ static ssize_t pktgen_if_write(struct file *file,
1355 buf[len] = 0; 1346 buf[len] = 0;
1356 1347
1357 scan_ip6(buf, pkt_dev->max_in6_daddr.s6_addr); 1348 scan_ip6(buf, pkt_dev->max_in6_daddr.s6_addr);
1358 fmt_ip6(buf, pkt_dev->max_in6_daddr.s6_addr); 1349 snprintf(buf, sizeof(buf), "%pI6c", &pkt_dev->max_in6_daddr);
1359 1350
1360 if (debug) 1351 if (debug)
1361 printk(KERN_DEBUG "pktgen: dst6_max set to: %s\n", buf); 1352 printk(KERN_DEBUG "pktgen: dst6_max set to: %s\n", buf);
@@ -1376,7 +1367,7 @@ static ssize_t pktgen_if_write(struct file *file,
1376 buf[len] = 0; 1367 buf[len] = 0;
1377 1368
1378 scan_ip6(buf, pkt_dev->in6_saddr.s6_addr); 1369 scan_ip6(buf, pkt_dev->in6_saddr.s6_addr);
1379 fmt_ip6(buf, pkt_dev->in6_saddr.s6_addr); 1370 snprintf(buf, sizeof(buf), "%pI6c", &pkt_dev->in6_saddr);
1380 1371
1381 ipv6_addr_copy(&pkt_dev->cur_in6_saddr, &pkt_dev->in6_saddr); 1372 ipv6_addr_copy(&pkt_dev->cur_in6_saddr, &pkt_dev->in6_saddr);
1382 1373
@@ -1430,11 +1421,6 @@ static ssize_t pktgen_if_write(struct file *file,
1430 return count; 1421 return count;
1431 } 1422 }
1432 if (!strcmp(name, "dst_mac")) { 1423 if (!strcmp(name, "dst_mac")) {
1433 char *v = valstr;
1434 unsigned char old_dmac[ETH_ALEN];
1435 unsigned char *m = pkt_dev->dst_mac;
1436 memcpy(old_dmac, pkt_dev->dst_mac, ETH_ALEN);
1437
1438 len = strn_len(&user_buffer[i], sizeof(valstr) - 1); 1424 len = strn_len(&user_buffer[i], sizeof(valstr) - 1);
1439 if (len < 0) 1425 if (len < 0)
1440 return len; 1426 return len;
@@ -1442,35 +1428,16 @@ static ssize_t pktgen_if_write(struct file *file,
1442 memset(valstr, 0, sizeof(valstr)); 1428 memset(valstr, 0, sizeof(valstr));
1443 if (copy_from_user(valstr, &user_buffer[i], len)) 1429 if (copy_from_user(valstr, &user_buffer[i], len))
1444 return -EFAULT; 1430 return -EFAULT;
1445 i += len;
1446
1447 for (*m = 0; *v && m < pkt_dev->dst_mac + 6; v++) {
1448 int value;
1449
1450 value = hex_to_bin(*v);
1451 if (value >= 0)
1452 *m = *m * 16 + value;
1453
1454 if (*v == ':') {
1455 m++;
1456 *m = 0;
1457 }
1458 }
1459 1431
1432 if (!mac_pton(valstr, pkt_dev->dst_mac))
1433 return -EINVAL;
1460 /* Set up Dest MAC */ 1434 /* Set up Dest MAC */
1461 if (compare_ether_addr(old_dmac, pkt_dev->dst_mac)) 1435 memcpy(&pkt_dev->hh[0], pkt_dev->dst_mac, ETH_ALEN);
1462 memcpy(&(pkt_dev->hh[0]), pkt_dev->dst_mac, ETH_ALEN);
1463 1436
1464 sprintf(pg_result, "OK: dstmac"); 1437 sprintf(pg_result, "OK: dstmac %pM", pkt_dev->dst_mac);
1465 return count; 1438 return count;
1466 } 1439 }
1467 if (!strcmp(name, "src_mac")) { 1440 if (!strcmp(name, "src_mac")) {
1468 char *v = valstr;
1469 unsigned char old_smac[ETH_ALEN];
1470 unsigned char *m = pkt_dev->src_mac;
1471
1472 memcpy(old_smac, pkt_dev->src_mac, ETH_ALEN);
1473
1474 len = strn_len(&user_buffer[i], sizeof(valstr) - 1); 1441 len = strn_len(&user_buffer[i], sizeof(valstr) - 1);
1475 if (len < 0) 1442 if (len < 0)
1476 return len; 1443 return len;
@@ -1478,26 +1445,13 @@ static ssize_t pktgen_if_write(struct file *file,
1478 memset(valstr, 0, sizeof(valstr)); 1445 memset(valstr, 0, sizeof(valstr));
1479 if (copy_from_user(valstr, &user_buffer[i], len)) 1446 if (copy_from_user(valstr, &user_buffer[i], len))
1480 return -EFAULT; 1447 return -EFAULT;
1481 i += len;
1482
1483 for (*m = 0; *v && m < pkt_dev->src_mac + 6; v++) {
1484 int value;
1485
1486 value = hex_to_bin(*v);
1487 if (value >= 0)
1488 *m = *m * 16 + value;
1489
1490 if (*v == ':') {
1491 m++;
1492 *m = 0;
1493 }
1494 }
1495 1448
1449 if (!mac_pton(valstr, pkt_dev->src_mac))
1450 return -EINVAL;
1496 /* Set up Src MAC */ 1451 /* Set up Src MAC */
1497 if (compare_ether_addr(old_smac, pkt_dev->src_mac)) 1452 memcpy(&pkt_dev->hh[6], pkt_dev->src_mac, ETH_ALEN);
1498 memcpy(&(pkt_dev->hh[6]), pkt_dev->src_mac, ETH_ALEN);
1499 1453
1500 sprintf(pg_result, "OK: srcmac"); 1454 sprintf(pg_result, "OK: srcmac %pM", pkt_dev->src_mac);
1501 return count; 1455 return count;
1502 } 1456 }
1503 1457
@@ -2514,7 +2468,6 @@ static int pktgen_output_ipsec(struct sk_buff *skb, struct pktgen_dev *pkt_dev)
2514{ 2468{
2515 struct xfrm_state *x = pkt_dev->flows[pkt_dev->curfl].x; 2469 struct xfrm_state *x = pkt_dev->flows[pkt_dev->curfl].x;
2516 int err = 0; 2470 int err = 0;
2517 struct iphdr *iph;
2518 2471
2519 if (!x) 2472 if (!x)
2520 return 0; 2473 return 0;
@@ -2524,7 +2477,6 @@ static int pktgen_output_ipsec(struct sk_buff *skb, struct pktgen_dev *pkt_dev)
2524 return 0; 2477 return 0;
2525 2478
2526 spin_lock(&x->lock); 2479 spin_lock(&x->lock);
2527 iph = ip_hdr(skb);
2528 2480
2529 err = x->outer_mode->output(x, skb); 2481 err = x->outer_mode->output(x, skb);
2530 if (err) 2482 if (err)
@@ -2624,6 +2576,7 @@ static void pktgen_finalize_skb(struct pktgen_dev *pkt_dev, struct sk_buff *skb,
2624 } else { 2576 } else {
2625 int frags = pkt_dev->nfrags; 2577 int frags = pkt_dev->nfrags;
2626 int i, len; 2578 int i, len;
2579 int frag_len;
2627 2580
2628 2581
2629 if (frags > MAX_SKB_FRAGS) 2582 if (frags > MAX_SKB_FRAGS)
@@ -2635,6 +2588,8 @@ static void pktgen_finalize_skb(struct pktgen_dev *pkt_dev, struct sk_buff *skb,
2635 } 2588 }
2636 2589
2637 i = 0; 2590 i = 0;
2591 frag_len = (datalen/frags) < PAGE_SIZE ?
2592 (datalen/frags) : PAGE_SIZE;
2638 while (datalen > 0) { 2593 while (datalen > 0) {
2639 if (unlikely(!pkt_dev->page)) { 2594 if (unlikely(!pkt_dev->page)) {
2640 int node = numa_node_id(); 2595 int node = numa_node_id();
@@ -2648,38 +2603,18 @@ static void pktgen_finalize_skb(struct pktgen_dev *pkt_dev, struct sk_buff *skb,
2648 skb_shinfo(skb)->frags[i].page = pkt_dev->page; 2603 skb_shinfo(skb)->frags[i].page = pkt_dev->page;
2649 get_page(pkt_dev->page); 2604 get_page(pkt_dev->page);
2650 skb_shinfo(skb)->frags[i].page_offset = 0; 2605 skb_shinfo(skb)->frags[i].page_offset = 0;
2651 skb_shinfo(skb)->frags[i].size = 2606 /*last fragment, fill rest of data*/
2652 (datalen < PAGE_SIZE ? datalen : PAGE_SIZE); 2607 if (i == (frags - 1))
2608 skb_shinfo(skb)->frags[i].size =
2609 (datalen < PAGE_SIZE ? datalen : PAGE_SIZE);
2610 else
2611 skb_shinfo(skb)->frags[i].size = frag_len;
2653 datalen -= skb_shinfo(skb)->frags[i].size; 2612 datalen -= skb_shinfo(skb)->frags[i].size;
2654 skb->len += skb_shinfo(skb)->frags[i].size; 2613 skb->len += skb_shinfo(skb)->frags[i].size;
2655 skb->data_len += skb_shinfo(skb)->frags[i].size; 2614 skb->data_len += skb_shinfo(skb)->frags[i].size;
2656 i++; 2615 i++;
2657 skb_shinfo(skb)->nr_frags = i; 2616 skb_shinfo(skb)->nr_frags = i;
2658 } 2617 }
2659
2660 while (i < frags) {
2661 int rem;
2662
2663 if (i == 0)
2664 break;
2665
2666 rem = skb_shinfo(skb)->frags[i - 1].size / 2;
2667 if (rem == 0)
2668 break;
2669
2670 skb_shinfo(skb)->frags[i - 1].size -= rem;
2671
2672 skb_shinfo(skb)->frags[i] =
2673 skb_shinfo(skb)->frags[i - 1];
2674 get_page(skb_shinfo(skb)->frags[i].page);
2675 skb_shinfo(skb)->frags[i].page =
2676 skb_shinfo(skb)->frags[i - 1].page;
2677 skb_shinfo(skb)->frags[i].page_offset +=
2678 skb_shinfo(skb)->frags[i - 1].size;
2679 skb_shinfo(skb)->frags[i].size = rem;
2680 i++;
2681 skb_shinfo(skb)->nr_frags = i;
2682 }
2683 } 2618 }
2684 2619
2685 /* Stamp the time, and sequence number, 2620 /* Stamp the time, and sequence number,
@@ -2917,79 +2852,6 @@ static unsigned int scan_ip6(const char *s, char ip[16])
2917 return len; 2852 return len;
2918} 2853}
2919 2854
2920static char tohex(char hexdigit)
2921{
2922 return hexdigit > 9 ? hexdigit + 'a' - 10 : hexdigit + '0';
2923}
2924
2925static int fmt_xlong(char *s, unsigned int i)
2926{
2927 char *bak = s;
2928 *s = tohex((i >> 12) & 0xf);
2929 if (s != bak || *s != '0')
2930 ++s;
2931 *s = tohex((i >> 8) & 0xf);
2932 if (s != bak || *s != '0')
2933 ++s;
2934 *s = tohex((i >> 4) & 0xf);
2935 if (s != bak || *s != '0')
2936 ++s;
2937 *s = tohex(i & 0xf);
2938 return s - bak + 1;
2939}
2940
2941static unsigned int fmt_ip6(char *s, const char ip[16])
2942{
2943 unsigned int len;
2944 unsigned int i;
2945 unsigned int temp;
2946 unsigned int compressing;
2947 int j;
2948
2949 len = 0;
2950 compressing = 0;
2951 for (j = 0; j < 16; j += 2) {
2952
2953#ifdef V4MAPPEDPREFIX
2954 if (j == 12 && !memcmp(ip, V4mappedprefix, 12)) {
2955 inet_ntoa_r(*(struct in_addr *)(ip + 12), s);
2956 temp = strlen(s);
2957 return len + temp;
2958 }
2959#endif
2960 temp = ((unsigned long)(unsigned char)ip[j] << 8) +
2961 (unsigned long)(unsigned char)ip[j + 1];
2962 if (temp == 0) {
2963 if (!compressing) {
2964 compressing = 1;
2965 if (j == 0) {
2966 *s++ = ':';
2967 ++len;
2968 }
2969 }
2970 } else {
2971 if (compressing) {
2972 compressing = 0;
2973 *s++ = ':';
2974 ++len;
2975 }
2976 i = fmt_xlong(s, temp);
2977 len += i;
2978 s += i;
2979 if (j < 14) {
2980 *s++ = ':';
2981 ++len;
2982 }
2983 }
2984 }
2985 if (compressing) {
2986 *s++ = ':';
2987 ++len;
2988 }
2989 *s = 0;
2990 return len;
2991}
2992
2993static struct sk_buff *fill_packet_ipv6(struct net_device *odev, 2855static struct sk_buff *fill_packet_ipv6(struct net_device *odev,
2994 struct pktgen_dev *pkt_dev) 2856 struct pktgen_dev *pkt_dev)
2995{ 2857{
@@ -3682,13 +3544,12 @@ static int pktgen_add_device(struct pktgen_thread *t, const char *ifname)
3682 return -ENOMEM; 3544 return -ENOMEM;
3683 3545
3684 strcpy(pkt_dev->odevname, ifname); 3546 strcpy(pkt_dev->odevname, ifname);
3685 pkt_dev->flows = vmalloc_node(MAX_CFLOWS * sizeof(struct flow_state), 3547 pkt_dev->flows = vzalloc_node(MAX_CFLOWS * sizeof(struct flow_state),
3686 node); 3548 node);
3687 if (pkt_dev->flows == NULL) { 3549 if (pkt_dev->flows == NULL) {
3688 kfree(pkt_dev); 3550 kfree(pkt_dev);
3689 return -ENOMEM; 3551 return -ENOMEM;
3690 } 3552 }
3691 memset(pkt_dev->flows, 0, MAX_CFLOWS * sizeof(struct flow_state));
3692 3553
3693 pkt_dev->removal_mark = 0; 3554 pkt_dev->removal_mark = 0;
3694 pkt_dev->min_pkt_size = ETH_ZLEN; 3555 pkt_dev->min_pkt_size = ETH_ZLEN;
@@ -3846,6 +3707,7 @@ static int __init pg_init(void)
3846{ 3707{
3847 int cpu; 3708 int cpu;
3848 struct proc_dir_entry *pe; 3709 struct proc_dir_entry *pe;
3710 int ret = 0;
3849 3711
3850 pr_info("%s", version); 3712 pr_info("%s", version);
3851 3713
@@ -3856,11 +3718,10 @@ static int __init pg_init(void)
3856 pe = proc_create(PGCTRL, 0600, pg_proc_dir, &pktgen_fops); 3718 pe = proc_create(PGCTRL, 0600, pg_proc_dir, &pktgen_fops);
3857 if (pe == NULL) { 3719 if (pe == NULL) {
3858 pr_err("ERROR: cannot create %s procfs entry\n", PGCTRL); 3720 pr_err("ERROR: cannot create %s procfs entry\n", PGCTRL);
3859 proc_net_remove(&init_net, PG_PROC_DIR); 3721 ret = -EINVAL;
3860 return -EINVAL; 3722 goto remove_dir;
3861 } 3723 }
3862 3724
3863 /* Register us to receive netdevice events */
3864 register_netdevice_notifier(&pktgen_notifier_block); 3725 register_netdevice_notifier(&pktgen_notifier_block);
3865 3726
3866 for_each_online_cpu(cpu) { 3727 for_each_online_cpu(cpu) {
@@ -3874,13 +3735,18 @@ static int __init pg_init(void)
3874 3735
3875 if (list_empty(&pktgen_threads)) { 3736 if (list_empty(&pktgen_threads)) {
3876 pr_err("ERROR: Initialization failed for all threads\n"); 3737 pr_err("ERROR: Initialization failed for all threads\n");
3877 unregister_netdevice_notifier(&pktgen_notifier_block); 3738 ret = -ENODEV;
3878 remove_proc_entry(PGCTRL, pg_proc_dir); 3739 goto unregister;
3879 proc_net_remove(&init_net, PG_PROC_DIR);
3880 return -ENODEV;
3881 } 3740 }
3882 3741
3883 return 0; 3742 return 0;
3743
3744 unregister:
3745 unregister_netdevice_notifier(&pktgen_notifier_block);
3746 remove_proc_entry(PGCTRL, pg_proc_dir);
3747 remove_dir:
3748 proc_net_remove(&init_net, PG_PROC_DIR);
3749 return ret;
3884} 3750}
3885 3751
3886static void __exit pg_cleanup(void) 3752static void __exit pg_cleanup(void)
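pktgen's private fmt_ip6()/fmt_xlong() helpers are removed above in favour of the kernel's %pI6c printk extension, which prints a struct in6_addr in compressed colon notation. A minimal sketch (function and variable names are illustrative):

	#include <linux/in6.h>
	#include <linux/printk.h>

	static void show_ipv6_pair(const struct in6_addr *saddr,
				   const struct in6_addr *daddr)
	{
		/* %pI6c takes a pointer to the address, as in the
		 * seq_printf()/snprintf() calls above. */
		pr_info("saddr: %pI6c daddr: %pI6c\n", saddr, daddr);
	}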
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index d7c4bb4b1820..abd936d8a716 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -850,6 +850,7 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
850 struct nlattr *attr, *af_spec; 850 struct nlattr *attr, *af_spec;
851 struct rtnl_af_ops *af_ops; 851 struct rtnl_af_ops *af_ops;
852 852
853 ASSERT_RTNL();
853 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ifm), flags); 854 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ifm), flags);
854 if (nlh == NULL) 855 if (nlh == NULL)
855 return -EMSGSIZE; 856 return -EMSGSIZE;
@@ -1007,10 +1008,11 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
1007 s_h = cb->args[0]; 1008 s_h = cb->args[0];
1008 s_idx = cb->args[1]; 1009 s_idx = cb->args[1];
1009 1010
1011 rcu_read_lock();
1010 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) { 1012 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
1011 idx = 0; 1013 idx = 0;
1012 head = &net->dev_index_head[h]; 1014 head = &net->dev_index_head[h];
1013 hlist_for_each_entry(dev, node, head, index_hlist) { 1015 hlist_for_each_entry_rcu(dev, node, head, index_hlist) {
1014 if (idx < s_idx) 1016 if (idx < s_idx)
1015 goto cont; 1017 goto cont;
1016 if (rtnl_fill_ifinfo(skb, dev, RTM_NEWLINK, 1018 if (rtnl_fill_ifinfo(skb, dev, RTM_NEWLINK,
@@ -1023,6 +1025,7 @@ cont:
1023 } 1025 }
1024 } 1026 }
1025out: 1027out:
1028 rcu_read_unlock();
1026 cb->args[1] = idx; 1029 cb->args[1] = idx;
1027 cb->args[0] = h; 1030 cb->args[0] = h;
1028 1031
@@ -1043,6 +1046,7 @@ const struct nla_policy ifla_policy[IFLA_MAX+1] = {
1043 [IFLA_LINKMODE] = { .type = NLA_U8 }, 1046 [IFLA_LINKMODE] = { .type = NLA_U8 },
1044 [IFLA_LINKINFO] = { .type = NLA_NESTED }, 1047 [IFLA_LINKINFO] = { .type = NLA_NESTED },
1045 [IFLA_NET_NS_PID] = { .type = NLA_U32 }, 1048 [IFLA_NET_NS_PID] = { .type = NLA_U32 },
1049 [IFLA_NET_NS_FD] = { .type = NLA_U32 },
1046 [IFLA_IFALIAS] = { .type = NLA_STRING, .len = IFALIASZ-1 }, 1050 [IFLA_IFALIAS] = { .type = NLA_STRING, .len = IFALIASZ-1 },
1047 [IFLA_VFINFO_LIST] = {. type = NLA_NESTED }, 1051 [IFLA_VFINFO_LIST] = {. type = NLA_NESTED },
1048 [IFLA_VF_PORTS] = { .type = NLA_NESTED }, 1052 [IFLA_VF_PORTS] = { .type = NLA_NESTED },
@@ -1091,6 +1095,8 @@ struct net *rtnl_link_get_net(struct net *src_net, struct nlattr *tb[])
1091 */ 1095 */
1092 if (tb[IFLA_NET_NS_PID]) 1096 if (tb[IFLA_NET_NS_PID])
1093 net = get_net_ns_by_pid(nla_get_u32(tb[IFLA_NET_NS_PID])); 1097 net = get_net_ns_by_pid(nla_get_u32(tb[IFLA_NET_NS_PID]));
1098 else if (tb[IFLA_NET_NS_FD])
1099 net = get_net_ns_by_fd(nla_get_u32(tb[IFLA_NET_NS_FD]));
1094 else 1100 else
1095 net = get_net(src_net); 1101 net = get_net(src_net);
1096 return net; 1102 return net;
@@ -1221,7 +1227,7 @@ static int do_setlink(struct net_device *dev, struct ifinfomsg *ifm,
1221 int send_addr_notify = 0; 1227 int send_addr_notify = 0;
1222 int err; 1228 int err;
1223 1229
1224 if (tb[IFLA_NET_NS_PID]) { 1230 if (tb[IFLA_NET_NS_PID] || tb[IFLA_NET_NS_FD]) {
1225 struct net *net = rtnl_link_get_net(dev_net(dev), tb); 1231 struct net *net = rtnl_link_get_net(dev_net(dev), tb);
1226 if (IS_ERR(net)) { 1232 if (IS_ERR(net)) {
1227 err = PTR_ERR(net); 1233 err = PTR_ERR(net);
@@ -1499,6 +1505,7 @@ static int rtnl_dellink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
1499 char ifname[IFNAMSIZ]; 1505 char ifname[IFNAMSIZ];
1500 struct nlattr *tb[IFLA_MAX+1]; 1506 struct nlattr *tb[IFLA_MAX+1];
1501 int err; 1507 int err;
1508 LIST_HEAD(list_kill);
1502 1509
1503 err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy); 1510 err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy);
1504 if (err < 0) 1511 if (err < 0)
@@ -1522,7 +1529,9 @@ static int rtnl_dellink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
1522 if (!ops) 1529 if (!ops)
1523 return -EOPNOTSUPP; 1530 return -EOPNOTSUPP;
1524 1531
1525 ops->dellink(dev, NULL); 1532 ops->dellink(dev, &list_kill);
1533 unregister_netdevice_many(&list_kill);
1534 list_del(&list_kill);
1526 return 0; 1535 return 0;
1527} 1536}
1528 1537
@@ -1570,12 +1579,6 @@ struct net_device *rtnl_create_link(struct net *src_net, struct net *net,
1570 dev->rtnl_link_state = RTNL_LINK_INITIALIZING; 1579 dev->rtnl_link_state = RTNL_LINK_INITIALIZING;
1571 dev->real_num_tx_queues = real_num_queues; 1580 dev->real_num_tx_queues = real_num_queues;
1572 1581
1573 if (strchr(dev->name, '%')) {
1574 err = dev_alloc_name(dev, dev->name);
1575 if (err < 0)
1576 goto err_free;
1577 }
1578
1579 if (tb[IFLA_MTU]) 1582 if (tb[IFLA_MTU])
1580 dev->mtu = nla_get_u32(tb[IFLA_MTU]); 1583 dev->mtu = nla_get_u32(tb[IFLA_MTU]);
1581 if (tb[IFLA_ADDRESS]) 1584 if (tb[IFLA_ADDRESS])
@@ -1595,8 +1598,6 @@ struct net_device *rtnl_create_link(struct net *src_net, struct net *net,
1595 1598
1596 return dev; 1599 return dev;
1597 1600
1598err_free:
1599 free_netdev(dev);
1600err: 1601err:
1601 return ERR_PTR(err); 1602 return ERR_PTR(err);
1602} 1603}
@@ -1963,6 +1964,8 @@ static int rtnetlink_event(struct notifier_block *this, unsigned long event, voi
1963 case NETDEV_GOING_DOWN: 1964 case NETDEV_GOING_DOWN:
1964 case NETDEV_UNREGISTER: 1965 case NETDEV_UNREGISTER:
1965 case NETDEV_UNREGISTER_BATCH: 1966 case NETDEV_UNREGISTER_BATCH:
1967 case NETDEV_RELEASE:
1968 case NETDEV_JOIN:
1966 break; 1969 break;
1967 default: 1970 default:
1968 rtmsg_ifinfo(RTM_NEWLINK, dev, 0); 1971 rtmsg_ifinfo(RTM_NEWLINK, dev, 0);
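rtnl_dellink() above now queues the device on a local list and tears it down with unregister_netdevice_many(), so a ->dellink() implementation that adds several devices (for example a master and its slaves) is unregistered in a single batch. The core of the pattern, with error handling omitted:

	LIST_HEAD(list_kill);

	ops->dellink(dev, &list_kill);		/* ->dellink() queues one or more devices */
	unregister_netdevice_many(&list_kill);	/* single notifier/rollback pass for all of them */
	list_del(&list_kill);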
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 7ebeed0a877c..46cbd28f40f9 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -57,6 +57,7 @@
57#include <linux/init.h> 57#include <linux/init.h>
58#include <linux/scatterlist.h> 58#include <linux/scatterlist.h>
59#include <linux/errqueue.h> 59#include <linux/errqueue.h>
60#include <linux/prefetch.h>
60 61
61#include <net/protocol.h> 62#include <net/protocol.h>
62#include <net/dst.h> 63#include <net/dst.h>
@@ -2993,6 +2994,9 @@ int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
2993 skb->destructor = sock_rmem_free; 2994 skb->destructor = sock_rmem_free;
2994 atomic_add(skb->truesize, &sk->sk_rmem_alloc); 2995 atomic_add(skb->truesize, &sk->sk_rmem_alloc);
2995 2996
2997 /* before exiting rcu section, make sure dst is refcounted */
2998 skb_dst_force(skb);
2999
2996 skb_queue_tail(&sk->sk_error_queue, skb); 3000 skb_queue_tail(&sk->sk_error_queue, skb);
2997 if (!sock_flag(sk, SOCK_DEAD)) 3001 if (!sock_flag(sk, SOCK_DEAD))
2998 sk->sk_data_ready(sk, skb->len); 3002 sk->sk_data_ready(sk, skb->len);
diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
index 385b6095fdc4..77a65f031488 100644
--- a/net/core/sysctl_net_core.c
+++ b/net/core/sysctl_net_core.c
@@ -17,6 +17,7 @@
17 17
18#include <net/ip.h> 18#include <net/ip.h>
19#include <net/sock.h> 19#include <net/sock.h>
20#include <net/net_ratelimit.h>
20 21
21#ifdef CONFIG_RPS 22#ifdef CONFIG_RPS
22static int rps_sock_flow_sysctl(ctl_table *table, int write, 23static int rps_sock_flow_sysctl(ctl_table *table, int write,
@@ -122,6 +123,15 @@ static struct ctl_table net_core_table[] = {
122 .mode = 0644, 123 .mode = 0644,
123 .proc_handler = proc_dointvec 124 .proc_handler = proc_dointvec
124 }, 125 },
126#ifdef CONFIG_BPF_JIT
127 {
128 .procname = "bpf_jit_enable",
129 .data = &bpf_jit_enable,
130 .maxlen = sizeof(int),
131 .mode = 0644,
132 .proc_handler = proc_dointvec
133 },
134#endif
125 { 135 {
126 .procname = "netdev_tstamp_prequeue", 136 .procname = "netdev_tstamp_prequeue",
127 .data = &netdev_tstamp_prequeue, 137 .data = &netdev_tstamp_prequeue,
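The new net_core_table entry exposes the BPF JIT switch as /proc/sys/net/core/bpf_jit_enable when the kernel is built with CONFIG_BPF_JIT. A hedged userspace sketch for turning it on (the helper name is illustrative, not part of this diff):

	#include <fcntl.h>
	#include <unistd.h>

	/* Write "1" to the sysctl file; returns 0 on success, -1 on error. */
	static int enable_bpf_jit(void)
	{
		int fd = open("/proc/sys/net/core/bpf_jit_enable", O_WRONLY);

		if (fd < 0)
			return -1;
		if (write(fd, "1", 1) != 1) {
			close(fd);
			return -1;
		}
		return close(fd);
	}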
diff --git a/net/core/utils.c b/net/core/utils.c
index 5fea0ab21902..386e263f6066 100644
--- a/net/core/utils.c
+++ b/net/core/utils.c
@@ -27,6 +27,7 @@
27#include <linux/ratelimit.h> 27#include <linux/ratelimit.h>
28 28
29#include <net/sock.h> 29#include <net/sock.h>
30#include <net/net_ratelimit.h>
30 31
31#include <asm/byteorder.h> 32#include <asm/byteorder.h>
32#include <asm/system.h> 33#include <asm/system.h>
@@ -296,3 +297,27 @@ void inet_proto_csum_replace4(__sum16 *sum, struct sk_buff *skb,
296 csum_unfold(*sum))); 297 csum_unfold(*sum)));
297} 298}
298EXPORT_SYMBOL(inet_proto_csum_replace4); 299EXPORT_SYMBOL(inet_proto_csum_replace4);
300
301int mac_pton(const char *s, u8 *mac)
302{
303 int i;
304
305 /* XX:XX:XX:XX:XX:XX */
306 if (strlen(s) < 3 * ETH_ALEN - 1)
307 return 0;
308
309 /* Don't dirty result unless string is valid MAC. */
310 for (i = 0; i < ETH_ALEN; i++) {
311 if (!strchr("0123456789abcdefABCDEF", s[i * 3]))
312 return 0;
313 if (!strchr("0123456789abcdefABCDEF", s[i * 3 + 1]))
314 return 0;
315 if (i != ETH_ALEN - 1 && s[i * 3 + 2] != ':')
316 return 0;
317 }
318 for (i = 0; i < ETH_ALEN; i++) {
319 mac[i] = (hex_to_bin(s[i * 3]) << 4) | hex_to_bin(s[i * 3 + 1]);
320 }
321 return 1;
322}
323EXPORT_SYMBOL(mac_pton);
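mac_pton() above accepts only the full "XX:XX:XX:XX:XX:XX" form, leaves the output buffer untouched unless the whole string validates, and returns 1 on success and 0 on failure. A short usage sketch (the address literal and function name are illustrative):

	#include <linux/errno.h>
	#include <linux/if_ether.h>	/* ETH_ALEN */

	static int parse_example_mac(u8 *mac /* ETH_ALEN bytes */)
	{
		if (!mac_pton("00:11:22:33:44:55", mac))
			return -EINVAL;		/* not a valid MAC string */
		return 0;			/* mac[] now holds 00 11 22 33 44 55 */
	}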