Diffstat (limited to 'net/core')
-rw-r--r--   net/core/dev.c             | 152
-rw-r--r--   net/core/dst.c             |  34
-rw-r--r--   net/core/ethtool.c         | 106
-rw-r--r--   net/core/fib_rules.c       |   3
-rw-r--r--   net/core/filter.c          |  65
-rw-r--r--   net/core/net-sysfs.c       |  24
-rw-r--r--   net/core/net_namespace.c   |  12
-rw-r--r--   net/core/netpoll.c         |   2
-rw-r--r--   net/core/pktgen.c          |  37
-rw-r--r--   net/core/rtnetlink.c       |  20
-rw-r--r--   net/core/sysctl_net_core.c |   9
11 files changed, 252 insertions, 212 deletions
diff --git a/net/core/dev.c b/net/core/dev.c
index 856b6ee9a1d5..44ef8f8998ca 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -948,7 +948,7 @@ int dev_alloc_name(struct net_device *dev, const char *name)
 }
 EXPORT_SYMBOL(dev_alloc_name);
 
-static int dev_get_valid_name(struct net_device *dev, const char *name, bool fmt)
+static int dev_get_valid_name(struct net_device *dev, const char *name)
 {
 	struct net *net;
 
@@ -958,7 +958,7 @@ static int dev_get_valid_name(struct net_device *dev, const char *name, bool fmt
 	if (!dev_valid_name(name))
 		return -EINVAL;
 
-	if (fmt && strchr(name, '%'))
+	if (strchr(name, '%'))
 		return dev_alloc_name(dev, name);
 	else if (__dev_get_by_name(net, name))
 		return -EEXIST;
@@ -995,7 +995,7 @@ int dev_change_name(struct net_device *dev, const char *newname)
 
 	memcpy(oldname, dev->name, IFNAMSIZ);
 
-	err = dev_get_valid_name(dev, newname, 1);
+	err = dev_get_valid_name(dev, newname);
 	if (err < 0)
 		return err;
 
@@ -1315,7 +1315,8 @@ void dev_disable_lro(struct net_device *dev)
 		return;
 
 	__ethtool_set_flags(dev, flags & ~ETH_FLAG_LRO);
-	WARN_ON(dev->features & NETIF_F_LRO);
+	if (unlikely(dev->features & NETIF_F_LRO))
+		netdev_WARN(dev, "failed to disable LRO!\n");
 }
 EXPORT_SYMBOL(dev_disable_lro);
 
@@ -2502,8 +2503,8 @@ static inline void ____napi_schedule(struct softnet_data *sd,
 __u32 __skb_get_rxhash(struct sk_buff *skb)
 {
 	int nhoff, hash = 0, poff;
-	struct ipv6hdr *ip6;
-	struct iphdr *ip;
+	const struct ipv6hdr *ip6;
+	const struct iphdr *ip;
 	u8 ip_proto;
 	u32 addr1, addr2, ihl;
 	union {
@@ -2518,7 +2519,7 @@ __u32 __skb_get_rxhash(struct sk_buff *skb)
 		if (!pskb_may_pull(skb, sizeof(*ip) + nhoff))
 			goto done;
 
-		ip = (struct iphdr *) (skb->data + nhoff);
+		ip = (const struct iphdr *) (skb->data + nhoff);
 		if (ip->frag_off & htons(IP_MF | IP_OFFSET))
 			ip_proto = 0;
 		else
@@ -2531,7 +2532,7 @@ __u32 __skb_get_rxhash(struct sk_buff *skb)
 		if (!pskb_may_pull(skb, sizeof(*ip6) + nhoff))
 			goto done;
 
-		ip6 = (struct ipv6hdr *) (skb->data + nhoff);
+		ip6 = (const struct ipv6hdr *) (skb->data + nhoff);
 		ip_proto = ip6->nexthdr;
 		addr1 = (__force u32) ip6->saddr.s6_addr32[3];
 		addr2 = (__force u32) ip6->daddr.s6_addr32[3];
@@ -3076,25 +3077,6 @@ void netdev_rx_handler_unregister(struct net_device *dev)
 }
 EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister);
 
-static void vlan_on_bond_hook(struct sk_buff *skb)
-{
-	/*
-	 * Make sure ARP frames received on VLAN interfaces stacked on
-	 * bonding interfaces still make their way to any base bonding
-	 * device that may have registered for a specific ptype.
-	 */
-	if (skb->dev->priv_flags & IFF_802_1Q_VLAN &&
-	    vlan_dev_real_dev(skb->dev)->priv_flags & IFF_BONDING &&
-	    skb->protocol == htons(ETH_P_ARP)) {
-		struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
-
-		if (!skb2)
-			return;
-		skb2->dev = vlan_dev_real_dev(skb->dev);
-		netif_rx(skb2);
-	}
-}
-
 static int __netif_receive_skb(struct sk_buff *skb)
 {
 	struct packet_type *ptype, *pt_prev;
@@ -3130,6 +3112,12 @@ another_round:
 
 	__this_cpu_inc(softnet_data.processed);
 
+	if (skb->protocol == cpu_to_be16(ETH_P_8021Q)) {
+		skb = vlan_untag(skb);
+		if (unlikely(!skb))
+			goto out;
+	}
+
 #ifdef CONFIG_NET_CLS_ACT
 	if (skb->tc_verd & TC_NCLS) {
 		skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
@@ -3177,15 +3165,13 @@ ncls:
 			ret = deliver_skb(skb, pt_prev, orig_dev);
 			pt_prev = NULL;
 		}
-		if (vlan_hwaccel_do_receive(&skb)) {
+		if (vlan_do_receive(&skb)) {
 			ret = __netif_receive_skb(skb);
 			goto out;
 		} else if (unlikely(!skb))
 			goto out;
 	}
 
-	vlan_on_bond_hook(skb);
-
 	/* deliver only exact match when indicated */
 	null_or_dev = deliver_exact ? skb->dev : NULL;
 
@@ -4510,6 +4496,30 @@ void dev_set_rx_mode(struct net_device *dev)
 }
 
 /**
+ *	dev_ethtool_get_settings - call device's ethtool_ops::get_settings()
+ *	@dev: device
+ *	@cmd: memory area for ethtool_ops::get_settings() result
+ *
+ *	The cmd arg is initialized properly (cleared and
+ *	ethtool_cmd::cmd field set to ETHTOOL_GSET).
+ *
+ *	Return device's ethtool_ops::get_settings() result value or
+ *	-EOPNOTSUPP when device doesn't expose
+ *	ethtool_ops::get_settings() operation.
+ */
+int dev_ethtool_get_settings(struct net_device *dev,
+			     struct ethtool_cmd *cmd)
+{
+	if (!dev->ethtool_ops || !dev->ethtool_ops->get_settings)
+		return -EOPNOTSUPP;
+
+	memset(cmd, 0, sizeof(struct ethtool_cmd));
+	cmd->cmd = ETHTOOL_GSET;
+	return dev->ethtool_ops->get_settings(dev, cmd);
+}
+EXPORT_SYMBOL(dev_ethtool_get_settings);
+
+/**
 *	dev_get_flags - get flags reported to userspace
 *	@dev: device
 *
@@ -5240,11 +5250,13 @@ u32 netdev_fix_features(struct net_device *dev, u32 features)
 }
 EXPORT_SYMBOL(netdev_fix_features);
 
-void netdev_update_features(struct net_device *dev)
+int __netdev_update_features(struct net_device *dev)
 {
 	u32 features;
 	int err = 0;
 
+	ASSERT_RTNL();
+
 	features = netdev_get_wanted_features(dev);
 
 	if (dev->netdev_ops->ndo_fix_features)
@@ -5254,7 +5266,7 @@ void netdev_update_features(struct net_device *dev)
 	features = netdev_fix_features(dev, features);
 
 	if (dev->features == features)
-		return;
+		return 0;
 
 	netdev_info(dev, "Features changed: 0x%08x -> 0x%08x\n",
 		dev->features, features);
@@ -5262,12 +5274,23 @@ void netdev_update_features(struct net_device *dev)
 	if (dev->netdev_ops->ndo_set_features)
 		err = dev->netdev_ops->ndo_set_features(dev, features);
 
-	if (!err)
-		dev->features = features;
-	else if (err < 0)
+	if (unlikely(err < 0)) {
 		netdev_err(dev,
 			"set_features() failed (%d); wanted 0x%08x, left 0x%08x\n",
 			err, features, dev->features);
+		return -1;
+	}
+
+	if (!err)
+		dev->features = features;
+
+	return 1;
+}
+
+void netdev_update_features(struct net_device *dev)
+{
+	if (__netdev_update_features(dev))
+		netdev_features_change(dev);
 }
 EXPORT_SYMBOL(netdev_update_features);
 
@@ -5397,8 +5420,8 @@ int register_netdevice(struct net_device *dev)
 		}
 	}
 
-	ret = dev_get_valid_name(dev, dev->name, 0);
-	if (ret)
+	ret = dev_get_valid_name(dev, dev->name);
+	if (ret < 0)
 		goto err_uninit;
 
 	dev->ifindex = dev_new_index(net);
@@ -5418,6 +5441,14 @@ int register_netdevice(struct net_device *dev)
 		dev->features &= ~NETIF_F_GSO;
 	}
 
+	/* Turn on no cache copy if HW is doing checksum */
+	dev->hw_features |= NETIF_F_NOCACHE_COPY;
+	if ((dev->features & NETIF_F_ALL_CSUM) &&
+	    !(dev->features & NETIF_F_NO_CSUM)) {
+		dev->wanted_features |= NETIF_F_NOCACHE_COPY;
+		dev->features |= NETIF_F_NOCACHE_COPY;
+	}
+
 	/* Enable GRO and NETIF_F_HIGHDMA for vlans by default,
 	 * vlan_dev_init() will do the dev->features check, so these features
 	 * are enabled only if supported by underlying device.
@@ -5434,7 +5465,7 @@ int register_netdevice(struct net_device *dev)
 		goto err_uninit;
 	dev->reg_state = NETREG_REGISTERED;
 
-	netdev_update_features(dev);
+	__netdev_update_features(dev);
 
 	/*
 	 *	Default initial state at registry is that the
@@ -5531,19 +5562,7 @@ int register_netdev(struct net_device *dev)
 	int err;
 
 	rtnl_lock();
-
-	/*
-	 * If the name is a format string the caller wants us to do a
-	 * name allocation.
-	 */
-	if (strchr(dev->name, '%')) {
-		err = dev_alloc_name(dev, dev->name);
-		if (err < 0)
-			goto out;
-	}
-
 	err = register_netdevice(dev);
-out:
 	rtnl_unlock();
 	return err;
 }
@@ -6025,7 +6044,7 @@ int dev_change_net_namespace(struct net_device *dev, struct net *net, const char
 		/* We get here if we can't use the current device name */
 		if (!pat)
 			goto out;
-		if (dev_get_valid_name(dev, pat, 1))
+		if (dev_get_valid_name(dev, pat) < 0)
 			goto out;
 	}
 
@@ -6157,29 +6176,20 @@ static int dev_cpu_callback(struct notifier_block *nfb,
  */
 u32 netdev_increment_features(u32 all, u32 one, u32 mask)
 {
-	/* If device needs checksumming, downgrade to it. */
-	if (all & NETIF_F_NO_CSUM && !(one & NETIF_F_NO_CSUM))
-		all ^= NETIF_F_NO_CSUM | (one & NETIF_F_ALL_CSUM);
-	else if (mask & NETIF_F_ALL_CSUM) {
-		/* If one device supports v4/v6 checksumming, set for all. */
-		if (one & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM) &&
-		    !(all & NETIF_F_GEN_CSUM)) {
-			all &= ~NETIF_F_ALL_CSUM;
-			all |= one & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
-		}
+	if (mask & NETIF_F_GEN_CSUM)
+		mask |= NETIF_F_ALL_CSUM;
+	mask |= NETIF_F_VLAN_CHALLENGED;
 
-		/* If one device supports hw checksumming, set for all. */
-		if (one & NETIF_F_GEN_CSUM && !(all & NETIF_F_GEN_CSUM)) {
-			all &= ~NETIF_F_ALL_CSUM;
-			all |= NETIF_F_HW_CSUM;
-		}
-	}
+	all |= one & (NETIF_F_ONE_FOR_ALL|NETIF_F_ALL_CSUM) & mask;
+	all &= one | ~NETIF_F_ALL_FOR_ALL;
 
-	one |= NETIF_F_ALL_CSUM;
+	/* If device needs checksumming, downgrade to it. */
+	if (all & (NETIF_F_ALL_CSUM & ~NETIF_F_NO_CSUM))
+		all &= ~NETIF_F_NO_CSUM;
 
-	one |= all & NETIF_F_ONE_FOR_ALL;
-	all &= one | NETIF_F_LLTX | NETIF_F_GSO | NETIF_F_UFO;
-	all |= one & mask & NETIF_F_ONE_FOR_ALL;
+	/* If one device supports hw checksumming, set for all. */
+	if (all & NETIF_F_GEN_CSUM)
+		all &= ~(NETIF_F_ALL_CSUM & ~NETIF_F_GEN_CSUM);
 
 	return all;
 }
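
Note (not part of the patch): the netdev_increment_features() rework above changes how a stacked device folds the feature flags of its lower devices together. As a rough, hedged illustration of a caller, a master driver could recompute its combined feature set along the lines below; the initial "all" value and the mask are assumptions made for the example, not taken from any real driver.

/* Illustrative sketch only: fold the features of n lower devices into
 * one combined set with the reworked helper. */
static u32 example_combine_features(struct net_device **lowers, int n)
{
	u32 mask = NETIF_F_ALL_CSUM | NETIF_F_SG | NETIF_F_HIGHDMA; /* assumed */
	u32 all  = (mask & ~NETIF_F_ONE_FOR_ALL) | NETIF_F_ALL_FOR_ALL;
	int i;

	/* Each pass keeps only what this lower device can also do
	 * (ALL_FOR_ALL bits) and propagates ONE_FOR_ALL/checksum bits up. */
	for (i = 0; i < n; i++)
		all = netdev_increment_features(all, lowers[i]->features, mask);

	return all;
}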
diff --git a/net/core/dst.c b/net/core/dst.c
index 91104d35de7d..30f009327b62 100644
--- a/net/core/dst.c
+++ b/net/core/dst.c
@@ -166,7 +166,8 @@ EXPORT_SYMBOL(dst_discard);
 
 const u32 dst_default_metrics[RTAX_MAX];
 
-void *dst_alloc(struct dst_ops *ops, int initial_ref)
+void *dst_alloc(struct dst_ops *ops, struct net_device *dev,
+		int initial_ref, int initial_obsolete, int flags)
 {
 	struct dst_entry *dst;
 
@@ -174,15 +175,36 @@ void *dst_alloc(struct dst_ops *ops, int initial_ref)
 		if (ops->gc(ops))
 			return NULL;
 	}
-	dst = kmem_cache_zalloc(ops->kmem_cachep, GFP_ATOMIC);
+	dst = kmem_cache_alloc(ops->kmem_cachep, GFP_ATOMIC);
 	if (!dst)
 		return NULL;
-	atomic_set(&dst->__refcnt, initial_ref);
+	dst->child = NULL;
+	dst->dev = dev;
+	if (dev)
+		dev_hold(dev);
 	dst->ops = ops;
-	dst->lastuse = jiffies;
-	dst->path = dst;
-	dst->input = dst->output = dst_discard;
 	dst_init_metrics(dst, dst_default_metrics, true);
+	dst->expires = 0UL;
+	dst->path = dst;
+	dst->neighbour = NULL;
+	dst->hh = NULL;
+#ifdef CONFIG_XFRM
+	dst->xfrm = NULL;
+#endif
+	dst->input = dst_discard;
+	dst->output = dst_discard;
+	dst->error = 0;
+	dst->obsolete = initial_obsolete;
+	dst->header_len = 0;
+	dst->trailer_len = 0;
+#ifdef CONFIG_IP_ROUTE_CLASSID
+	dst->tclassid = 0;
+#endif
+	atomic_set(&dst->__refcnt, initial_ref);
+	dst->__use = 0;
+	dst->lastuse = jiffies;
+	dst->flags = flags;
+	dst->next = NULL;
 #if RT_CACHE_DEBUG >= 2
 	atomic_inc(&dst_total);
 #endif
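
Note (not part of the patch): with the wider dst_alloc() signature above, protocol code passes the output device plus the initial refcount, obsolete value and flags at allocation time instead of patching the entry up afterwards. A minimal sketch of such a call site, with the 1 / -1 / 0 argument values chosen only for illustration:

/* Example call site for the new signature: initial_ref = 1,
 * initial_obsolete = -1, flags = 0 are placeholder values. */
static struct dst_entry *example_dst_alloc(struct dst_ops *ops,
					   struct net_device *dev)
{
	return dst_alloc(ops, dev, 1, -1, 0);
}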
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index 74ead9eca126..d8b1a8d85a96 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -21,6 +21,8 @@
 #include <linux/uaccess.h>
 #include <linux/vmalloc.h>
 #include <linux/slab.h>
+#include <linux/rtnetlink.h>
+#include <linux/sched.h>
 
 /*
  * Some useful ethtool_ops methods that're device independent.
@@ -317,7 +319,7 @@ static int ethtool_set_features(struct net_device *dev, void __user *useraddr)
 
 	dev->wanted_features &= ~features[0].valid;
 	dev->wanted_features |= features[0].valid & features[0].requested;
-	netdev_update_features(dev);
+	__netdev_update_features(dev);
 
 	if ((dev->wanted_features ^ dev->features) & features[0].valid)
 		ret |= ETHTOOL_F_WISH;
@@ -359,7 +361,7 @@ static const char netdev_features_strings[ETHTOOL_DEV_FEATURE_WORDS * 32][ETH_GS
 	/* NETIF_F_NTUPLE */         "rx-ntuple-filter",
 	/* NETIF_F_RXHASH */         "rx-hashing",
 	/* NETIF_F_RXCSUM */         "rx-checksum",
-	"",
+	/* NETIF_F_NOCACHE_COPY */   "tx-nocache-copy",
 	"",
 };
 
@@ -499,7 +501,7 @@ static int ethtool_set_one_feature(struct net_device *dev,
 	else
 		dev->wanted_features &= ~mask;
 
-	netdev_update_features(dev);
+	__netdev_update_features(dev);
 	return 0;
 }
 
@@ -544,14 +546,14 @@ int __ethtool_set_flags(struct net_device *dev, u32 data)
 	}
 
 	/* allow changing only bits set in hw_features */
-	changed = (data ^ dev->wanted_features) & flags_dup_features;
+	changed = (data ^ dev->features) & flags_dup_features;
 	if (changed & ~dev->hw_features)
 		return (changed & dev->hw_features) ? -EINVAL : -EOPNOTSUPP;
 
 	dev->wanted_features =
-		(dev->wanted_features & ~changed) | data;
+		(dev->wanted_features & ~changed) | (data & dev->hw_features);
 
-	netdev_update_features(dev);
+	__netdev_update_features(dev);
 
 	return 0;
 }
@@ -908,6 +910,9 @@ static noinline_for_stack int ethtool_set_rx_ntuple(struct net_device *dev,
 	struct ethtool_rx_ntuple_flow_spec_container *fsc = NULL;
 	int ret;
 
+	if (!ops->set_rx_ntuple)
+		return -EOPNOTSUPP;
+
 	if (!(dev->features & NETIF_F_NTUPLE))
 		return -EINVAL;
 
@@ -1441,6 +1446,35 @@ static int ethtool_set_ringparam(struct net_device *dev, void __user *useraddr)
 	return dev->ethtool_ops->set_ringparam(dev, &ringparam);
 }
 
+static noinline_for_stack int ethtool_get_channels(struct net_device *dev,
+						   void __user *useraddr)
+{
+	struct ethtool_channels channels = { .cmd = ETHTOOL_GCHANNELS };
+
+	if (!dev->ethtool_ops->get_channels)
+		return -EOPNOTSUPP;
+
+	dev->ethtool_ops->get_channels(dev, &channels);
+
+	if (copy_to_user(useraddr, &channels, sizeof(channels)))
+		return -EFAULT;
+	return 0;
+}
+
+static noinline_for_stack int ethtool_set_channels(struct net_device *dev,
+						   void __user *useraddr)
+{
+	struct ethtool_channels channels;
+
+	if (!dev->ethtool_ops->set_channels)
+		return -EOPNOTSUPP;
+
+	if (copy_from_user(&channels, useraddr, sizeof(channels)))
+		return -EFAULT;
+
+	return dev->ethtool_ops->set_channels(dev, &channels);
+}
+
 static int ethtool_get_pauseparam(struct net_device *dev, void __user *useraddr)
 {
 	struct ethtool_pauseparam pauseparam = { ETHTOOL_GPAUSEPARAM };
@@ -1618,14 +1652,64 @@ out:
 static int ethtool_phys_id(struct net_device *dev, void __user *useraddr)
 {
 	struct ethtool_value id;
+	static bool busy;
+	int rc;
 
-	if (!dev->ethtool_ops->phys_id)
+	if (!dev->ethtool_ops->set_phys_id && !dev->ethtool_ops->phys_id)
 		return -EOPNOTSUPP;
 
+	if (busy)
+		return -EBUSY;
+
 	if (copy_from_user(&id, useraddr, sizeof(id)))
 		return -EFAULT;
 
-	return dev->ethtool_ops->phys_id(dev, id.data);
+	if (!dev->ethtool_ops->set_phys_id)
+		/* Do it the old way */
+		return dev->ethtool_ops->phys_id(dev, id.data);
+
+	rc = dev->ethtool_ops->set_phys_id(dev, ETHTOOL_ID_ACTIVE);
+	if (rc < 0)
+		return rc;
+
+	/* Drop the RTNL lock while waiting, but prevent reentry or
+	 * removal of the device.
+	 */
+	busy = true;
+	dev_hold(dev);
+	rtnl_unlock();
+
+	if (rc == 0) {
+		/* Driver will handle this itself */
+		schedule_timeout_interruptible(
+			id.data ? (id.data * HZ) : MAX_SCHEDULE_TIMEOUT);
+	} else {
+		/* Driver expects to be called at twice the frequency in rc */
+		int n = rc * 2, i, interval = HZ / n;
+
+		/* Count down seconds */
+		do {
+			/* Count down iterations per second */
+			i = n;
+			do {
+				rtnl_lock();
+				rc = dev->ethtool_ops->set_phys_id(dev,
+				    (i & 1) ? ETHTOOL_ID_OFF : ETHTOOL_ID_ON);
+				rtnl_unlock();
+				if (rc)
+					break;
+				schedule_timeout_interruptible(interval);
+			} while (!signal_pending(current) && --i != 0);
+		} while (!signal_pending(current) &&
+			 (id.data == 0 || --id.data != 0));
+	}
+
+	rtnl_lock();
+	dev_put(dev);
+	busy = false;
+
+	(void)dev->ethtool_ops->set_phys_id(dev, ETHTOOL_ID_INACTIVE);
+	return rc;
 }
 
 static int ethtool_get_stats(struct net_device *dev, void __user *useraddr)
@@ -1953,6 +2037,12 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
 	case ETHTOOL_SGRO:
 		rc = ethtool_set_one_feature(dev, useraddr, ethcmd);
 		break;
+	case ETHTOOL_GCHANNELS:
+		rc = ethtool_get_channels(dev, useraddr);
+		break;
+	case ETHTOOL_SCHANNELS:
+		rc = ethtool_set_channels(dev, useraddr);
+		break;
 	default:
 		rc = -EOPNOTSUPP;
 	}
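
Note (not part of the patch): the reworked ethtool_phys_id() above drives identify-LED blinking from the core by calling the driver's new set_phys_id() hook with the ETHTOOL_ID_ACTIVE/ON/OFF/INACTIVE states (the states come from the companion include/linux/ethtool.h change, which is outside net/core and not shown here). A hedged sketch of what a driver-side hook might look like; example_priv, the example_led_* helpers and the LED constants are hypothetical:

/* Hypothetical driver hook.  Returning 2 from ETHTOOL_ID_ACTIVE asks the
 * core loop above to blink at 2 Hz (it calls back at twice that rate,
 * alternating ETHTOOL_ID_ON / ETHTOOL_ID_OFF); returning 0 would mean
 * the driver blinks the LED itself. */
static int example_set_phys_id(struct net_device *dev,
			       enum ethtool_phys_id_state state)
{
	struct example_priv *priv = netdev_priv(dev);	/* hypothetical */

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		priv->saved_led = example_led_read(priv);	/* hypothetical */
		return 2;
	case ETHTOOL_ID_ON:
		example_led_write(priv, EXAMPLE_LED_ON);	/* hypothetical */
		break;
	case ETHTOOL_ID_OFF:
		example_led_write(priv, EXAMPLE_LED_OFF);	/* hypothetical */
		break;
	case ETHTOOL_ID_INACTIVE:
		example_led_write(priv, priv->saved_led);	/* restore */
		break;
	}
	return 0;
}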
diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c
index 8248ebb5891d..3911586e12e4 100644
--- a/net/core/fib_rules.c
+++ b/net/core/fib_rules.c
@@ -590,7 +590,8 @@ static int dump_rules(struct sk_buff *skb, struct netlink_callback *cb,
 	int idx = 0;
 	struct fib_rule *rule;
 
-	list_for_each_entry(rule, &ops->rules_list, list) {
+	rcu_read_lock();
+	list_for_each_entry_rcu(rule, &ops->rules_list, list) {
 		if (idx < cb->args[1])
 			goto skip;
 
diff --git a/net/core/filter.c b/net/core/filter.c
index afb8afb066bb..0eb8c4466eaa 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -39,65 +39,6 @@
 #include <linux/filter.h>
 #include <linux/reciprocal_div.h>
 
-enum {
-	BPF_S_RET_K = 1,
-	BPF_S_RET_A,
-	BPF_S_ALU_ADD_K,
-	BPF_S_ALU_ADD_X,
-	BPF_S_ALU_SUB_K,
-	BPF_S_ALU_SUB_X,
-	BPF_S_ALU_MUL_K,
-	BPF_S_ALU_MUL_X,
-	BPF_S_ALU_DIV_X,
-	BPF_S_ALU_AND_K,
-	BPF_S_ALU_AND_X,
-	BPF_S_ALU_OR_K,
-	BPF_S_ALU_OR_X,
-	BPF_S_ALU_LSH_K,
-	BPF_S_ALU_LSH_X,
-	BPF_S_ALU_RSH_K,
-	BPF_S_ALU_RSH_X,
-	BPF_S_ALU_NEG,
-	BPF_S_LD_W_ABS,
-	BPF_S_LD_H_ABS,
-	BPF_S_LD_B_ABS,
-	BPF_S_LD_W_LEN,
-	BPF_S_LD_W_IND,
-	BPF_S_LD_H_IND,
-	BPF_S_LD_B_IND,
-	BPF_S_LD_IMM,
-	BPF_S_LDX_W_LEN,
-	BPF_S_LDX_B_MSH,
-	BPF_S_LDX_IMM,
-	BPF_S_MISC_TAX,
-	BPF_S_MISC_TXA,
-	BPF_S_ALU_DIV_K,
-	BPF_S_LD_MEM,
-	BPF_S_LDX_MEM,
-	BPF_S_ST,
-	BPF_S_STX,
-	BPF_S_JMP_JA,
-	BPF_S_JMP_JEQ_K,
-	BPF_S_JMP_JEQ_X,
-	BPF_S_JMP_JGE_K,
-	BPF_S_JMP_JGE_X,
-	BPF_S_JMP_JGT_K,
-	BPF_S_JMP_JGT_X,
-	BPF_S_JMP_JSET_K,
-	BPF_S_JMP_JSET_X,
-	/* Ancillary data */
-	BPF_S_ANC_PROTOCOL,
-	BPF_S_ANC_PKTTYPE,
-	BPF_S_ANC_IFINDEX,
-	BPF_S_ANC_NLATTR,
-	BPF_S_ANC_NLATTR_NEST,
-	BPF_S_ANC_MARK,
-	BPF_S_ANC_QUEUE,
-	BPF_S_ANC_HATYPE,
-	BPF_S_ANC_RXHASH,
-	BPF_S_ANC_CPU,
-};
-
 /* No hurry in this branch */
 static void *__load_pointer(const struct sk_buff *skb, int k, unsigned int size)
 {
@@ -145,7 +86,7 @@ int sk_filter(struct sock *sk, struct sk_buff *skb)
 	rcu_read_lock();
 	filter = rcu_dereference(sk->sk_filter);
 	if (filter) {
-		unsigned int pkt_len = sk_run_filter(skb, filter->insns);
+		unsigned int pkt_len = SK_RUN_FILTER(filter, skb);
 
 		err = pkt_len ? pskb_trim(skb, pkt_len) : -EPERM;
 	}
@@ -638,6 +579,7 @@ void sk_filter_release_rcu(struct rcu_head *rcu)
 {
 	struct sk_filter *fp = container_of(rcu, struct sk_filter, rcu);
 
+	bpf_jit_free(fp);
 	kfree(fp);
 }
 EXPORT_SYMBOL(sk_filter_release_rcu);
@@ -672,6 +614,7 @@ int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
 
 	atomic_set(&fp->refcnt, 1);
 	fp->len = fprog->len;
+	fp->bpf_func = sk_run_filter;
 
 	err = sk_chk_filter(fp->insns, fp->len);
 	if (err) {
@@ -679,6 +622,8 @@ int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
 		return err;
 	}
 
+	bpf_jit_compile(fp);
+
 	old_fp = rcu_dereference_protected(sk->sk_filter,
 					   sock_owned_by_user(sk));
 	rcu_assign_pointer(sk->sk_filter, fp);
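
Note (not part of the patch): the filter.c changes hang the BPF interpreter off a per-filter function pointer so that an architecture JIT (gated by the bpf_jit_enable sysctl added to sysctl_net_core.c further down in this diff) can swap in generated code. Conceptually the dispatch reduces to the sketch below; the real SK_RUN_FILTER macro and the bpf_jit_compile()/bpf_jit_free() hooks live in include/linux/filter.h and may differ in detail.

/* Conceptual equivalent of SK_RUN_FILTER(): bpf_func starts out as
 * sk_run_filter (the interpreter); bpf_jit_compile() may repoint it at
 * native code, and bpf_jit_free() tears that down on release. */
static inline unsigned int example_run_filter(const struct sk_filter *fp,
					      const struct sk_buff *skb)
{
	return fp->bpf_func(skb, fp->insns);
}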
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index 5ceb257e860c..381813eae46c 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -28,6 +28,7 @@
 static const char fmt_hex[] = "%#x\n";
 static const char fmt_long_hex[] = "%#lx\n";
 static const char fmt_dec[] = "%d\n";
+static const char fmt_udec[] = "%u\n";
 static const char fmt_ulong[] = "%lu\n";
 static const char fmt_u64[] = "%llu\n";
 
@@ -145,13 +146,10 @@ static ssize_t show_speed(struct device *dev,
 	if (!rtnl_trylock())
 		return restart_syscall();
 
-	if (netif_running(netdev) &&
-	    netdev->ethtool_ops &&
-	    netdev->ethtool_ops->get_settings) {
-		struct ethtool_cmd cmd = { ETHTOOL_GSET };
-
-		if (!netdev->ethtool_ops->get_settings(netdev, &cmd))
-			ret = sprintf(buf, fmt_dec, ethtool_cmd_speed(&cmd));
+	if (netif_running(netdev)) {
+		struct ethtool_cmd cmd;
+		if (!dev_ethtool_get_settings(netdev, &cmd))
+			ret = sprintf(buf, fmt_udec, ethtool_cmd_speed(&cmd));
 	}
 	rtnl_unlock();
 	return ret;
@@ -166,13 +164,11 @@ static ssize_t show_duplex(struct device *dev,
 	if (!rtnl_trylock())
 		return restart_syscall();
 
-	if (netif_running(netdev) &&
-	    netdev->ethtool_ops &&
-	    netdev->ethtool_ops->get_settings) {
-		struct ethtool_cmd cmd = { ETHTOOL_GSET };
-
-		if (!netdev->ethtool_ops->get_settings(netdev, &cmd))
-			ret = sprintf(buf, "%s\n", cmd.duplex ? "full" : "half");
+	if (netif_running(netdev)) {
+		struct ethtool_cmd cmd;
+		if (!dev_ethtool_get_settings(netdev, &cmd))
+			ret = sprintf(buf, "%s\n",
+				      cmd.duplex ? "full" : "half");
 	}
 	rtnl_unlock();
 	return ret;
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index 3f860261c5ee..1abb50841046 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -216,11 +216,14 @@ static void net_free(struct net *net)
 	kmem_cache_free(net_cachep, net);
 }
 
-static struct net *net_create(void)
+struct net *copy_net_ns(unsigned long flags, struct net *old_net)
 {
 	struct net *net;
 	int rv;
 
+	if (!(flags & CLONE_NEWNET))
+		return get_net(old_net);
+
 	net = net_alloc();
 	if (!net)
 		return ERR_PTR(-ENOMEM);
@@ -239,13 +242,6 @@
 	return net;
 }
 
-struct net *copy_net_ns(unsigned long flags, struct net *old_net)
-{
-	if (!(flags & CLONE_NEWNET))
-		return get_net(old_net);
-	return net_create();
-}
-
 static DEFINE_SPINLOCK(cleanup_list_lock);
 static LIST_HEAD(cleanup_list);  /* Must hold cleanup_list_lock to touch */
 
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 06be2431753e..46d9c3a4de2f 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -539,7 +539,7 @@ int __netpoll_rx(struct sk_buff *skb)
 {
 	int proto, len, ulen;
 	int hits = 0;
-	struct iphdr *iph;
+	const struct iphdr *iph;
 	struct udphdr *uh;
 	struct netpoll_info *npinfo = skb->dev->npinfo;
 	struct netpoll *np, *tmp;
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index aeeece72b72f..ff79d94b5944 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -2514,7 +2514,6 @@ static int pktgen_output_ipsec(struct sk_buff *skb, struct pktgen_dev *pkt_dev)
 {
 	struct xfrm_state *x = pkt_dev->flows[pkt_dev->curfl].x;
 	int err = 0;
-	struct iphdr *iph;
 
 	if (!x)
 		return 0;
@@ -2524,7 +2523,6 @@ static int pktgen_output_ipsec(struct sk_buff *skb, struct pktgen_dev *pkt_dev)
 		return 0;
 
 	spin_lock(&x->lock);
-	iph = ip_hdr(skb);
 
 	err = x->outer_mode->output(x, skb);
 	if (err)
@@ -2624,6 +2622,7 @@ static void pktgen_finalize_skb(struct pktgen_dev *pkt_dev, struct sk_buff *skb,
 	} else {
 		int frags = pkt_dev->nfrags;
 		int i, len;
+		int frag_len;
 
 
 		if (frags > MAX_SKB_FRAGS)
@@ -2635,6 +2634,8 @@ static void pktgen_finalize_skb(struct pktgen_dev *pkt_dev, struct sk_buff *skb,
 		}
 
 		i = 0;
+		frag_len = (datalen/frags) < PAGE_SIZE ?
+			   (datalen/frags) : PAGE_SIZE;
 		while (datalen > 0) {
 			if (unlikely(!pkt_dev->page)) {
 				int node = numa_node_id();
@@ -2648,38 +2649,18 @@ static void pktgen_finalize_skb(struct pktgen_dev *pkt_dev, struct sk_buff *skb,
 			skb_shinfo(skb)->frags[i].page = pkt_dev->page;
 			get_page(pkt_dev->page);
 			skb_shinfo(skb)->frags[i].page_offset = 0;
-			skb_shinfo(skb)->frags[i].size =
-			    (datalen < PAGE_SIZE ? datalen : PAGE_SIZE);
+			/*last fragment, fill rest of data*/
+			if (i == (frags - 1))
+				skb_shinfo(skb)->frags[i].size =
+				    (datalen < PAGE_SIZE ? datalen : PAGE_SIZE);
+			else
+				skb_shinfo(skb)->frags[i].size = frag_len;
 			datalen -= skb_shinfo(skb)->frags[i].size;
 			skb->len += skb_shinfo(skb)->frags[i].size;
 			skb->data_len += skb_shinfo(skb)->frags[i].size;
 			i++;
 			skb_shinfo(skb)->nr_frags = i;
 		}
-
-		while (i < frags) {
-			int rem;
-
-			if (i == 0)
-				break;
-
-			rem = skb_shinfo(skb)->frags[i - 1].size / 2;
-			if (rem == 0)
-				break;
-
-			skb_shinfo(skb)->frags[i - 1].size -= rem;
-
-			skb_shinfo(skb)->frags[i] =
-				skb_shinfo(skb)->frags[i - 1];
-			get_page(skb_shinfo(skb)->frags[i].page);
-			skb_shinfo(skb)->frags[i].page =
-				skb_shinfo(skb)->frags[i - 1].page;
-			skb_shinfo(skb)->frags[i].page_offset +=
-				skb_shinfo(skb)->frags[i - 1].size;
-			skb_shinfo(skb)->frags[i].size = rem;
-			i++;
-			skb_shinfo(skb)->nr_frags = i;
-		}
 	}
 
 	/* Stamp the time, and sequence number,
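
Note (not part of the patch): the pktgen change above spreads the payload evenly over the requested fragments instead of repeatedly halving the last one. As a worked example with assumed numbers: datalen = 9000, frags = 4 and PAGE_SIZE = 4096 give frag_len = min(9000 / 4, 4096) = 2250, so fragments 0-2 carry 2250 bytes each and the last fragment takes the remaining 2250 (still capped at PAGE_SIZE).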
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index d7c4bb4b1820..5a160f4a1ba0 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -1007,10 +1007,11 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
 	s_h = cb->args[0];
 	s_idx = cb->args[1];
 
+	rcu_read_lock();
 	for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
 		idx = 0;
 		head = &net->dev_index_head[h];
-		hlist_for_each_entry(dev, node, head, index_hlist) {
+		hlist_for_each_entry_rcu(dev, node, head, index_hlist) {
 			if (idx < s_idx)
 				goto cont;
 			if (rtnl_fill_ifinfo(skb, dev, RTM_NEWLINK,
@@ -1023,6 +1024,7 @@ cont:
 		}
 	}
 out:
+	rcu_read_unlock();
 	cb->args[1] = idx;
 	cb->args[0] = h;
 
@@ -1570,12 +1572,6 @@ struct net_device *rtnl_create_link(struct net *src_net, struct net *net,
 	dev->rtnl_link_state = RTNL_LINK_INITIALIZING;
 	dev->real_num_tx_queues = real_num_queues;
 
-	if (strchr(dev->name, '%')) {
-		err = dev_alloc_name(dev, dev->name);
-		if (err < 0)
-			goto err_free;
-	}
-
 	if (tb[IFLA_MTU])
 		dev->mtu = nla_get_u32(tb[IFLA_MTU]);
 	if (tb[IFLA_ADDRESS])
@@ -1595,8 +1591,6 @@ struct net_device *rtnl_create_link(struct net *src_net, struct net *net,
 
 	return dev;
 
-err_free:
-	free_netdev(dev);
 err:
 	return ERR_PTR(err);
 }
@@ -1879,7 +1873,6 @@ static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
 	int min_len;
 	int family;
 	int type;
-	int err;
 
 	type = nlh->nlmsg_type;
 	if (type > RTM_MAX)
@@ -1906,11 +1899,8 @@ static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
 		if (dumpit == NULL)
 			return -EOPNOTSUPP;
 
-		__rtnl_unlock();
 		rtnl = net->rtnl;
-		err = netlink_dump_start(rtnl, skb, nlh, dumpit, NULL);
-		rtnl_lock();
-		return err;
+		return netlink_dump_start(rtnl, skb, nlh, dumpit, NULL);
 	}
 
 	memset(rta_buf, 0, (rtattr_max * sizeof(struct rtattr *)));
@@ -1980,7 +1970,7 @@ static int __net_init rtnetlink_net_init(struct net *net)
 {
 	struct sock *sk;
 	sk = netlink_kernel_create(net, NETLINK_ROUTE, RTNLGRP_MAX,
-				   rtnetlink_rcv, &rtnl_mutex, THIS_MODULE);
+				   rtnetlink_rcv, NULL, THIS_MODULE);
 	if (!sk)
 		return -ENOMEM;
 	net->rtnl = sk;
diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
index 385b6095fdc4..a829e3f60aeb 100644
--- a/net/core/sysctl_net_core.c
+++ b/net/core/sysctl_net_core.c
@@ -122,6 +122,15 @@ static struct ctl_table net_core_table[] = {
 		.mode		= 0644,
 		.proc_handler	= proc_dointvec
 	},
+#ifdef CONFIG_BPF_JIT
+	{
+		.procname	= "bpf_jit_enable",
+		.data		= &bpf_jit_enable,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec
+	},
+#endif
 	{
 		.procname	= "netdev_tstamp_prequeue",
 		.data		= &netdev_tstamp_prequeue,