Diffstat (limited to 'net/core/dev.c')
 net/core/dev.c | 18 +++++++++++-------
 1 files changed, 11 insertions, 7 deletions
diff --git a/net/core/dev.c b/net/core/dev.c
index d4a1ec3bded5..14de297d024d 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -640,6 +640,8 @@ int dev_valid_name(const char *name)
 {
 	if (*name == '\0')
 		return 0;
+	if (strlen(name) >= IFNAMSIZ)
+		return 0;
 	if (!strcmp(name, ".") || !strcmp(name, ".."))
 		return 0;
 
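The new length check rejects names that would not fit in the IFNAMSIZ-sized dev->name buffer before any copy is attempted. A minimal user-space sketch of the rules this hunk leaves in place (IFNAMSIZ is 16 in <linux/if.h>; the later per-character checks in dev_valid_name() are omitted here):

#include <string.h>

#define IFNAMSIZ 16	/* matches the kernel's <linux/if.h> */

/* sketch: returns 1 if the name would pass the checks shown above */
static int name_is_valid(const char *name)
{
	if (*name == '\0')
		return 0;			/* empty name */
	if (strlen(name) >= IFNAMSIZ)
		return 0;			/* would not fit in dev->name */
	if (!strcmp(name, ".") || !strcmp(name, ".."))
		return 0;			/* reserved directory names */
	return 1;
}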
@@ -1166,12 +1168,12 @@ EXPORT_SYMBOL(netif_device_attach);
  * Invalidate hardware checksum when packet is to be mangled, and
  * complete checksum manually on outgoing path.
  */
-int skb_checksum_help(struct sk_buff *skb, int inward)
+int skb_checksum_help(struct sk_buff *skb)
 {
 	unsigned int csum;
 	int ret = 0, offset = skb->h.raw - skb->data;
 
-	if (inward)
+	if (skb->ip_summed == CHECKSUM_COMPLETE)
 		goto out_set_summed;
 
 	if (unlikely(skb_shinfo(skb)->gso_size)) {
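Dropping the inward flag works because the split of CHECKSUM_HW lets the skb itself carry the direction: CHECKSUM_COMPLETE marks a received packet whose full checksum the device has already stored in skb->csum, while CHECKSUM_PARTIAL marks an outgoing packet whose checksum still has to be finished by the device or by skb_checksum_help(). A rough sketch of the distinction (the numeric values here are placeholders, not taken from <linux/skbuff.h>):

/* Illustrative only -- the real definitions live in <linux/skbuff.h> */
#define CHECKSUM_NONE		0	/* no checksum information at all */
#define CHECKSUM_PARTIAL	1	/* TX: checksum still to be completed over
					 * the range starting at skb->h.raw */
#define CHECKSUM_COMPLETE	2	/* RX: device already checksummed the
					 * packet; result is in skb->csum */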
@@ -1223,7 +1225,7 @@ struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features)
 	skb->mac_len = skb->nh.raw - skb->data;
 	__skb_pull(skb, skb->mac_len);
 
-	if (unlikely(skb->ip_summed != CHECKSUM_HW)) {
+	if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
 		if (skb_header_cloned(skb) &&
 		    (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
 			return ERR_PTR(err);
@@ -1232,7 +1234,7 @@ struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features)
 	rcu_read_lock();
 	list_for_each_entry_rcu(ptype, &ptype_base[ntohs(type) & 15], list) {
 		if (ptype->type == type && !ptype->dev && ptype->gso_segment) {
-			if (unlikely(skb->ip_summed != CHECKSUM_HW)) {
+			if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
 				err = ptype->gso_send_check(skb);
 				segs = ERR_PTR(err);
 				if (err || skb_gso_ok(skb, features))
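For reference, the ptype_base lookup above hashes the packet type into one of 16 buckets by masking the host-order protocol value. A small stand-alone illustration of that index computation (ETH_P_IP is 0x0800 in <linux/if_ether.h>):

#include <stdio.h>
#include <arpa/inet.h>

int main(void)
{
	unsigned short type = htons(0x0800);	/* skb->protocol for IPv4, network order */
	unsigned int bucket = ntohs(type) & 15;	/* 0x0800 & 0xf == 0 */

	printf("ETH_P_IP falls into ptype_base bucket %u\n", bucket);
	return 0;
}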
@@ -1444,11 +1446,11 @@ int dev_queue_xmit(struct sk_buff *skb)
 	/* If packet is not checksummed and device does not support
 	 * checksumming for this protocol, complete checksumming here.
 	 */
-	if (skb->ip_summed == CHECKSUM_HW &&
+	if (skb->ip_summed == CHECKSUM_PARTIAL &&
 	    (!(dev->features & NETIF_F_GEN_CSUM) &&
 	     (!(dev->features & NETIF_F_IP_CSUM) ||
 	      skb->protocol != htons(ETH_P_IP))))
-		if (skb_checksum_help(skb, 0))
+		if (skb_checksum_help(skb))
 			goto out_kfree_skb;
 
 gso:
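The condition above reads: the packet still carries a partial checksum, the device cannot checksum arbitrary protocols (NETIF_F_GEN_CSUM), and it cannot checksum this packet as IPv4 either (NETIF_F_IP_CSUM), so the stack must finish the job in software. The same decision written as a helper, purely as a sketch (hypothetical function, assuming the kernel's struct and feature-flag definitions):

/* hypothetical helper, not part of this patch: nonzero means the stack has
 * to call skb_checksum_help() before handing the skb to the driver */
static int needs_sw_checksum(const struct net_device *dev,
			     const struct sk_buff *skb)
{
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;			/* nothing left to complete */
	if (dev->features & NETIF_F_GEN_CSUM)
		return 0;			/* device checksums any protocol */
	if ((dev->features & NETIF_F_IP_CSUM) &&
	    skb->protocol == htons(ETH_P_IP))
		return 0;			/* device checksums IPv4 */
	return 1;				/* complete it in software */
}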
@@ -3191,13 +3193,15 @@ struct net_device *alloc_netdev(int sizeof_priv, const char *name,
 	struct net_device *dev;
 	int alloc_size;
 
+	BUG_ON(strlen(name) >= sizeof(dev->name));
+
 	/* ensure 32-byte alignment of both the device and private area */
 	alloc_size = (sizeof(*dev) + NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST;
 	alloc_size += sizeof_priv + NETDEV_ALIGN_CONST;
 
 	p = kzalloc(alloc_size, GFP_KERNEL);
 	if (!p) {
-		printk(KERN_ERR "alloc_dev: Unable to allocate device.\n");
+		printk(KERN_ERR "alloc_netdev: Unable to allocate device.\n");
 		return NULL;
 	}
 
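The new BUG_ON() catches callers that hand alloc_netdev() a name template that cannot fit in dev->name, mirroring the dev_valid_name() check added earlier in this patch. The surrounding code then rounds both the device and the private area up to a 32-byte boundary; a stand-alone sketch of that round-up, assuming NETDEV_ALIGN_CONST is NETDEV_ALIGN - 1 with NETDEV_ALIGN equal to 32:

#include <stdio.h>
#include <stddef.h>

#define NETDEV_ALIGN		32
#define NETDEV_ALIGN_CONST	(NETDEV_ALIGN - 1)

int main(void)
{
	size_t sz = 1234;	/* stand-in for sizeof(struct net_device) */
	size_t aligned = (sz + NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST;

	printf("%zu rounds up to %zu\n", sz, aligned);	/* prints: 1234 rounds up to 1248 */
	return 0;
}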