Diffstat (limited to 'net/core/dev.c')
-rw-r--r--	net/core/dev.c	118
1 file changed, 52 insertions(+), 66 deletions(-)
diff --git a/net/core/dev.c b/net/core/dev.c
index 6982bfd6a781..452db7090d18 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -446,7 +446,7 @@ void __dev_remove_pack(struct packet_type *pt)
 		}
 	}
 
-	printk(KERN_WARNING "dev_remove_pack: %p not found.\n", pt);
+	pr_warn("dev_remove_pack: %p not found\n", pt);
 out:
 	spin_unlock(&ptype_lock);
 }
@@ -848,21 +848,21 @@ EXPORT_SYMBOL(dev_get_by_flags_rcu);
  * to allow sysfs to work. We also disallow any kind of
  * whitespace.
  */
-int dev_valid_name(const char *name)
+bool dev_valid_name(const char *name)
 {
 	if (*name == '\0')
-		return 0;
+		return false;
 	if (strlen(name) >= IFNAMSIZ)
-		return 0;
+		return false;
 	if (!strcmp(name, ".") || !strcmp(name, ".."))
-		return 0;
+		return false;
 
 	while (*name) {
 		if (*name == '/' || isspace(*name))
-			return 0;
+			return false;
 		name++;
 	}
-	return 1;
+	return true;
 }
 EXPORT_SYMBOL(dev_valid_name);
 
@@ -1039,8 +1039,7 @@ rollback:
 			memcpy(dev->name, oldname, IFNAMSIZ);
 			goto rollback;
 		} else {
-			printk(KERN_ERR
-			       "%s: name change rollback failed: %d.\n",
-			       dev->name, ret);
+			pr_err("%s: name change rollback failed: %d\n",
+			       dev->name, ret);
 		}
 	}
@@ -1139,9 +1138,8 @@ void dev_load(struct net *net, const char *name)
 	no_module = request_module("netdev-%s", name);
 	if (no_module && capable(CAP_SYS_MODULE)) {
 		if (!request_module("%s", name))
-			pr_err("Loading kernel module for a network device "
-"with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s "
-"instead\n", name);
+			pr_err("Loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s instead.\n",
+			       name);
 	}
 }
 EXPORT_SYMBOL(dev_load);
@@ -1655,10 +1653,9 @@ static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
 		if (skb_network_header(skb2) < skb2->data ||
 		    skb2->network_header > skb2->tail) {
 			if (net_ratelimit())
-				printk(KERN_CRIT "protocol %04x is "
-				       "buggy, dev %s\n",
-				       ntohs(skb2->protocol),
-				       dev->name);
+				pr_crit("protocol %04x is buggy, dev %s\n",
+					ntohs(skb2->protocol),
+					dev->name);
 			skb_reset_network_header(skb2);
 		}
 
@@ -1691,9 +1688,7 @@ static void netif_setup_tc(struct net_device *dev, unsigned int txq)
 
 	/* If TC0 is invalidated disable TC mapping */
 	if (tc->offset + tc->count > txq) {
-		pr_warning("Number of in use tx queues changed "
-			   "invalidating tc mappings. Priority "
-			   "traffic classification disabled!\n");
+		pr_warn("Number of in use tx queues changed invalidating tc mappings. Priority traffic classification disabled!\n");
 		dev->num_tc = 0;
 		return;
 	}
@@ -1704,11 +1699,8 @@ static void netif_setup_tc(struct net_device *dev, unsigned int txq)
 
 		tc = &dev->tc_to_txq[q];
 		if (tc->offset + tc->count > txq) {
-			pr_warning("Number of in use tx queues "
-				   "changed. Priority %i to tc "
-				   "mapping %i is no longer valid "
-				   "setting map to 0\n",
-				   i, q);
+			pr_warn("Number of in use tx queues changed. Priority %i to tc mapping %i is no longer valid. Setting map to 0\n",
+				i, q);
 			netdev_set_prio_tc_map(dev, i, 0);
 		}
 	}
@@ -2014,8 +2006,7 @@ EXPORT_SYMBOL(skb_gso_segment);
 void netdev_rx_csum_fault(struct net_device *dev)
 {
 	if (net_ratelimit()) {
-		printk(KERN_ERR "%s: hw csum failure.\n",
-		       dev ? dev->name : "<unknown>");
+		pr_err("%s: hw csum failure\n", dev ? dev->name : "<unknown>");
 		dump_stack();
 	}
 }
@@ -2332,9 +2323,9 @@ static inline u16 dev_cap_txqueue(struct net_device *dev, u16 queue_index)
 {
 	if (unlikely(queue_index >= dev->real_num_tx_queues)) {
 		if (net_ratelimit()) {
-			pr_warning("%s selects TX queue %d, but "
-				   "real number of TX queues is %d\n",
-				   dev->name, queue_index, dev->real_num_tx_queues);
+			pr_warn("%s selects TX queue %d, but real number of TX queues is %d\n",
+				dev->name, queue_index,
+				dev->real_num_tx_queues);
 		}
 		return 0;
 	}
@@ -2578,16 +2569,16 @@ int dev_queue_xmit(struct sk_buff *skb)
 			}
 			HARD_TX_UNLOCK(dev, txq);
 			if (net_ratelimit())
-				printk(KERN_CRIT "Virtual device %s asks to "
-				       "queue packet!\n", dev->name);
+				pr_crit("Virtual device %s asks to queue packet!\n",
+					dev->name);
 		} else {
 			/* Recursion is detected! It is possible,
 			 * unfortunately
 			 */
 recursion_alert:
 			if (net_ratelimit())
-				printk(KERN_CRIT "Dead loop on virtual device "
-				       "%s, fix it urgently!\n", dev->name);
+				pr_crit("Dead loop on virtual device %s, fix it urgently!\n",
+					dev->name);
 		}
 	}
 
@@ -3069,8 +3060,8 @@ static int ing_filter(struct sk_buff *skb, struct netdev_queue *rxq)
 
 	if (unlikely(MAX_RED_LOOP < ttl++)) {
 		if (net_ratelimit())
-			pr_warning( "Redir loop detected Dropping packet (%d->%d)\n",
-				     skb->skb_iif, dev->ifindex);
+			pr_warn("Redir loop detected Dropping packet (%d->%d)\n",
+				skb->skb_iif, dev->ifindex);
 		return TC_ACT_SHOT;
 	}
 
@@ -3569,7 +3560,8 @@ EXPORT_SYMBOL(napi_gro_receive);
 static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
 {
 	__skb_pull(skb, skb_headlen(skb));
-	skb_reserve(skb, NET_IP_ALIGN - skb_headroom(skb));
+	/* restore the reserve we had after netdev_alloc_skb_ip_align() */
+	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN - skb_headroom(skb));
 	skb->vlan_tci = 0;
 	skb->dev = napi->dev;
 	skb->skb_iif = 0;
@@ -4497,16 +4489,15 @@ static int __dev_set_promiscuity(struct net_device *dev, int inc)
 			dev->flags &= ~IFF_PROMISC;
 		else {
 			dev->promiscuity -= inc;
-			printk(KERN_WARNING "%s: promiscuity touches roof, "
-				"set promiscuity failed, promiscuity feature "
-				"of device might be broken.\n", dev->name);
+			pr_warn("%s: promiscuity touches roof, set promiscuity failed. promiscuity feature of device might be broken.\n",
+				dev->name);
 			return -EOVERFLOW;
 		}
 	}
 	if (dev->flags != old_flags) {
-		printk(KERN_INFO "device %s %s promiscuous mode\n",
-		       dev->name, (dev->flags & IFF_PROMISC) ? "entered" :
-							       "left");
+		pr_info("device %s %s promiscuous mode\n",
+			dev->name,
+			dev->flags & IFF_PROMISC ? "entered" : "left");
 		if (audit_enabled) {
 			current_uid_gid(&uid, &gid);
 			audit_log(current->audit_context, GFP_ATOMIC,
@@ -4579,9 +4570,8 @@ int dev_set_allmulti(struct net_device *dev, int inc)
 		dev->flags &= ~IFF_ALLMULTI;
 	else {
 		dev->allmulti -= inc;
-		printk(KERN_WARNING "%s: allmulti touches roof, "
-			"set allmulti failed, allmulti feature of "
-			"device might be broken.\n", dev->name);
+		pr_warn("%s: allmulti touches roof, set allmulti failed. allmulti feature of device might be broken.\n",
+			dev->name);
 		return -EOVERFLOW;
 	}
 }
@@ -5238,8 +5228,8 @@ static void rollback_registered_many(struct list_head *head)
 		 * devices and proceed with the remaining.
 		 */
 		if (dev->reg_state == NETREG_UNINITIALIZED) {
-			pr_debug("unregister_netdevice: device %s/%p never "
-				 "was registered\n", dev->name, dev);
+			pr_debug("unregister_netdevice: device %s/%p never was registered\n",
+				 dev->name, dev);
 
 			WARN_ON(1);
 			list_del(&dev->unreg_list);
@@ -5471,7 +5461,7 @@ static int netif_alloc_rx_queues(struct net_device *dev)
 
 	rx = kcalloc(count, sizeof(struct netdev_rx_queue), GFP_KERNEL);
 	if (!rx) {
-		pr_err("netdev: Unable to allocate %u rx queues.\n", count);
+		pr_err("netdev: Unable to allocate %u rx queues\n", count);
 		return -ENOMEM;
 	}
 	dev->_rx = rx;
@@ -5505,8 +5495,7 @@ static int netif_alloc_netdev_queues(struct net_device *dev)
 
 	tx = kcalloc(count, sizeof(struct netdev_queue), GFP_KERNEL);
 	if (!tx) {
-		pr_err("netdev: Unable to allocate %u tx queues.\n",
-		       count);
+		pr_err("netdev: Unable to allocate %u tx queues\n", count);
 		return -ENOMEM;
 	}
 	dev->_tx = tx;
@@ -5765,10 +5754,8 @@ static void netdev_wait_allrefs(struct net_device *dev)
 		refcnt = netdev_refcnt_read(dev);
 
 		if (time_after(jiffies, warning_time + 10 * HZ)) {
-			printk(KERN_EMERG "unregister_netdevice: "
-			       "waiting for %s to become free. Usage "
-			       "count = %d\n",
-			       dev->name, refcnt);
+			pr_emerg("unregister_netdevice: waiting for %s to become free. Usage count = %d\n",
+				 dev->name, refcnt);
 			warning_time = jiffies;
 		}
 	}
@@ -5819,7 +5806,7 @@ void netdev_run_todo(void)
 		list_del(&dev->todo_list);
 
 		if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
-			printk(KERN_ERR "network todo '%s' but state %d\n",
+			pr_err("network todo '%s' but state %d\n",
 			       dev->name, dev->reg_state);
 			dump_stack();
 			continue;
@@ -5848,12 +5835,12 @@ void netdev_run_todo(void)
 /* Convert net_device_stats to rtnl_link_stats64. They have the same
  * fields in the same order, with only the type differing.
  */
-static void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
-				    const struct net_device_stats *netdev_stats)
+void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
+			     const struct net_device_stats *netdev_stats)
 {
 #if BITS_PER_LONG == 64
 	BUILD_BUG_ON(sizeof(*stats64) != sizeof(*netdev_stats));
 	memcpy(stats64, netdev_stats, sizeof(*stats64));
 #else
 	size_t i, n = sizeof(*stats64) / sizeof(u64);
 	const unsigned long *src = (const unsigned long *)netdev_stats;
@@ -5865,6 +5852,7 @@ static void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
 		dst[i] = src[i];
 #endif
 }
+EXPORT_SYMBOL(netdev_stats_to_stats64);
 
 /**
  * dev_get_stats - get network device statistics
@@ -5935,15 +5923,13 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
 	BUG_ON(strlen(name) >= sizeof(dev->name));
 
 	if (txqs < 1) {
-		pr_err("alloc_netdev: Unable to allocate device "
-		       "with zero queues.\n");
+		pr_err("alloc_netdev: Unable to allocate device with zero queues\n");
 		return NULL;
 	}
 
 #ifdef CONFIG_RPS
 	if (rxqs < 1) {
-		pr_err("alloc_netdev: Unable to allocate device "
-		       "with zero RX queues.\n");
+		pr_err("alloc_netdev: Unable to allocate device with zero RX queues\n");
 		return NULL;
 	}
 #endif
@@ -5959,7 +5945,7 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
 
 	p = kzalloc(alloc_size, GFP_KERNEL);
 	if (!p) {
-		printk(KERN_ERR "alloc_netdev: Unable to allocate device.\n");
+		pr_err("alloc_netdev: Unable to allocate device\n");
 		return NULL;
 	}
 
@@ -6492,8 +6478,8 @@ static void __net_exit default_device_exit(struct net *net)
 		snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
 		err = dev_change_net_namespace(dev, &init_net, fb_name);
 		if (err) {
-			printk(KERN_EMERG "%s: failed to move %s to init_net: %d\n",
-			       __func__, dev->name, err);
+			pr_emerg("%s: failed to move %s to init_net: %d\n",
+				 __func__, dev->name, err);
 			BUG();
 		}
 	}