about summary refs log tree commit diff stats
path: root/net/core/dev.c
diff options
context:
space:
mode:
Diffstat (limited to 'net/core/dev.c')
-rw-r--r--  net/core/dev.c  171
1 file changed, 116 insertions(+), 55 deletions(-)
diff --git a/net/core/dev.c b/net/core/dev.c
index a39354ee1432..1e0a1847c3bb 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -959,18 +959,30 @@ int dev_alloc_name(struct net_device *dev, const char *name)
959} 959}
960EXPORT_SYMBOL(dev_alloc_name); 960EXPORT_SYMBOL(dev_alloc_name);
961 961
962static int dev_get_valid_name(struct net_device *dev, const char *name) 962static int dev_alloc_name_ns(struct net *net,
963 struct net_device *dev,
964 const char *name)
963{ 965{
964 struct net *net; 966 char buf[IFNAMSIZ];
967 int ret;
965 968
966 BUG_ON(!dev_net(dev)); 969 ret = __dev_alloc_name(net, name, buf);
967 net = dev_net(dev); 970 if (ret >= 0)
971 strlcpy(dev->name, buf, IFNAMSIZ);
972 return ret;
973}
974
975static int dev_get_valid_name(struct net *net,
976 struct net_device *dev,
977 const char *name)
978{
979 BUG_ON(!net);
968 980
969 if (!dev_valid_name(name)) 981 if (!dev_valid_name(name))
970 return -EINVAL; 982 return -EINVAL;
971 983
972 if (strchr(name, '%')) 984 if (strchr(name, '%'))
973 return dev_alloc_name(dev, name); 985 return dev_alloc_name_ns(net, dev, name);
974 else if (__dev_get_by_name(net, name)) 986 else if (__dev_get_by_name(net, name))
975 return -EEXIST; 987 return -EEXIST;
976 else if (dev->name != name) 988 else if (dev->name != name)
@@ -1006,7 +1018,7 @@ int dev_change_name(struct net_device *dev, const char *newname)
1006 1018
1007 memcpy(oldname, dev->name, IFNAMSIZ); 1019 memcpy(oldname, dev->name, IFNAMSIZ);
1008 1020
1009 err = dev_get_valid_name(dev, newname); 1021 err = dev_get_valid_name(net, dev, newname);
1010 if (err < 0) 1022 if (err < 0)
1011 return err; 1023 return err;
1012 1024
@@ -1109,11 +1121,23 @@ void netdev_state_change(struct net_device *dev)
1109} 1121}
1110EXPORT_SYMBOL(netdev_state_change); 1122EXPORT_SYMBOL(netdev_state_change);
1111 1123
1112int netdev_bonding_change(struct net_device *dev, unsigned long event) 1124/**
1125 * netdev_notify_peers - notify network peers about existence of @dev
1126 * @dev: network device
1127 *
1128 * Generate traffic such that interested network peers are aware of
1129 * @dev, such as by generating a gratuitous ARP. This may be used when
1130 * a device wants to inform the rest of the network about some sort of
1131 * reconfiguration such as a failover event or virtual machine
1132 * migration.
1133 */
1134void netdev_notify_peers(struct net_device *dev)
1113{ 1135{
1114 return call_netdevice_notifiers(event, dev); 1136 rtnl_lock();
1137 call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, dev);
1138 rtnl_unlock();
1115} 1139}
1116EXPORT_SYMBOL(netdev_bonding_change); 1140EXPORT_SYMBOL(netdev_notify_peers);
1117 1141
1118/** 1142/**
1119 * dev_load - load a network module 1143 * dev_load - load a network module
@@ -1394,7 +1418,6 @@ rollback:
1394 nb->notifier_call(nb, NETDEV_DOWN, dev); 1418 nb->notifier_call(nb, NETDEV_DOWN, dev);
1395 } 1419 }
1396 nb->notifier_call(nb, NETDEV_UNREGISTER, dev); 1420 nb->notifier_call(nb, NETDEV_UNREGISTER, dev);
1397 nb->notifier_call(nb, NETDEV_UNREGISTER_BATCH, dev);
1398 } 1421 }
1399 } 1422 }
1400 1423
@@ -1436,7 +1459,6 @@ int unregister_netdevice_notifier(struct notifier_block *nb)
1436 nb->notifier_call(nb, NETDEV_DOWN, dev); 1459 nb->notifier_call(nb, NETDEV_DOWN, dev);
1437 } 1460 }
1438 nb->notifier_call(nb, NETDEV_UNREGISTER, dev); 1461 nb->notifier_call(nb, NETDEV_UNREGISTER, dev);
1439 nb->notifier_call(nb, NETDEV_UNREGISTER_BATCH, dev);
1440 } 1462 }
1441 } 1463 }
1442unlock: 1464unlock:
@@ -1642,6 +1664,19 @@ static inline int deliver_skb(struct sk_buff *skb,
1642 return pt_prev->func(skb, skb->dev, pt_prev, orig_dev); 1664 return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
1643} 1665}
1644 1666
1667static inline bool skb_loop_sk(struct packet_type *ptype, struct sk_buff *skb)
1668{
1669 if (ptype->af_packet_priv == NULL)
1670 return false;
1671
1672 if (ptype->id_match)
1673 return ptype->id_match(ptype, skb->sk);
1674 else if ((struct sock *)ptype->af_packet_priv == skb->sk)
1675 return true;
1676
1677 return false;
1678}
1679
1645/* 1680/*
1646 * Support routine. Sends outgoing frames to any network 1681 * Support routine. Sends outgoing frames to any network
1647 * taps currently in use. 1682 * taps currently in use.
@@ -1659,8 +1694,7 @@ static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
1659 * they originated from - MvS (miquels@drinkel.ow.org) 1694 * they originated from - MvS (miquels@drinkel.ow.org)
1660 */ 1695 */
1661 if ((ptype->dev == dev || !ptype->dev) && 1696 if ((ptype->dev == dev || !ptype->dev) &&
1662 (ptype->af_packet_priv == NULL || 1697 (!skb_loop_sk(ptype, skb))) {
1663 (struct sock *)ptype->af_packet_priv != skb->sk)) {
1664 if (pt_prev) { 1698 if (pt_prev) {
1665 deliver_skb(skb2, pt_prev, skb->dev); 1699 deliver_skb(skb2, pt_prev, skb->dev);
1666 pt_prev = ptype; 1700 pt_prev = ptype;
@@ -2122,7 +2156,8 @@ static bool can_checksum_protocol(netdev_features_t features, __be16 protocol)
2122static netdev_features_t harmonize_features(struct sk_buff *skb, 2156static netdev_features_t harmonize_features(struct sk_buff *skb,
2123 __be16 protocol, netdev_features_t features) 2157 __be16 protocol, netdev_features_t features)
2124{ 2158{
2125 if (!can_checksum_protocol(features, protocol)) { 2159 if (skb->ip_summed != CHECKSUM_NONE &&
2160 !can_checksum_protocol(features, protocol)) {
2126 features &= ~NETIF_F_ALL_CSUM; 2161 features &= ~NETIF_F_ALL_CSUM;
2127 features &= ~NETIF_F_SG; 2162 features &= ~NETIF_F_SG;
2128 } else if (illegal_highdma(skb->dev, skb)) { 2163 } else if (illegal_highdma(skb->dev, skb)) {
@@ -2162,9 +2197,7 @@ EXPORT_SYMBOL(netif_skb_features);
2162/* 2197/*
2163 * Returns true if either: 2198 * Returns true if either:
2164 * 1. skb has frag_list and the device doesn't support FRAGLIST, or 2199 * 1. skb has frag_list and the device doesn't support FRAGLIST, or
2165 * 2. skb is fragmented and the device does not support SG, or if 2200 * 2. skb is fragmented and the device does not support SG.
2166 * at least one of fragments is in highmem and device does not
2167 * support DMA from it.
2168 */ 2201 */
2169static inline int skb_needs_linearize(struct sk_buff *skb, 2202static inline int skb_needs_linearize(struct sk_buff *skb,
2170 int features) 2203 int features)
@@ -2193,9 +2226,6 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
2193 if (dev->priv_flags & IFF_XMIT_DST_RELEASE) 2226 if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
2194 skb_dst_drop(skb); 2227 skb_dst_drop(skb);
2195 2228
2196 if (!list_empty(&ptype_all))
2197 dev_queue_xmit_nit(skb, dev);
2198
2199 features = netif_skb_features(skb); 2229 features = netif_skb_features(skb);
2200 2230
2201 if (vlan_tx_tag_present(skb) && 2231 if (vlan_tx_tag_present(skb) &&
@@ -2230,6 +2260,9 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
2230 } 2260 }
2231 } 2261 }
2232 2262
2263 if (!list_empty(&ptype_all))
2264 dev_queue_xmit_nit(skb, dev);
2265
2233 skb_len = skb->len; 2266 skb_len = skb->len;
2234 rc = ops->ndo_start_xmit(skb, dev); 2267 rc = ops->ndo_start_xmit(skb, dev);
2235 trace_net_dev_xmit(skb, rc, dev, skb_len); 2268 trace_net_dev_xmit(skb, rc, dev, skb_len);
@@ -2252,6 +2285,9 @@ gso:
2252 if (dev->priv_flags & IFF_XMIT_DST_RELEASE) 2285 if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
2253 skb_dst_drop(nskb); 2286 skb_dst_drop(nskb);
2254 2287
2288 if (!list_empty(&ptype_all))
2289 dev_queue_xmit_nit(nskb, dev);
2290
2255 skb_len = nskb->len; 2291 skb_len = nskb->len;
2256 rc = ops->ndo_start_xmit(nskb, dev); 2292 rc = ops->ndo_start_xmit(nskb, dev);
2257 trace_net_dev_xmit(nskb, rc, dev, skb_len); 2293 trace_net_dev_xmit(nskb, rc, dev, skb_len);
@@ -2361,8 +2397,8 @@ static inline int get_xps_queue(struct net_device *dev, struct sk_buff *skb)
2361#endif 2397#endif
2362} 2398}
2363 2399
2364static struct netdev_queue *dev_pick_tx(struct net_device *dev, 2400struct netdev_queue *netdev_pick_tx(struct net_device *dev,
2365 struct sk_buff *skb) 2401 struct sk_buff *skb)
2366{ 2402{
2367 int queue_index; 2403 int queue_index;
2368 const struct net_device_ops *ops = dev->netdev_ops; 2404 const struct net_device_ops *ops = dev->netdev_ops;
@@ -2536,7 +2572,7 @@ int dev_queue_xmit(struct sk_buff *skb)
2536 2572
2537 skb_update_prio(skb); 2573 skb_update_prio(skb);
2538 2574
2539 txq = dev_pick_tx(dev, skb); 2575 txq = netdev_pick_tx(dev, skb);
2540 q = rcu_dereference_bh(txq->qdisc); 2576 q = rcu_dereference_bh(txq->qdisc);
2541 2577
2542#ifdef CONFIG_NET_CLS_ACT 2578#ifdef CONFIG_NET_CLS_ACT
@@ -2609,6 +2645,8 @@ EXPORT_SYMBOL(dev_queue_xmit);
2609 =======================================================================*/ 2645 =======================================================================*/
2610 2646
2611int netdev_max_backlog __read_mostly = 1000; 2647int netdev_max_backlog __read_mostly = 1000;
2648EXPORT_SYMBOL(netdev_max_backlog);
2649
2612int netdev_tstamp_prequeue __read_mostly = 1; 2650int netdev_tstamp_prequeue __read_mostly = 1;
2613int netdev_budget __read_mostly = 300; 2651int netdev_budget __read_mostly = 300;
2614int weight_p __read_mostly = 64; /* old backlog weight */ 2652int weight_p __read_mostly = 64; /* old backlog weight */
@@ -2635,15 +2673,16 @@ void __skb_get_rxhash(struct sk_buff *skb)
2635 if (!skb_flow_dissect(skb, &keys)) 2673 if (!skb_flow_dissect(skb, &keys))
2636 return; 2674 return;
2637 2675
2638 if (keys.ports) { 2676 if (keys.ports)
2639 if ((__force u16)keys.port16[1] < (__force u16)keys.port16[0])
2640 swap(keys.port16[0], keys.port16[1]);
2641 skb->l4_rxhash = 1; 2677 skb->l4_rxhash = 1;
2642 }
2643 2678
2644 /* get a consistent hash (same value on both flow directions) */ 2679 /* get a consistent hash (same value on both flow directions) */
2645 if ((__force u32)keys.dst < (__force u32)keys.src) 2680 if (((__force u32)keys.dst < (__force u32)keys.src) ||
2681 (((__force u32)keys.dst == (__force u32)keys.src) &&
2682 ((__force u16)keys.port16[1] < (__force u16)keys.port16[0]))) {
2646 swap(keys.dst, keys.src); 2683 swap(keys.dst, keys.src);
2684 swap(keys.port16[0], keys.port16[1]);
2685 }
2647 2686
2648 hash = jhash_3words((__force u32)keys.dst, 2687 hash = jhash_3words((__force u32)keys.dst,
2649 (__force u32)keys.src, 2688 (__force u32)keys.src,
@@ -3309,7 +3348,7 @@ ncls:
3309 3348
3310 if (pt_prev) { 3349 if (pt_prev) {
3311 if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC))) 3350 if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
3312 ret = -ENOMEM; 3351 goto drop;
3313 else 3352 else
3314 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev); 3353 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
3315 } else { 3354 } else {
@@ -4498,8 +4537,8 @@ static void dev_change_rx_flags(struct net_device *dev, int flags)
4498static int __dev_set_promiscuity(struct net_device *dev, int inc) 4537static int __dev_set_promiscuity(struct net_device *dev, int inc)
4499{ 4538{
4500 unsigned int old_flags = dev->flags; 4539 unsigned int old_flags = dev->flags;
4501 uid_t uid; 4540 kuid_t uid;
4502 gid_t gid; 4541 kgid_t gid;
4503 4542
4504 ASSERT_RTNL(); 4543 ASSERT_RTNL();
4505 4544
@@ -4530,8 +4569,9 @@ static int __dev_set_promiscuity(struct net_device *dev, int inc)
4530 "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u", 4569 "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
4531 dev->name, (dev->flags & IFF_PROMISC), 4570 dev->name, (dev->flags & IFF_PROMISC),
4532 (old_flags & IFF_PROMISC), 4571 (old_flags & IFF_PROMISC),
4533 audit_get_loginuid(current), 4572 from_kuid(&init_user_ns, audit_get_loginuid(current)),
4534 uid, gid, 4573 from_kuid(&init_user_ns, uid),
4574 from_kgid(&init_user_ns, gid),
4535 audit_get_sessionid(current)); 4575 audit_get_sessionid(current));
4536 } 4576 }
4537 4577
@@ -5224,12 +5264,12 @@ int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg)
5224 */ 5264 */
5225static int dev_new_index(struct net *net) 5265static int dev_new_index(struct net *net)
5226{ 5266{
5227 static int ifindex; 5267 int ifindex = net->ifindex;
5228 for (;;) { 5268 for (;;) {
5229 if (++ifindex <= 0) 5269 if (++ifindex <= 0)
5230 ifindex = 1; 5270 ifindex = 1;
5231 if (!__dev_get_by_index(net, ifindex)) 5271 if (!__dev_get_by_index(net, ifindex))
5232 return ifindex; 5272 return net->ifindex = ifindex;
5233 } 5273 }
5234} 5274}
5235 5275
@@ -5307,10 +5347,6 @@ static void rollback_registered_many(struct list_head *head)
5307 netdev_unregister_kobject(dev); 5347 netdev_unregister_kobject(dev);
5308 } 5348 }
5309 5349
5310 /* Process any work delayed until the end of the batch */
5311 dev = list_first_entry(head, struct net_device, unreg_list);
5312 call_netdevice_notifiers(NETDEV_UNREGISTER_BATCH, dev);
5313
5314 synchronize_net(); 5350 synchronize_net();
5315 5351
5316 list_for_each_entry(dev, head, unreg_list) 5352 list_for_each_entry(dev, head, unreg_list)
@@ -5568,7 +5604,7 @@ int register_netdevice(struct net_device *dev)
5568 5604
5569 dev->iflink = -1; 5605 dev->iflink = -1;
5570 5606
5571 ret = dev_get_valid_name(dev, dev->name); 5607 ret = dev_get_valid_name(net, dev, dev->name);
5572 if (ret < 0) 5608 if (ret < 0)
5573 goto out; 5609 goto out;
5574 5610
@@ -5582,7 +5618,12 @@ int register_netdevice(struct net_device *dev)
5582 } 5618 }
5583 } 5619 }
5584 5620
5585 dev->ifindex = dev_new_index(net); 5621 ret = -EBUSY;
5622 if (!dev->ifindex)
5623 dev->ifindex = dev_new_index(net);
5624 else if (__dev_get_by_index(net, dev->ifindex))
5625 goto err_uninit;
5626
5586 if (dev->iflink == -1) 5627 if (dev->iflink == -1)
5587 dev->iflink = dev->ifindex; 5628 dev->iflink = dev->ifindex;
5588 5629
@@ -5625,6 +5666,8 @@ int register_netdevice(struct net_device *dev)
5625 5666
5626 set_bit(__LINK_STATE_PRESENT, &dev->state); 5667 set_bit(__LINK_STATE_PRESENT, &dev->state);
5627 5668
5669 linkwatch_init_dev(dev);
5670
5628 dev_init_scheduler(dev); 5671 dev_init_scheduler(dev);
5629 dev_hold(dev); 5672 dev_hold(dev);
5630 list_netdevice(dev); 5673 list_netdevice(dev);
@@ -5732,6 +5775,7 @@ EXPORT_SYMBOL(netdev_refcnt_read);
5732 5775
5733/** 5776/**
5734 * netdev_wait_allrefs - wait until all references are gone. 5777 * netdev_wait_allrefs - wait until all references are gone.
5778 * @dev: target net_device
5735 * 5779 *
5736 * This is called when unregistering network devices. 5780 * This is called when unregistering network devices.
5737 * 5781 *
@@ -5757,9 +5801,12 @@ static void netdev_wait_allrefs(struct net_device *dev)
5757 5801
5758 /* Rebroadcast unregister notification */ 5802 /* Rebroadcast unregister notification */
5759 call_netdevice_notifiers(NETDEV_UNREGISTER, dev); 5803 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
5760 /* don't resend NETDEV_UNREGISTER_BATCH, _BATCH users
5761 * should have already handle it the first time */
5762 5804
5805 __rtnl_unlock();
5806 rcu_barrier();
5807 rtnl_lock();
5808
5809 call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
5763 if (test_bit(__LINK_STATE_LINKWATCH_PENDING, 5810 if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
5764 &dev->state)) { 5811 &dev->state)) {
5765 /* We must not have linkwatch events 5812 /* We must not have linkwatch events
@@ -5821,9 +5868,8 @@ void netdev_run_todo(void)
5821 5868
5822 __rtnl_unlock(); 5869 __rtnl_unlock();
5823 5870
5824 /* Wait for rcu callbacks to finish before attempting to drain 5871
5825 * the device list. This usually avoids a 250ms wait. 5872 /* Wait for rcu callbacks to finish before next phase */
5826 */
5827 if (!list_empty(&list)) 5873 if (!list_empty(&list))
5828 rcu_barrier(); 5874 rcu_barrier();
5829 5875
@@ -5832,6 +5878,10 @@ void netdev_run_todo(void)
5832 = list_first_entry(&list, struct net_device, todo_list); 5878 = list_first_entry(&list, struct net_device, todo_list);
5833 list_del(&dev->todo_list); 5879 list_del(&dev->todo_list);
5834 5880
5881 rtnl_lock();
5882 call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
5883 __rtnl_unlock();
5884
5835 if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) { 5885 if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
5836 pr_err("network todo '%s' but state %d\n", 5886 pr_err("network todo '%s' but state %d\n",
5837 dev->name, dev->reg_state); 5887 dev->name, dev->reg_state);
@@ -5927,6 +5977,8 @@ struct netdev_queue *dev_ingress_queue_create(struct net_device *dev)
5927 return queue; 5977 return queue;
5928} 5978}
5929 5979
5980static const struct ethtool_ops default_ethtool_ops;
5981
5930/** 5982/**
5931 * alloc_netdev_mqs - allocate network device 5983 * alloc_netdev_mqs - allocate network device
5932 * @sizeof_priv: size of private data to allocate space for 5984 * @sizeof_priv: size of private data to allocate space for
@@ -6014,6 +6066,8 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
6014 6066
6015 strcpy(dev->name, name); 6067 strcpy(dev->name, name);
6016 dev->group = INIT_NETDEV_GROUP; 6068 dev->group = INIT_NETDEV_GROUP;
6069 if (!dev->ethtool_ops)
6070 dev->ethtool_ops = &default_ethtool_ops;
6017 return dev; 6071 return dev;
6018 6072
6019free_all: 6073free_all:
@@ -6198,7 +6252,7 @@ int dev_change_net_namespace(struct net_device *dev, struct net *net, const char
6198 /* We get here if we can't use the current device name */ 6252 /* We get here if we can't use the current device name */
6199 if (!pat) 6253 if (!pat)
6200 goto out; 6254 goto out;
6201 if (dev_get_valid_name(dev, pat) < 0) 6255 if (dev_get_valid_name(net, dev, pat) < 0)
6202 goto out; 6256 goto out;
6203 } 6257 }
6204 6258
@@ -6226,7 +6280,8 @@ int dev_change_net_namespace(struct net_device *dev, struct net *net, const char
6226 the device is just moving and can keep their slaves up. 6280 the device is just moving and can keep their slaves up.
6227 */ 6281 */
6228 call_netdevice_notifiers(NETDEV_UNREGISTER, dev); 6282 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
6229 call_netdevice_notifiers(NETDEV_UNREGISTER_BATCH, dev); 6283 rcu_barrier();
6284 call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
6230 rtmsg_ifinfo(RTM_DELLINK, dev, ~0U); 6285 rtmsg_ifinfo(RTM_DELLINK, dev, ~0U);
6231 6286
6232 /* 6287 /*
@@ -6409,22 +6464,26 @@ const char *netdev_drivername(const struct net_device *dev)
6409 return empty; 6464 return empty;
6410} 6465}
6411 6466
6412int __netdev_printk(const char *level, const struct net_device *dev, 6467static int __netdev_printk(const char *level, const struct net_device *dev,
6413 struct va_format *vaf) 6468 struct va_format *vaf)
6414{ 6469{
6415 int r; 6470 int r;
6416 6471
6417 if (dev && dev->dev.parent) 6472 if (dev && dev->dev.parent) {
6418 r = dev_printk(level, dev->dev.parent, "%s: %pV", 6473 r = dev_printk_emit(level[1] - '0',
6419 netdev_name(dev), vaf); 6474 dev->dev.parent,
6420 else if (dev) 6475 "%s %s %s: %pV",
6476 dev_driver_string(dev->dev.parent),
6477 dev_name(dev->dev.parent),
6478 netdev_name(dev), vaf);
6479 } else if (dev) {
6421 r = printk("%s%s: %pV", level, netdev_name(dev), vaf); 6480 r = printk("%s%s: %pV", level, netdev_name(dev), vaf);
6422 else 6481 } else {
6423 r = printk("%s(NULL net_device): %pV", level, vaf); 6482 r = printk("%s(NULL net_device): %pV", level, vaf);
6483 }
6424 6484
6425 return r; 6485 return r;
6426} 6486}
6427EXPORT_SYMBOL(__netdev_printk);
6428 6487
6429int netdev_printk(const char *level, const struct net_device *dev, 6488int netdev_printk(const char *level, const struct net_device *dev,
6430 const char *format, ...) 6489 const char *format, ...)
@@ -6439,6 +6498,7 @@ int netdev_printk(const char *level, const struct net_device *dev,
6439 vaf.va = &args; 6498 vaf.va = &args;
6440 6499
6441 r = __netdev_printk(level, dev, &vaf); 6500 r = __netdev_printk(level, dev, &vaf);
6501
6442 va_end(args); 6502 va_end(args);
6443 6503
6444 return r; 6504 return r;
@@ -6458,6 +6518,7 @@ int func(const struct net_device *dev, const char *fmt, ...) \
6458 vaf.va = &args; \ 6518 vaf.va = &args; \
6459 \ 6519 \
6460 r = __netdev_printk(level, dev, &vaf); \ 6520 r = __netdev_printk(level, dev, &vaf); \
6521 \
6461 va_end(args); \ 6522 va_end(args); \
6462 \ 6523 \
6463 return r; \ 6524 return r; \