about summary refs log tree commit diff stats
path: root/net/core
diff options
context:
space:
mode:
Diffstat (limited to 'net/core')
-rw-r--r--net/core/datagram.c3
-rw-r--r--net/core/dev.c656
-rw-r--r--net/core/drop_monitor.c14
-rw-r--r--net/core/ethtool.c32
-rw-r--r--net/core/neighbour.c95
-rw-r--r--net/core/net-sysfs.c2
-rw-r--r--net/core/net_namespace.c35
-rw-r--r--net/core/netpoll.c6
-rw-r--r--net/core/pktgen.c700
-rw-r--r--net/core/rtnetlink.c37
-rw-r--r--net/core/skbuff.c3
-rw-r--r--net/core/sock.c16
12 files changed, 837 insertions, 762 deletions
diff --git a/net/core/datagram.c b/net/core/datagram.c
index b0fe69211eef..1c6cf3a1a4f6 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -55,6 +55,7 @@
55#include <net/checksum.h> 55#include <net/checksum.h>
56#include <net/sock.h> 56#include <net/sock.h>
57#include <net/tcp_states.h> 57#include <net/tcp_states.h>
58#include <trace/events/skb.h>
58 59
59/* 60/*
60 * Is a socket 'connection oriented' ? 61 * Is a socket 'connection oriented' ?
@@ -284,6 +285,8 @@ int skb_copy_datagram_iovec(const struct sk_buff *skb, int offset,
284 int i, copy = start - offset; 285 int i, copy = start - offset;
285 struct sk_buff *frag_iter; 286 struct sk_buff *frag_iter;
286 287
288 trace_skb_copy_datagram_iovec(skb, len);
289
287 /* Copy header. */ 290 /* Copy header. */
288 if (copy > 0) { 291 if (copy > 0) {
289 if (copy > len) 292 if (copy > len)
diff --git a/net/core/dev.c b/net/core/dev.c
index 278d489aad3b..84945470ab38 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -191,7 +191,6 @@ static struct list_head ptype_all __read_mostly; /* Taps */
191 * semaphore held. 191 * semaphore held.
192 */ 192 */
193DEFINE_RWLOCK(dev_base_lock); 193DEFINE_RWLOCK(dev_base_lock);
194
195EXPORT_SYMBOL(dev_base_lock); 194EXPORT_SYMBOL(dev_base_lock);
196 195
197#define NETDEV_HASHBITS 8 196#define NETDEV_HASHBITS 8
@@ -248,6 +247,7 @@ static RAW_NOTIFIER_HEAD(netdev_chain);
248 */ 247 */
249 248
250DEFINE_PER_CPU(struct softnet_data, softnet_data); 249DEFINE_PER_CPU(struct softnet_data, softnet_data);
250EXPORT_PER_CPU_SYMBOL(softnet_data);
251 251
252#ifdef CONFIG_LOCKDEP 252#ifdef CONFIG_LOCKDEP
253/* 253/*
@@ -269,10 +269,10 @@ static const unsigned short netdev_lock_type[] =
269 ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL, 269 ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
270 ARPHRD_FCFABRIC, ARPHRD_IEEE802_TR, ARPHRD_IEEE80211, 270 ARPHRD_FCFABRIC, ARPHRD_IEEE802_TR, ARPHRD_IEEE80211,
271 ARPHRD_IEEE80211_PRISM, ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET, 271 ARPHRD_IEEE80211_PRISM, ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET,
272 ARPHRD_PHONET_PIPE, ARPHRD_IEEE802154, ARPHRD_IEEE802154_PHY, 272 ARPHRD_PHONET_PIPE, ARPHRD_IEEE802154,
273 ARPHRD_VOID, ARPHRD_NONE}; 273 ARPHRD_VOID, ARPHRD_NONE};
274 274
275static const char *netdev_lock_name[] = 275static const char *const netdev_lock_name[] =
276 {"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25", 276 {"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
277 "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET", 277 "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
278 "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM", 278 "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
@@ -287,7 +287,7 @@ static const char *netdev_lock_name[] =
287 "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL", 287 "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
288 "_xmit_FCFABRIC", "_xmit_IEEE802_TR", "_xmit_IEEE80211", 288 "_xmit_FCFABRIC", "_xmit_IEEE802_TR", "_xmit_IEEE80211",
289 "_xmit_IEEE80211_PRISM", "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET", 289 "_xmit_IEEE80211_PRISM", "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET",
290 "_xmit_PHONET_PIPE", "_xmit_IEEE802154", "_xmit_IEEE802154_PHY", 290 "_xmit_PHONET_PIPE", "_xmit_IEEE802154",
291 "_xmit_VOID", "_xmit_NONE"}; 291 "_xmit_VOID", "_xmit_NONE"};
292 292
293static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)]; 293static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
@@ -381,6 +381,7 @@ void dev_add_pack(struct packet_type *pt)
381 } 381 }
382 spin_unlock_bh(&ptype_lock); 382 spin_unlock_bh(&ptype_lock);
383} 383}
384EXPORT_SYMBOL(dev_add_pack);
384 385
385/** 386/**
386 * __dev_remove_pack - remove packet handler 387 * __dev_remove_pack - remove packet handler
@@ -418,6 +419,8 @@ void __dev_remove_pack(struct packet_type *pt)
418out: 419out:
419 spin_unlock_bh(&ptype_lock); 420 spin_unlock_bh(&ptype_lock);
420} 421}
422EXPORT_SYMBOL(__dev_remove_pack);
423
421/** 424/**
422 * dev_remove_pack - remove packet handler 425 * dev_remove_pack - remove packet handler
423 * @pt: packet type declaration 426 * @pt: packet type declaration
@@ -436,6 +439,7 @@ void dev_remove_pack(struct packet_type *pt)
436 439
437 synchronize_net(); 440 synchronize_net();
438} 441}
442EXPORT_SYMBOL(dev_remove_pack);
439 443
440/****************************************************************************** 444/******************************************************************************
441 445
@@ -499,6 +503,7 @@ int netdev_boot_setup_check(struct net_device *dev)
499 } 503 }
500 return 0; 504 return 0;
501} 505}
506EXPORT_SYMBOL(netdev_boot_setup_check);
502 507
503 508
504/** 509/**
@@ -591,6 +596,7 @@ struct net_device *__dev_get_by_name(struct net *net, const char *name)
591 } 596 }
592 return NULL; 597 return NULL;
593} 598}
599EXPORT_SYMBOL(__dev_get_by_name);
594 600
595/** 601/**
596 * dev_get_by_name - find a device by its name 602 * dev_get_by_name - find a device by its name
@@ -615,6 +621,7 @@ struct net_device *dev_get_by_name(struct net *net, const char *name)
615 read_unlock(&dev_base_lock); 621 read_unlock(&dev_base_lock);
616 return dev; 622 return dev;
617} 623}
624EXPORT_SYMBOL(dev_get_by_name);
618 625
619/** 626/**
620 * __dev_get_by_index - find a device by its ifindex 627 * __dev_get_by_index - find a device by its ifindex
@@ -640,6 +647,7 @@ struct net_device *__dev_get_by_index(struct net *net, int ifindex)
640 } 647 }
641 return NULL; 648 return NULL;
642} 649}
650EXPORT_SYMBOL(__dev_get_by_index);
643 651
644 652
645/** 653/**
@@ -664,6 +672,7 @@ struct net_device *dev_get_by_index(struct net *net, int ifindex)
664 read_unlock(&dev_base_lock); 672 read_unlock(&dev_base_lock);
665 return dev; 673 return dev;
666} 674}
675EXPORT_SYMBOL(dev_get_by_index);
667 676
668/** 677/**
669 * dev_getbyhwaddr - find a device by its hardware address 678 * dev_getbyhwaddr - find a device by its hardware address
@@ -693,7 +702,6 @@ struct net_device *dev_getbyhwaddr(struct net *net, unsigned short type, char *h
693 702
694 return NULL; 703 return NULL;
695} 704}
696
697EXPORT_SYMBOL(dev_getbyhwaddr); 705EXPORT_SYMBOL(dev_getbyhwaddr);
698 706
699struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type) 707struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
@@ -707,7 +715,6 @@ struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
707 715
708 return NULL; 716 return NULL;
709} 717}
710
711EXPORT_SYMBOL(__dev_getfirstbyhwtype); 718EXPORT_SYMBOL(__dev_getfirstbyhwtype);
712 719
713struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type) 720struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
@@ -721,7 +728,6 @@ struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
721 rtnl_unlock(); 728 rtnl_unlock();
722 return dev; 729 return dev;
723} 730}
724
725EXPORT_SYMBOL(dev_getfirstbyhwtype); 731EXPORT_SYMBOL(dev_getfirstbyhwtype);
726 732
727/** 733/**
@@ -736,7 +742,8 @@ EXPORT_SYMBOL(dev_getfirstbyhwtype);
736 * dev_put to indicate they have finished with it. 742 * dev_put to indicate they have finished with it.
737 */ 743 */
738 744
739struct net_device * dev_get_by_flags(struct net *net, unsigned short if_flags, unsigned short mask) 745struct net_device *dev_get_by_flags(struct net *net, unsigned short if_flags,
746 unsigned short mask)
740{ 747{
741 struct net_device *dev, *ret; 748 struct net_device *dev, *ret;
742 749
@@ -752,6 +759,7 @@ struct net_device * dev_get_by_flags(struct net *net, unsigned short if_flags, u
752 read_unlock(&dev_base_lock); 759 read_unlock(&dev_base_lock);
753 return ret; 760 return ret;
754} 761}
762EXPORT_SYMBOL(dev_get_by_flags);
755 763
756/** 764/**
757 * dev_valid_name - check if name is okay for network device 765 * dev_valid_name - check if name is okay for network device
@@ -777,6 +785,7 @@ int dev_valid_name(const char *name)
777 } 785 }
778 return 1; 786 return 1;
779} 787}
788EXPORT_SYMBOL(dev_valid_name);
780 789
781/** 790/**
782 * __dev_alloc_name - allocate a name for a device 791 * __dev_alloc_name - allocate a name for a device
@@ -870,6 +879,7 @@ int dev_alloc_name(struct net_device *dev, const char *name)
870 strlcpy(dev->name, buf, IFNAMSIZ); 879 strlcpy(dev->name, buf, IFNAMSIZ);
871 return ret; 880 return ret;
872} 881}
882EXPORT_SYMBOL(dev_alloc_name);
873 883
874 884
875/** 885/**
@@ -906,8 +916,7 @@ int dev_change_name(struct net_device *dev, const char *newname)
906 err = dev_alloc_name(dev, newname); 916 err = dev_alloc_name(dev, newname);
907 if (err < 0) 917 if (err < 0)
908 return err; 918 return err;
909 } 919 } else if (__dev_get_by_name(net, newname))
910 else if (__dev_get_by_name(net, newname))
911 return -EEXIST; 920 return -EEXIST;
912 else 921 else
913 strlcpy(dev->name, newname, IFNAMSIZ); 922 strlcpy(dev->name, newname, IFNAMSIZ);
@@ -970,7 +979,7 @@ int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
970 return 0; 979 return 0;
971 } 980 }
972 981
973 dev->ifalias = krealloc(dev->ifalias, len+1, GFP_KERNEL); 982 dev->ifalias = krealloc(dev->ifalias, len + 1, GFP_KERNEL);
974 if (!dev->ifalias) 983 if (!dev->ifalias)
975 return -ENOMEM; 984 return -ENOMEM;
976 985
@@ -1006,6 +1015,7 @@ void netdev_state_change(struct net_device *dev)
1006 rtmsg_ifinfo(RTM_NEWLINK, dev, 0); 1015 rtmsg_ifinfo(RTM_NEWLINK, dev, 0);
1007 } 1016 }
1008} 1017}
1018EXPORT_SYMBOL(netdev_state_change);
1009 1019
1010void netdev_bonding_change(struct net_device *dev) 1020void netdev_bonding_change(struct net_device *dev)
1011{ 1021{
@@ -1034,6 +1044,7 @@ void dev_load(struct net *net, const char *name)
1034 if (!dev && capable(CAP_NET_ADMIN)) 1044 if (!dev && capable(CAP_NET_ADMIN))
1035 request_module("%s", name); 1045 request_module("%s", name);
1036} 1046}
1047EXPORT_SYMBOL(dev_load);
1037 1048
1038/** 1049/**
1039 * dev_open - prepare an interface for use. 1050 * dev_open - prepare an interface for use.
@@ -1118,6 +1129,7 @@ int dev_open(struct net_device *dev)
1118 1129
1119 return ret; 1130 return ret;
1120} 1131}
1132EXPORT_SYMBOL(dev_open);
1121 1133
1122/** 1134/**
1123 * dev_close - shutdown an interface. 1135 * dev_close - shutdown an interface.
@@ -1184,6 +1196,7 @@ int dev_close(struct net_device *dev)
1184 1196
1185 return 0; 1197 return 0;
1186} 1198}
1199EXPORT_SYMBOL(dev_close);
1187 1200
1188 1201
1189/** 1202/**
@@ -1279,6 +1292,7 @@ rollback:
1279 raw_notifier_chain_unregister(&netdev_chain, nb); 1292 raw_notifier_chain_unregister(&netdev_chain, nb);
1280 goto unlock; 1293 goto unlock;
1281} 1294}
1295EXPORT_SYMBOL(register_netdevice_notifier);
1282 1296
1283/** 1297/**
1284 * unregister_netdevice_notifier - unregister a network notifier block 1298 * unregister_netdevice_notifier - unregister a network notifier block
@@ -1299,6 +1313,7 @@ int unregister_netdevice_notifier(struct notifier_block *nb)
1299 rtnl_unlock(); 1313 rtnl_unlock();
1300 return err; 1314 return err;
1301} 1315}
1316EXPORT_SYMBOL(unregister_netdevice_notifier);
1302 1317
1303/** 1318/**
1304 * call_netdevice_notifiers - call all network notifier blocks 1319 * call_netdevice_notifiers - call all network notifier blocks
@@ -1321,11 +1336,13 @@ void net_enable_timestamp(void)
1321{ 1336{
1322 atomic_inc(&netstamp_needed); 1337 atomic_inc(&netstamp_needed);
1323} 1338}
1339EXPORT_SYMBOL(net_enable_timestamp);
1324 1340
1325void net_disable_timestamp(void) 1341void net_disable_timestamp(void)
1326{ 1342{
1327 atomic_dec(&netstamp_needed); 1343 atomic_dec(&netstamp_needed);
1328} 1344}
1345EXPORT_SYMBOL(net_disable_timestamp);
1329 1346
1330static inline void net_timestamp(struct sk_buff *skb) 1347static inline void net_timestamp(struct sk_buff *skb)
1331{ 1348{
@@ -1359,7 +1376,7 @@ static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
1359 if ((ptype->dev == dev || !ptype->dev) && 1376 if ((ptype->dev == dev || !ptype->dev) &&
1360 (ptype->af_packet_priv == NULL || 1377 (ptype->af_packet_priv == NULL ||
1361 (struct sock *)ptype->af_packet_priv != skb->sk)) { 1378 (struct sock *)ptype->af_packet_priv != skb->sk)) {
1362 struct sk_buff *skb2= skb_clone(skb, GFP_ATOMIC); 1379 struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
1363 if (!skb2) 1380 if (!skb2)
1364 break; 1381 break;
1365 1382
@@ -1527,6 +1544,7 @@ out_set_summed:
1527out: 1544out:
1528 return ret; 1545 return ret;
1529} 1546}
1547EXPORT_SYMBOL(skb_checksum_help);
1530 1548
1531/** 1549/**
1532 * skb_gso_segment - Perform segmentation on skb. 1550 * skb_gso_segment - Perform segmentation on skb.
@@ -1589,7 +1607,6 @@ struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features)
1589 1607
1590 return segs; 1608 return segs;
1591} 1609}
1592
1593EXPORT_SYMBOL(skb_gso_segment); 1610EXPORT_SYMBOL(skb_gso_segment);
1594 1611
1595/* Take action when hardware reception checksum errors are detected. */ 1612/* Take action when hardware reception checksum errors are detected. */
@@ -1704,7 +1721,7 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
1704 skb_dst_drop(skb); 1721 skb_dst_drop(skb);
1705 1722
1706 rc = ops->ndo_start_xmit(skb, dev); 1723 rc = ops->ndo_start_xmit(skb, dev);
1707 if (rc == 0) 1724 if (rc == NETDEV_TX_OK)
1708 txq_trans_update(txq); 1725 txq_trans_update(txq);
1709 /* 1726 /*
1710 * TODO: if skb_orphan() was called by 1727 * TODO: if skb_orphan() was called by
@@ -1730,7 +1747,7 @@ gso:
1730 skb->next = nskb->next; 1747 skb->next = nskb->next;
1731 nskb->next = NULL; 1748 nskb->next = NULL;
1732 rc = ops->ndo_start_xmit(nskb, dev); 1749 rc = ops->ndo_start_xmit(nskb, dev);
1733 if (unlikely(rc)) { 1750 if (unlikely(rc != NETDEV_TX_OK)) {
1734 nskb->next = skb->next; 1751 nskb->next = skb->next;
1735 skb->next = nskb; 1752 skb->next = nskb;
1736 return rc; 1753 return rc;
@@ -1744,7 +1761,7 @@ gso:
1744 1761
1745out_kfree_skb: 1762out_kfree_skb:
1746 kfree_skb(skb); 1763 kfree_skb(skb);
1747 return 0; 1764 return NETDEV_TX_OK;
1748} 1765}
1749 1766
1750static u32 skb_tx_hashrnd; 1767static u32 skb_tx_hashrnd;
@@ -1755,7 +1772,7 @@ u16 skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb)
1755 1772
1756 if (skb_rx_queue_recorded(skb)) { 1773 if (skb_rx_queue_recorded(skb)) {
1757 hash = skb_get_rx_queue(skb); 1774 hash = skb_get_rx_queue(skb);
1758 while (unlikely (hash >= dev->real_num_tx_queues)) 1775 while (unlikely(hash >= dev->real_num_tx_queues))
1759 hash -= dev->real_num_tx_queues; 1776 hash -= dev->real_num_tx_queues;
1760 return hash; 1777 return hash;
1761 } 1778 }
@@ -1786,6 +1803,40 @@ static struct netdev_queue *dev_pick_tx(struct net_device *dev,
1786 return netdev_get_tx_queue(dev, queue_index); 1803 return netdev_get_tx_queue(dev, queue_index);
1787} 1804}
1788 1805
1806static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
1807 struct net_device *dev,
1808 struct netdev_queue *txq)
1809{
1810 spinlock_t *root_lock = qdisc_lock(q);
1811 int rc;
1812
1813 spin_lock(root_lock);
1814 if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
1815 kfree_skb(skb);
1816 rc = NET_XMIT_DROP;
1817 } else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) &&
1818 !test_and_set_bit(__QDISC_STATE_RUNNING, &q->state)) {
1819 /*
1820 * This is a work-conserving queue; there are no old skbs
1821 * waiting to be sent out; and the qdisc is not running -
1822 * xmit the skb directly.
1823 */
1824 __qdisc_update_bstats(q, skb->len);
1825 if (sch_direct_xmit(skb, q, dev, txq, root_lock))
1826 __qdisc_run(q);
1827 else
1828 clear_bit(__QDISC_STATE_RUNNING, &q->state);
1829
1830 rc = NET_XMIT_SUCCESS;
1831 } else {
1832 rc = qdisc_enqueue_root(skb, q);
1833 qdisc_run(q);
1834 }
1835 spin_unlock(root_lock);
1836
1837 return rc;
1838}
1839
1789/** 1840/**
1790 * dev_queue_xmit - transmit a buffer 1841 * dev_queue_xmit - transmit a buffer
1791 * @skb: buffer to transmit 1842 * @skb: buffer to transmit
@@ -1856,22 +1907,10 @@ gso:
1856 q = rcu_dereference(txq->qdisc); 1907 q = rcu_dereference(txq->qdisc);
1857 1908
1858#ifdef CONFIG_NET_CLS_ACT 1909#ifdef CONFIG_NET_CLS_ACT
1859 skb->tc_verd = SET_TC_AT(skb->tc_verd,AT_EGRESS); 1910 skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_EGRESS);
1860#endif 1911#endif
1861 if (q->enqueue) { 1912 if (q->enqueue) {
1862 spinlock_t *root_lock = qdisc_lock(q); 1913 rc = __dev_xmit_skb(skb, q, dev, txq);
1863
1864 spin_lock(root_lock);
1865
1866 if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
1867 kfree_skb(skb);
1868 rc = NET_XMIT_DROP;
1869 } else {
1870 rc = qdisc_enqueue_root(skb, q);
1871 qdisc_run(q);
1872 }
1873 spin_unlock(root_lock);
1874
1875 goto out; 1914 goto out;
1876 } 1915 }
1877 1916
@@ -1895,7 +1934,7 @@ gso:
1895 HARD_TX_LOCK(dev, txq, cpu); 1934 HARD_TX_LOCK(dev, txq, cpu);
1896 1935
1897 if (!netif_tx_queue_stopped(txq)) { 1936 if (!netif_tx_queue_stopped(txq)) {
1898 rc = 0; 1937 rc = NET_XMIT_SUCCESS;
1899 if (!dev_hard_start_xmit(skb, dev, txq)) { 1938 if (!dev_hard_start_xmit(skb, dev, txq)) {
1900 HARD_TX_UNLOCK(dev, txq); 1939 HARD_TX_UNLOCK(dev, txq);
1901 goto out; 1940 goto out;
@@ -1924,6 +1963,7 @@ out:
1924 rcu_read_unlock_bh(); 1963 rcu_read_unlock_bh();
1925 return rc; 1964 return rc;
1926} 1965}
1966EXPORT_SYMBOL(dev_queue_xmit);
1927 1967
1928 1968
1929/*======================================================================= 1969/*=======================================================================
@@ -1990,6 +2030,7 @@ enqueue:
1990 kfree_skb(skb); 2030 kfree_skb(skb);
1991 return NET_RX_DROP; 2031 return NET_RX_DROP;
1992} 2032}
2033EXPORT_SYMBOL(netif_rx);
1993 2034
1994int netif_rx_ni(struct sk_buff *skb) 2035int netif_rx_ni(struct sk_buff *skb)
1995{ 2036{
@@ -2003,7 +2044,6 @@ int netif_rx_ni(struct sk_buff *skb)
2003 2044
2004 return err; 2045 return err;
2005} 2046}
2006
2007EXPORT_SYMBOL(netif_rx_ni); 2047EXPORT_SYMBOL(netif_rx_ni);
2008 2048
2009static void net_tx_action(struct softirq_action *h) 2049static void net_tx_action(struct softirq_action *h)
@@ -2076,7 +2116,7 @@ static inline int deliver_skb(struct sk_buff *skb,
2076/* This hook is defined here for ATM LANE */ 2116/* This hook is defined here for ATM LANE */
2077int (*br_fdb_test_addr_hook)(struct net_device *dev, 2117int (*br_fdb_test_addr_hook)(struct net_device *dev,
2078 unsigned char *addr) __read_mostly; 2118 unsigned char *addr) __read_mostly;
2079EXPORT_SYMBOL(br_fdb_test_addr_hook); 2119EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook);
2080#endif 2120#endif
2081 2121
2082/* 2122/*
@@ -2085,7 +2125,7 @@ EXPORT_SYMBOL(br_fdb_test_addr_hook);
2085 */ 2125 */
2086struct sk_buff *(*br_handle_frame_hook)(struct net_bridge_port *p, 2126struct sk_buff *(*br_handle_frame_hook)(struct net_bridge_port *p,
2087 struct sk_buff *skb) __read_mostly; 2127 struct sk_buff *skb) __read_mostly;
2088EXPORT_SYMBOL(br_handle_frame_hook); 2128EXPORT_SYMBOL_GPL(br_handle_frame_hook);
2089 2129
2090static inline struct sk_buff *handle_bridge(struct sk_buff *skb, 2130static inline struct sk_buff *handle_bridge(struct sk_buff *skb,
2091 struct packet_type **pt_prev, int *ret, 2131 struct packet_type **pt_prev, int *ret,
@@ -2336,6 +2376,7 @@ out:
2336 rcu_read_unlock(); 2376 rcu_read_unlock();
2337 return ret; 2377 return ret;
2338} 2378}
2379EXPORT_SYMBOL(netif_receive_skb);
2339 2380
2340/* Network device is going away, flush any packets still pending */ 2381/* Network device is going away, flush any packets still pending */
2341static void flush_backlog(void *arg) 2382static void flush_backlog(void *arg)
@@ -2852,7 +2893,7 @@ softnet_break:
2852 goto out; 2893 goto out;
2853} 2894}
2854 2895
2855static gifconf_func_t * gifconf_list [NPROTO]; 2896static gifconf_func_t *gifconf_list[NPROTO];
2856 2897
2857/** 2898/**
2858 * register_gifconf - register a SIOCGIF handler 2899 * register_gifconf - register a SIOCGIF handler
@@ -2863,13 +2904,14 @@ static gifconf_func_t * gifconf_list [NPROTO];
2863 * that is passed must not be freed or reused until it has been replaced 2904 * that is passed must not be freed or reused until it has been replaced
2864 * by another handler. 2905 * by another handler.
2865 */ 2906 */
2866int register_gifconf(unsigned int family, gifconf_func_t * gifconf) 2907int register_gifconf(unsigned int family, gifconf_func_t *gifconf)
2867{ 2908{
2868 if (family >= NPROTO) 2909 if (family >= NPROTO)
2869 return -EINVAL; 2910 return -EINVAL;
2870 gifconf_list[family] = gifconf; 2911 gifconf_list[family] = gifconf;
2871 return 0; 2912 return 0;
2872} 2913}
2914EXPORT_SYMBOL(register_gifconf);
2873 2915
2874 2916
2875/* 2917/*
@@ -3080,7 +3122,7 @@ static int softnet_seq_show(struct seq_file *seq, void *v)
3080 seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x %08x\n", 3122 seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
3081 s->total, s->dropped, s->time_squeeze, 0, 3123 s->total, s->dropped, s->time_squeeze, 0,
3082 0, 0, 0, 0, /* was fastroute */ 3124 0, 0, 0, 0, /* was fastroute */
3083 s->cpu_collision ); 3125 s->cpu_collision);
3084 return 0; 3126 return 0;
3085} 3127}
3086 3128
@@ -3316,6 +3358,7 @@ int netdev_set_master(struct net_device *slave, struct net_device *master)
3316 rtmsg_ifinfo(RTM_NEWLINK, slave, IFF_SLAVE); 3358 rtmsg_ifinfo(RTM_NEWLINK, slave, IFF_SLAVE);
3317 return 0; 3359 return 0;
3318} 3360}
3361EXPORT_SYMBOL(netdev_set_master);
3319 3362
3320static void dev_change_rx_flags(struct net_device *dev, int flags) 3363static void dev_change_rx_flags(struct net_device *dev, int flags)
3321{ 3364{
@@ -3394,6 +3437,7 @@ int dev_set_promiscuity(struct net_device *dev, int inc)
3394 dev_set_rx_mode(dev); 3437 dev_set_rx_mode(dev);
3395 return err; 3438 return err;
3396} 3439}
3440EXPORT_SYMBOL(dev_set_promiscuity);
3397 3441
3398/** 3442/**
3399 * dev_set_allmulti - update allmulti count on a device 3443 * dev_set_allmulti - update allmulti count on a device
@@ -3437,6 +3481,7 @@ int dev_set_allmulti(struct net_device *dev, int inc)
3437 } 3481 }
3438 return 0; 3482 return 0;
3439} 3483}
3484EXPORT_SYMBOL(dev_set_allmulti);
3440 3485
3441/* 3486/*
3442 * Upload unicast and multicast address lists to device and 3487 * Upload unicast and multicast address lists to device and
@@ -3927,6 +3972,7 @@ int __dev_addr_sync(struct dev_addr_list **to, int *to_count,
3927 } 3972 }
3928 return err; 3973 return err;
3929} 3974}
3975EXPORT_SYMBOL_GPL(__dev_addr_sync);
3930 3976
3931void __dev_addr_unsync(struct dev_addr_list **to, int *to_count, 3977void __dev_addr_unsync(struct dev_addr_list **to, int *to_count,
3932 struct dev_addr_list **from, int *from_count) 3978 struct dev_addr_list **from, int *from_count)
@@ -3946,6 +3992,7 @@ void __dev_addr_unsync(struct dev_addr_list **to, int *to_count,
3946 da = next; 3992 da = next;
3947 } 3993 }
3948} 3994}
3995EXPORT_SYMBOL_GPL(__dev_addr_unsync);
3949 3996
3950/** 3997/**
3951 * dev_unicast_sync - Synchronize device's unicast list to another device 3998 * dev_unicast_sync - Synchronize device's unicast list to another device
@@ -4064,6 +4111,7 @@ unsigned dev_get_flags(const struct net_device *dev)
4064 4111
4065 return flags; 4112 return flags;
4066} 4113}
4114EXPORT_SYMBOL(dev_get_flags);
4067 4115
4068/** 4116/**
4069 * dev_change_flags - change device settings 4117 * dev_change_flags - change device settings
@@ -4114,12 +4162,13 @@ int dev_change_flags(struct net_device *dev, unsigned flags)
4114 } 4162 }
4115 4163
4116 if (dev->flags & IFF_UP && 4164 if (dev->flags & IFF_UP &&
4117 ((old_flags ^ dev->flags) &~ (IFF_UP | IFF_PROMISC | IFF_ALLMULTI | 4165 ((old_flags ^ dev->flags) & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI |
4118 IFF_VOLATILE))) 4166 IFF_VOLATILE)))
4119 call_netdevice_notifiers(NETDEV_CHANGE, dev); 4167 call_netdevice_notifiers(NETDEV_CHANGE, dev);
4120 4168
4121 if ((flags ^ dev->gflags) & IFF_PROMISC) { 4169 if ((flags ^ dev->gflags) & IFF_PROMISC) {
4122 int inc = (flags & IFF_PROMISC) ? +1 : -1; 4170 int inc = (flags & IFF_PROMISC) ? 1 : -1;
4171
4123 dev->gflags ^= IFF_PROMISC; 4172 dev->gflags ^= IFF_PROMISC;
4124 dev_set_promiscuity(dev, inc); 4173 dev_set_promiscuity(dev, inc);
4125 } 4174 }
@@ -4129,7 +4178,8 @@ int dev_change_flags(struct net_device *dev, unsigned flags)
4129 IFF_ALLMULTI is requested not asking us and not reporting. 4178 IFF_ALLMULTI is requested not asking us and not reporting.
4130 */ 4179 */
4131 if ((flags ^ dev->gflags) & IFF_ALLMULTI) { 4180 if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
4132 int inc = (flags & IFF_ALLMULTI) ? +1 : -1; 4181 int inc = (flags & IFF_ALLMULTI) ? 1 : -1;
4182
4133 dev->gflags ^= IFF_ALLMULTI; 4183 dev->gflags ^= IFF_ALLMULTI;
4134 dev_set_allmulti(dev, inc); 4184 dev_set_allmulti(dev, inc);
4135 } 4185 }
@@ -4141,6 +4191,7 @@ int dev_change_flags(struct net_device *dev, unsigned flags)
4141 4191
4142 return ret; 4192 return ret;
4143} 4193}
4194EXPORT_SYMBOL(dev_change_flags);
4144 4195
4145/** 4196/**
4146 * dev_set_mtu - Change maximum transfer unit 4197 * dev_set_mtu - Change maximum transfer unit
@@ -4174,6 +4225,7 @@ int dev_set_mtu(struct net_device *dev, int new_mtu)
4174 call_netdevice_notifiers(NETDEV_CHANGEMTU, dev); 4225 call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
4175 return err; 4226 return err;
4176} 4227}
4228EXPORT_SYMBOL(dev_set_mtu);
4177 4229
4178/** 4230/**
4179 * dev_set_mac_address - Change Media Access Control Address 4231 * dev_set_mac_address - Change Media Access Control Address
@@ -4198,6 +4250,7 @@ int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
4198 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev); 4250 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
4199 return err; 4251 return err;
4200} 4252}
4253EXPORT_SYMBOL(dev_set_mac_address);
4201 4254
4202/* 4255/*
4203 * Perform the SIOCxIFxxx calls, inside read_lock(dev_base_lock) 4256 * Perform the SIOCxIFxxx calls, inside read_lock(dev_base_lock)
@@ -4211,56 +4264,56 @@ static int dev_ifsioc_locked(struct net *net, struct ifreq *ifr, unsigned int cm
4211 return -ENODEV; 4264 return -ENODEV;
4212 4265
4213 switch (cmd) { 4266 switch (cmd) {
4214 case SIOCGIFFLAGS: /* Get interface flags */ 4267 case SIOCGIFFLAGS: /* Get interface flags */
4215 ifr->ifr_flags = (short) dev_get_flags(dev); 4268 ifr->ifr_flags = (short) dev_get_flags(dev);
4216 return 0; 4269 return 0;
4217 4270
4218 case SIOCGIFMETRIC: /* Get the metric on the interface 4271 case SIOCGIFMETRIC: /* Get the metric on the interface
4219 (currently unused) */ 4272 (currently unused) */
4220 ifr->ifr_metric = 0; 4273 ifr->ifr_metric = 0;
4221 return 0; 4274 return 0;
4222 4275
4223 case SIOCGIFMTU: /* Get the MTU of a device */ 4276 case SIOCGIFMTU: /* Get the MTU of a device */
4224 ifr->ifr_mtu = dev->mtu; 4277 ifr->ifr_mtu = dev->mtu;
4225 return 0; 4278 return 0;
4226 4279
4227 case SIOCGIFHWADDR: 4280 case SIOCGIFHWADDR:
4228 if (!dev->addr_len) 4281 if (!dev->addr_len)
4229 memset(ifr->ifr_hwaddr.sa_data, 0, sizeof ifr->ifr_hwaddr.sa_data); 4282 memset(ifr->ifr_hwaddr.sa_data, 0, sizeof ifr->ifr_hwaddr.sa_data);
4230 else 4283 else
4231 memcpy(ifr->ifr_hwaddr.sa_data, dev->dev_addr, 4284 memcpy(ifr->ifr_hwaddr.sa_data, dev->dev_addr,
4232 min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len)); 4285 min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
4233 ifr->ifr_hwaddr.sa_family = dev->type; 4286 ifr->ifr_hwaddr.sa_family = dev->type;
4234 return 0; 4287 return 0;
4235 4288
4236 case SIOCGIFSLAVE: 4289 case SIOCGIFSLAVE:
4237 err = -EINVAL; 4290 err = -EINVAL;
4238 break; 4291 break;
4239 4292
4240 case SIOCGIFMAP: 4293 case SIOCGIFMAP:
4241 ifr->ifr_map.mem_start = dev->mem_start; 4294 ifr->ifr_map.mem_start = dev->mem_start;
4242 ifr->ifr_map.mem_end = dev->mem_end; 4295 ifr->ifr_map.mem_end = dev->mem_end;
4243 ifr->ifr_map.base_addr = dev->base_addr; 4296 ifr->ifr_map.base_addr = dev->base_addr;
4244 ifr->ifr_map.irq = dev->irq; 4297 ifr->ifr_map.irq = dev->irq;
4245 ifr->ifr_map.dma = dev->dma; 4298 ifr->ifr_map.dma = dev->dma;
4246 ifr->ifr_map.port = dev->if_port; 4299 ifr->ifr_map.port = dev->if_port;
4247 return 0; 4300 return 0;
4248 4301
4249 case SIOCGIFINDEX: 4302 case SIOCGIFINDEX:
4250 ifr->ifr_ifindex = dev->ifindex; 4303 ifr->ifr_ifindex = dev->ifindex;
4251 return 0; 4304 return 0;
4252 4305
4253 case SIOCGIFTXQLEN: 4306 case SIOCGIFTXQLEN:
4254 ifr->ifr_qlen = dev->tx_queue_len; 4307 ifr->ifr_qlen = dev->tx_queue_len;
4255 return 0; 4308 return 0;
4256 4309
4257 default: 4310 default:
4258 /* dev_ioctl() should ensure this case 4311 /* dev_ioctl() should ensure this case
4259 * is never reached 4312 * is never reached
4260 */ 4313 */
4261 WARN_ON(1); 4314 WARN_ON(1);
4262 err = -EINVAL; 4315 err = -EINVAL;
4263 break; 4316 break;
4264 4317
4265 } 4318 }
4266 return err; 4319 return err;
@@ -4281,92 +4334,91 @@ static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd)
4281 ops = dev->netdev_ops; 4334 ops = dev->netdev_ops;
4282 4335
4283 switch (cmd) { 4336 switch (cmd) {
4284 case SIOCSIFFLAGS: /* Set interface flags */ 4337 case SIOCSIFFLAGS: /* Set interface flags */
4285 return dev_change_flags(dev, ifr->ifr_flags); 4338 return dev_change_flags(dev, ifr->ifr_flags);
4286
4287 case SIOCSIFMETRIC: /* Set the metric on the interface
4288 (currently unused) */
4289 return -EOPNOTSUPP;
4290 4339
4291 case SIOCSIFMTU: /* Set the MTU of a device */ 4340 case SIOCSIFMETRIC: /* Set the metric on the interface
4292 return dev_set_mtu(dev, ifr->ifr_mtu); 4341 (currently unused) */
4342 return -EOPNOTSUPP;
4293 4343
4294 case SIOCSIFHWADDR: 4344 case SIOCSIFMTU: /* Set the MTU of a device */
4295 return dev_set_mac_address(dev, &ifr->ifr_hwaddr); 4345 return dev_set_mtu(dev, ifr->ifr_mtu);
4296 4346
4297 case SIOCSIFHWBROADCAST: 4347 case SIOCSIFHWADDR:
4298 if (ifr->ifr_hwaddr.sa_family != dev->type) 4348 return dev_set_mac_address(dev, &ifr->ifr_hwaddr);
4299 return -EINVAL;
4300 memcpy(dev->broadcast, ifr->ifr_hwaddr.sa_data,
4301 min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
4302 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
4303 return 0;
4304 4349
4305 case SIOCSIFMAP: 4350 case SIOCSIFHWBROADCAST:
4306 if (ops->ndo_set_config) { 4351 if (ifr->ifr_hwaddr.sa_family != dev->type)
4307 if (!netif_device_present(dev)) 4352 return -EINVAL;
4308 return -ENODEV; 4353 memcpy(dev->broadcast, ifr->ifr_hwaddr.sa_data,
4309 return ops->ndo_set_config(dev, &ifr->ifr_map); 4354 min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
4310 } 4355 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
4311 return -EOPNOTSUPP; 4356 return 0;
4312
4313 case SIOCADDMULTI:
4314 if ((!ops->ndo_set_multicast_list && !ops->ndo_set_rx_mode) ||
4315 ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
4316 return -EINVAL;
4317 if (!netif_device_present(dev))
4318 return -ENODEV;
4319 return dev_mc_add(dev, ifr->ifr_hwaddr.sa_data,
4320 dev->addr_len, 1);
4321 4357
4322 case SIOCDELMULTI: 4358 case SIOCSIFMAP:
4323 if ((!ops->ndo_set_multicast_list && !ops->ndo_set_rx_mode) || 4359 if (ops->ndo_set_config) {
4324 ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
4325 return -EINVAL;
4326 if (!netif_device_present(dev)) 4360 if (!netif_device_present(dev))
4327 return -ENODEV; 4361 return -ENODEV;
4328 return dev_mc_delete(dev, ifr->ifr_hwaddr.sa_data, 4362 return ops->ndo_set_config(dev, &ifr->ifr_map);
4329 dev->addr_len, 1); 4363 }
4364 return -EOPNOTSUPP;
4330 4365
4331 case SIOCSIFTXQLEN: 4366 case SIOCADDMULTI:
4332 if (ifr->ifr_qlen < 0) 4367 if ((!ops->ndo_set_multicast_list && !ops->ndo_set_rx_mode) ||
4333 return -EINVAL; 4368 ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
4334 dev->tx_queue_len = ifr->ifr_qlen; 4369 return -EINVAL;
4335 return 0; 4370 if (!netif_device_present(dev))
4371 return -ENODEV;
4372 return dev_mc_add(dev, ifr->ifr_hwaddr.sa_data,
4373 dev->addr_len, 1);
4374
4375 case SIOCDELMULTI:
4376 if ((!ops->ndo_set_multicast_list && !ops->ndo_set_rx_mode) ||
4377 ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
4378 return -EINVAL;
4379 if (!netif_device_present(dev))
4380 return -ENODEV;
4381 return dev_mc_delete(dev, ifr->ifr_hwaddr.sa_data,
4382 dev->addr_len, 1);
4336 4383
4337 case SIOCSIFNAME: 4384 case SIOCSIFTXQLEN:
4338 ifr->ifr_newname[IFNAMSIZ-1] = '\0'; 4385 if (ifr->ifr_qlen < 0)
4339 return dev_change_name(dev, ifr->ifr_newname); 4386 return -EINVAL;
4387 dev->tx_queue_len = ifr->ifr_qlen;
4388 return 0;
4340 4389
4341 /* 4390 case SIOCSIFNAME:
4342 * Unknown or private ioctl 4391 ifr->ifr_newname[IFNAMSIZ-1] = '\0';
4343 */ 4392 return dev_change_name(dev, ifr->ifr_newname);
4344 4393
4345 default: 4394 /*
4346 if ((cmd >= SIOCDEVPRIVATE && 4395 * Unknown or private ioctl
4347 cmd <= SIOCDEVPRIVATE + 15) || 4396 */
4348 cmd == SIOCBONDENSLAVE || 4397 default:
4349 cmd == SIOCBONDRELEASE || 4398 if ((cmd >= SIOCDEVPRIVATE &&
4350 cmd == SIOCBONDSETHWADDR || 4399 cmd <= SIOCDEVPRIVATE + 15) ||
4351 cmd == SIOCBONDSLAVEINFOQUERY || 4400 cmd == SIOCBONDENSLAVE ||
4352 cmd == SIOCBONDINFOQUERY || 4401 cmd == SIOCBONDRELEASE ||
4353 cmd == SIOCBONDCHANGEACTIVE || 4402 cmd == SIOCBONDSETHWADDR ||
4354 cmd == SIOCGMIIPHY || 4403 cmd == SIOCBONDSLAVEINFOQUERY ||
4355 cmd == SIOCGMIIREG || 4404 cmd == SIOCBONDINFOQUERY ||
4356 cmd == SIOCSMIIREG || 4405 cmd == SIOCBONDCHANGEACTIVE ||
4357 cmd == SIOCBRADDIF || 4406 cmd == SIOCGMIIPHY ||
4358 cmd == SIOCBRDELIF || 4407 cmd == SIOCGMIIREG ||
4359 cmd == SIOCSHWTSTAMP || 4408 cmd == SIOCSMIIREG ||
4360 cmd == SIOCWANDEV) { 4409 cmd == SIOCBRADDIF ||
4361 err = -EOPNOTSUPP; 4410 cmd == SIOCBRDELIF ||
4362 if (ops->ndo_do_ioctl) { 4411 cmd == SIOCSHWTSTAMP ||
4363 if (netif_device_present(dev)) 4412 cmd == SIOCWANDEV) {
4364 err = ops->ndo_do_ioctl(dev, ifr, cmd); 4413 err = -EOPNOTSUPP;
4365 else 4414 if (ops->ndo_do_ioctl) {
4366 err = -ENODEV; 4415 if (netif_device_present(dev))
4367 } 4416 err = ops->ndo_do_ioctl(dev, ifr, cmd);
4368 } else 4417 else
4369 err = -EINVAL; 4418 err = -ENODEV;
4419 }
4420 } else
4421 err = -EINVAL;
4370 4422
4371 } 4423 }
4372 return err; 4424 return err;
@@ -4423,135 +4475,135 @@ int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg)
4423 */ 4475 */
4424 4476
4425 switch (cmd) { 4477 switch (cmd) {
4426 /* 4478 /*
4427 * These ioctl calls: 4479 * These ioctl calls:
4428 * - can be done by all. 4480 * - can be done by all.
4429 * - atomic and do not require locking. 4481 * - atomic and do not require locking.
4430 * - return a value 4482 * - return a value
4431 */ 4483 */
4432 case SIOCGIFFLAGS: 4484 case SIOCGIFFLAGS:
4433 case SIOCGIFMETRIC: 4485 case SIOCGIFMETRIC:
4434 case SIOCGIFMTU: 4486 case SIOCGIFMTU:
4435 case SIOCGIFHWADDR: 4487 case SIOCGIFHWADDR:
4436 case SIOCGIFSLAVE: 4488 case SIOCGIFSLAVE:
4437 case SIOCGIFMAP: 4489 case SIOCGIFMAP:
4438 case SIOCGIFINDEX: 4490 case SIOCGIFINDEX:
4439 case SIOCGIFTXQLEN: 4491 case SIOCGIFTXQLEN:
4440 dev_load(net, ifr.ifr_name); 4492 dev_load(net, ifr.ifr_name);
4441 read_lock(&dev_base_lock); 4493 read_lock(&dev_base_lock);
4442 ret = dev_ifsioc_locked(net, &ifr, cmd); 4494 ret = dev_ifsioc_locked(net, &ifr, cmd);
4443 read_unlock(&dev_base_lock); 4495 read_unlock(&dev_base_lock);
4444 if (!ret) { 4496 if (!ret) {
4445 if (colon) 4497 if (colon)
4446 *colon = ':'; 4498 *colon = ':';
4447 if (copy_to_user(arg, &ifr, 4499 if (copy_to_user(arg, &ifr,
4448 sizeof(struct ifreq))) 4500 sizeof(struct ifreq)))
4449 ret = -EFAULT; 4501 ret = -EFAULT;
4450 } 4502 }
4451 return ret; 4503 return ret;
4452 4504
4453 case SIOCETHTOOL: 4505 case SIOCETHTOOL:
4454 dev_load(net, ifr.ifr_name); 4506 dev_load(net, ifr.ifr_name);
4455 rtnl_lock(); 4507 rtnl_lock();
4456 ret = dev_ethtool(net, &ifr); 4508 ret = dev_ethtool(net, &ifr);
4457 rtnl_unlock(); 4509 rtnl_unlock();
4458 if (!ret) { 4510 if (!ret) {
4459 if (colon) 4511 if (colon)
4460 *colon = ':'; 4512 *colon = ':';
4461 if (copy_to_user(arg, &ifr, 4513 if (copy_to_user(arg, &ifr,
4462 sizeof(struct ifreq))) 4514 sizeof(struct ifreq)))
4463 ret = -EFAULT; 4515 ret = -EFAULT;
4464 } 4516 }
4465 return ret; 4517 return ret;
4466 4518
4467 /* 4519 /*
4468 * These ioctl calls: 4520 * These ioctl calls:
4469 * - require superuser power. 4521 * - require superuser power.
4470 * - require strict serialization. 4522 * - require strict serialization.
4471 * - return a value 4523 * - return a value
4472 */ 4524 */
4473 case SIOCGMIIPHY: 4525 case SIOCGMIIPHY:
4474 case SIOCGMIIREG: 4526 case SIOCGMIIREG:
4475 case SIOCSIFNAME: 4527 case SIOCSIFNAME:
4476 if (!capable(CAP_NET_ADMIN)) 4528 if (!capable(CAP_NET_ADMIN))
4477 return -EPERM; 4529 return -EPERM;
4478 dev_load(net, ifr.ifr_name); 4530 dev_load(net, ifr.ifr_name);
4479 rtnl_lock(); 4531 rtnl_lock();
4480 ret = dev_ifsioc(net, &ifr, cmd); 4532 ret = dev_ifsioc(net, &ifr, cmd);
4481 rtnl_unlock(); 4533 rtnl_unlock();
4482 if (!ret) { 4534 if (!ret) {
4483 if (colon) 4535 if (colon)
4484 *colon = ':'; 4536 *colon = ':';
4485 if (copy_to_user(arg, &ifr, 4537 if (copy_to_user(arg, &ifr,
4486 sizeof(struct ifreq))) 4538 sizeof(struct ifreq)))
4487 ret = -EFAULT; 4539 ret = -EFAULT;
4488 } 4540 }
4489 return ret; 4541 return ret;
4490 4542
4491 /* 4543 /*
4492 * These ioctl calls: 4544 * These ioctl calls:
4493 * - require superuser power. 4545 * - require superuser power.
4494 * - require strict serialization. 4546 * - require strict serialization.
4495 * - do not return a value 4547 * - do not return a value
4496 */ 4548 */
4497 case SIOCSIFFLAGS: 4549 case SIOCSIFFLAGS:
4498 case SIOCSIFMETRIC: 4550 case SIOCSIFMETRIC:
4499 case SIOCSIFMTU: 4551 case SIOCSIFMTU:
4500 case SIOCSIFMAP: 4552 case SIOCSIFMAP:
4501 case SIOCSIFHWADDR: 4553 case SIOCSIFHWADDR:
4502 case SIOCSIFSLAVE: 4554 case SIOCSIFSLAVE:
4503 case SIOCADDMULTI: 4555 case SIOCADDMULTI:
4504 case SIOCDELMULTI: 4556 case SIOCDELMULTI:
4505 case SIOCSIFHWBROADCAST: 4557 case SIOCSIFHWBROADCAST:
4506 case SIOCSIFTXQLEN: 4558 case SIOCSIFTXQLEN:
4507 case SIOCSMIIREG: 4559 case SIOCSMIIREG:
4508 case SIOCBONDENSLAVE: 4560 case SIOCBONDENSLAVE:
4509 case SIOCBONDRELEASE: 4561 case SIOCBONDRELEASE:
4510 case SIOCBONDSETHWADDR: 4562 case SIOCBONDSETHWADDR:
4511 case SIOCBONDCHANGEACTIVE: 4563 case SIOCBONDCHANGEACTIVE:
4512 case SIOCBRADDIF: 4564 case SIOCBRADDIF:
4513 case SIOCBRDELIF: 4565 case SIOCBRDELIF:
4514 case SIOCSHWTSTAMP: 4566 case SIOCSHWTSTAMP:
4515 if (!capable(CAP_NET_ADMIN)) 4567 if (!capable(CAP_NET_ADMIN))
4516 return -EPERM; 4568 return -EPERM;
4517 /* fall through */ 4569 /* fall through */
4518 case SIOCBONDSLAVEINFOQUERY: 4570 case SIOCBONDSLAVEINFOQUERY:
4519 case SIOCBONDINFOQUERY: 4571 case SIOCBONDINFOQUERY:
4572 dev_load(net, ifr.ifr_name);
4573 rtnl_lock();
4574 ret = dev_ifsioc(net, &ifr, cmd);
4575 rtnl_unlock();
4576 return ret;
4577
4578 case SIOCGIFMEM:
4579 /* Get the per device memory space. We can add this but
4580 * currently do not support it */
4581 case SIOCSIFMEM:
4582 /* Set the per device memory buffer space.
4583 * Not applicable in our case */
4584 case SIOCSIFLINK:
4585 return -EINVAL;
4586
4587 /*
4588 * Unknown or private ioctl.
4589 */
4590 default:
4591 if (cmd == SIOCWANDEV ||
4592 (cmd >= SIOCDEVPRIVATE &&
4593 cmd <= SIOCDEVPRIVATE + 15)) {
4520 dev_load(net, ifr.ifr_name); 4594 dev_load(net, ifr.ifr_name);
4521 rtnl_lock(); 4595 rtnl_lock();
4522 ret = dev_ifsioc(net, &ifr, cmd); 4596 ret = dev_ifsioc(net, &ifr, cmd);
4523 rtnl_unlock(); 4597 rtnl_unlock();
4598 if (!ret && copy_to_user(arg, &ifr,
4599 sizeof(struct ifreq)))
4600 ret = -EFAULT;
4524 return ret; 4601 return ret;
4525 4602 }
4526 case SIOCGIFMEM: 4603 /* Take care of Wireless Extensions */
4527 /* Get the per device memory space. We can add this but 4604 if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST)
4528 * currently do not support it */ 4605 return wext_handle_ioctl(net, &ifr, cmd, arg);
4529 case SIOCSIFMEM: 4606 return -EINVAL;
4530 /* Set the per device memory buffer space.
4531 * Not applicable in our case */
4532 case SIOCSIFLINK:
4533 return -EINVAL;
4534
4535 /*
4536 * Unknown or private ioctl.
4537 */
4538 default:
4539 if (cmd == SIOCWANDEV ||
4540 (cmd >= SIOCDEVPRIVATE &&
4541 cmd <= SIOCDEVPRIVATE + 15)) {
4542 dev_load(net, ifr.ifr_name);
4543 rtnl_lock();
4544 ret = dev_ifsioc(net, &ifr, cmd);
4545 rtnl_unlock();
4546 if (!ret && copy_to_user(arg, &ifr,
4547 sizeof(struct ifreq)))
4548 ret = -EFAULT;
4549 return ret;
4550 }
4551 /* Take care of Wireless Extensions */
4552 if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST)
4553 return wext_handle_ioctl(net, &ifr, cmd, arg);
4554 return -EINVAL;
4555 } 4607 }
4556} 4608}
4557 4609
@@ -4816,6 +4868,7 @@ err_uninit:
4816 dev->netdev_ops->ndo_uninit(dev); 4868 dev->netdev_ops->ndo_uninit(dev);
4817 goto out; 4869 goto out;
4818} 4870}
4871EXPORT_SYMBOL(register_netdevice);
4819 4872
4820/** 4873/**
4821 * init_dummy_netdev - init a dummy network device for NAPI 4874 * init_dummy_netdev - init a dummy network device for NAPI
@@ -5168,6 +5221,7 @@ void free_netdev(struct net_device *dev)
5168 /* will free via device release */ 5221 /* will free via device release */
5169 put_device(&dev->dev); 5222 put_device(&dev->dev);
5170} 5223}
5224EXPORT_SYMBOL(free_netdev);
5171 5225
5172/** 5226/**
5173 * synchronize_net - Synchronize with packet receive processing 5227 * synchronize_net - Synchronize with packet receive processing
@@ -5180,6 +5234,7 @@ void synchronize_net(void)
5180 might_sleep(); 5234 might_sleep();
5181 synchronize_rcu(); 5235 synchronize_rcu();
5182} 5236}
5237EXPORT_SYMBOL(synchronize_net);
5183 5238
5184/** 5239/**
5185 * unregister_netdevice - remove device from the kernel 5240 * unregister_netdevice - remove device from the kernel
@@ -5200,6 +5255,7 @@ void unregister_netdevice(struct net_device *dev)
5200 /* Finish processing unregister after unlock */ 5255 /* Finish processing unregister after unlock */
5201 net_set_todo(dev); 5256 net_set_todo(dev);
5202} 5257}
5258EXPORT_SYMBOL(unregister_netdevice);
5203 5259
5204/** 5260/**
5205 * unregister_netdev - remove device from the kernel 5261 * unregister_netdev - remove device from the kernel
@@ -5218,7 +5274,6 @@ void unregister_netdev(struct net_device *dev)
5218 unregister_netdevice(dev); 5274 unregister_netdevice(dev);
5219 rtnl_unlock(); 5275 rtnl_unlock();
5220} 5276}
5221
5222EXPORT_SYMBOL(unregister_netdev); 5277EXPORT_SYMBOL(unregister_netdev);
5223 5278
5224/** 5279/**
@@ -5347,6 +5402,7 @@ int dev_change_net_namespace(struct net_device *dev, struct net *net, const char
5347out: 5402out:
5348 return err; 5403 return err;
5349} 5404}
5405EXPORT_SYMBOL_GPL(dev_change_net_namespace);
5350 5406
5351static int dev_cpu_callback(struct notifier_block *nfb, 5407static int dev_cpu_callback(struct notifier_block *nfb,
5352 unsigned long action, 5408 unsigned long action,
@@ -5407,7 +5463,7 @@ unsigned long netdev_increment_features(unsigned long all, unsigned long one,
5407 unsigned long mask) 5463 unsigned long mask)
5408{ 5464{
5409 /* If device needs checksumming, downgrade to it. */ 5465 /* If device needs checksumming, downgrade to it. */
5410 if (all & NETIF_F_NO_CSUM && !(one & NETIF_F_NO_CSUM)) 5466 if (all & NETIF_F_NO_CSUM && !(one & NETIF_F_NO_CSUM))
5411 all ^= NETIF_F_NO_CSUM | (one & NETIF_F_ALL_CSUM); 5467 all ^= NETIF_F_NO_CSUM | (one & NETIF_F_ALL_CSUM);
5412 else if (mask & NETIF_F_ALL_CSUM) { 5468 else if (mask & NETIF_F_ALL_CSUM) {
5413 /* If one device supports v4/v6 checksumming, set for all. */ 5469 /* If one device supports v4/v6 checksumming, set for all. */
@@ -5633,41 +5689,3 @@ static int __init initialize_hashrnd(void)
5633 5689
5634late_initcall_sync(initialize_hashrnd); 5690late_initcall_sync(initialize_hashrnd);
5635 5691
5636EXPORT_SYMBOL(__dev_get_by_index);
5637EXPORT_SYMBOL(__dev_get_by_name);
5638EXPORT_SYMBOL(__dev_remove_pack);
5639EXPORT_SYMBOL(dev_valid_name);
5640EXPORT_SYMBOL(dev_add_pack);
5641EXPORT_SYMBOL(dev_alloc_name);
5642EXPORT_SYMBOL(dev_close);
5643EXPORT_SYMBOL(dev_get_by_flags);
5644EXPORT_SYMBOL(dev_get_by_index);
5645EXPORT_SYMBOL(dev_get_by_name);
5646EXPORT_SYMBOL(dev_open);
5647EXPORT_SYMBOL(dev_queue_xmit);
5648EXPORT_SYMBOL(dev_remove_pack);
5649EXPORT_SYMBOL(dev_set_allmulti);
5650EXPORT_SYMBOL(dev_set_promiscuity);
5651EXPORT_SYMBOL(dev_change_flags);
5652EXPORT_SYMBOL(dev_set_mtu);
5653EXPORT_SYMBOL(dev_set_mac_address);
5654EXPORT_SYMBOL(free_netdev);
5655EXPORT_SYMBOL(netdev_boot_setup_check);
5656EXPORT_SYMBOL(netdev_set_master);
5657EXPORT_SYMBOL(netdev_state_change);
5658EXPORT_SYMBOL(netif_receive_skb);
5659EXPORT_SYMBOL(netif_rx);
5660EXPORT_SYMBOL(register_gifconf);
5661EXPORT_SYMBOL(register_netdevice);
5662EXPORT_SYMBOL(register_netdevice_notifier);
5663EXPORT_SYMBOL(skb_checksum_help);
5664EXPORT_SYMBOL(synchronize_net);
5665EXPORT_SYMBOL(unregister_netdevice);
5666EXPORT_SYMBOL(unregister_netdevice_notifier);
5667EXPORT_SYMBOL(net_enable_timestamp);
5668EXPORT_SYMBOL(net_disable_timestamp);
5669EXPORT_SYMBOL(dev_get_flags);
5670
5671EXPORT_SYMBOL(dev_load);
5672
5673EXPORT_PER_CPU_SYMBOL(softnet_data);
diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c
index 9d66fa953ab7..0a113f26bc9f 100644
--- a/net/core/drop_monitor.c
+++ b/net/core/drop_monitor.c
@@ -52,6 +52,7 @@ struct per_cpu_dm_data {
52 52
53struct dm_hw_stat_delta { 53struct dm_hw_stat_delta {
54 struct net_device *dev; 54 struct net_device *dev;
55 unsigned long last_rx;
55 struct list_head list; 56 struct list_head list;
56 struct rcu_head rcu; 57 struct rcu_head rcu;
57 unsigned long last_drop_val; 58 unsigned long last_drop_val;
@@ -180,17 +181,25 @@ static void trace_napi_poll_hit(struct napi_struct *napi)
180 struct dm_hw_stat_delta *new_stat; 181 struct dm_hw_stat_delta *new_stat;
181 182
182 /* 183 /*
183 * Ratelimit our check time to dm_hw_check_delta jiffies 184 * Don't check napi structures with no associated device
184 */ 185 */
185 if (!time_after(jiffies, napi->dev->last_rx + dm_hw_check_delta)) 186 if (!napi->dev)
186 return; 187 return;
187 188
188 rcu_read_lock(); 189 rcu_read_lock();
189 list_for_each_entry_rcu(new_stat, &hw_stats_list, list) { 190 list_for_each_entry_rcu(new_stat, &hw_stats_list, list) {
191 /*
192 * only add a note to our monitor buffer if:
193 * 1) this is the dev we received on
194 * 2) its after the last_rx delta
195 * 3) our rx_dropped count has gone up
196 */
190 if ((new_stat->dev == napi->dev) && 197 if ((new_stat->dev == napi->dev) &&
198 (time_after(jiffies, new_stat->last_rx + dm_hw_check_delta)) &&
191 (napi->dev->stats.rx_dropped != new_stat->last_drop_val)) { 199 (napi->dev->stats.rx_dropped != new_stat->last_drop_val)) {
192 trace_drop_common(NULL, NULL); 200 trace_drop_common(NULL, NULL);
193 new_stat->last_drop_val = napi->dev->stats.rx_dropped; 201 new_stat->last_drop_val = napi->dev->stats.rx_dropped;
202 new_stat->last_rx = jiffies;
194 break; 203 break;
195 } 204 }
196 } 205 }
@@ -286,6 +295,7 @@ static int dropmon_net_event(struct notifier_block *ev_block,
286 goto out; 295 goto out;
287 296
288 new_stat->dev = dev; 297 new_stat->dev = dev;
298 new_stat->last_rx = jiffies;
289 INIT_RCU_HEAD(&new_stat->rcu); 299 INIT_RCU_HEAD(&new_stat->rcu);
290 spin_lock(&trace_state_lock); 300 spin_lock(&trace_state_lock);
291 list_add_rcu(&new_stat->list, &hw_stats_list); 301 list_add_rcu(&new_stat->list, &hw_stats_list);
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index d9d5160610d5..4c12ddb5f5ee 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -30,10 +30,17 @@ u32 ethtool_op_get_link(struct net_device *dev)
30 return netif_carrier_ok(dev) ? 1 : 0; 30 return netif_carrier_ok(dev) ? 1 : 0;
31} 31}
32 32
33u32 ethtool_op_get_rx_csum(struct net_device *dev)
34{
35 return (dev->features & NETIF_F_ALL_CSUM) != 0;
36}
37EXPORT_SYMBOL(ethtool_op_get_rx_csum);
38
33u32 ethtool_op_get_tx_csum(struct net_device *dev) 39u32 ethtool_op_get_tx_csum(struct net_device *dev)
34{ 40{
35 return (dev->features & NETIF_F_ALL_CSUM) != 0; 41 return (dev->features & NETIF_F_ALL_CSUM) != 0;
36} 42}
43EXPORT_SYMBOL(ethtool_op_get_tx_csum);
37 44
38int ethtool_op_set_tx_csum(struct net_device *dev, u32 data) 45int ethtool_op_set_tx_csum(struct net_device *dev, u32 data)
39{ 46{
@@ -891,6 +898,19 @@ static int ethtool_set_value(struct net_device *dev, char __user *useraddr,
891 return actor(dev, edata.data); 898 return actor(dev, edata.data);
892} 899}
893 900
901static int ethtool_flash_device(struct net_device *dev, char __user *useraddr)
902{
903 struct ethtool_flash efl;
904
905 if (copy_from_user(&efl, useraddr, sizeof(efl)))
906 return -EFAULT;
907
908 if (!dev->ethtool_ops->flash_device)
909 return -EOPNOTSUPP;
910
911 return dev->ethtool_ops->flash_device(dev, &efl);
912}
913
894/* The main entry point in this file. Called from net/core/dev.c */ 914/* The main entry point in this file. Called from net/core/dev.c */
895 915
896int dev_ethtool(struct net *net, struct ifreq *ifr) 916int dev_ethtool(struct net *net, struct ifreq *ifr)
@@ -1004,7 +1024,9 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
1004 break; 1024 break;
1005 case ETHTOOL_GRXCSUM: 1025 case ETHTOOL_GRXCSUM:
1006 rc = ethtool_get_value(dev, useraddr, ethcmd, 1026 rc = ethtool_get_value(dev, useraddr, ethcmd,
1007 dev->ethtool_ops->get_rx_csum); 1027 (dev->ethtool_ops->get_rx_csum ?
1028 dev->ethtool_ops->get_rx_csum :
1029 ethtool_op_get_rx_csum));
1008 break; 1030 break;
1009 case ETHTOOL_SRXCSUM: 1031 case ETHTOOL_SRXCSUM:
1010 rc = ethtool_set_rx_csum(dev, useraddr); 1032 rc = ethtool_set_rx_csum(dev, useraddr);
@@ -1068,7 +1090,9 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
1068 break; 1090 break;
1069 case ETHTOOL_GFLAGS: 1091 case ETHTOOL_GFLAGS:
1070 rc = ethtool_get_value(dev, useraddr, ethcmd, 1092 rc = ethtool_get_value(dev, useraddr, ethcmd,
1071 dev->ethtool_ops->get_flags); 1093 (dev->ethtool_ops->get_flags ?
1094 dev->ethtool_ops->get_flags :
1095 ethtool_op_get_flags));
1072 break; 1096 break;
1073 case ETHTOOL_SFLAGS: 1097 case ETHTOOL_SFLAGS:
1074 rc = ethtool_set_value(dev, useraddr, 1098 rc = ethtool_set_value(dev, useraddr,
@@ -1100,6 +1124,9 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
1100 case ETHTOOL_SGRO: 1124 case ETHTOOL_SGRO:
1101 rc = ethtool_set_gro(dev, useraddr); 1125 rc = ethtool_set_gro(dev, useraddr);
1102 break; 1126 break;
1127 case ETHTOOL_FLASHDEV:
1128 rc = ethtool_flash_device(dev, useraddr);
1129 break;
1103 default: 1130 default:
1104 rc = -EOPNOTSUPP; 1131 rc = -EOPNOTSUPP;
1105 } 1132 }
@@ -1116,7 +1143,6 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
1116EXPORT_SYMBOL(ethtool_op_get_link); 1143EXPORT_SYMBOL(ethtool_op_get_link);
1117EXPORT_SYMBOL(ethtool_op_get_sg); 1144EXPORT_SYMBOL(ethtool_op_get_sg);
1118EXPORT_SYMBOL(ethtool_op_get_tso); 1145EXPORT_SYMBOL(ethtool_op_get_tso);
1119EXPORT_SYMBOL(ethtool_op_get_tx_csum);
1120EXPORT_SYMBOL(ethtool_op_set_sg); 1146EXPORT_SYMBOL(ethtool_op_set_sg);
1121EXPORT_SYMBOL(ethtool_op_set_tso); 1147EXPORT_SYMBOL(ethtool_op_set_tso);
1122EXPORT_SYMBOL(ethtool_op_set_tx_csum); 1148EXPORT_SYMBOL(ethtool_op_set_tx_csum);
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 163b4f5b0365..e587e6819698 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -692,75 +692,74 @@ static void neigh_connect(struct neighbour *neigh)
692 hh->hh_output = neigh->ops->hh_output; 692 hh->hh_output = neigh->ops->hh_output;
693} 693}
694 694
695static void neigh_periodic_timer(unsigned long arg) 695static void neigh_periodic_work(struct work_struct *work)
696{ 696{
697 struct neigh_table *tbl = (struct neigh_table *)arg; 697 struct neigh_table *tbl = container_of(work, struct neigh_table, gc_work.work);
698 struct neighbour *n, **np; 698 struct neighbour *n, **np;
699 unsigned long expire, now = jiffies; 699 unsigned int i;
700 700
701 NEIGH_CACHE_STAT_INC(tbl, periodic_gc_runs); 701 NEIGH_CACHE_STAT_INC(tbl, periodic_gc_runs);
702 702
703 write_lock(&tbl->lock); 703 write_lock_bh(&tbl->lock);
704 704
705 /* 705 /*
706 * periodically recompute ReachableTime from random function 706 * periodically recompute ReachableTime from random function
707 */ 707 */
708 708
709 if (time_after(now, tbl->last_rand + 300 * HZ)) { 709 if (time_after(jiffies, tbl->last_rand + 300 * HZ)) {
710 struct neigh_parms *p; 710 struct neigh_parms *p;
711 tbl->last_rand = now; 711 tbl->last_rand = jiffies;
712 for (p = &tbl->parms; p; p = p->next) 712 for (p = &tbl->parms; p; p = p->next)
713 p->reachable_time = 713 p->reachable_time =
714 neigh_rand_reach_time(p->base_reachable_time); 714 neigh_rand_reach_time(p->base_reachable_time);
715 } 715 }
716 716
717 np = &tbl->hash_buckets[tbl->hash_chain_gc]; 717 for (i = 0 ; i <= tbl->hash_mask; i++) {
718 tbl->hash_chain_gc = ((tbl->hash_chain_gc + 1) & tbl->hash_mask); 718 np = &tbl->hash_buckets[i];
719 719
720 while ((n = *np) != NULL) { 720 while ((n = *np) != NULL) {
721 unsigned int state; 721 unsigned int state;
722 722
723 write_lock(&n->lock); 723 write_lock(&n->lock);
724 724
725 state = n->nud_state; 725 state = n->nud_state;
726 if (state & (NUD_PERMANENT | NUD_IN_TIMER)) { 726 if (state & (NUD_PERMANENT | NUD_IN_TIMER)) {
727 write_unlock(&n->lock); 727 write_unlock(&n->lock);
728 goto next_elt; 728 goto next_elt;
729 } 729 }
730 730
731 if (time_before(n->used, n->confirmed)) 731 if (time_before(n->used, n->confirmed))
732 n->used = n->confirmed; 732 n->used = n->confirmed;
733 733
734 if (atomic_read(&n->refcnt) == 1 && 734 if (atomic_read(&n->refcnt) == 1 &&
735 (state == NUD_FAILED || 735 (state == NUD_FAILED ||
736 time_after(now, n->used + n->parms->gc_staletime))) { 736 time_after(jiffies, n->used + n->parms->gc_staletime))) {
737 *np = n->next; 737 *np = n->next;
738 n->dead = 1; 738 n->dead = 1;
739 write_unlock(&n->lock);
740 neigh_cleanup_and_release(n);
741 continue;
742 }
739 write_unlock(&n->lock); 743 write_unlock(&n->lock);
740 neigh_cleanup_and_release(n);
741 continue;
742 }
743 write_unlock(&n->lock);
744 744
745next_elt: 745next_elt:
746 np = &n->next; 746 np = &n->next;
747 }
748 /*
749 * It's fine to release lock here, even if hash table
750 * grows while we are preempted.
751 */
752 write_unlock_bh(&tbl->lock);
753 cond_resched();
754 write_lock_bh(&tbl->lock);
747 } 755 }
748
749 /* Cycle through all hash buckets every base_reachable_time/2 ticks. 756 /* Cycle through all hash buckets every base_reachable_time/2 ticks.
750 * ARP entry timeouts range from 1/2 base_reachable_time to 3/2 757 * ARP entry timeouts range from 1/2 base_reachable_time to 3/2
751 * base_reachable_time. 758 * base_reachable_time.
752 */ 759 */
753 expire = tbl->parms.base_reachable_time >> 1; 760 schedule_delayed_work(&tbl->gc_work,
754 expire /= (tbl->hash_mask + 1); 761 tbl->parms.base_reachable_time >> 1);
755 if (!expire) 762 write_unlock_bh(&tbl->lock);
756 expire = 1;
757
758 if (expire>HZ)
759 mod_timer(&tbl->gc_timer, round_jiffies(now + expire));
760 else
761 mod_timer(&tbl->gc_timer, now + expire);
762
763 write_unlock(&tbl->lock);
764} 763}
765 764
766static __inline__ int neigh_max_probes(struct neighbour *n) 765static __inline__ int neigh_max_probes(struct neighbour *n)
@@ -1316,7 +1315,7 @@ void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p,
1316} 1315}
1317EXPORT_SYMBOL(pneigh_enqueue); 1316EXPORT_SYMBOL(pneigh_enqueue);
1318 1317
1319static inline struct neigh_parms *lookup_neigh_params(struct neigh_table *tbl, 1318static inline struct neigh_parms *lookup_neigh_parms(struct neigh_table *tbl,
1320 struct net *net, int ifindex) 1319 struct net *net, int ifindex)
1321{ 1320{
1322 struct neigh_parms *p; 1321 struct neigh_parms *p;
@@ -1337,7 +1336,7 @@ struct neigh_parms *neigh_parms_alloc(struct net_device *dev,
1337 struct net *net = dev_net(dev); 1336 struct net *net = dev_net(dev);
1338 const struct net_device_ops *ops = dev->netdev_ops; 1337 const struct net_device_ops *ops = dev->netdev_ops;
1339 1338
1340 ref = lookup_neigh_params(tbl, net, 0); 1339 ref = lookup_neigh_parms(tbl, net, 0);
1341 if (!ref) 1340 if (!ref)
1342 return NULL; 1341 return NULL;
1343 1342
@@ -1442,10 +1441,8 @@ void neigh_table_init_no_netlink(struct neigh_table *tbl)
1442 get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd)); 1441 get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd));
1443 1442
1444 rwlock_init(&tbl->lock); 1443 rwlock_init(&tbl->lock);
1445 setup_timer(&tbl->gc_timer, neigh_periodic_timer, (unsigned long)tbl); 1444 INIT_DELAYED_WORK_DEFERRABLE(&tbl->gc_work, neigh_periodic_work);
1446 tbl->gc_timer.expires = now + 1; 1445 schedule_delayed_work(&tbl->gc_work, tbl->parms.reachable_time);
1447 add_timer(&tbl->gc_timer);
1448
1449 setup_timer(&tbl->proxy_timer, neigh_proxy_process, (unsigned long)tbl); 1446 setup_timer(&tbl->proxy_timer, neigh_proxy_process, (unsigned long)tbl);
1450 skb_queue_head_init_class(&tbl->proxy_queue, 1447 skb_queue_head_init_class(&tbl->proxy_queue,
1451 &neigh_table_proxy_queue_class); 1448 &neigh_table_proxy_queue_class);
@@ -1482,7 +1479,8 @@ int neigh_table_clear(struct neigh_table *tbl)
1482 struct neigh_table **tp; 1479 struct neigh_table **tp;
1483 1480
1484 /* It is not clean... Fix it to unload IPv6 module safely */ 1481 /* It is not clean... Fix it to unload IPv6 module safely */
1485 del_timer_sync(&tbl->gc_timer); 1482 cancel_delayed_work(&tbl->gc_work);
1483 flush_scheduled_work();
1486 del_timer_sync(&tbl->proxy_timer); 1484 del_timer_sync(&tbl->proxy_timer);
1487 pneigh_queue_purge(&tbl->proxy_queue); 1485 pneigh_queue_purge(&tbl->proxy_queue);
1488 neigh_ifdown(tbl, NULL); 1486 neigh_ifdown(tbl, NULL);
@@ -1752,7 +1750,6 @@ static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl,
1752 .ndtc_last_rand = jiffies_to_msecs(rand_delta), 1750 .ndtc_last_rand = jiffies_to_msecs(rand_delta),
1753 .ndtc_hash_rnd = tbl->hash_rnd, 1751 .ndtc_hash_rnd = tbl->hash_rnd,
1754 .ndtc_hash_mask = tbl->hash_mask, 1752 .ndtc_hash_mask = tbl->hash_mask,
1755 .ndtc_hash_chain_gc = tbl->hash_chain_gc,
1756 .ndtc_proxy_qlen = tbl->proxy_queue.qlen, 1753 .ndtc_proxy_qlen = tbl->proxy_queue.qlen,
1757 }; 1754 };
1758 1755
@@ -1906,7 +1903,7 @@ static int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
1906 if (tbp[NDTPA_IFINDEX]) 1903 if (tbp[NDTPA_IFINDEX])
1907 ifindex = nla_get_u32(tbp[NDTPA_IFINDEX]); 1904 ifindex = nla_get_u32(tbp[NDTPA_IFINDEX]);
1908 1905
1909 p = lookup_neigh_params(tbl, net, ifindex); 1906 p = lookup_neigh_parms(tbl, net, ifindex);
1910 if (p == NULL) { 1907 if (p == NULL) {
1911 err = -ENOENT; 1908 err = -ENOENT;
1912 goto errout_tbl_lock; 1909 goto errout_tbl_lock;
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index 3994680c08b9..ad91e9e5f475 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -141,7 +141,7 @@ static ssize_t show_dormant(struct device *dev,
141 return -EINVAL; 141 return -EINVAL;
142} 142}
143 143
144static const char *operstates[] = { 144static const char *const operstates[] = {
145 "unknown", 145 "unknown",
146 "notpresent", /* currently unused */ 146 "notpresent", /* currently unused */
147 "down", 147 "down",
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index 197283072cc8..1c1af2756f38 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -6,6 +6,8 @@
6#include <linux/delay.h> 6#include <linux/delay.h>
7#include <linux/sched.h> 7#include <linux/sched.h>
8#include <linux/idr.h> 8#include <linux/idr.h>
9#include <linux/rculist.h>
10#include <linux/nsproxy.h>
9#include <net/net_namespace.h> 11#include <net/net_namespace.h>
10#include <net/netns/generic.h> 12#include <net/netns/generic.h>
11 13
@@ -127,7 +129,7 @@ static struct net *net_create(void)
127 rv = setup_net(net); 129 rv = setup_net(net);
128 if (rv == 0) { 130 if (rv == 0) {
129 rtnl_lock(); 131 rtnl_lock();
130 list_add_tail(&net->list, &net_namespace_list); 132 list_add_tail_rcu(&net->list, &net_namespace_list);
131 rtnl_unlock(); 133 rtnl_unlock();
132 } 134 }
133 mutex_unlock(&net_mutex); 135 mutex_unlock(&net_mutex);
@@ -156,9 +158,16 @@ static void cleanup_net(struct work_struct *work)
156 158
157 /* Don't let anyone else find us. */ 159 /* Don't let anyone else find us. */
158 rtnl_lock(); 160 rtnl_lock();
159 list_del(&net->list); 161 list_del_rcu(&net->list);
160 rtnl_unlock(); 162 rtnl_unlock();
161 163
164 /*
165 * Another CPU might be rcu-iterating the list, wait for it.
166 * This needs to be before calling the exit() notifiers, so
167 * the rcu_barrier() below isn't sufficient alone.
168 */
169 synchronize_rcu();
170
162 /* Run all of the network namespace exit methods */ 171 /* Run all of the network namespace exit methods */
163 list_for_each_entry_reverse(ops, &pernet_list, list) { 172 list_for_each_entry_reverse(ops, &pernet_list, list) {
164 if (ops->exit) 173 if (ops->exit)
@@ -193,6 +202,26 @@ struct net *copy_net_ns(unsigned long flags, struct net *old_net)
193} 202}
194#endif 203#endif
195 204
205struct net *get_net_ns_by_pid(pid_t pid)
206{
207 struct task_struct *tsk;
208 struct net *net;
209
210 /* Lookup the network namespace */
211 net = ERR_PTR(-ESRCH);
212 rcu_read_lock();
213 tsk = find_task_by_vpid(pid);
214 if (tsk) {
215 struct nsproxy *nsproxy;
216 nsproxy = task_nsproxy(tsk);
217 if (nsproxy)
218 net = get_net(nsproxy->net_ns);
219 }
220 rcu_read_unlock();
221 return net;
222}
223EXPORT_SYMBOL_GPL(get_net_ns_by_pid);
224
196static int __init net_ns_init(void) 225static int __init net_ns_init(void)
197{ 226{
198 struct net_generic *ng; 227 struct net_generic *ng;
@@ -219,7 +248,7 @@ static int __init net_ns_init(void)
219 panic("Could not setup the initial network namespace"); 248 panic("Could not setup the initial network namespace");
220 249
221 rtnl_lock(); 250 rtnl_lock();
222 list_add_tail(&init_net.list, &net_namespace_list); 251 list_add_tail_rcu(&init_net.list, &net_namespace_list);
223 rtnl_unlock(); 252 rtnl_unlock();
224 253
225 mutex_unlock(&net_mutex); 254 mutex_unlock(&net_mutex);
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 1b76eb11deb4..0b4d0d35ef40 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -9,6 +9,7 @@
9 * Copyright (C) 2002 Red Hat, Inc. 9 * Copyright (C) 2002 Red Hat, Inc.
10 */ 10 */
11 11
12#include <linux/moduleparam.h>
12#include <linux/netdevice.h> 13#include <linux/netdevice.h>
13#include <linux/etherdevice.h> 14#include <linux/etherdevice.h>
14#include <linux/string.h> 15#include <linux/string.h>
@@ -50,6 +51,9 @@ static atomic_t trapped;
50static void zap_completion_queue(void); 51static void zap_completion_queue(void);
51static void arp_reply(struct sk_buff *skb); 52static void arp_reply(struct sk_buff *skb);
52 53
54static unsigned int carrier_timeout = 4;
55module_param(carrier_timeout, uint, 0644);
56
53static void queue_process(struct work_struct *work) 57static void queue_process(struct work_struct *work)
54{ 58{
55 struct netpoll_info *npinfo = 59 struct netpoll_info *npinfo =
@@ -737,7 +741,7 @@ int netpoll_setup(struct netpoll *np)
737 } 741 }
738 742
739 atleast = jiffies + HZ/10; 743 atleast = jiffies + HZ/10;
740 atmost = jiffies + 4*HZ; 744 atmost = jiffies + carrier_timeout * HZ;
741 while (!netif_carrier_ok(ndev)) { 745 while (!netif_carrier_ok(ndev)) {
742 if (time_after(jiffies, atmost)) { 746 if (time_after(jiffies, atmost)) {
743 printk(KERN_NOTICE 747 printk(KERN_NOTICE
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 19b8c20e98a4..0bcecbf06581 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -131,6 +131,7 @@
131#include <linux/ioport.h> 131#include <linux/ioport.h>
132#include <linux/interrupt.h> 132#include <linux/interrupt.h>
133#include <linux/capability.h> 133#include <linux/capability.h>
134#include <linux/hrtimer.h>
134#include <linux/freezer.h> 135#include <linux/freezer.h>
135#include <linux/delay.h> 136#include <linux/delay.h>
136#include <linux/timer.h> 137#include <linux/timer.h>
@@ -162,14 +163,13 @@
162#include <asm/byteorder.h> 163#include <asm/byteorder.h>
163#include <linux/rcupdate.h> 164#include <linux/rcupdate.h>
164#include <linux/bitops.h> 165#include <linux/bitops.h>
165#include <asm/io.h> 166#include <linux/io.h>
167#include <linux/timex.h>
168#include <linux/uaccess.h>
166#include <asm/dma.h> 169#include <asm/dma.h>
167#include <asm/uaccess.h>
168#include <asm/div64.h> /* do_div */ 170#include <asm/div64.h> /* do_div */
169#include <asm/timex.h>
170
171#define VERSION "pktgen v2.70: Packet Generator for packet performance testing.\n"
172 171
172#define VERSION "2.72"
173#define IP_NAME_SZ 32 173#define IP_NAME_SZ 32
174#define MAX_MPLS_LABELS 16 /* This is the max label stack depth */ 174#define MAX_MPLS_LABELS 16 /* This is the max label stack depth */
175#define MPLS_STACK_BOTTOM htonl(0x00000100) 175#define MPLS_STACK_BOTTOM htonl(0x00000100)
@@ -206,7 +206,7 @@
206#define PKTGEN_MAGIC 0xbe9be955 206#define PKTGEN_MAGIC 0xbe9be955
207#define PG_PROC_DIR "pktgen" 207#define PG_PROC_DIR "pktgen"
208#define PGCTRL "pgctrl" 208#define PGCTRL "pgctrl"
209static struct proc_dir_entry *pg_proc_dir = NULL; 209static struct proc_dir_entry *pg_proc_dir;
210 210
211#define MAX_CFLOWS 65536 211#define MAX_CFLOWS 65536
212 212
@@ -231,9 +231,9 @@ struct pktgen_dev {
231 */ 231 */
232 struct proc_dir_entry *entry; /* proc file */ 232 struct proc_dir_entry *entry; /* proc file */
233 struct pktgen_thread *pg_thread;/* the owner */ 233 struct pktgen_thread *pg_thread;/* the owner */
234 struct list_head list; /* Used for chaining in the thread's run-queue */ 234 struct list_head list; /* chaining in the thread's run-queue */
235 235
236 int running; /* if this changes to false, the test will stop */ 236 int running; /* if false, the test will stop */
237 237
238 /* If min != max, then we will either do a linear iteration, or 238 /* If min != max, then we will either do a linear iteration, or
239 * we will do a random selection from within the range. 239 * we will do a random selection from within the range.
@@ -246,33 +246,37 @@ struct pktgen_dev {
246 int max_pkt_size; /* = ETH_ZLEN; */ 246 int max_pkt_size; /* = ETH_ZLEN; */
247 int pkt_overhead; /* overhead for MPLS, VLANs, IPSEC etc */ 247 int pkt_overhead; /* overhead for MPLS, VLANs, IPSEC etc */
248 int nfrags; 248 int nfrags;
249 __u32 delay_us; /* Default delay */ 249 u64 delay; /* nano-seconds */
250 __u32 delay_ns; 250
251 __u64 count; /* Default No packets to send */ 251 __u64 count; /* Default No packets to send */
252 __u64 sofar; /* How many pkts we've sent so far */ 252 __u64 sofar; /* How many pkts we've sent so far */
253 __u64 tx_bytes; /* How many bytes we've transmitted */ 253 __u64 tx_bytes; /* How many bytes we've transmitted */
254 __u64 errors; /* Errors when trying to transmit, pkts will be re-sent */ 254 __u64 errors; /* Errors when trying to transmit,
255 pkts will be re-sent */
255 256
256 /* runtime counters relating to clone_skb */ 257 /* runtime counters relating to clone_skb */
257 __u64 next_tx_us; /* timestamp of when to tx next */
258 __u32 next_tx_ns;
259 258
260 __u64 allocated_skbs; 259 __u64 allocated_skbs;
261 __u32 clone_count; 260 __u32 clone_count;
262 int last_ok; /* Was last skb sent? 261 int last_ok; /* Was last skb sent?
263 * Or a failed transmit of some sort? This will keep 262 * Or a failed transmit of some sort?
264 * sequence numbers in order, for example. 263 * This will keep sequence numbers in order
265 */ 264 */
266 __u64 started_at; /* micro-seconds */ 265 ktime_t next_tx;
267 __u64 stopped_at; /* micro-seconds */ 266 ktime_t started_at;
268 __u64 idle_acc; /* micro-seconds */ 267 ktime_t stopped_at;
268 u64 idle_acc; /* nano-seconds */
269
269 __u32 seq_num; 270 __u32 seq_num;
270 271
271 int clone_skb; /* Use multiple SKBs during packet gen. If this number 272 int clone_skb; /*
272 * is greater than 1, then that many copies of the same 273 * Use multiple SKBs during packet gen.
273 * packet will be sent before a new packet is allocated. 274 * If this number is greater than 1, then
274 * For instance, if you want to send 1024 identical packets 275 * that many copies of the same packet will be
275 * before creating a new packet, set clone_skb to 1024. 276 * sent before a new packet is allocated.
277 * If you want to send 1024 identical packets
278 * before creating a new packet,
279 * set clone_skb to 1024.
276 */ 280 */
277 281
278 char dst_min[IP_NAME_SZ]; /* IP, ie 1.2.3.4 */ 282 char dst_min[IP_NAME_SZ]; /* IP, ie 1.2.3.4 */
@@ -304,8 +308,10 @@ struct pktgen_dev {
304 __u16 udp_dst_max; /* exclusive, dest UDP port */ 308 __u16 udp_dst_max; /* exclusive, dest UDP port */
305 309
306 /* DSCP + ECN */ 310 /* DSCP + ECN */
307 __u8 tos; /* six most significant bits of (former) IPv4 TOS are for dscp codepoint */ 311 __u8 tos; /* six MSB of (former) IPv4 TOS
308 __u8 traffic_class; /* ditto for the (former) Traffic Class in IPv6 (see RFC 3260, sec. 4) */ 312 are for dscp codepoint */
313 __u8 traffic_class; /* ditto for the (former) Traffic Class in IPv6
314 (see RFC 3260, sec. 4) */
309 315
310 /* MPLS */ 316 /* MPLS */
311 unsigned nr_labels; /* Depth of stack, 0 = no MPLS */ 317 unsigned nr_labels; /* Depth of stack, 0 = no MPLS */
@@ -346,15 +352,17 @@ struct pktgen_dev {
346 */ 352 */
347 __u16 pad; /* pad out the hh struct to an even 16 bytes */ 353 __u16 pad; /* pad out the hh struct to an even 16 bytes */
348 354
349 struct sk_buff *skb; /* skb we are to transmit next, mainly used for when we 355 struct sk_buff *skb; /* skb we are to transmit next, used for when we
350 * are transmitting the same one multiple times 356 * are transmitting the same one multiple times
351 */ 357 */
352 struct net_device *odev; /* The out-going device. Note that the device should 358 struct net_device *odev; /* The out-going device.
353 * have it's pg_info pointer pointing back to this 359 * Note that the device should have it's
354 * device. This will be set when the user specifies 360 * pg_info pointer pointing back to this
355 * the out-going device name (not when the inject is 361 * device.
356 * started as it used to do.) 362 * Set when the user specifies the out-going
357 */ 363 * device name (not when the inject is
364 * started as it used to do.)
365 */
358 struct flow_state *flows; 366 struct flow_state *flows;
359 unsigned cflows; /* Concurrent flows (config) */ 367 unsigned cflows; /* Concurrent flows (config) */
360 unsigned lflow; /* Flow length (config) */ 368 unsigned lflow; /* Flow length (config) */
@@ -379,13 +387,14 @@ struct pktgen_hdr {
379}; 387};
380 388
381struct pktgen_thread { 389struct pktgen_thread {
382 spinlock_t if_lock; 390 spinlock_t if_lock; /* for list of devices */
383 struct list_head if_list; /* All device here */ 391 struct list_head if_list; /* All device here */
384 struct list_head th_list; 392 struct list_head th_list;
385 struct task_struct *tsk; 393 struct task_struct *tsk;
386 char result[512]; 394 char result[512];
387 395
388 /* Field for thread to receive "posted" events terminate, stop ifs etc. */ 396 /* Field for thread to receive "posted" events terminate,
397 stop ifs etc. */
389 398
390 u32 control; 399 u32 control;
391 int cpu; 400 int cpu;
@@ -397,24 +406,22 @@ struct pktgen_thread {
397#define REMOVE 1 406#define REMOVE 1
398#define FIND 0 407#define FIND 0
399 408
400/** Convert to micro-seconds */ 409static inline ktime_t ktime_now(void)
401static inline __u64 tv_to_us(const struct timeval *tv)
402{ 410{
403 __u64 us = tv->tv_usec; 411 struct timespec ts;
404 us += (__u64) tv->tv_sec * (__u64) 1000000; 412 ktime_get_ts(&ts);
405 return us; 413
414 return timespec_to_ktime(ts);
406} 415}
407 416
408static __u64 getCurUs(void) 417/* This works even if 32 bit because of careful byte order choice */
418static inline int ktime_lt(const ktime_t cmp1, const ktime_t cmp2)
409{ 419{
410 struct timeval tv; 420 return cmp1.tv64 < cmp2.tv64;
411 do_gettimeofday(&tv);
412 return tv_to_us(&tv);
413} 421}
414 422
415/* old include end */ 423static const char version[] =
416 424 "pktgen " VERSION ": Packet Generator for packet performance testing.\n";
417static char version[] __initdata = VERSION;
418 425
419static int pktgen_remove_device(struct pktgen_thread *t, struct pktgen_dev *i); 426static int pktgen_remove_device(struct pktgen_thread *t, struct pktgen_dev *i);
420static int pktgen_add_device(struct pktgen_thread *t, const char *ifname); 427static int pktgen_add_device(struct pktgen_thread *t, const char *ifname);
@@ -424,7 +431,7 @@ static int pktgen_device_event(struct notifier_block *, unsigned long, void *);
424static void pktgen_run_all_threads(void); 431static void pktgen_run_all_threads(void);
425static void pktgen_reset_all_threads(void); 432static void pktgen_reset_all_threads(void);
426static void pktgen_stop_all_threads_ifs(void); 433static void pktgen_stop_all_threads_ifs(void);
427static int pktgen_stop_device(struct pktgen_dev *pkt_dev); 434
428static void pktgen_stop(struct pktgen_thread *t); 435static void pktgen_stop(struct pktgen_thread *t);
429static void pktgen_clear_counters(struct pktgen_dev *pkt_dev); 436static void pktgen_clear_counters(struct pktgen_dev *pkt_dev);
430 437
@@ -432,10 +439,10 @@ static unsigned int scan_ip6(const char *s, char ip[16]);
432static unsigned int fmt_ip6(char *s, const char ip[16]); 439static unsigned int fmt_ip6(char *s, const char ip[16]);
433 440
434/* Module parameters, defaults. */ 441/* Module parameters, defaults. */
435static int pg_count_d = 1000; /* 1000 pkts by default */ 442static int pg_count_d __read_mostly = 1000;
436static int pg_delay_d; 443static int pg_delay_d __read_mostly;
437static int pg_clone_skb_d; 444static int pg_clone_skb_d __read_mostly;
438static int debug; 445static int debug __read_mostly;
439 446
440static DEFINE_MUTEX(pktgen_thread_lock); 447static DEFINE_MUTEX(pktgen_thread_lock);
441static LIST_HEAD(pktgen_threads); 448static LIST_HEAD(pktgen_threads);
@@ -451,12 +458,12 @@ static struct notifier_block pktgen_notifier_block = {
451 458
452static int pgctrl_show(struct seq_file *seq, void *v) 459static int pgctrl_show(struct seq_file *seq, void *v)
453{ 460{
454 seq_puts(seq, VERSION); 461 seq_puts(seq, version);
455 return 0; 462 return 0;
456} 463}
457 464
458static ssize_t pgctrl_write(struct file *file, const char __user * buf, 465static ssize_t pgctrl_write(struct file *file, const char __user *buf,
459 size_t count, loff_t * ppos) 466 size_t count, loff_t *ppos)
460{ 467{
461 int err = 0; 468 int err = 0;
462 char data[128]; 469 char data[128];
@@ -509,10 +516,9 @@ static const struct file_operations pktgen_fops = {
509 516
510static int pktgen_if_show(struct seq_file *seq, void *v) 517static int pktgen_if_show(struct seq_file *seq, void *v)
511{ 518{
512 struct pktgen_dev *pkt_dev = seq->private; 519 const struct pktgen_dev *pkt_dev = seq->private;
513 __u64 sa; 520 ktime_t stopped;
514 __u64 stopped; 521 u64 idle;
515 __u64 now = getCurUs();
516 522
517 seq_printf(seq, 523 seq_printf(seq,
518 "Params: count %llu min_pkt_size: %u max_pkt_size: %u\n", 524 "Params: count %llu min_pkt_size: %u max_pkt_size: %u\n",
@@ -520,9 +526,8 @@ static int pktgen_if_show(struct seq_file *seq, void *v)
520 pkt_dev->max_pkt_size); 526 pkt_dev->max_pkt_size);
521 527
522 seq_printf(seq, 528 seq_printf(seq,
523 " frags: %d delay: %u clone_skb: %d ifname: %s\n", 529 " frags: %d delay: %llu clone_skb: %d ifname: %s\n",
524 pkt_dev->nfrags, 530 pkt_dev->nfrags, (unsigned long long) pkt_dev->delay,
525 1000 * pkt_dev->delay_us + pkt_dev->delay_ns,
526 pkt_dev->clone_skb, pkt_dev->odev->name); 531 pkt_dev->clone_skb, pkt_dev->odev->name);
527 532
528 seq_printf(seq, " flows: %u flowlen: %u\n", pkt_dev->cflows, 533 seq_printf(seq, " flows: %u flowlen: %u\n", pkt_dev->cflows,
@@ -549,11 +554,14 @@ static int pktgen_if_show(struct seq_file *seq, void *v)
549 " daddr: %s min_daddr: %s max_daddr: %s\n", b1, 554 " daddr: %s min_daddr: %s max_daddr: %s\n", b1,
550 b2, b3); 555 b2, b3);
551 556
552 } else 557 } else {
558 seq_printf(seq,
559 " dst_min: %s dst_max: %s\n",
560 pkt_dev->dst_min, pkt_dev->dst_max);
553 seq_printf(seq, 561 seq_printf(seq,
554 " dst_min: %s dst_max: %s\n src_min: %s src_max: %s\n", 562 " src_min: %s src_max: %s\n",
555 pkt_dev->dst_min, pkt_dev->dst_max, pkt_dev->src_min, 563 pkt_dev->src_min, pkt_dev->src_max);
556 pkt_dev->src_max); 564 }
557 565
558 seq_puts(seq, " src_mac: "); 566 seq_puts(seq, " src_mac: ");
559 567
@@ -565,7 +573,8 @@ static int pktgen_if_show(struct seq_file *seq, void *v)
565 seq_printf(seq, "%pM\n", pkt_dev->dst_mac); 573 seq_printf(seq, "%pM\n", pkt_dev->dst_mac);
566 574
567 seq_printf(seq, 575 seq_printf(seq,
568 " udp_src_min: %d udp_src_max: %d udp_dst_min: %d udp_dst_max: %d\n", 576 " udp_src_min: %d udp_src_max: %d"
577 " udp_dst_min: %d udp_dst_max: %d\n",
569 pkt_dev->udp_src_min, pkt_dev->udp_src_max, 578 pkt_dev->udp_src_min, pkt_dev->udp_src_max,
570 pkt_dev->udp_dst_min, pkt_dev->udp_dst_max); 579 pkt_dev->udp_dst_min, pkt_dev->udp_dst_max);
571 580
@@ -581,23 +590,21 @@ static int pktgen_if_show(struct seq_file *seq, void *v)
581 i == pkt_dev->nr_labels-1 ? "\n" : ", "); 590 i == pkt_dev->nr_labels-1 ? "\n" : ", ");
582 } 591 }
583 592
584 if (pkt_dev->vlan_id != 0xffff) { 593 if (pkt_dev->vlan_id != 0xffff)
585 seq_printf(seq, " vlan_id: %u vlan_p: %u vlan_cfi: %u\n", 594 seq_printf(seq, " vlan_id: %u vlan_p: %u vlan_cfi: %u\n",
586 pkt_dev->vlan_id, pkt_dev->vlan_p, pkt_dev->vlan_cfi); 595 pkt_dev->vlan_id, pkt_dev->vlan_p,
587 } 596 pkt_dev->vlan_cfi);
588 597
589 if (pkt_dev->svlan_id != 0xffff) { 598 if (pkt_dev->svlan_id != 0xffff)
590 seq_printf(seq, " svlan_id: %u vlan_p: %u vlan_cfi: %u\n", 599 seq_printf(seq, " svlan_id: %u vlan_p: %u vlan_cfi: %u\n",
591 pkt_dev->svlan_id, pkt_dev->svlan_p, pkt_dev->svlan_cfi); 600 pkt_dev->svlan_id, pkt_dev->svlan_p,
592 } 601 pkt_dev->svlan_cfi);
593 602
594 if (pkt_dev->tos) { 603 if (pkt_dev->tos)
595 seq_printf(seq, " tos: 0x%02x\n", pkt_dev->tos); 604 seq_printf(seq, " tos: 0x%02x\n", pkt_dev->tos);
596 }
597 605
598 if (pkt_dev->traffic_class) { 606 if (pkt_dev->traffic_class)
599 seq_printf(seq, " traffic_class: 0x%02x\n", pkt_dev->traffic_class); 607 seq_printf(seq, " traffic_class: 0x%02x\n", pkt_dev->traffic_class);
600 }
601 608
602 seq_printf(seq, " Flags: "); 609 seq_printf(seq, " Flags: ");
603 610
@@ -654,17 +661,21 @@ static int pktgen_if_show(struct seq_file *seq, void *v)
654 661
655 seq_puts(seq, "\n"); 662 seq_puts(seq, "\n");
656 663
657 sa = pkt_dev->started_at; 664 /* not really stopped, more like last-running-at */
658 stopped = pkt_dev->stopped_at; 665 stopped = pkt_dev->running ? ktime_now() : pkt_dev->stopped_at;
659 if (pkt_dev->running) 666 idle = pkt_dev->idle_acc;
660 stopped = now; /* not really stopped, more like last-running-at */ 667 do_div(idle, NSEC_PER_USEC);
661 668
662 seq_printf(seq, 669 seq_printf(seq,
663 "Current:\n pkts-sofar: %llu errors: %llu\n started: %lluus stopped: %lluus idle: %lluus\n", 670 "Current:\n pkts-sofar: %llu errors: %llu\n",
664 (unsigned long long)pkt_dev->sofar, 671 (unsigned long long)pkt_dev->sofar,
665 (unsigned long long)pkt_dev->errors, (unsigned long long)sa, 672 (unsigned long long)pkt_dev->errors);
666 (unsigned long long)stopped, 673
667 (unsigned long long)pkt_dev->idle_acc); 674 seq_printf(seq,
675 " started: %lluus stopped: %lluus idle: %lluus\n",
676 (unsigned long long) ktime_to_us(pkt_dev->started_at),
677 (unsigned long long) ktime_to_us(stopped),
678 (unsigned long long) idle);
668 679
669 seq_printf(seq, 680 seq_printf(seq,
670 " seq_num: %d cur_dst_mac_offset: %d cur_src_mac_offset: %d\n", 681 " seq_num: %d cur_dst_mac_offset: %d cur_src_mac_offset: %d\n",
@@ -696,7 +707,8 @@ static int pktgen_if_show(struct seq_file *seq, void *v)
696} 707}
697 708
698 709
699static int hex32_arg(const char __user *user_buffer, unsigned long maxlen, __u32 *num) 710static int hex32_arg(const char __user *user_buffer, unsigned long maxlen,
711 __u32 *num)
700{ 712{
701 int i = 0; 713 int i = 0;
702 *num = 0; 714 *num = 0;
@@ -846,9 +858,9 @@ static ssize_t pktgen_if_write(struct file *file,
846 /* Read variable name */ 858 /* Read variable name */
847 859
848 len = strn_len(&user_buffer[i], sizeof(name) - 1); 860 len = strn_len(&user_buffer[i], sizeof(name) - 1);
849 if (len < 0) { 861 if (len < 0)
850 return len; 862 return len;
851 } 863
852 memset(name, 0, sizeof(name)); 864 memset(name, 0, sizeof(name));
853 if (copy_from_user(name, &user_buffer[i], len)) 865 if (copy_from_user(name, &user_buffer[i], len))
854 return -EFAULT; 866 return -EFAULT;
@@ -872,9 +884,9 @@ static ssize_t pktgen_if_write(struct file *file,
872 884
873 if (!strcmp(name, "min_pkt_size")) { 885 if (!strcmp(name, "min_pkt_size")) {
874 len = num_arg(&user_buffer[i], 10, &value); 886 len = num_arg(&user_buffer[i], 10, &value);
875 if (len < 0) { 887 if (len < 0)
876 return len; 888 return len;
877 } 889
878 i += len; 890 i += len;
879 if (value < 14 + 20 + 8) 891 if (value < 14 + 20 + 8)
880 value = 14 + 20 + 8; 892 value = 14 + 20 + 8;
@@ -889,9 +901,9 @@ static ssize_t pktgen_if_write(struct file *file,
889 901
890 if (!strcmp(name, "max_pkt_size")) { 902 if (!strcmp(name, "max_pkt_size")) {
891 len = num_arg(&user_buffer[i], 10, &value); 903 len = num_arg(&user_buffer[i], 10, &value);
892 if (len < 0) { 904 if (len < 0)
893 return len; 905 return len;
894 } 906
895 i += len; 907 i += len;
896 if (value < 14 + 20 + 8) 908 if (value < 14 + 20 + 8)
897 value = 14 + 20 + 8; 909 value = 14 + 20 + 8;
@@ -908,9 +920,9 @@ static ssize_t pktgen_if_write(struct file *file,
908 920
909 if (!strcmp(name, "pkt_size")) { 921 if (!strcmp(name, "pkt_size")) {
910 len = num_arg(&user_buffer[i], 10, &value); 922 len = num_arg(&user_buffer[i], 10, &value);
911 if (len < 0) { 923 if (len < 0)
912 return len; 924 return len;
913 } 925
914 i += len; 926 i += len;
915 if (value < 14 + 20 + 8) 927 if (value < 14 + 20 + 8)
916 value = 14 + 20 + 8; 928 value = 14 + 20 + 8;
@@ -925,9 +937,9 @@ static ssize_t pktgen_if_write(struct file *file,
925 937
926 if (!strcmp(name, "debug")) { 938 if (!strcmp(name, "debug")) {
927 len = num_arg(&user_buffer[i], 10, &value); 939 len = num_arg(&user_buffer[i], 10, &value);
928 if (len < 0) { 940 if (len < 0)
929 return len; 941 return len;
930 } 942
931 i += len; 943 i += len;
932 debug = value; 944 debug = value;
933 sprintf(pg_result, "OK: debug=%u", debug); 945 sprintf(pg_result, "OK: debug=%u", debug);
@@ -936,9 +948,9 @@ static ssize_t pktgen_if_write(struct file *file,
936 948
937 if (!strcmp(name, "frags")) { 949 if (!strcmp(name, "frags")) {
938 len = num_arg(&user_buffer[i], 10, &value); 950 len = num_arg(&user_buffer[i], 10, &value);
939 if (len < 0) { 951 if (len < 0)
940 return len; 952 return len;
941 } 953
942 i += len; 954 i += len;
943 pkt_dev->nfrags = value; 955 pkt_dev->nfrags = value;
944 sprintf(pg_result, "OK: frags=%u", pkt_dev->nfrags); 956 sprintf(pg_result, "OK: frags=%u", pkt_dev->nfrags);
@@ -946,26 +958,24 @@ static ssize_t pktgen_if_write(struct file *file,
946 } 958 }
947 if (!strcmp(name, "delay")) { 959 if (!strcmp(name, "delay")) {
948 len = num_arg(&user_buffer[i], 10, &value); 960 len = num_arg(&user_buffer[i], 10, &value);
949 if (len < 0) { 961 if (len < 0)
950 return len; 962 return len;
951 } 963
952 i += len; 964 i += len;
953 if (value == 0x7FFFFFFF) { 965 if (value == 0x7FFFFFFF)
954 pkt_dev->delay_us = 0x7FFFFFFF; 966 pkt_dev->delay = ULLONG_MAX;
955 pkt_dev->delay_ns = 0; 967 else
956 } else { 968 pkt_dev->delay = (u64)value * NSEC_PER_USEC;
957 pkt_dev->delay_us = value / 1000; 969
958 pkt_dev->delay_ns = value % 1000; 970 sprintf(pg_result, "OK: delay=%llu",
959 } 971 (unsigned long long) pkt_dev->delay);
960 sprintf(pg_result, "OK: delay=%u",
961 1000 * pkt_dev->delay_us + pkt_dev->delay_ns);
962 return count; 972 return count;
963 } 973 }
964 if (!strcmp(name, "udp_src_min")) { 974 if (!strcmp(name, "udp_src_min")) {
965 len = num_arg(&user_buffer[i], 10, &value); 975 len = num_arg(&user_buffer[i], 10, &value);
966 if (len < 0) { 976 if (len < 0)
967 return len; 977 return len;
968 } 978
969 i += len; 979 i += len;
970 if (value != pkt_dev->udp_src_min) { 980 if (value != pkt_dev->udp_src_min) {
971 pkt_dev->udp_src_min = value; 981 pkt_dev->udp_src_min = value;
@@ -976,9 +986,9 @@ static ssize_t pktgen_if_write(struct file *file,
976 } 986 }
977 if (!strcmp(name, "udp_dst_min")) { 987 if (!strcmp(name, "udp_dst_min")) {
978 len = num_arg(&user_buffer[i], 10, &value); 988 len = num_arg(&user_buffer[i], 10, &value);
979 if (len < 0) { 989 if (len < 0)
980 return len; 990 return len;
981 } 991
982 i += len; 992 i += len;
983 if (value != pkt_dev->udp_dst_min) { 993 if (value != pkt_dev->udp_dst_min) {
984 pkt_dev->udp_dst_min = value; 994 pkt_dev->udp_dst_min = value;
@@ -989,9 +999,9 @@ static ssize_t pktgen_if_write(struct file *file,
989 } 999 }
990 if (!strcmp(name, "udp_src_max")) { 1000 if (!strcmp(name, "udp_src_max")) {
991 len = num_arg(&user_buffer[i], 10, &value); 1001 len = num_arg(&user_buffer[i], 10, &value);
992 if (len < 0) { 1002 if (len < 0)
993 return len; 1003 return len;
994 } 1004
995 i += len; 1005 i += len;
996 if (value != pkt_dev->udp_src_max) { 1006 if (value != pkt_dev->udp_src_max) {
997 pkt_dev->udp_src_max = value; 1007 pkt_dev->udp_src_max = value;
@@ -1002,9 +1012,9 @@ static ssize_t pktgen_if_write(struct file *file,
1002 } 1012 }
1003 if (!strcmp(name, "udp_dst_max")) { 1013 if (!strcmp(name, "udp_dst_max")) {
1004 len = num_arg(&user_buffer[i], 10, &value); 1014 len = num_arg(&user_buffer[i], 10, &value);
1005 if (len < 0) { 1015 if (len < 0)
1006 return len; 1016 return len;
1007 } 1017
1008 i += len; 1018 i += len;
1009 if (value != pkt_dev->udp_dst_max) { 1019 if (value != pkt_dev->udp_dst_max) {
1010 pkt_dev->udp_dst_max = value; 1020 pkt_dev->udp_dst_max = value;
@@ -1015,9 +1025,9 @@ static ssize_t pktgen_if_write(struct file *file,
1015 } 1025 }
1016 if (!strcmp(name, "clone_skb")) { 1026 if (!strcmp(name, "clone_skb")) {
1017 len = num_arg(&user_buffer[i], 10, &value); 1027 len = num_arg(&user_buffer[i], 10, &value);
1018 if (len < 0) { 1028 if (len < 0)
1019 return len; 1029 return len;
1020 } 1030
1021 i += len; 1031 i += len;
1022 pkt_dev->clone_skb = value; 1032 pkt_dev->clone_skb = value;
1023 1033
@@ -1026,9 +1036,9 @@ static ssize_t pktgen_if_write(struct file *file,
1026 } 1036 }
1027 if (!strcmp(name, "count")) { 1037 if (!strcmp(name, "count")) {
1028 len = num_arg(&user_buffer[i], 10, &value); 1038 len = num_arg(&user_buffer[i], 10, &value);
1029 if (len < 0) { 1039 if (len < 0)
1030 return len; 1040 return len;
1031 } 1041
1032 i += len; 1042 i += len;
1033 pkt_dev->count = value; 1043 pkt_dev->count = value;
1034 sprintf(pg_result, "OK: count=%llu", 1044 sprintf(pg_result, "OK: count=%llu",
@@ -1037,9 +1047,9 @@ static ssize_t pktgen_if_write(struct file *file,
1037 } 1047 }
1038 if (!strcmp(name, "src_mac_count")) { 1048 if (!strcmp(name, "src_mac_count")) {
1039 len = num_arg(&user_buffer[i], 10, &value); 1049 len = num_arg(&user_buffer[i], 10, &value);
1040 if (len < 0) { 1050 if (len < 0)
1041 return len; 1051 return len;
1042 } 1052
1043 i += len; 1053 i += len;
1044 if (pkt_dev->src_mac_count != value) { 1054 if (pkt_dev->src_mac_count != value) {
1045 pkt_dev->src_mac_count = value; 1055 pkt_dev->src_mac_count = value;
@@ -1051,9 +1061,9 @@ static ssize_t pktgen_if_write(struct file *file,
1051 } 1061 }
1052 if (!strcmp(name, "dst_mac_count")) { 1062 if (!strcmp(name, "dst_mac_count")) {
1053 len = num_arg(&user_buffer[i], 10, &value); 1063 len = num_arg(&user_buffer[i], 10, &value);
1054 if (len < 0) { 1064 if (len < 0)
1055 return len; 1065 return len;
1056 } 1066
1057 i += len; 1067 i += len;
1058 if (pkt_dev->dst_mac_count != value) { 1068 if (pkt_dev->dst_mac_count != value) {
1059 pkt_dev->dst_mac_count = value; 1069 pkt_dev->dst_mac_count = value;
@@ -1067,9 +1077,9 @@ static ssize_t pktgen_if_write(struct file *file,
1067 char f[32]; 1077 char f[32];
1068 memset(f, 0, 32); 1078 memset(f, 0, 32);
1069 len = strn_len(&user_buffer[i], sizeof(f) - 1); 1079 len = strn_len(&user_buffer[i], sizeof(f) - 1);
1070 if (len < 0) { 1080 if (len < 0)
1071 return len; 1081 return len;
1072 } 1082
1073 if (copy_from_user(f, &user_buffer[i], len)) 1083 if (copy_from_user(f, &user_buffer[i], len))
1074 return -EFAULT; 1084 return -EFAULT;
1075 i += len; 1085 i += len;
@@ -1168,9 +1178,8 @@ static ssize_t pktgen_if_write(struct file *file,
1168 } 1178 }
1169 if (!strcmp(name, "dst_min") || !strcmp(name, "dst")) { 1179 if (!strcmp(name, "dst_min") || !strcmp(name, "dst")) {
1170 len = strn_len(&user_buffer[i], sizeof(pkt_dev->dst_min) - 1); 1180 len = strn_len(&user_buffer[i], sizeof(pkt_dev->dst_min) - 1);
1171 if (len < 0) { 1181 if (len < 0)
1172 return len; 1182 return len;
1173 }
1174 1183
1175 if (copy_from_user(buf, &user_buffer[i], len)) 1184 if (copy_from_user(buf, &user_buffer[i], len))
1176 return -EFAULT; 1185 return -EFAULT;
@@ -1190,9 +1199,9 @@ static ssize_t pktgen_if_write(struct file *file,
1190 } 1199 }
1191 if (!strcmp(name, "dst_max")) { 1200 if (!strcmp(name, "dst_max")) {
1192 len = strn_len(&user_buffer[i], sizeof(pkt_dev->dst_max) - 1); 1201 len = strn_len(&user_buffer[i], sizeof(pkt_dev->dst_max) - 1);
1193 if (len < 0) { 1202 if (len < 0)
1194 return len; 1203 return len;
1195 } 1204
1196 1205
1197 if (copy_from_user(buf, &user_buffer[i], len)) 1206 if (copy_from_user(buf, &user_buffer[i], len))
1198 return -EFAULT; 1207 return -EFAULT;
@@ -1303,9 +1312,9 @@ static ssize_t pktgen_if_write(struct file *file,
1303 } 1312 }
1304 if (!strcmp(name, "src_min")) { 1313 if (!strcmp(name, "src_min")) {
1305 len = strn_len(&user_buffer[i], sizeof(pkt_dev->src_min) - 1); 1314 len = strn_len(&user_buffer[i], sizeof(pkt_dev->src_min) - 1);
1306 if (len < 0) { 1315 if (len < 0)
1307 return len; 1316 return len;
1308 } 1317
1309 if (copy_from_user(buf, &user_buffer[i], len)) 1318 if (copy_from_user(buf, &user_buffer[i], len))
1310 return -EFAULT; 1319 return -EFAULT;
1311 buf[len] = 0; 1320 buf[len] = 0;
@@ -1324,9 +1333,9 @@ static ssize_t pktgen_if_write(struct file *file,
1324 } 1333 }
1325 if (!strcmp(name, "src_max")) { 1334 if (!strcmp(name, "src_max")) {
1326 len = strn_len(&user_buffer[i], sizeof(pkt_dev->src_max) - 1); 1335 len = strn_len(&user_buffer[i], sizeof(pkt_dev->src_max) - 1);
1327 if (len < 0) { 1336 if (len < 0)
1328 return len; 1337 return len;
1329 } 1338
1330 if (copy_from_user(buf, &user_buffer[i], len)) 1339 if (copy_from_user(buf, &user_buffer[i], len))
1331 return -EFAULT; 1340 return -EFAULT;
1332 buf[len] = 0; 1341 buf[len] = 0;
@@ -1350,9 +1359,9 @@ static ssize_t pktgen_if_write(struct file *file,
1350 memcpy(old_dmac, pkt_dev->dst_mac, ETH_ALEN); 1359 memcpy(old_dmac, pkt_dev->dst_mac, ETH_ALEN);
1351 1360
1352 len = strn_len(&user_buffer[i], sizeof(valstr) - 1); 1361 len = strn_len(&user_buffer[i], sizeof(valstr) - 1);
1353 if (len < 0) { 1362 if (len < 0)
1354 return len; 1363 return len;
1355 } 1364
1356 memset(valstr, 0, sizeof(valstr)); 1365 memset(valstr, 0, sizeof(valstr));
1357 if (copy_from_user(valstr, &user_buffer[i], len)) 1366 if (copy_from_user(valstr, &user_buffer[i], len))
1358 return -EFAULT; 1367 return -EFAULT;
@@ -1392,9 +1401,9 @@ static ssize_t pktgen_if_write(struct file *file,
1392 memcpy(old_smac, pkt_dev->src_mac, ETH_ALEN); 1401 memcpy(old_smac, pkt_dev->src_mac, ETH_ALEN);
1393 1402
1394 len = strn_len(&user_buffer[i], sizeof(valstr) - 1); 1403 len = strn_len(&user_buffer[i], sizeof(valstr) - 1);
1395 if (len < 0) { 1404 if (len < 0)
1396 return len; 1405 return len;
1397 } 1406
1398 memset(valstr, 0, sizeof(valstr)); 1407 memset(valstr, 0, sizeof(valstr));
1399 if (copy_from_user(valstr, &user_buffer[i], len)) 1408 if (copy_from_user(valstr, &user_buffer[i], len))
1400 return -EFAULT; 1409 return -EFAULT;
@@ -1435,9 +1444,9 @@ static ssize_t pktgen_if_write(struct file *file,
1435 1444
1436 if (!strcmp(name, "flows")) { 1445 if (!strcmp(name, "flows")) {
1437 len = num_arg(&user_buffer[i], 10, &value); 1446 len = num_arg(&user_buffer[i], 10, &value);
1438 if (len < 0) { 1447 if (len < 0)
1439 return len; 1448 return len;
1440 } 1449
1441 i += len; 1450 i += len;
1442 if (value > MAX_CFLOWS) 1451 if (value > MAX_CFLOWS)
1443 value = MAX_CFLOWS; 1452 value = MAX_CFLOWS;
@@ -1449,9 +1458,9 @@ static ssize_t pktgen_if_write(struct file *file,
1449 1458
1450 if (!strcmp(name, "flowlen")) { 1459 if (!strcmp(name, "flowlen")) {
1451 len = num_arg(&user_buffer[i], 10, &value); 1460 len = num_arg(&user_buffer[i], 10, &value);
1452 if (len < 0) { 1461 if (len < 0)
1453 return len; 1462 return len;
1454 } 1463
1455 i += len; 1464 i += len;
1456 pkt_dev->lflow = value; 1465 pkt_dev->lflow = value;
1457 sprintf(pg_result, "OK: flowlen=%u", pkt_dev->lflow); 1466 sprintf(pg_result, "OK: flowlen=%u", pkt_dev->lflow);
@@ -1460,9 +1469,9 @@ static ssize_t pktgen_if_write(struct file *file,
1460 1469
1461 if (!strcmp(name, "queue_map_min")) { 1470 if (!strcmp(name, "queue_map_min")) {
1462 len = num_arg(&user_buffer[i], 5, &value); 1471 len = num_arg(&user_buffer[i], 5, &value);
1463 if (len < 0) { 1472 if (len < 0)
1464 return len; 1473 return len;
1465 } 1474
1466 i += len; 1475 i += len;
1467 pkt_dev->queue_map_min = value; 1476 pkt_dev->queue_map_min = value;
1468 sprintf(pg_result, "OK: queue_map_min=%u", pkt_dev->queue_map_min); 1477 sprintf(pg_result, "OK: queue_map_min=%u", pkt_dev->queue_map_min);
@@ -1471,9 +1480,9 @@ static ssize_t pktgen_if_write(struct file *file,
1471 1480
1472 if (!strcmp(name, "queue_map_max")) { 1481 if (!strcmp(name, "queue_map_max")) {
1473 len = num_arg(&user_buffer[i], 5, &value); 1482 len = num_arg(&user_buffer[i], 5, &value);
1474 if (len < 0) { 1483 if (len < 0)
1475 return len; 1484 return len;
1476 } 1485
1477 i += len; 1486 i += len;
1478 pkt_dev->queue_map_max = value; 1487 pkt_dev->queue_map_max = value;
1479 sprintf(pg_result, "OK: queue_map_max=%u", pkt_dev->queue_map_max); 1488 sprintf(pg_result, "OK: queue_map_max=%u", pkt_dev->queue_map_max);
@@ -1505,9 +1514,9 @@ static ssize_t pktgen_if_write(struct file *file,
1505 1514
1506 if (!strcmp(name, "vlan_id")) { 1515 if (!strcmp(name, "vlan_id")) {
1507 len = num_arg(&user_buffer[i], 4, &value); 1516 len = num_arg(&user_buffer[i], 4, &value);
1508 if (len < 0) { 1517 if (len < 0)
1509 return len; 1518 return len;
1510 } 1519
1511 i += len; 1520 i += len;
1512 if (value <= 4095) { 1521 if (value <= 4095) {
1513 pkt_dev->vlan_id = value; /* turn on VLAN */ 1522 pkt_dev->vlan_id = value; /* turn on VLAN */
@@ -1532,9 +1541,9 @@ static ssize_t pktgen_if_write(struct file *file,
1532 1541
1533 if (!strcmp(name, "vlan_p")) { 1542 if (!strcmp(name, "vlan_p")) {
1534 len = num_arg(&user_buffer[i], 1, &value); 1543 len = num_arg(&user_buffer[i], 1, &value);
1535 if (len < 0) { 1544 if (len < 0)
1536 return len; 1545 return len;
1537 } 1546
1538 i += len; 1547 i += len;
1539 if ((value <= 7) && (pkt_dev->vlan_id != 0xffff)) { 1548 if ((value <= 7) && (pkt_dev->vlan_id != 0xffff)) {
1540 pkt_dev->vlan_p = value; 1549 pkt_dev->vlan_p = value;
@@ -1547,9 +1556,9 @@ static ssize_t pktgen_if_write(struct file *file,
1547 1556
1548 if (!strcmp(name, "vlan_cfi")) { 1557 if (!strcmp(name, "vlan_cfi")) {
1549 len = num_arg(&user_buffer[i], 1, &value); 1558 len = num_arg(&user_buffer[i], 1, &value);
1550 if (len < 0) { 1559 if (len < 0)
1551 return len; 1560 return len;
1552 } 1561
1553 i += len; 1562 i += len;
1554 if ((value <= 1) && (pkt_dev->vlan_id != 0xffff)) { 1563 if ((value <= 1) && (pkt_dev->vlan_id != 0xffff)) {
1555 pkt_dev->vlan_cfi = value; 1564 pkt_dev->vlan_cfi = value;
@@ -1562,9 +1571,9 @@ static ssize_t pktgen_if_write(struct file *file,
1562 1571
1563 if (!strcmp(name, "svlan_id")) { 1572 if (!strcmp(name, "svlan_id")) {
1564 len = num_arg(&user_buffer[i], 4, &value); 1573 len = num_arg(&user_buffer[i], 4, &value);
1565 if (len < 0) { 1574 if (len < 0)
1566 return len; 1575 return len;
1567 } 1576
1568 i += len; 1577 i += len;
1569 if ((value <= 4095) && ((pkt_dev->vlan_id != 0xffff))) { 1578 if ((value <= 4095) && ((pkt_dev->vlan_id != 0xffff))) {
1570 pkt_dev->svlan_id = value; /* turn on SVLAN */ 1579 pkt_dev->svlan_id = value; /* turn on SVLAN */
@@ -1589,9 +1598,9 @@ static ssize_t pktgen_if_write(struct file *file,
1589 1598
1590 if (!strcmp(name, "svlan_p")) { 1599 if (!strcmp(name, "svlan_p")) {
1591 len = num_arg(&user_buffer[i], 1, &value); 1600 len = num_arg(&user_buffer[i], 1, &value);
1592 if (len < 0) { 1601 if (len < 0)
1593 return len; 1602 return len;
1594 } 1603
1595 i += len; 1604 i += len;
1596 if ((value <= 7) && (pkt_dev->svlan_id != 0xffff)) { 1605 if ((value <= 7) && (pkt_dev->svlan_id != 0xffff)) {
1597 pkt_dev->svlan_p = value; 1606 pkt_dev->svlan_p = value;
@@ -1604,9 +1613,9 @@ static ssize_t pktgen_if_write(struct file *file,
1604 1613
1605 if (!strcmp(name, "svlan_cfi")) { 1614 if (!strcmp(name, "svlan_cfi")) {
1606 len = num_arg(&user_buffer[i], 1, &value); 1615 len = num_arg(&user_buffer[i], 1, &value);
1607 if (len < 0) { 1616 if (len < 0)
1608 return len; 1617 return len;
1609 } 1618
1610 i += len; 1619 i += len;
1611 if ((value <= 1) && (pkt_dev->svlan_id != 0xffff)) { 1620 if ((value <= 1) && (pkt_dev->svlan_id != 0xffff)) {
1612 pkt_dev->svlan_cfi = value; 1621 pkt_dev->svlan_cfi = value;
@@ -1620,9 +1629,9 @@ static ssize_t pktgen_if_write(struct file *file,
1620 if (!strcmp(name, "tos")) { 1629 if (!strcmp(name, "tos")) {
1621 __u32 tmp_value = 0; 1630 __u32 tmp_value = 0;
1622 len = hex32_arg(&user_buffer[i], 2, &tmp_value); 1631 len = hex32_arg(&user_buffer[i], 2, &tmp_value);
1623 if (len < 0) { 1632 if (len < 0)
1624 return len; 1633 return len;
1625 } 1634
1626 i += len; 1635 i += len;
1627 if (len == 2) { 1636 if (len == 2) {
1628 pkt_dev->tos = tmp_value; 1637 pkt_dev->tos = tmp_value;
@@ -1636,9 +1645,9 @@ static ssize_t pktgen_if_write(struct file *file,
1636 if (!strcmp(name, "traffic_class")) { 1645 if (!strcmp(name, "traffic_class")) {
1637 __u32 tmp_value = 0; 1646 __u32 tmp_value = 0;
1638 len = hex32_arg(&user_buffer[i], 2, &tmp_value); 1647 len = hex32_arg(&user_buffer[i], 2, &tmp_value);
1639 if (len < 0) { 1648 if (len < 0)
1640 return len; 1649 return len;
1641 } 1650
1642 i += len; 1651 i += len;
1643 if (len == 2) { 1652 if (len == 2) {
1644 pkt_dev->traffic_class = tmp_value; 1653 pkt_dev->traffic_class = tmp_value;
@@ -1670,7 +1679,7 @@ static const struct file_operations pktgen_if_fops = {
1670static int pktgen_thread_show(struct seq_file *seq, void *v) 1679static int pktgen_thread_show(struct seq_file *seq, void *v)
1671{ 1680{
1672 struct pktgen_thread *t = seq->private; 1681 struct pktgen_thread *t = seq->private;
1673 struct pktgen_dev *pkt_dev; 1682 const struct pktgen_dev *pkt_dev;
1674 1683
1675 BUG_ON(!t); 1684 BUG_ON(!t);
1676 1685
@@ -1873,8 +1882,10 @@ static void pktgen_change_name(struct net_device *dev)
1873 1882
1874 remove_proc_entry(pkt_dev->entry->name, pg_proc_dir); 1883 remove_proc_entry(pkt_dev->entry->name, pg_proc_dir);
1875 1884
1876 pkt_dev->entry = create_proc_entry(dev->name, 0600, 1885 pkt_dev->entry = proc_create_data(dev->name, 0600,
1877 pg_proc_dir); 1886 pg_proc_dir,
1887 &pktgen_if_fops,
1888 pkt_dev);
1878 if (!pkt_dev->entry) 1889 if (!pkt_dev->entry)
1879 printk(KERN_ERR "pktgen: can't move proc " 1890 printk(KERN_ERR "pktgen: can't move proc "
1880 " entry for '%s'\n", dev->name); 1891 " entry for '%s'\n", dev->name);
@@ -1908,13 +1919,14 @@ static int pktgen_device_event(struct notifier_block *unused,
1908 return NOTIFY_DONE; 1919 return NOTIFY_DONE;
1909} 1920}
1910 1921
1911static struct net_device *pktgen_dev_get_by_name(struct pktgen_dev *pkt_dev, const char *ifname) 1922static struct net_device *pktgen_dev_get_by_name(struct pktgen_dev *pkt_dev,
1923 const char *ifname)
1912{ 1924{
1913 char b[IFNAMSIZ+5]; 1925 char b[IFNAMSIZ+5];
1914 int i = 0; 1926 int i = 0;
1915 1927
1916 for(i=0; ifname[i] != '@'; i++) { 1928 for (i = 0; ifname[i] != '@'; i++) {
1917 if(i == IFNAMSIZ) 1929 if (i == IFNAMSIZ)
1918 break; 1930 break;
1919 1931
1920 b[i] = ifname[i]; 1932 b[i] = ifname[i];
@@ -1981,7 +1993,7 @@ static void pktgen_setup_inject(struct pktgen_dev *pkt_dev)
1981 printk(KERN_WARNING "pktgen: WARNING: Requested " 1993 printk(KERN_WARNING "pktgen: WARNING: Requested "
1982 "queue_map_min (zero-based) (%d) exceeds valid range " 1994 "queue_map_min (zero-based) (%d) exceeds valid range "
1983 "[0 - %d] for (%d) queues on %s, resetting\n", 1995 "[0 - %d] for (%d) queues on %s, resetting\n",
1984 pkt_dev->queue_map_min, (ntxq ?: 1)- 1, ntxq, 1996 pkt_dev->queue_map_min, (ntxq ?: 1) - 1, ntxq,
1985 pkt_dev->odev->name); 1997 pkt_dev->odev->name);
1986 pkt_dev->queue_map_min = ntxq - 1; 1998 pkt_dev->queue_map_min = ntxq - 1;
1987 } 1999 }
@@ -1989,7 +2001,7 @@ static void pktgen_setup_inject(struct pktgen_dev *pkt_dev)
1989 printk(KERN_WARNING "pktgen: WARNING: Requested " 2001 printk(KERN_WARNING "pktgen: WARNING: Requested "
1990 "queue_map_max (zero-based) (%d) exceeds valid range " 2002 "queue_map_max (zero-based) (%d) exceeds valid range "
1991 "[0 - %d] for (%d) queues on %s, resetting\n", 2003 "[0 - %d] for (%d) queues on %s, resetting\n",
1992 pkt_dev->queue_map_max, (ntxq ?: 1)- 1, ntxq, 2004 pkt_dev->queue_map_max, (ntxq ?: 1) - 1, ntxq,
1993 pkt_dev->odev->name); 2005 pkt_dev->odev->name);
1994 pkt_dev->queue_map_max = ntxq - 1; 2006 pkt_dev->queue_map_max = ntxq - 1;
1995 } 2007 }
@@ -2030,7 +2042,8 @@ static void pktgen_setup_inject(struct pktgen_dev *pkt_dev)
2030 */ 2042 */
2031 2043
2032 rcu_read_lock(); 2044 rcu_read_lock();
2033 if ((idev = __in6_dev_get(pkt_dev->odev)) != NULL) { 2045 idev = __in6_dev_get(pkt_dev->odev);
2046 if (idev) {
2034 struct inet6_ifaddr *ifp; 2047 struct inet6_ifaddr *ifp;
2035 2048
2036 read_lock_bh(&idev->lock); 2049 read_lock_bh(&idev->lock);
@@ -2089,27 +2102,40 @@ static void pktgen_setup_inject(struct pktgen_dev *pkt_dev)
2089 pkt_dev->nflows = 0; 2102 pkt_dev->nflows = 0;
2090} 2103}
2091 2104
2092static void spin(struct pktgen_dev *pkt_dev, __u64 spin_until_us) 2105
2106static void spin(struct pktgen_dev *pkt_dev, ktime_t spin_until)
2093{ 2107{
2094 __u64 start; 2108 ktime_t start;
2095 __u64 now; 2109 s32 remaining;
2110 struct hrtimer_sleeper t;
2111
2112 hrtimer_init_on_stack(&t.timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
2113 hrtimer_set_expires(&t.timer, spin_until);
2114
2115 remaining = ktime_to_us(hrtimer_expires_remaining(&t.timer));
2116 if (remaining <= 0)
2117 return;
2096 2118
2097 start = now = getCurUs(); 2119 start = ktime_now();
2098 while (now < spin_until_us) { 2120 if (remaining < 100)
2099 /* TODO: optimize sleeping behavior */ 2121 udelay(remaining); /* really small just spin */
2100 if (spin_until_us - now > jiffies_to_usecs(1) + 1) 2122 else {
2101 schedule_timeout_interruptible(1); 2123 /* see do_nanosleep */
2102 else if (spin_until_us - now > 100) { 2124 hrtimer_init_sleeper(&t, current);
2103 if (!pkt_dev->running) 2125 do {
2104 return; 2126 set_current_state(TASK_INTERRUPTIBLE);
2105 if (need_resched()) 2127 hrtimer_start_expires(&t.timer, HRTIMER_MODE_ABS);
2128 if (!hrtimer_active(&t.timer))
2129 t.task = NULL;
2130
2131 if (likely(t.task))
2106 schedule(); 2132 schedule();
2107 }
2108 2133
2109 now = getCurUs(); 2134 hrtimer_cancel(&t.timer);
2135 } while (t.task && pkt_dev->running && !signal_pending(current));
2136 __set_current_state(TASK_RUNNING);
2110 } 2137 }
2111 2138 pkt_dev->idle_acc += ktime_to_ns(ktime_sub(ktime_now(), start));
2112 pkt_dev->idle_acc += now - start;
2113} 2139}
2114 2140
2115static inline void set_pkt_overhead(struct pktgen_dev *pkt_dev) 2141static inline void set_pkt_overhead(struct pktgen_dev *pkt_dev)
@@ -2120,13 +2146,9 @@ static inline void set_pkt_overhead(struct pktgen_dev *pkt_dev)
2120 pkt_dev->pkt_overhead += SVLAN_TAG_SIZE(pkt_dev); 2146 pkt_dev->pkt_overhead += SVLAN_TAG_SIZE(pkt_dev);
2121} 2147}
2122 2148
2123static inline int f_seen(struct pktgen_dev *pkt_dev, int flow) 2149static inline int f_seen(const struct pktgen_dev *pkt_dev, int flow)
2124{ 2150{
2125 2151 return !!(pkt_dev->flows[flow].flags & F_INIT);
2126 if (pkt_dev->flows[flow].flags & F_INIT)
2127 return 1;
2128 else
2129 return 0;
2130} 2152}
2131 2153
2132static inline int f_pick(struct pktgen_dev *pkt_dev) 2154static inline int f_pick(struct pktgen_dev *pkt_dev)
@@ -2174,7 +2196,7 @@ static void get_ipsec_sa(struct pktgen_dev *pkt_dev, int flow)
2174 if (x) { 2196 if (x) {
2175 pkt_dev->flows[flow].x = x; 2197 pkt_dev->flows[flow].x = x;
2176 set_pkt_overhead(pkt_dev); 2198 set_pkt_overhead(pkt_dev);
2177 pkt_dev->pkt_overhead+=x->props.header_len; 2199 pkt_dev->pkt_overhead += x->props.header_len;
2178 } 2200 }
2179 2201
2180 } 2202 }
@@ -2313,18 +2335,18 @@ static void mod_cur_headers(struct pktgen_dev *pkt_dev)
2313 2335
2314 if (!(pkt_dev->flags & F_IPV6)) { 2336 if (!(pkt_dev->flags & F_IPV6)) {
2315 2337
2316 if ((imn = ntohl(pkt_dev->saddr_min)) < (imx = 2338 imn = ntohl(pkt_dev->saddr_min);
2317 ntohl(pkt_dev-> 2339 imx = ntohl(pkt_dev->saddr_max);
2318 saddr_max))) { 2340 if (imn < imx) {
2319 __u32 t; 2341 __u32 t;
2320 if (pkt_dev->flags & F_IPSRC_RND) 2342 if (pkt_dev->flags & F_IPSRC_RND)
2321 t = random32() % (imx - imn) + imn; 2343 t = random32() % (imx - imn) + imn;
2322 else { 2344 else {
2323 t = ntohl(pkt_dev->cur_saddr); 2345 t = ntohl(pkt_dev->cur_saddr);
2324 t++; 2346 t++;
2325 if (t > imx) { 2347 if (t > imx)
2326 t = imn; 2348 t = imn;
2327 } 2349
2328 } 2350 }
2329 pkt_dev->cur_saddr = htonl(t); 2351 pkt_dev->cur_saddr = htonl(t);
2330 } 2352 }
@@ -2435,14 +2457,14 @@ static int pktgen_output_ipsec(struct sk_buff *skb, struct pktgen_dev *pkt_dev)
2435 if (err) 2457 if (err)
2436 goto error; 2458 goto error;
2437 2459
2438 x->curlft.bytes +=skb->len; 2460 x->curlft.bytes += skb->len;
2439 x->curlft.packets++; 2461 x->curlft.packets++;
2440error: 2462error:
2441 spin_unlock(&x->lock); 2463 spin_unlock(&x->lock);
2442 return err; 2464 return err;
2443} 2465}
2444 2466
2445static inline void free_SAs(struct pktgen_dev *pkt_dev) 2467static void free_SAs(struct pktgen_dev *pkt_dev)
2446{ 2468{
2447 if (pkt_dev->cflows) { 2469 if (pkt_dev->cflows) {
2448 /* let go of the SAs if we have them */ 2470 /* let go of the SAs if we have them */
@@ -2457,7 +2479,7 @@ static inline void free_SAs(struct pktgen_dev *pkt_dev)
2457 } 2479 }
2458} 2480}
2459 2481
2460static inline int process_ipsec(struct pktgen_dev *pkt_dev, 2482static int process_ipsec(struct pktgen_dev *pkt_dev,
2461 struct sk_buff *skb, __be16 protocol) 2483 struct sk_buff *skb, __be16 protocol)
2462{ 2484{
2463 if (pkt_dev->flags & F_IPSEC_ON) { 2485 if (pkt_dev->flags & F_IPSEC_ON) {
@@ -2467,11 +2489,11 @@ static inline int process_ipsec(struct pktgen_dev *pkt_dev,
2467 int ret; 2489 int ret;
2468 __u8 *eth; 2490 __u8 *eth;
2469 nhead = x->props.header_len - skb_headroom(skb); 2491 nhead = x->props.header_len - skb_headroom(skb);
2470 if (nhead >0) { 2492 if (nhead > 0) {
2471 ret = pskb_expand_head(skb, nhead, 0, GFP_ATOMIC); 2493 ret = pskb_expand_head(skb, nhead, 0, GFP_ATOMIC);
2472 if (ret < 0) { 2494 if (ret < 0) {
2473 printk(KERN_ERR "Error expanding " 2495 printk(KERN_ERR "Error expanding "
2474 "ipsec packet %d\n",ret); 2496 "ipsec packet %d\n", ret);
2475 goto err; 2497 goto err;
2476 } 2498 }
2477 } 2499 }
@@ -2481,13 +2503,13 @@ static inline int process_ipsec(struct pktgen_dev *pkt_dev,
2481 ret = pktgen_output_ipsec(skb, pkt_dev); 2503 ret = pktgen_output_ipsec(skb, pkt_dev);
2482 if (ret) { 2504 if (ret) {
2483 printk(KERN_ERR "Error creating ipsec " 2505 printk(KERN_ERR "Error creating ipsec "
2484 "packet %d\n",ret); 2506 "packet %d\n", ret);
2485 goto err; 2507 goto err;
2486 } 2508 }
2487 /* restore ll */ 2509 /* restore ll */
2488 eth = (__u8 *) skb_push(skb, ETH_HLEN); 2510 eth = (__u8 *) skb_push(skb, ETH_HLEN);
2489 memcpy(eth, pkt_dev->hh, 12); 2511 memcpy(eth, pkt_dev->hh, 12);
2490 *(u16 *) & eth[12] = protocol; 2512 *(u16 *) &eth[12] = protocol;
2491 } 2513 }
2492 } 2514 }
2493 return 1; 2515 return 1;
@@ -2500,9 +2522,9 @@ err:
2500static void mpls_push(__be32 *mpls, struct pktgen_dev *pkt_dev) 2522static void mpls_push(__be32 *mpls, struct pktgen_dev *pkt_dev)
2501{ 2523{
2502 unsigned i; 2524 unsigned i;
2503 for (i = 0; i < pkt_dev->nr_labels; i++) { 2525 for (i = 0; i < pkt_dev->nr_labels; i++)
2504 *mpls++ = pkt_dev->labels[i] & ~MPLS_STACK_BOTTOM; 2526 *mpls++ = pkt_dev->labels[i] & ~MPLS_STACK_BOTTOM;
2505 } 2527
2506 mpls--; 2528 mpls--;
2507 *mpls |= MPLS_STACK_BOTTOM; 2529 *mpls |= MPLS_STACK_BOTTOM;
2508} 2530}
@@ -2543,8 +2565,9 @@ static struct sk_buff *fill_packet_ipv4(struct net_device *odev,
2543 mod_cur_headers(pkt_dev); 2565 mod_cur_headers(pkt_dev);
2544 2566
2545 datalen = (odev->hard_header_len + 16) & ~0xf; 2567 datalen = (odev->hard_header_len + 16) & ~0xf;
2546 skb = alloc_skb(pkt_dev->cur_pkt_size + 64 + datalen + 2568 skb = __netdev_alloc_skb(odev,
2547 pkt_dev->pkt_overhead, GFP_ATOMIC); 2569 pkt_dev->cur_pkt_size + 64
2570 + datalen + pkt_dev->pkt_overhead, GFP_NOWAIT);
2548 if (!skb) { 2571 if (!skb) {
2549 sprintf(pkt_dev->result, "No memory"); 2572 sprintf(pkt_dev->result, "No memory");
2550 return NULL; 2573 return NULL;
@@ -2668,8 +2691,9 @@ static struct sk_buff *fill_packet_ipv4(struct net_device *odev,
2668 } 2691 }
2669 } 2692 }
2670 2693
2671 /* Stamp the time, and sequence number, convert them to network byte order */ 2694 /* Stamp the time, and sequence number,
2672 2695 * convert them to network byte order
2696 */
2673 if (pgh) { 2697 if (pgh) {
2674 struct timeval timestamp; 2698 struct timeval timestamp;
2675 2699
@@ -2882,8 +2906,9 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev,
2882 queue_map = pkt_dev->cur_queue_map; 2906 queue_map = pkt_dev->cur_queue_map;
2883 mod_cur_headers(pkt_dev); 2907 mod_cur_headers(pkt_dev);
2884 2908
2885 skb = alloc_skb(pkt_dev->cur_pkt_size + 64 + 16 + 2909 skb = __netdev_alloc_skb(odev,
2886 pkt_dev->pkt_overhead, GFP_ATOMIC); 2910 pkt_dev->cur_pkt_size + 64
2911 + 16 + pkt_dev->pkt_overhead, GFP_NOWAIT);
2887 if (!skb) { 2912 if (!skb) {
2888 sprintf(pkt_dev->result, "No memory"); 2913 sprintf(pkt_dev->result, "No memory");
2889 return NULL; 2914 return NULL;
@@ -2922,7 +2947,7 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev,
2922 udph = udp_hdr(skb); 2947 udph = udp_hdr(skb);
2923 2948
2924 memcpy(eth, pkt_dev->hh, 12); 2949 memcpy(eth, pkt_dev->hh, 12);
2925 *(__be16 *) & eth[12] = protocol; 2950 *(__be16 *) &eth[12] = protocol;
2926 2951
2927 /* Eth + IPh + UDPh + mpls */ 2952 /* Eth + IPh + UDPh + mpls */
2928 datalen = pkt_dev->cur_pkt_size - 14 - 2953 datalen = pkt_dev->cur_pkt_size - 14 -
@@ -3016,8 +3041,10 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev,
3016 } 3041 }
3017 } 3042 }
3018 3043
3019 /* Stamp the time, and sequence number, convert them to network byte order */ 3044 /* Stamp the time, and sequence number,
3020 /* should we update cloned packets too ? */ 3045 * convert them to network byte order
3046 * should we update cloned packets too ?
3047 */
3021 if (pgh) { 3048 if (pgh) {
3022 struct timeval timestamp; 3049 struct timeval timestamp;
3023 3050
@@ -3033,8 +3060,8 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev,
3033 return skb; 3060 return skb;
3034} 3061}
3035 3062
3036static inline struct sk_buff *fill_packet(struct net_device *odev, 3063static struct sk_buff *fill_packet(struct net_device *odev,
3037 struct pktgen_dev *pkt_dev) 3064 struct pktgen_dev *pkt_dev)
3038{ 3065{
3039 if (pkt_dev->flags & F_IPV6) 3066 if (pkt_dev->flags & F_IPV6)
3040 return fill_packet_ipv6(odev, pkt_dev); 3067 return fill_packet_ipv6(odev, pkt_dev);
@@ -3072,9 +3099,9 @@ static void pktgen_run(struct pktgen_thread *t)
3072 pktgen_clear_counters(pkt_dev); 3099 pktgen_clear_counters(pkt_dev);
3073 pkt_dev->running = 1; /* Cranke yeself! */ 3100 pkt_dev->running = 1; /* Cranke yeself! */
3074 pkt_dev->skb = NULL; 3101 pkt_dev->skb = NULL;
3075 pkt_dev->started_at = getCurUs(); 3102 pkt_dev->started_at =
3076 pkt_dev->next_tx_us = getCurUs(); /* Transmit immediately */ 3103 pkt_dev->next_tx = ktime_now();
3077 pkt_dev->next_tx_ns = 0; 3104
3078 set_pkt_overhead(pkt_dev); 3105 set_pkt_overhead(pkt_dev);
3079 3106
3080 strcpy(pkt_dev->result, "Starting"); 3107 strcpy(pkt_dev->result, "Starting");
@@ -3101,17 +3128,14 @@ static void pktgen_stop_all_threads_ifs(void)
3101 mutex_unlock(&pktgen_thread_lock); 3128 mutex_unlock(&pktgen_thread_lock);
3102} 3129}
3103 3130
3104static int thread_is_running(struct pktgen_thread *t) 3131static int thread_is_running(const struct pktgen_thread *t)
3105{ 3132{
3106 struct pktgen_dev *pkt_dev; 3133 const struct pktgen_dev *pkt_dev;
3107 int res = 0;
3108 3134
3109 list_for_each_entry(pkt_dev, &t->if_list, list) 3135 list_for_each_entry(pkt_dev, &t->if_list, list)
3110 if (pkt_dev->running) { 3136 if (pkt_dev->running)
3111 res = 1; 3137 return 1;
3112 break; 3138 return 0;
3113 }
3114 return res;
3115} 3139}
3116 3140
3117static int pktgen_wait_thread_run(struct pktgen_thread *t) 3141static int pktgen_wait_thread_run(struct pktgen_thread *t)
@@ -3168,7 +3192,8 @@ static void pktgen_run_all_threads(void)
3168 3192
3169 mutex_unlock(&pktgen_thread_lock); 3193 mutex_unlock(&pktgen_thread_lock);
3170 3194
3171 schedule_timeout_interruptible(msecs_to_jiffies(125)); /* Propagate thread->control */ 3195 /* Propagate thread->control */
3196 schedule_timeout_interruptible(msecs_to_jiffies(125));
3172 3197
3173 pktgen_wait_all_threads_run(); 3198 pktgen_wait_all_threads_run();
3174} 3199}
@@ -3186,35 +3211,29 @@ static void pktgen_reset_all_threads(void)
3186 3211
3187 mutex_unlock(&pktgen_thread_lock); 3212 mutex_unlock(&pktgen_thread_lock);
3188 3213
3189 schedule_timeout_interruptible(msecs_to_jiffies(125)); /* Propagate thread->control */ 3214 /* Propagate thread->control */
3215 schedule_timeout_interruptible(msecs_to_jiffies(125));
3190 3216
3191 pktgen_wait_all_threads_run(); 3217 pktgen_wait_all_threads_run();
3192} 3218}
3193 3219
3194static void show_results(struct pktgen_dev *pkt_dev, int nr_frags) 3220static void show_results(struct pktgen_dev *pkt_dev, int nr_frags)
3195{ 3221{
3196 __u64 total_us, bps, mbps, pps, idle; 3222 __u64 bps, mbps, pps;
3197 char *p = pkt_dev->result; 3223 char *p = pkt_dev->result;
3198 3224 ktime_t elapsed = ktime_sub(pkt_dev->stopped_at,
3199 total_us = pkt_dev->stopped_at - pkt_dev->started_at; 3225 pkt_dev->started_at);
3200 3226 ktime_t idle = ns_to_ktime(pkt_dev->idle_acc);
3201 idle = pkt_dev->idle_acc; 3227
3202 3228 p += sprintf(p, "OK: %llu(c%llu+d%llu) nsec, %llu (%dbyte,%dfrags)\n",
3203 p += sprintf(p, "OK: %llu(c%llu+d%llu) usec, %llu (%dbyte,%dfrags)\n", 3229 (unsigned long long)ktime_to_us(elapsed),
3204 (unsigned long long)total_us, 3230 (unsigned long long)ktime_to_us(ktime_sub(elapsed, idle)),
3205 (unsigned long long)(total_us - idle), 3231 (unsigned long long)ktime_to_us(idle),
3206 (unsigned long long)idle,
3207 (unsigned long long)pkt_dev->sofar, 3232 (unsigned long long)pkt_dev->sofar,
3208 pkt_dev->cur_pkt_size, nr_frags); 3233 pkt_dev->cur_pkt_size, nr_frags);
3209 3234
3210 pps = pkt_dev->sofar * USEC_PER_SEC; 3235 pps = div64_u64(pkt_dev->sofar * NSEC_PER_SEC,
3211 3236 ktime_to_ns(elapsed));
3212 while ((total_us >> 32) != 0) {
3213 pps >>= 1;
3214 total_us >>= 1;
3215 }
3216
3217 do_div(pps, total_us);
3218 3237
3219 bps = pps * 8 * pkt_dev->cur_pkt_size; 3238 bps = pps * 8 * pkt_dev->cur_pkt_size;
3220 3239
@@ -3228,7 +3247,6 @@ static void show_results(struct pktgen_dev *pkt_dev, int nr_frags)
3228} 3247}
3229 3248
3230/* Set stopped-at timer, remove from running list, do counters & statistics */ 3249/* Set stopped-at timer, remove from running list, do counters & statistics */
3231
3232static int pktgen_stop_device(struct pktgen_dev *pkt_dev) 3250static int pktgen_stop_device(struct pktgen_dev *pkt_dev)
3233{ 3251{
3234 int nr_frags = pkt_dev->skb ? skb_shinfo(pkt_dev->skb)->nr_frags : -1; 3252 int nr_frags = pkt_dev->skb ? skb_shinfo(pkt_dev->skb)->nr_frags : -1;
@@ -3239,7 +3257,9 @@ static int pktgen_stop_device(struct pktgen_dev *pkt_dev)
3239 return -EINVAL; 3257 return -EINVAL;
3240 } 3258 }
3241 3259
3242 pkt_dev->stopped_at = getCurUs(); 3260 kfree_skb(pkt_dev->skb);
3261 pkt_dev->skb = NULL;
3262 pkt_dev->stopped_at = ktime_now();
3243 pkt_dev->running = 0; 3263 pkt_dev->running = 0;
3244 3264
3245 show_results(pkt_dev, nr_frags); 3265 show_results(pkt_dev, nr_frags);
@@ -3258,7 +3278,7 @@ static struct pktgen_dev *next_to_run(struct pktgen_thread *t)
3258 continue; 3278 continue;
3259 if (best == NULL) 3279 if (best == NULL)
3260 best = pkt_dev; 3280 best = pkt_dev;
3261 else if (pkt_dev->next_tx_us < best->next_tx_us) 3281 else if (ktime_lt(pkt_dev->next_tx, best->next_tx))
3262 best = pkt_dev; 3282 best = pkt_dev;
3263 } 3283 }
3264 if_unlock(t); 3284 if_unlock(t);
@@ -3275,9 +3295,6 @@ static void pktgen_stop(struct pktgen_thread *t)
3275 3295
3276 list_for_each_entry(pkt_dev, &t->if_list, list) { 3296 list_for_each_entry(pkt_dev, &t->if_list, list) {
3277 pktgen_stop_device(pkt_dev); 3297 pktgen_stop_device(pkt_dev);
3278 kfree_skb(pkt_dev->skb);
3279
3280 pkt_dev->skb = NULL;
3281 } 3298 }
3282 3299
3283 if_unlock(t); 3300 if_unlock(t);
@@ -3348,30 +3365,37 @@ static void pktgen_rem_thread(struct pktgen_thread *t)
3348 mutex_unlock(&pktgen_thread_lock); 3365 mutex_unlock(&pktgen_thread_lock);
3349} 3366}
3350 3367
3351static __inline__ void pktgen_xmit(struct pktgen_dev *pkt_dev) 3368static void idle(struct pktgen_dev *pkt_dev)
3369{
3370 ktime_t idle_start = ktime_now();
3371
3372 if (need_resched())
3373 schedule();
3374 else
3375 cpu_relax();
3376
3377 pkt_dev->idle_acc += ktime_to_ns(ktime_sub(ktime_now(), idle_start));
3378}
3379
3380
3381static void pktgen_xmit(struct pktgen_dev *pkt_dev)
3352{ 3382{
3353 struct net_device *odev = pkt_dev->odev; 3383 struct net_device *odev = pkt_dev->odev;
3354 int (*xmit)(struct sk_buff *, struct net_device *) 3384 netdev_tx_t (*xmit)(struct sk_buff *, struct net_device *)
3355 = odev->netdev_ops->ndo_start_xmit; 3385 = odev->netdev_ops->ndo_start_xmit;
3356 struct netdev_queue *txq; 3386 struct netdev_queue *txq;
3357 __u64 idle_start = 0;
3358 u16 queue_map; 3387 u16 queue_map;
3359 int ret; 3388 int ret;
3360 3389
3361 if (pkt_dev->delay_us || pkt_dev->delay_ns) { 3390 if (pkt_dev->delay) {
3362 u64 now; 3391 spin(pkt_dev, pkt_dev->next_tx);
3363
3364 now = getCurUs();
3365 if (now < pkt_dev->next_tx_us)
3366 spin(pkt_dev, pkt_dev->next_tx_us);
3367 3392
3368 /* This is max DELAY, this has special meaning of 3393 /* This is max DELAY, this has special meaning of
3369 * "never transmit" 3394 * "never transmit"
3370 */ 3395 */
3371 if (pkt_dev->delay_us == 0x7FFFFFFF) { 3396 if (pkt_dev->delay == ULLONG_MAX) {
3372 pkt_dev->next_tx_us = getCurUs() + pkt_dev->delay_us; 3397 pkt_dev->next_tx = ktime_add_ns(ktime_now(), ULONG_MAX);
3373 pkt_dev->next_tx_ns = pkt_dev->delay_ns; 3398 return;
3374 goto out;
3375 } 3399 }
3376 } 3400 }
3377 3401
@@ -3383,47 +3407,32 @@ static __inline__ void pktgen_xmit(struct pktgen_dev *pkt_dev)
3383 } 3407 }
3384 3408
3385 txq = netdev_get_tx_queue(odev, queue_map); 3409 txq = netdev_get_tx_queue(odev, queue_map);
3386 if (netif_tx_queue_stopped(txq) || 3410 /* Did we saturate the queue already? */
3387 netif_tx_queue_frozen(txq) || 3411 if (netif_tx_queue_stopped(txq) || netif_tx_queue_frozen(txq)) {
3388 need_resched()) { 3412 /* If device is down, then all queues are permnantly frozen */
3389 idle_start = getCurUs(); 3413 if (netif_running(odev))
3390 3414 idle(pkt_dev);
3391 if (!netif_running(odev)) { 3415 else
3392 pktgen_stop_device(pkt_dev); 3416 pktgen_stop_device(pkt_dev);
3393 kfree_skb(pkt_dev->skb); 3417 return;
3394 pkt_dev->skb = NULL;
3395 goto out;
3396 }
3397 if (need_resched())
3398 schedule();
3399
3400 pkt_dev->idle_acc += getCurUs() - idle_start;
3401
3402 if (netif_tx_queue_stopped(txq) ||
3403 netif_tx_queue_frozen(txq)) {
3404 pkt_dev->next_tx_us = getCurUs(); /* TODO */
3405 pkt_dev->next_tx_ns = 0;
3406 goto out; /* Try the next interface */
3407 }
3408 } 3418 }
3409 3419
3410 if (pkt_dev->last_ok || !pkt_dev->skb) { 3420 if (!pkt_dev->skb || (pkt_dev->last_ok &&
3411 if ((++pkt_dev->clone_count >= pkt_dev->clone_skb) 3421 ++pkt_dev->clone_count >= pkt_dev->clone_skb)) {
3412 || (!pkt_dev->skb)) { 3422 /* build a new pkt */
3413 /* build a new pkt */ 3423 kfree_skb(pkt_dev->skb);
3414 kfree_skb(pkt_dev->skb);
3415 3424
3416 pkt_dev->skb = fill_packet(odev, pkt_dev); 3425 pkt_dev->skb = fill_packet(odev, pkt_dev);
3417 if (pkt_dev->skb == NULL) { 3426 if (pkt_dev->skb == NULL) {
3418 printk(KERN_ERR "pktgen: ERROR: couldn't " 3427 printk(KERN_ERR "pktgen: ERROR: couldn't "
3419 "allocate skb in fill_packet.\n"); 3428 "allocate skb in fill_packet.\n");
3420 schedule(); 3429 schedule();
3421 pkt_dev->clone_count--; /* back out increment, OOM */ 3430 pkt_dev->clone_count--; /* back out increment, OOM */
3422 goto out; 3431 return;
3423 }
3424 pkt_dev->allocated_skbs++;
3425 pkt_dev->clone_count = 0; /* reset counter */
3426 } 3432 }
3433
3434 pkt_dev->allocated_skbs++;
3435 pkt_dev->clone_count = 0; /* reset counter */
3427 } 3436 }
3428 3437
3429 /* fill_packet() might have changed the queue */ 3438 /* fill_packet() might have changed the queue */
@@ -3431,73 +3440,53 @@ static __inline__ void pktgen_xmit(struct pktgen_dev *pkt_dev)
3431 txq = netdev_get_tx_queue(odev, queue_map); 3440 txq = netdev_get_tx_queue(odev, queue_map);
3432 3441
3433 __netif_tx_lock_bh(txq); 3442 __netif_tx_lock_bh(txq);
3434 if (!netif_tx_queue_stopped(txq) && 3443 if (unlikely(netif_tx_queue_stopped(txq) || netif_tx_queue_frozen(txq)))
3435 !netif_tx_queue_frozen(txq)) { 3444 pkt_dev->last_ok = 0;
3436 3445 else {
3437 atomic_inc(&(pkt_dev->skb->users)); 3446 atomic_inc(&(pkt_dev->skb->users));
3438 retry_now: 3447
3448 retry_now:
3439 ret = (*xmit)(pkt_dev->skb, odev); 3449 ret = (*xmit)(pkt_dev->skb, odev);
3440 if (likely(ret == NETDEV_TX_OK)) { 3450 switch (ret) {
3451 case NETDEV_TX_OK:
3441 txq_trans_update(txq); 3452 txq_trans_update(txq);
3442 pkt_dev->last_ok = 1; 3453 pkt_dev->last_ok = 1;
3443 pkt_dev->sofar++; 3454 pkt_dev->sofar++;
3444 pkt_dev->seq_num++; 3455 pkt_dev->seq_num++;
3445 pkt_dev->tx_bytes += pkt_dev->cur_pkt_size; 3456 pkt_dev->tx_bytes += pkt_dev->cur_pkt_size;
3446 3457 break;
3447 } else if (ret == NETDEV_TX_LOCKED 3458 case NETDEV_TX_LOCKED:
3448 && (odev->features & NETIF_F_LLTX)) {
3449 cpu_relax(); 3459 cpu_relax();
3450 goto retry_now; 3460 goto retry_now;
3451 } else { /* Retry it next time */ 3461 default: /* Drivers are not supposed to return other values! */
3452 3462 if (net_ratelimit())
3453 atomic_dec(&(pkt_dev->skb->users)); 3463 pr_info("pktgen: %s xmit error: %d\n",
3454 3464 odev->name, ret);
3455 if (debug && net_ratelimit())
3456 printk(KERN_INFO "pktgen: Hard xmit error\n");
3457
3458 pkt_dev->errors++; 3465 pkt_dev->errors++;
3466 /* fallthru */
3467 case NETDEV_TX_BUSY:
3468 /* Retry it next time */
3469 atomic_dec(&(pkt_dev->skb->users));
3459 pkt_dev->last_ok = 0; 3470 pkt_dev->last_ok = 0;
3460 } 3471 }
3461 3472
3462 pkt_dev->next_tx_us = getCurUs(); 3473 if (pkt_dev->delay)
3463 pkt_dev->next_tx_ns = 0; 3474 pkt_dev->next_tx = ktime_add_ns(ktime_now(),
3464 3475 pkt_dev->delay);
3465 pkt_dev->next_tx_us += pkt_dev->delay_us;
3466 pkt_dev->next_tx_ns += pkt_dev->delay_ns;
3467
3468 if (pkt_dev->next_tx_ns > 1000) {
3469 pkt_dev->next_tx_us++;
3470 pkt_dev->next_tx_ns -= 1000;
3471 }
3472 } 3476 }
3473
3474 else { /* Retry it next time */
3475 pkt_dev->last_ok = 0;
3476 pkt_dev->next_tx_us = getCurUs(); /* TODO */
3477 pkt_dev->next_tx_ns = 0;
3478 }
3479
3480 __netif_tx_unlock_bh(txq); 3477 __netif_tx_unlock_bh(txq);
3481 3478
3482 /* If pkt_dev->count is zero, then run forever */ 3479 /* If pkt_dev->count is zero, then run forever */
3483 if ((pkt_dev->count != 0) && (pkt_dev->sofar >= pkt_dev->count)) { 3480 if ((pkt_dev->count != 0) && (pkt_dev->sofar >= pkt_dev->count)) {
3484 if (atomic_read(&(pkt_dev->skb->users)) != 1) { 3481 while (atomic_read(&(pkt_dev->skb->users)) != 1) {
3485 idle_start = getCurUs(); 3482 if (signal_pending(current))
3486 while (atomic_read(&(pkt_dev->skb->users)) != 1) { 3483 break;
3487 if (signal_pending(current)) { 3484 idle(pkt_dev);
3488 break;
3489 }
3490 schedule();
3491 }
3492 pkt_dev->idle_acc += getCurUs() - idle_start;
3493 } 3485 }
3494 3486
3495 /* Done with this */ 3487 /* Done with this */
3496 pktgen_stop_device(pkt_dev); 3488 pktgen_stop_device(pkt_dev);
3497 kfree_skb(pkt_dev->skb);
3498 pkt_dev->skb = NULL;
3499 } 3489 }
3500out:;
3501} 3490}
3502 3491
3503/* 3492/*
@@ -3516,7 +3505,8 @@ static int pktgen_thread_worker(void *arg)
3516 init_waitqueue_head(&t->queue); 3505 init_waitqueue_head(&t->queue);
3517 complete(&t->start_done); 3506 complete(&t->start_done);
3518 3507
3519 pr_debug("pktgen: starting pktgen/%d: pid=%d\n", cpu, task_pid_nr(current)); 3508 pr_debug("pktgen: starting pktgen/%d: pid=%d\n",
3509 cpu, task_pid_nr(current));
3520 3510
3521 set_current_state(TASK_INTERRUPTIBLE); 3511 set_current_state(TASK_INTERRUPTIBLE);
3522 3512
@@ -3651,8 +3641,7 @@ static int pktgen_add_device(struct pktgen_thread *t, const char *ifname)
3651 pkt_dev->max_pkt_size = ETH_ZLEN; 3641 pkt_dev->max_pkt_size = ETH_ZLEN;
3652 pkt_dev->nfrags = 0; 3642 pkt_dev->nfrags = 0;
3653 pkt_dev->clone_skb = pg_clone_skb_d; 3643 pkt_dev->clone_skb = pg_clone_skb_d;
3654 pkt_dev->delay_us = pg_delay_d / 1000; 3644 pkt_dev->delay = pg_delay_d;
3655 pkt_dev->delay_ns = pg_delay_d % 1000;
3656 pkt_dev->count = pg_count_d; 3645 pkt_dev->count = pg_count_d;
3657 pkt_dev->sofar = 0; 3646 pkt_dev->sofar = 0;
3658 pkt_dev->udp_src_min = 9; /* sink port */ 3647 pkt_dev->udp_src_min = 9; /* sink port */
@@ -3864,10 +3853,15 @@ static void __exit pg_cleanup(void)
3864module_init(pg_init); 3853module_init(pg_init);
3865module_exit(pg_cleanup); 3854module_exit(pg_cleanup);
3866 3855
3867MODULE_AUTHOR("Robert Olsson <robert.olsson@its.uu.se"); 3856MODULE_AUTHOR("Robert Olsson <robert.olsson@its.uu.se>");
3868MODULE_DESCRIPTION("Packet Generator tool"); 3857MODULE_DESCRIPTION("Packet Generator tool");
3869MODULE_LICENSE("GPL"); 3858MODULE_LICENSE("GPL");
3859MODULE_VERSION(VERSION);
3870module_param(pg_count_d, int, 0); 3860module_param(pg_count_d, int, 0);
3861MODULE_PARM_DESC(pg_count_d, "Default number of packets to inject");
3871module_param(pg_delay_d, int, 0); 3862module_param(pg_delay_d, int, 0);
3863MODULE_PARM_DESC(pg_delay_d, "Default delay between packets (nanoseconds)");
3872module_param(pg_clone_skb_d, int, 0); 3864module_param(pg_clone_skb_d, int, 0);
3865MODULE_PARM_DESC(pg_clone_skb_d, "Default number of copies of the same packet");
3873module_param(debug, int, 0); 3866module_param(debug, int, 0);
3867MODULE_PARM_DESC(debug, "Enable debugging of pktgen module");
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index d78030f88bd0..eb42873f2a3a 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -35,7 +35,6 @@
35#include <linux/security.h> 35#include <linux/security.h>
36#include <linux/mutex.h> 36#include <linux/mutex.h>
37#include <linux/if_addr.h> 37#include <linux/if_addr.h>
38#include <linux/nsproxy.h>
39 38
40#include <asm/uaccess.h> 39#include <asm/uaccess.h>
41#include <asm/system.h> 40#include <asm/system.h>
@@ -52,6 +51,7 @@
52#include <net/pkt_sched.h> 51#include <net/pkt_sched.h>
53#include <net/fib_rules.h> 52#include <net/fib_rules.h>
54#include <net/rtnetlink.h> 53#include <net/rtnetlink.h>
54#include <net/net_namespace.h>
55 55
56struct rtnl_link 56struct rtnl_link
57{ 57{
@@ -606,7 +606,6 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
606 int type, u32 pid, u32 seq, u32 change, 606 int type, u32 pid, u32 seq, u32 change,
607 unsigned int flags) 607 unsigned int flags)
608{ 608{
609 struct netdev_queue *txq;
610 struct ifinfomsg *ifm; 609 struct ifinfomsg *ifm;
611 struct nlmsghdr *nlh; 610 struct nlmsghdr *nlh;
612 const struct net_device_stats *stats; 611 const struct net_device_stats *stats;
@@ -637,9 +636,8 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
637 if (dev->master) 636 if (dev->master)
638 NLA_PUT_U32(skb, IFLA_MASTER, dev->master->ifindex); 637 NLA_PUT_U32(skb, IFLA_MASTER, dev->master->ifindex);
639 638
640 txq = netdev_get_tx_queue(dev, 0); 639 if (dev->qdisc)
641 if (txq->qdisc_sleeping) 640 NLA_PUT_STRING(skb, IFLA_QDISC, dev->qdisc->ops->id);
642 NLA_PUT_STRING(skb, IFLA_QDISC, txq->qdisc_sleeping->ops->id);
643 641
644 if (dev->ifalias) 642 if (dev->ifalias)
645 NLA_PUT_STRING(skb, IFLA_IFALIAS, dev->ifalias); 643 NLA_PUT_STRING(skb, IFLA_IFALIAS, dev->ifalias);
@@ -725,25 +723,6 @@ static const struct nla_policy ifla_info_policy[IFLA_INFO_MAX+1] = {
725 [IFLA_INFO_DATA] = { .type = NLA_NESTED }, 723 [IFLA_INFO_DATA] = { .type = NLA_NESTED },
726}; 724};
727 725
728static struct net *get_net_ns_by_pid(pid_t pid)
729{
730 struct task_struct *tsk;
731 struct net *net;
732
733 /* Lookup the network namespace */
734 net = ERR_PTR(-ESRCH);
735 rcu_read_lock();
736 tsk = find_task_by_vpid(pid);
737 if (tsk) {
738 struct nsproxy *nsproxy;
739 nsproxy = task_nsproxy(tsk);
740 if (nsproxy)
741 net = get_net(nsproxy->net_ns);
742 }
743 rcu_read_unlock();
744 return net;
745}
746
747static int validate_linkmsg(struct net_device *dev, struct nlattr *tb[]) 726static int validate_linkmsg(struct net_device *dev, struct nlattr *tb[])
748{ 727{
749 if (dev) { 728 if (dev) {
@@ -993,12 +972,20 @@ struct net_device *rtnl_create_link(struct net *net, char *ifname,
993{ 972{
994 int err; 973 int err;
995 struct net_device *dev; 974 struct net_device *dev;
975 unsigned int num_queues = 1;
976 unsigned int real_num_queues = 1;
996 977
978 if (ops->get_tx_queues) {
979 err = ops->get_tx_queues(net, tb, &num_queues, &real_num_queues);
980 if (err)
981 goto err;
982 }
997 err = -ENOMEM; 983 err = -ENOMEM;
998 dev = alloc_netdev(ops->priv_size, ifname, ops->setup); 984 dev = alloc_netdev_mq(ops->priv_size, ifname, ops->setup, num_queues);
999 if (!dev) 985 if (!dev)
1000 goto err; 986 goto err;
1001 987
988 dev->real_num_tx_queues = real_num_queues;
1002 if (strchr(dev->name, '%')) { 989 if (strchr(dev->name, '%')) {
1003 err = dev_alloc_name(dev, dev->name); 990 err = dev_alloc_name(dev, dev->name);
1004 if (err < 0) 991 if (err < 0)
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 9e0597d189b0..80a96166df39 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -559,9 +559,6 @@ static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
559#endif 559#endif
560#endif 560#endif
561 new->vlan_tci = old->vlan_tci; 561 new->vlan_tci = old->vlan_tci;
562#if defined(CONFIG_MAC80211) || defined(CONFIG_MAC80211_MODULE)
563 new->do_not_encrypt = old->do_not_encrypt;
564#endif
565 562
566 skb_copy_secmark(new, old); 563 skb_copy_secmark(new, old);
567} 564}
diff --git a/net/core/sock.c b/net/core/sock.c
index 76334228ed1c..30d5446512f9 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -142,7 +142,7 @@ static struct lock_class_key af_family_slock_keys[AF_MAX];
142 * strings build-time, so that runtime initialization of socket 142 * strings build-time, so that runtime initialization of socket
143 * locks is fast): 143 * locks is fast):
144 */ 144 */
145static const char *af_family_key_strings[AF_MAX+1] = { 145static const char *const af_family_key_strings[AF_MAX+1] = {
146 "sk_lock-AF_UNSPEC", "sk_lock-AF_UNIX" , "sk_lock-AF_INET" , 146 "sk_lock-AF_UNSPEC", "sk_lock-AF_UNIX" , "sk_lock-AF_INET" ,
147 "sk_lock-AF_AX25" , "sk_lock-AF_IPX" , "sk_lock-AF_APPLETALK", 147 "sk_lock-AF_AX25" , "sk_lock-AF_IPX" , "sk_lock-AF_APPLETALK",
148 "sk_lock-AF_NETROM", "sk_lock-AF_BRIDGE" , "sk_lock-AF_ATMPVC" , 148 "sk_lock-AF_NETROM", "sk_lock-AF_BRIDGE" , "sk_lock-AF_ATMPVC" ,
@@ -158,7 +158,7 @@ static const char *af_family_key_strings[AF_MAX+1] = {
158 "sk_lock-AF_IEEE802154", 158 "sk_lock-AF_IEEE802154",
159 "sk_lock-AF_MAX" 159 "sk_lock-AF_MAX"
160}; 160};
161static const char *af_family_slock_key_strings[AF_MAX+1] = { 161static const char *const af_family_slock_key_strings[AF_MAX+1] = {
162 "slock-AF_UNSPEC", "slock-AF_UNIX" , "slock-AF_INET" , 162 "slock-AF_UNSPEC", "slock-AF_UNIX" , "slock-AF_INET" ,
163 "slock-AF_AX25" , "slock-AF_IPX" , "slock-AF_APPLETALK", 163 "slock-AF_AX25" , "slock-AF_IPX" , "slock-AF_APPLETALK",
164 "slock-AF_NETROM", "slock-AF_BRIDGE" , "slock-AF_ATMPVC" , 164 "slock-AF_NETROM", "slock-AF_BRIDGE" , "slock-AF_ATMPVC" ,
@@ -174,7 +174,7 @@ static const char *af_family_slock_key_strings[AF_MAX+1] = {
174 "slock-AF_IEEE802154", 174 "slock-AF_IEEE802154",
175 "slock-AF_MAX" 175 "slock-AF_MAX"
176}; 176};
177static const char *af_family_clock_key_strings[AF_MAX+1] = { 177static const char *const af_family_clock_key_strings[AF_MAX+1] = {
178 "clock-AF_UNSPEC", "clock-AF_UNIX" , "clock-AF_INET" , 178 "clock-AF_UNSPEC", "clock-AF_UNIX" , "clock-AF_INET" ,
179 "clock-AF_AX25" , "clock-AF_IPX" , "clock-AF_APPLETALK", 179 "clock-AF_AX25" , "clock-AF_IPX" , "clock-AF_APPLETALK",
180 "clock-AF_NETROM", "clock-AF_BRIDGE" , "clock-AF_ATMPVC" , 180 "clock-AF_NETROM", "clock-AF_BRIDGE" , "clock-AF_ATMPVC" ,
@@ -482,6 +482,8 @@ int sock_setsockopt(struct socket *sock, int level, int optname,
482 sk->sk_reuse = valbool; 482 sk->sk_reuse = valbool;
483 break; 483 break;
484 case SO_TYPE: 484 case SO_TYPE:
485 case SO_PROTOCOL:
486 case SO_DOMAIN:
485 case SO_ERROR: 487 case SO_ERROR:
486 ret = -ENOPROTOOPT; 488 ret = -ENOPROTOOPT;
487 break; 489 break;
@@ -764,6 +766,14 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
764 v.val = sk->sk_type; 766 v.val = sk->sk_type;
765 break; 767 break;
766 768
769 case SO_PROTOCOL:
770 v.val = sk->sk_protocol;
771 break;
772
773 case SO_DOMAIN:
774 v.val = sk->sk_family;
775 break;
776
767 case SO_ERROR: 777 case SO_ERROR:
768 v.val = -sock_error(sk); 778 v.val = -sock_error(sk);
769 if (v.val == 0) 779 if (v.val == 0)