author     Linus Torvalds <torvalds@linux-foundation.org>   2008-07-22 22:09:51 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>   2008-07-22 22:09:51 -0400
commit     c010b2f76c3032e48097a6eef291d8593d5d79a6 (patch)
tree       16077c83703527732991a55dea1abe330c0ccdc6 /net
parent     6069fb2ef5d4f47432359c97f350e0cfcc4d208e (diff)
parent     521c4d96e0840ecce25b956e00f416ed499ef2ba (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6: (82 commits)
  ipw2200: Call netif_*_queue() interfaces properly.
  netxen: Needs to include linux/vmalloc.h
  [netdrvr] atl1d: fix !CONFIG_PM build
  r6040: rework init_one error handling
  r6040: bump release number to 0.18
  r6040: handle RX fifo full and no descriptor interrupts
  r6040: change the default waiting time
  r6040: use definitions for magic values in descriptor status
  r6040: completely rework the RX path
  r6040: call napi_disable when puting down the interface and set lp->dev accordingly.
  mv643xx_eth: fix NETPOLL build
  r6040: rework the RX buffers allocation routine
  r6040: fix scheduling while atomic in r6040_tx_timeout
  r6040: fix null pointer access and tx timeouts
  r6040: prefix all functions with r6040
  rndis_host: support WM6 devices as modems
  at91_ether: use netstats in net_device structure
  sfc: Create one RX queue and interrupt per CPU package by default
  sfc: Use a separate workqueue for resets
  sfc: I2C adapter initialisation fixes
  ...
Diffstat (limited to 'net')
 net/8021q/vlan_dev.c    |   4
 net/core/dev.c          | 102
 net/ipv4/tcp_output.c   |   2
 net/ipv4/udp.c          |   4
 net/ipv6/addrconf.c     |   2
 net/ipv6/ip6_fib.c      |  60
 net/ipv6/route.c        |  10
 net/netrom/af_netrom.c  |   2
 net/rose/af_rose.c      |   2
 net/sched/sch_api.c     |   2
 net/sched/sch_generic.c |   6
 net/sctp/outqueue.c     |   4
 net/sctp/proc.c         |   5
 13 files changed, 115 insertions(+), 90 deletions(-)
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index f42bc2b26b85..4bf014e51f8c 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -569,6 +569,7 @@ static void vlan_dev_set_rx_mode(struct net_device *vlan_dev)
  * separate class since they always nest.
  */
 static struct lock_class_key vlan_netdev_xmit_lock_key;
+static struct lock_class_key vlan_netdev_addr_lock_key;

 static void vlan_dev_set_lockdep_one(struct net_device *dev,
                                      struct netdev_queue *txq,
@@ -581,6 +582,9 @@ static void vlan_dev_set_lockdep_one(struct net_device *dev,

 static void vlan_dev_set_lockdep_class(struct net_device *dev, int subclass)
 {
+        lockdep_set_class_and_subclass(&dev->addr_list_lock,
+                                       &vlan_netdev_addr_lock_key,
+                                       subclass);
         netdev_for_each_tx_queue(dev, vlan_dev_set_lockdep_one, &subclass);
 }

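For context, a hedged reconstruction of the lock nesting that makes the separate addr_list_lock class necessary. The call chain below is inferred from the "they always nest" comment and from helpers of the same kernel series; it is not part of this patch.

/*
 * Illustrative only: refreshing the VLAN device's rx_mode takes its own
 * addr_list_lock first, then the real device's addr_list_lock inside
 * dev_mc_sync():
 *
 *   dev_set_rx_mode(vlan_dev)
 *     netif_addr_lock_bh(vlan_dev)          // vlan_dev->addr_list_lock
 *       vlan_dev_set_rx_mode(vlan_dev)
 *         dev_mc_sync(real_dev, vlan_dev)
 *           netif_addr_lock_bh(real_dev)    // real_dev->addr_list_lock, nested
 *
 * With both locks in the default class, lockdep would report this nesting as
 * recursive locking; the per-VLAN key and subclass registered in the hunk
 * above mark the two lock instances as distinct, so no false deadlock is
 * flagged.
 */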
diff --git a/net/core/dev.c b/net/core/dev.c
index cbc34c0db376..6bf217da9d8f 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -261,7 +261,7 @@ static RAW_NOTIFIER_HEAD(netdev_chain);

 DEFINE_PER_CPU(struct softnet_data, softnet_data);

-#ifdef CONFIG_DEBUG_LOCK_ALLOC
+#ifdef CONFIG_LOCKDEP
 /*
  * register_netdevice() inits txq->_xmit_lock and sets lockdep class
  * according to dev->type
@@ -301,6 +301,7 @@ static const char *netdev_lock_name[] =
         "_xmit_NONE"};

 static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
+static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];

 static inline unsigned short netdev_lock_pos(unsigned short dev_type)
 {
@@ -313,8 +314,8 @@ static inline unsigned short netdev_lock_pos(unsigned short dev_type)
        return ARRAY_SIZE(netdev_lock_type) - 1;
 }

-static inline void netdev_set_lockdep_class(spinlock_t *lock,
-                                           unsigned short dev_type)
+static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
+                                                unsigned short dev_type)
 {
        int i;

@@ -322,9 +323,22 @@ static inline void netdev_set_lockdep_class(spinlock_t *lock,
        lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
                                   netdev_lock_name[i]);
 }
+
+static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
+{
+       int i;
+
+       i = netdev_lock_pos(dev->type);
+       lockdep_set_class_and_name(&dev->addr_list_lock,
+                                  &netdev_addr_lock_key[i],
+                                  netdev_lock_name[i]);
+}
 #else
-static inline void netdev_set_lockdep_class(spinlock_t *lock,
-                                           unsigned short dev_type)
+static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
+                                                unsigned short dev_type)
+{
+}
+static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
 {
 }
 #endif
@@ -1645,32 +1659,6 @@ out_kfree_skb:
        return 0;
 }

-/**
- *     dev_queue_xmit - transmit a buffer
- *     @skb: buffer to transmit
- *
- *     Queue a buffer for transmission to a network device. The caller must
- *     have set the device and priority and built the buffer before calling
- *     this function. The function can be called from an interrupt.
- *
- *     A negative errno code is returned on a failure. A success does not
- *     guarantee the frame will be transmitted as it may be dropped due
- *     to congestion or traffic shaping.
- *
- * -----------------------------------------------------------------------------------
- *      I notice this method can also return errors from the queue disciplines,
- *      including NET_XMIT_DROP, which is a positive value.  So, errors can also
- *      be positive.
- *
- *      Regardless of the return value, the skb is consumed, so it is currently
- *      difficult to retry a send to this method.  (You can bump the ref count
- *      before sending to hold a reference for retry if you are careful.)
- *
- *      When calling this method, interrupts MUST be enabled.  This is because
- *      the BH enable code must have IRQs enabled so that it will not deadlock.
- *          --BLG
- */
-
 static u32 simple_tx_hashrnd;
 static int simple_tx_hashrnd_initialized = 0;

@@ -1738,6 +1726,31 @@ static struct netdev_queue *dev_pick_tx(struct net_device *dev,
        return netdev_get_tx_queue(dev, queue_index);
 }

+/**
+ *     dev_queue_xmit - transmit a buffer
+ *     @skb: buffer to transmit
+ *
+ *     Queue a buffer for transmission to a network device. The caller must
+ *     have set the device and priority and built the buffer before calling
+ *     this function. The function can be called from an interrupt.
+ *
+ *     A negative errno code is returned on a failure. A success does not
+ *     guarantee the frame will be transmitted as it may be dropped due
+ *     to congestion or traffic shaping.
+ *
+ * -----------------------------------------------------------------------------------
+ *      I notice this method can also return errors from the queue disciplines,
+ *      including NET_XMIT_DROP, which is a positive value.  So, errors can also
+ *      be positive.
+ *
+ *      Regardless of the return value, the skb is consumed, so it is currently
+ *      difficult to retry a send to this method.  (You can bump the ref count
+ *      before sending to hold a reference for retry if you are careful.)
+ *
+ *      When calling this method, interrupts MUST be enabled.  This is because
+ *      the BH enable code must have IRQs enabled so that it will not deadlock.
+ *          --BLG
+ */
 int dev_queue_xmit(struct sk_buff *skb)
 {
        struct net_device *dev = skb->dev;
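The kerneldoc moved above describes the dev_queue_xmit() contract; a hedged usage sketch of that contract follows (illustrative only; example_xmit(), its parameters and the chosen priority are assumptions, not part of this patch).

/* Build a frame and hand it to dev_queue_xmit().  Per the comment above, the
 * skb is consumed whatever the return value is, and positive NET_XMIT_* codes
 * from the qdisc layer may come back alongside negative errnos. */
static int example_xmit(struct net_device *dev, const void *data, size_t len)
{
        struct sk_buff *skb;

        skb = alloc_skb(len + LL_RESERVED_SPACE(dev), GFP_ATOMIC);
        if (!skb)
                return -ENOMEM;

        skb_reserve(skb, LL_RESERVED_SPACE(dev));  /* room for the link-layer header */
        memcpy(skb_put(skb, len), data, len);      /* payload */

        skb->dev = dev;
        skb->priority = TC_PRIO_CONTROL;

        return dev_queue_xmit(skb);  /* never touch skb again after this call */
}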
@@ -3852,7 +3865,7 @@ static void __netdev_init_queue_locks_one(struct net_device *dev,
                                          void *_unused)
 {
        spin_lock_init(&dev_queue->_xmit_lock);
-       netdev_set_lockdep_class(&dev_queue->_xmit_lock, dev->type);
+       netdev_set_xmit_lockdep_class(&dev_queue->_xmit_lock, dev->type);
        dev_queue->xmit_lock_owner = -1;
 }

@@ -3897,6 +3910,7 @@ int register_netdevice(struct net_device *dev)
        net = dev_net(dev);

        spin_lock_init(&dev->addr_list_lock);
+       netdev_set_addr_lockdep_class(dev);
        netdev_init_queue_locks(dev);

        dev->iflink = -1;
@@ -4207,7 +4221,7 @@ struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
 {
        struct netdev_queue *tx;
        struct net_device *dev;
-       int alloc_size;
+       size_t alloc_size;
        void *p;

        BUG_ON(strlen(name) >= sizeof(dev->name));
@@ -4227,7 +4241,7 @@ struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
                return NULL;
        }

-       tx = kzalloc(sizeof(struct netdev_queue) * queue_count, GFP_KERNEL);
+       tx = kcalloc(queue_count, sizeof(struct netdev_queue), GFP_KERNEL);
        if (!tx) {
                printk(KERN_ERR "alloc_netdev: Unable to allocate "
                       "tx qdiscs.\n");
@@ -4686,6 +4700,26 @@ err_name:
        return -ENOMEM;
 }

+char *netdev_drivername(struct net_device *dev, char *buffer, int len)
+{
+       struct device_driver *driver;
+       struct device *parent;
+
+       if (len <= 0 || !buffer)
+               return buffer;
+       buffer[0] = 0;
+
+       parent = dev->dev.parent;
+
+       if (!parent)
+               return buffer;
+
+       driver = parent->driver;
+       if (driver && driver->name)
+               strlcpy(buffer, driver->name, len);
+       return buffer;
+}
+
 static void __net_exit netdev_exit(struct net *net)
 {
        kfree(net->dev_name_head);
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 1fa683c0ba9b..a00532de2a8c 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -472,7 +472,7 @@ static unsigned tcp_syn_options(struct sock *sk, struct sk_buff *skb,
        }
        if (likely(sysctl_tcp_sack)) {
                opts->options |= OPTION_SACK_ADVERTISE;
-               if (unlikely(!OPTION_TS & opts->options))
+               if (unlikely(!(OPTION_TS & opts->options)))
                        size += TCPOLEN_SACKPERM_ALIGNED;
        }

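The one-character fix above is an operator-precedence bug: logical NOT binds tighter than bitwise AND, so the old condition could never be true and the SACK-permitted option space was never counted. A small self-contained illustration (the OPTION_TS value is assumed for the example, not taken from the patch):

#include <assert.h>

#define OPTION_TS (1 << 1)   /* a non-zero flag bit, value assumed for illustration */

int main(void)
{
        unsigned int options = 0;               /* timestamps were NOT requested */

        assert((!OPTION_TS & options) == 0);    /* old test: (!OPTION_TS) & options, always 0 */
        assert(!(OPTION_TS & options));         /* fixed test: true, so SACKPERM space is counted */
        return 0;
}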
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index a751770947a3..383d17359d01 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1325,6 +1325,8 @@ int udp_lib_setsockopt(struct sock *sk, int level, int optname,
                        return -ENOPROTOOPT;
                if (val != 0 && val < 8) /* Illegal coverage: use default (8) */
                        val = 8;
+               else if (val > USHORT_MAX)
+                       val = USHORT_MAX;
                up->pcslen = val;
                up->pcflag |= UDPLITE_SEND_CC;
                break;
@@ -1337,6 +1339,8 @@ int udp_lib_setsockopt(struct sock *sk, int level, int optname,
                        return -ENOPROTOOPT;
                if (val != 0 && val < 8) /* Avoid silly minimal values. */
                        val = 8;
+               else if (val > USHORT_MAX)
+                       val = USHORT_MAX;
                up->pcrlen = val;
                up->pcflag |= UDPLITE_RECV_CC;
                break;
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 9f4fcce6379b..74d543d504a1 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -153,7 +153,7 @@ static int ipv6_chk_same_addr(struct net *net, const struct in6_addr *addr,

 static ATOMIC_NOTIFIER_HEAD(inet6addr_chain);

-struct ipv6_devconf ipv6_devconf __read_mostly = {
+static struct ipv6_devconf ipv6_devconf __read_mostly = {
        .forwarding             = 0,
        .hop_limit              = IPV6_DEFAULT_HOPLIMIT,
        .mtu6                   = IPV6_MIN_MTU,
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index 4de2b9efcacb..08ea2de28d63 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -661,17 +661,17 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt,

 static __inline__ void fib6_start_gc(struct net *net, struct rt6_info *rt)
 {
-       if (net->ipv6.ip6_fib_timer->expires == 0 &&
+       if (!timer_pending(&net->ipv6.ip6_fib_timer) &&
            (rt->rt6i_flags & (RTF_EXPIRES|RTF_CACHE)))
-               mod_timer(net->ipv6.ip6_fib_timer, jiffies +
-                         net->ipv6.sysctl.ip6_rt_gc_interval);
+               mod_timer(&net->ipv6.ip6_fib_timer,
+                         jiffies + net->ipv6.sysctl.ip6_rt_gc_interval);
 }

 void fib6_force_start_gc(struct net *net)
 {
-       if (net->ipv6.ip6_fib_timer->expires == 0)
-               mod_timer(net->ipv6.ip6_fib_timer, jiffies +
-                         net->ipv6.sysctl.ip6_rt_gc_interval);
+       if (!timer_pending(&net->ipv6.ip6_fib_timer))
+               mod_timer(&net->ipv6.ip6_fib_timer,
+                         jiffies + net->ipv6.sysctl.ip6_rt_gc_interval);
 }

 /*
@@ -1447,27 +1447,23 @@ void fib6_run_gc(unsigned long expires, struct net *net)
                gc_args.timeout = expires ? (int)expires :
                        net->ipv6.sysctl.ip6_rt_gc_interval;
        } else {
-               local_bh_disable();
-               if (!spin_trylock(&fib6_gc_lock)) {
-                       mod_timer(net->ipv6.ip6_fib_timer, jiffies + HZ);
-                       local_bh_enable();
+               if (!spin_trylock_bh(&fib6_gc_lock)) {
+                       mod_timer(&net->ipv6.ip6_fib_timer, jiffies + HZ);
                        return;
                }
                gc_args.timeout = net->ipv6.sysctl.ip6_rt_gc_interval;
        }
-       gc_args.more = 0;

-       icmp6_dst_gc(&gc_args.more);
+       gc_args.more = icmp6_dst_gc();

        fib6_clean_all(net, fib6_age, 0, NULL);

        if (gc_args.more)
-               mod_timer(net->ipv6.ip6_fib_timer, jiffies +
-                         net->ipv6.sysctl.ip6_rt_gc_interval);
-       else {
-               del_timer(net->ipv6.ip6_fib_timer);
-               net->ipv6.ip6_fib_timer->expires = 0;
-       }
+               mod_timer(&net->ipv6.ip6_fib_timer,
+                         round_jiffies(jiffies
+                                       + net->ipv6.sysctl.ip6_rt_gc_interval));
+       else
+               del_timer(&net->ipv6.ip6_fib_timer);
        spin_unlock_bh(&fib6_gc_lock);
 }

@@ -1478,24 +1474,15 @@ static void fib6_gc_timer_cb(unsigned long arg)

 static int fib6_net_init(struct net *net)
 {
-       int ret;
-       struct timer_list *timer;
-
-       ret = -ENOMEM;
-       timer = kzalloc(sizeof(*timer), GFP_KERNEL);
-       if (!timer)
-               goto out;
-
-       setup_timer(timer, fib6_gc_timer_cb, (unsigned long)net);
-       net->ipv6.ip6_fib_timer = timer;
+       setup_timer(&net->ipv6.ip6_fib_timer, fib6_gc_timer_cb, (unsigned long)net);

        net->ipv6.rt6_stats = kzalloc(sizeof(*net->ipv6.rt6_stats), GFP_KERNEL);
        if (!net->ipv6.rt6_stats)
                goto out_timer;

-       net->ipv6.fib_table_hash =
-               kzalloc(sizeof(*net->ipv6.fib_table_hash)*FIB_TABLE_HASHSZ,
-                       GFP_KERNEL);
+       net->ipv6.fib_table_hash = kcalloc(FIB_TABLE_HASHSZ,
+                                          sizeof(*net->ipv6.fib_table_hash),
+                                          GFP_KERNEL);
        if (!net->ipv6.fib_table_hash)
                goto out_rt6_stats;

@@ -1521,9 +1508,7 @@ static int fib6_net_init(struct net *net)
 #endif
        fib6_tables_init(net);

-       ret = 0;
-out:
-       return ret;
+       return 0;

 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
 out_fib6_main_tbl:
@@ -1534,15 +1519,14 @@ out_fib_table_hash:
 out_rt6_stats:
        kfree(net->ipv6.rt6_stats);
 out_timer:
-       kfree(timer);
-       goto out;
+       return -ENOMEM;
 }

 static void fib6_net_exit(struct net *net)
 {
        rt6_ifdown(net, NULL);
-       del_timer_sync(net->ipv6.ip6_fib_timer);
-       kfree(net->ipv6.ip6_fib_timer);
+       del_timer_sync(&net->ipv6.ip6_fib_timer);
+
 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
        kfree(net->ipv6.fib6_local_tbl);
 #endif
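A note on the timer idiom these hunks adopt: the old code detected an idle timer by peeking at ->expires and had to zero it by hand after del_timer (that is what the removed net->ipv6.ip6_fib_timer->expires = 0 line was for), which is fragile; timer_pending() reads the pending state directly. A hedged sketch of the resulting pattern, with names mirroring the diff and the enclosing per-net struct taken as given:

/* Timer embedded in the per-namespace struct: nothing to allocate or free. */
setup_timer(&net->ipv6.ip6_fib_timer, fib6_gc_timer_cb, (unsigned long)net);

if (!timer_pending(&net->ipv6.ip6_fib_timer))        /* arm only if idle */
        mod_timer(&net->ipv6.ip6_fib_timer,
                  round_jiffies(jiffies + net->ipv6.sysctl.ip6_rt_gc_interval));

del_timer_sync(&net->ipv6.ip6_fib_timer);            /* teardown: wait for a running callback */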
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 615b328de251..86540b24b27c 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -978,13 +978,12 @@ out:
        return &rt->u.dst;
 }

-int icmp6_dst_gc(int *more)
+int icmp6_dst_gc(void)
 {
        struct dst_entry *dst, *next, **pprev;
-       int freed;
+       int more = 0;

        next = NULL;
-       freed = 0;

        spin_lock_bh(&icmp6_dst_lock);
        pprev = &icmp6_dst_gc_list;
@@ -993,16 +992,15 @@ int icmp6_dst_gc(int *more)
                if (!atomic_read(&dst->__refcnt)) {
                        *pprev = dst->next;
                        dst_free(dst);
-                       freed++;
                } else {
                        pprev = &dst->next;
-                       (*more)++;
+                       ++more;
                }
        }

        spin_unlock_bh(&icmp6_dst_lock);

-       return freed;
+       return more;
 }

 static int ip6_dst_gc(struct dst_ops *ops)
diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
index fccc250f95f5..532e4faa29f7 100644
--- a/net/netrom/af_netrom.c
+++ b/net/netrom/af_netrom.c
@@ -73,6 +73,7 @@ static const struct proto_ops nr_proto_ops;
  * separate class since they always nest.
  */
 static struct lock_class_key nr_netdev_xmit_lock_key;
+static struct lock_class_key nr_netdev_addr_lock_key;

 static void nr_set_lockdep_one(struct net_device *dev,
                               struct netdev_queue *txq,
@@ -83,6 +84,7 @@ static void nr_set_lockdep_one(struct net_device *dev,

 static void nr_set_lockdep_key(struct net_device *dev)
 {
+       lockdep_set_class(&dev->addr_list_lock, &nr_netdev_addr_lock_key);
        netdev_for_each_tx_queue(dev, nr_set_lockdep_one, NULL);
 }

diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c
index dbc963b4f5fb..a7f1ce11bc22 100644
--- a/net/rose/af_rose.c
+++ b/net/rose/af_rose.c
@@ -74,6 +74,7 @@ ax25_address rose_callsign;
  * separate class since they always nest.
  */
 static struct lock_class_key rose_netdev_xmit_lock_key;
+static struct lock_class_key rose_netdev_addr_lock_key;

 static void rose_set_lockdep_one(struct net_device *dev,
                                 struct netdev_queue *txq,
@@ -84,6 +85,7 @@ static void rose_set_lockdep_one(struct net_device *dev,

 static void rose_set_lockdep_key(struct net_device *dev)
 {
+       lockdep_set_class(&dev->addr_list_lock, &rose_netdev_addr_lock_key);
        netdev_for_each_tx_queue(dev, rose_set_lockdep_one, NULL);
 }

diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 5219d5f9d754..b0601642e227 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -447,7 +447,7 @@ void qdisc_watchdog_cancel(struct qdisc_watchdog *wd)
 }
 EXPORT_SYMBOL(qdisc_watchdog_cancel);

-struct hlist_head *qdisc_class_hash_alloc(unsigned int n)
+static struct hlist_head *qdisc_class_hash_alloc(unsigned int n)
 {
        unsigned int size = n * sizeof(struct hlist_head), i;
        struct hlist_head *h;
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index cb625b4d6da5..4ac7e3a8c253 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -212,9 +212,9 @@ static void dev_watchdog(unsigned long arg)
                if (some_queue_stopped &&
                    time_after(jiffies, (dev->trans_start +
                                         dev->watchdog_timeo))) {
-                       printk(KERN_INFO "NETDEV WATCHDOG: %s: "
-                              "transmit timed out\n",
-                              dev->name);
+                       char drivername[64];
+                       printk(KERN_INFO "NETDEV WATCHDOG: %s (%s): transmit timed out\n",
+                              dev->name, netdev_drivername(dev, drivername, 64));
                        dev->tx_timeout(dev);
                        WARN_ON_ONCE(1);
                }
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index 70ead8dc3485..4328ad5439c9 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -71,6 +71,8 @@ static void sctp_mark_missing(struct sctp_outq *q,

 static void sctp_generate_fwdtsn(struct sctp_outq *q, __u32 sack_ctsn);

+static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout);
+
 /* Add data to the front of the queue. */
 static inline void sctp_outq_head_data(struct sctp_outq *q,
                                       struct sctp_chunk *ch)
@@ -712,7 +714,7 @@ int sctp_outq_uncork(struct sctp_outq *q)
  * locking concerns must be made. Today we use the sock lock to protect
  * this function.
  */
-int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
+static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
 {
        struct sctp_packet *packet;
        struct sctp_packet singleton;
diff --git a/net/sctp/proc.c b/net/sctp/proc.c
index 5dd89831eceb..f268910620be 100644
--- a/net/sctp/proc.c
+++ b/net/sctp/proc.c
@@ -519,8 +519,3 @@ int __init sctp_remaddr_proc_init(void)

        return 0;
 }
-
-void sctp_assoc_proc_exit(void)
-{
-       remove_proc_entry("remaddr", proc_net_sctp);
-}