Diffstat (limited to 'net')
-rw-r--r--  net/8021q/vlan_dev.c | 4
-rw-r--r--  net/bluetooth/hci_sysfs.c | 7
-rw-r--r--  net/core/dev.c | 137
-rw-r--r--  net/core/user_dma.c | 1
-rw-r--r--  net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c | 18
-rw-r--r--  net/ipv4/netfilter/nf_nat_core.c | 8
-rw-r--r--  net/ipv4/netfilter/nf_nat_sip.c | 38
-rw-r--r--  net/ipv4/tcp_output.c | 2
-rw-r--r--  net/ipv4/udp.c | 4
-rw-r--r--  net/ipv6/addrconf.c | 2
-rw-r--r--  net/ipv6/ip6_fib.c | 60
-rw-r--r--  net/ipv6/route.c | 10
-rw-r--r--  net/netfilter/Kconfig | 9
-rw-r--r--  net/netfilter/Makefile | 2
-rw-r--r--  net/netfilter/nf_conntrack_acct.c | 104
-rw-r--r--  net/netfilter/nf_conntrack_core.c | 39
-rw-r--r--  net/netfilter/nf_conntrack_netlink.c | 44
-rw-r--r--  net/netfilter/nf_conntrack_proto_sctp.c | 8
-rw-r--r--  net/netfilter/nf_conntrack_standalone.c | 18
-rw-r--r--  net/netfilter/nfnetlink_log.c | 8
-rw-r--r--  net/netfilter/xt_TCPMSS.c | 42
-rw-r--r--  net/netfilter/xt_connbytes.c | 8
-rw-r--r--  net/netfilter/xt_time.c | 2
-rw-r--r--  net/netrom/af_netrom.c | 2
-rw-r--r--  net/rose/af_rose.c | 2
-rw-r--r--  net/sched/sch_api.c | 2
-rw-r--r--  net/sched/sch_generic.c | 108
-rw-r--r--  net/sctp/outqueue.c | 4
-rw-r--r--  net/sctp/proc.c | 5
29 files changed, 443 insertions(+), 255 deletions(-)
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index f42bc2b26b85..4bf014e51f8c 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -569,6 +569,7 @@ static void vlan_dev_set_rx_mode(struct net_device *vlan_dev)
  * separate class since they always nest.
  */
 static struct lock_class_key vlan_netdev_xmit_lock_key;
+static struct lock_class_key vlan_netdev_addr_lock_key;
 
 static void vlan_dev_set_lockdep_one(struct net_device *dev,
 				     struct netdev_queue *txq,
@@ -581,6 +582,9 @@ static void vlan_dev_set_lockdep_one(struct net_device *dev,
 
 static void vlan_dev_set_lockdep_class(struct net_device *dev, int subclass)
 {
+	lockdep_set_class_and_subclass(&dev->addr_list_lock,
+				       &vlan_netdev_addr_lock_key,
+				       subclass);
 	netdev_for_each_tx_queue(dev, vlan_dev_set_lockdep_one, &subclass);
 }
 
diff --git a/net/bluetooth/hci_sysfs.c b/net/bluetooth/hci_sysfs.c
index 844ca5f1b2d4..c85bf8f678dc 100644
--- a/net/bluetooth/hci_sysfs.c
+++ b/net/bluetooth/hci_sysfs.c
@@ -398,10 +398,6 @@ int hci_register_sysfs(struct hci_dev *hdev)
 		if (device_create_file(dev, bt_attrs[i]) < 0)
 			BT_ERR("Failed to create device attribute");
 
-	if (sysfs_create_link(&bt_class->subsys.kobj,
-			      &dev->kobj, kobject_name(&dev->kobj)) < 0)
-		BT_ERR("Failed to create class symlink");
-
 	return 0;
 }
 
@@ -409,9 +405,6 @@ void hci_unregister_sysfs(struct hci_dev *hdev)
 {
 	BT_DBG("%p name %s type %d", hdev, hdev->name, hdev->type);
 
-	sysfs_remove_link(&bt_class->subsys.kobj,
-			  kobject_name(&hdev->dev.kobj));
-
 	device_del(&hdev->dev);
 }
 
diff --git a/net/core/dev.c b/net/core/dev.c
index 106d5e6d987c..7463a2150b09 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -124,6 +124,8 @@
 #include <linux/ip.h>
 #include <linux/ipv6.h>
 #include <linux/in.h>
+#include <linux/jhash.h>
+#include <linux/random.h>
 
 #include "net-sysfs.h"
 
@@ -259,7 +261,7 @@ static RAW_NOTIFIER_HEAD(netdev_chain);
 
 DEFINE_PER_CPU(struct softnet_data, softnet_data);
 
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
+#ifdef CONFIG_LOCKDEP
 /*
  * register_netdevice() inits txq->_xmit_lock and sets lockdep class
  * according to dev->type
@@ -299,6 +301,7 @@ static const char *netdev_lock_name[] =
 	 "_xmit_NONE"};
 
 static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
+static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];
 
 static inline unsigned short netdev_lock_pos(unsigned short dev_type)
 {
@@ -311,8 +314,8 @@ static inline unsigned short netdev_lock_pos(unsigned short dev_type)
 	return ARRAY_SIZE(netdev_lock_type) - 1;
 }
 
-static inline void netdev_set_lockdep_class(spinlock_t *lock,
-					    unsigned short dev_type)
+static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
+						 unsigned short dev_type)
 {
 	int i;
 
@@ -320,9 +323,22 @@ static inline void netdev_set_lockdep_class(spinlock_t *lock,
 	lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
 				   netdev_lock_name[i]);
 }
+
+static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
+{
+	int i;
+
+	i = netdev_lock_pos(dev->type);
+	lockdep_set_class_and_name(&dev->addr_list_lock,
+				   &netdev_addr_lock_key[i],
+				   netdev_lock_name[i]);
+}
 #else
-static inline void netdev_set_lockdep_class(spinlock_t *lock,
-					    unsigned short dev_type)
+static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
+						 unsigned short dev_type)
+{
+}
+static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
 {
 }
 #endif
@@ -1325,7 +1341,8 @@ static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
 
 void __netif_schedule(struct Qdisc *q)
 {
-	BUG_ON(q == &noop_qdisc);
+	if (WARN_ON_ONCE(q == &noop_qdisc))
+		return;
 
 	if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state)) {
 		struct softnet_data *sd;
@@ -1642,60 +1659,37 @@ out_kfree_skb:
 	return 0;
 }
 
-/**
- *	dev_queue_xmit - transmit a buffer
- *	@skb: buffer to transmit
- *
- *	Queue a buffer for transmission to a network device. The caller must
- *	have set the device and priority and built the buffer before calling
- *	this function. The function can be called from an interrupt.
- *
- *	A negative errno code is returned on a failure. A success does not
- *	guarantee the frame will be transmitted as it may be dropped due
- *	to congestion or traffic shaping.
- *
- * -----------------------------------------------------------------------------------
- *	I notice this method can also return errors from the queue disciplines,
- *	including NET_XMIT_DROP, which is a positive value. So, errors can also
- *	be positive.
- *
- *	Regardless of the return value, the skb is consumed, so it is currently
- *	difficult to retry a send to this method. (You can bump the ref count
- *	before sending to hold a reference for retry if you are careful.)
- *
- *	When calling this method, interrupts MUST be enabled. This is because
- *	the BH enable code must have IRQs enabled so that it will not deadlock.
- *	    --BLG
- */
+static u32 simple_tx_hashrnd;
+static int simple_tx_hashrnd_initialized = 0;
 
 static u16 simple_tx_hash(struct net_device *dev, struct sk_buff *skb)
 {
-	u32 *addr, *ports, hash, ihl;
+	u32 addr1, addr2, ports;
+	u32 hash, ihl;
 	u8 ip_proto;
-	int alen;
+
+	if (unlikely(!simple_tx_hashrnd_initialized)) {
+		get_random_bytes(&simple_tx_hashrnd, 4);
+		simple_tx_hashrnd_initialized = 1;
+	}
 
 	switch (skb->protocol) {
 	case __constant_htons(ETH_P_IP):
 		ip_proto = ip_hdr(skb)->protocol;
-		addr = &ip_hdr(skb)->saddr;
+		addr1 = ip_hdr(skb)->saddr;
+		addr2 = ip_hdr(skb)->daddr;
 		ihl = ip_hdr(skb)->ihl;
-		alen = 2;
 		break;
 	case __constant_htons(ETH_P_IPV6):
 		ip_proto = ipv6_hdr(skb)->nexthdr;
-		addr = &ipv6_hdr(skb)->saddr.s6_addr32[0];
+		addr1 = ipv6_hdr(skb)->saddr.s6_addr32[3];
+		addr2 = ipv6_hdr(skb)->daddr.s6_addr32[3];
 		ihl = (40 >> 2);
-		alen = 8;
 		break;
 	default:
 		return 0;
 	}
 
-	ports = (u32 *) (skb_network_header(skb) + (ihl * 4));
-
-	hash = 0;
-	while (alen--)
-		hash ^= *addr++;
 
 	switch (ip_proto) {
 	case IPPROTO_TCP:
@@ -1705,14 +1699,17 @@ static u16 simple_tx_hash(struct net_device *dev, struct sk_buff *skb)
 	case IPPROTO_AH:
 	case IPPROTO_SCTP:
 	case IPPROTO_UDPLITE:
-		hash ^= *ports;
+		ports = *((u32 *) (skb_network_header(skb) + (ihl * 4)));
 		break;
 
 	default:
+		ports = 0;
 		break;
 	}
 
-	return hash % dev->real_num_tx_queues;
+	hash = jhash_3words(addr1, addr2, ports, simple_tx_hashrnd);
+
+	return (u16) (((u64) hash * dev->real_num_tx_queues) >> 32);
 }
 
 static struct netdev_queue *dev_pick_tx(struct net_device *dev,
@@ -1729,6 +1726,31 @@ static struct netdev_queue *dev_pick_tx(struct net_device *dev,
 	return netdev_get_tx_queue(dev, queue_index);
 }
 
+/**
+ *	dev_queue_xmit - transmit a buffer
+ *	@skb: buffer to transmit
+ *
+ *	Queue a buffer for transmission to a network device. The caller must
+ *	have set the device and priority and built the buffer before calling
+ *	this function. The function can be called from an interrupt.
+ *
+ *	A negative errno code is returned on a failure. A success does not
+ *	guarantee the frame will be transmitted as it may be dropped due
+ *	to congestion or traffic shaping.
+ *
+ * -----------------------------------------------------------------------------------
+ *	I notice this method can also return errors from the queue disciplines,
+ *	including NET_XMIT_DROP, which is a positive value. So, errors can also
+ *	be positive.
+ *
+ *	Regardless of the return value, the skb is consumed, so it is currently
+ *	difficult to retry a send to this method. (You can bump the ref count
+ *	before sending to hold a reference for retry if you are careful.)
+ *
+ *	When calling this method, interrupts MUST be enabled. This is because
+ *	the BH enable code must have IRQs enabled so that it will not deadlock.
+ *	    --BLG
+ */
 int dev_queue_xmit(struct sk_buff *skb)
 {
 	struct net_device *dev = skb->dev;
@@ -3843,7 +3865,7 @@ static void __netdev_init_queue_locks_one(struct net_device *dev,
 					  void *_unused)
 {
 	spin_lock_init(&dev_queue->_xmit_lock);
-	netdev_set_lockdep_class(&dev_queue->_xmit_lock, dev->type);
+	netdev_set_xmit_lockdep_class(&dev_queue->_xmit_lock, dev->type);
 	dev_queue->xmit_lock_owner = -1;
 }
 
@@ -3888,6 +3910,7 @@ int register_netdevice(struct net_device *dev)
 	net = dev_net(dev);
 
 	spin_lock_init(&dev->addr_list_lock);
+	netdev_set_addr_lockdep_class(dev);
 	netdev_init_queue_locks(dev);
 
 	dev->iflink = -1;
@@ -4198,7 +4221,7 @@ struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
 {
 	struct netdev_queue *tx;
 	struct net_device *dev;
-	int alloc_size;
+	size_t alloc_size;
 	void *p;
 
 	BUG_ON(strlen(name) >= sizeof(dev->name));
@@ -4218,7 +4241,7 @@ struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
 		return NULL;
 	}
 
-	tx = kzalloc(sizeof(struct netdev_queue) * queue_count, GFP_KERNEL);
+	tx = kcalloc(queue_count, sizeof(struct netdev_queue), GFP_KERNEL);
 	if (!tx) {
 		printk(KERN_ERR "alloc_netdev: Unable to allocate "
 		       "tx qdiscs.\n");
@@ -4677,6 +4700,26 @@ err_name:
 	return -ENOMEM;
 }
 
+char *netdev_drivername(struct net_device *dev, char *buffer, int len)
+{
+	struct device_driver *driver;
+	struct device *parent;
+
+	if (len <= 0 || !buffer)
+		return buffer;
+	buffer[0] = 0;
+
+	parent = dev->dev.parent;
+
+	if (!parent)
+		return buffer;
+
+	driver = parent->driver;
+	if (driver && driver->name)
+		strlcpy(buffer, driver->name, len);
+	return buffer;
+}
+
 static void __net_exit netdev_exit(struct net *net)
 {
 	kfree(net->dev_name_head);
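
Note on the queue-selection step above (illustration only, not part of the patch): the reworked simple_tx_hash() replaces `hash % real_num_tx_queues` with the high 32 bits of `(u64)hash * real_num_tx_queues`. The sketch below is plain user-space C; the helper name pick_queue is invented for the example, but the arithmetic is the one line taken from the patch.

    #include <stdint.h>
    #include <stdio.h>

    /* Scale a 32-bit hash onto [0, nqueues) the way the patched
     * simple_tx_hash() does: multiply by the queue count and keep the
     * high 32 bits, so no modulo/division is needed. */
    static uint16_t pick_queue(uint32_t hash, uint16_t nqueues)
    {
            return (uint16_t)(((uint64_t)hash * nqueues) >> 32);
    }

    int main(void)
    {
            const uint16_t nqueues = 8;
            const uint32_t samples[] = { 0x00000000u, 0x1fffffffu, 0x80000000u,
                                         0xdeadbeefu, 0xffffffffu };

            for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
                    printf("hash %08x -> queue %u (modulo would give %u)\n",
                           (unsigned int)samples[i],
                           (unsigned int)pick_queue(samples[i], nqueues),
                           (unsigned int)(samples[i] % nqueues));
            return 0;
    }

Both mappings are uniform for uniform hashes; the multiply-and-shift form simply avoids a divide on the transmit fast path.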
diff --git a/net/core/user_dma.c b/net/core/user_dma.c
index c77aff9c6eb3..8c6b706963ff 100644
--- a/net/core/user_dma.c
+++ b/net/core/user_dma.c
@@ -34,6 +34,7 @@
 #define NET_DMA_DEFAULT_COPYBREAK 4096
 
 int sysctl_tcp_dma_copybreak = NET_DMA_DEFAULT_COPYBREAK;
+EXPORT_SYMBOL(sysctl_tcp_dma_copybreak);
 
 /**
  *	dma_skb_copy_datagram_iovec - Copy a datagram to an iovec.
diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c
index 40a46d482490..3a020720e40b 100644
--- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c
+++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c
@@ -18,19 +18,7 @@
 #include <net/netfilter/nf_conntrack_l3proto.h>
 #include <net/netfilter/nf_conntrack_l4proto.h>
 #include <net/netfilter/nf_conntrack_expect.h>
-
-#ifdef CONFIG_NF_CT_ACCT
-static unsigned int
-seq_print_counters(struct seq_file *s,
-		   const struct ip_conntrack_counter *counter)
-{
-	return seq_printf(s, "packets=%llu bytes=%llu ",
-			  (unsigned long long)counter->packets,
-			  (unsigned long long)counter->bytes);
-}
-#else
-#define seq_print_counters(x, y) 0
-#endif
+#include <net/netfilter/nf_conntrack_acct.h>
 
 struct ct_iter_state {
 	unsigned int bucket;
@@ -127,7 +115,7 @@ static int ct_seq_show(struct seq_file *s, void *v)
 			l3proto, l4proto))
 		return -ENOSPC;
 
-	if (seq_print_counters(s, &ct->counters[IP_CT_DIR_ORIGINAL]))
+	if (seq_print_acct(s, ct, IP_CT_DIR_ORIGINAL))
 		return -ENOSPC;
 
 	if (!(test_bit(IPS_SEEN_REPLY_BIT, &ct->status)))
@@ -138,7 +126,7 @@ static int ct_seq_show(struct seq_file *s, void *v)
 			l3proto, l4proto))
 		return -ENOSPC;
 
-	if (seq_print_counters(s, &ct->counters[IP_CT_DIR_REPLY]))
+	if (seq_print_acct(s, ct, IP_CT_DIR_REPLY))
 		return -ENOSPC;
 
 	if (test_bit(IPS_ASSURED_BIT, &ct->status))
diff --git a/net/ipv4/netfilter/nf_nat_core.c b/net/ipv4/netfilter/nf_nat_core.c
index d2a887fc8d9b..6c6a3cba8d50 100644
--- a/net/ipv4/netfilter/nf_nat_core.c
+++ b/net/ipv4/netfilter/nf_nat_core.c
@@ -240,12 +240,12 @@ get_unique_tuple(struct nf_conntrack_tuple *tuple,
 	   This is only required for source (ie. NAT/masq) mappings.
 	   So far, we don't do local source mappings, so multiple
 	   manips not an issue.  */
-	if (maniptype == IP_NAT_MANIP_SRC) {
+	if (maniptype == IP_NAT_MANIP_SRC &&
+	    !(range->flags & IP_NAT_RANGE_PROTO_RANDOM)) {
 		if (find_appropriate_src(orig_tuple, tuple, range)) {
 			pr_debug("get_unique_tuple: Found current src map\n");
-			if (!(range->flags & IP_NAT_RANGE_PROTO_RANDOM))
-				if (!nf_nat_used_tuple(tuple, ct))
-					return;
+			if (!nf_nat_used_tuple(tuple, ct))
+				return;
 		}
 	}
 
diff --git a/net/ipv4/netfilter/nf_nat_sip.c b/net/ipv4/netfilter/nf_nat_sip.c
index 4334d5cabc5b..14544320c545 100644
--- a/net/ipv4/netfilter/nf_nat_sip.c
+++ b/net/ipv4/netfilter/nf_nat_sip.c
@@ -318,11 +318,11 @@ static int mangle_content_len(struct sk_buff *skb,
 			     buffer, buflen);
 }
 
-static unsigned mangle_sdp_packet(struct sk_buff *skb, const char **dptr,
-				  unsigned int dataoff, unsigned int *datalen,
-				  enum sdp_header_types type,
-				  enum sdp_header_types term,
-				  char *buffer, int buflen)
+static int mangle_sdp_packet(struct sk_buff *skb, const char **dptr,
+			     unsigned int dataoff, unsigned int *datalen,
+			     enum sdp_header_types type,
+			     enum sdp_header_types term,
+			     char *buffer, int buflen)
 {
 	enum ip_conntrack_info ctinfo;
 	struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
@@ -330,9 +330,9 @@ static unsigned mangle_sdp_packet(struct sk_buff *skb, const char **dptr,
 
 	if (ct_sip_get_sdp_header(ct, *dptr, dataoff, *datalen, type, term,
 				  &matchoff, &matchlen) <= 0)
-		return 0;
+		return -ENOENT;
 	return mangle_packet(skb, dptr, datalen, matchoff, matchlen,
-			     buffer, buflen);
+			     buffer, buflen) ? 0 : -EINVAL;
 }
 
 static unsigned int ip_nat_sdp_addr(struct sk_buff *skb, const char **dptr,
@@ -346,8 +346,8 @@ static unsigned int ip_nat_sdp_addr(struct sk_buff *skb, const char **dptr,
 	unsigned int buflen;
 
 	buflen = sprintf(buffer, NIPQUAD_FMT, NIPQUAD(addr->ip));
-	if (!mangle_sdp_packet(skb, dptr, dataoff, datalen, type, term,
-			       buffer, buflen))
+	if (mangle_sdp_packet(skb, dptr, dataoff, datalen, type, term,
+			      buffer, buflen))
 		return 0;
 
 	return mangle_content_len(skb, dptr, datalen);
@@ -381,15 +381,27 @@ static unsigned int ip_nat_sdp_session(struct sk_buff *skb, const char **dptr,
 
 	/* Mangle session description owner and contact addresses */
 	buflen = sprintf(buffer, "%u.%u.%u.%u", NIPQUAD(addr->ip));
-	if (!mangle_sdp_packet(skb, dptr, dataoff, datalen,
-			       SDP_HDR_OWNER_IP4, SDP_HDR_MEDIA,
-			       buffer, buflen))
+	if (mangle_sdp_packet(skb, dptr, dataoff, datalen,
+			      SDP_HDR_OWNER_IP4, SDP_HDR_MEDIA,
+			      buffer, buflen))
 		return 0;
 
-	if (!mangle_sdp_packet(skb, dptr, dataoff, datalen,
-			       SDP_HDR_CONNECTION_IP4, SDP_HDR_MEDIA,
-			       buffer, buflen))
+	switch (mangle_sdp_packet(skb, dptr, dataoff, datalen,
+				  SDP_HDR_CONNECTION_IP4, SDP_HDR_MEDIA,
+				  buffer, buflen)) {
+	case 0:
+	/*
+	 * RFC 2327:
+	 *
+	 * Session description
+	 *
+	 * c=* (connection information - not required if included in all media)
+	 */
+	case -ENOENT:
+		break;
+	default:
 		return 0;
+	}
 
 	return mangle_content_len(skb, dptr, datalen);
 }
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 1fa683c0ba9b..a00532de2a8c 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -472,7 +472,7 @@ static unsigned tcp_syn_options(struct sock *sk, struct sk_buff *skb,
 	}
 	if (likely(sysctl_tcp_sack)) {
 		opts->options |= OPTION_SACK_ADVERTISE;
-		if (unlikely(!OPTION_TS & opts->options))
+		if (unlikely(!(OPTION_TS & opts->options)))
 			size += TCPOLEN_SACKPERM_ALIGNED;
 	}
 
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index a751770947a3..383d17359d01 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1325,6 +1325,8 @@ int udp_lib_setsockopt(struct sock *sk, int level, int optname,
 			return -ENOPROTOOPT;
 		if (val != 0 && val < 8) /* Illegal coverage: use default (8) */
 			val = 8;
+		else if (val > USHORT_MAX)
+			val = USHORT_MAX;
 		up->pcslen = val;
 		up->pcflag |= UDPLITE_SEND_CC;
 		break;
@@ -1337,6 +1339,8 @@ int udp_lib_setsockopt(struct sock *sk, int level, int optname,
 			return -ENOPROTOOPT;
 		if (val != 0 && val < 8) /* Avoid silly minimal values. */
 			val = 8;
+		else if (val > USHORT_MAX)
+			val = USHORT_MAX;
 		up->pcrlen = val;
 		up->pcflag |= UDPLITE_RECV_CC;
 		break;
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 9f4fcce6379b..74d543d504a1 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -153,7 +153,7 @@ static int ipv6_chk_same_addr(struct net *net, const struct in6_addr *addr,
 
 static ATOMIC_NOTIFIER_HEAD(inet6addr_chain);
 
-struct ipv6_devconf ipv6_devconf __read_mostly = {
+static struct ipv6_devconf ipv6_devconf __read_mostly = {
 	.forwarding		= 0,
 	.hop_limit		= IPV6_DEFAULT_HOPLIMIT,
 	.mtu6			= IPV6_MIN_MTU,
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index 4de2b9efcacb..08ea2de28d63 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -661,17 +661,17 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt,
 
 static __inline__ void fib6_start_gc(struct net *net, struct rt6_info *rt)
 {
-	if (net->ipv6.ip6_fib_timer->expires == 0 &&
+	if (!timer_pending(&net->ipv6.ip6_fib_timer) &&
 	    (rt->rt6i_flags & (RTF_EXPIRES|RTF_CACHE)))
-		mod_timer(net->ipv6.ip6_fib_timer, jiffies +
-			  net->ipv6.sysctl.ip6_rt_gc_interval);
+		mod_timer(&net->ipv6.ip6_fib_timer,
+			  jiffies + net->ipv6.sysctl.ip6_rt_gc_interval);
 }
 
 void fib6_force_start_gc(struct net *net)
 {
-	if (net->ipv6.ip6_fib_timer->expires == 0)
-		mod_timer(net->ipv6.ip6_fib_timer, jiffies +
-			  net->ipv6.sysctl.ip6_rt_gc_interval);
+	if (!timer_pending(&net->ipv6.ip6_fib_timer))
+		mod_timer(&net->ipv6.ip6_fib_timer,
+			  jiffies + net->ipv6.sysctl.ip6_rt_gc_interval);
 }
 
 /*
@@ -1447,27 +1447,23 @@ void fib6_run_gc(unsigned long expires, struct net *net)
 		gc_args.timeout = expires ? (int)expires :
 			net->ipv6.sysctl.ip6_rt_gc_interval;
 	} else {
-		local_bh_disable();
-		if (!spin_trylock(&fib6_gc_lock)) {
-			mod_timer(net->ipv6.ip6_fib_timer, jiffies + HZ);
-			local_bh_enable();
+		if (!spin_trylock_bh(&fib6_gc_lock)) {
+			mod_timer(&net->ipv6.ip6_fib_timer, jiffies + HZ);
 			return;
 		}
 		gc_args.timeout = net->ipv6.sysctl.ip6_rt_gc_interval;
 	}
-	gc_args.more = 0;
 
-	icmp6_dst_gc(&gc_args.more);
+	gc_args.more = icmp6_dst_gc();
 
 	fib6_clean_all(net, fib6_age, 0, NULL);
 
 	if (gc_args.more)
-		mod_timer(net->ipv6.ip6_fib_timer, jiffies +
-			  net->ipv6.sysctl.ip6_rt_gc_interval);
-	else {
-		del_timer(net->ipv6.ip6_fib_timer);
-		net->ipv6.ip6_fib_timer->expires = 0;
-	}
+		mod_timer(&net->ipv6.ip6_fib_timer,
+			  round_jiffies(jiffies
+					+ net->ipv6.sysctl.ip6_rt_gc_interval));
+	else
+		del_timer(&net->ipv6.ip6_fib_timer);
 	spin_unlock_bh(&fib6_gc_lock);
 }
 
@@ -1478,24 +1474,15 @@ static void fib6_gc_timer_cb(unsigned long arg)
 
 static int fib6_net_init(struct net *net)
 {
-	int ret;
-	struct timer_list *timer;
-
-	ret = -ENOMEM;
-	timer = kzalloc(sizeof(*timer), GFP_KERNEL);
-	if (!timer)
-		goto out;
-
-	setup_timer(timer, fib6_gc_timer_cb, (unsigned long)net);
-	net->ipv6.ip6_fib_timer = timer;
+	setup_timer(&net->ipv6.ip6_fib_timer, fib6_gc_timer_cb, (unsigned long)net);
 
 	net->ipv6.rt6_stats = kzalloc(sizeof(*net->ipv6.rt6_stats), GFP_KERNEL);
 	if (!net->ipv6.rt6_stats)
 		goto out_timer;
 
-	net->ipv6.fib_table_hash =
-		kzalloc(sizeof(*net->ipv6.fib_table_hash)*FIB_TABLE_HASHSZ,
-			GFP_KERNEL);
+	net->ipv6.fib_table_hash = kcalloc(FIB_TABLE_HASHSZ,
+					   sizeof(*net->ipv6.fib_table_hash),
+					   GFP_KERNEL);
 	if (!net->ipv6.fib_table_hash)
 		goto out_rt6_stats;
 
@@ -1521,9 +1508,7 @@ static int fib6_net_init(struct net *net)
 #endif
 	fib6_tables_init(net);
 
-	ret = 0;
-out:
-	return ret;
+	return 0;
 
 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
 out_fib6_main_tbl:
@@ -1534,15 +1519,14 @@ out_fib_table_hash:
 out_rt6_stats:
 	kfree(net->ipv6.rt6_stats);
 out_timer:
-	kfree(timer);
-	goto out;
+	return -ENOMEM;
 }
 
 static void fib6_net_exit(struct net *net)
 {
 	rt6_ifdown(net, NULL);
-	del_timer_sync(net->ipv6.ip6_fib_timer);
-	kfree(net->ipv6.ip6_fib_timer);
+	del_timer_sync(&net->ipv6.ip6_fib_timer);
+
 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
 	kfree(net->ipv6.fib6_local_tbl);
 #endif
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 615b328de251..86540b24b27c 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -978,13 +978,12 @@ out:
 	return &rt->u.dst;
 }
 
-int icmp6_dst_gc(int *more)
+int icmp6_dst_gc(void)
 {
 	struct dst_entry *dst, *next, **pprev;
-	int freed;
+	int more = 0;
 
 	next = NULL;
-	freed = 0;
 
 	spin_lock_bh(&icmp6_dst_lock);
 	pprev = &icmp6_dst_gc_list;
@@ -993,16 +992,15 @@ int icmp6_dst_gc(int *more)
 		if (!atomic_read(&dst->__refcnt)) {
 			*pprev = dst->next;
 			dst_free(dst);
-			freed++;
 		} else {
 			pprev = &dst->next;
-			(*more)++;
+			++more;
 		}
 	}
 
 	spin_unlock_bh(&icmp6_dst_lock);
 
-	return freed;
+	return more;
 }
 
 static int ip6_dst_gc(struct dst_ops *ops)
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index 316c7af1d2b1..ee898e74808d 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -49,6 +49,15 @@ config NF_CT_ACCT
 	  Those counters can be used for flow-based accounting or the
 	  `connbytes' match.
 
+	  Please note that currently this option only sets a default state.
+	  You may change it at boot time with nf_conntrack.acct=0/1 kernel
+	  paramater or by loading the nf_conntrack module with acct=0/1.
+
+	  You may also disable/enable it on a running system with:
+	   sysctl net.netfilter.nf_conntrack_acct=0/1
+
+	  This option will be removed in 2.6.29.
+
 	  If unsure, say `N'.
 
 config NF_CONNTRACK_MARK
diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
index 5c4b183f6422..3bd2cc556aea 100644
--- a/net/netfilter/Makefile
+++ b/net/netfilter/Makefile
@@ -1,6 +1,6 @@
1netfilter-objs := core.o nf_log.o nf_queue.o nf_sockopt.o 1netfilter-objs := core.o nf_log.o nf_queue.o nf_sockopt.o
2 2
3nf_conntrack-y := nf_conntrack_core.o nf_conntrack_standalone.o nf_conntrack_expect.o nf_conntrack_helper.o nf_conntrack_proto.o nf_conntrack_l3proto_generic.o nf_conntrack_proto_generic.o nf_conntrack_proto_tcp.o nf_conntrack_proto_udp.o nf_conntrack_extend.o 3nf_conntrack-y := nf_conntrack_core.o nf_conntrack_standalone.o nf_conntrack_expect.o nf_conntrack_helper.o nf_conntrack_proto.o nf_conntrack_l3proto_generic.o nf_conntrack_proto_generic.o nf_conntrack_proto_tcp.o nf_conntrack_proto_udp.o nf_conntrack_extend.o nf_conntrack_acct.o
4nf_conntrack-$(CONFIG_NF_CONNTRACK_EVENTS) += nf_conntrack_ecache.o 4nf_conntrack-$(CONFIG_NF_CONNTRACK_EVENTS) += nf_conntrack_ecache.o
5 5
6obj-$(CONFIG_NETFILTER) = netfilter.o 6obj-$(CONFIG_NETFILTER) = netfilter.o
diff --git a/net/netfilter/nf_conntrack_acct.c b/net/netfilter/nf_conntrack_acct.c
new file mode 100644
index 000000000000..59bd8b903a19
--- /dev/null
+++ b/net/netfilter/nf_conntrack_acct.c
@@ -0,0 +1,104 @@
+/* Accouting handling for netfilter. */
+
+/*
+ * (C) 2008 Krzysztof Piotr Oledzki <ole@ans.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/netfilter.h>
+#include <linux/kernel.h>
+#include <linux/moduleparam.h>
+
+#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_conntrack_extend.h>
+#include <net/netfilter/nf_conntrack_acct.h>
+
+#ifdef CONFIG_NF_CT_ACCT
+#define NF_CT_ACCT_DEFAULT 1
+#else
+#define NF_CT_ACCT_DEFAULT 0
+#endif
+
+int nf_ct_acct __read_mostly = NF_CT_ACCT_DEFAULT;
+EXPORT_SYMBOL_GPL(nf_ct_acct);
+
+module_param_named(acct, nf_ct_acct, bool, 0644);
+MODULE_PARM_DESC(acct, "Enable connection tracking flow accounting.");
+
+#ifdef CONFIG_SYSCTL
+static struct ctl_table_header *acct_sysctl_header;
+static struct ctl_table acct_sysctl_table[] = {
+	{
+		.ctl_name	= CTL_UNNUMBERED,
+		.procname	= "nf_conntrack_acct",
+		.data		= &nf_ct_acct,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= &proc_dointvec,
+	},
+	{}
+};
+#endif /* CONFIG_SYSCTL */
+
+unsigned int
+seq_print_acct(struct seq_file *s, const struct nf_conn *ct, int dir)
+{
+	struct nf_conn_counter *acct;
+
+	acct = nf_conn_acct_find(ct);
+	if (!acct)
+		return 0;
+
+	return seq_printf(s, "packets=%llu bytes=%llu ",
+			  (unsigned long long)acct[dir].packets,
+			  (unsigned long long)acct[dir].bytes);
+};
+EXPORT_SYMBOL_GPL(seq_print_acct);
+
+static struct nf_ct_ext_type acct_extend __read_mostly = {
+	.len	= sizeof(struct nf_conn_counter[IP_CT_DIR_MAX]),
+	.align	= __alignof__(struct nf_conn_counter[IP_CT_DIR_MAX]),
+	.id	= NF_CT_EXT_ACCT,
+};
+
+int nf_conntrack_acct_init(void)
+{
+	int ret;
+
+#ifdef CONFIG_NF_CT_ACCT
+	printk(KERN_WARNING "CONFIG_NF_CT_ACCT is deprecated and will be removed soon. Plase use\n");
+	printk(KERN_WARNING "nf_conntrack.acct=1 kernel paramater, acct=1 nf_conntrack module option or\n");
+	printk(KERN_WARNING "sysctl net.netfilter.nf_conntrack_acct=1 to enable it.\n");
+#endif
+
+	ret = nf_ct_extend_register(&acct_extend);
+	if (ret < 0) {
+		printk(KERN_ERR "nf_conntrack_acct: Unable to register extension\n");
+		return ret;
+	}
+
+#ifdef CONFIG_SYSCTL
+	acct_sysctl_header = register_sysctl_paths(nf_net_netfilter_sysctl_path,
+						   acct_sysctl_table);
+
+	if (!acct_sysctl_header) {
+		nf_ct_extend_unregister(&acct_extend);
+
+		printk(KERN_ERR "nf_conntrack_acct: can't register to sysctl.\n");
+		return -ENOMEM;
+	}
+#endif
+
+	return 0;
+}
+
+void nf_conntrack_acct_fini(void)
+{
+#ifdef CONFIG_SYSCTL
+	unregister_sysctl_table(acct_sysctl_header);
+#endif
+	nf_ct_extend_unregister(&acct_extend);
+}
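
The new file above moves the per-connection packet/byte counters out of struct nf_conn and into an optional conntrack extension, which is why the nf_conntrack_core.c hunks that follow look the counters up with nf_conn_acct_find() and tolerate a NULL result. A small self-contained sketch of that "extension may be absent" pattern, in plain user-space C with invented names (struct connection, account_packet) rather than the kernel API:

    #include <stdio.h>
    #include <stdlib.h>

    /* Stand-ins for IP_CT_DIR_ORIGINAL / IP_CT_DIR_REPLY. */
    enum { DIR_ORIGINAL = 0, DIR_REPLY = 1, DIR_MAX = 2 };

    struct flow_counter {
            unsigned long long packets;
            unsigned long long bytes;
    };

    /* Accounting data is optional, mirroring how the acct extension may or
     * may not be attached to a conntrack entry. */
    struct connection {
            struct flow_counter *acct;      /* NULL when accounting is off */
    };

    /* Update counters only if they exist - the same NULL check the patched
     * __nf_ct_refresh_acct() performs after nf_conn_acct_find(). */
    static void account_packet(struct connection *conn, int dir, unsigned int len)
    {
            if (!conn->acct)
                    return;
            conn->acct[dir].packets++;
            conn->acct[dir].bytes += len;
    }

    int main(void)
    {
            struct connection with_acct = { calloc(DIR_MAX, sizeof(struct flow_counter)) };
            struct connection without_acct = { NULL };

            if (!with_acct.acct)
                    return 1;

            account_packet(&with_acct, DIR_ORIGINAL, 1500);
            account_packet(&without_acct, DIR_ORIGINAL, 1500); /* silently skipped */

            printf("packets=%llu bytes=%llu\n",
                   with_acct.acct[DIR_ORIGINAL].packets,
                   with_acct.acct[DIR_ORIGINAL].bytes);
            free(with_acct.acct);
            return 0;
    }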
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 28d03e64200b..c519d090bdb9 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -37,6 +37,7 @@
37#include <net/netfilter/nf_conntrack_helper.h> 37#include <net/netfilter/nf_conntrack_helper.h>
38#include <net/netfilter/nf_conntrack_core.h> 38#include <net/netfilter/nf_conntrack_core.h>
39#include <net/netfilter/nf_conntrack_extend.h> 39#include <net/netfilter/nf_conntrack_extend.h>
40#include <net/netfilter/nf_conntrack_acct.h>
40 41
41#define NF_CONNTRACK_VERSION "0.5.0" 42#define NF_CONNTRACK_VERSION "0.5.0"
42 43
@@ -555,6 +556,8 @@ init_conntrack(const struct nf_conntrack_tuple *tuple,
555 return NULL; 556 return NULL;
556 } 557 }
557 558
559 nf_ct_acct_ext_add(ct, GFP_ATOMIC);
560
558 spin_lock_bh(&nf_conntrack_lock); 561 spin_lock_bh(&nf_conntrack_lock);
559 exp = nf_ct_find_expectation(tuple); 562 exp = nf_ct_find_expectation(tuple);
560 if (exp) { 563 if (exp) {
@@ -828,17 +831,16 @@ void __nf_ct_refresh_acct(struct nf_conn *ct,
828 } 831 }
829 832
830acct: 833acct:
831#ifdef CONFIG_NF_CT_ACCT
832 if (do_acct) { 834 if (do_acct) {
833 ct->counters[CTINFO2DIR(ctinfo)].packets++; 835 struct nf_conn_counter *acct;
834 ct->counters[CTINFO2DIR(ctinfo)].bytes +=
835 skb->len - skb_network_offset(skb);
836 836
837 if ((ct->counters[CTINFO2DIR(ctinfo)].packets & 0x80000000) 837 acct = nf_conn_acct_find(ct);
838 || (ct->counters[CTINFO2DIR(ctinfo)].bytes & 0x80000000)) 838 if (acct) {
839 event |= IPCT_COUNTER_FILLING; 839 acct[CTINFO2DIR(ctinfo)].packets++;
840 acct[CTINFO2DIR(ctinfo)].bytes +=
841 skb->len - skb_network_offset(skb);
842 }
840 } 843 }
841#endif
842 844
843 spin_unlock_bh(&nf_conntrack_lock); 845 spin_unlock_bh(&nf_conntrack_lock);
844 846
@@ -853,15 +855,19 @@ bool __nf_ct_kill_acct(struct nf_conn *ct,
853 const struct sk_buff *skb, 855 const struct sk_buff *skb,
854 int do_acct) 856 int do_acct)
855{ 857{
856#ifdef CONFIG_NF_CT_ACCT
857 if (do_acct) { 858 if (do_acct) {
859 struct nf_conn_counter *acct;
860
858 spin_lock_bh(&nf_conntrack_lock); 861 spin_lock_bh(&nf_conntrack_lock);
859 ct->counters[CTINFO2DIR(ctinfo)].packets++; 862 acct = nf_conn_acct_find(ct);
860 ct->counters[CTINFO2DIR(ctinfo)].bytes += 863 if (acct) {
861 skb->len - skb_network_offset(skb); 864 acct[CTINFO2DIR(ctinfo)].packets++;
865 acct[CTINFO2DIR(ctinfo)].bytes +=
866 skb->len - skb_network_offset(skb);
867 }
862 spin_unlock_bh(&nf_conntrack_lock); 868 spin_unlock_bh(&nf_conntrack_lock);
863 } 869 }
864#endif 870
865 if (del_timer(&ct->timeout)) { 871 if (del_timer(&ct->timeout)) {
866 ct->timeout.function((unsigned long)ct); 872 ct->timeout.function((unsigned long)ct);
867 return true; 873 return true;
@@ -1029,6 +1035,7 @@ void nf_conntrack_cleanup(void)
1029 nf_conntrack_proto_fini(); 1035 nf_conntrack_proto_fini();
1030 nf_conntrack_helper_fini(); 1036 nf_conntrack_helper_fini();
1031 nf_conntrack_expect_fini(); 1037 nf_conntrack_expect_fini();
1038 nf_conntrack_acct_fini();
1032} 1039}
1033 1040
1034struct hlist_head *nf_ct_alloc_hashtable(unsigned int *sizep, int *vmalloced) 1041struct hlist_head *nf_ct_alloc_hashtable(unsigned int *sizep, int *vmalloced)
@@ -1168,6 +1175,10 @@ int __init nf_conntrack_init(void)
1168 if (ret < 0) 1175 if (ret < 0)
1169 goto out_fini_expect; 1176 goto out_fini_expect;
1170 1177
1178 ret = nf_conntrack_acct_init();
1179 if (ret < 0)
1180 goto out_fini_helper;
1181
1171 /* For use by REJECT target */ 1182 /* For use by REJECT target */
1172 rcu_assign_pointer(ip_ct_attach, nf_conntrack_attach); 1183 rcu_assign_pointer(ip_ct_attach, nf_conntrack_attach);
1173 rcu_assign_pointer(nf_ct_destroy, destroy_conntrack); 1184 rcu_assign_pointer(nf_ct_destroy, destroy_conntrack);
@@ -1180,6 +1191,8 @@ int __init nf_conntrack_init(void)
1180 1191
1181 return ret; 1192 return ret;
1182 1193
1194out_fini_helper:
1195 nf_conntrack_helper_fini();
1183out_fini_expect: 1196out_fini_expect:
1184 nf_conntrack_expect_fini(); 1197 nf_conntrack_expect_fini();
1185out_fini_proto: 1198out_fini_proto:
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 95a7967731f9..105a616c5c78 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -37,6 +37,7 @@
37#include <net/netfilter/nf_conntrack_l3proto.h> 37#include <net/netfilter/nf_conntrack_l3proto.h>
38#include <net/netfilter/nf_conntrack_l4proto.h> 38#include <net/netfilter/nf_conntrack_l4proto.h>
39#include <net/netfilter/nf_conntrack_tuple.h> 39#include <net/netfilter/nf_conntrack_tuple.h>
40#include <net/netfilter/nf_conntrack_acct.h>
40#ifdef CONFIG_NF_NAT_NEEDED 41#ifdef CONFIG_NF_NAT_NEEDED
41#include <net/netfilter/nf_nat_core.h> 42#include <net/netfilter/nf_nat_core.h>
42#include <net/netfilter/nf_nat_protocol.h> 43#include <net/netfilter/nf_nat_protocol.h>
@@ -206,22 +207,26 @@ nla_put_failure:
206 return -1; 207 return -1;
207} 208}
208 209
209#ifdef CONFIG_NF_CT_ACCT
210static int 210static int
211ctnetlink_dump_counters(struct sk_buff *skb, const struct nf_conn *ct, 211ctnetlink_dump_counters(struct sk_buff *skb, const struct nf_conn *ct,
212 enum ip_conntrack_dir dir) 212 enum ip_conntrack_dir dir)
213{ 213{
214 enum ctattr_type type = dir ? CTA_COUNTERS_REPLY: CTA_COUNTERS_ORIG; 214 enum ctattr_type type = dir ? CTA_COUNTERS_REPLY: CTA_COUNTERS_ORIG;
215 struct nlattr *nest_count; 215 struct nlattr *nest_count;
216 const struct nf_conn_counter *acct;
217
218 acct = nf_conn_acct_find(ct);
219 if (!acct)
220 return 0;
216 221
217 nest_count = nla_nest_start(skb, type | NLA_F_NESTED); 222 nest_count = nla_nest_start(skb, type | NLA_F_NESTED);
218 if (!nest_count) 223 if (!nest_count)
219 goto nla_put_failure; 224 goto nla_put_failure;
220 225
221 NLA_PUT_BE32(skb, CTA_COUNTERS32_PACKETS, 226 NLA_PUT_BE64(skb, CTA_COUNTERS_PACKETS,
222 htonl(ct->counters[dir].packets)); 227 cpu_to_be64(acct[dir].packets));
223 NLA_PUT_BE32(skb, CTA_COUNTERS32_BYTES, 228 NLA_PUT_BE64(skb, CTA_COUNTERS_BYTES,
224 htonl(ct->counters[dir].bytes)); 229 cpu_to_be64(acct[dir].bytes));
225 230
226 nla_nest_end(skb, nest_count); 231 nla_nest_end(skb, nest_count);
227 232
@@ -230,9 +235,6 @@ ctnetlink_dump_counters(struct sk_buff *skb, const struct nf_conn *ct,
230nla_put_failure: 235nla_put_failure:
231 return -1; 236 return -1;
232} 237}
233#else
234#define ctnetlink_dump_counters(a, b, c) (0)
235#endif
236 238
237#ifdef CONFIG_NF_CONNTRACK_MARK 239#ifdef CONFIG_NF_CONNTRACK_MARK
238static inline int 240static inline int
@@ -501,11 +503,6 @@ static int ctnetlink_conntrack_event(struct notifier_block *this,
501 goto nla_put_failure; 503 goto nla_put_failure;
502#endif 504#endif
503 505
504 if (events & IPCT_COUNTER_FILLING &&
505 (ctnetlink_dump_counters(skb, ct, IP_CT_DIR_ORIGINAL) < 0 ||
506 ctnetlink_dump_counters(skb, ct, IP_CT_DIR_REPLY) < 0))
507 goto nla_put_failure;
508
509 if (events & IPCT_RELATED && 506 if (events & IPCT_RELATED &&
510 ctnetlink_dump_master(skb, ct) < 0) 507 ctnetlink_dump_master(skb, ct) < 0)
511 goto nla_put_failure; 508 goto nla_put_failure;
@@ -576,11 +573,15 @@ restart:
576 cb->args[1] = (unsigned long)ct; 573 cb->args[1] = (unsigned long)ct;
577 goto out; 574 goto out;
578 } 575 }
579#ifdef CONFIG_NF_CT_ACCT 576
580 if (NFNL_MSG_TYPE(cb->nlh->nlmsg_type) == 577 if (NFNL_MSG_TYPE(cb->nlh->nlmsg_type) ==
581 IPCTNL_MSG_CT_GET_CTRZERO) 578 IPCTNL_MSG_CT_GET_CTRZERO) {
582 memset(&ct->counters, 0, sizeof(ct->counters)); 579 struct nf_conn_counter *acct;
583#endif 580
581 acct = nf_conn_acct_find(ct);
582 if (acct)
583 memset(acct, 0, sizeof(struct nf_conn_counter[IP_CT_DIR_MAX]));
584 }
584 } 585 }
585 if (cb->args[1]) { 586 if (cb->args[1]) {
586 cb->args[1] = 0; 587 cb->args[1] = 0;
@@ -832,14 +833,9 @@ ctnetlink_get_conntrack(struct sock *ctnl, struct sk_buff *skb,
832 u_int8_t u3 = nfmsg->nfgen_family; 833 u_int8_t u3 = nfmsg->nfgen_family;
833 int err = 0; 834 int err = 0;
834 835
835 if (nlh->nlmsg_flags & NLM_F_DUMP) { 836 if (nlh->nlmsg_flags & NLM_F_DUMP)
836#ifndef CONFIG_NF_CT_ACCT
837 if (NFNL_MSG_TYPE(nlh->nlmsg_type) == IPCTNL_MSG_CT_GET_CTRZERO)
838 return -ENOTSUPP;
839#endif
840 return netlink_dump_start(ctnl, skb, nlh, ctnetlink_dump_table, 837 return netlink_dump_start(ctnl, skb, nlh, ctnetlink_dump_table,
841 ctnetlink_done); 838 ctnetlink_done);
842 }
843 839
844 if (cda[CTA_TUPLE_ORIG]) 840 if (cda[CTA_TUPLE_ORIG])
845 err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_ORIG, u3); 841 err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_ORIG, u3);
@@ -1152,6 +1148,8 @@ ctnetlink_create_conntrack(struct nlattr *cda[],
1152 goto err; 1148 goto err;
1153 } 1149 }
1154 1150
1151 nf_ct_acct_ext_add(ct, GFP_KERNEL);
1152
1155#if defined(CONFIG_NF_CONNTRACK_MARK) 1153#if defined(CONFIG_NF_CONNTRACK_MARK)
1156 if (cda[CTA_MARK]) 1154 if (cda[CTA_MARK])
1157 ct->mark = ntohl(nla_get_be32(cda[CTA_MARK])); 1155 ct->mark = ntohl(nla_get_be32(cda[CTA_MARK]));
diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c
index 41183a4d2d62..30aa5b94a771 100644
--- a/net/netfilter/nf_conntrack_proto_sctp.c
+++ b/net/netfilter/nf_conntrack_proto_sctp.c
@@ -482,11 +482,11 @@ static int sctp_to_nlattr(struct sk_buff *skb, struct nlattr *nla,
 
 	NLA_PUT_BE32(skb,
 		     CTA_PROTOINFO_SCTP_VTAG_ORIGINAL,
-		     htonl(ct->proto.sctp.vtag[IP_CT_DIR_ORIGINAL]));
+		     ct->proto.sctp.vtag[IP_CT_DIR_ORIGINAL]);
 
 	NLA_PUT_BE32(skb,
 		     CTA_PROTOINFO_SCTP_VTAG_REPLY,
-		     htonl(ct->proto.sctp.vtag[IP_CT_DIR_REPLY]));
+		     ct->proto.sctp.vtag[IP_CT_DIR_REPLY]);
 
 	read_unlock_bh(&sctp_lock);
 
@@ -530,9 +530,9 @@ static int nlattr_to_sctp(struct nlattr *cda[], struct nf_conn *ct)
 	write_lock_bh(&sctp_lock);
 	ct->proto.sctp.state = nla_get_u8(tb[CTA_PROTOINFO_SCTP_STATE]);
 	ct->proto.sctp.vtag[IP_CT_DIR_ORIGINAL] =
-		ntohl(nla_get_be32(tb[CTA_PROTOINFO_SCTP_VTAG_ORIGINAL]));
+		nla_get_be32(tb[CTA_PROTOINFO_SCTP_VTAG_ORIGINAL]);
 	ct->proto.sctp.vtag[IP_CT_DIR_REPLY] =
-		ntohl(nla_get_be32(tb[CTA_PROTOINFO_SCTP_VTAG_REPLY]));
+		nla_get_be32(tb[CTA_PROTOINFO_SCTP_VTAG_REPLY]);
 	write_unlock_bh(&sctp_lock);
 
 	return 0;
diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
index 46ea542d0df9..869ef9349d0f 100644
--- a/net/netfilter/nf_conntrack_standalone.c
+++ b/net/netfilter/nf_conntrack_standalone.c
@@ -25,6 +25,7 @@
 #include <net/netfilter/nf_conntrack_l4proto.h>
 #include <net/netfilter/nf_conntrack_expect.h>
 #include <net/netfilter/nf_conntrack_helper.h>
+#include <net/netfilter/nf_conntrack_acct.h>
 
 MODULE_LICENSE("GPL");
 
@@ -38,19 +39,6 @@ print_tuple(struct seq_file *s, const struct nf_conntrack_tuple *tuple,
 }
 EXPORT_SYMBOL_GPL(print_tuple);
 
-#ifdef CONFIG_NF_CT_ACCT
-static unsigned int
-seq_print_counters(struct seq_file *s,
-		   const struct ip_conntrack_counter *counter)
-{
-	return seq_printf(s, "packets=%llu bytes=%llu ",
-			  (unsigned long long)counter->packets,
-			  (unsigned long long)counter->bytes);
-}
-#else
-#define seq_print_counters(x, y) 0
-#endif
-
 struct ct_iter_state {
 	unsigned int bucket;
 };
@@ -146,7 +134,7 @@ static int ct_seq_show(struct seq_file *s, void *v)
 			l3proto, l4proto))
 		return -ENOSPC;
 
-	if (seq_print_counters(s, &ct->counters[IP_CT_DIR_ORIGINAL]))
+	if (seq_print_acct(s, ct, IP_CT_DIR_ORIGINAL))
 		return -ENOSPC;
 
 	if (!(test_bit(IPS_SEEN_REPLY_BIT, &ct->status)))
@@ -157,7 +145,7 @@ static int ct_seq_show(struct seq_file *s, void *v)
 			l3proto, l4proto))
 		return -ENOSPC;
 
-	if (seq_print_counters(s, &ct->counters[IP_CT_DIR_REPLY]))
+	if (seq_print_acct(s, ct, IP_CT_DIR_REPLY))
 		return -ENOSPC;
 
 	if (test_bit(IPS_ASSURED_BIT, &ct->status))
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
index b8173af8c24a..9a35b57ab76d 100644
--- a/net/netfilter/nfnetlink_log.c
+++ b/net/netfilter/nfnetlink_log.c
@@ -453,6 +453,14 @@ __build_packet_message(struct nfulnl_instance *inst,
 		}
 	}
 
+	if (indev && skb_mac_header_was_set(skb)) {
+		NLA_PUT_BE16(inst->skb, NFULA_HWTYPE, htons(skb->dev->type));
+		NLA_PUT_BE16(inst->skb, NFULA_HWLEN,
+			     htons(skb->dev->hard_header_len));
+		NLA_PUT(inst->skb, NFULA_HWHEADER, skb->dev->hard_header_len,
+			skb_mac_header(skb));
+	}
+
 	if (skb->tstamp.tv64) {
 		struct nfulnl_msg_packet_timestamp ts;
 		struct timeval tv = ktime_to_timeval(skb->tstamp);
diff --git a/net/netfilter/xt_TCPMSS.c b/net/netfilter/xt_TCPMSS.c
index 217e2b686322..beb5094703cb 100644
--- a/net/netfilter/xt_TCPMSS.c
+++ b/net/netfilter/xt_TCPMSS.c
@@ -147,17 +147,21 @@ tcpmss_mangle_packet(struct sk_buff *skb,
 	return TCPOLEN_MSS;
 }
 
-static u_int32_t tcpmss_reverse_mtu4(const struct iphdr *iph)
+static u_int32_t tcpmss_reverse_mtu(const struct sk_buff *skb,
+				    unsigned int family)
 {
-	struct flowi fl = {
-		.fl4_dst = iph->saddr,
-	};
+	struct flowi fl = {};
 	const struct nf_afinfo *ai;
 	struct rtable *rt = NULL;
 	u_int32_t mtu = ~0U;
 
+	if (family == PF_INET)
+		fl.fl4_dst = ip_hdr(skb)->saddr;
+	else
+		fl.fl6_dst = ipv6_hdr(skb)->saddr;
+
 	rcu_read_lock();
-	ai = nf_get_afinfo(AF_INET);
+	ai = nf_get_afinfo(family);
 	if (ai != NULL)
 		ai->route((struct dst_entry **)&rt, &fl);
 	rcu_read_unlock();
@@ -178,7 +182,8 @@ tcpmss_tg4(struct sk_buff *skb, const struct net_device *in,
 	__be16 newlen;
 	int ret;
 
-	ret = tcpmss_mangle_packet(skb, targinfo, tcpmss_reverse_mtu4(iph),
+	ret = tcpmss_mangle_packet(skb, targinfo,
+				   tcpmss_reverse_mtu(skb, PF_INET),
 				   iph->ihl * 4,
 				   sizeof(*iph) + sizeof(struct tcphdr));
 	if (ret < 0)
@@ -193,28 +198,6 @@
 }
 
 #if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
-static u_int32_t tcpmss_reverse_mtu6(const struct ipv6hdr *iph)
-{
-	struct flowi fl = {
-		.fl6_dst = iph->saddr,
-	};
-	const struct nf_afinfo *ai;
-	struct rtable *rt = NULL;
-	u_int32_t mtu = ~0U;
-
-	rcu_read_lock();
-	ai = nf_get_afinfo(AF_INET6);
-	if (ai != NULL)
-		ai->route((struct dst_entry **)&rt, &fl);
-	rcu_read_unlock();
-
-	if (rt != NULL) {
-		mtu = dst_mtu(&rt->u.dst);
-		dst_release(&rt->u.dst);
-	}
-	return mtu;
-}
-
 static unsigned int
 tcpmss_tg6(struct sk_buff *skb, const struct net_device *in,
 	   const struct net_device *out, unsigned int hooknum,
@@ -229,7 +212,8 @@ tcpmss_tg6(struct sk_buff *skb, const struct net_device *in,
 	tcphoff = ipv6_skip_exthdr(skb, sizeof(*ipv6h), &nexthdr);
 	if (tcphoff < 0)
 		return NF_DROP;
-	ret = tcpmss_mangle_packet(skb, targinfo, tcpmss_reverse_mtu6(ipv6h),
+	ret = tcpmss_mangle_packet(skb, targinfo,
+				   tcpmss_reverse_mtu(skb, PF_INET6),
 				   tcphoff,
 				   sizeof(*ipv6h) + sizeof(struct tcphdr));
 	if (ret < 0)
diff --git a/net/netfilter/xt_connbytes.c b/net/netfilter/xt_connbytes.c
index d7e8983cd37f..3e39c4fe1931 100644
--- a/net/netfilter/xt_connbytes.c
+++ b/net/netfilter/xt_connbytes.c
@@ -8,6 +8,7 @@
 #include <linux/netfilter/x_tables.h>
 #include <linux/netfilter/xt_connbytes.h>
 #include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_conntrack_acct.h>
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
@@ -27,12 +28,15 @@ connbytes_mt(const struct sk_buff *skb, const struct net_device *in,
 	u_int64_t what = 0;	/* initialize to make gcc happy */
 	u_int64_t bytes = 0;
 	u_int64_t pkts = 0;
-	const struct ip_conntrack_counter *counters;
+	const struct nf_conn_counter *counters;
 
 	ct = nf_ct_get(skb, &ctinfo);
 	if (!ct)
 		return false;
-	counters = ct->counters;
+
+	counters = nf_conn_acct_find(ct);
+	if (!counters)
+		return false;
 
 	switch (sinfo->what) {
 	case XT_CONNBYTES_PKTS:
diff --git a/net/netfilter/xt_time.c b/net/netfilter/xt_time.c
index ed76baab4734..9f328593287e 100644
--- a/net/netfilter/xt_time.c
+++ b/net/netfilter/xt_time.c
@@ -173,7 +173,7 @@ time_mt(const struct sk_buff *skb, const struct net_device *in,
 		__net_timestamp((struct sk_buff *)skb);
 
 	stamp = ktime_to_ns(skb->tstamp);
-	do_div(stamp, NSEC_PER_SEC);
+	stamp = div_s64(stamp, NSEC_PER_SEC);
 
 	if (info->flags & XT_TIME_LOCAL_TZ)
 		/* Adjust for local timezone */
diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
index fccc250f95f5..532e4faa29f7 100644
--- a/net/netrom/af_netrom.c
+++ b/net/netrom/af_netrom.c
@@ -73,6 +73,7 @@ static const struct proto_ops nr_proto_ops;
  * separate class since they always nest.
  */
 static struct lock_class_key nr_netdev_xmit_lock_key;
+static struct lock_class_key nr_netdev_addr_lock_key;
 
 static void nr_set_lockdep_one(struct net_device *dev,
 			       struct netdev_queue *txq,
@@ -83,6 +84,7 @@ static void nr_set_lockdep_one(struct net_device *dev,
 
 static void nr_set_lockdep_key(struct net_device *dev)
 {
+	lockdep_set_class(&dev->addr_list_lock, &nr_netdev_addr_lock_key);
 	netdev_for_each_tx_queue(dev, nr_set_lockdep_one, NULL);
 }
 
diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c
index dbc963b4f5fb..a7f1ce11bc22 100644
--- a/net/rose/af_rose.c
+++ b/net/rose/af_rose.c
@@ -74,6 +74,7 @@ ax25_address rose_callsign;
  * separate class since they always nest.
  */
 static struct lock_class_key rose_netdev_xmit_lock_key;
+static struct lock_class_key rose_netdev_addr_lock_key;
 
 static void rose_set_lockdep_one(struct net_device *dev,
 				 struct netdev_queue *txq,
@@ -84,6 +85,7 @@ static void rose_set_lockdep_one(struct net_device *dev,
 
 static void rose_set_lockdep_key(struct net_device *dev)
 {
+	lockdep_set_class(&dev->addr_list_lock, &rose_netdev_addr_lock_key);
 	netdev_for_each_tx_queue(dev, rose_set_lockdep_one, NULL);
 }
 
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 5219d5f9d754..b0601642e227 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -447,7 +447,7 @@ void qdisc_watchdog_cancel(struct qdisc_watchdog *wd)
 }
 EXPORT_SYMBOL(qdisc_watchdog_cancel);
 
-struct hlist_head *qdisc_class_hash_alloc(unsigned int n)
+static struct hlist_head *qdisc_class_hash_alloc(unsigned int n)
 {
 	unsigned int size = n * sizeof(struct hlist_head), i;
 	struct hlist_head *h;
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 0ddf69286f92..4ac7e3a8c253 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -212,9 +212,9 @@ static void dev_watchdog(unsigned long arg)
 		if (some_queue_stopped &&
 		    time_after(jiffies, (dev->trans_start +
 					 dev->watchdog_timeo))) {
-			printk(KERN_INFO "NETDEV WATCHDOG: %s: "
-			       "transmit timed out\n",
-			       dev->name);
+			char drivername[64];
+			printk(KERN_INFO "NETDEV WATCHDOG: %s (%s): transmit timed out\n",
+			       dev->name, netdev_drivername(dev, drivername, 64));
 			dev->tx_timeout(dev);
 			WARN_ON_ONCE(1);
 		}
@@ -356,44 +356,99 @@ static struct Qdisc noqueue_qdisc = {
 };
 
 
-static int fifo_fast_enqueue(struct sk_buff *skb, struct Qdisc* qdisc)
+static const u8 prio2band[TC_PRIO_MAX+1] =
+	{ 1, 2, 2, 2, 1, 2, 0, 0 , 1, 1, 1, 1, 1, 1, 1, 1 };
+
+/* 3-band FIFO queue: old style, but should be a bit faster than
+   generic prio+fifo combination.
+ */
+
+#define PFIFO_FAST_BANDS 3
+
+static inline struct sk_buff_head *prio2list(struct sk_buff *skb,
+					     struct Qdisc *qdisc)
+{
+	struct sk_buff_head *list = qdisc_priv(qdisc);
+	return list + prio2band[skb->priority & TC_PRIO_MAX];
+}
+
+static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc* qdisc)
 {
-	struct sk_buff_head *list = &qdisc->q;
+	struct sk_buff_head *list = prio2list(skb, qdisc);
 
-	if (skb_queue_len(list) < qdisc_dev(qdisc)->tx_queue_len)
+	if (skb_queue_len(list) < qdisc_dev(qdisc)->tx_queue_len) {
+		qdisc->q.qlen++;
 		return __qdisc_enqueue_tail(skb, qdisc, list);
+	}
 
 	return qdisc_drop(skb, qdisc);
 }
 
-static struct sk_buff *fifo_fast_dequeue(struct Qdisc* qdisc)
+static struct sk_buff *pfifo_fast_dequeue(struct Qdisc* qdisc)
 {
-	struct sk_buff_head *list = &qdisc->q;
+	int prio;
+	struct sk_buff_head *list = qdisc_priv(qdisc);
 
-	if (!skb_queue_empty(list))
-		return __qdisc_dequeue_head(qdisc, list);
+	for (prio = 0; prio < PFIFO_FAST_BANDS; prio++) {
+		if (!skb_queue_empty(list + prio)) {
+			qdisc->q.qlen--;
+			return __qdisc_dequeue_head(qdisc, list + prio);
+		}
+	}
 
 	return NULL;
 }
 
-static int fifo_fast_requeue(struct sk_buff *skb, struct Qdisc* qdisc)
+static int pfifo_fast_requeue(struct sk_buff *skb, struct Qdisc* qdisc)
 {
-	return __qdisc_requeue(skb, qdisc, &qdisc->q);
+	qdisc->q.qlen++;
+	return __qdisc_requeue(skb, qdisc, prio2list(skb, qdisc));
 }
 
-static void fifo_fast_reset(struct Qdisc* qdisc)
+static void pfifo_fast_reset(struct Qdisc* qdisc)
 {
-	__qdisc_reset_queue(qdisc, &qdisc->q);
+	int prio;
+	struct sk_buff_head *list = qdisc_priv(qdisc);
+
+	for (prio = 0; prio < PFIFO_FAST_BANDS; prio++)
+		__qdisc_reset_queue(qdisc, list + prio);
+
 	qdisc->qstats.backlog = 0;
+	qdisc->q.qlen = 0;
 }
 
-static struct Qdisc_ops fifo_fast_ops __read_mostly = {
-	.id		=	"fifo_fast",
-	.priv_size	=	0,
-	.enqueue	=	fifo_fast_enqueue,
-	.dequeue	=	fifo_fast_dequeue,
-	.requeue	=	fifo_fast_requeue,
-	.reset		=	fifo_fast_reset,
+static int pfifo_fast_dump(struct Qdisc *qdisc, struct sk_buff *skb)
+{
+	struct tc_prio_qopt opt = { .bands = PFIFO_FAST_BANDS };
+
+	memcpy(&opt.priomap, prio2band, TC_PRIO_MAX+1);
+	NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
+	return skb->len;
+
+nla_put_failure:
+	return -1;
+}
+
+static int pfifo_fast_init(struct Qdisc *qdisc, struct nlattr *opt)
433{
434 int prio;
435 struct sk_buff_head *list = qdisc_priv(qdisc);
436
437 for (prio = 0; prio < PFIFO_FAST_BANDS; prio++)
438 skb_queue_head_init(list + prio);
439
440 return 0;
441}
442
443static struct Qdisc_ops pfifo_fast_ops __read_mostly = {
444 .id = "pfifo_fast",
445 .priv_size = PFIFO_FAST_BANDS * sizeof(struct sk_buff_head),
446 .enqueue = pfifo_fast_enqueue,
447 .dequeue = pfifo_fast_dequeue,
448 .requeue = pfifo_fast_requeue,
449 .init = pfifo_fast_init,
450 .reset = pfifo_fast_reset,
451 .dump = pfifo_fast_dump,
397 .owner = THIS_MODULE, 452 .owner = THIS_MODULE,
398}; 453};
399 454
@@ -522,7 +577,7 @@ static void attach_one_default_qdisc(struct net_device *dev,
522 577
523 if (dev->tx_queue_len) { 578 if (dev->tx_queue_len) {
524 qdisc = qdisc_create_dflt(dev, dev_queue, 579 qdisc = qdisc_create_dflt(dev, dev_queue,
525 &fifo_fast_ops, TC_H_ROOT); 580 &pfifo_fast_ops, TC_H_ROOT);
526 if (!qdisc) { 581 if (!qdisc) {
527 printk(KERN_INFO "%s: activation failed\n", dev->name); 582 printk(KERN_INFO "%s: activation failed\n", dev->name);
528 return; 583 return;
@@ -550,9 +605,9 @@ void dev_activate(struct net_device *dev)
550 int need_watchdog; 605 int need_watchdog;
551 606
552 /* No queueing discipline is attached to device; 607 /* No queueing discipline is attached to device;
553 * create default one i.e. fifo_fast for devices, 608 create default one i.e. pfifo_fast for devices,
554 * which need queueing and noqueue_qdisc for 609 which need queueing and noqueue_qdisc for
555 * virtual interfaces. 610 virtual interfaces
556 */ 611 */
557 612
558 if (dev_all_qdisc_sleeping_noop(dev)) 613 if (dev_all_qdisc_sleeping_noop(dev))
@@ -576,7 +631,6 @@ static void dev_deactivate_queue(struct net_device *dev,
576 void *_qdisc_default) 631 void *_qdisc_default)
577{ 632{
578 struct Qdisc *qdisc_default = _qdisc_default; 633 struct Qdisc *qdisc_default = _qdisc_default;
579 struct sk_buff *skb = NULL;
580 struct Qdisc *qdisc; 634 struct Qdisc *qdisc;
581 635
582 qdisc = dev_queue->qdisc; 636 qdisc = dev_queue->qdisc;
@@ -588,8 +642,6 @@ static void dev_deactivate_queue(struct net_device *dev,
588 642
589 spin_unlock_bh(qdisc_lock(qdisc)); 643 spin_unlock_bh(qdisc_lock(qdisc));
590 } 644 }
591
592 kfree_skb(skb);
593} 645}
594 646
595static bool some_qdisc_is_running(struct net_device *dev, int lock) 647static bool some_qdisc_is_running(struct net_device *dev, int lock)
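
Aside on the sch_generic.c changes above: the single-list fifo_fast default qdisc becomes pfifo_fast, which keeps three sk_buff_head bands in its private area and picks a band from skb->priority via the prio2band table; dequeue scans band 0 first, then 1, then 2, so lower-numbered bands always drain first. A standalone sketch of just the band selection, as an illustration rather than kernel code:

#include <stdio.h>

#define TC_PRIO_MAX		15
#define PFIFO_FAST_BANDS	3

/* Same table as the hunk above: maps skb->priority & TC_PRIO_MAX to a band. */
static const unsigned char prio2band[TC_PRIO_MAX + 1] =
	{ 1, 2, 2, 2, 1, 2, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1 };

int main(void)
{
	unsigned int prio;

	/* Band 0 is dequeued before band 1, and band 1 before band 2. */
	for (prio = 0; prio <= TC_PRIO_MAX; prio++)
		printf("skb->priority %2u -> band %u\n",
		       prio, prio2band[prio & TC_PRIO_MAX]);
	return 0;
}
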
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index 70ead8dc3485..4328ad5439c9 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -71,6 +71,8 @@ static void sctp_mark_missing(struct sctp_outq *q,
71 71
72static void sctp_generate_fwdtsn(struct sctp_outq *q, __u32 sack_ctsn); 72static void sctp_generate_fwdtsn(struct sctp_outq *q, __u32 sack_ctsn);
73 73
74static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout);
75
74/* Add data to the front of the queue. */ 76/* Add data to the front of the queue. */
75static inline void sctp_outq_head_data(struct sctp_outq *q, 77static inline void sctp_outq_head_data(struct sctp_outq *q,
76 struct sctp_chunk *ch) 78 struct sctp_chunk *ch)
@@ -712,7 +714,7 @@ int sctp_outq_uncork(struct sctp_outq *q)
712 * locking concerns must be made. Today we use the sock lock to protect 714 * locking concerns must be made. Today we use the sock lock to protect
713 * this function. 715 * this function.
714 */ 716 */
715int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout) 717static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
716{ 718{
717 struct sctp_packet *packet; 719 struct sctp_packet *packet;
718 struct sctp_packet singleton; 720 struct sctp_packet singleton;
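
Aside on the sctp/outqueue.c hunks above: sctp_outq_flush() loses its external linkage, presumably because it is only called from within this file, and the new forward declaration keeps callers that appear earlier in the file compiling. The same C pattern in a trivial standalone form, with hypothetical names:

#include <stdio.h>

/* Forward declaration: earlier functions can call helper() even though its
 * definition comes later; 'static' keeps the symbol file-local. */
static int helper(int x);

static int caller(int x)
{
	return helper(x) + 1;
}

static int helper(int x)
{
	return x * 2;
}

int main(void)
{
	printf("%d\n", caller(20));	/* prints 41 */
	return 0;
}
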
diff --git a/net/sctp/proc.c b/net/sctp/proc.c
index 5dd89831eceb..f268910620be 100644
--- a/net/sctp/proc.c
+++ b/net/sctp/proc.c
@@ -519,8 +519,3 @@ int __init sctp_remaddr_proc_init(void)
519 519
520 return 0; 520 return 0;
521} 521}
522
523void sctp_assoc_proc_exit(void)
524{
525 remove_proc_entry("remaddr", proc_net_sctp);
526}