author    Greg Kroah-Hartman <gregkh@suse.de>    2006-08-18 14:02:52 -0400
committer Greg Kroah-Hartman <gregkh@suse.de>    2006-08-18 14:02:52 -0400
commit    ef7d1b244fa6c94fb76d5f787b8629df64ea4046 (patch)
tree      eb6b35bb250f46d75853764bd9309c75e4f03c72
parent    ed0da6fc9d3096f54c4a76737eeae57ac81418cf (diff)
parent    78eb887733ec8ff5d6e6c69e3c32a187a9303622 (diff)
Merge gregkh@master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6
-rw-r--r--  Documentation/networking/ip-sysctl.txt     |  6
-rw-r--r--  drivers/net/bnx2.c                         | 49
-rw-r--r--  drivers/net/bnx2.h                         | 12
-rw-r--r--  drivers/net/ppp_generic.c                  | 30
-rw-r--r--  include/linux/if_vlan.h                    |  5
-rw-r--r--  include/linux/netdevice.h                  | 27
-rw-r--r--  net/atm/proc.c                             |  2
-rw-r--r--  net/bridge/br_if.c                         |  7
-rw-r--r--  net/core/dev.c                             | 37
-rw-r--r--  net/core/utils.c                           |  7
-rw-r--r--  net/ipv4/fib_semantics.c                   | 12
-rw-r--r--  net/ipv4/igmp.c                            | 38
-rw-r--r--  net/ipv4/netfilter/ip_conntrack_netlink.c | 17
-rw-r--r--  net/ipv4/netfilter/ip_tables.c             |  3
-rw-r--r--  net/ipv6/icmp.c                            | 13
-rw-r--r--  net/ipv6/mcast.c                           | 10
-rw-r--r--  net/netfilter/nf_conntrack_netlink.c       | 17
-rw-r--r--  net/netfilter/xt_physdev.c                 |  1
-rw-r--r--  net/sched/cls_u32.c                        |  2
19 files changed, 175 insertions(+), 120 deletions(-)
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index d46338af6002..3e0c017e7877 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -294,15 +294,15 @@ tcp_rmem - vector of 3 INTEGERs: min, default, max
         Default: 87380*2 bytes.
 
 tcp_mem - vector of 3 INTEGERs: min, pressure, max
-        low: below this number of pages TCP is not bothered about its
+        min: below this number of pages TCP is not bothered about its
         memory appetite.
 
         pressure: when amount of memory allocated by TCP exceeds this number
         of pages, TCP moderates its memory consumption and enters memory
         pressure mode, which is exited when memory consumption falls
-        under "low".
+        under "min".
 
-        high: number of pages allowed for queueing by all TCP sockets.
+        max: number of pages allowed for queueing by all TCP sockets.
 
         Defaults are calculated at boot time from amount of available
         memory.
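
The documentation above covers what the three tcp_mem fields mean; the live
values can be read back from procfs. A minimal userspace sketch (the procfs
path is standard, the rest is illustrative):

    /* Print the min/pressure/max page counts from net.ipv4.tcp_mem. */
    #include <stdio.h>

    int main(void)
    {
        unsigned long min, pressure, max;
        FILE *f = fopen("/proc/sys/net/ipv4/tcp_mem", "r");

        if (!f) {
            perror("tcp_mem");
            return 1;
        }
        if (fscanf(f, "%lu %lu %lu", &min, &pressure, &max) == 3)
            printf("min=%lu pressure=%lu max=%lu (pages)\n",
                   min, pressure, max);
        fclose(f);
        return 0;
    }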
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index db73de0d2511..652eb05a6c2d 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -56,8 +56,8 @@
 
 #define DRV_MODULE_NAME         "bnx2"
 #define PFX DRV_MODULE_NAME     ": "
-#define DRV_MODULE_VERSION      "1.4.43"
-#define DRV_MODULE_RELDATE      "June 28, 2006"
+#define DRV_MODULE_VERSION      "1.4.44"
+#define DRV_MODULE_RELDATE      "August 10, 2006"
 
 #define RUN_AT(x) (jiffies + (x))
 
@@ -209,8 +209,10 @@ MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
 
 static inline u32 bnx2_tx_avail(struct bnx2 *bp)
 {
-        u32 diff = TX_RING_IDX(bp->tx_prod) - TX_RING_IDX(bp->tx_cons);
+        u32 diff;
 
+        smp_mb();
+        diff = TX_RING_IDX(bp->tx_prod) - TX_RING_IDX(bp->tx_cons);
         if (diff > MAX_TX_DESC_CNT)
                 diff = (diff & MAX_TX_DESC_CNT) - 1;
         return (bp->tx_ring_size - diff);
@@ -1569,7 +1571,7 @@ bnx2_alloc_rx_skb(struct bnx2 *bp, u16 index)
         struct rx_bd *rxbd = &bp->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
         unsigned long align;
 
-        skb = dev_alloc_skb(bp->rx_buf_size);
+        skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
         if (skb == NULL) {
                 return -ENOMEM;
         }
@@ -1578,7 +1580,6 @@ bnx2_alloc_rx_skb(struct bnx2 *bp, u16 index)
                 skb_reserve(skb, 8 - align);
         }
 
-        skb->dev = bp->dev;
         mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
                 PCI_DMA_FROMDEVICE);
 
@@ -1686,15 +1687,20 @@ bnx2_tx_int(struct bnx2 *bp)
         }
 
         bp->tx_cons = sw_cons;
+        /* Need to make the tx_cons update visible to bnx2_start_xmit()
+         * before checking for netif_queue_stopped().  Without the
+         * memory barrier, there is a small possibility that bnx2_start_xmit()
+         * will miss it and cause the queue to be stopped forever.
+         */
+        smp_mb();
 
-        if (unlikely(netif_queue_stopped(bp->dev))) {
-                spin_lock(&bp->tx_lock);
+        if (unlikely(netif_queue_stopped(bp->dev)) &&
+            (bnx2_tx_avail(bp) > bp->tx_wake_thresh)) {
+                netif_tx_lock(bp->dev);
                 if ((netif_queue_stopped(bp->dev)) &&
-                    (bnx2_tx_avail(bp) > MAX_SKB_FRAGS)) {
-
+                    (bnx2_tx_avail(bp) > bp->tx_wake_thresh))
                         netif_wake_queue(bp->dev);
-                }
-                spin_unlock(&bp->tx_lock);
+                netif_tx_unlock(bp->dev);
         }
 }
 
@@ -1786,7 +1792,7 @@ bnx2_rx_int(struct bnx2 *bp, int budget)
                 if ((bp->dev->mtu > 1500) && (len <= RX_COPY_THRESH)) {
                         struct sk_buff *new_skb;
 
-                        new_skb = dev_alloc_skb(len + 2);
+                        new_skb = netdev_alloc_skb(bp->dev, len + 2);
                         if (new_skb == NULL)
                                 goto reuse_rx;
 
@@ -1797,7 +1803,6 @@ bnx2_rx_int(struct bnx2 *bp, int budget)
 
                         skb_reserve(new_skb, 2);
                         skb_put(new_skb, len);
-                        new_skb->dev = bp->dev;
 
                         bnx2_reuse_rx_skb(bp, skb,
                                 sw_ring_cons, sw_ring_prod);
@@ -3503,6 +3508,8 @@ bnx2_init_tx_ring(struct bnx2 *bp)
         struct tx_bd *txbd;
         u32 val;
 
+        bp->tx_wake_thresh = bp->tx_ring_size / 2;
+
         txbd = &bp->tx_desc_ring[MAX_TX_DESC_CNT];
 
         txbd->tx_bd_haddr_hi = (u64) bp->tx_desc_mapping >> 32;
@@ -3952,7 +3959,7 @@ bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
                 return -EINVAL;
 
         pkt_size = 1514;
-        skb = dev_alloc_skb(pkt_size);
+        skb = netdev_alloc_skb(bp->dev, pkt_size);
         if (!skb)
                 return -ENOMEM;
         packet = skb_put(skb, pkt_size);
@@ -4390,10 +4397,8 @@ bnx2_vlan_rx_kill_vid(struct net_device *dev, uint16_t vid)
 #endif
 
 /* Called with netif_tx_lock.
- * hard_start_xmit is pseudo-lockless - a lock is only required when
- * the tx queue is full.  This way, we get the benefit of lockless
- * operations most of the time without the complexities to handle
- * netif_stop_queue/wake_queue race conditions.
+ * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
+ * netif_wake_queue().
  */
 static int
 bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
@@ -4512,12 +4517,9 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
         dev->trans_start = jiffies;
 
         if (unlikely(bnx2_tx_avail(bp) <= MAX_SKB_FRAGS)) {
-                spin_lock(&bp->tx_lock);
                 netif_stop_queue(dev);
-
-                if (bnx2_tx_avail(bp) > MAX_SKB_FRAGS)
+                if (bnx2_tx_avail(bp) > bp->tx_wake_thresh)
                         netif_wake_queue(dev);
-                spin_unlock(&bp->tx_lock);
         }
 
         return NETDEV_TX_OK;
@@ -5628,7 +5630,6 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
         bp->pdev = pdev;
 
         spin_lock_init(&bp->phy_lock);
-        spin_lock_init(&bp->tx_lock);
         INIT_WORK(&bp->reset_task, bnx2_reset_task, bp);
 
         dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
@@ -5751,7 +5752,7 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
         bp->mac_addr[5] = (u8) reg;
 
         bp->tx_ring_size = MAX_TX_DESC_CNT;
-        bnx2_set_rx_ring_size(bp, 100);
+        bnx2_set_rx_ring_size(bp, 255);
 
         bp->rx_csum = 1;
 
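
The smp_mb() added in bnx2_tx_avail() and bnx2_tx_int() pairs the producer
and consumer sides of the classic lockless stop/wake protocol: each side
publishes its index, issues a full barrier, then re-reads the other side's
state, so a wakeup cannot be lost between the check and the stop. A
userspace analogue of that protocol, sketched with C11 atomics (all names
are illustrative, not from the driver):

    #include <stdatomic.h>

    struct txq {
        atomic_uint  prod, cons;          /* free-running indices */
        atomic_bool  stopped;
        unsigned int size, wake_thresh;
    };

    static unsigned int tx_avail(struct txq *q)
    {
        /* Like bnx2_tx_avail(): full barrier before reading both
         * indices, so this side sees the other side's latest update. */
        atomic_thread_fence(memory_order_seq_cst);
        return q->size - (atomic_load(&q->prod) - atomic_load(&q->cons));
    }

    /* Producer (bnx2_start_xmit analogue): stop, then re-check. */
    static void producer_post(struct txq *q)
    {
        if (tx_avail(q) <= 1) {
            atomic_store(&q->stopped, true);
            /* The consumer may have advanced between the check and the
             * store; re-checking here prevents a permanent stall. */
            if (tx_avail(q) > q->wake_thresh)
                atomic_store(&q->stopped, false);
        }
    }

    /* Consumer (bnx2_tx_int analogue): publish cons, then check stopped. */
    static void consumer_reap(struct txq *q)
    {
        atomic_fetch_add(&q->cons, 1);
        atomic_thread_fence(memory_order_seq_cst);  /* the smp_mb() */
        if (atomic_load(&q->stopped) && tx_avail(q) > q->wake_thresh)
            atomic_store(&q->stopped, false);
    }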
diff --git a/drivers/net/bnx2.h b/drivers/net/bnx2.h
index 658c5ee95c73..fe804763c607 100644
--- a/drivers/net/bnx2.h
+++ b/drivers/net/bnx2.h
@@ -3890,10 +3890,6 @@ struct bnx2 {
         u32             tx_prod_bseq __attribute__((aligned(L1_CACHE_BYTES)));
         u16             tx_prod;
 
-        struct tx_bd    *tx_desc_ring;
-        struct sw_bd    *tx_buf_ring;
-        int             tx_ring_size;
-
         u16             tx_cons __attribute__((aligned(L1_CACHE_BYTES)));
         u16             hw_tx_cons;
 
@@ -3916,9 +3912,11 @@ struct bnx2 {
         struct sw_bd    *rx_buf_ring;
         struct rx_bd    *rx_desc_ring[MAX_RX_RINGS];
 
-        /* Only used to synchronize netif_stop_queue/wake_queue when tx */
-        /* ring is full */
-        spinlock_t      tx_lock;
+        /* TX constants */
+        struct tx_bd    *tx_desc_ring;
+        struct sw_bd    *tx_buf_ring;
+        int             tx_ring_size;
+        u32             tx_wake_thresh;
 
         /* End of fields used in the performance code paths. */
 
diff --git a/drivers/net/ppp_generic.c b/drivers/net/ppp_generic.c
index 0ec6e9d57b94..c872f7c6cce3 100644
--- a/drivers/net/ppp_generic.c
+++ b/drivers/net/ppp_generic.c
@@ -192,7 +192,7 @@ struct cardmap {
         void *ptr[CARDMAP_WIDTH];
 };
 static void *cardmap_get(struct cardmap *map, unsigned int nr);
-static void cardmap_set(struct cardmap **map, unsigned int nr, void *ptr);
+static int cardmap_set(struct cardmap **map, unsigned int nr, void *ptr);
 static unsigned int cardmap_find_first_free(struct cardmap *map);
 static void cardmap_destroy(struct cardmap **map);
 
@@ -1995,10 +1995,9 @@ ppp_register_channel(struct ppp_channel *chan)
 {
         struct channel *pch;
 
-        pch = kmalloc(sizeof(struct channel), GFP_KERNEL);
+        pch = kzalloc(sizeof(struct channel), GFP_KERNEL);
         if (pch == 0)
                 return -ENOMEM;
-        memset(pch, 0, sizeof(struct channel));
         pch->ppp = NULL;
         pch->chan = chan;
         chan->ppp = pch;
@@ -2408,13 +2407,12 @@ ppp_create_interface(int unit, int *retp)
         int ret = -ENOMEM;
         int i;
 
-        ppp = kmalloc(sizeof(struct ppp), GFP_KERNEL);
+        ppp = kzalloc(sizeof(struct ppp), GFP_KERNEL);
         if (!ppp)
                 goto out;
         dev = alloc_netdev(0, "", ppp_setup);
         if (!dev)
                 goto out1;
-        memset(ppp, 0, sizeof(struct ppp));
 
         ppp->mru = PPP_MRU;
         init_ppp_file(&ppp->file, INTERFACE);
@@ -2454,11 +2452,16 @@ ppp_create_interface(int unit, int *retp)
         }
 
         atomic_inc(&ppp_unit_count);
-        cardmap_set(&all_ppp_units, unit, ppp);
+        ret = cardmap_set(&all_ppp_units, unit, ppp);
+        if (ret != 0)
+                goto out3;
+
         mutex_unlock(&all_ppp_mutex);
         *retp = 0;
         return ppp;
 
+out3:
+        atomic_dec(&ppp_unit_count);
 out2:
         mutex_unlock(&all_ppp_mutex);
         free_netdev(dev);
@@ -2695,7 +2698,7 @@ static void *cardmap_get(struct cardmap *map, unsigned int nr)
         return NULL;
 }
 
-static void cardmap_set(struct cardmap **pmap, unsigned int nr, void *ptr)
+static int cardmap_set(struct cardmap **pmap, unsigned int nr, void *ptr)
 {
         struct cardmap *p;
         int i;
@@ -2704,8 +2707,9 @@ static void cardmap_set(struct cardmap **pmap, unsigned int nr, void *ptr)
         if (p == NULL || (nr >> p->shift) >= CARDMAP_WIDTH) {
                 do {
                         /* need a new top level */
-                        struct cardmap *np = kmalloc(sizeof(*np), GFP_KERNEL);
-                        memset(np, 0, sizeof(*np));
+                        struct cardmap *np = kzalloc(sizeof(*np), GFP_KERNEL);
+                        if (!np)
+                                goto enomem;
                         np->ptr[0] = p;
                         if (p != NULL) {
                                 np->shift = p->shift + CARDMAP_ORDER;
@@ -2719,8 +2723,9 @@ static void cardmap_set(struct cardmap **pmap, unsigned int nr, void *ptr)
         while (p->shift > 0) {
                 i = (nr >> p->shift) & CARDMAP_MASK;
                 if (p->ptr[i] == NULL) {
-                        struct cardmap *np = kmalloc(sizeof(*np), GFP_KERNEL);
-                        memset(np, 0, sizeof(*np));
+                        struct cardmap *np = kzalloc(sizeof(*np), GFP_KERNEL);
+                        if (!np)
+                                goto enomem;
                         np->shift = p->shift - CARDMAP_ORDER;
                         np->parent = p;
                         p->ptr[i] = np;
@@ -2735,6 +2740,9 @@ static void cardmap_set(struct cardmap **pmap, unsigned int nr, void *ptr)
                 set_bit(i, &p->inuse);
         else
                 clear_bit(i, &p->inuse);
+        return 0;
+enomem:
+        return -ENOMEM;
 }
 
 static unsigned int cardmap_find_first_free(struct cardmap *map)
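
The kmalloc()+memset() to kzalloc() conversions above are behavior-preserving:
kzalloc() is simply a zeroing kmalloc(), which also lets cardmap_set() check
for allocation failure in one step and, now that it returns int, report it
to ppp_create_interface(). A sketch of the equivalence being relied on
(kzalloc_like is a made-up name; the real helper lives in <linux/slab.h>):

    #include <linux/slab.h>
    #include <linux/string.h>

    /* What kzalloc(size, flags) does, spelled out. */
    static inline void *kzalloc_like(size_t size, gfp_t flags)
    {
        void *p = kmalloc(size, flags);

        if (p)
            memset(p, 0, size);
        return p;
    }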
diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h
index 383627ad328f..ab2740832742 100644
--- a/include/linux/if_vlan.h
+++ b/include/linux/if_vlan.h
@@ -155,6 +155,11 @@ static inline int __vlan_hwaccel_rx(struct sk_buff *skb,
 {
         struct net_device_stats *stats;
 
+        if (skb_bond_should_drop(skb)) {
+                dev_kfree_skb_any(skb);
+                return NET_RX_DROP;
+        }
+
         skb->dev = grp->vlan_devices[vlan_tag & VLAN_VID_MASK];
         if (skb->dev == NULL) {
                 dev_kfree_skb_any(skb);
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 75f02d8c6ed3..50a4719512ed 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -320,6 +320,9 @@ struct net_device
 #define NETIF_F_TSO_ECN         (SKB_GSO_TCP_ECN << NETIF_F_GSO_SHIFT)
 #define NETIF_F_TSO6            (SKB_GSO_TCPV6 << NETIF_F_GSO_SHIFT)
 
+        /* List of features with software fallbacks. */
+#define NETIF_F_GSO_SOFTWARE    (NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6)
+
 #define NETIF_F_GEN_CSUM        (NETIF_F_NO_CSUM | NETIF_F_HW_CSUM)
 #define NETIF_F_ALL_CSUM        (NETIF_F_IP_CSUM | NETIF_F_GEN_CSUM)
 
@@ -1012,6 +1015,30 @@ static inline int netif_needs_gso(struct net_device *dev, struct sk_buff *skb)
                 unlikely(skb->ip_summed != CHECKSUM_HW));
 }
 
+/* On bonding slaves other than the currently active slave, suppress
+ * duplicates except for 802.3ad ETH_P_SLOW and alb non-mcast/bcast.
+ */
+static inline int skb_bond_should_drop(struct sk_buff *skb)
+{
+        struct net_device *dev = skb->dev;
+        struct net_device *master = dev->master;
+
+        if (master &&
+            (dev->priv_flags & IFF_SLAVE_INACTIVE)) {
+                if (master->priv_flags & IFF_MASTER_ALB) {
+                        if (skb->pkt_type != PACKET_BROADCAST &&
+                            skb->pkt_type != PACKET_MULTICAST)
+                                return 0;
+                }
+                if (master->priv_flags & IFF_MASTER_8023AD &&
+                    skb->protocol == __constant_htons(ETH_P_SLOW))
+                        return 0;
+
+                return 1;
+        }
+        return 0;
+}
+
 #endif /* __KERNEL__ */
 
 #endif /* _LINUX_DEV_H */
diff --git a/net/atm/proc.c b/net/atm/proc.c
index 3f95b0886a6a..91fe5f53ff11 100644
--- a/net/atm/proc.c
+++ b/net/atm/proc.c
@@ -507,7 +507,7 @@ err_out:
         goto out;
 }
 
-void __exit atm_proc_exit(void)
+void atm_proc_exit(void)
 {
         atm_proc_dirs_remove();
 }
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index f55ef682ef84..b1211d5342f6 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -386,12 +386,17 @@ void br_features_recompute(struct net_bridge *br)
                         checksum = 0;
 
                 if (feature & NETIF_F_GSO)
-                        feature |= NETIF_F_TSO;
+                        feature |= NETIF_F_GSO_SOFTWARE;
                 feature |= NETIF_F_GSO;
 
                 features &= feature;
         }
 
+        if (!(checksum & NETIF_F_ALL_CSUM))
+                features &= ~NETIF_F_SG;
+        if (!(features & NETIF_F_SG))
+                features &= ~NETIF_F_GSO_MASK;
+
         br->dev->features = features | checksum | NETIF_F_LLTX |
                             NETIF_F_GSO_ROBUST;
 }
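
The two new checks in br_features_recompute() encode an offload dependency
chain: scatter/gather needs some form of checksum offload, and GSO needs
scatter/gather, so losing the first strips the rest. The masking logic in
isolation, with illustrative flag values standing in for the real NETIF_F_*
masks:

    #include <assert.h>

    #define F_CSUM 0x1u   /* some checksum offload */
    #define F_SG   0x2u   /* scatter/gather */
    #define F_GSO  0x4u   /* stands in for NETIF_F_GSO_MASK */

    static unsigned int recompute(unsigned int features, unsigned int checksum)
    {
        if (!(checksum & F_CSUM))
            features &= ~F_SG;    /* no checksum offload -> no SG */
        if (!(features & F_SG))
            features &= ~F_GSO;   /* no SG -> no GSO */
        return features;
    }

    int main(void)
    {
        /* A bridge port without checksum offload must lose SG and GSO. */
        assert(recompute(F_SG | F_GSO, 0) == 0);
        /* With checksum offload, everything survives. */
        assert(recompute(F_SG | F_GSO, F_CSUM) == (F_SG | F_GSO));
        return 0;
    }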
diff --git a/net/core/dev.c b/net/core/dev.c
index d95e2626d944..d4a1ec3bded5 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -116,6 +116,7 @@
 #include <linux/audit.h>
 #include <linux/dmaengine.h>
 #include <linux/err.h>
+#include <linux/ctype.h>
 
 /*
  * The list of packet types we will receive (as opposed to discard)
@@ -632,14 +633,22 @@ struct net_device * dev_get_by_flags(unsigned short if_flags, unsigned short mas
  *      @name: name string
  *
  *      Network device names need to be valid file names to
- *      to allow sysfs to work
+ *      to allow sysfs to work.  We also disallow any kind of
+ *      whitespace.
  */
 int dev_valid_name(const char *name)
 {
-        return !(*name == '\0'
-                 || !strcmp(name, ".")
-                 || !strcmp(name, "..")
-                 || strchr(name, '/'));
+        if (*name == '\0')
+                return 0;
+        if (!strcmp(name, ".") || !strcmp(name, ".."))
+                return 0;
+
+        while (*name) {
+                if (*name == '/' || isspace(*name))
+                        return 0;
+                name++;
+        }
+        return 1;
 }
 
 /**
@@ -1619,26 +1628,10 @@ static inline struct net_device *skb_bond(struct sk_buff *skb)
         struct net_device *dev = skb->dev;
 
         if (dev->master) {
-                /*
-                 * On bonding slaves other than the currently active
-                 * slave, suppress duplicates except for 802.3ad
-                 * ETH_P_SLOW and alb non-mcast/bcast.
-                 */
-                if (dev->priv_flags & IFF_SLAVE_INACTIVE) {
-                        if (dev->master->priv_flags & IFF_MASTER_ALB) {
-                                if (skb->pkt_type != PACKET_BROADCAST &&
-                                    skb->pkt_type != PACKET_MULTICAST)
-                                        goto keep;
-                        }
-
-                        if (dev->master->priv_flags & IFF_MASTER_8023AD &&
-                            skb->protocol == __constant_htons(ETH_P_SLOW))
-                                goto keep;
-
+                if (skb_bond_should_drop(skb)) {
                         kfree_skb(skb);
                         return NULL;
                 }
-keep:
                 skb->dev = dev->master;
         }
 
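
The rewritten dev_valid_name() keeps the old rules (non-empty, not "." or
"..", no '/') and additionally rejects whitespace anywhere in the name.
The function is self-contained enough to exercise in userspace; a quick
test harness (isspace() here has plain C-locale semantics, close to the
kernel's):

    #include <assert.h>
    #include <ctype.h>
    #include <string.h>

    static int dev_valid_name(const char *name)
    {
        if (*name == '\0')
            return 0;
        if (!strcmp(name, ".") || !strcmp(name, ".."))
            return 0;
        while (*name) {
            if (*name == '/' || isspace((unsigned char)*name))
                return 0;
            name++;
        }
        return 1;
    }

    int main(void)
    {
        assert(dev_valid_name("eth0"));
        assert(!dev_valid_name(""));
        assert(!dev_valid_name(".."));
        assert(!dev_valid_name("eth 0"));   /* whitespace now rejected */
        assert(!dev_valid_name("eth/0"));
        return 0;
    }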
diff --git a/net/core/utils.c b/net/core/utils.c
index 4f96f389243d..e31c90e05594 100644
--- a/net/core/utils.c
+++ b/net/core/utils.c
@@ -130,12 +130,13 @@ void __init net_random_init(void)
 static int net_random_reseed(void)
 {
         int i;
-        unsigned long seed[NR_CPUS];
+        unsigned long seed;
 
-        get_random_bytes(seed, sizeof(seed));
         for_each_possible_cpu(i) {
                 struct nrnd_state *state = &per_cpu(net_rand_state,i);
-                __net_srandom(state, seed[i]);
+
+                get_random_bytes(&seed, sizeof(seed));
+                __net_srandom(state, seed);
         }
         return 0;
 }
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index 9be53a8e72c3..51738000f3dc 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -159,7 +159,7 @@ void free_fib_info(struct fib_info *fi)
 
 void fib_release_info(struct fib_info *fi)
 {
-        write_lock(&fib_info_lock);
+        write_lock_bh(&fib_info_lock);
         if (fi && --fi->fib_treeref == 0) {
                 hlist_del(&fi->fib_hash);
                 if (fi->fib_prefsrc)
@@ -172,7 +172,7 @@ void fib_release_info(struct fib_info *fi)
                 fi->fib_dead = 1;
                 fib_info_put(fi);
         }
-        write_unlock(&fib_info_lock);
+        write_unlock_bh(&fib_info_lock);
 }
 
 static __inline__ int nh_comp(const struct fib_info *fi, const struct fib_info *ofi)
@@ -598,7 +598,7 @@ static void fib_hash_move(struct hlist_head *new_info_hash,
         unsigned int old_size = fib_hash_size;
         unsigned int i, bytes;
 
-        write_lock(&fib_info_lock);
+        write_lock_bh(&fib_info_lock);
         old_info_hash = fib_info_hash;
         old_laddrhash = fib_info_laddrhash;
         fib_hash_size = new_size;
@@ -639,7 +639,7 @@ static void fib_hash_move(struct hlist_head *new_info_hash,
         }
         fib_info_laddrhash = new_laddrhash;
 
-        write_unlock(&fib_info_lock);
+        write_unlock_bh(&fib_info_lock);
 
         bytes = old_size * sizeof(struct hlist_head *);
         fib_hash_free(old_info_hash, bytes);
@@ -820,7 +820,7 @@ link_it:
 
         fi->fib_treeref++;
         atomic_inc(&fi->fib_clntref);
-        write_lock(&fib_info_lock);
+        write_lock_bh(&fib_info_lock);
         hlist_add_head(&fi->fib_hash,
                        &fib_info_hash[fib_info_hashfn(fi)]);
         if (fi->fib_prefsrc) {
@@ -839,7 +839,7 @@ link_it:
                 head = &fib_info_devhash[hash];
                 hlist_add_head(&nh->nh_hash, head);
         } endfor_nexthops(fi)
-        write_unlock(&fib_info_lock);
+        write_unlock_bh(&fib_info_lock);
         return fi;
 
 err_inval:
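
Every fib_info_lock site above moves from write_lock() to write_lock_bh().
The _bh variants disable bottom halves while the lock is held, which matters
when the same lock can also be taken from softirq context: a softirq
interrupting the holder on the same CPU would otherwise spin on the lock
forever. The pattern in isolation, with a hypothetical lock:

    #include <linux/spinlock.h>

    static DEFINE_RWLOCK(example_info_lock);    /* illustrative, not fib's */

    static void update_from_process_context(void)
    {
        /* Block softirqs on this CPU too, not just other writers. */
        write_lock_bh(&example_info_lock);
        /* ... mutate data that softirq paths also read or write ... */
        write_unlock_bh(&example_info_lock);
    }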
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 9f4b752f5a33..8e8117c19e4d 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -1793,29 +1793,35 @@ int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr)
         struct in_device *in_dev;
         u32 group = imr->imr_multiaddr.s_addr;
         u32 ifindex;
+        int ret = -EADDRNOTAVAIL;
 
         rtnl_lock();
         in_dev = ip_mc_find_dev(imr);
-        if (!in_dev) {
-                rtnl_unlock();
-                return -ENODEV;
-        }
         ifindex = imr->imr_ifindex;
         for (imlp = &inet->mc_list; (iml = *imlp) != NULL; imlp = &iml->next) {
-                if (iml->multi.imr_multiaddr.s_addr == group &&
-                    iml->multi.imr_ifindex == ifindex) {
-                        (void) ip_mc_leave_src(sk, iml, in_dev);
+                if (iml->multi.imr_multiaddr.s_addr != group)
+                        continue;
+                if (ifindex) {
+                        if (iml->multi.imr_ifindex != ifindex)
+                                continue;
+                } else if (imr->imr_address.s_addr && imr->imr_address.s_addr !=
+                           iml->multi.imr_address.s_addr)
+                        continue;
+
+                (void) ip_mc_leave_src(sk, iml, in_dev);
 
-                        *imlp = iml->next;
+                *imlp = iml->next;
 
+                if (in_dev)
                         ip_mc_dec_group(in_dev, group);
-                        rtnl_unlock();
-                        sock_kfree_s(sk, iml, sizeof(*iml));
-                        return 0;
-                }
+                rtnl_unlock();
+                sock_kfree_s(sk, iml, sizeof(*iml));
+                return 0;
         }
+        if (!in_dev)
+                ret = -ENODEV;
         rtnl_unlock();
-        return -EADDRNOTAVAIL;
+        return ret;
 }
 
 int ip_mc_source(int add, int omode, struct sock *sk, struct
@@ -2199,13 +2205,13 @@ void ip_mc_drop_socket(struct sock *sk)
                 struct in_device *in_dev;
                 inet->mc_list = iml->next;
 
-                if ((in_dev = inetdev_by_index(iml->multi.imr_ifindex)) != NULL) {
-                        (void) ip_mc_leave_src(sk, iml, in_dev);
+                in_dev = inetdev_by_index(iml->multi.imr_ifindex);
+                (void) ip_mc_leave_src(sk, iml, in_dev);
+                if (in_dev != NULL) {
                         ip_mc_dec_group(in_dev, iml->multi.imr_multiaddr.s_addr);
                         in_dev_put(in_dev);
                 }
                 sock_kfree_s(sk, iml, sizeof(*iml));
-
         }
         rtnl_unlock();
 }
diff --git a/net/ipv4/netfilter/ip_conntrack_netlink.c b/net/ipv4/netfilter/ip_conntrack_netlink.c
index 33891bb1fde4..0d4cc92391fa 100644
--- a/net/ipv4/netfilter/ip_conntrack_netlink.c
+++ b/net/ipv4/netfilter/ip_conntrack_netlink.c
@@ -415,21 +415,18 @@ ctnetlink_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
                         cb->args[0], *id);
 
         read_lock_bh(&ip_conntrack_lock);
+        last = (struct ip_conntrack *)cb->args[1];
         for (; cb->args[0] < ip_conntrack_htable_size; cb->args[0]++) {
 restart:
-                last = (struct ip_conntrack *)cb->args[1];
                 list_for_each_prev(i, &ip_conntrack_hash[cb->args[0]]) {
                         h = (struct ip_conntrack_tuple_hash *) i;
                         if (DIRECTION(h) != IP_CT_DIR_ORIGINAL)
                                 continue;
                         ct = tuplehash_to_ctrack(h);
-                        if (last != NULL) {
-                                if (ct == last) {
-                                        ip_conntrack_put(last);
-                                        cb->args[1] = 0;
-                                        last = NULL;
-                                } else
+                        if (cb->args[1]) {
+                                if (ct != last)
                                         continue;
+                                cb->args[1] = 0;
                         }
                         if (ctnetlink_fill_info(skb, NETLINK_CB(cb->skb).pid,
                                                 cb->nlh->nlmsg_seq,
@@ -440,17 +437,17 @@ restart:
                                 goto out;
                         }
                 }
-                if (last != NULL) {
-                        ip_conntrack_put(last);
+                if (cb->args[1]) {
                         cb->args[1] = 0;
                         goto restart;
                 }
         }
 out:
         read_unlock_bh(&ip_conntrack_lock);
+        if (last)
+                ip_conntrack_put(last);
 
         DEBUGP("leaving, last bucket=%lu id=%u\n", cb->args[0], *id);
-
         return skb->len;
 }
 
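
The restructuring above changes where the dump code drops its reference to
last: previously ip_conntrack_put() could run with ip_conntrack_lock still
held, and since the final put can trigger the conntrack destroy path, which
takes the same lock, that risked a deadlock; now the put happens only after
the lock is released. The dump itself follows netlink's resumable-cursor
convention, where cb->args[] persists between invocations. That pattern in
miniature (names and the one-item-per-bucket layout are purely
illustrative):

    #include <stddef.h>

    struct cursor {
        size_t bucket;    /* cb->args[0] analogue */
        void  *last;      /* cb->args[1] analogue */
    };

    /* emit() returns 0 when the receiving buffer is full.  Called
     * repeatedly until it reports that nothing was left to dump. */
    static int dump_chunk(struct cursor *c, void **table, size_t nbuckets,
                          int (*emit)(void *item))
    {
        for (; c->bucket < nbuckets; c->bucket++) {
            void *item = table[c->bucket];

            if (!item)
                continue;
            if (c->last) {           /* skip forward to the resume point */
                if (item != c->last)
                    continue;
                c->last = NULL;      /* re-emit the item that didn't fit */
            }
            if (!emit(item)) {       /* stop here, resume on the next call */
                c->last = item;
                return 1;
            }
        }
        return 0;                    /* dump complete */
    }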
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index f316ff5fd8a6..048514f15f2f 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -230,7 +230,7 @@ ipt_do_table(struct sk_buff **pskb,
         const char *indev, *outdev;
         void *table_base;
         struct ipt_entry *e, *back;
-        struct xt_table_info *private = table->private;
+        struct xt_table_info *private;
 
         /* Initialization */
         ip = (*pskb)->nh.iph;
@@ -247,6 +247,7 @@ ipt_do_table(struct sk_buff **pskb,
 
         read_lock_bh(&table->lock);
         IP_NF_ASSERT(table->valid_hooks & (1 << hook));
+        private = table->private;
         table_base = (void *)private->entries[smp_processor_id()];
         e = get_entry(table_base, private->hook_entry[hook]);
 
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index 1044b6fce0d5..3d6e9a351150 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -712,6 +712,11 @@ discard_it:
         return 0;
 }
 
+/*
+ * Special lock-class for __icmpv6_socket:
+ */
+static struct lock_class_key icmpv6_socket_sk_dst_lock_key;
+
 int __init icmpv6_init(struct net_proto_family *ops)
 {
         struct sock *sk;
@@ -730,6 +735,14 @@ int __init icmpv6_init(struct net_proto_family *ops)
 
                 sk = per_cpu(__icmpv6_socket, i)->sk;
                 sk->sk_allocation = GFP_ATOMIC;
+                /*
+                 * Split off their lock-class, because sk->sk_dst_lock
+                 * gets used from softirqs, which is safe for
+                 * __icmpv6_socket (because those never get directly used
+                 * via userspace syscalls), but unsafe for normal sockets.
+                 */
+                lockdep_set_class(&sk->sk_dst_lock,
+                                  &icmpv6_socket_sk_dst_lock_key);
 
                 /* Enough space for 2 64K ICMP packets, including
                  * sk_buff struct overhead.
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index 9d697d4dcffc..639eb20c9f1f 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -268,13 +268,14 @@ int ipv6_sock_mc_drop(struct sock *sk, int ifindex, struct in6_addr *addr)
                 if ((dev = dev_get_by_index(mc_lst->ifindex)) != NULL) {
                         struct inet6_dev *idev = in6_dev_get(dev);
 
+                        (void) ip6_mc_leave_src(sk, mc_lst, idev);
                         if (idev) {
-                                (void) ip6_mc_leave_src(sk,mc_lst,idev);
                                 __ipv6_dev_mc_dec(idev, &mc_lst->addr);
                                 in6_dev_put(idev);
                         }
                         dev_put(dev);
-                }
+                } else
+                        (void) ip6_mc_leave_src(sk, mc_lst, NULL);
                 sock_kfree_s(sk, mc_lst, sizeof(*mc_lst));
                 return 0;
         }
@@ -334,13 +335,14 @@ void ipv6_sock_mc_close(struct sock *sk)
                 if (dev) {
                         struct inet6_dev *idev = in6_dev_get(dev);
 
+                        (void) ip6_mc_leave_src(sk, mc_lst, idev);
                         if (idev) {
-                                (void) ip6_mc_leave_src(sk, mc_lst, idev);
                                 __ipv6_dev_mc_dec(idev, &mc_lst->addr);
                                 in6_dev_put(idev);
                         }
                         dev_put(dev);
-                }
+                } else
+                        (void) ip6_mc_leave_src(sk, mc_lst, NULL);
 
                 sock_kfree_s(sk, mc_lst, sizeof(*mc_lst));
 
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index af4845971f70..6527d4e048d8 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -429,9 +429,9 @@ ctnetlink_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
                         cb->args[0], *id);
 
         read_lock_bh(&nf_conntrack_lock);
+        last = (struct nf_conn *)cb->args[1];
         for (; cb->args[0] < nf_conntrack_htable_size; cb->args[0]++) {
 restart:
-                last = (struct nf_conn *)cb->args[1];
                 list_for_each_prev(i, &nf_conntrack_hash[cb->args[0]]) {
                         h = (struct nf_conntrack_tuple_hash *) i;
                         if (DIRECTION(h) != IP_CT_DIR_ORIGINAL)
@@ -442,13 +442,10 @@ restart:
                  * then dump everything. */
                 if (l3proto && L3PROTO(ct) != l3proto)
                         continue;
-                if (last != NULL) {
-                        if (ct == last) {
-                                nf_ct_put(last);
-                                cb->args[1] = 0;
-                                last = NULL;
-                        } else
+                if (cb->args[1]) {
+                        if (ct != last)
                                 continue;
+                        cb->args[1] = 0;
                 }
                 if (ctnetlink_fill_info(skb, NETLINK_CB(cb->skb).pid,
                                         cb->nlh->nlmsg_seq,
@@ -459,17 +456,17 @@ restart:
                                 goto out;
                         }
                 }
-                if (last != NULL) {
-                        nf_ct_put(last);
+                if (cb->args[1]) {
                         cb->args[1] = 0;
                         goto restart;
                 }
         }
 out:
         read_unlock_bh(&nf_conntrack_lock);
+        if (last)
+                nf_ct_put(last);
 
         DEBUGP("leaving, last bucket=%lu id=%u\n", cb->args[0], *id);
-
         return skb->len;
 }
 
diff --git a/net/netfilter/xt_physdev.c b/net/netfilter/xt_physdev.c
index a9f4f6f3c628..63a965467465 100644
--- a/net/netfilter/xt_physdev.c
+++ b/net/netfilter/xt_physdev.c
@@ -10,6 +10,7 @@
 
 #include <linux/module.h>
 #include <linux/skbuff.h>
+#include <linux/netfilter_bridge.h>
 #include <linux/netfilter/xt_physdev.h>
 #include <linux/netfilter/x_tables.h>
 #include <linux/netfilter_bridge.h>
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
index eea366966740..0a6cfa0005be 100644
--- a/net/sched/cls_u32.c
+++ b/net/sched/cls_u32.c
@@ -796,7 +796,7 @@ static int __init init_u32(void)
 {
         printk("u32 classifier\n");
 #ifdef CONFIG_CLS_U32_PERF
-        printk("    Perfomance counters on\n");
+        printk("    Performance counters on\n");
 #endif
 #ifdef CONFIG_NET_CLS_POLICE
         printk("    OLD policer on \n");