author     Linus Torvalds <torvalds@linux-foundation.org>  2009-05-19 21:43:50 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2009-05-19 21:43:50 -0400
commit     fbb5ba92766a0a7803635f053220c325d26def9c (patch)
tree       4cf6d9ed725b5d1254d4d29f5750c37676173490
parent     4fe1103201057e74f630b1cb8d8d49bd6ce0e666 (diff)
parent     bc8a5397433e4effbaddfa7e462d10b3c060cabb (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6:
  ipv4: make default for INET_LRO consistent with help text
  net: fix skb_seq_read returning wrong offset/length for page frag data
  pkt_sched: gen_estimator: use 64 bit intermediate counters for bps
  be2net: add two new pci device ids to pci device table
  sch_teql: should not dereference skb after ndo_start_xmit()
  tcp: fix MSG_PEEK race check
  Doc: fixed descriptions on /proc/sys/net/core/* and /proc/sys/net/unix/*
  Neterion: *FIFO1_DMA_ERR set twice, should 2nd be *FIFO2_DMA_ERR?
  mv643xx_eth: fix PPC DMA breakage
  bonding: fix link down handling in 802.3ad mode
  bridge: fix initial packet flood if !STP
  bridge: relay bridge multicast pkgs if !STP
  NET: Meth: Fix unsafe mix of irq and non-irq spinlocks.
  mlx4_en: Fix not deleted napi structures
  ipconfig: handle case of delayed DHCP server
  netpoll: don't dereference NULL dev from np
  wimax/i2400m: fix device crash: fix optimization in _roq_queue_update_ws
-rw-r--r--  Documentation/networking/ip-sysctl.txt  15
-rw-r--r--  drivers/net/benet/be.h                   14
-rw-r--r--  drivers/net/benet/be_main.c              10
-rw-r--r--  drivers/net/bonding/bond_3ad.c           11
-rw-r--r--  drivers/net/meth.c                       27
-rw-r--r--  drivers/net/mlx4/en_cq.c                  4
-rw-r--r--  drivers/net/mv643xx_eth.c                41
-rw-r--r--  drivers/net/vxge/vxge-traffic.c           2
-rw-r--r--  drivers/net/wimax/i2400m/rx.c             5
-rw-r--r--  net/bridge/br_input.c                     5
-rw-r--r--  net/bridge/br_stp.c                       3
-rw-r--r--  net/core/gen_estimator.c                 13
-rw-r--r--  net/core/netpoll.c                        8
-rw-r--r--  net/core/skbuff.c                         2
-rw-r--r--  net/ipv4/Kconfig                          2
-rw-r--r--  net/ipv4/ipconfig.c                      12
-rw-r--r--  net/ipv4/tcp.c                            5
-rw-r--r--  net/sched/sch_teql.c                      5
18 files changed, 123 insertions(+), 61 deletions(-)
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index ec5de02f543f..b121c5db707f 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -1266,13 +1266,22 @@ sctp_rmem - vector of 3 INTEGERs: min, default, max
 sctp_wmem - vector of 3 INTEGERs: min, default, max
 	See tcp_wmem for a description.
 
-UNDOCUMENTED:
 
 /proc/sys/net/core/*
-	dev_weight FIXME
+dev_weight - INTEGER
+	The maximum number of packets that kernel can handle on a NAPI
+	interrupt, it's a Per-CPU variable.
+
+	Default: 64
 
 /proc/sys/net/unix/*
-	max_dgram_qlen FIXME
+max_dgram_qlen - INTEGER
+	The maximum length of dgram socket receive queue
+
+	Default: 10
+
+
+UNDOCUMENTED:
 
 /proc/sys/net/irda/*
 	fast_poll_increase FIXME
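
Annotation: both new entries are ordinary integer sysctls, so they can be inspected from userspace like any other /proc file. A minimal sketch (not part of the patch, path as documented above) that reads the NAPI budget:

/* Userspace sketch: read the dev_weight sysctl documented above. */
#include <stdio.h>

int main(void)
{
        FILE *f = fopen("/proc/sys/net/core/dev_weight", "r");
        int weight;

        if (f && fscanf(f, "%d", &weight) == 1)
                printf("dev_weight = %d packets per NAPI poll\n", weight);
        if (f)
                fclose(f);
        return 0;
}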
diff --git a/drivers/net/benet/be.h b/drivers/net/benet/be.h
index c49ddd08b2aa..b4bb06fdf307 100644
--- a/drivers/net/benet/be.h
+++ b/drivers/net/benet/be.h
@@ -35,8 +35,22 @@
 #define DRV_VER			"2.0.348"
 #define DRV_NAME		"be2net"
 #define BE_NAME			"ServerEngines BladeEngine2 10Gbps NIC"
+#define OC_NAME			"Emulex OneConnect 10Gbps NIC"
 #define DRV_DESC		BE_NAME "Driver"
 
+#define BE_VENDOR_ID		0x19a2
+#define BE_DEVICE_ID1		0x211
+#define OC_DEVICE_ID1		0x700
+#define OC_DEVICE_ID2		0x701
+
+static inline char *nic_name(struct pci_dev *pdev)
+{
+	if (pdev->device == OC_DEVICE_ID1 || pdev->device == OC_DEVICE_ID2)
+		return OC_NAME;
+	else
+		return BE_NAME;
+}
+
 /* Number of bytes of an RX frame that are copied to skb->data */
 #define BE_HDR_LEN		64
 #define BE_MAX_JUMBO_FRAME_SIZE	9018
diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c
index 30d0c81c989e..5c378b5e8e41 100644
--- a/drivers/net/benet/be_main.c
+++ b/drivers/net/benet/be_main.c
@@ -28,10 +28,10 @@ static unsigned int rx_frag_size = 2048;
 module_param(rx_frag_size, uint, S_IRUGO);
 MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
 
-#define BE_VENDOR_ID		0x19a2
-#define BE2_DEVICE_ID_1		0x0211
 static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
-	{ PCI_DEVICE(BE_VENDOR_ID, BE2_DEVICE_ID_1) },
+	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
+	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
+	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
 	{ 0 }
 };
 MODULE_DEVICE_TABLE(pci, be_dev_ids);
@@ -1859,7 +1859,7 @@ static int __devinit be_probe(struct pci_dev *pdev,
 	if (status != 0)
 		goto stats_clean;
 
-	dev_info(&pdev->dev, BE_NAME " port %d\n", adapter->port_num);
+	dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
 	return 0;
 
 stats_clean:
@@ -1873,7 +1873,7 @@ rel_reg:
 disable_dev:
 	pci_disable_device(pdev);
 do_none:
-	dev_warn(&pdev->dev, BE_NAME " initialization failed\n");
+	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
 	return status;
 }
 
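
Annotation: for orientation, a condensed sketch of how a table like be_dev_ids is consumed. be_probe appears in the hunk header above; the be_driver and be_remove names follow the usual shape of be_main.c but are not shown in this patch, so treat the field list as illustrative:

/* Sketch: the PCI core matches devices against be_dev_ids, so the
 * two new OneConnect IDs are picked up with no other changes. */
static struct pci_driver be_driver = {
        .name           = DRV_NAME,
        .id_table       = be_dev_ids,
        .probe          = be_probe,
        .remove         = be_remove,
};

static int __init be_init_module(void)
{
        return pci_register_driver(&be_driver);
}
module_init(be_init_module);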
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index 8c2e5ab51f08..faf094abef7f 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -1465,6 +1465,12 @@ static struct aggregator *ad_agg_selection_test(struct aggregator *best,
 	return best;
 }
 
+static int agg_device_up(const struct aggregator *agg)
+{
+	return (netif_running(agg->slave->dev) &&
+		netif_carrier_ok(agg->slave->dev));
+}
+
 /**
  * ad_agg_selection_logic - select an aggregation group for a team
  * @aggregator: the aggregator we're looking at
@@ -1496,14 +1502,13 @@ static void ad_agg_selection_logic(struct aggregator *agg)
 	struct port *port;
 
 	origin = agg;
-
 	active = __get_active_agg(agg);
-	best = active;
+	best = (active && agg_device_up(active)) ? active : NULL;
 
 	do {
 		agg->is_active = 0;
 
-		if (agg->num_of_ports)
+		if (agg->num_of_ports && agg_device_up(agg))
 			best = ad_agg_selection_test(best, agg);
 
 	} while ((agg = __get_next_agg(agg)));
diff --git a/drivers/net/meth.c b/drivers/net/meth.c
index aa08987f6e81..dbd3436912b8 100644
--- a/drivers/net/meth.c
+++ b/drivers/net/meth.c
@@ -127,11 +127,11 @@ static unsigned long mdio_read(struct meth_private *priv, unsigned long phyreg)
 static int mdio_probe(struct meth_private *priv)
 {
 	int i;
-	unsigned long p2, p3;
+	unsigned long p2, p3, flags;
 	/* check if phy is detected already */
 	if(priv->phy_addr>=0&&priv->phy_addr<32)
 		return 0;
-	spin_lock(&priv->meth_lock);
+	spin_lock_irqsave(&priv->meth_lock, flags);
 	for (i=0;i<32;++i){
 		priv->phy_addr=i;
 		p2=mdio_read(priv,2);
@@ -157,7 +157,7 @@ static int mdio_probe(struct meth_private *priv)
 			break;
 		}
 	}
-	spin_unlock(&priv->meth_lock);
+	spin_unlock_irqrestore(&priv->meth_lock, flags);
 	if(priv->phy_addr<32) {
 		return 0;
 	}
@@ -373,14 +373,14 @@ static int meth_release(struct net_device *dev)
 static void meth_rx(struct net_device* dev, unsigned long int_status)
 {
 	struct sk_buff *skb;
-	unsigned long status;
+	unsigned long status, flags;
 	struct meth_private *priv = netdev_priv(dev);
 	unsigned long fifo_rptr = (int_status & METH_INT_RX_RPTR_MASK) >> 8;
 
-	spin_lock(&priv->meth_lock);
+	spin_lock_irqsave(&priv->meth_lock, flags);
 	priv->dma_ctrl &= ~METH_DMA_RX_INT_EN;
 	mace->eth.dma_ctrl = priv->dma_ctrl;
-	spin_unlock(&priv->meth_lock);
+	spin_unlock_irqrestore(&priv->meth_lock, flags);
 
 	if (int_status & METH_INT_RX_UNDERFLOW) {
 		fifo_rptr = (fifo_rptr - 1) & 0x0f;
@@ -452,12 +452,12 @@ static void meth_rx(struct net_device* dev, unsigned long int_status)
 		mace->eth.rx_fifo = priv->rx_ring_dmas[priv->rx_write];
 		ADVANCE_RX_PTR(priv->rx_write);
 	}
-	spin_lock(&priv->meth_lock);
+	spin_lock_irqsave(&priv->meth_lock, flags);
 	/* In case there was underflow, and Rx DMA was disabled */
 	priv->dma_ctrl |= METH_DMA_RX_INT_EN | METH_DMA_RX_EN;
 	mace->eth.dma_ctrl = priv->dma_ctrl;
 	mace->eth.int_stat = METH_INT_RX_THRESHOLD;
-	spin_unlock(&priv->meth_lock);
+	spin_unlock_irqrestore(&priv->meth_lock, flags);
 }
 
 static int meth_tx_full(struct net_device *dev)
@@ -470,11 +470,11 @@ static int meth_tx_full(struct net_device *dev)
 static void meth_tx_cleanup(struct net_device* dev, unsigned long int_status)
 {
 	struct meth_private *priv = netdev_priv(dev);
-	unsigned long status;
+	unsigned long status, flags;
 	struct sk_buff *skb;
 	unsigned long rptr = (int_status&TX_INFO_RPTR) >> 16;
 
-	spin_lock(&priv->meth_lock);
+	spin_lock_irqsave(&priv->meth_lock, flags);
 
 	/* Stop DMA notification */
 	priv->dma_ctrl &= ~(METH_DMA_TX_INT_EN);
@@ -527,12 +527,13 @@ static void meth_tx_cleanup(struct net_device* dev, unsigned long int_status)
 	}
 
 	mace->eth.int_stat = METH_INT_TX_EMPTY | METH_INT_TX_PKT;
-	spin_unlock(&priv->meth_lock);
+	spin_unlock_irqrestore(&priv->meth_lock, flags);
 }
 
 static void meth_error(struct net_device* dev, unsigned status)
 {
 	struct meth_private *priv = netdev_priv(dev);
+	unsigned long flags;
 
 	printk(KERN_WARNING "meth: error status: 0x%08x\n",status);
 	/* check for errors too... */
@@ -547,7 +548,7 @@ static void meth_error(struct net_device* dev, unsigned status)
 		printk(KERN_WARNING "meth: Rx overflow\n");
 	if (status & (METH_INT_RX_UNDERFLOW)) {
 		printk(KERN_WARNING "meth: Rx underflow\n");
-		spin_lock(&priv->meth_lock);
+		spin_lock_irqsave(&priv->meth_lock, flags);
 		mace->eth.int_stat = METH_INT_RX_UNDERFLOW;
 		/* more underflow interrupts will be delivered,
 		 * effectively throwing us into an infinite loop.
@@ -555,7 +556,7 @@ static void meth_error(struct net_device* dev, unsigned status)
 		priv->dma_ctrl &= ~METH_DMA_RX_EN;
 		mace->eth.dma_ctrl = priv->dma_ctrl;
 		DPRINTK("Disabled meth Rx DMA temporarily\n");
-		spin_unlock(&priv->meth_lock);
+		spin_unlock_irqrestore(&priv->meth_lock, flags);
 	}
 	mace->eth.int_stat = METH_INT_ERROR;
 }
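
Annotation: every hunk in this file is the same conversion. meth_lock is also taken from the interrupt path, so process-context holders must disable local interrupts; otherwise an interrupt arriving on the same CPU while the lock is held spins on it forever. A minimal sketch of the rule, with hypothetical function names:

/* Minimal sketch (hypothetical names): a lock shared with an IRQ
 * handler must be taken with irqsave in process context. */
static void my_irq_handler_path(struct meth_private *priv)
{
        spin_lock(&priv->meth_lock);            /* IRQs already off here */
        /* ... touch priv->dma_ctrl and device registers ... */
        spin_unlock(&priv->meth_lock);
}

static void my_process_context_path(struct meth_private *priv)
{
        unsigned long flags;

        spin_lock_irqsave(&priv->meth_lock, flags);   /* blocks the IRQ path */
        /* ... touch priv->dma_ctrl and device registers ... */
        spin_unlock_irqrestore(&priv->meth_lock, flags);
}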
diff --git a/drivers/net/mlx4/en_cq.c b/drivers/net/mlx4/en_cq.c
index 91f50de84be9..a276125b709b 100644
--- a/drivers/net/mlx4/en_cq.c
+++ b/drivers/net/mlx4/en_cq.c
@@ -125,8 +125,10 @@ void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
 
 	if (cq->is_tx)
 		del_timer(&cq->timer);
-	else
+	else {
 		napi_disable(&cq->napi);
+		netif_napi_del(&cq->napi);
+	}
 
 	mlx4_cq_free(mdev->dev, &cq->mcq);
 }
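
Annotation: the leak being fixed is that the RX setup path registers the CQ's napi context with netif_napi_add(), and without a matching netif_napi_del() the stale napi_struct stays linked on the device's napi list after the CQ is destroyed. The required pairing, sketched with an illustrative poll function and weight:

static void cq_setup(struct net_device *dev, struct mlx4_en_cq *cq)
{
        /* poll callback and weight are illustrative */
        netif_napi_add(dev, &cq->napi, mlx4_en_poll_rx_cq, 64);
        napi_enable(&cq->napi);
}

static void cq_teardown(struct mlx4_en_cq *cq)
{
        napi_disable(&cq->napi);        /* quiesce polling */
        netif_napi_del(&cq->napi);      /* unlink from dev->napi_list */
}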
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
index a400d7115f78..6bb5af35eda6 100644
--- a/drivers/net/mv643xx_eth.c
+++ b/drivers/net/mv643xx_eth.c
@@ -569,7 +569,7 @@ static int rxq_process(struct rx_queue *rxq, int budget)
 		if (rxq->rx_curr_desc == rxq->rx_ring_size)
 			rxq->rx_curr_desc = 0;
 
-		dma_unmap_single(NULL, rx_desc->buf_ptr,
+		dma_unmap_single(mp->dev->dev.parent, rx_desc->buf_ptr,
 				 rx_desc->buf_size, DMA_FROM_DEVICE);
 		rxq->rx_desc_count--;
 		rx++;
@@ -678,8 +678,9 @@ static int rxq_refill(struct rx_queue *rxq, int budget)
 
 		rx_desc = rxq->rx_desc_area + rx;
 
-		rx_desc->buf_ptr = dma_map_single(NULL, skb->data,
-						  mp->skb_size, DMA_FROM_DEVICE);
+		rx_desc->buf_ptr = dma_map_single(mp->dev->dev.parent,
+						  skb->data, mp->skb_size,
+						  DMA_FROM_DEVICE);
 		rx_desc->buf_size = mp->skb_size;
 		rxq->rx_skb[rx] = skb;
 		wmb();
@@ -718,6 +719,7 @@ static inline unsigned int has_tiny_unaligned_frags(struct sk_buff *skb)
 
 static void txq_submit_frag_skb(struct tx_queue *txq, struct sk_buff *skb)
 {
+	struct mv643xx_eth_private *mp = txq_to_mp(txq);
 	int nr_frags = skb_shinfo(skb)->nr_frags;
 	int frag;
 
@@ -746,10 +748,10 @@ static void txq_submit_frag_skb(struct tx_queue *txq, struct sk_buff *skb)
 
 		desc->l4i_chk = 0;
 		desc->byte_cnt = this_frag->size;
-		desc->buf_ptr = dma_map_page(NULL, this_frag->page,
-					     this_frag->page_offset,
-					     this_frag->size,
-					     DMA_TO_DEVICE);
+		desc->buf_ptr = dma_map_page(mp->dev->dev.parent,
+					     this_frag->page,
+					     this_frag->page_offset,
+					     this_frag->size, DMA_TO_DEVICE);
 	}
 }
 
@@ -826,7 +828,8 @@ no_csum:
 
 	desc->l4i_chk = l4i_chk;
 	desc->byte_cnt = length;
-	desc->buf_ptr = dma_map_single(NULL, skb->data, length, DMA_TO_DEVICE);
+	desc->buf_ptr = dma_map_single(mp->dev->dev.parent, skb->data,
+				       length, DMA_TO_DEVICE);
 
 	__skb_queue_tail(&txq->tx_skb, skb);
 
@@ -956,10 +959,10 @@ static int txq_reclaim(struct tx_queue *txq, int budget, int force)
 		}
 
 		if (cmd_sts & TX_FIRST_DESC) {
-			dma_unmap_single(NULL, desc->buf_ptr,
+			dma_unmap_single(mp->dev->dev.parent, desc->buf_ptr,
 					 desc->byte_cnt, DMA_TO_DEVICE);
 		} else {
-			dma_unmap_page(NULL, desc->buf_ptr,
+			dma_unmap_page(mp->dev->dev.parent, desc->buf_ptr,
 				       desc->byte_cnt, DMA_TO_DEVICE);
 		}
 
@@ -1894,9 +1897,9 @@ static int rxq_init(struct mv643xx_eth_private *mp, int index)
 						mp->rx_desc_sram_size);
 		rxq->rx_desc_dma = mp->rx_desc_sram_addr;
 	} else {
-		rxq->rx_desc_area = dma_alloc_coherent(NULL, size,
-						       &rxq->rx_desc_dma,
-						       GFP_KERNEL);
+		rxq->rx_desc_area = dma_alloc_coherent(mp->dev->dev.parent,
						       size, &rxq->rx_desc_dma,
+						       GFP_KERNEL);
 	}
 
 	if (rxq->rx_desc_area == NULL) {
@@ -1947,7 +1950,7 @@ out_free:
 	if (index == 0 && size <= mp->rx_desc_sram_size)
 		iounmap(rxq->rx_desc_area);
 	else
-		dma_free_coherent(NULL, size,
+		dma_free_coherent(mp->dev->dev.parent, size,
 				  rxq->rx_desc_area,
 				  rxq->rx_desc_dma);
 
@@ -1979,7 +1982,7 @@ static void rxq_deinit(struct rx_queue *rxq)
 	    rxq->rx_desc_area_size <= mp->rx_desc_sram_size)
 		iounmap(rxq->rx_desc_area);
 	else
-		dma_free_coherent(NULL, rxq->rx_desc_area_size,
+		dma_free_coherent(mp->dev->dev.parent, rxq->rx_desc_area_size,
 				  rxq->rx_desc_area, rxq->rx_desc_dma);
 
 	kfree(rxq->rx_skb);
@@ -2007,9 +2010,9 @@ static int txq_init(struct mv643xx_eth_private *mp, int index)
 						mp->tx_desc_sram_size);
 		txq->tx_desc_dma = mp->tx_desc_sram_addr;
 	} else {
-		txq->tx_desc_area = dma_alloc_coherent(NULL, size,
-						       &txq->tx_desc_dma,
-						       GFP_KERNEL);
+		txq->tx_desc_area = dma_alloc_coherent(mp->dev->dev.parent,
+						       size, &txq->tx_desc_dma,
+						       GFP_KERNEL);
 	}
 
 	if (txq->tx_desc_area == NULL) {
@@ -2053,7 +2056,7 @@ static void txq_deinit(struct tx_queue *txq)
 	    txq->tx_desc_area_size <= mp->tx_desc_sram_size)
 		iounmap(txq->tx_desc_area);
 	else
-		dma_free_coherent(NULL, txq->tx_desc_area_size,
+		dma_free_coherent(mp->dev->dev.parent, txq->tx_desc_area_size,
 				  txq->tx_desc_area, txq->tx_desc_dma);
 }
 
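
Annotation: all ten hunks here make one substitution. The DMA API needs the struct device that owns the mapping in order to pick the right dma ops; passing NULL happened to work on x86 but breaks on PPC, and for this platform driver the right device is the net_device's parent. The pattern, reduced to a hypothetical helper:

/* Reduced sketch (hypothetical helper): map and unmap against the
 * platform device that owns the DMA, never NULL. */
static dma_addr_t map_rx_buffer(struct mv643xx_eth_private *mp,
                                struct sk_buff *skb)
{
        struct device *dma_dev = mp->dev->dev.parent;   /* platform device */

        return dma_map_single(dma_dev, skb->data,
                              mp->skb_size, DMA_FROM_DEVICE);
}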
diff --git a/drivers/net/vxge/vxge-traffic.c b/drivers/net/vxge/vxge-traffic.c
index 7be0ae10d69b..c2eeac4125f3 100644
--- a/drivers/net/vxge/vxge-traffic.c
+++ b/drivers/net/vxge/vxge-traffic.c
@@ -115,7 +115,7 @@ enum vxge_hw_status vxge_hw_vpath_intr_enable(struct __vxge_hw_vpath_handle *vp)
 			VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_POISON|
 			VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_POISON|
 			VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_DMA_ERR|
-			VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_DMA_ERR), 0, 32),
+			VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_DMA_ERR), 0, 32),
 			&vp_reg->kdfcctl_errors_mask);
 
 	__vxge_hw_pio_mem_write32_upper(0, &vp_reg->vpath_ppif_int_mask);
diff --git a/drivers/net/wimax/i2400m/rx.c b/drivers/net/wimax/i2400m/rx.c
index 02419bfd64b5..f9fc38902322 100644
--- a/drivers/net/wimax/i2400m/rx.c
+++ b/drivers/net/wimax/i2400m/rx.c
@@ -819,10 +819,9 @@ void i2400m_roq_queue_update_ws(struct i2400m *i2400m, struct i2400m_roq *roq,
 		roq_data = (struct i2400m_roq_data *) &skb->cb;
 		i2400m_net_erx(i2400m, skb, roq_data->cs);
 	}
-	else {
+	else
 		__i2400m_roq_queue(i2400m, roq, skb, sn, nsn);
-		__i2400m_roq_update_ws(i2400m, roq, sn + 1);
-	}
+	__i2400m_roq_update_ws(i2400m, roq, sn + 1);
 	i2400m_roq_log_add(i2400m, roq, I2400M_RO_TYPE_PACKET_WS,
 			   old_ws, len, sn, nsn, roq->ws);
 }
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
index 30b88777c3df..5ee1a3682bf2 100644
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -134,6 +134,10 @@ struct sk_buff *br_handle_frame(struct net_bridge_port *p, struct sk_buff *skb)
 	if (skb->protocol == htons(ETH_P_PAUSE))
 		goto drop;
 
+	/* If STP is turned off, then forward */
+	if (p->br->stp_enabled == BR_NO_STP && dest[5] == 0)
+		goto forward;
+
 	if (NF_HOOK(PF_BRIDGE, NF_BR_LOCAL_IN, skb, skb->dev,
 		    NULL, br_handle_local_finish))
 		return NULL;	/* frame consumed by filter */
@@ -141,6 +145,7 @@ struct sk_buff *br_handle_frame(struct net_bridge_port *p, struct sk_buff *skb)
 		return skb;	/* continue processing */
 	}
 
+forward:
 	switch (p->state) {
 	case BR_STATE_FORWARDING:
 		rhook = rcu_dereference(br_should_route_hook);
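
Annotation: the dest[5] == 0 test reads oddly in isolation. Earlier in br_handle_frame() the destination has already been matched against the 01:80:C2:00:00:0X link-local block, so checking the final octet for zero singles out exactly the STP group address. For reference, the address in question (a sketch; the kernel defines an equivalent constant):

/* The bridge group (STP BPDU) destination address; with STP off,
 * frames to it are now forwarded instead of being consumed. */
static const u8 br_group_address[ETH_ALEN] = {
        0x01, 0x80, 0xc2, 0x00, 0x00, 0x00
};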
diff --git a/net/bridge/br_stp.c b/net/bridge/br_stp.c
index 6e63ec3f1fcf..0660515f3992 100644
--- a/net/bridge/br_stp.c
+++ b/net/bridge/br_stp.c
@@ -297,6 +297,9 @@ void br_topology_change_detection(struct net_bridge *br)
 {
 	int isroot = br_is_root_bridge(br);
 
+	if (br->stp_enabled != BR_KERNEL_STP)
+		return;
+
 	pr_info("%s: topology change detected, %s\n", br->dev->name,
 		isroot ? "propagating" : "sending tcn bpdu");
 
diff --git a/net/core/gen_estimator.c b/net/core/gen_estimator.c
index 9cc9f95b109e..6d62d4618cfc 100644
--- a/net/core/gen_estimator.c
+++ b/net/core/gen_estimator.c
@@ -66,9 +66,9 @@
 
    NOTES.
 
-   * The stored value for avbps is scaled by 2^5, so that maximal
-     rate is ~1Gbit, avpps is scaled by 2^10.
-
+   * avbps is scaled by 2^5, avpps is scaled by 2^10.
+   * both values are reported as 32 bit unsigned values. bps can
+     overflow for fast links : max speed being 34360Mbit/sec
    * Minimal interval is HZ/4=250msec (it is the greatest common divisor
      for HZ=100 and HZ=1024 8)), maximal interval
      is (HZ*2^EST_MAX_INTERVAL)/4 = 8sec. Shorter intervals
@@ -86,9 +86,9 @@ struct gen_estimator
 	spinlock_t		*stats_lock;
 	int			ewma_log;
 	u64			last_bytes;
+	u64			avbps;
 	u32			last_packets;
 	u32			avpps;
-	u32			avbps;
 	struct rcu_head		e_rcu;
 	struct rb_node		node;
 };
@@ -115,6 +115,7 @@ static void est_timer(unsigned long arg)
 	rcu_read_lock();
 	list_for_each_entry_rcu(e, &elist[idx].list, list) {
 		u64 nbytes;
+		u64 brate;
 		u32 npackets;
 		u32 rate;
 
@@ -125,9 +126,9 @@ static void est_timer(unsigned long arg)
 
 		nbytes = e->bstats->bytes;
 		npackets = e->bstats->packets;
-		rate = (nbytes - e->last_bytes)<<(7 - idx);
+		brate = (nbytes - e->last_bytes)<<(7 - idx);
 		e->last_bytes = nbytes;
-		e->avbps += ((long)rate - (long)e->avbps) >> e->ewma_log;
+		e->avbps += ((s64)(brate - e->avbps)) >> e->ewma_log;
 		e->rate_est->bps = (e->avbps+0xF)>>5;
 
 		rate = (npackets - e->last_packets)<<(12 - idx);
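
Annotation: why the intermediate must be 64 bit. The byte delta is scaled by up to 2^7 and avbps itself carries a 2^5 scale, so u32 arithmetic wraps near 2^32 >> 5 = 2^27 bytes/sec, about 1 Gbit/s, which is exactly where the old comment drew the line. A standalone sketch of the update step (names are not the kernel's):

/* Standalone sketch of est_timer()'s EWMA step with a 64 bit
 * intermediate; with u32 arithmetic, brate wraps above ~1 Gbit/s. */
static u64 ewma_bps_step(u64 avbps, u64 bytes_delta, int idx, int ewma_log)
{
        u64 brate = bytes_delta << (7 - idx);   /* scaled rate sample */

        avbps += (s64)(brate - avbps) >> ewma_log;
        return avbps;   /* reported as (avbps + 0xF) >> 5, still u32 */
}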
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index b5873bdff612..64f51eec6576 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -175,9 +175,13 @@ static void service_arp_queue(struct netpoll_info *npi)
 void netpoll_poll(struct netpoll *np)
 {
 	struct net_device *dev = np->dev;
-	const struct net_device_ops *ops = dev->netdev_ops;
+	const struct net_device_ops *ops;
+
+	if (!dev || !netif_running(dev))
+		return;
 
-	if (!dev || !netif_running(dev) || !ops->ndo_poll_controller)
+	ops = dev->netdev_ops;
+	if (!ops->ndo_poll_controller)
 		return;
 
 	/* Process pending work on NIC */
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index d152394b2611..e505b5392e1e 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -2288,7 +2288,7 @@ unsigned int skb_seq_read(unsigned int consumed, const u8 **data,
 next_skb:
 	block_limit = skb_headlen(st->cur_skb) + st->stepped_offset;
 
-	if (abs_offset < block_limit) {
+	if (abs_offset < block_limit && !st->frag_data) {
 		*data = st->cur_skb->data + (abs_offset - st->stepped_offset);
 		return block_limit - abs_offset;
 	}
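
Annotation: the bug was that once the iterator had mapped page-frag data (st->frag_data non-NULL), a later call could still satisfy the read from the linear head and hand back a stale offset/length pair. For orientation, a sketch of the intended consumer loop for this API:

static void walk_skb(struct sk_buff *skb)
{
        struct skb_seq_state st;
        const u8 *data;
        unsigned int len, consumed = 0;

        skb_prepare_seq_read(skb, 0, skb->len, &st);
        while ((len = skb_seq_read(consumed, &data, &st)) != 0) {
                /* process len bytes at data */
                consumed += len;
        }
        /* needed only when stopping before skb_seq_read() returns 0 */
        skb_abort_seq_read(&st);
}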
diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig
index 9d26a3da37e5..5b919f7b45db 100644
--- a/net/ipv4/Kconfig
+++ b/net/ipv4/Kconfig
@@ -408,7 +408,7 @@ config INET_XFRM_MODE_BEET
 
 config INET_LRO
 	bool "Large Receive Offload (ipv4/tcp)"
-
+	default y
 	---help---
 	  Support for Large Receive Offload (ipv4/tcp).
 
diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
index 90d22ae0a419..88bf051d0cbb 100644
--- a/net/ipv4/ipconfig.c
+++ b/net/ipv4/ipconfig.c
@@ -139,6 +139,8 @@ __be32 ic_servaddr = NONE; /* Boot server IP address */
139__be32 root_server_addr = NONE; /* Address of NFS server */ 139__be32 root_server_addr = NONE; /* Address of NFS server */
140u8 root_server_path[256] = { 0, }; /* Path to mount as root */ 140u8 root_server_path[256] = { 0, }; /* Path to mount as root */
141 141
142u32 ic_dev_xid; /* Device under configuration */
143
142/* vendor class identifier */ 144/* vendor class identifier */
143static char vendor_class_identifier[253] __initdata; 145static char vendor_class_identifier[253] __initdata;
144 146
@@ -932,6 +934,13 @@ static int __init ic_bootp_recv(struct sk_buff *skb, struct net_device *dev, str
932 goto drop_unlock; 934 goto drop_unlock;
933 } 935 }
934 936
937 /* Is it a reply for the device we are configuring? */
938 if (b->xid != ic_dev_xid) {
939 if (net_ratelimit())
940 printk(KERN_ERR "DHCP/BOOTP: Ignoring delayed packet \n");
941 goto drop_unlock;
942 }
943
935 /* Parse extensions */ 944 /* Parse extensions */
936 if (ext_len >= 4 && 945 if (ext_len >= 4 &&
937 !memcmp(b->exten, ic_bootp_cookie, 4)) { /* Check magic cookie */ 946 !memcmp(b->exten, ic_bootp_cookie, 4)) { /* Check magic cookie */
@@ -1115,6 +1124,9 @@ static int __init ic_dynamic(void)
1115 get_random_bytes(&timeout, sizeof(timeout)); 1124 get_random_bytes(&timeout, sizeof(timeout));
1116 timeout = CONF_BASE_TIMEOUT + (timeout % (unsigned) CONF_TIMEOUT_RANDOM); 1125 timeout = CONF_BASE_TIMEOUT + (timeout % (unsigned) CONF_TIMEOUT_RANDOM);
1117 for (;;) { 1126 for (;;) {
1127 /* Track the device we are configuring */
1128 ic_dev_xid = d->xid;
1129
1118#ifdef IPCONFIG_BOOTP 1130#ifdef IPCONFIG_BOOTP
1119 if (do_bootp && (d->able & IC_BOOTP)) 1131 if (do_bootp && (d->able & IC_BOOTP))
1120 ic_bootp_send_if(d, jiffies - start_jiffies); 1132 ic_bootp_send_if(d, jiffies - start_jiffies);
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 1d7f49c6f0ca..7a0f0b27bf1f 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1321,6 +1321,7 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
1321 struct task_struct *user_recv = NULL; 1321 struct task_struct *user_recv = NULL;
1322 int copied_early = 0; 1322 int copied_early = 0;
1323 struct sk_buff *skb; 1323 struct sk_buff *skb;
1324 u32 urg_hole = 0;
1324 1325
1325 lock_sock(sk); 1326 lock_sock(sk);
1326 1327
@@ -1532,7 +1533,8 @@ do_prequeue:
1532 } 1533 }
1533 } 1534 }
1534 } 1535 }
1535 if ((flags & MSG_PEEK) && peek_seq != tp->copied_seq) { 1536 if ((flags & MSG_PEEK) &&
1537 (peek_seq - copied - urg_hole != tp->copied_seq)) {
1536 if (net_ratelimit()) 1538 if (net_ratelimit())
1537 printk(KERN_DEBUG "TCP(%s:%d): Application bug, race in MSG_PEEK.\n", 1539 printk(KERN_DEBUG "TCP(%s:%d): Application bug, race in MSG_PEEK.\n",
1538 current->comm, task_pid_nr(current)); 1540 current->comm, task_pid_nr(current));
@@ -1553,6 +1555,7 @@ do_prequeue:
1553 if (!urg_offset) { 1555 if (!urg_offset) {
1554 if (!sock_flag(sk, SOCK_URGINLINE)) { 1556 if (!sock_flag(sk, SOCK_URGINLINE)) {
1555 ++*seq; 1557 ++*seq;
1558 urg_hole++;
1556 offset++; 1559 offset++;
1557 used--; 1560 used--;
1558 if (!used) 1561 if (!used)
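
Annotation: what the new check computes. Under MSG_PEEK, peek_seq starts at copied_seq and legitimately advances by each byte copied and each urgent byte skipped (the new urg_hole counter), so only the remainder indicates that another thread moved copied_seq mid-peek. The invariant, as a worked sketch:

/* Sketch of the invariant (illustrative numbers): a peek starting at
 * copied_seq = 1000 that has copied 300 bytes and skipped 1 urgent
 * byte has peek_seq = 1301, and
 *	1301 - 300 - 1 == 1000 == tp->copied_seq
 * so no race is reported; any other difference means a concurrent
 * reader advanced copied_seq during the peek. */
static int peek_raced(u32 peek_seq, u32 copied, u32 urg_hole, u32 copied_seq)
{
        return peek_seq - copied - urg_hole != copied_seq;
}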
diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c
index ec697cebb63b..3b6418297231 100644
--- a/net/sched/sch_teql.c
+++ b/net/sched/sch_teql.c
@@ -303,6 +303,8 @@ restart:
303 switch (teql_resolve(skb, skb_res, slave)) { 303 switch (teql_resolve(skb, skb_res, slave)) {
304 case 0: 304 case 0:
305 if (__netif_tx_trylock(slave_txq)) { 305 if (__netif_tx_trylock(slave_txq)) {
306 unsigned int length = qdisc_pkt_len(skb);
307
306 if (!netif_tx_queue_stopped(slave_txq) && 308 if (!netif_tx_queue_stopped(slave_txq) &&
307 !netif_tx_queue_frozen(slave_txq) && 309 !netif_tx_queue_frozen(slave_txq) &&
308 slave_ops->ndo_start_xmit(skb, slave) == 0) { 310 slave_ops->ndo_start_xmit(skb, slave) == 0) {
@@ -310,8 +312,7 @@ restart:
310 master->slaves = NEXT_SLAVE(q); 312 master->slaves = NEXT_SLAVE(q);
311 netif_wake_queue(dev); 313 netif_wake_queue(dev);
312 master->stats.tx_packets++; 314 master->stats.tx_packets++;
313 master->stats.tx_bytes += 315 master->stats.tx_bytes += length;
314 qdisc_pkt_len(skb);
315 return 0; 316 return 0;
316 } 317 }
317 __netif_tx_unlock(slave_txq); 318 __netif_tx_unlock(slave_txq);
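
Annotation: the rule behind the two hunks is that a zero return from ndo_start_xmit() means the driver owns the skb and may already have freed it, so anything needed afterwards, here the byte count, must be captured first. Condensed into a hypothetical wrapper:

/* Shape of the fix (hypothetical wrapper, names from the hunk above): */
static int teql_xmit_one(struct sk_buff *skb, struct net_device *slave,
                         const struct net_device_ops *slave_ops,
                         struct net_device_stats *stats)
{
        unsigned int length = qdisc_pkt_len(skb);       /* capture first */

        if (slave_ops->ndo_start_xmit(skb, slave) == 0) {
                /* skb now belongs to the driver and may be freed;
                 * only the saved length is safe to use here. */
                stats->tx_bytes += length;
                return 0;
        }
        return -1;
}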