author     David S. Miller <davem@davemloft.net>  2011-09-22 03:23:13 -0400
committer  David S. Miller <davem@davemloft.net>  2011-09-22 03:23:13 -0400
commit     8decf868790b48a727d7e7ca164f2bcd3c1389c0 (patch)
tree       b759a5f861f842af7ea76f9011b579d06e9d5508 /drivers/net
parent     3fc72370186be2f9d4d6ef06d99e1caa5d92c564 (diff)
parent     d93dc5c4478c1fd5de85a3e8aece9aad7bbae044 (diff)
Merge branch 'master' of github.com:davem330/net
Conflicts:
	MAINTAINERS
	drivers/net/Kconfig
	drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
	drivers/net/ethernet/broadcom/tg3.c
	drivers/net/wireless/iwlwifi/iwl-pci.c
	drivers/net/wireless/iwlwifi/iwl-trans-tx-pcie.c
	drivers/net/wireless/rt2x00/rt2800usb.c
	drivers/net/wireless/wl12xx/main.c
Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/can/ti_hecc.c | 4
-rw-r--r--  drivers/net/ethernet/aeroflex/greth.c | 12
-rw-r--r--  drivers/net/ethernet/aeroflex/greth.h | 1
-rw-r--r--  drivers/net/ethernet/amd/am79c961a.c | 3
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x.h | 124
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | 27
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c | 48
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c | 46
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 162
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h | 7
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c | 7
-rw-r--r--  drivers/net/ethernet/broadcom/tg3.c | 2
-rw-r--r--  drivers/net/ethernet/freescale/gianfar_ethtool.c | 8
-rw-r--r--  drivers/net/ethernet/ibm/ibmveth.c | 60
-rw-r--r--  drivers/net/ethernet/intel/e1000/e1000_hw.c | 6
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 4
-rw-r--r--  drivers/net/ethernet/marvell/pxa168_eth.c | 1
-rw-r--r--  drivers/net/ethernet/oki-semi/pch_gbe/Kconfig | 11
-rw-r--r--  drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h | 12
-rw-r--r--  drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c | 302
-rw-r--r--  drivers/net/ethernet/realtek/r8169.c | 32
-rw-r--r--  drivers/net/ethernet/renesas/sh_eth.c | 1
-rw-r--r--  drivers/net/ethernet/sfc/efx.c | 18
-rw-r--r--  drivers/net/ethernet/sfc/io.h | 6
-rw-r--r--  drivers/net/ethernet/sfc/mcdi.c | 46
-rw-r--r--  drivers/net/ethernet/sfc/nic.c | 7
-rw-r--r--  drivers/net/ethernet/sfc/nic.h | 2
-rw-r--r--  drivers/net/ethernet/sfc/siena.c | 25
-rw-r--r--  drivers/net/ethernet/sfc/workarounds.h | 2
-rw-r--r--  drivers/net/ethernet/sun/cassini.c | 3
-rw-r--r--  drivers/net/netconsole.c | 8
-rw-r--r--  drivers/net/phy/national.c | 17
-rw-r--r--  drivers/net/ppp/ppp_generic.c | 7
-rw-r--r--  drivers/net/rionet.c | 23
-rw-r--r--  drivers/net/usb/ipheth.c | 5
-rw-r--r--  drivers/net/wireless/ath/ath9k/ar9002_calib.c | 3
-rw-r--r--  drivers/net/wireless/ath/ath9k/ar9003_eeprom.c | 2
-rw-r--r--  drivers/net/wireless/ath/ath9k/ar9003_phy.c | 2
-rw-r--r--  drivers/net/wireless/ath/ath9k/main.c | 10
-rw-r--r--  drivers/net/wireless/ath/carl9170/main.c | 4
-rw-r--r--  drivers/net/wireless/b43/main.c | 3
-rw-r--r--  drivers/net/wireless/ipw2x00/ipw2100.c | 21
-rw-r--r--  drivers/net/wireless/ipw2x00/ipw2200.c | 39
-rw-r--r--  drivers/net/wireless/iwlegacy/iwl-3945-rs.c | 13
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-agn-ucode.c | 2
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-agn.c | 5
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-trans-tx-pcie.c | 2
-rw-r--r--  drivers/net/wireless/rt2x00/rt2800lib.c | 47
-rw-r--r--  drivers/net/wireless/rt2x00/rt2800usb.c | 22
-rw-r--r--  drivers/net/wireless/rt2x00/rt2x00usb.c | 31
-rw-r--r--  drivers/net/wireless/rtlwifi/core.c | 8
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8192cu/trx.c | 11
-rw-r--r--  drivers/net/wireless/wl12xx/acx.c | 6
-rw-r--r--  drivers/net/wireless/wl12xx/sdio.c | 2
-rw-r--r--  drivers/net/wireless/wl12xx/testmode.c | 50
55 files changed, 807 insertions(+), 525 deletions(-)
diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c
index f7bbde9eb2c..2adc294f512 100644
--- a/drivers/net/can/ti_hecc.c
+++ b/drivers/net/can/ti_hecc.c
@@ -46,6 +46,7 @@
46#include <linux/skbuff.h> 46#include <linux/skbuff.h>
47#include <linux/platform_device.h> 47#include <linux/platform_device.h>
48#include <linux/clk.h> 48#include <linux/clk.h>
49#include <linux/io.h>
49 50
50#include <linux/can/dev.h> 51#include <linux/can/dev.h>
51#include <linux/can/error.h> 52#include <linux/can/error.h>
@@ -503,9 +504,9 @@ static netdev_tx_t ti_hecc_xmit(struct sk_buff *skb, struct net_device *ndev)
503 spin_unlock_irqrestore(&priv->mbx_lock, flags); 504 spin_unlock_irqrestore(&priv->mbx_lock, flags);
504 505
505 /* Prepare mailbox for transmission */ 506 /* Prepare mailbox for transmission */
507 data = cf->can_dlc | (get_tx_head_prio(priv) << 8);
506 if (cf->can_id & CAN_RTR_FLAG) /* Remote transmission request */ 508 if (cf->can_id & CAN_RTR_FLAG) /* Remote transmission request */
507 data |= HECC_CANMCF_RTR; 509 data |= HECC_CANMCF_RTR;
508 data |= get_tx_head_prio(priv) << 8;
509 hecc_write_mbx(priv, mbxno, HECC_CANMCF, data); 510 hecc_write_mbx(priv, mbxno, HECC_CANMCF, data);
510 511
511 if (cf->can_id & CAN_EFF_FLAG) /* Extended frame format */ 512 if (cf->can_id & CAN_EFF_FLAG) /* Extended frame format */
@@ -923,6 +924,7 @@ static int ti_hecc_probe(struct platform_device *pdev)
923 priv->can.do_get_state = ti_hecc_get_state; 924 priv->can.do_get_state = ti_hecc_get_state;
924 priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES; 925 priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES;
925 926
927 spin_lock_init(&priv->mbx_lock);
926 ndev->irq = irq->start; 928 ndev->irq = irq->start;
927 ndev->flags |= IFF_ECHO; 929 ndev->flags |= IFF_ECHO;
928 platform_set_drvdata(pdev, ndev); 930 platform_set_drvdata(pdev, ndev);
diff --git a/drivers/net/ethernet/aeroflex/greth.c b/drivers/net/ethernet/aeroflex/greth.c
index bc3bd34c43f..6715bf54f04 100644
--- a/drivers/net/ethernet/aeroflex/greth.c
+++ b/drivers/net/ethernet/aeroflex/greth.c
@@ -427,6 +427,7 @@ greth_start_xmit(struct sk_buff *skb, struct net_device *dev)
427 dma_sync_single_for_device(greth->dev, dma_addr, skb->len, DMA_TO_DEVICE); 427 dma_sync_single_for_device(greth->dev, dma_addr, skb->len, DMA_TO_DEVICE);
428 428
429 status = GRETH_BD_EN | GRETH_BD_IE | (skb->len & GRETH_BD_LEN); 429 status = GRETH_BD_EN | GRETH_BD_IE | (skb->len & GRETH_BD_LEN);
430 greth->tx_bufs_length[greth->tx_next] = skb->len & GRETH_BD_LEN;
430 431
431 /* Wrap around descriptor ring */ 432 /* Wrap around descriptor ring */
432 if (greth->tx_next == GRETH_TXBD_NUM_MASK) { 433 if (greth->tx_next == GRETH_TXBD_NUM_MASK) {
@@ -489,7 +490,8 @@ greth_start_xmit_gbit(struct sk_buff *skb, struct net_device *dev)
489 if (nr_frags != 0) 490 if (nr_frags != 0)
490 status = GRETH_TXBD_MORE; 491 status = GRETH_TXBD_MORE;
491 492
492 status |= GRETH_TXBD_CSALL; 493 if (skb->ip_summed == CHECKSUM_PARTIAL)
494 status |= GRETH_TXBD_CSALL;
493 status |= skb_headlen(skb) & GRETH_BD_LEN; 495 status |= skb_headlen(skb) & GRETH_BD_LEN;
494 if (greth->tx_next == GRETH_TXBD_NUM_MASK) 496 if (greth->tx_next == GRETH_TXBD_NUM_MASK)
495 status |= GRETH_BD_WR; 497 status |= GRETH_BD_WR;
@@ -512,7 +514,9 @@ greth_start_xmit_gbit(struct sk_buff *skb, struct net_device *dev)
512 greth->tx_skbuff[curr_tx] = NULL; 514 greth->tx_skbuff[curr_tx] = NULL;
513 bdp = greth->tx_bd_base + curr_tx; 515 bdp = greth->tx_bd_base + curr_tx;
514 516
515 status = GRETH_TXBD_CSALL | GRETH_BD_EN; 517 status = GRETH_BD_EN;
518 if (skb->ip_summed == CHECKSUM_PARTIAL)
519 status |= GRETH_TXBD_CSALL;
516 status |= frag->size & GRETH_BD_LEN; 520 status |= frag->size & GRETH_BD_LEN;
517 521
518 /* Wrap around descriptor ring */ 522 /* Wrap around descriptor ring */
@@ -637,6 +641,7 @@ static void greth_clean_tx(struct net_device *dev)
637 dev->stats.tx_fifo_errors++; 641 dev->stats.tx_fifo_errors++;
638 } 642 }
639 dev->stats.tx_packets++; 643 dev->stats.tx_packets++;
644 dev->stats.tx_bytes += greth->tx_bufs_length[greth->tx_last];
640 greth->tx_last = NEXT_TX(greth->tx_last); 645 greth->tx_last = NEXT_TX(greth->tx_last);
641 greth->tx_free++; 646 greth->tx_free++;
642 } 647 }
@@ -691,6 +696,7 @@ static void greth_clean_tx_gbit(struct net_device *dev)
691 greth->tx_skbuff[greth->tx_last] = NULL; 696 greth->tx_skbuff[greth->tx_last] = NULL;
692 697
693 greth_update_tx_stats(dev, stat); 698 greth_update_tx_stats(dev, stat);
699 dev->stats.tx_bytes += skb->len;
694 700
695 bdp = greth->tx_bd_base + greth->tx_last; 701 bdp = greth->tx_bd_base + greth->tx_last;
696 702
@@ -792,6 +798,7 @@ static int greth_rx(struct net_device *dev, int limit)
792 memcpy(skb_put(skb, pkt_len), phys_to_virt(dma_addr), pkt_len); 798 memcpy(skb_put(skb, pkt_len), phys_to_virt(dma_addr), pkt_len);
793 799
794 skb->protocol = eth_type_trans(skb, dev); 800 skb->protocol = eth_type_trans(skb, dev);
801 dev->stats.rx_bytes += pkt_len;
795 dev->stats.rx_packets++; 802 dev->stats.rx_packets++;
796 netif_receive_skb(skb); 803 netif_receive_skb(skb);
797 } 804 }
@@ -906,6 +913,7 @@ static int greth_rx_gbit(struct net_device *dev, int limit)
906 913
907 skb->protocol = eth_type_trans(skb, dev); 914 skb->protocol = eth_type_trans(skb, dev);
908 dev->stats.rx_packets++; 915 dev->stats.rx_packets++;
916 dev->stats.rx_bytes += pkt_len;
909 netif_receive_skb(skb); 917 netif_receive_skb(skb);
910 918
911 greth->rx_skbuff[greth->rx_cur] = newskb; 919 greth->rx_skbuff[greth->rx_cur] = newskb;
diff --git a/drivers/net/ethernet/aeroflex/greth.h b/drivers/net/ethernet/aeroflex/greth.h
index 9a0040dee4d..232a622a85b 100644
--- a/drivers/net/ethernet/aeroflex/greth.h
+++ b/drivers/net/ethernet/aeroflex/greth.h
@@ -103,6 +103,7 @@ struct greth_private {
103 103
104 unsigned char *tx_bufs[GRETH_TXBD_NUM]; 104 unsigned char *tx_bufs[GRETH_TXBD_NUM];
105 unsigned char *rx_bufs[GRETH_RXBD_NUM]; 105 unsigned char *rx_bufs[GRETH_RXBD_NUM];
106 u16 tx_bufs_length[GRETH_TXBD_NUM];
106 107
107 u16 tx_next; 108 u16 tx_next;
108 u16 tx_last; 109 u16 tx_last;
diff --git a/drivers/net/ethernet/amd/am79c961a.c b/drivers/net/ethernet/amd/am79c961a.c
index c2b630c5e85..7d5ded80d2d 100644
--- a/drivers/net/ethernet/amd/am79c961a.c
+++ b/drivers/net/ethernet/amd/am79c961a.c
@@ -308,8 +308,11 @@ static void am79c961_timer(unsigned long data)
308 struct net_device *dev = (struct net_device *)data; 308 struct net_device *dev = (struct net_device *)data;
309 struct dev_priv *priv = netdev_priv(dev); 309 struct dev_priv *priv = netdev_priv(dev);
310 unsigned int lnkstat, carrier; 310 unsigned int lnkstat, carrier;
311 unsigned long flags;
311 312
313 spin_lock_irqsave(&priv->chip_lock, flags);
312 lnkstat = read_ireg(dev->base_addr, ISALED0) & ISALED0_LNKST; 314 lnkstat = read_ireg(dev->base_addr, ISALED0) & ISALED0_LNKST;
315 spin_unlock_irqrestore(&priv->chip_lock, flags);
313 carrier = netif_carrier_ok(dev); 316 carrier = netif_carrier_ok(dev);
314 317
315 if (lnkstat && !carrier) { 318 if (lnkstat && !carrier) {
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index f127768e4e8..2f92487724c 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -310,6 +310,14 @@ union db_prod {
310 u32 raw; 310 u32 raw;
311}; 311};
312 312
313/* dropless fc FW/HW related params */
314#define BRB_SIZE(bp) (CHIP_IS_E3(bp) ? 1024 : 512)
315#define MAX_AGG_QS(bp) (CHIP_IS_E1(bp) ? \
316 ETH_MAX_AGGREGATION_QUEUES_E1 :\
317 ETH_MAX_AGGREGATION_QUEUES_E1H_E2)
318#define FW_DROP_LEVEL(bp) (3 + MAX_SPQ_PENDING + MAX_AGG_QS(bp))
319#define FW_PREFETCH_CNT 16
320#define DROPLESS_FC_HEADROOM 100
313 321
314/* MC hsi */ 322/* MC hsi */
315#define BCM_PAGE_SHIFT 12 323#define BCM_PAGE_SHIFT 12
@@ -326,15 +334,35 @@ union db_prod {
326/* SGE ring related macros */ 334/* SGE ring related macros */
327#define NUM_RX_SGE_PAGES 2 335#define NUM_RX_SGE_PAGES 2
328#define RX_SGE_CNT (BCM_PAGE_SIZE / sizeof(struct eth_rx_sge)) 336#define RX_SGE_CNT (BCM_PAGE_SIZE / sizeof(struct eth_rx_sge))
329#define MAX_RX_SGE_CNT (RX_SGE_CNT - 2) 337#define NEXT_PAGE_SGE_DESC_CNT 2
338#define MAX_RX_SGE_CNT (RX_SGE_CNT - NEXT_PAGE_SGE_DESC_CNT)
330/* RX_SGE_CNT is promised to be a power of 2 */ 339/* RX_SGE_CNT is promised to be a power of 2 */
331#define RX_SGE_MASK (RX_SGE_CNT - 1) 340#define RX_SGE_MASK (RX_SGE_CNT - 1)
332#define NUM_RX_SGE (RX_SGE_CNT * NUM_RX_SGE_PAGES) 341#define NUM_RX_SGE (RX_SGE_CNT * NUM_RX_SGE_PAGES)
333#define MAX_RX_SGE (NUM_RX_SGE - 1) 342#define MAX_RX_SGE (NUM_RX_SGE - 1)
334#define NEXT_SGE_IDX(x) ((((x) & RX_SGE_MASK) == \ 343#define NEXT_SGE_IDX(x) ((((x) & RX_SGE_MASK) == \
335 (MAX_RX_SGE_CNT - 1)) ? (x) + 3 : (x) + 1) 344 (MAX_RX_SGE_CNT - 1)) ? \
345 (x) + 1 + NEXT_PAGE_SGE_DESC_CNT : \
346 (x) + 1)
336#define RX_SGE(x) ((x) & MAX_RX_SGE) 347#define RX_SGE(x) ((x) & MAX_RX_SGE)
337 348
349/*
350 * Number of required SGEs is the sum of two:
351 * 1. Number of possible opened aggregations (next packet for
352 * these aggregations will probably consume SGE immidiatelly)
353 * 2. Rest of BRB blocks divided by 2 (block will consume new SGE only
354 * after placement on BD for new TPA aggregation)
355 *
356 * Takes into account NEXT_PAGE_SGE_DESC_CNT "next" elements on each page
357 */
358#define NUM_SGE_REQ (MAX_AGG_QS(bp) + \
359 (BRB_SIZE(bp) - MAX_AGG_QS(bp)) / 2)
360#define NUM_SGE_PG_REQ ((NUM_SGE_REQ + MAX_RX_SGE_CNT - 1) / \
361 MAX_RX_SGE_CNT)
362#define SGE_TH_LO(bp) (NUM_SGE_REQ + \
363 NUM_SGE_PG_REQ * NEXT_PAGE_SGE_DESC_CNT)
364#define SGE_TH_HI(bp) (SGE_TH_LO(bp) + DROPLESS_FC_HEADROOM)
365
338/* Manipulate a bit vector defined as an array of u64 */ 366/* Manipulate a bit vector defined as an array of u64 */
339 367
340/* Number of bits in one sge_mask array element */ 368/* Number of bits in one sge_mask array element */
@@ -546,24 +574,43 @@ struct bnx2x_fastpath {
546 574
547#define NUM_TX_RINGS 16 575#define NUM_TX_RINGS 16
548#define TX_DESC_CNT (BCM_PAGE_SIZE / sizeof(union eth_tx_bd_types)) 576#define TX_DESC_CNT (BCM_PAGE_SIZE / sizeof(union eth_tx_bd_types))
549#define MAX_TX_DESC_CNT (TX_DESC_CNT - 1) 577#define NEXT_PAGE_TX_DESC_CNT 1
578#define MAX_TX_DESC_CNT (TX_DESC_CNT - NEXT_PAGE_TX_DESC_CNT)
550#define NUM_TX_BD (TX_DESC_CNT * NUM_TX_RINGS) 579#define NUM_TX_BD (TX_DESC_CNT * NUM_TX_RINGS)
551#define MAX_TX_BD (NUM_TX_BD - 1) 580#define MAX_TX_BD (NUM_TX_BD - 1)
552#define MAX_TX_AVAIL (MAX_TX_DESC_CNT * NUM_TX_RINGS - 2) 581#define MAX_TX_AVAIL (MAX_TX_DESC_CNT * NUM_TX_RINGS - 2)
553#define NEXT_TX_IDX(x) ((((x) & MAX_TX_DESC_CNT) == \ 582#define NEXT_TX_IDX(x) ((((x) & MAX_TX_DESC_CNT) == \
554 (MAX_TX_DESC_CNT - 1)) ? (x) + 2 : (x) + 1) 583 (MAX_TX_DESC_CNT - 1)) ? \
584 (x) + 1 + NEXT_PAGE_TX_DESC_CNT : \
585 (x) + 1)
555#define TX_BD(x) ((x) & MAX_TX_BD) 586#define TX_BD(x) ((x) & MAX_TX_BD)
556#define TX_BD_POFF(x) ((x) & MAX_TX_DESC_CNT) 587#define TX_BD_POFF(x) ((x) & MAX_TX_DESC_CNT)
557 588
558/* The RX BD ring is special, each bd is 8 bytes but the last one is 16 */ 589/* The RX BD ring is special, each bd is 8 bytes but the last one is 16 */
559#define NUM_RX_RINGS 8 590#define NUM_RX_RINGS 8
560#define RX_DESC_CNT (BCM_PAGE_SIZE / sizeof(struct eth_rx_bd)) 591#define RX_DESC_CNT (BCM_PAGE_SIZE / sizeof(struct eth_rx_bd))
561#define MAX_RX_DESC_CNT (RX_DESC_CNT - 2) 592#define NEXT_PAGE_RX_DESC_CNT 2
593#define MAX_RX_DESC_CNT (RX_DESC_CNT - NEXT_PAGE_RX_DESC_CNT)
562#define RX_DESC_MASK (RX_DESC_CNT - 1) 594#define RX_DESC_MASK (RX_DESC_CNT - 1)
563#define NUM_RX_BD (RX_DESC_CNT * NUM_RX_RINGS) 595#define NUM_RX_BD (RX_DESC_CNT * NUM_RX_RINGS)
564#define MAX_RX_BD (NUM_RX_BD - 1) 596#define MAX_RX_BD (NUM_RX_BD - 1)
565#define MAX_RX_AVAIL (MAX_RX_DESC_CNT * NUM_RX_RINGS - 2) 597#define MAX_RX_AVAIL (MAX_RX_DESC_CNT * NUM_RX_RINGS - 2)
566#define MIN_RX_AVAIL 128 598
599/* dropless fc calculations for BDs
600 *
601 * Number of BDs should as number of buffers in BRB:
602 * Low threshold takes into account NEXT_PAGE_RX_DESC_CNT
603 * "next" elements on each page
604 */
605#define NUM_BD_REQ BRB_SIZE(bp)
606#define NUM_BD_PG_REQ ((NUM_BD_REQ + MAX_RX_DESC_CNT - 1) / \
607 MAX_RX_DESC_CNT)
608#define BD_TH_LO(bp) (NUM_BD_REQ + \
609 NUM_BD_PG_REQ * NEXT_PAGE_RX_DESC_CNT + \
610 FW_DROP_LEVEL(bp))
611#define BD_TH_HI(bp) (BD_TH_LO(bp) + DROPLESS_FC_HEADROOM)
612
613#define MIN_RX_AVAIL ((bp)->dropless_fc ? BD_TH_HI(bp) + 128 : 128)
567 614
568#define MIN_RX_SIZE_TPA_HW (CHIP_IS_E1(bp) ? \ 615#define MIN_RX_SIZE_TPA_HW (CHIP_IS_E1(bp) ? \
569 ETH_MIN_RX_CQES_WITH_TPA_E1 : \ 616 ETH_MIN_RX_CQES_WITH_TPA_E1 : \
@@ -574,7 +621,9 @@ struct bnx2x_fastpath {
574 MIN_RX_AVAIL)) 621 MIN_RX_AVAIL))
575 622
576#define NEXT_RX_IDX(x) ((((x) & RX_DESC_MASK) == \ 623#define NEXT_RX_IDX(x) ((((x) & RX_DESC_MASK) == \
577 (MAX_RX_DESC_CNT - 1)) ? (x) + 3 : (x) + 1) 624 (MAX_RX_DESC_CNT - 1)) ? \
625 (x) + 1 + NEXT_PAGE_RX_DESC_CNT : \
626 (x) + 1)
578#define RX_BD(x) ((x) & MAX_RX_BD) 627#define RX_BD(x) ((x) & MAX_RX_BD)
579 628
580/* 629/*
@@ -584,14 +633,31 @@ struct bnx2x_fastpath {
584#define CQE_BD_REL (sizeof(union eth_rx_cqe) / sizeof(struct eth_rx_bd)) 633#define CQE_BD_REL (sizeof(union eth_rx_cqe) / sizeof(struct eth_rx_bd))
585#define NUM_RCQ_RINGS (NUM_RX_RINGS * CQE_BD_REL) 634#define NUM_RCQ_RINGS (NUM_RX_RINGS * CQE_BD_REL)
586#define RCQ_DESC_CNT (BCM_PAGE_SIZE / sizeof(union eth_rx_cqe)) 635#define RCQ_DESC_CNT (BCM_PAGE_SIZE / sizeof(union eth_rx_cqe))
587#define MAX_RCQ_DESC_CNT (RCQ_DESC_CNT - 1) 636#define NEXT_PAGE_RCQ_DESC_CNT 1
637#define MAX_RCQ_DESC_CNT (RCQ_DESC_CNT - NEXT_PAGE_RCQ_DESC_CNT)
588#define NUM_RCQ_BD (RCQ_DESC_CNT * NUM_RCQ_RINGS) 638#define NUM_RCQ_BD (RCQ_DESC_CNT * NUM_RCQ_RINGS)
589#define MAX_RCQ_BD (NUM_RCQ_BD - 1) 639#define MAX_RCQ_BD (NUM_RCQ_BD - 1)
590#define MAX_RCQ_AVAIL (MAX_RCQ_DESC_CNT * NUM_RCQ_RINGS - 2) 640#define MAX_RCQ_AVAIL (MAX_RCQ_DESC_CNT * NUM_RCQ_RINGS - 2)
591#define NEXT_RCQ_IDX(x) ((((x) & MAX_RCQ_DESC_CNT) == \ 641#define NEXT_RCQ_IDX(x) ((((x) & MAX_RCQ_DESC_CNT) == \
592 (MAX_RCQ_DESC_CNT - 1)) ? (x) + 2 : (x) + 1) 642 (MAX_RCQ_DESC_CNT - 1)) ? \
643 (x) + 1 + NEXT_PAGE_RCQ_DESC_CNT : \
644 (x) + 1)
593#define RCQ_BD(x) ((x) & MAX_RCQ_BD) 645#define RCQ_BD(x) ((x) & MAX_RCQ_BD)
594 646
647/* dropless fc calculations for RCQs
648 *
649 * Number of RCQs should be as number of buffers in BRB:
650 * Low threshold takes into account NEXT_PAGE_RCQ_DESC_CNT
651 * "next" elements on each page
652 */
653#define NUM_RCQ_REQ BRB_SIZE(bp)
654#define NUM_RCQ_PG_REQ ((NUM_BD_REQ + MAX_RCQ_DESC_CNT - 1) / \
655 MAX_RCQ_DESC_CNT)
656#define RCQ_TH_LO(bp) (NUM_RCQ_REQ + \
657 NUM_RCQ_PG_REQ * NEXT_PAGE_RCQ_DESC_CNT + \
658 FW_DROP_LEVEL(bp))
659#define RCQ_TH_HI(bp) (RCQ_TH_LO(bp) + DROPLESS_FC_HEADROOM)
660
595 661
596/* This is needed for determining of last_max */ 662/* This is needed for determining of last_max */
597#define SUB_S16(a, b) (s16)((s16)(a) - (s16)(b)) 663#define SUB_S16(a, b) (s16)((s16)(a) - (s16)(b))
@@ -680,24 +746,17 @@ struct bnx2x_fastpath {
680#define FP_CSB_FUNC_OFF \ 746#define FP_CSB_FUNC_OFF \
681 offsetof(struct cstorm_status_block_c, func) 747 offsetof(struct cstorm_status_block_c, func)
682 748
683#define HC_INDEX_TOE_RX_CQ_CONS 0 /* Formerly Ustorm TOE CQ index */ 749#define HC_INDEX_ETH_RX_CQ_CONS 1
684 /* (HC_INDEX_U_TOE_RX_CQ_CONS) */
685#define HC_INDEX_ETH_RX_CQ_CONS 1 /* Formerly Ustorm ETH CQ index */
686 /* (HC_INDEX_U_ETH_RX_CQ_CONS) */
687#define HC_INDEX_ETH_RX_BD_CONS 2 /* Formerly Ustorm ETH BD index */
688 /* (HC_INDEX_U_ETH_RX_BD_CONS) */
689
690#define HC_INDEX_TOE_TX_CQ_CONS 4 /* Formerly Cstorm TOE CQ index */
691 /* (HC_INDEX_C_TOE_TX_CQ_CONS) */
692#define HC_INDEX_ETH_TX_CQ_CONS_COS0 5 /* Formerly Cstorm ETH CQ index */
693 /* (HC_INDEX_C_ETH_TX_CQ_CONS) */
694#define HC_INDEX_ETH_TX_CQ_CONS_COS1 6 /* Formerly Cstorm ETH CQ index */
695 /* (HC_INDEX_C_ETH_TX_CQ_CONS) */
696#define HC_INDEX_ETH_TX_CQ_CONS_COS2 7 /* Formerly Cstorm ETH CQ index */
697 /* (HC_INDEX_C_ETH_TX_CQ_CONS) */
698 750
699#define HC_INDEX_ETH_FIRST_TX_CQ_CONS HC_INDEX_ETH_TX_CQ_CONS_COS0 751#define HC_INDEX_OOO_TX_CQ_CONS 4
700 752
753#define HC_INDEX_ETH_TX_CQ_CONS_COS0 5
754
755#define HC_INDEX_ETH_TX_CQ_CONS_COS1 6
756
757#define HC_INDEX_ETH_TX_CQ_CONS_COS2 7
758
759#define HC_INDEX_ETH_FIRST_TX_CQ_CONS HC_INDEX_ETH_TX_CQ_CONS_COS0
701 760
702#define BNX2X_RX_SB_INDEX \ 761#define BNX2X_RX_SB_INDEX \
703 (&fp->sb_index_values[HC_INDEX_ETH_RX_CQ_CONS]) 762 (&fp->sb_index_values[HC_INDEX_ETH_RX_CQ_CONS])
@@ -1095,11 +1154,12 @@ struct bnx2x {
1095#define BP_PORT(bp) (bp->pfid & 1) 1154#define BP_PORT(bp) (bp->pfid & 1)
1096#define BP_FUNC(bp) (bp->pfid) 1155#define BP_FUNC(bp) (bp->pfid)
1097#define BP_ABS_FUNC(bp) (bp->pf_num) 1156#define BP_ABS_FUNC(bp) (bp->pf_num)
1098#define BP_E1HVN(bp) (bp->pfid >> 1) 1157#define BP_VN(bp) ((bp)->pfid >> 1)
1099#define BP_VN(bp) (BP_E1HVN(bp)) /*remove when approved*/ 1158#define BP_MAX_VN_NUM(bp) (CHIP_MODE_IS_4_PORT(bp) ? 2 : 4)
1100#define BP_L_ID(bp) (BP_E1HVN(bp) << 2) 1159#define BP_L_ID(bp) (BP_VN(bp) << 2)
1101#define BP_FW_MB_IDX(bp) (BP_PORT(bp) +\ 1160#define BP_FW_MB_IDX_VN(bp, vn) (BP_PORT(bp) +\
1102 BP_VN(bp) * ((CHIP_IS_E1x(bp) || (CHIP_MODE_IS_4_PORT(bp))) ? 2 : 1)) 1161 (vn) * ((CHIP_IS_E1x(bp) || (CHIP_MODE_IS_4_PORT(bp))) ? 2 : 1))
1162#define BP_FW_MB_IDX(bp) BP_FW_MB_IDX_VN(bp, BP_VN(bp))
1103 1163
1104 struct net_device *dev; 1164 struct net_device *dev;
1105 struct pci_dev *pdev; 1165 struct pci_dev *pdev;
@@ -1762,7 +1822,7 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
1762 1822
1763#define MAX_DMAE_C_PER_PORT 8 1823#define MAX_DMAE_C_PER_PORT 8
1764#define INIT_DMAE_C(bp) (BP_PORT(bp) * MAX_DMAE_C_PER_PORT + \ 1824#define INIT_DMAE_C(bp) (BP_PORT(bp) * MAX_DMAE_C_PER_PORT + \
1765 BP_E1HVN(bp)) 1825 BP_VN(bp))
1766#define PMF_DMAE_C(bp) (BP_PORT(bp) * MAX_DMAE_C_PER_PORT + \ 1826#define PMF_DMAE_C(bp) (BP_PORT(bp) * MAX_DMAE_C_PER_PORT + \
1767 E1HVN_MAX) 1827 E1HVN_MAX)
1768 1828
@@ -1788,7 +1848,7 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
1788 1848
1789/* must be used on a CID before placing it on a HW ring */ 1849/* must be used on a CID before placing it on a HW ring */
1790#define HW_CID(bp, x) ((BP_PORT(bp) << 23) | \ 1850#define HW_CID(bp, x) ((BP_PORT(bp) << 23) | \
1791 (BP_E1HVN(bp) << BNX2X_SWCID_SHIFT) | \ 1851 (BP_VN(bp) << BNX2X_SWCID_SHIFT) | \
1792 (x)) 1852 (x))
1793 1853
1794#define SP_DESC_CNT (BCM_PAGE_SIZE / sizeof(struct eth_spe)) 1854#define SP_DESC_CNT (BCM_PAGE_SIZE / sizeof(struct eth_spe))
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 5c3eb17c4f4..e575e89c7d4 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -993,8 +993,6 @@ void __bnx2x_link_report(struct bnx2x *bp)
993void bnx2x_init_rx_rings(struct bnx2x *bp) 993void bnx2x_init_rx_rings(struct bnx2x *bp)
994{ 994{
995 int func = BP_FUNC(bp); 995 int func = BP_FUNC(bp);
996 int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
997 ETH_MAX_AGGREGATION_QUEUES_E1H_E2;
998 u16 ring_prod; 996 u16 ring_prod;
999 int i, j; 997 int i, j;
1000 998
@@ -1007,7 +1005,7 @@ void bnx2x_init_rx_rings(struct bnx2x *bp)
1007 1005
1008 if (!fp->disable_tpa) { 1006 if (!fp->disable_tpa) {
1009 /* Fill the per-aggregtion pool */ 1007 /* Fill the per-aggregtion pool */
1010 for (i = 0; i < max_agg_queues; i++) { 1008 for (i = 0; i < MAX_AGG_QS(bp); i++) {
1011 struct bnx2x_agg_info *tpa_info = 1009 struct bnx2x_agg_info *tpa_info =
1012 &fp->tpa_info[i]; 1010 &fp->tpa_info[i];
1013 struct sw_rx_bd *first_buf = 1011 struct sw_rx_bd *first_buf =
@@ -1047,7 +1045,7 @@ void bnx2x_init_rx_rings(struct bnx2x *bp)
1047 bnx2x_free_rx_sge_range(bp, fp, 1045 bnx2x_free_rx_sge_range(bp, fp,
1048 ring_prod); 1046 ring_prod);
1049 bnx2x_free_tpa_pool(bp, fp, 1047 bnx2x_free_tpa_pool(bp, fp,
1050 max_agg_queues); 1048 MAX_AGG_QS(bp));
1051 fp->disable_tpa = 1; 1049 fp->disable_tpa = 1;
1052 ring_prod = 0; 1050 ring_prod = 0;
1053 break; 1051 break;
@@ -1143,9 +1141,7 @@ static void bnx2x_free_rx_skbs(struct bnx2x *bp)
1143 bnx2x_free_rx_bds(fp); 1141 bnx2x_free_rx_bds(fp);
1144 1142
1145 if (!fp->disable_tpa) 1143 if (!fp->disable_tpa)
1146 bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ? 1144 bnx2x_free_tpa_pool(bp, fp, MAX_AGG_QS(bp));
1147 ETH_MAX_AGGREGATION_QUEUES_E1 :
1148 ETH_MAX_AGGREGATION_QUEUES_E1H_E2);
1149 } 1145 }
1150} 1146}
1151 1147
@@ -3100,15 +3096,20 @@ static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
3100 struct bnx2x_fastpath *fp = &bp->fp[index]; 3096 struct bnx2x_fastpath *fp = &bp->fp[index];
3101 int ring_size = 0; 3097 int ring_size = 0;
3102 u8 cos; 3098 u8 cos;
3099 int rx_ring_size = 0;
3103 3100
3104 /* if rx_ring_size specified - use it */ 3101 /* if rx_ring_size specified - use it */
3105 int rx_ring_size = bp->rx_ring_size ? bp->rx_ring_size : 3102 if (!bp->rx_ring_size) {
3106 MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);
3107 3103
3108 /* allocate at least number of buffers required by FW */ 3104 rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);
3109 rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA : 3105
3110 MIN_RX_SIZE_TPA, 3106 /* allocate at least number of buffers required by FW */
3111 rx_ring_size); 3107 rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
3108 MIN_RX_SIZE_TPA, rx_ring_size);
3109
3110 bp->rx_ring_size = rx_ring_size;
3111 } else
3112 rx_ring_size = bp->rx_ring_size;
3112 3113
3113 /* Common */ 3114 /* Common */
3114 sb = &bnx2x_fp(bp, index, status_blk); 3115 sb = &bnx2x_fp(bp, index, status_blk);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index ce14f11c0de..a49f8cfa2dc 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -366,13 +366,50 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
366 } 366 }
367 367
368 /* advertise the requested speed and duplex if supported */ 368 /* advertise the requested speed and duplex if supported */
369 cmd->advertising &= bp->port.supported[cfg_idx]; 369 if (cmd->advertising & ~(bp->port.supported[cfg_idx])) {
370 DP(NETIF_MSG_LINK, "Advertisement parameters "
371 "are not supported\n");
372 return -EINVAL;
373 }
370 374
371 bp->link_params.req_line_speed[cfg_idx] = SPEED_AUTO_NEG; 375 bp->link_params.req_line_speed[cfg_idx] = SPEED_AUTO_NEG;
372 bp->link_params.req_duplex[cfg_idx] = DUPLEX_FULL; 376 bp->link_params.req_duplex[cfg_idx] = cmd->duplex;
373 bp->port.advertising[cfg_idx] |= (ADVERTISED_Autoneg | 377 bp->port.advertising[cfg_idx] = (ADVERTISED_Autoneg |
374 cmd->advertising); 378 cmd->advertising);
379 if (cmd->advertising) {
380
381 bp->link_params.speed_cap_mask[cfg_idx] = 0;
382 if (cmd->advertising & ADVERTISED_10baseT_Half) {
383 bp->link_params.speed_cap_mask[cfg_idx] |=
384 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF;
385 }
386 if (cmd->advertising & ADVERTISED_10baseT_Full)
387 bp->link_params.speed_cap_mask[cfg_idx] |=
388 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL;
375 389
390 if (cmd->advertising & ADVERTISED_100baseT_Full)
391 bp->link_params.speed_cap_mask[cfg_idx] |=
392 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL;
393
394 if (cmd->advertising & ADVERTISED_100baseT_Half) {
395 bp->link_params.speed_cap_mask[cfg_idx] |=
396 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF;
397 }
398 if (cmd->advertising & ADVERTISED_1000baseT_Half) {
399 bp->link_params.speed_cap_mask[cfg_idx] |=
400 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G;
401 }
402 if (cmd->advertising & (ADVERTISED_1000baseT_Full |
403 ADVERTISED_1000baseKX_Full))
404 bp->link_params.speed_cap_mask[cfg_idx] |=
405 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G;
406
407 if (cmd->advertising & (ADVERTISED_10000baseT_Full |
408 ADVERTISED_10000baseKX4_Full |
409 ADVERTISED_10000baseKR_Full))
410 bp->link_params.speed_cap_mask[cfg_idx] |=
411 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G;
412 }
376 } else { /* forced speed */ 413 } else { /* forced speed */
377 /* advertise the requested speed and duplex if supported */ 414 /* advertise the requested speed and duplex if supported */
378 switch (speed) { 415 switch (speed) {
@@ -1313,10 +1350,7 @@ static void bnx2x_get_ringparam(struct net_device *dev,
1313 if (bp->rx_ring_size) 1350 if (bp->rx_ring_size)
1314 ering->rx_pending = bp->rx_ring_size; 1351 ering->rx_pending = bp->rx_ring_size;
1315 else 1352 else
1316 if (bp->state == BNX2X_STATE_OPEN && bp->num_queues) 1353 ering->rx_pending = MAX_RX_AVAIL;
1317 ering->rx_pending = MAX_RX_AVAIL/bp->num_queues;
1318 else
1319 ering->rx_pending = MAX_RX_AVAIL;
1320 1354
1321 ering->rx_mini_pending = 0; 1355 ering->rx_mini_pending = 0;
1322 ering->rx_jumbo_pending = 0; 1356 ering->rx_jumbo_pending = 0;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index 8e9b87be300..818723c9e67 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -778,9 +778,9 @@ static int bnx2x_ets_e3b0_set_cos_bw(struct bnx2x *bp,
778{ 778{
779 u32 nig_reg_adress_crd_weight = 0; 779 u32 nig_reg_adress_crd_weight = 0;
780 u32 pbf_reg_adress_crd_weight = 0; 780 u32 pbf_reg_adress_crd_weight = 0;
781 /* Calculate and set BW for this COS*/ 781 /* Calculate and set BW for this COS - use 1 instead of 0 for BW */
782 const u32 cos_bw_nig = (bw * min_w_val_nig) / total_bw; 782 const u32 cos_bw_nig = ((bw ? bw : 1) * min_w_val_nig) / total_bw;
783 const u32 cos_bw_pbf = (bw * min_w_val_pbf) / total_bw; 783 const u32 cos_bw_pbf = ((bw ? bw : 1) * min_w_val_pbf) / total_bw;
784 784
785 switch (cos_entry) { 785 switch (cos_entry) {
786 case 0: 786 case 0:
@@ -852,18 +852,12 @@ static int bnx2x_ets_e3b0_get_total_bw(
852 /* Calculate total BW requested */ 852 /* Calculate total BW requested */
853 for (cos_idx = 0; cos_idx < ets_params->num_of_cos; cos_idx++) { 853 for (cos_idx = 0; cos_idx < ets_params->num_of_cos; cos_idx++) {
854 if (bnx2x_cos_state_bw == ets_params->cos[cos_idx].state) { 854 if (bnx2x_cos_state_bw == ets_params->cos[cos_idx].state) {
855 855 *total_bw +=
856 if (0 == ets_params->cos[cos_idx].params.bw_params.bw) { 856 ets_params->cos[cos_idx].params.bw_params.bw;
857 DP(NETIF_MSG_LINK,
858 "bnx2x_ets_E3B0_config BW was set to 0\n");
859 return -EINVAL;
860 } 857 }
861 *total_bw +=
862 ets_params->cos[cos_idx].params.bw_params.bw;
863 }
864 } 858 }
865 859
866 /*Check taotl BW is valid */ 860 /* Check total BW is valid */
867 if ((100 != *total_bw) || (0 == *total_bw)) { 861 if ((100 != *total_bw) || (0 == *total_bw)) {
868 if (0 == *total_bw) { 862 if (0 == *total_bw) {
869 DP(NETIF_MSG_LINK, 863 DP(NETIF_MSG_LINK,
@@ -1726,7 +1720,7 @@ static int bnx2x_xmac_enable(struct link_params *params,
1726 1720
1727 /* Check loopback mode */ 1721 /* Check loopback mode */
1728 if (lb) 1722 if (lb)
1729 val |= XMAC_CTRL_REG_CORE_LOCAL_LPBK; 1723 val |= XMAC_CTRL_REG_LINE_LOCAL_LPBK;
1730 REG_WR(bp, xmac_base + XMAC_REG_CTRL, val); 1724 REG_WR(bp, xmac_base + XMAC_REG_CTRL, val);
1731 bnx2x_set_xumac_nig(params, 1725 bnx2x_set_xumac_nig(params,
1732 ((vars->flow_ctrl & BNX2X_FLOW_CTRL_TX) != 0), 1); 1726 ((vars->flow_ctrl & BNX2X_FLOW_CTRL_TX) != 0), 1);
@@ -3630,6 +3624,12 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
3630 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, 3624 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
3631 MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT1, val16); 3625 MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT1, val16);
3632 3626
3627 /* Advertised and set FEC (Forward Error Correction) */
3628 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
3629 MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT2,
3630 (MDIO_WC_REG_AN_IEEE1BLK_AN_ADV2_FEC_ABILITY |
3631 MDIO_WC_REG_AN_IEEE1BLK_AN_ADV2_FEC_REQ));
3632
3633 /* Enable CL37 BAM */ 3633 /* Enable CL37 BAM */
3634 if (REG_RD(bp, params->shmem_base + 3634 if (REG_RD(bp, params->shmem_base +
3635 offsetof(struct shmem_region, dev_info. 3635 offsetof(struct shmem_region, dev_info.
@@ -5925,7 +5925,7 @@ int bnx2x_set_led(struct link_params *params,
5925 (tmp | EMAC_LED_OVERRIDE)); 5925 (tmp | EMAC_LED_OVERRIDE));
5926 /* 5926 /*
5927 * return here without enabling traffic 5927 * return here without enabling traffic
5928 * LED blink andsetting rate in ON mode. 5928 * LED blink and setting rate in ON mode.
5929 * In oper mode, enabling LED blink 5929 * In oper mode, enabling LED blink
5930 * and setting rate is needed. 5930 * and setting rate is needed.
5931 */ 5931 */
@@ -5937,7 +5937,11 @@ int bnx2x_set_led(struct link_params *params,
5937 * This is a work-around for HW issue found when link 5937 * This is a work-around for HW issue found when link
5938 * is up in CL73 5938 * is up in CL73
5939 */ 5939 */
5940 REG_WR(bp, NIG_REG_LED_10G_P0 + port*4, 1); 5940 if ((!CHIP_IS_E3(bp)) ||
5941 (CHIP_IS_E3(bp) &&
5942 mode == LED_MODE_ON))
5943 REG_WR(bp, NIG_REG_LED_10G_P0 + port*4, 1);
5944
5941 if (CHIP_IS_E1x(bp) || 5945 if (CHIP_IS_E1x(bp) ||
5942 CHIP_IS_E2(bp) || 5946 CHIP_IS_E2(bp) ||
5943 (mode == LED_MODE_ON)) 5947 (mode == LED_MODE_ON))
@@ -10644,8 +10648,7 @@ static struct bnx2x_phy phy_warpcore = {
10644 .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT, 10648 .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT,
10645 .addr = 0xff, 10649 .addr = 0xff,
10646 .def_md_devad = 0, 10650 .def_md_devad = 0,
10647 .flags = (FLAGS_HW_LOCK_REQUIRED | 10651 .flags = FLAGS_HW_LOCK_REQUIRED,
10648 FLAGS_TX_ERROR_CHECK),
10649 .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, 10652 .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
10650 .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, 10653 .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
10651 .mdio_ctrl = 0, 10654 .mdio_ctrl = 0,
@@ -10771,8 +10774,7 @@ static struct bnx2x_phy phy_8706 = {
10771 .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706, 10774 .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706,
10772 .addr = 0xff, 10775 .addr = 0xff,
10773 .def_md_devad = 0, 10776 .def_md_devad = 0,
10774 .flags = (FLAGS_INIT_XGXS_FIRST | 10777 .flags = FLAGS_INIT_XGXS_FIRST,
10775 FLAGS_TX_ERROR_CHECK),
10776 .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, 10778 .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
10777 .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, 10779 .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
10778 .mdio_ctrl = 0, 10780 .mdio_ctrl = 0,
@@ -10803,8 +10805,7 @@ static struct bnx2x_phy phy_8726 = {
10803 .addr = 0xff, 10805 .addr = 0xff,
10804 .def_md_devad = 0, 10806 .def_md_devad = 0,
10805 .flags = (FLAGS_HW_LOCK_REQUIRED | 10807 .flags = (FLAGS_HW_LOCK_REQUIRED |
10806 FLAGS_INIT_XGXS_FIRST | 10808 FLAGS_INIT_XGXS_FIRST),
10807 FLAGS_TX_ERROR_CHECK),
10808 .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, 10809 .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
10809 .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, 10810 .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
10810 .mdio_ctrl = 0, 10811 .mdio_ctrl = 0,
@@ -10835,8 +10836,7 @@ static struct bnx2x_phy phy_8727 = {
10835 .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727, 10836 .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727,
10836 .addr = 0xff, 10837 .addr = 0xff,
10837 .def_md_devad = 0, 10838 .def_md_devad = 0,
10838 .flags = (FLAGS_FAN_FAILURE_DET_REQ | 10839 .flags = FLAGS_FAN_FAILURE_DET_REQ,
10839 FLAGS_TX_ERROR_CHECK),
10840 .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, 10840 .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
10841 .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, 10841 .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
10842 .mdio_ctrl = 0, 10842 .mdio_ctrl = 0,
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 85dd294aeab..621ab281ed8 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -408,8 +408,8 @@ u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type,
408 opcode |= (DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET); 408 opcode |= (DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET);
409 409
410 opcode |= (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0); 410 opcode |= (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0);
411 opcode |= ((BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT) | 411 opcode |= ((BP_VN(bp) << DMAE_CMD_E1HVN_SHIFT) |
412 (BP_E1HVN(bp) << DMAE_COMMAND_DST_VN_SHIFT)); 412 (BP_VN(bp) << DMAE_COMMAND_DST_VN_SHIFT));
413 opcode |= (DMAE_COM_SET_ERR << DMAE_COMMAND_ERR_POLICY_SHIFT); 413 opcode |= (DMAE_COM_SET_ERR << DMAE_COMMAND_ERR_POLICY_SHIFT);
414 414
415#ifdef __BIG_ENDIAN 415#ifdef __BIG_ENDIAN
@@ -1417,7 +1417,7 @@ static void bnx2x_hc_int_enable(struct bnx2x *bp)
1417 if (!CHIP_IS_E1(bp)) { 1417 if (!CHIP_IS_E1(bp)) {
1418 /* init leading/trailing edge */ 1418 /* init leading/trailing edge */
1419 if (IS_MF(bp)) { 1419 if (IS_MF(bp)) {
1420 val = (0xee0f | (1 << (BP_E1HVN(bp) + 4))); 1420 val = (0xee0f | (1 << (BP_VN(bp) + 4)));
1421 if (bp->port.pmf) 1421 if (bp->port.pmf)
1422 /* enable nig and gpio3 attention */ 1422 /* enable nig and gpio3 attention */
1423 val |= 0x1100; 1423 val |= 0x1100;
@@ -1469,7 +1469,7 @@ static void bnx2x_igu_int_enable(struct bnx2x *bp)
1469 1469
1470 /* init leading/trailing edge */ 1470 /* init leading/trailing edge */
1471 if (IS_MF(bp)) { 1471 if (IS_MF(bp)) {
1472 val = (0xee0f | (1 << (BP_E1HVN(bp) + 4))); 1472 val = (0xee0f | (1 << (BP_VN(bp) + 4)));
1473 if (bp->port.pmf) 1473 if (bp->port.pmf)
1474 /* enable nig and gpio3 attention */ 1474 /* enable nig and gpio3 attention */
1475 val |= 0x1100; 1475 val |= 0x1100;
@@ -2285,7 +2285,7 @@ static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
2285 int vn; 2285 int vn;
2286 2286
2287 bp->vn_weight_sum = 0; 2287 bp->vn_weight_sum = 0;
2288 for (vn = VN_0; vn < E1HVN_MAX; vn++) { 2288 for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
2289 u32 vn_cfg = bp->mf_config[vn]; 2289 u32 vn_cfg = bp->mf_config[vn];
2290 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >> 2290 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2291 FUNC_MF_CFG_MIN_BW_SHIFT) * 100; 2291 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
@@ -2318,12 +2318,18 @@ static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
2318 CMNG_FLAGS_PER_PORT_FAIRNESS_VN; 2318 CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2319} 2319}
2320 2320
2321/* returns func by VN for current port */
2322static inline int func_by_vn(struct bnx2x *bp, int vn)
2323{
2324 return 2 * vn + BP_PORT(bp);
2325}
2326
2321static void bnx2x_init_vn_minmax(struct bnx2x *bp, int vn) 2327static void bnx2x_init_vn_minmax(struct bnx2x *bp, int vn)
2322{ 2328{
2323 struct rate_shaping_vars_per_vn m_rs_vn; 2329 struct rate_shaping_vars_per_vn m_rs_vn;
2324 struct fairness_vars_per_vn m_fair_vn; 2330 struct fairness_vars_per_vn m_fair_vn;
2325 u32 vn_cfg = bp->mf_config[vn]; 2331 u32 vn_cfg = bp->mf_config[vn];
2326 int func = 2*vn + BP_PORT(bp); 2332 int func = func_by_vn(bp, vn);
2327 u16 vn_min_rate, vn_max_rate; 2333 u16 vn_min_rate, vn_max_rate;
2328 int i; 2334 int i;
2329 2335
@@ -2420,7 +2426,7 @@ void bnx2x_read_mf_cfg(struct bnx2x *bp)
2420 * 2426 *
2421 * and there are 2 functions per port 2427 * and there are 2 functions per port
2422 */ 2428 */
2423 for (vn = VN_0; vn < E1HVN_MAX; vn++) { 2429 for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
2424 int /*abs*/func = n * (2 * vn + BP_PORT(bp)) + BP_PATH(bp); 2430 int /*abs*/func = n * (2 * vn + BP_PORT(bp)) + BP_PATH(bp);
2425 2431
2426 if (func >= E1H_FUNC_MAX) 2432 if (func >= E1H_FUNC_MAX)
@@ -2452,7 +2458,7 @@ static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type)
2452 2458
2453 /* calculate and set min-max rate for each vn */ 2459 /* calculate and set min-max rate for each vn */
2454 if (bp->port.pmf) 2460 if (bp->port.pmf)
2455 for (vn = VN_0; vn < E1HVN_MAX; vn++) 2461 for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++)
2456 bnx2x_init_vn_minmax(bp, vn); 2462 bnx2x_init_vn_minmax(bp, vn);
2457 2463
2458 /* always enable rate shaping and fairness */ 2464 /* always enable rate shaping and fairness */
@@ -2471,16 +2477,15 @@ static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type)
2471 2477
2472static inline void bnx2x_link_sync_notify(struct bnx2x *bp) 2478static inline void bnx2x_link_sync_notify(struct bnx2x *bp)
2473{ 2479{
2474 int port = BP_PORT(bp);
2475 int func; 2480 int func;
2476 int vn; 2481 int vn;
2477 2482
2478 /* Set the attention towards other drivers on the same port */ 2483 /* Set the attention towards other drivers on the same port */
2479 for (vn = VN_0; vn < E1HVN_MAX; vn++) { 2484 for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
2480 if (vn == BP_E1HVN(bp)) 2485 if (vn == BP_VN(bp))
2481 continue; 2486 continue;
2482 2487
2483 func = ((vn << 1) | port); 2488 func = func_by_vn(bp, vn);
2484 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 + 2489 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2485 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1); 2490 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2486 } 2491 }
@@ -2575,7 +2580,7 @@ static void bnx2x_pmf_update(struct bnx2x *bp)
2575 bnx2x_dcbx_pmf_update(bp); 2580 bnx2x_dcbx_pmf_update(bp);
2576 2581
2577 /* enable nig attention */ 2582 /* enable nig attention */
2578 val = (0xff0f | (1 << (BP_E1HVN(bp) + 4))); 2583 val = (0xff0f | (1 << (BP_VN(bp) + 4)));
2579 if (bp->common.int_block == INT_BLOCK_HC) { 2584 if (bp->common.int_block == INT_BLOCK_HC) {
2580 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val); 2585 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2581 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val); 2586 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
@@ -2754,8 +2759,14 @@ static void bnx2x_pf_rx_q_prep(struct bnx2x *bp,
2754 u16 tpa_agg_size = 0; 2759 u16 tpa_agg_size = 0;
2755 2760
2756 if (!fp->disable_tpa) { 2761 if (!fp->disable_tpa) {
2757 pause->sge_th_hi = 250; 2762 pause->sge_th_lo = SGE_TH_LO(bp);
2758 pause->sge_th_lo = 150; 2763 pause->sge_th_hi = SGE_TH_HI(bp);
2764
2765 /* validate SGE ring has enough to cross high threshold */
2766 WARN_ON(bp->dropless_fc &&
2767 pause->sge_th_hi + FW_PREFETCH_CNT >
2768 MAX_RX_SGE_CNT * NUM_RX_SGE_PAGES);
2769
2759 tpa_agg_size = min_t(u32, 2770 tpa_agg_size = min_t(u32,
2760 (min_t(u32, 8, MAX_SKB_FRAGS) * 2771 (min_t(u32, 8, MAX_SKB_FRAGS) *
2761 SGE_PAGE_SIZE * PAGES_PER_SGE), 0xffff); 2772 SGE_PAGE_SIZE * PAGES_PER_SGE), 0xffff);
@@ -2769,10 +2780,21 @@ static void bnx2x_pf_rx_q_prep(struct bnx2x *bp,
2769 2780
2770 /* pause - not for e1 */ 2781 /* pause - not for e1 */
2771 if (!CHIP_IS_E1(bp)) { 2782 if (!CHIP_IS_E1(bp)) {
2772 pause->bd_th_hi = 350; 2783 pause->bd_th_lo = BD_TH_LO(bp);
2773 pause->bd_th_lo = 250; 2784 pause->bd_th_hi = BD_TH_HI(bp);
2774 pause->rcq_th_hi = 350; 2785
2775 pause->rcq_th_lo = 250; 2786 pause->rcq_th_lo = RCQ_TH_LO(bp);
2787 pause->rcq_th_hi = RCQ_TH_HI(bp);
2788 /*
2789 * validate that rings have enough entries to cross
2790 * high thresholds
2791 */
2792 WARN_ON(bp->dropless_fc &&
2793 pause->bd_th_hi + FW_PREFETCH_CNT >
2794 bp->rx_ring_size);
2795 WARN_ON(bp->dropless_fc &&
2796 pause->rcq_th_hi + FW_PREFETCH_CNT >
2797 NUM_RCQ_RINGS * MAX_RCQ_DESC_CNT);
2776 2798
2777 pause->pri_map = 1; 2799 pause->pri_map = 1;
2778 } 2800 }
@@ -2800,9 +2822,7 @@ static void bnx2x_pf_rx_q_prep(struct bnx2x *bp,
2800 * For PF Clients it should be the maximum avaliable number. 2822 * For PF Clients it should be the maximum avaliable number.
2801 * VF driver(s) may want to define it to a smaller value. 2823 * VF driver(s) may want to define it to a smaller value.
2802 */ 2824 */
2803 rxq_init->max_tpa_queues = 2825 rxq_init->max_tpa_queues = MAX_AGG_QS(bp);
2804 (CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
2805 ETH_MAX_AGGREGATION_QUEUES_E1H_E2);
2806 2826
2807 rxq_init->cache_line_log = BNX2X_RX_ALIGN_SHIFT; 2827 rxq_init->cache_line_log = BNX2X_RX_ALIGN_SHIFT;
2808 rxq_init->fw_sb_id = fp->fw_sb_id; 2828 rxq_init->fw_sb_id = fp->fw_sb_id;
@@ -4804,6 +4824,37 @@ void bnx2x_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm,
4804 hc_sm->time_to_expire = 0xFFFFFFFF; 4824 hc_sm->time_to_expire = 0xFFFFFFFF;
4805} 4825}
4806 4826
4827
4828/* allocates state machine ids. */
4829static inline
4830void bnx2x_map_sb_state_machines(struct hc_index_data *index_data)
4831{
4832 /* zero out state machine indices */
4833 /* rx indices */
4834 index_data[HC_INDEX_ETH_RX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID;
4835
4836 /* tx indices */
4837 index_data[HC_INDEX_OOO_TX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID;
4838 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags &= ~HC_INDEX_DATA_SM_ID;
4839 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags &= ~HC_INDEX_DATA_SM_ID;
4840 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags &= ~HC_INDEX_DATA_SM_ID;
4841
4842 /* map indices */
4843 /* rx indices */
4844 index_data[HC_INDEX_ETH_RX_CQ_CONS].flags |=
4845 SM_RX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
4846
4847 /* tx indices */
4848 index_data[HC_INDEX_OOO_TX_CQ_CONS].flags |=
4849 SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
4850 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags |=
4851 SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
4852 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags |=
4853 SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
4854 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags |=
4855 SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
4856}
4857
4807static void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid, 4858static void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
4808 u8 vf_valid, int fw_sb_id, int igu_sb_id) 4859 u8 vf_valid, int fw_sb_id, int igu_sb_id)
4809{ 4860{
@@ -4835,6 +4886,7 @@ static void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
4835 hc_sm_p = sb_data_e2.common.state_machine; 4886 hc_sm_p = sb_data_e2.common.state_machine;
4836 sb_data_p = (u32 *)&sb_data_e2; 4887 sb_data_p = (u32 *)&sb_data_e2;
4837 data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32); 4888 data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
4889 bnx2x_map_sb_state_machines(sb_data_e2.index_data);
4838 } else { 4890 } else {
4839 memset(&sb_data_e1x, 0, 4891 memset(&sb_data_e1x, 0,
4840 sizeof(struct hc_status_block_data_e1x)); 4892 sizeof(struct hc_status_block_data_e1x));
@@ -4849,6 +4901,7 @@ static void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
4849 hc_sm_p = sb_data_e1x.common.state_machine; 4901 hc_sm_p = sb_data_e1x.common.state_machine;
4850 sb_data_p = (u32 *)&sb_data_e1x; 4902 sb_data_p = (u32 *)&sb_data_e1x;
4851 data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32); 4903 data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
4904 bnx2x_map_sb_state_machines(sb_data_e1x.index_data);
4852 } 4905 }
4853 4906
4854 bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_RX_ID], 4907 bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_RX_ID],
@@ -5798,7 +5851,7 @@ static int bnx2x_init_hw_common(struct bnx2x *bp)
5798 * take the UNDI lock to protect undi_unload flow from accessing 5851 * take the UNDI lock to protect undi_unload flow from accessing
5799 * registers while we're resetting the chip 5852 * registers while we're resetting the chip
5800 */ 5853 */
5801 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI); 5854 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
5802 5855
5803 bnx2x_reset_common(bp); 5856 bnx2x_reset_common(bp);
5804 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff); 5857 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
@@ -5810,7 +5863,7 @@ static int bnx2x_init_hw_common(struct bnx2x *bp)
5810 } 5863 }
5811 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, val); 5864 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, val);
5812 5865
5813 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI); 5866 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
5814 5867
5815 bnx2x_init_block(bp, BLOCK_MISC, PHASE_COMMON); 5868 bnx2x_init_block(bp, BLOCK_MISC, PHASE_COMMON);
5816 5869
@@ -6667,12 +6720,16 @@ static int bnx2x_init_hw_func(struct bnx2x *bp)
6667 if (CHIP_MODE_IS_4_PORT(bp)) 6720 if (CHIP_MODE_IS_4_PORT(bp))
6668 dsb_idx = BP_FUNC(bp); 6721 dsb_idx = BP_FUNC(bp);
6669 else 6722 else
6670 dsb_idx = BP_E1HVN(bp); 6723 dsb_idx = BP_VN(bp);
6671 6724
6672 prod_offset = (CHIP_INT_MODE_IS_BC(bp) ? 6725 prod_offset = (CHIP_INT_MODE_IS_BC(bp) ?
6673 IGU_BC_BASE_DSB_PROD + dsb_idx : 6726 IGU_BC_BASE_DSB_PROD + dsb_idx :
6674 IGU_NORM_BASE_DSB_PROD + dsb_idx); 6727 IGU_NORM_BASE_DSB_PROD + dsb_idx);
6675 6728
6729 /*
6730 * igu prods come in chunks of E1HVN_MAX (4) -
6731 * does not matters what is the current chip mode
6732 */
6676 for (i = 0; i < (num_segs * E1HVN_MAX); 6733 for (i = 0; i < (num_segs * E1HVN_MAX);
6677 i += E1HVN_MAX) { 6734 i += E1HVN_MAX) {
6678 addr = IGU_REG_PROD_CONS_MEMORY + 6735 addr = IGU_REG_PROD_CONS_MEMORY +
@@ -7566,7 +7623,7 @@ u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode)
7566 u32 val; 7623 u32 val;
7567 /* The mac address is written to entries 1-4 to 7624 /* The mac address is written to entries 1-4 to
7568 preserve entry 0 which is used by the PMF */ 7625 preserve entry 0 which is used by the PMF */
7569 u8 entry = (BP_E1HVN(bp) + 1)*8; 7626 u8 entry = (BP_VN(bp) + 1)*8;
7570 7627
7571 val = (mac_addr[0] << 8) | mac_addr[1]; 7628 val = (mac_addr[0] << 8) | mac_addr[1];
7572 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val); 7629 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
@@ -8542,10 +8599,12 @@ static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
8542 /* Check if there is any driver already loaded */ 8599 /* Check if there is any driver already loaded */
8543 val = REG_RD(bp, MISC_REG_UNPREPARED); 8600 val = REG_RD(bp, MISC_REG_UNPREPARED);
8544 if (val == 0x1) { 8601 if (val == 0x1) {
8545 /* Check if it is the UNDI driver 8602
8603 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
8604 /*
8605 * Check if it is the UNDI driver
8546 * UNDI driver initializes CID offset for normal bell to 0x7 8606 * UNDI driver initializes CID offset for normal bell to 0x7
8547 */ 8607 */
8548 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
8549 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST); 8608 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
8550 if (val == 0x7) { 8609 if (val == 0x7) {
8551 u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS; 8610 u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
@@ -8583,9 +8642,6 @@ static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
8583 bnx2x_fw_command(bp, reset_code, 0); 8642 bnx2x_fw_command(bp, reset_code, 0);
8584 } 8643 }
8585 8644
8586 /* now it's safe to release the lock */
8587 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
8588
8589 bnx2x_undi_int_disable(bp); 8645 bnx2x_undi_int_disable(bp);
8590 port = BP_PORT(bp); 8646 port = BP_PORT(bp);
8591 8647
@@ -8635,8 +8691,10 @@ static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
8635 bp->fw_seq = 8691 bp->fw_seq =
8636 (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) & 8692 (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
8637 DRV_MSG_SEQ_NUMBER_MASK); 8693 DRV_MSG_SEQ_NUMBER_MASK);
8638 } else 8694 }
8639 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI); 8695
8696 /* now it's safe to release the lock */
8697 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
8640 } 8698 }
8641} 8699}
8642 8700
@@ -8773,13 +8831,13 @@ static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
8773static void __devinit bnx2x_get_igu_cam_info(struct bnx2x *bp) 8831static void __devinit bnx2x_get_igu_cam_info(struct bnx2x *bp)
8774{ 8832{
8775 int pfid = BP_FUNC(bp); 8833 int pfid = BP_FUNC(bp);
8776 int vn = BP_E1HVN(bp);
8777 int igu_sb_id; 8834 int igu_sb_id;
8778 u32 val; 8835 u32 val;
8779 u8 fid, igu_sb_cnt = 0; 8836 u8 fid, igu_sb_cnt = 0;
8780 8837
8781 bp->igu_base_sb = 0xff; 8838 bp->igu_base_sb = 0xff;
8782 if (CHIP_INT_MODE_IS_BC(bp)) { 8839 if (CHIP_INT_MODE_IS_BC(bp)) {
8840 int vn = BP_VN(bp);
8783 igu_sb_cnt = bp->igu_sb_cnt; 8841 igu_sb_cnt = bp->igu_sb_cnt;
8784 bp->igu_base_sb = (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn) * 8842 bp->igu_base_sb = (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn) *
8785 FP_SB_MAX_E1x; 8843 FP_SB_MAX_E1x;
@@ -9410,6 +9468,10 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
9410 bp->igu_base_sb = 0; 9468 bp->igu_base_sb = 0;
9411 } else { 9469 } else {
9412 bp->common.int_block = INT_BLOCK_IGU; 9470 bp->common.int_block = INT_BLOCK_IGU;
9471
9472 /* do not allow device reset during IGU info preocessing */
9473 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
9474
9413 val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION); 9475 val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION);
9414 9476
9415 if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) { 9477 if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
@@ -9441,6 +9503,7 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
9441 9503
9442 bnx2x_get_igu_cam_info(bp); 9504 bnx2x_get_igu_cam_info(bp);
9443 9505
9506 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
9444 } 9507 }
9445 9508
9446 /* 9509 /*
@@ -9467,7 +9530,7 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
9467 9530
9468 bp->mf_ov = 0; 9531 bp->mf_ov = 0;
9469 bp->mf_mode = 0; 9532 bp->mf_mode = 0;
9470 vn = BP_E1HVN(bp); 9533 vn = BP_VN(bp);
9471 9534
9472 if (!CHIP_IS_E1(bp) && !BP_NOMCP(bp)) { 9535 if (!CHIP_IS_E1(bp) && !BP_NOMCP(bp)) {
9473 BNX2X_DEV_INFO("shmem2base 0x%x, size %d, mfcfg offset %d\n", 9536 BNX2X_DEV_INFO("shmem2base 0x%x, size %d, mfcfg offset %d\n",
@@ -9587,13 +9650,6 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
9587 /* port info */ 9650 /* port info */
9588 bnx2x_get_port_hwinfo(bp); 9651 bnx2x_get_port_hwinfo(bp);
9589 9652
9590 if (!BP_NOMCP(bp)) {
9591 bp->fw_seq =
9592 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
9593 DRV_MSG_SEQ_NUMBER_MASK);
9594 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
9595 }
9596
9597 /* Get MAC addresses */ 9653 /* Get MAC addresses */
9598 bnx2x_get_mac_hwinfo(bp); 9654 bnx2x_get_mac_hwinfo(bp);
9599 9655
@@ -9759,6 +9815,14 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp)
9759 if (!BP_NOMCP(bp)) 9815 if (!BP_NOMCP(bp))
9760 bnx2x_undi_unload(bp); 9816 bnx2x_undi_unload(bp);
9761 9817
9818 /* init fw_seq after undi_unload! */
9819 if (!BP_NOMCP(bp)) {
9820 bp->fw_seq =
9821 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
9822 DRV_MSG_SEQ_NUMBER_MASK);
9823 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
9824 }
9825
9762 if (CHIP_REV_IS_FPGA(bp)) 9826 if (CHIP_REV_IS_FPGA(bp))
9763 dev_err(&bp->pdev->dev, "FPGA detected\n"); 9827 dev_err(&bp->pdev->dev, "FPGA detected\n");
9764 9828
@@ -10253,17 +10317,21 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
10253 /* clean indirect addresses */ 10317 /* clean indirect addresses */
10254 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, 10318 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
10255 PCICFG_VENDOR_ID_OFFSET); 10319 PCICFG_VENDOR_ID_OFFSET);
10256 /* Clean the following indirect addresses for all functions since it 10320 /*
10321 * Clean the following indirect addresses for all functions since it
10257 * is not used by the driver. 10322 * is not used by the driver.
10258 */ 10323 */
10259 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0, 0); 10324 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0, 0);
10260 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0, 0); 10325 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0, 0);
10261 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0, 0); 10326 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0, 0);
10262 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0, 0); 10327 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0, 0);
10263 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F1, 0); 10328
10264 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F1, 0); 10329 if (CHIP_IS_E1x(bp)) {
10265 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F1, 0); 10330 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F1, 0);
10266 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F1, 0); 10331 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F1, 0);
10332 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F1, 0);
10333 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F1, 0);
10334 }
10267 10335
10268 /* 10336 /*
10269 * Enable internal target-read (in case we are probed after PF FLR). 10337 * Enable internal target-read (in case we are probed after PF FLR).
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
index 40266c14e6d..750e8445dac 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
@@ -5320,7 +5320,7 @@
 #define XCM_REG_XX_OVFL_EVNT_ID 0x20058
 #define XMAC_CLEAR_RX_LSS_STATUS_REG_CLEAR_LOCAL_FAULT_STATUS (0x1<<0)
 #define XMAC_CLEAR_RX_LSS_STATUS_REG_CLEAR_REMOTE_FAULT_STATUS (0x1<<1)
-#define XMAC_CTRL_REG_CORE_LOCAL_LPBK (0x1<<3)
+#define XMAC_CTRL_REG_LINE_LOCAL_LPBK (0x1<<2)
 #define XMAC_CTRL_REG_RX_EN (0x1<<1)
 #define XMAC_CTRL_REG_SOFT_RESET (0x1<<6)
 #define XMAC_CTRL_REG_TX_EN (0x1<<0)
@@ -5766,7 +5766,7 @@
 #define HW_LOCK_RESOURCE_RECOVERY_LEADER_0 8
 #define HW_LOCK_RESOURCE_RECOVERY_LEADER_1 9
 #define HW_LOCK_RESOURCE_SPIO 2
-#define HW_LOCK_RESOURCE_UNDI 5
+#define HW_LOCK_RESOURCE_RESET 5
 #define AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT (0x1<<4)
 #define AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR (0x1<<5)
 #define AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR (0x1<<18)
@@ -6853,6 +6853,9 @@ The other bits are reserved and should be zero*/
 #define MDIO_WC_REG_IEEE0BLK_AUTONEGNP 0x7
 #define MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT0 0x10
 #define MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT1 0x11
+#define MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT2 0x12
+#define MDIO_WC_REG_AN_IEEE1BLK_AN_ADV2_FEC_ABILITY 0x4000
+#define MDIO_WC_REG_AN_IEEE1BLK_AN_ADV2_FEC_REQ 0x8000
 #define MDIO_WC_REG_PMD_IEEE9BLK_TENGBASE_KR_PMD_CONTROL_REGISTER_150 0x96
 #define MDIO_WC_REG_XGXSBLK0_XGXSCONTROL 0x8000
 #define MDIO_WC_REG_XGXSBLK0_MISCCONTROL1 0x800e
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
index 628f7b99614..02ac6a771bf 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
@@ -713,7 +713,8 @@ static int bnx2x_hw_stats_update(struct bnx2x *bp)
 		break;
 
 	case MAC_TYPE_NONE: /* unreached */
-		BNX2X_ERR("stats updated by DMAE but no MAC active\n");
+		DP(BNX2X_MSG_STATS,
+		   "stats updated by DMAE but no MAC active\n");
 		return -1;
 
 	default: /* unreached */
@@ -1391,7 +1392,7 @@ static void bnx2x_port_stats_base_init(struct bnx2x *bp)
 
 static void bnx2x_func_stats_base_init(struct bnx2x *bp)
 {
-	int vn, vn_max = IS_MF(bp) ? E1HVN_MAX : E1VN_MAX;
+	int vn, vn_max = IS_MF(bp) ? BP_MAX_VN_NUM(bp) : E1VN_MAX;
 	u32 func_stx;
 
 	/* sanity */
@@ -1404,7 +1405,7 @@ static void bnx2x_func_stats_base_init(struct bnx2x *bp)
 	func_stx = bp->func_stx;
 
 	for (vn = VN_0; vn < vn_max; vn++) {
-		int mb_idx = CHIP_IS_E1x(bp) ? 2*vn + BP_PORT(bp) : vn;
+		int mb_idx = BP_FW_MB_IDX_VN(bp, vn);
 
 		bp->func_stx = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_param);
 		bnx2x_func_stats_init(bp);
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 1485013b4b8..26c6bd44a60 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -6738,12 +6738,10 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	    !mss && skb->len > VLAN_ETH_FRAME_LEN)
 		base_flags |= TXD_FLAG_JMB_PKT;
 
-#ifdef BCM_KERNEL_SUPPORTS_8021Q
 	if (vlan_tx_tag_present(skb)) {
 		base_flags |= TXD_FLAG_VLAN;
 		vlan = vlan_tx_tag_get(skb);
 	}
-#endif
 
 	len = skb_headlen(skb);
 
diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c
index f30b96fee84..212736bab6b 100644
--- a/drivers/net/ethernet/freescale/gianfar_ethtool.c
+++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c
@@ -1669,10 +1669,10 @@ static int gfar_get_cls_all(struct gfar_private *priv,
 	u32 i = 0;
 
 	list_for_each_entry(comp, &priv->rx_list.list, list) {
-		if (i <= cmd->rule_cnt) {
-			rule_locs[i] = comp->fs.location;
-			i++;
-		}
+		if (i == cmd->rule_cnt)
+			return -EMSGSIZE;
+		rule_locs[i] = comp->fs.location;
+		i++;
 	}
 
 	cmd->data = MAX_FILER_IDX;
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index 8cca4a62b39..72b84de4875 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -395,7 +395,7 @@ static inline struct sk_buff *ibmveth_rxq_get_buffer(struct ibmveth_adapter *ada
 }
 
 /* recycle the current buffer on the rx queue */
-static void ibmveth_rxq_recycle_buffer(struct ibmveth_adapter *adapter)
+static int ibmveth_rxq_recycle_buffer(struct ibmveth_adapter *adapter)
 {
 	u32 q_index = adapter->rx_queue.index;
 	u64 correlator = adapter->rx_queue.queue_addr[q_index].correlator;
@@ -403,6 +403,7 @@ static void ibmveth_rxq_recycle_buffer(struct ibmveth_adapter *adapter)
 	unsigned int index = correlator & 0xffffffffUL;
 	union ibmveth_buf_desc desc;
 	unsigned long lpar_rc;
+	int ret = 1;
 
 	BUG_ON(pool >= IBMVETH_NUM_BUFF_POOLS);
 	BUG_ON(index >= adapter->rx_buff_pool[pool].size);
@@ -410,7 +411,7 @@ static void ibmveth_rxq_recycle_buffer(struct ibmveth_adapter *adapter)
 	if (!adapter->rx_buff_pool[pool].active) {
 		ibmveth_rxq_harvest_buffer(adapter);
 		ibmveth_free_buffer_pool(adapter, &adapter->rx_buff_pool[pool]);
-		return;
+		goto out;
 	}
 
 	desc.fields.flags_len = IBMVETH_BUF_VALID |
@@ -423,12 +424,16 @@ static void ibmveth_rxq_recycle_buffer(struct ibmveth_adapter *adapter)
 		netdev_dbg(adapter->netdev, "h_add_logical_lan_buffer failed "
			   "during recycle rc=%ld", lpar_rc);
 		ibmveth_remove_buffer_from_pool(adapter, adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator);
+		ret = 0;
 	}
 
 	if (++adapter->rx_queue.index == adapter->rx_queue.num_slots) {
 		adapter->rx_queue.index = 0;
 		adapter->rx_queue.toggle = !adapter->rx_queue.toggle;
 	}
+
+out:
+	return ret;
 }
 
 static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter)
@@ -752,7 +757,7 @@ static int ibmveth_set_csum_offload(struct net_device *dev, u32 data)
 	struct ibmveth_adapter *adapter = netdev_priv(dev);
 	unsigned long set_attr, clr_attr, ret_attr;
 	unsigned long set_attr6, clr_attr6;
-	long ret, ret6;
+	long ret, ret4, ret6;
 	int rc1 = 0, rc2 = 0;
 	int restart = 0;
 
@@ -765,6 +770,8 @@ static int ibmveth_set_csum_offload(struct net_device *dev, u32 data)
 
 	set_attr = 0;
 	clr_attr = 0;
+	set_attr6 = 0;
+	clr_attr6 = 0;
 
 	if (data) {
 		set_attr = IBMVETH_ILLAN_IPV4_TCP_CSUM;
@@ -779,16 +786,20 @@ static int ibmveth_set_csum_offload(struct net_device *dev, u32 data)
 	if (ret == H_SUCCESS && !(ret_attr & IBMVETH_ILLAN_ACTIVE_TRUNK) &&
	    !(ret_attr & IBMVETH_ILLAN_TRUNK_PRI_MASK) &&
	    (ret_attr & IBMVETH_ILLAN_PADDED_PKT_CSUM)) {
-		ret = h_illan_attributes(adapter->vdev->unit_address, clr_attr,
+		ret4 = h_illan_attributes(adapter->vdev->unit_address, clr_attr,
					 set_attr, &ret_attr);
 
-		if (ret != H_SUCCESS) {
+		if (ret4 != H_SUCCESS) {
 			netdev_err(dev, "unable to change IPv4 checksum "
					"offload settings. %d rc=%ld\n",
-					data, ret);
+					data, ret4);
+
+			h_illan_attributes(adapter->vdev->unit_address,
+					   set_attr, clr_attr, &ret_attr);
+
+			if (data == 1)
+				dev->features &= ~NETIF_F_IP_CSUM;
 
-			ret = h_illan_attributes(adapter->vdev->unit_address,
-						 set_attr, clr_attr, &ret_attr);
 		} else {
 			adapter->fw_ipv4_csum_support = data;
 		}
@@ -799,15 +810,18 @@ static int ibmveth_set_csum_offload(struct net_device *dev, u32 data)
 		if (ret6 != H_SUCCESS) {
 			netdev_err(dev, "unable to change IPv6 checksum "
					"offload settings. %d rc=%ld\n",
-					data, ret);
+					data, ret6);
+
+			h_illan_attributes(adapter->vdev->unit_address,
+					   set_attr6, clr_attr6, &ret_attr);
+
+			if (data == 1)
+				dev->features &= ~NETIF_F_IPV6_CSUM;
 
-			ret = h_illan_attributes(adapter->vdev->unit_address,
-						 set_attr6, clr_attr6,
-						 &ret_attr);
 		} else
 			adapter->fw_ipv6_csum_support = data;
 
-	if (ret != H_SUCCESS || ret6 != H_SUCCESS)
+	if (ret4 == H_SUCCESS || ret6 == H_SUCCESS)
 		adapter->rx_csum = data;
 	else
 		rc1 = -EIO;
@@ -925,6 +939,7 @@ static netdev_tx_t ibmveth_start_xmit(struct sk_buff *skb,
 	union ibmveth_buf_desc descs[6];
 	int last, i;
 	int force_bounce = 0;
+	dma_addr_t dma_addr;
 
 	/*
 	 * veth handles a maximum of 6 segments including the header, so
@@ -989,17 +1004,16 @@ retry_bounce:
 	}
 
 	/* Map the header */
-	descs[0].fields.address = dma_map_single(&adapter->vdev->dev, skb->data,
-						 skb_headlen(skb),
-						 DMA_TO_DEVICE);
-	if (dma_mapping_error(&adapter->vdev->dev, descs[0].fields.address))
+	dma_addr = dma_map_single(&adapter->vdev->dev, skb->data,
+				  skb_headlen(skb), DMA_TO_DEVICE);
+	if (dma_mapping_error(&adapter->vdev->dev, dma_addr))
 		goto map_failed;
 
 	descs[0].fields.flags_len = desc_flags | skb_headlen(skb);
+	descs[0].fields.address = dma_addr;
 
 	/* Map the frags */
 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-		unsigned long dma_addr;
 		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 
 		dma_addr = skb_frag_dma_map(&adapter->vdev->dev, frag, 0,
@@ -1020,7 +1034,12 @@ retry_bounce:
 		netdev->stats.tx_bytes += skb->len;
 	}
 
-	for (i = 0; i < skb_shinfo(skb)->nr_frags + 1; i++)
+	dma_unmap_single(&adapter->vdev->dev,
+			 descs[0].fields.address,
+			 descs[0].fields.flags_len & IBMVETH_BUF_LEN_MASK,
+			 DMA_TO_DEVICE);
+
+	for (i = 1; i < skb_shinfo(skb)->nr_frags + 1; i++)
 		dma_unmap_page(&adapter->vdev->dev, descs[i].fields.address,
			       descs[i].fields.flags_len & IBMVETH_BUF_LEN_MASK,
			       DMA_TO_DEVICE);
@@ -1083,8 +1102,9 @@ restart_poll:
 			if (rx_flush)
 				ibmveth_flush_buffer(skb->data,
					length + offset);
+			if (!ibmveth_rxq_recycle_buffer(adapter))
+				kfree_skb(skb);
 			skb = new_skb;
-			ibmveth_rxq_recycle_buffer(adapter);
 		} else {
 			ibmveth_rxq_harvest_buffer(adapter);
 			skb_reserve(skb, offset);
diff --git a/drivers/net/ethernet/intel/e1000/e1000_hw.c b/drivers/net/ethernet/intel/e1000/e1000_hw.c
index 8545c7aa93e..a5a89ecb6f3 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_hw.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_hw.c
@@ -4026,6 +4026,12 @@ s32 e1000_validate_eeprom_checksum(struct e1000_hw *hw)
 		checksum += eeprom_data;
 	}
 
+#ifdef CONFIG_PARISC
+	/* This is a signature and not a checksum on HP c8000 */
+	if ((hw->subsystem_vendor_id == 0x103C) && (eeprom_data == 0x16d6))
+		return E1000_SUCCESS;
+
+#endif
 	if (checksum == (u16) EEPROM_SUM)
 		return E1000_SUCCESS;
 	else {
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 49e82de136a..08439ca6073 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -1306,8 +1306,8 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
 		if (ring_is_rsc_enabled(rx_ring))
 			pkt_is_rsc = ixgbe_get_rsc_state(rx_desc);
 
-		/* if this is a skb from previous receive DMA will be 0 */
-		if (rx_buffer_info->dma) {
+		/* linear means we are building an skb from multiple pages */
+		if (!skb_is_nonlinear(skb)) {
 			u16 hlen;
 			if (pkt_is_rsc &&
			    !(staterr & IXGBE_RXD_STAT_EOP) &&
diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c
index 1a3033d8e7e..d17d0624c5e 100644
--- a/drivers/net/ethernet/marvell/pxa168_eth.c
+++ b/drivers/net/ethernet/marvell/pxa168_eth.c
@@ -40,6 +40,7 @@
 #include <linux/clk.h>
 #include <linux/phy.h>
 #include <linux/io.h>
+#include <linux/interrupt.h>
 #include <linux/types.h>
 #include <asm/pgtable.h>
 #include <asm/system.h>
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig b/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig
index 7efa6242723..00bc4fc968c 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig
@@ -3,7 +3,7 @@
 #
 
 config PCH_GBE
-	tristate "OKI SEMICONDUCTOR ML7223 IOH GbE (Intel EG20T PCH)"
+	tristate "OKI SEMICONDUCTOR IOH(ML7223/ML7831) GbE"
 	depends on PCI
 	select NET_CORE
 	select MII
@@ -15,7 +15,8 @@ config PCH_GBE
 	  to Gigabit Ethernet. This driver enables Gigabit Ethernet function.
 
 	  This driver also can be used for OKI SEMICONDUCTOR IOH(Input/
-	  Output Hub), ML7223.
-	  ML7223 IOH is for MP(Media Phone) use.
-	  ML7223 is companion chip for Intel Atom E6xx series.
-	  ML7223 is completely compatible for Intel EG20T PCH.
+	  Output Hub), ML7223/ML7831.
+	  ML7223 IOH is for MP(Media Phone) use. ML7831 IOH is for general
+	  purpose use.
+	  ML7223/ML7831 is companion chip for Intel Atom E6xx series.
+	  ML7223/ML7831 is completely compatible for Intel EG20T PCH.
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h
index 59fac77d0db..a09a07197eb 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h
@@ -127,8 +127,8 @@ struct pch_gbe_regs {
 
 /* Reset */
 #define PCH_GBE_ALL_RST 0x80000000 /* All reset */
-#define PCH_GBE_TX_RST 0x40000000 /* TX MAC, TX FIFO, TX DMA reset */
-#define PCH_GBE_RX_RST 0x04000000 /* RX MAC, RX FIFO, RX DMA reset */
+#define PCH_GBE_TX_RST 0x00008000 /* TX MAC, TX FIFO, TX DMA reset */
+#define PCH_GBE_RX_RST 0x00004000 /* RX MAC, RX FIFO, RX DMA reset */
 
 /* TCP/IP Accelerator Control */
 #define PCH_GBE_EX_LIST_EN 0x00000008 /* External List Enable */
@@ -276,6 +276,9 @@ struct pch_gbe_regs {
 #define PCH_GBE_RX_DMA_EN 0x00000002 /* Enables Receive DMA */
 #define PCH_GBE_TX_DMA_EN 0x00000001 /* Enables Transmission DMA */
 
+/* RX DMA STATUS */
+#define PCH_GBE_IDLE_CHECK 0xFFFFFFFE
+
 /* Wake On LAN Status */
 #define PCH_GBE_WLS_BR 0x00000008 /* Broadcas Address */
 #define PCH_GBE_WLS_MLT 0x00000004 /* Multicast Address */
@@ -471,6 +474,7 @@ struct pch_gbe_tx_desc {
 struct pch_gbe_buffer {
 	struct sk_buff *skb;
 	dma_addr_t dma;
+	unsigned char *rx_buffer;
 	unsigned long time_stamp;
 	u16 length;
 	bool mapped;
@@ -511,6 +515,9 @@ struct pch_gbe_tx_ring {
 struct pch_gbe_rx_ring {
 	struct pch_gbe_rx_desc *desc;
 	dma_addr_t dma;
+	unsigned char *rx_buff_pool;
+	dma_addr_t rx_buff_pool_logic;
+	unsigned int rx_buff_pool_size;
 	unsigned int size;
 	unsigned int count;
 	unsigned int next_to_use;
@@ -622,6 +629,7 @@ struct pch_gbe_adapter {
 	unsigned long rx_buffer_len;
 	unsigned long tx_queue_len;
 	bool have_msi;
+	bool rx_stop_flag;
 };
 
 extern const char pch_driver_version[];
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
index 72276fe78f8..35a7c21680b 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
@@ -20,7 +20,6 @@
 
 #include "pch_gbe.h"
 #include "pch_gbe_api.h"
-#include <linux/prefetch.h>
 
 #define DRV_VERSION "1.00"
 const char pch_driver_version[] = DRV_VERSION;
@@ -34,11 +33,15 @@ const char pch_driver_version[] = DRV_VERSION;
 #define PCH_GBE_WATCHDOG_PERIOD (1 * HZ) /* watchdog time */
 #define PCH_GBE_COPYBREAK_DEFAULT 256
 #define PCH_GBE_PCI_BAR 1
+#define PCH_GBE_RESERVE_MEMORY 0x200000 /* 2MB */
 
 /* Macros for ML7223 */
 #define PCI_VENDOR_ID_ROHM 0x10db
 #define PCI_DEVICE_ID_ROHM_ML7223_GBE 0x8013
 
+/* Macros for ML7831 */
+#define PCI_DEVICE_ID_ROHM_ML7831_GBE 0x8802
+
 #define PCH_GBE_TX_WEIGHT 64
 #define PCH_GBE_RX_WEIGHT 64
 #define PCH_GBE_RX_BUFFER_WRITE 16
@@ -52,6 +55,7 @@ const char pch_driver_version[] = DRV_VERSION;
 	)
 
 /* Ethertype field values */
+#define PCH_GBE_MAX_RX_BUFFER_SIZE 0x2880
 #define PCH_GBE_MAX_JUMBO_FRAME_SIZE 10318
 #define PCH_GBE_FRAME_SIZE_2048 2048
 #define PCH_GBE_FRAME_SIZE_4096 4096
@@ -83,10 +87,12 @@ const char pch_driver_version[] = DRV_VERSION;
 #define PCH_GBE_INT_ENABLE_MASK ( \
 	PCH_GBE_INT_RX_DMA_CMPLT | \
 	PCH_GBE_INT_RX_DSC_EMP | \
+	PCH_GBE_INT_RX_FIFO_ERR | \
 	PCH_GBE_INT_WOL_DET | \
 	PCH_GBE_INT_TX_CMPLT \
 )
 
+#define PCH_GBE_INT_DISABLE_ALL 0
 
 static unsigned int copybreak __read_mostly = PCH_GBE_COPYBREAK_DEFAULT;
 
@@ -138,6 +144,27 @@ static void pch_gbe_wait_clr_bit(void *reg, u32 bit)
 	if (!tmp)
 		pr_err("Error: busy bit is not cleared\n");
 }
+
+/**
+ * pch_gbe_wait_clr_bit_irq - Wait to clear a bit for interrupt context
+ * @reg: Pointer of register
+ * @busy: Busy bit
+ */
+static int pch_gbe_wait_clr_bit_irq(void *reg, u32 bit)
+{
+	u32 tmp;
+	int ret = -1;
+	/* wait busy */
+	tmp = 20;
+	while ((ioread32(reg) & bit) && --tmp)
+		udelay(5);
+	if (!tmp)
+		pr_err("Error: busy bit is not cleared\n");
+	else
+		ret = 0;
+	return ret;
+}
+
 /**
  * pch_gbe_mac_mar_set - Set MAC address register
  * @hw: Pointer to the HW structure
@@ -189,6 +216,17 @@ static void pch_gbe_mac_reset_hw(struct pch_gbe_hw *hw)
 	return;
 }
 
+static void pch_gbe_mac_reset_rx(struct pch_gbe_hw *hw)
+{
+	/* Read the MAC address. and store to the private data */
+	pch_gbe_mac_read_mac_addr(hw);
+	iowrite32(PCH_GBE_RX_RST, &hw->reg->RESET);
+	pch_gbe_wait_clr_bit_irq(&hw->reg->RESET, PCH_GBE_RX_RST);
+	/* Setup the MAC address */
+	pch_gbe_mac_mar_set(hw, hw->mac.addr, 0);
+	return;
+}
+
 /**
  * pch_gbe_mac_init_rx_addrs - Initialize receive address's
  * @hw: Pointer to the HW structure
@@ -671,13 +709,8 @@ static void pch_gbe_setup_rctl(struct pch_gbe_adapter *adapter)
 
 	tcpip = ioread32(&hw->reg->TCPIP_ACC);
 
-	if (netdev->features & NETIF_F_RXCSUM) {
-		tcpip &= ~PCH_GBE_RX_TCPIPACC_OFF;
-		tcpip |= PCH_GBE_RX_TCPIPACC_EN;
-	} else {
-		tcpip |= PCH_GBE_RX_TCPIPACC_OFF;
-		tcpip &= ~PCH_GBE_RX_TCPIPACC_EN;
-	}
+	tcpip |= PCH_GBE_RX_TCPIPACC_OFF;
+	tcpip &= ~PCH_GBE_RX_TCPIPACC_EN;
 	iowrite32(tcpip, &hw->reg->TCPIP_ACC);
 	return;
 }
@@ -717,13 +750,6 @@ static void pch_gbe_configure_rx(struct pch_gbe_adapter *adapter)
 	iowrite32(rdba, &hw->reg->RX_DSC_BASE);
 	iowrite32(rdlen, &hw->reg->RX_DSC_SIZE);
 	iowrite32((rdba + rdlen), &hw->reg->RX_DSC_SW_P);
-
-	/* Enables Receive DMA */
-	rxdma = ioread32(&hw->reg->DMA_CTRL);
-	rxdma |= PCH_GBE_RX_DMA_EN;
-	iowrite32(rxdma, &hw->reg->DMA_CTRL);
-	/* Enables Receive */
-	iowrite32(PCH_GBE_MRE_MAC_RX_EN, &hw->reg->MAC_RX_EN);
 }
 
 /**
@@ -1097,6 +1123,48 @@ void pch_gbe_update_stats(struct pch_gbe_adapter *adapter)
 	spin_unlock_irqrestore(&adapter->stats_lock, flags);
 }
 
+static void pch_gbe_stop_receive(struct pch_gbe_adapter *adapter)
+{
+	struct pch_gbe_hw *hw = &adapter->hw;
+	u32 rxdma;
+	u16 value;
+	int ret;
+
+	/* Disable Receive DMA */
+	rxdma = ioread32(&hw->reg->DMA_CTRL);
+	rxdma &= ~PCH_GBE_RX_DMA_EN;
+	iowrite32(rxdma, &hw->reg->DMA_CTRL);
+	/* Wait Rx DMA BUS is IDLE */
+	ret = pch_gbe_wait_clr_bit_irq(&hw->reg->RX_DMA_ST, PCH_GBE_IDLE_CHECK);
+	if (ret) {
+		/* Disable Bus master */
+		pci_read_config_word(adapter->pdev, PCI_COMMAND, &value);
+		value &= ~PCI_COMMAND_MASTER;
+		pci_write_config_word(adapter->pdev, PCI_COMMAND, value);
+		/* Stop Receive */
+		pch_gbe_mac_reset_rx(hw);
+		/* Enable Bus master */
+		value |= PCI_COMMAND_MASTER;
+		pci_write_config_word(adapter->pdev, PCI_COMMAND, value);
+	} else {
+		/* Stop Receive */
+		pch_gbe_mac_reset_rx(hw);
+	}
+}
+
+static void pch_gbe_start_receive(struct pch_gbe_hw *hw)
+{
+	u32 rxdma;
+
+	/* Enables Receive DMA */
+	rxdma = ioread32(&hw->reg->DMA_CTRL);
+	rxdma |= PCH_GBE_RX_DMA_EN;
+	iowrite32(rxdma, &hw->reg->DMA_CTRL);
+	/* Enables Receive */
+	iowrite32(PCH_GBE_MRE_MAC_RX_EN, &hw->reg->MAC_RX_EN);
+	return;
+}
+
 /**
  * pch_gbe_intr - Interrupt Handler
  * @irq: Interrupt number
@@ -1123,7 +1191,15 @@ static irqreturn_t pch_gbe_intr(int irq, void *data)
 	if (int_st & PCH_GBE_INT_RX_FRAME_ERR)
 		adapter->stats.intr_rx_frame_err_count++;
 	if (int_st & PCH_GBE_INT_RX_FIFO_ERR)
-		adapter->stats.intr_rx_fifo_err_count++;
+		if (!adapter->rx_stop_flag) {
+			adapter->stats.intr_rx_fifo_err_count++;
+			pr_debug("Rx fifo over run\n");
+			adapter->rx_stop_flag = true;
+			int_en = ioread32(&hw->reg->INT_EN);
+			iowrite32((int_en & ~PCH_GBE_INT_RX_FIFO_ERR),
+				  &hw->reg->INT_EN);
+			pch_gbe_stop_receive(adapter);
+		}
 	if (int_st & PCH_GBE_INT_RX_DMA_ERR)
 		adapter->stats.intr_rx_dma_err_count++;
 	if (int_st & PCH_GBE_INT_TX_FIFO_ERR)
@@ -1135,7 +1211,7 @@ static irqreturn_t pch_gbe_intr(int irq, void *data)
 	/* When Rx descriptor is empty */
 	if ((int_st & PCH_GBE_INT_RX_DSC_EMP)) {
 		adapter->stats.intr_rx_dsc_empty_count++;
-		pr_err("Rx descriptor is empty\n");
+		pr_debug("Rx descriptor is empty\n");
 		int_en = ioread32(&hw->reg->INT_EN);
 		iowrite32((int_en & ~PCH_GBE_INT_RX_DSC_EMP), &hw->reg->INT_EN);
 		if (hw->mac.tx_fc_enable) {
@@ -1185,29 +1261,23 @@ pch_gbe_alloc_rx_buffers(struct pch_gbe_adapter *adapter,
 	unsigned int i;
 	unsigned int bufsz;
 
-	bufsz = adapter->rx_buffer_len + PCH_GBE_DMA_ALIGN;
+	bufsz = adapter->rx_buffer_len + NET_IP_ALIGN;
 	i = rx_ring->next_to_use;
 
 	while ((cleaned_count--)) {
 		buffer_info = &rx_ring->buffer_info[i];
-		skb = buffer_info->skb;
-		if (skb) {
-			skb_trim(skb, 0);
-		} else {
-			skb = netdev_alloc_skb(netdev, bufsz);
-			if (unlikely(!skb)) {
-				/* Better luck next round */
-				adapter->stats.rx_alloc_buff_failed++;
-				break;
-			}
-			/* 64byte align */
-			skb_reserve(skb, PCH_GBE_DMA_ALIGN);
-
-			buffer_info->skb = skb;
-			buffer_info->length = adapter->rx_buffer_len;
+		skb = netdev_alloc_skb(netdev, bufsz);
+		if (unlikely(!skb)) {
+			/* Better luck next round */
+			adapter->stats.rx_alloc_buff_failed++;
+			break;
 		}
+		/* align */
+		skb_reserve(skb, NET_IP_ALIGN);
+		buffer_info->skb = skb;
+
 		buffer_info->dma = dma_map_single(&pdev->dev,
-						  skb->data,
+						  buffer_info->rx_buffer,
						  buffer_info->length,
						  DMA_FROM_DEVICE);
 		if (dma_mapping_error(&adapter->pdev->dev, buffer_info->dma)) {
@@ -1240,6 +1310,36 @@ pch_gbe_alloc_rx_buffers(struct pch_gbe_adapter *adapter,
 	return;
 }
 
+static int
+pch_gbe_alloc_rx_buffers_pool(struct pch_gbe_adapter *adapter,
+			      struct pch_gbe_rx_ring *rx_ring, int cleaned_count)
+{
+	struct pci_dev *pdev = adapter->pdev;
+	struct pch_gbe_buffer *buffer_info;
+	unsigned int i;
+	unsigned int bufsz;
+	unsigned int size;
+
+	bufsz = adapter->rx_buffer_len;
+
+	size = rx_ring->count * bufsz + PCH_GBE_RESERVE_MEMORY;
+	rx_ring->rx_buff_pool = dma_alloc_coherent(&pdev->dev, size,
+						   &rx_ring->rx_buff_pool_logic,
+						   GFP_KERNEL);
+	if (!rx_ring->rx_buff_pool) {
+		pr_err("Unable to allocate memory for the receive poll buffer\n");
+		return -ENOMEM;
+	}
+	memset(rx_ring->rx_buff_pool, 0, size);
+	rx_ring->rx_buff_pool_size = size;
+	for (i = 0; i < rx_ring->count; i++) {
+		buffer_info = &rx_ring->buffer_info[i];
+		buffer_info->rx_buffer = rx_ring->rx_buff_pool + bufsz * i;
+		buffer_info->length = bufsz;
+	}
+	return 0;
+}
+
 /**
  * pch_gbe_alloc_tx_buffers - Allocate transmit buffers
  * @adapter: Board private structure
@@ -1380,7 +1480,7 @@ pch_gbe_clean_rx(struct pch_gbe_adapter *adapter,
 	unsigned int i;
 	unsigned int cleaned_count = 0;
 	bool cleaned = false;
-	struct sk_buff *skb, *new_skb;
+	struct sk_buff *skb;
 	u8 dma_status;
 	u16 gbec_status;
 	u32 tcp_ip_status;
@@ -1401,13 +1501,12 @@ pch_gbe_clean_rx(struct pch_gbe_adapter *adapter,
 		rx_desc->gbec_status = DSC_INIT16;
 		buffer_info = &rx_ring->buffer_info[i];
 		skb = buffer_info->skb;
+		buffer_info->skb = NULL;
 
 		/* unmap dma */
 		dma_unmap_single(&pdev->dev, buffer_info->dma,
				 buffer_info->length, DMA_FROM_DEVICE);
 		buffer_info->mapped = false;
-		/* Prefetch the packet */
-		prefetch(skb->data);
 
 		pr_debug("RxDecNo = 0x%04x Status[DMA:0x%02x GBE:0x%04x "
			 "TCP:0x%08x] BufInf = 0x%p\n",
@@ -1427,70 +1526,16 @@ pch_gbe_clean_rx(struct pch_gbe_adapter *adapter,
 			pr_err("Receive CRC Error\n");
 		} else {
 			/* get receive length */
-			/* length convert[-3] */
-			length = (rx_desc->rx_words_eob) - 3;
-
-			/* Decide the data conversion method */
-			if (!(netdev->features & NETIF_F_RXCSUM)) {
-				/* [Header:14][payload] */
-				if (NET_IP_ALIGN) {
-					/* Because alignment differs,
-					 * the new_skb is newly allocated,
-					 * and data is copied to new_skb.*/
-					new_skb = netdev_alloc_skb(netdev,
-								   length + NET_IP_ALIGN);
-					if (!new_skb) {
-						/* dorrop error */
-						pr_err("New skb allocation "
-							"Error\n");
-						goto dorrop;
-					}
-					skb_reserve(new_skb, NET_IP_ALIGN);
-					memcpy(new_skb->data, skb->data,
-					       length);
-					skb = new_skb;
-				} else {
-					/* DMA buffer is used as SKB as it is.*/
-					buffer_info->skb = NULL;
-				}
-			} else {
-				/* [Header:14][padding:2][payload] */
-				/* The length includes padding length */
-				length = length - PCH_GBE_DMA_PADDING;
-				if ((length < copybreak) ||
-				    (NET_IP_ALIGN != PCH_GBE_DMA_PADDING)) {
-					/* Because alignment differs,
-					 * the new_skb is newly allocated,
-					 * and data is copied to new_skb.
-					 * Padding data is deleted
-					 * at the time of a copy.*/
-					new_skb = netdev_alloc_skb(netdev,
-								   length + NET_IP_ALIGN);
-					if (!new_skb) {
-						/* dorrop error */
-						pr_err("New skb allocation "
-							"Error\n");
-						goto dorrop;
-					}
-					skb_reserve(new_skb, NET_IP_ALIGN);
-					memcpy(new_skb->data, skb->data,
-					       ETH_HLEN);
-					memcpy(&new_skb->data[ETH_HLEN],
-					       &skb->data[ETH_HLEN +
-							  PCH_GBE_DMA_PADDING],
-					       length - ETH_HLEN);
-					skb = new_skb;
-				} else {
-					/* Padding data is deleted
-					 * by moving header data.*/
-					memmove(&skb->data[PCH_GBE_DMA_PADDING],
-						&skb->data[0], ETH_HLEN);
-					skb_reserve(skb, NET_IP_ALIGN);
-					buffer_info->skb = NULL;
-				}
-			}
-			/* The length includes FCS length */
-			length = length - ETH_FCS_LEN;
+			/* length convert[-3], length includes FCS length */
+			length = (rx_desc->rx_words_eob) - 3 - ETH_FCS_LEN;
+			if (rx_desc->rx_words_eob & 0x02)
+				length = length - 4;
+			/*
+			 * buffer_info->rx_buffer: [Header:14][payload]
+			 * skb->data: [Reserve:2][Header:14][payload]
+			 */
+			memcpy(skb->data, buffer_info->rx_buffer, length);
+
 			/* update status of driver */
 			adapter->stats.rx_bytes += length;
 			adapter->stats.rx_packets++;
@@ -1509,7 +1554,6 @@ pch_gbe_clean_rx(struct pch_gbe_adapter *adapter,
 			pr_debug("Receive skb->ip_summed: %d length: %d\n",
				 skb->ip_summed, length);
 		}
-dorrop:
 		/* return some buffers to hardware, one at a time is too slow */
 		if (unlikely(cleaned_count >= PCH_GBE_RX_BUFFER_WRITE)) {
 			pch_gbe_alloc_rx_buffers(adapter, rx_ring,
@@ -1714,9 +1758,15 @@ int pch_gbe_up(struct pch_gbe_adapter *adapter)
 		pr_err("Error: can't bring device up\n");
 		return err;
 	}
+	err = pch_gbe_alloc_rx_buffers_pool(adapter, rx_ring, rx_ring->count);
+	if (err) {
+		pr_err("Error: can't bring device up\n");
+		return err;
+	}
 	pch_gbe_alloc_tx_buffers(adapter, tx_ring);
 	pch_gbe_alloc_rx_buffers(adapter, rx_ring, rx_ring->count);
 	adapter->tx_queue_len = netdev->tx_queue_len;
+	pch_gbe_start_receive(&adapter->hw);
 
 	mod_timer(&adapter->watchdog_timer, jiffies);
 
@@ -1734,6 +1784,7 @@ int pch_gbe_up(struct pch_gbe_adapter *adapter)
 void pch_gbe_down(struct pch_gbe_adapter *adapter)
 {
 	struct net_device *netdev = adapter->netdev;
+	struct pch_gbe_rx_ring *rx_ring = adapter->rx_ring;
 
 	/* signal that we're down so the interrupt handler does not
 	 * reschedule our watchdog timer */
@@ -1752,6 +1803,12 @@ void pch_gbe_down(struct pch_gbe_adapter *adapter)
 	pch_gbe_reset(adapter);
 	pch_gbe_clean_tx_ring(adapter, adapter->tx_ring);
 	pch_gbe_clean_rx_ring(adapter, adapter->rx_ring);
+
+	pci_free_consistent(adapter->pdev, rx_ring->rx_buff_pool_size,
+			    rx_ring->rx_buff_pool, rx_ring->rx_buff_pool_logic);
+	rx_ring->rx_buff_pool_logic = 0;
+	rx_ring->rx_buff_pool_size = 0;
+	rx_ring->rx_buff_pool = NULL;
 }
 
 /**
@@ -2004,6 +2061,8 @@ static int pch_gbe_change_mtu(struct net_device *netdev, int new_mtu)
 {
 	struct pch_gbe_adapter *adapter = netdev_priv(netdev);
 	int max_frame;
+	unsigned long old_rx_buffer_len = adapter->rx_buffer_len;
+	int err;
 
 	max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
 	if ((max_frame < ETH_ZLEN + ETH_FCS_LEN) ||
@@ -2018,14 +2077,24 @@ static int pch_gbe_change_mtu(struct net_device *netdev, int new_mtu)
 	else if (max_frame <= PCH_GBE_FRAME_SIZE_8192)
 		adapter->rx_buffer_len = PCH_GBE_FRAME_SIZE_8192;
 	else
-		adapter->rx_buffer_len = PCH_GBE_MAX_JUMBO_FRAME_SIZE;
-	netdev->mtu = new_mtu;
-	adapter->hw.mac.max_frame_size = max_frame;
+		adapter->rx_buffer_len = PCH_GBE_MAX_RX_BUFFER_SIZE;
 
-	if (netif_running(netdev))
-		pch_gbe_reinit_locked(adapter);
-	else
+	if (netif_running(netdev)) {
+		pch_gbe_down(adapter);
+		err = pch_gbe_up(adapter);
+		if (err) {
+			adapter->rx_buffer_len = old_rx_buffer_len;
+			pch_gbe_up(adapter);
+			return -ENOMEM;
+		} else {
+			netdev->mtu = new_mtu;
+			adapter->hw.mac.max_frame_size = max_frame;
+		}
+	} else {
 		pch_gbe_reset(adapter);
+		netdev->mtu = new_mtu;
+		adapter->hw.mac.max_frame_size = max_frame;
+	}
 
 	pr_debug("max_frame : %d rx_buffer_len : %d mtu : %d max_frame_size : %d\n",
		 max_frame, (u32) adapter->rx_buffer_len, netdev->mtu,
@@ -2103,6 +2172,7 @@ static int pch_gbe_napi_poll(struct napi_struct *napi, int budget)
 	int work_done = 0;
 	bool poll_end_flag = false;
 	bool cleaned = false;
+	u32 int_en;
 
 	pr_debug("budget : %d\n", budget);
 
@@ -2110,8 +2180,15 @@ static int pch_gbe_napi_poll(struct napi_struct *napi, int budget)
 	if (!netif_carrier_ok(netdev)) {
 		poll_end_flag = true;
 	} else {
-		cleaned = pch_gbe_clean_tx(adapter, adapter->tx_ring);
 		pch_gbe_clean_rx(adapter, adapter->rx_ring, &work_done, budget);
+		if (adapter->rx_stop_flag) {
+			adapter->rx_stop_flag = false;
+			pch_gbe_start_receive(&adapter->hw);
+			int_en = ioread32(&adapter->hw.reg->INT_EN);
+			iowrite32((int_en | PCH_GBE_INT_RX_FIFO_ERR),
+				  &adapter->hw.reg->INT_EN);
+		}
+		cleaned = pch_gbe_clean_tx(adapter, adapter->tx_ring);
 
 		if (cleaned)
 			work_done = budget;
@@ -2452,6 +2529,13 @@ static DEFINE_PCI_DEVICE_TABLE(pch_gbe_pcidev_id) = {
 	 .class = (PCI_CLASS_NETWORK_ETHERNET << 8),
 	 .class_mask = (0xFFFF00)
 	 },
+	{.vendor = PCI_VENDOR_ID_ROHM,
+	 .device = PCI_DEVICE_ID_ROHM_ML7831_GBE,
+	 .subvendor = PCI_ANY_ID,
+	 .subdevice = PCI_ANY_ID,
+	 .class = (PCI_CLASS_NETWORK_ETHERNET << 8),
+	 .class_mask = (0xFFFF00)
+	 },
 	/* required last entry */
 	{0}
 };
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 835bbb534c5..6eb9f4ea3bf 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -407,6 +407,7 @@ enum rtl_register_content {
 	RxOK = 0x0001,
 
 	/* RxStatusDesc */
+	RxBOVF = (1 << 24),
 	RxFOVF = (1 << 23),
 	RxRWT = (1 << 22),
 	RxRES = (1 << 21),
@@ -682,6 +683,7 @@ struct rtl8169_private {
 	struct mii_if_info mii;
 	struct rtl8169_counters counters;
 	u32 saved_wolopts;
+	u32 opts1_mask;
 
 	struct rtl_fw {
 		const struct firmware *fw;
@@ -710,6 +712,7 @@ MODULE_FIRMWARE(FIRMWARE_8168D_1);
 MODULE_FIRMWARE(FIRMWARE_8168D_2);
 MODULE_FIRMWARE(FIRMWARE_8168E_1);
 MODULE_FIRMWARE(FIRMWARE_8168E_2);
+MODULE_FIRMWARE(FIRMWARE_8168E_3);
 MODULE_FIRMWARE(FIRMWARE_8105E_1);
 
 static int rtl8169_open(struct net_device *dev);
@@ -3077,6 +3080,14 @@ static void rtl8169_phy_reset(struct net_device *dev,
 		netif_err(tp, link, dev, "PHY reset failed\n");
 }
 
+static bool rtl_tbi_enabled(struct rtl8169_private *tp)
+{
+	void __iomem *ioaddr = tp->mmio_addr;
+
+	return (tp->mac_version == RTL_GIGA_MAC_VER_01) &&
+	       (RTL_R8(PHYstatus) & TBI_Enable);
+}
+
 static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp)
 {
 	void __iomem *ioaddr = tp->mmio_addr;
@@ -3109,7 +3120,7 @@ static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp)
			  ADVERTISED_1000baseT_Half |
			  ADVERTISED_1000baseT_Full : 0));
 
-	if (RTL_R8(PHYstatus) & TBI_Enable)
+	if (rtl_tbi_enabled(tp))
 		netif_info(tp, link, dev, "TBI auto-negotiating\n");
 }
 
@@ -3319,9 +3330,16 @@ static void r810x_phy_power_up(struct rtl8169_private *tp)
 
 static void r810x_pll_power_down(struct rtl8169_private *tp)
 {
+	void __iomem *ioaddr = tp->mmio_addr;
+
 	if (__rtl8169_get_wol(tp) & WAKE_ANY) {
 		rtl_writephy(tp, 0x1f, 0x0000);
 		rtl_writephy(tp, MII_BMCR, 0x0000);
+
+		if (tp->mac_version == RTL_GIGA_MAC_VER_29 ||
+		    tp->mac_version == RTL_GIGA_MAC_VER_30)
+			RTL_W32(RxConfig, RTL_R32(RxConfig) | AcceptBroadcast |
+				AcceptMulticast | AcceptMyPhys);
 		return;
 	}
 
@@ -3417,7 +3435,8 @@ static void r8168_pll_power_down(struct rtl8169_private *tp)
 		rtl_writephy(tp, MII_BMCR, 0x0000);
 
 		if (tp->mac_version == RTL_GIGA_MAC_VER_32 ||
-		    tp->mac_version == RTL_GIGA_MAC_VER_33)
+		    tp->mac_version == RTL_GIGA_MAC_VER_33 ||
+		    tp->mac_version == RTL_GIGA_MAC_VER_34)
 			RTL_W32(RxConfig, RTL_R32(RxConfig) | AcceptBroadcast |
				AcceptMulticast | AcceptMyPhys);
 		return;
@@ -3727,8 +3746,7 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	tp->features |= rtl_try_msi(pdev, ioaddr, cfg);
 	RTL_W8(Cfg9346, Cfg9346_Lock);
 
-	if ((tp->mac_version <= RTL_GIGA_MAC_VER_06) &&
-	    (RTL_R8(PHYstatus) & TBI_Enable)) {
+	if (rtl_tbi_enabled(tp)) {
 		tp->set_speed = rtl8169_set_speed_tbi;
 		tp->get_settings = rtl8169_gset_tbi;
 		tp->phy_reset_enable = rtl8169_tbi_reset_enable;
@@ -3777,6 +3795,9 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	tp->intr_event = cfg->intr_event;
 	tp->napi_event = cfg->napi_event;
 
+	tp->opts1_mask = (tp->mac_version != RTL_GIGA_MAC_VER_01) ?
+		~(RxBOVF | RxFOVF) : ~0;
+
 	init_timer(&tp->timer);
 	tp->timer.data = (unsigned long) dev;
 	tp->timer.function = rtl8169_phy_timer;
@@ -3988,6 +4009,7 @@ static void rtl8169_hw_reset(struct rtl8169_private *tp)
 		while (RTL_R8(TxPoll) & NPQ)
 			udelay(20);
 	} else if (tp->mac_version == RTL_GIGA_MAC_VER_34) {
+		RTL_W8(ChipCmd, RTL_R8(ChipCmd) | StopReq);
 		while (!(RTL_R32(TxConfig) & TXCFG_EMPTY))
 			udelay(100);
 	} else {
@@ -5314,7 +5336,7 @@ static int rtl8169_rx_interrupt(struct net_device *dev,
 		u32 status;
 
 		rmb();
-		status = le32_to_cpu(desc->opts1);
+		status = le32_to_cpu(desc->opts1) & tp->opts1_mask;
 
 		if (status & DescOwn)
 			break;
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index bf2404ae3b8..4479a45f732 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -31,6 +31,7 @@
 #include <linux/phy.h>
 #include <linux/cache.h>
 #include <linux/io.h>
+#include <linux/interrupt.h>
 #include <linux/pm_runtime.h>
 #include <linux/slab.h>
 #include <linux/ethtool.h>
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 76dcadfaaa4..de9afebe183 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -1050,7 +1050,6 @@ static int efx_init_io(struct efx_nic *efx)
 {
 	struct pci_dev *pci_dev = efx->pci_dev;
 	dma_addr_t dma_mask = efx->type->max_dma_mask;
-	bool use_wc;
 	int rc;
 
 	netif_dbg(efx, probe, efx->net_dev, "initialising I/O\n");
@@ -1101,21 +1100,8 @@ static int efx_init_io(struct efx_nic *efx)
 		rc = -EIO;
 		goto fail3;
 	}
-
-	/* bug22643: If SR-IOV is enabled then tx push over a write combined
-	 * mapping is unsafe. We need to disable write combining in this case.
-	 * MSI is unsupported when SR-IOV is enabled, and the firmware will
-	 * have removed the MSI capability. So write combining is safe if
-	 * there is an MSI capability.
-	 */
-	use_wc = (!EFX_WORKAROUND_22643(efx) ||
-		  pci_find_capability(pci_dev, PCI_CAP_ID_MSI));
-	if (use_wc)
-		efx->membase = ioremap_wc(efx->membase_phys,
-					  efx->type->mem_map_size);
-	else
-		efx->membase = ioremap_nocache(efx->membase_phys,
-					       efx->type->mem_map_size);
+	efx->membase = ioremap_nocache(efx->membase_phys,
+				       efx->type->mem_map_size);
 	if (!efx->membase) {
 		netif_err(efx, probe, efx->net_dev,
			  "could not map memory BAR at %llx+%x\n",
diff --git a/drivers/net/ethernet/sfc/io.h b/drivers/net/ethernet/sfc/io.h
index cc978803d48..751d1ec112c 100644
--- a/drivers/net/ethernet/sfc/io.h
+++ b/drivers/net/ethernet/sfc/io.h
@@ -103,7 +103,6 @@ static inline void efx_writeo(struct efx_nic *efx, efx_oword_t *value,
 	_efx_writed(efx, value->u32[2], reg + 8);
 	_efx_writed(efx, value->u32[3], reg + 12);
 #endif
-	wmb();
 	mmiowb();
 	spin_unlock_irqrestore(&efx->biu_lock, flags);
 }
@@ -126,7 +125,6 @@ static inline void efx_sram_writeq(struct efx_nic *efx, void __iomem *membase,
 	__raw_writel((__force u32)value->u32[0], membase + addr);
 	__raw_writel((__force u32)value->u32[1], membase + addr + 4);
 #endif
-	wmb();
 	mmiowb();
 	spin_unlock_irqrestore(&efx->biu_lock, flags);
 }
@@ -141,7 +139,6 @@ static inline void efx_writed(struct efx_nic *efx, efx_dword_t *value,
 
 	/* No lock required */
 	_efx_writed(efx, value->u32[0], reg);
-	wmb();
 }
 
 /* Read a 128-bit CSR, locking as appropriate. */
@@ -152,7 +149,6 @@ static inline void efx_reado(struct efx_nic *efx, efx_oword_t *value,
 
 	spin_lock_irqsave(&efx->biu_lock, flags);
 	value->u32[0] = _efx_readd(efx, reg + 0);
-	rmb();
 	value->u32[1] = _efx_readd(efx, reg + 4);
 	value->u32[2] = _efx_readd(efx, reg + 8);
 	value->u32[3] = _efx_readd(efx, reg + 12);
@@ -175,7 +171,6 @@ static inline void efx_sram_readq(struct efx_nic *efx, void __iomem *membase,
 	value->u64[0] = (__force __le64)__raw_readq(membase + addr);
 #else
 	value->u32[0] = (__force __le32)__raw_readl(membase + addr);
-	rmb();
 	value->u32[1] = (__force __le32)__raw_readl(membase + addr + 4);
 #endif
 	spin_unlock_irqrestore(&efx->biu_lock, flags);
@@ -249,7 +244,6 @@ static inline void _efx_writeo_page(struct efx_nic *efx, efx_oword_t *value,
 	_efx_writed(efx, value->u32[2], reg + 8);
 	_efx_writed(efx, value->u32[3], reg + 12);
 #endif
-	wmb();
 }
 #define efx_writeo_page(efx, value, reg, page) \
 	_efx_writeo_page(efx, value, \
diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c
index 3dd45ed61f0..81a42539746 100644
--- a/drivers/net/ethernet/sfc/mcdi.c
+++ b/drivers/net/ethernet/sfc/mcdi.c
@@ -50,20 +50,6 @@ static inline struct efx_mcdi_iface *efx_mcdi(struct efx_nic *efx)
50 return &nic_data->mcdi; 50 return &nic_data->mcdi;
51} 51}
52 52
53static inline void
54efx_mcdi_readd(struct efx_nic *efx, efx_dword_t *value, unsigned reg)
55{
56 struct siena_nic_data *nic_data = efx->nic_data;
57 value->u32[0] = (__force __le32)__raw_readl(nic_data->mcdi_smem + reg);
58}
59
60static inline void
61efx_mcdi_writed(struct efx_nic *efx, const efx_dword_t *value, unsigned reg)
62{
63 struct siena_nic_data *nic_data = efx->nic_data;
64 __raw_writel((__force u32)value->u32[0], nic_data->mcdi_smem + reg);
65}
66
67void efx_mcdi_init(struct efx_nic *efx) 53void efx_mcdi_init(struct efx_nic *efx)
68{ 54{
69 struct efx_mcdi_iface *mcdi; 55 struct efx_mcdi_iface *mcdi;
@@ -84,8 +70,8 @@ static void efx_mcdi_copyin(struct efx_nic *efx, unsigned cmd,
84 const u8 *inbuf, size_t inlen) 70 const u8 *inbuf, size_t inlen)
85{ 71{
86 struct efx_mcdi_iface *mcdi = efx_mcdi(efx); 72 struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
87 unsigned pdu = MCDI_PDU(efx); 73 unsigned pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx);
88 unsigned doorbell = MCDI_DOORBELL(efx); 74 unsigned doorbell = FR_CZ_MC_TREG_SMEM + MCDI_DOORBELL(efx);
89 unsigned int i; 75 unsigned int i;
90 efx_dword_t hdr; 76 efx_dword_t hdr;
91 u32 xflags, seqno; 77 u32 xflags, seqno;
@@ -106,28 +92,29 @@ static void efx_mcdi_copyin(struct efx_nic *efx, unsigned cmd,
106 MCDI_HEADER_SEQ, seqno, 92 MCDI_HEADER_SEQ, seqno,
107 MCDI_HEADER_XFLAGS, xflags); 93 MCDI_HEADER_XFLAGS, xflags);
108 94
109 efx_mcdi_writed(efx, &hdr, pdu); 95 efx_writed(efx, &hdr, pdu);
110 96
111 for (i = 0; i < inlen; i += 4) 97 for (i = 0; i < inlen; i += 4)
112 efx_mcdi_writed(efx, (const efx_dword_t *)(inbuf + i), 98 _efx_writed(efx, *((__le32 *)(inbuf + i)), pdu + 4 + i);
113 pdu + 4 + i); 99
100 /* Ensure the payload is written out before the header */
101 wmb();
114 102
115 /* ring the doorbell with a distinctive value */ 103 /* ring the doorbell with a distinctive value */
116 EFX_POPULATE_DWORD_1(hdr, EFX_DWORD_0, 0x45789abc); 104 _efx_writed(efx, (__force __le32) 0x45789abc, doorbell);
117 efx_mcdi_writed(efx, &hdr, doorbell);
118} 105}
119 106
120static void efx_mcdi_copyout(struct efx_nic *efx, u8 *outbuf, size_t outlen) 107static void efx_mcdi_copyout(struct efx_nic *efx, u8 *outbuf, size_t outlen)
121{ 108{
122 struct efx_mcdi_iface *mcdi = efx_mcdi(efx); 109 struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
123 unsigned int pdu = MCDI_PDU(efx); 110 unsigned int pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx);
124 int i; 111 int i;
125 112
126 BUG_ON(atomic_read(&mcdi->state) == MCDI_STATE_QUIESCENT); 113 BUG_ON(atomic_read(&mcdi->state) == MCDI_STATE_QUIESCENT);
127 BUG_ON(outlen & 3 || outlen >= 0x100); 114 BUG_ON(outlen & 3 || outlen >= 0x100);
128 115
129 for (i = 0; i < outlen; i += 4) 116 for (i = 0; i < outlen; i += 4)
130 efx_mcdi_readd(efx, (efx_dword_t *)(outbuf + i), pdu + 4 + i); 117 *((__le32 *)(outbuf + i)) = _efx_readd(efx, pdu + 4 + i);
131} 118}
132 119
133static int efx_mcdi_poll(struct efx_nic *efx) 120static int efx_mcdi_poll(struct efx_nic *efx)
@@ -135,7 +122,7 @@ static int efx_mcdi_poll(struct efx_nic *efx)
135 struct efx_mcdi_iface *mcdi = efx_mcdi(efx); 122 struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
136 unsigned int time, finish; 123 unsigned int time, finish;
137 unsigned int respseq, respcmd, error; 124 unsigned int respseq, respcmd, error;
138 unsigned int pdu = MCDI_PDU(efx); 125 unsigned int pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx);
139 unsigned int rc, spins; 126 unsigned int rc, spins;
140 efx_dword_t reg; 127 efx_dword_t reg;
141 128
@@ -161,7 +148,8 @@ static int efx_mcdi_poll(struct efx_nic *efx)
161 148
162 time = get_seconds(); 149 time = get_seconds();
163 150
164 efx_mcdi_readd(efx, &reg, pdu); 151 rmb();
152 efx_readd(efx, &reg, pdu);
165 153
166 /* All 1's indicates that shared memory is in reset (and is 154 /* All 1's indicates that shared memory is in reset (and is
167 * not a valid header). Wait for it to come out reset before 155 * not a valid header). Wait for it to come out reset before
@@ -188,7 +176,7 @@ static int efx_mcdi_poll(struct efx_nic *efx)
188 respseq, mcdi->seqno); 176 respseq, mcdi->seqno);
189 rc = EIO; 177 rc = EIO;
190 } else if (error) { 178 } else if (error) {
191 efx_mcdi_readd(efx, &reg, pdu + 4); 179 efx_readd(efx, &reg, pdu + 4);
192 switch (EFX_DWORD_FIELD(reg, EFX_DWORD_0)) { 180 switch (EFX_DWORD_FIELD(reg, EFX_DWORD_0)) {
193#define TRANSLATE_ERROR(name) \ 181#define TRANSLATE_ERROR(name) \
194 case MC_CMD_ERR_ ## name: \ 182 case MC_CMD_ERR_ ## name: \
@@ -222,21 +210,21 @@ out:
222/* Test and clear MC-rebooted flag for this port/function */ 210/* Test and clear MC-rebooted flag for this port/function */
223int efx_mcdi_poll_reboot(struct efx_nic *efx) 211int efx_mcdi_poll_reboot(struct efx_nic *efx)
224{ 212{
225 unsigned int addr = MCDI_REBOOT_FLAG(efx); 213 unsigned int addr = FR_CZ_MC_TREG_SMEM + MCDI_REBOOT_FLAG(efx);
226 efx_dword_t reg; 214 efx_dword_t reg;
227 uint32_t value; 215 uint32_t value;
228 216
229 if (efx_nic_rev(efx) < EFX_REV_SIENA_A0) 217 if (efx_nic_rev(efx) < EFX_REV_SIENA_A0)
230 return false; 218 return false;
231 219
232 efx_mcdi_readd(efx, &reg, addr); 220 efx_readd(efx, &reg, addr);
233 value = EFX_DWORD_FIELD(reg, EFX_DWORD_0); 221 value = EFX_DWORD_FIELD(reg, EFX_DWORD_0);
234 222
235 if (value == 0) 223 if (value == 0)
236 return 0; 224 return 0;
237 225
238 EFX_ZERO_DWORD(reg); 226 EFX_ZERO_DWORD(reg);
239 efx_mcdi_writed(efx, &reg, addr); 227 efx_writed(efx, &reg, addr);
240 228
241 if (value == MC_STATUS_DWORD_ASSERT) 229 if (value == MC_STATUS_DWORD_ASSERT)
242 return -EINTR; 230 return -EINTR;
diff --git a/drivers/net/ethernet/sfc/nic.c b/drivers/net/ethernet/sfc/nic.c
index bafa23a6874..3edfbaf5f02 100644
--- a/drivers/net/ethernet/sfc/nic.c
+++ b/drivers/net/ethernet/sfc/nic.c
@@ -1936,13 +1936,6 @@ void efx_nic_get_regs(struct efx_nic *efx, void *buf)
1936 1936
1937 size = min_t(size_t, table->step, 16); 1937 size = min_t(size_t, table->step, 16);
1938 1938
1939 if (table->offset >= efx->type->mem_map_size) {
1940 /* No longer mapped; return dummy data */
1941 memcpy(buf, "\xde\xc0\xad\xde", 4);
1942 buf += table->rows * size;
1943 continue;
1944 }
1945
1946 for (i = 0; i < table->rows; i++) { 1939 for (i = 0; i < table->rows; i++) {
1947 switch (table->step) { 1940 switch (table->step) {
1948 case 4: /* 32-bit register or SRAM */ 1941 case 4: /* 32-bit register or SRAM */
diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h
index b5b288628c6..5fb24d3aa3c 100644
--- a/drivers/net/ethernet/sfc/nic.h
+++ b/drivers/net/ethernet/sfc/nic.h
@@ -143,12 +143,10 @@ static inline struct falcon_board *falcon_board(struct efx_nic *efx)
143/** 143/**
144 * struct siena_nic_data - Siena NIC state 144 * struct siena_nic_data - Siena NIC state
145 * @mcdi: Management-Controller-to-Driver Interface 145 * @mcdi: Management-Controller-to-Driver Interface
146 * @mcdi_smem: MCDI shared memory mapping. The mapping is always uncacheable.
147 * @wol_filter_id: Wake-on-LAN packet filter id 146 * @wol_filter_id: Wake-on-LAN packet filter id
148 */ 147 */
149struct siena_nic_data { 148struct siena_nic_data {
150 struct efx_mcdi_iface mcdi; 149 struct efx_mcdi_iface mcdi;
151 void __iomem *mcdi_smem;
152 int wol_filter_id; 150 int wol_filter_id;
153}; 151};
154 152
diff --git a/drivers/net/ethernet/sfc/siena.c b/drivers/net/ethernet/sfc/siena.c
index 4fdd148747b..cc2549cb707 100644
--- a/drivers/net/ethernet/sfc/siena.c
+++ b/drivers/net/ethernet/sfc/siena.c
@@ -252,26 +252,12 @@ static int siena_probe_nic(struct efx_nic *efx)
252 efx_reado(efx, &reg, FR_AZ_CS_DEBUG); 252 efx_reado(efx, &reg, FR_AZ_CS_DEBUG);
253 efx->net_dev->dev_id = EFX_OWORD_FIELD(reg, FRF_CZ_CS_PORT_NUM) - 1; 253 efx->net_dev->dev_id = EFX_OWORD_FIELD(reg, FRF_CZ_CS_PORT_NUM) - 1;
254 254
255 /* Initialise MCDI */
256 nic_data->mcdi_smem = ioremap_nocache(efx->membase_phys +
257 FR_CZ_MC_TREG_SMEM,
258 FR_CZ_MC_TREG_SMEM_STEP *
259 FR_CZ_MC_TREG_SMEM_ROWS);
260 if (!nic_data->mcdi_smem) {
261 netif_err(efx, probe, efx->net_dev,
262 "could not map MCDI at %llx+%x\n",
263 (unsigned long long)efx->membase_phys +
264 FR_CZ_MC_TREG_SMEM,
265 FR_CZ_MC_TREG_SMEM_STEP * FR_CZ_MC_TREG_SMEM_ROWS);
266 rc = -ENOMEM;
267 goto fail1;
268 }
269 efx_mcdi_init(efx); 255 efx_mcdi_init(efx);
270 256
271 /* Recover from a failed assertion before probing */ 257 /* Recover from a failed assertion before probing */
272 rc = efx_mcdi_handle_assertion(efx); 258 rc = efx_mcdi_handle_assertion(efx);
273 if (rc) 259 if (rc)
274 goto fail2; 260 goto fail1;
275 261
276 /* Let the BMC know that the driver is now in charge of link and 262 /* Let the BMC know that the driver is now in charge of link and
277 * filter settings. We must do this before we reset the NIC */ 263 * filter settings. We must do this before we reset the NIC */
@@ -326,7 +312,6 @@ fail4:
326fail3: 312fail3:
327 efx_mcdi_drv_attach(efx, false, NULL); 313 efx_mcdi_drv_attach(efx, false, NULL);
328fail2: 314fail2:
329 iounmap(nic_data->mcdi_smem);
330fail1: 315fail1:
331 kfree(efx->nic_data); 316 kfree(efx->nic_data);
332 return rc; 317 return rc;
@@ -406,8 +391,6 @@ static int siena_init_nic(struct efx_nic *efx)
406 391
407static void siena_remove_nic(struct efx_nic *efx) 392static void siena_remove_nic(struct efx_nic *efx)
408{ 393{
409 struct siena_nic_data *nic_data = efx->nic_data;
410
411 efx_nic_free_buffer(efx, &efx->irq_status); 394 efx_nic_free_buffer(efx, &efx->irq_status);
412 395
413 siena_reset_hw(efx, RESET_TYPE_ALL); 396 siena_reset_hw(efx, RESET_TYPE_ALL);
@@ -417,8 +400,7 @@ static void siena_remove_nic(struct efx_nic *efx)
417 efx_mcdi_drv_attach(efx, false, NULL); 400 efx_mcdi_drv_attach(efx, false, NULL);
418 401
419 /* Tear down the private nic state */ 402 /* Tear down the private nic state */
420 iounmap(nic_data->mcdi_smem); 403 kfree(efx->nic_data);
421 kfree(nic_data);
422 efx->nic_data = NULL; 404 efx->nic_data = NULL;
423} 405}
424 406
@@ -658,7 +640,8 @@ const struct efx_nic_type siena_a0_nic_type = {
658 .default_mac_ops = &efx_mcdi_mac_operations, 640 .default_mac_ops = &efx_mcdi_mac_operations,
659 641
660 .revision = EFX_REV_SIENA_A0, 642 .revision = EFX_REV_SIENA_A0,
661 .mem_map_size = FR_CZ_MC_TREG_SMEM, /* MC_TREG_SMEM mapped separately */ 643 .mem_map_size = (FR_CZ_MC_TREG_SMEM +
644 FR_CZ_MC_TREG_SMEM_STEP * FR_CZ_MC_TREG_SMEM_ROWS),
662 .txd_ptr_tbl_base = FR_BZ_TX_DESC_PTR_TBL, 645 .txd_ptr_tbl_base = FR_BZ_TX_DESC_PTR_TBL,
663 .rxd_ptr_tbl_base = FR_BZ_RX_DESC_PTR_TBL, 646 .rxd_ptr_tbl_base = FR_BZ_RX_DESC_PTR_TBL,
664 .buf_tbl_base = FR_BZ_BUF_FULL_TBL, 647 .buf_tbl_base = FR_BZ_BUF_FULL_TBL,
diff --git a/drivers/net/ethernet/sfc/workarounds.h b/drivers/net/ethernet/sfc/workarounds.h
index 99ff11400ce..e4dd3a7f304 100644
--- a/drivers/net/ethernet/sfc/workarounds.h
+++ b/drivers/net/ethernet/sfc/workarounds.h
@@ -38,8 +38,6 @@
38#define EFX_WORKAROUND_15783 EFX_WORKAROUND_ALWAYS 38#define EFX_WORKAROUND_15783 EFX_WORKAROUND_ALWAYS
39/* Legacy interrupt storm when interrupt fifo fills */ 39/* Legacy interrupt storm when interrupt fifo fills */
40#define EFX_WORKAROUND_17213 EFX_WORKAROUND_SIENA 40#define EFX_WORKAROUND_17213 EFX_WORKAROUND_SIENA
41/* Write combining and sriov=enabled are incompatible */
42#define EFX_WORKAROUND_22643 EFX_WORKAROUND_SIENA
43 41
44/* Spurious parity errors in TSORT buffers */ 42/* Spurious parity errors in TSORT buffers */
45#define EFX_WORKAROUND_5129 EFX_WORKAROUND_FALCON_A 43#define EFX_WORKAROUND_5129 EFX_WORKAROUND_FALCON_A
diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c
index f07a72150c6..12068219059 100644
--- a/drivers/net/ethernet/sun/cassini.c
+++ b/drivers/net/ethernet/sun/cassini.c
@@ -2452,14 +2452,13 @@ static irqreturn_t cas_interruptN(int irq, void *dev_id)
2452 struct net_device *dev = dev_id; 2452 struct net_device *dev = dev_id;
2453 struct cas *cp = netdev_priv(dev); 2453 struct cas *cp = netdev_priv(dev);
2454 unsigned long flags; 2454 unsigned long flags;
2455 int ring; 2455 int ring = (irq == cp->pci_irq_INTC) ? 2 : 3;
2456 u32 status = readl(cp->regs + REG_PLUS_INTRN_STATUS(ring)); 2456 u32 status = readl(cp->regs + REG_PLUS_INTRN_STATUS(ring));
2457 2457
2458 /* check for shared irq */ 2458 /* check for shared irq */
2459 if (status == 0) 2459 if (status == 0)
2460 return IRQ_NONE; 2460 return IRQ_NONE;
2461 2461
2462 ring = (irq == cp->pci_irq_INTC) ? 2 : 3;
2463 spin_lock_irqsave(&cp->lock, flags); 2462 spin_lock_irqsave(&cp->lock, flags);
2464 if (status & INTR_RX_DONE_ALT) { /* handle rx separately */ 2463 if (status & INTR_RX_DONE_ALT) { /* handle rx separately */
2465#ifdef USE_NAPI 2464#ifdef USE_NAPI
diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c
index dfc82720065..ed2a3977c6e 100644
--- a/drivers/net/netconsole.c
+++ b/drivers/net/netconsole.c
@@ -799,5 +799,11 @@ static void __exit cleanup_netconsole(void)
799 } 799 }
800} 800}
801 801
802module_init(init_netconsole); 802/*
803 * Use late_initcall to ensure netconsole is
804 * initialized after network device driver if built-in.
805 *
806 * late_initcall() and module_init() are identical if built as module.
807 */
808late_initcall(init_netconsole);
803module_exit(cleanup_netconsole); 809module_exit(cleanup_netconsole);
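For a built-in netconsole, late_initcall() places the init function in a later initcall level than the device_initcall()-level network drivers, so the interface it logs over already exists by the time it runs; when built as a module, late_initcall() and module_init() expand to the same thing. A minimal sketch of that pattern, assuming a trivial stand-alone module rather than netconsole itself:

#include <linux/init.h>
#include <linux/module.h>
#include <linux/printk.h>

static int __init demo_init(void)
{
	pr_info("demo: initialised\n");
	return 0;
}

static void __exit demo_exit(void)
{
	pr_info("demo: removed\n");
}

/* Built-in: runs in the late_initcall phase, after device_initcall()-level
 * network drivers. Built as a module, this is identical to module_init().
 */
late_initcall(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");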
diff --git a/drivers/net/phy/national.c b/drivers/net/phy/national.c
index 0620ba96350..04bb8fcc0cb 100644
--- a/drivers/net/phy/national.c
+++ b/drivers/net/phy/national.c
@@ -25,8 +25,9 @@
25/* DP83865 phy identifier values */ 25/* DP83865 phy identifier values */
26#define DP83865_PHY_ID 0x20005c7a 26#define DP83865_PHY_ID 0x20005c7a
27 27
28#define DP83865_INT_MASK_REG 0x15 28#define DP83865_INT_STATUS 0x14
29#define DP83865_INT_MASK_STATUS 0x14 29#define DP83865_INT_MASK 0x15
30#define DP83865_INT_CLEAR 0x17
30 31
31#define DP83865_INT_REMOTE_FAULT 0x0008 32#define DP83865_INT_REMOTE_FAULT 0x0008
32#define DP83865_INT_ANE_COMPLETED 0x0010 33#define DP83865_INT_ANE_COMPLETED 0x0010
@@ -68,21 +69,25 @@ static int ns_config_intr(struct phy_device *phydev)
68 int err; 69 int err;
69 70
70 if (phydev->interrupts == PHY_INTERRUPT_ENABLED) 71 if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
71 err = phy_write(phydev, DP83865_INT_MASK_REG, 72 err = phy_write(phydev, DP83865_INT_MASK,
72 DP83865_INT_MASK_DEFAULT); 73 DP83865_INT_MASK_DEFAULT);
73 else 74 else
74 err = phy_write(phydev, DP83865_INT_MASK_REG, 0); 75 err = phy_write(phydev, DP83865_INT_MASK, 0);
75 76
76 return err; 77 return err;
77} 78}
78 79
79static int ns_ack_interrupt(struct phy_device *phydev) 80static int ns_ack_interrupt(struct phy_device *phydev)
80{ 81{
81 int ret = phy_read(phydev, DP83865_INT_MASK_STATUS); 82 int ret = phy_read(phydev, DP83865_INT_STATUS);
82 if (ret < 0) 83 if (ret < 0)
83 return ret; 84 return ret;
84 85
 85 return 0; 86 /* Clear the interrupt status bit by writing a "1"
87 * to the corresponding bit in INT_CLEAR (2:0 are reserved) */
88 ret = phy_write(phydev, DP83865_INT_CLEAR, ret & ~0x7);
89
90 return ret;
86} 91}
87 92
88static void ns_giga_speed_fallback(struct phy_device *phydev, int mode) 93static void ns_giga_speed_fallback(struct phy_device *phydev, int mode)
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index 10e5d985afa..edfa15d2e79 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -1465,7 +1465,12 @@ static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb)
1465 continue; 1465 continue;
1466 } 1466 }
1467 1467
1468 mtu = pch->chan->mtu - hdrlen; 1468 /*
1469 * hdrlen includes the 2-byte PPP protocol field, but the
1470 * MTU counts only the payload excluding the protocol field.
1471 * (RFC1661 Section 2)
1472 */
1473 mtu = pch->chan->mtu - (hdrlen - 2);
1469 if (mtu < 4) 1474 if (mtu < 4)
1470 mtu = 4; 1475 mtu = 4;
1471 if (flen > mtu) 1476 if (flen > mtu)
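The channel MTU counts only the PPP payload, while hdrlen here also includes the 2-byte protocol field, so subtracting the full hdrlen under-sized every fragment by two bytes. With illustrative numbers (a 1500-byte channel MTU and a 6-byte hdrlen made up of the 2-byte protocol field plus a 4-byte multilink header), the old code allowed 1494 bytes per fragment where 1496 actually fit:

#include <stdio.h>

int main(void)
{
	/* Illustrative numbers only, not taken from a real channel. */
	int chan_mtu = 1500;  /* payload bytes the channel can carry */
	int hdrlen   = 6;     /* 2-byte protocol field + 4-byte multilink header */

	int old_mtu = chan_mtu - hdrlen;       /* 1494: over-subtracts the protocol field */
	int new_mtu = chan_mtu - (hdrlen - 2); /* 1496: protocol field is not counted by the MTU */

	printf("old=%d new=%d\n", old_mtu, new_mtu);
	return 0;
}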
diff --git a/drivers/net/rionet.c b/drivers/net/rionet.c
index 86ac38c96bc..3bb13113703 100644
--- a/drivers/net/rionet.c
+++ b/drivers/net/rionet.c
@@ -80,13 +80,13 @@ static int rionet_capable = 1;
80 */ 80 */
81static struct rio_dev **rionet_active; 81static struct rio_dev **rionet_active;
82 82
83#define is_rionet_capable(pef, src_ops, dst_ops) \ 83#define is_rionet_capable(src_ops, dst_ops) \
84 ((pef & RIO_PEF_INB_MBOX) && \ 84 ((src_ops & RIO_SRC_OPS_DATA_MSG) && \
85 (pef & RIO_PEF_INB_DOORBELL) && \ 85 (dst_ops & RIO_DST_OPS_DATA_MSG) && \
86 (src_ops & RIO_SRC_OPS_DOORBELL) && \ 86 (src_ops & RIO_SRC_OPS_DOORBELL) && \
87 (dst_ops & RIO_DST_OPS_DOORBELL)) 87 (dst_ops & RIO_DST_OPS_DOORBELL))
88#define dev_rionet_capable(dev) \ 88#define dev_rionet_capable(dev) \
89 is_rionet_capable(dev->pef, dev->src_ops, dev->dst_ops) 89 is_rionet_capable(dev->src_ops, dev->dst_ops)
90 90
91#define RIONET_MAC_MATCH(x) (*(u32 *)x == 0x00010001) 91#define RIONET_MAC_MATCH(x) (*(u32 *)x == 0x00010001)
92#define RIONET_GET_DESTID(x) (*(u16 *)(x + 4)) 92#define RIONET_GET_DESTID(x) (*(u16 *)(x + 4))
@@ -282,7 +282,6 @@ static int rionet_open(struct net_device *ndev)
282{ 282{
283 int i, rc = 0; 283 int i, rc = 0;
284 struct rionet_peer *peer, *tmp; 284 struct rionet_peer *peer, *tmp;
285 u32 pwdcsr;
286 struct rionet_private *rnet = netdev_priv(ndev); 285 struct rionet_private *rnet = netdev_priv(ndev);
287 286
288 if (netif_msg_ifup(rnet)) 287 if (netif_msg_ifup(rnet))
@@ -332,13 +331,8 @@ static int rionet_open(struct net_device *ndev)
332 continue; 331 continue;
333 } 332 }
334 333
335 /* 334 /* Send a join message */
336 * If device has initialized inbound doorbells, 335 rio_send_doorbell(peer->rdev, RIONET_DOORBELL_JOIN);
337 * send a join message
338 */
339 rio_read_config_32(peer->rdev, RIO_WRITE_PORT_CSR, &pwdcsr);
340 if (pwdcsr & RIO_DOORBELL_AVAIL)
341 rio_send_doorbell(peer->rdev, RIONET_DOORBELL_JOIN);
342 } 336 }
343 337
344 out: 338 out:
@@ -492,7 +486,7 @@ static int rionet_setup_netdev(struct rio_mport *mport, struct net_device *ndev)
492static int rionet_probe(struct rio_dev *rdev, const struct rio_device_id *id) 486static int rionet_probe(struct rio_dev *rdev, const struct rio_device_id *id)
493{ 487{
494 int rc = -ENODEV; 488 int rc = -ENODEV;
495 u32 lpef, lsrc_ops, ldst_ops; 489 u32 lsrc_ops, ldst_ops;
496 struct rionet_peer *peer; 490 struct rionet_peer *peer;
497 struct net_device *ndev = NULL; 491 struct net_device *ndev = NULL;
498 492
@@ -515,12 +509,11 @@ static int rionet_probe(struct rio_dev *rdev, const struct rio_device_id *id)
515 * on later probes 509 * on later probes
516 */ 510 */
517 if (!rionet_check) { 511 if (!rionet_check) {
518 rio_local_read_config_32(rdev->net->hport, RIO_PEF_CAR, &lpef);
519 rio_local_read_config_32(rdev->net->hport, RIO_SRC_OPS_CAR, 512 rio_local_read_config_32(rdev->net->hport, RIO_SRC_OPS_CAR,
520 &lsrc_ops); 513 &lsrc_ops);
521 rio_local_read_config_32(rdev->net->hport, RIO_DST_OPS_CAR, 514 rio_local_read_config_32(rdev->net->hport, RIO_DST_OPS_CAR,
522 &ldst_ops); 515 &ldst_ops);
523 if (!is_rionet_capable(lpef, lsrc_ops, ldst_ops)) { 516 if (!is_rionet_capable(lsrc_ops, ldst_ops)) {
524 printk(KERN_ERR 517 printk(KERN_ERR
525 "%s: local device is not network capable\n", 518 "%s: local device is not network capable\n",
526 DRV_NAME); 519 DRV_NAME);
diff --git a/drivers/net/usb/ipheth.c b/drivers/net/usb/ipheth.c
index 15772b1b6a9..13c1f044b40 100644
--- a/drivers/net/usb/ipheth.c
+++ b/drivers/net/usb/ipheth.c
@@ -59,6 +59,7 @@
59#define USB_PRODUCT_IPHONE_3G 0x1292 59#define USB_PRODUCT_IPHONE_3G 0x1292
60#define USB_PRODUCT_IPHONE_3GS 0x1294 60#define USB_PRODUCT_IPHONE_3GS 0x1294
61#define USB_PRODUCT_IPHONE_4 0x1297 61#define USB_PRODUCT_IPHONE_4 0x1297
62#define USB_PRODUCT_IPHONE_4_VZW 0x129c
62 63
63#define IPHETH_USBINTF_CLASS 255 64#define IPHETH_USBINTF_CLASS 255
64#define IPHETH_USBINTF_SUBCLASS 253 65#define IPHETH_USBINTF_SUBCLASS 253
@@ -98,6 +99,10 @@ static struct usb_device_id ipheth_table[] = {
98 USB_VENDOR_APPLE, USB_PRODUCT_IPHONE_4, 99 USB_VENDOR_APPLE, USB_PRODUCT_IPHONE_4,
99 IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS, 100 IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS,
100 IPHETH_USBINTF_PROTO) }, 101 IPHETH_USBINTF_PROTO) },
102 { USB_DEVICE_AND_INTERFACE_INFO(
103 USB_VENDOR_APPLE, USB_PRODUCT_IPHONE_4_VZW,
104 IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS,
105 IPHETH_USBINTF_PROTO) },
101 { } 106 { }
102}; 107};
103MODULE_DEVICE_TABLE(usb, ipheth_table); 108MODULE_DEVICE_TABLE(usb, ipheth_table);
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_calib.c b/drivers/net/wireless/ath/ath9k/ar9002_calib.c
index 2d4c0910295..2d394af8217 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_calib.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_calib.c
@@ -41,7 +41,8 @@ static bool ar9002_hw_is_cal_supported(struct ath_hw *ah,
41 case ADC_DC_CAL: 41 case ADC_DC_CAL:
42 /* Run ADC Gain Cal for non-CCK & non 2GHz-HT20 only */ 42 /* Run ADC Gain Cal for non-CCK & non 2GHz-HT20 only */
43 if (!IS_CHAN_B(chan) && 43 if (!IS_CHAN_B(chan) &&
44 !(IS_CHAN_2GHZ(chan) && IS_CHAN_HT20(chan))) 44 !((IS_CHAN_2GHZ(chan) || IS_CHAN_A_FAST_CLOCK(ah, chan)) &&
45 IS_CHAN_HT20(chan)))
45 supported = true; 46 supported = true;
46 break; 47 break;
47 } 48 }
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
index a73e50d80cb..51398f0063e 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
@@ -50,7 +50,7 @@ static int ar9003_hw_power_interpolate(int32_t x,
50static const struct ar9300_eeprom ar9300_default = { 50static const struct ar9300_eeprom ar9300_default = {
51 .eepromVersion = 2, 51 .eepromVersion = 2,
52 .templateVersion = 2, 52 .templateVersion = 2,
53 .macAddr = {1, 2, 3, 4, 5, 6}, 53 .macAddr = {0, 2, 3, 4, 5, 6},
54 .custData = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 54 .custData = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
55 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 55 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
56 .baseEepHeader = { 56 .baseEepHeader = {
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.c b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
index 95147948794..4956d09cb58 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
@@ -678,7 +678,7 @@ static int ar9003_hw_process_ini(struct ath_hw *ah,
678 REG_WRITE_ARRAY(&ah->iniModesAdditional, 678 REG_WRITE_ARRAY(&ah->iniModesAdditional,
679 modesIndex, regWrites); 679 modesIndex, regWrites);
680 680
681 if (AR_SREV_9300(ah)) 681 if (AR_SREV_9330(ah))
682 REG_WRITE_ARRAY(&ah->iniModesAdditional, 1, regWrites); 682 REG_WRITE_ARRAY(&ah->iniModesAdditional, 1, regWrites);
683 683
684 if (AR_SREV_9340(ah) && !ah->is_clk_25mhz) 684 if (AR_SREV_9340(ah) && !ah->is_clk_25mhz)
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index 7910165cf0e..a16f53994a7 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -2272,7 +2272,11 @@ static void ath9k_set_coverage_class(struct ieee80211_hw *hw, u8 coverage_class)
2272 2272
2273 mutex_lock(&sc->mutex); 2273 mutex_lock(&sc->mutex);
2274 ah->coverage_class = coverage_class; 2274 ah->coverage_class = coverage_class;
2275
2276 ath9k_ps_wakeup(sc);
2275 ath9k_hw_init_global_settings(ah); 2277 ath9k_hw_init_global_settings(ah);
2278 ath9k_ps_restore(sc);
2279
2276 mutex_unlock(&sc->mutex); 2280 mutex_unlock(&sc->mutex);
2277} 2281}
2278 2282
@@ -2288,6 +2292,12 @@ static void ath9k_flush(struct ieee80211_hw *hw, bool drop)
2288 mutex_lock(&sc->mutex); 2292 mutex_lock(&sc->mutex);
2289 cancel_delayed_work_sync(&sc->tx_complete_work); 2293 cancel_delayed_work_sync(&sc->tx_complete_work);
2290 2294
2295 if (ah->ah_flags & AH_UNPLUGGED) {
2296 ath_dbg(common, ATH_DBG_ANY, "Device has been unplugged!\n");
2297 mutex_unlock(&sc->mutex);
2298 return;
2299 }
2300
2291 if (sc->sc_flags & SC_OP_INVALID) { 2301 if (sc->sc_flags & SC_OP_INVALID) {
2292 ath_dbg(common, ATH_DBG_ANY, "Device not present\n"); 2302 ath_dbg(common, ATH_DBG_ANY, "Device not present\n");
2293 mutex_unlock(&sc->mutex); 2303 mutex_unlock(&sc->mutex);
diff --git a/drivers/net/wireless/ath/carl9170/main.c b/drivers/net/wireless/ath/carl9170/main.c
index 782b8f3ae58..af351ecd87c 100644
--- a/drivers/net/wireless/ath/carl9170/main.c
+++ b/drivers/net/wireless/ath/carl9170/main.c
@@ -1115,8 +1115,10 @@ static int carl9170_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
1115 * the high throughput speed in 802.11n networks. 1115 * the high throughput speed in 802.11n networks.

1116 */ 1116 */
1117 1117
1118 if (!is_main_vif(ar, vif)) 1118 if (!is_main_vif(ar, vif)) {
1119 mutex_lock(&ar->mutex);
1119 goto err_softw; 1120 goto err_softw;
1121 }
1120 1122
1121 /* 1123 /*
1122 * While the hardware supports *catch-all* key, for offloading 1124 * While the hardware supports *catch-all* key, for offloading
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index 24077023d48..56fa3a3648c 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -1637,7 +1637,8 @@ static void handle_irq_beacon(struct b43_wldev *dev)
1637 u32 cmd, beacon0_valid, beacon1_valid; 1637 u32 cmd, beacon0_valid, beacon1_valid;
1638 1638
1639 if (!b43_is_mode(wl, NL80211_IFTYPE_AP) && 1639 if (!b43_is_mode(wl, NL80211_IFTYPE_AP) &&
1640 !b43_is_mode(wl, NL80211_IFTYPE_MESH_POINT)) 1640 !b43_is_mode(wl, NL80211_IFTYPE_MESH_POINT) &&
1641 !b43_is_mode(wl, NL80211_IFTYPE_ADHOC))
1641 return; 1642 return;
1642 1643
1643 /* This is the bottom half of the asynchronous beacon update. */ 1644 /* This is the bottom half of the asynchronous beacon update. */
diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c
index 3774dd03474..ef9ad79d1bf 100644
--- a/drivers/net/wireless/ipw2x00/ipw2100.c
+++ b/drivers/net/wireless/ipw2x00/ipw2100.c
@@ -1903,15 +1903,17 @@ static void ipw2100_down(struct ipw2100_priv *priv)
1903static int ipw2100_net_init(struct net_device *dev) 1903static int ipw2100_net_init(struct net_device *dev)
1904{ 1904{
1905 struct ipw2100_priv *priv = libipw_priv(dev); 1905 struct ipw2100_priv *priv = libipw_priv(dev);
1906
1907 return ipw2100_up(priv, 1);
1908}
1909
1910static int ipw2100_wdev_init(struct net_device *dev)
1911{
1912 struct ipw2100_priv *priv = libipw_priv(dev);
1906 const struct libipw_geo *geo = libipw_get_geo(priv->ieee); 1913 const struct libipw_geo *geo = libipw_get_geo(priv->ieee);
1907 struct wireless_dev *wdev = &priv->ieee->wdev; 1914 struct wireless_dev *wdev = &priv->ieee->wdev;
1908 int ret;
1909 int i; 1915 int i;
1910 1916
1911 ret = ipw2100_up(priv, 1);
1912 if (ret)
1913 return ret;
1914
1915 memcpy(wdev->wiphy->perm_addr, priv->mac_addr, ETH_ALEN); 1917 memcpy(wdev->wiphy->perm_addr, priv->mac_addr, ETH_ALEN);
1916 1918
1917 /* fill-out priv->ieee->bg_band */ 1919 /* fill-out priv->ieee->bg_band */
@@ -6350,9 +6352,13 @@ static int ipw2100_pci_init_one(struct pci_dev *pci_dev,
6350 "Error calling register_netdev.\n"); 6352 "Error calling register_netdev.\n");
6351 goto fail; 6353 goto fail;
6352 } 6354 }
6355 registered = 1;
6356
6357 err = ipw2100_wdev_init(dev);
6358 if (err)
6359 goto fail;
6353 6360
6354 mutex_lock(&priv->action_mutex); 6361 mutex_lock(&priv->action_mutex);
6355 registered = 1;
6356 6362
6357 IPW_DEBUG_INFO("%s: Bound to %s\n", dev->name, pci_name(pci_dev)); 6363 IPW_DEBUG_INFO("%s: Bound to %s\n", dev->name, pci_name(pci_dev));
6358 6364
@@ -6389,7 +6395,8 @@ static int ipw2100_pci_init_one(struct pci_dev *pci_dev,
6389 6395
6390 fail_unlock: 6396 fail_unlock:
6391 mutex_unlock(&priv->action_mutex); 6397 mutex_unlock(&priv->action_mutex);
6392 6398 wiphy_unregister(priv->ieee->wdev.wiphy);
6399 kfree(priv->ieee->bg_band.channels);
6393 fail: 6400 fail:
6394 if (dev) { 6401 if (dev) {
6395 if (registered) 6402 if (registered)
diff --git a/drivers/net/wireless/ipw2x00/ipw2200.c b/drivers/net/wireless/ipw2x00/ipw2200.c
index f303df43ed3..99a710dfe77 100644
--- a/drivers/net/wireless/ipw2x00/ipw2200.c
+++ b/drivers/net/wireless/ipw2x00/ipw2200.c
@@ -11426,16 +11426,23 @@ static void ipw_bg_down(struct work_struct *work)
11426/* Called by register_netdev() */ 11426/* Called by register_netdev() */
11427static int ipw_net_init(struct net_device *dev) 11427static int ipw_net_init(struct net_device *dev)
11428{ 11428{
11429 int rc = 0;
11430 struct ipw_priv *priv = libipw_priv(dev);
11431
11432 mutex_lock(&priv->mutex);
11433 if (ipw_up(priv))
11434 rc = -EIO;
11435 mutex_unlock(&priv->mutex);
11436
11437 return rc;
11438}
11439
11440static int ipw_wdev_init(struct net_device *dev)
11441{
11429 int i, rc = 0; 11442 int i, rc = 0;
11430 struct ipw_priv *priv = libipw_priv(dev); 11443 struct ipw_priv *priv = libipw_priv(dev);
11431 const struct libipw_geo *geo = libipw_get_geo(priv->ieee); 11444 const struct libipw_geo *geo = libipw_get_geo(priv->ieee);
11432 struct wireless_dev *wdev = &priv->ieee->wdev; 11445 struct wireless_dev *wdev = &priv->ieee->wdev;
11433 mutex_lock(&priv->mutex);
11434
11435 if (ipw_up(priv)) {
11436 rc = -EIO;
11437 goto out;
11438 }
11439 11446
11440 memcpy(wdev->wiphy->perm_addr, priv->mac_addr, ETH_ALEN); 11447 memcpy(wdev->wiphy->perm_addr, priv->mac_addr, ETH_ALEN);
11441 11448
@@ -11520,13 +11527,9 @@ static int ipw_net_init(struct net_device *dev)
11520 set_wiphy_dev(wdev->wiphy, &priv->pci_dev->dev); 11527 set_wiphy_dev(wdev->wiphy, &priv->pci_dev->dev);
11521 11528
11522 /* With that information in place, we can now register the wiphy... */ 11529 /* With that information in place, we can now register the wiphy... */
11523 if (wiphy_register(wdev->wiphy)) { 11530 if (wiphy_register(wdev->wiphy))
11524 rc = -EIO; 11531 rc = -EIO;
11525 goto out;
11526 }
11527
11528out: 11532out:
11529 mutex_unlock(&priv->mutex);
11530 return rc; 11533 return rc;
11531} 11534}
11532 11535
@@ -11833,14 +11836,22 @@ static int __devinit ipw_pci_probe(struct pci_dev *pdev,
11833 goto out_remove_sysfs; 11836 goto out_remove_sysfs;
11834 } 11837 }
11835 11838
11839 err = ipw_wdev_init(net_dev);
11840 if (err) {
11841 IPW_ERROR("failed to register wireless device\n");
11842 goto out_unregister_netdev;
11843 }
11844
11836#ifdef CONFIG_IPW2200_PROMISCUOUS 11845#ifdef CONFIG_IPW2200_PROMISCUOUS
11837 if (rtap_iface) { 11846 if (rtap_iface) {
11838 err = ipw_prom_alloc(priv); 11847 err = ipw_prom_alloc(priv);
11839 if (err) { 11848 if (err) {
11840 IPW_ERROR("Failed to register promiscuous network " 11849 IPW_ERROR("Failed to register promiscuous network "
11841 "device (error %d).\n", err); 11850 "device (error %d).\n", err);
11842 unregister_netdev(priv->net_dev); 11851 wiphy_unregister(priv->ieee->wdev.wiphy);
11843 goto out_remove_sysfs; 11852 kfree(priv->ieee->a_band.channels);
11853 kfree(priv->ieee->bg_band.channels);
11854 goto out_unregister_netdev;
11844 } 11855 }
11845 } 11856 }
11846#endif 11857#endif
@@ -11852,6 +11863,8 @@ static int __devinit ipw_pci_probe(struct pci_dev *pdev,
11852 11863
11853 return 0; 11864 return 0;
11854 11865
11866 out_unregister_netdev:
11867 unregister_netdev(priv->net_dev);
11855 out_remove_sysfs: 11868 out_remove_sysfs:
11856 sysfs_remove_group(&pdev->dev.kobj, &ipw_attribute_group); 11869 sysfs_remove_group(&pdev->dev.kobj, &ipw_attribute_group);
11857 out_release_irq: 11870 out_release_irq:
diff --git a/drivers/net/wireless/iwlegacy/iwl-3945-rs.c b/drivers/net/wireless/iwlegacy/iwl-3945-rs.c
index 0cc5177d738..8faeaf2ddde 100644
--- a/drivers/net/wireless/iwlegacy/iwl-3945-rs.c
+++ b/drivers/net/wireless/iwlegacy/iwl-3945-rs.c
@@ -821,12 +821,15 @@ static void iwl3945_rs_get_rate(void *priv_r, struct ieee80211_sta *sta,
821 821
822 out: 822 out:
823 823
824 rs_sta->last_txrate_idx = index; 824 if (sband->band == IEEE80211_BAND_5GHZ) {
825 if (sband->band == IEEE80211_BAND_5GHZ) 825 if (WARN_ON_ONCE(index < IWL_FIRST_OFDM_RATE))
826 info->control.rates[0].idx = rs_sta->last_txrate_idx - 826 index = IWL_FIRST_OFDM_RATE;
827 IWL_FIRST_OFDM_RATE; 827 rs_sta->last_txrate_idx = index;
828 else 828 info->control.rates[0].idx = index - IWL_FIRST_OFDM_RATE;
829 } else {
830 rs_sta->last_txrate_idx = index;
829 info->control.rates[0].idx = rs_sta->last_txrate_idx; 831 info->control.rates[0].idx = rs_sta->last_txrate_idx;
832 }
830 833
831 IWL_DEBUG_RATE(priv, "leave: %d\n", index); 834 IWL_DEBUG_RATE(priv, "leave: %d\n", index);
832} 835}
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c b/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c
index ea31d7674df..a7b89145386 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c
@@ -168,7 +168,7 @@ static int iwlagn_set_temperature_offset_calib(struct iwl_priv *priv)
168 168
169 memset(&cmd, 0, sizeof(cmd)); 169 memset(&cmd, 0, sizeof(cmd));
170 iwl_set_calib_hdr(&cmd.hdr, IWL_PHY_CALIBRATE_TEMP_OFFSET_CMD); 170 iwl_set_calib_hdr(&cmd.hdr, IWL_PHY_CALIBRATE_TEMP_OFFSET_CMD);
171 memcpy(&cmd.radio_sensor_offset, offset_calib, sizeof(offset_calib)); 171 memcpy(&cmd.radio_sensor_offset, offset_calib, sizeof(*offset_calib));
172 if (!(cmd.radio_sensor_offset)) 172 if (!(cmd.radio_sensor_offset))
173 cmd.radio_sensor_offset = DEFAULT_RADIO_SENSOR_OFFSET; 173 cmd.radio_sensor_offset = DEFAULT_RADIO_SENSOR_OFFSET;
174 174
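The one-character fix above is the classic sizeof pitfall: applied to a pointer, sizeof yields the size of the pointer itself (4 or 8 bytes), not of the object it points to, so the memcpy() copied the wrong number of bytes. A small stand-alone illustration of the difference:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	uint16_t src = 0x1234;
	uint16_t dst = 0;
	uint16_t *p = &src;

	printf("sizeof(p)  = %zu\n", sizeof(p));   /* pointer size: 4 or 8 */
	printf("sizeof(*p) = %zu\n", sizeof(*p));  /* pointee size: 2 */

	/* Correct: copy exactly the pointed-to object. */
	memcpy(&dst, p, sizeof(*p));
	printf("dst = 0x%x\n", dst);
	return 0;
}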
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
index 7f6c58ebbc4..6057e18f688 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
@@ -1780,7 +1780,12 @@ static int iwl_mac_setup_register(struct iwl_priv *priv,
1780 IEEE80211_HW_SPECTRUM_MGMT | 1780 IEEE80211_HW_SPECTRUM_MGMT |
1781 IEEE80211_HW_REPORTS_TX_ACK_STATUS; 1781 IEEE80211_HW_REPORTS_TX_ACK_STATUS;
1782 1782
1783 /*
1784 * Including the following line will crash some AP's. This
1785 * workaround removes the stimulus which causes the crash until
1786 * the AP software can be fixed.
1783 hw->max_tx_aggregation_subframes = LINK_QUAL_AGG_FRAME_LIMIT_DEF; 1787 hw->max_tx_aggregation_subframes = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
1788 */
1784 1789
1785 hw->flags |= IEEE80211_HW_SUPPORTS_PS | 1790 hw->flags |= IEEE80211_HW_SUPPORTS_PS |
1786 IEEE80211_HW_SUPPORTS_DYNAMIC_PS; 1791 IEEE80211_HW_SUPPORTS_DYNAMIC_PS;
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-tx-pcie.c b/drivers/net/wireless/iwlwifi/iwl-trans-tx-pcie.c
index ca686dbf589..f6d823f012d 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans-tx-pcie.c
+++ b/drivers/net/wireless/iwlwifi/iwl-trans-tx-pcie.c
@@ -925,6 +925,8 @@ void iwl_tx_cmd_complete(struct iwl_trans *trans, struct iwl_rx_mem_buffer *rxb)
925 cmd = txq->cmd[cmd_index]; 925 cmd = txq->cmd[cmd_index];
926 meta = &txq->meta[cmd_index]; 926 meta = &txq->meta[cmd_index];
927 927
928 txq->time_stamp = jiffies;
929
928 iwlagn_unmap_tfd(trans, meta, &txq->tfds[index], 930 iwlagn_unmap_tfd(trans, meta, &txq->tfds[index],
929 DMA_BIDIRECTIONAL); 931 DMA_BIDIRECTIONAL);
930 932
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c
index a5ddb39ca4a..31c98509f7e 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/rt2x00/rt2800lib.c
@@ -3769,14 +3769,15 @@ static void rt2800_efuse_read(struct rt2x00_dev *rt2x00dev, unsigned int i)
3769 rt2800_regbusy_read(rt2x00dev, EFUSE_CTRL, EFUSE_CTRL_KICK, &reg); 3769 rt2800_regbusy_read(rt2x00dev, EFUSE_CTRL, EFUSE_CTRL_KICK, &reg);
3770 3770
3771 /* Apparently the data is read from end to start */ 3771 /* Apparently the data is read from end to start */
3772 rt2800_register_read_lock(rt2x00dev, EFUSE_DATA3, 3772 rt2800_register_read_lock(rt2x00dev, EFUSE_DATA3, &reg);
3773 (u32 *)&rt2x00dev->eeprom[i]); 3773 /* The returned value is in CPU order, but eeprom is le */
3774 rt2800_register_read_lock(rt2x00dev, EFUSE_DATA2, 3774 rt2x00dev->eeprom[i] = cpu_to_le32(reg);
3775 (u32 *)&rt2x00dev->eeprom[i + 2]); 3775 rt2800_register_read_lock(rt2x00dev, EFUSE_DATA2, &reg);
3776 rt2800_register_read_lock(rt2x00dev, EFUSE_DATA1, 3776 *(u32 *)&rt2x00dev->eeprom[i + 2] = cpu_to_le32(reg);
3777 (u32 *)&rt2x00dev->eeprom[i + 4]); 3777 rt2800_register_read_lock(rt2x00dev, EFUSE_DATA1, &reg);
3778 rt2800_register_read_lock(rt2x00dev, EFUSE_DATA0, 3778 *(u32 *)&rt2x00dev->eeprom[i + 4] = cpu_to_le32(reg);
3779 (u32 *)&rt2x00dev->eeprom[i + 6]); 3779 rt2800_register_read_lock(rt2x00dev, EFUSE_DATA0, &reg);
3780 *(u32 *)&rt2x00dev->eeprom[i + 6] = cpu_to_le32(reg);
3780 3781
3781 mutex_unlock(&rt2x00dev->csr_mutex); 3782 mutex_unlock(&rt2x00dev->csr_mutex);
3782} 3783}
@@ -3942,19 +3943,23 @@ int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
3942 return -ENODEV; 3943 return -ENODEV;
3943 } 3944 }
3944 3945
3945 if (!rt2x00_rf(rt2x00dev, RF2820) && 3946 switch (rt2x00dev->chip.rf) {
3946 !rt2x00_rf(rt2x00dev, RF2850) && 3947 case RF2820:
3947 !rt2x00_rf(rt2x00dev, RF2720) && 3948 case RF2850:
3948 !rt2x00_rf(rt2x00dev, RF2750) && 3949 case RF2720:
3949 !rt2x00_rf(rt2x00dev, RF3020) && 3950 case RF2750:
3950 !rt2x00_rf(rt2x00dev, RF2020) && 3951 case RF3020:
3951 !rt2x00_rf(rt2x00dev, RF3021) && 3952 case RF2020:
3952 !rt2x00_rf(rt2x00dev, RF3022) && 3953 case RF3021:
3953 !rt2x00_rf(rt2x00dev, RF3052) && 3954 case RF3022:
3954 !rt2x00_rf(rt2x00dev, RF3320) && 3955 case RF3052:
3955 !rt2x00_rf(rt2x00dev, RF5370) && 3956 case RF3320:
3956 !rt2x00_rf(rt2x00dev, RF5390)) { 3957 case RF5370:
3957 ERROR(rt2x00dev, "Invalid RF chipset detected.\n"); 3958 case RF5390:
3959 break;
3960 default:
3961 ERROR(rt2x00dev, "Invalid RF chipset 0x%x detected.\n",
3962 rt2x00dev->chip.rf);
3958 return -ENODEV; 3963 return -ENODEV;
3959 } 3964 }
3960 3965
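In the efuse path above, rt2800_register_read_lock() returns each 32-bit word in CPU byte order, while the in-memory EEPROM image is defined as little-endian, hence the explicit cpu_to_le32() before each word is stored. A user-space analogue of the same conversion, spelling out the little-endian store by hand (names illustrative):

#include <stdint.h>
#include <stdio.h>

/* Store a CPU-order word as little-endian bytes, like cpu_to_le32(). */
static void put_le32(uint8_t *dst, uint32_t v)
{
	dst[0] = v & 0xff;
	dst[1] = (v >> 8) & 0xff;
	dst[2] = (v >> 16) & 0xff;
	dst[3] = (v >> 24) & 0xff;
}

int main(void)
{
	uint8_t eeprom[4];
	uint32_t reg = 0x11223344;  /* value as read back in CPU order */

	put_le32(eeprom, reg);      /* image layout no longer depends on host endianness */
	printf("%02x %02x %02x %02x\n", eeprom[0], eeprom[1], eeprom[2], eeprom[3]);
	return 0;
}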
diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c
index 677b5ababbd..f1565792f27 100644
--- a/drivers/net/wireless/rt2x00/rt2800usb.c
+++ b/drivers/net/wireless/rt2x00/rt2800usb.c
@@ -464,6 +464,15 @@ static bool rt2800usb_txdone_entry_check(struct queue_entry *entry, u32 reg)
464 int wcid, ack, pid; 464 int wcid, ack, pid;
465 int tx_wcid, tx_ack, tx_pid; 465 int tx_wcid, tx_ack, tx_pid;
466 466
467 if (test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags) ||
468 !test_bit(ENTRY_DATA_STATUS_PENDING, &entry->flags)) {
469 WARNING(entry->queue->rt2x00dev,
470 "Data pending for entry %u in queue %u\n",
471 entry->entry_idx, entry->queue->qid);
472 cond_resched();
473 return false;
474 }
475
467 wcid = rt2x00_get_field32(reg, TX_STA_FIFO_WCID); 476 wcid = rt2x00_get_field32(reg, TX_STA_FIFO_WCID);
468 ack = rt2x00_get_field32(reg, TX_STA_FIFO_TX_ACK_REQUIRED); 477 ack = rt2x00_get_field32(reg, TX_STA_FIFO_TX_ACK_REQUIRED);
469 pid = rt2x00_get_field32(reg, TX_STA_FIFO_PID_TYPE); 478 pid = rt2x00_get_field32(reg, TX_STA_FIFO_PID_TYPE);
@@ -529,13 +538,12 @@ static void rt2800usb_txdone(struct rt2x00_dev *rt2x00dev)
529 entry = rt2x00queue_get_entry(queue, Q_INDEX_DONE); 538 entry = rt2x00queue_get_entry(queue, Q_INDEX_DONE);
530 if (rt2800usb_txdone_entry_check(entry, reg)) 539 if (rt2800usb_txdone_entry_check(entry, reg))
531 break; 540 break;
541 entry = NULL;
532 } 542 }
533 543
534 if (!entry || rt2x00queue_empty(queue)) 544 if (entry)
535 break; 545 rt2800_txdone_entry(entry, reg,
536 546 rt2800usb_get_txwi(entry));
537 rt2800_txdone_entry(entry, reg,
538 rt2800usb_get_txwi(entry));
539 } 547 }
540} 548}
541 549
@@ -559,8 +567,10 @@ static void rt2800usb_work_txdone(struct work_struct *work)
559 while (!rt2x00queue_empty(queue)) { 567 while (!rt2x00queue_empty(queue)) {
560 entry = rt2x00queue_get_entry(queue, Q_INDEX_DONE); 568 entry = rt2x00queue_get_entry(queue, Q_INDEX_DONE);
561 569
562 if (test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags)) 570 if (test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags) ||
571 !test_bit(ENTRY_DATA_STATUS_PENDING, &entry->flags))
563 break; 572 break;
573
564 if (test_bit(ENTRY_DATA_IO_FAILED, &entry->flags)) 574 if (test_bit(ENTRY_DATA_IO_FAILED, &entry->flags))
565 rt2x00lib_txdone_noinfo(entry, TXDONE_FAILURE); 575 rt2x00lib_txdone_noinfo(entry, TXDONE_FAILURE);
566 else if (rt2x00queue_status_timeout(entry)) 576 else if (rt2x00queue_status_timeout(entry))
diff --git a/drivers/net/wireless/rt2x00/rt2x00usb.c b/drivers/net/wireless/rt2x00/rt2x00usb.c
index b6b4542c246..1e31050dafc 100644
--- a/drivers/net/wireless/rt2x00/rt2x00usb.c
+++ b/drivers/net/wireless/rt2x00/rt2x00usb.c
@@ -262,23 +262,20 @@ static void rt2x00usb_interrupt_txdone(struct urb *urb)
262 struct queue_entry *entry = (struct queue_entry *)urb->context; 262 struct queue_entry *entry = (struct queue_entry *)urb->context;
263 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; 263 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
264 264
265 if (!test_and_clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags)) 265 if (!test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags))
266 return; 266 return;
267
268 if (rt2x00dev->ops->lib->tx_dma_done)
269 rt2x00dev->ops->lib->tx_dma_done(entry);
270
271 /*
272 * Report the frame as DMA done
273 */
274 rt2x00lib_dmadone(entry);
275
276 /* 267 /*
277 * Check if the frame was correctly uploaded 268 * Check if the frame was correctly uploaded
278 */ 269 */
279 if (urb->status) 270 if (urb->status)
280 set_bit(ENTRY_DATA_IO_FAILED, &entry->flags); 271 set_bit(ENTRY_DATA_IO_FAILED, &entry->flags);
272 /*
273 * Report the frame as DMA done
274 */
275 rt2x00lib_dmadone(entry);
281 276
277 if (rt2x00dev->ops->lib->tx_dma_done)
278 rt2x00dev->ops->lib->tx_dma_done(entry);
282 /* 279 /*
283 * Schedule the delayed work for reading the TX status 280 * Schedule the delayed work for reading the TX status
284 * from the device. 281 * from the device.
@@ -874,18 +871,8 @@ int rt2x00usb_suspend(struct usb_interface *usb_intf, pm_message_t state)
874{ 871{
875 struct ieee80211_hw *hw = usb_get_intfdata(usb_intf); 872 struct ieee80211_hw *hw = usb_get_intfdata(usb_intf);
876 struct rt2x00_dev *rt2x00dev = hw->priv; 873 struct rt2x00_dev *rt2x00dev = hw->priv;
877 int retval;
878
879 retval = rt2x00lib_suspend(rt2x00dev, state);
880 if (retval)
881 return retval;
882 874
883 /* 875 return rt2x00lib_suspend(rt2x00dev, state);
884 * Decrease usbdev refcount.
885 */
886 usb_put_dev(interface_to_usbdev(usb_intf));
887
888 return 0;
889} 876}
890EXPORT_SYMBOL_GPL(rt2x00usb_suspend); 877EXPORT_SYMBOL_GPL(rt2x00usb_suspend);
891 878
@@ -894,8 +881,6 @@ int rt2x00usb_resume(struct usb_interface *usb_intf)
894 struct ieee80211_hw *hw = usb_get_intfdata(usb_intf); 881 struct ieee80211_hw *hw = usb_get_intfdata(usb_intf);
895 struct rt2x00_dev *rt2x00dev = hw->priv; 882 struct rt2x00_dev *rt2x00dev = hw->priv;
896 883
897 usb_get_dev(interface_to_usbdev(usb_intf));
898
899 return rt2x00lib_resume(rt2x00dev); 884 return rt2x00lib_resume(rt2x00dev);
900} 885}
901EXPORT_SYMBOL_GPL(rt2x00usb_resume); 886EXPORT_SYMBOL_GPL(rt2x00usb_resume);
diff --git a/drivers/net/wireless/rtlwifi/core.c b/drivers/net/wireless/rtlwifi/core.c
index 1bdc1aa305c..04c4e9eb6ee 100644
--- a/drivers/net/wireless/rtlwifi/core.c
+++ b/drivers/net/wireless/rtlwifi/core.c
@@ -610,6 +610,11 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
610 610
611 mac->link_state = MAC80211_NOLINK; 611 mac->link_state = MAC80211_NOLINK;
612 memset(mac->bssid, 0, 6); 612 memset(mac->bssid, 0, 6);
613
614 /* reset sec info */
615 rtl_cam_reset_sec_info(hw);
616
617 rtl_cam_reset_all_entry(hw);
613 mac->vendor = PEER_UNKNOWN; 618 mac->vendor = PEER_UNKNOWN;
614 619
615 RT_TRACE(rtlpriv, COMP_MAC80211, DBG_DMESG, 620 RT_TRACE(rtlpriv, COMP_MAC80211, DBG_DMESG,
@@ -1063,6 +1068,9 @@ static int rtl_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
1063 *or clear all entry here. 1068 *or clear all entry here.
1064 */ 1069 */
1065 rtl_cam_delete_one_entry(hw, mac_addr, key_idx); 1070 rtl_cam_delete_one_entry(hw, mac_addr, key_idx);
1071
1072 rtl_cam_reset_sec_info(hw);
1073
1066 break; 1074 break;
1067 default: 1075 default:
1068 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, 1076 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
index c4161148e0d..bc33b147f44 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
@@ -548,15 +548,16 @@ void rtl92cu_tx_fill_desc(struct ieee80211_hw *hw,
548 (tcb_desc->rts_use_shortpreamble ? 1 : 0) 548 (tcb_desc->rts_use_shortpreamble ? 1 : 0)
549 : (tcb_desc->rts_use_shortgi ? 1 : 0))); 549 : (tcb_desc->rts_use_shortgi ? 1 : 0)));
550 if (mac->bw_40) { 550 if (mac->bw_40) {
551 if (tcb_desc->packet_bw) { 551 if (rate_flag & IEEE80211_TX_RC_DUP_DATA) {
552 SET_TX_DESC_DATA_BW(txdesc, 1); 552 SET_TX_DESC_DATA_BW(txdesc, 1);
553 SET_TX_DESC_DATA_SC(txdesc, 3); 553 SET_TX_DESC_DATA_SC(txdesc, 3);
554 } else if(rate_flag & IEEE80211_TX_RC_40_MHZ_WIDTH){
555 SET_TX_DESC_DATA_BW(txdesc, 1);
556 SET_TX_DESC_DATA_SC(txdesc, mac->cur_40_prime_sc);
554 } else { 557 } else {
555 SET_TX_DESC_DATA_BW(txdesc, 0); 558 SET_TX_DESC_DATA_BW(txdesc, 0);
556 if (rate_flag & IEEE80211_TX_RC_DUP_DATA) 559 SET_TX_DESC_DATA_SC(txdesc, 0);
557 SET_TX_DESC_DATA_SC(txdesc, 560 }
558 mac->cur_40_prime_sc);
559 }
560 } else { 561 } else {
561 SET_TX_DESC_DATA_BW(txdesc, 0); 562 SET_TX_DESC_DATA_BW(txdesc, 0);
562 SET_TX_DESC_DATA_SC(txdesc, 0); 563 SET_TX_DESC_DATA_SC(txdesc, 0);
diff --git a/drivers/net/wireless/wl12xx/acx.c b/drivers/net/wireless/wl12xx/acx.c
index e047594794a..f2838ae07da 100644
--- a/drivers/net/wireless/wl12xx/acx.c
+++ b/drivers/net/wireless/wl12xx/acx.c
@@ -78,8 +78,6 @@ int wl1271_acx_sleep_auth(struct wl1271 *wl, u8 sleep_auth)
78 auth->sleep_auth = sleep_auth; 78 auth->sleep_auth = sleep_auth;
79 79
80 ret = wl1271_cmd_configure(wl, ACX_SLEEP_AUTH, auth, sizeof(*auth)); 80 ret = wl1271_cmd_configure(wl, ACX_SLEEP_AUTH, auth, sizeof(*auth));
81 if (ret < 0)
82 return ret;
83 81
84out: 82out:
85 kfree(auth); 83 kfree(auth);
@@ -576,10 +574,8 @@ int wl1271_acx_cca_threshold(struct wl1271 *wl)
576 574
577 ret = wl1271_cmd_configure(wl, ACX_CCA_THRESHOLD, 575 ret = wl1271_cmd_configure(wl, ACX_CCA_THRESHOLD,
578 detection, sizeof(*detection)); 576 detection, sizeof(*detection));
579 if (ret < 0) { 577 if (ret < 0)
580 wl1271_warning("failed to set cca threshold: %d", ret); 578 wl1271_warning("failed to set cca threshold: %d", ret);
581 return ret;
582 }
583 579
584out: 580out:
585 kfree(detection); 581 kfree(detection);
diff --git a/drivers/net/wireless/wl12xx/sdio.c b/drivers/net/wireless/wl12xx/sdio.c
index ac2e5661397..516a8980723 100644
--- a/drivers/net/wireless/wl12xx/sdio.c
+++ b/drivers/net/wireless/wl12xx/sdio.c
@@ -164,7 +164,7 @@ static int wl1271_sdio_power_on(struct wl1271 *wl)
164 /* If enabled, tell runtime PM not to power off the card */ 164 /* If enabled, tell runtime PM not to power off the card */
165 if (pm_runtime_enabled(&func->dev)) { 165 if (pm_runtime_enabled(&func->dev)) {
166 ret = pm_runtime_get_sync(&func->dev); 166 ret = pm_runtime_get_sync(&func->dev);
167 if (ret) 167 if (ret < 0)
168 goto out; 168 goto out;
169 } else { 169 } else {
170 /* Runtime PM is disabled: power up the card manually */ 170 /* Runtime PM is disabled: power up the card manually */
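pm_runtime_get_sync() returns a negative errno on failure but may legitimately return 1 when the device is already active, so treating any non-zero return as failure made the power-on path bail out spuriously. A rough sketch of the usual call pattern, assuming a generic struct device rather than the wl12xx SDIO function:

#include <linux/pm_runtime.h>

static int demo_power_on(struct device *dev)
{
	int ret;

	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		/* The usage count was still incremented; drop it again. */
		pm_runtime_put_noidle(dev);
		return ret;
	}

	/* ret == 0 (resumed) or 1 (already active): both mean success. */
	return 0;
}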
diff --git a/drivers/net/wireless/wl12xx/testmode.c b/drivers/net/wireless/wl12xx/testmode.c
index 5d5e1ef8720..4ae8effaee2 100644
--- a/drivers/net/wireless/wl12xx/testmode.c
+++ b/drivers/net/wireless/wl12xx/testmode.c
@@ -36,7 +36,6 @@ enum wl1271_tm_commands {
36 WL1271_TM_CMD_TEST, 36 WL1271_TM_CMD_TEST,
37 WL1271_TM_CMD_INTERROGATE, 37 WL1271_TM_CMD_INTERROGATE,
38 WL1271_TM_CMD_CONFIGURE, 38 WL1271_TM_CMD_CONFIGURE,
39 WL1271_TM_CMD_NVS_PUSH,
40 WL1271_TM_CMD_SET_PLT_MODE, 39 WL1271_TM_CMD_SET_PLT_MODE,
41 WL1271_TM_CMD_RECOVER, 40 WL1271_TM_CMD_RECOVER,
42 41
@@ -139,12 +138,15 @@ static int wl1271_tm_cmd_interrogate(struct wl1271 *wl, struct nlattr *tb[])
139 138
140 if (ret < 0) { 139 if (ret < 0) {
141 wl1271_warning("testmode cmd interrogate failed: %d", ret); 140 wl1271_warning("testmode cmd interrogate failed: %d", ret);
141 kfree(cmd);
142 return ret; 142 return ret;
143 } 143 }
144 144
145 skb = cfg80211_testmode_alloc_reply_skb(wl->hw->wiphy, sizeof(*cmd)); 145 skb = cfg80211_testmode_alloc_reply_skb(wl->hw->wiphy, sizeof(*cmd));
146 if (!skb) 146 if (!skb) {
147 kfree(cmd);
147 return -ENOMEM; 148 return -ENOMEM;
149 }
148 150
149 NLA_PUT(skb, WL1271_TM_ATTR_DATA, sizeof(*cmd), cmd); 151 NLA_PUT(skb, WL1271_TM_ATTR_DATA, sizeof(*cmd), cmd);
150 152
@@ -187,48 +189,6 @@ static int wl1271_tm_cmd_configure(struct wl1271 *wl, struct nlattr *tb[])
187 return 0; 189 return 0;
188} 190}
189 191
190static int wl1271_tm_cmd_nvs_push(struct wl1271 *wl, struct nlattr *tb[])
191{
192 int ret = 0;
193 size_t len;
194 void *buf;
195
196 wl1271_debug(DEBUG_TESTMODE, "testmode cmd nvs push");
197
198 if (!tb[WL1271_TM_ATTR_DATA])
199 return -EINVAL;
200
201 buf = nla_data(tb[WL1271_TM_ATTR_DATA]);
202 len = nla_len(tb[WL1271_TM_ATTR_DATA]);
203
204 mutex_lock(&wl->mutex);
205
206 kfree(wl->nvs);
207
208 if ((wl->chip.id == CHIP_ID_1283_PG20) &&
209 (len != sizeof(struct wl128x_nvs_file)))
210 return -EINVAL;
211 else if (len != sizeof(struct wl1271_nvs_file))
212 return -EINVAL;
213
214 wl->nvs = kzalloc(len, GFP_KERNEL);
215 if (!wl->nvs) {
216 wl1271_error("could not allocate memory for the nvs file");
217 ret = -ENOMEM;
218 goto out;
219 }
220
221 memcpy(wl->nvs, buf, len);
222 wl->nvs_len = len;
223
224 wl1271_debug(DEBUG_TESTMODE, "testmode pushed nvs");
225
226out:
227 mutex_unlock(&wl->mutex);
228
229 return ret;
230}
231
232static int wl1271_tm_cmd_set_plt_mode(struct wl1271 *wl, struct nlattr *tb[]) 192static int wl1271_tm_cmd_set_plt_mode(struct wl1271 *wl, struct nlattr *tb[])
233{ 193{
234 u32 val; 194 u32 val;
@@ -285,8 +245,6 @@ int wl1271_tm_cmd(struct ieee80211_hw *hw, void *data, int len)
285 return wl1271_tm_cmd_interrogate(wl, tb); 245 return wl1271_tm_cmd_interrogate(wl, tb);
286 case WL1271_TM_CMD_CONFIGURE: 246 case WL1271_TM_CMD_CONFIGURE:
287 return wl1271_tm_cmd_configure(wl, tb); 247 return wl1271_tm_cmd_configure(wl, tb);
288 case WL1271_TM_CMD_NVS_PUSH:
289 return wl1271_tm_cmd_nvs_push(wl, tb);
290 case WL1271_TM_CMD_SET_PLT_MODE: 248 case WL1271_TM_CMD_SET_PLT_MODE:
291 return wl1271_tm_cmd_set_plt_mode(wl, tb); 249 return wl1271_tm_cmd_set_plt_mode(wl, tb);
292 case WL1271_TM_CMD_RECOVER: 250 case WL1271_TM_CMD_RECOVER: