author     Arnd Bergmann <arnd@arndb.de>   2011-10-20 09:14:25 -0400
committer  Arnd Bergmann <arnd@arndb.de>   2011-10-20 09:14:25 -0400
commit     b4cbb8a4e602ea77b0525d06eff89c6a6070dab3 (patch)
tree       a5dd723679582505ef3905c90f0c2c032d191b94 /drivers/net
parent     526b264163068f77c5f2409031f5e25caf3900a9 (diff)
parent     c5d7a9230e5e277f262b6806b7f4d6b35de5a3fb (diff)
Merge branch 'imx-features-for-arnd' of git://git.pengutronix.de/git/imx/linux-2.6 into imx/devel
Conflicts:
arch/arm/mach-mx5/clock-mx51-mx53.c
arch/arm/mach-mx5/devices-imx53.h
Diffstat (limited to 'drivers/net')
85 files changed, 1213 insertions, 674 deletions
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 8d0314dbd946..a44874e24f2a 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -2535,7 +2535,7 @@ config S6GMAC
2535 | source "drivers/net/stmmac/Kconfig" | 2535 | source "drivers/net/stmmac/Kconfig" |
2536 | 2536 | ||
2537 | config PCH_GBE | 2537 | config PCH_GBE |
2538 | tristate "Intel EG20T PCH / OKI SEMICONDUCTOR ML7223 IOH GbE" | 2538 | tristate "Intel EG20T PCH/OKI SEMICONDUCTOR IOH(ML7223/ML7831) GbE" |
2539 | depends on PCI | 2539 | depends on PCI |
2540 | select MII | 2540 | select MII |
2541 | ---help--- | 2541 | ---help--- |
@@ -2548,10 +2548,11 @@ config PCH_GBE | |||
2548 | This driver enables Gigabit Ethernet function. | 2548 | This driver enables Gigabit Ethernet function. |
2549 | 2549 | ||
2550 | This driver also can be used for OKI SEMICONDUCTOR IOH(Input/ | 2550 | This driver also can be used for OKI SEMICONDUCTOR IOH(Input/ |
2551 | Output Hub), ML7223. | 2551 | Output Hub), ML7223/ML7831. |
2552 | ML7223 IOH is for MP(Media Phone) use. | 2552 | ML7223 IOH is for MP(Media Phone) use. ML7831 IOH is for general |
2553 | ML7223 is companion chip for Intel Atom E6xx series. | 2553 | purpose use. |
2554 | ML7223 is completely compatible for Intel EG20T PCH. | 2554 | ML7223/ML7831 is companion chip for Intel Atom E6xx series. |
2555 | ML7223/ML7831 is completely compatible for Intel EG20T PCH. | ||
2555 | 2556 | ||
2556 | config FTGMAC100 | 2557 | config FTGMAC100 |
2557 | tristate "Faraday FTGMAC100 Gigabit Ethernet support" | 2558 | tristate "Faraday FTGMAC100 Gigabit Ethernet support" |
diff --git a/drivers/net/arm/am79c961a.c b/drivers/net/arm/am79c961a.c
index 52fe21e1e2cd..3b1416e3d217 100644
--- a/drivers/net/arm/am79c961a.c
+++ b/drivers/net/arm/am79c961a.c
@@ -308,8 +308,11 @@ static void am79c961_timer(unsigned long data)
308 | struct net_device *dev = (struct net_device *)data; | 308 | struct net_device *dev = (struct net_device *)data; |
309 | struct dev_priv *priv = netdev_priv(dev); | 309 | struct dev_priv *priv = netdev_priv(dev); |
310 | unsigned int lnkstat, carrier; | 310 | unsigned int lnkstat, carrier; |
311 | unsigned long flags; | ||
311 | 312 | ||
313 | spin_lock_irqsave(&priv->chip_lock, flags); | ||
312 | lnkstat = read_ireg(dev->base_addr, ISALED0) & ISALED0_LNKST; | 314 | lnkstat = read_ireg(dev->base_addr, ISALED0) & ISALED0_LNKST; |
315 | spin_unlock_irqrestore(&priv->chip_lock, flags); | ||
313 | carrier = netif_carrier_ok(dev); | 316 | carrier = netif_carrier_ok(dev); |
314 | 317 | ||
315 | if (lnkstat && !carrier) { | 318 | if (lnkstat && !carrier) { |
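The am79c961a hunk above takes priv->chip_lock around the ISALED0 read so the periodic link poll cannot race other register accesses in the driver. A userspace analogue of the resulting pattern, with a pthread mutex standing in for the spinlock and the register read reduced to a variable, is sketched below; only the lock/read/unlock shape is taken from the hunk, everything else is illustrative.

```c
/* Userspace analogue of the fix above: hold the lock only around the
 * shared-state read, then act on the snapshot outside the lock.
 * pthread_mutex_t stands in for priv->chip_lock (a spinlock_t). */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t chip_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int link_status_reg = 0x40;   /* illustrative value */

static void poll_link(bool carrier)
{
	unsigned int lnkstat;

	pthread_mutex_lock(&chip_lock);       /* spin_lock_irqsave()        */
	lnkstat = link_status_reg & 0x40;     /* read_ireg(..., ISALED0)    */
	pthread_mutex_unlock(&chip_lock);     /* spin_unlock_irqrestore()   */

	if (lnkstat && !carrier)
		printf("link up\n");
	else if (!lnkstat && carrier)
		printf("link down\n");
}

int main(void)
{
	poll_link(false);
	return 0;
}
```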
diff --git a/drivers/net/bnx2x/bnx2x.h b/drivers/net/bnx2x/bnx2x.h
index c423504a755f..e46df5331c55 100644
--- a/drivers/net/bnx2x/bnx2x.h
+++ b/drivers/net/bnx2x/bnx2x.h
@@ -315,6 +315,14 @@ union db_prod {
315 | u32 raw; | 315 | u32 raw; |
316 | }; | 316 | }; |
317 | 317 | ||
318 | /* dropless fc FW/HW related params */ | ||
319 | #define BRB_SIZE(bp) (CHIP_IS_E3(bp) ? 1024 : 512) | ||
320 | #define MAX_AGG_QS(bp) (CHIP_IS_E1(bp) ? \ | ||
321 | ETH_MAX_AGGREGATION_QUEUES_E1 :\ | ||
322 | ETH_MAX_AGGREGATION_QUEUES_E1H_E2) | ||
323 | #define FW_DROP_LEVEL(bp) (3 + MAX_SPQ_PENDING + MAX_AGG_QS(bp)) | ||
324 | #define FW_PREFETCH_CNT 16 | ||
325 | #define DROPLESS_FC_HEADROOM 100 | ||
318 | 326 | ||
319 | /* MC hsi */ | 327 | /* MC hsi */ |
320 | #define BCM_PAGE_SHIFT 12 | 328 | #define BCM_PAGE_SHIFT 12 |
@@ -331,15 +339,35 @@ union db_prod { | |||
331 | /* SGE ring related macros */ | 339 | /* SGE ring related macros */ |
332 | #define NUM_RX_SGE_PAGES 2 | 340 | #define NUM_RX_SGE_PAGES 2 |
333 | #define RX_SGE_CNT (BCM_PAGE_SIZE / sizeof(struct eth_rx_sge)) | 341 | #define RX_SGE_CNT (BCM_PAGE_SIZE / sizeof(struct eth_rx_sge)) |
334 | #define MAX_RX_SGE_CNT (RX_SGE_CNT - 2) | 342 | #define NEXT_PAGE_SGE_DESC_CNT 2 |
343 | #define MAX_RX_SGE_CNT (RX_SGE_CNT - NEXT_PAGE_SGE_DESC_CNT) | ||
335 | /* RX_SGE_CNT is promised to be a power of 2 */ | 344 | /* RX_SGE_CNT is promised to be a power of 2 */ |
336 | #define RX_SGE_MASK (RX_SGE_CNT - 1) | 345 | #define RX_SGE_MASK (RX_SGE_CNT - 1) |
337 | #define NUM_RX_SGE (RX_SGE_CNT * NUM_RX_SGE_PAGES) | 346 | #define NUM_RX_SGE (RX_SGE_CNT * NUM_RX_SGE_PAGES) |
338 | #define MAX_RX_SGE (NUM_RX_SGE - 1) | 347 | #define MAX_RX_SGE (NUM_RX_SGE - 1) |
339 | #define NEXT_SGE_IDX(x) ((((x) & RX_SGE_MASK) == \ | 348 | #define NEXT_SGE_IDX(x) ((((x) & RX_SGE_MASK) == \ |
340 | (MAX_RX_SGE_CNT - 1)) ? (x) + 3 : (x) + 1) | 349 | (MAX_RX_SGE_CNT - 1)) ? \ |
350 | (x) + 1 + NEXT_PAGE_SGE_DESC_CNT : \ | ||
351 | (x) + 1) | ||
341 | #define RX_SGE(x) ((x) & MAX_RX_SGE) | 352 | #define RX_SGE(x) ((x) & MAX_RX_SGE) |
342 | 353 | ||
354 | /* | ||
355 | * Number of required SGEs is the sum of two: | ||
356 | * 1. Number of possible opened aggregations (next packet for | ||
357 | * these aggregations will probably consume SGE immidiatelly) | ||
358 | * 2. Rest of BRB blocks divided by 2 (block will consume new SGE only | ||
359 | * after placement on BD for new TPA aggregation) | ||
360 | * | ||
361 | * Takes into account NEXT_PAGE_SGE_DESC_CNT "next" elements on each page | ||
362 | */ | ||
363 | #define NUM_SGE_REQ (MAX_AGG_QS(bp) + \ | ||
364 | (BRB_SIZE(bp) - MAX_AGG_QS(bp)) / 2) | ||
365 | #define NUM_SGE_PG_REQ ((NUM_SGE_REQ + MAX_RX_SGE_CNT - 1) / \ | ||
366 | MAX_RX_SGE_CNT) | ||
367 | #define SGE_TH_LO(bp) (NUM_SGE_REQ + \ | ||
368 | NUM_SGE_PG_REQ * NEXT_PAGE_SGE_DESC_CNT) | ||
369 | #define SGE_TH_HI(bp) (SGE_TH_LO(bp) + DROPLESS_FC_HEADROOM) | ||
370 | |||
343 | /* Manipulate a bit vector defined as an array of u64 */ | 371 | /* Manipulate a bit vector defined as an array of u64 */ |
344 | 372 | ||
345 | /* Number of bits in one sge_mask array element */ | 373 | /* Number of bits in one sge_mask array element */ |
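As a sanity check on the new dropless flow-control math, the SGE thresholds can be evaluated standalone. The sketch below plugs in 4 KiB pages, 8-byte SGE entries and 64 aggregation queues — those sizes are assumptions for a non-E1/non-E3 device — while BRB_SIZE = 512 and DROPLESS_FC_HEADROOM = 100 come from the hunk itself.

```c
/* Standalone evaluation of SGE_TH_LO/SGE_TH_HI from the macros above,
 * under assumed geometry: 4 KiB pages, 8-byte SGE entries. */
#include <stdio.h>

#define BCM_PAGE_SIZE           4096
#define SGE_ENTRY_SIZE          8      /* assumed sizeof(struct eth_rx_sge) */
#define RX_SGE_CNT              (BCM_PAGE_SIZE / SGE_ENTRY_SIZE)
#define NEXT_PAGE_SGE_DESC_CNT  2
#define MAX_RX_SGE_CNT          (RX_SGE_CNT - NEXT_PAGE_SGE_DESC_CNT)

#define BRB_SIZE                512    /* non-E3 value from the hunk */
#define MAX_AGG_QS              64     /* assumed E1H/E2 value */
#define DROPLESS_FC_HEADROOM    100

int main(void)
{
	int num_sge_req = MAX_AGG_QS + (BRB_SIZE - MAX_AGG_QS) / 2;
	int num_sge_pg  = (num_sge_req + MAX_RX_SGE_CNT - 1) / MAX_RX_SGE_CNT;
	int sge_th_lo   = num_sge_req + num_sge_pg * NEXT_PAGE_SGE_DESC_CNT;
	int sge_th_hi   = sge_th_lo + DROPLESS_FC_HEADROOM;

	/* 64 + 224 = 288 required SGEs, fitting on one page of 510 entries */
	printf("NUM_SGE_REQ=%d SGE_TH_LO=%d SGE_TH_HI=%d\n",
	       num_sge_req, sge_th_lo, sge_th_hi);
	return 0;
}
```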
@@ -551,24 +579,43 @@ struct bnx2x_fastpath { | |||
551 | 579 | ||
552 | #define NUM_TX_RINGS 16 | 580 | #define NUM_TX_RINGS 16 |
553 | #define TX_DESC_CNT (BCM_PAGE_SIZE / sizeof(union eth_tx_bd_types)) | 581 | #define TX_DESC_CNT (BCM_PAGE_SIZE / sizeof(union eth_tx_bd_types)) |
554 | #define MAX_TX_DESC_CNT (TX_DESC_CNT - 1) | 582 | #define NEXT_PAGE_TX_DESC_CNT 1 |
583 | #define MAX_TX_DESC_CNT (TX_DESC_CNT - NEXT_PAGE_TX_DESC_CNT) | ||
555 | #define NUM_TX_BD (TX_DESC_CNT * NUM_TX_RINGS) | 584 | #define NUM_TX_BD (TX_DESC_CNT * NUM_TX_RINGS) |
556 | #define MAX_TX_BD (NUM_TX_BD - 1) | 585 | #define MAX_TX_BD (NUM_TX_BD - 1) |
557 | #define MAX_TX_AVAIL (MAX_TX_DESC_CNT * NUM_TX_RINGS - 2) | 586 | #define MAX_TX_AVAIL (MAX_TX_DESC_CNT * NUM_TX_RINGS - 2) |
558 | #define NEXT_TX_IDX(x) ((((x) & MAX_TX_DESC_CNT) == \ | 587 | #define NEXT_TX_IDX(x) ((((x) & MAX_TX_DESC_CNT) == \ |
559 | (MAX_TX_DESC_CNT - 1)) ? (x) + 2 : (x) + 1) | 588 | (MAX_TX_DESC_CNT - 1)) ? \ |
589 | (x) + 1 + NEXT_PAGE_TX_DESC_CNT : \ | ||
590 | (x) + 1) | ||
560 | #define TX_BD(x) ((x) & MAX_TX_BD) | 591 | #define TX_BD(x) ((x) & MAX_TX_BD) |
561 | #define TX_BD_POFF(x) ((x) & MAX_TX_DESC_CNT) | 592 | #define TX_BD_POFF(x) ((x) & MAX_TX_DESC_CNT) |
562 | 593 | ||
563 | /* The RX BD ring is special, each bd is 8 bytes but the last one is 16 */ | 594 | /* The RX BD ring is special, each bd is 8 bytes but the last one is 16 */ |
564 | #define NUM_RX_RINGS 8 | 595 | #define NUM_RX_RINGS 8 |
565 | #define RX_DESC_CNT (BCM_PAGE_SIZE / sizeof(struct eth_rx_bd)) | 596 | #define RX_DESC_CNT (BCM_PAGE_SIZE / sizeof(struct eth_rx_bd)) |
566 | #define MAX_RX_DESC_CNT (RX_DESC_CNT - 2) | 597 | #define NEXT_PAGE_RX_DESC_CNT 2 |
598 | #define MAX_RX_DESC_CNT (RX_DESC_CNT - NEXT_PAGE_RX_DESC_CNT) | ||
567 | #define RX_DESC_MASK (RX_DESC_CNT - 1) | 599 | #define RX_DESC_MASK (RX_DESC_CNT - 1) |
568 | #define NUM_RX_BD (RX_DESC_CNT * NUM_RX_RINGS) | 600 | #define NUM_RX_BD (RX_DESC_CNT * NUM_RX_RINGS) |
569 | #define MAX_RX_BD (NUM_RX_BD - 1) | 601 | #define MAX_RX_BD (NUM_RX_BD - 1) |
570 | #define MAX_RX_AVAIL (MAX_RX_DESC_CNT * NUM_RX_RINGS - 2) | 602 | #define MAX_RX_AVAIL (MAX_RX_DESC_CNT * NUM_RX_RINGS - 2) |
571 | #define MIN_RX_AVAIL 128 | 603 | |
604 | /* dropless fc calculations for BDs | ||
605 | * | ||
606 | * Number of BDs should as number of buffers in BRB: | ||
607 | * Low threshold takes into account NEXT_PAGE_RX_DESC_CNT | ||
608 | * "next" elements on each page | ||
609 | */ | ||
610 | #define NUM_BD_REQ BRB_SIZE(bp) | ||
611 | #define NUM_BD_PG_REQ ((NUM_BD_REQ + MAX_RX_DESC_CNT - 1) / \ | ||
612 | MAX_RX_DESC_CNT) | ||
613 | #define BD_TH_LO(bp) (NUM_BD_REQ + \ | ||
614 | NUM_BD_PG_REQ * NEXT_PAGE_RX_DESC_CNT + \ | ||
615 | FW_DROP_LEVEL(bp)) | ||
616 | #define BD_TH_HI(bp) (BD_TH_LO(bp) + DROPLESS_FC_HEADROOM) | ||
617 | |||
618 | #define MIN_RX_AVAIL ((bp)->dropless_fc ? BD_TH_HI(bp) + 128 : 128) | ||
572 | 619 | ||
573 | #define MIN_RX_SIZE_TPA_HW (CHIP_IS_E1(bp) ? \ | 620 | #define MIN_RX_SIZE_TPA_HW (CHIP_IS_E1(bp) ? \ |
574 | ETH_MIN_RX_CQES_WITH_TPA_E1 : \ | 621 | ETH_MIN_RX_CQES_WITH_TPA_E1 : \ |
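The reworked NEXT_*_IDX macros only parameterize the old hard-coded +2/+3 skips with the new NEXT_PAGE_*_DESC_CNT names. A quick standalone evaluation of the TX variant, assuming 4 KiB pages and 16-byte TX BDs (256 descriptors per page, 255 usable), shows the producer hopping over the single "next page" descriptor at the page boundary:

```c
/* Check of NEXT_TX_IDX() at a page boundary, using assumed geometry:
 * 4 KiB pages and 16-byte TX BDs, i.e. TX_DESC_CNT = 256. */
#include <stdio.h>

#define TX_DESC_CNT            256   /* assumed BCM_PAGE_SIZE / sizeof(BD) */
#define NEXT_PAGE_TX_DESC_CNT  1
#define MAX_TX_DESC_CNT        (TX_DESC_CNT - NEXT_PAGE_TX_DESC_CNT)

#define NEXT_TX_IDX(x) ((((x) & MAX_TX_DESC_CNT) == (MAX_TX_DESC_CNT - 1)) ? \
			(x) + 1 + NEXT_PAGE_TX_DESC_CNT : (x) + 1)

int main(void)
{
	unsigned int idx;

	/* 254 is the last usable BD on page one, so the producer jumps
	 * from 254 straight to 256 (the first BD of page two) */
	for (idx = 253; idx <= 255; idx++)
		printf("%u -> %u\n", idx, NEXT_TX_IDX(idx));
	return 0;
}
```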
@@ -579,7 +626,9 @@ struct bnx2x_fastpath { | |||
579 | MIN_RX_AVAIL)) | 626 | MIN_RX_AVAIL)) |
580 | 627 | ||
581 | #define NEXT_RX_IDX(x) ((((x) & RX_DESC_MASK) == \ | 628 | #define NEXT_RX_IDX(x) ((((x) & RX_DESC_MASK) == \ |
582 | (MAX_RX_DESC_CNT - 1)) ? (x) + 3 : (x) + 1) | 629 | (MAX_RX_DESC_CNT - 1)) ? \ |
630 | (x) + 1 + NEXT_PAGE_RX_DESC_CNT : \ | ||
631 | (x) + 1) | ||
583 | #define RX_BD(x) ((x) & MAX_RX_BD) | 632 | #define RX_BD(x) ((x) & MAX_RX_BD) |
584 | 633 | ||
585 | /* | 634 | /* |
@@ -589,14 +638,31 @@ struct bnx2x_fastpath { | |||
589 | #define CQE_BD_REL (sizeof(union eth_rx_cqe) / sizeof(struct eth_rx_bd)) | 638 | #define CQE_BD_REL (sizeof(union eth_rx_cqe) / sizeof(struct eth_rx_bd)) |
590 | #define NUM_RCQ_RINGS (NUM_RX_RINGS * CQE_BD_REL) | 639 | #define NUM_RCQ_RINGS (NUM_RX_RINGS * CQE_BD_REL) |
591 | #define RCQ_DESC_CNT (BCM_PAGE_SIZE / sizeof(union eth_rx_cqe)) | 640 | #define RCQ_DESC_CNT (BCM_PAGE_SIZE / sizeof(union eth_rx_cqe)) |
592 | #define MAX_RCQ_DESC_CNT (RCQ_DESC_CNT - 1) | 641 | #define NEXT_PAGE_RCQ_DESC_CNT 1 |
642 | #define MAX_RCQ_DESC_CNT (RCQ_DESC_CNT - NEXT_PAGE_RCQ_DESC_CNT) | ||
593 | #define NUM_RCQ_BD (RCQ_DESC_CNT * NUM_RCQ_RINGS) | 643 | #define NUM_RCQ_BD (RCQ_DESC_CNT * NUM_RCQ_RINGS) |
594 | #define MAX_RCQ_BD (NUM_RCQ_BD - 1) | 644 | #define MAX_RCQ_BD (NUM_RCQ_BD - 1) |
595 | #define MAX_RCQ_AVAIL (MAX_RCQ_DESC_CNT * NUM_RCQ_RINGS - 2) | 645 | #define MAX_RCQ_AVAIL (MAX_RCQ_DESC_CNT * NUM_RCQ_RINGS - 2) |
596 | #define NEXT_RCQ_IDX(x) ((((x) & MAX_RCQ_DESC_CNT) == \ | 646 | #define NEXT_RCQ_IDX(x) ((((x) & MAX_RCQ_DESC_CNT) == \ |
597 | (MAX_RCQ_DESC_CNT - 1)) ? (x) + 2 : (x) + 1) | 647 | (MAX_RCQ_DESC_CNT - 1)) ? \ |
648 | (x) + 1 + NEXT_PAGE_RCQ_DESC_CNT : \ | ||
649 | (x) + 1) | ||
598 | #define RCQ_BD(x) ((x) & MAX_RCQ_BD) | 650 | #define RCQ_BD(x) ((x) & MAX_RCQ_BD) |
599 | 651 | ||
652 | /* dropless fc calculations for RCQs | ||
653 | * | ||
654 | * Number of RCQs should be as number of buffers in BRB: | ||
655 | * Low threshold takes into account NEXT_PAGE_RCQ_DESC_CNT | ||
656 | * "next" elements on each page | ||
657 | */ | ||
658 | #define NUM_RCQ_REQ BRB_SIZE(bp) | ||
659 | #define NUM_RCQ_PG_REQ ((NUM_BD_REQ + MAX_RCQ_DESC_CNT - 1) / \ | ||
660 | MAX_RCQ_DESC_CNT) | ||
661 | #define RCQ_TH_LO(bp) (NUM_RCQ_REQ + \ | ||
662 | NUM_RCQ_PG_REQ * NEXT_PAGE_RCQ_DESC_CNT + \ | ||
663 | FW_DROP_LEVEL(bp)) | ||
664 | #define RCQ_TH_HI(bp) (RCQ_TH_LO(bp) + DROPLESS_FC_HEADROOM) | ||
665 | |||
600 | 666 | ||
601 | /* This is needed for determining of last_max */ | 667 | /* This is needed for determining of last_max */ |
602 | #define SUB_S16(a, b) (s16)((s16)(a) - (s16)(b)) | 668 | #define SUB_S16(a, b) (s16)((s16)(a) - (s16)(b)) |
@@ -685,24 +751,17 @@ struct bnx2x_fastpath { | |||
685 | #define FP_CSB_FUNC_OFF \ | 751 | #define FP_CSB_FUNC_OFF \ |
686 | offsetof(struct cstorm_status_block_c, func) | 752 | offsetof(struct cstorm_status_block_c, func) |
687 | 753 | ||
688 | #define HC_INDEX_TOE_RX_CQ_CONS 0 /* Formerly Ustorm TOE CQ index */ | 754 | #define HC_INDEX_ETH_RX_CQ_CONS 1 |
689 | /* (HC_INDEX_U_TOE_RX_CQ_CONS) */ | ||
690 | #define HC_INDEX_ETH_RX_CQ_CONS 1 /* Formerly Ustorm ETH CQ index */ | ||
691 | /* (HC_INDEX_U_ETH_RX_CQ_CONS) */ | ||
692 | #define HC_INDEX_ETH_RX_BD_CONS 2 /* Formerly Ustorm ETH BD index */ | ||
693 | /* (HC_INDEX_U_ETH_RX_BD_CONS) */ | ||
694 | |||
695 | #define HC_INDEX_TOE_TX_CQ_CONS 4 /* Formerly Cstorm TOE CQ index */ | ||
696 | /* (HC_INDEX_C_TOE_TX_CQ_CONS) */ | ||
697 | #define HC_INDEX_ETH_TX_CQ_CONS_COS0 5 /* Formerly Cstorm ETH CQ index */ | ||
698 | /* (HC_INDEX_C_ETH_TX_CQ_CONS) */ | ||
699 | #define HC_INDEX_ETH_TX_CQ_CONS_COS1 6 /* Formerly Cstorm ETH CQ index */ | ||
700 | /* (HC_INDEX_C_ETH_TX_CQ_CONS) */ | ||
701 | #define HC_INDEX_ETH_TX_CQ_CONS_COS2 7 /* Formerly Cstorm ETH CQ index */ | ||
702 | /* (HC_INDEX_C_ETH_TX_CQ_CONS) */ | ||
703 | 755 | ||
704 | #define HC_INDEX_ETH_FIRST_TX_CQ_CONS HC_INDEX_ETH_TX_CQ_CONS_COS0 | 756 | #define HC_INDEX_OOO_TX_CQ_CONS 4 |
705 | 757 | ||
758 | #define HC_INDEX_ETH_TX_CQ_CONS_COS0 5 | ||
759 | |||
760 | #define HC_INDEX_ETH_TX_CQ_CONS_COS1 6 | ||
761 | |||
762 | #define HC_INDEX_ETH_TX_CQ_CONS_COS2 7 | ||
763 | |||
764 | #define HC_INDEX_ETH_FIRST_TX_CQ_CONS HC_INDEX_ETH_TX_CQ_CONS_COS0 | ||
706 | 765 | ||
707 | #define BNX2X_RX_SB_INDEX \ | 766 | #define BNX2X_RX_SB_INDEX \ |
708 | (&fp->sb_index_values[HC_INDEX_ETH_RX_CQ_CONS]) | 767 | (&fp->sb_index_values[HC_INDEX_ETH_RX_CQ_CONS]) |
@@ -1100,11 +1159,12 @@ struct bnx2x { | |||
1100 | #define BP_PORT(bp) (bp->pfid & 1) | 1159 | #define BP_PORT(bp) (bp->pfid & 1) |
1101 | #define BP_FUNC(bp) (bp->pfid) | 1160 | #define BP_FUNC(bp) (bp->pfid) |
1102 | #define BP_ABS_FUNC(bp) (bp->pf_num) | 1161 | #define BP_ABS_FUNC(bp) (bp->pf_num) |
1103 | #define BP_E1HVN(bp) (bp->pfid >> 1) | 1162 | #define BP_VN(bp) ((bp)->pfid >> 1) |
1104 | #define BP_VN(bp) (BP_E1HVN(bp)) /*remove when approved*/ | 1163 | #define BP_MAX_VN_NUM(bp) (CHIP_MODE_IS_4_PORT(bp) ? 2 : 4) |
1105 | #define BP_L_ID(bp) (BP_E1HVN(bp) << 2) | 1164 | #define BP_L_ID(bp) (BP_VN(bp) << 2) |
1106 | #define BP_FW_MB_IDX(bp) (BP_PORT(bp) +\ | 1165 | #define BP_FW_MB_IDX_VN(bp, vn) (BP_PORT(bp) +\ |
1107 | BP_VN(bp) * ((CHIP_IS_E1x(bp) || (CHIP_MODE_IS_4_PORT(bp))) ? 2 : 1)) | 1166 | (vn) * ((CHIP_IS_E1x(bp) || (CHIP_MODE_IS_4_PORT(bp))) ? 2 : 1)) |
1167 | #define BP_FW_MB_IDX(bp) BP_FW_MB_IDX_VN(bp, BP_VN(bp)) | ||
1108 | 1168 | ||
1109 | struct net_device *dev; | 1169 | struct net_device *dev; |
1110 | struct pci_dev *pdev; | 1170 | struct pci_dev *pdev; |
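BP_VN() and the new BP_FW_MB_IDX_VN() above are plain bit arithmetic on the PF id. The table generator below assumes a two-port E1x-style layout where the mailbox stride factor is 2; it only shows the shape of the mapping, not the exact values on every chip variant.

```c
/* Evaluate BP_PORT/BP_VN/BP_L_ID/BP_FW_MB_IDX_VN for pfid 0..7, assuming
 * an E1x-style device where the firmware mailbox stride factor is 2. */
#include <stdio.h>

#define BP_PORT(pfid)              ((pfid) & 1)
#define BP_VN(pfid)                ((pfid) >> 1)
#define BP_L_ID(pfid)              (BP_VN(pfid) << 2)
#define BP_FW_MB_IDX_VN(pfid, vn)  (BP_PORT(pfid) + (vn) * 2)

int main(void)
{
	int pfid;

	for (pfid = 0; pfid < 8; pfid++)
		printf("pfid=%d port=%d vn=%d l_id=%d fw_mb_idx=%d\n",
		       pfid, BP_PORT(pfid), BP_VN(pfid), BP_L_ID(pfid),
		       BP_FW_MB_IDX_VN(pfid, BP_VN(pfid)));
	return 0;
}
```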
@@ -1767,7 +1827,7 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms, | |||
1767 | 1827 | ||
1768 | #define MAX_DMAE_C_PER_PORT 8 | 1828 | #define MAX_DMAE_C_PER_PORT 8 |
1769 | #define INIT_DMAE_C(bp) (BP_PORT(bp) * MAX_DMAE_C_PER_PORT + \ | 1829 | #define INIT_DMAE_C(bp) (BP_PORT(bp) * MAX_DMAE_C_PER_PORT + \ |
1770 | BP_E1HVN(bp)) | 1830 | BP_VN(bp)) |
1771 | #define PMF_DMAE_C(bp) (BP_PORT(bp) * MAX_DMAE_C_PER_PORT + \ | 1831 | #define PMF_DMAE_C(bp) (BP_PORT(bp) * MAX_DMAE_C_PER_PORT + \ |
1772 | E1HVN_MAX) | 1832 | E1HVN_MAX) |
1773 | 1833 | ||
@@ -1793,7 +1853,7 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms, | |||
1793 | 1853 | ||
1794 | /* must be used on a CID before placing it on a HW ring */ | 1854 | /* must be used on a CID before placing it on a HW ring */ |
1795 | #define HW_CID(bp, x) ((BP_PORT(bp) << 23) | \ | 1855 | #define HW_CID(bp, x) ((BP_PORT(bp) << 23) | \ |
1796 | (BP_E1HVN(bp) << BNX2X_SWCID_SHIFT) | \ | 1856 | (BP_VN(bp) << BNX2X_SWCID_SHIFT) | \ |
1797 | (x)) | 1857 | (x)) |
1798 | 1858 | ||
1799 | #define SP_DESC_CNT (BCM_PAGE_SIZE / sizeof(struct eth_spe)) | 1859 | #define SP_DESC_CNT (BCM_PAGE_SIZE / sizeof(struct eth_spe)) |
diff --git a/drivers/net/bnx2x/bnx2x_cmn.c b/drivers/net/bnx2x/bnx2x_cmn.c
index 37e5790681ad..c4cbf9736414 100644
--- a/drivers/net/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/bnx2x/bnx2x_cmn.c
@@ -987,8 +987,6 @@ void __bnx2x_link_report(struct bnx2x *bp)
987 | void bnx2x_init_rx_rings(struct bnx2x *bp) | 987 | void bnx2x_init_rx_rings(struct bnx2x *bp) |
988 | { | 988 | { |
989 | int func = BP_FUNC(bp); | 989 | int func = BP_FUNC(bp); |
990 | int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 : | ||
991 | ETH_MAX_AGGREGATION_QUEUES_E1H_E2; | ||
992 | u16 ring_prod; | 990 | u16 ring_prod; |
993 | int i, j; | 991 | int i, j; |
994 | 992 | ||
@@ -1001,7 +999,7 @@ void bnx2x_init_rx_rings(struct bnx2x *bp) | |||
1001 | 999 | ||
1002 | if (!fp->disable_tpa) { | 1000 | if (!fp->disable_tpa) { |
1003 | /* Fill the per-aggregtion pool */ | 1001 | /* Fill the per-aggregtion pool */ |
1004 | for (i = 0; i < max_agg_queues; i++) { | 1002 | for (i = 0; i < MAX_AGG_QS(bp); i++) { |
1005 | struct bnx2x_agg_info *tpa_info = | 1003 | struct bnx2x_agg_info *tpa_info = |
1006 | &fp->tpa_info[i]; | 1004 | &fp->tpa_info[i]; |
1007 | struct sw_rx_bd *first_buf = | 1005 | struct sw_rx_bd *first_buf = |
@@ -1041,7 +1039,7 @@ void bnx2x_init_rx_rings(struct bnx2x *bp) | |||
1041 | bnx2x_free_rx_sge_range(bp, fp, | 1039 | bnx2x_free_rx_sge_range(bp, fp, |
1042 | ring_prod); | 1040 | ring_prod); |
1043 | bnx2x_free_tpa_pool(bp, fp, | 1041 | bnx2x_free_tpa_pool(bp, fp, |
1044 | max_agg_queues); | 1042 | MAX_AGG_QS(bp)); |
1045 | fp->disable_tpa = 1; | 1043 | fp->disable_tpa = 1; |
1046 | ring_prod = 0; | 1044 | ring_prod = 0; |
1047 | break; | 1045 | break; |
@@ -1137,9 +1135,7 @@ static void bnx2x_free_rx_skbs(struct bnx2x *bp) | |||
1137 | bnx2x_free_rx_bds(fp); | 1135 | bnx2x_free_rx_bds(fp); |
1138 | 1136 | ||
1139 | if (!fp->disable_tpa) | 1137 | if (!fp->disable_tpa) |
1140 | bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ? | 1138 | bnx2x_free_tpa_pool(bp, fp, MAX_AGG_QS(bp)); |
1141 | ETH_MAX_AGGREGATION_QUEUES_E1 : | ||
1142 | ETH_MAX_AGGREGATION_QUEUES_E1H_E2); | ||
1143 | } | 1139 | } |
1144 | } | 1140 | } |
1145 | 1141 | ||
@@ -3095,15 +3091,20 @@ static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index) | |||
3095 | struct bnx2x_fastpath *fp = &bp->fp[index]; | 3091 | struct bnx2x_fastpath *fp = &bp->fp[index]; |
3096 | int ring_size = 0; | 3092 | int ring_size = 0; |
3097 | u8 cos; | 3093 | u8 cos; |
3094 | int rx_ring_size = 0; | ||
3098 | 3095 | ||
3099 | /* if rx_ring_size specified - use it */ | 3096 | /* if rx_ring_size specified - use it */ |
3100 | int rx_ring_size = bp->rx_ring_size ? bp->rx_ring_size : | 3097 | if (!bp->rx_ring_size) { |
3101 | MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp); | ||
3102 | 3098 | ||
3103 | /* allocate at least number of buffers required by FW */ | 3099 | rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp); |
3104 | rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA : | 3100 | |
3105 | MIN_RX_SIZE_TPA, | 3101 | /* allocate at least number of buffers required by FW */ |
3106 | rx_ring_size); | 3102 | rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA : |
3103 | MIN_RX_SIZE_TPA, rx_ring_size); | ||
3104 | |||
3105 | bp->rx_ring_size = rx_ring_size; | ||
3106 | } else | ||
3107 | rx_ring_size = bp->rx_ring_size; | ||
3107 | 3108 | ||
3108 | /* Common */ | 3109 | /* Common */ |
3109 | sb = &bnx2x_fp(bp, index, status_blk); | 3110 | sb = &bnx2x_fp(bp, index, status_blk); |
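Pulled out of the driver, the new ring-size selection above reduces to: honour a user-supplied size, otherwise divide MAX_RX_AVAIL across the RX queues, clamp to the firmware minimum for the current TPA mode, and cache the result in bp->rx_ring_size. The constants in this sketch are placeholders, not the driver's macros.

```c
/* Sketch of the rx_ring_size selection added above; the minima and
 * MAX_RX_AVAIL below are placeholder values, not the real macros. */
#include <stdio.h>

#define MAX_RX_AVAIL        8000   /* placeholder */
#define MIN_RX_SIZE_TPA     1500   /* placeholder */
#define MIN_RX_SIZE_NONTPA  128    /* placeholder */

static int pick_rx_ring_size(int *cached, int num_rx_queues, int disable_tpa)
{
	int rx_ring_size;
	int fw_min = disable_tpa ? MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA;

	if (!*cached) {
		rx_ring_size = MAX_RX_AVAIL / num_rx_queues;

		/* never go below what the FW needs for this TPA mode */
		if (rx_ring_size < fw_min)
			rx_ring_size = fw_min;

		*cached = rx_ring_size;   /* remembered for later queries */
	} else {
		rx_ring_size = *cached;
	}
	return rx_ring_size;
}

int main(void)
{
	int cached = 0;

	printf("first call: %d\n", pick_rx_ring_size(&cached, 8, 0));
	printf("cached:     %d\n", pick_rx_ring_size(&cached, 8, 0));
	return 0;
}
```

Caching the value is also what lets the simplified bnx2x_get_ringparam() further down in this diff report bp->rx_ring_size or MAX_RX_AVAIL directly instead of recomputing a per-queue estimate.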
diff --git a/drivers/net/bnx2x/bnx2x_dcb.c b/drivers/net/bnx2x/bnx2x_dcb.c
index a1e004a82f7a..0b4acf67e0c6 100644
--- a/drivers/net/bnx2x/bnx2x_dcb.c
+++ b/drivers/net/bnx2x/bnx2x_dcb.c
@@ -2120,6 +2120,7 @@ static u8 bnx2x_dcbnl_get_cap(struct net_device *netdev, int capid, u8 *cap)
2120 | break; | 2120 | break; |
2121 | case DCB_CAP_ATTR_DCBX: | 2121 | case DCB_CAP_ATTR_DCBX: |
2122 | *cap = BNX2X_DCBX_CAPS; | 2122 | *cap = BNX2X_DCBX_CAPS; |
2123 | break; | ||
2123 | default: | 2124 | default: |
2124 | rval = -EINVAL; | 2125 | rval = -EINVAL; |
2125 | break; | 2126 | break; |
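The one-line bnx2x_dcb.c change above is a missing-break fix: without it, a DCB_CAP_ATTR_DCBX query fell through into the default case and returned an error even though *cap had already been filled in. A reduced reproduction (the attribute ids and capability value here are made up):

```c
/* Reduced reproduction of the fall-through fixed above; the attribute
 * ids and capability value are made up for illustration. */
#include <stdio.h>

enum { CAP_ATTR_PG, CAP_ATTR_DCBX };

static int get_cap(int capid, unsigned char *cap)
{
	int rval = 0;

	switch (capid) {
	case CAP_ATTR_PG:
		*cap = 1;
		break;
	case CAP_ATTR_DCBX:
		*cap = 0x4;
		break;      /* without this break: falls into default, rval = -1 */
	default:
		rval = -1;  /* stands in for -EINVAL */
		break;
	}
	return rval;
}

int main(void)
{
	unsigned char cap = 0;

	printf("rval=%d cap=0x%x\n", get_cap(CAP_ATTR_DCBX, &cap), cap);
	return 0;
}
```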
diff --git a/drivers/net/bnx2x/bnx2x_ethtool.c b/drivers/net/bnx2x/bnx2x_ethtool.c
index 221863059dae..cf3e47914dd7 100644
--- a/drivers/net/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/bnx2x/bnx2x_ethtool.c
@@ -363,13 +363,50 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
363 | } | 363 | } |
364 | 364 | ||
365 | /* advertise the requested speed and duplex if supported */ | 365 | /* advertise the requested speed and duplex if supported */ |
366 | cmd->advertising &= bp->port.supported[cfg_idx]; | 366 | if (cmd->advertising & ~(bp->port.supported[cfg_idx])) { |
367 | DP(NETIF_MSG_LINK, "Advertisement parameters " | ||
368 | "are not supported\n"); | ||
369 | return -EINVAL; | ||
370 | } | ||
367 | 371 | ||
368 | bp->link_params.req_line_speed[cfg_idx] = SPEED_AUTO_NEG; | 372 | bp->link_params.req_line_speed[cfg_idx] = SPEED_AUTO_NEG; |
369 | bp->link_params.req_duplex[cfg_idx] = DUPLEX_FULL; | 373 | bp->link_params.req_duplex[cfg_idx] = cmd->duplex; |
370 | bp->port.advertising[cfg_idx] |= (ADVERTISED_Autoneg | | 374 | bp->port.advertising[cfg_idx] = (ADVERTISED_Autoneg | |
371 | cmd->advertising); | 375 | cmd->advertising); |
376 | if (cmd->advertising) { | ||
377 | |||
378 | bp->link_params.speed_cap_mask[cfg_idx] = 0; | ||
379 | if (cmd->advertising & ADVERTISED_10baseT_Half) { | ||
380 | bp->link_params.speed_cap_mask[cfg_idx] |= | ||
381 | PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF; | ||
382 | } | ||
383 | if (cmd->advertising & ADVERTISED_10baseT_Full) | ||
384 | bp->link_params.speed_cap_mask[cfg_idx] |= | ||
385 | PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL; | ||
372 | 386 | ||
387 | if (cmd->advertising & ADVERTISED_100baseT_Full) | ||
388 | bp->link_params.speed_cap_mask[cfg_idx] |= | ||
389 | PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL; | ||
390 | |||
391 | if (cmd->advertising & ADVERTISED_100baseT_Half) { | ||
392 | bp->link_params.speed_cap_mask[cfg_idx] |= | ||
393 | PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF; | ||
394 | } | ||
395 | if (cmd->advertising & ADVERTISED_1000baseT_Half) { | ||
396 | bp->link_params.speed_cap_mask[cfg_idx] |= | ||
397 | PORT_HW_CFG_SPEED_CAPABILITY_D0_1G; | ||
398 | } | ||
399 | if (cmd->advertising & (ADVERTISED_1000baseT_Full | | ||
400 | ADVERTISED_1000baseKX_Full)) | ||
401 | bp->link_params.speed_cap_mask[cfg_idx] |= | ||
402 | PORT_HW_CFG_SPEED_CAPABILITY_D0_1G; | ||
403 | |||
404 | if (cmd->advertising & (ADVERTISED_10000baseT_Full | | ||
405 | ADVERTISED_10000baseKX4_Full | | ||
406 | ADVERTISED_10000baseKR_Full)) | ||
407 | bp->link_params.speed_cap_mask[cfg_idx] |= | ||
408 | PORT_HW_CFG_SPEED_CAPABILITY_D0_10G; | ||
409 | } | ||
373 | } else { /* forced speed */ | 410 | } else { /* forced speed */ |
374 | /* advertise the requested speed and duplex if supported */ | 411 | /* advertise the requested speed and duplex if supported */ |
375 | switch (speed) { | 412 | switch (speed) { |
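The ethtool autoneg path above now rejects unsupported advertising bits outright and translates the accepted ones into the link_params speed-capability mask. The sketch below mirrors that shape with stand-in bit values; the real ADVERTISED_* and PORT_HW_CFG_SPEED_CAPABILITY_* constants are not reproduced here.

```c
/* Shape of the advertising -> speed_cap_mask translation above; the bit
 * values are stand-ins, not the real ethtool/shmem constants. */
#include <stdio.h>

#define ADV_10_FULL    (1u << 0)
#define ADV_100_FULL   (1u << 1)
#define ADV_1000_FULL  (1u << 2)
#define ADV_10000_FULL (1u << 3)

#define CAP_10M_FULL   (1u << 0)
#define CAP_100M_FULL  (1u << 1)
#define CAP_1G         (1u << 2)
#define CAP_10G        (1u << 3)

static int adv_to_cap_mask(unsigned int advertising, unsigned int supported,
			   unsigned int *cap_mask)
{
	if (advertising & ~supported)
		return -1;                 /* -EINVAL in the driver */

	*cap_mask = 0;
	if (advertising & ADV_10_FULL)
		*cap_mask |= CAP_10M_FULL;
	if (advertising & ADV_100_FULL)
		*cap_mask |= CAP_100M_FULL;
	if (advertising & ADV_1000_FULL)
		*cap_mask |= CAP_1G;
	if (advertising & ADV_10000_FULL)
		*cap_mask |= CAP_10G;
	return 0;
}

int main(void)
{
	unsigned int mask;
	int rc = adv_to_cap_mask(ADV_1000_FULL | ADV_10000_FULL,
				 ADV_100_FULL | ADV_1000_FULL | ADV_10000_FULL,
				 &mask);

	printf("rc=%d mask=0x%x\n", rc, mask);
	return 0;
}
```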
@@ -1310,10 +1347,7 @@ static void bnx2x_get_ringparam(struct net_device *dev, | |||
1310 | if (bp->rx_ring_size) | 1347 | if (bp->rx_ring_size) |
1311 | ering->rx_pending = bp->rx_ring_size; | 1348 | ering->rx_pending = bp->rx_ring_size; |
1312 | else | 1349 | else |
1313 | if (bp->state == BNX2X_STATE_OPEN && bp->num_queues) | 1350 | ering->rx_pending = MAX_RX_AVAIL; |
1314 | ering->rx_pending = MAX_RX_AVAIL/bp->num_queues; | ||
1315 | else | ||
1316 | ering->rx_pending = MAX_RX_AVAIL; | ||
1317 | 1351 | ||
1318 | ering->rx_mini_pending = 0; | 1352 | ering->rx_mini_pending = 0; |
1319 | ering->rx_jumbo_pending = 0; | 1353 | ering->rx_jumbo_pending = 0; |
diff --git a/drivers/net/bnx2x/bnx2x_link.c b/drivers/net/bnx2x/bnx2x_link.c
index d45b1555a602..ba15bdc5a1a9 100644
--- a/drivers/net/bnx2x/bnx2x_link.c
+++ b/drivers/net/bnx2x/bnx2x_link.c
@@ -778,9 +778,9 @@ static int bnx2x_ets_e3b0_set_cos_bw(struct bnx2x *bp,
778 | { | 778 | { |
779 | u32 nig_reg_adress_crd_weight = 0; | 779 | u32 nig_reg_adress_crd_weight = 0; |
780 | u32 pbf_reg_adress_crd_weight = 0; | 780 | u32 pbf_reg_adress_crd_weight = 0; |
781 | /* Calculate and set BW for this COS*/ | 781 | /* Calculate and set BW for this COS - use 1 instead of 0 for BW */ |
782 | const u32 cos_bw_nig = (bw * min_w_val_nig) / total_bw; | 782 | const u32 cos_bw_nig = ((bw ? bw : 1) * min_w_val_nig) / total_bw; |
783 | const u32 cos_bw_pbf = (bw * min_w_val_pbf) / total_bw; | 783 | const u32 cos_bw_pbf = ((bw ? bw : 1) * min_w_val_pbf) / total_bw; |
784 | 784 | ||
785 | switch (cos_entry) { | 785 | switch (cos_entry) { |
786 | case 0: | 786 | case 0: |
@@ -852,18 +852,12 @@ static int bnx2x_ets_e3b0_get_total_bw( | |||
852 | /* Calculate total BW requested */ | 852 | /* Calculate total BW requested */ |
853 | for (cos_idx = 0; cos_idx < ets_params->num_of_cos; cos_idx++) { | 853 | for (cos_idx = 0; cos_idx < ets_params->num_of_cos; cos_idx++) { |
854 | if (bnx2x_cos_state_bw == ets_params->cos[cos_idx].state) { | 854 | if (bnx2x_cos_state_bw == ets_params->cos[cos_idx].state) { |
855 | 855 | *total_bw += | |
856 | if (0 == ets_params->cos[cos_idx].params.bw_params.bw) { | 856 | ets_params->cos[cos_idx].params.bw_params.bw; |
857 | DP(NETIF_MSG_LINK, "bnx2x_ets_E3B0_config BW" | ||
858 | "was set to 0\n"); | ||
859 | return -EINVAL; | ||
860 | } | 857 | } |
861 | *total_bw += | ||
862 | ets_params->cos[cos_idx].params.bw_params.bw; | ||
863 | } | ||
864 | } | 858 | } |
865 | 859 | ||
866 | /*Check taotl BW is valid */ | 860 | /* Check total BW is valid */ |
867 | if ((100 != *total_bw) || (0 == *total_bw)) { | 861 | if ((100 != *total_bw) || (0 == *total_bw)) { |
868 | if (0 == *total_bw) { | 862 | if (0 == *total_bw) { |
869 | DP(NETIF_MSG_LINK, "bnx2x_ets_E3B0_config toatl BW" | 863 | DP(NETIF_MSG_LINK, "bnx2x_ets_E3B0_config toatl BW" |
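Rather than failing when a COS is configured with 0% bandwidth, the ETS code above now feeds a minimum weight of 1 into the credit-weight formula. Numerically that looks like the following; min_w_val and the bandwidth split are illustrative numbers, not values from the driver.

```c
/* Effect of the "use 1 instead of 0 for BW" change above; min_w_val and
 * the bandwidth split are illustrative. */
#include <stdio.h>

static unsigned int cos_credit_weight(unsigned int bw, unsigned int min_w_val,
				      unsigned int total_bw)
{
	/* a 0% COS still gets a small non-zero weight instead of an error */
	return ((bw ? bw : 1) * min_w_val) / total_bw;
}

int main(void)
{
	unsigned int min_w_val = 1024, total_bw = 100;
	unsigned int bws[] = { 0, 30, 70 };
	int i;

	for (i = 0; i < 3; i++)
		printf("bw=%u%% -> weight=%u\n", bws[i],
		       cos_credit_weight(bws[i], min_w_val, total_bw));
	return 0;
}
```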
@@ -1726,7 +1720,7 @@ static int bnx2x_xmac_enable(struct link_params *params, | |||
1726 | 1720 | ||
1727 | /* Check loopback mode */ | 1721 | /* Check loopback mode */ |
1728 | if (lb) | 1722 | if (lb) |
1729 | val |= XMAC_CTRL_REG_CORE_LOCAL_LPBK; | 1723 | val |= XMAC_CTRL_REG_LINE_LOCAL_LPBK; |
1730 | REG_WR(bp, xmac_base + XMAC_REG_CTRL, val); | 1724 | REG_WR(bp, xmac_base + XMAC_REG_CTRL, val); |
1731 | bnx2x_set_xumac_nig(params, | 1725 | bnx2x_set_xumac_nig(params, |
1732 | ((vars->flow_ctrl & BNX2X_FLOW_CTRL_TX) != 0), 1); | 1726 | ((vars->flow_ctrl & BNX2X_FLOW_CTRL_TX) != 0), 1); |
@@ -3630,6 +3624,12 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy, | |||
3630 | bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, | 3624 | bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, |
3631 | MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT1, val16); | 3625 | MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT1, val16); |
3632 | 3626 | ||
3627 | /* Advertised and set FEC (Forward Error Correction) */ | ||
3628 | bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, | ||
3629 | MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT2, | ||
3630 | (MDIO_WC_REG_AN_IEEE1BLK_AN_ADV2_FEC_ABILITY | | ||
3631 | MDIO_WC_REG_AN_IEEE1BLK_AN_ADV2_FEC_REQ)); | ||
3632 | |||
3633 | /* Enable CL37 BAM */ | 3633 | /* Enable CL37 BAM */ |
3634 | if (REG_RD(bp, params->shmem_base + | 3634 | if (REG_RD(bp, params->shmem_base + |
3635 | offsetof(struct shmem_region, dev_info. | 3635 | offsetof(struct shmem_region, dev_info. |
@@ -5924,7 +5924,7 @@ int bnx2x_set_led(struct link_params *params, | |||
5924 | (tmp | EMAC_LED_OVERRIDE)); | 5924 | (tmp | EMAC_LED_OVERRIDE)); |
5925 | /* | 5925 | /* |
5926 | * return here without enabling traffic | 5926 | * return here without enabling traffic |
5927 | * LED blink andsetting rate in ON mode. | 5927 | * LED blink and setting rate in ON mode. |
5928 | * In oper mode, enabling LED blink | 5928 | * In oper mode, enabling LED blink |
5929 | * and setting rate is needed. | 5929 | * and setting rate is needed. |
5930 | */ | 5930 | */ |
@@ -5936,7 +5936,11 @@ int bnx2x_set_led(struct link_params *params, | |||
5936 | * This is a work-around for HW issue found when link | 5936 | * This is a work-around for HW issue found when link |
5937 | * is up in CL73 | 5937 | * is up in CL73 |
5938 | */ | 5938 | */ |
5939 | REG_WR(bp, NIG_REG_LED_10G_P0 + port*4, 1); | 5939 | if ((!CHIP_IS_E3(bp)) || |
5940 | (CHIP_IS_E3(bp) && | ||
5941 | mode == LED_MODE_ON)) | ||
5942 | REG_WR(bp, NIG_REG_LED_10G_P0 + port*4, 1); | ||
5943 | |||
5940 | if (CHIP_IS_E1x(bp) || | 5944 | if (CHIP_IS_E1x(bp) || |
5941 | CHIP_IS_E2(bp) || | 5945 | CHIP_IS_E2(bp) || |
5942 | (mode == LED_MODE_ON)) | 5946 | (mode == LED_MODE_ON)) |
@@ -10638,8 +10642,7 @@ static struct bnx2x_phy phy_warpcore = { | |||
10638 | .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT, | 10642 | .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT, |
10639 | .addr = 0xff, | 10643 | .addr = 0xff, |
10640 | .def_md_devad = 0, | 10644 | .def_md_devad = 0, |
10641 | .flags = (FLAGS_HW_LOCK_REQUIRED | | 10645 | .flags = FLAGS_HW_LOCK_REQUIRED, |
10642 | FLAGS_TX_ERROR_CHECK), | ||
10643 | .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, | 10646 | .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, |
10644 | .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, | 10647 | .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, |
10645 | .mdio_ctrl = 0, | 10648 | .mdio_ctrl = 0, |
@@ -10765,8 +10768,7 @@ static struct bnx2x_phy phy_8706 = { | |||
10765 | .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706, | 10768 | .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706, |
10766 | .addr = 0xff, | 10769 | .addr = 0xff, |
10767 | .def_md_devad = 0, | 10770 | .def_md_devad = 0, |
10768 | .flags = (FLAGS_INIT_XGXS_FIRST | | 10771 | .flags = FLAGS_INIT_XGXS_FIRST, |
10769 | FLAGS_TX_ERROR_CHECK), | ||
10770 | .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, | 10772 | .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, |
10771 | .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, | 10773 | .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, |
10772 | .mdio_ctrl = 0, | 10774 | .mdio_ctrl = 0, |
@@ -10797,8 +10799,7 @@ static struct bnx2x_phy phy_8726 = { | |||
10797 | .addr = 0xff, | 10799 | .addr = 0xff, |
10798 | .def_md_devad = 0, | 10800 | .def_md_devad = 0, |
10799 | .flags = (FLAGS_HW_LOCK_REQUIRED | | 10801 | .flags = (FLAGS_HW_LOCK_REQUIRED | |
10800 | FLAGS_INIT_XGXS_FIRST | | 10802 | FLAGS_INIT_XGXS_FIRST), |
10801 | FLAGS_TX_ERROR_CHECK), | ||
10802 | .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, | 10803 | .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, |
10803 | .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, | 10804 | .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, |
10804 | .mdio_ctrl = 0, | 10805 | .mdio_ctrl = 0, |
@@ -10829,8 +10830,7 @@ static struct bnx2x_phy phy_8727 = { | |||
10829 | .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727, | 10830 | .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727, |
10830 | .addr = 0xff, | 10831 | .addr = 0xff, |
10831 | .def_md_devad = 0, | 10832 | .def_md_devad = 0, |
10832 | .flags = (FLAGS_FAN_FAILURE_DET_REQ | | 10833 | .flags = FLAGS_FAN_FAILURE_DET_REQ, |
10833 | FLAGS_TX_ERROR_CHECK), | ||
10834 | .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, | 10834 | .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, |
10835 | .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, | 10835 | .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, |
10836 | .mdio_ctrl = 0, | 10836 | .mdio_ctrl = 0, |
diff --git a/drivers/net/bnx2x/bnx2x_main.c b/drivers/net/bnx2x/bnx2x_main.c
index f74582a22c68..15f800085bb2 100644
--- a/drivers/net/bnx2x/bnx2x_main.c
+++ b/drivers/net/bnx2x/bnx2x_main.c
@@ -407,8 +407,8 @@ u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type,
407 | opcode |= (DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET); | 407 | opcode |= (DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET); |
408 | 408 | ||
409 | opcode |= (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0); | 409 | opcode |= (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0); |
410 | opcode |= ((BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT) | | 410 | opcode |= ((BP_VN(bp) << DMAE_CMD_E1HVN_SHIFT) | |
411 | (BP_E1HVN(bp) << DMAE_COMMAND_DST_VN_SHIFT)); | 411 | (BP_VN(bp) << DMAE_COMMAND_DST_VN_SHIFT)); |
412 | opcode |= (DMAE_COM_SET_ERR << DMAE_COMMAND_ERR_POLICY_SHIFT); | 412 | opcode |= (DMAE_COM_SET_ERR << DMAE_COMMAND_ERR_POLICY_SHIFT); |
413 | 413 | ||
414 | #ifdef __BIG_ENDIAN | 414 | #ifdef __BIG_ENDIAN |
@@ -1419,7 +1419,7 @@ static void bnx2x_hc_int_enable(struct bnx2x *bp) | |||
1419 | if (!CHIP_IS_E1(bp)) { | 1419 | if (!CHIP_IS_E1(bp)) { |
1420 | /* init leading/trailing edge */ | 1420 | /* init leading/trailing edge */ |
1421 | if (IS_MF(bp)) { | 1421 | if (IS_MF(bp)) { |
1422 | val = (0xee0f | (1 << (BP_E1HVN(bp) + 4))); | 1422 | val = (0xee0f | (1 << (BP_VN(bp) + 4))); |
1423 | if (bp->port.pmf) | 1423 | if (bp->port.pmf) |
1424 | /* enable nig and gpio3 attention */ | 1424 | /* enable nig and gpio3 attention */ |
1425 | val |= 0x1100; | 1425 | val |= 0x1100; |
@@ -1471,7 +1471,7 @@ static void bnx2x_igu_int_enable(struct bnx2x *bp) | |||
1471 | 1471 | ||
1472 | /* init leading/trailing edge */ | 1472 | /* init leading/trailing edge */ |
1473 | if (IS_MF(bp)) { | 1473 | if (IS_MF(bp)) { |
1474 | val = (0xee0f | (1 << (BP_E1HVN(bp) + 4))); | 1474 | val = (0xee0f | (1 << (BP_VN(bp) + 4))); |
1475 | if (bp->port.pmf) | 1475 | if (bp->port.pmf) |
1476 | /* enable nig and gpio3 attention */ | 1476 | /* enable nig and gpio3 attention */ |
1477 | val |= 0x1100; | 1477 | val |= 0x1100; |
@@ -2287,7 +2287,7 @@ static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp) | |||
2287 | int vn; | 2287 | int vn; |
2288 | 2288 | ||
2289 | bp->vn_weight_sum = 0; | 2289 | bp->vn_weight_sum = 0; |
2290 | for (vn = VN_0; vn < E1HVN_MAX; vn++) { | 2290 | for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) { |
2291 | u32 vn_cfg = bp->mf_config[vn]; | 2291 | u32 vn_cfg = bp->mf_config[vn]; |
2292 | u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >> | 2292 | u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >> |
2293 | FUNC_MF_CFG_MIN_BW_SHIFT) * 100; | 2293 | FUNC_MF_CFG_MIN_BW_SHIFT) * 100; |
@@ -2320,12 +2320,18 @@ static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp) | |||
2320 | CMNG_FLAGS_PER_PORT_FAIRNESS_VN; | 2320 | CMNG_FLAGS_PER_PORT_FAIRNESS_VN; |
2321 | } | 2321 | } |
2322 | 2322 | ||
2323 | /* returns func by VN for current port */ | ||
2324 | static inline int func_by_vn(struct bnx2x *bp, int vn) | ||
2325 | { | ||
2326 | return 2 * vn + BP_PORT(bp); | ||
2327 | } | ||
2328 | |||
2323 | static void bnx2x_init_vn_minmax(struct bnx2x *bp, int vn) | 2329 | static void bnx2x_init_vn_minmax(struct bnx2x *bp, int vn) |
2324 | { | 2330 | { |
2325 | struct rate_shaping_vars_per_vn m_rs_vn; | 2331 | struct rate_shaping_vars_per_vn m_rs_vn; |
2326 | struct fairness_vars_per_vn m_fair_vn; | 2332 | struct fairness_vars_per_vn m_fair_vn; |
2327 | u32 vn_cfg = bp->mf_config[vn]; | 2333 | u32 vn_cfg = bp->mf_config[vn]; |
2328 | int func = 2*vn + BP_PORT(bp); | 2334 | int func = func_by_vn(bp, vn); |
2329 | u16 vn_min_rate, vn_max_rate; | 2335 | u16 vn_min_rate, vn_max_rate; |
2330 | int i; | 2336 | int i; |
2331 | 2337 | ||
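The new func_by_vn() helper above interleaves VNs across the two functions of a port (2 * vn + port); a quick table of its output for both ports:

```c
/* Quick table of the new func_by_vn() mapping (2 * vn + port). */
#include <stdio.h>

static int func_by_vn(int port, int vn)
{
	return 2 * vn + port;
}

int main(void)
{
	int port, vn;

	for (port = 0; port < 2; port++)
		for (vn = 0; vn < 4; vn++)
			printf("port=%d vn=%d -> func=%d\n", port, vn,
			       func_by_vn(port, vn));
	return 0;
}
```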
@@ -2422,7 +2428,7 @@ void bnx2x_read_mf_cfg(struct bnx2x *bp) | |||
2422 | * | 2428 | * |
2423 | * and there are 2 functions per port | 2429 | * and there are 2 functions per port |
2424 | */ | 2430 | */ |
2425 | for (vn = VN_0; vn < E1HVN_MAX; vn++) { | 2431 | for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) { |
2426 | int /*abs*/func = n * (2 * vn + BP_PORT(bp)) + BP_PATH(bp); | 2432 | int /*abs*/func = n * (2 * vn + BP_PORT(bp)) + BP_PATH(bp); |
2427 | 2433 | ||
2428 | if (func >= E1H_FUNC_MAX) | 2434 | if (func >= E1H_FUNC_MAX) |
@@ -2454,7 +2460,7 @@ static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type) | |||
2454 | 2460 | ||
2455 | /* calculate and set min-max rate for each vn */ | 2461 | /* calculate and set min-max rate for each vn */ |
2456 | if (bp->port.pmf) | 2462 | if (bp->port.pmf) |
2457 | for (vn = VN_0; vn < E1HVN_MAX; vn++) | 2463 | for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) |
2458 | bnx2x_init_vn_minmax(bp, vn); | 2464 | bnx2x_init_vn_minmax(bp, vn); |
2459 | 2465 | ||
2460 | /* always enable rate shaping and fairness */ | 2466 | /* always enable rate shaping and fairness */ |
@@ -2473,16 +2479,15 @@ static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type) | |||
2473 | 2479 | ||
2474 | static inline void bnx2x_link_sync_notify(struct bnx2x *bp) | 2480 | static inline void bnx2x_link_sync_notify(struct bnx2x *bp) |
2475 | { | 2481 | { |
2476 | int port = BP_PORT(bp); | ||
2477 | int func; | 2482 | int func; |
2478 | int vn; | 2483 | int vn; |
2479 | 2484 | ||
2480 | /* Set the attention towards other drivers on the same port */ | 2485 | /* Set the attention towards other drivers on the same port */ |
2481 | for (vn = VN_0; vn < E1HVN_MAX; vn++) { | 2486 | for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) { |
2482 | if (vn == BP_E1HVN(bp)) | 2487 | if (vn == BP_VN(bp)) |
2483 | continue; | 2488 | continue; |
2484 | 2489 | ||
2485 | func = ((vn << 1) | port); | 2490 | func = func_by_vn(bp, vn); |
2486 | REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 + | 2491 | REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 + |
2487 | (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1); | 2492 | (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1); |
2488 | } | 2493 | } |
@@ -2577,7 +2582,7 @@ static void bnx2x_pmf_update(struct bnx2x *bp) | |||
2577 | bnx2x_dcbx_pmf_update(bp); | 2582 | bnx2x_dcbx_pmf_update(bp); |
2578 | 2583 | ||
2579 | /* enable nig attention */ | 2584 | /* enable nig attention */ |
2580 | val = (0xff0f | (1 << (BP_E1HVN(bp) + 4))); | 2585 | val = (0xff0f | (1 << (BP_VN(bp) + 4))); |
2581 | if (bp->common.int_block == INT_BLOCK_HC) { | 2586 | if (bp->common.int_block == INT_BLOCK_HC) { |
2582 | REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val); | 2587 | REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val); |
2583 | REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val); | 2588 | REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val); |
@@ -2756,8 +2761,14 @@ static void bnx2x_pf_rx_q_prep(struct bnx2x *bp, | |||
2756 | u16 tpa_agg_size = 0; | 2761 | u16 tpa_agg_size = 0; |
2757 | 2762 | ||
2758 | if (!fp->disable_tpa) { | 2763 | if (!fp->disable_tpa) { |
2759 | pause->sge_th_hi = 250; | 2764 | pause->sge_th_lo = SGE_TH_LO(bp); |
2760 | pause->sge_th_lo = 150; | 2765 | pause->sge_th_hi = SGE_TH_HI(bp); |
2766 | |||
2767 | /* validate SGE ring has enough to cross high threshold */ | ||
2768 | WARN_ON(bp->dropless_fc && | ||
2769 | pause->sge_th_hi + FW_PREFETCH_CNT > | ||
2770 | MAX_RX_SGE_CNT * NUM_RX_SGE_PAGES); | ||
2771 | |||
2761 | tpa_agg_size = min_t(u32, | 2772 | tpa_agg_size = min_t(u32, |
2762 | (min_t(u32, 8, MAX_SKB_FRAGS) * | 2773 | (min_t(u32, 8, MAX_SKB_FRAGS) * |
2763 | SGE_PAGE_SIZE * PAGES_PER_SGE), 0xffff); | 2774 | SGE_PAGE_SIZE * PAGES_PER_SGE), 0xffff); |
@@ -2771,10 +2782,21 @@ static void bnx2x_pf_rx_q_prep(struct bnx2x *bp, | |||
2771 | 2782 | ||
2772 | /* pause - not for e1 */ | 2783 | /* pause - not for e1 */ |
2773 | if (!CHIP_IS_E1(bp)) { | 2784 | if (!CHIP_IS_E1(bp)) { |
2774 | pause->bd_th_hi = 350; | 2785 | pause->bd_th_lo = BD_TH_LO(bp); |
2775 | pause->bd_th_lo = 250; | 2786 | pause->bd_th_hi = BD_TH_HI(bp); |
2776 | pause->rcq_th_hi = 350; | 2787 | |
2777 | pause->rcq_th_lo = 250; | 2788 | pause->rcq_th_lo = RCQ_TH_LO(bp); |
2789 | pause->rcq_th_hi = RCQ_TH_HI(bp); | ||
2790 | /* | ||
2791 | * validate that rings have enough entries to cross | ||
2792 | * high thresholds | ||
2793 | */ | ||
2794 | WARN_ON(bp->dropless_fc && | ||
2795 | pause->bd_th_hi + FW_PREFETCH_CNT > | ||
2796 | bp->rx_ring_size); | ||
2797 | WARN_ON(bp->dropless_fc && | ||
2798 | pause->rcq_th_hi + FW_PREFETCH_CNT > | ||
2799 | NUM_RCQ_RINGS * MAX_RCQ_DESC_CNT); | ||
2778 | 2800 | ||
2779 | pause->pri_map = 1; | 2801 | pause->pri_map = 1; |
2780 | } | 2802 | } |
@@ -2802,9 +2824,7 @@ static void bnx2x_pf_rx_q_prep(struct bnx2x *bp, | |||
2802 | * For PF Clients it should be the maximum avaliable number. | 2824 | * For PF Clients it should be the maximum avaliable number. |
2803 | * VF driver(s) may want to define it to a smaller value. | 2825 | * VF driver(s) may want to define it to a smaller value. |
2804 | */ | 2826 | */ |
2805 | rxq_init->max_tpa_queues = | 2827 | rxq_init->max_tpa_queues = MAX_AGG_QS(bp); |
2806 | (CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 : | ||
2807 | ETH_MAX_AGGREGATION_QUEUES_E1H_E2); | ||
2808 | 2828 | ||
2809 | rxq_init->cache_line_log = BNX2X_RX_ALIGN_SHIFT; | 2829 | rxq_init->cache_line_log = BNX2X_RX_ALIGN_SHIFT; |
2810 | rxq_init->fw_sb_id = fp->fw_sb_id; | 2830 | rxq_init->fw_sb_id = fp->fw_sb_id; |
@@ -4808,6 +4828,37 @@ void bnx2x_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm, | |||
4808 | hc_sm->time_to_expire = 0xFFFFFFFF; | 4828 | hc_sm->time_to_expire = 0xFFFFFFFF; |
4809 | } | 4829 | } |
4810 | 4830 | ||
4831 | |||
4832 | /* allocates state machine ids. */ | ||
4833 | static inline | ||
4834 | void bnx2x_map_sb_state_machines(struct hc_index_data *index_data) | ||
4835 | { | ||
4836 | /* zero out state machine indices */ | ||
4837 | /* rx indices */ | ||
4838 | index_data[HC_INDEX_ETH_RX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID; | ||
4839 | |||
4840 | /* tx indices */ | ||
4841 | index_data[HC_INDEX_OOO_TX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID; | ||
4842 | index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags &= ~HC_INDEX_DATA_SM_ID; | ||
4843 | index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags &= ~HC_INDEX_DATA_SM_ID; | ||
4844 | index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags &= ~HC_INDEX_DATA_SM_ID; | ||
4845 | |||
4846 | /* map indices */ | ||
4847 | /* rx indices */ | ||
4848 | index_data[HC_INDEX_ETH_RX_CQ_CONS].flags |= | ||
4849 | SM_RX_ID << HC_INDEX_DATA_SM_ID_SHIFT; | ||
4850 | |||
4851 | /* tx indices */ | ||
4852 | index_data[HC_INDEX_OOO_TX_CQ_CONS].flags |= | ||
4853 | SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT; | ||
4854 | index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags |= | ||
4855 | SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT; | ||
4856 | index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags |= | ||
4857 | SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT; | ||
4858 | index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags |= | ||
4859 | SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT; | ||
4860 | } | ||
4861 | |||
4811 | static void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid, | 4862 | static void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid, |
4812 | u8 vf_valid, int fw_sb_id, int igu_sb_id) | 4863 | u8 vf_valid, int fw_sb_id, int igu_sb_id) |
4813 | { | 4864 | { |
@@ -4839,6 +4890,7 @@ static void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid, | |||
4839 | hc_sm_p = sb_data_e2.common.state_machine; | 4890 | hc_sm_p = sb_data_e2.common.state_machine; |
4840 | sb_data_p = (u32 *)&sb_data_e2; | 4891 | sb_data_p = (u32 *)&sb_data_e2; |
4841 | data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32); | 4892 | data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32); |
4893 | bnx2x_map_sb_state_machines(sb_data_e2.index_data); | ||
4842 | } else { | 4894 | } else { |
4843 | memset(&sb_data_e1x, 0, | 4895 | memset(&sb_data_e1x, 0, |
4844 | sizeof(struct hc_status_block_data_e1x)); | 4896 | sizeof(struct hc_status_block_data_e1x)); |
@@ -4853,6 +4905,7 @@ static void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid, | |||
4853 | hc_sm_p = sb_data_e1x.common.state_machine; | 4905 | hc_sm_p = sb_data_e1x.common.state_machine; |
4854 | sb_data_p = (u32 *)&sb_data_e1x; | 4906 | sb_data_p = (u32 *)&sb_data_e1x; |
4855 | data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32); | 4907 | data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32); |
4908 | bnx2x_map_sb_state_machines(sb_data_e1x.index_data); | ||
4856 | } | 4909 | } |
4857 | 4910 | ||
4858 | bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_RX_ID], | 4911 | bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_RX_ID], |
@@ -4890,7 +4943,7 @@ static void bnx2x_init_def_sb(struct bnx2x *bp) | |||
4890 | int igu_seg_id; | 4943 | int igu_seg_id; |
4891 | int port = BP_PORT(bp); | 4944 | int port = BP_PORT(bp); |
4892 | int func = BP_FUNC(bp); | 4945 | int func = BP_FUNC(bp); |
4893 | int reg_offset; | 4946 | int reg_offset, reg_offset_en5; |
4894 | u64 section; | 4947 | u64 section; |
4895 | int index; | 4948 | int index; |
4896 | struct hc_sp_status_block_data sp_sb_data; | 4949 | struct hc_sp_status_block_data sp_sb_data; |
@@ -4913,6 +4966,8 @@ static void bnx2x_init_def_sb(struct bnx2x *bp) | |||
4913 | 4966 | ||
4914 | reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 : | 4967 | reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 : |
4915 | MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0); | 4968 | MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0); |
4969 | reg_offset_en5 = (port ? MISC_REG_AEU_ENABLE5_FUNC_1_OUT_0 : | ||
4970 | MISC_REG_AEU_ENABLE5_FUNC_0_OUT_0); | ||
4916 | for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) { | 4971 | for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) { |
4917 | int sindex; | 4972 | int sindex; |
4918 | /* take care of sig[0]..sig[4] */ | 4973 | /* take care of sig[0]..sig[4] */ |
@@ -4927,7 +4982,7 @@ static void bnx2x_init_def_sb(struct bnx2x *bp) | |||
4927 | * and not 16 between the different groups | 4982 | * and not 16 between the different groups |
4928 | */ | 4983 | */ |
4929 | bp->attn_group[index].sig[4] = REG_RD(bp, | 4984 | bp->attn_group[index].sig[4] = REG_RD(bp, |
4930 | reg_offset + 0x10 + 0x4*index); | 4985 | reg_offset_en5 + 0x4*index); |
4931 | else | 4986 | else |
4932 | bp->attn_group[index].sig[4] = 0; | 4987 | bp->attn_group[index].sig[4] = 0; |
4933 | } | 4988 | } |
@@ -5802,7 +5857,7 @@ static int bnx2x_init_hw_common(struct bnx2x *bp) | |||
5802 | * take the UNDI lock to protect undi_unload flow from accessing | 5857 | * take the UNDI lock to protect undi_unload flow from accessing |
5803 | * registers while we're resetting the chip | 5858 | * registers while we're resetting the chip |
5804 | */ | 5859 | */ |
5805 | bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI); | 5860 | bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET); |
5806 | 5861 | ||
5807 | bnx2x_reset_common(bp); | 5862 | bnx2x_reset_common(bp); |
5808 | REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff); | 5863 | REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff); |
@@ -5814,7 +5869,7 @@ static int bnx2x_init_hw_common(struct bnx2x *bp) | |||
5814 | } | 5869 | } |
5815 | REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, val); | 5870 | REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, val); |
5816 | 5871 | ||
5817 | bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI); | 5872 | bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESET); |
5818 | 5873 | ||
5819 | bnx2x_init_block(bp, BLOCK_MISC, PHASE_COMMON); | 5874 | bnx2x_init_block(bp, BLOCK_MISC, PHASE_COMMON); |
5820 | 5875 | ||
@@ -6671,12 +6726,16 @@ static int bnx2x_init_hw_func(struct bnx2x *bp) | |||
6671 | if (CHIP_MODE_IS_4_PORT(bp)) | 6726 | if (CHIP_MODE_IS_4_PORT(bp)) |
6672 | dsb_idx = BP_FUNC(bp); | 6727 | dsb_idx = BP_FUNC(bp); |
6673 | else | 6728 | else |
6674 | dsb_idx = BP_E1HVN(bp); | 6729 | dsb_idx = BP_VN(bp); |
6675 | 6730 | ||
6676 | prod_offset = (CHIP_INT_MODE_IS_BC(bp) ? | 6731 | prod_offset = (CHIP_INT_MODE_IS_BC(bp) ? |
6677 | IGU_BC_BASE_DSB_PROD + dsb_idx : | 6732 | IGU_BC_BASE_DSB_PROD + dsb_idx : |
6678 | IGU_NORM_BASE_DSB_PROD + dsb_idx); | 6733 | IGU_NORM_BASE_DSB_PROD + dsb_idx); |
6679 | 6734 | ||
6735 | /* | ||
6736 | * igu prods come in chunks of E1HVN_MAX (4) - | ||
6737 | * does not matters what is the current chip mode | ||
6738 | */ | ||
6680 | for (i = 0; i < (num_segs * E1HVN_MAX); | 6739 | for (i = 0; i < (num_segs * E1HVN_MAX); |
6681 | i += E1HVN_MAX) { | 6740 | i += E1HVN_MAX) { |
6682 | addr = IGU_REG_PROD_CONS_MEMORY + | 6741 | addr = IGU_REG_PROD_CONS_MEMORY + |
@@ -7568,9 +7627,12 @@ u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode) | |||
7568 | u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0; | 7627 | u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0; |
7569 | u8 *mac_addr = bp->dev->dev_addr; | 7628 | u8 *mac_addr = bp->dev->dev_addr; |
7570 | u32 val; | 7629 | u32 val; |
7630 | u16 pmc; | ||
7631 | |||
7571 | /* The mac address is written to entries 1-4 to | 7632 | /* The mac address is written to entries 1-4 to |
7572 | preserve entry 0 which is used by the PMF */ | 7633 | * preserve entry 0 which is used by the PMF |
7573 | u8 entry = (BP_E1HVN(bp) + 1)*8; | 7634 | */ |
7635 | u8 entry = (BP_VN(bp) + 1)*8; | ||
7574 | 7636 | ||
7575 | val = (mac_addr[0] << 8) | mac_addr[1]; | 7637 | val = (mac_addr[0] << 8) | mac_addr[1]; |
7576 | EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val); | 7638 | EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val); |
@@ -7579,6 +7641,11 @@ u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode) | |||
7579 | (mac_addr[4] << 8) | mac_addr[5]; | 7641 | (mac_addr[4] << 8) | mac_addr[5]; |
7580 | EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val); | 7642 | EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val); |
7581 | 7643 | ||
7644 | /* Enable the PME and clear the status */ | ||
7645 | pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmc); | ||
7646 | pmc |= PCI_PM_CTRL_PME_ENABLE | PCI_PM_CTRL_PME_STATUS; | ||
7647 | pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, pmc); | ||
7648 | |||
7582 | reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN; | 7649 | reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN; |
7583 | 7650 | ||
7584 | } else | 7651 | } else |
@@ -8546,10 +8613,12 @@ static void __devinit bnx2x_undi_unload(struct bnx2x *bp) | |||
8546 | /* Check if there is any driver already loaded */ | 8613 | /* Check if there is any driver already loaded */ |
8547 | val = REG_RD(bp, MISC_REG_UNPREPARED); | 8614 | val = REG_RD(bp, MISC_REG_UNPREPARED); |
8548 | if (val == 0x1) { | 8615 | if (val == 0x1) { |
8549 | /* Check if it is the UNDI driver | 8616 | |
8617 | bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET); | ||
8618 | /* | ||
8619 | * Check if it is the UNDI driver | ||
8550 | * UNDI driver initializes CID offset for normal bell to 0x7 | 8620 | * UNDI driver initializes CID offset for normal bell to 0x7 |
8551 | */ | 8621 | */ |
8552 | bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI); | ||
8553 | val = REG_RD(bp, DORQ_REG_NORM_CID_OFST); | 8622 | val = REG_RD(bp, DORQ_REG_NORM_CID_OFST); |
8554 | if (val == 0x7) { | 8623 | if (val == 0x7) { |
8555 | u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS; | 8624 | u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS; |
@@ -8587,9 +8656,6 @@ static void __devinit bnx2x_undi_unload(struct bnx2x *bp) | |||
8587 | bnx2x_fw_command(bp, reset_code, 0); | 8656 | bnx2x_fw_command(bp, reset_code, 0); |
8588 | } | 8657 | } |
8589 | 8658 | ||
8590 | /* now it's safe to release the lock */ | ||
8591 | bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI); | ||
8592 | |||
8593 | bnx2x_undi_int_disable(bp); | 8659 | bnx2x_undi_int_disable(bp); |
8594 | port = BP_PORT(bp); | 8660 | port = BP_PORT(bp); |
8595 | 8661 | ||
@@ -8639,8 +8705,10 @@ static void __devinit bnx2x_undi_unload(struct bnx2x *bp) | |||
8639 | bp->fw_seq = | 8705 | bp->fw_seq = |
8640 | (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) & | 8706 | (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) & |
8641 | DRV_MSG_SEQ_NUMBER_MASK); | 8707 | DRV_MSG_SEQ_NUMBER_MASK); |
8642 | } else | 8708 | } |
8643 | bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI); | 8709 | |
8710 | /* now it's safe to release the lock */ | ||
8711 | bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESET); | ||
8644 | } | 8712 | } |
8645 | } | 8713 | } |
8646 | 8714 | ||
@@ -8777,13 +8845,13 @@ static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp) | |||
8777 | static void __devinit bnx2x_get_igu_cam_info(struct bnx2x *bp) | 8845 | static void __devinit bnx2x_get_igu_cam_info(struct bnx2x *bp) |
8778 | { | 8846 | { |
8779 | int pfid = BP_FUNC(bp); | 8847 | int pfid = BP_FUNC(bp); |
8780 | int vn = BP_E1HVN(bp); | ||
8781 | int igu_sb_id; | 8848 | int igu_sb_id; |
8782 | u32 val; | 8849 | u32 val; |
8783 | u8 fid, igu_sb_cnt = 0; | 8850 | u8 fid, igu_sb_cnt = 0; |
8784 | 8851 | ||
8785 | bp->igu_base_sb = 0xff; | 8852 | bp->igu_base_sb = 0xff; |
8786 | if (CHIP_INT_MODE_IS_BC(bp)) { | 8853 | if (CHIP_INT_MODE_IS_BC(bp)) { |
8854 | int vn = BP_VN(bp); | ||
8787 | igu_sb_cnt = bp->igu_sb_cnt; | 8855 | igu_sb_cnt = bp->igu_sb_cnt; |
8788 | bp->igu_base_sb = (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn) * | 8856 | bp->igu_base_sb = (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn) * |
8789 | FP_SB_MAX_E1x; | 8857 | FP_SB_MAX_E1x; |
@@ -9416,6 +9484,10 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp) | |||
9416 | bp->igu_base_sb = 0; | 9484 | bp->igu_base_sb = 0; |
9417 | } else { | 9485 | } else { |
9418 | bp->common.int_block = INT_BLOCK_IGU; | 9486 | bp->common.int_block = INT_BLOCK_IGU; |
9487 | |||
9488 | /* do not allow device reset during IGU info preocessing */ | ||
9489 | bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET); | ||
9490 | |||
9419 | val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION); | 9491 | val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION); |
9420 | 9492 | ||
9421 | if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) { | 9493 | if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) { |
@@ -9447,6 +9519,7 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp) | |||
9447 | 9519 | ||
9448 | bnx2x_get_igu_cam_info(bp); | 9520 | bnx2x_get_igu_cam_info(bp); |
9449 | 9521 | ||
9522 | bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESET); | ||
9450 | } | 9523 | } |
9451 | 9524 | ||
9452 | /* | 9525 | /* |
@@ -9473,7 +9546,7 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp) | |||
9473 | 9546 | ||
9474 | bp->mf_ov = 0; | 9547 | bp->mf_ov = 0; |
9475 | bp->mf_mode = 0; | 9548 | bp->mf_mode = 0; |
9476 | vn = BP_E1HVN(bp); | 9549 | vn = BP_VN(bp); |
9477 | 9550 | ||
9478 | if (!CHIP_IS_E1(bp) && !BP_NOMCP(bp)) { | 9551 | if (!CHIP_IS_E1(bp) && !BP_NOMCP(bp)) { |
9479 | BNX2X_DEV_INFO("shmem2base 0x%x, size %d, mfcfg offset %d\n", | 9552 | BNX2X_DEV_INFO("shmem2base 0x%x, size %d, mfcfg offset %d\n", |
@@ -9593,13 +9666,6 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp) | |||
9593 | /* port info */ | 9666 | /* port info */ |
9594 | bnx2x_get_port_hwinfo(bp); | 9667 | bnx2x_get_port_hwinfo(bp); |
9595 | 9668 | ||
9596 | if (!BP_NOMCP(bp)) { | ||
9597 | bp->fw_seq = | ||
9598 | (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) & | ||
9599 | DRV_MSG_SEQ_NUMBER_MASK); | ||
9600 | BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq); | ||
9601 | } | ||
9602 | |||
9603 | /* Get MAC addresses */ | 9669 | /* Get MAC addresses */ |
9604 | bnx2x_get_mac_hwinfo(bp); | 9670 | bnx2x_get_mac_hwinfo(bp); |
9605 | 9671 | ||
@@ -9765,6 +9831,14 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp) | |||
9765 | if (!BP_NOMCP(bp)) | 9831 | if (!BP_NOMCP(bp)) |
9766 | bnx2x_undi_unload(bp); | 9832 | bnx2x_undi_unload(bp); |
9767 | 9833 | ||
9834 | /* init fw_seq after undi_unload! */ | ||
9835 | if (!BP_NOMCP(bp)) { | ||
9836 | bp->fw_seq = | ||
9837 | (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) & | ||
9838 | DRV_MSG_SEQ_NUMBER_MASK); | ||
9839 | BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq); | ||
9840 | } | ||
9841 | |||
9768 | if (CHIP_REV_IS_FPGA(bp)) | 9842 | if (CHIP_REV_IS_FPGA(bp)) |
9769 | dev_err(&bp->pdev->dev, "FPGA detected\n"); | 9843 | dev_err(&bp->pdev->dev, "FPGA detected\n"); |
9770 | 9844 | ||
@@ -10259,17 +10333,21 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev, | |||
10259 | /* clean indirect addresses */ | 10333 | /* clean indirect addresses */ |
10260 | pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, | 10334 | pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, |
10261 | PCICFG_VENDOR_ID_OFFSET); | 10335 | PCICFG_VENDOR_ID_OFFSET); |
10262 | /* Clean the following indirect addresses for all functions since it | 10336 | /* |
10337 | * Clean the following indirect addresses for all functions since it | ||
10263 | * is not used by the driver. | 10338 | * is not used by the driver. |
10264 | */ | 10339 | */ |
10265 | REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0, 0); | 10340 | REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0, 0); |
10266 | REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0, 0); | 10341 | REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0, 0); |
10267 | REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0, 0); | 10342 | REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0, 0); |
10268 | REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0, 0); | 10343 | REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0, 0); |
10269 | REG_WR(bp, PXP2_REG_PGL_ADDR_88_F1, 0); | 10344 | |
10270 | REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F1, 0); | 10345 | if (CHIP_IS_E1x(bp)) { |
10271 | REG_WR(bp, PXP2_REG_PGL_ADDR_90_F1, 0); | 10346 | REG_WR(bp, PXP2_REG_PGL_ADDR_88_F1, 0); |
10272 | REG_WR(bp, PXP2_REG_PGL_ADDR_94_F1, 0); | 10347 | REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F1, 0); |
10348 | REG_WR(bp, PXP2_REG_PGL_ADDR_90_F1, 0); | ||
10349 | REG_WR(bp, PXP2_REG_PGL_ADDR_94_F1, 0); | ||
10350 | } | ||
10273 | 10351 | ||
10274 | /* | 10352 | /* |
10275 | * Enable internal target-read (in case we are probed after PF FLR). | 10353 | * Enable internal target-read (in case we are probed after PF FLR). |
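The bnx2x_main.c hunks above serialize the IGU configuration scan against device resets by holding the (renamed) HW_LOCK_RESOURCE_RESET lock around it, and they move the fw_seq read so it happens only after bnx2x_undi_unload(). A minimal sketch of the acquire/scan/release shape, assuming the driver's struct bnx2x context; read_igu_info() is a placeholder for the CAM walk, not a bnx2x symbol:

	static void example_igu_scan(struct bnx2x *bp)
	{
		u32 val;

		/* keep other entities from resetting the chip while we scan */
		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET);

		val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION);
		if (!(val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN))
			read_igu_info(bp);	/* placeholder for bnx2x_get_igu_cam_info() */

		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
	}

The fw_seq move follows the same ordering logic: bnx2x_undi_unload() exchanges mailbox commands with the firmware, so reading drv_mb_header before it runs could leave fw_seq stale.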
diff --git a/drivers/net/bnx2x/bnx2x_reg.h b/drivers/net/bnx2x/bnx2x_reg.h index 40266c14e6dc..fc7bd0f23c0b 100644 --- a/drivers/net/bnx2x/bnx2x_reg.h +++ b/drivers/net/bnx2x/bnx2x_reg.h | |||
@@ -1384,6 +1384,18 @@ | |||
1384 | Latched ump_tx_parity; [31] MCP Latched scpad_parity; */ | 1384 | Latched ump_tx_parity; [31] MCP Latched scpad_parity; */ |
1385 | #define MISC_REG_AEU_ENABLE4_PXP_0 0xa108 | 1385 | #define MISC_REG_AEU_ENABLE4_PXP_0 0xa108 |
1386 | #define MISC_REG_AEU_ENABLE4_PXP_1 0xa1a8 | 1386 | #define MISC_REG_AEU_ENABLE4_PXP_1 0xa1a8 |
1387 | /* [RW 32] fifth 32b for enabling the output for function 0 output0. Mapped | ||
1388 | * as follows: [0] PGLUE config_space; [1] PGLUE misc_flr; [2] PGLUE B RBC | ||
1389 | * attention [3] PGLUE B RBC parity; [4] ATC attention; [5] ATC parity; [6] | ||
1390 | * mstat0 attention; [7] mstat0 parity; [8] mstat1 attention; [9] mstat1 | ||
1391 | * parity; [31-10] Reserved; */ | ||
1392 | #define MISC_REG_AEU_ENABLE5_FUNC_0_OUT_0 0xa688 | ||
1393 | /* [RW 32] Fifth 32b for enabling the output for function 1 output0. Mapped | ||
1394 | * as follows: [0] PGLUE config_space; [1] PGLUE misc_flr; [2] PGLUE B RBC | ||
1395 | * attention [3] PGLUE B RBC parity; [4] ATC attention; [5] ATC parity; [6] | ||
1396 | * mstat0 attention; [7] mstat0 parity; [8] mstat1 attention; [9] mstat1 | ||
1397 | * parity; [31-10] Reserved; */ | ||
1398 | #define MISC_REG_AEU_ENABLE5_FUNC_1_OUT_0 0xa6b0 | ||
1387 | /* [RW 1] set/clr general attention 0; this will set/clr bit 94 in the aeu | 1399 | /* [RW 1] set/clr general attention 0; this will set/clr bit 94 in the aeu |
1388 | 128 bit vector */ | 1400 | 128 bit vector */ |
1389 | #define MISC_REG_AEU_GENERAL_ATTN_0 0xa000 | 1401 | #define MISC_REG_AEU_GENERAL_ATTN_0 0xa000 |
@@ -5320,7 +5332,7 @@ | |||
5320 | #define XCM_REG_XX_OVFL_EVNT_ID 0x20058 | 5332 | #define XCM_REG_XX_OVFL_EVNT_ID 0x20058 |
5321 | #define XMAC_CLEAR_RX_LSS_STATUS_REG_CLEAR_LOCAL_FAULT_STATUS (0x1<<0) | 5333 | #define XMAC_CLEAR_RX_LSS_STATUS_REG_CLEAR_LOCAL_FAULT_STATUS (0x1<<0) |
5322 | #define XMAC_CLEAR_RX_LSS_STATUS_REG_CLEAR_REMOTE_FAULT_STATUS (0x1<<1) | 5334 | #define XMAC_CLEAR_RX_LSS_STATUS_REG_CLEAR_REMOTE_FAULT_STATUS (0x1<<1) |
5323 | #define XMAC_CTRL_REG_CORE_LOCAL_LPBK (0x1<<3) | 5335 | #define XMAC_CTRL_REG_LINE_LOCAL_LPBK (0x1<<2) |
5324 | #define XMAC_CTRL_REG_RX_EN (0x1<<1) | 5336 | #define XMAC_CTRL_REG_RX_EN (0x1<<1) |
5325 | #define XMAC_CTRL_REG_SOFT_RESET (0x1<<6) | 5337 | #define XMAC_CTRL_REG_SOFT_RESET (0x1<<6) |
5326 | #define XMAC_CTRL_REG_TX_EN (0x1<<0) | 5338 | #define XMAC_CTRL_REG_TX_EN (0x1<<0) |
@@ -5766,7 +5778,7 @@ | |||
5766 | #define HW_LOCK_RESOURCE_RECOVERY_LEADER_0 8 | 5778 | #define HW_LOCK_RESOURCE_RECOVERY_LEADER_0 8 |
5767 | #define HW_LOCK_RESOURCE_RECOVERY_LEADER_1 9 | 5779 | #define HW_LOCK_RESOURCE_RECOVERY_LEADER_1 9 |
5768 | #define HW_LOCK_RESOURCE_SPIO 2 | 5780 | #define HW_LOCK_RESOURCE_SPIO 2 |
5769 | #define HW_LOCK_RESOURCE_UNDI 5 | 5781 | #define HW_LOCK_RESOURCE_RESET 5 |
5770 | #define AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT (0x1<<4) | 5782 | #define AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT (0x1<<4) |
5771 | #define AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR (0x1<<5) | 5783 | #define AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR (0x1<<5) |
5772 | #define AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR (0x1<<18) | 5784 | #define AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR (0x1<<18) |
@@ -6853,6 +6865,9 @@ Theotherbitsarereservedandshouldbezero*/ | |||
6853 | #define MDIO_WC_REG_IEEE0BLK_AUTONEGNP 0x7 | 6865 | #define MDIO_WC_REG_IEEE0BLK_AUTONEGNP 0x7 |
6854 | #define MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT0 0x10 | 6866 | #define MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT0 0x10 |
6855 | #define MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT1 0x11 | 6867 | #define MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT1 0x11 |
6868 | #define MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT2 0x12 | ||
6869 | #define MDIO_WC_REG_AN_IEEE1BLK_AN_ADV2_FEC_ABILITY 0x4000 | ||
6870 | #define MDIO_WC_REG_AN_IEEE1BLK_AN_ADV2_FEC_REQ 0x8000 | ||
6856 | #define MDIO_WC_REG_PMD_IEEE9BLK_TENGBASE_KR_PMD_CONTROL_REGISTER_150 0x96 | 6871 | #define MDIO_WC_REG_PMD_IEEE9BLK_TENGBASE_KR_PMD_CONTROL_REGISTER_150 0x96 |
6857 | #define MDIO_WC_REG_XGXSBLK0_XGXSCONTROL 0x8000 | 6872 | #define MDIO_WC_REG_XGXSBLK0_XGXSCONTROL 0x8000 |
6858 | #define MDIO_WC_REG_XGXSBLK0_MISCCONTROL1 0x800e | 6873 | #define MDIO_WC_REG_XGXSBLK0_MISCCONTROL1 0x800e |
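MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT2 and its two FEC bits are new Warpcore autoneg advertisement fields; a caller would normally update them with a read-modify-write over clause-45 MDIO. A hedged sketch of that update, where mdio_read()/mdio_write() are illustrative placeholders rather than bnx2x link-layer helpers:

	static void example_advertise_fec(void *phy_ctx, bool request_fec)
	{
		u16 val = mdio_read(phy_ctx, MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT2);

		val |= MDIO_WC_REG_AN_IEEE1BLK_AN_ADV2_FEC_ABILITY;	/* we can do FEC */
		if (request_fec)
			val |= MDIO_WC_REG_AN_IEEE1BLK_AN_ADV2_FEC_REQ;	/* ask the peer to use it */
		else
			val &= ~MDIO_WC_REG_AN_IEEE1BLK_AN_ADV2_FEC_REQ;

		mdio_write(phy_ctx, MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT2, val);
	}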
diff --git a/drivers/net/bnx2x/bnx2x_stats.c b/drivers/net/bnx2x/bnx2x_stats.c index 771f6803b238..9908f2bbcf73 100644 --- a/drivers/net/bnx2x/bnx2x_stats.c +++ b/drivers/net/bnx2x/bnx2x_stats.c | |||
@@ -710,7 +710,8 @@ static int bnx2x_hw_stats_update(struct bnx2x *bp) | |||
710 | break; | 710 | break; |
711 | 711 | ||
712 | case MAC_TYPE_NONE: /* unreached */ | 712 | case MAC_TYPE_NONE: /* unreached */ |
713 | BNX2X_ERR("stats updated by DMAE but no MAC active\n"); | 713 | DP(BNX2X_MSG_STATS, |
714 | "stats updated by DMAE but no MAC active\n"); | ||
714 | return -1; | 715 | return -1; |
715 | 716 | ||
716 | default: /* unreached */ | 717 | default: /* unreached */ |
@@ -1391,7 +1392,7 @@ static void bnx2x_port_stats_base_init(struct bnx2x *bp) | |||
1391 | 1392 | ||
1392 | static void bnx2x_func_stats_base_init(struct bnx2x *bp) | 1393 | static void bnx2x_func_stats_base_init(struct bnx2x *bp) |
1393 | { | 1394 | { |
1394 | int vn, vn_max = IS_MF(bp) ? E1HVN_MAX : E1VN_MAX; | 1395 | int vn, vn_max = IS_MF(bp) ? BP_MAX_VN_NUM(bp) : E1VN_MAX; |
1395 | u32 func_stx; | 1396 | u32 func_stx; |
1396 | 1397 | ||
1397 | /* sanity */ | 1398 | /* sanity */ |
@@ -1404,7 +1405,7 @@ static void bnx2x_func_stats_base_init(struct bnx2x *bp) | |||
1404 | func_stx = bp->func_stx; | 1405 | func_stx = bp->func_stx; |
1405 | 1406 | ||
1406 | for (vn = VN_0; vn < vn_max; vn++) { | 1407 | for (vn = VN_0; vn < vn_max; vn++) { |
1407 | int mb_idx = CHIP_IS_E1x(bp) ? 2*vn + BP_PORT(bp) : vn; | 1408 | int mb_idx = BP_FW_MB_IDX_VN(bp, vn); |
1408 | 1409 | ||
1409 | bp->func_stx = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_param); | 1410 | bp->func_stx = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_param); |
1410 | bnx2x_func_stats_init(bp); | 1411 | bnx2x_func_stats_init(bp); |
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c index a047eb973e3b..47b928ed08f8 100644 --- a/drivers/net/bonding/bond_3ad.c +++ b/drivers/net/bonding/bond_3ad.c | |||
@@ -2168,7 +2168,8 @@ void bond_3ad_state_machine_handler(struct work_struct *work) | |||
2168 | } | 2168 | } |
2169 | 2169 | ||
2170 | re_arm: | 2170 | re_arm: |
2171 | queue_delayed_work(bond->wq, &bond->ad_work, ad_delta_in_ticks); | 2171 | if (!bond->kill_timers) |
2172 | queue_delayed_work(bond->wq, &bond->ad_work, ad_delta_in_ticks); | ||
2172 | out: | 2173 | out: |
2173 | read_unlock(&bond->lock); | 2174 | read_unlock(&bond->lock); |
2174 | } | 2175 | } |
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c index 7f8b20a34ee3..d4fbd2e62616 100644 --- a/drivers/net/bonding/bond_alb.c +++ b/drivers/net/bonding/bond_alb.c | |||
@@ -1440,7 +1440,8 @@ void bond_alb_monitor(struct work_struct *work) | |||
1440 | } | 1440 | } |
1441 | 1441 | ||
1442 | re_arm: | 1442 | re_arm: |
1443 | queue_delayed_work(bond->wq, &bond->alb_work, alb_delta_in_ticks); | 1443 | if (!bond->kill_timers) |
1444 | queue_delayed_work(bond->wq, &bond->alb_work, alb_delta_in_ticks); | ||
1444 | out: | 1445 | out: |
1445 | read_unlock(&bond->lock); | 1446 | read_unlock(&bond->lock); |
1446 | } | 1447 | } |
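The bond_3ad and bond_alb changes above are the same fix: a periodic monitor must not re-queue its delayed work once bond->kill_timers has been set by the close path, otherwise the work keeps re-arming after bond_close() tried to stop it. A minimal sketch of the pattern, assuming the driver's struct bonding layout (the handler name and interval are illustrative):

	static void example_monitor(struct work_struct *work)
	{
		struct bonding *bond = container_of(work, struct bonding,
						    ad_work.work);

		read_lock(&bond->lock);
		if (bond->kill_timers)			/* bond_close() already ran */
			goto out;

		/* ... periodic state-machine / load-balancing work ... */

		if (!bond->kill_timers)			/* re-arm only while still open */
			queue_delayed_work(bond->wq, &bond->ad_work,
					   msecs_to_jiffies(100));
	out:
		read_unlock(&bond->lock);
	}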
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 38a83acd502e..6d79b78cfc75 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c | |||
@@ -777,6 +777,9 @@ static void bond_resend_igmp_join_requests(struct bonding *bond) | |||
777 | 777 | ||
778 | read_lock(&bond->lock); | 778 | read_lock(&bond->lock); |
779 | 779 | ||
780 | if (bond->kill_timers) | ||
781 | goto out; | ||
782 | |||
780 | /* rejoin all groups on bond device */ | 783 | /* rejoin all groups on bond device */ |
781 | __bond_resend_igmp_join_requests(bond->dev); | 784 | __bond_resend_igmp_join_requests(bond->dev); |
782 | 785 | ||
@@ -790,9 +793,9 @@ static void bond_resend_igmp_join_requests(struct bonding *bond) | |||
790 | __bond_resend_igmp_join_requests(vlan_dev); | 793 | __bond_resend_igmp_join_requests(vlan_dev); |
791 | } | 794 | } |
792 | 795 | ||
793 | if (--bond->igmp_retrans > 0) | 796 | if ((--bond->igmp_retrans > 0) && !bond->kill_timers) |
794 | queue_delayed_work(bond->wq, &bond->mcast_work, HZ/5); | 797 | queue_delayed_work(bond->wq, &bond->mcast_work, HZ/5); |
795 | 798 | out: | |
796 | read_unlock(&bond->lock); | 799 | read_unlock(&bond->lock); |
797 | } | 800 | } |
798 | 801 | ||
@@ -2538,7 +2541,7 @@ void bond_mii_monitor(struct work_struct *work) | |||
2538 | } | 2541 | } |
2539 | 2542 | ||
2540 | re_arm: | 2543 | re_arm: |
2541 | if (bond->params.miimon) | 2544 | if (bond->params.miimon && !bond->kill_timers) |
2542 | queue_delayed_work(bond->wq, &bond->mii_work, | 2545 | queue_delayed_work(bond->wq, &bond->mii_work, |
2543 | msecs_to_jiffies(bond->params.miimon)); | 2546 | msecs_to_jiffies(bond->params.miimon)); |
2544 | out: | 2547 | out: |
@@ -2886,7 +2889,7 @@ void bond_loadbalance_arp_mon(struct work_struct *work) | |||
2886 | } | 2889 | } |
2887 | 2890 | ||
2888 | re_arm: | 2891 | re_arm: |
2889 | if (bond->params.arp_interval) | 2892 | if (bond->params.arp_interval && !bond->kill_timers) |
2890 | queue_delayed_work(bond->wq, &bond->arp_work, delta_in_ticks); | 2893 | queue_delayed_work(bond->wq, &bond->arp_work, delta_in_ticks); |
2891 | out: | 2894 | out: |
2892 | read_unlock(&bond->lock); | 2895 | read_unlock(&bond->lock); |
@@ -3154,7 +3157,7 @@ void bond_activebackup_arp_mon(struct work_struct *work) | |||
3154 | bond_ab_arp_probe(bond); | 3157 | bond_ab_arp_probe(bond); |
3155 | 3158 | ||
3156 | re_arm: | 3159 | re_arm: |
3157 | if (bond->params.arp_interval) | 3160 | if (bond->params.arp_interval && !bond->kill_timers) |
3158 | queue_delayed_work(bond->wq, &bond->arp_work, delta_in_ticks); | 3161 | queue_delayed_work(bond->wq, &bond->arp_work, delta_in_ticks); |
3159 | out: | 3162 | out: |
3160 | read_unlock(&bond->lock); | 3163 | read_unlock(&bond->lock); |
@@ -3419,9 +3422,27 @@ static int bond_xmit_hash_policy_l2(struct sk_buff *skb, int count) | |||
3419 | static int bond_open(struct net_device *bond_dev) | 3422 | static int bond_open(struct net_device *bond_dev) |
3420 | { | 3423 | { |
3421 | struct bonding *bond = netdev_priv(bond_dev); | 3424 | struct bonding *bond = netdev_priv(bond_dev); |
3425 | struct slave *slave; | ||
3426 | int i; | ||
3422 | 3427 | ||
3423 | bond->kill_timers = 0; | 3428 | bond->kill_timers = 0; |
3424 | 3429 | ||
3430 | /* reset slave->backup and slave->inactive */ | ||
3431 | read_lock(&bond->lock); | ||
3432 | if (bond->slave_cnt > 0) { | ||
3433 | read_lock(&bond->curr_slave_lock); | ||
3434 | bond_for_each_slave(bond, slave, i) { | ||
3435 | if ((bond->params.mode == BOND_MODE_ACTIVEBACKUP) | ||
3436 | && (slave != bond->curr_active_slave)) { | ||
3437 | bond_set_slave_inactive_flags(slave); | ||
3438 | } else { | ||
3439 | bond_set_slave_active_flags(slave); | ||
3440 | } | ||
3441 | } | ||
3442 | read_unlock(&bond->curr_slave_lock); | ||
3443 | } | ||
3444 | read_unlock(&bond->lock); | ||
3445 | |||
3425 | INIT_DELAYED_WORK(&bond->mcast_work, bond_resend_igmp_join_requests_delayed); | 3446 | INIT_DELAYED_WORK(&bond->mcast_work, bond_resend_igmp_join_requests_delayed); |
3426 | 3447 | ||
3427 | if (bond_is_lb(bond)) { | 3448 | if (bond_is_lb(bond)) { |
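bond_open() additionally re-derives each slave's active/inactive flags, since a re-opened bond could otherwise inherit stale flags from its previous run. A condensed sketch of the loop, with the nested lock order (bond->lock outside, curr_slave_lock inside) matching the driver:

	read_lock(&bond->lock);
	read_lock(&bond->curr_slave_lock);
	bond_for_each_slave(bond, slave, i) {
		if (bond->params.mode == BOND_MODE_ACTIVEBACKUP &&
		    slave != bond->curr_active_slave)
			bond_set_slave_inactive_flags(slave);	/* backup port */
		else
			bond_set_slave_active_flags(slave);
	}
	read_unlock(&bond->curr_slave_lock);
	read_unlock(&bond->lock);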
diff --git a/drivers/net/can/sja1000/plx_pci.c b/drivers/net/can/sja1000/plx_pci.c index 231385b8e08f..c7f3d4ea1167 100644 --- a/drivers/net/can/sja1000/plx_pci.c +++ b/drivers/net/can/sja1000/plx_pci.c | |||
@@ -408,7 +408,7 @@ static void plx_pci_del_card(struct pci_dev *pdev) | |||
408 | struct sja1000_priv *priv; | 408 | struct sja1000_priv *priv; |
409 | int i = 0; | 409 | int i = 0; |
410 | 410 | ||
411 | for (i = 0; i < card->channels; i++) { | 411 | for (i = 0; i < PLX_PCI_MAX_CHAN; i++) { |
412 | dev = card->net_dev[i]; | 412 | dev = card->net_dev[i]; |
413 | if (!dev) | 413 | if (!dev) |
414 | continue; | 414 | continue; |
@@ -536,7 +536,6 @@ static int __devinit plx_pci_add_card(struct pci_dev *pdev, | |||
536 | if (err) { | 536 | if (err) { |
537 | dev_err(&pdev->dev, "Registering device failed " | 537 | dev_err(&pdev->dev, "Registering device failed " |
538 | "(err=%d)\n", err); | 538 | "(err=%d)\n", err); |
539 | free_sja1000dev(dev); | ||
540 | goto failure_cleanup; | 539 | goto failure_cleanup; |
541 | } | 540 | } |
542 | 541 | ||
@@ -549,6 +548,7 @@ static int __devinit plx_pci_add_card(struct pci_dev *pdev, | |||
549 | dev_err(&pdev->dev, "Channel #%d not detected\n", | 548 | dev_err(&pdev->dev, "Channel #%d not detected\n", |
550 | i + 1); | 549 | i + 1); |
551 | free_sja1000dev(dev); | 550 | free_sja1000dev(dev); |
551 | card->net_dev[i] = NULL; | ||
552 | } | 552 | } |
553 | } | 553 | } |
554 | 554 | ||
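plx_pci_del_card() now walks the fixed-size channel array and keys off NULL entries (which plx_pci_add_card() sets when a channel fails to probe) rather than a count that may not match what actually registered, and the add path no longer frees a device twice on registration failure. A minimal sketch of that teardown shape, using the card layout and sja1000 helpers from the driver:

	for (i = 0; i < PLX_PCI_MAX_CHAN; i++) {
		struct net_device *dev = card->net_dev[i];

		if (!dev)
			continue;		/* slot unused or channel never probed */

		unregister_sja1000dev(dev);
		free_sja1000dev(dev);
		card->net_dev[i] = NULL;
	}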
diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c index f7bbde9eb2cb..2adc294f512a 100644 --- a/drivers/net/can/ti_hecc.c +++ b/drivers/net/can/ti_hecc.c | |||
@@ -46,6 +46,7 @@ | |||
46 | #include <linux/skbuff.h> | 46 | #include <linux/skbuff.h> |
47 | #include <linux/platform_device.h> | 47 | #include <linux/platform_device.h> |
48 | #include <linux/clk.h> | 48 | #include <linux/clk.h> |
49 | #include <linux/io.h> | ||
49 | 50 | ||
50 | #include <linux/can/dev.h> | 51 | #include <linux/can/dev.h> |
51 | #include <linux/can/error.h> | 52 | #include <linux/can/error.h> |
@@ -503,9 +504,9 @@ static netdev_tx_t ti_hecc_xmit(struct sk_buff *skb, struct net_device *ndev) | |||
503 | spin_unlock_irqrestore(&priv->mbx_lock, flags); | 504 | spin_unlock_irqrestore(&priv->mbx_lock, flags); |
504 | 505 | ||
505 | /* Prepare mailbox for transmission */ | 506 | /* Prepare mailbox for transmission */ |
507 | data = cf->can_dlc | (get_tx_head_prio(priv) << 8); | ||
506 | if (cf->can_id & CAN_RTR_FLAG) /* Remote transmission request */ | 508 | if (cf->can_id & CAN_RTR_FLAG) /* Remote transmission request */ |
507 | data |= HECC_CANMCF_RTR; | 509 | data |= HECC_CANMCF_RTR; |
508 | data |= get_tx_head_prio(priv) << 8; | ||
509 | hecc_write_mbx(priv, mbxno, HECC_CANMCF, data); | 510 | hecc_write_mbx(priv, mbxno, HECC_CANMCF, data); |
510 | 511 | ||
511 | if (cf->can_id & CAN_EFF_FLAG) /* Extended frame format */ | 512 | if (cf->can_id & CAN_EFF_FLAG) /* Extended frame format */ |
@@ -923,6 +924,7 @@ static int ti_hecc_probe(struct platform_device *pdev) | |||
923 | priv->can.do_get_state = ti_hecc_get_state; | 924 | priv->can.do_get_state = ti_hecc_get_state; |
924 | priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES; | 925 | priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES; |
925 | 926 | ||
927 | spin_lock_init(&priv->mbx_lock); | ||
926 | ndev->irq = irq->start; | 928 | ndev->irq = irq->start; |
927 | ndev->flags |= IFF_ECHO; | 929 | ndev->flags |= IFF_ECHO; |
928 | platform_set_drvdata(pdev, ndev); | 930 | platform_set_drvdata(pdev, ndev); |
diff --git a/drivers/net/cassini.c b/drivers/net/cassini.c index 646c86bcc545..fdb7a1756409 100644 --- a/drivers/net/cassini.c +++ b/drivers/net/cassini.c | |||
@@ -2452,14 +2452,13 @@ static irqreturn_t cas_interruptN(int irq, void *dev_id) | |||
2452 | struct net_device *dev = dev_id; | 2452 | struct net_device *dev = dev_id; |
2453 | struct cas *cp = netdev_priv(dev); | 2453 | struct cas *cp = netdev_priv(dev); |
2454 | unsigned long flags; | 2454 | unsigned long flags; |
2455 | int ring; | 2455 | int ring = (irq == cp->pci_irq_INTC) ? 2 : 3; |
2456 | u32 status = readl(cp->regs + REG_PLUS_INTRN_STATUS(ring)); | 2456 | u32 status = readl(cp->regs + REG_PLUS_INTRN_STATUS(ring)); |
2457 | 2457 | ||
2458 | /* check for shared irq */ | 2458 | /* check for shared irq */ |
2459 | if (status == 0) | 2459 | if (status == 0) |
2460 | return IRQ_NONE; | 2460 | return IRQ_NONE; |
2461 | 2461 | ||
2462 | ring = (irq == cp->pci_irq_INTC) ? 2 : 3; | ||
2463 | spin_lock_irqsave(&cp->lock, flags); | 2462 | spin_lock_irqsave(&cp->lock, flags); |
2464 | if (status & INTR_RX_DONE_ALT) { /* handle rx separately */ | 2463 | if (status & INTR_RX_DONE_ALT) { /* handle rx separately */ |
2465 | #ifdef USE_NAPI | 2464 | #ifdef USE_NAPI |
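The cassini fix is an ordering bug: the old code read REG_PLUS_INTRN_STATUS(ring) before ring had been assigned, so the status could come from an indeterminate register. The corrected shape computes the ring from the IRQ first, as in this minimal sketch (struct cas fields as in the driver):

	static irqreturn_t example_interruptN(int irq, void *dev_id)
	{
		struct net_device *dev = dev_id;
		struct cas *cp = netdev_priv(dev);
		/* pick the ring first, then read that ring's status */
		int ring = (irq == cp->pci_irq_INTC) ? 2 : 3;
		u32 status = readl(cp->regs + REG_PLUS_INTRN_STATUS(ring));

		if (status == 0)
			return IRQ_NONE;	/* shared IRQ, not for us */

		/* ... ring-specific RX/TX handling under cp->lock ... */
		return IRQ_HANDLED;
	}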
diff --git a/drivers/net/cxgb3/cxgb3_offload.c b/drivers/net/cxgb3/cxgb3_offload.c index 805076c54f1b..da5a5d9b8aff 100644 --- a/drivers/net/cxgb3/cxgb3_offload.c +++ b/drivers/net/cxgb3/cxgb3_offload.c | |||
@@ -1146,12 +1146,14 @@ static void cxgb_redirect(struct dst_entry *old, struct dst_entry *new) | |||
1146 | if (te && te->ctx && te->client && te->client->redirect) { | 1146 | if (te && te->ctx && te->client && te->client->redirect) { |
1147 | update_tcb = te->client->redirect(te->ctx, old, new, e); | 1147 | update_tcb = te->client->redirect(te->ctx, old, new, e); |
1148 | if (update_tcb) { | 1148 | if (update_tcb) { |
1149 | rcu_read_lock(); | ||
1149 | l2t_hold(L2DATA(tdev), e); | 1150 | l2t_hold(L2DATA(tdev), e); |
1151 | rcu_read_unlock(); | ||
1150 | set_l2t_ix(tdev, tid, e); | 1152 | set_l2t_ix(tdev, tid, e); |
1151 | } | 1153 | } |
1152 | } | 1154 | } |
1153 | } | 1155 | } |
1154 | l2t_release(L2DATA(tdev), e); | 1156 | l2t_release(tdev, e); |
1155 | } | 1157 | } |
1156 | 1158 | ||
1157 | /* | 1159 | /* |
@@ -1264,7 +1266,7 @@ int cxgb3_offload_activate(struct adapter *adapter) | |||
1264 | goto out_free; | 1266 | goto out_free; |
1265 | 1267 | ||
1266 | err = -ENOMEM; | 1268 | err = -ENOMEM; |
1267 | L2DATA(dev) = t3_init_l2t(l2t_capacity); | 1269 | RCU_INIT_POINTER(dev->l2opt, t3_init_l2t(l2t_capacity)); |
1268 | if (!L2DATA(dev)) | 1270 | if (!L2DATA(dev)) |
1269 | goto out_free; | 1271 | goto out_free; |
1270 | 1272 | ||
@@ -1298,16 +1300,24 @@ int cxgb3_offload_activate(struct adapter *adapter) | |||
1298 | 1300 | ||
1299 | out_free_l2t: | 1301 | out_free_l2t: |
1300 | t3_free_l2t(L2DATA(dev)); | 1302 | t3_free_l2t(L2DATA(dev)); |
1301 | L2DATA(dev) = NULL; | 1303 | rcu_assign_pointer(dev->l2opt, NULL); |
1302 | out_free: | 1304 | out_free: |
1303 | kfree(t); | 1305 | kfree(t); |
1304 | return err; | 1306 | return err; |
1305 | } | 1307 | } |
1306 | 1308 | ||
1309 | static void clean_l2_data(struct rcu_head *head) | ||
1310 | { | ||
1311 | struct l2t_data *d = container_of(head, struct l2t_data, rcu_head); | ||
1312 | t3_free_l2t(d); | ||
1313 | } | ||
1314 | |||
1315 | |||
1307 | void cxgb3_offload_deactivate(struct adapter *adapter) | 1316 | void cxgb3_offload_deactivate(struct adapter *adapter) |
1308 | { | 1317 | { |
1309 | struct t3cdev *tdev = &adapter->tdev; | 1318 | struct t3cdev *tdev = &adapter->tdev; |
1310 | struct t3c_data *t = T3C_DATA(tdev); | 1319 | struct t3c_data *t = T3C_DATA(tdev); |
1320 | struct l2t_data *d; | ||
1311 | 1321 | ||
1312 | remove_adapter(adapter); | 1322 | remove_adapter(adapter); |
1313 | if (list_empty(&adapter_list)) | 1323 | if (list_empty(&adapter_list)) |
@@ -1315,8 +1325,11 @@ void cxgb3_offload_deactivate(struct adapter *adapter) | |||
1315 | 1325 | ||
1316 | free_tid_maps(&t->tid_maps); | 1326 | free_tid_maps(&t->tid_maps); |
1317 | T3C_DATA(tdev) = NULL; | 1327 | T3C_DATA(tdev) = NULL; |
1318 | t3_free_l2t(L2DATA(tdev)); | 1328 | rcu_read_lock(); |
1319 | L2DATA(tdev) = NULL; | 1329 | d = L2DATA(tdev); |
1330 | rcu_read_unlock(); | ||
1331 | rcu_assign_pointer(tdev->l2opt, NULL); | ||
1332 | call_rcu(&d->rcu_head, clean_l2_data); | ||
1320 | if (t->nofail_skb) | 1333 | if (t->nofail_skb) |
1321 | kfree_skb(t->nofail_skb); | 1334 | kfree_skb(t->nofail_skb); |
1322 | kfree(t); | 1335 | kfree(t); |
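cxgb3_offload_deactivate() now unpublishes the RCU-protected l2opt pointer and defers the actual free to an RCU callback, so a reader that picked up the table via L2DATA() just before teardown can finish its critical section before the memory disappears. A minimal sketch of that unpublish-then-defer shape, assuming the l2t_data/rcu_head layout added in the l2t.h hunk below (example_teardown() is illustrative, not a driver symbol):

	static void clean_l2_data(struct rcu_head *head)
	{
		struct l2t_data *d = container_of(head, struct l2t_data, rcu_head);

		t3_free_l2t(d);				/* real free, after the grace period */
	}

	static void example_teardown(struct t3cdev *tdev)
	{
		struct l2t_data *d;

		rcu_read_lock();
		d = L2DATA(tdev);			/* rcu_dereference() under the hood */
		rcu_read_unlock();

		rcu_assign_pointer(tdev->l2opt, NULL);	/* new readers now see NULL */
		if (d)
			call_rcu(&d->rcu_head, clean_l2_data);
	}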
diff --git a/drivers/net/cxgb3/l2t.c b/drivers/net/cxgb3/l2t.c index f452c4003253..41540978a173 100644 --- a/drivers/net/cxgb3/l2t.c +++ b/drivers/net/cxgb3/l2t.c | |||
@@ -300,14 +300,21 @@ static inline void reuse_entry(struct l2t_entry *e, struct neighbour *neigh) | |||
300 | struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct neighbour *neigh, | 300 | struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct neighbour *neigh, |
301 | struct net_device *dev) | 301 | struct net_device *dev) |
302 | { | 302 | { |
303 | struct l2t_entry *e; | 303 | struct l2t_entry *e = NULL; |
304 | struct l2t_data *d = L2DATA(cdev); | 304 | struct l2t_data *d; |
305 | int hash; | ||
305 | u32 addr = *(u32 *) neigh->primary_key; | 306 | u32 addr = *(u32 *) neigh->primary_key; |
306 | int ifidx = neigh->dev->ifindex; | 307 | int ifidx = neigh->dev->ifindex; |
307 | int hash = arp_hash(addr, ifidx, d); | ||
308 | struct port_info *p = netdev_priv(dev); | 308 | struct port_info *p = netdev_priv(dev); |
309 | int smt_idx = p->port_id; | 309 | int smt_idx = p->port_id; |
310 | 310 | ||
311 | rcu_read_lock(); | ||
312 | d = L2DATA(cdev); | ||
313 | if (!d) | ||
314 | goto done_rcu; | ||
315 | |||
316 | hash = arp_hash(addr, ifidx, d); | ||
317 | |||
311 | write_lock_bh(&d->lock); | 318 | write_lock_bh(&d->lock); |
312 | for (e = d->l2tab[hash].first; e; e = e->next) | 319 | for (e = d->l2tab[hash].first; e; e = e->next) |
313 | if (e->addr == addr && e->ifindex == ifidx && | 320 | if (e->addr == addr && e->ifindex == ifidx && |
@@ -338,6 +345,8 @@ struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct neighbour *neigh, | |||
338 | } | 345 | } |
339 | done: | 346 | done: |
340 | write_unlock_bh(&d->lock); | 347 | write_unlock_bh(&d->lock); |
348 | done_rcu: | ||
349 | rcu_read_unlock(); | ||
341 | return e; | 350 | return e; |
342 | } | 351 | } |
343 | 352 | ||
diff --git a/drivers/net/cxgb3/l2t.h b/drivers/net/cxgb3/l2t.h index 7a12d52ed4fc..c5f54796e2cb 100644 --- a/drivers/net/cxgb3/l2t.h +++ b/drivers/net/cxgb3/l2t.h | |||
@@ -76,6 +76,7 @@ struct l2t_data { | |||
76 | atomic_t nfree; /* number of free entries */ | 76 | atomic_t nfree; /* number of free entries */ |
77 | rwlock_t lock; | 77 | rwlock_t lock; |
78 | struct l2t_entry l2tab[0]; | 78 | struct l2t_entry l2tab[0]; |
79 | struct rcu_head rcu_head; /* to handle rcu cleanup */ | ||
79 | }; | 80 | }; |
80 | 81 | ||
81 | typedef void (*arp_failure_handler_func)(struct t3cdev * dev, | 82 | typedef void (*arp_failure_handler_func)(struct t3cdev * dev, |
@@ -99,7 +100,7 @@ static inline void set_arp_failure_handler(struct sk_buff *skb, | |||
99 | /* | 100 | /* |
100 | * Getting to the L2 data from an offload device. | 101 | * Getting to the L2 data from an offload device. |
101 | */ | 102 | */ |
102 | #define L2DATA(dev) ((dev)->l2opt) | 103 | #define L2DATA(cdev) (rcu_dereference((cdev)->l2opt)) |
103 | 104 | ||
104 | #define W_TCB_L2T_IX 0 | 105 | #define W_TCB_L2T_IX 0 |
105 | #define S_TCB_L2T_IX 7 | 106 | #define S_TCB_L2T_IX 7 |
@@ -126,15 +127,22 @@ static inline int l2t_send(struct t3cdev *dev, struct sk_buff *skb, | |||
126 | return t3_l2t_send_slow(dev, skb, e); | 127 | return t3_l2t_send_slow(dev, skb, e); |
127 | } | 128 | } |
128 | 129 | ||
129 | static inline void l2t_release(struct l2t_data *d, struct l2t_entry *e) | 130 | static inline void l2t_release(struct t3cdev *t, struct l2t_entry *e) |
130 | { | 131 | { |
131 | if (atomic_dec_and_test(&e->refcnt)) | 132 | struct l2t_data *d; |
133 | |||
134 | rcu_read_lock(); | ||
135 | d = L2DATA(t); | ||
136 | |||
137 | if (atomic_dec_and_test(&e->refcnt) && d) | ||
132 | t3_l2e_free(d, e); | 138 | t3_l2e_free(d, e); |
139 | |||
140 | rcu_read_unlock(); | ||
133 | } | 141 | } |
134 | 142 | ||
135 | static inline void l2t_hold(struct l2t_data *d, struct l2t_entry *e) | 143 | static inline void l2t_hold(struct l2t_data *d, struct l2t_entry *e) |
136 | { | 144 | { |
137 | if (atomic_add_return(1, &e->refcnt) == 1) /* 0 -> 1 transition */ | 145 | if (d && atomic_add_return(1, &e->refcnt) == 1) /* 0 -> 1 transition */ |
138 | atomic_dec(&d->nfree); | 146 | atomic_dec(&d->nfree); |
139 | } | 147 | } |
140 | 148 | ||
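On the reader side, every L2DATA() user now dereferences the pointer inside rcu_read_lock()/rcu_read_unlock() and tolerates a NULL table, as t3_l2t_get() and the reworked l2t_release() above do. A condensed reader sketch using the same accessors (cdev and the entry e are assumed from the surrounding context):

	struct l2t_data *d;

	rcu_read_lock();
	d = L2DATA(cdev);			/* may be NULL during deactivate */
	if (d) {
		/* ... look up an entry, or drop a reference ... */
		if (atomic_dec_and_test(&e->refcnt))
			t3_l2e_free(d, e);
	}
	rcu_read_unlock();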
diff --git a/drivers/net/cxgb4/cxgb4_main.c b/drivers/net/cxgb4/cxgb4_main.c index c9957b7f17b5..b4efa292fd6f 100644 --- a/drivers/net/cxgb4/cxgb4_main.c +++ b/drivers/net/cxgb4/cxgb4_main.c | |||
@@ -3712,6 +3712,9 @@ static int __devinit init_one(struct pci_dev *pdev, | |||
3712 | setup_debugfs(adapter); | 3712 | setup_debugfs(adapter); |
3713 | } | 3713 | } |
3714 | 3714 | ||
3715 | /* PCIe EEH recovery on powerpc platforms needs fundamental reset */ | ||
3716 | pdev->needs_freset = 1; | ||
3717 | |||
3715 | if (is_offload(adapter)) | 3718 | if (is_offload(adapter)) |
3716 | attach_ulds(adapter); | 3719 | attach_ulds(adapter); |
3717 | 3720 | ||
diff --git a/drivers/net/e1000/e1000_hw.c b/drivers/net/e1000/e1000_hw.c index 8545c7aa93eb..a5a89ecb6f36 100644 --- a/drivers/net/e1000/e1000_hw.c +++ b/drivers/net/e1000/e1000_hw.c | |||
@@ -4026,6 +4026,12 @@ s32 e1000_validate_eeprom_checksum(struct e1000_hw *hw) | |||
4026 | checksum += eeprom_data; | 4026 | checksum += eeprom_data; |
4027 | } | 4027 | } |
4028 | 4028 | ||
4029 | #ifdef CONFIG_PARISC | ||
4030 | /* This is a signature and not a checksum on HP c8000 */ | ||
4031 | if ((hw->subsystem_vendor_id == 0x103C) && (eeprom_data == 0x16d6)) | ||
4032 | return E1000_SUCCESS; | ||
4033 | |||
4034 | #endif | ||
4029 | if (checksum == (u16) EEPROM_SUM) | 4035 | if (checksum == (u16) EEPROM_SUM) |
4030 | return E1000_SUCCESS; | 4036 | return E1000_SUCCESS; |
4031 | else { | 4037 | else { |
diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h index 35916f485028..8533ad7f3559 100644 --- a/drivers/net/e1000e/e1000.h +++ b/drivers/net/e1000e/e1000.h | |||
@@ -155,6 +155,9 @@ struct e1000_info; | |||
155 | #define HV_M_STATUS_SPEED_1000 0x0200 | 155 | #define HV_M_STATUS_SPEED_1000 0x0200 |
156 | #define HV_M_STATUS_LINK_UP 0x0040 | 156 | #define HV_M_STATUS_LINK_UP 0x0040 |
157 | 157 | ||
158 | #define E1000_ICH_FWSM_PCIM2PCI 0x01000000 /* ME PCIm-to-PCI active */ | ||
159 | #define E1000_ICH_FWSM_PCIM2PCI_COUNT 2000 | ||
160 | |||
158 | /* Time to wait before putting the device into D3 if there's no link (in ms). */ | 161 | /* Time to wait before putting the device into D3 if there's no link (in ms). */ |
159 | #define LINK_TIMEOUT 100 | 162 | #define LINK_TIMEOUT 100 |
160 | 163 | ||
@@ -454,6 +457,7 @@ struct e1000_info { | |||
454 | #define FLAG2_DISABLE_AIM (1 << 8) | 457 | #define FLAG2_DISABLE_AIM (1 << 8) |
455 | #define FLAG2_CHECK_PHY_HANG (1 << 9) | 458 | #define FLAG2_CHECK_PHY_HANG (1 << 9) |
456 | #define FLAG2_NO_DISABLE_RX (1 << 10) | 459 | #define FLAG2_NO_DISABLE_RX (1 << 10) |
460 | #define FLAG2_PCIM2PCI_ARBITER_WA (1 << 11) | ||
457 | 461 | ||
458 | #define E1000_RX_DESC_PS(R, i) \ | 462 | #define E1000_RX_DESC_PS(R, i) \ |
459 | (&(((union e1000_rx_desc_packet_split *)((R).desc))[i])) | 463 | (&(((union e1000_rx_desc_packet_split *)((R).desc))[i])) |
diff --git a/drivers/net/e1000e/ich8lan.c b/drivers/net/e1000e/ich8lan.c index 4e36978b8fd8..54add27c8f76 100644 --- a/drivers/net/e1000e/ich8lan.c +++ b/drivers/net/e1000e/ich8lan.c | |||
@@ -137,8 +137,9 @@ | |||
137 | #define HV_PM_CTRL PHY_REG(770, 17) | 137 | #define HV_PM_CTRL PHY_REG(770, 17) |
138 | 138 | ||
139 | /* PHY Low Power Idle Control */ | 139 | /* PHY Low Power Idle Control */ |
140 | #define I82579_LPI_CTRL PHY_REG(772, 20) | 140 | #define I82579_LPI_CTRL PHY_REG(772, 20) |
141 | #define I82579_LPI_CTRL_ENABLE_MASK 0x6000 | 141 | #define I82579_LPI_CTRL_ENABLE_MASK 0x6000 |
142 | #define I82579_LPI_CTRL_FORCE_PLL_LOCK_COUNT 0x80 | ||
142 | 143 | ||
143 | /* EMI Registers */ | 144 | /* EMI Registers */ |
144 | #define I82579_EMI_ADDR 0x10 | 145 | #define I82579_EMI_ADDR 0x10 |
@@ -163,6 +164,11 @@ | |||
163 | #define HV_KMRN_MODE_CTRL PHY_REG(769, 16) | 164 | #define HV_KMRN_MODE_CTRL PHY_REG(769, 16) |
164 | #define HV_KMRN_MDIO_SLOW 0x0400 | 165 | #define HV_KMRN_MDIO_SLOW 0x0400 |
165 | 166 | ||
167 | /* KMRN FIFO Control and Status */ | ||
168 | #define HV_KMRN_FIFO_CTRLSTA PHY_REG(770, 16) | ||
169 | #define HV_KMRN_FIFO_CTRLSTA_PREAMBLE_MASK 0x7000 | ||
170 | #define HV_KMRN_FIFO_CTRLSTA_PREAMBLE_SHIFT 12 | ||
171 | |||
166 | /* ICH GbE Flash Hardware Sequencing Flash Status Register bit breakdown */ | 172 | /* ICH GbE Flash Hardware Sequencing Flash Status Register bit breakdown */ |
167 | /* Offset 04h HSFSTS */ | 173 | /* Offset 04h HSFSTS */ |
168 | union ich8_hws_flash_status { | 174 | union ich8_hws_flash_status { |
@@ -657,6 +663,7 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw) | |||
657 | struct e1000_mac_info *mac = &hw->mac; | 663 | struct e1000_mac_info *mac = &hw->mac; |
658 | s32 ret_val; | 664 | s32 ret_val; |
659 | bool link; | 665 | bool link; |
666 | u16 phy_reg; | ||
660 | 667 | ||
661 | /* | 668 | /* |
662 | * We only want to go out to the PHY registers to see if Auto-Neg | 669 | * We only want to go out to the PHY registers to see if Auto-Neg |
@@ -689,16 +696,35 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw) | |||
689 | 696 | ||
690 | mac->get_link_status = false; | 697 | mac->get_link_status = false; |
691 | 698 | ||
692 | if (hw->phy.type == e1000_phy_82578) { | 699 | switch (hw->mac.type) { |
693 | ret_val = e1000_link_stall_workaround_hv(hw); | 700 | case e1000_pch2lan: |
694 | if (ret_val) | ||
695 | goto out; | ||
696 | } | ||
697 | |||
698 | if (hw->mac.type == e1000_pch2lan) { | ||
699 | ret_val = e1000_k1_workaround_lv(hw); | 701 | ret_val = e1000_k1_workaround_lv(hw); |
700 | if (ret_val) | 702 | if (ret_val) |
701 | goto out; | 703 | goto out; |
704 | /* fall-thru */ | ||
705 | case e1000_pchlan: | ||
706 | if (hw->phy.type == e1000_phy_82578) { | ||
707 | ret_val = e1000_link_stall_workaround_hv(hw); | ||
708 | if (ret_val) | ||
709 | goto out; | ||
710 | } | ||
711 | |||
712 | /* | ||
713 | * Workaround for PCHx parts in half-duplex: | ||
714 | * Set the number of preambles removed from the packet | ||
715 | * when it is passed from the PHY to the MAC to prevent | ||
716 | * the MAC from misinterpreting the packet type. | ||
717 | */ | ||
718 | e1e_rphy(hw, HV_KMRN_FIFO_CTRLSTA, &phy_reg); | ||
719 | phy_reg &= ~HV_KMRN_FIFO_CTRLSTA_PREAMBLE_MASK; | ||
720 | |||
721 | if ((er32(STATUS) & E1000_STATUS_FD) != E1000_STATUS_FD) | ||
722 | phy_reg |= (1 << HV_KMRN_FIFO_CTRLSTA_PREAMBLE_SHIFT); | ||
723 | |||
724 | e1e_wphy(hw, HV_KMRN_FIFO_CTRLSTA, phy_reg); | ||
725 | break; | ||
726 | default: | ||
727 | break; | ||
702 | } | 728 | } |
703 | 729 | ||
704 | /* | 730 | /* |
@@ -788,6 +814,11 @@ static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter) | |||
788 | (adapter->hw.phy.type == e1000_phy_igp_3)) | 814 | (adapter->hw.phy.type == e1000_phy_igp_3)) |
789 | adapter->flags |= FLAG_LSC_GIG_SPEED_DROP; | 815 | adapter->flags |= FLAG_LSC_GIG_SPEED_DROP; |
790 | 816 | ||
817 | /* Enable workaround for 82579 w/ ME enabled */ | ||
818 | if ((adapter->hw.mac.type == e1000_pch2lan) && | ||
819 | (er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) | ||
820 | adapter->flags2 |= FLAG2_PCIM2PCI_ARBITER_WA; | ||
821 | |||
791 | /* Disable EEE by default until IEEE802.3az spec is finalized */ | 822 | /* Disable EEE by default until IEEE802.3az spec is finalized */ |
792 | if (adapter->flags2 & FLAG2_HAS_EEE) | 823 | if (adapter->flags2 & FLAG2_HAS_EEE) |
793 | adapter->hw.dev_spec.ich8lan.eee_disable = true; | 824 | adapter->hw.dev_spec.ich8lan.eee_disable = true; |
@@ -1355,7 +1386,7 @@ static s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw) | |||
1355 | return ret_val; | 1386 | return ret_val; |
1356 | 1387 | ||
1357 | /* Preamble tuning for SSC */ | 1388 | /* Preamble tuning for SSC */ |
1358 | ret_val = e1e_wphy(hw, PHY_REG(770, 16), 0xA204); | 1389 | ret_val = e1e_wphy(hw, HV_KMRN_FIFO_CTRLSTA, 0xA204); |
1359 | if (ret_val) | 1390 | if (ret_val) |
1360 | return ret_val; | 1391 | return ret_val; |
1361 | } | 1392 | } |
@@ -1645,6 +1676,7 @@ static s32 e1000_k1_workaround_lv(struct e1000_hw *hw) | |||
1645 | s32 ret_val = 0; | 1676 | s32 ret_val = 0; |
1646 | u16 status_reg = 0; | 1677 | u16 status_reg = 0; |
1647 | u32 mac_reg; | 1678 | u32 mac_reg; |
1679 | u16 phy_reg; | ||
1648 | 1680 | ||
1649 | if (hw->mac.type != e1000_pch2lan) | 1681 | if (hw->mac.type != e1000_pch2lan) |
1650 | goto out; | 1682 | goto out; |
@@ -1659,12 +1691,19 @@ static s32 e1000_k1_workaround_lv(struct e1000_hw *hw) | |||
1659 | mac_reg = er32(FEXTNVM4); | 1691 | mac_reg = er32(FEXTNVM4); |
1660 | mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK; | 1692 | mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK; |
1661 | 1693 | ||
1662 | if (status_reg & HV_M_STATUS_SPEED_1000) | 1694 | ret_val = e1e_rphy(hw, I82579_LPI_CTRL, &phy_reg); |
1695 | if (ret_val) | ||
1696 | goto out; | ||
1697 | |||
1698 | if (status_reg & HV_M_STATUS_SPEED_1000) { | ||
1663 | mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_8USEC; | 1699 | mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_8USEC; |
1664 | else | 1700 | phy_reg &= ~I82579_LPI_CTRL_FORCE_PLL_LOCK_COUNT; |
1701 | } else { | ||
1665 | mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_16USEC; | 1702 | mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_16USEC; |
1666 | 1703 | phy_reg |= I82579_LPI_CTRL_FORCE_PLL_LOCK_COUNT; | |
1704 | } | ||
1667 | ew32(FEXTNVM4, mac_reg); | 1705 | ew32(FEXTNVM4, mac_reg); |
1706 | ret_val = e1e_wphy(hw, I82579_LPI_CTRL, phy_reg); | ||
1668 | } | 1707 | } |
1669 | 1708 | ||
1670 | out: | 1709 | out: |
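The ich8lan workarounds above repeat one idiom: read a PHY register with e1e_rphy(), clear the field of interest, set bits conditionally on the current speed or duplex, and write the result back with e1e_wphy(). A hedged generic sketch of that read-modify-write (example_phy_rmw() is illustrative, not an e1000e function; the accessors and bit macros are the driver's):

	static s32 example_phy_rmw(struct e1000_hw *hw, u32 reg,
				   u16 clear_mask, u16 set_mask, bool set)
	{
		u16 val;
		s32 ret_val = e1e_rphy(hw, reg, &val);

		if (ret_val)
			return ret_val;

		val &= ~clear_mask;
		if (set)
			val |= set_mask;

		return e1e_wphy(hw, reg, val);
	}

For instance, the half-duplex preamble tweak amounts to example_phy_rmw(hw, HV_KMRN_FIFO_CTRLSTA, HV_KMRN_FIFO_CTRLSTA_PREAMBLE_MASK, 1 << HV_KMRN_FIFO_CTRLSTA_PREAMBLE_SHIFT, !full_duplex).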
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c index 362f70382cdd..2198e615f241 100644 --- a/drivers/net/e1000e/netdev.c +++ b/drivers/net/e1000e/netdev.c | |||
@@ -519,6 +519,63 @@ static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err, | |||
519 | } | 519 | } |
520 | 520 | ||
521 | /** | 521 | /** |
522 | * e1000e_update_tail_wa - helper function for e1000e_update_[rt]dt_wa() | ||
523 | * @hw: pointer to the HW structure | ||
524 | * @tail: address of tail descriptor register | ||
525 | * @i: value to write to tail descriptor register | ||
526 | * | ||
527 | * When updating the tail register, the ME could be accessing Host CSR | ||
528 | * registers at the same time. Normally, this is handled in h/w by an | ||
529 | * arbiter but on some parts there is a bug that acknowledges Host accesses | ||
530 | * later than it should, which could result in the descriptor register | ||
531 | * having an incorrect value. Work around this by checking the FWSM register | ||
532 | * which has bit 24 set while ME is accessing Host CSR registers, wait | ||
533 | * if it is set and try again a number of times. | ||
534 | **/ | ||
535 | static inline s32 e1000e_update_tail_wa(struct e1000_hw *hw, u8 __iomem * tail, | ||
536 | unsigned int i) | ||
537 | { | ||
538 | unsigned int j = 0; | ||
539 | |||
540 | while ((j++ < E1000_ICH_FWSM_PCIM2PCI_COUNT) && | ||
541 | (er32(FWSM) & E1000_ICH_FWSM_PCIM2PCI)) | ||
542 | udelay(50); | ||
543 | |||
544 | writel(i, tail); | ||
545 | |||
546 | if ((j == E1000_ICH_FWSM_PCIM2PCI_COUNT) && (i != readl(tail))) | ||
547 | return E1000_ERR_SWFW_SYNC; | ||
548 | |||
549 | return 0; | ||
550 | } | ||
551 | |||
552 | static void e1000e_update_rdt_wa(struct e1000_adapter *adapter, unsigned int i) | ||
553 | { | ||
554 | u8 __iomem *tail = (adapter->hw.hw_addr + adapter->rx_ring->tail); | ||
555 | struct e1000_hw *hw = &adapter->hw; | ||
556 | |||
557 | if (e1000e_update_tail_wa(hw, tail, i)) { | ||
558 | u32 rctl = er32(RCTL); | ||
559 | ew32(RCTL, rctl & ~E1000_RCTL_EN); | ||
560 | e_err("ME firmware caused invalid RDT - resetting\n"); | ||
561 | schedule_work(&adapter->reset_task); | ||
562 | } | ||
563 | } | ||
564 | |||
565 | static void e1000e_update_tdt_wa(struct e1000_adapter *adapter, unsigned int i) | ||
566 | { | ||
567 | u8 __iomem *tail = (adapter->hw.hw_addr + adapter->tx_ring->tail); | ||
568 | struct e1000_hw *hw = &adapter->hw; | ||
569 | |||
570 | if (e1000e_update_tail_wa(hw, tail, i)) { | ||
571 | u32 tctl = er32(TCTL); | ||
572 | ew32(TCTL, tctl & ~E1000_TCTL_EN); | ||
573 | e_err("ME firmware caused invalid TDT - resetting\n"); | ||
574 | schedule_work(&adapter->reset_task); | ||
575 | } | ||
576 | } | ||
577 | |||
578 | /** | ||
522 | * e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended | 579 | * e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended |
523 | * @adapter: address of board private structure | 580 | * @adapter: address of board private structure |
524 | **/ | 581 | **/ |
@@ -573,7 +630,10 @@ map_skb: | |||
573 | * such as IA-64). | 630 | * such as IA-64). |
574 | */ | 631 | */ |
575 | wmb(); | 632 | wmb(); |
576 | writel(i, adapter->hw.hw_addr + rx_ring->tail); | 633 | if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA) |
634 | e1000e_update_rdt_wa(adapter, i); | ||
635 | else | ||
636 | writel(i, adapter->hw.hw_addr + rx_ring->tail); | ||
577 | } | 637 | } |
578 | i++; | 638 | i++; |
579 | if (i == rx_ring->count) | 639 | if (i == rx_ring->count) |
@@ -673,7 +733,11 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter, | |||
673 | * such as IA-64). | 733 | * such as IA-64). |
674 | */ | 734 | */ |
675 | wmb(); | 735 | wmb(); |
676 | writel(i << 1, adapter->hw.hw_addr + rx_ring->tail); | 736 | if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA) |
737 | e1000e_update_rdt_wa(adapter, i << 1); | ||
738 | else | ||
739 | writel(i << 1, | ||
740 | adapter->hw.hw_addr + rx_ring->tail); | ||
677 | } | 741 | } |
678 | 742 | ||
679 | i++; | 743 | i++; |
@@ -756,7 +820,10 @@ check_page: | |||
756 | * applicable for weak-ordered memory model archs, | 820 | * applicable for weak-ordered memory model archs, |
757 | * such as IA-64). */ | 821 | * such as IA-64). */ |
758 | wmb(); | 822 | wmb(); |
759 | writel(i, adapter->hw.hw_addr + rx_ring->tail); | 823 | if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA) |
824 | e1000e_update_rdt_wa(adapter, i); | ||
825 | else | ||
826 | writel(i, adapter->hw.hw_addr + rx_ring->tail); | ||
760 | } | 827 | } |
761 | } | 828 | } |
762 | 829 | ||
@@ -4689,7 +4756,12 @@ static void e1000_tx_queue(struct e1000_adapter *adapter, | |||
4689 | wmb(); | 4756 | wmb(); |
4690 | 4757 | ||
4691 | tx_ring->next_to_use = i; | 4758 | tx_ring->next_to_use = i; |
4692 | writel(i, adapter->hw.hw_addr + tx_ring->tail); | 4759 | |
4760 | if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA) | ||
4761 | e1000e_update_tdt_wa(adapter, i); | ||
4762 | else | ||
4763 | writel(i, adapter->hw.hw_addr + tx_ring->tail); | ||
4764 | |||
4693 | /* | 4765 | /* |
4694 | * we need this if more than one processor can write to our tail | 4766 | * we need this if more than one processor can write to our tail |
4695 | * at a time, it synchronizes IO on IA64/Altix systems | 4767 | * at a time, it synchronizes IO on IA64/Altix systems |
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c index e55df308a3af..6d5fbd4d4256 100644 --- a/drivers/net/forcedeth.c +++ b/drivers/net/forcedeth.c | |||
@@ -5615,7 +5615,8 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i | |||
5615 | goto out_error; | 5615 | goto out_error; |
5616 | } | 5616 | } |
5617 | 5617 | ||
5618 | nv_vlan_mode(dev, dev->features); | 5618 | if (id->driver_data & DEV_HAS_VLAN) |
5619 | nv_vlan_mode(dev, dev->features); | ||
5619 | 5620 | ||
5620 | netif_carrier_off(dev); | 5621 | netif_carrier_off(dev); |
5621 | 5622 | ||
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c index 2659daad783d..31d5c574e5a9 100644 --- a/drivers/net/gianfar.c +++ b/drivers/net/gianfar.c | |||
@@ -2710,8 +2710,13 @@ static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb, | |||
2710 | /* Tell the skb what kind of packet this is */ | 2710 | /* Tell the skb what kind of packet this is */ |
2711 | skb->protocol = eth_type_trans(skb, dev); | 2711 | skb->protocol = eth_type_trans(skb, dev); |
2712 | 2712 | ||
2713 | /* Set vlan tag */ | 2713 | /* |
2714 | if (fcb->flags & RXFCB_VLN) | 2714 | * There's a need to check for NETIF_F_HW_VLAN_RX here. |
2715 | * Even if vlan rx accel is disabled, on some chips | ||
2716 | * RXFCB_VLN is pseudo randomly set. | ||
2717 | */ | ||
2718 | if (dev->features & NETIF_F_HW_VLAN_RX && | ||
2719 | fcb->flags & RXFCB_VLN) | ||
2715 | __vlan_hwaccel_put_tag(skb, fcb->vlctl); | 2720 | __vlan_hwaccel_put_tag(skb, fcb->vlctl); |
2716 | 2721 | ||
2717 | /* Send the packet up the stack */ | 2722 | /* Send the packet up the stack */ |
diff --git a/drivers/net/gianfar_ethtool.c b/drivers/net/gianfar_ethtool.c index 6e350692d118..0caf3c323ec0 100644 --- a/drivers/net/gianfar_ethtool.c +++ b/drivers/net/gianfar_ethtool.c | |||
@@ -686,10 +686,21 @@ static int gfar_ethflow_to_filer_table(struct gfar_private *priv, u64 ethflow, u | |||
686 | { | 686 | { |
687 | unsigned int last_rule_idx = priv->cur_filer_idx; | 687 | unsigned int last_rule_idx = priv->cur_filer_idx; |
688 | unsigned int cmp_rqfpr; | 688 | unsigned int cmp_rqfpr; |
689 | unsigned int local_rqfpr[MAX_FILER_IDX + 1]; | 689 | unsigned int *local_rqfpr; |
690 | unsigned int local_rqfcr[MAX_FILER_IDX + 1]; | 690 | unsigned int *local_rqfcr; |
691 | int i = 0x0, k = 0x0; | 691 | int i = 0x0, k = 0x0; |
692 | int j = MAX_FILER_IDX, l = 0x0; | 692 | int j = MAX_FILER_IDX, l = 0x0; |
693 | int ret = 1; | ||
694 | |||
695 | local_rqfpr = kmalloc(sizeof(unsigned int) * (MAX_FILER_IDX + 1), | ||
696 | GFP_KERNEL); | ||
697 | local_rqfcr = kmalloc(sizeof(unsigned int) * (MAX_FILER_IDX + 1), | ||
698 | GFP_KERNEL); | ||
699 | if (!local_rqfpr || !local_rqfcr) { | ||
700 | pr_err("Out of memory\n"); | ||
701 | ret = 0; | ||
702 | goto err; | ||
703 | } | ||
693 | 704 | ||
694 | switch (class) { | 705 | switch (class) { |
695 | case TCP_V4_FLOW: | 706 | case TCP_V4_FLOW: |
@@ -706,7 +717,8 @@ static int gfar_ethflow_to_filer_table(struct gfar_private *priv, u64 ethflow, u | |||
706 | break; | 717 | break; |
707 | default: | 718 | default: |
708 | pr_err("Right now this class is not supported\n"); | 719 | pr_err("Right now this class is not supported\n"); |
709 | return 0; | 720 | ret = 0; |
721 | goto err; | ||
710 | } | 722 | } |
711 | 723 | ||
712 | for (i = 0; i < MAX_FILER_IDX + 1; i++) { | 724 | for (i = 0; i < MAX_FILER_IDX + 1; i++) { |
@@ -721,7 +733,8 @@ static int gfar_ethflow_to_filer_table(struct gfar_private *priv, u64 ethflow, u | |||
721 | 733 | ||
722 | if (i == MAX_FILER_IDX + 1) { | 734 | if (i == MAX_FILER_IDX + 1) { |
723 | pr_err("No parse rule found, can't create hash rules\n"); | 735 | pr_err("No parse rule found, can't create hash rules\n"); |
724 | return 0; | 736 | ret = 0; |
737 | goto err; | ||
725 | } | 738 | } |
726 | 739 | ||
727 | /* If a match was found, then it begins the starting of a cluster rule | 740 | /* If a match was found, then it begins the starting of a cluster rule |
@@ -765,7 +778,10 @@ static int gfar_ethflow_to_filer_table(struct gfar_private *priv, u64 ethflow, u | |||
765 | priv->cur_filer_idx = priv->cur_filer_idx - 1; | 778 | priv->cur_filer_idx = priv->cur_filer_idx - 1; |
766 | } | 779 | } |
767 | 780 | ||
768 | return 1; | 781 | err: |
782 | kfree(local_rqfcr); | ||
783 | kfree(local_rqfpr); | ||
784 | return ret; | ||
769 | } | 785 | } |
770 | 786 | ||
771 | static int gfar_set_hash_opts(struct gfar_private *priv, struct ethtool_rxnfc *cmd) | 787 | static int gfar_set_hash_opts(struct gfar_private *priv, struct ethtool_rxnfc *cmd) |
@@ -1653,10 +1669,10 @@ static int gfar_get_cls_all(struct gfar_private *priv, | |||
1653 | u32 i = 0; | 1669 | u32 i = 0; |
1654 | 1670 | ||
1655 | list_for_each_entry(comp, &priv->rx_list.list, list) { | 1671 | list_for_each_entry(comp, &priv->rx_list.list, list) { |
1656 | if (i <= cmd->rule_cnt) { | 1672 | if (i == cmd->rule_cnt) |
1657 | rule_locs[i] = comp->fs.location; | 1673 | return -EMSGSIZE; |
1658 | i++; | 1674 | rule_locs[i] = comp->fs.location; |
1659 | } | 1675 | i++; |
1660 | } | 1676 | } |
1661 | 1677 | ||
1662 | cmd->data = MAX_FILER_IDX; | 1678 | cmd->data = MAX_FILER_IDX; |
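gfar_ethflow_to_filer_table() used to keep two (MAX_FILER_IDX + 1)-entry arrays of unsigned int on the kernel stack; they are now kmalloc()'d and every exit funnels through one err label that frees both. A minimal sketch of that allocate/single-exit shape (the filer-table construction in the middle is elided):

	unsigned int *local_rqfpr, *local_rqfcr;
	int ret = 1;

	local_rqfpr = kmalloc(sizeof(unsigned int) * (MAX_FILER_IDX + 1),
			      GFP_KERNEL);
	local_rqfcr = kmalloc(sizeof(unsigned int) * (MAX_FILER_IDX + 1),
			      GFP_KERNEL);
	if (!local_rqfpr || !local_rqfcr) {
		ret = 0;
		goto err;
	}

	/* ... build the hash/filer rules ... */

err:
	kfree(local_rqfcr);	/* kfree(NULL) is a no-op, so a partial
				 * allocation failure is safe to unwind here */
	kfree(local_rqfpr);
	return ret;

The gfar_get_cls_all() change is related bounds hygiene: stop copying rule locations once i reaches cmd->rule_cnt and report -EMSGSIZE instead of writing past the caller's buffer.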
diff --git a/drivers/net/greth.c b/drivers/net/greth.c index 16ce45c11934..52a39000c42c 100644 --- a/drivers/net/greth.c +++ b/drivers/net/greth.c | |||
@@ -428,6 +428,7 @@ greth_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
428 | dma_sync_single_for_device(greth->dev, dma_addr, skb->len, DMA_TO_DEVICE); | 428 | dma_sync_single_for_device(greth->dev, dma_addr, skb->len, DMA_TO_DEVICE); |
429 | 429 | ||
430 | status = GRETH_BD_EN | GRETH_BD_IE | (skb->len & GRETH_BD_LEN); | 430 | status = GRETH_BD_EN | GRETH_BD_IE | (skb->len & GRETH_BD_LEN); |
431 | greth->tx_bufs_length[greth->tx_next] = skb->len & GRETH_BD_LEN; | ||
431 | 432 | ||
432 | /* Wrap around descriptor ring */ | 433 | /* Wrap around descriptor ring */ |
433 | if (greth->tx_next == GRETH_TXBD_NUM_MASK) { | 434 | if (greth->tx_next == GRETH_TXBD_NUM_MASK) { |
@@ -490,7 +491,8 @@ greth_start_xmit_gbit(struct sk_buff *skb, struct net_device *dev) | |||
490 | if (nr_frags != 0) | 491 | if (nr_frags != 0) |
491 | status = GRETH_TXBD_MORE; | 492 | status = GRETH_TXBD_MORE; |
492 | 493 | ||
493 | status |= GRETH_TXBD_CSALL; | 494 | if (skb->ip_summed == CHECKSUM_PARTIAL) |
495 | status |= GRETH_TXBD_CSALL; | ||
494 | status |= skb_headlen(skb) & GRETH_BD_LEN; | 496 | status |= skb_headlen(skb) & GRETH_BD_LEN; |
495 | if (greth->tx_next == GRETH_TXBD_NUM_MASK) | 497 | if (greth->tx_next == GRETH_TXBD_NUM_MASK) |
496 | status |= GRETH_BD_WR; | 498 | status |= GRETH_BD_WR; |
@@ -513,7 +515,9 @@ greth_start_xmit_gbit(struct sk_buff *skb, struct net_device *dev) | |||
513 | greth->tx_skbuff[curr_tx] = NULL; | 515 | greth->tx_skbuff[curr_tx] = NULL; |
514 | bdp = greth->tx_bd_base + curr_tx; | 516 | bdp = greth->tx_bd_base + curr_tx; |
515 | 517 | ||
516 | status = GRETH_TXBD_CSALL | GRETH_BD_EN; | 518 | status = GRETH_BD_EN; |
519 | if (skb->ip_summed == CHECKSUM_PARTIAL) | ||
520 | status |= GRETH_TXBD_CSALL; | ||
517 | status |= frag->size & GRETH_BD_LEN; | 521 | status |= frag->size & GRETH_BD_LEN; |
518 | 522 | ||
519 | /* Wrap around descriptor ring */ | 523 | /* Wrap around descriptor ring */ |
@@ -641,6 +645,7 @@ static void greth_clean_tx(struct net_device *dev) | |||
641 | dev->stats.tx_fifo_errors++; | 645 | dev->stats.tx_fifo_errors++; |
642 | } | 646 | } |
643 | dev->stats.tx_packets++; | 647 | dev->stats.tx_packets++; |
648 | dev->stats.tx_bytes += greth->tx_bufs_length[greth->tx_last]; | ||
644 | greth->tx_last = NEXT_TX(greth->tx_last); | 649 | greth->tx_last = NEXT_TX(greth->tx_last); |
645 | greth->tx_free++; | 650 | greth->tx_free++; |
646 | } | 651 | } |
@@ -695,6 +700,7 @@ static void greth_clean_tx_gbit(struct net_device *dev) | |||
695 | greth->tx_skbuff[greth->tx_last] = NULL; | 700 | greth->tx_skbuff[greth->tx_last] = NULL; |
696 | 701 | ||
697 | greth_update_tx_stats(dev, stat); | 702 | greth_update_tx_stats(dev, stat); |
703 | dev->stats.tx_bytes += skb->len; | ||
698 | 704 | ||
699 | bdp = greth->tx_bd_base + greth->tx_last; | 705 | bdp = greth->tx_bd_base + greth->tx_last; |
700 | 706 | ||
@@ -796,6 +802,7 @@ static int greth_rx(struct net_device *dev, int limit) | |||
796 | memcpy(skb_put(skb, pkt_len), phys_to_virt(dma_addr), pkt_len); | 802 | memcpy(skb_put(skb, pkt_len), phys_to_virt(dma_addr), pkt_len); |
797 | 803 | ||
798 | skb->protocol = eth_type_trans(skb, dev); | 804 | skb->protocol = eth_type_trans(skb, dev); |
805 | dev->stats.rx_bytes += pkt_len; | ||
799 | dev->stats.rx_packets++; | 806 | dev->stats.rx_packets++; |
800 | netif_receive_skb(skb); | 807 | netif_receive_skb(skb); |
801 | } | 808 | } |
@@ -910,6 +917,7 @@ static int greth_rx_gbit(struct net_device *dev, int limit) | |||
910 | 917 | ||
911 | skb->protocol = eth_type_trans(skb, dev); | 918 | skb->protocol = eth_type_trans(skb, dev); |
912 | dev->stats.rx_packets++; | 919 | dev->stats.rx_packets++; |
920 | dev->stats.rx_bytes += pkt_len; | ||
913 | netif_receive_skb(skb); | 921 | netif_receive_skb(skb); |
914 | 922 | ||
915 | greth->rx_skbuff[greth->rx_cur] = newskb; | 923 | greth->rx_skbuff[greth->rx_cur] = newskb; |
diff --git a/drivers/net/greth.h b/drivers/net/greth.h index 9a0040dee4da..232a622a85b7 100644 --- a/drivers/net/greth.h +++ b/drivers/net/greth.h | |||
@@ -103,6 +103,7 @@ struct greth_private { | |||
103 | 103 | ||
104 | unsigned char *tx_bufs[GRETH_TXBD_NUM]; | 104 | unsigned char *tx_bufs[GRETH_TXBD_NUM]; |
105 | unsigned char *rx_bufs[GRETH_RXBD_NUM]; | 105 | unsigned char *rx_bufs[GRETH_RXBD_NUM]; |
106 | u16 tx_bufs_length[GRETH_TXBD_NUM]; | ||
106 | 107 | ||
107 | u16 tx_next; | 108 | u16 tx_next; |
108 | u16 tx_last; | 109 | u16 tx_last; |
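The greth changes add tx_bytes/rx_bytes accounting and only set GRETH_TXBD_CSALL when the stack actually requested checksum offload via CHECKSUM_PARTIAL; the new tx_bufs_length[] array remembers each frame's length so the TX-clean path can credit tx_bytes later. A condensed sketch combining those two ideas for a head descriptor (macros and fields as in the driver):

	u32 status = GRETH_BD_EN | GRETH_BD_IE | (skb->len & GRETH_BD_LEN);

	if (skb->ip_summed == CHECKSUM_PARTIAL)		/* csum offload was requested */
		status |= GRETH_TXBD_CSALL;

	/* remembered so greth_clean_tx() can do dev->stats.tx_bytes += ... */
	greth->tx_bufs_length[greth->tx_next] = skb->len & GRETH_BD_LEN;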
diff --git a/drivers/net/ibmveth.c b/drivers/net/ibmveth.c index ba99af05bf62..d393f1e764ed 100644 --- a/drivers/net/ibmveth.c +++ b/drivers/net/ibmveth.c | |||
@@ -395,7 +395,7 @@ static inline struct sk_buff *ibmveth_rxq_get_buffer(struct ibmveth_adapter *ada | |||
395 | } | 395 | } |
396 | 396 | ||
397 | /* recycle the current buffer on the rx queue */ | 397 | /* recycle the current buffer on the rx queue */ |
398 | static void ibmveth_rxq_recycle_buffer(struct ibmveth_adapter *adapter) | 398 | static int ibmveth_rxq_recycle_buffer(struct ibmveth_adapter *adapter) |
399 | { | 399 | { |
400 | u32 q_index = adapter->rx_queue.index; | 400 | u32 q_index = adapter->rx_queue.index; |
401 | u64 correlator = adapter->rx_queue.queue_addr[q_index].correlator; | 401 | u64 correlator = adapter->rx_queue.queue_addr[q_index].correlator; |
@@ -403,6 +403,7 @@ static void ibmveth_rxq_recycle_buffer(struct ibmveth_adapter *adapter) | |||
403 | unsigned int index = correlator & 0xffffffffUL; | 403 | unsigned int index = correlator & 0xffffffffUL; |
404 | union ibmveth_buf_desc desc; | 404 | union ibmveth_buf_desc desc; |
405 | unsigned long lpar_rc; | 405 | unsigned long lpar_rc; |
406 | int ret = 1; | ||
406 | 407 | ||
407 | BUG_ON(pool >= IBMVETH_NUM_BUFF_POOLS); | 408 | BUG_ON(pool >= IBMVETH_NUM_BUFF_POOLS); |
408 | BUG_ON(index >= adapter->rx_buff_pool[pool].size); | 409 | BUG_ON(index >= adapter->rx_buff_pool[pool].size); |
@@ -410,7 +411,7 @@ static void ibmveth_rxq_recycle_buffer(struct ibmveth_adapter *adapter) | |||
410 | if (!adapter->rx_buff_pool[pool].active) { | 411 | if (!adapter->rx_buff_pool[pool].active) { |
411 | ibmveth_rxq_harvest_buffer(adapter); | 412 | ibmveth_rxq_harvest_buffer(adapter); |
412 | ibmveth_free_buffer_pool(adapter, &adapter->rx_buff_pool[pool]); | 413 | ibmveth_free_buffer_pool(adapter, &adapter->rx_buff_pool[pool]); |
413 | return; | 414 | goto out; |
414 | } | 415 | } |
415 | 416 | ||
416 | desc.fields.flags_len = IBMVETH_BUF_VALID | | 417 | desc.fields.flags_len = IBMVETH_BUF_VALID | |
@@ -423,12 +424,16 @@ static void ibmveth_rxq_recycle_buffer(struct ibmveth_adapter *adapter) | |||
423 | netdev_dbg(adapter->netdev, "h_add_logical_lan_buffer failed " | 424 | netdev_dbg(adapter->netdev, "h_add_logical_lan_buffer failed " |
424 | "during recycle rc=%ld", lpar_rc); | 425 | "during recycle rc=%ld", lpar_rc); |
425 | ibmveth_remove_buffer_from_pool(adapter, adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator); | 426 | ibmveth_remove_buffer_from_pool(adapter, adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator); |
427 | ret = 0; | ||
426 | } | 428 | } |
427 | 429 | ||
428 | if (++adapter->rx_queue.index == adapter->rx_queue.num_slots) { | 430 | if (++adapter->rx_queue.index == adapter->rx_queue.num_slots) { |
429 | adapter->rx_queue.index = 0; | 431 | adapter->rx_queue.index = 0; |
430 | adapter->rx_queue.toggle = !adapter->rx_queue.toggle; | 432 | adapter->rx_queue.toggle = !adapter->rx_queue.toggle; |
431 | } | 433 | } |
434 | |||
435 | out: | ||
436 | return ret; | ||
432 | } | 437 | } |
433 | 438 | ||
434 | static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter) | 439 | static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter) |
@@ -631,8 +636,8 @@ static int ibmveth_open(struct net_device *netdev) | |||
631 | netdev_err(netdev, "unable to request irq 0x%x, rc %d\n", | 636 | netdev_err(netdev, "unable to request irq 0x%x, rc %d\n", |
632 | netdev->irq, rc); | 637 | netdev->irq, rc); |
633 | do { | 638 | do { |
634 | rc = h_free_logical_lan(adapter->vdev->unit_address); | 639 | lpar_rc = h_free_logical_lan(adapter->vdev->unit_address); |
635 | } while (H_IS_LONG_BUSY(rc) || (rc == H_BUSY)); | 640 | } while (H_IS_LONG_BUSY(lpar_rc) || (lpar_rc == H_BUSY)); |
636 | 641 | ||
637 | goto err_out; | 642 | goto err_out; |
638 | } | 643 | } |
@@ -752,7 +757,7 @@ static int ibmveth_set_csum_offload(struct net_device *dev, u32 data) | |||
752 | struct ibmveth_adapter *adapter = netdev_priv(dev); | 757 | struct ibmveth_adapter *adapter = netdev_priv(dev); |
753 | unsigned long set_attr, clr_attr, ret_attr; | 758 | unsigned long set_attr, clr_attr, ret_attr; |
754 | unsigned long set_attr6, clr_attr6; | 759 | unsigned long set_attr6, clr_attr6; |
755 | long ret, ret6; | 760 | long ret, ret4, ret6; |
756 | int rc1 = 0, rc2 = 0; | 761 | int rc1 = 0, rc2 = 0; |
757 | int restart = 0; | 762 | int restart = 0; |
758 | 763 | ||
@@ -765,6 +770,8 @@ static int ibmveth_set_csum_offload(struct net_device *dev, u32 data) | |||
765 | 770 | ||
766 | set_attr = 0; | 771 | set_attr = 0; |
767 | clr_attr = 0; | 772 | clr_attr = 0; |
773 | set_attr6 = 0; | ||
774 | clr_attr6 = 0; | ||
768 | 775 | ||
769 | if (data) { | 776 | if (data) { |
770 | set_attr = IBMVETH_ILLAN_IPV4_TCP_CSUM; | 777 | set_attr = IBMVETH_ILLAN_IPV4_TCP_CSUM; |
@@ -779,16 +786,20 @@ static int ibmveth_set_csum_offload(struct net_device *dev, u32 data) | |||
779 | if (ret == H_SUCCESS && !(ret_attr & IBMVETH_ILLAN_ACTIVE_TRUNK) && | 786 | if (ret == H_SUCCESS && !(ret_attr & IBMVETH_ILLAN_ACTIVE_TRUNK) && |
780 | !(ret_attr & IBMVETH_ILLAN_TRUNK_PRI_MASK) && | 787 | !(ret_attr & IBMVETH_ILLAN_TRUNK_PRI_MASK) && |
781 | (ret_attr & IBMVETH_ILLAN_PADDED_PKT_CSUM)) { | 788 | (ret_attr & IBMVETH_ILLAN_PADDED_PKT_CSUM)) { |
782 | ret = h_illan_attributes(adapter->vdev->unit_address, clr_attr, | 789 | ret4 = h_illan_attributes(adapter->vdev->unit_address, clr_attr, |
783 | set_attr, &ret_attr); | 790 | set_attr, &ret_attr); |
784 | 791 | ||
785 | if (ret != H_SUCCESS) { | 792 | if (ret4 != H_SUCCESS) { |
786 | netdev_err(dev, "unable to change IPv4 checksum " | 793 | netdev_err(dev, "unable to change IPv4 checksum " |
787 | "offload settings. %d rc=%ld\n", | 794 | "offload settings. %d rc=%ld\n", |
788 | data, ret); | 795 | data, ret4); |
796 | |||
797 | h_illan_attributes(adapter->vdev->unit_address, | ||
798 | set_attr, clr_attr, &ret_attr); | ||
799 | |||
800 | if (data == 1) | ||
801 | dev->features &= ~NETIF_F_IP_CSUM; | ||
789 | 802 | ||
790 | ret = h_illan_attributes(adapter->vdev->unit_address, | ||
791 | set_attr, clr_attr, &ret_attr); | ||
792 | } else { | 803 | } else { |
793 | adapter->fw_ipv4_csum_support = data; | 804 | adapter->fw_ipv4_csum_support = data; |
794 | } | 805 | } |
@@ -799,15 +810,18 @@ static int ibmveth_set_csum_offload(struct net_device *dev, u32 data) | |||
799 | if (ret6 != H_SUCCESS) { | 810 | if (ret6 != H_SUCCESS) { |
800 | netdev_err(dev, "unable to change IPv6 checksum " | 811 | netdev_err(dev, "unable to change IPv6 checksum " |
801 | "offload settings. %d rc=%ld\n", | 812 | "offload settings. %d rc=%ld\n", |
802 | data, ret); | 813 | data, ret6); |
814 | |||
815 | h_illan_attributes(adapter->vdev->unit_address, | ||
816 | set_attr6, clr_attr6, &ret_attr); | ||
817 | |||
818 | if (data == 1) | ||
819 | dev->features &= ~NETIF_F_IPV6_CSUM; | ||
803 | 820 | ||
804 | ret = h_illan_attributes(adapter->vdev->unit_address, | ||
805 | set_attr6, clr_attr6, | ||
806 | &ret_attr); | ||
807 | } else | 821 | } else |
808 | adapter->fw_ipv6_csum_support = data; | 822 | adapter->fw_ipv6_csum_support = data; |
809 | 823 | ||
810 | if (ret != H_SUCCESS || ret6 != H_SUCCESS) | 824 | if (ret4 == H_SUCCESS || ret6 == H_SUCCESS) |
811 | adapter->rx_csum = data; | 825 | adapter->rx_csum = data; |
812 | else | 826 | else |
813 | rc1 = -EIO; | 827 | rc1 = -EIO; |
@@ -925,6 +939,7 @@ static netdev_tx_t ibmveth_start_xmit(struct sk_buff *skb, | |||
925 | union ibmveth_buf_desc descs[6]; | 939 | union ibmveth_buf_desc descs[6]; |
926 | int last, i; | 940 | int last, i; |
927 | int force_bounce = 0; | 941 | int force_bounce = 0; |
942 | dma_addr_t dma_addr; | ||
928 | 943 | ||
929 | /* | 944 | /* |
930 | * veth handles a maximum of 6 segments including the header, so | 945 | * veth handles a maximum of 6 segments including the header, so |
@@ -989,17 +1004,16 @@ retry_bounce: | |||
989 | } | 1004 | } |
990 | 1005 | ||
991 | /* Map the header */ | 1006 | /* Map the header */ |
992 | descs[0].fields.address = dma_map_single(&adapter->vdev->dev, skb->data, | 1007 | dma_addr = dma_map_single(&adapter->vdev->dev, skb->data, |
993 | skb_headlen(skb), | 1008 | skb_headlen(skb), DMA_TO_DEVICE); |
994 | DMA_TO_DEVICE); | 1009 | if (dma_mapping_error(&adapter->vdev->dev, dma_addr)) |
995 | if (dma_mapping_error(&adapter->vdev->dev, descs[0].fields.address)) | ||
996 | goto map_failed; | 1010 | goto map_failed; |
997 | 1011 | ||
998 | descs[0].fields.flags_len = desc_flags | skb_headlen(skb); | 1012 | descs[0].fields.flags_len = desc_flags | skb_headlen(skb); |
1013 | descs[0].fields.address = dma_addr; | ||
999 | 1014 | ||
1000 | /* Map the frags */ | 1015 | /* Map the frags */ |
1001 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { | 1016 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { |
1002 | unsigned long dma_addr; | ||
1003 | skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; | 1017 | skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; |
1004 | 1018 | ||
1005 | dma_addr = dma_map_page(&adapter->vdev->dev, frag->page, | 1019 | dma_addr = dma_map_page(&adapter->vdev->dev, frag->page, |
@@ -1021,7 +1035,12 @@ retry_bounce: | |||
1021 | netdev->stats.tx_bytes += skb->len; | 1035 | netdev->stats.tx_bytes += skb->len; |
1022 | } | 1036 | } |
1023 | 1037 | ||
1024 | for (i = 0; i < skb_shinfo(skb)->nr_frags + 1; i++) | 1038 | dma_unmap_single(&adapter->vdev->dev, |
1039 | descs[0].fields.address, | ||
1040 | descs[0].fields.flags_len & IBMVETH_BUF_LEN_MASK, | ||
1041 | DMA_TO_DEVICE); | ||
1042 | |||
1043 | for (i = 1; i < skb_shinfo(skb)->nr_frags + 1; i++) | ||
1025 | dma_unmap_page(&adapter->vdev->dev, descs[i].fields.address, | 1044 | dma_unmap_page(&adapter->vdev->dev, descs[i].fields.address, |
1026 | descs[i].fields.flags_len & IBMVETH_BUF_LEN_MASK, | 1045 | descs[i].fields.flags_len & IBMVETH_BUF_LEN_MASK, |
1027 | DMA_TO_DEVICE); | 1046 | DMA_TO_DEVICE); |
@@ -1084,8 +1103,9 @@ restart_poll: | |||
1084 | if (rx_flush) | 1103 | if (rx_flush) |
1085 | ibmveth_flush_buffer(skb->data, | 1104 | ibmveth_flush_buffer(skb->data, |
1086 | length + offset); | 1105 | length + offset); |
1106 | if (!ibmveth_rxq_recycle_buffer(adapter)) | ||
1107 | kfree_skb(skb); | ||
1087 | skb = new_skb; | 1108 | skb = new_skb; |
1088 | ibmveth_rxq_recycle_buffer(adapter); | ||
1089 | } else { | 1109 | } else { |
1090 | ibmveth_rxq_harvest_buffer(adapter); | 1110 | ibmveth_rxq_harvest_buffer(adapter); |
1091 | skb_reserve(skb, offset); | 1111 | skb_reserve(skb, offset); |
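The ibmveth transmit hunk above switches to a map-then-check pattern: the header is mapped with dma_map_single(), the result is tested with dma_mapping_error() before it is written into the descriptor, and on completion the header is released with dma_unmap_single() while page fragments keep using dma_unmap_page(). A minimal sketch of that pattern, with the demo_* names invented for illustration rather than taken from the driver:

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/skbuff.h>

/* Map an skb header for transmit; return 0 and fill *mapping on success.
 * Hypothetical helper mirroring the check-before-use order shown above. */
static int demo_map_header(struct device *dev, struct sk_buff *skb,
			   dma_addr_t *mapping)
{
	*mapping = dma_map_single(dev, skb->data, skb_headlen(skb),
				  DMA_TO_DEVICE);
	if (dma_mapping_error(dev, *mapping))
		return -ENOMEM;	/* caller drops or bounce-buffers the skb */

	return 0;
}

/* The matching teardown: dma_unmap_single() for the header, while page
 * fragments are released separately with dma_unmap_page(). */
static void demo_unmap_header(struct device *dev, dma_addr_t mapping,
			      unsigned int len)
{
	dma_unmap_single(dev, mapping, len, DMA_TO_DEVICE);
}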
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c index e86297b32733..e1fcc9589278 100644 --- a/drivers/net/ixgbe/ixgbe_main.c +++ b/drivers/net/ixgbe/ixgbe_main.c | |||
@@ -1321,8 +1321,8 @@ static void ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, | |||
1321 | if (ring_is_rsc_enabled(rx_ring)) | 1321 | if (ring_is_rsc_enabled(rx_ring)) |
1322 | pkt_is_rsc = ixgbe_get_rsc_state(rx_desc); | 1322 | pkt_is_rsc = ixgbe_get_rsc_state(rx_desc); |
1323 | 1323 | ||
1324 | /* if this is a skb from previous receive DMA will be 0 */ | 1324 | /* linear means we are building an skb from multiple pages */ |
1325 | if (rx_buffer_info->dma) { | 1325 | if (!skb_is_nonlinear(skb)) { |
1326 | u16 hlen; | 1326 | u16 hlen; |
1327 | if (pkt_is_rsc && | 1327 | if (pkt_is_rsc && |
1328 | !(staterr & IXGBE_RXD_STAT_EOP) && | 1328 | !(staterr & IXGBE_RXD_STAT_EOP) && |
@@ -1459,8 +1459,10 @@ static void ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, | |||
1459 | if (ixgbe_rx_is_fcoe(adapter, rx_desc)) { | 1459 | if (ixgbe_rx_is_fcoe(adapter, rx_desc)) { |
1460 | ddp_bytes = ixgbe_fcoe_ddp(adapter, rx_desc, skb, | 1460 | ddp_bytes = ixgbe_fcoe_ddp(adapter, rx_desc, skb, |
1461 | staterr); | 1461 | staterr); |
1462 | if (!ddp_bytes) | 1462 | if (!ddp_bytes) { |
1463 | dev_kfree_skb_any(skb); | ||
1463 | goto next_desc; | 1464 | goto next_desc; |
1465 | } | ||
1464 | } | 1466 | } |
1465 | #endif /* IXGBE_FCOE */ | 1467 | #endif /* IXGBE_FCOE */ |
1466 | ixgbe_receive_skb(q_vector, skb, staterr, rx_ring, rx_desc); | 1468 | ixgbe_receive_skb(q_vector, skb, staterr, rx_ring, rx_desc); |
diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c index dfc82720065a..ed2a3977c6e7 100644 --- a/drivers/net/netconsole.c +++ b/drivers/net/netconsole.c | |||
@@ -799,5 +799,11 @@ static void __exit cleanup_netconsole(void) | |||
799 | } | 799 | } |
800 | } | 800 | } |
801 | 801 | ||
802 | module_init(init_netconsole); | 802 | /* |
803 | * Use late_initcall to ensure netconsole is | ||
803 | * initialized after network device drivers if built-in. | ||
805 | * | ||
806 | * late_initcall() and module_init() are identical if built as module. | ||
807 | */ | ||
808 | late_initcall(init_netconsole); | ||
803 | module_exit(cleanup_netconsole); | 809 | module_exit(cleanup_netconsole); |
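The netconsole hunk above leans on init-call ordering: when everything is built into the kernel, late_initcall() routines run after the device_initcall()-level network drivers, and when built as a module late_initcall() behaves exactly like module_init(). A minimal module skeleton using the same registration choice (the demo_console names are hypothetical):

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>

static int __init demo_console_init(void)
{
	/* Runs after built-in network drivers have registered their
	 * net_devices, so the interface we want to attach to exists. */
	pr_info("demo_console: initialised\n");
	return 0;
}

static void __exit demo_console_exit(void)
{
	pr_info("demo_console: removed\n");
}

/* late_initcall() when built in; identical to module_init() as a module. */
late_initcall(demo_console_init);
module_exit(demo_console_exit);
MODULE_LICENSE("GPL");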
diff --git a/drivers/net/pch_gbe/pch_gbe.h b/drivers/net/pch_gbe/pch_gbe.h index 59fac77d0dbb..a09a07197eb5 100644 --- a/drivers/net/pch_gbe/pch_gbe.h +++ b/drivers/net/pch_gbe/pch_gbe.h | |||
@@ -127,8 +127,8 @@ struct pch_gbe_regs { | |||
127 | 127 | ||
128 | /* Reset */ | 128 | /* Reset */ |
129 | #define PCH_GBE_ALL_RST 0x80000000 /* All reset */ | 129 | #define PCH_GBE_ALL_RST 0x80000000 /* All reset */ |
130 | #define PCH_GBE_TX_RST 0x40000000 /* TX MAC, TX FIFO, TX DMA reset */ | 130 | #define PCH_GBE_TX_RST 0x00008000 /* TX MAC, TX FIFO, TX DMA reset */ |
131 | #define PCH_GBE_RX_RST 0x04000000 /* RX MAC, RX FIFO, RX DMA reset */ | 131 | #define PCH_GBE_RX_RST 0x00004000 /* RX MAC, RX FIFO, RX DMA reset */ |
132 | 132 | ||
133 | /* TCP/IP Accelerator Control */ | 133 | /* TCP/IP Accelerator Control */ |
134 | #define PCH_GBE_EX_LIST_EN 0x00000008 /* External List Enable */ | 134 | #define PCH_GBE_EX_LIST_EN 0x00000008 /* External List Enable */ |
@@ -276,6 +276,9 @@ struct pch_gbe_regs { | |||
276 | #define PCH_GBE_RX_DMA_EN 0x00000002 /* Enables Receive DMA */ | 276 | #define PCH_GBE_RX_DMA_EN 0x00000002 /* Enables Receive DMA */ |
277 | #define PCH_GBE_TX_DMA_EN 0x00000001 /* Enables Transmission DMA */ | 277 | #define PCH_GBE_TX_DMA_EN 0x00000001 /* Enables Transmission DMA */ |
278 | 278 | ||
279 | /* RX DMA STATUS */ | ||
280 | #define PCH_GBE_IDLE_CHECK 0xFFFFFFFE | ||
281 | |||
279 | /* Wake On LAN Status */ | 282 | /* Wake On LAN Status */ |
280 | #define PCH_GBE_WLS_BR 0x00000008 /* Broadcast Address */ | 283 | #define PCH_GBE_WLS_BR 0x00000008 /* Broadcast Address */ |
281 | #define PCH_GBE_WLS_MLT 0x00000004 /* Multicast Address */ | 284 | #define PCH_GBE_WLS_MLT 0x00000004 /* Multicast Address */ |
@@ -471,6 +474,7 @@ struct pch_gbe_tx_desc { | |||
471 | struct pch_gbe_buffer { | 474 | struct pch_gbe_buffer { |
472 | struct sk_buff *skb; | 475 | struct sk_buff *skb; |
473 | dma_addr_t dma; | 476 | dma_addr_t dma; |
477 | unsigned char *rx_buffer; | ||
474 | unsigned long time_stamp; | 478 | unsigned long time_stamp; |
475 | u16 length; | 479 | u16 length; |
476 | bool mapped; | 480 | bool mapped; |
@@ -511,6 +515,9 @@ struct pch_gbe_tx_ring { | |||
511 | struct pch_gbe_rx_ring { | 515 | struct pch_gbe_rx_ring { |
512 | struct pch_gbe_rx_desc *desc; | 516 | struct pch_gbe_rx_desc *desc; |
513 | dma_addr_t dma; | 517 | dma_addr_t dma; |
518 | unsigned char *rx_buff_pool; | ||
519 | dma_addr_t rx_buff_pool_logic; | ||
520 | unsigned int rx_buff_pool_size; | ||
514 | unsigned int size; | 521 | unsigned int size; |
515 | unsigned int count; | 522 | unsigned int count; |
516 | unsigned int next_to_use; | 523 | unsigned int next_to_use; |
@@ -622,6 +629,7 @@ struct pch_gbe_adapter { | |||
622 | unsigned long rx_buffer_len; | 629 | unsigned long rx_buffer_len; |
623 | unsigned long tx_queue_len; | 630 | unsigned long tx_queue_len; |
624 | bool have_msi; | 631 | bool have_msi; |
632 | bool rx_stop_flag; | ||
625 | }; | 633 | }; |
626 | 634 | ||
627 | extern const char pch_driver_version[]; | 635 | extern const char pch_driver_version[]; |
diff --git a/drivers/net/pch_gbe/pch_gbe_main.c b/drivers/net/pch_gbe/pch_gbe_main.c index eac3c5ca9731..b8b4ba27b0e7 100644 --- a/drivers/net/pch_gbe/pch_gbe_main.c +++ b/drivers/net/pch_gbe/pch_gbe_main.c | |||
@@ -20,7 +20,6 @@ | |||
20 | 20 | ||
21 | #include "pch_gbe.h" | 21 | #include "pch_gbe.h" |
22 | #include "pch_gbe_api.h" | 22 | #include "pch_gbe_api.h" |
23 | #include <linux/prefetch.h> | ||
24 | 23 | ||
25 | #define DRV_VERSION "1.00" | 24 | #define DRV_VERSION "1.00" |
26 | const char pch_driver_version[] = DRV_VERSION; | 25 | const char pch_driver_version[] = DRV_VERSION; |
@@ -34,11 +33,15 @@ const char pch_driver_version[] = DRV_VERSION; | |||
34 | #define PCH_GBE_WATCHDOG_PERIOD (1 * HZ) /* watchdog time */ | 33 | #define PCH_GBE_WATCHDOG_PERIOD (1 * HZ) /* watchdog time */ |
35 | #define PCH_GBE_COPYBREAK_DEFAULT 256 | 34 | #define PCH_GBE_COPYBREAK_DEFAULT 256 |
36 | #define PCH_GBE_PCI_BAR 1 | 35 | #define PCH_GBE_PCI_BAR 1 |
36 | #define PCH_GBE_RESERVE_MEMORY 0x200000 /* 2MB */ | ||
37 | 37 | ||
38 | /* Macros for ML7223 */ | 38 | /* Macros for ML7223 */ |
39 | #define PCI_VENDOR_ID_ROHM 0x10db | 39 | #define PCI_VENDOR_ID_ROHM 0x10db |
40 | #define PCI_DEVICE_ID_ROHM_ML7223_GBE 0x8013 | 40 | #define PCI_DEVICE_ID_ROHM_ML7223_GBE 0x8013 |
41 | 41 | ||
42 | /* Macros for ML7831 */ | ||
43 | #define PCI_DEVICE_ID_ROHM_ML7831_GBE 0x8802 | ||
44 | |||
42 | #define PCH_GBE_TX_WEIGHT 64 | 45 | #define PCH_GBE_TX_WEIGHT 64 |
43 | #define PCH_GBE_RX_WEIGHT 64 | 46 | #define PCH_GBE_RX_WEIGHT 64 |
44 | #define PCH_GBE_RX_BUFFER_WRITE 16 | 47 | #define PCH_GBE_RX_BUFFER_WRITE 16 |
@@ -52,6 +55,7 @@ const char pch_driver_version[] = DRV_VERSION; | |||
52 | ) | 55 | ) |
53 | 56 | ||
54 | /* Ethertype field values */ | 57 | /* Ethertype field values */ |
58 | #define PCH_GBE_MAX_RX_BUFFER_SIZE 0x2880 | ||
55 | #define PCH_GBE_MAX_JUMBO_FRAME_SIZE 10318 | 59 | #define PCH_GBE_MAX_JUMBO_FRAME_SIZE 10318 |
56 | #define PCH_GBE_FRAME_SIZE_2048 2048 | 60 | #define PCH_GBE_FRAME_SIZE_2048 2048 |
57 | #define PCH_GBE_FRAME_SIZE_4096 4096 | 61 | #define PCH_GBE_FRAME_SIZE_4096 4096 |
@@ -83,10 +87,12 @@ const char pch_driver_version[] = DRV_VERSION; | |||
83 | #define PCH_GBE_INT_ENABLE_MASK ( \ | 87 | #define PCH_GBE_INT_ENABLE_MASK ( \ |
84 | PCH_GBE_INT_RX_DMA_CMPLT | \ | 88 | PCH_GBE_INT_RX_DMA_CMPLT | \ |
85 | PCH_GBE_INT_RX_DSC_EMP | \ | 89 | PCH_GBE_INT_RX_DSC_EMP | \ |
90 | PCH_GBE_INT_RX_FIFO_ERR | \ | ||
86 | PCH_GBE_INT_WOL_DET | \ | 91 | PCH_GBE_INT_WOL_DET | \ |
87 | PCH_GBE_INT_TX_CMPLT \ | 92 | PCH_GBE_INT_TX_CMPLT \ |
88 | ) | 93 | ) |
89 | 94 | ||
95 | #define PCH_GBE_INT_DISABLE_ALL 0 | ||
90 | 96 | ||
91 | static unsigned int copybreak __read_mostly = PCH_GBE_COPYBREAK_DEFAULT; | 97 | static unsigned int copybreak __read_mostly = PCH_GBE_COPYBREAK_DEFAULT; |
92 | 98 | ||
@@ -138,6 +144,27 @@ static void pch_gbe_wait_clr_bit(void *reg, u32 bit) | |||
138 | if (!tmp) | 144 | if (!tmp) |
139 | pr_err("Error: busy bit is not cleared\n"); | 145 | pr_err("Error: busy bit is not cleared\n"); |
140 | } | 146 | } |
147 | |||
148 | /** | ||
149 | * pch_gbe_wait_clr_bit_irq - Wait to clear a bit for interrupt context | ||
150 | * @reg: Pointer of register | ||
151 | * @bit: Busy bit | ||
152 | */ | ||
153 | static int pch_gbe_wait_clr_bit_irq(void *reg, u32 bit) | ||
154 | { | ||
155 | u32 tmp; | ||
156 | int ret = -1; | ||
157 | /* wait busy */ | ||
158 | tmp = 20; | ||
159 | while ((ioread32(reg) & bit) && --tmp) | ||
160 | udelay(5); | ||
161 | if (!tmp) | ||
162 | pr_err("Error: busy bit is not cleared\n"); | ||
163 | else | ||
164 | ret = 0; | ||
165 | return ret; | ||
166 | } | ||
167 | |||
141 | /** | 168 | /** |
142 | * pch_gbe_mac_mar_set - Set MAC address register | 169 | * pch_gbe_mac_mar_set - Set MAC address register |
143 | * @hw: Pointer to the HW structure | 170 | * @hw: Pointer to the HW structure |
@@ -189,6 +216,17 @@ static void pch_gbe_mac_reset_hw(struct pch_gbe_hw *hw) | |||
189 | return; | 216 | return; |
190 | } | 217 | } |
191 | 218 | ||
219 | static void pch_gbe_mac_reset_rx(struct pch_gbe_hw *hw) | ||
220 | { | ||
221 | /* Read the MAC address and store it in the private data */ | ||
222 | pch_gbe_mac_read_mac_addr(hw); | ||
223 | iowrite32(PCH_GBE_RX_RST, &hw->reg->RESET); | ||
224 | pch_gbe_wait_clr_bit_irq(&hw->reg->RESET, PCH_GBE_RX_RST); | ||
225 | /* Setup the MAC address */ | ||
226 | pch_gbe_mac_mar_set(hw, hw->mac.addr, 0); | ||
227 | return; | ||
228 | } | ||
229 | |||
192 | /** | 230 | /** |
193 | * pch_gbe_mac_init_rx_addrs - Initialize receive addresses | 231 | * pch_gbe_mac_init_rx_addrs - Initialize receive addresses |
194 | * @hw: Pointer to the HW structure | 232 | * @hw: Pointer to the HW structure |
@@ -671,13 +709,8 @@ static void pch_gbe_setup_rctl(struct pch_gbe_adapter *adapter) | |||
671 | 709 | ||
672 | tcpip = ioread32(&hw->reg->TCPIP_ACC); | 710 | tcpip = ioread32(&hw->reg->TCPIP_ACC); |
673 | 711 | ||
674 | if (netdev->features & NETIF_F_RXCSUM) { | 712 | tcpip |= PCH_GBE_RX_TCPIPACC_OFF; |
675 | tcpip &= ~PCH_GBE_RX_TCPIPACC_OFF; | 713 | tcpip &= ~PCH_GBE_RX_TCPIPACC_EN; |
676 | tcpip |= PCH_GBE_RX_TCPIPACC_EN; | ||
677 | } else { | ||
678 | tcpip |= PCH_GBE_RX_TCPIPACC_OFF; | ||
679 | tcpip &= ~PCH_GBE_RX_TCPIPACC_EN; | ||
680 | } | ||
681 | iowrite32(tcpip, &hw->reg->TCPIP_ACC); | 714 | iowrite32(tcpip, &hw->reg->TCPIP_ACC); |
682 | return; | 715 | return; |
683 | } | 716 | } |
@@ -717,13 +750,6 @@ static void pch_gbe_configure_rx(struct pch_gbe_adapter *adapter) | |||
717 | iowrite32(rdba, &hw->reg->RX_DSC_BASE); | 750 | iowrite32(rdba, &hw->reg->RX_DSC_BASE); |
718 | iowrite32(rdlen, &hw->reg->RX_DSC_SIZE); | 751 | iowrite32(rdlen, &hw->reg->RX_DSC_SIZE); |
719 | iowrite32((rdba + rdlen), &hw->reg->RX_DSC_SW_P); | 752 | iowrite32((rdba + rdlen), &hw->reg->RX_DSC_SW_P); |
720 | |||
721 | /* Enables Receive DMA */ | ||
722 | rxdma = ioread32(&hw->reg->DMA_CTRL); | ||
723 | rxdma |= PCH_GBE_RX_DMA_EN; | ||
724 | iowrite32(rxdma, &hw->reg->DMA_CTRL); | ||
725 | /* Enables Receive */ | ||
726 | iowrite32(PCH_GBE_MRE_MAC_RX_EN, &hw->reg->MAC_RX_EN); | ||
727 | } | 753 | } |
728 | 754 | ||
729 | /** | 755 | /** |
@@ -1097,6 +1123,48 @@ void pch_gbe_update_stats(struct pch_gbe_adapter *adapter) | |||
1097 | spin_unlock_irqrestore(&adapter->stats_lock, flags); | 1123 | spin_unlock_irqrestore(&adapter->stats_lock, flags); |
1098 | } | 1124 | } |
1099 | 1125 | ||
1126 | static void pch_gbe_stop_receive(struct pch_gbe_adapter *adapter) | ||
1127 | { | ||
1128 | struct pch_gbe_hw *hw = &adapter->hw; | ||
1129 | u32 rxdma; | ||
1130 | u16 value; | ||
1131 | int ret; | ||
1132 | |||
1133 | /* Disable Receive DMA */ | ||
1134 | rxdma = ioread32(&hw->reg->DMA_CTRL); | ||
1135 | rxdma &= ~PCH_GBE_RX_DMA_EN; | ||
1136 | iowrite32(rxdma, &hw->reg->DMA_CTRL); | ||
1137 | /* Wait until the Rx DMA bus is idle */ | ||
1138 | ret = pch_gbe_wait_clr_bit_irq(&hw->reg->RX_DMA_ST, PCH_GBE_IDLE_CHECK); | ||
1139 | if (ret) { | ||
1140 | /* Disable Bus master */ | ||
1141 | pci_read_config_word(adapter->pdev, PCI_COMMAND, &value); | ||
1142 | value &= ~PCI_COMMAND_MASTER; | ||
1143 | pci_write_config_word(adapter->pdev, PCI_COMMAND, value); | ||
1144 | /* Stop Receive */ | ||
1145 | pch_gbe_mac_reset_rx(hw); | ||
1146 | /* Enable Bus master */ | ||
1147 | value |= PCI_COMMAND_MASTER; | ||
1148 | pci_write_config_word(adapter->pdev, PCI_COMMAND, value); | ||
1149 | } else { | ||
1150 | /* Stop Receive */ | ||
1151 | pch_gbe_mac_reset_rx(hw); | ||
1152 | } | ||
1153 | } | ||
1154 | |||
1155 | static void pch_gbe_start_receive(struct pch_gbe_hw *hw) | ||
1156 | { | ||
1157 | u32 rxdma; | ||
1158 | |||
1159 | /* Enables Receive DMA */ | ||
1160 | rxdma = ioread32(&hw->reg->DMA_CTRL); | ||
1161 | rxdma |= PCH_GBE_RX_DMA_EN; | ||
1162 | iowrite32(rxdma, &hw->reg->DMA_CTRL); | ||
1163 | /* Enables Receive */ | ||
1164 | iowrite32(PCH_GBE_MRE_MAC_RX_EN, &hw->reg->MAC_RX_EN); | ||
1165 | return; | ||
1166 | } | ||
1167 | |||
1100 | /** | 1168 | /** |
1101 | * pch_gbe_intr - Interrupt Handler | 1169 | * pch_gbe_intr - Interrupt Handler |
1102 | * @irq: Interrupt number | 1170 | * @irq: Interrupt number |
@@ -1123,7 +1191,17 @@ static irqreturn_t pch_gbe_intr(int irq, void *data) | |||
1123 | if (int_st & PCH_GBE_INT_RX_FRAME_ERR) | 1191 | if (int_st & PCH_GBE_INT_RX_FRAME_ERR) |
1124 | adapter->stats.intr_rx_frame_err_count++; | 1192 | adapter->stats.intr_rx_frame_err_count++; |
1125 | if (int_st & PCH_GBE_INT_RX_FIFO_ERR) | 1193 | if (int_st & PCH_GBE_INT_RX_FIFO_ERR) |
1126 | adapter->stats.intr_rx_fifo_err_count++; | 1194 | if (!adapter->rx_stop_flag) { |
1195 | adapter->stats.intr_rx_fifo_err_count++; | ||
1196 | pr_debug("Rx fifo overrun\n"); | ||
1197 | adapter->rx_stop_flag = true; | ||
1198 | int_en = ioread32(&hw->reg->INT_EN); | ||
1199 | iowrite32((int_en & ~PCH_GBE_INT_RX_FIFO_ERR), | ||
1200 | &hw->reg->INT_EN); | ||
1201 | pch_gbe_stop_receive(adapter); | ||
1202 | int_st |= ioread32(&hw->reg->INT_ST); | ||
1203 | int_st = int_st & ioread32(&hw->reg->INT_EN); | ||
1204 | } | ||
1127 | if (int_st & PCH_GBE_INT_RX_DMA_ERR) | 1205 | if (int_st & PCH_GBE_INT_RX_DMA_ERR) |
1128 | adapter->stats.intr_rx_dma_err_count++; | 1206 | adapter->stats.intr_rx_dma_err_count++; |
1129 | if (int_st & PCH_GBE_INT_TX_FIFO_ERR) | 1207 | if (int_st & PCH_GBE_INT_TX_FIFO_ERR) |
@@ -1135,21 +1213,18 @@ static irqreturn_t pch_gbe_intr(int irq, void *data) | |||
1135 | /* When Rx descriptor is empty */ | 1213 | /* When Rx descriptor is empty */ |
1136 | if ((int_st & PCH_GBE_INT_RX_DSC_EMP)) { | 1214 | if ((int_st & PCH_GBE_INT_RX_DSC_EMP)) { |
1137 | adapter->stats.intr_rx_dsc_empty_count++; | 1215 | adapter->stats.intr_rx_dsc_empty_count++; |
1138 | pr_err("Rx descriptor is empty\n"); | 1216 | pr_debug("Rx descriptor is empty\n"); |
1139 | int_en = ioread32(&hw->reg->INT_EN); | 1217 | int_en = ioread32(&hw->reg->INT_EN); |
1140 | iowrite32((int_en & ~PCH_GBE_INT_RX_DSC_EMP), &hw->reg->INT_EN); | 1218 | iowrite32((int_en & ~PCH_GBE_INT_RX_DSC_EMP), &hw->reg->INT_EN); |
1141 | if (hw->mac.tx_fc_enable) { | 1219 | if (hw->mac.tx_fc_enable) { |
1142 | /* Set Pause packet */ | 1220 | /* Set Pause packet */ |
1143 | pch_gbe_mac_set_pause_packet(hw); | 1221 | pch_gbe_mac_set_pause_packet(hw); |
1144 | } | 1222 | } |
1145 | if ((int_en & (PCH_GBE_INT_RX_DMA_CMPLT | PCH_GBE_INT_TX_CMPLT)) | ||
1146 | == 0) { | ||
1147 | return IRQ_HANDLED; | ||
1148 | } | ||
1149 | } | 1223 | } |
1150 | 1224 | ||
1151 | /* When request status is Receive interruption */ | 1225 | /* When request status is Receive interruption */ |
1152 | if ((int_st & (PCH_GBE_INT_RX_DMA_CMPLT | PCH_GBE_INT_TX_CMPLT))) { | 1226 | if ((int_st & (PCH_GBE_INT_RX_DMA_CMPLT | PCH_GBE_INT_TX_CMPLT)) || |
1227 | (adapter->rx_stop_flag == true)) { | ||
1153 | if (likely(napi_schedule_prep(&adapter->napi))) { | 1228 | if (likely(napi_schedule_prep(&adapter->napi))) { |
1154 | /* Enable only Rx Descriptor empty */ | 1229 | /* Enable only Rx Descriptor empty */ |
1155 | atomic_inc(&adapter->irq_sem); | 1230 | atomic_inc(&adapter->irq_sem); |
@@ -1185,29 +1260,23 @@ pch_gbe_alloc_rx_buffers(struct pch_gbe_adapter *adapter, | |||
1185 | unsigned int i; | 1260 | unsigned int i; |
1186 | unsigned int bufsz; | 1261 | unsigned int bufsz; |
1187 | 1262 | ||
1188 | bufsz = adapter->rx_buffer_len + PCH_GBE_DMA_ALIGN; | 1263 | bufsz = adapter->rx_buffer_len + NET_IP_ALIGN; |
1189 | i = rx_ring->next_to_use; | 1264 | i = rx_ring->next_to_use; |
1190 | 1265 | ||
1191 | while ((cleaned_count--)) { | 1266 | while ((cleaned_count--)) { |
1192 | buffer_info = &rx_ring->buffer_info[i]; | 1267 | buffer_info = &rx_ring->buffer_info[i]; |
1193 | skb = buffer_info->skb; | 1268 | skb = netdev_alloc_skb(netdev, bufsz); |
1194 | if (skb) { | 1269 | if (unlikely(!skb)) { |
1195 | skb_trim(skb, 0); | 1270 | /* Better luck next round */ |
1196 | } else { | 1271 | adapter->stats.rx_alloc_buff_failed++; |
1197 | skb = netdev_alloc_skb(netdev, bufsz); | 1272 | break; |
1198 | if (unlikely(!skb)) { | ||
1199 | /* Better luck next round */ | ||
1200 | adapter->stats.rx_alloc_buff_failed++; | ||
1201 | break; | ||
1202 | } | ||
1203 | /* 64byte align */ | ||
1204 | skb_reserve(skb, PCH_GBE_DMA_ALIGN); | ||
1205 | |||
1206 | buffer_info->skb = skb; | ||
1207 | buffer_info->length = adapter->rx_buffer_len; | ||
1208 | } | 1273 | } |
1274 | /* align */ | ||
1275 | skb_reserve(skb, NET_IP_ALIGN); | ||
1276 | buffer_info->skb = skb; | ||
1277 | |||
1209 | buffer_info->dma = dma_map_single(&pdev->dev, | 1278 | buffer_info->dma = dma_map_single(&pdev->dev, |
1210 | skb->data, | 1279 | buffer_info->rx_buffer, |
1211 | buffer_info->length, | 1280 | buffer_info->length, |
1212 | DMA_FROM_DEVICE); | 1281 | DMA_FROM_DEVICE); |
1213 | if (dma_mapping_error(&adapter->pdev->dev, buffer_info->dma)) { | 1282 | if (dma_mapping_error(&adapter->pdev->dev, buffer_info->dma)) { |
@@ -1240,6 +1309,36 @@ pch_gbe_alloc_rx_buffers(struct pch_gbe_adapter *adapter, | |||
1240 | return; | 1309 | return; |
1241 | } | 1310 | } |
1242 | 1311 | ||
1312 | static int | ||
1313 | pch_gbe_alloc_rx_buffers_pool(struct pch_gbe_adapter *adapter, | ||
1314 | struct pch_gbe_rx_ring *rx_ring, int cleaned_count) | ||
1315 | { | ||
1316 | struct pci_dev *pdev = adapter->pdev; | ||
1317 | struct pch_gbe_buffer *buffer_info; | ||
1318 | unsigned int i; | ||
1319 | unsigned int bufsz; | ||
1320 | unsigned int size; | ||
1321 | |||
1322 | bufsz = adapter->rx_buffer_len; | ||
1323 | |||
1324 | size = rx_ring->count * bufsz + PCH_GBE_RESERVE_MEMORY; | ||
1325 | rx_ring->rx_buff_pool = dma_alloc_coherent(&pdev->dev, size, | ||
1326 | &rx_ring->rx_buff_pool_logic, | ||
1327 | GFP_KERNEL); | ||
1328 | if (!rx_ring->rx_buff_pool) { | ||
1329 | pr_err("Unable to allocate memory for the receive pool buffer\n"); | ||
1330 | return -ENOMEM; | ||
1331 | } | ||
1332 | memset(rx_ring->rx_buff_pool, 0, size); | ||
1333 | rx_ring->rx_buff_pool_size = size; | ||
1334 | for (i = 0; i < rx_ring->count; i++) { | ||
1335 | buffer_info = &rx_ring->buffer_info[i]; | ||
1336 | buffer_info->rx_buffer = rx_ring->rx_buff_pool + bufsz * i; | ||
1337 | buffer_info->length = bufsz; | ||
1338 | } | ||
1339 | return 0; | ||
1340 | } | ||
1341 | |||
1243 | /** | 1342 | /** |
1244 | * pch_gbe_alloc_tx_buffers - Allocate transmit buffers | 1343 | * pch_gbe_alloc_tx_buffers - Allocate transmit buffers |
1245 | * @adapter: Board private structure | 1344 | * @adapter: Board private structure |
@@ -1285,7 +1384,7 @@ pch_gbe_clean_tx(struct pch_gbe_adapter *adapter, | |||
1285 | struct sk_buff *skb; | 1384 | struct sk_buff *skb; |
1286 | unsigned int i; | 1385 | unsigned int i; |
1287 | unsigned int cleaned_count = 0; | 1386 | unsigned int cleaned_count = 0; |
1288 | bool cleaned = false; | 1387 | bool cleaned = true; |
1289 | 1388 | ||
1290 | pr_debug("next_to_clean : %d\n", tx_ring->next_to_clean); | 1389 | pr_debug("next_to_clean : %d\n", tx_ring->next_to_clean); |
1291 | 1390 | ||
@@ -1296,7 +1395,6 @@ pch_gbe_clean_tx(struct pch_gbe_adapter *adapter, | |||
1296 | 1395 | ||
1297 | while ((tx_desc->gbec_status & DSC_INIT16) == 0x0000) { | 1396 | while ((tx_desc->gbec_status & DSC_INIT16) == 0x0000) { |
1298 | pr_debug("gbec_status:0x%04x\n", tx_desc->gbec_status); | 1397 | pr_debug("gbec_status:0x%04x\n", tx_desc->gbec_status); |
1299 | cleaned = true; | ||
1300 | buffer_info = &tx_ring->buffer_info[i]; | 1398 | buffer_info = &tx_ring->buffer_info[i]; |
1301 | skb = buffer_info->skb; | 1399 | skb = buffer_info->skb; |
1302 | 1400 | ||
@@ -1339,8 +1437,10 @@ pch_gbe_clean_tx(struct pch_gbe_adapter *adapter, | |||
1339 | tx_desc = PCH_GBE_TX_DESC(*tx_ring, i); | 1437 | tx_desc = PCH_GBE_TX_DESC(*tx_ring, i); |
1340 | 1438 | ||
1341 | /* weight of a sort for tx, to avoid endless transmit cleanup */ | 1439 | /* weight of a sort for tx, to avoid endless transmit cleanup */ |
1342 | if (cleaned_count++ == PCH_GBE_TX_WEIGHT) | 1440 | if (cleaned_count++ == PCH_GBE_TX_WEIGHT) { |
1441 | cleaned = false; | ||
1343 | break; | 1442 | break; |
1443 | } | ||
1344 | } | 1444 | } |
1345 | pr_debug("called pch_gbe_unmap_and_free_tx_resource() %d count\n", | 1445 | pr_debug("called pch_gbe_unmap_and_free_tx_resource() %d count\n", |
1346 | cleaned_count); | 1446 | cleaned_count); |
@@ -1380,7 +1480,7 @@ pch_gbe_clean_rx(struct pch_gbe_adapter *adapter, | |||
1380 | unsigned int i; | 1480 | unsigned int i; |
1381 | unsigned int cleaned_count = 0; | 1481 | unsigned int cleaned_count = 0; |
1382 | bool cleaned = false; | 1482 | bool cleaned = false; |
1383 | struct sk_buff *skb, *new_skb; | 1483 | struct sk_buff *skb; |
1384 | u8 dma_status; | 1484 | u8 dma_status; |
1385 | u16 gbec_status; | 1485 | u16 gbec_status; |
1386 | u32 tcp_ip_status; | 1486 | u32 tcp_ip_status; |
@@ -1401,13 +1501,12 @@ pch_gbe_clean_rx(struct pch_gbe_adapter *adapter, | |||
1401 | rx_desc->gbec_status = DSC_INIT16; | 1501 | rx_desc->gbec_status = DSC_INIT16; |
1402 | buffer_info = &rx_ring->buffer_info[i]; | 1502 | buffer_info = &rx_ring->buffer_info[i]; |
1403 | skb = buffer_info->skb; | 1503 | skb = buffer_info->skb; |
1504 | buffer_info->skb = NULL; | ||
1404 | 1505 | ||
1405 | /* unmap dma */ | 1506 | /* unmap dma */ |
1406 | dma_unmap_single(&pdev->dev, buffer_info->dma, | 1507 | dma_unmap_single(&pdev->dev, buffer_info->dma, |
1407 | buffer_info->length, DMA_FROM_DEVICE); | 1508 | buffer_info->length, DMA_FROM_DEVICE); |
1408 | buffer_info->mapped = false; | 1509 | buffer_info->mapped = false; |
1409 | /* Prefetch the packet */ | ||
1410 | prefetch(skb->data); | ||
1411 | 1510 | ||
1412 | pr_debug("RxDecNo = 0x%04x Status[DMA:0x%02x GBE:0x%04x " | 1511 | pr_debug("RxDecNo = 0x%04x Status[DMA:0x%02x GBE:0x%04x " |
1413 | "TCP:0x%08x] BufInf = 0x%p\n", | 1512 | "TCP:0x%08x] BufInf = 0x%p\n", |
@@ -1427,70 +1526,16 @@ pch_gbe_clean_rx(struct pch_gbe_adapter *adapter, | |||
1427 | pr_err("Receive CRC Error\n"); | 1526 | pr_err("Receive CRC Error\n"); |
1428 | } else { | 1527 | } else { |
1429 | /* get receive length */ | 1528 | /* get receive length */ |
1430 | /* length convert[-3] */ | 1529 | /* length convert[-3], length includes FCS length */ |
1431 | length = (rx_desc->rx_words_eob) - 3; | 1530 | length = (rx_desc->rx_words_eob) - 3 - ETH_FCS_LEN; |
1432 | 1531 | if (rx_desc->rx_words_eob & 0x02) | |
1433 | /* Decide the data conversion method */ | 1532 | length = length - 4; |
1434 | if (!(netdev->features & NETIF_F_RXCSUM)) { | 1533 | /* |
1435 | /* [Header:14][payload] */ | 1534 | * buffer_info->rx_buffer: [Header:14][payload] |
1436 | if (NET_IP_ALIGN) { | 1535 | * skb->data: [Reserve:2][Header:14][payload] |
1437 | /* Because alignment differs, | 1536 | */ |
1438 | * the new_skb is newly allocated, | 1537 | memcpy(skb->data, buffer_info->rx_buffer, length); |
1439 | * and data is copied to new_skb.*/ | 1538 | |
1440 | new_skb = netdev_alloc_skb(netdev, | ||
1441 | length + NET_IP_ALIGN); | ||
1442 | if (!new_skb) { | ||
1443 | /* dorrop error */ | ||
1444 | pr_err("New skb allocation " | ||
1445 | "Error\n"); | ||
1446 | goto dorrop; | ||
1447 | } | ||
1448 | skb_reserve(new_skb, NET_IP_ALIGN); | ||
1449 | memcpy(new_skb->data, skb->data, | ||
1450 | length); | ||
1451 | skb = new_skb; | ||
1452 | } else { | ||
1453 | /* DMA buffer is used as SKB as it is.*/ | ||
1454 | buffer_info->skb = NULL; | ||
1455 | } | ||
1456 | } else { | ||
1457 | /* [Header:14][padding:2][payload] */ | ||
1458 | /* The length includes padding length */ | ||
1459 | length = length - PCH_GBE_DMA_PADDING; | ||
1460 | if ((length < copybreak) || | ||
1461 | (NET_IP_ALIGN != PCH_GBE_DMA_PADDING)) { | ||
1462 | /* Because alignment differs, | ||
1463 | * the new_skb is newly allocated, | ||
1464 | * and data is copied to new_skb. | ||
1465 | * Padding data is deleted | ||
1466 | * at the time of a copy.*/ | ||
1467 | new_skb = netdev_alloc_skb(netdev, | ||
1468 | length + NET_IP_ALIGN); | ||
1469 | if (!new_skb) { | ||
1470 | /* dorrop error */ | ||
1471 | pr_err("New skb allocation " | ||
1472 | "Error\n"); | ||
1473 | goto dorrop; | ||
1474 | } | ||
1475 | skb_reserve(new_skb, NET_IP_ALIGN); | ||
1476 | memcpy(new_skb->data, skb->data, | ||
1477 | ETH_HLEN); | ||
1478 | memcpy(&new_skb->data[ETH_HLEN], | ||
1479 | &skb->data[ETH_HLEN + | ||
1480 | PCH_GBE_DMA_PADDING], | ||
1481 | length - ETH_HLEN); | ||
1482 | skb = new_skb; | ||
1483 | } else { | ||
1484 | /* Padding data is deleted | ||
1485 | * by moving header data.*/ | ||
1486 | memmove(&skb->data[PCH_GBE_DMA_PADDING], | ||
1487 | &skb->data[0], ETH_HLEN); | ||
1488 | skb_reserve(skb, NET_IP_ALIGN); | ||
1489 | buffer_info->skb = NULL; | ||
1490 | } | ||
1491 | } | ||
1492 | /* The length includes FCS length */ | ||
1493 | length = length - ETH_FCS_LEN; | ||
1494 | /* update status of driver */ | 1539 | /* update status of driver */ |
1495 | adapter->stats.rx_bytes += length; | 1540 | adapter->stats.rx_bytes += length; |
1496 | adapter->stats.rx_packets++; | 1541 | adapter->stats.rx_packets++; |
@@ -1509,7 +1554,6 @@ pch_gbe_clean_rx(struct pch_gbe_adapter *adapter, | |||
1509 | pr_debug("Receive skb->ip_summed: %d length: %d\n", | 1554 | pr_debug("Receive skb->ip_summed: %d length: %d\n", |
1510 | skb->ip_summed, length); | 1555 | skb->ip_summed, length); |
1511 | } | 1556 | } |
1512 | dorrop: | ||
1513 | /* return some buffers to hardware, one at a time is too slow */ | 1557 | /* return some buffers to hardware, one at a time is too slow */ |
1514 | if (unlikely(cleaned_count >= PCH_GBE_RX_BUFFER_WRITE)) { | 1558 | if (unlikely(cleaned_count >= PCH_GBE_RX_BUFFER_WRITE)) { |
1515 | pch_gbe_alloc_rx_buffers(adapter, rx_ring, | 1559 | pch_gbe_alloc_rx_buffers(adapter, rx_ring, |
@@ -1714,9 +1758,15 @@ int pch_gbe_up(struct pch_gbe_adapter *adapter) | |||
1714 | pr_err("Error: can't bring device up\n"); | 1758 | pr_err("Error: can't bring device up\n"); |
1715 | return err; | 1759 | return err; |
1716 | } | 1760 | } |
1761 | err = pch_gbe_alloc_rx_buffers_pool(adapter, rx_ring, rx_ring->count); | ||
1762 | if (err) { | ||
1763 | pr_err("Error: can't bring device up\n"); | ||
1764 | return err; | ||
1765 | } | ||
1717 | pch_gbe_alloc_tx_buffers(adapter, tx_ring); | 1766 | pch_gbe_alloc_tx_buffers(adapter, tx_ring); |
1718 | pch_gbe_alloc_rx_buffers(adapter, rx_ring, rx_ring->count); | 1767 | pch_gbe_alloc_rx_buffers(adapter, rx_ring, rx_ring->count); |
1719 | adapter->tx_queue_len = netdev->tx_queue_len; | 1768 | adapter->tx_queue_len = netdev->tx_queue_len; |
1769 | pch_gbe_start_receive(&adapter->hw); | ||
1720 | 1770 | ||
1721 | mod_timer(&adapter->watchdog_timer, jiffies); | 1771 | mod_timer(&adapter->watchdog_timer, jiffies); |
1722 | 1772 | ||
@@ -1734,6 +1784,7 @@ int pch_gbe_up(struct pch_gbe_adapter *adapter) | |||
1734 | void pch_gbe_down(struct pch_gbe_adapter *adapter) | 1784 | void pch_gbe_down(struct pch_gbe_adapter *adapter) |
1735 | { | 1785 | { |
1736 | struct net_device *netdev = adapter->netdev; | 1786 | struct net_device *netdev = adapter->netdev; |
1787 | struct pch_gbe_rx_ring *rx_ring = adapter->rx_ring; | ||
1737 | 1788 | ||
1738 | /* signal that we're down so the interrupt handler does not | 1789 | /* signal that we're down so the interrupt handler does not |
1739 | * reschedule our watchdog timer */ | 1790 | * reschedule our watchdog timer */ |
@@ -1752,6 +1803,12 @@ void pch_gbe_down(struct pch_gbe_adapter *adapter) | |||
1752 | pch_gbe_reset(adapter); | 1803 | pch_gbe_reset(adapter); |
1753 | pch_gbe_clean_tx_ring(adapter, adapter->tx_ring); | 1804 | pch_gbe_clean_tx_ring(adapter, adapter->tx_ring); |
1754 | pch_gbe_clean_rx_ring(adapter, adapter->rx_ring); | 1805 | pch_gbe_clean_rx_ring(adapter, adapter->rx_ring); |
1806 | |||
1807 | pci_free_consistent(adapter->pdev, rx_ring->rx_buff_pool_size, | ||
1808 | rx_ring->rx_buff_pool, rx_ring->rx_buff_pool_logic); | ||
1809 | rx_ring->rx_buff_pool_logic = 0; | ||
1810 | rx_ring->rx_buff_pool_size = 0; | ||
1811 | rx_ring->rx_buff_pool = NULL; | ||
1755 | } | 1812 | } |
1756 | 1813 | ||
1757 | /** | 1814 | /** |
@@ -2004,6 +2061,8 @@ static int pch_gbe_change_mtu(struct net_device *netdev, int new_mtu) | |||
2004 | { | 2061 | { |
2005 | struct pch_gbe_adapter *adapter = netdev_priv(netdev); | 2062 | struct pch_gbe_adapter *adapter = netdev_priv(netdev); |
2006 | int max_frame; | 2063 | int max_frame; |
2064 | unsigned long old_rx_buffer_len = adapter->rx_buffer_len; | ||
2065 | int err; | ||
2007 | 2066 | ||
2008 | max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; | 2067 | max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; |
2009 | if ((max_frame < ETH_ZLEN + ETH_FCS_LEN) || | 2068 | if ((max_frame < ETH_ZLEN + ETH_FCS_LEN) || |
@@ -2018,14 +2077,24 @@ static int pch_gbe_change_mtu(struct net_device *netdev, int new_mtu) | |||
2018 | else if (max_frame <= PCH_GBE_FRAME_SIZE_8192) | 2077 | else if (max_frame <= PCH_GBE_FRAME_SIZE_8192) |
2019 | adapter->rx_buffer_len = PCH_GBE_FRAME_SIZE_8192; | 2078 | adapter->rx_buffer_len = PCH_GBE_FRAME_SIZE_8192; |
2020 | else | 2079 | else |
2021 | adapter->rx_buffer_len = PCH_GBE_MAX_JUMBO_FRAME_SIZE; | 2080 | adapter->rx_buffer_len = PCH_GBE_MAX_RX_BUFFER_SIZE; |
2022 | netdev->mtu = new_mtu; | ||
2023 | adapter->hw.mac.max_frame_size = max_frame; | ||
2024 | 2081 | ||
2025 | if (netif_running(netdev)) | 2082 | if (netif_running(netdev)) { |
2026 | pch_gbe_reinit_locked(adapter); | 2083 | pch_gbe_down(adapter); |
2027 | else | 2084 | err = pch_gbe_up(adapter); |
2085 | if (err) { | ||
2086 | adapter->rx_buffer_len = old_rx_buffer_len; | ||
2087 | pch_gbe_up(adapter); | ||
2088 | return -ENOMEM; | ||
2089 | } else { | ||
2090 | netdev->mtu = new_mtu; | ||
2091 | adapter->hw.mac.max_frame_size = max_frame; | ||
2092 | } | ||
2093 | } else { | ||
2028 | pch_gbe_reset(adapter); | 2094 | pch_gbe_reset(adapter); |
2095 | netdev->mtu = new_mtu; | ||
2096 | adapter->hw.mac.max_frame_size = max_frame; | ||
2097 | } | ||
2029 | 2098 | ||
2030 | pr_debug("max_frame : %d rx_buffer_len : %d mtu : %d max_frame_size : %d\n", | 2099 | pr_debug("max_frame : %d rx_buffer_len : %d mtu : %d max_frame_size : %d\n", |
2031 | max_frame, (u32) adapter->rx_buffer_len, netdev->mtu, | 2100 | max_frame, (u32) adapter->rx_buffer_len, netdev->mtu, |
@@ -2099,33 +2168,39 @@ static int pch_gbe_napi_poll(struct napi_struct *napi, int budget) | |||
2099 | { | 2168 | { |
2100 | struct pch_gbe_adapter *adapter = | 2169 | struct pch_gbe_adapter *adapter = |
2101 | container_of(napi, struct pch_gbe_adapter, napi); | 2170 | container_of(napi, struct pch_gbe_adapter, napi); |
2102 | struct net_device *netdev = adapter->netdev; | ||
2103 | int work_done = 0; | 2171 | int work_done = 0; |
2104 | bool poll_end_flag = false; | 2172 | bool poll_end_flag = false; |
2105 | bool cleaned = false; | 2173 | bool cleaned = false; |
2174 | u32 int_en; | ||
2106 | 2175 | ||
2107 | pr_debug("budget : %d\n", budget); | 2176 | pr_debug("budget : %d\n", budget); |
2108 | 2177 | ||
2109 | /* Keep link state information with original netdev */ | 2178 | pch_gbe_clean_rx(adapter, adapter->rx_ring, &work_done, budget); |
2110 | if (!netif_carrier_ok(netdev)) { | 2179 | cleaned = pch_gbe_clean_tx(adapter, adapter->tx_ring); |
2111 | poll_end_flag = true; | ||
2112 | } else { | ||
2113 | cleaned = pch_gbe_clean_tx(adapter, adapter->tx_ring); | ||
2114 | pch_gbe_clean_rx(adapter, adapter->rx_ring, &work_done, budget); | ||
2115 | 2180 | ||
2116 | if (cleaned) | 2181 | if (!cleaned) |
2117 | work_done = budget; | 2182 | work_done = budget; |
2118 | /* If no Tx and not enough Rx work done, | 2183 | /* If no Tx and not enough Rx work done, |
2119 | * exit the polling mode | 2184 | * exit the polling mode |
2120 | */ | 2185 | */ |
2121 | if ((work_done < budget) || !netif_running(netdev)) | 2186 | if (work_done < budget) |
2122 | poll_end_flag = true; | 2187 | poll_end_flag = true; |
2123 | } | ||
2124 | 2188 | ||
2125 | if (poll_end_flag) { | 2189 | if (poll_end_flag) { |
2126 | napi_complete(napi); | 2190 | napi_complete(napi); |
2191 | if (adapter->rx_stop_flag) { | ||
2192 | adapter->rx_stop_flag = false; | ||
2193 | pch_gbe_start_receive(&adapter->hw); | ||
2194 | } | ||
2127 | pch_gbe_irq_enable(adapter); | 2195 | pch_gbe_irq_enable(adapter); |
2128 | } | 2196 | } else |
2197 | if (adapter->rx_stop_flag) { | ||
2198 | adapter->rx_stop_flag = false; | ||
2199 | pch_gbe_start_receive(&adapter->hw); | ||
2200 | int_en = ioread32(&adapter->hw.reg->INT_EN); | ||
2201 | iowrite32((int_en | PCH_GBE_INT_RX_FIFO_ERR), | ||
2202 | &adapter->hw.reg->INT_EN); | ||
2203 | } | ||
2129 | 2204 | ||
2130 | pr_debug("poll_end_flag : %d work_done : %d budget : %d\n", | 2205 | pr_debug("poll_end_flag : %d work_done : %d budget : %d\n", |
2131 | poll_end_flag, work_done, budget); | 2206 | poll_end_flag, work_done, budget); |
@@ -2452,6 +2527,13 @@ static DEFINE_PCI_DEVICE_TABLE(pch_gbe_pcidev_id) = { | |||
2452 | .class = (PCI_CLASS_NETWORK_ETHERNET << 8), | 2527 | .class = (PCI_CLASS_NETWORK_ETHERNET << 8), |
2453 | .class_mask = (0xFFFF00) | 2528 | .class_mask = (0xFFFF00) |
2454 | }, | 2529 | }, |
2530 | {.vendor = PCI_VENDOR_ID_ROHM, | ||
2531 | .device = PCI_DEVICE_ID_ROHM_ML7831_GBE, | ||
2532 | .subvendor = PCI_ANY_ID, | ||
2533 | .subdevice = PCI_ANY_ID, | ||
2534 | .class = (PCI_CLASS_NETWORK_ETHERNET << 8), | ||
2535 | .class_mask = (0xFFFF00) | ||
2536 | }, | ||
2455 | /* required last entry */ | 2537 | /* required last entry */ |
2456 | {0} | 2538 | {0} |
2457 | }; | 2539 | }; |
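The reworked pch_gbe_napi_poll() above follows the usual NAPI contract: do the Rx/Tx cleanup unconditionally, and only call napi_complete() and re-enable device interrupts when less than the full budget was consumed; otherwise report the whole budget so the core polls again. A bare-bones sketch of that contract, with a hypothetical demo_adapter and stubbed helpers in place of the driver's real cleanup and interrupt code:

#include <linux/netdevice.h>

struct demo_adapter {
	struct napi_struct napi;
	/* hardware state elided */
};

/* Stub: a real driver walks its Rx ring here and returns packets handled. */
static int demo_clean_rx(struct demo_adapter *ad, int budget)
{
	return 0;
}

/* Stub: a real driver rewrites its interrupt-enable register here. */
static void demo_irq_enable(struct demo_adapter *ad)
{
}

static int demo_napi_poll(struct napi_struct *napi, int budget)
{
	struct demo_adapter *ad = container_of(napi, struct demo_adapter, napi);
	int work_done = demo_clean_rx(ad, budget);

	/* Leave polling mode only when the budget was not exhausted. */
	if (work_done < budget) {
		napi_complete(napi);
		demo_irq_enable(ad);
	}

	return work_done;
}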
diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c index cb6e0b486b1e..edd7304773eb 100644 --- a/drivers/net/phy/dp83640.c +++ b/drivers/net/phy/dp83640.c | |||
@@ -589,7 +589,7 @@ static void decode_rxts(struct dp83640_private *dp83640, | |||
589 | prune_rx_ts(dp83640); | 589 | prune_rx_ts(dp83640); |
590 | 590 | ||
591 | if (list_empty(&dp83640->rxpool)) { | 591 | if (list_empty(&dp83640->rxpool)) { |
592 | pr_warning("dp83640: rx timestamp pool is empty\n"); | 592 | pr_debug("dp83640: rx timestamp pool is empty\n"); |
593 | goto out; | 593 | goto out; |
594 | } | 594 | } |
595 | rxts = list_first_entry(&dp83640->rxpool, struct rxts, list); | 595 | rxts = list_first_entry(&dp83640->rxpool, struct rxts, list); |
@@ -612,7 +612,7 @@ static void decode_txts(struct dp83640_private *dp83640, | |||
612 | skb = skb_dequeue(&dp83640->tx_queue); | 612 | skb = skb_dequeue(&dp83640->tx_queue); |
613 | 613 | ||
614 | if (!skb) { | 614 | if (!skb) { |
615 | pr_warning("dp83640: have timestamp but tx_queue empty\n"); | 615 | pr_debug("dp83640: have timestamp but tx_queue empty\n"); |
616 | return; | 616 | return; |
617 | } | 617 | } |
618 | ns = phy2txts(phy_txts); | 618 | ns = phy2txts(phy_txts); |
diff --git a/drivers/net/phy/national.c b/drivers/net/phy/national.c index 0620ba963508..04bb8fcc0cb5 100644 --- a/drivers/net/phy/national.c +++ b/drivers/net/phy/national.c | |||
@@ -25,8 +25,9 @@ | |||
25 | /* DP83865 phy identifier values */ | 25 | /* DP83865 phy identifier values */ |
26 | #define DP83865_PHY_ID 0x20005c7a | 26 | #define DP83865_PHY_ID 0x20005c7a |
27 | 27 | ||
28 | #define DP83865_INT_MASK_REG 0x15 | 28 | #define DP83865_INT_STATUS 0x14 |
29 | #define DP83865_INT_MASK_STATUS 0x14 | 29 | #define DP83865_INT_MASK 0x15 |
30 | #define DP83865_INT_CLEAR 0x17 | ||
30 | 31 | ||
31 | #define DP83865_INT_REMOTE_FAULT 0x0008 | 32 | #define DP83865_INT_REMOTE_FAULT 0x0008 |
32 | #define DP83865_INT_ANE_COMPLETED 0x0010 | 33 | #define DP83865_INT_ANE_COMPLETED 0x0010 |
@@ -68,21 +69,25 @@ static int ns_config_intr(struct phy_device *phydev) | |||
68 | int err; | 69 | int err; |
69 | 70 | ||
70 | if (phydev->interrupts == PHY_INTERRUPT_ENABLED) | 71 | if (phydev->interrupts == PHY_INTERRUPT_ENABLED) |
71 | err = phy_write(phydev, DP83865_INT_MASK_REG, | 72 | err = phy_write(phydev, DP83865_INT_MASK, |
72 | DP83865_INT_MASK_DEFAULT); | 73 | DP83865_INT_MASK_DEFAULT); |
73 | else | 74 | else |
74 | err = phy_write(phydev, DP83865_INT_MASK_REG, 0); | 75 | err = phy_write(phydev, DP83865_INT_MASK, 0); |
75 | 76 | ||
76 | return err; | 77 | return err; |
77 | } | 78 | } |
78 | 79 | ||
79 | static int ns_ack_interrupt(struct phy_device *phydev) | 80 | static int ns_ack_interrupt(struct phy_device *phydev) |
80 | { | 81 | { |
81 | int ret = phy_read(phydev, DP83865_INT_MASK_STATUS); | 82 | int ret = phy_read(phydev, DP83865_INT_STATUS); |
82 | if (ret < 0) | 83 | if (ret < 0) |
83 | return ret; | 84 | return ret; |
84 | 85 | ||
85 | return 0; | 86 | /* Clear the interrupt status bit by writing a “1” |
87 | * to the corresponding bit in INT_CLEAR (2:0 are reserved) */ | ||
88 | ret = phy_write(phydev, DP83865_INT_CLEAR, ret & ~0x7); | ||
89 | |||
90 | return ret; | ||
86 | } | 91 | } |
87 | 92 | ||
88 | static void ns_giga_speed_fallback(struct phy_device *phydev, int mode) | 93 | static void ns_giga_speed_fallback(struct phy_device *phydev, int mode) |
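The ns_ack_interrupt() change above reads the latched status register and then writes the pending bits back to a dedicated clear register, since reading the status alone does not de-assert the interrupt on this PHY. A small sketch of that read-then-write-one-to-clear pattern; the DEMO_* register names and helper are illustrative, not the driver's:

#include <linux/phy.h>

#define DEMO_INT_STATUS	0x14	/* latched interrupt status */
#define DEMO_INT_CLEAR	0x17	/* write 1s to clear; bits 2:0 reserved */

static int demo_ack_interrupt(struct phy_device *phydev)
{
	int status = phy_read(phydev, DEMO_INT_STATUS);

	if (status < 0)
		return status;

	/* Write the pending bits back, leaving the reserved bits alone. */
	return phy_write(phydev, DEMO_INT_CLEAR, status & ~0x7);
}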
diff --git a/drivers/net/ppp_generic.c b/drivers/net/ppp_generic.c index 10e5d985afa3..edfa15d2e795 100644 --- a/drivers/net/ppp_generic.c +++ b/drivers/net/ppp_generic.c | |||
@@ -1465,7 +1465,12 @@ static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb) | |||
1465 | continue; | 1465 | continue; |
1466 | } | 1466 | } |
1467 | 1467 | ||
1468 | mtu = pch->chan->mtu - hdrlen; | 1468 | /* |
1469 | * hdrlen includes the 2-byte PPP protocol field, but the | ||
1470 | * MTU counts only the payload excluding the protocol field. | ||
1471 | * (RFC1661 Section 2) | ||
1472 | */ | ||
1473 | mtu = pch->chan->mtu - (hdrlen - 2); | ||
1469 | if (mtu < 4) | 1474 | if (mtu < 4) |
1470 | mtu = 4; | 1475 | mtu = 4; |
1471 | if (flen > mtu) | 1476 | if (flen > mtu) |
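The arithmetic behind the comment added above: hdrlen counts the full multilink header including the 2-byte PPP protocol field, while the channel MTU covers only the payload after that field, so each fragment may carry mtu - (hdrlen - 2) bytes. A worked example with illustrative numbers (a 1500-byte channel MTU and a 6-byte long-sequence multilink header are assumptions, not values taken from a real channel):

/* demo_frag_payload(1500, 6) == 1496 bytes of payload per fragment. */
static inline int demo_frag_payload(int chan_mtu, int hdrlen)
{
	/* hdrlen includes the 2-byte protocol field; the MTU does not. */
	int mtu = chan_mtu - (hdrlen - 2);

	return mtu < 4 ? 4 : mtu;	/* same floor as the code above */
}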
diff --git a/drivers/net/pxa168_eth.c b/drivers/net/pxa168_eth.c index 1a3033d8e7ed..d17d0624c5e6 100644 --- a/drivers/net/pxa168_eth.c +++ b/drivers/net/pxa168_eth.c | |||
@@ -40,6 +40,7 @@ | |||
40 | #include <linux/clk.h> | 40 | #include <linux/clk.h> |
41 | #include <linux/phy.h> | 41 | #include <linux/phy.h> |
42 | #include <linux/io.h> | 42 | #include <linux/io.h> |
43 | #include <linux/interrupt.h> | ||
43 | #include <linux/types.h> | 44 | #include <linux/types.h> |
44 | #include <asm/pgtable.h> | 45 | #include <asm/pgtable.h> |
45 | #include <asm/system.h> | 46 | #include <asm/system.h> |
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c index 02339b3352e7..c23667017922 100644 --- a/drivers/net/r8169.c +++ b/drivers/net/r8169.c | |||
@@ -407,6 +407,7 @@ enum rtl_register_content { | |||
407 | RxOK = 0x0001, | 407 | RxOK = 0x0001, |
408 | 408 | ||
409 | /* RxStatusDesc */ | 409 | /* RxStatusDesc */ |
410 | RxBOVF = (1 << 24), | ||
410 | RxFOVF = (1 << 23), | 411 | RxFOVF = (1 << 23), |
411 | RxRWT = (1 << 22), | 412 | RxRWT = (1 << 22), |
412 | RxRES = (1 << 21), | 413 | RxRES = (1 << 21), |
@@ -682,6 +683,7 @@ struct rtl8169_private { | |||
682 | struct mii_if_info mii; | 683 | struct mii_if_info mii; |
683 | struct rtl8169_counters counters; | 684 | struct rtl8169_counters counters; |
684 | u32 saved_wolopts; | 685 | u32 saved_wolopts; |
686 | u32 opts1_mask; | ||
685 | 687 | ||
686 | struct rtl_fw { | 688 | struct rtl_fw { |
687 | const struct firmware *fw; | 689 | const struct firmware *fw; |
@@ -710,6 +712,7 @@ MODULE_FIRMWARE(FIRMWARE_8168D_1); | |||
710 | MODULE_FIRMWARE(FIRMWARE_8168D_2); | 712 | MODULE_FIRMWARE(FIRMWARE_8168D_2); |
711 | MODULE_FIRMWARE(FIRMWARE_8168E_1); | 713 | MODULE_FIRMWARE(FIRMWARE_8168E_1); |
712 | MODULE_FIRMWARE(FIRMWARE_8168E_2); | 714 | MODULE_FIRMWARE(FIRMWARE_8168E_2); |
715 | MODULE_FIRMWARE(FIRMWARE_8168E_3); | ||
713 | MODULE_FIRMWARE(FIRMWARE_8105E_1); | 716 | MODULE_FIRMWARE(FIRMWARE_8105E_1); |
714 | 717 | ||
715 | static int rtl8169_open(struct net_device *dev); | 718 | static int rtl8169_open(struct net_device *dev); |
@@ -3077,6 +3080,14 @@ static void rtl8169_phy_reset(struct net_device *dev, | |||
3077 | netif_err(tp, link, dev, "PHY reset failed\n"); | 3080 | netif_err(tp, link, dev, "PHY reset failed\n"); |
3078 | } | 3081 | } |
3079 | 3082 | ||
3083 | static bool rtl_tbi_enabled(struct rtl8169_private *tp) | ||
3084 | { | ||
3085 | void __iomem *ioaddr = tp->mmio_addr; | ||
3086 | |||
3087 | return (tp->mac_version == RTL_GIGA_MAC_VER_01) && | ||
3088 | (RTL_R8(PHYstatus) & TBI_Enable); | ||
3089 | } | ||
3090 | |||
3080 | static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp) | 3091 | static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp) |
3081 | { | 3092 | { |
3082 | void __iomem *ioaddr = tp->mmio_addr; | 3093 | void __iomem *ioaddr = tp->mmio_addr; |
@@ -3109,7 +3120,7 @@ static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp) | |||
3109 | ADVERTISED_1000baseT_Half | | 3120 | ADVERTISED_1000baseT_Half | |
3110 | ADVERTISED_1000baseT_Full : 0)); | 3121 | ADVERTISED_1000baseT_Full : 0)); |
3111 | 3122 | ||
3112 | if (RTL_R8(PHYstatus) & TBI_Enable) | 3123 | if (rtl_tbi_enabled(tp)) |
3113 | netif_info(tp, link, dev, "TBI auto-negotiating\n"); | 3124 | netif_info(tp, link, dev, "TBI auto-negotiating\n"); |
3114 | } | 3125 | } |
3115 | 3126 | ||
@@ -3319,9 +3330,16 @@ static void r810x_phy_power_up(struct rtl8169_private *tp) | |||
3319 | 3330 | ||
3320 | static void r810x_pll_power_down(struct rtl8169_private *tp) | 3331 | static void r810x_pll_power_down(struct rtl8169_private *tp) |
3321 | { | 3332 | { |
3333 | void __iomem *ioaddr = tp->mmio_addr; | ||
3334 | |||
3322 | if (__rtl8169_get_wol(tp) & WAKE_ANY) { | 3335 | if (__rtl8169_get_wol(tp) & WAKE_ANY) { |
3323 | rtl_writephy(tp, 0x1f, 0x0000); | 3336 | rtl_writephy(tp, 0x1f, 0x0000); |
3324 | rtl_writephy(tp, MII_BMCR, 0x0000); | 3337 | rtl_writephy(tp, MII_BMCR, 0x0000); |
3338 | |||
3339 | if (tp->mac_version == RTL_GIGA_MAC_VER_29 || | ||
3340 | tp->mac_version == RTL_GIGA_MAC_VER_30) | ||
3341 | RTL_W32(RxConfig, RTL_R32(RxConfig) | AcceptBroadcast | | ||
3342 | AcceptMulticast | AcceptMyPhys); | ||
3325 | return; | 3343 | return; |
3326 | } | 3344 | } |
3327 | 3345 | ||
@@ -3417,7 +3435,8 @@ static void r8168_pll_power_down(struct rtl8169_private *tp) | |||
3417 | rtl_writephy(tp, MII_BMCR, 0x0000); | 3435 | rtl_writephy(tp, MII_BMCR, 0x0000); |
3418 | 3436 | ||
3419 | if (tp->mac_version == RTL_GIGA_MAC_VER_32 || | 3437 | if (tp->mac_version == RTL_GIGA_MAC_VER_32 || |
3420 | tp->mac_version == RTL_GIGA_MAC_VER_33) | 3438 | tp->mac_version == RTL_GIGA_MAC_VER_33 || |
3439 | tp->mac_version == RTL_GIGA_MAC_VER_34) | ||
3421 | RTL_W32(RxConfig, RTL_R32(RxConfig) | AcceptBroadcast | | 3440 | RTL_W32(RxConfig, RTL_R32(RxConfig) | AcceptBroadcast | |
3422 | AcceptMulticast | AcceptMyPhys); | 3441 | AcceptMulticast | AcceptMyPhys); |
3423 | return; | 3442 | return; |
@@ -3727,8 +3746,7 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
3727 | tp->features |= rtl_try_msi(pdev, ioaddr, cfg); | 3746 | tp->features |= rtl_try_msi(pdev, ioaddr, cfg); |
3728 | RTL_W8(Cfg9346, Cfg9346_Lock); | 3747 | RTL_W8(Cfg9346, Cfg9346_Lock); |
3729 | 3748 | ||
3730 | if ((tp->mac_version <= RTL_GIGA_MAC_VER_06) && | 3749 | if (rtl_tbi_enabled(tp)) { |
3731 | (RTL_R8(PHYstatus) & TBI_Enable)) { | ||
3732 | tp->set_speed = rtl8169_set_speed_tbi; | 3750 | tp->set_speed = rtl8169_set_speed_tbi; |
3733 | tp->get_settings = rtl8169_gset_tbi; | 3751 | tp->get_settings = rtl8169_gset_tbi; |
3734 | tp->phy_reset_enable = rtl8169_tbi_reset_enable; | 3752 | tp->phy_reset_enable = rtl8169_tbi_reset_enable; |
@@ -3777,6 +3795,9 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
3777 | tp->intr_event = cfg->intr_event; | 3795 | tp->intr_event = cfg->intr_event; |
3778 | tp->napi_event = cfg->napi_event; | 3796 | tp->napi_event = cfg->napi_event; |
3779 | 3797 | ||
3798 | tp->opts1_mask = (tp->mac_version != RTL_GIGA_MAC_VER_01) ? | ||
3799 | ~(RxBOVF | RxFOVF) : ~0; | ||
3800 | |||
3780 | init_timer(&tp->timer); | 3801 | init_timer(&tp->timer); |
3781 | tp->timer.data = (unsigned long) dev; | 3802 | tp->timer.data = (unsigned long) dev; |
3782 | tp->timer.function = rtl8169_phy_timer; | 3803 | tp->timer.function = rtl8169_phy_timer; |
@@ -3988,6 +4009,7 @@ static void rtl8169_hw_reset(struct rtl8169_private *tp) | |||
3988 | while (RTL_R8(TxPoll) & NPQ) | 4009 | while (RTL_R8(TxPoll) & NPQ) |
3989 | udelay(20); | 4010 | udelay(20); |
3990 | } else if (tp->mac_version == RTL_GIGA_MAC_VER_34) { | 4011 | } else if (tp->mac_version == RTL_GIGA_MAC_VER_34) { |
4012 | RTL_W8(ChipCmd, RTL_R8(ChipCmd) | StopReq); | ||
3991 | while (!(RTL_R32(TxConfig) & TXCFG_EMPTY)) | 4013 | while (!(RTL_R32(TxConfig) & TXCFG_EMPTY)) |
3992 | udelay(100); | 4014 | udelay(100); |
3993 | } else { | 4015 | } else { |
@@ -5314,7 +5336,7 @@ static int rtl8169_rx_interrupt(struct net_device *dev, | |||
5314 | u32 status; | 5336 | u32 status; |
5315 | 5337 | ||
5316 | rmb(); | 5338 | rmb(); |
5317 | status = le32_to_cpu(desc->opts1); | 5339 | status = le32_to_cpu(desc->opts1) & tp->opts1_mask; |
5318 | 5340 | ||
5319 | if (status & DescOwn) | 5341 | if (status & DescOwn) |
5320 | break; | 5342 | break; |
diff --git a/drivers/net/rionet.c b/drivers/net/rionet.c index 86ac38c96bcf..3bb131137033 100644 --- a/drivers/net/rionet.c +++ b/drivers/net/rionet.c | |||
@@ -80,13 +80,13 @@ static int rionet_capable = 1; | |||
80 | */ | 80 | */ |
81 | static struct rio_dev **rionet_active; | 81 | static struct rio_dev **rionet_active; |
82 | 82 | ||
83 | #define is_rionet_capable(pef, src_ops, dst_ops) \ | 83 | #define is_rionet_capable(src_ops, dst_ops) \ |
84 | ((pef & RIO_PEF_INB_MBOX) && \ | 84 | ((src_ops & RIO_SRC_OPS_DATA_MSG) && \ |
85 | (pef & RIO_PEF_INB_DOORBELL) && \ | 85 | (dst_ops & RIO_DST_OPS_DATA_MSG) && \ |
86 | (src_ops & RIO_SRC_OPS_DOORBELL) && \ | 86 | (src_ops & RIO_SRC_OPS_DOORBELL) && \ |
87 | (dst_ops & RIO_DST_OPS_DOORBELL)) | 87 | (dst_ops & RIO_DST_OPS_DOORBELL)) |
88 | #define dev_rionet_capable(dev) \ | 88 | #define dev_rionet_capable(dev) \ |
89 | is_rionet_capable(dev->pef, dev->src_ops, dev->dst_ops) | 89 | is_rionet_capable(dev->src_ops, dev->dst_ops) |
90 | 90 | ||
91 | #define RIONET_MAC_MATCH(x) (*(u32 *)x == 0x00010001) | 91 | #define RIONET_MAC_MATCH(x) (*(u32 *)x == 0x00010001) |
92 | #define RIONET_GET_DESTID(x) (*(u16 *)(x + 4)) | 92 | #define RIONET_GET_DESTID(x) (*(u16 *)(x + 4)) |
@@ -282,7 +282,6 @@ static int rionet_open(struct net_device *ndev) | |||
282 | { | 282 | { |
283 | int i, rc = 0; | 283 | int i, rc = 0; |
284 | struct rionet_peer *peer, *tmp; | 284 | struct rionet_peer *peer, *tmp; |
285 | u32 pwdcsr; | ||
286 | struct rionet_private *rnet = netdev_priv(ndev); | 285 | struct rionet_private *rnet = netdev_priv(ndev); |
287 | 286 | ||
288 | if (netif_msg_ifup(rnet)) | 287 | if (netif_msg_ifup(rnet)) |
@@ -332,13 +331,8 @@ static int rionet_open(struct net_device *ndev) | |||
332 | continue; | 331 | continue; |
333 | } | 332 | } |
334 | 333 | ||
335 | /* | 334 | /* Send a join message */ |
336 | * If device has initialized inbound doorbells, | 335 | rio_send_doorbell(peer->rdev, RIONET_DOORBELL_JOIN); |
337 | * send a join message | ||
338 | */ | ||
339 | rio_read_config_32(peer->rdev, RIO_WRITE_PORT_CSR, &pwdcsr); | ||
340 | if (pwdcsr & RIO_DOORBELL_AVAIL) | ||
341 | rio_send_doorbell(peer->rdev, RIONET_DOORBELL_JOIN); | ||
342 | } | 336 | } |
343 | 337 | ||
344 | out: | 338 | out: |
@@ -492,7 +486,7 @@ static int rionet_setup_netdev(struct rio_mport *mport, struct net_device *ndev) | |||
492 | static int rionet_probe(struct rio_dev *rdev, const struct rio_device_id *id) | 486 | static int rionet_probe(struct rio_dev *rdev, const struct rio_device_id *id) |
493 | { | 487 | { |
494 | int rc = -ENODEV; | 488 | int rc = -ENODEV; |
495 | u32 lpef, lsrc_ops, ldst_ops; | 489 | u32 lsrc_ops, ldst_ops; |
496 | struct rionet_peer *peer; | 490 | struct rionet_peer *peer; |
497 | struct net_device *ndev = NULL; | 491 | struct net_device *ndev = NULL; |
498 | 492 | ||
@@ -515,12 +509,11 @@ static int rionet_probe(struct rio_dev *rdev, const struct rio_device_id *id) | |||
515 | * on later probes | 509 | * on later probes |
516 | */ | 510 | */ |
517 | if (!rionet_check) { | 511 | if (!rionet_check) { |
518 | rio_local_read_config_32(rdev->net->hport, RIO_PEF_CAR, &lpef); | ||
519 | rio_local_read_config_32(rdev->net->hport, RIO_SRC_OPS_CAR, | 512 | rio_local_read_config_32(rdev->net->hport, RIO_SRC_OPS_CAR, |
520 | &lsrc_ops); | 513 | &lsrc_ops); |
521 | rio_local_read_config_32(rdev->net->hport, RIO_DST_OPS_CAR, | 514 | rio_local_read_config_32(rdev->net->hport, RIO_DST_OPS_CAR, |
522 | &ldst_ops); | 515 | &ldst_ops); |
523 | if (!is_rionet_capable(lpef, lsrc_ops, ldst_ops)) { | 516 | if (!is_rionet_capable(lsrc_ops, ldst_ops)) { |
524 | printk(KERN_ERR | 517 | printk(KERN_ERR |
525 | "%s: local device is not network capable\n", | 518 | "%s: local device is not network capable\n", |
526 | DRV_NAME); | 519 | DRV_NAME); |
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c index faca764aa21b..b59abc706d93 100644 --- a/drivers/net/sfc/efx.c +++ b/drivers/net/sfc/efx.c | |||
@@ -1050,7 +1050,6 @@ static int efx_init_io(struct efx_nic *efx) | |||
1050 | { | 1050 | { |
1051 | struct pci_dev *pci_dev = efx->pci_dev; | 1051 | struct pci_dev *pci_dev = efx->pci_dev; |
1052 | dma_addr_t dma_mask = efx->type->max_dma_mask; | 1052 | dma_addr_t dma_mask = efx->type->max_dma_mask; |
1053 | bool use_wc; | ||
1054 | int rc; | 1053 | int rc; |
1055 | 1054 | ||
1056 | netif_dbg(efx, probe, efx->net_dev, "initialising I/O\n"); | 1055 | netif_dbg(efx, probe, efx->net_dev, "initialising I/O\n"); |
@@ -1101,21 +1100,8 @@ static int efx_init_io(struct efx_nic *efx) | |||
1101 | rc = -EIO; | 1100 | rc = -EIO; |
1102 | goto fail3; | 1101 | goto fail3; |
1103 | } | 1102 | } |
1104 | 1103 | efx->membase = ioremap_nocache(efx->membase_phys, | |
1105 | /* bug22643: If SR-IOV is enabled then tx push over a write combined | 1104 | efx->type->mem_map_size); |
1106 | * mapping is unsafe. We need to disable write combining in this case. | ||
1107 | * MSI is unsupported when SR-IOV is enabled, and the firmware will | ||
1108 | * have removed the MSI capability. So write combining is safe if | ||
1109 | * there is an MSI capability. | ||
1110 | */ | ||
1111 | use_wc = (!EFX_WORKAROUND_22643(efx) || | ||
1112 | pci_find_capability(pci_dev, PCI_CAP_ID_MSI)); | ||
1113 | if (use_wc) | ||
1114 | efx->membase = ioremap_wc(efx->membase_phys, | ||
1115 | efx->type->mem_map_size); | ||
1116 | else | ||
1117 | efx->membase = ioremap_nocache(efx->membase_phys, | ||
1118 | efx->type->mem_map_size); | ||
1119 | if (!efx->membase) { | 1105 | if (!efx->membase) { |
1120 | netif_err(efx, probe, efx->net_dev, | 1106 | netif_err(efx, probe, efx->net_dev, |
1121 | "could not map memory BAR at %llx+%x\n", | 1107 | "could not map memory BAR at %llx+%x\n", |
diff --git a/drivers/net/sfc/io.h b/drivers/net/sfc/io.h index cc978803d484..751d1ec112cc 100644 --- a/drivers/net/sfc/io.h +++ b/drivers/net/sfc/io.h | |||
@@ -103,7 +103,6 @@ static inline void efx_writeo(struct efx_nic *efx, efx_oword_t *value, | |||
103 | _efx_writed(efx, value->u32[2], reg + 8); | 103 | _efx_writed(efx, value->u32[2], reg + 8); |
104 | _efx_writed(efx, value->u32[3], reg + 12); | 104 | _efx_writed(efx, value->u32[3], reg + 12); |
105 | #endif | 105 | #endif |
106 | wmb(); | ||
107 | mmiowb(); | 106 | mmiowb(); |
108 | spin_unlock_irqrestore(&efx->biu_lock, flags); | 107 | spin_unlock_irqrestore(&efx->biu_lock, flags); |
109 | } | 108 | } |
@@ -126,7 +125,6 @@ static inline void efx_sram_writeq(struct efx_nic *efx, void __iomem *membase, | |||
126 | __raw_writel((__force u32)value->u32[0], membase + addr); | 125 | __raw_writel((__force u32)value->u32[0], membase + addr); |
127 | __raw_writel((__force u32)value->u32[1], membase + addr + 4); | 126 | __raw_writel((__force u32)value->u32[1], membase + addr + 4); |
128 | #endif | 127 | #endif |
129 | wmb(); | ||
130 | mmiowb(); | 128 | mmiowb(); |
131 | spin_unlock_irqrestore(&efx->biu_lock, flags); | 129 | spin_unlock_irqrestore(&efx->biu_lock, flags); |
132 | } | 130 | } |
@@ -141,7 +139,6 @@ static inline void efx_writed(struct efx_nic *efx, efx_dword_t *value, | |||
141 | 139 | ||
142 | /* No lock required */ | 140 | /* No lock required */ |
143 | _efx_writed(efx, value->u32[0], reg); | 141 | _efx_writed(efx, value->u32[0], reg); |
144 | wmb(); | ||
145 | } | 142 | } |
146 | 143 | ||
147 | /* Read a 128-bit CSR, locking as appropriate. */ | 144 | /* Read a 128-bit CSR, locking as appropriate. */ |
@@ -152,7 +149,6 @@ static inline void efx_reado(struct efx_nic *efx, efx_oword_t *value, | |||
152 | 149 | ||
153 | spin_lock_irqsave(&efx->biu_lock, flags); | 150 | spin_lock_irqsave(&efx->biu_lock, flags); |
154 | value->u32[0] = _efx_readd(efx, reg + 0); | 151 | value->u32[0] = _efx_readd(efx, reg + 0); |
155 | rmb(); | ||
156 | value->u32[1] = _efx_readd(efx, reg + 4); | 152 | value->u32[1] = _efx_readd(efx, reg + 4); |
157 | value->u32[2] = _efx_readd(efx, reg + 8); | 153 | value->u32[2] = _efx_readd(efx, reg + 8); |
158 | value->u32[3] = _efx_readd(efx, reg + 12); | 154 | value->u32[3] = _efx_readd(efx, reg + 12); |
@@ -175,7 +171,6 @@ static inline void efx_sram_readq(struct efx_nic *efx, void __iomem *membase, | |||
175 | value->u64[0] = (__force __le64)__raw_readq(membase + addr); | 171 | value->u64[0] = (__force __le64)__raw_readq(membase + addr); |
176 | #else | 172 | #else |
177 | value->u32[0] = (__force __le32)__raw_readl(membase + addr); | 173 | value->u32[0] = (__force __le32)__raw_readl(membase + addr); |
178 | rmb(); | ||
179 | value->u32[1] = (__force __le32)__raw_readl(membase + addr + 4); | 174 | value->u32[1] = (__force __le32)__raw_readl(membase + addr + 4); |
180 | #endif | 175 | #endif |
181 | spin_unlock_irqrestore(&efx->biu_lock, flags); | 176 | spin_unlock_irqrestore(&efx->biu_lock, flags); |
@@ -249,7 +244,6 @@ static inline void _efx_writeo_page(struct efx_nic *efx, efx_oword_t *value, | |||
249 | _efx_writed(efx, value->u32[2], reg + 8); | 244 | _efx_writed(efx, value->u32[2], reg + 8); |
250 | _efx_writed(efx, value->u32[3], reg + 12); | 245 | _efx_writed(efx, value->u32[3], reg + 12); |
251 | #endif | 246 | #endif |
252 | wmb(); | ||
253 | } | 247 | } |
254 | #define efx_writeo_page(efx, value, reg, page) \ | 248 | #define efx_writeo_page(efx, value, reg, page) \ |
255 | _efx_writeo_page(efx, value, \ | 249 | _efx_writeo_page(efx, value, \ |
diff --git a/drivers/net/sfc/mcdi.c b/drivers/net/sfc/mcdi.c index 3dd45ed61f0a..81a425397468 100644 --- a/drivers/net/sfc/mcdi.c +++ b/drivers/net/sfc/mcdi.c | |||
@@ -50,20 +50,6 @@ static inline struct efx_mcdi_iface *efx_mcdi(struct efx_nic *efx) | |||
50 | return &nic_data->mcdi; | 50 | return &nic_data->mcdi; |
51 | } | 51 | } |
52 | 52 | ||
53 | static inline void | ||
54 | efx_mcdi_readd(struct efx_nic *efx, efx_dword_t *value, unsigned reg) | ||
55 | { | ||
56 | struct siena_nic_data *nic_data = efx->nic_data; | ||
57 | value->u32[0] = (__force __le32)__raw_readl(nic_data->mcdi_smem + reg); | ||
58 | } | ||
59 | |||
60 | static inline void | ||
61 | efx_mcdi_writed(struct efx_nic *efx, const efx_dword_t *value, unsigned reg) | ||
62 | { | ||
63 | struct siena_nic_data *nic_data = efx->nic_data; | ||
64 | __raw_writel((__force u32)value->u32[0], nic_data->mcdi_smem + reg); | ||
65 | } | ||
66 | |||
67 | void efx_mcdi_init(struct efx_nic *efx) | 53 | void efx_mcdi_init(struct efx_nic *efx) |
68 | { | 54 | { |
69 | struct efx_mcdi_iface *mcdi; | 55 | struct efx_mcdi_iface *mcdi; |
@@ -84,8 +70,8 @@ static void efx_mcdi_copyin(struct efx_nic *efx, unsigned cmd, | |||
84 | const u8 *inbuf, size_t inlen) | 70 | const u8 *inbuf, size_t inlen) |
85 | { | 71 | { |
86 | struct efx_mcdi_iface *mcdi = efx_mcdi(efx); | 72 | struct efx_mcdi_iface *mcdi = efx_mcdi(efx); |
87 | unsigned pdu = MCDI_PDU(efx); | 73 | unsigned pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx); |
88 | unsigned doorbell = MCDI_DOORBELL(efx); | 74 | unsigned doorbell = FR_CZ_MC_TREG_SMEM + MCDI_DOORBELL(efx); |
89 | unsigned int i; | 75 | unsigned int i; |
90 | efx_dword_t hdr; | 76 | efx_dword_t hdr; |
91 | u32 xflags, seqno; | 77 | u32 xflags, seqno; |
@@ -106,28 +92,29 @@ static void efx_mcdi_copyin(struct efx_nic *efx, unsigned cmd, | |||
106 | MCDI_HEADER_SEQ, seqno, | 92 | MCDI_HEADER_SEQ, seqno, |
107 | MCDI_HEADER_XFLAGS, xflags); | 93 | MCDI_HEADER_XFLAGS, xflags); |
108 | 94 | ||
109 | efx_mcdi_writed(efx, &hdr, pdu); | 95 | efx_writed(efx, &hdr, pdu); |
110 | 96 | ||
111 | for (i = 0; i < inlen; i += 4) | 97 | for (i = 0; i < inlen; i += 4) |
112 | efx_mcdi_writed(efx, (const efx_dword_t *)(inbuf + i), | 98 | _efx_writed(efx, *((__le32 *)(inbuf + i)), pdu + 4 + i); |
113 | pdu + 4 + i); | 99 | |
100 | /* Ensure the payload is written out before the header */ | ||
101 | wmb(); | ||
114 | 102 | ||
115 | /* ring the doorbell with a distinctive value */ | 103 | /* ring the doorbell with a distinctive value */ |
116 | EFX_POPULATE_DWORD_1(hdr, EFX_DWORD_0, 0x45789abc); | 104 | _efx_writed(efx, (__force __le32) 0x45789abc, doorbell); |
117 | efx_mcdi_writed(efx, &hdr, doorbell); | ||
118 | } | 105 | } |
119 | 106 | ||
120 | static void efx_mcdi_copyout(struct efx_nic *efx, u8 *outbuf, size_t outlen) | 107 | static void efx_mcdi_copyout(struct efx_nic *efx, u8 *outbuf, size_t outlen) |
121 | { | 108 | { |
122 | struct efx_mcdi_iface *mcdi = efx_mcdi(efx); | 109 | struct efx_mcdi_iface *mcdi = efx_mcdi(efx); |
123 | unsigned int pdu = MCDI_PDU(efx); | 110 | unsigned int pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx); |
124 | int i; | 111 | int i; |
125 | 112 | ||
126 | BUG_ON(atomic_read(&mcdi->state) == MCDI_STATE_QUIESCENT); | 113 | BUG_ON(atomic_read(&mcdi->state) == MCDI_STATE_QUIESCENT); |
127 | BUG_ON(outlen & 3 || outlen >= 0x100); | 114 | BUG_ON(outlen & 3 || outlen >= 0x100); |
128 | 115 | ||
129 | for (i = 0; i < outlen; i += 4) | 116 | for (i = 0; i < outlen; i += 4) |
130 | efx_mcdi_readd(efx, (efx_dword_t *)(outbuf + i), pdu + 4 + i); | 117 | *((__le32 *)(outbuf + i)) = _efx_readd(efx, pdu + 4 + i); |
131 | } | 118 | } |
132 | 119 | ||
133 | static int efx_mcdi_poll(struct efx_nic *efx) | 120 | static int efx_mcdi_poll(struct efx_nic *efx) |
@@ -135,7 +122,7 @@ static int efx_mcdi_poll(struct efx_nic *efx) | |||
135 | struct efx_mcdi_iface *mcdi = efx_mcdi(efx); | 122 | struct efx_mcdi_iface *mcdi = efx_mcdi(efx); |
136 | unsigned int time, finish; | 123 | unsigned int time, finish; |
137 | unsigned int respseq, respcmd, error; | 124 | unsigned int respseq, respcmd, error; |
138 | unsigned int pdu = MCDI_PDU(efx); | 125 | unsigned int pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx); |
139 | unsigned int rc, spins; | 126 | unsigned int rc, spins; |
140 | efx_dword_t reg; | 127 | efx_dword_t reg; |
141 | 128 | ||
@@ -161,7 +148,8 @@ static int efx_mcdi_poll(struct efx_nic *efx) | |||
161 | 148 | ||
162 | time = get_seconds(); | 149 | time = get_seconds(); |
163 | 150 | ||
164 | efx_mcdi_readd(efx, &reg, pdu); | 151 | rmb(); |
152 | efx_readd(efx, &reg, pdu); |
165 | 153 | ||
166 | /* All 1's indicates that shared memory is in reset (and is | 154 | /* All 1's indicates that shared memory is in reset (and is |
167 | * not a valid header). Wait for it to come out reset before | 155 | * not a valid header). Wait for it to come out reset before |
@@ -188,7 +176,7 @@ static int efx_mcdi_poll(struct efx_nic *efx) | |||
188 | respseq, mcdi->seqno); | 176 | respseq, mcdi->seqno); |
189 | rc = EIO; | 177 | rc = EIO; |
190 | } else if (error) { | 178 | } else if (error) { |
191 | efx_mcdi_readd(efx, &reg, pdu + 4); | 179 | efx_readd(efx, &reg, pdu + 4); |
192 | switch (EFX_DWORD_FIELD(reg, EFX_DWORD_0)) { | 180 | switch (EFX_DWORD_FIELD(reg, EFX_DWORD_0)) { |
193 | #define TRANSLATE_ERROR(name) \ | 181 | #define TRANSLATE_ERROR(name) \ |
194 | case MC_CMD_ERR_ ## name: \ | 182 | case MC_CMD_ERR_ ## name: \ |
@@ -222,21 +210,21 @@ out: | |||
222 | /* Test and clear MC-rebooted flag for this port/function */ | 210 | /* Test and clear MC-rebooted flag for this port/function */ |
223 | int efx_mcdi_poll_reboot(struct efx_nic *efx) | 211 | int efx_mcdi_poll_reboot(struct efx_nic *efx) |
224 | { | 212 | { |
225 | unsigned int addr = MCDI_REBOOT_FLAG(efx); | 213 | unsigned int addr = FR_CZ_MC_TREG_SMEM + MCDI_REBOOT_FLAG(efx); |
226 | efx_dword_t reg; | 214 | efx_dword_t reg; |
227 | uint32_t value; | 215 | uint32_t value; |
228 | 216 | ||
229 | if (efx_nic_rev(efx) < EFX_REV_SIENA_A0) | 217 | if (efx_nic_rev(efx) < EFX_REV_SIENA_A0) |
230 | return false; | 218 | return false; |
231 | 219 | ||
232 | efx_mcdi_readd(efx, &reg, addr); | 220 | efx_readd(efx, &reg, addr); |
233 | value = EFX_DWORD_FIELD(reg, EFX_DWORD_0); | 221 | value = EFX_DWORD_FIELD(reg, EFX_DWORD_0); |
234 | 222 | ||
235 | if (value == 0) | 223 | if (value == 0) |
236 | return 0; | 224 | return 0; |
237 | 225 | ||
238 | EFX_ZERO_DWORD(reg); | 226 | EFX_ZERO_DWORD(reg); |
239 | efx_mcdi_writed(efx, &reg, addr); | 227 | efx_writed(efx, &reg, addr); |
240 | 228 | ||
241 | if (value == MC_STATUS_DWORD_ASSERT) | 229 | if (value == MC_STATUS_DWORD_ASSERT) |
242 | return -EINTR; | 230 | return -EINTR; |
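The MCDI hunks switch from the private SMEM mapping (efx_mcdi_readd/writed) to the regular BAR accessors, rebasing every offset by FR_CZ_MC_TREG_SMEM, and keep a single explicit wmb() so the payload reaches shared memory before the doorbell fires. The ordering rule in isolation, as a hedged sketch with hypothetical offsets:

    #include <linux/io.h>

    /* Write a command payload, then ring a doorbell. The wmb() orders the
     * payload stores ahead of the doorbell store, which is what tells the
     * management controller to start parsing the payload. */
    static void post_command(void __iomem *base, const u32 *payload, int words,
                             unsigned int pdu_off, unsigned int doorbell_off)
    {
        int i;

        for (i = 0; i < words; i++)
            __raw_writel(payload[i], base + pdu_off + 4 * i);
        wmb();              /* payload before doorbell */
        __raw_writel(0x45789abc, base + doorbell_off);
    }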
diff --git a/drivers/net/sfc/nic.c b/drivers/net/sfc/nic.c index bafa23a6874c..3edfbaf5f022 100644 --- a/drivers/net/sfc/nic.c +++ b/drivers/net/sfc/nic.c | |||
@@ -1936,13 +1936,6 @@ void efx_nic_get_regs(struct efx_nic *efx, void *buf) | |||
1936 | 1936 | ||
1937 | size = min_t(size_t, table->step, 16); | 1937 | size = min_t(size_t, table->step, 16); |
1938 | 1938 | ||
1939 | if (table->offset >= efx->type->mem_map_size) { | ||
1940 | /* No longer mapped; return dummy data */ | ||
1941 | memcpy(buf, "\xde\xc0\xad\xde", 4); | ||
1942 | buf += table->rows * size; | ||
1943 | continue; | ||
1944 | } | ||
1945 | |||
1946 | for (i = 0; i < table->rows; i++) { | 1939 | for (i = 0; i < table->rows; i++) { |
1947 | switch (table->step) { | 1940 | switch (table->step) { |
1948 | case 4: /* 32-bit register or SRAM */ | 1941 | case 4: /* 32-bit register or SRAM */ |
diff --git a/drivers/net/sfc/nic.h b/drivers/net/sfc/nic.h index 4bd1f2839dfe..7443f99c977f 100644 --- a/drivers/net/sfc/nic.h +++ b/drivers/net/sfc/nic.h | |||
@@ -143,12 +143,10 @@ static inline struct falcon_board *falcon_board(struct efx_nic *efx) | |||
143 | /** | 143 | /** |
144 | * struct siena_nic_data - Siena NIC state | 144 | * struct siena_nic_data - Siena NIC state |
145 | * @mcdi: Management-Controller-to-Driver Interface | 145 | * @mcdi: Management-Controller-to-Driver Interface |
146 | * @mcdi_smem: MCDI shared memory mapping. The mapping is always uncacheable. | ||
147 | * @wol_filter_id: Wake-on-LAN packet filter id | 146 | * @wol_filter_id: Wake-on-LAN packet filter id |
148 | */ | 147 | */ |
149 | struct siena_nic_data { | 148 | struct siena_nic_data { |
150 | struct efx_mcdi_iface mcdi; | 149 | struct efx_mcdi_iface mcdi; |
151 | void __iomem *mcdi_smem; | ||
152 | int wol_filter_id; | 150 | int wol_filter_id; |
153 | }; | 151 | }; |
154 | 152 | ||
diff --git a/drivers/net/sfc/siena.c b/drivers/net/sfc/siena.c index 5735e84c69de..2c3bd93fab54 100644 --- a/drivers/net/sfc/siena.c +++ b/drivers/net/sfc/siena.c | |||
@@ -250,26 +250,12 @@ static int siena_probe_nic(struct efx_nic *efx) | |||
250 | efx_reado(efx, &reg, FR_AZ_CS_DEBUG); | 250 | efx_reado(efx, &reg, FR_AZ_CS_DEBUG); |
251 | efx->net_dev->dev_id = EFX_OWORD_FIELD(reg, FRF_CZ_CS_PORT_NUM) - 1; | 251 | efx->net_dev->dev_id = EFX_OWORD_FIELD(reg, FRF_CZ_CS_PORT_NUM) - 1; |
252 | 252 | ||
253 | /* Initialise MCDI */ | ||
254 | nic_data->mcdi_smem = ioremap_nocache(efx->membase_phys + | ||
255 | FR_CZ_MC_TREG_SMEM, | ||
256 | FR_CZ_MC_TREG_SMEM_STEP * | ||
257 | FR_CZ_MC_TREG_SMEM_ROWS); | ||
258 | if (!nic_data->mcdi_smem) { | ||
259 | netif_err(efx, probe, efx->net_dev, | ||
260 | "could not map MCDI at %llx+%x\n", | ||
261 | (unsigned long long)efx->membase_phys + | ||
262 | FR_CZ_MC_TREG_SMEM, | ||
263 | FR_CZ_MC_TREG_SMEM_STEP * FR_CZ_MC_TREG_SMEM_ROWS); | ||
264 | rc = -ENOMEM; | ||
265 | goto fail1; | ||
266 | } | ||
267 | efx_mcdi_init(efx); | 253 | efx_mcdi_init(efx); |
268 | 254 | ||
269 | /* Recover from a failed assertion before probing */ | 255 | /* Recover from a failed assertion before probing */ |
270 | rc = efx_mcdi_handle_assertion(efx); | 256 | rc = efx_mcdi_handle_assertion(efx); |
271 | if (rc) | 257 | if (rc) |
272 | goto fail2; | 258 | goto fail1; |
273 | 259 | ||
274 | /* Let the BMC know that the driver is now in charge of link and | 260 | /* Let the BMC know that the driver is now in charge of link and |
275 | * filter settings. We must do this before we reset the NIC */ | 261 | * filter settings. We must do this before we reset the NIC */ |
@@ -324,7 +310,6 @@ fail4: | |||
324 | fail3: | 310 | fail3: |
325 | efx_mcdi_drv_attach(efx, false, NULL); | 311 | efx_mcdi_drv_attach(efx, false, NULL); |
326 | fail2: | 312 | fail2: |
327 | iounmap(nic_data->mcdi_smem); | ||
328 | fail1: | 313 | fail1: |
329 | kfree(efx->nic_data); | 314 | kfree(efx->nic_data); |
330 | return rc; | 315 | return rc; |
@@ -404,8 +389,6 @@ static int siena_init_nic(struct efx_nic *efx) | |||
404 | 389 | ||
405 | static void siena_remove_nic(struct efx_nic *efx) | 390 | static void siena_remove_nic(struct efx_nic *efx) |
406 | { | 391 | { |
407 | struct siena_nic_data *nic_data = efx->nic_data; | ||
408 | |||
409 | efx_nic_free_buffer(efx, &efx->irq_status); | 392 | efx_nic_free_buffer(efx, &efx->irq_status); |
410 | 393 | ||
411 | siena_reset_hw(efx, RESET_TYPE_ALL); | 394 | siena_reset_hw(efx, RESET_TYPE_ALL); |
@@ -415,8 +398,7 @@ static void siena_remove_nic(struct efx_nic *efx) | |||
415 | efx_mcdi_drv_attach(efx, false, NULL); | 398 | efx_mcdi_drv_attach(efx, false, NULL); |
416 | 399 | ||
417 | /* Tear down the private nic state */ | 400 | /* Tear down the private nic state */ |
418 | iounmap(nic_data->mcdi_smem); | 401 | kfree(efx->nic_data); |
419 | kfree(nic_data); | ||
420 | efx->nic_data = NULL; | 402 | efx->nic_data = NULL; |
421 | } | 403 | } |
422 | 404 | ||
@@ -656,7 +638,8 @@ const struct efx_nic_type siena_a0_nic_type = { | |||
656 | .default_mac_ops = &efx_mcdi_mac_operations, | 638 | .default_mac_ops = &efx_mcdi_mac_operations, |
657 | 639 | ||
658 | .revision = EFX_REV_SIENA_A0, | 640 | .revision = EFX_REV_SIENA_A0, |
659 | .mem_map_size = FR_CZ_MC_TREG_SMEM, /* MC_TREG_SMEM mapped separately */ | 641 | .mem_map_size = (FR_CZ_MC_TREG_SMEM + |
642 | FR_CZ_MC_TREG_SMEM_STEP * FR_CZ_MC_TREG_SMEM_ROWS), | ||
660 | .txd_ptr_tbl_base = FR_BZ_TX_DESC_PTR_TBL, | 643 | .txd_ptr_tbl_base = FR_BZ_TX_DESC_PTR_TBL, |
661 | .rxd_ptr_tbl_base = FR_BZ_RX_DESC_PTR_TBL, | 644 | .rxd_ptr_tbl_base = FR_BZ_RX_DESC_PTR_TBL, |
662 | .buf_tbl_base = FR_BZ_BUF_FULL_TBL, | 645 | .buf_tbl_base = FR_BZ_BUF_FULL_TBL, |
diff --git a/drivers/net/sfc/workarounds.h b/drivers/net/sfc/workarounds.h index 99ff11400cef..e4dd3a7f304b 100644 --- a/drivers/net/sfc/workarounds.h +++ b/drivers/net/sfc/workarounds.h | |||
@@ -38,8 +38,6 @@ | |||
38 | #define EFX_WORKAROUND_15783 EFX_WORKAROUND_ALWAYS | 38 | #define EFX_WORKAROUND_15783 EFX_WORKAROUND_ALWAYS |
39 | /* Legacy interrupt storm when interrupt fifo fills */ | 39 | /* Legacy interrupt storm when interrupt fifo fills */ |
40 | #define EFX_WORKAROUND_17213 EFX_WORKAROUND_SIENA | 40 | #define EFX_WORKAROUND_17213 EFX_WORKAROUND_SIENA |
41 | /* Write combining and sriov=enabled are incompatible */ | ||
42 | #define EFX_WORKAROUND_22643 EFX_WORKAROUND_SIENA | ||
43 | 41 | ||
44 | /* Spurious parity errors in TSORT buffers */ | 42 | /* Spurious parity errors in TSORT buffers */ |
45 | #define EFX_WORKAROUND_5129 EFX_WORKAROUND_FALCON_A | 43 | #define EFX_WORKAROUND_5129 EFX_WORKAROUND_FALCON_A |
diff --git a/drivers/net/sh_eth.c b/drivers/net/sh_eth.c index ad35c210b839..1c1666e99106 100644 --- a/drivers/net/sh_eth.c +++ b/drivers/net/sh_eth.c | |||
@@ -21,6 +21,7 @@ | |||
21 | */ | 21 | */ |
22 | 22 | ||
23 | #include <linux/init.h> | 23 | #include <linux/init.h> |
24 | #include <linux/interrupt.h> | ||
24 | #include <linux/dma-mapping.h> | 25 | #include <linux/dma-mapping.h> |
25 | #include <linux/etherdevice.h> | 26 | #include <linux/etherdevice.h> |
26 | #include <linux/delay.h> | 27 | #include <linux/delay.h> |
@@ -30,6 +31,7 @@ | |||
30 | #include <linux/phy.h> | 31 | #include <linux/phy.h> |
31 | #include <linux/cache.h> | 32 | #include <linux/cache.h> |
32 | #include <linux/io.h> | 33 | #include <linux/io.h> |
34 | #include <linux/interrupt.h> | ||
33 | #include <linux/pm_runtime.h> | 35 | #include <linux/pm_runtime.h> |
34 | #include <linux/slab.h> | 36 | #include <linux/slab.h> |
35 | #include <linux/ethtool.h> | 37 | #include <linux/ethtool.h> |
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c index dc3fbf61910b..4a1374df6084 100644 --- a/drivers/net/tg3.c +++ b/drivers/net/tg3.c | |||
@@ -6234,12 +6234,10 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
6234 | } | 6234 | } |
6235 | } | 6235 | } |
6236 | 6236 | ||
6237 | #ifdef BCM_KERNEL_SUPPORTS_8021Q | ||
6238 | if (vlan_tx_tag_present(skb)) { | 6237 | if (vlan_tx_tag_present(skb)) { |
6239 | base_flags |= TXD_FLAG_VLAN; | 6238 | base_flags |= TXD_FLAG_VLAN; |
6240 | vlan = vlan_tx_tag_get(skb); | 6239 | vlan = vlan_tx_tag_get(skb); |
6241 | } | 6240 | } |
6242 | #endif | ||
6243 | 6241 | ||
6244 | if (tg3_flag(tp, USE_JUMBO_BDFLAG) && | 6242 | if (tg3_flag(tp, USE_JUMBO_BDFLAG) && |
6245 | !mss && skb->len > VLAN_ETH_FRAME_LEN) | 6243 | !mss && skb->len > VLAN_ETH_FRAME_LEN) |
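The tg3 hunk removes the BCM_KERNEL_SUPPORTS_8021Q guard, so an offloaded VLAN tag is always propagated into the TX descriptor when one is present. The helper pair involved (2011-era names, later renamed to skb_vlan_tag_*) is used roughly like this hedged fragment, where the flag bit value is hypothetical:

    #include <linux/if_vlan.h>

    /* Copy an accelerated VLAN tag from the skb into driver-private
     * descriptor fields. */
    static void fill_vlan(struct sk_buff *skb, u32 *base_flags, u32 *vlan)
    {
        if (vlan_tx_tag_present(skb)) {
            *base_flags |= 1u << 6;     /* stand-in for TXD_FLAG_VLAN */
            *vlan = vlan_tx_tag_get(skb);
        }
    }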
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c index a03336e086d5..f06fb78383a1 100644 --- a/drivers/net/usb/cdc_ncm.c +++ b/drivers/net/usb/cdc_ncm.c | |||
@@ -228,23 +228,40 @@ static u8 cdc_ncm_setup(struct cdc_ncm_ctx *ctx) | |||
228 | if (ctx->rx_max != le32_to_cpu(ctx->ncm_parm.dwNtbInMaxSize)) { | 228 | if (ctx->rx_max != le32_to_cpu(ctx->ncm_parm.dwNtbInMaxSize)) { |
229 | 229 | ||
230 | if (flags & USB_CDC_NCM_NCAP_NTB_INPUT_SIZE) { | 230 | if (flags & USB_CDC_NCM_NCAP_NTB_INPUT_SIZE) { |
231 | struct usb_cdc_ncm_ndp_input_size ndp_in_sz; | 231 | struct usb_cdc_ncm_ndp_input_size *ndp_in_sz; |
232 | |||
233 | ndp_in_sz = kzalloc(sizeof(*ndp_in_sz), GFP_KERNEL); | ||
234 | if (!ndp_in_sz) { | ||
235 | err = -ENOMEM; | ||
236 | goto size_err; | ||
237 | } | ||
238 | |||
232 | err = usb_control_msg(ctx->udev, | 239 | err = usb_control_msg(ctx->udev, |
233 | usb_sndctrlpipe(ctx->udev, 0), | 240 | usb_sndctrlpipe(ctx->udev, 0), |
234 | USB_CDC_SET_NTB_INPUT_SIZE, | 241 | USB_CDC_SET_NTB_INPUT_SIZE, |
235 | USB_TYPE_CLASS | USB_DIR_OUT | 242 | USB_TYPE_CLASS | USB_DIR_OUT |
236 | | USB_RECIP_INTERFACE, | 243 | | USB_RECIP_INTERFACE, |
237 | 0, iface_no, &ndp_in_sz, 8, 1000); | 244 | 0, iface_no, ndp_in_sz, 8, 1000); |
245 | kfree(ndp_in_sz); | ||
238 | } else { | 246 | } else { |
239 | __le32 dwNtbInMaxSize = cpu_to_le32(ctx->rx_max); | 247 | __le32 *dwNtbInMaxSize; |
248 | dwNtbInMaxSize = kzalloc(sizeof(*dwNtbInMaxSize), | ||
249 | GFP_KERNEL); | ||
250 | if (!dwNtbInMaxSize) { | ||
251 | err = -ENOMEM; | ||
252 | goto size_err; | ||
253 | } | ||
254 | *dwNtbInMaxSize = cpu_to_le32(ctx->rx_max); | ||
255 | |||
240 | err = usb_control_msg(ctx->udev, | 256 | err = usb_control_msg(ctx->udev, |
241 | usb_sndctrlpipe(ctx->udev, 0), | 257 | usb_sndctrlpipe(ctx->udev, 0), |
242 | USB_CDC_SET_NTB_INPUT_SIZE, | 258 | USB_CDC_SET_NTB_INPUT_SIZE, |
243 | USB_TYPE_CLASS | USB_DIR_OUT | 259 | USB_TYPE_CLASS | USB_DIR_OUT |
244 | | USB_RECIP_INTERFACE, | 260 | | USB_RECIP_INTERFACE, |
245 | 0, iface_no, &dwNtbInMaxSize, 4, 1000); | 261 | 0, iface_no, dwNtbInMaxSize, 4, 1000); |
262 | kfree(dwNtbInMaxSize); | ||
246 | } | 263 | } |
247 | 264 | size_err: | |
248 | if (err < 0) | 265 | if (err < 0) |
249 | pr_debug("Setting NTB Input Size failed\n"); | 266 | pr_debug("Setting NTB Input Size failed\n"); |
250 | } | 267 | } |
@@ -325,19 +342,29 @@ static u8 cdc_ncm_setup(struct cdc_ncm_ctx *ctx) | |||
325 | 342 | ||
326 | /* set Max Datagram Size (MTU) */ | 343 | /* set Max Datagram Size (MTU) */ |
327 | if (flags & USB_CDC_NCM_NCAP_MAX_DATAGRAM_SIZE) { | 344 | if (flags & USB_CDC_NCM_NCAP_MAX_DATAGRAM_SIZE) { |
328 | __le16 max_datagram_size; | 345 | __le16 *max_datagram_size; |
329 | u16 eth_max_sz = le16_to_cpu(ctx->ether_desc->wMaxSegmentSize); | 346 | u16 eth_max_sz = le16_to_cpu(ctx->ether_desc->wMaxSegmentSize); |
347 | |||
348 | max_datagram_size = kzalloc(sizeof(*max_datagram_size), | ||
349 | GFP_KERNEL); | ||
350 | if (!max_datagram_size) { | ||
351 | err = -ENOMEM; | ||
352 | goto max_dgram_err; | ||
353 | } | ||
354 | |||
330 | err = usb_control_msg(ctx->udev, usb_rcvctrlpipe(ctx->udev, 0), | 355 | err = usb_control_msg(ctx->udev, usb_rcvctrlpipe(ctx->udev, 0), |
331 | USB_CDC_GET_MAX_DATAGRAM_SIZE, | 356 | USB_CDC_GET_MAX_DATAGRAM_SIZE, |
332 | USB_TYPE_CLASS | USB_DIR_IN | 357 | USB_TYPE_CLASS | USB_DIR_IN |
333 | | USB_RECIP_INTERFACE, | 358 | | USB_RECIP_INTERFACE, |
334 | 0, iface_no, &max_datagram_size, | 359 | 0, iface_no, max_datagram_size, |
335 | 2, 1000); | 360 | 2, 1000); |
336 | if (err < 0) { | 361 | if (err < 0) { |
337 | pr_debug("GET_MAX_DATAGRAM_SIZE failed, use size=%u\n", | 362 | pr_debug("GET_MAX_DATAGRAM_SIZE failed, use size=%u\n", |
338 | CDC_NCM_MIN_DATAGRAM_SIZE); | 363 | CDC_NCM_MIN_DATAGRAM_SIZE); |
364 | kfree(max_datagram_size); | ||
339 | } else { | 365 | } else { |
340 | ctx->max_datagram_size = le16_to_cpu(max_datagram_size); | 366 | ctx->max_datagram_size = |
367 | le16_to_cpu(*max_datagram_size); | ||
341 | /* Check Eth descriptor value */ | 368 | /* Check Eth descriptor value */ |
342 | if (eth_max_sz < CDC_NCM_MAX_DATAGRAM_SIZE) { | 369 | if (eth_max_sz < CDC_NCM_MAX_DATAGRAM_SIZE) { |
343 | if (ctx->max_datagram_size > eth_max_sz) | 370 | if (ctx->max_datagram_size > eth_max_sz) |
@@ -360,8 +387,10 @@ static u8 cdc_ncm_setup(struct cdc_ncm_ctx *ctx) | |||
360 | USB_TYPE_CLASS | USB_DIR_OUT | 387 | USB_TYPE_CLASS | USB_DIR_OUT |
361 | | USB_RECIP_INTERFACE, | 388 | | USB_RECIP_INTERFACE, |
362 | 0, | 389 | 0, |
363 | iface_no, &max_datagram_size, | 390 | iface_no, max_datagram_size, |
364 | 2, 1000); | 391 | 2, 1000); |
392 | kfree(max_datagram_size); | ||
393 | max_dgram_err: | ||
365 | if (err < 0) | 394 | if (err < 0) |
366 | pr_debug("SET_MAX_DATAGRAM_SIZE failed\n"); | 395 | pr_debug("SET_MAX_DATAGRAM_SIZE failed\n"); |
367 | } | 396 | } |
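All three cdc_ncm hunks make the same correction: the buffers handed to usb_control_msg() are now allocated with kzalloc() rather than taken from the stack, because the USB core may DMA-map the data buffer and stack memory is not guaranteed to be DMA-able. A minimal sketch of the pattern, using a hypothetical class request:

    #include <linux/slab.h>
    #include <linux/usb.h>

    /* Send a 32-bit little-endian value as the data stage of a class-specific
     * control request, using a heap buffer as USB transfers require. */
    static int send_u32_request(struct usb_device *udev, u8 request, u16 iface,
                                u32 val)
    {
        __le32 *buf;
        int err;

        buf = kmalloc(sizeof(*buf), GFP_KERNEL);
        if (!buf)
            return -ENOMEM;
        *buf = cpu_to_le32(val);

        err = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), request,
                              USB_TYPE_CLASS | USB_DIR_OUT | USB_RECIP_INTERFACE,
                              0, iface, buf, sizeof(*buf), 1000);
        kfree(buf);
        return err < 0 ? err : 0;
    }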
diff --git a/drivers/net/usb/ipheth.c b/drivers/net/usb/ipheth.c index 15772b1b6a91..13c1f044b40d 100644 --- a/drivers/net/usb/ipheth.c +++ b/drivers/net/usb/ipheth.c | |||
@@ -59,6 +59,7 @@ | |||
59 | #define USB_PRODUCT_IPHONE_3G 0x1292 | 59 | #define USB_PRODUCT_IPHONE_3G 0x1292 |
60 | #define USB_PRODUCT_IPHONE_3GS 0x1294 | 60 | #define USB_PRODUCT_IPHONE_3GS 0x1294 |
61 | #define USB_PRODUCT_IPHONE_4 0x1297 | 61 | #define USB_PRODUCT_IPHONE_4 0x1297 |
62 | #define USB_PRODUCT_IPHONE_4_VZW 0x129c | ||
62 | 63 | ||
63 | #define IPHETH_USBINTF_CLASS 255 | 64 | #define IPHETH_USBINTF_CLASS 255 |
64 | #define IPHETH_USBINTF_SUBCLASS 253 | 65 | #define IPHETH_USBINTF_SUBCLASS 253 |
@@ -98,6 +99,10 @@ static struct usb_device_id ipheth_table[] = { | |||
98 | USB_VENDOR_APPLE, USB_PRODUCT_IPHONE_4, | 99 | USB_VENDOR_APPLE, USB_PRODUCT_IPHONE_4, |
99 | IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS, | 100 | IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS, |
100 | IPHETH_USBINTF_PROTO) }, | 101 | IPHETH_USBINTF_PROTO) }, |
102 | { USB_DEVICE_AND_INTERFACE_INFO( | ||
103 | USB_VENDOR_APPLE, USB_PRODUCT_IPHONE_4_VZW, | ||
104 | IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS, | ||
105 | IPHETH_USBINTF_PROTO) }, | ||
101 | { } | 106 | { } |
102 | }; | 107 | }; |
103 | MODULE_DEVICE_TABLE(usb, ipheth_table); | 108 | MODULE_DEVICE_TABLE(usb, ipheth_table); |
diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c index deb1eca13c9f..7c5336c5c37f 100644 --- a/drivers/net/via-velocity.c +++ b/drivers/net/via-velocity.c | |||
@@ -515,10 +515,6 @@ static void velocity_init_cam_filter(struct velocity_info *vptr) | |||
515 | mac_set_cam_mask(regs, vptr->mCAMmask); | 515 | mac_set_cam_mask(regs, vptr->mCAMmask); |
516 | 516 | ||
517 | /* Enable VCAMs */ | 517 | /* Enable VCAMs */ |
518 | |||
519 | if (test_bit(0, vptr->active_vlans)) | ||
520 | WORD_REG_BITS_ON(MCFG_RTGOPT, ®s->MCFG); | ||
521 | |||
522 | for_each_set_bit(vid, vptr->active_vlans, VLAN_N_VID) { | 518 | for_each_set_bit(vid, vptr->active_vlans, VLAN_N_VID) { |
523 | mac_set_vlan_cam(regs, i, (u8 *) &vid); | 519 | mac_set_vlan_cam(regs, i, (u8 *) &vid); |
524 | vptr->vCAMmask[i / 8] |= 0x1 << (i % 8); | 520 | vptr->vCAMmask[i / 8] |= 0x1 << (i % 8); |
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c index 1cbacb389652..0959583feb27 100644 --- a/drivers/net/vmxnet3/vmxnet3_drv.c +++ b/drivers/net/vmxnet3/vmxnet3_drv.c | |||
@@ -1929,14 +1929,17 @@ static void | |||
1929 | vmxnet3_vlan_rx_add_vid(struct net_device *netdev, u16 vid) | 1929 | vmxnet3_vlan_rx_add_vid(struct net_device *netdev, u16 vid) |
1930 | { | 1930 | { |
1931 | struct vmxnet3_adapter *adapter = netdev_priv(netdev); | 1931 | struct vmxnet3_adapter *adapter = netdev_priv(netdev); |
1932 | u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable; | ||
1933 | unsigned long flags; | ||
1934 | 1932 | ||
1935 | VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid); | 1933 | if (!(netdev->flags & IFF_PROMISC)) { |
1936 | spin_lock_irqsave(&adapter->cmd_lock, flags); | 1934 | u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable; |
1937 | VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, | 1935 | unsigned long flags; |
1938 | VMXNET3_CMD_UPDATE_VLAN_FILTERS); | 1936 | |
1939 | spin_unlock_irqrestore(&adapter->cmd_lock, flags); | 1937 | VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid); |
1938 | spin_lock_irqsave(&adapter->cmd_lock, flags); | ||
1939 | VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, | ||
1940 | VMXNET3_CMD_UPDATE_VLAN_FILTERS); | ||
1941 | spin_unlock_irqrestore(&adapter->cmd_lock, flags); | ||
1942 | } | ||
1940 | 1943 | ||
1941 | set_bit(vid, adapter->active_vlans); | 1944 | set_bit(vid, adapter->active_vlans); |
1942 | } | 1945 | } |
@@ -1946,14 +1949,17 @@ static void | |||
1946 | vmxnet3_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) | 1949 | vmxnet3_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) |
1947 | { | 1950 | { |
1948 | struct vmxnet3_adapter *adapter = netdev_priv(netdev); | 1951 | struct vmxnet3_adapter *adapter = netdev_priv(netdev); |
1949 | u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable; | ||
1950 | unsigned long flags; | ||
1951 | 1952 | ||
1952 | VMXNET3_CLEAR_VFTABLE_ENTRY(vfTable, vid); | 1953 | if (!(netdev->flags & IFF_PROMISC)) { |
1953 | spin_lock_irqsave(&adapter->cmd_lock, flags); | 1954 | u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable; |
1954 | VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, | 1955 | unsigned long flags; |
1955 | VMXNET3_CMD_UPDATE_VLAN_FILTERS); | 1956 | |
1956 | spin_unlock_irqrestore(&adapter->cmd_lock, flags); | 1957 | VMXNET3_CLEAR_VFTABLE_ENTRY(vfTable, vid); |
1958 | spin_lock_irqsave(&adapter->cmd_lock, flags); | ||
1959 | VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, | ||
1960 | VMXNET3_CMD_UPDATE_VLAN_FILTERS); | ||
1961 | spin_unlock_irqrestore(&adapter->cmd_lock, flags); | ||
1962 | } | ||
1957 | 1963 | ||
1958 | clear_bit(vid, adapter->active_vlans); | 1964 | clear_bit(vid, adapter->active_vlans); |
1959 | } | 1965 | } |
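The vmxnet3 hunks keep the software active_vlans bitmap up to date unconditionally but only issue the UPDATE_VLAN_FILTERS command while the interface is not promiscuous, because the hardware filter table is bypassed in promiscuous mode and is rebuilt from the bitmap when that mode is left. Condensed into a hedged sketch:

    #include <linux/netdevice.h>

    /* Track the vid in software always; touch the device filter table only
     * when it is actually in use. */
    static void add_vid(struct net_device *netdev, u16 vid,
                        unsigned long *active_vlans)
    {
        if (!(netdev->flags & IFF_PROMISC)) {
            /* issue VMXNET3_CMD_UPDATE_VLAN_FILTERS under cmd_lock here */
        }
        set_bit(vid, active_vlans);
    }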
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_calib.c b/drivers/net/wireless/ath/ath9k/ar9002_calib.c index 2d4c0910295b..2d394af82171 100644 --- a/drivers/net/wireless/ath/ath9k/ar9002_calib.c +++ b/drivers/net/wireless/ath/ath9k/ar9002_calib.c | |||
@@ -41,7 +41,8 @@ static bool ar9002_hw_is_cal_supported(struct ath_hw *ah, | |||
41 | case ADC_DC_CAL: | 41 | case ADC_DC_CAL: |
42 | /* Run ADC Gain Cal for non-CCK & non 2GHz-HT20 only */ | 42 | /* Run ADC Gain Cal for non-CCK & non 2GHz-HT20 only */ |
43 | if (!IS_CHAN_B(chan) && | 43 | if (!IS_CHAN_B(chan) && |
44 | !(IS_CHAN_2GHZ(chan) && IS_CHAN_HT20(chan))) | 44 | !((IS_CHAN_2GHZ(chan) || IS_CHAN_A_FAST_CLOCK(ah, chan)) && |
45 | IS_CHAN_HT20(chan))) | ||
45 | supported = true; | 46 | supported = true; |
46 | break; | 47 | break; |
47 | } | 48 | } |
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h b/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h index 2339728a7306..3e69c631ebb4 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h +++ b/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h | |||
@@ -1514,7 +1514,7 @@ static const u32 ar9300_2p2_mac_core[][2] = { | |||
1514 | {0x00008258, 0x00000000}, | 1514 | {0x00008258, 0x00000000}, |
1515 | {0x0000825c, 0x40000000}, | 1515 | {0x0000825c, 0x40000000}, |
1516 | {0x00008260, 0x00080922}, | 1516 | {0x00008260, 0x00080922}, |
1517 | {0x00008264, 0x9bc00010}, | 1517 | {0x00008264, 0x9d400010}, |
1518 | {0x00008268, 0xffffffff}, | 1518 | {0x00008268, 0xffffffff}, |
1519 | {0x0000826c, 0x0000ffff}, | 1519 | {0x0000826c, 0x0000ffff}, |
1520 | {0x00008270, 0x00000000}, | 1520 | {0x00008270, 0x00000000}, |
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c index c34bef1bf2b0..1b9400371eaf 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c | |||
@@ -69,7 +69,7 @@ static int ar9003_hw_power_interpolate(int32_t x, | |||
69 | static const struct ar9300_eeprom ar9300_default = { | 69 | static const struct ar9300_eeprom ar9300_default = { |
70 | .eepromVersion = 2, | 70 | .eepromVersion = 2, |
71 | .templateVersion = 2, | 71 | .templateVersion = 2, |
72 | .macAddr = {1, 2, 3, 4, 5, 6}, | 72 | .macAddr = {0, 2, 3, 4, 5, 6}, |
73 | .custData = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, | 73 | .custData = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, |
74 | 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, | 74 | 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, |
75 | .baseEepHeader = { | 75 | .baseEepHeader = { |
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.c b/drivers/net/wireless/ath/ath9k/ar9003_phy.c index 1baca8e4715d..fcafec0605f4 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_phy.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.c | |||
@@ -671,7 +671,7 @@ static int ar9003_hw_process_ini(struct ath_hw *ah, | |||
671 | REG_WRITE_ARRAY(&ah->iniModesAdditional, | 671 | REG_WRITE_ARRAY(&ah->iniModesAdditional, |
672 | modesIndex, regWrites); | 672 | modesIndex, regWrites); |
673 | 673 | ||
674 | if (AR_SREV_9300(ah)) | 674 | if (AR_SREV_9330(ah)) |
675 | REG_WRITE_ARRAY(&ah->iniModesAdditional, 1, regWrites); | 675 | REG_WRITE_ARRAY(&ah->iniModesAdditional, 1, regWrites); |
676 | 676 | ||
677 | if (AR_SREV_9340(ah) && !ah->is_clk_25mhz) | 677 | if (AR_SREV_9340(ah) && !ah->is_clk_25mhz) |
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c index 9098aaad97a9..722967b86cf1 100644 --- a/drivers/net/wireless/ath/ath9k/main.c +++ b/drivers/net/wireless/ath/ath9k/main.c | |||
@@ -2283,7 +2283,11 @@ static void ath9k_set_coverage_class(struct ieee80211_hw *hw, u8 coverage_class) | |||
2283 | 2283 | ||
2284 | mutex_lock(&sc->mutex); | 2284 | mutex_lock(&sc->mutex); |
2285 | ah->coverage_class = coverage_class; | 2285 | ah->coverage_class = coverage_class; |
2286 | |||
2287 | ath9k_ps_wakeup(sc); | ||
2286 | ath9k_hw_init_global_settings(ah); | 2288 | ath9k_hw_init_global_settings(ah); |
2289 | ath9k_ps_restore(sc); | ||
2290 | |||
2287 | mutex_unlock(&sc->mutex); | 2291 | mutex_unlock(&sc->mutex); |
2288 | } | 2292 | } |
2289 | 2293 | ||
@@ -2299,6 +2303,12 @@ static void ath9k_flush(struct ieee80211_hw *hw, bool drop) | |||
2299 | mutex_lock(&sc->mutex); | 2303 | mutex_lock(&sc->mutex); |
2300 | cancel_delayed_work_sync(&sc->tx_complete_work); | 2304 | cancel_delayed_work_sync(&sc->tx_complete_work); |
2301 | 2305 | ||
2306 | if (ah->ah_flags & AH_UNPLUGGED) { | ||
2307 | ath_dbg(common, ATH_DBG_ANY, "Device has been unplugged!\n"); | ||
2308 | mutex_unlock(&sc->mutex); | ||
2309 | return; | ||
2310 | } | ||
2311 | |||
2302 | if (sc->sc_flags & SC_OP_INVALID) { | 2312 | if (sc->sc_flags & SC_OP_INVALID) { |
2303 | ath_dbg(common, ATH_DBG_ANY, "Device not present\n"); | 2313 | ath_dbg(common, ATH_DBG_ANY, "Device not present\n"); |
2304 | mutex_unlock(&sc->mutex); | 2314 | mutex_unlock(&sc->mutex); |
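Both ath9k/main.c hunks enforce lifecycle rules around direct hardware access: registers may only be programmed while the chip is awake, and a flush must bail out once the device has been unplugged. Assembled from the first hunk (names as in the hunk, surrounding declarations omitted), the coverage-class path now reads approximately:

    /* Bracket hardware programming with a power-save wakeup/restore pair so
     * the chip is guaranteed awake for the register writes. */
    static void set_coverage_class(struct ath_softc *sc, struct ath_hw *ah,
                                   u8 coverage_class)
    {
        mutex_lock(&sc->mutex);
        ah->coverage_class = coverage_class;

        ath9k_ps_wakeup(sc);
        ath9k_hw_init_global_settings(ah);
        ath9k_ps_restore(sc);

        mutex_unlock(&sc->mutex);
    }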
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c index 9a4850154fb2..4c21f8cbdeb5 100644 --- a/drivers/net/wireless/ath/ath9k/recv.c +++ b/drivers/net/wireless/ath/ath9k/recv.c | |||
@@ -205,14 +205,22 @@ static void ath_rx_remove_buffer(struct ath_softc *sc, | |||
205 | 205 | ||
206 | static void ath_rx_edma_cleanup(struct ath_softc *sc) | 206 | static void ath_rx_edma_cleanup(struct ath_softc *sc) |
207 | { | 207 | { |
208 | struct ath_hw *ah = sc->sc_ah; | ||
209 | struct ath_common *common = ath9k_hw_common(ah); | ||
208 | struct ath_buf *bf; | 210 | struct ath_buf *bf; |
209 | 211 | ||
210 | ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_LP); | 212 | ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_LP); |
211 | ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_HP); | 213 | ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_HP); |
212 | 214 | ||
213 | list_for_each_entry(bf, &sc->rx.rxbuf, list) { | 215 | list_for_each_entry(bf, &sc->rx.rxbuf, list) { |
214 | if (bf->bf_mpdu) | 216 | if (bf->bf_mpdu) { |
217 | dma_unmap_single(sc->dev, bf->bf_buf_addr, | ||
218 | common->rx_bufsize, | ||
219 | DMA_BIDIRECTIONAL); | ||
215 | dev_kfree_skb_any(bf->bf_mpdu); | 220 | dev_kfree_skb_any(bf->bf_mpdu); |
221 | bf->bf_buf_addr = 0; | ||
222 | bf->bf_mpdu = NULL; | ||
223 | } | ||
216 | } | 224 | } |
217 | 225 | ||
218 | INIT_LIST_HEAD(&sc->rx.rxbuf); | 226 | INIT_LIST_HEAD(&sc->rx.rxbuf); |
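The recv.c hunk plugs a DMA-mapping leak: a buffer that was dma_map_single()'d for RX must be unmapped before its skb is freed, and the stale pointer and address are cleared so a later teardown pass cannot unmap or free twice. The rule in generic form, as a sketch:

    #include <linux/dma-mapping.h>
    #include <linux/skbuff.h>

    /* Release one mapped RX buffer: unmap first, free the skb, then clear the
     * bookkeeping so the operation is idempotent. */
    static void release_rx_buf(struct device *dev, struct sk_buff **skb,
                               dma_addr_t *addr, size_t bufsize)
    {
        if (!*skb)
            return;
        dma_unmap_single(dev, *addr, bufsize, DMA_BIDIRECTIONAL);
        dev_kfree_skb_any(*skb);
        *skb = NULL;
        *addr = 0;
    }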
diff --git a/drivers/net/wireless/ath/carl9170/main.c b/drivers/net/wireless/ath/carl9170/main.c index 0122930b14c7..0474e6638d21 100644 --- a/drivers/net/wireless/ath/carl9170/main.c +++ b/drivers/net/wireless/ath/carl9170/main.c | |||
@@ -1066,8 +1066,10 @@ static int carl9170_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, | |||
1066 | * the high throughput speed in 802.11n networks. | 1066 | * the high throughput speed in 802.11n networks. |
1067 | */ | 1067 | */ |
1068 | 1068 | ||
1069 | if (!is_main_vif(ar, vif)) | 1069 | if (!is_main_vif(ar, vif)) { |
1070 | mutex_lock(&ar->mutex); | ||
1070 | goto err_softw; | 1071 | goto err_softw; |
1072 | } | ||
1071 | 1073 | ||
1072 | /* | 1074 | /* |
1073 | * While the hardware supports *catch-all* key, for offloading | 1075 | * While the hardware supports *catch-all* key, for offloading |
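The carl9170 fix restores lock balance: the err_softw label further down the function drops ar->mutex, so the early bail-out for non-main vifs must take the mutex before jumping there. The invariant, reduced to a hedged toy example:

    #include <linux/mutex.h>

    /* Any goto that reaches a label which unlocks must itself hold the lock,
     * otherwise mutex_unlock() runs on a mutex this path never acquired. */
    static int guarded_op(struct mutex *lock, bool shortcut)
    {
        int err = 0;

        if (shortcut) {
            mutex_lock(lock);   /* matches the unlock at out: */
            goto out;
        }

        mutex_lock(lock);
        /* ... normal work under the lock ... */
    out:
        mutex_unlock(lock);
        return err;
    }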
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c index 26f1ab840cc7..e293a7921bf0 100644 --- a/drivers/net/wireless/b43/main.c +++ b/drivers/net/wireless/b43/main.c | |||
@@ -1632,7 +1632,8 @@ static void handle_irq_beacon(struct b43_wldev *dev) | |||
1632 | u32 cmd, beacon0_valid, beacon1_valid; | 1632 | u32 cmd, beacon0_valid, beacon1_valid; |
1633 | 1633 | ||
1634 | if (!b43_is_mode(wl, NL80211_IFTYPE_AP) && | 1634 | if (!b43_is_mode(wl, NL80211_IFTYPE_AP) && |
1635 | !b43_is_mode(wl, NL80211_IFTYPE_MESH_POINT)) | 1635 | !b43_is_mode(wl, NL80211_IFTYPE_MESH_POINT) && |
1636 | !b43_is_mode(wl, NL80211_IFTYPE_ADHOC)) | ||
1636 | return; | 1637 | return; |
1637 | 1638 | ||
1638 | /* This is the bottom half of the asynchronous beacon update. */ | 1639 | /* This is the bottom half of the asynchronous beacon update. */ |
diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c index 3774dd034746..ef9ad79d1bfd 100644 --- a/drivers/net/wireless/ipw2x00/ipw2100.c +++ b/drivers/net/wireless/ipw2x00/ipw2100.c | |||
@@ -1903,15 +1903,17 @@ static void ipw2100_down(struct ipw2100_priv *priv) | |||
1903 | static int ipw2100_net_init(struct net_device *dev) | 1903 | static int ipw2100_net_init(struct net_device *dev) |
1904 | { | 1904 | { |
1905 | struct ipw2100_priv *priv = libipw_priv(dev); | 1905 | struct ipw2100_priv *priv = libipw_priv(dev); |
1906 | |||
1907 | return ipw2100_up(priv, 1); | ||
1908 | } | ||
1909 | |||
1910 | static int ipw2100_wdev_init(struct net_device *dev) | ||
1911 | { | ||
1912 | struct ipw2100_priv *priv = libipw_priv(dev); | ||
1906 | const struct libipw_geo *geo = libipw_get_geo(priv->ieee); | 1913 | const struct libipw_geo *geo = libipw_get_geo(priv->ieee); |
1907 | struct wireless_dev *wdev = &priv->ieee->wdev; | 1914 | struct wireless_dev *wdev = &priv->ieee->wdev; |
1908 | int ret; | ||
1909 | int i; | 1915 | int i; |
1910 | 1916 | ||
1911 | ret = ipw2100_up(priv, 1); | ||
1912 | if (ret) | ||
1913 | return ret; | ||
1914 | |||
1915 | memcpy(wdev->wiphy->perm_addr, priv->mac_addr, ETH_ALEN); | 1917 | memcpy(wdev->wiphy->perm_addr, priv->mac_addr, ETH_ALEN); |
1916 | 1918 | ||
1917 | /* fill-out priv->ieee->bg_band */ | 1919 | /* fill-out priv->ieee->bg_band */ |
@@ -6350,9 +6352,13 @@ static int ipw2100_pci_init_one(struct pci_dev *pci_dev, | |||
6350 | "Error calling register_netdev.\n"); | 6352 | "Error calling register_netdev.\n"); |
6351 | goto fail; | 6353 | goto fail; |
6352 | } | 6354 | } |
6355 | registered = 1; | ||
6356 | |||
6357 | err = ipw2100_wdev_init(dev); | ||
6358 | if (err) | ||
6359 | goto fail; | ||
6353 | 6360 | ||
6354 | mutex_lock(&priv->action_mutex); | 6361 | mutex_lock(&priv->action_mutex); |
6355 | registered = 1; | ||
6356 | 6362 | ||
6357 | IPW_DEBUG_INFO("%s: Bound to %s\n", dev->name, pci_name(pci_dev)); | 6363 | IPW_DEBUG_INFO("%s: Bound to %s\n", dev->name, pci_name(pci_dev)); |
6358 | 6364 | ||
@@ -6389,7 +6395,8 @@ static int ipw2100_pci_init_one(struct pci_dev *pci_dev, | |||
6389 | 6395 | ||
6390 | fail_unlock: | 6396 | fail_unlock: |
6391 | mutex_unlock(&priv->action_mutex); | 6397 | mutex_unlock(&priv->action_mutex); |
6392 | 6398 | wiphy_unregister(priv->ieee->wdev.wiphy); | |
6399 | kfree(priv->ieee->bg_band.channels); | ||
6393 | fail: | 6400 | fail: |
6394 | if (dev) { | 6401 | if (dev) { |
6395 | if (registered) | 6402 | if (registered) |
diff --git a/drivers/net/wireless/ipw2x00/ipw2200.c b/drivers/net/wireless/ipw2x00/ipw2200.c index 87813c33bdc2..4ffebede5e03 100644 --- a/drivers/net/wireless/ipw2x00/ipw2200.c +++ b/drivers/net/wireless/ipw2x00/ipw2200.c | |||
@@ -11425,16 +11425,23 @@ static void ipw_bg_down(struct work_struct *work) | |||
11425 | /* Called by register_netdev() */ | 11425 | /* Called by register_netdev() */ |
11426 | static int ipw_net_init(struct net_device *dev) | 11426 | static int ipw_net_init(struct net_device *dev) |
11427 | { | 11427 | { |
11428 | int rc = 0; | ||
11429 | struct ipw_priv *priv = libipw_priv(dev); | ||
11430 | |||
11431 | mutex_lock(&priv->mutex); | ||
11432 | if (ipw_up(priv)) | ||
11433 | rc = -EIO; | ||
11434 | mutex_unlock(&priv->mutex); | ||
11435 | |||
11436 | return rc; | ||
11437 | } | ||
11438 | |||
11439 | static int ipw_wdev_init(struct net_device *dev) | ||
11440 | { | ||
11428 | int i, rc = 0; | 11441 | int i, rc = 0; |
11429 | struct ipw_priv *priv = libipw_priv(dev); | 11442 | struct ipw_priv *priv = libipw_priv(dev); |
11430 | const struct libipw_geo *geo = libipw_get_geo(priv->ieee); | 11443 | const struct libipw_geo *geo = libipw_get_geo(priv->ieee); |
11431 | struct wireless_dev *wdev = &priv->ieee->wdev; | 11444 | struct wireless_dev *wdev = &priv->ieee->wdev; |
11432 | mutex_lock(&priv->mutex); | ||
11433 | |||
11434 | if (ipw_up(priv)) { | ||
11435 | rc = -EIO; | ||
11436 | goto out; | ||
11437 | } | ||
11438 | 11445 | ||
11439 | memcpy(wdev->wiphy->perm_addr, priv->mac_addr, ETH_ALEN); | 11446 | memcpy(wdev->wiphy->perm_addr, priv->mac_addr, ETH_ALEN); |
11440 | 11447 | ||
@@ -11519,13 +11526,9 @@ static int ipw_net_init(struct net_device *dev) | |||
11519 | set_wiphy_dev(wdev->wiphy, &priv->pci_dev->dev); | 11526 | set_wiphy_dev(wdev->wiphy, &priv->pci_dev->dev); |
11520 | 11527 | ||
11521 | /* With that information in place, we can now register the wiphy... */ | 11528 | /* With that information in place, we can now register the wiphy... */ |
11522 | if (wiphy_register(wdev->wiphy)) { | 11529 | if (wiphy_register(wdev->wiphy)) |
11523 | rc = -EIO; | 11530 | rc = -EIO; |
11524 | goto out; | ||
11525 | } | ||
11526 | |||
11527 | out: | 11531 | out: |
11528 | mutex_unlock(&priv->mutex); | ||
11529 | return rc; | 11532 | return rc; |
11530 | } | 11533 | } |
11531 | 11534 | ||
@@ -11832,14 +11835,22 @@ static int __devinit ipw_pci_probe(struct pci_dev *pdev, | |||
11832 | goto out_remove_sysfs; | 11835 | goto out_remove_sysfs; |
11833 | } | 11836 | } |
11834 | 11837 | ||
11838 | err = ipw_wdev_init(net_dev); | ||
11839 | if (err) { | ||
11840 | IPW_ERROR("failed to register wireless device\n"); | ||
11841 | goto out_unregister_netdev; | ||
11842 | } | ||
11843 | |||
11835 | #ifdef CONFIG_IPW2200_PROMISCUOUS | 11844 | #ifdef CONFIG_IPW2200_PROMISCUOUS |
11836 | if (rtap_iface) { | 11845 | if (rtap_iface) { |
11837 | err = ipw_prom_alloc(priv); | 11846 | err = ipw_prom_alloc(priv); |
11838 | if (err) { | 11847 | if (err) { |
11839 | IPW_ERROR("Failed to register promiscuous network " | 11848 | IPW_ERROR("Failed to register promiscuous network " |
11840 | "device (error %d).\n", err); | 11849 | "device (error %d).\n", err); |
11841 | unregister_netdev(priv->net_dev); | 11850 | wiphy_unregister(priv->ieee->wdev.wiphy); |
11842 | goto out_remove_sysfs; | 11851 | kfree(priv->ieee->a_band.channels); |
11852 | kfree(priv->ieee->bg_band.channels); | ||
11853 | goto out_unregister_netdev; | ||
11843 | } | 11854 | } |
11844 | } | 11855 | } |
11845 | #endif | 11856 | #endif |
@@ -11851,6 +11862,8 @@ static int __devinit ipw_pci_probe(struct pci_dev *pdev, | |||
11851 | 11862 | ||
11852 | return 0; | 11863 | return 0; |
11853 | 11864 | ||
11865 | out_unregister_netdev: | ||
11866 | unregister_netdev(priv->net_dev); | ||
11854 | out_remove_sysfs: | 11867 | out_remove_sysfs: |
11855 | sysfs_remove_group(&pdev->dev.kobj, &ipw_attribute_group); | 11868 | sysfs_remove_group(&pdev->dev.kobj, &ipw_attribute_group); |
11856 | out_release_irq: | 11869 | out_release_irq: |
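ipw2100 and ipw2200 get the same restructuring: bringing the hardware up stays in the ndo_init callback, wiphy setup moves into a separate *_wdev_init() called only after register_netdev() succeeds, and the probe error paths now unwind the wiphy and channel arrays they created. The probe-side ordering, as a hedged skeleton with generic names:

    #include <linux/netdevice.h>
    #include <net/cfg80211.h>

    /* Register the net device first, then the wireless device; on a later
     * failure unwind in reverse order of registration. */
    static int register_both(struct net_device *ndev, struct wiphy *wiphy)
    {
        int err;

        err = register_netdev(ndev);
        if (err)
            return err;

        err = wiphy_register(wiphy);
        if (err)
            goto out_unregister_netdev;

        return 0;

    out_unregister_netdev:
        unregister_netdev(ndev);
        return err;
    }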
diff --git a/drivers/net/wireless/iwlegacy/iwl-3945-rs.c b/drivers/net/wireless/iwlegacy/iwl-3945-rs.c index 977bd2477c6a..164bcae821f8 100644 --- a/drivers/net/wireless/iwlegacy/iwl-3945-rs.c +++ b/drivers/net/wireless/iwlegacy/iwl-3945-rs.c | |||
@@ -822,12 +822,15 @@ static void iwl3945_rs_get_rate(void *priv_r, struct ieee80211_sta *sta, | |||
822 | 822 | ||
823 | out: | 823 | out: |
824 | 824 | ||
825 | rs_sta->last_txrate_idx = index; | 825 | if (sband->band == IEEE80211_BAND_5GHZ) { |
826 | if (sband->band == IEEE80211_BAND_5GHZ) | 826 | if (WARN_ON_ONCE(index < IWL_FIRST_OFDM_RATE)) |
827 | info->control.rates[0].idx = rs_sta->last_txrate_idx - | 827 | index = IWL_FIRST_OFDM_RATE; |
828 | IWL_FIRST_OFDM_RATE; | 828 | rs_sta->last_txrate_idx = index; |
829 | else | 829 | info->control.rates[0].idx = index - IWL_FIRST_OFDM_RATE; |
830 | } else { | ||
831 | rs_sta->last_txrate_idx = index; | ||
830 | info->control.rates[0].idx = rs_sta->last_txrate_idx; | 832 | info->control.rates[0].idx = rs_sta->last_txrate_idx; |
833 | } | ||
831 | 834 | ||
832 | IWL_DEBUG_RATE(priv, "leave: %d\n", index); | 835 | IWL_DEBUG_RATE(priv, "leave: %d\n", index); |
833 | } | 836 | } |
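The rate-scaling fix clamps the selected index on the 5 GHz band before rebasing it against IWL_FIRST_OFDM_RATE, so the subtraction can never produce a negative control index. The guard in isolation, with first_ofdm standing in for IWL_FIRST_OFDM_RATE:

    #include <linux/bug.h>

    /* Clamp an index before rebasing it; warn once if the supposedly
     * impossible case is ever hit. */
    static int rebase_5ghz_index(int index, int first_ofdm)
    {
        if (WARN_ON_ONCE(index < first_ofdm))
            index = first_ofdm;
        return index - first_ofdm;      /* guaranteed >= 0 */
    }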
diff --git a/drivers/net/wireless/iwlegacy/iwl-core.c b/drivers/net/wireless/iwlegacy/iwl-core.c index 35cd2537e7fd..e5971fe9d169 100644 --- a/drivers/net/wireless/iwlegacy/iwl-core.c +++ b/drivers/net/wireless/iwlegacy/iwl-core.c | |||
@@ -937,7 +937,7 @@ void iwl_legacy_irq_handle_error(struct iwl_priv *priv) | |||
937 | &priv->contexts[IWL_RXON_CTX_BSS]); | 937 | &priv->contexts[IWL_RXON_CTX_BSS]); |
938 | #endif | 938 | #endif |
939 | 939 | ||
940 | wake_up_interruptible(&priv->wait_command_queue); | 940 | wake_up(&priv->wait_command_queue); |
941 | 941 | ||
942 | /* Keep the restart process from trying to send host | 942 | /* Keep the restart process from trying to send host |
943 | * commands by clearing the INIT status bit */ | 943 | * commands by clearing the INIT status bit */ |
@@ -1746,7 +1746,7 @@ int iwl_legacy_force_reset(struct iwl_priv *priv, bool external) | |||
1746 | 1746 | ||
1747 | /* Set the FW error flag -- cleared on iwl_down */ | 1747 | /* Set the FW error flag -- cleared on iwl_down */ |
1748 | set_bit(STATUS_FW_ERROR, &priv->status); | 1748 | set_bit(STATUS_FW_ERROR, &priv->status); |
1749 | wake_up_interruptible(&priv->wait_command_queue); | 1749 | wake_up(&priv->wait_command_queue); |
1750 | /* | 1750 | /* |
1751 | * Keep the restart process from trying to send host | 1751 | * Keep the restart process from trying to send host |
1752 | * commands by clearing the INIT status bit | 1752 | * commands by clearing the INIT status bit |
diff --git a/drivers/net/wireless/iwlegacy/iwl-hcmd.c b/drivers/net/wireless/iwlegacy/iwl-hcmd.c index 62b4b09122cb..ce1fc9feb61f 100644 --- a/drivers/net/wireless/iwlegacy/iwl-hcmd.c +++ b/drivers/net/wireless/iwlegacy/iwl-hcmd.c | |||
@@ -167,7 +167,7 @@ int iwl_legacy_send_cmd_sync(struct iwl_priv *priv, struct iwl_host_cmd *cmd) | |||
167 | goto out; | 167 | goto out; |
168 | } | 168 | } |
169 | 169 | ||
170 | ret = wait_event_interruptible_timeout(priv->wait_command_queue, | 170 | ret = wait_event_timeout(priv->wait_command_queue, |
171 | !test_bit(STATUS_HCMD_ACTIVE, &priv->status), | 171 | !test_bit(STATUS_HCMD_ACTIVE, &priv->status), |
172 | HOST_COMPLETE_TIMEOUT); | 172 | HOST_COMPLETE_TIMEOUT); |
173 | if (!ret) { | 173 | if (!ret) { |
diff --git a/drivers/net/wireless/iwlegacy/iwl-tx.c b/drivers/net/wireless/iwlegacy/iwl-tx.c index 4fff995c6f3e..ef9e268bf8a0 100644 --- a/drivers/net/wireless/iwlegacy/iwl-tx.c +++ b/drivers/net/wireless/iwlegacy/iwl-tx.c | |||
@@ -625,6 +625,8 @@ iwl_legacy_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb) | |||
625 | cmd = txq->cmd[cmd_index]; | 625 | cmd = txq->cmd[cmd_index]; |
626 | meta = &txq->meta[cmd_index]; | 626 | meta = &txq->meta[cmd_index]; |
627 | 627 | ||
628 | txq->time_stamp = jiffies; | ||
629 | |||
628 | pci_unmap_single(priv->pci_dev, | 630 | pci_unmap_single(priv->pci_dev, |
629 | dma_unmap_addr(meta, mapping), | 631 | dma_unmap_addr(meta, mapping), |
630 | dma_unmap_len(meta, len), | 632 | dma_unmap_len(meta, len), |
@@ -645,7 +647,7 @@ iwl_legacy_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb) | |||
645 | clear_bit(STATUS_HCMD_ACTIVE, &priv->status); | 647 | clear_bit(STATUS_HCMD_ACTIVE, &priv->status); |
646 | IWL_DEBUG_INFO(priv, "Clearing HCMD_ACTIVE for command %s\n", | 648 | IWL_DEBUG_INFO(priv, "Clearing HCMD_ACTIVE for command %s\n", |
647 | iwl_legacy_get_cmd_string(cmd->hdr.cmd)); | 649 | iwl_legacy_get_cmd_string(cmd->hdr.cmd)); |
648 | wake_up_interruptible(&priv->wait_command_queue); | 650 | wake_up(&priv->wait_command_queue); |
649 | } | 651 | } |
650 | 652 | ||
651 | /* Mark as unmapped */ | 653 | /* Mark as unmapped */ |
diff --git a/drivers/net/wireless/iwlegacy/iwl3945-base.c b/drivers/net/wireless/iwlegacy/iwl3945-base.c index 795826a014ed..66ee15629a76 100644 --- a/drivers/net/wireless/iwlegacy/iwl3945-base.c +++ b/drivers/net/wireless/iwlegacy/iwl3945-base.c | |||
@@ -841,7 +841,7 @@ static void iwl3945_rx_card_state_notif(struct iwl_priv *priv, | |||
841 | wiphy_rfkill_set_hw_state(priv->hw->wiphy, | 841 | wiphy_rfkill_set_hw_state(priv->hw->wiphy, |
842 | test_bit(STATUS_RF_KILL_HW, &priv->status)); | 842 | test_bit(STATUS_RF_KILL_HW, &priv->status)); |
843 | else | 843 | else |
844 | wake_up_interruptible(&priv->wait_command_queue); | 844 | wake_up(&priv->wait_command_queue); |
845 | } | 845 | } |
846 | 846 | ||
847 | /** | 847 | /** |
@@ -2269,7 +2269,7 @@ static void iwl3945_alive_start(struct iwl_priv *priv) | |||
2269 | iwl3945_reg_txpower_periodic(priv); | 2269 | iwl3945_reg_txpower_periodic(priv); |
2270 | 2270 | ||
2271 | IWL_DEBUG_INFO(priv, "ALIVE processing complete.\n"); | 2271 | IWL_DEBUG_INFO(priv, "ALIVE processing complete.\n"); |
2272 | wake_up_interruptible(&priv->wait_command_queue); | 2272 | wake_up(&priv->wait_command_queue); |
2273 | 2273 | ||
2274 | return; | 2274 | return; |
2275 | 2275 | ||
@@ -2300,7 +2300,7 @@ static void __iwl3945_down(struct iwl_priv *priv) | |||
2300 | iwl_legacy_clear_driver_stations(priv); | 2300 | iwl_legacy_clear_driver_stations(priv); |
2301 | 2301 | ||
2302 | /* Unblock any waiting calls */ | 2302 | /* Unblock any waiting calls */ |
2303 | wake_up_interruptible_all(&priv->wait_command_queue); | 2303 | wake_up_all(&priv->wait_command_queue); |
2304 | 2304 | ||
2305 | /* Wipe out the EXIT_PENDING status bit if we are not actually | 2305 | /* Wipe out the EXIT_PENDING status bit if we are not actually |
2306 | * exiting the module */ | 2306 | * exiting the module */ |
@@ -2853,7 +2853,7 @@ static int iwl3945_mac_start(struct ieee80211_hw *hw) | |||
2853 | 2853 | ||
2854 | /* Wait for START_ALIVE from ucode. Otherwise callbacks from | 2854 | /* Wait for START_ALIVE from ucode. Otherwise callbacks from |
2855 | * mac80211 will not be run successfully. */ | 2855 | * mac80211 will not be run successfully. */ |
2856 | ret = wait_event_interruptible_timeout(priv->wait_command_queue, | 2856 | ret = wait_event_timeout(priv->wait_command_queue, |
2857 | test_bit(STATUS_READY, &priv->status), | 2857 | test_bit(STATUS_READY, &priv->status), |
2858 | UCODE_READY_TIMEOUT); | 2858 | UCODE_READY_TIMEOUT); |
2859 | if (!ret) { | 2859 | if (!ret) { |
diff --git a/drivers/net/wireless/iwlegacy/iwl4965-base.c b/drivers/net/wireless/iwlegacy/iwl4965-base.c index 14334668034e..aa0c2539761e 100644 --- a/drivers/net/wireless/iwlegacy/iwl4965-base.c +++ b/drivers/net/wireless/iwlegacy/iwl4965-base.c | |||
@@ -576,7 +576,7 @@ static void iwl4965_rx_card_state_notif(struct iwl_priv *priv, | |||
576 | wiphy_rfkill_set_hw_state(priv->hw->wiphy, | 576 | wiphy_rfkill_set_hw_state(priv->hw->wiphy, |
577 | test_bit(STATUS_RF_KILL_HW, &priv->status)); | 577 | test_bit(STATUS_RF_KILL_HW, &priv->status)); |
578 | else | 578 | else |
579 | wake_up_interruptible(&priv->wait_command_queue); | 579 | wake_up(&priv->wait_command_queue); |
580 | } | 580 | } |
581 | 581 | ||
582 | /** | 582 | /** |
@@ -926,7 +926,7 @@ static void iwl4965_irq_tasklet(struct iwl_priv *priv) | |||
926 | handled |= CSR_INT_BIT_FH_TX; | 926 | handled |= CSR_INT_BIT_FH_TX; |
927 | /* Wake up uCode load routine, now that load is complete */ | 927 | /* Wake up uCode load routine, now that load is complete */ |
928 | priv->ucode_write_complete = 1; | 928 | priv->ucode_write_complete = 1; |
929 | wake_up_interruptible(&priv->wait_command_queue); | 929 | wake_up(&priv->wait_command_queue); |
930 | } | 930 | } |
931 | 931 | ||
932 | if (inta & ~handled) { | 932 | if (inta & ~handled) { |
@@ -1795,7 +1795,7 @@ static void iwl4965_alive_start(struct iwl_priv *priv) | |||
1795 | iwl4965_rf_kill_ct_config(priv); | 1795 | iwl4965_rf_kill_ct_config(priv); |
1796 | 1796 | ||
1797 | IWL_DEBUG_INFO(priv, "ALIVE processing complete.\n"); | 1797 | IWL_DEBUG_INFO(priv, "ALIVE processing complete.\n"); |
1798 | wake_up_interruptible(&priv->wait_command_queue); | 1798 | wake_up(&priv->wait_command_queue); |
1799 | 1799 | ||
1800 | iwl_legacy_power_update_mode(priv, true); | 1800 | iwl_legacy_power_update_mode(priv, true); |
1801 | IWL_DEBUG_INFO(priv, "Updated power mode\n"); | 1801 | IWL_DEBUG_INFO(priv, "Updated power mode\n"); |
@@ -1828,7 +1828,7 @@ static void __iwl4965_down(struct iwl_priv *priv) | |||
1828 | iwl_legacy_clear_driver_stations(priv); | 1828 | iwl_legacy_clear_driver_stations(priv); |
1829 | 1829 | ||
1830 | /* Unblock any waiting calls */ | 1830 | /* Unblock any waiting calls */ |
1831 | wake_up_interruptible_all(&priv->wait_command_queue); | 1831 | wake_up_all(&priv->wait_command_queue); |
1832 | 1832 | ||
1833 | /* Wipe out the EXIT_PENDING status bit if we are not actually | 1833 | /* Wipe out the EXIT_PENDING status bit if we are not actually |
1834 | * exiting the module */ | 1834 | * exiting the module */ |
@@ -2266,7 +2266,7 @@ int iwl4965_mac_start(struct ieee80211_hw *hw) | |||
2266 | 2266 | ||
2267 | /* Wait for START_ALIVE from Run Time ucode. Otherwise callbacks from | 2267 | /* Wait for START_ALIVE from Run Time ucode. Otherwise callbacks from |
2268 | * mac80211 will not be run successfully. */ | 2268 | * mac80211 will not be run successfully. */ |
2269 | ret = wait_event_interruptible_timeout(priv->wait_command_queue, | 2269 | ret = wait_event_timeout(priv->wait_command_queue, |
2270 | test_bit(STATUS_READY, &priv->status), | 2270 | test_bit(STATUS_READY, &priv->status), |
2271 | UCODE_READY_TIMEOUT); | 2271 | UCODE_READY_TIMEOUT); |
2272 | if (!ret) { | 2272 | if (!ret) { |
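The iwlegacy changes from iwl-core.c through iwl4965-base.c all make the same substitution: non-interruptible wait_event_timeout()/wake_up() replace the interruptible variants, so a pending signal can no longer abort a host-command wait and leave STATUS_HCMD_ACTIVE stuck. The waiter/waker pairing, sketched with a stand-in status bit:

    #include <linux/bitops.h>
    #include <linux/errno.h>
    #include <linux/jiffies.h>
    #include <linux/wait.h>

    static DECLARE_WAIT_QUEUE_HEAD(cmd_waitq);
    static unsigned long cmd_status;    /* bit 0 = command done */

    /* Waiter: only completion or timeout ends the wait; signals are ignored,
     * unlike with wait_event_interruptible_timeout(). */
    static int wait_for_cmd(void)
    {
        long left = wait_event_timeout(cmd_waitq,
                                       test_bit(0, &cmd_status), 2 * HZ);
        return left ? 0 : -ETIMEDOUT;
    }

    /* Completion side, e.g. the IRQ tasklet. */
    static void cmd_done(void)
    {
        set_bit(0, &cmd_status);
        wake_up(&cmd_waitq);
    }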
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c b/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c index a895a099d086..56211006a182 100644 --- a/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c +++ b/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c | |||
@@ -167,7 +167,7 @@ static int iwlagn_set_temperature_offset_calib(struct iwl_priv *priv) | |||
167 | 167 | ||
168 | memset(&cmd, 0, sizeof(cmd)); | 168 | memset(&cmd, 0, sizeof(cmd)); |
169 | iwl_set_calib_hdr(&cmd.hdr, IWL_PHY_CALIBRATE_TEMP_OFFSET_CMD); | 169 | iwl_set_calib_hdr(&cmd.hdr, IWL_PHY_CALIBRATE_TEMP_OFFSET_CMD); |
170 | memcpy(&cmd.radio_sensor_offset, offset_calib, sizeof(offset_calib)); | 170 | memcpy(&cmd.radio_sensor_offset, offset_calib, sizeof(*offset_calib)); |
171 | if (!(cmd.radio_sensor_offset)) | 171 | if (!(cmd.radio_sensor_offset)) |
172 | cmd.radio_sensor_offset = DEFAULT_RADIO_SENSOR_OFFSET; | 172 | cmd.radio_sensor_offset = DEFAULT_RADIO_SENSOR_OFFSET; |
173 | 173 | ||
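The calibration fix is the classic sizeof-of-a-pointer bug: offset_calib is a pointer, so sizeof(offset_calib) is 4 or 8 bytes regardless of the data, while sizeof(*offset_calib) is the size of the value actually being copied. Reduced to a hedged fragment with simplified types:

    #include <linux/string.h>
    #include <linux/types.h>

    struct calib_cmd { __le16 radio_sensor_offset; };

    /* sizeof(src) would copy 4 or 8 bytes where only 2 were intended;
     * sizeof(*src) copies exactly the pointee. */
    static void copy_offset(struct calib_cmd *cmd, const __le16 *src)
    {
        memcpy(&cmd->radio_sensor_offset, src, sizeof(*src));
    }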
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c index b0ae4de7f083..f9c3cd95d614 100644 --- a/drivers/net/wireless/iwlwifi/iwl-agn.c +++ b/drivers/net/wireless/iwlwifi/iwl-agn.c | |||
@@ -2140,7 +2140,12 @@ static int iwl_mac_setup_register(struct iwl_priv *priv, | |||
2140 | IEEE80211_HW_SPECTRUM_MGMT | | 2140 | IEEE80211_HW_SPECTRUM_MGMT | |
2141 | IEEE80211_HW_REPORTS_TX_ACK_STATUS; | 2141 | IEEE80211_HW_REPORTS_TX_ACK_STATUS; |
2142 | 2142 | ||
2143 | /* | ||
2144 | * Including the following line will crash some AP's. This | ||
2145 | * workaround removes the stimulus which causes the crash until | ||
2146 | * the AP software can be fixed. | ||
2143 | hw->max_tx_aggregation_subframes = LINK_QUAL_AGG_FRAME_LIMIT_DEF; | 2147 | hw->max_tx_aggregation_subframes = LINK_QUAL_AGG_FRAME_LIMIT_DEF; |
2148 | */ | ||
2144 | 2149 | ||
2145 | hw->flags |= IEEE80211_HW_SUPPORTS_PS | | 2150 | hw->flags |= IEEE80211_HW_SUPPORTS_PS | |
2146 | IEEE80211_HW_SUPPORTS_DYNAMIC_PS; | 2151 | IEEE80211_HW_SUPPORTS_DYNAMIC_PS; |
diff --git a/drivers/net/wireless/iwlwifi/iwl-pci.c b/drivers/net/wireless/iwlwifi/iwl-pci.c index 69d4ec467dca..2fdbffa079c1 100644 --- a/drivers/net/wireless/iwlwifi/iwl-pci.c +++ b/drivers/net/wireless/iwlwifi/iwl-pci.c | |||
@@ -478,27 +478,22 @@ out_no_pci: | |||
478 | return err; | 478 | return err; |
479 | } | 479 | } |
480 | 480 | ||
481 | static void iwl_pci_down(struct iwl_bus *bus) | ||
482 | { | ||
483 | struct iwl_pci_bus *pci_bus = (struct iwl_pci_bus *) bus->bus_specific; | ||
484 | |||
485 | pci_disable_msi(pci_bus->pci_dev); | ||
486 | pci_iounmap(pci_bus->pci_dev, pci_bus->hw_base); | ||
487 | pci_release_regions(pci_bus->pci_dev); | ||
488 | pci_disable_device(pci_bus->pci_dev); | ||
489 | pci_set_drvdata(pci_bus->pci_dev, NULL); | ||
490 | |||
491 | kfree(bus); | ||
492 | } | ||
493 | |||
494 | static void __devexit iwl_pci_remove(struct pci_dev *pdev) | 481 | static void __devexit iwl_pci_remove(struct pci_dev *pdev) |
495 | { | 482 | { |
496 | struct iwl_priv *priv = pci_get_drvdata(pdev); | 483 | struct iwl_priv *priv = pci_get_drvdata(pdev); |
497 | void *bus_specific = priv->bus->bus_specific; | 484 | struct iwl_bus *bus = priv->bus; |
485 | struct iwl_pci_bus *pci_bus = IWL_BUS_GET_PCI_BUS(bus); | ||
486 | struct pci_dev *pci_dev = IWL_BUS_GET_PCI_DEV(bus); | ||
498 | 487 | ||
499 | iwl_remove(priv); | 488 | iwl_remove(priv); |
500 | 489 | ||
501 | iwl_pci_down(bus_specific); | 490 | pci_disable_msi(pci_dev); |
491 | pci_iounmap(pci_dev, pci_bus->hw_base); | ||
492 | pci_release_regions(pci_dev); | ||
493 | pci_disable_device(pci_dev); | ||
494 | pci_set_drvdata(pci_dev, NULL); | ||
495 | |||
496 | kfree(bus); | ||
502 | } | 497 | } |
503 | 498 | ||
504 | #ifdef CONFIG_PM | 499 | #ifdef CONFIG_PM |
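The iwl_pci_remove() rework fixes teardown ordering: the bus and PCI pointers are captured before iwl_remove() tears down the upper layer, the low-level PCI resources are released next, and the bus container is kfree()'d last, so nothing is dereferenced after it has been freed. A hedged skeleton with generic names:

    #include <linux/pci.h>
    #include <linux/slab.h>

    struct my_bus { struct pci_dev *pci_dev; void __iomem *hw_base; };
    struct my_priv { struct my_bus *bus; };

    void upper_layer_remove(struct my_priv *priv);  /* may free priv */

    static void remove_dev(struct my_priv *priv)
    {
        struct my_bus *bus = priv->bus;             /* capture before free */
        struct pci_dev *pdev = bus->pci_dev;

        upper_layer_remove(priv);

        pci_disable_msi(pdev);
        pci_iounmap(pdev, bus->hw_base);
        pci_release_regions(pdev);
        pci_disable_device(pdev);
        pci_set_drvdata(pdev, NULL);

        kfree(bus);                                 /* container goes last */
    }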
diff --git a/drivers/net/wireless/iwlwifi/iwl-scan.c b/drivers/net/wireless/iwlwifi/iwl-scan.c index dd6937e97055..77e528f5db88 100644 --- a/drivers/net/wireless/iwlwifi/iwl-scan.c +++ b/drivers/net/wireless/iwlwifi/iwl-scan.c | |||
@@ -405,31 +405,33 @@ int iwl_mac_hw_scan(struct ieee80211_hw *hw, | |||
405 | 405 | ||
406 | mutex_lock(&priv->mutex); | 406 | mutex_lock(&priv->mutex); |
407 | 407 | ||
408 | if (test_bit(STATUS_SCANNING, &priv->status) && | ||
409 | priv->scan_type != IWL_SCAN_NORMAL) { | ||
410 | IWL_DEBUG_SCAN(priv, "Scan already in progress.\n"); | ||
411 | ret = -EAGAIN; | ||
412 | goto out_unlock; | ||
413 | } | ||
414 | |||
415 | /* mac80211 will only ask for one band at a time */ | ||
416 | priv->scan_request = req; | ||
417 | priv->scan_vif = vif; | ||
418 | |||
419 | /* | 408 | /* |
420 | * If an internal scan is in progress, just set | 409 | * If an internal scan is in progress, just set |
421 | * up the scan_request as per above. | 410 | * up the scan_request as per above. |
422 | */ | 411 | */ |
423 | if (priv->scan_type != IWL_SCAN_NORMAL) { | 412 | if (priv->scan_type != IWL_SCAN_NORMAL) { |
424 | IWL_DEBUG_SCAN(priv, "SCAN request during internal scan\n"); | 413 | IWL_DEBUG_SCAN(priv, |
414 | "SCAN request during internal scan - defer\n"); | ||
415 | priv->scan_request = req; | ||
416 | priv->scan_vif = vif; | ||
425 | ret = 0; | 417 | ret = 0; |
426 | } else | 418 | } else { |
419 | priv->scan_request = req; | ||
420 | priv->scan_vif = vif; | ||
421 | /* | ||
422 | * mac80211 will only ask for one band at a time | ||
423 | * so using channels[0] here is ok | ||
424 | */ | ||
427 | ret = iwl_scan_initiate(priv, vif, IWL_SCAN_NORMAL, | 425 | ret = iwl_scan_initiate(priv, vif, IWL_SCAN_NORMAL, |
428 | req->channels[0]->band); | 426 | req->channels[0]->band); |
427 | if (ret) { | ||
428 | priv->scan_request = NULL; | ||
429 | priv->scan_vif = NULL; | ||
430 | } | ||
431 | } | ||
429 | 432 | ||
430 | IWL_DEBUG_MAC80211(priv, "leave\n"); | 433 | IWL_DEBUG_MAC80211(priv, "leave\n"); |
431 | 434 | ||
432 | out_unlock: | ||
433 | mutex_unlock(&priv->mutex); | 435 | mutex_unlock(&priv->mutex); |
434 | 436 | ||
435 | return ret; | 437 | return ret; |
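The scan hunk above only publishes scan_request and scan_vif once, and clears them again when iwl_scan_initiate() fails, so no stale pointers survive the error path. A generic, hypothetical sketch of that publish-then-roll-back pattern (ctx, request and hw_start are illustrative stand-ins, not iwlwifi code):

/* Illustrative only: publish request state under the lock, attempt the
 * operation, and roll the state back if the attempt fails. */
#include <linux/mutex.h>

struct request;

struct ctx {
	struct mutex lock;
	struct request *cur_req;
};

/* Stand-in for the real hardware operation; hypothetical. */
static int hw_start(struct ctx *c, struct request *req)
{
	return 0;			/* pretend it succeeded */
}

static int start_request(struct ctx *c, struct request *req)
{
	int ret;

	mutex_lock(&c->lock);
	c->cur_req = req;		/* publish while locked     */
	ret = hw_start(c, req);		/* may fail                 */
	if (ret)
		c->cur_req = NULL;	/* roll back, nothing stale */
	mutex_unlock(&c->lock);

	return ret;
}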
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-tx-pcie.c b/drivers/net/wireless/iwlwifi/iwl-trans-tx-pcie.c index a6b2b1db0b1d..222d410c586e 100644 --- a/drivers/net/wireless/iwlwifi/iwl-trans-tx-pcie.c +++ b/drivers/net/wireless/iwlwifi/iwl-trans-tx-pcie.c | |||
@@ -771,6 +771,8 @@ void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb) | |||
771 | cmd = txq->cmd[cmd_index]; | 771 | cmd = txq->cmd[cmd_index]; |
772 | meta = &txq->meta[cmd_index]; | 772 | meta = &txq->meta[cmd_index]; |
773 | 773 | ||
774 | txq->time_stamp = jiffies; | ||
775 | |||
774 | iwlagn_unmap_tfd(priv, meta, &txq->tfds[index], DMA_BIDIRECTIONAL); | 776 | iwlagn_unmap_tfd(priv, meta, &txq->tfds[index], DMA_BIDIRECTIONAL); |
775 | 777 | ||
776 | /* Input error checking is done when commands are added to queue. */ | 778 | /* Input error checking is done when commands are added to queue. */ |
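Refreshing txq->time_stamp when a command completes matters because stuck-queue watchdogs typically compare such a timestamp against jiffies; without the refresh, a busy command queue can look hung. A small sketch of that kind of check, with illustrative names (only time_after() and jiffies are real kernel interfaces):

/* Hypothetical watchdog check; field and parameter names are illustrative. */
#include <linux/jiffies.h>
#include <linux/types.h>

static bool queue_looks_stuck(unsigned long last_activity,
			      unsigned long timeout_jiffies)
{
	/* Trips only if nothing refreshed last_activity within the window. */
	return time_after(jiffies, last_activity + timeout_jiffies);
}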
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c index ef67f6786a84..0019dfd8fb01 100644 --- a/drivers/net/wireless/rt2x00/rt2800lib.c +++ b/drivers/net/wireless/rt2x00/rt2800lib.c | |||
@@ -3697,14 +3697,15 @@ static void rt2800_efuse_read(struct rt2x00_dev *rt2x00dev, unsigned int i) | |||
3697 | rt2800_regbusy_read(rt2x00dev, EFUSE_CTRL, EFUSE_CTRL_KICK, ®); | 3697 | rt2800_regbusy_read(rt2x00dev, EFUSE_CTRL, EFUSE_CTRL_KICK, ®); |
3698 | 3698 | ||
3699 | /* Apparently the data is read from end to start */ | 3699 | /* Apparently the data is read from end to start */ |
3700 | rt2800_register_read_lock(rt2x00dev, EFUSE_DATA3, | 3700 | rt2800_register_read_lock(rt2x00dev, EFUSE_DATA3, ®); |
3701 | (u32 *)&rt2x00dev->eeprom[i]); | 3701 | /* The returned value is in CPU order, but eeprom is le */ |
3702 | rt2800_register_read_lock(rt2x00dev, EFUSE_DATA2, | 3702 | rt2x00dev->eeprom[i] = cpu_to_le32(reg); |
3703 | (u32 *)&rt2x00dev->eeprom[i + 2]); | 3703 | rt2800_register_read_lock(rt2x00dev, EFUSE_DATA2, ®); |
3704 | rt2800_register_read_lock(rt2x00dev, EFUSE_DATA1, | 3704 | *(u32 *)&rt2x00dev->eeprom[i + 2] = cpu_to_le32(reg); |
3705 | (u32 *)&rt2x00dev->eeprom[i + 4]); | 3705 | rt2800_register_read_lock(rt2x00dev, EFUSE_DATA1, ®); |
3706 | rt2800_register_read_lock(rt2x00dev, EFUSE_DATA0, | 3706 | *(u32 *)&rt2x00dev->eeprom[i + 4] = cpu_to_le32(reg); |
3707 | (u32 *)&rt2x00dev->eeprom[i + 6]); | 3707 | rt2800_register_read_lock(rt2x00dev, EFUSE_DATA0, ®); |
3708 | *(u32 *)&rt2x00dev->eeprom[i + 6] = cpu_to_le32(reg); | ||
3708 | 3709 | ||
3709 | mutex_unlock(&rt2x00dev->csr_mutex); | 3710 | mutex_unlock(&rt2x00dev->csr_mutex); |
3710 | } | 3711 | } |
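In the efuse hunk above, rt2800_register_read_lock() now fills a CPU-order u32 and each word is converted with cpu_to_le32() before being stored into the little-endian EEPROM image, instead of writing raw register data over it. A stripped-down, hypothetical illustration of that conversion step (store_eeprom_word is not a driver function):

/* Illustrative only: registers are read in CPU byte order, while the
 * cached EEPROM image is kept little-endian on all hosts. */
#include <linux/types.h>
#include <asm/byteorder.h>

static void store_eeprom_word(__le32 *eeprom_image, unsigned int idx, u32 reg)
{
	eeprom_image[idx] = cpu_to_le32(reg);	/* no-op on LE, swap on BE */
}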
@@ -3870,19 +3871,23 @@ int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev) | |||
3870 | return -ENODEV; | 3871 | return -ENODEV; |
3871 | } | 3872 | } |
3872 | 3873 | ||
3873 | if (!rt2x00_rf(rt2x00dev, RF2820) && | 3874 | switch (rt2x00dev->chip.rf) { |
3874 | !rt2x00_rf(rt2x00dev, RF2850) && | 3875 | case RF2820: |
3875 | !rt2x00_rf(rt2x00dev, RF2720) && | 3876 | case RF2850: |
3876 | !rt2x00_rf(rt2x00dev, RF2750) && | 3877 | case RF2720: |
3877 | !rt2x00_rf(rt2x00dev, RF3020) && | 3878 | case RF2750: |
3878 | !rt2x00_rf(rt2x00dev, RF2020) && | 3879 | case RF3020: |
3879 | !rt2x00_rf(rt2x00dev, RF3021) && | 3880 | case RF2020: |
3880 | !rt2x00_rf(rt2x00dev, RF3022) && | 3881 | case RF3021: |
3881 | !rt2x00_rf(rt2x00dev, RF3052) && | 3882 | case RF3022: |
3882 | !rt2x00_rf(rt2x00dev, RF3320) && | 3883 | case RF3052: |
3883 | !rt2x00_rf(rt2x00dev, RF5370) && | 3884 | case RF3320: |
3884 | !rt2x00_rf(rt2x00dev, RF5390)) { | 3885 | case RF5370: |
3885 | ERROR(rt2x00dev, "Invalid RF chipset detected.\n"); | 3886 | case RF5390: |
3887 | break; | ||
3888 | default: | ||
3889 | ERROR(rt2x00dev, "Invalid RF chipset 0x%x detected.\n", | ||
3890 | rt2x00dev->chip.rf); | ||
3886 | return -ENODEV; | 3891 | return -ENODEV; |
3887 | } | 3892 | } |
3888 | 3893 | ||
diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c index 939563162fb3..dbf501ca317f 100644 --- a/drivers/net/wireless/rt2x00/rt2800usb.c +++ b/drivers/net/wireless/rt2x00/rt2800usb.c | |||
@@ -464,6 +464,15 @@ static bool rt2800usb_txdone_entry_check(struct queue_entry *entry, u32 reg) | |||
464 | int wcid, ack, pid; | 464 | int wcid, ack, pid; |
465 | int tx_wcid, tx_ack, tx_pid; | 465 | int tx_wcid, tx_ack, tx_pid; |
466 | 466 | ||
467 | if (test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags) || | ||
468 | !test_bit(ENTRY_DATA_STATUS_PENDING, &entry->flags)) { | ||
469 | WARNING(entry->queue->rt2x00dev, | ||
470 | "Data pending for entry %u in queue %u\n", | ||
471 | entry->entry_idx, entry->queue->qid); | ||
472 | cond_resched(); | ||
473 | return false; | ||
474 | } | ||
475 | |||
467 | wcid = rt2x00_get_field32(reg, TX_STA_FIFO_WCID); | 476 | wcid = rt2x00_get_field32(reg, TX_STA_FIFO_WCID); |
468 | ack = rt2x00_get_field32(reg, TX_STA_FIFO_TX_ACK_REQUIRED); | 477 | ack = rt2x00_get_field32(reg, TX_STA_FIFO_TX_ACK_REQUIRED); |
469 | pid = rt2x00_get_field32(reg, TX_STA_FIFO_PID_TYPE); | 478 | pid = rt2x00_get_field32(reg, TX_STA_FIFO_PID_TYPE); |
@@ -529,12 +538,11 @@ static void rt2800usb_txdone(struct rt2x00_dev *rt2x00dev) | |||
529 | entry = rt2x00queue_get_entry(queue, Q_INDEX_DONE); | 538 | entry = rt2x00queue_get_entry(queue, Q_INDEX_DONE); |
530 | if (rt2800usb_txdone_entry_check(entry, reg)) | 539 | if (rt2800usb_txdone_entry_check(entry, reg)) |
531 | break; | 540 | break; |
541 | entry = NULL; | ||
532 | } | 542 | } |
533 | 543 | ||
534 | if (!entry || rt2x00queue_empty(queue)) | 544 | if (entry) |
535 | break; | 545 | rt2800_txdone_entry(entry, reg); |
536 | |||
537 | rt2800_txdone_entry(entry, reg); | ||
538 | } | 546 | } |
539 | } | 547 | } |
540 | 548 | ||
@@ -558,8 +566,10 @@ static void rt2800usb_work_txdone(struct work_struct *work) | |||
558 | while (!rt2x00queue_empty(queue)) { | 566 | while (!rt2x00queue_empty(queue)) { |
559 | entry = rt2x00queue_get_entry(queue, Q_INDEX_DONE); | 567 | entry = rt2x00queue_get_entry(queue, Q_INDEX_DONE); |
560 | 568 | ||
561 | if (test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags)) | 569 | if (test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags) || |
570 | !test_bit(ENTRY_DATA_STATUS_PENDING, &entry->flags)) | ||
562 | break; | 571 | break; |
572 | |||
563 | if (test_bit(ENTRY_DATA_IO_FAILED, &entry->flags)) | 573 | if (test_bit(ENTRY_DATA_IO_FAILED, &entry->flags)) |
564 | rt2x00lib_txdone_noinfo(entry, TXDONE_FAILURE); | 574 | rt2x00lib_txdone_noinfo(entry, TXDONE_FAILURE); |
565 | else if (rt2x00queue_status_timeout(entry)) | 575 | else if (rt2x00queue_status_timeout(entry)) |
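Both rt2800usb hunks above key off the same pair of entry flags: the device must no longer own the entry and a TX status must actually be pending before the entry is matched against a TX_STA_FIFO word. A condensed, hypothetical helper expressing that predicate (the driver open-codes the test, and the bit numbers below are placeholders, not the rt2x00 enum values):

/* Illustrative helper only. */
#include <linux/bitops.h>
#include <linux/types.h>

#define ENTRY_OWNER_DEVICE_DATA		0	/* placeholder bit numbers */
#define ENTRY_DATA_STATUS_PENDING	1

static bool entry_ready_for_status(const unsigned long *flags)
{
	return !test_bit(ENTRY_OWNER_DEVICE_DATA, flags) &&
	       test_bit(ENTRY_DATA_STATUS_PENDING, flags);
}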
diff --git a/drivers/net/wireless/rt2x00/rt2x00usb.c b/drivers/net/wireless/rt2x00/rt2x00usb.c index b6b4542c2460..1e31050dafc9 100644 --- a/drivers/net/wireless/rt2x00/rt2x00usb.c +++ b/drivers/net/wireless/rt2x00/rt2x00usb.c | |||
@@ -262,23 +262,20 @@ static void rt2x00usb_interrupt_txdone(struct urb *urb) | |||
262 | struct queue_entry *entry = (struct queue_entry *)urb->context; | 262 | struct queue_entry *entry = (struct queue_entry *)urb->context; |
263 | struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; | 263 | struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; |
264 | 264 | ||
265 | if (!test_and_clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags)) | 265 | if (!test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags)) |
266 | return; | 266 | return; |
267 | |||
268 | if (rt2x00dev->ops->lib->tx_dma_done) | ||
269 | rt2x00dev->ops->lib->tx_dma_done(entry); | ||
270 | |||
271 | /* | ||
272 | * Report the frame as DMA done | ||
273 | */ | ||
274 | rt2x00lib_dmadone(entry); | ||
275 | |||
276 | /* | 267 | /* |
277 | * Check if the frame was correctly uploaded | 268 | * Check if the frame was correctly uploaded |
278 | */ | 269 | */ |
279 | if (urb->status) | 270 | if (urb->status) |
280 | set_bit(ENTRY_DATA_IO_FAILED, &entry->flags); | 271 | set_bit(ENTRY_DATA_IO_FAILED, &entry->flags); |
272 | /* | ||
273 | * Report the frame as DMA done | ||
274 | */ | ||
275 | rt2x00lib_dmadone(entry); | ||
281 | 276 | ||
277 | if (rt2x00dev->ops->lib->tx_dma_done) | ||
278 | rt2x00dev->ops->lib->tx_dma_done(entry); | ||
282 | /* | 279 | /* |
283 | * Schedule the delayed work for reading the TX status | 280 | * Schedule the delayed work for reading the TX status |
284 | * from the device. | 281 | * from the device. |
@@ -874,18 +871,8 @@ int rt2x00usb_suspend(struct usb_interface *usb_intf, pm_message_t state) | |||
874 | { | 871 | { |
875 | struct ieee80211_hw *hw = usb_get_intfdata(usb_intf); | 872 | struct ieee80211_hw *hw = usb_get_intfdata(usb_intf); |
876 | struct rt2x00_dev *rt2x00dev = hw->priv; | 873 | struct rt2x00_dev *rt2x00dev = hw->priv; |
877 | int retval; | ||
878 | |||
879 | retval = rt2x00lib_suspend(rt2x00dev, state); | ||
880 | if (retval) | ||
881 | return retval; | ||
882 | 874 | ||
883 | /* | 875 | return rt2x00lib_suspend(rt2x00dev, state); |
884 | * Decrease usbdev refcount. | ||
885 | */ | ||
886 | usb_put_dev(interface_to_usbdev(usb_intf)); | ||
887 | |||
888 | return 0; | ||
889 | } | 876 | } |
890 | EXPORT_SYMBOL_GPL(rt2x00usb_suspend); | 877 | EXPORT_SYMBOL_GPL(rt2x00usb_suspend); |
891 | 878 | ||
@@ -894,8 +881,6 @@ int rt2x00usb_resume(struct usb_interface *usb_intf) | |||
894 | struct ieee80211_hw *hw = usb_get_intfdata(usb_intf); | 881 | struct ieee80211_hw *hw = usb_get_intfdata(usb_intf); |
895 | struct rt2x00_dev *rt2x00dev = hw->priv; | 882 | struct rt2x00_dev *rt2x00dev = hw->priv; |
896 | 883 | ||
897 | usb_get_dev(interface_to_usbdev(usb_intf)); | ||
898 | |||
899 | return rt2x00lib_resume(rt2x00dev); | 884 | return rt2x00lib_resume(rt2x00dev); |
900 | } | 885 | } |
901 | EXPORT_SYMBOL_GPL(rt2x00usb_resume); | 886 | EXPORT_SYMBOL_GPL(rt2x00usb_resume); |
diff --git a/drivers/net/wireless/rtlwifi/core.c b/drivers/net/wireless/rtlwifi/core.c index 1bdc1aa305c0..04c4e9eb6ee6 100644 --- a/drivers/net/wireless/rtlwifi/core.c +++ b/drivers/net/wireless/rtlwifi/core.c | |||
@@ -610,6 +610,11 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw, | |||
610 | 610 | ||
611 | mac->link_state = MAC80211_NOLINK; | 611 | mac->link_state = MAC80211_NOLINK; |
612 | memset(mac->bssid, 0, 6); | 612 | memset(mac->bssid, 0, 6); |
613 | |||
614 | /* reset sec info */ | ||
615 | rtl_cam_reset_sec_info(hw); | ||
616 | |||
617 | rtl_cam_reset_all_entry(hw); | ||
613 | mac->vendor = PEER_UNKNOWN; | 618 | mac->vendor = PEER_UNKNOWN; |
614 | 619 | ||
615 | RT_TRACE(rtlpriv, COMP_MAC80211, DBG_DMESG, | 620 | RT_TRACE(rtlpriv, COMP_MAC80211, DBG_DMESG, |
@@ -1063,6 +1068,9 @@ static int rtl_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, | |||
1063 | *or clear all entry here. | 1068 | *or clear all entry here. |
1064 | */ | 1069 | */ |
1065 | rtl_cam_delete_one_entry(hw, mac_addr, key_idx); | 1070 | rtl_cam_delete_one_entry(hw, mac_addr, key_idx); |
1071 | |||
1072 | rtl_cam_reset_sec_info(hw); | ||
1073 | |||
1066 | break; | 1074 | break; |
1067 | default: | 1075 | default: |
1068 | RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, | 1076 | RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, |
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c index 906e7aa55bc3..3e52a5496224 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c +++ b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c | |||
@@ -549,15 +549,16 @@ void rtl92cu_tx_fill_desc(struct ieee80211_hw *hw, | |||
549 | (tcb_desc->rts_use_shortpreamble ? 1 : 0) | 549 | (tcb_desc->rts_use_shortpreamble ? 1 : 0) |
550 | : (tcb_desc->rts_use_shortgi ? 1 : 0))); | 550 | : (tcb_desc->rts_use_shortgi ? 1 : 0))); |
551 | if (mac->bw_40) { | 551 | if (mac->bw_40) { |
552 | if (tcb_desc->packet_bw) { | 552 | if (rate_flag & IEEE80211_TX_RC_DUP_DATA) { |
553 | SET_TX_DESC_DATA_BW(txdesc, 1); | 553 | SET_TX_DESC_DATA_BW(txdesc, 1); |
554 | SET_TX_DESC_DATA_SC(txdesc, 3); | 554 | SET_TX_DESC_DATA_SC(txdesc, 3); |
555 | } else if(rate_flag & IEEE80211_TX_RC_40_MHZ_WIDTH){ | ||
556 | SET_TX_DESC_DATA_BW(txdesc, 1); | ||
557 | SET_TX_DESC_DATA_SC(txdesc, mac->cur_40_prime_sc); | ||
555 | } else { | 558 | } else { |
556 | SET_TX_DESC_DATA_BW(txdesc, 0); | 559 | SET_TX_DESC_DATA_BW(txdesc, 0); |
557 | if (rate_flag & IEEE80211_TX_RC_DUP_DATA) | 560 | SET_TX_DESC_DATA_SC(txdesc, 0); |
558 | SET_TX_DESC_DATA_SC(txdesc, | 561 | } |
559 | mac->cur_40_prime_sc); | ||
560 | } | ||
561 | } else { | 562 | } else { |
562 | SET_TX_DESC_DATA_BW(txdesc, 0); | 563 | SET_TX_DESC_DATA_BW(txdesc, 0); |
563 | SET_TX_DESC_DATA_SC(txdesc, 0); | 564 | SET_TX_DESC_DATA_SC(txdesc, 0); |
diff --git a/drivers/net/wireless/rtlwifi/usb.c b/drivers/net/wireless/rtlwifi/usb.c index 8b1cef0ffde6..4bf3cf457ef0 100644 --- a/drivers/net/wireless/rtlwifi/usb.c +++ b/drivers/net/wireless/rtlwifi/usb.c | |||
@@ -863,6 +863,7 @@ static void _rtl_usb_tx_preprocess(struct ieee80211_hw *hw, struct sk_buff *skb, | |||
863 | u8 tid = 0; | 863 | u8 tid = 0; |
864 | u16 seq_number = 0; | 864 | u16 seq_number = 0; |
865 | 865 | ||
866 | memset(&tcb_desc, 0, sizeof(struct rtl_tcb_desc)); | ||
866 | if (ieee80211_is_auth(fc)) { | 867 | if (ieee80211_is_auth(fc)) { |
867 | RT_TRACE(rtlpriv, COMP_SEND, DBG_DMESG, ("MAC80211_LINKING\n")); | 868 | RT_TRACE(rtlpriv, COMP_SEND, DBG_DMESG, ("MAC80211_LINKING\n")); |
868 | rtl_ips_nic_on(hw); | 869 | rtl_ips_nic_on(hw); |
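The one-line fix above zeroes the on-stack rtl_tcb_desc before the TX path fills selected fields; without it, untouched members carry stale stack data into the descriptor. A generic, hypothetical illustration of the same rule (struct tx_info and fill_tx_info are stand-ins, not driver code):

/* Illustrative only: give an on-stack descriptor defined contents
 * before selectively filling it. */
#include <linux/string.h>
#include <linux/types.h>

struct tx_info {			/* hypothetical stand-in structure */
	u8 tid;
	u16 seq;
	u32 flags;
};

static void fill_tx_info(struct tx_info *out, u8 tid)
{
	struct tx_info info;

	memset(&info, 0, sizeof(info));	/* every field now defined   */
	info.tid = tid;			/* only some fields set here */
	*out = info;
}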
diff --git a/drivers/net/wireless/wl12xx/acx.c b/drivers/net/wireless/wl12xx/acx.c index 7e33f1f4f3d4..34f6ab53e519 100644 --- a/drivers/net/wireless/wl12xx/acx.c +++ b/drivers/net/wireless/wl12xx/acx.c | |||
@@ -77,8 +77,6 @@ int wl1271_acx_sleep_auth(struct wl1271 *wl, u8 sleep_auth) | |||
77 | auth->sleep_auth = sleep_auth; | 77 | auth->sleep_auth = sleep_auth; |
78 | 78 | ||
79 | ret = wl1271_cmd_configure(wl, ACX_SLEEP_AUTH, auth, sizeof(*auth)); | 79 | ret = wl1271_cmd_configure(wl, ACX_SLEEP_AUTH, auth, sizeof(*auth)); |
80 | if (ret < 0) | ||
81 | return ret; | ||
82 | 80 | ||
83 | out: | 81 | out: |
84 | kfree(auth); | 82 | kfree(auth); |
@@ -624,10 +622,8 @@ int wl1271_acx_cca_threshold(struct wl1271 *wl) | |||
624 | 622 | ||
625 | ret = wl1271_cmd_configure(wl, ACX_CCA_THRESHOLD, | 623 | ret = wl1271_cmd_configure(wl, ACX_CCA_THRESHOLD, |
626 | detection, sizeof(*detection)); | 624 | detection, sizeof(*detection)); |
627 | if (ret < 0) { | 625 | if (ret < 0) |
628 | wl1271_warning("failed to set cca threshold: %d", ret); | 626 | wl1271_warning("failed to set cca threshold: %d", ret); |
629 | return ret; | ||
630 | } | ||
631 | 627 | ||
632 | out: | 628 | out: |
633 | kfree(detection); | 629 | kfree(detection); |
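The two wl12xx/acx.c hunks above remove early returns that skipped the kfree() at the out: label, leaking the command buffer on error paths. A compact, hypothetical sketch of the goto-cleanup idiom they restore (struct my_hw, struct cfg_cmd, issue_cmd and send_config are illustrative names):

/* Hypothetical sketch: every exit path funnels through "out" so the
 * allocation is released on success and on failure alike. */
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/printk.h>

struct cfg_cmd { u32 value; };
struct my_hw { bool ready; };

static int issue_cmd(struct my_hw *hw, struct cfg_cmd *cmd)
{
	return 0;			/* stand-in for the real submission */
}

static int send_config(struct my_hw *hw)
{
	struct cfg_cmd *cmd;
	int ret = 0;

	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	if (!hw->ready) {
		ret = -EBUSY;
		goto out;		/* still frees cmd */
	}

	ret = issue_cmd(hw, cmd);
	if (ret < 0)
		pr_warn("config failed: %d\n", ret);	/* no early return */

out:
	kfree(cmd);			/* single cleanup point */
	return ret;
}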
diff --git a/drivers/net/wireless/wl12xx/main.c b/drivers/net/wireless/wl12xx/main.c index e58c22d21e39..b70ae40ad660 100644 --- a/drivers/net/wireless/wl12xx/main.c +++ b/drivers/net/wireless/wl12xx/main.c | |||
@@ -4283,6 +4283,7 @@ int wl1271_init_ieee80211(struct wl1271 *wl) | |||
4283 | wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) | | 4283 | wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) | |
4284 | BIT(NL80211_IFTYPE_ADHOC) | BIT(NL80211_IFTYPE_AP); | 4284 | BIT(NL80211_IFTYPE_ADHOC) | BIT(NL80211_IFTYPE_AP); |
4285 | wl->hw->wiphy->max_scan_ssids = 1; | 4285 | wl->hw->wiphy->max_scan_ssids = 1; |
4286 | wl->hw->wiphy->max_sched_scan_ssids = 1; | ||
4286 | /* | 4287 | /* |
4287 | * Maximum length of elements in scanning probe request templates | 4288 | * Maximum length of elements in scanning probe request templates |
4288 | * should be the maximum length possible for a template, without | 4289 | * should be the maximum length possible for a template, without |
diff --git a/drivers/net/wireless/wl12xx/sdio.c b/drivers/net/wireless/wl12xx/sdio.c index 5cf18c2c23f0..fb1fd5af75ea 100644 --- a/drivers/net/wireless/wl12xx/sdio.c +++ b/drivers/net/wireless/wl12xx/sdio.c | |||
@@ -164,7 +164,7 @@ static int wl1271_sdio_power_on(struct wl1271 *wl) | |||
164 | /* If enabled, tell runtime PM not to power off the card */ | 164 | /* If enabled, tell runtime PM not to power off the card */ |
165 | if (pm_runtime_enabled(&func->dev)) { | 165 | if (pm_runtime_enabled(&func->dev)) { |
166 | ret = pm_runtime_get_sync(&func->dev); | 166 | ret = pm_runtime_get_sync(&func->dev); |
167 | if (ret) | 167 | if (ret < 0) |
168 | goto out; | 168 | goto out; |
169 | } else { | 169 | } else { |
170 | /* Runtime PM is disabled: power up the card manually */ | 170 | /* Runtime PM is disabled: power up the card manually */ |
diff --git a/drivers/net/wireless/wl12xx/testmode.c b/drivers/net/wireless/wl12xx/testmode.c index 5d5e1ef87206..4ae8effaee22 100644 --- a/drivers/net/wireless/wl12xx/testmode.c +++ b/drivers/net/wireless/wl12xx/testmode.c | |||
@@ -36,7 +36,6 @@ enum wl1271_tm_commands { | |||
36 | WL1271_TM_CMD_TEST, | 36 | WL1271_TM_CMD_TEST, |
37 | WL1271_TM_CMD_INTERROGATE, | 37 | WL1271_TM_CMD_INTERROGATE, |
38 | WL1271_TM_CMD_CONFIGURE, | 38 | WL1271_TM_CMD_CONFIGURE, |
39 | WL1271_TM_CMD_NVS_PUSH, | ||
40 | WL1271_TM_CMD_SET_PLT_MODE, | 39 | WL1271_TM_CMD_SET_PLT_MODE, |
41 | WL1271_TM_CMD_RECOVER, | 40 | WL1271_TM_CMD_RECOVER, |
42 | 41 | ||
@@ -139,12 +138,15 @@ static int wl1271_tm_cmd_interrogate(struct wl1271 *wl, struct nlattr *tb[]) | |||
139 | 138 | ||
140 | if (ret < 0) { | 139 | if (ret < 0) { |
141 | wl1271_warning("testmode cmd interrogate failed: %d", ret); | 140 | wl1271_warning("testmode cmd interrogate failed: %d", ret); |
141 | kfree(cmd); | ||
142 | return ret; | 142 | return ret; |
143 | } | 143 | } |
144 | 144 | ||
145 | skb = cfg80211_testmode_alloc_reply_skb(wl->hw->wiphy, sizeof(*cmd)); | 145 | skb = cfg80211_testmode_alloc_reply_skb(wl->hw->wiphy, sizeof(*cmd)); |
146 | if (!skb) | 146 | if (!skb) { |
147 | kfree(cmd); | ||
147 | return -ENOMEM; | 148 | return -ENOMEM; |
149 | } | ||
148 | 150 | ||
149 | NLA_PUT(skb, WL1271_TM_ATTR_DATA, sizeof(*cmd), cmd); | 151 | NLA_PUT(skb, WL1271_TM_ATTR_DATA, sizeof(*cmd), cmd); |
150 | 152 | ||
@@ -187,48 +189,6 @@ static int wl1271_tm_cmd_configure(struct wl1271 *wl, struct nlattr *tb[]) | |||
187 | return 0; | 189 | return 0; |
188 | } | 190 | } |
189 | 191 | ||
190 | static int wl1271_tm_cmd_nvs_push(struct wl1271 *wl, struct nlattr *tb[]) | ||
191 | { | ||
192 | int ret = 0; | ||
193 | size_t len; | ||
194 | void *buf; | ||
195 | |||
196 | wl1271_debug(DEBUG_TESTMODE, "testmode cmd nvs push"); | ||
197 | |||
198 | if (!tb[WL1271_TM_ATTR_DATA]) | ||
199 | return -EINVAL; | ||
200 | |||
201 | buf = nla_data(tb[WL1271_TM_ATTR_DATA]); | ||
202 | len = nla_len(tb[WL1271_TM_ATTR_DATA]); | ||
203 | |||
204 | mutex_lock(&wl->mutex); | ||
205 | |||
206 | kfree(wl->nvs); | ||
207 | |||
208 | if ((wl->chip.id == CHIP_ID_1283_PG20) && | ||
209 | (len != sizeof(struct wl128x_nvs_file))) | ||
210 | return -EINVAL; | ||
211 | else if (len != sizeof(struct wl1271_nvs_file)) | ||
212 | return -EINVAL; | ||
213 | |||
214 | wl->nvs = kzalloc(len, GFP_KERNEL); | ||
215 | if (!wl->nvs) { | ||
216 | wl1271_error("could not allocate memory for the nvs file"); | ||
217 | ret = -ENOMEM; | ||
218 | goto out; | ||
219 | } | ||
220 | |||
221 | memcpy(wl->nvs, buf, len); | ||
222 | wl->nvs_len = len; | ||
223 | |||
224 | wl1271_debug(DEBUG_TESTMODE, "testmode pushed nvs"); | ||
225 | |||
226 | out: | ||
227 | mutex_unlock(&wl->mutex); | ||
228 | |||
229 | return ret; | ||
230 | } | ||
231 | |||
232 | static int wl1271_tm_cmd_set_plt_mode(struct wl1271 *wl, struct nlattr *tb[]) | 192 | static int wl1271_tm_cmd_set_plt_mode(struct wl1271 *wl, struct nlattr *tb[]) |
233 | { | 193 | { |
234 | u32 val; | 194 | u32 val; |
@@ -285,8 +245,6 @@ int wl1271_tm_cmd(struct ieee80211_hw *hw, void *data, int len) | |||
285 | return wl1271_tm_cmd_interrogate(wl, tb); | 245 | return wl1271_tm_cmd_interrogate(wl, tb); |
286 | case WL1271_TM_CMD_CONFIGURE: | 246 | case WL1271_TM_CMD_CONFIGURE: |
287 | return wl1271_tm_cmd_configure(wl, tb); | 247 | return wl1271_tm_cmd_configure(wl, tb); |
288 | case WL1271_TM_CMD_NVS_PUSH: | ||
289 | return wl1271_tm_cmd_nvs_push(wl, tb); | ||
290 | case WL1271_TM_CMD_SET_PLT_MODE: | 248 | case WL1271_TM_CMD_SET_PLT_MODE: |
291 | return wl1271_tm_cmd_set_plt_mode(wl, tb); | 249 | return wl1271_tm_cmd_set_plt_mode(wl, tb); |
292 | case WL1271_TM_CMD_RECOVER: | 250 | case WL1271_TM_CMD_RECOVER: |
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c index 0ca86f9ec4ed..182562952c79 100644 --- a/drivers/net/xen-netback/interface.c +++ b/drivers/net/xen-netback/interface.c | |||
@@ -327,12 +327,12 @@ int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref, | |||
327 | xenvif_get(vif); | 327 | xenvif_get(vif); |
328 | 328 | ||
329 | rtnl_lock(); | 329 | rtnl_lock(); |
330 | if (netif_running(vif->dev)) | ||
331 | xenvif_up(vif); | ||
332 | if (!vif->can_sg && vif->dev->mtu > ETH_DATA_LEN) | 330 | if (!vif->can_sg && vif->dev->mtu > ETH_DATA_LEN) |
333 | dev_set_mtu(vif->dev, ETH_DATA_LEN); | 331 | dev_set_mtu(vif->dev, ETH_DATA_LEN); |
334 | netdev_update_features(vif->dev); | 332 | netdev_update_features(vif->dev); |
335 | netif_carrier_on(vif->dev); | 333 | netif_carrier_on(vif->dev); |
334 | if (netif_running(vif->dev)) | ||
335 | xenvif_up(vif); | ||
336 | rtnl_unlock(); | 336 | rtnl_unlock(); |
337 | 337 | ||
338 | return 0; | 338 | return 0; |