author    Linus Torvalds <torvalds@linux-foundation.org>  2011-09-18 14:02:26 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2011-09-18 14:02:26 -0400
commit    b0e7031ac08fa0aa242531c8d9a0cf9ae8ee276d (patch)
tree      86ae983c51b9df07ead6f00aeddbf276f672c839
parent    01a7143586f51f80e1b29ebf240c6e5390657450 (diff)
parent    8e2ec639173f325977818c45011ee176ef2b11f6 (diff)
Merge git://github.com/davem330/net
* git://github.com/davem330/net: (62 commits)
  ipv6: don't use inetpeer to store metrics for routes.
  can: ti_hecc: include linux/io.h
  IRDA: Fix global type conflicts in net/irda/irsysctl.c v2
  net: Handle different key sizes between address families in flow cache
  net: Align AF-specific flowi structs to long
  ipv4: Fix fib_info->fib_metrics leak
  caif: fix a potential NULL dereference
  sctp: deal with multiple COOKIE_ECHO chunks
  ibmveth: Fix checksum offload failure handling
  ibmveth: Checksum offload is always disabled
  ibmveth: Fix issue with DMA mapping failure
  ibmveth: Fix DMA unmap error
  pch_gbe: support ML7831 IOH
  pch_gbe: added the process of FIFO over run error
  pch_gbe: fixed the issue which receives an unnecessary packet.
  sfc: Use 64-bit writes for TX push where possible
  Revert "sfc: Use write-combining to reduce TX latency" and follow-ups
  bnx2x: Fix ethtool advertisement
  bnx2x: Fix 578xx link LED
  bnx2x: Fix XMAC loopback test
  ...
-rw-r--r--  Documentation/networking/dmfe.txt  3
-rw-r--r--  MAINTAINERS  6
-rw-r--r--  drivers/net/Kconfig  11
-rw-r--r--  drivers/net/bnx2x/bnx2x.h  124
-rw-r--r--  drivers/net/bnx2x/bnx2x_cmn.c  27
-rw-r--r--  drivers/net/bnx2x/bnx2x_ethtool.c  48
-rw-r--r--  drivers/net/bnx2x/bnx2x_link.c  46
-rw-r--r--  drivers/net/bnx2x/bnx2x_main.c  162
-rw-r--r--  drivers/net/bnx2x/bnx2x_reg.h  7
-rw-r--r--  drivers/net/bnx2x/bnx2x_stats.c  7
-rw-r--r--  drivers/net/can/ti_hecc.c  1
-rw-r--r--  drivers/net/e1000/e1000_hw.c  6
-rw-r--r--  drivers/net/ibmveth.c  48
-rw-r--r--  drivers/net/pch_gbe/pch_gbe.h  12
-rw-r--r--  drivers/net/pch_gbe/pch_gbe_main.c  302
-rw-r--r--  drivers/net/sfc/efx.c  18
-rw-r--r--  drivers/net/sfc/io.h  6
-rw-r--r--  drivers/net/sfc/mcdi.c  46
-rw-r--r--  drivers/net/sfc/nic.c  7
-rw-r--r--  drivers/net/sfc/nic.h  2
-rw-r--r--  drivers/net/sfc/siena.c  25
-rw-r--r--  drivers/net/sfc/workarounds.h  2
-rw-r--r--  drivers/net/usb/ipheth.c  5
-rw-r--r--  drivers/net/wireless/ath/ath9k/ar9002_calib.c  3
-rw-r--r--  drivers/net/wireless/ath/ath9k/ar9003_phy.c  2
-rw-r--r--  drivers/net/wireless/ath/ath9k/main.c  6
-rw-r--r--  drivers/net/wireless/iwlegacy/iwl-3945-rs.c  13
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-agn-ucode.c  2
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-trans-tx-pcie.c  2
-rw-r--r--  drivers/net/wireless/rtlwifi/core.c  8
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8192cu/trx.c  11
-rw-r--r--  include/linux/skbuff.h  1
-rw-r--r--  include/linux/snmp.h  2
-rw-r--r--  include/net/flow.h  25
-rw-r--r--  include/net/request_sock.h  3
-rw-r--r--  include/net/sctp/command.h  1
-rw-r--r--  include/net/tcp.h  3
-rw-r--r--  include/net/transp_v6.h  1
-rw-r--r--  net/bridge/netfilter/Kconfig  2
-rw-r--r--  net/caif/caif_dev.c  6
-rw-r--r--  net/can/af_can.c  2
-rw-r--r--  net/core/dev.c  8
-rw-r--r--  net/core/flow.c  36
-rw-r--r--  net/core/skbuff.c  22
-rw-r--r--  net/ethernet/eth.c  2
-rw-r--r--  net/ipv4/af_inet.c  7
-rw-r--r--  net/ipv4/fib_semantics.c  10
-rw-r--r--  net/ipv4/netfilter/ip_queue.c  12
-rw-r--r--  net/ipv4/proc.c  2
-rw-r--r--  net/ipv4/tcp_ipv4.c  49
-rw-r--r--  net/ipv6/datagram.c  5
-rw-r--r--  net/ipv6/ip6_flowlabel.c  8
-rw-r--r--  net/ipv6/ipv6_sockglue.c  2
-rw-r--r--  net/ipv6/netfilter/ip6_queue.c  12
-rw-r--r--  net/ipv6/raw.c  4
-rw-r--r--  net/ipv6/route.c  33
-rw-r--r--  net/ipv6/tcp_ipv6.c  31
-rw-r--r--  net/ipv6/udp.c  4
-rw-r--r--  net/irda/irsysctl.c  6
-rw-r--r--  net/irda/qos.c  6
-rw-r--r--  net/mac80211/sta_info.c  2
-rw-r--r--  net/netfilter/nf_conntrack_pptp.c  1
-rw-r--r--  net/netfilter/nf_conntrack_proto_tcp.c  6
-rw-r--r--  net/netfilter/nfnetlink_queue.c  4
-rw-r--r--  net/netfilter/xt_rateest.c  9
-rw-r--r--  net/sched/cls_rsvp.h  27
-rw-r--r--  net/sctp/sm_sideeffect.c  5
-rw-r--r--  net/sctp/sm_statefuns.c  6
68 files changed, 826 insertions, 507 deletions
diff --git a/Documentation/networking/dmfe.txt b/Documentation/networking/dmfe.txt
index 8006c227fda2..25320bf19c86 100644
--- a/Documentation/networking/dmfe.txt
+++ b/Documentation/networking/dmfe.txt
@@ -1,3 +1,5 @@
+Note: This driver doesn't have a maintainer.
+
 Davicom DM9102(A)/DM9132/DM9801 fast ethernet driver for Linux.
 
 This program is free software; you can redistribute it and/or
@@ -55,7 +57,6 @@ Test and make sure PCI latency is now correct for all cases.
 Authors:
 
 Sten Wang <sten_wang@davicom.com.tw > : Original Author
-Tobias Ringstrom <tori@unhappy.mine.nu> : Current Maintainer
 
 Contributors:
 
diff --git a/MAINTAINERS b/MAINTAINERS
index 0acf9ab3d287..ae8820e173a2 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1278,7 +1278,6 @@ F: drivers/input/misc/ati_remote2.c
 ATLX ETHERNET DRIVERS
 M: Jay Cliburn <jcliburn@gmail.com>
 M: Chris Snook <chris.snook@gmail.com>
-M: Jie Yang <jie.yang@atheros.com>
 L: netdev@vger.kernel.org
 W: http://sourceforge.net/projects/atl1
 W: http://atl1.sourceforge.net
@@ -1574,7 +1573,6 @@ F: drivers/scsi/bfa/
 
 BROCADE BNA 10 GIGABIT ETHERNET DRIVER
 M: Rasesh Mody <rmody@brocade.com>
-M: Debashis Dutt <ddutt@brocade.com>
 L: netdev@vger.kernel.org
 S: Supported
 F: drivers/net/bna/
@@ -1758,7 +1756,6 @@ F: Documentation/zh_CN/
 
 CISCO VIC ETHERNET NIC DRIVER
 M: Christian Benvenuti <benve@cisco.com>
-M: Vasanthy Kolluri <vkolluri@cisco.com>
 M: Roopa Prabhu <roprabhu@cisco.com>
 M: David Wang <dwang2@cisco.com>
 S: Supported
@@ -4415,7 +4412,8 @@ L: netfilter@vger.kernel.org
 L: coreteam@netfilter.org
 W: http://www.netfilter.org/
 W: http://www.iptables.org/
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/kaber/nf-2.6.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/netfilter/nf-2.6.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/netfilter/nf-next-2.6.git
 S: Supported
 F: include/linux/netfilter*
 F: include/linux/netfilter/
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 8d0314dbd946..a44874e24f2a 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -2535,7 +2535,7 @@ config S6GMAC
 source "drivers/net/stmmac/Kconfig"
 
 config PCH_GBE
-        tristate "Intel EG20T PCH / OKI SEMICONDUCTOR ML7223 IOH GbE"
+        tristate "Intel EG20T PCH/OKI SEMICONDUCTOR IOH(ML7223/ML7831) GbE"
         depends on PCI
         select MII
         ---help---
@@ -2548,10 +2548,11 @@ config PCH_GBE
           This driver enables Gigabit Ethernet function.
 
           This driver also can be used for OKI SEMICONDUCTOR IOH(Input/
-          Output Hub), ML7223.
-          ML7223 IOH is for MP(Media Phone) use.
-          ML7223 is companion chip for Intel Atom E6xx series.
-          ML7223 is completely compatible for Intel EG20T PCH.
+          Output Hub), ML7223/ML7831.
+          ML7223 IOH is for MP(Media Phone) use. ML7831 IOH is for general
+          purpose use.
+          ML7223/ML7831 is companion chip for Intel Atom E6xx series.
+          ML7223/ML7831 is completely compatible for Intel EG20T PCH.
 
 config FTGMAC100
         tristate "Faraday FTGMAC100 Gigabit Ethernet support"
diff --git a/drivers/net/bnx2x/bnx2x.h b/drivers/net/bnx2x/bnx2x.h
index c423504a755f..e46df5331c55 100644
--- a/drivers/net/bnx2x/bnx2x.h
+++ b/drivers/net/bnx2x/bnx2x.h
@@ -315,6 +315,14 @@ union db_prod {
         u32 raw;
 };
 
+/* dropless fc FW/HW related params */
+#define BRB_SIZE(bp)            (CHIP_IS_E3(bp) ? 1024 : 512)
+#define MAX_AGG_QS(bp)          (CHIP_IS_E1(bp) ? \
+                                        ETH_MAX_AGGREGATION_QUEUES_E1 :\
+                                        ETH_MAX_AGGREGATION_QUEUES_E1H_E2)
+#define FW_DROP_LEVEL(bp)       (3 + MAX_SPQ_PENDING + MAX_AGG_QS(bp))
+#define FW_PREFETCH_CNT         16
+#define DROPLESS_FC_HEADROOM    100
 
 /* MC hsi */
 #define BCM_PAGE_SHIFT          12
@@ -331,15 +339,35 @@ union db_prod {
 /* SGE ring related macros */
 #define NUM_RX_SGE_PAGES        2
 #define RX_SGE_CNT              (BCM_PAGE_SIZE / sizeof(struct eth_rx_sge))
-#define MAX_RX_SGE_CNT          (RX_SGE_CNT - 2)
+#define NEXT_PAGE_SGE_DESC_CNT  2
+#define MAX_RX_SGE_CNT          (RX_SGE_CNT - NEXT_PAGE_SGE_DESC_CNT)
 /* RX_SGE_CNT is promised to be a power of 2 */
 #define RX_SGE_MASK             (RX_SGE_CNT - 1)
 #define NUM_RX_SGE              (RX_SGE_CNT * NUM_RX_SGE_PAGES)
 #define MAX_RX_SGE              (NUM_RX_SGE - 1)
 #define NEXT_SGE_IDX(x)         ((((x) & RX_SGE_MASK) == \
-                                  (MAX_RX_SGE_CNT - 1)) ? (x) + 3 : (x) + 1)
+                                  (MAX_RX_SGE_CNT - 1)) ? \
+                                        (x) + 1 + NEXT_PAGE_SGE_DESC_CNT : \
+                                        (x) + 1)
 #define RX_SGE(x)               ((x) & MAX_RX_SGE)
 
+/*
+ * Number of required SGEs is the sum of two:
+ * 1. Number of possible opened aggregations (next packet for
+ *    these aggregations will probably consume SGE immidiatelly)
+ * 2. Rest of BRB blocks divided by 2 (block will consume new SGE only
+ *    after placement on BD for new TPA aggregation)
+ *
+ * Takes into account NEXT_PAGE_SGE_DESC_CNT "next" elements on each page
+ */
+#define NUM_SGE_REQ             (MAX_AGG_QS(bp) + \
+                                        (BRB_SIZE(bp) - MAX_AGG_QS(bp)) / 2)
+#define NUM_SGE_PG_REQ          ((NUM_SGE_REQ + MAX_RX_SGE_CNT - 1) / \
+                                        MAX_RX_SGE_CNT)
+#define SGE_TH_LO(bp)           (NUM_SGE_REQ + \
+                                 NUM_SGE_PG_REQ * NEXT_PAGE_SGE_DESC_CNT)
+#define SGE_TH_HI(bp)           (SGE_TH_LO(bp) + DROPLESS_FC_HEADROOM)
+
 /* Manipulate a bit vector defined as an array of u64 */
 
 /* Number of bits in one sge_mask array element */
@@ -551,24 +579,43 @@ struct bnx2x_fastpath {
 
 #define NUM_TX_RINGS            16
 #define TX_DESC_CNT             (BCM_PAGE_SIZE / sizeof(union eth_tx_bd_types))
-#define MAX_TX_DESC_CNT         (TX_DESC_CNT - 1)
+#define NEXT_PAGE_TX_DESC_CNT   1
+#define MAX_TX_DESC_CNT         (TX_DESC_CNT - NEXT_PAGE_TX_DESC_CNT)
 #define NUM_TX_BD               (TX_DESC_CNT * NUM_TX_RINGS)
 #define MAX_TX_BD               (NUM_TX_BD - 1)
 #define MAX_TX_AVAIL            (MAX_TX_DESC_CNT * NUM_TX_RINGS - 2)
 #define NEXT_TX_IDX(x)          ((((x) & MAX_TX_DESC_CNT) == \
-                                  (MAX_TX_DESC_CNT - 1)) ? (x) + 2 : (x) + 1)
+                                  (MAX_TX_DESC_CNT - 1)) ? \
+                                        (x) + 1 + NEXT_PAGE_TX_DESC_CNT : \
+                                        (x) + 1)
 #define TX_BD(x)                ((x) & MAX_TX_BD)
 #define TX_BD_POFF(x)           ((x) & MAX_TX_DESC_CNT)
 
 /* The RX BD ring is special, each bd is 8 bytes but the last one is 16 */
 #define NUM_RX_RINGS            8
 #define RX_DESC_CNT             (BCM_PAGE_SIZE / sizeof(struct eth_rx_bd))
-#define MAX_RX_DESC_CNT         (RX_DESC_CNT - 2)
+#define NEXT_PAGE_RX_DESC_CNT   2
+#define MAX_RX_DESC_CNT         (RX_DESC_CNT - NEXT_PAGE_RX_DESC_CNT)
 #define RX_DESC_MASK            (RX_DESC_CNT - 1)
 #define NUM_RX_BD               (RX_DESC_CNT * NUM_RX_RINGS)
 #define MAX_RX_BD               (NUM_RX_BD - 1)
 #define MAX_RX_AVAIL            (MAX_RX_DESC_CNT * NUM_RX_RINGS - 2)
-#define MIN_RX_AVAIL            128
+
+/* dropless fc calculations for BDs
+ *
+ * Number of BDs should as number of buffers in BRB:
+ * Low threshold takes into account NEXT_PAGE_RX_DESC_CNT
+ * "next" elements on each page
+ */
+#define NUM_BD_REQ              BRB_SIZE(bp)
+#define NUM_BD_PG_REQ           ((NUM_BD_REQ + MAX_RX_DESC_CNT - 1) / \
+                                        MAX_RX_DESC_CNT)
+#define BD_TH_LO(bp)            (NUM_BD_REQ + \
+                                 NUM_BD_PG_REQ * NEXT_PAGE_RX_DESC_CNT + \
+                                 FW_DROP_LEVEL(bp))
+#define BD_TH_HI(bp)            (BD_TH_LO(bp) + DROPLESS_FC_HEADROOM)
+
+#define MIN_RX_AVAIL            ((bp)->dropless_fc ? BD_TH_HI(bp) + 128 : 128)
 
 #define MIN_RX_SIZE_TPA_HW      (CHIP_IS_E1(bp) ? \
                                         ETH_MIN_RX_CQES_WITH_TPA_E1 : \
@@ -579,7 +626,9 @@ struct bnx2x_fastpath {
                                         MIN_RX_AVAIL))
 
 #define NEXT_RX_IDX(x)          ((((x) & RX_DESC_MASK) == \
-                                  (MAX_RX_DESC_CNT - 1)) ? (x) + 3 : (x) + 1)
+                                  (MAX_RX_DESC_CNT - 1)) ? \
+                                        (x) + 1 + NEXT_PAGE_RX_DESC_CNT : \
+                                        (x) + 1)
 #define RX_BD(x)                ((x) & MAX_RX_BD)
 
 /*
@@ -589,14 +638,31 @@ struct bnx2x_fastpath {
 #define CQE_BD_REL      (sizeof(union eth_rx_cqe) / sizeof(struct eth_rx_bd))
 #define NUM_RCQ_RINGS           (NUM_RX_RINGS * CQE_BD_REL)
 #define RCQ_DESC_CNT            (BCM_PAGE_SIZE / sizeof(union eth_rx_cqe))
-#define MAX_RCQ_DESC_CNT        (RCQ_DESC_CNT - 1)
+#define NEXT_PAGE_RCQ_DESC_CNT  1
+#define MAX_RCQ_DESC_CNT        (RCQ_DESC_CNT - NEXT_PAGE_RCQ_DESC_CNT)
 #define NUM_RCQ_BD              (RCQ_DESC_CNT * NUM_RCQ_RINGS)
 #define MAX_RCQ_BD              (NUM_RCQ_BD - 1)
 #define MAX_RCQ_AVAIL           (MAX_RCQ_DESC_CNT * NUM_RCQ_RINGS - 2)
 #define NEXT_RCQ_IDX(x)         ((((x) & MAX_RCQ_DESC_CNT) == \
-                                  (MAX_RCQ_DESC_CNT - 1)) ? (x) + 2 : (x) + 1)
+                                  (MAX_RCQ_DESC_CNT - 1)) ? \
+                                        (x) + 1 + NEXT_PAGE_RCQ_DESC_CNT : \
+                                        (x) + 1)
 #define RCQ_BD(x)               ((x) & MAX_RCQ_BD)
 
+/* dropless fc calculations for RCQs
+ *
+ * Number of RCQs should be as number of buffers in BRB:
+ * Low threshold takes into account NEXT_PAGE_RCQ_DESC_CNT
+ * "next" elements on each page
+ */
+#define NUM_RCQ_REQ             BRB_SIZE(bp)
+#define NUM_RCQ_PG_REQ          ((NUM_BD_REQ + MAX_RCQ_DESC_CNT - 1) / \
+                                        MAX_RCQ_DESC_CNT)
+#define RCQ_TH_LO(bp)           (NUM_RCQ_REQ + \
+                                 NUM_RCQ_PG_REQ * NEXT_PAGE_RCQ_DESC_CNT + \
+                                 FW_DROP_LEVEL(bp))
+#define RCQ_TH_HI(bp)           (RCQ_TH_LO(bp) + DROPLESS_FC_HEADROOM)
+
 
 /* This is needed for determining of last_max */
 #define SUB_S16(a, b)           (s16)((s16)(a) - (s16)(b))
@@ -685,24 +751,17 @@ struct bnx2x_fastpath {
 #define FP_CSB_FUNC_OFF \
         offsetof(struct cstorm_status_block_c, func)
 
-#define HC_INDEX_TOE_RX_CQ_CONS         0 /* Formerly Ustorm TOE CQ index */
-                                          /* (HC_INDEX_U_TOE_RX_CQ_CONS) */
-#define HC_INDEX_ETH_RX_CQ_CONS         1 /* Formerly Ustorm ETH CQ index */
-                                          /* (HC_INDEX_U_ETH_RX_CQ_CONS) */
-#define HC_INDEX_ETH_RX_BD_CONS         2 /* Formerly Ustorm ETH BD index */
-                                          /* (HC_INDEX_U_ETH_RX_BD_CONS) */
-
-#define HC_INDEX_TOE_TX_CQ_CONS         4 /* Formerly Cstorm TOE CQ index */
-                                          /* (HC_INDEX_C_TOE_TX_CQ_CONS) */
-#define HC_INDEX_ETH_TX_CQ_CONS_COS0    5 /* Formerly Cstorm ETH CQ index */
-                                          /* (HC_INDEX_C_ETH_TX_CQ_CONS) */
-#define HC_INDEX_ETH_TX_CQ_CONS_COS1    6 /* Formerly Cstorm ETH CQ index */
-                                          /* (HC_INDEX_C_ETH_TX_CQ_CONS) */
-#define HC_INDEX_ETH_TX_CQ_CONS_COS2    7 /* Formerly Cstorm ETH CQ index */
-                                          /* (HC_INDEX_C_ETH_TX_CQ_CONS) */
+#define HC_INDEX_ETH_RX_CQ_CONS         1
 
-#define HC_INDEX_ETH_FIRST_TX_CQ_CONS   HC_INDEX_ETH_TX_CQ_CONS_COS0
+#define HC_INDEX_OOO_TX_CQ_CONS         4
 
+#define HC_INDEX_ETH_TX_CQ_CONS_COS0    5
+
+#define HC_INDEX_ETH_TX_CQ_CONS_COS1    6
+
+#define HC_INDEX_ETH_TX_CQ_CONS_COS2    7
+
+#define HC_INDEX_ETH_FIRST_TX_CQ_CONS   HC_INDEX_ETH_TX_CQ_CONS_COS0
 
 #define BNX2X_RX_SB_INDEX \
         (&fp->sb_index_values[HC_INDEX_ETH_RX_CQ_CONS])
@@ -1100,11 +1159,12 @@ struct bnx2x {
#define BP_PORT(bp)                     (bp->pfid & 1)
#define BP_FUNC(bp)                     (bp->pfid)
#define BP_ABS_FUNC(bp)                 (bp->pf_num)
-#define BP_E1HVN(bp)                    (bp->pfid >> 1)
-#define BP_VN(bp)                       (BP_E1HVN(bp)) /*remove when approved*/
-#define BP_L_ID(bp)                     (BP_E1HVN(bp) << 2)
-#define BP_FW_MB_IDX(bp)                (BP_PORT(bp) +\
-        BP_VN(bp) * ((CHIP_IS_E1x(bp) || (CHIP_MODE_IS_4_PORT(bp))) ? 2 : 1))
+#define BP_VN(bp)                       ((bp)->pfid >> 1)
+#define BP_MAX_VN_NUM(bp)               (CHIP_MODE_IS_4_PORT(bp) ? 2 : 4)
+#define BP_L_ID(bp)                     (BP_VN(bp) << 2)
+#define BP_FW_MB_IDX_VN(bp, vn)         (BP_PORT(bp) +\
+        (vn) * ((CHIP_IS_E1x(bp) || (CHIP_MODE_IS_4_PORT(bp))) ? 2 : 1))
+#define BP_FW_MB_IDX(bp)                BP_FW_MB_IDX_VN(bp, BP_VN(bp))
 
         struct net_device *dev;
         struct pci_dev *pdev;
@@ -1767,7 +1827,7 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
 
 #define MAX_DMAE_C_PER_PORT             8
 #define INIT_DMAE_C(bp)                 (BP_PORT(bp) * MAX_DMAE_C_PER_PORT + \
-                                         BP_E1HVN(bp))
+                                         BP_VN(bp))
 #define PMF_DMAE_C(bp)                  (BP_PORT(bp) * MAX_DMAE_C_PER_PORT + \
                                          E1HVN_MAX)
 
@@ -1793,7 +1853,7 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
 
 /* must be used on a CID before placing it on a HW ring */
 #define HW_CID(bp, x)                   ((BP_PORT(bp) << 23) | \
-                                         (BP_E1HVN(bp) << BNX2X_SWCID_SHIFT) | \
+                                         (BP_VN(bp) << BNX2X_SWCID_SHIFT) | \
                                          (x))
 
 #define SP_DESC_CNT             (BCM_PAGE_SIZE / sizeof(struct eth_spe))
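Note on the new dropless flow-control watermarks: the 350/250 and 250/150 magic numbers that bnx2x_pf_rx_q_prep() used to program (see the bnx2x_main.c hunks further down) are replaced by thresholds derived from the BRB size, so they remain correct on E3 chips with a 1024-block BRB. The standalone C sketch below re-derives the watermarks for an assumed E2-class configuration; the 512-block BRB, 8-byte descriptors in 4 KB pages, MAX_SPQ_PENDING of 8 and the 64-queue aggregation limit are illustrative assumptions, not values guaranteed by this patch.

#include <stdio.h>

/* Assumed values for an E2-class chip; the real driver derives these
 * from CHIP_IS_*() checks and firmware headers. */
#define BRB_BLOCKS            512   /* 1024 on E3 */
#define MAX_AGG_QUEUES        64    /* ETH_MAX_AGGREGATION_QUEUES_E1H_E2 (assumed) */
#define MAX_SPQ_PENDING       8     /* assumed, as in bnx2x.h */
#define DROPLESS_FC_HEADROOM  100

#define RX_SGE_CNT            512   /* 4096-byte page / 8-byte SGE descriptor */
#define NEXT_PAGE_SGE_DESC    2
#define MAX_RX_SGE_CNT        (RX_SGE_CNT - NEXT_PAGE_SGE_DESC)

int main(void)
{
    int fw_drop_level = 3 + MAX_SPQ_PENDING + MAX_AGG_QUEUES;

    /* SGE thresholds: one SGE per open aggregation plus half of the
     * remaining BRB blocks, rounded up to whole descriptor pages. */
    int num_sge_req = MAX_AGG_QUEUES + (BRB_BLOCKS - MAX_AGG_QUEUES) / 2;
    int num_sge_pg  = (num_sge_req + MAX_RX_SGE_CNT - 1) / MAX_RX_SGE_CNT;
    int sge_th_lo   = num_sge_req + num_sge_pg * NEXT_PAGE_SGE_DESC;
    int sge_th_hi   = sge_th_lo + DROPLESS_FC_HEADROOM;

    /* BD thresholds: one BD per BRB buffer plus the FW drop level. */
    int max_rx_desc = 512 - 2;   /* RX_DESC_CNT - NEXT_PAGE_RX_DESC_CNT */
    int num_bd_pg   = (BRB_BLOCKS + max_rx_desc - 1) / max_rx_desc;
    int bd_th_lo    = BRB_BLOCKS + num_bd_pg * 2 + fw_drop_level;
    int bd_th_hi    = bd_th_lo + DROPLESS_FC_HEADROOM;

    printf("SGE_TH_LO=%d SGE_TH_HI=%d\n", sge_th_lo, sge_th_hi); /* 290 390 */
    printf("BD_TH_LO=%d  BD_TH_HI=%d\n", bd_th_lo, bd_th_hi);    /* 591 691 */
    printf("MIN_RX_AVAIL(dropless)=%d\n", bd_th_hi + 128);       /* 819 */
    return 0;
}

Under these assumptions the BD watermarks come out to 591/691, which is also why MIN_RX_AVAIL now grows when dropless_fc is set: a ring smaller than BD_TH_HI could never cross the high threshold, so pause would stay asserted.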
diff --git a/drivers/net/bnx2x/bnx2x_cmn.c b/drivers/net/bnx2x/bnx2x_cmn.c
index 37e5790681ad..c4cbf9736414 100644
--- a/drivers/net/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/bnx2x/bnx2x_cmn.c
@@ -987,8 +987,6 @@ void __bnx2x_link_report(struct bnx2x *bp)
 void bnx2x_init_rx_rings(struct bnx2x *bp)
 {
         int func = BP_FUNC(bp);
-        int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
-                                              ETH_MAX_AGGREGATION_QUEUES_E1H_E2;
         u16 ring_prod;
         int i, j;
 
@@ -1001,7 +999,7 @@ void bnx2x_init_rx_rings(struct bnx2x *bp)
 
                 if (!fp->disable_tpa) {
                         /* Fill the per-aggregtion pool */
-                        for (i = 0; i < max_agg_queues; i++) {
+                        for (i = 0; i < MAX_AGG_QS(bp); i++) {
                                 struct bnx2x_agg_info *tpa_info =
                                         &fp->tpa_info[i];
                                 struct sw_rx_bd *first_buf =
@@ -1041,7 +1039,7 @@ void bnx2x_init_rx_rings(struct bnx2x *bp)
                                         bnx2x_free_rx_sge_range(bp, fp,
                                                                 ring_prod);
                                         bnx2x_free_tpa_pool(bp, fp,
-                                                            max_agg_queues);
+                                                            MAX_AGG_QS(bp));
                                         fp->disable_tpa = 1;
                                         ring_prod = 0;
                                         break;
@@ -1137,9 +1135,7 @@ static void bnx2x_free_rx_skbs(struct bnx2x *bp)
                 bnx2x_free_rx_bds(fp);
 
                 if (!fp->disable_tpa)
-                        bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
-                                            ETH_MAX_AGGREGATION_QUEUES_E1 :
-                                            ETH_MAX_AGGREGATION_QUEUES_E1H_E2);
+                        bnx2x_free_tpa_pool(bp, fp, MAX_AGG_QS(bp));
         }
 }
 
@@ -3095,15 +3091,20 @@ static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
         struct bnx2x_fastpath *fp = &bp->fp[index];
         int ring_size = 0;
         u8 cos;
+        int rx_ring_size = 0;
 
         /* if rx_ring_size specified - use it */
-        int rx_ring_size = bp->rx_ring_size ? bp->rx_ring_size :
-                           MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);
+        if (!bp->rx_ring_size) {
 
-        /* allocate at least number of buffers required by FW */
-        rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
-                                                    MIN_RX_SIZE_TPA,
-                                  rx_ring_size);
+                rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);
+
+                /* allocate at least number of buffers required by FW */
+                rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
+                                     MIN_RX_SIZE_TPA, rx_ring_size);
+
+                bp->rx_ring_size = rx_ring_size;
+        } else
+                rx_ring_size = bp->rx_ring_size;
 
         /* Common */
         sb = &bnx2x_fp(bp, index, status_blk);
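The bnx2x_alloc_fp_mem_at() hunk does more than restructure the expression: by writing the derived default back into bp->rx_ring_size, the driver lets later readers (including the bnx2x_get_ringparam() fix in the next file) report the ring size actually in use. A minimal sketch of the selection rule, with the driver's macros reduced to assumed plain constants (the 4078 value corresponds to 8 pages of 8-byte RX BDs and is an assumption, not taken from this diff):

/* Assumed stand-in for the driver's MAX_RX_AVAIL macro. */
#define MAX_RX_AVAIL_TOTAL 4078

/* Pick the RX ring size once and remember it (mirrors the new logic;
 * hypothetical helper, not a driver function). */
static int pick_rx_ring_size(int *cached_size, int num_queues, int fw_min)
{
        int size;

        if (*cached_size)               /* user already configured it */
                return *cached_size;

        size = MAX_RX_AVAIL_TOTAL / num_queues;
        if (size < fw_min)              /* never below the FW-required minimum */
                size = fw_min;

        *cached_size = size;            /* cache so ethtool reports this value */
        return size;
}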
diff --git a/drivers/net/bnx2x/bnx2x_ethtool.c b/drivers/net/bnx2x/bnx2x_ethtool.c
index 221863059dae..cf3e47914dd7 100644
--- a/drivers/net/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/bnx2x/bnx2x_ethtool.c
@@ -363,13 +363,50 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
                 }
 
                 /* advertise the requested speed and duplex if supported */
-                cmd->advertising &= bp->port.supported[cfg_idx];
+                if (cmd->advertising & ~(bp->port.supported[cfg_idx])) {
+                        DP(NETIF_MSG_LINK, "Advertisement parameters "
+                                           "are not supported\n");
+                        return -EINVAL;
+                }
 
                 bp->link_params.req_line_speed[cfg_idx] = SPEED_AUTO_NEG;
-                bp->link_params.req_duplex[cfg_idx] = DUPLEX_FULL;
-                bp->port.advertising[cfg_idx] |= (ADVERTISED_Autoneg |
-                                             cmd->advertising);
+                bp->link_params.req_duplex[cfg_idx] = cmd->duplex;
+                bp->port.advertising[cfg_idx] = (ADVERTISED_Autoneg |
+                                                 cmd->advertising);
+                if (cmd->advertising) {
+
+                        bp->link_params.speed_cap_mask[cfg_idx] = 0;
+                        if (cmd->advertising & ADVERTISED_10baseT_Half) {
+                                bp->link_params.speed_cap_mask[cfg_idx] |=
+                                PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF;
+                        }
+                        if (cmd->advertising & ADVERTISED_10baseT_Full)
+                                bp->link_params.speed_cap_mask[cfg_idx] |=
+                                PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL;
 
+                        if (cmd->advertising & ADVERTISED_100baseT_Full)
+                                bp->link_params.speed_cap_mask[cfg_idx] |=
+                                PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL;
+
+                        if (cmd->advertising & ADVERTISED_100baseT_Half) {
+                                bp->link_params.speed_cap_mask[cfg_idx] |=
+                                     PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF;
+                        }
+                        if (cmd->advertising & ADVERTISED_1000baseT_Half) {
+                                bp->link_params.speed_cap_mask[cfg_idx] |=
+                                        PORT_HW_CFG_SPEED_CAPABILITY_D0_1G;
+                        }
+                        if (cmd->advertising & (ADVERTISED_1000baseT_Full |
+                                                ADVERTISED_1000baseKX_Full))
+                                bp->link_params.speed_cap_mask[cfg_idx] |=
+                                        PORT_HW_CFG_SPEED_CAPABILITY_D0_1G;
+
+                        if (cmd->advertising & (ADVERTISED_10000baseT_Full |
+                                                ADVERTISED_10000baseKX4_Full |
+                                                ADVERTISED_10000baseKR_Full))
+                                bp->link_params.speed_cap_mask[cfg_idx] |=
+                                        PORT_HW_CFG_SPEED_CAPABILITY_D0_10G;
+                }
         } else { /* forced speed */
                 /* advertise the requested speed and duplex if supported */
                 switch (speed) {
@@ -1310,10 +1347,7 @@ static void bnx2x_get_ringparam(struct net_device *dev,
         if (bp->rx_ring_size)
                 ering->rx_pending = bp->rx_ring_size;
         else
-                if (bp->state == BNX2X_STATE_OPEN && bp->num_queues)
-                        ering->rx_pending = MAX_RX_AVAIL/bp->num_queues;
-                else
-                        ering->rx_pending = MAX_RX_AVAIL;
+                ering->rx_pending = MAX_RX_AVAIL;
 
         ering->rx_mini_pending = 0;
         ering->rx_jumbo_pending = 0;
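Taken together, the two ethtool hunks change the autoneg path from "silently drop unsupported advertising bits" to "reject the request with -EINVAL", and then translate each accepted ADVERTISED_* bit into the firmware's PORT_HW_CFG_SPEED_CAPABILITY_D0_* mask. A reduced sketch of that policy, using illustrative stand-in bit values rather than the real ethtool/firmware constants:

#include <errno.h>
#include <stdint.h>

/* Illustrative bits; stand-ins for the real ADVERTISED_* and
 * PORT_HW_CFG_SPEED_CAPABILITY_* constants. */
#define ADV_10_FULL    0x01
#define ADV_100_FULL   0x08
#define ADV_1000_FULL  0x20

#define CAP_10M_FULL   0x1
#define CAP_100M_FULL  0x2
#define CAP_1G         0x4

static int advertising_to_cap_mask(uint32_t advertising, uint32_t supported,
                                   uint32_t *cap_mask)
{
        /* New behaviour: any bit the port can't do fails the whole request
         * instead of being masked away. */
        if (advertising & ~supported)
                return -EINVAL;

        *cap_mask = 0;
        if (advertising & ADV_10_FULL)
                *cap_mask |= CAP_10M_FULL;
        if (advertising & ADV_100_FULL)
                *cap_mask |= CAP_100M_FULL;
        if (advertising & ADV_1000_FULL)
                *cap_mask |= CAP_1G;
        return 0;
}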
diff --git a/drivers/net/bnx2x/bnx2x_link.c b/drivers/net/bnx2x/bnx2x_link.c
index d45b1555a602..ba15bdc5a1a9 100644
--- a/drivers/net/bnx2x/bnx2x_link.c
+++ b/drivers/net/bnx2x/bnx2x_link.c
@@ -778,9 +778,9 @@ static int bnx2x_ets_e3b0_set_cos_bw(struct bnx2x *bp,
 {
         u32 nig_reg_adress_crd_weight = 0;
         u32 pbf_reg_adress_crd_weight = 0;
-        /* Calculate and set BW for this COS*/
-        const u32 cos_bw_nig = (bw * min_w_val_nig) / total_bw;
-        const u32 cos_bw_pbf = (bw * min_w_val_pbf) / total_bw;
+        /* Calculate and set BW for this COS - use 1 instead of 0 for BW */
+        const u32 cos_bw_nig = ((bw ? bw : 1) * min_w_val_nig) / total_bw;
+        const u32 cos_bw_pbf = ((bw ? bw : 1) * min_w_val_pbf) / total_bw;
 
         switch (cos_entry) {
         case 0:
@@ -852,18 +852,12 @@ static int bnx2x_ets_e3b0_get_total_bw(
         /* Calculate total BW requested */
         for (cos_idx = 0; cos_idx < ets_params->num_of_cos; cos_idx++) {
                 if (bnx2x_cos_state_bw == ets_params->cos[cos_idx].state) {
-
-                        if (0 == ets_params->cos[cos_idx].params.bw_params.bw) {
-                                DP(NETIF_MSG_LINK, "bnx2x_ets_E3B0_config BW"
-                                                   "was set to 0\n");
-                                return -EINVAL;
+                        *total_bw +=
+                                ets_params->cos[cos_idx].params.bw_params.bw;
                         }
-                        *total_bw +=
-                                ets_params->cos[cos_idx].params.bw_params.bw;
-                }
         }
 
-        /*Check taotl BW is valid */
+        /* Check total BW is valid */
         if ((100 != *total_bw) || (0 == *total_bw)) {
                 if (0 == *total_bw) {
                         DP(NETIF_MSG_LINK, "bnx2x_ets_E3B0_config toatl BW"
@@ -1726,7 +1720,7 @@ static int bnx2x_xmac_enable(struct link_params *params,
 
         /* Check loopback mode */
         if (lb)
-                val |= XMAC_CTRL_REG_CORE_LOCAL_LPBK;
+                val |= XMAC_CTRL_REG_LINE_LOCAL_LPBK;
         REG_WR(bp, xmac_base + XMAC_REG_CTRL, val);
         bnx2x_set_xumac_nig(params,
                             ((vars->flow_ctrl & BNX2X_FLOW_CTRL_TX) != 0), 1);
@@ -3630,6 +3624,12 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
         bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
                          MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT1, val16);
 
+        /* Advertised and set FEC (Forward Error Correction) */
+        bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
+                         MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT2,
+                         (MDIO_WC_REG_AN_IEEE1BLK_AN_ADV2_FEC_ABILITY |
+                          MDIO_WC_REG_AN_IEEE1BLK_AN_ADV2_FEC_REQ));
+
         /* Enable CL37 BAM */
         if (REG_RD(bp, params->shmem_base +
                    offsetof(struct shmem_region, dev_info.
@@ -5924,7 +5924,7 @@ int bnx2x_set_led(struct link_params *params,
                                (tmp | EMAC_LED_OVERRIDE));
                 /*
                  * return here without enabling traffic
-                 * LED blink andsetting rate in ON mode.
+                 * LED blink and setting rate in ON mode.
                  * In oper mode, enabling LED blink
                  * and setting rate is needed.
                  */
@@ -5936,7 +5936,11 @@
                  * This is a work-around for HW issue found when link
                  * is up in CL73
                  */
-                REG_WR(bp, NIG_REG_LED_10G_P0 + port*4, 1);
+                if ((!CHIP_IS_E3(bp)) ||
+                    (CHIP_IS_E3(bp) &&
+                     mode == LED_MODE_ON))
+                        REG_WR(bp, NIG_REG_LED_10G_P0 + port*4, 1);
+
                 if (CHIP_IS_E1x(bp) ||
                     CHIP_IS_E2(bp) ||
                     (mode == LED_MODE_ON))
@@ -10638,8 +10642,7 @@ static struct bnx2x_phy phy_warpcore = {
         .type           = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT,
         .addr           = 0xff,
         .def_md_devad   = 0,
-        .flags          = (FLAGS_HW_LOCK_REQUIRED |
-                           FLAGS_TX_ERROR_CHECK),
+        .flags          = FLAGS_HW_LOCK_REQUIRED,
         .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
         .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
         .mdio_ctrl      = 0,
@@ -10765,8 +10768,7 @@ static struct bnx2x_phy phy_8706 = {
         .type           = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706,
         .addr           = 0xff,
         .def_md_devad   = 0,
-        .flags          = (FLAGS_INIT_XGXS_FIRST |
-                           FLAGS_TX_ERROR_CHECK),
+        .flags          = FLAGS_INIT_XGXS_FIRST,
         .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
         .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
         .mdio_ctrl      = 0,
@@ -10797,8 +10799,7 @@ static struct bnx2x_phy phy_8726 = {
         .addr           = 0xff,
         .def_md_devad   = 0,
         .flags          = (FLAGS_HW_LOCK_REQUIRED |
-                           FLAGS_INIT_XGXS_FIRST |
-                           FLAGS_TX_ERROR_CHECK),
+                           FLAGS_INIT_XGXS_FIRST),
         .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
         .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
         .mdio_ctrl      = 0,
@@ -10829,8 +10830,7 @@ static struct bnx2x_phy phy_8727 = {
         .type           = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727,
         .addr           = 0xff,
         .def_md_devad   = 0,
-        .flags          = (FLAGS_FAN_FAILURE_DET_REQ |
-                           FLAGS_TX_ERROR_CHECK),
+        .flags          = FLAGS_FAN_FAILURE_DET_REQ,
         .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
         .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
         .mdio_ctrl      = 0,
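The two ETS hunks above are complementary: bnx2x_ets_e3b0_get_total_bw() stops rejecting a class of service configured with 0% bandwidth, and bnx2x_ets_e3b0_set_cos_bw() substitutes a weight of 1 for such a class so the NIG/PBF credit-weight registers are never programmed with zero. A sketch of the computation, with min_w_val and the register layout abstracted away (the caller is assumed to have validated total_bw == 100, as the driver does):

#include <stdint.h>

/* Credit weight for one class of service (sketch of the fixed formula).
 * bw == 0 is now allowed per class, so clamp to 1 instead of letting the
 * weight collapse to zero. */
static uint32_t cos_credit_weight(uint32_t bw, uint32_t min_w_val,
                                  uint32_t total_bw)
{
        return ((bw ? bw : 1) * min_w_val) / total_bw;
}

/* Before the fix, a zero-bandwidth class failed the whole ETS request
 * with -EINVAL; with the clamp it simply receives the smallest non-zero
 * share of the link. */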
diff --git a/drivers/net/bnx2x/bnx2x_main.c b/drivers/net/bnx2x/bnx2x_main.c
index f74582a22c68..c027e9341a1a 100644
--- a/drivers/net/bnx2x/bnx2x_main.c
+++ b/drivers/net/bnx2x/bnx2x_main.c
@@ -407,8 +407,8 @@ u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type,
                 opcode |= (DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET);
 
         opcode |= (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0);
-        opcode |= ((BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT) |
-                   (BP_E1HVN(bp) << DMAE_COMMAND_DST_VN_SHIFT));
+        opcode |= ((BP_VN(bp) << DMAE_CMD_E1HVN_SHIFT) |
+                   (BP_VN(bp) << DMAE_COMMAND_DST_VN_SHIFT));
         opcode |= (DMAE_COM_SET_ERR << DMAE_COMMAND_ERR_POLICY_SHIFT);
 
 #ifdef __BIG_ENDIAN
@@ -1419,7 +1419,7 @@ static void bnx2x_hc_int_enable(struct bnx2x *bp)
         if (!CHIP_IS_E1(bp)) {
                 /* init leading/trailing edge */
                 if (IS_MF(bp)) {
-                        val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
+                        val = (0xee0f | (1 << (BP_VN(bp) + 4)));
                         if (bp->port.pmf)
                                 /* enable nig and gpio3 attention */
                                 val |= 0x1100;
@@ -1471,7 +1471,7 @@ static void bnx2x_igu_int_enable(struct bnx2x *bp)
 
         /* init leading/trailing edge */
         if (IS_MF(bp)) {
-                val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
+                val = (0xee0f | (1 << (BP_VN(bp) + 4)));
                 if (bp->port.pmf)
                         /* enable nig and gpio3 attention */
                         val |= 0x1100;
@@ -2287,7 +2287,7 @@ static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
         int vn;
 
         bp->vn_weight_sum = 0;
-        for (vn = VN_0; vn < E1HVN_MAX; vn++) {
+        for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
                 u32 vn_cfg = bp->mf_config[vn];
                 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
                                    FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
@@ -2320,12 +2320,18 @@ static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
                                         CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
 }
 
+/* returns func by VN for current port */
+static inline int func_by_vn(struct bnx2x *bp, int vn)
+{
+        return 2 * vn + BP_PORT(bp);
+}
+
 static void bnx2x_init_vn_minmax(struct bnx2x *bp, int vn)
 {
         struct rate_shaping_vars_per_vn m_rs_vn;
         struct fairness_vars_per_vn m_fair_vn;
         u32 vn_cfg = bp->mf_config[vn];
-        int func = 2*vn + BP_PORT(bp);
+        int func = func_by_vn(bp, vn);
         u16 vn_min_rate, vn_max_rate;
         int i;
 
@@ -2422,7 +2428,7 @@ void bnx2x_read_mf_cfg(struct bnx2x *bp)
          *
          *      and there are 2 functions per port
          */
-        for (vn = VN_0; vn < E1HVN_MAX; vn++) {
+        for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
                 int /*abs*/func = n * (2 * vn + BP_PORT(bp)) + BP_PATH(bp);
 
                 if (func >= E1H_FUNC_MAX)
@@ -2454,7 +2460,7 @@ static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type)
 
         /* calculate and set min-max rate for each vn */
         if (bp->port.pmf)
-                for (vn = VN_0; vn < E1HVN_MAX; vn++)
+                for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++)
                         bnx2x_init_vn_minmax(bp, vn);
 
         /* always enable rate shaping and fairness */
@@ -2473,16 +2479,15 @@ static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type)
 
 static inline void bnx2x_link_sync_notify(struct bnx2x *bp)
 {
-        int port = BP_PORT(bp);
         int func;
         int vn;
 
         /* Set the attention towards other drivers on the same port */
-        for (vn = VN_0; vn < E1HVN_MAX; vn++) {
-                if (vn == BP_E1HVN(bp))
+        for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
+                if (vn == BP_VN(bp))
                         continue;
 
-                func = ((vn << 1) | port);
+                func = func_by_vn(bp, vn);
                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
                        (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
         }
@@ -2577,7 +2582,7 @@ static void bnx2x_pmf_update(struct bnx2x *bp)
         bnx2x_dcbx_pmf_update(bp);
 
         /* enable nig attention */
-        val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
+        val = (0xff0f | (1 << (BP_VN(bp) + 4)));
         if (bp->common.int_block == INT_BLOCK_HC) {
                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
@@ -2756,8 +2761,14 @@ static void bnx2x_pf_rx_q_prep(struct bnx2x *bp,
         u16 tpa_agg_size = 0;
 
         if (!fp->disable_tpa) {
-                pause->sge_th_hi = 250;
-                pause->sge_th_lo = 150;
+                pause->sge_th_lo = SGE_TH_LO(bp);
+                pause->sge_th_hi = SGE_TH_HI(bp);
+
+                /* validate SGE ring has enough to cross high threshold */
+                WARN_ON(bp->dropless_fc &&
+                        pause->sge_th_hi + FW_PREFETCH_CNT >
+                        MAX_RX_SGE_CNT * NUM_RX_SGE_PAGES);
+
                 tpa_agg_size = min_t(u32,
                         (min_t(u32, 8, MAX_SKB_FRAGS) *
                         SGE_PAGE_SIZE * PAGES_PER_SGE), 0xffff);
@@ -2771,10 +2782,21 @@
 
         /* pause - not for e1 */
         if (!CHIP_IS_E1(bp)) {
-                pause->bd_th_hi = 350;
-                pause->bd_th_lo = 250;
-                pause->rcq_th_hi = 350;
-                pause->rcq_th_lo = 250;
+                pause->bd_th_lo = BD_TH_LO(bp);
+                pause->bd_th_hi = BD_TH_HI(bp);
+
+                pause->rcq_th_lo = RCQ_TH_LO(bp);
+                pause->rcq_th_hi = RCQ_TH_HI(bp);
+                /*
+                 * validate that rings have enough entries to cross
+                 * high thresholds
+                 */
+                WARN_ON(bp->dropless_fc &&
+                        pause->bd_th_hi + FW_PREFETCH_CNT >
+                        bp->rx_ring_size);
+                WARN_ON(bp->dropless_fc &&
+                        pause->rcq_th_hi + FW_PREFETCH_CNT >
+                        NUM_RCQ_RINGS * MAX_RCQ_DESC_CNT);
 
                 pause->pri_map = 1;
         }
@@ -2802,9 +2824,7 @@
          * For PF Clients it should be the maximum avaliable number.
          * VF driver(s) may want to define it to a smaller value.
          */
-        rxq_init->max_tpa_queues =
-                (CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
-                ETH_MAX_AGGREGATION_QUEUES_E1H_E2);
+        rxq_init->max_tpa_queues = MAX_AGG_QS(bp);
 
         rxq_init->cache_line_log = BNX2X_RX_ALIGN_SHIFT;
         rxq_init->fw_sb_id = fp->fw_sb_id;
@@ -4808,6 +4828,37 @@ void bnx2x_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm,
         hc_sm->time_to_expire = 0xFFFFFFFF;
 }
 
+
+/* allocates state machine ids. */
+static inline
+void bnx2x_map_sb_state_machines(struct hc_index_data *index_data)
+{
+        /* zero out state machine indices */
+        /* rx indices */
+        index_data[HC_INDEX_ETH_RX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID;
+
+        /* tx indices */
+        index_data[HC_INDEX_OOO_TX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID;
+        index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags &= ~HC_INDEX_DATA_SM_ID;
+        index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags &= ~HC_INDEX_DATA_SM_ID;
+        index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags &= ~HC_INDEX_DATA_SM_ID;
+
+        /* map indices */
+        /* rx indices */
+        index_data[HC_INDEX_ETH_RX_CQ_CONS].flags |=
+                SM_RX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
+
+        /* tx indices */
+        index_data[HC_INDEX_OOO_TX_CQ_CONS].flags |=
+                SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
+        index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags |=
+                SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
+        index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags |=
+                SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
+        index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags |=
+                SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
+}
+
 static void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
                           u8 vf_valid, int fw_sb_id, int igu_sb_id)
 {
@@ -4839,6 +4890,7 @@ static void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
                 hc_sm_p = sb_data_e2.common.state_machine;
                 sb_data_p = (u32 *)&sb_data_e2;
                 data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
+                bnx2x_map_sb_state_machines(sb_data_e2.index_data);
         } else {
                 memset(&sb_data_e1x, 0,
                        sizeof(struct hc_status_block_data_e1x));
@@ -4853,6 +4905,7 @@ static void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
                 hc_sm_p = sb_data_e1x.common.state_machine;
                 sb_data_p = (u32 *)&sb_data_e1x;
                 data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
+                bnx2x_map_sb_state_machines(sb_data_e1x.index_data);
         }
 
         bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_RX_ID],
@@ -5802,7 +5855,7 @@ static int bnx2x_init_hw_common(struct bnx2x *bp)
          * take the UNDI lock to protect undi_unload flow from accessing
          * registers while we're resetting the chip
          */
-        bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
+        bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
 
         bnx2x_reset_common(bp);
         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
@@ -5814,7 +5867,7 @@ static int bnx2x_init_hw_common(struct bnx2x *bp)
         }
         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, val);
 
-        bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
+        bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
 
         bnx2x_init_block(bp, BLOCK_MISC, PHASE_COMMON);
 
@@ -6671,12 +6724,16 @@ static int bnx2x_init_hw_func(struct bnx2x *bp)
                         if (CHIP_MODE_IS_4_PORT(bp))
                                 dsb_idx = BP_FUNC(bp);
                         else
-                                dsb_idx = BP_E1HVN(bp);
+                                dsb_idx = BP_VN(bp);
 
                         prod_offset = (CHIP_INT_MODE_IS_BC(bp) ?
                                        IGU_BC_BASE_DSB_PROD + dsb_idx :
                                        IGU_NORM_BASE_DSB_PROD + dsb_idx);
 
+                        /*
+                         * igu prods come in chunks of E1HVN_MAX (4) -
+                         * does not matters what is the current chip mode
+                         */
                         for (i = 0; i < (num_segs * E1HVN_MAX);
                              i += E1HVN_MAX) {
                                 addr = IGU_REG_PROD_CONS_MEMORY +
@@ -7570,7 +7627,7 @@ u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode)
                 u32 val;
                 /* The mac address is written to entries 1-4 to
                    preserve entry 0 which is used by the PMF */
-                u8 entry = (BP_E1HVN(bp) + 1)*8;
+                u8 entry = (BP_VN(bp) + 1)*8;
 
                 val = (mac_addr[0] << 8) | mac_addr[1];
                 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
@@ -8546,10 +8603,12 @@ static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
         /* Check if there is any driver already loaded */
         val = REG_RD(bp, MISC_REG_UNPREPARED);
         if (val == 0x1) {
-                /* Check if it is the UNDI driver
+
+                bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
+                /*
+                 * Check if it is the UNDI driver
                  * UNDI driver initializes CID offset for normal bell to 0x7
                  */
-                bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
                 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
                 if (val == 0x7) {
                         u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
@@ -8587,9 +8646,6 @@ static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
                         bnx2x_fw_command(bp, reset_code, 0);
                 }
 
-                /* now it's safe to release the lock */
-                bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
-
                 bnx2x_undi_int_disable(bp);
                 port = BP_PORT(bp);
 
@@ -8639,8 +8695,10 @@ static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
                         bp->fw_seq =
                               (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
                                DRV_MSG_SEQ_NUMBER_MASK);
-                } else
-                        bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
+                }
+
+                /* now it's safe to release the lock */
+                bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
         }
 }
 
8646 8704
@@ -8777,13 +8835,13 @@ static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
8777static void __devinit bnx2x_get_igu_cam_info(struct bnx2x *bp) 8835static void __devinit bnx2x_get_igu_cam_info(struct bnx2x *bp)
8778{ 8836{
8779 int pfid = BP_FUNC(bp); 8837 int pfid = BP_FUNC(bp);
8780 int vn = BP_E1HVN(bp);
8781 int igu_sb_id; 8838 int igu_sb_id;
8782 u32 val; 8839 u32 val;
8783 u8 fid, igu_sb_cnt = 0; 8840 u8 fid, igu_sb_cnt = 0;
8784 8841
8785 bp->igu_base_sb = 0xff; 8842 bp->igu_base_sb = 0xff;
8786 if (CHIP_INT_MODE_IS_BC(bp)) { 8843 if (CHIP_INT_MODE_IS_BC(bp)) {
8844 int vn = BP_VN(bp);
8787 igu_sb_cnt = bp->igu_sb_cnt; 8845 igu_sb_cnt = bp->igu_sb_cnt;
8788 bp->igu_base_sb = (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn) * 8846 bp->igu_base_sb = (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn) *
8789 FP_SB_MAX_E1x; 8847 FP_SB_MAX_E1x;
@@ -9416,6 +9474,10 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
9416 bp->igu_base_sb = 0; 9474 bp->igu_base_sb = 0;
9417 } else { 9475 } else {
9418 bp->common.int_block = INT_BLOCK_IGU; 9476 bp->common.int_block = INT_BLOCK_IGU;
9477
9478 /* do not allow device reset during IGU info preocessing */
9479 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
9480
9419 val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION); 9481 val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION);
9420 9482
9421 if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) { 9483 if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
@@ -9447,6 +9509,7 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
9447 9509
9448 bnx2x_get_igu_cam_info(bp); 9510 bnx2x_get_igu_cam_info(bp);
9449 9511
9512 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
9450 } 9513 }
9451 9514
9452 /* 9515 /*
@@ -9473,7 +9536,7 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
9473 9536
9474 bp->mf_ov = 0; 9537 bp->mf_ov = 0;
9475 bp->mf_mode = 0; 9538 bp->mf_mode = 0;
9476 vn = BP_E1HVN(bp); 9539 vn = BP_VN(bp);
9477 9540
9478 if (!CHIP_IS_E1(bp) && !BP_NOMCP(bp)) { 9541 if (!CHIP_IS_E1(bp) && !BP_NOMCP(bp)) {
9479 BNX2X_DEV_INFO("shmem2base 0x%x, size %d, mfcfg offset %d\n", 9542 BNX2X_DEV_INFO("shmem2base 0x%x, size %d, mfcfg offset %d\n",
@@ -9593,13 +9656,6 @@
         /* port info */
         bnx2x_get_port_hwinfo(bp);
 
-        if (!BP_NOMCP(bp)) {
-                bp->fw_seq =
-                    (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
-                     DRV_MSG_SEQ_NUMBER_MASK);
-                BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
-        }
-
         /* Get MAC addresses */
         bnx2x_get_mac_hwinfo(bp);
 
@@ -9765,6 +9821,14 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp)
         if (!BP_NOMCP(bp))
                 bnx2x_undi_unload(bp);
 
+        /* init fw_seq after undi_unload! */
+        if (!BP_NOMCP(bp)) {
+                bp->fw_seq =
+                    (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
+                     DRV_MSG_SEQ_NUMBER_MASK);
+                BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
+        }
+
         if (CHIP_REV_IS_FPGA(bp))
                 dev_err(&bp->pdev->dev, "FPGA detected\n");
 
@@ -10259,17 +10323,21 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
         /* clean indirect addresses */
         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
                                PCICFG_VENDOR_ID_OFFSET);
-        /* Clean the following indirect addresses for all functions since it
+        /*
+         * Clean the following indirect addresses for all functions since it
          * is not used by the driver.
          */
         REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0, 0);
         REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0, 0);
         REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0, 0);
         REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0, 0);
-        REG_WR(bp, PXP2_REG_PGL_ADDR_88_F1, 0);
-        REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F1, 0);
-        REG_WR(bp, PXP2_REG_PGL_ADDR_90_F1, 0);
-        REG_WR(bp, PXP2_REG_PGL_ADDR_94_F1, 0);
+
+        if (CHIP_IS_E1x(bp)) {
+                REG_WR(bp, PXP2_REG_PGL_ADDR_88_F1, 0);
+                REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F1, 0);
+                REG_WR(bp, PXP2_REG_PGL_ADDR_90_F1, 0);
+                REG_WR(bp, PXP2_REG_PGL_ADDR_94_F1, 0);
+        }
 
         /*
          * Enable internal target-read (in case we are probed after PF FLR).
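A recurring theme in the bnx2x_main.c hunks is the retirement of BP_E1HVN() in favour of BP_VN() plus the new func_by_vn() helper, with VN loops bounded by BP_MAX_VN_NUM(bp) (2 VNs in 4-port mode, 4 otherwise) rather than the fixed E1HVN_MAX. The mapping itself is unchanged: functions alternate between the two ports, so VN n on port p is function 2*n + p. This standalone sketch just prints the mapping for an assumed 2-port device:

#include <stdio.h>

/* Same arithmetic as the driver's new func_by_vn() helper. */
static int func_by_vn(int port, int vn)
{
        return 2 * vn + port;
}

int main(void)
{
        int max_vn = 4;   /* BP_MAX_VN_NUM() on a 2-port chip (assumed) */

        for (int port = 0; port < 2; port++)
                for (int vn = 0; vn < max_vn; vn++)
                        printf("port %d vn %d -> func %d\n",
                               port, vn, func_by_vn(port, vn));
        return 0;
}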
diff --git a/drivers/net/bnx2x/bnx2x_reg.h b/drivers/net/bnx2x/bnx2x_reg.h
index 40266c14e6dc..750e8445dac4 100644
--- a/drivers/net/bnx2x/bnx2x_reg.h
+++ b/drivers/net/bnx2x/bnx2x_reg.h
@@ -5320,7 +5320,7 @@
 #define XCM_REG_XX_OVFL_EVNT_ID                                  0x20058
 #define XMAC_CLEAR_RX_LSS_STATUS_REG_CLEAR_LOCAL_FAULT_STATUS    (0x1<<0)
 #define XMAC_CLEAR_RX_LSS_STATUS_REG_CLEAR_REMOTE_FAULT_STATUS   (0x1<<1)
-#define XMAC_CTRL_REG_CORE_LOCAL_LPBK                            (0x1<<3)
+#define XMAC_CTRL_REG_LINE_LOCAL_LPBK                            (0x1<<2)
 #define XMAC_CTRL_REG_RX_EN                                      (0x1<<1)
 #define XMAC_CTRL_REG_SOFT_RESET                                 (0x1<<6)
 #define XMAC_CTRL_REG_TX_EN                                      (0x1<<0)
@@ -5766,7 +5766,7 @@
 #define HW_LOCK_RESOURCE_RECOVERY_LEADER_0                       8
 #define HW_LOCK_RESOURCE_RECOVERY_LEADER_1                       9
 #define HW_LOCK_RESOURCE_SPIO                                    2
-#define HW_LOCK_RESOURCE_UNDI                                    5
+#define HW_LOCK_RESOURCE_RESET                                   5
 #define AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT                    (0x1<<4)
 #define AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR                    (0x1<<5)
 #define AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR                    (0x1<<18)
@@ -6853,6 +6853,9 @@ The other bits are reserved and should be zero*/
 #define MDIO_WC_REG_IEEE0BLK_AUTONEGNP                  0x7
 #define MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT0       0x10
 #define MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT1       0x11
+#define MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT2       0x12
+#define MDIO_WC_REG_AN_IEEE1BLK_AN_ADV2_FEC_ABILITY     0x4000
+#define MDIO_WC_REG_AN_IEEE1BLK_AN_ADV2_FEC_REQ         0x8000
 #define MDIO_WC_REG_PMD_IEEE9BLK_TENGBASE_KR_PMD_CONTROL_REGISTER_150 0x96
 #define MDIO_WC_REG_XGXSBLK0_XGXSCONTROL                0x8000
 #define MDIO_WC_REG_XGXSBLK0_MISCCONTROL1               0x800e
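The first register hunk here is the other half of the "bnx2x: Fix XMAC loopback test" change in bnx2x_link.c: the loopback test needs line-side loopback (bit 2 of XMAC_REG_CTRL), not the core-side loopback at bit 3 that the driver previously set. In isolation, assuming val also carries the TX/RX enable bits as bnx2x_xmac_enable() appears to do:

/* XMAC_REG_CTRL bits, as defined by this patch. */
#define XMAC_CTRL_REG_TX_EN            (0x1 << 0)
#define XMAC_CTRL_REG_RX_EN            (0x1 << 1)
#define XMAC_CTRL_REG_LINE_LOCAL_LPBK  (0x1 << 2)  /* new: used for lb test */
/* bit 3 (core-local loopback) is what the driver previously set. */

static unsigned int xmac_ctrl_val(int loopback)
{
        unsigned int val = XMAC_CTRL_REG_TX_EN | XMAC_CTRL_REG_RX_EN;

        if (loopback)
                val |= XMAC_CTRL_REG_LINE_LOCAL_LPBK;
        return val;
}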
diff --git a/drivers/net/bnx2x/bnx2x_stats.c b/drivers/net/bnx2x/bnx2x_stats.c
index 771f6803b238..9908f2bbcf73 100644
--- a/drivers/net/bnx2x/bnx2x_stats.c
+++ b/drivers/net/bnx2x/bnx2x_stats.c
@@ -710,7 +710,8 @@ static int bnx2x_hw_stats_update(struct bnx2x *bp)
                 break;
 
         case MAC_TYPE_NONE: /* unreached */
-                BNX2X_ERR("stats updated by DMAE but no MAC active\n");
+                DP(BNX2X_MSG_STATS,
+                   "stats updated by DMAE but no MAC active\n");
                 return -1;
 
         default: /* unreached */
@@ -1391,7 +1392,7 @@ static void bnx2x_port_stats_base_init(struct bnx2x *bp)
 
 static void bnx2x_func_stats_base_init(struct bnx2x *bp)
 {
-        int vn, vn_max = IS_MF(bp) ? E1HVN_MAX : E1VN_MAX;
+        int vn, vn_max = IS_MF(bp) ? BP_MAX_VN_NUM(bp) : E1VN_MAX;
         u32 func_stx;
 
         /* sanity */
@@ -1404,7 +1405,7 @@ static void bnx2x_func_stats_base_init(struct bnx2x *bp)
         func_stx = bp->func_stx;
 
         for (vn = VN_0; vn < vn_max; vn++) {
-                int mb_idx = CHIP_IS_E1x(bp) ? 2*vn + BP_PORT(bp) : vn;
+                int mb_idx = BP_FW_MB_IDX_VN(bp, vn);
 
                 bp->func_stx = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_param);
                 bnx2x_func_stats_init(bp);
diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c
index a81249246ece..2adc294f512a 100644
--- a/drivers/net/can/ti_hecc.c
+++ b/drivers/net/can/ti_hecc.c
@@ -46,6 +46,7 @@
 #include <linux/skbuff.h>
 #include <linux/platform_device.h>
 #include <linux/clk.h>
+#include <linux/io.h>

 #include <linux/can/dev.h>
 #include <linux/can/error.h>
diff --git a/drivers/net/e1000/e1000_hw.c b/drivers/net/e1000/e1000_hw.c
index 8545c7aa93eb..a5a89ecb6f36 100644
--- a/drivers/net/e1000/e1000_hw.c
+++ b/drivers/net/e1000/e1000_hw.c
@@ -4026,6 +4026,12 @@ s32 e1000_validate_eeprom_checksum(struct e1000_hw *hw)
        checksum += eeprom_data;
    }

+#ifdef CONFIG_PARISC
+   /* This is a signature and not a checksum on HP c8000 */
+   if ((hw->subsystem_vendor_id == 0x103C) && (eeprom_data == 0x16d6))
+       return E1000_SUCCESS;
+
+#endif
    if (checksum == (u16) EEPROM_SUM)
        return E1000_SUCCESS;
    else {
diff --git a/drivers/net/ibmveth.c b/drivers/net/ibmveth.c
index 3e6679269400..8dd5fccef725 100644
--- a/drivers/net/ibmveth.c
+++ b/drivers/net/ibmveth.c
@@ -757,7 +757,7 @@ static int ibmveth_set_csum_offload(struct net_device *dev, u32 data)
    struct ibmveth_adapter *adapter = netdev_priv(dev);
    unsigned long set_attr, clr_attr, ret_attr;
    unsigned long set_attr6, clr_attr6;
-   long ret, ret6;
+   long ret, ret4, ret6;
    int rc1 = 0, rc2 = 0;
    int restart = 0;

@@ -770,6 +770,8 @@ static int ibmveth_set_csum_offload(struct net_device *dev, u32 data)

    set_attr = 0;
    clr_attr = 0;
+   set_attr6 = 0;
+   clr_attr6 = 0;

    if (data) {
        set_attr = IBMVETH_ILLAN_IPV4_TCP_CSUM;
@@ -784,16 +786,20 @@ static int ibmveth_set_csum_offload(struct net_device *dev, u32 data)
    if (ret == H_SUCCESS && !(ret_attr & IBMVETH_ILLAN_ACTIVE_TRUNK) &&
        !(ret_attr & IBMVETH_ILLAN_TRUNK_PRI_MASK) &&
        (ret_attr & IBMVETH_ILLAN_PADDED_PKT_CSUM)) {
-       ret = h_illan_attributes(adapter->vdev->unit_address, clr_attr,
+       ret4 = h_illan_attributes(adapter->vdev->unit_address, clr_attr,
                     set_attr, &ret_attr);

-       if (ret != H_SUCCESS) {
+       if (ret4 != H_SUCCESS) {
            netdev_err(dev, "unable to change IPv4 checksum "
                    "offload settings. %d rc=%ld\n",
-                   data, ret);
+                   data, ret4);
+
+           h_illan_attributes(adapter->vdev->unit_address,
+                      set_attr, clr_attr, &ret_attr);
+
+           if (data == 1)
+               dev->features &= ~NETIF_F_IP_CSUM;

-           ret = h_illan_attributes(adapter->vdev->unit_address,
-                        set_attr, clr_attr, &ret_attr);
        } else {
            adapter->fw_ipv4_csum_support = data;
        }
@@ -804,15 +810,18 @@ static int ibmveth_set_csum_offload(struct net_device *dev, u32 data)
    if (ret6 != H_SUCCESS) {
        netdev_err(dev, "unable to change IPv6 checksum "
                "offload settings. %d rc=%ld\n",
-               data, ret);
+               data, ret6);
+
+       h_illan_attributes(adapter->vdev->unit_address,
+                  set_attr6, clr_attr6, &ret_attr);
+
+       if (data == 1)
+           dev->features &= ~NETIF_F_IPV6_CSUM;

-       ret = h_illan_attributes(adapter->vdev->unit_address,
-                    set_attr6, clr_attr6,
-                    &ret_attr);
    } else
        adapter->fw_ipv6_csum_support = data;

-   if (ret != H_SUCCESS || ret6 != H_SUCCESS)
+   if (ret4 == H_SUCCESS || ret6 == H_SUCCESS)
        adapter->rx_csum = data;
    else
        rc1 = -EIO;
@@ -930,6 +939,7 @@ static netdev_tx_t ibmveth_start_xmit(struct sk_buff *skb,
    union ibmveth_buf_desc descs[6];
    int last, i;
    int force_bounce = 0;
+   dma_addr_t dma_addr;

    /*
     * veth handles a maximum of 6 segments including the header, so
@@ -994,17 +1004,16 @@ retry_bounce:
    }

    /* Map the header */
-   descs[0].fields.address = dma_map_single(&adapter->vdev->dev, skb->data,
-                        skb_headlen(skb),
-                        DMA_TO_DEVICE);
-   if (dma_mapping_error(&adapter->vdev->dev, descs[0].fields.address))
+   dma_addr = dma_map_single(&adapter->vdev->dev, skb->data,
+                 skb_headlen(skb), DMA_TO_DEVICE);
+   if (dma_mapping_error(&adapter->vdev->dev, dma_addr))
        goto map_failed;

    descs[0].fields.flags_len = desc_flags | skb_headlen(skb);
+   descs[0].fields.address = dma_addr;

    /* Map the frags */
    for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-       unsigned long dma_addr;
        skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

        dma_addr = dma_map_page(&adapter->vdev->dev, frag->page,
@@ -1026,7 +1035,12 @@ retry_bounce:
        netdev->stats.tx_bytes += skb->len;
    }

-   for (i = 0; i < skb_shinfo(skb)->nr_frags + 1; i++)
+   dma_unmap_single(&adapter->vdev->dev,
+            descs[0].fields.address,
+            descs[0].fields.flags_len & IBMVETH_BUF_LEN_MASK,
+            DMA_TO_DEVICE);
+
+   for (i = 1; i < skb_shinfo(skb)->nr_frags + 1; i++)
        dma_unmap_page(&adapter->vdev->dev, descs[i].fields.address,
                   descs[i].fields.flags_len & IBMVETH_BUF_LEN_MASK,
                   DMA_TO_DEVICE);
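The ibmveth hunks above follow the standard kernel DMA discipline: map into a scratch dma_addr_t, check dma_mapping_error() before the address is published in a descriptor, and pair dma_map_single() with dma_unmap_single() (the header was previously, and wrongly, unmapped with dma_unmap_page()). A minimal sketch of the pattern, with xmit_one, dev, buf and len as hypothetical stand-ins rather than anything in this driver:

    static int xmit_one(struct device *dev, void *buf, size_t len)
    {
        dma_addr_t dma_addr;

        dma_addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
        if (dma_mapping_error(dev, dma_addr))
            return -ENOMEM; /* never hand an unchecked handle to hardware */

        /* ... publish dma_addr in a descriptor, start the I/O, wait ... */

        dma_unmap_single(dev, dma_addr, len, DMA_TO_DEVICE);
        return 0;
    }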
diff --git a/drivers/net/pch_gbe/pch_gbe.h b/drivers/net/pch_gbe/pch_gbe.h
index 59fac77d0dbb..a09a07197eb5 100644
--- a/drivers/net/pch_gbe/pch_gbe.h
+++ b/drivers/net/pch_gbe/pch_gbe.h
@@ -127,8 +127,8 @@ struct pch_gbe_regs {

 /* Reset */
 #define PCH_GBE_ALL_RST        0x80000000 /* All reset */
-#define PCH_GBE_TX_RST         0x40000000 /* TX MAC, TX FIFO, TX DMA reset */
-#define PCH_GBE_RX_RST         0x04000000 /* RX MAC, RX FIFO, RX DMA reset */
+#define PCH_GBE_TX_RST         0x00008000 /* TX MAC, TX FIFO, TX DMA reset */
+#define PCH_GBE_RX_RST         0x00004000 /* RX MAC, RX FIFO, RX DMA reset */

 /* TCP/IP Accelerator Control */
 #define PCH_GBE_EX_LIST_EN     0x00000008 /* External List Enable */
@@ -276,6 +276,9 @@ struct pch_gbe_regs {
 #define PCH_GBE_RX_DMA_EN      0x00000002 /* Enables Receive DMA */
 #define PCH_GBE_TX_DMA_EN      0x00000001 /* Enables Transmission DMA */

+/* RX DMA STATUS */
+#define PCH_GBE_IDLE_CHECK     0xFFFFFFFE
+
 /* Wake On LAN Status */
 #define PCH_GBE_WLS_BR         0x00000008 /* Broadcas Address */
 #define PCH_GBE_WLS_MLT        0x00000004 /* Multicast Address */
@@ -471,6 +474,7 @@ struct pch_gbe_tx_desc {
 struct pch_gbe_buffer {
    struct sk_buff *skb;
    dma_addr_t dma;
+   unsigned char *rx_buffer;
    unsigned long time_stamp;
    u16 length;
    bool mapped;
@@ -511,6 +515,9 @@ struct pch_gbe_tx_ring {
 struct pch_gbe_rx_ring {
    struct pch_gbe_rx_desc *desc;
    dma_addr_t dma;
+   unsigned char *rx_buff_pool;
+   dma_addr_t rx_buff_pool_logic;
+   unsigned int rx_buff_pool_size;
    unsigned int size;
    unsigned int count;
    unsigned int next_to_use;
@@ -622,6 +629,7 @@ struct pch_gbe_adapter {
    unsigned long rx_buffer_len;
    unsigned long tx_queue_len;
    bool have_msi;
+   bool rx_stop_flag;
 };

 extern const char pch_driver_version[];
diff --git a/drivers/net/pch_gbe/pch_gbe_main.c b/drivers/net/pch_gbe/pch_gbe_main.c
index eac3c5ca9731..567ff10889be 100644
--- a/drivers/net/pch_gbe/pch_gbe_main.c
+++ b/drivers/net/pch_gbe/pch_gbe_main.c
@@ -20,7 +20,6 @@

 #include "pch_gbe.h"
 #include "pch_gbe_api.h"
-#include <linux/prefetch.h>

 #define DRV_VERSION     "1.00"
 const char pch_driver_version[] = DRV_VERSION;
@@ -34,11 +33,15 @@ const char pch_driver_version[] = DRV_VERSION;
 #define PCH_GBE_WATCHDOG_PERIOD     (1 * HZ)    /* watchdog time */
 #define PCH_GBE_COPYBREAK_DEFAULT   256
 #define PCH_GBE_PCI_BAR             1
+#define PCH_GBE_RESERVE_MEMORY      0x200000    /* 2MB */

 /* Macros for ML7223 */
 #define PCI_VENDOR_ID_ROHM          0x10db
 #define PCI_DEVICE_ID_ROHM_ML7223_GBE   0x8013

+/* Macros for ML7831 */
+#define PCI_DEVICE_ID_ROHM_ML7831_GBE   0x8802
+
 #define PCH_GBE_TX_WEIGHT         64
 #define PCH_GBE_RX_WEIGHT         64
 #define PCH_GBE_RX_BUFFER_WRITE   16
@@ -52,6 +55,7 @@ const char pch_driver_version[] = DRV_VERSION;
    )

 /* Ethertype field values */
+#define PCH_GBE_MAX_RX_BUFFER_SIZE      0x2880
 #define PCH_GBE_MAX_JUMBO_FRAME_SIZE    10318
 #define PCH_GBE_FRAME_SIZE_2048         2048
 #define PCH_GBE_FRAME_SIZE_4096         4096
@@ -83,10 +87,12 @@ const char pch_driver_version[] = DRV_VERSION;
 #define PCH_GBE_INT_ENABLE_MASK ( \
    PCH_GBE_INT_RX_DMA_CMPLT | \
    PCH_GBE_INT_RX_DSC_EMP | \
+   PCH_GBE_INT_RX_FIFO_ERR | \
    PCH_GBE_INT_WOL_DET | \
    PCH_GBE_INT_TX_CMPLT \
    )

+#define PCH_GBE_INT_DISABLE_ALL     0

 static unsigned int copybreak __read_mostly = PCH_GBE_COPYBREAK_DEFAULT;

@@ -138,6 +144,27 @@ static void pch_gbe_wait_clr_bit(void *reg, u32 bit)
    if (!tmp)
        pr_err("Error: busy bit is not cleared\n");
 }
+
+/**
+ * pch_gbe_wait_clr_bit_irq - Wait to clear a bit for interrupt context
+ * @reg:   Pointer of register
+ * @busy:  Busy bit
+ */
+static int pch_gbe_wait_clr_bit_irq(void *reg, u32 bit)
+{
+   u32 tmp;
+   int ret = -1;
+   /* wait busy */
+   tmp = 20;
+   while ((ioread32(reg) & bit) && --tmp)
+       udelay(5);
+   if (!tmp)
+       pr_err("Error: busy bit is not cleared\n");
+   else
+       ret = 0;
+   return ret;
+}
+
 /**
  * pch_gbe_mac_mar_set - Set MAC address register
  * @hw:    Pointer to the HW structure
@@ -189,6 +216,17 @@ static void pch_gbe_mac_reset_hw(struct pch_gbe_hw *hw)
    return;
 }

+static void pch_gbe_mac_reset_rx(struct pch_gbe_hw *hw)
+{
+   /* Read the MAC address. and store to the private data */
+   pch_gbe_mac_read_mac_addr(hw);
+   iowrite32(PCH_GBE_RX_RST, &hw->reg->RESET);
+   pch_gbe_wait_clr_bit_irq(&hw->reg->RESET, PCH_GBE_RX_RST);
+   /* Setup the MAC address */
+   pch_gbe_mac_mar_set(hw, hw->mac.addr, 0);
+   return;
+}
+
 /**
  * pch_gbe_mac_init_rx_addrs - Initialize receive address's
  * @hw:    Pointer to the HW structure
@@ -671,13 +709,8 @@ static void pch_gbe_setup_rctl(struct pch_gbe_adapter *adapter)

    tcpip = ioread32(&hw->reg->TCPIP_ACC);

-   if (netdev->features & NETIF_F_RXCSUM) {
-       tcpip &= ~PCH_GBE_RX_TCPIPACC_OFF;
-       tcpip |= PCH_GBE_RX_TCPIPACC_EN;
-   } else {
-       tcpip |= PCH_GBE_RX_TCPIPACC_OFF;
-       tcpip &= ~PCH_GBE_RX_TCPIPACC_EN;
-   }
+   tcpip |= PCH_GBE_RX_TCPIPACC_OFF;
+   tcpip &= ~PCH_GBE_RX_TCPIPACC_EN;
    iowrite32(tcpip, &hw->reg->TCPIP_ACC);
    return;
 }
@@ -717,13 +750,6 @@ static void pch_gbe_configure_rx(struct pch_gbe_adapter *adapter)
    iowrite32(rdba, &hw->reg->RX_DSC_BASE);
    iowrite32(rdlen, &hw->reg->RX_DSC_SIZE);
    iowrite32((rdba + rdlen), &hw->reg->RX_DSC_SW_P);
-
-   /* Enables Receive DMA */
-   rxdma = ioread32(&hw->reg->DMA_CTRL);
-   rxdma |= PCH_GBE_RX_DMA_EN;
-   iowrite32(rxdma, &hw->reg->DMA_CTRL);
-   /* Enables Receive */
-   iowrite32(PCH_GBE_MRE_MAC_RX_EN, &hw->reg->MAC_RX_EN);
 }

 /**
@@ -1097,6 +1123,48 @@ void pch_gbe_update_stats(struct pch_gbe_adapter *adapter)
    spin_unlock_irqrestore(&adapter->stats_lock, flags);
 }

+static void pch_gbe_stop_receive(struct pch_gbe_adapter *adapter)
+{
+   struct pch_gbe_hw *hw = &adapter->hw;
+   u32 rxdma;
+   u16 value;
+   int ret;
+
+   /* Disable Receive DMA */
+   rxdma = ioread32(&hw->reg->DMA_CTRL);
+   rxdma &= ~PCH_GBE_RX_DMA_EN;
+   iowrite32(rxdma, &hw->reg->DMA_CTRL);
+   /* Wait Rx DMA BUS is IDLE */
+   ret = pch_gbe_wait_clr_bit_irq(&hw->reg->RX_DMA_ST, PCH_GBE_IDLE_CHECK);
+   if (ret) {
+       /* Disable Bus master */
+       pci_read_config_word(adapter->pdev, PCI_COMMAND, &value);
+       value &= ~PCI_COMMAND_MASTER;
+       pci_write_config_word(adapter->pdev, PCI_COMMAND, value);
+       /* Stop Receive */
+       pch_gbe_mac_reset_rx(hw);
+       /* Enable Bus master */
+       value |= PCI_COMMAND_MASTER;
+       pci_write_config_word(adapter->pdev, PCI_COMMAND, value);
+   } else {
+       /* Stop Receive */
+       pch_gbe_mac_reset_rx(hw);
+   }
+}
+
+static void pch_gbe_start_receive(struct pch_gbe_hw *hw)
+{
+   u32 rxdma;
+
+   /* Enables Receive DMA */
+   rxdma = ioread32(&hw->reg->DMA_CTRL);
+   rxdma |= PCH_GBE_RX_DMA_EN;
+   iowrite32(rxdma, &hw->reg->DMA_CTRL);
+   /* Enables Receive */
+   iowrite32(PCH_GBE_MRE_MAC_RX_EN, &hw->reg->MAC_RX_EN);
+   return;
+}
+
 /**
  * pch_gbe_intr - Interrupt Handler
  * @irq:   Interrupt number
@@ -1123,7 +1191,15 @@ static irqreturn_t pch_gbe_intr(int irq, void *data)
    if (int_st & PCH_GBE_INT_RX_FRAME_ERR)
        adapter->stats.intr_rx_frame_err_count++;
    if (int_st & PCH_GBE_INT_RX_FIFO_ERR)
-       adapter->stats.intr_rx_fifo_err_count++;
+       if (!adapter->rx_stop_flag) {
+           adapter->stats.intr_rx_fifo_err_count++;
+           pr_debug("Rx fifo over run\n");
+           adapter->rx_stop_flag = true;
+           int_en = ioread32(&hw->reg->INT_EN);
+           iowrite32((int_en & ~PCH_GBE_INT_RX_FIFO_ERR),
+                 &hw->reg->INT_EN);
+           pch_gbe_stop_receive(adapter);
+       }
    if (int_st & PCH_GBE_INT_RX_DMA_ERR)
        adapter->stats.intr_rx_dma_err_count++;
    if (int_st & PCH_GBE_INT_TX_FIFO_ERR)
@@ -1135,7 +1211,7 @@ static irqreturn_t pch_gbe_intr(int irq, void *data)
    /* When Rx descriptor is empty */
    if ((int_st & PCH_GBE_INT_RX_DSC_EMP)) {
        adapter->stats.intr_rx_dsc_empty_count++;
-       pr_err("Rx descriptor is empty\n");
+       pr_debug("Rx descriptor is empty\n");
        int_en = ioread32(&hw->reg->INT_EN);
        iowrite32((int_en & ~PCH_GBE_INT_RX_DSC_EMP), &hw->reg->INT_EN);
        if (hw->mac.tx_fc_enable) {
@@ -1185,29 +1261,23 @@ pch_gbe_alloc_rx_buffers(struct pch_gbe_adapter *adapter,
    unsigned int i;
    unsigned int bufsz;

-   bufsz = adapter->rx_buffer_len + PCH_GBE_DMA_ALIGN;
+   bufsz = adapter->rx_buffer_len + NET_IP_ALIGN;
    i = rx_ring->next_to_use;

    while ((cleaned_count--)) {
        buffer_info = &rx_ring->buffer_info[i];
-       skb = buffer_info->skb;
-       if (skb) {
-           skb_trim(skb, 0);
-       } else {
-           skb = netdev_alloc_skb(netdev, bufsz);
-           if (unlikely(!skb)) {
-               /* Better luck next round */
-               adapter->stats.rx_alloc_buff_failed++;
-               break;
-           }
-           /* 64byte align */
-           skb_reserve(skb, PCH_GBE_DMA_ALIGN);
-
-           buffer_info->skb = skb;
-           buffer_info->length = adapter->rx_buffer_len;
+       skb = netdev_alloc_skb(netdev, bufsz);
+       if (unlikely(!skb)) {
+           /* Better luck next round */
+           adapter->stats.rx_alloc_buff_failed++;
+           break;
        }
+       /* align */
+       skb_reserve(skb, NET_IP_ALIGN);
+       buffer_info->skb = skb;
+
        buffer_info->dma = dma_map_single(&pdev->dev,
-                         skb->data,
+                         buffer_info->rx_buffer,
                          buffer_info->length,
                          DMA_FROM_DEVICE);
        if (dma_mapping_error(&adapter->pdev->dev, buffer_info->dma)) {
@@ -1240,6 +1310,36 @@ pch_gbe_alloc_rx_buffers(struct pch_gbe_adapter *adapter,
    return;
 }

+static int
+pch_gbe_alloc_rx_buffers_pool(struct pch_gbe_adapter *adapter,
+                 struct pch_gbe_rx_ring *rx_ring, int cleaned_count)
+{
+   struct pci_dev *pdev = adapter->pdev;
+   struct pch_gbe_buffer *buffer_info;
+   unsigned int i;
+   unsigned int bufsz;
+   unsigned int size;
+
+   bufsz = adapter->rx_buffer_len;
+
+   size = rx_ring->count * bufsz + PCH_GBE_RESERVE_MEMORY;
+   rx_ring->rx_buff_pool = dma_alloc_coherent(&pdev->dev, size,
+                          &rx_ring->rx_buff_pool_logic,
+                          GFP_KERNEL);
+   if (!rx_ring->rx_buff_pool) {
+       pr_err("Unable to allocate memory for the receive poll buffer\n");
+       return -ENOMEM;
+   }
+   memset(rx_ring->rx_buff_pool, 0, size);
+   rx_ring->rx_buff_pool_size = size;
+   for (i = 0; i < rx_ring->count; i++) {
+       buffer_info = &rx_ring->buffer_info[i];
+       buffer_info->rx_buffer = rx_ring->rx_buff_pool + bufsz * i;
+       buffer_info->length = bufsz;
+   }
+   return 0;
+}
+
 /**
  * pch_gbe_alloc_tx_buffers - Allocate transmit buffers
  * @adapter:   Board private structure
@@ -1380,7 +1480,7 @@ pch_gbe_clean_rx(struct pch_gbe_adapter *adapter,
    unsigned int i;
    unsigned int cleaned_count = 0;
    bool cleaned = false;
-   struct sk_buff *skb, *new_skb;
+   struct sk_buff *skb;
    u8 dma_status;
    u16 gbec_status;
    u32 tcp_ip_status;
@@ -1401,13 +1501,12 @@ pch_gbe_clean_rx(struct pch_gbe_adapter *adapter,
        rx_desc->gbec_status = DSC_INIT16;
        buffer_info = &rx_ring->buffer_info[i];
        skb = buffer_info->skb;
+       buffer_info->skb = NULL;

        /* unmap dma */
        dma_unmap_single(&pdev->dev, buffer_info->dma,
                 buffer_info->length, DMA_FROM_DEVICE);
        buffer_info->mapped = false;
-       /* Prefetch the packet */
-       prefetch(skb->data);

        pr_debug("RxDecNo = 0x%04x  Status[DMA:0x%02x GBE:0x%04x "
             "TCP:0x%08x]  BufInf = 0x%p\n",
@@ -1427,70 +1526,16 @@ pch_gbe_clean_rx(struct pch_gbe_adapter *adapter,
            pr_err("Receive CRC Error\n");
        } else {
            /* get receive length */
-           /* length convert[-3] */
-           length = (rx_desc->rx_words_eob) - 3;
-
-           /* Decide the data conversion method */
-           if (!(netdev->features & NETIF_F_RXCSUM)) {
-               /* [Header:14][payload] */
-               if (NET_IP_ALIGN) {
-                   /* Because alignment differs,
-                    * the new_skb is newly allocated,
-                    * and data is copied to new_skb.*/
-                   new_skb = netdev_alloc_skb(netdev,
-                            length + NET_IP_ALIGN);
-                   if (!new_skb) {
-                       /* dorrop error */
-                       pr_err("New skb allocation "
-                           "Error\n");
-                       goto dorrop;
-                   }
-                   skb_reserve(new_skb, NET_IP_ALIGN);
-                   memcpy(new_skb->data, skb->data,
-                          length);
-                   skb = new_skb;
-               } else {
-                   /* DMA buffer is used as SKB as it is.*/
-                   buffer_info->skb = NULL;
-               }
-           } else {
-               /* [Header:14][padding:2][payload] */
-               /* The length includes padding length */
-               length = length - PCH_GBE_DMA_PADDING;
-               if ((length < copybreak) ||
-                   (NET_IP_ALIGN != PCH_GBE_DMA_PADDING)) {
-                   /* Because alignment differs,
-                    * the new_skb is newly allocated,
-                    * and data is copied to new_skb.
-                    * Padding data is deleted
-                    * at the time of a copy.*/
-                   new_skb = netdev_alloc_skb(netdev,
-                            length + NET_IP_ALIGN);
-                   if (!new_skb) {
-                       /* dorrop error */
-                       pr_err("New skb allocation "
-                           "Error\n");
-                       goto dorrop;
-                   }
-                   skb_reserve(new_skb, NET_IP_ALIGN);
-                   memcpy(new_skb->data, skb->data,
-                          ETH_HLEN);
-                   memcpy(&new_skb->data[ETH_HLEN],
-                          &skb->data[ETH_HLEN +
-                             PCH_GBE_DMA_PADDING],
-                          length - ETH_HLEN);
-                   skb = new_skb;
-               } else {
-                   /* Padding data is deleted
-                    * by moving header data.*/
-                   memmove(&skb->data[PCH_GBE_DMA_PADDING],
-                       &skb->data[0], ETH_HLEN);
-                   skb_reserve(skb, NET_IP_ALIGN);
-                   buffer_info->skb = NULL;
-               }
-           }
-           /* The length includes FCS length */
-           length = length - ETH_FCS_LEN;
+           /* length convert[-3], length includes FCS length */
+           length = (rx_desc->rx_words_eob) - 3 - ETH_FCS_LEN;
+           if (rx_desc->rx_words_eob & 0x02)
+               length = length - 4;
+           /*
+            * buffer_info->rx_buffer: [Header:14][payload]
+            * skb->data: [Reserve:2][Header:14][payload]
+            */
+           memcpy(skb->data, buffer_info->rx_buffer, length);
+
            /* update status of driver */
            adapter->stats.rx_bytes += length;
            adapter->stats.rx_packets++;
@@ -1509,7 +1554,6 @@ pch_gbe_clean_rx(struct pch_gbe_adapter *adapter,
            pr_debug("Receive skb->ip_summed: %d length: %d\n",
                 skb->ip_summed, length);
        }
-dorrop:
        /* return some buffers to hardware, one at a time is too slow */
        if (unlikely(cleaned_count >= PCH_GBE_RX_BUFFER_WRITE)) {
            pch_gbe_alloc_rx_buffers(adapter, rx_ring,
@@ -1714,9 +1758,15 @@ int pch_gbe_up(struct pch_gbe_adapter *adapter)
        pr_err("Error: can't bring device up\n");
        return err;
    }
+   err = pch_gbe_alloc_rx_buffers_pool(adapter, rx_ring, rx_ring->count);
+   if (err) {
+       pr_err("Error: can't bring device up\n");
+       return err;
+   }
    pch_gbe_alloc_tx_buffers(adapter, tx_ring);
    pch_gbe_alloc_rx_buffers(adapter, rx_ring, rx_ring->count);
    adapter->tx_queue_len = netdev->tx_queue_len;
+   pch_gbe_start_receive(&adapter->hw);

    mod_timer(&adapter->watchdog_timer, jiffies);

@@ -1734,6 +1784,7 @@ int pch_gbe_up(struct pch_gbe_adapter *adapter)
 void pch_gbe_down(struct pch_gbe_adapter *adapter)
 {
    struct net_device *netdev = adapter->netdev;
+   struct pch_gbe_rx_ring *rx_ring = adapter->rx_ring;

    /* signal that we're down so the interrupt handler does not
     * reschedule our watchdog timer */
@@ -1752,6 +1803,12 @@ void pch_gbe_down(struct pch_gbe_adapter *adapter)
    pch_gbe_reset(adapter);
    pch_gbe_clean_tx_ring(adapter, adapter->tx_ring);
    pch_gbe_clean_rx_ring(adapter, adapter->rx_ring);
+
+   pci_free_consistent(adapter->pdev, rx_ring->rx_buff_pool_size,
+               rx_ring->rx_buff_pool, rx_ring->rx_buff_pool_logic);
+   rx_ring->rx_buff_pool_logic = 0;
+   rx_ring->rx_buff_pool_size = 0;
+   rx_ring->rx_buff_pool = NULL;
 }

 /**
@@ -2004,6 +2061,8 @@ static int pch_gbe_change_mtu(struct net_device *netdev, int new_mtu)
 {
    struct pch_gbe_adapter *adapter = netdev_priv(netdev);
    int max_frame;
+   unsigned long old_rx_buffer_len = adapter->rx_buffer_len;
+   int err;

    max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
    if ((max_frame < ETH_ZLEN + ETH_FCS_LEN) ||
@@ -2018,14 +2077,24 @@ static int pch_gbe_change_mtu(struct net_device *netdev, int new_mtu)
    else if (max_frame <= PCH_GBE_FRAME_SIZE_8192)
        adapter->rx_buffer_len = PCH_GBE_FRAME_SIZE_8192;
    else
-       adapter->rx_buffer_len = PCH_GBE_MAX_JUMBO_FRAME_SIZE;
-   netdev->mtu = new_mtu;
-   adapter->hw.mac.max_frame_size = max_frame;
+       adapter->rx_buffer_len = PCH_GBE_MAX_RX_BUFFER_SIZE;

-   if (netif_running(netdev))
-       pch_gbe_reinit_locked(adapter);
-   else
+   if (netif_running(netdev)) {
+       pch_gbe_down(adapter);
+       err = pch_gbe_up(adapter);
+       if (err) {
+           adapter->rx_buffer_len = old_rx_buffer_len;
+           pch_gbe_up(adapter);
+           return -ENOMEM;
+       } else {
+           netdev->mtu = new_mtu;
+           adapter->hw.mac.max_frame_size = max_frame;
+       }
+   } else {
        pch_gbe_reset(adapter);
+       netdev->mtu = new_mtu;
+       adapter->hw.mac.max_frame_size = max_frame;
+   }

    pr_debug("max_frame : %d  rx_buffer_len : %d  mtu : %d  max_frame_size : %d\n",
         max_frame, (u32) adapter->rx_buffer_len, netdev->mtu,
@@ -2103,6 +2172,7 @@ static int pch_gbe_napi_poll(struct napi_struct *napi, int budget)
    int work_done = 0;
    bool poll_end_flag = false;
    bool cleaned = false;
+   u32 int_en;

    pr_debug("budget : %d\n", budget);

@@ -2110,8 +2180,15 @@ static int pch_gbe_napi_poll(struct napi_struct *napi, int budget)
    if (!netif_carrier_ok(netdev)) {
        poll_end_flag = true;
    } else {
-       cleaned = pch_gbe_clean_tx(adapter, adapter->tx_ring);
        pch_gbe_clean_rx(adapter, adapter->rx_ring, &work_done, budget);
+       if (adapter->rx_stop_flag) {
+           adapter->rx_stop_flag = false;
+           pch_gbe_start_receive(&adapter->hw);
+           int_en = ioread32(&adapter->hw.reg->INT_EN);
+           iowrite32((int_en | PCH_GBE_INT_RX_FIFO_ERR),
+                 &adapter->hw.reg->INT_EN);
+       }
+       cleaned = pch_gbe_clean_tx(adapter, adapter->tx_ring);

        if (cleaned)
            work_done = budget;
@@ -2452,6 +2529,13 @@ static DEFINE_PCI_DEVICE_TABLE(pch_gbe_pcidev_id) = {
     .class = (PCI_CLASS_NETWORK_ETHERNET << 8),
     .class_mask = (0xFFFF00)
     },
+   {.vendor = PCI_VENDOR_ID_ROHM,
+    .device = PCI_DEVICE_ID_ROHM_ML7831_GBE,
+    .subvendor = PCI_ANY_ID,
+    .subdevice = PCI_ANY_ID,
+    .class = (PCI_CLASS_NETWORK_ETHERNET << 8),
+    .class_mask = (0xFFFF00)
+    },
    /* required last entry */
    {0}
 };
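Taken together, the pch_gbe hunks implement a defer-to-NAPI recovery for RX FIFO overrun: the hard interrupt masks PCH_GBE_INT_RX_FIFO_ERR, stops RX DMA and latches rx_stop_flag; the NAPI poll drains the ring and only then restarts reception and unmasks the interrupt. A condensed sketch of the two halves, reusing the names from the diff with surrounding declarations and error paths omitted:

    /* ISR half: stop taking packets, leave the bit masked until recovery */
    if ((int_st & PCH_GBE_INT_RX_FIFO_ERR) && !adapter->rx_stop_flag) {
        adapter->rx_stop_flag = true;
        iowrite32(int_en & ~PCH_GBE_INT_RX_FIFO_ERR, &hw->reg->INT_EN);
        pch_gbe_stop_receive(adapter);
    }

    /* NAPI half: the ring has been drained, so restarting is safe now */
    if (adapter->rx_stop_flag) {
        adapter->rx_stop_flag = false;
        pch_gbe_start_receive(&adapter->hw);
        iowrite32(int_en | PCH_GBE_INT_RX_FIFO_ERR, &hw->reg->INT_EN);
    }

Doing the restart from softirq context keeps the bounded busy-waits of pch_gbe_wait_clr_bit_irq() out of the repeated-interrupt path.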
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c
index faca764aa21b..b59abc706d93 100644
--- a/drivers/net/sfc/efx.c
+++ b/drivers/net/sfc/efx.c
@@ -1050,7 +1050,6 @@ static int efx_init_io(struct efx_nic *efx)
 {
    struct pci_dev *pci_dev = efx->pci_dev;
    dma_addr_t dma_mask = efx->type->max_dma_mask;
-   bool use_wc;
    int rc;

    netif_dbg(efx, probe, efx->net_dev, "initialising I/O\n");
@@ -1101,21 +1100,8 @@ static int efx_init_io(struct efx_nic *efx)
        rc = -EIO;
        goto fail3;
    }
-
-   /* bug22643: If SR-IOV is enabled then tx push over a write combined
-    * mapping is unsafe. We need to disable write combining in this case.
-    * MSI is unsupported when SR-IOV is enabled, and the firmware will
-    * have removed the MSI capability. So write combining is safe if
-    * there is an MSI capability.
-    */
-   use_wc = (!EFX_WORKAROUND_22643(efx) ||
-         pci_find_capability(pci_dev, PCI_CAP_ID_MSI));
-   if (use_wc)
-       efx->membase = ioremap_wc(efx->membase_phys,
-                     efx->type->mem_map_size);
-   else
-       efx->membase = ioremap_nocache(efx->membase_phys,
-                          efx->type->mem_map_size);
+   efx->membase = ioremap_nocache(efx->membase_phys,
+                      efx->type->mem_map_size);
    if (!efx->membase) {
        netif_err(efx, probe, efx->net_dev,
              "could not map memory BAR at %llx+%x\n",
diff --git a/drivers/net/sfc/io.h b/drivers/net/sfc/io.h
index cc978803d484..751d1ec112cc 100644
--- a/drivers/net/sfc/io.h
+++ b/drivers/net/sfc/io.h
@@ -103,7 +103,6 @@ static inline void efx_writeo(struct efx_nic *efx, efx_oword_t *value,
    _efx_writed(efx, value->u32[2], reg + 8);
    _efx_writed(efx, value->u32[3], reg + 12);
 #endif
-   wmb();
    mmiowb();
    spin_unlock_irqrestore(&efx->biu_lock, flags);
 }
@@ -126,7 +125,6 @@ static inline void efx_sram_writeq(struct efx_nic *efx, void __iomem *membase,
    __raw_writel((__force u32)value->u32[0], membase + addr);
    __raw_writel((__force u32)value->u32[1], membase + addr + 4);
 #endif
-   wmb();
    mmiowb();
    spin_unlock_irqrestore(&efx->biu_lock, flags);
 }
@@ -141,7 +139,6 @@ static inline void efx_writed(struct efx_nic *efx, efx_dword_t *value,

    /* No lock required */
    _efx_writed(efx, value->u32[0], reg);
-   wmb();
 }

 /* Read a 128-bit CSR, locking as appropriate. */
@@ -152,7 +149,6 @@ static inline void efx_reado(struct efx_nic *efx, efx_oword_t *value,

    spin_lock_irqsave(&efx->biu_lock, flags);
    value->u32[0] = _efx_readd(efx, reg + 0);
-   rmb();
    value->u32[1] = _efx_readd(efx, reg + 4);
    value->u32[2] = _efx_readd(efx, reg + 8);
    value->u32[3] = _efx_readd(efx, reg + 12);
@@ -175,7 +171,6 @@ static inline void efx_sram_readq(struct efx_nic *efx, void __iomem *membase,
    value->u64[0] = (__force __le64)__raw_readq(membase + addr);
 #else
    value->u32[0] = (__force __le32)__raw_readl(membase + addr);
-   rmb();
    value->u32[1] = (__force __le32)__raw_readl(membase + addr + 4);
 #endif
    spin_unlock_irqrestore(&efx->biu_lock, flags);
@@ -249,7 +244,6 @@ static inline void _efx_writeo_page(struct efx_nic *efx, efx_oword_t *value,
    _efx_writed(efx, value->u32[2], reg + 8);
    _efx_writed(efx, value->u32[3], reg + 12);
 #endif
-   wmb();
 }
 #define efx_writeo_page(efx, value, reg, page) \
    _efx_writeo_page(efx, value, \
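These wmb()/rmb() removals go hand in hand with the write-combining revert in efx.c: the BAR is mapped uncached again, so the barriers that were inserted to flush the write-combined mapping no longer buy anything. The general distinction the code leans on, sketched as an assumption rather than sfc-specific fact:

    __raw_writel(v, addr);  /* raw accessor: no implicit barrier */
    wmb();                  /* add one only where ordering is actually needed */

    writel(v, addr);        /* plain accessor: ordering is built in */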
diff --git a/drivers/net/sfc/mcdi.c b/drivers/net/sfc/mcdi.c
index 3dd45ed61f0a..81a425397468 100644
--- a/drivers/net/sfc/mcdi.c
+++ b/drivers/net/sfc/mcdi.c
@@ -50,20 +50,6 @@ static inline struct efx_mcdi_iface *efx_mcdi(struct efx_nic *efx)
    return &nic_data->mcdi;
 }

-static inline void
-efx_mcdi_readd(struct efx_nic *efx, efx_dword_t *value, unsigned reg)
-{
-   struct siena_nic_data *nic_data = efx->nic_data;
-   value->u32[0] = (__force __le32)__raw_readl(nic_data->mcdi_smem + reg);
-}
-
-static inline void
-efx_mcdi_writed(struct efx_nic *efx, const efx_dword_t *value, unsigned reg)
-{
-   struct siena_nic_data *nic_data = efx->nic_data;
-   __raw_writel((__force u32)value->u32[0], nic_data->mcdi_smem + reg);
-}
-
 void efx_mcdi_init(struct efx_nic *efx)
 {
    struct efx_mcdi_iface *mcdi;
@@ -84,8 +70,8 @@ static void efx_mcdi_copyin(struct efx_nic *efx, unsigned cmd,
                const u8 *inbuf, size_t inlen)
 {
    struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
-   unsigned pdu = MCDI_PDU(efx);
-   unsigned doorbell = MCDI_DOORBELL(efx);
+   unsigned pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx);
+   unsigned doorbell = FR_CZ_MC_TREG_SMEM + MCDI_DOORBELL(efx);
    unsigned int i;
    efx_dword_t hdr;
    u32 xflags, seqno;
@@ -106,28 +92,29 @@ static void efx_mcdi_copyin(struct efx_nic *efx, unsigned cmd,
                 MCDI_HEADER_SEQ, seqno,
                 MCDI_HEADER_XFLAGS, xflags);

-   efx_mcdi_writed(efx, &hdr, pdu);
+   efx_writed(efx, &hdr, pdu);

    for (i = 0; i < inlen; i += 4)
-       efx_mcdi_writed(efx, (const efx_dword_t *)(inbuf + i),
-               pdu + 4 + i);
+       _efx_writed(efx, *((__le32 *)(inbuf + i)), pdu + 4 + i);
+
+   /* Ensure the payload is written out before the header */
+   wmb();

    /* ring the doorbell with a distinctive value */
-   EFX_POPULATE_DWORD_1(hdr, EFX_DWORD_0, 0x45789abc);
-   efx_mcdi_writed(efx, &hdr, doorbell);
+   _efx_writed(efx, (__force __le32) 0x45789abc, doorbell);
 }

 static void efx_mcdi_copyout(struct efx_nic *efx, u8 *outbuf, size_t outlen)
 {
    struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
-   unsigned int pdu = MCDI_PDU(efx);
+   unsigned int pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx);
    int i;

    BUG_ON(atomic_read(&mcdi->state) == MCDI_STATE_QUIESCENT);
    BUG_ON(outlen & 3 || outlen >= 0x100);

    for (i = 0; i < outlen; i += 4)
-       efx_mcdi_readd(efx, (efx_dword_t *)(outbuf + i), pdu + 4 + i);
+       *((__le32 *)(outbuf + i)) = _efx_readd(efx, pdu + 4 + i);
 }

 static int efx_mcdi_poll(struct efx_nic *efx)
@@ -135,7 +122,7 @@ static int efx_mcdi_poll(struct efx_nic *efx)
    struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
    unsigned int time, finish;
    unsigned int respseq, respcmd, error;
-   unsigned int pdu = MCDI_PDU(efx);
+   unsigned int pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx);
    unsigned int rc, spins;
    efx_dword_t reg;

@@ -161,7 +148,8 @@ static int efx_mcdi_poll(struct efx_nic *efx)

        time = get_seconds();

-       efx_mcdi_readd(efx, &reg, pdu);
+       rmb();
+       efx_readd(efx, &reg, pdu);

        /* All 1's indicates that shared memory is in reset (and is
         * not a valid header). Wait for it to come out reset before
@@ -188,7 +176,7 @@ static int efx_mcdi_poll(struct efx_nic *efx)
              respseq, mcdi->seqno);
        rc = EIO;
    } else if (error) {
-       efx_mcdi_readd(efx, &reg, pdu + 4);
+       efx_readd(efx, &reg, pdu + 4);
        switch (EFX_DWORD_FIELD(reg, EFX_DWORD_0)) {
 #define TRANSLATE_ERROR(name) \
    case MC_CMD_ERR_ ## name: \
@@ -222,21 +210,21 @@ out:
 /* Test and clear MC-rebooted flag for this port/function */
 int efx_mcdi_poll_reboot(struct efx_nic *efx)
 {
-   unsigned int addr = MCDI_REBOOT_FLAG(efx);
+   unsigned int addr = FR_CZ_MC_TREG_SMEM + MCDI_REBOOT_FLAG(efx);
    efx_dword_t reg;
    uint32_t value;

    if (efx_nic_rev(efx) < EFX_REV_SIENA_A0)
        return false;

-   efx_mcdi_readd(efx, &reg, addr);
+   efx_readd(efx, &reg, addr);
    value = EFX_DWORD_FIELD(reg, EFX_DWORD_0);

    if (value == 0)
        return 0;

    EFX_ZERO_DWORD(reg);
-   efx_mcdi_writed(efx, &reg, addr);
+   efx_writed(efx, &reg, addr);

    if (value == MC_STATUS_DWORD_ASSERT)
        return -EINTR;
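With mem_map_size widened in siena.c, MCDI shared memory now sits inside the normal BAR mapping and the private efx_mcdi_readd()/efx_mcdi_writed() helpers disappear. The one subtlety the new copyin path keeps is ordering: the request body is pushed with raw writes, a wmb() makes it globally visible, and only then is the doorbell rung, so the MC can never observe the doorbell ahead of the payload. The invariant, sketched with hypothetical PDU_BASE/DOORBELL_BASE offsets standing in for the real register math:

    /* publish the body first ... */
    for (i = 0; i < len; i += 4)
        _efx_writed(efx, *(__le32 *)(buf + i), PDU_BASE + 4 + i);
    wmb();  /* ... and only then let the MC see the doorbell */
    _efx_writed(efx, (__force __le32)0x45789abc, DOORBELL_BASE);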
diff --git a/drivers/net/sfc/nic.c b/drivers/net/sfc/nic.c
index bafa23a6874c..3edfbaf5f022 100644
--- a/drivers/net/sfc/nic.c
+++ b/drivers/net/sfc/nic.c
@@ -1936,13 +1936,6 @@ void efx_nic_get_regs(struct efx_nic *efx, void *buf)

        size = min_t(size_t, table->step, 16);

-       if (table->offset >= efx->type->mem_map_size) {
-           /* No longer mapped; return dummy data */
-           memcpy(buf, "\xde\xc0\xad\xde", 4);
-           buf += table->rows * size;
-           continue;
-       }
-
        for (i = 0; i < table->rows; i++) {
            switch (table->step) {
            case 4: /* 32-bit register or SRAM */
diff --git a/drivers/net/sfc/nic.h b/drivers/net/sfc/nic.h
index 4bd1f2839dfe..7443f99c977f 100644
--- a/drivers/net/sfc/nic.h
+++ b/drivers/net/sfc/nic.h
@@ -143,12 +143,10 @@ static inline struct falcon_board *falcon_board(struct efx_nic *efx)
 /**
  * struct siena_nic_data - Siena NIC state
  * @mcdi: Management-Controller-to-Driver Interface
- * @mcdi_smem: MCDI shared memory mapping. The mapping is always uncacheable.
  * @wol_filter_id: Wake-on-LAN packet filter id
  */
 struct siena_nic_data {
    struct efx_mcdi_iface mcdi;
-   void __iomem *mcdi_smem;
    int wol_filter_id;
 };

diff --git a/drivers/net/sfc/siena.c b/drivers/net/sfc/siena.c
index 5735e84c69de..2c3bd93fab54 100644
--- a/drivers/net/sfc/siena.c
+++ b/drivers/net/sfc/siena.c
@@ -250,26 +250,12 @@ static int siena_probe_nic(struct efx_nic *efx)
    efx_reado(efx, &reg, FR_AZ_CS_DEBUG);
    efx->net_dev->dev_id = EFX_OWORD_FIELD(reg, FRF_CZ_CS_PORT_NUM) - 1;

-   /* Initialise MCDI */
-   nic_data->mcdi_smem = ioremap_nocache(efx->membase_phys +
-                         FR_CZ_MC_TREG_SMEM,
-                         FR_CZ_MC_TREG_SMEM_STEP *
-                         FR_CZ_MC_TREG_SMEM_ROWS);
-   if (!nic_data->mcdi_smem) {
-       netif_err(efx, probe, efx->net_dev,
-             "could not map MCDI at %llx+%x\n",
-             (unsigned long long)efx->membase_phys +
-             FR_CZ_MC_TREG_SMEM,
-             FR_CZ_MC_TREG_SMEM_STEP * FR_CZ_MC_TREG_SMEM_ROWS);
-       rc = -ENOMEM;
-       goto fail1;
-   }
    efx_mcdi_init(efx);

    /* Recover from a failed assertion before probing */
    rc = efx_mcdi_handle_assertion(efx);
    if (rc)
-       goto fail2;
+       goto fail1;

    /* Let the BMC know that the driver is now in charge of link and
     * filter settings. We must do this before we reset the NIC */
@@ -324,7 +310,6 @@ fail4:
 fail3:
    efx_mcdi_drv_attach(efx, false, NULL);
 fail2:
-   iounmap(nic_data->mcdi_smem);
 fail1:
    kfree(efx->nic_data);
    return rc;
@@ -404,8 +389,6 @@ static int siena_init_nic(struct efx_nic *efx)

 static void siena_remove_nic(struct efx_nic *efx)
 {
-   struct siena_nic_data *nic_data = efx->nic_data;
-
    efx_nic_free_buffer(efx, &efx->irq_status);

    siena_reset_hw(efx, RESET_TYPE_ALL);
@@ -415,8 +398,7 @@ static void siena_remove_nic(struct efx_nic *efx)
    efx_mcdi_drv_attach(efx, false, NULL);

    /* Tear down the private nic state */
-   iounmap(nic_data->mcdi_smem);
-   kfree(nic_data);
+   kfree(efx->nic_data);
    efx->nic_data = NULL;
 }

@@ -656,7 +638,8 @@ const struct efx_nic_type siena_a0_nic_type = {
    .default_mac_ops = &efx_mcdi_mac_operations,

    .revision = EFX_REV_SIENA_A0,
-   .mem_map_size = FR_CZ_MC_TREG_SMEM, /* MC_TREG_SMEM mapped separately */
+   .mem_map_size = (FR_CZ_MC_TREG_SMEM +
+            FR_CZ_MC_TREG_SMEM_STEP * FR_CZ_MC_TREG_SMEM_ROWS),
    .txd_ptr_tbl_base = FR_BZ_TX_DESC_PTR_TBL,
    .rxd_ptr_tbl_base = FR_BZ_RX_DESC_PTR_TBL,
    .buf_tbl_base = FR_BZ_BUF_FULL_TBL,
diff --git a/drivers/net/sfc/workarounds.h b/drivers/net/sfc/workarounds.h
index 99ff11400cef..e4dd3a7f304b 100644
--- a/drivers/net/sfc/workarounds.h
+++ b/drivers/net/sfc/workarounds.h
@@ -38,8 +38,6 @@
 #define EFX_WORKAROUND_15783 EFX_WORKAROUND_ALWAYS
 /* Legacy interrupt storm when interrupt fifo fills */
 #define EFX_WORKAROUND_17213 EFX_WORKAROUND_SIENA
-/* Write combining and sriov=enabled are incompatible */
-#define EFX_WORKAROUND_22643 EFX_WORKAROUND_SIENA

 /* Spurious parity errors in TSORT buffers */
 #define EFX_WORKAROUND_5129 EFX_WORKAROUND_FALCON_A
diff --git a/drivers/net/usb/ipheth.c b/drivers/net/usb/ipheth.c
index 15772b1b6a91..13c1f044b40d 100644
--- a/drivers/net/usb/ipheth.c
+++ b/drivers/net/usb/ipheth.c
@@ -59,6 +59,7 @@
 #define USB_PRODUCT_IPHONE_3G   0x1292
 #define USB_PRODUCT_IPHONE_3GS  0x1294
 #define USB_PRODUCT_IPHONE_4    0x1297
+#define USB_PRODUCT_IPHONE_4_VZW 0x129c

 #define IPHETH_USBINTF_CLASS    255
 #define IPHETH_USBINTF_SUBCLASS 253
@@ -98,6 +99,10 @@ static struct usb_device_id ipheth_table[] = {
        USB_VENDOR_APPLE, USB_PRODUCT_IPHONE_4,
        IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS,
        IPHETH_USBINTF_PROTO) },
+   { USB_DEVICE_AND_INTERFACE_INFO(
+       USB_VENDOR_APPLE, USB_PRODUCT_IPHONE_4_VZW,
+       IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS,
+       IPHETH_USBINTF_PROTO) },
    { }
 };
 MODULE_DEVICE_TABLE(usb, ipheth_table);
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_calib.c b/drivers/net/wireless/ath/ath9k/ar9002_calib.c
index 2d4c0910295b..2d394af82171 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_calib.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_calib.c
@@ -41,7 +41,8 @@ static bool ar9002_hw_is_cal_supported(struct ath_hw *ah,
    case ADC_DC_CAL:
        /* Run ADC Gain Cal for non-CCK & non 2GHz-HT20 only */
        if (!IS_CHAN_B(chan) &&
-           !(IS_CHAN_2GHZ(chan) && IS_CHAN_HT20(chan)))
+           !((IS_CHAN_2GHZ(chan) || IS_CHAN_A_FAST_CLOCK(ah, chan)) &&
+             IS_CHAN_HT20(chan)))
            supported = true;
        break;
    }
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.c b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
index 1baca8e4715d..fcafec0605f4 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
@@ -671,7 +671,7 @@ static int ar9003_hw_process_ini(struct ath_hw *ah,
        REG_WRITE_ARRAY(&ah->iniModesAdditional,
                modesIndex, regWrites);

-   if (AR_SREV_9300(ah))
+   if (AR_SREV_9330(ah))
        REG_WRITE_ARRAY(&ah->iniModesAdditional, 1, regWrites);

    if (AR_SREV_9340(ah) && !ah->is_clk_25mhz)
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index 6530694a59ae..722967b86cf1 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -2303,6 +2303,12 @@ static void ath9k_flush(struct ieee80211_hw *hw, bool drop)
    mutex_lock(&sc->mutex);
    cancel_delayed_work_sync(&sc->tx_complete_work);

+   if (ah->ah_flags & AH_UNPLUGGED) {
+       ath_dbg(common, ATH_DBG_ANY, "Device has been unplugged!\n");
+       mutex_unlock(&sc->mutex);
+       return;
+   }
+
    if (sc->sc_flags & SC_OP_INVALID) {
        ath_dbg(common, ATH_DBG_ANY, "Device not present\n");
        mutex_unlock(&sc->mutex);
diff --git a/drivers/net/wireless/iwlegacy/iwl-3945-rs.c b/drivers/net/wireless/iwlegacy/iwl-3945-rs.c
index 977bd2477c6a..164bcae821f8 100644
--- a/drivers/net/wireless/iwlegacy/iwl-3945-rs.c
+++ b/drivers/net/wireless/iwlegacy/iwl-3945-rs.c
@@ -822,12 +822,15 @@ static void iwl3945_rs_get_rate(void *priv_r, struct ieee80211_sta *sta,

  out:

-   rs_sta->last_txrate_idx = index;
-   if (sband->band == IEEE80211_BAND_5GHZ)
-       info->control.rates[0].idx = rs_sta->last_txrate_idx -
-               IWL_FIRST_OFDM_RATE;
-   else
+   if (sband->band == IEEE80211_BAND_5GHZ) {
+       if (WARN_ON_ONCE(index < IWL_FIRST_OFDM_RATE))
+           index = IWL_FIRST_OFDM_RATE;
+       rs_sta->last_txrate_idx = index;
+       info->control.rates[0].idx = index - IWL_FIRST_OFDM_RATE;
+   } else {
+       rs_sta->last_txrate_idx = index;
        info->control.rates[0].idx = rs_sta->last_txrate_idx;
+   }

    IWL_DEBUG_RATE(priv, "leave: %d\n", index);
 }
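The reworked 5 GHz branch guards against an index below IWL_FIRST_OFDM_RATE before subtracting, so the rate index handed back to mac80211 can no longer go negative. The guard pattern in isolation, with FIRST_VALID as a hypothetical lower bound:

    static u8 pick_idx(int index)
    {
        if (WARN_ON_ONCE(index < FIRST_VALID))
            index = FIRST_VALID;       /* warn once, then clamp */
        return index - FIRST_VALID;    /* offset is now guaranteed >= 0 */
    }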
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c b/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c
index a895a099d086..56211006a182 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c
@@ -167,7 +167,7 @@ static int iwlagn_set_temperature_offset_calib(struct iwl_priv *priv)

    memset(&cmd, 0, sizeof(cmd));
    iwl_set_calib_hdr(&cmd.hdr, IWL_PHY_CALIBRATE_TEMP_OFFSET_CMD);
-   memcpy(&cmd.radio_sensor_offset, offset_calib, sizeof(offset_calib));
+   memcpy(&cmd.radio_sensor_offset, offset_calib, sizeof(*offset_calib));
    if (!(cmd.radio_sensor_offset))
        cmd.radio_sensor_offset = DEFAULT_RADIO_SENSOR_OFFSET;

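The one-character iwlwifi fix above is the classic sizeof-of-a-pointer bug: sizeof(offset_calib) yields the size of the pointer itself (4 or 8 bytes), not of the object it points to. In miniature, assuming a 16-bit calibration word as the diff suggests:

    __le16 *offset_calib = get_calib();                 /* hypothetical source */
    memcpy(dst, offset_calib, sizeof(offset_calib));    /* wrong: 4 or 8 bytes */
    memcpy(dst, offset_calib, sizeof(*offset_calib));   /* right: 2 bytes */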
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-tx-pcie.c b/drivers/net/wireless/iwlwifi/iwl-trans-tx-pcie.c
index a6b2b1db0b1d..222d410c586e 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans-tx-pcie.c
+++ b/drivers/net/wireless/iwlwifi/iwl-trans-tx-pcie.c
@@ -771,6 +771,8 @@ void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
    cmd = txq->cmd[cmd_index];
    meta = &txq->meta[cmd_index];

+   txq->time_stamp = jiffies;
+
    iwlagn_unmap_tfd(priv, meta, &txq->tfds[index], DMA_BIDIRECTIONAL);

    /* Input error checking is done when commands are added to queue. */
diff --git a/drivers/net/wireless/rtlwifi/core.c b/drivers/net/wireless/rtlwifi/core.c
index 1bdc1aa305c0..04c4e9eb6ee6 100644
--- a/drivers/net/wireless/rtlwifi/core.c
+++ b/drivers/net/wireless/rtlwifi/core.c
@@ -610,6 +610,11 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,

            mac->link_state = MAC80211_NOLINK;
            memset(mac->bssid, 0, 6);
+
+           /* reset sec info */
+           rtl_cam_reset_sec_info(hw);
+
+           rtl_cam_reset_all_entry(hw);
            mac->vendor = PEER_UNKNOWN;

            RT_TRACE(rtlpriv, COMP_MAC80211, DBG_DMESG,
@@ -1063,6 +1068,9 @@ static int rtl_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
         *or clear all entry here.
         */
        rtl_cam_delete_one_entry(hw, mac_addr, key_idx);
+
+       rtl_cam_reset_sec_info(hw);
+
        break;
    default:
        RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
index 906e7aa55bc3..3e52a5496224 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
@@ -549,15 +549,16 @@ void rtl92cu_tx_fill_desc(struct ieee80211_hw *hw,
            (tcb_desc->rts_use_shortpreamble ? 1 : 0)
            : (tcb_desc->rts_use_shortgi ? 1 : 0)));
    if (mac->bw_40) {
-       if (tcb_desc->packet_bw) {
+       if (rate_flag & IEEE80211_TX_RC_DUP_DATA) {
            SET_TX_DESC_DATA_BW(txdesc, 1);
            SET_TX_DESC_DATA_SC(txdesc, 3);
+       } else if(rate_flag & IEEE80211_TX_RC_40_MHZ_WIDTH){
+           SET_TX_DESC_DATA_BW(txdesc, 1);
+           SET_TX_DESC_DATA_SC(txdesc, mac->cur_40_prime_sc);
        } else {
            SET_TX_DESC_DATA_BW(txdesc, 0);
-           if (rate_flag & IEEE80211_TX_RC_DUP_DATA)
-               SET_TX_DESC_DATA_SC(txdesc,
-                           mac->cur_40_prime_sc);
-       }
+           SET_TX_DESC_DATA_SC(txdesc, 0);
+       }
    } else {
        SET_TX_DESC_DATA_BW(txdesc, 0);
        SET_TX_DESC_DATA_SC(txdesc, 0);
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 7b996ed86d5b..8bd383caa363 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -524,6 +524,7 @@ static inline struct sk_buff *alloc_skb_fclone(unsigned int size,
 extern bool skb_recycle_check(struct sk_buff *skb, int skb_size);
 
 extern struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src);
+extern int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask);
 extern struct sk_buff *skb_clone(struct sk_buff *skb,
 				 gfp_t priority);
 extern struct sk_buff *skb_copy(const struct sk_buff *skb,
diff --git a/include/linux/snmp.h b/include/linux/snmp.h
index 12b2b18e50c1..e16557a357e5 100644
--- a/include/linux/snmp.h
+++ b/include/linux/snmp.h
@@ -231,6 +231,8 @@ enum
 	LINUX_MIB_TCPDEFERACCEPTDROP,
 	LINUX_MIB_IPRPFILTER,		/* IP Reverse Path Filter (rp_filter) */
 	LINUX_MIB_TCPTIMEWAITOVERFLOW,	/* TCPTimeWaitOverflow */
+	LINUX_MIB_TCPREQQFULLDOCOOKIES,	/* TCPReqQFullDoCookies */
+	LINUX_MIB_TCPREQQFULLDROP,	/* TCPReqQFullDrop */
 	__LINUX_MIB_MAX
 };
 
diff --git a/include/net/flow.h b/include/net/flow.h
index 78113daadd63..a09447749e2d 100644
--- a/include/net/flow.h
+++ b/include/net/flow.h
@@ -7,6 +7,7 @@
 #ifndef _NET_FLOW_H
 #define _NET_FLOW_H
 
+#include <linux/socket.h>
 #include <linux/in6.h>
 #include <linux/atomic.h>
 
@@ -68,7 +69,7 @@ struct flowi4 {
 #define fl4_ipsec_spi		uli.spi
 #define fl4_mh_type		uli.mht.type
 #define fl4_gre_key		uli.gre_key
-};
+} __attribute__((__aligned__(BITS_PER_LONG/8)));
 
 static inline void flowi4_init_output(struct flowi4 *fl4, int oif,
 				      __u32 mark, __u8 tos, __u8 scope,
@@ -112,7 +113,7 @@ struct flowi6 {
 #define fl6_ipsec_spi		uli.spi
 #define fl6_mh_type		uli.mht.type
 #define fl6_gre_key		uli.gre_key
-};
+} __attribute__((__aligned__(BITS_PER_LONG/8)));
 
 struct flowidn {
 	struct flowi_common	__fl_common;
@@ -127,7 +128,7 @@ struct flowidn {
 	union flowi_uli		uli;
 #define fld_sport		uli.ports.sport
 #define fld_dport		uli.ports.dport
-};
+} __attribute__((__aligned__(BITS_PER_LONG/8)));
 
 struct flowi {
 	union {
@@ -161,6 +162,24 @@ static inline struct flowi *flowidn_to_flowi(struct flowidn *fldn)
 	return container_of(fldn, struct flowi, u.dn);
 }
 
+typedef unsigned long flow_compare_t;
+
+static inline size_t flow_key_size(u16 family)
+{
+	switch (family) {
+	case AF_INET:
+		BUILD_BUG_ON(sizeof(struct flowi4) % sizeof(flow_compare_t));
+		return sizeof(struct flowi4) / sizeof(flow_compare_t);
+	case AF_INET6:
+		BUILD_BUG_ON(sizeof(struct flowi6) % sizeof(flow_compare_t));
+		return sizeof(struct flowi6) / sizeof(flow_compare_t);
+	case AF_DECnet:
+		BUILD_BUG_ON(sizeof(struct flowidn) % sizeof(flow_compare_t));
+		return sizeof(struct flowidn) / sizeof(flow_compare_t);
+	}
+	return 0;
+}
+
 #define FLOW_DIR_IN	0
 #define FLOW_DIR_OUT	1
 #define FLOW_DIR_FWD	2
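
Note on the flow.h hunks above: keys are now hashed and compared only over the address-family-specific prefix of the flowi union, in word-sized chunks. A minimal userspace sketch of that idea (the key_v4/key_v6 structs, key_words() and key_equal() names are illustrative stand-ins, not kernel code):

    #include <stddef.h>
    #include <stdio.h>

    typedef unsigned long flow_compare_t;

    /* Stand-ins for flowi4/flowi6: only their sizes matter here. */
    struct key_v4 { unsigned long w[2]; };
    struct key_v6 { unsigned long w[5]; };

    /* Per-family key size in words, mirroring flow_key_size() above. */
    static size_t key_words(int family)
    {
        switch (family) {
        case 4: return sizeof(struct key_v4) / sizeof(flow_compare_t);
        case 6: return sizeof(struct key_v6) / sizeof(flow_compare_t);
        }
        return 0;   /* unknown family: caller must bypass the cache */
    }

    /* Word-wise compare over just the family-specific prefix,
     * as flow_key_compare() now does with an explicit keysize. */
    static int key_equal(const void *a, const void *b, size_t words)
    {
        const flow_compare_t *k1 = a, *k2 = b;

        while (words--)
            if (*k1++ != *k2++)
                return 0;
        return 1;
    }

    int main(void)
    {
        struct key_v4 a = { { 1, 2 } }, b = { { 1, 2 } };

        printf("%d\n", key_equal(&a, &b, key_words(4)));   /* prints 1 */
        return 0;
    }

The aligned attribute on the structs above is what makes the word-wise walk safe: each per-family key is padded to a whole number of longs.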
diff --git a/include/net/request_sock.h b/include/net/request_sock.h
index 99e6e19b57c2..4c0766e201e3 100644
--- a/include/net/request_sock.h
+++ b/include/net/request_sock.h
@@ -96,7 +96,8 @@ extern int sysctl_max_syn_backlog;
  */
 struct listen_sock {
 	u8			max_qlen_log;
-	/* 3 bytes hole, try to use */
+	u8			synflood_warned;
+	/* 2 bytes hole, try to use */
 	int			qlen;
 	int			qlen_young;
 	int			clock_hand;
diff --git a/include/net/sctp/command.h b/include/net/sctp/command.h
index 6506458ccd33..712b3bebeda7 100644
--- a/include/net/sctp/command.h
+++ b/include/net/sctp/command.h
@@ -109,6 +109,7 @@ typedef enum {
 	SCTP_CMD_SEND_MSG,	 /* Send the whole use message */
 	SCTP_CMD_SEND_NEXT_ASCONF, /* Send the next ASCONF after ACK */
 	SCTP_CMD_PURGE_ASCONF_QUEUE, /* Purge all asconf queues.*/
+	SCTP_CMD_SET_ASOC,	 /* Restore association context */
 	SCTP_CMD_LAST
 } sctp_verb_t;
 
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 149a415d1e0a..e9b48b094683 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -460,6 +460,9 @@ extern int tcp_write_wakeup(struct sock *);
 extern void tcp_send_fin(struct sock *sk);
 extern void tcp_send_active_reset(struct sock *sk, gfp_t priority);
 extern int tcp_send_synack(struct sock *);
+extern int tcp_syn_flood_action(struct sock *sk,
+				const struct sk_buff *skb,
+				const char *proto);
 extern void tcp_push_one(struct sock *, unsigned int mss_now);
 extern void tcp_send_ack(struct sock *sk);
 extern void tcp_send_delayed_ack(struct sock *sk);
diff --git a/include/net/transp_v6.h b/include/net/transp_v6.h
index 5271a741c3a3..498433dd067d 100644
--- a/include/net/transp_v6.h
+++ b/include/net/transp_v6.h
@@ -39,6 +39,7 @@ extern int datagram_recv_ctl(struct sock *sk,
 					     struct sk_buff *skb);
 
 extern int			datagram_send_ctl(struct net *net,
+						  struct sock *sk,
 						  struct msghdr *msg,
 						  struct flowi6 *fl6,
 						  struct ipv6_txoptions *opt,
diff --git a/net/bridge/netfilter/Kconfig b/net/bridge/netfilter/Kconfig
index ba6f73eb06c6..a9aff9c7d027 100644
--- a/net/bridge/netfilter/Kconfig
+++ b/net/bridge/netfilter/Kconfig
@@ -4,7 +4,7 @@
 
 menuconfig BRIDGE_NF_EBTABLES
 	tristate "Ethernet Bridge tables (ebtables) support"
-	depends on BRIDGE && BRIDGE_NETFILTER
+	depends on BRIDGE && NETFILTER
 	select NETFILTER_XTABLES
 	help
 	  ebtables is a general, extensible frame/packet identification
diff --git a/net/caif/caif_dev.c b/net/caif/caif_dev.c
index 7c2fa0a08148..7f9ac0742d19 100644
--- a/net/caif/caif_dev.c
+++ b/net/caif/caif_dev.c
@@ -93,10 +93,14 @@ static struct caif_device_entry *caif_device_alloc(struct net_device *dev)
 	caifdevs = caif_device_list(dev_net(dev));
 	BUG_ON(!caifdevs);
 
-	caifd = kzalloc(sizeof(*caifd), GFP_ATOMIC);
+	caifd = kzalloc(sizeof(*caifd), GFP_KERNEL);
 	if (!caifd)
 		return NULL;
 	caifd->pcpu_refcnt = alloc_percpu(int);
+	if (!caifd->pcpu_refcnt) {
+		kfree(caifd);
+		return NULL;
+	}
 	caifd->netdev = dev;
 	dev_hold(dev);
 	return caifd;
diff --git a/net/can/af_can.c b/net/can/af_can.c
index 8ce926d3b2cb..9b0c32a2690c 100644
--- a/net/can/af_can.c
+++ b/net/can/af_can.c
@@ -857,7 +857,7 @@ static __exit void can_exit(void)
 	struct net_device *dev;
 
 	if (stats_timer)
-		del_timer(&can_stattimer);
+		del_timer_sync(&can_stattimer);
 
 	can_remove_proc();
 
diff --git a/net/core/dev.c b/net/core/dev.c
index 17d67b579beb..b10ff0a71855 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1515,6 +1515,14 @@ static inline bool is_skb_forwardable(struct net_device *dev,
  */
 int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
 {
+	if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
+		if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
+			atomic_long_inc(&dev->rx_dropped);
+			kfree_skb(skb);
+			return NET_RX_DROP;
+		}
+	}
+
 	skb_orphan(skb);
 	nf_reset(skb);
 
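
The dev_forward_skb() hunk above, together with the skb_copy_ubufs() changes further down, fixes zero-copy buffers that get looped back to another device: the payload still lives in pinned user pages and must be privately copied before forwarding. A schematic of the control flow in plain C (all names here are illustrative analogues, not kernel API):

    #include <stdlib.h>
    #include <string.h>

    struct pkt {
        int zerocopy;           /* stands in for SKBTX_DEV_ZEROCOPY */
        unsigned char *data;    /* stands in for the frag pages */
        size_t len;
    };

    /* Analogue of skb_copy_ubufs(): copy, then clear the flag. */
    static int copy_user_frags(struct pkt *p)
    {
        unsigned char *priv = malloc(p->len);

        if (!priv)
            return -1;
        memcpy(priv, p->data, p->len);
        p->data = priv;     /* user pages no longer referenced */
        p->zerocopy = 0;    /* callee clears the flag, as the patch makes it do */
        return 0;
    }

    /* Analogue of the new guard at the top of dev_forward_skb(). */
    static int forward_pkt(struct pkt *p, long *rx_dropped)
    {
        if (p->zerocopy && copy_user_frags(p) < 0) {
            (*rx_dropped)++;
            return -1;      /* NET_RX_DROP analogue; caller frees p */
        }
        /* ...hand p to the receiving device... */
        return 0;
    }

Moving the flag-clear into the callee is why the three call sites below each lose their open-coded `tx_flags &= ~SKBTX_DEV_ZEROCOPY` line.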
diff --git a/net/core/flow.c b/net/core/flow.c
index bf32c33cad3b..555a456efb07 100644
--- a/net/core/flow.c
+++ b/net/core/flow.c
@@ -30,6 +30,7 @@ struct flow_cache_entry {
 		struct hlist_node		hlist;
 		struct list_head		gc_list;
 	} u;
+	struct net			*net;
 	u16				family;
 	u8				dir;
 	u32				genid;
@@ -172,29 +173,26 @@ static void flow_new_hash_rnd(struct flow_cache *fc,
 
 static u32 flow_hash_code(struct flow_cache *fc,
 			  struct flow_cache_percpu *fcp,
-			  const struct flowi *key)
+			  const struct flowi *key,
+			  size_t keysize)
 {
 	const u32 *k = (const u32 *) key;
+	const u32 length = keysize * sizeof(flow_compare_t) / sizeof(u32);
 
-	return jhash2(k, (sizeof(*key) / sizeof(u32)), fcp->hash_rnd)
+	return jhash2(k, length, fcp->hash_rnd)
 		& (flow_cache_hash_size(fc) - 1);
 }
 
-typedef unsigned long flow_compare_t;
-
 /* I hear what you're saying, use memcmp. But memcmp cannot make
- * important assumptions that we can here, such as alignment and
- * constant size.
+ * important assumptions that we can here, such as alignment.
  */
-static int flow_key_compare(const struct flowi *key1, const struct flowi *key2)
+static int flow_key_compare(const struct flowi *key1, const struct flowi *key2,
+			    size_t keysize)
 {
 	const flow_compare_t *k1, *k1_lim, *k2;
-	const int n_elem = sizeof(struct flowi) / sizeof(flow_compare_t);
-
-	BUILD_BUG_ON(sizeof(struct flowi) % sizeof(flow_compare_t));
 
 	k1 = (const flow_compare_t *) key1;
-	k1_lim = k1 + n_elem;
+	k1_lim = k1 + keysize;
 
 	k2 = (const flow_compare_t *) key2;
 
@@ -215,6 +213,7 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
 	struct flow_cache_entry *fle, *tfle;
 	struct hlist_node *entry;
 	struct flow_cache_object *flo;
+	size_t keysize;
 	unsigned int hash;
 
 	local_bh_disable();
@@ -222,6 +221,11 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
 
 	fle = NULL;
 	flo = NULL;
+
+	keysize = flow_key_size(family);
+	if (!keysize)
+		goto nocache;
+
 	/* Packet really early in init?  Making flow_cache_init a
 	 * pre-smp initcall would solve this.  --RR */
 	if (!fcp->hash_table)
@@ -230,11 +234,12 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
 	if (fcp->hash_rnd_recalc)
 		flow_new_hash_rnd(fc, fcp);
 
-	hash = flow_hash_code(fc, fcp, key);
+	hash = flow_hash_code(fc, fcp, key, keysize);
 	hlist_for_each_entry(tfle, entry, &fcp->hash_table[hash], u.hlist) {
-		if (tfle->family == family &&
+		if (tfle->net == net &&
+		    tfle->family == family &&
 		    tfle->dir == dir &&
-		    flow_key_compare(key, &tfle->key) == 0) {
+		    flow_key_compare(key, &tfle->key, keysize) == 0) {
 			fle = tfle;
 			break;
 		}
@@ -246,9 +251,10 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
 
 		fle = kmem_cache_alloc(flow_cachep, GFP_ATOMIC);
 		if (fle) {
+			fle->net = net;
 			fle->family = family;
 			fle->dir = dir;
-			memcpy(&fle->key, key, sizeof(*key));
+			memcpy(&fle->key, key, keysize * sizeof(flow_compare_t));
 			fle->object = NULL;
 			hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]);
 			fcp->hash_count++;
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 27002dffe7ed..387703f56fce 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -611,8 +611,21 @@ struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src)
 }
 EXPORT_SYMBOL_GPL(skb_morph);
 
-/* skb frags copy userspace buffers to kernel */
-static int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)
+/* skb_copy_ubufs	-	copy userspace skb frags buffers to kernel
+ * @skb: the skb to modify
+ * @gfp_mask: allocation priority
+ *
+ * This must be called on SKBTX_DEV_ZEROCOPY skb.
+ * It will copy all frags into kernel and drop the reference
+ * to userspace pages.
+ *
+ * If this function is called from an interrupt gfp_mask() must be
+ * %GFP_ATOMIC.
+ *
+ * Returns 0 on success or a negative error code on failure
+ * to allocate kernel memory to copy to.
+ */
+int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)
 {
 	int i;
 	int num_frags = skb_shinfo(skb)->nr_frags;
@@ -652,6 +665,8 @@ static int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)
 		skb_shinfo(skb)->frags[i - 1].page = head;
 		head = (struct page *)head->private;
 	}
+
+	skb_shinfo(skb)->tx_flags &= ~SKBTX_DEV_ZEROCOPY;
 	return 0;
 }
 
@@ -677,7 +692,6 @@ struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
 	if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
 		if (skb_copy_ubufs(skb, gfp_mask))
 			return NULL;
-		skb_shinfo(skb)->tx_flags &= ~SKBTX_DEV_ZEROCOPY;
 	}
 
 	n = skb + 1;
@@ -803,7 +817,6 @@ struct sk_buff *pskb_copy(struct sk_buff *skb, gfp_t gfp_mask)
 			n = NULL;
 			goto out;
 		}
-		skb_shinfo(skb)->tx_flags &= ~SKBTX_DEV_ZEROCOPY;
 	}
 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 		skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i];
@@ -896,7 +909,6 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
 	if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
 		if (skb_copy_ubufs(skb, gfp_mask))
 			goto nofrags;
-		skb_shinfo(skb)->tx_flags &= ~SKBTX_DEV_ZEROCOPY;
 	}
 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
 		get_page(skb_shinfo(skb)->frags[i].page);
diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c
index 27997d35ebd3..a2468363978e 100644
--- a/net/ethernet/eth.c
+++ b/net/ethernet/eth.c
@@ -340,7 +340,7 @@ void ether_setup(struct net_device *dev)
 	dev->addr_len		= ETH_ALEN;
 	dev->tx_queue_len	= 1000;	/* Ethernet wants good queues */
 	dev->flags		= IFF_BROADCAST|IFF_MULTICAST;
-	dev->priv_flags		= IFF_TX_SKB_SHARING;
+	dev->priv_flags		|= IFF_TX_SKB_SHARING;
 
 	memset(dev->broadcast, 0xFF, ETH_ALEN);
 
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 1b745d412cf6..dd2b9478ddd1 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -466,8 +466,13 @@ int inet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
 		goto out;
 
 	if (addr->sin_family != AF_INET) {
+		/* Compatibility games : accept AF_UNSPEC (mapped to AF_INET)
+		 * only if s_addr is INADDR_ANY.
+		 */
 		err = -EAFNOSUPPORT;
-		goto out;
+		if (addr->sin_family != AF_UNSPEC ||
+		    addr->sin_addr.s_addr != htonl(INADDR_ANY))
+			goto out;
 	}
 
 	chk_addr_ret = inet_addr_type(sock_net(sk), addr->sin_addr.s_addr);
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index 33e2c35b74b7..80106d89d548 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -142,6 +142,14 @@ const struct fib_prop fib_props[RTN_MAX + 1] = {
 };
 
 /* Release a nexthop info record */
+static void free_fib_info_rcu(struct rcu_head *head)
+{
+	struct fib_info *fi = container_of(head, struct fib_info, rcu);
+
+	if (fi->fib_metrics != (u32 *) dst_default_metrics)
+		kfree(fi->fib_metrics);
+	kfree(fi);
+}
 
 void free_fib_info(struct fib_info *fi)
 {
@@ -156,7 +164,7 @@ void free_fib_info(struct fib_info *fi)
 	} endfor_nexthops(fi);
 	fib_info_cnt--;
 	release_net(fi->fib_net);
-	kfree_rcu(fi, rcu);
+	call_rcu(&fi->rcu, free_fib_info_rcu);
 }
 
 void fib_release_info(struct fib_info *fi)
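
Why this switches from kfree_rcu() to call_rcu(): kfree_rcu() can only free the structure embedding the rcu_head, while a fib_info may also own a separately allocated metrics array (the leak this hunk plugs). The shape of the pattern, sketched in standalone C with a hand-rolled container_of and plain free() standing in for the RCU machinery:

    #include <stdlib.h>
    #include <stddef.h>

    struct rcu_head_like { void (*func)(struct rcu_head_like *); };

    static const unsigned int dst_default_metrics[16];  /* shared read-only defaults */

    struct fib_info_like {
        unsigned int *fib_metrics;      /* may point at the shared defaults */
        struct rcu_head_like rcu;
    };

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    /* Deferred-free callback: release the private metrics array first,
     * then the object itself -- exactly what free_fib_info_rcu() does. */
    static void free_fib_info_like(struct rcu_head_like *head)
    {
        struct fib_info_like *fi =
            container_of(head, struct fib_info_like, rcu);

        if (fi->fib_metrics != (unsigned int *)dst_default_metrics)
            free(fi->fib_metrics);
        free(fi);
    }

Everything above is an illustrative analogue; the kernel versions of rcu_head, call_rcu() and dst_default_metrics are the real mechanism.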
diff --git a/net/ipv4/netfilter/ip_queue.c b/net/ipv4/netfilter/ip_queue.c
index 5c9b9d963918..e59aabd0eae4 100644
--- a/net/ipv4/netfilter/ip_queue.c
+++ b/net/ipv4/netfilter/ip_queue.c
@@ -218,6 +218,7 @@ ipq_build_packet_message(struct nf_queue_entry *entry, int *errp)
 	return skb;
 
 nlmsg_failure:
+	kfree_skb(skb);
 	*errp = -EINVAL;
 	printk(KERN_ERR "ip_queue: error creating packet message\n");
 	return NULL;
@@ -313,7 +314,7 @@ ipq_set_verdict(struct ipq_verdict_msg *vmsg, unsigned int len)
 {
 	struct nf_queue_entry *entry;
 
-	if (vmsg->value > NF_MAX_VERDICT)
+	if (vmsg->value > NF_MAX_VERDICT || vmsg->value == NF_STOLEN)
 		return -EINVAL;
 
 	entry = ipq_find_dequeue_entry(vmsg->id);
@@ -358,12 +359,9 @@ ipq_receive_peer(struct ipq_peer_msg *pmsg,
 		break;
 
 	case IPQM_VERDICT:
-		if (pmsg->msg.verdict.value > NF_MAX_VERDICT)
-			status = -EINVAL;
-		else
-			status = ipq_set_verdict(&pmsg->msg.verdict,
-						 len - sizeof(*pmsg));
-		break;
+		status = ipq_set_verdict(&pmsg->msg.verdict,
+					 len - sizeof(*pmsg));
+		break;
 	default:
 		status = -EINVAL;
 	}
diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c
index b14ec7d03b6e..4bfad5da94f4 100644
--- a/net/ipv4/proc.c
+++ b/net/ipv4/proc.c
@@ -254,6 +254,8 @@ static const struct snmp_mib snmp4_net_list[] = {
 	SNMP_MIB_ITEM("TCPDeferAcceptDrop", LINUX_MIB_TCPDEFERACCEPTDROP),
 	SNMP_MIB_ITEM("IPReversePathFilter", LINUX_MIB_IPRPFILTER),
 	SNMP_MIB_ITEM("TCPTimeWaitOverflow", LINUX_MIB_TCPTIMEWAITOVERFLOW),
+	SNMP_MIB_ITEM("TCPReqQFullDoCookies", LINUX_MIB_TCPREQQFULLDOCOOKIES),
+	SNMP_MIB_ITEM("TCPReqQFullDrop", LINUX_MIB_TCPREQQFULLDROP),
 	SNMP_MIB_SENTINEL
 };
 
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 1c12b8ec849d..c34f01513945 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -808,20 +808,38 @@ static void tcp_v4_reqsk_destructor(struct request_sock *req)
 	kfree(inet_rsk(req)->opt);
 }
 
-static void syn_flood_warning(const struct sk_buff *skb)
+/*
+ * Return 1 if a syncookie should be sent
+ */
+int tcp_syn_flood_action(struct sock *sk,
+			 const struct sk_buff *skb,
+			 const char *proto)
 {
-	const char *msg;
+	const char *msg = "Dropping request";
+	int want_cookie = 0;
+	struct listen_sock *lopt;
+
+
 
 #ifdef CONFIG_SYN_COOKIES
-	if (sysctl_tcp_syncookies)
+	if (sysctl_tcp_syncookies) {
 		msg = "Sending cookies";
-	else
+		want_cookie = 1;
+		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDOCOOKIES);
+	} else
 #endif
-	msg = "Dropping request";
+		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDROP);
 
-	pr_info("TCP: Possible SYN flooding on port %d. %s.\n",
-		ntohs(tcp_hdr(skb)->dest), msg);
+	lopt = inet_csk(sk)->icsk_accept_queue.listen_opt;
+	if (!lopt->synflood_warned) {
+		lopt->synflood_warned = 1;
+		pr_info("%s: Possible SYN flooding on port %d. %s. "
+			" Check SNMP counters.\n",
+			proto, ntohs(tcp_hdr(skb)->dest), msg);
+	}
+	return want_cookie;
 }
+EXPORT_SYMBOL(tcp_syn_flood_action);
 
 /*
  * Save and compile IPv4 options into the request_sock if needed.
@@ -1235,11 +1253,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
 	__be32 saddr = ip_hdr(skb)->saddr;
 	__be32 daddr = ip_hdr(skb)->daddr;
 	__u32 isn = TCP_SKB_CB(skb)->when;
-#ifdef CONFIG_SYN_COOKIES
 	int want_cookie = 0;
-#else
-#define want_cookie 0 /* Argh, why doesn't gcc optimize this :( */
-#endif
 
 	/* Never answer to SYNs send to broadcast or multicast */
 	if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
@@ -1250,14 +1264,9 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
 	 * evidently real one.
 	 */
 	if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
-		if (net_ratelimit())
-			syn_flood_warning(skb);
-#ifdef CONFIG_SYN_COOKIES
-		if (sysctl_tcp_syncookies) {
-			want_cookie = 1;
-		} else
-#endif
-			goto drop;
+		want_cookie = tcp_syn_flood_action(sk, skb, "TCP");
+		if (!want_cookie)
+			goto drop;
 	}
 
 	/* Accept backlog is full. If we have already queued enough
@@ -1303,9 +1312,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
 		while (l-- > 0)
 			*c++ ^= *hash_location++;
 
-#ifdef CONFIG_SYN_COOKIES
 		want_cookie = 0;	/* not our kind of cookie */
-#endif
 		tmp_ext.cookie_out_never = 0; /* false */
 		tmp_ext.cookie_plus = tmp_opt.cookie_plus;
 	} else if (!tp->rx_opt.cookie_in_always) {
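
With the refactor above, both TCP families funnel SYN-queue overflow through one helper that decides on syncookies, bumps the new SNMP counters, and warns once per listener instead of via net_ratelimit(). A standalone model of that decision logic (struct listener and all names here are illustrative, not the kernel types):

    #include <stdio.h>

    struct listener {
        int synflood_warned;
        long cookies_sent;   /* LINUX_MIB_TCPREQQFULLDOCOOKIES analogue */
        long drops;          /* LINUX_MIB_TCPREQQFULLDROP analogue */
    };

    /* Returns 1 if a syncookie should be sent, 0 if the SYN is dropped. */
    static int syn_flood_action(struct listener *l, int syncookies_enabled,
                                int port, const char *proto)
    {
        int want_cookie = 0;

        if (syncookies_enabled) {
            want_cookie = 1;
            l->cookies_sent++;
        } else {
            l->drops++;
        }
        if (!l->synflood_warned) {
            l->synflood_warned = 1;   /* warn only once per listener */
            fprintf(stderr, "%s: possible SYN flooding on port %d. %s.\n",
                    proto, port,
                    want_cookie ? "Sending cookies" : "Dropping request");
        }
        return want_cookie;
    }

The single-warning bit is what the new `synflood_warned` field in struct listen_sock (request_sock.h hunk above) exists for; the tcp_ipv6.c hunks below delete the duplicate IPv6 warner and call the shared helper with "TCPv6".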
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index 9ef1831746ef..b46e9f88ce37 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -599,7 +599,7 @@ int datagram_recv_ctl(struct sock *sk, struct msghdr *msg, struct sk_buff *skb)
 	return 0;
 }
 
-int datagram_send_ctl(struct net *net,
+int datagram_send_ctl(struct net *net, struct sock *sk,
 		      struct msghdr *msg, struct flowi6 *fl6,
 		      struct ipv6_txoptions *opt,
 		      int *hlimit, int *tclass, int *dontfrag)
@@ -658,7 +658,8 @@ int datagram_send_ctl(struct net *net,
 
 			if (addr_type != IPV6_ADDR_ANY) {
 				int strict = __ipv6_addr_src_scope(addr_type) <= IPV6_ADDR_SCOPE_LINKLOCAL;
-				if (!ipv6_chk_addr(net, &src_info->ipi6_addr,
+				if (!inet_sk(sk)->transparent &&
+				    !ipv6_chk_addr(net, &src_info->ipi6_addr,
 						   strict ? dev : NULL, 0))
 					err = -EINVAL;
 				else
diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c
index f3caf1b8d572..543039450193 100644
--- a/net/ipv6/ip6_flowlabel.c
+++ b/net/ipv6/ip6_flowlabel.c
@@ -322,8 +322,8 @@ static int fl6_renew(struct ip6_flowlabel *fl, unsigned long linger, unsigned lo
 }
 
 static struct ip6_flowlabel *
-fl_create(struct net *net, struct in6_flowlabel_req *freq, char __user *optval,
-	  int optlen, int *err_p)
+fl_create(struct net *net, struct sock *sk, struct in6_flowlabel_req *freq,
+	  char __user *optval, int optlen, int *err_p)
 {
 	struct ip6_flowlabel *fl = NULL;
 	int olen;
@@ -360,7 +360,7 @@ fl_create(struct net *net, struct in6_flowlabel_req *freq, char __user *optval,
 		msg.msg_control = (void*)(fl->opt+1);
 		memset(&flowi6, 0, sizeof(flowi6));
 
-		err = datagram_send_ctl(net, &msg, &flowi6, fl->opt, &junk,
+		err = datagram_send_ctl(net, sk, &msg, &flowi6, fl->opt, &junk,
 					&junk, &junk);
 		if (err)
 			goto done;
@@ -528,7 +528,7 @@ int ipv6_flowlabel_opt(struct sock *sk, char __user *optval, int optlen)
 		if (freq.flr_label & ~IPV6_FLOWLABEL_MASK)
 			return -EINVAL;
 
-		fl = fl_create(net, &freq, optval, optlen, &err);
+		fl = fl_create(net, sk, &freq, optval, optlen, &err);
 		if (fl == NULL)
 			return err;
 		sfl1 = kmalloc(sizeof(*sfl1), GFP_KERNEL);
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
index 147ede38ab48..2fbda5fc4cc4 100644
--- a/net/ipv6/ipv6_sockglue.c
+++ b/net/ipv6/ipv6_sockglue.c
@@ -475,7 +475,7 @@ sticky_done:
 		msg.msg_controllen = optlen;
 		msg.msg_control = (void*)(opt+1);
 
-		retv = datagram_send_ctl(net, &msg, &fl6, opt, &junk, &junk,
+		retv = datagram_send_ctl(net, sk, &msg, &fl6, opt, &junk, &junk,
 					 &junk);
 		if (retv)
 			goto done;
diff --git a/net/ipv6/netfilter/ip6_queue.c b/net/ipv6/netfilter/ip6_queue.c
index 249394863284..e63c3972a739 100644
--- a/net/ipv6/netfilter/ip6_queue.c
+++ b/net/ipv6/netfilter/ip6_queue.c
@@ -218,6 +218,7 @@ ipq_build_packet_message(struct nf_queue_entry *entry, int *errp)
 	return skb;
 
 nlmsg_failure:
+	kfree_skb(skb);
 	*errp = -EINVAL;
 	printk(KERN_ERR "ip6_queue: error creating packet message\n");
 	return NULL;
@@ -313,7 +314,7 @@ ipq_set_verdict(struct ipq_verdict_msg *vmsg, unsigned int len)
 {
 	struct nf_queue_entry *entry;
 
-	if (vmsg->value > NF_MAX_VERDICT)
+	if (vmsg->value > NF_MAX_VERDICT || vmsg->value == NF_STOLEN)
 		return -EINVAL;
 
 	entry = ipq_find_dequeue_entry(vmsg->id);
@@ -358,12 +359,9 @@ ipq_receive_peer(struct ipq_peer_msg *pmsg,
 		break;
 
 	case IPQM_VERDICT:
-		if (pmsg->msg.verdict.value > NF_MAX_VERDICT)
-			status = -EINVAL;
-		else
-			status = ipq_set_verdict(&pmsg->msg.verdict,
-						 len - sizeof(*pmsg));
-		break;
+		status = ipq_set_verdict(&pmsg->msg.verdict,
+					 len - sizeof(*pmsg));
+		break;
 	default:
 		status = -EINVAL;
 	}
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index 6a79f3081bdb..343852e5c703 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -817,8 +817,8 @@ static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk,
 		memset(opt, 0, sizeof(struct ipv6_txoptions));
 		opt->tot_len = sizeof(struct ipv6_txoptions);
 
-		err = datagram_send_ctl(sock_net(sk), msg, &fl6, opt, &hlimit,
-					&tclass, &dontfrag);
+		err = datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt,
+					&hlimit, &tclass, &dontfrag);
 		if (err < 0) {
 			fl6_sock_release(flowlabel);
 			return err;
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 9e69eb0ec6dd..1250f9020670 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -104,6 +104,9 @@ static u32 *ipv6_cow_metrics(struct dst_entry *dst, unsigned long old)
 	struct inet_peer *peer;
 	u32 *p = NULL;
 
+	if (!(rt->dst.flags & DST_HOST))
+		return NULL;
+
 	if (!rt->rt6i_peer)
 		rt6_bind_peer(rt, 1);
 
@@ -252,6 +255,9 @@ static void ip6_dst_destroy(struct dst_entry *dst)
 	struct inet6_dev *idev = rt->rt6i_idev;
 	struct inet_peer *peer = rt->rt6i_peer;
 
+	if (!(rt->dst.flags & DST_HOST))
+		dst_destroy_metrics_generic(dst);
+
 	if (idev != NULL) {
 		rt->rt6i_idev = NULL;
 		in6_dev_put(idev);
@@ -723,9 +729,7 @@ static struct rt6_info *rt6_alloc_cow(const struct rt6_info *ort,
 		ipv6_addr_copy(&rt->rt6i_gateway, daddr);
 	}
 
-	rt->rt6i_dst.plen = 128;
 	rt->rt6i_flags |= RTF_CACHE;
-	rt->dst.flags |= DST_HOST;
 
 #ifdef CONFIG_IPV6_SUBTREES
 	if (rt->rt6i_src.plen && saddr) {
@@ -775,9 +779,7 @@ static struct rt6_info *rt6_alloc_clone(struct rt6_info *ort,
 	struct rt6_info *rt = ip6_rt_copy(ort, daddr);
 
 	if (rt) {
-		rt->rt6i_dst.plen = 128;
 		rt->rt6i_flags |= RTF_CACHE;
-		rt->dst.flags |= DST_HOST;
 		dst_set_neighbour(&rt->dst, neigh_clone(dst_get_neighbour_raw(&ort->dst)));
 	}
 	return rt;
@@ -1078,12 +1080,15 @@ struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
 		neigh = NULL;
 	}
 
-	rt->rt6i_idev = idev;
+	rt->dst.flags |= DST_HOST;
+	rt->dst.output = ip6_output;
 	dst_set_neighbour(&rt->dst, neigh);
 	atomic_set(&rt->dst.__refcnt, 1);
-	ipv6_addr_copy(&rt->rt6i_dst.addr, addr);
 	dst_metric_set(&rt->dst, RTAX_HOPLIMIT, 255);
-	rt->dst.output = ip6_output;
+
+	ipv6_addr_copy(&rt->rt6i_dst.addr, addr);
+	rt->rt6i_dst.plen = 128;
+	rt->rt6i_idev = idev;
 
 	spin_lock_bh(&icmp6_dst_lock);
 	rt->dst.next = icmp6_dst_gc_list;
@@ -1261,6 +1266,14 @@ int ip6_route_add(struct fib6_config *cfg)
 	if (rt->rt6i_dst.plen == 128)
 		rt->dst.flags |= DST_HOST;
 
+	if (!(rt->dst.flags & DST_HOST) && cfg->fc_mx) {
+		u32 *metrics = kzalloc(sizeof(u32) * RTAX_MAX, GFP_KERNEL);
+		if (!metrics) {
+			err = -ENOMEM;
+			goto out;
+		}
+		dst_init_metrics(&rt->dst, metrics, 0);
+	}
 #ifdef CONFIG_IPV6_SUBTREES
 	ipv6_addr_prefix(&rt->rt6i_src.addr, &cfg->fc_src, cfg->fc_src_len);
 	rt->rt6i_src.plen = cfg->fc_src_len;
@@ -1607,9 +1620,6 @@ void rt6_redirect(const struct in6_addr *dest, const struct in6_addr *src,
 	if (on_link)
 		nrt->rt6i_flags &= ~RTF_GATEWAY;
 
-	nrt->rt6i_dst.plen = 128;
-	nrt->dst.flags |= DST_HOST;
-
 	ipv6_addr_copy(&nrt->rt6i_gateway, (struct in6_addr*)neigh->primary_key);
 	dst_set_neighbour(&nrt->dst, neigh_clone(neigh));
 
@@ -1754,9 +1764,10 @@ static struct rt6_info *ip6_rt_copy(const struct rt6_info *ort,
 	if (rt) {
 		rt->dst.input = ort->dst.input;
 		rt->dst.output = ort->dst.output;
+		rt->dst.flags |= DST_HOST;
 
 		ipv6_addr_copy(&rt->rt6i_dst.addr, dest);
-		rt->rt6i_dst.plen = ort->rt6i_dst.plen;
+		rt->rt6i_dst.plen = 128;
 		dst_copy_metrics(&rt->dst, &ort->dst);
 		rt->dst.error = ort->dst.error;
 		rt->rt6i_idev = ort->rt6i_idev;
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index d1fb63f4aeb7..3c9fa618b69d 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -531,20 +531,6 @@ static int tcp_v6_rtx_synack(struct sock *sk, struct request_sock *req,
 	return tcp_v6_send_synack(sk, req, rvp);
 }
 
-static inline void syn_flood_warning(struct sk_buff *skb)
-{
-#ifdef CONFIG_SYN_COOKIES
-	if (sysctl_tcp_syncookies)
-		printk(KERN_INFO
-		       "TCPv6: Possible SYN flooding on port %d. "
-		       "Sending cookies.\n", ntohs(tcp_hdr(skb)->dest));
-	else
-#endif
-		printk(KERN_INFO
-		       "TCPv6: Possible SYN flooding on port %d. "
-		       "Dropping request.\n", ntohs(tcp_hdr(skb)->dest));
-}
-
 static void tcp_v6_reqsk_destructor(struct request_sock *req)
 {
 	kfree_skb(inet6_rsk(req)->pktopts);
@@ -1179,11 +1165,7 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
 	struct tcp_sock *tp = tcp_sk(sk);
 	__u32 isn = TCP_SKB_CB(skb)->when;
 	struct dst_entry *dst = NULL;
-#ifdef CONFIG_SYN_COOKIES
 	int want_cookie = 0;
-#else
-#define want_cookie 0
-#endif
 
 	if (skb->protocol == htons(ETH_P_IP))
 		return tcp_v4_conn_request(sk, skb);
@@ -1192,14 +1174,9 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
 		goto drop;
 
 	if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
-		if (net_ratelimit())
-			syn_flood_warning(skb);
-#ifdef CONFIG_SYN_COOKIES
-		if (sysctl_tcp_syncookies)
-			want_cookie = 1;
-		else
-#endif
-			goto drop;
+		want_cookie = tcp_syn_flood_action(sk, skb, "TCPv6");
+		if (!want_cookie)
+			goto drop;
 	}
 
 	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
@@ -1249,9 +1226,7 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
 		while (l-- > 0)
 			*c++ ^= *hash_location++;
 
-#ifdef CONFIG_SYN_COOKIES
 		want_cookie = 0;	/* not our kind of cookie */
-#endif
 		tmp_ext.cookie_out_never = 0; /* false */
 		tmp_ext.cookie_plus = tmp_opt.cookie_plus;
 	} else if (!tp->rx_opt.cookie_in_always) {
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 29213b51c499..bb95e8e1c6f9 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -1090,8 +1090,8 @@ do_udp_sendmsg:
 		memset(opt, 0, sizeof(struct ipv6_txoptions));
 		opt->tot_len = sizeof(*opt);
 
-		err = datagram_send_ctl(sock_net(sk), msg, &fl6, opt, &hlimit,
-					&tclass, &dontfrag);
+		err = datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt,
+					&hlimit, &tclass, &dontfrag);
 		if (err < 0) {
 			fl6_sock_release(flowlabel);
 			return err;
diff --git a/net/irda/irsysctl.c b/net/irda/irsysctl.c
index d0b70dadf73b..2615ffc8e785 100644
--- a/net/irda/irsysctl.c
+++ b/net/irda/irsysctl.c
@@ -40,9 +40,9 @@ extern int sysctl_slot_timeout;
 extern int  sysctl_fast_poll_increase;
 extern char sysctl_devname[];
 extern int  sysctl_max_baud_rate;
-extern int  sysctl_min_tx_turn_time;
-extern int  sysctl_max_tx_data_size;
-extern int  sysctl_max_tx_window;
+extern unsigned int sysctl_min_tx_turn_time;
+extern unsigned int sysctl_max_tx_data_size;
+extern unsigned int sysctl_max_tx_window;
 extern int  sysctl_max_noreply_time;
 extern int  sysctl_warn_noreply_time;
 extern int  sysctl_lap_keepalive_time;
diff --git a/net/irda/qos.c b/net/irda/qos.c
index 1b51bcf42394..4369f7f41bcb 100644
--- a/net/irda/qos.c
+++ b/net/irda/qos.c
@@ -60,7 +60,7 @@ int sysctl_max_noreply_time = 12;
  * Default is 10us which means using the unmodified value given by the
  * peer except if it's 0 (0 is likely a bug in the other stack).
  */
-unsigned sysctl_min_tx_turn_time = 10;
+unsigned int sysctl_min_tx_turn_time = 10;
 /*
  * Maximum data size to be used in transmission in payload of LAP frame.
  * There is a bit of confusion in the IrDA spec :
@@ -75,13 +75,13 @@ unsigned sysctl_min_tx_turn_time = 10;
  * bytes frames or all negotiated frame sizes, but you can use the sysctl
  * to play with this value anyway.
  * Jean II */
-unsigned sysctl_max_tx_data_size = 2042;
+unsigned int sysctl_max_tx_data_size = 2042;
 /*
  * Maximum transmit window, i.e. number of LAP frames between turn-around.
  * This allow to override what the peer told us. Some peers are buggy and
  * don't always support what they tell us.
  * Jean II */
-unsigned sysctl_max_tx_window = 7;
+unsigned int sysctl_max_tx_window = 7;
 
 static int irlap_param_baud_rate(void *instance, irda_param_t *param, int get);
 static int irlap_param_link_disconnect(void *instance, irda_param_t *parm,
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index 3db78b696c5c..21070e9bc8d0 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -665,7 +665,7 @@ static int __must_check __sta_info_destroy(struct sta_info *sta)
 		BUG_ON(!sdata->bss);
 
 		atomic_dec(&sdata->bss->num_sta_ps);
-		__sta_info_clear_tim_bit(sdata->bss, sta);
+		sta_info_clear_tim_bit(sta);
 	}
 
 	local->num_sta--;
diff --git a/net/netfilter/nf_conntrack_pptp.c b/net/netfilter/nf_conntrack_pptp.c
index 2fd4565144de..31d56b23b9e9 100644
--- a/net/netfilter/nf_conntrack_pptp.c
+++ b/net/netfilter/nf_conntrack_pptp.c
@@ -364,6 +364,7 @@ pptp_inbound_pkt(struct sk_buff *skb,
 		break;
 
 	case PPTP_WAN_ERROR_NOTIFY:
+	case PPTP_SET_LINK_INFO:
 	case PPTP_ECHO_REQUEST:
 	case PPTP_ECHO_REPLY:
 		/* I don't have to explain these ;) */
diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
index 37bf94394be0..8235b86b4e87 100644
--- a/net/netfilter/nf_conntrack_proto_tcp.c
+++ b/net/netfilter/nf_conntrack_proto_tcp.c
@@ -409,7 +409,7 @@ static void tcp_options(const struct sk_buff *skb,
 		if (opsize < 2) /* "silly options" */
 			return;
 		if (opsize > length)
-			break;	/* don't parse partial options */
+			return;	/* don't parse partial options */
 
 		if (opcode == TCPOPT_SACK_PERM
 		    && opsize == TCPOLEN_SACK_PERM
@@ -447,7 +447,7 @@ static void tcp_sack(const struct sk_buff *skb, unsigned int dataoff,
 	BUG_ON(ptr == NULL);
 
 	/* Fast path for timestamp-only option */
-	if (length == TCPOLEN_TSTAMP_ALIGNED*4
+	if (length == TCPOLEN_TSTAMP_ALIGNED
 	    && *(__be32 *)ptr == htonl((TCPOPT_NOP << 24)
 				       | (TCPOPT_NOP << 16)
 				       | (TCPOPT_TIMESTAMP << 8)
@@ -469,7 +469,7 @@ static void tcp_sack(const struct sk_buff *skb, unsigned int dataoff,
 		if (opsize < 2) /* "silly options" */
 			return;
 		if (opsize > length)
-			break;	/* don't parse partial options */
+			return;	/* don't parse partial options */
 
 		if (opcode == TCPOPT_SACK
 		    && opsize >= (TCPOLEN_SACK_BASE
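
Why break becomes return in both option walks above: break only leaves the inner switch, so the loop keeps walking bytes that a truncated option already declared it owns, misreading garbage as further options. A minimal TCP-style TLV walk showing the stop-vs-skip distinction (self-contained sketch, not the kernel parser; the extra length guard before reading the size byte is added hardening):

    /* Each option is kind, len, payload; kind 0 ends the list, kind 1
     * is a one-byte NOP. Returns how many sized options were seen. */
    static int count_options(const unsigned char *ptr, int length)
    {
        int n = 0;

        while (length > 0) {
            int opcode = *ptr++;
            int opsize;

            switch (opcode) {
            case 0:                 /* EOL */
                return n;
            case 1:                 /* NOP */
                length--;
                continue;
            default:
                if (length < 2)     /* no room for a length byte */
                    return n;
                opsize = ptr[0];
                if (opsize < 2)     /* "silly options" */
                    return n;
                if (opsize > length)
                    return n;       /* truncated: stop the whole walk */
                ptr += opsize - 1;  /* kind byte already consumed */
                length -= opsize;
                n++;
            }
        }
        return n;
    }

A `break` in place of either `return n` above would resume the while loop mid-option, which is exactly the bug the two hunks close.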
diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c
index 00bd475eab4b..a80b0cb03f17 100644
--- a/net/netfilter/nfnetlink_queue.c
+++ b/net/netfilter/nfnetlink_queue.c
@@ -646,8 +646,8 @@ verdicthdr_get(const struct nlattr * const nfqa[])
 		return NULL;
 
 	vhdr = nla_data(nfqa[NFQA_VERDICT_HDR]);
-	verdict = ntohl(vhdr->verdict);
-	if ((verdict & NF_VERDICT_MASK) > NF_MAX_VERDICT)
+	verdict = ntohl(vhdr->verdict) & NF_VERDICT_MASK;
+	if (verdict > NF_MAX_VERDICT || verdict == NF_STOLEN)
 		return NULL;
 	return vhdr;
 }
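
The nfnetlink_queue hunk masks the verdict before range-checking it, so flag bits in the upper bytes can no longer smuggle an out-of-range value past the check, and it rejects NF_STOLEN from userspace outright, matching the ip_queue/ip6_queue hunks above. In isolation (constant values assumed for the sketch):

    #define NF_VERDICT_MASK 0x000000ff  /* low byte carries the verdict */
    #define NF_MAX_VERDICT  5           /* assumed upper bound */
    #define NF_STOLEN       2

    /* Mask first, then range-check, as the patched code does. */
    static int verdict_ok(unsigned int raw)
    {
        unsigned int verdict = raw & NF_VERDICT_MASK;

        return verdict <= NF_MAX_VERDICT && verdict != NF_STOLEN;
    }
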
diff --git a/net/netfilter/xt_rateest.c b/net/netfilter/xt_rateest.c
index 76a083184d8e..ed0db15ab00e 100644
--- a/net/netfilter/xt_rateest.c
+++ b/net/netfilter/xt_rateest.c
@@ -78,7 +78,7 @@ static int xt_rateest_mt_checkentry(const struct xt_mtchk_param *par)
 {
 	struct xt_rateest_match_info *info = par->matchinfo;
 	struct xt_rateest *est1, *est2;
-	int ret = false;
+	int ret = -EINVAL;
 
 	if (hweight32(info->flags & (XT_RATEEST_MATCH_ABS |
 				     XT_RATEEST_MATCH_REL)) != 1)
@@ -101,13 +101,12 @@ static int xt_rateest_mt_checkentry(const struct xt_mtchk_param *par)
 	if (!est1)
 		goto err1;
 
+	est2 = NULL;
 	if (info->flags & XT_RATEEST_MATCH_REL) {
 		est2 = xt_rateest_lookup(info->name2);
 		if (!est2)
 			goto err2;
-	} else
-		est2 = NULL;
-
+	}
 
 	info->est1 = est1;
 	info->est2 = est2;
@@ -116,7 +115,7 @@ static int xt_rateest_mt_checkentry(const struct xt_mtchk_param *par)
 err2:
 	xt_rateest_put(est1);
 err1:
-	return -EINVAL;
+	return ret;
 }
 
 static void xt_rateest_mt_destroy(const struct xt_mtdtor_param *par)
diff --git a/net/sched/cls_rsvp.h b/net/sched/cls_rsvp.h
index be4505ee67a9..b01427924f81 100644
--- a/net/sched/cls_rsvp.h
+++ b/net/sched/cls_rsvp.h
@@ -425,7 +425,7 @@ static int rsvp_change(struct tcf_proto *tp, unsigned long base,
 	struct rsvp_filter *f, **fp;
 	struct rsvp_session *s, **sp;
 	struct tc_rsvp_pinfo *pinfo = NULL;
-	struct nlattr *opt = tca[TCA_OPTIONS-1];
+	struct nlattr *opt = tca[TCA_OPTIONS];
 	struct nlattr *tb[TCA_RSVP_MAX + 1];
 	struct tcf_exts e;
 	unsigned int h1, h2;
@@ -439,7 +439,7 @@ static int rsvp_change(struct tcf_proto *tp, unsigned long base,
 	if (err < 0)
 		return err;
 
-	err = tcf_exts_validate(tp, tb, tca[TCA_RATE-1], &e, &rsvp_ext_map);
+	err = tcf_exts_validate(tp, tb, tca[TCA_RATE], &e, &rsvp_ext_map);
 	if (err < 0)
 		return err;
 
@@ -449,8 +449,8 @@ static int rsvp_change(struct tcf_proto *tp, unsigned long base,
 
 		if (f->handle != handle && handle)
 			goto errout2;
-		if (tb[TCA_RSVP_CLASSID-1]) {
-			f->res.classid = nla_get_u32(tb[TCA_RSVP_CLASSID-1]);
+		if (tb[TCA_RSVP_CLASSID]) {
+			f->res.classid = nla_get_u32(tb[TCA_RSVP_CLASSID]);
 			tcf_bind_filter(tp, &f->res, base);
 		}
 
@@ -462,7 +462,7 @@ static int rsvp_change(struct tcf_proto *tp, unsigned long base,
 	err = -EINVAL;
 	if (handle)
 		goto errout2;
-	if (tb[TCA_RSVP_DST-1] == NULL)
+	if (tb[TCA_RSVP_DST] == NULL)
 		goto errout2;
 
 	err = -ENOBUFS;
@@ -471,19 +471,19 @@ static int rsvp_change(struct tcf_proto *tp, unsigned long base,
 		goto errout2;
 
 	h2 = 16;
-	if (tb[TCA_RSVP_SRC-1]) {
-		memcpy(f->src, nla_data(tb[TCA_RSVP_SRC-1]), sizeof(f->src));
+	if (tb[TCA_RSVP_SRC]) {
+		memcpy(f->src, nla_data(tb[TCA_RSVP_SRC]), sizeof(f->src));
 		h2 = hash_src(f->src);
 	}
-	if (tb[TCA_RSVP_PINFO-1]) {
-		pinfo = nla_data(tb[TCA_RSVP_PINFO-1]);
+	if (tb[TCA_RSVP_PINFO]) {
+		pinfo = nla_data(tb[TCA_RSVP_PINFO]);
 		f->spi = pinfo->spi;
 		f->tunnelhdr = pinfo->tunnelhdr;
 	}
-	if (tb[TCA_RSVP_CLASSID-1])
-		f->res.classid = nla_get_u32(tb[TCA_RSVP_CLASSID-1]);
+	if (tb[TCA_RSVP_CLASSID])
+		f->res.classid = nla_get_u32(tb[TCA_RSVP_CLASSID]);
 
-	dst = nla_data(tb[TCA_RSVP_DST-1]);
+	dst = nla_data(tb[TCA_RSVP_DST]);
 	h1 = hash_dst(dst, pinfo ? pinfo->protocol : 0, pinfo ? pinfo->tunnelid : 0);
 
 	err = -ENOMEM;
@@ -642,8 +642,7 @@ nla_put_failure:
 	return -1;
 }
 
-static struct tcf_proto_ops RSVP_OPS = {
-	.next		=	NULL,
+static struct tcf_proto_ops RSVP_OPS __read_mostly = {
 	.kind		=	RSVP_ID,
 	.classify	=	rsvp_classify,
 	.init		=	rsvp_init,
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index 167c880cf8da..76388b083f28 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -1689,6 +1689,11 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
 		case SCTP_CMD_PURGE_ASCONF_QUEUE:
 			sctp_asconf_queue_teardown(asoc);
 			break;
+
+		case SCTP_CMD_SET_ASOC:
+			asoc = cmd->obj.asoc;
+			break;
+
 		default:
 			pr_warn("Impossible command: %u, %p\n",
 				cmd->verb, cmd->obj.ptr);
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index 49b847b00f99..a0f31e6c1c63 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -2047,6 +2047,12 @@ sctp_disposition_t sctp_sf_do_5_2_4_dupcook(const struct sctp_endpoint *ep,
 	sctp_add_cmd_sf(commands, SCTP_CMD_NEW_ASOC, SCTP_ASOC(new_asoc));
 	sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL());
 
+	/* Restore association pointer to provide SCTP command interpreter
+	 * with a valid context in case it needs to manipulate
+	 * the queues */
+	sctp_add_cmd_sf(commands, SCTP_CMD_SET_ASOC,
+			SCTP_ASOC((struct sctp_association *)asoc));
+
 	return retval;
 
 nomem: