Diffstat (limited to 'drivers/net')
-rw-r--r-- drivers/net/Kconfig | 27
-rw-r--r-- drivers/net/Makefile | 1
-rw-r--r-- drivers/net/arm/ks8695net.c | 24
-rw-r--r-- drivers/net/atlx/atl1.c | 2
-rw-r--r-- drivers/net/atlx/atl2.h | 2
-rw-r--r-- drivers/net/benet/be_cmds.c | 6
-rw-r--r-- drivers/net/benet/be_ethtool.c | 2
-rw-r--r-- drivers/net/benet/be_main.c | 21
-rw-r--r-- drivers/net/bnx2.c | 14
-rw-r--r-- drivers/net/bnx2x_main.c | 10
-rw-r--r-- drivers/net/bonding/bond_main.c | 66
-rw-r--r-- drivers/net/bonding/bond_sysfs.c | 5
-rw-r--r-- drivers/net/can/bfin_can.c | 97
-rw-r--r-- drivers/net/chelsio/sge.c | 2
-rw-r--r-- drivers/net/cnic.c | 10
-rw-r--r-- drivers/net/cs89x0.c | 2
-rw-r--r-- drivers/net/cxgb3/adapter.h | 5
-rw-r--r-- drivers/net/cxgb3/cxgb3_main.c | 57
-rw-r--r-- drivers/net/cxgb3/cxgb3_offload.h | 5
-rw-r--r-- drivers/net/cxgb3/regs.h | 16
-rw-r--r-- drivers/net/cxgb3/sge.c | 14
-rw-r--r-- drivers/net/cxgb3/t3_hw.c | 5
-rw-r--r-- drivers/net/cxgb4/Makefile | 7
-rw-r--r-- drivers/net/cxgb4/cxgb4.h | 741
-rw-r--r-- drivers/net/cxgb4/cxgb4_main.c | 3388
-rw-r--r-- drivers/net/cxgb4/cxgb4_uld.h | 239
-rw-r--r-- drivers/net/cxgb4/l2t.c | 624
-rw-r--r-- drivers/net/cxgb4/l2t.h | 110
-rw-r--r-- drivers/net/cxgb4/sge.c | 2431
-rw-r--r-- drivers/net/cxgb4/t4_hw.c | 3131
-rw-r--r-- drivers/net/cxgb4/t4_hw.h | 100
-rw-r--r-- drivers/net/cxgb4/t4_msg.h | 664
-rw-r--r-- drivers/net/cxgb4/t4_regs.h | 878
-rw-r--r-- drivers/net/cxgb4/t4fw_api.h | 1580
-rw-r--r-- drivers/net/davinci_emac.c | 74
-rw-r--r-- drivers/net/e100.c | 2
-rw-r--r-- drivers/net/e1000/e1000.h | 1
-rw-r--r-- drivers/net/e1000/e1000_main.c | 9
-rw-r--r-- drivers/net/e1000e/82571.c | 2
-rw-r--r-- drivers/net/e1000e/e1000.h | 1
-rw-r--r-- drivers/net/e1000e/lib.c | 2
-rw-r--r-- drivers/net/e1000e/netdev.c | 11
-rw-r--r-- drivers/net/gianfar.c | 29
-rw-r--r-- drivers/net/gianfar.h | 6
-rw-r--r-- drivers/net/ibmveth.c | 2
-rw-r--r-- drivers/net/igb/e1000_82575.c | 1
-rw-r--r-- drivers/net/igb/e1000_hw.h | 1
-rw-r--r-- drivers/net/igb/e1000_mac.c | 6
-rw-r--r-- drivers/net/igb/igb.h | 1
-rw-r--r-- drivers/net/igb/igb_main.c | 25
-rw-r--r-- drivers/net/igbvf/igbvf.h | 1
-rw-r--r-- drivers/net/igbvf/netdev.c | 11
-rw-r--r-- drivers/net/irda/sa1100_ir.c | 2
-rw-r--r-- drivers/net/irda/w83977af_ir.c | 36
-rw-r--r-- drivers/net/iseries_veth.c | 4
-rw-r--r-- drivers/net/ixgbe/ixgbe.h | 7
-rw-r--r-- drivers/net/ixgbe/ixgbe_82599.c | 78
-rw-r--r-- drivers/net/ixgbe/ixgbe_ethtool.c | 21
-rw-r--r-- drivers/net/ixgbe/ixgbe_fcoe.c | 39
-rw-r--r-- drivers/net/ixgbe/ixgbe_main.c | 67
-rw-r--r-- drivers/net/ixgbe/ixgbe_type.h | 2
-rw-r--r-- drivers/net/ixgbevf/ethtool.c | 42
-rw-r--r-- drivers/net/ixgbevf/ixgbevf_main.c | 79
-rw-r--r-- drivers/net/ixgbevf/vf.h | 6
-rw-r--r-- drivers/net/jme.c | 35
-rw-r--r-- drivers/net/jme.h | 2
-rw-r--r-- drivers/net/ks8851.c | 3
-rw-r--r-- drivers/net/ksz884x.c | 10
-rw-r--r-- drivers/net/mlx4/main.c | 1
-rw-r--r-- drivers/net/myri10ge/myri10ge.c | 3
-rw-r--r-- drivers/net/ne.c | 2
-rw-r--r-- drivers/net/netxen/netxen_nic.h | 4
-rw-r--r-- drivers/net/netxen/netxen_nic_ctx.c | 14
-rw-r--r-- drivers/net/netxen/netxen_nic_init.c | 2
-rw-r--r-- drivers/net/netxen/netxen_nic_main.c | 49
-rw-r--r-- drivers/net/pcmcia/pcnet_cs.c | 3
-rw-r--r-- drivers/net/pcmcia/smc91c92_cs.c | 12
-rw-r--r-- drivers/net/pppol2tp.c | 6
-rw-r--r-- drivers/net/qlcnic/qlcnic_hw.c | 3
-rw-r--r-- drivers/net/qlge/qlge_ethtool.c | 2
-rw-r--r-- drivers/net/qlge/qlge_main.c | 2
-rw-r--r-- drivers/net/r6040.c | 11
-rw-r--r-- drivers/net/r8169.c | 54
-rw-r--r-- drivers/net/s2io.c | 4
-rw-r--r-- drivers/net/sfc/regs.h | 2
-rw-r--r-- drivers/net/sgiseeq.c | 4
-rw-r--r-- drivers/net/skfp/ess.c | 2
-rw-r--r-- drivers/net/sky2.c | 2
-rw-r--r-- drivers/net/smc91x.h | 28
-rw-r--r-- drivers/net/smsc9420.c | 2
-rw-r--r-- drivers/net/spider_net.c | 4
-rw-r--r-- drivers/net/stmmac/Kconfig | 1
-rw-r--r-- drivers/net/stmmac/stmmac_main.c | 10
-rw-r--r-- drivers/net/sungem.c | 2
-rw-r--r-- drivers/net/tehuti.c | 2
-rw-r--r-- drivers/net/tg3.c | 4
-rw-r--r-- drivers/net/tokenring/tms380tr.c | 4
-rw-r--r-- drivers/net/tulip/eeprom.c | 54
-rw-r--r-- drivers/net/tulip/uli526x.c | 8
-rw-r--r-- drivers/net/tun.c | 2
-rw-r--r-- drivers/net/typhoon.c | 6
-rw-r--r-- drivers/net/ucc_geth.c | 2
-rw-r--r-- drivers/net/usb/Kconfig | 8
-rw-r--r-- drivers/net/usb/Makefile | 1
-rw-r--r-- drivers/net/usb/asix.c | 30
-rw-r--r-- drivers/net/usb/hso.c | 3
-rw-r--r-- drivers/net/usb/smsc75xx.c | 1288
-rw-r--r-- drivers/net/usb/smsc75xx.h | 421
-rw-r--r-- drivers/net/usb/smsc95xx.c | 33
-rw-r--r-- drivers/net/via-velocity.c | 2
-rw-r--r-- drivers/net/wan/cosa.c | 10
-rw-r--r-- drivers/net/wan/hdlc_cisco.c | 8
-rw-r--r-- drivers/net/wan/hdlc_x25.c | 4
-rw-r--r-- drivers/net/wimax/i2400m/fw.c | 2
-rw-r--r-- drivers/net/wimax/i2400m/i2400m.h | 2
-rw-r--r-- drivers/net/wimax/i2400m/sdio.c | 4
-rw-r--r-- drivers/net/wimax/i2400m/usb.c | 4
-rw-r--r-- drivers/net/wireless/ath/ar9170/main.c | 4
-rw-r--r-- drivers/net/wireless/iwmc3200wifi/debugfs.c | 2
-rw-r--r-- drivers/net/wireless/iwmc3200wifi/lmac.h | 2
-rw-r--r-- drivers/net/wireless/iwmc3200wifi/rx.c | 2
-rw-r--r-- drivers/net/wireless/rt2x00/rt2500usb.c | 4
-rw-r--r-- drivers/net/wireless/rt2x00/rt2800usb.c | 4
-rw-r--r-- drivers/net/wireless/rt2x00/rt2x00debug.c | 2
-rw-r--r-- drivers/net/wireless/rt2x00/rt2x00dev.c | 2
-rw-r--r-- drivers/net/wireless/rt2x00/rt2x00queue.c | 2
-rw-r--r-- drivers/net/wireless/rt2x00/rt61pci.c | 2
-rw-r--r-- drivers/net/wireless/rt2x00/rt73usb.c | 6
-rw-r--r-- drivers/net/wireless/zd1211rw/zd_mac.c | 2
129 files changed, 16480 insertions(+), 589 deletions(-)
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 7029cd50c458..7b832c727f87 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -907,7 +907,7 @@ config SMC91X
907 select CRC32 907 select CRC32
908 select MII 908 select MII
909 depends on ARM || REDWOOD_5 || REDWOOD_6 || M32R || SUPERH || \ 909 depends on ARM || REDWOOD_5 || REDWOOD_6 || M32R || SUPERH || \
910 MIPS || BLACKFIN || MN10300 910 MIPS || BLACKFIN || MN10300 || COLDFIRE
911 help 911 help
912 This is a driver for SMC's 91x series of Ethernet chipsets, 912 This is a driver for SMC's 91x series of Ethernet chipsets,
913 including the SMC91C94 and the SMC91C111. Say Y if you want it 913 including the SMC91C94 and the SMC91C111. Say Y if you want it
@@ -2582,6 +2582,31 @@ config CHELSIO_T3
2582 To compile this driver as a module, choose M here: the module 2582 To compile this driver as a module, choose M here: the module
2583 will be called cxgb3. 2583 will be called cxgb3.
2584 2584
2585config CHELSIO_T4_DEPENDS
2586 tristate
2587 depends on PCI && INET
2588 default y
2589
2590config CHELSIO_T4
2591 tristate "Chelsio Communications T4 Ethernet support"
2592 depends on CHELSIO_T4_DEPENDS
2593 select FW_LOADER
2594 select MDIO
2595 help
2596 This driver supports Chelsio T4-based gigabit and 10Gb Ethernet
2597 adapters.
2598
2599 For general information about Chelsio and our products, visit
2600 our website at <http://www.chelsio.com>.
2601
2602 For customer support, please visit our customer support page at
2603 <http://www.chelsio.com/support.htm>.
2604
2605 Please send feedback to <linux-bugs@chelsio.com>.
2606
2607 To compile this driver as a module choose M here; the module
2608 will be called cxgb4.
2609
2585config EHEA 2610config EHEA
2586 tristate "eHEA Ethernet support" 2611 tristate "eHEA Ethernet support"
2587 depends on IBMEBUS && INET && SPARSEMEM 2612 depends on IBMEBUS && INET && SPARSEMEM
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 478886234c28..a583b50d9de8 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -19,6 +19,7 @@ obj-$(CONFIG_IXGB) += ixgb/
19obj-$(CONFIG_IP1000) += ipg.o 19obj-$(CONFIG_IP1000) += ipg.o
20obj-$(CONFIG_CHELSIO_T1) += chelsio/ 20obj-$(CONFIG_CHELSIO_T1) += chelsio/
21obj-$(CONFIG_CHELSIO_T3) += cxgb3/ 21obj-$(CONFIG_CHELSIO_T3) += cxgb3/
22obj-$(CONFIG_CHELSIO_T4) += cxgb4/
22obj-$(CONFIG_EHEA) += ehea/ 23obj-$(CONFIG_EHEA) += ehea/
23obj-$(CONFIG_CAN) += can/ 24obj-$(CONFIG_CAN) += can/
24obj-$(CONFIG_BONDING) += bonding/ 25obj-$(CONFIG_BONDING) += bonding/
diff --git a/drivers/net/arm/ks8695net.c b/drivers/net/arm/ks8695net.c
index a1d4188c430b..e7810b74f396 100644
--- a/drivers/net/arm/ks8695net.c
+++ b/drivers/net/arm/ks8695net.c
@@ -449,11 +449,10 @@ ks8695_rx_irq(int irq, void *dev_id)
449} 449}
450 450
451/** 451/**
452 * ks8695_rx - Receive packets called by NAPI poll method 452 * ks8695_rx - Receive packets called by NAPI poll method
453 * @ksp: Private data for the KS8695 Ethernet 453 * @ksp: Private data for the KS8695 Ethernet
454 * @budget: The max packets would be receive 454 * @budget: Number of packets allowed to process
455 */ 455 */
456
457static int ks8695_rx(struct ks8695_priv *ksp, int budget) 456static int ks8695_rx(struct ks8695_priv *ksp, int budget)
458{ 457{
459 struct net_device *ndev = ksp->ndev; 458 struct net_device *ndev = ksp->ndev;
@@ -461,7 +460,6 @@ static int ks8695_rx(struct ks8695_priv *ksp, int budget)
461 int buff_n; 460 int buff_n;
462 u32 flags; 461 u32 flags;
463 int pktlen; 462 int pktlen;
464 int last_rx_processed = -1;
465 int received = 0; 463 int received = 0;
466 464
467 buff_n = ksp->next_rx_desc_read; 465 buff_n = ksp->next_rx_desc_read;
@@ -471,6 +469,7 @@ static int ks8695_rx(struct ks8695_priv *ksp, int budget)
471 cpu_to_le32(RDES_OWN)))) { 469 cpu_to_le32(RDES_OWN)))) {
472 rmb(); 470 rmb();
473 flags = le32_to_cpu(ksp->rx_ring[buff_n].status); 471 flags = le32_to_cpu(ksp->rx_ring[buff_n].status);
472
474 /* Found an SKB which we own, this means we 473 /* Found an SKB which we own, this means we
475 * received a packet 474 * received a packet
476 */ 475 */
@@ -533,23 +532,18 @@ rx_failure:
533 ksp->rx_ring[buff_n].status = cpu_to_le32(RDES_OWN); 532 ksp->rx_ring[buff_n].status = cpu_to_le32(RDES_OWN);
534rx_finished: 533rx_finished:
535 received++; 534 received++;
536 /* And note this as processed so we can start
537 * from here next time
538 */
539 last_rx_processed = buff_n;
540 buff_n = (buff_n + 1) & MAX_RX_DESC_MASK; 535 buff_n = (buff_n + 1) & MAX_RX_DESC_MASK;
541 /*And note which RX descriptor we last did */
542 if (likely(last_rx_processed != -1))
543 ksp->next_rx_desc_read =
544 (last_rx_processed + 1) &
545 MAX_RX_DESC_MASK;
546 } 536 }
537
538 /* And note which RX descriptor we last did */
539 ksp->next_rx_desc_read = buff_n;
540
547 /* And refill the buffers */ 541 /* And refill the buffers */
548 ks8695_refill_rxbuffers(ksp); 542 ks8695_refill_rxbuffers(ksp);
549 543
550 /* Kick the RX DMA engine, in case it became 544 /* Kick the RX DMA engine, in case it became suspended */
551 * suspended */
552 ks8695_writereg(ksp, KS8695_DRSC, 0); 545 ks8695_writereg(ksp, KS8695_DRSC, 0);
546
553 return received; 547 return received;
554} 548}
555 549
diff --git a/drivers/net/atlx/atl1.c b/drivers/net/atlx/atl1.c
index 9ba547069db3..0ebd8208f606 100644
--- a/drivers/net/atlx/atl1.c
+++ b/drivers/net/atlx/atl1.c
@@ -84,7 +84,7 @@
84 84
85#define ATLX_DRIVER_VERSION "2.1.3" 85#define ATLX_DRIVER_VERSION "2.1.3"
86MODULE_AUTHOR("Xiong Huang <xiong.huang@atheros.com>, \ 86MODULE_AUTHOR("Xiong Huang <xiong.huang@atheros.com>, \
87 Chris Snook <csnook@redhat.com>, Jay Cliburn <jcliburn@gmail.com>"); 87Chris Snook <csnook@redhat.com>, Jay Cliburn <jcliburn@gmail.com>");
88MODULE_LICENSE("GPL"); 88MODULE_LICENSE("GPL");
89MODULE_VERSION(ATLX_DRIVER_VERSION); 89MODULE_VERSION(ATLX_DRIVER_VERSION);
90 90
diff --git a/drivers/net/atlx/atl2.h b/drivers/net/atlx/atl2.h
index d918bbe621ea..927e4de6474d 100644
--- a/drivers/net/atlx/atl2.h
+++ b/drivers/net/atlx/atl2.h
@@ -442,7 +442,7 @@ struct atl2_hw {
442struct atl2_ring_header { 442struct atl2_ring_header {
443 /* pointer to the descriptor ring memory */ 443 /* pointer to the descriptor ring memory */
444 void *desc; 444 void *desc;
445 /* physical adress of the descriptor ring */ 445 /* physical address of the descriptor ring */
446 dma_addr_t dma; 446 dma_addr_t dma;
447 /* length of descriptor ring in bytes */ 447 /* length of descriptor ring in bytes */
448 unsigned int size; 448 unsigned int size;
diff --git a/drivers/net/benet/be_cmds.c b/drivers/net/benet/be_cmds.c
index c59215361f4e..d0ef4ac987cd 100644
--- a/drivers/net/benet/be_cmds.c
+++ b/drivers/net/benet/be_cmds.c
@@ -673,7 +673,7 @@ int be_cmd_mccq_create(struct be_adapter *adapter,
673 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 673 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
674 OPCODE_COMMON_MCC_CREATE, sizeof(*req)); 674 OPCODE_COMMON_MCC_CREATE, sizeof(*req));
675 675
676 req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size); 676 req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
677 677
678 AMAP_SET_BITS(struct amap_mcc_context, valid, ctxt, 1); 678 AMAP_SET_BITS(struct amap_mcc_context, valid, ctxt, 1);
679 AMAP_SET_BITS(struct amap_mcc_context, ring_size, ctxt, 679 AMAP_SET_BITS(struct amap_mcc_context, ring_size, ctxt,
@@ -1464,8 +1464,8 @@ int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
1464 1464
1465 req->params.op_type = cpu_to_le32(IMG_TYPE_REDBOOT); 1465 req->params.op_type = cpu_to_le32(IMG_TYPE_REDBOOT);
1466 req->params.op_code = cpu_to_le32(FLASHROM_OPER_REPORT); 1466 req->params.op_code = cpu_to_le32(FLASHROM_OPER_REPORT);
1467 req->params.offset = offset; 1467 req->params.offset = cpu_to_le32(offset);
1468 req->params.data_buf_size = 0x4; 1468 req->params.data_buf_size = cpu_to_le32(0x4);
1469 1469
1470 status = be_mcc_notify_wait(adapter); 1470 status = be_mcc_notify_wait(adapter);
1471 if (!status) 1471 if (!status)
diff --git a/drivers/net/benet/be_ethtool.c b/drivers/net/benet/be_ethtool.c
index 9560d48944ab..51e1065e7897 100644
--- a/drivers/net/benet/be_ethtool.c
+++ b/drivers/net/benet/be_ethtool.c
@@ -490,7 +490,7 @@ be_test_ddr_dma(struct be_adapter *adapter)
490{ 490{
491 int ret, i; 491 int ret, i;
492 struct be_dma_mem ddrdma_cmd; 492 struct be_dma_mem ddrdma_cmd;
493 u64 pattern[2] = {0x5a5a5a5a5a5a5a5a, 0xa5a5a5a5a5a5a5a5}; 493 u64 pattern[2] = {0x5a5a5a5a5a5a5a5aULL, 0xa5a5a5a5a5a5a5a5ULL};
494 494
495 ddrdma_cmd.size = sizeof(struct be_cmd_req_ddrdma_test); 495 ddrdma_cmd.size = sizeof(struct be_cmd_req_ddrdma_test);
496 ddrdma_cmd.va = pci_alloc_consistent(adapter->pdev, ddrdma_cmd.size, 496 ddrdma_cmd.va = pci_alloc_consistent(adapter->pdev, ddrdma_cmd.size,
diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c
index 43e8032f9236..ec6ace802256 100644
--- a/drivers/net/benet/be_main.c
+++ b/drivers/net/benet/be_main.c
@@ -807,7 +807,7 @@ static void be_rx_compl_process(struct be_adapter *adapter,
807 return; 807 return;
808 } 808 }
809 vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp); 809 vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp);
810 vid = be16_to_cpu(vid); 810 vid = swab16(vid);
811 vlan_hwaccel_receive_skb(skb, adapter->vlan_grp, vid); 811 vlan_hwaccel_receive_skb(skb, adapter->vlan_grp, vid);
812 } else { 812 } else {
813 netif_receive_skb(skb); 813 netif_receive_skb(skb);
@@ -884,7 +884,7 @@ static void be_rx_compl_process_gro(struct be_adapter *adapter,
884 napi_gro_frags(&eq_obj->napi); 884 napi_gro_frags(&eq_obj->napi);
885 } else { 885 } else {
886 vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp); 886 vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp);
887 vid = be16_to_cpu(vid); 887 vid = swab16(vid);
888 888
889 if (!adapter->vlan_grp || adapter->vlans_added == 0) 889 if (!adapter->vlan_grp || adapter->vlans_added == 0)
890 return; 890 return;
@@ -1855,7 +1855,7 @@ static bool be_flash_redboot(struct be_adapter *adapter,
1855 p += crc_offset; 1855 p += crc_offset;
1856 1856
1857 status = be_cmd_get_flash_crc(adapter, flashed_crc, 1857 status = be_cmd_get_flash_crc(adapter, flashed_crc,
1858 (img_start + image_size - 4)); 1858 (image_size - 4));
1859 if (status) { 1859 if (status) {
1860 dev_err(&adapter->pdev->dev, 1860 dev_err(&adapter->pdev->dev,
1861 "could not get crc from flash, not flashing redboot\n"); 1861 "could not get crc from flash, not flashing redboot\n");
@@ -1991,7 +1991,7 @@ int be_load_fw(struct be_adapter *adapter, u8 *func)
1991 struct flash_file_hdr_g3 *fhdr3; 1991 struct flash_file_hdr_g3 *fhdr3;
1992 struct image_hdr *img_hdr_ptr = NULL; 1992 struct image_hdr *img_hdr_ptr = NULL;
1993 struct be_dma_mem flash_cmd; 1993 struct be_dma_mem flash_cmd;
1994 int status, i = 0; 1994 int status, i = 0, num_imgs = 0;
1995 const u8 *p; 1995 const u8 *p;
1996 1996
1997 strcpy(fw_file, func); 1997 strcpy(fw_file, func);
@@ -2017,15 +2017,14 @@ int be_load_fw(struct be_adapter *adapter, u8 *func)
2017 if ((adapter->generation == BE_GEN3) && 2017 if ((adapter->generation == BE_GEN3) &&
2018 (get_ufigen_type(fhdr) == BE_GEN3)) { 2018 (get_ufigen_type(fhdr) == BE_GEN3)) {
2019 fhdr3 = (struct flash_file_hdr_g3 *) fw->data; 2019 fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
2020 for (i = 0; i < fhdr3->num_imgs; i++) { 2020 num_imgs = le32_to_cpu(fhdr3->num_imgs);
2021 for (i = 0; i < num_imgs; i++) {
2021 img_hdr_ptr = (struct image_hdr *) (fw->data + 2022 img_hdr_ptr = (struct image_hdr *) (fw->data +
2022 (sizeof(struct flash_file_hdr_g3) + 2023 (sizeof(struct flash_file_hdr_g3) +
2023 i * sizeof(struct image_hdr))); 2024 i * sizeof(struct image_hdr)));
2024 if (img_hdr_ptr->imageid == 1) { 2025 if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
2025 status = be_flash_data(adapter, fw, 2026 status = be_flash_data(adapter, fw, &flash_cmd,
2026 &flash_cmd, fhdr3->num_imgs); 2027 num_imgs);
2027 }
2028
2029 } 2028 }
2030 } else if ((adapter->generation == BE_GEN2) && 2029 } else if ((adapter->generation == BE_GEN2) &&
2031 (get_ufigen_type(fhdr) == BE_GEN2)) { 2030 (get_ufigen_type(fhdr) == BE_GEN2)) {
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index 381887ba677c..a257babd1bb4 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -246,6 +246,8 @@ static const struct flash_spec flash_5709 = {
246 246
247MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl); 247MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
248 248
249static void bnx2_init_napi(struct bnx2 *bp);
250
249static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr) 251static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr)
250{ 252{
251 u32 diff; 253 u32 diff;
@@ -6197,6 +6199,7 @@ bnx2_open(struct net_device *dev)
6197 bnx2_disable_int(bp); 6199 bnx2_disable_int(bp);
6198 6200
6199 bnx2_setup_int_mode(bp, disable_msi); 6201 bnx2_setup_int_mode(bp, disable_msi);
6202 bnx2_init_napi(bp);
6200 bnx2_napi_enable(bp); 6203 bnx2_napi_enable(bp);
6201 rc = bnx2_alloc_mem(bp); 6204 rc = bnx2_alloc_mem(bp);
6202 if (rc) 6205 if (rc)
@@ -7643,9 +7646,11 @@ poll_bnx2(struct net_device *dev)
7643 int i; 7646 int i;
7644 7647
7645 for (i = 0; i < bp->irq_nvecs; i++) { 7648 for (i = 0; i < bp->irq_nvecs; i++) {
7646 disable_irq(bp->irq_tbl[i].vector); 7649 struct bnx2_irq *irq = &bp->irq_tbl[i];
7647 bnx2_interrupt(bp->irq_tbl[i].vector, &bp->bnx2_napi[i]); 7650
7648 enable_irq(bp->irq_tbl[i].vector); 7651 disable_irq(irq->vector);
7652 irq->handler(irq->vector, &bp->bnx2_napi[i]);
7653 enable_irq(irq->vector);
7649 } 7654 }
7650} 7655}
7651#endif 7656#endif
@@ -8207,7 +8212,7 @@ bnx2_init_napi(struct bnx2 *bp)
8207{ 8212{
8208 int i; 8213 int i;
8209 8214
8210 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) { 8215 for (i = 0; i < bp->irq_nvecs; i++) {
8211 struct bnx2_napi *bnapi = &bp->bnx2_napi[i]; 8216 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
8212 int (*poll)(struct napi_struct *, int); 8217 int (*poll)(struct napi_struct *, int);
8213 8218
@@ -8276,7 +8281,6 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
8276 dev->ethtool_ops = &bnx2_ethtool_ops; 8281 dev->ethtool_ops = &bnx2_ethtool_ops;
8277 8282
8278 bp = netdev_priv(dev); 8283 bp = netdev_priv(dev);
8279 bnx2_init_napi(bp);
8280 8284
8281 pci_set_drvdata(pdev, dev); 8285 pci_set_drvdata(pdev, dev);
8282 8286
diff --git a/drivers/net/bnx2x_main.c b/drivers/net/bnx2x_main.c
index ed785a30e98b..6c042a72d6cc 100644
--- a/drivers/net/bnx2x_main.c
+++ b/drivers/net/bnx2x_main.c
@@ -893,7 +893,6 @@ static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
893 u16 prod; 893 u16 prod;
894 u16 cons; 894 u16 cons;
895 895
896 barrier(); /* Tell compiler that prod and cons can change */
897 prod = fp->tx_bd_prod; 896 prod = fp->tx_bd_prod;
898 cons = fp->tx_bd_cons; 897 cons = fp->tx_bd_cons;
899 898
@@ -963,7 +962,7 @@ static int bnx2x_tx_int(struct bnx2x_fastpath *fp)
963 * start_xmit() will miss it and cause the queue to be stopped 962 * start_xmit() will miss it and cause the queue to be stopped
964 * forever. 963 * forever.
965 */ 964 */
966 smp_wmb(); 965 smp_mb();
967 966
968 /* TBD need a thresh? */ 967 /* TBD need a thresh? */
969 if (unlikely(netif_tx_queue_stopped(txq))) { 968 if (unlikely(netif_tx_queue_stopped(txq))) {
@@ -11429,9 +11428,12 @@ static netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
11429 11428
11430 if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) { 11429 if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
11431 netif_tx_stop_queue(txq); 11430 netif_tx_stop_queue(txq);
11432 /* We want bnx2x_tx_int to "see" the updated tx_bd_prod 11431
11433 if we put Tx into XOFF state. */ 11432 /* paired memory barrier is in bnx2x_tx_int(), we have to keep
11433 * ordering of set_bit() in netif_tx_stop_queue() and read of
11434 * fp->bd_tx_cons */
11434 smp_mb(); 11435 smp_mb();
11436
11435 fp->eth_q_stats.driver_xoff++; 11437 fp->eth_q_stats.driver_xoff++;
11436 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3) 11438 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
11437 netif_tx_wake_queue(txq); 11439 netif_tx_wake_queue(txq);
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 430c02267d7e..0075514bf32f 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1235,6 +1235,11 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
1235 write_lock_bh(&bond->curr_slave_lock); 1235 write_lock_bh(&bond->curr_slave_lock);
1236 } 1236 }
1237 } 1237 }
1238
1239 /* resend IGMP joins since all were sent on curr_active_slave */
1240 if (bond->params.mode == BOND_MODE_ROUNDROBIN) {
1241 bond_resend_igmp_join_requests(bond);
1242 }
1238} 1243}
1239 1244
1240/** 1245/**
@@ -4138,22 +4143,41 @@ static int bond_xmit_roundrobin(struct sk_buff *skb, struct net_device *bond_dev
4138 struct bonding *bond = netdev_priv(bond_dev); 4143 struct bonding *bond = netdev_priv(bond_dev);
4139 struct slave *slave, *start_at; 4144 struct slave *slave, *start_at;
4140 int i, slave_no, res = 1; 4145 int i, slave_no, res = 1;
4146 struct iphdr *iph = ip_hdr(skb);
4141 4147
4142 read_lock(&bond->lock); 4148 read_lock(&bond->lock);
4143 4149
4144 if (!BOND_IS_OK(bond)) 4150 if (!BOND_IS_OK(bond))
4145 goto out; 4151 goto out;
4146
4147 /* 4152 /*
4148 * Concurrent TX may collide on rr_tx_counter; we accept that 4153 * Start with the curr_active_slave that joined the bond as the
4149 * as being rare enough not to justify using an atomic op here 4154 * default for sending IGMP traffic. For failover purposes one
4155 * needs to maintain some consistency for the interface that will
4156 * send the join/membership reports. The curr_active_slave found
4157 * will send all of this type of traffic.
4150 */ 4158 */
4151 slave_no = bond->rr_tx_counter++ % bond->slave_cnt; 4159 if ((iph->protocol == IPPROTO_IGMP) &&
4160 (skb->protocol == htons(ETH_P_IP))) {
4152 4161
4153 bond_for_each_slave(bond, slave, i) { 4162 read_lock(&bond->curr_slave_lock);
4154 slave_no--; 4163 slave = bond->curr_active_slave;
4155 if (slave_no < 0) 4164 read_unlock(&bond->curr_slave_lock);
4156 break; 4165
4166 if (!slave)
4167 goto out;
4168 } else {
4169 /*
4170 * Concurrent TX may collide on rr_tx_counter; we accept
4171 * that as being rare enough not to justify using an
4172 * atomic op here.
4173 */
4174 slave_no = bond->rr_tx_counter++ % bond->slave_cnt;
4175
4176 bond_for_each_slave(bond, slave, i) {
4177 slave_no--;
4178 if (slave_no < 0)
4179 break;
4180 }
4157 } 4181 }
4158 4182
4159 start_at = slave; 4183 start_at = slave;
@@ -4426,6 +4450,14 @@ static const struct net_device_ops bond_netdev_ops = {
4426 .ndo_vlan_rx_kill_vid = bond_vlan_rx_kill_vid, 4450 .ndo_vlan_rx_kill_vid = bond_vlan_rx_kill_vid,
4427}; 4451};
4428 4452
4453static void bond_destructor(struct net_device *bond_dev)
4454{
4455 struct bonding *bond = netdev_priv(bond_dev);
4456 if (bond->wq)
4457 destroy_workqueue(bond->wq);
4458 free_netdev(bond_dev);
4459}
4460
4429static void bond_setup(struct net_device *bond_dev) 4461static void bond_setup(struct net_device *bond_dev)
4430{ 4462{
4431 struct bonding *bond = netdev_priv(bond_dev); 4463 struct bonding *bond = netdev_priv(bond_dev);
@@ -4446,7 +4478,7 @@ static void bond_setup(struct net_device *bond_dev)
4446 bond_dev->ethtool_ops = &bond_ethtool_ops; 4478 bond_dev->ethtool_ops = &bond_ethtool_ops;
4447 bond_set_mode_ops(bond, bond->params.mode); 4479 bond_set_mode_ops(bond, bond->params.mode);
4448 4480
4449 bond_dev->destructor = free_netdev; 4481 bond_dev->destructor = bond_destructor;
4450 4482
4451 /* Initialize the device options */ 4483 /* Initialize the device options */
4452 bond_dev->tx_queue_len = 0; 4484 bond_dev->tx_queue_len = 0;
@@ -4518,9 +4550,6 @@ static void bond_uninit(struct net_device *bond_dev)
4518 4550
4519 bond_remove_proc_entry(bond); 4551 bond_remove_proc_entry(bond);
4520 4552
4521 if (bond->wq)
4522 destroy_workqueue(bond->wq);
4523
4524 netif_addr_lock_bh(bond_dev); 4553 netif_addr_lock_bh(bond_dev);
4525 bond_mc_list_destroy(bond); 4554 bond_mc_list_destroy(bond);
4526 netif_addr_unlock_bh(bond_dev); 4555 netif_addr_unlock_bh(bond_dev);
@@ -4932,8 +4961,8 @@ int bond_create(struct net *net, const char *name)
4932 bond_setup); 4961 bond_setup);
4933 if (!bond_dev) { 4962 if (!bond_dev) {
4934 pr_err("%s: eek! can't alloc netdev!\n", name); 4963 pr_err("%s: eek! can't alloc netdev!\n", name);
4935 res = -ENOMEM; 4964 rtnl_unlock();
4936 goto out; 4965 return -ENOMEM;
4937 } 4966 }
4938 4967
4939 dev_net_set(bond_dev, net); 4968 dev_net_set(bond_dev, net);
@@ -4942,19 +4971,16 @@ int bond_create(struct net *net, const char *name)
4942 if (!name) { 4971 if (!name) {
4943 res = dev_alloc_name(bond_dev, "bond%d"); 4972 res = dev_alloc_name(bond_dev, "bond%d");
4944 if (res < 0) 4973 if (res < 0)
4945 goto out_netdev; 4974 goto out;
4946 } 4975 }
4947 4976
4948 res = register_netdevice(bond_dev); 4977 res = register_netdevice(bond_dev);
4949 if (res < 0)
4950 goto out_netdev;
4951 4978
4952out: 4979out:
4953 rtnl_unlock(); 4980 rtnl_unlock();
4981 if (res < 0)
4982 bond_destructor(bond_dev);
4954 return res; 4983 return res;
4955out_netdev:
4956 free_netdev(bond_dev);
4957 goto out;
4958} 4984}
4959 4985
4960static int __net_init bond_net_init(struct net *net) 4986static int __net_init bond_net_init(struct net *net)
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index 5acd557cea9b..b8bec086daa1 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -51,7 +51,9 @@
51 * "show" function for the bond_masters attribute. 51 * "show" function for the bond_masters attribute.
52 * The class parameter is ignored. 52 * The class parameter is ignored.
53 */ 53 */
54static ssize_t bonding_show_bonds(struct class *cls, char *buf) 54static ssize_t bonding_show_bonds(struct class *cls,
55 struct class_attribute *attr,
56 char *buf)
55{ 57{
56 struct net *net = current->nsproxy->net_ns; 58 struct net *net = current->nsproxy->net_ns;
57 struct bond_net *bn = net_generic(net, bond_net_id); 59 struct bond_net *bn = net_generic(net, bond_net_id);
@@ -98,6 +100,7 @@ static struct net_device *bond_get_by_name(struct net *net, const char *ifname)
98 */ 100 */
99 101
100static ssize_t bonding_store_bonds(struct class *cls, 102static ssize_t bonding_store_bonds(struct class *cls,
103 struct class_attribute *attr,
101 const char *buffer, size_t count) 104 const char *buffer, size_t count)
102{ 105{
103 struct net *net = current->nsproxy->net_ns; 106 struct net *net = current->nsproxy->net_ns;
diff --git a/drivers/net/can/bfin_can.c b/drivers/net/can/bfin_can.c
index 866905fa4119..03489864376d 100644
--- a/drivers/net/can/bfin_can.c
+++ b/drivers/net/can/bfin_can.c
@@ -22,6 +22,7 @@
22#include <linux/can/dev.h> 22#include <linux/can/dev.h>
23#include <linux/can/error.h> 23#include <linux/can/error.h>
24 24
25#include <asm/bfin_can.h>
25#include <asm/portmux.h> 26#include <asm/portmux.h>
26 27
27#define DRV_NAME "bfin_can" 28#define DRV_NAME "bfin_can"
@@ -29,90 +30,6 @@
29#define TX_ECHO_SKB_MAX 1 30#define TX_ECHO_SKB_MAX 1
30 31
31/* 32/*
32 * transmit and receive channels
33 */
34#define TRANSMIT_CHL 24
35#define RECEIVE_STD_CHL 0
36#define RECEIVE_EXT_CHL 4
37#define RECEIVE_RTR_CHL 8
38#define RECEIVE_EXT_RTR_CHL 12
39#define MAX_CHL_NUMBER 32
40
41/*
42 * bfin can registers layout
43 */
44struct bfin_can_mask_regs {
45 u16 aml;
46 u16 dummy1;
47 u16 amh;
48 u16 dummy2;
49};
50
51struct bfin_can_channel_regs {
52 u16 data[8];
53 u16 dlc;
54 u16 dummy1;
55 u16 tsv;
56 u16 dummy2;
57 u16 id0;
58 u16 dummy3;
59 u16 id1;
60 u16 dummy4;
61};
62
63struct bfin_can_regs {
64 /*
65 * global control and status registers
66 */
67 u16 mc1; /* offset 0 */
68 u16 dummy1;
69 u16 md1; /* offset 4 */
70 u16 rsv1[13];
71 u16 mbtif1; /* offset 0x20 */
72 u16 dummy2;
73 u16 mbrif1; /* offset 0x24 */
74 u16 dummy3;
75 u16 mbim1; /* offset 0x28 */
76 u16 rsv2[11];
77 u16 mc2; /* offset 0x40 */
78 u16 dummy4;
79 u16 md2; /* offset 0x44 */
80 u16 dummy5;
81 u16 trs2; /* offset 0x48 */
82 u16 rsv3[11];
83 u16 mbtif2; /* offset 0x60 */
84 u16 dummy6;
85 u16 mbrif2; /* offset 0x64 */
86 u16 dummy7;
87 u16 mbim2; /* offset 0x68 */
88 u16 rsv4[11];
89 u16 clk; /* offset 0x80 */
90 u16 dummy8;
91 u16 timing; /* offset 0x84 */
92 u16 rsv5[3];
93 u16 status; /* offset 0x8c */
94 u16 dummy9;
95 u16 cec; /* offset 0x90 */
96 u16 dummy10;
97 u16 gis; /* offset 0x94 */
98 u16 dummy11;
99 u16 gim; /* offset 0x98 */
100 u16 rsv6[3];
101 u16 ctrl; /* offset 0xa0 */
102 u16 dummy12;
103 u16 intr; /* offset 0xa4 */
104 u16 rsv7[7];
105 u16 esr; /* offset 0xb4 */
106 u16 rsv8[37];
107
108 /*
109 * channel(mailbox) mask and message registers
110 */
111 struct bfin_can_mask_regs msk[MAX_CHL_NUMBER]; /* offset 0x100 */
112 struct bfin_can_channel_regs chl[MAX_CHL_NUMBER]; /* offset 0x200 */
113};
114
115/*
116 * bfin can private data 33 * bfin can private data
117 */ 34 */
118struct bfin_can_priv { 35struct bfin_can_priv {
@@ -163,7 +80,7 @@ static int bfin_can_set_bittiming(struct net_device *dev)
163 if (priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES) 80 if (priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES)
164 timing |= SAM; 81 timing |= SAM;
165 82
166 bfin_write16(&reg->clk, clk); 83 bfin_write16(&reg->clock, clk);
167 bfin_write16(&reg->timing, timing); 84 bfin_write16(&reg->timing, timing);
168 85
169 dev_info(dev->dev.parent, "setting CLOCK=0x%04x TIMING=0x%04x\n", 86 dev_info(dev->dev.parent, "setting CLOCK=0x%04x TIMING=0x%04x\n",
@@ -185,11 +102,11 @@ static void bfin_can_set_reset_mode(struct net_device *dev)
185 bfin_write16(&reg->gim, 0); 102 bfin_write16(&reg->gim, 0);
186 103
187 /* reset can and enter configuration mode */ 104 /* reset can and enter configuration mode */
188 bfin_write16(&reg->ctrl, SRS | CCR); 105 bfin_write16(&reg->control, SRS | CCR);
189 SSYNC(); 106 SSYNC();
190 bfin_write16(&reg->ctrl, CCR); 107 bfin_write16(&reg->control, CCR);
191 SSYNC(); 108 SSYNC();
192 while (!(bfin_read16(&reg->ctrl) & CCA)) { 109 while (!(bfin_read16(&reg->control) & CCA)) {
193 udelay(10); 110 udelay(10);
194 if (--timeout == 0) { 111 if (--timeout == 0) {
195 dev_err(dev->dev.parent, 112 dev_err(dev->dev.parent,
@@ -244,7 +161,7 @@ static void bfin_can_set_normal_mode(struct net_device *dev)
244 /* 161 /*
245 * leave configuration mode 162 * leave configuration mode
246 */ 163 */
247 bfin_write16(&reg->ctrl, bfin_read16(&reg->ctrl) & ~CCR); 164 bfin_write16(&reg->control, bfin_read16(&reg->control) & ~CCR);
248 165
249 while (bfin_read16(&reg->status) & CCA) { 166 while (bfin_read16(&reg->status) & CCA) {
250 udelay(10); 167 udelay(10);
@@ -726,7 +643,7 @@ static int bfin_can_suspend(struct platform_device *pdev, pm_message_t mesg)
726 643
727 if (netif_running(dev)) { 644 if (netif_running(dev)) {
728 /* enter sleep mode */ 645 /* enter sleep mode */
729 bfin_write16(&reg->ctrl, bfin_read16(&reg->ctrl) | SMR); 646 bfin_write16(&reg->control, bfin_read16(&reg->control) | SMR);
730 SSYNC(); 647 SSYNC();
731 while (!(bfin_read16(&reg->intr) & SMACK)) { 648 while (!(bfin_read16(&reg->intr) & SMACK)) {
732 udelay(10); 649 udelay(10);
diff --git a/drivers/net/chelsio/sge.c b/drivers/net/chelsio/sge.c
index 71384114a4ed..55d99ca82f8a 100644
--- a/drivers/net/chelsio/sge.c
+++ b/drivers/net/chelsio/sge.c
@@ -248,7 +248,7 @@ static void restart_sched(unsigned long);
248 * 248 *
249 * Interrupts are handled by a single CPU and it is likely that on a MP system 249 * Interrupts are handled by a single CPU and it is likely that on a MP system
250 * the application is migrated to another CPU. In that scenario, we try to 250 * the application is migrated to another CPU. In that scenario, we try to
251 * seperate the RX(in irq context) and TX state in order to decrease memory 251 * separate the RX(in irq context) and TX state in order to decrease memory
252 * contention. 252 * contention.
253 */ 253 */
254struct sge { 254struct sge {
diff --git a/drivers/net/cnic.c b/drivers/net/cnic.c
index 9781942992e9..4b451a7c03e9 100644
--- a/drivers/net/cnic.c
+++ b/drivers/net/cnic.c
@@ -2334,13 +2334,13 @@ static int cnic_service_bnx2x(void *data, void *status_blk)
2334 struct cnic_local *cp = dev->cnic_priv; 2334 struct cnic_local *cp = dev->cnic_priv;
2335 u16 prod = cp->kcq_prod_idx & MAX_KCQ_IDX; 2335 u16 prod = cp->kcq_prod_idx & MAX_KCQ_IDX;
2336 2336
2337 prefetch(cp->status_blk.bnx2x); 2337 if (likely(test_bit(CNIC_F_CNIC_UP, &dev->flags))) {
2338 prefetch(&cp->kcq[KCQ_PG(prod)][KCQ_IDX(prod)]); 2338 prefetch(cp->status_blk.bnx2x);
2339 prefetch(&cp->kcq[KCQ_PG(prod)][KCQ_IDX(prod)]);
2339 2340
2340 if (likely(test_bit(CNIC_F_CNIC_UP, &dev->flags)))
2341 tasklet_schedule(&cp->cnic_irq_task); 2341 tasklet_schedule(&cp->cnic_irq_task);
2342 2342 cnic_chk_pkt_rings(cp);
2343 cnic_chk_pkt_rings(cp); 2343 }
2344 2344
2345 return 0; 2345 return 0;
2346} 2346}
diff --git a/drivers/net/cs89x0.c b/drivers/net/cs89x0.c
index 14624019ce71..b0208e474f7e 100644
--- a/drivers/net/cs89x0.c
+++ b/drivers/net/cs89x0.c
@@ -580,7 +580,7 @@ cs89x0_probe1(struct net_device *dev, int ioaddr, int modular)
580 } 580 }
581 581
582#ifdef CONFIG_SH_HICOSH4 582#ifdef CONFIG_SH_HICOSH4
583 /* truely reset the chip */ 583 /* truly reset the chip */
584 writeword(ioaddr, ADD_PORT, 0x0114); 584 writeword(ioaddr, ADD_PORT, 0x0114);
585 writeword(ioaddr, DATA_PORT, 0x0040); 585 writeword(ioaddr, DATA_PORT, 0x0040);
586#endif 586#endif
diff --git a/drivers/net/cxgb3/adapter.h b/drivers/net/cxgb3/adapter.h
index 3e8618b4efbc..4cd7f420766a 100644
--- a/drivers/net/cxgb3/adapter.h
+++ b/drivers/net/cxgb3/adapter.h
@@ -264,6 +264,10 @@ struct adapter {
264 struct work_struct fatal_error_handler_task; 264 struct work_struct fatal_error_handler_task;
265 struct work_struct link_fault_handler_task; 265 struct work_struct link_fault_handler_task;
266 266
267 struct work_struct db_full_task;
268 struct work_struct db_empty_task;
269 struct work_struct db_drop_task;
270
267 struct dentry *debugfs_root; 271 struct dentry *debugfs_root;
268 272
269 struct mutex mdio_lock; 273 struct mutex mdio_lock;
@@ -335,6 +339,7 @@ int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
335int t3_get_desc(const struct sge_qset *qs, unsigned int qnum, unsigned int idx, 339int t3_get_desc(const struct sge_qset *qs, unsigned int qnum, unsigned int idx,
336 unsigned char *data); 340 unsigned char *data);
337irqreturn_t t3_sge_intr_msix(int irq, void *cookie); 341irqreturn_t t3_sge_intr_msix(int irq, void *cookie);
342extern struct workqueue_struct *cxgb3_wq;
338 343
339int t3_get_edc_fw(struct cphy *phy, int edc_idx, int size); 344int t3_get_edc_fw(struct cphy *phy, int edc_idx, int size);
340 345
diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c
index cecdec1551db..9e3e8750b46a 100644
--- a/drivers/net/cxgb3/cxgb3_main.c
+++ b/drivers/net/cxgb3/cxgb3_main.c
@@ -45,6 +45,7 @@
45#include <linux/firmware.h> 45#include <linux/firmware.h>
46#include <linux/log2.h> 46#include <linux/log2.h>
47#include <linux/stringify.h> 47#include <linux/stringify.h>
48#include <linux/sched.h>
48#include <asm/uaccess.h> 49#include <asm/uaccess.h>
49 50
50#include "common.h" 51#include "common.h"
@@ -140,7 +141,7 @@ MODULE_PARM_DESC(ofld_disable, "whether to enable offload at init time or not");
140 * will block keventd as it needs the rtnl lock, and we'll deadlock waiting 141 * will block keventd as it needs the rtnl lock, and we'll deadlock waiting
141 * for our work to complete. Get our own work queue to solve this. 142 * for our work to complete. Get our own work queue to solve this.
142 */ 143 */
143static struct workqueue_struct *cxgb3_wq; 144struct workqueue_struct *cxgb3_wq;
144 145
145/** 146/**
146 * link_report - show link status and link speed/duplex 147 * link_report - show link status and link speed/duplex
@@ -586,6 +587,19 @@ static void setup_rss(struct adapter *adap)
586 V_RRCPLCPUSIZE(6) | F_HASHTOEPLITZ, cpus, rspq_map); 587 V_RRCPLCPUSIZE(6) | F_HASHTOEPLITZ, cpus, rspq_map);
587} 588}
588 589
590static void ring_dbs(struct adapter *adap)
591{
592 int i, j;
593
594 for (i = 0; i < SGE_QSETS; i++) {
595 struct sge_qset *qs = &adap->sge.qs[i];
596
597 if (qs->adap)
598 for (j = 0; j < SGE_TXQ_PER_SET; j++)
599 t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX | V_EGRCNTX(qs->txq[j].cntxt_id));
600 }
601}
602
589static void init_napi(struct adapter *adap) 603static void init_napi(struct adapter *adap)
590{ 604{
591 int i; 605 int i;
@@ -2751,6 +2765,42 @@ static void t3_adap_check_task(struct work_struct *work)
2751 spin_unlock_irq(&adapter->work_lock); 2765 spin_unlock_irq(&adapter->work_lock);
2752} 2766}
2753 2767
2768static void db_full_task(struct work_struct *work)
2769{
2770 struct adapter *adapter = container_of(work, struct adapter,
2771 db_full_task);
2772
2773 cxgb3_event_notify(&adapter->tdev, OFFLOAD_DB_FULL, 0);
2774}
2775
2776static void db_empty_task(struct work_struct *work)
2777{
2778 struct adapter *adapter = container_of(work, struct adapter,
2779 db_empty_task);
2780
2781 cxgb3_event_notify(&adapter->tdev, OFFLOAD_DB_EMPTY, 0);
2782}
2783
2784static void db_drop_task(struct work_struct *work)
2785{
2786 struct adapter *adapter = container_of(work, struct adapter,
2787 db_drop_task);
2788 unsigned long delay = 1000;
2789 unsigned short r;
2790
2791 cxgb3_event_notify(&adapter->tdev, OFFLOAD_DB_DROP, 0);
2792
2793 /*
2794 * Sleep a while before ringing the driver qset dbs.
2795 * The delay is between 1000-2023 usecs.
2796 */
2797 get_random_bytes(&r, 2);
2798 delay += r & 1023;
2799 set_current_state(TASK_UNINTERRUPTIBLE);
2800 schedule_timeout(usecs_to_jiffies(delay));
2801 ring_dbs(adapter);
2802}
2803
2754/* 2804/*
2755 * Processes external (PHY) interrupts in process context. 2805 * Processes external (PHY) interrupts in process context.
2756 */ 2806 */
@@ -3219,6 +3269,11 @@ static int __devinit init_one(struct pci_dev *pdev,
3219 INIT_LIST_HEAD(&adapter->adapter_list); 3269 INIT_LIST_HEAD(&adapter->adapter_list);
3220 INIT_WORK(&adapter->ext_intr_handler_task, ext_intr_task); 3270 INIT_WORK(&adapter->ext_intr_handler_task, ext_intr_task);
3221 INIT_WORK(&adapter->fatal_error_handler_task, fatal_error_task); 3271 INIT_WORK(&adapter->fatal_error_handler_task, fatal_error_task);
3272
3273 INIT_WORK(&adapter->db_full_task, db_full_task);
3274 INIT_WORK(&adapter->db_empty_task, db_empty_task);
3275 INIT_WORK(&adapter->db_drop_task, db_drop_task);
3276
3222 INIT_DELAYED_WORK(&adapter->adap_check_task, t3_adap_check_task); 3277 INIT_DELAYED_WORK(&adapter->adap_check_task, t3_adap_check_task);
3223 3278
3224 for (i = 0; i < ai->nports0 + ai->nports1; ++i) { 3279 for (i = 0; i < ai->nports0 + ai->nports1; ++i) {
diff --git a/drivers/net/cxgb3/cxgb3_offload.h b/drivers/net/cxgb3/cxgb3_offload.h
index 670aa62042da..929c298115ca 100644
--- a/drivers/net/cxgb3/cxgb3_offload.h
+++ b/drivers/net/cxgb3/cxgb3_offload.h
@@ -73,7 +73,10 @@ enum {
73 OFFLOAD_STATUS_UP, 73 OFFLOAD_STATUS_UP,
74 OFFLOAD_STATUS_DOWN, 74 OFFLOAD_STATUS_DOWN,
75 OFFLOAD_PORT_DOWN, 75 OFFLOAD_PORT_DOWN,
76 OFFLOAD_PORT_UP 76 OFFLOAD_PORT_UP,
77 OFFLOAD_DB_FULL,
78 OFFLOAD_DB_EMPTY,
79 OFFLOAD_DB_DROP
77}; 80};
78 81
79struct cxgb3_client { 82struct cxgb3_client {
diff --git a/drivers/net/cxgb3/regs.h b/drivers/net/cxgb3/regs.h
index 1b5327b5a965..cb42353c9fdd 100644
--- a/drivers/net/cxgb3/regs.h
+++ b/drivers/net/cxgb3/regs.h
@@ -254,6 +254,22 @@
254#define V_LOPIODRBDROPERR(x) ((x) << S_LOPIODRBDROPERR) 254#define V_LOPIODRBDROPERR(x) ((x) << S_LOPIODRBDROPERR)
255#define F_LOPIODRBDROPERR V_LOPIODRBDROPERR(1U) 255#define F_LOPIODRBDROPERR V_LOPIODRBDROPERR(1U)
256 256
257#define S_HIPRIORITYDBFULL 7
258#define V_HIPRIORITYDBFULL(x) ((x) << S_HIPRIORITYDBFULL)
259#define F_HIPRIORITYDBFULL V_HIPRIORITYDBFULL(1U)
260
261#define S_HIPRIORITYDBEMPTY 6
262#define V_HIPRIORITYDBEMPTY(x) ((x) << S_HIPRIORITYDBEMPTY)
263#define F_HIPRIORITYDBEMPTY V_HIPRIORITYDBEMPTY(1U)
264
265#define S_LOPRIORITYDBFULL 5
266#define V_LOPRIORITYDBFULL(x) ((x) << S_LOPRIORITYDBFULL)
267#define F_LOPRIORITYDBFULL V_LOPRIORITYDBFULL(1U)
268
269#define S_LOPRIORITYDBEMPTY 4
270#define V_LOPRIORITYDBEMPTY(x) ((x) << S_LOPRIORITYDBEMPTY)
271#define F_LOPRIORITYDBEMPTY V_LOPRIORITYDBEMPTY(1U)
272
257#define S_RSPQDISABLED 3 273#define S_RSPQDISABLED 3
258#define V_RSPQDISABLED(x) ((x) << S_RSPQDISABLED) 274#define V_RSPQDISABLED(x) ((x) << S_RSPQDISABLED)
259#define F_RSPQDISABLED V_RSPQDISABLED(1U) 275#define F_RSPQDISABLED V_RSPQDISABLED(1U)
diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
index 048205903741..67e61b2a8c42 100644
--- a/drivers/net/cxgb3/sge.c
+++ b/drivers/net/cxgb3/sge.c
@@ -42,6 +42,7 @@
42#include "sge_defs.h" 42#include "sge_defs.h"
43#include "t3_cpl.h" 43#include "t3_cpl.h"
44#include "firmware_exports.h" 44#include "firmware_exports.h"
45#include "cxgb3_offload.h"
45 46
46#define USE_GTS 0 47#define USE_GTS 0
47 48
@@ -196,13 +197,13 @@ static inline void refill_rspq(struct adapter *adapter,
196/** 197/**
197 * need_skb_unmap - does the platform need unmapping of sk_buffs? 198 * need_skb_unmap - does the platform need unmapping of sk_buffs?
198 * 199 *
199 * Returns true if the platfrom needs sk_buff unmapping. The compiler 200 * Returns true if the platform needs sk_buff unmapping. The compiler
200 * optimizes away unecessary code if this returns true. 201 * optimizes away unecessary code if this returns true.
201 */ 202 */
202static inline int need_skb_unmap(void) 203static inline int need_skb_unmap(void)
203{ 204{
204 /* 205 /*
205 * This structure is used to tell if the platfrom needs buffer 206 * This structure is used to tell if the platform needs buffer
206 * unmapping by checking if DECLARE_PCI_UNMAP_ADDR defines anything. 207 * unmapping by checking if DECLARE_PCI_UNMAP_ADDR defines anything.
207 */ 208 */
208 struct dummy { 209 struct dummy {
@@ -2841,8 +2842,13 @@ void t3_sge_err_intr_handler(struct adapter *adapter)
2841 } 2842 }
2842 2843
2843 if (status & (F_HIPIODRBDROPERR | F_LOPIODRBDROPERR)) 2844 if (status & (F_HIPIODRBDROPERR | F_LOPIODRBDROPERR))
2844 CH_ALERT(adapter, "SGE dropped %s priority doorbell\n", 2845 queue_work(cxgb3_wq, &adapter->db_drop_task);
2845 status & F_HIPIODRBDROPERR ? "high" : "lo"); 2846
2847 if (status & (F_HIPRIORITYDBFULL | F_LOPRIORITYDBFULL))
2848 queue_work(cxgb3_wq, &adapter->db_full_task);
2849
2850 if (status & (F_HIPRIORITYDBEMPTY | F_LOPRIORITYDBEMPTY))
2851 queue_work(cxgb3_wq, &adapter->db_empty_task);
2846 2852
2847 t3_write_reg(adapter, A_SG_INT_CAUSE, status); 2853 t3_write_reg(adapter, A_SG_INT_CAUSE, status);
2848 if (status & SGE_FATALERR) 2854 if (status & SGE_FATALERR)
diff --git a/drivers/net/cxgb3/t3_hw.c b/drivers/net/cxgb3/t3_hw.c
index 3ab9f51918aa..95a8ba0759f1 100644
--- a/drivers/net/cxgb3/t3_hw.c
+++ b/drivers/net/cxgb3/t3_hw.c
@@ -1433,7 +1433,10 @@ static int t3_handle_intr_status(struct adapter *adapter, unsigned int reg,
1433 F_IRPARITYERROR | V_ITPARITYERROR(M_ITPARITYERROR) | \ 1433 F_IRPARITYERROR | V_ITPARITYERROR(M_ITPARITYERROR) | \
1434 V_FLPARITYERROR(M_FLPARITYERROR) | F_LODRBPARITYERROR | \ 1434 V_FLPARITYERROR(M_FLPARITYERROR) | F_LODRBPARITYERROR | \
1435 F_HIDRBPARITYERROR | F_LORCQPARITYERROR | \ 1435 F_HIDRBPARITYERROR | F_LORCQPARITYERROR | \
1436 F_HIRCQPARITYERROR) 1436 F_HIRCQPARITYERROR | F_LOPRIORITYDBFULL | \
1437 F_HIPRIORITYDBFULL | F_LOPRIORITYDBEMPTY | \
1438 F_HIPRIORITYDBEMPTY | F_HIPIODRBDROPERR | \
1439 F_LOPIODRBDROPERR)
1437#define MC5_INTR_MASK (F_PARITYERR | F_ACTRGNFULL | F_UNKNOWNCMD | \ 1440#define MC5_INTR_MASK (F_PARITYERR | F_ACTRGNFULL | F_UNKNOWNCMD | \
1438 F_REQQPARERR | F_DISPQPARERR | F_DELACTEMPTY | \ 1441 F_REQQPARERR | F_DISPQPARERR | F_DELACTEMPTY | \
1439 F_NFASRCHFAIL) 1442 F_NFASRCHFAIL)
diff --git a/drivers/net/cxgb4/Makefile b/drivers/net/cxgb4/Makefile
new file mode 100644
index 000000000000..498667487f52
--- /dev/null
+++ b/drivers/net/cxgb4/Makefile
@@ -0,0 +1,7 @@
1#
2# Chelsio T4 driver
3#
4
5obj-$(CONFIG_CHELSIO_T4) += cxgb4.o
6
7cxgb4-objs := cxgb4_main.o l2t.o t4_hw.o sge.o
diff --git a/drivers/net/cxgb4/cxgb4.h b/drivers/net/cxgb4/cxgb4.h
new file mode 100644
index 000000000000..3d8ff4889b56
--- /dev/null
+++ b/drivers/net/cxgb4/cxgb4.h
@@ -0,0 +1,741 @@
1/*
2 * This file is part of the Chelsio T4 Ethernet driver for Linux.
3 *
4 * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved.
5 *
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
11 *
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
14 * conditions are met:
15 *
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
18 * disclaimer.
19 *
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 * SOFTWARE.
33 */
34
35#ifndef __CXGB4_H__
36#define __CXGB4_H__
37
38#include <linux/bitops.h>
39#include <linux/cache.h>
40#include <linux/interrupt.h>
41#include <linux/list.h>
42#include <linux/netdevice.h>
43#include <linux/pci.h>
44#include <linux/spinlock.h>
45#include <linux/timer.h>
46#include <asm/io.h>
47#include "cxgb4_uld.h"
48#include "t4_hw.h"
49
50#define FW_VERSION_MAJOR 1
51#define FW_VERSION_MINOR 1
52#define FW_VERSION_MICRO 0
53
54enum {
55 MAX_NPORTS = 4, /* max # of ports */
56 SERNUM_LEN = 16, /* Serial # length */
57 EC_LEN = 16, /* E/C length */
58 ID_LEN = 16, /* ID length */
59};
60
61enum {
62 MEM_EDC0,
63 MEM_EDC1,
64 MEM_MC
65};
66
67enum dev_master {
68 MASTER_CANT,
69 MASTER_MAY,
70 MASTER_MUST
71};
72
73enum dev_state {
74 DEV_STATE_UNINIT,
75 DEV_STATE_INIT,
76 DEV_STATE_ERR
77};
78
79enum {
80 PAUSE_RX = 1 << 0,
81 PAUSE_TX = 1 << 1,
82 PAUSE_AUTONEG = 1 << 2
83};
84
85struct port_stats {
86 u64 tx_octets; /* total # of octets in good frames */
87 u64 tx_frames; /* all good frames */
88 u64 tx_bcast_frames; /* all broadcast frames */
89 u64 tx_mcast_frames; /* all multicast frames */
90 u64 tx_ucast_frames; /* all unicast frames */
91 u64 tx_error_frames; /* all error frames */
92
93 u64 tx_frames_64; /* # of Tx frames in a particular range */
94 u64 tx_frames_65_127;
95 u64 tx_frames_128_255;
96 u64 tx_frames_256_511;
97 u64 tx_frames_512_1023;
98 u64 tx_frames_1024_1518;
99 u64 tx_frames_1519_max;
100
101 u64 tx_drop; /* # of dropped Tx frames */
102 u64 tx_pause; /* # of transmitted pause frames */
103 u64 tx_ppp0; /* # of transmitted PPP prio 0 frames */
104 u64 tx_ppp1; /* # of transmitted PPP prio 1 frames */
105 u64 tx_ppp2; /* # of transmitted PPP prio 2 frames */
106 u64 tx_ppp3; /* # of transmitted PPP prio 3 frames */
107 u64 tx_ppp4; /* # of transmitted PPP prio 4 frames */
108 u64 tx_ppp5; /* # of transmitted PPP prio 5 frames */
109 u64 tx_ppp6; /* # of transmitted PPP prio 6 frames */
110 u64 tx_ppp7; /* # of transmitted PPP prio 7 frames */
111
112 u64 rx_octets; /* total # of octets in good frames */
113 u64 rx_frames; /* all good frames */
114 u64 rx_bcast_frames; /* all broadcast frames */
115 u64 rx_mcast_frames; /* all multicast frames */
116 u64 rx_ucast_frames; /* all unicast frames */
117 u64 rx_too_long; /* # of frames exceeding MTU */
118 u64 rx_jabber; /* # of jabber frames */
119 u64 rx_fcs_err; /* # of received frames with bad FCS */
120 u64 rx_len_err; /* # of received frames with length error */
121 u64 rx_symbol_err; /* symbol errors */
122 u64 rx_runt; /* # of short frames */
123
124 u64 rx_frames_64; /* # of Rx frames in a particular range */
125 u64 rx_frames_65_127;
126 u64 rx_frames_128_255;
127 u64 rx_frames_256_511;
128 u64 rx_frames_512_1023;
129 u64 rx_frames_1024_1518;
130 u64 rx_frames_1519_max;
131
132 u64 rx_pause; /* # of received pause frames */
133 u64 rx_ppp0; /* # of received PPP prio 0 frames */
134 u64 rx_ppp1; /* # of received PPP prio 1 frames */
135 u64 rx_ppp2; /* # of received PPP prio 2 frames */
136 u64 rx_ppp3; /* # of received PPP prio 3 frames */
137 u64 rx_ppp4; /* # of received PPP prio 4 frames */
138 u64 rx_ppp5; /* # of received PPP prio 5 frames */
139 u64 rx_ppp6; /* # of received PPP prio 6 frames */
140 u64 rx_ppp7; /* # of received PPP prio 7 frames */
141
142 u64 rx_ovflow0; /* drops due to buffer-group 0 overflows */
143 u64 rx_ovflow1; /* drops due to buffer-group 1 overflows */
144 u64 rx_ovflow2; /* drops due to buffer-group 2 overflows */
145 u64 rx_ovflow3; /* drops due to buffer-group 3 overflows */
146 u64 rx_trunc0; /* buffer-group 0 truncated packets */
147 u64 rx_trunc1; /* buffer-group 1 truncated packets */
148 u64 rx_trunc2; /* buffer-group 2 truncated packets */
149 u64 rx_trunc3; /* buffer-group 3 truncated packets */
150};
151
152struct lb_port_stats {
153 u64 octets;
154 u64 frames;
155 u64 bcast_frames;
156 u64 mcast_frames;
157 u64 ucast_frames;
158 u64 error_frames;
159
160 u64 frames_64;
161 u64 frames_65_127;
162 u64 frames_128_255;
163 u64 frames_256_511;
164 u64 frames_512_1023;
165 u64 frames_1024_1518;
166 u64 frames_1519_max;
167
168 u64 drop;
169
170 u64 ovflow0;
171 u64 ovflow1;
172 u64 ovflow2;
173 u64 ovflow3;
174 u64 trunc0;
175 u64 trunc1;
176 u64 trunc2;
177 u64 trunc3;
178};
179
180struct tp_tcp_stats {
181 u32 tcpOutRsts;
182 u64 tcpInSegs;
183 u64 tcpOutSegs;
184 u64 tcpRetransSegs;
185};
186
187struct tp_err_stats {
188 u32 macInErrs[4];
189 u32 hdrInErrs[4];
190 u32 tcpInErrs[4];
191 u32 tnlCongDrops[4];
192 u32 ofldChanDrops[4];
193 u32 tnlTxDrops[4];
194 u32 ofldVlanDrops[4];
195 u32 tcp6InErrs[4];
196 u32 ofldNoNeigh;
197 u32 ofldCongDefer;
198};
199
200struct tp_params {
201 unsigned int ntxchan; /* # of Tx channels */
202 unsigned int tre; /* log2 of core clocks per TP tick */
203};
204
205struct vpd_params {
206 unsigned int cclk;
207 u8 ec[EC_LEN + 1];
208 u8 sn[SERNUM_LEN + 1];
209 u8 id[ID_LEN + 1];
210};
211
212struct pci_params {
213 unsigned char speed;
214 unsigned char width;
215};
216
217struct adapter_params {
218 struct tp_params tp;
219 struct vpd_params vpd;
220 struct pci_params pci;
221
222 unsigned int fw_vers;
223 unsigned int tp_vers;
224 u8 api_vers[7];
225
226 unsigned short mtus[NMTUS];
227 unsigned short a_wnd[NCCTRL_WIN];
228 unsigned short b_wnd[NCCTRL_WIN];
229
230 unsigned char nports; /* # of ethernet ports */
231 unsigned char portvec;
232 unsigned char rev; /* chip revision */
233 unsigned char offload;
234
235 unsigned int ofldq_wr_cred;
236};
237
238struct trace_params {
239 u32 data[TRACE_LEN / 4];
240 u32 mask[TRACE_LEN / 4];
241 unsigned short snap_len;
242 unsigned short min_len;
243 unsigned char skip_ofst;
244 unsigned char skip_len;
245 unsigned char invert;
246 unsigned char port;
247};
248
249struct link_config {
250 unsigned short supported; /* link capabilities */
251 unsigned short advertising; /* advertised capabilities */
252 unsigned short requested_speed; /* speed user has requested */
253 unsigned short speed; /* actual link speed */
254 unsigned char requested_fc; /* flow control user has requested */
255 unsigned char fc; /* actual link flow control */
256 unsigned char autoneg; /* autonegotiating? */
257 unsigned char link_ok; /* link up? */
258};
259
260#define FW_LEN16(fw_struct) FW_CMD_LEN16(sizeof(fw_struct) / 16)
261
262enum {
263 MAX_ETH_QSETS = 32, /* # of Ethernet Tx/Rx queue sets */
264 MAX_OFLD_QSETS = 16, /* # of offload Tx/Rx queue sets */
265 MAX_CTRL_QUEUES = NCHAN, /* # of control Tx queues */
266 MAX_RDMA_QUEUES = NCHAN, /* # of streaming RDMA Rx queues */
267};
268
269enum {
270 MAX_EGRQ = 128, /* max # of egress queues, including FLs */
271 MAX_INGQ = 64 /* max # of interrupt-capable ingress queues */
272};
273
274struct adapter;
275struct vlan_group;
276struct sge_rspq;
277
278struct port_info {
279 struct adapter *adapter;
280 struct vlan_group *vlan_grp;
281 u16 viid;
282 s16 xact_addr_filt; /* index of exact MAC address filter */
283 u16 rss_size; /* size of VI's RSS table slice */
284 s8 mdio_addr;
285 u8 port_type;
286 u8 mod_type;
287 u8 port_id;
288 u8 tx_chan;
289 u8 lport; /* associated offload logical port */
290 u8 rx_offload; /* CSO, etc */
291 u8 nqsets; /* # of qsets */
292 u8 first_qset; /* index of first qset */
293 struct link_config link_cfg;
294};
295
296/* port_info.rx_offload flags */
297enum {
298 RX_CSO = 1 << 0,
299};
300
301struct dentry;
302struct work_struct;
303
304enum { /* adapter flags */
305 FULL_INIT_DONE = (1 << 0),
306 USING_MSI = (1 << 1),
307 USING_MSIX = (1 << 2),
308 QUEUES_BOUND = (1 << 3),
309 FW_OK = (1 << 4),
310};
311
312struct rx_sw_desc;
313
314struct sge_fl { /* SGE free-buffer queue state */
315 unsigned int avail; /* # of available Rx buffers */
316 unsigned int pend_cred; /* new buffers since last FL DB ring */
317 unsigned int cidx; /* consumer index */
318 unsigned int pidx; /* producer index */
319 unsigned long alloc_failed; /* # of times buffer allocation failed */
320 unsigned long large_alloc_failed;
321 unsigned long starving;
322 /* RO fields */
323 unsigned int cntxt_id; /* SGE context id for the free list */
324 unsigned int size; /* capacity of free list */
325 struct rx_sw_desc *sdesc; /* address of SW Rx descriptor ring */
326 __be64 *desc; /* address of HW Rx descriptor ring */
327 dma_addr_t addr; /* bus address of HW ring start */
328};
329
330/* A packet gather list */
331struct pkt_gl {
332 skb_frag_t frags[MAX_SKB_FRAGS];
333 void *va; /* virtual address of first byte */
334 unsigned int nfrags; /* # of fragments */
335 unsigned int tot_len; /* total length of fragments */
336};
337
338typedef int (*rspq_handler_t)(struct sge_rspq *q, const __be64 *rsp,
339 const struct pkt_gl *gl);
340
341struct sge_rspq { /* state for an SGE response queue */
342 struct napi_struct napi;
343 const __be64 *cur_desc; /* current descriptor in queue */
344 unsigned int cidx; /* consumer index */
345 u8 gen; /* current generation bit */
346 u8 intr_params; /* interrupt holdoff parameters */
347 u8 next_intr_params; /* holdoff params for next interrupt */
348 u8 pktcnt_idx; /* interrupt packet threshold */
349 u8 uld; /* ULD handling this queue */
350 u8 idx; /* queue index within its group */
351 int offset; /* offset into current Rx buffer */
352 u16 cntxt_id; /* SGE context id for the response q */
353 u16 abs_id; /* absolute SGE id for the response q */
354 __be64 *desc; /* address of HW response ring */
355 dma_addr_t phys_addr; /* physical address of the ring */
356 unsigned int iqe_len; /* entry size */
357 unsigned int size; /* capacity of response queue */
358 struct adapter *adap;
359 struct net_device *netdev; /* associated net device */
360 rspq_handler_t handler;
361};
362
363struct sge_eth_stats { /* Ethernet queue statistics */
364 unsigned long pkts; /* # of ethernet packets */
365 unsigned long lro_pkts; /* # of LRO super packets */
366 unsigned long lro_merged; /* # of wire packets merged by LRO */
367 unsigned long rx_cso; /* # of Rx checksum offloads */
368 unsigned long vlan_ex; /* # of Rx VLAN extractions */
369 unsigned long rx_drops; /* # of packets dropped due to no mem */
370};
371
372struct sge_eth_rxq { /* SW Ethernet Rx queue */
373 struct sge_rspq rspq;
374 struct sge_fl fl;
375 struct sge_eth_stats stats;
376} ____cacheline_aligned_in_smp;
377
378struct sge_ofld_stats { /* offload queue statistics */
379 unsigned long pkts; /* # of packets */
380 unsigned long imm; /* # of immediate-data packets */
381 unsigned long an; /* # of asynchronous notifications */
382 unsigned long nomem; /* # of responses deferred due to no mem */
383};
384
385struct sge_ofld_rxq { /* SW offload Rx queue */
386 struct sge_rspq rspq;
387 struct sge_fl fl;
388 struct sge_ofld_stats stats;
389} ____cacheline_aligned_in_smp;
390
391struct tx_desc {
392 __be64 flit[8];
393};
394
395struct tx_sw_desc;
396
397struct sge_txq {
398 unsigned int in_use; /* # of in-use Tx descriptors */
399 unsigned int size; /* # of descriptors */
400 unsigned int cidx; /* SW consumer index */
401 unsigned int pidx; /* producer index */
402 unsigned long stops; /* # of times q has been stopped */
403 unsigned long restarts; /* # of queue restarts */
404 unsigned int cntxt_id; /* SGE context id for the Tx q */
405 struct tx_desc *desc; /* address of HW Tx descriptor ring */
406 struct tx_sw_desc *sdesc; /* address of SW Tx descriptor ring */
407 struct sge_qstat *stat; /* queue status entry */
408 dma_addr_t phys_addr; /* physical address of the ring */
409};
410
411struct sge_eth_txq { /* state for an SGE Ethernet Tx queue */
412 struct sge_txq q;
413 struct netdev_queue *txq; /* associated netdev TX queue */
414 unsigned long tso; /* # of TSO requests */
415 unsigned long tx_cso; /* # of Tx checksum offloads */
416 unsigned long vlan_ins; /* # of Tx VLAN insertions */
417 unsigned long mapping_err; /* # of I/O MMU packet mapping errors */
418} ____cacheline_aligned_in_smp;
419
420struct sge_ofld_txq { /* state for an SGE offload Tx queue */
421 struct sge_txq q;
422 struct adapter *adap;
423 struct sk_buff_head sendq; /* list of backpressured packets */
424 struct tasklet_struct qresume_tsk; /* restarts the queue */
425 u8 full; /* the Tx ring is full */
426 unsigned long mapping_err; /* # of I/O MMU packet mapping errors */
427} ____cacheline_aligned_in_smp;
428
429struct sge_ctrl_txq { /* state for an SGE control Tx queue */
430 struct sge_txq q;
431 struct adapter *adap;
432 struct sk_buff_head sendq; /* list of backpressured packets */
433 struct tasklet_struct qresume_tsk; /* restarts the queue */
434 u8 full; /* the Tx ring is full */
435} ____cacheline_aligned_in_smp;
436
437struct sge {
438 struct sge_eth_txq ethtxq[MAX_ETH_QSETS];
439 struct sge_ofld_txq ofldtxq[MAX_OFLD_QSETS];
440 struct sge_ctrl_txq ctrlq[MAX_CTRL_QUEUES];
441
442 struct sge_eth_rxq ethrxq[MAX_ETH_QSETS];
443 struct sge_ofld_rxq ofldrxq[MAX_OFLD_QSETS];
444 struct sge_ofld_rxq rdmarxq[MAX_RDMA_QUEUES];
445 struct sge_rspq fw_evtq ____cacheline_aligned_in_smp;
446
447 struct sge_rspq intrq ____cacheline_aligned_in_smp;
448 spinlock_t intrq_lock;
449
450 u16 max_ethqsets; /* # of available Ethernet queue sets */
451 u16 ethqsets; /* # of active Ethernet queue sets */
452 u16 ethtxq_rover; /* Tx queue to clean up next */
453 u16 ofldqsets; /* # of active offload queue sets */
454 u16 rdmaqs; /* # of available RDMA Rx queues */
455 u16 ofld_rxq[MAX_OFLD_QSETS];
456 u16 rdma_rxq[NCHAN];
457 u16 timer_val[SGE_NTIMERS];
458 u8 counter_val[SGE_NCOUNTERS];
459 unsigned int starve_thres;
460 u8 idma_state[2];
461 void *egr_map[MAX_EGRQ]; /* qid->queue egress queue map */
462 struct sge_rspq *ingr_map[MAX_INGQ]; /* qid->queue ingress queue map */
463 DECLARE_BITMAP(starving_fl, MAX_EGRQ);
464 DECLARE_BITMAP(txq_maperr, MAX_EGRQ);
465 struct timer_list rx_timer; /* refills starving FLs */
466 struct timer_list tx_timer; /* checks Tx queues */
467};
468
469#define for_each_ethrxq(sge, i) for (i = 0; i < (sge)->ethqsets; i++)
470#define for_each_ofldrxq(sge, i) for (i = 0; i < (sge)->ofldqsets; i++)
471#define for_each_rdmarxq(sge, i) for (i = 0; i < (sge)->rdmaqs; i++)
472
473struct l2t_data;
474
475struct adapter {
476 void __iomem *regs;
477 struct pci_dev *pdev;
478 struct device *pdev_dev;
479 unsigned long registered_device_map;
480 unsigned long open_device_map;
481 unsigned long flags;
482
483 const char *name;
484 int msg_enable;
485
486 struct adapter_params params;
487 struct cxgb4_virt_res vres;
488 unsigned int swintr;
489
490 unsigned int wol;
491
492 struct {
493 unsigned short vec;
494 char desc[14];
495 } msix_info[MAX_INGQ + 1];
496
497 struct sge sge;
498
499 struct net_device *port[MAX_NPORTS];
500 u8 chan_map[NCHAN]; /* channel -> port map */
501
502 struct l2t_data *l2t;
503 void *uld_handle[CXGB4_ULD_MAX];
504 struct list_head list_node;
505
506 struct tid_info tids;
507 void **tid_release_head;
508 spinlock_t tid_release_lock;
509 struct work_struct tid_release_task;
510 bool tid_release_task_busy;
511
512 struct dentry *debugfs_root;
513
514 spinlock_t stats_lock;
515};
516
517static inline u32 t4_read_reg(struct adapter *adap, u32 reg_addr)
518{
519 return readl(adap->regs + reg_addr);
520}
521
522static inline void t4_write_reg(struct adapter *adap, u32 reg_addr, u32 val)
523{
524 writel(val, adap->regs + reg_addr);
525}
526
527#ifndef readq
528static inline u64 readq(const volatile void __iomem *addr)
529{
530 return readl(addr) + ((u64)readl(addr + 4) << 32);
531}
532
533static inline void writeq(u64 val, volatile void __iomem *addr)
534{
535 writel(val, addr);
536 writel(val >> 32, addr + 4);
537}
538#endif
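/* The readq/writeq fallbacks above compose a 64-bit access from two 32-bit
 * accesses (low word at addr, high word at addr + 4), so such an access is
 * not atomic with respect to concurrent hardware updates of the register.
 */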
539
540static inline u64 t4_read_reg64(struct adapter *adap, u32 reg_addr)
541{
542 return readq(adap->regs + reg_addr);
543}
544
545static inline void t4_write_reg64(struct adapter *adap, u32 reg_addr, u64 val)
546{
547 writeq(val, adap->regs + reg_addr);
548}
549
550/**
551 * netdev2pinfo - return the port_info structure associated with a net_device
552 * @dev: the netdev
553 *
554 * Return the struct port_info associated with a net_device
555 */
556static inline struct port_info *netdev2pinfo(const struct net_device *dev)
557{
558 return netdev_priv(dev);
559}
560
561/**
562 * adap2pinfo - return the port_info of a port
563 * @adap: the adapter
564 * @idx: the port index
565 *
566 * Return the port_info structure for the port of the given index.
567 */
568static inline struct port_info *adap2pinfo(struct adapter *adap, int idx)
569{
570 return netdev_priv(adap->port[idx]);
571}
572
573/**
574 * netdev2adap - return the adapter structure associated with a net_device
575 * @dev: the netdev
576 *
577 * Return the struct adapter associated with a net_device
578 */
579static inline struct adapter *netdev2adap(const struct net_device *dev)
580{
581 return netdev2pinfo(dev)->adapter;
582}
583
584void t4_os_portmod_changed(const struct adapter *adap, int port_id);
585void t4_os_link_changed(struct adapter *adap, int port_id, int link_stat);
586
587void *t4_alloc_mem(size_t size);
588void t4_free_mem(void *addr);
589
590void t4_free_sge_resources(struct adapter *adap);
591irq_handler_t t4_intr_handler(struct adapter *adap);
592netdev_tx_t t4_eth_xmit(struct sk_buff *skb, struct net_device *dev);
593int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
594 const struct pkt_gl *gl);
595int t4_mgmt_tx(struct adapter *adap, struct sk_buff *skb);
596int t4_ofld_send(struct adapter *adap, struct sk_buff *skb);
597int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
598 struct net_device *dev, int intr_idx,
599 struct sge_fl *fl, rspq_handler_t hnd);
600int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
601 struct net_device *dev, struct netdev_queue *netdevq,
602 unsigned int iqid);
603int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq,
604 struct net_device *dev, unsigned int iqid,
605 unsigned int cmplqid);
606int t4_sge_alloc_ofld_txq(struct adapter *adap, struct sge_ofld_txq *txq,
607 struct net_device *dev, unsigned int iqid);
608irqreturn_t t4_sge_intr_msix(int irq, void *cookie);
609void t4_sge_init(struct adapter *adap);
610void t4_sge_start(struct adapter *adap);
611void t4_sge_stop(struct adapter *adap);
612
613#define for_each_port(adapter, iter) \
614 for (iter = 0; iter < (adapter)->params.nports; ++iter)
615
616static inline unsigned int core_ticks_per_usec(const struct adapter *adap)
617{
618 return adap->params.vpd.cclk / 1000;
619}
620
621static inline unsigned int us_to_core_ticks(const struct adapter *adap,
622 unsigned int us)
623{
624 return (us * adap->params.vpd.cclk) / 1000;
625}
626
627void t4_set_reg_field(struct adapter *adap, unsigned int addr, u32 mask,
628 u32 val);
629
630int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
631 void *rpl, bool sleep_ok);
632
633static inline int t4_wr_mbox(struct adapter *adap, int mbox, const void *cmd,
634 int size, void *rpl)
635{
636 return t4_wr_mbox_meat(adap, mbox, cmd, size, rpl, true);
637}
638
639static inline int t4_wr_mbox_ns(struct adapter *adap, int mbox, const void *cmd,
640 int size, void *rpl)
641{
642 return t4_wr_mbox_meat(adap, mbox, cmd, size, rpl, false);
643}
644
645void t4_intr_enable(struct adapter *adapter);
646void t4_intr_disable(struct adapter *adapter);
647void t4_intr_clear(struct adapter *adapter);
648int t4_slow_intr_handler(struct adapter *adapter);
649
650int t4_link_start(struct adapter *adap, unsigned int mbox, unsigned int port,
651 struct link_config *lc);
652int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port);
653int t4_seeprom_wp(struct adapter *adapter, bool enable);
654int t4_read_flash(struct adapter *adapter, unsigned int addr,
655 unsigned int nwords, u32 *data, int byte_oriented);
656int t4_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size);
657int t4_check_fw_version(struct adapter *adapter);
658int t4_prep_adapter(struct adapter *adapter);
659int t4_port_init(struct adapter *adap, int mbox, int pf, int vf);
660void t4_fatal_err(struct adapter *adapter);
661void t4_set_vlan_accel(struct adapter *adapter, unsigned int ports, int on);
662int t4_set_trace_filter(struct adapter *adapter, const struct trace_params *tp,
663 int filter_index, int enable);
664void t4_get_trace_filter(struct adapter *adapter, struct trace_params *tp,
665 int filter_index, int *enabled);
666int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
667 int start, int n, const u16 *rspq, unsigned int nrspq);
668int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
669 unsigned int flags);
670int t4_read_rss(struct adapter *adapter, u16 *entries);
671int t4_mc_read(struct adapter *adap, u32 addr, __be32 *data, u64 *parity);
672int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data,
673 u64 *parity);
674
675void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p);
676void t4_get_lb_stats(struct adapter *adap, int idx, struct lb_port_stats *p);
677
678void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log);
679void t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st);
680void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
681 struct tp_tcp_stats *v6);
682void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
683 const unsigned short *alpha, const unsigned short *beta);
684
685void t4_wol_magic_enable(struct adapter *adap, unsigned int port,
686 const u8 *addr);
687int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map,
688 u64 mask0, u64 mask1, unsigned int crc, bool enable);
689
690int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
691 enum dev_master master, enum dev_state *state);
692int t4_fw_bye(struct adapter *adap, unsigned int mbox);
693int t4_early_init(struct adapter *adap, unsigned int mbox);
694int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset);
695int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
696 unsigned int vf, unsigned int nparams, const u32 *params,
697 u32 *val);
698int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
699 unsigned int vf, unsigned int nparams, const u32 *params,
700 const u32 *val);
701int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
702 unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl,
703 unsigned int rxqi, unsigned int rxq, unsigned int tc,
704 unsigned int vi, unsigned int cmask, unsigned int pmask,
705 unsigned int nexact, unsigned int rcaps, unsigned int wxcaps);
706int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
707 unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
708 unsigned int *rss_size);
709int t4_free_vi(struct adapter *adap, unsigned int mbox, unsigned int pf,
710 unsigned int vf, unsigned int viid);
711int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
712 int mtu, int promisc, int all_multi, int bcast, bool sleep_ok);
713int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
714 unsigned int viid, bool free, unsigned int naddr,
715 const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok);
716int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
717 int idx, const u8 *addr, bool persist, bool add_smt);
718int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid,
719 bool ucast, u64 vec, bool sleep_ok);
720int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid,
721 bool rx_en, bool tx_en);
722int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
723 unsigned int nblinks);
724int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
725 unsigned int mmd, unsigned int reg, u16 *valp);
726int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
727 unsigned int mmd, unsigned int reg, u16 val);
728int t4_iq_start_stop(struct adapter *adap, unsigned int mbox, bool start,
729 unsigned int pf, unsigned int vf, unsigned int iqid,
730 unsigned int fl0id, unsigned int fl1id);
731int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
732 unsigned int vf, unsigned int iqtype, unsigned int iqid,
733 unsigned int fl0id, unsigned int fl1id);
734int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
735 unsigned int vf, unsigned int eqid);
736int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
737 unsigned int vf, unsigned int eqid);
738int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
739 unsigned int vf, unsigned int eqid);
740int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl);
741#endif /* __CXGB4_H__ */
diff --git a/drivers/net/cxgb4/cxgb4_main.c b/drivers/net/cxgb4/cxgb4_main.c
new file mode 100644
index 000000000000..a7e30a23d322
--- /dev/null
+++ b/drivers/net/cxgb4/cxgb4_main.c
@@ -0,0 +1,3388 @@
1/*
2 * This file is part of the Chelsio T4 Ethernet driver for Linux.
3 *
4 * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved.
5 *
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
11 *
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
14 * conditions are met:
15 *
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
18 * disclaimer.
19 *
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 * SOFTWARE.
33 */
34
35#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
36
37#include <linux/bitmap.h>
38#include <linux/crc32.h>
39#include <linux/ctype.h>
40#include <linux/debugfs.h>
41#include <linux/err.h>
42#include <linux/etherdevice.h>
43#include <linux/firmware.h>
44#include <linux/if_vlan.h>
45#include <linux/init.h>
46#include <linux/log2.h>
47#include <linux/mdio.h>
48#include <linux/module.h>
49#include <linux/moduleparam.h>
50#include <linux/mutex.h>
51#include <linux/netdevice.h>
52#include <linux/pci.h>
53#include <linux/aer.h>
54#include <linux/rtnetlink.h>
55#include <linux/sched.h>
56#include <linux/seq_file.h>
57#include <linux/sockios.h>
58#include <linux/vmalloc.h>
59#include <linux/workqueue.h>
60#include <net/neighbour.h>
61#include <net/netevent.h>
62#include <asm/uaccess.h>
63
64#include "cxgb4.h"
65#include "t4_regs.h"
66#include "t4_msg.h"
67#include "t4fw_api.h"
68#include "l2t.h"
69
70#define DRV_VERSION "1.0.0-ko"
71#define DRV_DESC "Chelsio T4 Network Driver"
72
73/*
74 * Max interrupt hold-off timer value in us. Queues fall back to this value
75 * under extreme memory pressure so it's largish to give the system time to
76 * recover.
77 */
78#define MAX_SGE_TIMERVAL 200U
79
80enum {
81 MEMWIN0_APERTURE = 65536,
82 MEMWIN0_BASE = 0x30000,
83 MEMWIN1_APERTURE = 32768,
84 MEMWIN1_BASE = 0x28000,
85 MEMWIN2_APERTURE = 2048,
86 MEMWIN2_BASE = 0x1b800,
87};
88
89enum {
90 MAX_TXQ_ENTRIES = 16384,
91 MAX_CTRL_TXQ_ENTRIES = 1024,
92 MAX_RSPQ_ENTRIES = 16384,
93 MAX_RX_BUFFERS = 16384,
94 MIN_TXQ_ENTRIES = 32,
95 MIN_CTRL_TXQ_ENTRIES = 32,
96 MIN_RSPQ_ENTRIES = 128,
97 MIN_FL_ENTRIES = 16
98};
99
100#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
101 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
102 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
103
104#define CH_DEVICE(devid) { PCI_VDEVICE(CHELSIO, devid), 0 }
105
106static DEFINE_PCI_DEVICE_TABLE(cxgb4_pci_tbl) = {
107 CH_DEVICE(0xa000), /* PE10K */
108 { 0, }
109};
110
111#define FW_FNAME "cxgb4/t4fw.bin"
112
113MODULE_DESCRIPTION(DRV_DESC);
114MODULE_AUTHOR("Chelsio Communications");
115MODULE_LICENSE("Dual BSD/GPL");
116MODULE_VERSION(DRV_VERSION);
117MODULE_DEVICE_TABLE(pci, cxgb4_pci_tbl);
118MODULE_FIRMWARE(FW_FNAME);
119
120static int dflt_msg_enable = DFLT_MSG_ENABLE;
121
122module_param(dflt_msg_enable, int, 0644);
123MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T4 default message enable bitmap");
124
125/*
126 * The driver uses the best interrupt scheme available on a platform in the
127 * order MSI-X, MSI, legacy INTx interrupts. This parameter determines which
128 * of these schemes the driver may consider as follows:
129 *
130 * msi = 2: choose from among all three options
131 * msi = 1: only consider MSI and INTx interrupts
132 * msi = 0: force INTx interrupts
133 */
134static int msi = 2;
135
136module_param(msi, int, 0644);
137MODULE_PARM_DESC(msi, "whether to use INTx (0), MSI (1) or MSI-X (2)");
138
139/*
140 * Queue interrupt hold-off timer values. Queues default to the first of these
141 * upon creation.
142 */
143static unsigned int intr_holdoff[SGE_NTIMERS - 1] = { 5, 10, 20, 50, 100 };
144
145module_param_array(intr_holdoff, uint, NULL, 0644);
146MODULE_PARM_DESC(intr_holdoff, "values for queue interrupt hold-off timers "
147 "0..4 in microseconds");
148
149static unsigned int intr_cnt[SGE_NCOUNTERS - 1] = { 4, 8, 16 };
150
151module_param_array(intr_cnt, uint, NULL, 0644);
152MODULE_PARM_DESC(intr_cnt,
153 "thresholds 1..3 for queue interrupt packet counters");
154
155static int vf_acls;
156
157#ifdef CONFIG_PCI_IOV
158module_param(vf_acls, bool, 0644);
159MODULE_PARM_DESC(vf_acls, "if set enable virtualization L2 ACL enforcement");
160
161static unsigned int num_vf[4];
162
163module_param_array(num_vf, uint, NULL, 0644);
164MODULE_PARM_DESC(num_vf, "number of VFs for each of PFs 0-3");
165#endif
166
167static struct dentry *cxgb4_debugfs_root;
168
169static LIST_HEAD(adapter_list);
170static DEFINE_MUTEX(uld_mutex);
171static struct cxgb4_uld_info ulds[CXGB4_ULD_MAX];
172static const char *uld_str[] = { "RDMA", "iSCSI" };
173
174static void link_report(struct net_device *dev)
175{
176 if (!netif_carrier_ok(dev))
177 netdev_info(dev, "link down\n");
178 else {
179 static const char *fc[] = { "no", "Rx", "Tx", "Tx/Rx" };
180
181 const char *s = "10Mbps";
182 const struct port_info *p = netdev_priv(dev);
183
184 switch (p->link_cfg.speed) {
185 case SPEED_10000:
186 s = "10Gbps";
187 break;
188 case SPEED_1000:
189 s = "1000Mbps";
190 break;
191 case SPEED_100:
192 s = "100Mbps";
193 break;
194 }
195
196 netdev_info(dev, "link up, %s, full-duplex, %s PAUSE\n", s,
197 fc[p->link_cfg.fc]);
198 }
199}
200
201void t4_os_link_changed(struct adapter *adapter, int port_id, int link_stat)
202{
203 struct net_device *dev = adapter->port[port_id];
204
205 /* Skip changes from disabled ports. */
206 if (netif_running(dev) && link_stat != netif_carrier_ok(dev)) {
207 if (link_stat)
208 netif_carrier_on(dev);
209 else
210 netif_carrier_off(dev);
211
212 link_report(dev);
213 }
214}
215
216void t4_os_portmod_changed(const struct adapter *adap, int port_id)
217{
218 static const char *mod_str[] = {
219 NULL, "LR", "SR", "ER", "passive DA", "active DA"
220 };
221
222 const struct net_device *dev = adap->port[port_id];
223 const struct port_info *pi = netdev_priv(dev);
224
225 if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
226 netdev_info(dev, "port module unplugged\n");
227 else
228 netdev_info(dev, "%s module inserted\n", mod_str[pi->mod_type]);
229}
230
231/*
232 * Configure the exact and hash address filters to handle a port's multicast
233 * and secondary unicast MAC addresses.
234 */
235static int set_addr_filters(const struct net_device *dev, bool sleep)
236{
237 u64 mhash = 0;
238 u64 uhash = 0;
239 bool free = true;
240 u16 filt_idx[7];
241 const u8 *addr[7];
242 int ret, naddr = 0;
243 const struct dev_addr_list *d;
244 const struct netdev_hw_addr *ha;
245 int uc_cnt = netdev_uc_count(dev);
246 const struct port_info *pi = netdev_priv(dev);
247
248 /* first do the secondary unicast addresses */
249 netdev_for_each_uc_addr(ha, dev) {
250 addr[naddr++] = ha->addr;
251 if (--uc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) {
252 ret = t4_alloc_mac_filt(pi->adapter, 0, pi->viid, free,
253 naddr, addr, filt_idx, &uhash, sleep);
254 if (ret < 0)
255 return ret;
256
257 free = false;
258 naddr = 0;
259 }
260 }
261
262 /* next set up the multicast addresses */
263 netdev_for_each_mc_addr(d, dev) {
264 addr[naddr++] = d->dmi_addr;
265 if (naddr >= ARRAY_SIZE(addr) || d->next == NULL) {
266 ret = t4_alloc_mac_filt(pi->adapter, 0, pi->viid, free,
267 naddr, addr, filt_idx, &mhash, sleep);
268 if (ret < 0)
269 return ret;
270
271 free = false;
272 naddr = 0;
273 }
274 }
275
276 return t4_set_addr_hash(pi->adapter, 0, pi->viid, uhash != 0,
277 uhash | mhash, sleep);
278}
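/* Summary of the batching above: secondary unicast and multicast addresses
 * are pushed to t4_alloc_mac_filt() in groups of up to ARRAY_SIZE(addr) (7),
 * with free set only for the first call so existing filters are released
 * just once; the accumulated uhash/mhash bits returned by those calls are
 * then programmed via t4_set_addr_hash().
 */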
279
280/*
281 * Set Rx properties of a port, such as promiscuity, address filters, and MTU.
282 * If @mtu is -1 it is left unchanged.
283 */
284static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
285{
286 int ret;
287 struct port_info *pi = netdev_priv(dev);
288
289 ret = set_addr_filters(dev, sleep_ok);
290 if (ret == 0)
291 ret = t4_set_rxmode(pi->adapter, 0, pi->viid, mtu,
292 (dev->flags & IFF_PROMISC) ? 1 : 0,
293 (dev->flags & IFF_ALLMULTI) ? 1 : 0, 1,
294 sleep_ok);
295 return ret;
296}
297
298/**
299 * link_start - enable a port
300 * @dev: the port to enable
301 *
302 * Performs the MAC and PHY actions needed to enable a port.
303 */
304static int link_start(struct net_device *dev)
305{
306 int ret;
307 struct port_info *pi = netdev_priv(dev);
308
309 /*
310 * We do not set address filters and promiscuity here; the stack does
311 * that step explicitly.
312 */
313 ret = t4_set_rxmode(pi->adapter, 0, pi->viid, dev->mtu, -1, -1, -1,
314 true);
315 if (ret == 0) {
316 ret = t4_change_mac(pi->adapter, 0, pi->viid,
317 pi->xact_addr_filt, dev->dev_addr, true,
318 false);
319 if (ret >= 0) {
320 pi->xact_addr_filt = ret;
321 ret = 0;
322 }
323 }
324 if (ret == 0)
325 ret = t4_link_start(pi->adapter, 0, pi->tx_chan, &pi->link_cfg);
326 if (ret == 0)
327 ret = t4_enable_vi(pi->adapter, 0, pi->viid, true, true);
328 return ret;
329}
330
331/*
332 * Response queue handler for the FW event queue.
333 */
334static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
335 const struct pkt_gl *gl)
336{
337 u8 opcode = ((const struct rss_header *)rsp)->opcode;
338
339 rsp++; /* skip RSS header */
340 if (likely(opcode == CPL_SGE_EGR_UPDATE)) {
341 const struct cpl_sge_egr_update *p = (void *)rsp;
342 unsigned int qid = EGR_QID(ntohl(p->opcode_qid));
343 struct sge_txq *txq = q->adap->sge.egr_map[qid];
344
345 txq->restarts++;
346 if ((u8 *)txq < (u8 *)q->adap->sge.ofldtxq) {
347 struct sge_eth_txq *eq;
348
349 eq = container_of(txq, struct sge_eth_txq, q);
350 netif_tx_wake_queue(eq->txq);
351 } else {
352 struct sge_ofld_txq *oq;
353
354 oq = container_of(txq, struct sge_ofld_txq, q);
355 tasklet_schedule(&oq->qresume_tsk);
356 }
357 } else if (opcode == CPL_FW6_MSG || opcode == CPL_FW4_MSG) {
358 const struct cpl_fw6_msg *p = (void *)rsp;
359
360 if (p->type == 0)
361 t4_handle_fw_rpl(q->adap, p->data);
362 } else if (opcode == CPL_L2T_WRITE_RPL) {
363 const struct cpl_l2t_write_rpl *p = (void *)rsp;
364
365 do_l2t_write_rpl(q->adap, p);
366 } else
367 dev_err(q->adap->pdev_dev,
368 "unexpected CPL %#x on FW event queue\n", opcode);
369 return 0;
370}
371
372/**
373 * uldrx_handler - response queue handler for ULD queues
374 * @q: the response queue that received the packet
375 * @rsp: the response queue descriptor holding the offload message
376 * @gl: the gather list of packet fragments
377 *
378 * Deliver an ingress offload packet to a ULD. All processing is done by
379 * the ULD; we just maintain statistics.
380 */
381static int uldrx_handler(struct sge_rspq *q, const __be64 *rsp,
382 const struct pkt_gl *gl)
383{
384 struct sge_ofld_rxq *rxq = container_of(q, struct sge_ofld_rxq, rspq);
385
386 if (ulds[q->uld].rx_handler(q->adap->uld_handle[q->uld], rsp, gl)) {
387 rxq->stats.nomem++;
388 return -1;
389 }
390 if (gl == NULL)
391 rxq->stats.imm++;
392 else if (gl == CXGB4_MSG_AN)
393 rxq->stats.an++;
394 else
395 rxq->stats.pkts++;
396 return 0;
397}
398
399static void disable_msi(struct adapter *adapter)
400{
401 if (adapter->flags & USING_MSIX) {
402 pci_disable_msix(adapter->pdev);
403 adapter->flags &= ~USING_MSIX;
404 } else if (adapter->flags & USING_MSI) {
405 pci_disable_msi(adapter->pdev);
406 adapter->flags &= ~USING_MSI;
407 }
408}
409
410/*
411 * Interrupt handler for non-data events used with MSI-X.
412 */
413static irqreturn_t t4_nondata_intr(int irq, void *cookie)
414{
415 struct adapter *adap = cookie;
416
417 u32 v = t4_read_reg(adap, MYPF_REG(PL_PF_INT_CAUSE));
418 if (v & PFSW) {
419 adap->swintr = 1;
420 t4_write_reg(adap, MYPF_REG(PL_PF_INT_CAUSE), v);
421 }
422 t4_slow_intr_handler(adap);
423 return IRQ_HANDLED;
424}
425
426/*
427 * Name the MSI-X interrupts.
428 */
429static void name_msix_vecs(struct adapter *adap)
430{
431 int i, j, msi_idx = 2, n = sizeof(adap->msix_info[0].desc) - 1;
432
433 /* non-data interrupts */
434 snprintf(adap->msix_info[0].desc, n, "%s", adap->name);
435 adap->msix_info[0].desc[n] = 0;
436
437 /* FW events */
438 snprintf(adap->msix_info[1].desc, n, "%s-FWeventq", adap->name);
439 adap->msix_info[1].desc[n] = 0;
440
441 /* Ethernet queues */
442 for_each_port(adap, j) {
443 struct net_device *d = adap->port[j];
444 const struct port_info *pi = netdev_priv(d);
445
446 for (i = 0; i < pi->nqsets; i++, msi_idx++) {
447 snprintf(adap->msix_info[msi_idx].desc, n, "%s-Rx%d",
448 d->name, i);
449 adap->msix_info[msi_idx].desc[n] = 0;
450 }
451 }
452
453 /* offload queues */
454 for_each_ofldrxq(&adap->sge, i) {
455 snprintf(adap->msix_info[msi_idx].desc, n, "%s-ofld%d",
456 adap->name, i);
457 adap->msix_info[msi_idx++].desc[n] = 0;
458 }
459 for_each_rdmarxq(&adap->sge, i) {
460 snprintf(adap->msix_info[msi_idx].desc, n, "%s-rdma%d",
461 adap->name, i);
462 adap->msix_info[msi_idx++].desc[n] = 0;
463 }
464}
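/* MSI-X vector layout implied by name_msix_vecs() above and relied on by
 * request_msix_queue_irqs() below: vector 0 is the non-data interrupt,
 * vector 1 the FW event queue, then one vector per Ethernet Rx queue of each
 * port, followed by the offload and RDMA Rx queues.
 */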
465
466static int request_msix_queue_irqs(struct adapter *adap)
467{
468 struct sge *s = &adap->sge;
469 int err, ethqidx, ofldqidx = 0, rdmaqidx = 0, msi = 2;
470
471 err = request_irq(adap->msix_info[1].vec, t4_sge_intr_msix, 0,
472 adap->msix_info[1].desc, &s->fw_evtq);
473 if (err)
474 return err;
475
476 for_each_ethrxq(s, ethqidx) {
477 err = request_irq(adap->msix_info[msi].vec, t4_sge_intr_msix, 0,
478 adap->msix_info[msi].desc,
479 &s->ethrxq[ethqidx].rspq);
480 if (err)
481 goto unwind;
482 msi++;
483 }
484 for_each_ofldrxq(s, ofldqidx) {
485 err = request_irq(adap->msix_info[msi].vec, t4_sge_intr_msix, 0,
486 adap->msix_info[msi].desc,
487 &s->ofldrxq[ofldqidx].rspq);
488 if (err)
489 goto unwind;
490 msi++;
491 }
492 for_each_rdmarxq(s, rdmaqidx) {
493 err = request_irq(adap->msix_info[msi].vec, t4_sge_intr_msix, 0,
494 adap->msix_info[msi].desc,
495 &s->rdmarxq[rdmaqidx].rspq);
496 if (err)
497 goto unwind;
498 msi++;
499 }
500 return 0;
501
502unwind:
503 while (--rdmaqidx >= 0)
504 free_irq(adap->msix_info[--msi].vec,
505 &s->rdmarxq[rdmaqidx].rspq);
506 while (--ofldqidx >= 0)
507 free_irq(adap->msix_info[--msi].vec,
508 &s->ofldrxq[ofldqidx].rspq);
509 while (--ethqidx >= 0)
510 free_irq(adap->msix_info[--msi].vec, &s->ethrxq[ethqidx].rspq);
511 free_irq(adap->msix_info[1].vec, &s->fw_evtq);
512 return err;
513}
514
515static void free_msix_queue_irqs(struct adapter *adap)
516{
517 int i, msi = 2;
518 struct sge *s = &adap->sge;
519
520 free_irq(adap->msix_info[1].vec, &s->fw_evtq);
521 for_each_ethrxq(s, i)
522 free_irq(adap->msix_info[msi++].vec, &s->ethrxq[i].rspq);
523 for_each_ofldrxq(s, i)
524 free_irq(adap->msix_info[msi++].vec, &s->ofldrxq[i].rspq);
525 for_each_rdmarxq(s, i)
526 free_irq(adap->msix_info[msi++].vec, &s->rdmarxq[i].rspq);
527}
528
529/**
530 * setup_rss - configure RSS
531 * @adap: the adapter
532 *
533 * Sets up RSS to distribute packets to multiple receive queues. We
534 * configure the RSS CPU lookup table to distribute to the number of HW
535 * receive queues, and the response queue lookup table to narrow that
536 * down to the response queues actually configured for each port.
537 * We always configure the RSS mapping for all ports since the mapping
538 * table has plenty of entries.
539 */
540static int setup_rss(struct adapter *adap)
541{
542 int i, j, err;
543 u16 rss[MAX_ETH_QSETS];
544
545 for_each_port(adap, i) {
546 const struct port_info *pi = adap2pinfo(adap, i);
547 const struct sge_eth_rxq *q = &adap->sge.ethrxq[pi->first_qset];
548
549 for (j = 0; j < pi->nqsets; j++)
550 rss[j] = q[j].rspq.abs_id;
551
552 err = t4_config_rss_range(adap, 0, pi->viid, 0, pi->rss_size,
553 rss, pi->nqsets);
554 if (err)
555 return err;
556 }
557 return 0;
558}
559
560/*
561 * Wait until all NAPI handlers are descheduled.
562 */
563static void quiesce_rx(struct adapter *adap)
564{
565 int i;
566
567 for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) {
568 struct sge_rspq *q = adap->sge.ingr_map[i];
569
570 if (q && q->handler)
571 napi_disable(&q->napi);
572 }
573}
574
575/*
576 * Enable NAPI scheduling and interrupt generation for all Rx queues.
577 */
578static void enable_rx(struct adapter *adap)
579{
580 int i;
581
582 for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) {
583 struct sge_rspq *q = adap->sge.ingr_map[i];
584
585 if (!q)
586 continue;
587 if (q->handler)
588 napi_enable(&q->napi);
589 /* 0-increment GTS to start the timer and enable interrupts */
590 t4_write_reg(adap, MYPF_REG(SGE_PF_GTS),
591 SEINTARM(q->intr_params) |
592 INGRESSQID(q->cntxt_id));
593 }
594}
595
596/**
597 * setup_sge_queues - configure SGE Tx/Rx/response queues
598 * @adap: the adapter
599 *
600 * Determines how many sets of SGE queues to use and initializes them.
601 * We support multiple queue sets per port if we have MSI-X, otherwise
602 * just one queue set per port.
603 */
604static int setup_sge_queues(struct adapter *adap)
605{
606 int err, msi_idx, i, j;
607 struct sge *s = &adap->sge;
608
609 bitmap_zero(s->starving_fl, MAX_EGRQ);
610 bitmap_zero(s->txq_maperr, MAX_EGRQ);
611
612 if (adap->flags & USING_MSIX)
613 msi_idx = 1; /* vector 0 is for non-queue interrupts */
614 else {
615 err = t4_sge_alloc_rxq(adap, &s->intrq, false, adap->port[0], 0,
616 NULL, NULL);
617 if (err)
618 return err;
619 msi_idx = -((int)s->intrq.abs_id + 1);
620 }
621
622 err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0],
623 msi_idx, NULL, fwevtq_handler);
624 if (err) {
625freeout: t4_free_sge_resources(adap);
626 return err;
627 }
628
629 for_each_port(adap, i) {
630 struct net_device *dev = adap->port[i];
631 struct port_info *pi = netdev_priv(dev);
632 struct sge_eth_rxq *q = &s->ethrxq[pi->first_qset];
633 struct sge_eth_txq *t = &s->ethtxq[pi->first_qset];
634
635 for (j = 0; j < pi->nqsets; j++, q++) {
636 if (msi_idx > 0)
637 msi_idx++;
638 err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev,
639 msi_idx, &q->fl,
640 t4_ethrx_handler);
641 if (err)
642 goto freeout;
643 q->rspq.idx = j;
644 memset(&q->stats, 0, sizeof(q->stats));
645 }
646 for (j = 0; j < pi->nqsets; j++, t++) {
647 err = t4_sge_alloc_eth_txq(adap, t, dev,
648 netdev_get_tx_queue(dev, j),
649 s->fw_evtq.cntxt_id);
650 if (err)
651 goto freeout;
652 }
653 }
654
655 j = s->ofldqsets / adap->params.nports; /* ofld queues per channel */
656 for_each_ofldrxq(s, i) {
657 struct sge_ofld_rxq *q = &s->ofldrxq[i];
658 struct net_device *dev = adap->port[i / j];
659
660 if (msi_idx > 0)
661 msi_idx++;
662 err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev, msi_idx,
663 &q->fl, uldrx_handler);
664 if (err)
665 goto freeout;
666 memset(&q->stats, 0, sizeof(q->stats));
667 s->ofld_rxq[i] = q->rspq.abs_id;
668 err = t4_sge_alloc_ofld_txq(adap, &s->ofldtxq[i], dev,
669 s->fw_evtq.cntxt_id);
670 if (err)
671 goto freeout;
672 }
673
674 for_each_rdmarxq(s, i) {
675 struct sge_ofld_rxq *q = &s->rdmarxq[i];
676
677 if (msi_idx > 0)
678 msi_idx++;
679 err = t4_sge_alloc_rxq(adap, &q->rspq, false, adap->port[i],
680 msi_idx, &q->fl, uldrx_handler);
681 if (err)
682 goto freeout;
683 memset(&q->stats, 0, sizeof(q->stats));
684 s->rdma_rxq[i] = q->rspq.abs_id;
685 }
686
687 for_each_port(adap, i) {
688 /*
689 * Note that ->rdmarxq[i].rspq.cntxt_id below is 0 if we don't
690 * have RDMA queues, and that's the right value.
691 */
692 err = t4_sge_alloc_ctrl_txq(adap, &s->ctrlq[i], adap->port[i],
693 s->fw_evtq.cntxt_id,
694 s->rdmarxq[i].rspq.cntxt_id);
695 if (err)
696 goto freeout;
697 }
698
699 t4_write_reg(adap, MPS_TRC_RSS_CONTROL,
700 RSSCONTROL(netdev2pinfo(adap->port[0])->tx_chan) |
701 QUEUENUMBER(s->ethrxq[0].rspq.abs_id));
702 return 0;
703}
704
705/*
706 * Returns 0 if new FW was successfully loaded, a positive errno if a load was
707 * started but failed, and a negative errno if flash load couldn't start.
708 */
709static int upgrade_fw(struct adapter *adap)
710{
711 int ret;
712 u32 vers;
713 const struct fw_hdr *hdr;
714 const struct firmware *fw;
715 struct device *dev = adap->pdev_dev;
716
717 ret = request_firmware(&fw, FW_FNAME, dev);
718 if (ret < 0) {
719 dev_err(dev, "unable to load firmware image " FW_FNAME
720 ", error %d\n", ret);
721 return ret;
722 }
723
724 hdr = (const struct fw_hdr *)fw->data;
725 vers = ntohl(hdr->fw_ver);
726 if (FW_HDR_FW_VER_MAJOR_GET(vers) != FW_VERSION_MAJOR) {
727 ret = -EINVAL; /* wrong major version, won't do */
728 goto out;
729 }
730
731 /*
732 * If the flash FW is unusable or we found something newer, load it.
733 */
734 if (FW_HDR_FW_VER_MAJOR_GET(adap->params.fw_vers) != FW_VERSION_MAJOR ||
735 vers > adap->params.fw_vers) {
736 ret = -t4_load_fw(adap, fw->data, fw->size);
737 if (!ret)
738 dev_info(dev, "firmware upgraded to version %pI4 from "
739 FW_FNAME "\n", &hdr->fw_ver);
740 }
741out: release_firmware(fw);
742 return ret;
743}
744
745/*
746 * Allocate a chunk of memory using kmalloc or, if that fails, vmalloc.
747 * The allocated memory is cleared.
748 */
749void *t4_alloc_mem(size_t size)
750{
751 void *p = kmalloc(size, GFP_KERNEL);
752
753 if (!p)
754 p = vmalloc(size);
755 if (p)
756 memset(p, 0, size);
757 return p;
758}
759
760/*
761 * Free memory allocated through t4_alloc_mem().
762 */
763void t4_free_mem(void *addr)
764{
765 if (is_vmalloc_addr(addr))
766 vfree(addr);
767 else
768 kfree(addr);
769}
770
771static inline int is_offload(const struct adapter *adap)
772{
773 return adap->params.offload;
774}
775
776/*
777 * Implementation of ethtool operations.
778 */
779
780static u32 get_msglevel(struct net_device *dev)
781{
782 return netdev2adap(dev)->msg_enable;
783}
784
785static void set_msglevel(struct net_device *dev, u32 val)
786{
787 netdev2adap(dev)->msg_enable = val;
788}
789
790static char stats_strings[][ETH_GSTRING_LEN] = {
791 "TxOctetsOK ",
792 "TxFramesOK ",
793 "TxBroadcastFrames ",
794 "TxMulticastFrames ",
795 "TxUnicastFrames ",
796 "TxErrorFrames ",
797
798 "TxFrames64 ",
799 "TxFrames65To127 ",
800 "TxFrames128To255 ",
801 "TxFrames256To511 ",
802 "TxFrames512To1023 ",
803 "TxFrames1024To1518 ",
804 "TxFrames1519ToMax ",
805
806 "TxFramesDropped ",
807 "TxPauseFrames ",
808 "TxPPP0Frames ",
809 "TxPPP1Frames ",
810 "TxPPP2Frames ",
811 "TxPPP3Frames ",
812 "TxPPP4Frames ",
813 "TxPPP5Frames ",
814 "TxPPP6Frames ",
815 "TxPPP7Frames ",
816
817 "RxOctetsOK ",
818 "RxFramesOK ",
819 "RxBroadcastFrames ",
820 "RxMulticastFrames ",
821 "RxUnicastFrames ",
822
823 "RxFramesTooLong ",
824 "RxJabberErrors ",
825 "RxFCSErrors ",
826 "RxLengthErrors ",
827 "RxSymbolErrors ",
828 "RxRuntFrames ",
829
830 "RxFrames64 ",
831 "RxFrames65To127 ",
832 "RxFrames128To255 ",
833 "RxFrames256To511 ",
834 "RxFrames512To1023 ",
835 "RxFrames1024To1518 ",
836 "RxFrames1519ToMax ",
837
838 "RxPauseFrames ",
839 "RxPPP0Frames ",
840 "RxPPP1Frames ",
841 "RxPPP2Frames ",
842 "RxPPP3Frames ",
843 "RxPPP4Frames ",
844 "RxPPP5Frames ",
845 "RxPPP6Frames ",
846 "RxPPP7Frames ",
847
848 "RxBG0FramesDropped ",
849 "RxBG1FramesDropped ",
850 "RxBG2FramesDropped ",
851 "RxBG3FramesDropped ",
852 "RxBG0FramesTrunc ",
853 "RxBG1FramesTrunc ",
854 "RxBG2FramesTrunc ",
855 "RxBG3FramesTrunc ",
856
857 "TSO ",
858 "TxCsumOffload ",
859 "RxCsumGood ",
860 "VLANextractions ",
861 "VLANinsertions ",
862};
863
864static int get_sset_count(struct net_device *dev, int sset)
865{
866 switch (sset) {
867 case ETH_SS_STATS:
868 return ARRAY_SIZE(stats_strings);
869 default:
870 return -EOPNOTSUPP;
871 }
872}
873
874#define T4_REGMAP_SIZE (160 * 1024)
875
876static int get_regs_len(struct net_device *dev)
877{
878 return T4_REGMAP_SIZE;
879}
880
881static int get_eeprom_len(struct net_device *dev)
882{
883 return EEPROMSIZE;
884}
885
886static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
887{
888 struct adapter *adapter = netdev2adap(dev);
889
890 strcpy(info->driver, KBUILD_MODNAME);
891 strcpy(info->version, DRV_VERSION);
892 strcpy(info->bus_info, pci_name(adapter->pdev));
893
894 if (!adapter->params.fw_vers)
895 strcpy(info->fw_version, "N/A");
896 else
897 snprintf(info->fw_version, sizeof(info->fw_version),
898 "%u.%u.%u.%u, TP %u.%u.%u.%u",
899 FW_HDR_FW_VER_MAJOR_GET(adapter->params.fw_vers),
900 FW_HDR_FW_VER_MINOR_GET(adapter->params.fw_vers),
901 FW_HDR_FW_VER_MICRO_GET(adapter->params.fw_vers),
902 FW_HDR_FW_VER_BUILD_GET(adapter->params.fw_vers),
903 FW_HDR_FW_VER_MAJOR_GET(adapter->params.tp_vers),
904 FW_HDR_FW_VER_MINOR_GET(adapter->params.tp_vers),
905 FW_HDR_FW_VER_MICRO_GET(adapter->params.tp_vers),
906 FW_HDR_FW_VER_BUILD_GET(adapter->params.tp_vers));
907}
908
909static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
910{
911 if (stringset == ETH_SS_STATS)
912 memcpy(data, stats_strings, sizeof(stats_strings));
913}
914
915/*
916 * port stats maintained per queue of the port. They should be in the same
917 * order as in stats_strings above.
918 */
919struct queue_port_stats {
920 u64 tso;
921 u64 tx_csum;
922 u64 rx_csum;
923 u64 vlan_ex;
924 u64 vlan_ins;
925};
926
927static void collect_sge_port_stats(const struct adapter *adap,
928 const struct port_info *p, struct queue_port_stats *s)
929{
930 int i;
931 const struct sge_eth_txq *tx = &adap->sge.ethtxq[p->first_qset];
932 const struct sge_eth_rxq *rx = &adap->sge.ethrxq[p->first_qset];
933
934 memset(s, 0, sizeof(*s));
935 for (i = 0; i < p->nqsets; i++, rx++, tx++) {
936 s->tso += tx->tso;
937 s->tx_csum += tx->tx_cso;
938 s->rx_csum += rx->stats.rx_cso;
939 s->vlan_ex += rx->stats.vlan_ex;
940 s->vlan_ins += tx->vlan_ins;
941 }
942}
943
944static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
945 u64 *data)
946{
947 struct port_info *pi = netdev_priv(dev);
948 struct adapter *adapter = pi->adapter;
949
950 t4_get_port_stats(adapter, pi->tx_chan, (struct port_stats *)data);
951
952 data += sizeof(struct port_stats) / sizeof(u64);
953 collect_sge_port_stats(adapter, pi, (struct queue_port_stats *)data);
954}
955
956/*
957 * Return a version number to identify the type of adapter. The scheme is:
958 * - bits 0..9: chip version
959 * - bits 10..15: chip revision
960 */
961static inline unsigned int mk_adap_vers(const struct adapter *ap)
962{
963 return 4 | (ap->params.rev << 10);
964}
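/* Worked example of the scheme above: a T4 (chip version 4) at revision 0
 * yields 0x004, and at revision 1 yields 4 | (1 << 10) = 0x404.
 */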
965
966static void reg_block_dump(struct adapter *ap, void *buf, unsigned int start,
967 unsigned int end)
968{
969 u32 *p = buf + start;
970
971 for ( ; start <= end; start += sizeof(u32))
972 *p++ = t4_read_reg(ap, start);
973}
974
975static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
976 void *buf)
977{
978 static const unsigned int reg_ranges[] = {
979 0x1008, 0x1108,
980 0x1180, 0x11b4,
981 0x11fc, 0x123c,
982 0x1300, 0x173c,
983 0x1800, 0x18fc,
984 0x3000, 0x30d8,
985 0x30e0, 0x5924,
986 0x5960, 0x59d4,
987 0x5a00, 0x5af8,
988 0x6000, 0x6098,
989 0x6100, 0x6150,
990 0x6200, 0x6208,
991 0x6240, 0x6248,
992 0x6280, 0x6338,
993 0x6370, 0x638c,
994 0x6400, 0x643c,
995 0x6500, 0x6524,
996 0x6a00, 0x6a38,
997 0x6a60, 0x6a78,
998 0x6b00, 0x6b84,
999 0x6bf0, 0x6c84,
1000 0x6cf0, 0x6d84,
1001 0x6df0, 0x6e84,
1002 0x6ef0, 0x6f84,
1003 0x6ff0, 0x7084,
1004 0x70f0, 0x7184,
1005 0x71f0, 0x7284,
1006 0x72f0, 0x7384,
1007 0x73f0, 0x7450,
1008 0x7500, 0x7530,
1009 0x7600, 0x761c,
1010 0x7680, 0x76cc,
1011 0x7700, 0x7798,
1012 0x77c0, 0x77fc,
1013 0x7900, 0x79fc,
1014 0x7b00, 0x7c38,
1015 0x7d00, 0x7efc,
1016 0x8dc0, 0x8e1c,
1017 0x8e30, 0x8e78,
1018 0x8ea0, 0x8f6c,
1019 0x8fc0, 0x9074,
1020 0x90fc, 0x90fc,
1021 0x9400, 0x9458,
1022 0x9600, 0x96bc,
1023 0x9800, 0x9808,
1024 0x9820, 0x983c,
1025 0x9850, 0x9864,
1026 0x9c00, 0x9c6c,
1027 0x9c80, 0x9cec,
1028 0x9d00, 0x9d6c,
1029 0x9d80, 0x9dec,
1030 0x9e00, 0x9e6c,
1031 0x9e80, 0x9eec,
1032 0x9f00, 0x9f6c,
1033 0x9f80, 0x9fec,
1034 0xd004, 0xd03c,
1035 0xdfc0, 0xdfe0,
1036 0xe000, 0xea7c,
1037 0xf000, 0x11190,
1038 0x19040, 0x19124,
1039 0x19150, 0x191b0,
1040 0x191d0, 0x191e8,
1041 0x19238, 0x1924c,
1042 0x193f8, 0x19474,
1043 0x19490, 0x194f8,
1044 0x19800, 0x19f30,
1045 0x1a000, 0x1a06c,
1046 0x1a0b0, 0x1a120,
1047 0x1a128, 0x1a138,
1048 0x1a190, 0x1a1c4,
1049 0x1a1fc, 0x1a1fc,
1050 0x1e040, 0x1e04c,
1051 0x1e240, 0x1e28c,
1052 0x1e2c0, 0x1e2c0,
1053 0x1e2e0, 0x1e2e0,
1054 0x1e300, 0x1e384,
1055 0x1e3c0, 0x1e3c8,
1056 0x1e440, 0x1e44c,
1057 0x1e640, 0x1e68c,
1058 0x1e6c0, 0x1e6c0,
1059 0x1e6e0, 0x1e6e0,
1060 0x1e700, 0x1e784,
1061 0x1e7c0, 0x1e7c8,
1062 0x1e840, 0x1e84c,
1063 0x1ea40, 0x1ea8c,
1064 0x1eac0, 0x1eac0,
1065 0x1eae0, 0x1eae0,
1066 0x1eb00, 0x1eb84,
1067 0x1ebc0, 0x1ebc8,
1068 0x1ec40, 0x1ec4c,
1069 0x1ee40, 0x1ee8c,
1070 0x1eec0, 0x1eec0,
1071 0x1eee0, 0x1eee0,
1072 0x1ef00, 0x1ef84,
1073 0x1efc0, 0x1efc8,
1074 0x1f040, 0x1f04c,
1075 0x1f240, 0x1f28c,
1076 0x1f2c0, 0x1f2c0,
1077 0x1f2e0, 0x1f2e0,
1078 0x1f300, 0x1f384,
1079 0x1f3c0, 0x1f3c8,
1080 0x1f440, 0x1f44c,
1081 0x1f640, 0x1f68c,
1082 0x1f6c0, 0x1f6c0,
1083 0x1f6e0, 0x1f6e0,
1084 0x1f700, 0x1f784,
1085 0x1f7c0, 0x1f7c8,
1086 0x1f840, 0x1f84c,
1087 0x1fa40, 0x1fa8c,
1088 0x1fac0, 0x1fac0,
1089 0x1fae0, 0x1fae0,
1090 0x1fb00, 0x1fb84,
1091 0x1fbc0, 0x1fbc8,
1092 0x1fc40, 0x1fc4c,
1093 0x1fe40, 0x1fe8c,
1094 0x1fec0, 0x1fec0,
1095 0x1fee0, 0x1fee0,
1096 0x1ff00, 0x1ff84,
1097 0x1ffc0, 0x1ffc8,
1098 0x20000, 0x2002c,
1099 0x20100, 0x2013c,
1100 0x20190, 0x201c8,
1101 0x20200, 0x20318,
1102 0x20400, 0x20528,
1103 0x20540, 0x20614,
1104 0x21000, 0x21040,
1105 0x2104c, 0x21060,
1106 0x210c0, 0x210ec,
1107 0x21200, 0x21268,
1108 0x21270, 0x21284,
1109 0x212fc, 0x21388,
1110 0x21400, 0x21404,
1111 0x21500, 0x21518,
1112 0x2152c, 0x2153c,
1113 0x21550, 0x21554,
1114 0x21600, 0x21600,
1115 0x21608, 0x21628,
1116 0x21630, 0x2163c,
1117 0x21700, 0x2171c,
1118 0x21780, 0x2178c,
1119 0x21800, 0x21c38,
1120 0x21c80, 0x21d7c,
1121 0x21e00, 0x21e04,
1122 0x22000, 0x2202c,
1123 0x22100, 0x2213c,
1124 0x22190, 0x221c8,
1125 0x22200, 0x22318,
1126 0x22400, 0x22528,
1127 0x22540, 0x22614,
1128 0x23000, 0x23040,
1129 0x2304c, 0x23060,
1130 0x230c0, 0x230ec,
1131 0x23200, 0x23268,
1132 0x23270, 0x23284,
1133 0x232fc, 0x23388,
1134 0x23400, 0x23404,
1135 0x23500, 0x23518,
1136 0x2352c, 0x2353c,
1137 0x23550, 0x23554,
1138 0x23600, 0x23600,
1139 0x23608, 0x23628,
1140 0x23630, 0x2363c,
1141 0x23700, 0x2371c,
1142 0x23780, 0x2378c,
1143 0x23800, 0x23c38,
1144 0x23c80, 0x23d7c,
1145 0x23e00, 0x23e04,
1146 0x24000, 0x2402c,
1147 0x24100, 0x2413c,
1148 0x24190, 0x241c8,
1149 0x24200, 0x24318,
1150 0x24400, 0x24528,
1151 0x24540, 0x24614,
1152 0x25000, 0x25040,
1153 0x2504c, 0x25060,
1154 0x250c0, 0x250ec,
1155 0x25200, 0x25268,
1156 0x25270, 0x25284,
1157 0x252fc, 0x25388,
1158 0x25400, 0x25404,
1159 0x25500, 0x25518,
1160 0x2552c, 0x2553c,
1161 0x25550, 0x25554,
1162 0x25600, 0x25600,
1163 0x25608, 0x25628,
1164 0x25630, 0x2563c,
1165 0x25700, 0x2571c,
1166 0x25780, 0x2578c,
1167 0x25800, 0x25c38,
1168 0x25c80, 0x25d7c,
1169 0x25e00, 0x25e04,
1170 0x26000, 0x2602c,
1171 0x26100, 0x2613c,
1172 0x26190, 0x261c8,
1173 0x26200, 0x26318,
1174 0x26400, 0x26528,
1175 0x26540, 0x26614,
1176 0x27000, 0x27040,
1177 0x2704c, 0x27060,
1178 0x270c0, 0x270ec,
1179 0x27200, 0x27268,
1180 0x27270, 0x27284,
1181 0x272fc, 0x27388,
1182 0x27400, 0x27404,
1183 0x27500, 0x27518,
1184 0x2752c, 0x2753c,
1185 0x27550, 0x27554,
1186 0x27600, 0x27600,
1187 0x27608, 0x27628,
1188 0x27630, 0x2763c,
1189 0x27700, 0x2771c,
1190 0x27780, 0x2778c,
1191 0x27800, 0x27c38,
1192 0x27c80, 0x27d7c,
1193 0x27e00, 0x27e04
1194 };
1195
1196 int i;
1197 struct adapter *ap = netdev2adap(dev);
1198
1199 regs->version = mk_adap_vers(ap);
1200
1201 memset(buf, 0, T4_REGMAP_SIZE);
1202 for (i = 0; i < ARRAY_SIZE(reg_ranges); i += 2)
1203 reg_block_dump(ap, buf, reg_ranges[i], reg_ranges[i + 1]);
1204}
1205
1206static int restart_autoneg(struct net_device *dev)
1207{
1208 struct port_info *p = netdev_priv(dev);
1209
1210 if (!netif_running(dev))
1211 return -EAGAIN;
1212 if (p->link_cfg.autoneg != AUTONEG_ENABLE)
1213 return -EINVAL;
1214 t4_restart_aneg(p->adapter, 0, p->tx_chan);
1215 return 0;
1216}
1217
1218static int identify_port(struct net_device *dev, u32 data)
1219{
1220 if (data == 0)
1221 data = 2; /* default to 2 seconds */
1222
1223 return t4_identify_port(netdev2adap(dev), 0, netdev2pinfo(dev)->viid,
1224 data * 5);
1225}
1226
1227static unsigned int from_fw_linkcaps(unsigned int type, unsigned int caps)
1228{
1229 unsigned int v = 0;
1230
1231 if (type == FW_PORT_TYPE_BT_SGMII || type == FW_PORT_TYPE_BT_XAUI) {
1232 v |= SUPPORTED_TP;
1233 if (caps & FW_PORT_CAP_SPEED_100M)
1234 v |= SUPPORTED_100baseT_Full;
1235 if (caps & FW_PORT_CAP_SPEED_1G)
1236 v |= SUPPORTED_1000baseT_Full;
1237 if (caps & FW_PORT_CAP_SPEED_10G)
1238 v |= SUPPORTED_10000baseT_Full;
1239 } else if (type == FW_PORT_TYPE_KX4 || type == FW_PORT_TYPE_KX) {
1240 v |= SUPPORTED_Backplane;
1241 if (caps & FW_PORT_CAP_SPEED_1G)
1242 v |= SUPPORTED_1000baseKX_Full;
1243 if (caps & FW_PORT_CAP_SPEED_10G)
1244 v |= SUPPORTED_10000baseKX4_Full;
1245 } else if (type == FW_PORT_TYPE_KR)
1246 v |= SUPPORTED_Backplane | SUPPORTED_10000baseKR_Full;
1247 else if (type == FW_PORT_TYPE_FIBER)
1248 v |= SUPPORTED_FIBRE;
1249
1250 if (caps & FW_PORT_CAP_ANEG)
1251 v |= SUPPORTED_Autoneg;
1252 return v;
1253}
1254
1255static unsigned int to_fw_linkcaps(unsigned int caps)
1256{
1257 unsigned int v = 0;
1258
1259 if (caps & ADVERTISED_100baseT_Full)
1260 v |= FW_PORT_CAP_SPEED_100M;
1261 if (caps & ADVERTISED_1000baseT_Full)
1262 v |= FW_PORT_CAP_SPEED_1G;
1263 if (caps & ADVERTISED_10000baseT_Full)
1264 v |= FW_PORT_CAP_SPEED_10G;
1265 return v;
1266}
1267
1268static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1269{
1270 const struct port_info *p = netdev_priv(dev);
1271
1272 if (p->port_type == FW_PORT_TYPE_BT_SGMII ||
1273 p->port_type == FW_PORT_TYPE_BT_XAUI)
1274 cmd->port = PORT_TP;
1275 else if (p->port_type == FW_PORT_TYPE_FIBER)
1276 cmd->port = PORT_FIBRE;
1277 else if (p->port_type == FW_PORT_TYPE_TWINAX)
1278 cmd->port = PORT_DA;
1279 else
1280 cmd->port = PORT_OTHER;
1281
1282 if (p->mdio_addr >= 0) {
1283 cmd->phy_address = p->mdio_addr;
1284 cmd->transceiver = XCVR_EXTERNAL;
1285 cmd->mdio_support = p->port_type == FW_PORT_TYPE_BT_SGMII ?
1286 MDIO_SUPPORTS_C22 : MDIO_SUPPORTS_C45;
1287 } else {
1288 cmd->phy_address = 0; /* not really, but no better option */
1289 cmd->transceiver = XCVR_INTERNAL;
1290 cmd->mdio_support = 0;
1291 }
1292
1293 cmd->supported = from_fw_linkcaps(p->port_type, p->link_cfg.supported);
1294 cmd->advertising = from_fw_linkcaps(p->port_type,
1295 p->link_cfg.advertising);
1296 cmd->speed = netif_carrier_ok(dev) ? p->link_cfg.speed : 0;
1297 cmd->duplex = DUPLEX_FULL;
1298 cmd->autoneg = p->link_cfg.autoneg;
1299 cmd->maxtxpkt = 0;
1300 cmd->maxrxpkt = 0;
1301 return 0;
1302}
1303
1304static unsigned int speed_to_caps(int speed)
1305{
1306 if (speed == SPEED_100)
1307 return FW_PORT_CAP_SPEED_100M;
1308 if (speed == SPEED_1000)
1309 return FW_PORT_CAP_SPEED_1G;
1310 if (speed == SPEED_10000)
1311 return FW_PORT_CAP_SPEED_10G;
1312 return 0;
1313}
1314
1315static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1316{
1317 unsigned int cap;
1318 struct port_info *p = netdev_priv(dev);
1319 struct link_config *lc = &p->link_cfg;
1320
1321 if (cmd->duplex != DUPLEX_FULL) /* only full-duplex supported */
1322 return -EINVAL;
1323
1324 if (!(lc->supported & FW_PORT_CAP_ANEG)) {
1325 /*
1326 * PHY offers a single speed. See if that's what's
1327 * being requested.
1328 */
1329 if (cmd->autoneg == AUTONEG_DISABLE &&
1330 (lc->supported & speed_to_caps(cmd->speed)))
1331 return 0;
1332 return -EINVAL;
1333 }
1334
1335 if (cmd->autoneg == AUTONEG_DISABLE) {
1336 cap = speed_to_caps(cmd->speed);
1337
1338 if (!(lc->supported & cap) || cmd->speed == SPEED_1000 ||
1339 cmd->speed == SPEED_10000)
1340 return -EINVAL;
1341 lc->requested_speed = cap;
1342 lc->advertising = 0;
1343 } else {
1344 cap = to_fw_linkcaps(cmd->advertising);
1345 if (!(lc->supported & cap))
1346 return -EINVAL;
1347 lc->requested_speed = 0;
1348 lc->advertising = cap | FW_PORT_CAP_ANEG;
1349 }
1350 lc->autoneg = cmd->autoneg;
1351
1352 if (netif_running(dev))
1353 return t4_link_start(p->adapter, 0, p->tx_chan, lc);
1354 return 0;
1355}
1356
1357static void get_pauseparam(struct net_device *dev,
1358 struct ethtool_pauseparam *epause)
1359{
1360 struct port_info *p = netdev_priv(dev);
1361
1362 epause->autoneg = (p->link_cfg.requested_fc & PAUSE_AUTONEG) != 0;
1363 epause->rx_pause = (p->link_cfg.fc & PAUSE_RX) != 0;
1364 epause->tx_pause = (p->link_cfg.fc & PAUSE_TX) != 0;
1365}
1366
1367static int set_pauseparam(struct net_device *dev,
1368 struct ethtool_pauseparam *epause)
1369{
1370 struct port_info *p = netdev_priv(dev);
1371 struct link_config *lc = &p->link_cfg;
1372
1373 if (epause->autoneg == AUTONEG_DISABLE)
1374 lc->requested_fc = 0;
1375 else if (lc->supported & FW_PORT_CAP_ANEG)
1376 lc->requested_fc = PAUSE_AUTONEG;
1377 else
1378 return -EINVAL;
1379
1380 if (epause->rx_pause)
1381 lc->requested_fc |= PAUSE_RX;
1382 if (epause->tx_pause)
1383 lc->requested_fc |= PAUSE_TX;
1384 if (netif_running(dev))
1385 return t4_link_start(p->adapter, 0, p->tx_chan, lc);
1386 return 0;
1387}
1388
1389static u32 get_rx_csum(struct net_device *dev)
1390{
1391 struct port_info *p = netdev_priv(dev);
1392
1393 return p->rx_offload & RX_CSO;
1394}
1395
1396static int set_rx_csum(struct net_device *dev, u32 data)
1397{
1398 struct port_info *p = netdev_priv(dev);
1399
1400 if (data)
1401 p->rx_offload |= RX_CSO;
1402 else
1403 p->rx_offload &= ~RX_CSO;
1404 return 0;
1405}
1406
1407static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
1408{
1409 const struct port_info *pi = netdev_priv(dev);
1410 const struct sge *s = &pi->adapter->sge;
1411
1412 e->rx_max_pending = MAX_RX_BUFFERS;
1413 e->rx_mini_max_pending = MAX_RSPQ_ENTRIES;
1414 e->rx_jumbo_max_pending = 0;
1415 e->tx_max_pending = MAX_TXQ_ENTRIES;
1416
1417 e->rx_pending = s->ethrxq[pi->first_qset].fl.size - 8;
1418 e->rx_mini_pending = s->ethrxq[pi->first_qset].rspq.size;
1419 e->rx_jumbo_pending = 0;
1420 e->tx_pending = s->ethtxq[pi->first_qset].q.size;
1421}
1422
1423static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
1424{
1425 int i;
1426 const struct port_info *pi = netdev_priv(dev);
1427 struct adapter *adapter = pi->adapter;
1428 struct sge *s = &adapter->sge;
1429
1430 if (e->rx_pending > MAX_RX_BUFFERS || e->rx_jumbo_pending ||
1431 e->tx_pending > MAX_TXQ_ENTRIES ||
1432 e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
1433 e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
1434 e->rx_pending < MIN_FL_ENTRIES || e->tx_pending < MIN_TXQ_ENTRIES)
1435 return -EINVAL;
1436
1437 if (adapter->flags & FULL_INIT_DONE)
1438 return -EBUSY;
1439
1440 for (i = 0; i < pi->nqsets; ++i) {
1441 s->ethtxq[pi->first_qset + i].q.size = e->tx_pending;
1442 s->ethrxq[pi->first_qset + i].fl.size = e->rx_pending + 8;
1443 s->ethrxq[pi->first_qset + i].rspq.size = e->rx_mini_pending;
1444 }
1445 return 0;
1446}
1447
1448static int closest_timer(const struct sge *s, int time)
1449{
1450 int i, delta, match = 0, min_delta = INT_MAX;
1451
1452 for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) {
1453 delta = time - s->timer_val[i];
1454 if (delta < 0)
1455 delta = -delta;
1456 if (delta < min_delta) {
1457 min_delta = delta;
1458 match = i;
1459 }
1460 }
1461 return match;
1462}
1463
1464static int closest_thres(const struct sge *s, int thres)
1465{
1466 int i, delta, match = 0, min_delta = INT_MAX;
1467
1468 for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) {
1469 delta = thres - s->counter_val[i];
1470 if (delta < 0)
1471 delta = -delta;
1472 if (delta < min_delta) {
1473 min_delta = delta;
1474 match = i;
1475 }
1476 }
1477 return match;
1478}
1479
1480/*
1481 * Return a queue's interrupt hold-off time in us. 0 means no timer.
1482 */
1483static unsigned int qtimer_val(const struct adapter *adap,
1484 const struct sge_rspq *q)
1485{
1486 unsigned int idx = q->intr_params >> 1;
1487
1488 return idx < SGE_NTIMERS ? adap->sge.timer_val[idx] : 0;
1489}
1490
1491/**
1492 * set_rxq_intr_params - set a queue's interrupt holdoff parameters
1493 * @adap: the adapter
1494 * @q: the Rx queue
1495 * @us: the hold-off time in us, or 0 to disable timer
1496 * @cnt: the hold-off packet count, or 0 to disable counter
1497 *
1498 * Sets an Rx queue's interrupt hold-off time and packet count. At least
1499 * one of the two needs to be enabled for the queue to generate interrupts.
1500 */
1501static int set_rxq_intr_params(struct adapter *adap, struct sge_rspq *q,
1502 unsigned int us, unsigned int cnt)
1503{
1504 if ((us | cnt) == 0)
1505 cnt = 1;
1506
1507 if (cnt) {
1508 int err;
1509 u32 v, new_idx;
1510
1511 new_idx = closest_thres(&adap->sge, cnt);
1512 if (q->desc && q->pktcnt_idx != new_idx) {
1513 /* the queue has already been created, update it */
1514 v = FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
1515 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) |
1516 FW_PARAMS_PARAM_YZ(q->cntxt_id);
1517 err = t4_set_params(adap, 0, 0, 0, 1, &v, &new_idx);
1518 if (err)
1519 return err;
1520 }
1521 q->pktcnt_idx = new_idx;
1522 }
1523
1524 us = us == 0 ? 6 : closest_timer(&adap->sge, us);
1525 q->intr_params = QINTR_TIMER_IDX(us) | (cnt > 0 ? QINTR_CNT_EN : 0);
1526 return 0;
1527}
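/*
 * Worked example of the encoding used above (illustration only; it assumes
 * QINTR_TIMER_IDX(x) expands to (x) << 1 and QINTR_CNT_EN to 1, which is
 * consistent with the ">> 1" in qtimer_val()):
 *
 *	set_rxq_intr_params(adap, q, 0, 32)
 *		picks the packet-count threshold closest to 32, forces the
 *		timer to index 6, and leaves intr_params = (6 << 1) | 1
 *	set_rxq_intr_params(adap, q, 100, 0)
 *		picks the timer value closest to 100 us and leaves
 *		QINTR_CNT_EN clear, so the queue interrupts on timer only
 */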
1528
1529static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
1530{
1531 const struct port_info *pi = netdev_priv(dev);
1532 struct adapter *adap = pi->adapter;
1533
1534 return set_rxq_intr_params(adap, &adap->sge.ethrxq[pi->first_qset].rspq,
1535 c->rx_coalesce_usecs, c->rx_max_coalesced_frames);
1536}
1537
1538static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
1539{
1540 const struct port_info *pi = netdev_priv(dev);
1541 const struct adapter *adap = pi->adapter;
1542 const struct sge_rspq *rq = &adap->sge.ethrxq[pi->first_qset].rspq;
1543
1544 c->rx_coalesce_usecs = qtimer_val(adap, rq);
1545 c->rx_max_coalesced_frames = (rq->intr_params & QINTR_CNT_EN) ?
1546 adap->sge.counter_val[rq->pktcnt_idx] : 0;
1547 return 0;
1548}
1549
1550/*
1551 * Translate a physical EEPROM address to virtual. The first 1K is accessed
1552 * through virtual addresses starting at 31K; the rest is accessed through
1553 * virtual addresses starting at 0. This mapping is correct only for PF0.
1554 */
1555static int eeprom_ptov(unsigned int phys_addr)
1556{
1557 if (phys_addr < 1024)
1558 return phys_addr + (31 << 10);
1559 if (phys_addr < EEPROMSIZE)
1560 return phys_addr - 1024;
1561 return -EINVAL;
1562}
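/*
 * Worked example of the translation above (illustration only): for PF0,
 *
 *	eeprom_ptov(0)    == 31744	(0 + (31 << 10))
 *	eeprom_ptov(1023) == 32767	(last byte of the first 1K)
 *	eeprom_ptov(1024) == 0
 *	eeprom_ptov(x)    == -EINVAL	for any x >= EEPROMSIZE
 *
 * i.e. the first 1K of physical addresses maps to the top of the virtual
 * range and every later address shifts down by 1K.
 */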
1563
1564/*
1565 * The next two routines implement eeprom read/write from physical addresses.
1566 * The physical->virtual translation is correct only for PF0.
1567 */
1568static int eeprom_rd_phys(struct adapter *adap, unsigned int phys_addr, u32 *v)
1569{
1570 int vaddr = eeprom_ptov(phys_addr);
1571
1572 if (vaddr >= 0)
1573 vaddr = pci_read_vpd(adap->pdev, vaddr, sizeof(u32), v);
1574 return vaddr < 0 ? vaddr : 0;
1575}
1576
1577static int eeprom_wr_phys(struct adapter *adap, unsigned int phys_addr, u32 v)
1578{
1579 int vaddr = eeprom_ptov(phys_addr);
1580
1581 if (vaddr >= 0)
1582 vaddr = pci_write_vpd(adap->pdev, vaddr, sizeof(u32), &v);
1583 return vaddr < 0 ? vaddr : 0;
1584}
1585
1586#define EEPROM_MAGIC 0x38E2F10C
1587
1588static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
1589 u8 *data)
1590{
1591 int i, err = 0;
1592 struct adapter *adapter = netdev2adap(dev);
1593
1594 u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
1595 if (!buf)
1596 return -ENOMEM;
1597
1598 e->magic = EEPROM_MAGIC;
1599 for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
1600 err = eeprom_rd_phys(adapter, i, (u32 *)&buf[i]);
1601
1602 if (!err)
1603 memcpy(data, buf + e->offset, e->len);
1604 kfree(buf);
1605 return err;
1606}
1607
1608static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
1609 u8 *data)
1610{
1611 u8 *buf;
1612 int err = 0;
1613 u32 aligned_offset, aligned_len, *p;
1614 struct adapter *adapter = netdev2adap(dev);
1615
1616 if (eeprom->magic != EEPROM_MAGIC)
1617 return -EINVAL;
1618
1619 aligned_offset = eeprom->offset & ~3;
1620 aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;
1621
1622 if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
1623 /*
1624 * RMW possibly needed for first or last words.
1625 */
1626 buf = kmalloc(aligned_len, GFP_KERNEL);
1627 if (!buf)
1628 return -ENOMEM;
1629 err = eeprom_rd_phys(adapter, aligned_offset, (u32 *)buf);
1630 if (!err && aligned_len > 4)
1631 err = eeprom_rd_phys(adapter,
1632 aligned_offset + aligned_len - 4,
1633 (u32 *)&buf[aligned_len - 4]);
1634 if (err)
1635 goto out;
1636 memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
1637 } else
1638 buf = data;
1639
1640 err = t4_seeprom_wp(adapter, false);
1641 if (err)
1642 goto out;
1643
1644 for (p = (u32 *)buf; !err && aligned_len; aligned_len -= 4, p++) {
1645 err = eeprom_wr_phys(adapter, aligned_offset, *p);
1646 aligned_offset += 4;
1647 }
1648
1649 if (!err)
1650 err = t4_seeprom_wp(adapter, true);
1651out:
1652 if (buf != data)
1653 kfree(buf);
1654 return err;
1655}
1656
1657static int set_flash(struct net_device *netdev, struct ethtool_flash *ef)
1658{
1659 int ret;
1660 const struct firmware *fw;
1661 struct adapter *adap = netdev2adap(netdev);
1662
1663 ef->data[sizeof(ef->data) - 1] = '\0';
1664 ret = request_firmware(&fw, ef->data, adap->pdev_dev);
1665 if (ret < 0)
1666 return ret;
1667
1668 ret = t4_load_fw(adap, fw->data, fw->size);
1669 release_firmware(fw);
1670 if (!ret)
1671 dev_info(adap->pdev_dev, "loaded firmware %s\n", ef->data);
1672 return ret;
1673}
1674
1675#define WOL_SUPPORTED (WAKE_BCAST | WAKE_MAGIC)
1676#define BCAST_CRC 0xa0ccc1a6
1677
1678static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1679{
1680 wol->supported = WAKE_BCAST | WAKE_MAGIC;
1681 wol->wolopts = netdev2adap(dev)->wol;
1682 memset(&wol->sopass, 0, sizeof(wol->sopass));
1683}
1684
1685static int set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1686{
1687 int err = 0;
1688 struct port_info *pi = netdev_priv(dev);
1689
1690 if (wol->wolopts & ~WOL_SUPPORTED)
1691 return -EINVAL;
1692 t4_wol_magic_enable(pi->adapter, pi->tx_chan,
1693 (wol->wolopts & WAKE_MAGIC) ? dev->dev_addr : NULL);
1694 if (wol->wolopts & WAKE_BCAST) {
1695 err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0xfe, ~0ULL,
1696 ~0ULL, 0, false);
1697 if (!err)
1698 err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 1,
1699 ~6ULL, ~0ULL, BCAST_CRC, true);
1700 } else
1701 t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0, 0, 0, 0, false);
1702 return err;
1703}
1704
1705static int set_tso(struct net_device *dev, u32 value)
1706{
1707 if (value)
1708 dev->features |= NETIF_F_TSO | NETIF_F_TSO6;
1709 else
1710 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
1711 return 0;
1712}
1713
1714static struct ethtool_ops cxgb_ethtool_ops = {
1715 .get_settings = get_settings,
1716 .set_settings = set_settings,
1717 .get_drvinfo = get_drvinfo,
1718 .get_msglevel = get_msglevel,
1719 .set_msglevel = set_msglevel,
1720 .get_ringparam = get_sge_param,
1721 .set_ringparam = set_sge_param,
1722 .get_coalesce = get_coalesce,
1723 .set_coalesce = set_coalesce,
1724 .get_eeprom_len = get_eeprom_len,
1725 .get_eeprom = get_eeprom,
1726 .set_eeprom = set_eeprom,
1727 .get_pauseparam = get_pauseparam,
1728 .set_pauseparam = set_pauseparam,
1729 .get_rx_csum = get_rx_csum,
1730 .set_rx_csum = set_rx_csum,
1731 .set_tx_csum = ethtool_op_set_tx_ipv6_csum,
1732 .set_sg = ethtool_op_set_sg,
1733 .get_link = ethtool_op_get_link,
1734 .get_strings = get_strings,
1735 .phys_id = identify_port,
1736 .nway_reset = restart_autoneg,
1737 .get_sset_count = get_sset_count,
1738 .get_ethtool_stats = get_stats,
1739 .get_regs_len = get_regs_len,
1740 .get_regs = get_regs,
1741 .get_wol = get_wol,
1742 .set_wol = set_wol,
1743 .set_tso = set_tso,
1744 .flash_device = set_flash,
1745};
1746
1747/*
1748 * debugfs support
1749 */
1750
1751static int mem_open(struct inode *inode, struct file *file)
1752{
1753 file->private_data = inode->i_private;
1754 return 0;
1755}
1756
1757static ssize_t mem_read(struct file *file, char __user *buf, size_t count,
1758 loff_t *ppos)
1759{
1760 loff_t pos = *ppos;
1761 loff_t avail = file->f_path.dentry->d_inode->i_size;
1762 unsigned int mem = (uintptr_t)file->private_data & 3;
1763 struct adapter *adap = file->private_data - mem;
1764
1765 if (pos < 0)
1766 return -EINVAL;
1767 if (pos >= avail)
1768 return 0;
1769 if (count > avail - pos)
1770 count = avail - pos;
1771
1772 while (count) {
1773 size_t len;
1774 int ret, ofst;
1775 __be32 data[16];
1776
1777 if (mem == MEM_MC)
1778 ret = t4_mc_read(adap, pos, data, NULL);
1779 else
1780 ret = t4_edc_read(adap, mem, pos, data, NULL);
1781 if (ret)
1782 return ret;
1783
1784 ofst = pos % sizeof(data);
1785 len = min(count, sizeof(data) - ofst);
1786 if (copy_to_user(buf, (u8 *)data + ofst, len))
1787 return -EFAULT;
1788
1789 buf += len;
1790 pos += len;
1791 count -= len;
1792 }
1793 count = pos - *ppos;
1794 *ppos = pos;
1795 return count;
1796}
1797
1798static const struct file_operations mem_debugfs_fops = {
1799 .owner = THIS_MODULE,
1800 .open = mem_open,
1801 .read = mem_read,
1802};
1803
1804static void __devinit add_debugfs_mem(struct adapter *adap, const char *name,
1805 unsigned int idx, unsigned int size_mb)
1806{
1807 struct dentry *de;
1808
1809 de = debugfs_create_file(name, S_IRUSR, adap->debugfs_root,
1810 (void *)adap + idx, &mem_debugfs_fops);
1811 if (de && de->d_inode)
1812 de->d_inode->i_size = size_mb << 20;
1813}
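/*
 * For illustration: the debugfs routines above share one tagged pointer.
 * add_debugfs_mem() stores "(void *)adap + idx" as the file's private data,
 * where idx identifies the memory (MEM_EDC0, MEM_EDC1 or MEM_MC, assumed
 * small enough to fit in two bits), and mem_read() recovers both halves
 * with "mem = ptr & 3" and "adap = ptr - mem".  This relies on struct
 * adapter being at least 4-byte aligned so the low bits of a valid adapter
 * pointer are zero.
 */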
1814
1815static int __devinit setup_debugfs(struct adapter *adap)
1816{
1817 int i;
1818
1819 if (IS_ERR_OR_NULL(adap->debugfs_root))
1820 return -1;
1821
1822 i = t4_read_reg(adap, MA_TARGET_MEM_ENABLE);
1823 if (i & EDRAM0_ENABLE)
1824 add_debugfs_mem(adap, "edc0", MEM_EDC0, 5);
1825 if (i & EDRAM1_ENABLE)
1826 add_debugfs_mem(adap, "edc1", MEM_EDC1, 5);
1827 if (i & EXT_MEM_ENABLE)
1828 add_debugfs_mem(adap, "mc", MEM_MC,
1829 EXT_MEM_SIZE_GET(t4_read_reg(adap, MA_EXT_MEMORY_BAR)));
1830 if (adap->l2t)
1831 debugfs_create_file("l2t", S_IRUSR, adap->debugfs_root, adap,
1832 &t4_l2t_fops);
1833 return 0;
1834}
1835
1836/*
1837 * upper-layer driver support
1838 */
1839
1840/*
1841 * Allocate an active-open TID and set its data to the supplied value.
1842 */
1843int cxgb4_alloc_atid(struct tid_info *t, void *data)
1844{
1845 int atid = -1;
1846
1847 spin_lock_bh(&t->atid_lock);
1848 if (t->afree) {
1849 union aopen_entry *p = t->afree;
1850
1851 atid = p - t->atid_tab;
1852 t->afree = p->next;
1853 p->data = data;
1854 t->atids_in_use++;
1855 }
1856 spin_unlock_bh(&t->atid_lock);
1857 return atid;
1858}
1859EXPORT_SYMBOL(cxgb4_alloc_atid);
1860
1861/*
1862 * Release an active-open TID.
1863 */
1864void cxgb4_free_atid(struct tid_info *t, unsigned int atid)
1865{
1866 union aopen_entry *p = &t->atid_tab[atid];
1867
1868 spin_lock_bh(&t->atid_lock);
1869 p->next = t->afree;
1870 t->afree = p;
1871 t->atids_in_use--;
1872 spin_unlock_bh(&t->atid_lock);
1873}
1874EXPORT_SYMBOL(cxgb4_free_atid);
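/*
 * A minimal sketch of how an upper-layer driver might pair the two calls
 * above (illustration only; "lldi" is the cxgb4_lld_info handed to the
 * ULD's add() callback and "my_conn" is a hypothetical per-connection
 * object owned by the ULD):
 *
 *	int atid = cxgb4_alloc_atid(lldi->tids, my_conn);
 *	if (atid < 0)
 *		return -ENOMEM;
 *	... build and send the active-open request carrying atid ...
 *	cxgb4_free_atid(lldi->tids, atid);
 */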
1875
1876/*
1877 * Allocate a server TID and set its data to the supplied value.
1878 */
1879int cxgb4_alloc_stid(struct tid_info *t, int family, void *data)
1880{
1881 int stid;
1882
1883 spin_lock_bh(&t->stid_lock);
1884 if (family == PF_INET) {
1885 stid = find_first_zero_bit(t->stid_bmap, t->nstids);
1886 if (stid < t->nstids)
1887 __set_bit(stid, t->stid_bmap);
1888 else
1889 stid = -1;
1890 } else {
1891 stid = bitmap_find_free_region(t->stid_bmap, t->nstids, 2);
1892 if (stid < 0)
1893 stid = -1;
1894 }
1895 if (stid >= 0) {
1896 t->stid_tab[stid].data = data;
1897 stid += t->stid_base;
1898 t->stids_in_use++;
1899 }
1900 spin_unlock_bh(&t->stid_lock);
1901 return stid;
1902}
1903EXPORT_SYMBOL(cxgb4_alloc_stid);
1904
1905/*
1906 * Release a server TID.
1907 */
1908void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family)
1909{
1910 stid -= t->stid_base;
1911 spin_lock_bh(&t->stid_lock);
1912 if (family == PF_INET)
1913 __clear_bit(stid, t->stid_bmap);
1914 else
1915 bitmap_release_region(t->stid_bmap, stid, 2);
1916 t->stid_tab[stid].data = NULL;
1917 t->stids_in_use--;
1918 spin_unlock_bh(&t->stid_lock);
1919}
1920EXPORT_SYMBOL(cxgb4_free_stid);
1921
1922/*
1923 * Populate a TID_RELEASE WR. Caller must properly size the skb.
1924 */
1925static void mk_tid_release(struct sk_buff *skb, unsigned int chan,
1926 unsigned int tid)
1927{
1928 struct cpl_tid_release *req;
1929
1930 set_wr_txq(skb, CPL_PRIORITY_SETUP, chan);
1931 req = (struct cpl_tid_release *)__skb_put(skb, sizeof(*req));
1932 INIT_TP_WR(req, tid);
1933 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, tid));
1934}
1935
1936/*
1937 * Queue a TID release request and if necessary schedule a work queue to
1938 * process it.
1939 */
1940void cxgb4_queue_tid_release(struct tid_info *t, unsigned int chan,
1941 unsigned int tid)
1942{
1943 void **p = &t->tid_tab[tid];
1944 struct adapter *adap = container_of(t, struct adapter, tids);
1945
1946 spin_lock_bh(&adap->tid_release_lock);
1947 *p = adap->tid_release_head;
1948 /* Low 2 bits encode the Tx channel number */
1949 adap->tid_release_head = (void **)((uintptr_t)p | chan);
1950 if (!adap->tid_release_task_busy) {
1951 adap->tid_release_task_busy = true;
1952 schedule_work(&adap->tid_release_task);
1953 }
1954 spin_unlock_bh(&adap->tid_release_lock);
1955}
1956EXPORT_SYMBOL(cxgb4_queue_tid_release);
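/*
 * For illustration: the deferred-release list built above is threaded
 * through the tid_tab slots themselves.  Each queued slot stores a pointer
 * to the previously queued slot, and adap->tid_release_head points at the
 * most recent one with the Tx channel number tagged into its low two bits
 * (the slots are pointer-aligned, so those bits are otherwise zero).
 * process_tid_release_list() below strips the tag with
 * "chan = (uintptr_t)p & 3" before walking the chain.
 */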
1957
1958/*
1959 * Process the list of pending TID release requests.
1960 */
1961static void process_tid_release_list(struct work_struct *work)
1962{
1963 struct sk_buff *skb;
1964 struct adapter *adap;
1965
1966 adap = container_of(work, struct adapter, tid_release_task);
1967
1968 spin_lock_bh(&adap->tid_release_lock);
1969 while (adap->tid_release_head) {
1970 void **p = adap->tid_release_head;
1971 unsigned int chan = (uintptr_t)p & 3;
1972 p = (void *)p - chan;
1973
1974 adap->tid_release_head = *p;
1975 *p = NULL;
1976 spin_unlock_bh(&adap->tid_release_lock);
1977
1978 while (!(skb = alloc_skb(sizeof(struct cpl_tid_release),
1979 GFP_KERNEL)))
1980 schedule_timeout_uninterruptible(1);
1981
1982 mk_tid_release(skb, chan, p - adap->tids.tid_tab);
1983 t4_ofld_send(adap, skb);
1984 spin_lock_bh(&adap->tid_release_lock);
1985 }
1986 adap->tid_release_task_busy = false;
1987 spin_unlock_bh(&adap->tid_release_lock);
1988}
1989
1990/*
1991 * Release a TID and inform HW. If we are unable to allocate the release
1992 * message we defer to a work queue.
1993 */
1994void cxgb4_remove_tid(struct tid_info *t, unsigned int chan, unsigned int tid)
1995{
1996 void *old;
1997 struct sk_buff *skb;
1998 struct adapter *adap = container_of(t, struct adapter, tids);
1999
2000 old = t->tid_tab[tid];
2001 skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_ATOMIC);
2002 if (likely(skb)) {
2003 t->tid_tab[tid] = NULL;
2004 mk_tid_release(skb, chan, tid);
2005 t4_ofld_send(adap, skb);
2006 } else
2007 cxgb4_queue_tid_release(t, chan, tid);
2008 if (old)
2009 atomic_dec(&t->tids_in_use);
2010}
2011EXPORT_SYMBOL(cxgb4_remove_tid);
2012
2013/*
2014 * Allocate and initialize the TID tables. Returns 0 on success.
2015 */
2016static int tid_init(struct tid_info *t)
2017{
2018 size_t size;
2019 unsigned int natids = t->natids;
2020
2021 size = t->ntids * sizeof(*t->tid_tab) + natids * sizeof(*t->atid_tab) +
2022 t->nstids * sizeof(*t->stid_tab) +
2023 BITS_TO_LONGS(t->nstids) * sizeof(long);
2024 t->tid_tab = t4_alloc_mem(size);
2025 if (!t->tid_tab)
2026 return -ENOMEM;
2027
2028 t->atid_tab = (union aopen_entry *)&t->tid_tab[t->ntids];
2029 t->stid_tab = (struct serv_entry *)&t->atid_tab[natids];
2030 t->stid_bmap = (unsigned long *)&t->stid_tab[t->nstids];
2031 spin_lock_init(&t->stid_lock);
2032 spin_lock_init(&t->atid_lock);
2033
2034 t->stids_in_use = 0;
2035 t->afree = NULL;
2036 t->atids_in_use = 0;
2037 atomic_set(&t->tids_in_use, 0);
2038
2039 /* Set up the free list for atid_tab and clear the stid bitmap. */
2040 if (natids) {
2041 while (--natids)
2042 t->atid_tab[natids - 1].next = &t->atid_tab[natids];
2043 t->afree = t->atid_tab;
2044 }
2045 bitmap_zero(t->stid_bmap, t->nstids);
2046 return 0;
2047}
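/*
 * Layout of the single t4_alloc_mem() block carved up above (illustration):
 *
 *	tid_tab[0 .. ntids - 1]		one void * per TID
 *	atid_tab[0 .. natids - 1]	active-open entries / free list
 *	stid_tab[0 .. nstids - 1]	server TID entries
 *	stid_bmap			BITS_TO_LONGS(nstids) longs
 *
 * One allocation backs all four tables, which is why freeing tid_tab alone
 * (see remove_one()) releases everything.
 */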
2048
2049/**
2050 * cxgb4_create_server - create an IP server
2051 * @dev: the device
2052 * @stid: the server TID
2053 * @sip: local IP address to bind server to
2054 * @sport: the server's TCP port
2055 * @queue: queue to direct messages from this server to
2056 *
2057 * Create an IP server for the given port and address.
2058 * Returns <0 on error and one of the %NET_XMIT_* values on success.
2059 */
2060int cxgb4_create_server(const struct net_device *dev, unsigned int stid,
2061 __be32 sip, __be16 sport, unsigned int queue)
2062{
2063 unsigned int chan;
2064 struct sk_buff *skb;
2065 struct adapter *adap;
2066 struct cpl_pass_open_req *req;
2067
2068 skb = alloc_skb(sizeof(*req), GFP_KERNEL);
2069 if (!skb)
2070 return -ENOMEM;
2071
2072 adap = netdev2adap(dev);
2073 req = (struct cpl_pass_open_req *)__skb_put(skb, sizeof(*req));
2074 INIT_TP_WR(req, 0);
2075 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ, stid));
2076 req->local_port = sport;
2077 req->peer_port = htons(0);
2078 req->local_ip = sip;
2079 req->peer_ip = htonl(0);
2080 chan = netdev2pinfo(adap->sge.ingr_map[queue]->netdev)->tx_chan;
2081 req->opt0 = cpu_to_be64(TX_CHAN(chan));
2082 req->opt1 = cpu_to_be64(CONN_POLICY_ASK |
2083 SYN_RSS_ENABLE | SYN_RSS_QUEUE(queue));
2084 return t4_mgmt_tx(adap, skb);
2085}
2086EXPORT_SYMBOL(cxgb4_create_server);
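/*
 * A minimal sketch of a ULD setting up a listening server (illustration
 * only; "lldi" is the cxgb4_lld_info passed to the ULD's add() callback,
 * "netdev" would typically be one of lldi->ports[], and "listen_ctx",
 * "local_ip", "port" and "rxq_idx" are hypothetical ULD-side values,
 * rxq_idx being one of the ingress queue ids handed to the ULD, e.g. from
 * lldi->rxq_ids):
 *
 *	int stid = cxgb4_alloc_stid(lldi->tids, PF_INET, listen_ctx);
 *	if (stid < 0)
 *		goto fail;
 *	ret = cxgb4_create_server(netdev, stid, local_ip, htons(port),
 *				  rxq_idx);
 *	if (ret < 0)
 *		cxgb4_free_stid(lldi->tids, stid, PF_INET);
 */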
2087
2088/**
2089 * cxgb4_create_server6 - create an IPv6 server
2090 * @dev: the device
2091 * @stid: the server TID
2092 * @sip: local IPv6 address to bind server to
2093 * @sport: the server's TCP port
2094 * @queue: queue to direct messages from this server to
2095 *
2096 * Create an IPv6 server for the given port and address.
2097 * Returns <0 on error and one of the %NET_XMIT_* values on success.
2098 */
2099int cxgb4_create_server6(const struct net_device *dev, unsigned int stid,
2100 const struct in6_addr *sip, __be16 sport,
2101 unsigned int queue)
2102{
2103 unsigned int chan;
2104 struct sk_buff *skb;
2105 struct adapter *adap;
2106 struct cpl_pass_open_req6 *req;
2107
2108 skb = alloc_skb(sizeof(*req), GFP_KERNEL);
2109 if (!skb)
2110 return -ENOMEM;
2111
2112 adap = netdev2adap(dev);
2113 req = (struct cpl_pass_open_req6 *)__skb_put(skb, sizeof(*req));
2114 INIT_TP_WR(req, 0);
2115 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ6, stid));
2116 req->local_port = sport;
2117 req->peer_port = htons(0);
2118 req->local_ip_hi = *(__be64 *)(sip->s6_addr);
2119 req->local_ip_lo = *(__be64 *)(sip->s6_addr + 8);
2120 req->peer_ip_hi = cpu_to_be64(0);
2121 req->peer_ip_lo = cpu_to_be64(0);
2122 chan = netdev2pinfo(adap->sge.ingr_map[queue]->netdev)->tx_chan;
2123 req->opt0 = cpu_to_be64(TX_CHAN(chan));
2124 req->opt1 = cpu_to_be64(CONN_POLICY_ASK |
2125 SYN_RSS_ENABLE | SYN_RSS_QUEUE(queue));
2126 return t4_mgmt_tx(adap, skb);
2127}
2128EXPORT_SYMBOL(cxgb4_create_server6);
2129
2130/**
2131 * cxgb4_best_mtu - find the entry in the MTU table closest to an MTU
2132 * @mtus: the HW MTU table
2133 * @mtu: the target MTU
2134 * @idx: index of selected entry in the MTU table
2135 *
2136 * Returns the index and the value in the HW MTU table that is closest to
2137 * but does not exceed @mtu, unless @mtu is smaller than any value in the
2138 * table, in which case that smallest available value is selected.
2139 */
2140unsigned int cxgb4_best_mtu(const unsigned short *mtus, unsigned short mtu,
2141 unsigned int *idx)
2142{
2143 unsigned int i = 0;
2144
2145 while (i < NMTUS - 1 && mtus[i + 1] <= mtu)
2146 ++i;
2147 if (idx)
2148 *idx = i;
2149 return mtus[i];
2150}
2151EXPORT_SYMBOL(cxgb4_best_mtu);
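/*
 * Worked example (illustration only, with a hypothetical table): given
 * mtus[] = { 88, 576, 1500, 9000, ... },
 *
 *	cxgb4_best_mtu(mtus, 1460, &idx)  returns  576, idx == 1
 *	cxgb4_best_mtu(mtus, 9000, &idx)  returns 9000, idx == 3
 *	cxgb4_best_mtu(mtus,   64, &idx)  returns   88, idx == 0
 *
 * The last case shows the "smaller than any value" fallback described in
 * the comment above.
 */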
2152
2153/**
2154 * cxgb4_port_chan - get the HW channel of a port
2155 * @dev: the net device for the port
2156 *
2157 * Return the HW Tx channel of the given port.
2158 */
2159unsigned int cxgb4_port_chan(const struct net_device *dev)
2160{
2161 return netdev2pinfo(dev)->tx_chan;
2162}
2163EXPORT_SYMBOL(cxgb4_port_chan);
2164
2165/**
2166 * cxgb4_port_viid - get the VI id of a port
2167 * @dev: the net device for the port
2168 *
2169 * Return the VI id of the given port.
2170 */
2171unsigned int cxgb4_port_viid(const struct net_device *dev)
2172{
2173 return netdev2pinfo(dev)->viid;
2174}
2175EXPORT_SYMBOL(cxgb4_port_viid);
2176
2177/**
2178 * cxgb4_port_idx - get the index of a port
2179 * @dev: the net device for the port
2180 *
2181 * Return the index of the given port.
2182 */
2183unsigned int cxgb4_port_idx(const struct net_device *dev)
2184{
2185 return netdev2pinfo(dev)->port_id;
2186}
2187EXPORT_SYMBOL(cxgb4_port_idx);
2188
2189/**
2190 * cxgb4_netdev_by_hwid - return the net device of a HW port
2191 * @pdev: identifies the adapter
2192 * @id: the HW port id
2193 *
2194 * Return the net device associated with the interface with the given HW
2195 * id.
2196 */
2197struct net_device *cxgb4_netdev_by_hwid(struct pci_dev *pdev, unsigned int id)
2198{
2199 const struct adapter *adap = pci_get_drvdata(pdev);
2200
2201 if (!adap || id >= NCHAN)
2202 return NULL;
2203 id = adap->chan_map[id];
2204 return id < MAX_NPORTS ? adap->port[id] : NULL;
2205}
2206EXPORT_SYMBOL(cxgb4_netdev_by_hwid);
2207
2208void cxgb4_get_tcp_stats(struct pci_dev *pdev, struct tp_tcp_stats *v4,
2209 struct tp_tcp_stats *v6)
2210{
2211 struct adapter *adap = pci_get_drvdata(pdev);
2212
2213 spin_lock(&adap->stats_lock);
2214 t4_tp_get_tcp_stats(adap, v4, v6);
2215 spin_unlock(&adap->stats_lock);
2216}
2217EXPORT_SYMBOL(cxgb4_get_tcp_stats);
2218
2219void cxgb4_iscsi_init(struct net_device *dev, unsigned int tag_mask,
2220 const unsigned int *pgsz_order)
2221{
2222 struct adapter *adap = netdev2adap(dev);
2223
2224 t4_write_reg(adap, ULP_RX_ISCSI_TAGMASK, tag_mask);
2225 t4_write_reg(adap, ULP_RX_ISCSI_PSZ, HPZ0(pgsz_order[0]) |
2226 HPZ1(pgsz_order[1]) | HPZ2(pgsz_order[2]) |
2227 HPZ3(pgsz_order[3]));
2228}
2229EXPORT_SYMBOL(cxgb4_iscsi_init);
2230
2231static struct pci_driver cxgb4_driver;
2232
2233static void check_neigh_update(struct neighbour *neigh)
2234{
2235 const struct device *parent;
2236 const struct net_device *netdev = neigh->dev;
2237
2238 if (netdev->priv_flags & IFF_802_1Q_VLAN)
2239 netdev = vlan_dev_real_dev(netdev);
2240 parent = netdev->dev.parent;
2241 if (parent && parent->driver == &cxgb4_driver.driver)
2242 t4_l2t_update(dev_get_drvdata(parent), neigh);
2243}
2244
2245static int netevent_cb(struct notifier_block *nb, unsigned long event,
2246 void *data)
2247{
2248 switch (event) {
2249 case NETEVENT_NEIGH_UPDATE:
2250 check_neigh_update(data);
2251 break;
2252 case NETEVENT_PMTU_UPDATE:
2253 case NETEVENT_REDIRECT:
2254 default:
2255 break;
2256 }
2257 return 0;
2258}
2259
2260static bool netevent_registered;
2261static struct notifier_block cxgb4_netevent_nb = {
2262 .notifier_call = netevent_cb
2263};
2264
2265static void uld_attach(struct adapter *adap, unsigned int uld)
2266{
2267 void *handle;
2268 struct cxgb4_lld_info lli;
2269
2270 lli.pdev = adap->pdev;
2271 lli.l2t = adap->l2t;
2272 lli.tids = &adap->tids;
2273 lli.ports = adap->port;
2274 lli.vr = &adap->vres;
2275 lli.mtus = adap->params.mtus;
2276 if (uld == CXGB4_ULD_RDMA) {
2277 lli.rxq_ids = adap->sge.rdma_rxq;
2278 lli.nrxq = adap->sge.rdmaqs;
2279 } else if (uld == CXGB4_ULD_ISCSI) {
2280 lli.rxq_ids = adap->sge.ofld_rxq;
2281 lli.nrxq = adap->sge.ofldqsets;
2282 }
2283 lli.ntxq = adap->sge.ofldqsets;
2284 lli.nchan = adap->params.nports;
2285 lli.nports = adap->params.nports;
2286 lli.wr_cred = adap->params.ofldq_wr_cred;
2287 lli.adapter_type = adap->params.rev;
2288 lli.iscsi_iolen = MAXRXDATA_GET(t4_read_reg(adap, TP_PARA_REG2));
2289 lli.udb_density = 1 << QUEUESPERPAGEPF0_GET(
2290 t4_read_reg(adap, SGE_EGRESS_QUEUES_PER_PAGE_PF));
2291 lli.ucq_density = 1 << QUEUESPERPAGEPF0_GET(
2292 t4_read_reg(adap, SGE_INGRESS_QUEUES_PER_PAGE_PF));
2293 lli.gts_reg = adap->regs + MYPF_REG(SGE_PF_GTS);
2294 lli.db_reg = adap->regs + MYPF_REG(SGE_PF_KDOORBELL);
2295 lli.fw_vers = adap->params.fw_vers;
2296
2297 handle = ulds[uld].add(&lli);
2298 if (IS_ERR(handle)) {
2299 dev_warn(adap->pdev_dev,
2300 "could not attach to the %s driver, error %ld\n",
2301 uld_str[uld], PTR_ERR(handle));
2302 return;
2303 }
2304
2305 adap->uld_handle[uld] = handle;
2306
2307 if (!netevent_registered) {
2308 register_netevent_notifier(&cxgb4_netevent_nb);
2309 netevent_registered = true;
2310 }
2311}
2312
2313static void attach_ulds(struct adapter *adap)
2314{
2315 unsigned int i;
2316
2317 mutex_lock(&uld_mutex);
2318 list_add_tail(&adap->list_node, &adapter_list);
2319 for (i = 0; i < CXGB4_ULD_MAX; i++)
2320 if (ulds[i].add)
2321 uld_attach(adap, i);
2322 mutex_unlock(&uld_mutex);
2323}
2324
2325static void detach_ulds(struct adapter *adap)
2326{
2327 unsigned int i;
2328
2329 mutex_lock(&uld_mutex);
2330 list_del(&adap->list_node);
2331 for (i = 0; i < CXGB4_ULD_MAX; i++)
2332 if (adap->uld_handle[i]) {
2333 ulds[i].state_change(adap->uld_handle[i],
2334 CXGB4_STATE_DETACH);
2335 adap->uld_handle[i] = NULL;
2336 }
2337 if (netevent_registered && list_empty(&adapter_list)) {
2338 unregister_netevent_notifier(&cxgb4_netevent_nb);
2339 netevent_registered = false;
2340 }
2341 mutex_unlock(&uld_mutex);
2342}
2343
2344static void notify_ulds(struct adapter *adap, enum cxgb4_state new_state)
2345{
2346 unsigned int i;
2347
2348 mutex_lock(&uld_mutex);
2349 for (i = 0; i < CXGB4_ULD_MAX; i++)
2350 if (adap->uld_handle[i])
2351 ulds[i].state_change(adap->uld_handle[i], new_state);
2352 mutex_unlock(&uld_mutex);
2353}
2354
2355/**
2356 * cxgb4_register_uld - register an upper-layer driver
2357 * @type: the ULD type
2358 * @p: the ULD methods
2359 *
2360 * Registers an upper-layer driver with this driver and notifies the ULD
2361 * about any presently available devices that support its type. Returns
2362 * %-EBUSY if a ULD of the same type is already registered.
2363 */
2364int cxgb4_register_uld(enum cxgb4_uld type, const struct cxgb4_uld_info *p)
2365{
2366 int ret = 0;
2367 struct adapter *adap;
2368
2369 if (type >= CXGB4_ULD_MAX)
2370 return -EINVAL;
2371 mutex_lock(&uld_mutex);
2372 if (ulds[type].add) {
2373 ret = -EBUSY;
2374 goto out;
2375 }
2376 ulds[type] = *p;
2377 list_for_each_entry(adap, &adapter_list, list_node)
2378 uld_attach(adap, type);
2379out: mutex_unlock(&uld_mutex);
2380 return ret;
2381}
2382EXPORT_SYMBOL(cxgb4_register_uld);
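/*
 * A minimal sketch of ULD registration (illustration only; it shows just
 * the add() and state_change() methods this file invokes, and the my_uld_*
 * names are hypothetical ULD-side functions; a real ULD also supplies its
 * receive handler):
 *
 *	static void *my_uld_add(const struct cxgb4_lld_info *lldi)
 *	{
 *		... remember lldi->ports, lldi->tids, lldi->rxq_ids ...
 *		return my_state;	(or an ERR_PTR() on failure)
 *	}
 *
 *	static int my_uld_state_change(void *handle, enum cxgb4_state state)
 *	{
 *		return 0;
 *	}
 *
 *	static const struct cxgb4_uld_info my_uld_info = {
 *		.add		= my_uld_add,
 *		.state_change	= my_uld_state_change,
 *	};
 *
 *	err = cxgb4_register_uld(CXGB4_ULD_ISCSI, &my_uld_info);
 */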
2383
2384/**
2385 * cxgb4_unregister_uld - unregister an upper-layer driver
2386 * @type: the ULD type
2387 *
2388 * Unregisters an existing upper-layer driver.
2389 */
2390int cxgb4_unregister_uld(enum cxgb4_uld type)
2391{
2392 struct adapter *adap;
2393
2394 if (type >= CXGB4_ULD_MAX)
2395 return -EINVAL;
2396 mutex_lock(&uld_mutex);
2397 list_for_each_entry(adap, &adapter_list, list_node)
2398 adap->uld_handle[type] = NULL;
2399 ulds[type].add = NULL;
2400 mutex_unlock(&uld_mutex);
2401 return 0;
2402}
2403EXPORT_SYMBOL(cxgb4_unregister_uld);
2404
2405/**
2406 * cxgb_up - enable the adapter
2407 * @adap: adapter being enabled
2408 *
2409 * Called when the first port is enabled, this function performs the
2410 * actions necessary to make an adapter operational, such as completing
2411 * the initialization of HW modules and enabling interrupts.
2412 *
2413 * Must be called with the rtnl lock held.
2414 */
2415static int cxgb_up(struct adapter *adap)
2416{
2417 int err = 0;
2418
2419 if (!(adap->flags & FULL_INIT_DONE)) {
2420 err = setup_sge_queues(adap);
2421 if (err)
2422 goto out;
2423 err = setup_rss(adap);
2424 if (err) {
2425 t4_free_sge_resources(adap);
2426 goto out;
2427 }
2428 if (adap->flags & USING_MSIX)
2429 name_msix_vecs(adap);
2430 adap->flags |= FULL_INIT_DONE;
2431 }
2432
2433 if (adap->flags & USING_MSIX) {
2434 err = request_irq(adap->msix_info[0].vec, t4_nondata_intr, 0,
2435 adap->msix_info[0].desc, adap);
2436 if (err)
2437 goto irq_err;
2438
2439 err = request_msix_queue_irqs(adap);
2440 if (err) {
2441 free_irq(adap->msix_info[0].vec, adap);
2442 goto irq_err;
2443 }
2444 } else {
2445 err = request_irq(adap->pdev->irq, t4_intr_handler(adap),
2446 (adap->flags & USING_MSI) ? 0 : IRQF_SHARED,
2447 adap->name, adap);
2448 if (err)
2449 goto irq_err;
2450 }
2451 enable_rx(adap);
2452 t4_sge_start(adap);
2453 t4_intr_enable(adap);
2454 notify_ulds(adap, CXGB4_STATE_UP);
2455 out:
2456 return err;
2457 irq_err:
2458 dev_err(adap->pdev_dev, "request_irq failed, err %d\n", err);
2459 goto out;
2460}
2461
2462static void cxgb_down(struct adapter *adapter)
2463{
2464 t4_intr_disable(adapter);
2465 cancel_work_sync(&adapter->tid_release_task);
2466 adapter->tid_release_task_busy = false;
2467
2468 if (adapter->flags & USING_MSIX) {
2469 free_msix_queue_irqs(adapter);
2470 free_irq(adapter->msix_info[0].vec, adapter);
2471 } else
2472 free_irq(adapter->pdev->irq, adapter);
2473 quiesce_rx(adapter);
2474}
2475
2476/*
2477 * net_device operations
2478 */
2479static int cxgb_open(struct net_device *dev)
2480{
2481 int err;
2482 struct port_info *pi = netdev_priv(dev);
2483 struct adapter *adapter = pi->adapter;
2484
2485 if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0)
2486 return err;
2487
2488 dev->real_num_tx_queues = pi->nqsets;
2489 set_bit(pi->tx_chan, &adapter->open_device_map);
2490 link_start(dev);
2491 netif_tx_start_all_queues(dev);
2492 return 0;
2493}
2494
2495static int cxgb_close(struct net_device *dev)
2496{
2497 int ret;
2498 struct port_info *pi = netdev_priv(dev);
2499 struct adapter *adapter = pi->adapter;
2500
2501 netif_tx_stop_all_queues(dev);
2502 netif_carrier_off(dev);
2503 ret = t4_enable_vi(adapter, 0, pi->viid, false, false);
2504
2505 clear_bit(pi->tx_chan, &adapter->open_device_map);
2506
2507 if (!adapter->open_device_map)
2508 cxgb_down(adapter);
2509 return 0;
2510}
2511
2512static struct net_device_stats *cxgb_get_stats(struct net_device *dev)
2513{
2514 struct port_stats stats;
2515 struct port_info *p = netdev_priv(dev);
2516 struct adapter *adapter = p->adapter;
2517 struct net_device_stats *ns = &dev->stats;
2518
2519 spin_lock(&adapter->stats_lock);
2520 t4_get_port_stats(adapter, p->tx_chan, &stats);
2521 spin_unlock(&adapter->stats_lock);
2522
2523 ns->tx_bytes = stats.tx_octets;
2524 ns->tx_packets = stats.tx_frames;
2525 ns->rx_bytes = stats.rx_octets;
2526 ns->rx_packets = stats.rx_frames;
2527 ns->multicast = stats.rx_mcast_frames;
2528
2529 /* detailed rx_errors */
2530 ns->rx_length_errors = stats.rx_jabber + stats.rx_too_long +
2531 stats.rx_runt;
2532 ns->rx_over_errors = 0;
2533 ns->rx_crc_errors = stats.rx_fcs_err;
2534 ns->rx_frame_errors = stats.rx_symbol_err;
2535 ns->rx_fifo_errors = stats.rx_ovflow0 + stats.rx_ovflow1 +
2536 stats.rx_ovflow2 + stats.rx_ovflow3 +
2537 stats.rx_trunc0 + stats.rx_trunc1 +
2538 stats.rx_trunc2 + stats.rx_trunc3;
2539 ns->rx_missed_errors = 0;
2540
2541 /* detailed tx_errors */
2542 ns->tx_aborted_errors = 0;
2543 ns->tx_carrier_errors = 0;
2544 ns->tx_fifo_errors = 0;
2545 ns->tx_heartbeat_errors = 0;
2546 ns->tx_window_errors = 0;
2547
2548 ns->tx_errors = stats.tx_error_frames;
2549 ns->rx_errors = stats.rx_symbol_err + stats.rx_fcs_err +
2550 ns->rx_length_errors + stats.rx_len_err + ns->rx_fifo_errors;
2551 return ns;
2552}
2553
2554static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
2555{
2556 int ret = 0, prtad, devad;
2557 struct port_info *pi = netdev_priv(dev);
2558 struct mii_ioctl_data *data = (struct mii_ioctl_data *)&req->ifr_data;
2559
2560 switch (cmd) {
2561 case SIOCGMIIPHY:
2562 if (pi->mdio_addr < 0)
2563 return -EOPNOTSUPP;
2564 data->phy_id = pi->mdio_addr;
2565 break;
2566 case SIOCGMIIREG:
2567 case SIOCSMIIREG:
2568 if (mdio_phy_id_is_c45(data->phy_id)) {
2569 prtad = mdio_phy_id_prtad(data->phy_id);
2570 devad = mdio_phy_id_devad(data->phy_id);
2571 } else if (data->phy_id < 32) {
2572 prtad = data->phy_id;
2573 devad = 0;
2574 data->reg_num &= 0x1f;
2575 } else
2576 return -EINVAL;
2577
2578 if (cmd == SIOCGMIIREG)
2579 ret = t4_mdio_rd(pi->adapter, 0, prtad, devad,
2580 data->reg_num, &data->val_out);
2581 else
2582 ret = t4_mdio_wr(pi->adapter, 0, prtad, devad,
2583 data->reg_num, data->val_in);
2584 break;
2585 default:
2586 return -EOPNOTSUPP;
2587 }
2588 return ret;
2589}
2590
2591static void cxgb_set_rxmode(struct net_device *dev)
2592{
2593 /* unfortunately we can't return errors to the stack */
2594 set_rxmode(dev, -1, false);
2595}
2596
2597static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
2598{
2599 int ret;
2600 struct port_info *pi = netdev_priv(dev);
2601
2602 if (new_mtu < 81 || new_mtu > MAX_MTU) /* accommodate SACK */
2603 return -EINVAL;
2604 ret = t4_set_rxmode(pi->adapter, 0, pi->viid, new_mtu, -1, -1, -1,
2605 true);
2606 if (!ret)
2607 dev->mtu = new_mtu;
2608 return ret;
2609}
2610
2611static int cxgb_set_mac_addr(struct net_device *dev, void *p)
2612{
2613 int ret;
2614 struct sockaddr *addr = p;
2615 struct port_info *pi = netdev_priv(dev);
2616
2617 if (!is_valid_ether_addr(addr->sa_data))
2618 return -EINVAL;
2619
2620 ret = t4_change_mac(pi->adapter, 0, pi->viid, pi->xact_addr_filt,
2621 addr->sa_data, true, true);
2622 if (ret < 0)
2623 return ret;
2624
2625 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
2626 pi->xact_addr_filt = ret;
2627 return 0;
2628}
2629
2630static void vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
2631{
2632 struct port_info *pi = netdev_priv(dev);
2633
2634 pi->vlan_grp = grp;
2635 t4_set_vlan_accel(pi->adapter, 1 << pi->tx_chan, grp != NULL);
2636}
2637
2638#ifdef CONFIG_NET_POLL_CONTROLLER
2639static void cxgb_netpoll(struct net_device *dev)
2640{
2641 struct port_info *pi = netdev_priv(dev);
2642 struct adapter *adap = pi->adapter;
2643
2644 if (adap->flags & USING_MSIX) {
2645 int i;
2646 struct sge_eth_rxq *rx = &adap->sge.ethrxq[pi->first_qset];
2647
2648 for (i = pi->nqsets; i; i--, rx++)
2649 t4_sge_intr_msix(0, &rx->rspq);
2650 } else
2651 t4_intr_handler(adap)(0, adap);
2652}
2653#endif
2654
2655static const struct net_device_ops cxgb4_netdev_ops = {
2656 .ndo_open = cxgb_open,
2657 .ndo_stop = cxgb_close,
2658 .ndo_start_xmit = t4_eth_xmit,
2659 .ndo_get_stats = cxgb_get_stats,
2660 .ndo_set_rx_mode = cxgb_set_rxmode,
2661 .ndo_set_mac_address = cxgb_set_mac_addr,
2662 .ndo_validate_addr = eth_validate_addr,
2663 .ndo_do_ioctl = cxgb_ioctl,
2664 .ndo_change_mtu = cxgb_change_mtu,
2665 .ndo_vlan_rx_register = vlan_rx_register,
2666#ifdef CONFIG_NET_POLL_CONTROLLER
2667 .ndo_poll_controller = cxgb_netpoll,
2668#endif
2669};
2670
2671void t4_fatal_err(struct adapter *adap)
2672{
2673 t4_set_reg_field(adap, SGE_CONTROL, GLOBALENABLE, 0);
2674 t4_intr_disable(adap);
2675 dev_alert(adap->pdev_dev, "encountered fatal error, adapter stopped\n");
2676}
2677
2678static void setup_memwin(struct adapter *adap)
2679{
2680 u32 bar0;
2681
2682 bar0 = pci_resource_start(adap->pdev, 0); /* truncation intentional */
2683 t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 0),
2684 (bar0 + MEMWIN0_BASE) | BIR(0) |
2685 WINDOW(ilog2(MEMWIN0_APERTURE) - 10));
2686 t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 1),
2687 (bar0 + MEMWIN1_BASE) | BIR(0) |
2688 WINDOW(ilog2(MEMWIN1_APERTURE) - 10));
2689 t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 2),
2690 (bar0 + MEMWIN2_BASE) | BIR(0) |
2691 WINDOW(ilog2(MEMWIN2_APERTURE) - 10));
2692}
2693
2694/*
2695 * Max # of ATIDs. The absolute HW max is 16K but we keep it lower.
2696 */
2697#define MAX_ATIDS 8192U
2698
2699/*
2700 * Phase 0 of initialization: contact FW, obtain config, perform basic init.
2701 */
2702static int adap_init0(struct adapter *adap)
2703{
2704 int ret;
2705 u32 v, port_vec;
2706 enum dev_state state;
2707 u32 params[7], val[7];
2708 struct fw_caps_config_cmd c;
2709
2710 ret = t4_check_fw_version(adap);
2711 if (ret == -EINVAL || ret > 0) {
2712 if (upgrade_fw(adap) >= 0) /* recache FW version */
2713 ret = t4_check_fw_version(adap);
2714 }
2715 if (ret < 0)
2716 return ret;
2717
2718 /* contact FW, request master */
2719 ret = t4_fw_hello(adap, 0, 0, MASTER_MUST, &state);
2720 if (ret < 0) {
2721 dev_err(adap->pdev_dev, "could not connect to FW, error %d\n",
2722 ret);
2723 return ret;
2724 }
2725
2726 /* reset device */
2727 ret = t4_fw_reset(adap, 0, PIORSTMODE | PIORST);
2728 if (ret < 0)
2729 goto bye;
2730
2731 /* get device capabilities */
2732 memset(&c, 0, sizeof(c));
2733 c.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
2734 FW_CMD_REQUEST | FW_CMD_READ);
2735 c.retval_len16 = htonl(FW_LEN16(c));
2736 ret = t4_wr_mbox(adap, 0, &c, sizeof(c), &c);
2737 if (ret < 0)
2738 goto bye;
2739
2740 /* select capabilities we'll be using */
2741 if (c.niccaps & htons(FW_CAPS_CONFIG_NIC_VM)) {
2742 if (!vf_acls)
2743 c.niccaps ^= htons(FW_CAPS_CONFIG_NIC_VM);
2744 else
2745 c.niccaps = htons(FW_CAPS_CONFIG_NIC_VM);
2746 } else if (vf_acls) {
2747 dev_err(adap->pdev_dev, "virtualization ACLs not supported");
2748 goto bye;
2749 }
2750 c.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
2751 FW_CMD_REQUEST | FW_CMD_WRITE);
2752 ret = t4_wr_mbox(adap, 0, &c, sizeof(c), NULL);
2753 if (ret < 0)
2754 goto bye;
2755
2756 ret = t4_config_glbl_rss(adap, 0,
2757 FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
2758 FW_RSS_GLB_CONFIG_CMD_TNLMAPEN |
2759 FW_RSS_GLB_CONFIG_CMD_TNLALLLKP);
2760 if (ret < 0)
2761 goto bye;
2762
2763 ret = t4_cfg_pfvf(adap, 0, 0, 0, 64, 64, 64, 0, 0, 4, 0xf, 0xf, 16,
2764 FW_CMD_CAP_PF, FW_CMD_CAP_PF);
2765 if (ret < 0)
2766 goto bye;
2767
2768 for (v = 0; v < SGE_NTIMERS - 1; v++)
2769 adap->sge.timer_val[v] = min(intr_holdoff[v], MAX_SGE_TIMERVAL);
2770 adap->sge.timer_val[SGE_NTIMERS - 1] = MAX_SGE_TIMERVAL;
2771 adap->sge.counter_val[0] = 1;
2772 for (v = 1; v < SGE_NCOUNTERS; v++)
2773 adap->sge.counter_val[v] = min(intr_cnt[v - 1],
2774 THRESHOLD_3_MASK);
2775 t4_sge_init(adap);
2776
2777 /* get basic stuff going */
2778 ret = t4_early_init(adap, 0);
2779 if (ret < 0)
2780 goto bye;
2781
2782#define FW_PARAM_DEV(param) \
2783 (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
2784 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))
2785
2786#define FW_PARAM_PFVF(param) \
2787 (FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
2788 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param))
2789
2790 params[0] = FW_PARAM_DEV(PORTVEC);
2791 params[1] = FW_PARAM_PFVF(L2T_START);
2792 params[2] = FW_PARAM_PFVF(L2T_END);
2793 params[3] = FW_PARAM_PFVF(FILTER_START);
2794 params[4] = FW_PARAM_PFVF(FILTER_END);
2795 ret = t4_query_params(adap, 0, 0, 0, 5, params, val);
2796 if (ret < 0)
2797 goto bye;
2798 port_vec = val[0];
2799 adap->tids.ftid_base = val[3];
2800 adap->tids.nftids = val[4] - val[3] + 1;
2801
2802 if (c.ofldcaps) {
2803 /* query offload-related parameters */
2804 params[0] = FW_PARAM_DEV(NTID);
2805 params[1] = FW_PARAM_PFVF(SERVER_START);
2806 params[2] = FW_PARAM_PFVF(SERVER_END);
2807 params[3] = FW_PARAM_PFVF(TDDP_START);
2808 params[4] = FW_PARAM_PFVF(TDDP_END);
2809 params[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
2810 ret = t4_query_params(adap, 0, 0, 0, 6, params, val);
2811 if (ret < 0)
2812 goto bye;
2813 adap->tids.ntids = val[0];
2814 adap->tids.natids = min(adap->tids.ntids / 2, MAX_ATIDS);
2815 adap->tids.stid_base = val[1];
2816 adap->tids.nstids = val[2] - val[1] + 1;
2817 adap->vres.ddp.start = val[3];
2818 adap->vres.ddp.size = val[4] - val[3] + 1;
2819 adap->params.ofldq_wr_cred = val[5];
2820 adap->params.offload = 1;
2821 }
2822 if (c.rdmacaps) {
2823 params[0] = FW_PARAM_PFVF(STAG_START);
2824 params[1] = FW_PARAM_PFVF(STAG_END);
2825 params[2] = FW_PARAM_PFVF(RQ_START);
2826 params[3] = FW_PARAM_PFVF(RQ_END);
2827 params[4] = FW_PARAM_PFVF(PBL_START);
2828 params[5] = FW_PARAM_PFVF(PBL_END);
2829 ret = t4_query_params(adap, 0, 0, 0, 6, params, val);
2830 if (ret < 0)
2831 goto bye;
2832 adap->vres.stag.start = val[0];
2833 adap->vres.stag.size = val[1] - val[0] + 1;
2834 adap->vres.rq.start = val[2];
2835 adap->vres.rq.size = val[3] - val[2] + 1;
2836 adap->vres.pbl.start = val[4];
2837 adap->vres.pbl.size = val[5] - val[4] + 1;
2838 }
2839 if (c.iscsicaps) {
2840 params[0] = FW_PARAM_PFVF(ISCSI_START);
2841 params[1] = FW_PARAM_PFVF(ISCSI_END);
2842 ret = t4_query_params(adap, 0, 0, 0, 2, params, val);
2843 if (ret < 0)
2844 goto bye;
2845 adap->vres.iscsi.start = val[0];
2846 adap->vres.iscsi.size = val[1] - val[0] + 1;
2847 }
2848#undef FW_PARAM_PFVF
2849#undef FW_PARAM_DEV
2850
2851 adap->params.nports = hweight32(port_vec);
2852 adap->params.portvec = port_vec;
2853 adap->flags |= FW_OK;
2854
2855 /* These are finalized by FW initialization, load their values now */
2856 v = t4_read_reg(adap, TP_TIMER_RESOLUTION);
2857 adap->params.tp.tre = TIMERRESOLUTION_GET(v);
2858 t4_read_mtu_tbl(adap, adap->params.mtus, NULL);
2859 t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
2860 adap->params.b_wnd);
2861
2862 /* tweak some settings */
2863 t4_write_reg(adap, TP_SHIFT_CNT, 0x64f8849);
2864 t4_write_reg(adap, ULP_RX_TDDP_PSZ, HPZ0(PAGE_SHIFT - 12));
2865 t4_write_reg(adap, TP_PIO_ADDR, TP_INGRESS_CONFIG);
2866 v = t4_read_reg(adap, TP_PIO_DATA);
2867 t4_write_reg(adap, TP_PIO_DATA, v & ~CSUM_HAS_PSEUDO_HDR);
2868 setup_memwin(adap);
2869 return 0;
2870
2871 /*
2872 * If a command timed out or failed with EIO, the FW is not operating
2873 * within its spec or something catastrophic happened to the HW/FW;
2874 * stop issuing commands.
2875 */
2876bye: if (ret != -ETIMEDOUT && ret != -EIO)
2877 t4_fw_bye(adap, 0);
2878 return ret;
2879}
2880
2881static inline bool is_10g_port(const struct link_config *lc)
2882{
2883 return (lc->supported & FW_PORT_CAP_SPEED_10G) != 0;
2884}
2885
2886static inline void init_rspq(struct sge_rspq *q, u8 timer_idx, u8 pkt_cnt_idx,
2887 unsigned int size, unsigned int iqe_size)
2888{
2889 q->intr_params = QINTR_TIMER_IDX(timer_idx) |
2890 (pkt_cnt_idx < SGE_NCOUNTERS ? QINTR_CNT_EN : 0);
2891 q->pktcnt_idx = pkt_cnt_idx < SGE_NCOUNTERS ? pkt_cnt_idx : 0;
2892 q->iqe_len = iqe_size;
2893 q->size = size;
2894}
2895
2896/*
2897 * Perform default configuration of DMA queues depending on the number and type
2898 * of ports we found and the number of available CPUs. Most settings can be
2899 * modified by the admin prior to actual use.
2900 */
2901static void __devinit cfg_queues(struct adapter *adap)
2902{
2903 struct sge *s = &adap->sge;
2904 int i, q10g = 0, n10g = 0, qidx = 0;
2905
2906 for_each_port(adap, i)
2907 n10g += is_10g_port(&adap2pinfo(adap, i)->link_cfg);
2908
2909 /*
2910 * We default to one queue set per non-10G port and up to one queue
2911 * set per online CPU for each 10G port.
2912 */
2913 if (n10g)
2914 q10g = (MAX_ETH_QSETS - (adap->params.nports - n10g)) / n10g;
2915 if (q10g > num_online_cpus())
2916 q10g = num_online_cpus();
2917
2918 for_each_port(adap, i) {
2919 struct port_info *pi = adap2pinfo(adap, i);
2920
2921 pi->first_qset = qidx;
2922 pi->nqsets = is_10g_port(&pi->link_cfg) ? q10g : 1;
2923 qidx += pi->nqsets;
2924 }
2925
2926 s->ethqsets = qidx;
2927 s->max_ethqsets = qidx; /* MSI-X may lower it later */
2928
2929 if (is_offload(adap)) {
2930 /*
2931 * For offload we use one queue per channel if all ports run at 1G or
2932 * below; otherwise we divide the available queues, capped at the
2933 * number of online CPUs, amongst the channels.
2934 */
2935 if (n10g) {
2936 i = min_t(int, ARRAY_SIZE(s->ofldrxq),
2937 num_online_cpus());
2938 s->ofldqsets = roundup(i, adap->params.nports);
2939 } else
2940 s->ofldqsets = adap->params.nports;
2941 /* For RDMA one Rx queue per channel suffices */
2942 s->rdmaqs = adap->params.nports;
2943 }
2944
2945 for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) {
2946 struct sge_eth_rxq *r = &s->ethrxq[i];
2947
2948 init_rspq(&r->rspq, 0, 0, 1024, 64);
2949 r->fl.size = 72;
2950 }
2951
2952 for (i = 0; i < ARRAY_SIZE(s->ethtxq); i++)
2953 s->ethtxq[i].q.size = 1024;
2954
2955 for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++)
2956 s->ctrlq[i].q.size = 512;
2957
2958 for (i = 0; i < ARRAY_SIZE(s->ofldtxq); i++)
2959 s->ofldtxq[i].q.size = 1024;
2960
2961 for (i = 0; i < ARRAY_SIZE(s->ofldrxq); i++) {
2962 struct sge_ofld_rxq *r = &s->ofldrxq[i];
2963
2964 init_rspq(&r->rspq, 0, 0, 1024, 64);
2965 r->rspq.uld = CXGB4_ULD_ISCSI;
2966 r->fl.size = 72;
2967 }
2968
2969 for (i = 0; i < ARRAY_SIZE(s->rdmarxq); i++) {
2970 struct sge_ofld_rxq *r = &s->rdmarxq[i];
2971
2972 init_rspq(&r->rspq, 0, 0, 511, 64);
2973 r->rspq.uld = CXGB4_ULD_RDMA;
2974 r->fl.size = 72;
2975 }
2976
2977 init_rspq(&s->fw_evtq, 6, 0, 512, 64);
2978 init_rspq(&s->intrq, 6, 0, 2 * MAX_INGQ, 64);
2979}
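/*
 * Worked example of the defaults above (illustration only, assuming a
 * 2-port adapter with both ports at 10G, 8 online CPUs, and MAX_ETH_QSETS
 * and the ofldrxq array large enough not to constrain): q10g becomes 8, so
 * each port gets 8 Ethernet queue sets (s->ethqsets = 16); with offload
 * enabled, s->ofldqsets = roundup(8, 2) = 8 and s->rdmaqs = 2.
 */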
2980
2981/*
2982 * Reduce the number of Ethernet queues across all ports to at most n.
2983 * The caller ensures n is large enough to leave at least one queue per port.
2984 */
2985static void __devinit reduce_ethqs(struct adapter *adap, int n)
2986{
2987 int i;
2988 struct port_info *pi;
2989
2990 while (n < adap->sge.ethqsets)
2991 for_each_port(adap, i) {
2992 pi = adap2pinfo(adap, i);
2993 if (pi->nqsets > 1) {
2994 pi->nqsets--;
2995 adap->sge.ethqsets--;
2996 if (adap->sge.ethqsets <= n)
2997 break;
2998 }
2999 }
3000
3001 n = 0;
3002 for_each_port(adap, i) {
3003 pi = adap2pinfo(adap, i);
3004 pi->first_qset = n;
3005 n += pi->nqsets;
3006 }
3007}
3008
3009/* 2 MSI-X vectors needed for the FW queue and non-data interrupts */
3010#define EXTRA_VECS 2
3011
3012static int __devinit enable_msix(struct adapter *adap)
3013{
3014 int ofld_need = 0;
3015 int i, err, want, need;
3016 struct sge *s = &adap->sge;
3017 unsigned int nchan = adap->params.nports;
3018 struct msix_entry entries[MAX_INGQ + 1];
3019
3020 for (i = 0; i < ARRAY_SIZE(entries); ++i)
3021 entries[i].entry = i;
3022
3023 want = s->max_ethqsets + EXTRA_VECS;
3024 if (is_offload(adap)) {
3025 want += s->rdmaqs + s->ofldqsets;
3026 /* need nchan for each possible ULD */
3027 ofld_need = 2 * nchan;
3028 }
3029 need = adap->params.nports + EXTRA_VECS + ofld_need;
3030
3031 while ((err = pci_enable_msix(adap->pdev, entries, want)) >= need)
3032 want = err;
3033
3034 if (!err) {
3035 /*
3036 * Distribute available vectors to the various queue groups.
3037 * Every group gets its minimum requirement and NIC gets top
3038 * priority for leftovers.
3039 */
3040 i = want - EXTRA_VECS - ofld_need;
3041 if (i < s->max_ethqsets) {
3042 s->max_ethqsets = i;
3043 if (i < s->ethqsets)
3044 reduce_ethqs(adap, i);
3045 }
3046 if (is_offload(adap)) {
3047 i = want - EXTRA_VECS - s->max_ethqsets;
3048 i -= ofld_need - nchan;
3049 s->ofldqsets = (i / nchan) * nchan; /* round down */
3050 }
3051 for (i = 0; i < want; ++i)
3052 adap->msix_info[i].vec = entries[i].vector;
3053 } else if (err > 0)
3054 dev_info(adap->pdev_dev,
3055 "only %d MSI-X vectors left, not using MSI-X\n", err);
3056 return err;
3057}
3058
3059#undef EXTRA_VECS
3060
3061static void __devinit print_port_info(struct adapter *adap)
3062{
3063 static const char *base[] = {
3064 "R", "KX4", "T", "KX", "T", "KR", "CX4"
3065 };
3066
3067 int i;
3068 char buf[80];
3069
3070 for_each_port(adap, i) {
3071 struct net_device *dev = adap->port[i];
3072 const struct port_info *pi = netdev_priv(dev);
3073 char *bufp = buf;
3074
3075 if (!test_bit(i, &adap->registered_device_map))
3076 continue;
3077
3078 if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_100M)
3079 bufp += sprintf(bufp, "100/");
3080 if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_1G)
3081 bufp += sprintf(bufp, "1000/");
3082 if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_10G)
3083 bufp += sprintf(bufp, "10G/");
3084 if (bufp != buf)
3085 --bufp;
3086 sprintf(bufp, "BASE-%s", base[pi->port_type]);
3087
3088 netdev_info(dev, "Chelsio %s rev %d %s %sNIC PCIe x%d%s\n",
3089 adap->params.vpd.id, adap->params.rev,
3090 buf, is_offload(adap) ? "R" : "",
3091 adap->params.pci.width,
3092 (adap->flags & USING_MSIX) ? " MSI-X" :
3093 (adap->flags & USING_MSI) ? " MSI" : "");
3094 if (adap->name == dev->name)
3095 netdev_info(dev, "S/N: %s, E/C: %s\n",
3096 adap->params.vpd.sn, adap->params.vpd.ec);
3097 }
3098}
3099
3100#define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO | NETIF_F_TSO6 |\
3101 NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
3102
3103static int __devinit init_one(struct pci_dev *pdev,
3104 const struct pci_device_id *ent)
3105{
3106 int func, i, err;
3107 struct port_info *pi;
3108 unsigned int highdma = 0;
3109 struct adapter *adapter = NULL;
3110
3111 printk_once(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
3112
3113 err = pci_request_regions(pdev, KBUILD_MODNAME);
3114 if (err) {
3115 /* Just info, some other driver may have claimed the device. */
3116 dev_info(&pdev->dev, "cannot obtain PCI resources\n");
3117 return err;
3118 }
3119
3120 /* We control everything through PF 0 */
3121 func = PCI_FUNC(pdev->devfn);
3122 if (func > 0)
3123 goto sriov;
3124
3125 err = pci_enable_device(pdev);
3126 if (err) {
3127 dev_err(&pdev->dev, "cannot enable PCI device\n");
3128 goto out_release_regions;
3129 }
3130
3131 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
3132 highdma = NETIF_F_HIGHDMA;
3133 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
3134 if (err) {
3135 dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
3136 "coherent allocations\n");
3137 goto out_disable_device;
3138 }
3139 } else {
3140 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
3141 if (err) {
3142 dev_err(&pdev->dev, "no usable DMA configuration\n");
3143 goto out_disable_device;
3144 }
3145 }
3146
3147 pci_enable_pcie_error_reporting(pdev);
3148 pci_set_master(pdev);
3149 pci_save_state(pdev);
3150
3151 adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
3152 if (!adapter) {
3153 err = -ENOMEM;
3154 goto out_disable_device;
3155 }
3156
3157 adapter->regs = pci_ioremap_bar(pdev, 0);
3158 if (!adapter->regs) {
3159 dev_err(&pdev->dev, "cannot map device registers\n");
3160 err = -ENOMEM;
3161 goto out_free_adapter;
3162 }
3163
3164 adapter->pdev = pdev;
3165 adapter->pdev_dev = &pdev->dev;
3166 adapter->name = pci_name(pdev);
3167 adapter->msg_enable = dflt_msg_enable;
3168 memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map));
3169
3170 spin_lock_init(&adapter->stats_lock);
3171 spin_lock_init(&adapter->tid_release_lock);
3172
3173 INIT_WORK(&adapter->tid_release_task, process_tid_release_list);
3174
3175 err = t4_prep_adapter(adapter);
3176 if (err)
3177 goto out_unmap_bar;
3178 err = adap_init0(adapter);
3179 if (err)
3180 goto out_unmap_bar;
3181
3182 for_each_port(adapter, i) {
3183 struct net_device *netdev;
3184
3185 netdev = alloc_etherdev_mq(sizeof(struct port_info),
3186 MAX_ETH_QSETS);
3187 if (!netdev) {
3188 err = -ENOMEM;
3189 goto out_free_dev;
3190 }
3191
3192 SET_NETDEV_DEV(netdev, &pdev->dev);
3193
3194 adapter->port[i] = netdev;
3195 pi = netdev_priv(netdev);
3196 pi->adapter = adapter;
3197 pi->xact_addr_filt = -1;
3198 pi->rx_offload = RX_CSO;
3199 pi->port_id = i;
3200 netif_carrier_off(netdev);
3201 netif_tx_stop_all_queues(netdev);
3202 netdev->irq = pdev->irq;
3203
3204 netdev->features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6;
3205 netdev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
3206 netdev->features |= NETIF_F_GRO | highdma;
3207 netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
3208 netdev->vlan_features = netdev->features & VLAN_FEAT;
3209
3210 netdev->netdev_ops = &cxgb4_netdev_ops;
3211 SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
3212 }
3213
3214 pci_set_drvdata(pdev, adapter);
3215
3216 if (adapter->flags & FW_OK) {
3217 err = t4_port_init(adapter, 0, 0, 0);
3218 if (err)
3219 goto out_free_dev;
3220 }
3221
3222 /*
3223 * Configure queues and allocate tables now; they can be needed as
3224 * soon as the first register_netdev completes.
3225 */
3226 cfg_queues(adapter);
3227
3228 adapter->l2t = t4_init_l2t();
3229 if (!adapter->l2t) {
3230 /* We tolerate a lack of L2T, giving up some functionality */
3231 dev_warn(&pdev->dev, "could not allocate L2T, continuing\n");
3232 adapter->params.offload = 0;
3233 }
3234
3235 if (is_offload(adapter) && tid_init(&adapter->tids) < 0) {
3236 dev_warn(&pdev->dev, "could not allocate TID table, "
3237 "continuing\n");
3238 adapter->params.offload = 0;
3239 }
3240
3241 /*
3242 * The card is now ready to go. If any errors occur during device
3243 * registration we do not fail the whole card but rather proceed only
3244 * with the ports we manage to register successfully. However we must
3245 * register at least one net device.
3246 */
3247 for_each_port(adapter, i) {
3248 err = register_netdev(adapter->port[i]);
3249 if (err)
3250 dev_warn(&pdev->dev,
3251 "cannot register net device %s, skipping\n",
3252 adapter->port[i]->name);
3253 else {
3254 /*
3255 * Change the name we use for messages to the name of
3256 * the first successfully registered interface.
3257 */
3258 if (!adapter->registered_device_map)
3259 adapter->name = adapter->port[i]->name;
3260
3261 __set_bit(i, &adapter->registered_device_map);
3262 adapter->chan_map[adap2pinfo(adapter, i)->tx_chan] = i;
3263 }
3264 }
3265 if (!adapter->registered_device_map) {
3266 dev_err(&pdev->dev, "could not register any net devices\n");
3267 goto out_free_dev;
3268 }
3269
3270 if (cxgb4_debugfs_root) {
3271 adapter->debugfs_root = debugfs_create_dir(pci_name(pdev),
3272 cxgb4_debugfs_root);
3273 setup_debugfs(adapter);
3274 }
3275
3276 /* See what interrupts we'll be using */
3277 if (msi > 1 && enable_msix(adapter) == 0)
3278 adapter->flags |= USING_MSIX;
3279 else if (msi > 0 && pci_enable_msi(pdev) == 0)
3280 adapter->flags |= USING_MSI;
3281
3282 if (is_offload(adapter))
3283 attach_ulds(adapter);
3284
3285 print_port_info(adapter);
3286
3287sriov:
3288#ifdef CONFIG_PCI_IOV
3289 if (func < ARRAY_SIZE(num_vf) && num_vf[func] > 0)
3290 if (pci_enable_sriov(pdev, num_vf[func]) == 0)
3291 dev_info(&pdev->dev,
3292 "instantiated %u virtual functions\n",
3293 num_vf[func]);
3294#endif
3295 return 0;
3296
3297 out_free_dev:
3298 t4_free_mem(adapter->tids.tid_tab);
3299 t4_free_mem(adapter->l2t);
3300 for_each_port(adapter, i)
3301 if (adapter->port[i])
3302 free_netdev(adapter->port[i]);
3303 if (adapter->flags & FW_OK)
3304 t4_fw_bye(adapter, 0);
3305 out_unmap_bar:
3306 iounmap(adapter->regs);
3307 out_free_adapter:
3308 kfree(adapter);
3309 out_disable_device:
3310 pci_disable_pcie_error_reporting(pdev);
3311 pci_disable_device(pdev);
3312 out_release_regions:
3313 pci_release_regions(pdev);
3314 pci_set_drvdata(pdev, NULL);
3315 return err;
3316}
3317
3318static void __devexit remove_one(struct pci_dev *pdev)
3319{
3320 struct adapter *adapter = pci_get_drvdata(pdev);
3321
3322 pci_disable_sriov(pdev);
3323
3324 if (adapter) {
3325 int i;
3326
3327 if (is_offload(adapter))
3328 detach_ulds(adapter);
3329
3330 for_each_port(adapter, i)
3331 if (test_bit(i, &adapter->registered_device_map))
3332 unregister_netdev(adapter->port[i]);
3333
3334 if (adapter->debugfs_root)
3335 debugfs_remove_recursive(adapter->debugfs_root);
3336
3337 t4_sge_stop(adapter);
3338 t4_free_sge_resources(adapter);
3339 t4_free_mem(adapter->l2t);
3340 t4_free_mem(adapter->tids.tid_tab);
3341 disable_msi(adapter);
3342
3343 for_each_port(adapter, i)
3344 if (adapter->port[i])
3345 free_netdev(adapter->port[i]);
3346
3347 if (adapter->flags & FW_OK)
3348 t4_fw_bye(adapter, 0);
3349 iounmap(adapter->regs);
3350 kfree(adapter);
3351 pci_disable_pcie_error_reporting(pdev);
3352 pci_disable_device(pdev);
3353 pci_release_regions(pdev);
3354 pci_set_drvdata(pdev, NULL);
3355 } else if (PCI_FUNC(pdev->devfn) > 0)
3356 pci_release_regions(pdev);
3357}
3358
3359static struct pci_driver cxgb4_driver = {
3360 .name = KBUILD_MODNAME,
3361 .id_table = cxgb4_pci_tbl,
3362 .probe = init_one,
3363 .remove = __devexit_p(remove_one),
3364};
3365
3366static int __init cxgb4_init_module(void)
3367{
3368 int ret;
3369
3370 /* Debugfs support is optional, just warn if this fails */
3371 cxgb4_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
3372 if (!cxgb4_debugfs_root)
3373 pr_warning("could not create debugfs entry, continuing\n");
3374
3375 ret = pci_register_driver(&cxgb4_driver);
3376 if (ret < 0)
3377 debugfs_remove(cxgb4_debugfs_root);
3378 return ret;
3379}
3380
3381static void __exit cxgb4_cleanup_module(void)
3382{
3383 pci_unregister_driver(&cxgb4_driver);
3384 debugfs_remove(cxgb4_debugfs_root); /* NULL ok */
3385}
3386
3387module_init(cxgb4_init_module);
3388module_exit(cxgb4_cleanup_module);
diff --git a/drivers/net/cxgb4/cxgb4_uld.h b/drivers/net/cxgb4/cxgb4_uld.h
new file mode 100644
index 000000000000..5b98546ac92d
--- /dev/null
+++ b/drivers/net/cxgb4/cxgb4_uld.h
@@ -0,0 +1,239 @@
1/*
2 * This file is part of the Chelsio T4 Ethernet driver for Linux.
3 *
4 * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved.
5 *
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
11 *
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
14 * conditions are met:
15 *
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
18 * disclaimer.
19 *
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 * SOFTWARE.
33 */
34
35#ifndef __CXGB4_OFLD_H
36#define __CXGB4_OFLD_H
37
38#include <linux/cache.h>
39#include <linux/spinlock.h>
40#include <linux/skbuff.h>
41#include <asm/atomic.h>
42
43/* CPL message priority levels */
44enum {
45 CPL_PRIORITY_DATA = 0, /* data messages */
46 CPL_PRIORITY_SETUP = 1, /* connection setup messages */
47 CPL_PRIORITY_TEARDOWN = 0, /* connection teardown messages */
48 CPL_PRIORITY_LISTEN = 1, /* listen start/stop messages */
49 CPL_PRIORITY_ACK = 1, /* RX ACK messages */
50 CPL_PRIORITY_CONTROL = 1 /* control messages */
51};
52
53#define INIT_TP_WR(w, tid) do { \
54 (w)->wr.wr_hi = htonl(FW_WR_OP(FW_TP_WR) | \
55 FW_WR_IMMDLEN(sizeof(*w) - sizeof(w->wr))); \
56 (w)->wr.wr_mid = htonl(FW_WR_LEN16(DIV_ROUND_UP(sizeof(*w), 16)) | \
57 FW_WR_FLOWID(tid)); \
58 (w)->wr.wr_lo = cpu_to_be64(0); \
59} while (0)
60
61#define INIT_TP_WR_CPL(w, cpl, tid) do { \
62 INIT_TP_WR(w, tid); \
63 OPCODE_TID(w) = htonl(MK_OPCODE_TID(cpl, tid)); \
64} while (0)
65
66#define INIT_ULPTX_WR(w, wrlen, atomic, tid) do { \
67 (w)->wr.wr_hi = htonl(FW_WR_OP(FW_ULPTX_WR) | FW_WR_ATOMIC(atomic)); \
68 (w)->wr.wr_mid = htonl(FW_WR_LEN16(DIV_ROUND_UP(wrlen, 16)) | \
69 FW_WR_FLOWID(tid)); \
70 (w)->wr.wr_lo = cpu_to_be64(0); \
71} while (0)
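/*
 * Illustrative note, not part of this patch: a caller building a CPL work
 * request typically reserves room in an skb and then uses these helpers, e.g.
 *
 *	req = (struct cpl_abc_req *)__skb_put(skb, sizeof(*req));
 *	INIT_TP_WR_CPL(req, CPL_ABC_REQ, tid);
 *
 * where cpl_abc_req/CPL_ABC_REQ stand in for a real CPL type; write_l2e() in
 * l2t.c uses the same pattern (via INIT_TP_WR) for CPL_L2T_WRITE_REQ.
 */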
72
73/* Special asynchronous notification message */
74#define CXGB4_MSG_AN ((void *)1)
75
76struct serv_entry {
77 void *data;
78};
79
80union aopen_entry {
81 void *data;
82 union aopen_entry *next;
83};
84
85/*
 86 * Holds the size, base address, free list start, etc. of the TID, server TID,
87 * and active-open TID tables. The tables themselves are allocated dynamically.
88 */
89struct tid_info {
90 void **tid_tab;
91 unsigned int ntids;
92
93 struct serv_entry *stid_tab;
94 unsigned long *stid_bmap;
95 unsigned int nstids;
96 unsigned int stid_base;
97
98 union aopen_entry *atid_tab;
99 unsigned int natids;
100
101 unsigned int nftids;
102 unsigned int ftid_base;
103
104 spinlock_t atid_lock ____cacheline_aligned_in_smp;
105 union aopen_entry *afree;
106 unsigned int atids_in_use;
107
108 spinlock_t stid_lock;
109 unsigned int stids_in_use;
110
111 atomic_t tids_in_use;
112};
113
114static inline void *lookup_tid(const struct tid_info *t, unsigned int tid)
115{
116 return tid < t->ntids ? t->tid_tab[tid] : NULL;
117}
118
119static inline void *lookup_atid(const struct tid_info *t, unsigned int atid)
120{
121 return atid < t->natids ? t->atid_tab[atid].data : NULL;
122}
123
124static inline void *lookup_stid(const struct tid_info *t, unsigned int stid)
125{
126 stid -= t->stid_base;
127 return stid < t->nstids ? t->stid_tab[stid].data : NULL;
128}
129
130static inline void cxgb4_insert_tid(struct tid_info *t, void *data,
131 unsigned int tid)
132{
133 t->tid_tab[tid] = data;
134 atomic_inc(&t->tids_in_use);
135}
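/*
 * Illustrative sketch, not part of this patch: how an upper-layer driver
 * might publish per-connection state under a TID and find it again from a
 * CPL handler.  The "example_" name is hypothetical.
 */
static inline void *example_tid_usage(struct tid_info *t, unsigned int tid,
				      void *conn_state)
{
	cxgb4_insert_tid(t, conn_state, tid);	/* publish state for this tid */
	return lookup_tid(t, tid);		/* a CPL handler gets it back */
}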
136
137int cxgb4_alloc_atid(struct tid_info *t, void *data);
138int cxgb4_alloc_stid(struct tid_info *t, int family, void *data);
139void cxgb4_free_atid(struct tid_info *t, unsigned int atid);
140void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family);
141void cxgb4_remove_tid(struct tid_info *t, unsigned int qid, unsigned int tid);
142void cxgb4_queue_tid_release(struct tid_info *t, unsigned int chan,
143 unsigned int tid);
144
145struct in6_addr;
146
147int cxgb4_create_server(const struct net_device *dev, unsigned int stid,
148 __be32 sip, __be16 sport, unsigned int queue);
149int cxgb4_create_server6(const struct net_device *dev, unsigned int stid,
150 const struct in6_addr *sip, __be16 sport,
151 unsigned int queue);
152
153static inline void set_wr_txq(struct sk_buff *skb, int prio, int queue)
154{
155 skb_set_queue_mapping(skb, (queue << 1) | prio);
156}
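/*
 * For illustration (not part of this patch): set_wr_txq(skb, CPL_PRIORITY_CONTROL, 2)
 * stores a queue_mapping of (2 << 1) | 1 = 5; the transmit path is expected
 * to recover the priority from bit 0 and the queue index from the upper bits.
 */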
157
158enum cxgb4_uld {
159 CXGB4_ULD_RDMA,
160 CXGB4_ULD_ISCSI,
161 CXGB4_ULD_MAX
162};
163
164enum cxgb4_state {
165 CXGB4_STATE_UP,
166 CXGB4_STATE_START_RECOVERY,
167 CXGB4_STATE_DOWN,
168 CXGB4_STATE_DETACH
169};
170
171struct pci_dev;
172struct l2t_data;
173struct net_device;
174struct pkt_gl;
175struct tp_tcp_stats;
176
177struct cxgb4_range {
178 unsigned int start;
179 unsigned int size;
180};
181
182struct cxgb4_virt_res { /* virtualized HW resources */
183 struct cxgb4_range ddp;
184 struct cxgb4_range iscsi;
185 struct cxgb4_range stag;
186 struct cxgb4_range rq;
187 struct cxgb4_range pbl;
188};
189
190/*
191 * Block of information the LLD provides to ULDs attaching to a device.
192 */
193struct cxgb4_lld_info {
194 struct pci_dev *pdev; /* associated PCI device */
195 struct l2t_data *l2t; /* L2 table */
196 struct tid_info *tids; /* TID table */
197 struct net_device **ports; /* device ports */
198 const struct cxgb4_virt_res *vr; /* assorted HW resources */
199 const unsigned short *mtus; /* MTU table */
200 const unsigned short *rxq_ids; /* the ULD's Rx queue ids */
201 unsigned short nrxq; /* # of Rx queues */
202 unsigned short ntxq; /* # of Tx queues */
203 unsigned char nchan:4; /* # of channels */
204 unsigned char nports:4; /* # of ports */
205 unsigned char wr_cred; /* WR 16-byte credits */
206 unsigned char adapter_type; /* type of adapter */
207 unsigned char fw_api_ver; /* FW API version */
208 unsigned int fw_vers; /* FW version */
209 unsigned int iscsi_iolen; /* iSCSI max I/O length */
210 unsigned short udb_density; /* # of user DB/page */
211 unsigned short ucq_density; /* # of user CQs/page */
212 void __iomem *gts_reg; /* address of GTS register */
213 void __iomem *db_reg; /* address of kernel doorbell */
214};
215
216struct cxgb4_uld_info {
217 const char *name;
218 void *(*add)(const struct cxgb4_lld_info *p);
219 int (*rx_handler)(void *handle, const __be64 *rsp,
220 const struct pkt_gl *gl);
221 int (*state_change)(void *handle, enum cxgb4_state new_state);
222};
223
224int cxgb4_register_uld(enum cxgb4_uld type, const struct cxgb4_uld_info *p);
225int cxgb4_unregister_uld(enum cxgb4_uld type);
226int cxgb4_ofld_send(struct net_device *dev, struct sk_buff *skb);
227unsigned int cxgb4_port_chan(const struct net_device *dev);
228unsigned int cxgb4_port_viid(const struct net_device *dev);
229unsigned int cxgb4_port_idx(const struct net_device *dev);
230struct net_device *cxgb4_netdev_by_hwid(struct pci_dev *pdev, unsigned int id);
231unsigned int cxgb4_best_mtu(const unsigned short *mtus, unsigned short mtu,
232 unsigned int *idx);
233void cxgb4_get_tcp_stats(struct pci_dev *pdev, struct tp_tcp_stats *v4,
234 struct tp_tcp_stats *v6);
235void cxgb4_iscsi_init(struct net_device *dev, unsigned int tag_mask,
236 const unsigned int *pgsz_order);
237struct sk_buff *cxgb4_pktgl_to_skb(const struct pkt_gl *gl,
238 unsigned int skb_len, unsigned int pull_len);
239#endif /* !__CXGB4_OFLD_H */
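/*
 * Illustrative sketch, not part of this patch: the skeleton of a minimal ULD
 * built on the interface above.  Every "example_" identifier is hypothetical;
 * a real ULD (iSCSI, RDMA) supplies its own per-adapter state and callbacks.
 */
static void *example_uld_add(const struct cxgb4_lld_info *lld)
{
	/* a real ULD allocates per-adapter state here; the LLD hands this
	 * pointer back as "handle" in the other callbacks */
	return (void *)(unsigned long)lld;
}

static int example_uld_rx(void *handle, const __be64 *rsp,
			  const struct pkt_gl *gl)
{
	return 0;				/* message consumed */
}

static int example_uld_state_change(void *handle, enum cxgb4_state new_state)
{
	return 0;				/* react to up/down/recovery */
}

static const struct cxgb4_uld_info example_uld_info = {
	.name		= "example",
	.add		= example_uld_add,
	.rx_handler	= example_uld_rx,
	.state_change	= example_uld_state_change,
};

/* e.g. from the ULD's module init: cxgb4_register_uld(CXGB4_ULD_ISCSI, &example_uld_info); */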
diff --git a/drivers/net/cxgb4/l2t.c b/drivers/net/cxgb4/l2t.c
new file mode 100644
index 000000000000..9f96724a133a
--- /dev/null
+++ b/drivers/net/cxgb4/l2t.c
@@ -0,0 +1,624 @@
1/*
2 * This file is part of the Chelsio T4 Ethernet driver for Linux.
3 *
4 * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved.
5 *
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
11 *
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
14 * conditions are met:
15 *
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
18 * disclaimer.
19 *
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 * SOFTWARE.
33 */
34
35#include <linux/skbuff.h>
36#include <linux/netdevice.h>
37#include <linux/if.h>
38#include <linux/if_vlan.h>
39#include <linux/jhash.h>
40#include <net/neighbour.h>
41#include "cxgb4.h"
42#include "l2t.h"
43#include "t4_msg.h"
44#include "t4fw_api.h"
45
46#define VLAN_NONE 0xfff
47
48/* identifies sync vs async L2T_WRITE_REQs */
49#define F_SYNC_WR (1 << 12)
50
51enum {
52 L2T_STATE_VALID, /* entry is up to date */
53 L2T_STATE_STALE, /* entry may be used but needs revalidation */
54 L2T_STATE_RESOLVING, /* entry needs address resolution */
55 L2T_STATE_SYNC_WRITE, /* synchronous write of entry underway */
56
57 /* when state is one of the below the entry is not hashed */
58 L2T_STATE_SWITCHING, /* entry is being used by a switching filter */
59 L2T_STATE_UNUSED /* entry not in use */
60};
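/*
 * Typical life cycle of a hashed entry: RESOLVING -> SYNC_WRITE -> VALID once
 * the neighbour resolves and the HW write completes, then VALID <-> STALE as
 * the neighbour entry ages.  SWITCHING entries are managed explicitly by
 * switching filters and never sit on a hash chain.
 */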
61
62struct l2t_data {
63 rwlock_t lock;
64 atomic_t nfree; /* number of free entries */
65 struct l2t_entry *rover; /* starting point for next allocation */
66 struct l2t_entry l2tab[L2T_SIZE];
67};
68
69static inline unsigned int vlan_prio(const struct l2t_entry *e)
70{
71 return e->vlan >> 13;
72}
73
74static inline void l2t_hold(struct l2t_data *d, struct l2t_entry *e)
75{
76 if (atomic_add_return(1, &e->refcnt) == 1) /* 0 -> 1 transition */
77 atomic_dec(&d->nfree);
78}
79
80/*
81 * To avoid having to check address families we do not allow v4 and v6
82 * neighbors to be on the same hash chain. We keep v4 entries in the first
83 * half of available hash buckets and v6 in the second.
84 */
85enum {
86 L2T_SZ_HALF = L2T_SIZE / 2,
87 L2T_HASH_MASK = L2T_SZ_HALF - 1
88};
89
90static inline unsigned int arp_hash(const u32 *key, int ifindex)
91{
92 return jhash_2words(*key, ifindex, 0) & L2T_HASH_MASK;
93}
94
95static inline unsigned int ipv6_hash(const u32 *key, int ifindex)
96{
97 u32 xor = key[0] ^ key[1] ^ key[2] ^ key[3];
98
99 return L2T_SZ_HALF + (jhash_2words(xor, ifindex, 0) & L2T_HASH_MASK);
100}
101
102static unsigned int addr_hash(const u32 *addr, int addr_len, int ifindex)
103{
104 return addr_len == 4 ? arp_hash(addr, ifindex) :
105 ipv6_hash(addr, ifindex);
106}
107
108/*
109 * Checks if an L2T entry is for the given IP/IPv6 address. It does not check
110 * whether the L2T entry and the address are of the same address family.
111 * Callers ensure an address is only checked against L2T entries of the same
112 * family, something made trivial by the separation of IP and IPv6 hash chains
 113 * mentioned above. Returns 0 if there's a match.
114 */
115static int addreq(const struct l2t_entry *e, const u32 *addr)
116{
117 if (e->v6)
118 return (e->addr[0] ^ addr[0]) | (e->addr[1] ^ addr[1]) |
119 (e->addr[2] ^ addr[2]) | (e->addr[3] ^ addr[3]);
120 return e->addr[0] ^ addr[0];
121}
122
123static void neigh_replace(struct l2t_entry *e, struct neighbour *n)
124{
125 neigh_hold(n);
126 if (e->neigh)
127 neigh_release(e->neigh);
128 e->neigh = n;
129}
130
131/*
132 * Write an L2T entry. Must be called with the entry locked.
133 * The write may be synchronous or asynchronous.
134 */
135static int write_l2e(struct adapter *adap, struct l2t_entry *e, int sync)
136{
137 struct sk_buff *skb;
138 struct cpl_l2t_write_req *req;
139
140 skb = alloc_skb(sizeof(*req), GFP_ATOMIC);
141 if (!skb)
142 return -ENOMEM;
143
144 req = (struct cpl_l2t_write_req *)__skb_put(skb, sizeof(*req));
145 INIT_TP_WR(req, 0);
146
147 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ,
148 e->idx | (sync ? F_SYNC_WR : 0) |
149 TID_QID(adap->sge.fw_evtq.abs_id)));
150 req->params = htons(L2T_W_PORT(e->lport) | L2T_W_NOREPLY(!sync));
151 req->l2t_idx = htons(e->idx);
152 req->vlan = htons(e->vlan);
153 if (e->neigh)
154 memcpy(e->dmac, e->neigh->ha, sizeof(e->dmac));
155 memcpy(req->dst_mac, e->dmac, sizeof(req->dst_mac));
156
157 set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);
158 t4_ofld_send(adap, skb);
159
160 if (sync && e->state != L2T_STATE_SWITCHING)
161 e->state = L2T_STATE_SYNC_WRITE;
162 return 0;
163}
164
165/*
166 * Send packets waiting in an L2T entry's ARP queue. Must be called with the
167 * entry locked.
168 */
169static void send_pending(struct adapter *adap, struct l2t_entry *e)
170{
171 while (e->arpq_head) {
172 struct sk_buff *skb = e->arpq_head;
173
174 e->arpq_head = skb->next;
175 skb->next = NULL;
176 t4_ofld_send(adap, skb);
177 }
178 e->arpq_tail = NULL;
179}
180
181/*
182 * Process a CPL_L2T_WRITE_RPL. Wake up the ARP queue if it completes a
183 * synchronous L2T_WRITE. Note that the TID in the reply is really the L2T
184 * index it refers to.
185 */
186void do_l2t_write_rpl(struct adapter *adap, const struct cpl_l2t_write_rpl *rpl)
187{
188 unsigned int tid = GET_TID(rpl);
189 unsigned int idx = tid & (L2T_SIZE - 1);
190
191 if (unlikely(rpl->status != CPL_ERR_NONE)) {
192 dev_err(adap->pdev_dev,
193 "Unexpected L2T_WRITE_RPL status %u for entry %u\n",
194 rpl->status, idx);
195 return;
196 }
197
198 if (tid & F_SYNC_WR) {
199 struct l2t_entry *e = &adap->l2t->l2tab[idx];
200
201 spin_lock(&e->lock);
202 if (e->state != L2T_STATE_SWITCHING) {
203 send_pending(adap, e);
204 e->state = (e->neigh->nud_state & NUD_STALE) ?
205 L2T_STATE_STALE : L2T_STATE_VALID;
206 }
207 spin_unlock(&e->lock);
208 }
209}
210
211/*
212 * Add a packet to an L2T entry's queue of packets awaiting resolution.
213 * Must be called with the entry's lock held.
214 */
215static inline void arpq_enqueue(struct l2t_entry *e, struct sk_buff *skb)
216{
217 skb->next = NULL;
218 if (e->arpq_head)
219 e->arpq_tail->next = skb;
220 else
221 e->arpq_head = skb;
222 e->arpq_tail = skb;
223}
224
225int cxgb4_l2t_send(struct net_device *dev, struct sk_buff *skb,
226 struct l2t_entry *e)
227{
228 struct adapter *adap = netdev2adap(dev);
229
230again:
231 switch (e->state) {
232 case L2T_STATE_STALE: /* entry is stale, kick off revalidation */
233 neigh_event_send(e->neigh, NULL);
234 spin_lock_bh(&e->lock);
235 if (e->state == L2T_STATE_STALE)
236 e->state = L2T_STATE_VALID;
237 spin_unlock_bh(&e->lock);
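		/* fall through */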
238 case L2T_STATE_VALID: /* fast-path, send the packet on */
239 return t4_ofld_send(adap, skb);
240 case L2T_STATE_RESOLVING:
241 case L2T_STATE_SYNC_WRITE:
242 spin_lock_bh(&e->lock);
243 if (e->state != L2T_STATE_SYNC_WRITE &&
244 e->state != L2T_STATE_RESOLVING) {
245 spin_unlock_bh(&e->lock);
246 goto again;
247 }
248 arpq_enqueue(e, skb);
249 spin_unlock_bh(&e->lock);
250
251 if (e->state == L2T_STATE_RESOLVING &&
252 !neigh_event_send(e->neigh, NULL)) {
253 spin_lock_bh(&e->lock);
254 if (e->state == L2T_STATE_RESOLVING && e->arpq_head)
255 write_l2e(adap, e, 1);
256 spin_unlock_bh(&e->lock);
257 }
258 }
259 return 0;
260}
261EXPORT_SYMBOL(cxgb4_l2t_send);
262
263/*
264 * Allocate a free L2T entry. Must be called with l2t_data.lock held.
265 */
266static struct l2t_entry *alloc_l2e(struct l2t_data *d)
267{
268 struct l2t_entry *end, *e, **p;
269
270 if (!atomic_read(&d->nfree))
271 return NULL;
272
273 /* there's definitely a free entry */
274 for (e = d->rover, end = &d->l2tab[L2T_SIZE]; e != end; ++e)
275 if (atomic_read(&e->refcnt) == 0)
276 goto found;
277
278 for (e = d->l2tab; atomic_read(&e->refcnt); ++e)
279 ;
280found:
281 d->rover = e + 1;
282 atomic_dec(&d->nfree);
283
284 /*
285 * The entry we found may be an inactive entry that is
286 * presently in the hash table. We need to remove it.
287 */
288 if (e->state < L2T_STATE_SWITCHING)
289 for (p = &d->l2tab[e->hash].first; *p; p = &(*p)->next)
290 if (*p == e) {
291 *p = e->next;
292 e->next = NULL;
293 break;
294 }
295
296 e->state = L2T_STATE_UNUSED;
297 return e;
298}
299
300/*
301 * Called when an L2T entry has no more users.
302 */
303static void t4_l2e_free(struct l2t_entry *e)
304{
305 struct l2t_data *d;
306
307 spin_lock_bh(&e->lock);
308 if (atomic_read(&e->refcnt) == 0) { /* hasn't been recycled */
309 if (e->neigh) {
310 neigh_release(e->neigh);
311 e->neigh = NULL;
312 }
313 }
314 spin_unlock_bh(&e->lock);
315
316 d = container_of(e, struct l2t_data, l2tab[e->idx]);
317 atomic_inc(&d->nfree);
318}
319
320void cxgb4_l2t_release(struct l2t_entry *e)
321{
322 if (atomic_dec_and_test(&e->refcnt))
323 t4_l2e_free(e);
324}
325EXPORT_SYMBOL(cxgb4_l2t_release);
326
327/*
328 * Update an L2T entry that was previously used for the same next hop as neigh.
329 * Must be called with softirqs disabled.
330 */
331static void reuse_entry(struct l2t_entry *e, struct neighbour *neigh)
332{
333 unsigned int nud_state;
334
335 spin_lock(&e->lock); /* avoid race with t4_l2t_free */
336 if (neigh != e->neigh)
337 neigh_replace(e, neigh);
338 nud_state = neigh->nud_state;
339 if (memcmp(e->dmac, neigh->ha, sizeof(e->dmac)) ||
340 !(nud_state & NUD_VALID))
341 e->state = L2T_STATE_RESOLVING;
342 else if (nud_state & NUD_CONNECTED)
343 e->state = L2T_STATE_VALID;
344 else
345 e->state = L2T_STATE_STALE;
346 spin_unlock(&e->lock);
347}
348
349struct l2t_entry *cxgb4_l2t_get(struct l2t_data *d, struct neighbour *neigh,
350 const struct net_device *physdev,
351 unsigned int priority)
352{
353 u8 lport;
354 u16 vlan;
355 struct l2t_entry *e;
356 int addr_len = neigh->tbl->key_len;
357 u32 *addr = (u32 *)neigh->primary_key;
358 int ifidx = neigh->dev->ifindex;
359 int hash = addr_hash(addr, addr_len, ifidx);
360
361 if (neigh->dev->flags & IFF_LOOPBACK)
362 lport = netdev2pinfo(physdev)->tx_chan + 4;
363 else
364 lport = netdev2pinfo(physdev)->lport;
365
366 if (neigh->dev->priv_flags & IFF_802_1Q_VLAN)
367 vlan = vlan_dev_vlan_id(neigh->dev);
368 else
369 vlan = VLAN_NONE;
370
371 write_lock_bh(&d->lock);
372 for (e = d->l2tab[hash].first; e; e = e->next)
373 if (!addreq(e, addr) && e->ifindex == ifidx &&
374 e->vlan == vlan && e->lport == lport) {
375 l2t_hold(d, e);
376 if (atomic_read(&e->refcnt) == 1)
377 reuse_entry(e, neigh);
378 goto done;
379 }
380
381 /* Need to allocate a new entry */
382 e = alloc_l2e(d);
383 if (e) {
384 spin_lock(&e->lock); /* avoid race with t4_l2t_free */
385 e->state = L2T_STATE_RESOLVING;
386 memcpy(e->addr, addr, addr_len);
387 e->ifindex = ifidx;
388 e->hash = hash;
389 e->lport = lport;
390 e->v6 = addr_len == 16;
391 atomic_set(&e->refcnt, 1);
392 neigh_replace(e, neigh);
393 e->vlan = vlan;
394 e->next = d->l2tab[hash].first;
395 d->l2tab[hash].first = e;
396 spin_unlock(&e->lock);
397 }
398done:
399 write_unlock_bh(&d->lock);
400 return e;
401}
402EXPORT_SYMBOL(cxgb4_l2t_get);
403
404/*
405 * Called when address resolution fails for an L2T entry to handle packets
406 * on the arpq head. If a packet specifies a failure handler it is invoked,
407 * otherwise the packet is sent to the device.
408 */
409static void handle_failed_resolution(struct adapter *adap, struct sk_buff *arpq)
410{
411 while (arpq) {
412 struct sk_buff *skb = arpq;
413 const struct l2t_skb_cb *cb = L2T_SKB_CB(skb);
414
415 arpq = skb->next;
416 skb->next = NULL;
417 if (cb->arp_err_handler)
418 cb->arp_err_handler(cb->handle, skb);
419 else
420 t4_ofld_send(adap, skb);
421 }
422}
423
424/*
425 * Called when the host's neighbor layer makes a change to some entry that is
426 * loaded into the HW L2 table.
427 */
428void t4_l2t_update(struct adapter *adap, struct neighbour *neigh)
429{
430 struct l2t_entry *e;
431 struct sk_buff *arpq = NULL;
432 struct l2t_data *d = adap->l2t;
433 int addr_len = neigh->tbl->key_len;
434 u32 *addr = (u32 *) neigh->primary_key;
435 int ifidx = neigh->dev->ifindex;
436 int hash = addr_hash(addr, addr_len, ifidx);
437
438 read_lock_bh(&d->lock);
439 for (e = d->l2tab[hash].first; e; e = e->next)
440 if (!addreq(e, addr) && e->ifindex == ifidx) {
441 spin_lock(&e->lock);
442 if (atomic_read(&e->refcnt))
443 goto found;
444 spin_unlock(&e->lock);
445 break;
446 }
447 read_unlock_bh(&d->lock);
448 return;
449
450 found:
451 read_unlock(&d->lock);
452
453 if (neigh != e->neigh)
454 neigh_replace(e, neigh);
455
456 if (e->state == L2T_STATE_RESOLVING) {
457 if (neigh->nud_state & NUD_FAILED) {
458 arpq = e->arpq_head;
459 e->arpq_head = e->arpq_tail = NULL;
460 } else if ((neigh->nud_state & (NUD_CONNECTED | NUD_STALE)) &&
461 e->arpq_head) {
462 write_l2e(adap, e, 1);
463 }
464 } else {
465 e->state = neigh->nud_state & NUD_CONNECTED ?
466 L2T_STATE_VALID : L2T_STATE_STALE;
467 if (memcmp(e->dmac, neigh->ha, sizeof(e->dmac)))
468 write_l2e(adap, e, 0);
469 }
470
471 spin_unlock_bh(&e->lock);
472
473 if (arpq)
474 handle_failed_resolution(adap, arpq);
475}
476
477/*
478 * Allocate an L2T entry for use by a switching rule. Such entries need to be
479 * explicitly freed and while busy they are not on any hash chain, so normal
480 * address resolution updates do not see them.
481 */
482struct l2t_entry *t4_l2t_alloc_switching(struct l2t_data *d)
483{
484 struct l2t_entry *e;
485
486 write_lock_bh(&d->lock);
487 e = alloc_l2e(d);
488 if (e) {
489 spin_lock(&e->lock); /* avoid race with t4_l2t_free */
490 e->state = L2T_STATE_SWITCHING;
491 atomic_set(&e->refcnt, 1);
492 spin_unlock(&e->lock);
493 }
494 write_unlock_bh(&d->lock);
495 return e;
496}
497
498/*
499 * Sets/updates the contents of a switching L2T entry that has been allocated
500 * with an earlier call to @t4_l2t_alloc_switching.
501 */
502int t4_l2t_set_switching(struct adapter *adap, struct l2t_entry *e, u16 vlan,
503 u8 port, u8 *eth_addr)
504{
505 e->vlan = vlan;
506 e->lport = port;
507 memcpy(e->dmac, eth_addr, ETH_ALEN);
508 return write_l2e(adap, e, 0);
509}
510
511struct l2t_data *t4_init_l2t(void)
512{
513 int i;
514 struct l2t_data *d;
515
516 d = t4_alloc_mem(sizeof(*d));
517 if (!d)
518 return NULL;
519
520 d->rover = d->l2tab;
521 atomic_set(&d->nfree, L2T_SIZE);
522 rwlock_init(&d->lock);
523
524 for (i = 0; i < L2T_SIZE; ++i) {
525 d->l2tab[i].idx = i;
526 d->l2tab[i].state = L2T_STATE_UNUSED;
527 spin_lock_init(&d->l2tab[i].lock);
528 atomic_set(&d->l2tab[i].refcnt, 0);
529 }
530 return d;
531}
532
533#include <linux/module.h>
534#include <linux/debugfs.h>
535#include <linux/seq_file.h>
536
537static inline void *l2t_get_idx(struct seq_file *seq, loff_t pos)
538{
539 struct l2t_entry *l2tab = seq->private;
540
541 return pos >= L2T_SIZE ? NULL : &l2tab[pos];
542}
543
544static void *l2t_seq_start(struct seq_file *seq, loff_t *pos)
545{
546 return *pos ? l2t_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
547}
548
549static void *l2t_seq_next(struct seq_file *seq, void *v, loff_t *pos)
550{
551 v = l2t_get_idx(seq, *pos);
552 if (v)
553 ++*pos;
554 return v;
555}
556
557static void l2t_seq_stop(struct seq_file *seq, void *v)
558{
559}
560
561static char l2e_state(const struct l2t_entry *e)
562{
563 switch (e->state) {
564 case L2T_STATE_VALID: return 'V';
565 case L2T_STATE_STALE: return 'S';
566 case L2T_STATE_SYNC_WRITE: return 'W';
567 case L2T_STATE_RESOLVING: return e->arpq_head ? 'A' : 'R';
568 case L2T_STATE_SWITCHING: return 'X';
569 default:
570 return 'U';
571 }
572}
573
574static int l2t_seq_show(struct seq_file *seq, void *v)
575{
576 if (v == SEQ_START_TOKEN)
577 seq_puts(seq, " Idx IP address "
578 "Ethernet address VLAN/P LP State Users Port\n");
579 else {
580 char ip[60];
581 struct l2t_entry *e = v;
582
583 spin_lock_bh(&e->lock);
584 if (e->state == L2T_STATE_SWITCHING)
585 ip[0] = '\0';
586 else
587 sprintf(ip, e->v6 ? "%pI6c" : "%pI4", e->addr);
588 seq_printf(seq, "%4u %-25s %17pM %4d %u %2u %c %5u %s\n",
589 e->idx, ip, e->dmac,
590 e->vlan & VLAN_VID_MASK, vlan_prio(e), e->lport,
591 l2e_state(e), atomic_read(&e->refcnt),
592 e->neigh ? e->neigh->dev->name : "");
593 spin_unlock_bh(&e->lock);
594 }
595 return 0;
596}
597
598static const struct seq_operations l2t_seq_ops = {
599 .start = l2t_seq_start,
600 .next = l2t_seq_next,
601 .stop = l2t_seq_stop,
602 .show = l2t_seq_show
603};
604
605static int l2t_seq_open(struct inode *inode, struct file *file)
606{
607 int rc = seq_open(file, &l2t_seq_ops);
608
609 if (!rc) {
610 struct adapter *adap = inode->i_private;
611 struct seq_file *seq = file->private_data;
612
613 seq->private = adap->l2t->l2tab;
614 }
615 return rc;
616}
617
618const struct file_operations t4_l2t_fops = {
619 .owner = THIS_MODULE,
620 .open = l2t_seq_open,
621 .read = seq_read,
622 .llseek = seq_lseek,
623 .release = seq_release,
624};
diff --git a/drivers/net/cxgb4/l2t.h b/drivers/net/cxgb4/l2t.h
new file mode 100644
index 000000000000..643f27ed3cf4
--- /dev/null
+++ b/drivers/net/cxgb4/l2t.h
@@ -0,0 +1,110 @@
1/*
2 * This file is part of the Chelsio T4 Ethernet driver for Linux.
3 *
4 * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved.
5 *
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
11 *
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
14 * conditions are met:
15 *
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
18 * disclaimer.
19 *
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 * SOFTWARE.
33 */
34
35#ifndef __CXGB4_L2T_H
36#define __CXGB4_L2T_H
37
38#include <linux/spinlock.h>
39#include <linux/if_ether.h>
40#include <asm/atomic.h>
41
42struct adapter;
43struct l2t_data;
44struct neighbour;
45struct net_device;
46struct file_operations;
47struct cpl_l2t_write_rpl;
48
49/*
50 * Each L2T entry plays multiple roles. First of all, it keeps state for the
51 * corresponding entry of the HW L2 table and maintains a queue of offload
52 * packets awaiting address resolution. Second, it is a node of a hash table
53 * chain, where the nodes of the chain are linked together through their next
54 * pointer. Finally, each node is a bucket of a hash table, pointing to the
55 * first element in its chain through its first pointer.
56 */
57struct l2t_entry {
58 u16 state; /* entry state */
59 u16 idx; /* entry index */
60 u32 addr[4]; /* next hop IP or IPv6 address */
61 int ifindex; /* neighbor's net_device's ifindex */
62 struct neighbour *neigh; /* associated neighbour */
63 struct l2t_entry *first; /* start of hash chain */
64 struct l2t_entry *next; /* next l2t_entry on chain */
65 struct sk_buff *arpq_head; /* queue of packets awaiting resolution */
66 struct sk_buff *arpq_tail;
67 spinlock_t lock;
68 atomic_t refcnt; /* entry reference count */
69 u16 hash; /* hash bucket the entry is on */
 70 u16 vlan; /* VLAN TCI (id: bits 0-11, prio: bits 13-15) */
71 u8 v6; /* whether entry is for IPv6 */
72 u8 lport; /* associated offload logical interface */
73 u8 dmac[ETH_ALEN]; /* neighbour's MAC address */
74};
75
76typedef void (*arp_err_handler_t)(void *handle, struct sk_buff *skb);
77
78/*
79 * Callback stored in an skb to handle address resolution failure.
80 */
81struct l2t_skb_cb {
82 void *handle;
83 arp_err_handler_t arp_err_handler;
84};
85
86#define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
87
88static inline void t4_set_arp_err_handler(struct sk_buff *skb, void *handle,
89 arp_err_handler_t handler)
90{
91 L2T_SKB_CB(skb)->handle = handle;
92 L2T_SKB_CB(skb)->arp_err_handler = handler;
93}
94
95void cxgb4_l2t_release(struct l2t_entry *e);
96int cxgb4_l2t_send(struct net_device *dev, struct sk_buff *skb,
97 struct l2t_entry *e);
98struct l2t_entry *cxgb4_l2t_get(struct l2t_data *d, struct neighbour *neigh,
99 const struct net_device *physdev,
100 unsigned int priority);
101
102void t4_l2t_update(struct adapter *adap, struct neighbour *neigh);
103struct l2t_entry *t4_l2t_alloc_switching(struct l2t_data *d);
104int t4_l2t_set_switching(struct adapter *adap, struct l2t_entry *e, u16 vlan,
105 u8 port, u8 *eth_addr);
106struct l2t_data *t4_init_l2t(void);
107void do_l2t_write_rpl(struct adapter *p, const struct cpl_l2t_write_rpl *rpl);
108
109extern const struct file_operations t4_l2t_fops;
110#endif /* __CXGB4_L2T_H */
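/*
 * Illustrative sketch, not part of this patch (and assuming the caller has
 * <linux/skbuff.h> included, as users of L2T_SKB_CB() must): a ULD typically
 * installs a failure handler on the skb before handing it to cxgb4_l2t_send()
 * so the packet can be reclaimed if address resolution fails.  The "example_"
 * names are hypothetical.
 */
static inline void example_arp_failure(void *handle, struct sk_buff *skb)
{
	kfree_skb(skb);		/* drop the packet; release "handle" as needed */
}

static inline int example_l2t_send(struct net_device *dev, struct sk_buff *skb,
				   struct l2t_entry *e, void *conn)
{
	t4_set_arp_err_handler(skb, conn, example_arp_failure);
	return cxgb4_l2t_send(dev, skb, e);
}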
diff --git a/drivers/net/cxgb4/sge.c b/drivers/net/cxgb4/sge.c
new file mode 100644
index 000000000000..14adc58e71c3
--- /dev/null
+++ b/drivers/net/cxgb4/sge.c
@@ -0,0 +1,2431 @@
1/*
2 * This file is part of the Chelsio T4 Ethernet driver for Linux.
3 *
4 * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved.
5 *
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
11 *
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
14 * conditions are met:
15 *
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
18 * disclaimer.
19 *
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 * SOFTWARE.
33 */
34
35#include <linux/skbuff.h>
36#include <linux/netdevice.h>
37#include <linux/etherdevice.h>
38#include <linux/if_vlan.h>
39#include <linux/ip.h>
40#include <linux/dma-mapping.h>
41#include <linux/jiffies.h>
42#include <net/ipv6.h>
43#include <net/tcp.h>
44#include "cxgb4.h"
45#include "t4_regs.h"
46#include "t4_msg.h"
47#include "t4fw_api.h"
48
49/*
50 * Rx buffer size. We use largish buffers if possible but settle for single
51 * pages under memory shortage.
52 */
53#if PAGE_SHIFT >= 16
54# define FL_PG_ORDER 0
55#else
56# define FL_PG_ORDER (16 - PAGE_SHIFT)
57#endif
58
59/* RX_PULL_LEN should be <= RX_COPY_THRES */
60#define RX_COPY_THRES 256
61#define RX_PULL_LEN 128
62
63/*
64 * Main body length for sk_buffs used for Rx Ethernet packets with fragments.
65 * Should be >= RX_PULL_LEN but possibly bigger to give pskb_may_pull some room.
66 */
67#define RX_PKT_SKB_LEN 512
68
69/* Ethernet header padding prepended to RX_PKTs */
70#define RX_PKT_PAD 2
71
72/*
73 * Max number of Tx descriptors we clean up at a time. Should be modest as
 74 * freeing skbs isn't cheap and it happens while holding locks. As long as we
 75 * free packets faster than they arrive we eventually catch up and keep
76 * the amortized cost reasonable. Must be >= 2 * TXQ_STOP_THRES.
77 */
78#define MAX_TX_RECLAIM 16
79
80/*
81 * Max number of Rx buffers we replenish at a time. Again keep this modest,
82 * allocating buffers isn't cheap either.
83 */
84#define MAX_RX_REFILL 16U
85
86/*
87 * Period of the Rx queue check timer. This timer is infrequent as it has
88 * something to do only when the system experiences severe memory shortage.
89 */
90#define RX_QCHECK_PERIOD (HZ / 2)
91
92/*
93 * Period of the Tx queue check timer.
94 */
95#define TX_QCHECK_PERIOD (HZ / 2)
96
97/*
98 * Max number of Tx descriptors to be reclaimed by the Tx timer.
99 */
100#define MAX_TIMER_TX_RECLAIM 100
101
102/*
103 * Timer index used when backing off due to memory shortage.
104 */
105#define NOMEM_TMR_IDX (SGE_NTIMERS - 1)
106
107/*
108 * An FL with <= FL_STARVE_THRES buffers is starving and a periodic timer will
109 * attempt to refill it.
110 */
111#define FL_STARVE_THRES 4
112
113/*
114 * Suspend an Ethernet Tx queue with fewer available descriptors than this.
115 * This is the same as calc_tx_descs() for a TSO packet with
116 * nr_frags == MAX_SKB_FRAGS.
117 */
118#define ETHTXQ_STOP_THRES \
119 (1 + DIV_ROUND_UP((3 * MAX_SKB_FRAGS) / 2 + (MAX_SKB_FRAGS & 1), 8))
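/*
 * Worked example (assuming 4 KB pages, i.e. MAX_SKB_FRAGS == 18): the SGL for
 * 18 fragments takes (3 * 18) / 2 = 27 flits, so the threshold comes out to
 * 1 + DIV_ROUND_UP(27, 8) = 5 Tx descriptors.
 */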
120
121/*
122 * Suspension threshold for non-Ethernet Tx queues. We require enough room
123 * for a full sized WR.
124 */
125#define TXQ_STOP_THRES (SGE_MAX_WR_LEN / sizeof(struct tx_desc))
126
127/*
128 * Max Tx descriptor space we allow for an Ethernet packet to be inlined
129 * into a WR.
130 */
131#define MAX_IMM_TX_PKT_LEN 128
132
133/*
134 * Max size of a WR sent through a control Tx queue.
135 */
136#define MAX_CTRL_WR_LEN SGE_MAX_WR_LEN
137
138enum {
139 /* packet alignment in FL buffers */
140 FL_ALIGN = L1_CACHE_BYTES < 32 ? 32 : L1_CACHE_BYTES,
141 /* egress status entry size */
142 STAT_LEN = L1_CACHE_BYTES > 64 ? 128 : 64
143};
144
145struct tx_sw_desc { /* SW state per Tx descriptor */
146 struct sk_buff *skb;
147 struct ulptx_sgl *sgl;
148};
149
150struct rx_sw_desc { /* SW state per Rx descriptor */
151 struct page *page;
152 dma_addr_t dma_addr;
153};
154
155/*
156 * The low bits of rx_sw_desc.dma_addr have special meaning.
157 */
158enum {
159 RX_LARGE_BUF = 1 << 0, /* buffer is larger than PAGE_SIZE */
160 RX_UNMAPPED_BUF = 1 << 1, /* buffer is not mapped */
161};
162
163static inline dma_addr_t get_buf_addr(const struct rx_sw_desc *d)
164{
165 return d->dma_addr & ~(dma_addr_t)(RX_LARGE_BUF | RX_UNMAPPED_BUF);
166}
167
168static inline bool is_buf_mapped(const struct rx_sw_desc *d)
169{
170 return !(d->dma_addr & RX_UNMAPPED_BUF);
171}
172
173/**
174 * txq_avail - return the number of available slots in a Tx queue
175 * @q: the Tx queue
176 *
177 * Returns the number of descriptors in a Tx queue available to write new
178 * packets.
179 */
180static inline unsigned int txq_avail(const struct sge_txq *q)
181{
182 return q->size - 1 - q->in_use;
183}
184
185/**
186 * fl_cap - return the capacity of a free-buffer list
187 * @fl: the FL
188 *
189 * Returns the capacity of a free-buffer list. The capacity is less than
190 * the size because one descriptor needs to be left unpopulated, otherwise
191 * HW will think the FL is empty.
192 */
193static inline unsigned int fl_cap(const struct sge_fl *fl)
194{
195 return fl->size - 8; /* 1 descriptor = 8 buffers */
196}
197
198static inline bool fl_starving(const struct sge_fl *fl)
199{
200 return fl->avail - fl->pend_cred <= FL_STARVE_THRES;
201}
202
203static int map_skb(struct device *dev, const struct sk_buff *skb,
204 dma_addr_t *addr)
205{
206 const skb_frag_t *fp, *end;
207 const struct skb_shared_info *si;
208
209 *addr = dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);
210 if (dma_mapping_error(dev, *addr))
211 goto out_err;
212
213 si = skb_shinfo(skb);
214 end = &si->frags[si->nr_frags];
215
216 for (fp = si->frags; fp < end; fp++) {
217 *++addr = dma_map_page(dev, fp->page, fp->page_offset, fp->size,
218 DMA_TO_DEVICE);
219 if (dma_mapping_error(dev, *addr))
220 goto unwind;
221 }
222 return 0;
223
224unwind:
225 while (fp-- > si->frags)
226 dma_unmap_page(dev, *--addr, fp->size, DMA_TO_DEVICE);
227
228 dma_unmap_single(dev, addr[-1], skb_headlen(skb), DMA_TO_DEVICE);
229out_err:
230 return -ENOMEM;
231}
232
233#ifdef CONFIG_NEED_DMA_MAP_STATE
234static void unmap_skb(struct device *dev, const struct sk_buff *skb,
235 const dma_addr_t *addr)
236{
237 const skb_frag_t *fp, *end;
238 const struct skb_shared_info *si;
239
240 dma_unmap_single(dev, *addr++, skb_headlen(skb), DMA_TO_DEVICE);
241
242 si = skb_shinfo(skb);
243 end = &si->frags[si->nr_frags];
244 for (fp = si->frags; fp < end; fp++)
245 dma_unmap_page(dev, *addr++, fp->size, DMA_TO_DEVICE);
246}
247
248/**
249 * deferred_unmap_destructor - unmap a packet when it is freed
250 * @skb: the packet
251 *
252 * This is the packet destructor used for Tx packets that need to remain
253 * mapped until they are freed rather than until their Tx descriptors are
254 * freed.
255 */
256static void deferred_unmap_destructor(struct sk_buff *skb)
257{
258 unmap_skb(skb->dev->dev.parent, skb, (dma_addr_t *)skb->head);
259}
260#endif
261
262static void unmap_sgl(struct device *dev, const struct sk_buff *skb,
263 const struct ulptx_sgl *sgl, const struct sge_txq *q)
264{
265 const struct ulptx_sge_pair *p;
266 unsigned int nfrags = skb_shinfo(skb)->nr_frags;
267
268 if (likely(skb_headlen(skb)))
269 dma_unmap_single(dev, be64_to_cpu(sgl->addr0), ntohl(sgl->len0),
270 DMA_TO_DEVICE);
271 else {
272 dma_unmap_page(dev, be64_to_cpu(sgl->addr0), ntohl(sgl->len0),
273 DMA_TO_DEVICE);
274 nfrags--;
275 }
276
277 /*
278 * the complexity below is because of the possibility of a wrap-around
279 * in the middle of an SGL
280 */
281 for (p = sgl->sge; nfrags >= 2; nfrags -= 2) {
282 if (likely((u8 *)(p + 1) <= (u8 *)q->stat)) {
283unmap: dma_unmap_page(dev, be64_to_cpu(p->addr[0]),
284 ntohl(p->len[0]), DMA_TO_DEVICE);
285 dma_unmap_page(dev, be64_to_cpu(p->addr[1]),
286 ntohl(p->len[1]), DMA_TO_DEVICE);
287 p++;
288 } else if ((u8 *)p == (u8 *)q->stat) {
289 p = (const struct ulptx_sge_pair *)q->desc;
290 goto unmap;
291 } else if ((u8 *)p + 8 == (u8 *)q->stat) {
292 const __be64 *addr = (const __be64 *)q->desc;
293
294 dma_unmap_page(dev, be64_to_cpu(addr[0]),
295 ntohl(p->len[0]), DMA_TO_DEVICE);
296 dma_unmap_page(dev, be64_to_cpu(addr[1]),
297 ntohl(p->len[1]), DMA_TO_DEVICE);
298 p = (const struct ulptx_sge_pair *)&addr[2];
299 } else {
300 const __be64 *addr = (const __be64 *)q->desc;
301
302 dma_unmap_page(dev, be64_to_cpu(p->addr[0]),
303 ntohl(p->len[0]), DMA_TO_DEVICE);
304 dma_unmap_page(dev, be64_to_cpu(addr[0]),
305 ntohl(p->len[1]), DMA_TO_DEVICE);
306 p = (const struct ulptx_sge_pair *)&addr[1];
307 }
308 }
309 if (nfrags) {
310 __be64 addr;
311
312 if ((u8 *)p == (u8 *)q->stat)
313 p = (const struct ulptx_sge_pair *)q->desc;
314 addr = (u8 *)p + 16 <= (u8 *)q->stat ? p->addr[0] :
315 *(const __be64 *)q->desc;
316 dma_unmap_page(dev, be64_to_cpu(addr), ntohl(p->len[0]),
317 DMA_TO_DEVICE);
318 }
319}
320
321/**
322 * free_tx_desc - reclaims Tx descriptors and their buffers
323 * @adapter: the adapter
324 * @q: the Tx queue to reclaim descriptors from
325 * @n: the number of descriptors to reclaim
326 * @unmap: whether the buffers should be unmapped for DMA
327 *
328 * Reclaims Tx descriptors from an SGE Tx queue and frees the associated
329 * Tx buffers. Called with the Tx queue lock held.
330 */
331static void free_tx_desc(struct adapter *adap, struct sge_txq *q,
332 unsigned int n, bool unmap)
333{
334 struct tx_sw_desc *d;
335 unsigned int cidx = q->cidx;
336 struct device *dev = adap->pdev_dev;
337
338 d = &q->sdesc[cidx];
339 while (n--) {
340 if (d->skb) { /* an SGL is present */
341 if (unmap)
342 unmap_sgl(dev, d->skb, d->sgl, q);
343 kfree_skb(d->skb);
344 d->skb = NULL;
345 }
346 ++d;
347 if (++cidx == q->size) {
348 cidx = 0;
349 d = q->sdesc;
350 }
351 }
352 q->cidx = cidx;
353}
354
355/*
356 * Return the number of reclaimable descriptors in a Tx queue.
357 */
358static inline int reclaimable(const struct sge_txq *q)
359{
360 int hw_cidx = ntohs(q->stat->cidx);
361 hw_cidx -= q->cidx;
362 return hw_cidx < 0 ? hw_cidx + q->size : hw_cidx;
363}
364
365/**
366 * reclaim_completed_tx - reclaims completed Tx descriptors
367 * @adap: the adapter
368 * @q: the Tx queue to reclaim completed descriptors from
369 * @unmap: whether the buffers should be unmapped for DMA
370 *
371 * Reclaims Tx descriptors that the SGE has indicated it has processed,
372 * and frees the associated buffers if possible. Called with the Tx
373 * queue locked.
374 */
375static inline void reclaim_completed_tx(struct adapter *adap, struct sge_txq *q,
376 bool unmap)
377{
378 int avail = reclaimable(q);
379
380 if (avail) {
381 /*
382 * Limit the amount of clean up work we do at a time to keep
383 * the Tx lock hold time O(1).
384 */
385 if (avail > MAX_TX_RECLAIM)
386 avail = MAX_TX_RECLAIM;
387
388 free_tx_desc(adap, q, avail, unmap);
389 q->in_use -= avail;
390 }
391}
392
393static inline int get_buf_size(const struct rx_sw_desc *d)
394{
395#if FL_PG_ORDER > 0
396 return (d->dma_addr & RX_LARGE_BUF) ? (PAGE_SIZE << FL_PG_ORDER) :
397 PAGE_SIZE;
398#else
399 return PAGE_SIZE;
400#endif
401}
402
403/**
404 * free_rx_bufs - free the Rx buffers on an SGE free list
405 * @adap: the adapter
406 * @q: the SGE free list to free buffers from
407 * @n: how many buffers to free
408 *
409 * Release the next @n buffers on an SGE free-buffer Rx queue. The
410 * buffers must be made inaccessible to HW before calling this function.
411 */
412static void free_rx_bufs(struct adapter *adap, struct sge_fl *q, int n)
413{
414 while (n--) {
415 struct rx_sw_desc *d = &q->sdesc[q->cidx];
416
417 if (is_buf_mapped(d))
418 dma_unmap_page(adap->pdev_dev, get_buf_addr(d),
419 get_buf_size(d), PCI_DMA_FROMDEVICE);
420 put_page(d->page);
421 d->page = NULL;
422 if (++q->cidx == q->size)
423 q->cidx = 0;
424 q->avail--;
425 }
426}
427
428/**
429 * unmap_rx_buf - unmap the current Rx buffer on an SGE free list
430 * @adap: the adapter
431 * @q: the SGE free list
432 *
433 * Unmap the current buffer on an SGE free-buffer Rx queue. The
434 * buffer must be made inaccessible to HW before calling this function.
435 *
436 * This is similar to @free_rx_bufs above but does not free the buffer.
437 * Do note that the FL still loses any further access to the buffer.
438 */
439static void unmap_rx_buf(struct adapter *adap, struct sge_fl *q)
440{
441 struct rx_sw_desc *d = &q->sdesc[q->cidx];
442
443 if (is_buf_mapped(d))
444 dma_unmap_page(adap->pdev_dev, get_buf_addr(d),
445 get_buf_size(d), PCI_DMA_FROMDEVICE);
446 d->page = NULL;
447 if (++q->cidx == q->size)
448 q->cidx = 0;
449 q->avail--;
450}
451
452static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q)
453{
454 if (q->pend_cred >= 8) {
455 wmb();
456 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL), DBPRIO |
457 QID(q->cntxt_id) | PIDX(q->pend_cred / 8));
458 q->pend_cred &= 7;
459 }
460}
461
462static inline void set_rx_sw_desc(struct rx_sw_desc *sd, struct page *pg,
463 dma_addr_t mapping)
464{
465 sd->page = pg;
466 sd->dma_addr = mapping; /* includes size low bits */
467}
468
469/**
470 * refill_fl - refill an SGE Rx buffer ring
471 * @adap: the adapter
472 * @q: the ring to refill
473 * @n: the number of new buffers to allocate
474 * @gfp: the gfp flags for the allocations
475 *
476 * (Re)populate an SGE free-buffer queue with up to @n new packet buffers,
477 * allocated with the supplied gfp flags. The caller must assure that
478 * @n does not exceed the queue's capacity. If afterwards the queue is
479 * found critically low mark it as starving in the bitmap of starving FLs.
480 *
481 * Returns the number of buffers allocated.
482 */
483static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n,
484 gfp_t gfp)
485{
486 struct page *pg;
487 dma_addr_t mapping;
488 unsigned int cred = q->avail;
489 __be64 *d = &q->desc[q->pidx];
490 struct rx_sw_desc *sd = &q->sdesc[q->pidx];
491
492 gfp |= __GFP_NOWARN; /* failures are expected */
493
494#if FL_PG_ORDER > 0
495 /*
496 * Prefer large buffers
497 */
498 while (n) {
499 pg = alloc_pages(gfp | __GFP_COMP, FL_PG_ORDER);
500 if (unlikely(!pg)) {
501 q->large_alloc_failed++;
502 break; /* fall back to single pages */
503 }
504
505 mapping = dma_map_page(adap->pdev_dev, pg, 0,
506 PAGE_SIZE << FL_PG_ORDER,
507 PCI_DMA_FROMDEVICE);
508 if (unlikely(dma_mapping_error(adap->pdev_dev, mapping))) {
509 __free_pages(pg, FL_PG_ORDER);
510 goto out; /* do not try small pages for this error */
511 }
512 mapping |= RX_LARGE_BUF;
513 *d++ = cpu_to_be64(mapping);
514
515 set_rx_sw_desc(sd, pg, mapping);
516 sd++;
517
518 q->avail++;
519 if (++q->pidx == q->size) {
520 q->pidx = 0;
521 sd = q->sdesc;
522 d = q->desc;
523 }
524 n--;
525 }
526#endif
527
528 while (n--) {
529 pg = __netdev_alloc_page(adap->port[0], gfp);
530 if (unlikely(!pg)) {
531 q->alloc_failed++;
532 break;
533 }
534
535 mapping = dma_map_page(adap->pdev_dev, pg, 0, PAGE_SIZE,
536 PCI_DMA_FROMDEVICE);
537 if (unlikely(dma_mapping_error(adap->pdev_dev, mapping))) {
538 netdev_free_page(adap->port[0], pg);
539 goto out;
540 }
541 *d++ = cpu_to_be64(mapping);
542
543 set_rx_sw_desc(sd, pg, mapping);
544 sd++;
545
546 q->avail++;
547 if (++q->pidx == q->size) {
548 q->pidx = 0;
549 sd = q->sdesc;
550 d = q->desc;
551 }
552 }
553
554out: cred = q->avail - cred;
555 q->pend_cred += cred;
556 ring_fl_db(adap, q);
557
558 if (unlikely(fl_starving(q))) {
559 smp_wmb();
560 set_bit(q->cntxt_id, adap->sge.starving_fl);
561 }
562
563 return cred;
564}
565
566static inline void __refill_fl(struct adapter *adap, struct sge_fl *fl)
567{
568 refill_fl(adap, fl, min(MAX_RX_REFILL, fl_cap(fl) - fl->avail),
569 GFP_ATOMIC);
570}
571
572/**
573 * alloc_ring - allocate resources for an SGE descriptor ring
574 * @dev: the PCI device's core device
575 * @nelem: the number of descriptors
576 * @elem_size: the size of each descriptor
577 * @sw_size: the size of the SW state associated with each ring element
578 * @phys: the physical address of the allocated ring
579 * @metadata: address of the array holding the SW state for the ring
580 * @stat_size: extra space in HW ring for status information
581 *
582 * Allocates resources for an SGE descriptor ring, such as Tx queues,
583 * free buffer lists, or response queues. Each SGE ring requires
584 * space for its HW descriptors plus, optionally, space for the SW state
585 * associated with each HW entry (the metadata). The function returns
586 * three values: the virtual address for the HW ring (the return value
587 * of the function), the bus address of the HW ring, and the address
588 * of the SW ring.
589 */
590static void *alloc_ring(struct device *dev, size_t nelem, size_t elem_size,
591 size_t sw_size, dma_addr_t *phys, void *metadata,
592 size_t stat_size)
593{
594 size_t len = nelem * elem_size + stat_size;
595 void *s = NULL;
596 void *p = dma_alloc_coherent(dev, len, phys, GFP_KERNEL);
597
598 if (!p)
599 return NULL;
600 if (sw_size) {
601 s = kcalloc(nelem, sw_size, GFP_KERNEL);
602
603 if (!s) {
604 dma_free_coherent(dev, len, p, *phys);
605 return NULL;
606 }
607 }
608 if (metadata)
609 *(void **)metadata = s;
610 memset(p, 0, len);
611 return p;
612}
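/*
 * Illustrative call, not part of this patch (the field names follow the
 * sge_txq usage elsewhere in the driver): a Tx ring with software state
 * would be set up roughly as
 *
 *	q->desc = alloc_ring(adap->pdev_dev, q->size, sizeof(struct tx_desc),
 *			     sizeof(struct tx_sw_desc), &q->phys_addr,
 *			     &q->sdesc, STAT_LEN);
 *
 * with q->desc left NULL on failure.
 */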
613
614/**
615 * sgl_len - calculates the size of an SGL of the given capacity
616 * @n: the number of SGL entries
617 *
618 * Calculates the number of flits needed for a scatter/gather list that
619 * can hold the given number of entries.
620 */
621static inline unsigned int sgl_len(unsigned int n)
622{
623 n--;
624 return (3 * n) / 2 + (n & 1) + 2;
625}
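/*
 * For example, sgl_len(1) = 2 flits (the ulptx_sgl header with its first
 * address/length pair), sgl_len(2) = 4 and sgl_len(3) = 5: each additional
 * pair of addresses packs into 3 flits.
 */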
626
627/**
628 * flits_to_desc - returns the num of Tx descriptors for the given flits
629 * @n: the number of flits
630 *
631 * Returns the number of Tx descriptors needed for the supplied number
632 * of flits.
633 */
634static inline unsigned int flits_to_desc(unsigned int n)
635{
636 BUG_ON(n > SGE_MAX_WR_LEN / 8);
637 return DIV_ROUND_UP(n, 8);
638}
639
640/**
641 * is_eth_imm - can an Ethernet packet be sent as immediate data?
642 * @skb: the packet
643 *
644 * Returns whether an Ethernet packet is small enough to fit as
645 * immediate data.
646 */
647static inline int is_eth_imm(const struct sk_buff *skb)
648{
649 return skb->len <= MAX_IMM_TX_PKT_LEN - sizeof(struct cpl_tx_pkt);
650}
651
652/**
653 * calc_tx_flits - calculate the number of flits for a packet Tx WR
654 * @skb: the packet
655 *
656 * Returns the number of flits needed for a Tx WR for the given Ethernet
657 * packet, including the needed WR and CPL headers.
658 */
659static inline unsigned int calc_tx_flits(const struct sk_buff *skb)
660{
661 unsigned int flits;
662
663 if (is_eth_imm(skb))
664 return DIV_ROUND_UP(skb->len + sizeof(struct cpl_tx_pkt), 8);
665
666 flits = sgl_len(skb_shinfo(skb)->nr_frags + 1) + 4;
667 if (skb_shinfo(skb)->gso_size)
668 flits += 2;
669 return flits;
670}
671
672/**
673 * calc_tx_descs - calculate the number of Tx descriptors for a packet
674 * @skb: the packet
675 *
676 * Returns the number of Tx descriptors needed for the given Ethernet
677 * packet, including the needed WR and CPL headers.
678 */
679static inline unsigned int calc_tx_descs(const struct sk_buff *skb)
680{
681 return flits_to_desc(calc_tx_flits(skb));
682}
683
684/**
685 * write_sgl - populate a scatter/gather list for a packet
686 * @skb: the packet
687 * @q: the Tx queue we are writing into
688 * @sgl: starting location for writing the SGL
689 * @end: points right after the end of the SGL
690 * @start: start offset into skb main-body data to include in the SGL
691 * @addr: the list of bus addresses for the SGL elements
692 *
693 * Generates a gather list for the buffers that make up a packet.
694 * The caller must provide adequate space for the SGL that will be written.
695 * The SGL includes all of the packet's page fragments and the data in its
696 * main body except for the first @start bytes. @sgl must be 16-byte
697 * aligned and within a Tx descriptor with available space. @end points
698 * right after the end of the SGL but does not account for any potential
699 * wrap around, i.e., @end > @sgl.
700 */
701static void write_sgl(const struct sk_buff *skb, struct sge_txq *q,
702 struct ulptx_sgl *sgl, u64 *end, unsigned int start,
703 const dma_addr_t *addr)
704{
705 unsigned int i, len;
706 struct ulptx_sge_pair *to;
707 const struct skb_shared_info *si = skb_shinfo(skb);
708 unsigned int nfrags = si->nr_frags;
709 struct ulptx_sge_pair buf[MAX_SKB_FRAGS / 2 + 1];
710
711 len = skb_headlen(skb) - start;
712 if (likely(len)) {
713 sgl->len0 = htonl(len);
714 sgl->addr0 = cpu_to_be64(addr[0] + start);
715 nfrags++;
716 } else {
717 sgl->len0 = htonl(si->frags[0].size);
718 sgl->addr0 = cpu_to_be64(addr[1]);
719 }
720
721 sgl->cmd_nsge = htonl(ULPTX_CMD(ULP_TX_SC_DSGL) | ULPTX_NSGE(nfrags));
722 if (likely(--nfrags == 0))
723 return;
724 /*
725 * Most of the complexity below deals with the possibility we hit the
726 * end of the queue in the middle of writing the SGL. For this case
727 * only we create the SGL in a temporary buffer and then copy it.
728 */
729 to = (u8 *)end > (u8 *)q->stat ? buf : sgl->sge;
730
731 for (i = (nfrags != si->nr_frags); nfrags >= 2; nfrags -= 2, to++) {
732 to->len[0] = cpu_to_be32(si->frags[i].size);
733 to->len[1] = cpu_to_be32(si->frags[++i].size);
734 to->addr[0] = cpu_to_be64(addr[i]);
735 to->addr[1] = cpu_to_be64(addr[++i]);
736 }
737 if (nfrags) {
738 to->len[0] = cpu_to_be32(si->frags[i].size);
739 to->len[1] = cpu_to_be32(0);
740 to->addr[0] = cpu_to_be64(addr[i + 1]);
741 }
742 if (unlikely((u8 *)end > (u8 *)q->stat)) {
743 unsigned int part0 = (u8 *)q->stat - (u8 *)sgl->sge, part1;
744
745 if (likely(part0))
746 memcpy(sgl->sge, buf, part0);
747 part1 = (u8 *)end - (u8 *)q->stat;
748 memcpy(q->desc, (u8 *)buf + part0, part1);
749 end = (void *)q->desc + part1;
750 }
751 if ((uintptr_t)end & 8) /* 0-pad to multiple of 16 */
752 *(u64 *)end = 0;
753}
754
755/**
756 * ring_tx_db - check and potentially ring a Tx queue's doorbell
757 * @adap: the adapter
758 * @q: the Tx queue
759 * @n: number of new descriptors to give to HW
760 *
 761 * Ring the doorbell for a Tx queue.
762 */
763static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q, int n)
764{
765 wmb(); /* write descriptors before telling HW */
766 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
767 QID(q->cntxt_id) | PIDX(n));
768}
769
770/**
771 * inline_tx_skb - inline a packet's data into Tx descriptors
772 * @skb: the packet
773 * @q: the Tx queue where the packet will be inlined
774 * @pos: starting position in the Tx queue where to inline the packet
775 *
776 * Inline a packet's contents directly into Tx descriptors, starting at
777 * the given position within the Tx DMA ring.
778 * Most of the complexity of this operation is dealing with wrap arounds
779 * in the middle of the packet we want to inline.
780 */
781static void inline_tx_skb(const struct sk_buff *skb, const struct sge_txq *q,
782 void *pos)
783{
784 u64 *p;
785 int left = (void *)q->stat - pos;
786
787 if (likely(skb->len <= left)) {
788 if (likely(!skb->data_len))
789 skb_copy_from_linear_data(skb, pos, skb->len);
790 else
791 skb_copy_bits(skb, 0, pos, skb->len);
792 pos += skb->len;
793 } else {
794 skb_copy_bits(skb, 0, pos, left);
795 skb_copy_bits(skb, left, q->desc, skb->len - left);
796 pos = (void *)q->desc + (skb->len - left);
797 }
798
799 /* 0-pad to multiple of 16 */
800 p = PTR_ALIGN(pos, 8);
801 if ((uintptr_t)p & 8)
802 *p = 0;
803}
804
805/*
806 * Figure out what HW csum a packet wants and return the appropriate control
807 * bits.
808 */
809static u64 hwcsum(const struct sk_buff *skb)
810{
811 int csum_type;
812 const struct iphdr *iph = ip_hdr(skb);
813
814 if (iph->version == 4) {
815 if (iph->protocol == IPPROTO_TCP)
816 csum_type = TX_CSUM_TCPIP;
817 else if (iph->protocol == IPPROTO_UDP)
818 csum_type = TX_CSUM_UDPIP;
819 else {
820nocsum: /*
821 * unknown protocol, disable HW csum
822 * and hope a bad packet is detected
823 */
824 return TXPKT_L4CSUM_DIS;
825 }
826 } else {
827 /*
828 * this doesn't work with extension headers
829 */
830 const struct ipv6hdr *ip6h = (const struct ipv6hdr *)iph;
831
832 if (ip6h->nexthdr == IPPROTO_TCP)
833 csum_type = TX_CSUM_TCPIP6;
834 else if (ip6h->nexthdr == IPPROTO_UDP)
835 csum_type = TX_CSUM_UDPIP6;
836 else
837 goto nocsum;
838 }
839
840 if (likely(csum_type >= TX_CSUM_TCPIP))
841 return TXPKT_CSUM_TYPE(csum_type) |
842 TXPKT_IPHDR_LEN(skb_network_header_len(skb)) |
843 TXPKT_ETHHDR_LEN(skb_network_offset(skb) - ETH_HLEN);
844 else {
845 int start = skb_transport_offset(skb);
846
847 return TXPKT_CSUM_TYPE(csum_type) | TXPKT_CSUM_START(start) |
848 TXPKT_CSUM_LOC(start + skb->csum_offset);
849 }
850}
851
852static void eth_txq_stop(struct sge_eth_txq *q)
853{
854 netif_tx_stop_queue(q->txq);
855 q->q.stops++;
856}
857
858static inline void txq_advance(struct sge_txq *q, unsigned int n)
859{
860 q->in_use += n;
861 q->pidx += n;
862 if (q->pidx >= q->size)
863 q->pidx -= q->size;
864}
865
866/**
867 * t4_eth_xmit - add a packet to an Ethernet Tx queue
868 * @skb: the packet
869 * @dev: the egress net device
870 *
871 * Add a packet to an SGE Ethernet Tx queue. Runs with softirqs disabled.
872 */
873netdev_tx_t t4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
874{
875 u32 wr_mid;
876 u64 cntrl, *end;
877 int qidx, credits;
878 unsigned int flits, ndesc;
879 struct adapter *adap;
880 struct sge_eth_txq *q;
881 const struct port_info *pi;
882 struct fw_eth_tx_pkt_wr *wr;
883 struct cpl_tx_pkt_core *cpl;
884 const struct skb_shared_info *ssi;
885 dma_addr_t addr[MAX_SKB_FRAGS + 1];
886
887 /*
 888 * The chip minimum packet length is 10 octets, but play it safe and reject
889 * anything shorter than an Ethernet header.
890 */
891 if (unlikely(skb->len < ETH_HLEN)) {
892out_free: dev_kfree_skb(skb);
893 return NETDEV_TX_OK;
894 }
895
896 pi = netdev_priv(dev);
897 adap = pi->adapter;
898 qidx = skb_get_queue_mapping(skb);
899 q = &adap->sge.ethtxq[qidx + pi->first_qset];
900
901 reclaim_completed_tx(adap, &q->q, true);
902
903 flits = calc_tx_flits(skb);
904 ndesc = flits_to_desc(flits);
905 credits = txq_avail(&q->q) - ndesc;
906
907 if (unlikely(credits < 0)) {
908 eth_txq_stop(q);
909 dev_err(adap->pdev_dev,
910 "%s: Tx ring %u full while queue awake!\n",
911 dev->name, qidx);
912 return NETDEV_TX_BUSY;
913 }
914
915 if (!is_eth_imm(skb) &&
916 unlikely(map_skb(adap->pdev_dev, skb, addr) < 0)) {
917 q->mapping_err++;
918 goto out_free;
919 }
920
921 wr_mid = FW_WR_LEN16(DIV_ROUND_UP(flits, 2));
922 if (unlikely(credits < ETHTXQ_STOP_THRES)) {
923 eth_txq_stop(q);
924 wr_mid |= FW_WR_EQUEQ | FW_WR_EQUIQ;
925 }
926
927 wr = (void *)&q->q.desc[q->q.pidx];
928 wr->equiq_to_len16 = htonl(wr_mid);
929 wr->r3 = cpu_to_be64(0);
930 end = (u64 *)wr + flits;
931
932 ssi = skb_shinfo(skb);
933 if (ssi->gso_size) {
934 struct cpl_tx_pkt_lso *lso = (void *)wr;
935 bool v6 = (ssi->gso_type & SKB_GSO_TCPV6) != 0;
936 int l3hdr_len = skb_network_header_len(skb);
937 int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN;
938
939 wr->op_immdlen = htonl(FW_WR_OP(FW_ETH_TX_PKT_WR) |
940 FW_WR_IMMDLEN(sizeof(*lso)));
941 lso->lso_ctrl = htonl(LSO_OPCODE(CPL_TX_PKT_LSO) |
942 LSO_FIRST_SLICE | LSO_LAST_SLICE |
943 LSO_IPV6(v6) |
944 LSO_ETHHDR_LEN(eth_xtra_len / 4) |
945 LSO_IPHDR_LEN(l3hdr_len / 4) |
946 LSO_TCPHDR_LEN(tcp_hdr(skb)->doff));
947 lso->ipid_ofst = htons(0);
948 lso->mss = htons(ssi->gso_size);
949 lso->seqno_offset = htonl(0);
950 lso->len = htonl(skb->len);
951 cpl = (void *)(lso + 1);
952 cntrl = TXPKT_CSUM_TYPE(v6 ? TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) |
953 TXPKT_IPHDR_LEN(l3hdr_len) |
954 TXPKT_ETHHDR_LEN(eth_xtra_len);
955 q->tso++;
956 q->tx_cso += ssi->gso_segs;
957 } else {
958 int len;
959
960 len = is_eth_imm(skb) ? skb->len + sizeof(*cpl) : sizeof(*cpl);
961 wr->op_immdlen = htonl(FW_WR_OP(FW_ETH_TX_PKT_WR) |
962 FW_WR_IMMDLEN(len));
963 cpl = (void *)(wr + 1);
964 if (skb->ip_summed == CHECKSUM_PARTIAL) {
965 cntrl = hwcsum(skb) | TXPKT_IPCSUM_DIS;
966 q->tx_cso++;
967 } else
968 cntrl = TXPKT_L4CSUM_DIS | TXPKT_IPCSUM_DIS;
969 }
970
971 if (vlan_tx_tag_present(skb)) {
972 q->vlan_ins++;
973 cntrl |= TXPKT_VLAN_VLD | TXPKT_VLAN(vlan_tx_tag_get(skb));
974 }
975
976 cpl->ctrl0 = htonl(TXPKT_OPCODE(CPL_TX_PKT_XT) |
977 TXPKT_INTF(pi->tx_chan) | TXPKT_PF(0));
978 cpl->pack = htons(0);
979 cpl->len = htons(skb->len);
980 cpl->ctrl1 = cpu_to_be64(cntrl);
981
982 if (is_eth_imm(skb)) {
983 inline_tx_skb(skb, &q->q, cpl + 1);
984 dev_kfree_skb(skb);
985 } else {
986 int last_desc;
987
988 write_sgl(skb, &q->q, (struct ulptx_sgl *)(cpl + 1), end, 0,
989 addr);
990 skb_orphan(skb);
991
992 last_desc = q->q.pidx + ndesc - 1;
993 if (last_desc >= q->q.size)
994 last_desc -= q->q.size;
995 q->q.sdesc[last_desc].skb = skb;
996 q->q.sdesc[last_desc].sgl = (struct ulptx_sgl *)(cpl + 1);
997 }
998
999 txq_advance(&q->q, ndesc);
1000
1001 ring_tx_db(adap, &q->q, ndesc);
1002 return NETDEV_TX_OK;
1003}
1004
1005/**
1006 * reclaim_completed_tx_imm - reclaim completed control-queue Tx descs
1007 * @q: the SGE control Tx queue
1008 *
1009 * This is a variant of reclaim_completed_tx() that is used for Tx queues
1010 * that send only immediate data (presently just the control queues) and
1011 * thus do not have any sk_buffs to release.
1012 */
1013static inline void reclaim_completed_tx_imm(struct sge_txq *q)
1014{
1015 int hw_cidx = ntohs(q->stat->cidx);
1016 int reclaim = hw_cidx - q->cidx;
1017
1018 if (reclaim < 0)
1019 reclaim += q->size;
1020
1021 q->in_use -= reclaim;
1022 q->cidx = hw_cidx;
1023}
1024
1025/**
1026 * is_imm - check whether a packet can be sent as immediate data
1027 * @skb: the packet
1028 *
1029 * Returns true if a packet can be sent as a WR with immediate data.
1030 */
1031static inline int is_imm(const struct sk_buff *skb)
1032{
1033 return skb->len <= MAX_CTRL_WR_LEN;
1034}
1035
1036/**
1037 * ctrlq_check_stop - check if a control queue is full and should stop
1038 * @q: the queue
1039 * @wr: most recent WR written to the queue
1040 *
1041 * Check if a control queue has become full and should be stopped.
1042 * We clean up control queue descriptors very lazily, only when we run out of room.
1043 * If the queue is still full after reclaiming any completed descriptors
1044 * we suspend it and have the last WR wake it up.
1045 */
1046static void ctrlq_check_stop(struct sge_ctrl_txq *q, struct fw_wr_hdr *wr)
1047{
1048 reclaim_completed_tx_imm(&q->q);
1049 if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES)) {
1050 wr->lo |= htonl(FW_WR_EQUEQ | FW_WR_EQUIQ);
1051 q->q.stops++;
1052 q->full = 1;
1053 }
1054}
1055
1056/**
1057 * ctrl_xmit - send a packet through an SGE control Tx queue
1058 * @q: the control queue
1059 * @skb: the packet
1060 *
1061 * Send a packet through an SGE control Tx queue. Packets sent through
1062 * a control queue must fit entirely as immediate data.
1063 */
1064static int ctrl_xmit(struct sge_ctrl_txq *q, struct sk_buff *skb)
1065{
1066 unsigned int ndesc;
1067 struct fw_wr_hdr *wr;
1068
1069 if (unlikely(!is_imm(skb))) {
1070 WARN_ON(1);
1071 dev_kfree_skb(skb);
1072 return NET_XMIT_DROP;
1073 }
1074
1075 ndesc = DIV_ROUND_UP(skb->len, sizeof(struct tx_desc));
1076 spin_lock(&q->sendq.lock);
1077
1078 if (unlikely(q->full)) {
1079 skb->priority = ndesc; /* save for restart */
1080 __skb_queue_tail(&q->sendq, skb);
1081 spin_unlock(&q->sendq.lock);
1082 return NET_XMIT_CN;
1083 }
1084
1085 wr = (struct fw_wr_hdr *)&q->q.desc[q->q.pidx];
1086 inline_tx_skb(skb, &q->q, wr);
1087
1088 txq_advance(&q->q, ndesc);
1089 if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES))
1090 ctrlq_check_stop(q, wr);
1091
1092 ring_tx_db(q->adap, &q->q, ndesc);
1093 spin_unlock(&q->sendq.lock);
1094
1095 kfree_skb(skb);
1096 return NET_XMIT_SUCCESS;
1097}
1098
1099/**
1100 * restart_ctrlq - restart a suspended control queue
1101 * @data: the control queue to restart
1102 *
1103 * Resumes transmission on a suspended Tx control queue.
1104 */
1105static void restart_ctrlq(unsigned long data)
1106{
1107 struct sk_buff *skb;
1108 unsigned int written = 0;
1109 struct sge_ctrl_txq *q = (struct sge_ctrl_txq *)data;
1110
1111 spin_lock(&q->sendq.lock);
1112 reclaim_completed_tx_imm(&q->q);
1113 BUG_ON(txq_avail(&q->q) < TXQ_STOP_THRES); /* q should be empty */
1114
1115 while ((skb = __skb_dequeue(&q->sendq)) != NULL) {
1116 struct fw_wr_hdr *wr;
1117 unsigned int ndesc = skb->priority; /* previously saved */
1118
1119 /*
1120 * Write descriptors and free skbs outside the lock to limit
1121 * wait times. q->full is still set so new skbs will be queued.
1122 */
1123 spin_unlock(&q->sendq.lock);
1124
1125 wr = (struct fw_wr_hdr *)&q->q.desc[q->q.pidx];
1126 inline_tx_skb(skb, &q->q, wr);
1127 kfree_skb(skb);
1128
1129 written += ndesc;
1130 txq_advance(&q->q, ndesc);
1131 if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES)) {
1132 unsigned long old = q->q.stops;
1133
1134 ctrlq_check_stop(q, wr);
1135 if (q->q.stops != old) { /* suspended anew */
1136 spin_lock(&q->sendq.lock);
1137 goto ringdb;
1138 }
1139 }
1140 if (written > 16) {
1141 ring_tx_db(q->adap, &q->q, written);
1142 written = 0;
1143 }
1144 spin_lock(&q->sendq.lock);
1145 }
1146 q->full = 0;
1147ringdb: if (written)
1148 ring_tx_db(q->adap, &q->q, written);
1149 spin_unlock(&q->sendq.lock);
1150}
1151
1152/**
1153 * t4_mgmt_tx - send a management message
1154 * @adap: the adapter
1155 * @skb: the packet containing the management message
1156 *
1157 * Send a management message through control queue 0.
1158 */
1159int t4_mgmt_tx(struct adapter *adap, struct sk_buff *skb)
1160{
1161 int ret;
1162
1163 local_bh_disable();
1164 ret = ctrl_xmit(&adap->sge.ctrlq[0], skb);
1165 local_bh_enable();
1166 return ret;
1167}
1168
1169/**
1170 * is_ofld_imm - check whether a packet can be sent as immediate data
1171 * @skb: the packet
1172 *
1173 * Returns true if a packet can be sent as an offload WR with immediate
1174 * data. We currently use the same limit as for Ethernet packets.
1175 */
1176static inline int is_ofld_imm(const struct sk_buff *skb)
1177{
1178 return skb->len <= MAX_IMM_TX_PKT_LEN;
1179}
1180
1181/**
1182 * calc_tx_flits_ofld - calculate # of flits for an offload packet
1183 * @skb: the packet
1184 *
1185 * Returns the number of flits needed for the given offload packet.
1186 * These packets are already fully constructed and no additional headers
1187 * will be added.
1188 */
1189static inline unsigned int calc_tx_flits_ofld(const struct sk_buff *skb)
1190{
1191 unsigned int flits, cnt;
1192
1193 if (is_ofld_imm(skb))
1194 return DIV_ROUND_UP(skb->len, 8);
1195
1196 flits = skb_transport_offset(skb) / 8U; /* headers */
1197 cnt = skb_shinfo(skb)->nr_frags;
1198 if (skb->tail != skb->transport_header)
1199 cnt++;
1200 return flits + sgl_len(cnt);
1201}
1202
1203/**
1204 * txq_stop_maperr - stop a Tx queue due to I/O MMU exhaustion
1205 * @q: the queue to stop
1207 *
1208 * Mark a Tx queue stopped due to I/O MMU exhaustion and resulting
1209 * inability to map packets. A periodic timer attempts to restart
1210 * queues so marked.
1211 */
1212static void txq_stop_maperr(struct sge_ofld_txq *q)
1213{
1214 q->mapping_err++;
1215 q->q.stops++;
1216 set_bit(q->q.cntxt_id, q->adap->sge.txq_maperr);
1217}
1218
1219/**
1220 * ofldtxq_stop - stop an offload Tx queue that has become full
1221 * @q: the queue to stop
1222 * @skb: the packet causing the queue to become full
1223 *
1224 * Stops an offload Tx queue that has become full and modifies the packet
1225 * being written to request a wakeup.
1226 */
1227static void ofldtxq_stop(struct sge_ofld_txq *q, struct sk_buff *skb)
1228{
1229 struct fw_wr_hdr *wr = (struct fw_wr_hdr *)skb->data;
1230
1231 wr->lo |= htonl(FW_WR_EQUEQ | FW_WR_EQUIQ);
1232 q->q.stops++;
1233 q->full = 1;
1234}
1235
1236/**
1237 * service_ofldq - service an offload Tx queue
1238 * @q: the offload queue
1239 *
1240 * Services an offload Tx queue by moving packets from its packet queue
1241 * to the HW Tx ring. The function starts and ends with the queue locked.
1242 */
1243static void service_ofldq(struct sge_ofld_txq *q)
1244{
1245 u64 *pos;
1246 int credits;
1247 struct sk_buff *skb;
1248 unsigned int written = 0;
1249 unsigned int flits, ndesc;
1250
1251 while ((skb = skb_peek(&q->sendq)) != NULL && !q->full) {
1252 /*
1253 * We drop the lock but leave skb on sendq, thus retaining
1254 * exclusive access to the state of the queue.
1255 */
1256 spin_unlock(&q->sendq.lock);
1257
1258 reclaim_completed_tx(q->adap, &q->q, false);
1259
1260 flits = skb->priority; /* previously saved */
1261 ndesc = flits_to_desc(flits);
1262 credits = txq_avail(&q->q) - ndesc;
1263 BUG_ON(credits < 0);
1264 if (unlikely(credits < TXQ_STOP_THRES))
1265 ofldtxq_stop(q, skb);
1266
1267 pos = (u64 *)&q->q.desc[q->q.pidx];
1268 if (is_ofld_imm(skb))
1269 inline_tx_skb(skb, &q->q, pos);
1270 else if (map_skb(q->adap->pdev_dev, skb,
1271 (dma_addr_t *)skb->head)) {
1272 txq_stop_maperr(q);
1273 spin_lock(&q->sendq.lock);
1274 break;
1275 } else {
1276 int last_desc, hdr_len = skb_transport_offset(skb);
1277
1278 memcpy(pos, skb->data, hdr_len);
1279 write_sgl(skb, &q->q, (void *)pos + hdr_len,
1280 pos + flits, hdr_len,
1281 (dma_addr_t *)skb->head);
1282#ifdef CONFIG_NEED_DMA_MAP_STATE
1283 skb->dev = q->adap->port[0];
1284 skb->destructor = deferred_unmap_destructor;
1285#endif
1286 last_desc = q->q.pidx + ndesc - 1;
1287 if (last_desc >= q->q.size)
1288 last_desc -= q->q.size;
1289 q->q.sdesc[last_desc].skb = skb;
1290 }
1291
1292 txq_advance(&q->q, ndesc);
1293 written += ndesc;
1294 if (unlikely(written > 32)) {
1295 ring_tx_db(q->adap, &q->q, written);
1296 written = 0;
1297 }
1298
1299 spin_lock(&q->sendq.lock);
1300 __skb_unlink(skb, &q->sendq);
1301 if (is_ofld_imm(skb))
1302 kfree_skb(skb);
1303 }
1304 if (likely(written))
1305 ring_tx_db(q->adap, &q->q, written);
1306}
1307
1308/**
1309 * ofld_xmit - send a packet through an offload queue
1310 * @q: the Tx offload queue
1311 * @skb: the packet
1312 *
1313 * Send an offload packet through an SGE offload queue.
1314 */
1315static int ofld_xmit(struct sge_ofld_txq *q, struct sk_buff *skb)
1316{
1317 skb->priority = calc_tx_flits_ofld(skb); /* save for restart */
1318 spin_lock(&q->sendq.lock);
1319 __skb_queue_tail(&q->sendq, skb);
1320 if (q->sendq.qlen == 1)
1321 service_ofldq(q);
1322 spin_unlock(&q->sendq.lock);
1323 return NET_XMIT_SUCCESS;
1324}
1325
1326/**
1327 * restart_ofldq - restart a suspended offload queue
1328 * @data: the offload queue to restart
1329 *
1330 * Resumes transmission on a suspended Tx offload queue.
1331 */
1332static void restart_ofldq(unsigned long data)
1333{
1334 struct sge_ofld_txq *q = (struct sge_ofld_txq *)data;
1335
1336 spin_lock(&q->sendq.lock);
1337 q->full = 0; /* the queue actually is completely empty now */
1338 service_ofldq(q);
1339 spin_unlock(&q->sendq.lock);
1340}
1341
1342/**
1343 * skb_txq - return the Tx queue an offload packet should use
1344 * @skb: the packet
1345 *
1346 * Returns the Tx queue an offload packet should use as indicated by bits
1347 * 1-15 in the packet's queue_mapping.
1348 */
1349static inline unsigned int skb_txq(const struct sk_buff *skb)
1350{
1351 return skb->queue_mapping >> 1;
1352}
1353
1354/**
1355 * is_ctrl_pkt - return whether an offload packet is a control packet
1356 * @skb: the packet
1357 *
1358 * Returns whether an offload packet should use an OFLD or a CTRL
1359 * Tx queue as indicated by bit 0 in the packet's queue_mapping.
1360 */
1361static inline unsigned int is_ctrl_pkt(const struct sk_buff *skb)
1362{
1363 return skb->queue_mapping & 1;
1364}
1365
1366static inline int ofld_send(struct adapter *adap, struct sk_buff *skb)
1367{
1368 unsigned int idx = skb_txq(skb);
1369
1370 if (unlikely(is_ctrl_pkt(skb)))
1371 return ctrl_xmit(&adap->sge.ctrlq[idx], skb);
1372 return ofld_xmit(&adap->sge.ofldtxq[idx], skb);
1373}
1374
1375/**
1376 * t4_ofld_send - send an offload packet
1377 * @adap: the adapter
1378 * @skb: the packet
1379 *
1380 * Sends an offload packet. We use the packet queue_mapping to select the
1381 * appropriate Tx queue as follows: bit 0 indicates whether the packet
1382 * should be sent as regular or control, bits 1-15 select the queue.
1383 */
1384int t4_ofld_send(struct adapter *adap, struct sk_buff *skb)
1385{
1386 int ret;
1387
1388 local_bh_disable();
1389 ret = ofld_send(adap, skb);
1390 local_bh_enable();
1391 return ret;
1392}
1393
1394/**
1395 * cxgb4_ofld_send - send an offload packet
1396 * @dev: the net device
1397 * @skb: the packet
1398 *
1399 * Sends an offload packet. This is an exported version of t4_ofld_send(),
1400 * intended for ULDs.
1401 */
1402int cxgb4_ofld_send(struct net_device *dev, struct sk_buff *skb)
1403{
1404 return t4_ofld_send(netdev2adap(dev), skb);
1405}
1406EXPORT_SYMBOL(cxgb4_ofld_send);
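
/*
 * A minimal sketch (not part of this driver) of how a ULD might hand a fully
 * built work request to cxgb4_ofld_send().  The queue_mapping encoding follows
 * the t4_ofld_send() comment above: bit 0 selects a CTRL vs. an OFLD queue,
 * bits 1-15 select the queue index.  The function and parameter names here are
 * illustrative only.
 */
static int example_uld_send_wr(struct net_device *dev, struct sk_buff *skb,
			       unsigned int txq_idx, bool want_ctrlq)
{
	/* bit 0: CTRL (1) or OFLD (0) queue; bits 1-15: queue index */
	skb->queue_mapping = (txq_idx << 1) | (want_ctrlq ? 1 : 0);
	return cxgb4_ofld_send(dev, skb);
}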
1407
1408static inline void copy_frags(struct skb_shared_info *ssi,
1409 const struct pkt_gl *gl, unsigned int offset)
1410{
1411 unsigned int n;
1412
1413 /* usually there's just one frag */
1414 ssi->frags[0].page = gl->frags[0].page;
1415 ssi->frags[0].page_offset = gl->frags[0].page_offset + offset;
1416 ssi->frags[0].size = gl->frags[0].size - offset;
1417 ssi->nr_frags = gl->nfrags;
1418 n = gl->nfrags - 1;
1419 if (n)
1420 memcpy(&ssi->frags[1], &gl->frags[1], n * sizeof(skb_frag_t));
1421
1422 /* get a reference to the last page since we don't own it */
1423 get_page(gl->frags[n].page);
1424}
1425
1426/**
1427 * cxgb4_pktgl_to_skb - build an sk_buff from a packet gather list
1428 * @gl: the gather list
1429 * @skb_len: size of sk_buff main body if it carries fragments
1430 * @pull_len: amount of data to move to the sk_buff's main body
1431 *
1432 * Builds an sk_buff from the given packet gather list. Returns the
1433 * sk_buff or %NULL if sk_buff allocation failed.
1434 */
1435struct sk_buff *cxgb4_pktgl_to_skb(const struct pkt_gl *gl,
1436 unsigned int skb_len, unsigned int pull_len)
1437{
1438 struct sk_buff *skb;
1439
1440 /*
1441 * Below we rely on RX_COPY_THRES being less than the smallest Rx buffer
1442 * size, which is expected since buffers are at least PAGE_SIZE bytes.
1443 * In this case packets up to RX_COPY_THRES have only one fragment.
1444 */
1445 if (gl->tot_len <= RX_COPY_THRES) {
1446 skb = dev_alloc_skb(gl->tot_len);
1447 if (unlikely(!skb))
1448 goto out;
1449 __skb_put(skb, gl->tot_len);
1450 skb_copy_to_linear_data(skb, gl->va, gl->tot_len);
1451 } else {
1452 skb = dev_alloc_skb(skb_len);
1453 if (unlikely(!skb))
1454 goto out;
1455 __skb_put(skb, pull_len);
1456 skb_copy_to_linear_data(skb, gl->va, pull_len);
1457
1458 copy_frags(skb_shinfo(skb), gl, pull_len);
1459 skb->len = gl->tot_len;
1460 skb->data_len = skb->len - pull_len;
1461 skb->truesize += skb->data_len;
1462 }
1463out: return skb;
1464}
1465EXPORT_SYMBOL(cxgb4_pktgl_to_skb);
1466
1467/**
1468 * t4_pktgl_free - free a packet gather list
1469 * @gl: the gather list
1470 *
1471 * Releases the pages of a packet gather list. We do not own the last
1472 * page on the list and do not free it.
1473 */
1474void t4_pktgl_free(const struct pkt_gl *gl)
1475{
1476 int n;
1477 const skb_frag_t *p;
1478
1479 for (p = gl->frags, n = gl->nfrags - 1; n--; p++)
1480 put_page(p->page);
1481}
1482
1483/*
1484 * Process an MPS trace packet. Give it an unused protocol number so it won't
1485 * be delivered to anyone and send it to the stack for capture.
1486 */
1487static noinline int handle_trace_pkt(struct adapter *adap,
1488 const struct pkt_gl *gl)
1489{
1490 struct sk_buff *skb;
1491 struct cpl_trace_pkt *p;
1492
1493 skb = cxgb4_pktgl_to_skb(gl, RX_PULL_LEN, RX_PULL_LEN);
1494 if (unlikely(!skb)) {
1495 t4_pktgl_free(gl);
1496 return 0;
1497 }
1498
1499 p = (struct cpl_trace_pkt *)skb->data;
1500 __skb_pull(skb, sizeof(*p));
1501 skb_reset_mac_header(skb);
1502 skb->protocol = htons(0xffff);
1503 skb->dev = adap->port[0];
1504 netif_receive_skb(skb);
1505 return 0;
1506}
1507
1508static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl,
1509 const struct cpl_rx_pkt *pkt)
1510{
1511 int ret;
1512 struct sk_buff *skb;
1513
1514 skb = napi_get_frags(&rxq->rspq.napi);
1515 if (unlikely(!skb)) {
1516 t4_pktgl_free(gl);
1517 rxq->stats.rx_drops++;
1518 return;
1519 }
1520
1521 copy_frags(skb_shinfo(skb), gl, RX_PKT_PAD);
1522 skb->len = gl->tot_len - RX_PKT_PAD;
1523 skb->data_len = skb->len;
1524 skb->truesize += skb->data_len;
1525 skb->ip_summed = CHECKSUM_UNNECESSARY;
1526 skb_record_rx_queue(skb, rxq->rspq.idx);
1527
1528 if (unlikely(pkt->vlan_ex)) {
1529 struct port_info *pi = netdev_priv(rxq->rspq.netdev);
1530 struct vlan_group *grp = pi->vlan_grp;
1531
1532 rxq->stats.vlan_ex++;
1533 if (likely(grp)) {
1534 ret = vlan_gro_frags(&rxq->rspq.napi, grp,
1535 ntohs(pkt->vlan));
1536 goto stats;
1537 }
1538 }
1539 ret = napi_gro_frags(&rxq->rspq.napi);
1540stats: if (ret == GRO_HELD)
1541 rxq->stats.lro_pkts++;
1542 else if (ret == GRO_MERGED || ret == GRO_MERGED_FREE)
1543 rxq->stats.lro_merged++;
1544 rxq->stats.pkts++;
1545 rxq->stats.rx_cso++;
1546}
1547
1548/**
1549 * t4_ethrx_handler - process an ingress ethernet packet
1550 * @q: the response queue that received the packet
1551 * @rsp: the response queue descriptor holding the RX_PKT message
1552 * @si: the gather list of packet fragments
1553 *
1554 * Process an ingress ethernet packet and deliver it to the stack.
1555 */
1556int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
1557 const struct pkt_gl *si)
1558{
1559 bool csum_ok;
1560 struct sk_buff *skb;
1561 struct port_info *pi;
1562 const struct cpl_rx_pkt *pkt;
1563 struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq);
1564
1565 if (unlikely(*(u8 *)rsp == CPL_TRACE_PKT))
1566 return handle_trace_pkt(q->adap, si);
1567
1568 pkt = (void *)&rsp[1];
1569 csum_ok = pkt->csum_calc && !pkt->err_vec;
1570 if ((pkt->l2info & htonl(RXF_TCP)) &&
1571 (q->netdev->features & NETIF_F_GRO) && csum_ok && !pkt->ip_frag) {
1572 do_gro(rxq, si, pkt);
1573 return 0;
1574 }
1575
1576 skb = cxgb4_pktgl_to_skb(si, RX_PKT_SKB_LEN, RX_PULL_LEN);
1577 if (unlikely(!skb)) {
1578 t4_pktgl_free(si);
1579 rxq->stats.rx_drops++;
1580 return 0;
1581 }
1582
1583 __skb_pull(skb, RX_PKT_PAD); /* remove ethernet header padding */
1584 skb->protocol = eth_type_trans(skb, q->netdev);
1585 skb_record_rx_queue(skb, q->idx);
1586 pi = netdev_priv(skb->dev);
1587 rxq->stats.pkts++;
1588
1589 if (csum_ok && (pi->rx_offload & RX_CSO) &&
1590 (pkt->l2info & htonl(RXF_UDP | RXF_TCP))) {
1591 if (!pkt->ip_frag)
1592 skb->ip_summed = CHECKSUM_UNNECESSARY;
1593 else {
1594 __sum16 c = (__force __sum16)pkt->csum;
1595 skb->csum = csum_unfold(c);
1596 skb->ip_summed = CHECKSUM_COMPLETE;
1597 }
1598 rxq->stats.rx_cso++;
1599 } else
1600 skb->ip_summed = CHECKSUM_NONE;
1601
1602 if (unlikely(pkt->vlan_ex)) {
1603 struct vlan_group *grp = pi->vlan_grp;
1604
1605 rxq->stats.vlan_ex++;
1606 if (likely(grp))
1607 vlan_hwaccel_receive_skb(skb, grp, ntohs(pkt->vlan));
1608 else
1609 dev_kfree_skb_any(skb);
1610 } else
1611 netif_receive_skb(skb);
1612
1613 return 0;
1614}
1615
1616/**
1617 * restore_rx_bufs - put back a packet's Rx buffers
1618 * @si: the packet gather list
1619 * @q: the SGE free list
1620 * @frags: number of FL buffers to restore
1621 *
1622 * Puts back on an FL the Rx buffers associated with @si. The buffers
1623 * have already been unmapped and are left unmapped; we mark them so to
1624 * prevent further unmapping attempts.
1625 *
1626 * This function undoes a series of @unmap_rx_buf calls when we find out
1627 * that the current packet can't be processed right away after all and we
1628 * need to come back to it later. This is a very rare event and there's
1629 * no effort to make this particularly efficient.
1630 */
1631static void restore_rx_bufs(const struct pkt_gl *si, struct sge_fl *q,
1632 int frags)
1633{
1634 struct rx_sw_desc *d;
1635
1636 while (frags--) {
1637 if (q->cidx == 0)
1638 q->cidx = q->size - 1;
1639 else
1640 q->cidx--;
1641 d = &q->sdesc[q->cidx];
1642 d->page = si->frags[frags].page;
1643 d->dma_addr |= RX_UNMAPPED_BUF;
1644 q->avail++;
1645 }
1646}
1647
1648/**
1649 * is_new_response - check if a response is newly written
1650 * @r: the response descriptor
1651 * @q: the response queue
1652 *
1653 * Returns true if a response descriptor contains a yet unprocessed
1654 * response.
1655 */
1656static inline bool is_new_response(const struct rsp_ctrl *r,
1657 const struct sge_rspq *q)
1658{
1659 return RSPD_GEN(r->type_gen) == q->gen;
1660}
1661
1662/**
1663 * rspq_next - advance to the next entry in a response queue
1664 * @q: the queue
1665 *
1666 * Updates the state of a response queue to advance it to the next entry.
1667 */
1668static inline void rspq_next(struct sge_rspq *q)
1669{
1670 q->cur_desc = (void *)q->cur_desc + q->iqe_len;
1671 if (unlikely(++q->cidx == q->size)) {
1672 q->cidx = 0;
1673 q->gen ^= 1;
1674 q->cur_desc = q->desc;
1675 }
1676}
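
/*
 * To illustrate the generation-bit scheme used by is_new_response() and
 * rspq_next(): a queue starts with gen = 1 and the hardware writes that value
 * into each response it completes.  Once the consumer index wraps, gen flips,
 * so entries still carrying the previous pass's generation no longer match
 * q->gen and are correctly reported as not yet written.
 */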
1677
1678/**
1679 * process_responses - process responses from an SGE response queue
1680 * @q: the ingress queue to process
1681 * @budget: how many responses can be processed in this round
1682 *
1683 * Process responses from an SGE response queue up to the supplied budget.
1684 * Responses include received packets as well as control messages from FW
1685 * or HW.
1686 *
1687 * Additionally choose the interrupt holdoff time for the next interrupt
1688 * on this queue. If the system is under memory shortage use a fairly
1689 * long delay to help recovery.
1690 */
1691static int process_responses(struct sge_rspq *q, int budget)
1692{
1693 int ret, rsp_type;
1694 int budget_left = budget;
1695 const struct rsp_ctrl *rc;
1696 struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq);
1697
1698 while (likely(budget_left)) {
1699 rc = (void *)q->cur_desc + (q->iqe_len - sizeof(*rc));
1700 if (!is_new_response(rc, q))
1701 break;
1702
1703 rmb();
1704 rsp_type = RSPD_TYPE(rc->type_gen);
1705 if (likely(rsp_type == RSP_TYPE_FLBUF)) {
1706 skb_frag_t *fp;
1707 struct pkt_gl si;
1708 const struct rx_sw_desc *rsd;
1709 u32 len = ntohl(rc->pldbuflen_qid), bufsz, frags;
1710
1711 if (len & RSPD_NEWBUF) {
1712 if (likely(q->offset > 0)) {
1713 free_rx_bufs(q->adap, &rxq->fl, 1);
1714 q->offset = 0;
1715 }
1716 len &= RSPD_LEN;
1717 }
1718 si.tot_len = len;
1719
1720 /* gather packet fragments */
1721 for (frags = 0, fp = si.frags; ; frags++, fp++) {
1722 rsd = &rxq->fl.sdesc[rxq->fl.cidx];
1723 bufsz = get_buf_size(rsd);
1724 fp->page = rsd->page;
1725 fp->page_offset = q->offset;
1726 fp->size = min(bufsz, len);
1727 len -= fp->size;
1728 if (!len)
1729 break;
1730 unmap_rx_buf(q->adap, &rxq->fl);
1731 }
1732
1733 /*
1734 * Last buffer remains mapped so explicitly make it
1735 * coherent for CPU access.
1736 */
1737 dma_sync_single_for_cpu(q->adap->pdev_dev,
1738 get_buf_addr(rsd),
1739 fp->size, DMA_FROM_DEVICE);
1740
1741 si.va = page_address(si.frags[0].page) +
1742 si.frags[0].page_offset;
1743 prefetch(si.va);
1744
1745 si.nfrags = frags + 1;
1746 ret = q->handler(q, q->cur_desc, &si);
1747 if (likely(ret == 0))
1748 q->offset += ALIGN(fp->size, FL_ALIGN);
1749 else
1750 restore_rx_bufs(&si, &rxq->fl, frags);
1751 } else if (likely(rsp_type == RSP_TYPE_CPL)) {
1752 ret = q->handler(q, q->cur_desc, NULL);
1753 } else {
1754 ret = q->handler(q, (const __be64 *)rc, CXGB4_MSG_AN);
1755 }
1756
1757 if (unlikely(ret)) {
1758 /* couldn't process descriptor, back off for recovery */
1759 q->next_intr_params = QINTR_TIMER_IDX(NOMEM_TMR_IDX);
1760 break;
1761 }
1762
1763 rspq_next(q);
1764 budget_left--;
1765 }
1766
1767 if (q->offset >= 0 && rxq->fl.size - rxq->fl.avail >= 16)
1768 __refill_fl(q->adap, &rxq->fl);
1769 return budget - budget_left;
1770}
1771
1772/**
1773 * napi_rx_handler - the NAPI handler for Rx processing
1774 * @napi: the napi instance
1775 * @budget: how many packets we can process in this round
1776 *
1777 * Handler for new data events when using NAPI. This does not need any
1778 * locking or protection from interrupts as data interrupts are off at
1779 * this point and other adapter interrupts do not interfere (the latter
1780 * is not a concern at all with MSI-X as non-data interrupts then have
1781 * a separate handler).
1782 */
1783static int napi_rx_handler(struct napi_struct *napi, int budget)
1784{
1785 unsigned int params;
1786 struct sge_rspq *q = container_of(napi, struct sge_rspq, napi);
1787 int work_done = process_responses(q, budget);
1788
1789 if (likely(work_done < budget)) {
1790 napi_complete(napi);
1791 params = q->next_intr_params;
1792 q->next_intr_params = q->intr_params;
1793 } else
1794 params = QINTR_TIMER_IDX(7);
1795
1796 t4_write_reg(q->adap, MYPF_REG(SGE_PF_GTS), CIDXINC(work_done) |
1797 INGRESSQID((u32)q->cntxt_id) | SEINTARM(params));
1798 return work_done;
1799}
1800
1801/*
1802 * The MSI-X interrupt handler for an SGE response queue.
1803 */
1804irqreturn_t t4_sge_intr_msix(int irq, void *cookie)
1805{
1806 struct sge_rspq *q = cookie;
1807
1808 napi_schedule(&q->napi);
1809 return IRQ_HANDLED;
1810}
1811
1812/*
1813 * Process the indirect interrupt entries in the interrupt queue and kick off
1814 * NAPI for each queue that has generated an entry.
1815 */
1816static unsigned int process_intrq(struct adapter *adap)
1817{
1818 unsigned int credits;
1819 const struct rsp_ctrl *rc;
1820 struct sge_rspq *q = &adap->sge.intrq;
1821
1822 spin_lock(&adap->sge.intrq_lock);
1823 for (credits = 0; ; credits++) {
1824 rc = (void *)q->cur_desc + (q->iqe_len - sizeof(*rc));
1825 if (!is_new_response(rc, q))
1826 break;
1827
1828 rmb();
1829 if (RSPD_TYPE(rc->type_gen) == RSP_TYPE_INTR) {
1830 unsigned int qid = ntohl(rc->pldbuflen_qid);
1831
1832 napi_schedule(&adap->sge.ingr_map[qid]->napi);
1833 }
1834
1835 rspq_next(q);
1836 }
1837
1838 t4_write_reg(adap, MYPF_REG(SGE_PF_GTS), CIDXINC(credits) |
1839 INGRESSQID(q->cntxt_id) | SEINTARM(q->intr_params));
1840 spin_unlock(&adap->sge.intrq_lock);
1841 return credits;
1842}
1843
1844/*
1845 * The MSI interrupt handler handles data events from SGE response queues as
1846 * well as error and other async events, since they all share the same MSI vector.
1847 */
1848static irqreturn_t t4_intr_msi(int irq, void *cookie)
1849{
1850 struct adapter *adap = cookie;
1851
1852 t4_slow_intr_handler(adap);
1853 process_intrq(adap);
1854 return IRQ_HANDLED;
1855}
1856
1857/*
1858 * Interrupt handler for legacy INTx interrupts.
1859 * Handles data events from SGE response queues as well as error and other
1860 * async events as they all use the same interrupt line.
1861 */
1862static irqreturn_t t4_intr_intx(int irq, void *cookie)
1863{
1864 struct adapter *adap = cookie;
1865
1866 t4_write_reg(adap, MYPF_REG(PCIE_PF_CLI), 0);
1867 if (t4_slow_intr_handler(adap) | process_intrq(adap))
1868 return IRQ_HANDLED;
1869 return IRQ_NONE; /* probably shared interrupt */
1870}
1871
1872/**
1873 * t4_intr_handler - select the top-level interrupt handler
1874 * @adap: the adapter
1875 *
1876 * Selects the top-level interrupt handler based on the type of interrupts
1877 * (MSI-X, MSI, or INTx).
1878 */
1879irq_handler_t t4_intr_handler(struct adapter *adap)
1880{
1881 if (adap->flags & USING_MSIX)
1882 return t4_sge_intr_msix;
1883 if (adap->flags & USING_MSI)
1884 return t4_intr_msi;
1885 return t4_intr_intx;
1886}
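
/*
 * A minimal sketch (not part of this driver) of how the top level might use
 * t4_intr_handler() for the MSI/INTx case, where the handler cookie is the
 * adapter itself.  The "cxgb4" name string and the omission of IRQF_SHARED
 * (which a real INTx path would want) are simplifications for illustration.
 */
static int example_setup_irq(struct adapter *adap)
{
	return request_irq(adap->pdev->irq, t4_intr_handler(adap), 0,
			   "cxgb4", adap);
}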
1887
1888static void sge_rx_timer_cb(unsigned long data)
1889{
1890 unsigned long m;
1891 unsigned int i, cnt[2];
1892 struct adapter *adap = (struct adapter *)data;
1893 struct sge *s = &adap->sge;
1894
1895 for (i = 0; i < ARRAY_SIZE(s->starving_fl); i++)
1896 for (m = s->starving_fl[i]; m; m &= m - 1) {
1897 struct sge_eth_rxq *rxq;
1898 unsigned int id = __ffs(m) + i * BITS_PER_LONG;
1899 struct sge_fl *fl = s->egr_map[id];
1900
1901 clear_bit(id, s->starving_fl);
1902 smp_mb__after_clear_bit();
1903
1904 if (fl_starving(fl)) {
1905 rxq = container_of(fl, struct sge_eth_rxq, fl);
1906 if (napi_reschedule(&rxq->rspq.napi))
1907 fl->starving++;
1908 else
1909 set_bit(id, s->starving_fl);
1910 }
1911 }
1912
1913 t4_write_reg(adap, SGE_DEBUG_INDEX, 13);
1914 cnt[0] = t4_read_reg(adap, SGE_DEBUG_DATA_HIGH);
1915 cnt[1] = t4_read_reg(adap, SGE_DEBUG_DATA_LOW);
1916
1917 for (i = 0; i < 2; i++)
1918 if (cnt[i] >= s->starve_thres) {
1919 if (s->idma_state[i] || cnt[i] == 0xffffffff)
1920 continue;
1921 s->idma_state[i] = 1;
1922 t4_write_reg(adap, SGE_DEBUG_INDEX, 11);
1923 m = t4_read_reg(adap, SGE_DEBUG_DATA_LOW) >> (i * 16);
1924 dev_warn(adap->pdev_dev,
1925 "SGE idma%u starvation detected for "
1926 "queue %lu\n", i, m & 0xffff);
1927 } else if (s->idma_state[i])
1928 s->idma_state[i] = 0;
1929
1930 mod_timer(&s->rx_timer, jiffies + RX_QCHECK_PERIOD);
1931}
1932
1933static void sge_tx_timer_cb(unsigned long data)
1934{
1935 unsigned long m;
1936 unsigned int i, budget;
1937 struct adapter *adap = (struct adapter *)data;
1938 struct sge *s = &adap->sge;
1939
1940 for (i = 0; i < ARRAY_SIZE(s->txq_maperr); i++)
1941 for (m = s->txq_maperr[i]; m; m &= m - 1) {
1942 unsigned long id = __ffs(m) + i * BITS_PER_LONG;
1943 struct sge_ofld_txq *txq = s->egr_map[id];
1944
1945 clear_bit(id, s->txq_maperr);
1946 tasklet_schedule(&txq->qresume_tsk);
1947 }
1948
1949 budget = MAX_TIMER_TX_RECLAIM;
1950 i = s->ethtxq_rover;
1951 do {
1952 struct sge_eth_txq *q = &s->ethtxq[i];
1953
1954 if (q->q.in_use &&
1955 time_after_eq(jiffies, q->txq->trans_start + HZ / 100) &&
1956 __netif_tx_trylock(q->txq)) {
1957 int avail = reclaimable(&q->q);
1958
1959 if (avail) {
1960 if (avail > budget)
1961 avail = budget;
1962
1963 free_tx_desc(adap, &q->q, avail, true);
1964 q->q.in_use -= avail;
1965 budget -= avail;
1966 }
1967 __netif_tx_unlock(q->txq);
1968 }
1969
1970 if (++i >= s->ethqsets)
1971 i = 0;
1972 } while (budget && i != s->ethtxq_rover);
1973 s->ethtxq_rover = i;
1974 mod_timer(&s->tx_timer, jiffies + (budget ? TX_QCHECK_PERIOD : 2));
1975}
1976
1977int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
1978 struct net_device *dev, int intr_idx,
1979 struct sge_fl *fl, rspq_handler_t hnd)
1980{
1981 int ret, flsz = 0;
1982 struct fw_iq_cmd c;
1983 struct port_info *pi = netdev_priv(dev);
1984
1985 /* Size needs to be multiple of 16, including status entry. */
1986 iq->size = roundup(iq->size, 16);
1987
1988 iq->desc = alloc_ring(adap->pdev_dev, iq->size, iq->iqe_len, 0,
1989 &iq->phys_addr, NULL, 0);
1990 if (!iq->desc)
1991 return -ENOMEM;
1992
1993 memset(&c, 0, sizeof(c));
1994 c.op_to_vfn = htonl(FW_CMD_OP(FW_IQ_CMD) | FW_CMD_REQUEST |
1995 FW_CMD_WRITE | FW_CMD_EXEC |
1996 FW_IQ_CMD_PFN(0) | FW_IQ_CMD_VFN(0));
1997 c.alloc_to_len16 = htonl(FW_IQ_CMD_ALLOC | FW_IQ_CMD_IQSTART(1) |
1998 FW_LEN16(c));
1999 c.type_to_iqandstindex = htonl(FW_IQ_CMD_TYPE(FW_IQ_TYPE_FL_INT_CAP) |
2000 FW_IQ_CMD_IQASYNCH(fwevtq) | FW_IQ_CMD_VIID(pi->viid) |
2001 FW_IQ_CMD_IQANDST(intr_idx < 0) | FW_IQ_CMD_IQANUD(1) |
2002 FW_IQ_CMD_IQANDSTINDEX(intr_idx >= 0 ? intr_idx :
2003 -intr_idx - 1));
2004 c.iqdroprss_to_iqesize = htons(FW_IQ_CMD_IQPCIECH(pi->tx_chan) |
2005 FW_IQ_CMD_IQGTSMODE |
2006 FW_IQ_CMD_IQINTCNTTHRESH(iq->pktcnt_idx) |
2007 FW_IQ_CMD_IQESIZE(ilog2(iq->iqe_len) - 4));
2008 c.iqsize = htons(iq->size);
2009 c.iqaddr = cpu_to_be64(iq->phys_addr);
2010
2011 if (fl) {
2012 fl->size = roundup(fl->size, 8);
2013 fl->desc = alloc_ring(adap->pdev_dev, fl->size, sizeof(__be64),
2014 sizeof(struct rx_sw_desc), &fl->addr,
2015 &fl->sdesc, STAT_LEN);
2016 if (!fl->desc)
2017 goto fl_nomem;
2018
2019 flsz = fl->size / 8 + STAT_LEN / sizeof(struct tx_desc);
2020 c.iqns_to_fl0congen = htonl(FW_IQ_CMD_FL0PACKEN |
2021 FW_IQ_CMD_FL0PADEN);
2022 c.fl0dcaen_to_fl0cidxfthresh = htons(FW_IQ_CMD_FL0FBMIN(2) |
2023 FW_IQ_CMD_FL0FBMAX(3));
2024 c.fl0size = htons(flsz);
2025 c.fl0addr = cpu_to_be64(fl->addr);
2026 }
2027
2028 ret = t4_wr_mbox(adap, 0, &c, sizeof(c), &c);
2029 if (ret)
2030 goto err;
2031
2032 netif_napi_add(dev, &iq->napi, napi_rx_handler, 64);
2033 iq->cur_desc = iq->desc;
2034 iq->cidx = 0;
2035 iq->gen = 1;
2036 iq->next_intr_params = iq->intr_params;
2037 iq->cntxt_id = ntohs(c.iqid);
2038 iq->abs_id = ntohs(c.physiqid);
2039 iq->size--; /* subtract status entry */
2040 iq->adap = adap;
2041 iq->netdev = dev;
2042 iq->handler = hnd;
2043
2044 /* set offset to -1 to distinguish ingress queues without FL */
2045 iq->offset = fl ? 0 : -1;
2046
2047 adap->sge.ingr_map[iq->cntxt_id] = iq;
2048
2049 if (fl) {
2050 fl->cntxt_id = htons(c.fl0id);
2051 fl->avail = fl->pend_cred = 0;
2052 fl->pidx = fl->cidx = 0;
2053 fl->alloc_failed = fl->large_alloc_failed = fl->starving = 0;
2054 adap->sge.egr_map[fl->cntxt_id] = fl;
2055 refill_fl(adap, fl, fl_cap(fl), GFP_KERNEL);
2056 }
2057 return 0;
2058
2059fl_nomem:
2060 ret = -ENOMEM;
2061err:
2062 if (iq->desc) {
2063 dma_free_coherent(adap->pdev_dev, iq->size * iq->iqe_len,
2064 iq->desc, iq->phys_addr);
2065 iq->desc = NULL;
2066 }
2067 if (fl && fl->desc) {
2068 kfree(fl->sdesc);
2069 fl->sdesc = NULL;
2070 dma_free_coherent(adap->pdev_dev, flsz * sizeof(struct tx_desc),
2071 fl->desc, fl->addr);
2072 fl->desc = NULL;
2073 }
2074 return ret;
2075}
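
/*
 * A minimal sketch (not part of this driver) of how the top level might use
 * t4_sge_alloc_rxq() to set up one Ethernet Rx queue set.  It assumes the
 * caller has already sized q->rspq and q->fl; the msix_idx parameter is
 * illustrative only.
 */
static int example_alloc_eth_rxq(struct adapter *adap, struct sge_eth_rxq *q,
				 struct net_device *dev, int msix_idx)
{
	return t4_sge_alloc_rxq(adap, &q->rspq, false, dev, msix_idx,
				&q->fl, t4_ethrx_handler);
}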
2076
2077static void init_txq(struct adapter *adap, struct sge_txq *q, unsigned int id)
2078{
2079 q->in_use = 0;
2080 q->cidx = q->pidx = 0;
2081 q->stops = q->restarts = 0;
2082 q->stat = (void *)&q->desc[q->size];
2083 q->cntxt_id = id;
2084 adap->sge.egr_map[id] = q;
2085}
2086
2087int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
2088 struct net_device *dev, struct netdev_queue *netdevq,
2089 unsigned int iqid)
2090{
2091 int ret, nentries;
2092 struct fw_eq_eth_cmd c;
2093 struct port_info *pi = netdev_priv(dev);
2094
2095 /* Add status entries */
2096 nentries = txq->q.size + STAT_LEN / sizeof(struct tx_desc);
2097
2098 txq->q.desc = alloc_ring(adap->pdev_dev, txq->q.size,
2099 sizeof(struct tx_desc), sizeof(struct tx_sw_desc),
2100 &txq->q.phys_addr, &txq->q.sdesc, STAT_LEN);
2101 if (!txq->q.desc)
2102 return -ENOMEM;
2103
2104 memset(&c, 0, sizeof(c));
2105 c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_ETH_CMD) | FW_CMD_REQUEST |
2106 FW_CMD_WRITE | FW_CMD_EXEC |
2107 FW_EQ_ETH_CMD_PFN(0) | FW_EQ_ETH_CMD_VFN(0));
2108 c.alloc_to_len16 = htonl(FW_EQ_ETH_CMD_ALLOC |
2109 FW_EQ_ETH_CMD_EQSTART | FW_LEN16(c));
2110 c.viid_pkd = htonl(FW_EQ_ETH_CMD_VIID(pi->viid));
2111 c.fetchszm_to_iqid = htonl(FW_EQ_ETH_CMD_HOSTFCMODE(2) |
2112 FW_EQ_ETH_CMD_PCIECHN(pi->tx_chan) |
2113 FW_EQ_ETH_CMD_IQID(iqid));
2114 c.dcaen_to_eqsize = htonl(FW_EQ_ETH_CMD_FBMIN(2) |
2115 FW_EQ_ETH_CMD_FBMAX(3) |
2116 FW_EQ_ETH_CMD_CIDXFTHRESH(5) |
2117 FW_EQ_ETH_CMD_EQSIZE(nentries));
2118 c.eqaddr = cpu_to_be64(txq->q.phys_addr);
2119
2120 ret = t4_wr_mbox(adap, 0, &c, sizeof(c), &c);
2121 if (ret) {
2122 kfree(txq->q.sdesc);
2123 txq->q.sdesc = NULL;
2124 dma_free_coherent(adap->pdev_dev,
2125 nentries * sizeof(struct tx_desc),
2126 txq->q.desc, txq->q.phys_addr);
2127 txq->q.desc = NULL;
2128 return ret;
2129 }
2130
2131 init_txq(adap, &txq->q, FW_EQ_ETH_CMD_EQID_GET(ntohl(c.eqid_pkd)));
2132 txq->txq = netdevq;
2133 txq->tso = txq->tx_cso = txq->vlan_ins = 0;
2134 txq->mapping_err = 0;
2135 return 0;
2136}
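
/*
 * A minimal sketch (not part of this driver) of how the top level might pair
 * an Ethernet Tx queue with its netdev queue.  Using the firmware event
 * queue's context id as the ingress queue for CIDX updates is an assumption
 * made here for illustration.
 */
static int example_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
				 struct net_device *dev, int qidx)
{
	return t4_sge_alloc_eth_txq(adap, txq, dev,
				    netdev_get_tx_queue(dev, qidx),
				    adap->sge.fw_evtq.cntxt_id);
}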
2137
2138int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq,
2139 struct net_device *dev, unsigned int iqid,
2140 unsigned int cmplqid)
2141{
2142 int ret, nentries;
2143 struct fw_eq_ctrl_cmd c;
2144 struct port_info *pi = netdev_priv(dev);
2145
2146 /* Add status entries */
2147 nentries = txq->q.size + STAT_LEN / sizeof(struct tx_desc);
2148
2149 txq->q.desc = alloc_ring(adap->pdev_dev, nentries,
2150 sizeof(struct tx_desc), 0, &txq->q.phys_addr,
2151 NULL, 0);
2152 if (!txq->q.desc)
2153 return -ENOMEM;
2154
2155 c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_CTRL_CMD) | FW_CMD_REQUEST |
2156 FW_CMD_WRITE | FW_CMD_EXEC |
2157 FW_EQ_CTRL_CMD_PFN(0) | FW_EQ_CTRL_CMD_VFN(0));
2158 c.alloc_to_len16 = htonl(FW_EQ_CTRL_CMD_ALLOC |
2159 FW_EQ_CTRL_CMD_EQSTART | FW_LEN16(c));
2160 c.cmpliqid_eqid = htonl(FW_EQ_CTRL_CMD_CMPLIQID(cmplqid));
2161 c.physeqid_pkd = htonl(0);
2162 c.fetchszm_to_iqid = htonl(FW_EQ_CTRL_CMD_HOSTFCMODE(2) |
2163 FW_EQ_CTRL_CMD_PCIECHN(pi->tx_chan) |
2164 FW_EQ_CTRL_CMD_IQID(iqid));
2165 c.dcaen_to_eqsize = htonl(FW_EQ_CTRL_CMD_FBMIN(2) |
2166 FW_EQ_CTRL_CMD_FBMAX(3) |
2167 FW_EQ_CTRL_CMD_CIDXFTHRESH(5) |
2168 FW_EQ_CTRL_CMD_EQSIZE(nentries));
2169 c.eqaddr = cpu_to_be64(txq->q.phys_addr);
2170
2171 ret = t4_wr_mbox(adap, 0, &c, sizeof(c), &c);
2172 if (ret) {
2173 dma_free_coherent(adap->pdev_dev,
2174 nentries * sizeof(struct tx_desc),
2175 txq->q.desc, txq->q.phys_addr);
2176 txq->q.desc = NULL;
2177 return ret;
2178 }
2179
2180 init_txq(adap, &txq->q, FW_EQ_CTRL_CMD_EQID_GET(ntohl(c.cmpliqid_eqid)));
2181 txq->adap = adap;
2182 skb_queue_head_init(&txq->sendq);
2183 tasklet_init(&txq->qresume_tsk, restart_ctrlq, (unsigned long)txq);
2184 txq->full = 0;
2185 return 0;
2186}
2187
2188int t4_sge_alloc_ofld_txq(struct adapter *adap, struct sge_ofld_txq *txq,
2189 struct net_device *dev, unsigned int iqid)
2190{
2191 int ret, nentries;
2192 struct fw_eq_ofld_cmd c;
2193 struct port_info *pi = netdev_priv(dev);
2194
2195 /* Add status entries */
2196 nentries = txq->q.size + STAT_LEN / sizeof(struct tx_desc);
2197
2198 txq->q.desc = alloc_ring(adap->pdev_dev, txq->q.size,
2199 sizeof(struct tx_desc), sizeof(struct tx_sw_desc),
2200 &txq->q.phys_addr, &txq->q.sdesc, STAT_LEN);
2201 if (!txq->q.desc)
2202 return -ENOMEM;
2203
2204 memset(&c, 0, sizeof(c));
2205 c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_OFLD_CMD) | FW_CMD_REQUEST |
2206 FW_CMD_WRITE | FW_CMD_EXEC |
2207 FW_EQ_OFLD_CMD_PFN(0) | FW_EQ_OFLD_CMD_VFN(0));
2208 c.alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_ALLOC |
2209 FW_EQ_OFLD_CMD_EQSTART | FW_LEN16(c));
2210 c.fetchszm_to_iqid = htonl(FW_EQ_OFLD_CMD_HOSTFCMODE(2) |
2211 FW_EQ_OFLD_CMD_PCIECHN(pi->tx_chan) |
2212 FW_EQ_OFLD_CMD_IQID(iqid));
2213 c.dcaen_to_eqsize = htonl(FW_EQ_OFLD_CMD_FBMIN(2) |
2214 FW_EQ_OFLD_CMD_FBMAX(3) |
2215 FW_EQ_OFLD_CMD_CIDXFTHRESH(5) |
2216 FW_EQ_OFLD_CMD_EQSIZE(nentries));
2217 c.eqaddr = cpu_to_be64(txq->q.phys_addr);
2218
2219 ret = t4_wr_mbox(adap, 0, &c, sizeof(c), &c);
2220 if (ret) {
2221 kfree(txq->q.sdesc);
2222 txq->q.sdesc = NULL;
2223 dma_free_coherent(adap->pdev_dev,
2224 nentries * sizeof(struct tx_desc),
2225 txq->q.desc, txq->q.phys_addr);
2226 txq->q.desc = NULL;
2227 return ret;
2228 }
2229
2230 init_txq(adap, &txq->q, FW_EQ_OFLD_CMD_EQID_GET(ntohl(c.eqid_pkd)));
2231 txq->adap = adap;
2232 skb_queue_head_init(&txq->sendq);
2233 tasklet_init(&txq->qresume_tsk, restart_ofldq, (unsigned long)txq);
2234 txq->full = 0;
2235 txq->mapping_err = 0;
2236 return 0;
2237}
2238
2239static void free_txq(struct adapter *adap, struct sge_txq *q)
2240{
2241 dma_free_coherent(adap->pdev_dev,
2242 q->size * sizeof(struct tx_desc) + STAT_LEN,
2243 q->desc, q->phys_addr);
2244 q->cntxt_id = 0;
2245 q->sdesc = NULL;
2246 q->desc = NULL;
2247}
2248
2249static void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq,
2250 struct sge_fl *fl)
2251{
2252 unsigned int fl_id = fl ? fl->cntxt_id : 0xffff;
2253
2254 adap->sge.ingr_map[rq->cntxt_id] = NULL;
2255 t4_iq_free(adap, 0, 0, 0, FW_IQ_TYPE_FL_INT_CAP, rq->cntxt_id, fl_id,
2256 0xffff);
2257 dma_free_coherent(adap->pdev_dev, (rq->size + 1) * rq->iqe_len,
2258 rq->desc, rq->phys_addr);
2259 netif_napi_del(&rq->napi);
2260 rq->netdev = NULL;
2261 rq->cntxt_id = rq->abs_id = 0;
2262 rq->desc = NULL;
2263
2264 if (fl) {
2265 free_rx_bufs(adap, fl, fl->avail);
2266 dma_free_coherent(adap->pdev_dev, fl->size * 8 + STAT_LEN,
2267 fl->desc, fl->addr);
2268 kfree(fl->sdesc);
2269 fl->sdesc = NULL;
2270 fl->cntxt_id = 0;
2271 fl->desc = NULL;
2272 }
2273}
2274
2275/**
2276 * t4_free_sge_resources - free SGE resources
2277 * @adap: the adapter
2278 *
2279 * Frees resources used by the SGE queue sets.
2280 */
2281void t4_free_sge_resources(struct adapter *adap)
2282{
2283 int i;
2284 struct sge_eth_rxq *eq = adap->sge.ethrxq;
2285 struct sge_eth_txq *etq = adap->sge.ethtxq;
2286 struct sge_ofld_rxq *oq = adap->sge.ofldrxq;
2287
2288 /* clean up Ethernet Tx/Rx queues */
2289 for (i = 0; i < adap->sge.ethqsets; i++, eq++, etq++) {
2290 if (eq->rspq.desc)
2291 free_rspq_fl(adap, &eq->rspq, &eq->fl);
2292 if (etq->q.desc) {
2293 t4_eth_eq_free(adap, 0, 0, 0, etq->q.cntxt_id);
2294 free_tx_desc(adap, &etq->q, etq->q.in_use, true);
2295 kfree(etq->q.sdesc);
2296 free_txq(adap, &etq->q);
2297 }
2298 }
2299
2300 /* clean up RDMA and iSCSI Rx queues */
2301 for (i = 0; i < adap->sge.ofldqsets; i++, oq++) {
2302 if (oq->rspq.desc)
2303 free_rspq_fl(adap, &oq->rspq, &oq->fl);
2304 }
2305 for (i = 0, oq = adap->sge.rdmarxq; i < adap->sge.rdmaqs; i++, oq++) {
2306 if (oq->rspq.desc)
2307 free_rspq_fl(adap, &oq->rspq, &oq->fl);
2308 }
2309
2310 /* clean up offload Tx queues */
2311 for (i = 0; i < ARRAY_SIZE(adap->sge.ofldtxq); i++) {
2312 struct sge_ofld_txq *q = &adap->sge.ofldtxq[i];
2313
2314 if (q->q.desc) {
2315 tasklet_kill(&q->qresume_tsk);
2316 t4_ofld_eq_free(adap, 0, 0, 0, q->q.cntxt_id);
2317 free_tx_desc(adap, &q->q, q->q.in_use, false);
2318 kfree(q->q.sdesc);
2319 __skb_queue_purge(&q->sendq);
2320 free_txq(adap, &q->q);
2321 }
2322 }
2323
2324 /* clean up control Tx queues */
2325 for (i = 0; i < ARRAY_SIZE(adap->sge.ctrlq); i++) {
2326 struct sge_ctrl_txq *cq = &adap->sge.ctrlq[i];
2327
2328 if (cq->q.desc) {
2329 tasklet_kill(&cq->qresume_tsk);
2330 t4_ctrl_eq_free(adap, 0, 0, 0, cq->q.cntxt_id);
2331 __skb_queue_purge(&cq->sendq);
2332 free_txq(adap, &cq->q);
2333 }
2334 }
2335
2336 if (adap->sge.fw_evtq.desc)
2337 free_rspq_fl(adap, &adap->sge.fw_evtq, NULL);
2338
2339 if (adap->sge.intrq.desc)
2340 free_rspq_fl(adap, &adap->sge.intrq, NULL);
2341
2342 /* clear the reverse egress queue map */
2343 memset(adap->sge.egr_map, 0, sizeof(adap->sge.egr_map));
2344}
2345
2346void t4_sge_start(struct adapter *adap)
2347{
2348 adap->sge.ethtxq_rover = 0;
2349 mod_timer(&adap->sge.rx_timer, jiffies + RX_QCHECK_PERIOD);
2350 mod_timer(&adap->sge.tx_timer, jiffies + TX_QCHECK_PERIOD);
2351}
2352
2353/**
2354 * t4_sge_stop - disable SGE operation
2355 * @adap: the adapter
2356 *
2357 * Stop tasklets and timers associated with the DMA engine. Note that
2358 * this is effective only if measures have been taken to disable any HW
2359 * events that may restart them.
2360 */
2361void t4_sge_stop(struct adapter *adap)
2362{
2363 int i;
2364 struct sge *s = &adap->sge;
2365
2366 if (in_interrupt()) /* actions below require waiting */
2367 return;
2368
2369 if (s->rx_timer.function)
2370 del_timer_sync(&s->rx_timer);
2371 if (s->tx_timer.function)
2372 del_timer_sync(&s->tx_timer);
2373
2374 for (i = 0; i < ARRAY_SIZE(s->ofldtxq); i++) {
2375 struct sge_ofld_txq *q = &s->ofldtxq[i];
2376
2377 if (q->q.desc)
2378 tasklet_kill(&q->qresume_tsk);
2379 }
2380 for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++) {
2381 struct sge_ctrl_txq *cq = &s->ctrlq[i];
2382
2383 if (cq->q.desc)
2384 tasklet_kill(&cq->qresume_tsk);
2385 }
2386}
2387
2388/**
2389 * t4_sge_init - initialize SGE
2390 * @adap: the adapter
2391 *
2392 * Performs SGE initialization needed every time after a chip reset.
2393 * We do not initialize any of the queues here; instead, the driver's
2394 * top level must request them individually.
2395 */
2396void t4_sge_init(struct adapter *adap)
2397{
2398 struct sge *s = &adap->sge;
2399 unsigned int fl_align_log = ilog2(FL_ALIGN);
2400
2401 t4_set_reg_field(adap, SGE_CONTROL, PKTSHIFT_MASK |
2402 INGPADBOUNDARY_MASK | EGRSTATUSPAGESIZE,
2403 INGPADBOUNDARY(fl_align_log - 5) | PKTSHIFT(2) |
2404 RXPKTCPLMODE |
2405 (STAT_LEN == 128 ? EGRSTATUSPAGESIZE : 0));
2406 t4_set_reg_field(adap, SGE_HOST_PAGE_SIZE, HOSTPAGESIZEPF0_MASK,
2407 HOSTPAGESIZEPF0(PAGE_SHIFT - 10));
2408 t4_write_reg(adap, SGE_FL_BUFFER_SIZE0, PAGE_SIZE);
2409#if FL_PG_ORDER > 0
2410 t4_write_reg(adap, SGE_FL_BUFFER_SIZE1, PAGE_SIZE << FL_PG_ORDER);
2411#endif
2412 t4_write_reg(adap, SGE_INGRESS_RX_THRESHOLD,
2413 THRESHOLD_0(s->counter_val[0]) |
2414 THRESHOLD_1(s->counter_val[1]) |
2415 THRESHOLD_2(s->counter_val[2]) |
2416 THRESHOLD_3(s->counter_val[3]));
2417 t4_write_reg(adap, SGE_TIMER_VALUE_0_AND_1,
2418 TIMERVALUE0(us_to_core_ticks(adap, s->timer_val[0])) |
2419 TIMERVALUE1(us_to_core_ticks(adap, s->timer_val[1])));
2420 t4_write_reg(adap, SGE_TIMER_VALUE_2_AND_3,
2421 TIMERVALUE0(us_to_core_ticks(adap, s->timer_val[2])) |
2422 TIMERVALUE1(us_to_core_ticks(adap, s->timer_val[3])));
2423 t4_write_reg(adap, SGE_TIMER_VALUE_4_AND_5,
2424 TIMERVALUE0(us_to_core_ticks(adap, s->timer_val[4])) |
2425 TIMERVALUE1(us_to_core_ticks(adap, s->timer_val[5])));
2426 setup_timer(&s->rx_timer, sge_rx_timer_cb, (unsigned long)adap);
2427 setup_timer(&s->tx_timer, sge_tx_timer_cb, (unsigned long)adap);
2428 s->starve_thres = core_ticks_per_usec(adap) * 1000000; /* 1 s */
2429 s->idma_state[0] = s->idma_state[1] = 0;
2430 spin_lock_init(&s->intrq_lock);
2431}
diff --git a/drivers/net/cxgb4/t4_hw.c b/drivers/net/cxgb4/t4_hw.c
new file mode 100644
index 000000000000..a814a3afe123
--- /dev/null
+++ b/drivers/net/cxgb4/t4_hw.c
@@ -0,0 +1,3131 @@
1/*
2 * This file is part of the Chelsio T4 Ethernet driver for Linux.
3 *
4 * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved.
5 *
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
11 *
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
14 * conditions are met:
15 *
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
18 * disclaimer.
19 *
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 * SOFTWARE.
33 */
34
35#include <linux/init.h>
36#include <linux/delay.h>
37#include "cxgb4.h"
38#include "t4_regs.h"
39#include "t4fw_api.h"
40
41/**
42 * t4_wait_op_done_val - wait until an operation is completed
43 * @adapter: the adapter performing the operation
44 * @reg: the register to check for completion
45 * @mask: a single-bit field within @reg that indicates completion
46 * @polarity: the value of the field when the operation is completed
47 * @attempts: number of check iterations
48 * @delay: delay in usecs between iterations
49 * @valp: where to store the value of the register at completion time
50 *
51 * Wait until an operation is completed by checking a bit in a register
52 * up to @attempts times. If @valp is not NULL the value of the register
53 * at the time it indicated completion is stored there. Returns 0 if the
54 * operation completes and -EAGAIN otherwise.
55 */
56int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
57 int polarity, int attempts, int delay, u32 *valp)
58{
59 while (1) {
60 u32 val = t4_read_reg(adapter, reg);
61
62 if (!!(val & mask) == polarity) {
63 if (valp)
64 *valp = val;
65 return 0;
66 }
67 if (--attempts == 0)
68 return -EAGAIN;
69 if (delay)
70 udelay(delay);
71 }
72}
73
74static inline int t4_wait_op_done(struct adapter *adapter, int reg, u32 mask,
75 int polarity, int attempts, int delay)
76{
77 return t4_wait_op_done_val(adapter, reg, mask, polarity, attempts,
78 delay, NULL);
79}
80
81/**
82 * t4_set_reg_field - set a register field to a value
83 * @adapter: the adapter to program
84 * @addr: the register address
85 * @mask: specifies the portion of the register to modify
86 * @val: the new value for the register field
87 *
88 * Sets a register field specified by the supplied mask to the
89 * given value.
90 */
91void t4_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
92 u32 val)
93{
94 u32 v = t4_read_reg(adapter, addr) & ~mask;
95
96 t4_write_reg(adapter, addr, v | val);
97 (void) t4_read_reg(adapter, addr); /* flush */
98}
99
100/**
101 * t4_read_indirect - read indirectly addressed registers
102 * @adap: the adapter
103 * @addr_reg: register holding the indirect address
104 * @data_reg: register holding the value of the indirect register
105 * @vals: where the read register values are stored
106 * @nregs: how many indirect registers to read
107 * @start_idx: index of first indirect register to read
108 *
109 * Reads registers that are accessed indirectly through an address/data
110 * register pair.
111 */
112void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
113 unsigned int data_reg, u32 *vals, unsigned int nregs,
114 unsigned int start_idx)
115{
116 while (nregs--) {
117 t4_write_reg(adap, addr_reg, start_idx);
118 *vals++ = t4_read_reg(adap, data_reg);
119 start_idx++;
120 }
121}
122
123/**
124 * t4_write_indirect - write indirectly addressed registers
125 * @adap: the adapter
126 * @addr_reg: register holding the indirect addresses
127 * @data_reg: register holding the value for the indirect registers
128 * @vals: values to write
129 * @nregs: how many indirect registers to write
130 * @start_idx: address of first indirect register to write
131 *
132 * Writes a sequential block of registers that are accessed indirectly
133 * through an address/data register pair.
134 */
135void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
136 unsigned int data_reg, const u32 *vals,
137 unsigned int nregs, unsigned int start_idx)
138{
139 while (nregs--) {
140 t4_write_reg(adap, addr_reg, start_idx++);
141 t4_write_reg(adap, data_reg, *vals++);
142 }
143}
144
145/*
146 * Get the reply to a mailbox command and store it in @rpl in big-endian order.
147 */
148static void get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit,
149 u32 mbox_addr)
150{
151 for ( ; nflit; nflit--, mbox_addr += 8)
152 *rpl++ = cpu_to_be64(t4_read_reg64(adap, mbox_addr));
153}
154
155/*
156 * Handle a FW assertion reported in a mailbox.
157 */
158static void fw_asrt(struct adapter *adap, u32 mbox_addr)
159{
160 struct fw_debug_cmd asrt;
161
162 get_mbox_rpl(adap, (__be64 *)&asrt, sizeof(asrt) / 8, mbox_addr);
163 dev_alert(adap->pdev_dev,
164 "FW assertion at %.16s:%u, val0 %#x, val1 %#x\n",
165 asrt.u.assert.filename_0_7, ntohl(asrt.u.assert.line),
166 ntohl(asrt.u.assert.x), ntohl(asrt.u.assert.y));
167}
168
169static void dump_mbox(struct adapter *adap, int mbox, u32 data_reg)
170{
171 dev_err(adap->pdev_dev,
172 "mbox %d: %llx %llx %llx %llx %llx %llx %llx %llx\n", mbox,
173 (unsigned long long)t4_read_reg64(adap, data_reg),
174 (unsigned long long)t4_read_reg64(adap, data_reg + 8),
175 (unsigned long long)t4_read_reg64(adap, data_reg + 16),
176 (unsigned long long)t4_read_reg64(adap, data_reg + 24),
177 (unsigned long long)t4_read_reg64(adap, data_reg + 32),
178 (unsigned long long)t4_read_reg64(adap, data_reg + 40),
179 (unsigned long long)t4_read_reg64(adap, data_reg + 48),
180 (unsigned long long)t4_read_reg64(adap, data_reg + 56));
181}
182
183/**
184 * t4_wr_mbox_meat - send a command to FW through the given mailbox
185 * @adap: the adapter
186 * @mbox: index of the mailbox to use
187 * @cmd: the command to write
188 * @size: command length in bytes
189 * @rpl: where to optionally store the reply
190 * @sleep_ok: if true we may sleep while awaiting command completion
191 *
192 * Sends the given command to FW through the selected mailbox and waits
193 * for the FW to execute the command. If @rpl is not %NULL it is used to
194 * store the FW's reply to the command. The command and its optional
195 * reply are of the same length. FW can take up to %FW_CMD_MAX_TIMEOUT ms
196 * to respond. @sleep_ok determines whether we may sleep while awaiting
197 * the response. If sleeping is allowed we use progressive backoff;
198 * otherwise we spin.
199 *
200 * The return value is 0 on success or a negative errno on failure. A
201 * failure can happen either because we are not able to execute the
202 * command or FW executes it but signals an error. In the latter case
203 * the return value is the error code indicated by FW (negated).
204 */
205int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
206 void *rpl, bool sleep_ok)
207{
208 static int delay[] = {
209 1, 1, 3, 5, 10, 10, 20, 50, 100, 200
210 };
211
212 u32 v;
213 u64 res;
214 int i, ms, delay_idx;
215 const __be64 *p = cmd;
216 u32 data_reg = PF_REG(mbox, CIM_PF_MAILBOX_DATA);
217 u32 ctl_reg = PF_REG(mbox, CIM_PF_MAILBOX_CTRL);
218
219 if ((size & 15) || size > MBOX_LEN)
220 return -EINVAL;
221
222 v = MBOWNER_GET(t4_read_reg(adap, ctl_reg));
223 for (i = 0; v == MBOX_OWNER_NONE && i < 3; i++)
224 v = MBOWNER_GET(t4_read_reg(adap, ctl_reg));
225
226 if (v != MBOX_OWNER_DRV)
227 return v ? -EBUSY : -ETIMEDOUT;
228
229 for (i = 0; i < size; i += 8)
230 t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p++));
231
232 t4_write_reg(adap, ctl_reg, MBMSGVALID | MBOWNER(MBOX_OWNER_FW));
233 t4_read_reg(adap, ctl_reg); /* flush write */
234
235 delay_idx = 0;
236 ms = delay[0];
237
238 for (i = 0; i < FW_CMD_MAX_TIMEOUT; i += ms) {
239 if (sleep_ok) {
240 ms = delay[delay_idx]; /* last element may repeat */
241 if (delay_idx < ARRAY_SIZE(delay) - 1)
242 delay_idx++;
243 msleep(ms);
244 } else
245 mdelay(ms);
246
247 v = t4_read_reg(adap, ctl_reg);
248 if (MBOWNER_GET(v) == MBOX_OWNER_DRV) {
249 if (!(v & MBMSGVALID)) {
250 t4_write_reg(adap, ctl_reg, 0);
251 continue;
252 }
253
254 res = t4_read_reg64(adap, data_reg);
255 if (FW_CMD_OP_GET(res >> 32) == FW_DEBUG_CMD) {
256 fw_asrt(adap, data_reg);
257 res = FW_CMD_RETVAL(EIO);
258 } else if (rpl)
259 get_mbox_rpl(adap, rpl, size / 8, data_reg);
260
261 if (FW_CMD_RETVAL_GET((int)res))
262 dump_mbox(adap, mbox, data_reg);
263 t4_write_reg(adap, ctl_reg, 0);
264 return -FW_CMD_RETVAL_GET((int)res);
265 }
266 }
267
268 dump_mbox(adap, mbox, data_reg);
269 dev_err(adap->pdev_dev, "command %#x in mailbox %d timed out\n",
270 *(const u8 *)cmd, mbox);
271 return -ETIMEDOUT;
272}
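
/*
 * Note: most callers in this driver go through the t4_wr_mbox() wrapper,
 * which (per cxgb4.h) calls t4_wr_mbox_meat() with sleeping allowed, e.g.
 * t4_wr_mbox(adap, 0, &c, sizeof(c), &c) in the SGE queue allocation code.
 */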
273
274/**
275 * t4_mc_read - read from MC through backdoor accesses
276 * @adap: the adapter
277 * @addr: address of first byte requested
278 * @data: 64 bytes of data containing the requested address
279 * @ecc: where to store the corresponding 64-bit ECC word
280 *
281 * Read 64 bytes of data from MC starting at a 64-byte-aligned address
282 * that covers the requested address @addr. If @ecc is not %NULL it
283 * is assigned the 64-bit ECC word for the read data.
284 */
285int t4_mc_read(struct adapter *adap, u32 addr, __be32 *data, u64 *ecc)
286{
287 int i;
288
289 if (t4_read_reg(adap, MC_BIST_CMD) & START_BIST)
290 return -EBUSY;
291 t4_write_reg(adap, MC_BIST_CMD_ADDR, addr & ~0x3fU);
292 t4_write_reg(adap, MC_BIST_CMD_LEN, 64);
293 t4_write_reg(adap, MC_BIST_DATA_PATTERN, 0xc);
294 t4_write_reg(adap, MC_BIST_CMD, BIST_OPCODE(1) | START_BIST |
295 BIST_CMD_GAP(1));
296 i = t4_wait_op_done(adap, MC_BIST_CMD, START_BIST, 0, 10, 1);
297 if (i)
298 return i;
299
300#define MC_DATA(i) MC_BIST_STATUS_REG(MC_BIST_STATUS_RDATA, i)
301
302 for (i = 15; i >= 0; i--)
303 *data++ = htonl(t4_read_reg(adap, MC_DATA(i)));
304 if (ecc)
305 *ecc = t4_read_reg64(adap, MC_DATA(16));
306#undef MC_DATA
307 return 0;
308}
309
310/**
311 * t4_edc_read - read from EDC through backdoor accesses
312 * @adap: the adapter
313 * @idx: which EDC to access
314 * @addr: address of first byte requested
315 * @data: 64 bytes of data containing the requested address
316 * @ecc: where to store the corresponding 64-bit ECC word
317 *
318 * Read 64 bytes of data from EDC starting at a 64-byte-aligned address
319 * that covers the requested address @addr. If @ecc is not %NULL it
320 * is assigned the 64-bit ECC word for the read data.
321 */
322int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
323{
324 int i;
325
326 idx *= EDC_STRIDE;
327 if (t4_read_reg(adap, EDC_BIST_CMD + idx) & START_BIST)
328 return -EBUSY;
329 t4_write_reg(adap, EDC_BIST_CMD_ADDR + idx, addr & ~0x3fU);
330 t4_write_reg(adap, EDC_BIST_CMD_LEN + idx, 64);
331 t4_write_reg(adap, EDC_BIST_DATA_PATTERN + idx, 0xc);
332 t4_write_reg(adap, EDC_BIST_CMD + idx,
333 BIST_OPCODE(1) | BIST_CMD_GAP(1) | START_BIST);
334 i = t4_wait_op_done(adap, EDC_BIST_CMD + idx, START_BIST, 0, 10, 1);
335 if (i)
336 return i;
337
338#define EDC_DATA(i) (EDC_BIST_STATUS_REG(EDC_BIST_STATUS_RDATA, i) + idx)
339
340 for (i = 15; i >= 0; i--)
341 *data++ = htonl(t4_read_reg(adap, EDC_DATA(i)));
342 if (ecc)
343 *ecc = t4_read_reg64(adap, EDC_DATA(16));
344#undef EDC_DATA
345 return 0;
346}
347
348#define VPD_ENTRY(name, len) \
349 u8 name##_kword[2]; u8 name##_len; u8 name##_data[len]
350
351/*
352 * Partial EEPROM Vital Product Data structure. Includes only the ID and
353 * VPD-R sections.
354 */
355struct t4_vpd {
356 u8 id_tag;
357 u8 id_len[2];
358 u8 id_data[ID_LEN];
359 u8 vpdr_tag;
360 u8 vpdr_len[2];
361 VPD_ENTRY(pn, 16); /* part number */
362 VPD_ENTRY(ec, EC_LEN); /* EC level */
363 VPD_ENTRY(sn, SERNUM_LEN); /* serial number */
364 VPD_ENTRY(na, 12); /* MAC address base */
365 VPD_ENTRY(port_type, 8); /* port types */
366 VPD_ENTRY(gpio, 14); /* GPIO usage */
367 VPD_ENTRY(cclk, 6); /* core clock */
368 VPD_ENTRY(port_addr, 8); /* port MDIO addresses */
369 VPD_ENTRY(rv, 1); /* csum */
370 u32 pad; /* for multiple-of-4 sizing and alignment */
371};
372
373#define EEPROM_STAT_ADDR 0x7bfc
374#define VPD_BASE 0
375
376/**
377 * t4_seeprom_wp - enable/disable EEPROM write protection
378 * @adapter: the adapter
379 * @enable: whether to enable or disable write protection
380 *
381 * Enables or disables write protection on the serial EEPROM.
382 */
383int t4_seeprom_wp(struct adapter *adapter, bool enable)
384{
385 unsigned int v = enable ? 0xc : 0;
386 int ret = pci_write_vpd(adapter->pdev, EEPROM_STAT_ADDR, 4, &v);
387 return ret < 0 ? ret : 0;
388}
389
390/**
391 * get_vpd_params - read VPD parameters from VPD EEPROM
392 * @adapter: adapter to read
393 * @p: where to store the parameters
394 *
395 * Reads card parameters stored in VPD EEPROM.
396 */
397static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
398{
399 int ret;
400 struct t4_vpd vpd;
401 u8 *q = (u8 *)&vpd, csum;
402
403 ret = pci_read_vpd(adapter->pdev, VPD_BASE, sizeof(vpd), &vpd);
404 if (ret < 0)
405 return ret;
406
407 for (csum = 0; q <= vpd.rv_data; q++)
408 csum += *q;
409
410 if (csum) {
411 dev_err(adapter->pdev_dev,
412 "corrupted VPD EEPROM, actual csum %u\n", csum);
413 return -EINVAL;
414 }
415
416 p->cclk = simple_strtoul(vpd.cclk_data, NULL, 10);
417 memcpy(p->id, vpd.id_data, sizeof(vpd.id_data));
418 strim(p->id);
419 memcpy(p->ec, vpd.ec_data, sizeof(vpd.ec_data));
420 strim(p->ec);
421 memcpy(p->sn, vpd.sn_data, sizeof(vpd.sn_data));
422 strim(p->sn);
423 return 0;
424}
425
426/* serial flash and firmware constants */
427enum {
428 SF_ATTEMPTS = 10, /* max retries for SF operations */
429
430 /* flash command opcodes */
431 SF_PROG_PAGE = 2, /* program page */
432 SF_WR_DISABLE = 4, /* disable writes */
433 SF_RD_STATUS = 5, /* read status register */
434 SF_WR_ENABLE = 6, /* enable writes */
435 SF_RD_DATA_FAST = 0xb, /* read flash */
436 SF_ERASE_SECTOR = 0xd8, /* erase sector */
437
438 FW_START_SEC = 8, /* first flash sector for FW */
439 FW_END_SEC = 15, /* last flash sector for FW */
440 FW_IMG_START = FW_START_SEC * SF_SEC_SIZE,
441 FW_MAX_SIZE = (FW_END_SEC - FW_START_SEC + 1) * SF_SEC_SIZE,
442};
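/*
 * With FW_START_SEC..FW_END_SEC = 8..15 the firmware image occupies eight
 * flash sectors, so FW_MAX_SIZE works out to 8 * SF_SEC_SIZE bytes
 * (SF_SEC_SIZE and SF_PAGE_SIZE come from the driver headers).
 */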
443
444/**
445 * sf1_read - read data from the serial flash
446 * @adapter: the adapter
447 * @byte_cnt: number of bytes to read
448 * @cont: whether another operation will be chained
449 * @lock: whether to lock SF for PL access only
450 * @valp: where to store the read data
451 *
452 * Reads up to 4 bytes of data from the serial flash. The location of
453 * the read needs to be specified prior to calling this by issuing the
454 * appropriate commands to the serial flash.
455 */
456static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
457 int lock, u32 *valp)
458{
459 int ret;
460
461 if (!byte_cnt || byte_cnt > 4)
462 return -EINVAL;
463 if (t4_read_reg(adapter, SF_OP) & BUSY)
464 return -EBUSY;
465 cont = cont ? SF_CONT : 0;
466 lock = lock ? SF_LOCK : 0;
467 t4_write_reg(adapter, SF_OP, lock | cont | BYTECNT(byte_cnt - 1));
468 ret = t4_wait_op_done(adapter, SF_OP, BUSY, 0, SF_ATTEMPTS, 5);
469 if (!ret)
470 *valp = t4_read_reg(adapter, SF_DATA);
471 return ret;
472}
473
474/**
475 * sf1_write - write data to the serial flash
476 * @adapter: the adapter
477 * @byte_cnt: number of bytes to write
478 * @cont: whether another operation will be chained
479 * @lock: whether to lock SF for PL access only
480 * @val: value to write
481 *
482 * Writes up to 4 bytes of data to the serial flash. The location of
483 * the write needs to be specified prior to calling this by issuing the
484 * appropriate commands to the serial flash.
485 */
486static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
487 int lock, u32 val)
488{
489 if (!byte_cnt || byte_cnt > 4)
490 return -EINVAL;
491 if (t4_read_reg(adapter, SF_OP) & BUSY)
492 return -EBUSY;
493 cont = cont ? SF_CONT : 0;
494 lock = lock ? SF_LOCK : 0;
495 t4_write_reg(adapter, SF_DATA, val);
496 t4_write_reg(adapter, SF_OP, lock |
497 cont | BYTECNT(byte_cnt - 1) | OP_WR);
498 return t4_wait_op_done(adapter, SF_OP, BUSY, 0, SF_ATTEMPTS, 5);
499}
500
501/**
502 * flash_wait_op - wait for a flash operation to complete
503 * @adapter: the adapter
504 * @attempts: max number of polls of the status register
505 * @delay: delay between polls in ms
506 *
507 * Wait for a flash operation to complete by polling the status register.
508 */
509static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
510{
511 int ret;
512 u32 status;
513
514 while (1) {
515 if ((ret = sf1_write(adapter, 1, 1, 1, SF_RD_STATUS)) != 0 ||
516 (ret = sf1_read(adapter, 1, 0, 1, &status)) != 0)
517 return ret;
518 if (!(status & 1))
519 return 0;
520 if (--attempts == 0)
521 return -EAGAIN;
522 if (delay)
523 msleep(delay);
524 }
525}
526
527/**
528 * t4_read_flash - read words from serial flash
529 * @adapter: the adapter
530 * @addr: the start address for the read
531 * @nwords: how many 32-bit words to read
532 * @data: where to store the read data
533 * @byte_oriented: whether to store data as bytes or as words
534 *
535 * Read the specified number of 32-bit words from the serial flash.
536 * If @byte_oriented is set the read data is stored as a byte array
537 * (i.e., big-endian), otherwise as 32-bit words in the platform's
538 * natural endianness.
539 */
540int t4_read_flash(struct adapter *adapter, unsigned int addr,
541 unsigned int nwords, u32 *data, int byte_oriented)
542{
543 int ret;
544
545 if (addr + nwords * sizeof(u32) > SF_SIZE || (addr & 3))
546 return -EINVAL;
547
548 addr = swab32(addr) | SF_RD_DATA_FAST;
549
550 if ((ret = sf1_write(adapter, 4, 1, 0, addr)) != 0 ||
551 (ret = sf1_read(adapter, 1, 1, 0, data)) != 0)
552 return ret;
553
554 for ( ; nwords; nwords--, data++) {
555 ret = sf1_read(adapter, 4, nwords > 1, nwords == 1, data);
556 if (nwords == 1)
557 t4_write_reg(adapter, SF_OP, 0); /* unlock SF */
558 if (ret)
559 return ret;
560 if (byte_oriented)
561 *data = htonl(*data);
562 }
563 return 0;
564}
565
566/**
567 * t4_write_flash - write up to a page of data to the serial flash
568 * @adapter: the adapter
569 * @addr: the start address to write
570 * @n: length of data to write in bytes
571 * @data: the data to write
572 *
573 * Writes up to a page of data (256 bytes) to the serial flash starting
574 * at the given address. All the data must be written to the same page.
575 */
576static int t4_write_flash(struct adapter *adapter, unsigned int addr,
577 unsigned int n, const u8 *data)
578{
579 int ret;
580 u32 buf[64];
581 unsigned int i, c, left, val, offset = addr & 0xff;
582
583 if (addr >= SF_SIZE || offset + n > SF_PAGE_SIZE)
584 return -EINVAL;
585
586 val = swab32(addr) | SF_PROG_PAGE;
587
588 if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
589 (ret = sf1_write(adapter, 4, 1, 1, val)) != 0)
590 goto unlock;
591
592 for (left = n; left; left -= c) {
593 c = min(left, 4U);
594 for (val = 0, i = 0; i < c; ++i)
595 val = (val << 8) + *data++;
596
597 ret = sf1_write(adapter, c, c != left, 1, val);
598 if (ret)
599 goto unlock;
600 }
601 ret = flash_wait_op(adapter, 5, 1);
602 if (ret)
603 goto unlock;
604
605 t4_write_reg(adapter, SF_OP, 0); /* unlock SF */
606
607 /* Read the page to verify the write succeeded */
608 ret = t4_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
609 if (ret)
610 return ret;
611
612 if (memcmp(data - n, (u8 *)buf + offset, n)) {
613 dev_err(adapter->pdev_dev,
614 "failed to correctly write the flash page at %#x\n",
615 addr);
616 return -EIO;
617 }
618 return 0;
619
620unlock:
621 t4_write_reg(adapter, SF_OP, 0); /* unlock SF */
622 return ret;
623}
624
625/**
626 * get_fw_version - read the firmware version
627 * @adapter: the adapter
628 * @vers: where to place the version
629 *
630 * Reads the FW version from flash.
631 */
632static int get_fw_version(struct adapter *adapter, u32 *vers)
633{
634 return t4_read_flash(adapter,
635 FW_IMG_START + offsetof(struct fw_hdr, fw_ver), 1,
636 vers, 0);
637}
638
639/**
640 * get_tp_version - read the TP microcode version
641 * @adapter: the adapter
642 * @vers: where to place the version
643 *
644 * Reads the TP microcode version from flash.
645 */
646static int get_tp_version(struct adapter *adapter, u32 *vers)
647{
648 return t4_read_flash(adapter, FW_IMG_START + offsetof(struct fw_hdr,
649 tp_microcode_ver),
650 1, vers, 0);
651}
652
653/**
654 * t4_check_fw_version - check if the FW is compatible with this driver
655 * @adapter: the adapter
656 *
657 * Checks if an adapter's FW is compatible with the driver. Returns 0
658 * if there's an exact match, a negative error if the version could not be
659 * read or there's a major version mismatch, and a positive value if the
660 * expected major version is found but there's a minor version mismatch.
661 */
662int t4_check_fw_version(struct adapter *adapter)
663{
664 u32 api_vers[2];
665 int ret, major, minor, micro;
666
667 ret = get_fw_version(adapter, &adapter->params.fw_vers);
668 if (!ret)
669 ret = get_tp_version(adapter, &adapter->params.tp_vers);
670 if (!ret)
671 ret = t4_read_flash(adapter,
672 FW_IMG_START + offsetof(struct fw_hdr, intfver_nic),
673 2, api_vers, 1);
674 if (ret)
675 return ret;
676
677 major = FW_HDR_FW_VER_MAJOR_GET(adapter->params.fw_vers);
678 minor = FW_HDR_FW_VER_MINOR_GET(adapter->params.fw_vers);
679 micro = FW_HDR_FW_VER_MICRO_GET(adapter->params.fw_vers);
680 memcpy(adapter->params.api_vers, api_vers,
681 sizeof(adapter->params.api_vers));
682
683 if (major != FW_VERSION_MAJOR) { /* major mismatch - fail */
684 dev_err(adapter->pdev_dev,
685 "card FW has major version %u, driver wants %u\n",
686 major, FW_VERSION_MAJOR);
687 return -EINVAL;
688 }
689
690 if (minor == FW_VERSION_MINOR && micro == FW_VERSION_MICRO)
691 return 0; /* perfect match */
692
693 /* Minor/micro version mismatch. Report it but often it's OK. */
694 return 1;
695}
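#if 0	/* Illustrative sketch, not part of the driver: how a caller might act
	 * on the three-way return value documented above (negative = unusable,
	 * 0 = exact match, positive = minor/micro mismatch only).
	 */
static void example_fw_version_check(struct adapter *adap)
{
	int ret = t4_check_fw_version(adap);

	if (ret < 0)
		dev_err(adap->pdev_dev,
			"FW unusable (read error or major version mismatch), error %d\n",
			ret);
	else if (ret > 0)
		dev_warn(adap->pdev_dev,
			 "minor/micro FW version mismatch, continuing anyway\n");
	/* ret == 0: exact match, nothing to report */
}
#endif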
696
697/**
698 * t4_flash_erase_sectors - erase a range of flash sectors
699 * @adapter: the adapter
700 * @start: the first sector to erase
701 * @end: the last sector to erase
702 *
703 * Erases the sectors in the given inclusive range.
704 */
705static int t4_flash_erase_sectors(struct adapter *adapter, int start, int end)
706{
707 int ret = 0;
708
709 while (start <= end) {
710 if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
711 (ret = sf1_write(adapter, 4, 0, 1,
712 SF_ERASE_SECTOR | (start << 8))) != 0 ||
713 (ret = flash_wait_op(adapter, 5, 500)) != 0) {
714 dev_err(adapter->pdev_dev,
715 "erase of flash sector %d failed, error %d\n",
716 start, ret);
717 break;
718 }
719 start++;
720 }
721 t4_write_reg(adapter, SF_OP, 0); /* unlock SF */
722 return ret;
723}
724
725/**
726 * t4_load_fw - download firmware
727 * @adap: the adapter
728 * @fw_data: the firmware image to write
729 * @size: image size
730 *
731 * Write the supplied firmware image to the card's serial flash.
732 */
733int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
734{
735 u32 csum;
736 int ret, addr;
737 unsigned int i;
738 u8 first_page[SF_PAGE_SIZE];
739 const u32 *p = (const u32 *)fw_data;
740 const struct fw_hdr *hdr = (const struct fw_hdr *)fw_data;
741
742 if (!size) {
743 dev_err(adap->pdev_dev, "FW image has no data\n");
744 return -EINVAL;
745 }
746 if (size & 511) {
747 dev_err(adap->pdev_dev,
748 "FW image size not multiple of 512 bytes\n");
749 return -EINVAL;
750 }
751 if (ntohs(hdr->len512) * 512 != size) {
752 dev_err(adap->pdev_dev,
753 "FW image size differs from size in FW header\n");
754 return -EINVAL;
755 }
756 if (size > FW_MAX_SIZE) {
757 dev_err(adap->pdev_dev, "FW image too large, max is %u bytes\n",
758 FW_MAX_SIZE);
759 return -EFBIG;
760 }
761
762 for (csum = 0, i = 0; i < size / sizeof(csum); i++)
763 csum += ntohl(p[i]);
764
765 if (csum != 0xffffffff) {
766 dev_err(adap->pdev_dev,
767 "corrupted firmware image, checksum %#x\n", csum);
768 return -EINVAL;
769 }
770
771 i = DIV_ROUND_UP(size, SF_SEC_SIZE); /* # of sectors spanned */
772 ret = t4_flash_erase_sectors(adap, FW_START_SEC, FW_START_SEC + i - 1);
773 if (ret)
774 goto out;
775
776 /*
777 * We write the correct version at the end so the driver can see a bad
778 * version if the FW write fails. Start by writing a copy of the
779 * first page with a bad version.
780 */
781 memcpy(first_page, fw_data, SF_PAGE_SIZE);
782 ((struct fw_hdr *)first_page)->fw_ver = htonl(0xffffffff);
783 ret = t4_write_flash(adap, FW_IMG_START, SF_PAGE_SIZE, first_page);
784 if (ret)
785 goto out;
786
787 addr = FW_IMG_START;
788 for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
789 addr += SF_PAGE_SIZE;
790 fw_data += SF_PAGE_SIZE;
791 ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, fw_data);
792 if (ret)
793 goto out;
794 }
795
796 ret = t4_write_flash(adap,
797 FW_IMG_START + offsetof(struct fw_hdr, fw_ver),
798 sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver);
799out:
800 if (ret)
801 dev_err(adap->pdev_dev, "firmware download failed, error %d\n",
802 ret);
803 return ret;
804}
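#if 0	/* Illustrative sketch, not part of the driver.  It assumes the
	 * standard request_firmware()/release_firmware() loader API; the
	 * image name "cxgb4fw.bin" is made up for illustration.
	 */
static int example_upgrade_fw(struct adapter *adap)
{
	const struct firmware *fw;
	int ret;

	ret = request_firmware(&fw, "cxgb4fw.bin", adap->pdev_dev);
	if (ret < 0)
		return ret;

	ret = t4_load_fw(adap, fw->data, fw->size);
	release_firmware(fw);
	return ret;
}
#endif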
805
806#define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\
807 FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_ANEG)
808
809/**
810 * t4_link_start - apply link configuration to MAC/PHY
812 * @adap: the adapter
813 * @mbox: mbox to use for the FW command
814 * @port: the port id
815 * @lc: the requested link configuration
814 *
815 * Set up a port's MAC and PHY according to a desired link configuration.
816 * - If the PHY can auto-negotiate first decide what to advertise, then
817 * enable/disable auto-negotiation as desired, and reset.
818 * - If the PHY does not auto-negotiate just reset it.
819 * - If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
820 * otherwise do it later based on the outcome of auto-negotiation.
821 */
822int t4_link_start(struct adapter *adap, unsigned int mbox, unsigned int port,
823 struct link_config *lc)
824{
825 struct fw_port_cmd c;
826 unsigned int fc = 0, mdi = FW_PORT_MDI(FW_PORT_MDI_AUTO);
827
828 lc->link_ok = 0;
829 if (lc->requested_fc & PAUSE_RX)
830 fc |= FW_PORT_CAP_FC_RX;
831 if (lc->requested_fc & PAUSE_TX)
832 fc |= FW_PORT_CAP_FC_TX;
833
834 memset(&c, 0, sizeof(c));
835 c.op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) | FW_CMD_REQUEST |
836 FW_CMD_EXEC | FW_PORT_CMD_PORTID(port));
837 c.action_to_len16 = htonl(FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
838 FW_LEN16(c));
839
840 if (!(lc->supported & FW_PORT_CAP_ANEG)) {
841 c.u.l1cfg.rcap = htonl((lc->supported & ADVERT_MASK) | fc);
842 lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
843 } else if (lc->autoneg == AUTONEG_DISABLE) {
844 c.u.l1cfg.rcap = htonl(lc->requested_speed | fc | mdi);
845 lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
846 } else
847 c.u.l1cfg.rcap = htonl(lc->advertising | fc | mdi);
848
849 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
850}
851
852/**
853 * t4_restart_aneg - restart autonegotiation
854 * @adap: the adapter
855 * @mbox: mbox to use for the FW command
856 * @port: the port id
857 *
858 * Restarts autonegotiation for the selected port.
859 */
860int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port)
861{
862 struct fw_port_cmd c;
863
864 memset(&c, 0, sizeof(c));
865 c.op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) | FW_CMD_REQUEST |
866 FW_CMD_EXEC | FW_PORT_CMD_PORTID(port));
867 c.action_to_len16 = htonl(FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
868 FW_LEN16(c));
869 c.u.l1cfg.rcap = htonl(FW_PORT_CAP_ANEG);
870 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
871}
872
873/**
874 * t4_set_vlan_accel - configure HW VLAN extraction
875 * @adap: the adapter
876 * @ports: bitmap of adapter ports to operate on
877 * @on: enable (1) or disable (0) HW VLAN extraction
878 *
879 * Enables or disables HW extraction of VLAN tags for the ports specified
880 * by @ports. @ports is a bitmap with the ith bit designating the port
881 * associated with the ith adapter channel.
882 */
883void t4_set_vlan_accel(struct adapter *adap, unsigned int ports, int on)
884{
885 ports <<= VLANEXTENABLE_SHIFT;
886 t4_set_reg_field(adap, TP_OUT_CONFIG, ports, on ? ports : 0);
887}
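/*
 * For example, t4_set_vlan_accel(adap, 0x3, 1) enables HW VLAN extraction on
 * the ports attached to channels 0 and 1, and t4_set_vlan_accel(adap, 0x3, 0)
 * disables it again.
 */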
888
889struct intr_info {
890 unsigned int mask; /* bits to check in interrupt status */
891 const char *msg; /* message to print or NULL */
892 short stat_idx; /* stat counter to increment or -1 */
893 unsigned short fatal; /* whether the condition reported is fatal */
894};
895
896/**
897 * t4_handle_intr_status - table driven interrupt handler
898 * @adapter: the adapter that generated the interrupt
899 * @reg: the interrupt status register to process
900 * @acts: table of interrupt actions
901 *
902 * A table driven interrupt handler that applies a set of masks to an
903 * interrupt status word and performs the corresponding actions if the
904 * interrupts described by the mask have occurred. The actions include
905 * optionally emitting a warning or alert message. The table is terminated
906 * by an entry specifying mask 0. Returns the number of fatal interrupt
907 * conditions.
908 */
909static int t4_handle_intr_status(struct adapter *adapter, unsigned int reg,
910 const struct intr_info *acts)
911{
912 int fatal = 0;
913 unsigned int mask = 0;
914 unsigned int status = t4_read_reg(adapter, reg);
915
916 for ( ; acts->mask; ++acts) {
917 if (!(status & acts->mask))
918 continue;
919 if (acts->fatal) {
920 fatal++;
921 dev_alert(adapter->pdev_dev, "%s (0x%x)\n", acts->msg,
922 status & acts->mask);
923 } else if (acts->msg && printk_ratelimit())
924 dev_warn(adapter->pdev_dev, "%s (0x%x)\n", acts->msg,
925 status & acts->mask);
926 mask |= acts->mask;
927 }
928 status &= mask;
929 if (status) /* clear processed interrupts */
930 t4_write_reg(adapter, reg, status);
931 return fatal;
932}
933
934/*
935 * Interrupt handler for the PCIE module.
936 */
937static void pcie_intr_handler(struct adapter *adapter)
938{
939 static struct intr_info sysbus_intr_info[] = {
940 { RNPP, "RXNP array parity error", -1, 1 },
941 { RPCP, "RXPC array parity error", -1, 1 },
942 { RCIP, "RXCIF array parity error", -1, 1 },
943 { RCCP, "Rx completions control array parity error", -1, 1 },
944 { RFTP, "RXFT array parity error", -1, 1 },
945 { 0 }
946 };
947 static struct intr_info pcie_port_intr_info[] = {
948 { TPCP, "TXPC array parity error", -1, 1 },
949 { TNPP, "TXNP array parity error", -1, 1 },
950 { TFTP, "TXFT array parity error", -1, 1 },
951 { TCAP, "TXCA array parity error", -1, 1 },
952 { TCIP, "TXCIF array parity error", -1, 1 },
953 { RCAP, "RXCA array parity error", -1, 1 },
954 { OTDD, "outbound request TLP discarded", -1, 1 },
955 { RDPE, "Rx data parity error", -1, 1 },
956 { TDUE, "Tx uncorrectable data error", -1, 1 },
957 { 0 }
958 };
959 static struct intr_info pcie_intr_info[] = {
960 { MSIADDRLPERR, "MSI AddrL parity error", -1, 1 },
961 { MSIADDRHPERR, "MSI AddrH parity error", -1, 1 },
962 { MSIDATAPERR, "MSI data parity error", -1, 1 },
963 { MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
964 { MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
965 { MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
966 { MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
967 { PIOCPLPERR, "PCI PIO completion FIFO parity error", -1, 1 },
968 { PIOREQPERR, "PCI PIO request FIFO parity error", -1, 1 },
969 { TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 },
970 { CCNTPERR, "PCI CMD channel count parity error", -1, 1 },
971 { CREQPERR, "PCI CMD channel request parity error", -1, 1 },
972 { CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
973 { DCNTPERR, "PCI DMA channel count parity error", -1, 1 },
974 { DREQPERR, "PCI DMA channel request parity error", -1, 1 },
975 { DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
976 { HCNTPERR, "PCI HMA channel count parity error", -1, 1 },
977 { HREQPERR, "PCI HMA channel request parity error", -1, 1 },
978 { HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
979 { CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
980 { FIDPERR, "PCI FID parity error", -1, 1 },
981 { INTXCLRPERR, "PCI INTx clear parity error", -1, 1 },
982 { MATAGPERR, "PCI MA tag parity error", -1, 1 },
983 { PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
984 { RXCPLPERR, "PCI Rx completion parity error", -1, 1 },
985 { RXWRPERR, "PCI Rx write parity error", -1, 1 },
986 { RPLPERR, "PCI replay buffer parity error", -1, 1 },
987 { PCIESINT, "PCI core secondary fault", -1, 1 },
988 { PCIEPINT, "PCI core primary fault", -1, 1 },
989 { UNXSPLCPLERR, "PCI unexpected split completion error", -1, 0 },
990 { 0 }
991 };
992
993 int fat;
994
995 fat = t4_handle_intr_status(adapter,
996 PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
997 sysbus_intr_info) +
998 t4_handle_intr_status(adapter,
999 PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
1000 pcie_port_intr_info) +
1001 t4_handle_intr_status(adapter, PCIE_INT_CAUSE, pcie_intr_info);
1002 if (fat)
1003 t4_fatal_err(adapter);
1004}
1005
1006/*
1007 * TP interrupt handler.
1008 */
1009static void tp_intr_handler(struct adapter *adapter)
1010{
1011 static struct intr_info tp_intr_info[] = {
1012 { 0x3fffffff, "TP parity error", -1, 1 },
1013 { FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1 },
1014 { 0 }
1015 };
1016
1017 if (t4_handle_intr_status(adapter, TP_INT_CAUSE, tp_intr_info))
1018 t4_fatal_err(adapter);
1019}
1020
1021/*
1022 * SGE interrupt handler.
1023 */
1024static void sge_intr_handler(struct adapter *adapter)
1025{
1026 u64 v;
1027
1028 static struct intr_info sge_intr_info[] = {
1029 { ERR_CPL_EXCEED_IQE_SIZE,
1030 "SGE received CPL exceeding IQE size", -1, 1 },
1031 { ERR_INVALID_CIDX_INC,
1032 "SGE GTS CIDX increment too large", -1, 0 },
1033 { ERR_CPL_OPCODE_0, "SGE received 0-length CPL", -1, 0 },
1034 { ERR_DROPPED_DB, "SGE doorbell dropped", -1, 0 },
1035 { ERR_DATA_CPL_ON_HIGH_QID1 | ERR_DATA_CPL_ON_HIGH_QID0,
1036 "SGE IQID > 1023 received CPL for FL", -1, 0 },
1037 { ERR_BAD_DB_PIDX3, "SGE DBP 3 pidx increment too large", -1,
1038 0 },
1039 { ERR_BAD_DB_PIDX2, "SGE DBP 2 pidx increment too large", -1,
1040 0 },
1041 { ERR_BAD_DB_PIDX1, "SGE DBP 1 pidx increment too large", -1,
1042 0 },
1043 { ERR_BAD_DB_PIDX0, "SGE DBP 0 pidx increment too large", -1,
1044 0 },
1045 { ERR_ING_CTXT_PRIO,
1046 "SGE too many priority ingress contexts", -1, 0 },
1047 { ERR_EGR_CTXT_PRIO,
1048 "SGE too many priority egress contexts", -1, 0 },
1049 { INGRESS_SIZE_ERR, "SGE illegal ingress QID", -1, 0 },
1050 { EGRESS_SIZE_ERR, "SGE illegal egress QID", -1, 0 },
1051 { 0 }
1052 };
1053
1054 v = (u64)t4_read_reg(adapter, SGE_INT_CAUSE1) |
1055 ((u64)t4_read_reg(adapter, SGE_INT_CAUSE2) << 32);
1056 if (v) {
1057 dev_alert(adapter->pdev_dev, "SGE parity error (%#llx)\n",
1058 (unsigned long long)v);
1059 t4_write_reg(adapter, SGE_INT_CAUSE1, v);
1060 t4_write_reg(adapter, SGE_INT_CAUSE2, v >> 32);
1061 }
1062
1063 if (t4_handle_intr_status(adapter, SGE_INT_CAUSE3, sge_intr_info) ||
1064 v != 0)
1065 t4_fatal_err(adapter);
1066}
1067
1068/*
1069 * CIM interrupt handler.
1070 */
1071static void cim_intr_handler(struct adapter *adapter)
1072{
1073 static struct intr_info cim_intr_info[] = {
1074 { PREFDROPINT, "CIM control register prefetch drop", -1, 1 },
1075 { OBQPARERR, "CIM OBQ parity error", -1, 1 },
1076 { IBQPARERR, "CIM IBQ parity error", -1, 1 },
1077 { MBUPPARERR, "CIM mailbox uP parity error", -1, 1 },
1078 { MBHOSTPARERR, "CIM mailbox host parity error", -1, 1 },
1079 { TIEQINPARERRINT, "CIM TIEQ outgoing parity error", -1, 1 },
1080 { TIEQOUTPARERRINT, "CIM TIEQ incoming parity error", -1, 1 },
1081 { 0 }
1082 };
1083 static struct intr_info cim_upintr_info[] = {
1084 { RSVDSPACEINT, "CIM reserved space access", -1, 1 },
1085 { ILLTRANSINT, "CIM illegal transaction", -1, 1 },
1086 { ILLWRINT, "CIM illegal write", -1, 1 },
1087 { ILLRDINT, "CIM illegal read", -1, 1 },
1088 { ILLRDBEINT, "CIM illegal read BE", -1, 1 },
1089 { ILLWRBEINT, "CIM illegal write BE", -1, 1 },
1090 { SGLRDBOOTINT, "CIM single read from boot space", -1, 1 },
1091 { SGLWRBOOTINT, "CIM single write to boot space", -1, 1 },
1092 { BLKWRBOOTINT, "CIM block write to boot space", -1, 1 },
1093 { SGLRDFLASHINT, "CIM single read from flash space", -1, 1 },
1094 { SGLWRFLASHINT, "CIM single write to flash space", -1, 1 },
1095 { BLKWRFLASHINT, "CIM block write to flash space", -1, 1 },
1096 { SGLRDEEPROMINT, "CIM single EEPROM read", -1, 1 },
1097 { SGLWREEPROMINT, "CIM single EEPROM write", -1, 1 },
1098 { BLKRDEEPROMINT, "CIM block EEPROM read", -1, 1 },
1099 { BLKWREEPROMINT, "CIM block EEPROM write", -1, 1 },
1100 { SGLRDCTLINT , "CIM single read from CTL space", -1, 1 },
1101 { SGLWRCTLINT , "CIM single write to CTL space", -1, 1 },
1102 { BLKRDCTLINT , "CIM block read from CTL space", -1, 1 },
1103 { BLKWRCTLINT , "CIM block write to CTL space", -1, 1 },
1104 { SGLRDPLINT , "CIM single read from PL space", -1, 1 },
1105 { SGLWRPLINT , "CIM single write to PL space", -1, 1 },
1106 { BLKRDPLINT , "CIM block read from PL space", -1, 1 },
1107 { BLKWRPLINT , "CIM block write to PL space", -1, 1 },
1108 { REQOVRLOOKUPINT , "CIM request FIFO overwrite", -1, 1 },
1109 { RSPOVRLOOKUPINT , "CIM response FIFO overwrite", -1, 1 },
1110 { TIMEOUTINT , "CIM PIF timeout", -1, 1 },
1111 { TIMEOUTMAINT , "CIM PIF MA timeout", -1, 1 },
1112 { 0 }
1113 };
1114
1115 int fat;
1116
1117 fat = t4_handle_intr_status(adapter, CIM_HOST_INT_CAUSE,
1118 cim_intr_info) +
1119 t4_handle_intr_status(adapter, CIM_HOST_UPACC_INT_CAUSE,
1120 cim_upintr_info);
1121 if (fat)
1122 t4_fatal_err(adapter);
1123}
1124
1125/*
1126 * ULP RX interrupt handler.
1127 */
1128static void ulprx_intr_handler(struct adapter *adapter)
1129{
1130 static struct intr_info ulprx_intr_info[] = {
1131 { 0x7fffff, "ULPRX parity error", -1, 1 },
1132 { 0 }
1133 };
1134
1135 if (t4_handle_intr_status(adapter, ULP_RX_INT_CAUSE, ulprx_intr_info))
1136 t4_fatal_err(adapter);
1137}
1138
1139/*
1140 * ULP TX interrupt handler.
1141 */
1142static void ulptx_intr_handler(struct adapter *adapter)
1143{
1144 static struct intr_info ulptx_intr_info[] = {
1145 { PBL_BOUND_ERR_CH3, "ULPTX channel 3 PBL out of bounds", -1,
1146 0 },
1147 { PBL_BOUND_ERR_CH2, "ULPTX channel 2 PBL out of bounds", -1,
1148 0 },
1149 { PBL_BOUND_ERR_CH1, "ULPTX channel 1 PBL out of bounds", -1,
1150 0 },
1151 { PBL_BOUND_ERR_CH0, "ULPTX channel 0 PBL out of bounds", -1,
1152 0 },
1153 { 0xfffffff, "ULPTX parity error", -1, 1 },
1154 { 0 }
1155 };
1156
1157 if (t4_handle_intr_status(adapter, ULP_TX_INT_CAUSE, ulptx_intr_info))
1158 t4_fatal_err(adapter);
1159}
1160
1161/*
1162 * PM TX interrupt handler.
1163 */
1164static void pmtx_intr_handler(struct adapter *adapter)
1165{
1166 static struct intr_info pmtx_intr_info[] = {
1167 { PCMD_LEN_OVFL0, "PMTX channel 0 pcmd too large", -1, 1 },
1168 { PCMD_LEN_OVFL1, "PMTX channel 1 pcmd too large", -1, 1 },
1169 { PCMD_LEN_OVFL2, "PMTX channel 2 pcmd too large", -1, 1 },
1170 { ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1 },
1171 { PMTX_FRAMING_ERROR, "PMTX framing error", -1, 1 },
1172 { OESPI_PAR_ERROR, "PMTX oespi parity error", -1, 1 },
1173 { DB_OPTIONS_PAR_ERROR, "PMTX db_options parity error", -1, 1 },
1174 { ICSPI_PAR_ERROR, "PMTX icspi parity error", -1, 1 },
1175 { C_PCMD_PAR_ERROR, "PMTX c_pcmd parity error", -1, 1},
1176 { 0 }
1177 };
1178
1179 if (t4_handle_intr_status(adapter, PM_TX_INT_CAUSE, pmtx_intr_info))
1180 t4_fatal_err(adapter);
1181}
1182
1183/*
1184 * PM RX interrupt handler.
1185 */
1186static void pmrx_intr_handler(struct adapter *adapter)
1187{
1188 static struct intr_info pmrx_intr_info[] = {
1189 { ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1 },
1190 { PMRX_FRAMING_ERROR, "PMRX framing error", -1, 1 },
1191 { OCSPI_PAR_ERROR, "PMRX ocspi parity error", -1, 1 },
1192 { DB_OPTIONS_PAR_ERROR, "PMRX db_options parity error", -1, 1 },
1193 { IESPI_PAR_ERROR, "PMRX iespi parity error", -1, 1 },
1194 { E_PCMD_PAR_ERROR, "PMRX e_pcmd parity error", -1, 1},
1195 { 0 }
1196 };
1197
1198 if (t4_handle_intr_status(adapter, PM_RX_INT_CAUSE, pmrx_intr_info))
1199 t4_fatal_err(adapter);
1200}
1201
1202/*
1203 * CPL switch interrupt handler.
1204 */
1205static void cplsw_intr_handler(struct adapter *adapter)
1206{
1207 static struct intr_info cplsw_intr_info[] = {
1208 { CIM_OP_MAP_PERR, "CPLSW CIM op_map parity error", -1, 1 },
1209 { CIM_OVFL_ERROR, "CPLSW CIM overflow", -1, 1 },
1210 { TP_FRAMING_ERROR, "CPLSW TP framing error", -1, 1 },
1211 { SGE_FRAMING_ERROR, "CPLSW SGE framing error", -1, 1 },
1212 { CIM_FRAMING_ERROR, "CPLSW CIM framing error", -1, 1 },
1213 { ZERO_SWITCH_ERROR, "CPLSW no-switch error", -1, 1 },
1214 { 0 }
1215 };
1216
1217 if (t4_handle_intr_status(adapter, CPL_INTR_CAUSE, cplsw_intr_info))
1218 t4_fatal_err(adapter);
1219}
1220
1221/*
1222 * LE interrupt handler.
1223 */
1224static void le_intr_handler(struct adapter *adap)
1225{
1226 static struct intr_info le_intr_info[] = {
1227 { LIPMISS, "LE LIP miss", -1, 0 },
1228 { LIP0, "LE 0 LIP error", -1, 0 },
1229 { PARITYERR, "LE parity error", -1, 1 },
1230 { UNKNOWNCMD, "LE unknown command", -1, 1 },
1231 { REQQPARERR, "LE request queue parity error", -1, 1 },
1232 { 0 }
1233 };
1234
1235 if (t4_handle_intr_status(adap, LE_DB_INT_CAUSE, le_intr_info))
1236 t4_fatal_err(adap);
1237}
1238
1239/*
1240 * MPS interrupt handler.
1241 */
1242static void mps_intr_handler(struct adapter *adapter)
1243{
1244 static struct intr_info mps_rx_intr_info[] = {
1245 { 0xffffff, "MPS Rx parity error", -1, 1 },
1246 { 0 }
1247 };
1248 static struct intr_info mps_tx_intr_info[] = {
1249 { TPFIFO, "MPS Tx TP FIFO parity error", -1, 1 },
1250 { NCSIFIFO, "MPS Tx NC-SI FIFO parity error", -1, 1 },
1251 { TXDATAFIFO, "MPS Tx data FIFO parity error", -1, 1 },
1252 { TXDESCFIFO, "MPS Tx desc FIFO parity error", -1, 1 },
1253 { BUBBLE, "MPS Tx underflow", -1, 1 },
1254 { SECNTERR, "MPS Tx SOP/EOP error", -1, 1 },
1255 { FRMERR, "MPS Tx framing error", -1, 1 },
1256 { 0 }
1257 };
1258 static struct intr_info mps_trc_intr_info[] = {
1259 { FILTMEM, "MPS TRC filter parity error", -1, 1 },
1260 { PKTFIFO, "MPS TRC packet FIFO parity error", -1, 1 },
1261 { MISCPERR, "MPS TRC misc parity error", -1, 1 },
1262 { 0 }
1263 };
1264 static struct intr_info mps_stat_sram_intr_info[] = {
1265 { 0x1fffff, "MPS statistics SRAM parity error", -1, 1 },
1266 { 0 }
1267 };
1268 static struct intr_info mps_stat_tx_intr_info[] = {
1269 { 0xfffff, "MPS statistics Tx FIFO parity error", -1, 1 },
1270 { 0 }
1271 };
1272 static struct intr_info mps_stat_rx_intr_info[] = {
1273 { 0xffffff, "MPS statistics Rx FIFO parity error", -1, 1 },
1274 { 0 }
1275 };
1276 static struct intr_info mps_cls_intr_info[] = {
1277 { MATCHSRAM, "MPS match SRAM parity error", -1, 1 },
1278 { MATCHTCAM, "MPS match TCAM parity error", -1, 1 },
1279 { HASHSRAM, "MPS hash SRAM parity error", -1, 1 },
1280 { 0 }
1281 };
1282
1283 int fat;
1284
1285 fat = t4_handle_intr_status(adapter, MPS_RX_PERR_INT_CAUSE,
1286 mps_rx_intr_info) +
1287 t4_handle_intr_status(adapter, MPS_TX_INT_CAUSE,
1288 mps_tx_intr_info) +
1289 t4_handle_intr_status(adapter, MPS_TRC_INT_CAUSE,
1290 mps_trc_intr_info) +
1291 t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_SRAM,
1292 mps_stat_sram_intr_info) +
1293 t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_TX_FIFO,
1294 mps_stat_tx_intr_info) +
1295 t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_RX_FIFO,
1296 mps_stat_rx_intr_info) +
1297 t4_handle_intr_status(adapter, MPS_CLS_INT_CAUSE,
1298 mps_cls_intr_info);
1299
1300 t4_write_reg(adapter, MPS_INT_CAUSE, CLSINT | TRCINT |
1301 RXINT | TXINT | STATINT);
1302 t4_read_reg(adapter, MPS_INT_CAUSE); /* flush */
1303 if (fat)
1304 t4_fatal_err(adapter);
1305}
1306
1307#define MEM_INT_MASK (PERR_INT_CAUSE | ECC_CE_INT_CAUSE | ECC_UE_INT_CAUSE)
1308
1309/*
1310 * EDC/MC interrupt handler.
1311 */
1312static void mem_intr_handler(struct adapter *adapter, int idx)
1313{
1314 static const char name[3][5] = { "EDC0", "EDC1", "MC" };
1315
1316 unsigned int addr, cnt_addr, v;
1317
1318 if (idx <= MEM_EDC1) {
1319 addr = EDC_REG(EDC_INT_CAUSE, idx);
1320 cnt_addr = EDC_REG(EDC_ECC_STATUS, idx);
1321 } else {
1322 addr = MC_INT_CAUSE;
1323 cnt_addr = MC_ECC_STATUS;
1324 }
1325
1326 v = t4_read_reg(adapter, addr) & MEM_INT_MASK;
1327 if (v & PERR_INT_CAUSE)
1328 dev_alert(adapter->pdev_dev, "%s FIFO parity error\n",
1329 name[idx]);
1330 if (v & ECC_CE_INT_CAUSE) {
1331 u32 cnt = ECC_CECNT_GET(t4_read_reg(adapter, cnt_addr));
1332
1333 t4_write_reg(adapter, cnt_addr, ECC_CECNT_MASK);
1334 if (printk_ratelimit())
1335 dev_warn(adapter->pdev_dev,
1336 "%u %s correctable ECC data error%s\n",
1337 cnt, name[idx], cnt > 1 ? "s" : "");
1338 }
1339 if (v & ECC_UE_INT_CAUSE)
1340 dev_alert(adapter->pdev_dev,
1341 "%s uncorrectable ECC data error\n", name[idx]);
1342
1343 t4_write_reg(adapter, addr, v);
1344 if (v & (PERR_INT_CAUSE | ECC_UE_INT_CAUSE))
1345 t4_fatal_err(adapter);
1346}
1347
1348/*
1349 * MA interrupt handler.
1350 */
1351static void ma_intr_handler(struct adapter *adap)
1352{
1353 u32 v, status = t4_read_reg(adap, MA_INT_CAUSE);
1354
1355 if (status & MEM_PERR_INT_CAUSE)
1356 dev_alert(adap->pdev_dev,
1357 "MA parity error, parity status %#x\n",
1358 t4_read_reg(adap, MA_PARITY_ERROR_STATUS));
1359 if (status & MEM_WRAP_INT_CAUSE) {
1360 v = t4_read_reg(adap, MA_INT_WRAP_STATUS);
1361 dev_alert(adap->pdev_dev, "MA address wrap-around error by "
1362 "client %u to address %#x\n",
1363 MEM_WRAP_CLIENT_NUM_GET(v),
1364 MEM_WRAP_ADDRESS_GET(v) << 4);
1365 }
1366 t4_write_reg(adap, MA_INT_CAUSE, status);
1367 t4_fatal_err(adap);
1368}
1369
1370/*
1371 * SMB interrupt handler.
1372 */
1373static void smb_intr_handler(struct adapter *adap)
1374{
1375 static struct intr_info smb_intr_info[] = {
1376 { MSTTXFIFOPARINT, "SMB master Tx FIFO parity error", -1, 1 },
1377 { MSTRXFIFOPARINT, "SMB master Rx FIFO parity error", -1, 1 },
1378 { SLVFIFOPARINT, "SMB slave FIFO parity error", -1, 1 },
1379 { 0 }
1380 };
1381
1382 if (t4_handle_intr_status(adap, SMB_INT_CAUSE, smb_intr_info))
1383 t4_fatal_err(adap);
1384}
1385
1386/*
1387 * NC-SI interrupt handler.
1388 */
1389static void ncsi_intr_handler(struct adapter *adap)
1390{
1391 static struct intr_info ncsi_intr_info[] = {
1392 { CIM_DM_PRTY_ERR, "NC-SI CIM parity error", -1, 1 },
1393 { MPS_DM_PRTY_ERR, "NC-SI MPS parity error", -1, 1 },
1394 { TXFIFO_PRTY_ERR, "NC-SI Tx FIFO parity error", -1, 1 },
1395 { RXFIFO_PRTY_ERR, "NC-SI Rx FIFO parity error", -1, 1 },
1396 { 0 }
1397 };
1398
1399 if (t4_handle_intr_status(adap, NCSI_INT_CAUSE, ncsi_intr_info))
1400 t4_fatal_err(adap);
1401}
1402
1403/*
1404 * XGMAC interrupt handler.
1405 */
1406static void xgmac_intr_handler(struct adapter *adap, int port)
1407{
1408 u32 v = t4_read_reg(adap, PORT_REG(port, XGMAC_PORT_INT_CAUSE));
1409
1410 v &= TXFIFO_PRTY_ERR | RXFIFO_PRTY_ERR;
1411 if (!v)
1412 return;
1413
1414 if (v & TXFIFO_PRTY_ERR)
1415 dev_alert(adap->pdev_dev, "XGMAC %d Tx FIFO parity error\n",
1416 port);
1417 if (v & RXFIFO_PRTY_ERR)
1418 dev_alert(adap->pdev_dev, "XGMAC %d Rx FIFO parity error\n",
1419 port);
1420 t4_write_reg(adap, PORT_REG(port, XGMAC_PORT_INT_CAUSE), v);
1421 t4_fatal_err(adap);
1422}
1423
1424/*
1425 * PL interrupt handler.
1426 */
1427static void pl_intr_handler(struct adapter *adap)
1428{
1429 static struct intr_info pl_intr_info[] = {
1430 { FATALPERR, "T4 fatal parity error", -1, 1 },
1431 { PERRVFID, "PL VFID_MAP parity error", -1, 1 },
1432 { 0 }
1433 };
1434
1435 if (t4_handle_intr_status(adap, PL_PL_INT_CAUSE, pl_intr_info))
1436 t4_fatal_err(adap);
1437}
1438
1439#define PF_INTR_MASK (PFSW | PFCIM)
1440#define GLBL_INTR_MASK (CIM | MPS | PL | PCIE | MC | EDC0 | \
1441 EDC1 | LE | TP | MA | PM_TX | PM_RX | ULP_RX | \
1442 CPL_SWITCH | SGE | ULP_TX)
1443
1444/**
1445 * t4_slow_intr_handler - control path interrupt handler
1446 * @adapter: the adapter
1447 *
1448 * T4 interrupt handler for non-data global interrupt events, e.g., errors.
1449 * The designation 'slow' is because it involves register reads, while
1450 * data interrupts typically don't involve any MMIOs.
1451 */
1452int t4_slow_intr_handler(struct adapter *adapter)
1453{
1454 u32 cause = t4_read_reg(adapter, PL_INT_CAUSE);
1455
1456 if (!(cause & GLBL_INTR_MASK))
1457 return 0;
1458 if (cause & CIM)
1459 cim_intr_handler(adapter);
1460 if (cause & MPS)
1461 mps_intr_handler(adapter);
1462 if (cause & NCSI)
1463 ncsi_intr_handler(adapter);
1464 if (cause & PL)
1465 pl_intr_handler(adapter);
1466 if (cause & SMB)
1467 smb_intr_handler(adapter);
1468 if (cause & XGMAC0)
1469 xgmac_intr_handler(adapter, 0);
1470 if (cause & XGMAC1)
1471 xgmac_intr_handler(adapter, 1);
1472 if (cause & XGMAC_KR0)
1473 xgmac_intr_handler(adapter, 2);
1474 if (cause & XGMAC_KR1)
1475 xgmac_intr_handler(adapter, 3);
1476 if (cause & PCIE)
1477 pcie_intr_handler(adapter);
1478 if (cause & MC)
1479 mem_intr_handler(adapter, MEM_MC);
1480 if (cause & EDC0)
1481 mem_intr_handler(adapter, MEM_EDC0);
1482 if (cause & EDC1)
1483 mem_intr_handler(adapter, MEM_EDC1);
1484 if (cause & LE)
1485 le_intr_handler(adapter);
1486 if (cause & TP)
1487 tp_intr_handler(adapter);
1488 if (cause & MA)
1489 ma_intr_handler(adapter);
1490 if (cause & PM_TX)
1491 pmtx_intr_handler(adapter);
1492 if (cause & PM_RX)
1493 pmrx_intr_handler(adapter);
1494 if (cause & ULP_RX)
1495 ulprx_intr_handler(adapter);
1496 if (cause & CPL_SWITCH)
1497 cplsw_intr_handler(adapter);
1498 if (cause & SGE)
1499 sge_intr_handler(adapter);
1500 if (cause & ULP_TX)
1501 ulptx_intr_handler(adapter);
1502
1503 /* Clear the interrupts just processed for which we are the master. */
1504 t4_write_reg(adapter, PL_INT_CAUSE, cause & GLBL_INTR_MASK);
1505 (void) t4_read_reg(adapter, PL_INT_CAUSE); /* flush */
1506 return 1;
1507}
1508
1509/**
1510 * t4_intr_enable - enable interrupts
1511 * @adapter: the adapter whose interrupts should be enabled
1512 *
1513 * Enable PF-specific interrupts for the calling function and the top-level
1514 * interrupt concentrator for global interrupts. Interrupts are already
1515 * enabled at each module, here we just enable the roots of the interrupt
1516 * hierarchies.
1517 *
1518 * Note: this function should be called only when the driver manages
1519 * non PF-specific interrupts from the various HW modules. Only one PCI
1520 * function at a time should be doing this.
1521 */
1522void t4_intr_enable(struct adapter *adapter)
1523{
1524 u32 pf = SOURCEPF_GET(t4_read_reg(adapter, PL_WHOAMI));
1525
1526 t4_write_reg(adapter, SGE_INT_ENABLE3, ERR_CPL_EXCEED_IQE_SIZE |
1527 ERR_INVALID_CIDX_INC | ERR_CPL_OPCODE_0 |
1528 ERR_DROPPED_DB | ERR_DATA_CPL_ON_HIGH_QID1 |
1529 ERR_DATA_CPL_ON_HIGH_QID0 | ERR_BAD_DB_PIDX3 |
1530 ERR_BAD_DB_PIDX2 | ERR_BAD_DB_PIDX1 |
1531 ERR_BAD_DB_PIDX0 | ERR_ING_CTXT_PRIO |
1532 ERR_EGR_CTXT_PRIO | INGRESS_SIZE_ERR |
1533 EGRESS_SIZE_ERR);
1534 t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE), PF_INTR_MASK);
1535 t4_set_reg_field(adapter, PL_INT_MAP0, 0, 1 << pf);
1536}
1537
1538/**
1539 * t4_intr_disable - disable interrupts
1540 * @adapter: the adapter whose interrupts should be disabled
1541 *
1542 * Disable interrupts. We only disable the top-level interrupt
1543 * concentrators. The caller must be a PCI function managing global
1544 * interrupts.
1545 */
1546void t4_intr_disable(struct adapter *adapter)
1547{
1548 u32 pf = SOURCEPF_GET(t4_read_reg(adapter, PL_WHOAMI));
1549
1550 t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE), 0);
1551 t4_set_reg_field(adapter, PL_INT_MAP0, 1 << pf, 0);
1552}
1553
1554/**
1555 * t4_intr_clear - clear all interrupts
1556 * @adapter: the adapter whose interrupts should be cleared
1557 *
1558 * Clears all interrupts. The caller must be a PCI function managing
1559 * global interrupts.
1560 */
1561void t4_intr_clear(struct adapter *adapter)
1562{
1563 static const unsigned int cause_reg[] = {
1564 SGE_INT_CAUSE1, SGE_INT_CAUSE2, SGE_INT_CAUSE3,
1565 PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
1566 PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
1567 PCIE_NONFAT_ERR, PCIE_INT_CAUSE,
1568 MC_INT_CAUSE,
1569 MA_INT_WRAP_STATUS, MA_PARITY_ERROR_STATUS, MA_INT_CAUSE,
1570 EDC_INT_CAUSE, EDC_REG(EDC_INT_CAUSE, 1),
1571 CIM_HOST_INT_CAUSE, CIM_HOST_UPACC_INT_CAUSE,
1572 MYPF_REG(CIM_PF_HOST_INT_CAUSE),
1573 TP_INT_CAUSE,
1574 ULP_RX_INT_CAUSE, ULP_TX_INT_CAUSE,
1575 PM_RX_INT_CAUSE, PM_TX_INT_CAUSE,
1576 MPS_RX_PERR_INT_CAUSE,
1577 CPL_INTR_CAUSE,
1578 MYPF_REG(PL_PF_INT_CAUSE),
1579 PL_PL_INT_CAUSE,
1580 LE_DB_INT_CAUSE,
1581 };
1582
1583 unsigned int i;
1584
1585 for (i = 0; i < ARRAY_SIZE(cause_reg); ++i)
1586 t4_write_reg(adapter, cause_reg[i], 0xffffffff);
1587
1588 t4_write_reg(adapter, PL_INT_CAUSE, GLBL_INTR_MASK);
1589 (void) t4_read_reg(adapter, PL_INT_CAUSE); /* flush */
1590}
1591
1592/**
1593 * hash_mac_addr - return the hash value of a MAC address
1594 * @addr: the 48-bit Ethernet MAC address
1595 *
1596 * Hashes a MAC address according to the hash function used by HW inexact
1597 * (hash) address matching.
1598 */
1599static int hash_mac_addr(const u8 *addr)
1600{
1601 u32 a = ((u32)addr[0] << 16) | ((u32)addr[1] << 8) | addr[2];
1602 u32 b = ((u32)addr[3] << 16) | ((u32)addr[4] << 8) | addr[5];
1603 a ^= b;
1604 a ^= (a >> 12);
1605 a ^= (a >> 6);
1606 return a & 0x3f;
1607}
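#if 0	/* Illustrative sketch, not part of the driver: a 6-bit hash like this
	 * is typically consumed by setting one bit per address in a 64-bit
	 * vector that is later programmed into the HW hash filter (the
	 * programming step itself is not shown).  ETH_ALEN (6) comes from
	 * <linux/if_ether.h>.
	 */
static u64 example_build_hash_vec(const u8 (*addrs)[ETH_ALEN],
				  unsigned int naddr)
{
	u64 vec = 0;
	unsigned int i;

	for (i = 0; i < naddr; i++)
		vec |= 1ULL << hash_mac_addr(addrs[i]);
	return vec;
}
#endif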
1608
1609/**
1610 * t4_config_rss_range - configure a portion of the RSS mapping table
1611 * @adapter: the adapter
1612 * @mbox: mbox to use for the FW command
1613 * @viid: virtual interface whose RSS subtable is to be written
1614 * @start: start entry in the table to write
1615 * @n: how many table entries to write
1616 * @rspq: values for the response queue lookup table
1617 * @nrspq: number of values in @rspq
1618 *
1619 * Programs the selected part of the VI's RSS mapping table with the
1620 * provided values. If @nrspq < @n the supplied values are used repeatedly
1621 * until the full table range is populated.
1622 *
1623 * The caller must ensure the values in @rspq are in the range allowed for
1624 * @viid.
1625 */
1626int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
1627 int start, int n, const u16 *rspq, unsigned int nrspq)
1628{
1629 int ret;
1630 const u16 *rsp = rspq;
1631 const u16 *rsp_end = rspq + nrspq;
1632 struct fw_rss_ind_tbl_cmd cmd;
1633
1634 memset(&cmd, 0, sizeof(cmd));
1635 cmd.op_to_viid = htonl(FW_CMD_OP(FW_RSS_IND_TBL_CMD) |
1636 FW_CMD_REQUEST | FW_CMD_WRITE |
1637 FW_RSS_IND_TBL_CMD_VIID(viid));
1638 cmd.retval_len16 = htonl(FW_LEN16(cmd));
1639
1640 /* each fw_rss_ind_tbl_cmd takes up to 32 entries */
1641 while (n > 0) {
1642 int nq = min(n, 32);
1643 __be32 *qp = &cmd.iq0_to_iq2;
1644
1645 cmd.niqid = htons(nq);
1646 cmd.startidx = htons(start);
1647
1648 start += nq;
1649 n -= nq;
1650
1651 while (nq > 0) {
1652 unsigned int v;
1653
1654 v = FW_RSS_IND_TBL_CMD_IQ0(*rsp);
1655 if (++rsp >= rsp_end)
1656 rsp = rspq;
1657 v |= FW_RSS_IND_TBL_CMD_IQ1(*rsp);
1658 if (++rsp >= rsp_end)
1659 rsp = rspq;
1660 v |= FW_RSS_IND_TBL_CMD_IQ2(*rsp);
1661 if (++rsp >= rsp_end)
1662 rsp = rspq;
1663
1664 *qp++ = htonl(v);
1665 nq -= 3;
1666 }
1667
1668 ret = t4_wr_mbox(adapter, mbox, &cmd, sizeof(cmd), NULL);
1669 if (ret)
1670 return ret;
1671 }
1672 return 0;
1673}
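#if 0	/* Illustrative sketch, not part of the driver.  The subtable size of
	 * 128 entries and the queue IDs are made-up values; it simply shows
	 * how a short @rspq list is replicated across a larger table range,
	 * as described in the function comment above.
	 */
static int example_setup_rss(struct adapter *adap, int mbox, unsigned int viid)
{
	static const u16 rspq[] = { 0, 1, 2, 3 };

	return t4_config_rss_range(adap, mbox, viid, 0, 128, rspq,
				   ARRAY_SIZE(rspq));
}
#endif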
1674
1675/**
1676 * t4_config_glbl_rss - configure the global RSS mode
1677 * @adapter: the adapter
1678 * @mbox: mbox to use for the FW command
1679 * @mode: global RSS mode
1680 * @flags: mode-specific flags
1681 *
1682 * Sets the global RSS mode.
1683 */
1684int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
1685 unsigned int flags)
1686{
1687 struct fw_rss_glb_config_cmd c;
1688
1689 memset(&c, 0, sizeof(c));
1690 c.op_to_write = htonl(FW_CMD_OP(FW_RSS_GLB_CONFIG_CMD) |
1691 FW_CMD_REQUEST | FW_CMD_WRITE);
1692 c.retval_len16 = htonl(FW_LEN16(c));
1693 if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL) {
1694 c.u.manual.mode_pkd = htonl(FW_RSS_GLB_CONFIG_CMD_MODE(mode));
1695 } else if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) {
1696 c.u.basicvirtual.mode_pkd =
1697 htonl(FW_RSS_GLB_CONFIG_CMD_MODE(mode));
1698 c.u.basicvirtual.synmapen_to_hashtoeplitz = htonl(flags);
1699 } else
1700 return -EINVAL;
1701 return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
1702}
1703
1704/* Read an RSS table row */
1705static int rd_rss_row(struct adapter *adap, int row, u32 *val)
1706{
1707 t4_write_reg(adap, TP_RSS_LKP_TABLE, 0xfff00000 | row);
1708 return t4_wait_op_done_val(adap, TP_RSS_LKP_TABLE, LKPTBLROWVLD, 1,
1709 5, 0, val);
1710}
1711
1712/**
1713 * t4_read_rss - read the contents of the RSS mapping table
1714 * @adapter: the adapter
1715 * @map: holds the contents of the RSS mapping table
1716 *
1717 * Reads the contents of the RSS hash->queue mapping table.
1718 */
1719int t4_read_rss(struct adapter *adapter, u16 *map)
1720{
1721 u32 val;
1722 int i, ret;
1723
1724 for (i = 0; i < RSS_NENTRIES / 2; ++i) {
1725 ret = rd_rss_row(adapter, i, &val);
1726 if (ret)
1727 return ret;
1728 *map++ = LKPTBLQUEUE0_GET(val);
1729 *map++ = LKPTBLQUEUE1_GET(val);
1730 }
1731 return 0;
1732}
1733
1734/**
1735 * t4_tp_get_tcp_stats - read TP's TCP MIB counters
1736 * @adap: the adapter
1737 * @v4: holds the TCP/IP counter values
1738 * @v6: holds the TCP/IPv6 counter values
1739 *
1740 * Returns the values of TP's TCP/IP and TCP/IPv6 MIB counters.
1741 * Either @v4 or @v6 may be %NULL to skip the corresponding stats.
1742 */
1743void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
1744 struct tp_tcp_stats *v6)
1745{
1746 u32 val[TP_MIB_TCP_RXT_SEG_LO - TP_MIB_TCP_OUT_RST + 1];
1747
1748#define STAT_IDX(x) ((TP_MIB_TCP_##x) - TP_MIB_TCP_OUT_RST)
1749#define STAT(x) val[STAT_IDX(x)]
1750#define STAT64(x) (((u64)STAT(x##_HI) << 32) | STAT(x##_LO))
1751
1752 if (v4) {
1753 t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, val,
1754 ARRAY_SIZE(val), TP_MIB_TCP_OUT_RST);
1755 v4->tcpOutRsts = STAT(OUT_RST);
1756 v4->tcpInSegs = STAT64(IN_SEG);
1757 v4->tcpOutSegs = STAT64(OUT_SEG);
1758 v4->tcpRetransSegs = STAT64(RXT_SEG);
1759 }
1760 if (v6) {
1761 t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, val,
1762 ARRAY_SIZE(val), TP_MIB_TCP_V6OUT_RST);
1763 v6->tcpOutRsts = STAT(OUT_RST);
1764 v6->tcpInSegs = STAT64(IN_SEG);
1765 v6->tcpOutSegs = STAT64(OUT_SEG);
1766 v6->tcpRetransSegs = STAT64(RXT_SEG);
1767 }
1768#undef STAT64
1769#undef STAT
1770#undef STAT_IDX
1771}
1772
1773/**
1774 * t4_tp_get_err_stats - read TP's error MIB counters
1775 * @adap: the adapter
1776 * @st: holds the counter values
1777 *
1778 * Returns the values of TP's error counters.
1779 */
1780void t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st)
1781{
1782 t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, st->macInErrs,
1783 12, TP_MIB_MAC_IN_ERR_0);
1784 t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, st->tnlCongDrops,
1785 8, TP_MIB_TNL_CNG_DROP_0);
1786 t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, st->tnlTxDrops,
1787 4, TP_MIB_TNL_DROP_0);
1788 t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, st->ofldVlanDrops,
1789 4, TP_MIB_OFD_VLN_DROP_0);
1790 t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, st->tcp6InErrs,
1791 4, TP_MIB_TCP_V6IN_ERR_0);
1792 t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, &st->ofldNoNeigh,
1793 2, TP_MIB_OFD_ARP_DROP);
1794}
1795
1796/**
1797 * t4_read_mtu_tbl - returns the values in the HW path MTU table
1798 * @adap: the adapter
1799 * @mtus: where to store the MTU values
1800 * @mtu_log: where to store the MTU base-2 log (may be %NULL)
1801 *
1802 * Reads the HW path MTU table.
1803 */
1804void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log)
1805{
1806 u32 v;
1807 int i;
1808
1809 for (i = 0; i < NMTUS; ++i) {
1810 t4_write_reg(adap, TP_MTU_TABLE,
1811 MTUINDEX(0xff) | MTUVALUE(i));
1812 v = t4_read_reg(adap, TP_MTU_TABLE);
1813 mtus[i] = MTUVALUE_GET(v);
1814 if (mtu_log)
1815 mtu_log[i] = MTUWIDTH_GET(v);
1816 }
1817}
1818
1819/**
1820 * init_cong_ctrl - initialize congestion control parameters
1821 * @a: the alpha values for congestion control
1822 * @b: the beta values for congestion control
1823 *
1824 * Initialize the congestion control parameters.
1825 */
1826static void __devinit init_cong_ctrl(unsigned short *a, unsigned short *b)
1827{
1828 a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
1829 a[9] = 2;
1830 a[10] = 3;
1831 a[11] = 4;
1832 a[12] = 5;
1833 a[13] = 6;
1834 a[14] = 7;
1835 a[15] = 8;
1836 a[16] = 9;
1837 a[17] = 10;
1838 a[18] = 14;
1839 a[19] = 17;
1840 a[20] = 21;
1841 a[21] = 25;
1842 a[22] = 30;
1843 a[23] = 35;
1844 a[24] = 45;
1845 a[25] = 60;
1846 a[26] = 80;
1847 a[27] = 100;
1848 a[28] = 200;
1849 a[29] = 300;
1850 a[30] = 400;
1851 a[31] = 500;
1852
1853 b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
1854 b[9] = b[10] = 1;
1855 b[11] = b[12] = 2;
1856 b[13] = b[14] = b[15] = b[16] = 3;
1857 b[17] = b[18] = b[19] = b[20] = b[21] = 4;
1858 b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
1859 b[28] = b[29] = 6;
1860 b[30] = b[31] = 7;
1861}
1862
1863/* The minimum additive increment value for the congestion control table */
1864#define CC_MIN_INCR 2U
1865
1866/**
1867 * t4_load_mtus - write the MTU and congestion control HW tables
1868 * @adap: the adapter
1869 * @mtus: the values for the MTU table
1870 * @alpha: the values for the congestion control alpha parameter
1871 * @beta: the values for the congestion control beta parameter
1872 *
1873 * Write the HW MTU table with the supplied MTUs and the high-speed
1874 * congestion control table with the supplied alpha, beta, and MTUs.
1875 * We write the two tables together because the additive increments
1876 * depend on the MTUs.
1877 */
1878void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
1879 const unsigned short *alpha, const unsigned short *beta)
1880{
1881 static const unsigned int avg_pkts[NCCTRL_WIN] = {
1882 2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
1883 896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
1884 28672, 40960, 57344, 81920, 114688, 163840, 229376
1885 };
1886
1887 unsigned int i, w;
1888
1889 for (i = 0; i < NMTUS; ++i) {
1890 unsigned int mtu = mtus[i];
1891 unsigned int log2 = fls(mtu);
1892
1893 if (!(mtu & ((1 << log2) >> 2))) /* round */
1894 log2--;
1895 t4_write_reg(adap, TP_MTU_TABLE, MTUINDEX(i) |
1896 MTUWIDTH(log2) | MTUVALUE(mtu));
1897
1898 for (w = 0; w < NCCTRL_WIN; ++w) {
1899 unsigned int inc;
1900
1901 inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
1902 CC_MIN_INCR);
1903
1904 t4_write_reg(adap, TP_CCTRL_TABLE, (i << 21) |
1905 (w << 16) | (beta[w] << 13) | inc);
1906 }
1907 }
1908}
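/*
 * Worked example for the additive increment above: with mtu = 1500,
 * alpha[w] = 2 and avg_pkts[w] = 80, inc = max((1500 - 40) * 2 / 80, 2) = 36,
 * so 36 is the value written into that congestion control table entry.
 */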
1909
1910/**
1911 * t4_set_trace_filter - configure one of the tracing filters
1912 * @adap: the adapter
1913 * @tp: the desired trace filter parameters
1914 * @idx: which filter to configure
1915 * @enable: whether to enable or disable the filter
1916 *
1917 * Configures one of the tracing filters available in HW. If @enable is
1918 * %0 @tp is not examined and may be %NULL.
1919 */
1920int t4_set_trace_filter(struct adapter *adap, const struct trace_params *tp,
1921 int idx, int enable)
1922{
1923 int i, ofst = idx * 4;
1924 u32 data_reg, mask_reg, cfg;
1925 u32 multitrc = TRCMULTIFILTER;
1926
1927 if (!enable) {
1928 t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A + ofst, 0);
1929 goto out;
1930 }
1931
1932 if (tp->port > 11 || tp->invert > 1 || tp->skip_len > 0x1f ||
1933 tp->skip_ofst > 0x1f || tp->min_len > 0x1ff ||
1934 tp->snap_len > 9600 || (idx && tp->snap_len > 256))
1935 return -EINVAL;
1936
1937 if (tp->snap_len > 256) { /* must be tracer 0 */
1938 if ((t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A + 4) |
1939 t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A + 8) |
1940 t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A + 12)) & TFEN)
1941 return -EINVAL; /* other tracers are enabled */
1942 multitrc = 0;
1943 } else if (idx) {
1944 i = t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_B);
1945 if (TFCAPTUREMAX_GET(i) > 256 &&
1946 (t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A) & TFEN))
1947 return -EINVAL;
1948 }
1949
1950 /* stop the tracer we'll be changing */
1951 t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A + ofst, 0);
1952
1953 /* disable tracing globally if running in the wrong single/multi mode */
1954 cfg = t4_read_reg(adap, MPS_TRC_CFG);
1955 if ((cfg & TRCEN) && multitrc != (cfg & TRCMULTIFILTER)) {
1956 t4_write_reg(adap, MPS_TRC_CFG, cfg ^ TRCEN);
1957 t4_read_reg(adap, MPS_TRC_CFG); /* flush */
1958 msleep(1);
1959 if (!(t4_read_reg(adap, MPS_TRC_CFG) & TRCFIFOEMPTY))
1960 return -ETIMEDOUT;
1961 }
1962 /*
1963	 * At this point tracing is either enabled and in the right mode, or it
1964	 * is disabled.
1965 */
1966
1967 idx *= (MPS_TRC_FILTER1_MATCH - MPS_TRC_FILTER0_MATCH);
1968 data_reg = MPS_TRC_FILTER0_MATCH + idx;
1969 mask_reg = MPS_TRC_FILTER0_DONT_CARE + idx;
1970
1971 for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
1972 t4_write_reg(adap, data_reg, tp->data[i]);
1973 t4_write_reg(adap, mask_reg, ~tp->mask[i]);
1974 }
1975 t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_B + ofst,
1976 TFCAPTUREMAX(tp->snap_len) |
1977 TFMINPKTSIZE(tp->min_len));
1978 t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A + ofst,
1979 TFOFFSET(tp->skip_ofst) | TFLENGTH(tp->skip_len) |
1980 TFPORT(tp->port) | TFEN |
1981 (tp->invert ? TFINVERTMATCH : 0));
1982
1983 cfg &= ~TRCMULTIFILTER;
1984 t4_write_reg(adap, MPS_TRC_CFG, cfg | TRCEN | multitrc);
1985out: t4_read_reg(adap, MPS_TRC_CFG); /* flush */
1986 return 0;
1987}
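#if 0	/* Illustrative sketch, not part of the driver.  It only uses the
	 * struct trace_params fields referenced above; an all-zero mask means
	 * every packet matches (its complement is written to the DONT_CARE
	 * registers), and snap_len must stay <= 256 for any tracer other than
	 * tracer 0.
	 */
static int example_enable_tracer1(struct adapter *adap)
{
	struct trace_params tp;

	memset(&tp, 0, sizeof(tp));	/* match everything */
	tp.port = 0;			/* trace packets on port 0 */
	tp.snap_len = 128;		/* capture the first 128 bytes */

	return t4_set_trace_filter(adap, &tp, 1, 1);	/* tracer 1, enable */
}
#endif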
1988
1989/**
1990 * t4_get_trace_filter - query one of the tracing filters
1991 * @adap: the adapter
1992 * @tp: the current trace filter parameters
1993 * @idx: which trace filter to query
1994 * @enabled: non-zero if the filter is enabled
1995 *
1996 * Returns the current settings of one of the HW tracing filters.
1997 */
1998void t4_get_trace_filter(struct adapter *adap, struct trace_params *tp, int idx,
1999 int *enabled)
2000{
2001 u32 ctla, ctlb;
2002 int i, ofst = idx * 4;
2003 u32 data_reg, mask_reg;
2004
2005 ctla = t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A + ofst);
2006 ctlb = t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_B + ofst);
2007
2008 *enabled = !!(ctla & TFEN);
2009 tp->snap_len = TFCAPTUREMAX_GET(ctlb);
2010 tp->min_len = TFMINPKTSIZE_GET(ctlb);
2011 tp->skip_ofst = TFOFFSET_GET(ctla);
2012 tp->skip_len = TFLENGTH_GET(ctla);
2013 tp->invert = !!(ctla & TFINVERTMATCH);
2014 tp->port = TFPORT_GET(ctla);
2015
2016 ofst = (MPS_TRC_FILTER1_MATCH - MPS_TRC_FILTER0_MATCH) * idx;
2017 data_reg = MPS_TRC_FILTER0_MATCH + ofst;
2018 mask_reg = MPS_TRC_FILTER0_DONT_CARE + ofst;
2019
2020 for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
2021 tp->mask[i] = ~t4_read_reg(adap, mask_reg);
2022 tp->data[i] = t4_read_reg(adap, data_reg) & tp->mask[i];
2023 }
2024}
2025
2026/**
2027 * get_mps_bg_map - return the buffer groups associated with a port
2028 * @adap: the adapter
2029 * @idx: the port index
2030 *
2031 * Returns a bitmap indicating which MPS buffer groups are associated
2032 * with the given port. Bit i is set if buffer group i is used by the
2033 * port.
2034 */
2035static unsigned int get_mps_bg_map(struct adapter *adap, int idx)
2036{
2037 u32 n = NUMPORTS_GET(t4_read_reg(adap, MPS_CMN_CTL));
2038
2039 if (n == 0)
2040 return idx == 0 ? 0xf : 0;
2041 if (n == 1)
2042 return idx < 2 ? (3 << (2 * idx)) : 0;
2043 return 1 << idx;
2044}
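
/*
 * Worked example of the mapping above: a single-port adapter (NUMPORTS
 * field reads 0) gives port 0 all four buffer groups (bitmap 0xf); with
 * two ports (field reads 1) port 0 owns groups 0-1 (bitmap 0x3) and
 * port 1 owns groups 2-3 (bitmap 0xc == 3 << 2); with four ports each
 * port idx owns just group idx (bitmap 1 << idx).
 */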
2045
2046/**
2047 * t4_get_port_stats - collect port statistics
2048 * @adap: the adapter
2049 * @idx: the port index
2050 * @p: the stats structure to fill
2051 *
2052 * Collect statistics related to the given port from HW.
2053 */
2054void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
2055{
2056 u32 bgmap = get_mps_bg_map(adap, idx);
2057
2058#define GET_STAT(name) \
2059 t4_read_reg64(adap, PORT_REG(idx, MPS_PORT_STAT_##name##_L))
2060#define GET_STAT_COM(name) t4_read_reg64(adap, MPS_STAT_##name##_L)
2061
2062 p->tx_octets = GET_STAT(TX_PORT_BYTES);
2063 p->tx_frames = GET_STAT(TX_PORT_FRAMES);
2064 p->tx_bcast_frames = GET_STAT(TX_PORT_BCAST);
2065 p->tx_mcast_frames = GET_STAT(TX_PORT_MCAST);
2066 p->tx_ucast_frames = GET_STAT(TX_PORT_UCAST);
2067 p->tx_error_frames = GET_STAT(TX_PORT_ERROR);
2068 p->tx_frames_64 = GET_STAT(TX_PORT_64B);
2069 p->tx_frames_65_127 = GET_STAT(TX_PORT_65B_127B);
2070 p->tx_frames_128_255 = GET_STAT(TX_PORT_128B_255B);
2071 p->tx_frames_256_511 = GET_STAT(TX_PORT_256B_511B);
2072 p->tx_frames_512_1023 = GET_STAT(TX_PORT_512B_1023B);
2073 p->tx_frames_1024_1518 = GET_STAT(TX_PORT_1024B_1518B);
2074 p->tx_frames_1519_max = GET_STAT(TX_PORT_1519B_MAX);
2075 p->tx_drop = GET_STAT(TX_PORT_DROP);
2076 p->tx_pause = GET_STAT(TX_PORT_PAUSE);
2077 p->tx_ppp0 = GET_STAT(TX_PORT_PPP0);
2078 p->tx_ppp1 = GET_STAT(TX_PORT_PPP1);
2079 p->tx_ppp2 = GET_STAT(TX_PORT_PPP2);
2080 p->tx_ppp3 = GET_STAT(TX_PORT_PPP3);
2081 p->tx_ppp4 = GET_STAT(TX_PORT_PPP4);
2082 p->tx_ppp5 = GET_STAT(TX_PORT_PPP5);
2083 p->tx_ppp6 = GET_STAT(TX_PORT_PPP6);
2084 p->tx_ppp7 = GET_STAT(TX_PORT_PPP7);
2085
2086 p->rx_octets = GET_STAT(RX_PORT_BYTES);
2087 p->rx_frames = GET_STAT(RX_PORT_FRAMES);
2088 p->rx_bcast_frames = GET_STAT(RX_PORT_BCAST);
2089 p->rx_mcast_frames = GET_STAT(RX_PORT_MCAST);
2090 p->rx_ucast_frames = GET_STAT(RX_PORT_UCAST);
2091 p->rx_too_long = GET_STAT(RX_PORT_MTU_ERROR);
2092 p->rx_jabber = GET_STAT(RX_PORT_MTU_CRC_ERROR);
2093 p->rx_fcs_err = GET_STAT(RX_PORT_CRC_ERROR);
2094 p->rx_len_err = GET_STAT(RX_PORT_LEN_ERROR);
2095 p->rx_symbol_err = GET_STAT(RX_PORT_SYM_ERROR);
2096 p->rx_runt = GET_STAT(RX_PORT_LESS_64B);
2097 p->rx_frames_64 = GET_STAT(RX_PORT_64B);
2098 p->rx_frames_65_127 = GET_STAT(RX_PORT_65B_127B);
2099 p->rx_frames_128_255 = GET_STAT(RX_PORT_128B_255B);
2100 p->rx_frames_256_511 = GET_STAT(RX_PORT_256B_511B);
2101 p->rx_frames_512_1023 = GET_STAT(RX_PORT_512B_1023B);
2102 p->rx_frames_1024_1518 = GET_STAT(RX_PORT_1024B_1518B);
2103 p->rx_frames_1519_max = GET_STAT(RX_PORT_1519B_MAX);
2104 p->rx_pause = GET_STAT(RX_PORT_PAUSE);
2105 p->rx_ppp0 = GET_STAT(RX_PORT_PPP0);
2106 p->rx_ppp1 = GET_STAT(RX_PORT_PPP1);
2107 p->rx_ppp2 = GET_STAT(RX_PORT_PPP2);
2108 p->rx_ppp3 = GET_STAT(RX_PORT_PPP3);
2109 p->rx_ppp4 = GET_STAT(RX_PORT_PPP4);
2110 p->rx_ppp5 = GET_STAT(RX_PORT_PPP5);
2111 p->rx_ppp6 = GET_STAT(RX_PORT_PPP6);
2112 p->rx_ppp7 = GET_STAT(RX_PORT_PPP7);
2113
2114 p->rx_ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_DROP_FRAME) : 0;
2115 p->rx_ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_DROP_FRAME) : 0;
2116 p->rx_ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_DROP_FRAME) : 0;
2117 p->rx_ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_DROP_FRAME) : 0;
2118 p->rx_trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_TRUNC_FRAME) : 0;
2119 p->rx_trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_TRUNC_FRAME) : 0;
2120 p->rx_trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_TRUNC_FRAME) : 0;
2121 p->rx_trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_TRUNC_FRAME) : 0;
2122
2123#undef GET_STAT
2124#undef GET_STAT_COM
2125}
2126
2127/**
2128 * t4_get_lb_stats - collect loopback port statistics
2129 * @adap: the adapter
2130 * @idx: the loopback port index
2131 * @p: the stats structure to fill
2132 *
2133 * Return HW statistics for the given loopback port.
2134 */
2135void t4_get_lb_stats(struct adapter *adap, int idx, struct lb_port_stats *p)
2136{
2137 u32 bgmap = get_mps_bg_map(adap, idx);
2138
2139#define GET_STAT(name) \
2140 t4_read_reg64(adap, PORT_REG(idx, MPS_PORT_STAT_LB_PORT_##name##_L))
2141#define GET_STAT_COM(name) t4_read_reg64(adap, MPS_STAT_##name##_L)
2142
2143 p->octets = GET_STAT(BYTES);
2144 p->frames = GET_STAT(FRAMES);
2145 p->bcast_frames = GET_STAT(BCAST);
2146 p->mcast_frames = GET_STAT(MCAST);
2147 p->ucast_frames = GET_STAT(UCAST);
2148 p->error_frames = GET_STAT(ERROR);
2149
2150 p->frames_64 = GET_STAT(64B);
2151 p->frames_65_127 = GET_STAT(65B_127B);
2152 p->frames_128_255 = GET_STAT(128B_255B);
2153 p->frames_256_511 = GET_STAT(256B_511B);
2154 p->frames_512_1023 = GET_STAT(512B_1023B);
2155 p->frames_1024_1518 = GET_STAT(1024B_1518B);
2156 p->frames_1519_max = GET_STAT(1519B_MAX);
2157 p->drop = t4_read_reg(adap, PORT_REG(idx,
2158 MPS_PORT_STAT_LB_PORT_DROP_FRAMES));
2159
2160 p->ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_DROP_FRAME) : 0;
2161 p->ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_DROP_FRAME) : 0;
2162 p->ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_DROP_FRAME) : 0;
2163 p->ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_DROP_FRAME) : 0;
2164 p->trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_TRUNC_FRAME) : 0;
2165 p->trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_TRUNC_FRAME) : 0;
2166 p->trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_TRUNC_FRAME) : 0;
2167 p->trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_TRUNC_FRAME) : 0;
2168
2169#undef GET_STAT
2170#undef GET_STAT_COM
2171}
2172
2173/**
2174 * t4_wol_magic_enable - enable/disable magic packet WoL
2175 * @adap: the adapter
2176 * @port: the physical port index
2177 * @addr: MAC address expected in magic packets, %NULL to disable
2178 *
2179 * Enables/disables magic packet wake-on-LAN for the selected port.
2180 */
2181void t4_wol_magic_enable(struct adapter *adap, unsigned int port,
2182 const u8 *addr)
2183{
2184 if (addr) {
2185 t4_write_reg(adap, PORT_REG(port, XGMAC_PORT_MAGIC_MACID_LO),
2186 (addr[2] << 24) | (addr[3] << 16) |
2187 (addr[4] << 8) | addr[5]);
2188 t4_write_reg(adap, PORT_REG(port, XGMAC_PORT_MAGIC_MACID_HI),
2189 (addr[0] << 8) | addr[1]);
2190 }
2191 t4_set_reg_field(adap, PORT_REG(port, XGMAC_PORT_CFG2), MAGICEN,
2192 addr ? MAGICEN : 0);
2193}
2194
2195/**
2196 * t4_wol_pat_enable - enable/disable pattern-based WoL
2197 * @adap: the adapter
2198 * @port: the physical port index
2199 * @map: bitmap of which HW pattern filters to set
2200 * @mask0: byte mask for bytes 0-63 of a packet
2201 * @mask1: byte mask for bytes 64-127 of a packet
2202 * @crc: Ethernet CRC for selected bytes
2203 * @enable: enable/disable switch
2204 *
2205 * Sets the pattern filters indicated in @map to mask out the bytes
2206 * specified in @mask0/@mask1 in received packets and compare the CRC of
2207 * the resulting packet against @crc. If @enable is %true pattern-based
2208 * WoL is enabled, otherwise disabled.
2209 */
2210int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map,
2211 u64 mask0, u64 mask1, unsigned int crc, bool enable)
2212{
2213 int i;
2214
2215 if (!enable) {
2216 t4_set_reg_field(adap, PORT_REG(port, XGMAC_PORT_CFG2),
2217 PATEN, 0);
2218 return 0;
2219 }
2220 if (map > 0xff)
2221 return -EINVAL;
2222
2223#define EPIO_REG(name) PORT_REG(port, XGMAC_PORT_EPIO_##name)
2224
2225 t4_write_reg(adap, EPIO_REG(DATA1), mask0 >> 32);
2226 t4_write_reg(adap, EPIO_REG(DATA2), mask1);
2227 t4_write_reg(adap, EPIO_REG(DATA3), mask1 >> 32);
2228
2229 for (i = 0; i < NWOL_PAT; i++, map >>= 1) {
2230 if (!(map & 1))
2231 continue;
2232
2233 /* write byte masks */
2234 t4_write_reg(adap, EPIO_REG(DATA0), mask0);
2235 t4_write_reg(adap, EPIO_REG(OP), ADDRESS(i) | EPIOWR);
2236 t4_read_reg(adap, EPIO_REG(OP)); /* flush */
2237 if (t4_read_reg(adap, EPIO_REG(OP)) & BUSY)
2238 return -ETIMEDOUT;
2239
2240 /* write CRC */
2241 t4_write_reg(adap, EPIO_REG(DATA0), crc);
2242 t4_write_reg(adap, EPIO_REG(OP), ADDRESS(i + 32) | EPIOWR);
2243 t4_read_reg(adap, EPIO_REG(OP)); /* flush */
2244 if (t4_read_reg(adap, EPIO_REG(OP)) & BUSY)
2245 return -ETIMEDOUT;
2246 }
2247#undef EPIO_REG
2248
2249 t4_set_reg_field(adap, PORT_REG(port, XGMAC_PORT_CFG2), 0, PATEN);
2250 return 0;
2251}
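
/*
 * Illustrative sketch (hypothetical helper, not part of the driver): a
 * suspend path that only cares about magic packets might arm magic-packet
 * wake-up with the port's MAC address and explicitly leave the pattern
 * filters disabled.
 */
static int example_arm_wol(struct adapter *adap, unsigned int port,
			   const u8 *mac)
{
	/* wake on magic packets addressed to @mac */
	t4_wol_magic_enable(adap, port, mac);

	/* pattern-based wake-up stays off */
	return t4_wol_pat_enable(adap, port, 0, 0, 0, 0, false);
}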
2252
2253#define INIT_CMD(var, cmd, rd_wr) do { \
2254 (var).op_to_write = htonl(FW_CMD_OP(FW_##cmd##_CMD) | \
2255 FW_CMD_REQUEST | FW_CMD_##rd_wr); \
2256 (var).retval_len16 = htonl(FW_LEN16(var)); \
2257} while (0)
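
/*
 * For reference, INIT_CMD(c, BYE, WRITE) expands roughly to
 *
 *	c.op_to_write  = htonl(FW_CMD_OP(FW_BYE_CMD) |
 *			       FW_CMD_REQUEST | FW_CMD_WRITE);
 *	c.retval_len16 = htonl(FW_LEN16(c));
 *
 * i.e. it fills in the common opcode/length header shared by the simple
 * firmware commands issued below.
 */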
2258
2259/**
2260 * t4_mdio_rd - read a PHY register through MDIO
2261 * @adap: the adapter
2262 * @mbox: mailbox to use for the FW command
2263 * @phy_addr: the PHY address
2264 * @mmd: the PHY MMD to access (0 for clause 22 PHYs)
2265 * @reg: the register to read
2266 * @valp: where to store the value
2267 *
2268 * Issues a FW command through the given mailbox to read a PHY register.
2269 */
2270int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
2271 unsigned int mmd, unsigned int reg, u16 *valp)
2272{
2273 int ret;
2274 struct fw_ldst_cmd c;
2275
2276 memset(&c, 0, sizeof(c));
2277 c.op_to_addrspace = htonl(FW_CMD_OP(FW_LDST_CMD) | FW_CMD_REQUEST |
2278 FW_CMD_READ | FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO));
2279 c.cycles_to_len16 = htonl(FW_LEN16(c));
2280 c.u.mdio.paddr_mmd = htons(FW_LDST_CMD_PADDR(phy_addr) |
2281 FW_LDST_CMD_MMD(mmd));
2282 c.u.mdio.raddr = htons(reg);
2283
2284 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
2285 if (ret == 0)
2286 *valp = ntohs(c.u.mdio.rval);
2287 return ret;
2288}
2289
2290/**
2291 * t4_mdio_wr - write a PHY register through MDIO
2292 * @adap: the adapter
2293 * @mbox: mailbox to use for the FW command
2294 * @phy_addr: the PHY address
2295 * @mmd: the PHY MMD to access (0 for clause 22 PHYs)
2296 * @reg: the register to write
2297 * @valp: value to write
2298 *
2299 * Issues a FW command through the given mailbox to write a PHY register.
2300 */
2301int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
2302 unsigned int mmd, unsigned int reg, u16 val)
2303{
2304 struct fw_ldst_cmd c;
2305
2306 memset(&c, 0, sizeof(c));
2307 c.op_to_addrspace = htonl(FW_CMD_OP(FW_LDST_CMD) | FW_CMD_REQUEST |
2308 FW_CMD_WRITE | FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO));
2309 c.cycles_to_len16 = htonl(FW_LEN16(c));
2310 c.u.mdio.paddr_mmd = htons(FW_LDST_CMD_PADDR(phy_addr) |
2311 FW_LDST_CMD_MMD(mmd));
2312 c.u.mdio.raddr = htons(reg);
2313 c.u.mdio.rval = htons(val);
2314
2315 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2316}
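
/*
 * Illustrative sketch (hypothetical helper): reading the PMA/PMD device
 * identifier of a clause-45 PHY at address 0 through the firmware mailbox.
 * MDIO_MMD_PMAPMD and MDIO_DEVID1 are assumed to come from <linux/mdio.h>;
 * whether a given board routes its PHY this way is an assumption of the
 * example.
 */
static int example_read_phy_id(struct adapter *adap, unsigned int mbox,
			       u16 *id)
{
	return t4_mdio_rd(adap, mbox, 0, MDIO_MMD_PMAPMD, MDIO_DEVID1, id);
}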
2317
2318/**
2319 * t4_fw_hello - establish communication with FW
2320 * @adap: the adapter
2321 * @mbox: mailbox to use for the FW command
2322 * @evt_mbox: mailbox to receive async FW events
2323 * @master: specifies the caller's willingness to be the device master
2324 * @state: returns the current device state
2325 *
2326 * Issues a command to establish communication with FW.
2327 */
2328int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
2329 enum dev_master master, enum dev_state *state)
2330{
2331 int ret;
2332 struct fw_hello_cmd c;
2333
2334 INIT_CMD(c, HELLO, WRITE);
2335 c.err_to_mbasyncnot = htonl(
2336 FW_HELLO_CMD_MASTERDIS(master == MASTER_CANT) |
2337 FW_HELLO_CMD_MASTERFORCE(master == MASTER_MUST) |
2338 FW_HELLO_CMD_MBMASTER(master == MASTER_MUST ? mbox : 0xff) |
2339 FW_HELLO_CMD_MBASYNCNOT(evt_mbox));
2340
2341 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
2342 if (ret == 0 && state) {
2343 u32 v = ntohl(c.err_to_mbasyncnot);
2344 if (v & FW_HELLO_CMD_INIT)
2345 *state = DEV_STATE_INIT;
2346 else if (v & FW_HELLO_CMD_ERR)
2347 *state = DEV_STATE_ERR;
2348 else
2349 *state = DEV_STATE_UNINIT;
2350 }
2351 return ret;
2352}
2353
2354/**
2355 * t4_fw_bye - end communication with FW
2356 * @adap: the adapter
2357 * @mbox: mailbox to use for the FW command
2358 *
2359 * Issues a command to terminate communication with FW.
2360 */
2361int t4_fw_bye(struct adapter *adap, unsigned int mbox)
2362{
2363 struct fw_bye_cmd c;
2364
2365 INIT_CMD(c, BYE, WRITE);
2366 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2367}
2368
2369/**
 2370 * t4_early_init - ask FW to initialize the device
2371 * @adap: the adapter
2372 * @mbox: mailbox to use for the FW command
2373 *
2374 * Issues a command to FW to partially initialize the device. This
2375 * performs initialization that generally doesn't depend on user input.
2376 */
2377int t4_early_init(struct adapter *adap, unsigned int mbox)
2378{
2379 struct fw_initialize_cmd c;
2380
2381 INIT_CMD(c, INITIALIZE, WRITE);
2382 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2383}
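
/*
 * Illustrative sketch (hypothetical probe fragment, not part of the
 * driver): a PF that insists on being master would say hello first and
 * run the one-time initialization only if the firmware reports the device
 * as still uninitialized.
 */
static int example_fw_handshake(struct adapter *adap, unsigned int mbox)
{
	enum dev_state state;
	int ret;

	ret = t4_fw_hello(adap, mbox, mbox, MASTER_MUST, &state);
	if (ret)
		return ret;
	if (state == DEV_STATE_UNINIT)
		ret = t4_early_init(adap, mbox);
	return ret;
}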
2384
2385/**
2386 * t4_fw_reset - issue a reset to FW
2387 * @adap: the adapter
2388 * @mbox: mailbox to use for the FW command
2389 * @reset: specifies the type of reset to perform
2390 *
2391 * Issues a reset command of the specified type to FW.
2392 */
2393int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset)
2394{
2395 struct fw_reset_cmd c;
2396
2397 INIT_CMD(c, RESET, WRITE);
2398 c.val = htonl(reset);
2399 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2400}
2401
2402/**
2403 * t4_query_params - query FW or device parameters
2404 * @adap: the adapter
2405 * @mbox: mailbox to use for the FW command
2406 * @pf: the PF
2407 * @vf: the VF
2408 * @nparams: the number of parameters
2409 * @params: the parameter names
2410 * @val: the parameter values
2411 *
2412 * Reads the value of FW or device parameters. Up to 7 parameters can be
2413 * queried at once.
2414 */
2415int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
2416 unsigned int vf, unsigned int nparams, const u32 *params,
2417 u32 *val)
2418{
2419 int i, ret;
2420 struct fw_params_cmd c;
2421 __be32 *p = &c.param[0].mnem;
2422
2423 if (nparams > 7)
2424 return -EINVAL;
2425
2426 memset(&c, 0, sizeof(c));
2427 c.op_to_vfn = htonl(FW_CMD_OP(FW_PARAMS_CMD) | FW_CMD_REQUEST |
2428 FW_CMD_READ | FW_PARAMS_CMD_PFN(pf) |
2429 FW_PARAMS_CMD_VFN(vf));
2430 c.retval_len16 = htonl(FW_LEN16(c));
2431 for (i = 0; i < nparams; i++, p += 2)
2432 *p = htonl(*params++);
2433
2434 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
2435 if (ret == 0)
2436 for (i = 0, p = &c.param[0].val; i < nparams; i++, p += 2)
2437 *val++ = ntohl(*p);
2438 return ret;
2439}
2440
2441/**
2442 * t4_set_params - sets FW or device parameters
2443 * @adap: the adapter
2444 * @mbox: mailbox to use for the FW command
2445 * @pf: the PF
2446 * @vf: the VF
2447 * @nparams: the number of parameters
2448 * @params: the parameter names
2449 * @val: the parameter values
2450 *
2451 * Sets the value of FW or device parameters. Up to 7 parameters can be
2452 * specified at once.
2453 */
2454int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
2455 unsigned int vf, unsigned int nparams, const u32 *params,
2456 const u32 *val)
2457{
2458 struct fw_params_cmd c;
2459 __be32 *p = &c.param[0].mnem;
2460
2461 if (nparams > 7)
2462 return -EINVAL;
2463
2464 memset(&c, 0, sizeof(c));
2465 c.op_to_vfn = htonl(FW_CMD_OP(FW_PARAMS_CMD) | FW_CMD_REQUEST |
2466 FW_CMD_WRITE | FW_PARAMS_CMD_PFN(pf) |
2467 FW_PARAMS_CMD_VFN(vf));
2468 c.retval_len16 = htonl(FW_LEN16(c));
2469 while (nparams--) {
2470 *p++ = htonl(*params++);
2471 *p++ = htonl(*val++);
2472 }
2473
2474 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2475}
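
/*
 * Illustrative sketch (hypothetical helper): reading a handful of firmware
 * parameters and writing the same values back.  The 32-bit parameter names
 * in @params are assumed to have been composed by the caller with the
 * FW_PARAMS_* macros from t4fw_api.h.
 */
static int example_echo_params(struct adapter *adap, unsigned int mbox,
			       unsigned int pf, unsigned int vf,
			       const u32 *params, unsigned int n)
{
	u32 val[7];
	int ret;

	if (n > 7)		/* both calls take at most 7 at a time */
		return -EINVAL;

	ret = t4_query_params(adap, mbox, pf, vf, n, params, val);
	if (ret == 0)
		ret = t4_set_params(adap, mbox, pf, vf, n, params, val);
	return ret;
}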
2476
2477/**
2478 * t4_cfg_pfvf - configure PF/VF resource limits
2479 * @adap: the adapter
2480 * @mbox: mailbox to use for the FW command
2481 * @pf: the PF being configured
2482 * @vf: the VF being configured
2483 * @txq: the max number of egress queues
2484 * @txq_eth_ctrl: the max number of egress Ethernet or control queues
2485 * @rxqi: the max number of interrupt-capable ingress queues
2486 * @rxq: the max number of interruptless ingress queues
2487 * @tc: the PCI traffic class
2488 * @vi: the max number of virtual interfaces
2489 * @cmask: the channel access rights mask for the PF/VF
2490 * @pmask: the port access rights mask for the PF/VF
2491 * @nexact: the maximum number of exact MPS filters
2492 * @rcaps: read capabilities
2493 * @wxcaps: write/execute capabilities
2494 *
2495 * Configures resource limits and capabilities for a physical or virtual
2496 * function.
2497 */
2498int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
2499 unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl,
2500 unsigned int rxqi, unsigned int rxq, unsigned int tc,
2501 unsigned int vi, unsigned int cmask, unsigned int pmask,
2502 unsigned int nexact, unsigned int rcaps, unsigned int wxcaps)
2503{
2504 struct fw_pfvf_cmd c;
2505
2506 memset(&c, 0, sizeof(c));
2507 c.op_to_vfn = htonl(FW_CMD_OP(FW_PFVF_CMD) | FW_CMD_REQUEST |
2508 FW_CMD_WRITE | FW_PFVF_CMD_PFN(pf) |
2509 FW_PFVF_CMD_VFN(vf));
2510 c.retval_len16 = htonl(FW_LEN16(c));
2511 c.niqflint_niq = htonl(FW_PFVF_CMD_NIQFLINT(rxqi) |
2512 FW_PFVF_CMD_NIQ(rxq));
2513 c.cmask_to_neq = htonl(FW_PFVF_CMD_CMASK(cmask) |
2514 FW_PFVF_CMD_PMASK(pmask) |
2515 FW_PFVF_CMD_NEQ(txq));
2516 c.tc_to_nexactf = htonl(FW_PFVF_CMD_TC(tc) | FW_PFVF_CMD_NVI(vi) |
2517 FW_PFVF_CMD_NEXACTF(nexact));
2518 c.r_caps_to_nethctrl = htonl(FW_PFVF_CMD_R_CAPS(rcaps) |
2519 FW_PFVF_CMD_WX_CAPS(wxcaps) |
2520 FW_PFVF_CMD_NETHCTRL(txq_eth_ctrl));
2521 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2522}
2523
2524/**
2525 * t4_alloc_vi - allocate a virtual interface
2526 * @adap: the adapter
2527 * @mbox: mailbox to use for the FW command
2528 * @port: physical port associated with the VI
2529 * @pf: the PF owning the VI
2530 * @vf: the VF owning the VI
2531 * @nmac: number of MAC addresses needed (1 to 5)
2532 * @mac: the MAC addresses of the VI
2533 * @rss_size: size of RSS table slice associated with this VI
2534 *
2535 * Allocates a virtual interface for the given physical port. If @mac is
2536 * not %NULL it contains the MAC addresses of the VI as assigned by FW.
 2538 * @mac should be large enough to hold @nmac Ethernet addresses; they are
 2539 * stored consecutively, so the space needed is @nmac * 6 bytes.
2539 * Returns a negative error number or the non-negative VI id.
2540 */
2541int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
2542 unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
2543 unsigned int *rss_size)
2544{
2545 int ret;
2546 struct fw_vi_cmd c;
2547
2548 memset(&c, 0, sizeof(c));
2549 c.op_to_vfn = htonl(FW_CMD_OP(FW_VI_CMD) | FW_CMD_REQUEST |
2550 FW_CMD_WRITE | FW_CMD_EXEC |
2551 FW_VI_CMD_PFN(pf) | FW_VI_CMD_VFN(vf));
2552 c.alloc_to_len16 = htonl(FW_VI_CMD_ALLOC | FW_LEN16(c));
2553 c.portid_pkd = FW_VI_CMD_PORTID(port);
2554 c.nmac = nmac - 1;
2555
2556 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
2557 if (ret)
2558 return ret;
2559
2560 if (mac) {
2561 memcpy(mac, c.mac, sizeof(c.mac));
2562 switch (nmac) {
2563 case 5:
2564 memcpy(mac + 24, c.nmac3, sizeof(c.nmac3));
2565 case 4:
2566 memcpy(mac + 18, c.nmac2, sizeof(c.nmac2));
2567 case 3:
2568 memcpy(mac + 12, c.nmac1, sizeof(c.nmac1));
2569 case 2:
2570 memcpy(mac + 6, c.nmac0, sizeof(c.nmac0));
2571 }
2572 }
2573 if (rss_size)
2574 *rss_size = FW_VI_CMD_RSSSIZE_GET(ntohs(c.rsssize_pkd));
2575 return ntohs(c.viid_pkd);
2576}
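
/*
 * Illustrative sketch (hypothetical call): allocating a VI with two MAC
 * addresses.  Per the comment above, @mac must provide nmac * 6 bytes laid
 * out back to back; the return value is the VI id or a negative error.
 */
static int example_alloc_vi(struct adapter *adap, unsigned int mbox,
			    unsigned int port, unsigned int pf,
			    unsigned int vf)
{
	u8 mac[2 * 6];			/* room for nmac == 2 addresses */
	unsigned int rss_size;

	return t4_alloc_vi(adap, mbox, port, pf, vf, 2, mac, &rss_size);
}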
2577
2578/**
2579 * t4_free_vi - free a virtual interface
2580 * @adap: the adapter
2581 * @mbox: mailbox to use for the FW command
2582 * @pf: the PF owning the VI
2583 * @vf: the VF owning the VI
 2584 * @viid: virtual interface identifier
 2585 *
 2586 * Frees a previously allocated virtual interface.
2587 */
2588int t4_free_vi(struct adapter *adap, unsigned int mbox, unsigned int pf,
2589 unsigned int vf, unsigned int viid)
2590{
2591 struct fw_vi_cmd c;
2592
2593 memset(&c, 0, sizeof(c));
2594 c.op_to_vfn = htonl(FW_CMD_OP(FW_VI_CMD) | FW_CMD_REQUEST |
2595 FW_CMD_EXEC | FW_VI_CMD_PFN(pf) |
2596 FW_VI_CMD_VFN(vf));
2597 c.alloc_to_len16 = htonl(FW_VI_CMD_FREE | FW_LEN16(c));
2598 c.viid_pkd = htons(FW_VI_CMD_VIID(viid));
2599 return t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
2600}
2601
2602/**
2603 * t4_set_rxmode - set Rx properties of a virtual interface
2604 * @adap: the adapter
2605 * @mbox: mailbox to use for the FW command
2606 * @viid: the VI id
2607 * @mtu: the new MTU or -1
2608 * @promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change
2609 * @all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change
2610 * @bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change
2611 * @sleep_ok: if true we may sleep while awaiting command completion
2612 *
2613 * Sets Rx properties of a virtual interface.
2614 */
2615int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
2616 int mtu, int promisc, int all_multi, int bcast, bool sleep_ok)
2617{
2618 struct fw_vi_rxmode_cmd c;
2619
2620 /* convert to FW values */
2621 if (mtu < 0)
2622 mtu = FW_RXMODE_MTU_NO_CHG;
2623 if (promisc < 0)
2624 promisc = FW_VI_RXMODE_CMD_PROMISCEN_MASK;
2625 if (all_multi < 0)
2626 all_multi = FW_VI_RXMODE_CMD_ALLMULTIEN_MASK;
2627 if (bcast < 0)
2628 bcast = FW_VI_RXMODE_CMD_BROADCASTEN_MASK;
2629
2630 memset(&c, 0, sizeof(c));
2631 c.op_to_viid = htonl(FW_CMD_OP(FW_VI_RXMODE_CMD) | FW_CMD_REQUEST |
2632 FW_CMD_WRITE | FW_VI_RXMODE_CMD_VIID(viid));
2633 c.retval_len16 = htonl(FW_LEN16(c));
2634 c.mtu_to_broadcasten = htonl(FW_VI_RXMODE_CMD_MTU(mtu) |
2635 FW_VI_RXMODE_CMD_PROMISCEN(promisc) |
2636 FW_VI_RXMODE_CMD_ALLMULTIEN(all_multi) |
2637 FW_VI_RXMODE_CMD_BROADCASTEN(bcast));
2638 return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
2639}
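
/*
 * For example, a caller that only wants to turn promiscuous mode on leaves
 * every other setting alone by passing -1 for it:
 *
 *	t4_set_rxmode(adap, mbox, viid, -1, 1, -1, -1, true);
 */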
2640
2641/**
2642 * t4_alloc_mac_filt - allocates exact-match filters for MAC addresses
2643 * @adap: the adapter
2644 * @mbox: mailbox to use for the FW command
2645 * @viid: the VI id
2646 * @free: if true any existing filters for this VI id are first removed
2647 * @naddr: the number of MAC addresses to allocate filters for (up to 7)
2648 * @addr: the MAC address(es)
2649 * @idx: where to store the index of each allocated filter
2650 * @hash: pointer to hash address filter bitmap
2651 * @sleep_ok: call is allowed to sleep
2652 *
2653 * Allocates an exact-match filter for each of the supplied addresses and
2654 * sets it to the corresponding address. If @idx is not %NULL it should
2655 * have at least @naddr entries, each of which will be set to the index of
2656 * the filter allocated for the corresponding MAC address. If a filter
2657 * could not be allocated for an address its index is set to 0xffff.
 2658 * If @hash is not %NULL, addresses that fail to allocate an exact filter
 2659 * are hashed and the hash filter bitmap pointed at by @hash is updated.
2660 *
2661 * Returns a negative error number or the number of filters allocated.
2662 */
2663int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
2664 unsigned int viid, bool free, unsigned int naddr,
2665 const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok)
2666{
2667 int i, ret;
2668 struct fw_vi_mac_cmd c;
2669 struct fw_vi_mac_exact *p;
2670
2671 if (naddr > 7)
2672 return -EINVAL;
2673
2674 memset(&c, 0, sizeof(c));
2675 c.op_to_viid = htonl(FW_CMD_OP(FW_VI_MAC_CMD) | FW_CMD_REQUEST |
2676 FW_CMD_WRITE | (free ? FW_CMD_EXEC : 0) |
2677 FW_VI_MAC_CMD_VIID(viid));
2678 c.freemacs_to_len16 = htonl(FW_VI_MAC_CMD_FREEMACS(free) |
2679 FW_CMD_LEN16((naddr + 2) / 2));
2680
2681 for (i = 0, p = c.u.exact; i < naddr; i++, p++) {
2682 p->valid_to_idx = htons(FW_VI_MAC_CMD_VALID |
2683 FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC));
2684 memcpy(p->macaddr, addr[i], sizeof(p->macaddr));
2685 }
2686
2687 ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
2688 if (ret)
2689 return ret;
2690
2691 for (i = 0, p = c.u.exact; i < naddr; i++, p++) {
2692 u16 index = FW_VI_MAC_CMD_IDX_GET(ntohs(p->valid_to_idx));
2693
2694 if (idx)
2695 idx[i] = index >= NEXACT_MAC ? 0xffff : index;
2696 if (index < NEXACT_MAC)
2697 ret++;
2698 else if (hash)
2699 *hash |= (1 << hash_mac_addr(addr[i]));
2700 }
2701 return ret;
2702}
2703
2704/**
2705 * t4_change_mac - modifies the exact-match filter for a MAC address
2706 * @adap: the adapter
2707 * @mbox: mailbox to use for the FW command
2708 * @viid: the VI id
2709 * @idx: index of existing filter for old value of MAC address, or -1
2710 * @addr: the new MAC address value
2711 * @persist: whether a new MAC allocation should be persistent
2712 * @add_smt: if true also add the address to the HW SMT
2713 *
2714 * Modifies an exact-match filter and sets it to the new MAC address.
 2715 * Note that in general it is not possible to modify the value of a given
 2716 * filter, so the generic way to change an address filter is to free the
 2717 * filter holding the old address and allocate a new one for the new
 2718 * address. @idx can be -1 if the address is a new addition.
2719 *
2720 * Returns a negative error number or the index of the filter with the new
2721 * MAC value.
2722 */
2723int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
2724 int idx, const u8 *addr, bool persist, bool add_smt)
2725{
2726 int ret, mode;
2727 struct fw_vi_mac_cmd c;
2728 struct fw_vi_mac_exact *p = c.u.exact;
2729
2730 if (idx < 0) /* new allocation */
2731 idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC;
2732 mode = add_smt ? FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY;
2733
2734 memset(&c, 0, sizeof(c));
2735 c.op_to_viid = htonl(FW_CMD_OP(FW_VI_MAC_CMD) | FW_CMD_REQUEST |
2736 FW_CMD_WRITE | FW_VI_MAC_CMD_VIID(viid));
2737 c.freemacs_to_len16 = htonl(FW_CMD_LEN16(1));
2738 p->valid_to_idx = htons(FW_VI_MAC_CMD_VALID |
2739 FW_VI_MAC_CMD_SMAC_RESULT(mode) |
2740 FW_VI_MAC_CMD_IDX(idx));
2741 memcpy(p->macaddr, addr, sizeof(p->macaddr));
2742
2743 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
2744 if (ret == 0) {
2745 ret = FW_VI_MAC_CMD_IDX_GET(ntohs(p->valid_to_idx));
2746 if (ret >= NEXACT_MAC)
2747 ret = -ENOMEM;
2748 }
2749 return ret;
2750}
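
/*
 * Illustrative sketch (hypothetical call): installing a station address for
 * the first time.  Passing @idx == -1 asks the firmware to pick a free
 * exact-match slot; the returned index would then be remembered so the same
 * filter can be updated on later address changes.
 */
static int example_set_station_mac(struct adapter *adap, unsigned int mbox,
				   unsigned int viid, const u8 *mac)
{
	return t4_change_mac(adap, mbox, viid, -1, mac, true, true);
}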
2751
2752/**
2753 * t4_set_addr_hash - program the MAC inexact-match hash filter
2754 * @adap: the adapter
2755 * @mbox: mailbox to use for the FW command
2756 * @viid: the VI id
2757 * @ucast: whether the hash filter should also match unicast addresses
2758 * @vec: the value to be written to the hash filter
2759 * @sleep_ok: call is allowed to sleep
2760 *
2761 * Sets the 64-bit inexact-match hash filter for a virtual interface.
2762 */
2763int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid,
2764 bool ucast, u64 vec, bool sleep_ok)
2765{
2766 struct fw_vi_mac_cmd c;
2767
2768 memset(&c, 0, sizeof(c));
2769 c.op_to_viid = htonl(FW_CMD_OP(FW_VI_MAC_CMD) | FW_CMD_REQUEST |
 2770			     FW_CMD_WRITE | FW_VI_MAC_CMD_VIID(viid));
2771 c.freemacs_to_len16 = htonl(FW_VI_MAC_CMD_HASHVECEN |
2772 FW_VI_MAC_CMD_HASHUNIEN(ucast) |
2773 FW_CMD_LEN16(1));
2774 c.u.hash.hashvec = cpu_to_be64(vec);
2775 return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
2776}
2777
2778/**
2779 * t4_enable_vi - enable/disable a virtual interface
2780 * @adap: the adapter
2781 * @mbox: mailbox to use for the FW command
2782 * @viid: the VI id
2783 * @rx_en: 1=enable Rx, 0=disable Rx
2784 * @tx_en: 1=enable Tx, 0=disable Tx
2785 *
2786 * Enables/disables a virtual interface.
2787 */
2788int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid,
2789 bool rx_en, bool tx_en)
2790{
2791 struct fw_vi_enable_cmd c;
2792
2793 memset(&c, 0, sizeof(c));
2794 c.op_to_viid = htonl(FW_CMD_OP(FW_VI_ENABLE_CMD) | FW_CMD_REQUEST |
2795 FW_CMD_EXEC | FW_VI_ENABLE_CMD_VIID(viid));
2796 c.ien_to_len16 = htonl(FW_VI_ENABLE_CMD_IEN(rx_en) |
2797 FW_VI_ENABLE_CMD_EEN(tx_en) | FW_LEN16(c));
2798 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2799}
2800
2801/**
2802 * t4_identify_port - identify a VI's port by blinking its LED
2803 * @adap: the adapter
2804 * @mbox: mailbox to use for the FW command
2805 * @viid: the VI id
2806 * @nblinks: how many times to blink LED at 2.5 Hz
2807 *
2808 * Identifies a VI's port by blinking its LED.
2809 */
2810int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
2811 unsigned int nblinks)
2812{
2813 struct fw_vi_enable_cmd c;
2814
2815 c.op_to_viid = htonl(FW_CMD_OP(FW_VI_ENABLE_CMD) | FW_CMD_REQUEST |
2816 FW_CMD_EXEC | FW_VI_ENABLE_CMD_VIID(viid));
2817 c.ien_to_len16 = htonl(FW_VI_ENABLE_CMD_LED | FW_LEN16(c));
2818 c.blinkdur = htons(nblinks);
2819 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2820}
2821
2822/**
2823 * t4_iq_start_stop - enable/disable an ingress queue and its FLs
2824 * @adap: the adapter
2825 * @mbox: mailbox to use for the FW command
2826 * @start: %true to enable the queues, %false to disable them
2827 * @pf: the PF owning the queues
2828 * @vf: the VF owning the queues
2829 * @iqid: ingress queue id
2830 * @fl0id: FL0 queue id or 0xffff if no attached FL0
2831 * @fl1id: FL1 queue id or 0xffff if no attached FL1
2832 *
2833 * Starts or stops an ingress queue and its associated FLs, if any.
2834 */
2835int t4_iq_start_stop(struct adapter *adap, unsigned int mbox, bool start,
2836 unsigned int pf, unsigned int vf, unsigned int iqid,
2837 unsigned int fl0id, unsigned int fl1id)
2838{
2839 struct fw_iq_cmd c;
2840
2841 memset(&c, 0, sizeof(c));
2842 c.op_to_vfn = htonl(FW_CMD_OP(FW_IQ_CMD) | FW_CMD_REQUEST |
2843 FW_CMD_EXEC | FW_IQ_CMD_PFN(pf) |
2844 FW_IQ_CMD_VFN(vf));
2845 c.alloc_to_len16 = htonl(FW_IQ_CMD_IQSTART(start) |
2846 FW_IQ_CMD_IQSTOP(!start) | FW_LEN16(c));
2847 c.iqid = htons(iqid);
2848 c.fl0id = htons(fl0id);
2849 c.fl1id = htons(fl1id);
2850 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2851}
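
/*
 * For example, quiescing an ingress queue that has FL0 attached but no FL1
 * would pass 0xffff for the missing free list:
 *
 *	t4_iq_start_stop(adap, mbox, false, pf, vf, iqid, fl0id, 0xffff);
 */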
2852
2853/**
2854 * t4_iq_free - free an ingress queue and its FLs
2855 * @adap: the adapter
2856 * @mbox: mailbox to use for the FW command
2857 * @pf: the PF owning the queues
2858 * @vf: the VF owning the queues
2859 * @iqtype: the ingress queue type
2860 * @iqid: ingress queue id
2861 * @fl0id: FL0 queue id or 0xffff if no attached FL0
2862 * @fl1id: FL1 queue id or 0xffff if no attached FL1
2863 *
2864 * Frees an ingress queue and its associated FLs, if any.
2865 */
2866int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
2867 unsigned int vf, unsigned int iqtype, unsigned int iqid,
2868 unsigned int fl0id, unsigned int fl1id)
2869{
2870 struct fw_iq_cmd c;
2871
2872 memset(&c, 0, sizeof(c));
2873 c.op_to_vfn = htonl(FW_CMD_OP(FW_IQ_CMD) | FW_CMD_REQUEST |
2874 FW_CMD_EXEC | FW_IQ_CMD_PFN(pf) |
2875 FW_IQ_CMD_VFN(vf));
2876 c.alloc_to_len16 = htonl(FW_IQ_CMD_FREE | FW_LEN16(c));
2877 c.type_to_iqandstindex = htonl(FW_IQ_CMD_TYPE(iqtype));
2878 c.iqid = htons(iqid);
2879 c.fl0id = htons(fl0id);
2880 c.fl1id = htons(fl1id);
2881 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2882}
2883
2884/**
2885 * t4_eth_eq_free - free an Ethernet egress queue
2886 * @adap: the adapter
2887 * @mbox: mailbox to use for the FW command
2888 * @pf: the PF owning the queue
2889 * @vf: the VF owning the queue
2890 * @eqid: egress queue id
2891 *
2892 * Frees an Ethernet egress queue.
2893 */
2894int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
2895 unsigned int vf, unsigned int eqid)
2896{
2897 struct fw_eq_eth_cmd c;
2898
2899 memset(&c, 0, sizeof(c));
2900 c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_ETH_CMD) | FW_CMD_REQUEST |
2901 FW_CMD_EXEC | FW_EQ_ETH_CMD_PFN(pf) |
2902 FW_EQ_ETH_CMD_VFN(vf));
2903 c.alloc_to_len16 = htonl(FW_EQ_ETH_CMD_FREE | FW_LEN16(c));
2904 c.eqid_pkd = htonl(FW_EQ_ETH_CMD_EQID(eqid));
2905 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2906}
2907
2908/**
2909 * t4_ctrl_eq_free - free a control egress queue
2910 * @adap: the adapter
2911 * @mbox: mailbox to use for the FW command
2912 * @pf: the PF owning the queue
2913 * @vf: the VF owning the queue
2914 * @eqid: egress queue id
2915 *
2916 * Frees a control egress queue.
2917 */
2918int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
2919 unsigned int vf, unsigned int eqid)
2920{
2921 struct fw_eq_ctrl_cmd c;
2922
2923 memset(&c, 0, sizeof(c));
2924 c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_CTRL_CMD) | FW_CMD_REQUEST |
2925 FW_CMD_EXEC | FW_EQ_CTRL_CMD_PFN(pf) |
2926 FW_EQ_CTRL_CMD_VFN(vf));
2927 c.alloc_to_len16 = htonl(FW_EQ_CTRL_CMD_FREE | FW_LEN16(c));
2928 c.cmpliqid_eqid = htonl(FW_EQ_CTRL_CMD_EQID(eqid));
2929 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2930}
2931
2932/**
2933 * t4_ofld_eq_free - free an offload egress queue
2934 * @adap: the adapter
2935 * @mbox: mailbox to use for the FW command
2936 * @pf: the PF owning the queue
2937 * @vf: the VF owning the queue
2938 * @eqid: egress queue id
2939 *
 2940 * Frees an offload egress queue.
2941 */
2942int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
2943 unsigned int vf, unsigned int eqid)
2944{
2945 struct fw_eq_ofld_cmd c;
2946
2947 memset(&c, 0, sizeof(c));
2948 c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_OFLD_CMD) | FW_CMD_REQUEST |
2949 FW_CMD_EXEC | FW_EQ_OFLD_CMD_PFN(pf) |
2950 FW_EQ_OFLD_CMD_VFN(vf));
2951 c.alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_FREE | FW_LEN16(c));
2952 c.eqid_pkd = htonl(FW_EQ_OFLD_CMD_EQID(eqid));
2953 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2954}
2955
2956/**
2957 * t4_handle_fw_rpl - process a FW reply message
2958 * @adap: the adapter
2959 * @rpl: start of the FW message
2960 *
2961 * Processes a FW message, such as link state change messages.
2962 */
2963int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl)
2964{
2965 u8 opcode = *(const u8 *)rpl;
2966
2967 if (opcode == FW_PORT_CMD) { /* link/module state change message */
2968 int speed = 0, fc = 0;
2969 const struct fw_port_cmd *p = (void *)rpl;
2970 int chan = FW_PORT_CMD_PORTID_GET(ntohl(p->op_to_portid));
2971 int port = adap->chan_map[chan];
2972 struct port_info *pi = adap2pinfo(adap, port);
2973 struct link_config *lc = &pi->link_cfg;
2974 u32 stat = ntohl(p->u.info.lstatus_to_modtype);
2975 int link_ok = (stat & FW_PORT_CMD_LSTATUS) != 0;
2976 u32 mod = FW_PORT_CMD_MODTYPE_GET(stat);
2977
2978 if (stat & FW_PORT_CMD_RXPAUSE)
2979 fc |= PAUSE_RX;
2980 if (stat & FW_PORT_CMD_TXPAUSE)
2981 fc |= PAUSE_TX;
2982 if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100M))
2983 speed = SPEED_100;
2984 else if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_1G))
2985 speed = SPEED_1000;
2986 else if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_10G))
2987 speed = SPEED_10000;
2988
2989 if (link_ok != lc->link_ok || speed != lc->speed ||
2990 fc != lc->fc) { /* something changed */
2991 lc->link_ok = link_ok;
2992 lc->speed = speed;
2993 lc->fc = fc;
2994 t4_os_link_changed(adap, port, link_ok);
2995 }
2996 if (mod != pi->mod_type) {
2997 pi->mod_type = mod;
2998 t4_os_portmod_changed(adap, port);
2999 }
3000 }
3001 return 0;
3002}
3003
3004static void __devinit get_pci_mode(struct adapter *adapter,
3005 struct pci_params *p)
3006{
3007 u16 val;
3008 u32 pcie_cap = pci_pcie_cap(adapter->pdev);
3009
3010 if (pcie_cap) {
3011 pci_read_config_word(adapter->pdev, pcie_cap + PCI_EXP_LNKSTA,
3012 &val);
3013 p->speed = val & PCI_EXP_LNKSTA_CLS;
3014 p->width = (val & PCI_EXP_LNKSTA_NLW) >> 4;
3015 }
3016}
3017
3018/**
3019 * init_link_config - initialize a link's SW state
3020 * @lc: structure holding the link state
3021 * @caps: link capabilities
3022 *
3023 * Initializes the SW state maintained for each link, including the link's
3024 * capabilities and default speed/flow-control/autonegotiation settings.
3025 */
3026static void __devinit init_link_config(struct link_config *lc,
3027 unsigned int caps)
3028{
3029 lc->supported = caps;
3030 lc->requested_speed = 0;
3031 lc->speed = 0;
3032 lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
3033 if (lc->supported & FW_PORT_CAP_ANEG) {
3034 lc->advertising = lc->supported & ADVERT_MASK;
3035 lc->autoneg = AUTONEG_ENABLE;
3036 lc->requested_fc |= PAUSE_AUTONEG;
3037 } else {
3038 lc->advertising = 0;
3039 lc->autoneg = AUTONEG_DISABLE;
3040 }
3041}
3042
3043static int __devinit wait_dev_ready(struct adapter *adap)
3044{
3045 if (t4_read_reg(adap, PL_WHOAMI) != 0xffffffff)
3046 return 0;
3047 msleep(500);
3048 return t4_read_reg(adap, PL_WHOAMI) != 0xffffffff ? 0 : -EIO;
3049}
3050
3051/**
3052 * t4_prep_adapter - prepare SW and HW for operation
3053 * @adapter: the adapter
3055 *
3056 * Initialize adapter SW state for the various HW modules, set initial
3057 * values for some adapter tunables, take PHYs out of reset, and
3058 * initialize the MDIO interface.
3059 */
3060int __devinit t4_prep_adapter(struct adapter *adapter)
3061{
3062 int ret;
3063
3064 ret = wait_dev_ready(adapter);
3065 if (ret < 0)
3066 return ret;
3067
3068 get_pci_mode(adapter, &adapter->params.pci);
3069 adapter->params.rev = t4_read_reg(adapter, PL_REV);
3070
3071 ret = get_vpd_params(adapter, &adapter->params.vpd);
3072 if (ret < 0)
3073 return ret;
3074
3075 init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
3076
3077 /*
3078 * Default port for debugging in case we can't reach FW.
3079 */
3080 adapter->params.nports = 1;
3081 adapter->params.portvec = 1;
3082 return 0;
3083}
3084
3085int __devinit t4_port_init(struct adapter *adap, int mbox, int pf, int vf)
3086{
3087 u8 addr[6];
3088 int ret, i, j = 0;
3089 struct fw_port_cmd c;
3090
3091 memset(&c, 0, sizeof(c));
3092
3093 for_each_port(adap, i) {
3094 unsigned int rss_size;
3095 struct port_info *p = adap2pinfo(adap, i);
3096
3097 while ((adap->params.portvec & (1 << j)) == 0)
3098 j++;
3099
3100 c.op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) |
3101 FW_CMD_REQUEST | FW_CMD_READ |
3102 FW_PORT_CMD_PORTID(j));
3103 c.action_to_len16 = htonl(
3104 FW_PORT_CMD_ACTION(FW_PORT_ACTION_GET_PORT_INFO) |
3105 FW_LEN16(c));
3106 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
3107 if (ret)
3108 return ret;
3109
3110 ret = t4_alloc_vi(adap, mbox, j, pf, vf, 1, addr, &rss_size);
3111 if (ret < 0)
3112 return ret;
3113
3114 p->viid = ret;
3115 p->tx_chan = j;
3116 p->lport = j;
3117 p->rss_size = rss_size;
3118 memcpy(adap->port[i]->dev_addr, addr, ETH_ALEN);
3119 memcpy(adap->port[i]->perm_addr, addr, ETH_ALEN);
3120
3121 ret = ntohl(c.u.info.lstatus_to_modtype);
3122 p->mdio_addr = (ret & FW_PORT_CMD_MDIOCAP) ?
3123 FW_PORT_CMD_MDIOADDR_GET(ret) : -1;
3124 p->port_type = FW_PORT_CMD_PTYPE_GET(ret);
3125 p->mod_type = FW_PORT_CMD_MODTYPE_GET(ret);
3126
3127 init_link_config(&p->link_cfg, ntohs(c.u.info.pcap));
3128 j++;
3129 }
3130 return 0;
3131}
diff --git a/drivers/net/cxgb4/t4_hw.h b/drivers/net/cxgb4/t4_hw.h
new file mode 100644
index 000000000000..025623285c93
--- /dev/null
+++ b/drivers/net/cxgb4/t4_hw.h
@@ -0,0 +1,100 @@
1/*
2 * This file is part of the Chelsio T4 Ethernet driver for Linux.
3 *
4 * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved.
5 *
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
11 *
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
14 * conditions are met:
15 *
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
18 * disclaimer.
19 *
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 * SOFTWARE.
33 */
34
35#ifndef __T4_HW_H
36#define __T4_HW_H
37
38#include <linux/types.h>
39
40enum {
41 NCHAN = 4, /* # of HW channels */
42 MAX_MTU = 9600, /* max MAC MTU, excluding header + FCS */
43 EEPROMSIZE = 17408, /* Serial EEPROM physical size */
44 EEPROMVSIZE = 32768, /* Serial EEPROM virtual address space size */
45 RSS_NENTRIES = 2048, /* # of entries in RSS mapping table */
46 TCB_SIZE = 128, /* TCB size */
47 NMTUS = 16, /* size of MTU table */
48 NCCTRL_WIN = 32, /* # of congestion control windows */
49 NEXACT_MAC = 336, /* # of exact MAC address filters */
50 L2T_SIZE = 4096, /* # of L2T entries */
51 MBOX_LEN = 64, /* mailbox size in bytes */
52 TRACE_LEN = 112, /* length of trace data and mask */
53 FILTER_OPT_LEN = 36, /* filter tuple width for optional components */
54 NWOL_PAT = 8, /* # of WoL patterns */
55 WOL_PAT_LEN = 128, /* length of WoL patterns */
56};
57
58enum {
59 SF_PAGE_SIZE = 256, /* serial flash page size */
60 SF_SEC_SIZE = 64 * 1024, /* serial flash sector size */
61 SF_SIZE = SF_SEC_SIZE * 16, /* serial flash size */
62};
63
64enum { RSP_TYPE_FLBUF, RSP_TYPE_CPL, RSP_TYPE_INTR }; /* response entry types */
65
66enum { MBOX_OWNER_NONE, MBOX_OWNER_FW, MBOX_OWNER_DRV }; /* mailbox owners */
67
68enum {
69 SGE_MAX_WR_LEN = 512, /* max WR size in bytes */
70 SGE_NTIMERS = 6, /* # of interrupt holdoff timer values */
71 SGE_NCOUNTERS = 4, /* # of interrupt packet counter values */
72};
73
74struct sge_qstat { /* data written to SGE queue status entries */
75 __be32 qid;
76 __be16 cidx;
77 __be16 pidx;
78};
79
80/*
81 * Structure for last 128 bits of response descriptors
82 */
83struct rsp_ctrl {
84 __be32 hdrbuflen_pidx;
85 __be32 pldbuflen_qid;
86 union {
87 u8 type_gen;
88 __be64 last_flit;
89 };
90};
91
92#define RSPD_NEWBUF 0x80000000U
93#define RSPD_LEN 0x7fffffffU
94
95#define RSPD_GEN(x) ((x) >> 7)
96#define RSPD_TYPE(x) (((x) >> 4) & 3)
97
98#define QINTR_CNT_EN 0x1
99#define QINTR_TIMER_IDX(x) ((x) << 1)
100#endif /* __T4_HW_H */
diff --git a/drivers/net/cxgb4/t4_msg.h b/drivers/net/cxgb4/t4_msg.h
new file mode 100644
index 000000000000..fdb117443144
--- /dev/null
+++ b/drivers/net/cxgb4/t4_msg.h
@@ -0,0 +1,664 @@
1/*
2 * This file is part of the Chelsio T4 Ethernet driver for Linux.
3 *
4 * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved.
5 *
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
11 *
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
14 * conditions are met:
15 *
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
18 * disclaimer.
19 *
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 * SOFTWARE.
33 */
34
35#ifndef __T4_MSG_H
36#define __T4_MSG_H
37
38#include <linux/types.h>
39
40enum {
41 CPL_PASS_OPEN_REQ = 0x1,
42 CPL_PASS_ACCEPT_RPL = 0x2,
43 CPL_ACT_OPEN_REQ = 0x3,
44 CPL_SET_TCB_FIELD = 0x5,
45 CPL_GET_TCB = 0x6,
46 CPL_CLOSE_CON_REQ = 0x8,
47 CPL_CLOSE_LISTSRV_REQ = 0x9,
48 CPL_ABORT_REQ = 0xA,
49 CPL_ABORT_RPL = 0xB,
50 CPL_RX_DATA_ACK = 0xD,
51 CPL_TX_PKT = 0xE,
52 CPL_L2T_WRITE_REQ = 0x12,
53 CPL_TID_RELEASE = 0x1A,
54
55 CPL_CLOSE_LISTSRV_RPL = 0x20,
56 CPL_L2T_WRITE_RPL = 0x23,
57 CPL_PASS_OPEN_RPL = 0x24,
58 CPL_ACT_OPEN_RPL = 0x25,
59 CPL_PEER_CLOSE = 0x26,
60 CPL_ABORT_REQ_RSS = 0x2B,
61 CPL_ABORT_RPL_RSS = 0x2D,
62
63 CPL_CLOSE_CON_RPL = 0x32,
64 CPL_ISCSI_HDR = 0x33,
65 CPL_RDMA_CQE = 0x35,
66 CPL_RDMA_CQE_READ_RSP = 0x36,
67 CPL_RDMA_CQE_ERR = 0x37,
68 CPL_RX_DATA = 0x39,
69 CPL_SET_TCB_RPL = 0x3A,
70 CPL_RX_PKT = 0x3B,
71 CPL_RX_DDP_COMPLETE = 0x3F,
72
73 CPL_ACT_ESTABLISH = 0x40,
74 CPL_PASS_ESTABLISH = 0x41,
75 CPL_RX_DATA_DDP = 0x42,
76 CPL_PASS_ACCEPT_REQ = 0x44,
77
78 CPL_RDMA_READ_REQ = 0x60,
79
80 CPL_PASS_OPEN_REQ6 = 0x81,
81 CPL_ACT_OPEN_REQ6 = 0x83,
82
83 CPL_RDMA_TERMINATE = 0xA2,
84 CPL_RDMA_WRITE = 0xA4,
85 CPL_SGE_EGR_UPDATE = 0xA5,
86
87 CPL_TRACE_PKT = 0xB0,
88
89 CPL_FW4_MSG = 0xC0,
90 CPL_FW4_PLD = 0xC1,
91 CPL_FW4_ACK = 0xC3,
92
93 CPL_FW6_MSG = 0xE0,
94 CPL_FW6_PLD = 0xE1,
95 CPL_TX_PKT_LSO = 0xED,
96 CPL_TX_PKT_XT = 0xEE,
97
98 NUM_CPL_CMDS
99};
100
101enum CPL_error {
102 CPL_ERR_NONE = 0,
103 CPL_ERR_TCAM_FULL = 3,
104 CPL_ERR_BAD_LENGTH = 15,
105 CPL_ERR_BAD_ROUTE = 18,
106 CPL_ERR_CONN_RESET = 20,
107 CPL_ERR_CONN_EXIST_SYNRECV = 21,
108 CPL_ERR_CONN_EXIST = 22,
109 CPL_ERR_ARP_MISS = 23,
110 CPL_ERR_BAD_SYN = 24,
111 CPL_ERR_CONN_TIMEDOUT = 30,
112 CPL_ERR_XMIT_TIMEDOUT = 31,
113 CPL_ERR_PERSIST_TIMEDOUT = 32,
114 CPL_ERR_FINWAIT2_TIMEDOUT = 33,
115 CPL_ERR_KEEPALIVE_TIMEDOUT = 34,
116 CPL_ERR_RTX_NEG_ADVICE = 35,
117 CPL_ERR_PERSIST_NEG_ADVICE = 36,
118 CPL_ERR_ABORT_FAILED = 42,
119 CPL_ERR_IWARP_FLM = 50,
120};
121
122enum {
123 ULP_MODE_NONE = 0,
124 ULP_MODE_ISCSI = 2,
125 ULP_MODE_RDMA = 4,
126 ULP_MODE_FCOE = 6,
127};
128
129enum {
130 ULP_CRC_HEADER = 1 << 0,
131 ULP_CRC_DATA = 1 << 1
132};
133
134enum {
135 CPL_ABORT_SEND_RST = 0,
136 CPL_ABORT_NO_RST,
137};
138
139enum { /* TX_PKT_XT checksum types */
140 TX_CSUM_TCP = 0,
141 TX_CSUM_UDP = 1,
142 TX_CSUM_CRC16 = 4,
143 TX_CSUM_CRC32 = 5,
144 TX_CSUM_CRC32C = 6,
145 TX_CSUM_FCOE = 7,
146 TX_CSUM_TCPIP = 8,
147 TX_CSUM_UDPIP = 9,
148 TX_CSUM_TCPIP6 = 10,
149 TX_CSUM_UDPIP6 = 11,
150 TX_CSUM_IP = 12,
151};
152
153union opcode_tid {
154 __be32 opcode_tid;
155 u8 opcode;
156};
157
158#define CPL_OPCODE(x) ((x) << 24)
159#define MK_OPCODE_TID(opcode, tid) (CPL_OPCODE(opcode) | (tid))
160#define OPCODE_TID(cmd) ((cmd)->ot.opcode_tid)
161#define GET_TID(cmd) (ntohl(OPCODE_TID(cmd)) & 0xFFFFFF)
162
163/* partitioning of TID fields that also carry a queue id */
164#define GET_TID_TID(x) ((x) & 0x3fff)
165#define GET_TID_QID(x) (((x) >> 14) & 0x3ff)
166#define TID_QID(x) ((x) << 14)
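
/*
 * Worked example: a host-order opcode_tid value of 0x0300d234 decodes as
 * opcode 0x03 (CPL_ACT_OPEN_REQ) with GET_TID() == 0x00d234, which in turn
 * splits into GET_TID_QID() == 3 and GET_TID_TID() == 0x1234.
 */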
167
168struct rss_header {
169 u8 opcode;
170#if defined(__LITTLE_ENDIAN_BITFIELD)
171 u8 channel:2;
172 u8 filter_hit:1;
173 u8 filter_tid:1;
174 u8 hash_type:2;
175 u8 ipv6:1;
176 u8 send2fw:1;
177#else
178 u8 send2fw:1;
179 u8 ipv6:1;
180 u8 hash_type:2;
181 u8 filter_tid:1;
182 u8 filter_hit:1;
183 u8 channel:2;
184#endif
185 __be16 qid;
186 __be32 hash_val;
187};
188
189struct work_request_hdr {
190 __be32 wr_hi;
191 __be32 wr_mid;
192 __be64 wr_lo;
193};
194
195#define WR_HDR struct work_request_hdr wr
196
197struct cpl_pass_open_req {
198 WR_HDR;
199 union opcode_tid ot;
200 __be16 local_port;
201 __be16 peer_port;
202 __be32 local_ip;
203 __be32 peer_ip;
204 __be64 opt0;
205#define TX_CHAN(x) ((x) << 2)
206#define DELACK(x) ((x) << 5)
207#define ULP_MODE(x) ((x) << 8)
208#define RCV_BUFSIZ(x) ((x) << 12)
209#define DSCP(x) ((x) << 22)
210#define SMAC_SEL(x) ((u64)(x) << 28)
211#define L2T_IDX(x) ((u64)(x) << 36)
212#define NAGLE(x) ((u64)(x) << 49)
213#define WND_SCALE(x) ((u64)(x) << 50)
214#define KEEP_ALIVE(x) ((u64)(x) << 54)
215#define MSS_IDX(x) ((u64)(x) << 60)
216 __be64 opt1;
217#define SYN_RSS_ENABLE (1 << 0)
218#define SYN_RSS_QUEUE(x) ((x) << 2)
219#define CONN_POLICY_ASK (1 << 22)
220};
221
222struct cpl_pass_open_req6 {
223 WR_HDR;
224 union opcode_tid ot;
225 __be16 local_port;
226 __be16 peer_port;
227 __be64 local_ip_hi;
228 __be64 local_ip_lo;
229 __be64 peer_ip_hi;
230 __be64 peer_ip_lo;
231 __be64 opt0;
232 __be64 opt1;
233};
234
235struct cpl_pass_open_rpl {
236 union opcode_tid ot;
237 u8 rsvd[3];
238 u8 status;
239};
240
241struct cpl_pass_accept_rpl {
242 WR_HDR;
243 union opcode_tid ot;
244 __be32 opt2;
245#define RSS_QUEUE(x) ((x) << 0)
246#define RSS_QUEUE_VALID (1 << 10)
247#define RX_COALESCE_VALID(x) ((x) << 11)
248#define RX_COALESCE(x) ((x) << 12)
249#define TX_QUEUE(x) ((x) << 23)
250#define RX_CHANNEL(x) ((x) << 26)
251#define WND_SCALE_EN(x) ((x) << 28)
252#define TSTAMPS_EN(x) ((x) << 29)
253#define SACK_EN(x) ((x) << 30)
254 __be64 opt0;
255};
256
257struct cpl_act_open_req {
258 WR_HDR;
259 union opcode_tid ot;
260 __be16 local_port;
261 __be16 peer_port;
262 __be32 local_ip;
263 __be32 peer_ip;
264 __be64 opt0;
265 __be32 params;
266 __be32 opt2;
267};
268
269struct cpl_act_open_req6 {
270 WR_HDR;
271 union opcode_tid ot;
272 __be16 local_port;
273 __be16 peer_port;
274 __be64 local_ip_hi;
275 __be64 local_ip_lo;
276 __be64 peer_ip_hi;
277 __be64 peer_ip_lo;
278 __be64 opt0;
279 __be32 params;
280 __be32 opt2;
281};
282
283struct cpl_act_open_rpl {
284 union opcode_tid ot;
285 __be32 atid_status;
286#define GET_AOPEN_STATUS(x) ((x) & 0xff)
287#define GET_AOPEN_ATID(x) (((x) >> 8) & 0xffffff)
288};
289
290struct cpl_pass_establish {
291 union opcode_tid ot;
292 __be32 rsvd;
293 __be32 tos_stid;
294#define GET_POPEN_TID(x) ((x) & 0xffffff)
295#define GET_POPEN_TOS(x) (((x) >> 24) & 0xff)
296 __be16 mac_idx;
297 __be16 tcp_opt;
298#define GET_TCPOPT_WSCALE_OK(x) (((x) >> 5) & 1)
299#define GET_TCPOPT_SACK(x) (((x) >> 6) & 1)
300#define GET_TCPOPT_TSTAMP(x) (((x) >> 7) & 1)
301#define GET_TCPOPT_SND_WSCALE(x) (((x) >> 8) & 0xf)
302#define GET_TCPOPT_MSS(x) (((x) >> 12) & 0xf)
303 __be32 snd_isn;
304 __be32 rcv_isn;
305};
306
307struct cpl_act_establish {
308 union opcode_tid ot;
309 __be32 rsvd;
310 __be32 tos_atid;
311 __be16 mac_idx;
312 __be16 tcp_opt;
313 __be32 snd_isn;
314 __be32 rcv_isn;
315};
316
317struct cpl_get_tcb {
318 WR_HDR;
319 union opcode_tid ot;
320 __be16 reply_ctrl;
321#define QUEUENO(x) ((x) << 0)
322#define REPLY_CHAN(x) ((x) << 14)
323#define NO_REPLY(x) ((x) << 15)
324 __be16 cookie;
325};
326
327struct cpl_set_tcb_field {
328 WR_HDR;
329 union opcode_tid ot;
330 __be16 reply_ctrl;
331 __be16 word_cookie;
332#define TCB_WORD(x) ((x) << 0)
333#define TCB_COOKIE(x) ((x) << 5)
334 __be64 mask;
335 __be64 val;
336};
337
338struct cpl_set_tcb_rpl {
339 union opcode_tid ot;
340 __be16 rsvd;
341 u8 cookie;
342 u8 status;
343 __be64 oldval;
344};
345
346struct cpl_close_con_req {
347 WR_HDR;
348 union opcode_tid ot;
349 __be32 rsvd;
350};
351
352struct cpl_close_con_rpl {
353 union opcode_tid ot;
354 u8 rsvd[3];
355 u8 status;
356 __be32 snd_nxt;
357 __be32 rcv_nxt;
358};
359
360struct cpl_close_listsvr_req {
361 WR_HDR;
362 union opcode_tid ot;
363 __be16 reply_ctrl;
364#define LISTSVR_IPV6 (1 << 14)
365 __be16 rsvd;
366};
367
368struct cpl_close_listsvr_rpl {
369 union opcode_tid ot;
370 u8 rsvd[3];
371 u8 status;
372};
373
374struct cpl_abort_req_rss {
375 union opcode_tid ot;
376 u8 rsvd[3];
377 u8 status;
378};
379
380struct cpl_abort_req {
381 WR_HDR;
382 union opcode_tid ot;
383 __be32 rsvd0;
384 u8 rsvd1;
385 u8 cmd;
386 u8 rsvd2[6];
387};
388
389struct cpl_abort_rpl_rss {
390 union opcode_tid ot;
391 u8 rsvd[3];
392 u8 status;
393};
394
395struct cpl_abort_rpl {
396 WR_HDR;
397 union opcode_tid ot;
398 __be32 rsvd0;
399 u8 rsvd1;
400 u8 cmd;
401 u8 rsvd2[6];
402};
403
404struct cpl_peer_close {
405 union opcode_tid ot;
406 __be32 rcv_nxt;
407};
408
409struct cpl_tid_release {
410 WR_HDR;
411 union opcode_tid ot;
412 __be32 rsvd;
413};
414
415struct cpl_tx_pkt_core {
416 __be32 ctrl0;
417#define TXPKT_VF(x) ((x) << 0)
418#define TXPKT_PF(x) ((x) << 8)
419#define TXPKT_VF_VLD (1 << 11)
420#define TXPKT_OVLAN_IDX(x) ((x) << 12)
421#define TXPKT_INTF(x) ((x) << 16)
422#define TXPKT_INS_OVLAN (1 << 21)
423#define TXPKT_OPCODE(x) ((x) << 24)
424 __be16 pack;
425 __be16 len;
426 __be64 ctrl1;
427#define TXPKT_CSUM_END(x) ((x) << 12)
428#define TXPKT_CSUM_START(x) ((x) << 20)
429#define TXPKT_IPHDR_LEN(x) ((u64)(x) << 20)
430#define TXPKT_CSUM_LOC(x) ((u64)(x) << 30)
431#define TXPKT_ETHHDR_LEN(x) ((u64)(x) << 34)
432#define TXPKT_CSUM_TYPE(x) ((u64)(x) << 40)
433#define TXPKT_VLAN(x) ((u64)(x) << 44)
434#define TXPKT_VLAN_VLD (1ULL << 60)
435#define TXPKT_IPCSUM_DIS (1ULL << 62)
436#define TXPKT_L4CSUM_DIS (1ULL << 63)
437};
438
439struct cpl_tx_pkt {
440 WR_HDR;
441 struct cpl_tx_pkt_core c;
442};
443
444#define cpl_tx_pkt_xt cpl_tx_pkt
445
446struct cpl_tx_pkt_lso {
447 WR_HDR;
448 __be32 lso_ctrl;
449#define LSO_TCPHDR_LEN(x) ((x) << 0)
450#define LSO_IPHDR_LEN(x) ((x) << 4)
451#define LSO_ETHHDR_LEN(x) ((x) << 16)
452#define LSO_IPV6(x) ((x) << 20)
453#define LSO_LAST_SLICE (1 << 22)
454#define LSO_FIRST_SLICE (1 << 23)
455#define LSO_OPCODE(x) ((x) << 24)
456 __be16 ipid_ofst;
457 __be16 mss;
458 __be32 seqno_offset;
459 __be32 len;
460 /* encapsulated CPL (TX_PKT, TX_PKT_XT or TX_DATA) follows here */
461};
462
463struct cpl_iscsi_hdr {
464 union opcode_tid ot;
465 __be16 pdu_len_ddp;
466#define ISCSI_PDU_LEN(x) ((x) & 0x7FFF)
467#define ISCSI_DDP (1 << 15)
468 __be16 len;
469 __be32 seq;
470 __be16 urg;
471 u8 rsvd;
472 u8 status;
473};
474
475struct cpl_rx_data {
476 union opcode_tid ot;
477 __be16 rsvd;
478 __be16 len;
479 __be32 seq;
480 __be16 urg;
481#if defined(__LITTLE_ENDIAN_BITFIELD)
482 u8 dack_mode:2;
483 u8 psh:1;
484 u8 heartbeat:1;
485 u8 ddp_off:1;
486 u8 :3;
487#else
488 u8 :3;
489 u8 ddp_off:1;
490 u8 heartbeat:1;
491 u8 psh:1;
492 u8 dack_mode:2;
493#endif
494 u8 status;
495};
496
497struct cpl_rx_data_ack {
498 WR_HDR;
499 union opcode_tid ot;
500 __be32 credit_dack;
501#define RX_CREDITS(x) ((x) << 0)
502#define RX_FORCE_ACK(x) ((x) << 28)
503};
504
505struct cpl_rx_pkt {
506 u8 opcode;
507#if defined(__LITTLE_ENDIAN_BITFIELD)
508 u8 iff:4;
509 u8 csum_calc:1;
510 u8 ipmi_pkt:1;
511 u8 vlan_ex:1;
512 u8 ip_frag:1;
513#else
514 u8 ip_frag:1;
515 u8 vlan_ex:1;
516 u8 ipmi_pkt:1;
517 u8 csum_calc:1;
518 u8 iff:4;
519#endif
520 __be16 csum;
521 __be16 vlan;
522 __be16 len;
523 __be32 l2info;
524#define RXF_UDP (1 << 22)
525#define RXF_TCP (1 << 23)
526 __be16 hdr_len;
527 __be16 err_vec;
528};
529
530struct cpl_trace_pkt {
531 u8 opcode;
532 u8 intf;
533#if defined(__LITTLE_ENDIAN_BITFIELD)
534 u8 runt:4;
535 u8 filter_hit:4;
536 u8 :6;
537 u8 err:1;
538 u8 trunc:1;
539#else
540 u8 filter_hit:4;
541 u8 runt:4;
542 u8 trunc:1;
543 u8 err:1;
544 u8 :6;
545#endif
546 __be16 rsvd;
547 __be16 len;
548 __be64 tstamp;
549};
550
551struct cpl_l2t_write_req {
552 WR_HDR;
553 union opcode_tid ot;
554 __be16 params;
555#define L2T_W_INFO(x) ((x) << 2)
556#define L2T_W_PORT(x) ((x) << 8)
557#define L2T_W_NOREPLY(x) ((x) << 15)
558 __be16 l2t_idx;
559 __be16 vlan;
560 u8 dst_mac[6];
561};
562
563struct cpl_l2t_write_rpl {
564 union opcode_tid ot;
565 u8 status;
566 u8 rsvd[3];
567};
568
569struct cpl_rdma_terminate {
570 union opcode_tid ot;
571 __be16 rsvd;
572 __be16 len;
573};
574
575struct cpl_sge_egr_update {
576 __be32 opcode_qid;
577#define EGR_QID(x) ((x) & 0x1FFFF)
578 __be16 cidx;
579 __be16 pidx;
580};
581
582struct cpl_fw4_pld {
583 u8 opcode;
584 u8 rsvd0[3];
585 u8 type;
586 u8 rsvd1;
587 __be16 len;
588 __be64 data;
589 __be64 rsvd2;
590};
591
592struct cpl_fw6_pld {
593 u8 opcode;
594 u8 rsvd[5];
595 __be16 len;
596 __be64 data[4];
597};
598
599struct cpl_fw4_msg {
600 u8 opcode;
601 u8 type;
602 __be16 rsvd0;
603 __be32 rsvd1;
604 __be64 data[2];
605};
606
607struct cpl_fw4_ack {
608 union opcode_tid ot;
609 u8 credits;
610 u8 rsvd0[2];
611 u8 seq_vld;
612 __be32 snd_nxt;
613 __be32 snd_una;
614 __be64 rsvd1;
615};
616
617struct cpl_fw6_msg {
618 u8 opcode;
619 u8 type;
620 __be16 rsvd0;
621 __be32 rsvd1;
622 __be64 data[4];
623};
624
625enum {
626 ULP_TX_MEM_READ = 2,
627 ULP_TX_MEM_WRITE = 3,
628 ULP_TX_PKT = 4
629};
630
631enum {
632 ULP_TX_SC_NOOP = 0x80,
633 ULP_TX_SC_IMM = 0x81,
634 ULP_TX_SC_DSGL = 0x82,
635 ULP_TX_SC_ISGL = 0x83
636};
637
638struct ulptx_sge_pair {
639 __be32 len[2];
640 __be64 addr[2];
641};
642
643struct ulptx_sgl {
644 __be32 cmd_nsge;
645#define ULPTX_CMD(x) ((x) << 24)
646#define ULPTX_NSGE(x) ((x) << 0)
647 __be32 len0;
648 __be64 addr0;
649 struct ulptx_sge_pair sge[0];
650};
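
/*
 * Editor's illustration, not part of the upstream header: initialising a
 * direct scatter/gather list (ULP_TX_SC_DSGL).  The first buffer is carried
 * inline in len0/addr0 and any further buffers go into the sge[] pairs; the
 * helper name is hypothetical.
 */
static inline void example_init_dsgl(struct ulptx_sgl *sgl, u64 addr0,
				     u32 len0, unsigned int nsge)
{
	sgl->cmd_nsge = htonl(ULPTX_CMD(ULP_TX_SC_DSGL) | ULPTX_NSGE(nsge));
	sgl->len0 = htonl(len0);
	sgl->addr0 = cpu_to_be64(addr0);
}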
651
652struct ulp_mem_io {
653 WR_HDR;
654 __be32 cmd;
655#define ULP_MEMIO_ORDER(x) ((x) << 23)
656 __be32 len16; /* command length */
657 __be32 dlen; /* data length in 32-byte units */
658#define ULP_MEMIO_DATA_LEN(x) ((x) << 0)
659 __be32 lock_addr;
660#define ULP_MEMIO_ADDR(x) ((x) << 0)
661#define ULP_MEMIO_LOCK(x) ((x) << 31)
662};
663
664#endif /* __T4_MSG_H */
diff --git a/drivers/net/cxgb4/t4_regs.h b/drivers/net/cxgb4/t4_regs.h
new file mode 100644
index 000000000000..5ed56483cbc2
--- /dev/null
+++ b/drivers/net/cxgb4/t4_regs.h
@@ -0,0 +1,878 @@
1/*
2 * This file is part of the Chelsio T4 Ethernet driver for Linux.
3 *
4 * Copyright (c) 2010 Chelsio Communications, Inc. All rights reserved.
5 *
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
11 *
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
14 * conditions are met:
15 *
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
18 * disclaimer.
19 *
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 * SOFTWARE.
33 */
34
35#ifndef __T4_REGS_H
36#define __T4_REGS_H
37
38#define MYPF_BASE 0x1b000
39#define MYPF_REG(reg_addr) (MYPF_BASE + (reg_addr))
40
41#define PF0_BASE 0x1e000
42#define PF0_REG(reg_addr) (PF0_BASE + (reg_addr))
43
44#define PF_STRIDE 0x400
45#define PF_BASE(idx) (PF0_BASE + (idx) * PF_STRIDE)
46#define PF_REG(idx, reg) (PF_BASE(idx) + (reg))
47
48#define MYPORT_BASE 0x1c000
49#define MYPORT_REG(reg_addr) (MYPORT_BASE + (reg_addr))
50
51#define PORT0_BASE 0x20000
52#define PORT0_REG(reg_addr) (PORT0_BASE + (reg_addr))
53
54#define PORT_STRIDE 0x2000
55#define PORT_BASE(idx) (PORT0_BASE + (idx) * PORT_STRIDE)
56#define PORT_REG(idx, reg) (PORT_BASE(idx) + (reg))
57
58#define EDC_STRIDE (EDC_1_BASE_ADDR - EDC_0_BASE_ADDR)
59#define EDC_REG(reg, idx) (reg + EDC_STRIDE * idx)
60
61#define PCIE_MEM_ACCESS_REG(reg_addr, idx) ((reg_addr) + (idx) * 8)
62#define PCIE_MAILBOX_REG(reg_addr, idx) ((reg_addr) + (idx) * 8)
63#define MC_BIST_STATUS_REG(reg_addr, idx) ((reg_addr) + (idx) * 4)
64#define EDC_BIST_STATUS_REG(reg_addr, idx) ((reg_addr) + (idx) * 4)
65
66#define SGE_PF_KDOORBELL 0x0
67#define QID_MASK 0xffff8000U
68#define QID_SHIFT 15
69#define QID(x) ((x) << QID_SHIFT)
70#define DBPRIO 0x00004000U
71#define PIDX_MASK 0x00003fffU
72#define PIDX_SHIFT 0
73#define PIDX(x) ((x) << PIDX_SHIFT)
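
/*
 * Editor's illustration, not part of the upstream header: ringing the
 * per-function kernel doorbell after posting 'n' new descriptors to egress
 * queue 'qid'.  'regs' is assumed to be the ioremapped BAR0; the helper name
 * is hypothetical.
 */
static inline void example_ring_tx_db(void __iomem *regs, unsigned int qid,
				      unsigned int n)
{
	wmb();		/* descriptors must be visible before the doorbell */
	writel(QID(qid) | PIDX(n), regs + MYPF_REG(SGE_PF_KDOORBELL));
}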
74
75#define SGE_PF_GTS 0x4
76#define INGRESSQID_MASK 0xffff0000U
77#define INGRESSQID_SHIFT 16
78#define INGRESSQID(x) ((x) << INGRESSQID_SHIFT)
79#define TIMERREG_MASK 0x0000e000U
80#define TIMERREG_SHIFT 13
81#define TIMERREG(x) ((x) << TIMERREG_SHIFT)
82#define SEINTARM_MASK 0x00001000U
83#define SEINTARM_SHIFT 12
84#define SEINTARM(x) ((x) << SEINTARM_SHIFT)
85#define CIDXINC_MASK 0x00000fffU
86#define CIDXINC_SHIFT 0
87#define CIDXINC(x) ((x) << CIDXINC_SHIFT)
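
/*
 * Editor's illustration, not part of the upstream header: returning consumed
 * entries for ingress queue 'iqid' and re-arming it via the GTS register.
 * 'params' selects the holdoff timer/interrupt behaviour; 'regs' is assumed
 * to be the ioremapped BAR0 and the helper name is hypothetical.
 */
static inline void example_rearm_rx_intr(void __iomem *regs, unsigned int iqid,
					 unsigned int cidx_inc,
					 unsigned int params)
{
	writel(CIDXINC(cidx_inc) | INGRESSQID(iqid) | SEINTARM(params),
	       regs + MYPF_REG(SGE_PF_GTS));
}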
88
89#define SGE_CONTROL 0x1008
90#define DCASYSTYPE 0x00080000U
91#define RXPKTCPLMODE 0x00040000U
92#define EGRSTATUSPAGESIZE 0x00020000U
93#define PKTSHIFT_MASK 0x00001c00U
94#define PKTSHIFT_SHIFT 10
95#define PKTSHIFT(x) ((x) << PKTSHIFT_SHIFT)
96#define INGPCIEBOUNDARY_MASK 0x00000380U
97#define INGPCIEBOUNDARY_SHIFT 7
98#define INGPCIEBOUNDARY(x) ((x) << INGPCIEBOUNDARY_SHIFT)
99#define INGPADBOUNDARY_MASK 0x00000070U
100#define INGPADBOUNDARY_SHIFT 4
101#define INGPADBOUNDARY(x) ((x) << INGPADBOUNDARY_SHIFT)
102#define EGRPCIEBOUNDARY_MASK 0x0000000eU
103#define EGRPCIEBOUNDARY_SHIFT 1
104#define EGRPCIEBOUNDARY(x) ((x) << EGRPCIEBOUNDARY_SHIFT)
105#define GLOBALENABLE 0x00000001U
106
107#define SGE_HOST_PAGE_SIZE 0x100c
108#define HOSTPAGESIZEPF0_MASK 0x0000000fU
109#define HOSTPAGESIZEPF0_SHIFT 0
110#define HOSTPAGESIZEPF0(x) ((x) << HOSTPAGESIZEPF0_SHIFT)
111
112#define SGE_EGRESS_QUEUES_PER_PAGE_PF 0x1010
113#define QUEUESPERPAGEPF0_MASK 0x0000000fU
114#define QUEUESPERPAGEPF0_GET(x) ((x) & QUEUESPERPAGEPF0_MASK)
115
116#define SGE_INT_CAUSE1 0x1024
117#define SGE_INT_CAUSE2 0x1030
118#define SGE_INT_CAUSE3 0x103c
119#define ERR_FLM_DBP 0x80000000U
120#define ERR_FLM_IDMA1 0x40000000U
121#define ERR_FLM_IDMA0 0x20000000U
122#define ERR_FLM_HINT 0x10000000U
123#define ERR_PCIE_ERROR3 0x08000000U
124#define ERR_PCIE_ERROR2 0x04000000U
125#define ERR_PCIE_ERROR1 0x02000000U
126#define ERR_PCIE_ERROR0 0x01000000U
127#define ERR_TIMER_ABOVE_MAX_QID 0x00800000U
128#define ERR_CPL_EXCEED_IQE_SIZE 0x00400000U
129#define ERR_INVALID_CIDX_INC 0x00200000U
130#define ERR_ITP_TIME_PAUSED 0x00100000U
131#define ERR_CPL_OPCODE_0 0x00080000U
132#define ERR_DROPPED_DB 0x00040000U
133#define ERR_DATA_CPL_ON_HIGH_QID1 0x00020000U
134#define ERR_DATA_CPL_ON_HIGH_QID0 0x00010000U
135#define ERR_BAD_DB_PIDX3 0x00008000U
136#define ERR_BAD_DB_PIDX2 0x00004000U
137#define ERR_BAD_DB_PIDX1 0x00002000U
138#define ERR_BAD_DB_PIDX0 0x00001000U
139#define ERR_ING_PCIE_CHAN 0x00000800U
140#define ERR_ING_CTXT_PRIO 0x00000400U
141#define ERR_EGR_CTXT_PRIO 0x00000200U
142#define DBFIFO_HP_INT 0x00000100U
143#define DBFIFO_LP_INT 0x00000080U
144#define REG_ADDRESS_ERR 0x00000040U
145#define INGRESS_SIZE_ERR 0x00000020U
146#define EGRESS_SIZE_ERR 0x00000010U
147#define ERR_INV_CTXT3 0x00000008U
148#define ERR_INV_CTXT2 0x00000004U
149#define ERR_INV_CTXT1 0x00000002U
150#define ERR_INV_CTXT0 0x00000001U
151
152#define SGE_INT_ENABLE3 0x1040
153#define SGE_FL_BUFFER_SIZE0 0x1044
154#define SGE_FL_BUFFER_SIZE1 0x1048
155#define SGE_INGRESS_RX_THRESHOLD 0x10a0
156#define THRESHOLD_0_MASK 0x3f000000U
157#define THRESHOLD_0_SHIFT 24
158#define THRESHOLD_0(x) ((x) << THRESHOLD_0_SHIFT)
159#define THRESHOLD_0_GET(x) (((x) & THRESHOLD_0_MASK) >> THRESHOLD_0_SHIFT)
160#define THRESHOLD_1_MASK 0x003f0000U
161#define THRESHOLD_1_SHIFT 16
162#define THRESHOLD_1(x) ((x) << THRESHOLD_1_SHIFT)
163#define THRESHOLD_1_GET(x) (((x) & THRESHOLD_1_MASK) >> THRESHOLD_1_SHIFT)
164#define THRESHOLD_2_MASK 0x00003f00U
165#define THRESHOLD_2_SHIFT 8
166#define THRESHOLD_2(x) ((x) << THRESHOLD_2_SHIFT)
167#define THRESHOLD_2_GET(x) (((x) & THRESHOLD_2_MASK) >> THRESHOLD_2_SHIFT)
168#define THRESHOLD_3_MASK 0x0000003fU
169#define THRESHOLD_3_SHIFT 0
170#define THRESHOLD_3(x) ((x) << THRESHOLD_3_SHIFT)
171#define THRESHOLD_3_GET(x) (((x) & THRESHOLD_3_MASK) >> THRESHOLD_3_SHIFT)
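
/*
 * Editor's illustration, not part of the upstream header: the _MASK/_SHIFT/
 * constructor/_GET quartet above is the recurring field-accessor pattern in
 * this file.  A read-modify-write of one field looks like the sketch below;
 * 'regs' is assumed to be the ioremapped BAR0 and the helper name is
 * hypothetical.
 */
static inline void example_set_rx_threshold0(void __iomem *regs,
					     unsigned int thres)
{
	u32 v = readl(regs + SGE_INGRESS_RX_THRESHOLD);

	/* Replace only the THRESHOLD_0 field, leaving the other three. */
	v = (v & ~THRESHOLD_0_MASK) | THRESHOLD_0(thres);
	writel(v, regs + SGE_INGRESS_RX_THRESHOLD);
}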
172
173#define SGE_TIMER_VALUE_0_AND_1 0x10b8
174#define TIMERVALUE0_MASK 0xffff0000U
175#define TIMERVALUE0_SHIFT 16
176#define TIMERVALUE0(x) ((x) << TIMERVALUE0_SHIFT)
177#define TIMERVALUE0_GET(x) (((x) & TIMERVALUE0_MASK) >> TIMERVALUE0_SHIFT)
178#define TIMERVALUE1_MASK 0x0000ffffU
179#define TIMERVALUE1_SHIFT 0
180#define TIMERVALUE1(x) ((x) << TIMERVALUE1_SHIFT)
181#define TIMERVALUE1_GET(x) (((x) & TIMERVALUE1_MASK) >> TIMERVALUE1_SHIFT)
182
183#define SGE_TIMER_VALUE_2_AND_3 0x10bc
184#define SGE_TIMER_VALUE_4_AND_5 0x10c0
185#define SGE_DEBUG_INDEX 0x10cc
186#define SGE_DEBUG_DATA_HIGH 0x10d0
187#define SGE_DEBUG_DATA_LOW 0x10d4
188#define SGE_INGRESS_QUEUES_PER_PAGE_PF 0x10f4
189
190#define PCIE_PF_CLI 0x44
191#define PCIE_INT_CAUSE 0x3004
192#define UNXSPLCPLERR 0x20000000U
193#define PCIEPINT 0x10000000U
194#define PCIESINT 0x08000000U
195#define RPLPERR 0x04000000U
196#define RXWRPERR 0x02000000U
197#define RXCPLPERR 0x01000000U
198#define PIOTAGPERR 0x00800000U
199#define MATAGPERR 0x00400000U
200#define INTXCLRPERR 0x00200000U
201#define FIDPERR 0x00100000U
202#define CFGSNPPERR 0x00080000U
203#define HRSPPERR 0x00040000U
204#define HREQPERR 0x00020000U
205#define HCNTPERR 0x00010000U
206#define DRSPPERR 0x00008000U
207#define DREQPERR 0x00004000U
208#define DCNTPERR 0x00002000U
209#define CRSPPERR 0x00001000U
210#define CREQPERR 0x00000800U
211#define CCNTPERR 0x00000400U
212#define TARTAGPERR 0x00000200U
213#define PIOREQPERR 0x00000100U
214#define PIOCPLPERR 0x00000080U
215#define MSIXDIPERR 0x00000040U
216#define MSIXDATAPERR 0x00000020U
217#define MSIXADDRHPERR 0x00000010U
218#define MSIXADDRLPERR 0x00000008U
219#define MSIDATAPERR 0x00000004U
220#define MSIADDRHPERR 0x00000002U
221#define MSIADDRLPERR 0x00000001U
222
223#define PCIE_NONFAT_ERR 0x3010
224#define PCIE_MEM_ACCESS_BASE_WIN 0x3068
225#define PCIEOFST_MASK 0xfffffc00U
226#define BIR_MASK 0x00000300U
227#define BIR_SHIFT 8
228#define BIR(x) ((x) << BIR_SHIFT)
229#define WINDOW_MASK 0x000000ffU
230#define WINDOW_SHIFT 0
231#define WINDOW(x) ((x) << WINDOW_SHIFT)
232
233#define PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS 0x5908
234#define RNPP 0x80000000U
235#define RPCP 0x20000000U
236#define RCIP 0x08000000U
237#define RCCP 0x04000000U
238#define RFTP 0x00800000U
239#define PTRP 0x00100000U
240
241#define PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS 0x59a4
242#define TPCP 0x40000000U
243#define TNPP 0x20000000U
244#define TFTP 0x10000000U
245#define TCAP 0x08000000U
246#define TCIP 0x04000000U
247#define RCAP 0x02000000U
248#define PLUP 0x00800000U
249#define PLDN 0x00400000U
250#define OTDD 0x00200000U
251#define GTRP 0x00100000U
252#define RDPE 0x00040000U
253#define TDCE 0x00020000U
254#define TDUE 0x00010000U
255
256#define MC_INT_CAUSE 0x7518
257#define ECC_UE_INT_CAUSE 0x00000004U
258#define ECC_CE_INT_CAUSE 0x00000002U
259#define PERR_INT_CAUSE 0x00000001U
260
261#define MC_ECC_STATUS 0x751c
262#define ECC_CECNT_MASK 0xffff0000U
263#define ECC_CECNT_SHIFT 16
264#define ECC_CECNT(x) ((x) << ECC_CECNT_SHIFT)
265#define ECC_CECNT_GET(x) (((x) & ECC_CECNT_MASK) >> ECC_CECNT_SHIFT)
266#define ECC_UECNT_MASK 0x0000ffffU
267#define ECC_UECNT_SHIFT 0
268#define ECC_UECNT(x) ((x) << ECC_UECNT_SHIFT)
269#define ECC_UECNT_GET(x) (((x) & ECC_UECNT_MASK) >> ECC_UECNT_SHIFT)
270
271#define MC_BIST_CMD 0x7600
272#define START_BIST 0x80000000U
273#define BIST_CMD_GAP_MASK 0x0000ff00U
274#define BIST_CMD_GAP_SHIFT 8
275#define BIST_CMD_GAP(x) ((x) << BIST_CMD_GAP_SHIFT)
276#define BIST_OPCODE_MASK 0x00000003U
277#define BIST_OPCODE_SHIFT 0
278#define BIST_OPCODE(x) ((x) << BIST_OPCODE_SHIFT)
279
280#define MC_BIST_CMD_ADDR 0x7604
281#define MC_BIST_CMD_LEN 0x7608
282#define MC_BIST_DATA_PATTERN 0x760c
283#define BIST_DATA_TYPE_MASK 0x0000000fU
284#define BIST_DATA_TYPE_SHIFT 0
285#define BIST_DATA_TYPE(x) ((x) << BIST_DATA_TYPE_SHIFT)
286
287#define MC_BIST_STATUS_RDATA 0x7688
288
289#define MA_EXT_MEMORY_BAR 0x77c8
290#define EXT_MEM_SIZE_MASK 0x00000fffU
291#define EXT_MEM_SIZE_SHIFT 0
292#define EXT_MEM_SIZE_GET(x) (((x) & EXT_MEM_SIZE_MASK) >> EXT_MEM_SIZE_SHIFT)
293
294#define MA_TARGET_MEM_ENABLE 0x77d8
295#define EXT_MEM_ENABLE 0x00000004U
296#define EDRAM1_ENABLE 0x00000002U
297#define EDRAM0_ENABLE 0x00000001U
298
299#define MA_INT_CAUSE 0x77e0
300#define MEM_PERR_INT_CAUSE 0x00000002U
301#define MEM_WRAP_INT_CAUSE 0x00000001U
302
303#define MA_INT_WRAP_STATUS 0x77e4
304#define MEM_WRAP_ADDRESS_MASK 0xfffffff0U
305#define MEM_WRAP_ADDRESS_SHIFT 4
306#define MEM_WRAP_ADDRESS_GET(x) (((x) & MEM_WRAP_ADDRESS_MASK) >> MEM_WRAP_ADDRESS_SHIFT)
307#define MEM_WRAP_CLIENT_NUM_MASK 0x0000000fU
308#define MEM_WRAP_CLIENT_NUM_SHIFT 0
309#define MEM_WRAP_CLIENT_NUM_GET(x) (((x) & MEM_WRAP_CLIENT_NUM_MASK) >> MEM_WRAP_CLIENT_NUM_SHIFT)
310
311#define MA_PARITY_ERROR_STATUS 0x77f4
312
313#define EDC_0_BASE_ADDR 0x7900
314
315#define EDC_BIST_CMD 0x7904
316#define EDC_BIST_CMD_ADDR 0x7908
317#define EDC_BIST_CMD_LEN 0x790c
318#define EDC_BIST_DATA_PATTERN 0x7910
319#define EDC_BIST_STATUS_RDATA 0x7928
320#define EDC_INT_CAUSE 0x7978
321#define ECC_UE_PAR 0x00000020U
322#define ECC_CE_PAR 0x00000010U
323#define PERR_PAR_CAUSE 0x00000008U
324
325#define EDC_ECC_STATUS 0x797c
326
327#define EDC_1_BASE_ADDR 0x7980
328
329#define CIM_PF_MAILBOX_DATA 0x240
330#define CIM_PF_MAILBOX_CTRL 0x280
331#define MBMSGVALID 0x00000008U
332#define MBINTREQ 0x00000004U
333#define MBOWNER_MASK 0x00000003U
334#define MBOWNER_SHIFT 0
335#define MBOWNER(x) ((x) << MBOWNER_SHIFT)
336#define MBOWNER_GET(x) (((x) & MBOWNER_MASK) >> MBOWNER_SHIFT)
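
/*
 * Editor's illustration, not part of the upstream header: once a command has
 * been copied into the mailbox data registers, ownership is handed to
 * firmware and the message flagged valid.  The owner codes themselves live in
 * a companion header and are passed in here; the helper name is hypothetical.
 */
static inline void example_fire_mailbox(void __iomem *regs, unsigned int mbox,
					u32 fw_owner)
{
	writel(MBMSGVALID | MBOWNER(fw_owner),
	       regs + PF_REG(mbox, CIM_PF_MAILBOX_CTRL));
}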
337
338#define CIM_PF_HOST_INT_CAUSE 0x28c
339#define MBMSGRDYINT 0x00080000U
340
341#define CIM_HOST_INT_CAUSE 0x7b2c
342#define TIEQOUTPARERRINT 0x00100000U
343#define TIEQINPARERRINT 0x00080000U
344#define MBHOSTPARERR 0x00040000U
345#define MBUPPARERR 0x00020000U
346#define IBQPARERR 0x0001f800U
347#define IBQTP0PARERR 0x00010000U
348#define IBQTP1PARERR 0x00008000U
349#define IBQULPPARERR 0x00004000U
350#define IBQSGELOPARERR 0x00002000U
351#define IBQSGEHIPARERR 0x00001000U
352#define IBQNCSIPARERR 0x00000800U
353#define OBQPARERR 0x000007e0U
354#define OBQULP0PARERR 0x00000400U
355#define OBQULP1PARERR 0x00000200U
356#define OBQULP2PARERR 0x00000100U
357#define OBQULP3PARERR 0x00000080U
358#define OBQSGEPARERR 0x00000040U
359#define OBQNCSIPARERR 0x00000020U
360#define PREFDROPINT 0x00000002U
361#define UPACCNONZERO 0x00000001U
362
363#define CIM_HOST_UPACC_INT_CAUSE 0x7b34
364#define EEPROMWRINT 0x40000000U
365#define TIMEOUTMAINT 0x20000000U
366#define TIMEOUTINT 0x10000000U
367#define RSPOVRLOOKUPINT 0x08000000U
368#define REQOVRLOOKUPINT 0x04000000U
369#define BLKWRPLINT 0x02000000U
370#define BLKRDPLINT 0x01000000U
371#define SGLWRPLINT 0x00800000U
372#define SGLRDPLINT 0x00400000U
373#define BLKWRCTLINT 0x00200000U
374#define BLKRDCTLINT 0x00100000U
375#define SGLWRCTLINT 0x00080000U
376#define SGLRDCTLINT 0x00040000U
377#define BLKWREEPROMINT 0x00020000U
378#define BLKRDEEPROMINT 0x00010000U
379#define SGLWREEPROMINT 0x00008000U
380#define SGLRDEEPROMINT 0x00004000U
381#define BLKWRFLASHINT 0x00002000U
382#define BLKRDFLASHINT 0x00001000U
383#define SGLWRFLASHINT 0x00000800U
384#define SGLRDFLASHINT 0x00000400U
385#define BLKWRBOOTINT 0x00000200U
386#define BLKRDBOOTINT 0x00000100U
387#define SGLWRBOOTINT 0x00000080U
388#define SGLRDBOOTINT 0x00000040U
389#define ILLWRBEINT 0x00000020U
390#define ILLRDBEINT 0x00000010U
391#define ILLRDINT 0x00000008U
392#define ILLWRINT 0x00000004U
393#define ILLTRANSINT 0x00000002U
394#define RSVDSPACEINT 0x00000001U
395
396#define TP_OUT_CONFIG 0x7d04
397#define VLANEXTENABLE_MASK 0x0000f000U
398#define VLANEXTENABLE_SHIFT 12
399
400#define TP_PARA_REG2 0x7d68
401#define MAXRXDATA_MASK 0xffff0000U
402#define MAXRXDATA_SHIFT 16
403#define MAXRXDATA_GET(x) (((x) & MAXRXDATA_MASK) >> MAXRXDATA_SHIFT)
404
405#define TP_TIMER_RESOLUTION 0x7d90
406#define TIMERRESOLUTION_MASK 0x00ff0000U
407#define TIMERRESOLUTION_SHIFT 16
408#define TIMERRESOLUTION_GET(x) (((x) & TIMERRESOLUTION_MASK) >> TIMERRESOLUTION_SHIFT)
409
410#define TP_SHIFT_CNT 0x7dc0
411
412#define TP_CCTRL_TABLE 0x7ddc
413#define TP_MTU_TABLE 0x7de4
414#define MTUINDEX_MASK 0xff000000U
415#define MTUINDEX_SHIFT 24
416#define MTUINDEX(x) ((x) << MTUINDEX_SHIFT)
417#define MTUWIDTH_MASK 0x000f0000U
418#define MTUWIDTH_SHIFT 16
419#define MTUWIDTH(x) ((x) << MTUWIDTH_SHIFT)
420#define MTUWIDTH_GET(x) (((x) & MTUWIDTH_MASK) >> MTUWIDTH_SHIFT)
421#define MTUVALUE_MASK 0x00003fffU
422#define MTUVALUE_SHIFT 0
423#define MTUVALUE(x) ((x) << MTUVALUE_SHIFT)
424#define MTUVALUE_GET(x) (((x) & MTUVALUE_MASK) >> MTUVALUE_SHIFT)
425
426#define TP_RSS_LKP_TABLE 0x7dec
427#define LKPTBLROWVLD 0x80000000U
428#define LKPTBLQUEUE1_MASK 0x000ffc00U
429#define LKPTBLQUEUE1_SHIFT 10
430#define LKPTBLQUEUE1(x) ((x) << LKPTBLQUEUE1_SHIFT)
431#define LKPTBLQUEUE1_GET(x) (((x) & LKPTBLQUEUE1_MASK) >> LKPTBLQUEUE1_SHIFT)
432#define LKPTBLQUEUE0_MASK 0x000003ffU
433#define LKPTBLQUEUE0_SHIFT 0
434#define LKPTBLQUEUE0(x) ((x) << LKPTBLQUEUE0_SHIFT)
435#define LKPTBLQUEUE0_GET(x) (((x) & LKPTBLQUEUE0_MASK) >> LKPTBLQUEUE0_SHIFT)
436
437#define TP_PIO_ADDR 0x7e40
438#define TP_PIO_DATA 0x7e44
439#define TP_MIB_INDEX 0x7e50
440#define TP_MIB_DATA 0x7e54
441#define TP_INT_CAUSE 0x7e74
442#define FLMTXFLSTEMPTY 0x40000000U
443
444#define TP_INGRESS_CONFIG 0x141
445#define VNIC 0x00000800U
446#define CSUM_HAS_PSEUDO_HDR 0x00000400U
447#define RM_OVLAN 0x00000200U
448#define LOOKUPEVERYPKT 0x00000100U
449
450#define TP_MIB_MAC_IN_ERR_0 0x0
451#define TP_MIB_TCP_OUT_RST 0xc
452#define TP_MIB_TCP_IN_SEG_HI 0x10
453#define TP_MIB_TCP_IN_SEG_LO 0x11
454#define TP_MIB_TCP_OUT_SEG_HI 0x12
455#define TP_MIB_TCP_OUT_SEG_LO 0x13
456#define TP_MIB_TCP_RXT_SEG_HI 0x14
457#define TP_MIB_TCP_RXT_SEG_LO 0x15
458#define TP_MIB_TNL_CNG_DROP_0 0x18
459#define TP_MIB_TCP_V6IN_ERR_0 0x28
460#define TP_MIB_TCP_V6OUT_RST 0x2c
461#define TP_MIB_OFD_ARP_DROP 0x36
462#define TP_MIB_TNL_DROP_0 0x44
463#define TP_MIB_OFD_VLN_DROP_0 0x58
464
465#define ULP_TX_INT_CAUSE 0x8dcc
466#define PBL_BOUND_ERR_CH3 0x80000000U
467#define PBL_BOUND_ERR_CH2 0x40000000U
468#define PBL_BOUND_ERR_CH1 0x20000000U
469#define PBL_BOUND_ERR_CH0 0x10000000U
470
471#define PM_RX_INT_CAUSE 0x8fdc
472#define ZERO_E_CMD_ERROR 0x00400000U
473#define PMRX_FRAMING_ERROR 0x003ffff0U
474#define OCSPI_PAR_ERROR 0x00000008U
475#define DB_OPTIONS_PAR_ERROR 0x00000004U
476#define IESPI_PAR_ERROR 0x00000002U
477#define E_PCMD_PAR_ERROR 0x00000001U
478
479#define PM_TX_INT_CAUSE 0x8ffc
480#define PCMD_LEN_OVFL0 0x80000000U
481#define PCMD_LEN_OVFL1 0x40000000U
482#define PCMD_LEN_OVFL2 0x20000000U
483#define ZERO_C_CMD_ERROR 0x10000000U
484#define PMTX_FRAMING_ERROR 0x0ffffff0U
485#define OESPI_PAR_ERROR 0x00000008U
486#define ICSPI_PAR_ERROR 0x00000002U
487#define C_PCMD_PAR_ERROR 0x00000001U
488
489#define MPS_PORT_STAT_TX_PORT_BYTES_L 0x400
490#define MPS_PORT_STAT_TX_PORT_BYTES_H 0x404
491#define MPS_PORT_STAT_TX_PORT_FRAMES_L 0x408
492#define MPS_PORT_STAT_TX_PORT_FRAMES_H 0x40c
493#define MPS_PORT_STAT_TX_PORT_BCAST_L 0x410
494#define MPS_PORT_STAT_TX_PORT_BCAST_H 0x414
495#define MPS_PORT_STAT_TX_PORT_MCAST_L 0x418
496#define MPS_PORT_STAT_TX_PORT_MCAST_H 0x41c
497#define MPS_PORT_STAT_TX_PORT_UCAST_L 0x420
498#define MPS_PORT_STAT_TX_PORT_UCAST_H 0x424
499#define MPS_PORT_STAT_TX_PORT_ERROR_L 0x428
500#define MPS_PORT_STAT_TX_PORT_ERROR_H 0x42c
501#define MPS_PORT_STAT_TX_PORT_64B_L 0x430
502#define MPS_PORT_STAT_TX_PORT_64B_H 0x434
503#define MPS_PORT_STAT_TX_PORT_65B_127B_L 0x438
504#define MPS_PORT_STAT_TX_PORT_65B_127B_H 0x43c
505#define MPS_PORT_STAT_TX_PORT_128B_255B_L 0x440
506#define MPS_PORT_STAT_TX_PORT_128B_255B_H 0x444
507#define MPS_PORT_STAT_TX_PORT_256B_511B_L 0x448
508#define MPS_PORT_STAT_TX_PORT_256B_511B_H 0x44c
509#define MPS_PORT_STAT_TX_PORT_512B_1023B_L 0x450
510#define MPS_PORT_STAT_TX_PORT_512B_1023B_H 0x454
511#define MPS_PORT_STAT_TX_PORT_1024B_1518B_L 0x458
512#define MPS_PORT_STAT_TX_PORT_1024B_1518B_H 0x45c
513#define MPS_PORT_STAT_TX_PORT_1519B_MAX_L 0x460
514#define MPS_PORT_STAT_TX_PORT_1519B_MAX_H 0x464
515#define MPS_PORT_STAT_TX_PORT_DROP_L 0x468
516#define MPS_PORT_STAT_TX_PORT_DROP_H 0x46c
517#define MPS_PORT_STAT_TX_PORT_PAUSE_L 0x470
518#define MPS_PORT_STAT_TX_PORT_PAUSE_H 0x474
519#define MPS_PORT_STAT_TX_PORT_PPP0_L 0x478
520#define MPS_PORT_STAT_TX_PORT_PPP0_H 0x47c
521#define MPS_PORT_STAT_TX_PORT_PPP1_L 0x480
522#define MPS_PORT_STAT_TX_PORT_PPP1_H 0x484
523#define MPS_PORT_STAT_TX_PORT_PPP2_L 0x488
524#define MPS_PORT_STAT_TX_PORT_PPP2_H 0x48c
525#define MPS_PORT_STAT_TX_PORT_PPP3_L 0x490
526#define MPS_PORT_STAT_TX_PORT_PPP3_H 0x494
527#define MPS_PORT_STAT_TX_PORT_PPP4_L 0x498
528#define MPS_PORT_STAT_TX_PORT_PPP4_H 0x49c
529#define MPS_PORT_STAT_TX_PORT_PPP5_L 0x4a0
530#define MPS_PORT_STAT_TX_PORT_PPP5_H 0x4a4
531#define MPS_PORT_STAT_TX_PORT_PPP6_L 0x4a8
532#define MPS_PORT_STAT_TX_PORT_PPP6_H 0x4ac
533#define MPS_PORT_STAT_TX_PORT_PPP7_L 0x4b0
534#define MPS_PORT_STAT_TX_PORT_PPP7_H 0x4b4
535#define MPS_PORT_STAT_LB_PORT_BYTES_L 0x4c0
536#define MPS_PORT_STAT_LB_PORT_BYTES_H 0x4c4
537#define MPS_PORT_STAT_LB_PORT_FRAMES_L 0x4c8
538#define MPS_PORT_STAT_LB_PORT_FRAMES_H 0x4cc
539#define MPS_PORT_STAT_LB_PORT_BCAST_L 0x4d0
540#define MPS_PORT_STAT_LB_PORT_BCAST_H 0x4d4
541#define MPS_PORT_STAT_LB_PORT_MCAST_L 0x4d8
542#define MPS_PORT_STAT_LB_PORT_MCAST_H 0x4dc
543#define MPS_PORT_STAT_LB_PORT_UCAST_L 0x4e0
544#define MPS_PORT_STAT_LB_PORT_UCAST_H 0x4e4
545#define MPS_PORT_STAT_LB_PORT_ERROR_L 0x4e8
546#define MPS_PORT_STAT_LB_PORT_ERROR_H 0x4ec
547#define MPS_PORT_STAT_LB_PORT_64B_L 0x4f0
548#define MPS_PORT_STAT_LB_PORT_64B_H 0x4f4
549#define MPS_PORT_STAT_LB_PORT_65B_127B_L 0x4f8
550#define MPS_PORT_STAT_LB_PORT_65B_127B_H 0x4fc
551#define MPS_PORT_STAT_LB_PORT_128B_255B_L 0x500
552#define MPS_PORT_STAT_LB_PORT_128B_255B_H 0x504
553#define MPS_PORT_STAT_LB_PORT_256B_511B_L 0x508
554#define MPS_PORT_STAT_LB_PORT_256B_511B_H 0x50c
555#define MPS_PORT_STAT_LB_PORT_512B_1023B_L 0x510
556#define MPS_PORT_STAT_LB_PORT_512B_1023B_H 0x514
557#define MPS_PORT_STAT_LB_PORT_1024B_1518B_L 0x518
558#define MPS_PORT_STAT_LB_PORT_1024B_1518B_H 0x51c
559#define MPS_PORT_STAT_LB_PORT_1519B_MAX_L 0x520
560#define MPS_PORT_STAT_LB_PORT_1519B_MAX_H 0x524
561#define MPS_PORT_STAT_LB_PORT_DROP_FRAMES 0x528
562#define MPS_PORT_STAT_RX_PORT_BYTES_L 0x540
563#define MPS_PORT_STAT_RX_PORT_BYTES_H 0x544
564#define MPS_PORT_STAT_RX_PORT_FRAMES_L 0x548
565#define MPS_PORT_STAT_RX_PORT_FRAMES_H 0x54c
566#define MPS_PORT_STAT_RX_PORT_BCAST_L 0x550
567#define MPS_PORT_STAT_RX_PORT_BCAST_H 0x554
568#define MPS_PORT_STAT_RX_PORT_MCAST_L 0x558
569#define MPS_PORT_STAT_RX_PORT_MCAST_H 0x55c
570#define MPS_PORT_STAT_RX_PORT_UCAST_L 0x560
571#define MPS_PORT_STAT_RX_PORT_UCAST_H 0x564
572#define MPS_PORT_STAT_RX_PORT_MTU_ERROR_L 0x568
573#define MPS_PORT_STAT_RX_PORT_MTU_ERROR_H 0x56c
574#define MPS_PORT_STAT_RX_PORT_MTU_CRC_ERROR_L 0x570
575#define MPS_PORT_STAT_RX_PORT_MTU_CRC_ERROR_H 0x574
576#define MPS_PORT_STAT_RX_PORT_CRC_ERROR_L 0x578
577#define MPS_PORT_STAT_RX_PORT_CRC_ERROR_H 0x57c
578#define MPS_PORT_STAT_RX_PORT_LEN_ERROR_L 0x580
579#define MPS_PORT_STAT_RX_PORT_LEN_ERROR_H 0x584
580#define MPS_PORT_STAT_RX_PORT_SYM_ERROR_L 0x588
581#define MPS_PORT_STAT_RX_PORT_SYM_ERROR_H 0x58c
582#define MPS_PORT_STAT_RX_PORT_64B_L 0x590
583#define MPS_PORT_STAT_RX_PORT_64B_H 0x594
584#define MPS_PORT_STAT_RX_PORT_65B_127B_L 0x598
585#define MPS_PORT_STAT_RX_PORT_65B_127B_H 0x59c
586#define MPS_PORT_STAT_RX_PORT_128B_255B_L 0x5a0
587#define MPS_PORT_STAT_RX_PORT_128B_255B_H 0x5a4
588#define MPS_PORT_STAT_RX_PORT_256B_511B_L 0x5a8
589#define MPS_PORT_STAT_RX_PORT_256B_511B_H 0x5ac
590#define MPS_PORT_STAT_RX_PORT_512B_1023B_L 0x5b0
591#define MPS_PORT_STAT_RX_PORT_512B_1023B_H 0x5b4
592#define MPS_PORT_STAT_RX_PORT_1024B_1518B_L 0x5b8
593#define MPS_PORT_STAT_RX_PORT_1024B_1518B_H 0x5bc
594#define MPS_PORT_STAT_RX_PORT_1519B_MAX_L 0x5c0
595#define MPS_PORT_STAT_RX_PORT_1519B_MAX_H 0x5c4
596#define MPS_PORT_STAT_RX_PORT_PAUSE_L 0x5c8
597#define MPS_PORT_STAT_RX_PORT_PAUSE_H 0x5cc
598#define MPS_PORT_STAT_RX_PORT_PPP0_L 0x5d0
599#define MPS_PORT_STAT_RX_PORT_PPP0_H 0x5d4
600#define MPS_PORT_STAT_RX_PORT_PPP1_L 0x5d8
601#define MPS_PORT_STAT_RX_PORT_PPP1_H 0x5dc
602#define MPS_PORT_STAT_RX_PORT_PPP2_L 0x5e0
603#define MPS_PORT_STAT_RX_PORT_PPP2_H 0x5e4
604#define MPS_PORT_STAT_RX_PORT_PPP3_L 0x5e8
605#define MPS_PORT_STAT_RX_PORT_PPP3_H 0x5ec
606#define MPS_PORT_STAT_RX_PORT_PPP4_L 0x5f0
607#define MPS_PORT_STAT_RX_PORT_PPP4_H 0x5f4
608#define MPS_PORT_STAT_RX_PORT_PPP5_L 0x5f8
609#define MPS_PORT_STAT_RX_PORT_PPP5_H 0x5fc
610#define MPS_PORT_STAT_RX_PORT_PPP6_L 0x600
611#define MPS_PORT_STAT_RX_PORT_PPP6_H 0x604
612#define MPS_PORT_STAT_RX_PORT_PPP7_L 0x608
613#define MPS_PORT_STAT_RX_PORT_PPP7_H 0x60c
614#define MPS_PORT_STAT_RX_PORT_LESS_64B_L 0x610
615#define MPS_PORT_STAT_RX_PORT_LESS_64B_H 0x614
616#define MPS_CMN_CTL 0x9000
617#define NUMPORTS_MASK 0x00000003U
618#define NUMPORTS_SHIFT 0
619#define NUMPORTS_GET(x) (((x) & NUMPORTS_MASK) >> NUMPORTS_SHIFT)
620
621#define MPS_INT_CAUSE 0x9008
622#define STATINT 0x00000020U
623#define TXINT 0x00000010U
624#define RXINT 0x00000008U
625#define TRCINT 0x00000004U
626#define CLSINT 0x00000002U
627#define PLINT 0x00000001U
628
629#define MPS_TX_INT_CAUSE 0x9408
630#define PORTERR 0x00010000U
631#define FRMERR 0x00008000U
632#define SECNTERR 0x00004000U
633#define BUBBLE 0x00002000U
634#define TXDESCFIFO 0x00001e00U
635#define TXDATAFIFO 0x000001e0U
636#define NCSIFIFO 0x00000010U
637#define TPFIFO 0x0000000fU
638
639#define MPS_STAT_PERR_INT_CAUSE_SRAM 0x9614
640#define MPS_STAT_PERR_INT_CAUSE_TX_FIFO 0x9620
641#define MPS_STAT_PERR_INT_CAUSE_RX_FIFO 0x962c
642
643#define MPS_STAT_RX_BG_0_MAC_DROP_FRAME_L 0x9640
644#define MPS_STAT_RX_BG_0_MAC_DROP_FRAME_H 0x9644
645#define MPS_STAT_RX_BG_1_MAC_DROP_FRAME_L 0x9648
646#define MPS_STAT_RX_BG_1_MAC_DROP_FRAME_H 0x964c
647#define MPS_STAT_RX_BG_2_MAC_DROP_FRAME_L 0x9650
648#define MPS_STAT_RX_BG_2_MAC_DROP_FRAME_H 0x9654
649#define MPS_STAT_RX_BG_3_MAC_DROP_FRAME_L 0x9658
650#define MPS_STAT_RX_BG_3_MAC_DROP_FRAME_H 0x965c
651#define MPS_STAT_RX_BG_0_LB_DROP_FRAME_L 0x9660
652#define MPS_STAT_RX_BG_0_LB_DROP_FRAME_H 0x9664
653#define MPS_STAT_RX_BG_1_LB_DROP_FRAME_L 0x9668
654#define MPS_STAT_RX_BG_1_LB_DROP_FRAME_H 0x966c
655#define MPS_STAT_RX_BG_2_LB_DROP_FRAME_L 0x9670
656#define MPS_STAT_RX_BG_2_LB_DROP_FRAME_H 0x9674
657#define MPS_STAT_RX_BG_3_LB_DROP_FRAME_L 0x9678
658#define MPS_STAT_RX_BG_3_LB_DROP_FRAME_H 0x967c
659#define MPS_STAT_RX_BG_0_MAC_TRUNC_FRAME_L 0x9680
660#define MPS_STAT_RX_BG_0_MAC_TRUNC_FRAME_H 0x9684
661#define MPS_STAT_RX_BG_1_MAC_TRUNC_FRAME_L 0x9688
662#define MPS_STAT_RX_BG_1_MAC_TRUNC_FRAME_H 0x968c
663#define MPS_STAT_RX_BG_2_MAC_TRUNC_FRAME_L 0x9690
664#define MPS_STAT_RX_BG_2_MAC_TRUNC_FRAME_H 0x9694
665#define MPS_STAT_RX_BG_3_MAC_TRUNC_FRAME_L 0x9698
666#define MPS_STAT_RX_BG_3_MAC_TRUNC_FRAME_H 0x969c
667#define MPS_STAT_RX_BG_0_LB_TRUNC_FRAME_L 0x96a0
668#define MPS_STAT_RX_BG_0_LB_TRUNC_FRAME_H 0x96a4
669#define MPS_STAT_RX_BG_1_LB_TRUNC_FRAME_L 0x96a8
670#define MPS_STAT_RX_BG_1_LB_TRUNC_FRAME_H 0x96ac
671#define MPS_STAT_RX_BG_2_LB_TRUNC_FRAME_L 0x96b0
672#define MPS_STAT_RX_BG_2_LB_TRUNC_FRAME_H 0x96b4
673#define MPS_STAT_RX_BG_3_LB_TRUNC_FRAME_L 0x96b8
674#define MPS_STAT_RX_BG_3_LB_TRUNC_FRAME_H 0x96bc
675#define MPS_TRC_CFG 0x9800
676#define TRCFIFOEMPTY 0x00000010U
677#define TRCIGNOREDROPINPUT 0x00000008U
678#define TRCKEEPDUPLICATES 0x00000004U
679#define TRCEN 0x00000002U
680#define TRCMULTIFILTER 0x00000001U
681
682#define MPS_TRC_RSS_CONTROL 0x9808
683#define RSSCONTROL_MASK 0x00ff0000U
684#define RSSCONTROL_SHIFT 16
685#define RSSCONTROL(x) ((x) << RSSCONTROL_SHIFT)
686#define QUEUENUMBER_MASK 0x0000ffffU
687#define QUEUENUMBER_SHIFT 0
688#define QUEUENUMBER(x) ((x) << QUEUENUMBER_SHIFT)
689
690#define MPS_TRC_FILTER_MATCH_CTL_A 0x9810
691#define TFINVERTMATCH 0x01000000U
692#define TFPKTTOOLARGE 0x00800000U
693#define TFEN 0x00400000U
694#define TFPORT_MASK 0x003c0000U
695#define TFPORT_SHIFT 18
696#define TFPORT(x) ((x) << TFPORT_SHIFT)
697#define TFPORT_GET(x) (((x) & TFPORT_MASK) >> TFPORT_SHIFT)
698#define TFDROP 0x00020000U
699#define TFSOPEOPERR 0x00010000U
700#define TFLENGTH_MASK 0x00001f00U
701#define TFLENGTH_SHIFT 8
702#define TFLENGTH(x) ((x) << TFLENGTH_SHIFT)
703#define TFLENGTH_GET(x) (((x) & TFLENGTH_MASK) >> TFLENGTH_SHIFT)
704#define TFOFFSET_MASK 0x0000001fU
705#define TFOFFSET_SHIFT 0
706#define TFOFFSET(x) ((x) << TFOFFSET_SHIFT)
707#define TFOFFSET_GET(x) (((x) & TFOFFSET_MASK) >> TFOFFSET_SHIFT)
708
709#define MPS_TRC_FILTER_MATCH_CTL_B 0x9820
710#define TFMINPKTSIZE_MASK 0x01ff0000U
711#define TFMINPKTSIZE_SHIFT 16
712#define TFMINPKTSIZE(x) ((x) << TFMINPKTSIZE_SHIFT)
713#define TFMINPKTSIZE_GET(x) (((x) & TFMINPKTSIZE_MASK) >> TFMINPKTSIZE_SHIFT)
714#define TFCAPTUREMAX_MASK 0x00003fffU
715#define TFCAPTUREMAX_SHIFT 0
716#define TFCAPTUREMAX(x) ((x) << TFCAPTUREMAX_SHIFT)
717#define TFCAPTUREMAX_GET(x) (((x) & TFCAPTUREMAX_MASK) >> TFCAPTUREMAX_SHIFT)
718
719#define MPS_TRC_INT_CAUSE 0x985c
720#define MISCPERR 0x00000100U
721#define PKTFIFO 0x000000f0U
722#define FILTMEM 0x0000000fU
723
724#define MPS_TRC_FILTER0_MATCH 0x9c00
725#define MPS_TRC_FILTER0_DONT_CARE 0x9c80
726#define MPS_TRC_FILTER1_MATCH 0x9d00
727#define MPS_CLS_INT_CAUSE 0xd028
728#define PLERRENB 0x00000008U
729#define HASHSRAM 0x00000004U
730#define MATCHTCAM 0x00000002U
731#define MATCHSRAM 0x00000001U
732
733#define MPS_RX_PERR_INT_CAUSE 0x11074
734
735#define CPL_INTR_CAUSE 0x19054
736#define CIM_OP_MAP_PERR 0x00000020U
737#define CIM_OVFL_ERROR 0x00000010U
738#define TP_FRAMING_ERROR 0x00000008U
739#define SGE_FRAMING_ERROR 0x00000004U
740#define CIM_FRAMING_ERROR 0x00000002U
741#define ZERO_SWITCH_ERROR 0x00000001U
742
743#define SMB_INT_CAUSE 0x19090
744#define MSTTXFIFOPARINT 0x00200000U
745#define MSTRXFIFOPARINT 0x00100000U
746#define SLVFIFOPARINT 0x00080000U
747
748#define ULP_RX_INT_CAUSE 0x19158
749#define ULP_RX_ISCSI_TAGMASK 0x19164
750#define ULP_RX_ISCSI_PSZ 0x19168
751#define HPZ3_MASK 0x0f000000U
752#define HPZ3_SHIFT 24
753#define HPZ3(x) ((x) << HPZ3_SHIFT)
754#define HPZ2_MASK 0x000f0000U
755#define HPZ2_SHIFT 16
756#define HPZ2(x) ((x) << HPZ2_SHIFT)
757#define HPZ1_MASK 0x00000f00U
758#define HPZ1_SHIFT 8
759#define HPZ1(x) ((x) << HPZ1_SHIFT)
760#define HPZ0_MASK 0x0000000fU
761#define HPZ0_SHIFT 0
762#define HPZ0(x) ((x) << HPZ0_SHIFT)
763
764#define ULP_RX_TDDP_PSZ 0x19178
765
766#define SF_DATA 0x193f8
767#define SF_OP 0x193fc
768#define BUSY 0x80000000U
769#define SF_LOCK 0x00000010U
770#define SF_CONT 0x00000008U
771#define BYTECNT_MASK 0x00000006U
772#define BYTECNT_SHIFT 1
773#define BYTECNT(x) ((x) << BYTECNT_SHIFT)
774#define OP_WR 0x00000001U
775
776#define PL_PF_INT_CAUSE 0x3c0
777#define PFSW 0x00000008U
778#define PFSGE 0x00000004U
779#define PFCIM 0x00000002U
780#define PFMPS 0x00000001U
781
782#define PL_PF_INT_ENABLE 0x3c4
783#define PL_PF_CTL 0x3c8
784#define SWINT 0x00000001U
785
786#define PL_WHOAMI 0x19400
787#define SOURCEPF_MASK 0x00000700U
788#define SOURCEPF_SHIFT 8
789#define SOURCEPF(x) ((x) << SOURCEPF_SHIFT)
790#define SOURCEPF_GET(x) (((x) & SOURCEPF_MASK) >> SOURCEPF_SHIFT)
791#define ISVF 0x00000080U
792#define VFID_MASK 0x0000007fU
793#define VFID_SHIFT 0
794#define VFID(x) ((x) << VFID_SHIFT)
795#define VFID_GET(x) (((x) & VFID_MASK) >> VFID_SHIFT)
796
797#define PL_INT_CAUSE 0x1940c
798#define ULP_TX 0x08000000U
799#define SGE 0x04000000U
800#define HMA 0x02000000U
801#define CPL_SWITCH 0x01000000U
802#define ULP_RX 0x00800000U
803#define PM_RX 0x00400000U
804#define PM_TX 0x00200000U
805#define MA 0x00100000U
806#define TP 0x00080000U
807#define LE 0x00040000U
808#define EDC1 0x00020000U
809#define EDC0 0x00010000U
810#define MC 0x00008000U
811#define PCIE 0x00004000U
812#define PMU 0x00002000U
813#define XGMAC_KR1 0x00001000U
814#define XGMAC_KR0 0x00000800U
815#define XGMAC1 0x00000400U
816#define XGMAC0 0x00000200U
817#define SMB 0x00000100U
818#define SF 0x00000080U
819#define PL 0x00000040U
820#define NCSI 0x00000020U
821#define MPS 0x00000010U
822#define MI 0x00000008U
823#define DBG 0x00000004U
824#define I2CM 0x00000002U
825#define CIM 0x00000001U
826
827#define PL_INT_MAP0 0x19414
828#define PL_RST 0x19428
829#define PIORST 0x00000002U
830#define PIORSTMODE 0x00000001U
831
832#define PL_PL_INT_CAUSE 0x19430
833#define FATALPERR 0x00000010U
834#define PERRVFID 0x00000001U
835
836#define PL_REV 0x1943c
837
838#define LE_DB_CONFIG 0x19c04
839#define HASHEN 0x00100000U
840
841#define LE_DB_SERVER_INDEX 0x19c18
842#define LE_DB_ACT_CNT_IPV4 0x19c20
843#define LE_DB_ACT_CNT_IPV6 0x19c24
844
845#define LE_DB_INT_CAUSE 0x19c3c
846#define REQQPARERR 0x00010000U
847#define UNKNOWNCMD 0x00008000U
848#define PARITYERR 0x00000040U
849#define LIPMISS 0x00000020U
850#define LIP0 0x00000010U
851
852#define LE_DB_TID_HASHBASE 0x19df8
853
854#define NCSI_INT_CAUSE 0x1a0d8
855#define CIM_DM_PRTY_ERR 0x00000100U
856#define MPS_DM_PRTY_ERR 0x00000080U
857#define TXFIFO_PRTY_ERR 0x00000002U
858#define RXFIFO_PRTY_ERR 0x00000001U
859
860#define XGMAC_PORT_CFG2 0x1018
861#define PATEN 0x00040000U
862#define MAGICEN 0x00020000U
863
864#define XGMAC_PORT_MAGIC_MACID_LO 0x1024
865#define XGMAC_PORT_MAGIC_MACID_HI 0x1028
866
867#define XGMAC_PORT_EPIO_DATA0 0x10c0
868#define XGMAC_PORT_EPIO_DATA1 0x10c4
869#define XGMAC_PORT_EPIO_DATA2 0x10c8
870#define XGMAC_PORT_EPIO_DATA3 0x10cc
871#define XGMAC_PORT_EPIO_OP 0x10d0
872#define EPIOWR 0x00000100U
873#define ADDRESS_MASK 0x000000ffU
874#define ADDRESS_SHIFT 0
875#define ADDRESS(x) ((x) << ADDRESS_SHIFT)
876
877#define XGMAC_PORT_INT_CAUSE 0x10dc
878#endif /* __T4_REGS_H */
diff --git a/drivers/net/cxgb4/t4fw_api.h b/drivers/net/cxgb4/t4fw_api.h
new file mode 100644
index 000000000000..3393d05a388a
--- /dev/null
+++ b/drivers/net/cxgb4/t4fw_api.h
@@ -0,0 +1,1580 @@
1/*
2 * This file is part of the Chelsio T4 Ethernet driver for Linux.
3 *
4 * Copyright (c) 2009-2010 Chelsio Communications, Inc. All rights reserved.
5 *
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
11 *
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
14 * conditions are met:
15 *
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
18 * disclaimer.
19 *
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 * SOFTWARE.
33 */
34
35#ifndef _T4FW_INTERFACE_H_
36#define _T4FW_INTERFACE_H_
37
38#define FW_T4VF_SGE_BASE_ADDR 0x0000
39#define FW_T4VF_MPS_BASE_ADDR 0x0100
40#define FW_T4VF_PL_BASE_ADDR 0x0200
41#define FW_T4VF_MBDATA_BASE_ADDR 0x0240
42#define FW_T4VF_CIM_BASE_ADDR 0x0300
43
44enum fw_wr_opcodes {
45 FW_FILTER_WR = 0x02,
46 FW_ULPTX_WR = 0x04,
47 FW_TP_WR = 0x05,
48 FW_ETH_TX_PKT_WR = 0x08,
49 FW_FLOWC_WR = 0x0a,
50 FW_OFLD_TX_DATA_WR = 0x0b,
51 FW_CMD_WR = 0x10,
52 FW_ETH_TX_PKT_VM_WR = 0x11,
53 FW_RI_RES_WR = 0x0c,
54 FW_RI_INIT_WR = 0x0d,
55 FW_RI_RDMA_WRITE_WR = 0x14,
56 FW_RI_SEND_WR = 0x15,
57 FW_RI_RDMA_READ_WR = 0x16,
58 FW_RI_RECV_WR = 0x17,
59 FW_RI_BIND_MW_WR = 0x18,
60 FW_RI_FR_NSMR_WR = 0x19,
61 FW_RI_INV_LSTAG_WR = 0x1a,
62 FW_LASTC2E_WR = 0x40
63};
64
65struct fw_wr_hdr {
66 __be32 hi;
67 __be32 lo;
68};
69
70#define FW_WR_OP(x) ((x) << 24)
71#define FW_WR_ATOMIC(x) ((x) << 23)
72#define FW_WR_FLUSH(x) ((x) << 22)
73#define FW_WR_COMPL(x) ((x) << 21)
74#define FW_WR_IMMDLEN(x) ((x) << 0)
75
76#define FW_WR_EQUIQ (1U << 31)
77#define FW_WR_EQUEQ (1U << 30)
78#define FW_WR_FLOWID(x) ((x) << 8)
79#define FW_WR_LEN16(x) ((x) << 0)
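
/*
 * Editor's illustration, not part of the upstream header: composing the two
 * flits of a generic work-request header.  Which of the optional bits a given
 * WR type uses varies; this only shows how the shift macros combine, and the
 * helper name is hypothetical.
 */
static inline void example_init_wr_hdr(struct fw_wr_hdr *wr, unsigned int op,
				       unsigned int flowid, size_t wr_len)
{
	wr->hi = htonl(FW_WR_OP(op) | FW_WR_COMPL(1));
	wr->lo = htonl(FW_WR_FLOWID(flowid) |
		       FW_WR_LEN16(DIV_ROUND_UP(wr_len, 16)));
}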
80
81struct fw_ulptx_wr {
82 __be32 op_to_compl;
83 __be32 flowid_len16;
84 u64 cookie;
85};
86
87struct fw_tp_wr {
88 __be32 op_to_immdlen;
89 __be32 flowid_len16;
90 u64 cookie;
91};
92
93struct fw_eth_tx_pkt_wr {
94 __be32 op_immdlen;
95 __be32 equiq_to_len16;
96 __be64 r3;
97};
98
99enum fw_flowc_mnem {
100 FW_FLOWC_MNEM_PFNVFN, /* PFN [15:8] VFN [7:0] */
101 FW_FLOWC_MNEM_CH,
102 FW_FLOWC_MNEM_PORT,
103 FW_FLOWC_MNEM_IQID,
104 FW_FLOWC_MNEM_SNDNXT,
105 FW_FLOWC_MNEM_RCVNXT,
106 FW_FLOWC_MNEM_SNDBUF,
107 FW_FLOWC_MNEM_MSS,
108};
109
110struct fw_flowc_mnemval {
111 u8 mnemonic;
112 u8 r4[3];
113 __be32 val;
114};
115
116struct fw_flowc_wr {
117 __be32 op_to_nparams;
118#define FW_FLOWC_WR_NPARAMS(x) ((x) << 0)
119 __be32 flowid_len16;
120 struct fw_flowc_mnemval mnemval[0];
121};
122
123struct fw_ofld_tx_data_wr {
124 __be32 op_to_immdlen;
125 __be32 flowid_len16;
126 __be32 plen;
127 __be32 tunnel_to_proxy;
128#define FW_OFLD_TX_DATA_WR_TUNNEL(x) ((x) << 19)
129#define FW_OFLD_TX_DATA_WR_SAVE(x) ((x) << 18)
130#define FW_OFLD_TX_DATA_WR_FLUSH(x) ((x) << 17)
131#define FW_OFLD_TX_DATA_WR_URGENT(x) ((x) << 16)
132#define FW_OFLD_TX_DATA_WR_MORE(x) ((x) << 15)
133#define FW_OFLD_TX_DATA_WR_SHOVE(x) ((x) << 14)
134#define FW_OFLD_TX_DATA_WR_ULPMODE(x) ((x) << 10)
135#define FW_OFLD_TX_DATA_WR_ULPSUBMODE(x) ((x) << 6)
136};
137
138struct fw_cmd_wr {
139 __be32 op_dma;
140#define FW_CMD_WR_DMA (1U << 17)
141 __be32 len16_pkd;
142 __be64 cookie_daddr;
143};
144
145struct fw_eth_tx_pkt_vm_wr {
146 __be32 op_immdlen;
147 __be32 equiq_to_len16;
148 __be32 r3[2];
149 u8 ethmacdst[6];
150 u8 ethmacsrc[6];
151 __be16 ethtype;
152 __be16 vlantci;
153};
154
155#define FW_CMD_MAX_TIMEOUT 3000
156
157enum fw_cmd_opcodes {
158 FW_LDST_CMD = 0x01,
159 FW_RESET_CMD = 0x03,
160 FW_HELLO_CMD = 0x04,
161 FW_BYE_CMD = 0x05,
162 FW_INITIALIZE_CMD = 0x06,
163 FW_CAPS_CONFIG_CMD = 0x07,
164 FW_PARAMS_CMD = 0x08,
165 FW_PFVF_CMD = 0x09,
166 FW_IQ_CMD = 0x10,
167 FW_EQ_MNGT_CMD = 0x11,
168 FW_EQ_ETH_CMD = 0x12,
169 FW_EQ_CTRL_CMD = 0x13,
170 FW_EQ_OFLD_CMD = 0x21,
171 FW_VI_CMD = 0x14,
172 FW_VI_MAC_CMD = 0x15,
173 FW_VI_RXMODE_CMD = 0x16,
174 FW_VI_ENABLE_CMD = 0x17,
175 FW_ACL_MAC_CMD = 0x18,
176 FW_ACL_VLAN_CMD = 0x19,
177 FW_VI_STATS_CMD = 0x1a,
178 FW_PORT_CMD = 0x1b,
179 FW_PORT_STATS_CMD = 0x1c,
180 FW_PORT_LB_STATS_CMD = 0x1d,
181 FW_PORT_TRACE_CMD = 0x1e,
182 FW_PORT_TRACE_MMAP_CMD = 0x1f,
183 FW_RSS_IND_TBL_CMD = 0x20,
184 FW_RSS_GLB_CONFIG_CMD = 0x22,
185 FW_RSS_VI_CONFIG_CMD = 0x23,
186 FW_LASTC2E_CMD = 0x40,
187 FW_ERROR_CMD = 0x80,
188 FW_DEBUG_CMD = 0x81,
189};
190
191enum fw_cmd_cap {
192 FW_CMD_CAP_PF = 0x01,
193 FW_CMD_CAP_DMAQ = 0x02,
194 FW_CMD_CAP_PORT = 0x04,
195 FW_CMD_CAP_PORTPROMISC = 0x08,
196 FW_CMD_CAP_PORTSTATS = 0x10,
197 FW_CMD_CAP_VF = 0x80,
198};
199
200/*
201 * Generic command header flit0
202 */
203struct fw_cmd_hdr {
204 __be32 hi;
205 __be32 lo;
206};
207
208#define FW_CMD_OP(x) ((x) << 24)
209#define FW_CMD_OP_GET(x) (((x) >> 24) & 0xff)
210#define FW_CMD_REQUEST (1U << 23)
211#define FW_CMD_READ (1U << 22)
212#define FW_CMD_WRITE (1U << 21)
213#define FW_CMD_EXEC (1U << 20)
214#define FW_CMD_RAMASK(x) ((x) << 20)
215#define FW_CMD_RETVAL(x) ((x) << 8)
216#define FW_CMD_RETVAL_GET(x) (((x) >> 8) & 0xff)
217#define FW_CMD_LEN16(x) ((x) << 0)
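
/*
 * Editor's illustration, not part of the upstream header: building a minimal
 * firmware command from the header bits above, using the RESET command whose
 * structure appears further below.  Delivery through the mailbox (and reading
 * back FW_CMD_RETVAL_GET() from the reply) is handled by driver code and is
 * omitted; the helper name is hypothetical.
 */
static inline void example_build_reset_cmd(struct fw_reset_cmd *c, u32 val)
{
	memset(c, 0, sizeof(*c));
	c->op_to_write = htonl(FW_CMD_OP(FW_RESET_CMD) | FW_CMD_REQUEST |
			       FW_CMD_WRITE);
	c->retval_len16 = htonl(FW_CMD_LEN16(sizeof(*c) / 16));
	c->val = htonl(val);
}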
218
219enum fw_ldst_addrspc {
220 FW_LDST_ADDRSPC_FIRMWARE = 0x0001,
221 FW_LDST_ADDRSPC_SGE_EGRC = 0x0008,
222 FW_LDST_ADDRSPC_SGE_INGC = 0x0009,
223 FW_LDST_ADDRSPC_SGE_FLMC = 0x000a,
224 FW_LDST_ADDRSPC_SGE_CONMC = 0x000b,
225 FW_LDST_ADDRSPC_TP_PIO = 0x0010,
226 FW_LDST_ADDRSPC_TP_TM_PIO = 0x0011,
227 FW_LDST_ADDRSPC_TP_MIB = 0x0012,
228 FW_LDST_ADDRSPC_MDIO = 0x0018,
229 FW_LDST_ADDRSPC_MPS = 0x0020,
230 FW_LDST_ADDRSPC_FUNC = 0x0028
231};
232
233enum fw_ldst_mps_fid {
234 FW_LDST_MPS_ATRB,
235 FW_LDST_MPS_RPLC
236};
237
238enum fw_ldst_func_access_ctl {
239 FW_LDST_FUNC_ACC_CTL_VIID,
240 FW_LDST_FUNC_ACC_CTL_FID
241};
242
243enum fw_ldst_func_mod_index {
244 FW_LDST_FUNC_MPS
245};
246
247struct fw_ldst_cmd {
248 __be32 op_to_addrspace;
249#define FW_LDST_CMD_ADDRSPACE(x) ((x) << 0)
250 __be32 cycles_to_len16;
251 union fw_ldst {
252 struct fw_ldst_addrval {
253 __be32 addr;
254 __be32 val;
255 } addrval;
256 struct fw_ldst_idctxt {
257 __be32 physid;
258 __be32 msg_pkd;
259 __be32 ctxt_data7;
260 __be32 ctxt_data6;
261 __be32 ctxt_data5;
262 __be32 ctxt_data4;
263 __be32 ctxt_data3;
264 __be32 ctxt_data2;
265 __be32 ctxt_data1;
266 __be32 ctxt_data0;
267 } idctxt;
268 struct fw_ldst_mdio {
269 __be16 paddr_mmd;
270 __be16 raddr;
271 __be16 vctl;
272 __be16 rval;
273 } mdio;
274 struct fw_ldst_mps {
275 __be16 fid_ctl;
276 __be16 rplcpf_pkd;
277 __be32 rplc127_96;
278 __be32 rplc95_64;
279 __be32 rplc63_32;
280 __be32 rplc31_0;
281 __be32 atrb;
282 __be16 vlan[16];
283 } mps;
284 struct fw_ldst_func {
285 u8 access_ctl;
286 u8 mod_index;
287 __be16 ctl_id;
288 __be32 offset;
289 __be64 data0;
290 __be64 data1;
291 } func;
292 } u;
293};
294
295#define FW_LDST_CMD_MSG(x) ((x) << 31)
296#define FW_LDST_CMD_PADDR(x) ((x) << 8)
297#define FW_LDST_CMD_MMD(x) ((x) << 0)
298#define FW_LDST_CMD_FID(x) ((x) << 15)
299#define FW_LDST_CMD_CTL(x) ((x) << 0)
300#define FW_LDST_CMD_RPLCPF(x) ((x) << 0)
301
302struct fw_reset_cmd {
303 __be32 op_to_write;
304 __be32 retval_len16;
305 __be32 val;
306 __be32 r3;
307};
308
309struct fw_hello_cmd {
310 __be32 op_to_write;
311 __be32 retval_len16;
312 __be32 err_to_mbasyncnot;
313#define FW_HELLO_CMD_ERR (1U << 31)
314#define FW_HELLO_CMD_INIT (1U << 30)
315#define FW_HELLO_CMD_MASTERDIS(x) ((x) << 29)
316#define FW_HELLO_CMD_MASTERFORCE(x) ((x) << 28)
317#define FW_HELLO_CMD_MBMASTER(x) ((x) << 24)
318#define FW_HELLO_CMD_MBASYNCNOT(x) ((x) << 20)
319 __be32 fwrev;
320};
321
322struct fw_bye_cmd {
323 __be32 op_to_write;
324 __be32 retval_len16;
325 __be64 r3;
326};
327
328struct fw_initialize_cmd {
329 __be32 op_to_write;
330 __be32 retval_len16;
331 __be64 r3;
332};
333
334enum fw_caps_config_hm {
335 FW_CAPS_CONFIG_HM_PCIE = 0x00000001,
336 FW_CAPS_CONFIG_HM_PL = 0x00000002,
337 FW_CAPS_CONFIG_HM_SGE = 0x00000004,
338 FW_CAPS_CONFIG_HM_CIM = 0x00000008,
339 FW_CAPS_CONFIG_HM_ULPTX = 0x00000010,
340 FW_CAPS_CONFIG_HM_TP = 0x00000020,
341 FW_CAPS_CONFIG_HM_ULPRX = 0x00000040,
342 FW_CAPS_CONFIG_HM_PMRX = 0x00000080,
343 FW_CAPS_CONFIG_HM_PMTX = 0x00000100,
344 FW_CAPS_CONFIG_HM_MC = 0x00000200,
345 FW_CAPS_CONFIG_HM_LE = 0x00000400,
346 FW_CAPS_CONFIG_HM_MPS = 0x00000800,
347 FW_CAPS_CONFIG_HM_XGMAC = 0x00001000,
348 FW_CAPS_CONFIG_HM_CPLSWITCH = 0x00002000,
349 FW_CAPS_CONFIG_HM_T4DBG = 0x00004000,
350 FW_CAPS_CONFIG_HM_MI = 0x00008000,
351 FW_CAPS_CONFIG_HM_I2CM = 0x00010000,
352 FW_CAPS_CONFIG_HM_NCSI = 0x00020000,
353 FW_CAPS_CONFIG_HM_SMB = 0x00040000,
354 FW_CAPS_CONFIG_HM_MA = 0x00080000,
355 FW_CAPS_CONFIG_HM_EDRAM = 0x00100000,
356 FW_CAPS_CONFIG_HM_PMU = 0x00200000,
357 FW_CAPS_CONFIG_HM_UART = 0x00400000,
358 FW_CAPS_CONFIG_HM_SF = 0x00800000,
359};
360
361enum fw_caps_config_nbm {
362 FW_CAPS_CONFIG_NBM_IPMI = 0x00000001,
363 FW_CAPS_CONFIG_NBM_NCSI = 0x00000002,
364};
365
366enum fw_caps_config_link {
367 FW_CAPS_CONFIG_LINK_PPP = 0x00000001,
368 FW_CAPS_CONFIG_LINK_QFC = 0x00000002,
369 FW_CAPS_CONFIG_LINK_DCBX = 0x00000004,
370};
371
372enum fw_caps_config_switch {
373 FW_CAPS_CONFIG_SWITCH_INGRESS = 0x00000001,
374 FW_CAPS_CONFIG_SWITCH_EGRESS = 0x00000002,
375};
376
377enum fw_caps_config_nic {
378 FW_CAPS_CONFIG_NIC = 0x00000001,
379 FW_CAPS_CONFIG_NIC_VM = 0x00000002,
380};
381
382enum fw_caps_config_ofld {
383 FW_CAPS_CONFIG_OFLD = 0x00000001,
384};
385
386enum fw_caps_config_rdma {
387 FW_CAPS_CONFIG_RDMA_RDDP = 0x00000001,
388 FW_CAPS_CONFIG_RDMA_RDMAC = 0x00000002,
389};
390
391enum fw_caps_config_iscsi {
392 FW_CAPS_CONFIG_ISCSI_INITIATOR_PDU = 0x00000001,
393 FW_CAPS_CONFIG_ISCSI_TARGET_PDU = 0x00000002,
394 FW_CAPS_CONFIG_ISCSI_INITIATOR_CNXOFLD = 0x00000004,
395 FW_CAPS_CONFIG_ISCSI_TARGET_CNXOFLD = 0x00000008,
396};
397
398enum fw_caps_config_fcoe {
399 FW_CAPS_CONFIG_FCOE_INITIATOR = 0x00000001,
400 FW_CAPS_CONFIG_FCOE_TARGET = 0x00000002,
401};
402
403struct fw_caps_config_cmd {
404 __be32 op_to_write;
405 __be32 retval_len16;
406 __be32 r2;
407 __be32 hwmbitmap;
408 __be16 nbmcaps;
409 __be16 linkcaps;
410 __be16 switchcaps;
411 __be16 r3;
412 __be16 niccaps;
413 __be16 ofldcaps;
414 __be16 rdmacaps;
415 __be16 r4;
416 __be16 iscsicaps;
417 __be16 fcoecaps;
418 __be32 r5;
419 __be64 r6;
420};
421
422/*
423 * params command mnemonics
424 */
425enum fw_params_mnem {
426 FW_PARAMS_MNEM_DEV = 1, /* device params */
427 FW_PARAMS_MNEM_PFVF = 2, /* function params */
428 FW_PARAMS_MNEM_REG = 3, /* limited register access */
429 FW_PARAMS_MNEM_DMAQ = 4, /* dma queue params */
430 FW_PARAMS_MNEM_LAST
431};
432
433/*
434 * device parameters
435 */
436enum fw_params_param_dev {
437 FW_PARAMS_PARAM_DEV_CCLK = 0x00, /* chip core clock in khz */
438 FW_PARAMS_PARAM_DEV_PORTVEC = 0x01, /* the port vector */
439 FW_PARAMS_PARAM_DEV_NTID = 0x02, /* reads the number of TIDs
440 * allocated by the device's
441 * Lookup Engine
442 */
443 FW_PARAMS_PARAM_DEV_FLOWC_BUFFIFO_SZ = 0x03,
444 FW_PARAMS_PARAM_DEV_INTVER_NIC = 0x04,
445 FW_PARAMS_PARAM_DEV_INTVER_VNIC = 0x05,
446 FW_PARAMS_PARAM_DEV_INTVER_OFLD = 0x06,
447 FW_PARAMS_PARAM_DEV_INTVER_RI = 0x07,
448 FW_PARAMS_PARAM_DEV_INTVER_ISCSIPDU = 0x08,
449 FW_PARAMS_PARAM_DEV_INTVER_ISCSI = 0x09,
450 FW_PARAMS_PARAM_DEV_INTVER_FCOE = 0x0A
451};
452
453/*
454 * physical and virtual function parameters
455 */
456enum fw_params_param_pfvf {
457 FW_PARAMS_PARAM_PFVF_RWXCAPS = 0x00,
458 FW_PARAMS_PARAM_PFVF_ROUTE_START = 0x01,
459 FW_PARAMS_PARAM_PFVF_ROUTE_END = 0x02,
460 FW_PARAMS_PARAM_PFVF_CLIP_START = 0x03,
461 FW_PARAMS_PARAM_PFVF_CLIP_END = 0x04,
462 FW_PARAMS_PARAM_PFVF_FILTER_START = 0x05,
463 FW_PARAMS_PARAM_PFVF_FILTER_END = 0x06,
464 FW_PARAMS_PARAM_PFVF_SERVER_START = 0x07,
465 FW_PARAMS_PARAM_PFVF_SERVER_END = 0x08,
466 FW_PARAMS_PARAM_PFVF_TDDP_START = 0x09,
467 FW_PARAMS_PARAM_PFVF_TDDP_END = 0x0A,
468 FW_PARAMS_PARAM_PFVF_ISCSI_START = 0x0B,
469 FW_PARAMS_PARAM_PFVF_ISCSI_END = 0x0C,
470 FW_PARAMS_PARAM_PFVF_STAG_START = 0x0D,
471 FW_PARAMS_PARAM_PFVF_STAG_END = 0x0E,
472 FW_PARAMS_PARAM_PFVF_RQ_START = 0x1F,
473 FW_PARAMS_PARAM_PFVF_RQ_END = 0x10,
474 FW_PARAMS_PARAM_PFVF_PBL_START = 0x11,
475 FW_PARAMS_PARAM_PFVF_PBL_END = 0x12,
476 FW_PARAMS_PARAM_PFVF_L2T_START = 0x13,
477 FW_PARAMS_PARAM_PFVF_L2T_END = 0x14,
478 FW_PARAMS_PARAM_PFVF_SCHEDCLASS_ETH = 0x20,
479};
480
481/*
482 * dma queue parameters
483 */
484enum fw_params_param_dmaq {
485 FW_PARAMS_PARAM_DMAQ_IQ_DCAEN_DCACPU = 0x00,
486 FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH = 0x01,
487 FW_PARAMS_PARAM_DMAQ_EQ_CMPLIQID_MNGT = 0x10,
488 FW_PARAMS_PARAM_DMAQ_EQ_CMPLIQID_CTRL = 0x11,
489 FW_PARAMS_PARAM_DMAQ_EQ_SCHEDCLASS_ETH = 0x12,
490};
491
492#define FW_PARAMS_MNEM(x) ((x) << 24)
493#define FW_PARAMS_PARAM_X(x) ((x) << 16)
494#define FW_PARAMS_PARAM_Y(x) ((x) << 8)
495#define FW_PARAMS_PARAM_Z(x) ((x) << 0)
496#define FW_PARAMS_PARAM_XYZ(x) ((x) << 0)
497#define FW_PARAMS_PARAM_YZ(x) ((x) << 0)
498
499struct fw_params_cmd {
500 __be32 op_to_vfn;
501 __be32 retval_len16;
502 struct fw_params_param {
503 __be32 mnem;
504 __be32 val;
505 } param[7];
506};
507
508#define FW_PARAMS_CMD_PFN(x) ((x) << 8)
509#define FW_PARAMS_CMD_VFN(x) ((x) << 0)
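
/*
 * Editor's illustration, not part of the upstream header: querying one device
 * parameter (the port vector) through a params command.  Firmware returns the
 * value in param[0].val of the reply; mailbox delivery is omitted and the
 * helper name is hypothetical.
 */
static inline void example_build_portvec_query(struct fw_params_cmd *c,
					       unsigned int pf, unsigned int vf)
{
	memset(c, 0, sizeof(*c));
	c->op_to_vfn = htonl(FW_CMD_OP(FW_PARAMS_CMD) | FW_CMD_REQUEST |
			     FW_CMD_READ | FW_PARAMS_CMD_PFN(pf) |
			     FW_PARAMS_CMD_VFN(vf));
	c->retval_len16 = htonl(FW_CMD_LEN16(sizeof(*c) / 16));
	c->param[0].mnem = htonl(FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
				 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_PORTVEC));
}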
510
511struct fw_pfvf_cmd {
512 __be32 op_to_vfn;
513 __be32 retval_len16;
514 __be32 niqflint_niq;
515 __be32 cmask_to_neq;
516 __be32 tc_to_nexactf;
517 __be32 r_caps_to_nethctrl;
518 __be16 nricq;
519 __be16 nriqp;
520 __be32 r4;
521};
522
523#define FW_PFVF_CMD_PFN(x) ((x) << 8)
524#define FW_PFVF_CMD_VFN(x) ((x) << 0)
525
526#define FW_PFVF_CMD_NIQFLINT(x) ((x) << 20)
527#define FW_PFVF_CMD_NIQFLINT_GET(x) (((x) >> 20) & 0xfff)
528
529#define FW_PFVF_CMD_NIQ(x) ((x) << 0)
530#define FW_PFVF_CMD_NIQ_GET(x) (((x) >> 0) & 0xfffff)
531
532#define FW_PFVF_CMD_CMASK(x) ((x) << 24)
533#define FW_PFVF_CMD_CMASK_GET(x) (((x) >> 24) & 0xf)
534
535#define FW_PFVF_CMD_PMASK(x) ((x) << 20)
536#define FW_PFVF_CMD_PMASK_GET(x) (((x) >> 20) & 0xf)
537
538#define FW_PFVF_CMD_NEQ(x) ((x) << 0)
539#define FW_PFVF_CMD_NEQ_GET(x) (((x) >> 0) & 0xfffff)
540
541#define FW_PFVF_CMD_TC(x) ((x) << 24)
542#define FW_PFVF_CMD_TC_GET(x) (((x) >> 24) & 0xff)
543
544#define FW_PFVF_CMD_NVI(x) ((x) << 16)
545#define FW_PFVF_CMD_NVI_GET(x) (((x) >> 16) & 0xff)
546
547#define FW_PFVF_CMD_NEXACTF(x) ((x) << 0)
548#define FW_PFVF_CMD_NEXACTF_GET(x) (((x) >> 0) & 0xffff)
549
550#define FW_PFVF_CMD_R_CAPS(x) ((x) << 24)
551#define FW_PFVF_CMD_R_CAPS_GET(x) (((x) >> 24) & 0xff)
552
553#define FW_PFVF_CMD_WX_CAPS(x) ((x) << 16)
554#define FW_PFVF_CMD_WX_CAPS_GET(x) (((x) >> 16) & 0xff)
555
556#define FW_PFVF_CMD_NETHCTRL(x) ((x) << 0)
557#define FW_PFVF_CMD_NETHCTRL_GET(x) (((x) >> 0) & 0xffff)
558
559enum fw_iq_type {
560 FW_IQ_TYPE_FL_INT_CAP,
561 FW_IQ_TYPE_NO_FL_INT_CAP
562};
563
564struct fw_iq_cmd {
565 __be32 op_to_vfn;
566 __be32 alloc_to_len16;
567 __be16 physiqid;
568 __be16 iqid;
569 __be16 fl0id;
570 __be16 fl1id;
571 __be32 type_to_iqandstindex;
572 __be16 iqdroprss_to_iqesize;
573 __be16 iqsize;
574 __be64 iqaddr;
575 __be32 iqns_to_fl0congen;
576 __be16 fl0dcaen_to_fl0cidxfthresh;
577 __be16 fl0size;
578 __be64 fl0addr;
579 __be32 fl1cngchmap_to_fl1congen;
580 __be16 fl1dcaen_to_fl1cidxfthresh;
581 __be16 fl1size;
582 __be64 fl1addr;
583};
584
585#define FW_IQ_CMD_PFN(x) ((x) << 8)
586#define FW_IQ_CMD_VFN(x) ((x) << 0)
587
588#define FW_IQ_CMD_ALLOC (1U << 31)
589#define FW_IQ_CMD_FREE (1U << 30)
590#define FW_IQ_CMD_MODIFY (1U << 29)
591#define FW_IQ_CMD_IQSTART(x) ((x) << 28)
592#define FW_IQ_CMD_IQSTOP(x) ((x) << 27)
593
594#define FW_IQ_CMD_TYPE(x) ((x) << 29)
595#define FW_IQ_CMD_IQASYNCH(x) ((x) << 28)
596#define FW_IQ_CMD_VIID(x) ((x) << 16)
597#define FW_IQ_CMD_IQANDST(x) ((x) << 15)
598#define FW_IQ_CMD_IQANUS(x) ((x) << 14)
599#define FW_IQ_CMD_IQANUD(x) ((x) << 12)
600#define FW_IQ_CMD_IQANDSTINDEX(x) ((x) << 0)
601
602#define FW_IQ_CMD_IQDROPRSS (1U << 15)
603#define FW_IQ_CMD_IQGTSMODE (1U << 14)
604#define FW_IQ_CMD_IQPCIECH(x) ((x) << 12)
605#define FW_IQ_CMD_IQDCAEN(x) ((x) << 11)
606#define FW_IQ_CMD_IQDCACPU(x) ((x) << 6)
607#define FW_IQ_CMD_IQINTCNTTHRESH(x) ((x) << 4)
608#define FW_IQ_CMD_IQO (1U << 3)
609#define FW_IQ_CMD_IQCPRIO(x) ((x) << 2)
610#define FW_IQ_CMD_IQESIZE(x) ((x) << 0)
611
612#define FW_IQ_CMD_IQNS(x) ((x) << 31)
613#define FW_IQ_CMD_IQRO(x) ((x) << 30)
614#define FW_IQ_CMD_IQFLINTIQHSEN(x) ((x) << 28)
615#define FW_IQ_CMD_IQFLINTCONGEN(x) ((x) << 27)
616#define FW_IQ_CMD_IQFLINTISCSIC(x) ((x) << 26)
617#define FW_IQ_CMD_FL0CNGCHMAP(x) ((x) << 20)
618#define FW_IQ_CMD_FL0CACHELOCK(x) ((x) << 15)
619#define FW_IQ_CMD_FL0DBP(x) ((x) << 14)
620#define FW_IQ_CMD_FL0DATANS(x) ((x) << 13)
621#define FW_IQ_CMD_FL0DATARO(x) ((x) << 12)
622#define FW_IQ_CMD_FL0CONGCIF(x) ((x) << 11)
623#define FW_IQ_CMD_FL0ONCHIP(x) ((x) << 10)
624#define FW_IQ_CMD_FL0STATUSPGNS(x) ((x) << 9)
625#define FW_IQ_CMD_FL0STATUSPGRO(x) ((x) << 8)
626#define FW_IQ_CMD_FL0FETCHNS(x) ((x) << 7)
627#define FW_IQ_CMD_FL0FETCHRO(x) ((x) << 6)
628#define FW_IQ_CMD_FL0HOSTFCMODE(x) ((x) << 4)
629#define FW_IQ_CMD_FL0CPRIO(x) ((x) << 3)
630#define FW_IQ_CMD_FL0PADEN (1U << 2)
631#define FW_IQ_CMD_FL0PACKEN (1U << 1)
632#define FW_IQ_CMD_FL0CONGEN (1U << 0)
633
634#define FW_IQ_CMD_FL0DCAEN(x) ((x) << 15)
635#define FW_IQ_CMD_FL0DCACPU(x) ((x) << 10)
636#define FW_IQ_CMD_FL0FBMIN(x) ((x) << 7)
637#define FW_IQ_CMD_FL0FBMAX(x) ((x) << 4)
638#define FW_IQ_CMD_FL0CIDXFTHRESHO (1U << 3)
639#define FW_IQ_CMD_FL0CIDXFTHRESH(x) ((x) << 0)
640
641#define FW_IQ_CMD_FL1CNGCHMAP(x) ((x) << 20)
642#define FW_IQ_CMD_FL1CACHELOCK(x) ((x) << 15)
643#define FW_IQ_CMD_FL1DBP(x) ((x) << 14)
644#define FW_IQ_CMD_FL1DATANS(x) ((x) << 13)
645#define FW_IQ_CMD_FL1DATARO(x) ((x) << 12)
646#define FW_IQ_CMD_FL1CONGCIF(x) ((x) << 11)
647#define FW_IQ_CMD_FL1ONCHIP(x) ((x) << 10)
648#define FW_IQ_CMD_FL1STATUSPGNS(x) ((x) << 9)
649#define FW_IQ_CMD_FL1STATUSPGRO(x) ((x) << 8)
650#define FW_IQ_CMD_FL1FETCHNS(x) ((x) << 7)
651#define FW_IQ_CMD_FL1FETCHRO(x) ((x) << 6)
652#define FW_IQ_CMD_FL1HOSTFCMODE(x) ((x) << 4)
653#define FW_IQ_CMD_FL1CPRIO(x) ((x) << 3)
654#define FW_IQ_CMD_FL1PADEN (1U << 2)
655#define FW_IQ_CMD_FL1PACKEN (1U << 1)
656#define FW_IQ_CMD_FL1CONGEN (1U << 0)
657
658#define FW_IQ_CMD_FL1DCAEN(x) ((x) << 15)
659#define FW_IQ_CMD_FL1DCACPU(x) ((x) << 10)
660#define FW_IQ_CMD_FL1FBMIN(x) ((x) << 7)
661#define FW_IQ_CMD_FL1FBMAX(x) ((x) << 4)
662#define FW_IQ_CMD_FL1CIDXFTHRESHO (1U << 3)
663#define FW_IQ_CMD_FL1CIDXFTHRESH(x) ((x) << 0)
664
665struct fw_eq_eth_cmd {
666 __be32 op_to_vfn;
667 __be32 alloc_to_len16;
668 __be32 eqid_pkd;
669 __be32 physeqid_pkd;
670 __be32 fetchszm_to_iqid;
671 __be32 dcaen_to_eqsize;
672 __be64 eqaddr;
673 __be32 viid_pkd;
674 __be32 r8_lo;
675 __be64 r9;
676};
677
678#define FW_EQ_ETH_CMD_PFN(x) ((x) << 8)
679#define FW_EQ_ETH_CMD_VFN(x) ((x) << 0)
680#define FW_EQ_ETH_CMD_ALLOC (1U << 31)
681#define FW_EQ_ETH_CMD_FREE (1U << 30)
682#define FW_EQ_ETH_CMD_MODIFY (1U << 29)
683#define FW_EQ_ETH_CMD_EQSTART (1U << 28)
684#define FW_EQ_ETH_CMD_EQSTOP (1U << 27)
685
686#define FW_EQ_ETH_CMD_EQID(x) ((x) << 0)
687#define FW_EQ_ETH_CMD_EQID_GET(x) (((x) >> 0) & 0xfffff)
688#define FW_EQ_ETH_CMD_PHYSEQID(x) ((x) << 0)
689
690#define FW_EQ_ETH_CMD_FETCHSZM(x) ((x) << 26)
691#define FW_EQ_ETH_CMD_STATUSPGNS(x) ((x) << 25)
692#define FW_EQ_ETH_CMD_STATUSPGRO(x) ((x) << 24)
693#define FW_EQ_ETH_CMD_FETCHNS(x) ((x) << 23)
694#define FW_EQ_ETH_CMD_FETCHRO(x) ((x) << 22)
695#define FW_EQ_ETH_CMD_HOSTFCMODE(x) ((x) << 20)
696#define FW_EQ_ETH_CMD_CPRIO(x) ((x) << 19)
697#define FW_EQ_ETH_CMD_ONCHIP(x) ((x) << 18)
698#define FW_EQ_ETH_CMD_PCIECHN(x) ((x) << 16)
699#define FW_EQ_ETH_CMD_IQID(x) ((x) << 0)
700
701#define FW_EQ_ETH_CMD_DCAEN(x) ((x) << 31)
702#define FW_EQ_ETH_CMD_DCACPU(x) ((x) << 26)
703#define FW_EQ_ETH_CMD_FBMIN(x) ((x) << 23)
704#define FW_EQ_ETH_CMD_FBMAX(x) ((x) << 20)
705#define FW_EQ_ETH_CMD_CIDXFTHRESHO(x) ((x) << 19)
706#define FW_EQ_ETH_CMD_CIDXFTHRESH(x) ((x) << 16)
707#define FW_EQ_ETH_CMD_EQSIZE(x) ((x) << 0)
708
709#define FW_EQ_ETH_CMD_VIID(x) ((x) << 16)
710
711struct fw_eq_ctrl_cmd {
712 __be32 op_to_vfn;
713 __be32 alloc_to_len16;
714 __be32 cmpliqid_eqid;
715 __be32 physeqid_pkd;
716 __be32 fetchszm_to_iqid;
717 __be32 dcaen_to_eqsize;
718 __be64 eqaddr;
719};
720
721#define FW_EQ_CTRL_CMD_PFN(x) ((x) << 8)
722#define FW_EQ_CTRL_CMD_VFN(x) ((x) << 0)
723
724#define FW_EQ_CTRL_CMD_ALLOC (1U << 31)
725#define FW_EQ_CTRL_CMD_FREE (1U << 30)
726#define FW_EQ_CTRL_CMD_MODIFY (1U << 29)
727#define FW_EQ_CTRL_CMD_EQSTART (1U << 28)
728#define FW_EQ_CTRL_CMD_EQSTOP (1U << 27)
729
730#define FW_EQ_CTRL_CMD_CMPLIQID(x) ((x) << 20)
731#define FW_EQ_CTRL_CMD_EQID(x) ((x) << 0)
732#define FW_EQ_CTRL_CMD_EQID_GET(x) (((x) >> 0) & 0xfffff)
733#define FW_EQ_CTRL_CMD_PHYSEQID_GET(x) (((x) >> 0) & 0xfffff)
734
735#define FW_EQ_CTRL_CMD_FETCHSZM (1U << 26)
736#define FW_EQ_CTRL_CMD_STATUSPGNS (1U << 25)
737#define FW_EQ_CTRL_CMD_STATUSPGRO (1U << 24)
738#define FW_EQ_CTRL_CMD_FETCHNS (1U << 23)
739#define FW_EQ_CTRL_CMD_FETCHRO (1U << 22)
740#define FW_EQ_CTRL_CMD_HOSTFCMODE(x) ((x) << 20)
741#define FW_EQ_CTRL_CMD_CPRIO(x) ((x) << 19)
742#define FW_EQ_CTRL_CMD_ONCHIP(x) ((x) << 18)
743#define FW_EQ_CTRL_CMD_PCIECHN(x) ((x) << 16)
744#define FW_EQ_CTRL_CMD_IQID(x) ((x) << 0)
745
746#define FW_EQ_CTRL_CMD_DCAEN(x) ((x) << 31)
747#define FW_EQ_CTRL_CMD_DCACPU(x) ((x) << 26)
748#define FW_EQ_CTRL_CMD_FBMIN(x) ((x) << 23)
749#define FW_EQ_CTRL_CMD_FBMAX(x) ((x) << 20)
750#define FW_EQ_CTRL_CMD_CIDXFTHRESHO(x) ((x) << 19)
751#define FW_EQ_CTRL_CMD_CIDXFTHRESH(x) ((x) << 16)
752#define FW_EQ_CTRL_CMD_EQSIZE(x) ((x) << 0)
753
754struct fw_eq_ofld_cmd {
755 __be32 op_to_vfn;
756 __be32 alloc_to_len16;
757 __be32 eqid_pkd;
758 __be32 physeqid_pkd;
759 __be32 fetchszm_to_iqid;
760 __be32 dcaen_to_eqsize;
761 __be64 eqaddr;
762};
763
764#define FW_EQ_OFLD_CMD_PFN(x) ((x) << 8)
765#define FW_EQ_OFLD_CMD_VFN(x) ((x) << 0)
766
767#define FW_EQ_OFLD_CMD_ALLOC (1U << 31)
768#define FW_EQ_OFLD_CMD_FREE (1U << 30)
769#define FW_EQ_OFLD_CMD_MODIFY (1U << 29)
770#define FW_EQ_OFLD_CMD_EQSTART (1U << 28)
771#define FW_EQ_OFLD_CMD_EQSTOP (1U << 27)
772
773#define FW_EQ_OFLD_CMD_EQID(x) ((x) << 0)
774#define FW_EQ_OFLD_CMD_EQID_GET(x) (((x) >> 0) & 0xfffff)
775#define FW_EQ_OFLD_CMD_PHYSEQID_GET(x) (((x) >> 0) & 0xfffff)
776
777#define FW_EQ_OFLD_CMD_FETCHSZM(x) ((x) << 26)
778#define FW_EQ_OFLD_CMD_STATUSPGNS(x) ((x) << 25)
779#define FW_EQ_OFLD_CMD_STATUSPGRO(x) ((x) << 24)
780#define FW_EQ_OFLD_CMD_FETCHNS(x) ((x) << 23)
781#define FW_EQ_OFLD_CMD_FETCHRO(x) ((x) << 22)
782#define FW_EQ_OFLD_CMD_HOSTFCMODE(x) ((x) << 20)
783#define FW_EQ_OFLD_CMD_CPRIO(x) ((x) << 19)
784#define FW_EQ_OFLD_CMD_ONCHIP(x) ((x) << 18)
785#define FW_EQ_OFLD_CMD_PCIECHN(x) ((x) << 16)
786#define FW_EQ_OFLD_CMD_IQID(x) ((x) << 0)
787
788#define FW_EQ_OFLD_CMD_DCAEN(x) ((x) << 31)
789#define FW_EQ_OFLD_CMD_DCACPU(x) ((x) << 26)
790#define FW_EQ_OFLD_CMD_FBMIN(x) ((x) << 23)
791#define FW_EQ_OFLD_CMD_FBMAX(x) ((x) << 20)
792#define FW_EQ_OFLD_CMD_CIDXFTHRESHO(x) ((x) << 19)
793#define FW_EQ_OFLD_CMD_CIDXFTHRESH(x) ((x) << 16)
794#define FW_EQ_OFLD_CMD_EQSIZE(x) ((x) << 0)
795
796/*
797 * Macros for VIID parsing:
798 * VIID - [10:8] PFN, [7] VI Valid, [6:0] VI number
799 */
800#define FW_VIID_PFN_GET(x) (((x) >> 8) & 0x7)
801#define FW_VIID_VIVLD_GET(x) (((x) >> 7) & 0x1)
802#define FW_VIID_VIN_GET(x) (((x) >> 0) & 0x7F)
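/*
 * Illustrative sketch, not part of this header: decoding a VIID with the
 * field layout documented above.  The macros are repeated so the sample
 * builds stand-alone; the value 0x2a3 is hypothetical and unpacks to
 * PFN 2, VI-valid 1, VI number 0x23.
 */
#include <stdio.h>

#define FW_VIID_PFN_GET(x)	(((x) >> 8) & 0x7)
#define FW_VIID_VIVLD_GET(x)	(((x) >> 7) & 0x1)
#define FW_VIID_VIN_GET(x)	(((x) >> 0) & 0x7F)

int main(void)
{
	unsigned int viid = 0x2a3;	/* hypothetical sample value */

	printf("pfn=%u vivld=%u vin=0x%02x\n",
	       FW_VIID_PFN_GET(viid),
	       FW_VIID_VIVLD_GET(viid),
	       FW_VIID_VIN_GET(viid));
	return 0;			/* prints: pfn=2 vivld=1 vin=0x23 */
}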
803
804struct fw_vi_cmd {
805 __be32 op_to_vfn;
806 __be32 alloc_to_len16;
807 __be16 viid_pkd;
808 u8 mac[6];
809 u8 portid_pkd;
810 u8 nmac;
811 u8 nmac0[6];
812 __be16 rsssize_pkd;
813 u8 nmac1[6];
814 __be16 r7;
815 u8 nmac2[6];
816 __be16 r8;
817 u8 nmac3[6];
818 __be64 r9;
819 __be64 r10;
820};
821
822#define FW_VI_CMD_PFN(x) ((x) << 8)
823#define FW_VI_CMD_VFN(x) ((x) << 0)
824#define FW_VI_CMD_ALLOC (1U << 31)
825#define FW_VI_CMD_FREE (1U << 30)
826#define FW_VI_CMD_VIID(x) ((x) << 0)
827#define FW_VI_CMD_PORTID(x) ((x) << 4)
828#define FW_VI_CMD_RSSSIZE_GET(x) (((x) >> 0) & 0x7ff)
829
830/* Special VI_MAC command index ids */
831#define FW_VI_MAC_ADD_MAC 0x3FF
832#define FW_VI_MAC_ADD_PERSIST_MAC 0x3FE
833#define FW_VI_MAC_MAC_BASED_FREE 0x3FD
834
835enum fw_vi_mac_smac {
836 FW_VI_MAC_MPS_TCAM_ENTRY,
837 FW_VI_MAC_MPS_TCAM_ONLY,
838 FW_VI_MAC_SMT_ONLY,
839 FW_VI_MAC_SMT_AND_MPSTCAM
840};
841
842enum fw_vi_mac_result {
843 FW_VI_MAC_R_SUCCESS,
844 FW_VI_MAC_R_F_NONEXISTENT_NOMEM,
845 FW_VI_MAC_R_SMAC_FAIL,
846 FW_VI_MAC_R_F_ACL_CHECK
847};
848
849struct fw_vi_mac_cmd {
850 __be32 op_to_viid;
851 __be32 freemacs_to_len16;
852 union fw_vi_mac {
853 struct fw_vi_mac_exact {
854 __be16 valid_to_idx;
855 u8 macaddr[6];
856 } exact[7];
857 struct fw_vi_mac_hash {
858 __be64 hashvec;
859 } hash;
860 } u;
861};
862
863#define FW_VI_MAC_CMD_VIID(x) ((x) << 0)
864#define FW_VI_MAC_CMD_FREEMACS(x) ((x) << 31)
865#define FW_VI_MAC_CMD_HASHVECEN (1U << 23)
866#define FW_VI_MAC_CMD_HASHUNIEN(x) ((x) << 22)
867#define FW_VI_MAC_CMD_VALID (1U << 15)
868#define FW_VI_MAC_CMD_PRIO(x) ((x) << 12)
869#define FW_VI_MAC_CMD_SMAC_RESULT(x) ((x) << 10)
870#define FW_VI_MAC_CMD_SMAC_RESULT_GET(x) (((x) >> 10) & 0x3)
871#define FW_VI_MAC_CMD_IDX(x) ((x) << 0)
872#define FW_VI_MAC_CMD_IDX_GET(x) (((x) >> 0) & 0x3ff)
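/*
 * Illustrative sketch, not taken from this patch: composing the valid_to_idx
 * field of a fw_vi_mac_exact entry.  It assumes FW_VI_MAC_ADD_MAC asks the
 * firmware to pick a free filter slot and that the reply's IDX field reports
 * the slot chosen; macros are repeated so the sample builds stand-alone.
 */
#include <stdio.h>

#define FW_VI_MAC_ADD_MAC		0x3FF
#define FW_VI_MAC_CMD_VALID		(1U << 15)
#define FW_VI_MAC_CMD_IDX(x)		((x) << 0)
#define FW_VI_MAC_CMD_IDX_GET(x)	(((x) >> 0) & 0x3ff)

int main(void)
{
	unsigned int valid_to_idx =
		FW_VI_MAC_CMD_VALID | FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC);

	printf("valid_to_idx=0x%04x idx=0x%03x\n",
	       valid_to_idx, FW_VI_MAC_CMD_IDX_GET(valid_to_idx));
	return 0;			/* 0x83ff, idx 0x3ff */
}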
873
874#define FW_RXMODE_MTU_NO_CHG 65535
875
876struct fw_vi_rxmode_cmd {
877 __be32 op_to_viid;
878 __be32 retval_len16;
879 __be32 mtu_to_broadcasten;
880 __be32 r4_lo;
881};
882
883#define FW_VI_RXMODE_CMD_VIID(x) ((x) << 0)
884#define FW_VI_RXMODE_CMD_MTU(x) ((x) << 16)
885#define FW_VI_RXMODE_CMD_PROMISCEN_MASK 0x3
886#define FW_VI_RXMODE_CMD_PROMISCEN(x) ((x) << 14)
887#define FW_VI_RXMODE_CMD_ALLMULTIEN_MASK 0x3
888#define FW_VI_RXMODE_CMD_ALLMULTIEN(x) ((x) << 12)
889#define FW_VI_RXMODE_CMD_BROADCASTEN_MASK 0x3
890#define FW_VI_RXMODE_CMD_BROADCASTEN(x) ((x) << 10)
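/*
 * Illustrative sketch, not taken from this patch: building the
 * mtu_to_broadcasten word of fw_vi_rxmode_cmd.  It assumes the *_MASK values
 * and FW_RXMODE_MTU_NO_CHG mean "leave this setting unchanged"; here only
 * promiscuous mode is enabled.  Macros repeated so the sample builds alone.
 */
#include <stdio.h>

#define FW_RXMODE_MTU_NO_CHG			65535
#define FW_VI_RXMODE_CMD_MTU(x)			((x) << 16)
#define FW_VI_RXMODE_CMD_PROMISCEN(x)		((x) << 14)
#define FW_VI_RXMODE_CMD_ALLMULTIEN_MASK	0x3
#define FW_VI_RXMODE_CMD_ALLMULTIEN(x)		((x) << 12)
#define FW_VI_RXMODE_CMD_BROADCASTEN_MASK	0x3
#define FW_VI_RXMODE_CMD_BROADCASTEN(x)		((x) << 10)

int main(void)
{
	unsigned int mtu = FW_RXMODE_MTU_NO_CHG;
	unsigned int word = FW_VI_RXMODE_CMD_MTU(mtu) |
			    FW_VI_RXMODE_CMD_PROMISCEN(1) |
			    FW_VI_RXMODE_CMD_ALLMULTIEN(FW_VI_RXMODE_CMD_ALLMULTIEN_MASK) |
			    FW_VI_RXMODE_CMD_BROADCASTEN(FW_VI_RXMODE_CMD_BROADCASTEN_MASK);

	printf("mtu_to_broadcasten=0x%08x\n", word);	/* 0xffff7c00 */
	return 0;
}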
891
892struct fw_vi_enable_cmd {
893 __be32 op_to_viid;
894 __be32 ien_to_len16;
895 __be16 blinkdur;
896 __be16 r3;
897 __be32 r4;
898};
899
900#define FW_VI_ENABLE_CMD_VIID(x) ((x) << 0)
901#define FW_VI_ENABLE_CMD_IEN(x) ((x) << 31)
902#define FW_VI_ENABLE_CMD_EEN(x) ((x) << 30)
903#define FW_VI_ENABLE_CMD_LED (1U << 29)
904
905/* VI VF stats offset definitions */
906#define VI_VF_NUM_STATS 16
907enum fw_vi_stats_vf_index {
908 FW_VI_VF_STAT_TX_BCAST_BYTES_IX,
909 FW_VI_VF_STAT_TX_BCAST_FRAMES_IX,
910 FW_VI_VF_STAT_TX_MCAST_BYTES_IX,
911 FW_VI_VF_STAT_TX_MCAST_FRAMES_IX,
912 FW_VI_VF_STAT_TX_UCAST_BYTES_IX,
913 FW_VI_VF_STAT_TX_UCAST_FRAMES_IX,
914 FW_VI_VF_STAT_TX_DROP_FRAMES_IX,
915 FW_VI_VF_STAT_TX_OFLD_BYTES_IX,
916 FW_VI_VF_STAT_TX_OFLD_FRAMES_IX,
917 FW_VI_VF_STAT_RX_BCAST_BYTES_IX,
918 FW_VI_VF_STAT_RX_BCAST_FRAMES_IX,
919 FW_VI_VF_STAT_RX_MCAST_BYTES_IX,
920 FW_VI_VF_STAT_RX_MCAST_FRAMES_IX,
921 FW_VI_VF_STAT_RX_UCAST_BYTES_IX,
922 FW_VI_VF_STAT_RX_UCAST_FRAMES_IX,
923 FW_VI_VF_STAT_RX_ERR_FRAMES_IX
924};
925
926/* VI PF stats offset definitions */
927#define VI_PF_NUM_STATS 17
928enum fw_vi_stats_pf_index {
929 FW_VI_PF_STAT_TX_BCAST_BYTES_IX,
930 FW_VI_PF_STAT_TX_BCAST_FRAMES_IX,
931 FW_VI_PF_STAT_TX_MCAST_BYTES_IX,
932 FW_VI_PF_STAT_TX_MCAST_FRAMES_IX,
933 FW_VI_PF_STAT_TX_UCAST_BYTES_IX,
934 FW_VI_PF_STAT_TX_UCAST_FRAMES_IX,
935 FW_VI_PF_STAT_TX_OFLD_BYTES_IX,
936 FW_VI_PF_STAT_TX_OFLD_FRAMES_IX,
937 FW_VI_PF_STAT_RX_BYTES_IX,
938 FW_VI_PF_STAT_RX_FRAMES_IX,
939 FW_VI_PF_STAT_RX_BCAST_BYTES_IX,
940 FW_VI_PF_STAT_RX_BCAST_FRAMES_IX,
941 FW_VI_PF_STAT_RX_MCAST_BYTES_IX,
942 FW_VI_PF_STAT_RX_MCAST_FRAMES_IX,
943 FW_VI_PF_STAT_RX_UCAST_BYTES_IX,
944 FW_VI_PF_STAT_RX_UCAST_FRAMES_IX,
945 FW_VI_PF_STAT_RX_ERR_FRAMES_IX
946};
947
948struct fw_vi_stats_cmd {
949 __be32 op_to_viid;
950 __be32 retval_len16;
951 union fw_vi_stats {
952 struct fw_vi_stats_ctl {
953 __be16 nstats_ix;
954 __be16 r6;
955 __be32 r7;
956 __be64 stat0;
957 __be64 stat1;
958 __be64 stat2;
959 __be64 stat3;
960 __be64 stat4;
961 __be64 stat5;
962 } ctl;
963 struct fw_vi_stats_pf {
964 __be64 tx_bcast_bytes;
965 __be64 tx_bcast_frames;
966 __be64 tx_mcast_bytes;
967 __be64 tx_mcast_frames;
968 __be64 tx_ucast_bytes;
969 __be64 tx_ucast_frames;
970 __be64 tx_offload_bytes;
971 __be64 tx_offload_frames;
972 __be64 rx_pf_bytes;
973 __be64 rx_pf_frames;
974 __be64 rx_bcast_bytes;
975 __be64 rx_bcast_frames;
976 __be64 rx_mcast_bytes;
977 __be64 rx_mcast_frames;
978 __be64 rx_ucast_bytes;
979 __be64 rx_ucast_frames;
980 __be64 rx_err_frames;
981 } pf;
982 struct fw_vi_stats_vf {
983 __be64 tx_bcast_bytes;
984 __be64 tx_bcast_frames;
985 __be64 tx_mcast_bytes;
986 __be64 tx_mcast_frames;
987 __be64 tx_ucast_bytes;
988 __be64 tx_ucast_frames;
989 __be64 tx_drop_frames;
990 __be64 tx_offload_bytes;
991 __be64 tx_offload_frames;
992 __be64 rx_bcast_bytes;
993 __be64 rx_bcast_frames;
994 __be64 rx_mcast_bytes;
995 __be64 rx_mcast_frames;
996 __be64 rx_ucast_bytes;
997 __be64 rx_ucast_frames;
998 __be64 rx_err_frames;
999 } vf;
1000 } u;
1001};
1002
1003#define FW_VI_STATS_CMD_VIID(x) ((x) << 0)
1004#define FW_VI_STATS_CMD_NSTATS(x) ((x) << 12)
1005#define FW_VI_STATS_CMD_IX(x) ((x) << 0)
1006
1007struct fw_acl_mac_cmd {
1008 __be32 op_to_vfn;
1009 __be32 en_to_len16;
1010 u8 nmac;
1011 u8 r3[7];
1012 __be16 r4;
1013 u8 macaddr0[6];
1014 __be16 r5;
1015 u8 macaddr1[6];
1016 __be16 r6;
1017 u8 macaddr2[6];
1018 __be16 r7;
1019 u8 macaddr3[6];
1020};
1021
1022#define FW_ACL_MAC_CMD_PFN(x) ((x) << 8)
1023#define FW_ACL_MAC_CMD_VFN(x) ((x) << 0)
1024#define FW_ACL_MAC_CMD_EN(x) ((x) << 31)
1025
1026struct fw_acl_vlan_cmd {
1027 __be32 op_to_vfn;
1028 __be32 en_to_len16;
1029 u8 nvlan;
1030 u8 dropnovlan_fm;
1031 u8 r3_lo[6];
1032 __be16 vlanid[16];
1033};
1034
1035#define FW_ACL_VLAN_CMD_PFN(x) ((x) << 8)
1036#define FW_ACL_VLAN_CMD_VFN(x) ((x) << 0)
1037#define FW_ACL_VLAN_CMD_EN(x) ((x) << 31)
1038#define FW_ACL_VLAN_CMD_DROPNOVLAN(x) ((x) << 7)
1039#define FW_ACL_VLAN_CMD_FM(x) ((x) << 6)
1040
1041enum fw_port_cap {
1042 FW_PORT_CAP_SPEED_100M = 0x0001,
1043 FW_PORT_CAP_SPEED_1G = 0x0002,
1044 FW_PORT_CAP_SPEED_2_5G = 0x0004,
1045 FW_PORT_CAP_SPEED_10G = 0x0008,
1046 FW_PORT_CAP_SPEED_40G = 0x0010,
1047 FW_PORT_CAP_SPEED_100G = 0x0020,
1048 FW_PORT_CAP_FC_RX = 0x0040,
1049 FW_PORT_CAP_FC_TX = 0x0080,
1050 FW_PORT_CAP_ANEG = 0x0100,
1051 FW_PORT_CAP_MDI_0 = 0x0200,
1052 FW_PORT_CAP_MDI_1 = 0x0400,
1053 FW_PORT_CAP_BEAN = 0x0800,
1054 FW_PORT_CAP_PMA_LPBK = 0x1000,
1055 FW_PORT_CAP_PCS_LPBK = 0x2000,
1056 FW_PORT_CAP_PHYXS_LPBK = 0x4000,
1057 FW_PORT_CAP_FAR_END_LPBK = 0x8000,
1058};
1059
1060enum fw_port_mdi {
1061 FW_PORT_MDI_UNCHANGED,
1062 FW_PORT_MDI_AUTO,
1063 FW_PORT_MDI_F_STRAIGHT,
1064 FW_PORT_MDI_F_CROSSOVER
1065};
1066
1067#define FW_PORT_MDI(x) ((x) << 9)
1068
1069enum fw_port_action {
1070 FW_PORT_ACTION_L1_CFG = 0x0001,
1071 FW_PORT_ACTION_L2_CFG = 0x0002,
1072 FW_PORT_ACTION_GET_PORT_INFO = 0x0003,
1073 FW_PORT_ACTION_L2_PPP_CFG = 0x0004,
1074 FW_PORT_ACTION_L2_DCB_CFG = 0x0005,
1075 FW_PORT_ACTION_LOW_PWR_TO_NORMAL = 0x0010,
1076 FW_PORT_ACTION_L1_LOW_PWR_EN = 0x0011,
1077 FW_PORT_ACTION_L2_WOL_MODE_EN = 0x0012,
1078 FW_PORT_ACTION_LPBK_TO_NORMAL = 0x0020,
1079 FW_PORT_ACTION_L1_LPBK = 0x0021,
1080 FW_PORT_ACTION_L1_PMA_LPBK = 0x0022,
1081 FW_PORT_ACTION_L1_PCS_LPBK = 0x0023,
1082 FW_PORT_ACTION_L1_PHYXS_CSIDE_LPBK = 0x0024,
1083 FW_PORT_ACTION_L1_PHYXS_ESIDE_LPBK = 0x0025,
1084 FW_PORT_ACTION_PHY_RESET = 0x0040,
1085 FW_PORT_ACTION_PMA_RESET = 0x0041,
1086 FW_PORT_ACTION_PCS_RESET = 0x0042,
1087 FW_PORT_ACTION_PHYXS_RESET = 0x0043,
1088 FW_PORT_ACTION_DTEXS_REEST = 0x0044,
1089 FW_PORT_ACTION_AN_RESET = 0x0045
1090};
1091
1092enum fw_port_l2cfg_ctlbf {
1093 FW_PORT_L2_CTLBF_OVLAN0 = 0x01,
1094 FW_PORT_L2_CTLBF_OVLAN1 = 0x02,
1095 FW_PORT_L2_CTLBF_OVLAN2 = 0x04,
1096 FW_PORT_L2_CTLBF_OVLAN3 = 0x08,
1097 FW_PORT_L2_CTLBF_IVLAN = 0x10,
1098 FW_PORT_L2_CTLBF_TXIPG = 0x20
1099};
1100
1101enum fw_port_dcb_cfg {
1102 FW_PORT_DCB_CFG_PG = 0x01,
1103 FW_PORT_DCB_CFG_PFC = 0x02,
1104 FW_PORT_DCB_CFG_APPL = 0x04
1105};
1106
1107enum fw_port_dcb_cfg_rc {
1108 FW_PORT_DCB_CFG_SUCCESS = 0x0,
1109 FW_PORT_DCB_CFG_ERROR = 0x1
1110};
1111
1112struct fw_port_cmd {
1113 __be32 op_to_portid;
1114 __be32 action_to_len16;
1115 union fw_port {
1116 struct fw_port_l1cfg {
1117 __be32 rcap;
1118 __be32 r;
1119 } l1cfg;
1120 struct fw_port_l2cfg {
1121 __be16 ctlbf_to_ivlan0;
1122 __be16 ivlantype;
1123 __be32 txipg_pkd;
1124 __be16 ovlan0mask;
1125 __be16 ovlan0type;
1126 __be16 ovlan1mask;
1127 __be16 ovlan1type;
1128 __be16 ovlan2mask;
1129 __be16 ovlan2type;
1130 __be16 ovlan3mask;
1131 __be16 ovlan3type;
1132 } l2cfg;
1133 struct fw_port_info {
1134 __be32 lstatus_to_modtype;
1135 __be16 pcap;
1136 __be16 acap;
1137 } info;
1138 struct fw_port_ppp {
1139 __be32 pppen_to_ncsich;
1140 __be32 r11;
1141 } ppp;
1142 struct fw_port_dcb {
1143 __be16 cfg;
1144 u8 up_map;
1145 u8 sf_cfgrc;
1146 __be16 prot_ix;
1147 u8 pe7_to_pe0;
1148 u8 numTCPFCs;
1149 __be32 pgid0_to_pgid7;
1150 __be32 numTCs_oui;
1151 u8 pgpc[8];
1152 } dcb;
1153 } u;
1154};
1155
1156#define FW_PORT_CMD_READ (1U << 22)
1157
1158#define FW_PORT_CMD_PORTID(x) ((x) << 0)
1159#define FW_PORT_CMD_PORTID_GET(x) (((x) >> 0) & 0xf)
1160
1161#define FW_PORT_CMD_ACTION(x) ((x) << 16)
1162
1163#define FW_PORT_CMD_CTLBF(x) ((x) << 10)
1164#define FW_PORT_CMD_OVLAN3(x) ((x) << 7)
1165#define FW_PORT_CMD_OVLAN2(x) ((x) << 6)
1166#define FW_PORT_CMD_OVLAN1(x) ((x) << 5)
1167#define FW_PORT_CMD_OVLAN0(x) ((x) << 4)
1168#define FW_PORT_CMD_IVLAN0(x) ((x) << 3)
1169
1170#define FW_PORT_CMD_TXIPG(x) ((x) << 19)
1171
1172#define FW_PORT_CMD_LSTATUS (1U << 31)
1173#define FW_PORT_CMD_LSPEED(x) ((x) << 24)
1174#define FW_PORT_CMD_LSPEED_GET(x) (((x) >> 24) & 0x3f)
1175#define FW_PORT_CMD_TXPAUSE (1U << 23)
1176#define FW_PORT_CMD_RXPAUSE (1U << 22)
1177#define FW_PORT_CMD_MDIOCAP (1U << 21)
1178#define FW_PORT_CMD_MDIOADDR_GET(x) (((x) >> 16) & 0x1f)
1179#define FW_PORT_CMD_LPTXPAUSE (1U << 15)
1180#define FW_PORT_CMD_LPRXPAUSE (1U << 14)
1181#define FW_PORT_CMD_PTYPE_MASK 0x1f
1182#define FW_PORT_CMD_PTYPE_GET(x) (((x) >> 8) & FW_PORT_CMD_PTYPE_MASK)
1183#define FW_PORT_CMD_MODTYPE_MASK 0x1f
1184#define FW_PORT_CMD_MODTYPE_GET(x) (((x) >> 0) & FW_PORT_CMD_MODTYPE_MASK)
1185
1186#define FW_PORT_CMD_PPPEN(x) ((x) << 31)
1187#define FW_PORT_CMD_TPSRC(x) ((x) << 28)
1188#define FW_PORT_CMD_NCSISRC(x) ((x) << 24)
1189
1190#define FW_PORT_CMD_CH0(x) ((x) << 20)
1191#define FW_PORT_CMD_CH1(x) ((x) << 16)
1192#define FW_PORT_CMD_CH2(x) ((x) << 12)
1193#define FW_PORT_CMD_CH3(x) ((x) << 8)
1194#define FW_PORT_CMD_NCSICH(x) ((x) << 4)
1195
1196enum fw_port_type {
1197 FW_PORT_TYPE_FIBER,
1198 FW_PORT_TYPE_KX4,
1199 FW_PORT_TYPE_BT_SGMII,
1200 FW_PORT_TYPE_KX,
1201 FW_PORT_TYPE_BT_XAUI,
1202 FW_PORT_TYPE_KR,
1203 FW_PORT_TYPE_CX4,
1204 FW_PORT_TYPE_TWINAX,
1205
1206 FW_PORT_TYPE_NONE = FW_PORT_CMD_PTYPE_MASK
1207};
1208
1209enum fw_port_module_type {
1210 FW_PORT_MOD_TYPE_NA,
1211 FW_PORT_MOD_TYPE_LR,
1212 FW_PORT_MOD_TYPE_SR,
1213 FW_PORT_MOD_TYPE_ER,
1214
1215 FW_PORT_MOD_TYPE_NONE = FW_PORT_CMD_MODTYPE_MASK
1216};
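/*
 * Illustrative sketch, not taken from this patch: unpacking a hypothetical
 * lstatus_to_modtype word from a GET_PORT_INFO reply.  It assumes the LSPEED
 * field carries FW_PORT_CAP_SPEED_* bits; 0x88000202 would mean link up,
 * 10G speed bit set, FW_PORT_TYPE_BT_SGMII, FW_PORT_MOD_TYPE_SR.  Macros
 * repeated so the sample builds stand-alone.
 */
#include <stdio.h>

#define FW_PORT_CMD_LSTATUS		(1U << 31)
#define FW_PORT_CMD_LSPEED_GET(x)	(((x) >> 24) & 0x3f)
#define FW_PORT_CMD_PTYPE_GET(x)	(((x) >> 8) & 0x1f)
#define FW_PORT_CMD_MODTYPE_GET(x)	(((x) >> 0) & 0x1f)
#define FW_PORT_CAP_SPEED_10G		0x0008

int main(void)
{
	unsigned int stat = 0x88000202;	/* hypothetical reply word */

	printf("link=%u 10g=%u ptype=%u modtype=%u\n",
	       !!(stat & FW_PORT_CMD_LSTATUS),
	       !!(FW_PORT_CMD_LSPEED_GET(stat) & FW_PORT_CAP_SPEED_10G),
	       FW_PORT_CMD_PTYPE_GET(stat),
	       FW_PORT_CMD_MODTYPE_GET(stat));
	return 0;			/* link=1 10g=1 ptype=2 modtype=2 */
}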
1217
1218/* port stats */
1219#define FW_NUM_PORT_STATS 50
1220#define FW_NUM_PORT_TX_STATS 23
1221#define FW_NUM_PORT_RX_STATS 27
1222
1223enum fw_port_stats_tx_index {
1224 FW_STAT_TX_PORT_BYTES_IX,
1225 FW_STAT_TX_PORT_FRAMES_IX,
1226 FW_STAT_TX_PORT_BCAST_IX,
1227 FW_STAT_TX_PORT_MCAST_IX,
1228 FW_STAT_TX_PORT_UCAST_IX,
1229 FW_STAT_TX_PORT_ERROR_IX,
1230 FW_STAT_TX_PORT_64B_IX,
1231 FW_STAT_TX_PORT_65B_127B_IX,
1232 FW_STAT_TX_PORT_128B_255B_IX,
1233 FW_STAT_TX_PORT_256B_511B_IX,
1234 FW_STAT_TX_PORT_512B_1023B_IX,
1235 FW_STAT_TX_PORT_1024B_1518B_IX,
1236 FW_STAT_TX_PORT_1519B_MAX_IX,
1237 FW_STAT_TX_PORT_DROP_IX,
1238 FW_STAT_TX_PORT_PAUSE_IX,
1239 FW_STAT_TX_PORT_PPP0_IX,
1240 FW_STAT_TX_PORT_PPP1_IX,
1241 FW_STAT_TX_PORT_PPP2_IX,
1242 FW_STAT_TX_PORT_PPP3_IX,
1243 FW_STAT_TX_PORT_PPP4_IX,
1244 FW_STAT_TX_PORT_PPP5_IX,
1245 FW_STAT_TX_PORT_PPP6_IX,
1246 FW_STAT_TX_PORT_PPP7_IX
1247};
1248
1249enum fw_port_stat_rx_index {
1250 FW_STAT_RX_PORT_BYTES_IX,
1251 FW_STAT_RX_PORT_FRAMES_IX,
1252 FW_STAT_RX_PORT_BCAST_IX,
1253 FW_STAT_RX_PORT_MCAST_IX,
1254 FW_STAT_RX_PORT_UCAST_IX,
1255 FW_STAT_RX_PORT_MTU_ERROR_IX,
1256 FW_STAT_RX_PORT_MTU_CRC_ERROR_IX,
1257 FW_STAT_RX_PORT_CRC_ERROR_IX,
1258 FW_STAT_RX_PORT_LEN_ERROR_IX,
1259 FW_STAT_RX_PORT_SYM_ERROR_IX,
1260 FW_STAT_RX_PORT_64B_IX,
1261 FW_STAT_RX_PORT_65B_127B_IX,
1262 FW_STAT_RX_PORT_128B_255B_IX,
1263 FW_STAT_RX_PORT_256B_511B_IX,
1264 FW_STAT_RX_PORT_512B_1023B_IX,
1265 FW_STAT_RX_PORT_1024B_1518B_IX,
1266 FW_STAT_RX_PORT_1519B_MAX_IX,
1267 FW_STAT_RX_PORT_PAUSE_IX,
1268 FW_STAT_RX_PORT_PPP0_IX,
1269 FW_STAT_RX_PORT_PPP1_IX,
1270 FW_STAT_RX_PORT_PPP2_IX,
1271 FW_STAT_RX_PORT_PPP3_IX,
1272 FW_STAT_RX_PORT_PPP4_IX,
1273 FW_STAT_RX_PORT_PPP5_IX,
1274 FW_STAT_RX_PORT_PPP6_IX,
1275 FW_STAT_RX_PORT_PPP7_IX,
1276 FW_STAT_RX_PORT_LESS_64B_IX
1277};
1278
1279struct fw_port_stats_cmd {
1280 __be32 op_to_portid;
1281 __be32 retval_len16;
1282 union fw_port_stats {
1283 struct fw_port_stats_ctl {
1284 u8 nstats_bg_bm;
1285 u8 tx_ix;
1286 __be16 r6;
1287 __be32 r7;
1288 __be64 stat0;
1289 __be64 stat1;
1290 __be64 stat2;
1291 __be64 stat3;
1292 __be64 stat4;
1293 __be64 stat5;
1294 } ctl;
1295 struct fw_port_stats_all {
1296 __be64 tx_bytes;
1297 __be64 tx_frames;
1298 __be64 tx_bcast;
1299 __be64 tx_mcast;
1300 __be64 tx_ucast;
1301 __be64 tx_error;
1302 __be64 tx_64b;
1303 __be64 tx_65b_127b;
1304 __be64 tx_128b_255b;
1305 __be64 tx_256b_511b;
1306 __be64 tx_512b_1023b;
1307 __be64 tx_1024b_1518b;
1308 __be64 tx_1519b_max;
1309 __be64 tx_drop;
1310 __be64 tx_pause;
1311 __be64 tx_ppp0;
1312 __be64 tx_ppp1;
1313 __be64 tx_ppp2;
1314 __be64 tx_ppp3;
1315 __be64 tx_ppp4;
1316 __be64 tx_ppp5;
1317 __be64 tx_ppp6;
1318 __be64 tx_ppp7;
1319 __be64 rx_bytes;
1320 __be64 rx_frames;
1321 __be64 rx_bcast;
1322 __be64 rx_mcast;
1323 __be64 rx_ucast;
1324 __be64 rx_mtu_error;
1325 __be64 rx_mtu_crc_error;
1326 __be64 rx_crc_error;
1327 __be64 rx_len_error;
1328 __be64 rx_sym_error;
1329 __be64 rx_64b;
1330 __be64 rx_65b_127b;
1331 __be64 rx_128b_255b;
1332 __be64 rx_256b_511b;
1333 __be64 rx_512b_1023b;
1334 __be64 rx_1024b_1518b;
1335 __be64 rx_1519b_max;
1336 __be64 rx_pause;
1337 __be64 rx_ppp0;
1338 __be64 rx_ppp1;
1339 __be64 rx_ppp2;
1340 __be64 rx_ppp3;
1341 __be64 rx_ppp4;
1342 __be64 rx_ppp5;
1343 __be64 rx_ppp6;
1344 __be64 rx_ppp7;
1345 __be64 rx_less_64b;
1346 __be64 rx_bg_drop;
1347 __be64 rx_bg_trunc;
1348 } all;
1349 } u;
1350};
1351
1352#define FW_PORT_STATS_CMD_NSTATS(x) ((x) << 4)
1353#define FW_PORT_STATS_CMD_BG_BM(x) ((x) << 0)
1354#define FW_PORT_STATS_CMD_TX(x) ((x) << 7)
1355#define FW_PORT_STATS_CMD_IX(x) ((x) << 0)
1356
1357/* port loopback stats */
1358#define FW_NUM_LB_STATS 16
1359enum fw_port_lb_stats_index {
1360 FW_STAT_LB_PORT_BYTES_IX,
1361 FW_STAT_LB_PORT_FRAMES_IX,
1362 FW_STAT_LB_PORT_BCAST_IX,
1363 FW_STAT_LB_PORT_MCAST_IX,
1364 FW_STAT_LB_PORT_UCAST_IX,
1365 FW_STAT_LB_PORT_ERROR_IX,
1366 FW_STAT_LB_PORT_64B_IX,
1367 FW_STAT_LB_PORT_65B_127B_IX,
1368 FW_STAT_LB_PORT_128B_255B_IX,
1369 FW_STAT_LB_PORT_256B_511B_IX,
1370 FW_STAT_LB_PORT_512B_1023B_IX,
1371 FW_STAT_LB_PORT_1024B_1518B_IX,
1372 FW_STAT_LB_PORT_1519B_MAX_IX,
1373 FW_STAT_LB_PORT_DROP_FRAMES_IX
1374};
1375
1376struct fw_port_lb_stats_cmd {
1377 __be32 op_to_lbport;
1378 __be32 retval_len16;
1379 union fw_port_lb_stats {
1380 struct fw_port_lb_stats_ctl {
1381 u8 nstats_bg_bm;
1382 u8 ix_pkd;
1383 __be16 r6;
1384 __be32 r7;
1385 __be64 stat0;
1386 __be64 stat1;
1387 __be64 stat2;
1388 __be64 stat3;
1389 __be64 stat4;
1390 __be64 stat5;
1391 } ctl;
1392 struct fw_port_lb_stats_all {
1393 __be64 tx_bytes;
1394 __be64 tx_frames;
1395 __be64 tx_bcast;
1396 __be64 tx_mcast;
1397 __be64 tx_ucast;
1398 __be64 tx_error;
1399 __be64 tx_64b;
1400 __be64 tx_65b_127b;
1401 __be64 tx_128b_255b;
1402 __be64 tx_256b_511b;
1403 __be64 tx_512b_1023b;
1404 __be64 tx_1024b_1518b;
1405 __be64 tx_1519b_max;
1406 __be64 rx_lb_drop;
1407 __be64 rx_lb_trunc;
1408 } all;
1409 } u;
1410};
1411
1412#define FW_PORT_LB_STATS_CMD_LBPORT(x) ((x) << 0)
1413#define FW_PORT_LB_STATS_CMD_NSTATS(x) ((x) << 4)
1414#define FW_PORT_LB_STATS_CMD_BG_BM(x) ((x) << 0)
1415#define FW_PORT_LB_STATS_CMD_IX(x) ((x) << 0)
1416
1417struct fw_rss_ind_tbl_cmd {
1418 __be32 op_to_viid;
1419#define FW_RSS_IND_TBL_CMD_VIID(x) ((x) << 0)
1420 __be32 retval_len16;
1421 __be16 niqid;
1422 __be16 startidx;
1423 __be32 r3;
1424 __be32 iq0_to_iq2;
1425#define FW_RSS_IND_TBL_CMD_IQ0(x) ((x) << 20)
1426#define FW_RSS_IND_TBL_CMD_IQ1(x) ((x) << 10)
1427#define FW_RSS_IND_TBL_CMD_IQ2(x) ((x) << 0)
1428 __be32 iq3_to_iq5;
1429 __be32 iq6_to_iq8;
1430 __be32 iq9_to_iq11;
1431 __be32 iq12_to_iq14;
1432 __be32 iq15_to_iq17;
1433 __be32 iq18_to_iq20;
1434 __be32 iq21_to_iq23;
1435 __be32 iq24_to_iq26;
1436 __be32 iq27_to_iq29;
1437 __be32 iq30_iq31;
1438 __be32 r15_lo;
1439};
1440
1441struct fw_rss_glb_config_cmd {
1442 __be32 op_to_write;
1443 __be32 retval_len16;
1444 union fw_rss_glb_config {
1445 struct fw_rss_glb_config_manual {
1446 __be32 mode_pkd;
1447 __be32 r3;
1448 __be64 r4;
1449 __be64 r5;
1450 } manual;
1451 struct fw_rss_glb_config_basicvirtual {
1452 __be32 mode_pkd;
1453 __be32 synmapen_to_hashtoeplitz;
1454#define FW_RSS_GLB_CONFIG_CMD_SYNMAPEN (1U << 8)
1455#define FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV6 (1U << 7)
1456#define FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV6 (1U << 6)
1457#define FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV4 (1U << 5)
1458#define FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV4 (1U << 4)
1459#define FW_RSS_GLB_CONFIG_CMD_OFDMAPEN (1U << 3)
1460#define FW_RSS_GLB_CONFIG_CMD_TNLMAPEN (1U << 2)
1461#define FW_RSS_GLB_CONFIG_CMD_TNLALLLKP (1U << 1)
1462#define FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ (1U << 0)
1463 __be64 r8;
1464 __be64 r9;
1465 } basicvirtual;
1466 } u;
1467};
1468
1469#define FW_RSS_GLB_CONFIG_CMD_MODE(x) ((x) << 28)
1470
1471#define FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL 0
1472#define FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL 1
1473
1474struct fw_rss_vi_config_cmd {
1475 __be32 op_to_viid;
1476#define FW_RSS_VI_CONFIG_CMD_VIID(x) ((x) << 0)
1477 __be32 retval_len16;
1478 union fw_rss_vi_config {
1479 struct fw_rss_vi_config_manual {
1480 __be64 r3;
1481 __be64 r4;
1482 __be64 r5;
1483 } manual;
1484 struct fw_rss_vi_config_basicvirtual {
1485 __be32 r6;
1486 __be32 defaultq_to_ip4udpen;
1487#define FW_RSS_VI_CONFIG_CMD_DEFAULTQ(x) ((x) << 16)
1488#define FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN (1U << 4)
1489#define FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN (1U << 3)
1490#define FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN (1U << 2)
1491#define FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN (1U << 1)
1492#define FW_RSS_VI_CONFIG_CMD_IP4UDPEN (1U << 0)
1493 __be64 r9;
1494 __be64 r10;
1495 } basicvirtual;
1496 } u;
1497};
1498
1499enum fw_error_type {
1500 FW_ERROR_TYPE_EXCEPTION = 0x0,
1501 FW_ERROR_TYPE_HWMODULE = 0x1,
1502 FW_ERROR_TYPE_WR = 0x2,
1503 FW_ERROR_TYPE_ACL = 0x3,
1504};
1505
1506struct fw_error_cmd {
1507 __be32 op_to_type;
1508 __be32 len16_pkd;
1509 union fw_error {
1510 struct fw_error_exception {
1511 __be32 info[6];
1512 } exception;
1513 struct fw_error_hwmodule {
1514 __be32 regaddr;
1515 __be32 regval;
1516 } hwmodule;
1517 struct fw_error_wr {
1518 __be16 cidx;
1519 __be16 pfn_vfn;
1520 __be32 eqid;
1521 u8 wrhdr[16];
1522 } wr;
1523 struct fw_error_acl {
1524 __be16 cidx;
1525 __be16 pfn_vfn;
1526 __be32 eqid;
1527 __be16 mv_pkd;
1528 u8 val[6];
1529 __be64 r4;
1530 } acl;
1531 } u;
1532};
1533
1534struct fw_debug_cmd {
1535 __be32 op_type;
1536#define FW_DEBUG_CMD_TYPE_GET(x) ((x) & 0xff)
1537 __be32 len16_pkd;
1538 union fw_debug {
1539 struct fw_debug_assert {
1540 __be32 fcid;
1541 __be32 line;
1542 __be32 x;
1543 __be32 y;
1544 u8 filename_0_7[8];
1545 u8 filename_8_15[8];
1546 __be64 r3;
1547 } assert;
1548 struct fw_debug_prt {
1549 __be16 dprtstridx;
1550 __be16 r3[3];
1551 __be32 dprtstrparam0;
1552 __be32 dprtstrparam1;
1553 __be32 dprtstrparam2;
1554 __be32 dprtstrparam3;
1555 } prt;
1556 } u;
1557};
1558
1559struct fw_hdr {
1560 u8 ver;
1561 u8 reserved1;
1562 __be16 len512; /* bin length in units of 512-bytes */
1563 __be32 fw_ver; /* firmware version */
1564 __be32 tp_microcode_ver;
1565 u8 intfver_nic;
1566 u8 intfver_vnic;
1567 u8 intfver_ofld;
1568 u8 intfver_ri;
1569 u8 intfver_iscsipdu;
1570 u8 intfver_iscsi;
1571 u8 intfver_fcoe;
1572 u8 reserved2;
1573 __be32 reserved3[27];
1574};
1575
1576#define FW_HDR_FW_VER_MAJOR_GET(x) (((x) >> 24) & 0xff)
1577#define FW_HDR_FW_VER_MINOR_GET(x) (((x) >> 16) & 0xff)
1578#define FW_HDR_FW_VER_MICRO_GET(x) (((x) >> 8) & 0xff)
1579#define FW_HDR_FW_VER_BUILD_GET(x) (((x) >> 0) & 0xff)
1580#endif /* _T4FW_INTERFACE_H_ */
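/*
 * Illustrative sketch, not part of the header above: splitting the packed
 * fw_ver word of struct fw_hdr into major.minor.micro.build with the
 * FW_HDR_FW_VER_*_GET accessors.  In the driver the __be32 value would first
 * go through be32_to_cpu(); a host-order value stands in here, and the
 * macros are repeated so the sample builds stand-alone.
 */
#include <stdio.h>

#define FW_HDR_FW_VER_MAJOR_GET(x)	(((x) >> 24) & 0xff)
#define FW_HDR_FW_VER_MINOR_GET(x)	(((x) >> 16) & 0xff)
#define FW_HDR_FW_VER_MICRO_GET(x)	(((x) >> 8) & 0xff)
#define FW_HDR_FW_VER_BUILD_GET(x)	(((x) >> 0) & 0xff)

int main(void)
{
	unsigned int fw_ver = 0x01020304;	/* hypothetical: 1.2.3.4 */

	printf("%u.%u.%u.%u\n",
	       FW_HDR_FW_VER_MAJOR_GET(fw_ver),
	       FW_HDR_FW_VER_MINOR_GET(fw_ver),
	       FW_HDR_FW_VER_MICRO_GET(fw_ver),
	       FW_HDR_FW_VER_BUILD_GET(fw_ver));
	return 0;
}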
diff --git a/drivers/net/davinci_emac.c b/drivers/net/davinci_emac.c
index 32960b9b02ae..2b8edd2efbf6 100644
--- a/drivers/net/davinci_emac.c
+++ b/drivers/net/davinci_emac.c
@@ -29,10 +29,6 @@
29 * PHY layer usage 29 * PHY layer usage
30 */ 30 */
31 31
32/** Pending Items in this driver:
33 * 1. Use Linux cache infrastcture for DMA'ed memory (dma_xxx functions)
34 */
35
36#include <linux/module.h> 32#include <linux/module.h>
37#include <linux/kernel.h> 33#include <linux/kernel.h>
38#include <linux/sched.h> 34#include <linux/sched.h>
@@ -504,12 +500,6 @@ static unsigned long mdio_max_freq;
504 500
505/* Cache macros - Packet buffers would be from skb pool which is cached */ 501/* Cache macros - Packet buffers would be from skb pool which is cached */
506#define EMAC_VIRT_NOCACHE(addr) (addr) 502#define EMAC_VIRT_NOCACHE(addr) (addr)
507#define EMAC_CACHE_INVALIDATE(addr, size) \
508 dma_cache_maint((void *)addr, size, DMA_FROM_DEVICE)
509#define EMAC_CACHE_WRITEBACK(addr, size) \
510 dma_cache_maint((void *)addr, size, DMA_TO_DEVICE)
511#define EMAC_CACHE_WRITEBACK_INVALIDATE(addr, size) \
512 dma_cache_maint((void *)addr, size, DMA_BIDIRECTIONAL)
513 503
514/* DM644x does not have BD's in cached memory - so no cache functions */ 504/* DM644x does not have BD's in cached memory - so no cache functions */
515#define BD_CACHE_INVALIDATE(addr, size) 505#define BD_CACHE_INVALIDATE(addr, size)
@@ -1235,6 +1225,10 @@ static void emac_txch_teardown(struct emac_priv *priv, u32 ch)
1235 if (1 == txch->queue_active) { 1225 if (1 == txch->queue_active) {
1236 curr_bd = txch->active_queue_head; 1226 curr_bd = txch->active_queue_head;
1237 while (curr_bd != NULL) { 1227 while (curr_bd != NULL) {
1228 dma_unmap_single(emac_dev, curr_bd->buff_ptr,
1229 curr_bd->off_b_len & EMAC_RX_BD_BUF_SIZE,
1230 DMA_TO_DEVICE);
1231
1238 emac_net_tx_complete(priv, (void __force *) 1232 emac_net_tx_complete(priv, (void __force *)
1239 &curr_bd->buf_token, 1, ch); 1233 &curr_bd->buf_token, 1, ch);
1240 if (curr_bd != txch->active_queue_tail) 1234 if (curr_bd != txch->active_queue_tail)
@@ -1327,6 +1321,11 @@ static int emac_tx_bdproc(struct emac_priv *priv, u32 ch, u32 budget)
1327 txch->queue_active = 0; /* end of queue */ 1321 txch->queue_active = 0; /* end of queue */
1328 } 1322 }
1329 } 1323 }
1324
1325 dma_unmap_single(emac_dev, curr_bd->buff_ptr,
1326 curr_bd->off_b_len & EMAC_RX_BD_BUF_SIZE,
1327 DMA_TO_DEVICE);
1328
1330 *tx_complete_ptr = (u32) curr_bd->buf_token; 1329 *tx_complete_ptr = (u32) curr_bd->buf_token;
1331 ++tx_complete_ptr; 1330 ++tx_complete_ptr;
1332 ++tx_complete_cnt; 1331 ++tx_complete_cnt;
@@ -1387,8 +1386,8 @@ static int emac_send(struct emac_priv *priv, struct emac_netpktobj *pkt, u32 ch)
1387 1386
1388 txch->bd_pool_head = curr_bd->next; 1387 txch->bd_pool_head = curr_bd->next;
1389 curr_bd->buf_token = buf_list->buf_token; 1388 curr_bd->buf_token = buf_list->buf_token;
1390 /* FIXME buff_ptr = dma_map_single(... data_ptr ...) */ 1389 curr_bd->buff_ptr = dma_map_single(&priv->ndev->dev, buf_list->data_ptr,
1391 curr_bd->buff_ptr = virt_to_phys(buf_list->data_ptr); 1390 buf_list->length, DMA_TO_DEVICE);
1392 curr_bd->off_b_len = buf_list->length; 1391 curr_bd->off_b_len = buf_list->length;
1393 curr_bd->h_next = 0; 1392 curr_bd->h_next = 0;
1394 curr_bd->next = NULL; 1393 curr_bd->next = NULL;
@@ -1468,7 +1467,6 @@ static int emac_dev_xmit(struct sk_buff *skb, struct net_device *ndev)
1468 tx_buf.length = skb->len; 1467 tx_buf.length = skb->len;
1469 tx_buf.buf_token = (void *)skb; 1468 tx_buf.buf_token = (void *)skb;
1470 tx_buf.data_ptr = skb->data; 1469 tx_buf.data_ptr = skb->data;
1471 EMAC_CACHE_WRITEBACK((unsigned long)skb->data, skb->len);
1472 ndev->trans_start = jiffies; 1470 ndev->trans_start = jiffies;
1473 ret_code = emac_send(priv, &tx_packet, EMAC_DEF_TX_CH); 1471 ret_code = emac_send(priv, &tx_packet, EMAC_DEF_TX_CH);
1474 if (unlikely(ret_code != 0)) { 1472 if (unlikely(ret_code != 0)) {
@@ -1543,7 +1541,6 @@ static void *emac_net_alloc_rx_buf(struct emac_priv *priv, int buf_size,
1543 p_skb->dev = ndev; 1541 p_skb->dev = ndev;
1544 skb_reserve(p_skb, NET_IP_ALIGN); 1542 skb_reserve(p_skb, NET_IP_ALIGN);
1545 *data_token = (void *) p_skb; 1543 *data_token = (void *) p_skb;
1546 EMAC_CACHE_WRITEBACK_INVALIDATE((unsigned long)p_skb->data, buf_size);
1547 return p_skb->data; 1544 return p_skb->data;
1548} 1545}
1549 1546
@@ -1612,8 +1609,8 @@ static int emac_init_rxch(struct emac_priv *priv, u32 ch, char *param)
1612 /* populate the hardware descriptor */ 1609 /* populate the hardware descriptor */
1613 curr_bd->h_next = emac_virt_to_phys(rxch->active_queue_head, 1610 curr_bd->h_next = emac_virt_to_phys(rxch->active_queue_head,
1614 priv); 1611 priv);
1615 /* FIXME buff_ptr = dma_map_single(... data_ptr ...) */ 1612 curr_bd->buff_ptr = dma_map_single(emac_dev, curr_bd->data_ptr,
1616 curr_bd->buff_ptr = virt_to_phys(curr_bd->data_ptr); 1613 rxch->buf_size, DMA_FROM_DEVICE);
1617 curr_bd->off_b_len = rxch->buf_size; 1614 curr_bd->off_b_len = rxch->buf_size;
1618 curr_bd->mode = EMAC_CPPI_OWNERSHIP_BIT; 1615 curr_bd->mode = EMAC_CPPI_OWNERSHIP_BIT;
1619 1616
@@ -1697,6 +1694,12 @@ static void emac_cleanup_rxch(struct emac_priv *priv, u32 ch)
1697 curr_bd = rxch->active_queue_head; 1694 curr_bd = rxch->active_queue_head;
1698 while (curr_bd) { 1695 while (curr_bd) {
1699 if (curr_bd->buf_token) { 1696 if (curr_bd->buf_token) {
1697 dma_unmap_single(&priv->ndev->dev,
1698 curr_bd->buff_ptr,
1699 curr_bd->off_b_len
1700 & EMAC_RX_BD_BUF_SIZE,
1701 DMA_FROM_DEVICE);
1702
1700 dev_kfree_skb_any((struct sk_buff *)\ 1703 dev_kfree_skb_any((struct sk_buff *)\
1701 curr_bd->buf_token); 1704 curr_bd->buf_token);
1702 } 1705 }
@@ -1871,8 +1874,8 @@ static void emac_addbd_to_rx_queue(struct emac_priv *priv, u32 ch,
1871 1874
1872 /* populate the hardware descriptor */ 1875 /* populate the hardware descriptor */
1873 curr_bd->h_next = 0; 1876 curr_bd->h_next = 0;
1874 /* FIXME buff_ptr = dma_map_single(... buffer ...) */ 1877 curr_bd->buff_ptr = dma_map_single(&priv->ndev->dev, buffer,
1875 curr_bd->buff_ptr = virt_to_phys(buffer); 1878 rxch->buf_size, DMA_FROM_DEVICE);
1876 curr_bd->off_b_len = rxch->buf_size; 1879 curr_bd->off_b_len = rxch->buf_size;
1877 curr_bd->mode = EMAC_CPPI_OWNERSHIP_BIT; 1880 curr_bd->mode = EMAC_CPPI_OWNERSHIP_BIT;
1878 curr_bd->next = NULL; 1881 curr_bd->next = NULL;
@@ -1927,7 +1930,6 @@ static int emac_net_rx_cb(struct emac_priv *priv,
1927 p_skb = (struct sk_buff *)net_pkt_list->pkt_token; 1930 p_skb = (struct sk_buff *)net_pkt_list->pkt_token;
1928 /* set length of packet */ 1931 /* set length of packet */
1929 skb_put(p_skb, net_pkt_list->pkt_length); 1932 skb_put(p_skb, net_pkt_list->pkt_length);
1930 EMAC_CACHE_INVALIDATE((unsigned long)p_skb->data, p_skb->len);
1931 p_skb->protocol = eth_type_trans(p_skb, priv->ndev); 1933 p_skb->protocol = eth_type_trans(p_skb, priv->ndev);
1932 netif_receive_skb(p_skb); 1934 netif_receive_skb(p_skb);
1933 priv->net_dev_stats.rx_bytes += net_pkt_list->pkt_length; 1935 priv->net_dev_stats.rx_bytes += net_pkt_list->pkt_length;
@@ -1990,6 +1992,11 @@ static int emac_rx_bdproc(struct emac_priv *priv, u32 ch, u32 budget)
1990 rx_buf_obj->data_ptr = (char *)curr_bd->data_ptr; 1992 rx_buf_obj->data_ptr = (char *)curr_bd->data_ptr;
1991 rx_buf_obj->length = curr_bd->off_b_len & EMAC_RX_BD_BUF_SIZE; 1993 rx_buf_obj->length = curr_bd->off_b_len & EMAC_RX_BD_BUF_SIZE;
1992 rx_buf_obj->buf_token = curr_bd->buf_token; 1994 rx_buf_obj->buf_token = curr_bd->buf_token;
1995
1996 dma_unmap_single(&priv->ndev->dev, curr_bd->buff_ptr,
1997 curr_bd->off_b_len & EMAC_RX_BD_BUF_SIZE,
1998 DMA_FROM_DEVICE);
1999
1993 curr_pkt->pkt_token = curr_pkt->buf_list->buf_token; 2000 curr_pkt->pkt_token = curr_pkt->buf_list->buf_token;
1994 curr_pkt->num_bufs = 1; 2001 curr_pkt->num_bufs = 1;
1995 curr_pkt->pkt_length = 2002 curr_pkt->pkt_length =
@@ -2658,7 +2665,7 @@ static int __devinit davinci_emac_probe(struct platform_device *pdev)
2658 2665
2659 pdata = pdev->dev.platform_data; 2666 pdata = pdev->dev.platform_data;
2660 if (!pdata) { 2667 if (!pdata) {
2661 printk(KERN_ERR "DaVinci EMAC: No platfrom data\n"); 2668 printk(KERN_ERR "DaVinci EMAC: No platform data\n");
2662 return -ENODEV; 2669 return -ENODEV;
2663 } 2670 }
2664 2671
@@ -2820,31 +2827,37 @@ static int __devexit davinci_emac_remove(struct platform_device *pdev)
2820 return 0; 2827 return 0;
2821} 2828}
2822 2829
2823static 2830static int davinci_emac_suspend(struct device *dev)
2824int davinci_emac_suspend(struct platform_device *pdev, pm_message_t state)
2825{ 2831{
2826 struct net_device *dev = platform_get_drvdata(pdev); 2832 struct platform_device *pdev = to_platform_device(dev);
2833 struct net_device *ndev = platform_get_drvdata(pdev);
2827 2834
2828 if (netif_running(dev)) 2835 if (netif_running(ndev))
2829 emac_dev_stop(dev); 2836 emac_dev_stop(ndev);
2830 2837
2831 clk_disable(emac_clk); 2838 clk_disable(emac_clk);
2832 2839
2833 return 0; 2840 return 0;
2834} 2841}
2835 2842
2836static int davinci_emac_resume(struct platform_device *pdev) 2843static int davinci_emac_resume(struct device *dev)
2837{ 2844{
2838 struct net_device *dev = platform_get_drvdata(pdev); 2845 struct platform_device *pdev = to_platform_device(dev);
2846 struct net_device *ndev = platform_get_drvdata(pdev);
2839 2847
2840 clk_enable(emac_clk); 2848 clk_enable(emac_clk);
2841 2849
2842 if (netif_running(dev)) 2850 if (netif_running(ndev))
2843 emac_dev_open(dev); 2851 emac_dev_open(ndev);
2844 2852
2845 return 0; 2853 return 0;
2846} 2854}
2847 2855
2856static const struct dev_pm_ops davinci_emac_pm_ops = {
2857 .suspend = davinci_emac_suspend,
2858 .resume = davinci_emac_resume,
2859};
2860
2848/** 2861/**
2849 * davinci_emac_driver: EMAC platform driver structure 2862 * davinci_emac_driver: EMAC platform driver structure
2850 */ 2863 */
@@ -2852,11 +2865,10 @@ static struct platform_driver davinci_emac_driver = {
2852 .driver = { 2865 .driver = {
2853 .name = "davinci_emac", 2866 .name = "davinci_emac",
2854 .owner = THIS_MODULE, 2867 .owner = THIS_MODULE,
2868 .pm = &davinci_emac_pm_ops,
2855 }, 2869 },
2856 .probe = davinci_emac_probe, 2870 .probe = davinci_emac_probe,
2857 .remove = __devexit_p(davinci_emac_remove), 2871 .remove = __devexit_p(davinci_emac_remove),
2858 .suspend = davinci_emac_suspend,
2859 .resume = davinci_emac_resume,
2860}; 2872};
2861 2873
2862/** 2874/**
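/*
 * Context for the davinci_emac hunks above: the patch replaces the driver's
 * open-coded cache maintenance and virt_to_phys() with the streaming DMA
 * API.  A minimal sketch of that pattern follows; the helper name is
 * hypothetical, it is not part of this patch, and it only builds inside a
 * kernel tree.
 */
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <linux/errno.h>

static int example_xmit_buf(struct device *dev, void *buf, size_t len)
{
	dma_addr_t mapping;

	/* Make the buffer device-visible; replaces EMAC_CACHE_WRITEBACK(). */
	mapping = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, mapping))
		return -ENOMEM;

	/* ... hand 'mapping' to a hardware descriptor and wait for completion ... */

	/* Drop the mapping once the hardware is done with the buffer. */
	dma_unmap_single(dev, mapping, len, DMA_TO_DEVICE);
	return 0;
}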
diff --git a/drivers/net/e100.c b/drivers/net/e100.c
index a26ccab057d5..b997e578e58f 100644
--- a/drivers/net/e100.c
+++ b/drivers/net/e100.c
@@ -2858,7 +2858,7 @@ static int __devinit e100_probe(struct pci_dev *pdev,
2858 } 2858 }
2859 nic->cbs_pool = pci_pool_create(netdev->name, 2859 nic->cbs_pool = pci_pool_create(netdev->name,
2860 nic->pdev, 2860 nic->pdev,
2861 nic->params.cbs.count * sizeof(struct cb), 2861 nic->params.cbs.max * sizeof(struct cb),
2862 sizeof(u32), 2862 sizeof(u32),
2863 0); 2863 0);
2864 DPRINTK(PROBE, INFO, "addr 0x%llx, irq %d, MAC addr %pM\n", 2864 DPRINTK(PROBE, INFO, "addr 0x%llx, irq %d, MAC addr %pM\n",
diff --git a/drivers/net/e1000/e1000.h b/drivers/net/e1000/e1000.h
index 9902b33b7160..2f29c2131851 100644
--- a/drivers/net/e1000/e1000.h
+++ b/drivers/net/e1000/e1000.h
@@ -261,7 +261,6 @@ struct e1000_adapter {
261 /* TX */ 261 /* TX */
262 struct e1000_tx_ring *tx_ring; /* One per active queue */ 262 struct e1000_tx_ring *tx_ring; /* One per active queue */
263 unsigned int restart_queue; 263 unsigned int restart_queue;
264 unsigned long tx_queue_len;
265 u32 txd_cmd; 264 u32 txd_cmd;
266 u32 tx_int_delay; 265 u32 tx_int_delay;
267 u32 tx_abs_int_delay; 266 u32 tx_abs_int_delay;
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index 8be6faee43e6..b15ece26ed84 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -383,8 +383,6 @@ static void e1000_configure(struct e1000_adapter *adapter)
383 adapter->alloc_rx_buf(adapter, ring, 383 adapter->alloc_rx_buf(adapter, ring,
384 E1000_DESC_UNUSED(ring)); 384 E1000_DESC_UNUSED(ring));
385 } 385 }
386
387 adapter->tx_queue_len = netdev->tx_queue_len;
388} 386}
389 387
390int e1000_up(struct e1000_adapter *adapter) 388int e1000_up(struct e1000_adapter *adapter)
@@ -503,7 +501,6 @@ void e1000_down(struct e1000_adapter *adapter)
503 del_timer_sync(&adapter->watchdog_timer); 501 del_timer_sync(&adapter->watchdog_timer);
504 del_timer_sync(&adapter->phy_info_timer); 502 del_timer_sync(&adapter->phy_info_timer);
505 503
506 netdev->tx_queue_len = adapter->tx_queue_len;
507 adapter->link_speed = 0; 504 adapter->link_speed = 0;
508 adapter->link_duplex = 0; 505 adapter->link_duplex = 0;
509 netif_carrier_off(netdev); 506 netif_carrier_off(netdev);
@@ -2316,19 +2313,15 @@ static void e1000_watchdog(unsigned long data)
2316 E1000_CTRL_RFCE) ? "RX" : ((ctrl & 2313 E1000_CTRL_RFCE) ? "RX" : ((ctrl &
2317 E1000_CTRL_TFCE) ? "TX" : "None" ))); 2314 E1000_CTRL_TFCE) ? "TX" : "None" )));
2318 2315
2319 /* tweak tx_queue_len according to speed/duplex 2316 /* adjust timeout factor according to speed/duplex */
2320 * and adjust the timeout factor */
2321 netdev->tx_queue_len = adapter->tx_queue_len;
2322 adapter->tx_timeout_factor = 1; 2317 adapter->tx_timeout_factor = 1;
2323 switch (adapter->link_speed) { 2318 switch (adapter->link_speed) {
2324 case SPEED_10: 2319 case SPEED_10:
2325 txb2b = false; 2320 txb2b = false;
2326 netdev->tx_queue_len = 10;
2327 adapter->tx_timeout_factor = 16; 2321 adapter->tx_timeout_factor = 16;
2328 break; 2322 break;
2329 case SPEED_100: 2323 case SPEED_100:
2330 txb2b = false; 2324 txb2b = false;
2331 netdev->tx_queue_len = 100;
2332 /* maybe add some timeout factor ? */ 2325 /* maybe add some timeout factor ? */
2333 break; 2326 break;
2334 } 2327 }
diff --git a/drivers/net/e1000e/82571.c b/drivers/net/e1000e/82571.c
index 3c95acb3a87d..712ccc66ba25 100644
--- a/drivers/net/e1000e/82571.c
+++ b/drivers/net/e1000e/82571.c
@@ -1346,7 +1346,7 @@ static s32 e1000_setup_fiber_serdes_link_82571(struct e1000_hw *hw)
1346 * 1346 *
1347 * 1) down 1347 * 1) down
1348 * 2) autoneg_progress 1348 * 2) autoneg_progress
1349 * 3) autoneg_complete (the link sucessfully autonegotiated) 1349 * 3) autoneg_complete (the link successfully autonegotiated)
1350 * 4) forced_up (the link has been forced up, it did not autonegotiate) 1350 * 4) forced_up (the link has been forced up, it did not autonegotiate)
1351 * 1351 *
1352 **/ 1352 **/
diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h
index c2ec095d2163..118bdf483593 100644
--- a/drivers/net/e1000e/e1000.h
+++ b/drivers/net/e1000e/e1000.h
@@ -279,7 +279,6 @@ struct e1000_adapter {
279 279
280 struct napi_struct napi; 280 struct napi_struct napi;
281 281
282 unsigned long tx_queue_len;
283 unsigned int restart_queue; 282 unsigned int restart_queue;
284 u32 txd_cmd; 283 u32 txd_cmd;
285 284
diff --git a/drivers/net/e1000e/lib.c b/drivers/net/e1000e/lib.c
index 2425ed11d5cc..a8b2c0de27c4 100644
--- a/drivers/net/e1000e/lib.c
+++ b/drivers/net/e1000e/lib.c
@@ -647,7 +647,7 @@ s32 e1000e_check_for_serdes_link(struct e1000_hw *hw)
647 if (!(rxcw & E1000_RXCW_IV)) { 647 if (!(rxcw & E1000_RXCW_IV)) {
648 mac->serdes_has_link = true; 648 mac->serdes_has_link = true;
649 e_dbg("SERDES: Link up - autoneg " 649 e_dbg("SERDES: Link up - autoneg "
650 "completed sucessfully.\n"); 650 "completed successfully.\n");
651 } else { 651 } else {
652 mac->serdes_has_link = false; 652 mac->serdes_has_link = false;
653 e_dbg("SERDES: Link down - invalid" 653 e_dbg("SERDES: Link down - invalid"
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index 88d54d3efcef..e1cceb606576 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -2289,8 +2289,6 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
2289 ew32(TCTL, tctl); 2289 ew32(TCTL, tctl);
2290 2290
2291 e1000e_config_collision_dist(hw); 2291 e1000e_config_collision_dist(hw);
2292
2293 adapter->tx_queue_len = adapter->netdev->tx_queue_len;
2294} 2292}
2295 2293
2296/** 2294/**
@@ -2877,7 +2875,6 @@ void e1000e_down(struct e1000_adapter *adapter)
2877 del_timer_sync(&adapter->watchdog_timer); 2875 del_timer_sync(&adapter->watchdog_timer);
2878 del_timer_sync(&adapter->phy_info_timer); 2876 del_timer_sync(&adapter->phy_info_timer);
2879 2877
2880 netdev->tx_queue_len = adapter->tx_queue_len;
2881 netif_carrier_off(netdev); 2878 netif_carrier_off(netdev);
2882 adapter->link_speed = 0; 2879 adapter->link_speed = 0;
2883 adapter->link_duplex = 0; 2880 adapter->link_duplex = 0;
@@ -3588,21 +3585,15 @@ static void e1000_watchdog_task(struct work_struct *work)
3588 "link gets many collisions.\n"); 3585 "link gets many collisions.\n");
3589 } 3586 }
3590 3587
3591 /* 3588 /* adjust timeout factor according to speed/duplex */
3592 * tweak tx_queue_len according to speed/duplex
3593 * and adjust the timeout factor
3594 */
3595 netdev->tx_queue_len = adapter->tx_queue_len;
3596 adapter->tx_timeout_factor = 1; 3589 adapter->tx_timeout_factor = 1;
3597 switch (adapter->link_speed) { 3590 switch (adapter->link_speed) {
3598 case SPEED_10: 3591 case SPEED_10:
3599 txb2b = 0; 3592 txb2b = 0;
3600 netdev->tx_queue_len = 10;
3601 adapter->tx_timeout_factor = 16; 3593 adapter->tx_timeout_factor = 16;
3602 break; 3594 break;
3603 case SPEED_100: 3595 case SPEED_100:
3604 txb2b = 0; 3596 txb2b = 0;
3605 netdev->tx_queue_len = 100;
3606 adapter->tx_timeout_factor = 10; 3597 adapter->tx_timeout_factor = 10;
3607 break; 3598 break;
3608 } 3599 }
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index c3f061957c04..080d1cea5b26 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -676,7 +676,7 @@ static int gfar_of_init(struct of_device *ofdev, struct net_device **pdev)
676 priv->rx_queue[i] = NULL; 676 priv->rx_queue[i] = NULL;
677 677
678 for (i = 0; i < priv->num_tx_queues; i++) { 678 for (i = 0; i < priv->num_tx_queues; i++) {
679 priv->tx_queue[i] = (struct gfar_priv_tx_q *)kmalloc( 679 priv->tx_queue[i] = (struct gfar_priv_tx_q *)kzalloc(
680 sizeof (struct gfar_priv_tx_q), GFP_KERNEL); 680 sizeof (struct gfar_priv_tx_q), GFP_KERNEL);
681 if (!priv->tx_queue[i]) { 681 if (!priv->tx_queue[i]) {
682 err = -ENOMEM; 682 err = -ENOMEM;
@@ -689,7 +689,7 @@ static int gfar_of_init(struct of_device *ofdev, struct net_device **pdev)
689 } 689 }
690 690
691 for (i = 0; i < priv->num_rx_queues; i++) { 691 for (i = 0; i < priv->num_rx_queues; i++) {
692 priv->rx_queue[i] = (struct gfar_priv_rx_q *)kmalloc( 692 priv->rx_queue[i] = (struct gfar_priv_rx_q *)kzalloc(
693 sizeof (struct gfar_priv_rx_q), GFP_KERNEL); 693 sizeof (struct gfar_priv_rx_q), GFP_KERNEL);
694 if (!priv->rx_queue[i]) { 694 if (!priv->rx_queue[i]) {
695 err = -ENOMEM; 695 err = -ENOMEM;
@@ -998,7 +998,7 @@ static int gfar_probe(struct of_device *ofdev,
998 } 998 }
999 999
1000 /* Need to reverse the bit maps as bit_map's MSB is q0 1000 /* Need to reverse the bit maps as bit_map's MSB is q0
1001 * but, for_each_bit parses from right to left, which 1001 * but, for_each_set_bit parses from right to left, which
1002 * basically reverses the queue numbers */ 1002 * basically reverses the queue numbers */
1003 for (i = 0; i< priv->num_grps; i++) { 1003 for (i = 0; i< priv->num_grps; i++) {
1004 priv->gfargrp[i].tx_bit_map = reverse_bitmap( 1004 priv->gfargrp[i].tx_bit_map = reverse_bitmap(
@@ -1011,7 +1011,7 @@ static int gfar_probe(struct of_device *ofdev,
1011 * also assign queues to groups */ 1011 * also assign queues to groups */
1012 for (grp_idx = 0; grp_idx < priv->num_grps; grp_idx++) { 1012 for (grp_idx = 0; grp_idx < priv->num_grps; grp_idx++) {
1013 priv->gfargrp[grp_idx].num_rx_queues = 0x0; 1013 priv->gfargrp[grp_idx].num_rx_queues = 0x0;
1014 for_each_bit(i, &priv->gfargrp[grp_idx].rx_bit_map, 1014 for_each_set_bit(i, &priv->gfargrp[grp_idx].rx_bit_map,
1015 priv->num_rx_queues) { 1015 priv->num_rx_queues) {
1016 priv->gfargrp[grp_idx].num_rx_queues++; 1016 priv->gfargrp[grp_idx].num_rx_queues++;
1017 priv->rx_queue[i]->grp = &priv->gfargrp[grp_idx]; 1017 priv->rx_queue[i]->grp = &priv->gfargrp[grp_idx];
@@ -1019,7 +1019,7 @@ static int gfar_probe(struct of_device *ofdev,
1019 rqueue = rqueue | ((RQUEUE_EN0 | RQUEUE_EX0) >> i); 1019 rqueue = rqueue | ((RQUEUE_EN0 | RQUEUE_EX0) >> i);
1020 } 1020 }
1021 priv->gfargrp[grp_idx].num_tx_queues = 0x0; 1021 priv->gfargrp[grp_idx].num_tx_queues = 0x0;
1022 for_each_bit (i, &priv->gfargrp[grp_idx].tx_bit_map, 1022 for_each_set_bit(i, &priv->gfargrp[grp_idx].tx_bit_map,
1023 priv->num_tx_queues) { 1023 priv->num_tx_queues) {
1024 priv->gfargrp[grp_idx].num_tx_queues++; 1024 priv->gfargrp[grp_idx].num_tx_queues++;
1025 priv->tx_queue[i]->grp = &priv->gfargrp[grp_idx]; 1025 priv->tx_queue[i]->grp = &priv->gfargrp[grp_idx];
@@ -1120,10 +1120,10 @@ static int gfar_probe(struct of_device *ofdev,
1120 /* provided which set of benchmarks. */ 1120 /* provided which set of benchmarks. */
1121 printk(KERN_INFO "%s: Running with NAPI enabled\n", dev->name); 1121 printk(KERN_INFO "%s: Running with NAPI enabled\n", dev->name);
1122 for (i = 0; i < priv->num_rx_queues; i++) 1122 for (i = 0; i < priv->num_rx_queues; i++)
1123 printk(KERN_INFO "%s: :RX BD ring size for Q[%d]: %d\n", 1123 printk(KERN_INFO "%s: RX BD ring size for Q[%d]: %d\n",
1124 dev->name, i, priv->rx_queue[i]->rx_ring_size); 1124 dev->name, i, priv->rx_queue[i]->rx_ring_size);
1125 for(i = 0; i < priv->num_tx_queues; i++) 1125 for(i = 0; i < priv->num_tx_queues; i++)
1126 printk(KERN_INFO "%s:TX BD ring size for Q[%d]: %d\n", 1126 printk(KERN_INFO "%s: TX BD ring size for Q[%d]: %d\n",
1127 dev->name, i, priv->tx_queue[i]->tx_ring_size); 1127 dev->name, i, priv->tx_queue[i]->tx_ring_size);
1128 1128
1129 return 0; 1129 return 0;
@@ -1638,13 +1638,13 @@ static void free_skb_resources(struct gfar_private *priv)
1638 /* Go through all the buffer descriptors and free their data buffers */ 1638 /* Go through all the buffer descriptors and free their data buffers */
1639 for (i = 0; i < priv->num_tx_queues; i++) { 1639 for (i = 0; i < priv->num_tx_queues; i++) {
1640 tx_queue = priv->tx_queue[i]; 1640 tx_queue = priv->tx_queue[i];
1641 if(!tx_queue->tx_skbuff) 1641 if(tx_queue->tx_skbuff)
1642 free_skb_tx_queue(tx_queue); 1642 free_skb_tx_queue(tx_queue);
1643 } 1643 }
1644 1644
1645 for (i = 0; i < priv->num_rx_queues; i++) { 1645 for (i = 0; i < priv->num_rx_queues; i++) {
1646 rx_queue = priv->rx_queue[i]; 1646 rx_queue = priv->rx_queue[i];
1647 if(!rx_queue->rx_skbuff) 1647 if(rx_queue->rx_skbuff)
1648 free_skb_rx_queue(rx_queue); 1648 free_skb_rx_queue(rx_queue);
1649 } 1649 }
1650 1650
@@ -1709,7 +1709,7 @@ void gfar_configure_coalescing(struct gfar_private *priv,
1709 1709
1710 if (priv->mode == MQ_MG_MODE) { 1710 if (priv->mode == MQ_MG_MODE) {
1711 baddr = &regs->txic0; 1711 baddr = &regs->txic0;
1712 for_each_bit (i, &tx_mask, priv->num_tx_queues) { 1712 for_each_set_bit(i, &tx_mask, priv->num_tx_queues) {
1713 if (likely(priv->tx_queue[i]->txcoalescing)) { 1713 if (likely(priv->tx_queue[i]->txcoalescing)) {
1714 gfar_write(baddr + i, 0); 1714 gfar_write(baddr + i, 0);
1715 gfar_write(baddr + i, priv->tx_queue[i]->txic); 1715 gfar_write(baddr + i, priv->tx_queue[i]->txic);
@@ -1717,7 +1717,7 @@ void gfar_configure_coalescing(struct gfar_private *priv,
1717 } 1717 }
1718 1718
1719 baddr = &regs->rxic0; 1719 baddr = &regs->rxic0;
1720 for_each_bit (i, &rx_mask, priv->num_rx_queues) { 1720 for_each_set_bit(i, &rx_mask, priv->num_rx_queues) {
1721 if (likely(priv->rx_queue[i]->rxcoalescing)) { 1721 if (likely(priv->rx_queue[i]->rxcoalescing)) {
1722 gfar_write(baddr + i, 0); 1722 gfar_write(baddr + i, 0);
1723 gfar_write(baddr + i, priv->rx_queue[i]->rxic); 1723 gfar_write(baddr + i, priv->rx_queue[i]->rxic);
@@ -2393,6 +2393,7 @@ struct sk_buff * gfar_new_skb(struct net_device *dev)
2393 * as many bytes as needed to align the data properly 2393 * as many bytes as needed to align the data properly
2394 */ 2394 */
2395 skb_reserve(skb, alignamount); 2395 skb_reserve(skb, alignamount);
2396 GFAR_CB(skb)->alignamount = alignamount;
2396 2397
2397 return skb; 2398 return skb;
2398} 2399}
@@ -2533,13 +2534,13 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
2533 newskb = skb; 2534 newskb = skb;
2534 else if (skb) { 2535 else if (skb) {
2535 /* 2536 /*
2536 * We need to reset ->data to what it 2537 * We need to un-reserve() the skb to what it
2537 * was before gfar_new_skb() re-aligned 2538 * was before gfar_new_skb() re-aligned
2538 * it to an RXBUF_ALIGNMENT boundary 2539 * it to an RXBUF_ALIGNMENT boundary
2539 * before we put the skb back on the 2540 * before we put the skb back on the
2540 * recycle list. 2541 * recycle list.
2541 */ 2542 */
2542 skb->data = skb->head + NET_SKB_PAD; 2543 skb_reserve(skb, -GFAR_CB(skb)->alignamount);
2543 __skb_queue_head(&priv->rx_recycle, skb); 2544 __skb_queue_head(&priv->rx_recycle, skb);
2544 } 2545 }
2545 } else { 2546 } else {
@@ -2610,7 +2611,7 @@ static int gfar_poll(struct napi_struct *napi, int budget)
2610 budget_per_queue = left_over_budget/num_queues; 2611 budget_per_queue = left_over_budget/num_queues;
2611 left_over_budget = 0; 2612 left_over_budget = 0;
2612 2613
2613 for_each_bit(i, &gfargrp->rx_bit_map, priv->num_rx_queues) { 2614 for_each_set_bit(i, &gfargrp->rx_bit_map, priv->num_rx_queues) {
2614 if (test_bit(i, &serviced_queues)) 2615 if (test_bit(i, &serviced_queues))
2615 continue; 2616 continue;
2616 rx_queue = priv->rx_queue[i]; 2617 rx_queue = priv->rx_queue[i];
diff --git a/drivers/net/gianfar.h b/drivers/net/gianfar.h
index 3d72dc43dca5..17d25e714236 100644
--- a/drivers/net/gianfar.h
+++ b/drivers/net/gianfar.h
@@ -566,6 +566,12 @@ struct rxfcb {
566 u16 vlctl; /* VLAN control word */ 566 u16 vlctl; /* VLAN control word */
567}; 567};
568 568
569struct gianfar_skb_cb {
570 int alignamount;
571};
572
573#define GFAR_CB(skb) ((struct gianfar_skb_cb *)((skb)->cb))
574
569struct rmon_mib 575struct rmon_mib
570{ 576{
571 u32 tr64; /* 0x.680 - Transmit and Receive 64-byte Frame Counter */ 577 u32 tr64; /* 0x.680 - Transmit and Receive 64-byte Frame Counter */
diff --git a/drivers/net/ibmveth.c b/drivers/net/ibmveth.c
index f2b937966950..0bc777bac9b4 100644
--- a/drivers/net/ibmveth.c
+++ b/drivers/net/ibmveth.c
@@ -1577,7 +1577,7 @@ static struct attribute * veth_pool_attrs[] = {
1577 NULL, 1577 NULL,
1578}; 1578};
1579 1579
1580static struct sysfs_ops veth_pool_ops = { 1580static const struct sysfs_ops veth_pool_ops = {
1581 .show = veth_pool_show, 1581 .show = veth_pool_show,
1582 .store = veth_pool_store, 1582 .store = veth_pool_store,
1583}; 1583};
diff --git a/drivers/net/igb/e1000_82575.c b/drivers/net/igb/e1000_82575.c
index 9d7fa2fb85ea..0bc990ec4a8e 100644
--- a/drivers/net/igb/e1000_82575.c
+++ b/drivers/net/igb/e1000_82575.c
@@ -94,6 +94,7 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
94 case E1000_DEV_ID_82576_FIBER: 94 case E1000_DEV_ID_82576_FIBER:
95 case E1000_DEV_ID_82576_SERDES: 95 case E1000_DEV_ID_82576_SERDES:
96 case E1000_DEV_ID_82576_QUAD_COPPER: 96 case E1000_DEV_ID_82576_QUAD_COPPER:
97 case E1000_DEV_ID_82576_QUAD_COPPER_ET2:
97 case E1000_DEV_ID_82576_SERDES_QUAD: 98 case E1000_DEV_ID_82576_SERDES_QUAD:
98 mac->type = e1000_82576; 99 mac->type = e1000_82576;
99 break; 100 break;
diff --git a/drivers/net/igb/e1000_hw.h b/drivers/net/igb/e1000_hw.h
index 448005276b26..82a533f5192a 100644
--- a/drivers/net/igb/e1000_hw.h
+++ b/drivers/net/igb/e1000_hw.h
@@ -41,6 +41,7 @@ struct e1000_hw;
41#define E1000_DEV_ID_82576_FIBER 0x10E6 41#define E1000_DEV_ID_82576_FIBER 0x10E6
42#define E1000_DEV_ID_82576_SERDES 0x10E7 42#define E1000_DEV_ID_82576_SERDES 0x10E7
43#define E1000_DEV_ID_82576_QUAD_COPPER 0x10E8 43#define E1000_DEV_ID_82576_QUAD_COPPER 0x10E8
44#define E1000_DEV_ID_82576_QUAD_COPPER_ET2 0x1526
44#define E1000_DEV_ID_82576_NS 0x150A 45#define E1000_DEV_ID_82576_NS 0x150A
45#define E1000_DEV_ID_82576_NS_SERDES 0x1518 46#define E1000_DEV_ID_82576_NS_SERDES 0x1518
46#define E1000_DEV_ID_82576_SERDES_QUAD 0x150D 47#define E1000_DEV_ID_82576_SERDES_QUAD 0x150D
diff --git a/drivers/net/igb/e1000_mac.c b/drivers/net/igb/e1000_mac.c
index 2a8a886b37eb..be8d010e4021 100644
--- a/drivers/net/igb/e1000_mac.c
+++ b/drivers/net/igb/e1000_mac.c
@@ -1367,7 +1367,8 @@ out:
1367 * igb_enable_mng_pass_thru - Enable processing of ARP's 1367 * igb_enable_mng_pass_thru - Enable processing of ARP's
1368 * @hw: pointer to the HW structure 1368 * @hw: pointer to the HW structure
1369 * 1369 *
1370 * Verifies the hardware needs to allow ARPs to be processed by the host. 1370 * Verifies the hardware needs to leave interface enabled so that frames can
1371 * be directed to and from the management interface.
1371 **/ 1372 **/
1372bool igb_enable_mng_pass_thru(struct e1000_hw *hw) 1373bool igb_enable_mng_pass_thru(struct e1000_hw *hw)
1373{ 1374{
@@ -1380,8 +1381,7 @@ bool igb_enable_mng_pass_thru(struct e1000_hw *hw)
1380 1381
1381 manc = rd32(E1000_MANC); 1382 manc = rd32(E1000_MANC);
1382 1383
1383 if (!(manc & E1000_MANC_RCV_TCO_EN) || 1384 if (!(manc & E1000_MANC_RCV_TCO_EN))
1384 !(manc & E1000_MANC_EN_MAC_ADDR_FILTER))
1385 goto out; 1385 goto out;
1386 1386
1387 if (hw->mac.arc_subsystem_valid) { 1387 if (hw->mac.arc_subsystem_valid) {
diff --git a/drivers/net/igb/igb.h b/drivers/net/igb/igb.h
index a1775705b24c..3b772b822a5d 100644
--- a/drivers/net/igb/igb.h
+++ b/drivers/net/igb/igb.h
@@ -267,7 +267,6 @@ struct igb_adapter {
267 267
268 /* TX */ 268 /* TX */
269 struct igb_ring *tx_ring[16]; 269 struct igb_ring *tx_ring[16];
270 unsigned long tx_queue_len;
271 u32 tx_timeout_count; 270 u32 tx_timeout_count;
272 271
273 /* RX */ 272 /* RX */
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index 583a21c1def3..01c65c7447e1 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -72,6 +72,7 @@ static DEFINE_PCI_DEVICE_TABLE(igb_pci_tbl) = {
72 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_FIBER), board_82575 }, 72 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_FIBER), board_82575 },
73 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES), board_82575 }, 73 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES), board_82575 },
74 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES_QUAD), board_82575 }, 74 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES_QUAD), board_82575 },
75 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER_ET2), board_82575 },
75 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER), board_82575 }, 76 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER), board_82575 },
76 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_COPPER), board_82575 }, 77 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_COPPER), board_82575 },
77 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_FIBER_SERDES), board_82575 }, 78 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_FIBER_SERDES), board_82575 },
@@ -688,7 +689,7 @@ static void igb_set_interrupt_capability(struct igb_adapter *adapter)
688 /* start with one vector for every rx queue */ 689 /* start with one vector for every rx queue */
689 numvecs = adapter->num_rx_queues; 690 numvecs = adapter->num_rx_queues;
690 691
691 /* if tx handler is seperate add 1 for every tx queue */ 692 /* if tx handler is separate add 1 for every tx queue */
692 if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS)) 693 if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS))
693 numvecs += adapter->num_tx_queues; 694 numvecs += adapter->num_tx_queues;
694 695
@@ -1104,9 +1105,6 @@ static void igb_configure(struct igb_adapter *adapter)
1104 struct igb_ring *ring = adapter->rx_ring[i]; 1105 struct igb_ring *ring = adapter->rx_ring[i];
1105 igb_alloc_rx_buffers_adv(ring, igb_desc_unused(ring)); 1106 igb_alloc_rx_buffers_adv(ring, igb_desc_unused(ring));
1106 } 1107 }
1107
1108
1109 adapter->tx_queue_len = netdev->tx_queue_len;
1110} 1108}
1111 1109
1112/** 1110/**
@@ -1212,7 +1210,6 @@ void igb_down(struct igb_adapter *adapter)
1212 del_timer_sync(&adapter->watchdog_timer); 1210 del_timer_sync(&adapter->watchdog_timer);
1213 del_timer_sync(&adapter->phy_info_timer); 1211 del_timer_sync(&adapter->phy_info_timer);
1214 1212
1215 netdev->tx_queue_len = adapter->tx_queue_len;
1216 netif_carrier_off(netdev); 1213 netif_carrier_off(netdev);
1217 1214
1218 /* record the stats before reset*/ 1215 /* record the stats before reset*/
@@ -3105,17 +3102,13 @@ static void igb_watchdog_task(struct work_struct *work)
3105 ((ctrl & E1000_CTRL_RFCE) ? "RX" : 3102 ((ctrl & E1000_CTRL_RFCE) ? "RX" :
3106 ((ctrl & E1000_CTRL_TFCE) ? "TX" : "None"))); 3103 ((ctrl & E1000_CTRL_TFCE) ? "TX" : "None")));
3107 3104
3108 /* tweak tx_queue_len according to speed/duplex and 3105 /* adjust timeout factor according to speed/duplex */
3109 * adjust the timeout factor */
3110 netdev->tx_queue_len = adapter->tx_queue_len;
3111 adapter->tx_timeout_factor = 1; 3106 adapter->tx_timeout_factor = 1;
3112 switch (adapter->link_speed) { 3107 switch (adapter->link_speed) {
3113 case SPEED_10: 3108 case SPEED_10:
3114 netdev->tx_queue_len = 10;
3115 adapter->tx_timeout_factor = 14; 3109 adapter->tx_timeout_factor = 14;
3116 break; 3110 break;
3117 case SPEED_100: 3111 case SPEED_100:
3118 netdev->tx_queue_len = 100;
3119 /* maybe add some timeout factor ? */ 3112 /* maybe add some timeout factor ? */
3120 break; 3113 break;
3121 } 3114 }
@@ -3962,7 +3955,7 @@ void igb_update_stats(struct igb_adapter *adapter)
3962 struct net_device_stats *net_stats = igb_get_stats(adapter->netdev); 3955 struct net_device_stats *net_stats = igb_get_stats(adapter->netdev);
3963 struct e1000_hw *hw = &adapter->hw; 3956 struct e1000_hw *hw = &adapter->hw;
3964 struct pci_dev *pdev = adapter->pdev; 3957 struct pci_dev *pdev = adapter->pdev;
3965 u32 rnbc, reg; 3958 u32 reg, mpc;
3966 u16 phy_tmp; 3959 u16 phy_tmp;
3967 int i; 3960 int i;
3968 u64 bytes, packets; 3961 u64 bytes, packets;
@@ -4020,7 +4013,9 @@ void igb_update_stats(struct igb_adapter *adapter)
4020 adapter->stats.symerrs += rd32(E1000_SYMERRS); 4013 adapter->stats.symerrs += rd32(E1000_SYMERRS);
4021 adapter->stats.sec += rd32(E1000_SEC); 4014 adapter->stats.sec += rd32(E1000_SEC);
4022 4015
4023 adapter->stats.mpc += rd32(E1000_MPC); 4016 mpc = rd32(E1000_MPC);
4017 adapter->stats.mpc += mpc;
4018 net_stats->rx_fifo_errors += mpc;
4024 adapter->stats.scc += rd32(E1000_SCC); 4019 adapter->stats.scc += rd32(E1000_SCC);
4025 adapter->stats.ecol += rd32(E1000_ECOL); 4020 adapter->stats.ecol += rd32(E1000_ECOL);
4026 adapter->stats.mcc += rd32(E1000_MCC); 4021 adapter->stats.mcc += rd32(E1000_MCC);
@@ -4035,9 +4030,7 @@ void igb_update_stats(struct igb_adapter *adapter)
4035 adapter->stats.gptc += rd32(E1000_GPTC); 4030 adapter->stats.gptc += rd32(E1000_GPTC);
4036 adapter->stats.gotc += rd32(E1000_GOTCL); 4031 adapter->stats.gotc += rd32(E1000_GOTCL);
4037 rd32(E1000_GOTCH); /* clear GOTCL */ 4032 rd32(E1000_GOTCH); /* clear GOTCL */
4038 rnbc = rd32(E1000_RNBC); 4033 adapter->stats.rnbc += rd32(E1000_RNBC);
4039 adapter->stats.rnbc += rnbc;
4040 net_stats->rx_fifo_errors += rnbc;
4041 adapter->stats.ruc += rd32(E1000_RUC); 4034 adapter->stats.ruc += rd32(E1000_RUC);
4042 adapter->stats.rfc += rd32(E1000_RFC); 4035 adapter->stats.rfc += rd32(E1000_RFC);
4043 adapter->stats.rjc += rd32(E1000_RJC); 4036 adapter->stats.rjc += rd32(E1000_RJC);
@@ -5109,7 +5102,7 @@ static void igb_receive_skb(struct igb_q_vector *q_vector,
5109{ 5102{
5110 struct igb_adapter *adapter = q_vector->adapter; 5103 struct igb_adapter *adapter = q_vector->adapter;
5111 5104
5112 if (vlan_tag) 5105 if (vlan_tag && adapter->vlgrp)
5113 vlan_gro_receive(&q_vector->napi, adapter->vlgrp, 5106 vlan_gro_receive(&q_vector->napi, adapter->vlgrp,
5114 vlan_tag, skb); 5107 vlan_tag, skb);
5115 else 5108 else
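
The igb hunks above make three behavioural changes: the per-speed rewrite of netdev->tx_queue_len is dropped (only tx_timeout_factor stays speed-dependent), rx_fifo_errors is now fed from the Missed Packet Count (MPC) register rather than RNBC (RNBC only records that no receive buffer was available at the time, while MPC counts frames actually missed, so it is the better drop counter), and the VLAN receive path is only taken when a vlan group is actually registered. A minimal sketch of that last guard, assuming the usual napi_gro_receive() fallback in the else branch:

	/* sketch only: hand tagged frames to the vlan path only when a vlan
	 * group exists, otherwise fall back to plain GRO receive instead of
	 * dereferencing a NULL adapter->vlgrp */
	if (vlan_tag && adapter->vlgrp)
		vlan_gro_receive(&q_vector->napi, adapter->vlgrp,
				 vlan_tag, skb);
	else
		napi_gro_receive(&q_vector->napi, skb);
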
diff --git a/drivers/net/igbvf/igbvf.h b/drivers/net/igbvf/igbvf.h
index a1774b29d222..debeee2dc717 100644
--- a/drivers/net/igbvf/igbvf.h
+++ b/drivers/net/igbvf/igbvf.h
@@ -198,7 +198,6 @@ struct igbvf_adapter {
198 struct igbvf_ring *tx_ring /* One per active queue */ 198 struct igbvf_ring *tx_ring /* One per active queue */
199 ____cacheline_aligned_in_smp; 199 ____cacheline_aligned_in_smp;
200 200
201 unsigned long tx_queue_len;
202 unsigned int restart_queue; 201 unsigned int restart_queue;
203 u32 txd_cmd; 202 u32 txd_cmd;
204 203
diff --git a/drivers/net/igbvf/netdev.c b/drivers/net/igbvf/netdev.c
index a77afd8a14bb..b41037ed8083 100644
--- a/drivers/net/igbvf/netdev.c
+++ b/drivers/net/igbvf/netdev.c
@@ -1304,8 +1304,6 @@ static void igbvf_configure_tx(struct igbvf_adapter *adapter)
1304 1304
1305 /* enable Report Status bit */ 1305 /* enable Report Status bit */
1306 adapter->txd_cmd |= E1000_ADVTXD_DCMD_RS; 1306 adapter->txd_cmd |= E1000_ADVTXD_DCMD_RS;
1307
1308 adapter->tx_queue_len = adapter->netdev->tx_queue_len;
1309} 1307}
1310 1308
1311/** 1309/**
@@ -1524,7 +1522,6 @@ void igbvf_down(struct igbvf_adapter *adapter)
1524 1522
1525 del_timer_sync(&adapter->watchdog_timer); 1523 del_timer_sync(&adapter->watchdog_timer);
1526 1524
1527 netdev->tx_queue_len = adapter->tx_queue_len;
1528 netif_carrier_off(netdev); 1525 netif_carrier_off(netdev);
1529 1526
1530 /* record the stats before reset*/ 1527 /* record the stats before reset*/
@@ -1857,21 +1854,15 @@ static void igbvf_watchdog_task(struct work_struct *work)
1857 &adapter->link_duplex); 1854 &adapter->link_duplex);
1858 igbvf_print_link_info(adapter); 1855 igbvf_print_link_info(adapter);
1859 1856
1860 /* 1857 /* adjust timeout factor according to speed/duplex */
1861 * tweak tx_queue_len according to speed/duplex
1862 * and adjust the timeout factor
1863 */
1864 netdev->tx_queue_len = adapter->tx_queue_len;
1865 adapter->tx_timeout_factor = 1; 1858 adapter->tx_timeout_factor = 1;
1866 switch (adapter->link_speed) { 1859 switch (adapter->link_speed) {
1867 case SPEED_10: 1860 case SPEED_10:
1868 txb2b = 0; 1861 txb2b = 0;
1869 netdev->tx_queue_len = 10;
1870 adapter->tx_timeout_factor = 16; 1862 adapter->tx_timeout_factor = 16;
1871 break; 1863 break;
1872 case SPEED_100: 1864 case SPEED_100:
1873 txb2b = 0; 1865 txb2b = 0;
1874 netdev->tx_queue_len = 100;
1875 /* maybe add some timeout factor ? */ 1866 /* maybe add some timeout factor ? */
1876 break; 1867 break;
1877 } 1868 }
diff --git a/drivers/net/irda/sa1100_ir.c b/drivers/net/irda/sa1100_ir.c
index c412e8026173..1dcdce0631aa 100644
--- a/drivers/net/irda/sa1100_ir.c
+++ b/drivers/net/irda/sa1100_ir.c
@@ -331,7 +331,7 @@ static int sa1100_irda_resume(struct platform_device *pdev)
331 * If we missed a speed change, initialise at the new speed 331 * If we missed a speed change, initialise at the new speed
332 * directly. It is debatable whether this is actually 332 * directly. It is debatable whether this is actually
333 * required, but in the interests of continuing from where 333 * required, but in the interests of continuing from where
334 * we left off it is desireable. The converse argument is 334 * we left off it is desirable. The converse argument is
335 * that we should re-negotiate at 9600 baud again. 335 * that we should re-negotiate at 9600 baud again.
336 */ 336 */
337 if (si->newspeed) { 337 if (si->newspeed) {
diff --git a/drivers/net/irda/w83977af_ir.c b/drivers/net/irda/w83977af_ir.c
index 551810fd2976..980625feb2c0 100644
--- a/drivers/net/irda/w83977af_ir.c
+++ b/drivers/net/irda/w83977af_ir.c
@@ -65,7 +65,6 @@
65#undef CONFIG_NETWINDER_TX_DMA_PROBLEMS /* Not needed */ 65#undef CONFIG_NETWINDER_TX_DMA_PROBLEMS /* Not needed */
66#define CONFIG_NETWINDER_RX_DMA_PROBLEMS /* Must have this one! */ 66#define CONFIG_NETWINDER_RX_DMA_PROBLEMS /* Must have this one! */
67#endif 67#endif
68#undef CONFIG_USE_INTERNAL_TIMER /* Just cannot make that timer work */
69#define CONFIG_USE_W977_PNP /* Currently needed */ 68#define CONFIG_USE_W977_PNP /* Currently needed */
70#define PIO_MAX_SPEED 115200 69#define PIO_MAX_SPEED 115200
71 70
@@ -533,25 +532,6 @@ static netdev_tx_t w83977af_hard_xmit(struct sk_buff *skb,
533 self->tx_buff.len = skb->len; 532 self->tx_buff.len = skb->len;
534 533
535 mtt = irda_get_mtt(skb); 534 mtt = irda_get_mtt(skb);
536#ifdef CONFIG_USE_INTERNAL_TIMER
537 if (mtt > 50) {
538 /* Adjust for timer resolution */
539 mtt /= 1000+1;
540
541 /* Setup timer */
542 switch_bank(iobase, SET4);
543 outb(mtt & 0xff, iobase+TMRL);
544 outb((mtt >> 8) & 0x0f, iobase+TMRH);
545
546 /* Start timer */
547 outb(IR_MSL_EN_TMR, iobase+IR_MSL);
548 self->io.direction = IO_XMIT;
549
550 /* Enable timer interrupt */
551 switch_bank(iobase, SET0);
552 outb(ICR_ETMRI, iobase+ICR);
553 } else {
554#endif
555 IRDA_DEBUG(4, "%s(%ld), mtt=%d\n", __func__ , jiffies, mtt); 535 IRDA_DEBUG(4, "%s(%ld), mtt=%d\n", __func__ , jiffies, mtt);
556 if (mtt) 536 if (mtt)
557 udelay(mtt); 537 udelay(mtt);
@@ -560,9 +540,6 @@ static netdev_tx_t w83977af_hard_xmit(struct sk_buff *skb,
560 switch_bank(iobase, SET0); 540 switch_bank(iobase, SET0);
561 outb(ICR_EDMAI, iobase+ICR); 541 outb(ICR_EDMAI, iobase+ICR);
562 w83977af_dma_write(self, iobase); 542 w83977af_dma_write(self, iobase);
563#ifdef CONFIG_USE_INTERNAL_TIMER
564 }
565#endif
566 } else { 543 } else {
567 self->tx_buff.data = self->tx_buff.head; 544 self->tx_buff.data = self->tx_buff.head;
568 self->tx_buff.len = async_wrap_skb(skb, self->tx_buff.data, 545 self->tx_buff.len = async_wrap_skb(skb, self->tx_buff.data,
@@ -876,20 +853,7 @@ static int w83977af_dma_receive_complete(struct w83977af_ir *self)
876 /* Check if we have transferred all data to memory */ 853 /* Check if we have transferred all data to memory */
877 switch_bank(iobase, SET0); 854 switch_bank(iobase, SET0);
878 if (inb(iobase+USR) & USR_RDR) { 855 if (inb(iobase+USR) & USR_RDR) {
879#ifdef CONFIG_USE_INTERNAL_TIMER
880 /* Put this entry back in fifo */
881 st_fifo->head--;
882 st_fifo->len++;
883 st_fifo->entries[st_fifo->head].status = status;
884 st_fifo->entries[st_fifo->head].len = len;
885
886 /* Restore set register */
887 outb(set, iobase+SSR);
888
889 return FALSE; /* I'll be back! */
890#else
891 udelay(80); /* Should be enough!? */ 856 udelay(80); /* Should be enough!? */
892#endif
893 } 857 }
894 858
895 skb = dev_alloc_skb(len+1); 859 skb = dev_alloc_skb(len+1);
diff --git a/drivers/net/iseries_veth.c b/drivers/net/iseries_veth.c
index 966de5d69521..e6e972d9b7ca 100644
--- a/drivers/net/iseries_veth.c
+++ b/drivers/net/iseries_veth.c
@@ -384,7 +384,7 @@ static struct attribute *veth_cnx_default_attrs[] = {
384 NULL 384 NULL
385}; 385};
386 386
387static struct sysfs_ops veth_cnx_sysfs_ops = { 387static const struct sysfs_ops veth_cnx_sysfs_ops = {
388 .show = veth_cnx_attribute_show 388 .show = veth_cnx_attribute_show
389}; 389};
390 390
@@ -441,7 +441,7 @@ static struct attribute *veth_port_default_attrs[] = {
441 NULL 441 NULL
442}; 442};
443 443
444static struct sysfs_ops veth_port_sysfs_ops = { 444static const struct sysfs_ops veth_port_sysfs_ops = {
445 .show = veth_port_attribute_show 445 .show = veth_port_attribute_show
446}; 446};
447 447
diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h
index 19e94ee155a2..79c35ae3718c 100644
--- a/drivers/net/ixgbe/ixgbe.h
+++ b/drivers/net/ixgbe/ixgbe.h
@@ -204,14 +204,17 @@ enum ixgbe_ring_f_enum {
204#define IXGBE_MAX_FDIR_INDICES 64 204#define IXGBE_MAX_FDIR_INDICES 64
205#ifdef IXGBE_FCOE 205#ifdef IXGBE_FCOE
206#define IXGBE_MAX_FCOE_INDICES 8 206#define IXGBE_MAX_FCOE_INDICES 8
207#define MAX_RX_QUEUES (IXGBE_MAX_FDIR_INDICES + IXGBE_MAX_FCOE_INDICES)
208#define MAX_TX_QUEUES (IXGBE_MAX_FDIR_INDICES + IXGBE_MAX_FCOE_INDICES)
209#else
210#define MAX_RX_QUEUES IXGBE_MAX_FDIR_INDICES
211#define MAX_TX_QUEUES IXGBE_MAX_FDIR_INDICES
207#endif /* IXGBE_FCOE */ 212#endif /* IXGBE_FCOE */
208struct ixgbe_ring_feature { 213struct ixgbe_ring_feature {
209 int indices; 214 int indices;
210 int mask; 215 int mask;
211} ____cacheline_internodealigned_in_smp; 216} ____cacheline_internodealigned_in_smp;
212 217
213#define MAX_RX_QUEUES 128
214#define MAX_TX_QUEUES 128
215 218
216#define MAX_RX_PACKET_BUFFERS ((adapter->flags & IXGBE_FLAG_DCB_ENABLED) \ 219#define MAX_RX_PACKET_BUFFERS ((adapter->flags & IXGBE_FLAG_DCB_ENABLED) \
217 ? 8 : 1) 220 ? 8 : 1)
diff --git a/drivers/net/ixgbe/ixgbe_82599.c b/drivers/net/ixgbe/ixgbe_82599.c
index 1f30e163bd9c..b405a00817c6 100644
--- a/drivers/net/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ixgbe/ixgbe_82599.c
@@ -39,6 +39,7 @@
39#define IXGBE_82599_MC_TBL_SIZE 128 39#define IXGBE_82599_MC_TBL_SIZE 128
40#define IXGBE_82599_VFT_TBL_SIZE 128 40#define IXGBE_82599_VFT_TBL_SIZE 128
41 41
42void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
42s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw, 43s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
43 ixgbe_link_speed speed, 44 ixgbe_link_speed speed,
44 bool autoneg, 45 bool autoneg,
@@ -68,7 +69,9 @@ static void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw)
68 if (hw->phy.multispeed_fiber) { 69 if (hw->phy.multispeed_fiber) {
69 /* Set up dual speed SFP+ support */ 70 /* Set up dual speed SFP+ support */
70 mac->ops.setup_link = &ixgbe_setup_mac_link_multispeed_fiber; 71 mac->ops.setup_link = &ixgbe_setup_mac_link_multispeed_fiber;
72 mac->ops.flap_tx_laser = &ixgbe_flap_tx_laser_multispeed_fiber;
71 } else { 73 } else {
74 mac->ops.flap_tx_laser = NULL;
72 if ((mac->ops.get_media_type(hw) == 75 if ((mac->ops.get_media_type(hw) ==
73 ixgbe_media_type_backplane) && 76 ixgbe_media_type_backplane) &&
74 (hw->phy.smart_speed == ixgbe_smart_speed_auto || 77 (hw->phy.smart_speed == ixgbe_smart_speed_auto ||
@@ -413,6 +416,41 @@ s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
413} 416}
414 417
415/** 418/**
419 * ixgbe_flap_tx_laser_multispeed_fiber - Flap Tx laser
420 * @hw: pointer to hardware structure
421 *
422 * When the driver changes the link speeds that it can support,
423 * it sets autotry_restart to true to indicate that we need to
424 * initiate a new autotry session with the link partner. To do
425 * so, we set the speed then disable and re-enable the tx laser, to
426 * alert the link partner that it also needs to restart autotry on its
427 * end. This is consistent with true clause 37 autoneg, which also
428 * involves a loss of signal.
429 **/
430void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
431{
432 u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
433
434 hw_dbg(hw, "ixgbe_flap_tx_laser_multispeed_fiber\n");
435
436 if (hw->mac.autotry_restart) {
437 /* Disable tx laser; allow 100us to go dark per spec */
438 esdp_reg |= IXGBE_ESDP_SDP3;
439 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
440 IXGBE_WRITE_FLUSH(hw);
441 udelay(100);
442
443 /* Enable tx laser; allow 100ms to light up */
444 esdp_reg &= ~IXGBE_ESDP_SDP3;
445 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
446 IXGBE_WRITE_FLUSH(hw);
447 msleep(100);
448
449 hw->mac.autotry_restart = false;
450 }
451}
452
453/**
416 * ixgbe_setup_mac_link_multispeed_fiber - Set MAC link speed 454 * ixgbe_setup_mac_link_multispeed_fiber - Set MAC link speed
417 * @hw: pointer to hardware structure 455 * @hw: pointer to hardware structure
418 * @speed: new link speed 456 * @speed: new link speed
@@ -440,16 +478,6 @@ s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
440 speed &= phy_link_speed; 478 speed &= phy_link_speed;
441 479
442 /* 480 /*
443 * When the driver changes the link speeds that it can support,
444 * it sets autotry_restart to true to indicate that we need to
445 * initiate a new autotry session with the link partner. To do
446 * so, we set the speed then disable and re-enable the tx laser, to
447 * alert the link partner that it also needs to restart autotry on its
448 * end. This is consistent with true clause 37 autoneg, which also
449 * involves a loss of signal.
450 */
451
452 /*
453 * Try each speed one by one, highest priority first. We do this in 481 * Try each speed one by one, highest priority first. We do this in
454 * software because 10gb fiber doesn't support speed autonegotiation. 482 * software because 10gb fiber doesn't support speed autonegotiation.
455 */ 483 */
@@ -466,6 +494,7 @@ s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
466 /* Set the module link speed */ 494 /* Set the module link speed */
467 esdp_reg |= (IXGBE_ESDP_SDP5_DIR | IXGBE_ESDP_SDP5); 495 esdp_reg |= (IXGBE_ESDP_SDP5_DIR | IXGBE_ESDP_SDP5);
468 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg); 496 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
497 IXGBE_WRITE_FLUSH(hw);
469 498
470 /* Allow module to change analog characteristics (1G->10G) */ 499 /* Allow module to change analog characteristics (1G->10G) */
471 msleep(40); 500 msleep(40);
@@ -478,19 +507,7 @@ s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
478 return status; 507 return status;
479 508
480 /* Flap the tx laser if it has not already been done */ 509 /* Flap the tx laser if it has not already been done */
481 if (hw->mac.autotry_restart) { 510 hw->mac.ops.flap_tx_laser(hw);
482 /* Disable tx laser; allow 100us to go dark per spec */
483 esdp_reg |= IXGBE_ESDP_SDP3;
484 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
485 udelay(100);
486
487 /* Enable tx laser; allow 2ms to light up per spec */
488 esdp_reg &= ~IXGBE_ESDP_SDP3;
489 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
490 msleep(2);
491
492 hw->mac.autotry_restart = false;
493 }
494 511
495 /* 512 /*
496 * Wait for the controller to acquire link. Per IEEE 802.3ap, 513 * Wait for the controller to acquire link. Per IEEE 802.3ap,
@@ -525,6 +542,7 @@ s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
525 esdp_reg &= ~IXGBE_ESDP_SDP5; 542 esdp_reg &= ~IXGBE_ESDP_SDP5;
526 esdp_reg |= IXGBE_ESDP_SDP5_DIR; 543 esdp_reg |= IXGBE_ESDP_SDP5_DIR;
527 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg); 544 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
545 IXGBE_WRITE_FLUSH(hw);
528 546
529 /* Allow module to change analog characteristics (10G->1G) */ 547 /* Allow module to change analog characteristics (10G->1G) */
530 msleep(40); 548 msleep(40);
@@ -537,19 +555,7 @@ s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
537 return status; 555 return status;
538 556
539 /* Flap the tx laser if it has not already been done */ 557 /* Flap the tx laser if it has not already been done */
540 if (hw->mac.autotry_restart) { 558 hw->mac.ops.flap_tx_laser(hw);
541 /* Disable tx laser; allow 100us to go dark per spec */
542 esdp_reg |= IXGBE_ESDP_SDP3;
543 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
544 udelay(100);
545
546 /* Enable tx laser; allow 2ms to light up per spec */
547 esdp_reg &= ~IXGBE_ESDP_SDP3;
548 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
549 msleep(2);
550
551 hw->mac.autotry_restart = false;
552 }
553 559
554 /* Wait for the link partner to also set speed */ 560 /* Wait for the link partner to also set speed */
555 msleep(100); 561 msleep(100);
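
The ixgbe_82599.c changes hoist the tx-laser flap (drive SDP3 to disable the laser, wait for it to go dark, re-enable, wait for it to light) out of ixgbe_setup_mac_link_multispeed_fiber() into ixgbe_flap_tx_laser_multispeed_fiber(), exposed through the new mac->ops.flap_tx_laser hook, and lengthen the re-enable wait from 2 ms to 100 ms. A hedged sketch of how a caller restarts autotry through the hook (the hook is set to NULL on non-multispeed parts, hence the guard):

	/* sketch: signal the link partner to restart autotry by dropping and
	 * re-raising the tx laser, much like a clause 37 loss of signal */
	hw->mac.autotry_restart = true;
	if (hw->mac.ops.flap_tx_laser)
		hw->mac.ops.flap_tx_laser(hw);
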
diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c
index 7949a446e4c7..1959ef76c962 100644
--- a/drivers/net/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ixgbe/ixgbe_ethtool.c
@@ -1853,6 +1853,26 @@ static void ixgbe_diag_test(struct net_device *netdev,
1853 if (ixgbe_link_test(adapter, &data[4])) 1853 if (ixgbe_link_test(adapter, &data[4]))
1854 eth_test->flags |= ETH_TEST_FL_FAILED; 1854 eth_test->flags |= ETH_TEST_FL_FAILED;
1855 1855
1856 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
1857 int i;
1858 for (i = 0; i < adapter->num_vfs; i++) {
1859 if (adapter->vfinfo[i].clear_to_send) {
1860 netdev_warn(netdev, "%s",
1861 "offline diagnostic is not "
1862 "supported when VFs are "
1863 "present\n");
1864 data[0] = 1;
1865 data[1] = 1;
1866 data[2] = 1;
1867 data[3] = 1;
1868 eth_test->flags |= ETH_TEST_FL_FAILED;
1869 clear_bit(__IXGBE_TESTING,
1870 &adapter->state);
1871 goto skip_ol_tests;
1872 }
1873 }
1874 }
1875
1856 if (if_running) 1876 if (if_running)
1857 /* indicate we're in test mode */ 1877 /* indicate we're in test mode */
1858 dev_close(netdev); 1878 dev_close(netdev);
@@ -1908,6 +1928,7 @@ skip_loopback:
1908 1928
1909 clear_bit(__IXGBE_TESTING, &adapter->state); 1929 clear_bit(__IXGBE_TESTING, &adapter->state);
1910 } 1930 }
1931skip_ol_tests:
1911 msleep_interruptible(4 * 1000); 1932 msleep_interruptible(4 * 1000);
1912} 1933}
1913 1934
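
The ethtool hunk above skips the destructive offline self-tests whenever SR-IOV is enabled and at least one VF has completed its mailbox handshake, since an offline test resets the PF underneath running VFs. A condensed sketch of the guard (clear_to_send is the per-VF "handshake done" flag used elsewhere in the driver):

	/* sketch: refuse offline tests while any VF is active; mark all four
	 * test results as failed and jump past the offline block */
	for (i = 0; i < adapter->num_vfs; i++) {
		if (adapter->vfinfo[i].clear_to_send) {
			data[0] = data[1] = data[2] = data[3] = 1;
			eth_test->flags |= ETH_TEST_FL_FAILED;
			clear_bit(__IXGBE_TESTING, &adapter->state);
			goto skip_ol_tests;
		}
	}
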
diff --git a/drivers/net/ixgbe/ixgbe_fcoe.c b/drivers/net/ixgbe/ixgbe_fcoe.c
index 4123dec0dfb7..9276d5965b0d 100644
--- a/drivers/net/ixgbe/ixgbe_fcoe.c
+++ b/drivers/net/ixgbe/ixgbe_fcoe.c
@@ -202,6 +202,15 @@ int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
202 addr = sg_dma_address(sg); 202 addr = sg_dma_address(sg);
203 len = sg_dma_len(sg); 203 len = sg_dma_len(sg);
204 while (len) { 204 while (len) {
205 /* max number of buffers allowed in one DDP context */
206 if (j >= IXGBE_BUFFCNT_MAX) {
207 netif_err(adapter, drv, adapter->netdev,
208 "xid=%x:%d,%d,%d:addr=%llx "
209 "not enough descriptors\n",
210 xid, i, j, dmacount, (u64)addr);
211 goto out_noddp_free;
212 }
213
205 /* get the offset of length of current buffer */ 214 /* get the offset of length of current buffer */
206 thisoff = addr & ((dma_addr_t)bufflen - 1); 215 thisoff = addr & ((dma_addr_t)bufflen - 1);
207 thislen = min((bufflen - thisoff), len); 216 thislen = min((bufflen - thisoff), len);
@@ -227,20 +236,13 @@ int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
227 len -= thislen; 236 len -= thislen;
228 addr += thislen; 237 addr += thislen;
229 j++; 238 j++;
230 /* max number of buffers allowed in one DDP context */
231 if (j > IXGBE_BUFFCNT_MAX) {
232 DPRINTK(DRV, ERR, "xid=%x:%d,%d,%d:addr=%llx "
233 "not enough descriptors\n",
234 xid, i, j, dmacount, (u64)addr);
235 goto out_noddp_free;
236 }
237 } 239 }
238 } 240 }
239 /* only the last buffer may have non-full bufflen */ 241 /* only the last buffer may have non-full bufflen */
240 lastsize = thisoff + thislen; 242 lastsize = thisoff + thislen;
241 243
242 fcbuff = (IXGBE_FCBUFF_4KB << IXGBE_FCBUFF_BUFFSIZE_SHIFT); 244 fcbuff = (IXGBE_FCBUFF_4KB << IXGBE_FCBUFF_BUFFSIZE_SHIFT);
243 fcbuff |= (j << IXGBE_FCBUFF_BUFFCNT_SHIFT); 245 fcbuff |= ((j & 0xff) << IXGBE_FCBUFF_BUFFCNT_SHIFT);
244 fcbuff |= (firstoff << IXGBE_FCBUFF_OFFSET_SHIFT); 246 fcbuff |= (firstoff << IXGBE_FCBUFF_OFFSET_SHIFT);
245 fcbuff |= (IXGBE_FCBUFF_VALID); 247 fcbuff |= (IXGBE_FCBUFF_VALID);
246 248
@@ -520,6 +522,9 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter)
520 /* Enable L2 eth type filter for FCoE */ 522 /* Enable L2 eth type filter for FCoE */
521 IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FCOE), 523 IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FCOE),
522 (ETH_P_FCOE | IXGBE_ETQF_FCOE | IXGBE_ETQF_FILTER_EN)); 524 (ETH_P_FCOE | IXGBE_ETQF_FCOE | IXGBE_ETQF_FILTER_EN));
525 /* Enable L2 eth type filter for FIP */
526 IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FIP),
527 (ETH_P_FIP | IXGBE_ETQF_FILTER_EN));
523 if (adapter->ring_feature[RING_F_FCOE].indices) { 528 if (adapter->ring_feature[RING_F_FCOE].indices) {
524 /* Use multiple rx queues for FCoE by redirection table */ 529 /* Use multiple rx queues for FCoE by redirection table */
525 for (i = 0; i < IXGBE_FCRETA_SIZE; i++) { 530 for (i = 0; i < IXGBE_FCRETA_SIZE; i++) {
@@ -530,6 +535,12 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter)
530 } 535 }
531 IXGBE_WRITE_REG(hw, IXGBE_FCRECTL, IXGBE_FCRECTL_ENA); 536 IXGBE_WRITE_REG(hw, IXGBE_FCRECTL, IXGBE_FCRECTL_ENA);
532 IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FCOE), 0); 537 IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FCOE), 0);
538 fcoe_i = f->mask;
539 fcoe_i &= IXGBE_FCRETA_ENTRY_MASK;
540 fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx;
541 IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FIP),
542 IXGBE_ETQS_QUEUE_EN |
543 (fcoe_q << IXGBE_ETQS_RX_QUEUE_SHIFT));
533 } else { 544 } else {
534 /* Use single rx queue for FCoE */ 545 /* Use single rx queue for FCoE */
535 fcoe_i = f->mask; 546 fcoe_i = f->mask;
@@ -539,6 +550,12 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter)
539 IXGBE_ETQS_QUEUE_EN | 550 IXGBE_ETQS_QUEUE_EN |
540 (fcoe_q << IXGBE_ETQS_RX_QUEUE_SHIFT)); 551 (fcoe_q << IXGBE_ETQS_RX_QUEUE_SHIFT));
541 } 552 }
553 /* send FIP frames to the first FCoE queue */
554 fcoe_i = f->mask;
555 fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx;
556 IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FIP),
557 IXGBE_ETQS_QUEUE_EN |
558 (fcoe_q << IXGBE_ETQS_RX_QUEUE_SHIFT));
542 559
543 IXGBE_WRITE_REG(hw, IXGBE_FCRXCTRL, 560 IXGBE_WRITE_REG(hw, IXGBE_FCRXCTRL,
544 IXGBE_FCRXCTRL_FCOELLI | 561 IXGBE_FCRXCTRL_FCOELLI |
@@ -614,9 +631,9 @@ int ixgbe_fcoe_enable(struct net_device *netdev)
614 netdev->vlan_features |= NETIF_F_FSO; 631 netdev->vlan_features |= NETIF_F_FSO;
615 netdev->vlan_features |= NETIF_F_FCOE_MTU; 632 netdev->vlan_features |= NETIF_F_FCOE_MTU;
616 netdev->fcoe_ddp_xid = IXGBE_FCOE_DDP_MAX - 1; 633 netdev->fcoe_ddp_xid = IXGBE_FCOE_DDP_MAX - 1;
617 netdev_features_change(netdev);
618 634
619 ixgbe_init_interrupt_scheme(adapter); 635 ixgbe_init_interrupt_scheme(adapter);
636 netdev_features_change(netdev);
620 637
621 if (netif_running(netdev)) 638 if (netif_running(netdev))
622 netdev->netdev_ops->ndo_open(netdev); 639 netdev->netdev_ops->ndo_open(netdev);
@@ -660,11 +677,11 @@ int ixgbe_fcoe_disable(struct net_device *netdev)
660 netdev->vlan_features &= ~NETIF_F_FSO; 677 netdev->vlan_features &= ~NETIF_F_FSO;
661 netdev->vlan_features &= ~NETIF_F_FCOE_MTU; 678 netdev->vlan_features &= ~NETIF_F_FCOE_MTU;
662 netdev->fcoe_ddp_xid = 0; 679 netdev->fcoe_ddp_xid = 0;
663 netdev_features_change(netdev);
664 680
665 ixgbe_cleanup_fcoe(adapter); 681 ixgbe_cleanup_fcoe(adapter);
666
667 ixgbe_init_interrupt_scheme(adapter); 682 ixgbe_init_interrupt_scheme(adapter);
683 netdev_features_change(netdev);
684
668 if (netif_running(netdev)) 685 if (netif_running(netdev))
669 netdev->netdev_ops->ndo_open(netdev); 686 netdev->netdev_ops->ndo_open(netdev);
670 rc = 0; 687 rc = 0;
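
The FCoE hunks do two things: the per-context buffer count is checked before a scatterlist chunk is recorded, so an oversized SGL is rejected instead of overflowing the descriptor table (and the count written into FCBUFF is masked to 8 bits), and FIP, the FCoE Initialization Protocol carried on ethertype 0x8914 (ETH_P_FIP), gets its own L2 ethertype filter steered to the first FCoE receive queue so fabric login and keep-alive frames reach the FCoE stack. A minimal sketch of the FIP steering, with f standing for the RING_F_FCOE ring feature as in the hunk:

	/* sketch: match FIP by ethertype and direct it to the first FCoE rx
	 * queue, mirroring what is already done for ETH_P_FCOE */
	IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FIP),
			(ETH_P_FIP | IXGBE_ETQF_FILTER_EN));
	fcoe_q = adapter->rx_ring[f->mask]->reg_idx;
	IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FIP),
			IXGBE_ETQS_QUEUE_EN |
			(fcoe_q << IXGBE_ETQS_RX_QUEUE_SHIFT));
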
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index 45e3532b166f..0c553f6cb534 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -935,10 +935,12 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
935 if (skb->prev) 935 if (skb->prev)
936 skb = ixgbe_transform_rsc_queue(skb, &(rx_ring->rsc_count)); 936 skb = ixgbe_transform_rsc_queue(skb, &(rx_ring->rsc_count));
937 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) { 937 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
938 if (IXGBE_RSC_CB(skb)->dma) 938 if (IXGBE_RSC_CB(skb)->dma) {
939 pci_unmap_single(pdev, IXGBE_RSC_CB(skb)->dma, 939 pci_unmap_single(pdev, IXGBE_RSC_CB(skb)->dma,
940 rx_ring->rx_buf_len, 940 rx_ring->rx_buf_len,
941 PCI_DMA_FROMDEVICE); 941 PCI_DMA_FROMDEVICE);
942 IXGBE_RSC_CB(skb)->dma = 0;
943 }
942 if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) 944 if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED)
943 rx_ring->rsc_count += skb_shinfo(skb)->nr_frags; 945 rx_ring->rsc_count += skb_shinfo(skb)->nr_frags;
944 else 946 else
@@ -1050,7 +1052,7 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
1050 */ 1052 */
1051 for (v_idx = 0; v_idx < q_vectors; v_idx++) { 1053 for (v_idx = 0; v_idx < q_vectors; v_idx++) {
1052 q_vector = adapter->q_vector[v_idx]; 1054 q_vector = adapter->q_vector[v_idx];
1053 /* XXX for_each_bit(...) */ 1055 /* XXX for_each_set_bit(...) */
1054 r_idx = find_first_bit(q_vector->rxr_idx, 1056 r_idx = find_first_bit(q_vector->rxr_idx,
1055 adapter->num_rx_queues); 1057 adapter->num_rx_queues);
1056 1058
@@ -3054,6 +3056,14 @@ void ixgbe_reinit_locked(struct ixgbe_adapter *adapter)
3054 while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state)) 3056 while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
3055 msleep(1); 3057 msleep(1);
3056 ixgbe_down(adapter); 3058 ixgbe_down(adapter);
3059 /*
3060 * If SR-IOV enabled then wait a bit before bringing the adapter
3061 * back up to give the VFs time to respond to the reset. The
3062 * two second wait is based upon the watchdog timer cycle in
3063 * the VF driver.
3064 */
3065 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
3066 msleep(2000);
3057 ixgbe_up(adapter); 3067 ixgbe_up(adapter);
3058 clear_bit(__IXGBE_RESETTING, &adapter->state); 3068 clear_bit(__IXGBE_RESETTING, &adapter->state);
3059} 3069}
@@ -3126,10 +3136,12 @@ static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
3126 rx_buffer_info->skb = NULL; 3136 rx_buffer_info->skb = NULL;
3127 do { 3137 do {
3128 struct sk_buff *this = skb; 3138 struct sk_buff *this = skb;
3129 if (IXGBE_RSC_CB(this)->dma) 3139 if (IXGBE_RSC_CB(this)->dma) {
3130 pci_unmap_single(pdev, IXGBE_RSC_CB(this)->dma, 3140 pci_unmap_single(pdev, IXGBE_RSC_CB(this)->dma,
3131 rx_ring->rx_buf_len, 3141 rx_ring->rx_buf_len,
3132 PCI_DMA_FROMDEVICE); 3142 PCI_DMA_FROMDEVICE);
3143 IXGBE_RSC_CB(this)->dma = 0;
3144 }
3133 skb = skb->prev; 3145 skb = skb->prev;
3134 dev_kfree_skb(this); 3146 dev_kfree_skb(this);
3135 } while (skb); 3147 } while (skb);
@@ -3232,13 +3244,15 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
3232 3244
3233 /* disable receive for all VFs and wait one second */ 3245 /* disable receive for all VFs and wait one second */
3234 if (adapter->num_vfs) { 3246 if (adapter->num_vfs) {
3235 for (i = 0 ; i < adapter->num_vfs; i++)
3236 adapter->vfinfo[i].clear_to_send = 0;
3237
3238 /* ping all the active vfs to let them know we are going down */ 3247 /* ping all the active vfs to let them know we are going down */
3239 ixgbe_ping_all_vfs(adapter); 3248 ixgbe_ping_all_vfs(adapter);
3249
3240 /* Disable all VFTE/VFRE TX/RX */ 3250 /* Disable all VFTE/VFRE TX/RX */
3241 ixgbe_disable_tx_rx(adapter); 3251 ixgbe_disable_tx_rx(adapter);
3252
3253 /* Mark all the VFs as inactive */
3254 for (i = 0 ; i < adapter->num_vfs; i++)
3255 adapter->vfinfo[i].clear_to_send = 0;
3242 } 3256 }
3243 3257
3244 /* disable receives */ 3258 /* disable receives */
@@ -5018,6 +5032,7 @@ static void ixgbe_multispeed_fiber_task(struct work_struct *work)
5018 autoneg = hw->phy.autoneg_advertised; 5032 autoneg = hw->phy.autoneg_advertised;
5019 if ((!autoneg) && (hw->mac.ops.get_link_capabilities)) 5033 if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
5020 hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiation); 5034 hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiation);
5035 hw->mac.autotry_restart = false;
5021 if (hw->mac.ops.setup_link) 5036 if (hw->mac.ops.setup_link)
5022 hw->mac.ops.setup_link(hw, autoneg, negotiation, true); 5037 hw->mac.ops.setup_link(hw, autoneg, negotiation, true);
5023 adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE; 5038 adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
@@ -5633,7 +5648,8 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb)
5633 5648
5634#ifdef IXGBE_FCOE 5649#ifdef IXGBE_FCOE
5635 if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) && 5650 if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) &&
5636 (skb->protocol == htons(ETH_P_FCOE))) { 5651 ((skb->protocol == htons(ETH_P_FCOE)) ||
5652 (skb->protocol == htons(ETH_P_FIP)))) {
5637 txq &= (adapter->ring_feature[RING_F_FCOE].indices - 1); 5653 txq &= (adapter->ring_feature[RING_F_FCOE].indices - 1);
5638 txq += adapter->ring_feature[RING_F_FCOE].mask; 5654 txq += adapter->ring_feature[RING_F_FCOE].mask;
5639 return txq; 5655 return txq;
@@ -5680,18 +5696,25 @@ static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb,
5680 5696
5681 tx_ring = adapter->tx_ring[skb->queue_mapping]; 5697 tx_ring = adapter->tx_ring[skb->queue_mapping];
5682 5698
5683 if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) &&
5684 (skb->protocol == htons(ETH_P_FCOE))) {
5685 tx_flags |= IXGBE_TX_FLAGS_FCOE;
5686#ifdef IXGBE_FCOE 5699#ifdef IXGBE_FCOE
5700 if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
5687#ifdef CONFIG_IXGBE_DCB 5701#ifdef CONFIG_IXGBE_DCB
5688 tx_flags &= ~(IXGBE_TX_FLAGS_VLAN_PRIO_MASK 5702 /* for FCoE with DCB, we force the priority to what
5689 << IXGBE_TX_FLAGS_VLAN_SHIFT); 5703 * was specified by the switch */
5690 tx_flags |= ((adapter->fcoe.up << 13) 5704 if ((skb->protocol == htons(ETH_P_FCOE)) ||
5691 << IXGBE_TX_FLAGS_VLAN_SHIFT); 5705 (skb->protocol == htons(ETH_P_FIP))) {
5692#endif 5706 tx_flags &= ~(IXGBE_TX_FLAGS_VLAN_PRIO_MASK
5707 << IXGBE_TX_FLAGS_VLAN_SHIFT);
5708 tx_flags |= ((adapter->fcoe.up << 13)
5709 << IXGBE_TX_FLAGS_VLAN_SHIFT);
5710 }
5693#endif 5711#endif
5712 /* flag for FCoE offloads */
5713 if (skb->protocol == htons(ETH_P_FCOE))
5714 tx_flags |= IXGBE_TX_FLAGS_FCOE;
5694 } 5715 }
5716#endif
5717
5695 /* four things can cause us to need a context descriptor */ 5718 /* four things can cause us to need a context descriptor */
5696 if (skb_is_gso(skb) || 5719 if (skb_is_gso(skb) ||
5697 (skb->ip_summed == CHECKSUM_PARTIAL) || 5720 (skb->ip_summed == CHECKSUM_PARTIAL) ||
@@ -6046,7 +6069,6 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
6046 indices += min_t(unsigned int, num_possible_cpus(), 6069 indices += min_t(unsigned int, num_possible_cpus(),
6047 IXGBE_MAX_FCOE_INDICES); 6070 IXGBE_MAX_FCOE_INDICES);
6048#endif 6071#endif
6049 indices = min_t(unsigned int, indices, MAX_TX_QUEUES);
6050 netdev = alloc_etherdev_mq(sizeof(struct ixgbe_adapter), indices); 6072 netdev = alloc_etherdev_mq(sizeof(struct ixgbe_adapter), indices);
6051 if (!netdev) { 6073 if (!netdev) {
6052 err = -ENOMEM; 6074 err = -ENOMEM;
@@ -6245,9 +6267,6 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
6245 case IXGBE_DEV_ID_82599_KX4: 6267 case IXGBE_DEV_ID_82599_KX4:
6246 adapter->wol = (IXGBE_WUFC_MAG | IXGBE_WUFC_EX | 6268 adapter->wol = (IXGBE_WUFC_MAG | IXGBE_WUFC_EX |
6247 IXGBE_WUFC_MC | IXGBE_WUFC_BC); 6269 IXGBE_WUFC_MC | IXGBE_WUFC_BC);
6248 /* Enable ACPI wakeup in GRC */
6249 IXGBE_WRITE_REG(hw, IXGBE_GRC,
6250 (IXGBE_READ_REG(hw, IXGBE_GRC) & ~IXGBE_GRC_APME));
6251 break; 6270 break;
6252 default: 6271 default:
6253 adapter->wol = 0; 6272 adapter->wol = 0;
@@ -6380,6 +6399,16 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev)
6380 del_timer_sync(&adapter->sfp_timer); 6399 del_timer_sync(&adapter->sfp_timer);
6381 cancel_work_sync(&adapter->watchdog_task); 6400 cancel_work_sync(&adapter->watchdog_task);
6382 cancel_work_sync(&adapter->sfp_task); 6401 cancel_work_sync(&adapter->sfp_task);
6402 if (adapter->hw.phy.multispeed_fiber) {
6403 struct ixgbe_hw *hw = &adapter->hw;
6404 /*
6405 * Restart clause 37 autoneg, disable and re-enable
6406 * the tx laser, to clear & alert the link partner
6407 * that it needs to restart autotry
6408 */
6409 hw->mac.autotry_restart = true;
6410 hw->mac.ops.flap_tx_laser(hw);
6411 }
6383 cancel_work_sync(&adapter->multispeed_fiber_task); 6412 cancel_work_sync(&adapter->multispeed_fiber_task);
6384 cancel_work_sync(&adapter->sfp_config_module_task); 6413 cancel_work_sync(&adapter->sfp_config_module_task);
6385 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE || 6414 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
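
Several independent fixes land in ixgbe_main.c: the stashed RSC DMA handle is cleared after unmapping so the same address cannot be unmapped twice when the ring is later cleaned, ixgbe_reinit_locked() waits two seconds before bringing the port back up when SR-IOV is enabled (matching the VF watchdog period), FIP frames share the FCoE transmit queues and DCB priority, VFs are only marked inactive after they have been pinged and TX/RX disabled, and on driver removal the tx laser is flapped so a multispeed-fiber link partner restarts autotry. The double-unmap guard, as a sketch:

	/* sketch: unmap once, then zero the handle kept in the skb control
	 * block so a later cleanup pass sees it as already released */
	if (IXGBE_RSC_CB(skb)->dma) {
		pci_unmap_single(pdev, IXGBE_RSC_CB(skb)->dma,
				 rx_ring->rx_buf_len, PCI_DMA_FROMDEVICE);
		IXGBE_RSC_CB(skb)->dma = 0;
	}
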
diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h
index 2be907466593..4ec6dc1a5b75 100644
--- a/drivers/net/ixgbe/ixgbe_type.h
+++ b/drivers/net/ixgbe/ixgbe_type.h
@@ -1298,6 +1298,7 @@
1298#define IXGBE_ETQF_FILTER_BCN 1 1298#define IXGBE_ETQF_FILTER_BCN 1
1299#define IXGBE_ETQF_FILTER_FCOE 2 1299#define IXGBE_ETQF_FILTER_FCOE 2
1300#define IXGBE_ETQF_FILTER_1588 3 1300#define IXGBE_ETQF_FILTER_1588 3
1301#define IXGBE_ETQF_FILTER_FIP 4
1301/* VLAN Control Bit Masks */ 1302/* VLAN Control Bit Masks */
1302#define IXGBE_VLNCTRL_VET 0x0000FFFF /* bits 0-15 */ 1303#define IXGBE_VLNCTRL_VET 0x0000FFFF /* bits 0-15 */
1303#define IXGBE_VLNCTRL_CFI 0x10000000 /* bit 28 */ 1304#define IXGBE_VLNCTRL_CFI 0x10000000 /* bit 28 */
@@ -2397,6 +2398,7 @@ struct ixgbe_mac_operations {
2397 s32 (*enable_rx_dma)(struct ixgbe_hw *, u32); 2398 s32 (*enable_rx_dma)(struct ixgbe_hw *, u32);
2398 2399
2399 /* Link */ 2400 /* Link */
2401 void (*flap_tx_laser)(struct ixgbe_hw *);
2400 s32 (*setup_link)(struct ixgbe_hw *, ixgbe_link_speed, bool, bool); 2402 s32 (*setup_link)(struct ixgbe_hw *, ixgbe_link_speed, bool, bool);
2401 s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *, bool); 2403 s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *, bool);
2402 s32 (*get_link_capabilities)(struct ixgbe_hw *, ixgbe_link_speed *, 2404 s32 (*get_link_capabilities)(struct ixgbe_hw *, ixgbe_link_speed *,
diff --git a/drivers/net/ixgbevf/ethtool.c b/drivers/net/ixgbevf/ethtool.c
index 399be0c34c36..6fdd651abcd1 100644
--- a/drivers/net/ixgbevf/ethtool.c
+++ b/drivers/net/ixgbevf/ethtool.c
@@ -46,22 +46,32 @@ struct ixgbe_stats {
46 int sizeof_stat; 46 int sizeof_stat;
47 int stat_offset; 47 int stat_offset;
48 int base_stat_offset; 48 int base_stat_offset;
49 int saved_reset_offset;
49}; 50};
50 51
51#define IXGBEVF_STAT(m, b) sizeof(((struct ixgbevf_adapter *)0)->m), \ 52#define IXGBEVF_STAT(m, b, r) sizeof(((struct ixgbevf_adapter *)0)->m), \
52 offsetof(struct ixgbevf_adapter, m), \ 53 offsetof(struct ixgbevf_adapter, m), \
53 offsetof(struct ixgbevf_adapter, b) 54 offsetof(struct ixgbevf_adapter, b), \
55 offsetof(struct ixgbevf_adapter, r)
54static struct ixgbe_stats ixgbe_gstrings_stats[] = { 56static struct ixgbe_stats ixgbe_gstrings_stats[] = {
55 {"rx_packets", IXGBEVF_STAT(stats.vfgprc, stats.base_vfgprc)}, 57 {"rx_packets", IXGBEVF_STAT(stats.vfgprc, stats.base_vfgprc,
56 {"tx_packets", IXGBEVF_STAT(stats.vfgptc, stats.base_vfgptc)}, 58 stats.saved_reset_vfgprc)},
57 {"rx_bytes", IXGBEVF_STAT(stats.vfgorc, stats.base_vfgorc)}, 59 {"tx_packets", IXGBEVF_STAT(stats.vfgptc, stats.base_vfgptc,
58 {"tx_bytes", IXGBEVF_STAT(stats.vfgotc, stats.base_vfgotc)}, 60 stats.saved_reset_vfgptc)},
59 {"tx_busy", IXGBEVF_STAT(tx_busy, zero_base)}, 61 {"rx_bytes", IXGBEVF_STAT(stats.vfgorc, stats.base_vfgorc,
60 {"multicast", IXGBEVF_STAT(stats.vfmprc, stats.base_vfmprc)}, 62 stats.saved_reset_vfgorc)},
61 {"rx_csum_offload_good", IXGBEVF_STAT(hw_csum_rx_good, zero_base)}, 63 {"tx_bytes", IXGBEVF_STAT(stats.vfgotc, stats.base_vfgotc,
62 {"rx_csum_offload_errors", IXGBEVF_STAT(hw_csum_rx_error, zero_base)}, 64 stats.saved_reset_vfgotc)},
63 {"tx_csum_offload_ctxt", IXGBEVF_STAT(hw_csum_tx_good, zero_base)}, 65 {"tx_busy", IXGBEVF_STAT(tx_busy, zero_base, zero_base)},
64 {"rx_header_split", IXGBEVF_STAT(rx_hdr_split, zero_base)}, 66 {"multicast", IXGBEVF_STAT(stats.vfmprc, stats.base_vfmprc,
67 stats.saved_reset_vfmprc)},
68 {"rx_csum_offload_good", IXGBEVF_STAT(hw_csum_rx_good, zero_base,
69 zero_base)},
70 {"rx_csum_offload_errors", IXGBEVF_STAT(hw_csum_rx_error, zero_base,
71 zero_base)},
72 {"tx_csum_offload_ctxt", IXGBEVF_STAT(hw_csum_tx_good, zero_base,
73 zero_base)},
74 {"rx_header_split", IXGBEVF_STAT(rx_hdr_split, zero_base, zero_base)},
65}; 75};
66 76
67#define IXGBE_QUEUE_STATS_LEN 0 77#define IXGBE_QUEUE_STATS_LEN 0
@@ -455,10 +465,14 @@ static void ixgbevf_get_ethtool_stats(struct net_device *netdev,
455 ixgbe_gstrings_stats[i].stat_offset; 465 ixgbe_gstrings_stats[i].stat_offset;
456 char *b = (char *)adapter + 466 char *b = (char *)adapter +
457 ixgbe_gstrings_stats[i].base_stat_offset; 467 ixgbe_gstrings_stats[i].base_stat_offset;
468 char *r = (char *)adapter +
469 ixgbe_gstrings_stats[i].saved_reset_offset;
458 data[i] = ((ixgbe_gstrings_stats[i].sizeof_stat == 470 data[i] = ((ixgbe_gstrings_stats[i].sizeof_stat ==
459 sizeof(u64)) ? *(u64 *)p : *(u32 *)p) - 471 sizeof(u64)) ? *(u64 *)p : *(u32 *)p) -
460 ((ixgbe_gstrings_stats[i].sizeof_stat == 472 ((ixgbe_gstrings_stats[i].sizeof_stat ==
461 sizeof(u64)) ? *(u64 *)b : *(u32 *)b); 473 sizeof(u64)) ? *(u64 *)b : *(u32 *)b) +
474 ((ixgbe_gstrings_stats[i].sizeof_stat ==
475 sizeof(u64)) ? *(u64 *)r : *(u32 *)r);
462 } 476 }
463} 477}
464 478
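
Each ixgbevf ethtool stat now carries a third offset pointing at a saved_reset_* accumulator, because the hardware counters are zeroed whenever the PF resets the VF. Assuming a 64-bit counter (the real code also handles the u32 case), the reported value becomes:

	/* sketch: p = live counter, b = base sampled after the last reset,
	 * r = total accumulated across earlier resets */
	data[i] = (*(u64 *)p - *(u64 *)b) + *(u64 *)r;
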
diff --git a/drivers/net/ixgbevf/ixgbevf_main.c b/drivers/net/ixgbevf/ixgbevf_main.c
index 235b5fd4b8d4..1bbbef3ee3f4 100644
--- a/drivers/net/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ixgbevf/ixgbevf_main.c
@@ -751,7 +751,7 @@ static void ixgbevf_configure_msix(struct ixgbevf_adapter *adapter)
751 */ 751 */
752 for (v_idx = 0; v_idx < q_vectors; v_idx++) { 752 for (v_idx = 0; v_idx < q_vectors; v_idx++) {
753 q_vector = adapter->q_vector[v_idx]; 753 q_vector = adapter->q_vector[v_idx];
754 /* XXX for_each_bit(...) */ 754 /* XXX for_each_set_bit(...) */
755 r_idx = find_first_bit(q_vector->rxr_idx, 755 r_idx = find_first_bit(q_vector->rxr_idx,
756 adapter->num_rx_queues); 756 adapter->num_rx_queues);
757 757
@@ -965,7 +965,7 @@ static irqreturn_t ixgbevf_msix_mbx(int irq, void *data)
965 965
966 if ((msg & IXGBE_MBVFICR_VFREQ_MASK) == IXGBE_PF_CONTROL_MSG) 966 if ((msg & IXGBE_MBVFICR_VFREQ_MASK) == IXGBE_PF_CONTROL_MSG)
967 mod_timer(&adapter->watchdog_timer, 967 mod_timer(&adapter->watchdog_timer,
968 round_jiffies(jiffies + 10)); 968 round_jiffies(jiffies + 1));
969 969
970 return IRQ_HANDLED; 970 return IRQ_HANDLED;
971} 971}
@@ -1610,6 +1610,44 @@ static inline void ixgbevf_rx_desc_queue_enable(struct ixgbevf_adapter *adapter,
1610 (adapter->rx_ring[rxr].count - 1)); 1610 (adapter->rx_ring[rxr].count - 1));
1611} 1611}
1612 1612
1613static void ixgbevf_save_reset_stats(struct ixgbevf_adapter *adapter)
1614{
1615 /* Only save pre-reset stats if there are some */
1616 if (adapter->stats.vfgprc || adapter->stats.vfgptc) {
1617 adapter->stats.saved_reset_vfgprc += adapter->stats.vfgprc -
1618 adapter->stats.base_vfgprc;
1619 adapter->stats.saved_reset_vfgptc += adapter->stats.vfgptc -
1620 adapter->stats.base_vfgptc;
1621 adapter->stats.saved_reset_vfgorc += adapter->stats.vfgorc -
1622 adapter->stats.base_vfgorc;
1623 adapter->stats.saved_reset_vfgotc += adapter->stats.vfgotc -
1624 adapter->stats.base_vfgotc;
1625 adapter->stats.saved_reset_vfmprc += adapter->stats.vfmprc -
1626 adapter->stats.base_vfmprc;
1627 }
1628}
1629
1630static void ixgbevf_init_last_counter_stats(struct ixgbevf_adapter *adapter)
1631{
1632 struct ixgbe_hw *hw = &adapter->hw;
1633
1634 adapter->stats.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
1635 adapter->stats.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
1636 adapter->stats.last_vfgorc |=
1637 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
1638 adapter->stats.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
1639 adapter->stats.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
1640 adapter->stats.last_vfgotc |=
1641 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
1642 adapter->stats.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);
1643
1644 adapter->stats.base_vfgprc = adapter->stats.last_vfgprc;
1645 adapter->stats.base_vfgorc = adapter->stats.last_vfgorc;
1646 adapter->stats.base_vfgptc = adapter->stats.last_vfgptc;
1647 adapter->stats.base_vfgotc = adapter->stats.last_vfgotc;
1648 adapter->stats.base_vfmprc = adapter->stats.last_vfmprc;
1649}
1650
1613static int ixgbevf_up_complete(struct ixgbevf_adapter *adapter) 1651static int ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
1614{ 1652{
1615 struct net_device *netdev = adapter->netdev; 1653 struct net_device *netdev = adapter->netdev;
@@ -1656,6 +1694,9 @@ static int ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
1656 /* enable transmits */ 1694 /* enable transmits */
1657 netif_tx_start_all_queues(netdev); 1695 netif_tx_start_all_queues(netdev);
1658 1696
1697 ixgbevf_save_reset_stats(adapter);
1698 ixgbevf_init_last_counter_stats(adapter);
1699
1659 /* bring the link up in the watchdog, this could race with our first 1700 /* bring the link up in the watchdog, this could race with our first
1660 * link up interrupt but shouldn't be a problem */ 1701 * link up interrupt but shouldn't be a problem */
1661 adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE; 1702 adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
@@ -2228,27 +2269,6 @@ out:
2228 return err; 2269 return err;
2229} 2270}
2230 2271
2231static void ixgbevf_init_last_counter_stats(struct ixgbevf_adapter *adapter)
2232{
2233 struct ixgbe_hw *hw = &adapter->hw;
2234
2235 adapter->stats.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
2236 adapter->stats.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
2237 adapter->stats.last_vfgorc |=
2238 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
2239 adapter->stats.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
2240 adapter->stats.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
2241 adapter->stats.last_vfgotc |=
2242 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
2243 adapter->stats.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);
2244
2245 adapter->stats.base_vfgprc = adapter->stats.last_vfgprc;
2246 adapter->stats.base_vfgorc = adapter->stats.last_vfgorc;
2247 adapter->stats.base_vfgptc = adapter->stats.last_vfgptc;
2248 adapter->stats.base_vfgotc = adapter->stats.last_vfgotc;
2249 adapter->stats.base_vfmprc = adapter->stats.last_vfmprc;
2250}
2251
2252#define UPDATE_VF_COUNTER_32bit(reg, last_counter, counter) \ 2272#define UPDATE_VF_COUNTER_32bit(reg, last_counter, counter) \
2253 { \ 2273 { \
2254 u32 current_counter = IXGBE_READ_REG(hw, reg); \ 2274 u32 current_counter = IXGBE_READ_REG(hw, reg); \
@@ -2399,7 +2419,7 @@ static void ixgbevf_watchdog_task(struct work_struct *work)
2399 if (!netif_carrier_ok(netdev)) { 2419 if (!netif_carrier_ok(netdev)) {
2400 hw_dbg(&adapter->hw, "NIC Link is Up %s, ", 2420 hw_dbg(&adapter->hw, "NIC Link is Up %s, ",
2401 ((link_speed == IXGBE_LINK_SPEED_10GB_FULL) ? 2421 ((link_speed == IXGBE_LINK_SPEED_10GB_FULL) ?
2402 "10 Gbps" : "1 Gbps")); 2422 "10 Gbps\n" : "1 Gbps\n"));
2403 netif_carrier_on(netdev); 2423 netif_carrier_on(netdev);
2404 netif_tx_wake_all_queues(netdev); 2424 netif_tx_wake_all_queues(netdev);
2405 } else { 2425 } else {
@@ -2416,9 +2436,9 @@ static void ixgbevf_watchdog_task(struct work_struct *work)
2416 } 2436 }
2417 } 2437 }
2418 2438
2419pf_has_reset:
2420 ixgbevf_update_stats(adapter); 2439 ixgbevf_update_stats(adapter);
2421 2440
2441pf_has_reset:
2422 /* Force detection of hung controller every watchdog period */ 2442 /* Force detection of hung controller every watchdog period */
2423 adapter->detect_tx_hung = true; 2443 adapter->detect_tx_hung = true;
2424 2444
@@ -2675,7 +2695,7 @@ static int ixgbevf_open(struct net_device *netdev)
2675 if (hw->adapter_stopped) { 2695 if (hw->adapter_stopped) {
2676 err = IXGBE_ERR_MBX; 2696 err = IXGBE_ERR_MBX;
2677 printk(KERN_ERR "Unable to start - perhaps the PF" 2697 printk(KERN_ERR "Unable to start - perhaps the PF"
2678 "Driver isn't up yet\n"); 2698 " Driver isn't up yet\n");
2679 goto err_setup_reset; 2699 goto err_setup_reset;
2680 } 2700 }
2681 } 2701 }
@@ -2923,9 +2943,10 @@ static int ixgbevf_tx_map(struct ixgbevf_adapter *adapter,
2923 struct ixgbevf_tx_buffer *tx_buffer_info; 2943 struct ixgbevf_tx_buffer *tx_buffer_info;
2924 unsigned int len; 2944 unsigned int len;
2925 unsigned int total = skb->len; 2945 unsigned int total = skb->len;
2926 unsigned int offset = 0, size, count = 0, i; 2946 unsigned int offset = 0, size, count = 0;
2927 unsigned int nr_frags = skb_shinfo(skb)->nr_frags; 2947 unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
2928 unsigned int f; 2948 unsigned int f;
2949 int i;
2929 2950
2930 i = tx_ring->next_to_use; 2951 i = tx_ring->next_to_use;
2931 2952
@@ -3390,8 +3411,6 @@ static int __devinit ixgbevf_probe(struct pci_dev *pdev,
3390 /* setup the private structure */ 3411 /* setup the private structure */
3391 err = ixgbevf_sw_init(adapter); 3412 err = ixgbevf_sw_init(adapter);
3392 3413
3393 ixgbevf_init_last_counter_stats(adapter);
3394
3395#ifdef MAX_SKB_FRAGS 3414#ifdef MAX_SKB_FRAGS
3396 netdev->features = NETIF_F_SG | 3415 netdev->features = NETIF_F_SG |
3397 NETIF_F_IP_CSUM | 3416 NETIF_F_IP_CSUM |
@@ -3449,6 +3468,8 @@ static int __devinit ixgbevf_probe(struct pci_dev *pdev,
3449 3468
3450 adapter->netdev_registered = true; 3469 adapter->netdev_registered = true;
3451 3470
3471 ixgbevf_init_last_counter_stats(adapter);
3472
3452 /* print the MAC address */ 3473 /* print the MAC address */
3453 hw_dbg(hw, "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x\n", 3474 hw_dbg(hw, "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x\n",
3454 netdev->dev_addr[0], 3475 netdev->dev_addr[0],
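
In ixgbevf_main.c the counter baselining moves from probe time into ixgbevf_up_complete(), so it also runs after every PF-triggered reset; the watchdog is rearmed one jiffy (rather than ten) after a PF control message; and statistics are still updated when the PF has reset, since the pf_has_reset label now sits after ixgbevf_update_stats(). The ordering that makes the saved_reset_* scheme work, as a sketch:

	/* sketch: first fold what the hardware counted before the reset into
	 * the saved_reset_* totals, then re-read the registers as the new base */
	ixgbevf_save_reset_stats(adapter);
	ixgbevf_init_last_counter_stats(adapter);
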
diff --git a/drivers/net/ixgbevf/vf.h b/drivers/net/ixgbevf/vf.h
index 799600e92700..1f31b052d4b4 100644
--- a/drivers/net/ixgbevf/vf.h
+++ b/drivers/net/ixgbevf/vf.h
@@ -157,6 +157,12 @@ struct ixgbevf_hw_stats {
157 u64 vfgorc; 157 u64 vfgorc;
158 u64 vfgotc; 158 u64 vfgotc;
159 u64 vfmprc; 159 u64 vfmprc;
160
161 u64 saved_reset_vfgprc;
162 u64 saved_reset_vfgptc;
163 u64 saved_reset_vfgorc;
164 u64 saved_reset_vfgotc;
165 u64 saved_reset_vfmprc;
160}; 166};
161 167
162struct ixgbevf_info { 168struct ixgbevf_info {
diff --git a/drivers/net/jme.c b/drivers/net/jme.c
index 0f31497833df..c0b59a555384 100644
--- a/drivers/net/jme.c
+++ b/drivers/net/jme.c
@@ -946,6 +946,8 @@ jme_alloc_and_feed_skb(struct jme_adapter *jme, int idx)
946 jme->jme_vlan_rx(skb, jme->vlgrp, 946 jme->jme_vlan_rx(skb, jme->vlgrp,
947 le16_to_cpu(rxdesc->descwb.vlan)); 947 le16_to_cpu(rxdesc->descwb.vlan));
948 NET_STAT(jme).rx_bytes += 4; 948 NET_STAT(jme).rx_bytes += 4;
949 } else {
950 dev_kfree_skb(skb);
949 } 951 }
950 } else { 952 } else {
951 jme->jme_rx(skb); 953 jme->jme_rx(skb);
@@ -2081,12 +2083,45 @@ jme_tx_timeout(struct net_device *netdev)
2081 jme_reset_link(jme); 2083 jme_reset_link(jme);
2082} 2084}
2083 2085
2086static inline void jme_pause_rx(struct jme_adapter *jme)
2087{
2088 atomic_dec(&jme->link_changing);
2089
2090 jme_set_rx_pcc(jme, PCC_OFF);
2091 if (test_bit(JME_FLAG_POLL, &jme->flags)) {
2092 JME_NAPI_DISABLE(jme);
2093 } else {
2094 tasklet_disable(&jme->rxclean_task);
2095 tasklet_disable(&jme->rxempty_task);
2096 }
2097}
2098
2099static inline void jme_resume_rx(struct jme_adapter *jme)
2100{
2101 struct dynpcc_info *dpi = &(jme->dpi);
2102
2103 if (test_bit(JME_FLAG_POLL, &jme->flags)) {
2104 JME_NAPI_ENABLE(jme);
2105 } else {
2106 tasklet_hi_enable(&jme->rxclean_task);
2107 tasklet_hi_enable(&jme->rxempty_task);
2108 }
2109 dpi->cur = PCC_P1;
2110 dpi->attempt = PCC_P1;
2111 dpi->cnt = 0;
2112 jme_set_rx_pcc(jme, PCC_P1);
2113
2114 atomic_inc(&jme->link_changing);
2115}
2116
2084static void 2117static void
2085jme_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp) 2118jme_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
2086{ 2119{
2087 struct jme_adapter *jme = netdev_priv(netdev); 2120 struct jme_adapter *jme = netdev_priv(netdev);
2088 2121
2122 jme_pause_rx(jme);
2089 jme->vlgrp = grp; 2123 jme->vlgrp = grp;
2124 jme_resume_rx(jme);
2090} 2125}
2091 2126
2092static void 2127static void
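
The jme.c changes free the skb when a tagged frame cannot be handed to the vlan layer (instead of leaking it) and quiesce the receive path around vlan group registration: jme_pause_rx() sets the rx PCC off and stops NAPI or the rx tasklets, and jme_resume_rx() restores them and resets the dynamic PCC state. Usage as in the hunk:

	/* sketch: stop rx processing while the vlgrp pointer is swapped so
	 * jme_alloc_and_feed_skb() never races with a half-updated group */
	jme_pause_rx(jme);
	jme->vlgrp = grp;
	jme_resume_rx(jme);
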
diff --git a/drivers/net/jme.h b/drivers/net/jme.h
index c19db9146a2f..07ad3a457185 100644
--- a/drivers/net/jme.h
+++ b/drivers/net/jme.h
@@ -25,7 +25,7 @@
25#define __JME_H_INCLUDED__ 25#define __JME_H_INCLUDED__
26 26
27#define DRV_NAME "jme" 27#define DRV_NAME "jme"
28#define DRV_VERSION "1.0.5" 28#define DRV_VERSION "1.0.6"
29#define PFX DRV_NAME ": " 29#define PFX DRV_NAME ": "
30 30
31#define PCI_DEVICE_ID_JMICRON_JMC250 0x0250 31#define PCI_DEVICE_ID_JMICRON_JMC250 0x0250
diff --git a/drivers/net/ks8851.c b/drivers/net/ks8851.c
index b5219cce12ed..13cc1ca261d9 100644
--- a/drivers/net/ks8851.c
+++ b/drivers/net/ks8851.c
@@ -407,7 +407,7 @@ static irqreturn_t ks8851_irq(int irq, void *pw)
407 * @buff: The buffer address 407 * @buff: The buffer address
408 * @len: The length of the data to read 408 * @len: The length of the data to read
409 * 409 *
410 * Issue an RXQ FIFO read command and read the @len ammount of data from 410 * Issue an RXQ FIFO read command and read the @len amount of data from
411 * the FIFO into the buffer specified by @buff. 411 * the FIFO into the buffer specified by @buff.
412 */ 412 */
413static void ks8851_rdfifo(struct ks8851_net *ks, u8 *buff, unsigned len) 413static void ks8851_rdfifo(struct ks8851_net *ks, u8 *buff, unsigned len)
@@ -976,7 +976,6 @@ static void ks8851_set_rx_mode(struct net_device *dev)
976 crc >>= (32 - 6); /* get top six bits */ 976 crc >>= (32 - 6); /* get top six bits */
977 977
978 rxctrl.mchash[crc >> 4] |= (1 << (crc & 0xf)); 978 rxctrl.mchash[crc >> 4] |= (1 << (crc & 0xf));
979 mcptr = mcptr->next;
980 } 979 }
981 980
982 rxctrl.rxcr1 = RXCR1_RXME | RXCR1_RXPAFMA; 981 rxctrl.rxcr1 = RXCR1_RXME | RXCR1_RXPAFMA;
diff --git a/drivers/net/ksz884x.c b/drivers/net/ksz884x.c
index 7264a3e5c2c0..6c5327af1bf9 100644
--- a/drivers/net/ksz884x.c
+++ b/drivers/net/ksz884x.c
@@ -4899,8 +4899,10 @@ static int netdev_tx(struct sk_buff *skb, struct net_device *dev)
4899 struct sk_buff *org_skb = skb; 4899 struct sk_buff *org_skb = skb;
4900 4900
4901 skb = dev_alloc_skb(org_skb->len); 4901 skb = dev_alloc_skb(org_skb->len);
4902 if (!skb) 4902 if (!skb) {
4903 return NETDEV_TX_BUSY; 4903 rc = NETDEV_TX_BUSY;
4904 goto unlock;
4905 }
4904 skb_copy_and_csum_dev(org_skb, skb->data); 4906 skb_copy_and_csum_dev(org_skb, skb->data);
4905 org_skb->ip_summed = 0; 4907 org_skb->ip_summed = 0;
4906 skb->len = org_skb->len; 4908 skb->len = org_skb->len;
@@ -4914,7 +4916,7 @@ static int netdev_tx(struct sk_buff *skb, struct net_device *dev)
4914 netif_stop_queue(dev); 4916 netif_stop_queue(dev);
4915 rc = NETDEV_TX_BUSY; 4917 rc = NETDEV_TX_BUSY;
4916 } 4918 }
4917 4919unlock:
4918 spin_unlock_irq(&hw_priv->hwlock); 4920 spin_unlock_irq(&hw_priv->hwlock);
4919 4921
4920 return rc; 4922 return rc;
@@ -6320,7 +6322,7 @@ static int netdev_set_eeprom(struct net_device *dev,
6320 int len; 6322 int len;
6321 6323
6322 if (eeprom->magic != EEPROM_MAGIC) 6324 if (eeprom->magic != EEPROM_MAGIC)
6323 return 1; 6325 return -EINVAL;
6324 6326
6325 len = (eeprom->offset + eeprom->len + 1) / 2; 6327 len = (eeprom->offset + eeprom->len + 1) / 2;
6326 for (i = eeprom->offset / 2; i < len; i++) 6328 for (i = eeprom->offset / 2; i < len; i++)
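
The ksz884x changes fix two error paths: netdev_tx() now releases hw_priv->hwlock through a shared unlock label when the bounce-buffer allocation fails (previously it returned NETDEV_TX_BUSY with the spinlock still held), and netdev_set_eeprom() returns -EINVAL instead of a positive 1 on a bad magic, which is what the ethtool core expects. A simplified sketch of the locking flow, with the surrounding copy/queue steps elided:

	spin_lock_irq(&hw_priv->hwlock);
	skb = dev_alloc_skb(org_skb->len);   /* bounce buffer for the csum path */
	if (!skb) {
		rc = NETDEV_TX_BUSY;
		goto unlock;   /* old code returned here with the lock held */
	}
	/* ... copy, checksum and queue the frame, setting rc ... */
unlock:
	spin_unlock_irq(&hw_priv->hwlock);
	return rc;
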
diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c
index 8f6e816a7395..b402a95c87c7 100644
--- a/drivers/net/mlx4/main.c
+++ b/drivers/net/mlx4/main.c
@@ -1023,6 +1023,7 @@ static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
1023 info->port_attr.attr.mode = S_IRUGO | S_IWUSR; 1023 info->port_attr.attr.mode = S_IRUGO | S_IWUSR;
1024 info->port_attr.show = show_port_type; 1024 info->port_attr.show = show_port_type;
1025 info->port_attr.store = set_port_type; 1025 info->port_attr.store = set_port_type;
1026 sysfs_attr_init(&info->port_attr.attr);
1026 1027
1027 err = device_create_file(&dev->pdev->dev, &info->port_attr); 1028 err = device_create_file(&dev->pdev->dev, &info->port_attr);
1028 if (err) { 1029 if (err) {
diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c
index 676c513e12fc..3cb760706c01 100644
--- a/drivers/net/myri10ge/myri10ge.c
+++ b/drivers/net/myri10ge/myri10ge.c
@@ -1689,7 +1689,7 @@ myri10ge_set_pauseparam(struct net_device *netdev,
1689 if (pause->tx_pause != mgp->pause) 1689 if (pause->tx_pause != mgp->pause)
1690 return myri10ge_change_pause(mgp, pause->tx_pause); 1690 return myri10ge_change_pause(mgp, pause->tx_pause);
1691 if (pause->rx_pause != mgp->pause) 1691 if (pause->rx_pause != mgp->pause)
1692 return myri10ge_change_pause(mgp, pause->tx_pause); 1692 return myri10ge_change_pause(mgp, pause->rx_pause);
1693 if (pause->autoneg != 0) 1693 if (pause->autoneg != 0)
1694 return -EINVAL; 1694 return -EINVAL;
1695 return 0; 1695 return 0;
@@ -3687,7 +3687,6 @@ static void myri10ge_probe_slices(struct myri10ge_priv *mgp)
3687 if (status != 0) { 3687 if (status != 0) {
3688 dev_err(&mgp->pdev->dev, "failed reset\n"); 3688 dev_err(&mgp->pdev->dev, "failed reset\n");
3689 goto abort_with_fw; 3689 goto abort_with_fw;
3690 return;
3691 } 3690 }
3692 3691
3693 mgp->max_intr_slots = cmd.data0 / sizeof(struct mcp_slot); 3692 mgp->max_intr_slots = cmd.data0 / sizeof(struct mcp_slot);
diff --git a/drivers/net/ne.c b/drivers/net/ne.c
index 992dbfffdb05..f4347f88b6f2 100644
--- a/drivers/net/ne.c
+++ b/drivers/net/ne.c
@@ -142,7 +142,7 @@ bad_clone_list[] __initdata = {
142 {"PCM-4823", "PCM-4823", {0x00, 0xc0, 0x6c}}, /* Broken Advantech MoBo */ 142 {"PCM-4823", "PCM-4823", {0x00, 0xc0, 0x6c}}, /* Broken Advantech MoBo */
143 {"REALTEK", "RTL8019", {0x00, 0x00, 0xe8}}, /* no-name with Realtek chip */ 143 {"REALTEK", "RTL8019", {0x00, 0x00, 0xe8}}, /* no-name with Realtek chip */
144#ifdef CONFIG_MACH_TX49XX 144#ifdef CONFIG_MACH_TX49XX
145 {"RBHMA4X00-RTL8019", "RBHMA4X00/RTL8019", {0x00, 0x60, 0x0a}}, /* Toshiba built-in */ 145 {"RBHMA4X00-RTL8019", "RBHMA4X00-RTL8019", {0x00, 0x60, 0x0a}}, /* Toshiba built-in */
146#endif 146#endif
147 {"LCS-8834", "LCS-8836", {0x04, 0x04, 0x37}}, /* ShinyNet (SET) */ 147 {"LCS-8834", "LCS-8836", {0x04, 0x04, 0x37}}, /* ShinyNet (SET) */
148 {NULL,} 148 {NULL,}
diff --git a/drivers/net/netxen/netxen_nic.h b/drivers/net/netxen/netxen_nic.h
index 144d2e880422..0f703838e21a 100644
--- a/drivers/net/netxen/netxen_nic.h
+++ b/drivers/net/netxen/netxen_nic.h
@@ -53,8 +53,8 @@
53 53
54#define _NETXEN_NIC_LINUX_MAJOR 4 54#define _NETXEN_NIC_LINUX_MAJOR 4
55#define _NETXEN_NIC_LINUX_MINOR 0 55#define _NETXEN_NIC_LINUX_MINOR 0
56#define _NETXEN_NIC_LINUX_SUBVERSION 72 56#define _NETXEN_NIC_LINUX_SUBVERSION 73
57#define NETXEN_NIC_LINUX_VERSIONID "4.0.72" 57#define NETXEN_NIC_LINUX_VERSIONID "4.0.73"
58 58
59#define NETXEN_VERSION_CODE(a, b, c) (((a) << 24) + ((b) << 16) + (c)) 59#define NETXEN_VERSION_CODE(a, b, c) (((a) << 24) + ((b) << 16) + (c))
60#define _major(v) (((v) >> 24) & 0xff) 60#define _major(v) (((v) >> 24) & 0xff)
diff --git a/drivers/net/netxen/netxen_nic_ctx.c b/drivers/net/netxen/netxen_nic_ctx.c
index 2a8ef5fc9663..f26e54716c88 100644
--- a/drivers/net/netxen/netxen_nic_ctx.c
+++ b/drivers/net/netxen/netxen_nic_ctx.c
@@ -669,13 +669,15 @@ int netxen_alloc_hw_resources(struct netxen_adapter *adapter)
669 } 669 }
670 sds_ring->desc_head = (struct status_desc *)addr; 670 sds_ring->desc_head = (struct status_desc *)addr;
671 671
672 sds_ring->crb_sts_consumer = 672 if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
673 netxen_get_ioaddr(adapter, 673 sds_ring->crb_sts_consumer =
674 recv_crb_registers[port].crb_sts_consumer[ring]); 674 netxen_get_ioaddr(adapter,
675 recv_crb_registers[port].crb_sts_consumer[ring]);
675 676
676 sds_ring->crb_intr_mask = 677 sds_ring->crb_intr_mask =
677 netxen_get_ioaddr(adapter, 678 netxen_get_ioaddr(adapter,
678 recv_crb_registers[port].sw_int_mask[ring]); 679 recv_crb_registers[port].sw_int_mask[ring]);
680 }
679 } 681 }
680 682
681 683
diff --git a/drivers/net/netxen/netxen_nic_init.c b/drivers/net/netxen/netxen_nic_init.c
index 1c63610ead42..7eb925a9f36e 100644
--- a/drivers/net/netxen/netxen_nic_init.c
+++ b/drivers/net/netxen/netxen_nic_init.c
@@ -761,7 +761,7 @@ nx_get_bios_version(struct netxen_adapter *adapter)
761 if (adapter->fw_type == NX_UNIFIED_ROMIMAGE) { 761 if (adapter->fw_type == NX_UNIFIED_ROMIMAGE) {
762 bios_ver = cpu_to_le32(*((u32 *) (&fw->data[prd_off]) 762 bios_ver = cpu_to_le32(*((u32 *) (&fw->data[prd_off])
763 + NX_UNI_BIOS_VERSION_OFF)); 763 + NX_UNI_BIOS_VERSION_OFF));
764 return (bios_ver << 24) + ((bios_ver >> 8) & 0xff00) + 764 return (bios_ver << 16) + ((bios_ver >> 8) & 0xff00) +
765 (bios_ver >> 24); 765 (bios_ver >> 24);
766 } else 766 } else
767 return cpu_to_le32(*(u32 *)&fw->data[NX_BIOS_VERSION_OFFSET]); 767 return cpu_to_le32(*(u32 *)&fw->data[NX_BIOS_VERSION_OFFSET]);
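The one-character change above ("<< 24" becomes "<< 16") is easiest to see with concrete bytes. The following is a throwaway userspace check, not driver code; the sample value is made up and only the shift arithmetic is taken from the hunk.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t bios_ver = 0x04030201;	/* arbitrary example value */

	/* old expression: "<< 24" keeps only the lowest byte in the result */
	uint32_t v_old = (bios_ver << 24) + ((bios_ver >> 8) & 0xff00) +
			 (bios_ver >> 24);
	/* new expression: "<< 16" keeps both low bytes, so nothing is lost */
	uint32_t v_new = (bios_ver << 16) + ((bios_ver >> 8) & 0xff00) +
			 (bios_ver >> 24);

	printf("old: %08x\n", v_old);	/* 01000304 - the 0x02 byte vanishes */
	printf("new: %08x\n", v_new);	/* 02010304 - all four bytes survive */
	return 0;
}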
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c
index 08780ef1c1f8..01808b28d1b6 100644
--- a/drivers/net/netxen/netxen_nic_main.c
+++ b/drivers/net/netxen/netxen_nic_main.c
@@ -604,16 +604,14 @@ netxen_cleanup_pci_map(struct netxen_adapter *adapter)
604static int 604static int
605netxen_setup_pci_map(struct netxen_adapter *adapter) 605netxen_setup_pci_map(struct netxen_adapter *adapter)
606{ 606{
607 void __iomem *mem_ptr0 = NULL;
608 void __iomem *mem_ptr1 = NULL;
609 void __iomem *mem_ptr2 = NULL;
610 void __iomem *db_ptr = NULL; 607 void __iomem *db_ptr = NULL;
611 608
612 resource_size_t mem_base, db_base; 609 resource_size_t mem_base, db_base;
613 unsigned long mem_len, db_len = 0, pci_len0 = 0; 610 unsigned long mem_len, db_len = 0;
614 611
615 struct pci_dev *pdev = adapter->pdev; 612 struct pci_dev *pdev = adapter->pdev;
616 int pci_func = adapter->ahw.pci_func; 613 int pci_func = adapter->ahw.pci_func;
614 struct netxen_hardware_context *ahw = &adapter->ahw;
617 615
618 int err = 0; 616 int err = 0;
619 617
@@ -630,24 +628,40 @@ netxen_setup_pci_map(struct netxen_adapter *adapter)
630 628
631 /* 128 Meg of memory */ 629 /* 128 Meg of memory */
632 if (mem_len == NETXEN_PCI_128MB_SIZE) { 630 if (mem_len == NETXEN_PCI_128MB_SIZE) {
633 mem_ptr0 = ioremap(mem_base, FIRST_PAGE_GROUP_SIZE); 631
634 mem_ptr1 = ioremap(mem_base + SECOND_PAGE_GROUP_START, 632 ahw->pci_base0 = ioremap(mem_base, FIRST_PAGE_GROUP_SIZE);
633 ahw->pci_base1 = ioremap(mem_base + SECOND_PAGE_GROUP_START,
635 SECOND_PAGE_GROUP_SIZE); 634 SECOND_PAGE_GROUP_SIZE);
636 mem_ptr2 = ioremap(mem_base + THIRD_PAGE_GROUP_START, 635 ahw->pci_base2 = ioremap(mem_base + THIRD_PAGE_GROUP_START,
637 THIRD_PAGE_GROUP_SIZE); 636 THIRD_PAGE_GROUP_SIZE);
638 pci_len0 = FIRST_PAGE_GROUP_SIZE; 637 if (ahw->pci_base0 == NULL || ahw->pci_base1 == NULL ||
638 ahw->pci_base2 == NULL) {
639 dev_err(&pdev->dev, "failed to map PCI bar 0\n");
640 err = -EIO;
641 goto err_out;
642 }
643
644 ahw->pci_len0 = FIRST_PAGE_GROUP_SIZE;
645
639 } else if (mem_len == NETXEN_PCI_32MB_SIZE) { 646 } else if (mem_len == NETXEN_PCI_32MB_SIZE) {
640 mem_ptr1 = ioremap(mem_base, SECOND_PAGE_GROUP_SIZE); 647
641 mem_ptr2 = ioremap(mem_base + THIRD_PAGE_GROUP_START - 648 ahw->pci_base1 = ioremap(mem_base, SECOND_PAGE_GROUP_SIZE);
649 ahw->pci_base2 = ioremap(mem_base + THIRD_PAGE_GROUP_START -
642 SECOND_PAGE_GROUP_START, THIRD_PAGE_GROUP_SIZE); 650 SECOND_PAGE_GROUP_START, THIRD_PAGE_GROUP_SIZE);
651 if (ahw->pci_base1 == NULL || ahw->pci_base2 == NULL) {
652 dev_err(&pdev->dev, "failed to map PCI bar 0\n");
653 err = -EIO;
654 goto err_out;
655 }
656
643 } else if (mem_len == NETXEN_PCI_2MB_SIZE) { 657 } else if (mem_len == NETXEN_PCI_2MB_SIZE) {
644 658
645 mem_ptr0 = pci_ioremap_bar(pdev, 0); 659 ahw->pci_base0 = pci_ioremap_bar(pdev, 0);
646 if (mem_ptr0 == NULL) { 660 if (ahw->pci_base0 == NULL) {
647 dev_err(&pdev->dev, "failed to map PCI bar 0\n"); 661 dev_err(&pdev->dev, "failed to map PCI bar 0\n");
648 return -EIO; 662 return -EIO;
649 } 663 }
650 pci_len0 = mem_len; 664 ahw->pci_len0 = mem_len;
651 } else { 665 } else {
652 return -EIO; 666 return -EIO;
653 } 667 }
@@ -656,11 +670,6 @@ netxen_setup_pci_map(struct netxen_adapter *adapter)
656 670
657 dev_info(&pdev->dev, "%dMB memory map\n", (int)(mem_len>>20)); 671 dev_info(&pdev->dev, "%dMB memory map\n", (int)(mem_len>>20));
658 672
659 adapter->ahw.pci_base0 = mem_ptr0;
660 adapter->ahw.pci_len0 = pci_len0;
661 adapter->ahw.pci_base1 = mem_ptr1;
662 adapter->ahw.pci_base2 = mem_ptr2;
663
664 if (NX_IS_REVISION_P3P(adapter->ahw.revision_id)) { 673 if (NX_IS_REVISION_P3P(adapter->ahw.revision_id)) {
665 adapter->ahw.ocm_win_crb = netxen_get_ioaddr(adapter, 674 adapter->ahw.ocm_win_crb = netxen_get_ioaddr(adapter,
666 NETXEN_PCIX_PS_REG(PCIX_OCM_WINDOW_REG(pci_func))); 675 NETXEN_PCIX_PS_REG(PCIX_OCM_WINDOW_REG(pci_func)));
@@ -1246,8 +1255,8 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1246 int pci_func_id = PCI_FUNC(pdev->devfn); 1255 int pci_func_id = PCI_FUNC(pdev->devfn);
1247 uint8_t revision_id; 1256 uint8_t revision_id;
1248 1257
1249 if (pdev->revision >= NX_P3_A0 && pdev->revision < NX_P3_B1) { 1258 if (pdev->revision >= NX_P3_A0 && pdev->revision <= NX_P3_B1) {
1250 pr_warning("%s: chip revisions between 0x%x-0x%x" 1259 pr_warning("%s: chip revisions between 0x%x-0x%x "
1251 "will not be enabled.\n", 1260 "will not be enabled.\n",
1252 module_name(THIS_MODULE), NX_P3_A0, NX_P3_B1); 1261 module_name(THIS_MODULE), NX_P3_A0, NX_P3_B1);
1253 return -ENODEV; 1262 return -ENODEV;
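The netxen_setup_pci_map() hunk above stores the mappings straight into ahw->pci_base0/1/2 and bails out as soon as any ioremap() fails. A minimal sketch of that map-and-check shape follows; the function and struct names are hypothetical, and the driver's real err_out path (not reproduced in the hunk) is responsible for the actual cleanup.

/* minimal sketch, not the driver's code: map three windows of one BAR and
 * fail as a unit if any mapping is missing; my_hw and the _SIZE/_START
 * constants stand in for the driver's own definitions */
static int map_bar0_windows(struct pci_dev *pdev, struct my_hw *hw,
			    resource_size_t base)
{
	hw->base0 = ioremap(base, FIRST_PAGE_GROUP_SIZE);
	hw->base1 = ioremap(base + SECOND_PAGE_GROUP_START,
			    SECOND_PAGE_GROUP_SIZE);
	hw->base2 = ioremap(base + THIRD_PAGE_GROUP_START,
			    THIRD_PAGE_GROUP_SIZE);

	if (!hw->base0 || !hw->base1 || !hw->base2) {
		dev_err(&pdev->dev, "failed to map PCI bar 0\n");
		/* unmap whatever did succeed before reporting failure */
		if (hw->base0)
			iounmap(hw->base0);
		if (hw->base1)
			iounmap(hw->base1);
		if (hw->base2)
			iounmap(hw->base2);
		return -EIO;
	}
	return 0;
}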
diff --git a/drivers/net/pcmcia/pcnet_cs.c b/drivers/net/pcmcia/pcnet_cs.c
index 776cad2f5715..1028fcb91a28 100644
--- a/drivers/net/pcmcia/pcnet_cs.c
+++ b/drivers/net/pcmcia/pcnet_cs.c
@@ -1549,6 +1549,7 @@ static struct pcmcia_device_id pcnet_ids[] = {
1549 PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x021b, 0x0101), 1549 PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x021b, 0x0101),
1550 PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x08a1, 0xc0ab), 1550 PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x08a1, 0xc0ab),
1551 PCMCIA_PFC_DEVICE_PROD_ID12(0, "AnyCom", "Fast Ethernet + 56K COMBO", 0x578ba6e7, 0xb0ac62c4), 1551 PCMCIA_PFC_DEVICE_PROD_ID12(0, "AnyCom", "Fast Ethernet + 56K COMBO", 0x578ba6e7, 0xb0ac62c4),
1552 PCMCIA_PFC_DEVICE_PROD_ID12(0, "ATKK", "LM33-PCM-T", 0xba9eb7e2, 0x077c174e),
1552 PCMCIA_PFC_DEVICE_PROD_ID12(0, "D-Link", "DME336T", 0x1a424a1c, 0xb23897ff), 1553 PCMCIA_PFC_DEVICE_PROD_ID12(0, "D-Link", "DME336T", 0x1a424a1c, 0xb23897ff),
1553 PCMCIA_PFC_DEVICE_PROD_ID12(0, "Grey Cell", "GCS3000", 0x2a151fac, 0x48b932ae), 1554 PCMCIA_PFC_DEVICE_PROD_ID12(0, "Grey Cell", "GCS3000", 0x2a151fac, 0x48b932ae),
1554 PCMCIA_PFC_DEVICE_PROD_ID12(0, "Linksys", "EtherFast 10&100 + 56K PC Card (PCMLM56)", 0x0733cc81, 0xb3765033), 1555 PCMCIA_PFC_DEVICE_PROD_ID12(0, "Linksys", "EtherFast 10&100 + 56K PC Card (PCMLM56)", 0x0733cc81, 0xb3765033),
@@ -1740,7 +1741,7 @@ static struct pcmcia_device_id pcnet_ids[] = {
1740 PCMCIA_MFC_DEVICE_CIS_PROD_ID12(0, "DAYNA COMMUNICATIONS", "LAN AND MODEM MULTIFUNCTION", 0x8fdf8f89, 0xdd5ed9e8, "cis/DP83903.cis"), 1741 PCMCIA_MFC_DEVICE_CIS_PROD_ID12(0, "DAYNA COMMUNICATIONS", "LAN AND MODEM MULTIFUNCTION", 0x8fdf8f89, 0xdd5ed9e8, "cis/DP83903.cis"),
1741 PCMCIA_MFC_DEVICE_CIS_PROD_ID4(0, "NSC MF LAN/Modem", 0x58fc6056, "cis/DP83903.cis"), 1742 PCMCIA_MFC_DEVICE_CIS_PROD_ID4(0, "NSC MF LAN/Modem", 0x58fc6056, "cis/DP83903.cis"),
1742 PCMCIA_MFC_DEVICE_CIS_MANF_CARD(0, 0x0175, 0x0000, "cis/DP83903.cis"), 1743 PCMCIA_MFC_DEVICE_CIS_MANF_CARD(0, 0x0175, 0x0000, "cis/DP83903.cis"),
1743 PCMCIA_DEVICE_CIS_MANF_CARD(0xc00f, 0x0002, "cis/LA-PCM.cis"), 1744 PCMCIA_DEVICE_CIS_PROD_ID12("Allied Telesis,K.K", "Ethernet LAN Card", 0x2ad62f3c, 0x9fd2f0a2, "cis/LA-PCM.cis"),
1744 PCMCIA_DEVICE_CIS_PROD_ID12("KTI", "PE520 PLUS", 0xad180345, 0x9d58d392, "cis/PE520.cis"), 1745 PCMCIA_DEVICE_CIS_PROD_ID12("KTI", "PE520 PLUS", 0xad180345, 0x9d58d392, "cis/PE520.cis"),
1745 PCMCIA_DEVICE_CIS_PROD_ID12("NDC", "Ethernet", 0x01c43ae1, 0x00b2e941, "cis/NE2K.cis"), 1746 PCMCIA_DEVICE_CIS_PROD_ID12("NDC", "Ethernet", 0x01c43ae1, 0x00b2e941, "cis/NE2K.cis"),
1746 PCMCIA_DEVICE_CIS_PROD_ID12("PMX ", "PE-200", 0x34f3f1c8, 0x10b59f8c, "cis/PE-200.cis"), 1747 PCMCIA_DEVICE_CIS_PROD_ID12("PMX ", "PE-200", 0x34f3f1c8, 0x10b59f8c, "cis/PE-200.cis"),
diff --git a/drivers/net/pcmcia/smc91c92_cs.c b/drivers/net/pcmcia/smc91c92_cs.c
index 5adc662c4bfb..ff7eb9116b6a 100644
--- a/drivers/net/pcmcia/smc91c92_cs.c
+++ b/drivers/net/pcmcia/smc91c92_cs.c
@@ -493,13 +493,14 @@ static int pcmcia_get_versmac(struct pcmcia_device *p_dev,
493{ 493{
494 struct net_device *dev = priv; 494 struct net_device *dev = priv;
495 cisparse_t parse; 495 cisparse_t parse;
496 u8 *buf;
496 497
497 if (pcmcia_parse_tuple(tuple, &parse)) 498 if (pcmcia_parse_tuple(tuple, &parse))
498 return -EINVAL; 499 return -EINVAL;
499 500
500 if ((parse.version_1.ns > 3) && 501 buf = parse.version_1.str + parse.version_1.ofs[3];
501 (cvt_ascii_address(dev, 502
502 (parse.version_1.str + parse.version_1.ofs[3])))) 503 if ((parse.version_1.ns > 3) && (cvt_ascii_address(dev, buf) == 0))
503 return 0; 504 return 0;
504 505
505 return -EINVAL; 506 return -EINVAL;
@@ -528,7 +529,7 @@ static int mhz_setup(struct pcmcia_device *link)
528 len = pcmcia_get_tuple(link, 0x81, &buf); 529 len = pcmcia_get_tuple(link, 0x81, &buf);
529 if (buf && len >= 13) { 530 if (buf && len >= 13) {
530 buf[12] = '\0'; 531 buf[12] = '\0';
531 if (cvt_ascii_address(dev, buf)) 532 if (cvt_ascii_address(dev, buf) == 0)
532 rc = 0; 533 rc = 0;
533 } 534 }
534 kfree(buf); 535 kfree(buf);
@@ -910,7 +911,7 @@ static int smc91c92_config(struct pcmcia_device *link)
910 911
911 if (i != 0) { 912 if (i != 0) {
912 printk(KERN_NOTICE "smc91c92_cs: Unable to find hardware address.\n"); 913 printk(KERN_NOTICE "smc91c92_cs: Unable to find hardware address.\n");
913 goto config_undo; 914 goto config_failed;
914 } 915 }
915 916
916 smc->duplex = 0; 917 smc->duplex = 0;
@@ -998,6 +999,7 @@ config_undo:
998 unregister_netdev(dev); 999 unregister_netdev(dev);
999config_failed: 1000config_failed:
1000 smc91c92_release(link); 1001 smc91c92_release(link);
1002 free_netdev(dev);
1001 return -ENODEV; 1003 return -ENODEV;
1002} /* smc91c92_config */ 1004} /* smc91c92_config */
1003 1005
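The two cvt_ascii_address() hunks above flip the test to "== 0", i.e. the helper follows the usual 0-on-success convention. A standalone illustration of that convention for turning a 12-character hex string into a MAC address (this is not the driver's parser, just the general shape):

#include <stdio.h>
#include <ctype.h>

/* returns 0 on success, -1 on failure, mirroring the convention the hunks
 * above rely on; not the driver's cvt_ascii_address() */
static int parse_ascii_mac(const char *s, unsigned char mac[6])
{
	int i;

	for (i = 0; i < 6; i++) {
		unsigned int byte;

		if (!isxdigit((unsigned char)s[2 * i]) ||
		    !isxdigit((unsigned char)s[2 * i + 1]))
			return -1;
		sscanf(s + 2 * i, "%2x", &byte);
		mac[i] = (unsigned char)byte;
	}
	return 0;			/* success */
}

int main(void)
{
	unsigned char mac[6];

	if (parse_ascii_mac("0080C8123456", mac) == 0)
		printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
		       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	return 0;
}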
diff --git a/drivers/net/pppol2tp.c b/drivers/net/pppol2tp.c
index 9fbb2eba9a06..449a9825200d 100644
--- a/drivers/net/pppol2tp.c
+++ b/drivers/net/pppol2tp.c
@@ -756,6 +756,7 @@ static int pppol2tp_recv_core(struct sock *sock, struct sk_buff *skb)
756 756
757 /* Try to dequeue as many skbs from reorder_q as we can. */ 757 /* Try to dequeue as many skbs from reorder_q as we can. */
758 pppol2tp_recv_dequeue(session); 758 pppol2tp_recv_dequeue(session);
759 sock_put(sock);
759 760
760 return 0; 761 return 0;
761 762
@@ -772,6 +773,7 @@ discard_bad_csum:
772 UDP_INC_STATS_USER(&init_net, UDP_MIB_INERRORS, 0); 773 UDP_INC_STATS_USER(&init_net, UDP_MIB_INERRORS, 0);
773 tunnel->stats.rx_errors++; 774 tunnel->stats.rx_errors++;
774 kfree_skb(skb); 775 kfree_skb(skb);
776 sock_put(sock);
775 777
776 return 0; 778 return 0;
777 779
@@ -1180,7 +1182,8 @@ static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
1180 /* Calculate UDP checksum if configured to do so */ 1182 /* Calculate UDP checksum if configured to do so */
1181 if (sk_tun->sk_no_check == UDP_CSUM_NOXMIT) 1183 if (sk_tun->sk_no_check == UDP_CSUM_NOXMIT)
1182 skb->ip_summed = CHECKSUM_NONE; 1184 skb->ip_summed = CHECKSUM_NONE;
1183 else if (!(skb_dst(skb)->dev->features & NETIF_F_V4_CSUM)) { 1185 else if ((skb_dst(skb) && skb_dst(skb)->dev) &&
1186 (!(skb_dst(skb)->dev->features & NETIF_F_V4_CSUM))) {
1184 skb->ip_summed = CHECKSUM_COMPLETE; 1187 skb->ip_summed = CHECKSUM_COMPLETE;
1185 csum = skb_checksum(skb, 0, udp_len, 0); 1188 csum = skb_checksum(skb, 0, udp_len, 0);
1186 uh->check = csum_tcpudp_magic(inet->inet_saddr, 1189 uh->check = csum_tcpudp_magic(inet->inet_saddr,
@@ -1661,6 +1664,7 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
1661 if (tunnel_sock == NULL) 1664 if (tunnel_sock == NULL)
1662 goto end; 1665 goto end;
1663 1666
1667 sock_hold(tunnel_sock);
1664 tunnel = tunnel_sock->sk_user_data; 1668 tunnel = tunnel_sock->sk_user_data;
1665 } else { 1669 } else {
1666 tunnel = pppol2tp_tunnel_find(sock_net(sk), sp->pppol2tp.s_tunnel); 1670 tunnel = pppol2tp_tunnel_find(sock_net(sk), sp->pppol2tp.s_tunnel);
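The pppol2tp hunks pair a sock_hold() taken in pppol2tp_connect() with a sock_put() on every exit from the receive path, including the discard paths. A compressed, hypothetical sketch of the invariant (the helper names are made up; this is not the driver's function):

/* sketch only: every path out of the receive routine drops the reference
 * that was taken when the session was attached to the tunnel socket */
static int recv_core_sketch(struct sock *sk, struct sk_buff *skb)
{
	if (checksum_is_bad(skb)) {		/* hypothetical helper */
		kfree_skb(skb);
		sock_put(sk);			/* error path still balances it */
		return 0;
	}

	deliver_to_session(skb);		/* hypothetical helper */
	sock_put(sk);				/* normal path balances it too */
	return 0;
}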
diff --git a/drivers/net/qlcnic/qlcnic_hw.c b/drivers/net/qlcnic/qlcnic_hw.c
index da00e162b6d3..b1753138ac98 100644
--- a/drivers/net/qlcnic/qlcnic_hw.c
+++ b/drivers/net/qlcnic/qlcnic_hw.c
@@ -430,6 +430,9 @@ void qlcnic_set_multi(struct net_device *netdev)
430 u8 bcast_addr[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; 430 u8 bcast_addr[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
431 u32 mode = VPORT_MISS_MODE_DROP; 431 u32 mode = VPORT_MISS_MODE_DROP;
432 432
433 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
434 return;
435
433 qlcnic_nic_add_mac(adapter, adapter->mac_addr); 436 qlcnic_nic_add_mac(adapter, adapter->mac_addr);
434 qlcnic_nic_add_mac(adapter, bcast_addr); 437 qlcnic_nic_add_mac(adapter, bcast_addr);
435 438
diff --git a/drivers/net/qlge/qlge_ethtool.c b/drivers/net/qlge/qlge_ethtool.c
index 05b8bde9980d..7dbff87480dc 100644
--- a/drivers/net/qlge/qlge_ethtool.c
+++ b/drivers/net/qlge/qlge_ethtool.c
@@ -405,7 +405,7 @@ static int ql_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
405 u32 wol = 0; 405 u32 wol = 0;
406 status = ql_mb_wol_mode(qdev, wol); 406 status = ql_mb_wol_mode(qdev, wol);
407 netif_err(qdev, drv, qdev->ndev, "WOL %s (wol code 0x%x)\n", 407 netif_err(qdev, drv, qdev->ndev, "WOL %s (wol code 0x%x)\n",
408 status == 0 ? "cleared sucessfully" : "clear failed", 408 status == 0 ? "cleared successfully" : "clear failed",
409 wol); 409 wol);
410 } 410 }
411 411
diff --git a/drivers/net/qlge/qlge_main.c b/drivers/net/qlge/qlge_main.c
index c26ec5d740f6..fd34f266c0a8 100644
--- a/drivers/net/qlge/qlge_main.c
+++ b/drivers/net/qlge/qlge_main.c
@@ -3855,7 +3855,7 @@ int ql_wol(struct ql_adapter *qdev)
3855 status = ql_mb_wol_mode(qdev, wol); 3855 status = ql_mb_wol_mode(qdev, wol);
3856 netif_err(qdev, drv, qdev->ndev, 3856 netif_err(qdev, drv, qdev->ndev,
3857 "WOL %s (wol code 0x%x) on %s\n", 3857 "WOL %s (wol code 0x%x) on %s\n",
3858 (status == 0) ? "Sucessfully set" : "Failed", 3858 (status == 0) ? "Successfully set" : "Failed",
3859 wol, qdev->ndev->name); 3859 wol, qdev->ndev->name);
3860 } 3860 }
3861 3861
diff --git a/drivers/net/r6040.c b/drivers/net/r6040.c
index 15d5373dc8f3..1247bc6b19d4 100644
--- a/drivers/net/r6040.c
+++ b/drivers/net/r6040.c
@@ -135,7 +135,7 @@
135#define RX_DESC_SIZE (RX_DCNT * sizeof(struct r6040_descriptor)) 135#define RX_DESC_SIZE (RX_DCNT * sizeof(struct r6040_descriptor))
136#define TX_DESC_SIZE (TX_DCNT * sizeof(struct r6040_descriptor)) 136#define TX_DESC_SIZE (TX_DCNT * sizeof(struct r6040_descriptor))
137#define MBCR_DEFAULT 0x012A /* MAC Bus Control Register */ 137#define MBCR_DEFAULT 0x012A /* MAC Bus Control Register */
138#define MCAST_MAX 4 /* Max number multicast addresses to filter */ 138#define MCAST_MAX 3 /* Max number multicast addresses to filter */
139 139
140/* Descriptor status */ 140/* Descriptor status */
141#define DSC_OWNER_MAC 0x8000 /* MAC is the owner of this descriptor */ 141#define DSC_OWNER_MAC 0x8000 /* MAC is the owner of this descriptor */
@@ -983,9 +983,6 @@ static void r6040_multicast_list(struct net_device *dev)
983 crc >>= 26; 983 crc >>= 26;
984 hash_table[crc >> 4] |= 1 << (15 - (crc & 0xf)); 984 hash_table[crc >> 4] |= 1 << (15 - (crc & 0xf));
985 } 985 }
986 /* Write the index of the hash table */
987 for (i = 0; i < 4; i++)
988 iowrite16(hash_table[i] << 14, ioaddr + MCR1);
989 /* Fill the MAC hash tables with their values */ 986 /* Fill the MAC hash tables with their values */
990 iowrite16(hash_table[0], ioaddr + MAR0); 987 iowrite16(hash_table[0], ioaddr + MAR0);
991 iowrite16(hash_table[1], ioaddr + MAR1); 988 iowrite16(hash_table[1], ioaddr + MAR1);
@@ -1001,9 +998,9 @@ static void r6040_multicast_list(struct net_device *dev)
1001 iowrite16(adrp[1], ioaddr + MID_1M + 8 * i); 998 iowrite16(adrp[1], ioaddr + MID_1M + 8 * i);
1002 iowrite16(adrp[2], ioaddr + MID_1H + 8 * i); 999 iowrite16(adrp[2], ioaddr + MID_1H + 8 * i);
1003 } else { 1000 } else {
1004 iowrite16(0xffff, ioaddr + MID_0L + 8 * i); 1001 iowrite16(0xffff, ioaddr + MID_1L + 8 * i);
1005 iowrite16(0xffff, ioaddr + MID_0M + 8 * i); 1002 iowrite16(0xffff, ioaddr + MID_1M + 8 * i);
1006 iowrite16(0xffff, ioaddr + MID_0H + 8 * i); 1003 iowrite16(0xffff, ioaddr + MID_1H + 8 * i);
1007 } 1004 }
1008 i++; 1005 i++;
1009 } 1006 }
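In the hash update in r6040_multicast_list() above, the top six bits of the CRC of each multicast address select one bit in one of the four 16-bit MAR registers. The loop below is only a userspace demonstration of that mapping; the CRC computation itself is skipped and sample 6-bit values are fed in directly.

#include <stdio.h>

/* sketch of the hash-to-register mapping: word index = hash >> 4,
 * bit index = 15 - (hash & 0xf), exactly as in the hunk above */
static void hash_to_mar(unsigned int hash, unsigned int *reg, unsigned int *bit)
{
	hash &= 0x3f;			/* 6 significant bits */
	*reg = hash >> 4;		/* which of MAR0..MAR3 */
	*bit = 15 - (hash & 0xf);	/* which bit inside that register */
}

int main(void)
{
	unsigned int reg, bit, hash;

	for (hash = 0; hash < 64; hash += 17) {	/* a few sample values */
		hash_to_mar(hash, &reg, &bit);
		printf("hash %2u -> MAR%u bit %2u\n", hash, reg, bit);
	}
	return 0;
}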
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index 9d3ebf3e975e..dbb1f5a1824c 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -186,8 +186,13 @@ static DEFINE_PCI_DEVICE_TABLE(rtl8169_pci_tbl) = {
186 186
187MODULE_DEVICE_TABLE(pci, rtl8169_pci_tbl); 187MODULE_DEVICE_TABLE(pci, rtl8169_pci_tbl);
188 188
189static int rx_copybreak = 200; 189/*
190static int use_dac = -1; 190 * we set our copybreak very high so that we don't have
191 * to allocate 16k frames all the time (see note in
192 * rtl8169_open())
193 */
194static int rx_copybreak = 16383;
195static int use_dac;
191static struct { 196static struct {
192 u32 msg_enable; 197 u32 msg_enable;
193} debug = { -1 }; 198} debug = { -1 };
@@ -511,8 +516,7 @@ MODULE_DESCRIPTION("RealTek RTL-8169 Gigabit Ethernet driver");
511module_param(rx_copybreak, int, 0); 516module_param(rx_copybreak, int, 0);
512MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames"); 517MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames");
513module_param(use_dac, int, 0); 518module_param(use_dac, int, 0);
514MODULE_PARM_DESC(use_dac, "Enable PCI DAC. -1 defaults on for PCI Express only." 519MODULE_PARM_DESC(use_dac, "Enable PCI DAC. Unsafe on 32 bit PCI slot.");
515" Unsafe on 32 bit PCI slot.");
516module_param_named(debug, debug.msg_enable, int, 0); 520module_param_named(debug, debug.msg_enable, int, 0);
517MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 16=all)"); 521MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 16=all)");
518MODULE_LICENSE("GPL"); 522MODULE_LICENSE("GPL");
@@ -2821,8 +2825,8 @@ static void rtl_rar_set(struct rtl8169_private *tp, u8 *addr)
2821 spin_lock_irq(&tp->lock); 2825 spin_lock_irq(&tp->lock);
2822 2826
2823 RTL_W8(Cfg9346, Cfg9346_Unlock); 2827 RTL_W8(Cfg9346, Cfg9346_Unlock);
2824 RTL_W32(MAC0, low);
2825 RTL_W32(MAC4, high); 2828 RTL_W32(MAC4, high);
2829 RTL_W32(MAC0, low);
2826 RTL_W8(Cfg9346, Cfg9346_Lock); 2830 RTL_W8(Cfg9346, Cfg9346_Lock);
2827 2831
2828 spin_unlock_irq(&tp->lock); 2832 spin_unlock_irq(&tp->lock);
@@ -2974,7 +2978,6 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2974 void __iomem *ioaddr; 2978 void __iomem *ioaddr;
2975 unsigned int i; 2979 unsigned int i;
2976 int rc; 2980 int rc;
2977 int this_use_dac = use_dac;
2978 2981
2979 if (netif_msg_drv(&debug)) { 2982 if (netif_msg_drv(&debug)) {
2980 printk(KERN_INFO "%s Gigabit Ethernet driver %s loaded\n", 2983 printk(KERN_INFO "%s Gigabit Ethernet driver %s loaded\n",
@@ -3040,17 +3043,8 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
3040 3043
3041 tp->cp_cmd = PCIMulRW | RxChkSum; 3044 tp->cp_cmd = PCIMulRW | RxChkSum;
3042 3045
3043 tp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
3044 if (!tp->pcie_cap)
3045 netif_info(tp, probe, dev, "no PCI Express capability\n");
3046
3047 if (this_use_dac < 0)
3048 this_use_dac = tp->pcie_cap != 0;
3049
3050 if ((sizeof(dma_addr_t) > 4) && 3046 if ((sizeof(dma_addr_t) > 4) &&
3051 this_use_dac && 3047 !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) && use_dac) {
3052 !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
3053 netif_info(tp, probe, dev, "using 64-bit DMA\n");
3054 tp->cp_cmd |= PCIDAC; 3048 tp->cp_cmd |= PCIDAC;
3055 dev->features |= NETIF_F_HIGHDMA; 3049 dev->features |= NETIF_F_HIGHDMA;
3056 } else { 3050 } else {
@@ -3069,6 +3063,10 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
3069 goto err_out_free_res_4; 3063 goto err_out_free_res_4;
3070 } 3064 }
3071 3065
3066 tp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
3067 if (!tp->pcie_cap)
3068 netif_info(tp, probe, dev, "no PCI Express capability\n");
3069
3072 RTL_W16(IntrMask, 0x0000); 3070 RTL_W16(IntrMask, 0x0000);
3073 3071
3074 /* Soft reset the chip. */ 3072 /* Soft reset the chip. */
@@ -3224,9 +3222,13 @@ static void __devexit rtl8169_remove_one(struct pci_dev *pdev)
3224} 3222}
3225 3223
3226static void rtl8169_set_rxbufsize(struct rtl8169_private *tp, 3224static void rtl8169_set_rxbufsize(struct rtl8169_private *tp,
3227 struct net_device *dev) 3225 unsigned int mtu)
3228{ 3226{
3229 unsigned int max_frame = dev->mtu + VLAN_ETH_HLEN + ETH_FCS_LEN; 3227 unsigned int max_frame = mtu + VLAN_ETH_HLEN + ETH_FCS_LEN;
3228
3229 if (max_frame != 16383)
3230 printk(KERN_WARNING PFX "WARNING! Changing of MTU on this "
3231 "NIC may lead to frame reception errors!\n");
3230 3232
3231 tp->rx_buf_sz = (max_frame > RX_BUF_SIZE) ? max_frame : RX_BUF_SIZE; 3233 tp->rx_buf_sz = (max_frame > RX_BUF_SIZE) ? max_frame : RX_BUF_SIZE;
3232} 3234}
@@ -3238,7 +3240,17 @@ static int rtl8169_open(struct net_device *dev)
3238 int retval = -ENOMEM; 3240 int retval = -ENOMEM;
3239 3241
3240 3242
3241 rtl8169_set_rxbufsize(tp, dev); 3243 /*
3244 * Note that we use a magic value here; it's weird, I know;
3245 * it's done because some subset of rtl8169 hardware suffers from
3246 * a problem in which frames received that are longer than
3247 * the size set in RxMaxSize register return garbage sizes
3248 * when received. To avoid this we need to turn off filtering,
3249 * which is done by setting a value of 16383 in the RxMaxSize register
3250 * and allocating 16k frames to handle the largest possible rx value
3251 * that's what the magic math below does.
3252 */
3253 rtl8169_set_rxbufsize(tp, 16383 - VLAN_ETH_HLEN - ETH_FCS_LEN);
3242 3254
3243 /* 3255 /*
3244 * Rx and Tx descriptors need 256-byte alignment. 3256 * Rx and Tx descriptors need 256-byte alignment.
@@ -3891,7 +3903,7 @@ static int rtl8169_change_mtu(struct net_device *dev, int new_mtu)
3891 3903
3892 rtl8169_down(dev); 3904 rtl8169_down(dev);
3893 3905
3894 rtl8169_set_rxbufsize(tp, dev); 3906 rtl8169_set_rxbufsize(tp, dev->mtu);
3895 3907
3896 ret = rtl8169_init_ring(dev); 3908 ret = rtl8169_init_ring(dev);
3897 if (ret < 0) 3909 if (ret < 0)
@@ -4754,8 +4766,8 @@ static void rtl_set_rx_mode(struct net_device *dev)
4754 mc_filter[1] = swab32(data); 4766 mc_filter[1] = swab32(data);
4755 } 4767 }
4756 4768
4757 RTL_W32(MAR0 + 0, mc_filter[0]);
4758 RTL_W32(MAR0 + 4, mc_filter[1]); 4769 RTL_W32(MAR0 + 4, mc_filter[1]);
4770 RTL_W32(MAR0 + 0, mc_filter[0]);
4759 4771
4760 RTL_W32(RxConfig, tmp); 4772 RTL_W32(RxConfig, tmp);
4761 4773
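The rtl8169_open() hunk above chooses the argument so that max_frame comes out to exactly the 16383 written to the RxMaxSize register. A trivial userspace re-check of that arithmetic (the two header constants are quoted from if_vlan.h and if_ether.h; everything else is illustration only):

#include <stdio.h>

#define VLAN_ETH_HLEN	18	/* dest + src + VLAN tag + type */
#define ETH_FCS_LEN	4	/* frame check sequence */

int main(void)
{
	/* rtl8169_open() now passes this fixed value instead of dev->mtu */
	unsigned int mtu = 16383 - VLAN_ETH_HLEN - ETH_FCS_LEN;
	unsigned int max_frame = mtu + VLAN_ETH_HLEN + ETH_FCS_LEN;

	/* matches the 16383 programmed into RxMaxSize, so the chip's size
	 * filtering is effectively disabled */
	printf("mtu %u -> max_frame %u\n", mtu, max_frame);	/* 16361 -> 16383 */
	return 0;
}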
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c
index df70657260dd..2eb7f8a0d926 100644
--- a/drivers/net/s2io.c
+++ b/drivers/net/s2io.c
@@ -5819,10 +5819,8 @@ static void s2io_vpd_read(struct s2io_nic *nic)
5819 } 5819 }
5820 } 5820 }
5821 5821
5822 if ((!fail) && (vpd_data[1] < VPD_STRING_LEN)) { 5822 if ((!fail) && (vpd_data[1] < VPD_STRING_LEN))
5823 memset(nic->product_name, 0, vpd_data[1]);
5824 memcpy(nic->product_name, &vpd_data[3], vpd_data[1]); 5823 memcpy(nic->product_name, &vpd_data[3], vpd_data[1]);
5825 }
5826 kfree(vpd_data); 5824 kfree(vpd_data);
5827 swstats->mem_freed += 256; 5825 swstats->mem_freed += 256;
5828} 5826}
diff --git a/drivers/net/sfc/regs.h b/drivers/net/sfc/regs.h
index 89d606fe9248..18a3be428348 100644
--- a/drivers/net/sfc/regs.h
+++ b/drivers/net/sfc/regs.h
@@ -95,7 +95,7 @@
95#define FRF_AA_INT_ACK_KER_FIELD_LBN 0 95#define FRF_AA_INT_ACK_KER_FIELD_LBN 0
96#define FRF_AA_INT_ACK_KER_FIELD_WIDTH 32 96#define FRF_AA_INT_ACK_KER_FIELD_WIDTH 32
97 97
98/* INT_ISR0_REG: Function 0 Interrupt Acknowlege Status register */ 98/* INT_ISR0_REG: Function 0 Interrupt Acknowledge Status register */
99#define FR_BZ_INT_ISR0 0x00000090 99#define FR_BZ_INT_ISR0 0x00000090
100#define FRF_BZ_INT_ISR_REG_LBN 0 100#define FRF_BZ_INT_ISR_REG_LBN 0
101#define FRF_BZ_INT_ISR_REG_WIDTH 64 101#define FRF_BZ_INT_ISR_REG_WIDTH 64
diff --git a/drivers/net/sgiseeq.c b/drivers/net/sgiseeq.c
index ed999d31f1fa..37f6a00cde17 100644
--- a/drivers/net/sgiseeq.c
+++ b/drivers/net/sgiseeq.c
@@ -592,8 +592,10 @@ static int sgiseeq_start_xmit(struct sk_buff *skb, struct net_device *dev)
592 /* Setup... */ 592 /* Setup... */
593 len = skb->len; 593 len = skb->len;
594 if (len < ETH_ZLEN) { 594 if (len < ETH_ZLEN) {
595 if (skb_padto(skb, ETH_ZLEN)) 595 if (skb_padto(skb, ETH_ZLEN)) {
596 spin_unlock_irqrestore(&sp->tx_lock, flags);
596 return NETDEV_TX_OK; 597 return NETDEV_TX_OK;
598 }
597 len = ETH_ZLEN; 599 len = ETH_ZLEN;
598 } 600 }
599 601
diff --git a/drivers/net/skfp/ess.c b/drivers/net/skfp/ess.c
index a85efcfd9d0e..e8387d25f24a 100644
--- a/drivers/net/skfp/ess.c
+++ b/drivers/net/skfp/ess.c
@@ -557,7 +557,7 @@ static void ess_send_alc_req(struct s_smc *smc)
557 557
558 /* 558 /*
559 * send never allocation request where the requested payload and 559 * send never allocation request where the requested payload and
560 * overhead is zero or deallocate bandwidht when no bandwidth is 560 * overhead is zero or deallocate bandwidth when no bandwidth is
561 * parsed 561 * parsed
562 */ 562 */
563 if (!smc->mib.fddiESSPayload) { 563 if (!smc->mib.fddiESSPayload) {
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index 653bdd76ef46..d8ec4c11fd49 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -4863,6 +4863,7 @@ static int sky2_resume(struct pci_dev *pdev)
4863 if (!hw) 4863 if (!hw)
4864 return 0; 4864 return 0;
4865 4865
4866 rtnl_lock();
4866 err = pci_set_power_state(pdev, PCI_D0); 4867 err = pci_set_power_state(pdev, PCI_D0);
4867 if (err) 4868 if (err)
4868 goto out; 4869 goto out;
@@ -4884,7 +4885,6 @@ static int sky2_resume(struct pci_dev *pdev)
4884 sky2_write32(hw, B0_IMSK, Y2_IS_BASE); 4885 sky2_write32(hw, B0_IMSK, Y2_IS_BASE);
4885 napi_enable(&hw->napi); 4886 napi_enable(&hw->napi);
4886 4887
4887 rtnl_lock();
4888 for (i = 0; i < hw->ports; i++) { 4888 for (i = 0; i < hw->ports; i++) {
4889 err = sky2_reattach(hw->dev[i]); 4889 err = sky2_reattach(hw->dev[i]);
4890 if (err) 4890 if (err)
diff --git a/drivers/net/smc91x.h b/drivers/net/smc91x.h
index a6ee883d1b0e..8d2772cc42f2 100644
--- a/drivers/net/smc91x.h
+++ b/drivers/net/smc91x.h
@@ -344,6 +344,34 @@ static inline void LPD7_SMC_outsw (unsigned char* a, int r,
344 344
345#define SMC_IRQ_FLAGS IRQF_TRIGGER_HIGH 345#define SMC_IRQ_FLAGS IRQF_TRIGGER_HIGH
346 346
347#elif defined(CONFIG_COLDFIRE)
348
349#define SMC_CAN_USE_8BIT 0
350#define SMC_CAN_USE_16BIT 1
351#define SMC_CAN_USE_32BIT 0
352#define SMC_NOWAIT 1
353
354static inline void mcf_insw(void *a, unsigned char *p, int l)
355{
356 u16 *wp = (u16 *) p;
357 while (l-- > 0)
358 *wp++ = readw(a);
359}
360
361static inline void mcf_outsw(void *a, unsigned char *p, int l)
362{
363 u16 *wp = (u16 *) p;
364 while (l-- > 0)
365 writew(*wp++, a);
366}
367
368#define SMC_inw(a, r) _swapw(readw((a) + (r)))
369#define SMC_outw(v, a, r) writew(_swapw(v), (a) + (r))
370#define SMC_insw(a, r, p, l) mcf_insw(a + r, p, l)
371#define SMC_outsw(a, r, p, l) mcf_outsw(a + r, p, l)
372
373#define SMC_IRQ_FLAGS (IRQF_DISABLED)
374
347#else 375#else
348 376
349/* 377/*
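The new ColdFire accessors above run every 16-bit register access through _swapw(). Assuming _swapw() is the usual m68k byte exchange of a 16-bit word (that assumption is not visible in the hunk), the operation itself is just the following; why the swap is needed at all depends on how the SMC chip is wired on a given board.

#include <stdio.h>
#include <stdint.h>

/* exchange the two bytes of a 16-bit value */
static uint16_t swapw(uint16_t v)
{
	return (uint16_t)((v << 8) | (v >> 8));
}

int main(void)
{
	uint16_t raw = 0x1234;
	printf("%04x -> %04x\n", raw, swapw(raw));	/* 1234 -> 3412 */
	return 0;
}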
diff --git a/drivers/net/smsc9420.c b/drivers/net/smsc9420.c
index 30110a11d737..34fa10d8ad40 100644
--- a/drivers/net/smsc9420.c
+++ b/drivers/net/smsc9420.c
@@ -1347,7 +1347,7 @@ static int smsc9420_open(struct net_device *dev)
1347 1347
1348 netif_carrier_off(dev); 1348 netif_carrier_off(dev);
1349 1349
1350 /* disable, mask and acknowlege all interrupts */ 1350 /* disable, mask and acknowledge all interrupts */
1351 spin_lock_irqsave(&pd->int_lock, flags); 1351 spin_lock_irqsave(&pd->int_lock, flags);
1352 int_cfg = smsc9420_reg_read(pd, INT_CFG) & (~INT_CFG_IRQ_EN_); 1352 int_cfg = smsc9420_reg_read(pd, INT_CFG) & (~INT_CFG_IRQ_EN_);
1353 smsc9420_reg_write(pd, INT_CFG, int_cfg); 1353 smsc9420_reg_write(pd, INT_CFG, int_cfg);
diff --git a/drivers/net/spider_net.c b/drivers/net/spider_net.c
index 2f8a8c32021e..5ba9d989f8fc 100644
--- a/drivers/net/spider_net.c
+++ b/drivers/net/spider_net.c
@@ -474,7 +474,7 @@ spider_net_prepare_rx_descr(struct spider_net_card *card,
474 * spider_net_enable_rxchtails - sets RX dmac chain tail addresses 474 * spider_net_enable_rxchtails - sets RX dmac chain tail addresses
475 * @card: card structure 475 * @card: card structure
476 * 476 *
477 * spider_net_enable_rxchtails sets the RX DMAC chain tail adresses in the 477 * spider_net_enable_rxchtails sets the RX DMAC chain tail addresses in the
478 * chip by writing to the appropriate register. DMA is enabled in 478 * chip by writing to the appropriate register. DMA is enabled in
479 * spider_net_enable_rxdmac. 479 * spider_net_enable_rxdmac.
480 */ 480 */
@@ -1820,7 +1820,7 @@ spider_net_enable_card(struct spider_net_card *card)
1820 1820
1821 spider_net_write_reg(card, SPIDER_NET_ECMODE, SPIDER_NET_ECMODE_VALUE); 1821 spider_net_write_reg(card, SPIDER_NET_ECMODE, SPIDER_NET_ECMODE_VALUE);
1822 1822
1823 /* set chain tail adress for RX chains and 1823 /* set chain tail address for RX chains and
1824 * enable DMA */ 1824 * enable DMA */
1825 spider_net_enable_rxchtails(card); 1825 spider_net_enable_rxchtails(card);
1826 spider_net_enable_rxdmac(card); 1826 spider_net_enable_rxdmac(card);
diff --git a/drivers/net/stmmac/Kconfig b/drivers/net/stmmac/Kconfig
index fb287649a305..eb63d44748a7 100644
--- a/drivers/net/stmmac/Kconfig
+++ b/drivers/net/stmmac/Kconfig
@@ -2,6 +2,7 @@ config STMMAC_ETH
2 tristate "STMicroelectronics 10/100/1000 Ethernet driver" 2 tristate "STMicroelectronics 10/100/1000 Ethernet driver"
3 select MII 3 select MII
4 select PHYLIB 4 select PHYLIB
5 select CRC32
5 depends on NETDEVICES && CPU_SUBTYPE_ST40 6 depends on NETDEVICES && CPU_SUBTYPE_ST40
6 help 7 help
7 This is the driver for the Ethernet IPs that are built around a 8 This is the driver for the Ethernet IPs that are built around a
diff --git a/drivers/net/stmmac/stmmac_main.c b/drivers/net/stmmac/stmmac_main.c
index a6733612d64a..55317adec33d 100644
--- a/drivers/net/stmmac/stmmac_main.c
+++ b/drivers/net/stmmac/stmmac_main.c
@@ -1685,7 +1685,7 @@ static int stmmac_dvr_probe(struct platform_device *pdev)
1685 } 1685 }
1686 pr_info("done!\n"); 1686 pr_info("done!\n");
1687 1687
1688 if (!request_mem_region(res->start, (res->end - res->start), 1688 if (!request_mem_region(res->start, resource_size(res),
1689 pdev->name)) { 1689 pdev->name)) {
1690 pr_err("%s: ERROR: memory allocation failed" 1690 pr_err("%s: ERROR: memory allocation failed"
1691 "cannot get the I/O addr 0x%x\n", 1691 "cannot get the I/O addr 0x%x\n",
@@ -1694,9 +1694,9 @@ static int stmmac_dvr_probe(struct platform_device *pdev)
1694 goto out; 1694 goto out;
1695 } 1695 }
1696 1696
1697 addr = ioremap(res->start, (res->end - res->start)); 1697 addr = ioremap(res->start, resource_size(res));
1698 if (!addr) { 1698 if (!addr) {
1699 pr_err("%s: ERROR: memory mapping failed \n", __func__); 1699 pr_err("%s: ERROR: memory mapping failed\n", __func__);
1700 ret = -ENOMEM; 1700 ret = -ENOMEM;
1701 goto out; 1701 goto out;
1702 } 1702 }
@@ -1774,7 +1774,7 @@ static int stmmac_dvr_probe(struct platform_device *pdev)
1774out: 1774out:
1775 if (ret < 0) { 1775 if (ret < 0) {
1776 platform_set_drvdata(pdev, NULL); 1776 platform_set_drvdata(pdev, NULL);
1777 release_mem_region(res->start, (res->end - res->start)); 1777 release_mem_region(res->start, resource_size(res));
1778 if (addr != NULL) 1778 if (addr != NULL)
1779 iounmap(addr); 1779 iounmap(addr);
1780 } 1780 }
@@ -1812,7 +1812,7 @@ static int stmmac_dvr_remove(struct platform_device *pdev)
1812 1812
1813 iounmap((void *)ndev->base_addr); 1813 iounmap((void *)ndev->base_addr);
1814 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1814 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1815 release_mem_region(res->start, (res->end - res->start)); 1815 release_mem_region(res->start, resource_size(res));
1816 1816
1817 free_netdev(ndev); 1817 free_netdev(ndev);
1818 1818
diff --git a/drivers/net/sungem.c b/drivers/net/sungem.c
index 4344017bfaef..70196bc5fe61 100644
--- a/drivers/net/sungem.c
+++ b/drivers/net/sungem.c
@@ -782,7 +782,7 @@ static int gem_rx(struct gem *gp, int work_to_do)
782 break; 782 break;
783 783
784 /* When writing back RX descriptor, GEM writes status 784 /* When writing back RX descriptor, GEM writes status
785 * then buffer address, possibly in seperate transactions. 785 * then buffer address, possibly in separate transactions.
786 * If we don't wait for the chip to write both, we could 786 * If we don't wait for the chip to write both, we could
787 * post a new buffer to this descriptor then have GEM spam 787 * post a new buffer to this descriptor then have GEM spam
788 * on the buffer address. We sync on the RX completion 788 * on the buffer address. We sync on the RX completion
diff --git a/drivers/net/tehuti.c b/drivers/net/tehuti.c
index 0c9780217c87..f5493092521a 100644
--- a/drivers/net/tehuti.c
+++ b/drivers/net/tehuti.c
@@ -1851,7 +1851,7 @@ static void bdx_tx_push_desc(struct bdx_priv *priv, void *data, int size)
1851 * @data - desc's data 1851 * @data - desc's data
1852 * @size - desc's size 1852 * @size - desc's size
1853 * 1853 *
1854 * NOTE: this func does check for available space and, if neccessary, waits for 1854 * NOTE: this func does check for available space and, if necessary, waits for
1855 * NIC to read existing data before writing new one. 1855 * NIC to read existing data before writing new one.
1856 */ 1856 */
1857static void bdx_tx_push_desc_safe(struct bdx_priv *priv, void *data, int size) 1857static void bdx_tx_push_desc_safe(struct bdx_priv *priv, void *data, int size)
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index 0fa7688ab483..22cf1c446de3 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -5279,7 +5279,7 @@ static void tg3_poll_controller(struct net_device *dev)
5279 struct tg3 *tp = netdev_priv(dev); 5279 struct tg3 *tp = netdev_priv(dev);
5280 5280
5281 for (i = 0; i < tp->irq_cnt; i++) 5281 for (i = 0; i < tp->irq_cnt; i++)
5282 tg3_interrupt(tp->napi[i].irq_vec, dev); 5282 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
5283} 5283}
5284#endif 5284#endif
5285 5285
@@ -9776,7 +9776,7 @@ static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
9776 ADVERTISED_Pause | 9776 ADVERTISED_Pause |
9777 ADVERTISED_Asym_Pause; 9777 ADVERTISED_Asym_Pause;
9778 9778
9779 if (!(tp->tg3_flags2 & TG3_FLAG_10_100_ONLY)) 9779 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
9780 mask |= ADVERTISED_1000baseT_Half | 9780 mask |= ADVERTISED_1000baseT_Half |
9781 ADVERTISED_1000baseT_Full; 9781 ADVERTISED_1000baseT_Full;
9782 9782
diff --git a/drivers/net/tokenring/tms380tr.c b/drivers/net/tokenring/tms380tr.c
index 21a01753312a..ee71bcfb3753 100644
--- a/drivers/net/tokenring/tms380tr.c
+++ b/drivers/net/tokenring/tms380tr.c
@@ -693,7 +693,7 @@ static netdev_tx_t tms380tr_hardware_send_packet(struct sk_buff *skb,
693 * NOTE: This function should be used whenever the status of any TPL must be 693 * NOTE: This function should be used whenever the status of any TPL must be
694 * modified by the driver, because the compiler may otherwise change the 694 * modified by the driver, because the compiler may otherwise change the
695 * order of instructions such that writing the TPL status may be executed at 695 * order of instructions such that writing the TPL status may be executed at
696 * an undesireable time. When this function is used, the status is always 696 * an undesirable time. When this function is used, the status is always
697 * written when the function is called. 697 * written when the function is called.
698 */ 698 */
699static void tms380tr_write_tpl_status(TPL *tpl, unsigned int Status) 699static void tms380tr_write_tpl_status(TPL *tpl, unsigned int Status)
@@ -2264,7 +2264,7 @@ static void tms380tr_rcv_status_irq(struct net_device *dev)
2264 * This function should be used whenever the status of any RPL must be 2264 * This function should be used whenever the status of any RPL must be
2265 * modified by the driver, because the compiler may otherwise change the 2265 * modified by the driver, because the compiler may otherwise change the
2266 * order of instructions such that writing the RPL status may be executed 2266 * order of instructions such that writing the RPL status may be executed
2267 * at an undesireable time. When this function is used, the status is 2267 * at an undesirable time. When this function is used, the status is
2268 * always written when the function is called. 2268 * always written when the function is called.
2269 */ 2269 */
2270static void tms380tr_write_rpl_status(RPL *rpl, unsigned int Status) 2270static void tms380tr_write_rpl_status(RPL *rpl, unsigned int Status)
diff --git a/drivers/net/tulip/eeprom.c b/drivers/net/tulip/eeprom.c
index 93f4e8309f81..49f05d1431f5 100644
--- a/drivers/net/tulip/eeprom.c
+++ b/drivers/net/tulip/eeprom.c
@@ -143,6 +143,12 @@ static void __devinit tulip_build_fake_mediatable(struct tulip_private *tp)
143 143
144void __devinit tulip_parse_eeprom(struct net_device *dev) 144void __devinit tulip_parse_eeprom(struct net_device *dev)
145{ 145{
146 /*
147 dev is not registered at this point, so logging messages can't
148 use dev_<level> or netdev_<level> but dev->name is good via a
149 hack in the caller
150 */
151
146 /* The last media info list parsed, for multiport boards. */ 152 /* The last media info list parsed, for multiport boards. */
147 static struct mediatable *last_mediatable; 153 static struct mediatable *last_mediatable;
148 static unsigned char *last_ee_data; 154 static unsigned char *last_ee_data;
@@ -161,15 +167,14 @@ void __devinit tulip_parse_eeprom(struct net_device *dev)
161 if (ee_data[0] == 0xff) { 167 if (ee_data[0] == 0xff) {
162 if (last_mediatable) { 168 if (last_mediatable) {
163 controller_index++; 169 controller_index++;
164 dev_info(&dev->dev, 170 pr_info("%s: Controller %d of multiport board\n",
165 "Controller %d of multiport board\n", 171 dev->name, controller_index);
166 controller_index);
167 tp->mtable = last_mediatable; 172 tp->mtable = last_mediatable;
168 ee_data = last_ee_data; 173 ee_data = last_ee_data;
169 goto subsequent_board; 174 goto subsequent_board;
170 } else 175 } else
171 dev_info(&dev->dev, 176 pr_info("%s: Missing EEPROM, this interface may not work correctly!\n",
172 "Missing EEPROM, this interface may not work correctly!\n"); 177 dev->name);
173 return; 178 return;
174 } 179 }
175 /* Do a fix-up based on the vendor half of the station address prefix. */ 180 /* Do a fix-up based on the vendor half of the station address prefix. */
@@ -181,15 +186,14 @@ void __devinit tulip_parse_eeprom(struct net_device *dev)
181 i++; /* An Accton EN1207, not an outlaw Maxtech. */ 186 i++; /* An Accton EN1207, not an outlaw Maxtech. */
182 memcpy(ee_data + 26, eeprom_fixups[i].newtable, 187 memcpy(ee_data + 26, eeprom_fixups[i].newtable,
183 sizeof(eeprom_fixups[i].newtable)); 188 sizeof(eeprom_fixups[i].newtable));
184 dev_info(&dev->dev, 189 pr_info("%s: Old format EEPROM on '%s' board. Using substitute media control info\n",
185 "Old format EEPROM on '%s' board. Using substitute media control info\n", 190 dev->name, eeprom_fixups[i].name);
186 eeprom_fixups[i].name);
187 break; 191 break;
188 } 192 }
189 } 193 }
190 if (eeprom_fixups[i].name == NULL) { /* No fixup found. */ 194 if (eeprom_fixups[i].name == NULL) { /* No fixup found. */
191 dev_info(&dev->dev, 195 pr_info("%s: Old style EEPROM with no media selection information\n",
192 "Old style EEPROM with no media selection information\n"); 196 dev->name);
193 return; 197 return;
194 } 198 }
195 } 199 }
@@ -217,8 +221,8 @@ subsequent_board:
217 /* there is no phy information, don't even try to build mtable */ 221 /* there is no phy information, don't even try to build mtable */
218 if (count == 0) { 222 if (count == 0) {
219 if (tulip_debug > 0) 223 if (tulip_debug > 0)
220 dev_warn(&dev->dev, 224 pr_warning("%s: no phy info, aborting mtable build\n",
221 "no phy info, aborting mtable build\n"); 225 dev->name);
222 return; 226 return;
223 } 227 }
224 228
@@ -234,8 +238,10 @@ subsequent_board:
234 mtable->has_nonmii = mtable->has_mii = mtable->has_reset = 0; 238 mtable->has_nonmii = mtable->has_mii = mtable->has_reset = 0;
235 mtable->csr15dir = mtable->csr15val = 0; 239 mtable->csr15dir = mtable->csr15val = 0;
236 240
237 dev_info(&dev->dev, "EEPROM default media type %s\n", 241 pr_info("%s: EEPROM default media type %s\n",
238 media & 0x0800 ? "Autosense" : medianame[media & MEDIA_MASK]); 242 dev->name,
243 media & 0x0800 ? "Autosense"
244 : medianame[media & MEDIA_MASK]);
239 for (i = 0; i < count; i++) { 245 for (i = 0; i < count; i++) {
240 struct medialeaf *leaf = &mtable->mleaf[i]; 246 struct medialeaf *leaf = &mtable->mleaf[i];
241 247
@@ -298,17 +304,17 @@ subsequent_board:
298 } 304 }
299 if (tulip_debug > 1 && leaf->media == 11) { 305 if (tulip_debug > 1 && leaf->media == 11) {
300 unsigned char *bp = leaf->leafdata; 306 unsigned char *bp = leaf->leafdata;
301 dev_info(&dev->dev, 307 pr_info("%s: MII interface PHY %d, setup/reset sequences %d/%d long, capabilities %02x %02x\n",
302 "MII interface PHY %d, setup/reset sequences %d/%d long, capabilities %02x %02x\n", 308 dev->name,
303 bp[0], bp[1], bp[2 + bp[1]*2], 309 bp[0], bp[1], bp[2 + bp[1]*2],
304 bp[5 + bp[2 + bp[1]*2]*2], 310 bp[5 + bp[2 + bp[1]*2]*2],
305 bp[4 + bp[2 + bp[1]*2]*2]); 311 bp[4 + bp[2 + bp[1]*2]*2]);
306 } 312 }
307 dev_info(&dev->dev, 313 pr_info("%s: Index #%d - Media %s (#%d) described by a %s (%d) block\n",
308 "Index #%d - Media %s (#%d) described by a %s (%d) block\n", 314 dev->name,
309 i, medianame[leaf->media & 15], leaf->media, 315 i, medianame[leaf->media & 15], leaf->media,
310 leaf->type < ARRAY_SIZE(block_name) ? block_name[leaf->type] : "<unknown>", 316 leaf->type < ARRAY_SIZE(block_name) ? block_name[leaf->type] : "<unknown>",
311 leaf->type); 317 leaf->type);
312 } 318 }
313 if (new_advertise) 319 if (new_advertise)
314 tp->sym_advertise = new_advertise; 320 tp->sym_advertise = new_advertise;
diff --git a/drivers/net/tulip/uli526x.c b/drivers/net/tulip/uli526x.c
index 0ab05af237e5..a4f09d490531 100644
--- a/drivers/net/tulip/uli526x.c
+++ b/drivers/net/tulip/uli526x.c
@@ -851,13 +851,15 @@ static void uli526x_rx_packet(struct net_device *dev, struct uli526x_board_info
851 851
852 if ( !(rdes0 & 0x8000) || 852 if ( !(rdes0 & 0x8000) ||
853 ((db->cr6_data & CR6_PM) && (rxlen>6)) ) { 853 ((db->cr6_data & CR6_PM) && (rxlen>6)) ) {
854 struct sk_buff *new_skb = NULL;
855
854 skb = rxptr->rx_skb_ptr; 856 skb = rxptr->rx_skb_ptr;
855 857
856 /* Good packet, send to upper layer */ 858 /* Good packet, send to upper layer */
857 /* Short packet uses new SKB */ 859 /* Short packet uses new SKB */
858 if ( (rxlen < RX_COPY_SIZE) && 860 if ((rxlen < RX_COPY_SIZE) &&
859 ( (skb = dev_alloc_skb(rxlen + 2) ) 861 (((new_skb = dev_alloc_skb(rxlen + 2)) != NULL))) {
860 != NULL) ) { 862 skb = new_skb;
861 /* size less than COPY_SIZE, allocate a rxlen SKB */ 863 /* size less than COPY_SIZE, allocate a rxlen SKB */
862 skb_reserve(skb, 2); /* 16byte align */ 864 skb_reserve(skb, 2); /* 16byte align */
863 memcpy(skb_put(skb, rxlen), 865 memcpy(skb_put(skb, rxlen),
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index ce1efa4c0b0d..96c39bddc78c 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1437,7 +1437,7 @@ static int tun_chr_close(struct inode *inode, struct file *file)
1437 1437
1438 __tun_detach(tun); 1438 __tun_detach(tun);
1439 1439
1440 /* If desireable, unregister the netdevice. */ 1440 /* If desirable, unregister the netdevice. */
1441 if (!(tun->flags & TUN_PERSIST)) { 1441 if (!(tun->flags & TUN_PERSIST)) {
1442 rtnl_lock(); 1442 rtnl_lock();
1443 if (dev->reg_state == NETREG_REGISTERED) 1443 if (dev->reg_state == NETREG_REGISTERED)
diff --git a/drivers/net/typhoon.c b/drivers/net/typhoon.c
index 1cf012d3e072..cd24e5f2b2a2 100644
--- a/drivers/net/typhoon.c
+++ b/drivers/net/typhoon.c
@@ -2098,7 +2098,7 @@ typhoon_tx_timeout(struct net_device *dev)
2098 2098
2099 if(typhoon_reset(tp->ioaddr, WaitNoSleep) < 0) { 2099 if(typhoon_reset(tp->ioaddr, WaitNoSleep) < 0) {
2100 netdev_warn(dev, "could not reset in tx timeout\n"); 2100 netdev_warn(dev, "could not reset in tx timeout\n");
2101 goto truely_dead; 2101 goto truly_dead;
2102 } 2102 }
2103 2103
2104 /* If we ever start using the Hi ring, it will need cleaning too */ 2104 /* If we ever start using the Hi ring, it will need cleaning too */
@@ -2107,13 +2107,13 @@ typhoon_tx_timeout(struct net_device *dev)
2107 2107
2108 if(typhoon_start_runtime(tp) < 0) { 2108 if(typhoon_start_runtime(tp) < 0) {
2109 netdev_err(dev, "could not start runtime in tx timeout\n"); 2109 netdev_err(dev, "could not start runtime in tx timeout\n");
2110 goto truely_dead; 2110 goto truly_dead;
2111 } 2111 }
2112 2112
2113 netif_wake_queue(dev); 2113 netif_wake_queue(dev);
2114 return; 2114 return;
2115 2115
2116truely_dead: 2116truly_dead:
2117 /* Reset the hardware, and turn off carrier to avoid more timeouts */ 2117 /* Reset the hardware, and turn off carrier to avoid more timeouts */
2118 typhoon_reset(tp->ioaddr, NoWait); 2118 typhoon_reset(tp->ioaddr, NoWait);
2119 netif_carrier_off(dev); 2119 netif_carrier_off(dev);
diff --git a/drivers/net/ucc_geth.c b/drivers/net/ucc_geth.c
index 23a97518bc1f..1b0aef37e495 100644
--- a/drivers/net/ucc_geth.c
+++ b/drivers/net/ucc_geth.c
@@ -430,7 +430,7 @@ static void hw_add_addr_in_hash(struct ucc_geth_private *ugeth,
430 ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num); 430 ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num);
431 431
432 /* Ethernet frames are defined in Little Endian mode, 432 /* Ethernet frames are defined in Little Endian mode,
433 therefor to insert */ 433 therefore to insert */
434 /* the address to the hash (Big Endian mode), we reverse the bytes.*/ 434 /* the address to the hash (Big Endian mode), we reverse the bytes.*/
435 435
436 set_mac_addr(&p_82xx_addr_filt->taddr.h, p_enet_addr); 436 set_mac_addr(&p_82xx_addr_filt->taddr.h, p_enet_addr);
diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig
index 32d93564a74d..ba56ce4382d9 100644
--- a/drivers/net/usb/Kconfig
+++ b/drivers/net/usb/Kconfig
@@ -204,6 +204,14 @@ config USB_NET_DM9601
204 This option adds support for Davicom DM9601 based USB 1.1 204 This option adds support for Davicom DM9601 based USB 1.1
205 10/100 Ethernet adapters. 205 10/100 Ethernet adapters.
206 206
207config USB_NET_SMSC75XX
208 tristate "SMSC LAN75XX based USB 2.0 gigabit ethernet devices"
209 depends on USB_USBNET
210 select CRC32
211 help
212 This option adds support for SMSC LAN75XX based USB 2.0
213 Gigabit Ethernet adapters.
214
207config USB_NET_SMSC95XX 215config USB_NET_SMSC95XX
208 tristate "SMSC LAN95XX based USB 2.0 10/100 ethernet devices" 216 tristate "SMSC LAN95XX based USB 2.0 10/100 ethernet devices"
209 depends on USB_USBNET 217 depends on USB_USBNET
diff --git a/drivers/net/usb/Makefile b/drivers/net/usb/Makefile
index e17afb78f372..82ea62955b56 100644
--- a/drivers/net/usb/Makefile
+++ b/drivers/net/usb/Makefile
@@ -11,6 +11,7 @@ obj-$(CONFIG_USB_NET_AX8817X) += asix.o
11obj-$(CONFIG_USB_NET_CDCETHER) += cdc_ether.o 11obj-$(CONFIG_USB_NET_CDCETHER) += cdc_ether.o
12obj-$(CONFIG_USB_NET_CDC_EEM) += cdc_eem.o 12obj-$(CONFIG_USB_NET_CDC_EEM) += cdc_eem.o
13obj-$(CONFIG_USB_NET_DM9601) += dm9601.o 13obj-$(CONFIG_USB_NET_DM9601) += dm9601.o
14obj-$(CONFIG_USB_NET_SMSC75XX) += smsc75xx.o
14obj-$(CONFIG_USB_NET_SMSC95XX) += smsc95xx.o 15obj-$(CONFIG_USB_NET_SMSC95XX) += smsc95xx.o
15obj-$(CONFIG_USB_NET_GL620A) += gl620a.o 16obj-$(CONFIG_USB_NET_GL620A) += gl620a.o
16obj-$(CONFIG_USB_NET_NET1080) += net1080.o 17obj-$(CONFIG_USB_NET_NET1080) += net1080.o
diff --git a/drivers/net/usb/asix.c b/drivers/net/usb/asix.c
index 20e34608fa4a..9e05639435f2 100644
--- a/drivers/net/usb/asix.c
+++ b/drivers/net/usb/asix.c
@@ -54,6 +54,7 @@ static const char driver_name [] = "asix";
54#define AX_CMD_WRITE_IPG0 0x12 54#define AX_CMD_WRITE_IPG0 0x12
55#define AX_CMD_WRITE_IPG1 0x13 55#define AX_CMD_WRITE_IPG1 0x13
56#define AX_CMD_READ_NODE_ID 0x13 56#define AX_CMD_READ_NODE_ID 0x13
57#define AX_CMD_WRITE_NODE_ID 0x14
57#define AX_CMD_WRITE_IPG2 0x14 58#define AX_CMD_WRITE_IPG2 0x14
58#define AX_CMD_WRITE_MULTI_FILTER 0x16 59#define AX_CMD_WRITE_MULTI_FILTER 0x16
59#define AX88172_CMD_READ_NODE_ID 0x17 60#define AX88172_CMD_READ_NODE_ID 0x17
@@ -165,6 +166,7 @@ static const char driver_name [] = "asix";
165/* This structure cannot exceed sizeof(unsigned long [5]) AKA 20 bytes */ 166/* This structure cannot exceed sizeof(unsigned long [5]) AKA 20 bytes */
166struct asix_data { 167struct asix_data {
167 u8 multi_filter[AX_MCAST_FILTER_SIZE]; 168 u8 multi_filter[AX_MCAST_FILTER_SIZE];
169 u8 mac_addr[ETH_ALEN];
168 u8 phymode; 170 u8 phymode;
169 u8 ledmode; 171 u8 ledmode;
170 u8 eeprom_len; 172 u8 eeprom_len;
@@ -732,6 +734,30 @@ static int asix_ioctl (struct net_device *net, struct ifreq *rq, int cmd)
732 return generic_mii_ioctl(&dev->mii, if_mii(rq), cmd, NULL); 734 return generic_mii_ioctl(&dev->mii, if_mii(rq), cmd, NULL);
733} 735}
734 736
737static int asix_set_mac_address(struct net_device *net, void *p)
738{
739 struct usbnet *dev = netdev_priv(net);
740 struct asix_data *data = (struct asix_data *)&dev->data;
741 struct sockaddr *addr = p;
742
743 if (netif_running(net))
744 return -EBUSY;
745 if (!is_valid_ether_addr(addr->sa_data))
746 return -EADDRNOTAVAIL;
747
748 memcpy(net->dev_addr, addr->sa_data, ETH_ALEN);
749
750 /* We use the 20 byte dev->data
751 * for our 6 byte mac buffer
752 * to avoid allocating memory that
753 * is tricky to free later */
754 memcpy(data->mac_addr, addr->sa_data, ETH_ALEN);
755 asix_write_cmd_async(dev, AX_CMD_WRITE_NODE_ID, 0, 0, ETH_ALEN,
756 data->mac_addr);
757
758 return 0;
759}
760
735/* We need to override some ethtool_ops so we require our 761/* We need to override some ethtool_ops so we require our
736 own structure so we don't interfere with other usbnet 762 own structure so we don't interfere with other usbnet
737 devices that may be connected at the same time. */ 763 devices that may be connected at the same time. */
@@ -919,7 +945,7 @@ static const struct net_device_ops ax88772_netdev_ops = {
919 .ndo_start_xmit = usbnet_start_xmit, 945 .ndo_start_xmit = usbnet_start_xmit,
920 .ndo_tx_timeout = usbnet_tx_timeout, 946 .ndo_tx_timeout = usbnet_tx_timeout,
921 .ndo_change_mtu = usbnet_change_mtu, 947 .ndo_change_mtu = usbnet_change_mtu,
922 .ndo_set_mac_address = eth_mac_addr, 948 .ndo_set_mac_address = asix_set_mac_address,
923 .ndo_validate_addr = eth_validate_addr, 949 .ndo_validate_addr = eth_validate_addr,
924 .ndo_do_ioctl = asix_ioctl, 950 .ndo_do_ioctl = asix_ioctl,
925 .ndo_set_multicast_list = asix_set_multicast, 951 .ndo_set_multicast_list = asix_set_multicast,
@@ -1213,7 +1239,7 @@ static const struct net_device_ops ax88178_netdev_ops = {
1213 .ndo_stop = usbnet_stop, 1239 .ndo_stop = usbnet_stop,
1214 .ndo_start_xmit = usbnet_start_xmit, 1240 .ndo_start_xmit = usbnet_start_xmit,
1215 .ndo_tx_timeout = usbnet_tx_timeout, 1241 .ndo_tx_timeout = usbnet_tx_timeout,
1216 .ndo_set_mac_address = eth_mac_addr, 1242 .ndo_set_mac_address = asix_set_mac_address,
1217 .ndo_validate_addr = eth_validate_addr, 1243 .ndo_validate_addr = eth_validate_addr,
1218 .ndo_set_multicast_list = asix_set_multicast, 1244 .ndo_set_multicast_list = asix_set_multicast,
1219 .ndo_do_ioctl = asix_ioctl, 1245 .ndo_do_ioctl = asix_ioctl,
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index 6895f1531238..be0cc99e881a 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -1155,9 +1155,6 @@ static void _hso_serial_set_termios(struct tty_struct *tty,
1155static void hso_resubmit_rx_bulk_urb(struct hso_serial *serial, struct urb *urb) 1155static void hso_resubmit_rx_bulk_urb(struct hso_serial *serial, struct urb *urb)
1156{ 1156{
1157 int result; 1157 int result;
1158#ifdef CONFIG_HSO_AUTOPM
1159 usb_mark_last_busy(urb->dev);
1160#endif
1161 /* We are done with this URB, resubmit it. Prep the USB to wait for 1158 /* We are done with this URB, resubmit it. Prep the USB to wait for
1162 * another frame */ 1159 * another frame */
1163 usb_fill_bulk_urb(urb, serial->parent->usb, 1160 usb_fill_bulk_urb(urb, serial->parent->usb,
diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c
new file mode 100644
index 000000000000..300e3e764fa2
--- /dev/null
+++ b/drivers/net/usb/smsc75xx.c
@@ -0,0 +1,1288 @@
1 /***************************************************************************
2 *
3 * Copyright (C) 2007-2010 SMSC
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation; either version 2
8 * of the License, or (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
18 *
19 *****************************************************************************/
20
21#include <linux/module.h>
22#include <linux/kmod.h>
23#include <linux/init.h>
24#include <linux/netdevice.h>
25#include <linux/etherdevice.h>
26#include <linux/ethtool.h>
27#include <linux/mii.h>
28#include <linux/usb.h>
29#include <linux/crc32.h>
30#include <linux/usb/usbnet.h>
31#include "smsc75xx.h"
32
33#define SMSC_CHIPNAME "smsc75xx"
34#define SMSC_DRIVER_VERSION "1.0.0"
35#define HS_USB_PKT_SIZE (512)
36#define FS_USB_PKT_SIZE (64)
37#define DEFAULT_HS_BURST_CAP_SIZE (16 * 1024 + 5 * HS_USB_PKT_SIZE)
38#define DEFAULT_FS_BURST_CAP_SIZE (6 * 1024 + 33 * FS_USB_PKT_SIZE)
39#define DEFAULT_BULK_IN_DELAY (0x00002000)
40#define MAX_SINGLE_PACKET_SIZE (9000)
41#define LAN75XX_EEPROM_MAGIC (0x7500)
42#define EEPROM_MAC_OFFSET (0x01)
43#define DEFAULT_TX_CSUM_ENABLE (true)
44#define DEFAULT_RX_CSUM_ENABLE (true)
45#define DEFAULT_TSO_ENABLE (true)
46#define SMSC75XX_INTERNAL_PHY_ID (1)
47#define SMSC75XX_TX_OVERHEAD (8)
48#define MAX_RX_FIFO_SIZE (20 * 1024)
49#define MAX_TX_FIFO_SIZE (12 * 1024)
50#define USB_VENDOR_ID_SMSC (0x0424)
51#define USB_PRODUCT_ID_LAN7500 (0x7500)
52#define USB_PRODUCT_ID_LAN7505 (0x7505)
53
54#define check_warn(ret, fmt, args...) \
55 ({ if (ret < 0) netdev_warn(dev->net, fmt, ##args); })
56
57#define check_warn_return(ret, fmt, args...) \
58 ({ if (ret < 0) { netdev_warn(dev->net, fmt, ##args); return ret; } })
59
60#define check_warn_goto_done(ret, fmt, args...) \
61 ({ if (ret < 0) { netdev_warn(dev->net, fmt, ##args); goto done; } })
62
63struct smsc75xx_priv {
64 struct usbnet *dev;
65 u32 rfe_ctl;
66 u32 multicast_hash_table[DP_SEL_VHF_HASH_LEN];
67 bool use_rx_csum;
68 struct mutex dataport_mutex;
69 spinlock_t rfe_ctl_lock;
70 struct work_struct set_multicast;
71};
72
73struct usb_context {
74 struct usb_ctrlrequest req;
75 struct usbnet *dev;
76};
77
78static int turbo_mode = true;
79module_param(turbo_mode, bool, 0644);
80MODULE_PARM_DESC(turbo_mode, "Enable multiple frames per Rx transaction");
81
82static int __must_check smsc75xx_read_reg(struct usbnet *dev, u32 index,
83 u32 *data)
84{
85 u32 *buf = kmalloc(4, GFP_KERNEL);
86 int ret;
87
88 BUG_ON(!dev);
89
90 if (!buf)
91 return -ENOMEM;
92
93 ret = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
94 USB_VENDOR_REQUEST_READ_REGISTER,
95 USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
96 00, index, buf, 4, USB_CTRL_GET_TIMEOUT);
97
98 if (unlikely(ret < 0))
99 netdev_warn(dev->net,
100 "Failed to read register index 0x%08x", index);
101
102 le32_to_cpus(buf);
103 *data = *buf;
104 kfree(buf);
105
106 return ret;
107}
108
109static int __must_check smsc75xx_write_reg(struct usbnet *dev, u32 index,
110 u32 data)
111{
112 u32 *buf = kmalloc(4, GFP_KERNEL);
113 int ret;
114
115 BUG_ON(!dev);
116
117 if (!buf)
118 return -ENOMEM;
119
120 *buf = data;
121 cpu_to_le32s(buf);
122
123 ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
124 USB_VENDOR_REQUEST_WRITE_REGISTER,
125 USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
126 00, index, buf, 4, USB_CTRL_SET_TIMEOUT);
127
128 if (unlikely(ret < 0))
129 netdev_warn(dev->net,
130 "Failed to write register index 0x%08x", index);
131
132 kfree(buf);
133
134 return ret;
135}
136
137/* Loop until the read is completed with timeout
138 * called with phy_mutex held */
139static int smsc75xx_phy_wait_not_busy(struct usbnet *dev)
140{
141 unsigned long start_time = jiffies;
142 u32 val;
143 int ret;
144
145 do {
146 ret = smsc75xx_read_reg(dev, MII_ACCESS, &val);
147 check_warn_return(ret, "Error reading MII_ACCESS");
148
149 if (!(val & MII_ACCESS_BUSY))
150 return 0;
151 } while (!time_after(jiffies, start_time + HZ));
152
153 return -EIO;
154}
155
156static int smsc75xx_mdio_read(struct net_device *netdev, int phy_id, int idx)
157{
158 struct usbnet *dev = netdev_priv(netdev);
159 u32 val, addr;
160 int ret;
161
162 mutex_lock(&dev->phy_mutex);
163
164 /* confirm MII not busy */
165 ret = smsc75xx_phy_wait_not_busy(dev);
166 check_warn_goto_done(ret, "MII is busy in smsc75xx_mdio_read");
167
168 /* set the address, index & direction (read from PHY) */
169 phy_id &= dev->mii.phy_id_mask;
170 idx &= dev->mii.reg_num_mask;
171 addr = ((phy_id << MII_ACCESS_PHY_ADDR_SHIFT) & MII_ACCESS_PHY_ADDR)
172 | ((idx << MII_ACCESS_REG_ADDR_SHIFT) & MII_ACCESS_REG_ADDR)
173 | MII_ACCESS_READ;
174 ret = smsc75xx_write_reg(dev, MII_ACCESS, addr);
175 check_warn_goto_done(ret, "Error writing MII_ACCESS");
176
177 ret = smsc75xx_phy_wait_not_busy(dev);
178 check_warn_goto_done(ret, "Timed out reading MII reg %02X", idx);
179
180 ret = smsc75xx_read_reg(dev, MII_DATA, &val);
181 check_warn_goto_done(ret, "Error reading MII_DATA");
182
183 ret = (u16)(val & 0xFFFF);
184
185done:
186 mutex_unlock(&dev->phy_mutex);
187 return ret;
188}
189
190static void smsc75xx_mdio_write(struct net_device *netdev, int phy_id, int idx,
191 int regval)
192{
193 struct usbnet *dev = netdev_priv(netdev);
194 u32 val, addr;
195 int ret;
196
197 mutex_lock(&dev->phy_mutex);
198
199 /* confirm MII not busy */
200 ret = smsc75xx_phy_wait_not_busy(dev);
201 check_warn_goto_done(ret, "MII is busy in smsc75xx_mdio_write");
202
203 val = regval;
204 ret = smsc75xx_write_reg(dev, MII_DATA, val);
205 check_warn_goto_done(ret, "Error writing MII_DATA");
206
207 /* set the address, index & direction (write to PHY) */
208 phy_id &= dev->mii.phy_id_mask;
209 idx &= dev->mii.reg_num_mask;
210 addr = ((phy_id << MII_ACCESS_PHY_ADDR_SHIFT) & MII_ACCESS_PHY_ADDR)
211 | ((idx << MII_ACCESS_REG_ADDR_SHIFT) & MII_ACCESS_REG_ADDR)
212 | MII_ACCESS_WRITE;
213 ret = smsc75xx_write_reg(dev, MII_ACCESS, addr);
214 check_warn_goto_done(ret, "Error writing MII_ACCESS");
215
216 ret = smsc75xx_phy_wait_not_busy(dev);
217 check_warn_goto_done(ret, "Timed out writing MII reg %02X", idx);
218
219done:
220 mutex_unlock(&dev->phy_mutex);
221}
222
223static int smsc75xx_wait_eeprom(struct usbnet *dev)
224{
225 unsigned long start_time = jiffies;
226 u32 val;
227 int ret;
228
229 do {
230 ret = smsc75xx_read_reg(dev, E2P_CMD, &val);
231 check_warn_return(ret, "Error reading E2P_CMD");
232
233 if (!(val & E2P_CMD_BUSY) || (val & E2P_CMD_TIMEOUT))
234 break;
235 udelay(40);
236 } while (!time_after(jiffies, start_time + HZ));
237
238 if (val & (E2P_CMD_TIMEOUT | E2P_CMD_BUSY)) {
239 netdev_warn(dev->net, "EEPROM read operation timeout");
240 return -EIO;
241 }
242
243 return 0;
244}
245
246static int smsc75xx_eeprom_confirm_not_busy(struct usbnet *dev)
247{
248 unsigned long start_time = jiffies;
249 u32 val;
250 int ret;
251
252 do {
253 ret = smsc75xx_read_reg(dev, E2P_CMD, &val);
254 check_warn_return(ret, "Error reading E2P_CMD");
255
256 if (!(val & E2P_CMD_BUSY))
257 return 0;
258
259 udelay(40);
260 } while (!time_after(jiffies, start_time + HZ));
261
262 netdev_warn(dev->net, "EEPROM is busy");
263 return -EIO;
264}
265
266static int smsc75xx_read_eeprom(struct usbnet *dev, u32 offset, u32 length,
267 u8 *data)
268{
269 u32 val;
270 int i, ret;
271
272 BUG_ON(!dev);
273 BUG_ON(!data);
274
275 ret = smsc75xx_eeprom_confirm_not_busy(dev);
276 if (ret)
277 return ret;
278
279 for (i = 0; i < length; i++) {
280 val = E2P_CMD_BUSY | E2P_CMD_READ | (offset & E2P_CMD_ADDR);
281 ret = smsc75xx_write_reg(dev, E2P_CMD, val);
282 check_warn_return(ret, "Error writing E2P_CMD");
283
284 ret = smsc75xx_wait_eeprom(dev);
285 if (ret < 0)
286 return ret;
287
288 ret = smsc75xx_read_reg(dev, E2P_DATA, &val);
289 check_warn_return(ret, "Error reading E2P_DATA");
290
291 data[i] = val & 0xFF;
292 offset++;
293 }
294
295 return 0;
296}
297
298static int smsc75xx_write_eeprom(struct usbnet *dev, u32 offset, u32 length,
299 u8 *data)
300{
301 u32 val;
302 int i, ret;
303
304 BUG_ON(!dev);
305 BUG_ON(!data);
306
307 ret = smsc75xx_eeprom_confirm_not_busy(dev);
308 if (ret)
309 return ret;
310
311 /* Issue write/erase enable command */
312 val = E2P_CMD_BUSY | E2P_CMD_EWEN;
313 ret = smsc75xx_write_reg(dev, E2P_CMD, val);
314 check_warn_return(ret, "Error writing E2P_CMD");
315
316 ret = smsc75xx_wait_eeprom(dev);
317 if (ret < 0)
318 return ret;
319
320 for (i = 0; i < length; i++) {
321
322 /* Fill data register */
323 val = data[i];
324 ret = smsc75xx_write_reg(dev, E2P_DATA, val);
325 check_warn_return(ret, "Error writing E2P_DATA");
326
327 /* Send "write" command */
328 val = E2P_CMD_BUSY | E2P_CMD_WRITE | (offset & E2P_CMD_ADDR);
329 ret = smsc75xx_write_reg(dev, E2P_CMD, val);
330 check_warn_return(ret, "Error writing E2P_CMD");
331
332 ret = smsc75xx_wait_eeprom(dev);
333 if (ret < 0)
334 return ret;
335
336 offset++;
337 }
338
339 return 0;
340}
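/*
 * Both EEPROM paths drive the same serial interface: each byte is transferred
 * by loading E2P_CMD with E2P_CMD_BUSY, the opcode and the address, then
 * polling in smsc75xx_wait_eeprom() until BUSY clears (or E2P_CMD_TIMEOUT is
 * set).  Writes additionally issue an E2P_CMD_EWEN write-enable command
 * before the first data byte.
 */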
341
342static int smsc75xx_dataport_wait_not_busy(struct usbnet *dev)
343{
344 int i, ret;
345
346 for (i = 0; i < 100; i++) {
347 u32 dp_sel;
348 ret = smsc75xx_read_reg(dev, DP_SEL, &dp_sel);
349 check_warn_return(ret, "Error reading DP_SEL");
350
351 if (dp_sel & DP_SEL_DPRDY)
352 return 0;
353
354 udelay(40);
355 }
356
357 netdev_warn(dev->net, "smsc75xx_dataport_wait_not_busy timed out");
358
359 return -EIO;
360}
361
362static int smsc75xx_dataport_write(struct usbnet *dev, u32 ram_select, u32 addr,
363 u32 length, u32 *buf)
364{
365 struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]);
366 u32 dp_sel;
367 int i, ret;
368
369 mutex_lock(&pdata->dataport_mutex);
370
371 ret = smsc75xx_dataport_wait_not_busy(dev);
372 check_warn_goto_done(ret, "smsc75xx_dataport_write busy on entry");
373
374 ret = smsc75xx_read_reg(dev, DP_SEL, &dp_sel);
375 check_warn_goto_done(ret, "Error reading DP_SEL");
376
377 dp_sel &= ~DP_SEL_RSEL;
378 dp_sel |= ram_select;
379 ret = smsc75xx_write_reg(dev, DP_SEL, dp_sel);
380 check_warn_goto_done(ret, "Error writing DP_SEL");
381
382 for (i = 0; i < length; i++) {
383 ret = smsc75xx_write_reg(dev, DP_ADDR, addr + i);
384 check_warn_goto_done(ret, "Error writing DP_ADDR");
385
386 ret = smsc75xx_write_reg(dev, DP_DATA, buf[i]);
387 check_warn_goto_done(ret, "Error writing DP_DATA");
388
389 ret = smsc75xx_write_reg(dev, DP_CMD, DP_CMD_WRITE);
390 check_warn_goto_done(ret, "Error writing DP_CMD");
391
392 ret = smsc75xx_dataport_wait_not_busy(dev);
393 check_warn_goto_done(ret, "smsc75xx_dataport_write timeout");
394 }
395
396done:
397 mutex_unlock(&pdata->dataport_mutex);
398 return ret;
399}
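/*
 * The dataport is an indirect window into internal RAM: select the RAM bank
 * via the DP_SEL_RSEL field, then for each word write DP_ADDR and DP_DATA,
 * issue DP_CMD_WRITE and poll for DP_SEL_DPRDY.  It is used below to load
 * the multicast hash table into the vendor hash filter (DP_SEL_VHF).
 */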
400
401/* returns hash bit number for given MAC address */
402static u32 smsc75xx_hash(char addr[ETH_ALEN])
403{
404 return (ether_crc(ETH_ALEN, addr) >> 23) & 0x1ff;
405}
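/*
 * The hash index is the top nine bits of the Ethernet CRC (bits 31:23), so
 * it ranges over 0..511 and selects one bit in the 16-word (512-bit)
 * multicast_hash_table loaded by smsc75xx_deferred_multicast_write().
 */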
406
407static void smsc75xx_deferred_multicast_write(struct work_struct *param)
408{
409 struct smsc75xx_priv *pdata =
410 container_of(param, struct smsc75xx_priv, set_multicast);
411 struct usbnet *dev = pdata->dev;
412 int ret;
413
414 netif_dbg(dev, drv, dev->net, "deferred multicast write 0x%08x",
415 pdata->rfe_ctl);
416
417 smsc75xx_dataport_write(dev, DP_SEL_VHF, DP_SEL_VHF_VLAN_LEN,
418 DP_SEL_VHF_HASH_LEN, pdata->multicast_hash_table);
419
420 ret = smsc75xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
421	check_warn(ret, "Error writing RFE_CTL");
422}
423
424static void smsc75xx_set_multicast(struct net_device *netdev)
425{
426 struct usbnet *dev = netdev_priv(netdev);
427 struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]);
428 unsigned long flags;
429 int i;
430
431 spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);
432
433 pdata->rfe_ctl &=
434 ~(RFE_CTL_AU | RFE_CTL_AM | RFE_CTL_DPF | RFE_CTL_MHF);
435 pdata->rfe_ctl |= RFE_CTL_AB;
436
437 for (i = 0; i < DP_SEL_VHF_HASH_LEN; i++)
438 pdata->multicast_hash_table[i] = 0;
439
440 if (dev->net->flags & IFF_PROMISC) {
441 netif_dbg(dev, drv, dev->net, "promiscuous mode enabled");
442 pdata->rfe_ctl |= RFE_CTL_AM | RFE_CTL_AU;
443 } else if (dev->net->flags & IFF_ALLMULTI) {
444 netif_dbg(dev, drv, dev->net, "receive all multicast enabled");
445 pdata->rfe_ctl |= RFE_CTL_AM | RFE_CTL_DPF;
446 } else if (!netdev_mc_empty(dev->net)) {
447 struct dev_mc_list *mc_list;
448
449 netif_dbg(dev, drv, dev->net, "receive multicast hash filter");
450
451 pdata->rfe_ctl |= RFE_CTL_MHF | RFE_CTL_DPF;
452
453 netdev_for_each_mc_addr(mc_list, netdev) {
454 u32 bitnum = smsc75xx_hash(mc_list->dmi_addr);
455 pdata->multicast_hash_table[bitnum / 32] |=
456 (1 << (bitnum % 32));
457 }
458 } else {
459 netif_dbg(dev, drv, dev->net, "receive own packets only");
460 pdata->rfe_ctl |= RFE_CTL_DPF;
461 }
462
463 spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);
464
465 /* defer register writes to a sleepable context */
466 schedule_work(&pdata->set_multicast);
467}
468
469static int smsc75xx_update_flowcontrol(struct usbnet *dev, u8 duplex,
470 u16 lcladv, u16 rmtadv)
471{
472 u32 flow = 0, fct_flow = 0;
473 int ret;
474
475 if (duplex == DUPLEX_FULL) {
476 u8 cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
477
478 if (cap & FLOW_CTRL_TX) {
479 flow = (FLOW_TX_FCEN | 0xFFFF);
480 /* set fct_flow thresholds to 20% and 80% */
481 fct_flow = (8 << 8) | 32;
482 }
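		/*
		 * The 20%/80% figures above line up with these values if
		 * FCT_FLOW counts in the same 512-byte units as
		 * FCT_RX_FIFO_END: (8 << 8) lands in FCT_FLOW_THRESHOLD_OFF
		 * (8 * 512 = 4kB) and 32 in FCT_FLOW_THRESHOLD_ON
		 * (32 * 512 = 16kB) of the 20kB rx FIFO.
		 */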
483
484 if (cap & FLOW_CTRL_RX)
485 flow |= FLOW_RX_FCEN;
486
487 netif_dbg(dev, link, dev->net, "rx pause %s, tx pause %s",
488 (cap & FLOW_CTRL_RX ? "enabled" : "disabled"),
489 (cap & FLOW_CTRL_TX ? "enabled" : "disabled"));
490 } else {
491 netif_dbg(dev, link, dev->net, "half duplex");
492 }
493
494 ret = smsc75xx_write_reg(dev, FLOW, flow);
495 check_warn_return(ret, "Error writing FLOW");
496
497 ret = smsc75xx_write_reg(dev, FCT_FLOW, fct_flow);
498 check_warn_return(ret, "Error writing FCT_FLOW");
499
500 return 0;
501}
502
503static int smsc75xx_link_reset(struct usbnet *dev)
504{
505 struct mii_if_info *mii = &dev->mii;
506 struct ethtool_cmd ecmd;
507 u16 lcladv, rmtadv;
508 int ret;
509
510 /* clear interrupt status */
511 ret = smsc75xx_mdio_read(dev->net, mii->phy_id, PHY_INT_SRC);
512 check_warn_return(ret, "Error reading PHY_INT_SRC");
513
514 ret = smsc75xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL);
515 check_warn_return(ret, "Error writing INT_STS");
516
517 mii_check_media(mii, 1, 1);
518 mii_ethtool_gset(&dev->mii, &ecmd);
519 lcladv = smsc75xx_mdio_read(dev->net, mii->phy_id, MII_ADVERTISE);
520 rmtadv = smsc75xx_mdio_read(dev->net, mii->phy_id, MII_LPA);
521
522 netif_dbg(dev, link, dev->net, "speed: %d duplex: %d lcladv: %04x"
523 " rmtadv: %04x", ecmd.speed, ecmd.duplex, lcladv, rmtadv);
524
525 return smsc75xx_update_flowcontrol(dev, ecmd.duplex, lcladv, rmtadv);
526}
527
528static void smsc75xx_status(struct usbnet *dev, struct urb *urb)
529{
530 u32 intdata;
531
532 if (urb->actual_length != 4) {
533 netdev_warn(dev->net,
534 "unexpected urb length %d", urb->actual_length);
535 return;
536 }
537
538 memcpy(&intdata, urb->transfer_buffer, 4);
539 le32_to_cpus(&intdata);
540
541 netif_dbg(dev, link, dev->net, "intdata: 0x%08X", intdata);
542
543 if (intdata & INT_ENP_PHY_INT)
544 usbnet_defer_kevent(dev, EVENT_LINK_RESET);
545 else
546 netdev_warn(dev->net,
547 "unexpected interrupt, intdata=0x%08X", intdata);
548}
549
550/* Enable or disable Rx checksum offload engine */
551static int smsc75xx_set_rx_csum_offload(struct usbnet *dev)
552{
553 struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]);
554 unsigned long flags;
555 int ret;
556
557 spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);
558
559 if (pdata->use_rx_csum)
560 pdata->rfe_ctl |= RFE_CTL_TCPUDP_CKM | RFE_CTL_IP_CKM;
561 else
562 pdata->rfe_ctl &= ~(RFE_CTL_TCPUDP_CKM | RFE_CTL_IP_CKM);
563
564 spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);
565
566 ret = smsc75xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
567 check_warn_return(ret, "Error writing RFE_CTL");
568
569 return 0;
570}
571
572static int smsc75xx_ethtool_get_eeprom_len(struct net_device *net)
573{
574 return MAX_EEPROM_SIZE;
575}
576
577static int smsc75xx_ethtool_get_eeprom(struct net_device *netdev,
578 struct ethtool_eeprom *ee, u8 *data)
579{
580 struct usbnet *dev = netdev_priv(netdev);
581
582 ee->magic = LAN75XX_EEPROM_MAGIC;
583
584 return smsc75xx_read_eeprom(dev, ee->offset, ee->len, data);
585}
586
587static int smsc75xx_ethtool_set_eeprom(struct net_device *netdev,
588 struct ethtool_eeprom *ee, u8 *data)
589{
590 struct usbnet *dev = netdev_priv(netdev);
591
592 if (ee->magic != LAN75XX_EEPROM_MAGIC) {
593 netdev_warn(dev->net,
594 "EEPROM: magic value mismatch: 0x%x", ee->magic);
595 return -EINVAL;
596 }
597
598 return smsc75xx_write_eeprom(dev, ee->offset, ee->len, data);
599}
600
601static u32 smsc75xx_ethtool_get_rx_csum(struct net_device *netdev)
602{
603 struct usbnet *dev = netdev_priv(netdev);
604 struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]);
605
606 return pdata->use_rx_csum;
607}
608
609static int smsc75xx_ethtool_set_rx_csum(struct net_device *netdev, u32 val)
610{
611 struct usbnet *dev = netdev_priv(netdev);
612 struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]);
613
614 pdata->use_rx_csum = !!val;
615
616 return smsc75xx_set_rx_csum_offload(dev);
617}
618
619static int smsc75xx_ethtool_set_tso(struct net_device *netdev, u32 data)
620{
621 if (data)
622 netdev->features |= NETIF_F_TSO | NETIF_F_TSO6;
623 else
624 netdev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
625
626 return 0;
627}
628
629static const struct ethtool_ops smsc75xx_ethtool_ops = {
630 .get_link = usbnet_get_link,
631 .nway_reset = usbnet_nway_reset,
632 .get_drvinfo = usbnet_get_drvinfo,
633 .get_msglevel = usbnet_get_msglevel,
634 .set_msglevel = usbnet_set_msglevel,
635 .get_settings = usbnet_get_settings,
636 .set_settings = usbnet_set_settings,
637 .get_eeprom_len = smsc75xx_ethtool_get_eeprom_len,
638 .get_eeprom = smsc75xx_ethtool_get_eeprom,
639 .set_eeprom = smsc75xx_ethtool_set_eeprom,
640 .get_tx_csum = ethtool_op_get_tx_csum,
641 .set_tx_csum = ethtool_op_set_tx_hw_csum,
642 .get_rx_csum = smsc75xx_ethtool_get_rx_csum,
643 .set_rx_csum = smsc75xx_ethtool_set_rx_csum,
644 .get_tso = ethtool_op_get_tso,
645 .set_tso = smsc75xx_ethtool_set_tso,
646};
647
648static int smsc75xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
649{
650 struct usbnet *dev = netdev_priv(netdev);
651
652 if (!netif_running(netdev))
653 return -EINVAL;
654
655 return generic_mii_ioctl(&dev->mii, if_mii(rq), cmd, NULL);
656}
657
658static void smsc75xx_init_mac_address(struct usbnet *dev)
659{
660 /* try reading mac address from EEPROM */
661 if (smsc75xx_read_eeprom(dev, EEPROM_MAC_OFFSET, ETH_ALEN,
662 dev->net->dev_addr) == 0) {
663 if (is_valid_ether_addr(dev->net->dev_addr)) {
664 /* eeprom values are valid so use them */
665 netif_dbg(dev, ifup, dev->net,
666 "MAC address read from EEPROM");
667 return;
668 }
669 }
670
671 /* no eeprom, or eeprom values are invalid. generate random MAC */
672 random_ether_addr(dev->net->dev_addr);
673 netif_dbg(dev, ifup, dev->net, "MAC address set to random_ether_addr");
674}
675
676static int smsc75xx_set_mac_address(struct usbnet *dev)
677{
678 u32 addr_lo = dev->net->dev_addr[0] | dev->net->dev_addr[1] << 8 |
679 dev->net->dev_addr[2] << 16 | dev->net->dev_addr[3] << 24;
680 u32 addr_hi = dev->net->dev_addr[4] | dev->net->dev_addr[5] << 8;
681
682 int ret = smsc75xx_write_reg(dev, RX_ADDRH, addr_hi);
683 check_warn_return(ret, "Failed to write RX_ADDRH: %d", ret);
684
685 ret = smsc75xx_write_reg(dev, RX_ADDRL, addr_lo);
686 check_warn_return(ret, "Failed to write RX_ADDRL: %d", ret);
687
688 addr_hi |= ADDR_FILTX_FB_VALID;
689 ret = smsc75xx_write_reg(dev, ADDR_FILTX, addr_hi);
690 check_warn_return(ret, "Failed to write ADDR_FILTX: %d", ret);
691
692 ret = smsc75xx_write_reg(dev, ADDR_FILTX + 4, addr_lo);
693 check_warn_return(ret, "Failed to write ADDR_FILTX+4: %d", ret);
694
695 return 0;
696}
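/*
 * RX_ADDRH/RX_ADDRL hold the station address used by the MAC, and the same
 * address is programmed into the first ADDR_FILTX slot with the valid bit
 * set so that frames to this address pass the receive address filter.
 */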
697
698static int smsc75xx_phy_initialize(struct usbnet *dev)
699{
700 int bmcr, timeout = 0;
701
702 /* Initialize MII structure */
703 dev->mii.dev = dev->net;
704 dev->mii.mdio_read = smsc75xx_mdio_read;
705 dev->mii.mdio_write = smsc75xx_mdio_write;
706 dev->mii.phy_id_mask = 0x1f;
707 dev->mii.reg_num_mask = 0x1f;
708 dev->mii.phy_id = SMSC75XX_INTERNAL_PHY_ID;
709
710 /* reset phy and wait for reset to complete */
711 smsc75xx_mdio_write(dev->net, dev->mii.phy_id, MII_BMCR, BMCR_RESET);
712
713 do {
714 msleep(10);
715 bmcr = smsc75xx_mdio_read(dev->net, dev->mii.phy_id, MII_BMCR);
716 check_warn_return(bmcr, "Error reading MII_BMCR");
717 timeout++;
718	} while ((bmcr & BMCR_RESET) && (timeout < 100));
719
720 if (timeout >= 100) {
721 netdev_warn(dev->net, "timeout on PHY Reset");
722 return -EIO;
723 }
724
725 smsc75xx_mdio_write(dev->net, dev->mii.phy_id, MII_ADVERTISE,
726 ADVERTISE_ALL | ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP |
727 ADVERTISE_PAUSE_ASYM);
728
729 /* read to clear */
730	bmcr = smsc75xx_mdio_read(dev->net, dev->mii.phy_id, PHY_INT_SRC);
731 check_warn_return(bmcr, "Error reading PHY_INT_SRC");
732
733 smsc75xx_mdio_write(dev->net, dev->mii.phy_id, PHY_INT_MASK,
734 PHY_INT_MASK_DEFAULT);
735 mii_nway_restart(&dev->mii);
736
737 netif_dbg(dev, ifup, dev->net, "phy initialised successfully");
738 return 0;
739}
740
741static int smsc75xx_set_rx_max_frame_length(struct usbnet *dev, int size)
742{
743 int ret = 0;
744 u32 buf;
745 bool rxenabled;
746
747 ret = smsc75xx_read_reg(dev, MAC_RX, &buf);
748 check_warn_return(ret, "Failed to read MAC_RX: %d", ret);
749
750 rxenabled = ((buf & MAC_RX_RXEN) != 0);
751
752 if (rxenabled) {
753 buf &= ~MAC_RX_RXEN;
754 ret = smsc75xx_write_reg(dev, MAC_RX, buf);
755 check_warn_return(ret, "Failed to write MAC_RX: %d", ret);
756 }
757
758 /* add 4 to size for FCS */
759 buf &= ~MAC_RX_MAX_SIZE;
760 buf |= (((size + 4) << MAC_RX_MAX_SIZE_SHIFT) & MAC_RX_MAX_SIZE);
761
762 ret = smsc75xx_write_reg(dev, MAC_RX, buf);
763 check_warn_return(ret, "Failed to write MAC_RX: %d", ret);
764
765 if (rxenabled) {
766 buf |= MAC_RX_RXEN;
767 ret = smsc75xx_write_reg(dev, MAC_RX, buf);
768 check_warn_return(ret, "Failed to write MAC_RX: %d", ret);
769 }
770
771 return 0;
772}
773
774static int smsc75xx_change_mtu(struct net_device *netdev, int new_mtu)
775{
776 struct usbnet *dev = netdev_priv(netdev);
777
778 int ret = smsc75xx_set_rx_max_frame_length(dev, new_mtu);
779 check_warn_return(ret, "Failed to set mac rx frame length");
780
781 return usbnet_change_mtu(netdev, new_mtu);
782}
783
784static int smsc75xx_reset(struct usbnet *dev)
785{
786 struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]);
787 u32 buf;
788 int ret = 0, timeout;
789
790 netif_dbg(dev, ifup, dev->net, "entering smsc75xx_reset");
791
792 ret = smsc75xx_read_reg(dev, HW_CFG, &buf);
793 check_warn_return(ret, "Failed to read HW_CFG: %d", ret);
794
795 buf |= HW_CFG_LRST;
796
797 ret = smsc75xx_write_reg(dev, HW_CFG, buf);
798 check_warn_return(ret, "Failed to write HW_CFG: %d", ret);
799
800 timeout = 0;
801 do {
802 msleep(10);
803 ret = smsc75xx_read_reg(dev, HW_CFG, &buf);
804 check_warn_return(ret, "Failed to read HW_CFG: %d", ret);
805 timeout++;
806 } while ((buf & HW_CFG_LRST) && (timeout < 100));
807
808 if (timeout >= 100) {
809 netdev_warn(dev->net, "timeout on completion of Lite Reset");
810 return -EIO;
811 }
812
813 netif_dbg(dev, ifup, dev->net, "Lite reset complete, resetting PHY");
814
815 ret = smsc75xx_read_reg(dev, PMT_CTL, &buf);
816 check_warn_return(ret, "Failed to read PMT_CTL: %d", ret);
817
818 buf |= PMT_CTL_PHY_RST;
819
820 ret = smsc75xx_write_reg(dev, PMT_CTL, buf);
821 check_warn_return(ret, "Failed to write PMT_CTL: %d", ret);
822
823 timeout = 0;
824 do {
825 msleep(10);
826 ret = smsc75xx_read_reg(dev, PMT_CTL, &buf);
827 check_warn_return(ret, "Failed to read PMT_CTL: %d", ret);
828 timeout++;
829 } while ((buf & PMT_CTL_PHY_RST) && (timeout < 100));
830
831 if (timeout >= 100) {
832 netdev_warn(dev->net, "timeout waiting for PHY Reset");
833 return -EIO;
834 }
835
836 netif_dbg(dev, ifup, dev->net, "PHY reset complete");
837
838 smsc75xx_init_mac_address(dev);
839
840 ret = smsc75xx_set_mac_address(dev);
841 check_warn_return(ret, "Failed to set mac address");
842
843 netif_dbg(dev, ifup, dev->net, "MAC Address: %pM", dev->net->dev_addr);
844
845 ret = smsc75xx_read_reg(dev, HW_CFG, &buf);
846 check_warn_return(ret, "Failed to read HW_CFG: %d", ret);
847
848 netif_dbg(dev, ifup, dev->net, "Read Value from HW_CFG : 0x%08x", buf);
849
850 buf |= HW_CFG_BIR;
851
852 ret = smsc75xx_write_reg(dev, HW_CFG, buf);
853 check_warn_return(ret, "Failed to write HW_CFG: %d", ret);
854
855 ret = smsc75xx_read_reg(dev, HW_CFG, &buf);
856 check_warn_return(ret, "Failed to read HW_CFG: %d", ret);
857
858 netif_dbg(dev, ifup, dev->net, "Read Value from HW_CFG after "
859 "writing HW_CFG_BIR: 0x%08x", buf);
860
861 if (!turbo_mode) {
862 buf = 0;
863 dev->rx_urb_size = MAX_SINGLE_PACKET_SIZE;
864 } else if (dev->udev->speed == USB_SPEED_HIGH) {
865 buf = DEFAULT_HS_BURST_CAP_SIZE / HS_USB_PKT_SIZE;
866 dev->rx_urb_size = DEFAULT_HS_BURST_CAP_SIZE;
867 } else {
868 buf = DEFAULT_FS_BURST_CAP_SIZE / FS_USB_PKT_SIZE;
869 dev->rx_urb_size = DEFAULT_FS_BURST_CAP_SIZE;
870 }
871
872 netif_dbg(dev, ifup, dev->net, "rx_urb_size=%ld",
873 (ulong)dev->rx_urb_size);
874
875 ret = smsc75xx_write_reg(dev, BURST_CAP, buf);
876 check_warn_return(ret, "Failed to write BURST_CAP: %d", ret);
877
878 ret = smsc75xx_read_reg(dev, BURST_CAP, &buf);
879 check_warn_return(ret, "Failed to read BURST_CAP: %d", ret);
880
881 netif_dbg(dev, ifup, dev->net,
882 "Read Value from BURST_CAP after writing: 0x%08x", buf);
883
884 ret = smsc75xx_write_reg(dev, BULK_IN_DLY, DEFAULT_BULK_IN_DELAY);
885 check_warn_return(ret, "Failed to write BULK_IN_DLY: %d", ret);
886
887 ret = smsc75xx_read_reg(dev, BULK_IN_DLY, &buf);
888 check_warn_return(ret, "Failed to read BULK_IN_DLY: %d", ret);
889
890 netif_dbg(dev, ifup, dev->net,
891 "Read Value from BULK_IN_DLY after writing: 0x%08x", buf);
892
893 if (turbo_mode) {
894 ret = smsc75xx_read_reg(dev, HW_CFG, &buf);
895 check_warn_return(ret, "Failed to read HW_CFG: %d", ret);
896
897 netif_dbg(dev, ifup, dev->net, "HW_CFG: 0x%08x", buf);
898
899 buf |= (HW_CFG_MEF | HW_CFG_BCE);
900
901 ret = smsc75xx_write_reg(dev, HW_CFG, buf);
902 check_warn_return(ret, "Failed to write HW_CFG: %d", ret);
903
904 ret = smsc75xx_read_reg(dev, HW_CFG, &buf);
905 check_warn_return(ret, "Failed to read HW_CFG: %d", ret);
906
907 netif_dbg(dev, ifup, dev->net, "HW_CFG: 0x%08x", buf);
908 }
909
910 /* set FIFO sizes */
911 buf = (MAX_RX_FIFO_SIZE - 512) / 512;
912 ret = smsc75xx_write_reg(dev, FCT_RX_FIFO_END, buf);
913 check_warn_return(ret, "Failed to write FCT_RX_FIFO_END: %d", ret);
914
915 netif_dbg(dev, ifup, dev->net, "FCT_RX_FIFO_END set to 0x%08x", buf);
916
917 buf = (MAX_TX_FIFO_SIZE - 512) / 512;
918 ret = smsc75xx_write_reg(dev, FCT_TX_FIFO_END, buf);
919 check_warn_return(ret, "Failed to write FCT_TX_FIFO_END: %d", ret);
920
921 netif_dbg(dev, ifup, dev->net, "FCT_TX_FIFO_END set to 0x%08x", buf);
922
923 ret = smsc75xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL);
924 check_warn_return(ret, "Failed to write INT_STS: %d", ret);
925
926 ret = smsc75xx_read_reg(dev, ID_REV, &buf);
927 check_warn_return(ret, "Failed to read ID_REV: %d", ret);
928
929 netif_dbg(dev, ifup, dev->net, "ID_REV = 0x%08x", buf);
930
931 /* Configure GPIO pins as LED outputs */
932 ret = smsc75xx_read_reg(dev, LED_GPIO_CFG, &buf);
933 check_warn_return(ret, "Failed to read LED_GPIO_CFG: %d", ret);
934
935 buf &= ~(LED_GPIO_CFG_LED2_FUN_SEL | LED_GPIO_CFG_LED10_FUN_SEL);
936 buf |= LED_GPIO_CFG_LEDGPIO_EN | LED_GPIO_CFG_LED2_FUN_SEL;
937
938 ret = smsc75xx_write_reg(dev, LED_GPIO_CFG, buf);
939 check_warn_return(ret, "Failed to write LED_GPIO_CFG: %d", ret);
940
941 ret = smsc75xx_write_reg(dev, FLOW, 0);
942 check_warn_return(ret, "Failed to write FLOW: %d", ret);
943
944 ret = smsc75xx_write_reg(dev, FCT_FLOW, 0);
945 check_warn_return(ret, "Failed to write FCT_FLOW: %d", ret);
946
947 /* Don't need rfe_ctl_lock during initialisation */
948 ret = smsc75xx_read_reg(dev, RFE_CTL, &pdata->rfe_ctl);
949 check_warn_return(ret, "Failed to read RFE_CTL: %d", ret);
950
951 pdata->rfe_ctl |= RFE_CTL_AB | RFE_CTL_DPF;
952
953 ret = smsc75xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
954 check_warn_return(ret, "Failed to write RFE_CTL: %d", ret);
955
956 ret = smsc75xx_read_reg(dev, RFE_CTL, &pdata->rfe_ctl);
957 check_warn_return(ret, "Failed to read RFE_CTL: %d", ret);
958
959 netif_dbg(dev, ifup, dev->net, "RFE_CTL set to 0x%08x", pdata->rfe_ctl);
960
961 /* Enable or disable checksum offload engines */
962 ethtool_op_set_tx_hw_csum(dev->net, DEFAULT_TX_CSUM_ENABLE);
963 ret = smsc75xx_set_rx_csum_offload(dev);
964 check_warn_return(ret, "Failed to set rx csum offload: %d", ret);
965
966 smsc75xx_ethtool_set_tso(dev->net, DEFAULT_TSO_ENABLE);
967
968 smsc75xx_set_multicast(dev->net);
969
970 ret = smsc75xx_phy_initialize(dev);
971 check_warn_return(ret, "Failed to initialize PHY: %d", ret);
972
973 ret = smsc75xx_read_reg(dev, INT_EP_CTL, &buf);
974 check_warn_return(ret, "Failed to read INT_EP_CTL: %d", ret);
975
976 /* enable PHY interrupts */
977 buf |= INT_ENP_PHY_INT;
978
979 ret = smsc75xx_write_reg(dev, INT_EP_CTL, buf);
980 check_warn_return(ret, "Failed to write INT_EP_CTL: %d", ret);
981
982 ret = smsc75xx_read_reg(dev, MAC_TX, &buf);
983 check_warn_return(ret, "Failed to read MAC_TX: %d", ret);
984
985 buf |= MAC_TX_TXEN;
986
987 ret = smsc75xx_write_reg(dev, MAC_TX, buf);
988 check_warn_return(ret, "Failed to write MAC_TX: %d", ret);
989
990 netif_dbg(dev, ifup, dev->net, "MAC_TX set to 0x%08x", buf);
991
992 ret = smsc75xx_read_reg(dev, FCT_TX_CTL, &buf);
993 check_warn_return(ret, "Failed to read FCT_TX_CTL: %d", ret);
994
995 buf |= FCT_TX_CTL_EN;
996
997 ret = smsc75xx_write_reg(dev, FCT_TX_CTL, buf);
998 check_warn_return(ret, "Failed to write FCT_TX_CTL: %d", ret);
999
1000 netif_dbg(dev, ifup, dev->net, "FCT_TX_CTL set to 0x%08x", buf);
1001
1002 ret = smsc75xx_set_rx_max_frame_length(dev, 1514);
1003 check_warn_return(ret, "Failed to set max rx frame length");
1004
1005 ret = smsc75xx_read_reg(dev, MAC_RX, &buf);
1006 check_warn_return(ret, "Failed to read MAC_RX: %d", ret);
1007
1008 buf |= MAC_RX_RXEN;
1009
1010 ret = smsc75xx_write_reg(dev, MAC_RX, buf);
1011 check_warn_return(ret, "Failed to write MAC_RX: %d", ret);
1012
1013 netif_dbg(dev, ifup, dev->net, "MAC_RX set to 0x%08x", buf);
1014
1015 ret = smsc75xx_read_reg(dev, FCT_RX_CTL, &buf);
1016 check_warn_return(ret, "Failed to read FCT_RX_CTL: %d", ret);
1017
1018 buf |= FCT_RX_CTL_EN;
1019
1020 ret = smsc75xx_write_reg(dev, FCT_RX_CTL, buf);
1021 check_warn_return(ret, "Failed to write FCT_RX_CTL: %d", ret);
1022
1023 netif_dbg(dev, ifup, dev->net, "FCT_RX_CTL set to 0x%08x", buf);
1024
1025 netif_dbg(dev, ifup, dev->net, "smsc75xx_reset, return 0");
1026 return 0;
1027}
1028
1029static const struct net_device_ops smsc75xx_netdev_ops = {
1030 .ndo_open = usbnet_open,
1031 .ndo_stop = usbnet_stop,
1032 .ndo_start_xmit = usbnet_start_xmit,
1033 .ndo_tx_timeout = usbnet_tx_timeout,
1034 .ndo_change_mtu = smsc75xx_change_mtu,
1035 .ndo_set_mac_address = eth_mac_addr,
1036 .ndo_validate_addr = eth_validate_addr,
1037 .ndo_do_ioctl = smsc75xx_ioctl,
1038 .ndo_set_multicast_list = smsc75xx_set_multicast,
1039};
1040
1041static int smsc75xx_bind(struct usbnet *dev, struct usb_interface *intf)
1042{
1043 struct smsc75xx_priv *pdata = NULL;
1044 int ret;
1045
1046 printk(KERN_INFO SMSC_CHIPNAME " v" SMSC_DRIVER_VERSION "\n");
1047
1048 ret = usbnet_get_endpoints(dev, intf);
1049 check_warn_return(ret, "usbnet_get_endpoints failed: %d", ret);
1050
1051 dev->data[0] = (unsigned long)kzalloc(sizeof(struct smsc75xx_priv),
1052 GFP_KERNEL);
1053
1054 pdata = (struct smsc75xx_priv *)(dev->data[0]);
1055 if (!pdata) {
1056 netdev_warn(dev->net, "Unable to allocate smsc75xx_priv");
1057 return -ENOMEM;
1058 }
1059
1060 pdata->dev = dev;
1061
1062 spin_lock_init(&pdata->rfe_ctl_lock);
1063 mutex_init(&pdata->dataport_mutex);
1064
1065 INIT_WORK(&pdata->set_multicast, smsc75xx_deferred_multicast_write);
1066
1067 pdata->use_rx_csum = DEFAULT_RX_CSUM_ENABLE;
1068
1069 /* We have to advertise SG otherwise TSO cannot be enabled */
1070 dev->net->features |= NETIF_F_SG;
1071
1072 /* Init all registers */
1073	ret = smsc75xx_reset(dev);
	check_warn_return(ret, "smsc75xx_reset error %d", ret);
1074
1075 dev->net->netdev_ops = &smsc75xx_netdev_ops;
1076 dev->net->ethtool_ops = &smsc75xx_ethtool_ops;
1077 dev->net->flags |= IFF_MULTICAST;
1078 dev->net->hard_header_len += SMSC75XX_TX_OVERHEAD;
1079 return 0;
1080}
1081
1082static void smsc75xx_unbind(struct usbnet *dev, struct usb_interface *intf)
1083{
1084 struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]);
1085 if (pdata) {
1086 netif_dbg(dev, ifdown, dev->net, "free pdata");
1087 kfree(pdata);
1088 pdata = NULL;
1089 dev->data[0] = 0;
1090 }
1091}
1092
1093static void smsc75xx_rx_csum_offload(struct sk_buff *skb, u32 rx_cmd_a,
1094 u32 rx_cmd_b)
1095{
1096 if (unlikely(rx_cmd_a & RX_CMD_A_LCSM)) {
1097 skb->ip_summed = CHECKSUM_NONE;
1098 } else {
1099 skb->csum = ntohs((u16)(rx_cmd_b >> RX_CMD_B_CSUM_SHIFT));
1100 skb->ip_summed = CHECKSUM_COMPLETE;
1101 }
1102}
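/*
 * Each received frame in a bulk-in batch is laid out as two little-endian
 * 32-bit command words (rx_cmd_a, rx_cmd_b) followed by NET_IP_ALIGN padding
 * bytes; the length field in rx_cmd_a covers that padding plus the packet
 * data including the trailing FCS, and each frame is padded to a 4-byte
 * boundary before the next one.  smsc75xx_rx_fixup() below walks this layout.
 */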
1103
1104static int smsc75xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
1105{
1106 struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]);
1107
1108 while (skb->len > 0) {
1109 u32 rx_cmd_a, rx_cmd_b, align_count, size;
1110 struct sk_buff *ax_skb;
1111 unsigned char *packet;
1112
1113 memcpy(&rx_cmd_a, skb->data, sizeof(rx_cmd_a));
1114 le32_to_cpus(&rx_cmd_a);
1115 skb_pull(skb, 4);
1116
1117 memcpy(&rx_cmd_b, skb->data, sizeof(rx_cmd_b));
1118 le32_to_cpus(&rx_cmd_b);
1119 skb_pull(skb, 4 + NET_IP_ALIGN);
1120
1121 packet = skb->data;
1122
1123 /* get the packet length */
1124 size = (rx_cmd_a & RX_CMD_A_LEN) - NET_IP_ALIGN;
1125 align_count = (4 - ((size + NET_IP_ALIGN) % 4)) % 4;
1126
1127 if (unlikely(rx_cmd_a & RX_CMD_A_RED)) {
1128 netif_dbg(dev, rx_err, dev->net,
1129 "Error rx_cmd_a=0x%08x", rx_cmd_a);
1130 dev->net->stats.rx_errors++;
1131 dev->net->stats.rx_dropped++;
1132
1133 if (rx_cmd_a & RX_CMD_A_FCS)
1134 dev->net->stats.rx_crc_errors++;
1135 else if (rx_cmd_a & (RX_CMD_A_LONG | RX_CMD_A_RUNT))
1136 dev->net->stats.rx_frame_errors++;
1137 } else {
1138 /* ETH_FRAME_LEN + 4(CRC) + 2(COE) + 4(Vlan) */
1139 if (unlikely(size > (ETH_FRAME_LEN + 12))) {
1140 netif_dbg(dev, rx_err, dev->net,
1141 "size err rx_cmd_a=0x%08x", rx_cmd_a);
1142 return 0;
1143 }
1144
1145 /* last frame in this batch */
1146 if (skb->len == size) {
1147 if (pdata->use_rx_csum)
1148 smsc75xx_rx_csum_offload(skb, rx_cmd_a,
1149 rx_cmd_b);
1150 else
1151 skb->ip_summed = CHECKSUM_NONE;
1152
1153 skb_trim(skb, skb->len - 4); /* remove fcs */
1154 skb->truesize = size + sizeof(struct sk_buff);
1155
1156 return 1;
1157 }
1158
1159 ax_skb = skb_clone(skb, GFP_ATOMIC);
1160 if (unlikely(!ax_skb)) {
1161 netdev_warn(dev->net, "Error allocating skb");
1162 return 0;
1163 }
1164
1165 ax_skb->len = size;
1166 ax_skb->data = packet;
1167 skb_set_tail_pointer(ax_skb, size);
1168
1169 if (pdata->use_rx_csum)
1170 smsc75xx_rx_csum_offload(ax_skb, rx_cmd_a,
1171 rx_cmd_b);
1172 else
1173 ax_skb->ip_summed = CHECKSUM_NONE;
1174
1175 skb_trim(ax_skb, ax_skb->len - 4); /* remove fcs */
1176 ax_skb->truesize = size + sizeof(struct sk_buff);
1177
1178 usbnet_skb_return(dev, ax_skb);
1179 }
1180
1181 skb_pull(skb, size);
1182
1183 /* padding bytes before the next frame starts */
1184 if (skb->len)
1185 skb_pull(skb, align_count);
1186 }
1187
1188 if (unlikely(skb->len < 0)) {
1189 netdev_warn(dev->net, "invalid rx length<0 %d", skb->len);
1190 return 0;
1191 }
1192
1193 return 1;
1194}
1195
1196static struct sk_buff *smsc75xx_tx_fixup(struct usbnet *dev,
1197 struct sk_buff *skb, gfp_t flags)
1198{
1199 u32 tx_cmd_a, tx_cmd_b;
1200
1201 skb_linearize(skb);
1202
1203 if (skb_headroom(skb) < SMSC75XX_TX_OVERHEAD) {
1204 struct sk_buff *skb2 =
1205 skb_copy_expand(skb, SMSC75XX_TX_OVERHEAD, 0, flags);
1206 dev_kfree_skb_any(skb);
1207 skb = skb2;
1208 if (!skb)
1209 return NULL;
1210 }
1211
1212 tx_cmd_a = (u32)(skb->len & TX_CMD_A_LEN) | TX_CMD_A_FCS;
1213
1214 if (skb->ip_summed == CHECKSUM_PARTIAL)
1215 tx_cmd_a |= TX_CMD_A_IPE | TX_CMD_A_TPE;
1216
1217 if (skb_is_gso(skb)) {
1218 u16 mss = max(skb_shinfo(skb)->gso_size, TX_MSS_MIN);
1219 tx_cmd_b = (mss << TX_CMD_B_MSS_SHIFT) & TX_CMD_B_MSS;
1220
1221 tx_cmd_a |= TX_CMD_A_LSO;
1222 } else {
1223 tx_cmd_b = 0;
1224 }
1225
1226 skb_push(skb, 4);
1227 cpu_to_le32s(&tx_cmd_b);
1228 memcpy(skb->data, &tx_cmd_b, 4);
1229
1230 skb_push(skb, 4);
1231 cpu_to_le32s(&tx_cmd_a);
1232 memcpy(skb->data, &tx_cmd_a, 4);
1233
1234 return skb;
1235}
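/*
 * On transmit the driver prepends two little-endian command words: tx_cmd_a
 * carries the frame length plus the FCS/checksum/LSO flags, and tx_cmd_b the
 * MSS for GSO frames.  These 8 bytes are the SMSC75XX_TX_OVERHEAD reserved in
 * hard_header_len by smsc75xx_bind().
 */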
1236
1237static const struct driver_info smsc75xx_info = {
1238 .description = "smsc75xx USB 2.0 Gigabit Ethernet",
1239 .bind = smsc75xx_bind,
1240 .unbind = smsc75xx_unbind,
1241 .link_reset = smsc75xx_link_reset,
1242 .reset = smsc75xx_reset,
1243 .rx_fixup = smsc75xx_rx_fixup,
1244 .tx_fixup = smsc75xx_tx_fixup,
1245 .status = smsc75xx_status,
1246 .flags = FLAG_ETHER | FLAG_SEND_ZLP,
1247};
1248
1249static const struct usb_device_id products[] = {
1250 {
1251 /* SMSC7500 USB Gigabit Ethernet Device */
1252 USB_DEVICE(USB_VENDOR_ID_SMSC, USB_PRODUCT_ID_LAN7500),
1253 .driver_info = (unsigned long) &smsc75xx_info,
1254 },
1255 {
1256	/* SMSC7505 USB Gigabit Ethernet Device */
1257 USB_DEVICE(USB_VENDOR_ID_SMSC, USB_PRODUCT_ID_LAN7505),
1258 .driver_info = (unsigned long) &smsc75xx_info,
1259 },
1260 { }, /* END */
1261};
1262MODULE_DEVICE_TABLE(usb, products);
1263
1264static struct usb_driver smsc75xx_driver = {
1265 .name = SMSC_CHIPNAME,
1266 .id_table = products,
1267 .probe = usbnet_probe,
1268 .suspend = usbnet_suspend,
1269 .resume = usbnet_resume,
1270 .disconnect = usbnet_disconnect,
1271};
1272
1273static int __init smsc75xx_init(void)
1274{
1275 return usb_register(&smsc75xx_driver);
1276}
1277module_init(smsc75xx_init);
1278
1279static void __exit smsc75xx_exit(void)
1280{
1281 usb_deregister(&smsc75xx_driver);
1282}
1283module_exit(smsc75xx_exit);
1284
1285MODULE_AUTHOR("Nancy Lin");
1286MODULE_AUTHOR("Steve Glendinning <steve.glendinning@smsc.com>");
1287MODULE_DESCRIPTION("SMSC75XX USB 2.0 Gigabit Ethernet Devices");
1288MODULE_LICENSE("GPL");
diff --git a/drivers/net/usb/smsc75xx.h b/drivers/net/usb/smsc75xx.h
new file mode 100644
index 000000000000..16e98c778344
--- /dev/null
+++ b/drivers/net/usb/smsc75xx.h
@@ -0,0 +1,421 @@
1 /***************************************************************************
2 *
3 * Copyright (C) 2007-2010 SMSC
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation; either version 2
8 * of the License, or (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
18 *
19 *****************************************************************************/
20
21#ifndef _SMSC75XX_H
22#define _SMSC75XX_H
23
24/* Tx command words */
25#define TX_CMD_A_LSO (0x08000000)
26#define TX_CMD_A_IPE (0x04000000)
27#define TX_CMD_A_TPE (0x02000000)
28#define TX_CMD_A_IVTG (0x01000000)
29#define TX_CMD_A_RVTG (0x00800000)
30#define TX_CMD_A_FCS (0x00400000)
31#define TX_CMD_A_LEN (0x000FFFFF)
32
33#define TX_CMD_B_MSS (0x3FFF0000)
34#define TX_CMD_B_MSS_SHIFT (16)
35#define TX_MSS_MIN ((u16)8)
36#define TX_CMD_B_VTAG (0x0000FFFF)
37
38/* Rx command words */
39#define RX_CMD_A_ICE (0x80000000)
40#define RX_CMD_A_TCE (0x40000000)
41#define RX_CMD_A_IPV (0x20000000)
42#define RX_CMD_A_PID (0x18000000)
43#define RX_CMD_A_PID_NIP (0x00000000)
44#define RX_CMD_A_PID_TCP (0x08000000)
45#define RX_CMD_A_PID_UDP (0x10000000)
46#define RX_CMD_A_PID_PP (0x18000000)
47#define RX_CMD_A_PFF (0x04000000)
48#define RX_CMD_A_BAM (0x02000000)
49#define RX_CMD_A_MAM (0x01000000)
50#define RX_CMD_A_FVTG (0x00800000)
51#define RX_CMD_A_RED (0x00400000)
52#define RX_CMD_A_RWT (0x00200000)
53#define RX_CMD_A_RUNT (0x00100000)
54#define RX_CMD_A_LONG (0x00080000)
55#define RX_CMD_A_RXE (0x00040000)
56#define RX_CMD_A_DRB (0x00020000)
57#define RX_CMD_A_FCS (0x00010000)
58#define RX_CMD_A_UAM (0x00008000)
59#define RX_CMD_A_LCSM (0x00004000)
60#define RX_CMD_A_LEN (0x00003FFF)
61
62#define RX_CMD_B_CSUM (0xFFFF0000)
63#define RX_CMD_B_CSUM_SHIFT (16)
64#define RX_CMD_B_VTAG (0x0000FFFF)
65
66/* SCSRs */
67#define ID_REV (0x0000)
68
69#define FPGA_REV (0x0004)
70
71#define BOND_CTL (0x0008)
72
73#define INT_STS (0x000C)
74#define INT_STS_RDFO_INT (0x00400000)
75#define INT_STS_TXE_INT (0x00200000)
76#define INT_STS_MACRTO_INT (0x00100000)
77#define INT_STS_TX_DIS_INT (0x00080000)
78#define INT_STS_RX_DIS_INT (0x00040000)
79#define INT_STS_PHY_INT_ (0x00020000)
80#define INT_STS_MAC_ERR_INT (0x00008000)
81#define INT_STS_TDFU (0x00004000)
82#define INT_STS_TDFO (0x00002000)
83#define INT_STS_GPIOS (0x00000FFF)
84#define INT_STS_CLEAR_ALL (0xFFFFFFFF)
85
86#define HW_CFG (0x0010)
87#define HW_CFG_SMDET_STS (0x00008000)
88#define HW_CFG_SMDET_EN (0x00004000)
89#define HW_CFG_EEM (0x00002000)
90#define HW_CFG_RST_PROTECT (0x00001000)
91#define HW_CFG_PORT_SWAP (0x00000800)
92#define HW_CFG_PHY_BOOST (0x00000600)
93#define HW_CFG_PHY_BOOST_NORMAL (0x00000000)
94#define HW_CFG_PHY_BOOST_4 (0x00002000)
95#define HW_CFG_PHY_BOOST_8 (0x00004000)
96#define HW_CFG_PHY_BOOST_12 (0x00006000)
97#define HW_CFG_LEDB (0x00000100)
98#define HW_CFG_BIR (0x00000080)
99#define HW_CFG_SBP (0x00000040)
100#define HW_CFG_IME (0x00000020)
101#define HW_CFG_MEF (0x00000010)
102#define HW_CFG_ETC (0x00000008)
103#define HW_CFG_BCE (0x00000004)
104#define HW_CFG_LRST (0x00000002)
105#define HW_CFG_SRST (0x00000001)
106
107#define PMT_CTL (0x0014)
108#define PMT_CTL_PHY_PWRUP (0x00000400)
109#define PMT_CTL_RES_CLR_WKP_EN (0x00000100)
110#define PMT_CTL_DEV_RDY (0x00000080)
111#define PMT_CTL_SUS_MODE (0x00000060)
112#define PMT_CTL_SUS_MODE_0 (0x00000000)
113#define PMT_CTL_SUS_MODE_1 (0x00000020)
114#define PMT_CTL_SUS_MODE_2 (0x00000040)
115#define PMT_CTL_SUS_MODE_3 (0x00000060)
116#define PMT_CTL_PHY_RST (0x00000010)
117#define PMT_CTL_WOL_EN (0x00000008)
118#define PMT_CTL_ED_EN (0x00000004)
119#define PMT_CTL_WUPS (0x00000003)
120#define PMT_CTL_WUPS_NO (0x00000000)
121#define PMT_CTL_WUPS_ED (0x00000001)
122#define PMT_CTL_WUPS_WOL (0x00000002)
123#define PMT_CTL_WUPS_MULTI (0x00000003)
124
125#define LED_GPIO_CFG (0x0018)
126#define LED_GPIO_CFG_LED2_FUN_SEL (0x80000000)
127#define LED_GPIO_CFG_LED10_FUN_SEL (0x40000000)
128#define LED_GPIO_CFG_LEDGPIO_EN (0x0000F000)
129#define LED_GPIO_CFG_LEDGPIO_EN_0 (0x00001000)
130#define LED_GPIO_CFG_LEDGPIO_EN_1 (0x00002000)
131#define LED_GPIO_CFG_LEDGPIO_EN_2 (0x00004000)
132#define LED_GPIO_CFG_LEDGPIO_EN_3 (0x00008000)
133#define LED_GPIO_CFG_GPBUF (0x00000F00)
134#define LED_GPIO_CFG_GPBUF_0 (0x00000100)
135#define LED_GPIO_CFG_GPBUF_1 (0x00000200)
136#define LED_GPIO_CFG_GPBUF_2 (0x00000400)
137#define LED_GPIO_CFG_GPBUF_3 (0x00000800)
138#define LED_GPIO_CFG_GPDIR (0x000000F0)
139#define LED_GPIO_CFG_GPDIR_0 (0x00000010)
140#define LED_GPIO_CFG_GPDIR_1 (0x00000020)
141#define LED_GPIO_CFG_GPDIR_2 (0x00000040)
142#define LED_GPIO_CFG_GPDIR_3 (0x00000080)
143#define LED_GPIO_CFG_GPDATA (0x0000000F)
144#define LED_GPIO_CFG_GPDATA_0 (0x00000001)
145#define LED_GPIO_CFG_GPDATA_1 (0x00000002)
146#define LED_GPIO_CFG_GPDATA_2 (0x00000004)
147#define LED_GPIO_CFG_GPDATA_3 (0x00000008)
148
149#define GPIO_CFG (0x001C)
150#define GPIO_CFG_SHIFT (24)
151#define GPIO_CFG_GPEN (0xFF000000)
152#define GPIO_CFG_GPBUF (0x00FF0000)
153#define GPIO_CFG_GPDIR (0x0000FF00)
154#define GPIO_CFG_GPDATA (0x000000FF)
155
156#define GPIO_WAKE (0x0020)
157#define GPIO_WAKE_PHY_LINKUP_EN (0x80000000)
158#define GPIO_WAKE_POL (0x0FFF0000)
159#define GPIO_WAKE_POL_SHIFT (16)
160#define GPIO_WAKE_WK (0x00000FFF)
161
162#define DP_SEL (0x0024)
163#define DP_SEL_DPRDY (0x80000000)
164#define DP_SEL_RSEL (0x0000000F)
165#define DP_SEL_URX (0x00000000)
166#define DP_SEL_VHF (0x00000001)
167#define DP_SEL_VHF_HASH_LEN (16)
168#define DP_SEL_VHF_VLAN_LEN (128)
169#define DP_SEL_LSO_HEAD (0x00000002)
170#define DP_SEL_FCT_RX (0x00000003)
171#define DP_SEL_FCT_TX (0x00000004)
172#define DP_SEL_DESCRIPTOR (0x00000005)
173#define DP_SEL_WOL (0x00000006)
174
175#define DP_CMD (0x0028)
176#define DP_CMD_WRITE (0x01)
177#define DP_CMD_READ (0x00)
178
179#define DP_ADDR (0x002C)
180
181#define DP_DATA (0x0030)
182
183#define BURST_CAP (0x0034)
184#define BURST_CAP_MASK (0x0000000F)
185
186#define INT_EP_CTL (0x0038)
187#define INT_EP_CTL_INTEP_ON (0x80000000)
188#define INT_EP_CTL_RDFO_EN (0x00400000)
189#define INT_EP_CTL_TXE_EN (0x00200000)
190#define INT_EP_CTL_MACROTO_EN (0x00100000)
191#define INT_EP_CTL_TX_DIS_EN (0x00080000)
192#define INT_EP_CTL_RX_DIS_EN (0x00040000)
193#define INT_EP_CTL_PHY_EN_ (0x00020000)
194#define INT_EP_CTL_MAC_ERR_EN (0x00008000)
195#define INT_EP_CTL_TDFU_EN (0x00004000)
196#define INT_EP_CTL_TDFO_EN (0x00002000)
197#define INT_EP_CTL_RX_FIFO_EN (0x00001000)
198#define INT_EP_CTL_GPIOX_EN (0x00000FFF)
199
200#define BULK_IN_DLY (0x003C)
201#define BULK_IN_DLY_MASK (0xFFFF)
202
203#define E2P_CMD (0x0040)
204#define E2P_CMD_BUSY (0x80000000)
205#define E2P_CMD_MASK (0x70000000)
206#define E2P_CMD_READ (0x00000000)
207#define E2P_CMD_EWDS (0x10000000)
208#define E2P_CMD_EWEN (0x20000000)
209#define E2P_CMD_WRITE (0x30000000)
210#define E2P_CMD_WRAL (0x40000000)
211#define E2P_CMD_ERASE (0x50000000)
212#define E2P_CMD_ERAL (0x60000000)
213#define E2P_CMD_RELOAD (0x70000000)
214#define E2P_CMD_TIMEOUT (0x00000400)
215#define E2P_CMD_LOADED (0x00000200)
216#define E2P_CMD_ADDR (0x000001FF)
217
218#define MAX_EEPROM_SIZE (512)
219
220#define E2P_DATA (0x0044)
221#define E2P_DATA_MASK_ (0x000000FF)
222
223#define RFE_CTL (0x0060)
224#define RFE_CTL_TCPUDP_CKM (0x00001000)
225#define RFE_CTL_IP_CKM (0x00000800)
226#define RFE_CTL_AB (0x00000400)
227#define RFE_CTL_AM (0x00000200)
228#define RFE_CTL_AU (0x00000100)
229#define RFE_CTL_VS (0x00000080)
230#define RFE_CTL_UF (0x00000040)
231#define RFE_CTL_VF (0x00000020)
232#define RFE_CTL_SPF (0x00000010)
233#define RFE_CTL_MHF (0x00000008)
234#define RFE_CTL_DHF (0x00000004)
235#define RFE_CTL_DPF (0x00000002)
236#define RFE_CTL_RST_RF (0x00000001)
237
238#define VLAN_TYPE (0x0064)
239#define VLAN_TYPE_MASK (0x0000FFFF)
240
241#define FCT_RX_CTL (0x0090)
242#define FCT_RX_CTL_EN (0x80000000)
243#define FCT_RX_CTL_RST (0x40000000)
244#define FCT_RX_CTL_SBF (0x02000000)
245#define FCT_RX_CTL_OVERFLOW (0x01000000)
246#define FCT_RX_CTL_FRM_DROP (0x00800000)
247#define FCT_RX_CTL_RX_NOT_EMPTY (0x00400000)
248#define FCT_RX_CTL_RX_EMPTY (0x00200000)
249#define FCT_RX_CTL_RX_DISABLED (0x00100000)
250#define FCT_RX_CTL_RXUSED (0x0000FFFF)
251
252#define FCT_TX_CTL (0x0094)
253#define FCT_TX_CTL_EN (0x80000000)
254#define FCT_TX_CTL_RST (0x40000000)
255#define FCT_TX_CTL_TX_NOT_EMPTY (0x00400000)
256#define FCT_TX_CTL_TX_EMPTY (0x00200000)
257#define FCT_TX_CTL_TX_DISABLED (0x00100000)
258#define FCT_TX_CTL_TXUSED (0x0000FFFF)
259
260#define FCT_RX_FIFO_END (0x0098)
261#define FCT_RX_FIFO_END_MASK (0x0000007F)
262
263#define FCT_TX_FIFO_END (0x009C)
264#define FCT_TX_FIFO_END_MASK (0x0000003F)
265
266#define FCT_FLOW (0x00A0)
267#define FCT_FLOW_THRESHOLD_OFF (0x00007F00)
268#define FCT_FLOW_THRESHOLD_OFF_SHIFT (8)
269#define FCT_FLOW_THRESHOLD_ON (0x0000007F)
270
271/* MAC CSRs */
272#define MAC_CR (0x100)
273#define MAC_CR_ADP (0x00002000)
274#define MAC_CR_ADD (0x00001000)
275#define MAC_CR_ASD (0x00000800)
276#define MAC_CR_INT_LOOP (0x00000400)
277#define MAC_CR_BOLMT (0x000000C0)
278#define MAC_CR_FDPX (0x00000008)
279#define MAC_CR_CFG (0x00000006)
280#define MAC_CR_CFG_10 (0x00000000)
281#define MAC_CR_CFG_100 (0x00000002)
282#define MAC_CR_CFG_1000 (0x00000004)
283#define MAC_CR_RST (0x00000001)
284
285#define MAC_RX (0x104)
286#define MAC_RX_MAX_SIZE (0x3FFF0000)
287#define MAC_RX_MAX_SIZE_SHIFT (16)
288#define MAC_RX_FCS_STRIP (0x00000010)
289#define MAC_RX_FSE (0x00000004)
290#define MAC_RX_RXD (0x00000002)
291#define MAC_RX_RXEN (0x00000001)
292
293#define MAC_TX (0x108)
294#define MAC_TX_BFCS (0x00000004)
295#define MAC_TX_TXD (0x00000002)
296#define MAC_TX_TXEN (0x00000001)
297
298#define FLOW (0x10C)
299#define FLOW_FORCE_FC (0x80000000)
300#define FLOW_TX_FCEN (0x40000000)
301#define FLOW_RX_FCEN (0x20000000)
302#define FLOW_FPF (0x10000000)
303#define FLOW_PAUSE_TIME (0x0000FFFF)
304
305#define RAND_SEED (0x110)
306#define RAND_SEED_MASK (0x0000FFFF)
307
308#define ERR_STS (0x114)
309#define ERR_STS_FCS_ERR (0x00000100)
310#define ERR_STS_LFRM_ERR (0x00000080)
311#define ERR_STS_RUNT_ERR (0x00000040)
312#define ERR_STS_COLLISION_ERR (0x00000010)
313#define ERR_STS_ALIGN_ERR (0x00000008)
314#define ERR_STS_URUN_ERR (0x00000004)
315
316#define RX_ADDRH (0x118)
317#define RX_ADDRH_MASK (0x0000FFFF)
318
319#define RX_ADDRL (0x11C)
320
321#define MII_ACCESS (0x120)
322#define MII_ACCESS_PHY_ADDR (0x0000F800)
323#define MII_ACCESS_PHY_ADDR_SHIFT (11)
324#define MII_ACCESS_REG_ADDR (0x000007C0)
325#define MII_ACCESS_REG_ADDR_SHIFT (6)
326#define MII_ACCESS_READ (0x00000000)
327#define MII_ACCESS_WRITE (0x00000002)
328#define MII_ACCESS_BUSY (0x00000001)
329
330#define MII_DATA (0x124)
331#define MII_DATA_MASK (0x0000FFFF)
332
333#define WUCSR (0x140)
334#define WUCSR_PFDA_FR (0x00000080)
335#define WUCSR_WUFR (0x00000040)
336#define WUCSR_MPR (0x00000020)
337#define WUCSR_BCAST_FR (0x00000010)
338#define WUCSR_PFDA_EN (0x00000008)
339#define WUCSR_WUEN (0x00000004)
340#define WUCSR_MPEN (0x00000002)
341#define WUCSR_BCST_EN (0x00000001)
342
343#define WUF_CFGX (0x144)
344#define WUF_CFGX_EN (0x80000000)
345#define WUF_CFGX_ATYPE (0x03000000)
346#define WUF_CFGX_ATYPE_UNICAST (0x00000000)
347#define WUF_CFGX_ATYPE_MULTICAST (0x02000000)
348#define WUF_CFGX_ATYPE_ALL (0x03000000)
349#define WUF_CFGX_PATTERN_OFFSET (0x007F0000)
350#define WUF_CFGX_PATTERN_OFFSET_SHIFT (16)
351#define WUF_CFGX_CRC16 (0x0000FFFF)
352#define WUF_NUM (8)
353
354#define WUF_MASKX (0x170)
355#define WUF_MASKX_AVALID (0x80000000)
356#define WUF_MASKX_ATYPE (0x40000000)
357
358#define ADDR_FILTX (0x300)
359#define ADDR_FILTX_FB_VALID (0x80000000)
360#define ADDR_FILTX_FB_TYPE (0x40000000)
361#define ADDR_FILTX_FB_ADDRHI (0x0000FFFF)
362#define ADDR_FILTX_SB_ADDRLO (0xFFFFFFFF)
363
364#define WUCSR2 (0x500)
365#define WUCSR2_NS_RCD (0x00000040)
366#define WUCSR2_ARP_RCD (0x00000020)
367#define WUCSR2_TCPSYN_RCD (0x00000010)
368#define WUCSR2_NS_OFFLOAD (0x00000004)
369#define WUCSR2_ARP_OFFLOAD (0x00000002)
370#define WUCSR2_TCPSYN_OFFLOAD (0x00000001)
371
372#define WOL_FIFO_STS (0x504)
373
374#define IPV6_ADDRX (0x510)
375
376#define IPV4_ADDRX (0x590)
377
378
379/* Vendor-specific PHY Definitions */
380
381/* Mode Control/Status Register */
382#define PHY_MODE_CTRL_STS (17)
383#define MODE_CTRL_STS_EDPWRDOWN ((u16)0x2000)
384#define MODE_CTRL_STS_ENERGYON ((u16)0x0002)
385
386#define PHY_INT_SRC (29)
387#define PHY_INT_SRC_ENERGY_ON ((u16)0x0080)
388#define PHY_INT_SRC_ANEG_COMP ((u16)0x0040)
389#define PHY_INT_SRC_REMOTE_FAULT ((u16)0x0020)
390#define PHY_INT_SRC_LINK_DOWN ((u16)0x0010)
391
392#define PHY_INT_MASK (30)
393#define PHY_INT_MASK_ENERGY_ON ((u16)0x0080)
394#define PHY_INT_MASK_ANEG_COMP ((u16)0x0040)
395#define PHY_INT_MASK_REMOTE_FAULT ((u16)0x0020)
396#define PHY_INT_MASK_LINK_DOWN ((u16)0x0010)
397#define PHY_INT_MASK_DEFAULT (PHY_INT_MASK_ANEG_COMP | \
398 PHY_INT_MASK_LINK_DOWN)
399
400#define PHY_SPECIAL (31)
401#define PHY_SPECIAL_SPD ((u16)0x001C)
402#define PHY_SPECIAL_SPD_10HALF ((u16)0x0004)
403#define PHY_SPECIAL_SPD_10FULL ((u16)0x0014)
404#define PHY_SPECIAL_SPD_100HALF ((u16)0x0008)
405#define PHY_SPECIAL_SPD_100FULL ((u16)0x0018)
406
407/* USB Vendor Requests */
408#define USB_VENDOR_REQUEST_WRITE_REGISTER 0xA0
409#define USB_VENDOR_REQUEST_READ_REGISTER 0xA1
410#define USB_VENDOR_REQUEST_GET_STATS 0xA2
411
412/* Interrupt Endpoint status word bitfields */
413#define INT_ENP_RDFO_INT ((u32)BIT(22))
414#define INT_ENP_TXE_INT ((u32)BIT(21))
415#define INT_ENP_TX_DIS_INT ((u32)BIT(19))
416#define INT_ENP_RX_DIS_INT ((u32)BIT(18))
417#define INT_ENP_PHY_INT ((u32)BIT(17))
418#define INT_ENP_MAC_ERR_INT ((u32)BIT(15))
419#define INT_ENP_RX_FIFO_DATA_INT ((u32)BIT(12))
420
421#endif /* _SMSC75XX_H */
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index df9179a1c93b..73f9a31cf94d 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -709,6 +709,8 @@ static void smsc95xx_start_rx_path(struct usbnet *dev)
 
 static int smsc95xx_phy_initialize(struct usbnet *dev)
 {
+	int bmcr, timeout = 0;
+
 	/* Initialize MII structure */
 	dev->mii.dev = dev->net;
 	dev->mii.mdio_read = smsc95xx_mdio_read;
@@ -717,7 +719,20 @@ static int smsc95xx_phy_initialize(struct usbnet *dev)
 	dev->mii.reg_num_mask = 0x1f;
 	dev->mii.phy_id = SMSC95XX_INTERNAL_PHY_ID;
 
+	/* reset phy and wait for reset to complete */
 	smsc95xx_mdio_write(dev->net, dev->mii.phy_id, MII_BMCR, BMCR_RESET);
+
+	do {
+		msleep(10);
+		bmcr = smsc95xx_mdio_read(dev->net, dev->mii.phy_id, MII_BMCR);
+		timeout++;
+	} while ((bmcr & MII_BMCR) && (timeout < 100));
+
+	if (timeout >= 100) {
+		netdev_warn(dev->net, "timeout on PHY Reset");
+		return -EIO;
+	}
+
 	smsc95xx_mdio_write(dev->net, dev->mii.phy_id, MII_ADVERTISE,
 		ADVERTISE_ALL | ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP |
 		ADVERTISE_PAUSE_ASYM);
@@ -1174,9 +1189,21 @@ static struct sk_buff *smsc95xx_tx_fixup(struct usbnet *dev,
 	}
 
 	if (csum) {
-		u32 csum_preamble = smsc95xx_calc_csum_preamble(skb);
-		skb_push(skb, 4);
-		memcpy(skb->data, &csum_preamble, 4);
+		if (skb->len <= 45) {
+			/* workaround - hardware tx checksum does not work
+			 * properly with extremely small packets */
+			long csstart = skb->csum_start - skb_headroom(skb);
+			__wsum calc = csum_partial(skb->data + csstart,
+				skb->len - csstart, 0);
+			*((__sum16 *)(skb->data + csstart
+				+ skb->csum_offset)) = csum_fold(calc);
+
+			csum = false;
+		} else {
+			u32 csum_preamble = smsc95xx_calc_csum_preamble(skb);
+			skb_push(skb, 4);
+			memcpy(skb->data, &csum_preamble, 4);
+		}
 	}
 
 	skb_push(skb, 4);
diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c
index 3a486f3bad3d..bc278d4ee89d 100644
--- a/drivers/net/via-velocity.c
+++ b/drivers/net/via-velocity.c
@@ -812,7 +812,7 @@ static void set_mii_flow_control(struct velocity_info *vptr)
 
 	case FLOW_CNTL_TX_RX:
 		MII_REG_BITS_ON(ANAR_PAUSE, MII_REG_ANAR, vptr->mac_regs);
-		MII_REG_BITS_ON(ANAR_ASMDIR, MII_REG_ANAR, vptr->mac_regs);
+		MII_REG_BITS_OFF(ANAR_ASMDIR, MII_REG_ANAR, vptr->mac_regs);
 		break;
 
 	case FLOW_CNTL_DISABLE:
diff --git a/drivers/net/wan/cosa.c b/drivers/net/wan/cosa.c
index b36bf96eb502..f0bd70fb650c 100644
--- a/drivers/net/wan/cosa.c
+++ b/drivers/net/wan/cosa.c
@@ -811,7 +811,7 @@ static ssize_t cosa_read(struct file *file,
 	cosa_enable_rx(chan);
 	spin_lock_irqsave(&cosa->lock, flags);
 	add_wait_queue(&chan->rxwaitq, &wait);
-	while(!chan->rx_status) {
+	while (!chan->rx_status) {
 		current->state = TASK_INTERRUPTIBLE;
 		spin_unlock_irqrestore(&cosa->lock, flags);
 		schedule();
@@ -896,7 +896,7 @@ static ssize_t cosa_write(struct file *file,
 
 	spin_lock_irqsave(&cosa->lock, flags);
 	add_wait_queue(&chan->txwaitq, &wait);
-	while(!chan->tx_status) {
+	while (!chan->tx_status) {
 		current->state = TASK_INTERRUPTIBLE;
 		spin_unlock_irqrestore(&cosa->lock, flags);
 		schedule();
@@ -1153,7 +1153,7 @@ static int cosa_ioctl_common(struct cosa_data *cosa,
 	struct channel_data *channel, unsigned int cmd, unsigned long arg)
 {
 	void __user *argp = (void __user *)arg;
-	switch(cmd) {
+	switch (cmd) {
 	case COSAIORSET:	/* Reset the device */
 		if (!capable(CAP_NET_ADMIN))
 			return -EACCES;
@@ -1704,7 +1704,7 @@ static inline void tx_interrupt(struct cosa_data *cosa, int status)
 		spin_unlock_irqrestore(&cosa->lock, flags);
 		return;
 	}
-	while(1) {
+	while (1) {
 		cosa->txchan++;
 		i++;
 		if (cosa->txchan >= cosa->nchannels)
@@ -2010,7 +2010,7 @@ again:
 static void debug_status_in(struct cosa_data *cosa, int status)
 {
 	char *s;
-	switch(status & SR_CMD_FROM_SRP_MASK) {
+	switch (status & SR_CMD_FROM_SRP_MASK) {
 	case SR_UP_REQUEST:
 		s = "RX_REQ";
 		break;
diff --git a/drivers/net/wan/hdlc_cisco.c b/drivers/net/wan/hdlc_cisco.c
index f1bff98acd1f..1ceccf1ca6c7 100644
--- a/drivers/net/wan/hdlc_cisco.c
+++ b/drivers/net/wan/hdlc_cisco.c
@@ -141,7 +141,7 @@ static __be16 cisco_type_trans(struct sk_buff *skb, struct net_device *dev)
 	    data->address != CISCO_UNICAST)
 		return cpu_to_be16(ETH_P_HDLC);
 
-	switch(data->protocol) {
+	switch (data->protocol) {
 	case cpu_to_be16(ETH_P_IP):
 	case cpu_to_be16(ETH_P_IPX):
 	case cpu_to_be16(ETH_P_IPV6):
@@ -190,7 +190,7 @@ static int cisco_rx(struct sk_buff *skb)
 	cisco_data = (struct cisco_packet*)(skb->data + sizeof
 					    (struct hdlc_header));
 
-	switch(ntohl (cisco_data->type)) {
+	switch (ntohl (cisco_data->type)) {
 	case CISCO_ADDR_REQ: /* Stolen from syncppp.c :-) */
 		in_dev = dev->ip_ptr;
 		addr = 0;
@@ -245,8 +245,8 @@ static int cisco_rx(struct sk_buff *skb)
 
 		dev_kfree_skb_any(skb);
 		return NET_RX_SUCCESS;
-	} /* switch(keepalive type) */
-	} /* switch(protocol) */
+	} /* switch (keepalive type) */
+	} /* switch (protocol) */
 
 	printk(KERN_INFO "%s: Unsupported protocol %x\n", dev->name,
 	       ntohs(data->protocol));
diff --git a/drivers/net/wan/hdlc_x25.c b/drivers/net/wan/hdlc_x25.c
index aa9248f8eb1a..6e1ca256effd 100644
--- a/drivers/net/wan/hdlc_x25.c
+++ b/drivers/net/wan/hdlc_x25.c
@@ -202,10 +202,10 @@ static int x25_ioctl(struct net_device *dev, struct ifreq *ifr)
 		return 0; /* return protocol only, no settable parameters */
 
 	case IF_PROTO_X25:
-		if(!capable(CAP_NET_ADMIN))
+		if (!capable(CAP_NET_ADMIN))
 			return -EPERM;
 
-		if(dev->flags & IFF_UP)
+		if (dev->flags & IFF_UP)
 			return -EBUSY;
 
 		result=hdlc->attach(dev, ENCODING_NRZ,PARITY_CRC16_PR1_CCITT);
diff --git a/drivers/net/wimax/i2400m/fw.c b/drivers/net/wimax/i2400m/fw.c
index e803a7dc6502..25c24f0368d8 100644
--- a/drivers/net/wimax/i2400m/fw.c
+++ b/drivers/net/wimax/i2400m/fw.c
@@ -612,7 +612,7 @@ ssize_t i2400m_bm_cmd(struct i2400m *i2400m,
 		goto error_wait_for_ack;
 	}
 	rx_bytes = result;
-	/* verify the ack and read more if neccessary [result is the
+	/* verify the ack and read more if necessary [result is the
 	 * final amount of bytes we get in the ack] */
 	result = __i2400m_bm_ack_verify(i2400m, opcode, ack, ack_size, flags);
 	if (result < 0)
diff --git a/drivers/net/wimax/i2400m/i2400m.h b/drivers/net/wimax/i2400m/i2400m.h
index 04df9bbe340f..820b128705ec 100644
--- a/drivers/net/wimax/i2400m/i2400m.h
+++ b/drivers/net/wimax/i2400m/i2400m.h
@@ -627,7 +627,7 @@ enum i2400m_bm_cmd_flags {
  * @I2400M_BRI_NO_REBOOT: Do not reboot the device and proceed
  *     directly to wait for a reboot barker from the device.
  * @I2400M_BRI_MAC_REINIT: We need to reinitialize the boot
- *     rom after reading the MAC adress. This is quite a dirty hack,
+ *     rom after reading the MAC address. This is quite a dirty hack,
  *     if you ask me -- the device requires the bootrom to be
  *     intialized after reading the MAC address.
  */
diff --git a/drivers/net/wimax/i2400m/sdio.c b/drivers/net/wimax/i2400m/sdio.c
index 76a50ac02ebb..14f876b1358b 100644
--- a/drivers/net/wimax/i2400m/sdio.c
+++ b/drivers/net/wimax/i2400m/sdio.c
@@ -304,7 +304,7 @@ error_kzalloc:
  *
  * The device will be fully reset internally, but won't be
  * disconnected from the bus (so no reenumeration will
- * happen). Firmware upload will be neccessary.
+ * happen). Firmware upload will be necessary.
  *
  * The device will send a reboot barker that will trigger the driver
  * to reinitialize the state via __i2400m_dev_reset_handle.
@@ -314,7 +314,7 @@ error_kzalloc:
  *
  * The device will be fully reset internally, disconnected from the
  * bus an a reenumeration will happen. Firmware upload will be
- * neccessary. Thus, we don't do any locking or struct
+ * necessary. Thus, we don't do any locking or struct
  * reinitialization, as we are going to be fully disconnected and
  * reenumerated.
  *
diff --git a/drivers/net/wimax/i2400m/usb.c b/drivers/net/wimax/i2400m/usb.c
index 98f4f8c5fb68..99f04c475898 100644
--- a/drivers/net/wimax/i2400m/usb.c
+++ b/drivers/net/wimax/i2400m/usb.c
@@ -246,7 +246,7 @@ error_kzalloc:
  *
  * The device will be fully reset internally, but won't be
  * disconnected from the USB bus (so no reenumeration will
- * happen). Firmware upload will be neccessary.
+ * happen). Firmware upload will be necessary.
  *
  * The device will send a reboot barker in the notification endpoint
  * that will trigger the driver to reinitialize the state
@@ -257,7 +257,7 @@ error_kzalloc:
  *
  * The device will be fully reset internally, disconnected from the
  * USB bus an a reenumeration will happen. Firmware upload will be
- * neccessary. Thus, we don't do any locking or struct
+ * necessary. Thus, we don't do any locking or struct
  * reinitialization, as we are going to be fully disconnected and
  * reenumerated.
  *
diff --git a/drivers/net/wireless/ath/ar9170/main.c b/drivers/net/wireless/ath/ar9170/main.c
index f4650fcdebc9..257c734733d1 100644
--- a/drivers/net/wireless/ath/ar9170/main.c
+++ b/drivers/net/wireless/ath/ar9170/main.c
@@ -394,7 +394,7 @@ static void ar9170_tx_fake_ampdu_status(struct ar9170 *ar)
 		ieee80211_tx_status_irqsafe(ar->hw, skb);
 	}
 
-	for_each_bit(i, &queue_bitmap, BITS_PER_BYTE) {
+	for_each_set_bit(i, &queue_bitmap, BITS_PER_BYTE) {
 #ifdef AR9170_QUEUE_STOP_DEBUG
 		printk(KERN_DEBUG "%s: wake queue %d\n",
 		       wiphy_name(ar->hw->wiphy), i);
@@ -2512,7 +2512,7 @@ void *ar9170_alloc(size_t priv_size)
 	/*
 	 * this buffer is used for rx stream reconstruction.
 	 * Under heavy load this device (or the transport layer?)
-	 * tends to split the streams into seperate rx descriptors.
+	 * tends to split the streams into separate rx descriptors.
 	 */
 
 	skb = __dev_alloc_skb(AR9170_MAX_RX_BUFFER_SIZE, GFP_KERNEL);
diff --git a/drivers/net/wireless/iwmc3200wifi/debugfs.c b/drivers/net/wireless/iwmc3200wifi/debugfs.c
index be992ca41cf1..c29c994de0e2 100644
--- a/drivers/net/wireless/iwmc3200wifi/debugfs.c
+++ b/drivers/net/wireless/iwmc3200wifi/debugfs.c
@@ -89,7 +89,7 @@ static int iwm_debugfs_dbg_modules_write(void *data, u64 val)
 	for (i = 0; i < __IWM_DM_NR; i++)
 		iwm->dbg.dbg_module[i] = 0;
 
-	for_each_bit(bit, &iwm->dbg.dbg_modules, __IWM_DM_NR)
+	for_each_set_bit(bit, &iwm->dbg.dbg_modules, __IWM_DM_NR)
 		iwm->dbg.dbg_module[bit] = iwm->dbg.dbg_level;
 
 	return 0;
diff --git a/drivers/net/wireless/iwmc3200wifi/lmac.h b/drivers/net/wireless/iwmc3200wifi/lmac.h
index a3a79b5e2898..a855a99e49b8 100644
--- a/drivers/net/wireless/iwmc3200wifi/lmac.h
+++ b/drivers/net/wireless/iwmc3200wifi/lmac.h
@@ -262,7 +262,7 @@ struct iwm_ct_kill_cfg_cmd {
 
 /* Power Management */
 #define POWER_TABLE_CMD			0x77
-#define SAVE_RESTORE_ADRESS_CMD		0x78
+#define SAVE_RESTORE_ADDRESS_CMD	0x78
 #define REPLY_WATERMARK_CMD		0x79
 #define PM_DEBUG_STATISTIC_NOTIFIC	0x7B
 #define PD_FLUSH_N_NOTIFICATION		0x7C
diff --git a/drivers/net/wireless/iwmc3200wifi/rx.c b/drivers/net/wireless/iwmc3200wifi/rx.c
index ad8f7eabb5aa..8456b4dbd146 100644
--- a/drivers/net/wireless/iwmc3200wifi/rx.c
+++ b/drivers/net/wireless/iwmc3200wifi/rx.c
@@ -1116,7 +1116,7 @@ static int iwm_ntf_stop_resume_tx(struct iwm_priv *iwm, u8 *buf,
 		return -EINVAL;
 	}
 
-	for_each_bit(bit, (unsigned long *)&tid_msk, IWM_UMAC_TID_NR) {
+	for_each_set_bit(bit, (unsigned long *)&tid_msk, IWM_UMAC_TID_NR) {
 		tid_info = &sta_info->tid_info[bit];
 
 		mutex_lock(&tid_info->mutex);
diff --git a/drivers/net/wireless/rt2x00/rt2500usb.c b/drivers/net/wireless/rt2x00/rt2500usb.c
index dbaa78138437..13444b6b3e37 100644
--- a/drivers/net/wireless/rt2x00/rt2500usb.c
+++ b/drivers/net/wireless/rt2x00/rt2500usb.c
@@ -368,7 +368,7 @@ static int rt2500usb_config_key(struct rt2x00_dev *rt2x00dev,
 
 		/*
 		 * The encryption key doesn't fit within the CSR cache,
-		 * this means we should allocate it seperately and use
+		 * this means we should allocate it separately and use
 		 * rt2x00usb_vendor_request() to send the key to the hardware.
 		 */
 		reg = KEY_ENTRY(key->hw_key_idx);
@@ -382,7 +382,7 @@ static int rt2500usb_config_key(struct rt2x00_dev *rt2x00dev,
 		/*
 		 * The driver does not support the IV/EIV generation
 		 * in hardware. However it demands the data to be provided
-		 * both seperately as well as inside the frame.
+		 * both separately as well as inside the frame.
 		 * We already provided the CONFIG_CRYPTO_COPY_IV to rt2x00lib
 		 * to ensure rt2x00lib will not strip the data from the
 		 * frame after the copy, now we must tell mac80211
diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c
index 5e4ee2023fcf..d27d7d5d850c 100644
--- a/drivers/net/wireless/rt2x00/rt2800usb.c
+++ b/drivers/net/wireless/rt2x00/rt2800usb.c
@@ -99,7 +99,7 @@ static int rt2800usb_check_firmware(struct rt2x00_dev *rt2x00dev,
 	 * There are 2 variations of the rt2870 firmware.
 	 * a) size: 4kb
 	 * b) size: 8kb
-	 * Note that (b) contains 2 seperate firmware blobs of 4k
+	 * Note that (b) contains 2 separate firmware blobs of 4k
 	 * within the file. The first blob is the same firmware as (a),
 	 * but the second blob is for the additional chipsets.
 	 */
@@ -117,7 +117,7 @@ static int rt2800usb_check_firmware(struct rt2x00_dev *rt2x00dev,
 
 	/*
 	 * 8kb firmware files must be checked as if it were
-	 * 2 seperate firmware files.
+	 * 2 separate firmware files.
 	 */
 	while (offset < len) {
 		if (!rt2800usb_check_crc(data + offset, 4096))
diff --git a/drivers/net/wireless/rt2x00/rt2x00debug.c b/drivers/net/wireless/rt2x00/rt2x00debug.c
index 70c04c282efc..28a1c46ec4eb 100644
--- a/drivers/net/wireless/rt2x00/rt2x00debug.c
+++ b/drivers/net/wireless/rt2x00/rt2x00debug.c
@@ -109,7 +109,7 @@ struct rt2x00debug_intf {
 
 	/*
 	 * HW crypto statistics.
-	 * All statistics are stored seperately per cipher type.
+	 * All statistics are stored separately per cipher type.
 	 */
 	struct rt2x00debug_crypto crypto_stats[CIPHER_MAX];
 
diff --git a/drivers/net/wireless/rt2x00/rt2x00dev.c b/drivers/net/wireless/rt2x00/rt2x00dev.c
index b93731b79903..dd5ab8fe2321 100644
--- a/drivers/net/wireless/rt2x00/rt2x00dev.c
+++ b/drivers/net/wireless/rt2x00/rt2x00dev.c
@@ -394,7 +394,7 @@ void rt2x00lib_rxdone(struct rt2x00_dev *rt2x00dev,
 		/*
 		 * Hardware might have stripped the IV/EIV/ICV data,
 		 * in that case it is possible that the data was
-		 * provided seperately (through hardware descriptor)
+		 * provided separately (through hardware descriptor)
 		 * in which case we should reinsert the data into the frame.
 		 */
 		if ((rxdesc.dev_flags & RXDONE_CRYPTO_IV) &&
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
index 0b4801a14601..5b6b789cad3d 100644
--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
+++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
@@ -497,7 +497,7 @@ int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb,
 	/*
 	 * When hardware encryption is supported, and this frame
 	 * is to be encrypted, we should strip the IV/EIV data from
-	 * the frame so we can provide it to the driver seperately.
+	 * the frame so we can provide it to the driver separately.
 	 */
 	if (test_bit(ENTRY_TXD_ENCRYPT, &txdesc.flags) &&
 	    !test_bit(ENTRY_TXD_ENCRYPT_IV, &txdesc.flags)) {
diff --git a/drivers/net/wireless/rt2x00/rt61pci.c b/drivers/net/wireless/rt2x00/rt61pci.c
index e2da928dd9f0..177472742172 100644
--- a/drivers/net/wireless/rt2x00/rt61pci.c
+++ b/drivers/net/wireless/rt2x00/rt61pci.c
@@ -476,7 +476,7 @@ static int rt61pci_config_pairwise_key(struct rt2x00_dev *rt2x00dev,
 		 * The driver does not support the IV/EIV generation
 		 * in hardware. However it doesn't support the IV/EIV
 		 * inside the ieee80211 frame either, but requires it
-		 * to be provided seperately for the descriptor.
+		 * to be provided separately for the descriptor.
 		 * rt2x00lib will cut the IV/EIV data out of all frames
 		 * given to us by mac80211, but we must tell mac80211
 		 * to generate the IV/EIV data.
diff --git a/drivers/net/wireless/rt2x00/rt73usb.c b/drivers/net/wireless/rt2x00/rt73usb.c
index 47f3e4a26d77..290d70bc5d22 100644
--- a/drivers/net/wireless/rt2x00/rt73usb.c
+++ b/drivers/net/wireless/rt2x00/rt73usb.c
@@ -339,7 +339,7 @@ static int rt73usb_config_shared_key(struct rt2x00_dev *rt2x00dev,
 		 * The driver does not support the IV/EIV generation
 		 * in hardware. However it doesn't support the IV/EIV
 		 * inside the ieee80211 frame either, but requires it
-		 * to be provided seperately for the descriptor.
+		 * to be provided separately for the descriptor.
 		 * rt2x00lib will cut the IV/EIV data out of all frames
 		 * given to us by mac80211, but we must tell mac80211
 		 * to generate the IV/EIV data.
@@ -439,7 +439,7 @@ static int rt73usb_config_pairwise_key(struct rt2x00_dev *rt2x00dev,
 		 * The driver does not support the IV/EIV generation
 		 * in hardware. However it doesn't support the IV/EIV
 		 * inside the ieee80211 frame either, but requires it
-		 * to be provided seperately for the descriptor.
+		 * to be provided separately for the descriptor.
 		 * rt2x00lib will cut the IV/EIV data out of all frames
 		 * given to us by mac80211, but we must tell mac80211
 		 * to generate the IV/EIV data.
@@ -1661,7 +1661,7 @@ static void rt73usb_fill_rxdone(struct queue_entry *entry,
 
 	/*
 	 * Hardware has stripped IV/EIV data from 802.11 frame during
-	 * decryption. It has provided the data seperately but rt2x00lib
+	 * decryption. It has provided the data separately but rt2x00lib
 	 * should decide if it should be reinserted.
 	 */
 	rxdesc->flags |= RX_FLAG_IV_STRIPPED;
diff --git a/drivers/net/wireless/zd1211rw/zd_mac.c b/drivers/net/wireless/zd1211rw/zd_mac.c
index e24099613d91..00e09e26c826 100644
--- a/drivers/net/wireless/zd1211rw/zd_mac.c
+++ b/drivers/net/wireless/zd1211rw/zd_mac.c
@@ -374,7 +374,7 @@ static void zd_mac_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb,
  * zd_mac_tx_failed - callback for failed frames
  * @dev: the mac80211 wireless device
  *
- * This function is called if a frame couldn't be successfully be
+ * This function is called if a frame couldn't be successfully
  * transferred. The first frame from the tx queue, will be selected and
  * reported as error to the upper layers.
  */