Diffstat (limited to 'drivers/net/qlge')
-rw-r--r--  drivers/net/qlge/qlge.h         | 161
-rw-r--r--  drivers/net/qlge/qlge_dbg.c     |   9
-rw-r--r--  drivers/net/qlge/qlge_ethtool.c |   3
-rw-r--r--  drivers/net/qlge/qlge_main.c    | 630
-rw-r--r--  drivers/net/qlge/qlge_mpi.c     | 835
5 files changed, 1270 insertions, 368 deletions
diff --git a/drivers/net/qlge/qlge.h b/drivers/net/qlge/qlge.h
index aff9c5fec738..fcb159e4df54 100644
--- a/drivers/net/qlge/qlge.h
+++ b/drivers/net/qlge/qlge.h
@@ -28,8 +28,8 @@
28 } while (0) 28 } while (0)
29 29
30#define QLGE_VENDOR_ID 0x1077 30#define QLGE_VENDOR_ID 0x1077
31#define QLGE_DEVICE_ID 0x8012 31#define QLGE_DEVICE_ID_8012 0x8012
32 32#define QLGE_DEVICE_ID_8000 0x8000
33#define MAX_CPUS 8 33#define MAX_CPUS 8
34#define MAX_TX_RINGS MAX_CPUS 34#define MAX_TX_RINGS MAX_CPUS
35#define MAX_RX_RINGS ((MAX_CPUS * 2) + 1) 35#define MAX_RX_RINGS ((MAX_CPUS * 2) + 1)
@@ -164,7 +164,7 @@ enum {
164 CSR_RP = (1 << 10), 164 CSR_RP = (1 << 10),
165 CSR_CMD_PARM_SHIFT = 22, 165 CSR_CMD_PARM_SHIFT = 22,
166 CSR_CMD_NOP = 0x00000000, 166 CSR_CMD_NOP = 0x00000000,
167 CSR_CMD_SET_RST = 0x1000000, 167 CSR_CMD_SET_RST = 0x10000000,
168 CSR_CMD_CLR_RST = 0x20000000, 168 CSR_CMD_CLR_RST = 0x20000000,
169 CSR_CMD_SET_PAUSE = 0x30000000, 169 CSR_CMD_SET_PAUSE = 0x30000000,
170 CSR_CMD_CLR_PAUSE = 0x40000000, 170 CSR_CMD_CLR_PAUSE = 0x40000000,
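The hunk above fixes CSR_CMD_SET_RST, which had lost a zero: the CSR command opcode lives in the top nibble, and 0x1000000 set bit 24 instead of bit 28, so it did not line up with CSR_CMD_CLR_RST and the other commands. A standalone sanity check of the encoding (not part of the commit):

    #include <assert.h>

    #define CSR_CMD_SET_RST    0x10000000u  /* corrected value */
    #define CSR_CMD_CLR_RST    0x20000000u
    #define CSR_CMD_SET_PAUSE  0x30000000u

    int main(void)
    {
            /* every command opcode occupies bits 28-31 */
            assert((CSR_CMD_SET_RST >> 28) == 0x1);
            assert((CSR_CMD_CLR_RST >> 28) == 0x2);
            assert((CSR_CMD_SET_PAUSE >> 28) == 0x3);
            assert((0x1000000u >> 28) == 0); /* the old value carried no opcode */
            return 0;
    }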
@@ -424,7 +424,7 @@ enum {
424 RX_SYMBOL_ERR = 0x00000370, 424 RX_SYMBOL_ERR = 0x00000370,
425 RX_MAC_ERR = 0x00000378, 425 RX_MAC_ERR = 0x00000378,
426 RX_CTL_PKTS = 0x00000380, 426 RX_CTL_PKTS = 0x00000380,
427 RX_PAUSE_PKTS = 0x00000384, 427 RX_PAUSE_PKTS = 0x00000388,
428 RX_64_PKTS = 0x00000390, 428 RX_64_PKTS = 0x00000390,
429 RX_65_TO_127_PKTS = 0x00000398, 429 RX_65_TO_127_PKTS = 0x00000398,
430 RX_128_255_PKTS = 0x000003a0, 430 RX_128_255_PKTS = 0x000003a0,
@@ -733,6 +733,11 @@ enum {
733 AEN_LINK_DOWN = 0x00008012, 733 AEN_LINK_DOWN = 0x00008012,
734 AEN_IDC_CMPLT = 0x00008100, 734 AEN_IDC_CMPLT = 0x00008100,
735 AEN_IDC_REQ = 0x00008101, 735 AEN_IDC_REQ = 0x00008101,
736 AEN_IDC_EXT = 0x00008102,
737 AEN_DCBX_CHG = 0x00008110,
738 AEN_AEN_LOST = 0x00008120,
739 AEN_AEN_SFP_IN = 0x00008130,
740 AEN_AEN_SFP_OUT = 0x00008131,
736 AEN_FW_INIT_DONE = 0x00008400, 741 AEN_FW_INIT_DONE = 0x00008400,
737 AEN_FW_INIT_FAIL = 0x00008401, 742 AEN_FW_INIT_FAIL = 0x00008401,
738 743
@@ -742,40 +747,48 @@ enum {
742 MB_CMD_MB_TEST = 0x00000006, 747 MB_CMD_MB_TEST = 0x00000006,
743 MB_CMD_CSUM_TEST = 0x00000007, /* Verify Checksum */ 748 MB_CMD_CSUM_TEST = 0x00000007, /* Verify Checksum */
744 MB_CMD_ABOUT_FW = 0x00000008, 749 MB_CMD_ABOUT_FW = 0x00000008,
750 MB_CMD_COPY_RISC_RAM = 0x0000000a,
745 MB_CMD_LOAD_RISC_RAM = 0x0000000b, 751 MB_CMD_LOAD_RISC_RAM = 0x0000000b,
746 MB_CMD_DUMP_RISC_RAM = 0x0000000c, 752 MB_CMD_DUMP_RISC_RAM = 0x0000000c,
747 MB_CMD_WRITE_RAM = 0x0000000d, 753 MB_CMD_WRITE_RAM = 0x0000000d,
754 MB_CMD_INIT_RISC_RAM = 0x0000000e,
748 MB_CMD_READ_RAM = 0x0000000f, 755 MB_CMD_READ_RAM = 0x0000000f,
749 MB_CMD_STOP_FW = 0x00000014, 756 MB_CMD_STOP_FW = 0x00000014,
750 MB_CMD_MAKE_SYS_ERR = 0x0000002a, 757 MB_CMD_MAKE_SYS_ERR = 0x0000002a,
758 MB_CMD_WRITE_SFP = 0x00000030,
759 MB_CMD_READ_SFP = 0x00000031,
751 MB_CMD_INIT_FW = 0x00000060, 760 MB_CMD_INIT_FW = 0x00000060,
752 MB_CMD_GET_INIT_CB = 0x00000061, 761 MB_CMD_GET_IFCB = 0x00000061,
753 MB_CMD_GET_FW_STATE = 0x00000069, 762 MB_CMD_GET_FW_STATE = 0x00000069,
754 MB_CMD_IDC_REQ = 0x00000100, /* Inter-Driver Communication */ 763 MB_CMD_IDC_REQ = 0x00000100, /* Inter-Driver Communication */
755 MB_CMD_IDC_ACK = 0x00000101, /* Inter-Driver Communication */ 764 MB_CMD_IDC_ACK = 0x00000101, /* Inter-Driver Communication */
756 MB_CMD_SET_WOL_MODE = 0x00000110, /* Wake On Lan */ 765 MB_CMD_SET_WOL_MODE = 0x00000110, /* Wake On Lan */
757 MB_WOL_DISABLE = 0x00000000, 766 MB_WOL_DISABLE = 0,
758 MB_WOL_MAGIC_PKT = 0x00000001, 767 MB_WOL_MAGIC_PKT = (1 << 1),
759 MB_WOL_FLTR = 0x00000002, 768 MB_WOL_FLTR = (1 << 2),
760 MB_WOL_UCAST = 0x00000004, 769 MB_WOL_UCAST = (1 << 3),
761 MB_WOL_MCAST = 0x00000008, 770 MB_WOL_MCAST = (1 << 4),
762 MB_WOL_BCAST = 0x00000010, 771 MB_WOL_BCAST = (1 << 5),
763 MB_WOL_LINK_UP = 0x00000020, 772 MB_WOL_LINK_UP = (1 << 6),
764 MB_WOL_LINK_DOWN = 0x00000040, 773 MB_WOL_LINK_DOWN = (1 << 7),
765 MB_CMD_SET_WOL_FLTR = 0x00000111, /* Wake On Lan Filter */ 774 MB_CMD_SET_WOL_FLTR = 0x00000111, /* Wake On Lan Filter */
766 MB_CMD_CLEAR_WOL_FLTR = 0x00000112, /* Wake On Lan Filter */ 775 MB_CMD_CLEAR_WOL_FLTR = 0x00000112, /* Wake On Lan Filter */
767 MB_CMD_SET_WOL_MAGIC = 0x00000113, /* Wake On Lan Magic Packet */ 776 MB_CMD_SET_WOL_MAGIC = 0x00000113, /* Wake On Lan Magic Packet */
768 MB_CMD_CLEAR_WOL_MAGIC = 0x00000114, /* Wake On Lan Magic Packet */ 777 MB_CMD_CLEAR_WOL_MAGIC = 0x00000114,/* Wake On Lan Magic Packet */
778 MB_CMD_SET_WOL_IMMED = 0x00000115,
769 MB_CMD_PORT_RESET = 0x00000120, 779 MB_CMD_PORT_RESET = 0x00000120,
770 MB_CMD_SET_PORT_CFG = 0x00000122, 780 MB_CMD_SET_PORT_CFG = 0x00000122,
771 MB_CMD_GET_PORT_CFG = 0x00000123, 781 MB_CMD_GET_PORT_CFG = 0x00000123,
772 MB_CMD_SET_ASIC_VOLTS = 0x00000130, 782 MB_CMD_GET_LINK_STS = 0x00000124,
773 MB_CMD_GET_SNS_DATA = 0x00000131, /* Temp and Volt Sense data. */
774 783
775 /* Mailbox Command Status. */ 784 /* Mailbox Command Status. */
776 MB_CMD_STS_GOOD = 0x00004000, /* Success. */ 785 MB_CMD_STS_GOOD = 0x00004000, /* Success. */
777 MB_CMD_STS_INTRMDT = 0x00001000, /* Intermediate Complete. */ 786 MB_CMD_STS_INTRMDT = 0x00001000, /* Intermediate Complete. */
778 MB_CMD_STS_ERR = 0x00004005, /* Error. */ 787 MB_CMD_STS_INVLD_CMD = 0x00004001, /* Invalid. */
788 MB_CMD_STS_XFC_ERR = 0x00004002, /* Interface Error. */
789 MB_CMD_STS_CSUM_ERR = 0x00004003, /* Csum Error. */
790 MB_CMD_STS_ERR = 0x00004005, /* System Error. */
791 MB_CMD_STS_PARAM_ERR = 0x00004006, /* Parameter Error. */
779}; 792};
780 793
781struct mbox_params { 794struct mbox_params {
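The Wake-On-LAN modes are reworked from consecutive values into explicit bit positions, so several wake conditions can be OR'd into a single MB_CMD_SET_WOL_MODE word. A minimal standalone illustration (not part of the commit):

    #include <stdint.h>
    #include <stdio.h>

    #define MB_WOL_MAGIC_PKT  (1 << 1)
    #define MB_WOL_BCAST      (1 << 5)
    #define MB_WOL_LINK_DOWN  (1 << 7)

    int main(void)
    {
            /* wake on magic packet or on link loss */
            uint32_t wol = MB_WOL_MAGIC_PKT | MB_WOL_LINK_DOWN;
            printf("WOL mode word: 0x%08x\n", (unsigned)wol); /* 0x00000082 */
            return 0;
    }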
@@ -785,7 +798,7 @@ struct mbox_params {
785 int out_count; 798 int out_count;
786}; 799};
787 800
788struct flash_params { 801struct flash_params_8012 {
789 u8 dev_id_str[4]; 802 u8 dev_id_str[4];
790 __le16 size; 803 __le16 size;
791 __le16 csum; 804 __le16 csum;
@@ -795,6 +808,43 @@ struct flash_params {
795 __le16 res; 808 __le16 res;
796}; 809};
797 810
811/* 8000 device's flash is a different structure
812 * at a different offset in flash.
813 */
814#define FUNC0_FLASH_OFFSET 0x140200
815#define FUNC1_FLASH_OFFSET 0x140600
816
817/* Flash related data structures. */
818struct flash_params_8000 {
819 u8 dev_id_str[4]; /* "8000" */
820 __le16 ver;
821 __le16 size;
822 __le16 csum;
823 __le16 reserved0;
824 __le16 total_size;
825 __le16 entry_count;
826 u8 data_type0;
827 u8 data_size0;
828 u8 mac_addr[6];
829 u8 data_type1;
830 u8 data_size1;
831 u8 mac_addr1[6];
832 u8 data_type2;
833 u8 data_size2;
834 __le16 vlan_id;
835 u8 data_type3;
836 u8 data_size3;
837 __le16 last;
838 u8 reserved1[464];
839 __le16 subsys_ven_id;
840 __le16 subsys_dev_id;
841 u8 reserved2[4];
842};
843
844union flash_params {
845 struct flash_params_8012 flash_params_8012;
846 struct flash_params_8000 flash_params_8000;
847};
798 848
799/* 849/*
800 * doorbell space for the rx ring context 850 * doorbell space for the rx ring context
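The new union lets the single qdev->flash buffer hold either layout; which member is meaningful is decided from the PCI device id, and the buffer is automatically sized by the larger 8000 structure. A trimmed standalone illustration (struct bodies abbreviated here, not part of the commit):

    #include <stdio.h>

    struct flash_params_8012 { char dev_id_str[4]; /* ... trimmed ... */ };
    struct flash_params_8000 { char dev_id_str[4]; char rest[508]; /* trimmed */ };

    union flash_params {
            struct flash_params_8012 flash_params_8012;
            struct flash_params_8000 flash_params_8000;
    };

    int main(void)
    {
            /* the union takes the size of its largest member */
            printf("%zu bytes\n", sizeof(union flash_params));
            return 0;
    }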
@@ -968,6 +1018,7 @@ struct ib_mac_iocb_rsp {
968 __le16 vlan_id; /* 12 bits */ 1018 __le16 vlan_id; /* 12 bits */
969#define IB_MAC_IOCB_RSP_C 0x1000 /* VLAN CFI bit */ 1019#define IB_MAC_IOCB_RSP_C 0x1000 /* VLAN CFI bit */
970#define IB_MAC_IOCB_RSP_COS_SHIFT 12 /* class of service value */ 1020#define IB_MAC_IOCB_RSP_COS_SHIFT 12 /* class of service value */
1021#define IB_MAC_IOCB_RSP_VLAN_MASK 0x0ffff
971 1022
972 __le16 reserved1; 1023 __le16 reserved1;
973 __le32 reserved2[6]; 1024 __le32 reserved2[6];
@@ -1033,6 +1084,7 @@ struct wqicb {
1033#define Q_LEN_CPP_16 0x0001 1084#define Q_LEN_CPP_16 0x0001
1034#define Q_LEN_CPP_32 0x0002 1085#define Q_LEN_CPP_32 0x0002
1035#define Q_LEN_CPP_64 0x0003 1086#define Q_LEN_CPP_64 0x0003
1087#define Q_LEN_CPP_512 0x0006
1036 __le16 flags; 1088 __le16 flags;
1037#define Q_PRI_SHIFT 1 1089#define Q_PRI_SHIFT 1
1038#define Q_FLAGS_LC 0x1000 1090#define Q_FLAGS_LC 0x1000
@@ -1314,27 +1366,49 @@ enum {
1314 QL_DMA64 = (1 << 5), 1366 QL_DMA64 = (1 << 5),
1315 QL_PROMISCUOUS = (1 << 6), 1367 QL_PROMISCUOUS = (1 << 6),
1316 QL_ALLMULTI = (1 << 7), 1368 QL_ALLMULTI = (1 << 7),
1369 QL_PORT_CFG = (1 << 8),
1370 QL_CAM_RT_SET = (1 << 9),
1317}; 1371};
1318 1372
1319/* link_status bit definitions */ 1373/* link_status bit definitions */
1320enum { 1374enum {
1321 LOOPBACK_MASK = 0x00000700, 1375 STS_LOOPBACK_MASK = 0x00000700,
1322 LOOPBACK_PCS = 0x00000100, 1376 STS_LOOPBACK_PCS = 0x00000100,
1323 LOOPBACK_HSS = 0x00000200, 1377 STS_LOOPBACK_HSS = 0x00000200,
1324 LOOPBACK_EXT = 0x00000300, 1378 STS_LOOPBACK_EXT = 0x00000300,
1325 PAUSE_MASK = 0x000000c0, 1379 STS_PAUSE_MASK = 0x000000c0,
1326 PAUSE_STD = 0x00000040, 1380 STS_PAUSE_STD = 0x00000040,
1327 PAUSE_PRI = 0x00000080, 1381 STS_PAUSE_PRI = 0x00000080,
1328 SPEED_MASK = 0x00000038, 1382 STS_SPEED_MASK = 0x00000038,
1329 SPEED_100Mb = 0x00000000, 1383 STS_SPEED_100Mb = 0x00000000,
1330 SPEED_1Gb = 0x00000008, 1384 STS_SPEED_1Gb = 0x00000008,
1331 SPEED_10Gb = 0x00000010, 1385 STS_SPEED_10Gb = 0x00000010,
1332 LINK_TYPE_MASK = 0x00000007, 1386 STS_LINK_TYPE_MASK = 0x00000007,
1333 LINK_TYPE_XFI = 0x00000001, 1387 STS_LINK_TYPE_XFI = 0x00000001,
1334 LINK_TYPE_XAUI = 0x00000002, 1388 STS_LINK_TYPE_XAUI = 0x00000002,
1335 LINK_TYPE_XFI_BP = 0x00000003, 1389 STS_LINK_TYPE_XFI_BP = 0x00000003,
1336 LINK_TYPE_XAUI_BP = 0x00000004, 1390 STS_LINK_TYPE_XAUI_BP = 0x00000004,
1337 LINK_TYPE_10GBASET = 0x00000005, 1391 STS_LINK_TYPE_10GBASET = 0x00000005,
1392};
1393
1394/* link_config bit definitions */
1395enum {
1396 CFG_JUMBO_FRAME_SIZE = 0x00010000,
1397 CFG_PAUSE_MASK = 0x00000060,
1398 CFG_PAUSE_STD = 0x00000020,
1399 CFG_PAUSE_PRI = 0x00000040,
1400 CFG_DCBX = 0x00000010,
1401 CFG_LOOPBACK_MASK = 0x00000007,
1402 CFG_LOOPBACK_PCS = 0x00000002,
1403 CFG_LOOPBACK_HSS = 0x00000004,
1404 CFG_LOOPBACK_EXT = 0x00000006,
1405 CFG_DEFAULT_MAX_FRAME_SIZE = 0x00002580,
1406};
1407
1408struct nic_operations {
1409
1410 int (*get_flash) (struct ql_adapter *);
1411 int (*port_initialize) (struct ql_adapter *);
1338}; 1412};
1339 1413
1340/* 1414/*
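The nic_operations table abstracts the per-ASIC differences so common code can call qdev->nic_ops->get_flash() and ->port_initialize() instead of testing the chip type at every site. A sketch of the intended wiring, assembled from functions added elsewhere in this commit (the ops-struct names are placeholders):

    static struct nic_operations qla8012_nic_ops = {
            .get_flash       = ql_get_8012_flash_params,
            .port_initialize = ql_8012_port_initialize,
    };

    static struct nic_operations qla8000_nic_ops = {
            .get_flash       = ql_get_8000_flash_params,
            .port_initialize = ql_8000_port_initialize,
    };

    /* at probe time, keyed off the PCI device id: */
    qdev->nic_ops = (qdev->device_id == QLGE_DEVICE_ID_8000) ?
                    &qla8000_nic_ops : &qla8012_nic_ops;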
@@ -1377,6 +1451,8 @@ struct ql_adapter {
1377 1451
1378 u32 mailbox_in; 1452 u32 mailbox_in;
1379 u32 mailbox_out; 1453 u32 mailbox_out;
1454 struct mbox_params idc_mbc;
1455 struct mutex mpi_mutex;
1380 1456
1381 int tx_ring_size; 1457 int tx_ring_size;
1382 int rx_ring_size; 1458 int rx_ring_size;
@@ -1412,8 +1488,10 @@ struct ql_adapter {
1412 u32 port_link_up; 1488 u32 port_link_up;
1413 u32 port_init; 1489 u32 port_init;
1414 u32 link_status; 1490 u32 link_status;
1491 u32 link_config;
1492 u32 max_frame_size;
1415 1493
1416 struct flash_params flash; 1494 union flash_params flash;
1417 1495
1418 struct net_device_stats stats; 1496 struct net_device_stats stats;
1419 struct workqueue_struct *q_workqueue; 1497 struct workqueue_struct *q_workqueue;
@@ -1421,6 +1499,11 @@ struct ql_adapter {
1421 struct delayed_work asic_reset_work; 1499 struct delayed_work asic_reset_work;
1422 struct delayed_work mpi_reset_work; 1500 struct delayed_work mpi_reset_work;
1423 struct delayed_work mpi_work; 1501 struct delayed_work mpi_work;
1502 struct delayed_work mpi_port_cfg_work;
1503 struct delayed_work mpi_idc_work;
1504 struct completion ide_completion;
1505 struct nic_operations *nic_ops;
1506 u16 device_id;
1424}; 1507};
1425 1508
1426/* 1509/*
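The work items, mutex, and completion added to ql_adapter need one-time initialization during probe; the probe-side code is outside this diff, but the wiring would presumably mirror the driver's existing workers (a sketch, not part of the diff):

    INIT_DELAYED_WORK(&qdev->mpi_port_cfg_work, ql_mpi_port_cfg_work);
    INIT_DELAYED_WORK(&qdev->mpi_idc_work, ql_mpi_idc_work);
    mutex_init(&qdev->mpi_mutex);
    init_completion(&qdev->ide_completion);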
@@ -1493,6 +1576,10 @@ void ql_queue_asic_error(struct ql_adapter *qdev);
1493u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr); 1576u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr);
1494void ql_set_ethtool_ops(struct net_device *ndev); 1577void ql_set_ethtool_ops(struct net_device *ndev);
1495int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data); 1578int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data);
1579void ql_mpi_idc_work(struct work_struct *work);
1580void ql_mpi_port_cfg_work(struct work_struct *work);
1581int ql_mb_get_fw_state(struct ql_adapter *qdev);
1582int ql_cam_route_initialize(struct ql_adapter *qdev);
1496 1583
1497#if 1 1584#if 1
1498#define QL_ALL_DUMP 1585#define QL_ALL_DUMP
diff --git a/drivers/net/qlge/qlge_dbg.c b/drivers/net/qlge/qlge_dbg.c
index 379b895ed6e6..40a70c36f5ae 100644
--- a/drivers/net/qlge/qlge_dbg.c
+++ b/drivers/net/qlge/qlge_dbg.c
@@ -83,6 +83,10 @@ static void ql_dump_cam_entries(struct ql_adapter *qdev)
83{ 83{
84 int i; 84 int i;
85 u32 value[3]; 85 u32 value[3];
86
87 i = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
88 if (i)
89 return;
86 for (i = 0; i < 4; i++) { 90 for (i = 0; i < 4; i++) {
87 if (ql_get_mac_addr_reg(qdev, MAC_ADDR_TYPE_CAM_MAC, i, value)) { 91 if (ql_get_mac_addr_reg(qdev, MAC_ADDR_TYPE_CAM_MAC, i, value)) {
88 printk(KERN_ERR PFX 92 printk(KERN_ERR PFX
@@ -111,12 +115,16 @@ static void ql_dump_cam_entries(struct ql_adapter *qdev)
111 qdev->ndev->name, i, value[1], value[0]); 115 qdev->ndev->name, i, value[1], value[0]);
112 } 116 }
113 } 117 }
118 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
114} 119}
115 120
116void ql_dump_routing_entries(struct ql_adapter *qdev) 121void ql_dump_routing_entries(struct ql_adapter *qdev)
117{ 122{
118 int i; 123 int i;
119 u32 value; 124 u32 value;
125 i = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
126 if (i)
127 return;
120 for (i = 0; i < 16; i++) { 128 for (i = 0; i < 16; i++) {
121 value = 0; 129 value = 0;
122 if (ql_get_routing_reg(qdev, i, &value)) { 130 if (ql_get_routing_reg(qdev, i, &value)) {
@@ -131,6 +139,7 @@ void ql_dump_routing_entries(struct ql_adapter *qdev)
131 qdev->ndev->name, i, value); 139 qdev->ndev->name, i, value);
132 } 140 }
133 } 141 }
142 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
134} 143}
135 144
136void ql_dump_regs(struct ql_adapter *qdev) 145void ql_dump_regs(struct ql_adapter *qdev)
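Both dump helpers now take the hardware semaphore themselves because, as the qlge_main.c hunks below show, ql_get_mac_addr_reg() and ql_get_routing_reg() no longer do. Every caller of those accessors now follows the same shape (a sketch, not itself part of the diff):

    if (ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK))
            return;                 /* could not get the HW semaphore */
    /* ... read or write the MAC address registers ... */
    ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);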
diff --git a/drivers/net/qlge/qlge_ethtool.c b/drivers/net/qlge/qlge_ethtool.c
index 9d922e2ff226..a50078627fb6 100644
--- a/drivers/net/qlge/qlge_ethtool.c
+++ b/drivers/net/qlge/qlge_ethtool.c
@@ -271,7 +271,8 @@ static int ql_get_settings(struct net_device *ndev,
271 ecmd->advertising = ADVERTISED_10000baseT_Full; 271 ecmd->advertising = ADVERTISED_10000baseT_Full;
272 ecmd->autoneg = AUTONEG_ENABLE; 272 ecmd->autoneg = AUTONEG_ENABLE;
273 ecmd->transceiver = XCVR_EXTERNAL; 273 ecmd->transceiver = XCVR_EXTERNAL;
274 if ((qdev->link_status & LINK_TYPE_MASK) == LINK_TYPE_10GBASET) { 274 if ((qdev->link_status & STS_LINK_TYPE_MASK) ==
275 STS_LINK_TYPE_10GBASET) {
275 ecmd->supported |= (SUPPORTED_TP | SUPPORTED_Autoneg); 276 ecmd->supported |= (SUPPORTED_TP | SUPPORTED_Autoneg);
276 ecmd->advertising |= (ADVERTISED_TP | ADVERTISED_Autoneg); 277 ecmd->advertising |= (ADVERTISED_TP | ADVERTISED_Autoneg);
277 ecmd->port = PORT_TP; 278 ecmd->port = PORT_TP;
diff --git a/drivers/net/qlge/qlge_main.c b/drivers/net/qlge/qlge_main.c
index 91191f761fba..170d3540f9c9 100644
--- a/drivers/net/qlge/qlge_main.c
+++ b/drivers/net/qlge/qlge_main.c
@@ -58,8 +58,8 @@ static const u32 default_msg =
58 NETIF_MSG_IFUP | 58 NETIF_MSG_IFUP |
59 NETIF_MSG_RX_ERR | 59 NETIF_MSG_RX_ERR |
60 NETIF_MSG_TX_ERR | 60 NETIF_MSG_TX_ERR |
61 NETIF_MSG_TX_QUEUED | 61/* NETIF_MSG_TX_QUEUED | */
62 NETIF_MSG_INTR | NETIF_MSG_TX_DONE | NETIF_MSG_RX_STATUS | 62/* NETIF_MSG_INTR | NETIF_MSG_TX_DONE | NETIF_MSG_RX_STATUS | */
63/* NETIF_MSG_PKTDATA | */ 63/* NETIF_MSG_PKTDATA | */
64 NETIF_MSG_HW | NETIF_MSG_WOL | 0; 64 NETIF_MSG_HW | NETIF_MSG_WOL | 0;
65 65
@@ -75,7 +75,8 @@ module_param(irq_type, int, MSIX_IRQ);
75MODULE_PARM_DESC(irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy."); 75MODULE_PARM_DESC(irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");
76 76
77static struct pci_device_id qlge_pci_tbl[] __devinitdata = { 77static struct pci_device_id qlge_pci_tbl[] __devinitdata = {
78 {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID)}, 78 {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8012)},
79 {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8000)},
79 /* required last entry */ 80 /* required last entry */
80 {0,} 81 {0,}
81}; 82};
@@ -247,9 +248,6 @@ int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
247 u32 offset = 0; 248 u32 offset = 0;
248 int status; 249 int status;
249 250
250 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
251 if (status)
252 return status;
253 switch (type) { 251 switch (type) {
254 case MAC_ADDR_TYPE_MULTI_MAC: 252 case MAC_ADDR_TYPE_MULTI_MAC:
255 case MAC_ADDR_TYPE_CAM_MAC: 253 case MAC_ADDR_TYPE_CAM_MAC:
@@ -308,7 +306,6 @@ int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
308 status = -EPERM; 306 status = -EPERM;
309 } 307 }
310exit: 308exit:
311 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
312 return status; 309 return status;
313} 310}
314 311
@@ -321,9 +318,6 @@ static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
321 u32 offset = 0; 318 u32 offset = 0;
322 int status = 0; 319 int status = 0;
323 320
324 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
325 if (status)
326 return status;
327 switch (type) { 321 switch (type) {
328 case MAC_ADDR_TYPE_MULTI_MAC: 322 case MAC_ADDR_TYPE_MULTI_MAC:
329 case MAC_ADDR_TYPE_CAM_MAC: 323 case MAC_ADDR_TYPE_CAM_MAC:
@@ -334,7 +328,7 @@ static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
334 (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) | 328 (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
335 (addr[5]); 329 (addr[5]);
336 330
337 QPRINTK(qdev, IFUP, INFO, 331 QPRINTK(qdev, IFUP, DEBUG,
338 "Adding %s address %pM" 332 "Adding %s address %pM"
339 " at index %d in the CAM.\n", 333 " at index %d in the CAM.\n",
340 ((type == 334 ((type ==
@@ -415,7 +409,6 @@ static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
415 status = -EPERM; 409 status = -EPERM;
416 } 410 }
417exit: 411exit:
418 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
419 return status; 412 return status;
420} 413}
421 414
@@ -426,10 +419,6 @@ int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value)
426{ 419{
427 int status = 0; 420 int status = 0;
428 421
429 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
430 if (status)
431 goto exit;
432
433 status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0); 422 status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
434 if (status) 423 if (status)
435 goto exit; 424 goto exit;
@@ -441,7 +430,6 @@ int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value)
441 goto exit; 430 goto exit;
442 *value = ql_read32(qdev, RT_DATA); 431 *value = ql_read32(qdev, RT_DATA);
443exit: 432exit:
444 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
445 return status; 433 return status;
446} 434}
447 435
@@ -453,13 +441,9 @@ exit:
453static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask, 441static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask,
454 int enable) 442 int enable)
455{ 443{
456 int status; 444 int status = -EINVAL; /* Return error if no mask match. */
457 u32 value = 0; 445 u32 value = 0;
458 446
459 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
460 if (status)
461 return status;
462
463 QPRINTK(qdev, IFUP, DEBUG, 447 QPRINTK(qdev, IFUP, DEBUG,
464 "%s %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s mask %s the routing reg.\n", 448 "%s %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s mask %s the routing reg.\n",
465 (enable ? "Adding" : "Removing"), 449 (enable ? "Adding" : "Removing"),
@@ -555,7 +539,6 @@ static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask,
555 ql_write32(qdev, RT_DATA, enable ? mask : 0); 539 ql_write32(qdev, RT_DATA, enable ? mask : 0);
556 } 540 }
557exit: 541exit:
558 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
559 return status; 542 return status;
560} 543}
561 544
@@ -604,7 +587,6 @@ u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
604static u32 ql_disable_completion_interrupt(struct ql_adapter *qdev, u32 intr) 587static u32 ql_disable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
605{ 588{
606 u32 var = 0; 589 u32 var = 0;
607 unsigned long hw_flags;
608 struct intr_context *ctx; 590 struct intr_context *ctx;
609 591
610 /* HW disables for us if we're MSIX multi interrupts and 592 /* HW disables for us if we're MSIX multi interrupts and
@@ -614,14 +596,14 @@ static u32 ql_disable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
614 return 0; 596 return 0;
615 597
616 ctx = qdev->intr_context + intr; 598 ctx = qdev->intr_context + intr;
617 spin_lock_irqsave(&qdev->hw_lock, hw_flags); 599 spin_lock(&qdev->hw_lock);
618 if (!atomic_read(&ctx->irq_cnt)) { 600 if (!atomic_read(&ctx->irq_cnt)) {
619 ql_write32(qdev, INTR_EN, 601 ql_write32(qdev, INTR_EN,
620 ctx->intr_dis_mask); 602 ctx->intr_dis_mask);
621 var = ql_read32(qdev, STS); 603 var = ql_read32(qdev, STS);
622 } 604 }
623 atomic_inc(&ctx->irq_cnt); 605 atomic_inc(&ctx->irq_cnt);
624 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); 606 spin_unlock(&qdev->hw_lock);
625 return var; 607 return var;
626} 608}
627 609
@@ -641,6 +623,28 @@ static void ql_enable_all_completion_interrupts(struct ql_adapter *qdev)
641 623
642} 624}
643 625
626static int ql_validate_flash(struct ql_adapter *qdev, u32 size, const char *str)
627{
628 int status, i;
629 u16 csum = 0;
630 __le16 *flash = (__le16 *)&qdev->flash;
631
632 status = strncmp((char *)&qdev->flash, str, 4);
633 if (status) {
634 QPRINTK(qdev, IFUP, ERR, "Invalid flash signature.\n");
635 return status;
636 }
637
638 for (i = 0; i < size; i++)
639 csum += le16_to_cpu(*flash++);
640
641 if (csum)
642 QPRINTK(qdev, IFUP, ERR,
643 "Invalid flash checksum, csum = 0x%.04x.\n", csum);
644
645 return csum;
646}
647
644static int ql_read_flash_word(struct ql_adapter *qdev, int offset, __le32 *data) 648static int ql_read_flash_word(struct ql_adapter *qdev, int offset, __le32 *data)
645{ 649{
646 int status = 0; 650 int status = 0;
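The new ql_validate_flash() enforces the usual flash-image invariant: the stored checksum word is chosen so that all 16-bit words of a valid image sum to zero modulo 2^16, hence any non-zero residual indicates corruption. A standalone illustration of that rule (not part of the commit):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint16_t image[4] = { 0x3830, 0x3030, 0x1234, 0 };
            uint16_t sum = 0;
            int i;

            for (i = 0; i < 3; i++)
                    sum += image[i];
            image[3] = (uint16_t)(0x10000 - sum); /* stored csum completes the total */

            sum = 0;
            for (i = 0; i < 4; i++)
                    sum += image[i];
            printf("residual = 0x%04x\n", sum);   /* 0x0000 for a good image */
            return 0;
    }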
@@ -665,23 +669,75 @@ exit:
665 return status; 669 return status;
666} 670}
667 671
668static int ql_get_flash_params(struct ql_adapter *qdev) 672static int ql_get_8000_flash_params(struct ql_adapter *qdev)
673{
674 u32 i, size;
675 int status;
676 __le32 *p = (__le32 *)&qdev->flash;
677 u32 offset;
678
679 /* Get flash offset for function and adjust
680 * for dword access.
681 */
682 if (!qdev->func)
683 offset = FUNC0_FLASH_OFFSET / sizeof(u32);
684 else
685 offset = FUNC1_FLASH_OFFSET / sizeof(u32);
686
687 if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
688 return -ETIMEDOUT;
689
690 size = sizeof(struct flash_params_8000) / sizeof(u32);
691 for (i = 0; i < size; i++, p++) {
692 status = ql_read_flash_word(qdev, i+offset, p);
693 if (status) {
694 QPRINTK(qdev, IFUP, ERR, "Error reading flash.\n");
695 goto exit;
696 }
697 }
698
699 status = ql_validate_flash(qdev,
700 sizeof(struct flash_params_8000) / sizeof(u16),
701 "8000");
702 if (status) {
703 QPRINTK(qdev, IFUP, ERR, "Invalid flash.\n");
704 status = -EINVAL;
705 goto exit;
706 }
707
708 if (!is_valid_ether_addr(qdev->flash.flash_params_8000.mac_addr)) {
709 QPRINTK(qdev, IFUP, ERR, "Invalid MAC address.\n");
710 status = -EINVAL;
711 goto exit;
712 }
713
714 memcpy(qdev->ndev->dev_addr,
715 qdev->flash.flash_params_8000.mac_addr,
716 qdev->ndev->addr_len);
717
718exit:
719 ql_sem_unlock(qdev, SEM_FLASH_MASK);
720 return status;
721}
722
723static int ql_get_8012_flash_params(struct ql_adapter *qdev)
669{ 724{
670 int i; 725 int i;
671 int status; 726 int status;
672 __le32 *p = (__le32 *)&qdev->flash; 727 __le32 *p = (__le32 *)&qdev->flash;
673 u32 offset = 0; 728 u32 offset = 0;
729 u32 size = sizeof(struct flash_params_8012) / sizeof(u32);
674 730
675 /* Second function's parameters follow the first 731 /* Second function's parameters follow the first
676 * function's. 732 * function's.
677 */ 733 */
678 if (qdev->func) 734 if (qdev->func)
679 offset = sizeof(qdev->flash) / sizeof(u32); 735 offset = size;
680 736
681 if (ql_sem_spinlock(qdev, SEM_FLASH_MASK)) 737 if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
682 return -ETIMEDOUT; 738 return -ETIMEDOUT;
683 739
684 for (i = 0; i < sizeof(qdev->flash) / sizeof(u32); i++, p++) { 740 for (i = 0; i < size; i++, p++) {
685 status = ql_read_flash_word(qdev, i+offset, p); 741 status = ql_read_flash_word(qdev, i+offset, p);
686 if (status) { 742 if (status) {
687 QPRINTK(qdev, IFUP, ERR, "Error reading flash.\n"); 743 QPRINTK(qdev, IFUP, ERR, "Error reading flash.\n");
@@ -689,6 +745,25 @@ static int ql_get_flash_params(struct ql_adapter *qdev)
689 } 745 }
690 746
691 } 747 }
748
749 status = ql_validate_flash(qdev,
750 sizeof(struct flash_params_8012) / sizeof(u16),
751 "8012");
752 if (status) {
753 QPRINTK(qdev, IFUP, ERR, "Invalid flash.\n");
754 status = -EINVAL;
755 goto exit;
756 }
757
758 if (!is_valid_ether_addr(qdev->flash.flash_params_8012.mac_addr)) {
759 status = -EINVAL;
760 goto exit;
761 }
762
763 memcpy(qdev->ndev->dev_addr,
764 qdev->flash.flash_params_8012.mac_addr,
765 qdev->ndev->addr_len);
766
692exit: 767exit:
693 ql_sem_unlock(qdev, SEM_FLASH_MASK); 768 ql_sem_unlock(qdev, SEM_FLASH_MASK);
694 return status; 769 return status;
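Both flash readers pass ql_read_flash_word() a dword index, which is why byte offsets such as FUNC0_FLASH_OFFSET are divided by sizeof(u32) before the read loop. A standalone check of that arithmetic (not part of the commit):

    #include <assert.h>

    #define FUNC0_FLASH_OFFSET 0x140200
    #define FUNC1_FLASH_OFFSET 0x140600

    int main(void)
    {
            assert(FUNC0_FLASH_OFFSET / 4 == 0x50080); /* func 0 dword index */
            assert(FUNC1_FLASH_OFFSET / 4 == 0x50180); /* func 1 dword index */
            return 0;
    }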
@@ -759,13 +834,25 @@ exit:
759 return status; 834 return status;
760} 835}
761 836
837static int ql_8000_port_initialize(struct ql_adapter *qdev)
838{
839 int status;
840 status = ql_mb_get_fw_state(qdev);
841 if (status)
842 goto exit;
843 /* Wake up a worker to get/set the TX/RX frame sizes. */
844 queue_delayed_work(qdev->workqueue, &qdev->mpi_port_cfg_work, 0);
845exit:
846 return status;
847}
848
762/* Take the MAC Core out of reset. 849/* Take the MAC Core out of reset.
763 * Enable statistics counting. 850 * Enable statistics counting.
764 * Take the transmitter/receiver out of reset. 851 * Take the transmitter/receiver out of reset.
765 * This functionality may be done in the MPI firmware at a 852 * This functionality may be done in the MPI firmware at a
766 * later date. 853 * later date.
767 */ 854 */
768static int ql_port_initialize(struct ql_adapter *qdev) 855static int ql_8012_port_initialize(struct ql_adapter *qdev)
769{ 856{
770 int status = 0; 857 int status = 0;
771 u32 data; 858 u32 data;
@@ -881,7 +968,8 @@ static void ql_write_cq_idx(struct rx_ring *rx_ring)
881/* Process (refill) a large buffer queue. */ 968/* Process (refill) a large buffer queue. */
882static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring) 969static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
883{ 970{
884 int clean_idx = rx_ring->lbq_clean_idx; 971 u32 clean_idx = rx_ring->lbq_clean_idx;
972 u32 start_idx = clean_idx;
885 struct bq_desc *lbq_desc; 973 struct bq_desc *lbq_desc;
886 u64 map; 974 u64 map;
887 int i; 975 int i;
@@ -928,19 +1016,23 @@ static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
928 rx_ring->lbq_prod_idx += 16; 1016 rx_ring->lbq_prod_idx += 16;
929 if (rx_ring->lbq_prod_idx == rx_ring->lbq_len) 1017 if (rx_ring->lbq_prod_idx == rx_ring->lbq_len)
930 rx_ring->lbq_prod_idx = 0; 1018 rx_ring->lbq_prod_idx = 0;
1019 rx_ring->lbq_free_cnt -= 16;
1020 }
1021
1022 if (start_idx != clean_idx) {
931 QPRINTK(qdev, RX_STATUS, DEBUG, 1023 QPRINTK(qdev, RX_STATUS, DEBUG,
932 "lbq: updating prod idx = %d.\n", 1024 "lbq: updating prod idx = %d.\n",
933 rx_ring->lbq_prod_idx); 1025 rx_ring->lbq_prod_idx);
934 ql_write_db_reg(rx_ring->lbq_prod_idx, 1026 ql_write_db_reg(rx_ring->lbq_prod_idx,
935 rx_ring->lbq_prod_idx_db_reg); 1027 rx_ring->lbq_prod_idx_db_reg);
936 rx_ring->lbq_free_cnt -= 16;
937 } 1028 }
938} 1029}
939 1030
940/* Process (refill) a small buffer queue. */ 1031/* Process (refill) a small buffer queue. */
941static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring) 1032static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
942{ 1033{
943 int clean_idx = rx_ring->sbq_clean_idx; 1034 u32 clean_idx = rx_ring->sbq_clean_idx;
1035 u32 start_idx = clean_idx;
944 struct bq_desc *sbq_desc; 1036 struct bq_desc *sbq_desc;
945 u64 map; 1037 u64 map;
946 int i; 1038 int i;
@@ -990,13 +1082,15 @@ static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
990 rx_ring->sbq_prod_idx += 16; 1082 rx_ring->sbq_prod_idx += 16;
991 if (rx_ring->sbq_prod_idx == rx_ring->sbq_len) 1083 if (rx_ring->sbq_prod_idx == rx_ring->sbq_len)
992 rx_ring->sbq_prod_idx = 0; 1084 rx_ring->sbq_prod_idx = 0;
1085 rx_ring->sbq_free_cnt -= 16;
1086 }
1087
1088 if (start_idx != clean_idx) {
993 QPRINTK(qdev, RX_STATUS, DEBUG, 1089 QPRINTK(qdev, RX_STATUS, DEBUG,
994 "sbq: updating prod idx = %d.\n", 1090 "sbq: updating prod idx = %d.\n",
995 rx_ring->sbq_prod_idx); 1091 rx_ring->sbq_prod_idx);
996 ql_write_db_reg(rx_ring->sbq_prod_idx, 1092 ql_write_db_reg(rx_ring->sbq_prod_idx,
997 rx_ring->sbq_prod_idx_db_reg); 1093 rx_ring->sbq_prod_idx_db_reg);
998
999 rx_ring->sbq_free_cnt -= 16;
1000 } 1094 }
1001} 1095}
1002 1096
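Both refill loops now remember where they started, decrement the free count inside the loop, and ring the producer doorbell once at the end, and only when the index actually advanced, rather than issuing one MMIO write per 16-buffer chunk. The pattern in standalone form (not part of the commit):

    #include <stdint.h>
    #include <stdio.h>

    #define RING_LEN 64

    static void write_db(unsigned v) { printf("doorbell <- %u\n", v); }

    static void refill(uint32_t *prod_idx, uint32_t *free_cnt)
    {
            uint32_t start_idx = *prod_idx;

            while (*free_cnt >= 16) {
                    /* ... post 16 buffers ... */
                    *prod_idx = (*prod_idx + 16) % RING_LEN;
                    *free_cnt -= 16;
            }
            if (*prod_idx != start_idx)     /* one doorbell per batch */
                    write_db(*prod_idx);
    }

    int main(void)
    {
            uint32_t prod = 0, free_cnt = 40;
            refill(&prod, &free_cnt);       /* posts 32, rings doorbell once */
            return 0;
    }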
@@ -1412,6 +1506,8 @@ static void ql_process_mac_rx_intr(struct ql_adapter *qdev,
1412{ 1506{
1413 struct net_device *ndev = qdev->ndev; 1507 struct net_device *ndev = qdev->ndev;
1414 struct sk_buff *skb = NULL; 1508 struct sk_buff *skb = NULL;
1509 u16 vlan_id = (le16_to_cpu(ib_mac_rsp->vlan_id) &
1510 IB_MAC_IOCB_RSP_VLAN_MASK);
1415 1511
1416 QL_DUMP_IB_MAC_RSP(ib_mac_rsp); 1512 QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
1417 1513
@@ -1463,18 +1559,26 @@ static void ql_process_mac_rx_intr(struct ql_adapter *qdev,
1463 } 1559 }
1464 } 1560 }
1465 } 1561 }
1562
1466 qdev->stats.rx_packets++; 1563 qdev->stats.rx_packets++;
1467 qdev->stats.rx_bytes += skb->len; 1564 qdev->stats.rx_bytes += skb->len;
1468 skb->protocol = eth_type_trans(skb, ndev); 1565 skb_record_rx_queue(skb,
1469 if (qdev->vlgrp && (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V)) { 1566 rx_ring->cq_id - qdev->rss_ring_first_cq_id);
1470 QPRINTK(qdev, RX_STATUS, DEBUG, 1567 if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
1471 "Passing a VLAN packet upstream.\n"); 1568 if (qdev->vlgrp &&
1472 vlan_hwaccel_receive_skb(skb, qdev->vlgrp, 1569 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) &&
1473 le16_to_cpu(ib_mac_rsp->vlan_id)); 1570 (vlan_id != 0))
1571 vlan_gro_receive(&rx_ring->napi, qdev->vlgrp,
1572 vlan_id, skb);
1573 else
1574 napi_gro_receive(&rx_ring->napi, skb);
1474 } else { 1575 } else {
1475 QPRINTK(qdev, RX_STATUS, DEBUG, 1576 if (qdev->vlgrp &&
1476 "Passing a normal packet upstream.\n"); 1577 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) &&
1477 netif_receive_skb(skb); 1578 (vlan_id != 0))
1579 vlan_hwaccel_receive_skb(skb, qdev->vlgrp, vlan_id);
1580 else
1581 netif_receive_skb(skb);
1478 } 1582 }
1479} 1583}
1480 1584
@@ -1521,14 +1625,12 @@ static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
1521/* Fire up a handler to reset the MPI processor. */ 1625/* Fire up a handler to reset the MPI processor. */
1522void ql_queue_fw_error(struct ql_adapter *qdev) 1626void ql_queue_fw_error(struct ql_adapter *qdev)
1523{ 1627{
1524 netif_stop_queue(qdev->ndev);
1525 netif_carrier_off(qdev->ndev); 1628 netif_carrier_off(qdev->ndev);
1526 queue_delayed_work(qdev->workqueue, &qdev->mpi_reset_work, 0); 1629 queue_delayed_work(qdev->workqueue, &qdev->mpi_reset_work, 0);
1527} 1630}
1528 1631
1529void ql_queue_asic_error(struct ql_adapter *qdev) 1632void ql_queue_asic_error(struct ql_adapter *qdev)
1530{ 1633{
1531 netif_stop_queue(qdev->ndev);
1532 netif_carrier_off(qdev->ndev); 1634 netif_carrier_off(qdev->ndev);
1533 ql_disable_interrupts(qdev); 1635 ql_disable_interrupts(qdev);
1534 /* Clear adapter up bit to signal the recovery 1636 /* Clear adapter up bit to signal the recovery
@@ -1583,6 +1685,7 @@ static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
1583 struct ob_mac_iocb_rsp *net_rsp = NULL; 1685 struct ob_mac_iocb_rsp *net_rsp = NULL;
1584 int count = 0; 1686 int count = 0;
1585 1687
1688 struct tx_ring *tx_ring;
1586 /* While there are entries in the completion queue. */ 1689 /* While there are entries in the completion queue. */
1587 while (prod != rx_ring->cnsmr_idx) { 1690 while (prod != rx_ring->cnsmr_idx) {
1588 1691
@@ -1608,15 +1711,16 @@ static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
1608 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg); 1711 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
1609 } 1712 }
1610 ql_write_cq_idx(rx_ring); 1713 ql_write_cq_idx(rx_ring);
1611 if (netif_queue_stopped(qdev->ndev) && net_rsp != NULL) { 1714 tx_ring = &qdev->tx_ring[net_rsp->txq_idx];
1612 struct tx_ring *tx_ring = &qdev->tx_ring[net_rsp->txq_idx]; 1715 if (__netif_subqueue_stopped(qdev->ndev, tx_ring->wq_id) &&
1716 net_rsp != NULL) {
1613 if (atomic_read(&tx_ring->queue_stopped) && 1717 if (atomic_read(&tx_ring->queue_stopped) &&
1614 (atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4))) 1718 (atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
1615 /* 1719 /*
1616 * The queue got stopped because the tx_ring was full. 1720 * The queue got stopped because the tx_ring was full.
1617 * Wake it up, because it's now at least 25% empty. 1721 * Wake it up, because it's now at least 25% empty.
1618 */ 1722 */
1619 netif_wake_queue(qdev->ndev); 1723 netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
1620 } 1724 }
1621 1725
1622 return count; 1726 return count;
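Together with the netif_stop_subqueue() change in qlge_send() below, this completes the per-queue flow control: each tx_ring stops and wakes only its own subqueue, keyed by wq_id. The stop/wake pair in schematic form (conditions abridged; not itself part of the diff):

    /* xmit path (qlge_send) */
    if (/* tx_ring out of descriptors */) {
            netif_stop_subqueue(ndev, tx_ring->wq_id);
            atomic_inc(&tx_ring->queue_stopped);
            return NETDEV_TX_BUSY;
    }

    /* completion path (ql_clean_outbound_rx_ring) */
    if (__netif_subqueue_stopped(qdev->ndev, tx_ring->wq_id) &&
        atomic_read(&tx_ring->queue_stopped) &&
        atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4))
            netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);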
@@ -1677,7 +1781,7 @@ static int ql_napi_poll_msix(struct napi_struct *napi, int budget)
1677 rx_ring->cq_id); 1781 rx_ring->cq_id);
1678 1782
1679 if (work_done < budget) { 1783 if (work_done < budget) {
1680 __netif_rx_complete(napi); 1784 napi_complete(napi);
1681 ql_enable_completion_interrupt(qdev, rx_ring->irq); 1785 ql_enable_completion_interrupt(qdev, rx_ring->irq);
1682 } 1786 }
1683 return work_done; 1787 return work_done;
@@ -1703,19 +1807,29 @@ static void ql_vlan_rx_add_vid(struct net_device *ndev, u16 vid)
1703{ 1807{
1704 struct ql_adapter *qdev = netdev_priv(ndev); 1808 struct ql_adapter *qdev = netdev_priv(ndev);
1705 u32 enable_bit = MAC_ADDR_E; 1809 u32 enable_bit = MAC_ADDR_E;
1810 int status;
1706 1811
1812 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
1813 if (status)
1814 return;
1707 spin_lock(&qdev->hw_lock); 1815 spin_lock(&qdev->hw_lock);
1708 if (ql_set_mac_addr_reg 1816 if (ql_set_mac_addr_reg
1709 (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) { 1817 (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
1710 QPRINTK(qdev, IFUP, ERR, "Failed to init vlan address.\n"); 1818 QPRINTK(qdev, IFUP, ERR, "Failed to init vlan address.\n");
1711 } 1819 }
1712 spin_unlock(&qdev->hw_lock); 1820 spin_unlock(&qdev->hw_lock);
1821 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
1713} 1822}
1714 1823
1715static void ql_vlan_rx_kill_vid(struct net_device *ndev, u16 vid) 1824static void ql_vlan_rx_kill_vid(struct net_device *ndev, u16 vid)
1716{ 1825{
1717 struct ql_adapter *qdev = netdev_priv(ndev); 1826 struct ql_adapter *qdev = netdev_priv(ndev);
1718 u32 enable_bit = 0; 1827 u32 enable_bit = 0;
1828 int status;
1829
1830 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
1831 if (status)
1832 return;
1719 1833
1720 spin_lock(&qdev->hw_lock); 1834 spin_lock(&qdev->hw_lock);
1721 if (ql_set_mac_addr_reg 1835 if (ql_set_mac_addr_reg
@@ -1723,6 +1837,7 @@ static void ql_vlan_rx_kill_vid(struct net_device *ndev, u16 vid)
1723 QPRINTK(qdev, IFUP, ERR, "Failed to clear vlan address.\n"); 1837 QPRINTK(qdev, IFUP, ERR, "Failed to clear vlan address.\n");
1724 } 1838 }
1725 spin_unlock(&qdev->hw_lock); 1839 spin_unlock(&qdev->hw_lock);
1840 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
1726 1841
1727} 1842}
1728 1843
@@ -1762,7 +1877,7 @@ static irqreturn_t qlge_msix_tx_isr(int irq, void *dev_id)
1762static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id) 1877static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id)
1763{ 1878{
1764 struct rx_ring *rx_ring = dev_id; 1879 struct rx_ring *rx_ring = dev_id;
1765 netif_rx_schedule(&rx_ring->napi); 1880 napi_schedule(&rx_ring->napi);
1766 return IRQ_HANDLED; 1881 return IRQ_HANDLED;
1767} 1882}
1768 1883
@@ -1848,7 +1963,7 @@ static irqreturn_t qlge_isr(int irq, void *dev_id)
1848 &rx_ring->rx_work, 1963 &rx_ring->rx_work,
1849 0); 1964 0);
1850 else 1965 else
1851 netif_rx_schedule(&rx_ring->napi); 1966 napi_schedule(&rx_ring->napi);
1852 work_done++; 1967 work_done++;
1853 } 1968 }
1854 } 1969 }
@@ -1937,7 +2052,7 @@ static int qlge_send(struct sk_buff *skb, struct net_device *ndev)
1937 struct ql_adapter *qdev = netdev_priv(ndev); 2052 struct ql_adapter *qdev = netdev_priv(ndev);
1938 int tso; 2053 int tso;
1939 struct tx_ring *tx_ring; 2054 struct tx_ring *tx_ring;
1940 u32 tx_ring_idx = (u32) QL_TXQ_IDX(qdev, skb); 2055 u32 tx_ring_idx = (u32) skb->queue_mapping;
1941 2056
1942 tx_ring = &qdev->tx_ring[tx_ring_idx]; 2057 tx_ring = &qdev->tx_ring[tx_ring_idx];
1943 2058
@@ -1948,7 +2063,7 @@ static int qlge_send(struct sk_buff *skb, struct net_device *ndev)
1948 QPRINTK(qdev, TX_QUEUED, INFO, 2063 QPRINTK(qdev, TX_QUEUED, INFO,
1949 "%s: shutting down tx queue %d du to lack of resources.\n", 2064 "%s: shutting down tx queue %d du to lack of resources.\n",
1950 __func__, tx_ring_idx); 2065 __func__, tx_ring_idx);
1951 netif_stop_queue(ndev); 2066 netif_stop_subqueue(ndev, tx_ring->wq_id);
1952 atomic_inc(&tx_ring->queue_stopped); 2067 atomic_inc(&tx_ring->queue_stopped);
1953 return NETDEV_TX_BUSY; 2068 return NETDEV_TX_BUSY;
1954 } 2069 }
@@ -2029,6 +2144,7 @@ static int ql_alloc_shadow_space(struct ql_adapter *qdev)
2029 "Allocation of RX shadow space failed.\n"); 2144 "Allocation of RX shadow space failed.\n");
2030 return -ENOMEM; 2145 return -ENOMEM;
2031 } 2146 }
2147 memset(qdev->rx_ring_shadow_reg_area, 0, PAGE_SIZE);
2032 qdev->tx_ring_shadow_reg_area = 2148 qdev->tx_ring_shadow_reg_area =
2033 pci_alloc_consistent(qdev->pdev, PAGE_SIZE, 2149 pci_alloc_consistent(qdev->pdev, PAGE_SIZE,
2034 &qdev->tx_ring_shadow_reg_dma); 2150 &qdev->tx_ring_shadow_reg_dma);
@@ -2037,6 +2153,7 @@ static int ql_alloc_shadow_space(struct ql_adapter *qdev)
2037 "Allocation of TX shadow space failed.\n"); 2153 "Allocation of TX shadow space failed.\n");
2038 goto err_wqp_sh_area; 2154 goto err_wqp_sh_area;
2039 } 2155 }
2156 memset(qdev->tx_ring_shadow_reg_area, 0, PAGE_SIZE);
2040 return 0; 2157 return 0;
2041 2158
2042err_wqp_sh_area: 2159err_wqp_sh_area:
@@ -2121,47 +2238,6 @@ static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring
2121 } 2238 }
2122} 2239}
2123 2240
2124/*
2125 * Allocate and map a page for each element of the lbq.
2126 */
2127static int ql_alloc_lbq_buffers(struct ql_adapter *qdev,
2128 struct rx_ring *rx_ring)
2129{
2130 int i;
2131 struct bq_desc *lbq_desc;
2132 u64 map;
2133 __le64 *bq = rx_ring->lbq_base;
2134
2135 for (i = 0; i < rx_ring->lbq_len; i++) {
2136 lbq_desc = &rx_ring->lbq[i];
2137 memset(lbq_desc, 0, sizeof(lbq_desc));
2138 lbq_desc->addr = bq;
2139 lbq_desc->index = i;
2140 lbq_desc->p.lbq_page = alloc_page(GFP_ATOMIC);
2141 if (unlikely(!lbq_desc->p.lbq_page)) {
2142 QPRINTK(qdev, IFUP, ERR, "failed alloc_page().\n");
2143 goto mem_error;
2144 } else {
2145 map = pci_map_page(qdev->pdev,
2146 lbq_desc->p.lbq_page,
2147 0, PAGE_SIZE, PCI_DMA_FROMDEVICE);
2148 if (pci_dma_mapping_error(qdev->pdev, map)) {
2149 QPRINTK(qdev, IFUP, ERR,
2150 "PCI mapping failed.\n");
2151 goto mem_error;
2152 }
2153 pci_unmap_addr_set(lbq_desc, mapaddr, map);
2154 pci_unmap_len_set(lbq_desc, maplen, PAGE_SIZE);
2155 *lbq_desc->addr = cpu_to_le64(map);
2156 }
2157 bq++;
2158 }
2159 return 0;
2160mem_error:
2161 ql_free_lbq_buffers(qdev, rx_ring);
2162 return -ENOMEM;
2163}
2164
2165static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring) 2241static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2166{ 2242{
2167 int i; 2243 int i;
@@ -2184,63 +2260,72 @@ static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring
2184 } 2260 }
2185} 2261}
2186 2262
2187/* Allocate and map an skb for each element of the sbq. */ 2263/* Free all large and small rx buffers associated
2188static int ql_alloc_sbq_buffers(struct ql_adapter *qdev, 2264 * with the completion queues for this device.
2265 */
2266static void ql_free_rx_buffers(struct ql_adapter *qdev)
2267{
2268 int i;
2269 struct rx_ring *rx_ring;
2270
2271 for (i = 0; i < qdev->rx_ring_count; i++) {
2272 rx_ring = &qdev->rx_ring[i];
2273 if (rx_ring->lbq)
2274 ql_free_lbq_buffers(qdev, rx_ring);
2275 if (rx_ring->sbq)
2276 ql_free_sbq_buffers(qdev, rx_ring);
2277 }
2278}
2279
2280static void ql_alloc_rx_buffers(struct ql_adapter *qdev)
2281{
2282 struct rx_ring *rx_ring;
2283 int i;
2284
2285 for (i = 0; i < qdev->rx_ring_count; i++) {
2286 rx_ring = &qdev->rx_ring[i];
2287 if (rx_ring->type != TX_Q)
2288 ql_update_buffer_queues(qdev, rx_ring);
2289 }
2290}
2291
2292static void ql_init_lbq_ring(struct ql_adapter *qdev,
2293 struct rx_ring *rx_ring)
2294{
2295 int i;
2296 struct bq_desc *lbq_desc;
2297 __le64 *bq = rx_ring->lbq_base;
2298
2299 memset(rx_ring->lbq, 0, rx_ring->lbq_len * sizeof(struct bq_desc));
2300 for (i = 0; i < rx_ring->lbq_len; i++) {
2301 lbq_desc = &rx_ring->lbq[i];
2302 memset(lbq_desc, 0, sizeof(*lbq_desc));
2303 lbq_desc->index = i;
2304 lbq_desc->addr = bq;
2305 bq++;
2306 }
2307}
2308
2309static void ql_init_sbq_ring(struct ql_adapter *qdev,
2189 struct rx_ring *rx_ring) 2310 struct rx_ring *rx_ring)
2190{ 2311{
2191 int i; 2312 int i;
2192 struct bq_desc *sbq_desc; 2313 struct bq_desc *sbq_desc;
2193 struct sk_buff *skb;
2194 u64 map;
2195 __le64 *bq = rx_ring->sbq_base; 2314 __le64 *bq = rx_ring->sbq_base;
2196 2315
2316 memset(rx_ring->sbq, 0, rx_ring->sbq_len * sizeof(struct bq_desc));
2197 for (i = 0; i < rx_ring->sbq_len; i++) { 2317 for (i = 0; i < rx_ring->sbq_len; i++) {
2198 sbq_desc = &rx_ring->sbq[i]; 2318 sbq_desc = &rx_ring->sbq[i];
2199 memset(sbq_desc, 0, sizeof(sbq_desc)); 2319 memset(sbq_desc, 0, sizeof(*sbq_desc));
2200 sbq_desc->index = i; 2320 sbq_desc->index = i;
2201 sbq_desc->addr = bq; 2321 sbq_desc->addr = bq;
2202 skb = netdev_alloc_skb(qdev->ndev, rx_ring->sbq_buf_size);
2203 if (unlikely(!skb)) {
2204 /* Better luck next round */
2205 QPRINTK(qdev, IFUP, ERR,
2206 "small buff alloc failed for %d bytes at index %d.\n",
2207 rx_ring->sbq_buf_size, i);
2208 goto mem_err;
2209 }
2210 skb_reserve(skb, QLGE_SB_PAD);
2211 sbq_desc->p.skb = skb;
2212 /*
2213 * Map only half the buffer. Because the
2214 * other half may get some data copied to it
2215 * when the completion arrives.
2216 */
2217 map = pci_map_single(qdev->pdev,
2218 skb->data,
2219 rx_ring->sbq_buf_size / 2,
2220 PCI_DMA_FROMDEVICE);
2221 if (pci_dma_mapping_error(qdev->pdev, map)) {
2222 QPRINTK(qdev, IFUP, ERR, "PCI mapping failed.\n");
2223 goto mem_err;
2224 }
2225 pci_unmap_addr_set(sbq_desc, mapaddr, map);
2226 pci_unmap_len_set(sbq_desc, maplen, rx_ring->sbq_buf_size / 2);
2227 *sbq_desc->addr = cpu_to_le64(map);
2228 bq++; 2322 bq++;
2229 } 2323 }
2230 return 0;
2231mem_err:
2232 ql_free_sbq_buffers(qdev, rx_ring);
2233 return -ENOMEM;
2234} 2324}
2235 2325
2236static void ql_free_rx_resources(struct ql_adapter *qdev, 2326static void ql_free_rx_resources(struct ql_adapter *qdev,
2237 struct rx_ring *rx_ring) 2327 struct rx_ring *rx_ring)
2238{ 2328{
2239 if (rx_ring->sbq_len)
2240 ql_free_sbq_buffers(qdev, rx_ring);
2241 if (rx_ring->lbq_len)
2242 ql_free_lbq_buffers(qdev, rx_ring);
2243
2244 /* Free the small buffer queue. */ 2329 /* Free the small buffer queue. */
2245 if (rx_ring->sbq_base) { 2330 if (rx_ring->sbq_base) {
2246 pci_free_consistent(qdev->pdev, 2331 pci_free_consistent(qdev->pdev,
@@ -2318,11 +2403,7 @@ static int ql_alloc_rx_resources(struct ql_adapter *qdev,
2318 goto err_mem; 2403 goto err_mem;
2319 } 2404 }
2320 2405
2321 if (ql_alloc_sbq_buffers(qdev, rx_ring)) { 2406 ql_init_sbq_ring(qdev, rx_ring);
2322 QPRINTK(qdev, IFUP, ERR,
2323 "Small buffer allocation failed.\n");
2324 goto err_mem;
2325 }
2326 } 2407 }
2327 2408
2328 if (rx_ring->lbq_len) { 2409 if (rx_ring->lbq_len) {
@@ -2350,14 +2431,7 @@ static int ql_alloc_rx_resources(struct ql_adapter *qdev,
2350 goto err_mem; 2431 goto err_mem;
2351 } 2432 }
2352 2433
2353 /* 2434 ql_init_lbq_ring(qdev, rx_ring);
2354 * Allocate the buffers.
2355 */
2356 if (ql_alloc_lbq_buffers(qdev, rx_ring)) {
2357 QPRINTK(qdev, IFUP, ERR,
2358 "Large buffer allocation failed.\n");
2359 goto err_mem;
2360 }
2361 } 2435 }
2362 2436
2363 return 0; 2437 return 0;
@@ -2451,6 +2525,7 @@ static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2451 qdev->doorbell_area + (DB_PAGE_SIZE * (128 + rx_ring->cq_id)); 2525 qdev->doorbell_area + (DB_PAGE_SIZE * (128 + rx_ring->cq_id));
2452 int err = 0; 2526 int err = 0;
2453 u16 bq_len; 2527 u16 bq_len;
2528 u64 tmp;
2454 2529
2455 /* Set up the shadow registers for this ring. */ 2530 /* Set up the shadow registers for this ring. */
2456 rx_ring->prod_idx_sh_reg = shadow_reg; 2531 rx_ring->prod_idx_sh_reg = shadow_reg;
@@ -2496,7 +2571,8 @@ static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2496 FLAGS_LI; /* Load irq delay values */ 2571 FLAGS_LI; /* Load irq delay values */
2497 if (rx_ring->lbq_len) { 2572 if (rx_ring->lbq_len) {
2498 cqicb->flags |= FLAGS_LL; /* Load lbq values */ 2573 cqicb->flags |= FLAGS_LL; /* Load lbq values */
2499 *((u64 *) rx_ring->lbq_base_indirect) = rx_ring->lbq_base_dma; 2574 tmp = (u64)rx_ring->lbq_base_dma;
2575 *((__le64 *) rx_ring->lbq_base_indirect) = cpu_to_le64(tmp);
2500 cqicb->lbq_addr = 2576 cqicb->lbq_addr =
2501 cpu_to_le64(rx_ring->lbq_base_indirect_dma); 2577 cpu_to_le64(rx_ring->lbq_base_indirect_dma);
2502 bq_len = (rx_ring->lbq_buf_size == 65536) ? 0 : 2578 bq_len = (rx_ring->lbq_buf_size == 65536) ? 0 :
@@ -2505,25 +2581,26 @@ static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2505 bq_len = (rx_ring->lbq_len == 65536) ? 0 : 2581 bq_len = (rx_ring->lbq_len == 65536) ? 0 :
2506 (u16) rx_ring->lbq_len; 2582 (u16) rx_ring->lbq_len;
2507 cqicb->lbq_len = cpu_to_le16(bq_len); 2583 cqicb->lbq_len = cpu_to_le16(bq_len);
2508 rx_ring->lbq_prod_idx = rx_ring->lbq_len - 16; 2584 rx_ring->lbq_prod_idx = 0;
2509 rx_ring->lbq_curr_idx = 0; 2585 rx_ring->lbq_curr_idx = 0;
2510 rx_ring->lbq_clean_idx = rx_ring->lbq_prod_idx; 2586 rx_ring->lbq_clean_idx = 0;
2511 rx_ring->lbq_free_cnt = 16; 2587 rx_ring->lbq_free_cnt = rx_ring->lbq_len;
2512 } 2588 }
2513 if (rx_ring->sbq_len) { 2589 if (rx_ring->sbq_len) {
2514 cqicb->flags |= FLAGS_LS; /* Load sbq values */ 2590 cqicb->flags |= FLAGS_LS; /* Load sbq values */
2515 *((u64 *) rx_ring->sbq_base_indirect) = rx_ring->sbq_base_dma; 2591 tmp = (u64)rx_ring->sbq_base_dma;
2592 *((__le64 *) rx_ring->sbq_base_indirect) = cpu_to_le64(tmp);
2516 cqicb->sbq_addr = 2593 cqicb->sbq_addr =
2517 cpu_to_le64(rx_ring->sbq_base_indirect_dma); 2594 cpu_to_le64(rx_ring->sbq_base_indirect_dma);
2518 cqicb->sbq_buf_size = 2595 cqicb->sbq_buf_size =
2519 cpu_to_le16(((rx_ring->sbq_buf_size / 2) + 8) & 0xfffffff8); 2596 cpu_to_le16((u16)(rx_ring->sbq_buf_size/2));
2520 bq_len = (rx_ring->sbq_len == 65536) ? 0 : 2597 bq_len = (rx_ring->sbq_len == 65536) ? 0 :
2521 (u16) rx_ring->sbq_len; 2598 (u16) rx_ring->sbq_len;
2522 cqicb->sbq_len = cpu_to_le16(bq_len); 2599 cqicb->sbq_len = cpu_to_le16(bq_len);
2523 rx_ring->sbq_prod_idx = rx_ring->sbq_len - 16; 2600 rx_ring->sbq_prod_idx = 0;
2524 rx_ring->sbq_curr_idx = 0; 2601 rx_ring->sbq_curr_idx = 0;
2525 rx_ring->sbq_clean_idx = rx_ring->sbq_prod_idx; 2602 rx_ring->sbq_clean_idx = 0;
2526 rx_ring->sbq_free_cnt = 16; 2603 rx_ring->sbq_free_cnt = rx_ring->sbq_len;
2527 } 2604 }
2528 switch (rx_ring->type) { 2605 switch (rx_ring->type) {
2529 case TX_Q: 2606 case TX_Q:
@@ -2569,24 +2646,13 @@ static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2569 QPRINTK(qdev, IFUP, DEBUG, "Invalid rx_ring->type = %d.\n", 2646 QPRINTK(qdev, IFUP, DEBUG, "Invalid rx_ring->type = %d.\n",
2570 rx_ring->type); 2647 rx_ring->type);
2571 } 2648 }
2572 QPRINTK(qdev, IFUP, INFO, "Initializing rx work queue.\n"); 2649 QPRINTK(qdev, IFUP, DEBUG, "Initializing rx work queue.\n");
2573 err = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb), 2650 err = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb),
2574 CFG_LCQ, rx_ring->cq_id); 2651 CFG_LCQ, rx_ring->cq_id);
2575 if (err) { 2652 if (err) {
2576 QPRINTK(qdev, IFUP, ERR, "Failed to load CQICB.\n"); 2653 QPRINTK(qdev, IFUP, ERR, "Failed to load CQICB.\n");
2577 return err; 2654 return err;
2578 } 2655 }
2579 QPRINTK(qdev, IFUP, INFO, "Successfully loaded CQICB.\n");
2580 /*
2581 * Advance the producer index for the buffer queues.
2582 */
2583 wmb();
2584 if (rx_ring->lbq_len)
2585 ql_write_db_reg(rx_ring->lbq_prod_idx,
2586 rx_ring->lbq_prod_idx_db_reg);
2587 if (rx_ring->sbq_len)
2588 ql_write_db_reg(rx_ring->sbq_prod_idx,
2589 rx_ring->sbq_prod_idx_db_reg);
2590 return err; 2656 return err;
2591} 2657}
2592 2658
@@ -2633,7 +2699,7 @@ static int ql_start_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
2633 QPRINTK(qdev, IFUP, ERR, "Failed to load tx_ring.\n"); 2699 QPRINTK(qdev, IFUP, ERR, "Failed to load tx_ring.\n");
2634 return err; 2700 return err;
2635 } 2701 }
2636 QPRINTK(qdev, IFUP, INFO, "Successfully loaded WQICB.\n"); 2702 QPRINTK(qdev, IFUP, DEBUG, "Successfully loaded WQICB.\n");
2637 return err; 2703 return err;
2638} 2704}
2639 2705
@@ -2675,7 +2741,7 @@ static void ql_enable_msix(struct ql_adapter *qdev)
2675 (qdev->pdev, qdev->msi_x_entry, qdev->rx_ring_count)) { 2741 (qdev->pdev, qdev->msi_x_entry, qdev->rx_ring_count)) {
2676 set_bit(QL_MSIX_ENABLED, &qdev->flags); 2742 set_bit(QL_MSIX_ENABLED, &qdev->flags);
2677 qdev->intr_count = qdev->rx_ring_count; 2743 qdev->intr_count = qdev->rx_ring_count;
2678 QPRINTK(qdev, IFUP, INFO, 2744 QPRINTK(qdev, IFUP, DEBUG,
2679 "MSI-X Enabled, got %d vectors.\n", 2745 "MSI-X Enabled, got %d vectors.\n",
2680 qdev->intr_count); 2746 qdev->intr_count);
2681 return; 2747 return;
@@ -2802,11 +2868,11 @@ static void ql_free_irq(struct ql_adapter *qdev)
2802 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) { 2868 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
2803 free_irq(qdev->msi_x_entry[i].vector, 2869 free_irq(qdev->msi_x_entry[i].vector,
2804 &qdev->rx_ring[i]); 2870 &qdev->rx_ring[i]);
2805 QPRINTK(qdev, IFDOWN, ERR, 2871 QPRINTK(qdev, IFDOWN, DEBUG,
2806 "freeing msix interrupt %d.\n", i); 2872 "freeing msix interrupt %d.\n", i);
2807 } else { 2873 } else {
2808 free_irq(qdev->pdev->irq, &qdev->rx_ring[0]); 2874 free_irq(qdev->pdev->irq, &qdev->rx_ring[0]);
2809 QPRINTK(qdev, IFDOWN, ERR, 2875 QPRINTK(qdev, IFDOWN, DEBUG,
2810 "freeing msi interrupt %d.\n", i); 2876 "freeing msi interrupt %d.\n", i);
2811 } 2877 }
2812 } 2878 }
@@ -2837,7 +2903,7 @@ static int ql_request_irq(struct ql_adapter *qdev)
2837 i); 2903 i);
2838 goto err_irq; 2904 goto err_irq;
2839 } else { 2905 } else {
2840 QPRINTK(qdev, IFUP, INFO, 2906 QPRINTK(qdev, IFUP, DEBUG,
2841 "Hooked intr %d, queue type %s%s%s, with name %s.\n", 2907 "Hooked intr %d, queue type %s%s%s, with name %s.\n",
2842 i, 2908 i,
2843 qdev->rx_ring[i].type == 2909 qdev->rx_ring[i].type ==
@@ -2912,14 +2978,14 @@ static int ql_start_rss(struct ql_adapter *qdev)
2912 get_random_bytes((void *)&ricb->ipv6_hash_key[0], 40); 2978 get_random_bytes((void *)&ricb->ipv6_hash_key[0], 40);
2913 get_random_bytes((void *)&ricb->ipv4_hash_key[0], 16); 2979 get_random_bytes((void *)&ricb->ipv4_hash_key[0], 16);
2914 2980
2915 QPRINTK(qdev, IFUP, INFO, "Initializing RSS.\n"); 2981 QPRINTK(qdev, IFUP, DEBUG, "Initializing RSS.\n");
2916 2982
2917 status = ql_write_cfg(qdev, ricb, sizeof(ricb), CFG_LR, 0); 2983 status = ql_write_cfg(qdev, ricb, sizeof(ricb), CFG_LR, 0);
2918 if (status) { 2984 if (status) {
2919 QPRINTK(qdev, IFUP, ERR, "Failed to load RICB.\n"); 2985 QPRINTK(qdev, IFUP, ERR, "Failed to load RICB.\n");
2920 return status; 2986 return status;
2921 } 2987 }
2922 QPRINTK(qdev, IFUP, INFO, "Successfully loaded RICB.\n"); 2988 QPRINTK(qdev, IFUP, DEBUG, "Successfully loaded RICB.\n");
2923 return status; 2989 return status;
2924} 2990}
2925 2991
@@ -2929,13 +2995,17 @@ static int ql_route_initialize(struct ql_adapter *qdev)
2929 int status = 0; 2995 int status = 0;
2930 int i; 2996 int i;
2931 2997
2998 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
2999 if (status)
3000 return status;
3001
2932 /* Clear all the entries in the routing table. */ 3002 /* Clear all the entries in the routing table. */
2933 for (i = 0; i < 16; i++) { 3003 for (i = 0; i < 16; i++) {
2934 status = ql_set_routing_reg(qdev, i, 0, 0); 3004 status = ql_set_routing_reg(qdev, i, 0, 0);
2935 if (status) { 3005 if (status) {
2936 QPRINTK(qdev, IFUP, ERR, 3006 QPRINTK(qdev, IFUP, ERR,
2937 "Failed to init routing register for CAM packets.\n"); 3007 "Failed to init routing register for CAM packets.\n");
2938 return status; 3008 goto exit;
2939 } 3009 }
2940 } 3010 }
2941 3011
@@ -2943,13 +3013,13 @@ static int ql_route_initialize(struct ql_adapter *qdev)
2943 if (status) { 3013 if (status) {
2944 QPRINTK(qdev, IFUP, ERR, 3014 QPRINTK(qdev, IFUP, ERR,
2945 "Failed to init routing register for error packets.\n"); 3015 "Failed to init routing register for error packets.\n");
2946 return status; 3016 goto exit;
2947 } 3017 }
2948 status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1); 3018 status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
2949 if (status) { 3019 if (status) {
2950 QPRINTK(qdev, IFUP, ERR, 3020 QPRINTK(qdev, IFUP, ERR,
2951 "Failed to init routing register for broadcast packets.\n"); 3021 "Failed to init routing register for broadcast packets.\n");
2952 return status; 3022 goto exit;
2953 } 3023 }
2954 /* If we have more than one inbound queue, then turn on RSS in the 3024 /* If we have more than one inbound queue, then turn on RSS in the
2955 * routing block. 3025 * routing block.
@@ -2960,17 +3030,39 @@ static int ql_route_initialize(struct ql_adapter *qdev)
2960 if (status) { 3030 if (status) {
2961 QPRINTK(qdev, IFUP, ERR, 3031 QPRINTK(qdev, IFUP, ERR,
2962 "Failed to init routing register for MATCH RSS packets.\n"); 3032 "Failed to init routing register for MATCH RSS packets.\n");
2963 return status; 3033 goto exit;
2964 } 3034 }
2965 } 3035 }
2966 3036
2967 status = ql_set_routing_reg(qdev, RT_IDX_CAM_HIT_SLOT, 3037 status = ql_set_routing_reg(qdev, RT_IDX_CAM_HIT_SLOT,
2968 RT_IDX_CAM_HIT, 1); 3038 RT_IDX_CAM_HIT, 1);
2969 if (status) { 3039 if (status)
2970 QPRINTK(qdev, IFUP, ERR, 3040 QPRINTK(qdev, IFUP, ERR,
2971 "Failed to init routing register for CAM packets.\n"); 3041 "Failed to init routing register for CAM packets.\n");
3042exit:
3043 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
3044 return status;
3045}
3046
3047int ql_cam_route_initialize(struct ql_adapter *qdev)
3048{
3049 int status;
3050
3051 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
3052 if (status)
3053 return status;
3054 status = ql_set_mac_addr_reg(qdev, (u8 *) qdev->ndev->perm_addr,
3055 MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
3056 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
3057 if (status) {
3058 QPRINTK(qdev, IFUP, ERR, "Failed to init mac address.\n");
2972 return status; 3059 return status;
2973 } 3060 }
3061
3062 status = ql_route_initialize(qdev);
3063 if (status)
3064 QPRINTK(qdev, IFUP, ERR, "Failed to init routing table.\n");
3065
2974 return status; 3066 return status;
2975} 3067}
2976 3068
@@ -3038,28 +3130,24 @@ static int ql_adapter_initialize(struct ql_adapter *qdev)
3038 } 3130 }
3039 } 3131 }
3040 3132
3041 status = ql_port_initialize(qdev); 3133 /* Initialize the port and set the max framesize. */
3042 if (status) { 3134 status = qdev->nic_ops->port_initialize(qdev);
3043 QPRINTK(qdev, IFUP, ERR, "Failed to start port.\n"); 3135 if (status) {
3044 return status; 3136 QPRINTK(qdev, IFUP, ERR, "Failed to start port.\n");
3045 } 3137 return status;
3138 }
3046 3139
3047 status = ql_set_mac_addr_reg(qdev, (u8 *) qdev->ndev->perm_addr, 3140 /* Set up the MAC address and frame routing filter. */
3048 MAC_ADDR_TYPE_CAM_MAC, qdev->func); 3141 status = ql_cam_route_initialize(qdev);
3049 if (status) { 3142 if (status) {
3050 QPRINTK(qdev, IFUP, ERR, "Failed to init mac address.\n"); 3143 QPRINTK(qdev, IFUP, ERR,
3051 return status; 3144 "Failed to init CAM/Routing tables.\n");
3052 }
3053
3054 status = ql_route_initialize(qdev);
3055 if (status) {
3056 QPRINTK(qdev, IFUP, ERR, "Failed to init routing table.\n");
3057 return status; 3145 return status;
3058 } 3146 }
3059 3147
3060 /* Start NAPI for the RSS queues. */ 3148 /* Start NAPI for the RSS queues. */
3061 for (i = qdev->rss_ring_first_cq_id; i < qdev->rx_ring_count; i++) { 3149 for (i = qdev->rss_ring_first_cq_id; i < qdev->rx_ring_count; i++) {
3062 QPRINTK(qdev, IFUP, INFO, "Enabling NAPI for rx_ring[%d].\n", 3150 QPRINTK(qdev, IFUP, DEBUG, "Enabling NAPI for rx_ring[%d].\n",
3063 i); 3151 i);
3064 napi_enable(&qdev->rx_ring[i].napi); 3152 napi_enable(&qdev->rx_ring[i].napi);
3065 } 3153 }
@@ -3071,36 +3159,23 @@ static int ql_adapter_initialize(struct ql_adapter *qdev)
3071static int ql_adapter_reset(struct ql_adapter *qdev) 3159static int ql_adapter_reset(struct ql_adapter *qdev)
3072{ 3160{
3073 u32 value; 3161 u32 value;
3074 int max_wait_time;
3075 int status = 0; 3162 int status = 0;
3076 int resetCnt = 0; 3163 unsigned long end_jiffies = jiffies +
3164 max((unsigned long)1, usecs_to_jiffies(30));
3077 3165
3078#define MAX_RESET_CNT 1
3079issueReset:
3080 resetCnt++;
3081 QPRINTK(qdev, IFDOWN, DEBUG, "Issue soft reset to chip.\n");
3082 ql_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR); 3166 ql_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR);
3083 /* Wait for reset to complete. */ 3167
3084 max_wait_time = 3;
3085 QPRINTK(qdev, IFDOWN, DEBUG, "Wait %d seconds for reset to complete.\n",
3086 max_wait_time);
3087 do { 3168 do {
3088 value = ql_read32(qdev, RST_FO); 3169 value = ql_read32(qdev, RST_FO);
3089 if ((value & RST_FO_FR) == 0) 3170 if ((value & RST_FO_FR) == 0)
3090 break; 3171 break;
3172 cpu_relax();
3173 } while (time_before(jiffies, end_jiffies));
3091 3174
3092 ssleep(1);
3093 } while ((--max_wait_time));
3094 if (value & RST_FO_FR) { 3175 if (value & RST_FO_FR) {
3095 QPRINTK(qdev, IFDOWN, ERR, 3176 QPRINTK(qdev, IFDOWN, ERR,
3096 "Stuck in SoftReset: FSC_SR:0x%08x\n", value);
3097 if (resetCnt < MAX_RESET_CNT)
3098 goto issueReset;
3099 }
3100 if (max_wait_time == 0) {
3101 status = -ETIMEDOUT;
3102 QPRINTK(qdev, IFDOWN, ERR,
3103 "ETIMEOUT!!! errored out of resetting the chip!\n"); 3177 "ETIMEOUT!!! errored out of resetting the chip!\n");
3178 status = -ETIMEDOUT;
3104 } 3179 }
3105 3180
3106 return status; 3181 return status;
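The reset wait above is a bounded poll against a jiffies deadline rather than a fixed number of one-second sleeps. The same shape in portable user-space C, with a monotonic clock standing in for jiffies/time_before() and a stub in place of the RST_FO read (a sketch, not driver code):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

static uint64_t now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
}

/* Stand-in for reading RST_FO; reports "done" after a few reads. */
static bool reset_still_pending(void)
{
	static int reads = 3;

	return --reads > 0;
}

/* Poll until the reset bit clears or the ~30us budget runs out,
 * mirroring the loop above (-1 plays the role of -ETIMEDOUT). */
static int wait_for_reset(void)
{
	uint64_t deadline = now_ns() + 30 * 1000;

	do {
		if (!reset_still_pending())
			return 0;
	} while (now_ns() < deadline);
	return -1;
}

int main(void)
{
	printf("wait_for_reset() = %d\n", wait_for_reset());
	return 0;
}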
@@ -3123,12 +3198,10 @@ static void ql_display_dev_info(struct net_device *ndev)
3123 3198
3124static int ql_adapter_down(struct ql_adapter *qdev) 3199static int ql_adapter_down(struct ql_adapter *qdev)
3125{ 3200{
3126 struct net_device *ndev = qdev->ndev;
3127 int i, status = 0; 3201 int i, status = 0;
3128 struct rx_ring *rx_ring; 3202 struct rx_ring *rx_ring;
3129 3203
3130 netif_stop_queue(ndev); 3204 netif_carrier_off(qdev->ndev);
3131 netif_carrier_off(ndev);
3132 3205
3133 /* Don't kill the reset worker thread if we 3206 /* Don't kill the reset worker thread if we
3134 * are in the process of recovery. 3207 * are in the process of recovery.
@@ -3137,6 +3210,8 @@ static int ql_adapter_down(struct ql_adapter *qdev)
3137 cancel_delayed_work_sync(&qdev->asic_reset_work); 3210 cancel_delayed_work_sync(&qdev->asic_reset_work);
3138 cancel_delayed_work_sync(&qdev->mpi_reset_work); 3211 cancel_delayed_work_sync(&qdev->mpi_reset_work);
3139 cancel_delayed_work_sync(&qdev->mpi_work); 3212 cancel_delayed_work_sync(&qdev->mpi_work);
3213 cancel_delayed_work_sync(&qdev->mpi_idc_work);
3214 cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
3140 3215
3141 /* The default queue at index 0 is always processed in 3216 /* The default queue at index 0 is always processed in
3142 * a workqueue. 3217 * a workqueue.
@@ -3171,6 +3246,8 @@ static int ql_adapter_down(struct ql_adapter *qdev)
3171 for (i = qdev->rss_ring_first_cq_id; i < qdev->rx_ring_count; i++) 3246 for (i = qdev->rss_ring_first_cq_id; i < qdev->rx_ring_count; i++)
3172 netif_napi_del(&qdev->rx_ring[i].napi); 3247 netif_napi_del(&qdev->rx_ring[i].napi);
3173 3248
3249 ql_free_rx_buffers(qdev);
3250
3174 spin_lock(&qdev->hw_lock); 3251 spin_lock(&qdev->hw_lock);
3175 status = ql_adapter_reset(qdev); 3252 status = ql_adapter_reset(qdev);
3176 if (status) 3253 if (status)
@@ -3184,21 +3261,19 @@ static int ql_adapter_up(struct ql_adapter *qdev)
3184{ 3261{
3185 int err = 0; 3262 int err = 0;
3186 3263
3187 spin_lock(&qdev->hw_lock);
3188 err = ql_adapter_initialize(qdev); 3264 err = ql_adapter_initialize(qdev);
3189 if (err) { 3265 if (err) {
3190 QPRINTK(qdev, IFUP, INFO, "Unable to initialize adapter.\n"); 3266 QPRINTK(qdev, IFUP, INFO, "Unable to initialize adapter.\n");
3191 spin_unlock(&qdev->hw_lock); 3267 spin_unlock(&qdev->hw_lock);
3192 goto err_init; 3268 goto err_init;
3193 } 3269 }
3194 spin_unlock(&qdev->hw_lock);
3195 set_bit(QL_ADAPTER_UP, &qdev->flags); 3270 set_bit(QL_ADAPTER_UP, &qdev->flags);
3271 ql_alloc_rx_buffers(qdev);
3272 if ((ql_read32(qdev, STS) & qdev->port_init))
3273 netif_carrier_on(qdev->ndev);
3196 ql_enable_interrupts(qdev); 3274 ql_enable_interrupts(qdev);
3197 ql_enable_all_completion_interrupts(qdev); 3275 ql_enable_all_completion_interrupts(qdev);
3198 if ((ql_read32(qdev, STS) & qdev->port_init)) { 3276 netif_tx_start_all_queues(qdev->ndev);
3199 netif_carrier_on(qdev->ndev);
3200 netif_start_queue(qdev->ndev);
3201 }
3202 3277
3203 return 0; 3278 return 0;
3204err_init: 3279err_init:
@@ -3206,28 +3281,6 @@ err_init:
3206 return err; 3281 return err;
3207} 3282}
3208 3283
3209static int ql_cycle_adapter(struct ql_adapter *qdev)
3210{
3211 int status;
3212
3213 status = ql_adapter_down(qdev);
3214 if (status)
3215 goto error;
3216
3217 status = ql_adapter_up(qdev);
3218 if (status)
3219 goto error;
3220
3221 return status;
3222error:
3223 QPRINTK(qdev, IFUP, ALERT,
3224 "Driver up/down cycle failed, closing device\n");
3225 rtnl_lock();
3226 dev_close(qdev->ndev);
3227 rtnl_unlock();
3228 return status;
3229}
3230
3231static void ql_release_adapter_resources(struct ql_adapter *qdev) 3284static void ql_release_adapter_resources(struct ql_adapter *qdev)
3232{ 3285{
3233 ql_free_mem_resources(qdev); 3286 ql_free_mem_resources(qdev);
@@ -3308,6 +3361,7 @@ static int ql_configure_rings(struct ql_adapter *qdev)
3308 * completion handler rx_rings. 3361 * completion handler rx_rings.
3309 */ 3362 */
3310 qdev->rx_ring_count = qdev->tx_ring_count + qdev->rss_ring_count + 1; 3363 qdev->rx_ring_count = qdev->tx_ring_count + qdev->rss_ring_count + 1;
3364 netif_set_gso_max_size(qdev->ndev, 65536);
3311 3365
3312 for (i = 0; i < qdev->tx_ring_count; i++) { 3366 for (i = 0; i < qdev->tx_ring_count; i++) {
3313 tx_ring = &qdev->tx_ring[i]; 3367 tx_ring = &qdev->tx_ring[i];
@@ -3414,6 +3468,8 @@ static int qlge_change_mtu(struct net_device *ndev, int new_mtu)
3414 3468
3415 if (ndev->mtu == 1500 && new_mtu == 9000) { 3469 if (ndev->mtu == 1500 && new_mtu == 9000) {
3416 QPRINTK(qdev, IFUP, ERR, "Changing to jumbo MTU.\n"); 3470 QPRINTK(qdev, IFUP, ERR, "Changing to jumbo MTU.\n");
3471 queue_delayed_work(qdev->workqueue,
3472 &qdev->mpi_port_cfg_work, 0);
3417 } else if (ndev->mtu == 9000 && new_mtu == 1500) { 3473 } else if (ndev->mtu == 9000 && new_mtu == 1500) {
3418 QPRINTK(qdev, IFUP, ERR, "Changing to normal MTU.\n"); 3474 QPRINTK(qdev, IFUP, ERR, "Changing to normal MTU.\n");
3419 } else if ((ndev->mtu == 1500 && new_mtu == 1500) || 3475 } else if ((ndev->mtu == 1500 && new_mtu == 1500) ||
@@ -3436,8 +3492,11 @@ static void qlge_set_multicast_list(struct net_device *ndev)
3436{ 3492{
3437 struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev); 3493 struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
3438 struct dev_mc_list *mc_ptr; 3494 struct dev_mc_list *mc_ptr;
3439 int i; 3495 int i, status;
3440 3496
3497 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3498 if (status)
3499 return;
3441 spin_lock(&qdev->hw_lock); 3500 spin_lock(&qdev->hw_lock);
3442 /* 3501 /*
3443 * Set or clear promiscuous mode if a 3502 * Set or clear promiscuous mode if a
@@ -3493,14 +3552,19 @@ static void qlge_set_multicast_list(struct net_device *ndev)
3493 } 3552 }
3494 3553
3495 if (ndev->mc_count) { 3554 if (ndev->mc_count) {
3555 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
3556 if (status)
3557 goto exit;
3496 for (i = 0, mc_ptr = ndev->mc_list; mc_ptr; 3558 for (i = 0, mc_ptr = ndev->mc_list; mc_ptr;
3497 i++, mc_ptr = mc_ptr->next) 3559 i++, mc_ptr = mc_ptr->next)
3498 if (ql_set_mac_addr_reg(qdev, (u8 *) mc_ptr->dmi_addr, 3560 if (ql_set_mac_addr_reg(qdev, (u8 *) mc_ptr->dmi_addr,
3499 MAC_ADDR_TYPE_MULTI_MAC, i)) { 3561 MAC_ADDR_TYPE_MULTI_MAC, i)) {
3500 QPRINTK(qdev, HW, ERR, 3562 QPRINTK(qdev, HW, ERR,
3501 "Failed to loadmulticast address.\n"); 3563 "Failed to loadmulticast address.\n");
3564 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
3502 goto exit; 3565 goto exit;
3503 } 3566 }
3567 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
3504 if (ql_set_routing_reg 3568 if (ql_set_routing_reg
3505 (qdev, RT_IDX_MCAST_MATCH_SLOT, RT_IDX_MCAST_MATCH, 1)) { 3569 (qdev, RT_IDX_MCAST_MATCH_SLOT, RT_IDX_MCAST_MATCH, 1)) {
3506 QPRINTK(qdev, HW, ERR, 3570 QPRINTK(qdev, HW, ERR,
@@ -3511,13 +3575,14 @@ static void qlge_set_multicast_list(struct net_device *ndev)
3511 } 3575 }
3512exit: 3576exit:
3513 spin_unlock(&qdev->hw_lock); 3577 spin_unlock(&qdev->hw_lock);
3578 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
3514} 3579}
3515 3580
3516static int qlge_set_mac_address(struct net_device *ndev, void *p) 3581static int qlge_set_mac_address(struct net_device *ndev, void *p)
3517{ 3582{
3518 struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev); 3583 struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
3519 struct sockaddr *addr = p; 3584 struct sockaddr *addr = p;
3520 int ret = 0; 3585 int status;
3521 3586
3522 if (netif_running(ndev)) 3587 if (netif_running(ndev))
3523 return -EBUSY; 3588 return -EBUSY;
@@ -3526,15 +3591,17 @@ static int qlge_set_mac_address(struct net_device *ndev, void *p)
3526 return -EADDRNOTAVAIL; 3591 return -EADDRNOTAVAIL;
3527 memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len); 3592 memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
3528 3593
3594 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
3595 if (status)
3596 return status;
3529 spin_lock(&qdev->hw_lock); 3597 spin_lock(&qdev->hw_lock);
3530 if (ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr, 3598 status = ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr,
3531 MAC_ADDR_TYPE_CAM_MAC, qdev->func)) {/* Unicast */ 3599 MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
3532 QPRINTK(qdev, HW, ERR, "Failed to load MAC address.\n");
3533 ret = -1;
3534 }
3535 spin_unlock(&qdev->hw_lock); 3600 spin_unlock(&qdev->hw_lock);
3536 3601 if (status)
3537 return ret; 3602 QPRINTK(qdev, HW, ERR, "Failed to load MAC address.\n");
3603 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
3604 return status;
3538} 3605}
3539 3606
3540static void qlge_tx_timeout(struct net_device *ndev) 3607static void qlge_tx_timeout(struct net_device *ndev)
@@ -3547,9 +3614,37 @@ static void ql_asic_reset_work(struct work_struct *work)
3547{ 3614{
3548 struct ql_adapter *qdev = 3615 struct ql_adapter *qdev =
3549 container_of(work, struct ql_adapter, asic_reset_work.work); 3616 container_of(work, struct ql_adapter, asic_reset_work.work);
3550 ql_cycle_adapter(qdev); 3617 int status;
3618
3619 status = ql_adapter_down(qdev);
3620 if (status)
3621 goto error;
3622
3623 status = ql_adapter_up(qdev);
3624 if (status)
3625 goto error;
3626
3627 return;
3628error:
3629 QPRINTK(qdev, IFUP, ALERT,
3630 "Driver up/down cycle failed, closing device\n");
3631 rtnl_lock();
3632 set_bit(QL_ADAPTER_UP, &qdev->flags);
3633 dev_close(qdev->ndev);
3634 rtnl_unlock();
3551} 3635}
3552 3636
3637static struct nic_operations qla8012_nic_ops = {
3638 .get_flash = ql_get_8012_flash_params,
3639 .port_initialize = ql_8012_port_initialize,
3640};
3641
3642static struct nic_operations qla8000_nic_ops = {
3643 .get_flash = ql_get_8000_flash_params,
3644 .port_initialize = ql_8000_port_initialize,
3645};
3646
3647
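These two ops tables let the rest of the driver call flash and port bring-up routines through qdev->nic_ops without branching on device ID at each call site; the selection happens once, in ql_get_board_info below. A self-contained model of the pattern; the struct layout here is illustrative, the real struct nic_operations is declared in qlge.h:

#include <stdio.h>

struct nic_ops {
	int (*get_flash)(void);
	int (*port_initialize)(void);
};

static int flash_8012(void) { puts("8012 flash"); return 0; }
static int port_8012(void)  { puts("8012 port");  return 0; }
static int flash_8000(void) { puts("8000 flash"); return 0; }
static int port_8000(void)  { puts("8000 port");  return 0; }

static const struct nic_ops ops_8012 = { flash_8012, port_8012 };
static const struct nic_ops ops_8000 = { flash_8000, port_8000 };

int main(void)
{
	unsigned int device_id = 0x8000;	/* as if read from PCI config */
	const struct nic_ops *ops =
		device_id == 0x8012 ? &ops_8012 : &ops_8000;

	/* Callers go through the table, not an if/else per call. */
	ops->get_flash();
	return ops->port_initialize();
}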
3553static void ql_get_board_info(struct ql_adapter *qdev) 3648static void ql_get_board_info(struct ql_adapter *qdev)
3554{ 3649{
3555 qdev->func = 3650 qdev->func =
@@ -3568,6 +3663,11 @@ static void ql_get_board_info(struct ql_adapter *qdev)
3568 qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBO; 3663 qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBO;
3569 } 3664 }
3570 qdev->chip_rev_id = ql_read32(qdev, REV_ID); 3665 qdev->chip_rev_id = ql_read32(qdev, REV_ID);
3666 qdev->device_id = qdev->pdev->device;
3667 if (qdev->device_id == QLGE_DEVICE_ID_8012)
3668 qdev->nic_ops = &qla8012_nic_ops;
3669 else if (qdev->device_id == QLGE_DEVICE_ID_8000)
3670 qdev->nic_ops = &qla8000_nic_ops;
3571} 3671}
3572 3672
3573static void ql_release_all(struct pci_dev *pdev) 3673static void ql_release_all(struct pci_dev *pdev)
@@ -3660,24 +3760,20 @@ static int __devinit ql_init_device(struct pci_dev *pdev,
3660 goto err_out; 3760 goto err_out;
3661 } 3761 }
3662 3762
3663 ql_get_board_info(qdev);
3664 qdev->ndev = ndev; 3763 qdev->ndev = ndev;
3665 qdev->pdev = pdev; 3764 qdev->pdev = pdev;
3765 ql_get_board_info(qdev);
3666 qdev->msg_enable = netif_msg_init(debug, default_msg); 3766 qdev->msg_enable = netif_msg_init(debug, default_msg);
3667 spin_lock_init(&qdev->hw_lock); 3767 spin_lock_init(&qdev->hw_lock);
3668 spin_lock_init(&qdev->stats_lock); 3768 spin_lock_init(&qdev->stats_lock);
3669 3769
3670 /* make sure the EEPROM is good */ 3770 /* make sure the EEPROM is good */
3671 err = ql_get_flash_params(qdev); 3771 err = qdev->nic_ops->get_flash(qdev);
3672 if (err) { 3772 if (err) {
3673 dev_err(&pdev->dev, "Invalid FLASH.\n"); 3773 dev_err(&pdev->dev, "Invalid FLASH.\n");
3674 goto err_out; 3774 goto err_out;
3675 } 3775 }
3676 3776
3677 if (!is_valid_ether_addr(qdev->flash.mac_addr))
3678 goto err_out;
3679
3680 memcpy(ndev->dev_addr, qdev->flash.mac_addr, ndev->addr_len);
3681 memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len); 3777 memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
3682 3778
3683 /* Set up the default ring sizes. */ 3779 /* Set up the default ring sizes. */
@@ -3700,6 +3796,10 @@ static int __devinit ql_init_device(struct pci_dev *pdev,
3700 INIT_DELAYED_WORK(&qdev->asic_reset_work, ql_asic_reset_work); 3796 INIT_DELAYED_WORK(&qdev->asic_reset_work, ql_asic_reset_work);
3701 INIT_DELAYED_WORK(&qdev->mpi_reset_work, ql_mpi_reset_work); 3797 INIT_DELAYED_WORK(&qdev->mpi_reset_work, ql_mpi_reset_work);
3702 INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work); 3798 INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work);
3799 INIT_DELAYED_WORK(&qdev->mpi_port_cfg_work, ql_mpi_port_cfg_work);
3800 INIT_DELAYED_WORK(&qdev->mpi_idc_work, ql_mpi_idc_work);
3801 mutex_init(&qdev->mpi_mutex);
3802 init_completion(&qdev->ide_completion);
3703 3803
3704 if (!cards_found) { 3804 if (!cards_found) {
3705 dev_info(&pdev->dev, "%s\n", DRV_STRING); 3805 dev_info(&pdev->dev, "%s\n", DRV_STRING);
@@ -3737,7 +3837,8 @@ static int __devinit qlge_probe(struct pci_dev *pdev,
3737 static int cards_found = 0; 3837 static int cards_found = 0;
3738 int err = 0; 3838 int err = 0;
3739 3839
3740 ndev = alloc_etherdev(sizeof(struct ql_adapter)); 3840 ndev = alloc_etherdev_mq(sizeof(struct ql_adapter),
3841 min(MAX_CPUS, (int)num_online_cpus()));
3741 if (!ndev) 3842 if (!ndev)
3742 return -ENOMEM; 3843 return -ENOMEM;
3743 3844
@@ -3757,6 +3858,7 @@ static int __devinit qlge_probe(struct pci_dev *pdev,
3757 | NETIF_F_TSO_ECN 3858 | NETIF_F_TSO_ECN
3758 | NETIF_F_HW_VLAN_TX 3859 | NETIF_F_HW_VLAN_TX
3759 | NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER); 3860 | NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER);
3861 ndev->features |= NETIF_F_GRO;
3760 3862
3761 if (test_bit(QL_DMA64, &qdev->flags)) 3863 if (test_bit(QL_DMA64, &qdev->flags))
3762 ndev->features |= NETIF_F_HIGHDMA; 3864 ndev->features |= NETIF_F_HIGHDMA;
@@ -3779,7 +3881,6 @@ static int __devinit qlge_probe(struct pci_dev *pdev,
3779 return err; 3881 return err;
3780 } 3882 }
3781 netif_carrier_off(ndev); 3883 netif_carrier_off(ndev);
3782 netif_stop_queue(ndev);
3783 ql_display_dev_info(ndev); 3884 ql_display_dev_info(ndev);
3784 cards_found++; 3885 cards_found++;
3785 return 0; 3886 return 0;
@@ -3833,7 +3934,6 @@ static pci_ers_result_t qlge_io_slot_reset(struct pci_dev *pdev)
3833 pci_set_master(pdev); 3934 pci_set_master(pdev);
3834 3935
3835 netif_carrier_off(ndev); 3936 netif_carrier_off(ndev);
3836 netif_stop_queue(ndev);
3837 ql_adapter_reset(qdev); 3937 ql_adapter_reset(qdev);
3838 3938
3839 /* Make sure the EEPROM is good */ 3939 /* Make sure the EEPROM is good */
diff --git a/drivers/net/qlge/qlge_mpi.c b/drivers/net/qlge/qlge_mpi.c
index fa31891b6e62..9f81b797f10b 100644
--- a/drivers/net/qlge/qlge_mpi.c
+++ b/drivers/net/qlge/qlge_mpi.c
@@ -1,6 +1,26 @@
1#include "qlge.h" 1#include "qlge.h"
2 2
3static int ql_read_mbox_reg(struct ql_adapter *qdev, u32 reg, u32 *data) 3static void ql_display_mb_sts(struct ql_adapter *qdev,
4 struct mbox_params *mbcp)
5{
6 int i;
7 static char *err_sts[] = {
8 "Command Complete",
9 "Command Not Supported",
10 "Host Interface Error",
11 "Checksum Error",
12 "Unused Completion Status",
13 "Test Failed",
14 "Command Parameter Error"};
15
16 QPRINTK(qdev, DRV, DEBUG, "%s.\n",
17 err_sts[mbcp->mbox_out[0] & 0x0000000f]);
18 for (i = 0; i < mbcp->out_count; i++)
19 QPRINTK(qdev, DRV, DEBUG, "mbox_out[%d] = 0x%.08x.\n",
20 i, mbcp->mbox_out[i]);
21}
22
23int ql_read_mpi_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
4{ 24{
5 int status; 25 int status;
6 /* wait for reg to come ready */ 26 /* wait for reg to come ready */
@@ -19,6 +39,32 @@ exit:
19 return status; 39 return status;
20} 40}
21 41
42int ql_write_mpi_reg(struct ql_adapter *qdev, u32 reg, u32 data)
43{
44 int status = 0;
45 /* wait for reg to come ready */
46 status = ql_wait_reg_rdy(qdev, PROC_ADDR, PROC_ADDR_RDY, PROC_ADDR_ERR);
47 if (status)
48 goto exit;
49 /* write the data to the data reg */
50 ql_write32(qdev, PROC_DATA, data);
51 /* trigger the write */
52 ql_write32(qdev, PROC_ADDR, reg);
53 /* wait for reg to come ready */
54 status = ql_wait_reg_rdy(qdev, PROC_ADDR, PROC_ADDR_RDY, PROC_ADDR_ERR);
55 if (status)
56 goto exit;
57exit:
58 return status;
59}
60
61int ql_soft_reset_mpi_risc(struct ql_adapter *qdev)
62{
63 int status;
64 status = ql_write_mpi_reg(qdev, 0x00001010, 1);
65 return status;
66}
67
22static int ql_get_mb_sts(struct ql_adapter *qdev, struct mbox_params *mbcp) 68static int ql_get_mb_sts(struct ql_adapter *qdev, struct mbox_params *mbcp)
23{ 69{
24 int i, status; 70 int i, status;
@@ -28,7 +74,7 @@ static int ql_get_mb_sts(struct ql_adapter *qdev, struct mbox_params *mbcp)
28 return -EBUSY; 74 return -EBUSY;
29 for (i = 0; i < mbcp->out_count; i++) { 75 for (i = 0; i < mbcp->out_count; i++) {
30 status = 76 status =
31 ql_read_mbox_reg(qdev, qdev->mailbox_out + i, 77 ql_read_mpi_reg(qdev, qdev->mailbox_out + i,
32 &mbcp->mbox_out[i]); 78 &mbcp->mbox_out[i]);
33 if (status) { 79 if (status) {
34 QPRINTK(qdev, DRV, ERR, "Failed mailbox read.\n"); 80 QPRINTK(qdev, DRV, ERR, "Failed mailbox read.\n");
@@ -39,102 +85,762 @@ static int ql_get_mb_sts(struct ql_adapter *qdev, struct mbox_params *mbcp)
39 return status; 85 return status;
40} 86}
41 87
88/* Wait for a single mailbox command to complete.
89 * Returns zero on success.
90 */
91static int ql_wait_mbx_cmd_cmplt(struct ql_adapter *qdev)
92{
93 int count = 50; /* TODO: arbitrary for now. */
94 u32 value;
95
96 do {
97 value = ql_read32(qdev, STS);
98 if (value & STS_PI)
99 return 0;
100 udelay(UDELAY_DELAY); /* 10us */
101 } while (--count);
102 return -ETIMEDOUT;
103}
104
105/* Execute a single mailbox command.
 106 * The PROC_ADDR semaphore is taken and released internally.
107 */
108static int ql_exec_mb_cmd(struct ql_adapter *qdev, struct mbox_params *mbcp)
109{
110 int i, status;
111
112 /*
113 * Make sure there's nothing pending.
114 * This shouldn't happen.
115 */
116 if (ql_read32(qdev, CSR) & CSR_HRI)
117 return -EIO;
118
119 status = ql_sem_spinlock(qdev, SEM_PROC_REG_MASK);
120 if (status)
121 return status;
122
123 /*
124 * Fill the outbound mailboxes.
125 */
126 for (i = 0; i < mbcp->in_count; i++) {
127 status = ql_write_mpi_reg(qdev, qdev->mailbox_in + i,
128 mbcp->mbox_in[i]);
129 if (status)
130 goto end;
131 }
132 /*
133 * Wake up the MPI firmware.
134 */
135 ql_write32(qdev, CSR, CSR_CMD_SET_H2R_INT);
136end:
137 ql_sem_unlock(qdev, SEM_PROC_REG_MASK);
138 return status;
139}
140
141/* We are being asked by firmware to accept
142 * a change to the port. This is only
143 * a change to max frame sizes (Tx/Rx), pause
144 * paramters, or loopback mode. We wake up a worker
145 * to handler processing this since a mailbox command
146 * will need to be sent to ACK the request.
147 */
148static int ql_idc_req_aen(struct ql_adapter *qdev)
149{
150 int status;
151 struct mbox_params *mbcp = &qdev->idc_mbc;
152
153 QPRINTK(qdev, DRV, ERR, "Enter!\n");
154 /* Get the status data and start up a thread to
155 * handle the request.
156 */
157 mbcp = &qdev->idc_mbc;
158 mbcp->out_count = 4;
159 status = ql_get_mb_sts(qdev, mbcp);
160 if (status) {
161 QPRINTK(qdev, DRV, ERR,
162 "Could not read MPI, resetting ASIC!\n");
163 ql_queue_asic_error(qdev);
164 } else {
165 /* Begin polled mode early so
166 * we don't get another interrupt
167 * when we leave mpi_worker.
168 */
169 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
170 queue_delayed_work(qdev->workqueue, &qdev->mpi_idc_work, 0);
171 }
172 return status;
173}
174
175/* Process an inter-device event completion.
176 * If good, signal the caller's completion.
177 */
178static int ql_idc_cmplt_aen(struct ql_adapter *qdev)
179{
180 int status;
181 struct mbox_params *mbcp = &qdev->idc_mbc;
182 mbcp->out_count = 4;
183 status = ql_get_mb_sts(qdev, mbcp);
184 if (status) {
185 QPRINTK(qdev, DRV, ERR,
186 "Could not read MPI, resetting RISC!\n");
187 ql_queue_fw_error(qdev);
188 } else
189 /* Wake up the sleeping mpi_idc_work thread that is
190 * waiting for this event.
191 */
192 complete(&qdev->ide_completion);
193
194 return status;
195}
196
42static void ql_link_up(struct ql_adapter *qdev, struct mbox_params *mbcp) 197static void ql_link_up(struct ql_adapter *qdev, struct mbox_params *mbcp)
43{ 198{
199 int status;
44 mbcp->out_count = 2; 200 mbcp->out_count = 2;
45 201
46 if (ql_get_mb_sts(qdev, mbcp)) 202 status = ql_get_mb_sts(qdev, mbcp);
47 goto exit; 203 if (status) {
204 QPRINTK(qdev, DRV, ERR,
205 "%s: Could not get mailbox status.\n", __func__);
206 return;
207 }
48 208
49 qdev->link_status = mbcp->mbox_out[1]; 209 qdev->link_status = mbcp->mbox_out[1];
50 QPRINTK(qdev, DRV, ERR, "Link Up.\n"); 210 QPRINTK(qdev, DRV, ERR, "Link Up.\n");
51 QPRINTK(qdev, DRV, INFO, "Link Status = 0x%.08x.\n", mbcp->mbox_out[1]); 211
52 if (!netif_carrier_ok(qdev->ndev)) { 212 /* If we're coming back from an IDC event
53 QPRINTK(qdev, LINK, INFO, "Link is Up.\n"); 213 * then set up the CAM and frame routing.
54 netif_carrier_on(qdev->ndev); 214 */
55 netif_wake_queue(qdev->ndev); 215 if (test_bit(QL_CAM_RT_SET, &qdev->flags)) {
216 status = ql_cam_route_initialize(qdev);
217 if (status) {
218 QPRINTK(qdev, IFUP, ERR,
219 "Failed to init CAM/Routing tables.\n");
220 return;
221 } else
222 clear_bit(QL_CAM_RT_SET, &qdev->flags);
56 } 223 }
57exit: 224
58 /* Clear the MPI firmware status. */ 225 /* Queue up a worker to check the frame
59 ql_write32(qdev, CSR, CSR_CMD_CLR_R2PCI_INT); 226 * size information, and fix it if it's not
227 * to our liking.
228 */
229 if (!test_bit(QL_PORT_CFG, &qdev->flags)) {
230 QPRINTK(qdev, DRV, ERR, "Queue Port Config Worker!\n");
231 set_bit(QL_PORT_CFG, &qdev->flags);
232 /* Begin polled mode early so
233 * we don't get another interrupt
234 * when we leave mpi_worker dpc.
235 */
236 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
237 queue_delayed_work(qdev->workqueue,
238 &qdev->mpi_port_cfg_work, 0);
239 }
240
241 netif_carrier_on(qdev->ndev);
60} 242}
61 243
62static void ql_link_down(struct ql_adapter *qdev, struct mbox_params *mbcp) 244static void ql_link_down(struct ql_adapter *qdev, struct mbox_params *mbcp)
63{ 245{
246 int status;
247
64 mbcp->out_count = 3; 248 mbcp->out_count = 3;
65 249
66 if (ql_get_mb_sts(qdev, mbcp)) { 250 status = ql_get_mb_sts(qdev, mbcp);
67 QPRINTK(qdev, DRV, ERR, "Firmware did not initialize!\n"); 251 if (status)
68 goto exit; 252 QPRINTK(qdev, DRV, ERR, "Link down AEN broken!\n");
69 } 253
254 netif_carrier_off(qdev->ndev);
255}
256
257static int ql_sfp_in(struct ql_adapter *qdev, struct mbox_params *mbcp)
258{
259 int status;
260
261 mbcp->out_count = 5;
262
263 status = ql_get_mb_sts(qdev, mbcp);
264 if (status)
265 QPRINTK(qdev, DRV, ERR, "SFP in AEN broken!\n");
266 else
267 QPRINTK(qdev, DRV, ERR, "SFP insertion detected.\n");
268
269 return status;
270}
271
272static int ql_sfp_out(struct ql_adapter *qdev, struct mbox_params *mbcp)
273{
274 int status;
275
276 mbcp->out_count = 1;
277
278 status = ql_get_mb_sts(qdev, mbcp);
279 if (status)
280 QPRINTK(qdev, DRV, ERR, "SFP out AEN broken!\n");
281 else
282 QPRINTK(qdev, DRV, ERR, "SFP removal detected.\n");
283
284 return status;
285}
286
287static int ql_aen_lost(struct ql_adapter *qdev, struct mbox_params *mbcp)
288{
289 int status;
290
291 mbcp->out_count = 6;
292
293 status = ql_get_mb_sts(qdev, mbcp);
294 if (status)
295 QPRINTK(qdev, DRV, ERR, "Lost AEN broken!\n");
296 else {
297 int i;
298 QPRINTK(qdev, DRV, ERR, "Lost AEN detected.\n");
299 for (i = 0; i < mbcp->out_count; i++)
300 QPRINTK(qdev, DRV, ERR, "mbox_out[%d] = 0x%.08x.\n",
301 i, mbcp->mbox_out[i]);
70 302
71 if (netif_carrier_ok(qdev->ndev)) {
72 QPRINTK(qdev, LINK, INFO, "Link is Down.\n");
73 netif_carrier_off(qdev->ndev);
74 netif_stop_queue(qdev->ndev);
75 } 303 }
76 QPRINTK(qdev, DRV, ERR, "Link Down.\n"); 304
77 QPRINTK(qdev, DRV, ERR, "Link Status = 0x%.08x.\n", mbcp->mbox_out[1]); 305 return status;
78exit:
79 /* Clear the MPI firmware status. */
80 ql_write32(qdev, CSR, CSR_CMD_CLR_R2PCI_INT);
81} 306}
82 307
83static void ql_init_fw_done(struct ql_adapter *qdev, struct mbox_params *mbcp) 308static void ql_init_fw_done(struct ql_adapter *qdev, struct mbox_params *mbcp)
84{ 309{
310 int status;
311
85 mbcp->out_count = 2; 312 mbcp->out_count = 2;
86 313
87 if (ql_get_mb_sts(qdev, mbcp)) { 314 status = ql_get_mb_sts(qdev, mbcp);
315 if (status) {
88 QPRINTK(qdev, DRV, ERR, "Firmware did not initialize!\n"); 316 QPRINTK(qdev, DRV, ERR, "Firmware did not initialize!\n");
89 goto exit; 317 } else {
318 QPRINTK(qdev, DRV, ERR, "Firmware Revision = 0x%.08x.\n",
319 mbcp->mbox_out[1]);
320 status = ql_cam_route_initialize(qdev);
321 if (status)
322 QPRINTK(qdev, IFUP, ERR,
323 "Failed to init CAM/Routing tables.\n");
90 } 324 }
91 QPRINTK(qdev, DRV, ERR, "Firmware initialized!\n"); 325}
92 QPRINTK(qdev, DRV, ERR, "Firmware status = 0x%.08x.\n", 326
93 mbcp->mbox_out[0]); 327/* Process an async event and clear it unless it's an
94 QPRINTK(qdev, DRV, ERR, "Firmware Revision = 0x%.08x.\n", 328 * error condition.
95 mbcp->mbox_out[1]); 329 * This can get called iteratively from the mpi_work thread
96exit: 330 * when events arrive via an interrupt.
97 /* Clear the MPI firmware status. */ 331 * It also gets called when a mailbox command is polling for
 332 * its completion. */
333static int ql_mpi_handler(struct ql_adapter *qdev, struct mbox_params *mbcp)
334{
335 int status;
336 int orig_count = mbcp->out_count;
337
338 /* Just get mailbox zero for now. */
339 mbcp->out_count = 1;
340 status = ql_get_mb_sts(qdev, mbcp);
341 if (status) {
342 QPRINTK(qdev, DRV, ERR,
343 "Could not read MPI, resetting ASIC!\n");
344 ql_queue_asic_error(qdev);
345 goto end;
346 }
347
348 switch (mbcp->mbox_out[0]) {
349
350 /* This case is only active when we arrive here
351 * as a result of issuing a mailbox command to
352 * the firmware.
353 */
354 case MB_CMD_STS_INTRMDT:
355 case MB_CMD_STS_GOOD:
356 case MB_CMD_STS_INVLD_CMD:
357 case MB_CMD_STS_XFC_ERR:
358 case MB_CMD_STS_CSUM_ERR:
359 case MB_CMD_STS_ERR:
360 case MB_CMD_STS_PARAM_ERR:
361 /* We can only get mailbox status if we're polling from an
362 * unfinished command. Get the rest of the status data and
363 * return back to the caller.
364 * We only end up here when we're polling for a mailbox
365 * command completion.
366 */
367 mbcp->out_count = orig_count;
368 status = ql_get_mb_sts(qdev, mbcp);
369 return status;
370
371 /* We are being asked by firmware to accept
372 * a change to the port. This is only
373 * a change to max frame sizes (Tx/Rx), pause
 374 * parameters, or loopback mode.
375 */
376 case AEN_IDC_REQ:
377 status = ql_idc_req_aen(qdev);
378 break;
379
 380 /* Process an inbound IDC event.
381 * This will happen when we're trying to
382 * change tx/rx max frame size, change pause
 383 * parameters or loopback mode.
384 */
385 case AEN_IDC_CMPLT:
386 case AEN_IDC_EXT:
387 status = ql_idc_cmplt_aen(qdev);
388 break;
389
390 case AEN_LINK_UP:
391 ql_link_up(qdev, mbcp);
392 break;
393
394 case AEN_LINK_DOWN:
395 ql_link_down(qdev, mbcp);
396 break;
397
398 case AEN_FW_INIT_DONE:
 399 /* If we're in the process of executing the firmware,
400 * then convert the status to normal mailbox status.
401 */
402 if (mbcp->mbox_in[0] == MB_CMD_EX_FW) {
403 mbcp->out_count = orig_count;
404 status = ql_get_mb_sts(qdev, mbcp);
405 mbcp->mbox_out[0] = MB_CMD_STS_GOOD;
406 return status;
407 }
408 ql_init_fw_done(qdev, mbcp);
409 break;
410
411 case AEN_AEN_SFP_IN:
412 ql_sfp_in(qdev, mbcp);
413 break;
414
415 case AEN_AEN_SFP_OUT:
416 ql_sfp_out(qdev, mbcp);
417 break;
418
419 /* This event can arrive at boot time or after an
420 * MPI reset if the firmware failed to initialize.
421 */
422 case AEN_FW_INIT_FAIL:
 423 /* If we're in the process of executing the firmware,
424 * then convert the status to normal mailbox status.
425 */
426 if (mbcp->mbox_in[0] == MB_CMD_EX_FW) {
427 mbcp->out_count = orig_count;
428 status = ql_get_mb_sts(qdev, mbcp);
429 mbcp->mbox_out[0] = MB_CMD_STS_ERR;
430 return status;
431 }
432 QPRINTK(qdev, DRV, ERR,
433 "Firmware initialization failed.\n");
434 status = -EIO;
435 ql_queue_fw_error(qdev);
436 break;
437
438 case AEN_SYS_ERR:
439 QPRINTK(qdev, DRV, ERR,
440 "System Error.\n");
441 ql_queue_fw_error(qdev);
442 status = -EIO;
443 break;
444
445 case AEN_AEN_LOST:
446 ql_aen_lost(qdev, mbcp);
447 break;
448
449 default:
450 QPRINTK(qdev, DRV, ERR,
451 "Unsupported AE %.08x.\n", mbcp->mbox_out[0]);
452 /* Clear the MPI firmware status. */
453 }
454end:
98 ql_write32(qdev, CSR, CSR_CMD_CLR_R2PCI_INT); 455 ql_write32(qdev, CSR, CSR_CMD_CLR_R2PCI_INT);
456 return status;
99} 457}
100 458
101void ql_mpi_work(struct work_struct *work) 459/* Execute a single mailbox command.
460 * mbcp is a pointer to an array of u32. Each
 461 * element in the array contains the value for its
462 * respective mailbox register.
463 */
464static int ql_mailbox_command(struct ql_adapter *qdev, struct mbox_params *mbcp)
465{
466 int status, count;
467
468 mutex_lock(&qdev->mpi_mutex);
469
470 /* Begin polled mode for MPI */
471 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
472
473 /* Load the mailbox registers and wake up MPI RISC. */
474 status = ql_exec_mb_cmd(qdev, mbcp);
475 if (status)
476 goto end;
477
478
479 /* If we're generating a system error, then there's nothing
480 * to wait for.
481 */
482 if (mbcp->mbox_in[0] == MB_CMD_MAKE_SYS_ERR)
483 goto end;
484
485 /* Wait for the command to complete. We loop
486 * here because some AEN might arrive while
487 * we're waiting for the mailbox command to
488 * complete. If more than 5 arrive then we can
489 * assume something is wrong. */
490 count = 5;
491 do {
492 /* Wait for the interrupt to come in. */
493 status = ql_wait_mbx_cmd_cmplt(qdev);
494 if (status)
495 goto end;
496
497 /* Process the event. If it's an AEN, it
498 * will be handled in-line or a worker
499 * will be spawned. If it's our completion
500 * we will catch it below.
501 */
502 status = ql_mpi_handler(qdev, mbcp);
503 if (status)
504 goto end;
505
506 /* It's either the completion for our mailbox
507 * command complete or an AEN. If it's our
508 * completion then get out.
509 */
510 if (((mbcp->mbox_out[0] & 0x0000f000) ==
511 MB_CMD_STS_GOOD) ||
512 ((mbcp->mbox_out[0] & 0x0000f000) ==
513 MB_CMD_STS_INTRMDT))
514 break;
515 } while (--count);
516
517 if (!count) {
518 QPRINTK(qdev, DRV, ERR,
519 "Timed out waiting for mailbox complete.\n");
520 status = -ETIMEDOUT;
521 goto end;
522 }
523
524 /* Now we can clear the interrupt condition
525 * and look at our status.
526 */
527 ql_write32(qdev, CSR, CSR_CMD_CLR_R2PCI_INT);
528
529 if (((mbcp->mbox_out[0] & 0x0000f000) !=
530 MB_CMD_STS_GOOD) &&
531 ((mbcp->mbox_out[0] & 0x0000f000) !=
532 MB_CMD_STS_INTRMDT)) {
533 ql_display_mb_sts(qdev, mbcp);
534 status = -EIO;
535 }
536end:
537 mutex_unlock(&qdev->mpi_mutex);
538 /* End polled mode for MPI */
539 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);
540 return status;
541}
542
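Several writes above use the (BIT << 16) | BIT shape, e.g. the INTR_MASK writes that enter and leave polled mode. Our reading is that the upper 16 bits select which low-order bits the write may change, so (INTR_MASK_PI << 16) clears PI while (INTR_MASK_PI << 16) | INTR_MASK_PI sets it. A small model of that convention, sketched outside the hardware (an interpretation, not driver code):

#include <assert.h>
#include <stdint.h>

/* Apply a masked write: the upper 16 bits of val enable which of the
 * lower 16 bits in reg get updated; everything else is preserved. */
static uint32_t masked_write(uint32_t reg, uint32_t val)
{
	uint32_t mask = val >> 16;

	return (reg & ~mask) | (val & mask & 0xffff);
}

int main(void)
{
	uint32_t pi = 0x1;	/* illustrative INTR_MASK_PI-style bit */
	uint32_t reg = 0;

	reg = masked_write(reg, (pi << 16) | pi);	/* set the bit */
	assert(reg == pi);
	reg = masked_write(reg, pi << 16);		/* clear it */
	assert(reg == 0);
	return 0;
}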
543/* Get functional state for MPI firmware.
544 * Returns zero on success.
545 */
546int ql_mb_get_fw_state(struct ql_adapter *qdev)
547{
548 struct mbox_params mbc;
549 struct mbox_params *mbcp = &mbc;
550 int status = 0;
551
552 memset(mbcp, 0, sizeof(struct mbox_params));
553
554 mbcp->in_count = 1;
555 mbcp->out_count = 2;
556
557 mbcp->mbox_in[0] = MB_CMD_GET_FW_STATE;
558
559 status = ql_mailbox_command(qdev, mbcp);
560 if (status)
561 return status;
562
563 if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
564 QPRINTK(qdev, DRV, ERR,
565 "Failed Get Firmware State.\n");
566 status = -EIO;
567 }
568
569 /* If bit zero is set in mbx 1 then the firmware is
570 * running, but not initialized. This should never
571 * happen.
572 */
573 if (mbcp->mbox_out[1] & 1) {
574 QPRINTK(qdev, DRV, ERR,
575 "Firmware waiting for initialization.\n");
576 status = -EIO;
577 }
578
579 return status;
580}
581
 582 /* Send an ACK mailbox command to the firmware to
583 * let it continue with the change.
584 */
585int ql_mb_idc_ack(struct ql_adapter *qdev)
102{ 586{
103 struct ql_adapter *qdev =
104 container_of(work, struct ql_adapter, mpi_work.work);
105 struct mbox_params mbc; 587 struct mbox_params mbc;
106 struct mbox_params *mbcp = &mbc; 588 struct mbox_params *mbcp = &mbc;
589 int status = 0;
590
591 memset(mbcp, 0, sizeof(struct mbox_params));
592
593 mbcp->in_count = 5;
107 mbcp->out_count = 1; 594 mbcp->out_count = 1;
108 595
109 while (ql_read32(qdev, STS) & STS_PI) { 596 mbcp->mbox_in[0] = MB_CMD_IDC_ACK;
110 if (ql_get_mb_sts(qdev, mbcp)) { 597 mbcp->mbox_in[1] = qdev->idc_mbc.mbox_out[1];
111 QPRINTK(qdev, DRV, ERR, 598 mbcp->mbox_in[2] = qdev->idc_mbc.mbox_out[2];
112 "Could not read MPI, resetting ASIC!\n"); 599 mbcp->mbox_in[3] = qdev->idc_mbc.mbox_out[3];
113 ql_queue_asic_error(qdev); 600 mbcp->mbox_in[4] = qdev->idc_mbc.mbox_out[4];
114 }
115 601
116 switch (mbcp->mbox_out[0]) { 602 status = ql_mailbox_command(qdev, mbcp);
117 case AEN_LINK_UP: 603 if (status)
118 ql_link_up(qdev, mbcp); 604 return status;
119 break; 605
120 case AEN_LINK_DOWN: 606 if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
121 ql_link_down(qdev, mbcp); 607 QPRINTK(qdev, DRV, ERR,
122 break; 608 "Failed IDC ACK send.\n");
123 case AEN_FW_INIT_DONE: 609 status = -EIO;
124 ql_init_fw_done(qdev, mbcp); 610 }
611 return status;
612}
613
 614 /* Set link settings and maximum frame size settings
615 * for the current port.
616 * Most likely will block.
617 */
618static int ql_mb_set_port_cfg(struct ql_adapter *qdev)
619{
620 struct mbox_params mbc;
621 struct mbox_params *mbcp = &mbc;
622 int status = 0;
623
624 memset(mbcp, 0, sizeof(struct mbox_params));
625
626 mbcp->in_count = 3;
627 mbcp->out_count = 1;
628
629 mbcp->mbox_in[0] = MB_CMD_SET_PORT_CFG;
630 mbcp->mbox_in[1] = qdev->link_config;
631 mbcp->mbox_in[2] = qdev->max_frame_size;
632
633
634 status = ql_mailbox_command(qdev, mbcp);
635 if (status)
636 return status;
637
638 if (mbcp->mbox_out[0] == MB_CMD_STS_INTRMDT) {
639 QPRINTK(qdev, DRV, ERR,
640 "Port Config sent, wait for IDC.\n");
641 } else if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
642 QPRINTK(qdev, DRV, ERR,
643 "Failed Set Port Configuration.\n");
644 status = -EIO;
645 }
646 return status;
647}
648
649/* Get link settings and maximum frame size settings
650 * for the current port.
651 * Most likely will block.
652 */
653static int ql_mb_get_port_cfg(struct ql_adapter *qdev)
654{
655 struct mbox_params mbc;
656 struct mbox_params *mbcp = &mbc;
657 int status = 0;
658
659 memset(mbcp, 0, sizeof(struct mbox_params));
660
661 mbcp->in_count = 1;
662 mbcp->out_count = 3;
663
664 mbcp->mbox_in[0] = MB_CMD_GET_PORT_CFG;
665
666 status = ql_mailbox_command(qdev, mbcp);
667 if (status)
668 return status;
669
670 if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
671 QPRINTK(qdev, DRV, ERR,
672 "Failed Get Port Configuration.\n");
673 status = -EIO;
674 } else {
675 QPRINTK(qdev, DRV, DEBUG,
676 "Passed Get Port Configuration.\n");
677 qdev->link_config = mbcp->mbox_out[1];
678 qdev->max_frame_size = mbcp->mbox_out[2];
679 }
680 return status;
681}
682
683/* IDC - Inter Device Communication...
684 * Some firmware commands require consent of adjacent FCOE
685 * function. This function waits for the OK, or a
 686 * counter-request for a little more time.
687 * The firmware will complete the request if the other
688 * function doesn't respond.
689 */
690static int ql_idc_wait(struct ql_adapter *qdev)
691{
692 int status = -ETIMEDOUT;
693 long wait_time = 1 * HZ;
694 struct mbox_params *mbcp = &qdev->idc_mbc;
695 do {
696 /* Wait here for the command to complete
697 * via the IDC process.
698 */
699 wait_time =
700 wait_for_completion_timeout(&qdev->ide_completion,
701 wait_time);
702 if (!wait_time) {
703 QPRINTK(qdev, DRV, ERR,
704 "IDC Timeout.\n");
125 break; 705 break;
126 case MB_CMD_STS_GOOD: 706 }
707 /* Now examine the response from the IDC process.
708 * We might have a good completion or a request for
709 * more wait time.
710 */
711 if (mbcp->mbox_out[0] == AEN_IDC_EXT) {
712 QPRINTK(qdev, DRV, ERR,
713 "IDC Time Extension from function.\n");
714 wait_time += (mbcp->mbox_out[1] >> 8) & 0x0000000f;
715 } else if (mbcp->mbox_out[0] == AEN_IDC_CMPLT) {
716 QPRINTK(qdev, DRV, ERR,
717 "IDC Success.\n");
718 status = 0;
127 break; 719 break;
128 case AEN_FW_INIT_FAIL: 720 } else {
129 case AEN_SYS_ERR: 721 QPRINTK(qdev, DRV, ERR,
130 case MB_CMD_STS_ERR: 722 "IDC: Invalid State 0x%.04x.\n",
131 ql_queue_fw_error(qdev); 723 mbcp->mbox_out[0]);
132 default: 724 status = -EIO;
133 /* Clear the MPI firmware status. */
134 ql_write32(qdev, CSR, CSR_CMD_CLR_R2PCI_INT);
135 break; 725 break;
136 } 726 }
727 } while (wait_time);
728
729 return status;
730}
731
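ql_idc_wait above combines a completion with a deadline the peer function can extend. A compact user-space model of that control flow, with a canned peer standing in for wait_for_completion_timeout() (all behavior here is simulated):

#include <stdio.h>

enum idc_event { IDC_TIMEOUT, IDC_EXTEND, IDC_COMPLETE };

/* Stand-in for wait_for_completion_timeout(): the pretend peer asks
 * for one extension, then completes; a spent budget means timeout. */
static enum idc_event wait_for_peer(int *budget)
{
	static int calls;

	if (*budget <= 0)
		return IDC_TIMEOUT;
	(*budget)--;
	return calls++ == 0 ? IDC_EXTEND : IDC_COMPLETE;
}

int main(void)
{
	int budget = 1;	/* initial wait, like the 1 * HZ above */

	for (;;) {
		switch (wait_for_peer(&budget)) {
		case IDC_TIMEOUT:
			puts("IDC Timeout.");
			return 1;
		case IDC_EXTEND:
			puts("IDC Time Extension from function.");
			budget += 1;	/* peer bought itself more time */
			break;
		case IDC_COMPLETE:
			puts("IDC Success.");
			return 0;
		}
	}
}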
732/* API called in work thread context to set new TX/RX
733 * maximum frame size values to match MTU.
734 */
735static int ql_set_port_cfg(struct ql_adapter *qdev)
736{
737 int status;
738 status = ql_mb_set_port_cfg(qdev);
739 if (status)
740 return status;
741 status = ql_idc_wait(qdev);
742 return status;
743}
744
745/* The following routines are worker threads that process
746 * events that may sleep waiting for completion.
747 */
748
749/* This thread gets the maximum TX and RX frame size values
750 * from the firmware and, if necessary, changes them to match
751 * the MTU setting.
752 */
753void ql_mpi_port_cfg_work(struct work_struct *work)
754{
755 struct ql_adapter *qdev =
756 container_of(work, struct ql_adapter, mpi_port_cfg_work.work);
757 struct net_device *ndev = qdev->ndev;
758 int status;
759
760 status = ql_mb_get_port_cfg(qdev);
761 if (status) {
762 QPRINTK(qdev, DRV, ERR,
763 "Bug: Failed to get port config data.\n");
764 goto err;
765 }
766
767 if (ndev->mtu <= 2500)
768 goto end;
769 else if (qdev->link_config & CFG_JUMBO_FRAME_SIZE &&
770 qdev->max_frame_size ==
771 CFG_DEFAULT_MAX_FRAME_SIZE)
772 goto end;
773
774 qdev->link_config |= CFG_JUMBO_FRAME_SIZE;
775 qdev->max_frame_size = CFG_DEFAULT_MAX_FRAME_SIZE;
776 status = ql_set_port_cfg(qdev);
777 if (status) {
778 QPRINTK(qdev, DRV, ERR,
779 "Bug: Failed to set port config data.\n");
780 goto err;
781 }
782end:
783 clear_bit(QL_PORT_CFG, &qdev->flags);
784 return;
785err:
786 ql_queue_fw_error(qdev);
787 goto end;
788}
789
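The worker above only reconfigures the port when the MTU is jumbo-sized and the firmware does not already agree. The decision, isolated into a small testable function (both constants below are illustrative placeholders, not the driver's values):

#include <stdbool.h>
#include <stdio.h>

#define CFG_JUMBO_FRAME_SIZE		0x4	/* illustrative bit */
#define CFG_DEFAULT_MAX_FRAME_SIZE	9216	/* illustrative size */

static bool needs_port_reconfig(int mtu, unsigned int link_config,
				unsigned int max_frame_size)
{
	if (mtu <= 2500)
		return false;	/* standard frames: leave config alone */
	/* Jumbo MTU: reconfigure unless firmware already agrees. */
	return !((link_config & CFG_JUMBO_FRAME_SIZE) &&
		 max_frame_size == CFG_DEFAULT_MAX_FRAME_SIZE);
}

int main(void)
{
	printf("%d\n", needs_port_reconfig(9000, 0, 1518));	/* 1 */
	printf("%d\n", needs_port_reconfig(1500, 0, 1518));	/* 0 */
	return 0;
}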
 790 /* Process an inter-device request. This is issued by
791 * the firmware in response to another function requesting
792 * a change to the port. We set a flag to indicate a change
793 * has been made and then send a mailbox command ACKing
794 * the change request.
795 */
796void ql_mpi_idc_work(struct work_struct *work)
797{
798 struct ql_adapter *qdev =
799 container_of(work, struct ql_adapter, mpi_idc_work.work);
800 int status;
801 struct mbox_params *mbcp = &qdev->idc_mbc;
802 u32 aen;
803
804 aen = mbcp->mbox_out[1] >> 16;
805
806 switch (aen) {
807 default:
808 QPRINTK(qdev, DRV, ERR,
809 "Bug: Unhandled IDC action.\n");
810 break;
811 case MB_CMD_PORT_RESET:
812 case MB_CMD_SET_PORT_CFG:
813 case MB_CMD_STOP_FW:
814 netif_carrier_off(qdev->ndev);
815 /* Signal the resulting link up AEN
816 * that the frame routing and mac addr
 817 * need to be set.
 818 */
819 set_bit(QL_CAM_RT_SET, &qdev->flags);
820 status = ql_mb_idc_ack(qdev);
821 if (status) {
822 QPRINTK(qdev, DRV, ERR,
823 "Bug: No pending IDC!\n");
824 }
825 }
826}
827
828void ql_mpi_work(struct work_struct *work)
829{
830 struct ql_adapter *qdev =
831 container_of(work, struct ql_adapter, mpi_work.work);
832 struct mbox_params mbc;
833 struct mbox_params *mbcp = &mbc;
834
835 mutex_lock(&qdev->mpi_mutex);
836
837 while (ql_read32(qdev, STS) & STS_PI) {
838 memset(mbcp, 0, sizeof(struct mbox_params));
839 mbcp->out_count = 1;
840 ql_mpi_handler(qdev, mbcp);
137 } 841 }
842
843 mutex_unlock(&qdev->mpi_mutex);
138 ql_enable_completion_interrupt(qdev, 0); 844 ql_enable_completion_interrupt(qdev, 0);
139} 845}
140 846
@@ -142,9 +848,8 @@ void ql_mpi_reset_work(struct work_struct *work)
142{ 848{
143 struct ql_adapter *qdev = 849 struct ql_adapter *qdev =
144 container_of(work, struct ql_adapter, mpi_reset_work.work); 850 container_of(work, struct ql_adapter, mpi_reset_work.work);
145 QPRINTK(qdev, DRV, ERR, 851 cancel_delayed_work_sync(&qdev->mpi_work);
146 "Enter, qdev = %p..\n", qdev); 852 cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
147 ql_write32(qdev, CSR, CSR_CMD_SET_RST); 853 cancel_delayed_work_sync(&qdev->mpi_idc_work);
148 msleep(50); 854 ql_soft_reset_mpi_risc(qdev);
149 ql_write32(qdev, CSR, CSR_CMD_CLR_RST);
150} 855}