path: root/drivers/net/myri10ge
author     Brice Goglin <brice@myri.com>       2008-05-08 20:21:49 -0400
committer  Jeff Garzik <jgarzik@redhat.com>    2008-05-30 22:11:47 -0400
commit     0dcffac1a329be69bab0ac604bf7283737108e68 (patch)
tree       51d75909464c78e3fd5d4573b48bbbc8f7055a70 /drivers/net/myri10ge
parent     779297320d192655c2f95a870c12e9b307612429 (diff)
myri10ge: add multislices support
Add multi-slice/MSI-X support. By default, a single slice (and the normal
firmware) is used. To enable MSI-X, multi-slice mode, one must load the
driver with myri10ge_max_slices set to either -1, or something larger than 1.

Signed-off-by: Brice Goglin <brice@myri.com>
Signed-off-by: Andrew Gallatin <gallatin@myri.com>
Signed-off-by: Jeff Garzik <jgarzik@redhat.com>
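As a usage sketch (the module and parameter names come from this patch; the slice count below is only illustrative), multi-slice MSI-X mode would be requested at load time with something like:

    modprobe myri10ge myri10ge_max_slices=-1

or with an explicit limit such as myri10ge_max_slices=4. Leaving the parameter at its default of 1 keeps the existing single-slice behaviour and the non-RSS firmware images.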
Diffstat (limited to 'drivers/net/myri10ge')
-rw-r--r--  drivers/net/myri10ge/myri10ge.c | 684
1 file changed, 442 insertions(+), 242 deletions(-)
diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c
index f3ef9e3c48cd..eddcee326f06 100644
--- a/drivers/net/myri10ge/myri10ge.c
+++ b/drivers/net/myri10ge/myri10ge.c
@@ -185,11 +185,13 @@ struct myri10ge_slice_state {
         dma_addr_t fw_stats_bus;
         int watchdog_tx_done;
         int watchdog_tx_req;
+        char irq_desc[32];
 };
 
 struct myri10ge_priv {
-        struct myri10ge_slice_state ss;
+        struct myri10ge_slice_state *ss;
         int tx_boundary;        /* boundary transmits cannot cross */
+        int num_slices;
         int running;            /* running? */
         int csum_flag;          /* rx_csums? */
         int small_bytes;
@@ -208,6 +210,8 @@ struct myri10ge_priv {
         dma_addr_t cmd_bus;
         struct pci_dev *pdev;
         int msi_enabled;
+        int msix_enabled;
+        struct msix_entry *msix_vectors;
         u32 link_state;
         unsigned int rdma_tags_available;
         int intr_coal_delay;
@@ -244,6 +248,8 @@ struct myri10ge_priv {
 
 static char *myri10ge_fw_unaligned = "myri10ge_ethp_z8e.dat";
 static char *myri10ge_fw_aligned = "myri10ge_eth_z8e.dat";
+static char *myri10ge_fw_rss_unaligned = "myri10ge_rss_ethp_z8e.dat";
+static char *myri10ge_fw_rss_aligned = "myri10ge_rss_eth_z8e.dat";
 
 static char *myri10ge_fw_name = NULL;
 module_param(myri10ge_fw_name, charp, S_IRUGO | S_IWUSR);
@@ -321,6 +327,14 @@ static int myri10ge_wcfifo = 0;
 module_param(myri10ge_wcfifo, int, S_IRUGO);
 MODULE_PARM_DESC(myri10ge_wcfifo, "Enable WC Fifo when WC is enabled");
 
+static int myri10ge_max_slices = 1;
+module_param(myri10ge_max_slices, int, S_IRUGO);
+MODULE_PARM_DESC(myri10ge_max_slices, "Max tx/rx queues");
+
+static int myri10ge_rss_hash = MXGEFW_RSS_HASH_TYPE_SRC_PORT;
+module_param(myri10ge_rss_hash, int, S_IRUGO);
+MODULE_PARM_DESC(myri10ge_rss_hash, "Type of RSS hashing to do");
+
 #define MYRI10GE_FW_OFFSET 1024*1024
 #define MYRI10GE_HIGHPART_TO_U32(X) \
 (sizeof (X) == 8) ? ((u32)((u64)(X) >> 32)) : (0)
@@ -657,7 +671,7 @@ static int myri10ge_get_firmware_capabilities(struct myri10ge_priv *mgp)
         return 0;
 }
 
-static int myri10ge_load_firmware(struct myri10ge_priv *mgp)
+static int myri10ge_load_firmware(struct myri10ge_priv *mgp, int adopt)
 {
         char __iomem *submit;
         __be32 buf[16] __attribute__ ((__aligned__(8)));
@@ -667,6 +681,8 @@ static int myri10ge_load_firmware(struct myri10ge_priv *mgp)
         size = 0;
         status = myri10ge_load_hotplug_firmware(mgp, &size);
         if (status) {
+                if (!adopt)
+                        return status;
                 dev_warn(&mgp->pdev->dev, "hotplug firmware loading failed\n");
 
                 /* Do not attempt to adopt firmware if there
@@ -859,7 +875,8 @@ abort:
 static int myri10ge_reset(struct myri10ge_priv *mgp)
 {
         struct myri10ge_cmd cmd;
-        int status;
+        struct myri10ge_slice_state *ss;
+        int i, status;
         size_t bytes;
 
         /* try to send a reset command to the card to see if it
@@ -872,20 +889,74 @@ static int myri10ge_reset(struct myri10ge_priv *mgp)
         }
 
         (void)myri10ge_dma_test(mgp, MXGEFW_DMA_TEST);
+        /*
+         * Use non-ndis mcp_slot (eg, 4 bytes total,
+         * no toeplitz hash value returned.  Older firmware will
+         * not understand this command, but will use the correct
+         * sized mcp_slot, so we ignore error returns
+         */
+        cmd.data0 = MXGEFW_RSS_MCP_SLOT_TYPE_MIN;
+        (void)myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_RSS_MCP_SLOT_TYPE, &cmd, 0);
 
         /* Now exchange information about interrupts */
 
-        bytes = mgp->max_intr_slots * sizeof(*mgp->ss.rx_done.entry);
-        memset(mgp->ss.rx_done.entry, 0, bytes);
+        bytes = mgp->max_intr_slots * sizeof(*mgp->ss[0].rx_done.entry);
         cmd.data0 = (u32) bytes;
         status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_INTRQ_SIZE, &cmd, 0);
-        cmd.data0 = MYRI10GE_LOWPART_TO_U32(mgp->ss.rx_done.bus);
-        cmd.data1 = MYRI10GE_HIGHPART_TO_U32(mgp->ss.rx_done.bus);
-        status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_INTRQ_DMA, &cmd, 0);
+
+        /*
+         * Even though we already know how many slices are supported
+         * via myri10ge_probe_slices() MXGEFW_CMD_GET_MAX_RSS_QUEUES
+         * has magic side effects, and must be called after a reset.
+         * It must be called prior to calling any RSS related cmds,
+         * including assigning an interrupt queue for anything but
+         * slice 0.  It must also be called *after*
+         * MXGEFW_CMD_SET_INTRQ_SIZE, since the intrq size is used by
+         * the firmware to compute offsets.
+         */
+
+        if (mgp->num_slices > 1) {
+
+                /* ask the maximum number of slices it supports */
+                status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_MAX_RSS_QUEUES,
+                                           &cmd, 0);
+                if (status != 0) {
+                        dev_err(&mgp->pdev->dev,
+                                "failed to get number of slices\n");
+                }
+
+                /*
+                 * MXGEFW_CMD_ENABLE_RSS_QUEUES must be called prior
+                 * to setting up the interrupt queue DMA
+                 */
+
+                cmd.data0 = mgp->num_slices;
+                cmd.data1 = 1;        /* use MSI-X */
+                status = myri10ge_send_cmd(mgp, MXGEFW_CMD_ENABLE_RSS_QUEUES,
+                                           &cmd, 0);
+                if (status != 0) {
+                        dev_err(&mgp->pdev->dev,
+                                "failed to set number of slices\n");
+
+                        return status;
+                }
+        }
+        for (i = 0; i < mgp->num_slices; i++) {
+                ss = &mgp->ss[i];
+                cmd.data0 = MYRI10GE_LOWPART_TO_U32(ss->rx_done.bus);
+                cmd.data1 = MYRI10GE_HIGHPART_TO_U32(ss->rx_done.bus);
+                cmd.data2 = i;
+                status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_INTRQ_DMA,
+                                            &cmd, 0);
+        };
 
         status |=
             myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_IRQ_ACK_OFFSET, &cmd, 0);
-        mgp->ss.irq_claim = (__iomem __be32 *) (mgp->sram + cmd.data0);
+        for (i = 0; i < mgp->num_slices; i++) {
+                ss = &mgp->ss[i];
+                ss->irq_claim =
+                    (__iomem __be32 *) (mgp->sram + cmd.data0 + 8 * i);
+        }
         status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_IRQ_DEASSERT_OFFSET,
                                     &cmd, 0);
         mgp->irq_deassert = (__iomem __be32 *) (mgp->sram + cmd.data0);
@@ -899,18 +970,25 @@ static int myri10ge_reset(struct myri10ge_priv *mgp)
         }
         put_be32(htonl(mgp->intr_coal_delay), mgp->intr_coal_delay_ptr);
 
-        memset(mgp->ss.rx_done.entry, 0, bytes);
-
         /* reset mcp/driver shared state back to 0 */
-        mgp->ss.tx.req = 0;
-        mgp->ss.tx.done = 0;
-        mgp->ss.tx.pkt_start = 0;
-        mgp->ss.tx.pkt_done = 0;
-        mgp->ss.rx_big.cnt = 0;
-        mgp->ss.rx_small.cnt = 0;
-        mgp->ss.rx_done.idx = 0;
-        mgp->ss.rx_done.cnt = 0;
+
         mgp->link_changes = 0;
+        for (i = 0; i < mgp->num_slices; i++) {
+                ss = &mgp->ss[i];
+
+                memset(ss->rx_done.entry, 0, bytes);
+                ss->tx.req = 0;
+                ss->tx.done = 0;
+                ss->tx.pkt_start = 0;
+                ss->tx.pkt_done = 0;
+                ss->rx_big.cnt = 0;
+                ss->rx_small.cnt = 0;
+                ss->rx_done.idx = 0;
+                ss->rx_done.cnt = 0;
+                ss->tx.wake_queue = 0;
+                ss->tx.stop_queue = 0;
+        }
+
         status = myri10ge_update_mac_address(mgp, mgp->dev->dev_addr);
         myri10ge_change_pause(mgp, mgp->pause);
         myri10ge_set_multicast_list(mgp->dev);
@@ -1095,9 +1173,10 @@ myri10ge_rx_done(struct myri10ge_slice_state *ss, struct myri10ge_rx_buf *rx,
                 rx_frags[0].size -= MXGEFW_PAD;
                 len -= MXGEFW_PAD;
                 lro_receive_frags(&ss->rx_done.lro_mgr, rx_frags,
-                                  len, len,
                                   /* opaque, will come back in get_frag_header */
+                                  len, len,
                                   (void *)(__force unsigned long)csum, csum);
+
                 return 1;
         }
 
@@ -1236,7 +1315,7 @@ myri10ge_clean_rx_done(struct myri10ge_slice_state *ss, int budget)
 
 static inline void myri10ge_check_statblock(struct myri10ge_priv *mgp)
 {
-        struct mcp_irq_data *stats = mgp->ss.fw_stats;
+        struct mcp_irq_data *stats = mgp->ss[0].fw_stats;
 
         if (unlikely(stats->stats_updated)) {
                 unsigned link_up = ntohl(stats->link_up);
@@ -1302,6 +1381,13 @@ static irqreturn_t myri10ge_intr(int irq, void *arg)
         u32 send_done_count;
         int i;
 
+        /* an interrupt on a non-zero slice is implicitly valid
+         * since MSI-X irqs are not shared */
+        if (ss != mgp->ss) {
+                netif_rx_schedule(ss->dev, &ss->napi);
+                return (IRQ_HANDLED);
+        }
+
         /* make sure it is our IRQ, and that the DMA has finished */
         if (unlikely(!stats->valid))
                 return (IRQ_NONE);
@@ -1311,7 +1397,7 @@ static irqreturn_t myri10ge_intr(int irq, void *arg)
         if (stats->valid & 1)
                 netif_rx_schedule(ss->dev, &ss->napi);
 
-        if (!mgp->msi_enabled) {
+        if (!mgp->msi_enabled && !mgp->msix_enabled) {
                 put_be32(0, mgp->irq_deassert);
                 if (!myri10ge_deassert_wait)
                         stats->valid = 0;
@@ -1446,10 +1532,10 @@ myri10ge_get_ringparam(struct net_device *netdev,
 {
         struct myri10ge_priv *mgp = netdev_priv(netdev);
 
-        ring->rx_mini_max_pending = mgp->ss.rx_small.mask + 1;
-        ring->rx_max_pending = mgp->ss.rx_big.mask + 1;
+        ring->rx_mini_max_pending = mgp->ss[0].rx_small.mask + 1;
+        ring->rx_max_pending = mgp->ss[0].rx_big.mask + 1;
         ring->rx_jumbo_max_pending = 0;
-        ring->tx_max_pending = mgp->ss.rx_small.mask + 1;
+        ring->tx_max_pending = mgp->ss[0].rx_small.mask + 1;
         ring->rx_mini_pending = ring->rx_mini_max_pending;
         ring->rx_pending = ring->rx_max_pending;
         ring->rx_jumbo_pending = ring->rx_jumbo_max_pending;
@@ -1497,7 +1583,7 @@ static const char myri10ge_gstrings_main_stats[][ETH_GSTRING_LEN] = {
         "tx_aborted_errors", "tx_carrier_errors", "tx_fifo_errors",
         "tx_heartbeat_errors", "tx_window_errors",
         /* device-specific stats */
-        "tx_boundary", "WC", "irq", "MSI",
+        "tx_boundary", "WC", "irq", "MSI", "MSIX",
         "read_dma_bw_MBs", "write_dma_bw_MBs", "read_write_dma_bw_MBs",
         "serial_number", "watchdog_resets",
         "link_changes", "link_up", "dropped_link_overflow",
@@ -1524,23 +1610,31 @@ static const char myri10ge_gstrings_slice_stats[][ETH_GSTRING_LEN] = {
 static void
 myri10ge_get_strings(struct net_device *netdev, u32 stringset, u8 * data)
 {
+        struct myri10ge_priv *mgp = netdev_priv(netdev);
+        int i;
+
         switch (stringset) {
         case ETH_SS_STATS:
                 memcpy(data, *myri10ge_gstrings_main_stats,
                        sizeof(myri10ge_gstrings_main_stats));
                 data += sizeof(myri10ge_gstrings_main_stats);
-                memcpy(data, *myri10ge_gstrings_slice_stats,
-                       sizeof(myri10ge_gstrings_slice_stats));
-                data += sizeof(myri10ge_gstrings_slice_stats);
+                for (i = 0; i < mgp->num_slices; i++) {
+                        memcpy(data, *myri10ge_gstrings_slice_stats,
+                               sizeof(myri10ge_gstrings_slice_stats));
+                        data += sizeof(myri10ge_gstrings_slice_stats);
+                }
                 break;
         }
 }
 
 static int myri10ge_get_sset_count(struct net_device *netdev, int sset)
 {
+        struct myri10ge_priv *mgp = netdev_priv(netdev);
+
         switch (sset) {
         case ETH_SS_STATS:
-                return MYRI10GE_MAIN_STATS_LEN + MYRI10GE_SLICE_STATS_LEN;
+                return MYRI10GE_MAIN_STATS_LEN +
+                    mgp->num_slices * MYRI10GE_SLICE_STATS_LEN;
         default:
                 return -EOPNOTSUPP;
         }
@@ -1552,6 +1646,7 @@ myri10ge_get_ethtool_stats(struct net_device *netdev,
 {
         struct myri10ge_priv *mgp = netdev_priv(netdev);
         struct myri10ge_slice_state *ss;
+        int slice;
         int i;
 
         for (i = 0; i < MYRI10GE_NET_STATS_LEN; i++)
@@ -1561,6 +1656,7 @@ myri10ge_get_ethtool_stats(struct net_device *netdev,
         data[i++] = (unsigned int)mgp->wc_enabled;
         data[i++] = (unsigned int)mgp->pdev->irq;
         data[i++] = (unsigned int)mgp->msi_enabled;
+        data[i++] = (unsigned int)mgp->msix_enabled;
         data[i++] = (unsigned int)mgp->read_dma;
         data[i++] = (unsigned int)mgp->write_dma;
         data[i++] = (unsigned int)mgp->read_write_dma;
@@ -1569,7 +1665,7 @@ myri10ge_get_ethtool_stats(struct net_device *netdev,
         data[i++] = (unsigned int)mgp->link_changes;
 
         /* firmware stats are useful only in the first slice */
-        ss = &mgp->ss;
+        ss = &mgp->ss[0];
         data[i++] = (unsigned int)ntohl(ss->fw_stats->link_up);
         data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_link_overflow);
         data[i++] =
@@ -1585,24 +1681,27 @@ myri10ge_get_ethtool_stats(struct net_device *netdev,
         data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_no_small_buffer);
         data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_no_big_buffer);
 
-        data[i++] = 0;
-        data[i++] = (unsigned int)ss->tx.pkt_start;
-        data[i++] = (unsigned int)ss->tx.pkt_done;
-        data[i++] = (unsigned int)ss->tx.req;
-        data[i++] = (unsigned int)ss->tx.done;
-        data[i++] = (unsigned int)ss->rx_small.cnt;
-        data[i++] = (unsigned int)ss->rx_big.cnt;
-        data[i++] = (unsigned int)ss->tx.wake_queue;
-        data[i++] = (unsigned int)ss->tx.stop_queue;
-        data[i++] = (unsigned int)ss->tx.linearized;
-        data[i++] = ss->rx_done.lro_mgr.stats.aggregated;
-        data[i++] = ss->rx_done.lro_mgr.stats.flushed;
-        if (ss->rx_done.lro_mgr.stats.flushed)
-                data[i++] = ss->rx_done.lro_mgr.stats.aggregated /
-                    ss->rx_done.lro_mgr.stats.flushed;
-        else
-                data[i++] = 0;
-        data[i++] = ss->rx_done.lro_mgr.stats.no_desc;
+        for (slice = 0; slice < mgp->num_slices; slice++) {
+                ss = &mgp->ss[slice];
+                data[i++] = slice;
+                data[i++] = (unsigned int)ss->tx.pkt_start;
+                data[i++] = (unsigned int)ss->tx.pkt_done;
+                data[i++] = (unsigned int)ss->tx.req;
+                data[i++] = (unsigned int)ss->tx.done;
+                data[i++] = (unsigned int)ss->rx_small.cnt;
+                data[i++] = (unsigned int)ss->rx_big.cnt;
+                data[i++] = (unsigned int)ss->tx.wake_queue;
+                data[i++] = (unsigned int)ss->tx.stop_queue;
+                data[i++] = (unsigned int)ss->tx.linearized;
+                data[i++] = ss->rx_done.lro_mgr.stats.aggregated;
+                data[i++] = ss->rx_done.lro_mgr.stats.flushed;
+                if (ss->rx_done.lro_mgr.stats.flushed)
+                        data[i++] = ss->rx_done.lro_mgr.stats.aggregated /
+                            ss->rx_done.lro_mgr.stats.flushed;
+                else
+                        data[i++] = 0;
+                data[i++] = ss->rx_done.lro_mgr.stats.no_desc;
+        }
 }
 
 static void myri10ge_set_msglevel(struct net_device *netdev, u32 value)
@@ -1645,12 +1744,15 @@ static int myri10ge_allocate_rings(struct myri10ge_slice_state *ss)
         struct net_device *dev = mgp->dev;
         int tx_ring_size, rx_ring_size;
         int tx_ring_entries, rx_ring_entries;
-        int i, status;
+        int i, slice, status;
         size_t bytes;
 
         /* get ring sizes */
+        slice = ss - mgp->ss;
+        cmd.data0 = slice;
         status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_SEND_RING_SIZE, &cmd, 0);
         tx_ring_size = cmd.data0;
+        cmd.data0 = slice;
         status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_RX_RING_SIZE, &cmd, 0);
         if (status != 0)
                 return status;
@@ -1715,15 +1817,17 @@ static int myri10ge_allocate_rings(struct myri10ge_slice_state *ss)
                                 mgp->small_bytes + MXGEFW_PAD, 0);
 
         if (ss->rx_small.fill_cnt < ss->rx_small.mask + 1) {
-                printk(KERN_ERR "myri10ge: %s: alloced only %d small bufs\n",
-                       dev->name, ss->rx_small.fill_cnt);
+                printk(KERN_ERR
+                       "myri10ge: %s:slice-%d: alloced only %d small bufs\n",
+                       dev->name, slice, ss->rx_small.fill_cnt);
                 goto abort_with_rx_small_ring;
         }
 
         myri10ge_alloc_rx_pages(mgp, &ss->rx_big, mgp->big_bytes, 0);
         if (ss->rx_big.fill_cnt < ss->rx_big.mask + 1) {
-                printk(KERN_ERR "myri10ge: %s: alloced only %d big bufs\n",
-                       dev->name, ss->rx_big.fill_cnt);
+                printk(KERN_ERR
+                       "myri10ge: %s:slice-%d: alloced only %d big bufs\n",
+                       dev->name, slice, ss->rx_big.fill_cnt);
                 goto abort_with_rx_big_ring;
         }
 
@@ -1775,6 +1879,10 @@ static void myri10ge_free_rings(struct myri10ge_slice_state *ss)
         struct myri10ge_tx_buf *tx;
         int i, len, idx;
 
+        /* If not allocated, skip it */
+        if (ss->tx.req_list == NULL)
+                return;
+
         for (i = ss->rx_big.cnt; i < ss->rx_big.fill_cnt; i++) {
                 idx = i & ss->rx_big.mask;
                 if (i == ss->rx_big.fill_cnt - 1)
@@ -1837,25 +1945,67 @@ static void myri10ge_free_rings(struct myri10ge_slice_state *ss)
 static int myri10ge_request_irq(struct myri10ge_priv *mgp)
 {
         struct pci_dev *pdev = mgp->pdev;
+        struct myri10ge_slice_state *ss;
+        struct net_device *netdev = mgp->dev;
+        int i;
         int status;
 
+        mgp->msi_enabled = 0;
+        mgp->msix_enabled = 0;
+        status = 0;
         if (myri10ge_msi) {
-                status = pci_enable_msi(pdev);
-                if (status != 0)
-                        dev_err(&pdev->dev,
-                                "Error %d setting up MSI; falling back to xPIC\n",
-                                status);
-                else
-                        mgp->msi_enabled = 1;
-        } else {
-                mgp->msi_enabled = 0;
+                if (mgp->num_slices > 1) {
+                        status =
+                            pci_enable_msix(pdev, mgp->msix_vectors,
+                                            mgp->num_slices);
+                        if (status == 0) {
+                                mgp->msix_enabled = 1;
+                        } else {
+                                dev_err(&pdev->dev,
+                                        "Error %d setting up MSI-X\n", status);
+                                return status;
+                        }
+                }
+                if (mgp->msix_enabled == 0) {
+                        status = pci_enable_msi(pdev);
+                        if (status != 0) {
+                                dev_err(&pdev->dev,
+                                        "Error %d setting up MSI; falling back to xPIC\n",
+                                        status);
+                        } else {
+                                mgp->msi_enabled = 1;
+                        }
+                }
         }
-        status = request_irq(pdev->irq, myri10ge_intr, IRQF_SHARED,
-                             mgp->dev->name, mgp);
-        if (status != 0) {
-                dev_err(&pdev->dev, "failed to allocate IRQ\n");
-                if (mgp->msi_enabled)
-                        pci_disable_msi(pdev);
+        if (mgp->msix_enabled) {
+                for (i = 0; i < mgp->num_slices; i++) {
+                        ss = &mgp->ss[i];
+                        snprintf(ss->irq_desc, sizeof(ss->irq_desc),
+                                 "%s:slice-%d", netdev->name, i);
+                        status = request_irq(mgp->msix_vectors[i].vector,
+                                             myri10ge_intr, 0, ss->irq_desc,
+                                             ss);
+                        if (status != 0) {
+                                dev_err(&pdev->dev,
+                                        "slice %d failed to allocate IRQ\n", i);
+                                i--;
+                                while (i >= 0) {
+                                        free_irq(mgp->msix_vectors[i].vector,
+                                                 &mgp->ss[i]);
+                                        i--;
+                                }
+                                pci_disable_msix(pdev);
+                                return status;
+                        }
+                }
+        } else {
+                status = request_irq(pdev->irq, myri10ge_intr, IRQF_SHARED,
+                                     mgp->dev->name, &mgp->ss[0]);
+                if (status != 0) {
+                        dev_err(&pdev->dev, "failed to allocate IRQ\n");
+                        if (mgp->msi_enabled)
+                                pci_disable_msi(pdev);
+                }
         }
         return status;
 }
@@ -1863,10 +2013,18 @@ static int myri10ge_request_irq(struct myri10ge_priv *mgp)
 static void myri10ge_free_irq(struct myri10ge_priv *mgp)
 {
         struct pci_dev *pdev = mgp->pdev;
+        int i;
 
-        free_irq(pdev->irq, mgp);
+        if (mgp->msix_enabled) {
+                for (i = 0; i < mgp->num_slices; i++)
+                        free_irq(mgp->msix_vectors[i].vector, &mgp->ss[i]);
+        } else {
+                free_irq(pdev->irq, &mgp->ss[0]);
+        }
         if (mgp->msi_enabled)
                 pci_disable_msi(pdev);
+        if (mgp->msix_enabled)
+                pci_disable_msix(pdev);
 }
 
 static int
@@ -1928,7 +2086,6 @@ myri10ge_get_frag_header(struct skb_frag_struct *frag, void **mac_hdr,
         return 0;
 }
 
-#if 0
 static int myri10ge_get_txrx(struct myri10ge_priv *mgp, int slice)
 {
         struct myri10ge_cmd cmd;
@@ -1996,14 +2153,15 @@ static int myri10ge_set_stats(struct myri10ge_priv *mgp, int slice)
         }
         return 0;
 }
-#endif
 
 static int myri10ge_open(struct net_device *dev)
 {
+        struct myri10ge_slice_state *ss;
         struct myri10ge_priv *mgp = netdev_priv(dev);
         struct myri10ge_cmd cmd;
+        int i, status, big_pow2, slice;
+        u8 *itable;
         struct net_lro_mgr *lro_mgr;
-        int status, big_pow2;
 
         if (mgp->running != MYRI10GE_ETH_STOPPED)
                 return -EBUSY;
@@ -2015,6 +2173,48 @@ static int myri10ge_open(struct net_device *dev)
                 goto abort_with_nothing;
         }
 
+        if (mgp->num_slices > 1) {
+                cmd.data0 = mgp->num_slices;
+                cmd.data1 = 1;        /* use MSI-X */
+                status = myri10ge_send_cmd(mgp, MXGEFW_CMD_ENABLE_RSS_QUEUES,
+                                           &cmd, 0);
+                if (status != 0) {
+                        printk(KERN_ERR
+                               "myri10ge: %s: failed to set number of slices\n",
+                               dev->name);
+                        goto abort_with_nothing;
+                }
+                /* setup the indirection table */
+                cmd.data0 = mgp->num_slices;
+                status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_RSS_TABLE_SIZE,
+                                           &cmd, 0);
+
+                status |= myri10ge_send_cmd(mgp,
+                                            MXGEFW_CMD_GET_RSS_TABLE_OFFSET,
+                                            &cmd, 0);
+                if (status != 0) {
+                        printk(KERN_ERR
+                               "myri10ge: %s: failed to setup rss tables\n",
+                               dev->name);
+                }
+
+                /* just enable an identity mapping */
+                itable = mgp->sram + cmd.data0;
+                for (i = 0; i < mgp->num_slices; i++)
+                        __raw_writeb(i, &itable[i]);
+
+                cmd.data0 = 1;
+                cmd.data1 = myri10ge_rss_hash;
+                status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_RSS_ENABLE,
+                                           &cmd, 0);
+                if (status != 0) {
+                        printk(KERN_ERR
+                               "myri10ge: %s: failed to enable slices\n",
+                               dev->name);
+                        goto abort_with_nothing;
+                }
+        }
+
         status = myri10ge_request_irq(mgp);
         if (status != 0)
                 goto abort_with_nothing;
@@ -2038,41 +2238,6 @@ static int myri10ge_open(struct net_device *dev)
         if (myri10ge_small_bytes > 0)
                 mgp->small_bytes = myri10ge_small_bytes;
 
-        /* get the lanai pointers to the send and receive rings */
-
-        status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_SEND_OFFSET, &cmd, 0);
-        mgp->ss.tx.lanai =
-            (struct mcp_kreq_ether_send __iomem *)(mgp->sram + cmd.data0);
-
-        status |=
-            myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_SMALL_RX_OFFSET, &cmd, 0);
-        mgp->ss.rx_small.lanai =
-            (struct mcp_kreq_ether_recv __iomem *)(mgp->sram + cmd.data0);
-
-        status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_BIG_RX_OFFSET, &cmd, 0);
-        mgp->ss.rx_big.lanai =
-            (struct mcp_kreq_ether_recv __iomem *)(mgp->sram + cmd.data0);
-
-        if (status != 0) {
-                printk(KERN_ERR
-                       "myri10ge: %s: failed to get ring sizes or locations\n",
-                       dev->name);
-                mgp->running = MYRI10GE_ETH_STOPPED;
-                goto abort_with_irq;
-        }
-
-        if (myri10ge_wcfifo && mgp->wc_enabled) {
-                mgp->ss.tx.wc_fifo = (u8 __iomem *) mgp->sram + MXGEFW_ETH_SEND_4;
-                mgp->ss.rx_small.wc_fifo =
-                    (u8 __iomem *) mgp->sram + MXGEFW_ETH_RECV_SMALL;
-                mgp->ss.rx_big.wc_fifo =
-                    (u8 __iomem *) mgp->sram + MXGEFW_ETH_RECV_BIG;
-        } else {
-                mgp->ss.tx.wc_fifo = NULL;
-                mgp->ss.rx_small.wc_fifo = NULL;
-                mgp->ss.rx_big.wc_fifo = NULL;
-        }
-
         /* Firmware needs the big buff size as a power of 2.  Lie and
          * tell him the buffer is larger, because we only use 1
          * buffer/pkt, and the mtu will prevent overruns.
@@ -2087,9 +2252,44 @@ static int myri10ge_open(struct net_device *dev)
                 mgp->big_bytes = big_pow2;
         }
 
-        status = myri10ge_allocate_rings(&mgp->ss);
-        if (status != 0)
-                goto abort_with_irq;
+        /* setup the per-slice data structures */
+        for (slice = 0; slice < mgp->num_slices; slice++) {
+                ss = &mgp->ss[slice];
+
+                status = myri10ge_get_txrx(mgp, slice);
+                if (status != 0) {
+                        printk(KERN_ERR
+                               "myri10ge: %s: failed to get ring sizes or locations\n",
+                               dev->name);
+                        goto abort_with_rings;
+                }
+                status = myri10ge_allocate_rings(ss);
+                if (status != 0)
+                        goto abort_with_rings;
+                if (slice == 0)
+                        status = myri10ge_set_stats(mgp, slice);
+                if (status) {
+                        printk(KERN_ERR
+                               "myri10ge: %s: Couldn't set stats DMA\n",
+                               dev->name);
+                        goto abort_with_rings;
+                }
+
+                lro_mgr = &ss->rx_done.lro_mgr;
+                lro_mgr->dev = dev;
+                lro_mgr->features = LRO_F_NAPI;
+                lro_mgr->ip_summed = CHECKSUM_COMPLETE;
+                lro_mgr->ip_summed_aggr = CHECKSUM_UNNECESSARY;
+                lro_mgr->max_desc = MYRI10GE_MAX_LRO_DESCRIPTORS;
+                lro_mgr->lro_arr = ss->rx_done.lro_desc;
+                lro_mgr->get_frag_header = myri10ge_get_frag_header;
+                lro_mgr->max_aggr = myri10ge_lro_max_pkts;
+                if (lro_mgr->max_aggr > MAX_SKB_FRAGS)
+                        lro_mgr->max_aggr = MAX_SKB_FRAGS;
+
+                /* must happen prior to any irq */
+                napi_enable(&(ss)->napi);
+        }
 
         /* now give firmware buffers sizes, and MTU */
         cmd.data0 = dev->mtu + ETH_HLEN + VLAN_HLEN;
@@ -2106,25 +2306,15 @@ static int myri10ge_open(struct net_device *dev)
                 goto abort_with_rings;
         }
 
-        cmd.data0 = MYRI10GE_LOWPART_TO_U32(mgp->ss.fw_stats_bus);
-        cmd.data1 = MYRI10GE_HIGHPART_TO_U32(mgp->ss.fw_stats_bus);
-        cmd.data2 = sizeof(struct mcp_irq_data);
-        status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_STATS_DMA_V2, &cmd, 0);
-        if (status == -ENOSYS) {
-                dma_addr_t bus = mgp->ss.fw_stats_bus;
-                bus += offsetof(struct mcp_irq_data, send_done_count);
-                cmd.data0 = MYRI10GE_LOWPART_TO_U32(bus);
-                cmd.data1 = MYRI10GE_HIGHPART_TO_U32(bus);
-                status = myri10ge_send_cmd(mgp,
-                                           MXGEFW_CMD_SET_STATS_DMA_OBSOLETE,
-                                           &cmd, 0);
-                /* Firmware cannot support multicast without STATS_DMA_V2 */
-                mgp->fw_multicast_support = 0;
-        } else {
-                mgp->fw_multicast_support = 1;
-        }
-        if (status) {
-                printk(KERN_ERR "myri10ge: %s: Couldn't set stats DMA\n",
+        /*
+         * Set Linux style TSO mode; this is needed only on newer
+         * firmware versions.  Older versions default to Linux
+         * style TSO
+         */
+        cmd.data0 = 0;
+        status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_TSO_MODE, &cmd, 0);
+        if (status && status != -ENOSYS) {
+                printk(KERN_ERR "myri10ge: %s: Couldn't set TSO mode\n",
                        dev->name);
                 goto abort_with_rings;
         }
@@ -2132,21 +2322,6 @@ static int myri10ge_open(struct net_device *dev)
         mgp->link_state = ~0U;
         mgp->rdma_tags_available = 15;
 
-        lro_mgr = &mgp->ss.rx_done.lro_mgr;
-        lro_mgr->dev = dev;
-        lro_mgr->features = LRO_F_NAPI;
-        lro_mgr->ip_summed = CHECKSUM_COMPLETE;
-        lro_mgr->ip_summed_aggr = CHECKSUM_UNNECESSARY;
-        lro_mgr->max_desc = MYRI10GE_MAX_LRO_DESCRIPTORS;
-        lro_mgr->lro_arr = mgp->ss.rx_done.lro_desc;
-        lro_mgr->get_frag_header = myri10ge_get_frag_header;
-        lro_mgr->max_aggr = myri10ge_lro_max_pkts;
-        lro_mgr->frag_align_pad = 2;
-        if (lro_mgr->max_aggr > MAX_SKB_FRAGS)
-                lro_mgr->max_aggr = MAX_SKB_FRAGS;
-
-        napi_enable(&mgp->ss.napi);        /* must happen prior to any irq */
-
         status = myri10ge_send_cmd(mgp, MXGEFW_CMD_ETHERNET_UP, &cmd, 0);
         if (status) {
                 printk(KERN_ERR "myri10ge: %s: Couldn't bring up link\n",
@@ -2154,8 +2329,6 @@ static int myri10ge_open(struct net_device *dev)
                 goto abort_with_rings;
         }
 
-        mgp->ss.tx.wake_queue = 0;
-        mgp->ss.tx.stop_queue = 0;
         mgp->running = MYRI10GE_ETH_RUNNING;
         mgp->watchdog_timer.expires = jiffies + myri10ge_watchdog_timeout * HZ;
         add_timer(&mgp->watchdog_timer);
@@ -2163,9 +2336,9 @@ static int myri10ge_open(struct net_device *dev)
         return 0;
 
 abort_with_rings:
-        myri10ge_free_rings(&mgp->ss);
+        for (i = 0; i < mgp->num_slices; i++)
+                myri10ge_free_rings(&mgp->ss[i]);
 
-abort_with_irq:
         myri10ge_free_irq(mgp);
 
 abort_with_nothing:
@@ -2178,16 +2351,19 @@ static int myri10ge_close(struct net_device *dev)
         struct myri10ge_priv *mgp = netdev_priv(dev);
         struct myri10ge_cmd cmd;
         int status, old_down_cnt;
+        int i;
 
         if (mgp->running != MYRI10GE_ETH_RUNNING)
                 return 0;
 
-        if (mgp->ss.tx.req_bytes == NULL)
+        if (mgp->ss[0].tx.req_bytes == NULL)
                 return 0;
 
         del_timer_sync(&mgp->watchdog_timer);
         mgp->running = MYRI10GE_ETH_STOPPING;
-        napi_disable(&mgp->ss.napi);
+        for (i = 0; i < mgp->num_slices; i++) {
+                napi_disable(&mgp->ss[i].napi);
+        }
         netif_carrier_off(dev);
         netif_stop_queue(dev);
         old_down_cnt = mgp->down_cnt;
@@ -2203,7 +2379,8 @@ static int myri10ge_close(struct net_device *dev)
 
         netif_tx_disable(dev);
         myri10ge_free_irq(mgp);
-        myri10ge_free_rings(&mgp->ss);
+        for (i = 0; i < mgp->num_slices; i++)
+                myri10ge_free_rings(&mgp->ss[i]);
 
         mgp->running = MYRI10GE_ETH_STOPPED;
         return 0;
@@ -2324,7 +2501,7 @@ static int myri10ge_xmit(struct sk_buff *skb, struct net_device *dev)
         u8 flags, odd_flag;
 
         /* always transmit through slot 0 */
-        ss = &mgp->ss;
+        ss = mgp->ss;
         tx = &ss->tx;
 again:
         req = tx->req_list;
@@ -2629,7 +2806,21 @@ drop:
 static struct net_device_stats *myri10ge_get_stats(struct net_device *dev)
 {
         struct myri10ge_priv *mgp = netdev_priv(dev);
-        return &mgp->stats;
+        struct myri10ge_slice_netstats *slice_stats;
+        struct net_device_stats *stats = &mgp->stats;
+        int i;
+
+        memset(stats, 0, sizeof(*stats));
+        for (i = 0; i < mgp->num_slices; i++) {
+                slice_stats = &mgp->ss[i].stats;
+                stats->rx_packets += slice_stats->rx_packets;
+                stats->tx_packets += slice_stats->tx_packets;
+                stats->rx_bytes += slice_stats->rx_bytes;
+                stats->tx_bytes += slice_stats->tx_bytes;
+                stats->rx_dropped += slice_stats->rx_dropped;
+                stats->tx_dropped += slice_stats->tx_dropped;
+        }
+        return stats;
 }
 
 static void myri10ge_set_multicast_list(struct net_device *dev)
@@ -2840,10 +3031,10 @@ static void myri10ge_enable_ecrc(struct myri10ge_priv *mgp)
  *
  * If the driver can neither enable ECRC nor verify that it has
  * already been enabled, then it must use a firmware image which works
- * around unaligned completion packets (myri10ge_ethp_z8e.dat), and it
+ * around unaligned completion packets (myri10ge_rss_ethp_z8e.dat), and it
  * should also ensure that it never gives the device a Read-DMA which is
  * larger than 2KB by setting the tx_boundary to 2KB.  If ECRC is
- * enabled, then the driver should use the aligned (myri10ge_eth_z8e.dat)
+ * enabled, then the driver should use the aligned (myri10ge_rss_eth_z8e.dat)
  * firmware image, and set tx_boundary to 4KB.
  */
 
@@ -2872,7 +3063,7 @@ static void myri10ge_firmware_probe(struct myri10ge_priv *mgp)
          * completions) in order to see if it works on this host.
          */
         mgp->fw_name = myri10ge_fw_aligned;
-        status = myri10ge_load_firmware(mgp);
+        status = myri10ge_load_firmware(mgp, 1);
         if (status != 0) {
                 goto abort;
         }
@@ -3053,6 +3244,7 @@ static void myri10ge_watchdog(struct work_struct *work)
         struct myri10ge_tx_buf *tx;
         u32 reboot;
         int status;
+        int i;
         u16 cmd, vendor;
 
         mgp->watchdog_resets++;
@@ -3100,20 +3292,26 @@ static void myri10ge_watchdog(struct work_struct *work)
 
                 printk(KERN_ERR "myri10ge: %s: device timeout, resetting\n",
                        mgp->dev->name);
-                tx = &mgp->ss.tx;
-                printk(KERN_INFO "myri10ge: %s: %d %d %d %d %d\n",
-                       mgp->dev->name, tx->req, tx->done,
-                       tx->pkt_start, tx->pkt_done,
-                       (int)ntohl(mgp->ss.fw_stats->send_done_count));
-                msleep(2000);
-                printk(KERN_INFO "myri10ge: %s: %d %d %d %d %d\n",
-                       mgp->dev->name, tx->req, tx->done,
-                       tx->pkt_start, tx->pkt_done,
-                       (int)ntohl(mgp->ss.fw_stats->send_done_count));
+                for (i = 0; i < mgp->num_slices; i++) {
+                        tx = &mgp->ss[i].tx;
+                        printk(KERN_INFO
+                               "myri10ge: %s: (%d): %d %d %d %d %d\n",
+                               mgp->dev->name, i, tx->req, tx->done,
+                               tx->pkt_start, tx->pkt_done,
+                               (int)ntohl(mgp->ss[i].fw_stats->
+                                          send_done_count));
+                        msleep(2000);
+                        printk(KERN_INFO
+                               "myri10ge: %s: (%d): %d %d %d %d %d\n",
+                               mgp->dev->name, i, tx->req, tx->done,
+                               tx->pkt_start, tx->pkt_done,
+                               (int)ntohl(mgp->ss[i].fw_stats->
+                                          send_done_count));
+                }
         }
         rtnl_lock();
         myri10ge_close(mgp->dev);
-        status = myri10ge_load_firmware(mgp);
+        status = myri10ge_load_firmware(mgp, 1);
         if (status != 0)
                 printk(KERN_ERR "myri10ge: %s: failed to load firmware\n",
                        mgp->dev->name);
@@ -3133,50 +3331,59 @@ static void myri10ge_watchdog_timer(unsigned long arg)
 {
         struct myri10ge_priv *mgp;
         struct myri10ge_slice_state *ss;
+        int i, reset_needed;
         u32 rx_pause_cnt;
 
         mgp = (struct myri10ge_priv *)arg;
 
-        rx_pause_cnt = ntohl(mgp->ss.fw_stats->dropped_pause);
+        rx_pause_cnt = ntohl(mgp->ss[0].fw_stats->dropped_pause);
+        for (i = 0, reset_needed = 0;
+             i < mgp->num_slices && reset_needed == 0; ++i) {
 
-        ss = &mgp->ss;
-        if (ss->rx_small.watchdog_needed) {
-                myri10ge_alloc_rx_pages(mgp, &ss->rx_small,
-                                        mgp->small_bytes + MXGEFW_PAD, 1);
-                if (ss->rx_small.fill_cnt - ss->rx_small.cnt >=
-                    myri10ge_fill_thresh)
-                        ss->rx_small.watchdog_needed = 0;
-        }
-        if (ss->rx_big.watchdog_needed) {
-                myri10ge_alloc_rx_pages(mgp, &ss->rx_big, mgp->big_bytes, 1);
-                if (ss->rx_big.fill_cnt - ss->rx_big.cnt >=
-                    myri10ge_fill_thresh)
-                        ss->rx_big.watchdog_needed = 0;
-        }
-
-        if (ss->tx.req != ss->tx.done &&
-            ss->tx.done == ss->watchdog_tx_done &&
-            ss->watchdog_tx_req != ss->watchdog_tx_done) {
-                /* nic seems like it might be stuck.. */
-                if (rx_pause_cnt != mgp->watchdog_pause) {
-                        if (net_ratelimit())
-                                printk(KERN_WARNING "myri10ge %s:"
-                                       "TX paused, check link partner\n",
-                                       mgp->dev->name);
-                } else {
-                        schedule_work(&mgp->watchdog_work);
-                        return;
+                ss = &mgp->ss[i];
+                if (ss->rx_small.watchdog_needed) {
+                        myri10ge_alloc_rx_pages(mgp, &ss->rx_small,
+                                                mgp->small_bytes + MXGEFW_PAD,
+                                                1);
+                        if (ss->rx_small.fill_cnt - ss->rx_small.cnt >=
+                            myri10ge_fill_thresh)
+                                ss->rx_small.watchdog_needed = 0;
+                }
+                if (ss->rx_big.watchdog_needed) {
+                        myri10ge_alloc_rx_pages(mgp, &ss->rx_big,
+                                                mgp->big_bytes, 1);
+                        if (ss->rx_big.fill_cnt - ss->rx_big.cnt >=
+                            myri10ge_fill_thresh)
+                                ss->rx_big.watchdog_needed = 0;
+                }
+
+                if (ss->tx.req != ss->tx.done &&
+                    ss->tx.done == ss->watchdog_tx_done &&
+                    ss->watchdog_tx_req != ss->watchdog_tx_done) {
+                        /* nic seems like it might be stuck.. */
+                        if (rx_pause_cnt != mgp->watchdog_pause) {
+                                if (net_ratelimit())
+                                        printk(KERN_WARNING "myri10ge %s:"
+                                               "TX paused, check link partner\n",
+                                               mgp->dev->name);
+                        } else {
+                                reset_needed = 1;
+                        }
                 }
+                ss->watchdog_tx_done = ss->tx.done;
+                ss->watchdog_tx_req = ss->tx.req;
         }
-        /* rearm timer */
-        mod_timer(&mgp->watchdog_timer,
-                  jiffies + myri10ge_watchdog_timeout * HZ);
-        ss->watchdog_tx_done = ss->tx.done;
-        ss->watchdog_tx_req = ss->tx.req;
         mgp->watchdog_pause = rx_pause_cnt;
+
+        if (reset_needed) {
+                schedule_work(&mgp->watchdog_work);
+        } else {
+                /* rearm timer */
+                mod_timer(&mgp->watchdog_timer,
+                          jiffies + myri10ge_watchdog_timeout * HZ);
+        }
 }
 
-#if 0
 static void myri10ge_free_slices(struct myri10ge_priv *mgp)
 {
         struct myri10ge_slice_state *ss;
@@ -3360,14 +3567,12 @@ abort_with_fw:
         mgp->fw_name = old_fw;
         myri10ge_load_firmware(mgp, 0);
 }
-#endif
 
 static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
         struct net_device *netdev;
         struct myri10ge_priv *mgp;
         struct device *dev = &pdev->dev;
-        size_t bytes;
         int i;
         int status = -ENXIO;
         int dac_enabled;
@@ -3382,7 +3587,6 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 
         mgp = netdev_priv(netdev);
         mgp->dev = netdev;
-        netif_napi_add(netdev, &mgp->ss.napi, myri10ge_poll, myri10ge_napi_weight);
         mgp->pdev = pdev;
         mgp->csum_flag = MXGEFW_FLAGS_CKSUM;
         mgp->pause = myri10ge_flow_control;
@@ -3428,11 +3632,6 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
         if (mgp->cmd == NULL)
                 goto abort_with_netdev;
 
-        mgp->ss.fw_stats = dma_alloc_coherent(&pdev->dev, sizeof(*mgp->ss.fw_stats),
-                                              &mgp->ss.fw_stats_bus, GFP_KERNEL);
-        if (mgp->ss.fw_stats == NULL)
-                goto abort_with_cmd;
-
         mgp->board_span = pci_resource_len(pdev, 0);
         mgp->iomem_base = pci_resource_start(pdev, 0);
         mgp->mtrr = -1;
@@ -3469,26 +3668,24 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
         for (i = 0; i < ETH_ALEN; i++)
                 netdev->dev_addr[i] = mgp->mac_addr[i];
 
-        /* allocate rx done ring */
-        bytes = mgp->max_intr_slots * sizeof(*mgp->ss.rx_done.entry);
-        mgp->ss.rx_done.entry = dma_alloc_coherent(&pdev->dev, bytes,
-                                                   &mgp->ss.rx_done.bus, GFP_KERNEL);
-        if (mgp->ss.rx_done.entry == NULL)
-                goto abort_with_ioremap;
-        memset(mgp->ss.rx_done.entry, 0, bytes);
-
         myri10ge_select_firmware(mgp);
 
-        status = myri10ge_load_firmware(mgp);
+        status = myri10ge_load_firmware(mgp, 1);
         if (status != 0) {
                 dev_err(&pdev->dev, "failed to load firmware\n");
-                goto abort_with_rx_done;
+                goto abort_with_ioremap;
+        }
+        myri10ge_probe_slices(mgp);
+        status = myri10ge_alloc_slices(mgp);
+        if (status != 0) {
+                dev_err(&pdev->dev, "failed to alloc slice state\n");
+                goto abort_with_firmware;
         }
 
         status = myri10ge_reset(mgp);
         if (status != 0) {
                 dev_err(&pdev->dev, "failed reset\n");
-                goto abort_with_firmware;
+                goto abort_with_slices;
         }
 
         pci_set_drvdata(pdev, mgp);
@@ -3533,24 +3730,27 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                 dev_err(&pdev->dev, "register_netdev failed: %d\n", status);
                 goto abort_with_state;
         }
-        dev_info(dev, "%s IRQ %d, tx bndry %d, fw %s, WC %s\n",
-                 (mgp->msi_enabled ? "MSI" : "xPIC"),
-                 netdev->irq, mgp->tx_boundary, mgp->fw_name,
-                 (mgp->wc_enabled ? "Enabled" : "Disabled"));
+        if (mgp->msix_enabled)
+                dev_info(dev, "%d MSI-X IRQs, tx bndry %d, fw %s, WC %s\n",
+                         mgp->num_slices, mgp->tx_boundary, mgp->fw_name,
+                         (mgp->wc_enabled ? "Enabled" : "Disabled"));
+        else
+                dev_info(dev, "%s IRQ %d, tx bndry %d, fw %s, WC %s\n",
+                         mgp->msi_enabled ? "MSI" : "xPIC",
+                         netdev->irq, mgp->tx_boundary, mgp->fw_name,
+                         (mgp->wc_enabled ? "Enabled" : "Disabled"));
 
         return 0;
 
 abort_with_state:
         pci_restore_state(pdev);
 
+abort_with_slices:
+        myri10ge_free_slices(mgp);
+
 abort_with_firmware:
         myri10ge_dummy_rdma(mgp, 0);
 
-abort_with_rx_done:
-        bytes = mgp->max_intr_slots * sizeof(*mgp->ss.rx_done.entry);
-        dma_free_coherent(&pdev->dev, bytes,
-                          mgp->ss.rx_done.entry, mgp->ss.rx_done.bus);
-
 abort_with_ioremap:
         iounmap(mgp->sram);
 
@@ -3559,10 +3759,6 @@ abort_with_wc:
         if (mgp->mtrr >= 0)
                 mtrr_del(mgp->mtrr, mgp->iomem_base, mgp->board_span);
 #endif
-        dma_free_coherent(&pdev->dev, sizeof(*mgp->ss.fw_stats),
-                          mgp->ss.fw_stats, mgp->ss.fw_stats_bus);
-
-abort_with_cmd:
         dma_free_coherent(&pdev->dev, sizeof(*mgp->cmd),
                           mgp->cmd, mgp->cmd_bus);
 
@@ -3583,7 +3779,6 @@ static void myri10ge_remove(struct pci_dev *pdev)
 {
         struct myri10ge_priv *mgp;
         struct net_device *netdev;
-        size_t bytes;
 
         mgp = pci_get_drvdata(pdev);
         if (mgp == NULL)
@@ -3598,19 +3793,15 @@ static void myri10ge_remove(struct pci_dev *pdev)
         /* avoid a memory leak */
         pci_restore_state(pdev);
 
-        bytes = mgp->max_intr_slots * sizeof(*mgp->ss.rx_done.entry);
-        dma_free_coherent(&pdev->dev, bytes,
-                          mgp->ss.rx_done.entry, mgp->ss.rx_done.bus);
-
         iounmap(mgp->sram);
 
 #ifdef CONFIG_MTRR
         if (mgp->mtrr >= 0)
                 mtrr_del(mgp->mtrr, mgp->iomem_base, mgp->board_span);
 #endif
-        dma_free_coherent(&pdev->dev, sizeof(*mgp->ss.fw_stats),
-                          mgp->ss.fw_stats, mgp->ss.fw_stats_bus);
-
+        myri10ge_free_slices(mgp);
+        if (mgp->msix_vectors != NULL)
+                kfree(mgp->msix_vectors);
         dma_free_coherent(&pdev->dev, sizeof(*mgp->cmd),
                           mgp->cmd, mgp->cmd_bus);
 
@@ -3643,6 +3834,15 @@ static __init int myri10ge_init_module(void)
 {
         printk(KERN_INFO "%s: Version %s\n", myri10ge_driver.name,
                MYRI10GE_VERSION_STR);
+
+        if (myri10ge_rss_hash > MXGEFW_RSS_HASH_TYPE_SRC_PORT ||
+            myri10ge_rss_hash < MXGEFW_RSS_HASH_TYPE_IPV4) {
+                printk(KERN_ERR
+                       "%s: Illegal rssh hash type %d, defaulting to source port\n",
+                       myri10ge_driver.name, myri10ge_rss_hash);
+                myri10ge_rss_hash = MXGEFW_RSS_HASH_TYPE_SRC_PORT;
+        }
+
         return pci_register_driver(&myri10ge_driver);
 }
 
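A quick way to confirm the new mode from userspace (a sketch, assuming the interface is named eth0): when MSI-X is enabled, the per-slice vectors registered above show up in /proc/interrupts under the irq_desc names built in myri10ge_request_irq (eth0:slice-0, eth0:slice-1, ...), and ethtool -S now reports the new "MSIX" counter plus one block of slice statistics per slice.

    grep 'eth0:slice-' /proc/interrupts
    ethtool -S eth0

The probe-time log line added in this patch ("%d MSI-X IRQs, tx bndry %d, ...") also states how many slices were brought up.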