Diffstat (limited to 'drivers/net/cxgb4/cxgb4_main.c')
-rw-r--r--  drivers/net/cxgb4/cxgb4_main.c  409
1 file changed, 182 insertions, 227 deletions
diff --git a/drivers/net/cxgb4/cxgb4_main.c b/drivers/net/cxgb4/cxgb4_main.c
index c327527fbbc8..7e3cfbe89e3b 100644
--- a/drivers/net/cxgb4/cxgb4_main.c
+++ b/drivers/net/cxgb4/cxgb4_main.c
@@ -175,16 +175,26 @@ enum {
 
 static DEFINE_PCI_DEVICE_TABLE(cxgb4_pci_tbl) = {
         CH_DEVICE(0xa000, 0),  /* PE10K */
-        CH_DEVICE(0x4001, 0),
-        CH_DEVICE(0x4002, 0),
-        CH_DEVICE(0x4003, 0),
-        CH_DEVICE(0x4004, 0),
-        CH_DEVICE(0x4005, 0),
-        CH_DEVICE(0x4006, 0),
-        CH_DEVICE(0x4007, 0),
-        CH_DEVICE(0x4008, 0),
-        CH_DEVICE(0x4009, 0),
-        CH_DEVICE(0x400a, 0),
+        CH_DEVICE(0x4001, -1),
+        CH_DEVICE(0x4002, -1),
+        CH_DEVICE(0x4003, -1),
+        CH_DEVICE(0x4004, -1),
+        CH_DEVICE(0x4005, -1),
+        CH_DEVICE(0x4006, -1),
+        CH_DEVICE(0x4007, -1),
+        CH_DEVICE(0x4008, -1),
+        CH_DEVICE(0x4009, -1),
+        CH_DEVICE(0x400a, -1),
+        CH_DEVICE(0x4401, 4),
+        CH_DEVICE(0x4402, 4),
+        CH_DEVICE(0x4403, 4),
+        CH_DEVICE(0x4404, 4),
+        CH_DEVICE(0x4405, 4),
+        CH_DEVICE(0x4406, 4),
+        CH_DEVICE(0x4407, 4),
+        CH_DEVICE(0x4408, 4),
+        CH_DEVICE(0x4409, 4),
+        CH_DEVICE(0x440a, 4),
         { 0, }
 };
 
@@ -393,7 +403,7 @@ static int link_start(struct net_device *dev)
          * that step explicitly.
          */
         ret = t4_set_rxmode(pi->adapter, mb, pi->viid, dev->mtu, -1, -1, -1,
-                            pi->vlan_grp != NULL, true);
+                            !!(dev->features & NETIF_F_HW_VLAN_RX), true);
         if (ret == 0) {
                 ret = t4_change_mac(pi->adapter, mb, pi->viid,
                                     pi->xact_addr_filt, dev->dev_addr, true,
@@ -423,10 +433,11 @@ static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
         if (likely(opcode == CPL_SGE_EGR_UPDATE)) {
                 const struct cpl_sge_egr_update *p = (void *)rsp;
                 unsigned int qid = EGR_QID(ntohl(p->opcode_qid));
-                struct sge_txq *txq = q->adap->sge.egr_map[qid];
+                struct sge_txq *txq;
 
+                txq = q->adap->sge.egr_map[qid - q->adap->sge.egr_start];
                 txq->restarts++;
-                if ((u8 *)txq < (u8 *)q->adap->sge.ethrxq) {
+                if ((u8 *)txq < (u8 *)q->adap->sge.ofldtxq) {
                         struct sge_eth_txq *eq;
 
                         eq = container_of(txq, struct sge_eth_txq, q);
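
A note on the egr_map change above: the firmware now hands out absolute egress queue IDs, so the handler subtracts the adapter's egr_start base before indexing its map. A minimal stand-alone sketch of that absolute-to-relative lookup, with invented values for the base and table size (not the driver's real tables):

#include <stdio.h>

#define NUM_EGR_QUEUES 8                 /* illustrative size only */

static void *egr_map[NUM_EGR_QUEUES];    /* indexed by qid - egr_start */
static unsigned int egr_start = 64;      /* hypothetical absolute base qid */

static void *lookup_egr(unsigned int qid)
{
        return egr_map[qid - egr_start]; /* same translation as the hunk above */
}

int main(void)
{
        int dummy = 42;

        egr_map[67 - egr_start] = &dummy;        /* queue with absolute qid 67 */
        printf("qid 67 -> %p (expect %p)\n", lookup_egr(67), (void *)&dummy);
        return 0;
}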
@@ -511,39 +522,33 @@ static irqreturn_t t4_nondata_intr(int irq, void *cookie)
  */
 static void name_msix_vecs(struct adapter *adap)
 {
-        int i, j, msi_idx = 2, n = sizeof(adap->msix_info[0].desc) - 1;
+        int i, j, msi_idx = 2, n = sizeof(adap->msix_info[0].desc);
 
         /* non-data interrupts */
-        snprintf(adap->msix_info[0].desc, n, "%s", adap->name);
-        adap->msix_info[0].desc[n] = 0;
+        snprintf(adap->msix_info[0].desc, n, "%s", adap->port[0]->name);
 
         /* FW events */
-        snprintf(adap->msix_info[1].desc, n, "%s-FWeventq", adap->name);
-        adap->msix_info[1].desc[n] = 0;
+        snprintf(adap->msix_info[1].desc, n, "%s-FWeventq",
+                 adap->port[0]->name);
 
         /* Ethernet queues */
         for_each_port(adap, j) {
                 struct net_device *d = adap->port[j];
                 const struct port_info *pi = netdev_priv(d);
 
-                for (i = 0; i < pi->nqsets; i++, msi_idx++) {
+                for (i = 0; i < pi->nqsets; i++, msi_idx++)
                         snprintf(adap->msix_info[msi_idx].desc, n, "%s-Rx%d",
                                  d->name, i);
-                        adap->msix_info[msi_idx].desc[n] = 0;
-                }
         }
 
         /* offload queues */
-        for_each_ofldrxq(&adap->sge, i) {
-                snprintf(adap->msix_info[msi_idx].desc, n, "%s-ofld%d",
-                         adap->name, i);
-                adap->msix_info[msi_idx++].desc[n] = 0;
-        }
-        for_each_rdmarxq(&adap->sge, i) {
-                snprintf(adap->msix_info[msi_idx].desc, n, "%s-rdma%d",
-                         adap->name, i);
-                adap->msix_info[msi_idx++].desc[n] = 0;
-        }
+        for_each_ofldrxq(&adap->sge, i)
+                snprintf(adap->msix_info[msi_idx++].desc, n, "%s-ofld%d",
+                         adap->port[0]->name, i);
+
+        for_each_rdmarxq(&adap->sge, i)
+                snprintf(adap->msix_info[msi_idx++].desc, n, "%s-rdma%d",
+                         adap->port[0]->name, i);
 }
 
 static int request_msix_queue_irqs(struct adapter *adap)
@@ -658,6 +663,15 @@ static int setup_rss(struct adapter *adap)
 }
 
 /*
+ * Return the channel of the ingress queue with the given qid.
+ */
+static unsigned int rxq_to_chan(const struct sge *p, unsigned int qid)
+{
+        qid -= p->ingr_start;
+        return netdev2pinfo(p->ingr_map[qid]->netdev)->tx_chan;
+}
+
+/*
  * Wait until all NAPI handlers are descheduled.
  */
 static void quiesce_rx(struct adapter *adap)
@@ -848,19 +862,17 @@ out: release_firmware(fw);
  */
 void *t4_alloc_mem(size_t size)
 {
-        void *p = kmalloc(size, GFP_KERNEL);
+        void *p = kzalloc(size, GFP_KERNEL);
 
         if (!p)
-                p = vmalloc(size);
-        if (p)
-                memset(p, 0, size);
+                p = vzalloc(size);
         return p;
 }
 
 /*
  * Free memory allocated through alloc_mem().
  */
-void t4_free_mem(void *addr)
+static void t4_free_mem(void *addr)
 {
         if (is_vmalloc_addr(addr))
                 vfree(addr);
@@ -1324,15 +1336,20 @@ static int restart_autoneg(struct net_device *dev)
         return 0;
 }
 
-static int identify_port(struct net_device *dev, u32 data)
+static int identify_port(struct net_device *dev,
+                         enum ethtool_phys_id_state state)
 {
+        unsigned int val;
         struct adapter *adap = netdev2adap(dev);
 
-        if (data == 0)
-                data = 2;     /* default to 2 seconds */
+        if (state == ETHTOOL_ID_ACTIVE)
+                val = 0xffff;
+        else if (state == ETHTOOL_ID_INACTIVE)
+                val = 0;
+        else
+                return -EINVAL;
 
-        return t4_identify_port(adap, adap->fn, netdev2pinfo(dev)->viid,
-                                data * 5);
+        return t4_identify_port(adap, adap->fn, netdev2pinfo(dev)->viid, val);
 }
 
 static unsigned int from_fw_linkcaps(unsigned int type, unsigned int caps)
@@ -1357,7 +1374,12 @@ static unsigned int from_fw_linkcaps(unsigned int type, unsigned int caps)
         } else if (type == FW_PORT_TYPE_KR)
                 v |= SUPPORTED_Backplane | SUPPORTED_10000baseKR_Full;
         else if (type == FW_PORT_TYPE_BP_AP)
-                v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC;
+                v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC |
+                     SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full;
+        else if (type == FW_PORT_TYPE_BP4_AP)
+                v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC |
+                     SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full |
+                     SUPPORTED_10000baseKX4_Full;
         else if (type == FW_PORT_TYPE_FIBER_XFI ||
                  type == FW_PORT_TYPE_FIBER_XAUI || type == FW_PORT_TYPE_SFP)
                 v |= SUPPORTED_FIBRE;
@@ -1414,7 +1436,8 @@ static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
         cmd->supported = from_fw_linkcaps(p->port_type, p->link_cfg.supported);
         cmd->advertising = from_fw_linkcaps(p->port_type,
                                             p->link_cfg.advertising);
-        cmd->speed = netif_carrier_ok(dev) ? p->link_cfg.speed : 0;
+        ethtool_cmd_speed_set(cmd,
+                              netif_carrier_ok(dev) ? p->link_cfg.speed : 0);
         cmd->duplex = DUPLEX_FULL;
         cmd->autoneg = p->link_cfg.autoneg;
         cmd->maxtxpkt = 0;
@@ -1438,6 +1461,7 @@ static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
         unsigned int cap;
         struct port_info *p = netdev_priv(dev);
         struct link_config *lc = &p->link_cfg;
+        u32 speed = ethtool_cmd_speed(cmd);
 
         if (cmd->duplex != DUPLEX_FULL) /* only full-duplex supported */
                 return -EINVAL;
@@ -1448,16 +1472,16 @@ static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
                  * being requested.
                  */
                 if (cmd->autoneg == AUTONEG_DISABLE &&
-                    (lc->supported & speed_to_caps(cmd->speed)))
+                    (lc->supported & speed_to_caps(speed)))
                         return 0;
                 return -EINVAL;
         }
 
         if (cmd->autoneg == AUTONEG_DISABLE) {
-                cap = speed_to_caps(cmd->speed);
+                cap = speed_to_caps(speed);
 
-                if (!(lc->supported & cap) || cmd->speed == SPEED_1000 ||
-                    cmd->speed == SPEED_10000)
+                if (!(lc->supported & cap) || (speed == SPEED_1000) ||
+                    (speed == SPEED_10000))
                         return -EINVAL;
                 lc->requested_speed = cap;
                 lc->advertising = 0;
@@ -1509,24 +1533,6 @@ static int set_pauseparam(struct net_device *dev,
         return 0;
 }
 
-static u32 get_rx_csum(struct net_device *dev)
-{
-        struct port_info *p = netdev_priv(dev);
-
-        return p->rx_offload & RX_CSO;
-}
-
-static int set_rx_csum(struct net_device *dev, u32 data)
-{
-        struct port_info *p = netdev_priv(dev);
-
-        if (data)
-                p->rx_offload |= RX_CSO;
-        else
-                p->rx_offload &= ~RX_CSO;
-        return 0;
-}
-
 static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
 {
         const struct port_info *pi = netdev_priv(dev);
@@ -1671,27 +1677,41 @@ static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
         return 0;
 }
 
-/*
- * Translate a physical EEPROM address to virtual. The first 1K is accessed
- * through virtual addresses starting at 31K, the rest is accessed through
- * virtual addresses starting at 0. This mapping is correct only for PF0.
+/**
+ *      eeprom_ptov - translate a physical EEPROM address to virtual
+ *      @phys_addr: the physical EEPROM address
+ *      @fn: the PCI function number
+ *      @sz: size of function-specific area
+ *
+ *      Translate a physical EEPROM address to virtual. The first 1K is
+ *      accessed through virtual addresses starting at 31K, the rest is
+ *      accessed through virtual addresses starting at 0.
+ *
+ *      The mapping is as follows:
+ *      [0..1K) -> [31K..32K)
+ *      [1K..1K+A) -> [31K-A..31K)
+ *      [1K+A..ES) -> [0..ES-A-1K)
+ *
+ *      where A = @fn * @sz, and ES = EEPROM size.
  */
-static int eeprom_ptov(unsigned int phys_addr)
+static int eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz)
 {
+        fn *= sz;
         if (phys_addr < 1024)
                 return phys_addr + (31 << 10);
+        if (phys_addr < 1024 + fn)
+                return 31744 - fn + phys_addr - 1024;
         if (phys_addr < EEPROMSIZE)
-                return phys_addr - 1024;
+                return phys_addr - 1024 - fn;
         return -EINVAL;
 }
 
 /*
  * The next two routines implement eeprom read/write from physical addresses.
- * The physical->virtual translation is correct only for PF0.
  */
 static int eeprom_rd_phys(struct adapter *adap, unsigned int phys_addr, u32 *v)
 {
-        int vaddr = eeprom_ptov(phys_addr);
+        int vaddr = eeprom_ptov(phys_addr, adap->fn, EEPROMPFSIZE);
 
         if (vaddr >= 0)
                 vaddr = pci_read_vpd(adap->pdev, vaddr, sizeof(u32), v);
@@ -1700,7 +1720,7 @@ static int eeprom_rd_phys(struct adapter *adap, unsigned int phys_addr, u32 *v)
 
 static int eeprom_wr_phys(struct adapter *adap, unsigned int phys_addr, u32 v)
 {
-        int vaddr = eeprom_ptov(phys_addr);
+        int vaddr = eeprom_ptov(phys_addr, adap->fn, EEPROMPFSIZE);
 
         if (vaddr >= 0)
                 vaddr = pci_write_vpd(adap->pdev, vaddr, sizeof(u32), &v);
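
To make the new translation concrete, here is a stand-alone copy of the eeprom_ptov() arithmetic with a few sample conversions printed for PF1. The EEPROMSIZE and EEPROMPFSIZE values are assumptions for the example (not quoted from the driver headers), and -1 stands in for -EINVAL:

#include <stdio.h>

#define EEPROMSIZE   (32 * 1024)        /* assumed total VPD size */
#define EEPROMPFSIZE 1024               /* assumed per-function area */

/* same piecewise mapping as eeprom_ptov() in the hunk above */
static int eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz)
{
        fn *= sz;
        if (phys_addr < 1024)
                return phys_addr + (31 << 10);        /* [0..1K)    -> [31K..32K)   */
        if (phys_addr < 1024 + fn)
                return 31744 - fn + phys_addr - 1024; /* [1K..1K+A) -> [31K-A..31K) */
        if (phys_addr < EEPROMSIZE)
                return phys_addr - 1024 - fn;         /* [1K+A..ES) -> [0..ES-A-1K) */
        return -1;
}

int main(void)
{
        unsigned int addrs[] = { 0, 1023, 1024, 2047, 2048, 4096 };
        unsigned int i;

        for (i = 0; i < sizeof(addrs) / sizeof(addrs[0]); i++)
                printf("phys %5u -> virt %d\n", addrs[i],
                       eeprom_ptov(addrs[i], 1, EEPROMPFSIZE));
        return 0;
}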
@@ -1743,6 +1763,14 @@ static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
         aligned_offset = eeprom->offset & ~3;
         aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;
 
+        if (adapter->fn > 0) {
+                u32 start = 1024 + adapter->fn * EEPROMPFSIZE;
+
+                if (aligned_offset < start ||
+                    aligned_offset + aligned_len > start + EEPROMPFSIZE)
+                        return -EPERM;
+        }
+
         if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
                 /*
                  * RMW possibly needed for first or last words.
@@ -1826,20 +1854,21 @@ static int set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
         return err;
 }
 
-#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
-
-static int set_tso(struct net_device *dev, u32 value)
+static int cxgb_set_features(struct net_device *dev, u32 features)
 {
-        if (value)
-                dev->features |= TSO_FLAGS;
-        else
-                dev->features &= ~TSO_FLAGS;
-        return 0;
-}
+        const struct port_info *pi = netdev_priv(dev);
+        u32 changed = dev->features ^ features;
+        int err;
 
-static int set_flags(struct net_device *dev, u32 flags)
-{
-        return ethtool_op_set_flags(dev, flags, ETH_FLAG_RXHASH);
+        if (!(changed & NETIF_F_HW_VLAN_RX))
+                return 0;
+
+        err = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, -1,
+                            -1, -1, -1,
+                            !!(features & NETIF_F_HW_VLAN_RX), true);
+        if (unlikely(err))
+                dev->features = features ^ NETIF_F_HW_VLAN_RX;
+        return err;
 }
 
 static int get_rss_table(struct net_device *dev, struct ethtool_rxfh_indir *p)
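
The new cxgb_set_features() follows the common ndo_set_features bookkeeping: XOR the old and new feature words to find which bits actually changed, act only on the bit it manages, and flip that bit back in dev->features if the hardware update fails. A rough stand-alone sketch of that flow, using invented FEATURE_* flags rather than the real NETIF_F_* values:

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

#define FEATURE_VLAN_RX (1u << 0)       /* stand-in for NETIF_F_HW_VLAN_RX */
#define FEATURE_RXCSUM  (1u << 1)       /* some unrelated feature bit */

static bool hw_accepts;                 /* pretend outcome of the hardware call */

static int set_features(uint32_t *cur, uint32_t wanted)
{
        uint32_t changed = *cur ^ wanted;       /* bits that would flip */

        *cur = wanted;                          /* the core normally records this */
        if (!(changed & FEATURE_VLAN_RX))
                return 0;                       /* nothing we manage changed */
        if (!hw_accepts) {
                *cur = wanted ^ FEATURE_VLAN_RX; /* revert only the failed bit */
                return -1;
        }
        return 0;
}

int main(void)
{
        uint32_t features = FEATURE_RXCSUM;

        hw_accepts = false;
        set_features(&features, FEATURE_RXCSUM | FEATURE_VLAN_RX);
        printf("after failed enable: %#x\n", features); /* VLAN_RX stays clear */
        return 0;
}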
@@ -1949,13 +1978,9 @@ static struct ethtool_ops cxgb_ethtool_ops = {
         .set_eeprom        = set_eeprom,
         .get_pauseparam    = get_pauseparam,
         .set_pauseparam    = set_pauseparam,
-        .get_rx_csum       = get_rx_csum,
-        .set_rx_csum       = set_rx_csum,
-        .set_tx_csum       = ethtool_op_set_tx_ipv6_csum,
-        .set_sg            = ethtool_op_set_sg,
         .get_link          = ethtool_op_get_link,
         .get_strings       = get_strings,
-        .phys_id           = identify_port,
+        .set_phys_id       = identify_port,
         .nway_reset        = restart_autoneg,
         .get_sset_count    = get_sset_count,
         .get_ethtool_stats = get_stats,
@@ -1963,8 +1988,6 @@ static struct ethtool_ops cxgb_ethtool_ops = {
         .get_regs          = get_regs,
         .get_wol           = get_wol,
         .set_wol           = set_wol,
-        .set_tso           = set_tso,
-        .set_flags         = set_flags,
         .get_rxnfc         = get_rxnfc,
         .get_rxfh_indir    = get_rss_table,
         .set_rxfh_indir    = set_rss_table,
@@ -2026,6 +2049,7 @@ static const struct file_operations mem_debugfs_fops = {
         .owner   = THIS_MODULE,
         .open    = mem_open,
         .read    = mem_read,
+        .llseek  = default_llseek,
 };
 
 static void __devinit add_debugfs_mem(struct adapter *adap, const char *name,
@@ -2164,8 +2188,8 @@ static void mk_tid_release(struct sk_buff *skb, unsigned int chan,
  * Queue a TID release request and if necessary schedule a work queue to
  * process it.
  */
-void cxgb4_queue_tid_release(struct tid_info *t, unsigned int chan,
-                             unsigned int tid)
+static void cxgb4_queue_tid_release(struct tid_info *t, unsigned int chan,
+                                    unsigned int tid)
 {
         void **p = &t->tid_tab[tid];
         struct adapter *adap = container_of(t, struct adapter, tids);
@@ -2180,7 +2204,6 @@ void cxgb4_queue_tid_release(struct tid_info *t, unsigned int chan,
         }
         spin_unlock_bh(&adap->tid_release_lock);
 }
-EXPORT_SYMBOL(cxgb4_queue_tid_release);
 
 /*
  * Process the list of pending TID release requests.
@@ -2304,7 +2327,7 @@ int cxgb4_create_server(const struct net_device *dev, unsigned int stid,
         req->peer_port = htons(0);
         req->local_ip = sip;
         req->peer_ip = htonl(0);
-        chan = netdev2pinfo(adap->sge.ingr_map[queue]->netdev)->tx_chan;
+        chan = rxq_to_chan(&adap->sge, queue);
         req->opt0 = cpu_to_be64(TX_CHAN(chan));
         req->opt1 = cpu_to_be64(CONN_POLICY_ASK |
                                 SYN_RSS_ENABLE | SYN_RSS_QUEUE(queue));
@@ -2313,48 +2336,6 @@ int cxgb4_create_server(const struct net_device *dev, unsigned int stid,
 EXPORT_SYMBOL(cxgb4_create_server);
 
 /**
- *      cxgb4_create_server6 - create an IPv6 server
- *      @dev: the device
- *      @stid: the server TID
- *      @sip: local IPv6 address to bind server to
- *      @sport: the server's TCP port
- *      @queue: queue to direct messages from this server to
- *
- *      Create an IPv6 server for the given port and address.
- *      Returns <0 on error and one of the %NET_XMIT_* values on success.
- */
-int cxgb4_create_server6(const struct net_device *dev, unsigned int stid,
-                         const struct in6_addr *sip, __be16 sport,
-                         unsigned int queue)
-{
-        unsigned int chan;
-        struct sk_buff *skb;
-        struct adapter *adap;
-        struct cpl_pass_open_req6 *req;
-
-        skb = alloc_skb(sizeof(*req), GFP_KERNEL);
-        if (!skb)
-                return -ENOMEM;
-
-        adap = netdev2adap(dev);
-        req = (struct cpl_pass_open_req6 *)__skb_put(skb, sizeof(*req));
-        INIT_TP_WR(req, 0);
-        OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ6, stid));
-        req->local_port = sport;
-        req->peer_port = htons(0);
-        req->local_ip_hi = *(__be64 *)(sip->s6_addr);
-        req->local_ip_lo = *(__be64 *)(sip->s6_addr + 8);
-        req->peer_ip_hi = cpu_to_be64(0);
-        req->peer_ip_lo = cpu_to_be64(0);
-        chan = netdev2pinfo(adap->sge.ingr_map[queue]->netdev)->tx_chan;
-        req->opt0 = cpu_to_be64(TX_CHAN(chan));
-        req->opt1 = cpu_to_be64(CONN_POLICY_ASK |
-                                SYN_RSS_ENABLE | SYN_RSS_QUEUE(queue));
-        return t4_mgmt_tx(adap, skb);
-}
-EXPORT_SYMBOL(cxgb4_create_server6);
-
-/**
  * cxgb4_best_mtu - find the entry in the MTU table closest to an MTU
  * @mtus: the HW MTU table
  * @mtu: the target MTU
@@ -2413,25 +2394,6 @@ unsigned int cxgb4_port_idx(const struct net_device *dev)
 }
 EXPORT_SYMBOL(cxgb4_port_idx);
 
-/**
- *      cxgb4_netdev_by_hwid - return the net device of a HW port
- *      @pdev: identifies the adapter
- *      @id: the HW port id
- *
- *      Return the net device associated with the interface with the given HW
- *      id.
- */
-struct net_device *cxgb4_netdev_by_hwid(struct pci_dev *pdev, unsigned int id)
-{
-        const struct adapter *adap = pci_get_drvdata(pdev);
-
-        if (!adap || id >= NCHAN)
-                return NULL;
-        id = adap->chan_map[id];
-        return id < MAX_NPORTS ? adap->port[id] : NULL;
-}
-EXPORT_SYMBOL(cxgb4_netdev_by_hwid);
-
 void cxgb4_get_tcp_stats(struct pci_dev *pdev, struct tp_tcp_stats *v4,
                          struct tp_tcp_stats *v6)
 {
@@ -2476,7 +2438,6 @@ static int netevent_cb(struct notifier_block *nb, unsigned long event,
         case NETEVENT_NEIGH_UPDATE:
                 check_neigh_update(data);
                 break;
-        case NETEVENT_PMTU_UPDATE:
         case NETEVENT_REDIRECT:
         default:
                 break;
@@ -2670,7 +2631,7 @@ static int cxgb_up(struct adapter *adap)
         } else {
                 err = request_irq(adap->pdev->irq, t4_intr_handler(adap),
                                   (adap->flags & USING_MSI) ? 0 : IRQF_SHARED,
-                                  adap->name, adap);
+                                  adap->port[0]->name, adap);
                 if (err)
                         goto irq_err;
         }
@@ -2715,13 +2676,14 @@ static int cxgb_open(struct net_device *dev)
         struct port_info *pi = netdev_priv(dev);
         struct adapter *adapter = pi->adapter;
 
+        netif_carrier_off(dev);
+
         if (!(adapter->flags & FULL_INIT_DONE)) {
                 err = cxgb_up(adapter);
                 if (err < 0)
                         return err;
         }
 
-        dev->real_num_tx_queues = pi->nqsets;
         err = link_start(dev);
         if (!err)
                 netif_tx_start_all_queues(dev);
@@ -2858,15 +2820,6 @@ static int cxgb_set_mac_addr(struct net_device *dev, void *p)
         return 0;
 }
 
-static void vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
-{
-        struct port_info *pi = netdev_priv(dev);
-
-        pi->vlan_grp = grp;
-        t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, -1, -1, -1, -1,
-                      grp != NULL, true);
-}
-
 #ifdef CONFIG_NET_POLL_CONTROLLER
 static void cxgb_netpoll(struct net_device *dev)
 {
@@ -2891,10 +2844,10 @@ static const struct net_device_ops cxgb4_netdev_ops = {
         .ndo_get_stats64      = cxgb_get_stats,
         .ndo_set_rx_mode      = cxgb_set_rxmode,
         .ndo_set_mac_address  = cxgb_set_mac_addr,
+        .ndo_set_features     = cxgb_set_features,
         .ndo_validate_addr    = eth_validate_addr,
         .ndo_do_ioctl         = cxgb_ioctl,
         .ndo_change_mtu       = cxgb_change_mtu,
-        .ndo_vlan_rx_register = vlan_rx_register,
 #ifdef CONFIG_NET_POLL_CONTROLLER
         .ndo_poll_controller  = cxgb_netpoll,
 #endif
@@ -3061,12 +3014,16 @@ static int adap_init0(struct adapter *adap)
         params[2] = FW_PARAM_PFVF(L2T_END);
         params[3] = FW_PARAM_PFVF(FILTER_START);
         params[4] = FW_PARAM_PFVF(FILTER_END);
-        ret = t4_query_params(adap, adap->fn, adap->fn, 0, 5, params, val);
+        params[5] = FW_PARAM_PFVF(IQFLINT_START);
+        params[6] = FW_PARAM_PFVF(EQ_START);
+        ret = t4_query_params(adap, adap->fn, adap->fn, 0, 7, params, val);
         if (ret < 0)
                 goto bye;
         port_vec = val[0];
         adap->tids.ftid_base = val[3];
         adap->tids.nftids = val[4] - val[3] + 1;
+        adap->sge.ingr_start = val[5];
+        adap->sge.egr_start = val[6];
 
         if (c.ofldcaps) {
                 /* query offload-related parameters */
@@ -3496,49 +3453,53 @@ static int __devinit init_rss(struct adapter *adap)
         return 0;
 }
 
-static void __devinit print_port_info(struct adapter *adap)
+static void __devinit print_port_info(const struct net_device *dev)
 {
         static const char *base[] = {
                 "R XFI", "R XAUI", "T SGMII", "T XFI", "T XAUI", "KX4", "CX4",
-                "KX", "KR", "KR SFP+", "KR FEC"
+                "KX", "KR", "R SFP+", "KR/KX", "KR/KX/KX4"
         };
 
-        int i;
         char buf[80];
+        char *bufp = buf;
         const char *spd = "";
+        const struct port_info *pi = netdev_priv(dev);
+        const struct adapter *adap = pi->adapter;
 
         if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_2_5GB)
                 spd = " 2.5 GT/s";
         else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_5_0GB)
                 spd = " 5 GT/s";
 
-        for_each_port(adap, i) {
-                struct net_device *dev = adap->port[i];
-                const struct port_info *pi = netdev_priv(dev);
-                char *bufp = buf;
+        if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_100M)
+                bufp += sprintf(bufp, "100/");
+        if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_1G)
+                bufp += sprintf(bufp, "1000/");
+        if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_10G)
+                bufp += sprintf(bufp, "10G/");
+        if (bufp != buf)
+                --bufp;
+        sprintf(bufp, "BASE-%s", base[pi->port_type]);
 
-                if (!test_bit(i, &adap->registered_device_map))
-                        continue;
+        netdev_info(dev, "Chelsio %s rev %d %s %sNIC PCIe x%d%s%s\n",
+                    adap->params.vpd.id, adap->params.rev, buf,
+                    is_offload(adap) ? "R" : "", adap->params.pci.width, spd,
+                    (adap->flags & USING_MSIX) ? " MSI-X" :
+                    (adap->flags & USING_MSI) ? " MSI" : "");
+        netdev_info(dev, "S/N: %s, E/C: %s\n",
+                    adap->params.vpd.sn, adap->params.vpd.ec);
+}
 
-                if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_100M)
-                        bufp += sprintf(bufp, "100/");
-                if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_1G)
-                        bufp += sprintf(bufp, "1000/");
-                if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_10G)
-                        bufp += sprintf(bufp, "10G/");
-                if (bufp != buf)
-                        --bufp;
-                sprintf(bufp, "BASE-%s", base[pi->port_type]);
+static void __devinit enable_pcie_relaxed_ordering(struct pci_dev *dev)
+{
+        u16 v;
+        int pos;
 
-                netdev_info(dev, "Chelsio %s rev %d %s %sNIC PCIe x%d%s%s\n",
-                            adap->params.vpd.id, adap->params.rev,
-                            buf, is_offload(adap) ? "R" : "",
-                            adap->params.pci.width, spd,
-                            (adap->flags & USING_MSIX) ? " MSI-X" :
-                            (adap->flags & USING_MSI) ? " MSI" : "");
-                if (adap->name == dev->name)
-                        netdev_info(dev, "S/N: %s, E/C: %s\n",
-                                    adap->params.vpd.sn, adap->params.vpd.ec);
-        }
+        pos = pci_pcie_cap(dev);
+        if (pos > 0) {
+                pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &v);
+                v |= PCI_EXP_DEVCTL_RELAX_EN;
+                pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, v);
+        }
 }
 
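
The reworked print_port_info() above builds the "100/1000/10G" prefix by advancing a write pointer with the return value of sprintf() and then stepping back over the trailing '/'. The same idiom in a stand-alone form, with invented CAP_* flags standing in for the FW_PORT_CAP_SPEED_* bits:

#include <stdio.h>

#define CAP_100M (1 << 0)
#define CAP_1G   (1 << 1)
#define CAP_10G  (1 << 2)

int main(void)
{
        unsigned int supported = CAP_1G | CAP_10G;  /* sample port capabilities */
        char buf[80];
        char *bufp = buf;

        if (supported & CAP_100M)
                bufp += sprintf(bufp, "100/");
        if (supported & CAP_1G)
                bufp += sprintf(bufp, "1000/");
        if (supported & CAP_10G)
                bufp += sprintf(bufp, "10G/");
        if (bufp != buf)
                --bufp;                         /* drop the trailing '/' */
        sprintf(bufp, "BASE-%s", "T");          /* e.g. "1000/10GBASE-T" */

        printf("%s\n", buf);
        return 0;
}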
@@ -3566,6 +3527,7 @@ static void free_some_resources(struct adapter *adapter)
                 t4_fw_bye(adapter, adapter->fn);
 }
 
+#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
 #define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | TSO_FLAGS | \
                    NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
 
@@ -3616,6 +3578,7 @@ static int __devinit init_one(struct pci_dev *pdev,
         }
 
         pci_enable_pcie_error_reporting(pdev);
+        enable_pcie_relaxed_ordering(pdev);
         pci_set_master(pdev);
         pci_save_state(pdev);
 
@@ -3635,7 +3598,6 @@ static int __devinit init_one(struct pci_dev *pdev,
         adapter->pdev = pdev;
         adapter->pdev_dev = &pdev->dev;
         adapter->fn = func;
-        adapter->name = pci_name(pdev);
         adapter->msg_enable = dflt_msg_enable;
         memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map));
 
@@ -3667,16 +3629,14 @@ static int __devinit init_one(struct pci_dev *pdev,
                 pi = netdev_priv(netdev);
                 pi->adapter = adapter;
                 pi->xact_addr_filt = -1;
-                pi->rx_offload = RX_CSO;
                 pi->port_id = i;
-                netif_carrier_off(netdev);
-                netif_tx_stop_all_queues(netdev);
                 netdev->irq = pdev->irq;
 
-                netdev->features |= NETIF_F_SG | TSO_FLAGS;
-                netdev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
-                netdev->features |= NETIF_F_GRO | NETIF_F_RXHASH | highdma;
-                netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
+                netdev->hw_features = NETIF_F_SG | TSO_FLAGS |
+                        NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+                        NETIF_F_RXCSUM | NETIF_F_RXHASH |
+                        NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
+                netdev->features |= netdev->hw_features | highdma;
                 netdev->vlan_features = netdev->features & VLAN_FEAT;
 
                 netdev->netdev_ops = &cxgb4_netdev_ops;
@@ -3727,27 +3687,24 @@ static int __devinit init_one(struct pci_dev *pdev,
          * register at least one net device.
          */
         for_each_port(adapter, i) {
+                pi = adap2pinfo(adapter, i);
+                netif_set_real_num_tx_queues(adapter->port[i], pi->nqsets);
+                netif_set_real_num_rx_queues(adapter->port[i], pi->nqsets);
+
                 err = register_netdev(adapter->port[i]);
                 if (err)
-                        dev_warn(&pdev->dev,
-                                 "cannot register net device %s, skipping\n",
-                                 adapter->port[i]->name);
-                else {
-                        /*
-                         * Change the name we use for messages to the name of
-                         * the first successfully registered interface.
-                         */
-                        if (!adapter->registered_device_map)
-                                adapter->name = adapter->port[i]->name;
-
-                        __set_bit(i, &adapter->registered_device_map);
-                        adapter->chan_map[adap2pinfo(adapter, i)->tx_chan] = i;
-                }
+                        break;
+                adapter->chan_map[pi->tx_chan] = i;
+                print_port_info(adapter->port[i]);
         }
-        if (!adapter->registered_device_map) {
+        if (i == 0) {
                 dev_err(&pdev->dev, "could not register any net devices\n");
                 goto out_free_dev;
         }
+        if (err) {
+                dev_warn(&pdev->dev, "only %d net devices registered\n", i);
+                err = 0;
+        };
 
         if (cxgb4_debugfs_root) {
                 adapter->debugfs_root = debugfs_create_dir(pci_name(pdev),
@@ -3758,8 +3715,6 @@ static int __devinit init_one(struct pci_dev *pdev,
         if (is_offload(adapter))
                 attach_ulds(adapter);
 
-        print_port_info(adapter);
-
 sriov:
 #ifdef CONFIG_PCI_IOV
         if (func < ARRAY_SIZE(num_vf) && num_vf[func] > 0)
@@ -3798,7 +3753,7 @@ static void __devexit remove_one(struct pci_dev *pdev)
                 detach_ulds(adapter);
 
                 for_each_port(adapter, i)
-                        if (test_bit(i, &adapter->registered_device_map))
+                        if (adapter->port[i]->reg_state == NETREG_REGISTERED)
                                 unregister_netdev(adapter->port[i]);
 
                 if (adapter->debugfs_root)
@@ -3814,7 +3769,7 @@ static void __devexit remove_one(struct pci_dev *pdev)
                 pci_disable_device(pdev);
                 pci_release_regions(pdev);
                 pci_set_drvdata(pdev, NULL);
-        } else if (PCI_FUNC(pdev->devfn) > 0)
+        } else
                 pci_release_regions(pdev);
 }
 