author	Malli Chilakala <mallikarjuna.chilakala@intel.com>	2005-04-28 22:17:20 -0400
committer	Jeff Garzik <jgarzik@pobox.com>	2005-05-12 20:59:28 -0400
commit	1f53367d5d75ba37f258f6e955d6fc24814051a0 (patch)
tree	05f5c9c8f79edb4d6a7b8465e46992de3941b10a /drivers/net/e100.c
parent	2acdb1e05c1a92e05ee710ed8f226a8f3183d5a0 (diff)
[PATCH] e100: Render e100 NAPI state machine
Render e100 NAPI state machine to be similar to the non-NAPI one.

Signed-off-by: Mallikarjuna R Chilakala <mallikarjuna.chilakala@intel.com>
Signed-off-by: Ganesh Venkatesan <ganesh.venkatesan@intel.com>
Signed-off-by: John Ronciak <john.ronciak@intel.com>
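For orientation, the receive-unit (RU) state machine this patch introduces is summarized below as a minimal C sketch. The enum values and transitions are taken directly from the hunks that follow; the per-state comments are one reading of them, not code carried by the patch itself.

/* Sketch of the ru_state lifecycle added by this patch (see diff below). */
enum ru_state {
	RU_SUSPENDED = 0,	/* RX list built, or hardware hit RNR / the EL-bit RFD */
	RU_RUNNING = 1,		/* ruc_start issued; hardware owns the RFA */
	RU_UNINITIALIZED = -1,	/* no RX list allocated */
};

/*
 * e100_rx_alloc_list():  RU_UNINITIALIZED, then RU_SUSPENDED once the list is built
 * e100_start_receiver(): RU_SUSPENDED -> RU_RUNNING (issues ruc_start)
 * e100_intr() on RNR:    RU_RUNNING   -> RU_SUSPENDED
 * e100_rx_indicate():    RU_RUNNING   -> RU_SUSPENDED when the EL-bit RFD is seen
 * e100_rx_clean():       if suspended, ack stat_ack_rnr and restart the receiver
 * e100_rx_clean_list():  back to RU_UNINITIALIZED
 */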
Diffstat (limited to 'drivers/net/e100.c')
-rw-r--r--	drivers/net/e100.c	86
1 files changed, 71 insertions, 15 deletions
diff --git a/drivers/net/e100.c b/drivers/net/e100.c
index 0f2837ebaab8..2c8c14ebf205 100644
--- a/drivers/net/e100.c
+++ b/drivers/net/e100.c
@@ -269,6 +269,12 @@ enum scb_status {
 	rus_mask	= 0x3C,
 };
 
+enum ru_state  {
+	RU_SUSPENDED = 0,
+	RU_RUNNING	 = 1,
+	RU_UNINITIALIZED = -1,
+};
+
 enum scb_stat_ack {
 	stat_ack_not_ours    = 0x00,
 	stat_ack_sw_gen      = 0x04,
@@ -510,7 +516,7 @@ struct nic {
 	struct rx *rx_to_use;
 	struct rx *rx_to_clean;
 	struct rfd blank_rfd;
-	int ru_running;
+	enum ru_state ru_running;
 
 	spinlock_t cb_lock ____cacheline_aligned;
 	spinlock_t cmd_lock;
@@ -1204,7 +1210,9 @@ static void e100_update_stats(struct nic *nic)
 		}
 	}
 
-	e100_exec_cmd(nic, cuc_dump_reset, 0);
+
+	if(e100_exec_cmd(nic, cuc_dump_reset, 0))
+		DPRINTK(TX_ERR, DEBUG, "exec cuc_dump_reset failed\n");
 }
 
 static void e100_adjust_adaptive_ifs(struct nic *nic, int speed, int duplex)
@@ -1298,7 +1306,8 @@ static int e100_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 		/* SW workaround for ICH[x] 10Mbps/half duplex Tx hang.
 		   Issue a NOP command followed by a 1us delay before
 		   issuing the Tx command. */
-		e100_exec_cmd(nic, cuc_nop, 0);
+		if(e100_exec_cmd(nic, cuc_nop, 0))
+			DPRINTK(TX_ERR, DEBUG, "exec cuc_nop failed\n");
 		udelay(1);
 	}
 
@@ -1416,12 +1425,18 @@ static int e100_alloc_cbs(struct nic *nic)
 	return 0;
 }
 
-static inline void e100_start_receiver(struct nic *nic)
+static inline void e100_start_receiver(struct nic *nic, struct rx *rx)
 {
+	if(!nic->rxs) return;
+	if(RU_SUSPENDED != nic->ru_running) return;
+
+	/* handle init time starts */
+	if(!rx) rx = nic->rxs;
+
 	/* (Re)start RU if suspended or idle and RFA is non-NULL */
-	if(!nic->ru_running && nic->rx_to_clean->skb) {
-		e100_exec_cmd(nic, ruc_start, nic->rx_to_clean->dma_addr);
-		nic->ru_running = 1;
+	if(rx->skb) {
+		e100_exec_cmd(nic, ruc_start, rx->dma_addr);
+		nic->ru_running = RU_RUNNING;
 	}
 }
 
@@ -1438,6 +1453,13 @@ static inline int e100_rx_alloc_skb(struct nic *nic, struct rx *rx)
 	rx->dma_addr = pci_map_single(nic->pdev, rx->skb->data,
 		RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);
 
+	if(pci_dma_mapping_error(rx->dma_addr)) {
+		dev_kfree_skb_any(rx->skb);
+		rx->skb = 0;
+		rx->dma_addr = 0;
+		return -ENOMEM;
+	}
+
 	/* Link the RFD to end of RFA by linking previous RFD to
 	 * this one, and clearing EL bit of previous. */
 	if(rx->prev->skb) {
@@ -1472,7 +1494,7 @@ static inline int e100_rx_indicate(struct nic *nic, struct rx *rx,
 
 	/* If data isn't ready, nothing to indicate */
 	if(unlikely(!(rfd_status & cb_complete)))
-		return -EAGAIN;
+		return -ENODATA;
 
 	/* Get actual data size */
 	actual_size = le16_to_cpu(rfd->actual_size) & 0x3FFF;
@@ -1483,6 +1505,10 @@ static inline int e100_rx_indicate(struct nic *nic, struct rx *rx,
 	pci_unmap_single(nic->pdev, rx->dma_addr,
 		RFD_BUF_LEN, PCI_DMA_FROMDEVICE);
 
+	/* this allows for a fast restart without re-enabling interrupts */
+	if(le16_to_cpu(rfd->command) & cb_el)
+		nic->ru_running = RU_SUSPENDED;
+
 	/* Pull off the RFD and put the actual data (minus eth hdr) */
 	skb_reserve(skb, sizeof(struct rfd));
 	skb_put(skb, actual_size);
@@ -1515,20 +1541,45 @@ static inline void e100_rx_clean(struct nic *nic, unsigned int *work_done,
 	unsigned int work_to_do)
 {
 	struct rx *rx;
+	int restart_required = 0;
+	struct rx *rx_to_start = NULL;
+
+	/* are we already rnr? then pay attention!!! this ensures that
+	 * the state machine progression never allows a start with a
+	 * partially cleaned list, avoiding a race between hardware
+	 * and rx_to_clean when in NAPI mode */
+	if(RU_SUSPENDED == nic->ru_running)
+		restart_required = 1;
 
 	/* Indicate newly arrived packets */
 	for(rx = nic->rx_to_clean; rx->skb; rx = nic->rx_to_clean = rx->next) {
-		if(e100_rx_indicate(nic, rx, work_done, work_to_do))
+		int err = e100_rx_indicate(nic, rx, work_done, work_to_do);
+		if(-EAGAIN == err) {
+			/* hit quota so have more work to do, restart once
+			 * cleanup is complete */
+			restart_required = 0;
+			break;
+		} else if(-ENODATA == err)
 			break; /* No more to clean */
 	}
 
+	/* save our starting point as the place we'll restart the receiver */
+	if(restart_required)
+		rx_to_start = nic->rx_to_clean;
+
 	/* Alloc new skbs to refill list */
 	for(rx = nic->rx_to_use; !rx->skb; rx = nic->rx_to_use = rx->next) {
 		if(unlikely(e100_rx_alloc_skb(nic, rx)))
 			break; /* Better luck next time (see watchdog) */
 	}
 
-	e100_start_receiver(nic);
+	if(restart_required) {
+		// ack the rnr?
+		writeb(stat_ack_rnr, &nic->csr->scb.stat_ack);
+		e100_start_receiver(nic, rx_to_start);
+		if(work_done)
+			(*work_done)++;
+	}
 }
 
 static void e100_rx_clean_list(struct nic *nic)
@@ -1536,6 +1587,8 @@ static void e100_rx_clean_list(struct nic *nic)
 	struct rx *rx;
 	unsigned int i, count = nic->params.rfds.count;
 
+	nic->ru_running = RU_UNINITIALIZED;
+
 	if(nic->rxs) {
 		for(rx = nic->rxs, i = 0; i < count; rx++, i++) {
 			if(rx->skb) {
@@ -1549,7 +1602,6 @@ static void e100_rx_clean_list(struct nic *nic)
 	}
 
 	nic->rx_to_use = nic->rx_to_clean = NULL;
-	nic->ru_running = 0;
 }
 
 static int e100_rx_alloc_list(struct nic *nic)
@@ -1558,6 +1610,7 @@ static int e100_rx_alloc_list(struct nic *nic)
 	unsigned int i, count = nic->params.rfds.count;
 
 	nic->rx_to_use = nic->rx_to_clean = NULL;
+	nic->ru_running = RU_UNINITIALIZED;
 
 	if(!(nic->rxs = kmalloc(sizeof(struct rx) * count, GFP_ATOMIC)))
 		return -ENOMEM;
@@ -1573,6 +1626,7 @@ static int e100_rx_alloc_list(struct nic *nic)
 	}
 
 	nic->rx_to_use = nic->rx_to_clean = nic->rxs;
+	nic->ru_running = RU_SUSPENDED;
 
 	return 0;
 }
@@ -1594,7 +1648,7 @@ static irqreturn_t e100_intr(int irq, void *dev_id, struct pt_regs *regs)
 
 	/* We hit Receive No Resource (RNR); restart RU after cleaning */
 	if(stat_ack & stat_ack_rnr)
-		nic->ru_running = 0;
+		nic->ru_running = RU_SUSPENDED;
 
 	e100_disable_irq(nic);
 	netif_rx_schedule(netdev);
@@ -1684,7 +1738,7 @@ static int e100_up(struct nic *nic)
 	if((err = e100_hw_init(nic)))
 		goto err_clean_cbs;
 	e100_set_multicast_list(nic->netdev);
-	e100_start_receiver(nic);
+	e100_start_receiver(nic, 0);
 	mod_timer(&nic->watchdog, jiffies);
 	if((err = request_irq(nic->pdev->irq, e100_intr, SA_SHIRQ,
 		nic->netdev->name, nic->netdev)))
@@ -1759,7 +1813,7 @@ static int e100_loopback_test(struct nic *nic, enum loopback loopback_mode)
 		mdio_write(nic->netdev, nic->mii.phy_id, MII_BMCR,
 			BMCR_LOOPBACK);
 
-	e100_start_receiver(nic);
+	e100_start_receiver(nic, 0);
 
 	if(!(skb = dev_alloc_skb(ETH_DATA_LEN))) {
 		err = -ENOMEM;
@@ -2233,6 +2287,7 @@ static int __devinit e100_probe(struct pci_dev *pdev,
 
 	e100_get_defaults(nic);
 
+	/* locks must be initialized before calling hw_reset */
 	spin_lock_init(&nic->cb_lock);
 	spin_lock_init(&nic->cmd_lock);
 
@@ -2348,7 +2403,8 @@ static int e100_resume(struct pci_dev *pdev)
 
 	pci_set_power_state(pdev, PCI_D0);
 	pci_restore_state(pdev);
-	e100_hw_init(nic);
+	if(e100_hw_init(nic))
+		DPRINTK(HW, ERR, "e100_hw_init failed\n");
 
 	netif_device_attach(netdev);
 	if(netif_running(netdev))