Diffstat (limited to 'drivers/net/forcedeth.c')

-rw-r--r--  drivers/net/forcedeth.c | 298
1 file changed, 240 insertions(+), 58 deletions(-)
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index a29b5a9f0fe2..f29a5b6f37d6 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -456,16 +456,18 @@ typedef union _ring_type {
 /* General driver defaults */
 #define NV_WATCHDOG_TIMEO   (5*HZ)
 
-#define RX_RING     128
-#define TX_RING     256
+#define RX_RING_DEFAULT     128
+#define TX_RING_DEFAULT     256
+#define RX_RING_MIN         128
+#define TX_RING_MIN         64
+#define RING_MAX_DESC_VER_1     1024
+#define RING_MAX_DESC_VER_2_3   16384
 /*
- * If your nic mysteriously hangs then try to reduce the limits
- * to 1/0: It might be required to set NV_TX_LASTPACKET in the
- * last valid ring entry. But this would be impossible to
- * implement - probably a disassembly error.
+ * Difference between the get and put pointers for the tx ring.
+ * This is used to throttle the amount of data outstanding in the
+ * tx ring.
  */
-#define TX_LIMIT_STOP   255
-#define TX_LIMIT_START  254
+#define TX_LIMIT_DIFFERENCE 1
 
 /* rx/tx mac addr + type + vlan + align + slack*/
 #define NV_RX_HEADERS       (64)
@@ -577,13 +579,14 @@ struct fe_priv {
      */
     ring_type rx_ring;
     unsigned int cur_rx, refill_rx;
-    struct sk_buff *rx_skbuff[RX_RING];
-    dma_addr_t rx_dma[RX_RING];
+    struct sk_buff **rx_skbuff;
+    dma_addr_t *rx_dma;
     unsigned int rx_buf_sz;
     unsigned int pkt_limit;
     struct timer_list oom_kick;
     struct timer_list nic_poll;
     u32 nic_poll_irq;
+    int rx_ring_size;
 
     /* media detection workaround.
      * Locking: Within irq hander or disable_irq+spin_lock(&np->lock);
@@ -595,10 +598,13 @@ struct fe_priv {
      */
     ring_type tx_ring;
     unsigned int next_tx, nic_tx;
-    struct sk_buff *tx_skbuff[TX_RING];
-    dma_addr_t tx_dma[TX_RING];
-    unsigned int tx_dma_len[TX_RING];
+    struct sk_buff **tx_skbuff;
+    dma_addr_t *tx_dma;
+    unsigned int *tx_dma_len;
     u32 tx_flags;
+    int tx_ring_size;
+    int tx_limit_start;
+    int tx_limit_stop;
 
     /* vlan fields */
     struct vlan_group *vlangrp;
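
With the compile-time arrays gone, each piece of per-descriptor bookkeeping becomes a separately kmalloc'ed array whose length is the runtime ring size. A minimal sketch of the shape, mirroring the allocations the patch adds to nv_probe() further down:

    /* one slot per descriptor, length chosen at runtime */
    np->tx_skbuff  = kmalloc(sizeof(struct sk_buff *) * np->tx_ring_size, GFP_KERNEL);
    np->tx_dma     = kmalloc(sizeof(dma_addr_t) * np->tx_ring_size, GFP_KERNEL);
    np->tx_dma_len = kmalloc(sizeof(unsigned int) * np->tx_ring_size, GFP_KERNEL);
    if (!np->tx_skbuff || !np->tx_dma || !np->tx_dma_len)
        goto out_freering;  /* the cleanup path copes with partial allocation */
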
@@ -704,7 +710,7 @@ static void setup_hw_rings(struct net_device *dev, int rxtx_flags)
            writel((u32) cpu_to_le64(np->ring_addr), base + NvRegRxRingPhysAddr);
        }
        if (rxtx_flags & NV_SETUP_TX_RING) {
-           writel((u32) cpu_to_le64(np->ring_addr + RX_RING*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr);
+           writel((u32) cpu_to_le64(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr);
        }
    } else {
        if (rxtx_flags & NV_SETUP_RX_RING) {
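
Both descriptor rings share a single pci_alloc_consistent() block with the rx ring first, so the tx ring's bus address is the block base plus the size of the rx ring; that is all the rewritten writel() computes. Illustrated for the DESC_VER_1/2 layout (the ex variant only swaps the descriptor type):

    /* ring_addr                                ring_addr + rx_ring_size * desc size
     *    |------- rx ring: rx_ring_size -------|------- tx ring: tx_ring_size -------|
     */
    dma_addr_t tx_ring_phys = np->ring_addr + np->rx_ring_size * sizeof(struct ring_desc);
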
@@ -712,12 +718,37 @@ static void setup_hw_rings(struct net_device *dev, int rxtx_flags)
            writel((u32) (cpu_to_le64(np->ring_addr) >> 32), base + NvRegRxRingPhysAddrHigh);
        }
        if (rxtx_flags & NV_SETUP_TX_RING) {
-           writel((u32) cpu_to_le64(np->ring_addr + RX_RING*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddr);
-           writel((u32) (cpu_to_le64(np->ring_addr + RX_RING*sizeof(struct ring_desc_ex)) >> 32), base + NvRegTxRingPhysAddrHigh);
+           writel((u32) cpu_to_le64(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddr);
+           writel((u32) (cpu_to_le64(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc_ex)) >> 32), base + NvRegTxRingPhysAddrHigh);
        }
    }
 }
 
+static void free_rings(struct net_device *dev)
+{
+   struct fe_priv *np = get_nvpriv(dev);
+
+   if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
+       if(np->rx_ring.orig)
+           pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (np->rx_ring_size + np->tx_ring_size),
+                               np->rx_ring.orig, np->ring_addr);
+   } else {
+       if (np->rx_ring.ex)
+           pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (np->rx_ring_size + np->tx_ring_size),
+                               np->rx_ring.ex, np->ring_addr);
+   }
+   if (np->rx_skbuff)
+       kfree(np->rx_skbuff);
+   if (np->rx_dma)
+       kfree(np->rx_dma);
+   if (np->tx_skbuff)
+       kfree(np->tx_skbuff);
+   if (np->tx_dma)
+       kfree(np->tx_dma);
+   if (np->tx_dma_len)
+       kfree(np->tx_dma_len);
+}
+
 static int using_multi_irqs(struct net_device *dev)
 {
    struct fe_priv *np = get_nvpriv(dev);
@@ -1056,7 +1087,7 @@ static int nv_alloc_rx(struct net_device *dev)
    while (np->cur_rx != refill_rx) {
        struct sk_buff *skb;
 
-       nr = refill_rx % RX_RING;
+       nr = refill_rx % np->rx_ring_size;
        if (np->rx_skbuff[nr] == NULL) {
 
            skb = dev_alloc_skb(np->rx_buf_sz + NV_RX_ALLOC_PAD);
@@ -1085,7 +1116,7 @@ static int nv_alloc_rx(struct net_device *dev)
        refill_rx++;
    }
    np->refill_rx = refill_rx;
-   if (np->cur_rx - refill_rx == RX_RING)
+   if (np->cur_rx - refill_rx == np->rx_ring_size)
        return 1;
    return 0;
 }
@@ -1124,9 +1155,9 @@ static void nv_init_rx(struct net_device *dev)
    struct fe_priv *np = netdev_priv(dev);
    int i;
 
-   np->cur_rx = RX_RING;
+   np->cur_rx = np->rx_ring_size;
    np->refill_rx = 0;
-   for (i = 0; i < RX_RING; i++)
+   for (i = 0; i < np->rx_ring_size; i++)
        if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
            np->rx_ring.orig[i].FlagLen = 0;
        else
@@ -1139,7 +1170,7 @@ static void nv_init_tx(struct net_device *dev)
    int i;
 
    np->next_tx = np->nic_tx = 0;
-   for (i = 0; i < TX_RING; i++) {
+   for (i = 0; i < np->tx_ring_size; i++) {
        if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
            np->tx_ring.orig[i].FlagLen = 0;
        else
@@ -1184,7 +1215,7 @@ static void nv_drain_tx(struct net_device *dev)
    struct fe_priv *np = netdev_priv(dev);
    unsigned int i;
 
-   for (i = 0; i < TX_RING; i++) {
+   for (i = 0; i < np->tx_ring_size; i++) {
        if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
            np->tx_ring.orig[i].FlagLen = 0;
        else
@@ -1198,7 +1229,7 @@ static void nv_drain_rx(struct net_device *dev)
 {
    struct fe_priv *np = netdev_priv(dev);
    int i;
-   for (i = 0; i < RX_RING; i++) {
+   for (i = 0; i < np->rx_ring_size; i++) {
        if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
            np->rx_ring.orig[i].FlagLen = 0;
        else
@@ -1230,8 +1261,8 @@ static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
    u32 tx_flags = 0;
    u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET);
    unsigned int fragments = skb_shinfo(skb)->nr_frags;
-   unsigned int nr = (np->next_tx - 1) % TX_RING;
-   unsigned int start_nr = np->next_tx % TX_RING;
+   unsigned int nr = (np->next_tx - 1) % np->tx_ring_size;
+   unsigned int start_nr = np->next_tx % np->tx_ring_size;
    unsigned int i;
    u32 offset = 0;
    u32 bcnt;
@@ -1247,7 +1278,7 @@ static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
    spin_lock_irq(&np->lock);
 
-   if ((np->next_tx - np->nic_tx + entries - 1) > TX_LIMIT_STOP) {
+   if ((np->next_tx - np->nic_tx + entries - 1) > np->tx_limit_stop) {
        spin_unlock_irq(&np->lock);
        netif_stop_queue(dev);
        return NETDEV_TX_BUSY;
@@ -1256,7 +1287,7 @@ static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
    /* setup the header buffer */
    do {
        bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
-       nr = (nr + 1) % TX_RING;
+       nr = (nr + 1) % np->tx_ring_size;
 
        np->tx_dma[nr] = pci_map_single(np->pci_dev, skb->data + offset, bcnt,
                                        PCI_DMA_TODEVICE);
@@ -1283,7 +1314,7 @@ static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
        do {
            bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
-           nr = (nr + 1) % TX_RING;
+           nr = (nr + 1) % np->tx_ring_size;
 
            np->tx_dma[nr] = pci_map_page(np->pci_dev, frag->page, frag->page_offset+offset, bcnt,
                                          PCI_DMA_TODEVICE);
@@ -1365,7 +1396,7 @@ static void nv_tx_done(struct net_device *dev)
    struct sk_buff *skb;
 
    while (np->nic_tx != np->next_tx) {
-       i = np->nic_tx % TX_RING;
+       i = np->nic_tx % np->tx_ring_size;
 
        if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
            Flags = le32_to_cpu(np->tx_ring.orig[i].FlagLen);
@@ -1410,7 +1441,7 @@ static void nv_tx_done(struct net_device *dev)
        nv_release_txskb(dev, i);
        np->nic_tx++;
    }
-   if (np->next_tx - np->nic_tx < TX_LIMIT_START)
+   if (np->next_tx - np->nic_tx < np->tx_limit_start)
        netif_wake_queue(dev);
 }
 
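
Paired with the nv_start_xmit() check earlier in the patch, this forms the stop/wake hysteresis: the queue stops once more than tx_limit_stop descriptors would be outstanding and is only woken when fewer than tx_limit_start remain. The two sides together (defaults: stop above 255, wake below 254):

    /* nv_start_xmit(): too much outstanding -> stop */
    if ((np->next_tx - np->nic_tx + entries - 1) > np->tx_limit_stop)
        netif_stop_queue(dev);

    /* nv_tx_done(): enough reclaimed -> wake */
    if (np->next_tx - np->nic_tx < np->tx_limit_start)
        netif_wake_queue(dev);
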
@@ -1447,7 +1478,7 @@ static void nv_tx_timeout(struct net_device *dev)
               readl(base + i + 24), readl(base + i + 28));
    }
    printk(KERN_INFO "%s: Dumping tx ring\n", dev->name);
-   for (i=0;i<TX_RING;i+= 4) {
+   for (i=0;i<np->tx_ring_size;i+= 4) {
        if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
            printk(KERN_INFO "%03x: %08x %08x // %08x %08x // %08x %08x // %08x %08x\n",
                   i,
@@ -1563,10 +1594,10 @@ static void nv_rx_process(struct net_device *dev)
        struct sk_buff *skb;
        int len;
        int i;
-       if (np->cur_rx - np->refill_rx >= RX_RING)
+       if (np->cur_rx - np->refill_rx >= np->rx_ring_size)
            break;  /* we scanned the whole ring - do not continue */
 
-       i = np->cur_rx % RX_RING;
+       i = np->cur_rx % np->rx_ring_size;
        if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
            Flags = le32_to_cpu(np->rx_ring.orig[i].FlagLen);
            len = nv_descr_getlength(&np->rx_ring.orig[i], np->desc_ver);
@@ -1755,18 +1786,15 @@ static int nv_change_mtu(struct net_device *dev, int new_mtu)
        nv_drain_rx(dev);
        nv_drain_tx(dev);
        /* reinit driver view of the rx queue */
-       nv_init_rx(dev);
-       nv_init_tx(dev);
-       /* alloc new rx buffers */
        set_bufsize(dev);
-       if (nv_alloc_rx(dev)) {
+       if (nv_init_ring(dev)) {
            if (!np->in_shutdown)
                mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
        }
        /* reinit nic view of the rx queue */
        writel(np->rx_buf_sz, base + NvRegOffloadConfig);
        setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
-       writel( ((RX_RING-1) << NVREG_RINGSZ_RXSHIFT) + ((TX_RING-1) << NVREG_RINGSZ_TXSHIFT),
+       writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
            base + NvRegRingSizes);
        pci_push(base);
        writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
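
The hardware learns the ring sizes as (N - 1) values packed into a single register, presumably the origin of the RING_MAX_DESC_VER_* ceilings (field widths of 10 and 14 bits would give exactly 1024 and 16384). The same write now appears here, in nv_open() and in nv_set_ringparam():

    writel(((np->rx_ring_size - 1) << NVREG_RINGSZ_RXSHIFT) +
           ((np->tx_ring_size - 1) << NVREG_RINGSZ_TXSHIFT),
           base + NvRegRingSizes);
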
@@ -2685,6 +2713,149 @@ static int nv_set_tso(struct net_device *dev, u32 value)
    return -EOPNOTSUPP;
 }
 
+static void nv_get_ringparam(struct net_device *dev, struct ethtool_ringparam* ring)
+{
+   struct fe_priv *np = netdev_priv(dev);
+
+   ring->rx_max_pending = (np->desc_ver == DESC_VER_1) ? RING_MAX_DESC_VER_1 : RING_MAX_DESC_VER_2_3;
+   ring->rx_mini_max_pending = 0;
+   ring->rx_jumbo_max_pending = 0;
+   ring->tx_max_pending = (np->desc_ver == DESC_VER_1) ? RING_MAX_DESC_VER_1 : RING_MAX_DESC_VER_2_3;
+
+   ring->rx_pending = np->rx_ring_size;
+   ring->rx_mini_pending = 0;
+   ring->rx_jumbo_pending = 0;
+   ring->tx_pending = np->tx_ring_size;
+}
+
+static int nv_set_ringparam(struct net_device *dev, struct ethtool_ringparam* ring)
+{
+   struct fe_priv *np = netdev_priv(dev);
+   u8 __iomem *base = get_hwbase(dev);
+   u8 *rxtx_ring, *rx_skbuff, *tx_skbuff, *rx_dma, *tx_dma, *tx_dma_len;
+   dma_addr_t ring_addr;
+
+   if (ring->rx_pending < RX_RING_MIN ||
+       ring->tx_pending < TX_RING_MIN ||
+       ring->rx_mini_pending != 0 ||
+       ring->rx_jumbo_pending != 0 ||
+       (np->desc_ver == DESC_VER_1 &&
+        (ring->rx_pending > RING_MAX_DESC_VER_1 ||
+         ring->tx_pending > RING_MAX_DESC_VER_1)) ||
+       (np->desc_ver != DESC_VER_1 &&
+        (ring->rx_pending > RING_MAX_DESC_VER_2_3 ||
+         ring->tx_pending > RING_MAX_DESC_VER_2_3))) {
+       return -EINVAL;
+   }
+
+   /* allocate new rings */
+   if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
+       rxtx_ring = pci_alloc_consistent(np->pci_dev,
+                       sizeof(struct ring_desc) * (ring->rx_pending + ring->tx_pending),
+                       &ring_addr);
+   } else {
+       rxtx_ring = pci_alloc_consistent(np->pci_dev,
+                       sizeof(struct ring_desc_ex) * (ring->rx_pending + ring->tx_pending),
+                       &ring_addr);
+   }
+   rx_skbuff = kmalloc(sizeof(struct sk_buff*) * ring->rx_pending, GFP_KERNEL);
+   rx_dma = kmalloc(sizeof(dma_addr_t) * ring->rx_pending, GFP_KERNEL);
+   tx_skbuff = kmalloc(sizeof(struct sk_buff*) * ring->tx_pending, GFP_KERNEL);
+   tx_dma = kmalloc(sizeof(dma_addr_t) * ring->tx_pending, GFP_KERNEL);
+   tx_dma_len = kmalloc(sizeof(unsigned int) * ring->tx_pending, GFP_KERNEL);
+   if (!rxtx_ring || !rx_skbuff || !rx_dma || !tx_skbuff || !tx_dma || !tx_dma_len) {
+       /* fall back to old rings */
+       if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
+           if(rxtx_ring)
+               pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (ring->rx_pending + ring->tx_pending),
+                                   rxtx_ring, ring_addr);
+       } else {
+           if (rxtx_ring)
+               pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (ring->rx_pending + ring->tx_pending),
+                                   rxtx_ring, ring_addr);
+       }
+       if (rx_skbuff)
+           kfree(rx_skbuff);
+       if (rx_dma)
+           kfree(rx_dma);
+       if (tx_skbuff)
+           kfree(tx_skbuff);
+       if (tx_dma)
+           kfree(tx_dma);
+       if (tx_dma_len)
+           kfree(tx_dma_len);
+       goto exit;
+   }
+
+   if (netif_running(dev)) {
+       nv_disable_irq(dev);
+       spin_lock_bh(&dev->xmit_lock);
+       spin_lock(&np->lock);
+       /* stop engines */
+       nv_stop_rx(dev);
+       nv_stop_tx(dev);
+       nv_txrx_reset(dev);
+       /* drain queues */
+       nv_drain_rx(dev);
+       nv_drain_tx(dev);
+       /* delete queues */
+       free_rings(dev);
+   }
+
+   /* set new values */
+   np->rx_ring_size = ring->rx_pending;
+   np->tx_ring_size = ring->tx_pending;
+   np->tx_limit_stop = ring->tx_pending - TX_LIMIT_DIFFERENCE;
+   np->tx_limit_start = ring->tx_pending - TX_LIMIT_DIFFERENCE - 1;
+   if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
+       np->rx_ring.orig = (struct ring_desc*)rxtx_ring;
+       np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size];
+   } else {
+       np->rx_ring.ex = (struct ring_desc_ex*)rxtx_ring;
+       np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size];
+   }
+   np->rx_skbuff = (struct sk_buff**)rx_skbuff;
+   np->rx_dma = (dma_addr_t*)rx_dma;
+   np->tx_skbuff = (struct sk_buff**)tx_skbuff;
+   np->tx_dma = (dma_addr_t*)tx_dma;
+   np->tx_dma_len = (unsigned int*)tx_dma_len;
+   np->ring_addr = ring_addr;
+
+   memset(np->rx_skbuff, 0, sizeof(struct sk_buff*) * np->rx_ring_size);
+   memset(np->rx_dma, 0, sizeof(dma_addr_t) * np->rx_ring_size);
+   memset(np->tx_skbuff, 0, sizeof(struct sk_buff*) * np->tx_ring_size);
+   memset(np->tx_dma, 0, sizeof(dma_addr_t) * np->tx_ring_size);
+   memset(np->tx_dma_len, 0, sizeof(unsigned int) * np->tx_ring_size);
+
+   if (netif_running(dev)) {
+       /* reinit driver view of the queues */
+       set_bufsize(dev);
+       if (nv_init_ring(dev)) {
+           if (!np->in_shutdown)
+               mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
+       }
+
+       /* reinit nic view of the queues */
+       writel(np->rx_buf_sz, base + NvRegOffloadConfig);
+       setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
+       writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
+           base + NvRegRingSizes);
+       pci_push(base);
+       writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
+       pci_push(base);
+
+       /* restart engines */
+       nv_start_rx(dev);
+       nv_start_tx(dev);
+       spin_unlock(&np->lock);
+       spin_unlock_bh(&dev->xmit_lock);
+       nv_enable_irq(dev);
+   }
+   return 0;
+exit:
+   return -ENOMEM;
+}
+
 static struct ethtool_ops ops = {
    .get_drvinfo = nv_get_drvinfo,
    .get_link = ethtool_op_get_link,
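
nv_set_ringparam() is what `ethtool -G <dev> rx N tx N` ends up invoking (with nv_get_ringparam() backing `ethtool -g`), and it is deliberately transactional: every new allocation is made and checked before the running device is stopped and free_rings() drops the old state, so an allocation failure returns -ENOMEM with the interface untouched. A condensed sketch of that ordering; the helper names are hypothetical, the steps are the ones above:

    if (!alloc_new_rings(dev, ring))     /* hypothetical: pci_alloc_consistent + five kmallocs */
        return -ENOMEM;                  /* old rings still live, device keeps running */
    if (netif_running(dev))
        stop_drain_free(dev);            /* hypothetical: stop engines, drain queues, free_rings() */
    commit_new_rings(dev, ring);         /* hypothetical: sizes, limits, pointers, ring_addr */
    if (netif_running(dev))
        reinit_restart(dev);             /* hypothetical: nv_init_ring(), reprogram hw, restart */
    return 0;
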
@@ -2698,6 +2869,8 @@ static struct ethtool_ops ops = {
    .get_perm_addr = ethtool_op_get_perm_addr,
    .get_tso = ethtool_op_get_tso,
    .set_tso = nv_set_tso,
+   .get_ringparam = nv_get_ringparam,
+   .set_ringparam = nv_set_ringparam,
 };
 
 static void nv_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
@@ -2904,7 +3077,7 @@ static int nv_open(struct net_device *dev)
 
    /* 4) give hw rings */
    setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
-   writel( ((RX_RING-1) << NVREG_RINGSZ_RXSHIFT) + ((TX_RING-1) << NVREG_RINGSZ_TXSHIFT),
+   writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
        base + NvRegRingSizes);
 
    /* 5) continue setup */
@@ -3187,21 +3360,38 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
 
    dev->irq = pci_dev->irq;
 
+   np->rx_ring_size = RX_RING_DEFAULT;
+   np->tx_ring_size = TX_RING_DEFAULT;
+   np->tx_limit_stop = np->tx_ring_size - TX_LIMIT_DIFFERENCE;
+   np->tx_limit_start = np->tx_ring_size - TX_LIMIT_DIFFERENCE - 1;
+
    if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
        np->rx_ring.orig = pci_alloc_consistent(pci_dev,
-                       sizeof(struct ring_desc) * (RX_RING + TX_RING),
+                       sizeof(struct ring_desc) * (np->rx_ring_size + np->tx_ring_size),
                        &np->ring_addr);
        if (!np->rx_ring.orig)
            goto out_unmap;
-       np->tx_ring.orig = &np->rx_ring.orig[RX_RING];
+       np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size];
    } else {
        np->rx_ring.ex = pci_alloc_consistent(pci_dev,
-                       sizeof(struct ring_desc_ex) * (RX_RING + TX_RING),
+                       sizeof(struct ring_desc_ex) * (np->rx_ring_size + np->tx_ring_size),
                        &np->ring_addr);
        if (!np->rx_ring.ex)
            goto out_unmap;
-       np->tx_ring.ex = &np->rx_ring.ex[RX_RING];
+       np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size];
    }
+   np->rx_skbuff = kmalloc(sizeof(struct sk_buff*) * np->rx_ring_size, GFP_KERNEL);
+   np->rx_dma = kmalloc(sizeof(dma_addr_t) * np->rx_ring_size, GFP_KERNEL);
+   np->tx_skbuff = kmalloc(sizeof(struct sk_buff*) * np->tx_ring_size, GFP_KERNEL);
+   np->tx_dma = kmalloc(sizeof(dma_addr_t) * np->tx_ring_size, GFP_KERNEL);
+   np->tx_dma_len = kmalloc(sizeof(unsigned int) * np->tx_ring_size, GFP_KERNEL);
+   if (!np->rx_skbuff || !np->rx_dma || !np->tx_skbuff || !np->tx_dma || !np->tx_dma_len)
+       goto out_freering;
+   memset(np->rx_skbuff, 0, sizeof(struct sk_buff*) * np->rx_ring_size);
+   memset(np->rx_dma, 0, sizeof(dma_addr_t) * np->rx_ring_size);
+   memset(np->tx_skbuff, 0, sizeof(struct sk_buff*) * np->tx_ring_size);
+   memset(np->tx_dma, 0, sizeof(dma_addr_t) * np->tx_ring_size);
+   memset(np->tx_dma_len, 0, sizeof(unsigned int) * np->tx_ring_size);
 
    dev->open = nv_open;
    dev->stop = nv_close;
@@ -3323,7 +3513,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
    if (i == 33) {
        printk(KERN_INFO "%s: open: Could not find a valid PHY.\n",
               pci_name(pci_dev));
-       goto out_freering;
+       goto out_error;
    }
 
    /* reset it */
@@ -3337,7 +3527,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
    err = register_netdev(dev);
    if (err) {
        printk(KERN_INFO "forcedeth: unable to register netdev: %d\n", err);
-       goto out_freering;
+       goto out_error;
    }
    printk(KERN_INFO "%s: forcedeth.c: subsystem: %05x:%04x bound to %s\n",
           dev->name, pci_dev->subsystem_vendor, pci_dev->subsystem_device,
@@ -3345,14 +3535,10 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
 
    return 0;
 
-out_freering:
-   if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
-       pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (RX_RING + TX_RING),
-                           np->rx_ring.orig, np->ring_addr);
-   else
-       pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (RX_RING + TX_RING),
-                           np->rx_ring.ex, np->ring_addr);
+out_error:
    pci_set_drvdata(pci_dev, NULL);
+out_freering:
+   free_rings(dev);
 out_unmap:
    iounmap(get_hwbase(dev));
 out_relreg:
@@ -3368,15 +3554,11 @@ out:
 static void __devexit nv_remove(struct pci_dev *pci_dev)
 {
    struct net_device *dev = pci_get_drvdata(pci_dev);
-   struct fe_priv *np = netdev_priv(dev);
 
    unregister_netdev(dev);
 
    /* free all structures */
-   if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
-       pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (RX_RING + TX_RING), np->rx_ring.orig, np->ring_addr);
-   else
-       pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (RX_RING + TX_RING), np->rx_ring.ex, np->ring_addr);
+   free_rings(dev);
    iounmap(get_hwbase(dev));
    pci_release_regions(pci_dev);
    pci_disable_device(pci_dev);