Diffstat (limited to 'drivers/net/forcedeth.c')
-rw-r--r--  drivers/net/forcedeth.c  | 125
1 files changed, 107 insertions, 18 deletions
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index 64f0f697c958..91f09e583cea 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -85,6 +85,7 @@
  * 0.33: 16 May 2005: Support for MCP51 added.
  * 0.34: 18 Jun 2005: Add DEV_NEED_LINKTIMER to all nForce nics.
  * 0.35: 26 Jun 2005: Support for MCP55 added.
+ * 0.36: 28 Jul 2005: Add jumbo frame support.
  *
  * Known bugs:
  * We suspect that on some hardware no TX done interrupts are generated.
@@ -96,7 +97,7 @@
  * DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few
  * superfluous timer interrupts from the nic.
  */
-#define FORCEDETH_VERSION "0.35"
+#define FORCEDETH_VERSION "0.36"
 #define DRV_NAME "forcedeth"
 
 #include <linux/module.h>
@@ -379,9 +380,13 @@ struct ring_desc {
 #define TX_LIMIT_START 62
 
 /* rx/tx mac addr + type + vlan + align + slack*/
-#define RX_NIC_BUFSIZE (ETH_DATA_LEN + 64)
-/* even more slack */
-#define RX_ALLOC_BUFSIZE (ETH_DATA_LEN + 128)
+#define NV_RX_HEADERS (64)
+/* even more slack. */
+#define NV_RX_ALLOC_PAD (64)
+
+/* maximum mtu size */
+#define NV_PKTLIMIT_1 ETH_DATA_LEN /* hard limit not known */
+#define NV_PKTLIMIT_2 9100 /* Actual limit according to NVidia: 9202 */
 
 #define OOM_REFILL (1+HZ/20)
 #define POLL_WAIT (1+HZ/100)
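
For reference, a worked example of the receive sizing these constants imply, mirroring the set_bufsize() helper introduced later in this patch:

/* Worked example of the new rx sizing (numbers from the defines above):
 *   MTU 1500 (default): rx_buf_sz = ETH_DATA_LEN + NV_RX_HEADERS = 1564
 *   MTU 9000 (jumbo):   rx_buf_sz = 9000 + NV_RX_HEADERS        = 9064
 * nv_alloc_rx() then allocates rx_buf_sz + NV_RX_ALLOC_PAD bytes per skb,
 * i.e. 1628 or 9128 bytes respectively.
 */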
@@ -473,6 +478,7 @@ struct fe_priv {
         struct sk_buff *rx_skbuff[RX_RING];
         dma_addr_t rx_dma[RX_RING];
         unsigned int rx_buf_sz;
+        unsigned int pkt_limit;
         struct timer_list oom_kick;
         struct timer_list nic_poll;
 
@@ -792,7 +798,7 @@ static int nv_alloc_rx(struct net_device *dev)
                 nr = refill_rx % RX_RING;
                 if (np->rx_skbuff[nr] == NULL) {
 
-                        skb = dev_alloc_skb(RX_ALLOC_BUFSIZE);
+                        skb = dev_alloc_skb(np->rx_buf_sz + NV_RX_ALLOC_PAD);
                         if (!skb)
                                 break;
 
@@ -805,7 +811,7 @@
                                         PCI_DMA_FROMDEVICE);
                         np->rx_ring[nr].PacketBuffer = cpu_to_le32(np->rx_dma[nr]);
                         wmb();
-                        np->rx_ring[nr].FlagLen = cpu_to_le32(RX_NIC_BUFSIZE | NV_RX_AVAIL);
+                        np->rx_ring[nr].FlagLen = cpu_to_le32(np->rx_buf_sz | NV_RX_AVAIL);
                         dprintk(KERN_DEBUG "%s: nv_alloc_rx: Packet %d marked as Available\n",
                                         dev->name, refill_rx);
                         refill_rx++;
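
Both the skb allocation and the descriptor length field now use the per-device rx_buf_sz instead of compile-time constants, which is what makes the buffer size switchable at runtime:

/* Before: sizes fixed at build time       After: per-device, set at runtime
 *   dev_alloc_skb(RX_ALLOC_BUFSIZE)         dev_alloc_skb(np->rx_buf_sz + NV_RX_ALLOC_PAD)
 *   FlagLen = RX_NIC_BUFSIZE | NV_RX_AVAIL  FlagLen = np->rx_buf_sz | NV_RX_AVAIL
 */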
@@ -831,19 +837,31 @@ static void nv_do_rx_refill(unsigned long data)
         enable_irq(dev->irq);
 }
 
-static int nv_init_ring(struct net_device *dev)
+static void nv_init_rx(struct net_device *dev)
 {
         struct fe_priv *np = get_nvpriv(dev);
         int i;
 
-        np->next_tx = np->nic_tx = 0;
-        for (i = 0; i < TX_RING; i++)
-                np->tx_ring[i].FlagLen = 0;
-
         np->cur_rx = RX_RING;
         np->refill_rx = 0;
         for (i = 0; i < RX_RING; i++)
                 np->rx_ring[i].FlagLen = 0;
+}
+
+static void nv_init_tx(struct net_device *dev)
+{
+        struct fe_priv *np = get_nvpriv(dev);
+        int i;
+
+        np->next_tx = np->nic_tx = 0;
+        for (i = 0; i < TX_RING; i++)
+                np->tx_ring[i].FlagLen = 0;
+}
+
+static int nv_init_ring(struct net_device *dev)
+{
+        nv_init_tx(dev);
+        nv_init_rx(dev);
+        return nv_alloc_rx(dev);
-        return nv_alloc_rx(dev);
 }
 
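
Splitting nv_init_ring() lets the MTU path below rebuild the rx and tx rings individually; the resulting call structure is sketched here:

/* Call structure after this split (a sketch, not part of the patch):
 *   nv_open()       -> nv_init_ring(), which runs nv_init_tx(),
 *                      nv_init_rx() and then nv_alloc_rx()
 *   nv_change_mtu() -> calls nv_init_rx() and nv_init_tx() directly
 *                      (see the next hunk), then nv_alloc_rx()
 */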
@@ -1207,15 +1225,82 @@ next_pkt:
         }
 }
 
+static void set_bufsize(struct net_device *dev)
+{
+        struct fe_priv *np = netdev_priv(dev);
+
+        if (dev->mtu <= ETH_DATA_LEN)
+                np->rx_buf_sz = ETH_DATA_LEN + NV_RX_HEADERS;
+        else
+                np->rx_buf_sz = dev->mtu + NV_RX_HEADERS;
+}
+
 /*
  * nv_change_mtu: dev->change_mtu function
  * Called with dev_base_lock held for read.
  */
 static int nv_change_mtu(struct net_device *dev, int new_mtu)
 {
-        if (new_mtu > ETH_DATA_LEN)
+        struct fe_priv *np = get_nvpriv(dev);
+        int old_mtu;
+
+        if (new_mtu < 64 || new_mtu > np->pkt_limit)
                 return -EINVAL;
+
+        old_mtu = dev->mtu;
         dev->mtu = new_mtu;
+
+        /* return early if the buffer sizes will not change */
+        if (old_mtu <= ETH_DATA_LEN && new_mtu <= ETH_DATA_LEN)
+                return 0;
+        if (old_mtu == new_mtu)
+                return 0;
+
+        /* synchronized against open : rtnl_lock() held by caller */
+        if (netif_running(dev)) {
+                u8 *base = get_hwbase(dev);
+                /*
+                 * It seems that the nic preloads valid ring entries into an
+                 * internal buffer. The procedure for flushing everything is
+                 * guessed, there is probably a simpler approach.
+                 * Changing the MTU is a rare event, it shouldn't matter.
+                 */
+                disable_irq(dev->irq);
+                spin_lock_bh(&dev->xmit_lock);
+                spin_lock(&np->lock);
+                /* stop engines */
+                nv_stop_rx(dev);
+                nv_stop_tx(dev);
+                nv_txrx_reset(dev);
+                /* drain rx queue */
+                nv_drain_rx(dev);
+                nv_drain_tx(dev);
+                /* reinit driver view of the rx queue */
+                nv_init_rx(dev);
+                nv_init_tx(dev);
+                /* alloc new rx buffers */
+                set_bufsize(dev);
+                if (nv_alloc_rx(dev)) {
+                        if (!np->in_shutdown)
+                                mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
+                }
+                /* reinit nic view of the rx queue */
+                writel(np->rx_buf_sz, base + NvRegOffloadConfig);
+                writel((u32) np->ring_addr, base + NvRegRxRingPhysAddr);
+                writel((u32) (np->ring_addr + RX_RING*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr);
+                writel( ((RX_RING-1) << NVREG_RINGSZ_RXSHIFT) + ((TX_RING-1) << NVREG_RINGSZ_TXSHIFT),
+                        base + NvRegRingSizes);
+                pci_push(base);
+                writel(NVREG_TXRXCTL_KICK|np->desc_ver, get_hwbase(dev) + NvRegTxRxControl);
+                pci_push(base);
+
+                /* restart rx engine */
+                nv_start_rx(dev);
+                nv_start_tx(dev);
+                spin_unlock(&np->lock);
+                spin_unlock_bh(&dev->xmit_lock);
+                enable_irq(dev->irq);
+        }
         return 0;
 }
 
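
In short, the resize path above follows a stop/drain/rebuild/restart pattern; a condensed summary for reference (function and register names as in this driver):

/* Summary of the resize sequence in nv_change_mtu() above:
 *   take locks:   disable_irq -> dev->xmit_lock -> np->lock
 *   quiesce:      nv_stop_rx(), nv_stop_tx(), nv_txrx_reset()
 *   drain:        nv_drain_rx(), nv_drain_tx()  (frees the old-sized skbs)
 *   rebuild:      nv_init_rx(), nv_init_tx(), set_bufsize(), nv_alloc_rx()
 *   reprogram:    NvRegOffloadConfig, ring addresses/sizes, TXRXCTL kick
 *   restart:      nv_start_rx(), nv_start_tx(), release locks in reverse
 */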
@@ -1792,6 +1877,7 @@ static int nv_open(struct net_device *dev)
         writel(0, base + NvRegAdapterControl);
 
         /* 2) initialize descriptor rings */
+        set_bufsize(dev);
         oom = nv_init_ring(dev);
 
         writel(0, base + NvRegLinkSpeed);
@@ -1837,7 +1923,7 @@
         writel(NVREG_MISC1_FORCE | NVREG_MISC1_HD, base + NvRegMisc1);
         writel(readl(base + NvRegTransmitterStatus), base + NvRegTransmitterStatus);
         writel(NVREG_PFF_ALWAYS, base + NvRegPacketFilterFlags);
-        writel(NVREG_OFFLOAD_NORMAL, base + NvRegOffloadConfig);
+        writel(np->rx_buf_sz, base + NvRegOffloadConfig);
 
         writel(readl(base + NvRegReceiverStatus), base + NvRegReceiverStatus);
         get_random_bytes(&i, sizeof(i));
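
NvRegOffloadConfig now receives the actual receive buffer size rather than the fixed NVREG_OFFLOAD_NORMAL, so the register is programmed consistently in both paths:

/* The same register write appears in both code paths:
 *   nv_open():       writel(np->rx_buf_sz, base + NvRegOffloadConfig);
 *   nv_change_mtu(): writel(np->rx_buf_sz, base + NvRegOffloadConfig);
 */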
@@ -2007,13 +2093,16 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
 
         /* handle different descriptor versions */
         if (pci_dev->device == PCI_DEVICE_ID_NVIDIA_NVENET_1 ||
             pci_dev->device == PCI_DEVICE_ID_NVIDIA_NVENET_2 ||
             pci_dev->device == PCI_DEVICE_ID_NVIDIA_NVENET_3 ||
             pci_dev->device == PCI_DEVICE_ID_NVIDIA_NVENET_12 ||
-            pci_dev->device == PCI_DEVICE_ID_NVIDIA_NVENET_13)
+            pci_dev->device == PCI_DEVICE_ID_NVIDIA_NVENET_13) {
                 np->desc_ver = DESC_VER_1;
-        else
+                np->pkt_limit = NV_PKTLIMIT_1;
+        } else {
                 np->desc_ver = DESC_VER_2;
+                np->pkt_limit = NV_PKTLIMIT_2;
+        }
 
         err = -ENOMEM;
         np->base = ioremap(addr, NV_PCI_REGSZ);
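
The net effect of pkt_limit on MTU validation, by descriptor version (a summary using the values from the defines above; "ethX" is a placeholder interface name):

/* Effect of np->pkt_limit in nv_change_mtu():
 *   DESC_VER_1 nics: pkt_limit = NV_PKTLIMIT_1 = ETH_DATA_LEN (1500),
 *       so e.g. "ip link set dev ethX mtu 9000" fails with -EINVAL
 *   DESC_VER_2 nics: pkt_limit = NV_PKTLIMIT_2 = 9100,
 *       so MTUs up to 9100 are accepted and the rings are rebuilt
 */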