aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorMithlesh Thukral <mithlesh@linsyssoft.com>2009-01-19 09:53:22 -0500
committerGreg Kroah-Hartman <gregkh@suse.de>2009-04-03 17:53:10 -0400
commitd9d578bff771229c017582d746259ac282ee01e2 (patch)
treed9ee56c1568acc1065d54c5554035521bf0bb6d1
parent371d7a9e6f0486fb814582c46785fdb147d7215e (diff)
Staging: sxg: SXG SGL related cleanup in data structures and code
* Cleanup in allocation of SXG_SGLs. * Locking issues related to SglQLock. * XmtCmd and XmtZeroLock consistency fixes. Signed-off-by: LinSysSoft Sahara Team <saharaproj@linsyssoft.com> Signed-off-by: Christopher Harrer <charrer@alacritech.com> Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
-rw-r--r--drivers/staging/sxg/sxg.c365
-rw-r--r--drivers/staging/sxg/sxg.h37
-rw-r--r--drivers/staging/sxg/sxg_ethtool.c2
-rw-r--r--drivers/staging/sxg/sxghif.h14
4 files changed, 260 insertions, 158 deletions
diff --git a/drivers/staging/sxg/sxg.c b/drivers/staging/sxg/sxg.c
index 80e84768da12..b8e0e2b7360a 100644
--- a/drivers/staging/sxg/sxg.c
+++ b/drivers/staging/sxg/sxg.c
@@ -95,13 +95,13 @@ static int sxg_entry_halt(struct net_device *dev);
95static int sxg_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); 95static int sxg_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
96static int sxg_send_packets(struct sk_buff *skb, struct net_device *dev); 96static int sxg_send_packets(struct sk_buff *skb, struct net_device *dev);
97static int sxg_transmit_packet(struct adapter_t *adapter, struct sk_buff *skb); 97static int sxg_transmit_packet(struct adapter_t *adapter, struct sk_buff *skb);
98static void sxg_dumb_sgl(struct sxg_x64_sgl *pSgl, 98static int sxg_dumb_sgl(struct sxg_x64_sgl *pSgl,
99 struct sxg_scatter_gather *SxgSgl); 99 struct sxg_scatter_gather *SxgSgl);
100 100
101static void sxg_handle_interrupt(struct adapter_t *adapter); 101static void sxg_handle_interrupt(struct adapter_t *adapter);
102static int sxg_process_isr(struct adapter_t *adapter, u32 MessageId); 102static int sxg_process_isr(struct adapter_t *adapter, u32 MessageId);
103static u32 sxg_process_event_queue(struct adapter_t *adapter, u32 RssId); 103static u32 sxg_process_event_queue(struct adapter_t *adapter, u32 RssId);
104static void sxg_complete_slow_send(struct adapter_t *adapter); 104static void sxg_complete_slow_send(struct adapter_t *adapter, int irq_context);
105static struct sk_buff *sxg_slow_receive(struct adapter_t *adapter, 105static struct sk_buff *sxg_slow_receive(struct adapter_t *adapter,
106 struct sxg_event *Event); 106 struct sxg_event *Event);
107static void sxg_process_rcv_error(struct adapter_t *adapter, u32 ErrorStatus); 107static void sxg_process_rcv_error(struct adapter_t *adapter, u32 ErrorStatus);
@@ -112,8 +112,12 @@ static bool sxg_mac_filter(struct adapter_t *adapter,
112static struct net_device_stats *sxg_get_stats(struct net_device *dev); 112static struct net_device_stats *sxg_get_stats(struct net_device *dev);
113#endif 113#endif
114 114
115void SxgFreeResources(struct adapter_t *adapter); 115void sxg_free_resources(struct adapter_t *adapter);
116void SxgFreeRcvBlocks(struct adapter_t *adapter); 116void sxg_free_rcvblocks(struct adapter_t *adapter);
117void sxg_free_sgl_buffers(struct adapter_t *adapter);
118void sxg_unmap_resources(struct adapter_t *adapter);
119void sxg_free_mcast_addrs(struct adapter_t *adapter);
120void sxg_collect_statistics(struct adapter_t *adapter);
117 121
118#define XXXTODO 0 122#define XXXTODO 0
119 123
@@ -505,6 +509,12 @@ static int sxg_allocate_resources(struct adapter_t *adapter)
505 goto per_tcb_allocation_failed; 509 goto per_tcb_allocation_failed;
506 } 510 }
507 memset(adapter->RcvRings, 0, sizeof(struct sxg_rcv_ring) * 1); 511 memset(adapter->RcvRings, 0, sizeof(struct sxg_rcv_ring) * 1);
512 adapter->ucode_stats = kzalloc(sizeof(struct sxg_ucode_stats), GFP_ATOMIC);
513 adapter->pucode_stats = pci_map_single(adapter->pcidev,
514 adapter->ucode_stats,
515 sizeof(struct sxg_ucode_stats),
516 PCI_DMA_FROMDEVICE);
517// memset(adapter->ucode_stats, 0, sizeof(struct sxg_ucode_stats));
508 break; 518 break;
509 519
510 per_tcb_allocation_failed: 520 per_tcb_allocation_failed:
@@ -524,6 +534,13 @@ static int sxg_allocate_resources(struct adapter_t *adapter)
524 adapter->RcvRings = NULL; 534 adapter->RcvRings = NULL;
525 } 535 }
526 /* Loop around and try again.... */ 536 /* Loop around and try again.... */
537 if (adapter->ucode_stats) {
538 pci_unmap_single(adapter->pcidev,
539 sizeof(struct sxg_ucode_stats),
540 adapter->pucode_stats, PCI_DMA_FROMDEVICE);
541 adapter->ucode_stats = NULL;
542 }
543
527 } 544 }
528 545
529 DBG_ERROR("%s Initialize RCV ZERO and XMT ZERO rings\n", __func__); 546 DBG_ERROR("%s Initialize RCV ZERO and XMT ZERO rings\n", __func__);
@@ -1213,7 +1230,7 @@ static int sxg_process_isr(struct adapter_t *adapter, u32 MessageId)
1213 } 1230 }
1214 /* Slowpath send completions */ 1231 /* Slowpath send completions */
1215 if (Isr & SXG_ISR_SPSEND) { 1232 if (Isr & SXG_ISR_SPSEND) {
1216 sxg_complete_slow_send(adapter); 1233 sxg_complete_slow_send(adapter, 1);
1217 } 1234 }
1218 /* Dump */ 1235 /* Dump */
1219 if (Isr & SXG_ISR_UPC) { 1236 if (Isr & SXG_ISR_UPC) {
@@ -1400,27 +1417,37 @@ static u32 sxg_process_event_queue(struct adapter_t *adapter, u32 RssId)
1400 * 1417 *
1401 * Arguments - 1418 * Arguments -
1402 * adapter - A pointer to our adapter structure 1419 * adapter - A pointer to our adapter structure
1403 1420 * irq_context - An integer to denote if we are in interrupt context
1404 * Return 1421 * Return
1405 * None 1422 * None
1406 */ 1423 */
1407static void sxg_complete_slow_send(struct adapter_t *adapter) 1424static void sxg_complete_slow_send(struct adapter_t *adapter, int irq_context)
1408{ 1425{
1409 struct sxg_xmt_ring *XmtRing = &adapter->XmtRings[0]; 1426 struct sxg_xmt_ring *XmtRing = &adapter->XmtRings[0];
1410 struct sxg_ring_info *XmtRingInfo = &adapter->XmtRingZeroInfo; 1427 struct sxg_ring_info *XmtRingInfo = &adapter->XmtRingZeroInfo;
1411 u32 *ContextType; 1428 u32 *ContextType;
1412 struct sxg_cmd *XmtCmd; 1429 struct sxg_cmd *XmtCmd;
1430 unsigned long flags;
1431 unsigned long sgl_flags;
1432 unsigned int processed_count = 0;
1413 1433
1414 /* 1434 /*
1415 * NOTE - This lock is dropped and regrabbed in this loop. 1435 * NOTE - This lock is dropped and regrabbed in this loop.
1416 * This means two different processors can both be running/ 1436 * This means two different processors can both be running/
1417 * through this loop. Be *very* careful. 1437 * through this loop. Be *very* careful.
1418 */ 1438 */
1419 spin_lock(&adapter->XmtZeroLock); 1439 if(irq_context) {
1440 if(!spin_trylock(&adapter->XmtZeroLock))
1441 goto lock_busy;
1442 }
1443 else
1444 spin_lock_irqsave(&adapter->XmtZeroLock, flags);
1445
1420 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "CmpSnds", 1446 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "CmpSnds",
1421 adapter, XmtRingInfo->Head, XmtRingInfo->Tail, 0); 1447 adapter, XmtRingInfo->Head, XmtRingInfo->Tail, 0);
1422 1448
1423 while (XmtRingInfo->Tail != *adapter->XmtRingZeroIndex) { 1449 while ((XmtRingInfo->Tail != *adapter->XmtRingZeroIndex)
1450 && processed_count++ < SXG_COMPLETE_SLOW_SEND_LIMIT) {
1424 /* 1451 /*
1425 * Locate the current Cmd (ring descriptor entry), and 1452 * Locate the current Cmd (ring descriptor entry), and
1426 * associated SGL, and advance the tail 1453 * associated SGL, and advance the tail
@@ -1438,10 +1465,14 @@ static void sxg_complete_slow_send(struct adapter_t *adapter)
1438 struct sk_buff *skb; 1465 struct sk_buff *skb;
1439 struct sxg_scatter_gather *SxgSgl = 1466 struct sxg_scatter_gather *SxgSgl =
1440 (struct sxg_scatter_gather *)ContextType; 1467 (struct sxg_scatter_gather *)ContextType;
1468 dma64_addr_t FirstSgeAddress;
1469 u32 FirstSgeLength;
1441 1470
1442 /* Dumb-nic send. Command context is the dumb-nic SGL */ 1471 /* Dumb-nic send. Command context is the dumb-nic SGL */
1443 skb = (struct sk_buff *)ContextType; 1472 skb = (struct sk_buff *)ContextType;
1444 skb = SxgSgl->DumbPacket; 1473 skb = SxgSgl->DumbPacket;
1474 FirstSgeAddress = XmtCmd->Buffer.FirstSgeAddress;
1475 FirstSgeLength = XmtCmd->Buffer.FirstSgeLength;
1445 /* Complete the send */ 1476 /* Complete the send */
1446 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, 1477 SXG_TRACE(TRACE_SXG, SxgTraceBuffer,
1447 TRACE_IMPORTANT, "DmSndCmp", skb, 0, 1478 TRACE_IMPORTANT, "DmSndCmp", skb, 0,
@@ -1456,17 +1487,36 @@ static void sxg_complete_slow_send(struct adapter_t *adapter)
1456 * chimney send, which results in a double trip 1487 * chimney send, which results in a double trip
1457 * in SxgTcpOuput 1488 * in SxgTcpOuput
1458 */ 1489 */
1459 spin_unlock(&adapter->XmtZeroLock); 1490 if(irq_context)
1460 SXG_COMPLETE_DUMB_SEND(adapter, skb); 1491 spin_unlock(&adapter->XmtZeroLock);
1492 else
1493 spin_unlock_irqrestore(
1494 &adapter->XmtZeroLock, flags);
1495
1496 SxgSgl->DumbPacket = NULL;
1497 SXG_COMPLETE_DUMB_SEND(adapter, skb,
1498 FirstSgeAddress,
1499 FirstSgeLength);
1500 SXG_FREE_SGL_BUFFER(adapter, SxgSgl, NULL,
1501 irq_context);
1461 /* and reacquire.. */ 1502 /* and reacquire.. */
1462 spin_lock(&adapter->XmtZeroLock); 1503 if(irq_context) {
1504 if(!spin_trylock(&adapter->XmtZeroLock))
1505 goto lock_busy;
1506 }
1507 else
1508 spin_lock_irqsave(&adapter->XmtZeroLock, flags);
1463 } 1509 }
1464 break; 1510 break;
1465 default: 1511 default:
1466 ASSERT(0); 1512 ASSERT(0);
1467 } 1513 }
1468 } 1514 }
1469 spin_unlock(&adapter->XmtZeroLock); 1515 if(irq_context)
1516 spin_unlock(&adapter->XmtZeroLock);
1517 else
1518 spin_unlock_irqrestore(&adapter->XmtZeroLock, flags);
1519lock_busy:
1470 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "CmpSnd", 1520 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "CmpSnd",
1471 adapter, XmtRingInfo->Head, XmtRingInfo->Tail, 0); 1521 adapter, XmtRingInfo->Head, XmtRingInfo->Tail, 0);
1472} 1522}
@@ -1486,8 +1536,14 @@ static struct sk_buff *sxg_slow_receive(struct adapter_t *adapter,
1486 u32 BufferSize = adapter->ReceiveBufferSize; 1536 u32 BufferSize = adapter->ReceiveBufferSize;
1487 struct sxg_rcv_data_buffer_hdr *RcvDataBufferHdr; 1537 struct sxg_rcv_data_buffer_hdr *RcvDataBufferHdr;
1488 struct sk_buff *Packet; 1538 struct sk_buff *Packet;
1539 static int read_counter = 0;
1489 1540
1490 RcvDataBufferHdr = (struct sxg_rcv_data_buffer_hdr *) Event->HostHandle; 1541 RcvDataBufferHdr = (struct sxg_rcv_data_buffer_hdr *) Event->HostHandle;
1542 if(read_counter++ & 0x100)
1543 {
1544 sxg_collect_statistics(adapter);
1545 read_counter = 0;
1546 }
1491 ASSERT(RcvDataBufferHdr); 1547 ASSERT(RcvDataBufferHdr);
1492 ASSERT(RcvDataBufferHdr->State == SXG_BUFFER_ONCARD); 1548 ASSERT(RcvDataBufferHdr->State == SXG_BUFFER_ONCARD);
1493 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_IMPORTANT, "SlowRcv", Event, 1549 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_IMPORTANT, "SlowRcv", Event,
@@ -1560,12 +1616,13 @@ static struct sk_buff *sxg_slow_receive(struct adapter_t *adapter,
1560 RcvDataBufferHdr, Packet, Event->Length, 0); 1616 RcvDataBufferHdr, Packet, Event->Length, 0);
1561 /* Lastly adjust the receive packet length. */ 1617 /* Lastly adjust the receive packet length. */
1562 RcvDataBufferHdr->SxgDumbRcvPacket = NULL; 1618 RcvDataBufferHdr->SxgDumbRcvPacket = NULL;
1619 RcvDataBufferHdr->PhysicalAddress = NULL;
1563 SXG_ALLOCATE_RCV_PACKET(adapter, RcvDataBufferHdr, BufferSize); 1620 SXG_ALLOCATE_RCV_PACKET(adapter, RcvDataBufferHdr, BufferSize);
1564 if (RcvDataBufferHdr->skb) 1621 if (RcvDataBufferHdr->skb)
1565 { 1622 {
1566 spin_lock(&adapter->RcvQLock); 1623 spin_lock(&adapter->RcvQLock);
1567 SXG_FREE_RCV_DATA_BUFFER(adapter, RcvDataBufferHdr); 1624 SXG_FREE_RCV_DATA_BUFFER(adapter, RcvDataBufferHdr);
1568 adapter->RcvBuffersOnCard ++; 1625 // adapter->RcvBuffersOnCard ++;
1569 spin_unlock(&adapter->RcvQLock); 1626 spin_unlock(&adapter->RcvQLock);
1570 } 1627 }
1571 return (Packet); 1628 return (Packet);
@@ -1911,20 +1968,17 @@ static void __devexit sxg_entry_remove(struct pci_dev *pcidev)
1911 u32 mmio_start = 0; 1968 u32 mmio_start = 0;
1912 unsigned int mmio_len = 0; 1969 unsigned int mmio_len = 0;
1913 struct adapter_t *adapter = (struct adapter_t *) netdev_priv(dev); 1970 struct adapter_t *adapter = (struct adapter_t *) netdev_priv(dev);
1914 1971/*
1915 set_bit(ADAPT_DOWN, &adapter->state); 1972 set_bit(ADAPT_DOWN, &adapter->state);
1916 flush_scheduled_work(); 1973*/ flush_scheduled_work();
1917 1974
1918 /* Deallocate Resources */ 1975 /* Deallocate Resources */
1919 1976 unregister_netdev(dev);
1920 SxgFreeResources(adapter); 1977 sxg_free_resources(adapter);
1921 1978
1922 ASSERT(adapter); 1979 ASSERT(adapter);
1923 DBG_ERROR("sxg: %s ENTER dev[%p] adapter[%p]\n", __func__, dev, 1980 DBG_ERROR("sxg: %s ENTER dev[%p] adapter[%p]\n", __func__, dev,
1924 adapter); 1981 adapter);
1925 sxg_deregister_interrupt(adapter);
1926 sxg_unmap_mmio_space(adapter);
1927 DBG_ERROR("sxg: %s unregister_netdev\n", __func__);
1928 1982
1929 mmio_start = pci_resource_start(pcidev, 0); 1983 mmio_start = pci_resource_start(pcidev, 0);
1930 mmio_len = pci_resource_len(pcidev, 0); 1984 mmio_len = pci_resource_len(pcidev, 0);
@@ -1933,11 +1987,6 @@ static void __devexit sxg_entry_remove(struct pci_dev *pcidev)
1933 mmio_start, mmio_len); 1987 mmio_start, mmio_len);
1934 release_mem_region(mmio_start, mmio_len); 1988 release_mem_region(mmio_start, mmio_len);
1935 1989
1936/*
1937 DBG_ERROR("sxg: %s iounmap dev->base_addr[%x]\n", __func__,
1938 (unsigned int)dev->base_addr);
1939 iounmap((char *)dev->base_addr);
1940*/
1941 mmio_start = pci_resource_start(pcidev, 2); 1990 mmio_start = pci_resource_start(pcidev, 2);
1942 mmio_len = pci_resource_len(pcidev, 2); 1991 mmio_len = pci_resource_len(pcidev, 2);
1943 1992
@@ -1945,10 +1994,6 @@ static void __devexit sxg_entry_remove(struct pci_dev *pcidev)
1945 mmio_start, mmio_len); 1994 mmio_start, mmio_len);
1946 release_mem_region(mmio_start, mmio_len); 1995 release_mem_region(mmio_start, mmio_len);
1947 1996
1948 iounmap((char *)dev->base_addr);
1949 unregister_netdev(dev);
1950 //pci_release_regions(pcidev);
1951 //free_netdev(dev);
1952 pci_disable_device(pcidev); 1997 pci_disable_device(pcidev);
1953 1998
1954 DBG_ERROR("sxg: %s deallocate device\n", __func__); 1999 DBG_ERROR("sxg: %s deallocate device\n", __func__);
@@ -1978,6 +2023,7 @@ static int sxg_entry_halt(struct net_device *dev)
1978 2023
1979 spin_unlock_irqrestore(&sxg_global.driver_lock, sxg_global.flags); 2024 spin_unlock_irqrestore(&sxg_global.driver_lock, sxg_global.flags);
1980 2025
2026 sxg_deregister_interrupt(adapter);
1981 return (STATUS_SUCCESS); 2027 return (STATUS_SUCCESS);
1982} 2028}
1983 2029
@@ -2076,13 +2122,14 @@ static int sxg_send_packets(struct sk_buff *skb, struct net_device *dev)
2076#else 2122#else
2077 SXG_DROP_DUMB_SEND(adapter, skb); 2123 SXG_DROP_DUMB_SEND(adapter, skb);
2078 adapter->stats.tx_dropped++; 2124 adapter->stats.tx_dropped++;
2125 return NETDEV_TX_BUSY;
2079#endif 2126#endif
2080 } 2127 }
2081 DBG_ERROR("sxg: %s EXIT sxg_send_packets status[%x]\n", __func__, 2128 DBG_ERROR("sxg: %s EXIT sxg_send_packets status[%x]\n", __func__,
2082 status); 2129 status);
2083 2130
2084 xmit_done: 2131 xmit_done:
2085 return 0; 2132 return NETDEV_TX_OK;
2086} 2133}
2087 2134
2088/* 2135/*
@@ -2100,6 +2147,7 @@ static int sxg_transmit_packet(struct adapter_t *adapter, struct sk_buff *skb)
2100{ 2147{
2101 struct sxg_x64_sgl *pSgl; 2148 struct sxg_x64_sgl *pSgl;
2102 struct sxg_scatter_gather *SxgSgl; 2149 struct sxg_scatter_gather *SxgSgl;
2150 unsigned long sgl_flags;
2103 /* void *SglBuffer; */ 2151 /* void *SglBuffer; */
2104 /* u32 SglBufferLength; */ 2152 /* u32 SglBufferLength; */
2105 2153
@@ -2111,7 +2159,7 @@ static int sxg_transmit_packet(struct adapter_t *adapter, struct sk_buff *skb)
2111 adapter, skb, 0, 0); 2159 adapter, skb, 0, 0);
2112 2160
2113 /* Allocate a SGL buffer */ 2161 /* Allocate a SGL buffer */
2114 SXG_GET_SGL_BUFFER(adapter, SxgSgl); 2162 SXG_GET_SGL_BUFFER(adapter, SxgSgl, 0);
2115 if (!SxgSgl) { 2163 if (!SxgSgl) {
2116 adapter->Stats.NoSglBuf++; 2164 adapter->Stats.NoSglBuf++;
2117 adapter->Stats.XmtErrors++; 2165 adapter->Stats.XmtErrors++;
@@ -2129,9 +2177,7 @@ static int sxg_transmit_packet(struct adapter_t *adapter, struct sk_buff *skb)
2129 pSgl = NULL; 2177 pSgl = NULL;
2130 2178
2131 /* Call the common sxg_dumb_sgl routine to complete the send. */ 2179 /* Call the common sxg_dumb_sgl routine to complete the send. */
2132 sxg_dumb_sgl(pSgl, SxgSgl); 2180 return (sxg_dumb_sgl(pSgl, SxgSgl));
2133 /* Return success sxg_dumb_sgl (or something later) will complete it.*/
2134 return (STATUS_SUCCESS);
2135} 2181}
2136 2182
2137/* 2183/*
@@ -2142,9 +2188,9 @@ static int sxg_transmit_packet(struct adapter_t *adapter, struct sk_buff *skb)
2142 * SxgSgl - struct sxg_scatter_gather 2188 * SxgSgl - struct sxg_scatter_gather
2143 * 2189 *
2144 * Return Value: 2190 * Return Value:
2145 * None. 2191 * Status of send operation.
2146 */ 2192 */
2147static void sxg_dumb_sgl(struct sxg_x64_sgl *pSgl, 2193static int sxg_dumb_sgl(struct sxg_x64_sgl *pSgl,
2148 struct sxg_scatter_gather *SxgSgl) 2194 struct sxg_scatter_gather *SxgSgl)
2149{ 2195{
2150 struct adapter_t *adapter = SxgSgl->adapter; 2196 struct adapter_t *adapter = SxgSgl->adapter;
@@ -2158,6 +2204,7 @@ static void sxg_dumb_sgl(struct sxg_x64_sgl *pSgl,
2158 /* unsigned int BufLen; */ 2204 /* unsigned int BufLen; */
2159 /* u32 SglOffset; */ 2205 /* u32 SglOffset; */
2160 u64 phys_addr; 2206 u64 phys_addr;
2207 unsigned long flags;
2161 2208
2162 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DumbSgl", 2209 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DumbSgl",
2163 pSgl, SxgSgl, 0, 0); 2210 pSgl, SxgSgl, 0, 0);
@@ -2179,16 +2226,17 @@ static void sxg_dumb_sgl(struct sxg_x64_sgl *pSgl,
2179 SxgSgl->Sgl.NumberOfElements = 1; 2226 SxgSgl->Sgl.NumberOfElements = 1;
2180 2227
2181 /* Grab the spinlock and acquire a command */ 2228 /* Grab the spinlock and acquire a command */
2182 spin_lock(&adapter->XmtZeroLock); 2229 spin_lock_irqsave(&adapter->XmtZeroLock, flags);
2183 SXG_GET_CMD(XmtRing, XmtRingInfo, XmtCmd, SxgSgl); 2230 SXG_GET_CMD(XmtRing, XmtRingInfo, XmtCmd, SxgSgl);
2184 if (XmtCmd == NULL) { 2231 if (XmtCmd == NULL) {
2185 /* 2232 /*
2186 * Call sxg_complete_slow_send to see if we can 2233 * Call sxg_complete_slow_send to see if we can
2187 * free up any XmtRingZero entries and then try again 2234 * free up any XmtRingZero entries and then try again
2188 */ 2235 */
2189 spin_unlock(&adapter->XmtZeroLock); 2236
2190 sxg_complete_slow_send(adapter); 2237 spin_unlock_irqrestore(&adapter->XmtZeroLock, flags);
2191 spin_lock(&adapter->XmtZeroLock); 2238 sxg_complete_slow_send(adapter, 0);
2239 spin_lock_irqsave(&adapter->XmtZeroLock, flags);
2192 SXG_GET_CMD(XmtRing, XmtRingInfo, XmtCmd, SxgSgl); 2240 SXG_GET_CMD(XmtRing, XmtRingInfo, XmtCmd, SxgSgl);
2193 if (XmtCmd == NULL) { 2241 if (XmtCmd == NULL) {
2194 adapter->Stats.XmtZeroFull++; 2242 adapter->Stats.XmtZeroFull++;
@@ -2235,10 +2283,10 @@ static void sxg_dumb_sgl(struct sxg_x64_sgl *pSgl,
2235 */ 2283 */
2236 WRITE_REG(adapter->UcodeRegs[0].XmtCmd, 1, TRUE); 2284 WRITE_REG(adapter->UcodeRegs[0].XmtCmd, 1, TRUE);
2237 adapter->Stats.XmtQLen++; /* Stats within lock */ 2285 adapter->Stats.XmtQLen++; /* Stats within lock */
2238 spin_unlock(&adapter->XmtZeroLock); 2286 spin_unlock_irqrestore(&adapter->XmtZeroLock, flags);
2239 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XDumSgl2", 2287 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XDumSgl2",
2240 XmtCmd, pSgl, SxgSgl, 0); 2288 XmtCmd, pSgl, SxgSgl, 0);
2241 return; 2289 return STATUS_SUCCESS;
2242 2290
2243 abortcmd: 2291 abortcmd:
2244 /* 2292 /*
@@ -2249,7 +2297,8 @@ static void sxg_dumb_sgl(struct sxg_x64_sgl *pSgl,
2249 if (XmtCmd) { 2297 if (XmtCmd) {
2250 SXG_ABORT_CMD(XmtRingInfo); 2298 SXG_ABORT_CMD(XmtRingInfo);
2251 } 2299 }
2252 spin_unlock(&adapter->XmtZeroLock); 2300 spin_unlock_irqrestore(&adapter->XmtZeroLock, flags);
2301 return STATUS_FAILURE;
2253 2302
2254/* 2303/*
2255 * failsgl: 2304 * failsgl:
@@ -2260,7 +2309,7 @@ static void sxg_dumb_sgl(struct sxg_x64_sgl *pSgl,
2260 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_IMPORTANT, "DumSGFal", 2309 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_IMPORTANT, "DumSGFal",
2261 pSgl, SxgSgl, XmtRingInfo->Head, XmtRingInfo->Tail); 2310 pSgl, SxgSgl, XmtRingInfo->Head, XmtRingInfo->Tail);
2262 /* SxgSgl->DumbPacket is the skb */ 2311 /* SxgSgl->DumbPacket is the skb */
2263 SXG_COMPLETE_DUMB_SEND(adapter, SxgSgl->DumbPacket); 2312 // SXG_COMPLETE_DUMB_SEND(adapter, SxgSgl->DumbPacket);
2264} 2313}
2265 2314
2266/* 2315/*
@@ -3065,58 +3114,85 @@ static void sxg_unmap_mmio_space(struct adapter_t *adapter)
3065 */ 3114 */
3066#endif 3115#endif
3067} 3116}
3068/* 3117
3069void SxgFreeRcvBlocks(struct adapter_t *adapter) 3118void sxg_free_sgl_buffers(struct adapter_t *adapter)
3070{ 3119{
3071 u32 i;
3072 struct list_entry *ple; 3120 struct list_entry *ple;
3073 struct sxg_rcv_block_hdr *Hdr; 3121 struct sxg_scatter_gather *Sgl;
3074 struct sxg_rcv_data_buffer_hdr *RcvDataBufferHdr;
3075 u32 FreeBuffers = 0, FreeBlocks = 0;
3076 3122
3077 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "FrRcvBlk", 3123 while(!(IsListEmpty(&adapter->AllSglBuffers))) {
3078 adapter, 0, 0, 0); 3124 ple = RemoveHeadList(&adapter->AllSglBuffers);
3125 Sgl = container_of(ple, struct sxg_scatter_gather, AllList);
3126 kfree(Sgl);
3127 adapter->AllSglBufferCount--;
3128 }
3129}
3130
3131void sxg_free_rcvblocks(struct adapter_t *adapter)
3132{
3133 u32 i;
3134 void *temp_RcvBlock;
3135 struct list_entry *ple;
3136 struct sxg_rcv_block_hdr *RcvBlockHdr;
3137 struct sxg_rcv_data_buffer_hdr *RcvDataBufferHdr;
3138 ASSERT((adapter->state == SXG_STATE_INITIALIZING) ||
3139 (adapter->state == SXG_STATE_HALTING));
3140 while(!(IsListEmpty(&adapter->AllRcvBlocks))) {
3141
3142 ple = RemoveHeadList(&adapter->AllRcvBlocks);
3143 RcvBlockHdr = container_of(ple, struct sxg_rcv_block_hdr, AllList);
3144
3145 if(RcvBlockHdr->VirtualAddress) {
3146 temp_RcvBlock = RcvBlockHdr->VirtualAddress;
3147
3148 for(i=0; i< SXG_RCV_DESCRIPTORS_PER_BLOCK;
3149 i++, temp_RcvBlock += SXG_RCV_DATA_HDR_SIZE) {
3150 RcvDataBufferHdr =
3151 (struct sxg_rcv_data_buffer_hdr *)temp_RcvBlock;
3152 SXG_FREE_RCV_PACKET(RcvDataBufferHdr);
3153 }
3154 }
3079 3155
3080 ASSERT((adapter->State == SXG_STATE_INITIALIZING) || 3156 pci_free_consistent(adapter->pcidev,
3081 (pAdapt->State == SXG_STATE_HALTING)); 3157 SXG_RCV_BLOCK_SIZE(SXG_RCV_DATA_HDR_SIZE),
3158 RcvBlockHdr->VirtualAddress,
3159 RcvBlockHdr->PhysicalAddress);
3160 adapter->AllRcvBlockCount--;
3161 }
3162 ASSERT(adapter->AllRcvBlockCount == 0);
3163 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XFrRBlk",
3164 adapter, 0, 0, 0);
3165}
3166void sxg_free_mcast_addrs(struct adapter_t *adapter)
3167{
3168 struct sxg_multicast_address *address;
3169 while(adapter->MulticastAddrs) {
3170 address = adapter->MulticastAddrs;
3171 adapter->MulticastAddrs = address->Next;
3172 kfree(address);
3173 }
3174
3175 adapter->MulticastMask= 0;
3176}
3082 3177
3083 for(i = 0; i < SXG_MAX_CPU; i++) { 3178void sxg_unmap_resources(struct adapter_t *adapter)
3084 FreeBuffers += pAdapt->PerCpuResources[i].FreeReceiveBuffers.Count; 3179{
3085 FreeBlocks += pAdapt->PerCpuResources[i].FreeReceiveBlocks.Count; 3180 if(adapter->HwRegs) {
3086 pAdapt->PerCpuResources[i].FreeReceiveBuffers.Count = 0; 3181 iounmap((void *)adapter->HwRegs);
3087 pAdapt->PerCpuResources[i].FreeReceiveBuffers.FreeList = NULL; 3182 }
3088 pAdapt->PerCpuResources[i].FreeReceiveBlocks.Count = 0; 3183 if(adapter->UcodeRegs) {
3089 pAdapt->PerCpuResources[i].FreeReceiveBlocks.FreeList = NULL; 3184 iounmap((void *)adapter->UcodeRegs);
3090 }
3091 FreeBuffers += pAdapt->GlobalResources.FreeReceiveBuffers.Count;
3092 FreeBlocks += pAdapt->GlobalResources.FreeReceiveBlocks.Count;
3093 pAdapt->GlobalResources.FreeReceiveBuffers.Count = 0;
3094 pAdapt->GlobalResources.FreeReceiveBuffers.FreeList = NULL;
3095 pAdapt->GlobalResources.FreeReceiveBlocks.Count = 0;
3096 pAdapt->GlobalResources.FreeReceiveBlocks.FreeList = NULL;
3097 ASSERT(FreeBlocks == pAdapt->AllRcvBlockCount); // See SXG_RCV_BLOCK
3098 ASSERT(FreeBuffers ==
3099 (pAdapt->AllRcvBlockCount * SXG_RCV_DESCRIPTORS_PER_BLOCK)); // See SXG_RCV_BLOCK
3100
3101 while(!(IsListEmpty(&pAdapt->AllRcvBlocks))) {
3102 ple = RemoveHeadList(&pAdapt->AllRcvBlocks);
3103 Hdr = CONTAINING_RECORD(ple, SXG_RCV_BLOCK_HDR, AllList);
3104 NdisMFreeSharedMemory(pAdapt->MiniportHandle,
3105 SXG_RCV_BLOCK_SIZE(pAdapt->ReceiveBufferSize),
3106 TRUE,
3107 Hdr->VirtualAddress,
3108 Hdr->PhysicalAddress);
3109 pAdapt->AllRcvBlockCount--;
3110 } 3185 }
3111 ASSERT(pAdapt->AllRcvBlockCount == 0); 3186
3112 SLIC_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XFrRBlk", 3187 ASSERT(adapter->AllRcvBlockCount == 0);
3113 pAdapt, 0, 0, 0); 3188 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XFrRBlk",
3189 adapter, 0, 0, 0);
3114} 3190}
3115*/ 3191
3116//#if XXXTODO 3192
3117 3193
3118/* 3194/*
3119 * SxgFreeResources - Free everything allocated in SxgAllocateResources 3195 * sxg_free_resources - Free everything allocated in SxgAllocateResources
3120 * 3196 *
3121 * Arguments - 3197 * Arguments -
3122 * adapter - A pointer to our adapter structure 3198 * adapter - A pointer to our adapter structure
@@ -3124,14 +3200,10 @@ void SxgFreeRcvBlocks(struct adapter_t *adapter)
3124 * Return 3200 * Return
3125 * none 3201 * none
3126 */ 3202 */
3127void SxgFreeResources(struct adapter_t *adapter) 3203void sxg_free_resources(struct adapter_t *adapter)
3128{ 3204{
3129 u32 RssIds, IsrCount; 3205 u32 RssIds, IsrCount;
3130 u32 i; 3206 u32 i;
3131/*
3132 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "FreeRes",
3133 adapter, adapter->MaxTcbs, 0, 0);
3134*/
3135 RssIds = SXG_RSS_CPU_COUNT(adapter); 3207 RssIds = SXG_RSS_CPU_COUNT(adapter);
3136 IsrCount = adapter->MsiEnabled ? RssIds : 1; 3208 IsrCount = adapter->MsiEnabled ? RssIds : 1;
3137 3209
@@ -3142,14 +3214,13 @@ void SxgFreeResources(struct adapter_t *adapter)
3142 */ 3214 */
3143 return; 3215 return;
3144 } 3216 }
3145/* 3217
3146 if (!(IsListEmpty(&adapter->AllRcvBlocks))) { 3218 if (!(IsListEmpty(&adapter->AllRcvBlocks))) {
3147 SxgFreeRcvBlocks(adapter); 3219 sxg_free_rcvblocks(adapter);
3148 } 3220 }
3149 if (!(IsListEmpty(&adapter->AllSglBuffers))) { 3221 if (!(IsListEmpty(&adapter->AllSglBuffers))) {
3150 SxgFreeSglBuffers(adapter); 3222 sxg_free_sgl_buffers(adapter);
3151 } 3223 }
3152*/
3153 3224
3154 if (adapter->XmtRingZeroIndex) { 3225 if (adapter->XmtRingZeroIndex) {
3155 pci_free_consistent(adapter->pcidev, 3226 pci_free_consistent(adapter->pcidev,
@@ -3157,82 +3228,49 @@ void SxgFreeResources(struct adapter_t *adapter)
3157 adapter->XmtRingZeroIndex, 3228 adapter->XmtRingZeroIndex,
3158 adapter->PXmtRingZeroIndex); 3229 adapter->PXmtRingZeroIndex);
3159 } 3230 }
3160 printk("VSS Free Isr\n");
3161 if (adapter->Isr) { 3231 if (adapter->Isr) {
3162 pci_free_consistent(adapter->pcidev, 3232 pci_free_consistent(adapter->pcidev,
3163 sizeof(u32) * IsrCount, 3233 sizeof(u32) * IsrCount,
3164 adapter->Isr, adapter->PIsr); 3234 adapter->Isr, adapter->PIsr);
3165 } 3235 }
3166 3236
3167 printk("VSS Free EventRings\n");
3168 if (adapter->EventRings) { 3237 if (adapter->EventRings) {
3169 pci_free_consistent(adapter->pcidev, 3238 pci_free_consistent(adapter->pcidev,
3170 sizeof(struct sxg_event_ring) * RssIds, 3239 sizeof(struct sxg_event_ring) * RssIds,
3171 adapter->EventRings, adapter->PEventRings); 3240 adapter->EventRings, adapter->PEventRings);
3172 } 3241 }
3173/*
3174 printk("VSS Free RcvRings\n");
3175 if (adapter->RcvRings) { 3242 if (adapter->RcvRings) {
3176 pci_free_consistent(adapter->pcidev, 3243 pci_free_consistent(adapter->pcidev,
3177 sizeof(struct sxg_rcv_ring) * 4096, 3244 sizeof(struct sxg_rcv_ring) * 1,
3178 adapter->RcvRings, 3245 adapter->RcvRings,
3179 adapter->PRcvRings); 3246 adapter->PRcvRings);
3180 adapter->RcvRings = NULL; 3247 adapter->RcvRings = NULL;
3181 } 3248 }
3182 3249
3183 printk("VSS Free XmtRings\n");
3184 if(adapter->XmtRings) { 3250 if(adapter->XmtRings) {
3185 pci_free_consistent(adapter->pcidev, 3251 pci_free_consistent(adapter->pcidev,
3186 sizeof(struct sxg_xmt_ring) * 4096, 3252 sizeof(struct sxg_xmt_ring) * 1,
3187 adapter->XmtRings, 3253 adapter->XmtRings,
3188 adapter->PXmtRings); 3254 adapter->PXmtRings);
3189 adapter->XmtRings = NULL; 3255 adapter->XmtRings = NULL;
3190 } 3256 }
3191 3257
3192*/ 3258 if (adapter->ucode_stats) {
3259 pci_unmap_single(adapter->pcidev,
3260 sizeof(struct sxg_ucode_stats),
3261 adapter->pucode_stats, PCI_DMA_FROMDEVICE);
3262 adapter->ucode_stats = NULL;
3263 }
3193 3264
3194/*
3195 3265
3196 SXG_FREE_PACKET_POOL(adapter->PacketPoolHandle);
3197 SXG_FREE_BUFFER_POOL(adapter->BufferPoolHandle);
3198*/
3199 /* Unmap register spaces */ 3266 /* Unmap register spaces */
3200 // SxgUnmapResources(adapter); 3267 sxg_unmap_resources(adapter);
3201
3202 /* Deregister DMA */
3203/* if (adapter->DmaHandle) {
3204 SXG_DEREGISTER_DMA(adapter->DmaHandle);
3205 }
3206*/ /* Deregister interrupt */
3207 // SxgDeregisterInterrupt(adapter);
3208
3209 /* Possibly free system info (5.2 only) */
3210 // SXG_RELEASE_SYSTEM_INFO(adapter);
3211 3268
3212 //SxgDiagFreeResources(adapter); 3269 sxg_free_mcast_addrs(adapter);
3213 3270
3214 // SxgFreeMCastAddrs(adapter);
3215/*
3216 if (SXG_TIMER_ALLOCATED(adapter->ResetTimer)) {
3217 SXG_CANCEL_TIMER(adapter->ResetTimer, TimerCancelled);
3218 SXG_FREE_TIMER(adapter->ResetTimer);
3219 }
3220 if (SXG_TIMER_ALLOCATED(adapter->RssTimer)) {
3221 SXG_CANCEL_TIMER(adapter->RssTimer, TimerCancelled);
3222 SXG_FREE_TIMER(adapter->RssTimer);
3223 }
3224 if (SXG_TIMER_ALLOCATED(adapter->OffloadTimer)) {
3225 SXG_CANCEL_TIMER(adapter->OffloadTimer, TimerCancelled);
3226 SXG_FREE_TIMER(adapter->OffloadTimer);
3227 }
3228*/
3229 adapter->BasicAllocations = FALSE; 3271 adapter->BasicAllocations = FALSE;
3230 3272
3231/* SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XFreeRes",
3232 adapter, adapter->MaxTcbs, 0, 0);
3233*/
3234} 3273}
3235// #endif
3236 3274
3237/* 3275/*
3238 * sxg_allocate_complete - 3276 * sxg_allocate_complete -
@@ -3311,8 +3349,12 @@ static int sxg_allocate_buffer_memory(struct adapter_t *adapter,
3311 ++adapter->AllocationsPending; 3349 ++adapter->AllocationsPending;
3312 spin_unlock(&adapter->AdapterLock); 3350 spin_unlock(&adapter->AdapterLock);
3313 3351
3314 /* At initialization time allocate resources synchronously. */ 3352 if(BufferType != SXG_BUFFER_TYPE_SGL)
3315 Buffer = pci_alloc_consistent(adapter->pcidev, Size, &pBuffer); 3353 Buffer = pci_alloc_consistent(adapter->pcidev, Size, &pBuffer);
3354 else {
3355 Buffer = kzalloc(Size, GFP_ATOMIC);
3356 pBuffer = NULL;
3357 }
3316 if (Buffer == NULL) { 3358 if (Buffer == NULL) {
3317 spin_lock(&adapter->AdapterLock); 3359 spin_lock(&adapter->AdapterLock);
3318 /* 3360 /*
@@ -3468,19 +3510,25 @@ static void sxg_allocate_sgl_buffer_complete(struct adapter_t *adapter,
3468 dma_addr_t PhysicalAddress, 3510 dma_addr_t PhysicalAddress,
3469 u32 Length) 3511 u32 Length)
3470{ 3512{
3513 unsigned long sgl_flags;
3471 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "AlSglCmp", 3514 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "AlSglCmp",
3472 adapter, SxgSgl, Length, 0); 3515 adapter, SxgSgl, Length, 0);
3473 spin_lock(&adapter->SglQLock); 3516 if(!in_irq())
3517 spin_unlock_irqrestore(&adapter->SglQLock, sgl_flags);
3518 else
3519 spin_unlock(&adapter->SglQLock);
3474 adapter->AllSglBufferCount++; 3520 adapter->AllSglBufferCount++;
3475 memset(SxgSgl, 0, sizeof(struct sxg_scatter_gather)); 3521 /* PhysicalAddress; */
3476 /* *PhysicalAddress; */
3477 SxgSgl->PhysicalAddress = PhysicalAddress; 3522 SxgSgl->PhysicalAddress = PhysicalAddress;
3478 /* Initialize backpointer once */ 3523 /* Initialize backpointer once */
3479 SxgSgl->adapter = adapter; 3524 SxgSgl->adapter = adapter;
3480 InsertTailList(&adapter->AllSglBuffers, &SxgSgl->AllList); 3525 InsertTailList(&adapter->AllSglBuffers, &SxgSgl->AllList);
3481 spin_unlock(&adapter->SglQLock); 3526 if(!in_irq())
3527 spin_unlock_irqrestore(&adapter->SglQLock, sgl_flags);
3528 else
3529 spin_unlock(&adapter->SglQLock);
3482 SxgSgl->State = SXG_BUFFER_BUSY; 3530 SxgSgl->State = SXG_BUFFER_BUSY;
3483 SXG_FREE_SGL_BUFFER(adapter, SxgSgl, NULL); 3531 SXG_FREE_SGL_BUFFER(adapter, SxgSgl, NULL, in_irq());
3484 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAlSgl", 3532 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAlSgl",
3485 adapter, SxgSgl, Length, 0); 3533 adapter, SxgSgl, Length, 0);
3486} 3534}
@@ -3702,6 +3750,15 @@ static int sxg_fill_descriptor_block(struct adapter_t *adapter,
3702 SXG_GET_RCV_DATA_BUFFER(adapter, RcvDataBufferHdr); 3750 SXG_GET_RCV_DATA_BUFFER(adapter, RcvDataBufferHdr);
3703 ASSERT(RcvDataBufferHdr); 3751 ASSERT(RcvDataBufferHdr);
3704 ASSERT(RcvDataBufferHdr->SxgDumbRcvPacket); 3752 ASSERT(RcvDataBufferHdr->SxgDumbRcvPacket);
3753 if (!RcvDataBufferHdr->SxgDumbRcvPacket) {
3754 SXG_ALLOCATE_RCV_PACKET(adapter, RcvDataBufferHdr,
3755 adapter->ReceiveBufferSize);
3756 if(RcvDataBufferHdr->skb)
3757 RcvDataBufferHdr->SxgDumbRcvPacket =
3758 RcvDataBufferHdr->skb;
3759 else
3760 goto no_memory;
3761 }
3705 SXG_REINIATIALIZE_PACKET(RcvDataBufferHdr->SxgDumbRcvPacket); 3762 SXG_REINIATIALIZE_PACKET(RcvDataBufferHdr->SxgDumbRcvPacket);
3706 RcvDataBufferHdr->State = SXG_BUFFER_ONCARD; 3763 RcvDataBufferHdr->State = SXG_BUFFER_ONCARD;
3707 RcvDescriptorBlock->Descriptors[i].VirtualAddress = 3764 RcvDescriptorBlock->Descriptors[i].VirtualAddress =
@@ -3730,6 +3787,8 @@ static int sxg_fill_descriptor_block(struct adapter_t *adapter,
3730 adapter, adapter->RcvBuffersOnCard, 3787 adapter, adapter->RcvBuffersOnCard,
3731 adapter->FreeRcvBufferCount, adapter->AllRcvBlockCount); 3788 adapter->FreeRcvBufferCount, adapter->AllRcvBlockCount);
3732 return (STATUS_SUCCESS); 3789 return (STATUS_SUCCESS);
3790no_memory:
3791 return (-ENOMEM);
3733} 3792}
3734 3793
3735/* 3794/*
@@ -3823,7 +3882,8 @@ static void sxg_complete_descriptor_blocks(struct adapter_t *adapter,
3823 /* Now grab the RcvQLock lock and proceed */ 3882 /* Now grab the RcvQLock lock and proceed */
3824 spin_lock(&adapter->RcvQLock); 3883 spin_lock(&adapter->RcvQLock);
3825 ASSERT(Index != RcvRingInfo->Tail); 3884 ASSERT(Index != RcvRingInfo->Tail);
3826 while (RcvRingInfo->Tail != Index) { 3885 while (sxg_ring_get_forward_diff(RcvRingInfo, Index,
3886 RcvRingInfo->Tail) > 3) {
3827 /* 3887 /*
3828 * Locate the current Cmd (ring descriptor entry), and 3888 * Locate the current Cmd (ring descriptor entry), and
3829 * associated receive descriptor block, and advance 3889 * associated receive descriptor block, and advance
@@ -3854,6 +3914,15 @@ static void sxg_complete_descriptor_blocks(struct adapter_t *adapter,
3854 adapter, Index, RcvRingInfo->Head, RcvRingInfo->Tail); 3914 adapter, Index, RcvRingInfo->Head, RcvRingInfo->Tail);
3855} 3915}
3856 3916
3917/*
3918 * Read the statistics which the card has been maintaining.
3919 */
3920void sxg_collect_statistics(struct adapter_t *adapter)
3921{
3922 if(adapter->ucode_stats)
3923 WRITE_REG64(adapter, adapter->UcodeRegs[0].GetUcodeStats, adapter->pucode_stats, 0);
3924}
3925
3857static struct pci_driver sxg_driver = { 3926static struct pci_driver sxg_driver = {
3858 .name = sxg_driver_name, 3927 .name = sxg_driver_name,
3859 .id_table = sxg_pci_tbl, 3928 .id_table = sxg_pci_tbl,
diff --git a/drivers/staging/sxg/sxg.h b/drivers/staging/sxg/sxg.h
index a00c2dc97a1e..2d0ad1977ccc 100644
--- a/drivers/staging/sxg/sxg.h
+++ b/drivers/staging/sxg/sxg.h
@@ -121,9 +121,10 @@ struct sxg_stats {
121 121
122/* DUMB-NIC Send path definitions */ 122/* DUMB-NIC Send path definitions */
123 123
/*
 * Complete a dumb-NIC transmit: tear down the streaming DMA mapping
 * that was created for the skb's data, then release the skb.
 * _phys_addr/_size must match the pci_map_single() that mapped _skb.
 *
 * BUG FIX: pci_unmap_single()'s signature is
 * (dev, dma_addr, size, direction) -- the original passed _size as the
 * DMA address and _phys_addr as the length.
 */
#define SXG_COMPLETE_DUMB_SEND(_pAdapt, _skb, _phys_addr, _size)	\
do {									\
	ASSERT(_skb);							\
	pci_unmap_single((_pAdapt)->pcidev, (_phys_addr), (_size),	\
			 PCI_DMA_TODEVICE);				\
	dev_kfree_skb_irq(_skb);					\
} while (0)
128 129
129#define SXG_DROP_DUMB_SEND(_pAdapt, _skb) { \ 130#define SXG_DROP_DUMB_SEND(_pAdapt, _skb) { \
@@ -262,14 +263,20 @@ struct sxg_stats {
262} 263}
263 264
264/* SGL macros */ 265/* SGL macros */
/*
 * Return an SGL buffer to the adapter's free pool.
 * _NB is currently unused (kept for call-site compatibility).
 * _irq: nonzero when running in hardirq context -- plain spin_lock is
 * enough there since interrupts are already off; otherwise use the
 * irqsave variant.
 *
 * The flags word is declared locally inside the do/while block, so the
 * caller no longer has to provide an `sgl_flags` local (the original
 * silently depended on one being in scope).
 */
#define SXG_FREE_SGL_BUFFER(_pAdapt, _Sgl, _NB, _irq)			\
do {									\
	unsigned long sgl_flags = 0;					\
	if (!(_irq))							\
		spin_lock_irqsave(&(_pAdapt)->SglQLock, sgl_flags);	\
	else								\
		spin_lock(&(_pAdapt)->SglQLock);			\
	(_pAdapt)->FreeSglBufferCount++;				\
	ASSERT((_pAdapt)->AllSglBufferCount >=				\
	       (_pAdapt)->FreeSglBufferCount);				\
	ASSERT(!((_Sgl)->State & SXG_BUFFER_FREE));			\
	(_Sgl)->State = SXG_BUFFER_FREE;				\
	InsertTailList(&(_pAdapt)->FreeSglBuffers, &(_Sgl)->FreeList);	\
	if (!(_irq))							\
		spin_unlock_irqrestore(&(_pAdapt)->SglQLock, sgl_flags); \
	else								\
		spin_unlock(&(_pAdapt)->SglQLock);			\
} while (0)
274 281
275/* 282/*
@@ -279,7 +286,7 @@ struct sxg_stats {
279 * until after that. We're dealing with round numbers here, so we don't need to, 286 * until after that. We're dealing with round numbers here, so we don't need to,
280 * and not grabbing it avoids a possible double-trip. 287 * and not grabbing it avoids a possible double-trip.
281 */ 288 */
282#define SXG_GET_SGL_BUFFER(_pAdapt, _Sgl) { \ 289#define SXG_GET_SGL_BUFFER(_pAdapt, _Sgl, _irq) { \
283 struct list_entry *_ple; \ 290 struct list_entry *_ple; \
284 if ((_pAdapt->FreeSglBufferCount < SXG_MIN_SGL_BUFFERS) && \ 291 if ((_pAdapt->FreeSglBufferCount < SXG_MIN_SGL_BUFFERS) && \
285 (_pAdapt->AllSglBufferCount < SXG_MAX_SGL_BUFFERS) && \ 292 (_pAdapt->AllSglBufferCount < SXG_MAX_SGL_BUFFERS) && \
@@ -289,7 +296,10 @@ struct sxg_stats {
289 SXG_BUFFER_TYPE_SGL); \ 296 SXG_BUFFER_TYPE_SGL); \
290 } \ 297 } \
291 _Sgl = NULL; \ 298 _Sgl = NULL; \
292 spin_lock(&(_pAdapt)->SglQLock); \ 299 if(!_irq) \
300 spin_lock_irqsave(&(_pAdapt)->SglQLock, sgl_flags); \
301 else \
302 spin_lock(&(_pAdapt)->SglQLock); \
293 if((_pAdapt)->FreeSglBufferCount) { \ 303 if((_pAdapt)->FreeSglBufferCount) { \
294 ASSERT(!(IsListEmpty(&(_pAdapt)->FreeSglBuffers))); \ 304 ASSERT(!(IsListEmpty(&(_pAdapt)->FreeSglBuffers))); \
295 _ple = RemoveHeadList(&(_pAdapt)->FreeSglBuffers); \ 305 _ple = RemoveHeadList(&(_pAdapt)->FreeSglBuffers); \
@@ -300,7 +310,10 @@ struct sxg_stats {
300 (_Sgl)->State = SXG_BUFFER_BUSY; \ 310 (_Sgl)->State = SXG_BUFFER_BUSY; \
301 (_Sgl)->pSgl = NULL; \ 311 (_Sgl)->pSgl = NULL; \
302 } \ 312 } \
303 spin_unlock(&(_pAdapt)->SglQLock); \ 313 if(!_irq) \
314 spin_unlock_irqrestore(&(_pAdapt)->SglQLock, sgl_flags);\
315 else \
316 spin_unlock(&(_pAdapt)->SglQLock); \
304} 317}
305 318
306/* 319/*
@@ -416,6 +429,7 @@ struct sxg_driver {
416#undef STATUS_SUCCESS 429#undef STATUS_SUCCESS
417#endif 430#endif
418 431
432/* TODO: We need to try and use NETDEV_TX_* before posting this out */
419#define STATUS_SUCCESS 0 433#define STATUS_SUCCESS 0
420#define STATUS_PENDING 0 434#define STATUS_PENDING 0
421#define STATUS_FAILURE -1 435#define STATUS_FAILURE -1
@@ -631,6 +645,10 @@ struct adapter_t {
631 645
632 struct sxg_rcv_ring *RcvRings; /* Receive rings */ 646 struct sxg_rcv_ring *RcvRings; /* Receive rings */
633 dma_addr_t PRcvRings; /* Receive rings - physical address */ 647 dma_addr_t PRcvRings; /* Receive rings - physical address */
648 struct sxg_ucode_stats *ucode_stats; /* Ucode Stats */
649 /* Ucode Stats - physical address */
650 dma_addr_t pucode_stats;
651
634 struct sxg_ring_info RcvRingZeroInfo; /* Receive ring 0 info */ 652 struct sxg_ring_info RcvRingZeroInfo; /* Receive ring 0 info */
635 653
636 u32 * Isr; /* Interrupt status register */ 654 u32 * Isr; /* Interrupt status register */
@@ -765,4 +783,5 @@ struct slic_crash_info {
765#define SIOCSLICTRACEDUMP (SIOCDEVPRIVATE+11) 783#define SIOCSLICTRACEDUMP (SIOCDEVPRIVATE+11)
766 784
767extern struct ethtool_ops sxg_nic_ethtool_ops; 785extern struct ethtool_ops sxg_nic_ethtool_ops;
786#define SXG_COMPLETE_SLOW_SEND_LIMIT 128
768#endif /* __SXG_DRIVER_H__ */ 787#endif /* __SXG_DRIVER_H__ */
diff --git a/drivers/staging/sxg/sxg_ethtool.c b/drivers/staging/sxg/sxg_ethtool.c
index c15c250e9ec5..151f7f075b52 100644
--- a/drivers/staging/sxg/sxg_ethtool.c
+++ b/drivers/staging/sxg/sxg_ethtool.c
@@ -137,7 +137,7 @@ sxg_nic_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo)
137 struct adapter_t *adapter = netdev_priv(dev); 137 struct adapter_t *adapter = netdev_priv(dev);
138 strncpy(drvinfo->driver, sxg_driver_name, 32); 138 strncpy(drvinfo->driver, sxg_driver_name, 32);
139 strncpy(drvinfo->version, SXG_DRV_VERSION, 32); 139 strncpy(drvinfo->version, SXG_DRV_VERSION, 32);
140 strncpy(drvinfo->fw_version, SAHARA_UCODE_VERS_STRING, 32); 140// strncpy(drvinfo->fw_version, SAHARA_UCODE_VERS_STRING, 32);
141 strncpy(drvinfo->bus_info, pci_name(adapter->pcidev), 32); 141 strncpy(drvinfo->bus_info, pci_name(adapter->pcidev), 32);
142 /* TODO : Read the major and minor number of firmware. Is this 142 /* TODO : Read the major and minor number of firmware. Is this
143 * from the FLASH/EEPROM or download file ? 143 * from the FLASH/EEPROM or download file ?
diff --git a/drivers/staging/sxg/sxghif.h b/drivers/staging/sxg/sxghif.h
index 5a9e2712c89a..faba61529dd3 100644
--- a/drivers/staging/sxg/sxghif.h
+++ b/drivers/staging/sxg/sxghif.h
@@ -486,6 +486,20 @@ struct sxg_ring_info {
486 SXG_RING_ADVANCE_TAIL(_ringinfo); \ 486 SXG_RING_ADVANCE_TAIL(_ringinfo); \
487} 487}
488 488
489/*
490 * For a given ring find out how much the first pointer is ahead of
491 * the second pointer. "ahead" recognises the fact that the ring can wrap
492 */
493static inline int sxg_ring_get_forward_diff (struct sxg_ring_info *ringinfo,
494 int a, int b) {
495 if ((a < 0 || a > ringinfo->Size ) || (b < 0 || b > ringinfo->Size))
496 return -1;
497 if (a > b) /* _a is lagging _b and _b has not wrapped around */
498 return (a - b);
499 else
500 return ((ringinfo->Size - (b - a)));
501}
502
489/*************************************************************** 503/***************************************************************
490 * Host Command Buffer - commands to INIC via the Cmd Rings 504 * Host Command Buffer - commands to INIC via the Cmd Rings
491 * 505 *