author		Alan Cox <alan@linux.intel.com>	2009-10-06 10:49:29 -0400
committer	Greg Kroah-Hartman <gregkh@suse.de>	2009-12-11 15:23:07 -0500
commit		c1996fc2ee1df6ecefe9f950f196cfc9b1d403e4 (patch)
tree		a4ffbbd9bd7d6cf00ed80d20be7012f8b93d51fc /drivers/staging
parent		116badfe08c0ab8bcd54492a73b23bacb218ef54 (diff)
Staging: et131x: tidy up a bit further
Clean up the minor uglies left from the previous work.

Signed-off-by: Alan Cox <alan@linux.intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
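Most of the tidy-up is mechanical: the adapter-private wrapper macros are dropped from et131x_adapter.h and the tests are open-coded at their call sites in et1310_tx.c. Condensed for illustration only (bodies elided, not a verbatim excerpt of the patch):

	/* before: checks hidden behind adapter macros */
	if (MP_TCB_RESOURCES_NOT_AVAILABLE(etdev)) { ... }
	if (MP_SHOULD_FAIL_SEND(etdev) || !netif_carrier_ok(netdev)) { ... }

	/* after: the same tests written out directly */
	if (etdev->tx_ring.nBusySend >= NUM_TCB) { ... }
	if ((etdev->Flags & fMP_ADAPTER_FAIL_SEND_MASK) ||
			!netif_carrier_ok(netdev)) { ... }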
Diffstat (limited to 'drivers/staging')
-rw-r--r--	drivers/staging/et131x/et1310_tx.c	67
-rw-r--r--	drivers/staging/et131x/et131x_adapter.h	6
2 files changed, 16 insertions, 57 deletions
diff --git a/drivers/staging/et131x/et1310_tx.c b/drivers/staging/et131x/et1310_tx.c
index 4aabfa31226..5fe72ba652f 100644
--- a/drivers/staging/et131x/et1310_tx.c
+++ b/drivers/staging/et131x/et1310_tx.c
@@ -94,7 +94,6 @@
 #include "et1310_tx.h"
 
 
-static void et131x_update_tcb_list(struct et131x_adapter *etdev);
 static inline void et131x_free_send_packet(struct et131x_adapter *etdev,
 					   struct tcb *tcb);
 static int et131x_send_packet(struct sk_buff *skb,
@@ -230,15 +229,10 @@ void ConfigTxDmaRegs(struct et131x_adapter *etdev)
 	/* Initialise the transmit DMA engine */
 	writel(NUM_DESC_PER_RING_TX - 1, &txdma->pr_num_des.value);
 
-	/* Load the completion writeback physical address
-	 *
-	 * NOTE: pci_alloc_consistent(), used above to alloc DMA regions,
-	 * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses
-	 * are ever returned, make sure the high part is retrieved here before
-	 * storing the adjusted address.
-	 */
-	writel(0, &txdma->dma_wb_base_hi);
-	writel(etdev->tx_ring.pTxStatusPa, &txdma->dma_wb_base_lo);
+	/* Load the completion writeback physical address */
+	writel((u32)((u64)etdev->tx_ring.pTxStatusPa >> 32),
+	       &txdma->dma_wb_base_hi);
+	writel((u32)etdev->tx_ring.pTxStatusPa, &txdma->dma_wb_base_lo);
 
 	memset(etdev->tx_ring.pTxStatusVa, 0, sizeof(TX_STATUS_BLOCK_t));
 
@@ -279,7 +273,7 @@ void et131x_tx_dma_enable(struct et131x_adapter *etdev)
 void et131x_init_send(struct et131x_adapter *adapter)
 {
 	struct tcb *tcb;
-	u32 count;
+	u32 ct;
 	struct tx_ring *tx_ring;
 
 	/* Setup some convenience pointers */
@@ -289,23 +283,22 @@ void et131x_init_send(struct et131x_adapter *adapter)
 	tx_ring->TCBReadyQueueHead = tcb;
 
 	/* Go through and set up each TCB */
-	for (count = 0; count < NUM_TCB; count++) {
+	for (ct = 0; ct < NUM_TCB; ct++) {
 		memset(tcb, 0, sizeof(struct tcb));
 
 		/* Set the link pointer in HW TCB to the next TCB in the
 		 * chain. If this is the last TCB in the chain, also set the
 		 * tail pointer.
 		 */
-		if (count < NUM_TCB - 1) {
+		if (ct < NUM_TCB - 1)
 			tcb->Next = tcb + 1;
-		} else {
+		else {
 			tx_ring->TCBReadyQueueTail = tcb;
 			tcb->Next = NULL;
 		}
 
 		tcb++;
 	}
-
 	/* Curr send queue should now be empty */
 	tx_ring->CurrSendHead = NULL;
 	tx_ring->CurrSendTail = NULL;
@@ -332,7 +325,7 @@ int et131x_send_packets(struct sk_buff *skb, struct net_device *netdev)
 	 */
 
 	/* TCB is not available */
-	if (MP_TCB_RESOURCES_NOT_AVAILABLE(etdev)) {
+	if (etdev->tx_ring.nBusySend >= NUM_TCB) {
 		/* NOTE: If there's an error on send, no need to queue the
 		 * packet under Linux; if we just send an error up to the
 		 * netif layer, it will resend the skb to us.
@@ -342,26 +335,15 @@ int et131x_send_packets(struct sk_buff *skb, struct net_device *netdev)
 		/* We need to see if the link is up; if it's not, make the
 		 * netif layer think we're good and drop the packet
 		 */
-		/*
-		 * if( MP_SHOULD_FAIL_SEND( etdev ) ||
-		 *  etdev->DriverNoPhyAccess )
-		 */
-		if (MP_SHOULD_FAIL_SEND(etdev) || !netif_carrier_ok(netdev)) {
+		if ((etdev->Flags & fMP_ADAPTER_FAIL_SEND_MASK) ||
+					!netif_carrier_ok(netdev)) {
 			dev_kfree_skb_any(skb);
 			skb = NULL;
 
 			etdev->net_stats.tx_dropped++;
 		} else {
 			status = et131x_send_packet(skb, etdev);
-
-			if (status == -ENOMEM) {
-
-				/* NOTE: If there's an error on send, no need
-				 * to queue the packet under Linux; if we just
-				 * send an error up to the netif layer, it
-				 * will resend the skb to us.
-				 */
-			} else if (status != 0) {
+			if (status != 0 && status != -ENOMEM) {
 				/* On any other error, make netif think we're
 				 * OK and drop the packet
 				 */
@@ -386,7 +368,7 @@ int et131x_send_packets(struct sk_buff *skb, struct net_device *netdev)
 static int et131x_send_packet(struct sk_buff *skb,
 			      struct et131x_adapter *etdev)
 {
-	int status = 0;
+	int status;
 	struct tcb *tcb = NULL;
 	u16 *shbufva;
 	unsigned long flags;
@@ -429,8 +411,7 @@ static int et131x_send_packet(struct sk_buff *skb,
 	tcb->Next = NULL;
 
 	/* Call the NIC specific send handler. */
-	if (status == 0)
-		status = nic_send_packet(etdev, tcb);
+	status = nic_send_packet(etdev, tcb);
 
 	if (status != 0) {
 		spin_lock_irqsave(&etdev->TCBReadyQLock, flags);
@@ -725,12 +706,11 @@ inline void et131x_free_send_packet(struct et131x_adapter *etdev,
 
 	etdev->Stats.opackets++;
 
-	if (etdev->tx_ring.TCBReadyQueueTail) {
+	if (etdev->tx_ring.TCBReadyQueueTail)
 		etdev->tx_ring.TCBReadyQueueTail->Next = tcb;
-	} else {
+	else
 		/* Apparently ready Q is empty. */
 		etdev->tx_ring.TCBReadyQueueHead = tcb;
-	}
 
 	etdev->tx_ring.TCBReadyQueueTail = tcb;
 
@@ -747,7 +727,6 @@ inline void et131x_free_send_packet(struct et131x_adapter *etdev,
 void et131x_free_busy_send_packets(struct et131x_adapter *etdev)
 {
 	struct tcb *tcb;
-	struct list_head *entry;
 	unsigned long flags;
 	u32 freed = 0;
 
@@ -794,20 +773,6 @@ void et131x_free_busy_send_packets(struct et131x_adapter *etdev)
  */
 void et131x_handle_send_interrupt(struct et131x_adapter *etdev)
 {
-	/* Mark as completed any packets which have been sent by the device. */
-	et131x_update_tcb_list(etdev);
-}
-
-/**
- * et131x_update_tcb_list - Helper routine for Send Interrupt handler
- * @etdev: pointer to our adapter
- *
- * Re-claims the send resources and completes sends. Can also be called as
- * part of the NIC send routine when the "ServiceComplete" indication has
- * wrapped.
- */
-static void et131x_update_tcb_list(struct et131x_adapter *etdev)
-{
 	unsigned long flags;
 	u32 serviced;
 	struct tcb * tcb;
diff --git a/drivers/staging/et131x/et131x_adapter.h b/drivers/staging/et131x/et131x_adapter.h
index a512f62469f..cc5a6ba55dc 100644
--- a/drivers/staging/et131x/et131x_adapter.h
+++ b/drivers/staging/et131x/et131x_adapter.h
@@ -100,12 +100,6 @@
 #define LO_MARK_PERCENT_FOR_PSR 15
 #define LO_MARK_PERCENT_FOR_RX 15
 
-/* Macros specific to the private adapter structure */
-#define MP_TCB_RESOURCES_AVAILABLE(_M) ((_M)->tx_ring.nBusySend < NUM_TCB)
-#define MP_TCB_RESOURCES_NOT_AVAILABLE(_M) ((_M)->tx_ring.nBusySend >= NUM_TCB)
-
-#define MP_SHOULD_FAIL_SEND(_M) ((_M)->Flags & fMP_ADAPTER_FAIL_SEND_MASK)
-
 /* Counters for error rate monitoring */
 typedef struct _MP_ERR_COUNTERS {
 	u32 PktCountTxPackets;