aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorOlof Johansson <olof@lixom.net>2007-05-08 01:47:45 -0400
committerJeff Garzik <jeff@garzik.org>2007-05-08 01:47:54 -0400
commitcd4ceb245be7926e94558e2b6cd279bfaa775908 (patch)
treeb942276c7d5caf98b7e2d1395d32b22cc6c69bd5
parentcfa8007d5cee58d2c2121b7d00077c6f10969cb7 (diff)
pasemi_mac: Logic cleanup / rx performance improvements
Logic cleanup and some performance enhancements to the RX path. Signed-off-by: Olof Johansson <olof@lixom.net> Signed-off-by: Jeff Garzik <jeff@garzik.org>
-rw-r--r--drivers/net/pasemi_mac.c75
1 file changed, 37 insertions(+), 38 deletions(-)
diff --git a/drivers/net/pasemi_mac.c b/drivers/net/pasemi_mac.c
index f18fd07973b1..6b4e925aa7f1 100644
--- a/drivers/net/pasemi_mac.c
+++ b/drivers/net/pasemi_mac.c
@@ -305,19 +305,20 @@ static void pasemi_mac_replenish_rx_ring(struct net_device *dev)
305 struct pasemi_mac *mac = netdev_priv(dev); 305 struct pasemi_mac *mac = netdev_priv(dev);
306 unsigned int i; 306 unsigned int i;
307 int start = mac->rx->next_to_fill; 307 int start = mac->rx->next_to_fill;
308 unsigned int count; 308 unsigned int limit, count;
309 309
310 count = (mac->rx->next_to_clean + RX_RING_SIZE - 310 limit = (mac->rx->next_to_clean + RX_RING_SIZE -
311 mac->rx->next_to_fill) & (RX_RING_SIZE - 1); 311 mac->rx->next_to_fill) & (RX_RING_SIZE - 1);
312 312
313 /* Check to see if we're doing first-time setup */ 313 /* Check to see if we're doing first-time setup */
314 if (unlikely(mac->rx->next_to_clean == 0 && mac->rx->next_to_fill == 0)) 314 if (unlikely(mac->rx->next_to_clean == 0 && mac->rx->next_to_fill == 0))
315 count = RX_RING_SIZE; 315 limit = RX_RING_SIZE;
316 316
317 if (count <= 0) 317 if (limit <= 0)
318 return; 318 return;
319 319
320 for (i = start; i < start + count; i++) { 320 i = start;
321 for (count = limit; count; count--) {
321 struct pasemi_mac_buffer *info = &RX_DESC_INFO(mac, i); 322 struct pasemi_mac_buffer *info = &RX_DESC_INFO(mac, i);
322 u64 *buff = &RX_BUFF(mac, i); 323 u64 *buff = &RX_BUFF(mac, i);
323 struct sk_buff *skb; 324 struct sk_buff *skb;
@@ -335,27 +336,27 @@ static void pasemi_mac_replenish_rx_ring(struct net_device *dev)
335 dma = pci_map_single(mac->dma_pdev, skb->data, skb->len, 336 dma = pci_map_single(mac->dma_pdev, skb->data, skb->len,
336 PCI_DMA_FROMDEVICE); 337 PCI_DMA_FROMDEVICE);
337 338
338 if (dma_mapping_error(dma)) { 339 if (unlikely(dma_mapping_error(dma))) {
339 dev_kfree_skb_irq(info->skb); 340 dev_kfree_skb_irq(info->skb);
340 count = i - start;
341 break; 341 break;
342 } 342 }
343 343
344 info->skb = skb; 344 info->skb = skb;
345 info->dma = dma; 345 info->dma = dma;
346 *buff = XCT_RXB_LEN(BUF_SIZE) | XCT_RXB_ADDR(dma); 346 *buff = XCT_RXB_LEN(BUF_SIZE) | XCT_RXB_ADDR(dma);
347 i++;
347 } 348 }
348 349
349 wmb(); 350 wmb();
350 351
351 pci_write_config_dword(mac->dma_pdev, 352 pci_write_config_dword(mac->dma_pdev,
352 PAS_DMA_RXCHAN_INCR(mac->dma_rxch), 353 PAS_DMA_RXCHAN_INCR(mac->dma_rxch),
353 count); 354 limit - count);
354 pci_write_config_dword(mac->dma_pdev, 355 pci_write_config_dword(mac->dma_pdev,
355 PAS_DMA_RXINT_INCR(mac->dma_if), 356 PAS_DMA_RXINT_INCR(mac->dma_if),
356 count); 357 limit - count);
357 358
358 mac->rx->next_to_fill += count; 359 mac->rx->next_to_fill += limit - count;
359} 360}
360 361
361static void pasemi_mac_restart_rx_intr(struct pasemi_mac *mac) 362static void pasemi_mac_restart_rx_intr(struct pasemi_mac *mac)
@@ -393,32 +394,31 @@ static void pasemi_mac_restart_tx_intr(struct pasemi_mac *mac)
393} 394}
394 395
395 396
396
397static int pasemi_mac_clean_rx(struct pasemi_mac *mac, int limit) 397static int pasemi_mac_clean_rx(struct pasemi_mac *mac, int limit)
398{ 398{
399 unsigned int i; 399 unsigned int n;
400 int start, count; 400 int count;
401 struct pas_dma_xct_descr *dp;
402 struct pasemi_mac_buffer *info;
403 struct sk_buff *skb;
404 unsigned int i, len;
405 u64 macrx;
406 dma_addr_t dma;
401 407
402 spin_lock(&mac->rx->lock); 408 spin_lock(&mac->rx->lock);
403 409
404 start = mac->rx->next_to_clean; 410 n = mac->rx->next_to_clean;
405 count = 0;
406 411
407 for (i = start; i < (start + RX_RING_SIZE) && count < limit; i++) { 412 for (count = limit; count; count--) {
408 struct pas_dma_xct_descr *dp;
409 struct pasemi_mac_buffer *info;
410 struct sk_buff *skb;
411 unsigned int j, len;
412 dma_addr_t dma;
413 413
414 rmb(); 414 rmb();
415 415
416 dp = &RX_DESC(mac, i); 416 dp = &RX_DESC(mac, n);
417 macrx = dp->macrx;
417 418
418 if (!(dp->macrx & XCT_MACRX_O)) 419 if (!(macrx & XCT_MACRX_O))
419 break; 420 break;
420 421
421 count++;
422 422
423 info = NULL; 423 info = NULL;
424 424
@@ -430,22 +430,20 @@ static int pasemi_mac_clean_rx(struct pasemi_mac *mac, int limit)
430 */ 430 */
431 431
432 dma = (dp->ptr & XCT_PTR_ADDR_M); 432 dma = (dp->ptr & XCT_PTR_ADDR_M);
433 for (j = start; j < (start + RX_RING_SIZE); j++) { 433 for (i = n; i < (n + RX_RING_SIZE); i++) {
434 info = &RX_DESC_INFO(mac, j); 434 info = &RX_DESC_INFO(mac, i);
435 if (info->dma == dma) 435 if (info->dma == dma)
436 break; 436 break;
437 } 437 }
438 438
439 BUG_ON(!info);
440 BUG_ON(info->dma != dma);
441 skb = info->skb; 439 skb = info->skb;
440 info->dma = 0;
442 441
443 pci_unmap_single(mac->dma_pdev, info->dma, info->skb->len, 442 pci_unmap_single(mac->dma_pdev, dma, skb->len,
444 PCI_DMA_FROMDEVICE); 443 PCI_DMA_FROMDEVICE);
445 info->dma = 0;
446 444
445 len = (macrx & XCT_MACRX_LLEN_M) >> XCT_MACRX_LLEN_S;
447 446
448 len = (dp->macrx & XCT_MACRX_LLEN_M) >> XCT_MACRX_LLEN_S;
449 if (len < 256) { 447 if (len < 256) {
450 struct sk_buff *new_skb = 448 struct sk_buff *new_skb =
451 netdev_alloc_skb(mac->netdev, len + NET_IP_ALIGN); 449 netdev_alloc_skb(mac->netdev, len + NET_IP_ALIGN);
@@ -465,9 +463,9 @@ static int pasemi_mac_clean_rx(struct pasemi_mac *mac, int limit)
465 463
466 skb->protocol = eth_type_trans(skb, mac->netdev); 464 skb->protocol = eth_type_trans(skb, mac->netdev);
467 465
468 if ((dp->macrx & XCT_MACRX_HTY_M) == XCT_MACRX_HTY_IPV4_OK) { 466 if ((macrx & XCT_MACRX_HTY_M) == XCT_MACRX_HTY_IPV4_OK) {
469 skb->ip_summed = CHECKSUM_COMPLETE; 467 skb->ip_summed = CHECKSUM_COMPLETE;
470 skb->csum = (dp->macrx & XCT_MACRX_CSUM_M) >> 468 skb->csum = (macrx & XCT_MACRX_CSUM_M) >>
471 XCT_MACRX_CSUM_S; 469 XCT_MACRX_CSUM_S;
472 } else 470 } else
473 skb->ip_summed = CHECKSUM_NONE; 471 skb->ip_summed = CHECKSUM_NONE;
@@ -477,13 +475,13 @@ static int pasemi_mac_clean_rx(struct pasemi_mac *mac, int limit)
477 475
478 netif_receive_skb(skb); 476 netif_receive_skb(skb);
479 477
480 info->dma = 0;
481 info->skb = NULL;
482 dp->ptr = 0; 478 dp->ptr = 0;
483 dp->macrx = 0; 479 dp->macrx = 0;
480
481 n++;
484 } 482 }
485 483
486 mac->rx->next_to_clean += count; 484 mac->rx->next_to_clean += limit - count;
487 pasemi_mac_replenish_rx_ring(mac->netdev); 485 pasemi_mac_replenish_rx_ring(mac->netdev);
488 486
489 spin_unlock(&mac->rx->lock); 487 spin_unlock(&mac->rx->lock);
@@ -899,6 +897,9 @@ static int pasemi_mac_poll(struct net_device *dev, int *budget)
899 897
900 pkts = pasemi_mac_clean_rx(mac, limit); 898 pkts = pasemi_mac_clean_rx(mac, limit);
901 899
900 dev->quota -= pkts;
901 *budget -= pkts;
902
902 if (pkts < limit) { 903 if (pkts < limit) {
903 /* all done, no more packets present */ 904 /* all done, no more packets present */
904 netif_rx_complete(dev); 905 netif_rx_complete(dev);
@@ -907,8 +908,6 @@ static int pasemi_mac_poll(struct net_device *dev, int *budget)
907 return 0; 908 return 0;
908 } else { 909 } else {
909 /* used up our quantum, so reschedule */ 910 /* used up our quantum, so reschedule */
910 dev->quota -= pkts;
911 *budget -= pkts;
912 return 1; 911 return 1;
913 } 912 }
914} 913}