author    Michael Hennerich <michael.hennerich@analog.com>  2009-05-28 23:41:15 -0400
committer David S. Miller <davem@davemloft.net>             2009-05-29 18:49:09 -0400
commit    015dac8886b5c48d62ebc33a964b9086d6a71bd7 (patch)
tree      976e6916811e7dee4d884419dcb43fe8769e49ce /drivers/net/bfin_mac.c
parent    805a8ab3ce1be83e9a98c21a625ebbb549a2d317 (diff)
netdev: bfin_mac: fix performance issue found by netperf
- Remove dead long delay
- Use proper defines
- Remove broken implementation of the TX DMA Data Alignment TXDWA feature

Signed-off-by: Michael Hennerich <michael.hennerich@analog.com>
Signed-off-by: Mike Frysinger <vapier@gentoo.org>
Signed-off-by: Bryan Wu <cooloney@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/bfin_mac.c')
-rw-r--r--  drivers/net/bfin_mac.c | 110
1 file changed, 26 insertions(+), 84 deletions(-)
diff --git a/drivers/net/bfin_mac.c b/drivers/net/bfin_mac.c
index 38d34cec65a3..f0f1eb929dbb 100644
--- a/drivers/net/bfin_mac.c
+++ b/drivers/net/bfin_mac.c
@@ -194,13 +194,13 @@ static int desc_list_init(void)
 		struct dma_descriptor *b = &(r->desc_b);
 
 		/* allocate a new skb for next time receive */
-		new_skb = dev_alloc_skb(PKT_BUF_SZ + 2);
+		new_skb = dev_alloc_skb(PKT_BUF_SZ + NET_IP_ALIGN);
 		if (!new_skb) {
 			printk(KERN_NOTICE DRV_NAME
 			       ": init: low on mem - packet dropped\n");
 			goto init_error;
 		}
-		skb_reserve(new_skb, 2);
+		skb_reserve(new_skb, NET_IP_ALIGN);
 		r->skb = new_skb;
 
 		/*
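The hunk above swaps the bare constant 2 for NET_IP_ALIGN, the kernel-wide padding define: reserving it before the 14-byte Ethernet header leaves the IP header 32-bit aligned. A minimal standalone sketch of the idiom, assuming the driver's PKT_BUF_SZ buffer-size define (rx_alloc_sketch is an illustrative name, not driver code):

#include <linux/skbuff.h>

static struct sk_buff *rx_alloc_sketch(void)
{
	/* over-allocate by NET_IP_ALIGN (normally 2) ... */
	struct sk_buff *skb = dev_alloc_skb(PKT_BUF_SZ + NET_IP_ALIGN);

	if (!skb)
		return NULL;
	/* ... then skip the padding so the IP header lands aligned */
	skb_reserve(skb, NET_IP_ALIGN);
	return skb;
}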
@@ -566,9 +566,9 @@ static void adjust_tx_list(void)
 	 */
 	if (current_tx_ptr->next->next == tx_list_head) {
 		while (tx_list_head->status.status_word == 0) {
-			mdelay(1);
+			udelay(10);
 			if (tx_list_head->status.status_word != 0
-			    || !(bfin_read_DMA2_IRQ_STATUS() & 0x08)) {
+			    || !(bfin_read_DMA2_IRQ_STATUS() & DMA_RUN)) {
 				goto adjust_head;
 			}
 			if (timeout_cnt-- < 0) {
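Here the dead long delay goes away: mdelay(1) spun for a full millisecond per poll of the descriptor status, while udelay(10) rechecks every 10 microseconds, and the magic 0x08 becomes the named DMA_RUN bit. The bounded-poll pattern in isolation, as a hedged sketch (poll_tx_done and TX_POLL_BUDGET are illustrative names, not driver symbols):

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/types.h>

#define TX_POLL_BUDGET	500	/* illustrative iteration cap */

static int poll_tx_done(volatile u32 *status_word)
{
	int timeout_cnt = TX_POLL_BUDGET;

	while (*status_word == 0) {
		udelay(10);	/* short steps instead of a 1 ms mdelay() */
		if (timeout_cnt-- < 0)
			return -ETIMEDOUT;	/* hardware never completed */
	}
	return 0;
}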
@@ -606,86 +606,28 @@ static int bfin_mac_hard_start_xmit(struct sk_buff *skb,
 				struct net_device *dev)
 {
 	u16 *data;
-
+	u32 data_align = (unsigned long)(skb->data) & 0x3;
 	current_tx_ptr->skb = skb;
 
-	if (ANOMALY_05000285) {
-		/*
-		 * TXDWA feature is not avaible to older revision < 0.3 silicon
-		 * of BF537
-		 *
-		 * Only if data buffer is ODD WORD alignment, we do not
-		 * need to memcpy
-		 */
-		u32 data_align = (u32)(skb->data) & 0x3;
-		if (data_align == 0x2) {
-			/* move skb->data to current_tx_ptr payload */
-			data = (u16 *)(skb->data) - 1;
-			*data = (u16)(skb->len);
-			current_tx_ptr->desc_a.start_addr = (u32)data;
-			/* this is important! */
-			blackfin_dcache_flush_range((u32)data,
-				(u32)((u8 *)data + skb->len + 4));
-		} else {
-			*((u16 *)(current_tx_ptr->packet)) = (u16)(skb->len);
-			memcpy((u8 *)(current_tx_ptr->packet + 2), skb->data,
-				skb->len);
-			current_tx_ptr->desc_a.start_addr =
-				(u32)current_tx_ptr->packet;
-			if (current_tx_ptr->status.status_word != 0)
-				current_tx_ptr->status.status_word = 0;
-			blackfin_dcache_flush_range(
-				(u32)current_tx_ptr->packet,
-				(u32)(current_tx_ptr->packet + skb->len + 2));
-		}
+	if (data_align == 0x2) {
+		/* move skb->data to current_tx_ptr payload */
+		data = (u16 *)(skb->data) - 1;
+		*data = (u16)(skb->len);
+		current_tx_ptr->desc_a.start_addr = (u32)data;
+		/* this is important! */
+		blackfin_dcache_flush_range((u32)data,
+			(u32)((u8 *)data + skb->len + 4));
 	} else {
-		/*
-		 * TXDWA feature is avaible to revision < 0.3 silicon of
-		 * BF537 and always avaible to BF52x
-		 */
-		u32 data_align = (u32)(skb->data) & 0x3;
-		if (data_align == 0x0) {
-			u16 sysctl = bfin_read_EMAC_SYSCTL();
-			sysctl |= TXDWA;
-			bfin_write_EMAC_SYSCTL(sysctl);
-
-			/* move skb->data to current_tx_ptr payload */
-			data = (u16 *)(skb->data) - 2;
-			*data = (u16)(skb->len);
-			current_tx_ptr->desc_a.start_addr = (u32)data;
-			/* this is important! */
-			blackfin_dcache_flush_range(
-				(u32)data,
-				(u32)((u8 *)data + skb->len + 4));
-		} else if (data_align == 0x2) {
-			u16 sysctl = bfin_read_EMAC_SYSCTL();
-			sysctl &= ~TXDWA;
-			bfin_write_EMAC_SYSCTL(sysctl);
-
-			/* move skb->data to current_tx_ptr payload */
-			data = (u16 *)(skb->data) - 1;
-			*data = (u16)(skb->len);
-			current_tx_ptr->desc_a.start_addr = (u32)data;
-			/* this is important! */
-			blackfin_dcache_flush_range(
-				(u32)data,
-				(u32)((u8 *)data + skb->len + 4));
-		} else {
-			u16 sysctl = bfin_read_EMAC_SYSCTL();
-			sysctl &= ~TXDWA;
-			bfin_write_EMAC_SYSCTL(sysctl);
-
-			*((u16 *)(current_tx_ptr->packet)) = (u16)(skb->len);
-			memcpy((u8 *)(current_tx_ptr->packet + 2), skb->data,
-				skb->len);
-			current_tx_ptr->desc_a.start_addr =
-				(u32)current_tx_ptr->packet;
-			if (current_tx_ptr->status.status_word != 0)
-				current_tx_ptr->status.status_word = 0;
-			blackfin_dcache_flush_range(
-				(u32)current_tx_ptr->packet,
-				(u32)(current_tx_ptr->packet + skb->len + 2));
-		}
+		*((u16 *)(current_tx_ptr->packet)) = (u16)(skb->len);
+		memcpy((u8 *)(current_tx_ptr->packet + 2), skb->data,
+			skb->len);
+		current_tx_ptr->desc_a.start_addr =
+			(u32)current_tx_ptr->packet;
+		if (current_tx_ptr->status.status_word != 0)
+			current_tx_ptr->status.status_word = 0;
+		blackfin_dcache_flush_range(
+			(u32)current_tx_ptr->packet,
+			(u32)(current_tx_ptr->packet + skb->len + 2));
 	}
 
 	/* make sure the internal data buffers in the core are drained
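This hunk carries most of the diffstat: the broken TXDWA (TX DMA Data Alignment) paths are dropped, the per-packet EMAC_SYSCTL read-modify-write goes with them, and the duplicated ANOMALY_05000285 branches collapse into one alignment check hoisted to the top of the function. The MAC expects a 16-bit frame length immediately ahead of the payload, so when skb->data sits on a half-word boundary the length is written into the existing headroom in place; any other alignment falls back to a memcpy into the descriptor's bounce buffer. That decision in isolation, as a hedged sketch (tx_fits_in_place is an illustrative helper, not driver code):

#include <linux/skbuff.h>
#include <linux/types.h>

/* The length halfword must occupy skb->data - 2 with the whole pair
 * 32-bit aligned, which is exactly the (addr & 0x3) == 0x2 case. */
static bool tx_fits_in_place(const struct sk_buff *skb)
{
	return (((unsigned long)skb->data) & 0x3) == 0x2;
}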
@@ -698,7 +640,7 @@ static int bfin_mac_hard_start_xmit(struct sk_buff *skb,
 	current_tx_ptr->desc_a.config |= DMAEN;
 
 	/* tx dma is running, just return */
-	if (bfin_read_DMA2_IRQ_STATUS() & 0x08)
+	if (bfin_read_DMA2_IRQ_STATUS() & DMA_RUN)
 		goto out;
 
 	/* tx dma is not running */
@@ -724,7 +666,7 @@ static void bfin_mac_rx(struct net_device *dev)
 
 	/* allocate a new skb for next time receive */
 	skb = current_rx_ptr->skb;
-	new_skb = dev_alloc_skb(PKT_BUF_SZ + 2);
+	new_skb = dev_alloc_skb(PKT_BUF_SZ + NET_IP_ALIGN);
 	if (!new_skb) {
 		printk(KERN_NOTICE DRV_NAME
 		       ": rx: low on mem - packet dropped\n");
@@ -732,7 +674,7 @@ static void bfin_mac_rx(struct net_device *dev)
 		goto out;
 	}
 	/* reserve 2 bytes for RXDWA padding */
-	skb_reserve(new_skb, 2);
+	skb_reserve(new_skb, NET_IP_ALIGN);
 	current_rx_ptr->skb = new_skb;
 	current_rx_ptr->desc_a.start_addr = (unsigned long)new_skb->data - 2;
 
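On the receive side the descriptor still points the DMA engine 2 bytes before skb->data, so the RXDWA padding the MAC writes lands in the headroom that skb_reserve() set aside. An illustrative layout, not driver code:

/*
 *  start_addr          skb->data             skb->data + 14
 *      |  2B RXDWA pad  |  Ethernet header  |  IP header (32-bit aligned)
 */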