author		Francois Romieu <romieu@fr.zoreil.com>	2006-09-10 17:33:44 -0400
committer	Francois Romieu <romieu@fr.zoreil.com>	2006-09-12 15:00:46 -0400
commit		48907e39890590792c58272604cfb34ad1d80054 (patch)
tree		75cc0a74ef9c704b3cfe0745d4bde39b5db515fb /drivers/net/8139cp.c
parent		cccb20d3a9b7c6d4b6e1b52ee02814e6094aaa12 (diff)
8139cp: ring_info removal for the transmit path
The length needed by pci_unmap_single() is already available in the low
16 bits of the descriptor's opts1 word, so the transmit completion and
ring cleanup paths can read it back from the descriptor instead of
keeping a per-entry len; tx_skb[] then becomes a plain array of sk_buff
pointers. As long as the descriptor fits on a single cacheline, the
change should be almost free.

Now ring_info is not used at all. Remove it.
Signed-off-by: Francois Romieu <romieu@fr.zoreil.com>
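
For illustration, here is a minimal standalone sketch of the idea the patch
relies on: the frame length is packed into the low 16 bits of the
descriptor's opts1 word when the frame is queued, so the completion path can
recover it for the unmap call instead of caching it in a separate per-entry
structure. The struct name, flag value, and program scaffolding below are
invented for the example; only the opts1 / 0xffff convention comes from the
driver.

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for the hardware descriptor; the field layout
 * mirrors cp_desc (opts1, opts2, addr), but the names are example-only. */
struct demo_desc {
	uint32_t opts1;		/* bits 15..0: buffer length, upper bits: control flags */
	uint32_t opts2;
	uint64_t addr;
};

#define DEMO_FIRST_FRAG	(1u << 29)	/* illustrative flag bit */

int main(void)
{
	struct demo_desc txd;
	uint32_t len = 1514;		/* a full-size Ethernet frame */

	/* Transmit side: flags and length share one 32-bit word. */
	txd.opts1 = DEMO_FIRST_FRAG | (len & 0xffff);

	/* Completion side: the unmap length is read back from the descriptor,
	 * so no separate ring_info.len needs to be carried per entry. */
	printf("unmap length = %u\n", txd.opts1 & 0xffff);
	return 0;
}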
Diffstat (limited to 'drivers/net/8139cp.c')
-rw-r--r--	drivers/net/8139cp.c	32
1 file changed, 13 insertions(+), 19 deletions(-)
diff --git a/drivers/net/8139cp.c b/drivers/net/8139cp.c
index bbdaa18879ab..c3b8400bdc3f 100644
--- a/drivers/net/8139cp.c
+++ b/drivers/net/8139cp.c
@@ -314,11 +314,6 @@ struct cp_desc {
 	u64			addr;
 };
 
-struct ring_info {
-	struct sk_buff		*skb;
-	u32			len;
-};
-
 struct cp_dma_stats {
 	u64			tx_ok;
 	u64			rx_ok;
@@ -360,7 +355,7 @@ struct cp_private {
 	unsigned		tx_head ____cacheline_aligned;
 	unsigned		tx_tail;
 	struct cp_desc		*tx_ring;
-	struct ring_info	tx_skb[CP_TX_RING_SIZE];
+	struct sk_buff		*tx_skb[CP_TX_RING_SIZE];
 
 	unsigned		rx_buf_sz;
 	unsigned		wol_enabled : 1; /* Is Wake-on-LAN enabled? */
@@ -721,11 +716,12 @@ static void cp_tx (struct cp_private *cp)
 		if (status & DescOwn)
 			break;
 
-		skb = cp->tx_skb[tx_tail].skb;
+		skb = cp->tx_skb[tx_tail];
 		BUG_ON(!skb);
 
 		pci_unmap_single(cp->pdev, le64_to_cpu(txd->addr),
-				 cp->tx_skb[tx_tail].len, PCI_DMA_TODEVICE);
+				 le32_to_cpu(txd->opts1) & 0xffff,
+				 PCI_DMA_TODEVICE);
 
 		if (status & LastFrag) {
 			if (status & (TxError | TxFIFOUnder)) {
@@ -752,7 +748,7 @@ static void cp_tx (struct cp_private *cp)
 			dev_kfree_skb_irq(skb);
 		}
 
-		cp->tx_skb[tx_tail].skb = NULL;
+		cp->tx_skb[tx_tail] = NULL;
 
 		tx_tail = NEXT_TX(tx_tail);
 	}
@@ -822,8 +818,7 @@ static int cp_start_xmit (struct sk_buff *skb, struct net_device *dev)
 		txd->opts1 = cpu_to_le32(flags);
 		wmb();
 
-		cp->tx_skb[entry].skb = skb;
-		cp->tx_skb[entry].len = len;
+		cp->tx_skb[entry] = skb;
 		entry = NEXT_TX(entry);
 	} else {
 		struct cp_desc *txd;
@@ -839,8 +834,7 @@ static int cp_start_xmit (struct sk_buff *skb, struct net_device *dev)
 		first_len = skb_headlen(skb);
 		first_mapping = pci_map_single(cp->pdev, skb->data,
 					       first_len, PCI_DMA_TODEVICE);
-		cp->tx_skb[entry].skb = skb;
-		cp->tx_skb[entry].len = first_len;
+		cp->tx_skb[entry] = skb;
 		entry = NEXT_TX(entry);
 
 		for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
@@ -881,8 +875,7 @@ static int cp_start_xmit (struct sk_buff *skb, struct net_device *dev)
 			txd->opts1 = cpu_to_le32(ctrl);
 			wmb();
 
-			cp->tx_skb[entry].skb = skb;
-			cp->tx_skb[entry].len = len;
+			cp->tx_skb[entry] = skb;
 			entry = NEXT_TX(entry);
 		}
 
@@ -1159,12 +1152,13 @@ static void cp_clean_rings (struct cp_private *cp)
 	}
 
 	for (i = 0; i < CP_TX_RING_SIZE; i++) {
-		if (cp->tx_skb[i].skb) {
-			struct sk_buff *skb = cp->tx_skb[i].skb;
+		if (cp->tx_skb[i]) {
+			struct sk_buff *skb = cp->tx_skb[i];
 
 			desc = cp->tx_ring + i;
 			pci_unmap_single(cp->pdev, le64_to_cpu(desc->addr),
-					 cp->tx_skb[i].len, PCI_DMA_TODEVICE);
+					 le32_to_cpu(desc->opts1) & 0xffff,
+					 PCI_DMA_TODEVICE);
 			if (le32_to_cpu(desc->opts1) & LastFrag)
 				dev_kfree_skb(skb);
 			cp->net_stats.tx_dropped++;
@@ -1175,7 +1169,7 @@ static void cp_clean_rings (struct cp_private *cp)
 	memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE);
 
 	memset(cp->rx_skb, 0, sizeof(struct sk_buff *) * CP_RX_RING_SIZE);
-	memset(&cp->tx_skb, 0, sizeof(struct ring_info) * CP_TX_RING_SIZE);
+	memset(cp->tx_skb, 0, sizeof(struct sk_buff *) * CP_TX_RING_SIZE);
 }
 
 static void cp_free_rings (struct cp_private *cp)