author     Olof Johansson <olof@lixom.net>  2007-10-02 17:26:13 -0400
committer  David S. Miller <davem@sunset.davemloft.net>  2007-10-10 19:54:24 -0400
commit     ad3c20d1ab586884f1815c315e3f303a8b8a7d7d
tree       16493bc5ee35e6dcfeffe0bafb5f58d252b5d272 /drivers/net/pasemi_mac.c
parent     fc9e4d2a93dab4a995e2e75725577b9a60154cbc
pasemi_mac: implement sg support
Implement SG support for pasemi_mac
Signed-off-by: Olof Johansson <olof@lixom.net>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
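
For orientation: scatter-gather transmit means the stack may hand the driver an skb whose payload is split across a linear head plus up to MAX_SKB_FRAGS page fragments, each of which needs its own DMA mapping. A minimal sketch of that walk, using the same pre-2.6.24 PCI DMA API the patch uses; the helper name and the pdev parameter are illustrative, not from the driver:

#include <linux/pci.h>
#include <linux/skbuff.h>

/* Illustrative helper (not in the patch): map an skb's head and all of
 * its page fragments for memory-to-device DMA, recording each handle
 * and length so they can be unmapped later.  Returns the number of
 * buffers mapped, or -1 on failure after unwinding earlier mappings.
 */
static int sg_map_skb(struct pci_dev *pdev, struct sk_buff *skb,
		      dma_addr_t *map, unsigned int *map_size)
{
	int i, nfrags = skb_shinfo(skb)->nr_frags;

	map[0] = pci_map_single(pdev, skb->data, skb_headlen(skb),
				PCI_DMA_TODEVICE);
	map_size[0] = skb_headlen(skb);
	if (dma_mapping_error(map[0]))
		return -1;

	for (i = 0; i < nfrags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		map[i+1] = pci_map_page(pdev, frag->page, frag->page_offset,
					frag->size, PCI_DMA_TODEVICE);
		map_size[i+1] = frag->size;
		if (dma_mapping_error(map[i+1]))
			goto unwind;
	}
	return nfrags + 1;

unwind:
	/* Frags 0..i-1 were mapped as pages; the head as a single buffer */
	while (i--)
		pci_unmap_page(pdev, map[i+1], map_size[i+1],
			       PCI_DMA_TODEVICE);
	pci_unmap_single(pdev, map[0], map_size[0], PCI_DMA_TODEVICE);
	return -1;
}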
Diffstat (limited to 'drivers/net/pasemi_mac.c')
 drivers/net/pasemi_mac.c | 163 ++++++++++++++++++++++++++++++---------------
 1 file changed, 117 insertions(+), 46 deletions(-)
diff --git a/drivers/net/pasemi_mac.c b/drivers/net/pasemi_mac.c
index c2a3524a541..5eb5e47120d 100644
--- a/drivers/net/pasemi_mac.c
+++ b/drivers/net/pasemi_mac.c
@@ -160,6 +160,30 @@ static int pasemi_get_mac_addr(struct pasemi_mac *mac)
 	return 0;
 }
 
+static int pasemi_mac_unmap_tx_skb(struct pasemi_mac *mac,
+				   struct sk_buff *skb,
+				   dma_addr_t *dmas)
+{
+	int f;
+	int nfrags = skb_shinfo(skb)->nr_frags;
+
+	pci_unmap_single(mac->dma_pdev, dmas[0], skb_headlen(skb),
+			 PCI_DMA_TODEVICE);
+
+	for (f = 0; f < nfrags; f++) {
+		skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
+
+		pci_unmap_page(mac->dma_pdev, dmas[f+1], frag->size,
+			       PCI_DMA_TODEVICE);
+	}
+	dev_kfree_skb_irq(skb);
+
+	/* Freed descriptor slot + main SKB ptr + nfrags additional ptrs,
+	 * aligned up to a power of 2
+	 */
+	return (nfrags + 3) & ~1;
+}
+
 static int pasemi_mac_setup_rx_resources(struct net_device *dev)
 {
 	struct pasemi_mac_rxring *ring;
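
The return value of the new helper is the number of ring slots the packet occupied: one mactx descriptor plus a pointer descriptor for the head and for each fragment, rounded up to an even count because the ring is always filled two 8-byte words at a time (the comment's "power of 2" effectively means "multiple of 2" here). A worked example of the arithmetic, as a hypothetical standalone helper:

/* Illustration of the slot accounting (not from the driver):
 * 1 mactx word + (nfrags + 1) pointer words, padded to an even total.
 *
 *   nr_frags   slots = (nfrags + 3) & ~1
 *      0            2   (mactx + head pointer)
 *      1            4   (one pad slot unused)
 *      2            4
 *      3            6   (one pad slot unused)
 */
static inline int tx_slots_for_skb(int nfrags)
{
	return (nfrags + 3) & ~1;
}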
@@ -300,24 +324,24 @@ out_ring:
 static void pasemi_mac_free_tx_resources(struct net_device *dev)
 {
 	struct pasemi_mac *mac = netdev_priv(dev);
-	unsigned int i;
+	unsigned int i, j;
 	struct pasemi_mac_buffer *info;
+	dma_addr_t dmas[MAX_SKB_FRAGS+1];
+	int freed;
 
-	for (i = 0; i < TX_RING_SIZE; i += 2) {
+	for (i = 0; i < TX_RING_SIZE; i += freed) {
 		info = &TX_RING_INFO(mac, i+1);
 		if (info->dma && info->skb) {
-			pci_unmap_single(mac->dma_pdev,
-					 info->dma,
-					 info->skb->len,
-					 PCI_DMA_TODEVICE);
-			dev_kfree_skb_any(info->skb);
-		}
-		TX_RING(mac, i) = 0;
-		TX_RING(mac, i+1) = 0;
-		info->dma = 0;
-		info->skb = NULL;
+			for (j = 0; j <= skb_shinfo(info->skb)->nr_frags; j++)
+				dmas[j] = TX_RING_INFO(mac, i+1+j).dma;
+			freed = pasemi_mac_unmap_tx_skb(mac, info->skb, dmas);
+		} else
+			freed = 2;
 	}
 
+	for (i = 0; i < TX_RING_SIZE; i++)
+		TX_RING(mac, i) = 0;
+
 	dma_free_coherent(&mac->dma_pdev->dev,
 			  TX_RING_SIZE * sizeof(u64),
 			  mac->tx->ring, mac->tx->dma);
@@ -573,27 +597,34 @@ static int pasemi_mac_clean_rx(struct pasemi_mac *mac, int limit)
 	return count;
 }
 
+/* Can't make this too large or we blow the kernel stack limits */
+#define TX_CLEAN_BATCHSIZE (128/MAX_SKB_FRAGS)
+
 static int pasemi_mac_clean_tx(struct pasemi_mac *mac)
 {
-	int i;
+	int i, j;
 	struct pasemi_mac_buffer *info;
-	unsigned int start, count, limit;
+	unsigned int start, descr_count, buf_count, limit;
 	unsigned int total_count;
 	unsigned long flags;
-	struct sk_buff *skbs[32];
-	dma_addr_t dmas[32];
+	struct sk_buff *skbs[TX_CLEAN_BATCHSIZE];
+	dma_addr_t dmas[TX_CLEAN_BATCHSIZE][MAX_SKB_FRAGS+1];
 
 	total_count = 0;
+	limit = TX_CLEAN_BATCHSIZE;
 restart:
 	spin_lock_irqsave(&mac->tx->lock, flags);
 
 	start = mac->tx->next_to_clean;
-	limit = min(mac->tx->next_to_fill, start+32);
 
-	count = 0;
+	buf_count = 0;
+	descr_count = 0;
 
-	for (i = start; i < limit; i += 2) {
+	for (i = start;
+	     descr_count < limit && i < mac->tx->next_to_fill;
+	     i += buf_count) {
 		u64 mactx = TX_RING(mac, i);
+
 		if ((mactx & XCT_MACTX_E) ||
 		    (*mac->tx_status & PAS_STATUS_ERROR))
 			pasemi_mac_tx_error(mac, mactx);
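
The batch size is capped because skbs[] and dmas[] live on the kernel stack and dmas gained a second dimension in this patch. A rough worked budget, assuming 4 KB pages (where MAX_SKB_FRAGS is typically 18) and an 8-byte dma_addr_t; neither figure is stated in the patch:

/* Illustration only -- stack cost of the per-batch arrays in
 * pasemi_mac_clean_tx(), under the assumptions above:
 *
 *   TX_CLEAN_BATCHSIZE = 128 / 18 = 7
 *   skbs[]: 7 * 8 bytes            =   56 bytes
 *   dmas[]: 7 * (18 + 1) * 8 bytes = 1064 bytes
 *
 * The two arrays stay near 1 KB; keeping the old fixed batch of 32
 * with the new two-dimensional dmas[][] would have cost ~4.8 KB.
 */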
@@ -603,30 +634,38 @@ restart:
 			break;
 
 		info = &TX_RING_INFO(mac, i+1);
-		skbs[count] = info->skb;
-		dmas[count] = info->dma;
+		skbs[descr_count] = info->skb;
+
+		buf_count = 2 + skb_shinfo(info->skb)->nr_frags;
+		for (j = 0; j <= skb_shinfo(info->skb)->nr_frags; j++)
+			dmas[descr_count][j] = TX_RING_INFO(mac, i+1+j).dma;
+
 
 		info->dma = 0;
 		TX_RING(mac, i) = 0;
 		TX_RING(mac, i+1) = 0;
+		TX_RING_INFO(mac, i+1).skb = 0;
+		TX_RING_INFO(mac, i+1).dma = 0;
 
-
-		count++;
+		/* Since we always fill with an even number of entries, make
+		 * sure we skip any unused one at the end as well.
+		 */
+		if (buf_count & 1)
+			buf_count++;
+		descr_count++;
 	}
-	mac->tx->next_to_clean += count * 2;
+	mac->tx->next_to_clean = i;
+
 	spin_unlock_irqrestore(&mac->tx->lock, flags);
 	netif_wake_queue(mac->netdev);
 
-	for (i = 0; i < count; i++) {
-		pci_unmap_single(mac->dma_pdev, dmas[i],
-				 skbs[i]->len, PCI_DMA_TODEVICE);
-		dev_kfree_skb_irq(skbs[i]);
-	}
+	for (i = 0; i < descr_count; i++)
+		pasemi_mac_unmap_tx_skb(mac, skbs[i], dmas[i]);
 
-	total_count += count;
+	total_count += descr_count;
 
 	/* If the batch was full, try to clean more */
-	if (count == 32)
+	if (descr_count == limit)
 		goto restart;
 
 	return total_count;
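
Structurally, the cleanup keeps the same lock discipline as before: descriptors are harvested and reset under the ring lock, while the expensive unmapping and skb freeing happen after it is dropped. A condensed, hypothetical sketch of that two-phase shape (the ring walk is elided; names match the patch):

/* Sketch only, simplified from pasemi_mac_clean_tx(): phase 1 copies
 * out skb pointers and DMA handles under the lock; phase 2 does the
 * slow pci_unmap_*()/dev_kfree_skb_irq() work with the lock released,
 * so the hot xmit path is blocked as briefly as possible.
 */
static int clean_tx_sketch(struct pasemi_mac *mac)
{
	struct sk_buff *skbs[TX_CLEAN_BATCHSIZE];
	dma_addr_t dmas[TX_CLEAN_BATCHSIZE][MAX_SKB_FRAGS+1];
	unsigned int descr_count = 0;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&mac->tx->lock, flags);
	/* ... walk completed ring entries: stash each skb and its DMA
	 * handles in skbs[]/dmas[], zero the descriptors, and advance
	 * next_to_clean by the packet's padded slot count ... */
	spin_unlock_irqrestore(&mac->tx->lock, flags);

	for (i = 0; i < descr_count; i++)
		pasemi_mac_unmap_tx_skb(mac, skbs[i], dmas[i]);

	return descr_count;
}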
@@ -997,9 +1036,11 @@ static int pasemi_mac_start_tx(struct sk_buff *skb, struct net_device *dev)
 {
 	struct pasemi_mac *mac = netdev_priv(dev);
 	struct pasemi_mac_txring *txring;
-	u64 dflags, mactx, ptr;
-	dma_addr_t map;
+	u64 dflags, mactx;
+	dma_addr_t map[MAX_SKB_FRAGS+1];
+	unsigned int map_size[MAX_SKB_FRAGS+1];
 	unsigned long flags;
+	int i, nfrags;
 
 	dflags = XCT_MACTX_O | XCT_MACTX_ST | XCT_MACTX_SS | XCT_MACTX_CRC_PAD;
 
@@ -1020,25 +1061,40 @@ static int pasemi_mac_start_tx(struct sk_buff *skb, struct net_device *dev)
 		}
 	}
 
-	map = pci_map_single(mac->dma_pdev, skb->data, skb->len, PCI_DMA_TODEVICE);
+	nfrags = skb_shinfo(skb)->nr_frags;
+
+	map[0] = pci_map_single(mac->dma_pdev, skb->data, skb_headlen(skb),
+				PCI_DMA_TODEVICE);
+	map_size[0] = skb_headlen(skb);
+	if (dma_mapping_error(map[0]))
+		goto out_err_nolock;
+
+	for (i = 0; i < nfrags; i++) {
+		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 
-	if (dma_mapping_error(map))
-		return NETDEV_TX_BUSY;
+		map[i+1] = pci_map_page(mac->dma_pdev, frag->page,
+					frag->page_offset, frag->size,
+					PCI_DMA_TODEVICE);
+		map_size[i+1] = frag->size;
+		if (dma_mapping_error(map[i+1])) {
+			nfrags = i;
+			goto out_err_nolock;
+		}
+	}
 
 	mactx = dflags | XCT_MACTX_LLEN(skb->len);
-	ptr = XCT_PTR_LEN(skb->len) | XCT_PTR_ADDR(map);
 
 	txring = mac->tx;
 
 	spin_lock_irqsave(&txring->lock, flags);
 
-	if (RING_AVAIL(txring) <= 2) {
+	if (RING_AVAIL(txring) <= nfrags+3) {
 		spin_unlock_irqrestore(&txring->lock, flags);
 		pasemi_mac_clean_tx(mac);
 		pasemi_mac_restart_tx_intr(mac);
 		spin_lock_irqsave(&txring->lock, flags);
 
-		if (RING_AVAIL(txring) <= 2) {
+		if (RING_AVAIL(txring) <= nfrags+3) {
 			/* Still no room -- stop the queue and wait for tx
 			 * intr when there's room.
 			 */
@@ -1048,25 +1104,40 @@ static int pasemi_mac_start_tx(struct sk_buff *skb, struct net_device *dev)
 	}
 
 	TX_RING(mac, txring->next_to_fill) = mactx;
-	TX_RING(mac, txring->next_to_fill+1) = ptr;
+	txring->next_to_fill++;
+	TX_RING_INFO(mac, txring->next_to_fill).skb = skb;
+	for (i = 0; i <= nfrags; i++) {
+		TX_RING(mac, txring->next_to_fill+i) =
+			XCT_PTR_LEN(map_size[i]) | XCT_PTR_ADDR(map[i]);
+		TX_RING_INFO(mac, txring->next_to_fill+i).dma = map[i];
+	}
+
+	/* We have to add an even number of 8-byte entries to the ring
+	 * even if the last one is unused. That means always an odd number
+	 * of pointers + one mactx descriptor.
+	 */
+	if (nfrags & 1)
+		nfrags++;
 
-	TX_RING_INFO(mac, txring->next_to_fill+1).dma = map;
-	TX_RING_INFO(mac, txring->next_to_fill+1).skb = skb;
+	txring->next_to_fill += nfrags + 1;
 
-	txring->next_to_fill += 2;
 
 	dev->stats.tx_packets++;
 	dev->stats.tx_bytes += skb->len;
 
 	spin_unlock_irqrestore(&txring->lock, flags);
 
-	write_dma_reg(mac, PAS_DMA_TXCHAN_INCR(mac->dma_txch), 1);
+	write_dma_reg(mac, PAS_DMA_TXCHAN_INCR(mac->dma_txch), (nfrags+2) >> 1);
 
 	return NETDEV_TX_OK;
 
 out_err:
 	spin_unlock_irqrestore(&txring->lock, flags);
-	pci_unmap_single(mac->dma_pdev, map, skb->len, PCI_DMA_TODEVICE);
+out_err_nolock:
+	while (nfrags--)
+		pci_unmap_single(mac->dma_pdev, map[nfrags], map_size[nfrags],
+				 PCI_DMA_TODEVICE);
+
 	return NETDEV_TX_BUSY;
 }
 
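
One subtlety at the end of the transmit path: the doorbell write is no longer a constant 1. After the padding above, the packet occupies nfrags+2 8-byte ring words, and PAS_DMA_TXCHAN_INCR is credited in two-word units, hence the shift. Worked values, from my reading of the arithmetic (nfrags here is already rounded up to even, as in the code):

/* Illustration of the doorbell accounting in pasemi_mac_start_tx():
 * ring words = 1 mactx + (nfrags + 1) pointers, nfrags padded even.
 *
 *   nr_frags  padded nfrags  ring words  (nfrags+2) >> 1
 *      0            0            2              1
 *      1            2            4              2
 *      2            2            4              2
 *      3            4            6              3
 */
static inline unsigned int txchan_incr(int padded_nfrags)
{
	return (padded_nfrags + 2) >> 1;	/* 2-word descriptor units */
}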
@@ -1202,7 +1273,7 @@ pasemi_mac_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 
 	netif_napi_add(dev, &mac->napi, pasemi_mac_poll, 64);
 
-	dev->features = NETIF_F_HW_CSUM | NETIF_F_LLTX;
+	dev->features = NETIF_F_HW_CSUM | NETIF_F_LLTX | NETIF_F_SG;
 
 	/* These should come out of the device tree eventually */
 	mac->dma_txch = index;