author		Stephen Hemminger <shemminger@osdl.org>	2006-09-26 14:57:41 -0400
committer	Jeff Garzik <jeff@garzik.org>	2006-09-27 17:56:31 -0400
commit		291ea6142b94cc3e3ae2216d3937a78697447471 (patch)
tree		8a38d22cf394a4372f88ade9cc5815b18e9d8578
parent		9fa1b1f33c4cbbe0ba7c0c166d170faaa735e53d (diff)
[PATCH] sky2: incremental transmit completion
Since a transmit can take several control blocks, the old code waited
until the last control block of a packet was marked as done before cleaning
anything up. This code processes the completion status incrementally, one
control block at a time. That frees slots in the tx ring sooner and reduces
the chance of the ring getting stuck.
Signed-off-by: Stephen Hemminger <shemminger@osdl.org>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
-rw-r--r--	drivers/net/sky2.c	95
-rw-r--r--	drivers/net/sky2.h	6
2 files changed, 50 insertions, 51 deletions
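Before the diff itself, here is a minimal, self-contained sketch of the completion strategy the patch adopts. The types, the RING_SIZE constant, and the printf reporting are simplified stand-ins, not the driver's own structures; only the shape of the loop mirrors the new sky2_tx_complete(): walk every slot from the consumer index up to the hardware's reported "done" slot, unmap each slot according to its opcode, and free the skb only at the slot that carries the end-of-packet flag, so a partially completed multi-descriptor frame never releases its skb early.

#include <stdio.h>

/* Simplified stand-ins for the ring structures; names mirror the patch
 * but the types are illustrative only. */
#define RING_SIZE 8
#define RING_NEXT(i) (((i) + 1) % RING_SIZE)

enum opcode { OP_PACKET, OP_BUFFER, OP_LARGESEND };

struct tx_desc {
	enum opcode op;	/* what kind of DMA mapping this slot holds */
	int eop;	/* last descriptor of the frame? */
	int skb_id;	/* which packet this slot belongs to */
};

/* Incremental completion: reclaim every slot up to 'done', even if the
 * frame it belongs to is not finished yet; only the EOP slot frees the skb. */
static unsigned tx_complete(struct tx_desc *ring, unsigned cons, unsigned done)
{
	unsigned idx;

	for (idx = cons; idx != done; idx = RING_NEXT(idx)) {
		struct tx_desc *d = &ring[idx];

		switch (d->op) {
		case OP_LARGESEND:
		case OP_PACKET:
			printf("slot %u: unmap head of skb %d\n", idx, d->skb_id);
			break;
		case OP_BUFFER:
			printf("slot %u: unmap fragment of skb %d\n", idx, d->skb_id);
			break;
		}
		if (d->eop)
			printf("slot %u: free skb %d\n", idx, d->skb_id);
	}
	return idx;	/* new consumer index: every visited slot is reusable */
}

int main(void)
{
	/* skb 0 uses two slots (head + fragment); skb 1 uses one slot. */
	struct tx_desc ring[RING_SIZE] = {
		{ OP_PACKET, 0, 0 }, { OP_BUFFER, 1, 0 }, { OP_PACKET, 1, 1 },
	};
	unsigned cons = 0;

	/* Hardware reports completion part-way through skb 0: slot 0 is
	 * reclaimed immediately, but skb 0 is not freed yet. */
	cons = tx_complete(ring, cons, 1);

	/* A later status report finishes skb 0 and skb 1. */
	cons = tx_complete(ring, cons, 3);
	printf("consumer index now %u\n", cons);
	return 0;
}

Keying the unmap on the per-slot opcode is what lets the old idx back-pointer and the tx_dist() partial-completion check disappear from the driver: the consumer index can advance one slot at a time instead of one whole packet at a time.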
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index b908596bd89a..64af764d0087 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -769,9 +769,16 @@ static inline struct sky2_tx_le *get_tx_le(struct sky2_port *sky2)
 	struct sky2_tx_le *le = sky2->tx_le + sky2->tx_prod;
 
 	sky2->tx_prod = RING_NEXT(sky2->tx_prod, TX_RING_SIZE);
+	le->ctrl = 0;
 	return le;
 }
 
+static inline struct tx_ring_info *tx_le_re(struct sky2_port *sky2,
+					    struct sky2_tx_le *le)
+{
+	return sky2->tx_ring + (le - sky2->tx_le);
+}
+
 /* Update chip's next pointer */
 static inline void sky2_put_idx(struct sky2_hw *hw, unsigned q, u16 idx)
 {
@@ -786,6 +793,7 @@ static inline struct sky2_rx_le *sky2_next_rx(struct sky2_port *sky2)
 {
 	struct sky2_rx_le *le = sky2->rx_le + sky2->rx_put;
 	sky2->rx_put = RING_NEXT(sky2->rx_put, RX_LE_SIZE);
+	le->ctrl = 0;
 	return le;
 }
 
@@ -805,7 +813,6 @@ static void sky2_rx_add(struct sky2_port *sky2, dma_addr_t map)
 	if (sky2->rx_addr64 != hi) {
 		le = sky2_next_rx(sky2);
 		le->addr = cpu_to_le32(hi);
-		le->ctrl = 0;
 		le->opcode = OP_ADDR64 | HW_OWNER;
 		sky2->rx_addr64 = high32(map + len);
 	}
@@ -813,7 +820,6 @@ static void sky2_rx_add(struct sky2_port *sky2, dma_addr_t map)
 	le = sky2_next_rx(sky2);
 	le->addr = cpu_to_le32((u32) map);
 	le->length = cpu_to_le16(len);
-	le->ctrl = 0;
 	le->opcode = OP_PACKET | HW_OWNER;
 }
 
@@ -877,7 +883,7 @@ static void sky2_rx_clean(struct sky2_port *sky2)
 
 	memset(sky2->rx_le, 0, RX_LE_BYTES);
 	for (i = 0; i < sky2->rx_pending; i++) {
-		struct ring_info *re = sky2->rx_ring + i;
+		struct rx_ring_info *re = sky2->rx_ring + i;
 
 		if (re->skb) {
 			pci_unmap_single(sky2->hw->pdev,
@@ -1008,7 +1014,7 @@ static int sky2_rx_start(struct sky2_port *sky2)
 
 	rx_set_checksum(sky2);
 	for (i = 0; i < sky2->rx_pending; i++) {
-		struct ring_info *re = sky2->rx_ring + i;
+		struct rx_ring_info *re = sky2->rx_ring + i;
 
 		re->skb = sky2_alloc_skb(sky2->netdev, sky2->rx_bufsize,
 					 GFP_KERNEL);
@@ -1094,7 +1100,7 @@ static int sky2_up(struct net_device *dev)
 		goto err_out;
 	memset(sky2->rx_le, 0, RX_LE_BYTES);
 
-	sky2->rx_ring = kcalloc(sky2->rx_pending, sizeof(struct ring_info),
+	sky2->rx_ring = kcalloc(sky2->rx_pending, sizeof(struct rx_ring_info),
 				GFP_KERNEL);
 	if (!sky2->rx_ring)
 		goto err_out;
@@ -1241,13 +1247,10 @@ static int sky2_xmit_frame(struct sk_buff *skb, struct net_device *dev)
 	mapping = pci_map_single(hw->pdev, skb->data, len, PCI_DMA_TODEVICE);
 	addr64 = high32(mapping);
 
-	re = sky2->tx_ring + sky2->tx_prod;
-
 	/* Send high bits if changed or crosses boundary */
 	if (addr64 != sky2->tx_addr64 || high32(mapping + len) != sky2->tx_addr64) {
 		le = get_tx_le(sky2);
 		le->addr = cpu_to_le32(addr64);
-		le->ctrl = 0;
 		le->opcode = OP_ADDR64 | HW_OWNER;
 		sky2->tx_addr64 = high32(mapping + len);
 	}
@@ -1263,7 +1266,6 @@ static int sky2_xmit_frame(struct sk_buff *skb, struct net_device *dev)
 			le = get_tx_le(sky2);
 			le->addr = cpu_to_le32(mss);
 			le->opcode = OP_LRGLEN | HW_OWNER;
-			le->ctrl = 0;
 			sky2->tx_last_mss = mss;
 		}
 	}
@@ -1276,7 +1278,6 @@ static int sky2_xmit_frame(struct sk_buff *skb, struct net_device *dev)
 			le = get_tx_le(sky2);
 			le->addr = 0;
 			le->opcode = OP_VLAN|HW_OWNER;
-			le->ctrl = 0;
 		} else
 			le->opcode |= OP_VLAN;
 		le->length = cpu_to_be16(vlan_tx_tag_get(skb));
@@ -1313,13 +1314,13 @@ static int sky2_xmit_frame(struct sk_buff *skb, struct net_device *dev)
 	le->ctrl = ctrl;
 	le->opcode = mss ? (OP_LARGESEND | HW_OWNER) : (OP_PACKET | HW_OWNER);
 
-	/* Record the transmit mapping info */
+	re = tx_le_re(sky2, le);
 	re->skb = skb;
 	pci_unmap_addr_set(re, mapaddr, mapping);
+	pci_unmap_len_set(re, maplen, len);
 
 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
-		struct tx_ring_info *fre;
+		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 
 		mapping = pci_map_page(hw->pdev, frag->page, frag->page_offset,
 				       frag->size, PCI_DMA_TODEVICE);
@@ -1338,12 +1339,12 @@ static int sky2_xmit_frame(struct sk_buff *skb, struct net_device *dev)
 		le->ctrl = ctrl;
 		le->opcode = OP_BUFFER | HW_OWNER;
 
-		fre = sky2->tx_ring
-			+ RING_NEXT((re - sky2->tx_ring) + i, TX_RING_SIZE);
-		pci_unmap_addr_set(fre, mapaddr, mapping);
+		re = tx_le_re(sky2, le);
+		re->skb = skb;
+		pci_unmap_addr_set(re, mapaddr, mapping);
+		pci_unmap_len_set(re, maplen, frag->size);
 	}
 
-	re->idx = sky2->tx_prod;
 	le->ctrl |= EOP;
 
 	if (tx_avail(sky2) <= MAX_SKB_TX_LE)
@@ -1361,49 +1362,47 @@ static int sky2_xmit_frame(struct sk_buff *skb, struct net_device *dev)
  * Free ring elements from starting at tx_cons until "done"
  *
  * NB: the hardware will tell us about partial completion of multi-part
- * buffers; these are deferred until completion.
+ * buffers so make sure not to free skb to early.
  */
 static void sky2_tx_complete(struct sky2_port *sky2, u16 done)
 {
 	struct net_device *dev = sky2->netdev;
 	struct pci_dev *pdev = sky2->hw->pdev;
-	u16 nxt, put;
-	unsigned i;
+	unsigned idx;
 
 	BUG_ON(done >= TX_RING_SIZE);
 
-	if (unlikely(netif_msg_tx_done(sky2)))
-		printk(KERN_DEBUG "%s: tx done, up to %u\n",
-		       dev->name, done);
-
-	for (put = sky2->tx_cons; put != done; put = nxt) {
-		struct tx_ring_info *re = sky2->tx_ring + put;
-		struct sk_buff *skb = re->skb;
-
-		nxt = re->idx;
-		BUG_ON(nxt >= TX_RING_SIZE);
-		prefetch(sky2->tx_ring + nxt);
-
-		/* Check for partial status */
-		if (tx_dist(put, done) < tx_dist(put, nxt))
+	for (idx = sky2->tx_cons; idx != done;
+	     idx = RING_NEXT(idx, TX_RING_SIZE)) {
+		struct sky2_tx_le *le = sky2->tx_le + idx;
+		struct tx_ring_info *re = sky2->tx_ring + idx;
+
+		switch(le->opcode & ~HW_OWNER) {
+		case OP_LARGESEND:
+		case OP_PACKET:
+			pci_unmap_single(pdev,
+					 pci_unmap_addr(re, mapaddr),
+					 pci_unmap_len(re, maplen),
+					 PCI_DMA_TODEVICE);
 			break;
-
-		skb = re->skb;
-		pci_unmap_single(pdev, pci_unmap_addr(re, mapaddr),
-				 skb_headlen(skb), PCI_DMA_TODEVICE);
-
-		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-			struct tx_ring_info *fre;
-			fre = sky2->tx_ring + RING_NEXT(put + i, TX_RING_SIZE);
-			pci_unmap_page(pdev, pci_unmap_addr(fre, mapaddr),
-				       skb_shinfo(skb)->frags[i].size,
+		case OP_BUFFER:
+			pci_unmap_page(pdev, pci_unmap_addr(re, mapaddr),
+				       pci_unmap_len(re, maplen),
 				       PCI_DMA_TODEVICE);
+			break;
+		}
+
+		if (le->ctrl & EOP) {
+			if (unlikely(netif_msg_tx_done(sky2)))
+				printk(KERN_DEBUG "%s: tx done %u\n",
+				       dev->name, idx);
+			dev_kfree_skb(re->skb);
 		}
 
-		dev_kfree_skb(skb);
+		le->opcode = 0; /* paranoia */
 	}
 
-	sky2->tx_cons = put;
+	sky2->tx_cons = idx;
 	if (tx_avail(sky2) > MAX_SKB_TX_LE + 4)
 		netif_wake_queue(dev);
 }
@@ -1843,7 +1842,7 @@ static struct sk_buff *sky2_receive(struct net_device *dev,
 				      u16 length, u32 status)
 {
 	struct sky2_port *sky2 = netdev_priv(dev);
-	struct ring_info *re = sky2->rx_ring + sky2->rx_next;
+	struct rx_ring_info *re = sky2->rx_ring + sky2->rx_next;
 	struct sk_buff *skb = NULL;
 
 	if (unlikely(netif_msg_rx_status(sky2)))
@@ -1889,7 +1888,7 @@ static struct sk_buff *sky2_receive(struct net_device *dev,
 		prefetch(skb->data);
 
 		re->mapaddr = pci_map_single(sky2->hw->pdev, nskb->data,
-					     sky2->rx_bufsize, PCI_DMA_FROMDEVICE);
+					       sky2->rx_bufsize, PCI_DMA_FROMDEVICE);
 	}
 
 	skb_put(skb, length);
diff --git a/drivers/net/sky2.h b/drivers/net/sky2.h
index b2981565e9e3..c1e45123d44f 100644
--- a/drivers/net/sky2.h
+++ b/drivers/net/sky2.h
@@ -1774,10 +1774,10 @@ struct sky2_status_le {
 struct tx_ring_info {
 	struct sk_buff *skb;
 	DECLARE_PCI_UNMAP_ADDR(mapaddr);
-	u16 idx;
+	DECLARE_PCI_UNMAP_ADDR(maplen);
 };
 
-struct ring_info {
+struct rx_ring_info {
 	struct sk_buff *skb;
 	dma_addr_t mapaddr;
 };
@@ -1799,7 +1799,7 @@ struct sky2_port {
 	u16 tx_last_mss;
 	u32 tx_tcpsum;
 
-	struct ring_info *rx_ring ____cacheline_aligned_in_smp;
+	struct rx_ring_info *rx_ring ____cacheline_aligned_in_smp;
 	struct sky2_rx_le *rx_le;
 	u32 rx_addr64;
 	u16 rx_next;		/* next re to check */