author     Mika Westerberg <mika.westerberg@linux.intel.com>   2017-11-24 06:05:36 -0500
committer  David S. Miller <davem@davemloft.net>                2017-11-25 09:56:02 -0500
commit     540c11159dcece5c4a8157a7b39336316085470f
tree       9d638840ef0dd103ccb2adc4981b8d4e77409166
parent     fa6d7cb5d76cf0467c61420fc9238045aedfd379
net: thunderbolt: Stop using zero to mean no valid DMA mapping
Commit 86dabda426ac ("net: thunderbolt: Clear finished Tx frame bus address in
tbnet_tx_callback()") fixed a DMA-API violation where the driver called
dma_unmap_page() in tbnet_free_buffers() for a bus address that might already
have been unmapped. The fix was to zero out the bus address of a frame in
tbnet_tx_callback().

However, as pointed out by David Miller, zero might well be a valid mapping
(at least in theory), so it is not a good idea to use it as a "no mapping"
marker.

It turns out that we do not need the whole map/unmap dance for Tx buffers at
all. Instead, we can map the buffers when they are initially allocated and
unmap them when the interface is brought down. In between, we just DMA sync
the buffers for the CPU or the device as needed.

Signed-off-by: Mika Westerberg <mika.westerberg@linux.intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
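For illustration, the following is a minimal sketch of the buffer lifetime the
commit describes: map a Tx page once when it is allocated, sync ownership
between CPU and device around each use, and unmap only at teardown. The struct
and helper names here (tx_buf, tx_buf_init, and so on) are illustrative
assumptions, not the driver's; the real implementation is in
drivers/net/thunderbolt.c in the diff below.

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/gfp.h>

struct tx_buf {
	struct page *page;
	dma_addr_t dma;		/* valid for the whole lifetime of the buffer */
};

/* Map once when the buffer is allocated (e.g. when the interface comes up). */
static int tx_buf_init(struct device *dma_dev, struct tx_buf *buf, size_t size)
{
	buf->page = alloc_page(GFP_KERNEL);
	if (!buf->page)
		return -ENOMEM;

	buf->dma = dma_map_page(dma_dev, buf->page, 0, size, DMA_TO_DEVICE);
	if (dma_mapping_error(dma_dev, buf->dma)) {
		__free_page(buf->page);
		buf->page = NULL;
		return -ENOMEM;
	}
	return 0;
}

/* Before the CPU fills the buffer: hand ownership back to the CPU. */
static void tx_buf_for_cpu(struct device *dma_dev, struct tx_buf *buf, size_t size)
{
	dma_sync_single_for_cpu(dma_dev, buf->dma, size, DMA_TO_DEVICE);
}

/* After the CPU is done writing: hand ownership to the device. */
static void tx_buf_for_device(struct device *dma_dev, struct tx_buf *buf, size_t size)
{
	dma_sync_single_for_device(dma_dev, buf->dma, size, DMA_TO_DEVICE);
}

/* Unmap only when the buffer is torn down (e.g. when the interface goes down). */
static void tx_buf_free(struct device *dma_dev, struct tx_buf *buf, size_t size)
{
	if (!buf->page)
		return;
	dma_unmap_page(dma_dev, buf->dma, size, DMA_TO_DEVICE);
	__free_page(buf->page);
	buf->page = NULL;
}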
-rw-r--r--  drivers/net/thunderbolt.c | 57
1 file changed, 24 insertions(+), 33 deletions(-)
diff --git a/drivers/net/thunderbolt.c b/drivers/net/thunderbolt.c
index 228d4aa6d9ae..ca5e375de27c 100644
--- a/drivers/net/thunderbolt.c
+++ b/drivers/net/thunderbolt.c
@@ -335,7 +335,7 @@ static void tbnet_free_buffers(struct tbnet_ring *ring)
 	if (ring->ring->is_tx) {
 		dir = DMA_TO_DEVICE;
 		order = 0;
-		size = tbnet_frame_size(tf);
+		size = TBNET_FRAME_SIZE;
 	} else {
 		dir = DMA_FROM_DEVICE;
 		order = TBNET_RX_PAGE_ORDER;
@@ -512,6 +512,7 @@ err_free:
 static struct tbnet_frame *tbnet_get_tx_buffer(struct tbnet *net)
 {
 	struct tbnet_ring *ring = &net->tx_ring;
+	struct device *dma_dev = tb_ring_dma_device(ring->ring);
 	struct tbnet_frame *tf;
 	unsigned int index;
 
@@ -522,7 +523,9 @@ static struct tbnet_frame *tbnet_get_tx_buffer(struct tbnet *net)
 
 	tf = &ring->frames[index];
 	tf->frame.size = 0;
-	tf->frame.buffer_phy = 0;
+
+	dma_sync_single_for_cpu(dma_dev, tf->frame.buffer_phy,
+				tbnet_frame_size(tf), DMA_TO_DEVICE);
 
 	return tf;
 }
@@ -531,13 +534,8 @@ static void tbnet_tx_callback(struct tb_ring *ring, struct ring_frame *frame,
 			     bool canceled)
 {
 	struct tbnet_frame *tf = container_of(frame, typeof(*tf), frame);
-	struct device *dma_dev = tb_ring_dma_device(ring);
 	struct tbnet *net = netdev_priv(tf->dev);
 
-	dma_unmap_page(dma_dev, tf->frame.buffer_phy, tbnet_frame_size(tf),
-		       DMA_TO_DEVICE);
-	tf->frame.buffer_phy = 0;
-
 	/* Return buffer to the ring */
 	net->tx_ring.prod++;
 
@@ -548,10 +546,12 @@ static void tbnet_tx_callback(struct tb_ring *ring, struct ring_frame *frame,
 static int tbnet_alloc_tx_buffers(struct tbnet *net)
 {
 	struct tbnet_ring *ring = &net->tx_ring;
+	struct device *dma_dev = tb_ring_dma_device(ring->ring);
 	unsigned int i;
 
 	for (i = 0; i < TBNET_RING_SIZE; i++) {
 		struct tbnet_frame *tf = &ring->frames[i];
+		dma_addr_t dma_addr;
 
 		tf->page = alloc_page(GFP_KERNEL);
 		if (!tf->page) {
@@ -559,7 +559,17 @@ static int tbnet_alloc_tx_buffers(struct tbnet *net)
 			return -ENOMEM;
 		}
 
+		dma_addr = dma_map_page(dma_dev, tf->page, 0, TBNET_FRAME_SIZE,
+					DMA_TO_DEVICE);
+		if (dma_mapping_error(dma_dev, dma_addr)) {
+			__free_page(tf->page);
+			tf->page = NULL;
+			tbnet_free_buffers(ring);
+			return -ENOMEM;
+		}
+
 		tf->dev = net->dev;
+		tf->frame.buffer_phy = dma_addr;
 		tf->frame.callback = tbnet_tx_callback;
 		tf->frame.sof = TBIP_PDF_FRAME_START;
 		tf->frame.eof = TBIP_PDF_FRAME_END;
@@ -881,19 +891,6 @@ static int tbnet_stop(struct net_device *dev)
 	return 0;
 }
 
-static bool tbnet_xmit_map(struct device *dma_dev, struct tbnet_frame *tf)
-{
-	dma_addr_t dma_addr;
-
-	dma_addr = dma_map_page(dma_dev, tf->page, 0, tbnet_frame_size(tf),
-				DMA_TO_DEVICE);
-	if (dma_mapping_error(dma_dev, dma_addr))
-		return false;
-
-	tf->frame.buffer_phy = dma_addr;
-	return true;
-}
-
 static bool tbnet_xmit_csum_and_map(struct tbnet *net, struct sk_buff *skb,
 				    struct tbnet_frame **frames, u32 frame_count)
 {
@@ -908,13 +905,14 @@ static bool tbnet_xmit_csum_and_map(struct tbnet *net, struct sk_buff *skb,
 
 	if (skb->ip_summed != CHECKSUM_PARTIAL) {
 		/* No need to calculate checksum so we just update the
-		 * total frame count and map the frames for DMA.
+		 * total frame count and sync the frames for DMA.
 		 */
 		for (i = 0; i < frame_count; i++) {
 			hdr = page_address(frames[i]->page);
 			hdr->frame_count = cpu_to_le32(frame_count);
-			if (!tbnet_xmit_map(dma_dev, frames[i]))
-				goto err_unmap;
+			dma_sync_single_for_device(dma_dev,
+				frames[i]->frame.buffer_phy,
+				tbnet_frame_size(frames[i]), DMA_TO_DEVICE);
 		}
 
 		return true;
@@ -983,21 +981,14 @@ static bool tbnet_xmit_csum_and_map(struct tbnet *net, struct sk_buff *skb,
 	*tucso = csum_fold(wsum);
 
 	/* Checksum is finally calculated and we don't touch the memory
-	 * anymore, so DMA map the frames now.
+	 * anymore, so DMA sync the frames now.
 	 */
 	for (i = 0; i < frame_count; i++) {
-		if (!tbnet_xmit_map(dma_dev, frames[i]))
-			goto err_unmap;
+		dma_sync_single_for_device(dma_dev, frames[i]->frame.buffer_phy,
+			tbnet_frame_size(frames[i]), DMA_TO_DEVICE);
 	}
 
 	return true;
-
-err_unmap:
-	while (i--)
-		dma_unmap_page(dma_dev, frames[i]->frame.buffer_phy,
-			       tbnet_frame_size(frames[i]), DMA_TO_DEVICE);
-
-	return false;
 }
 
 static void *tbnet_kmap_frag(struct sk_buff *skb, unsigned int frag_num,