author    Ben Hutchings <ben.hutchings@codethink.co.uk>  2015-01-26 19:50:24 -0500
committer David S. Miller <davem@davemloft.net>          2015-01-27 03:18:54 -0500
commit    52b9fa3696c44151a2f1d361a00be7c5513db026 (patch)
tree      cd61f6cb3d68d8a6a6351ff84dcfc3db3c24b2b5 /drivers/net
parent    aa3933b87309c61b4fa4bb93c1ad4c3f08afb1b8 (diff)
sh_eth: Fix DMA-API usage for RX buffers
- Use the return value of dma_map_single(), rather than calling
  virt_to_phys() separately
- Check for mapping failure
- Call dma_unmap_single() rather than dma_sync_single_for_cpu()

Signed-off-by: Ben Hutchings <ben.hutchings@codethink.co.uk>
Signed-off-by: David S. Miller <davem@davemloft.net>
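The pattern the first two fixes converge on can be sketched as below. This is a minimal illustration of the DMA-API rules involved, not the driver's code: example_rx_refill(), its slot/addr output parameters, and buf_len are hypothetical names invented for the sketch.

#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Keep the dma_addr_t returned by dma_map_single() instead of deriving
 * a bus address with virt_to_phys(), and check dma_mapping_error()
 * before publishing the buffer to the hardware.
 */
static int example_rx_refill(struct net_device *ndev, size_t buf_len,
			     struct sk_buff **slot, dma_addr_t *addr)
{
	struct sk_buff *skb;
	dma_addr_t dma_addr;

	skb = netdev_alloc_skb(ndev, buf_len);
	if (!skb)
		return -ENOMEM;

	dma_addr = dma_map_single(&ndev->dev, skb->data, buf_len,
				  DMA_FROM_DEVICE);
	if (dma_mapping_error(&ndev->dev, dma_addr)) {
		kfree_skb(skb);		/* mapping failed: drop the skb */
		return -ENOMEM;
	}

	*slot = skb;	/* publish only once the mapping is known good */
	*addr = dma_addr;
	return 0;
}

Publishing the skb pointer only after a successful mapping is what lets the ring-format and refill loops in the diff below break out cleanly on failure without leaving a half-initialized descriptor behind.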
Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/ethernet/renesas/sh_eth.c | 34 ++++++++++++-----------
1 file changed, 23 insertions(+), 11 deletions(-)
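The third fix replaces dma_sync_single_for_cpu() with dma_unmap_single() on the completion path. The ownership rule it enforces can be sketched roughly as follows; example_rx_complete() is again a hypothetical helper, not the driver's code, and a real driver would pass its own mapped buffer length.

#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* A streaming DMA_FROM_DEVICE mapping must be ended with
 * dma_unmap_single() before the CPU and the network stack take
 * ownership of the buffer; dma_sync_single_for_cpu() only borrows
 * ownership and leaves the mapping live.
 */
static void example_rx_complete(struct net_device *ndev, struct sk_buff *skb,
				dma_addr_t dma_addr, size_t buf_len,
				unsigned int pkt_len)
{
	dma_unmap_single(&ndev->dev, dma_addr, buf_len, DMA_FROM_DEVICE);
	skb_put(skb, pkt_len);			/* expose the received bytes */
	skb->protocol = eth_type_trans(skb, ndev);
	netif_receive_skb(skb);			/* hand the skb to the stack */
}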
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 4a085da57188..04283fe0e6a7 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -1123,6 +1123,7 @@ static void sh_eth_ring_format(struct net_device *ndev)
 	int rx_ringsize = sizeof(*rxdesc) * mdp->num_rx_ring;
 	int tx_ringsize = sizeof(*txdesc) * mdp->num_tx_ring;
 	int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN - 1;
+	dma_addr_t dma_addr;
 
 	mdp->cur_rx = 0;
 	mdp->cur_tx = 0;
@@ -1136,7 +1137,6 @@ static void sh_eth_ring_format(struct net_device *ndev)
 		/* skb */
 		mdp->rx_skbuff[i] = NULL;
 		skb = netdev_alloc_skb(ndev, skbuff_size);
-		mdp->rx_skbuff[i] = skb;
 		if (skb == NULL)
 			break;
 		sh_eth_set_receive_align(skb);
@@ -1145,9 +1145,15 @@ static void sh_eth_ring_format(struct net_device *ndev)
 		rxdesc = &mdp->rx_ring[i];
 		/* The size of the buffer is a multiple of 16 bytes. */
 		rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);
-		dma_map_single(&ndev->dev, skb->data, rxdesc->buffer_length,
-			       DMA_FROM_DEVICE);
-		rxdesc->addr = virt_to_phys(skb->data);
+		dma_addr = dma_map_single(&ndev->dev, skb->data,
+					  rxdesc->buffer_length,
+					  DMA_FROM_DEVICE);
+		if (dma_mapping_error(&ndev->dev, dma_addr)) {
+			kfree_skb(skb);
+			break;
+		}
+		mdp->rx_skbuff[i] = skb;
+		rxdesc->addr = dma_addr;
 		rxdesc->status = cpu_to_edmac(mdp, RD_RACT | RD_RFP);
 
 		/* Rx descriptor address set */
@@ -1432,6 +1438,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
 	u16 pkt_len = 0;
 	u32 desc_status;
 	int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN - 1;
+	dma_addr_t dma_addr;
 
 	boguscnt = min(boguscnt, *quota);
 	limit = boguscnt;
@@ -1479,9 +1486,9 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
 			mdp->rx_skbuff[entry] = NULL;
 			if (mdp->cd->rpadir)
 				skb_reserve(skb, NET_IP_ALIGN);
-			dma_sync_single_for_cpu(&ndev->dev, rxdesc->addr,
-						ALIGN(mdp->rx_buf_sz, 16),
-						DMA_FROM_DEVICE);
+			dma_unmap_single(&ndev->dev, rxdesc->addr,
+					 ALIGN(mdp->rx_buf_sz, 16),
+					 DMA_FROM_DEVICE);
 			skb_put(skb, pkt_len);
 			skb->protocol = eth_type_trans(skb, ndev);
 			netif_receive_skb(skb);
@@ -1501,15 +1508,20 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
 
 		if (mdp->rx_skbuff[entry] == NULL) {
 			skb = netdev_alloc_skb(ndev, skbuff_size);
-			mdp->rx_skbuff[entry] = skb;
 			if (skb == NULL)
 				break;	/* Better luck next round. */
 			sh_eth_set_receive_align(skb);
-			dma_map_single(&ndev->dev, skb->data,
-				       rxdesc->buffer_length, DMA_FROM_DEVICE);
+			dma_addr = dma_map_single(&ndev->dev, skb->data,
+						  rxdesc->buffer_length,
+						  DMA_FROM_DEVICE);
+			if (dma_mapping_error(&ndev->dev, dma_addr)) {
+				kfree_skb(skb);
+				break;
+			}
+			mdp->rx_skbuff[entry] = skb;
 
 			skb_checksum_none_assert(skb);
-			rxdesc->addr = virt_to_phys(skb->data);
+			rxdesc->addr = dma_addr;
 		}
 		if (entry >= mdp->num_rx_ring - 1)
 			rxdesc->status |=