author		Alexey Brodkin <Alexey.Brodkin@synopsys.com>	2013-06-26 03:49:26 -0400
committer	David S. Miller <davem@davemloft.net>	2013-06-26 04:35:44 -0400
commit		a4a1139b242f03dfb8a5d7a86fa674bda1cf60b2 (patch)
tree		b3778e33ba3fb380727aaa42653f6e9f7971d1a2
parent		8599b52e14a1611dcb563289421bee76751f1d53 (diff)
arc_emac: fix compile-time errors & warnings on PPC64
As reported by the "kbuild test robot", there were some errors and warnings
when attempting to build the kernel with "make ARCH=powerpc allmodconfig".
This patch addresses both the errors and the warnings.
Below is a list of the changes introduced:
1. Fix compile-time errors (misspelled arguments in the "dma_unmap_single" calls) on PPC.
2. Use the DMA address instead of "skb->data" as the pointer to the data buffer.
This fixes warnings about pointer-to-int conversion on 64-bit systems.
3. Re-implement the initial allocation of Rx buffers in "arc_emac_open" the same
way they are re-allocated during operation (on packet reception), so that the
DMA address can again be used instead of "skb->data".
4. Explicitly use EMAC_BUFFER_SIZE for Rx buffer allocation.
Signed-off-by: Alexey Brodkin <abrodkin@synopsys.com>
Cc: netdev@vger.kernel.org
Cc: Andy Shevchenko <andy.shevchenko@gmail.com>
Cc: Francois Romieu <romieu@fr.zoreil.com>
Cc: Joe Perches <joe@perches.com>
Cc: Vineet Gupta <vgupta@synopsys.com>
Cc: Mischa Jonker <mjonker@synopsys.com>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Grant Likely <grant.likely@linaro.org>
Cc: Rob Herring <rob.herring@calxeda.com>
Cc: Paul Gortmaker <paul.gortmaker@windriver.com>
Cc: linux-kernel@vger.kernel.org
Cc: devicetree-discuss@lists.ozlabs.org
Cc: Florian Fainelli <florian@openwrt.org>
Cc: David Laight <david.laight@aculab.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
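For context, items 1 and 2 of the changelog come down to using the kernel's DMA-unmap bookkeeping helpers correctly and feeding the buffer descriptor the DMA address rather than "skb->data". Below is a minimal, illustrative sketch of that pattern, not code from this patch: the "buffer_state" layout mirrors the driver, EMAC_BUFFER_SIZE and struct arc_emac_bd are assumed to come from the driver's headers, and the "emac_rx_refill"/"emac_rx_unmap" helper names are hypothetical.

	struct buffer_state {
		struct sk_buff *skb;
		DEFINE_DMA_UNMAP_ADDR(addr);	/* records the dma_addr_t from dma_map_single() */
		DEFINE_DMA_UNMAP_LEN(len);	/* records the mapped length */
	};

	/* Allocate an Rx buffer, map it for the device and remember the mapping. */
	static int emac_rx_refill(struct net_device *ndev, struct buffer_state *rx_buff,
				  struct arc_emac_bd *rxbd)
	{
		dma_addr_t addr;

		rx_buff->skb = netdev_alloc_skb_ip_align(ndev, EMAC_BUFFER_SIZE);
		if (unlikely(!rx_buff->skb))
			return -ENOMEM;

		addr = dma_map_single(&ndev->dev, rx_buff->skb->data,
				      EMAC_BUFFER_SIZE, DMA_FROM_DEVICE);
		if (dma_mapping_error(&ndev->dev, addr)) {
			dev_kfree_skb(rx_buff->skb);
			return -ENOMEM;
		}

		/* Pass the struct pointer itself (not "&rx_buff") and use the
		 * field name declared above ("addr", not "mapping").
		 */
		dma_unmap_addr_set(rx_buff, addr, addr);
		dma_unmap_len_set(rx_buff, len, EMAC_BUFFER_SIZE);

		/* The descriptor gets the DMA address; a kernel pointer would
		 * not fit in the 32-bit descriptor field on 64-bit hosts.
		 */
		rxbd->data = cpu_to_le32(addr);
		return 0;
	}

	/* Undo the mapping once the buffer has been consumed. */
	static void emac_rx_unmap(struct net_device *ndev, struct buffer_state *rx_buff)
	{
		dma_unmap_single(&ndev->dev, dma_unmap_addr(rx_buff, addr),
				 dma_unmap_len(rx_buff, len), DMA_FROM_DEVICE);
	}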
-rw-r--r--	drivers/net/ethernet/arc/emac_main.c	65
1 file changed, 39 insertions(+), 26 deletions(-)
diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c
index 20345f6bf894..f1b121ee5525 100644
--- a/drivers/net/ethernet/arc/emac_main.c
+++ b/drivers/net/ethernet/arc/emac_main.c
@@ -171,8 +171,8 @@ static void arc_emac_tx_clean(struct net_device *ndev)
 			stats->tx_bytes += skb->len;
 		}
 
-		dma_unmap_single(&ndev->dev, dma_unmap_addr(&tx_buff, addr),
-				 dma_unmap_len(&tx_buff, len), DMA_TO_DEVICE);
+		dma_unmap_single(&ndev->dev, dma_unmap_addr(tx_buff, addr),
+				 dma_unmap_len(tx_buff, len), DMA_TO_DEVICE);
 
 		/* return the sk_buff to system */
 		dev_kfree_skb_irq(skb);
@@ -204,7 +204,6 @@ static int arc_emac_rx(struct net_device *ndev, int budget)
 		struct net_device_stats *stats = &priv->stats;
 		struct buffer_state *rx_buff = &priv->rx_buff[*last_rx_bd];
 		struct arc_emac_bd *rxbd = &priv->rxbd[*last_rx_bd];
-		unsigned int buflen = EMAC_BUFFER_SIZE;
 		unsigned int pktlen, info = le32_to_cpu(rxbd->info);
 		struct sk_buff *skb;
 		dma_addr_t addr;
@@ -226,7 +225,7 @@ static int arc_emac_rx(struct net_device *ndev, int budget)
 				netdev_err(ndev, "incomplete packet received\n");
 
 			/* Return ownership to EMAC */
-			rxbd->info = cpu_to_le32(FOR_EMAC | buflen);
+			rxbd->info = cpu_to_le32(FOR_EMAC | EMAC_BUFFER_SIZE);
 			stats->rx_errors++;
 			stats->rx_length_errors++;
 			continue;
@@ -240,11 +239,12 @@ static int arc_emac_rx(struct net_device *ndev, int budget)
 		skb->dev = ndev;
 		skb->protocol = eth_type_trans(skb, ndev);
 
-		dma_unmap_single(&ndev->dev, dma_unmap_addr(&rx_buff, addr),
-				 dma_unmap_len(&rx_buff, len), DMA_FROM_DEVICE);
+		dma_unmap_single(&ndev->dev, dma_unmap_addr(rx_buff, addr),
+				 dma_unmap_len(rx_buff, len), DMA_FROM_DEVICE);
 
 		/* Prepare the BD for next cycle */
-		rx_buff->skb = netdev_alloc_skb_ip_align(ndev, buflen);
+		rx_buff->skb = netdev_alloc_skb_ip_align(ndev,
+							 EMAC_BUFFER_SIZE);
 		if (unlikely(!rx_buff->skb)) {
 			stats->rx_errors++;
 			/* Because receive_skb is below, increment rx_dropped */
@@ -256,7 +256,7 @@ static int arc_emac_rx(struct net_device *ndev, int budget)
 		netif_receive_skb(skb);
 
 		addr = dma_map_single(&ndev->dev, (void *)rx_buff->skb->data,
-				      buflen, DMA_FROM_DEVICE);
+				      EMAC_BUFFER_SIZE, DMA_FROM_DEVICE);
 		if (dma_mapping_error(&ndev->dev, addr)) {
 			if (net_ratelimit())
 				netdev_err(ndev, "cannot dma map\n");
@@ -264,16 +264,16 @@ static int arc_emac_rx(struct net_device *ndev, int budget)
 			stats->rx_errors++;
 			continue;
 		}
-		dma_unmap_addr_set(&rx_buff, mapping, addr);
-		dma_unmap_len_set(&rx_buff, len, buflen);
+		dma_unmap_addr_set(rx_buff, addr, addr);
+		dma_unmap_len_set(rx_buff, len, EMAC_BUFFER_SIZE);
 
-		rxbd->data = cpu_to_le32(rx_buff->skb->data);
+		rxbd->data = cpu_to_le32(addr);
 
 		/* Make sure pointer to data buffer is set */
 		wmb();
 
 		/* Return ownership to EMAC */
-		rxbd->info = cpu_to_le32(FOR_EMAC | buflen);
+		rxbd->info = cpu_to_le32(FOR_EMAC | EMAC_BUFFER_SIZE);
 	}
 
 	return work_done;
@@ -376,8 +376,6 @@ static int arc_emac_open(struct net_device *ndev)
 {
 	struct arc_emac_priv *priv = netdev_priv(ndev);
 	struct phy_device *phy_dev = priv->phy_dev;
-	struct arc_emac_bd *bd;
-	struct sk_buff *skb;
 	int i;
 
 	phy_dev->autoneg = AUTONEG_ENABLE;
@@ -395,25 +393,40 @@
 		}
 	}
 
+	priv->last_rx_bd = 0;
+
 	/* Allocate and set buffers for Rx BD's */
-	bd = priv->rxbd;
 	for (i = 0; i < RX_BD_NUM; i++) {
-		skb = netdev_alloc_skb_ip_align(ndev, EMAC_BUFFER_SIZE);
-		if (unlikely(!skb))
+		dma_addr_t addr;
+		unsigned int *last_rx_bd = &priv->last_rx_bd;
+		struct arc_emac_bd *rxbd = &priv->rxbd[*last_rx_bd];
+		struct buffer_state *rx_buff = &priv->rx_buff[*last_rx_bd];
+
+		rx_buff->skb = netdev_alloc_skb_ip_align(ndev,
+							 EMAC_BUFFER_SIZE);
+		if (unlikely(!rx_buff->skb))
+			return -ENOMEM;
+
+		addr = dma_map_single(&ndev->dev, (void *)rx_buff->skb->data,
+				      EMAC_BUFFER_SIZE, DMA_FROM_DEVICE);
+		if (dma_mapping_error(&ndev->dev, addr)) {
+			netdev_err(ndev, "cannot dma map\n");
+			dev_kfree_skb(rx_buff->skb);
 			return -ENOMEM;
+		}
+		dma_unmap_addr_set(rx_buff, addr, addr);
+		dma_unmap_len_set(rx_buff, len, EMAC_BUFFER_SIZE);
 
-		priv->rx_buff[i].skb = skb;
-		bd->data = cpu_to_le32(skb->data);
+		rxbd->data = cpu_to_le32(addr);
 
 		/* Make sure pointer to data buffer is set */
 		wmb();
 
-		/* Set ownership to EMAC */
-		bd->info = cpu_to_le32(FOR_EMAC | EMAC_BUFFER_SIZE);
-		bd++;
-	}
+		/* Return ownership to EMAC */
+		rxbd->info = cpu_to_le32(FOR_EMAC | EMAC_BUFFER_SIZE);
 
-	priv->last_rx_bd = 0;
+		*last_rx_bd = (*last_rx_bd + 1) % RX_BD_NUM;
+	}
 
 	/* Clean Tx BD's */
 	memset(priv->txbd, 0, TX_RING_SZ);
@@ -543,11 +556,11 @@ static int arc_emac_tx(struct sk_buff *skb, struct net_device *ndev)
 		dev_kfree_skb(skb);
 		return NETDEV_TX_OK;
 	}
-	dma_unmap_addr_set(&priv->tx_buff[*txbd_curr], mapping, addr);
+	dma_unmap_addr_set(&priv->tx_buff[*txbd_curr], addr, addr);
 	dma_unmap_len_set(&priv->tx_buff[*txbd_curr], len, len);
 
 	priv->tx_buff[*txbd_curr].skb = skb;
-	priv->txbd[*txbd_curr].data = cpu_to_le32(skb->data);
+	priv->txbd[*txbd_curr].data = cpu_to_le32(addr);
 
 	/* Make sure pointer to data buffer is set */
 	wmb();