author    David S. Miller <davem@davemloft.net>  2015-07-23 03:41:16 -0400
committer David S. Miller <davem@davemloft.net>  2015-07-23 03:41:16 -0400
commit    c5e40ee287db61a79af1746954ee03ebbf1ff8a3 (patch)
tree      007da00e75e9b84766ac4868421705300e1e2e14 /drivers/net/ethernet/renesas/ravb_main.c
parent    052831879945be0d9fad2216b127147c565ec1b1 (diff)
parent    c5dfd654d0ec0a28fe81e7bd4d4fd984a9855e09 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Conflicts:
net/bridge/br_mdb.c
The br_mdb.c conflict involved a function call that was removed in 'net'
to fix a bug, but whose signature was changed in 'net-next'.
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/ethernet/renesas/ravb_main.c')
 drivers/net/ethernet/renesas/ravb_main.c | 74 ++++++++++++++++++++++++++++++++++++++++++------------------------------------
 1 file changed, 38 insertions(+), 36 deletions(-)
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index 47287c1cc7e0..779bb58a068e 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -228,9 +228,7 @@ static void ravb_ring_format(struct net_device *ndev, int q)
 	struct ravb_desc *desc;
 	int rx_ring_size = sizeof(*rx_desc) * priv->num_rx_ring[q];
 	int tx_ring_size = sizeof(*tx_desc) * priv->num_tx_ring[q];
-	struct sk_buff *skb;
 	dma_addr_t dma_addr;
-	void *buffer;
 	int i;
 
 	priv->cur_rx[q] = 0;
@@ -241,41 +239,28 @@ static void ravb_ring_format(struct net_device *ndev, int q)
 	memset(priv->rx_ring[q], 0, rx_ring_size);
 	/* Build RX ring buffer */
 	for (i = 0; i < priv->num_rx_ring[q]; i++) {
-		priv->rx_skb[q][i] = NULL;
-		skb = netdev_alloc_skb(ndev, PKT_BUF_SZ + RAVB_ALIGN - 1);
-		if (!skb)
-			break;
-		ravb_set_buffer_align(skb);
 		/* RX descriptor */
 		rx_desc = &priv->rx_ring[q][i];
 		/* The size of the buffer should be on 16-byte boundary. */
 		rx_desc->ds_cc = cpu_to_le16(ALIGN(PKT_BUF_SZ, 16));
-		dma_addr = dma_map_single(&ndev->dev, skb->data,
+		dma_addr = dma_map_single(&ndev->dev, priv->rx_skb[q][i]->data,
 					  ALIGN(PKT_BUF_SZ, 16),
 					  DMA_FROM_DEVICE);
-		if (dma_mapping_error(&ndev->dev, dma_addr)) {
-			dev_kfree_skb(skb);
-			break;
-		}
-		priv->rx_skb[q][i] = skb;
+		/* We just set the data size to 0 for a failed mapping which
+		 * should prevent DMA from happening...
+		 */
+		if (dma_mapping_error(&ndev->dev, dma_addr))
+			rx_desc->ds_cc = cpu_to_le16(0);
 		rx_desc->dptr = cpu_to_le32(dma_addr);
 		rx_desc->die_dt = DT_FEMPTY;
 	}
 	rx_desc = &priv->rx_ring[q][i];
 	rx_desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma[q]);
 	rx_desc->die_dt = DT_LINKFIX; /* type */
-	priv->dirty_rx[q] = (u32)(i - priv->num_rx_ring[q]);
 
 	memset(priv->tx_ring[q], 0, tx_ring_size);
 	/* Build TX ring buffer */
 	for (i = 0; i < priv->num_tx_ring[q]; i++) {
-		priv->tx_skb[q][i] = NULL;
-		priv->tx_buffers[q][i] = NULL;
-		buffer = kmalloc(PKT_BUF_SZ + RAVB_ALIGN - 1, GFP_KERNEL);
-		if (!buffer)
-			break;
-		/* Aligned TX buffer */
-		priv->tx_buffers[q][i] = buffer;
 		tx_desc = &priv->tx_ring[q][i];
 		tx_desc->die_dt = DT_EEMPTY;
 	}
@@ -298,7 +283,10 @@ static void ravb_ring_format(struct net_device *ndev, int q)
 static int ravb_ring_init(struct net_device *ndev, int q)
 {
 	struct ravb_private *priv = netdev_priv(ndev);
+	struct sk_buff *skb;
 	int ring_size;
+	void *buffer;
+	int i;
 
 	/* Allocate RX and TX skb rings */
 	priv->rx_skb[q] = kcalloc(priv->num_rx_ring[q],
@@ -308,12 +296,28 @@ static int ravb_ring_init(struct net_device *ndev, int q)
 	if (!priv->rx_skb[q] || !priv->tx_skb[q])
 		goto error;
 
+	for (i = 0; i < priv->num_rx_ring[q]; i++) {
+		skb = netdev_alloc_skb(ndev, PKT_BUF_SZ + RAVB_ALIGN - 1);
+		if (!skb)
+			goto error;
+		ravb_set_buffer_align(skb);
+		priv->rx_skb[q][i] = skb;
+	}
+
 	/* Allocate rings for the aligned buffers */
 	priv->tx_buffers[q] = kcalloc(priv->num_tx_ring[q],
 				      sizeof(*priv->tx_buffers[q]), GFP_KERNEL);
 	if (!priv->tx_buffers[q])
 		goto error;
 
+	for (i = 0; i < priv->num_tx_ring[q]; i++) {
+		buffer = kmalloc(PKT_BUF_SZ + RAVB_ALIGN - 1, GFP_KERNEL);
+		if (!buffer)
+			goto error;
+		/* Aligned TX buffer */
+		priv->tx_buffers[q][i] = buffer;
+	}
+
 	/* Allocate all RX descriptors. */
 	ring_size = sizeof(struct ravb_ex_rx_desc) * (priv->num_rx_ring[q] + 1);
 	priv->rx_ring[q] = dma_alloc_coherent(NULL, ring_size,
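
The hunks above move all buffer allocation out of ravb_ring_format() and into
ravb_ring_init(), so (re)formatting the rings can no longer fail halfway
through. A minimal user-space sketch of that split, with hypothetical names
(rx_slot, ring_init and ring_format are illustrative, not the driver's API)
and plain pointers standing in for DMA mappings:

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

struct rx_slot {
	void     *buf;          /* preallocated receive buffer */
	uint16_t  len;          /* 0 marks an unusable descriptor */
	uintptr_t dma;          /* stand-in for a bus address */
};

/* Allocation happens once, at init time, and may fail cleanly. */
static int ring_init(struct rx_slot *ring, int n, size_t bufsz)
{
	for (int i = 0; i < n; i++) {
		ring[i].buf = malloc(bufsz);
		if (!ring[i].buf)
			return -1;      /* caller unwinds and frees */
	}
	return 0;
}

/* Formatting can no longer fail: a bad mapping is recorded as a
 * zero-length slot for the consumer to skip, instead of freeing the
 * buffer and truncating the ring.
 */
static void ring_format(struct rx_slot *ring, int n, size_t bufsz)
{
	for (int i = 0; i < n; i++) {
		ring[i].len = (uint16_t)bufsz;
		ring[i].dma = (uintptr_t)ring[i].buf;   /* models dma_map_single() */
		if (!ring[i].dma)                       /* models dma_mapping_error() */
			ring[i].len = 0;
	}
}

int main(void)
{
	enum { N = 4, BUFSZ = 2048 };
	struct rx_slot ring[N];

	memset(ring, 0, sizeof(ring));
	if (ring_init(ring, N, BUFSZ))
		return 1;
	ring_format(ring, N, BUFSZ);
	return 0;
}

The key property is that ring_format() returns void: every slot already owns
a buffer, and the only per-slot failure left (a bad mapping) is recorded in
the descriptor itself rather than by unwinding.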
@@ -524,6 +528,10 @@ static bool ravb_rx(struct net_device *ndev, int *quota, int q)
 		if (--boguscnt < 0)
 			break;
 
+		/* We use 0-byte descriptors to mark the DMA mapping errors */
+		if (!pkt_len)
+			continue;
+
 		if (desc_status & MSC_MC)
 			stats->multicast++;
 
@@ -543,10 +551,9 @@ static bool ravb_rx(struct net_device *ndev, int *quota, int q)
 
 		skb = priv->rx_skb[q][entry];
 		priv->rx_skb[q][entry] = NULL;
-		dma_sync_single_for_cpu(&ndev->dev,
-					le32_to_cpu(desc->dptr),
-					ALIGN(PKT_BUF_SZ, 16),
-					DMA_FROM_DEVICE);
+		dma_unmap_single(&ndev->dev, le32_to_cpu(desc->dptr),
+				 ALIGN(PKT_BUF_SZ, 16),
+				 DMA_FROM_DEVICE);
 		get_ts &= (q == RAVB_NC) ?
 			  RAVB_RXTSTAMP_TYPE_V2_L2_EVENT :
 			  ~RAVB_RXTSTAMP_TYPE_V2_L2_EVENT;
@@ -584,17 +591,15 @@ static bool ravb_rx(struct net_device *ndev, int *quota, int q)
 		if (!skb)
 			break;  /* Better luck next round. */
 		ravb_set_buffer_align(skb);
-		dma_unmap_single(&ndev->dev, le32_to_cpu(desc->dptr),
-				 ALIGN(PKT_BUF_SZ, 16),
-				 DMA_FROM_DEVICE);
 		dma_addr = dma_map_single(&ndev->dev, skb->data,
 					  le16_to_cpu(desc->ds_cc),
 					  DMA_FROM_DEVICE);
 		skb_checksum_none_assert(skb);
-		if (dma_mapping_error(&ndev->dev, dma_addr)) {
-			dev_kfree_skb_any(skb);
-			break;
-		}
+		/* We just set the data size to 0 for a failed mapping
+		 * which should prevent DMA from happening...
+		 */
+		if (dma_mapping_error(&ndev->dev, dma_addr))
+			desc->ds_cc = cpu_to_le16(0);
 		desc->dptr = cpu_to_le32(dma_addr);
 		priv->rx_skb[q][entry] = skb;
 	}
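
On the completion side, the hunks above pair each mapping with a real
dma_unmap_single() before the skb is passed up the stack, and the refill path
flags a failed remap with ds_cc = 0 instead of freeing the skb and bailing
out. Continuing the sketch above (same hypothetical rx_slot type and
includes), a consumer honouring the zero-length convention might look like:

/* A slot whose length is 0 was never mapped, so there is nothing to
 * unmap or deliver; this mirrors the "if (!pkt_len) continue;" test
 * the patch adds to ravb_rx().
 */
static int ring_poll(struct rx_slot *ring, int n, int budget,
		     void (*deliver)(void *buf, uint16_t len))
{
	int done = 0;

	for (int i = 0; i < n && done < budget; i++) {
		if (!ring[i].len)       /* 0-byte slot == failed mapping */
			continue;
		/* the real driver unmaps here before touching the data */
		deliver(ring[i].buf, ring[i].len);
		done++;
	}
	return done;
}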
@@ -1279,7 +1284,6 @@ static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 	u32 dma_addr;
 	void *buffer;
 	u32 entry;
-	u32 tccr;
 
 	spin_lock_irqsave(&priv->lock, flags);
 	if (priv->cur_tx[q] - priv->dirty_tx[q] >= priv->num_tx_ring[q]) {
@@ -1328,9 +1332,7 @@ static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 	dma_wmb();
 	desc->die_dt = DT_FSINGLE;
 
-	tccr = ravb_read(ndev, TCCR);
-	if (!(tccr & (TCCR_TSRQ0 << q)))
-		ravb_write(ndev, tccr | (TCCR_TSRQ0 << q), TCCR);
+	ravb_write(ndev, ravb_read(ndev, TCCR) | (TCCR_TSRQ0 << q), TCCR);
 
 	priv->cur_tx[q]++;
 	if (priv->cur_tx[q] - priv->dirty_tx[q] >= priv->num_tx_ring[q] &&
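
The final hunk replaces a read-test-write of TCCR with an unconditional
read-modify-write. A plausible reading, not spelled out in this merge: the
hardware clears the transmit-start request bit (TCCR_TSRQ0 << q) on its own
when it finishes the frames it has fetched, so testing the bit first can
race; the bit looks set, the write is skipped, and the frame just queued
never gets a start request. Setting an already-set bit is harmless, so the
unconditional write cannot lose a kick. A small user-space model of that
race (tccr, both kick variants, and the simulated hardware clear are all
hypothetical stand-ins, not driver code):

#include <stdint.h>
#include <stdio.h>

#define TSRQ0 0x1u

static volatile uint32_t tccr;  /* models the TCCR register */

static void kick_conditional(int q)
{
	uint32_t v = tccr;              /* read */
	/* ...hardware may clear TSRQ here, after fetching old frames... */
	if (!(v & (TSRQ0 << q)))        /* stale test */
		tccr = v | (TSRQ0 << q);
	/* else: the new frame is queued but never kicked */
}

static void kick_unconditional(int q)
{
	tccr |= TSRQ0 << q;             /* setting a set bit is a no-op */
}

int main(void)
{
	tccr = TSRQ0;                   /* device still busy with queue 0 */
	kick_conditional(0);            /* skipped: bit looks already set */
	tccr = 0;                       /* device finishes, clears TSRQ */
	printf("after conditional kick:   %#x (frame stranded)\n",
	       (unsigned)tccr);
	kick_unconditional(0);
	printf("after unconditional kick: %#x\n", (unsigned)tccr);
	return 0;
}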