author		Olof Johansson <olof@lixom.net>			2007-10-02 17:25:53 -0400
committer	David S. Miller <davem@sunset.davemloft.net>	2007-10-10 19:54:23 -0400
commit		fc9e4d2a93dab4a995e2e75725577b9a60154cbc (patch)
tree		efa9ce615e57fdb4bb94d0920daf04a26b30c4ed /drivers
parent		18eec695427ce1258fb5dad0ac180fa4d6f64af7 (diff)
pasemi_mac: rework ring management
Rework ring management, switching to an opaque ring format instead of
the struct-based descriptor+pointer setup, since it will be needed for
SG support.
Signed-off-by: Olof Johansson <olof@lixom.net>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
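
As an aside for readers of the diff below: with this change a descriptor is simply two consecutive u64 slots in an opaque ring -- the command/status word (mactx/macrx) followed by the buffer-pointer word -- and the fill/clean counters advance by two per packet, with every access masking the index by the power-of-two ring size. The stand-alone C sketch below is illustrative only, not code from the patch (the struct and function names are invented); it models that index arithmetic and the "room for two slots" check seen in pasemi_mac_start_tx().

#include <stdint.h>
#include <stdio.h>

#define RING_SIZE 64                       /* power of two, like TX_RING_SIZE */
#define RING_SLOT(n)   ((n) & (RING_SIZE - 1))
#define RING_USED(r)   ((r)->next_to_fill - (r)->next_to_clean)
#define RING_AVAIL(r)  (RING_SIZE - RING_USED(r))

struct ring {
        uint64_t slots[RING_SIZE];         /* opaque ring: cmd word, ptr word, ... */
        unsigned int next_to_fill;
        unsigned int next_to_clean;
};

/* Queue one packet: one command word plus one pointer word. */
static int ring_queue(struct ring *r, uint64_t cmd, uint64_t ptr)
{
        if (RING_AVAIL(r) <= 2)            /* mirrors the <= 2 check in start_tx */
                return -1;
        r->slots[RING_SLOT(r->next_to_fill)] = cmd;
        r->slots[RING_SLOT(r->next_to_fill + 1)] = ptr;
        r->next_to_fill += 2;
        return 0;
}

int main(void)
{
        struct ring r = { { 0 }, 0, 0 };

        ring_queue(&r, 0x1, 0x1000);
        ring_queue(&r, 0x2, 0x2000);
        printf("slots used: %u\n", RING_USED(&r));   /* prints 4 */
        return 0;
}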
Diffstat (limited to 'drivers')
 drivers/net/pasemi_mac.c | 213
 drivers/net/pasemi_mac.h |  23
 2 files changed, 108 insertions(+), 128 deletions(-)
diff --git a/drivers/net/pasemi_mac.c b/drivers/net/pasemi_mac.c
index b2861e0df86c..c2a3524a5411 100644
--- a/drivers/net/pasemi_mac.c
+++ b/drivers/net/pasemi_mac.c
@@ -63,10 +63,10 @@
                       NETIF_MSG_RX_ERR  | \
                       NETIF_MSG_TX_ERR)
 
-#define TX_DESC(mac, num)       ((mac)->tx->desc[(num) & (TX_RING_SIZE-1)])
-#define TX_DESC_INFO(mac, num)  ((mac)->tx->desc_info[(num) & (TX_RING_SIZE-1)])
-#define RX_DESC(mac, num)       ((mac)->rx->desc[(num) & (RX_RING_SIZE-1)])
-#define RX_DESC_INFO(mac, num)  ((mac)->rx->desc_info[(num) & (RX_RING_SIZE-1)])
+#define TX_RING(mac, num)       ((mac)->tx->ring[(num) & (TX_RING_SIZE-1)])
+#define TX_RING_INFO(mac, num)  ((mac)->tx->ring_info[(num) & (TX_RING_SIZE-1)])
+#define RX_RING(mac, num)       ((mac)->rx->ring[(num) & (RX_RING_SIZE-1)])
+#define RX_RING_INFO(mac, num)  ((mac)->rx->ring_info[(num) & (RX_RING_SIZE-1)])
 #define RX_BUFF(mac, num)       ((mac)->rx->buffers[(num) & (RX_RING_SIZE-1)])
 
 #define RING_USED(ring)         (((ring)->next_to_fill - (ring)->next_to_clean) \
@@ -174,22 +174,21 @@ static int pasemi_mac_setup_rx_resources(struct net_device *dev)
        spin_lock_init(&ring->lock);
 
        ring->size = RX_RING_SIZE;
-       ring->desc_info = kzalloc(sizeof(struct pasemi_mac_buffer) *
+       ring->ring_info = kzalloc(sizeof(struct pasemi_mac_buffer) *
                                  RX_RING_SIZE, GFP_KERNEL);
 
-       if (!ring->desc_info)
-               goto out_desc_info;
+       if (!ring->ring_info)
+               goto out_ring_info;
 
        /* Allocate descriptors */
-       ring->desc = dma_alloc_coherent(&mac->dma_pdev->dev,
-                                       RX_RING_SIZE *
-                                       sizeof(struct pas_dma_xct_descr),
+       ring->ring = dma_alloc_coherent(&mac->dma_pdev->dev,
+                                       RX_RING_SIZE * sizeof(u64),
                                        &ring->dma, GFP_KERNEL);
 
-       if (!ring->desc)
-               goto out_desc;
+       if (!ring->ring)
+               goto out_ring_desc;
 
-       memset(ring->desc, 0, RX_RING_SIZE * sizeof(struct pas_dma_xct_descr));
+       memset(ring->ring, 0, RX_RING_SIZE * sizeof(u64));
 
        ring->buffers = dma_alloc_coherent(&mac->dma_pdev->dev,
                                           RX_RING_SIZE * sizeof(u64),
@@ -203,7 +202,7 @@ static int pasemi_mac_setup_rx_resources(struct net_device *dev)
 
        write_dma_reg(mac, PAS_DMA_RXCHAN_BASEU(chan_id),
                      PAS_DMA_RXCHAN_BASEU_BRBH(ring->dma >> 32) |
-                     PAS_DMA_RXCHAN_BASEU_SIZ(RX_RING_SIZE >> 2));
+                     PAS_DMA_RXCHAN_BASEU_SIZ(RX_RING_SIZE >> 3));
 
        write_dma_reg(mac, PAS_DMA_RXCHAN_CFG(chan_id),
                      PAS_DMA_RXCHAN_CFG_HBU(2));
@@ -229,11 +228,11 @@ static int pasemi_mac_setup_rx_resources(struct net_device *dev)
 
 out_buffers:
        dma_free_coherent(&mac->dma_pdev->dev,
-                         RX_RING_SIZE * sizeof(struct pas_dma_xct_descr),
-                         mac->rx->desc, mac->rx->dma);
-out_desc:
-       kfree(ring->desc_info);
-out_desc_info:
+                         RX_RING_SIZE * sizeof(u64),
+                         mac->rx->ring, mac->rx->dma);
+out_ring_desc:
+       kfree(ring->ring_info);
+out_ring_info:
        kfree(ring);
 out_ring:
        return -ENOMEM;
@@ -254,25 +253,24 @@ static int pasemi_mac_setup_tx_resources(struct net_device *dev)
        spin_lock_init(&ring->lock);
 
        ring->size = TX_RING_SIZE;
-       ring->desc_info = kzalloc(sizeof(struct pasemi_mac_buffer) *
+       ring->ring_info = kzalloc(sizeof(struct pasemi_mac_buffer) *
                                  TX_RING_SIZE, GFP_KERNEL);
-       if (!ring->desc_info)
-               goto out_desc_info;
+       if (!ring->ring_info)
+               goto out_ring_info;
 
        /* Allocate descriptors */
-       ring->desc = dma_alloc_coherent(&mac->dma_pdev->dev,
-                                       TX_RING_SIZE *
-                                       sizeof(struct pas_dma_xct_descr),
+       ring->ring = dma_alloc_coherent(&mac->dma_pdev->dev,
+                                       TX_RING_SIZE * sizeof(u64),
                                        &ring->dma, GFP_KERNEL);
-       if (!ring->desc)
-               goto out_desc;
+       if (!ring->ring)
+               goto out_ring_desc;
 
-       memset(ring->desc, 0, TX_RING_SIZE * sizeof(struct pas_dma_xct_descr));
+       memset(ring->ring, 0, TX_RING_SIZE * sizeof(u64));
 
        write_dma_reg(mac, PAS_DMA_TXCHAN_BASEL(chan_id),
                      PAS_DMA_TXCHAN_BASEL_BRBL(ring->dma));
        val = PAS_DMA_TXCHAN_BASEU_BRBH(ring->dma >> 32);
-       val |= PAS_DMA_TXCHAN_BASEU_SIZ(TX_RING_SIZE >> 2);
+       val |= PAS_DMA_TXCHAN_BASEU_SIZ(TX_RING_SIZE >> 3);
 
        write_dma_reg(mac, PAS_DMA_TXCHAN_BASEU(chan_id), val);
 
@@ -291,9 +289,9 @@ static int pasemi_mac_setup_tx_resources(struct net_device *dev)
 
        return 0;
 
-out_desc:
-       kfree(ring->desc_info);
-out_desc_info:
+out_ring_desc:
+       kfree(ring->ring_info);
+out_ring_info:
        kfree(ring);
 out_ring:
        return -ENOMEM;
@@ -304,31 +302,27 @@ static void pasemi_mac_free_tx_resources(struct net_device *dev)
        struct pasemi_mac *mac = netdev_priv(dev);
        unsigned int i;
        struct pasemi_mac_buffer *info;
-       struct pas_dma_xct_descr *dp;
 
-       for (i = 0; i < TX_RING_SIZE; i++) {
-               info = &TX_DESC_INFO(mac, i);
-               dp = &TX_DESC(mac, i);
-               if (info->dma) {
-                       if (info->skb) {
-                               pci_unmap_single(mac->dma_pdev,
-                                                info->dma,
-                                                info->skb->len,
-                                                PCI_DMA_TODEVICE);
-                               dev_kfree_skb_any(info->skb);
-                       }
-                       info->dma = 0;
-                       info->skb = NULL;
-                       dp->mactx = 0;
-                       dp->ptr = 0;
-               }
+       for (i = 0; i < TX_RING_SIZE; i += 2) {
+               info = &TX_RING_INFO(mac, i+1);
+               if (info->dma && info->skb) {
+                       pci_unmap_single(mac->dma_pdev,
+                                        info->dma,
+                                        info->skb->len,
+                                        PCI_DMA_TODEVICE);
+                       dev_kfree_skb_any(info->skb);
+               }
+               TX_RING(mac, i) = 0;
+               TX_RING(mac, i+1) = 0;
+               info->dma = 0;
+               info->skb = NULL;
        }
 
        dma_free_coherent(&mac->dma_pdev->dev,
-                         TX_RING_SIZE * sizeof(struct pas_dma_xct_descr),
-                         mac->tx->desc, mac->tx->dma);
+                         TX_RING_SIZE * sizeof(u64),
+                         mac->tx->ring, mac->tx->dma);
 
-       kfree(mac->tx->desc_info);
+       kfree(mac->tx->ring_info);
        kfree(mac->tx);
        mac->tx = NULL;
 }
@@ -338,34 +332,31 @@ static void pasemi_mac_free_rx_resources(struct net_device *dev)
        struct pasemi_mac *mac = netdev_priv(dev);
        unsigned int i;
        struct pasemi_mac_buffer *info;
-       struct pas_dma_xct_descr *dp;
 
        for (i = 0; i < RX_RING_SIZE; i++) {
-               info = &RX_DESC_INFO(mac, i);
-               dp = &RX_DESC(mac, i);
-               if (info->skb) {
-                       if (info->dma) {
-                               pci_unmap_single(mac->dma_pdev,
-                                                info->dma,
-                                                info->skb->len,
-                                                PCI_DMA_FROMDEVICE);
-                               dev_kfree_skb_any(info->skb);
-                       }
-                       info->dma = 0;
-                       info->skb = NULL;
-                       dp->macrx = 0;
-                       dp->ptr = 0;
+               info = &RX_RING_INFO(mac, i);
+               if (info->skb && info->dma) {
+                       pci_unmap_single(mac->dma_pdev,
+                                        info->dma,
+                                        info->skb->len,
+                                        PCI_DMA_FROMDEVICE);
+                       dev_kfree_skb_any(info->skb);
                }
+               info->dma = 0;
+               info->skb = NULL;
        }
 
+       for (i = 0; i < RX_RING_SIZE; i++)
+               RX_RING(mac, i) = 0;
+
        dma_free_coherent(&mac->dma_pdev->dev,
-                         RX_RING_SIZE * sizeof(struct pas_dma_xct_descr),
-                         mac->rx->desc, mac->rx->dma);
+                         RX_RING_SIZE * sizeof(u64),
+                         mac->rx->ring, mac->rx->dma);
 
        dma_free_coherent(&mac->dma_pdev->dev, RX_RING_SIZE * sizeof(u64),
                          mac->rx->buffers, mac->rx->buf_dma);
 
-       kfree(mac->rx->desc_info);
+       kfree(mac->rx->ring_info);
        kfree(mac->rx);
        mac->rx = NULL;
 }
@@ -373,20 +364,22 @@ static void pasemi_mac_free_rx_resources(struct net_device *dev)
 static void pasemi_mac_replenish_rx_ring(struct net_device *dev, int limit)
 {
        struct pasemi_mac *mac = netdev_priv(dev);
-       unsigned int i;
        int start = mac->rx->next_to_fill;
-       int count;
+       unsigned int fill, count;
 
        if (limit <= 0)
                return;
 
-       i = start;
+       fill = start;
        for (count = 0; count < limit; count++) {
-               struct pasemi_mac_buffer *info = &RX_DESC_INFO(mac, i);
-               u64 *buff = &RX_BUFF(mac, i);
+               struct pasemi_mac_buffer *info = &RX_RING_INFO(mac, fill);
+               u64 *buff = &RX_BUFF(mac, fill);
                struct sk_buff *skb;
                dma_addr_t dma;
 
+               /* Entry in use? */
+               WARN_ON(*buff);
+
                /* skb might still be in there for recycle on short receives */
                if (info->skb)
                        skb = info->skb;
@@ -407,7 +400,7 @@ static void pasemi_mac_replenish_rx_ring(struct net_device *dev, int limit)
                info->skb = skb;
                info->dma = dma;
                *buff = XCT_RXB_LEN(BUF_SIZE) | XCT_RXB_ADDR(dma);
-               i++;
+               fill++;
        }
 
        wmb();
@@ -481,7 +474,6 @@ static int pasemi_mac_clean_rx(struct pasemi_mac *mac, int limit)
 {
        unsigned int n;
        int count;
-       struct pas_dma_xct_descr *dp;
        struct pasemi_mac_buffer *info;
        struct sk_buff *skb;
        unsigned int i, len;
@@ -496,9 +488,7 @@ static int pasemi_mac_clean_rx(struct pasemi_mac *mac, int limit)
 
                rmb();
 
-               dp = &RX_DESC(mac, n);
-               prefetchw(dp);
-               macrx = dp->macrx;
+               macrx = RX_RING(mac, n);
 
                if ((macrx & XCT_MACRX_E) ||
                    (*mac->rx_status & PAS_STATUS_ERROR))
@@ -516,12 +506,15 @@ static int pasemi_mac_clean_rx(struct pasemi_mac *mac, int limit)
                 * interface ring.
                 */
 
-               dma = (dp->ptr & XCT_PTR_ADDR_M);
-               for (i = n; i < (n + RX_RING_SIZE); i++) {
-                       info = &RX_DESC_INFO(mac, i);
+               dma = (RX_RING(mac, n+1) & XCT_PTR_ADDR_M);
+               for (i = mac->rx->next_to_fill;
+                    i < (mac->rx->next_to_fill + RX_RING_SIZE);
+                    i++) {
+                       info = &RX_RING_INFO(mac, i);
                        if (info->dma == dma)
                                break;
                }
+
                prefetchw(info);
 
                skb = info->skb;
@@ -546,6 +539,11 @@ static int pasemi_mac_clean_rx(struct pasemi_mac *mac, int limit)
                } else
                        info->skb = NULL;
 
+               /* Need to zero it out since hardware doesn't, since the
+                * replenish loop uses it to tell when it's done.
+                */
+               RX_BUFF(mac, i) = 0;
+
                skb_put(skb, len);
 
                if (likely((macrx & XCT_MACRX_HTY_M) == XCT_MACRX_HTY_IPV4_OK)) {
@@ -561,13 +559,13 @@ static int pasemi_mac_clean_rx(struct pasemi_mac *mac, int limit)
                skb->protocol = eth_type_trans(skb, mac->netdev);
                netif_receive_skb(skb);
 
-               dp->ptr = 0;
-               dp->macrx = 0;
+               RX_RING(mac, n) = 0;
+               RX_RING(mac, n+1) = 0;
 
-               n++;
+               n += 2;
        }
 
-       mac->rx->next_to_clean += limit - count;
+       mac->rx->next_to_clean = n;
        pasemi_mac_replenish_rx_ring(mac->netdev, limit-count);
 
        spin_unlock(&mac->rx->lock);
@@ -579,7 +577,6 @@ static int pasemi_mac_clean_tx(struct pasemi_mac *mac)
 {
        int i;
        struct pasemi_mac_buffer *info;
-       struct pas_dma_xct_descr *dp;
        unsigned int start, count, limit;
        unsigned int total_count;
        unsigned long flags;
@@ -595,29 +592,28 @@ restart:
 
        count = 0;
 
-       for (i = start; i < limit; i++) {
-               dp = &TX_DESC(mac, i);
-
-               if ((dp->mactx & XCT_MACTX_E) ||
+       for (i = start; i < limit; i += 2) {
+               u64 mactx = TX_RING(mac, i);
+               if ((mactx & XCT_MACTX_E) ||
                    (*mac->tx_status & PAS_STATUS_ERROR))
-                       pasemi_mac_tx_error(mac, dp->mactx);
+                       pasemi_mac_tx_error(mac, mactx);
 
-               if (unlikely(dp->mactx & XCT_MACTX_O))
+               if (unlikely(mactx & XCT_MACTX_O))
                        /* Not yet transmitted */
                        break;
 
-               info = &TX_DESC_INFO(mac, i);
+               info = &TX_RING_INFO(mac, i+1);
                skbs[count] = info->skb;
                dmas[count] = info->dma;
 
-               info->skb = NULL;
                info->dma = 0;
-               dp->mactx = 0;
-               dp->ptr = 0;
+               TX_RING(mac, i) = 0;
+               TX_RING(mac, i+1) = 0;
+
 
                count++;
        }
-       mac->tx->next_to_clean += count;
+       mac->tx->next_to_clean += count * 2;
        spin_unlock_irqrestore(&mac->tx->lock, flags);
        netif_wake_queue(mac->netdev);
 
@@ -1001,8 +997,6 @@ static int pasemi_mac_start_tx(struct sk_buff *skb, struct net_device *dev)
 {
        struct pasemi_mac *mac = netdev_priv(dev);
        struct pasemi_mac_txring *txring;
-       struct pasemi_mac_buffer *info;
-       struct pas_dma_xct_descr *dp;
        u64 dflags, mactx, ptr;
        dma_addr_t map;
        unsigned long flags;
@@ -1038,13 +1032,13 @@ static int pasemi_mac_start_tx(struct sk_buff *skb, struct net_device *dev)
 
        spin_lock_irqsave(&txring->lock, flags);
 
-       if (RING_AVAIL(txring) <= 1) {
+       if (RING_AVAIL(txring) <= 2) {
                spin_unlock_irqrestore(&txring->lock, flags);
                pasemi_mac_clean_tx(mac);
                pasemi_mac_restart_tx_intr(mac);
                spin_lock_irqsave(&txring->lock, flags);
 
-               if (RING_AVAIL(txring) <= 1) {
+               if (RING_AVAIL(txring) <= 2) {
                        /* Still no room -- stop the queue and wait for tx
                         * intr when there's room.
                         */
@@ -1053,15 +1047,14 @@ static int pasemi_mac_start_tx(struct sk_buff *skb, struct net_device *dev)
                }
        }
 
-       dp = &TX_DESC(mac, txring->next_to_fill);
-       info = &TX_DESC_INFO(mac, txring->next_to_fill);
+       TX_RING(mac, txring->next_to_fill) = mactx;
+       TX_RING(mac, txring->next_to_fill+1) = ptr;
+
+       TX_RING_INFO(mac, txring->next_to_fill+1).dma = map;
+       TX_RING_INFO(mac, txring->next_to_fill+1).skb = skb;
 
-       dp->mactx = mactx;
-       dp->ptr = ptr;
-       info->dma = map;
-       info->skb = skb;
+       txring->next_to_fill += 2;
 
-       txring->next_to_fill++;
        dev->stats.tx_packets++;
        dev->stats.tx_bytes += skb->len;
 
diff --git a/drivers/net/pasemi_mac.h b/drivers/net/pasemi_mac.h
index c52cfcb6c4ca..5a896aa87a99 100644
--- a/drivers/net/pasemi_mac.h
+++ b/drivers/net/pasemi_mac.h
@@ -28,25 +28,25 @@
 
 struct pasemi_mac_txring {
        spinlock_t       lock;
-       struct pas_dma_xct_descr *desc;
+       u64             *ring;
        dma_addr_t       dma;
        unsigned int     size;
        unsigned int     next_to_fill;
        unsigned int     next_to_clean;
-       struct pasemi_mac_buffer *desc_info;
+       struct pasemi_mac_buffer *ring_info;
        char             irq_name[10];  /* "eth%d tx" */
 };
 
 struct pasemi_mac_rxring {
        spinlock_t       lock;
-       struct pas_dma_xct_descr *desc; /* RX channel descriptor ring */
+       u64             *ring;          /* RX channel descriptor ring */
        dma_addr_t       dma;
        u64             *buffers;       /* RX interface buffer ring */
        dma_addr_t       buf_dma;
        unsigned int     size;
        unsigned int     next_to_fill;
        unsigned int     next_to_clean;
-       struct pasemi_mac_buffer *desc_info;
+       struct pasemi_mac_buffer *ring_info;
        char             irq_name[10];  /* "eth%d rx" */
 };
 
@@ -88,7 +88,7 @@ struct pasemi_mac {
        char    phy_id[BUS_ID_SIZE];
 };
 
-/* Software status descriptor (desc_info) */
+/* Software status descriptor (ring_info) */
 struct pasemi_mac_buffer {
        struct sk_buff *skb;
        dma_addr_t      dma;
@@ -101,20 +101,7 @@ struct pasdma_status {
        u64 tx_sta[20];
 };
 
-/* descriptor structure */
-struct pas_dma_xct_descr {
-       union {
-               u64     mactx;
-               u64     macrx;
-       };
-       union {
-               u64     ptr;
-               u64     rxb;
-       };
-};
-
 /* MAC CFG register offsets */
-
 enum {
        PAS_MAC_CFG_PCFG = 0x80,
        PAS_MAC_CFG_TXP = 0x98,
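
One more illustrative note, again a sketch rather than code from the patch: on the RX side there are now two rings per channel -- the opaque descriptor ring that hardware writes (macrx word plus pointer word, consumed two at a time in pasemi_mac_clean_rx()) and the buffer ring that software fills. A zero entry in the buffer ring means "free", which is why clean_rx now clears RX_BUFF(mac, i) and the replenish loop WARN_ONs on a non-zero slot. A minimal stand-alone model of that handshake (function names invented):

#include <assert.h>
#include <stdint.h>

#define RX_RING_SIZE   64
#define RX_BUFF(b, n)  ((b)[(n) & (RX_RING_SIZE - 1)])

/* Fill up to 'limit' free slots starting at *next_to_fill. */
static void replenish(uint64_t *buffers, unsigned int *next_to_fill,
                      int limit, uint64_t len_addr_word)
{
        int count;

        for (count = 0; count < limit; count++) {
                uint64_t *buff = &RX_BUFF(buffers, *next_to_fill);

                assert(*buff == 0);     /* slot must be free (WARN_ON in the driver) */
                *buff = len_addr_word;  /* stands in for XCT_RXB_LEN(...) | XCT_RXB_ADDR(...) */
                (*next_to_fill)++;
        }
}

/* Once a received buffer has been passed up the stack, mark its slot free. */
static void consume(uint64_t *buffers, unsigned int slot)
{
        RX_BUFF(buffers, slot) = 0;     /* hardware doesn't clear it for us */
}

int main(void)
{
        uint64_t buffers[RX_RING_SIZE] = { 0 };
        unsigned int next_to_fill = 0;

        replenish(buffers, &next_to_fill, 4, 0x2000);
        consume(buffers, 1);
        replenish(buffers, &next_to_fill, 1, 0x3000);  /* fills slot 4, not slot 1 */
        return 0;
}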