author		Lennert Buytenhek <buytenh@wantstofly.org>	2008-06-01 12:09:35 -0400
committer	Lennert Buytenhek <buytenh@wantstofly.org>	2008-06-12 02:40:34 -0400
commit		8a578111e343350ff8fa75fc3630d4bba5475cae (patch)
tree		925ec6a7ba50912e8abada8a11726158244c8bc1 /drivers/net/mv643xx_eth.c
parent		69876569bbf3dfac567f47c619888c827cb87b30 (diff)
mv643xx_eth: split out rx queue state
Split all RX queue related state into 'struct rx_queue', in
preparation for multiple RX queue support.
Signed-off-by: Lennert Buytenhek <buytenh@marvell.com>
Acked-by: Dale Farnsworth <dale@farnsworth.org>
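
The pivot of the refactoring is the new rxq_to_mp() helper further down: per-queue functions receive only a struct rx_queue pointer and recover the owning mv643xx_eth_private with container_of(), so supporting more queues later mostly means widening the rxq[] array. A minimal userspace sketch of that pattern (the structs are reduced stand-ins, and container_of() is simplified from the kernel's definition):

	#include <stddef.h>
	#include <stdio.h>

	/* container_of(), reduced from the kernel's definition to plain ISO C */
	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct rx_queue {
		int rx_ring_size;
	};

	struct eth_private {			/* stand-in for mv643xx_eth_private */
		int port_num;
		struct rx_queue rxq[1];		/* becomes rxq[N] for multiple queues */
	};

	static struct eth_private *rxq_to_mp(struct rx_queue *rxq)
	{
		return container_of(rxq, struct eth_private, rxq[0]);
	}

	int main(void)
	{
		struct eth_private mp = { .port_num = 3 };

		/* hand out only the queue pointer; recover the owner from it */
		printf("port %d\n", rxq_to_mp(&mp.rxq[0])->port_num);
		return 0;
	}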
Diffstat (limited to 'drivers/net/mv643xx_eth.c')
-rw-r--r--	drivers/net/mv643xx_eth.c	441
1 file changed, 225 insertions(+), 216 deletions(-)
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
index 84e6c41becde..5bd4b38e953d 100644
--- a/drivers/net/mv643xx_eth.c
+++ b/drivers/net/mv643xx_eth.c
@@ -73,13 +73,7 @@ static char mv643xx_eth_driver_version[] = "1.0";
 #define MAX_DESCS_PER_SKB	1
 #endif
 
-#define ETH_VLAN_HLEN		4
-#define ETH_FCS_LEN		4
-#define ETH_HW_IP_ALIGN		2	/* hw aligns IP header */
-#define ETH_WRAPPER_LEN		(ETH_HW_IP_ALIGN + ETH_HLEN + \
-					ETH_VLAN_HLEN + ETH_FCS_LEN)
-#define ETH_RX_SKB_SIZE		(dev->mtu + ETH_WRAPPER_LEN + \
-					dma_get_cache_alignment())
+#define ETH_HW_IP_ALIGN		2
 
 /*
  * Registers shared between all ports.
@@ -288,22 +282,32 @@ struct mib_counters {
 	u32 late_collision;
 };
 
+struct rx_queue {
+	int rx_ring_size;
+
+	int rx_desc_count;
+	int rx_curr_desc;
+	int rx_used_desc;
+
+	struct rx_desc *rx_desc_area;
+	dma_addr_t rx_desc_dma;
+	int rx_desc_area_size;
+	struct sk_buff **rx_skb;
+
+	struct timer_list rx_oom;
+};
+
 struct mv643xx_eth_private {
 	struct mv643xx_eth_shared_private *shared;
 	int port_num;			/* User Ethernet port number */
 
 	struct mv643xx_eth_shared_private *shared_smi;
 
-	u32 rx_sram_addr;		/* Base address of rx sram area */
-	u32 rx_sram_size;		/* Size of rx sram area */
 	u32 tx_sram_addr;		/* Base address of tx sram area */
 	u32 tx_sram_size;		/* Size of tx sram area */
 
 	/* Tx/Rx rings managment indexes fields. For driver use */
 
-	/* Next available and first returning Rx resource */
-	int rx_curr_desc, rx_used_desc;
-
 	/* Next available and first returning Tx resource */
 	int tx_curr_desc, tx_used_desc;
 
@@ -311,11 +315,6 @@ struct mv643xx_eth_private {
 	u32 tx_clean_threshold;
 #endif
 
-	struct rx_desc *rx_desc_area;
-	dma_addr_t rx_desc_dma;
-	int rx_desc_area_size;
-	struct sk_buff **rx_skb;
-
 	struct tx_desc *tx_desc_area;
 	dma_addr_t tx_desc_dma;
 	int tx_desc_area_size;
@@ -324,27 +323,25 @@ struct mv643xx_eth_private {
 	struct work_struct tx_timeout_task;
 
 	struct net_device *dev;
-	struct napi_struct napi;
 	struct mib_counters mib_counters;
 	spinlock_t lock;
 	/* Size of Tx Ring per queue */
 	int tx_ring_size;
 	/* Number of tx descriptors in use */
 	int tx_desc_count;
-	/* Size of Rx Ring per queue */
-	int rx_ring_size;
-	/* Number of rx descriptors in use */
-	int rx_desc_count;
-
-	/*
-	 * Used in case RX Ring is empty, which can be caused when
-	 * system does not have resources (skb's)
-	 */
-	struct timer_list timeout;
 
 	u32 rx_int_coal;
 	u32 tx_int_coal;
 	struct mii_if_info mii;
+
+	/*
+	 * RX state.
+	 */
+	int default_rx_ring_size;
+	unsigned long rx_desc_sram_addr;
+	int rx_desc_sram_size;
+	struct napi_struct napi;
+	struct rx_queue rxq[1];
 };
 
 
@@ -361,30 +358,25 @@ static inline void wrl(struct mv643xx_eth_private *mp, int offset, u32 data)
 
 
 /* rxq/txq helper functions *************************************************/
-static void mv643xx_eth_port_enable_rx(struct mv643xx_eth_private *mp,
-					unsigned int queues)
+static struct mv643xx_eth_private *rxq_to_mp(struct rx_queue *rxq)
 {
-	wrl(mp, RXQ_COMMAND(mp->port_num), queues);
+	return container_of(rxq, struct mv643xx_eth_private, rxq[0]);
 }
 
-static unsigned int mv643xx_eth_port_disable_rx(struct mv643xx_eth_private *mp)
+static void rxq_enable(struct rx_queue *rxq)
 {
-	unsigned int port_num = mp->port_num;
-	u32 queues;
-
-	/* Stop Rx port activity. Check port Rx activity. */
-	queues = rdl(mp, RXQ_COMMAND(port_num)) & 0xFF;
-	if (queues) {
-		/* Issue stop command for active queues only */
-		wrl(mp, RXQ_COMMAND(port_num), (queues << 8));
+	struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
+	wrl(mp, RXQ_COMMAND(mp->port_num), 1);
+}
 
-		/* Wait for all Rx activity to terminate. */
-		/* Check port cause register that all Rx queues are stopped */
-		while (rdl(mp, RXQ_COMMAND(port_num)) & 0xFF)
-			udelay(10);
-	}
+static void rxq_disable(struct rx_queue *rxq)
+{
+	struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
+	u8 mask = 1;
 
-	return queues;
+	wrl(mp, RXQ_COMMAND(mp->port_num), mask << 8);
+	while (rdl(mp, RXQ_COMMAND(mp->port_num)) & mask)
+		udelay(10);
 }
 
 static void mv643xx_eth_port_enable_tx(struct mv643xx_eth_private *mp,
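
The enable/disable pair above encodes the RXQ_COMMAND layout: bits 7:0 enable receive queues, and writing the same mask shifted into bits 15:8 requests a stop, which the driver polls until the hardware clears the enable bit. A toy model of that handshake (the simulated register write is invented for illustration; the real chip may take time to ack, hence the udelay(10) in the driver's loop):

	#include <stdint.h>
	#include <stdio.h>

	/* simulated RXQ_COMMAND: bits 7:0 enable, bits 15:8 request a stop */
	static uint32_t rxq_command;

	static void wrl(uint32_t val)
	{
		rxq_command |= val & 0xff;		/* enable bits stick */
		rxq_command &= ~((val >> 8) & 0xff);	/* stop bits clear them */
	}

	int main(void)
	{
		uint8_t mask = 1;			/* queue 0 */

		wrl(mask);				/* like rxq_enable() */
		wrl((uint32_t)mask << 8);		/* like rxq_disable() ... */
		while (rxq_command & mask)		/* ... then poll until idle */
			;
		printf("RXQ_COMMAND = 0x%08x\n", rxq_command);
		return 0;
	}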
@@ -421,19 +413,29 @@ static unsigned int mv643xx_eth_port_disable_tx(struct mv643xx_eth_private *mp)
 /* rx ***********************************************************************/
 static void mv643xx_eth_free_completed_tx_descs(struct net_device *dev);
 
-static void mv643xx_eth_rx_refill_descs(struct net_device *dev)
+static void rxq_refill(struct rx_queue *rxq)
 {
-	struct mv643xx_eth_private *mp = netdev_priv(dev);
+	struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
 	unsigned long flags;
 
 	spin_lock_irqsave(&mp->lock, flags);
 
-	while (mp->rx_desc_count < mp->rx_ring_size) {
+	while (rxq->rx_desc_count < rxq->rx_ring_size) {
+		int skb_size;
 		struct sk_buff *skb;
 		int unaligned;
 		int rx;
 
-		skb = dev_alloc_skb(ETH_RX_SKB_SIZE + dma_get_cache_alignment());
+		/*
+		 * Reserve 2+14 bytes for an ethernet header (the
+		 * hardware automatically prepends 2 bytes of dummy
+		 * data to each received packet), 4 bytes for a VLAN
+		 * header, and 4 bytes for the trailing FCS -- 24
+		 * bytes total.
+		 */
+		skb_size = mp->dev->mtu + 24;
+
+		skb = dev_alloc_skb(skb_size + dma_get_cache_alignment() - 1);
 		if (skb == NULL)
 			break;
 
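
The sizing comment above is worth spelling out: the buffer must hold the MTU plus 2 bytes of hardware IP-alignment padding, a 14-byte ethernet header, a 4-byte VLAN tag and the 4-byte FCS, i.e. mtu + 24, and the allocation adds dma_get_cache_alignment() - 1 bytes of slack so the start can be cache-aligned. A standalone check of the arithmetic (the cache line size is assumed for illustration; the driver asks the DMA API):

	#include <stdio.h>

	/* the 24 bytes of overhead called out in the patch comment */
	#define HW_IP_ALIGN	2	/* dummy bytes the controller prepends */
	#define ETH_HDR_LEN	14	/* dest MAC + src MAC + ethertype */
	#define VLAN_TAG_LEN	4
	#define FCS_LEN		4

	int main(void)
	{
		int mtu = 1500;
		int cache_align = 32;	/* illustrative only */
		int skb_size = mtu + HW_IP_ALIGN + ETH_HDR_LEN +
			       VLAN_TAG_LEN + FCS_LEN;

		printf("rx buffer: %d bytes (mtu + 24)\n", skb_size);
		printf("allocated: %d bytes (worst-case alignment slack)\n",
		       skb_size + cache_align - 1);
		return 0;
	}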
@@ -441,44 +443,43 @@ static void mv643xx_eth_rx_refill_descs(struct net_device *dev)
 		if (unaligned)
 			skb_reserve(skb, dma_get_cache_alignment() - unaligned);
 
-		mp->rx_desc_count++;
-		rx = mp->rx_used_desc;
-		mp->rx_used_desc = (rx + 1) % mp->rx_ring_size;
+		rxq->rx_desc_count++;
+		rx = rxq->rx_used_desc;
+		rxq->rx_used_desc = (rx + 1) % rxq->rx_ring_size;
 
-		mp->rx_desc_area[rx].buf_ptr = dma_map_single(NULL,
-							skb->data,
-							ETH_RX_SKB_SIZE,
-							DMA_FROM_DEVICE);
-		mp->rx_desc_area[rx].buf_size = ETH_RX_SKB_SIZE;
-		mp->rx_skb[rx] = skb;
+		rxq->rx_desc_area[rx].buf_ptr = dma_map_single(NULL, skb->data,
+						skb_size, DMA_FROM_DEVICE);
+		rxq->rx_desc_area[rx].buf_size = skb_size;
+		rxq->rx_skb[rx] = skb;
 		wmb();
-		mp->rx_desc_area[rx].cmd_sts = BUFFER_OWNED_BY_DMA |
-						RX_ENABLE_INTERRUPT;
+		rxq->rx_desc_area[rx].cmd_sts = BUFFER_OWNED_BY_DMA |
+						RX_ENABLE_INTERRUPT;
 		wmb();
 
 		skb_reserve(skb, ETH_HW_IP_ALIGN);
 	}
 
-	if (mp->rx_desc_count == 0) {
-		mp->timeout.expires = jiffies + (HZ / 10);
-		add_timer(&mp->timeout);
+	if (rxq->rx_desc_count == 0) {
+		rxq->rx_oom.expires = jiffies + (HZ / 10);
+		add_timer(&rxq->rx_oom);
 	}
 
 	spin_unlock_irqrestore(&mp->lock, flags);
 }
 
-static inline void mv643xx_eth_rx_refill_descs_timer_wrapper(unsigned long data)
+static inline void rxq_refill_timer_wrapper(unsigned long data)
 {
-	mv643xx_eth_rx_refill_descs((struct net_device *)data);
+	rxq_refill((struct rx_queue *)data);
 }
 
-static int mv643xx_eth_receive_queue(struct net_device *dev, int budget)
+static int rxq_process(struct rx_queue *rxq, int budget)
 {
-	struct mv643xx_eth_private *mp = netdev_priv(dev);
-	struct net_device_stats *stats = &dev->stats;
-	unsigned int received_packets = 0;
+	struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
+	struct net_device_stats *stats = &mp->dev->stats;
+	int rx;
 
-	while (budget-- > 0) {
+	rx = 0;
+	while (rx < budget) {
 		struct sk_buff *skb;
 		volatile struct rx_desc *rx_desc;
 		unsigned int cmd_sts;
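
The rx_oom timer retargeting above follows the old, pre-timer_setup() kernel timer convention: the unsigned long data field smuggles a context pointer, which the wrapper casts back, now to a struct rx_queue instead of a net_device. A reduced userspace model of that convention (the timer struct here is a stand-in, not the kernel's):

	#include <stdio.h>

	/* old-style kernel timer: 'data' carries an opaque context pointer */
	struct timer_model {
		unsigned long data;
		void (*function)(unsigned long);
	};

	struct rx_queue {
		int rx_desc_count;
	};

	static void rxq_refill(struct rx_queue *rxq)
	{
		printf("retry refill, %d descriptors live\n", rxq->rx_desc_count);
	}

	static void rxq_refill_timer_wrapper(unsigned long data)
	{
		rxq_refill((struct rx_queue *)data);
	}

	int main(void)
	{
		struct rx_queue rxq = { 0 };
		struct timer_model rx_oom = {
			.data = (unsigned long)&rxq,
			.function = rxq_refill_timer_wrapper,
		};

		rx_oom.function(rx_oom.data);	/* what the kernel does on expiry */
		return 0;
	}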
@@ -486,7 +487,7 @@ static int mv643xx_eth_receive_queue(struct net_device *dev, int budget)
 
 		spin_lock_irqsave(&mp->lock, flags);
 
-		rx_desc = &mp->rx_desc_area[mp->rx_curr_desc];
+		rx_desc = &rxq->rx_desc_area[rxq->rx_curr_desc];
 
 		cmd_sts = rx_desc->cmd_sts;
 		if (cmd_sts & BUFFER_OWNED_BY_DMA) {
@@ -495,17 +496,17 @@ static int mv643xx_eth_receive_queue(struct net_device *dev, int budget)
 		}
 		rmb();
 
-		skb = mp->rx_skb[mp->rx_curr_desc];
-		mp->rx_skb[mp->rx_curr_desc] = NULL;
+		skb = rxq->rx_skb[rxq->rx_curr_desc];
+		rxq->rx_skb[rxq->rx_curr_desc] = NULL;
 
-		mp->rx_curr_desc = (mp->rx_curr_desc + 1) % mp->rx_ring_size;
+		rxq->rx_curr_desc = (rxq->rx_curr_desc + 1) % rxq->rx_ring_size;
 
 		spin_unlock_irqrestore(&mp->lock, flags);
 
 		dma_unmap_single(NULL, rx_desc->buf_ptr + ETH_HW_IP_ALIGN,
-				ETH_RX_SKB_SIZE, DMA_FROM_DEVICE);
-		mp->rx_desc_count--;
-		received_packets++;
+				mp->dev->mtu + 24, DMA_FROM_DEVICE);
+		rxq->rx_desc_count--;
+		rx++;
 
 		/*
 		 * Update statistics.
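
rx_curr_desc and rx_used_desc walk the same circular ring: curr advances over descriptors the hardware has completed, used marks the next slot rxq_refill() hands back to the hardware, and both wrap with the modulo seen above. A toy model of that index discipline (ring size and counts picked arbitrarily):

	#include <stdio.h>

	#define RING_SIZE 8

	int main(void)
	{
		int curr = 0, used = 0, count = 0;
		int i;

		/* refill: hand every descriptor to the "hardware" */
		while (count < RING_SIZE) {
			used = (used + 1) % RING_SIZE;
			count++;
		}

		/* process: consume three completed descriptors */
		for (i = 0; i < 3; i++) {
			curr = (curr + 1) % RING_SIZE;
			count--;
		}

		printf("curr=%d used=%d in-flight=%d\n", curr, used, count);
		return 0;
	}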
@@ -528,7 +529,7 @@ static int mv643xx_eth_receive_queue(struct net_device *dev, int budget)
 				printk(KERN_ERR
 					"%s: Received packet spread "
 					"on multiple descriptors\n",
-					dev->name);
+					mp->dev->name);
 			}
 			if (cmd_sts & ERROR_SUMMARY)
 				stats->rx_errors++;
@@ -546,48 +547,45 @@ static int mv643xx_eth_receive_queue(struct net_device *dev, int budget)
 				skb->csum = htons(
 					(cmd_sts & 0x0007fff8) >> 3);
 			}
-			skb->protocol = eth_type_trans(skb, dev);
+			skb->protocol = eth_type_trans(skb, mp->dev);
 #ifdef MV643XX_ETH_NAPI
 			netif_receive_skb(skb);
 #else
 			netif_rx(skb);
 #endif
 		}
-		dev->last_rx = jiffies;
+		mp->dev->last_rx = jiffies;
 	}
-	mv643xx_eth_rx_refill_descs(dev);	/* Fill RX ring with skb's */
+	rxq_refill(rxq);
 
-	return received_packets;
+	return rx;
 }
 
 #ifdef MV643XX_ETH_NAPI
 static int mv643xx_eth_poll(struct napi_struct *napi, int budget)
 {
-	struct mv643xx_eth_private *mp = container_of(napi, struct mv643xx_eth_private, napi);
-	struct net_device *dev = mp->dev;
-	unsigned int port_num = mp->port_num;
-	int work_done;
+	struct mv643xx_eth_private *mp;
+	int rx;
+
+	mp = container_of(napi, struct mv643xx_eth_private, napi);
 
 #ifdef MV643XX_ETH_TX_FAST_REFILL
 	if (++mp->tx_clean_threshold > 5) {
-		mv643xx_eth_free_completed_tx_descs(dev);
+		mv643xx_eth_free_completed_tx_descs(mp->dev);
 		mp->tx_clean_threshold = 0;
 	}
 #endif
 
-	work_done = 0;
-	if ((rdl(mp, RXQ_CURRENT_DESC_PTR(port_num)))
-	    != (u32) mp->rx_used_desc)
-		work_done = mv643xx_eth_receive_queue(dev, budget);
+	rx = rxq_process(mp->rxq, budget);
 
-	if (work_done < budget) {
-		netif_rx_complete(dev, napi);
-		wrl(mp, INT_CAUSE(port_num), 0);
-		wrl(mp, INT_CAUSE_EXT(port_num), 0);
-		wrl(mp, INT_MASK(port_num), INT_RX | INT_EXT);
+	if (rx < budget) {
+		netif_rx_complete(mp->dev, napi);
+		wrl(mp, INT_CAUSE(mp->port_num), 0);
+		wrl(mp, INT_CAUSE_EXT(mp->port_num), 0);
+		wrl(mp, INT_MASK(mp->port_num), INT_RX | INT_EXT);
 	}
 
-	return work_done;
+	return rx;
 }
 #endif
 
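
The rewritten poll function drops the "is there work" register peek and simply trusts rxq_process() to honour the NAPI contract: consume at most budget packets, and return fewer only when the queue has gone idle, at which point polling completes and the RX interrupt is unmasked. A sketch of that contract with the hardware stubbed out (counts invented for illustration):

	#include <stdio.h>

	static int pending = 3;		/* packets the fake hardware has ready */

	/* consume at most 'budget' packets, like rxq_process() */
	static int rxq_process(int budget)
	{
		int rx = 0;

		while (rx < budget && pending > 0) {
			pending--;
			rx++;
		}
		return rx;
	}

	int main(void)
	{
		int budget = 4;
		int rx = rxq_process(budget);

		printf("processed %d\n", rx);
		if (rx < budget)
			printf("idle: complete NAPI, unmask RX interrupt\n");
		return 0;
	}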
@@ -1252,53 +1250,102 @@ static void mv643xx_eth_set_rx_mode(struct net_device *dev)
 
 
 /* rx/tx queue initialisation ***********************************************/
-static void ether_init_rx_desc_ring(struct mv643xx_eth_private *mp)
+static int rxq_init(struct mv643xx_eth_private *mp)
 {
-	volatile struct rx_desc *p_rx_desc;
-	int rx_desc_num = mp->rx_ring_size;
+	struct rx_queue *rxq = mp->rxq;
+	struct rx_desc *rx_desc;
+	int size;
 	int i;
 
-	/* initialize the next_desc_ptr links in the Rx descriptors ring */
-	p_rx_desc = (struct rx_desc *)mp->rx_desc_area;
-	for (i = 0; i < rx_desc_num; i++) {
-		p_rx_desc[i].next_desc_ptr = mp->rx_desc_dma +
-			((i + 1) % rx_desc_num) * sizeof(struct rx_desc);
+	rxq->rx_ring_size = mp->default_rx_ring_size;
+
+	rxq->rx_desc_count = 0;
+	rxq->rx_curr_desc = 0;
+	rxq->rx_used_desc = 0;
+
+	size = rxq->rx_ring_size * sizeof(struct rx_desc);
+
+	if (size <= mp->rx_desc_sram_size) {
+		rxq->rx_desc_area = ioremap(mp->rx_desc_sram_addr,
+						mp->rx_desc_sram_size);
+		rxq->rx_desc_dma = mp->rx_desc_sram_addr;
+	} else {
+		rxq->rx_desc_area = dma_alloc_coherent(NULL, size,
+							&rxq->rx_desc_dma,
+							GFP_KERNEL);
+	}
+
+	if (rxq->rx_desc_area == NULL) {
+		dev_printk(KERN_ERR, &mp->dev->dev,
+			   "can't allocate rx ring (%d bytes)\n", size);
+		goto out;
+	}
+	memset(rxq->rx_desc_area, 0, size);
+
+	rxq->rx_desc_area_size = size;
+	rxq->rx_skb = kmalloc(rxq->rx_ring_size * sizeof(*rxq->rx_skb),
+								GFP_KERNEL);
+	if (rxq->rx_skb == NULL) {
+		dev_printk(KERN_ERR, &mp->dev->dev,
+			   "can't allocate rx skb ring\n");
+		goto out_free;
+	}
+
+	rx_desc = (struct rx_desc *)rxq->rx_desc_area;
+	for (i = 0; i < rxq->rx_ring_size; i++) {
+		int nexti = (i + 1) % rxq->rx_ring_size;
+		rx_desc[i].next_desc_ptr = rxq->rx_desc_dma +
+					nexti * sizeof(struct rx_desc);
 	}
 
-	/* Save Rx desc pointer to driver struct. */
-	mp->rx_curr_desc = 0;
-	mp->rx_used_desc = 0;
+	init_timer(&rxq->rx_oom);
+	rxq->rx_oom.data = (unsigned long)rxq;
+	rxq->rx_oom.function = rxq_refill_timer_wrapper;
+
+	return 0;
+
+
+out_free:
+	if (size <= mp->rx_desc_sram_size)
+		iounmap(rxq->rx_desc_area);
+	else
+		dma_free_coherent(NULL, size,
+				  rxq->rx_desc_area,
+				  rxq->rx_desc_dma);
 
-	mp->rx_desc_area_size = rx_desc_num * sizeof(struct rx_desc);
+out:
+	return -ENOMEM;
 }
 
-static void mv643xx_eth_free_rx_rings(struct net_device *dev)
+static void rxq_deinit(struct rx_queue *rxq)
 {
-	struct mv643xx_eth_private *mp = netdev_priv(dev);
-	int curr;
+	struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
+	int i;
+
+	rxq_disable(rxq);
 
-	/* Stop RX Queues */
-	mv643xx_eth_port_disable_rx(mp);
+	del_timer_sync(&rxq->rx_oom);
 
-	/* Free preallocated skb's on RX rings */
-	for (curr = 0; mp->rx_desc_count && curr < mp->rx_ring_size; curr++) {
-		if (mp->rx_skb[curr]) {
-			dev_kfree_skb(mp->rx_skb[curr]);
-			mp->rx_desc_count--;
+	for (i = 0; i < rxq->rx_ring_size; i++) {
+		if (rxq->rx_skb[i]) {
+			dev_kfree_skb(rxq->rx_skb[i]);
+			rxq->rx_desc_count--;
 		}
 	}
 
-	if (mp->rx_desc_count)
-		printk(KERN_ERR
-			"%s: Error in freeing Rx Ring. %d skb's still"
-			" stuck in RX Ring - ignoring them\n", dev->name,
-			mp->rx_desc_count);
-	/* Free RX ring */
-	if (mp->rx_sram_size)
-		iounmap(mp->rx_desc_area);
+	if (rxq->rx_desc_count) {
+		dev_printk(KERN_ERR, &mp->dev->dev,
+			   "error freeing rx ring -- %d skbs stuck\n",
+			   rxq->rx_desc_count);
+	}
+
+	if (rxq->rx_desc_area_size <= mp->rx_desc_sram_size)
+		iounmap(rxq->rx_desc_area);
 	else
-		dma_free_coherent(NULL, mp->rx_desc_area_size,
-			mp->rx_desc_area, mp->rx_desc_dma);
+		dma_free_coherent(NULL, rxq->rx_desc_area_size,
+				  rxq->rx_desc_area, rxq->rx_desc_dma);
+
+	kfree(rxq->rx_skb);
 }
 
 static void ether_init_tx_desc_ring(struct mv643xx_eth_private *mp)
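
rxq_init() makes the placement decision explicit: the descriptor ring lives in on-chip SRAM when it fits (size <= rx_desc_sram_size), otherwise in coherent DMA memory, and either way next_desc_ptr links each descriptor to its successor with the last one wrapping back to the first. A minimal sketch of the chaining over an ordinary array (the bus address is faked; descriptor fields are trimmed):

	#include <stdint.h>
	#include <stdio.h>

	struct rx_desc {
		uint32_t next_desc_ptr;	/* bus address of the next descriptor */
		/* cmd_sts, buf_size, buf_ptr omitted */
	};

	int main(void)
	{
		struct rx_desc ring[4];
		uint32_t rx_desc_dma = 0x10000;	/* pretend bus address of ring[0] */
		int i;

		for (i = 0; i < 4; i++) {
			int nexti = (i + 1) % 4;	/* last entry wraps to 0 */
			ring[i].next_desc_ptr = rx_desc_dma +
						nexti * sizeof(struct rx_desc);
		}

		for (i = 0; i < 4; i++)
			printf("desc %d -> 0x%x\n", i,
			       (unsigned)ring[i].next_desc_ptr);
		return 0;
	}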
@@ -1510,7 +1557,7 @@ static irqreturn_t mv643xx_eth_int_handler(int irq, void *dev_id)
 	}
 #else
 	if (int_cause & INT_RX)
-		mv643xx_eth_receive_queue(dev, INT_MAX);
+		rxq_process(mp->rxq, INT_MAX);
 #endif
 	if (int_cause_ext & INT_EXT_TX)
 		mv643xx_eth_free_completed_tx_descs(dev);
@@ -1544,20 +1591,30 @@ static void phy_reset(struct mv643xx_eth_private *mp)
 static void port_start(struct net_device *dev)
 {
 	struct mv643xx_eth_private *mp = netdev_priv(dev);
-	unsigned int port_num = mp->port_num;
-	int tx_curr_desc, rx_curr_desc;
 	u32 pscr;
 	struct ethtool_cmd ethtool_cmd;
+	int i;
 
-	/* Assignment of Tx CTRP of given queue */
-	tx_curr_desc = mp->tx_curr_desc;
-	wrl(mp, TXQ_CURRENT_DESC_PTR(port_num),
-		(u32)((struct tx_desc *)mp->tx_desc_dma + tx_curr_desc));
+	/*
+	 * Configure basic link parameters.
+	 */
+	pscr = rdl(mp, PORT_SERIAL_CONTROL(mp->port_num));
+	pscr &= ~(SERIAL_PORT_ENABLE | FORCE_LINK_PASS);
+	wrl(mp, PORT_SERIAL_CONTROL(mp->port_num), pscr);
+	pscr |= DISABLE_AUTO_NEG_FOR_FLOW_CTRL |
+		DISABLE_AUTO_NEG_SPEED_GMII |
+		DISABLE_AUTO_NEG_FOR_DUPLEX |
+		DO_NOT_FORCE_LINK_FAIL |
+		SERIAL_PORT_CONTROL_RESERVED;
+	wrl(mp, PORT_SERIAL_CONTROL(mp->port_num), pscr);
+	pscr |= SERIAL_PORT_ENABLE;
+	wrl(mp, PORT_SERIAL_CONTROL(mp->port_num), pscr);
 
-	/* Assignment of Rx CRDP of given queue */
-	rx_curr_desc = mp->rx_curr_desc;
-	wrl(mp, RXQ_CURRENT_DESC_PTR(port_num),
-		(u32)((struct rx_desc *)mp->rx_desc_dma + rx_curr_desc));
+	wrl(mp, SDMA_CONFIG(mp->port_num), PORT_SDMA_CONFIG_DEFAULT_VALUE);
+
+	mv643xx_eth_get_settings(dev, &ethtool_cmd);
+	phy_reset(mp);
+	mv643xx_eth_set_settings(dev, &ethtool_cmd);
 
 	/* Add the assigned Ethernet address to the port's address table */
 	uc_addr_set(mp, dev->dev_addr);
@@ -1566,42 +1623,34 @@ static void port_start(struct net_device *dev)
 	 * Receive all unmatched unicast, TCP, UDP, BPDU and broadcast
 	 * frames to RX queue #0.
 	 */
-	wrl(mp, PORT_CONFIG(port_num), 0x00000000);
+	wrl(mp, PORT_CONFIG(mp->port_num), 0x00000000);
 
 	/*
 	 * Treat BPDUs as normal multicasts, and disable partition mode.
 	 */
-	wrl(mp, PORT_CONFIG_EXT(port_num), 0x00000000);
-
-	pscr = rdl(mp, PORT_SERIAL_CONTROL(port_num));
-
-	pscr &= ~(SERIAL_PORT_ENABLE | FORCE_LINK_PASS);
-	wrl(mp, PORT_SERIAL_CONTROL(port_num), pscr);
+	wrl(mp, PORT_CONFIG_EXT(mp->port_num), 0x00000000);
 
-	pscr |= DISABLE_AUTO_NEG_FOR_FLOW_CTRL |
-		DISABLE_AUTO_NEG_SPEED_GMII |
-		DISABLE_AUTO_NEG_FOR_DUPLEX |
-		DO_NOT_FORCE_LINK_FAIL |
-		SERIAL_PORT_CONTROL_RESERVED;
+	/*
+	 * Enable the receive queue.
+	 */
+	for (i = 0; i < 1; i++) {
+		struct rx_queue *rxq = mp->rxq;
+		int off = RXQ_CURRENT_DESC_PTR(mp->port_num);
+		u32 addr;
 
-	wrl(mp, PORT_SERIAL_CONTROL(port_num), pscr);
+		addr = (u32)rxq->rx_desc_dma;
+		addr += rxq->rx_curr_desc * sizeof(struct rx_desc);
+		wrl(mp, off, addr);
 
-	pscr |= SERIAL_PORT_ENABLE;
-	wrl(mp, PORT_SERIAL_CONTROL(port_num), pscr);
+		rxq_enable(rxq);
+	}
 
-	/* Assign port SDMA configuration */
-	wrl(mp, SDMA_CONFIG(port_num), PORT_SDMA_CONFIG_DEFAULT_VALUE);
 
-	/* Enable port Rx. */
-	mv643xx_eth_port_enable_rx(mp, 1);
+	wrl(mp, TXQ_CURRENT_DESC_PTR(mp->port_num),
+		(u32)((struct tx_desc *)mp->tx_desc_dma + mp->tx_curr_desc));
 
 	/* Disable port bandwidth limits by clearing MTU register */
-	wrl(mp, TX_BW_MTU(port_num), 0);
-
-	/* save phy settings across reset */
-	mv643xx_eth_get_settings(dev, &ethtool_cmd);
-	phy_reset(mp);
-	mv643xx_eth_set_settings(dev, &ethtool_cmd);
+	wrl(mp, TX_BW_MTU(mp->port_num), 0);
 }
 
 #ifdef MV643XX_ETH_COAL
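
Note the deliberately single-iteration for loop above: it enables "each" receive queue so that only the loop bound changes once rxq[1] grows. The value it programs into RXQ_CURRENT_DESC_PTR is just the ring's bus address plus the current index scaled by the descriptor size, as this sketch computes (the descriptor layout is a stand-in and the addresses are illustrative):

	#include <stdint.h>
	#include <stdio.h>

	struct rx_desc {
		uint32_t cmd_sts, buf_size, buf_ptr, next_desc_ptr;
	};

	int main(void)
	{
		uint32_t rx_desc_dma = 0x10000;	/* pretend ring bus address */
		int rx_curr_desc = 5;
		uint32_t addr;

		addr = rx_desc_dma + rx_curr_desc * sizeof(struct rx_desc);
		printf("RXQ_CURRENT_DESC_PTR <- 0x%x\n", (unsigned)addr);
		return 0;
	}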
@@ -1661,18 +1710,11 @@ static int mv643xx_eth_open(struct net_device *dev)
 
 	port_init(mp);
 
-	memset(&mp->timeout, 0, sizeof(struct timer_list));
-	mp->timeout.function = mv643xx_eth_rx_refill_descs_timer_wrapper;
-	mp->timeout.data = (unsigned long)dev;
-
-	/* Allocate RX and TX skb rings */
-	mp->rx_skb = kmalloc(sizeof(*mp->rx_skb) * mp->rx_ring_size,
-								GFP_KERNEL);
-	if (!mp->rx_skb) {
-		printk(KERN_ERR "%s: Cannot allocate Rx skb ring\n", dev->name);
-		err = -ENOMEM;
+	err = rxq_init(mp);
+	if (err)
 		goto out_free_irq;
-	}
+	rxq_refill(mp->rxq);
+
 	mp->tx_skb = kmalloc(sizeof(*mp->tx_skb) * mp->tx_ring_size,
 								GFP_KERNEL);
 	if (!mp->tx_skb) {
@@ -1706,39 +1748,6 @@ static int mv643xx_eth_open(struct net_device *dev)
 
 	ether_init_tx_desc_ring(mp);
 
-	/* Allocate RX ring */
-	mp->rx_desc_count = 0;
-	size = mp->rx_ring_size * sizeof(struct rx_desc);
-	mp->rx_desc_area_size = size;
-
-	if (mp->rx_sram_size) {
-		mp->rx_desc_area = ioremap(mp->rx_sram_addr,
-					   mp->rx_sram_size);
-		mp->rx_desc_dma = mp->rx_sram_addr;
-	} else
-		mp->rx_desc_area = dma_alloc_coherent(NULL, size,
-							&mp->rx_desc_dma,
-							GFP_KERNEL);
-
-	if (!mp->rx_desc_area) {
-		printk(KERN_ERR "%s: Cannot allocate Rx ring (size %d bytes)\n",
-			dev->name, size);
-		printk(KERN_ERR "%s: Freeing previously allocated TX queues...",
-			dev->name);
-		if (mp->rx_sram_size)
-			iounmap(mp->tx_desc_area);
-		else
-			dma_free_coherent(NULL, mp->tx_desc_area_size,
-					mp->tx_desc_area, mp->tx_desc_dma);
-		err = -ENOMEM;
-		goto out_free_tx_skb;
-	}
-	memset((void *)mp->rx_desc_area, 0, size);
-
-	ether_init_rx_desc_ring(mp);
-
-	mv643xx_eth_rx_refill_descs(dev);	/* Fill RX ring with skb's */
-
 #ifdef MV643XX_ETH_NAPI
 	napi_enable(&mp->napi);
 #endif
@@ -1764,7 +1773,7 @@ static int mv643xx_eth_open(struct net_device *dev)
 out_free_tx_skb:
 	kfree(mp->tx_skb);
 out_free_rx_skb:
-	kfree(mp->rx_skb);
+	rxq_deinit(mp->rxq);
 out_free_irq:
 	free_irq(dev->irq, dev);
 
@@ -1777,7 +1786,7 @@ static void port_reset(struct mv643xx_eth_private *mp)
 	unsigned int reg_data;
 
 	mv643xx_eth_port_disable_tx(mp);
-	mv643xx_eth_port_disable_rx(mp);
+	rxq_disable(mp->rxq);
 
 	/* Clear all MIB counters */
 	clear_mib_counters(mp);
@@ -1809,7 +1818,7 @@ static int mv643xx_eth_stop(struct net_device *dev)
 	port_reset(mp);
 
 	mv643xx_eth_free_tx_rings(dev);
-	mv643xx_eth_free_rx_rings(dev);
+	rxq_deinit(mp->rxq);
 
 	free_irq(dev->irq, dev);
 
@@ -2162,7 +2171,6 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
 
 	/* set default config values */
 	uc_addr_get(mp, dev->dev_addr);
-	mp->rx_ring_size = DEFAULT_RX_QUEUE_SIZE;
 	mp->tx_ring_size = DEFAULT_TX_QUEUE_SIZE;
 
 	if (is_valid_ether_addr(pd->mac_addr))
@@ -2171,8 +2179,9 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
 	if (pd->phy_addr || pd->force_phy_addr)
 		phy_addr_set(mp, pd->phy_addr);
 
+	mp->default_rx_ring_size = DEFAULT_RX_QUEUE_SIZE;
 	if (pd->rx_queue_size)
-		mp->rx_ring_size = pd->rx_queue_size;
+		mp->default_rx_ring_size = pd->rx_queue_size;
 
 	if (pd->tx_queue_size)
 		mp->tx_ring_size = pd->tx_queue_size;
@@ -2183,8 +2192,8 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
 	}
 
 	if (pd->rx_sram_size) {
-		mp->rx_sram_size = pd->rx_sram_size;
-		mp->rx_sram_addr = pd->rx_sram_addr;
+		mp->rx_desc_sram_addr = pd->rx_sram_addr;
+		mp->rx_desc_sram_size = pd->rx_sram_size;
 	}
 
 	duplex = pd->duplex;