diff options
author | Ben Hutchings <bhutchings@solarflare.com> | 2010-09-10 02:41:36 -0400 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2010-09-10 15:27:32 -0400 |
commit | ba1e8a35b77f3bc7d109696dbd2a7fd5af208b62 (patch) | |
tree | 5c65eea772e85b7e31b3488fa09e523bbd68b272 /drivers/net/sfc/rx.c | |
parent | 58758aa505edc5b8f8393cee45b54c7485d76de5 (diff) |
sfc: Abstract channel and index lookup for RX queues
Signed-off-by: Ben Hutchings <bhutchings@solarflare.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/sfc/rx.c')
-rw-r--r-- | drivers/net/sfc/rx.c | 38 |
1 file changed, 19 insertions(+), 19 deletions(-)
diff --git a/drivers/net/sfc/rx.c b/drivers/net/sfc/rx.c index acb372e841b2..1e6c8cfa6c0c 100644 --- a/drivers/net/sfc/rx.c +++ b/drivers/net/sfc/rx.c | |||
@@ -341,7 +341,7 @@ static void efx_recycle_rx_buffer(struct efx_channel *channel, | |||
341 | */ | 341 | */ |
342 | void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue) | 342 | void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue) |
343 | { | 343 | { |
344 | struct efx_channel *channel = rx_queue->channel; | 344 | struct efx_channel *channel = efx_rx_queue_channel(rx_queue); |
345 | unsigned fill_level; | 345 | unsigned fill_level; |
346 | int space, rc = 0; | 346 | int space, rc = 0; |
347 | 347 | ||
@@ -364,7 +364,8 @@ void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue) | |||
364 | netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev, | 364 | netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev, |
365 | "RX queue %d fast-filling descriptor ring from" | 365 | "RX queue %d fast-filling descriptor ring from" |
366 | " level %d to level %d using %s allocation\n", | 366 | " level %d to level %d using %s allocation\n", |
367 | rx_queue->queue, fill_level, rx_queue->fast_fill_limit, | 367 | efx_rx_queue_index(rx_queue), fill_level, |
368 | rx_queue->fast_fill_limit, | ||
368 | channel->rx_alloc_push_pages ? "page" : "skb"); | 369 | channel->rx_alloc_push_pages ? "page" : "skb"); |
369 | 370 | ||
370 | do { | 371 | do { |
@@ -382,7 +383,7 @@ void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue) | |||
382 | 383 | ||
383 | netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev, | 384 | netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev, |
384 | "RX queue %d fast-filled descriptor ring " | 385 | "RX queue %d fast-filled descriptor ring " |
385 | "to level %d\n", rx_queue->queue, | 386 | "to level %d\n", efx_rx_queue_index(rx_queue), |
386 | rx_queue->added_count - rx_queue->removed_count); | 387 | rx_queue->added_count - rx_queue->removed_count); |
387 | 388 | ||
388 | out: | 389 | out: |
@@ -393,7 +394,7 @@ void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue) | |||
393 | void efx_rx_slow_fill(unsigned long context) | 394 | void efx_rx_slow_fill(unsigned long context) |
394 | { | 395 | { |
395 | struct efx_rx_queue *rx_queue = (struct efx_rx_queue *)context; | 396 | struct efx_rx_queue *rx_queue = (struct efx_rx_queue *)context; |
396 | struct efx_channel *channel = rx_queue->channel; | 397 | struct efx_channel *channel = efx_rx_queue_channel(rx_queue); |
397 | 398 | ||
398 | /* Post an event to cause NAPI to run and refill the queue */ | 399 | /* Post an event to cause NAPI to run and refill the queue */ |
399 | efx_nic_generate_fill_event(channel); | 400 | efx_nic_generate_fill_event(channel); |
@@ -421,7 +422,7 @@ static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue, | |||
421 | netif_err(efx, rx_err, efx->net_dev, | 422 | netif_err(efx, rx_err, efx->net_dev, |
422 | " RX queue %d seriously overlength " | 423 | " RX queue %d seriously overlength " |
423 | "RX event (0x%x > 0x%x+0x%x). Leaking\n", | 424 | "RX event (0x%x > 0x%x+0x%x). Leaking\n", |
424 | rx_queue->queue, len, max_len, | 425 | efx_rx_queue_index(rx_queue), len, max_len, |
425 | efx->type->rx_buffer_padding); | 426 | efx->type->rx_buffer_padding); |
426 | /* If this buffer was skb-allocated, then the meta | 427 | /* If this buffer was skb-allocated, then the meta |
427 | * data at the end of the skb will be trashed. So | 428 | * data at the end of the skb will be trashed. So |
@@ -434,10 +435,10 @@ static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue, | |||
434 | netif_err(efx, rx_err, efx->net_dev, | 435 | netif_err(efx, rx_err, efx->net_dev, |
435 | " RX queue %d overlength RX event " | 436 | " RX queue %d overlength RX event " |
436 | "(0x%x > 0x%x)\n", | 437 | "(0x%x > 0x%x)\n", |
437 | rx_queue->queue, len, max_len); | 438 | efx_rx_queue_index(rx_queue), len, max_len); |
438 | } | 439 | } |
439 | 440 | ||
440 | rx_queue->channel->n_rx_overlength++; | 441 | efx_rx_queue_channel(rx_queue)->n_rx_overlength++; |
441 | } | 442 | } |
442 | 443 | ||
443 | /* Pass a received packet up through the generic LRO stack | 444 | /* Pass a received packet up through the generic LRO stack |
@@ -507,7 +508,7 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index, | |||
507 | unsigned int len, bool checksummed, bool discard) | 508 | unsigned int len, bool checksummed, bool discard) |
508 | { | 509 | { |
509 | struct efx_nic *efx = rx_queue->efx; | 510 | struct efx_nic *efx = rx_queue->efx; |
510 | struct efx_channel *channel = rx_queue->channel; | 511 | struct efx_channel *channel = efx_rx_queue_channel(rx_queue); |
511 | struct efx_rx_buffer *rx_buf; | 512 | struct efx_rx_buffer *rx_buf; |
512 | bool leak_packet = false; | 513 | bool leak_packet = false; |
513 | 514 | ||
@@ -528,7 +529,7 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index, | |||
528 | 529 | ||
529 | netif_vdbg(efx, rx_status, efx->net_dev, | 530 | netif_vdbg(efx, rx_status, efx->net_dev, |
530 | "RX queue %d received id %x at %llx+%x %s%s\n", | 531 | "RX queue %d received id %x at %llx+%x %s%s\n", |
531 | rx_queue->queue, index, | 532 | efx_rx_queue_index(rx_queue), index, |
532 | (unsigned long long)rx_buf->dma_addr, len, | 533 | (unsigned long long)rx_buf->dma_addr, len, |
533 | (checksummed ? " [SUMMED]" : ""), | 534 | (checksummed ? " [SUMMED]" : ""), |
534 | (discard ? " [DISCARD]" : "")); | 535 | (discard ? " [DISCARD]" : "")); |
@@ -560,12 +561,11 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index, | |||
560 | */ | 561 | */ |
561 | rx_buf->len = len; | 562 | rx_buf->len = len; |
562 | out: | 563 | out: |
563 | if (rx_queue->channel->rx_pkt) | 564 | if (channel->rx_pkt) |
564 | __efx_rx_packet(rx_queue->channel, | 565 | __efx_rx_packet(channel, |
565 | rx_queue->channel->rx_pkt, | 566 | channel->rx_pkt, channel->rx_pkt_csummed); |
566 | rx_queue->channel->rx_pkt_csummed); | 567 | channel->rx_pkt = rx_buf; |
567 | rx_queue->channel->rx_pkt = rx_buf; | 568 | channel->rx_pkt_csummed = checksummed; |
568 | rx_queue->channel->rx_pkt_csummed = checksummed; | ||
569 | } | 569 | } |
570 | 570 | ||
571 | /* Handle a received packet. Second half: Touches packet payload. */ | 571 | /* Handle a received packet. Second half: Touches packet payload. */ |
@@ -654,7 +654,7 @@ int efx_probe_rx_queue(struct efx_rx_queue *rx_queue) | |||
654 | int rc; | 654 | int rc; |
655 | 655 | ||
656 | netif_dbg(efx, probe, efx->net_dev, | 656 | netif_dbg(efx, probe, efx->net_dev, |
657 | "creating RX queue %d\n", rx_queue->queue); | 657 | "creating RX queue %d\n", efx_rx_queue_index(rx_queue)); |
658 | 658 | ||
659 | /* Allocate RX buffers */ | 659 | /* Allocate RX buffers */ |
660 | rxq_size = EFX_RXQ_SIZE * sizeof(*rx_queue->buffer); | 660 | rxq_size = EFX_RXQ_SIZE * sizeof(*rx_queue->buffer); |
@@ -675,7 +675,7 @@ void efx_init_rx_queue(struct efx_rx_queue *rx_queue) | |||
675 | unsigned int max_fill, trigger, limit; | 675 | unsigned int max_fill, trigger, limit; |
676 | 676 | ||
677 | netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev, | 677 | netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev, |
678 | "initialising RX queue %d\n", rx_queue->queue); | 678 | "initialising RX queue %d\n", efx_rx_queue_index(rx_queue)); |
679 | 679 | ||
680 | /* Initialise ptr fields */ | 680 | /* Initialise ptr fields */ |
681 | rx_queue->added_count = 0; | 681 | rx_queue->added_count = 0; |
@@ -703,7 +703,7 @@ void efx_fini_rx_queue(struct efx_rx_queue *rx_queue) | |||
703 | struct efx_rx_buffer *rx_buf; | 703 | struct efx_rx_buffer *rx_buf; |
704 | 704 | ||
705 | netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev, | 705 | netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev, |
706 | "shutting down RX queue %d\n", rx_queue->queue); | 706 | "shutting down RX queue %d\n", efx_rx_queue_index(rx_queue)); |
707 | 707 | ||
708 | del_timer_sync(&rx_queue->slow_fill); | 708 | del_timer_sync(&rx_queue->slow_fill); |
709 | efx_nic_fini_rx(rx_queue); | 709 | efx_nic_fini_rx(rx_queue); |
@@ -720,7 +720,7 @@ void efx_fini_rx_queue(struct efx_rx_queue *rx_queue) | |||
720 | void efx_remove_rx_queue(struct efx_rx_queue *rx_queue) | 720 | void efx_remove_rx_queue(struct efx_rx_queue *rx_queue) |
721 | { | 721 | { |
722 | netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev, | 722 | netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev, |
723 | "destroying RX queue %d\n", rx_queue->queue); | 723 | "destroying RX queue %d\n", efx_rx_queue_index(rx_queue)); |
724 | 724 | ||
725 | efx_nic_remove_rx(rx_queue); | 725 | efx_nic_remove_rx(rx_queue); |
726 | 726 | ||