aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorBen Hutchings <bhutchings@solarflare.com>2010-09-10 02:41:36 -0400
committerDavid S. Miller <davem@davemloft.net>2010-09-10 15:27:32 -0400
commitba1e8a35b77f3bc7d109696dbd2a7fd5af208b62 (patch)
tree5c65eea772e85b7e31b3488fa09e523bbd68b272
parent58758aa505edc5b8f8393cee45b54c7485d76de5 (diff)
sfc: Abstract channel and index lookup for RX queues
Signed-off-by: Ben Hutchings <bhutchings@solarflare.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--drivers/net/sfc/net_driver.h11
-rw-r--r--drivers/net/sfc/nic.c31
-rw-r--r--drivers/net/sfc/rx.c38
3 files changed, 47 insertions, 33 deletions
diff --git a/drivers/net/sfc/net_driver.h b/drivers/net/sfc/net_driver.h
index 64e7caa4bbb5..89c6e02c57dd 100644
--- a/drivers/net/sfc/net_driver.h
+++ b/drivers/net/sfc/net_driver.h
@@ -943,6 +943,17 @@ struct efx_nic_type {
943 continue; \ 943 continue; \
944 else 944 else
945 945
946static inline struct efx_channel *
947efx_rx_queue_channel(struct efx_rx_queue *rx_queue)
948{
949 return rx_queue->channel;
950}
951
952static inline int efx_rx_queue_index(struct efx_rx_queue *rx_queue)
953{
954 return rx_queue->queue;
955}
956
946/* Returns a pointer to the specified receive buffer in the RX 957/* Returns a pointer to the specified receive buffer in the RX
947 * descriptor queue. 958 * descriptor queue.
948 */ 959 */
diff --git a/drivers/net/sfc/nic.c b/drivers/net/sfc/nic.c
index 8efe1ca83c1d..be4d5524054f 100644
--- a/drivers/net/sfc/nic.c
+++ b/drivers/net/sfc/nic.c
@@ -539,8 +539,8 @@ void efx_nic_notify_rx_desc(struct efx_rx_queue *rx_queue)
539 wmb(); 539 wmb();
540 write_ptr = rx_queue->added_count & EFX_RXQ_MASK; 540 write_ptr = rx_queue->added_count & EFX_RXQ_MASK;
541 EFX_POPULATE_DWORD_1(reg, FRF_AZ_RX_DESC_WPTR_DWORD, write_ptr); 541 EFX_POPULATE_DWORD_1(reg, FRF_AZ_RX_DESC_WPTR_DWORD, write_ptr);
542 efx_writed_page(rx_queue->efx, &reg, 542 efx_writed_page(rx_queue->efx, &reg, FR_AZ_RX_DESC_UPD_DWORD_P0,
543 FR_AZ_RX_DESC_UPD_DWORD_P0, rx_queue->queue); 543 efx_rx_queue_index(rx_queue));
544} 544}
545 545
546int efx_nic_probe_rx(struct efx_rx_queue *rx_queue) 546int efx_nic_probe_rx(struct efx_rx_queue *rx_queue)
@@ -561,7 +561,7 @@ void efx_nic_init_rx(struct efx_rx_queue *rx_queue)
561 561
562 netif_dbg(efx, hw, efx->net_dev, 562 netif_dbg(efx, hw, efx->net_dev,
563 "RX queue %d ring in special buffers %d-%d\n", 563 "RX queue %d ring in special buffers %d-%d\n",
564 rx_queue->queue, rx_queue->rxd.index, 564 efx_rx_queue_index(rx_queue), rx_queue->rxd.index,
565 rx_queue->rxd.index + rx_queue->rxd.entries - 1); 565 rx_queue->rxd.index + rx_queue->rxd.entries - 1);
566 566
567 rx_queue->flushed = FLUSH_NONE; 567 rx_queue->flushed = FLUSH_NONE;
@@ -575,9 +575,10 @@ void efx_nic_init_rx(struct efx_rx_queue *rx_queue)
575 FRF_AZ_RX_ISCSI_HDIG_EN, iscsi_digest_en, 575 FRF_AZ_RX_ISCSI_HDIG_EN, iscsi_digest_en,
576 FRF_AZ_RX_DESCQ_BUF_BASE_ID, rx_queue->rxd.index, 576 FRF_AZ_RX_DESCQ_BUF_BASE_ID, rx_queue->rxd.index,
577 FRF_AZ_RX_DESCQ_EVQ_ID, 577 FRF_AZ_RX_DESCQ_EVQ_ID,
578 rx_queue->channel->channel, 578 efx_rx_queue_channel(rx_queue)->channel,
579 FRF_AZ_RX_DESCQ_OWNER_ID, 0, 579 FRF_AZ_RX_DESCQ_OWNER_ID, 0,
580 FRF_AZ_RX_DESCQ_LABEL, rx_queue->queue, 580 FRF_AZ_RX_DESCQ_LABEL,
581 efx_rx_queue_index(rx_queue),
581 FRF_AZ_RX_DESCQ_SIZE, 582 FRF_AZ_RX_DESCQ_SIZE,
582 __ffs(rx_queue->rxd.entries), 583 __ffs(rx_queue->rxd.entries),
583 FRF_AZ_RX_DESCQ_TYPE, 0 /* kernel queue */ , 584 FRF_AZ_RX_DESCQ_TYPE, 0 /* kernel queue */ ,
@@ -585,7 +586,7 @@ void efx_nic_init_rx(struct efx_rx_queue *rx_queue)
585 FRF_AZ_RX_DESCQ_JUMBO, !is_b0, 586 FRF_AZ_RX_DESCQ_JUMBO, !is_b0,
586 FRF_AZ_RX_DESCQ_EN, 1); 587 FRF_AZ_RX_DESCQ_EN, 1);
587 efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base, 588 efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
588 rx_queue->queue); 589 efx_rx_queue_index(rx_queue));
589} 590}
590 591
591static void efx_flush_rx_queue(struct efx_rx_queue *rx_queue) 592static void efx_flush_rx_queue(struct efx_rx_queue *rx_queue)
@@ -598,7 +599,8 @@ static void efx_flush_rx_queue(struct efx_rx_queue *rx_queue)
598 /* Post a flush command */ 599 /* Post a flush command */
599 EFX_POPULATE_OWORD_2(rx_flush_descq, 600 EFX_POPULATE_OWORD_2(rx_flush_descq,
600 FRF_AZ_RX_FLUSH_DESCQ_CMD, 1, 601 FRF_AZ_RX_FLUSH_DESCQ_CMD, 1,
601 FRF_AZ_RX_FLUSH_DESCQ, rx_queue->queue); 602 FRF_AZ_RX_FLUSH_DESCQ,
603 efx_rx_queue_index(rx_queue));
602 efx_writeo(efx, &rx_flush_descq, FR_AZ_RX_FLUSH_DESCQ); 604 efx_writeo(efx, &rx_flush_descq, FR_AZ_RX_FLUSH_DESCQ);
603} 605}
604 606
@@ -613,7 +615,7 @@ void efx_nic_fini_rx(struct efx_rx_queue *rx_queue)
613 /* Remove RX descriptor ring from card */ 615 /* Remove RX descriptor ring from card */
614 EFX_ZERO_OWORD(rx_desc_ptr); 616 EFX_ZERO_OWORD(rx_desc_ptr);
615 efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base, 617 efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
616 rx_queue->queue); 618 efx_rx_queue_index(rx_queue));
617 619
618 /* Unpin RX descriptor ring */ 620 /* Unpin RX descriptor ring */
619 efx_fini_special_buffer(efx, &rx_queue->rxd); 621 efx_fini_special_buffer(efx, &rx_queue->rxd);
@@ -714,6 +716,7 @@ static void efx_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
714 bool *rx_ev_pkt_ok, 716 bool *rx_ev_pkt_ok,
715 bool *discard) 717 bool *discard)
716{ 718{
719 struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
717 struct efx_nic *efx = rx_queue->efx; 720 struct efx_nic *efx = rx_queue->efx;
718 bool rx_ev_buf_owner_id_err, rx_ev_ip_hdr_chksum_err; 721 bool rx_ev_buf_owner_id_err, rx_ev_ip_hdr_chksum_err;
719 bool rx_ev_tcp_udp_chksum_err, rx_ev_eth_crc_err; 722 bool rx_ev_tcp_udp_chksum_err, rx_ev_eth_crc_err;
@@ -746,14 +749,14 @@ static void efx_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
746 /* Count errors that are not in MAC stats. Ignore expected 749 /* Count errors that are not in MAC stats. Ignore expected
747 * checksum errors during self-test. */ 750 * checksum errors during self-test. */
748 if (rx_ev_frm_trunc) 751 if (rx_ev_frm_trunc)
749 ++rx_queue->channel->n_rx_frm_trunc; 752 ++channel->n_rx_frm_trunc;
750 else if (rx_ev_tobe_disc) 753 else if (rx_ev_tobe_disc)
751 ++rx_queue->channel->n_rx_tobe_disc; 754 ++channel->n_rx_tobe_disc;
752 else if (!efx->loopback_selftest) { 755 else if (!efx->loopback_selftest) {
753 if (rx_ev_ip_hdr_chksum_err) 756 if (rx_ev_ip_hdr_chksum_err)
754 ++rx_queue->channel->n_rx_ip_hdr_chksum_err; 757 ++channel->n_rx_ip_hdr_chksum_err;
755 else if (rx_ev_tcp_udp_chksum_err) 758 else if (rx_ev_tcp_udp_chksum_err)
756 ++rx_queue->channel->n_rx_tcp_udp_chksum_err; 759 ++channel->n_rx_tcp_udp_chksum_err;
757 } 760 }
758 761
759 /* The frame must be discarded if any of these are true. */ 762 /* The frame must be discarded if any of these are true. */
@@ -769,7 +772,7 @@ static void efx_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
769 netif_dbg(efx, rx_err, efx->net_dev, 772 netif_dbg(efx, rx_err, efx->net_dev,
770 " RX queue %d unexpected RX event " 773 " RX queue %d unexpected RX event "
771 EFX_QWORD_FMT "%s%s%s%s%s%s%s%s\n", 774 EFX_QWORD_FMT "%s%s%s%s%s%s%s%s\n",
772 rx_queue->queue, EFX_QWORD_VAL(*event), 775 efx_rx_queue_index(rx_queue), EFX_QWORD_VAL(*event),
773 rx_ev_buf_owner_id_err ? " [OWNER_ID_ERR]" : "", 776 rx_ev_buf_owner_id_err ? " [OWNER_ID_ERR]" : "",
774 rx_ev_ip_hdr_chksum_err ? 777 rx_ev_ip_hdr_chksum_err ?
775 " [IP_HDR_CHKSUM_ERR]" : "", 778 " [IP_HDR_CHKSUM_ERR]" : "",
@@ -1269,7 +1272,7 @@ int efx_nic_flush_queues(struct efx_nic *efx)
1269 if (rx_queue->flushed != FLUSH_DONE) 1272 if (rx_queue->flushed != FLUSH_DONE)
1270 netif_err(efx, hw, efx->net_dev, 1273 netif_err(efx, hw, efx->net_dev,
1271 "rx queue %d flush command timed out\n", 1274 "rx queue %d flush command timed out\n",
1272 rx_queue->queue); 1275 efx_rx_queue_index(rx_queue));
1273 rx_queue->flushed = FLUSH_DONE; 1276 rx_queue->flushed = FLUSH_DONE;
1274 } 1277 }
1275 1278
diff --git a/drivers/net/sfc/rx.c b/drivers/net/sfc/rx.c
index acb372e841b2..1e6c8cfa6c0c 100644
--- a/drivers/net/sfc/rx.c
+++ b/drivers/net/sfc/rx.c
@@ -341,7 +341,7 @@ static void efx_recycle_rx_buffer(struct efx_channel *channel,
341 */ 341 */
342void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue) 342void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue)
343{ 343{
344 struct efx_channel *channel = rx_queue->channel; 344 struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
345 unsigned fill_level; 345 unsigned fill_level;
346 int space, rc = 0; 346 int space, rc = 0;
347 347
@@ -364,7 +364,8 @@ void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue)
364 netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev, 364 netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
365 "RX queue %d fast-filling descriptor ring from" 365 "RX queue %d fast-filling descriptor ring from"
366 " level %d to level %d using %s allocation\n", 366 " level %d to level %d using %s allocation\n",
367 rx_queue->queue, fill_level, rx_queue->fast_fill_limit, 367 efx_rx_queue_index(rx_queue), fill_level,
368 rx_queue->fast_fill_limit,
368 channel->rx_alloc_push_pages ? "page" : "skb"); 369 channel->rx_alloc_push_pages ? "page" : "skb");
369 370
370 do { 371 do {
@@ -382,7 +383,7 @@ void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue)
382 383
383 netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev, 384 netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
384 "RX queue %d fast-filled descriptor ring " 385 "RX queue %d fast-filled descriptor ring "
385 "to level %d\n", rx_queue->queue, 386 "to level %d\n", efx_rx_queue_index(rx_queue),
386 rx_queue->added_count - rx_queue->removed_count); 387 rx_queue->added_count - rx_queue->removed_count);
387 388
388 out: 389 out:
@@ -393,7 +394,7 @@ void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue)
393void efx_rx_slow_fill(unsigned long context) 394void efx_rx_slow_fill(unsigned long context)
394{ 395{
395 struct efx_rx_queue *rx_queue = (struct efx_rx_queue *)context; 396 struct efx_rx_queue *rx_queue = (struct efx_rx_queue *)context;
396 struct efx_channel *channel = rx_queue->channel; 397 struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
397 398
398 /* Post an event to cause NAPI to run and refill the queue */ 399 /* Post an event to cause NAPI to run and refill the queue */
399 efx_nic_generate_fill_event(channel); 400 efx_nic_generate_fill_event(channel);
@@ -421,7 +422,7 @@ static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
421 netif_err(efx, rx_err, efx->net_dev, 422 netif_err(efx, rx_err, efx->net_dev,
422 " RX queue %d seriously overlength " 423 " RX queue %d seriously overlength "
423 "RX event (0x%x > 0x%x+0x%x). Leaking\n", 424 "RX event (0x%x > 0x%x+0x%x). Leaking\n",
424 rx_queue->queue, len, max_len, 425 efx_rx_queue_index(rx_queue), len, max_len,
425 efx->type->rx_buffer_padding); 426 efx->type->rx_buffer_padding);
426 /* If this buffer was skb-allocated, then the meta 427 /* If this buffer was skb-allocated, then the meta
427 * data at the end of the skb will be trashed. So 428 * data at the end of the skb will be trashed. So
@@ -434,10 +435,10 @@ static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
434 netif_err(efx, rx_err, efx->net_dev, 435 netif_err(efx, rx_err, efx->net_dev,
435 " RX queue %d overlength RX event " 436 " RX queue %d overlength RX event "
436 "(0x%x > 0x%x)\n", 437 "(0x%x > 0x%x)\n",
437 rx_queue->queue, len, max_len); 438 efx_rx_queue_index(rx_queue), len, max_len);
438 } 439 }
439 440
440 rx_queue->channel->n_rx_overlength++; 441 efx_rx_queue_channel(rx_queue)->n_rx_overlength++;
441} 442}
442 443
443/* Pass a received packet up through the generic LRO stack 444/* Pass a received packet up through the generic LRO stack
@@ -507,7 +508,7 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
507 unsigned int len, bool checksummed, bool discard) 508 unsigned int len, bool checksummed, bool discard)
508{ 509{
509 struct efx_nic *efx = rx_queue->efx; 510 struct efx_nic *efx = rx_queue->efx;
510 struct efx_channel *channel = rx_queue->channel; 511 struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
511 struct efx_rx_buffer *rx_buf; 512 struct efx_rx_buffer *rx_buf;
512 bool leak_packet = false; 513 bool leak_packet = false;
513 514
@@ -528,7 +529,7 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
528 529
529 netif_vdbg(efx, rx_status, efx->net_dev, 530 netif_vdbg(efx, rx_status, efx->net_dev,
530 "RX queue %d received id %x at %llx+%x %s%s\n", 531 "RX queue %d received id %x at %llx+%x %s%s\n",
531 rx_queue->queue, index, 532 efx_rx_queue_index(rx_queue), index,
532 (unsigned long long)rx_buf->dma_addr, len, 533 (unsigned long long)rx_buf->dma_addr, len,
533 (checksummed ? " [SUMMED]" : ""), 534 (checksummed ? " [SUMMED]" : ""),
534 (discard ? " [DISCARD]" : "")); 535 (discard ? " [DISCARD]" : ""));
@@ -560,12 +561,11 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
560 */ 561 */
561 rx_buf->len = len; 562 rx_buf->len = len;
562out: 563out:
563 if (rx_queue->channel->rx_pkt) 564 if (channel->rx_pkt)
564 __efx_rx_packet(rx_queue->channel, 565 __efx_rx_packet(channel,
565 rx_queue->channel->rx_pkt, 566 channel->rx_pkt, channel->rx_pkt_csummed);
566 rx_queue->channel->rx_pkt_csummed); 567 channel->rx_pkt = rx_buf;
567 rx_queue->channel->rx_pkt = rx_buf; 568 channel->rx_pkt_csummed = checksummed;
568 rx_queue->channel->rx_pkt_csummed = checksummed;
569} 569}
570 570
571/* Handle a received packet. Second half: Touches packet payload. */ 571/* Handle a received packet. Second half: Touches packet payload. */
@@ -654,7 +654,7 @@ int efx_probe_rx_queue(struct efx_rx_queue *rx_queue)
654 int rc; 654 int rc;
655 655
656 netif_dbg(efx, probe, efx->net_dev, 656 netif_dbg(efx, probe, efx->net_dev,
657 "creating RX queue %d\n", rx_queue->queue); 657 "creating RX queue %d\n", efx_rx_queue_index(rx_queue));
658 658
659 /* Allocate RX buffers */ 659 /* Allocate RX buffers */
660 rxq_size = EFX_RXQ_SIZE * sizeof(*rx_queue->buffer); 660 rxq_size = EFX_RXQ_SIZE * sizeof(*rx_queue->buffer);
@@ -675,7 +675,7 @@ void efx_init_rx_queue(struct efx_rx_queue *rx_queue)
675 unsigned int max_fill, trigger, limit; 675 unsigned int max_fill, trigger, limit;
676 676
677 netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev, 677 netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
678 "initialising RX queue %d\n", rx_queue->queue); 678 "initialising RX queue %d\n", efx_rx_queue_index(rx_queue));
679 679
680 /* Initialise ptr fields */ 680 /* Initialise ptr fields */
681 rx_queue->added_count = 0; 681 rx_queue->added_count = 0;
@@ -703,7 +703,7 @@ void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
703 struct efx_rx_buffer *rx_buf; 703 struct efx_rx_buffer *rx_buf;
704 704
705 netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev, 705 netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
706 "shutting down RX queue %d\n", rx_queue->queue); 706 "shutting down RX queue %d\n", efx_rx_queue_index(rx_queue));
707 707
708 del_timer_sync(&rx_queue->slow_fill); 708 del_timer_sync(&rx_queue->slow_fill);
709 efx_nic_fini_rx(rx_queue); 709 efx_nic_fini_rx(rx_queue);
@@ -720,7 +720,7 @@ void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
720void efx_remove_rx_queue(struct efx_rx_queue *rx_queue) 720void efx_remove_rx_queue(struct efx_rx_queue *rx_queue)
721{ 721{
722 netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev, 722 netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
723 "destroying RX queue %d\n", rx_queue->queue); 723 "destroying RX queue %d\n", efx_rx_queue_index(rx_queue));
724 724
725 efx_nic_remove_rx(rx_queue); 725 efx_nic_remove_rx(rx_queue);
726 726