author:    Sreenivasa Honnur <Sreenivasa.Honnur@neterion.com>   2009-10-04 21:55:47 -0400
committer: David S. Miller <davem@davemloft.net>                2009-10-06 18:22:53 -0400
commit:    f0dfebafcc14a7456eb6ae974b68f600fdd8b42d (patch)
tree:      851584c52b4e24cf929717b05746bee5394f1c70 /drivers/net/vxge
parent:    a4a987d82258f55c4bc4ab0156fb20a2b3fa4f41 (diff)
vxge: Removed unused functions.
- Removed the wrr_rebalance function. This feature is not supported by the ASIC, hence removing the related code.

Signed-off-by: Sreenivasa Honnur <sreenivasa.honnur@neterion.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
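For context, the deleted vxge_hw_wrr_rebalance() built weighted-round-robin service-state tables from each vpath's configured minimum bandwidth and then programmed them into the RX and KDFC calendars. The sketch below is a hypothetical, stand-alone illustration of just the slot-assignment step that is visible in the diff; MAX_VPATHS, SERVICE_STATES, BANDWIDTH_MAX, fill_wrr_states() and the main() harness are stand-ins invented for the example, not the driver's definitions.

/*
 * Hypothetical sketch of the slot-assignment loop the removed
 * vxge_hw_wrr_rebalance() used for both the FIFO and the ring service
 * states.  Every vpath with a minimum-bandwidth guarantee is placed in
 * the table every "how_often" slots; leftover slots fall back to 0.
 */
#include <stdio.h>

#define MAX_VPATHS      17      /* stand-in for VXGE_HW_MAX_VIRTUAL_PATHS */
#define SERVICE_STATES  40      /* stand-in for VXGE_HW_WEIGHTED_RR_SERVICE_STATES */
#define BANDWIDTH_MAX   100     /* stand-in for VXGE_HW_VPATH_BANDWIDTH_MAX */

static void fill_wrr_states(const unsigned int min_bw[MAX_VPATHS],
                            int wrr_states[SERVICE_STATES])
{
        unsigned int i, j, how_often;

        /* Initialize all the slots as unused. */
        for (i = 0; i < SERVICE_STATES; i++)
                wrr_states[i] = -1;

        for (i = 0; i < MAX_VPATHS; i++) {
                if (!min_bw[i])
                        continue;

                /* A larger guaranteed share means a smaller stride. */
                how_often = BANDWIDTH_MAX / min_bw[i];
                if (!how_often)
                        continue;

                for (j = 0; j < SERVICE_STATES;) {
                        if (wrr_states[j] == -1) {
                                wrr_states[j] = i;
                                /* Make sure each vpath is serviced at least once. */
                                if (i == j)
                                        j += MAX_VPATHS;
                                else
                                        j += how_often;
                        } else {
                                j++;
                        }
                }
        }

        /* Fill the unused slots with 0, as the removed code did. */
        for (j = 0; j < SERVICE_STATES; j++)
                if (wrr_states[j] == -1)
                        wrr_states[j] = 0;
}

int main(void)
{
        unsigned int min_bw[MAX_VPATHS] = { 50, 25, 10 };  /* vpaths 0-2 get guarantees */
        int wrr_states[SERVICE_STATES];
        unsigned int j;

        fill_wrr_states(min_bw, wrr_states);
        for (j = 0; j < SERVICE_STATES; j++)
                printf("%d ", wrr_states[j]);
        printf("\n");
        return 0;
}

Vpaths with larger guarantees end up in proportionally more slots; the real function then encoded such tables into the rx_w_round_robin and kdfc_w_round_robin registers shown in the diff below.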
Diffstat (limited to 'drivers/net/vxge')
-rw-r--r--  drivers/net/vxge/vxge-config.c  | 204
1 file changed, 0 insertions(+), 204 deletions(-)
diff --git a/drivers/net/vxge/vxge-config.c b/drivers/net/vxge/vxge-config.c
index e51fac8d0ad0..933237ec38d8 100644
--- a/drivers/net/vxge/vxge-config.c
+++ b/drivers/net/vxge/vxge-config.c
@@ -461,209 +461,6 @@ __vxge_hw_verify_pci_e_info(struct __vxge_hw_device *hldev)
 }
 
 /*
- * vxge_hw_wrr_rebalance - Rebalance the RX_WRR and KDFC_WRR calandars.
- * Rebalance the RX_WRR and KDFC_WRR calandars.
- */
-static enum
-vxge_hw_status vxge_hw_wrr_rebalance(struct __vxge_hw_device *hldev)
-{
-        u64 val64;
-        u32 wrr_states[VXGE_HW_WEIGHTED_RR_SERVICE_STATES];
-        u32 i, j, how_often = 1;
-        enum vxge_hw_status status = VXGE_HW_OK;
-
-        status = __vxge_hw_device_is_privilaged(hldev->host_type,
-                        hldev->func_id);
-        if (status != VXGE_HW_OK)
-                goto exit;
-
-        /* Reset the priorities assigned to the WRR arbitration
-           phases for the receive traffic */
-        for (i = 0; i < VXGE_HW_WRR_RING_COUNT; i++)
-                writeq(0, ((&hldev->mrpcim_reg->rx_w_round_robin_0) + i));
-
-        /* Reset the transmit FIFO servicing calendar for FIFOs */
-        for (i = 0; i < VXGE_HW_WRR_FIFO_COUNT; i++) {
-                writeq(0, ((&hldev->mrpcim_reg->kdfc_w_round_robin_0) + i));
-                writeq(0, ((&hldev->mrpcim_reg->kdfc_w_round_robin_20) + i));
-        }
-
-        /* Assign WRR priority 0 for all FIFOs */
-        for (i = 1; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
-                writeq(VXGE_HW_KDFC_FIFO_0_CTRL_WRR_NUMBER(0),
-                        ((&hldev->mrpcim_reg->kdfc_fifo_0_ctrl) + i));
-
-                writeq(VXGE_HW_KDFC_FIFO_17_CTRL_WRR_NUMBER(0),
-                        ((&hldev->mrpcim_reg->kdfc_fifo_17_ctrl) + i));
-        }
-
-        /* Reset to service non-offload doorbells */
-        writeq(0, &hldev->mrpcim_reg->kdfc_entry_type_sel_0);
-        writeq(0, &hldev->mrpcim_reg->kdfc_entry_type_sel_1);
-
-        /* Set priority 0 to all receive queues */
-        writeq(0, &hldev->mrpcim_reg->rx_queue_priority_0);
-        writeq(0, &hldev->mrpcim_reg->rx_queue_priority_1);
-        writeq(0, &hldev->mrpcim_reg->rx_queue_priority_2);
-
-        /* Initialize all the slots as unused */
-        for (i = 0; i < VXGE_HW_WEIGHTED_RR_SERVICE_STATES; i++)
-                wrr_states[i] = -1;
-
-        /* Prepare the Fifo service states */
-        for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
-
-                if (!hldev->config.vp_config[i].min_bandwidth)
-                        continue;
-
-                how_often = VXGE_HW_VPATH_BANDWIDTH_MAX /
-                                hldev->config.vp_config[i].min_bandwidth;
-                if (how_often) {
-
-                        for (j = 0; j < VXGE_HW_WRR_FIFO_SERVICE_STATES;) {
-                                if (wrr_states[j] == -1) {
-                                        wrr_states[j] = i;
-                                        /* Make sure each fifo is serviced
-                                         * atleast once */
-                                        if (i == j)
-                                                j += VXGE_HW_MAX_VIRTUAL_PATHS;
-                                        else
-                                                j += how_often;
-                                } else
-                                        j++;
-                        }
-                }
-        }
-
-        /* Fill the unused slots with 0 */
-        for (j = 0; j < VXGE_HW_WEIGHTED_RR_SERVICE_STATES; j++) {
-                if (wrr_states[j] == -1)
-                        wrr_states[j] = 0;
-        }
-
-        /* Assign WRR priority number for FIFOs */
-        for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
-                writeq(VXGE_HW_KDFC_FIFO_0_CTRL_WRR_NUMBER(i),
-                        ((&hldev->mrpcim_reg->kdfc_fifo_0_ctrl) + i));
-
-                writeq(VXGE_HW_KDFC_FIFO_17_CTRL_WRR_NUMBER(i),
-                        ((&hldev->mrpcim_reg->kdfc_fifo_17_ctrl) + i));
-        }
-
-        /* Modify the servicing algorithm applied to the 3 types of doorbells.
-           i.e, none-offload, message and offload */
-        writeq(VXGE_HW_KDFC_ENTRY_TYPE_SEL_0_NUMBER_0(0) |
-                VXGE_HW_KDFC_ENTRY_TYPE_SEL_0_NUMBER_1(0) |
-                VXGE_HW_KDFC_ENTRY_TYPE_SEL_0_NUMBER_2(0) |
-                VXGE_HW_KDFC_ENTRY_TYPE_SEL_0_NUMBER_3(0) |
-                VXGE_HW_KDFC_ENTRY_TYPE_SEL_0_NUMBER_4(1) |
-                VXGE_HW_KDFC_ENTRY_TYPE_SEL_0_NUMBER_5(0) |
-                VXGE_HW_KDFC_ENTRY_TYPE_SEL_0_NUMBER_6(0) |
-                VXGE_HW_KDFC_ENTRY_TYPE_SEL_0_NUMBER_7(0),
-                &hldev->mrpcim_reg->kdfc_entry_type_sel_0);
-
-        writeq(VXGE_HW_KDFC_ENTRY_TYPE_SEL_1_NUMBER_8(1),
-                &hldev->mrpcim_reg->kdfc_entry_type_sel_1);
-
-        for (i = 0, j = 0; i < VXGE_HW_WRR_FIFO_COUNT; i++) {
-
-                val64 = VXGE_HW_KDFC_W_ROUND_ROBIN_0_NUMBER_0(wrr_states[j++]);
-                val64 |= VXGE_HW_KDFC_W_ROUND_ROBIN_0_NUMBER_1(wrr_states[j++]);
-                val64 |= VXGE_HW_KDFC_W_ROUND_ROBIN_0_NUMBER_2(wrr_states[j++]);
-                val64 |= VXGE_HW_KDFC_W_ROUND_ROBIN_0_NUMBER_3(wrr_states[j++]);
-                val64 |= VXGE_HW_KDFC_W_ROUND_ROBIN_0_NUMBER_4(wrr_states[j++]);
-                val64 |= VXGE_HW_KDFC_W_ROUND_ROBIN_0_NUMBER_5(wrr_states[j++]);
-                val64 |= VXGE_HW_KDFC_W_ROUND_ROBIN_0_NUMBER_6(wrr_states[j++]);
-                val64 |= VXGE_HW_KDFC_W_ROUND_ROBIN_0_NUMBER_7(wrr_states[j++]);
-
-                writeq(val64, (&hldev->mrpcim_reg->kdfc_w_round_robin_0 + i));
-                writeq(val64, (&hldev->mrpcim_reg->kdfc_w_round_robin_20 + i));
-        }
-
-        /* Set up the priorities assigned to receive queues */
-        writeq(VXGE_HW_RX_QUEUE_PRIORITY_0_RX_Q_NUMBER_0(0) |
-                VXGE_HW_RX_QUEUE_PRIORITY_0_RX_Q_NUMBER_1(1) |
-                VXGE_HW_RX_QUEUE_PRIORITY_0_RX_Q_NUMBER_2(2) |
-                VXGE_HW_RX_QUEUE_PRIORITY_0_RX_Q_NUMBER_3(3) |
-                VXGE_HW_RX_QUEUE_PRIORITY_0_RX_Q_NUMBER_4(4) |
-                VXGE_HW_RX_QUEUE_PRIORITY_0_RX_Q_NUMBER_5(5) |
-                VXGE_HW_RX_QUEUE_PRIORITY_0_RX_Q_NUMBER_6(6) |
-                VXGE_HW_RX_QUEUE_PRIORITY_0_RX_Q_NUMBER_7(7),
-                &hldev->mrpcim_reg->rx_queue_priority_0);
-
-        writeq(VXGE_HW_RX_QUEUE_PRIORITY_1_RX_Q_NUMBER_8(8) |
-                VXGE_HW_RX_QUEUE_PRIORITY_1_RX_Q_NUMBER_9(9) |
-                VXGE_HW_RX_QUEUE_PRIORITY_1_RX_Q_NUMBER_10(10) |
-                VXGE_HW_RX_QUEUE_PRIORITY_1_RX_Q_NUMBER_11(11) |
-                VXGE_HW_RX_QUEUE_PRIORITY_1_RX_Q_NUMBER_12(12) |
-                VXGE_HW_RX_QUEUE_PRIORITY_1_RX_Q_NUMBER_13(13) |
-                VXGE_HW_RX_QUEUE_PRIORITY_1_RX_Q_NUMBER_14(14) |
-                VXGE_HW_RX_QUEUE_PRIORITY_1_RX_Q_NUMBER_15(15),
-                &hldev->mrpcim_reg->rx_queue_priority_1);
-
-        writeq(VXGE_HW_RX_QUEUE_PRIORITY_2_RX_Q_NUMBER_16(16),
-                &hldev->mrpcim_reg->rx_queue_priority_2);
-
-        /* Initialize all the slots as unused */
-        for (i = 0; i < VXGE_HW_WEIGHTED_RR_SERVICE_STATES; i++)
-                wrr_states[i] = -1;
-
-        /* Prepare the Ring service states */
-        for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
-
-                if (!hldev->config.vp_config[i].min_bandwidth)
-                        continue;
-
-                how_often = VXGE_HW_VPATH_BANDWIDTH_MAX /
-                        hldev->config.vp_config[i].min_bandwidth;
-
-                if (how_often) {
-                        for (j = 0; j < VXGE_HW_WRR_RING_SERVICE_STATES;) {
-                                if (wrr_states[j] == -1) {
-                                        wrr_states[j] = i;
-                                        /* Make sure each ring is
-                                         * serviced atleast once */
-                                        if (i == j)
-                                                j += VXGE_HW_MAX_VIRTUAL_PATHS;
-                                        else
-                                                j += how_often;
-                                } else
-                                        j++;
-                        }
-                }
-        }
-
-        /* Fill the unused slots with 0 */
-        for (j = 0; j < VXGE_HW_WEIGHTED_RR_SERVICE_STATES; j++) {
-                if (wrr_states[j] == -1)
-                        wrr_states[j] = 0;
-        }
-
-        for (i = 0, j = 0; i < VXGE_HW_WRR_RING_COUNT; i++) {
-                val64 = VXGE_HW_RX_W_ROUND_ROBIN_0_RX_W_PRIORITY_SS_0(
-                                wrr_states[j++]);
-                val64 |= VXGE_HW_RX_W_ROUND_ROBIN_0_RX_W_PRIORITY_SS_1(
-                                wrr_states[j++]);
-                val64 |= VXGE_HW_RX_W_ROUND_ROBIN_0_RX_W_PRIORITY_SS_2(
-                                wrr_states[j++]);
-                val64 |= VXGE_HW_RX_W_ROUND_ROBIN_0_RX_W_PRIORITY_SS_3(
-                                wrr_states[j++]);
-                val64 |= VXGE_HW_RX_W_ROUND_ROBIN_0_RX_W_PRIORITY_SS_4(
-                                wrr_states[j++]);
-                val64 |= VXGE_HW_RX_W_ROUND_ROBIN_0_RX_W_PRIORITY_SS_5(
-                                wrr_states[j++]);
-                val64 |= VXGE_HW_RX_W_ROUND_ROBIN_0_RX_W_PRIORITY_SS_6(
-                                wrr_states[j++]);
-                val64 |= VXGE_HW_RX_W_ROUND_ROBIN_0_RX_W_PRIORITY_SS_7(
-                                wrr_states[j++]);
-
-                writeq(val64, ((&hldev->mrpcim_reg->rx_w_round_robin_0) + i));
-        }
-exit:
-        return status;
-}
-
-/*
  * __vxge_hw_device_initialize
  * Initialize Titan-V hardware.
  */
@@ -679,7 +476,6 @@ enum vxge_hw_status __vxge_hw_device_initialize(struct __vxge_hw_device *hldev)
                 goto exit;
         }
 
-        vxge_hw_wrr_rebalance(hldev);
 exit:
         return status;
 }
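A note on the writeq() calls in the deleted loops above: eight consecutive wrr_states entries were OR'ed into a single 64-bit value and written to one round-robin register. The stand-alone sketch below illustrates that packing pattern; the 8-bit field width, the bit ordering, and the helper names are assumptions made for the example (the real field layouts come from the VXGE_HW_*_NUMBER_n() macros in vxge-reg.h), so it shows the idea rather than the Titan's actual register layout.

/*
 * Illustrative only: pack eight per-slot WRR entries into one 64-bit
 * register image, mimicking how the removed code OR'ed together the
 * VXGE_HW_KDFC_W_ROUND_ROBIN_0_NUMBER_n() macros.  Field width and
 * ordering here are assumptions, not the device definition.
 */
#include <stdint.h>

static inline uint64_t pack_wrr_slot(unsigned int n, uint32_t state)
{
        /* In this sketch, field n occupies bits [63 - 8*n .. 56 - 8*n]. */
        return ((uint64_t)(state & 0xff)) << (56 - 8 * n);
}

static uint64_t build_wrr_register(const uint32_t states[8])
{
        uint64_t val64 = 0;
        unsigned int n;

        for (n = 0; n < 8; n++)
                val64 |= pack_wrr_slot(n, states[n]);

        return val64;   /* value that would then be handed to writeq() */
}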