author    Joe Perches <joe@perches.com>    2009-08-24 13:29:41 -0400
committer David S. Miller <davem@davemloft.net>    2009-08-31 01:34:39 -0400
commit    13d866a9c912d6bc7133f4ef4d536c3a960b06cb (patch)
tree      df02e9b726e8f50c0d69006873989a00305ecd2e /drivers
parent    6fce365df8c4af573ea77e744fe310e034931d42 (diff)
s2io.c: Shorten code line length by using intermediate pointers
Repeated variable use and line wrapping is hard to read.
Use temp variables instead of direct references.

	struct fifo_info *fifo = &mac_control->fifos[i];
	struct ring_info *ring = &mac_control->rings[i];
	struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
	struct rx_ring_config *rx_cfg = &config->rx_cfg[i];

Signed-off-by: Joe Perches <joe@perches.com>
Acked-by: Sreenivasa Honnur <sreenivasa.honnur@neterion.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
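The pattern is mechanical: each repeated &array[i] expression is hoisted into a pointer declared at the top of the loop body, and every later field access goes through that one short name. Below is a minimal, self-contained sketch of the before/after shape; the structs are simplified stand-ins, not the driver's real tx_fifo_config/rx_ring_config definitions.

	/* Illustration of the intermediate-pointer pattern used in this
	 * patch.  The structs are simplified stand-ins, not the driver's
	 * real definitions. */
	#include <stdio.h>

	struct rx_ring_config {
		int num_rxd;
		int ring_priority;
	};

	struct config_param {
		struct rx_ring_config rx_cfg[8];
		int rx_ring_num;
	};

	static void init_rings(struct config_param *config)
	{
		int i;

		for (i = 0; i < config->rx_ring_num; i++) {
			/* Before:
			 *	config->rx_cfg[i].num_rxd = 128;
			 *	config->rx_cfg[i].ring_priority = i;
			 * After: one temp pointer per iteration. */
			struct rx_ring_config *rx_cfg = &config->rx_cfg[i];

			rx_cfg->num_rxd = 128;
			rx_cfg->ring_priority = i;
		}
	}

	int main(void)
	{
		struct config_param config = { .rx_ring_num = 2 };

		init_rings(&config);
		printf("ring 0: num_rxd=%d priority=%d\n",
		       config.rx_cfg[0].num_rxd,
		       config.rx_cfg[0].ring_priority);
		return 0;
	}

A compiler typically generates the same object code either way; the temporaries exist purely so each line stays short and the repeated structure is obvious at a glance.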
Diffstat (limited to 'drivers')
-rw-r--r--    drivers/net/s2io.c    393
1 file changed, 224 insertions(+), 169 deletions(-)
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c
index 07e95e9b1c93..b6bd3c812adb 100644
--- a/drivers/net/s2io.c
+++ b/drivers/net/s2io.c
@@ -364,13 +364,19 @@ static void s2io_vlan_rx_register(struct net_device *dev,
 	struct mac_info *mac_control = &nic->mac_control;
 	struct config_param *config = &nic->config;
 
-	for (i = 0; i < config->tx_fifo_num; i++)
-		spin_lock_irqsave(&mac_control->fifos[i].tx_lock, flags[i]);
+	for (i = 0; i < config->tx_fifo_num; i++) {
+		struct fifo_info *fifo = &mac_control->fifos[i];
+
+		spin_lock_irqsave(&fifo->tx_lock, flags[i]);
+	}
 
 	nic->vlgrp = grp;
-	for (i = config->tx_fifo_num - 1; i >= 0; i--)
-		spin_unlock_irqrestore(&mac_control->fifos[i].tx_lock,
-				       flags[i]);
+
+	for (i = config->tx_fifo_num - 1; i >= 0; i--) {
+		struct fifo_info *fifo = &mac_control->fifos[i];
+
+		spin_unlock_irqrestore(&fifo->tx_lock, flags[i]);
+	}
 }
 
 /* Unregister the vlan */
@@ -382,15 +388,20 @@ static void s2io_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
 	struct mac_info *mac_control = &nic->mac_control;
 	struct config_param *config = &nic->config;
 
-	for (i = 0; i < config->tx_fifo_num; i++)
-		spin_lock_irqsave(&mac_control->fifos[i].tx_lock, flags[i]);
+	for (i = 0; i < config->tx_fifo_num; i++) {
+		struct fifo_info *fifo = &mac_control->fifos[i];
+
+		spin_lock_irqsave(&fifo->tx_lock, flags[i]);
+	}
 
 	if (nic->vlgrp)
 		vlan_group_set_device(nic->vlgrp, vid, NULL);
 
-	for (i = config->tx_fifo_num - 1; i >= 0; i--)
-		spin_unlock_irqrestore(&mac_control->fifos[i].tx_lock,
-				       flags[i]);
+	for (i = config->tx_fifo_num - 1; i >= 0; i--) {
+		struct fifo_info *fifo = &mac_control->fifos[i];
+
+		spin_unlock_irqrestore(&fifo->tx_lock, flags[i]);
+	}
 }
 
 /*
@@ -635,11 +646,12 @@ static int init_shared_mem(struct s2io_nic *nic)
 	mac_control = &nic->mac_control;
 	config = &nic->config;
 
-
-	/* Allocation and initialization of TXDLs in FIOFs */
+	/* Allocation and initialization of TXDLs in FIFOs */
 	size = 0;
 	for (i = 0; i < config->tx_fifo_num; i++) {
-		size += config->tx_cfg[i].fifo_len;
+		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
+
+		size += tx_cfg->fifo_len;
 	}
 	if (size > MAX_AVAILABLE_TXDS) {
 		DBG_PRINT(ERR_DBG, "s2io: Requested TxDs too high, ");
@@ -649,7 +661,9 @@ static int init_shared_mem(struct s2io_nic *nic)
 
 	size = 0;
 	for (i = 0; i < config->tx_fifo_num; i++) {
-		size = config->tx_cfg[i].fifo_len;
+		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
+
+		size = tx_cfg->fifo_len;
 		/*
 		 * Legal values are from 2 to 8192
 		 */
@@ -666,11 +680,13 @@ static int init_shared_mem(struct s2io_nic *nic)
 	lst_per_page = PAGE_SIZE / lst_size;
 
 	for (i = 0; i < config->tx_fifo_num; i++) {
-		int fifo_len = config->tx_cfg[i].fifo_len;
+		struct fifo_info *fifo = &mac_control->fifos[i];
+		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
+		int fifo_len = tx_cfg->fifo_len;
 		int list_holder_size = fifo_len * sizeof(struct list_info_hold);
-		mac_control->fifos[i].list_info = kzalloc(list_holder_size,
-							  GFP_KERNEL);
-		if (!mac_control->fifos[i].list_info) {
+
+		fifo->list_info = kzalloc(list_holder_size, GFP_KERNEL);
+		if (!fifo->list_info) {
 			DBG_PRINT(INFO_DBG,
 				  "Malloc failed for list_info\n");
 			return -ENOMEM;
@@ -680,16 +696,17 @@ static int init_shared_mem(struct s2io_nic *nic)
 	for (i = 0; i < config->tx_fifo_num; i++) {
 		int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
 						lst_per_page);
-		mac_control->fifos[i].tx_curr_put_info.offset = 0;
-		mac_control->fifos[i].tx_curr_put_info.fifo_len =
-			config->tx_cfg[i].fifo_len - 1;
-		mac_control->fifos[i].tx_curr_get_info.offset = 0;
-		mac_control->fifos[i].tx_curr_get_info.fifo_len =
-			config->tx_cfg[i].fifo_len - 1;
-		mac_control->fifos[i].fifo_no = i;
-		mac_control->fifos[i].nic = nic;
-		mac_control->fifos[i].max_txds = MAX_SKB_FRAGS + 2;
-		mac_control->fifos[i].dev = dev;
+		struct fifo_info *fifo = &mac_control->fifos[i];
+		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
+
+		fifo->tx_curr_put_info.offset = 0;
+		fifo->tx_curr_put_info.fifo_len = tx_cfg->fifo_len - 1;
+		fifo->tx_curr_get_info.offset = 0;
+		fifo->tx_curr_get_info.fifo_len = tx_cfg->fifo_len - 1;
+		fifo->fifo_no = i;
+		fifo->nic = nic;
+		fifo->max_txds = MAX_SKB_FRAGS + 2;
+		fifo->dev = dev;
 
 		for (j = 0; j < page_num; j++) {
 			int k = 0;
@@ -726,11 +743,11 @@ static int init_shared_mem(struct s2io_nic *nic)
 			}
 			while (k < lst_per_page) {
 				int l = (j * lst_per_page) + k;
-				if (l == config->tx_cfg[i].fifo_len)
+				if (l == tx_cfg->fifo_len)
 					break;
-				mac_control->fifos[i].list_info[l].list_virt_addr =
+				fifo->list_info[l].list_virt_addr =
 					tmp_v + (k * lst_size);
-				mac_control->fifos[i].list_info[l].list_phy_addr =
+				fifo->list_info[l].list_phy_addr =
 					tmp_p + (k * lst_size);
 				k++;
 			}
@@ -738,10 +755,12 @@ static int init_shared_mem(struct s2io_nic *nic)
 	}
 
 	for (i = 0; i < config->tx_fifo_num; i++) {
-		size = config->tx_cfg[i].fifo_len;
-		mac_control->fifos[i].ufo_in_band_v
-			= kcalloc(size, sizeof(u64), GFP_KERNEL);
-		if (!mac_control->fifos[i].ufo_in_band_v)
+		struct fifo_info *fifo = &mac_control->fifos[i];
+		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
+
+		size = tx_cfg->fifo_len;
+		fifo->ufo_in_band_v = kcalloc(size, sizeof(u64), GFP_KERNEL);
+		if (!fifo->ufo_in_band_v)
 			return -ENOMEM;
 		mem_allocated += (size * sizeof(u64));
 	}
@@ -749,20 +768,19 @@ static int init_shared_mem(struct s2io_nic *nic)
 	/* Allocation and initialization of RXDs in Rings */
 	size = 0;
 	for (i = 0; i < config->rx_ring_num; i++) {
-		if (config->rx_cfg[i].num_rxd %
-		    (rxd_count[nic->rxd_mode] + 1)) {
+		struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
+		struct ring_info *ring = &mac_control->rings[i];
+
+		if (rx_cfg->num_rxd % (rxd_count[nic->rxd_mode] + 1)) {
 			DBG_PRINT(ERR_DBG, "%s: RxD count of ", dev->name);
-			DBG_PRINT(ERR_DBG, "Ring%d is not a multiple of ",
-				  i);
+			DBG_PRINT(ERR_DBG, "Ring%d is not a multiple of ", i);
 			DBG_PRINT(ERR_DBG, "RxDs per Block");
 			return FAILURE;
 		}
-		size += config->rx_cfg[i].num_rxd;
-		mac_control->rings[i].block_count =
-			config->rx_cfg[i].num_rxd /
+		size += rx_cfg->num_rxd;
+		ring->block_count = rx_cfg->num_rxd /
 			(rxd_count[nic->rxd_mode] + 1 );
-		mac_control->rings[i].pkt_cnt = config->rx_cfg[i].num_rxd -
-			mac_control->rings[i].block_count;
+		ring->pkt_cnt = rx_cfg->num_rxd - ring->block_count;
 	}
 	if (nic->rxd_mode == RXD_MODE_1)
 		size = (size * (sizeof(struct RxD1)));
@@ -770,26 +788,26 @@ static int init_shared_mem(struct s2io_nic *nic)
 		size = (size * (sizeof(struct RxD3)));
 
 	for (i = 0; i < config->rx_ring_num; i++) {
-		mac_control->rings[i].rx_curr_get_info.block_index = 0;
-		mac_control->rings[i].rx_curr_get_info.offset = 0;
-		mac_control->rings[i].rx_curr_get_info.ring_len =
-			config->rx_cfg[i].num_rxd - 1;
-		mac_control->rings[i].rx_curr_put_info.block_index = 0;
-		mac_control->rings[i].rx_curr_put_info.offset = 0;
-		mac_control->rings[i].rx_curr_put_info.ring_len =
-			config->rx_cfg[i].num_rxd - 1;
-		mac_control->rings[i].nic = nic;
-		mac_control->rings[i].ring_no = i;
-		mac_control->rings[i].lro = lro_enable;
-
-		blk_cnt = config->rx_cfg[i].num_rxd /
-			(rxd_count[nic->rxd_mode] + 1);
+		struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
+		struct ring_info *ring = &mac_control->rings[i];
+
+		ring->rx_curr_get_info.block_index = 0;
+		ring->rx_curr_get_info.offset = 0;
+		ring->rx_curr_get_info.ring_len = rx_cfg->num_rxd - 1;
+		ring->rx_curr_put_info.block_index = 0;
+		ring->rx_curr_put_info.offset = 0;
+		ring->rx_curr_put_info.ring_len = rx_cfg->num_rxd - 1;
+		ring->nic = nic;
+		ring->ring_no = i;
+		ring->lro = lro_enable;
+
+		blk_cnt = rx_cfg->num_rxd / (rxd_count[nic->rxd_mode] + 1);
 		/* Allocating all the Rx blocks */
 		for (j = 0; j < blk_cnt; j++) {
 			struct rx_block_info *rx_blocks;
 			int l;
 
-			rx_blocks = &mac_control->rings[i].rx_blocks[j];
+			rx_blocks = &ring->rx_blocks[j];
 			size = SIZE_OF_BLOCK; //size is always page size
 			tmp_v_addr = pci_alloc_consistent(nic->pdev, size,
 							  &tmp_p_addr);
@@ -825,16 +843,11 @@ static int init_shared_mem(struct s2io_nic *nic)
 		}
 		/* Interlinking all Rx Blocks */
 		for (j = 0; j < blk_cnt; j++) {
-			tmp_v_addr =
-				mac_control->rings[i].rx_blocks[j].block_virt_addr;
-			tmp_v_addr_next =
-				mac_control->rings[i].rx_blocks[(j + 1) %
-						blk_cnt].block_virt_addr;
-			tmp_p_addr =
-				mac_control->rings[i].rx_blocks[j].block_dma_addr;
-			tmp_p_addr_next =
-				mac_control->rings[i].rx_blocks[(j + 1) %
-						blk_cnt].block_dma_addr;
+			int next = (j + 1) % blk_cnt;
+			tmp_v_addr = ring->rx_blocks[j].block_virt_addr;
+			tmp_v_addr_next = ring->rx_blocks[next].block_virt_addr;
+			tmp_p_addr = ring->rx_blocks[j].block_dma_addr;
+			tmp_p_addr_next = ring->rx_blocks[next].block_dma_addr;
 
 			pre_rxd_blk = (struct RxD_block *) tmp_v_addr;
 			pre_rxd_blk->reserved_2_pNext_RxD_block =
@@ -849,26 +862,28 @@ static int init_shared_mem(struct s2io_nic *nic)
 	 * and the buffers as well.
 	 */
 	for (i = 0; i < config->rx_ring_num; i++) {
-		blk_cnt = config->rx_cfg[i].num_rxd /
-			(rxd_count[nic->rxd_mode]+ 1);
-		mac_control->rings[i].ba =
-			kmalloc((sizeof(struct buffAdd *) * blk_cnt),
-				GFP_KERNEL);
-		if (!mac_control->rings[i].ba)
+		struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
+		struct ring_info *ring = &mac_control->rings[i];
+
+		blk_cnt = rx_cfg->num_rxd /
+			(rxd_count[nic->rxd_mode]+ 1);
+		ring->ba = kmalloc((sizeof(struct buffAdd *) * blk_cnt),
+				   GFP_KERNEL);
+		if (!ring->ba)
 			return -ENOMEM;
 		mem_allocated +=(sizeof(struct buffAdd *) * blk_cnt);
 		for (j = 0; j < blk_cnt; j++) {
 			int k = 0;
-			mac_control->rings[i].ba[j] =
+			ring->ba[j] =
 				kmalloc((sizeof(struct buffAdd) *
 					 (rxd_count[nic->rxd_mode] + 1)),
 					GFP_KERNEL);
-			if (!mac_control->rings[i].ba[j])
+			if (!ring->ba[j])
 				return -ENOMEM;
 			mem_allocated += (sizeof(struct buffAdd) * \
 					  (rxd_count[nic->rxd_mode] + 1));
 			while (k != rxd_count[nic->rxd_mode]) {
-				ba = &mac_control->rings[i].ba[j][k];
+				ba = &ring->ba[j][k];
 
 				ba->ba_0_org = (void *) kmalloc
 					(BUF0_LEN + ALIGN_SIZE, GFP_KERNEL);
@@ -952,22 +967,23 @@ static void free_shared_mem(struct s2io_nic *nic)
 	lst_per_page = PAGE_SIZE / lst_size;
 
 	for (i = 0; i < config->tx_fifo_num; i++) {
-		page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
-					    lst_per_page);
+		struct fifo_info *fifo = &mac_control->fifos[i];
+		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
+
+		page_num = TXD_MEM_PAGE_CNT(tx_cfg->fifo_len, lst_per_page);
 		for (j = 0; j < page_num; j++) {
 			int mem_blks = (j * lst_per_page);
-			if (!mac_control->fifos[i].list_info)
+			struct list_info_hold *fli;
+
+			if (!fifo->list_info)
 				return;
-			if (!mac_control->fifos[i].list_info[mem_blks].
-			    list_virt_addr)
+
+			fli = &fifo->list_info[mem_blks];
+			if (!fli->list_virt_addr)
 				break;
 			pci_free_consistent(nic->pdev, PAGE_SIZE,
-					    mac_control->fifos[i].
-					    list_info[mem_blks].
-					    list_virt_addr,
-					    mac_control->fifos[i].
-					    list_info[mem_blks].
-					    list_phy_addr);
+					    fli->list_virt_addr,
+					    fli->list_phy_addr);
 			nic->mac_control.stats_info->sw_stat.mem_freed
 				+= PAGE_SIZE;
 		}
@@ -986,25 +1002,25 @@ static void free_shared_mem(struct s2io_nic *nic)
 			nic->mac_control.stats_info->sw_stat.mem_freed
 				+= PAGE_SIZE;
 		}
-		kfree(mac_control->fifos[i].list_info);
+		kfree(fifo->list_info);
 		nic->mac_control.stats_info->sw_stat.mem_freed +=
 			(nic->config.tx_cfg[i].fifo_len *sizeof(struct list_info_hold));
 	}
 
 	size = SIZE_OF_BLOCK;
 	for (i = 0; i < config->rx_ring_num; i++) {
-		blk_cnt = mac_control->rings[i].block_count;
+		struct ring_info *ring = &mac_control->rings[i];
+
+		blk_cnt = ring->block_count;
 		for (j = 0; j < blk_cnt; j++) {
-			tmp_v_addr = mac_control->rings[i].rx_blocks[j].
-				block_virt_addr;
-			tmp_p_addr = mac_control->rings[i].rx_blocks[j].
-				block_dma_addr;
+			tmp_v_addr = ring->rx_blocks[j].block_virt_addr;
+			tmp_p_addr = ring->rx_blocks[j].block_dma_addr;
 			if (tmp_v_addr == NULL)
 				break;
 			pci_free_consistent(nic->pdev, size,
 					    tmp_v_addr, tmp_p_addr);
 			nic->mac_control.stats_info->sw_stat.mem_freed += size;
-			kfree(mac_control->rings[i].rx_blocks[j].rxds);
+			kfree(ring->rx_blocks[j].rxds);
 			nic->mac_control.stats_info->sw_stat.mem_freed +=
 				( sizeof(struct rxd_info)* rxd_count[nic->rxd_mode]);
 		}
@@ -1013,15 +1029,17 @@ static void free_shared_mem(struct s2io_nic *nic)
 	if (nic->rxd_mode == RXD_MODE_3B) {
 		/* Freeing buffer storage addresses in 2BUFF mode. */
 		for (i = 0; i < config->rx_ring_num; i++) {
-			blk_cnt = config->rx_cfg[i].num_rxd /
-				(rxd_count[nic->rxd_mode] + 1);
+			struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
+			struct ring_info *ring = &mac_control->rings[i];
+
+			blk_cnt = rx_cfg->num_rxd /
+				(rxd_count[nic->rxd_mode] + 1);
 			for (j = 0; j < blk_cnt; j++) {
 				int k = 0;
-				if (!mac_control->rings[i].ba[j])
+				if (!ring->ba[j])
 					continue;
 				while (k != rxd_count[nic->rxd_mode]) {
-					struct buffAdd *ba =
-						&mac_control->rings[i].ba[j][k];
+					struct buffAdd *ba = &ring->ba[j][k];
 					kfree(ba->ba_0_org);
 					nic->mac_control.stats_info->sw_stat.\
 						mem_freed += (BUF0_LEN + ALIGN_SIZE);
@@ -1030,22 +1048,25 @@ static void free_shared_mem(struct s2io_nic *nic)
 					mem_freed += (BUF1_LEN + ALIGN_SIZE);
 					k++;
 				}
-				kfree(mac_control->rings[i].ba[j]);
+				kfree(ring->ba[j]);
 				nic->mac_control.stats_info->sw_stat.mem_freed +=
 					(sizeof(struct buffAdd) *
 					 (rxd_count[nic->rxd_mode] + 1));
 			}
-			kfree(mac_control->rings[i].ba);
+			kfree(ring->ba);
 			nic->mac_control.stats_info->sw_stat.mem_freed +=
 				(sizeof(struct buffAdd *) * blk_cnt);
 		}
 	}
 
 	for (i = 0; i < nic->config.tx_fifo_num; i++) {
-		if (mac_control->fifos[i].ufo_in_band_v) {
+		struct fifo_info *fifo = &mac_control->fifos[i];
+		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
+
+		if (fifo->ufo_in_band_v) {
 			nic->mac_control.stats_info->sw_stat.mem_freed
-				+= (config->tx_cfg[i].fifo_len * sizeof(u64));
-			kfree(mac_control->fifos[i].ufo_in_band_v);
+				+= (tx_cfg->fifo_len * sizeof(u64));
+			kfree(fifo->ufo_in_band_v);
 		}
 	}
 
@@ -1339,10 +1360,10 @@ static int init_nic(struct s2io_nic *nic)
 
 
 	for (i = 0, j = 0; i < config->tx_fifo_num; i++) {
-		val64 |=
-			vBIT(config->tx_cfg[i].fifo_len - 1, ((j * 32) + 19),
-			     13) | vBIT(config->tx_cfg[i].fifo_priority,
-					((j * 32) + 5), 3);
+		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
+
+		val64 |= vBIT(tx_cfg->fifo_len - 1, ((j * 32) + 19), 13) |
+			vBIT(tx_cfg->fifo_priority, ((j * 32) + 5), 3);
 
 		if (i == (config->tx_fifo_num - 1)) {
 			if (i % 2 == 0)
@@ -1400,9 +1421,9 @@ static int init_nic(struct s2io_nic *nic)
 	/* Rx DMA intialization. */
 	val64 = 0;
 	for (i = 0; i < config->rx_ring_num; i++) {
-		val64 |=
-			vBIT(config->rx_cfg[i].ring_priority, (5 + (i * 8)),
-			     3);
+		struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
+
+		val64 |= vBIT(rx_cfg->ring_priority, (5 + (i * 8)), 3);
 	}
 	writeq(val64, &bar0->rx_queue_priority);
 
@@ -2276,7 +2297,9 @@ static int start_nic(struct s2io_nic *nic)
 
 	/* PRC Initialization and configuration */
 	for (i = 0; i < config->rx_ring_num; i++) {
-		writeq((u64) mac_control->rings[i].rx_blocks[0].block_dma_addr,
+		struct ring_info *ring = &mac_control->rings[i];
+
+		writeq((u64) ring->rx_blocks[0].block_dma_addr,
 		       &bar0->prc_rxd0_n[i]);
 
 		val64 = readq(&bar0->prc_ctrl_n[i]);
@@ -2434,11 +2457,13 @@ static void free_tx_buffers(struct s2io_nic *nic)
 	config = &nic->config;
 
 	for (i = 0; i < config->tx_fifo_num; i++) {
+		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
+		struct fifo_info *fifo = &mac_control->fifos[i];
 		unsigned long flags;
-		spin_lock_irqsave(&mac_control->fifos[i].tx_lock, flags);
-		for (j = 0; j < config->tx_cfg[i].fifo_len; j++) {
-			txdp = (struct TxD *) \
-				mac_control->fifos[i].list_info[j].list_virt_addr;
+
+		spin_lock_irqsave(&fifo->tx_lock, flags);
+		for (j = 0; j < tx_cfg->fifo_len; j++) {
+			txdp = (struct TxD *)fifo->list_info[j].list_virt_addr;
 			skb = s2io_txdl_getskb(&mac_control->fifos[i], txdp, j);
 			if (skb) {
 				nic->mac_control.stats_info->sw_stat.mem_freed
@@ -2450,9 +2475,9 @@ static void free_tx_buffers(struct s2io_nic *nic)
 		DBG_PRINT(INTR_DBG,
 			  "%s:forcibly freeing %d skbs on FIFO%d\n",
 			  dev->name, cnt, i);
-		mac_control->fifos[i].tx_curr_get_info.offset = 0;
-		mac_control->fifos[i].tx_curr_put_info.offset = 0;
-		spin_unlock_irqrestore(&mac_control->fifos[i].tx_lock, flags);
+		fifo->tx_curr_get_info.offset = 0;
+		fifo->tx_curr_put_info.offset = 0;
+		spin_unlock_irqrestore(&fifo->tx_lock, flags);
 	}
 }
 
@@ -2795,14 +2820,16 @@ static void free_rx_buffers(struct s2io_nic *sp)
 	config = &sp->config;
 
 	for (i = 0; i < config->rx_ring_num; i++) {
+		struct ring_info *ring = &mac_control->rings[i];
+
 		for (blk = 0; blk < rx_ring_sz[i]; blk++)
 			free_rxd_blk(sp,i,blk);
 
-		mac_control->rings[i].rx_curr_put_info.block_index = 0;
-		mac_control->rings[i].rx_curr_get_info.block_index = 0;
-		mac_control->rings[i].rx_curr_put_info.offset = 0;
-		mac_control->rings[i].rx_curr_get_info.offset = 0;
-		mac_control->rings[i].rx_bufs_left = 0;
+		ring->rx_curr_put_info.block_index = 0;
+		ring->rx_curr_get_info.block_index = 0;
+		ring->rx_curr_put_info.offset = 0;
+		ring->rx_curr_get_info.offset = 0;
+		ring->rx_bufs_left = 0;
 		DBG_PRINT(INIT_DBG, "%s:Freed 0x%x Rx Buffers on ring%d\n",
 			  dev->name, buf_cnt, i);
 	}
@@ -2866,7 +2893,6 @@ static int s2io_poll_msix(struct napi_struct *napi, int budget)
 static int s2io_poll_inta(struct napi_struct *napi, int budget)
 {
 	struct s2io_nic *nic = container_of(napi, struct s2io_nic, napi);
-	struct ring_info *ring;
 	struct config_param *config;
 	struct mac_info *mac_control;
 	int pkts_processed = 0;
@@ -2881,7 +2907,7 @@ static int s2io_poll_inta(struct napi_struct *napi, int budget)
 		return 0;
 
 	for (i = 0; i < config->rx_ring_num; i++) {
-		ring = &mac_control->rings[i];
+		struct ring_info *ring = &mac_control->rings[i];
 		ring_pkts_processed = rx_intr_handler(ring, budget);
 		s2io_chk_rx_buffers(nic, ring);
 		pkts_processed += ring_pkts_processed;
@@ -2936,12 +2962,16 @@ static void s2io_netpoll(struct net_device *dev)
 		tx_intr_handler(&mac_control->fifos[i]);
 
 	/* check for received packet and indicate up to network */
-	for (i = 0; i < config->rx_ring_num; i++)
-		rx_intr_handler(&mac_control->rings[i], 0);
+	for (i = 0; i < config->rx_ring_num; i++) {
+		struct ring_info *ring = &mac_control->rings[i];
+
+		rx_intr_handler(ring, 0);
+	}
 
 	for (i = 0; i < config->rx_ring_num; i++) {
-		if (fill_rx_buffers(nic, &mac_control->rings[i], 0) ==
-		    -ENOMEM) {
+		struct ring_info *ring = &mac_control->rings[i];
+
+		if (fill_rx_buffers(nic, ring, 0) == -ENOMEM) {
 			DBG_PRINT(INFO_DBG, "%s:Out of memory", dev->name);
 			DBG_PRINT(INFO_DBG, " in Rx Netpoll!!\n");
 			break;
@@ -4803,8 +4833,11 @@ static irqreturn_t s2io_isr(int irq, void *dev_id)
 		if (reason & GEN_INTR_RXTRAFFIC)
 			writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
 
-		for (i = 0; i < config->rx_ring_num; i++)
-			rx_intr_handler(&mac_control->rings[i], 0);
+		for (i = 0; i < config->rx_ring_num; i++) {
+			struct ring_info *ring = &mac_control->rings[i];
+
+			rx_intr_handler(ring, 0);
+		}
 	}
 
 	/*
@@ -4825,8 +4858,11 @@ static irqreturn_t s2io_isr(int irq, void *dev_id)
 	 * Reallocate the buffers from the interrupt handler itself.
 	 */
 	if (!config->napi) {
-		for (i = 0; i < config->rx_ring_num; i++)
-			s2io_chk_rx_buffers(sp, &mac_control->rings[i]);
+		for (i = 0; i < config->rx_ring_num; i++) {
+			struct ring_info *ring = &mac_control->rings[i];
+
+			s2io_chk_rx_buffers(sp, ring);
+		}
 	}
 	writeq(sp->general_int_mask, &bar0->general_int_mask);
 	readl(&bar0->general_int_status);
@@ -4923,8 +4959,10 @@ static struct net_device_stats *s2io_get_stats(struct net_device *dev)
 	/* collect per-ring rx_packets and rx_bytes */
 	dev->stats.rx_packets = dev->stats.rx_bytes = 0;
 	for (i = 0; i < config->rx_ring_num; i++) {
-		dev->stats.rx_packets += mac_control->rings[i].rx_packets;
-		dev->stats.rx_bytes += mac_control->rings[i].rx_bytes;
+		struct ring_info *ring = &mac_control->rings[i];
+
+		dev->stats.rx_packets += ring->rx_packets;
+		dev->stats.rx_bytes += ring->rx_bytes;
 	}
 
 	return (&dev->stats);
@@ -6974,15 +7012,16 @@ static int rxd_owner_bit_reset(struct s2io_nic *sp)
 	size = dev->mtu + ALIGN_SIZE + BUF0_LEN + 4;
 
 	for (i = 0; i < config->rx_ring_num; i++) {
-		blk_cnt = config->rx_cfg[i].num_rxd /
-			(rxd_count[sp->rxd_mode] +1);
+		struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
+		struct ring_info *ring = &mac_control->rings[i];
+
+		blk_cnt = rx_cfg->num_rxd / (rxd_count[sp->rxd_mode] +1);
 
 		for (j = 0; j < blk_cnt; j++) {
 			for (k = 0; k < rxd_count[sp->rxd_mode]; k++) {
-				rxdp = mac_control->rings[i].
-					rx_blocks[j].rxds[k].virt_addr;
+				rxdp = ring->rx_blocks[j].rxds[k].virt_addr;
 				if(sp->rxd_mode == RXD_MODE_3B)
-					ba = &mac_control->rings[i].ba[j][k];
+					ba = &ring->ba[j][k];
 				if (set_rxd_buffer_pointer(sp, rxdp, ba,
 							   &skb,(u64 *)&temp0_64,
 							   (u64 *)&temp1_64,
@@ -7205,8 +7244,10 @@ static int s2io_card_up(struct s2io_nic * sp)
 	config = &sp->config;
 
 	for (i = 0; i < config->rx_ring_num; i++) {
-		mac_control->rings[i].mtu = dev->mtu;
-		ret = fill_rx_buffers(sp, &mac_control->rings[i], 1);
+		struct ring_info *ring = &mac_control->rings[i];
+
+		ring->mtu = dev->mtu;
+		ret = fill_rx_buffers(sp, ring, 1);
 		if (ret) {
 			DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n",
 				  dev->name);
@@ -7215,7 +7256,7 @@ static int s2io_card_up(struct s2io_nic * sp)
 			return -ENOMEM;
 		}
 		DBG_PRINT(INFO_DBG, "Buf in ring:%d is %d:\n", i,
-			  mac_control->rings[i].rx_bufs_left);
+			  ring->rx_bufs_left);
 	}
 
 	/* Initialise napi */
@@ -7875,8 +7916,10 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
 
 	config->multiq = dev_multiq;
 	for (i = 0; i < config->tx_fifo_num; i++) {
-		config->tx_cfg[i].fifo_len = tx_fifo_len[i];
-		config->tx_cfg[i].fifo_priority = i;
+		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
+
+		tx_cfg->fifo_len = tx_fifo_len[i];
+		tx_cfg->fifo_priority = i;
 	}
 
 	/* mapping the QoS priority to the configured fifos */
@@ -7890,9 +7933,10 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
 
 	config->tx_intr_type = TXD_INT_TYPE_UTILZ;
 	for (i = 0; i < config->tx_fifo_num; i++) {
-		config->tx_cfg[i].f_no_snoop =
-			(NO_SNOOP_TXD | NO_SNOOP_TXD_BUFFER);
-		if (config->tx_cfg[i].fifo_len < 65) {
+		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
+
+		tx_cfg->f_no_snoop = (NO_SNOOP_TXD | NO_SNOOP_TXD_BUFFER);
+		if (tx_cfg->fifo_len < 65) {
 			config->tx_intr_type = TXD_INT_TYPE_PER_LIST;
 			break;
 		}
@@ -7903,20 +7947,23 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
 	/* Rx side parameters. */
 	config->rx_ring_num = rx_ring_num;
 	for (i = 0; i < config->rx_ring_num; i++) {
-		config->rx_cfg[i].num_rxd = rx_ring_sz[i] *
-			(rxd_count[sp->rxd_mode] + 1);
-		config->rx_cfg[i].ring_priority = i;
-		mac_control->rings[i].rx_bufs_left = 0;
-		mac_control->rings[i].rxd_mode = sp->rxd_mode;
-		mac_control->rings[i].rxd_count = rxd_count[sp->rxd_mode];
-		mac_control->rings[i].pdev = sp->pdev;
-		mac_control->rings[i].dev = sp->dev;
+		struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
+		struct ring_info *ring = &mac_control->rings[i];
+
+		rx_cfg->num_rxd = rx_ring_sz[i] * (rxd_count[sp->rxd_mode] + 1);
+		rx_cfg->ring_priority = i;
+		ring->rx_bufs_left = 0;
+		ring->rxd_mode = sp->rxd_mode;
+		ring->rxd_count = rxd_count[sp->rxd_mode];
+		ring->pdev = sp->pdev;
+		ring->dev = sp->dev;
 	}
 
 	for (i = 0; i < rx_ring_num; i++) {
-		config->rx_cfg[i].ring_org = RING_ORG_BUFF1;
-		config->rx_cfg[i].f_no_snoop =
-			(NO_SNOOP_RXD | NO_SNOOP_RXD_BUFFER);
+		struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
+
+		rx_cfg->ring_org = RING_ORG_BUFF1;
+		rx_cfg->f_no_snoop = (NO_SNOOP_RXD | NO_SNOOP_RXD_BUFFER);
 	}
 
 	/* Setting Mac Control parameters */
@@ -8015,9 +8062,11 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
 	}
 
 	if (config->intr_type == MSI_X) {
-		for (i = 0; i < config->rx_ring_num ; i++)
-			netif_napi_add(dev, &mac_control->rings[i].napi,
-				       s2io_poll_msix, 64);
+		for (i = 0; i < config->rx_ring_num ; i++) {
+			struct ring_info *ring = &mac_control->rings[i];
+
+			netif_napi_add(dev, &ring->napi, s2io_poll_msix, 64);
+		}
 	} else {
 		netif_napi_add(dev, &sp->napi, s2io_poll_inta, 64);
 	}
@@ -8089,8 +8138,11 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
 	sp->state = 0;
 
 	/* Initialize spinlocks */
-	for (i = 0; i < sp->config.tx_fifo_num; i++)
-		spin_lock_init(&mac_control->fifos[i].tx_lock);
+	for (i = 0; i < sp->config.tx_fifo_num; i++) {
+		struct fifo_info *fifo = &mac_control->fifos[i];
+
+		spin_lock_init(&fifo->tx_lock);
+	}
 
 	/*
 	 * SXE-002: Configure link and activity LED to init state
@@ -8165,8 +8217,11 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
 		break;
 	}
 	if (sp->config.multiq) {
-		for (i = 0; i < sp->config.tx_fifo_num; i++)
-			mac_control->fifos[i].multiq = config->multiq;
+		for (i = 0; i < sp->config.tx_fifo_num; i++) {
+			struct fifo_info *fifo = &mac_control->fifos[i];
+
+			fifo->multiq = config->multiq;
+		}
 		DBG_PRINT(ERR_DBG, "%s: Multiqueue support enabled\n",
 			  dev->name);
 	} else