author     Joe Perches <joe@perches.com>            2009-08-24 13:29:44 -0400
committer  David S. Miller <davem@davemloft.net>    2009-08-31 01:34:51 -0400
commit     d44570e4067aa8b832b1c1e1eb6da079f590d501
tree       4ed2d5858f9f852b099b500dfc0bd291d28e3f31
parent     44364a035a4f7c5b58fd96b1a90b52746d1aab6b
s2io.c: Make more conforming to normal kernel style
Still has a few long lines.
checkpatch was:
total: 263 errors, 53 warnings, 8751 lines checked
is:
total: 4 errors, 35 warnings, 8767 lines checked
Signed-off-by: Joe Perches <joe@perches.com>
Acked-by: Sreenivasa Honnur <sreenivasa.honnur@neterion.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--  drivers/net/s2io.c | 2232
1 files changed, 1124 insertions, 1108 deletions
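(For reference, the before/after checkpatch counts quoted in the commit message would typically come from running the kernel's style checker over the whole file, e.g.

    ./scripts/checkpatch.pl --file drivers/net/s2io.c

The exact invocation is an assumption; it is not recorded in the commit itself.)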
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c
index 134cdb39e701..003dd8c4734c 100644
--- a/drivers/net/s2io.c
+++ b/drivers/net/s2io.c
@@ -1,7 +1,7 @@ | |||
1 | /************************************************************************ | 1 | /************************************************************************ |
2 | * s2io.c: A Linux PCI-X Ethernet driver for Neterion 10GbE Server NIC | 2 | * s2io.c: A Linux PCI-X Ethernet driver for Neterion 10GbE Server NIC |
3 | * Copyright(c) 2002-2007 Neterion Inc. | 3 | * Copyright(c) 2002-2007 Neterion Inc. |
4 | 4 | * | |
5 | * This software may be used and distributed according to the terms of | 5 | * This software may be used and distributed according to the terms of |
6 | * the GNU General Public License (GPL), incorporated herein by reference. | 6 | * the GNU General Public License (GPL), incorporated herein by reference. |
7 | * Drivers based on or derived from this code fall under the GPL and must | 7 | * Drivers based on or derived from this code fall under the GPL and must |
@@ -75,11 +75,11 @@ | |||
75 | #include <linux/if_vlan.h> | 75 | #include <linux/if_vlan.h> |
76 | #include <linux/ip.h> | 76 | #include <linux/ip.h> |
77 | #include <linux/tcp.h> | 77 | #include <linux/tcp.h> |
78 | #include <linux/uaccess.h> | ||
79 | #include <linux/io.h> | ||
78 | #include <net/tcp.h> | 80 | #include <net/tcp.h> |
79 | 81 | ||
80 | #include <asm/system.h> | 82 | #include <asm/system.h> |
81 | #include <asm/uaccess.h> | ||
82 | #include <asm/io.h> | ||
83 | #include <asm/div64.h> | 83 | #include <asm/div64.h> |
84 | #include <asm/irq.h> | 84 | #include <asm/irq.h> |
85 | 85 | ||
@@ -93,15 +93,15 @@ | |||
93 | static char s2io_driver_name[] = "Neterion"; | 93 | static char s2io_driver_name[] = "Neterion"; |
94 | static char s2io_driver_version[] = DRV_VERSION; | 94 | static char s2io_driver_version[] = DRV_VERSION; |
95 | 95 | ||
96 | static int rxd_size[2] = {32,48}; | 96 | static int rxd_size[2] = {32, 48}; |
97 | static int rxd_count[2] = {127,85}; | 97 | static int rxd_count[2] = {127, 85}; |
98 | 98 | ||
99 | static inline int RXD_IS_UP2DT(struct RxD_t *rxdp) | 99 | static inline int RXD_IS_UP2DT(struct RxD_t *rxdp) |
100 | { | 100 | { |
101 | int ret; | 101 | int ret; |
102 | 102 | ||
103 | ret = ((!(rxdp->Control_1 & RXD_OWN_XENA)) && | 103 | ret = ((!(rxdp->Control_1 & RXD_OWN_XENA)) && |
104 | (GET_RXD_MARKER(rxdp->Control_2) != THE_RXD_MARK)); | 104 | (GET_RXD_MARKER(rxdp->Control_2) != THE_RXD_MARK)); |
105 | 105 | ||
106 | return ret; | 106 | return ret; |
107 | } | 107 | } |
@@ -111,15 +111,15 @@ static inline int RXD_IS_UP2DT(struct RxD_t *rxdp) | |||
111 | * problem, 600B, 600C, 600D, 640B, 640C and 640D. | 111 | * problem, 600B, 600C, 600D, 640B, 640C and 640D. |
112 | * macro below identifies these cards given the subsystem_id. | 112 | * macro below identifies these cards given the subsystem_id. |
113 | */ | 113 | */ |
114 | #define CARDS_WITH_FAULTY_LINK_INDICATORS(dev_type, subid) \ | 114 | #define CARDS_WITH_FAULTY_LINK_INDICATORS(dev_type, subid) \ |
115 | (dev_type == XFRAME_I_DEVICE) ? \ | 115 | (dev_type == XFRAME_I_DEVICE) ? \ |
116 | ((((subid >= 0x600B) && (subid <= 0x600D)) || \ | 116 | ((((subid >= 0x600B) && (subid <= 0x600D)) || \ |
117 | ((subid >= 0x640B) && (subid <= 0x640D))) ? 1 : 0) : 0 | 117 | ((subid >= 0x640B) && (subid <= 0x640D))) ? 1 : 0) : 0 |
118 | 118 | ||
119 | #define LINK_IS_UP(val64) (!(val64 & (ADAPTER_STATUS_RMAC_REMOTE_FAULT | \ | 119 | #define LINK_IS_UP(val64) (!(val64 & (ADAPTER_STATUS_RMAC_REMOTE_FAULT | \ |
120 | ADAPTER_STATUS_RMAC_LOCAL_FAULT))) | 120 | ADAPTER_STATUS_RMAC_LOCAL_FAULT))) |
121 | 121 | ||
122 | static inline int is_s2io_card_up(const struct s2io_nic * sp) | 122 | static inline int is_s2io_card_up(const struct s2io_nic *sp) |
123 | { | 123 | { |
124 | return test_bit(__S2IO_STATE_CARD_UP, &sp->state); | 124 | return test_bit(__S2IO_STATE_CARD_UP, &sp->state); |
125 | } | 125 | } |
@@ -328,20 +328,20 @@ static const char ethtool_driver_stats_keys[][ETH_GSTRING_LEN] = { | |||
328 | #define S2IO_ENHANCED_STAT_LEN ARRAY_SIZE(ethtool_enhanced_stats_keys) | 328 | #define S2IO_ENHANCED_STAT_LEN ARRAY_SIZE(ethtool_enhanced_stats_keys) |
329 | #define S2IO_DRIVER_STAT_LEN ARRAY_SIZE(ethtool_driver_stats_keys) | 329 | #define S2IO_DRIVER_STAT_LEN ARRAY_SIZE(ethtool_driver_stats_keys) |
330 | 330 | ||
331 | #define XFRAME_I_STAT_LEN (S2IO_XENA_STAT_LEN + S2IO_DRIVER_STAT_LEN ) | 331 | #define XFRAME_I_STAT_LEN (S2IO_XENA_STAT_LEN + S2IO_DRIVER_STAT_LEN) |
332 | #define XFRAME_II_STAT_LEN (XFRAME_I_STAT_LEN + S2IO_ENHANCED_STAT_LEN ) | 332 | #define XFRAME_II_STAT_LEN (XFRAME_I_STAT_LEN + S2IO_ENHANCED_STAT_LEN) |
333 | 333 | ||
334 | #define XFRAME_I_STAT_STRINGS_LEN ( XFRAME_I_STAT_LEN * ETH_GSTRING_LEN ) | 334 | #define XFRAME_I_STAT_STRINGS_LEN (XFRAME_I_STAT_LEN * ETH_GSTRING_LEN) |
335 | #define XFRAME_II_STAT_STRINGS_LEN ( XFRAME_II_STAT_LEN * ETH_GSTRING_LEN ) | 335 | #define XFRAME_II_STAT_STRINGS_LEN (XFRAME_II_STAT_LEN * ETH_GSTRING_LEN) |
336 | 336 | ||
337 | #define S2IO_TEST_LEN ARRAY_SIZE(s2io_gstrings) | 337 | #define S2IO_TEST_LEN ARRAY_SIZE(s2io_gstrings) |
338 | #define S2IO_STRINGS_LEN S2IO_TEST_LEN * ETH_GSTRING_LEN | 338 | #define S2IO_STRINGS_LEN (S2IO_TEST_LEN * ETH_GSTRING_LEN) |
339 | 339 | ||
340 | #define S2IO_TIMER_CONF(timer, handle, arg, exp) \ | 340 | #define S2IO_TIMER_CONF(timer, handle, arg, exp) \ |
341 | init_timer(&timer); \ | 341 | init_timer(&timer); \ |
342 | timer.function = handle; \ | 342 | timer.function = handle; \ |
343 | timer.data = (unsigned long) arg; \ | 343 | timer.data = (unsigned long)arg; \ |
344 | mod_timer(&timer, (jiffies + exp)) \ | 344 | mod_timer(&timer, (jiffies + exp)) \ |
345 | 345 | ||
346 | /* copy mac addr to def_mac_addr array */ | 346 | /* copy mac addr to def_mac_addr array */ |
347 | static void do_s2io_copy_mac_addr(struct s2io_nic *sp, int offset, u64 mac_addr) | 347 | static void do_s2io_copy_mac_addr(struct s2io_nic *sp, int offset, u64 mac_addr) |
@@ -507,11 +507,11 @@ S2IO_PARM_INT(ufo, 0); | |||
507 | S2IO_PARM_INT(vlan_tag_strip, NO_STRIP_IN_PROMISC); | 507 | S2IO_PARM_INT(vlan_tag_strip, NO_STRIP_IN_PROMISC); |
508 | 508 | ||
509 | static unsigned int tx_fifo_len[MAX_TX_FIFOS] = | 509 | static unsigned int tx_fifo_len[MAX_TX_FIFOS] = |
510 | {DEFAULT_FIFO_0_LEN, [1 ...(MAX_TX_FIFOS - 1)] = DEFAULT_FIFO_1_7_LEN}; | 510 | {DEFAULT_FIFO_0_LEN, [1 ...(MAX_TX_FIFOS - 1)] = DEFAULT_FIFO_1_7_LEN}; |
511 | static unsigned int rx_ring_sz[MAX_RX_RINGS] = | 511 | static unsigned int rx_ring_sz[MAX_RX_RINGS] = |
512 | {[0 ...(MAX_RX_RINGS - 1)] = SMALL_BLK_CNT}; | 512 | {[0 ...(MAX_RX_RINGS - 1)] = SMALL_BLK_CNT}; |
513 | static unsigned int rts_frm_len[MAX_RX_RINGS] = | 513 | static unsigned int rts_frm_len[MAX_RX_RINGS] = |
514 | {[0 ...(MAX_RX_RINGS - 1)] = 0 }; | 514 | {[0 ...(MAX_RX_RINGS - 1)] = 0 }; |
515 | 515 | ||
516 | module_param_array(tx_fifo_len, uint, NULL, 0); | 516 | module_param_array(tx_fifo_len, uint, NULL, 0); |
517 | module_param_array(rx_ring_sz, uint, NULL, 0); | 517 | module_param_array(rx_ring_sz, uint, NULL, 0); |
@@ -527,9 +527,9 @@ static struct pci_device_id s2io_tbl[] __devinitdata = { | |||
527 | {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_UNI, | 527 | {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_UNI, |
528 | PCI_ANY_ID, PCI_ANY_ID}, | 528 | PCI_ANY_ID, PCI_ANY_ID}, |
529 | {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_WIN, | 529 | {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_WIN, |
530 | PCI_ANY_ID, PCI_ANY_ID}, | 530 | PCI_ANY_ID, PCI_ANY_ID}, |
531 | {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_UNI, | 531 | {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_UNI, |
532 | PCI_ANY_ID, PCI_ANY_ID}, | 532 | PCI_ANY_ID, PCI_ANY_ID}, |
533 | {0,} | 533 | {0,} |
534 | }; | 534 | }; |
535 | 535 | ||
@@ -542,11 +542,11 @@ static struct pci_error_handlers s2io_err_handler = { | |||
542 | }; | 542 | }; |
543 | 543 | ||
544 | static struct pci_driver s2io_driver = { | 544 | static struct pci_driver s2io_driver = { |
545 | .name = "S2IO", | 545 | .name = "S2IO", |
546 | .id_table = s2io_tbl, | 546 | .id_table = s2io_tbl, |
547 | .probe = s2io_init_nic, | 547 | .probe = s2io_init_nic, |
548 | .remove = __devexit_p(s2io_rem_nic), | 548 | .remove = __devexit_p(s2io_rem_nic), |
549 | .err_handler = &s2io_err_handler, | 549 | .err_handler = &s2io_err_handler, |
550 | }; | 550 | }; |
551 | 551 | ||
552 | /* A simplifier macro used both by init and free shared_mem Fns(). */ | 552 | /* A simplifier macro used both by init and free shared_mem Fns(). */ |
@@ -655,7 +655,8 @@ static int init_shared_mem(struct s2io_nic *nic) | |||
655 | } | 655 | } |
656 | if (size > MAX_AVAILABLE_TXDS) { | 656 | if (size > MAX_AVAILABLE_TXDS) { |
657 | DBG_PRINT(ERR_DBG, "s2io: Requested TxDs too high, "); | 657 | DBG_PRINT(ERR_DBG, "s2io: Requested TxDs too high, "); |
658 | DBG_PRINT(ERR_DBG, "Requested: %d, max supported: 8192\n", size); | 658 | DBG_PRINT(ERR_DBG, "Requested: %d, max supported: 8192\n", |
659 | size); | ||
659 | return -EINVAL; | 660 | return -EINVAL; |
660 | } | 661 | } |
661 | 662 | ||
@@ -671,7 +672,7 @@ static int init_shared_mem(struct s2io_nic *nic) | |||
671 | DBG_PRINT(ERR_DBG, "s2io: Invalid fifo len (%d)", size); | 672 | DBG_PRINT(ERR_DBG, "s2io: Invalid fifo len (%d)", size); |
672 | DBG_PRINT(ERR_DBG, "for fifo %d\n", i); | 673 | DBG_PRINT(ERR_DBG, "for fifo %d\n", i); |
673 | DBG_PRINT(ERR_DBG, "s2io: Legal values for fifo len" | 674 | DBG_PRINT(ERR_DBG, "s2io: Legal values for fifo len" |
674 | "are 2 to 8192\n"); | 675 | "are 2 to 8192\n"); |
675 | return -EINVAL; | 676 | return -EINVAL; |
676 | } | 677 | } |
677 | } | 678 | } |
@@ -687,8 +688,7 @@ static int init_shared_mem(struct s2io_nic *nic) | |||
687 | 688 | ||
688 | fifo->list_info = kzalloc(list_holder_size, GFP_KERNEL); | 689 | fifo->list_info = kzalloc(list_holder_size, GFP_KERNEL); |
689 | if (!fifo->list_info) { | 690 | if (!fifo->list_info) { |
690 | DBG_PRINT(INFO_DBG, | 691 | DBG_PRINT(INFO_DBG, "Malloc failed for list_info\n"); |
691 | "Malloc failed for list_info\n"); | ||
692 | return -ENOMEM; | 692 | return -ENOMEM; |
693 | } | 693 | } |
694 | mem_allocated += list_holder_size; | 694 | mem_allocated += list_holder_size; |
@@ -715,8 +715,7 @@ static int init_shared_mem(struct s2io_nic *nic) | |||
715 | tmp_v = pci_alloc_consistent(nic->pdev, | 715 | tmp_v = pci_alloc_consistent(nic->pdev, |
716 | PAGE_SIZE, &tmp_p); | 716 | PAGE_SIZE, &tmp_p); |
717 | if (!tmp_v) { | 717 | if (!tmp_v) { |
718 | DBG_PRINT(INFO_DBG, | 718 | DBG_PRINT(INFO_DBG, "pci_alloc_consistent "); |
719 | "pci_alloc_consistent "); | ||
720 | DBG_PRINT(INFO_DBG, "failed for TxDL\n"); | 719 | DBG_PRINT(INFO_DBG, "failed for TxDL\n"); |
721 | return -ENOMEM; | 720 | return -ENOMEM; |
722 | } | 721 | } |
@@ -728,15 +727,17 @@ static int init_shared_mem(struct s2io_nic *nic) | |||
728 | if (!tmp_p) { | 727 | if (!tmp_p) { |
729 | mac_control->zerodma_virt_addr = tmp_v; | 728 | mac_control->zerodma_virt_addr = tmp_v; |
730 | DBG_PRINT(INIT_DBG, | 729 | DBG_PRINT(INIT_DBG, |
731 | "%s: Zero DMA address for TxDL. ", dev->name); | 730 | "%s: Zero DMA address for TxDL. ", |
731 | dev->name); | ||
732 | DBG_PRINT(INIT_DBG, | 732 | DBG_PRINT(INIT_DBG, |
733 | "Virtual address %p\n", tmp_v); | 733 | "Virtual address %p\n", tmp_v); |
734 | tmp_v = pci_alloc_consistent(nic->pdev, | 734 | tmp_v = pci_alloc_consistent(nic->pdev, |
735 | PAGE_SIZE, &tmp_p); | 735 | PAGE_SIZE, &tmp_p); |
736 | if (!tmp_v) { | 736 | if (!tmp_v) { |
737 | DBG_PRINT(INFO_DBG, | 737 | DBG_PRINT(INFO_DBG, |
738 | "pci_alloc_consistent "); | 738 | "pci_alloc_consistent "); |
739 | DBG_PRINT(INFO_DBG, "failed for TxDL\n"); | 739 | DBG_PRINT(INFO_DBG, |
740 | "failed for TxDL\n"); | ||
740 | return -ENOMEM; | 741 | return -ENOMEM; |
741 | } | 742 | } |
742 | mem_allocated += PAGE_SIZE; | 743 | mem_allocated += PAGE_SIZE; |
@@ -746,9 +747,9 @@ static int init_shared_mem(struct s2io_nic *nic) | |||
746 | if (l == tx_cfg->fifo_len) | 747 | if (l == tx_cfg->fifo_len) |
747 | break; | 748 | break; |
748 | fifo->list_info[l].list_virt_addr = | 749 | fifo->list_info[l].list_virt_addr = |
749 | tmp_v + (k * lst_size); | 750 | tmp_v + (k * lst_size); |
750 | fifo->list_info[l].list_phy_addr = | 751 | fifo->list_info[l].list_phy_addr = |
751 | tmp_p + (k * lst_size); | 752 | tmp_p + (k * lst_size); |
752 | k++; | 753 | k++; |
753 | } | 754 | } |
754 | } | 755 | } |
@@ -779,7 +780,7 @@ static int init_shared_mem(struct s2io_nic *nic) | |||
779 | } | 780 | } |
780 | size += rx_cfg->num_rxd; | 781 | size += rx_cfg->num_rxd; |
781 | ring->block_count = rx_cfg->num_rxd / | 782 | ring->block_count = rx_cfg->num_rxd / |
782 | (rxd_count[nic->rxd_mode] + 1 ); | 783 | (rxd_count[nic->rxd_mode] + 1); |
783 | ring->pkt_cnt = rx_cfg->num_rxd - ring->block_count; | 784 | ring->pkt_cnt = rx_cfg->num_rxd - ring->block_count; |
784 | } | 785 | } |
785 | if (nic->rxd_mode == RXD_MODE_1) | 786 | if (nic->rxd_mode == RXD_MODE_1) |
@@ -808,7 +809,7 @@ static int init_shared_mem(struct s2io_nic *nic) | |||
808 | int l; | 809 | int l; |
809 | 810 | ||
810 | rx_blocks = &ring->rx_blocks[j]; | 811 | rx_blocks = &ring->rx_blocks[j]; |
811 | size = SIZE_OF_BLOCK; //size is always page size | 812 | size = SIZE_OF_BLOCK; /* size is always page size */ |
812 | tmp_v_addr = pci_alloc_consistent(nic->pdev, size, | 813 | tmp_v_addr = pci_alloc_consistent(nic->pdev, size, |
813 | &tmp_p_addr); | 814 | &tmp_p_addr); |
814 | if (tmp_v_addr == NULL) { | 815 | if (tmp_v_addr == NULL) { |
@@ -832,7 +833,7 @@ static int init_shared_mem(struct s2io_nic *nic) | |||
832 | if (!rx_blocks->rxds) | 833 | if (!rx_blocks->rxds) |
833 | return -ENOMEM; | 834 | return -ENOMEM; |
834 | mem_allocated += size; | 835 | mem_allocated += size; |
835 | for (l=0; l<rxd_count[nic->rxd_mode];l++) { | 836 | for (l = 0; l < rxd_count[nic->rxd_mode]; l++) { |
836 | rx_blocks->rxds[l].virt_addr = | 837 | rx_blocks->rxds[l].virt_addr = |
837 | rx_blocks->block_virt_addr + | 838 | rx_blocks->block_virt_addr + |
838 | (rxd_size[nic->rxd_mode] * l); | 839 | (rxd_size[nic->rxd_mode] * l); |
@@ -849,11 +850,11 @@ static int init_shared_mem(struct s2io_nic *nic) | |||
849 | tmp_p_addr = ring->rx_blocks[j].block_dma_addr; | 850 | tmp_p_addr = ring->rx_blocks[j].block_dma_addr; |
850 | tmp_p_addr_next = ring->rx_blocks[next].block_dma_addr; | 851 | tmp_p_addr_next = ring->rx_blocks[next].block_dma_addr; |
851 | 852 | ||
852 | pre_rxd_blk = (struct RxD_block *) tmp_v_addr; | 853 | pre_rxd_blk = (struct RxD_block *)tmp_v_addr; |
853 | pre_rxd_blk->reserved_2_pNext_RxD_block = | 854 | pre_rxd_blk->reserved_2_pNext_RxD_block = |
854 | (unsigned long) tmp_v_addr_next; | 855 | (unsigned long)tmp_v_addr_next; |
855 | pre_rxd_blk->pNext_RxD_Blk_physical = | 856 | pre_rxd_blk->pNext_RxD_Blk_physical = |
856 | (u64) tmp_p_addr_next; | 857 | (u64)tmp_p_addr_next; |
857 | } | 858 | } |
858 | } | 859 | } |
859 | if (nic->rxd_mode == RXD_MODE_3B) { | 860 | if (nic->rxd_mode == RXD_MODE_3B) { |
@@ -866,7 +867,7 @@ static int init_shared_mem(struct s2io_nic *nic) | |||
866 | struct ring_info *ring = &mac_control->rings[i]; | 867 | struct ring_info *ring = &mac_control->rings[i]; |
867 | 868 | ||
868 | blk_cnt = rx_cfg->num_rxd / | 869 | blk_cnt = rx_cfg->num_rxd / |
869 | (rxd_count[nic->rxd_mode]+ 1); | 870 | (rxd_count[nic->rxd_mode] + 1); |
870 | size = sizeof(struct buffAdd *) * blk_cnt; | 871 | size = sizeof(struct buffAdd *) * blk_cnt; |
871 | ring->ba = kmalloc(size, GFP_KERNEL); | 872 | ring->ba = kmalloc(size, GFP_KERNEL); |
872 | if (!ring->ba) | 873 | if (!ring->ba) |
@@ -890,18 +891,18 @@ static int init_shared_mem(struct s2io_nic *nic) | |||
890 | mem_allocated += size; | 891 | mem_allocated += size; |
891 | tmp = (unsigned long)ba->ba_0_org; | 892 | tmp = (unsigned long)ba->ba_0_org; |
892 | tmp += ALIGN_SIZE; | 893 | tmp += ALIGN_SIZE; |
893 | tmp &= ~((unsigned long) ALIGN_SIZE); | 894 | tmp &= ~((unsigned long)ALIGN_SIZE); |
894 | ba->ba_0 = (void *) tmp; | 895 | ba->ba_0 = (void *)tmp; |
895 | 896 | ||
896 | size = BUF1_LEN + ALIGN_SIZE; | 897 | size = BUF1_LEN + ALIGN_SIZE; |
897 | ba->ba_1_org = kmalloc(size, GFP_KERNEL); | 898 | ba->ba_1_org = kmalloc(size, GFP_KERNEL); |
898 | if (!ba->ba_1_org) | 899 | if (!ba->ba_1_org) |
899 | return -ENOMEM; | 900 | return -ENOMEM; |
900 | mem_allocated += size; | 901 | mem_allocated += size; |
901 | tmp = (unsigned long) ba->ba_1_org; | 902 | tmp = (unsigned long)ba->ba_1_org; |
902 | tmp += ALIGN_SIZE; | 903 | tmp += ALIGN_SIZE; |
903 | tmp &= ~((unsigned long) ALIGN_SIZE); | 904 | tmp &= ~((unsigned long)ALIGN_SIZE); |
904 | ba->ba_1 = (void *) tmp; | 905 | ba->ba_1 = (void *)tmp; |
905 | k++; | 906 | k++; |
906 | } | 907 | } |
907 | } | 908 | } |
@@ -910,8 +911,9 @@ static int init_shared_mem(struct s2io_nic *nic) | |||
910 | 911 | ||
911 | /* Allocation and initialization of Statistics block */ | 912 | /* Allocation and initialization of Statistics block */ |
912 | size = sizeof(struct stat_block); | 913 | size = sizeof(struct stat_block); |
913 | mac_control->stats_mem = pci_alloc_consistent | 914 | mac_control->stats_mem = |
914 | (nic->pdev, size, &mac_control->stats_mem_phy); | 915 | pci_alloc_consistent(nic->pdev, size, |
916 | &mac_control->stats_mem_phy); | ||
915 | 917 | ||
916 | if (!mac_control->stats_mem) { | 918 | if (!mac_control->stats_mem) { |
917 | /* | 919 | /* |
@@ -925,10 +927,10 @@ static int init_shared_mem(struct s2io_nic *nic) | |||
925 | mac_control->stats_mem_sz = size; | 927 | mac_control->stats_mem_sz = size; |
926 | 928 | ||
927 | tmp_v_addr = mac_control->stats_mem; | 929 | tmp_v_addr = mac_control->stats_mem; |
928 | mac_control->stats_info = (struct stat_block *) tmp_v_addr; | 930 | mac_control->stats_info = (struct stat_block *)tmp_v_addr; |
929 | memset(tmp_v_addr, 0, size); | 931 | memset(tmp_v_addr, 0, size); |
930 | DBG_PRINT(INIT_DBG, "%s:Ring Mem PHY: 0x%llx\n", dev->name, | 932 | DBG_PRINT(INIT_DBG, "%s:Ring Mem PHY: 0x%llx\n", dev->name, |
931 | (unsigned long long) tmp_p_addr); | 933 | (unsigned long long)tmp_p_addr); |
932 | mac_control->stats_info->sw_stat.mem_allocated += mem_allocated; | 934 | mac_control->stats_info->sw_stat.mem_allocated += mem_allocated; |
933 | return SUCCESS; | 935 | return SUCCESS; |
934 | } | 936 | } |
@@ -959,7 +961,7 @@ static void free_shared_mem(struct s2io_nic *nic) | |||
959 | mac_control = &nic->mac_control; | 961 | mac_control = &nic->mac_control; |
960 | config = &nic->config; | 962 | config = &nic->config; |
961 | 963 | ||
962 | lst_size = (sizeof(struct TxD) * config->max_txds); | 964 | lst_size = sizeof(struct TxD) * config->max_txds; |
963 | lst_per_page = PAGE_SIZE / lst_size; | 965 | lst_per_page = PAGE_SIZE / lst_size; |
964 | 966 | ||
965 | for (i = 0; i < config->tx_fifo_num; i++) { | 967 | for (i = 0; i < config->tx_fifo_num; i++) { |
@@ -981,7 +983,7 @@ static void free_shared_mem(struct s2io_nic *nic) | |||
981 | fli->list_virt_addr, | 983 | fli->list_virt_addr, |
982 | fli->list_phy_addr); | 984 | fli->list_phy_addr); |
983 | nic->mac_control.stats_info->sw_stat.mem_freed | 985 | nic->mac_control.stats_info->sw_stat.mem_freed |
984 | += PAGE_SIZE; | 986 | += PAGE_SIZE; |
985 | } | 987 | } |
986 | /* If we got a zero DMA address during allocation, | 988 | /* If we got a zero DMA address during allocation, |
987 | * free the page now | 989 | * free the page now |
@@ -991,16 +993,17 @@ static void free_shared_mem(struct s2io_nic *nic) | |||
991 | mac_control->zerodma_virt_addr, | 993 | mac_control->zerodma_virt_addr, |
992 | (dma_addr_t)0); | 994 | (dma_addr_t)0); |
993 | DBG_PRINT(INIT_DBG, | 995 | DBG_PRINT(INIT_DBG, |
994 | "%s: Freeing TxDL with zero DMA addr. ", | 996 | "%s: Freeing TxDL with zero DMA addr. ", |
995 | dev->name); | 997 | dev->name); |
996 | DBG_PRINT(INIT_DBG, "Virtual address %p\n", | 998 | DBG_PRINT(INIT_DBG, "Virtual address %p\n", |
997 | mac_control->zerodma_virt_addr); | 999 | mac_control->zerodma_virt_addr); |
998 | nic->mac_control.stats_info->sw_stat.mem_freed | 1000 | nic->mac_control.stats_info->sw_stat.mem_freed |
999 | += PAGE_SIZE; | 1001 | += PAGE_SIZE; |
1000 | } | 1002 | } |
1001 | kfree(fifo->list_info); | 1003 | kfree(fifo->list_info); |
1002 | nic->mac_control.stats_info->sw_stat.mem_freed += | 1004 | nic->mac_control.stats_info->sw_stat.mem_freed += |
1003 | (nic->config.tx_cfg[i].fifo_len *sizeof(struct list_info_hold)); | 1005 | nic->config.tx_cfg[i].fifo_len * |
1006 | sizeof(struct list_info_hold); | ||
1004 | } | 1007 | } |
1005 | 1008 | ||
1006 | size = SIZE_OF_BLOCK; | 1009 | size = SIZE_OF_BLOCK; |
@@ -1018,7 +1021,7 @@ static void free_shared_mem(struct s2io_nic *nic) | |||
1018 | nic->mac_control.stats_info->sw_stat.mem_freed += size; | 1021 | nic->mac_control.stats_info->sw_stat.mem_freed += size; |
1019 | kfree(ring->rx_blocks[j].rxds); | 1022 | kfree(ring->rx_blocks[j].rxds); |
1020 | nic->mac_control.stats_info->sw_stat.mem_freed += | 1023 | nic->mac_control.stats_info->sw_stat.mem_freed += |
1021 | ( sizeof(struct rxd_info)* rxd_count[nic->rxd_mode]); | 1024 | sizeof(struct rxd_info) * rxd_count[nic->rxd_mode]; |
1022 | } | 1025 | } |
1023 | } | 1026 | } |
1024 | 1027 | ||
@@ -1038,20 +1041,20 @@ static void free_shared_mem(struct s2io_nic *nic) | |||
1038 | struct buffAdd *ba = &ring->ba[j][k]; | 1041 | struct buffAdd *ba = &ring->ba[j][k]; |
1039 | kfree(ba->ba_0_org); | 1042 | kfree(ba->ba_0_org); |
1040 | nic->mac_control.stats_info->sw_stat.\ | 1043 | nic->mac_control.stats_info->sw_stat.\ |
1041 | mem_freed += (BUF0_LEN + ALIGN_SIZE); | 1044 | mem_freed += (BUF0_LEN + ALIGN_SIZE); |
1042 | kfree(ba->ba_1_org); | 1045 | kfree(ba->ba_1_org); |
1043 | nic->mac_control.stats_info->sw_stat.\ | 1046 | nic->mac_control.stats_info->sw_stat.\ |
1044 | mem_freed += (BUF1_LEN + ALIGN_SIZE); | 1047 | mem_freed += (BUF1_LEN + ALIGN_SIZE); |
1045 | k++; | 1048 | k++; |
1046 | } | 1049 | } |
1047 | kfree(ring->ba[j]); | 1050 | kfree(ring->ba[j]); |
1048 | nic->mac_control.stats_info->sw_stat.mem_freed += | 1051 | nic->mac_control.stats_info->sw_stat.mem_freed += |
1049 | (sizeof(struct buffAdd) * | 1052 | (sizeof(struct buffAdd) * |
1050 | (rxd_count[nic->rxd_mode] + 1)); | 1053 | (rxd_count[nic->rxd_mode] + 1)); |
1051 | } | 1054 | } |
1052 | kfree(ring->ba); | 1055 | kfree(ring->ba); |
1053 | nic->mac_control.stats_info->sw_stat.mem_freed += | 1056 | nic->mac_control.stats_info->sw_stat.mem_freed += |
1054 | (sizeof(struct buffAdd *) * blk_cnt); | 1057 | (sizeof(struct buffAdd *) * blk_cnt); |
1055 | } | 1058 | } |
1056 | } | 1059 | } |
1057 | 1060 | ||
@@ -1089,7 +1092,7 @@ static int s2io_verify_pci_mode(struct s2io_nic *nic) | |||
1089 | val64 = readq(&bar0->pci_mode); | 1092 | val64 = readq(&bar0->pci_mode); |
1090 | mode = (u8)GET_PCI_MODE(val64); | 1093 | mode = (u8)GET_PCI_MODE(val64); |
1091 | 1094 | ||
1092 | if ( val64 & PCI_MODE_UNKNOWN_MODE) | 1095 | if (val64 & PCI_MODE_UNKNOWN_MODE) |
1093 | return -1; /* Unknown PCI mode */ | 1096 | return -1; /* Unknown PCI mode */ |
1094 | return mode; | 1097 | return mode; |
1095 | } | 1098 | } |
@@ -1124,50 +1127,47 @@ static int s2io_print_pci_mode(struct s2io_nic *nic) | |||
1124 | val64 = readq(&bar0->pci_mode); | 1127 | val64 = readq(&bar0->pci_mode); |
1125 | mode = (u8)GET_PCI_MODE(val64); | 1128 | mode = (u8)GET_PCI_MODE(val64); |
1126 | 1129 | ||
1127 | if ( val64 & PCI_MODE_UNKNOWN_MODE) | 1130 | if (val64 & PCI_MODE_UNKNOWN_MODE) |
1128 | return -1; /* Unknown PCI mode */ | 1131 | return -1; /* Unknown PCI mode */ |
1129 | 1132 | ||
1130 | config->bus_speed = bus_speed[mode]; | 1133 | config->bus_speed = bus_speed[mode]; |
1131 | 1134 | ||
1132 | if (s2io_on_nec_bridge(nic->pdev)) { | 1135 | if (s2io_on_nec_bridge(nic->pdev)) { |
1133 | DBG_PRINT(ERR_DBG, "%s: Device is on PCI-E bus\n", | 1136 | DBG_PRINT(ERR_DBG, "%s: Device is on PCI-E bus\n", |
1134 | nic->dev->name); | 1137 | nic->dev->name); |
1135 | return mode; | 1138 | return mode; |
1136 | } | 1139 | } |
1137 | 1140 | ||
1138 | if (val64 & PCI_MODE_32_BITS) { | 1141 | DBG_PRINT(ERR_DBG, "%s: Device is on %d bit ", |
1139 | DBG_PRINT(ERR_DBG, "%s: Device is on 32 bit ", nic->dev->name); | 1142 | nic->dev->name, val64 & PCI_MODE_32_BITS ? 32 : 64); |
1140 | } else { | ||
1141 | DBG_PRINT(ERR_DBG, "%s: Device is on 64 bit ", nic->dev->name); | ||
1142 | } | ||
1143 | 1143 | ||
1144 | switch(mode) { | 1144 | switch (mode) { |
1145 | case PCI_MODE_PCI_33: | 1145 | case PCI_MODE_PCI_33: |
1146 | DBG_PRINT(ERR_DBG, "33MHz PCI bus\n"); | 1146 | DBG_PRINT(ERR_DBG, "33MHz PCI bus\n"); |
1147 | break; | 1147 | break; |
1148 | case PCI_MODE_PCI_66: | 1148 | case PCI_MODE_PCI_66: |
1149 | DBG_PRINT(ERR_DBG, "66MHz PCI bus\n"); | 1149 | DBG_PRINT(ERR_DBG, "66MHz PCI bus\n"); |
1150 | break; | 1150 | break; |
1151 | case PCI_MODE_PCIX_M1_66: | 1151 | case PCI_MODE_PCIX_M1_66: |
1152 | DBG_PRINT(ERR_DBG, "66MHz PCIX(M1) bus\n"); | 1152 | DBG_PRINT(ERR_DBG, "66MHz PCIX(M1) bus\n"); |
1153 | break; | 1153 | break; |
1154 | case PCI_MODE_PCIX_M1_100: | 1154 | case PCI_MODE_PCIX_M1_100: |
1155 | DBG_PRINT(ERR_DBG, "100MHz PCIX(M1) bus\n"); | 1155 | DBG_PRINT(ERR_DBG, "100MHz PCIX(M1) bus\n"); |
1156 | break; | 1156 | break; |
1157 | case PCI_MODE_PCIX_M1_133: | 1157 | case PCI_MODE_PCIX_M1_133: |
1158 | DBG_PRINT(ERR_DBG, "133MHz PCIX(M1) bus\n"); | 1158 | DBG_PRINT(ERR_DBG, "133MHz PCIX(M1) bus\n"); |
1159 | break; | 1159 | break; |
1160 | case PCI_MODE_PCIX_M2_66: | 1160 | case PCI_MODE_PCIX_M2_66: |
1161 | DBG_PRINT(ERR_DBG, "133MHz PCIX(M2) bus\n"); | 1161 | DBG_PRINT(ERR_DBG, "133MHz PCIX(M2) bus\n"); |
1162 | break; | 1162 | break; |
1163 | case PCI_MODE_PCIX_M2_100: | 1163 | case PCI_MODE_PCIX_M2_100: |
1164 | DBG_PRINT(ERR_DBG, "200MHz PCIX(M2) bus\n"); | 1164 | DBG_PRINT(ERR_DBG, "200MHz PCIX(M2) bus\n"); |
1165 | break; | 1165 | break; |
1166 | case PCI_MODE_PCIX_M2_133: | 1166 | case PCI_MODE_PCIX_M2_133: |
1167 | DBG_PRINT(ERR_DBG, "266MHz PCIX(M2) bus\n"); | 1167 | DBG_PRINT(ERR_DBG, "266MHz PCIX(M2) bus\n"); |
1168 | break; | 1168 | break; |
1169 | default: | 1169 | default: |
1170 | return -1; /* Unsupported bus speed */ | 1170 | return -1; /* Unsupported bus speed */ |
1171 | } | 1171 | } |
1172 | 1172 | ||
1173 | return mode; | 1173 | return mode; |
@@ -1205,9 +1205,9 @@ static int init_tti(struct s2io_nic *nic, int link) | |||
1205 | val64 = TTI_DATA1_MEM_TX_TIMER_VAL(0x2078); | 1205 | val64 = TTI_DATA1_MEM_TX_TIMER_VAL(0x2078); |
1206 | 1206 | ||
1207 | val64 |= TTI_DATA1_MEM_TX_URNG_A(0xA) | | 1207 | val64 |= TTI_DATA1_MEM_TX_URNG_A(0xA) | |
1208 | TTI_DATA1_MEM_TX_URNG_B(0x10) | | 1208 | TTI_DATA1_MEM_TX_URNG_B(0x10) | |
1209 | TTI_DATA1_MEM_TX_URNG_C(0x30) | | 1209 | TTI_DATA1_MEM_TX_URNG_C(0x30) | |
1210 | TTI_DATA1_MEM_TX_TIMER_AC_EN; | 1210 | TTI_DATA1_MEM_TX_TIMER_AC_EN; |
1211 | if (i == 0) | 1211 | if (i == 0) |
1212 | if (use_continuous_tx_intrs && (link == LINK_UP)) | 1212 | if (use_continuous_tx_intrs && (link == LINK_UP)) |
1213 | val64 |= TTI_DATA1_MEM_TX_TIMER_CI_EN; | 1213 | val64 |= TTI_DATA1_MEM_TX_TIMER_CI_EN; |
@@ -1220,11 +1220,11 @@ static int init_tti(struct s2io_nic *nic, int link) | |||
1220 | TTI_DATA2_MEM_TX_UFC_D(0x300); | 1220 | TTI_DATA2_MEM_TX_UFC_D(0x300); |
1221 | } else { | 1221 | } else { |
1222 | if ((nic->config.tx_steering_type == | 1222 | if ((nic->config.tx_steering_type == |
1223 | TX_DEFAULT_STEERING) && | 1223 | TX_DEFAULT_STEERING) && |
1224 | (config->tx_fifo_num > 1) && | 1224 | (config->tx_fifo_num > 1) && |
1225 | (i >= nic->udp_fifo_idx) && | 1225 | (i >= nic->udp_fifo_idx) && |
1226 | (i < (nic->udp_fifo_idx + | 1226 | (i < (nic->udp_fifo_idx + |
1227 | nic->total_udp_fifos))) | 1227 | nic->total_udp_fifos))) |
1228 | val64 = TTI_DATA2_MEM_TX_UFC_A(0x50) | | 1228 | val64 = TTI_DATA2_MEM_TX_UFC_A(0x50) | |
1229 | TTI_DATA2_MEM_TX_UFC_B(0x80) | | 1229 | TTI_DATA2_MEM_TX_UFC_B(0x80) | |
1230 | TTI_DATA2_MEM_TX_UFC_C(0x100) | | 1230 | TTI_DATA2_MEM_TX_UFC_C(0x100) | |
@@ -1238,12 +1238,14 @@ static int init_tti(struct s2io_nic *nic, int link) | |||
1238 | 1238 | ||
1239 | writeq(val64, &bar0->tti_data2_mem); | 1239 | writeq(val64, &bar0->tti_data2_mem); |
1240 | 1240 | ||
1241 | val64 = TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE_NEW_CMD | | 1241 | val64 = TTI_CMD_MEM_WE | |
1242 | TTI_CMD_MEM_OFFSET(i); | 1242 | TTI_CMD_MEM_STROBE_NEW_CMD | |
1243 | TTI_CMD_MEM_OFFSET(i); | ||
1243 | writeq(val64, &bar0->tti_command_mem); | 1244 | writeq(val64, &bar0->tti_command_mem); |
1244 | 1245 | ||
1245 | if (wait_for_cmd_complete(&bar0->tti_command_mem, | 1246 | if (wait_for_cmd_complete(&bar0->tti_command_mem, |
1246 | TTI_CMD_MEM_STROBE_NEW_CMD, S2IO_BIT_RESET) != SUCCESS) | 1247 | TTI_CMD_MEM_STROBE_NEW_CMD, |
1248 | S2IO_BIT_RESET) != SUCCESS) | ||
1247 | return FAILURE; | 1249 | return FAILURE; |
1248 | } | 1250 | } |
1249 | 1251 | ||
@@ -1277,8 +1279,8 @@ static int init_nic(struct s2io_nic *nic) | |||
1277 | config = &nic->config; | 1279 | config = &nic->config; |
1278 | 1280 | ||
1279 | /* to set the swapper controle on the card */ | 1281 | /* to set the swapper controle on the card */ |
1280 | if(s2io_set_swapper(nic)) { | 1282 | if (s2io_set_swapper(nic)) { |
1281 | DBG_PRINT(ERR_DBG,"ERROR: Setting Swapper failed\n"); | 1283 | DBG_PRINT(ERR_DBG, "ERROR: Setting Swapper failed\n"); |
1282 | return -EIO; | 1284 | return -EIO; |
1283 | } | 1285 | } |
1284 | 1286 | ||
@@ -1317,7 +1319,7 @@ static int init_nic(struct s2io_nic *nic) | |||
1317 | val64 = readq(&bar0->mac_cfg); | 1319 | val64 = readq(&bar0->mac_cfg); |
1318 | val64 |= MAC_RMAC_BCAST_ENABLE; | 1320 | val64 |= MAC_RMAC_BCAST_ENABLE; |
1319 | writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key); | 1321 | writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key); |
1320 | writel((u32) val64, add); | 1322 | writel((u32)val64, add); |
1321 | writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key); | 1323 | writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key); |
1322 | writel((u32) (val64 >> 32), (add + 4)); | 1324 | writel((u32) (val64 >> 32), (add + 4)); |
1323 | 1325 | ||
@@ -1354,7 +1356,6 @@ static int init_nic(struct s2io_nic *nic) | |||
1354 | writeq(val64, &bar0->tx_fifo_partition_2); | 1356 | writeq(val64, &bar0->tx_fifo_partition_2); |
1355 | writeq(val64, &bar0->tx_fifo_partition_3); | 1357 | writeq(val64, &bar0->tx_fifo_partition_3); |
1356 | 1358 | ||
1357 | |||
1358 | for (i = 0, j = 0; i < config->tx_fifo_num; i++) { | 1359 | for (i = 0, j = 0; i < config->tx_fifo_num; i++) { |
1359 | struct tx_fifo_config *tx_cfg = &config->tx_cfg[i]; | 1360 | struct tx_fifo_config *tx_cfg = &config->tx_cfg[i]; |
1360 | 1361 | ||
@@ -1397,21 +1398,22 @@ static int init_nic(struct s2io_nic *nic) | |||
1397 | * Disable 4 PCCs for Xena1, 2 and 3 as per H/W bug | 1398 | * Disable 4 PCCs for Xena1, 2 and 3 as per H/W bug |
1398 | * SXE-008 TRANSMIT DMA ARBITRATION ISSUE. | 1399 | * SXE-008 TRANSMIT DMA ARBITRATION ISSUE. |
1399 | */ | 1400 | */ |
1400 | if ((nic->device_type == XFRAME_I_DEVICE) && | 1401 | if ((nic->device_type == XFRAME_I_DEVICE) && (nic->pdev->revision < 4)) |
1401 | (nic->pdev->revision < 4)) | ||
1402 | writeq(PCC_ENABLE_FOUR, &bar0->pcc_enable); | 1402 | writeq(PCC_ENABLE_FOUR, &bar0->pcc_enable); |
1403 | 1403 | ||
1404 | val64 = readq(&bar0->tx_fifo_partition_0); | 1404 | val64 = readq(&bar0->tx_fifo_partition_0); |
1405 | DBG_PRINT(INIT_DBG, "Fifo partition at: 0x%p is: 0x%llx\n", | 1405 | DBG_PRINT(INIT_DBG, "Fifo partition at: 0x%p is: 0x%llx\n", |
1406 | &bar0->tx_fifo_partition_0, (unsigned long long) val64); | 1406 | &bar0->tx_fifo_partition_0, (unsigned long long)val64); |
1407 | 1407 | ||
1408 | /* | 1408 | /* |
1409 | * Initialization of Tx_PA_CONFIG register to ignore packet | 1409 | * Initialization of Tx_PA_CONFIG register to ignore packet |
1410 | * integrity checking. | 1410 | * integrity checking. |
1411 | */ | 1411 | */ |
1412 | val64 = readq(&bar0->tx_pa_cfg); | 1412 | val64 = readq(&bar0->tx_pa_cfg); |
1413 | val64 |= TX_PA_CFG_IGNORE_FRM_ERR | TX_PA_CFG_IGNORE_SNAP_OUI | | 1413 | val64 |= TX_PA_CFG_IGNORE_FRM_ERR | |
1414 | TX_PA_CFG_IGNORE_LLC_CTRL | TX_PA_CFG_IGNORE_L2_ERR; | 1414 | TX_PA_CFG_IGNORE_SNAP_OUI | |
1415 | TX_PA_CFG_IGNORE_LLC_CTRL | | ||
1416 | TX_PA_CFG_IGNORE_L2_ERR; | ||
1415 | writeq(val64, &bar0->tx_pa_cfg); | 1417 | writeq(val64, &bar0->tx_pa_cfg); |
1416 | 1418 | ||
1417 | /* Rx DMA intialization. */ | 1419 | /* Rx DMA intialization. */ |
@@ -1703,7 +1705,7 @@ static int init_nic(struct s2io_nic *nic) | |||
1703 | */ | 1705 | */ |
1704 | if (rts_frm_len[i] != 0) { | 1706 | if (rts_frm_len[i] != 0) { |
1705 | writeq(MAC_RTS_FRM_LEN_SET(rts_frm_len[i]), | 1707 | writeq(MAC_RTS_FRM_LEN_SET(rts_frm_len[i]), |
1706 | &bar0->rts_frm_len_n[i]); | 1708 | &bar0->rts_frm_len_n[i]); |
1707 | } | 1709 | } |
1708 | } | 1710 | } |
1709 | 1711 | ||
@@ -1711,7 +1713,7 @@ static int init_nic(struct s2io_nic *nic) | |||
1711 | for (i = 0; i < 64; i++) { | 1713 | for (i = 0; i < 64; i++) { |
1712 | if (rts_ds_steer(nic, i, 0) == FAILURE) { | 1714 | if (rts_ds_steer(nic, i, 0) == FAILURE) { |
1713 | DBG_PRINT(ERR_DBG, "%s: failed rts ds steering", | 1715 | DBG_PRINT(ERR_DBG, "%s: failed rts ds steering", |
1714 | dev->name); | 1716 | dev->name); |
1715 | DBG_PRINT(ERR_DBG, "set on codepoint %d\n", i); | 1717 | DBG_PRINT(ERR_DBG, "set on codepoint %d\n", i); |
1716 | return -ENODEV; | 1718 | return -ENODEV; |
1717 | } | 1719 | } |
@@ -1730,7 +1732,7 @@ static int init_nic(struct s2io_nic *nic) | |||
1730 | * bandwidth utilization. | 1732 | * bandwidth utilization. |
1731 | */ | 1733 | */ |
1732 | val64 = MAC_TX_LINK_UTIL_VAL(tmac_util_period) | | 1734 | val64 = MAC_TX_LINK_UTIL_VAL(tmac_util_period) | |
1733 | MAC_RX_LINK_UTIL_VAL(rmac_util_period); | 1735 | MAC_RX_LINK_UTIL_VAL(rmac_util_period); |
1734 | writeq(val64, &bar0->mac_link_util); | 1736 | writeq(val64, &bar0->mac_link_util); |
1735 | 1737 | ||
1736 | /* | 1738 | /* |
@@ -1753,24 +1755,26 @@ static int init_nic(struct s2io_nic *nic) | |||
1753 | } else | 1755 | } else |
1754 | val64 = RTI_DATA1_MEM_RX_TIMER_VAL(0xFFF); | 1756 | val64 = RTI_DATA1_MEM_RX_TIMER_VAL(0xFFF); |
1755 | val64 |= RTI_DATA1_MEM_RX_URNG_A(0xA) | | 1757 | val64 |= RTI_DATA1_MEM_RX_URNG_A(0xA) | |
1756 | RTI_DATA1_MEM_RX_URNG_B(0x10) | | 1758 | RTI_DATA1_MEM_RX_URNG_B(0x10) | |
1757 | RTI_DATA1_MEM_RX_URNG_C(0x30) | RTI_DATA1_MEM_RX_TIMER_AC_EN; | 1759 | RTI_DATA1_MEM_RX_URNG_C(0x30) | |
1760 | RTI_DATA1_MEM_RX_TIMER_AC_EN; | ||
1758 | 1761 | ||
1759 | writeq(val64, &bar0->rti_data1_mem); | 1762 | writeq(val64, &bar0->rti_data1_mem); |
1760 | 1763 | ||
1761 | val64 = RTI_DATA2_MEM_RX_UFC_A(0x1) | | 1764 | val64 = RTI_DATA2_MEM_RX_UFC_A(0x1) | |
1762 | RTI_DATA2_MEM_RX_UFC_B(0x2) ; | 1765 | RTI_DATA2_MEM_RX_UFC_B(0x2) ; |
1763 | if (nic->config.intr_type == MSI_X) | 1766 | if (nic->config.intr_type == MSI_X) |
1764 | val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x20) | \ | 1767 | val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x20) | |
1765 | RTI_DATA2_MEM_RX_UFC_D(0x40)); | 1768 | RTI_DATA2_MEM_RX_UFC_D(0x40)); |
1766 | else | 1769 | else |
1767 | val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x40) | \ | 1770 | val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x40) | |
1768 | RTI_DATA2_MEM_RX_UFC_D(0x80)); | 1771 | RTI_DATA2_MEM_RX_UFC_D(0x80)); |
1769 | writeq(val64, &bar0->rti_data2_mem); | 1772 | writeq(val64, &bar0->rti_data2_mem); |
1770 | 1773 | ||
1771 | for (i = 0; i < config->rx_ring_num; i++) { | 1774 | for (i = 0; i < config->rx_ring_num; i++) { |
1772 | val64 = RTI_CMD_MEM_WE | RTI_CMD_MEM_STROBE_NEW_CMD | 1775 | val64 = RTI_CMD_MEM_WE | |
1773 | | RTI_CMD_MEM_OFFSET(i); | 1776 | RTI_CMD_MEM_STROBE_NEW_CMD | |
1777 | RTI_CMD_MEM_OFFSET(i); | ||
1774 | writeq(val64, &bar0->rti_command_mem); | 1778 | writeq(val64, &bar0->rti_command_mem); |
1775 | 1779 | ||
1776 | /* | 1780 | /* |
@@ -1843,19 +1847,17 @@ static int init_nic(struct s2io_nic *nic) | |||
1843 | */ | 1847 | */ |
1844 | val64 = 0; | 1848 | val64 = 0; |
1845 | for (i = 0; i < 4; i++) { | 1849 | for (i = 0; i < 4; i++) { |
1846 | val64 |= | 1850 | val64 |= (((u64)0xFF00 | |
1847 | (((u64) 0xFF00 | nic->mac_control. | 1851 | nic->mac_control.mc_pause_threshold_q0q3) |
1848 | mc_pause_threshold_q0q3) | 1852 | << (i * 2 * 8)); |
1849 | << (i * 2 * 8)); | ||
1850 | } | 1853 | } |
1851 | writeq(val64, &bar0->mc_pause_thresh_q0q3); | 1854 | writeq(val64, &bar0->mc_pause_thresh_q0q3); |
1852 | 1855 | ||
1853 | val64 = 0; | 1856 | val64 = 0; |
1854 | for (i = 0; i < 4; i++) { | 1857 | for (i = 0; i < 4; i++) { |
1855 | val64 |= | 1858 | val64 |= (((u64)0xFF00 | |
1856 | (((u64) 0xFF00 | nic->mac_control. | 1859 | nic->mac_control.mc_pause_threshold_q4q7) |
1857 | mc_pause_threshold_q4q7) | 1860 | << (i * 2 * 8)); |
1858 | << (i * 2 * 8)); | ||
1859 | } | 1861 | } |
1860 | writeq(val64, &bar0->mc_pause_thresh_q4q7); | 1862 | writeq(val64, &bar0->mc_pause_thresh_q4q7); |
1861 | 1863 | ||
@@ -1918,10 +1920,10 @@ static void do_s2io_write_bits(u64 value, int flag, void __iomem *addr) | |||
1918 | 1920 | ||
1919 | temp64 = readq(addr); | 1921 | temp64 = readq(addr); |
1920 | 1922 | ||
1921 | if(flag == ENABLE_INTRS) | 1923 | if (flag == ENABLE_INTRS) |
1922 | temp64 &= ~((u64) value); | 1924 | temp64 &= ~((u64)value); |
1923 | else | 1925 | else |
1924 | temp64 |= ((u64) value); | 1926 | temp64 |= ((u64)value); |
1925 | writeq(temp64, addr); | 1927 | writeq(temp64, addr); |
1926 | } | 1928 | } |
1927 | 1929 | ||
@@ -1933,124 +1935,125 @@ static void en_dis_err_alarms(struct s2io_nic *nic, u16 mask, int flag) | |||
1933 | 1935 | ||
1934 | writeq(DISABLE_ALL_INTRS, &bar0->general_int_mask); | 1936 | writeq(DISABLE_ALL_INTRS, &bar0->general_int_mask); |
1935 | if (mask & TX_DMA_INTR) { | 1937 | if (mask & TX_DMA_INTR) { |
1936 | |||
1937 | gen_int_mask |= TXDMA_INT_M; | 1938 | gen_int_mask |= TXDMA_INT_M; |
1938 | 1939 | ||
1939 | do_s2io_write_bits(TXDMA_TDA_INT | TXDMA_PFC_INT | | 1940 | do_s2io_write_bits(TXDMA_TDA_INT | TXDMA_PFC_INT | |
1940 | TXDMA_PCC_INT | TXDMA_TTI_INT | | 1941 | TXDMA_PCC_INT | TXDMA_TTI_INT | |
1941 | TXDMA_LSO_INT | TXDMA_TPA_INT | | 1942 | TXDMA_LSO_INT | TXDMA_TPA_INT | |
1942 | TXDMA_SM_INT, flag, &bar0->txdma_int_mask); | 1943 | TXDMA_SM_INT, flag, &bar0->txdma_int_mask); |
1943 | 1944 | ||
1944 | do_s2io_write_bits(PFC_ECC_DB_ERR | PFC_SM_ERR_ALARM | | 1945 | do_s2io_write_bits(PFC_ECC_DB_ERR | PFC_SM_ERR_ALARM | |
1945 | PFC_MISC_0_ERR | PFC_MISC_1_ERR | | 1946 | PFC_MISC_0_ERR | PFC_MISC_1_ERR | |
1946 | PFC_PCIX_ERR | PFC_ECC_SG_ERR, flag, | 1947 | PFC_PCIX_ERR | PFC_ECC_SG_ERR, flag, |
1947 | &bar0->pfc_err_mask); | 1948 | &bar0->pfc_err_mask); |
1948 | 1949 | ||
1949 | do_s2io_write_bits(TDA_Fn_ECC_DB_ERR | TDA_SM0_ERR_ALARM | | 1950 | do_s2io_write_bits(TDA_Fn_ECC_DB_ERR | TDA_SM0_ERR_ALARM | |
1950 | TDA_SM1_ERR_ALARM | TDA_Fn_ECC_SG_ERR | | 1951 | TDA_SM1_ERR_ALARM | TDA_Fn_ECC_SG_ERR | |
1951 | TDA_PCIX_ERR, flag, &bar0->tda_err_mask); | 1952 | TDA_PCIX_ERR, flag, &bar0->tda_err_mask); |
1952 | 1953 | ||
1953 | do_s2io_write_bits(PCC_FB_ECC_DB_ERR | PCC_TXB_ECC_DB_ERR | | 1954 | do_s2io_write_bits(PCC_FB_ECC_DB_ERR | PCC_TXB_ECC_DB_ERR | |
1954 | PCC_SM_ERR_ALARM | PCC_WR_ERR_ALARM | | 1955 | PCC_SM_ERR_ALARM | PCC_WR_ERR_ALARM | |
1955 | PCC_N_SERR | PCC_6_COF_OV_ERR | | 1956 | PCC_N_SERR | PCC_6_COF_OV_ERR | |
1956 | PCC_7_COF_OV_ERR | PCC_6_LSO_OV_ERR | | 1957 | PCC_7_COF_OV_ERR | PCC_6_LSO_OV_ERR | |
1957 | PCC_7_LSO_OV_ERR | PCC_FB_ECC_SG_ERR | | 1958 | PCC_7_LSO_OV_ERR | PCC_FB_ECC_SG_ERR | |
1958 | PCC_TXB_ECC_SG_ERR, flag, &bar0->pcc_err_mask); | 1959 | PCC_TXB_ECC_SG_ERR, |
1960 | flag, &bar0->pcc_err_mask); | ||
1959 | 1961 | ||
1960 | do_s2io_write_bits(TTI_SM_ERR_ALARM | TTI_ECC_SG_ERR | | 1962 | do_s2io_write_bits(TTI_SM_ERR_ALARM | TTI_ECC_SG_ERR | |
1961 | TTI_ECC_DB_ERR, flag, &bar0->tti_err_mask); | 1963 | TTI_ECC_DB_ERR, flag, &bar0->tti_err_mask); |
1962 | 1964 | ||
1963 | do_s2io_write_bits(LSO6_ABORT | LSO7_ABORT | | 1965 | do_s2io_write_bits(LSO6_ABORT | LSO7_ABORT | |
1964 | LSO6_SM_ERR_ALARM | LSO7_SM_ERR_ALARM | | 1966 | LSO6_SM_ERR_ALARM | LSO7_SM_ERR_ALARM | |
1965 | LSO6_SEND_OFLOW | LSO7_SEND_OFLOW, | 1967 | LSO6_SEND_OFLOW | LSO7_SEND_OFLOW, |
1966 | flag, &bar0->lso_err_mask); | 1968 | flag, &bar0->lso_err_mask); |
1967 | 1969 | ||
1968 | do_s2io_write_bits(TPA_SM_ERR_ALARM | TPA_TX_FRM_DROP, | 1970 | do_s2io_write_bits(TPA_SM_ERR_ALARM | TPA_TX_FRM_DROP, |
1969 | flag, &bar0->tpa_err_mask); | 1971 | flag, &bar0->tpa_err_mask); |
1970 | 1972 | ||
1971 | do_s2io_write_bits(SM_SM_ERR_ALARM, flag, &bar0->sm_err_mask); | 1973 | do_s2io_write_bits(SM_SM_ERR_ALARM, flag, &bar0->sm_err_mask); |
1972 | |||
1973 | } | 1974 | } |
1974 | 1975 | ||
1975 | if (mask & TX_MAC_INTR) { | 1976 | if (mask & TX_MAC_INTR) { |
1976 | gen_int_mask |= TXMAC_INT_M; | 1977 | gen_int_mask |= TXMAC_INT_M; |
1977 | do_s2io_write_bits(MAC_INT_STATUS_TMAC_INT, flag, | 1978 | do_s2io_write_bits(MAC_INT_STATUS_TMAC_INT, flag, |
1978 | &bar0->mac_int_mask); | 1979 | &bar0->mac_int_mask); |
1979 | do_s2io_write_bits(TMAC_TX_BUF_OVRN | TMAC_TX_SM_ERR | | 1980 | do_s2io_write_bits(TMAC_TX_BUF_OVRN | TMAC_TX_SM_ERR | |
1980 | TMAC_ECC_SG_ERR | TMAC_ECC_DB_ERR | | 1981 | TMAC_ECC_SG_ERR | TMAC_ECC_DB_ERR | |
1981 | TMAC_DESC_ECC_SG_ERR | TMAC_DESC_ECC_DB_ERR, | 1982 | TMAC_DESC_ECC_SG_ERR | TMAC_DESC_ECC_DB_ERR, |
1982 | flag, &bar0->mac_tmac_err_mask); | 1983 | flag, &bar0->mac_tmac_err_mask); |
1983 | } | 1984 | } |
1984 | 1985 | ||
1985 | if (mask & TX_XGXS_INTR) { | 1986 | if (mask & TX_XGXS_INTR) { |
1986 | gen_int_mask |= TXXGXS_INT_M; | 1987 | gen_int_mask |= TXXGXS_INT_M; |
1987 | do_s2io_write_bits(XGXS_INT_STATUS_TXGXS, flag, | 1988 | do_s2io_write_bits(XGXS_INT_STATUS_TXGXS, flag, |
1988 | &bar0->xgxs_int_mask); | 1989 | &bar0->xgxs_int_mask); |
1989 | do_s2io_write_bits(TXGXS_ESTORE_UFLOW | TXGXS_TX_SM_ERR | | 1990 | do_s2io_write_bits(TXGXS_ESTORE_UFLOW | TXGXS_TX_SM_ERR | |
1990 | TXGXS_ECC_SG_ERR | TXGXS_ECC_DB_ERR, | 1991 | TXGXS_ECC_SG_ERR | TXGXS_ECC_DB_ERR, |
1991 | flag, &bar0->xgxs_txgxs_err_mask); | 1992 | flag, &bar0->xgxs_txgxs_err_mask); |
1992 | } | 1993 | } |
1993 | 1994 | ||
1994 | if (mask & RX_DMA_INTR) { | 1995 | if (mask & RX_DMA_INTR) { |
1995 | gen_int_mask |= RXDMA_INT_M; | 1996 | gen_int_mask |= RXDMA_INT_M; |
1996 | do_s2io_write_bits(RXDMA_INT_RC_INT_M | RXDMA_INT_RPA_INT_M | | 1997 | do_s2io_write_bits(RXDMA_INT_RC_INT_M | RXDMA_INT_RPA_INT_M | |
1997 | RXDMA_INT_RDA_INT_M | RXDMA_INT_RTI_INT_M, | 1998 | RXDMA_INT_RDA_INT_M | RXDMA_INT_RTI_INT_M, |
1998 | flag, &bar0->rxdma_int_mask); | 1999 | flag, &bar0->rxdma_int_mask); |
1999 | do_s2io_write_bits(RC_PRCn_ECC_DB_ERR | RC_FTC_ECC_DB_ERR | | 2000 | do_s2io_write_bits(RC_PRCn_ECC_DB_ERR | RC_FTC_ECC_DB_ERR | |
2000 | RC_PRCn_SM_ERR_ALARM | RC_FTC_SM_ERR_ALARM | | 2001 | RC_PRCn_SM_ERR_ALARM | RC_FTC_SM_ERR_ALARM | |
2001 | RC_PRCn_ECC_SG_ERR | RC_FTC_ECC_SG_ERR | | 2002 | RC_PRCn_ECC_SG_ERR | RC_FTC_ECC_SG_ERR | |
2002 | RC_RDA_FAIL_WR_Rn, flag, &bar0->rc_err_mask); | 2003 | RC_RDA_FAIL_WR_Rn, flag, &bar0->rc_err_mask); |
2003 | do_s2io_write_bits(PRC_PCI_AB_RD_Rn | PRC_PCI_AB_WR_Rn | | 2004 | do_s2io_write_bits(PRC_PCI_AB_RD_Rn | PRC_PCI_AB_WR_Rn | |
2004 | PRC_PCI_AB_F_WR_Rn | PRC_PCI_DP_RD_Rn | | 2005 | PRC_PCI_AB_F_WR_Rn | PRC_PCI_DP_RD_Rn | |
2005 | PRC_PCI_DP_WR_Rn | PRC_PCI_DP_F_WR_Rn, flag, | 2006 | PRC_PCI_DP_WR_Rn | PRC_PCI_DP_F_WR_Rn, flag, |
2006 | &bar0->prc_pcix_err_mask); | 2007 | &bar0->prc_pcix_err_mask); |
2007 | do_s2io_write_bits(RPA_SM_ERR_ALARM | RPA_CREDIT_ERR | | 2008 | do_s2io_write_bits(RPA_SM_ERR_ALARM | RPA_CREDIT_ERR | |
2008 | RPA_ECC_SG_ERR | RPA_ECC_DB_ERR, flag, | 2009 | RPA_ECC_SG_ERR | RPA_ECC_DB_ERR, flag, |
2009 | &bar0->rpa_err_mask); | 2010 | &bar0->rpa_err_mask); |
2010 | do_s2io_write_bits(RDA_RXDn_ECC_DB_ERR | RDA_FRM_ECC_DB_N_AERR | | 2011 | do_s2io_write_bits(RDA_RXDn_ECC_DB_ERR | RDA_FRM_ECC_DB_N_AERR | |
2011 | RDA_SM1_ERR_ALARM | RDA_SM0_ERR_ALARM | | 2012 | RDA_SM1_ERR_ALARM | RDA_SM0_ERR_ALARM | |
2012 | RDA_RXD_ECC_DB_SERR | RDA_RXDn_ECC_SG_ERR | | 2013 | RDA_RXD_ECC_DB_SERR | RDA_RXDn_ECC_SG_ERR | |
2013 | RDA_FRM_ECC_SG_ERR | RDA_MISC_ERR|RDA_PCIX_ERR, | 2014 | RDA_FRM_ECC_SG_ERR | |
2014 | flag, &bar0->rda_err_mask); | 2015 | RDA_MISC_ERR|RDA_PCIX_ERR, |
2016 | flag, &bar0->rda_err_mask); | ||
2015 | do_s2io_write_bits(RTI_SM_ERR_ALARM | | 2017 | do_s2io_write_bits(RTI_SM_ERR_ALARM | |
2016 | RTI_ECC_SG_ERR | RTI_ECC_DB_ERR, | 2018 | RTI_ECC_SG_ERR | RTI_ECC_DB_ERR, |
2017 | flag, &bar0->rti_err_mask); | 2019 | flag, &bar0->rti_err_mask); |
2018 | } | 2020 | } |
2019 | 2021 | ||
2020 | if (mask & RX_MAC_INTR) { | 2022 | if (mask & RX_MAC_INTR) { |
2021 | gen_int_mask |= RXMAC_INT_M; | 2023 | gen_int_mask |= RXMAC_INT_M; |
2022 | do_s2io_write_bits(MAC_INT_STATUS_RMAC_INT, flag, | 2024 | do_s2io_write_bits(MAC_INT_STATUS_RMAC_INT, flag, |
2023 | &bar0->mac_int_mask); | 2025 | &bar0->mac_int_mask); |
2024 | interruptible = RMAC_RX_BUFF_OVRN | RMAC_RX_SM_ERR | | 2026 | interruptible = (RMAC_RX_BUFF_OVRN | RMAC_RX_SM_ERR | |
2025 | RMAC_UNUSED_INT | RMAC_SINGLE_ECC_ERR | | 2027 | RMAC_UNUSED_INT | RMAC_SINGLE_ECC_ERR | |
2026 | RMAC_DOUBLE_ECC_ERR; | 2028 | RMAC_DOUBLE_ECC_ERR); |
2027 | if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) | 2029 | if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) |
2028 | interruptible |= RMAC_LINK_STATE_CHANGE_INT; | 2030 | interruptible |= RMAC_LINK_STATE_CHANGE_INT; |
2029 | do_s2io_write_bits(interruptible, | 2031 | do_s2io_write_bits(interruptible, |
2030 | flag, &bar0->mac_rmac_err_mask); | 2032 | flag, &bar0->mac_rmac_err_mask); |
2031 | } | 2033 | } |
2032 | 2034 | ||
2033 | if (mask & RX_XGXS_INTR) | 2035 | if (mask & RX_XGXS_INTR) { |
2034 | { | ||
2035 | gen_int_mask |= RXXGXS_INT_M; | 2036 | gen_int_mask |= RXXGXS_INT_M; |
2036 | do_s2io_write_bits(XGXS_INT_STATUS_RXGXS, flag, | 2037 | do_s2io_write_bits(XGXS_INT_STATUS_RXGXS, flag, |
2037 | &bar0->xgxs_int_mask); | 2038 | &bar0->xgxs_int_mask); |
2038 | do_s2io_write_bits(RXGXS_ESTORE_OFLOW | RXGXS_RX_SM_ERR, flag, | 2039 | do_s2io_write_bits(RXGXS_ESTORE_OFLOW | RXGXS_RX_SM_ERR, flag, |
2039 | &bar0->xgxs_rxgxs_err_mask); | 2040 | &bar0->xgxs_rxgxs_err_mask); |
2040 | } | 2041 | } |
2041 | 2042 | ||
2042 | if (mask & MC_INTR) { | 2043 | if (mask & MC_INTR) { |
2043 | gen_int_mask |= MC_INT_M; | 2044 | gen_int_mask |= MC_INT_M; |
2044 | do_s2io_write_bits(MC_INT_MASK_MC_INT, flag, &bar0->mc_int_mask); | 2045 | do_s2io_write_bits(MC_INT_MASK_MC_INT, |
2046 | flag, &bar0->mc_int_mask); | ||
2045 | do_s2io_write_bits(MC_ERR_REG_SM_ERR | MC_ERR_REG_ECC_ALL_SNG | | 2047 | do_s2io_write_bits(MC_ERR_REG_SM_ERR | MC_ERR_REG_ECC_ALL_SNG | |
2046 | MC_ERR_REG_ECC_ALL_DBL | PLL_LOCK_N, flag, | 2048 | MC_ERR_REG_ECC_ALL_DBL | PLL_LOCK_N, flag, |
2047 | &bar0->mc_err_mask); | 2049 | &bar0->mc_err_mask); |
2048 | } | 2050 | } |
2049 | nic->general_int_mask = gen_int_mask; | 2051 | nic->general_int_mask = gen_int_mask; |
2050 | 2052 | ||
2051 | /* Remove this line when alarm interrupts are enabled */ | 2053 | /* Remove this line when alarm interrupts are enabled */ |
2052 | nic->general_int_mask = 0; | 2054 | nic->general_int_mask = 0; |
2053 | } | 2055 | } |
2056 | |||
2054 | /** | 2057 | /** |
2055 | * en_dis_able_nic_intrs - Enable or Disable the interrupts | 2058 | * en_dis_able_nic_intrs - Enable or Disable the interrupts |
2056 | * @nic: device private variable, | 2059 | * @nic: device private variable, |
@@ -2082,11 +2085,11 @@ static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag) | |||
2082 | * TODO | 2085 | * TODO |
2083 | */ | 2086 | */ |
2084 | if (s2io_link_fault_indication(nic) == | 2087 | if (s2io_link_fault_indication(nic) == |
2085 | LINK_UP_DOWN_INTERRUPT ) { | 2088 | LINK_UP_DOWN_INTERRUPT) { |
2086 | do_s2io_write_bits(PIC_INT_GPIO, flag, | 2089 | do_s2io_write_bits(PIC_INT_GPIO, flag, |
2087 | &bar0->pic_int_mask); | 2090 | &bar0->pic_int_mask); |
2088 | do_s2io_write_bits(GPIO_INT_MASK_LINK_UP, flag, | 2091 | do_s2io_write_bits(GPIO_INT_MASK_LINK_UP, flag, |
2089 | &bar0->gpio_int_mask); | 2092 | &bar0->gpio_int_mask); |
2090 | } else | 2093 | } else |
2091 | writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask); | 2094 | writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask); |
2092 | } else if (flag == DISABLE_INTRS) { | 2095 | } else if (flag == DISABLE_INTRS) { |
@@ -2133,7 +2136,7 @@ static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag) | |||
2133 | 2136 | ||
2134 | temp64 = readq(&bar0->general_int_mask); | 2137 | temp64 = readq(&bar0->general_int_mask); |
2135 | if (flag == ENABLE_INTRS) | 2138 | if (flag == ENABLE_INTRS) |
2136 | temp64 &= ~((u64) intr_mask); | 2139 | temp64 &= ~((u64)intr_mask); |
2137 | else | 2140 | else |
2138 | temp64 = DISABLE_ALL_INTRS; | 2141 | temp64 = DISABLE_ALL_INTRS; |
2139 | writeq(temp64, &bar0->general_int_mask); | 2142 | writeq(temp64, &bar0->general_int_mask); |
@@ -2198,7 +2201,7 @@ static int verify_xena_quiescence(struct s2io_nic *sp) | |||
2198 | return 0; | 2201 | return 0; |
2199 | } | 2202 | } |
2200 | if (!(val64 & ADAPTER_STATUS_RDMA_READY)) { | 2203 | if (!(val64 & ADAPTER_STATUS_RDMA_READY)) { |
2201 | DBG_PRINT(ERR_DBG, "%s", "RDMA is not ready!"); | 2204 | DBG_PRINT(ERR_DBG, "%s", "RDMA is not ready!"); |
2202 | return 0; | 2205 | return 0; |
2203 | } | 2206 | } |
2204 | if (!(val64 & ADAPTER_STATUS_PFC_READY)) { | 2207 | if (!(val64 & ADAPTER_STATUS_PFC_READY)) { |
@@ -2232,13 +2235,13 @@ static int verify_xena_quiescence(struct s2io_nic *sp) | |||
2232 | * not be asserted. | 2235 | * not be asserted. |
2233 | */ | 2236 | */ |
2234 | if (!(val64 & ADAPTER_STATUS_P_PLL_LOCK) && | 2237 | if (!(val64 & ADAPTER_STATUS_P_PLL_LOCK) && |
2235 | sp->device_type == XFRAME_II_DEVICE && mode != | 2238 | sp->device_type == XFRAME_II_DEVICE && |
2236 | PCI_MODE_PCI_33) { | 2239 | mode != PCI_MODE_PCI_33) { |
2237 | DBG_PRINT(ERR_DBG, "%s", "P_PLL is not locked!"); | 2240 | DBG_PRINT(ERR_DBG, "%s", "P_PLL is not locked!"); |
2238 | return 0; | 2241 | return 0; |
2239 | } | 2242 | } |
2240 | if (!((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) == | 2243 | if (!((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) == |
2241 | ADAPTER_STATUS_RC_PRC_QUIESCENT)) { | 2244 | ADAPTER_STATUS_RC_PRC_QUIESCENT)) { |
2242 | DBG_PRINT(ERR_DBG, "%s", "RC_PRC is not QUIESCENT!"); | 2245 | DBG_PRINT(ERR_DBG, "%s", "RC_PRC is not QUIESCENT!"); |
2243 | return 0; | 2246 | return 0; |
2244 | } | 2247 | } |
@@ -2253,7 +2256,7 @@ static int verify_xena_quiescence(struct s2io_nic *sp) | |||
2253 | * | 2256 | * |
2254 | */ | 2257 | */ |
2255 | 2258 | ||
2256 | static void fix_mac_address(struct s2io_nic * sp) | 2259 | static void fix_mac_address(struct s2io_nic *sp) |
2257 | { | 2260 | { |
2258 | struct XENA_dev_config __iomem *bar0 = sp->bar0; | 2261 | struct XENA_dev_config __iomem *bar0 = sp->bar0; |
2259 | u64 val64; | 2262 | u64 val64; |
@@ -2295,7 +2298,7 @@ static int start_nic(struct s2io_nic *nic) | |||
2295 | for (i = 0; i < config->rx_ring_num; i++) { | 2298 | for (i = 0; i < config->rx_ring_num; i++) { |
2296 | struct ring_info *ring = &mac_control->rings[i]; | 2299 | struct ring_info *ring = &mac_control->rings[i]; |
2297 | 2300 | ||
2298 | writeq((u64) ring->rx_blocks[0].block_dma_addr, | 2301 | writeq((u64)ring->rx_blocks[0].block_dma_addr, |
2299 | &bar0->prc_rxd0_n[i]); | 2302 | &bar0->prc_rxd0_n[i]); |
2300 | 2303 | ||
2301 | val64 = readq(&bar0->prc_ctrl_n[i]); | 2304 | val64 = readq(&bar0->prc_ctrl_n[i]); |
@@ -2349,7 +2352,7 @@ static int start_nic(struct s2io_nic *nic) | |||
2349 | if (!verify_xena_quiescence(nic)) { | 2352 | if (!verify_xena_quiescence(nic)) { |
2350 | DBG_PRINT(ERR_DBG, "%s: device is not ready, ", dev->name); | 2353 | DBG_PRINT(ERR_DBG, "%s: device is not ready, ", dev->name); |
2351 | DBG_PRINT(ERR_DBG, "Adapter status reads: 0x%llx\n", | 2354 | DBG_PRINT(ERR_DBG, "Adapter status reads: 0x%llx\n", |
2352 | (unsigned long long) val64); | 2355 | (unsigned long long)val64); |
2353 | return FAILURE; | 2356 | return FAILURE; |
2354 | } | 2357 | } |
2355 | 2358 | ||
@@ -2389,8 +2392,8 @@ static int start_nic(struct s2io_nic *nic) | |||
2389 | /** | 2392 | /** |
2390 | * s2io_txdl_getskb - Get the skb from txdl, unmap and return skb | 2393 | * s2io_txdl_getskb - Get the skb from txdl, unmap and return skb |
2391 | */ | 2394 | */ |
2392 | static struct sk_buff *s2io_txdl_getskb(struct fifo_info *fifo_data, struct \ | 2395 | static struct sk_buff *s2io_txdl_getskb(struct fifo_info *fifo_data, |
2393 | TxD *txdlp, int get_off) | 2396 | struct TxD *txdlp, int get_off) |
2394 | { | 2397 | { |
2395 | struct s2io_nic *nic = fifo_data->nic; | 2398 | struct s2io_nic *nic = fifo_data->nic; |
2396 | struct sk_buff *skb; | 2399 | struct sk_buff *skb; |
@@ -2399,22 +2402,18 @@ static struct sk_buff *s2io_txdl_getskb(struct fifo_info *fifo_data, struct \ | |||
2399 | 2402 | ||
2400 | txds = txdlp; | 2403 | txds = txdlp; |
2401 | if (txds->Host_Control == (u64)(long)fifo_data->ufo_in_band_v) { | 2404 | if (txds->Host_Control == (u64)(long)fifo_data->ufo_in_band_v) { |
2402 | pci_unmap_single(nic->pdev, (dma_addr_t) | 2405 | pci_unmap_single(nic->pdev, (dma_addr_t)txds->Buffer_Pointer, |
2403 | txds->Buffer_Pointer, sizeof(u64), | 2406 | sizeof(u64), PCI_DMA_TODEVICE); |
2404 | PCI_DMA_TODEVICE); | ||
2405 | txds++; | 2407 | txds++; |
2406 | } | 2408 | } |
2407 | 2409 | ||
2408 | skb = (struct sk_buff *) ((unsigned long) | 2410 | skb = (struct sk_buff *)((unsigned long)txds->Host_Control); |
2409 | txds->Host_Control); | ||
2410 | if (!skb) { | 2411 | if (!skb) { |
2411 | memset(txdlp, 0, (sizeof(struct TxD) * fifo_data->max_txds)); | 2412 | memset(txdlp, 0, (sizeof(struct TxD) * fifo_data->max_txds)); |
2412 | return NULL; | 2413 | return NULL; |
2413 | } | 2414 | } |
2414 | pci_unmap_single(nic->pdev, (dma_addr_t) | 2415 | pci_unmap_single(nic->pdev, (dma_addr_t)txds->Buffer_Pointer, |
2415 | txds->Buffer_Pointer, | 2416 | skb->len - skb->data_len, PCI_DMA_TODEVICE); |
2416 | skb->len - skb->data_len, | ||
2417 | PCI_DMA_TODEVICE); | ||
2418 | frg_cnt = skb_shinfo(skb)->nr_frags; | 2417 | frg_cnt = skb_shinfo(skb)->nr_frags; |
2419 | if (frg_cnt) { | 2418 | if (frg_cnt) { |
2420 | txds++; | 2419 | txds++; |
@@ -2422,13 +2421,13 @@ static struct sk_buff *s2io_txdl_getskb(struct fifo_info *fifo_data, struct \ | |||
2422 | skb_frag_t *frag = &skb_shinfo(skb)->frags[j]; | 2421 | skb_frag_t *frag = &skb_shinfo(skb)->frags[j]; |
2423 | if (!txds->Buffer_Pointer) | 2422 | if (!txds->Buffer_Pointer) |
2424 | break; | 2423 | break; |
2425 | pci_unmap_page(nic->pdev, (dma_addr_t) | 2424 | pci_unmap_page(nic->pdev, |
2426 | txds->Buffer_Pointer, | 2425 | (dma_addr_t)txds->Buffer_Pointer, |
2427 | frag->size, PCI_DMA_TODEVICE); | 2426 | frag->size, PCI_DMA_TODEVICE); |
2428 | } | 2427 | } |
2429 | } | 2428 | } |
2430 | memset(txdlp,0, (sizeof(struct TxD) * fifo_data->max_txds)); | 2429 | memset(txdlp, 0, (sizeof(struct TxD) * fifo_data->max_txds)); |
2431 | return(skb); | 2430 | return skb; |
2432 | } | 2431 | } |
2433 | 2432 | ||
2434 | /** | 2433 | /** |
@@ -2437,7 +2436,7 @@ static struct sk_buff *s2io_txdl_getskb(struct fifo_info *fifo_data, struct \ | |||
2437 | * Description: | 2436 | * Description: |
2438 | * Free all queued Tx buffers. | 2437 | * Free all queued Tx buffers. |
2439 | * Return Value: void | 2438 | * Return Value: void |
2440 | */ | 2439 | */ |
2441 | 2440 | ||
2442 | static void free_tx_buffers(struct s2io_nic *nic) | 2441 | static void free_tx_buffers(struct s2io_nic *nic) |
2443 | { | 2442 | { |
@@ -2533,7 +2532,7 @@ static void stop_nic(struct s2io_nic *nic) | |||
2533 | * SUCCESS on success or an appropriate -ve value on failure. | 2532 | * SUCCESS on success or an appropriate -ve value on failure. |
2534 | */ | 2533 | */ |
2535 | static int fill_rx_buffers(struct s2io_nic *nic, struct ring_info *ring, | 2534 | static int fill_rx_buffers(struct s2io_nic *nic, struct ring_info *ring, |
2536 | int from_card_up) | 2535 | int from_card_up) |
2537 | { | 2536 | { |
2538 | struct sk_buff *skb; | 2537 | struct sk_buff *skb; |
2539 | struct RxD_t *rxdp; | 2538 | struct RxD_t *rxdp; |
@@ -2564,17 +2563,16 @@ static int fill_rx_buffers(struct s2io_nic *nic, struct ring_info *ring, | |||
2564 | rxd_index += (block_no * ring->rxd_count); | 2563 | rxd_index += (block_no * ring->rxd_count); |
2565 | 2564 | ||
2566 | if ((block_no == block_no1) && | 2565 | if ((block_no == block_no1) && |
2567 | (off == ring->rx_curr_get_info.offset) && | 2566 | (off == ring->rx_curr_get_info.offset) && |
2568 | (rxdp->Host_Control)) { | 2567 | (rxdp->Host_Control)) { |
2569 | DBG_PRINT(INTR_DBG, "%s: Get and Put", | 2568 | DBG_PRINT(INTR_DBG, "%s: Get and Put", ring->dev->name); |
2570 | ring->dev->name); | ||
2571 | DBG_PRINT(INTR_DBG, " info equated\n"); | 2569 | DBG_PRINT(INTR_DBG, " info equated\n"); |
2572 | goto end; | 2570 | goto end; |
2573 | } | 2571 | } |
2574 | if (off && (off == ring->rxd_count)) { | 2572 | if (off && (off == ring->rxd_count)) { |
2575 | ring->rx_curr_put_info.block_index++; | 2573 | ring->rx_curr_put_info.block_index++; |
2576 | if (ring->rx_curr_put_info.block_index == | 2574 | if (ring->rx_curr_put_info.block_index == |
2577 | ring->block_count) | 2575 | ring->block_count) |
2578 | ring->rx_curr_put_info.block_index = 0; | 2576 | ring->rx_curr_put_info.block_index = 0; |
2579 | block_no = ring->rx_curr_put_info.block_index; | 2577 | block_no = ring->rx_curr_put_info.block_index; |
2580 | off = 0; | 2578 | off = 0; |
@@ -2586,14 +2584,15 @@ static int fill_rx_buffers(struct s2io_nic *nic, struct ring_info *ring, | |||
2586 | } | 2584 | } |
2587 | 2585 | ||
2588 | if ((rxdp->Control_1 & RXD_OWN_XENA) && | 2586 | if ((rxdp->Control_1 & RXD_OWN_XENA) && |
2589 | ((ring->rxd_mode == RXD_MODE_3B) && | 2587 | ((ring->rxd_mode == RXD_MODE_3B) && |
2590 | (rxdp->Control_2 & s2BIT(0)))) { | 2588 | (rxdp->Control_2 & s2BIT(0)))) { |
2591 | ring->rx_curr_put_info.offset = off; | 2589 | ring->rx_curr_put_info.offset = off; |
2592 | goto end; | 2590 | goto end; |
2593 | } | 2591 | } |
2594 | /* calculate size of skb based on ring mode */ | 2592 | /* calculate size of skb based on ring mode */ |
2595 | size = ring->mtu + HEADER_ETHERNET_II_802_3_SIZE + | 2593 | size = ring->mtu + |
2596 | HEADER_802_2_SIZE + HEADER_SNAP_SIZE; | 2594 | HEADER_ETHERNET_II_802_3_SIZE + |
2595 | HEADER_802_2_SIZE + HEADER_SNAP_SIZE; | ||
2597 | if (ring->rxd_mode == RXD_MODE_1) | 2596 | if (ring->rxd_mode == RXD_MODE_1) |
2598 | size += NET_IP_ALIGN; | 2597 | size += NET_IP_ALIGN; |
2599 | else | 2598 | else |
@@ -2601,7 +2600,7 @@ static int fill_rx_buffers(struct s2io_nic *nic, struct ring_info *ring, | |||
2601 | 2600 | ||
2602 | /* allocate skb */ | 2601 | /* allocate skb */ |
2603 | skb = dev_alloc_skb(size); | 2602 | skb = dev_alloc_skb(size); |
2604 | if(!skb) { | 2603 | if (!skb) { |
2605 | DBG_PRINT(INFO_DBG, "%s: Out of ", ring->dev->name); | 2604 | DBG_PRINT(INFO_DBG, "%s: Out of ", ring->dev->name); |
2606 | DBG_PRINT(INFO_DBG, "memory to allocate SKBs\n"); | 2605 | DBG_PRINT(INFO_DBG, "memory to allocate SKBs\n"); |
2607 | if (first_rxdp) { | 2606 | if (first_rxdp) { |
@@ -2616,19 +2615,20 @@ static int fill_rx_buffers(struct s2io_nic *nic, struct ring_info *ring, | |||
2616 | 2615 | ||
2617 | if (ring->rxd_mode == RXD_MODE_1) { | 2616 | if (ring->rxd_mode == RXD_MODE_1) { |
2618 | /* 1 buffer mode - normal operation mode */ | 2617 | /* 1 buffer mode - normal operation mode */ |
2619 | rxdp1 = (struct RxD1*)rxdp; | 2618 | rxdp1 = (struct RxD1 *)rxdp; |
2620 | memset(rxdp, 0, sizeof(struct RxD1)); | 2619 | memset(rxdp, 0, sizeof(struct RxD1)); |
2621 | skb_reserve(skb, NET_IP_ALIGN); | 2620 | skb_reserve(skb, NET_IP_ALIGN); |
2622 | rxdp1->Buffer0_ptr = pci_map_single | 2621 | rxdp1->Buffer0_ptr = |
2623 | (ring->pdev, skb->data, size - NET_IP_ALIGN, | 2622 | pci_map_single(ring->pdev, skb->data, |
2624 | PCI_DMA_FROMDEVICE); | 2623 | size - NET_IP_ALIGN, |
2624 | PCI_DMA_FROMDEVICE); | ||
2625 | if (pci_dma_mapping_error(nic->pdev, | 2625 | if (pci_dma_mapping_error(nic->pdev, |
2626 | rxdp1->Buffer0_ptr)) | 2626 | rxdp1->Buffer0_ptr)) |
2627 | goto pci_map_failed; | 2627 | goto pci_map_failed; |
2628 | 2628 | ||
2629 | rxdp->Control_2 = | 2629 | rxdp->Control_2 = |
2630 | SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN); | 2630 | SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN); |
2631 | rxdp->Host_Control = (unsigned long) (skb); | 2631 | rxdp->Host_Control = (unsigned long)skb; |
2632 | } else if (ring->rxd_mode == RXD_MODE_3B) { | 2632 | } else if (ring->rxd_mode == RXD_MODE_3B) { |
2633 | /* | 2633 | /* |
2634 | * 2 buffer mode - | 2634 | * 2 buffer mode - |
@@ -2636,7 +2636,7 @@ static int fill_rx_buffers(struct s2io_nic *nic, struct ring_info *ring, | |||
2636 | * byte aligned receive buffers. | 2636 | * byte aligned receive buffers. |
2637 | */ | 2637 | */ |
2638 | 2638 | ||
2639 | rxdp3 = (struct RxD3*)rxdp; | 2639 | rxdp3 = (struct RxD3 *)rxdp; |
2640 | /* save buffer pointers to avoid frequent dma mapping */ | 2640 | /* save buffer pointers to avoid frequent dma mapping */ |
2641 | Buffer0_ptr = rxdp3->Buffer0_ptr; | 2641 | Buffer0_ptr = rxdp3->Buffer0_ptr; |
2642 | Buffer1_ptr = rxdp3->Buffer1_ptr; | 2642 | Buffer1_ptr = rxdp3->Buffer1_ptr; |
@@ -2647,7 +2647,7 @@ static int fill_rx_buffers(struct s2io_nic *nic, struct ring_info *ring, | |||
2647 | 2647 | ||
2648 | ba = &ring->ba[block_no][off]; | 2648 | ba = &ring->ba[block_no][off]; |
2649 | skb_reserve(skb, BUF0_LEN); | 2649 | skb_reserve(skb, BUF0_LEN); |
2650 | tmp = (u64)(unsigned long) skb->data; | 2650 | tmp = (u64)(unsigned long)skb->data; |
2651 | tmp += ALIGN_SIZE; | 2651 | tmp += ALIGN_SIZE; |
2652 | tmp &= ~ALIGN_SIZE; | 2652 | tmp &= ~ALIGN_SIZE; |
2653 | skb->data = (void *) (unsigned long)tmp; | 2653 | skb->data = (void *) (unsigned long)tmp; |
@@ -2655,15 +2655,17 @@ static int fill_rx_buffers(struct s2io_nic *nic, struct ring_info *ring, | |||
2655 | 2655 | ||
2656 | if (from_card_up) { | 2656 | if (from_card_up) { |
2657 | rxdp3->Buffer0_ptr = | 2657 | rxdp3->Buffer0_ptr = |
2658 | pci_map_single(ring->pdev, ba->ba_0, | 2658 | pci_map_single(ring->pdev, ba->ba_0, |
2659 | BUF0_LEN, PCI_DMA_FROMDEVICE); | 2659 | BUF0_LEN, |
2660 | if (pci_dma_mapping_error(nic->pdev, | 2660 | PCI_DMA_FROMDEVICE); |
2661 | rxdp3->Buffer0_ptr)) | 2661 | if (pci_dma_mapping_error(nic->pdev, |
2662 | rxdp3->Buffer0_ptr)) | ||
2662 | goto pci_map_failed; | 2663 | goto pci_map_failed; |
2663 | } else | 2664 | } else |
2664 | pci_dma_sync_single_for_device(ring->pdev, | 2665 | pci_dma_sync_single_for_device(ring->pdev, |
2665 | (dma_addr_t) rxdp3->Buffer0_ptr, | 2666 | (dma_addr_t)rxdp3->Buffer0_ptr, |
2666 | BUF0_LEN, PCI_DMA_FROMDEVICE); | 2667 | BUF0_LEN, |
2668 | PCI_DMA_FROMDEVICE); | ||
2667 | 2669 | ||
2668 | rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN); | 2670 | rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN); |
2669 | if (ring->rxd_mode == RXD_MODE_3B) { | 2671 | if (ring->rxd_mode == RXD_MODE_3B) { |
@@ -2673,34 +2675,35 @@ static int fill_rx_buffers(struct s2io_nic *nic, struct ring_info *ring, | |||
2673 | * Buffer2 will have L3/L4 header plus | 2675 | * Buffer2 will have L3/L4 header plus |
2674 | * L4 payload | 2676 | * L4 payload |
2675 | */ | 2677 | */ |
2676 | rxdp3->Buffer2_ptr = pci_map_single | 2678 | rxdp3->Buffer2_ptr = pci_map_single(ring->pdev, |
2677 | (ring->pdev, skb->data, ring->mtu + 4, | 2679 | skb->data, |
2678 | PCI_DMA_FROMDEVICE); | 2680 | ring->mtu + 4, |
2681 | PCI_DMA_FROMDEVICE); | ||
2679 | 2682 | ||
2680 | if (pci_dma_mapping_error(nic->pdev, | 2683 | if (pci_dma_mapping_error(nic->pdev, |
2681 | rxdp3->Buffer2_ptr)) | 2684 | rxdp3->Buffer2_ptr)) |
2682 | goto pci_map_failed; | 2685 | goto pci_map_failed; |
2683 | 2686 | ||
2684 | if (from_card_up) { | 2687 | if (from_card_up) { |
2685 | rxdp3->Buffer1_ptr = | 2688 | rxdp3->Buffer1_ptr = |
2686 | pci_map_single(ring->pdev, | 2689 | pci_map_single(ring->pdev, |
2687 | ba->ba_1, BUF1_LEN, | 2690 | ba->ba_1, |
2688 | PCI_DMA_FROMDEVICE); | 2691 | BUF1_LEN, |
2692 | PCI_DMA_FROMDEVICE); | ||
2689 | 2693 | ||
2690 | if (pci_dma_mapping_error(nic->pdev, | 2694 | if (pci_dma_mapping_error(nic->pdev, |
2691 | rxdp3->Buffer1_ptr)) { | 2695 | rxdp3->Buffer1_ptr)) { |
2692 | pci_unmap_single | 2696 | pci_unmap_single(ring->pdev, |
2693 | (ring->pdev, | 2697 | (dma_addr_t)(unsigned long) |
2694 | (dma_addr_t)(unsigned long) | 2698 | skb->data, |
2695 | skb->data, | 2699 | ring->mtu + 4, |
2696 | ring->mtu + 4, | 2700 | PCI_DMA_FROMDEVICE); |
2697 | PCI_DMA_FROMDEVICE); | ||
2698 | goto pci_map_failed; | 2701 | goto pci_map_failed; |
2699 | } | 2702 | } |
2700 | } | 2703 | } |
2701 | rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1); | 2704 | rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1); |
2702 | rxdp->Control_2 |= SET_BUFFER2_SIZE_3 | 2705 | rxdp->Control_2 |= SET_BUFFER2_SIZE_3 |
2703 | (ring->mtu + 4); | 2706 | (ring->mtu + 4); |
2704 | } | 2707 | } |
2705 | rxdp->Control_2 |= s2BIT(0); | 2708 | rxdp->Control_2 |= s2BIT(0); |
2706 | rxdp->Host_Control = (unsigned long) (skb); | 2709 | rxdp->Host_Control = (unsigned long) (skb); |
@@ -2724,7 +2727,7 @@ static int fill_rx_buffers(struct s2io_nic *nic, struct ring_info *ring, | |||
2724 | alloc_tab++; | 2727 | alloc_tab++; |
2725 | } | 2728 | } |
2726 | 2729 | ||
2727 | end: | 2730 | end: |
2728 | /* Transfer ownership of first descriptor to adapter just before | 2731 | /* Transfer ownership of first descriptor to adapter just before |
2729 | * exiting. Before that, use memory barrier so that ownership | 2732 | * exiting. Before that, use memory barrier so that ownership |
2730 | * and other fields are seen by adapter correctly. | 2733 | * and other fields are seen by adapter correctly. |
@@ -2735,6 +2738,7 @@ static int fill_rx_buffers(struct s2io_nic *nic, struct ring_info *ring, | |||
2735 | } | 2738 | } |
2736 | 2739 | ||
2737 | return SUCCESS; | 2740 | return SUCCESS; |
2741 | |||
2738 | pci_map_failed: | 2742 | pci_map_failed: |
2739 | stats->pci_map_fail_cnt++; | 2743 | stats->pci_map_fail_cnt++; |
2740 | stats->mem_freed += skb->truesize; | 2744 | stats->mem_freed += skb->truesize; |
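Every pci_map_single() in the refill path above is checked with pci_dma_mapping_error(), and a failure jumps to the single pci_map_failed label that bumps the failure statistics and frees the skb. The same map-and-check idiom in isolation (a sketch; the helper name is illustrative):

#include <linux/pci.h>
#include <linux/skbuff.h>

/* Map an rx skb for the device and report failure to the caller,
 * which is expected to free the skb and account the error. */
static int my_map_rx_skb(struct pci_dev *pdev, struct sk_buff *skb,
			 int len, dma_addr_t *mapping)
{
	*mapping = pci_map_single(pdev, skb->data, len, PCI_DMA_FROMDEVICE);
	if (pci_dma_mapping_error(pdev, *mapping))
		return -EIO;
	return 0;
}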
@@ -2756,38 +2760,34 @@ static void free_rxd_blk(struct s2io_nic *sp, int ring_no, int blk) | |||
2756 | mac_control = &sp->mac_control; | 2760 | mac_control = &sp->mac_control; |
2757 | for (j = 0 ; j < rxd_count[sp->rxd_mode]; j++) { | 2761 | for (j = 0 ; j < rxd_count[sp->rxd_mode]; j++) { |
2758 | rxdp = mac_control->rings[ring_no]. | 2762 | rxdp = mac_control->rings[ring_no]. |
2759 | rx_blocks[blk].rxds[j].virt_addr; | 2763 | rx_blocks[blk].rxds[j].virt_addr; |
2760 | skb = (struct sk_buff *) | 2764 | skb = (struct sk_buff *)((unsigned long)rxdp->Host_Control); |
2761 | ((unsigned long) rxdp->Host_Control); | 2765 | if (!skb) |
2762 | if (!skb) { | ||
2763 | continue; | 2766 | continue; |
2764 | } | ||
2765 | if (sp->rxd_mode == RXD_MODE_1) { | 2767 | if (sp->rxd_mode == RXD_MODE_1) { |
2766 | rxdp1 = (struct RxD1*)rxdp; | 2768 | rxdp1 = (struct RxD1 *)rxdp; |
2767 | pci_unmap_single(sp->pdev, (dma_addr_t) | 2769 | pci_unmap_single(sp->pdev, |
2768 | rxdp1->Buffer0_ptr, | 2770 | (dma_addr_t)rxdp1->Buffer0_ptr, |
2769 | dev->mtu + | 2771 | dev->mtu + |
2770 | HEADER_ETHERNET_II_802_3_SIZE | 2772 | HEADER_ETHERNET_II_802_3_SIZE + |
2771 | + HEADER_802_2_SIZE + | 2773 | HEADER_802_2_SIZE + HEADER_SNAP_SIZE, |
2772 | HEADER_SNAP_SIZE, | 2774 | PCI_DMA_FROMDEVICE); |
2773 | PCI_DMA_FROMDEVICE); | ||
2774 | memset(rxdp, 0, sizeof(struct RxD1)); | 2775 | memset(rxdp, 0, sizeof(struct RxD1)); |
2775 | } else if(sp->rxd_mode == RXD_MODE_3B) { | 2776 | } else if (sp->rxd_mode == RXD_MODE_3B) { |
2776 | rxdp3 = (struct RxD3*)rxdp; | 2777 | rxdp3 = (struct RxD3 *)rxdp; |
2777 | ba = &mac_control->rings[ring_no]. | 2778 | ba = &mac_control->rings[ring_no].ba[blk][j]; |
2778 | ba[blk][j]; | 2779 | pci_unmap_single(sp->pdev, |
2779 | pci_unmap_single(sp->pdev, (dma_addr_t) | 2780 | (dma_addr_t)rxdp3->Buffer0_ptr, |
2780 | rxdp3->Buffer0_ptr, | 2781 | BUF0_LEN, |
2781 | BUF0_LEN, | 2782 | PCI_DMA_FROMDEVICE); |
2782 | PCI_DMA_FROMDEVICE); | 2783 | pci_unmap_single(sp->pdev, |
2783 | pci_unmap_single(sp->pdev, (dma_addr_t) | 2784 | (dma_addr_t)rxdp3->Buffer1_ptr, |
2784 | rxdp3->Buffer1_ptr, | 2785 | BUF1_LEN, |
2785 | BUF1_LEN, | 2786 | PCI_DMA_FROMDEVICE); |
2786 | PCI_DMA_FROMDEVICE); | 2787 | pci_unmap_single(sp->pdev, |
2787 | pci_unmap_single(sp->pdev, (dma_addr_t) | 2788 | (dma_addr_t)rxdp3->Buffer2_ptr, |
2788 | rxdp3->Buffer2_ptr, | 2789 | dev->mtu + 4, |
2789 | dev->mtu + 4, | 2790 | PCI_DMA_FROMDEVICE); |
2790 | PCI_DMA_FROMDEVICE); | ||
2791 | memset(rxdp, 0, sizeof(struct RxD3)); | 2791 | memset(rxdp, 0, sizeof(struct RxD3)); |
2792 | } | 2792 | } |
2793 | sp->mac_control.stats_info->sw_stat.mem_freed += skb->truesize; | 2793 | sp->mac_control.stats_info->sw_stat.mem_freed += skb->truesize; |
@@ -2819,7 +2819,7 @@ static void free_rx_buffers(struct s2io_nic *sp) | |||
2819 | struct ring_info *ring = &mac_control->rings[i]; | 2819 | struct ring_info *ring = &mac_control->rings[i]; |
2820 | 2820 | ||
2821 | for (blk = 0; blk < rx_ring_sz[i]; blk++) | 2821 | for (blk = 0; blk < rx_ring_sz[i]; blk++) |
2822 | free_rxd_blk(sp,i,blk); | 2822 | free_rxd_blk(sp, i, blk); |
2823 | 2823 | ||
2824 | ring->rx_curr_put_info.block_index = 0; | 2824 | ring->rx_curr_put_info.block_index = 0; |
2825 | ring->rx_curr_get_info.block_index = 0; | 2825 | ring->rx_curr_get_info.block_index = 0; |
@@ -2886,6 +2886,7 @@ static int s2io_poll_msix(struct napi_struct *napi, int budget) | |||
2886 | } | 2886 | } |
2887 | return pkts_processed; | 2887 | return pkts_processed; |
2888 | } | 2888 | } |
2889 | |||
2889 | static int s2io_poll_inta(struct napi_struct *napi, int budget) | 2890 | static int s2io_poll_inta(struct napi_struct *napi, int budget) |
2890 | { | 2891 | { |
2891 | struct s2io_nic *nic = container_of(napi, struct s2io_nic, napi); | 2892 | struct s2io_nic *nic = container_of(napi, struct s2io_nic, napi); |
@@ -2999,8 +3000,8 @@ static int rx_intr_handler(struct ring_info *ring_data, int budget) | |||
2999 | struct sk_buff *skb; | 3000 | struct sk_buff *skb; |
3000 | int pkt_cnt = 0, napi_pkts = 0; | 3001 | int pkt_cnt = 0, napi_pkts = 0; |
3001 | int i; | 3002 | int i; |
3002 | struct RxD1* rxdp1; | 3003 | struct RxD1 *rxdp1; |
3003 | struct RxD3* rxdp3; | 3004 | struct RxD3 *rxdp3; |
3004 | 3005 | ||
3005 | get_info = ring_data->rx_curr_get_info; | 3006 | get_info = ring_data->rx_curr_get_info; |
3006 | get_block = get_info.block_index; | 3007 | get_block = get_info.block_index; |
@@ -3016,10 +3017,10 @@ static int rx_intr_handler(struct ring_info *ring_data, int budget) | |||
3016 | if ((get_block == put_block) && | 3017 | if ((get_block == put_block) && |
3017 | (get_info.offset + 1) == put_info.offset) { | 3018 | (get_info.offset + 1) == put_info.offset) { |
3018 | DBG_PRINT(INTR_DBG, "%s: Ring Full\n", | 3019 | DBG_PRINT(INTR_DBG, "%s: Ring Full\n", |
3019 | ring_data->dev->name); | 3020 | ring_data->dev->name); |
3020 | break; | 3021 | break; |
3021 | } | 3022 | } |
3022 | skb = (struct sk_buff *) ((unsigned long)rxdp->Host_Control); | 3023 | skb = (struct sk_buff *)((unsigned long)rxdp->Host_Control); |
3023 | if (skb == NULL) { | 3024 | if (skb == NULL) { |
3024 | DBG_PRINT(ERR_DBG, "%s: The skb is ", | 3025 | DBG_PRINT(ERR_DBG, "%s: The skb is ", |
3025 | ring_data->dev->name); | 3026 | ring_data->dev->name); |
@@ -3027,30 +3028,31 @@ static int rx_intr_handler(struct ring_info *ring_data, int budget) | |||
3027 | return 0; | 3028 | return 0; |
3028 | } | 3029 | } |
3029 | if (ring_data->rxd_mode == RXD_MODE_1) { | 3030 | if (ring_data->rxd_mode == RXD_MODE_1) { |
3030 | rxdp1 = (struct RxD1*)rxdp; | 3031 | rxdp1 = (struct RxD1 *)rxdp; |
3031 | pci_unmap_single(ring_data->pdev, (dma_addr_t) | 3032 | pci_unmap_single(ring_data->pdev, (dma_addr_t) |
3032 | rxdp1->Buffer0_ptr, | 3033 | rxdp1->Buffer0_ptr, |
3033 | ring_data->mtu + | 3034 | ring_data->mtu + |
3034 | HEADER_ETHERNET_II_802_3_SIZE + | 3035 | HEADER_ETHERNET_II_802_3_SIZE + |
3035 | HEADER_802_2_SIZE + | 3036 | HEADER_802_2_SIZE + |
3036 | HEADER_SNAP_SIZE, | 3037 | HEADER_SNAP_SIZE, |
3037 | PCI_DMA_FROMDEVICE); | 3038 | PCI_DMA_FROMDEVICE); |
3038 | } else if (ring_data->rxd_mode == RXD_MODE_3B) { | 3039 | } else if (ring_data->rxd_mode == RXD_MODE_3B) { |
3039 | rxdp3 = (struct RxD3*)rxdp; | 3040 | rxdp3 = (struct RxD3 *)rxdp; |
3040 | pci_dma_sync_single_for_cpu(ring_data->pdev, (dma_addr_t) | 3041 | pci_dma_sync_single_for_cpu(ring_data->pdev, |
3041 | rxdp3->Buffer0_ptr, | 3042 | (dma_addr_t)rxdp3->Buffer0_ptr, |
3042 | BUF0_LEN, PCI_DMA_FROMDEVICE); | 3043 | BUF0_LEN, |
3043 | pci_unmap_single(ring_data->pdev, (dma_addr_t) | 3044 | PCI_DMA_FROMDEVICE); |
3044 | rxdp3->Buffer2_ptr, | 3045 | pci_unmap_single(ring_data->pdev, |
3045 | ring_data->mtu + 4, | 3046 | (dma_addr_t)rxdp3->Buffer2_ptr, |
3046 | PCI_DMA_FROMDEVICE); | 3047 | ring_data->mtu + 4, |
3048 | PCI_DMA_FROMDEVICE); | ||
3047 | } | 3049 | } |
3048 | prefetch(skb->data); | 3050 | prefetch(skb->data); |
3049 | rx_osm_handler(ring_data, rxdp); | 3051 | rx_osm_handler(ring_data, rxdp); |
3050 | get_info.offset++; | 3052 | get_info.offset++; |
3051 | ring_data->rx_curr_get_info.offset = get_info.offset; | 3053 | ring_data->rx_curr_get_info.offset = get_info.offset; |
3052 | rxdp = ring_data->rx_blocks[get_block]. | 3054 | rxdp = ring_data->rx_blocks[get_block]. |
3053 | rxds[get_info.offset].virt_addr; | 3055 | rxds[get_info.offset].virt_addr; |
3054 | if (get_info.offset == rxd_count[ring_data->rxd_mode]) { | 3056 | if (get_info.offset == rxd_count[ring_data->rxd_mode]) { |
3055 | get_info.offset = 0; | 3057 | get_info.offset = 0; |
3056 | ring_data->rx_curr_get_info.offset = get_info.offset; | 3058 | ring_data->rx_curr_get_info.offset = get_info.offset; |
@@ -3073,7 +3075,7 @@ static int rx_intr_handler(struct ring_info *ring_data, int budget) | |||
3073 | } | 3075 | } |
3074 | if (ring_data->lro) { | 3076 | if (ring_data->lro) { |
3075 | /* Clear all LRO sessions before exiting */ | 3077 | /* Clear all LRO sessions before exiting */ |
3076 | for (i=0; i<MAX_LRO_SESSIONS; i++) { | 3078 | for (i = 0; i < MAX_LRO_SESSIONS; i++) { |
3077 | struct lro *lro = &ring_data->lro0_n[i]; | 3079 | struct lro *lro = &ring_data->lro0_n[i]; |
3078 | if (lro->in_use) { | 3080 | if (lro->in_use) { |
3079 | update_L3L4_header(ring_data->nic, lro); | 3081 | update_L3L4_header(ring_data->nic, lro); |
@@ -3082,7 +3084,7 @@ static int rx_intr_handler(struct ring_info *ring_data, int budget) | |||
3082 | } | 3084 | } |
3083 | } | 3085 | } |
3084 | } | 3086 | } |
3085 | return(napi_pkts); | 3087 | return napi_pkts; |
3086 | } | 3088 | } |
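The receive loop above advances a software get pointer descriptor by descriptor, wrapping the offset at the per-block descriptor count and the block index at the ring's block count, mirroring what the put side does in fill_rx_buffers(). Stripped of the unmap and indication work, the cursor arithmetic is roughly (sketch; the cursor type is illustrative):

/* Illustrative {block, offset} cursor over a block-structured rx ring. */
struct my_rx_cursor {
	int block_index;
	int offset;
};

static void my_advance_cursor(struct my_rx_cursor *cur,
			      int rxds_per_block, int block_count)
{
	cur->offset++;
	if (cur->offset == rxds_per_block) {
		cur->offset = 0;
		cur->block_index++;
		if (cur->block_index == block_count)
			cur->block_index = 0;
	}
}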
3087 | 3089 | ||
3088 | /** | 3090 | /** |
@@ -3108,12 +3110,12 @@ static void tx_intr_handler(struct fifo_info *fifo_data) | |||
3108 | u8 err_mask; | 3110 | u8 err_mask; |
3109 | 3111 | ||
3110 | if (!spin_trylock_irqsave(&fifo_data->tx_lock, flags)) | 3112 | if (!spin_trylock_irqsave(&fifo_data->tx_lock, flags)) |
3111 | return; | 3113 | return; |
3112 | 3114 | ||
3113 | get_info = fifo_data->tx_curr_get_info; | 3115 | get_info = fifo_data->tx_curr_get_info; |
3114 | memcpy(&put_info, &fifo_data->tx_curr_put_info, sizeof(put_info)); | 3116 | memcpy(&put_info, &fifo_data->tx_curr_put_info, sizeof(put_info)); |
3115 | txdlp = (struct TxD *) fifo_data->list_info[get_info.offset]. | 3117 | txdlp = (struct TxD *) |
3116 | list_virt_addr; | 3118 | fifo_data->list_info[get_info.offset].list_virt_addr; |
3117 | while ((!(txdlp->Control_1 & TXD_LIST_OWN_XENA)) && | 3119 | while ((!(txdlp->Control_1 & TXD_LIST_OWN_XENA)) && |
3118 | (get_info.offset != put_info.offset) && | 3120 | (get_info.offset != put_info.offset) && |
3119 | (txdlp->Host_Control)) { | 3121 | (txdlp->Host_Control)) { |
@@ -3123,44 +3125,43 @@ static void tx_intr_handler(struct fifo_info *fifo_data) | |||
3123 | err = txdlp->Control_1 & TXD_T_CODE; | 3125 | err = txdlp->Control_1 & TXD_T_CODE; |
3124 | if (err & 0x1) { | 3126 | if (err & 0x1) { |
3125 | nic->mac_control.stats_info->sw_stat. | 3127 | nic->mac_control.stats_info->sw_stat. |
3126 | parity_err_cnt++; | 3128 | parity_err_cnt++; |
3127 | } | 3129 | } |
3128 | 3130 | ||
3129 | /* update t_code statistics */ | 3131 | /* update t_code statistics */ |
3130 | err_mask = err >> 48; | 3132 | err_mask = err >> 48; |
3131 | switch(err_mask) { | 3133 | switch (err_mask) { |
3132 | case 2: | 3134 | case 2: |
3133 | nic->mac_control.stats_info->sw_stat. | 3135 | nic->mac_control.stats_info->sw_stat. |
3134 | tx_buf_abort_cnt++; | 3136 | tx_buf_abort_cnt++; |
3135 | break; | 3137 | break; |
3136 | 3138 | ||
3137 | case 3: | 3139 | case 3: |
3138 | nic->mac_control.stats_info->sw_stat. | 3140 | nic->mac_control.stats_info->sw_stat. |
3139 | tx_desc_abort_cnt++; | 3141 | tx_desc_abort_cnt++; |
3140 | break; | 3142 | break; |
3141 | 3143 | ||
3142 | case 7: | 3144 | case 7: |
3143 | nic->mac_control.stats_info->sw_stat. | 3145 | nic->mac_control.stats_info->sw_stat. |
3144 | tx_parity_err_cnt++; | 3146 | tx_parity_err_cnt++; |
3145 | break; | 3147 | break; |
3146 | 3148 | ||
3147 | case 10: | 3149 | case 10: |
3148 | nic->mac_control.stats_info->sw_stat. | 3150 | nic->mac_control.stats_info->sw_stat. |
3149 | tx_link_loss_cnt++; | 3151 | tx_link_loss_cnt++; |
3150 | break; | 3152 | break; |
3151 | 3153 | ||
3152 | case 15: | 3154 | case 15: |
3153 | nic->mac_control.stats_info->sw_stat. | 3155 | nic->mac_control.stats_info->sw_stat. |
3154 | tx_list_proc_err_cnt++; | 3156 | tx_list_proc_err_cnt++; |
3155 | break; | 3157 | break; |
3156 | } | 3158 | } |
3157 | } | 3159 | } |
3158 | 3160 | ||
3159 | skb = s2io_txdl_getskb(fifo_data, txdlp, get_info.offset); | 3161 | skb = s2io_txdl_getskb(fifo_data, txdlp, get_info.offset); |
3160 | if (skb == NULL) { | 3162 | if (skb == NULL) { |
3161 | spin_unlock_irqrestore(&fifo_data->tx_lock, flags); | 3163 | spin_unlock_irqrestore(&fifo_data->tx_lock, flags); |
3162 | DBG_PRINT(ERR_DBG, "%s: Null skb ", | 3164 | DBG_PRINT(ERR_DBG, "%s: Null skb ", __func__); |
3163 | __func__); | ||
3164 | DBG_PRINT(ERR_DBG, "in Tx Free Intr\n"); | 3165 | DBG_PRINT(ERR_DBG, "in Tx Free Intr\n"); |
3165 | return; | 3166 | return; |
3166 | } | 3167 | } |
@@ -3174,10 +3175,9 @@ static void tx_intr_handler(struct fifo_info *fifo_data) | |||
3174 | get_info.offset++; | 3175 | get_info.offset++; |
3175 | if (get_info.offset == get_info.fifo_len + 1) | 3176 | if (get_info.offset == get_info.fifo_len + 1) |
3176 | get_info.offset = 0; | 3177 | get_info.offset = 0; |
3177 | txdlp = (struct TxD *) fifo_data->list_info | 3178 | txdlp = (struct TxD *) |
3178 | [get_info.offset].list_virt_addr; | 3179 | fifo_data->list_info[get_info.offset].list_virt_addr; |
3179 | fifo_data->tx_curr_get_info.offset = | 3180 | fifo_data->tx_curr_get_info.offset = get_info.offset; |
3180 | get_info.offset; | ||
3181 | } | 3181 | } |
3182 | 3182 | ||
3183 | s2io_wake_tx_queue(fifo_data, pkt_cnt, nic->config.multiq); | 3183 | s2io_wake_tx_queue(fifo_data, pkt_cnt, nic->config.multiq); |
@@ -3195,43 +3195,41 @@ static void tx_intr_handler(struct fifo_info *fifo_data) | |||
3195 | * This function is used to write values to the MDIO registers | 3195 | * This function is used to write values to the MDIO registers |
3196 | * NONE | 3196 | * NONE |
3197 | */ | 3197 | */ |
3198 | static void s2io_mdio_write(u32 mmd_type, u64 addr, u16 value, struct net_device *dev) | 3198 | static void s2io_mdio_write(u32 mmd_type, u64 addr, u16 value, |
3199 | struct net_device *dev) | ||
3199 | { | 3200 | { |
3200 | u64 val64 = 0x0; | 3201 | u64 val64; |
3201 | struct s2io_nic *sp = netdev_priv(dev); | 3202 | struct s2io_nic *sp = netdev_priv(dev); |
3202 | struct XENA_dev_config __iomem *bar0 = sp->bar0; | 3203 | struct XENA_dev_config __iomem *bar0 = sp->bar0; |
3203 | 3204 | ||
3204 | //address transaction | 3205 | /* address transaction */ |
3205 | val64 = val64 | MDIO_MMD_INDX_ADDR(addr) | 3206 | val64 = MDIO_MMD_INDX_ADDR(addr) | |
3206 | | MDIO_MMD_DEV_ADDR(mmd_type) | 3207 | MDIO_MMD_DEV_ADDR(mmd_type) | |
3207 | | MDIO_MMS_PRT_ADDR(0x0); | 3208 | MDIO_MMS_PRT_ADDR(0x0); |
3208 | writeq(val64, &bar0->mdio_control); | 3209 | writeq(val64, &bar0->mdio_control); |
3209 | val64 = val64 | MDIO_CTRL_START_TRANS(0xE); | 3210 | val64 = val64 | MDIO_CTRL_START_TRANS(0xE); |
3210 | writeq(val64, &bar0->mdio_control); | 3211 | writeq(val64, &bar0->mdio_control); |
3211 | udelay(100); | 3212 | udelay(100); |
3212 | 3213 | ||
3213 | //Data transaction | 3214 | /* Data transaction */ |
3214 | val64 = 0x0; | 3215 | val64 = MDIO_MMD_INDX_ADDR(addr) | |
3215 | val64 = val64 | MDIO_MMD_INDX_ADDR(addr) | 3216 | MDIO_MMD_DEV_ADDR(mmd_type) | |
3216 | | MDIO_MMD_DEV_ADDR(mmd_type) | 3217 | MDIO_MMS_PRT_ADDR(0x0) | |
3217 | | MDIO_MMS_PRT_ADDR(0x0) | 3218 | MDIO_MDIO_DATA(value) | |
3218 | | MDIO_MDIO_DATA(value) | 3219 | MDIO_OP(MDIO_OP_WRITE_TRANS); |
3219 | | MDIO_OP(MDIO_OP_WRITE_TRANS); | ||
3220 | writeq(val64, &bar0->mdio_control); | 3220 | writeq(val64, &bar0->mdio_control); |
3221 | val64 = val64 | MDIO_CTRL_START_TRANS(0xE); | 3221 | val64 = val64 | MDIO_CTRL_START_TRANS(0xE); |
3222 | writeq(val64, &bar0->mdio_control); | 3222 | writeq(val64, &bar0->mdio_control); |
3223 | udelay(100); | 3223 | udelay(100); |
3224 | 3224 | ||
3225 | val64 = 0x0; | 3225 | val64 = MDIO_MMD_INDX_ADDR(addr) | |
3226 | val64 = val64 | MDIO_MMD_INDX_ADDR(addr) | 3226 | MDIO_MMD_DEV_ADDR(mmd_type) | |
3227 | | MDIO_MMD_DEV_ADDR(mmd_type) | 3227 | MDIO_MMS_PRT_ADDR(0x0) | |
3228 | | MDIO_MMS_PRT_ADDR(0x0) | 3228 | MDIO_OP(MDIO_OP_READ_TRANS); |
3229 | | MDIO_OP(MDIO_OP_READ_TRANS); | ||
3230 | writeq(val64, &bar0->mdio_control); | 3229 | writeq(val64, &bar0->mdio_control); |
3231 | val64 = val64 | MDIO_CTRL_START_TRANS(0xE); | 3230 | val64 = val64 | MDIO_CTRL_START_TRANS(0xE); |
3232 | writeq(val64, &bar0->mdio_control); | 3231 | writeq(val64, &bar0->mdio_control); |
3233 | udelay(100); | 3232 | udelay(100); |
3234 | |||
3235 | } | 3233 | } |
3236 | 3234 | ||
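Each MDIO access above follows the same rhythm: compose a control word, write it to mdio_control, write it again with the START_TRANS field set, and wait for the transaction to complete. Factored out it would look roughly like this (a sketch reusing the driver's bar0 layout and MDIO_* macros):

/* Kick one MDIO transaction: program the control word, start it, wait. */
static void my_mdio_kick(struct XENA_dev_config __iomem *bar0, u64 ctrl)
{
	writeq(ctrl, &bar0->mdio_control);
	writeq(ctrl | MDIO_CTRL_START_TRANS(0xE), &bar0->mdio_control);
	udelay(100);
}

With such a helper, s2io_mdio_write() would reduce to building the address, write and read-back control words and calling it three times.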
3237 | /** | 3235 | /** |
@@ -3251,20 +3249,19 @@ static u64 s2io_mdio_read(u32 mmd_type, u64 addr, struct net_device *dev) | |||
3251 | struct XENA_dev_config __iomem *bar0 = sp->bar0; | 3249 | struct XENA_dev_config __iomem *bar0 = sp->bar0; |
3252 | 3250 | ||
3253 | /* address transaction */ | 3251 | /* address transaction */ |
3254 | val64 = val64 | MDIO_MMD_INDX_ADDR(addr) | 3252 | val64 = val64 | (MDIO_MMD_INDX_ADDR(addr) |
3255 | | MDIO_MMD_DEV_ADDR(mmd_type) | 3253 | | MDIO_MMD_DEV_ADDR(mmd_type) |
3256 | | MDIO_MMS_PRT_ADDR(0x0); | 3254 | | MDIO_MMS_PRT_ADDR(0x0)); |
3257 | writeq(val64, &bar0->mdio_control); | 3255 | writeq(val64, &bar0->mdio_control); |
3258 | val64 = val64 | MDIO_CTRL_START_TRANS(0xE); | 3256 | val64 = val64 | MDIO_CTRL_START_TRANS(0xE); |
3259 | writeq(val64, &bar0->mdio_control); | 3257 | writeq(val64, &bar0->mdio_control); |
3260 | udelay(100); | 3258 | udelay(100); |
3261 | 3259 | ||
3262 | /* Data transaction */ | 3260 | /* Data transaction */ |
3263 | val64 = 0x0; | 3261 | val64 = MDIO_MMD_INDX_ADDR(addr) | |
3264 | val64 = val64 | MDIO_MMD_INDX_ADDR(addr) | 3262 | MDIO_MMD_DEV_ADDR(mmd_type) | |
3265 | | MDIO_MMD_DEV_ADDR(mmd_type) | 3263 | MDIO_MMS_PRT_ADDR(0x0) | |
3266 | | MDIO_MMS_PRT_ADDR(0x0) | 3264 | MDIO_OP(MDIO_OP_READ_TRANS); |
3267 | | MDIO_OP(MDIO_OP_READ_TRANS); | ||
3268 | writeq(val64, &bar0->mdio_control); | 3265 | writeq(val64, &bar0->mdio_control); |
3269 | val64 = val64 | MDIO_CTRL_START_TRANS(0xE); | 3266 | val64 = val64 | MDIO_CTRL_START_TRANS(0xE); |
3270 | writeq(val64, &bar0->mdio_control); | 3267 | writeq(val64, &bar0->mdio_control); |
@@ -3276,6 +3273,7 @@ static u64 s2io_mdio_read(u32 mmd_type, u64 addr, struct net_device *dev) | |||
3276 | rval64 = rval64 >> 16; | 3273 | rval64 = rval64 >> 16; |
3277 | return rval64; | 3274 | return rval64; |
3278 | } | 3275 | } |
3276 | |||
3279 | /** | 3277 | /** |
3280 | * s2io_chk_xpak_counter - Function to check the status of the xpak counters | 3278 | * s2io_chk_xpak_counter - Function to check the status of the xpak counters |
3281 | * @counter : counter value to be updated | 3279 | * @counter : counter value to be updated
@@ -3286,45 +3284,43 @@ static u64 s2io_mdio_read(u32 mmd_type, u64 addr, struct net_device *dev) | |||
3286 | * NONE | 3284 | * NONE |
3287 | */ | 3285 | */ |
3288 | 3286 | ||
3289 | static void s2io_chk_xpak_counter(u64 *counter, u64 * regs_stat, u32 index, u16 flag, u16 type) | 3287 | static void s2io_chk_xpak_counter(u64 *counter, u64 * regs_stat, u32 index, |
3288 | u16 flag, u16 type) | ||
3290 | { | 3289 | { |
3291 | u64 mask = 0x3; | 3290 | u64 mask = 0x3; |
3292 | u64 val64; | 3291 | u64 val64; |
3293 | int i; | 3292 | int i; |
3294 | for(i = 0; i <index; i++) | 3293 | for (i = 0; i < index; i++) |
3295 | mask = mask << 0x2; | 3294 | mask = mask << 0x2; |
3296 | 3295 | ||
3297 | if(flag > 0) | 3296 | if (flag > 0) { |
3298 | { | ||
3299 | *counter = *counter + 1; | 3297 | *counter = *counter + 1; |
3300 | val64 = *regs_stat & mask; | 3298 | val64 = *regs_stat & mask; |
3301 | val64 = val64 >> (index * 0x2); | 3299 | val64 = val64 >> (index * 0x2); |
3302 | val64 = val64 + 1; | 3300 | val64 = val64 + 1; |
3303 | if(val64 == 3) | 3301 | if (val64 == 3) { |
3304 | { | 3302 | switch (type) { |
3305 | switch(type) | ||
3306 | { | ||
3307 | case 1: | 3303 | case 1: |
3308 | DBG_PRINT(ERR_DBG, "Take Xframe NIC out of " | 3304 | DBG_PRINT(ERR_DBG, "Take Xframe NIC out of " |
3309 | "service. Excessive temperatures may " | 3305 | "service. Excessive temperatures may " |
3310 | "result in premature transceiver " | 3306 | "result in premature transceiver " |
3311 | "failure \n"); | 3307 | "failure \n"); |
3312 | break; | 3308 | break; |
3313 | case 2: | 3309 | case 2: |
3314 | DBG_PRINT(ERR_DBG, "Take Xframe NIC out of " | 3310 | DBG_PRINT(ERR_DBG, "Take Xframe NIC out of " |
3315 | "service Excessive bias currents may " | 3311 | "service Excessive bias currents may " |
3316 | "indicate imminent laser diode " | 3312 | "indicate imminent laser diode " |
3317 | "failure \n"); | 3313 | "failure \n"); |
3318 | break; | 3314 | break; |
3319 | case 3: | 3315 | case 3: |
3320 | DBG_PRINT(ERR_DBG, "Take Xframe NIC out of " | 3316 | DBG_PRINT(ERR_DBG, "Take Xframe NIC out of " |
3321 | "service Excessive laser output " | 3317 | "service Excessive laser output " |
3322 | "power may saturate far-end " | 3318 | "power may saturate far-end " |
3323 | "receiver\n"); | 3319 | "receiver\n"); |
3324 | break; | 3320 | break; |
3325 | default: | 3321 | default: |
3326 | DBG_PRINT(ERR_DBG, "Incorrect XPAK Alarm " | 3322 | DBG_PRINT(ERR_DBG, |
3327 | "type \n"); | 3323 | "Incorrect XPAK Alarm type\n"); |
3328 | } | 3324 | } |
3329 | val64 = 0x0; | 3325 | val64 = 0x0; |
3330 | } | 3326 | } |
@@ -3358,16 +3354,14 @@ static void s2io_updt_xpak_counter(struct net_device *dev) | |||
3358 | addr = MDIO_CTRL1; | 3354 | addr = MDIO_CTRL1; |
3359 | val64 = 0x0; | 3355 | val64 = 0x0; |
3360 | val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, addr, dev); | 3356 | val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, addr, dev); |
3361 | if((val64 == 0xFFFF) || (val64 == 0x0000)) | 3357 | if ((val64 == 0xFFFF) || (val64 == 0x0000)) { |
3362 | { | ||
3363 | DBG_PRINT(ERR_DBG, "ERR: MDIO slave access failed - " | 3358 | DBG_PRINT(ERR_DBG, "ERR: MDIO slave access failed - " |
3364 | "Returned %llx\n", (unsigned long long)val64); | 3359 | "Returned %llx\n", (unsigned long long)val64); |
3365 | return; | 3360 | return; |
3366 | } | 3361 | } |
3367 | 3362 | ||
3368 | /* Check for the expected value of control reg 1 */ | 3363 | /* Check for the expected value of control reg 1 */ |
3369 | if(val64 != MDIO_CTRL1_SPEED10G) | 3364 | if (val64 != MDIO_CTRL1_SPEED10G) { |
3370 | { | ||
3371 | DBG_PRINT(ERR_DBG, "Incorrect value at PMA address 0x0000 - "); | 3365 | DBG_PRINT(ERR_DBG, "Incorrect value at PMA address 0x0000 - "); |
3372 | DBG_PRINT(ERR_DBG, "Returned: %llx- Expected: 0x%x\n", | 3366 | DBG_PRINT(ERR_DBG, "Returned: %llx- Expected: 0x%x\n", |
3373 | (unsigned long long)val64, MDIO_CTRL1_SPEED10G); | 3367 | (unsigned long long)val64, MDIO_CTRL1_SPEED10G); |
@@ -3387,28 +3381,28 @@ static void s2io_updt_xpak_counter(struct net_device *dev) | |||
3387 | flag = CHECKBIT(val64, 0x7); | 3381 | flag = CHECKBIT(val64, 0x7); |
3388 | type = 1; | 3382 | type = 1; |
3389 | s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_transceiver_temp_high, | 3383 | s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_transceiver_temp_high, |
3390 | &stat_info->xpak_stat.xpak_regs_stat, | 3384 | &stat_info->xpak_stat.xpak_regs_stat, |
3391 | 0x0, flag, type); | 3385 | 0x0, flag, type); |
3392 | 3386 | ||
3393 | if(CHECKBIT(val64, 0x6)) | 3387 | if (CHECKBIT(val64, 0x6)) |
3394 | stat_info->xpak_stat.alarm_transceiver_temp_low++; | 3388 | stat_info->xpak_stat.alarm_transceiver_temp_low++; |
3395 | 3389 | ||
3396 | flag = CHECKBIT(val64, 0x3); | 3390 | flag = CHECKBIT(val64, 0x3); |
3397 | type = 2; | 3391 | type = 2; |
3398 | s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_laser_bias_current_high, | 3392 | s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_laser_bias_current_high, |
3399 | &stat_info->xpak_stat.xpak_regs_stat, | 3393 | &stat_info->xpak_stat.xpak_regs_stat, |
3400 | 0x2, flag, type); | 3394 | 0x2, flag, type); |
3401 | 3395 | ||
3402 | if(CHECKBIT(val64, 0x2)) | 3396 | if (CHECKBIT(val64, 0x2)) |
3403 | stat_info->xpak_stat.alarm_laser_bias_current_low++; | 3397 | stat_info->xpak_stat.alarm_laser_bias_current_low++; |
3404 | 3398 | ||
3405 | flag = CHECKBIT(val64, 0x1); | 3399 | flag = CHECKBIT(val64, 0x1); |
3406 | type = 3; | 3400 | type = 3; |
3407 | s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_laser_output_power_high, | 3401 | s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_laser_output_power_high, |
3408 | &stat_info->xpak_stat.xpak_regs_stat, | 3402 | &stat_info->xpak_stat.xpak_regs_stat, |
3409 | 0x4, flag, type); | 3403 | 0x4, flag, type); |
3410 | 3404 | ||
3411 | if(CHECKBIT(val64, 0x0)) | 3405 | if (CHECKBIT(val64, 0x0)) |
3412 | stat_info->xpak_stat.alarm_laser_output_power_low++; | 3406 | stat_info->xpak_stat.alarm_laser_output_power_low++; |
3413 | 3407 | ||
3414 | /* Reading the Warning flags */ | 3408 | /* Reading the Warning flags */ |
@@ -3416,22 +3410,22 @@ static void s2io_updt_xpak_counter(struct net_device *dev) | |||
3416 | val64 = 0x0; | 3410 | val64 = 0x0; |
3417 | val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, addr, dev); | 3411 | val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, addr, dev); |
3418 | 3412 | ||
3419 | if(CHECKBIT(val64, 0x7)) | 3413 | if (CHECKBIT(val64, 0x7)) |
3420 | stat_info->xpak_stat.warn_transceiver_temp_high++; | 3414 | stat_info->xpak_stat.warn_transceiver_temp_high++; |
3421 | 3415 | ||
3422 | if(CHECKBIT(val64, 0x6)) | 3416 | if (CHECKBIT(val64, 0x6)) |
3423 | stat_info->xpak_stat.warn_transceiver_temp_low++; | 3417 | stat_info->xpak_stat.warn_transceiver_temp_low++; |
3424 | 3418 | ||
3425 | if(CHECKBIT(val64, 0x3)) | 3419 | if (CHECKBIT(val64, 0x3)) |
3426 | stat_info->xpak_stat.warn_laser_bias_current_high++; | 3420 | stat_info->xpak_stat.warn_laser_bias_current_high++; |
3427 | 3421 | ||
3428 | if(CHECKBIT(val64, 0x2)) | 3422 | if (CHECKBIT(val64, 0x2)) |
3429 | stat_info->xpak_stat.warn_laser_bias_current_low++; | 3423 | stat_info->xpak_stat.warn_laser_bias_current_low++; |
3430 | 3424 | ||
3431 | if(CHECKBIT(val64, 0x1)) | 3425 | if (CHECKBIT(val64, 0x1)) |
3432 | stat_info->xpak_stat.warn_laser_output_power_high++; | 3426 | stat_info->xpak_stat.warn_laser_output_power_high++; |
3433 | 3427 | ||
3434 | if(CHECKBIT(val64, 0x0)) | 3428 | if (CHECKBIT(val64, 0x0)) |
3435 | stat_info->xpak_stat.warn_laser_output_power_low++; | 3429 | stat_info->xpak_stat.warn_laser_output_power_low++; |
3436 | } | 3430 | } |
3437 | 3431 | ||
@@ -3447,7 +3441,7 @@ static void s2io_updt_xpak_counter(struct net_device *dev) | |||
3447 | */ | 3441 | */ |
3448 | 3442 | ||
3449 | static int wait_for_cmd_complete(void __iomem *addr, u64 busy_bit, | 3443 | static int wait_for_cmd_complete(void __iomem *addr, u64 busy_bit, |
3450 | int bit_state) | 3444 | int bit_state) |
3451 | { | 3445 | { |
3452 | int ret = FAILURE, cnt = 0, delay = 1; | 3446 | int ret = FAILURE, cnt = 0, delay = 1; |
3453 | u64 val64; | 3447 | u64 val64; |
@@ -3469,7 +3463,7 @@ static int wait_for_cmd_complete(void __iomem *addr, u64 busy_bit, | |||
3469 | } | 3463 | } |
3470 | } | 3464 | } |
3471 | 3465 | ||
3472 | if(in_interrupt()) | 3466 | if (in_interrupt()) |
3473 | mdelay(delay); | 3467 | mdelay(delay); |
3474 | else | 3468 | else |
3475 | msleep(delay); | 3469 | msleep(delay); |
@@ -3509,7 +3503,7 @@ static u16 check_pci_device_id(u16 id) | |||
3509 | * void. | 3503 | * void. |
3510 | */ | 3504 | */ |
3511 | 3505 | ||
3512 | static void s2io_reset(struct s2io_nic * sp) | 3506 | static void s2io_reset(struct s2io_nic *sp) |
3513 | { | 3507 | { |
3514 | struct XENA_dev_config __iomem *bar0 = sp->bar0; | 3508 | struct XENA_dev_config __iomem *bar0 = sp->bar0; |
3515 | u64 val64; | 3509 | u64 val64; |
@@ -3519,17 +3513,16 @@ static void s2io_reset(struct s2io_nic * sp) | |||
3519 | unsigned long long up_cnt, down_cnt, up_time, down_time, reset_cnt; | 3513 | unsigned long long up_cnt, down_cnt, up_time, down_time, reset_cnt; |
3520 | unsigned long long mem_alloc_cnt, mem_free_cnt, watchdog_cnt; | 3514 | unsigned long long mem_alloc_cnt, mem_free_cnt, watchdog_cnt; |
3521 | 3515 | ||
3522 | DBG_PRINT(INIT_DBG,"%s - Resetting XFrame card %s\n", | 3516 | DBG_PRINT(INIT_DBG, "%s - Resetting XFrame card %s\n", |
3523 | __func__, sp->dev->name); | 3517 | __func__, sp->dev->name); |
3524 | 3518 | ||
3525 | /* Back up the PCI-X CMD reg, don't want to lose MMRBC, OST settings */ | 3519 | /* Back up the PCI-X CMD reg, don't want to lose MMRBC, OST settings */
3526 | pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER, &(pci_cmd)); | 3520 | pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER, &(pci_cmd)); |
3527 | 3521 | ||
3528 | val64 = SW_RESET_ALL; | 3522 | val64 = SW_RESET_ALL; |
3529 | writeq(val64, &bar0->sw_reset); | 3523 | writeq(val64, &bar0->sw_reset); |
3530 | if (strstr(sp->product_name, "CX4")) { | 3524 | if (strstr(sp->product_name, "CX4")) |
3531 | msleep(750); | 3525 | msleep(750); |
3532 | } | ||
3533 | msleep(250); | 3526 | msleep(250); |
3534 | for (i = 0; i < S2IO_MAX_PCI_CONFIG_SPACE_REINIT; i++) { | 3527 | for (i = 0; i < S2IO_MAX_PCI_CONFIG_SPACE_REINIT; i++) { |
3535 | 3528 | ||
@@ -3541,9 +3534,8 @@ static void s2io_reset(struct s2io_nic * sp) | |||
3541 | msleep(200); | 3534 | msleep(200); |
3542 | } | 3535 | } |
3543 | 3536 | ||
3544 | if (check_pci_device_id(val16) == (u16)PCI_ANY_ID) { | 3537 | if (check_pci_device_id(val16) == (u16)PCI_ANY_ID) |
3545 | DBG_PRINT(ERR_DBG,"%s SW_Reset failed!\n", __func__); | 3538 | DBG_PRINT(ERR_DBG, "%s SW_Reset failed!\n", __func__); |
3546 | } | ||
3547 | 3539 | ||
3548 | pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER, pci_cmd); | 3540 | pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER, pci_cmd); |
3549 | 3541 | ||
@@ -3571,7 +3563,7 @@ static void s2io_reset(struct s2io_nic * sp) | |||
3571 | } | 3563 | } |
3572 | 3564 | ||
3573 | /* Reset device statistics maintained by OS */ | 3565 | /* Reset device statistics maintained by OS */ |
3574 | memset(&sp->stats, 0, sizeof (struct net_device_stats)); | 3566 | memset(&sp->stats, 0, sizeof(struct net_device_stats)); |
3575 | 3567 | ||
3576 | up_cnt = sp->mac_control.stats_info->sw_stat.link_up_cnt; | 3568 | up_cnt = sp->mac_control.stats_info->sw_stat.link_up_cnt; |
3577 | down_cnt = sp->mac_control.stats_info->sw_stat.link_down_cnt; | 3569 | down_cnt = sp->mac_control.stats_info->sw_stat.link_down_cnt; |
@@ -3626,7 +3618,7 @@ static void s2io_reset(struct s2io_nic * sp) | |||
3626 | * SUCCESS on success and FAILURE on failure. | 3618 | * SUCCESS on success and FAILURE on failure. |
3627 | */ | 3619 | */ |
3628 | 3620 | ||
3629 | static int s2io_set_swapper(struct s2io_nic * sp) | 3621 | static int s2io_set_swapper(struct s2io_nic *sp) |
3630 | { | 3622 | { |
3631 | struct net_device *dev = sp->dev; | 3623 | struct net_device *dev = sp->dev; |
3632 | struct XENA_dev_config __iomem *bar0 = sp->bar0; | 3624 | struct XENA_dev_config __iomem *bar0 = sp->bar0; |
@@ -3645,7 +3637,7 @@ static int s2io_set_swapper(struct s2io_nic * sp) | |||
3645 | 0x4200004242000042ULL, /* FE=0, SE=1 */ | 3637 | 0x4200004242000042ULL, /* FE=0, SE=1 */ |
3646 | 0}; /* FE=0, SE=0 */ | 3638 | 0}; /* FE=0, SE=0 */ |
3647 | 3639 | ||
3648 | while(i<4) { | 3640 | while (i < 4) { |
3649 | writeq(value[i], &bar0->swapper_ctrl); | 3641 | writeq(value[i], &bar0->swapper_ctrl); |
3650 | val64 = readq(&bar0->pif_rd_swapper_fb); | 3642 | val64 = readq(&bar0->pif_rd_swapper_fb); |
3651 | if (val64 == 0x0123456789ABCDEFULL) | 3643 | if (val64 == 0x0123456789ABCDEFULL) |
@@ -3654,9 +3646,9 @@ static int s2io_set_swapper(struct s2io_nic * sp) | |||
3654 | } | 3646 | } |
3655 | if (i == 4) { | 3647 | if (i == 4) { |
3656 | DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ", | 3648 | DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ", |
3657 | dev->name); | 3649 | dev->name); |
3658 | DBG_PRINT(ERR_DBG, "feedback read %llx\n", | 3650 | DBG_PRINT(ERR_DBG, "feedback read %llx\n", |
3659 | (unsigned long long) val64); | 3651 | (unsigned long long)val64); |
3660 | return FAILURE; | 3652 | return FAILURE; |
3661 | } | 3653 | } |
3662 | valr = value[i]; | 3654 | valr = value[i]; |
@@ -3668,22 +3660,22 @@ static int s2io_set_swapper(struct s2io_nic * sp) | |||
3668 | writeq(valt, &bar0->xmsi_address); | 3660 | writeq(valt, &bar0->xmsi_address); |
3669 | val64 = readq(&bar0->xmsi_address); | 3661 | val64 = readq(&bar0->xmsi_address); |
3670 | 3662 | ||
3671 | if(val64 != valt) { | 3663 | if (val64 != valt) { |
3672 | int i = 0; | 3664 | int i = 0; |
3673 | u64 value[] = { 0x00C3C30000C3C300ULL, /* FE=1, SE=1 */ | 3665 | u64 value[] = { 0x00C3C30000C3C300ULL, /* FE=1, SE=1 */ |
3674 | 0x0081810000818100ULL, /* FE=1, SE=0 */ | 3666 | 0x0081810000818100ULL, /* FE=1, SE=0 */ |
3675 | 0x0042420000424200ULL, /* FE=0, SE=1 */ | 3667 | 0x0042420000424200ULL, /* FE=0, SE=1 */ |
3676 | 0}; /* FE=0, SE=0 */ | 3668 | 0}; /* FE=0, SE=0 */ |
3677 | 3669 | ||
3678 | while(i<4) { | 3670 | while (i < 4) { |
3679 | writeq((value[i] | valr), &bar0->swapper_ctrl); | 3671 | writeq((value[i] | valr), &bar0->swapper_ctrl); |
3680 | writeq(valt, &bar0->xmsi_address); | 3672 | writeq(valt, &bar0->xmsi_address); |
3681 | val64 = readq(&bar0->xmsi_address); | 3673 | val64 = readq(&bar0->xmsi_address); |
3682 | if(val64 == valt) | 3674 | if (val64 == valt) |
3683 | break; | 3675 | break; |
3684 | i++; | 3676 | i++; |
3685 | } | 3677 | } |
3686 | if(i == 4) { | 3678 | if (i == 4) { |
3687 | unsigned long long x = val64; | 3679 | unsigned long long x = val64; |
3688 | DBG_PRINT(ERR_DBG, "Write failed, Xmsi_addr "); | 3680 | DBG_PRINT(ERR_DBG, "Write failed, Xmsi_addr "); |
3689 | DBG_PRINT(ERR_DBG, "reads:0x%llx\n", x); | 3681 | DBG_PRINT(ERR_DBG, "reads:0x%llx\n", x); |
@@ -3693,21 +3685,22 @@ static int s2io_set_swapper(struct s2io_nic * sp) | |||
3693 | val64 = readq(&bar0->swapper_ctrl); | 3685 | val64 = readq(&bar0->swapper_ctrl); |
3694 | val64 &= 0xFFFF000000000000ULL; | 3686 | val64 &= 0xFFFF000000000000ULL; |
3695 | 3687 | ||
3696 | #ifdef __BIG_ENDIAN | 3688 | #ifdef __BIG_ENDIAN |
3697 | /* | 3689 | /* |
3698 | * The device is by default set to a big endian format, so a | 3690 | * The device is by default set to a big endian format, so a
3699 | * big endian driver need not set anything. | 3691 | * big endian driver need not set anything. |
3700 | */ | 3692 | */ |
3701 | val64 |= (SWAPPER_CTRL_TXP_FE | | 3693 | val64 |= (SWAPPER_CTRL_TXP_FE | |
3702 | SWAPPER_CTRL_TXP_SE | | 3694 | SWAPPER_CTRL_TXP_SE | |
3703 | SWAPPER_CTRL_TXD_R_FE | | 3695 | SWAPPER_CTRL_TXD_R_FE | |
3704 | SWAPPER_CTRL_TXD_W_FE | | 3696 | SWAPPER_CTRL_TXD_W_FE | |
3705 | SWAPPER_CTRL_TXF_R_FE | | 3697 | SWAPPER_CTRL_TXF_R_FE | |
3706 | SWAPPER_CTRL_RXD_R_FE | | 3698 | SWAPPER_CTRL_RXD_R_FE | |
3707 | SWAPPER_CTRL_RXD_W_FE | | 3699 | SWAPPER_CTRL_RXD_W_FE | |
3708 | SWAPPER_CTRL_RXF_W_FE | | 3700 | SWAPPER_CTRL_RXF_W_FE | |
3709 | SWAPPER_CTRL_XMSI_FE | | 3701 | SWAPPER_CTRL_XMSI_FE | |
3710 | SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE); | 3702 | SWAPPER_CTRL_STATS_FE | |
3703 | SWAPPER_CTRL_STATS_SE); | ||
3711 | if (sp->config.intr_type == INTA) | 3704 | if (sp->config.intr_type == INTA) |
3712 | val64 |= SWAPPER_CTRL_XMSI_SE; | 3705 | val64 |= SWAPPER_CTRL_XMSI_SE; |
3713 | writeq(val64, &bar0->swapper_ctrl); | 3706 | writeq(val64, &bar0->swapper_ctrl); |
@@ -3718,19 +3711,20 @@ static int s2io_set_swapper(struct s2io_nic * sp) | |||
3718 | * we want to set. | 3711 | * we want to set. |
3719 | */ | 3712 | */ |
3720 | val64 |= (SWAPPER_CTRL_TXP_FE | | 3713 | val64 |= (SWAPPER_CTRL_TXP_FE | |
3721 | SWAPPER_CTRL_TXP_SE | | 3714 | SWAPPER_CTRL_TXP_SE | |
3722 | SWAPPER_CTRL_TXD_R_FE | | 3715 | SWAPPER_CTRL_TXD_R_FE | |
3723 | SWAPPER_CTRL_TXD_R_SE | | 3716 | SWAPPER_CTRL_TXD_R_SE | |
3724 | SWAPPER_CTRL_TXD_W_FE | | 3717 | SWAPPER_CTRL_TXD_W_FE | |
3725 | SWAPPER_CTRL_TXD_W_SE | | 3718 | SWAPPER_CTRL_TXD_W_SE | |
3726 | SWAPPER_CTRL_TXF_R_FE | | 3719 | SWAPPER_CTRL_TXF_R_FE | |
3727 | SWAPPER_CTRL_RXD_R_FE | | 3720 | SWAPPER_CTRL_RXD_R_FE | |
3728 | SWAPPER_CTRL_RXD_R_SE | | 3721 | SWAPPER_CTRL_RXD_R_SE | |
3729 | SWAPPER_CTRL_RXD_W_FE | | 3722 | SWAPPER_CTRL_RXD_W_FE | |
3730 | SWAPPER_CTRL_RXD_W_SE | | 3723 | SWAPPER_CTRL_RXD_W_SE | |
3731 | SWAPPER_CTRL_RXF_W_FE | | 3724 | SWAPPER_CTRL_RXF_W_FE | |
3732 | SWAPPER_CTRL_XMSI_FE | | 3725 | SWAPPER_CTRL_XMSI_FE | |
3733 | SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE); | 3726 | SWAPPER_CTRL_STATS_FE | |
3727 | SWAPPER_CTRL_STATS_SE); | ||
3734 | if (sp->config.intr_type == INTA) | 3728 | if (sp->config.intr_type == INTA) |
3735 | val64 |= SWAPPER_CTRL_XMSI_SE; | 3729 | val64 |= SWAPPER_CTRL_XMSI_SE; |
3736 | writeq(val64, &bar0->swapper_ctrl); | 3730 | writeq(val64, &bar0->swapper_ctrl); |
@@ -3747,7 +3741,7 @@ static int s2io_set_swapper(struct s2io_nic * sp) | |||
3747 | DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ", | 3741 | DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ", |
3748 | dev->name); | 3742 | dev->name); |
3749 | DBG_PRINT(ERR_DBG, "feedback read %llx\n", | 3743 | DBG_PRINT(ERR_DBG, "feedback read %llx\n", |
3750 | (unsigned long long) val64); | 3744 | (unsigned long long)val64); |
3751 | return FAILURE; | 3745 | return FAILURE; |
3752 | } | 3746 | } |
3753 | 3747 | ||
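The swapper setup above is essentially a probe: write each candidate swapper_ctrl value, read pif_rd_swapper_fb back, and accept the first setting for which the feedback register returns the expected 0x0123456789ABCDEF pattern. The probe loop in isolation (sketch; the candidate table is passed in rather than hard-coded):

/* Try each candidate swapper setting until the feedback register reads
 * back correctly; returns the winning index or -1 on failure. */
static int my_probe_swapper(struct XENA_dev_config __iomem *bar0,
			    const u64 *candidates, int n)
{
	int i;

	for (i = 0; i < n; i++) {
		writeq(candidates[i], &bar0->swapper_ctrl);
		if (readq(&bar0->pif_rd_swapper_fb) == 0x0123456789ABCDEFULL)
			return i;
	}
	return -1;
}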
@@ -3766,7 +3760,7 @@ static int wait_for_msix_trans(struct s2io_nic *nic, int i) | |||
3766 | break; | 3760 | break; |
3767 | mdelay(1); | 3761 | mdelay(1); |
3768 | cnt++; | 3762 | cnt++; |
3769 | } while(cnt < 5); | 3763 | } while (cnt < 5); |
3770 | if (cnt == 5) { | 3764 | if (cnt == 5) { |
3771 | DBG_PRINT(ERR_DBG, "XMSI # %d Access failed\n", i); | 3765 | DBG_PRINT(ERR_DBG, "XMSI # %d Access failed\n", i); |
3772 | ret = 1; | 3766 | ret = 1; |
@@ -3781,12 +3775,11 @@ static void restore_xmsi_data(struct s2io_nic *nic) | |||
3781 | u64 val64; | 3775 | u64 val64; |
3782 | int i, msix_index; | 3776 | int i, msix_index; |
3783 | 3777 | ||
3784 | |||
3785 | if (nic->device_type == XFRAME_I_DEVICE) | 3778 | if (nic->device_type == XFRAME_I_DEVICE) |
3786 | return; | 3779 | return; |
3787 | 3780 | ||
3788 | for (i=0; i < MAX_REQUESTED_MSI_X; i++) { | 3781 | for (i = 0; i < MAX_REQUESTED_MSI_X; i++) { |
3789 | msix_index = (i) ? ((i-1) * 8 + 1): 0; | 3782 | msix_index = (i) ? ((i-1) * 8 + 1) : 0; |
3790 | writeq(nic->msix_info[i].addr, &bar0->xmsi_address); | 3783 | writeq(nic->msix_info[i].addr, &bar0->xmsi_address); |
3791 | writeq(nic->msix_info[i].data, &bar0->xmsi_data); | 3784 | writeq(nic->msix_info[i].data, &bar0->xmsi_data); |
3792 | val64 = (s2BIT(7) | s2BIT(15) | vBIT(msix_index, 26, 6)); | 3785 | val64 = (s2BIT(7) | s2BIT(15) | vBIT(msix_index, 26, 6)); |
@@ -3808,8 +3801,8 @@ static void store_xmsi_data(struct s2io_nic *nic) | |||
3808 | return; | 3801 | return; |
3809 | 3802 | ||
3810 | /* Store and display */ | 3803 | /* Store and display */ |
3811 | for (i=0; i < MAX_REQUESTED_MSI_X; i++) { | 3804 | for (i = 0; i < MAX_REQUESTED_MSI_X; i++) { |
3812 | msix_index = (i) ? ((i-1) * 8 + 1): 0; | 3805 | msix_index = (i) ? ((i-1) * 8 + 1) : 0; |
3813 | val64 = (s2BIT(15) | vBIT(msix_index, 26, 6)); | 3806 | val64 = (s2BIT(15) | vBIT(msix_index, 26, 6)); |
3814 | writeq(val64, &bar0->xmsi_access); | 3807 | writeq(val64, &bar0->xmsi_access); |
3815 | if (wait_for_msix_trans(nic, msix_index)) { | 3808 | if (wait_for_msix_trans(nic, msix_index)) { |
@@ -3836,8 +3829,8 @@ static int s2io_enable_msi_x(struct s2io_nic *nic) | |||
3836 | size = nic->num_entries * sizeof(struct msix_entry); | 3829 | size = nic->num_entries * sizeof(struct msix_entry); |
3837 | nic->entries = kzalloc(size, GFP_KERNEL); | 3830 | nic->entries = kzalloc(size, GFP_KERNEL); |
3838 | if (!nic->entries) { | 3831 | if (!nic->entries) { |
3839 | DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n", \ | 3832 | DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n", |
3840 | __func__); | 3833 | __func__); |
3841 | nic->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++; | 3834 | nic->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++; |
3842 | return -ENOMEM; | 3835 | return -ENOMEM; |
3843 | } | 3836 | } |
@@ -3847,7 +3840,7 @@ static int s2io_enable_msi_x(struct s2io_nic *nic) | |||
3847 | nic->s2io_entries = kzalloc(size, GFP_KERNEL); | 3840 | nic->s2io_entries = kzalloc(size, GFP_KERNEL); |
3848 | if (!nic->s2io_entries) { | 3841 | if (!nic->s2io_entries) { |
3849 | DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n", | 3842 | DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n", |
3850 | __func__); | 3843 | __func__); |
3851 | nic->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++; | 3844 | nic->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++; |
3852 | kfree(nic->entries); | 3845 | kfree(nic->entries); |
3853 | nic->mac_control.stats_info->sw_stat.mem_freed | 3846 | nic->mac_control.stats_info->sw_stat.mem_freed |
@@ -3926,14 +3919,14 @@ static int s2io_test_msi(struct s2io_nic *sp) | |||
3926 | u64 val64, saved64; | 3919 | u64 val64, saved64; |
3927 | 3920 | ||
3928 | err = request_irq(sp->entries[1].vector, s2io_test_intr, 0, | 3921 | err = request_irq(sp->entries[1].vector, s2io_test_intr, 0, |
3929 | sp->name, sp); | 3922 | sp->name, sp); |
3930 | if (err) { | 3923 | if (err) { |
3931 | DBG_PRINT(ERR_DBG, "%s: PCI %s: cannot assign irq %d\n", | 3924 | DBG_PRINT(ERR_DBG, "%s: PCI %s: cannot assign irq %d\n", |
3932 | sp->dev->name, pci_name(pdev), pdev->irq); | 3925 | sp->dev->name, pci_name(pdev), pdev->irq); |
3933 | return err; | 3926 | return err; |
3934 | } | 3927 | } |
3935 | 3928 | ||
3936 | init_waitqueue_head (&sp->msi_wait); | 3929 | init_waitqueue_head(&sp->msi_wait); |
3937 | sp->msi_detected = 0; | 3930 | sp->msi_detected = 0; |
3938 | 3931 | ||
3939 | saved64 = val64 = readq(&bar0->scheduled_int_ctrl); | 3932 | saved64 = val64 = readq(&bar0->scheduled_int_ctrl); |
@@ -3947,8 +3940,8 @@ static int s2io_test_msi(struct s2io_nic *sp) | |||
3947 | if (!sp->msi_detected) { | 3940 | if (!sp->msi_detected) { |
3948 | /* MSI(X) test failed, go back to INTx mode */ | 3941 | /* MSI(X) test failed, go back to INTx mode */ |
3949 | DBG_PRINT(ERR_DBG, "%s: PCI %s: No interrupt was generated " | 3942 | DBG_PRINT(ERR_DBG, "%s: PCI %s: No interrupt was generated " |
3950 | "using MSI(X) during test\n", sp->dev->name, | 3943 | "using MSI(X) during test\n", sp->dev->name, |
3951 | pci_name(pdev)); | 3944 | pci_name(pdev)); |
3952 | 3945 | ||
3953 | err = -EOPNOTSUPP; | 3946 | err = -EOPNOTSUPP; |
3954 | } | 3947 | } |
@@ -3966,8 +3959,7 @@ static void remove_msix_isr(struct s2io_nic *sp) | |||
3966 | u16 msi_control; | 3959 | u16 msi_control; |
3967 | 3960 | ||
3968 | for (i = 0; i < sp->num_entries; i++) { | 3961 | for (i = 0; i < sp->num_entries; i++) { |
3969 | if (sp->s2io_entries[i].in_use == | 3962 | if (sp->s2io_entries[i].in_use == MSIX_REGISTERED_SUCCESS) { |
3970 | MSIX_REGISTERED_SUCCESS) { | ||
3971 | int vector = sp->entries[i].vector; | 3963 | int vector = sp->entries[i].vector; |
3972 | void *arg = sp->s2io_entries[i].arg; | 3964 | void *arg = sp->s2io_entries[i].arg; |
3973 | free_irq(vector, arg); | 3965 | free_irq(vector, arg); |
@@ -4043,12 +4035,12 @@ hw_init_failed: | |||
4043 | if (sp->entries) { | 4035 | if (sp->entries) { |
4044 | kfree(sp->entries); | 4036 | kfree(sp->entries); |
4045 | sp->mac_control.stats_info->sw_stat.mem_freed | 4037 | sp->mac_control.stats_info->sw_stat.mem_freed |
4046 | += (sp->num_entries * sizeof(struct msix_entry)); | 4038 | += (sp->num_entries * sizeof(struct msix_entry)); |
4047 | } | 4039 | } |
4048 | if (sp->s2io_entries) { | 4040 | if (sp->s2io_entries) { |
4049 | kfree(sp->s2io_entries); | 4041 | kfree(sp->s2io_entries); |
4050 | sp->mac_control.stats_info->sw_stat.mem_freed | 4042 | sp->mac_control.stats_info->sw_stat.mem_freed |
4051 | += (sp->num_entries * sizeof(struct s2io_msix_entry)); | 4043 | += (sp->num_entries * sizeof(struct s2io_msix_entry)); |
4052 | } | 4044 | } |
4053 | } | 4045 | } |
4054 | return err; | 4046 | return err; |
@@ -4075,8 +4067,8 @@ static int s2io_close(struct net_device *dev) | |||
4075 | int offset; | 4067 | int offset; |
4076 | 4068 | ||
4077 | /* Return if the device is already closed * | 4069 | /* Return if the device is already closed * |
4078 | * Can happen when s2io_card_up failed in change_mtu * | 4070 | * Can happen when s2io_card_up failed in change_mtu * |
4079 | */ | 4071 | */ |
4080 | if (!is_s2io_card_up(sp)) | 4072 | if (!is_s2io_card_up(sp)) |
4081 | return 0; | 4073 | return 0; |
4082 | 4074 | ||
@@ -4152,20 +4144,20 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev) | |||
4152 | 4144 | ||
4153 | if ((ip->frag_off & htons(IP_OFFSET|IP_MF)) == 0) { | 4145 | if ((ip->frag_off & htons(IP_OFFSET|IP_MF)) == 0) { |
4154 | th = (struct tcphdr *)(((unsigned char *)ip) + | 4146 | th = (struct tcphdr *)(((unsigned char *)ip) + |
4155 | ip->ihl*4); | 4147 | ip->ihl*4); |
4156 | 4148 | ||
4157 | if (ip->protocol == IPPROTO_TCP) { | 4149 | if (ip->protocol == IPPROTO_TCP) { |
4158 | queue_len = sp->total_tcp_fifos; | 4150 | queue_len = sp->total_tcp_fifos; |
4159 | queue = (ntohs(th->source) + | 4151 | queue = (ntohs(th->source) + |
4160 | ntohs(th->dest)) & | 4152 | ntohs(th->dest)) & |
4161 | sp->fifo_selector[queue_len - 1]; | 4153 | sp->fifo_selector[queue_len - 1]; |
4162 | if (queue >= queue_len) | 4154 | if (queue >= queue_len) |
4163 | queue = queue_len - 1; | 4155 | queue = queue_len - 1; |
4164 | } else if (ip->protocol == IPPROTO_UDP) { | 4156 | } else if (ip->protocol == IPPROTO_UDP) { |
4165 | queue_len = sp->total_udp_fifos; | 4157 | queue_len = sp->total_udp_fifos; |
4166 | queue = (ntohs(th->source) + | 4158 | queue = (ntohs(th->source) + |
4167 | ntohs(th->dest)) & | 4159 | ntohs(th->dest)) & |
4168 | sp->fifo_selector[queue_len - 1]; | 4160 | sp->fifo_selector[queue_len - 1]; |
4169 | if (queue >= queue_len) | 4161 | if (queue >= queue_len) |
4170 | queue = queue_len - 1; | 4162 | queue = queue_len - 1; |
4171 | queue += sp->udp_fifo_idx; | 4163 | queue += sp->udp_fifo_idx; |
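The port-based steering above picks a Tx fifo by summing the L4 source and destination ports, masking with the precomputed fifo_selector for that fifo count, and clamping to the last fifo; UDP traffic is then offset into the UDP fifo range. The core calculation in isolation (sketch; the helper name and host-order ports are illustrative):

/* Choose a Tx fifo from the L4 ports: hash, mask, clamp. */
static int my_steer_fifo(u16 sport, u16 dport, int queue_len, u16 selector)
{
	int queue = (sport + dport) & selector;

	return (queue >= queue_len) ? queue_len - 1 : queue;
}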
@@ -4178,7 +4170,7 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev) | |||
4178 | } else if (sp->config.tx_steering_type == TX_PRIORITY_STEERING) | 4170 | } else if (sp->config.tx_steering_type == TX_PRIORITY_STEERING) |
4179 | /* get fifo number based on skb->priority value */ | 4171 | /* get fifo number based on skb->priority value */ |
4180 | queue = config->fifo_mapping | 4172 | queue = config->fifo_mapping |
4181 | [skb->priority & (MAX_TX_FIFOS - 1)]; | 4173 | [skb->priority & (MAX_TX_FIFOS - 1)]; |
4182 | fifo = &mac_control->fifos[queue]; | 4174 | fifo = &mac_control->fifos[queue]; |
4183 | 4175 | ||
4184 | if (do_spin_lock) | 4176 | if (do_spin_lock) |
@@ -4200,14 +4192,14 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev) | |||
4200 | } | 4192 | } |
4201 | } | 4193 | } |
4202 | 4194 | ||
4203 | put_off = (u16) fifo->tx_curr_put_info.offset; | 4195 | put_off = (u16)fifo->tx_curr_put_info.offset; |
4204 | get_off = (u16) fifo->tx_curr_get_info.offset; | 4196 | get_off = (u16)fifo->tx_curr_get_info.offset; |
4205 | txdp = (struct TxD *) fifo->list_info[put_off].list_virt_addr; | 4197 | txdp = (struct TxD *)fifo->list_info[put_off].list_virt_addr; |
4206 | 4198 | ||
4207 | queue_len = fifo->tx_curr_put_info.fifo_len + 1; | 4199 | queue_len = fifo->tx_curr_put_info.fifo_len + 1; |
4208 | /* Avoid "put" pointer going beyond "get" pointer */ | 4200 | /* Avoid "put" pointer going beyond "get" pointer */ |
4209 | if (txdp->Host_Control || | 4201 | if (txdp->Host_Control || |
4210 | ((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) { | 4202 | ((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) { |
4211 | DBG_PRINT(TX_DBG, "Error in xmit, No free TXDs.\n"); | 4203 | DBG_PRINT(TX_DBG, "Error in xmit, No free TXDs.\n"); |
4212 | s2io_stop_tx_queue(sp, fifo->fifo_no); | 4204 | s2io_stop_tx_queue(sp, fifo->fifo_no); |
4213 | dev_kfree_skb(skb); | 4205 | dev_kfree_skb(skb); |
@@ -4221,9 +4213,9 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev) | |||
4221 | txdp->Control_1 |= TXD_TCP_LSO_MSS(s2io_tcp_mss(skb)); | 4213 | txdp->Control_1 |= TXD_TCP_LSO_MSS(s2io_tcp_mss(skb)); |
4222 | } | 4214 | } |
4223 | if (skb->ip_summed == CHECKSUM_PARTIAL) { | 4215 | if (skb->ip_summed == CHECKSUM_PARTIAL) { |
4224 | txdp->Control_2 |= | 4216 | txdp->Control_2 |= (TXD_TX_CKO_IPV4_EN | |
4225 | (TXD_TX_CKO_IPV4_EN | TXD_TX_CKO_TCP_EN | | 4217 | TXD_TX_CKO_TCP_EN | |
4226 | TXD_TX_CKO_UDP_EN); | 4218 | TXD_TX_CKO_UDP_EN); |
4227 | } | 4219 | } |
4228 | txdp->Control_1 |= TXD_GATHER_CODE_FIRST; | 4220 | txdp->Control_1 |= TXD_GATHER_CODE_FIRST; |
4229 | txdp->Control_1 |= TXD_LIST_OWN_XENA; | 4221 | txdp->Control_1 |= TXD_LIST_OWN_XENA; |
@@ -4248,26 +4240,27 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev) | |||
4248 | #ifdef __BIG_ENDIAN | 4240 | #ifdef __BIG_ENDIAN |
4249 | /* both variants do cpu_to_be64(be32_to_cpu(...)) */ | 4241 | /* both variants do cpu_to_be64(be32_to_cpu(...)) */ |
4250 | fifo->ufo_in_band_v[put_off] = | 4242 | fifo->ufo_in_band_v[put_off] = |
4251 | (__force u64)skb_shinfo(skb)->ip6_frag_id; | 4243 | (__force u64)skb_shinfo(skb)->ip6_frag_id; |
4252 | #else | 4244 | #else |
4253 | fifo->ufo_in_band_v[put_off] = | 4245 | fifo->ufo_in_band_v[put_off] = |
4254 | (__force u64)skb_shinfo(skb)->ip6_frag_id << 32; | 4246 | (__force u64)skb_shinfo(skb)->ip6_frag_id << 32; |
4255 | #endif | 4247 | #endif |
4256 | txdp->Host_Control = (unsigned long)fifo->ufo_in_band_v; | 4248 | txdp->Host_Control = (unsigned long)fifo->ufo_in_band_v; |
4257 | txdp->Buffer_Pointer = pci_map_single(sp->pdev, | 4249 | txdp->Buffer_Pointer = pci_map_single(sp->pdev, |
4258 | fifo->ufo_in_band_v, | 4250 | fifo->ufo_in_band_v, |
4259 | sizeof(u64), PCI_DMA_TODEVICE); | 4251 | sizeof(u64), |
4252 | PCI_DMA_TODEVICE); | ||
4260 | if (pci_dma_mapping_error(sp->pdev, txdp->Buffer_Pointer)) | 4253 | if (pci_dma_mapping_error(sp->pdev, txdp->Buffer_Pointer)) |
4261 | goto pci_map_failed; | 4254 | goto pci_map_failed; |
4262 | txdp++; | 4255 | txdp++; |
4263 | } | 4256 | } |
4264 | 4257 | ||
4265 | txdp->Buffer_Pointer = pci_map_single | 4258 | txdp->Buffer_Pointer = pci_map_single(sp->pdev, skb->data, |
4266 | (sp->pdev, skb->data, frg_len, PCI_DMA_TODEVICE); | 4259 | frg_len, PCI_DMA_TODEVICE); |
4267 | if (pci_dma_mapping_error(sp->pdev, txdp->Buffer_Pointer)) | 4260 | if (pci_dma_mapping_error(sp->pdev, txdp->Buffer_Pointer)) |
4268 | goto pci_map_failed; | 4261 | goto pci_map_failed; |
4269 | 4262 | ||
4270 | txdp->Host_Control = (unsigned long) skb; | 4263 | txdp->Host_Control = (unsigned long)skb; |
4271 | txdp->Control_1 |= TXD_BUFFER0_SIZE(frg_len); | 4264 | txdp->Control_1 |= TXD_BUFFER0_SIZE(frg_len); |
4272 | if (offload_type == SKB_GSO_UDP) | 4265 | if (offload_type == SKB_GSO_UDP) |
4273 | txdp->Control_1 |= TXD_UFO_EN; | 4266 | txdp->Control_1 |= TXD_UFO_EN; |
@@ -4280,9 +4273,10 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev) | |||
4280 | if (!frag->size) | 4273 | if (!frag->size) |
4281 | continue; | 4274 | continue; |
4282 | txdp++; | 4275 | txdp++; |
4283 | txdp->Buffer_Pointer = (u64) pci_map_page | 4276 | txdp->Buffer_Pointer = (u64)pci_map_page(sp->pdev, frag->page, |
4284 | (sp->pdev, frag->page, frag->page_offset, | 4277 | frag->page_offset, |
4285 | frag->size, PCI_DMA_TODEVICE); | 4278 | frag->size, |
4279 | PCI_DMA_TODEVICE); | ||
4286 | txdp->Control_1 = TXD_BUFFER0_SIZE(frag->size); | 4280 | txdp->Control_1 = TXD_BUFFER0_SIZE(frag->size); |
4287 | if (offload_type == SKB_GSO_UDP) | 4281 | if (offload_type == SKB_GSO_UDP) |
4288 | txdp->Control_1 |= TXD_UFO_EN; | 4282 | txdp->Control_1 |= TXD_UFO_EN; |
@@ -4422,17 +4416,16 @@ static void s2io_txpic_intr_handle(struct s2io_nic *sp) | |||
4422 | * This is an unstable state so clear both up/down | 4416 |
4423 | * interrupt and adapter to re-evaluate the link state. | 4417 | * interrupt and adapter to re-evaluate the link state. |
4424 | */ | 4418 | */ |
4425 | val64 |= GPIO_INT_REG_LINK_DOWN; | 4419 | val64 |= GPIO_INT_REG_LINK_DOWN; |
4426 | val64 |= GPIO_INT_REG_LINK_UP; | 4420 | val64 |= GPIO_INT_REG_LINK_UP; |
4427 | writeq(val64, &bar0->gpio_int_reg); | 4421 | writeq(val64, &bar0->gpio_int_reg); |
4428 | val64 = readq(&bar0->gpio_int_mask); | 4422 | val64 = readq(&bar0->gpio_int_mask); |
4429 | val64 &= ~(GPIO_INT_MASK_LINK_UP | | 4423 | val64 &= ~(GPIO_INT_MASK_LINK_UP | |
4430 | GPIO_INT_MASK_LINK_DOWN); | 4424 | GPIO_INT_MASK_LINK_DOWN); |
4431 | writeq(val64, &bar0->gpio_int_mask); | 4425 | writeq(val64, &bar0->gpio_int_mask); |
4432 | } | 4426 | } else if (val64 & GPIO_INT_REG_LINK_UP) { |
4433 | else if (val64 & GPIO_INT_REG_LINK_UP) { | ||
4434 | val64 = readq(&bar0->adapter_status); | 4427 | val64 = readq(&bar0->adapter_status); |
4435 | /* Enable Adapter */ | 4428 | /* Enable Adapter */ |
4436 | val64 = readq(&bar0->adapter_control); | 4429 | val64 = readq(&bar0->adapter_control); |
4437 | val64 |= ADAPTER_CNTL_EN; | 4430 | val64 |= ADAPTER_CNTL_EN; |
4438 | writeq(val64, &bar0->adapter_control); | 4431 | writeq(val64, &bar0->adapter_control); |
@@ -4451,7 +4444,7 @@ static void s2io_txpic_intr_handle(struct s2io_nic *sp) | |||
4451 | val64 |= GPIO_INT_MASK_LINK_UP; | 4444 | val64 |= GPIO_INT_MASK_LINK_UP; |
4452 | writeq(val64, &bar0->gpio_int_mask); | 4445 | writeq(val64, &bar0->gpio_int_mask); |
4453 | 4446 | ||
4454 | }else if (val64 & GPIO_INT_REG_LINK_DOWN) { | 4447 | } else if (val64 & GPIO_INT_REG_LINK_DOWN) { |
4455 | val64 = readq(&bar0->adapter_status); | 4448 | val64 = readq(&bar0->adapter_status); |
4456 | s2io_link(sp, LINK_DOWN); | 4449 | s2io_link(sp, LINK_DOWN); |
4456 | /* Link is down so unmask link up interrupt */ | 4449 | /* Link is down so unmask link up interrupt */ |
@@ -4462,7 +4455,7 @@ static void s2io_txpic_intr_handle(struct s2io_nic *sp) | |||
4462 | 4455 | ||
4463 | /* turn off LED */ | 4456 | /* turn off LED */ |
4464 | val64 = readq(&bar0->adapter_control); | 4457 | val64 = readq(&bar0->adapter_control); |
4465 | val64 = val64 &(~ADAPTER_LED_ON); | 4458 | val64 = val64 & (~ADAPTER_LED_ON); |
4466 | writeq(val64, &bar0->adapter_control); | 4459 | writeq(val64, &bar0->adapter_control); |
4467 | } | 4460 | } |
4468 | } | 4461 | } |
@@ -4479,12 +4472,12 @@ static void s2io_txpic_intr_handle(struct s2io_nic *sp) | |||
4479 | * 1 - if alarm bit set | 4472 | * 1 - if alarm bit set |
4480 | * 0 - if alarm bit is not set | 4473 | * 0 - if alarm bit is not set |
4481 | */ | 4474 | */ |
4482 | static int do_s2io_chk_alarm_bit(u64 value, void __iomem * addr, | 4475 | static int do_s2io_chk_alarm_bit(u64 value, void __iomem *addr, |
4483 | unsigned long long *cnt) | 4476 | unsigned long long *cnt) |
4484 | { | 4477 | { |
4485 | u64 val64; | 4478 | u64 val64; |
4486 | val64 = readq(addr); | 4479 | val64 = readq(addr); |
4487 | if ( val64 & value ) { | 4480 | if (val64 & value) { |
4488 | writeq(val64, addr); | 4481 | writeq(val64, addr); |
4489 | (*cnt)++; | 4482 | (*cnt)++; |
4490 | return 1; | 4483 | return 1; |
@@ -4501,12 +4494,12 @@ static int do_s2io_chk_alarm_bit(u64 value, void __iomem * addr, | |||
4501 | * Return Value: | 4494 | * Return Value: |
4502 | * NONE | 4495 | * NONE |
4503 | */ | 4496 | */ |
4504 | static void s2io_handle_errors(void * dev_id) | 4497 | static void s2io_handle_errors(void *dev_id) |
4505 | { | 4498 | { |
4506 | struct net_device *dev = (struct net_device *) dev_id; | 4499 | struct net_device *dev = (struct net_device *)dev_id; |
4507 | struct s2io_nic *sp = netdev_priv(dev); | 4500 | struct s2io_nic *sp = netdev_priv(dev); |
4508 | struct XENA_dev_config __iomem *bar0 = sp->bar0; | 4501 | struct XENA_dev_config __iomem *bar0 = sp->bar0; |
4509 | u64 temp64 = 0,val64=0; | 4502 | u64 temp64 = 0, val64 = 0; |
4510 | int i = 0; | 4503 | int i = 0; |
4511 | 4504 | ||
4512 | struct swStat *sw_stat = &sp->mac_control.stats_info->sw_stat; | 4505 | struct swStat *sw_stat = &sp->mac_control.stats_info->sw_stat; |
@@ -4519,10 +4512,10 @@ static void s2io_handle_errors(void * dev_id) | |||
4519 | return; | 4512 | return; |
4520 | 4513 | ||
4521 | memset(&sw_stat->ring_full_cnt, 0, | 4514 | memset(&sw_stat->ring_full_cnt, 0, |
4522 | sizeof(sw_stat->ring_full_cnt)); | 4515 | sizeof(sw_stat->ring_full_cnt)); |
4523 | 4516 | ||
4524 | /* Handling the XPAK counters update */ | 4517 | /* Handling the XPAK counters update */ |
4525 | if(stats->xpak_timer_count < 72000) { | 4518 | if (stats->xpak_timer_count < 72000) { |
4526 | /* waiting for an hour */ | 4519 | /* waiting for an hour */ |
4527 | stats->xpak_timer_count++; | 4520 | stats->xpak_timer_count++; |
4528 | } else { | 4521 | } else { |
@@ -4541,191 +4534,227 @@ static void s2io_handle_errors(void * dev_id) | |||
4541 | 4534 | ||
4542 | /* In case of a serious error, the device will be Reset. */ | 4535 | /* In case of a serious error, the device will be Reset. */ |
4543 | if (do_s2io_chk_alarm_bit(SERR_SOURCE_ANY, &bar0->serr_source, | 4536 | if (do_s2io_chk_alarm_bit(SERR_SOURCE_ANY, &bar0->serr_source, |
4544 | &sw_stat->serious_err_cnt)) | 4537 | &sw_stat->serious_err_cnt)) |
4545 | goto reset; | 4538 | goto reset; |
4546 | 4539 | ||
4547 | /* Check for data parity error */ | 4540 | /* Check for data parity error */ |
4548 | if (do_s2io_chk_alarm_bit(GPIO_INT_REG_DP_ERR_INT, &bar0->gpio_int_reg, | 4541 | if (do_s2io_chk_alarm_bit(GPIO_INT_REG_DP_ERR_INT, &bar0->gpio_int_reg, |
4549 | &sw_stat->parity_err_cnt)) | 4542 | &sw_stat->parity_err_cnt)) |
4550 | goto reset; | 4543 | goto reset; |
4551 | 4544 | ||
4552 | /* Check for ring full counter */ | 4545 | /* Check for ring full counter */ |
4553 | if (sp->device_type == XFRAME_II_DEVICE) { | 4546 | if (sp->device_type == XFRAME_II_DEVICE) { |
4554 | val64 = readq(&bar0->ring_bump_counter1); | 4547 | val64 = readq(&bar0->ring_bump_counter1); |
4555 | for (i=0; i<4; i++) { | 4548 | for (i = 0; i < 4; i++) { |
4556 | temp64 = ( val64 & vBIT(0xFFFF,(i*16),16)); | 4549 | temp64 = (val64 & vBIT(0xFFFF, (i*16), 16)); |
4557 | temp64 >>= 64 - ((i+1)*16); | 4550 | temp64 >>= 64 - ((i+1)*16); |
4558 | sw_stat->ring_full_cnt[i] += temp64; | 4551 | sw_stat->ring_full_cnt[i] += temp64; |
4559 | } | 4552 | } |
4560 | 4553 | ||
4561 | val64 = readq(&bar0->ring_bump_counter2); | 4554 | val64 = readq(&bar0->ring_bump_counter2); |
4562 | for (i=0; i<4; i++) { | 4555 | for (i = 0; i < 4; i++) { |
4563 | temp64 = ( val64 & vBIT(0xFFFF,(i*16),16)); | 4556 | temp64 = (val64 & vBIT(0xFFFF, (i*16), 16)); |
4564 | temp64 >>= 64 - ((i+1)*16); | 4557 | temp64 >>= 64 - ((i+1)*16); |
4565 | sw_stat->ring_full_cnt[i+4] += temp64; | 4558 | sw_stat->ring_full_cnt[i+4] += temp64; |
4566 | } | 4559 | } |
4567 | } | 4560 | } |
4568 | 4561 | ||
4569 | val64 = readq(&bar0->txdma_int_status); | 4562 | val64 = readq(&bar0->txdma_int_status); |
4570 | /*check for pfc_err*/ | 4563 | /*check for pfc_err*/ |
4571 | if (val64 & TXDMA_PFC_INT) { | 4564 | if (val64 & TXDMA_PFC_INT) { |
4572 | if (do_s2io_chk_alarm_bit(PFC_ECC_DB_ERR | PFC_SM_ERR_ALARM| | 4565 | if (do_s2io_chk_alarm_bit(PFC_ECC_DB_ERR | PFC_SM_ERR_ALARM | |
4573 | PFC_MISC_0_ERR | PFC_MISC_1_ERR| | 4566 | PFC_MISC_0_ERR | PFC_MISC_1_ERR | |
4574 | PFC_PCIX_ERR, &bar0->pfc_err_reg, | 4567 | PFC_PCIX_ERR, |
4575 | &sw_stat->pfc_err_cnt)) | 4568 | &bar0->pfc_err_reg, |
4569 | &sw_stat->pfc_err_cnt)) | ||
4576 | goto reset; | 4570 | goto reset; |
4577 | do_s2io_chk_alarm_bit(PFC_ECC_SG_ERR, &bar0->pfc_err_reg, | 4571 | do_s2io_chk_alarm_bit(PFC_ECC_SG_ERR, |
4578 | &sw_stat->pfc_err_cnt); | 4572 | &bar0->pfc_err_reg, |
4573 | &sw_stat->pfc_err_cnt); | ||
4579 | } | 4574 | } |
4580 | 4575 | ||
4581 | /*check for tda_err*/ | 4576 | /*check for tda_err*/ |
4582 | if (val64 & TXDMA_TDA_INT) { | 4577 | if (val64 & TXDMA_TDA_INT) { |
4583 | if(do_s2io_chk_alarm_bit(TDA_Fn_ECC_DB_ERR | TDA_SM0_ERR_ALARM | | 4578 | if (do_s2io_chk_alarm_bit(TDA_Fn_ECC_DB_ERR | |
4584 | TDA_SM1_ERR_ALARM, &bar0->tda_err_reg, | 4579 | TDA_SM0_ERR_ALARM | |
4585 | &sw_stat->tda_err_cnt)) | 4580 | TDA_SM1_ERR_ALARM, |
4581 | &bar0->tda_err_reg, | ||
4582 | &sw_stat->tda_err_cnt)) | ||
4586 | goto reset; | 4583 | goto reset; |
4587 | do_s2io_chk_alarm_bit(TDA_Fn_ECC_SG_ERR | TDA_PCIX_ERR, | 4584 | do_s2io_chk_alarm_bit(TDA_Fn_ECC_SG_ERR | TDA_PCIX_ERR, |
4588 | &bar0->tda_err_reg, &sw_stat->tda_err_cnt); | 4585 | &bar0->tda_err_reg, |
4586 | &sw_stat->tda_err_cnt); | ||
4589 | } | 4587 | } |
4590 | /*check for pcc_err*/ | 4588 | /*check for pcc_err*/ |
4591 | if (val64 & TXDMA_PCC_INT) { | 4589 | if (val64 & TXDMA_PCC_INT) { |
4592 | if (do_s2io_chk_alarm_bit(PCC_SM_ERR_ALARM | PCC_WR_ERR_ALARM | 4590 | if (do_s2io_chk_alarm_bit(PCC_SM_ERR_ALARM | PCC_WR_ERR_ALARM | |
4593 | | PCC_N_SERR | PCC_6_COF_OV_ERR | 4591 | PCC_N_SERR | PCC_6_COF_OV_ERR | |
4594 | | PCC_7_COF_OV_ERR | PCC_6_LSO_OV_ERR | 4592 | PCC_7_COF_OV_ERR | PCC_6_LSO_OV_ERR | |
4595 | | PCC_7_LSO_OV_ERR | PCC_FB_ECC_DB_ERR | 4593 | PCC_7_LSO_OV_ERR | PCC_FB_ECC_DB_ERR | |
4596 | | PCC_TXB_ECC_DB_ERR, &bar0->pcc_err_reg, | 4594 | PCC_TXB_ECC_DB_ERR, |
4597 | &sw_stat->pcc_err_cnt)) | 4595 | &bar0->pcc_err_reg, |
4596 | &sw_stat->pcc_err_cnt)) | ||
4598 | goto reset; | 4597 | goto reset; |
4599 | do_s2io_chk_alarm_bit(PCC_FB_ECC_SG_ERR | PCC_TXB_ECC_SG_ERR, | 4598 | do_s2io_chk_alarm_bit(PCC_FB_ECC_SG_ERR | PCC_TXB_ECC_SG_ERR, |
4600 | &bar0->pcc_err_reg, &sw_stat->pcc_err_cnt); | 4599 | &bar0->pcc_err_reg, |
4600 | &sw_stat->pcc_err_cnt); | ||
4601 | } | 4601 | } |
4602 | 4602 | ||
4603 | /*check for tti_err*/ | 4603 | /*check for tti_err*/ |
4604 | if (val64 & TXDMA_TTI_INT) { | 4604 | if (val64 & TXDMA_TTI_INT) { |
4605 | if (do_s2io_chk_alarm_bit(TTI_SM_ERR_ALARM, &bar0->tti_err_reg, | 4605 | if (do_s2io_chk_alarm_bit(TTI_SM_ERR_ALARM, |
4606 | &sw_stat->tti_err_cnt)) | 4606 | &bar0->tti_err_reg, |
4607 | &sw_stat->tti_err_cnt)) | ||
4607 | goto reset; | 4608 | goto reset; |
4608 | do_s2io_chk_alarm_bit(TTI_ECC_SG_ERR | TTI_ECC_DB_ERR, | 4609 | do_s2io_chk_alarm_bit(TTI_ECC_SG_ERR | TTI_ECC_DB_ERR, |
4609 | &bar0->tti_err_reg, &sw_stat->tti_err_cnt); | 4610 | &bar0->tti_err_reg, |
4611 | &sw_stat->tti_err_cnt); | ||
4610 | } | 4612 | } |
4611 | 4613 | ||
4612 | /*check for lso_err*/ | 4614 | /*check for lso_err*/ |
4613 | if (val64 & TXDMA_LSO_INT) { | 4615 | if (val64 & TXDMA_LSO_INT) { |
4614 | if (do_s2io_chk_alarm_bit(LSO6_ABORT | LSO7_ABORT | 4616 | if (do_s2io_chk_alarm_bit(LSO6_ABORT | LSO7_ABORT | |
4615 | | LSO6_SM_ERR_ALARM | LSO7_SM_ERR_ALARM, | 4617 | LSO6_SM_ERR_ALARM | LSO7_SM_ERR_ALARM, |
4616 | &bar0->lso_err_reg, &sw_stat->lso_err_cnt)) | 4618 | &bar0->lso_err_reg, |
4619 | &sw_stat->lso_err_cnt)) | ||
4617 | goto reset; | 4620 | goto reset; |
4618 | do_s2io_chk_alarm_bit(LSO6_SEND_OFLOW | LSO7_SEND_OFLOW, | 4621 | do_s2io_chk_alarm_bit(LSO6_SEND_OFLOW | LSO7_SEND_OFLOW, |
4619 | &bar0->lso_err_reg, &sw_stat->lso_err_cnt); | 4622 | &bar0->lso_err_reg, |
4623 | &sw_stat->lso_err_cnt); | ||
4620 | } | 4624 | } |
4621 | 4625 | ||
4622 | /*check for tpa_err*/ | 4626 | /*check for tpa_err*/ |
4623 | if (val64 & TXDMA_TPA_INT) { | 4627 | if (val64 & TXDMA_TPA_INT) { |
4624 | if (do_s2io_chk_alarm_bit(TPA_SM_ERR_ALARM, &bar0->tpa_err_reg, | 4628 | if (do_s2io_chk_alarm_bit(TPA_SM_ERR_ALARM, |
4625 | &sw_stat->tpa_err_cnt)) | 4629 | &bar0->tpa_err_reg, |
4630 | &sw_stat->tpa_err_cnt)) | ||
4626 | goto reset; | 4631 | goto reset; |
4627 | do_s2io_chk_alarm_bit(TPA_TX_FRM_DROP, &bar0->tpa_err_reg, | 4632 | do_s2io_chk_alarm_bit(TPA_TX_FRM_DROP, |
4628 | &sw_stat->tpa_err_cnt); | 4633 | &bar0->tpa_err_reg, |
4634 | &sw_stat->tpa_err_cnt); | ||
4629 | } | 4635 | } |
4630 | 4636 | ||
4631 | /*check for sm_err*/ | 4637 | /*check for sm_err*/ |
4632 | if (val64 & TXDMA_SM_INT) { | 4638 | if (val64 & TXDMA_SM_INT) { |
4633 | if (do_s2io_chk_alarm_bit(SM_SM_ERR_ALARM, &bar0->sm_err_reg, | 4639 | if (do_s2io_chk_alarm_bit(SM_SM_ERR_ALARM, |
4634 | &sw_stat->sm_err_cnt)) | 4640 | &bar0->sm_err_reg, |
4641 | &sw_stat->sm_err_cnt)) | ||
4635 | goto reset; | 4642 | goto reset; |
4636 | } | 4643 | } |
4637 | 4644 | ||
4638 | val64 = readq(&bar0->mac_int_status); | 4645 | val64 = readq(&bar0->mac_int_status); |
4639 | if (val64 & MAC_INT_STATUS_TMAC_INT) { | 4646 | if (val64 & MAC_INT_STATUS_TMAC_INT) { |
4640 | if (do_s2io_chk_alarm_bit(TMAC_TX_BUF_OVRN | TMAC_TX_SM_ERR, | 4647 | if (do_s2io_chk_alarm_bit(TMAC_TX_BUF_OVRN | TMAC_TX_SM_ERR, |
4641 | &bar0->mac_tmac_err_reg, | 4648 | &bar0->mac_tmac_err_reg, |
4642 | &sw_stat->mac_tmac_err_cnt)) | 4649 | &sw_stat->mac_tmac_err_cnt)) |
4643 | goto reset; | 4650 | goto reset; |
4644 | do_s2io_chk_alarm_bit(TMAC_ECC_SG_ERR | TMAC_ECC_DB_ERR | 4651 | do_s2io_chk_alarm_bit(TMAC_ECC_SG_ERR | TMAC_ECC_DB_ERR | |
4645 | | TMAC_DESC_ECC_SG_ERR | TMAC_DESC_ECC_DB_ERR, | 4652 | TMAC_DESC_ECC_SG_ERR | |
4646 | &bar0->mac_tmac_err_reg, | 4653 | TMAC_DESC_ECC_DB_ERR, |
4647 | &sw_stat->mac_tmac_err_cnt); | 4654 | &bar0->mac_tmac_err_reg, |
4655 | &sw_stat->mac_tmac_err_cnt); | ||
4648 | } | 4656 | } |
4649 | 4657 | ||
4650 | val64 = readq(&bar0->xgxs_int_status); | 4658 | val64 = readq(&bar0->xgxs_int_status); |
4651 | if (val64 & XGXS_INT_STATUS_TXGXS) { | 4659 | if (val64 & XGXS_INT_STATUS_TXGXS) { |
4652 | if (do_s2io_chk_alarm_bit(TXGXS_ESTORE_UFLOW | TXGXS_TX_SM_ERR, | 4660 | if (do_s2io_chk_alarm_bit(TXGXS_ESTORE_UFLOW | TXGXS_TX_SM_ERR, |
4653 | &bar0->xgxs_txgxs_err_reg, | 4661 | &bar0->xgxs_txgxs_err_reg, |
4654 | &sw_stat->xgxs_txgxs_err_cnt)) | 4662 | &sw_stat->xgxs_txgxs_err_cnt)) |
4655 | goto reset; | 4663 | goto reset; |
4656 | do_s2io_chk_alarm_bit(TXGXS_ECC_SG_ERR | TXGXS_ECC_DB_ERR, | 4664 | do_s2io_chk_alarm_bit(TXGXS_ECC_SG_ERR | TXGXS_ECC_DB_ERR, |
4657 | &bar0->xgxs_txgxs_err_reg, | 4665 | &bar0->xgxs_txgxs_err_reg, |
4658 | &sw_stat->xgxs_txgxs_err_cnt); | 4666 | &sw_stat->xgxs_txgxs_err_cnt); |
4659 | } | 4667 | } |
4660 | 4668 | ||
4661 | val64 = readq(&bar0->rxdma_int_status); | 4669 | val64 = readq(&bar0->rxdma_int_status); |
4662 | if (val64 & RXDMA_INT_RC_INT_M) { | 4670 | if (val64 & RXDMA_INT_RC_INT_M) { |
4663 | if (do_s2io_chk_alarm_bit(RC_PRCn_ECC_DB_ERR | RC_FTC_ECC_DB_ERR | 4671 | if (do_s2io_chk_alarm_bit(RC_PRCn_ECC_DB_ERR | |
4664 | | RC_PRCn_SM_ERR_ALARM |RC_FTC_SM_ERR_ALARM, | 4672 | RC_FTC_ECC_DB_ERR | |
4665 | &bar0->rc_err_reg, &sw_stat->rc_err_cnt)) | 4673 | RC_PRCn_SM_ERR_ALARM | |
4674 | RC_FTC_SM_ERR_ALARM, | ||
4675 | &bar0->rc_err_reg, | ||
4676 | &sw_stat->rc_err_cnt)) | ||
4666 | goto reset; | 4677 | goto reset; |
4667 | do_s2io_chk_alarm_bit(RC_PRCn_ECC_SG_ERR | RC_FTC_ECC_SG_ERR | 4678 | do_s2io_chk_alarm_bit(RC_PRCn_ECC_SG_ERR | |
4668 | | RC_RDA_FAIL_WR_Rn, &bar0->rc_err_reg, | 4679 | RC_FTC_ECC_SG_ERR | |
4669 | &sw_stat->rc_err_cnt); | 4680 | RC_RDA_FAIL_WR_Rn, &bar0->rc_err_reg, |
4670 | if (do_s2io_chk_alarm_bit(PRC_PCI_AB_RD_Rn | PRC_PCI_AB_WR_Rn | 4681 | &sw_stat->rc_err_cnt); |
4671 | | PRC_PCI_AB_F_WR_Rn, &bar0->prc_pcix_err_reg, | 4682 | if (do_s2io_chk_alarm_bit(PRC_PCI_AB_RD_Rn | |
4672 | &sw_stat->prc_pcix_err_cnt)) | 4683 | PRC_PCI_AB_WR_Rn | |
4684 | PRC_PCI_AB_F_WR_Rn, | ||
4685 | &bar0->prc_pcix_err_reg, | ||
4686 | &sw_stat->prc_pcix_err_cnt)) | ||
4673 | goto reset; | 4687 | goto reset; |
4674 | do_s2io_chk_alarm_bit(PRC_PCI_DP_RD_Rn | PRC_PCI_DP_WR_Rn | 4688 | do_s2io_chk_alarm_bit(PRC_PCI_DP_RD_Rn | |
4675 | | PRC_PCI_DP_F_WR_Rn, &bar0->prc_pcix_err_reg, | 4689 | PRC_PCI_DP_WR_Rn | |
4676 | &sw_stat->prc_pcix_err_cnt); | 4690 | PRC_PCI_DP_F_WR_Rn, |
4691 | &bar0->prc_pcix_err_reg, | ||
4692 | &sw_stat->prc_pcix_err_cnt); | ||
4677 | } | 4693 | } |
4678 | 4694 | ||
4679 | if (val64 & RXDMA_INT_RPA_INT_M) { | 4695 | if (val64 & RXDMA_INT_RPA_INT_M) { |
4680 | if (do_s2io_chk_alarm_bit(RPA_SM_ERR_ALARM | RPA_CREDIT_ERR, | 4696 | if (do_s2io_chk_alarm_bit(RPA_SM_ERR_ALARM | RPA_CREDIT_ERR, |
4681 | &bar0->rpa_err_reg, &sw_stat->rpa_err_cnt)) | 4697 | &bar0->rpa_err_reg, |
4698 | &sw_stat->rpa_err_cnt)) | ||
4682 | goto reset; | 4699 | goto reset; |
4683 | do_s2io_chk_alarm_bit(RPA_ECC_SG_ERR | RPA_ECC_DB_ERR, | 4700 | do_s2io_chk_alarm_bit(RPA_ECC_SG_ERR | RPA_ECC_DB_ERR, |
4684 | &bar0->rpa_err_reg, &sw_stat->rpa_err_cnt); | 4701 | &bar0->rpa_err_reg, |
4702 | &sw_stat->rpa_err_cnt); | ||
4685 | } | 4703 | } |
4686 | 4704 | ||
4687 | if (val64 & RXDMA_INT_RDA_INT_M) { | 4705 | if (val64 & RXDMA_INT_RDA_INT_M) { |
4688 | if (do_s2io_chk_alarm_bit(RDA_RXDn_ECC_DB_ERR | 4706 | if (do_s2io_chk_alarm_bit(RDA_RXDn_ECC_DB_ERR | |
4689 | | RDA_FRM_ECC_DB_N_AERR | RDA_SM1_ERR_ALARM | 4707 | RDA_FRM_ECC_DB_N_AERR | |
4690 | | RDA_SM0_ERR_ALARM | RDA_RXD_ECC_DB_SERR, | 4708 | RDA_SM1_ERR_ALARM | |
4691 | &bar0->rda_err_reg, &sw_stat->rda_err_cnt)) | 4709 | RDA_SM0_ERR_ALARM | |
4710 | RDA_RXD_ECC_DB_SERR, | ||
4711 | &bar0->rda_err_reg, | ||
4712 | &sw_stat->rda_err_cnt)) | ||
4692 | goto reset; | 4713 | goto reset; |
4693 | do_s2io_chk_alarm_bit(RDA_RXDn_ECC_SG_ERR | RDA_FRM_ECC_SG_ERR | 4714 | do_s2io_chk_alarm_bit(RDA_RXDn_ECC_SG_ERR | |
4694 | | RDA_MISC_ERR | RDA_PCIX_ERR, | 4715 | RDA_FRM_ECC_SG_ERR | |
4695 | &bar0->rda_err_reg, &sw_stat->rda_err_cnt); | 4716 | RDA_MISC_ERR | |
4717 | RDA_PCIX_ERR, | ||
4718 | &bar0->rda_err_reg, | ||
4719 | &sw_stat->rda_err_cnt); | ||
4696 | } | 4720 | } |
4697 | 4721 | ||
4698 | if (val64 & RXDMA_INT_RTI_INT_M) { | 4722 | if (val64 & RXDMA_INT_RTI_INT_M) { |
4699 | if (do_s2io_chk_alarm_bit(RTI_SM_ERR_ALARM, &bar0->rti_err_reg, | 4723 | if (do_s2io_chk_alarm_bit(RTI_SM_ERR_ALARM, |
4700 | &sw_stat->rti_err_cnt)) | 4724 | &bar0->rti_err_reg, |
4725 | &sw_stat->rti_err_cnt)) | ||
4701 | goto reset; | 4726 | goto reset; |
4702 | do_s2io_chk_alarm_bit(RTI_ECC_SG_ERR | RTI_ECC_DB_ERR, | 4727 | do_s2io_chk_alarm_bit(RTI_ECC_SG_ERR | RTI_ECC_DB_ERR, |
4703 | &bar0->rti_err_reg, &sw_stat->rti_err_cnt); | 4728 | &bar0->rti_err_reg, |
4729 | &sw_stat->rti_err_cnt); | ||
4704 | } | 4730 | } |
4705 | 4731 | ||
4706 | val64 = readq(&bar0->mac_int_status); | 4732 | val64 = readq(&bar0->mac_int_status); |
4707 | if (val64 & MAC_INT_STATUS_RMAC_INT) { | 4733 | if (val64 & MAC_INT_STATUS_RMAC_INT) { |
4708 | if (do_s2io_chk_alarm_bit(RMAC_RX_BUFF_OVRN | RMAC_RX_SM_ERR, | 4734 | if (do_s2io_chk_alarm_bit(RMAC_RX_BUFF_OVRN | RMAC_RX_SM_ERR, |
4709 | &bar0->mac_rmac_err_reg, | 4735 | &bar0->mac_rmac_err_reg, |
4710 | &sw_stat->mac_rmac_err_cnt)) | 4736 | &sw_stat->mac_rmac_err_cnt)) |
4711 | goto reset; | 4737 | goto reset; |
4712 | do_s2io_chk_alarm_bit(RMAC_UNUSED_INT|RMAC_SINGLE_ECC_ERR| | 4738 | do_s2io_chk_alarm_bit(RMAC_UNUSED_INT | |
4713 | RMAC_DOUBLE_ECC_ERR, &bar0->mac_rmac_err_reg, | 4739 | RMAC_SINGLE_ECC_ERR | |
4714 | &sw_stat->mac_rmac_err_cnt); | 4740 | RMAC_DOUBLE_ECC_ERR, |
4741 | &bar0->mac_rmac_err_reg, | ||
4742 | &sw_stat->mac_rmac_err_cnt); | ||
4715 | } | 4743 | } |
4716 | 4744 | ||
4717 | val64 = readq(&bar0->xgxs_int_status); | 4745 | val64 = readq(&bar0->xgxs_int_status); |
4718 | if (val64 & XGXS_INT_STATUS_RXGXS) { | 4746 | if (val64 & XGXS_INT_STATUS_RXGXS) { |
4719 | if (do_s2io_chk_alarm_bit(RXGXS_ESTORE_OFLOW | RXGXS_RX_SM_ERR, | 4747 | if (do_s2io_chk_alarm_bit(RXGXS_ESTORE_OFLOW | RXGXS_RX_SM_ERR, |
4720 | &bar0->xgxs_rxgxs_err_reg, | 4748 | &bar0->xgxs_rxgxs_err_reg, |
4721 | &sw_stat->xgxs_rxgxs_err_cnt)) | 4749 | &sw_stat->xgxs_rxgxs_err_cnt)) |
4722 | goto reset; | 4750 | goto reset; |
4723 | } | 4751 | } |
4724 | 4752 | ||
4725 | val64 = readq(&bar0->mc_int_status); | 4753 | val64 = readq(&bar0->mc_int_status); |
4726 | if(val64 & MC_INT_STATUS_MC_INT) { | 4754 | if (val64 & MC_INT_STATUS_MC_INT) { |
4727 | if (do_s2io_chk_alarm_bit(MC_ERR_REG_SM_ERR, &bar0->mc_err_reg, | 4755 | if (do_s2io_chk_alarm_bit(MC_ERR_REG_SM_ERR, |
4728 | &sw_stat->mc_err_cnt)) | 4756 | &bar0->mc_err_reg, |
4757 | &sw_stat->mc_err_cnt)) | ||
4729 | goto reset; | 4758 | goto reset; |
4730 | 4759 | ||
4731 | /* Handling Ecc errors */ | 4760 | /* Handling Ecc errors */ |
@@ -4738,10 +4767,10 @@ static void s2io_handle_errors(void * dev_id) | |||
4738 | * Reset XframeI only if critical error | 4767 | * Reset XframeI only if critical error |
4739 | */ | 4768 | */ |
4740 | if (val64 & | 4769 | if (val64 & |
4741 | (MC_ERR_REG_MIRI_ECC_DB_ERR_0 | | 4770 | (MC_ERR_REG_MIRI_ECC_DB_ERR_0 | |
4742 | MC_ERR_REG_MIRI_ECC_DB_ERR_1)) | 4771 | MC_ERR_REG_MIRI_ECC_DB_ERR_1)) |
4743 | goto reset; | 4772 | goto reset; |
4744 | } | 4773 | } |
4745 | } else | 4774 | } else |
4746 | sw_stat->single_ecc_errs++; | 4775 | sw_stat->single_ecc_errs++; |
4747 | } | 4776 | } |
@@ -4770,7 +4799,7 @@ reset: | |||
4770 | */ | 4799 | */ |
4771 | static irqreturn_t s2io_isr(int irq, void *dev_id) | 4800 | static irqreturn_t s2io_isr(int irq, void *dev_id) |
4772 | { | 4801 | { |
4773 | struct net_device *dev = (struct net_device *) dev_id; | 4802 | struct net_device *dev = (struct net_device *)dev_id; |
4774 | struct s2io_nic *sp = netdev_priv(dev); | 4803 | struct s2io_nic *sp = netdev_priv(dev); |
4775 | struct XENA_dev_config __iomem *bar0 = sp->bar0; | 4804 | struct XENA_dev_config __iomem *bar0 = sp->bar0; |
4776 | int i; | 4805 | int i; |
@@ -4797,14 +4826,11 @@ static irqreturn_t s2io_isr(int irq, void *dev_id) | |||
4797 | */ | 4826 | */ |
4798 | reason = readq(&bar0->general_int_status); | 4827 | reason = readq(&bar0->general_int_status); |
4799 | 4828 | ||
4800 | if (unlikely(reason == S2IO_MINUS_ONE) ) { | 4829 | if (unlikely(reason == S2IO_MINUS_ONE)) |
4801 | /* Nothing much can be done. Get out */ | 4830 | return IRQ_HANDLED; /* Nothing much can be done. Get out */ |
4802 | return IRQ_HANDLED; | ||
4803 | } | ||
4804 | 4831 | ||
4805 | if (reason & (GEN_INTR_RXTRAFFIC | | 4832 | if (reason & |
4806 | GEN_INTR_TXTRAFFIC | GEN_INTR_TXPIC)) | 4833 | (GEN_INTR_RXTRAFFIC | GEN_INTR_TXTRAFFIC | GEN_INTR_TXPIC)) { |
4807 | { | ||
4808 | writeq(S2IO_MINUS_ONE, &bar0->general_int_mask); | 4834 | writeq(S2IO_MINUS_ONE, &bar0->general_int_mask); |
4809 | 4835 | ||
4810 | if (config->napi) { | 4836 | if (config->napi) { |
@@ -4859,8 +4885,7 @@ static irqreturn_t s2io_isr(int irq, void *dev_id) | |||
4859 | 4885 | ||
4860 | return IRQ_HANDLED; | 4886 | return IRQ_HANDLED; |
4861 | 4887 | ||
4862 | } | 4888 | } else if (!reason) { |
4863 | else if (!reason) { | ||
4864 | /* The interrupt was not raised by us */ | 4889 | /* The interrupt was not raised by us */ |
4865 | return IRQ_NONE; | 4890 | return IRQ_NONE; |
4866 | } | 4891 | } |
@@ -4890,7 +4915,7 @@ static void s2io_updt_stats(struct s2io_nic *sp) | |||
4890 | cnt++; | 4915 | cnt++; |
4891 | if (cnt == 5) | 4916 | if (cnt == 5) |
4892 | break; /* Updt failed */ | 4917 | break; /* Updt failed */ |
4893 | } while(1); | 4918 | } while (1); |
4894 | } | 4919 | } |
4895 | } | 4920 | } |
4896 | 4921 | ||
@@ -4921,7 +4946,7 @@ static struct net_device_stats *s2io_get_stats(struct net_device *dev) | |||
4921 | /* Using sp->stats as a staging area, because reset (due to mtu | 4946 | /* Using sp->stats as a staging area, because reset (due to mtu |
4922 | change, for example) will clear some hardware counters */ | 4947 | change, for example) will clear some hardware counters */ |
4923 | dev->stats.tx_packets += | 4948 | dev->stats.tx_packets += |
4924 | le32_to_cpu(mac_control->stats_info->tmac_frms) - | 4949 | le32_to_cpu(mac_control->stats_info->tmac_frms) - |
4925 | sp->stats.tx_packets; | 4950 | sp->stats.tx_packets; |
4926 | sp->stats.tx_packets = | 4951 | sp->stats.tx_packets = |
4927 | le32_to_cpu(mac_control->stats_info->tmac_frms); | 4952 | le32_to_cpu(mac_control->stats_info->tmac_frms); |
@@ -4936,12 +4961,12 @@ static struct net_device_stats *s2io_get_stats(struct net_device *dev) | |||
4936 | sp->stats.rx_errors = | 4961 | sp->stats.rx_errors = |
4937 | le64_to_cpu(mac_control->stats_info->rmac_drop_frms); | 4962 | le64_to_cpu(mac_control->stats_info->rmac_drop_frms); |
4938 | dev->stats.multicast = | 4963 | dev->stats.multicast = |
4939 | le32_to_cpu(mac_control->stats_info->rmac_vld_mcst_frms) - | 4964 | le32_to_cpu(mac_control->stats_info->rmac_vld_mcst_frms) - |
4940 | sp->stats.multicast; | 4965 | sp->stats.multicast; |
4941 | sp->stats.multicast = | 4966 | sp->stats.multicast = |
4942 | le32_to_cpu(mac_control->stats_info->rmac_vld_mcst_frms); | 4967 | le32_to_cpu(mac_control->stats_info->rmac_vld_mcst_frms); |
4943 | dev->stats.rx_length_errors = | 4968 | dev->stats.rx_length_errors = |
4944 | le64_to_cpu(mac_control->stats_info->rmac_long_frms) - | 4969 | le64_to_cpu(mac_control->stats_info->rmac_long_frms) - |
4945 | sp->stats.rx_length_errors; | 4970 | sp->stats.rx_length_errors; |
4946 | sp->stats.rx_length_errors = | 4971 | sp->stats.rx_length_errors = |
4947 | le64_to_cpu(mac_control->stats_info->rmac_long_frms); | 4972 | le64_to_cpu(mac_control->stats_info->rmac_long_frms); |
@@ -4955,7 +4980,7 @@ static struct net_device_stats *s2io_get_stats(struct net_device *dev) | |||
4955 | dev->stats.rx_bytes += ring->rx_bytes; | 4980 | dev->stats.rx_bytes += ring->rx_bytes; |
4956 | } | 4981 | } |
4957 | 4982 | ||
4958 | return (&dev->stats); | 4983 | return &dev->stats; |
4959 | } | 4984 | } |
4960 | 4985 | ||
4961 | /** | 4986 | /** |
@@ -4978,7 +5003,7 @@ static void s2io_set_multicast(struct net_device *dev) | |||
4978 | struct s2io_nic *sp = netdev_priv(dev); | 5003 | struct s2io_nic *sp = netdev_priv(dev); |
4979 | struct XENA_dev_config __iomem *bar0 = sp->bar0; | 5004 | struct XENA_dev_config __iomem *bar0 = sp->bar0; |
4980 | u64 val64 = 0, multi_mac = 0x010203040506ULL, mask = | 5005 | u64 val64 = 0, multi_mac = 0x010203040506ULL, mask = |
4981 | 0xfeffffffffffULL; | 5006 | 0xfeffffffffffULL; |
4982 | u64 dis_addr = S2IO_DISABLE_MAC_ENTRY, mac_addr = 0; | 5007 | u64 dis_addr = S2IO_DISABLE_MAC_ENTRY, mac_addr = 0; |
4983 | void __iomem *add; | 5008 | void __iomem *add; |
4984 | struct config_param *config = &sp->config; | 5009 | struct config_param *config = &sp->config; |
@@ -4990,13 +5015,13 @@ static void s2io_set_multicast(struct net_device *dev) | |||
4990 | writeq(RMAC_ADDR_DATA1_MEM_MASK(mask), | 5015 | writeq(RMAC_ADDR_DATA1_MEM_MASK(mask), |
4991 | &bar0->rmac_addr_data1_mem); | 5016 | &bar0->rmac_addr_data1_mem); |
4992 | val64 = RMAC_ADDR_CMD_MEM_WE | | 5017 | val64 = RMAC_ADDR_CMD_MEM_WE | |
4993 | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD | | 5018 | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD | |
4994 | RMAC_ADDR_CMD_MEM_OFFSET(config->max_mc_addr - 1); | 5019 | RMAC_ADDR_CMD_MEM_OFFSET(config->max_mc_addr - 1); |
4995 | writeq(val64, &bar0->rmac_addr_cmd_mem); | 5020 | writeq(val64, &bar0->rmac_addr_cmd_mem); |
4996 | /* Wait till command completes */ | 5021 | /* Wait till command completes */ |
4997 | wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem, | 5022 | wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem, |
4998 | RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING, | 5023 | RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING, |
4999 | S2IO_BIT_RESET); | 5024 | S2IO_BIT_RESET); |
5000 | 5025 | ||
5001 | sp->m_cast_flg = 1; | 5026 | sp->m_cast_flg = 1; |
5002 | sp->all_multi_pos = config->max_mc_addr - 1; | 5027 | sp->all_multi_pos = config->max_mc_addr - 1; |
@@ -5007,13 +5032,13 @@ static void s2io_set_multicast(struct net_device *dev) | |||
5007 | writeq(RMAC_ADDR_DATA1_MEM_MASK(0x0), | 5032 | writeq(RMAC_ADDR_DATA1_MEM_MASK(0x0), |
5008 | &bar0->rmac_addr_data1_mem); | 5033 | &bar0->rmac_addr_data1_mem); |
5009 | val64 = RMAC_ADDR_CMD_MEM_WE | | 5034 | val64 = RMAC_ADDR_CMD_MEM_WE | |
5010 | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD | | 5035 | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD | |
5011 | RMAC_ADDR_CMD_MEM_OFFSET(sp->all_multi_pos); | 5036 | RMAC_ADDR_CMD_MEM_OFFSET(sp->all_multi_pos); |
5012 | writeq(val64, &bar0->rmac_addr_cmd_mem); | 5037 | writeq(val64, &bar0->rmac_addr_cmd_mem); |
5013 | /* Wait till command completes */ | 5038 | /* Wait till command completes */ |
5014 | wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem, | 5039 | wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem, |
5015 | RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING, | 5040 | RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING, |
5016 | S2IO_BIT_RESET); | 5041 | S2IO_BIT_RESET); |
5017 | 5042 | ||
5018 | sp->m_cast_flg = 0; | 5043 | sp->m_cast_flg = 0; |
5019 | sp->all_multi_pos = 0; | 5044 | sp->all_multi_pos = 0; |
@@ -5026,7 +5051,7 @@ static void s2io_set_multicast(struct net_device *dev) | |||
5026 | val64 |= MAC_CFG_RMAC_PROM_ENABLE; | 5051 | val64 |= MAC_CFG_RMAC_PROM_ENABLE; |
5027 | 5052 | ||
5028 | writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key); | 5053 | writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key); |
5029 | writel((u32) val64, add); | 5054 | writel((u32)val64, add); |
5030 | writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key); | 5055 | writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key); |
5031 | writel((u32) (val64 >> 32), (add + 4)); | 5056 | writel((u32) (val64 >> 32), (add + 4)); |
5032 | 5057 | ||
@@ -5048,7 +5073,7 @@ static void s2io_set_multicast(struct net_device *dev) | |||
5048 | val64 &= ~MAC_CFG_RMAC_PROM_ENABLE; | 5073 | val64 &= ~MAC_CFG_RMAC_PROM_ENABLE; |
5049 | 5074 | ||
5050 | writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key); | 5075 | writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key); |
5051 | writel((u32) val64, add); | 5076 | writel((u32)val64, add); |
5052 | writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key); | 5077 | writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key); |
5053 | writel((u32) (val64 >> 32), (add + 4)); | 5078 | writel((u32) (val64 >> 32), (add + 4)); |
5054 | 5079 | ||
@@ -5084,19 +5109,18 @@ static void s2io_set_multicast(struct net_device *dev) | |||
5084 | writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr), | 5109 | writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr), |
5085 | &bar0->rmac_addr_data0_mem); | 5110 | &bar0->rmac_addr_data0_mem); |
5086 | writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL), | 5111 | writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL), |
5087 | &bar0->rmac_addr_data1_mem); | 5112 | &bar0->rmac_addr_data1_mem); |
5088 | val64 = RMAC_ADDR_CMD_MEM_WE | | 5113 | val64 = RMAC_ADDR_CMD_MEM_WE | |
5089 | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD | | 5114 | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD | |
5090 | RMAC_ADDR_CMD_MEM_OFFSET | 5115 | RMAC_ADDR_CMD_MEM_OFFSET |
5091 | (config->mc_start_offset + i); | 5116 | (config->mc_start_offset + i); |
5092 | writeq(val64, &bar0->rmac_addr_cmd_mem); | 5117 | writeq(val64, &bar0->rmac_addr_cmd_mem); |
5093 | 5118 | ||
5094 | /* Wait till command completes */ | 5119 | /* Wait till command completes */ |
5095 | if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem, | 5120 | if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem, |
5096 | RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING, | 5121 | RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING, |
5097 | S2IO_BIT_RESET)) { | 5122 | S2IO_BIT_RESET)) { |
5098 | DBG_PRINT(ERR_DBG, "%s: Adding ", | 5123 | DBG_PRINT(ERR_DBG, "%s: Adding ", dev->name); |
5099 | dev->name); | ||
5100 | DBG_PRINT(ERR_DBG, "Multicasts failed\n"); | 5124 | DBG_PRINT(ERR_DBG, "Multicasts failed\n"); |
5101 | return; | 5125 | return; |
5102 | } | 5126 | } |
@@ -5116,19 +5140,18 @@ static void s2io_set_multicast(struct net_device *dev) | |||
5116 | writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr), | 5140 | writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr), |
5117 | &bar0->rmac_addr_data0_mem); | 5141 | &bar0->rmac_addr_data0_mem); |
5118 | writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL), | 5142 | writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL), |
5119 | &bar0->rmac_addr_data1_mem); | 5143 | &bar0->rmac_addr_data1_mem); |
5120 | val64 = RMAC_ADDR_CMD_MEM_WE | | 5144 | val64 = RMAC_ADDR_CMD_MEM_WE | |
5121 | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD | | 5145 | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD | |
5122 | RMAC_ADDR_CMD_MEM_OFFSET | 5146 | RMAC_ADDR_CMD_MEM_OFFSET |
5123 | (i + config->mc_start_offset); | 5147 | (i + config->mc_start_offset); |
5124 | writeq(val64, &bar0->rmac_addr_cmd_mem); | 5148 | writeq(val64, &bar0->rmac_addr_cmd_mem); |
5125 | 5149 | ||
5126 | /* Wait till command completes */ | 5150 | /* Wait till command completes */ |
5127 | if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem, | 5151 | if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem, |
5128 | RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING, | 5152 | RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING, |
5129 | S2IO_BIT_RESET)) { | 5153 | S2IO_BIT_RESET)) { |
5130 | DBG_PRINT(ERR_DBG, "%s: Adding ", | 5154 | DBG_PRINT(ERR_DBG, "%s: Adding ", dev->name); |
5131 | dev->name); | ||
5132 | DBG_PRINT(ERR_DBG, "Multicasts failed\n"); | 5155 | DBG_PRINT(ERR_DBG, "Multicasts failed\n"); |
5133 | return; | 5156 | return; |
5134 | } | 5157 | } |
@@ -5163,11 +5186,11 @@ static void do_s2io_restore_unicast_mc(struct s2io_nic *sp) | |||
5163 | /* restore unicast mac address */ | 5186 | /* restore unicast mac address */ |
5164 | for (offset = 0; offset < config->max_mac_addr; offset++) | 5187 | for (offset = 0; offset < config->max_mac_addr; offset++) |
5165 | do_s2io_prog_unicast(sp->dev, | 5188 | do_s2io_prog_unicast(sp->dev, |
5166 | sp->def_mac_addr[offset].mac_addr); | 5189 | sp->def_mac_addr[offset].mac_addr); |
5167 | 5190 | ||
5168 | /* restore multicast mac address */ | 5191 | /* restore multicast mac address */ |
5169 | for (offset = config->mc_start_offset; | 5192 | for (offset = config->mc_start_offset; |
5170 | offset < config->max_mc_addr; offset++) | 5193 | offset < config->max_mc_addr; offset++) |
5171 | do_s2io_add_mc(sp, sp->def_mac_addr[offset].mac_addr); | 5194 | do_s2io_add_mc(sp, sp->def_mac_addr[offset].mac_addr); |
5172 | } | 5195 | } |
5173 | 5196 | ||
@@ -5197,13 +5220,13 @@ static int do_s2io_add_mc(struct s2io_nic *sp, u8 *addr) | |||
5197 | } | 5220 | } |
5198 | if (i == config->max_mc_addr) { | 5221 | if (i == config->max_mc_addr) { |
5199 | DBG_PRINT(ERR_DBG, | 5222 | DBG_PRINT(ERR_DBG, |
5200 | "CAM full no space left for multicast MAC\n"); | 5223 | "CAM full no space left for multicast MAC\n"); |
5201 | return FAILURE; | 5224 | return FAILURE; |
5202 | } | 5225 | } |
5203 | /* Update the internal structure with this new mac address */ | 5226 | /* Update the internal structure with this new mac address */ |
5204 | do_s2io_copy_mac_addr(sp, i, mac_addr); | 5227 | do_s2io_copy_mac_addr(sp, i, mac_addr); |
5205 | 5228 | ||
5206 | return (do_s2io_add_mac(sp, mac_addr, i)); | 5229 | return do_s2io_add_mac(sp, mac_addr, i); |
5207 | } | 5230 | } |
5208 | 5231 | ||
5209 | /* add MAC address to CAM */ | 5232 | /* add MAC address to CAM */ |
@@ -5213,17 +5236,16 @@ static int do_s2io_add_mac(struct s2io_nic *sp, u64 addr, int off) | |||
5213 | struct XENA_dev_config __iomem *bar0 = sp->bar0; | 5236 | struct XENA_dev_config __iomem *bar0 = sp->bar0; |
5214 | 5237 | ||
5215 | writeq(RMAC_ADDR_DATA0_MEM_ADDR(addr), | 5238 | writeq(RMAC_ADDR_DATA0_MEM_ADDR(addr), |
5216 | &bar0->rmac_addr_data0_mem); | 5239 | &bar0->rmac_addr_data0_mem); |
5217 | 5240 | ||
5218 | val64 = | 5241 | val64 = RMAC_ADDR_CMD_MEM_WE | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD | |
5219 | RMAC_ADDR_CMD_MEM_WE | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD | | ||
5220 | RMAC_ADDR_CMD_MEM_OFFSET(off); | 5242 | RMAC_ADDR_CMD_MEM_OFFSET(off); |
5221 | writeq(val64, &bar0->rmac_addr_cmd_mem); | 5243 | writeq(val64, &bar0->rmac_addr_cmd_mem); |
5222 | 5244 | ||
5223 | /* Wait till command completes */ | 5245 | /* Wait till command completes */ |
5224 | if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem, | 5246 | if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem, |
5225 | RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING, | 5247 | RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING, |
5226 | S2IO_BIT_RESET)) { | 5248 | S2IO_BIT_RESET)) { |
5227 | DBG_PRINT(INFO_DBG, "do_s2io_add_mac failed\n"); | 5249 | DBG_PRINT(INFO_DBG, "do_s2io_add_mac failed\n"); |
5228 | return FAILURE; | 5250 | return FAILURE; |
5229 | } | 5251 | } |
@@ -5237,7 +5259,7 @@ static int do_s2io_delete_unicast_mc(struct s2io_nic *sp, u64 addr) | |||
5237 | struct config_param *config = &sp->config; | 5259 | struct config_param *config = &sp->config; |
5238 | 5260 | ||
5239 | for (offset = 1; | 5261 | for (offset = 1; |
5240 | offset < config->max_mc_addr; offset++) { | 5262 | offset < config->max_mc_addr; offset++) { |
5241 | tmp64 = do_s2io_read_unicast_mc(sp, offset); | 5263 | tmp64 = do_s2io_read_unicast_mc(sp, offset); |
5242 | if (tmp64 == addr) { | 5264 | if (tmp64 == addr) { |
5243 | /* disable the entry by writing 0xffffffffffffULL */ | 5265 | /* disable the entry by writing 0xffffffffffffULL */ |
@@ -5249,7 +5271,7 @@ static int do_s2io_delete_unicast_mc(struct s2io_nic *sp, u64 addr) | |||
5249 | } | 5271 | } |
5250 | } | 5272 | } |
5251 | DBG_PRINT(ERR_DBG, "MAC address 0x%llx not found in CAM\n", | 5273 | DBG_PRINT(ERR_DBG, "MAC address 0x%llx not found in CAM\n", |
5252 | (unsigned long long)addr); | 5274 | (unsigned long long)addr); |
5253 | return FAILURE; | 5275 | return FAILURE; |
5254 | } | 5276 | } |
5255 | 5277 | ||
@@ -5260,20 +5282,20 @@ static u64 do_s2io_read_unicast_mc(struct s2io_nic *sp, int offset) | |||
5260 | struct XENA_dev_config __iomem *bar0 = sp->bar0; | 5282 | struct XENA_dev_config __iomem *bar0 = sp->bar0; |
5261 | 5283 | ||
5262 | /* read mac addr */ | 5284 | /* read mac addr */ |
5263 | val64 = | 5285 | val64 = RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD | |
5264 | RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD | | ||
5265 | RMAC_ADDR_CMD_MEM_OFFSET(offset); | 5286 | RMAC_ADDR_CMD_MEM_OFFSET(offset); |
5266 | writeq(val64, &bar0->rmac_addr_cmd_mem); | 5287 | writeq(val64, &bar0->rmac_addr_cmd_mem); |
5267 | 5288 | ||
5268 | /* Wait till command completes */ | 5289 | /* Wait till command completes */ |
5269 | if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem, | 5290 | if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem, |
5270 | RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING, | 5291 | RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING, |
5271 | S2IO_BIT_RESET)) { | 5292 | S2IO_BIT_RESET)) { |
5272 | DBG_PRINT(INFO_DBG, "do_s2io_read_unicast_mc failed\n"); | 5293 | DBG_PRINT(INFO_DBG, "do_s2io_read_unicast_mc failed\n"); |
5273 | return FAILURE; | 5294 | return FAILURE; |
5274 | } | 5295 | } |
5275 | tmp64 = readq(&bar0->rmac_addr_data0_mem); | 5296 | tmp64 = readq(&bar0->rmac_addr_data0_mem); |
5276 | return (tmp64 >> 16); | 5297 | |
5298 | return tmp64 >> 16; | ||
5277 | } | 5299 | } |
5278 | 5300 | ||
5279 | /** | 5301 | /** |
@@ -5290,7 +5312,7 @@ static int s2io_set_mac_addr(struct net_device *dev, void *p) | |||
5290 | memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); | 5312 | memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); |
5291 | 5313 | ||
5292 | /* store the MAC address in CAM */ | 5314 | /* store the MAC address in CAM */ |
5293 | return (do_s2io_prog_unicast(dev, dev->dev_addr)); | 5315 | return do_s2io_prog_unicast(dev, dev->dev_addr); |
5294 | } | 5316 | } |
5295 | /** | 5317 | /** |
5296 | * do_s2io_prog_unicast - Programs the Xframe mac address | 5318 | * do_s2io_prog_unicast - Programs the Xframe mac address |
@@ -5311,10 +5333,10 @@ static int do_s2io_prog_unicast(struct net_device *dev, u8 *addr) | |||
5311 | struct config_param *config = &sp->config; | 5333 | struct config_param *config = &sp->config; |
5312 | 5334 | ||
5313 | /* | 5335 | /* |
5314 | * Set the new MAC address as the new unicast filter and reflect this | 5336 | * Set the new MAC address as the new unicast filter and reflect this |
5315 | * change on the device address registered with the OS. It will be | 5337 | * change on the device address registered with the OS. It will be |
5316 | * at offset 0. | 5338 | * at offset 0. |
5317 | */ | 5339 | */ |
5318 | for (i = 0; i < ETH_ALEN; i++) { | 5340 | for (i = 0; i < ETH_ALEN; i++) { |
5319 | mac_addr <<= 8; | 5341 | mac_addr <<= 8; |
5320 | mac_addr |= addr[i]; | 5342 | mac_addr |= addr[i]; |
@@ -5334,8 +5356,8 @@ static int do_s2io_prog_unicast(struct net_device *dev, u8 *addr) | |||
5334 | 5356 | ||
5335 | if (tmp64 == mac_addr) { | 5357 | if (tmp64 == mac_addr) { |
5336 | DBG_PRINT(INFO_DBG, | 5358 | DBG_PRINT(INFO_DBG, |
5337 | "MAC addr:0x%llx already present in CAM\n", | 5359 | "MAC addr:0x%llx already present in CAM\n", |
5338 | (unsigned long long)mac_addr); | 5360 | (unsigned long long)mac_addr); |
5339 | return SUCCESS; | 5361 | return SUCCESS; |
5340 | } | 5362 | } |
5341 | } | 5363 | } |
@@ -5345,7 +5367,8 @@ static int do_s2io_prog_unicast(struct net_device *dev, u8 *addr) | |||
5345 | } | 5367 | } |
5346 | /* Update the internal structure with this new mac address */ | 5368 | /* Update the internal structure with this new mac address */ |
5347 | do_s2io_copy_mac_addr(sp, i, mac_addr); | 5369 | do_s2io_copy_mac_addr(sp, i, mac_addr); |
5348 | return (do_s2io_add_mac(sp, mac_addr, i)); | 5370 | |
5371 | return do_s2io_add_mac(sp, mac_addr, i); | ||
5349 | } | 5372 | } |
5350 | 5373 | ||
5351 | /** | 5374 | /** |
@@ -5358,14 +5381,15 @@ static int do_s2io_prog_unicast(struct net_device *dev, u8 *addr) | |||
5358 | * the NIC. | 5381 | * the NIC. |
5359 | * Return value: | 5382 | * Return value: |
5360 | * 0 on success. | 5383 | * 0 on success. |
5361 | */ | 5384 | */ |
5362 | 5385 | ||
5363 | static int s2io_ethtool_sset(struct net_device *dev, | 5386 | static int s2io_ethtool_sset(struct net_device *dev, |
5364 | struct ethtool_cmd *info) | 5387 | struct ethtool_cmd *info) |
5365 | { | 5388 | { |
5366 | struct s2io_nic *sp = netdev_priv(dev); | 5389 | struct s2io_nic *sp = netdev_priv(dev); |
5367 | if ((info->autoneg == AUTONEG_ENABLE) || | 5390 | if ((info->autoneg == AUTONEG_ENABLE) || |
5368 | (info->speed != SPEED_10000) || (info->duplex != DUPLEX_FULL)) | 5391 | (info->speed != SPEED_10000) || |
5392 | (info->duplex != DUPLEX_FULL)) | ||
5369 | return -EINVAL; | 5393 | return -EINVAL; |
5370 | else { | 5394 | else { |
5371 | s2io_close(sp->dev); | 5395 | s2io_close(sp->dev); |
@@ -5446,14 +5470,14 @@ static void s2io_ethtool_gdrvinfo(struct net_device *dev, | |||
5446 | * buffer area. | 5470 | * buffer area. |
5447 | * Return value : | 5471 | * Return value : |
5448 | * void . | 5472 | * void . |
5449 | */ | 5473 | */ |
5450 | 5474 | ||
5451 | static void s2io_ethtool_gregs(struct net_device *dev, | 5475 | static void s2io_ethtool_gregs(struct net_device *dev, |
5452 | struct ethtool_regs *regs, void *space) | 5476 | struct ethtool_regs *regs, void *space) |
5453 | { | 5477 | { |
5454 | int i; | 5478 | int i; |
5455 | u64 reg; | 5479 | u64 reg; |
5456 | u8 *reg_space = (u8 *) space; | 5480 | u8 *reg_space = (u8 *)space; |
5457 | struct s2io_nic *sp = netdev_priv(dev); | 5481 | struct s2io_nic *sp = netdev_priv(dev); |
5458 | 5482 | ||
5459 | regs->len = XENA_REG_SPACE; | 5483 | regs->len = XENA_REG_SPACE; |
@@ -5473,17 +5497,17 @@ static void s2io_ethtool_gregs(struct net_device *dev, | |||
5473 | * adapter LED bit of the adapter control bit to set/reset every time on | 5497 | * adapter LED bit of the adapter control bit to set/reset every time on |
5474 | * invocation. The timer is set for 1/2 a second, hence the NIC blinks | 5498 |
5475 | * once every second. | 5499 | * once every second. |
5476 | */ | 5500 | */ |
5477 | static void s2io_phy_id(unsigned long data) | 5501 | static void s2io_phy_id(unsigned long data) |
5478 | { | 5502 | { |
5479 | struct s2io_nic *sp = (struct s2io_nic *) data; | 5503 | struct s2io_nic *sp = (struct s2io_nic *)data; |
5480 | struct XENA_dev_config __iomem *bar0 = sp->bar0; | 5504 | struct XENA_dev_config __iomem *bar0 = sp->bar0; |
5481 | u64 val64 = 0; | 5505 | u64 val64 = 0; |
5482 | u16 subid; | 5506 | u16 subid; |
5483 | 5507 | ||
5484 | subid = sp->pdev->subsystem_device; | 5508 | subid = sp->pdev->subsystem_device; |
5485 | if ((sp->device_type == XFRAME_II_DEVICE) || | 5509 | if ((sp->device_type == XFRAME_II_DEVICE) || |
5486 | ((subid & 0xFF) >= 0x07)) { | 5510 | ((subid & 0xFF) >= 0x07)) { |
5487 | val64 = readq(&bar0->gpio_control); | 5511 | val64 = readq(&bar0->gpio_control); |
5488 | val64 ^= GPIO_CTRL_GPIO_0; | 5512 | val64 ^= GPIO_CTRL_GPIO_0; |
5489 | writeq(val64, &bar0->gpio_control); | 5513 | writeq(val64, &bar0->gpio_control); |
@@ -5520,8 +5544,7 @@ static int s2io_ethtool_idnic(struct net_device *dev, u32 data) | |||
5520 | 5544 | ||
5521 | subid = sp->pdev->subsystem_device; | 5545 | subid = sp->pdev->subsystem_device; |
5522 | last_gpio_ctrl_val = readq(&bar0->gpio_control); | 5546 | last_gpio_ctrl_val = readq(&bar0->gpio_control); |
5523 | if ((sp->device_type == XFRAME_I_DEVICE) && | 5547 | if ((sp->device_type == XFRAME_I_DEVICE) && ((subid & 0xFF) < 0x07)) { |
5524 | ((subid & 0xFF) < 0x07)) { | ||
5525 | val64 = readq(&bar0->adapter_control); | 5548 | val64 = readq(&bar0->adapter_control); |
5526 | if (!(val64 & ADAPTER_CNTL_EN)) { | 5549 | if (!(val64 & ADAPTER_CNTL_EN)) { |
5527 | printk(KERN_ERR | 5550 | printk(KERN_ERR |
@@ -5532,7 +5555,7 @@ static int s2io_ethtool_idnic(struct net_device *dev, u32 data) | |||
5532 | if (sp->id_timer.function == NULL) { | 5555 | if (sp->id_timer.function == NULL) { |
5533 | init_timer(&sp->id_timer); | 5556 | init_timer(&sp->id_timer); |
5534 | sp->id_timer.function = s2io_phy_id; | 5557 | sp->id_timer.function = s2io_phy_id; |
5535 | sp->id_timer.data = (unsigned long) sp; | 5558 | sp->id_timer.data = (unsigned long)sp; |
5536 | } | 5559 | } |
5537 | mod_timer(&sp->id_timer, jiffies); | 5560 | mod_timer(&sp->id_timer, jiffies); |
5538 | if (data) | 5561 | if (data) |
@@ -5550,10 +5573,10 @@ static int s2io_ethtool_idnic(struct net_device *dev, u32 data) | |||
5550 | } | 5573 | } |
5551 | 5574 | ||
5552 | static void s2io_ethtool_gringparam(struct net_device *dev, | 5575 | static void s2io_ethtool_gringparam(struct net_device *dev, |
5553 | struct ethtool_ringparam *ering) | 5576 | struct ethtool_ringparam *ering) |
5554 | { | 5577 | { |
5555 | struct s2io_nic *sp = netdev_priv(dev); | 5578 | struct s2io_nic *sp = netdev_priv(dev); |
5556 | int i,tx_desc_count=0,rx_desc_count=0; | 5579 | int i, tx_desc_count = 0, rx_desc_count = 0; |
5557 | 5580 | ||
5558 | if (sp->rxd_mode == RXD_MODE_1) | 5581 | if (sp->rxd_mode == RXD_MODE_1) |
5559 | ering->rx_max_pending = MAX_RX_DESC_1; | 5582 | ering->rx_max_pending = MAX_RX_DESC_1; |
@@ -5564,7 +5587,7 @@ static void s2io_ethtool_gringparam(struct net_device *dev, | |||
5564 | for (i = 0 ; i < sp->config.tx_fifo_num ; i++) | 5587 | for (i = 0 ; i < sp->config.tx_fifo_num ; i++) |
5565 | tx_desc_count += sp->config.tx_cfg[i].fifo_len; | 5588 | tx_desc_count += sp->config.tx_cfg[i].fifo_len; |
5566 | 5589 | ||
5567 | DBG_PRINT(INFO_DBG,"\nmax txds : %d\n",sp->config.max_txds); | 5590 | DBG_PRINT(INFO_DBG, "\nmax txds : %d\n", sp->config.max_txds); |
5568 | ering->tx_pending = tx_desc_count; | 5591 | ering->tx_pending = tx_desc_count; |
5569 | rx_desc_count = 0; | 5592 | rx_desc_count = 0; |
5570 | for (i = 0 ; i < sp->config.rx_ring_num ; i++) | 5593 | for (i = 0 ; i < sp->config.rx_ring_num ; i++) |
@@ -5574,7 +5597,7 @@ static void s2io_ethtool_gringparam(struct net_device *dev, | |||
5574 | 5597 | ||
5575 | ering->rx_mini_max_pending = 0; | 5598 | ering->rx_mini_max_pending = 0; |
5576 | ering->rx_mini_pending = 0; | 5599 | ering->rx_mini_pending = 0; |
5577 | if(sp->rxd_mode == RXD_MODE_1) | 5600 | if (sp->rxd_mode == RXD_MODE_1) |
5578 | ering->rx_jumbo_max_pending = MAX_RX_DESC_1; | 5601 | ering->rx_jumbo_max_pending = MAX_RX_DESC_1; |
5579 | else if (sp->rxd_mode == RXD_MODE_3B) | 5602 | else if (sp->rxd_mode == RXD_MODE_3B) |
5580 | ering->rx_jumbo_max_pending = MAX_RX_DESC_2; | 5603 | ering->rx_jumbo_max_pending = MAX_RX_DESC_2; |
@@ -5619,7 +5642,7 @@ static void s2io_ethtool_getpause_data(struct net_device *dev, | |||
5619 | */ | 5642 | */ |
5620 | 5643 | ||
5621 | static int s2io_ethtool_setpause_data(struct net_device *dev, | 5644 | static int s2io_ethtool_setpause_data(struct net_device *dev, |
5622 | struct ethtool_pauseparam *ep) | 5645 | struct ethtool_pauseparam *ep) |
5623 | { | 5646 | { |
5624 | u64 val64; | 5647 | u64 val64; |
5625 | struct s2io_nic *sp = netdev_priv(dev); | 5648 | struct s2io_nic *sp = netdev_priv(dev); |
@@ -5655,7 +5678,7 @@ static int s2io_ethtool_setpause_data(struct net_device *dev, | |||
5655 | */ | 5678 | */ |
5656 | 5679 | ||
5657 | #define S2IO_DEV_ID 5 | 5680 | #define S2IO_DEV_ID 5 |
5658 | static int read_eeprom(struct s2io_nic * sp, int off, u64 * data) | 5681 | static int read_eeprom(struct s2io_nic *sp, int off, u64 *data) |
5659 | { | 5682 | { |
5660 | int ret = -1; | 5683 | int ret = -1; |
5661 | u32 exit_cnt = 0; | 5684 | u32 exit_cnt = 0; |
@@ -5663,9 +5686,11 @@ static int read_eeprom(struct s2io_nic * sp, int off, u64 * data) | |||
5663 | struct XENA_dev_config __iomem *bar0 = sp->bar0; | 5686 | struct XENA_dev_config __iomem *bar0 = sp->bar0; |
5664 | 5687 | ||
5665 | if (sp->device_type == XFRAME_I_DEVICE) { | 5688 | if (sp->device_type == XFRAME_I_DEVICE) { |
5666 | val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) | | 5689 | val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | |
5667 | I2C_CONTROL_BYTE_CNT(0x3) | I2C_CONTROL_READ | | 5690 | I2C_CONTROL_ADDR(off) | |
5668 | I2C_CONTROL_CNTL_START; | 5691 | I2C_CONTROL_BYTE_CNT(0x3) | |
5692 | I2C_CONTROL_READ | | ||
5693 | I2C_CONTROL_CNTL_START; | ||
5669 | SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF); | 5694 | SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF); |
5670 | 5695 | ||
5671 | while (exit_cnt < 5) { | 5696 | while (exit_cnt < 5) { |
@@ -5720,16 +5745,18 @@ static int read_eeprom(struct s2io_nic * sp, int off, u64 * data) | |||
5720 | * 0 on success, -1 on failure. | 5745 | * 0 on success, -1 on failure. |
5721 | */ | 5746 | */ |
5722 | 5747 | ||
5723 | static int write_eeprom(struct s2io_nic * sp, int off, u64 data, int cnt) | 5748 | static int write_eeprom(struct s2io_nic *sp, int off, u64 data, int cnt) |
5724 | { | 5749 | { |
5725 | int exit_cnt = 0, ret = -1; | 5750 | int exit_cnt = 0, ret = -1; |
5726 | u64 val64; | 5751 | u64 val64; |
5727 | struct XENA_dev_config __iomem *bar0 = sp->bar0; | 5752 | struct XENA_dev_config __iomem *bar0 = sp->bar0; |
5728 | 5753 | ||
5729 | if (sp->device_type == XFRAME_I_DEVICE) { | 5754 | if (sp->device_type == XFRAME_I_DEVICE) { |
5730 | val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) | | 5755 | val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | |
5731 | I2C_CONTROL_BYTE_CNT(cnt) | I2C_CONTROL_SET_DATA((u32)data) | | 5756 | I2C_CONTROL_ADDR(off) | |
5732 | I2C_CONTROL_CNTL_START; | 5757 | I2C_CONTROL_BYTE_CNT(cnt) | |
5758 | I2C_CONTROL_SET_DATA((u32)data) | | ||
5759 | I2C_CONTROL_CNTL_START; | ||
5733 | SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF); | 5760 | SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF); |
5734 | 5761 | ||
5735 | while (exit_cnt < 5) { | 5762 | while (exit_cnt < 5) { |
@@ -5746,7 +5773,7 @@ static int write_eeprom(struct s2io_nic * sp, int off, u64 data, int cnt) | |||
5746 | 5773 | ||
5747 | if (sp->device_type == XFRAME_II_DEVICE) { | 5774 | if (sp->device_type == XFRAME_II_DEVICE) { |
5748 | int write_cnt = (cnt == 8) ? 0 : cnt; | 5775 | int write_cnt = (cnt == 8) ? 0 : cnt; |
5749 | writeq(SPI_DATA_WRITE(data,(cnt<<3)), &bar0->spi_data); | 5776 | writeq(SPI_DATA_WRITE(data, (cnt << 3)), &bar0->spi_data); |
5750 | 5777 | ||
5751 | val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 | | 5778 | val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 | |
5752 | SPI_CONTROL_BYTECNT(write_cnt) | | 5779 | SPI_CONTROL_BYTECNT(write_cnt) | |
@@ -5773,14 +5800,13 @@ static void s2io_vpd_read(struct s2io_nic *nic) | |||
5773 | { | 5800 | { |
5774 | u8 *vpd_data; | 5801 | u8 *vpd_data; |
5775 | u8 data; | 5802 | u8 data; |
5776 | int i=0, cnt, fail = 0; | 5803 | int i = 0, cnt, fail = 0; |
5777 | int vpd_addr = 0x80; | 5804 | int vpd_addr = 0x80; |
5778 | 5805 | ||
5779 | if (nic->device_type == XFRAME_II_DEVICE) { | 5806 | if (nic->device_type == XFRAME_II_DEVICE) { |
5780 | strcpy(nic->product_name, "Xframe II 10GbE network adapter"); | 5807 | strcpy(nic->product_name, "Xframe II 10GbE network adapter"); |
5781 | vpd_addr = 0x80; | 5808 | vpd_addr = 0x80; |
5782 | } | 5809 | } else { |
5783 | else { | ||
5784 | strcpy(nic->product_name, "Xframe I 10GbE network adapter"); | 5810 | strcpy(nic->product_name, "Xframe I 10GbE network adapter"); |
5785 | vpd_addr = 0x50; | 5811 | vpd_addr = 0x50; |
5786 | } | 5812 | } |
@@ -5793,11 +5819,11 @@ static void s2io_vpd_read(struct s2io_nic *nic) | |||
5793 | } | 5819 | } |
5794 | nic->mac_control.stats_info->sw_stat.mem_allocated += 256; | 5820 | nic->mac_control.stats_info->sw_stat.mem_allocated += 256; |
5795 | 5821 | ||
5796 | for (i = 0; i < 256; i +=4 ) { | 5822 | for (i = 0; i < 256; i += 4) { |
5797 | pci_write_config_byte(nic->pdev, (vpd_addr + 2), i); | 5823 | pci_write_config_byte(nic->pdev, (vpd_addr + 2), i); |
5798 | pci_read_config_byte(nic->pdev, (vpd_addr + 2), &data); | 5824 | pci_read_config_byte(nic->pdev, (vpd_addr + 2), &data); |
5799 | pci_write_config_byte(nic->pdev, (vpd_addr + 3), 0); | 5825 | pci_write_config_byte(nic->pdev, (vpd_addr + 3), 0); |
5800 | for (cnt = 0; cnt <5; cnt++) { | 5826 | for (cnt = 0; cnt < 5; cnt++) { |
5801 | msleep(2); | 5827 | msleep(2); |
5802 | pci_read_config_byte(nic->pdev, (vpd_addr + 3), &data); | 5828 | pci_read_config_byte(nic->pdev, (vpd_addr + 3), &data); |
5803 | if (data == 0x80) | 5829 | if (data == 0x80) |
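The loop above follows the generic PCI VPD capability access pattern: write the word address, poll the flag byte until the device reports 0x80, then read the 32-bit data register. A minimal sketch using only the standard PCI config accessors (the vpd_addr offset, the 5-try bound, and the helper name are assumptions mirroring the driver's 0x80/0x50 probing, not code from this patch):

#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/types.h>

/* Sketch only: generic PCI VPD dword read.  'vpd_addr' is the VPD
 * capability offset (the driver probes 0x80 or 0x50 per adapter type). */
static int vpd_read_dword_sketch(struct pci_dev *pdev, int vpd_addr,
                                 u8 off, u32 *val)
{
        u8 flag = 0;
        int cnt;

        /* select the VPD word and clear the data-ready flag */
        pci_write_config_byte(pdev, vpd_addr + 2, off);
        pci_write_config_byte(pdev, vpd_addr + 3, 0);

        /* bounded poll until the device reports the word is ready (0x80) */
        for (cnt = 0; cnt < 5; cnt++) {
                msleep(2);
                pci_read_config_byte(pdev, vpd_addr + 3, &flag);
                if (flag == 0x80)
                        break;
        }
        if (flag != 0x80)
                return -EIO;

        /* the 32-bit VPD data register follows the address/flag bytes */
        return pci_read_config_dword(pdev, vpd_addr + 4, val);
}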
@@ -5812,15 +5838,15 @@ static void s2io_vpd_read(struct s2io_nic *nic) | |||
5812 | (u32 *)&vpd_data[i]); | 5838 | (u32 *)&vpd_data[i]); |
5813 | } | 5839 | } |
5814 | 5840 | ||
5815 | if(!fail) { | 5841 | if (!fail) { |
5816 | /* read serial number of adapter */ | 5842 | /* read serial number of adapter */ |
5817 | for (cnt = 0; cnt < 256; cnt++) { | 5843 | for (cnt = 0; cnt < 256; cnt++) { |
5818 | if ((vpd_data[cnt] == 'S') && | 5844 | if ((vpd_data[cnt] == 'S') && |
5819 | (vpd_data[cnt+1] == 'N') && | 5845 | (vpd_data[cnt+1] == 'N') && |
5820 | (vpd_data[cnt+2] < VPD_STRING_LEN)) { | 5846 | (vpd_data[cnt+2] < VPD_STRING_LEN)) { |
5821 | memset(nic->serial_num, 0, VPD_STRING_LEN); | 5847 | memset(nic->serial_num, 0, VPD_STRING_LEN); |
5822 | memcpy(nic->serial_num, &vpd_data[cnt + 3], | 5848 | memcpy(nic->serial_num, &vpd_data[cnt + 3], |
5823 | vpd_data[cnt+2]); | 5849 | vpd_data[cnt+2]); |
5824 | break; | 5850 | break; |
5825 | } | 5851 | } |
5826 | } | 5852 | } |
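The serial-number lookup above scans the raw VPD bytes for the two-character "SN" keyword, whose following byte gives the value length. A self-contained sketch of that scan (the buffer and length parameters are generalized here; the driver uses a fixed 256-byte buffer, its VPD_STRING_LEN bound, and copies into nic->serial_num):

#include <linux/string.h>
#include <linux/types.h>

/* Sketch only: find the "SN" keyword in a raw VPD byte buffer and copy
 * out its value, with explicit bounds checks added for illustration. */
static void vpd_find_serial_sketch(const u8 *vpd_data, size_t vpd_len,
                                   char *serial, size_t serial_len)
{
        size_t cnt;

        for (cnt = 0; cnt + 3 <= vpd_len; cnt++) {
                u8 len = vpd_data[cnt + 2];

                if (vpd_data[cnt] == 'S' && vpd_data[cnt + 1] == 'N' &&
                    len < serial_len && cnt + 3 + len <= vpd_len) {
                        memset(serial, 0, serial_len);
                        memcpy(serial, &vpd_data[cnt + 3], len);
                        break;
                }
        }
}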
@@ -5848,7 +5874,7 @@ static void s2io_vpd_read(struct s2io_nic *nic) | |||
5848 | */ | 5874 | */ |
5849 | 5875 | ||
5850 | static int s2io_ethtool_geeprom(struct net_device *dev, | 5876 | static int s2io_ethtool_geeprom(struct net_device *dev, |
5851 | struct ethtool_eeprom *eeprom, u8 * data_buf) | 5877 | struct ethtool_eeprom *eeprom, u8 * data_buf) |
5852 | { | 5878 | { |
5853 | u32 i, valid; | 5879 | u32 i, valid; |
5854 | u64 data; | 5880 | u64 data; |
@@ -5886,7 +5912,7 @@ static int s2io_ethtool_geeprom(struct net_device *dev, | |||
5886 | 5912 | ||
5887 | static int s2io_ethtool_seeprom(struct net_device *dev, | 5913 | static int s2io_ethtool_seeprom(struct net_device *dev, |
5888 | struct ethtool_eeprom *eeprom, | 5914 | struct ethtool_eeprom *eeprom, |
5889 | u8 * data_buf) | 5915 | u8 *data_buf) |
5890 | { | 5916 | { |
5891 | int len = eeprom->len, cnt = 0; | 5917 | int len = eeprom->len, cnt = 0; |
5892 | u64 valid = 0, data; | 5918 | u64 valid = 0, data; |
@@ -5895,16 +5921,15 @@ static int s2io_ethtool_seeprom(struct net_device *dev, | |||
5895 | if (eeprom->magic != (sp->pdev->vendor | (sp->pdev->device << 16))) { | 5921 | if (eeprom->magic != (sp->pdev->vendor | (sp->pdev->device << 16))) { |
5896 | DBG_PRINT(ERR_DBG, | 5922 | DBG_PRINT(ERR_DBG, |
5897 | "ETHTOOL_WRITE_EEPROM Err: Magic value "); | 5923 | "ETHTOOL_WRITE_EEPROM Err: Magic value "); |
5898 | DBG_PRINT(ERR_DBG, "is wrong, Its not 0x%x\n", | 5924 | DBG_PRINT(ERR_DBG, "is wrong, Its not 0x%x\n", eeprom->magic); |
5899 | eeprom->magic); | ||
5900 | return -EFAULT; | 5925 | return -EFAULT; |
5901 | } | 5926 | } |
5902 | 5927 | ||
5903 | while (len) { | 5928 | while (len) { |
5904 | data = (u32) data_buf[cnt] & 0x000000FF; | 5929 | data = (u32)data_buf[cnt] & 0x000000FF; |
5905 | if (data) { | 5930 | if (data) |
5906 | valid = (u32) (data << 24); | 5931 | valid = (u32)(data << 24); |
5907 | } else | 5932 | else |
5908 | valid = data; | 5933 | valid = data; |
5909 | 5934 | ||
5910 | if (write_eeprom(sp, (eeprom->offset + cnt), valid, 0)) { | 5935 | if (write_eeprom(sp, (eeprom->offset + cnt), valid, 0)) { |
@@ -5934,7 +5959,7 @@ static int s2io_ethtool_seeprom(struct net_device *dev, | |||
5934 | * 0 on success. | 5959 | * 0 on success. |
5935 | */ | 5960 | */ |
5936 | 5961 | ||
5937 | static int s2io_register_test(struct s2io_nic * sp, uint64_t * data) | 5962 | static int s2io_register_test(struct s2io_nic *sp, uint64_t *data) |
5938 | { | 5963 | { |
5939 | struct XENA_dev_config __iomem *bar0 = sp->bar0; | 5964 | struct XENA_dev_config __iomem *bar0 = sp->bar0; |
5940 | u64 val64 = 0, exp_val; | 5965 | u64 val64 = 0, exp_val; |
@@ -6001,7 +6026,7 @@ static int s2io_register_test(struct s2io_nic * sp, uint64_t * data) | |||
6001 | * 0 on success. | 6026 | * 0 on success. |
6002 | */ | 6027 | */ |
6003 | 6028 | ||
6004 | static int s2io_eeprom_test(struct s2io_nic * sp, uint64_t * data) | 6029 | static int s2io_eeprom_test(struct s2io_nic *sp, uint64_t *data) |
6005 | { | 6030 | { |
6006 | int fail = 0; | 6031 | int fail = 0; |
6007 | u64 ret_data, org_4F0, org_7F0; | 6032 | u64 ret_data, org_4F0, org_7F0; |
@@ -6030,9 +6055,9 @@ static int s2io_eeprom_test(struct s2io_nic * sp, uint64_t * data) | |||
6030 | 6055 | ||
6031 | if (ret_data != 0x012345) { | 6056 | if (ret_data != 0x012345) { |
6032 | DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x4F0. " | 6057 | DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x4F0. " |
6033 | "Data written %llx Data read %llx\n", | 6058 | "Data written %llx Data read %llx\n", |
6034 | dev->name, (unsigned long long)0x12345, | 6059 | dev->name, (unsigned long long)0x12345, |
6035 | (unsigned long long)ret_data); | 6060 | (unsigned long long)ret_data); |
6036 | fail = 1; | 6061 | fail = 1; |
6037 | } | 6062 | } |
6038 | 6063 | ||
@@ -6052,9 +6077,9 @@ static int s2io_eeprom_test(struct s2io_nic * sp, uint64_t * data) | |||
6052 | 6077 | ||
6053 | if (ret_data != 0x012345) { | 6078 | if (ret_data != 0x012345) { |
6054 | DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x7F0. " | 6079 | DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x7F0. " |
6055 | "Data written %llx Data read %llx\n", | 6080 | "Data written %llx Data read %llx\n", |
6056 | dev->name, (unsigned long long)0x12345, | 6081 | dev->name, (unsigned long long)0x12345, |
6057 | (unsigned long long)ret_data); | 6082 | (unsigned long long)ret_data); |
6058 | fail = 1; | 6083 | fail = 1; |
6059 | } | 6084 | } |
6060 | 6085 | ||
@@ -6103,7 +6128,7 @@ static int s2io_eeprom_test(struct s2io_nic * sp, uint64_t * data) | |||
6103 | * 0 on success and -1 on failure. | 6128 | * 0 on success and -1 on failure. |
6104 | */ | 6129 | */ |
6105 | 6130 | ||
6106 | static int s2io_bist_test(struct s2io_nic * sp, uint64_t * data) | 6131 | static int s2io_bist_test(struct s2io_nic *sp, uint64_t *data) |
6107 | { | 6132 | { |
6108 | u8 bist = 0; | 6133 | u8 bist = 0; |
6109 | int cnt = 0, ret = -1; | 6134 | int cnt = 0, ret = -1; |
@@ -6139,13 +6164,13 @@ static int s2io_bist_test(struct s2io_nic * sp, uint64_t * data) | |||
6139 | * 0 on success. | 6164 | * 0 on success. |
6140 | */ | 6165 | */ |
6141 | 6166 | ||
6142 | static int s2io_link_test(struct s2io_nic * sp, uint64_t * data) | 6167 | static int s2io_link_test(struct s2io_nic *sp, uint64_t *data) |
6143 | { | 6168 | { |
6144 | struct XENA_dev_config __iomem *bar0 = sp->bar0; | 6169 | struct XENA_dev_config __iomem *bar0 = sp->bar0; |
6145 | u64 val64; | 6170 | u64 val64; |
6146 | 6171 | ||
6147 | val64 = readq(&bar0->adapter_status); | 6172 | val64 = readq(&bar0->adapter_status); |
6148 | if(!(LINK_IS_UP(val64))) | 6173 | if (!(LINK_IS_UP(val64))) |
6149 | *data = 1; | 6174 | *data = 1; |
6150 | else | 6175 | else |
6151 | *data = 0; | 6176 | *data = 0; |
@@ -6166,7 +6191,7 @@ static int s2io_link_test(struct s2io_nic * sp, uint64_t * data) | |||
6166 | * 0 on success. | 6191 | * 0 on success. |
6167 | */ | 6192 | */ |
6168 | 6193 | ||
6169 | static int s2io_rldram_test(struct s2io_nic * sp, uint64_t * data) | 6194 | static int s2io_rldram_test(struct s2io_nic *sp, uint64_t *data) |
6170 | { | 6195 | { |
6171 | struct XENA_dev_config __iomem *bar0 = sp->bar0; | 6196 | struct XENA_dev_config __iomem *bar0 = sp->bar0; |
6172 | u64 val64; | 6197 | u64 val64; |
@@ -6189,28 +6214,26 @@ static int s2io_rldram_test(struct s2io_nic * sp, uint64_t * data) | |||
6189 | 6214 | ||
6190 | while (iteration < 2) { | 6215 | while (iteration < 2) { |
6191 | val64 = 0x55555555aaaa0000ULL; | 6216 | val64 = 0x55555555aaaa0000ULL; |
6192 | if (iteration == 1) { | 6217 | if (iteration == 1) |
6193 | val64 ^= 0xFFFFFFFFFFFF0000ULL; | 6218 | val64 ^= 0xFFFFFFFFFFFF0000ULL; |
6194 | } | ||
6195 | writeq(val64, &bar0->mc_rldram_test_d0); | 6219 | writeq(val64, &bar0->mc_rldram_test_d0); |
6196 | 6220 | ||
6197 | val64 = 0xaaaa5a5555550000ULL; | 6221 | val64 = 0xaaaa5a5555550000ULL; |
6198 | if (iteration == 1) { | 6222 | if (iteration == 1) |
6199 | val64 ^= 0xFFFFFFFFFFFF0000ULL; | 6223 | val64 ^= 0xFFFFFFFFFFFF0000ULL; |
6200 | } | ||
6201 | writeq(val64, &bar0->mc_rldram_test_d1); | 6224 | writeq(val64, &bar0->mc_rldram_test_d1); |
6202 | 6225 | ||
6203 | val64 = 0x55aaaaaaaa5a0000ULL; | 6226 | val64 = 0x55aaaaaaaa5a0000ULL; |
6204 | if (iteration == 1) { | 6227 | if (iteration == 1) |
6205 | val64 ^= 0xFFFFFFFFFFFF0000ULL; | 6228 | val64 ^= 0xFFFFFFFFFFFF0000ULL; |
6206 | } | ||
6207 | writeq(val64, &bar0->mc_rldram_test_d2); | 6229 | writeq(val64, &bar0->mc_rldram_test_d2); |
6208 | 6230 | ||
6209 | val64 = (u64) (0x0000003ffffe0100ULL); | 6231 | val64 = (u64) (0x0000003ffffe0100ULL); |
6210 | writeq(val64, &bar0->mc_rldram_test_add); | 6232 | writeq(val64, &bar0->mc_rldram_test_add); |
6211 | 6233 | ||
6212 | val64 = MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_WRITE | | 6234 | val64 = MC_RLDRAM_TEST_MODE | |
6213 | MC_RLDRAM_TEST_GO; | 6235 | MC_RLDRAM_TEST_WRITE | |
6236 | MC_RLDRAM_TEST_GO; | ||
6214 | SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF); | 6237 | SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF); |
6215 | 6238 | ||
6216 | for (cnt = 0; cnt < 5; cnt++) { | 6239 | for (cnt = 0; cnt < 5; cnt++) { |
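The RLDRAM test above writes fixed 64-bit patterns, inverts their upper 48 bits on the second pass so each data bit is driven in both states, then kicks the hardware test and polls for completion. A rough sketch of that write/kick/poll shape (the register arguments, delay, completion bit, and function name are placeholders, not the adapter's real layout):

#include <linux/io.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/types.h>

/* Sketch only: one pass of a pattern-based memory test.  Assumes 64-bit
 * readq/writeq are available on the build. */
static int mem_test_pass_sketch(void __iomem *data_reg, void __iomem *ctrl_reg,
                                u64 pattern, int iteration,
                                u64 go_bits, u64 done_bit)
{
        u64 val64 = pattern;
        int cnt;

        if (iteration == 1)
                val64 ^= 0xFFFFFFFFFFFF0000ULL; /* second pass: inverted data */

        writeq(val64, data_reg);        /* load the test pattern */
        writeq(go_bits, ctrl_reg);      /* kick off the hardware test */

        for (cnt = 0; cnt < 5; cnt++) { /* bounded completion poll */
                msleep(100);
                if (readq(ctrl_reg) & done_bit)
                        return 0;
        }
        return -ETIMEDOUT;
}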
@@ -6268,7 +6291,7 @@ static int s2io_rldram_test(struct s2io_nic * sp, uint64_t * data) | |||
6268 | 6291 | ||
6269 | static void s2io_ethtool_test(struct net_device *dev, | 6292 | static void s2io_ethtool_test(struct net_device *dev, |
6270 | struct ethtool_test *ethtest, | 6293 | struct ethtool_test *ethtest, |
6271 | uint64_t * data) | 6294 | uint64_t *data) |
6272 | { | 6295 | { |
6273 | struct s2io_nic *sp = netdev_priv(dev); | 6296 | struct s2io_nic *sp = netdev_priv(dev); |
6274 | int orig_state = netif_running(sp->dev); | 6297 | int orig_state = netif_running(sp->dev); |
@@ -6301,8 +6324,7 @@ static void s2io_ethtool_test(struct net_device *dev, | |||
6301 | } else { | 6324 | } else { |
6302 | /* Online Tests. */ | 6325 | /* Online Tests. */ |
6303 | if (!orig_state) { | 6326 | if (!orig_state) { |
6304 | DBG_PRINT(ERR_DBG, | 6327 | DBG_PRINT(ERR_DBG, "%s: is not up, cannot run test\n", |
6305 | "%s: is not up, cannot run test\n", | ||
6306 | dev->name); | 6328 | dev->name); |
6307 | data[0] = -1; | 6329 | data[0] = -1; |
6308 | data[1] = -1; | 6330 | data[1] = -1; |
@@ -6323,7 +6345,7 @@ static void s2io_ethtool_test(struct net_device *dev, | |||
6323 | 6345 | ||
6324 | static void s2io_get_ethtool_stats(struct net_device *dev, | 6346 | static void s2io_get_ethtool_stats(struct net_device *dev, |
6325 | struct ethtool_stats *estats, | 6347 | struct ethtool_stats *estats, |
6326 | u64 * tmp_stats) | 6348 | u64 *tmp_stats) |
6327 | { | 6349 | { |
6328 | int i = 0, k; | 6350 | int i = 0, k; |
6329 | struct s2io_nic *sp = netdev_priv(dev); | 6351 | struct s2io_nic *sp = netdev_priv(dev); |
@@ -6344,19 +6366,19 @@ static void s2io_get_ethtool_stats(struct net_device *dev, | |||
6344 | (u64)le32_to_cpu(stat_info->tmac_bcst_frms_oflow) << 32 | | 6366 | (u64)le32_to_cpu(stat_info->tmac_bcst_frms_oflow) << 32 | |
6345 | le32_to_cpu(stat_info->tmac_bcst_frms); | 6367 | le32_to_cpu(stat_info->tmac_bcst_frms); |
6346 | tmp_stats[i++] = le64_to_cpu(stat_info->tmac_pause_ctrl_frms); | 6368 | tmp_stats[i++] = le64_to_cpu(stat_info->tmac_pause_ctrl_frms); |
6347 | tmp_stats[i++] = | ||
6348 | (u64)le32_to_cpu(stat_info->tmac_ttl_octets_oflow) << 32 | | ||
6349 | le32_to_cpu(stat_info->tmac_ttl_octets); | ||
6350 | tmp_stats[i++] = | 6369 | tmp_stats[i++] = |
6351 | (u64)le32_to_cpu(stat_info->tmac_ucst_frms_oflow) << 32 | | 6370 | (u64)le32_to_cpu(stat_info->tmac_ttl_octets_oflow) << 32 | |
6352 | le32_to_cpu(stat_info->tmac_ucst_frms); | 6371 | le32_to_cpu(stat_info->tmac_ttl_octets); |
6353 | tmp_stats[i++] = | 6372 | tmp_stats[i++] = |
6354 | (u64)le32_to_cpu(stat_info->tmac_nucst_frms_oflow) << 32 | | 6373 | (u64)le32_to_cpu(stat_info->tmac_ucst_frms_oflow) << 32 | |
6355 | le32_to_cpu(stat_info->tmac_nucst_frms); | 6374 | le32_to_cpu(stat_info->tmac_ucst_frms); |
6375 | tmp_stats[i++] = | ||
6376 | (u64)le32_to_cpu(stat_info->tmac_nucst_frms_oflow) << 32 | | ||
6377 | le32_to_cpu(stat_info->tmac_nucst_frms); | ||
6356 | tmp_stats[i++] = | 6378 | tmp_stats[i++] = |
6357 | (u64)le32_to_cpu(stat_info->tmac_any_err_frms_oflow) << 32 | | 6379 | (u64)le32_to_cpu(stat_info->tmac_any_err_frms_oflow) << 32 | |
6358 | le32_to_cpu(stat_info->tmac_any_err_frms); | 6380 | le32_to_cpu(stat_info->tmac_any_err_frms); |
6359 | tmp_stats[i++] = le64_to_cpu(stat_info->tmac_ttl_less_fb_octets); | 6381 | tmp_stats[i++] = le64_to_cpu(stat_info->tmac_ttl_less_fb_octets); |
6360 | tmp_stats[i++] = le64_to_cpu(stat_info->tmac_vld_ip_octets); | 6382 | tmp_stats[i++] = le64_to_cpu(stat_info->tmac_vld_ip_octets); |
6361 | tmp_stats[i++] = | 6383 | tmp_stats[i++] = |
6362 | (u64)le32_to_cpu(stat_info->tmac_vld_ip_oflow) << 32 | | 6384 | (u64)le32_to_cpu(stat_info->tmac_vld_ip_oflow) << 32 | |
@@ -6392,23 +6414,23 @@ static void s2io_get_ethtool_stats(struct net_device *dev, | |||
6392 | tmp_stats[i++] = le64_to_cpu(stat_info->rmac_long_frms); | 6414 | tmp_stats[i++] = le64_to_cpu(stat_info->rmac_long_frms); |
6393 | tmp_stats[i++] = le64_to_cpu(stat_info->rmac_pause_ctrl_frms); | 6415 | tmp_stats[i++] = le64_to_cpu(stat_info->rmac_pause_ctrl_frms); |
6394 | tmp_stats[i++] = le64_to_cpu(stat_info->rmac_unsup_ctrl_frms); | 6416 | tmp_stats[i++] = le64_to_cpu(stat_info->rmac_unsup_ctrl_frms); |
6395 | tmp_stats[i++] = | 6417 | tmp_stats[i++] = |
6396 | (u64)le32_to_cpu(stat_info->rmac_ttl_octets_oflow) << 32 | | 6418 | (u64)le32_to_cpu(stat_info->rmac_ttl_octets_oflow) << 32 | |
6397 | le32_to_cpu(stat_info->rmac_ttl_octets); | 6419 | le32_to_cpu(stat_info->rmac_ttl_octets); |
6398 | tmp_stats[i++] = | ||
6399 | (u64)le32_to_cpu(stat_info->rmac_accepted_ucst_frms_oflow) | ||
6400 | << 32 | le32_to_cpu(stat_info->rmac_accepted_ucst_frms); | ||
6401 | tmp_stats[i++] = | 6420 | tmp_stats[i++] = |
6402 | (u64)le32_to_cpu(stat_info->rmac_accepted_nucst_frms_oflow) | 6421 | (u64)le32_to_cpu(stat_info->rmac_accepted_ucst_frms_oflow) << 32 |
6403 | << 32 | le32_to_cpu(stat_info->rmac_accepted_nucst_frms); | 6422 | | le32_to_cpu(stat_info->rmac_accepted_ucst_frms); |
6423 | tmp_stats[i++] = | ||
6424 | (u64)le32_to_cpu(stat_info->rmac_accepted_nucst_frms_oflow) | ||
6425 | << 32 | le32_to_cpu(stat_info->rmac_accepted_nucst_frms); | ||
6404 | tmp_stats[i++] = | 6426 | tmp_stats[i++] = |
6405 | (u64)le32_to_cpu(stat_info->rmac_discarded_frms_oflow) << 32 | | 6427 | (u64)le32_to_cpu(stat_info->rmac_discarded_frms_oflow) << 32 | |
6406 | le32_to_cpu(stat_info->rmac_discarded_frms); | 6428 | le32_to_cpu(stat_info->rmac_discarded_frms); |
6407 | tmp_stats[i++] = | 6429 | tmp_stats[i++] = |
6408 | (u64)le32_to_cpu(stat_info->rmac_drop_events_oflow) | 6430 | (u64)le32_to_cpu(stat_info->rmac_drop_events_oflow) |
6409 | << 32 | le32_to_cpu(stat_info->rmac_drop_events); | 6431 | << 32 | le32_to_cpu(stat_info->rmac_drop_events); |
6410 | tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_less_fb_octets); | 6432 | tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_less_fb_octets); |
6411 | tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_frms); | 6433 | tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_frms); |
6412 | tmp_stats[i++] = | 6434 | tmp_stats[i++] = |
6413 | (u64)le32_to_cpu(stat_info->rmac_usized_frms_oflow) << 32 | | 6435 | (u64)le32_to_cpu(stat_info->rmac_usized_frms_oflow) << 32 | |
6414 | le32_to_cpu(stat_info->rmac_usized_frms); | 6436 | le32_to_cpu(stat_info->rmac_usized_frms); |
@@ -6422,11 +6444,11 @@ static void s2io_get_ethtool_stats(struct net_device *dev, | |||
6422 | (u64)le32_to_cpu(stat_info->rmac_jabber_frms_oflow) << 32 | | 6444 | (u64)le32_to_cpu(stat_info->rmac_jabber_frms_oflow) << 32 | |
6423 | le32_to_cpu(stat_info->rmac_jabber_frms); | 6445 | le32_to_cpu(stat_info->rmac_jabber_frms); |
6424 | tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_64_frms); | 6446 | tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_64_frms); |
6425 | tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_65_127_frms); | 6447 | tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_65_127_frms); |
6426 | tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_128_255_frms); | 6448 | tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_128_255_frms); |
6427 | tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_256_511_frms); | 6449 | tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_256_511_frms); |
6428 | tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_512_1023_frms); | 6450 | tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_512_1023_frms); |
6429 | tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_1024_1518_frms); | 6451 | tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_1024_1518_frms); |
6430 | tmp_stats[i++] = | 6452 | tmp_stats[i++] = |
6431 | (u64)le32_to_cpu(stat_info->rmac_ip_oflow) << 32 | | 6453 | (u64)le32_to_cpu(stat_info->rmac_ip_oflow) << 32 | |
6432 | le32_to_cpu(stat_info->rmac_ip); | 6454 | le32_to_cpu(stat_info->rmac_ip); |
@@ -6446,27 +6468,27 @@ static void s2io_get_ethtool_stats(struct net_device *dev, | |||
6446 | (u64)le32_to_cpu(stat_info->rmac_err_drp_udp_oflow) << 32 | | 6468 | (u64)le32_to_cpu(stat_info->rmac_err_drp_udp_oflow) << 32 | |
6447 | le32_to_cpu(stat_info->rmac_err_drp_udp); | 6469 | le32_to_cpu(stat_info->rmac_err_drp_udp); |
6448 | tmp_stats[i++] = le64_to_cpu(stat_info->rmac_xgmii_err_sym); | 6470 | tmp_stats[i++] = le64_to_cpu(stat_info->rmac_xgmii_err_sym); |
6449 | tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q0); | 6471 | tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q0); |
6450 | tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q1); | 6472 | tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q1); |
6451 | tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q2); | 6473 | tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q2); |
6452 | tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q3); | 6474 | tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q3); |
6453 | tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q4); | 6475 | tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q4); |
6454 | tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q5); | 6476 | tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q5); |
6455 | tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q6); | 6477 | tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q6); |
6456 | tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q7); | 6478 | tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q7); |
6457 | tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q0); | 6479 | tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q0); |
6458 | tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q1); | 6480 | tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q1); |
6459 | tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q2); | 6481 | tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q2); |
6460 | tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q3); | 6482 | tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q3); |
6461 | tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q4); | 6483 | tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q4); |
6462 | tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q5); | 6484 | tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q5); |
6463 | tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q6); | 6485 | tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q6); |
6464 | tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q7); | 6486 | tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q7); |
6465 | tmp_stats[i++] = | 6487 | tmp_stats[i++] = |
6466 | (u64)le32_to_cpu(stat_info->rmac_pause_cnt_oflow) << 32 | | 6488 | (u64)le32_to_cpu(stat_info->rmac_pause_cnt_oflow) << 32 | |
6467 | le32_to_cpu(stat_info->rmac_pause_cnt); | 6489 | le32_to_cpu(stat_info->rmac_pause_cnt); |
6468 | tmp_stats[i++] = le64_to_cpu(stat_info->rmac_xgmii_data_err_cnt); | 6490 | tmp_stats[i++] = le64_to_cpu(stat_info->rmac_xgmii_data_err_cnt); |
6469 | tmp_stats[i++] = le64_to_cpu(stat_info->rmac_xgmii_ctrl_err_cnt); | 6491 | tmp_stats[i++] = le64_to_cpu(stat_info->rmac_xgmii_ctrl_err_cnt); |
6470 | tmp_stats[i++] = | 6492 | tmp_stats[i++] = |
6471 | (u64)le32_to_cpu(stat_info->rmac_accepted_ip_oflow) << 32 | | 6493 | (u64)le32_to_cpu(stat_info->rmac_accepted_ip_oflow) << 32 | |
6472 | le32_to_cpu(stat_info->rmac_accepted_ip); | 6494 | le32_to_cpu(stat_info->rmac_accepted_ip); |
@@ -6491,13 +6513,13 @@ static void s2io_get_ethtool_stats(struct net_device *dev, | |||
6491 | tmp_stats[i++] = le32_to_cpu(stat_info->rxf_wr_cnt); | 6513 | tmp_stats[i++] = le32_to_cpu(stat_info->rxf_wr_cnt); |
6492 | 6514 | ||
6493 | /* Enhanced statistics exist only for Hercules */ | 6515 | /* Enhanced statistics exist only for Hercules */ |
6494 | if(sp->device_type == XFRAME_II_DEVICE) { | 6516 | if (sp->device_type == XFRAME_II_DEVICE) { |
6495 | tmp_stats[i++] = | 6517 | tmp_stats[i++] = |
6496 | le64_to_cpu(stat_info->rmac_ttl_1519_4095_frms); | 6518 | le64_to_cpu(stat_info->rmac_ttl_1519_4095_frms); |
6497 | tmp_stats[i++] = | 6519 | tmp_stats[i++] = |
6498 | le64_to_cpu(stat_info->rmac_ttl_4096_8191_frms); | 6520 | le64_to_cpu(stat_info->rmac_ttl_4096_8191_frms); |
6499 | tmp_stats[i++] = | 6521 | tmp_stats[i++] = |
6500 | le64_to_cpu(stat_info->rmac_ttl_8192_max_frms); | 6522 | le64_to_cpu(stat_info->rmac_ttl_8192_max_frms); |
6501 | tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_gt_max_frms); | 6523 | tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_gt_max_frms); |
6502 | tmp_stats[i++] = le64_to_cpu(stat_info->rmac_osized_alt_frms); | 6524 | tmp_stats[i++] = le64_to_cpu(stat_info->rmac_osized_alt_frms); |
6503 | tmp_stats[i++] = le64_to_cpu(stat_info->rmac_jabber_alt_frms); | 6525 | tmp_stats[i++] = le64_to_cpu(stat_info->rmac_jabber_alt_frms); |
@@ -6550,8 +6572,7 @@ static void s2io_get_ethtool_stats(struct net_device *dev, | |||
6550 | count++; | 6572 | count++; |
6551 | } | 6573 | } |
6552 | tmp_stats[i++] = count; | 6574 | tmp_stats[i++] = count; |
6553 | } | 6575 | } else |
6554 | else | ||
6555 | tmp_stats[i++] = 0; | 6576 | tmp_stats[i++] = 0; |
6556 | tmp_stats[i++] = stat_info->sw_stat.mem_alloc_fail_cnt; | 6577 | tmp_stats[i++] = stat_info->sw_stat.mem_alloc_fail_cnt; |
6557 | tmp_stats[i++] = stat_info->sw_stat.pci_map_fail_cnt; | 6578 | tmp_stats[i++] = stat_info->sw_stat.pci_map_fail_cnt; |
@@ -6599,15 +6620,15 @@ static void s2io_get_ethtool_stats(struct net_device *dev, | |||
6599 | 6620 | ||
6600 | static int s2io_ethtool_get_regs_len(struct net_device *dev) | 6621 | static int s2io_ethtool_get_regs_len(struct net_device *dev) |
6601 | { | 6622 | { |
6602 | return (XENA_REG_SPACE); | 6623 | return XENA_REG_SPACE; |
6603 | } | 6624 | } |
6604 | 6625 | ||
6605 | 6626 | ||
6606 | static u32 s2io_ethtool_get_rx_csum(struct net_device * dev) | 6627 | static u32 s2io_ethtool_get_rx_csum(struct net_device *dev) |
6607 | { | 6628 | { |
6608 | struct s2io_nic *sp = netdev_priv(dev); | 6629 | struct s2io_nic *sp = netdev_priv(dev); |
6609 | 6630 | ||
6610 | return (sp->rx_csum); | 6631 | return sp->rx_csum; |
6611 | } | 6632 | } |
6612 | 6633 | ||
6613 | static int s2io_ethtool_set_rx_csum(struct net_device *dev, u32 data) | 6634 | static int s2io_ethtool_set_rx_csum(struct net_device *dev, u32 data) |
@@ -6624,7 +6645,7 @@ static int s2io_ethtool_set_rx_csum(struct net_device *dev, u32 data) | |||
6624 | 6645 | ||
6625 | static int s2io_get_eeprom_len(struct net_device *dev) | 6646 | static int s2io_get_eeprom_len(struct net_device *dev) |
6626 | { | 6647 | { |
6627 | return (XENA_EEPROM_SPACE); | 6648 | return XENA_EEPROM_SPACE; |
6628 | } | 6649 | } |
6629 | 6650 | ||
6630 | static int s2io_get_sset_count(struct net_device *dev, int sset) | 6651 | static int s2io_get_sset_count(struct net_device *dev, int sset) |
@@ -6635,7 +6656,7 @@ static int s2io_get_sset_count(struct net_device *dev, int sset) | |||
6635 | case ETH_SS_TEST: | 6656 | case ETH_SS_TEST: |
6636 | return S2IO_TEST_LEN; | 6657 | return S2IO_TEST_LEN; |
6637 | case ETH_SS_STATS: | 6658 | case ETH_SS_STATS: |
6638 | switch(sp->device_type) { | 6659 | switch (sp->device_type) { |
6639 | case XFRAME_I_DEVICE: | 6660 | case XFRAME_I_DEVICE: |
6640 | return XFRAME_I_STAT_LEN; | 6661 | return XFRAME_I_STAT_LEN; |
6641 | case XFRAME_II_DEVICE: | 6662 | case XFRAME_II_DEVICE: |
@@ -6649,7 +6670,7 @@ static int s2io_get_sset_count(struct net_device *dev, int sset) | |||
6649 | } | 6670 | } |
6650 | 6671 | ||
6651 | static void s2io_ethtool_get_strings(struct net_device *dev, | 6672 | static void s2io_ethtool_get_strings(struct net_device *dev, |
6652 | u32 stringset, u8 * data) | 6673 | u32 stringset, u8 *data) |
6653 | { | 6674 | { |
6654 | int stat_size = 0; | 6675 | int stat_size = 0; |
6655 | struct s2io_nic *sp = netdev_priv(dev); | 6676 | struct s2io_nic *sp = netdev_priv(dev); |
@@ -6660,16 +6681,16 @@ static void s2io_ethtool_get_strings(struct net_device *dev, | |||
6660 | break; | 6681 | break; |
6661 | case ETH_SS_STATS: | 6682 | case ETH_SS_STATS: |
6662 | stat_size = sizeof(ethtool_xena_stats_keys); | 6683 | stat_size = sizeof(ethtool_xena_stats_keys); |
6663 | memcpy(data, ðtool_xena_stats_keys,stat_size); | 6684 | memcpy(data, ðtool_xena_stats_keys, stat_size); |
6664 | if(sp->device_type == XFRAME_II_DEVICE) { | 6685 | if (sp->device_type == XFRAME_II_DEVICE) { |
6665 | memcpy(data + stat_size, | 6686 | memcpy(data + stat_size, |
6666 | ðtool_enhanced_stats_keys, | 6687 | ðtool_enhanced_stats_keys, |
6667 | sizeof(ethtool_enhanced_stats_keys)); | 6688 | sizeof(ethtool_enhanced_stats_keys)); |
6668 | stat_size += sizeof(ethtool_enhanced_stats_keys); | 6689 | stat_size += sizeof(ethtool_enhanced_stats_keys); |
6669 | } | 6690 | } |
6670 | 6691 | ||
6671 | memcpy(data + stat_size, ðtool_driver_stats_keys, | 6692 | memcpy(data + stat_size, ðtool_driver_stats_keys, |
6672 | sizeof(ethtool_driver_stats_keys)); | 6693 | sizeof(ethtool_driver_stats_keys)); |
6673 | } | 6694 | } |
6674 | } | 6695 | } |
6675 | 6696 | ||
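The three ethtool hooks above must agree on one count: get_sset_count() sizes the arrays, get_strings() fills the names, and get_ethtool_stats() fills one u64 per name in the same order. A toy example with invented counters and names (not the driver's stat keys):

#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/kernel.h>
#include <linux/string.h>

/* Toy example only: demonstrates the count/order contract between the
 * three ETH_SS_STATS callbacks. */
static const char demo_stat_keys[][ETH_GSTRING_LEN] = {
        "demo_rx_packets",
        "demo_tx_packets",
};

static int demo_get_sset_count(struct net_device *dev, int sset)
{
        return (sset == ETH_SS_STATS) ?
                ARRAY_SIZE(demo_stat_keys) : -EOPNOTSUPP;
}

static void demo_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
        if (stringset == ETH_SS_STATS)
                memcpy(data, demo_stat_keys, sizeof(demo_stat_keys));
}

static void demo_get_ethtool_stats(struct net_device *dev,
                                   struct ethtool_stats *estats, u64 *tmp_stats)
{
        int i = 0;

        /* one u64 per key, in the same order as demo_stat_keys */
        tmp_stats[i++] = dev->stats.rx_packets;
        tmp_stats[i++] = dev->stats.tx_packets;
}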
@@ -6758,8 +6779,7 @@ static int s2io_change_mtu(struct net_device *dev, int new_mtu) | |||
6758 | int ret = 0; | 6779 | int ret = 0; |
6759 | 6780 | ||
6760 | if ((new_mtu < MIN_MTU) || (new_mtu > S2IO_JUMBO_SIZE)) { | 6781 | if ((new_mtu < MIN_MTU) || (new_mtu > S2IO_JUMBO_SIZE)) { |
6761 | DBG_PRINT(ERR_DBG, "%s: MTU size is invalid.\n", | 6782 | DBG_PRINT(ERR_DBG, "%s: MTU size is invalid.\n", dev->name); |
6762 | dev->name); | ||
6763 | return -EPERM; | 6783 | return -EPERM; |
6764 | } | 6784 | } |
6765 | 6785 | ||
@@ -6792,7 +6812,8 @@ static int s2io_change_mtu(struct net_device *dev, int new_mtu) | |||
6792 | 6812 | ||
6793 | static void s2io_set_link(struct work_struct *work) | 6813 | static void s2io_set_link(struct work_struct *work) |
6794 | { | 6814 | { |
6795 | struct s2io_nic *nic = container_of(work, struct s2io_nic, set_link_task); | 6815 | struct s2io_nic *nic = container_of(work, struct s2io_nic, |
6816 | set_link_task); | ||
6796 | struct net_device *dev = nic->dev; | 6817 | struct net_device *dev = nic->dev; |
6797 | struct XENA_dev_config __iomem *bar0 = nic->bar0; | 6818 | struct XENA_dev_config __iomem *bar0 = nic->bar0; |
6798 | register u64 val64; | 6819 | register u64 val64; |
@@ -6825,7 +6846,7 @@ static void s2io_set_link(struct work_struct *work) | |||
6825 | val64 |= ADAPTER_CNTL_EN; | 6846 | val64 |= ADAPTER_CNTL_EN; |
6826 | writeq(val64, &bar0->adapter_control); | 6847 | writeq(val64, &bar0->adapter_control); |
6827 | if (CARDS_WITH_FAULTY_LINK_INDICATORS( | 6848 | if (CARDS_WITH_FAULTY_LINK_INDICATORS( |
6828 | nic->device_type, subid)) { | 6849 | nic->device_type, subid)) { |
6829 | val64 = readq(&bar0->gpio_control); | 6850 | val64 = readq(&bar0->gpio_control); |
6830 | val64 |= GPIO_CTRL_GPIO_0; | 6851 | val64 |= GPIO_CTRL_GPIO_0; |
6831 | writeq(val64, &bar0->gpio_control); | 6852 | writeq(val64, &bar0->gpio_control); |
@@ -6855,7 +6876,7 @@ static void s2io_set_link(struct work_struct *work) | |||
6855 | } | 6876 | } |
6856 | /* turn off LED */ | 6877 | /* turn off LED */ |
6857 | val64 = readq(&bar0->adapter_control); | 6878 | val64 = readq(&bar0->adapter_control); |
6858 | val64 = val64 &(~ADAPTER_LED_ON); | 6879 | val64 = val64 & (~ADAPTER_LED_ON); |
6859 | writeq(val64, &bar0->adapter_control); | 6880 | writeq(val64, &bar0->adapter_control); |
6860 | s2io_link(nic, LINK_DOWN); | 6881 | s2io_link(nic, LINK_DOWN); |
6861 | } | 6882 | } |
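The LED turn-off above is the usual read-modify-write on a 64-bit adapter register. A generic sketch of that idiom (the helper name and arguments are placeholders; assumes 64-bit readq/writeq are available):

#include <linux/io.h>
#include <linux/types.h>

/* Sketch only: clear bits in a 64-bit memory-mapped register. */
static void demo_clear_reg_bits(void __iomem *reg, u64 bits)
{
        u64 val64 = readq(reg); /* read the current value */

        val64 &= ~bits;         /* drop the requested bits */
        writeq(val64, reg);     /* write the result back */
}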
@@ -6866,9 +6887,9 @@ out_unlock: | |||
6866 | } | 6887 | } |
6867 | 6888 | ||
6868 | static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp, | 6889 | static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp, |
6869 | struct buffAdd *ba, | 6890 | struct buffAdd *ba, |
6870 | struct sk_buff **skb, u64 *temp0, u64 *temp1, | 6891 | struct sk_buff **skb, u64 *temp0, u64 *temp1, |
6871 | u64 *temp2, int size) | 6892 | u64 *temp2, int size) |
6872 | { | 6893 | { |
6873 | struct net_device *dev = sp->dev; | 6894 | struct net_device *dev = sp->dev; |
6874 | struct swStat *stats = &sp->mac_control.stats_info->sw_stat; | 6895 | struct swStat *stats = &sp->mac_control.stats_info->sw_stat; |
@@ -6890,7 +6911,7 @@ static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp, | |||
6890 | DBG_PRINT(INFO_DBG, "%s: Out of ", dev->name); | 6911 | DBG_PRINT(INFO_DBG, "%s: Out of ", dev->name); |
6891 | DBG_PRINT(INFO_DBG, "memory to allocate "); | 6912 | DBG_PRINT(INFO_DBG, "memory to allocate "); |
6892 | DBG_PRINT(INFO_DBG, "1 buf mode SKBs\n"); | 6913 | DBG_PRINT(INFO_DBG, "1 buf mode SKBs\n"); |
6893 | sp->mac_control.stats_info->sw_stat. \ | 6914 | sp->mac_control.stats_info->sw_stat. |
6894 | mem_alloc_fail_cnt++; | 6915 | mem_alloc_fail_cnt++; |
6895 | return -ENOMEM ; | 6916 | return -ENOMEM ; |
6896 | } | 6917 | } |
@@ -6901,9 +6922,9 @@ static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp, | |||
6901 | * Host Control is NULL | 6922 | * Host Control is NULL |
6902 | */ | 6923 | */ |
6903 | rxdp1->Buffer0_ptr = *temp0 = | 6924 | rxdp1->Buffer0_ptr = *temp0 = |
6904 | pci_map_single( sp->pdev, (*skb)->data, | 6925 | pci_map_single(sp->pdev, (*skb)->data, |
6905 | size - NET_IP_ALIGN, | 6926 | size - NET_IP_ALIGN, |
6906 | PCI_DMA_FROMDEVICE); | 6927 | PCI_DMA_FROMDEVICE); |
6907 | if (pci_dma_mapping_error(sp->pdev, rxdp1->Buffer0_ptr)) | 6928 | if (pci_dma_mapping_error(sp->pdev, rxdp1->Buffer0_ptr)) |
6908 | goto memalloc_failed; | 6929 | goto memalloc_failed; |
6909 | rxdp->Host_Control = (unsigned long) (*skb); | 6930 | rxdp->Host_Control = (unsigned long) (*skb); |
@@ -6921,7 +6942,7 @@ static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp, | |||
6921 | DBG_PRINT(INFO_DBG, "%s: Out of ", dev->name); | 6942 | DBG_PRINT(INFO_DBG, "%s: Out of ", dev->name); |
6922 | DBG_PRINT(INFO_DBG, "memory to allocate "); | 6943 | DBG_PRINT(INFO_DBG, "memory to allocate "); |
6923 | DBG_PRINT(INFO_DBG, "2 buf mode SKBs\n"); | 6944 | DBG_PRINT(INFO_DBG, "2 buf mode SKBs\n"); |
6924 | sp->mac_control.stats_info->sw_stat. \ | 6945 | sp->mac_control.stats_info->sw_stat. |
6925 | mem_alloc_fail_cnt++; | 6946 | mem_alloc_fail_cnt++; |
6926 | return -ENOMEM; | 6947 | return -ENOMEM; |
6927 | } | 6948 | } |
@@ -6934,13 +6955,14 @@ static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp, | |||
6934 | if (pci_dma_mapping_error(sp->pdev, rxdp3->Buffer2_ptr)) | 6955 | if (pci_dma_mapping_error(sp->pdev, rxdp3->Buffer2_ptr)) |
6935 | goto memalloc_failed; | 6956 | goto memalloc_failed; |
6936 | rxdp3->Buffer0_ptr = *temp0 = | 6957 | rxdp3->Buffer0_ptr = *temp0 = |
6937 | pci_map_single( sp->pdev, ba->ba_0, BUF0_LEN, | 6958 | pci_map_single(sp->pdev, ba->ba_0, BUF0_LEN, |
6938 | PCI_DMA_FROMDEVICE); | 6959 | PCI_DMA_FROMDEVICE); |
6939 | if (pci_dma_mapping_error(sp->pdev, | 6960 | if (pci_dma_mapping_error(sp->pdev, |
6940 | rxdp3->Buffer0_ptr)) { | 6961 | rxdp3->Buffer0_ptr)) { |
6941 | pci_unmap_single (sp->pdev, | 6962 | pci_unmap_single(sp->pdev, |
6942 | (dma_addr_t)rxdp3->Buffer2_ptr, | 6963 | (dma_addr_t)rxdp3->Buffer2_ptr, |
6943 | dev->mtu + 4, PCI_DMA_FROMDEVICE); | 6964 | dev->mtu + 4, |
6965 | PCI_DMA_FROMDEVICE); | ||
6944 | goto memalloc_failed; | 6966 | goto memalloc_failed; |
6945 | } | 6967 | } |
6946 | rxdp->Host_Control = (unsigned long) (*skb); | 6968 | rxdp->Host_Control = (unsigned long) (*skb); |
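The 2-buffer path above shows the mapping/unwind rule this function follows: when a later pci_map_single() fails, every mapping already made for the descriptor is undone before returning -ENOMEM. A generic sketch of that pattern (the buffer names and helper are not the driver's):

#include <linux/pci.h>
#include <linux/errno.h>

/* Sketch only: map two DMA buffers, unwinding the first mapping if the
 * second one fails, so no mapping is leaked on the error path. */
static int map_two_rx_buffers_sketch(struct pci_dev *pdev,
                                     void *buf0, size_t len0,
                                     void *buf1, size_t len1,
                                     dma_addr_t *dma0, dma_addr_t *dma1)
{
        *dma0 = pci_map_single(pdev, buf0, len0, PCI_DMA_FROMDEVICE);
        if (pci_dma_mapping_error(pdev, *dma0))
                return -ENOMEM;

        *dma1 = pci_map_single(pdev, buf1, len1, PCI_DMA_FROMDEVICE);
        if (pci_dma_mapping_error(pdev, *dma1)) {
                /* undo the first mapping before reporting the failure */
                pci_unmap_single(pdev, *dma0, len0, PCI_DMA_FROMDEVICE);
                return -ENOMEM;
        }
        return 0;
}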
@@ -6948,25 +6970,27 @@ static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp, | |||
6948 | /* Buffer-1 will be dummy buffer not used */ | 6970 | /* Buffer-1 will be dummy buffer not used */ |
6949 | rxdp3->Buffer1_ptr = *temp1 = | 6971 | rxdp3->Buffer1_ptr = *temp1 = |
6950 | pci_map_single(sp->pdev, ba->ba_1, BUF1_LEN, | 6972 | pci_map_single(sp->pdev, ba->ba_1, BUF1_LEN, |
6951 | PCI_DMA_FROMDEVICE); | 6973 | PCI_DMA_FROMDEVICE); |
6952 | if (pci_dma_mapping_error(sp->pdev, | 6974 | if (pci_dma_mapping_error(sp->pdev, |
6953 | rxdp3->Buffer1_ptr)) { | 6975 | rxdp3->Buffer1_ptr)) { |
6954 | pci_unmap_single (sp->pdev, | 6976 | pci_unmap_single(sp->pdev, |
6955 | (dma_addr_t)rxdp3->Buffer0_ptr, | 6977 | (dma_addr_t)rxdp3->Buffer0_ptr, |
6956 | BUF0_LEN, PCI_DMA_FROMDEVICE); | 6978 | BUF0_LEN, PCI_DMA_FROMDEVICE); |
6957 | pci_unmap_single (sp->pdev, | 6979 | pci_unmap_single(sp->pdev, |
6958 | (dma_addr_t)rxdp3->Buffer2_ptr, | 6980 | (dma_addr_t)rxdp3->Buffer2_ptr, |
6959 | dev->mtu + 4, PCI_DMA_FROMDEVICE); | 6981 | dev->mtu + 4, |
6982 | PCI_DMA_FROMDEVICE); | ||
6960 | goto memalloc_failed; | 6983 | goto memalloc_failed; |
6961 | } | 6984 | } |
6962 | } | 6985 | } |
6963 | } | 6986 | } |
6964 | return 0; | 6987 | return 0; |
6965 | memalloc_failed: | 6988 | |
6966 | stats->pci_map_fail_cnt++; | 6989 | memalloc_failed: |
6967 | stats->mem_freed += (*skb)->truesize; | 6990 | stats->pci_map_fail_cnt++; |
6968 | dev_kfree_skb(*skb); | 6991 | stats->mem_freed += (*skb)->truesize; |
6969 | return -ENOMEM; | 6992 | dev_kfree_skb(*skb); |
6993 | return -ENOMEM; | ||
6970 | } | 6994 | } |
6971 | 6995 | ||
6972 | static void set_rxd_buffer_size(struct s2io_nic *sp, struct RxD_t *rxdp, | 6996 | static void set_rxd_buffer_size(struct s2io_nic *sp, struct RxD_t *rxdp, |
@@ -6974,18 +6998,18 @@ static void set_rxd_buffer_size(struct s2io_nic *sp, struct RxD_t *rxdp, | |||
6974 | { | 6998 | { |
6975 | struct net_device *dev = sp->dev; | 6999 | struct net_device *dev = sp->dev; |
6976 | if (sp->rxd_mode == RXD_MODE_1) { | 7000 | if (sp->rxd_mode == RXD_MODE_1) { |
6977 | rxdp->Control_2 = SET_BUFFER0_SIZE_1( size - NET_IP_ALIGN); | 7001 | rxdp->Control_2 = SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN); |
6978 | } else if (sp->rxd_mode == RXD_MODE_3B) { | 7002 | } else if (sp->rxd_mode == RXD_MODE_3B) { |
6979 | rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN); | 7003 | rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN); |
6980 | rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1); | 7004 | rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1); |
6981 | rxdp->Control_2 |= SET_BUFFER2_SIZE_3( dev->mtu + 4); | 7005 | rxdp->Control_2 |= SET_BUFFER2_SIZE_3(dev->mtu + 4); |
6982 | } | 7006 | } |
6983 | } | 7007 | } |
6984 | 7008 | ||
6985 | static int rxd_owner_bit_reset(struct s2io_nic *sp) | 7009 | static int rxd_owner_bit_reset(struct s2io_nic *sp) |
6986 | { | 7010 | { |
6987 | int i, j, k, blk_cnt = 0, size; | 7011 | int i, j, k, blk_cnt = 0, size; |
6988 | struct mac_info * mac_control = &sp->mac_control; | 7012 | struct mac_info *mac_control = &sp->mac_control; |
6989 | struct config_param *config = &sp->config; | 7013 | struct config_param *config = &sp->config; |
6990 | struct net_device *dev = sp->dev; | 7014 | struct net_device *dev = sp->dev; |
6991 | struct RxD_t *rxdp = NULL; | 7015 | struct RxD_t *rxdp = NULL; |
@@ -7005,18 +7029,18 @@ static int rxd_owner_bit_reset(struct s2io_nic *sp) | |||
7005 | struct rx_ring_config *rx_cfg = &config->rx_cfg[i]; | 7029 | struct rx_ring_config *rx_cfg = &config->rx_cfg[i]; |
7006 | struct ring_info *ring = &mac_control->rings[i]; | 7030 | struct ring_info *ring = &mac_control->rings[i]; |
7007 | 7031 | ||
7008 | blk_cnt = rx_cfg->num_rxd / (rxd_count[sp->rxd_mode] +1); | 7032 | blk_cnt = rx_cfg->num_rxd / (rxd_count[sp->rxd_mode] + 1); |
7009 | 7033 | ||
7010 | for (j = 0; j < blk_cnt; j++) { | 7034 | for (j = 0; j < blk_cnt; j++) { |
7011 | for (k = 0; k < rxd_count[sp->rxd_mode]; k++) { | 7035 | for (k = 0; k < rxd_count[sp->rxd_mode]; k++) { |
7012 | rxdp = ring-> rx_blocks[j].rxds[k].virt_addr; | 7036 | rxdp = ring->rx_blocks[j].rxds[k].virt_addr; |
7013 | if(sp->rxd_mode == RXD_MODE_3B) | 7037 | if (sp->rxd_mode == RXD_MODE_3B) |
7014 | ba = &ring->ba[j][k]; | 7038 | ba = &ring->ba[j][k]; |
7015 | if (set_rxd_buffer_pointer(sp, rxdp, ba, | 7039 | if (set_rxd_buffer_pointer(sp, rxdp, ba, &skb, |
7016 | &skb,(u64 *)&temp0_64, | 7040 | (u64 *)&temp0_64, |
7017 | (u64 *)&temp1_64, | 7041 | (u64 *)&temp1_64, |
7018 | (u64 *)&temp2_64, | 7042 | (u64 *)&temp2_64, |
7019 | size) == -ENOMEM) { | 7043 | size) == -ENOMEM) { |
7020 | return 0; | 7044 | return 0; |
7021 | } | 7045 | } |
7022 | 7046 | ||
@@ -7031,7 +7055,7 @@ static int rxd_owner_bit_reset(struct s2io_nic *sp) | |||
7031 | 7055 | ||
7032 | } | 7056 | } |
7033 | 7057 | ||
7034 | static int s2io_add_isr(struct s2io_nic * sp) | 7058 | static int s2io_add_isr(struct s2io_nic *sp) |
7035 | { | 7059 | { |
7036 | int ret = 0; | 7060 | int ret = 0; |
7037 | struct net_device *dev = sp->dev; | 7061 | struct net_device *dev = sp->dev; |
@@ -7044,7 +7068,10 @@ static int s2io_add_isr(struct s2io_nic * sp) | |||
7044 | sp->config.intr_type = INTA; | 7068 | sp->config.intr_type = INTA; |
7045 | } | 7069 | } |
7046 | 7070 | ||
7047 | /* Store the values of the MSIX table in the struct s2io_nic structure */ | 7071 | /* |
7072 | * Store the values of the MSIX table in | ||
7073 | * the struct s2io_nic structure | ||
7074 | */ | ||
7048 | store_xmsi_data(sp); | 7075 | store_xmsi_data(sp); |
7049 | 7076 | ||
7050 | /* After proper initialization of H/W, register ISR */ | 7077 | /* After proper initialization of H/W, register ISR */ |
@@ -7054,45 +7081,47 @@ static int s2io_add_isr(struct s2io_nic * sp) | |||
7054 | for (i = 0; i < sp->num_entries; i++) { | 7081 | for (i = 0; i < sp->num_entries; i++) { |
7055 | if (sp->s2io_entries[i].in_use == MSIX_FLG) { | 7082 | if (sp->s2io_entries[i].in_use == MSIX_FLG) { |
7056 | if (sp->s2io_entries[i].type == | 7083 | if (sp->s2io_entries[i].type == |
7057 | MSIX_RING_TYPE) { | 7084 | MSIX_RING_TYPE) { |
7058 | sprintf(sp->desc[i], "%s:MSI-X-%d-RX", | 7085 | sprintf(sp->desc[i], "%s:MSI-X-%d-RX", |
7059 | dev->name, i); | 7086 | dev->name, i); |
7060 | err = request_irq(sp->entries[i].vector, | 7087 | err = request_irq(sp->entries[i].vector, |
7061 | s2io_msix_ring_handle, 0, | 7088 | s2io_msix_ring_handle, |
7062 | sp->desc[i], | 7089 | 0, |
7063 | sp->s2io_entries[i].arg); | 7090 | sp->desc[i], |
7091 | sp->s2io_entries[i].arg); | ||
7064 | } else if (sp->s2io_entries[i].type == | 7092 | } else if (sp->s2io_entries[i].type == |
7065 | MSIX_ALARM_TYPE) { | 7093 | MSIX_ALARM_TYPE) { |
7066 | sprintf(sp->desc[i], "%s:MSI-X-%d-TX", | 7094 | sprintf(sp->desc[i], "%s:MSI-X-%d-TX", |
7067 | dev->name, i); | 7095 | dev->name, i); |
7068 | err = request_irq(sp->entries[i].vector, | 7096 | err = request_irq(sp->entries[i].vector, |
7069 | s2io_msix_fifo_handle, 0, | 7097 | s2io_msix_fifo_handle, |
7070 | sp->desc[i], | 7098 | 0, |
7071 | sp->s2io_entries[i].arg); | 7099 | sp->desc[i], |
7100 | sp->s2io_entries[i].arg); | ||
7072 | 7101 | ||
7073 | } | 7102 | } |
7074 | /* if either data or addr is zero print it. */ | 7103 | /* if either data or addr is zero print it. */ |
7075 | if (!(sp->msix_info[i].addr && | 7104 | if (!(sp->msix_info[i].addr && |
7076 | sp->msix_info[i].data)) { | 7105 | sp->msix_info[i].data)) { |
7077 | DBG_PRINT(ERR_DBG, | 7106 | DBG_PRINT(ERR_DBG, |
7078 | "%s @Addr:0x%llx Data:0x%llx\n", | 7107 | "%s @Addr:0x%llx Data:0x%llx\n", |
7079 | sp->desc[i], | 7108 | sp->desc[i], |
7080 | (unsigned long long) | 7109 | (unsigned long long) |
7081 | sp->msix_info[i].addr, | 7110 | sp->msix_info[i].addr, |
7082 | (unsigned long long) | 7111 | (unsigned long long) |
7083 | ntohl(sp->msix_info[i].data)); | 7112 | ntohl(sp->msix_info[i].data)); |
7084 | } else | 7113 | } else |
7085 | msix_rx_cnt++; | 7114 | msix_rx_cnt++; |
7086 | if (err) { | 7115 | if (err) { |
7087 | remove_msix_isr(sp); | 7116 | remove_msix_isr(sp); |
7088 | 7117 | ||
7089 | DBG_PRINT(ERR_DBG, | 7118 | DBG_PRINT(ERR_DBG, |
7090 | "%s:MSI-X-%d registration " | 7119 | "%s:MSI-X-%d registration " |
7091 | "failed\n", dev->name, i); | 7120 | "failed\n", dev->name, i); |
7092 | 7121 | ||
7093 | DBG_PRINT(ERR_DBG, | 7122 | DBG_PRINT(ERR_DBG, |
7094 | "%s: Defaulting to INTA\n", | 7123 | "%s: Defaulting to INTA\n", |
7095 | dev->name); | 7124 | dev->name); |
7096 | sp->config.intr_type = INTA; | 7125 | sp->config.intr_type = INTA; |
7097 | break; | 7126 | break; |
7098 | } | 7127 | } |
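The MSI-X branch above requests one IRQ per table entry and, if any request_irq() fails, removes what was already registered and drops back to INTA. A compact sketch of that register-or-unwind loop (the handler, name string, and helper are placeholders, not this driver's functions):

#include <linux/interrupt.h>
#include <linux/pci.h>

/* Sketch only: request one IRQ per MSI-X entry; on failure, free the
 * vectors already registered so the caller can fall back to legacy INTA. */
static int register_msix_vectors_sketch(struct msix_entry *entries, int nvec,
                                        irq_handler_t handler, void *arg)
{
        int i, err;

        for (i = 0; i < nvec; i++) {
                err = request_irq(entries[i].vector, handler, 0,
                                  "demo-msix", arg);
                if (err) {
                        while (--i >= 0)        /* unwind earlier vectors */
                                free_irq(entries[i].vector, arg);
                        return err;             /* caller drops back to INTA */
                }
        }
        return 0;
}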
@@ -7102,14 +7131,14 @@ static int s2io_add_isr(struct s2io_nic * sp) | |||
7102 | } | 7131 | } |
7103 | if (!err) { | 7132 | if (!err) { |
7104 | printk(KERN_INFO "MSI-X-RX %d entries enabled\n", | 7133 | printk(KERN_INFO "MSI-X-RX %d entries enabled\n", |
7105 | --msix_rx_cnt); | 7134 | --msix_rx_cnt); |
7106 | DBG_PRINT(INFO_DBG, "MSI-X-TX entries enabled" | 7135 | DBG_PRINT(INFO_DBG, "MSI-X-TX entries enabled" |
7107 | " through alarm vector\n"); | 7136 | " through alarm vector\n"); |
7108 | } | 7137 | } |
7109 | } | 7138 | } |
7110 | if (sp->config.intr_type == INTA) { | 7139 | if (sp->config.intr_type == INTA) { |
7111 | err = request_irq((int) sp->pdev->irq, s2io_isr, IRQF_SHARED, | 7140 | err = request_irq((int)sp->pdev->irq, s2io_isr, IRQF_SHARED, |
7112 | sp->name, dev); | 7141 | sp->name, dev); |
7113 | if (err) { | 7142 | if (err) { |
7114 | DBG_PRINT(ERR_DBG, "%s: ISR registration failed\n", | 7143 | DBG_PRINT(ERR_DBG, "%s: ISR registration failed\n", |
7115 | dev->name); | 7144 | dev->name); |
@@ -7118,7 +7147,8 @@ static int s2io_add_isr(struct s2io_nic * sp) | |||
7118 | } | 7147 | } |
7119 | return 0; | 7148 | return 0; |
7120 | } | 7149 | } |
7121 | static void s2io_rem_isr(struct s2io_nic * sp) | 7150 | |
7151 | static void s2io_rem_isr(struct s2io_nic *sp) | ||
7122 | { | 7152 | { |
7123 | if (sp->config.intr_type == MSI_X) | 7153 | if (sp->config.intr_type == MSI_X) |
7124 | remove_msix_isr(sp); | 7154 | remove_msix_isr(sp); |
@@ -7126,7 +7156,7 @@ static void s2io_rem_isr(struct s2io_nic * sp) | |||
7126 | remove_inta_isr(sp); | 7156 | remove_inta_isr(sp); |
7127 | } | 7157 | } |
7128 | 7158 | ||
7129 | static void do_s2io_card_down(struct s2io_nic * sp, int do_io) | 7159 | static void do_s2io_card_down(struct s2io_nic *sp, int do_io) |
7130 | { | 7160 | { |
7131 | int cnt = 0; | 7161 | int cnt = 0; |
7132 | struct XENA_dev_config __iomem *bar0 = sp->bar0; | 7162 | struct XENA_dev_config __iomem *bar0 = sp->bar0; |
@@ -7139,9 +7169,8 @@ static void do_s2io_card_down(struct s2io_nic * sp, int do_io) | |||
7139 | 7169 | ||
7140 | del_timer_sync(&sp->alarm_timer); | 7170 | del_timer_sync(&sp->alarm_timer); |
7141 | /* If s2io_set_link task is executing, wait till it completes. */ | 7171 | /* If s2io_set_link task is executing, wait till it completes. */ |
7142 | while (test_and_set_bit(__S2IO_STATE_LINK_TASK, &(sp->state))) { | 7172 | while (test_and_set_bit(__S2IO_STATE_LINK_TASK, &(sp->state))) |
7143 | msleep(50); | 7173 | msleep(50); |
7144 | } | ||
7145 | clear_bit(__S2IO_STATE_CARD_UP, &sp->state); | 7174 | clear_bit(__S2IO_STATE_CARD_UP, &sp->state); |
7146 | 7175 | ||
7147 | /* Disable napi */ | 7176 | /* Disable napi */ |
@@ -7150,7 +7179,7 @@ static void do_s2io_card_down(struct s2io_nic * sp, int do_io) | |||
7150 | if (config->intr_type == MSI_X) { | 7179 | if (config->intr_type == MSI_X) { |
7151 | for (; off < sp->config.rx_ring_num; off++) | 7180 | for (; off < sp->config.rx_ring_num; off++) |
7152 | napi_disable(&sp->mac_control.rings[off].napi); | 7181 | napi_disable(&sp->mac_control.rings[off].napi); |
7153 | } | 7182 | } |
7154 | else | 7183 | else |
7155 | napi_disable(&sp->napi); | 7184 | napi_disable(&sp->napi); |
7156 | } | 7185 | } |
@@ -7165,7 +7194,7 @@ static void do_s2io_card_down(struct s2io_nic * sp, int do_io) | |||
7165 | s2io_link(sp, LINK_DOWN); | 7194 | s2io_link(sp, LINK_DOWN); |
7166 | 7195 | ||
7167 | /* Check if the device is Quiescent and then Reset the NIC */ | 7196 | /* Check if the device is Quiescent and then Reset the NIC */ |
7168 | while(do_io) { | 7197 | while (do_io) { |
7169 | /* As per the HW requirement we need to replenish the | 7198 | /* As per the HW requirement we need to replenish the |
7170 | * receive buffer to avoid the ring bump. Since there is | 7199 | * receive buffer to avoid the ring bump. Since there is |
7171 | * no intention of processing the Rx frame at this pointwe are | 7200 | * no intention of processing the Rx frame at this pointwe are |
@@ -7177,17 +7206,16 @@ static void do_s2io_card_down(struct s2io_nic * sp, int do_io) | |||
7177 | 7206 | ||
7178 | val64 = readq(&bar0->adapter_status); | 7207 | val64 = readq(&bar0->adapter_status); |
7179 | if (verify_xena_quiescence(sp)) { | 7208 | if (verify_xena_quiescence(sp)) { |
7180 | if(verify_pcc_quiescent(sp, sp->device_enabled_once)) | 7209 | if (verify_pcc_quiescent(sp, sp->device_enabled_once)) |
7181 | break; | 7210 | break; |
7182 | } | 7211 | } |
7183 | 7212 | ||
7184 | msleep(50); | 7213 | msleep(50); |
7185 | cnt++; | 7214 | cnt++; |
7186 | if (cnt == 10) { | 7215 | if (cnt == 10) { |
7187 | DBG_PRINT(ERR_DBG, | 7216 | DBG_PRINT(ERR_DBG, "s2io_close:Device not Quiescent "); |
7188 | "s2io_close:Device not Quiescent "); | ||
7189 | DBG_PRINT(ERR_DBG, "adaper status reads 0x%llx\n", | 7217 | DBG_PRINT(ERR_DBG, "adaper status reads 0x%llx\n", |
7190 | (unsigned long long) val64); | 7218 | (unsigned long long)val64); |
7191 | break; | 7219 | break; |
7192 | } | 7220 | } |
7193 | } | 7221 | } |
@@ -7203,17 +7231,17 @@ static void do_s2io_card_down(struct s2io_nic * sp, int do_io) | |||
7203 | clear_bit(__S2IO_STATE_LINK_TASK, &(sp->state)); | 7231 | clear_bit(__S2IO_STATE_LINK_TASK, &(sp->state)); |
7204 | } | 7232 | } |
7205 | 7233 | ||
7206 | static void s2io_card_down(struct s2io_nic * sp) | 7234 | static void s2io_card_down(struct s2io_nic *sp) |
7207 | { | 7235 | { |
7208 | do_s2io_card_down(sp, 1); | 7236 | do_s2io_card_down(sp, 1); |
7209 | } | 7237 | } |
7210 | 7238 | ||
7211 | static int s2io_card_up(struct s2io_nic * sp) | 7239 | static int s2io_card_up(struct s2io_nic *sp) |
7212 | { | 7240 | { |
7213 | int i, ret = 0; | 7241 | int i, ret = 0; |
7214 | struct mac_info *mac_control; | 7242 | struct mac_info *mac_control; |
7215 | struct config_param *config; | 7243 | struct config_param *config; |
7216 | struct net_device *dev = (struct net_device *) sp->dev; | 7244 | struct net_device *dev = (struct net_device *)sp->dev; |
7217 | u16 interruptible; | 7245 | u16 interruptible; |
7218 | 7246 | ||
7219 | /* Initialize the H/W I/O registers */ | 7247 | /* Initialize the H/W I/O registers */ |
@@ -7264,7 +7292,7 @@ static int s2io_card_up(struct s2io_nic * sp) | |||
7264 | sp->promisc_flg = 0; | 7292 | sp->promisc_flg = 0; |
7265 | if (sp->m_cast_flg) { | 7293 | if (sp->m_cast_flg) { |
7266 | sp->m_cast_flg = 0; | 7294 | sp->m_cast_flg = 0; |
7267 | sp->all_multi_pos= 0; | 7295 | sp->all_multi_pos = 0; |
7268 | } | 7296 | } |
7269 | 7297 | ||
7270 | /* Setting its receive mode */ | 7298 | /* Setting its receive mode */ |
@@ -7273,7 +7301,7 @@ static int s2io_card_up(struct s2io_nic * sp) | |||
7273 | if (sp->lro) { | 7301 | if (sp->lro) { |
7274 | /* Initialize max aggregatable pkts per session based on MTU */ | 7302 | /* Initialize max aggregatable pkts per session based on MTU */ |
7275 | sp->lro_max_aggr_per_sess = ((1<<16) - 1) / dev->mtu; | 7303 | sp->lro_max_aggr_per_sess = ((1<<16) - 1) / dev->mtu; |
7276 | /* Check if we can use(if specified) user provided value */ | 7304 | /* Check if we can use (if specified) user provided value */ |
7277 | if (lro_max_pkts < sp->lro_max_aggr_per_sess) | 7305 | if (lro_max_pkts < sp->lro_max_aggr_per_sess) |
7278 | sp->lro_max_aggr_per_sess = lro_max_pkts; | 7306 | sp->lro_max_aggr_per_sess = lro_max_pkts; |
7279 | } | 7307 | } |
@@ -7335,12 +7363,10 @@ static void s2io_restart_nic(struct work_struct *work) | |||
7335 | 7363 | ||
7336 | s2io_card_down(sp); | 7364 | s2io_card_down(sp); |
7337 | if (s2io_card_up(sp)) { | 7365 | if (s2io_card_up(sp)) { |
7338 | DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n", | 7366 | DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n", dev->name); |
7339 | dev->name); | ||
7340 | } | 7367 | } |
7341 | s2io_wake_all_tx_queue(sp); | 7368 | s2io_wake_all_tx_queue(sp); |
7342 | DBG_PRINT(ERR_DBG, "%s: was reset by Tx watchdog timer\n", | 7369 | DBG_PRINT(ERR_DBG, "%s: was reset by Tx watchdog timer\n", dev->name); |
7343 | dev->name); | ||
7344 | out_unlock: | 7370 | out_unlock: |
7345 | rtnl_unlock(); | 7371 | rtnl_unlock(); |
7346 | } | 7372 | } |
@@ -7389,9 +7415,9 @@ static void s2io_tx_watchdog(struct net_device *dev) | |||
7389 | static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp) | 7415 | static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp) |
7390 | { | 7416 | { |
7391 | struct s2io_nic *sp = ring_data->nic; | 7417 | struct s2io_nic *sp = ring_data->nic; |
7392 | struct net_device *dev = (struct net_device *) ring_data->dev; | 7418 | struct net_device *dev = (struct net_device *)ring_data->dev; |
7393 | struct sk_buff *skb = (struct sk_buff *) | 7419 | struct sk_buff *skb = (struct sk_buff *) |
7394 | ((unsigned long) rxdp->Host_Control); | 7420 | ((unsigned long)rxdp->Host_Control); |
7395 | int ring_no = ring_data->ring_no; | 7421 | int ring_no = ring_data->ring_no; |
7396 | u16 l3_csum, l4_csum; | 7422 | u16 l3_csum, l4_csum; |
7397 | unsigned long long err = rxdp->Control_1 & RXD_T_CODE; | 7423 | unsigned long long err = rxdp->Control_1 & RXD_T_CODE; |
@@ -7402,66 +7428,57 @@ static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp) | |||
7402 | 7428 | ||
7403 | if (err) { | 7429 | if (err) { |
7404 | /* Check for parity error */ | 7430 | /* Check for parity error */ |
7405 | if (err & 0x1) { | 7431 | if (err & 0x1) |
7406 | sp->mac_control.stats_info->sw_stat.parity_err_cnt++; | 7432 | sp->mac_control.stats_info->sw_stat.parity_err_cnt++; |
7407 | } | 7433 | |
7408 | err_mask = err >> 48; | 7434 | err_mask = err >> 48; |
7409 | switch(err_mask) { | 7435 | switch (err_mask) { |
7410 | case 1: | 7436 | case 1: |
7411 | sp->mac_control.stats_info->sw_stat. | 7437 | sp->mac_control.stats_info->sw_stat.rx_parity_err_cnt++; |
7412 | rx_parity_err_cnt++; | ||
7413 | break; | 7438 | break; |
7414 | 7439 | ||
7415 | case 2: | 7440 | case 2: |
7416 | sp->mac_control.stats_info->sw_stat. | 7441 | sp->mac_control.stats_info->sw_stat.rx_abort_cnt++; |
7417 | rx_abort_cnt++; | ||
7418 | break; | 7442 | break; |
7419 | 7443 | ||
7420 | case 3: | 7444 | case 3: |
7421 | sp->mac_control.stats_info->sw_stat. | 7445 | sp->mac_control.stats_info->sw_stat.rx_parity_abort_cnt++; |
7422 | rx_parity_abort_cnt++; | ||
7423 | break; | 7446 | break; |
7424 | 7447 | ||
7425 | case 4: | 7448 | case 4: |
7426 | sp->mac_control.stats_info->sw_stat. | 7449 | sp->mac_control.stats_info->sw_stat.rx_rda_fail_cnt++; |
7427 | rx_rda_fail_cnt++; | ||
7428 | break; | 7450 | break; |
7429 | 7451 | ||
7430 | case 5: | 7452 | case 5: |
7431 | sp->mac_control.stats_info->sw_stat. | 7453 | sp->mac_control.stats_info->sw_stat.rx_unkn_prot_cnt++; |
7432 | rx_unkn_prot_cnt++; | ||
7433 | break; | 7454 | break; |
7434 | 7455 | ||
7435 | case 6: | 7456 | case 6: |
7436 | sp->mac_control.stats_info->sw_stat. | 7457 | sp->mac_control.stats_info->sw_stat.rx_fcs_err_cnt++; |
7437 | rx_fcs_err_cnt++; | ||
7438 | break; | 7458 | break; |
7439 | 7459 | ||
7440 | case 7: | 7460 | case 7: |
7441 | sp->mac_control.stats_info->sw_stat. | 7461 | sp->mac_control.stats_info->sw_stat.rx_buf_size_err_cnt++; |
7442 | rx_buf_size_err_cnt++; | ||
7443 | break; | 7462 | break; |
7444 | 7463 | ||
7445 | case 8: | 7464 | case 8: |
7446 | sp->mac_control.stats_info->sw_stat. | 7465 | sp->mac_control.stats_info->sw_stat.rx_rxd_corrupt_cnt++; |
7447 | rx_rxd_corrupt_cnt++; | ||
7448 | break; | 7466 | break; |
7449 | 7467 | ||
7450 | case 15: | 7468 | case 15: |
7451 | sp->mac_control.stats_info->sw_stat. | 7469 | sp->mac_control.stats_info->sw_stat.rx_unkn_err_cnt++; |
7452 | rx_unkn_err_cnt++; | ||
7453 | break; | 7470 | break; |
7454 | } | 7471 | } |
7455 | /* | 7472 | /* |
7456 | * Drop the packet if bad transfer code. Exception being | 7473 | * Drop the packet if bad transfer code. Exception being |
7457 | * 0x5, which could be due to unsupported IPv6 extension header. | 7474 | * 0x5, which could be due to unsupported IPv6 extension header. |
7458 | * In this case, we let stack handle the packet. | 7475 | * In this case, we let stack handle the packet. |
7459 | * Note that in this case, since checksum will be incorrect, | 7476 | * Note that in this case, since checksum will be incorrect, |
7460 | * stack will validate the same. | 7477 | * stack will validate the same. |
7461 | */ | 7478 | */ |
7462 | if (err_mask != 0x5) { | 7479 | if (err_mask != 0x5) { |
7463 | DBG_PRINT(ERR_DBG, "%s: Rx error Value: 0x%x\n", | 7480 | DBG_PRINT(ERR_DBG, "%s: Rx error Value: 0x%x\n", |
7464 | dev->name, err_mask); | 7481 | dev->name, err_mask); |
7465 | dev->stats.rx_crc_errors++; | 7482 | dev->stats.rx_crc_errors++; |
7466 | sp->mac_control.stats_info->sw_stat.mem_freed | 7483 | sp->mac_control.stats_info->sw_stat.mem_freed |
7467 | += skb->truesize; | 7484 | += skb->truesize; |
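The error path in the hunk above extracts the completion ("transfer") code from the top bits of the descriptor's Control_1 word (mask with RXD_T_CODE, shift right by 48) and bumps one software counter per code. An equivalent table-style sketch (the struct, mask width, and names are illustrative, not the driver's definitions):

#include <linux/types.h>

/* Sketch only: count receive errors by their 4-bit transfer code. */
struct demo_rx_err_stats {
        u64 cnt[16];    /* one counter per possible transfer code */
};

static void demo_count_rx_error(struct demo_rx_err_stats *st, u64 control_1)
{
        u8 code = (control_1 >> 48) & 0xF;

        if (code)       /* code 0 means no error was reported */
                st->cnt[code]++;
}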
@@ -7494,8 +7511,9 @@ static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp) | |||
7494 | skb_put(skb, buf2_len); | 7511 | skb_put(skb, buf2_len); |
7495 | } | 7512 | } |
7496 | 7513 | ||
7497 | if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) && ((!ring_data->lro) || | 7514 | if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) && |
7498 | (ring_data->lro && (!(rxdp->Control_1 & RXD_FRAME_IP_FRAG)))) && | 7515 | ((!ring_data->lro) || |
7516 | (ring_data->lro && (!(rxdp->Control_1 & RXD_FRAME_IP_FRAG)))) && | ||
7499 | (sp->rx_csum)) { | 7517 | (sp->rx_csum)) { |
7500 | l3_csum = RXD_GET_L3_CKSUM(rxdp->Control_1); | 7518 | l3_csum = RXD_GET_L3_CKSUM(rxdp->Control_1); |
7501 | l4_csum = RXD_GET_L4_CKSUM(rxdp->Control_1); | 7519 | l4_csum = RXD_GET_L4_CKSUM(rxdp->Control_1); |
@@ -7512,52 +7530,44 @@ static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp) | |||
7512 | int ret = 0; | 7530 | int ret = 0; |
7513 | 7531 | ||
7514 | ret = s2io_club_tcp_session(ring_data, | 7532 | ret = s2io_club_tcp_session(ring_data, |
7515 | skb->data, &tcp, &tcp_len, &lro, | 7533 | skb->data, &tcp, |
7516 | rxdp, sp); | 7534 | &tcp_len, &lro, |
7535 | rxdp, sp); | ||
7517 | switch (ret) { | 7536 | switch (ret) { |
7518 | case 3: /* Begin anew */ | 7537 | case 3: /* Begin anew */ |
7519 | lro->parent = skb; | 7538 | lro->parent = skb; |
7520 | goto aggregate; | 7539 | goto aggregate; |
7521 | case 1: /* Aggregate */ | 7540 | case 1: /* Aggregate */ |
7522 | { | 7541 | lro_append_pkt(sp, lro, skb, tcp_len); |
7523 | lro_append_pkt(sp, lro, | 7542 | goto aggregate; |
7524 | skb, tcp_len); | 7543 | case 4: /* Flush session */ |
7525 | goto aggregate; | 7544 | lro_append_pkt(sp, lro, skb, tcp_len); |
7526 | } | 7545 | queue_rx_frame(lro->parent, |
7527 | case 4: /* Flush session */ | 7546 | lro->vlan_tag); |
7528 | { | 7547 | clear_lro_session(lro); |
7529 | lro_append_pkt(sp, lro, | 7548 | sp->mac_control.stats_info-> |
7530 | skb, tcp_len); | 7549 | sw_stat.flush_max_pkts++; |
7531 | queue_rx_frame(lro->parent, | 7550 | goto aggregate; |
7532 | lro->vlan_tag); | 7551 | case 2: /* Flush both */ |
7533 | clear_lro_session(lro); | 7552 | lro->parent->data_len = lro->frags_len; |
7534 | sp->mac_control.stats_info-> | 7553 | sp->mac_control.stats_info-> |
7535 | sw_stat.flush_max_pkts++; | 7554 | sw_stat.sending_both++; |
7536 | goto aggregate; | 7555 | queue_rx_frame(lro->parent, |
7537 | } | 7556 | lro->vlan_tag); |
7538 | case 2: /* Flush both */ | 7557 | clear_lro_session(lro); |
7539 | lro->parent->data_len = | 7558 | goto send_up; |
7540 | lro->frags_len; | 7559 | case 0: /* sessions exceeded */ |
7541 | sp->mac_control.stats_info-> | 7560 | case -1: /* non-TCP or not L2 aggregatable */ |
7542 | sw_stat.sending_both++; | 7561 | case 5: /* |
7543 | queue_rx_frame(lro->parent, | 7562 | * First pkt in session not |
7544 | lro->vlan_tag); | 7563 | * L3/L4 aggregatable |
7545 | clear_lro_session(lro); | 7564 | */ |
7546 | goto send_up; | 7565 | break; |
7547 | case 0: /* sessions exceeded */ | 7566 | default: |
7548 | case -1: /* non-TCP or not | 7567 | DBG_PRINT(ERR_DBG, |
7549 | * L2 aggregatable | 7568 | "%s: Samadhana!!\n", |
7550 | */ | 7569 | __func__); |
7551 | case 5: /* | 7570 | BUG(); |
7552 | * First pkt in session not | ||
7553 | * L3/L4 aggregatable | ||
7554 | */ | ||
7555 | break; | ||
7556 | default: | ||
7557 | DBG_PRINT(ERR_DBG, | ||
7558 | "%s: Samadhana!!\n", | ||
7559 | __func__); | ||
7560 | BUG(); | ||
7561 | } | 7571 | } |
7562 | } | 7572 | } |
7563 | } else { | 7573 | } else { |
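Beyond the shorter argument lists, the rewritten switch shows the layout checkpatch expects: case labels indented to the same column as the switch keyword, and no braces around case bodies that declare no locals. A generic sketch of that shape (the helpers are placeholders, not s2io functions):

	switch (ret) {
	case 1:				/* aggregate */
		append_to_session();	/* illustrative */
		break;
	case 2:				/* flush both */
		flush_session();	/* illustrative */
		break;
	default:
		break;
	}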
@@ -7592,9 +7602,9 @@ aggregate: | |||
7592 | * void. | 7602 | * void. |
7593 | */ | 7603 | */ |
7594 | 7604 | ||
7595 | static void s2io_link(struct s2io_nic * sp, int link) | 7605 | static void s2io_link(struct s2io_nic *sp, int link) |
7596 | { | 7606 | { |
7597 | struct net_device *dev = (struct net_device *) sp->dev; | 7607 | struct net_device *dev = (struct net_device *)sp->dev; |
7598 | 7608 | ||
7599 | if (link != sp->last_link_state) { | 7609 | if (link != sp->last_link_state) { |
7600 | init_tti(sp, link); | 7610 | init_tti(sp, link); |
@@ -7602,15 +7612,16 @@ static void s2io_link(struct s2io_nic * sp, int link) | |||
7602 | DBG_PRINT(ERR_DBG, "%s: Link down\n", dev->name); | 7612 | DBG_PRINT(ERR_DBG, "%s: Link down\n", dev->name); |
7603 | s2io_stop_all_tx_queue(sp); | 7613 | s2io_stop_all_tx_queue(sp); |
7604 | netif_carrier_off(dev); | 7614 | netif_carrier_off(dev); |
7605 | if(sp->mac_control.stats_info->sw_stat.link_up_cnt) | 7615 | if (sp->mac_control.stats_info->sw_stat.link_up_cnt) |
7606 | sp->mac_control.stats_info->sw_stat.link_up_time = | 7616 | sp->mac_control.stats_info->sw_stat. |
7607 | jiffies - sp->start_time; | 7617 | link_up_time = jiffies - sp->start_time; |
7608 | sp->mac_control.stats_info->sw_stat.link_down_cnt++; | 7618 | sp->mac_control.stats_info->sw_stat.link_down_cnt++; |
7609 | } else { | 7619 | } else { |
7610 | DBG_PRINT(ERR_DBG, "%s: Link Up\n", dev->name); | 7620 | DBG_PRINT(ERR_DBG, "%s: Link Up\n", dev->name); |
7611 | if (sp->mac_control.stats_info->sw_stat.link_down_cnt) | 7621 | if (sp->mac_control.stats_info->sw_stat.link_down_cnt) |
7612 | sp->mac_control.stats_info->sw_stat.link_down_time = | 7622 | sp->mac_control.stats_info-> |
7613 | jiffies - sp->start_time; | 7623 | sw_stat.link_down_time = |
7624 | jiffies - sp->start_time; | ||
7614 | sp->mac_control.stats_info->sw_stat.link_up_cnt++; | 7625 | sp->mac_control.stats_info->sw_stat.link_up_cnt++; |
7615 | netif_carrier_on(dev); | 7626 | netif_carrier_on(dev); |
7616 | s2io_wake_all_tx_queue(sp); | 7627 | s2io_wake_all_tx_queue(sp); |
@@ -7631,7 +7642,7 @@ static void s2io_link(struct s2io_nic * sp, int link) | |||
7631 | * void | 7642 | * void |
7632 | */ | 7643 | */ |
7633 | 7644 | ||
7634 | static void s2io_init_pci(struct s2io_nic * sp) | 7645 | static void s2io_init_pci(struct s2io_nic *sp) |
7635 | { | 7646 | { |
7636 | u16 pci_cmd = 0, pcix_cmd = 0; | 7647 | u16 pci_cmd = 0, pcix_cmd = 0; |
7637 | 7648 | ||
@@ -7651,12 +7662,11 @@ static void s2io_init_pci(struct s2io_nic * sp) | |||
7651 | } | 7662 | } |
7652 | 7663 | ||
7653 | static int s2io_verify_parm(struct pci_dev *pdev, u8 *dev_intr_type, | 7664 | static int s2io_verify_parm(struct pci_dev *pdev, u8 *dev_intr_type, |
7654 | u8 *dev_multiq) | 7665 | u8 *dev_multiq) |
7655 | { | 7666 | { |
7656 | if ((tx_fifo_num > MAX_TX_FIFOS) || | 7667 | if ((tx_fifo_num > MAX_TX_FIFOS) || (tx_fifo_num < 1)) { |
7657 | (tx_fifo_num < 1)) { | ||
7658 | DBG_PRINT(ERR_DBG, "s2io: Requested number of tx fifos " | 7668 | DBG_PRINT(ERR_DBG, "s2io: Requested number of tx fifos " |
7659 | "(%d) not supported\n", tx_fifo_num); | 7669 | "(%d) not supported\n", tx_fifo_num); |
7660 | 7670 | ||
7661 | if (tx_fifo_num < 1) | 7671 | if (tx_fifo_num < 1) |
7662 | tx_fifo_num = 1; | 7672 | tx_fifo_num = 1; |
@@ -7673,24 +7683,24 @@ static int s2io_verify_parm(struct pci_dev *pdev, u8 *dev_intr_type, | |||
7673 | if (tx_steering_type && (1 == tx_fifo_num)) { | 7683 | if (tx_steering_type && (1 == tx_fifo_num)) { |
7674 | if (tx_steering_type != TX_DEFAULT_STEERING) | 7684 | if (tx_steering_type != TX_DEFAULT_STEERING) |
7675 | DBG_PRINT(ERR_DBG, | 7685 | DBG_PRINT(ERR_DBG, |
7676 | "s2io: Tx steering is not supported with " | 7686 | "s2io: Tx steering is not supported with " |
7677 | "one fifo. Disabling Tx steering.\n"); | 7687 | "one fifo. Disabling Tx steering.\n"); |
7678 | tx_steering_type = NO_STEERING; | 7688 | tx_steering_type = NO_STEERING; |
7679 | } | 7689 | } |
7680 | 7690 | ||
7681 | if ((tx_steering_type < NO_STEERING) || | 7691 | if ((tx_steering_type < NO_STEERING) || |
7682 | (tx_steering_type > TX_DEFAULT_STEERING)) { | 7692 | (tx_steering_type > TX_DEFAULT_STEERING)) { |
7683 | DBG_PRINT(ERR_DBG, "s2io: Requested transmit steering not " | 7693 | DBG_PRINT(ERR_DBG, |
7684 | "supported\n"); | 7694 | "s2io: Requested transmit steering not supported\n"); |
7685 | DBG_PRINT(ERR_DBG, "s2io: Disabling transmit steering\n"); | 7695 | DBG_PRINT(ERR_DBG, "s2io: Disabling transmit steering\n"); |
7686 | tx_steering_type = NO_STEERING; | 7696 | tx_steering_type = NO_STEERING; |
7687 | } | 7697 | } |
7688 | 7698 | ||
7689 | if (rx_ring_num > MAX_RX_RINGS) { | 7699 | if (rx_ring_num > MAX_RX_RINGS) { |
7690 | DBG_PRINT(ERR_DBG, "s2io: Requested number of rx rings not " | 7700 | DBG_PRINT(ERR_DBG, |
7691 | "supported\n"); | 7701 | "s2io: Requested number of rx rings not supported\n"); |
7692 | DBG_PRINT(ERR_DBG, "s2io: Default to %d rx rings\n", | 7702 | DBG_PRINT(ERR_DBG, "s2io: Default to %d rx rings\n", |
7693 | MAX_RX_RINGS); | 7703 | MAX_RX_RINGS); |
7694 | rx_ring_num = MAX_RX_RINGS; | 7704 | rx_ring_num = MAX_RX_RINGS; |
7695 | } | 7705 | } |
7696 | 7706 | ||
@@ -7701,10 +7711,10 @@ static int s2io_verify_parm(struct pci_dev *pdev, u8 *dev_intr_type, | |||
7701 | } | 7711 | } |
7702 | 7712 | ||
7703 | if ((*dev_intr_type == MSI_X) && | 7713 | if ((*dev_intr_type == MSI_X) && |
7704 | ((pdev->device != PCI_DEVICE_ID_HERC_WIN) && | 7714 | ((pdev->device != PCI_DEVICE_ID_HERC_WIN) && |
7705 | (pdev->device != PCI_DEVICE_ID_HERC_UNI))) { | 7715 | (pdev->device != PCI_DEVICE_ID_HERC_UNI))) { |
7706 | DBG_PRINT(ERR_DBG, "s2io: Xframe I does not support MSI_X. " | 7716 | DBG_PRINT(ERR_DBG, "s2io: Xframe I does not support MSI_X. " |
7707 | "Defaulting to INTA\n"); | 7717 | "Defaulting to INTA\n"); |
7708 | *dev_intr_type = INTA; | 7718 | *dev_intr_type = INTA; |
7709 | } | 7719 | } |
7710 | 7720 | ||
@@ -7743,8 +7753,8 @@ static int rts_ds_steer(struct s2io_nic *nic, u8 ds_codepoint, u8 ring) | |||
7743 | writeq(val64, &bar0->rts_ds_mem_ctrl); | 7753 | writeq(val64, &bar0->rts_ds_mem_ctrl); |
7744 | 7754 | ||
7745 | return wait_for_cmd_complete(&bar0->rts_ds_mem_ctrl, | 7755 | return wait_for_cmd_complete(&bar0->rts_ds_mem_ctrl, |
7746 | RTS_DS_MEM_CTRL_STROBE_CMD_BEING_EXECUTED, | 7756 | RTS_DS_MEM_CTRL_STROBE_CMD_BEING_EXECUTED, |
7747 | S2IO_BIT_RESET); | 7757 | S2IO_BIT_RESET); |
7748 | } | 7758 | } |
7749 | 7759 | ||
7750 | static const struct net_device_ops s2io_netdev_ops = { | 7760 | static const struct net_device_ops s2io_netdev_ops = { |
@@ -7800,7 +7810,8 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre) | |||
7800 | if (ret) | 7810 | if (ret) |
7801 | return ret; | 7811 | return ret; |
7802 | 7812 | ||
7803 | if ((ret = pci_enable_device(pdev))) { | 7813 | ret = pci_enable_device(pdev); |
7814 | if (ret) { | ||
7804 | DBG_PRINT(ERR_DBG, | 7815 | DBG_PRINT(ERR_DBG, |
7805 | "s2io_init_nic: pci_enable_device failed\n"); | 7816 | "s2io_init_nic: pci_enable_device failed\n"); |
7806 | return ret; | 7817 | return ret; |
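checkpatch reports an error for an assignment used directly as an if condition, which is what this hunk removes around pci_enable_device(). The transformation in isolation, as a sketch:

	/* before: if ((ret = pci_enable_device(pdev))) { ... } */
	ret = pci_enable_device(pdev);
	if (ret) {
		DBG_PRINT(ERR_DBG, "pci_enable_device failed\n");
		return ret;
	}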
@@ -7809,11 +7820,10 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre) | |||
7809 | if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { | 7820 | if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { |
7810 | DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 64bit DMA\n"); | 7821 | DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 64bit DMA\n"); |
7811 | dma_flag = true; | 7822 | dma_flag = true; |
7812 | if (pci_set_consistent_dma_mask | 7823 | if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) { |
7813 | (pdev, DMA_BIT_MASK(64))) { | ||
7814 | DBG_PRINT(ERR_DBG, | 7824 | DBG_PRINT(ERR_DBG, |
7815 | "Unable to obtain 64bit DMA for \ | 7825 | "Unable to obtain 64bit DMA " |
7816 | consistent allocations\n"); | 7826 | "for consistent allocations\n"); |
7817 | pci_disable_device(pdev); | 7827 | pci_disable_device(pdev); |
7818 | return -ENOMEM; | 7828 | return -ENOMEM; |
7819 | } | 7829 | } |
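The old 64-bit DMA message kept the string literal open across the line break with a trailing backslash, so the next line's leading whitespace became part of the printed text; checkpatch also warns about such continuations. Splitting the message into adjacent literals lets the compiler concatenate them cleanly. A minimal before/after:

	/* before: the continuation embeds the next line's indentation */
	DBG_PRINT(ERR_DBG, "Unable to obtain 64bit DMA for \
	          consistent allocations\n");

	/* after: adjacent string literals are concatenated by the compiler */
	DBG_PRINT(ERR_DBG,
		  "Unable to obtain 64bit DMA "
		  "for consistent allocations\n");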
@@ -7823,8 +7833,10 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre) | |||
7823 | pci_disable_device(pdev); | 7833 | pci_disable_device(pdev); |
7824 | return -ENOMEM; | 7834 | return -ENOMEM; |
7825 | } | 7835 | } |
7826 | if ((ret = pci_request_regions(pdev, s2io_driver_name))) { | 7836 | ret = pci_request_regions(pdev, s2io_driver_name); |
7827 | DBG_PRINT(ERR_DBG, "%s: Request Regions failed - %x \n", __func__, ret); | 7837 | if (ret) { |
7838 | DBG_PRINT(ERR_DBG, "%s: Request Regions failed - %x \n", | ||
7839 | __func__, ret); | ||
7828 | pci_disable_device(pdev); | 7840 | pci_disable_device(pdev); |
7829 | return -ENODEV; | 7841 | return -ENODEV; |
7830 | } | 7842 | } |
@@ -7858,7 +7870,7 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre) | |||
7858 | sp->config.intr_type = dev_intr_type; | 7870 | sp->config.intr_type = dev_intr_type; |
7859 | 7871 | ||
7860 | if ((pdev->device == PCI_DEVICE_ID_HERC_WIN) || | 7872 | if ((pdev->device == PCI_DEVICE_ID_HERC_WIN) || |
7861 | (pdev->device == PCI_DEVICE_ID_HERC_UNI)) | 7873 | (pdev->device == PCI_DEVICE_ID_HERC_UNI)) |
7862 | sp->device_type = XFRAME_II_DEVICE; | 7874 | sp->device_type = XFRAME_II_DEVICE; |
7863 | else | 7875 | else |
7864 | sp->device_type = XFRAME_I_DEVICE; | 7876 | sp->device_type = XFRAME_I_DEVICE; |
@@ -7889,16 +7901,16 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre) | |||
7889 | 7901 | ||
7890 | /* Initialize the fifos used for tx steering */ | 7902 | /* Initialize the fifos used for tx steering */ |
7891 | if (config->tx_fifo_num < 5) { | 7903 | if (config->tx_fifo_num < 5) { |
7892 | if (config->tx_fifo_num == 1) | 7904 | if (config->tx_fifo_num == 1) |
7893 | sp->total_tcp_fifos = 1; | 7905 | sp->total_tcp_fifos = 1; |
7894 | else | 7906 | else |
7895 | sp->total_tcp_fifos = config->tx_fifo_num - 1; | 7907 | sp->total_tcp_fifos = config->tx_fifo_num - 1; |
7896 | sp->udp_fifo_idx = config->tx_fifo_num - 1; | 7908 | sp->udp_fifo_idx = config->tx_fifo_num - 1; |
7897 | sp->total_udp_fifos = 1; | 7909 | sp->total_udp_fifos = 1; |
7898 | sp->other_fifo_idx = sp->total_tcp_fifos - 1; | 7910 | sp->other_fifo_idx = sp->total_tcp_fifos - 1; |
7899 | } else { | 7911 | } else { |
7900 | sp->total_tcp_fifos = (tx_fifo_num - FIFO_UDP_MAX_NUM - | 7912 | sp->total_tcp_fifos = (tx_fifo_num - FIFO_UDP_MAX_NUM - |
7901 | FIFO_OTHER_MAX_NUM); | 7913 | FIFO_OTHER_MAX_NUM); |
7902 | sp->udp_fifo_idx = sp->total_tcp_fifos; | 7914 | sp->udp_fifo_idx = sp->total_tcp_fifos; |
7903 | sp->total_udp_fifos = FIFO_UDP_MAX_NUM; | 7915 | sp->total_udp_fifos = FIFO_UDP_MAX_NUM; |
7904 | sp->other_fifo_idx = sp->udp_fifo_idx + FIFO_UDP_MAX_NUM; | 7916 | sp->other_fifo_idx = sp->udp_fifo_idx + FIFO_UDP_MAX_NUM; |
@@ -7964,8 +7976,7 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre) | |||
7964 | 7976 | ||
7965 | /* initialize the shared memory used by the NIC and the host */ | 7977 | /* initialize the shared memory used by the NIC and the host */ |
7966 | if (init_shared_mem(sp)) { | 7978 | if (init_shared_mem(sp)) { |
7967 | DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n", | 7979 | DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n", dev->name); |
7968 | dev->name); | ||
7969 | ret = -ENOMEM; | 7980 | ret = -ENOMEM; |
7970 | goto mem_alloc_failed; | 7981 | goto mem_alloc_failed; |
7971 | } | 7982 | } |
@@ -7987,12 +7998,13 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre) | |||
7987 | } | 7998 | } |
7988 | 7999 | ||
7989 | dev->irq = pdev->irq; | 8000 | dev->irq = pdev->irq; |
7990 | dev->base_addr = (unsigned long) sp->bar0; | 8001 | dev->base_addr = (unsigned long)sp->bar0; |
7991 | 8002 | ||
7992 | /* Initializing the BAR1 address as the start of the FIFO pointer. */ | 8003 | /* Initializing the BAR1 address as the start of the FIFO pointer. */ |
7993 | for (j = 0; j < MAX_TX_FIFOS; j++) { | 8004 | for (j = 0; j < MAX_TX_FIFOS; j++) { |
7994 | mac_control->tx_FIFO_start[j] = (struct TxFIFO_element __iomem *) | 8005 | mac_control->tx_FIFO_start[j] = |
7995 | (sp->bar1 + (j * 0x00020000)); | 8006 | (struct TxFIFO_element __iomem *) |
8007 | (sp->bar1 + (j * 0x00020000)); | ||
7996 | } | 8008 | } |
7997 | 8009 | ||
7998 | /* Driver entry points */ | 8010 | /* Driver entry points */ |
@@ -8046,7 +8058,7 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre) | |||
8046 | if (ret) { | 8058 | if (ret) { |
8047 | 8059 | ||
8048 | DBG_PRINT(ERR_DBG, | 8060 | DBG_PRINT(ERR_DBG, |
8049 | "s2io: MSI-X requested but failed to enable\n"); | 8061 | "s2io: MSI-X requested but failed to enable\n"); |
8050 | sp->config.intr_type = INTA; | 8062 | sp->config.intr_type = INTA; |
8051 | } | 8063 | } |
8052 | } | 8064 | } |
@@ -8077,12 +8089,13 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre) | |||
8077 | */ | 8089 | */ |
8078 | bar0 = sp->bar0; | 8090 | bar0 = sp->bar0; |
8079 | val64 = RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD | | 8091 | val64 = RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD | |
8080 | RMAC_ADDR_CMD_MEM_OFFSET(0 + S2IO_MAC_ADDR_START_OFFSET); | 8092 | RMAC_ADDR_CMD_MEM_OFFSET(0 + S2IO_MAC_ADDR_START_OFFSET); |
8081 | writeq(val64, &bar0->rmac_addr_cmd_mem); | 8093 | writeq(val64, &bar0->rmac_addr_cmd_mem); |
8082 | wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem, | 8094 | wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem, |
8083 | RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING, S2IO_BIT_RESET); | 8095 | RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING, |
8096 | S2IO_BIT_RESET); | ||
8084 | tmp64 = readq(&bar0->rmac_addr_data0_mem); | 8097 | tmp64 = readq(&bar0->rmac_addr_data0_mem); |
8085 | mac_down = (u32) tmp64; | 8098 | mac_down = (u32)tmp64; |
8086 | mac_up = (u32) (tmp64 >> 32); | 8099 | mac_up = (u32) (tmp64 >> 32); |
8087 | 8100 | ||
8088 | sp->def_mac_addr[0].mac_addr[3] = (u8) (mac_up); | 8101 | sp->def_mac_addr[0].mac_addr[3] = (u8) (mac_up); |
@@ -8113,10 +8126,10 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre) | |||
8113 | 8126 | ||
8114 | /* Configure MSIX vector for number of rings configured plus one */ | 8127 | /* Configure MSIX vector for number of rings configured plus one */ |
8115 | if ((sp->device_type == XFRAME_II_DEVICE) && | 8128 | if ((sp->device_type == XFRAME_II_DEVICE) && |
8116 | (config->intr_type == MSI_X)) | 8129 | (config->intr_type == MSI_X)) |
8117 | sp->num_entries = config->rx_ring_num + 1; | 8130 | sp->num_entries = config->rx_ring_num + 1; |
8118 | 8131 | ||
8119 | /* Store the values of the MSIX table in the s2io_nic structure */ | 8132 | /* Store the values of the MSIX table in the s2io_nic structure */ |
8120 | store_xmsi_data(sp); | 8133 | store_xmsi_data(sp); |
8121 | /* reset Nic and bring it to known state */ | 8134 | /* reset Nic and bring it to known state */ |
8122 | s2io_reset(sp); | 8135 | s2io_reset(sp); |
@@ -8144,7 +8157,7 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre) | |||
8144 | val64 |= 0x0000800000000000ULL; | 8157 | val64 |= 0x0000800000000000ULL; |
8145 | writeq(val64, &bar0->gpio_control); | 8158 | writeq(val64, &bar0->gpio_control); |
8146 | val64 = 0x0411040400000000ULL; | 8159 | val64 = 0x0411040400000000ULL; |
8147 | writeq(val64, (void __iomem *) bar0 + 0x2700); | 8160 | writeq(val64, (void __iomem *)bar0 + 0x2700); |
8148 | val64 = readq(&bar0->gpio_control); | 8161 | val64 = readq(&bar0->gpio_control); |
8149 | } | 8162 | } |
8150 | 8163 | ||
@@ -8157,7 +8170,7 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre) | |||
8157 | } | 8170 | } |
8158 | s2io_vpd_read(sp); | 8171 | s2io_vpd_read(sp); |
8159 | DBG_PRINT(ERR_DBG, "Copyright(c) 2002-2007 Neterion Inc.\n"); | 8172 | DBG_PRINT(ERR_DBG, "Copyright(c) 2002-2007 Neterion Inc.\n"); |
8160 | DBG_PRINT(ERR_DBG, "%s: Neterion %s (rev %d)\n",dev->name, | 8173 | DBG_PRINT(ERR_DBG, "%s: Neterion %s (rev %d)\n", dev->name, |
8161 | sp->product_name, pdev->revision); | 8174 | sp->product_name, pdev->revision); |
8162 | DBG_PRINT(ERR_DBG, "%s: Driver version %s\n", dev->name, | 8175 | DBG_PRINT(ERR_DBG, "%s: Driver version %s\n", dev->name, |
8163 | s2io_driver_version); | 8176 | s2io_driver_version); |
@@ -8172,15 +8185,15 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre) | |||
8172 | goto set_swap_failed; | 8185 | goto set_swap_failed; |
8173 | } | 8186 | } |
8174 | } | 8187 | } |
8175 | switch(sp->rxd_mode) { | 8188 | switch (sp->rxd_mode) { |
8176 | case RXD_MODE_1: | 8189 | case RXD_MODE_1: |
8177 | DBG_PRINT(ERR_DBG, "%s: 1-Buffer receive mode enabled\n", | 8190 | DBG_PRINT(ERR_DBG, "%s: 1-Buffer receive mode enabled\n", |
8178 | dev->name); | 8191 | dev->name); |
8179 | break; | 8192 | break; |
8180 | case RXD_MODE_3B: | 8193 | case RXD_MODE_3B: |
8181 | DBG_PRINT(ERR_DBG, "%s: 2-Buffer receive mode enabled\n", | 8194 | DBG_PRINT(ERR_DBG, "%s: 2-Buffer receive mode enabled\n", |
8182 | dev->name); | 8195 | dev->name); |
8183 | break; | 8196 | break; |
8184 | } | 8197 | } |
8185 | 8198 | ||
8186 | switch (sp->config.napi) { | 8199 | switch (sp->config.napi) { |
@@ -8193,18 +8206,18 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre) | |||
8193 | } | 8206 | } |
8194 | 8207 | ||
8195 | DBG_PRINT(ERR_DBG, "%s: Using %d Tx fifo(s)\n", dev->name, | 8208 | DBG_PRINT(ERR_DBG, "%s: Using %d Tx fifo(s)\n", dev->name, |
8196 | sp->config.tx_fifo_num); | 8209 | sp->config.tx_fifo_num); |
8197 | 8210 | ||
8198 | DBG_PRINT(ERR_DBG, "%s: Using %d Rx ring(s)\n", dev->name, | 8211 | DBG_PRINT(ERR_DBG, "%s: Using %d Rx ring(s)\n", dev->name, |
8199 | sp->config.rx_ring_num); | 8212 | sp->config.rx_ring_num); |
8200 | 8213 | ||
8201 | switch(sp->config.intr_type) { | 8214 | switch (sp->config.intr_type) { |
8202 | case INTA: | 8215 | case INTA: |
8203 | DBG_PRINT(ERR_DBG, "%s: Interrupt type INTA\n", dev->name); | 8216 | DBG_PRINT(ERR_DBG, "%s: Interrupt type INTA\n", dev->name); |
8204 | break; | 8217 | break; |
8205 | case MSI_X: | 8218 | case MSI_X: |
8206 | DBG_PRINT(ERR_DBG, "%s: Interrupt type MSI-X\n", dev->name); | 8219 | DBG_PRINT(ERR_DBG, "%s: Interrupt type MSI-X\n", dev->name); |
8207 | break; | 8220 | break; |
8208 | } | 8221 | } |
8209 | if (sp->config.multiq) { | 8222 | if (sp->config.multiq) { |
8210 | for (i = 0; i < sp->config.tx_fifo_num; i++) { | 8223 | for (i = 0; i < sp->config.tx_fifo_num; i++) { |
@@ -8213,31 +8226,34 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre) | |||
8213 | fifo->multiq = config->multiq; | 8226 | fifo->multiq = config->multiq; |
8214 | } | 8227 | } |
8215 | DBG_PRINT(ERR_DBG, "%s: Multiqueue support enabled\n", | 8228 | DBG_PRINT(ERR_DBG, "%s: Multiqueue support enabled\n", |
8216 | dev->name); | 8229 | dev->name); |
8217 | } else | 8230 | } else |
8218 | DBG_PRINT(ERR_DBG, "%s: Multiqueue support disabled\n", | 8231 | DBG_PRINT(ERR_DBG, "%s: Multiqueue support disabled\n", |
8219 | dev->name); | 8232 | dev->name); |
8220 | 8233 | ||
8221 | switch (sp->config.tx_steering_type) { | 8234 | switch (sp->config.tx_steering_type) { |
8222 | case NO_STEERING: | 8235 | case NO_STEERING: |
8223 | DBG_PRINT(ERR_DBG, "%s: No steering enabled for" | 8236 | DBG_PRINT(ERR_DBG, "%s: No steering enabled for transmit\n", |
8224 | " transmit\n", dev->name); | 8237 | dev->name); |
8225 | break; | 8238 | break; |
8226 | case TX_PRIORITY_STEERING: | 8239 | case TX_PRIORITY_STEERING: |
8227 | DBG_PRINT(ERR_DBG, "%s: Priority steering enabled for" | 8240 | DBG_PRINT(ERR_DBG, |
8228 | " transmit\n", dev->name); | 8241 | "%s: Priority steering enabled for transmit\n", |
8242 | dev->name); | ||
8229 | break; | 8243 | break; |
8230 | case TX_DEFAULT_STEERING: | 8244 | case TX_DEFAULT_STEERING: |
8231 | DBG_PRINT(ERR_DBG, "%s: Default steering enabled for" | 8245 | DBG_PRINT(ERR_DBG, |
8232 | " transmit\n", dev->name); | 8246 | "%s: Default steering enabled for transmit\n", |
8247 | dev->name); | ||
8233 | } | 8248 | } |
8234 | 8249 | ||
8235 | if (sp->lro) | 8250 | if (sp->lro) |
8236 | DBG_PRINT(ERR_DBG, "%s: Large receive offload enabled\n", | 8251 | DBG_PRINT(ERR_DBG, "%s: Large receive offload enabled\n", |
8237 | dev->name); | 8252 | dev->name); |
8238 | if (ufo) | 8253 | if (ufo) |
8239 | DBG_PRINT(ERR_DBG, "%s: UDP Fragmentation Offload(UFO)" | 8254 | DBG_PRINT(ERR_DBG, |
8240 | " enabled\n", dev->name); | 8255 | "%s: UDP Fragmentation Offload(UFO) enabled\n", |
8256 | dev->name); | ||
8241 | /* Initialize device name */ | 8257 | /* Initialize device name */ |
8242 | sprintf(sp->name, "%s Neterion %s", dev->name, sp->product_name); | 8258 | sprintf(sp->name, "%s Neterion %s", dev->name, sp->product_name); |
8243 | 8259 | ||
@@ -8255,13 +8271,13 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre) | |||
8255 | 8271 | ||
8256 | return 0; | 8272 | return 0; |
8257 | 8273 | ||
8258 | register_failed: | 8274 | register_failed: |
8259 | set_swap_failed: | 8275 | set_swap_failed: |
8260 | iounmap(sp->bar1); | 8276 | iounmap(sp->bar1); |
8261 | bar1_remap_failed: | 8277 | bar1_remap_failed: |
8262 | iounmap(sp->bar0); | 8278 | iounmap(sp->bar0); |
8263 | bar0_remap_failed: | 8279 | bar0_remap_failed: |
8264 | mem_alloc_failed: | 8280 | mem_alloc_failed: |
8265 | free_shared_mem(sp); | 8281 | free_shared_mem(sp); |
8266 | pci_disable_device(pdev); | 8282 | pci_disable_device(pdev); |
8267 | pci_release_regions(pdev); | 8283 | pci_release_regions(pdev); |
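The unindented labels are the standard kernel error-unwind ladder: each failure point jumps to a label that releases only what was already acquired, in reverse order of setup. Reduced to its shape, with illustrative resource names:

	ret = setup_a();		/* illustrative helpers */
	if (ret)
		goto out;

	ret = setup_b();
	if (ret)
		goto undo_a;

	return 0;

undo_a:
	teardown_a();
out:
	return ret;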
@@ -8283,7 +8299,7 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre) | |||
8283 | static void __devexit s2io_rem_nic(struct pci_dev *pdev) | 8299 | static void __devexit s2io_rem_nic(struct pci_dev *pdev) |
8284 | { | 8300 | { |
8285 | struct net_device *dev = | 8301 | struct net_device *dev = |
8286 | (struct net_device *) pci_get_drvdata(pdev); | 8302 | (struct net_device *)pci_get_drvdata(pdev); |
8287 | struct s2io_nic *sp; | 8303 | struct s2io_nic *sp; |
8288 | 8304 | ||
8289 | if (dev == NULL) { | 8305 | if (dev == NULL) { |
@@ -8331,28 +8347,28 @@ module_init(s2io_starter); | |||
8331 | module_exit(s2io_closer); | 8347 | module_exit(s2io_closer); |
8332 | 8348 | ||
8333 | static int check_L2_lro_capable(u8 *buffer, struct iphdr **ip, | 8349 | static int check_L2_lro_capable(u8 *buffer, struct iphdr **ip, |
8334 | struct tcphdr **tcp, struct RxD_t *rxdp, | 8350 | struct tcphdr **tcp, struct RxD_t *rxdp, |
8335 | struct s2io_nic *sp) | 8351 | struct s2io_nic *sp) |
8336 | { | 8352 | { |
8337 | int ip_off; | 8353 | int ip_off; |
8338 | u8 l2_type = (u8)((rxdp->Control_1 >> 37) & 0x7), ip_len; | 8354 | u8 l2_type = (u8)((rxdp->Control_1 >> 37) & 0x7), ip_len; |
8339 | 8355 | ||
8340 | if (!(rxdp->Control_1 & RXD_FRAME_PROTO_TCP)) { | 8356 | if (!(rxdp->Control_1 & RXD_FRAME_PROTO_TCP)) { |
8341 | DBG_PRINT(INIT_DBG,"%s: Non-TCP frames not supported for LRO\n", | 8357 | DBG_PRINT(INIT_DBG, |
8358 | "%s: Non-TCP frames not supported for LRO\n", | ||
8342 | __func__); | 8359 | __func__); |
8343 | return -1; | 8360 | return -1; |
8344 | } | 8361 | } |
8345 | 8362 | ||
8346 | /* Checking for DIX type or DIX type with VLAN */ | 8363 | /* Checking for DIX type or DIX type with VLAN */ |
8347 | if ((l2_type == 0) | 8364 | if ((l2_type == 0) || (l2_type == 4)) { |
8348 | || (l2_type == 4)) { | ||
8349 | ip_off = HEADER_ETHERNET_II_802_3_SIZE; | 8365 | ip_off = HEADER_ETHERNET_II_802_3_SIZE; |
8350 | /* | 8366 | /* |
8351 | * If vlan stripping is disabled and the frame is VLAN tagged, | 8367 | * If vlan stripping is disabled and the frame is VLAN tagged, |
8352 | * shift the offset by the VLAN header size bytes. | 8368 | * shift the offset by the VLAN header size bytes. |
8353 | */ | 8369 | */ |
8354 | if ((!sp->vlan_strip_flag) && | 8370 | if ((!sp->vlan_strip_flag) && |
8355 | (rxdp->Control_1 & RXD_FRAME_VLAN_TAG)) | 8371 | (rxdp->Control_1 & RXD_FRAME_VLAN_TAG)) |
8356 | ip_off += HEADER_VLAN_SIZE; | 8372 | ip_off += HEADER_VLAN_SIZE; |
8357 | } else { | 8373 | } else { |
8358 | /* LLC, SNAP etc are considered non-mergeable */ | 8374 | /* LLC, SNAP etc are considered non-mergeable */ |
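The VLAN comment in this hunk explains the offset adjustment: with stripping disabled, a tagged frame still carries the 4-byte 802.1Q header in the buffer, so the IP header sits 18 rather than 14 bytes in. The arithmetic as a sketch (the flag names are illustrative; the driver uses HEADER_ETHERNET_II_802_3_SIZE and HEADER_VLAN_SIZE):

	int ip_off = 14;			/* Ethernet II header */

	if (!vlan_stripping && frame_is_tagged)	/* illustrative flags */
		ip_off += 4;			/* 802.1Q tag left in place */

	ip = (struct iphdr *)(buffer + ip_off);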
@@ -8370,22 +8386,25 @@ static int check_L2_lro_capable(u8 *buffer, struct iphdr **ip, | |||
8370 | static int check_for_socket_match(struct lro *lro, struct iphdr *ip, | 8386 | static int check_for_socket_match(struct lro *lro, struct iphdr *ip, |
8371 | struct tcphdr *tcp) | 8387 | struct tcphdr *tcp) |
8372 | { | 8388 | { |
8373 | DBG_PRINT(INFO_DBG,"%s: Been here...\n", __func__); | 8389 | DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__); |
8374 | if ((lro->iph->saddr != ip->saddr) || (lro->iph->daddr != ip->daddr) || | 8390 | if ((lro->iph->saddr != ip->saddr) || |
8375 | (lro->tcph->source != tcp->source) || (lro->tcph->dest != tcp->dest)) | 8391 | (lro->iph->daddr != ip->daddr) || |
8392 | (lro->tcph->source != tcp->source) || | ||
8393 | (lro->tcph->dest != tcp->dest)) | ||
8376 | return -1; | 8394 | return -1; |
8377 | return 0; | 8395 | return 0; |
8378 | } | 8396 | } |
8379 | 8397 | ||
8380 | static inline int get_l4_pyld_length(struct iphdr *ip, struct tcphdr *tcp) | 8398 | static inline int get_l4_pyld_length(struct iphdr *ip, struct tcphdr *tcp) |
8381 | { | 8399 | { |
8382 | return(ntohs(ip->tot_len) - (ip->ihl << 2) - (tcp->doff << 2)); | 8400 | return ntohs(ip->tot_len) - (ip->ihl << 2) - (tcp->doff << 2); |
8383 | } | 8401 | } |
8384 | 8402 | ||
8385 | static void initiate_new_session(struct lro *lro, u8 *l2h, | 8403 | static void initiate_new_session(struct lro *lro, u8 *l2h, |
8386 | struct iphdr *ip, struct tcphdr *tcp, u32 tcp_pyld_len, u16 vlan_tag) | 8404 | struct iphdr *ip, struct tcphdr *tcp, |
8405 | u32 tcp_pyld_len, u16 vlan_tag) | ||
8387 | { | 8406 | { |
8388 | DBG_PRINT(INFO_DBG,"%s: Been here...\n", __func__); | 8407 | DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__); |
8389 | lro->l2h = l2h; | 8408 | lro->l2h = l2h; |
8390 | lro->iph = ip; | 8409 | lro->iph = ip; |
8391 | lro->tcph = tcp; | 8410 | lro->tcph = tcp; |
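get_l4_pyld_length() derives the TCP payload size from the IP total length; ip->ihl and tcp->doff count 32-bit words, so each is shifted left by two to convert to bytes. Worked through for a typical full-size segment:

	/*
	 * ihl = 5  -> 20-byte IP header
	 * doff = 8 -> 32-byte TCP header (20 base + 12 option bytes)
	 * tot_len = 1500
	 *
	 * payload = 1500 - (5 << 2) - (8 << 2) = 1500 - 20 - 32 = 1448 bytes
	 */
	len = ntohs(ip->tot_len) - (ip->ihl << 2) - (tcp->doff << 2);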
@@ -8396,9 +8415,9 @@ static void initiate_new_session(struct lro *lro, u8 *l2h, | |||
8396 | lro->frags_len = 0; | 8415 | lro->frags_len = 0; |
8397 | lro->vlan_tag = vlan_tag; | 8416 | lro->vlan_tag = vlan_tag; |
8398 | /* | 8417 | /* |
8399 | * check if we saw TCP timestamp. Other consistency checks have | 8418 | * Check if we saw TCP timestamp. |
8400 | * already been done. | 8419 | * Other consistency checks have already been done. |
8401 | */ | 8420 | */ |
8402 | if (tcp->doff == 8) { | 8421 | if (tcp->doff == 8) { |
8403 | __be32 *ptr; | 8422 | __be32 *ptr; |
8404 | ptr = (__be32 *)(tcp+1); | 8423 | ptr = (__be32 *)(tcp+1); |
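The doff == 8 test relies on the usual aligned timestamp encoding: 20 bytes of base header plus 12 option bytes (NOP, NOP, kind 8, length 10, TSval, TSecr) give a data offset of exactly eight 32-bit words. Under that assumption the option can be read straight off the pointer taken above; a sketch with illustrative locals:

	if (tcp->doff == 8) {
		__be32 *ptr = (__be32 *)(tcp + 1);
		/* ptr[0]: NOP, NOP, kind=8, len=10; ptr[1]: TSval; ptr[2]: TSecr */
		u32    tsval = ntohl(ptr[1]);	/* illustrative locals */
		__be32 tsecr = ptr[2];		/* kept big-endian for echoing */
	}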
@@ -8415,7 +8434,7 @@ static void update_L3L4_header(struct s2io_nic *sp, struct lro *lro) | |||
8415 | struct tcphdr *tcp = lro->tcph; | 8434 | struct tcphdr *tcp = lro->tcph; |
8416 | __sum16 nchk; | 8435 | __sum16 nchk; |
8417 | struct stat_block *statinfo = sp->mac_control.stats_info; | 8436 | struct stat_block *statinfo = sp->mac_control.stats_info; |
8418 | DBG_PRINT(INFO_DBG,"%s: Been here...\n", __func__); | 8437 | DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__); |
8419 | 8438 | ||
8420 | /* Update L3 header */ | 8439 | /* Update L3 header */ |
8421 | ip->tot_len = htons(lro->total_len); | 8440 | ip->tot_len = htons(lro->total_len); |
@@ -8441,9 +8460,9 @@ static void update_L3L4_header(struct s2io_nic *sp, struct lro *lro) | |||
8441 | } | 8460 | } |
8442 | 8461 | ||
8443 | static void aggregate_new_rx(struct lro *lro, struct iphdr *ip, | 8462 | static void aggregate_new_rx(struct lro *lro, struct iphdr *ip, |
8444 | struct tcphdr *tcp, u32 l4_pyld) | 8463 | struct tcphdr *tcp, u32 l4_pyld) |
8445 | { | 8464 | { |
8446 | DBG_PRINT(INFO_DBG,"%s: Been here...\n", __func__); | 8465 | DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__); |
8447 | lro->total_len += l4_pyld; | 8466 | lro->total_len += l4_pyld; |
8448 | lro->frags_len += l4_pyld; | 8467 | lro->frags_len += l4_pyld; |
8449 | lro->tcp_next_seq += l4_pyld; | 8468 | lro->tcp_next_seq += l4_pyld; |
@@ -8467,7 +8486,7 @@ static int verify_l3_l4_lro_capable(struct lro *l_lro, struct iphdr *ip, | |||
8467 | { | 8486 | { |
8468 | u8 *ptr; | 8487 | u8 *ptr; |
8469 | 8488 | ||
8470 | DBG_PRINT(INFO_DBG,"%s: Been here...\n", __func__); | 8489 | DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__); |
8471 | 8490 | ||
8472 | if (!tcp_pyld_len) { | 8491 | if (!tcp_pyld_len) { |
8473 | /* Runt frame or a pure ack */ | 8492 | /* Runt frame or a pure ack */ |
@@ -8482,8 +8501,9 @@ static int verify_l3_l4_lro_capable(struct lro *l_lro, struct iphdr *ip, | |||
8482 | return -1; | 8501 | return -1; |
8483 | 8502 | ||
8484 | /* If we see ECE or CWR flags in TCP header, packet is not mergeable */ | 8503 | /* If we see ECE or CWR flags in TCP header, packet is not mergeable */ |
8485 | if (tcp->urg || tcp->psh || tcp->rst || tcp->syn || tcp->fin || | 8504 | if (tcp->urg || tcp->psh || tcp->rst || |
8486 | tcp->ece || tcp->cwr || !tcp->ack) { | 8505 | tcp->syn || tcp->fin || |
8506 | tcp->ece || tcp->cwr || !tcp->ack) { | ||
8487 | /* | 8507 | /* |
8488 | * Currently recognize only the ack control word and | 8508 | * Currently recognize only the ack control word and |
8489 | * any other control field being set would result in | 8509 | * any other control field being set would result in |
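The flag test above means only plain data-carrying ACK segments are merged; any of URG, PSH, RST, SYN, FIN, ECE or CWR (or a missing ACK) flushes the session so the flagged segment reaches the stack untouched. The same condition can be written as one mask test over tcp_flag_word(); this is a reformulation, not the driver's code:

	__be32 flags = tcp_flag_word(tcp) &
		       (TCP_FLAG_URG | TCP_FLAG_PSH | TCP_FLAG_RST |
			TCP_FLAG_SYN | TCP_FLAG_FIN | TCP_FLAG_ECE |
			TCP_FLAG_CWR | TCP_FLAG_ACK);

	if (flags != TCP_FLAG_ACK)
		return -1;		/* not mergeable: flush the session */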
@@ -8519,27 +8539,26 @@ static int verify_l3_l4_lro_capable(struct lro *l_lro, struct iphdr *ip, | |||
8519 | return 0; | 8539 | return 0; |
8520 | } | 8540 | } |
8521 | 8541 | ||
8522 | static int | 8542 | static int s2io_club_tcp_session(struct ring_info *ring_data, u8 *buffer, |
8523 | s2io_club_tcp_session(struct ring_info *ring_data, u8 *buffer, u8 **tcp, | 8543 | u8 **tcp, u32 *tcp_len, struct lro **lro, |
8524 | u32 *tcp_len, struct lro **lro, struct RxD_t *rxdp, | 8544 | struct RxD_t *rxdp, struct s2io_nic *sp) |
8525 | struct s2io_nic *sp) | ||
8526 | { | 8545 | { |
8527 | struct iphdr *ip; | 8546 | struct iphdr *ip; |
8528 | struct tcphdr *tcph; | 8547 | struct tcphdr *tcph; |
8529 | int ret = 0, i; | 8548 | int ret = 0, i; |
8530 | u16 vlan_tag = 0; | 8549 | u16 vlan_tag = 0; |
8531 | 8550 | ||
8532 | if (!(ret = check_L2_lro_capable(buffer, &ip, (struct tcphdr **)tcp, | 8551 | ret = check_L2_lro_capable(buffer, &ip, (struct tcphdr **)tcp, |
8533 | rxdp, sp))) { | 8552 | rxdp, sp); |
8534 | DBG_PRINT(INFO_DBG,"IP Saddr: %x Daddr: %x\n", | 8553 | if (ret) |
8535 | ip->saddr, ip->daddr); | ||
8536 | } else | ||
8537 | return ret; | 8554 | return ret; |
8538 | 8555 | ||
8556 | DBG_PRINT(INFO_DBG, "IP Saddr: %x Daddr: %x\n", ip->saddr, ip->daddr); | ||
8557 | |||
8539 | vlan_tag = RXD_GET_VLAN_TAG(rxdp->Control_2); | 8558 | vlan_tag = RXD_GET_VLAN_TAG(rxdp->Control_2); |
8540 | tcph = (struct tcphdr *)*tcp; | 8559 | tcph = (struct tcphdr *)*tcp; |
8541 | *tcp_len = get_l4_pyld_length(ip, tcph); | 8560 | *tcp_len = get_l4_pyld_length(ip, tcph); |
8542 | for (i=0; i<MAX_LRO_SESSIONS; i++) { | 8561 | for (i = 0; i < MAX_LRO_SESSIONS; i++) { |
8543 | struct lro *l_lro = &ring_data->lro0_n[i]; | 8562 | struct lro *l_lro = &ring_data->lro0_n[i]; |
8544 | if (l_lro->in_use) { | 8563 | if (l_lro->in_use) { |
8545 | if (check_for_socket_match(l_lro, ip, tcph)) | 8564 | if (check_for_socket_match(l_lro, ip, tcph)) |
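Several hunks in this region are pure spacing fixes required by checkpatch: a space after the for/if/switch keyword, spaces around binary operators and after commas in argument lists, and no stray space between a cast and its operand. Condensed to one before/after example with an illustrative loop body:

	for (i=0; i<MAX_LRO_SESSIONS; i++)	/* before: flagged by checkpatch */
		check_session(i);		/* illustrative */

	for (i = 0; i < MAX_LRO_SESSIONS; i++)	/* after */
		check_session(i);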
@@ -8554,12 +8573,13 @@ s2io_club_tcp_session(struct ring_info *ring_data, u8 *buffer, u8 **tcp, | |||
8554 | ntohl(tcph->seq)); | 8573 | ntohl(tcph->seq)); |
8555 | 8574 | ||
8556 | sp->mac_control.stats_info-> | 8575 | sp->mac_control.stats_info-> |
8557 | sw_stat.outof_sequence_pkts++; | 8576 | sw_stat.outof_sequence_pkts++; |
8558 | ret = 2; | 8577 | ret = 2; |
8559 | break; | 8578 | break; |
8560 | } | 8579 | } |
8561 | 8580 | ||
8562 | if (!verify_l3_l4_lro_capable(l_lro, ip, tcph,*tcp_len)) | 8581 | if (!verify_l3_l4_lro_capable(l_lro, ip, tcph, |
8582 | *tcp_len)) | ||
8563 | ret = 1; /* Aggregate */ | 8583 | ret = 1; /* Aggregate */ |
8564 | else | 8584 | else |
8565 | ret = 2; /* Flush both */ | 8585 | ret = 2; /* Flush both */ |
@@ -8573,11 +8593,10 @@ s2io_club_tcp_session(struct ring_info *ring_data, u8 *buffer, u8 **tcp, | |||
8573 | * don't create new LRO session. Just send this | 8593 | * don't create new LRO session. Just send this |
8574 | * packet up. | 8594 | * packet up. |
8575 | */ | 8595 | */ |
8576 | if (verify_l3_l4_lro_capable(NULL, ip, tcph, *tcp_len)) { | 8596 | if (verify_l3_l4_lro_capable(NULL, ip, tcph, *tcp_len)) |
8577 | return 5; | 8597 | return 5; |
8578 | } | ||
8579 | 8598 | ||
8580 | for (i=0; i<MAX_LRO_SESSIONS; i++) { | 8599 | for (i = 0; i < MAX_LRO_SESSIONS; i++) { |
8581 | struct lro *l_lro = &ring_data->lro0_n[i]; | 8600 | struct lro *l_lro = &ring_data->lro0_n[i]; |
8582 | if (!(l_lro->in_use)) { | 8601 | if (!(l_lro->in_use)) { |
8583 | *lro = l_lro; | 8602 | *lro = l_lro; |
@@ -8588,31 +8607,30 @@ s2io_club_tcp_session(struct ring_info *ring_data, u8 *buffer, u8 **tcp, | |||
8588 | } | 8607 | } |
8589 | 8608 | ||
8590 | if (ret == 0) { /* sessions exceeded */ | 8609 | if (ret == 0) { /* sessions exceeded */ |
8591 | DBG_PRINT(INFO_DBG,"%s:All LRO sessions already in use\n", | 8610 | DBG_PRINT(INFO_DBG, "%s:All LRO sessions already in use\n", |
8592 | __func__); | 8611 | __func__); |
8593 | *lro = NULL; | 8612 | *lro = NULL; |
8594 | return ret; | 8613 | return ret; |
8595 | } | 8614 | } |
8596 | 8615 | ||
8597 | switch (ret) { | 8616 | switch (ret) { |
8598 | case 3: | 8617 | case 3: |
8599 | initiate_new_session(*lro, buffer, ip, tcph, *tcp_len, | 8618 | initiate_new_session(*lro, buffer, ip, tcph, *tcp_len, |
8600 | vlan_tag); | 8619 | vlan_tag); |
8601 | break; | 8620 | break; |
8602 | case 2: | 8621 | case 2: |
8622 | update_L3L4_header(sp, *lro); | ||
8623 | break; | ||
8624 | case 1: | ||
8625 | aggregate_new_rx(*lro, ip, tcph, *tcp_len); | ||
8626 | if ((*lro)->sg_num == sp->lro_max_aggr_per_sess) { | ||
8603 | update_L3L4_header(sp, *lro); | 8627 | update_L3L4_header(sp, *lro); |
8604 | break; | 8628 | ret = 4; /* Flush the LRO */ |
8605 | case 1: | 8629 | } |
8606 | aggregate_new_rx(*lro, ip, tcph, *tcp_len); | 8630 | break; |
8607 | if ((*lro)->sg_num == sp->lro_max_aggr_per_sess) { | 8631 | default: |
8608 | update_L3L4_header(sp, *lro); | 8632 | DBG_PRINT(ERR_DBG, "%s:Dont know, can't say!!\n", __func__); |
8609 | ret = 4; /* Flush the LRO */ | 8633 | break; |
8610 | } | ||
8611 | break; | ||
8612 | default: | ||
8613 | DBG_PRINT(ERR_DBG,"%s:Dont know, can't say!!\n", | ||
8614 | __func__); | ||
8615 | break; | ||
8616 | } | 8634 | } |
8617 | 8635 | ||
8618 | return ret; | 8636 | return ret; |
@@ -8631,8 +8649,7 @@ static void queue_rx_frame(struct sk_buff *skb, u16 vlan_tag) | |||
8631 | struct s2io_nic *sp = netdev_priv(dev); | 8649 | struct s2io_nic *sp = netdev_priv(dev); |
8632 | 8650 | ||
8633 | skb->protocol = eth_type_trans(skb, dev); | 8651 | skb->protocol = eth_type_trans(skb, dev); |
8634 | if (sp->vlgrp && vlan_tag | 8652 | if (sp->vlgrp && vlan_tag && (sp->vlan_strip_flag)) { |
8635 | && (sp->vlan_strip_flag)) { | ||
8636 | /* Queueing the vlan frame to the upper layer */ | 8653 | /* Queueing the vlan frame to the upper layer */ |
8637 | if (sp->config.napi) | 8654 | if (sp->config.napi) |
8638 | vlan_hwaccel_receive_skb(skb, sp->vlgrp, vlan_tag); | 8655 | vlan_hwaccel_receive_skb(skb, sp->vlgrp, vlan_tag); |
@@ -8647,8 +8664,7 @@ static void queue_rx_frame(struct sk_buff *skb, u16 vlan_tag) | |||
8647 | } | 8664 | } |
8648 | 8665 | ||
8649 | static void lro_append_pkt(struct s2io_nic *sp, struct lro *lro, | 8666 | static void lro_append_pkt(struct s2io_nic *sp, struct lro *lro, |
8650 | struct sk_buff *skb, | 8667 | struct sk_buff *skb, u32 tcp_len) |
8651 | u32 tcp_len) | ||
8652 | { | 8668 | { |
8653 | struct sk_buff *first = lro->parent; | 8669 | struct sk_buff *first = lro->parent; |
8654 | 8670 | ||
@@ -8674,7 +8690,7 @@ static void lro_append_pkt(struct s2io_nic *sp, struct lro *lro, | |||
8674 | * this device has been detected. | 8690 | * this device has been detected. |
8675 | */ | 8691 | */ |
8676 | static pci_ers_result_t s2io_io_error_detected(struct pci_dev *pdev, | 8692 | static pci_ers_result_t s2io_io_error_detected(struct pci_dev *pdev, |
8677 | pci_channel_state_t state) | 8693 | pci_channel_state_t state) |
8678 | { | 8694 | { |
8679 | struct net_device *netdev = pci_get_drvdata(pdev); | 8695 | struct net_device *netdev = pci_get_drvdata(pdev); |
8680 | struct s2io_nic *sp = netdev_priv(netdev); | 8696 | struct s2io_nic *sp = netdev_priv(netdev); |