author    Sreenivasa Honnur <Sreenivasa.Honnur@neterion.com>    2007-05-10 04:22:25 -0400
committer Jeff Garzik <jeff@garzik.org>    2007-05-11 17:53:11 -0400
commit    491976b2bce07f2c44e32a987fa6eb9edf8b0aeb (patch)
tree      b1a02510ed57250fe1c3966626a7b5c0e462b087 /drivers/net
parent    c53d49453f738555c252304357cd7da6ba1f1ee0 (diff)
S2IO: Statistics for link up/down and memory allocated/freed
1. Added statistics for link up/down, last link up/down.
2. Statistics for memory allocated/freed.
3. Changed level of some DBG_PRINTs.

Signed-off-by: Sreenivasa Honnur <sreenivasa.honnur@neterion.com>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/s2io.c  300
-rw-r--r--  drivers/net/s2io.h   24
2 files changed, 282 insertions, 42 deletions
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c
index e2195aa00a04..e3e6d410d72c 100644
--- a/drivers/net/s2io.c
+++ b/drivers/net/s2io.c
@@ -84,7 +84,7 @@
 #include "s2io.h"
 #include "s2io-regs.h"

-#define DRV_VERSION "2.0.22.1"
+#define DRV_VERSION "2.0.23.1"

 /* S2io Driver name & version. */
 static char s2io_driver_name[] = "Neterion";
@@ -282,7 +282,27 @@ static char ethtool_driver_stats_keys[][ETH_GSTRING_LEN] = {
 ("lro_flush_due_to_max_pkts"),
 ("lro_avg_aggr_pkts"),
 ("mem_alloc_fail_cnt"),
-("watchdog_timer_cnt")
+("watchdog_timer_cnt"),
+("mem_allocated"),
+("mem_freed"),
+("link_up_cnt"),
+("link_down_cnt"),
+("link_up_time"),
+("link_down_time"),
+("tx_tcode_buf_abort_cnt"),
+("tx_tcode_desc_abort_cnt"),
+("tx_tcode_parity_err_cnt"),
+("tx_tcode_link_loss_cnt"),
+("tx_tcode_list_proc_err_cnt"),
+("rx_tcode_parity_err_cnt"),
+("rx_tcode_abort_cnt"),
+("rx_tcode_parity_abort_cnt"),
+("rx_tcode_rda_fail_cnt"),
+("rx_tcode_unkn_prot_cnt"),
+("rx_tcode_fcs_err_cnt"),
+("rx_tcode_buf_size_err_cnt"),
+("rx_tcode_rxd_corrupt_cnt"),
+("rx_tcode_unkn_err_cnt")
 };

 #define S2IO_XENA_STAT_LEN sizeof(ethtool_xena_stats_keys)/ ETH_GSTRING_LEN
@@ -492,6 +512,7 @@ static int init_shared_mem(struct s2io_nic *nic)

 struct mac_info *mac_control;
 struct config_param *config;
+unsigned long long mem_allocated = 0;

 mac_control = &nic->mac_control;
 config = &nic->config;
@@ -521,6 +542,7 @@ static int init_shared_mem(struct s2io_nic *nic)
 "Malloc failed for list_info\n");
 return -ENOMEM;
 }
+mem_allocated += list_holder_size;
 memset(mac_control->fifos[i].list_info, 0, list_holder_size);
 }
 for (i = 0; i < config->tx_fifo_num; i++) {
@@ -567,6 +589,7 @@ static int init_shared_mem(struct s2io_nic *nic)
 DBG_PRINT(INFO_DBG, "failed for TxDL\n");
 return -ENOMEM;
 }
+mem_allocated += PAGE_SIZE;
 }
 while (k < lst_per_page) {
 int l = (j * lst_per_page) + k;
@@ -584,6 +607,7 @@ static int init_shared_mem(struct s2io_nic *nic)
 nic->ufo_in_band_v = kcalloc(size, sizeof(u64), GFP_KERNEL);
 if (!nic->ufo_in_band_v)
 return -ENOMEM;
+mem_allocated += (size * sizeof(u64));

 /* Allocation and initialization of RXDs in Rings */
 size = 0;
@@ -641,6 +665,7 @@ static int init_shared_mem(struct s2io_nic *nic)
 rx_blocks->block_virt_addr = tmp_v_addr;
 return -ENOMEM;
 }
+mem_allocated += size;
 memset(tmp_v_addr, 0, size);
 rx_blocks->block_virt_addr = tmp_v_addr;
 rx_blocks->block_dma_addr = tmp_p_addr;
@@ -649,6 +674,8 @@ static int init_shared_mem(struct s2io_nic *nic)
 GFP_KERNEL);
 if (!rx_blocks->rxds)
 return -ENOMEM;
+mem_allocated +=
+(sizeof(struct rxd_info)* rxd_count[nic->rxd_mode]);
 for (l=0; l<rxd_count[nic->rxd_mode];l++) {
 rx_blocks->rxds[l].virt_addr =
 rx_blocks->block_virt_addr +
@@ -691,6 +718,7 @@ static int init_shared_mem(struct s2io_nic *nic)
 GFP_KERNEL);
 if (!mac_control->rings[i].ba)
 return -ENOMEM;
+mem_allocated +=(sizeof(struct buffAdd *) * blk_cnt);
 for (j = 0; j < blk_cnt; j++) {
 int k = 0;
 mac_control->rings[i].ba[j] =
@@ -699,6 +727,8 @@ static int init_shared_mem(struct s2io_nic *nic)
 GFP_KERNEL);
 if (!mac_control->rings[i].ba[j])
 return -ENOMEM;
+mem_allocated += (sizeof(struct buffAdd) * \
+(rxd_count[nic->rxd_mode] + 1));
 while (k != rxd_count[nic->rxd_mode]) {
 ba = &mac_control->rings[i].ba[j][k];

@@ -706,6 +736,8 @@ static int init_shared_mem(struct s2io_nic *nic)
 (BUF0_LEN + ALIGN_SIZE, GFP_KERNEL);
 if (!ba->ba_0_org)
 return -ENOMEM;
+mem_allocated +=
+(BUF0_LEN + ALIGN_SIZE);
 tmp = (unsigned long)ba->ba_0_org;
 tmp += ALIGN_SIZE;
 tmp &= ~((unsigned long) ALIGN_SIZE);
@@ -715,6 +747,8 @@ static int init_shared_mem(struct s2io_nic *nic)
 (BUF1_LEN + ALIGN_SIZE, GFP_KERNEL);
 if (!ba->ba_1_org)
 return -ENOMEM;
+mem_allocated
++= (BUF1_LEN + ALIGN_SIZE);
 tmp = (unsigned long) ba->ba_1_org;
 tmp += ALIGN_SIZE;
 tmp &= ~((unsigned long) ALIGN_SIZE);
@@ -738,6 +772,7 @@ static int init_shared_mem(struct s2io_nic *nic)
 */
 return -ENOMEM;
 }
+mem_allocated += size;
 mac_control->stats_mem_sz = size;

 tmp_v_addr = mac_control->stats_mem;
@@ -745,7 +780,7 @@ static int init_shared_mem(struct s2io_nic *nic)
 memset(tmp_v_addr, 0, size);
 DBG_PRINT(INIT_DBG, "%s:Ring Mem PHY: 0x%llx\n", dev->name,
 (unsigned long long) tmp_p_addr);
-
+mac_control->stats_info->sw_stat.mem_allocated += mem_allocated;
 return SUCCESS;
 }

@@ -759,12 +794,14 @@ static int init_shared_mem(struct s2io_nic *nic)
 static void free_shared_mem(struct s2io_nic *nic)
 {
 int i, j, blk_cnt, size;
+u32 ufo_size = 0;
 void *tmp_v_addr;
 dma_addr_t tmp_p_addr;
 struct mac_info *mac_control;
 struct config_param *config;
 int lst_size, lst_per_page;
 struct net_device *dev = nic->dev;
+int page_num = 0;

 if (!nic)
 return;
@@ -776,8 +813,9 @@ static void free_shared_mem(struct s2io_nic *nic)
 lst_per_page = PAGE_SIZE / lst_size;

 for (i = 0; i < config->tx_fifo_num; i++) {
-int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
-lst_per_page);
+ufo_size += config->tx_cfg[i].fifo_len;
+page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
+lst_per_page);
 for (j = 0; j < page_num; j++) {
 int mem_blks = (j * lst_per_page);
 if (!mac_control->fifos[i].list_info)
@@ -792,6 +830,8 @@ static void free_shared_mem(struct s2io_nic *nic)
 mac_control->fifos[i].
 list_info[mem_blks].
 list_phy_addr);
+nic->mac_control.stats_info->sw_stat.mem_freed
++= PAGE_SIZE;
 }
 /* If we got a zero DMA address during allocation,
 * free the page now
@@ -805,8 +845,12 @@ static void free_shared_mem(struct s2io_nic *nic)
 dev->name);
 DBG_PRINT(INIT_DBG, "Virtual address %p\n",
 mac_control->zerodma_virt_addr);
+nic->mac_control.stats_info->sw_stat.mem_freed
++= PAGE_SIZE;
 }
 kfree(mac_control->fifos[i].list_info);
+nic->mac_control.stats_info->sw_stat.mem_freed +=
+(nic->config.tx_cfg[i].fifo_len *sizeof(struct list_info_hold));
 }

 size = SIZE_OF_BLOCK;
@@ -821,7 +865,10 @@ static void free_shared_mem(struct s2io_nic *nic)
 break;
 pci_free_consistent(nic->pdev, size,
 tmp_v_addr, tmp_p_addr);
+nic->mac_control.stats_info->sw_stat.mem_freed += size;
 kfree(mac_control->rings[i].rx_blocks[j].rxds);
+nic->mac_control.stats_info->sw_stat.mem_freed +=
+( sizeof(struct rxd_info)* rxd_count[nic->rxd_mode]);
 }
 }

@@ -838,12 +885,20 @@ static void free_shared_mem(struct s2io_nic *nic)
 struct buffAdd *ba =
 &mac_control->rings[i].ba[j][k];
 kfree(ba->ba_0_org);
+nic->mac_control.stats_info->sw_stat.\
+mem_freed += (BUF0_LEN + ALIGN_SIZE);
 kfree(ba->ba_1_org);
+nic->mac_control.stats_info->sw_stat.\
+mem_freed += (BUF1_LEN + ALIGN_SIZE);
 k++;
 }
 kfree(mac_control->rings[i].ba[j]);
+nic->mac_control.stats_info->sw_stat.mem_freed += (sizeof(struct buffAdd) *
+(rxd_count[nic->rxd_mode] + 1));
 }
 kfree(mac_control->rings[i].ba);
+nic->mac_control.stats_info->sw_stat.mem_freed +=
+(sizeof(struct buffAdd *) * blk_cnt);
 }
 }

@@ -852,9 +907,14 @@ static void free_shared_mem(struct s2io_nic *nic)
 mac_control->stats_mem_sz,
 mac_control->stats_mem,
 mac_control->stats_mem_phy);
+nic->mac_control.stats_info->sw_stat.mem_freed +=
+mac_control->stats_mem_sz;
 }
-if (nic->ufo_in_band_v)
+if (nic->ufo_in_band_v) {
 kfree(nic->ufo_in_band_v);
+nic->mac_control.stats_info->sw_stat.mem_freed
++= (ufo_size * sizeof(u64));
+}
 }

 /**
@@ -2124,10 +2184,12 @@ static void free_tx_buffers(struct s2io_nic *nic)

 for (i = 0; i < config->tx_fifo_num; i++) {
 for (j = 0; j < config->tx_cfg[i].fifo_len - 1; j++) {
-txdp = (struct TxD *) mac_control->fifos[i].list_info[j].
-list_virt_addr;
+txdp = (struct TxD *) \
+mac_control->fifos[i].list_info[j].list_virt_addr;
 skb = s2io_txdl_getskb(&mac_control->fifos[i], txdp, j);
 if (skb) {
+nic->mac_control.stats_info->sw_stat.mem_freed
++= skb->truesize;
 dev_kfree_skb(skb);
 cnt++;
 }
@@ -2194,6 +2256,8 @@ static int fill_rxd_3buf(struct s2io_nic *nic, struct RxD_t *rxdp, struct \
 }
 frag_list = skb_shinfo(skb)->frag_list;
 skb->truesize += frag_list->truesize;
+nic->mac_control.stats_info->sw_stat.mem_allocated
++= frag_list->truesize;
 frag_list->next = NULL;
 tmp = (void *)ALIGN((long)frag_list->data, ALIGN_SIZE + 1);
 frag_list->data = tmp;
@@ -2326,6 +2390,8 @@ static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
 mem_alloc_fail_cnt++;
 return -ENOMEM ;
 }
+nic->mac_control.stats_info->sw_stat.mem_allocated
++= skb->truesize;
 if (nic->rxd_mode == RXD_MODE_1) {
 /* 1 buffer mode - normal operation mode */
 memset(rxdp, 0, sizeof(struct RxD1));
@@ -2333,7 +2399,8 @@ static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
 ((struct RxD1*)rxdp)->Buffer0_ptr = pci_map_single
 (nic->pdev, skb->data, size - NET_IP_ALIGN,
 PCI_DMA_FROMDEVICE);
-rxdp->Control_2 = SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN);
+rxdp->Control_2 =
+SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN);

 } else if (nic->rxd_mode >= RXD_MODE_3A) {
 /*
@@ -2347,7 +2414,7 @@ static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
 * payload
 */

-/* save the buffer pointers to avoid frequent dma mapping */
+/* save buffer pointers to avoid frequent dma mapping */
 Buffer0_ptr = ((struct RxD3*)rxdp)->Buffer0_ptr;
 Buffer1_ptr = ((struct RxD3*)rxdp)->Buffer1_ptr;
 memset(rxdp, 0, sizeof(struct RxD3));
@@ -2369,7 +2436,7 @@ static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
 PCI_DMA_FROMDEVICE);
 else
 pci_dma_sync_single_for_device(nic->pdev,
-(dma_addr_t) ((struct RxD3*)rxdp)->Buffer0_ptr,
+(dma_addr_t) ((struct RxD3*)rxdp)->Buffer0_ptr,
 BUF0_LEN, PCI_DMA_FROMDEVICE);
 rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
 if (nic->rxd_mode == RXD_MODE_3B) {
@@ -2396,6 +2463,8 @@ static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
 } else {
 /* 3 buffer mode */
 if (fill_rxd_3buf(nic, rxdp, skb) == -ENOMEM) {
+nic->mac_control.stats_info->sw_stat.\
+mem_freed += skb->truesize;
 dev_kfree_skb_irq(skb);
 if (first_rxdp) {
 wmb();
@@ -2496,6 +2565,7 @@ static void free_rxd_blk(struct s2io_nic *sp, int ring_no, int blk)
 PCI_DMA_FROMDEVICE);
 memset(rxdp, 0, sizeof(struct RxD3));
 }
+sp->mac_control.stats_info->sw_stat.mem_freed += skb->truesize;
 dev_kfree_skb(skb);
 atomic_dec(&sp->rx_bufs_left[ring_no]);
 }
@@ -2825,13 +2895,35 @@ static void tx_intr_handler(struct fifo_info *fifo_data)
 nic->mac_control.stats_info->sw_stat.
 parity_err_cnt++;
 }
-if ((err >> 48) == 0xA) {
-DBG_PRINT(TX_DBG, "TxD returned due \
-to loss of link\n");
-}
-else {
-DBG_PRINT(ERR_DBG, "***TxD error %llx\n", err);
-}
+
+/* update t_code statistics */
+err >>= 48;
+switch(err) {
+case 2:
+nic->mac_control.stats_info->sw_stat.
+tx_buf_abort_cnt++;
+break;
+
+case 3:
+nic->mac_control.stats_info->sw_stat.
+tx_desc_abort_cnt++;
+break;
+
+case 7:
+nic->mac_control.stats_info->sw_stat.
+tx_parity_err_cnt++;
+break;
+
+case 10:
+nic->mac_control.stats_info->sw_stat.
+tx_link_loss_cnt++;
+break;
+
+case 15:
+nic->mac_control.stats_info->sw_stat.
+tx_list_proc_err_cnt++;
+break;
+}
 }

 skb = s2io_txdl_getskb(fifo_data, txdlp, get_info.offset);
@@ -2844,6 +2936,7 @@ static void tx_intr_handler(struct fifo_info *fifo_data)

 /* Updating the statistics block */
 nic->stats.tx_bytes += skb->len;
+nic->mac_control.stats_info->sw_stat.mem_freed += skb->truesize;
 dev_kfree_skb_irq(skb);

 get_info.offset++;
@@ -3319,7 +3412,9 @@ static void s2io_reset(struct s2io_nic * sp)
 u16 subid, pci_cmd;
 int i;
 u16 val16;
-unsigned long long reset_cnt = 0;
+unsigned long long up_cnt, down_cnt, up_time, down_time, reset_cnt;
+unsigned long long mem_alloc_cnt, mem_free_cnt, watchdog_cnt;
+
 DBG_PRINT(INIT_DBG,"%s - Resetting XFrame card %s\n",
 __FUNCTION__, sp->dev->name);

@@ -3385,11 +3480,26 @@ new_way:

 /* Reset device statistics maintained by OS */
 memset(&sp->stats, 0, sizeof (struct net_device_stats));
-/* save reset count */
+
+up_cnt = sp->mac_control.stats_info->sw_stat.link_up_cnt;
+down_cnt = sp->mac_control.stats_info->sw_stat.link_down_cnt;
+up_time = sp->mac_control.stats_info->sw_stat.link_up_time;
+down_time = sp->mac_control.stats_info->sw_stat.link_down_time;
 reset_cnt = sp->mac_control.stats_info->sw_stat.soft_reset_cnt;
+mem_alloc_cnt = sp->mac_control.stats_info->sw_stat.mem_allocated;
+mem_free_cnt = sp->mac_control.stats_info->sw_stat.mem_freed;
+watchdog_cnt = sp->mac_control.stats_info->sw_stat.watchdog_timer_cnt;
+/* save link up/down time/cnt, reset/memory/watchdog cnt */
 memset(sp->mac_control.stats_info, 0, sizeof(struct stat_block));
-/* restore reset count */
+/* restore link up/down time/cnt, reset/memory/watchdog cnt */
+sp->mac_control.stats_info->sw_stat.link_up_cnt = up_cnt;
+sp->mac_control.stats_info->sw_stat.link_down_cnt = down_cnt;
+sp->mac_control.stats_info->sw_stat.link_up_time = up_time;
+sp->mac_control.stats_info->sw_stat.link_down_time = down_time;
 sp->mac_control.stats_info->sw_stat.soft_reset_cnt = reset_cnt;
+sp->mac_control.stats_info->sw_stat.mem_allocated = mem_alloc_cnt;
+sp->mac_control.stats_info->sw_stat.mem_freed = mem_free_cnt;
+sp->mac_control.stats_info->sw_stat.watchdog_timer_cnt = watchdog_cnt;

 /* SXE-002: Configure link and activity LED to turn it off */
 subid = sp->pdev->subsystem_device;
@@ -3677,21 +3787,29 @@ static int s2io_enable_msi_x(struct s2io_nic *nic)
 nic->entries = kmalloc(MAX_REQUESTED_MSI_X * sizeof(struct msix_entry),
 GFP_KERNEL);
 if (nic->entries == NULL) {
-DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n", __FUNCTION__);
+DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n", \
+__FUNCTION__);
 nic->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++;
 return -ENOMEM;
 }
-memset(nic->entries, 0, MAX_REQUESTED_MSI_X * sizeof(struct msix_entry));
+nic->mac_control.stats_info->sw_stat.mem_allocated
++= (MAX_REQUESTED_MSI_X * sizeof(struct msix_entry));
+memset(nic->entries, 0,MAX_REQUESTED_MSI_X * sizeof(struct msix_entry));

 nic->s2io_entries =
 kmalloc(MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry),
 GFP_KERNEL);
 if (nic->s2io_entries == NULL) {
-DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n", __FUNCTION__);
+DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n",
+__FUNCTION__);
 nic->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++;
 kfree(nic->entries);
+nic->mac_control.stats_info->sw_stat.mem_freed
++= (MAX_REQUESTED_MSI_X * sizeof(struct msix_entry));
 return -ENOMEM;
 }
+nic->mac_control.stats_info->sw_stat.mem_allocated
++= (MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry));
 memset(nic->s2io_entries, 0,
 MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry));

@@ -3715,7 +3833,8 @@ static int s2io_enable_msi_x(struct s2io_nic *nic)
 rx_mat = readq(&bar0->rx_mat);
 for (j=0; j<nic->config.rx_ring_num; j++, msix_indx++) {
 rx_mat |= RX_MAT_SET(j, msix_indx);
-nic->s2io_entries[msix_indx].arg = &nic->mac_control.rings[j];
+nic->s2io_entries[msix_indx].arg
+= &nic->mac_control.rings[j];
 nic->s2io_entries[msix_indx].type = MSIX_RING_TYPE;
 nic->s2io_entries[msix_indx].in_use = MSIX_FLG;
 }
@@ -3724,7 +3843,8 @@ static int s2io_enable_msi_x(struct s2io_nic *nic)
 tx_mat = readq(&bar0->tx_mat0_n[7]);
 for (j=0; j<nic->config.rx_ring_num; j++, msix_indx++) {
 tx_mat |= TX_MAT_SET(i, msix_indx);
-nic->s2io_entries[msix_indx].arg = &nic->mac_control.rings[j];
+nic->s2io_entries[msix_indx].arg
+= &nic->mac_control.rings[j];
 nic->s2io_entries[msix_indx].type = MSIX_RING_TYPE;
 nic->s2io_entries[msix_indx].in_use = MSIX_FLG;
 }
@@ -3741,7 +3861,11 @@ static int s2io_enable_msi_x(struct s2io_nic *nic)
 if (ret) {
 DBG_PRINT(ERR_DBG, "%s: Enabling MSIX failed\n", nic->dev->name);
 kfree(nic->entries);
+nic->mac_control.stats_info->sw_stat.mem_freed
++= (MAX_REQUESTED_MSI_X * sizeof(struct msix_entry));
 kfree(nic->s2io_entries);
+nic->mac_control.stats_info->sw_stat.mem_freed
++= (MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry));
 nic->entries = NULL;
 nic->s2io_entries = NULL;
 nic->avail_msix_vectors = 0;
@@ -3809,10 +3933,16 @@ static int s2io_open(struct net_device *dev)

 hw_init_failed:
 if (sp->intr_type == MSI_X) {
-if (sp->entries)
+if (sp->entries) {
 kfree(sp->entries);
-if (sp->s2io_entries)
+sp->mac_control.stats_info->sw_stat.mem_freed
++= (MAX_REQUESTED_MSI_X * sizeof(struct msix_entry));
+}
+if (sp->s2io_entries) {
 kfree(sp->s2io_entries);
+sp->mac_control.stats_info->sw_stat.mem_freed
++= (MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry));
+}
 }
 return err;
 }
@@ -3873,6 +4003,13 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
 config = &sp->config;

 DBG_PRINT(TX_DBG, "%s: In Neterion Tx routine\n", dev->name);
+
+if (unlikely(skb->len <= 0)) {
+DBG_PRINT(TX_DBG, "%s:Buffer has no data..\n", dev->name);
+dev_kfree_skb_any(skb);
+return 0;
+}
+
 spin_lock_irqsave(&sp->tx_lock, flags);
 if (atomic_read(&sp->card_state) == CARD_DOWN) {
 DBG_PRINT(TX_DBG, "%s: Card going down for reset\n",
@@ -3883,7 +4020,6 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
 }

 queue = 0;
-
 /* Get Fifo number to Transmit based on vlan priority */
 if (sp->vlgrp && vlan_tx_tag_present(skb)) {
 vlan_tag = vlan_tx_tag_get(skb);
@@ -3907,14 +4043,6 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
 return 0;
 }

-/* A buffer with no data will be dropped */
-if (!skb->len) {
-DBG_PRINT(TX_DBG, "%s:Buffer has no data..\n", dev->name);
-dev_kfree_skb(skb);
-spin_unlock_irqrestore(&sp->tx_lock, flags);
-return 0;
-}
-
 offload_type = s2io_offload_type(skb);
 if (offload_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
 txdp->Control_1 |= TXD_TCP_LSO_EN;
@@ -4010,7 +4138,7 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
 put_off, get_off);
 netif_stop_queue(dev);
 }
-
+mac_control->stats_info->sw_stat.mem_allocated += skb->truesize;
 dev->trans_start = jiffies;
 spin_unlock_irqrestore(&sp->tx_lock, flags);

@@ -5026,6 +5154,7 @@ static void s2io_vpd_read(struct s2io_nic *nic)
 nic->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++;
 return;
 }
+nic->mac_control.stats_info->sw_stat.mem_allocated += 256;

 for (i = 0; i < 256; i +=4 ) {
 pci_write_config_byte(nic->pdev, (vpd_addr + 2), i);
@@ -5065,6 +5194,7 @@ static void s2io_vpd_read(struct s2io_nic *nic)
 memcpy(nic->product_name, &vpd_data[3], vpd_data[1]);
 }
 kfree(vpd_data);
+nic->mac_control.stats_info->sw_stat.mem_freed += 256;
 }

 /**
@@ -5787,6 +5917,28 @@ static void s2io_get_ethtool_stats(struct net_device *dev,
 tmp_stats[i++] = 0;
 tmp_stats[i++] = stat_info->sw_stat.mem_alloc_fail_cnt;
 tmp_stats[i++] = stat_info->sw_stat.watchdog_timer_cnt;
+tmp_stats[i++] = stat_info->sw_stat.mem_allocated;
+tmp_stats[i++] = stat_info->sw_stat.mem_freed;
+tmp_stats[i++] = stat_info->sw_stat.link_up_cnt;
+tmp_stats[i++] = stat_info->sw_stat.link_down_cnt;
+tmp_stats[i++] = stat_info->sw_stat.link_up_time;
+tmp_stats[i++] = stat_info->sw_stat.link_down_time;
+
+tmp_stats[i++] = stat_info->sw_stat.tx_buf_abort_cnt;
+tmp_stats[i++] = stat_info->sw_stat.tx_desc_abort_cnt;
+tmp_stats[i++] = stat_info->sw_stat.tx_parity_err_cnt;
+tmp_stats[i++] = stat_info->sw_stat.tx_link_loss_cnt;
+tmp_stats[i++] = stat_info->sw_stat.tx_list_proc_err_cnt;
+
+tmp_stats[i++] = stat_info->sw_stat.rx_parity_err_cnt;
+tmp_stats[i++] = stat_info->sw_stat.rx_abort_cnt;
+tmp_stats[i++] = stat_info->sw_stat.rx_parity_abort_cnt;
+tmp_stats[i++] = stat_info->sw_stat.rx_rda_fail_cnt;
+tmp_stats[i++] = stat_info->sw_stat.rx_unkn_prot_cnt;
+tmp_stats[i++] = stat_info->sw_stat.rx_fcs_err_cnt;
+tmp_stats[i++] = stat_info->sw_stat.rx_buf_size_err_cnt;
+tmp_stats[i++] = stat_info->sw_stat.rx_rxd_corrupt_cnt;
+tmp_stats[i++] = stat_info->sw_stat.rx_unkn_err_cnt;
 }

 static int s2io_ethtool_get_regs_len(struct net_device *dev)
@@ -6008,7 +6160,7 @@ static void s2io_tasklet(unsigned long dev_addr)
 if (ret == -ENOMEM) {
 DBG_PRINT(INFO_DBG, "%s: Out of ",
 dev->name);
-DBG_PRINT(ERR_DBG, "memory in tasklet\n");
+DBG_PRINT(INFO_DBG, "memory in tasklet\n");
 break;
 } else if (ret == -EFILL) {
 DBG_PRINT(INFO_DBG,
@@ -6129,6 +6281,8 @@ static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp,
 mem_alloc_fail_cnt++;
 return -ENOMEM ;
 }
+sp->mac_control.stats_info->sw_stat.mem_allocated
++= (*skb)->truesize;
 /* storing the mapped addr in a temp variable
 * such it will be used for next rxd whose
 * Host Control is NULL
@@ -6155,6 +6309,8 @@ static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp,
 mem_alloc_fail_cnt++;
 return -ENOMEM;
 }
+sp->mac_control.stats_info->sw_stat.mem_allocated
++= (*skb)->truesize;
 ((struct RxD3*)rxdp)->Buffer2_ptr = *temp2 =
 pci_map_single(sp->pdev, (*skb)->data,
 dev->mtu + 4,
@@ -6185,6 +6341,8 @@ static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp,
 mem_alloc_fail_cnt++;
 return -ENOMEM;
 }
+sp->mac_control.stats_info->sw_stat.mem_allocated
++= (*skb)->truesize;
 ((struct RxD3*)rxdp)->Buffer0_ptr = *temp0 =
 pci_map_single(sp->pdev, ba->ba_0, BUF0_LEN,
 PCI_DMA_FROMDEVICE);
@@ -6208,6 +6366,8 @@ static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp,
 }
 frag_list = skb_shinfo(*skb)->frag_list;
 frag_list->next = NULL;
+sp->mac_control.stats_info->sw_stat.mem_allocated
++= frag_list->truesize;
 /*
 * Buffer-2 receives L4 data payload
 */
@@ -6664,7 +6824,53 @@ static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp)
 if (err & 0x1) {
 sp->mac_control.stats_info->sw_stat.parity_err_cnt++;
 }
+err >>= 48;
+switch(err) {
+case 1:
+sp->mac_control.stats_info->sw_stat.
+rx_parity_err_cnt++;
+break;
+
+case 2:
+sp->mac_control.stats_info->sw_stat.
+rx_abort_cnt++;
+break;
+
+case 3:
+sp->mac_control.stats_info->sw_stat.
+rx_parity_abort_cnt++;
+break;
+
+case 4:
+sp->mac_control.stats_info->sw_stat.
+rx_rda_fail_cnt++;
+break;
+
+case 5:
+sp->mac_control.stats_info->sw_stat.
+rx_unkn_prot_cnt++;
+break;
+
+case 6:
+sp->mac_control.stats_info->sw_stat.
+rx_fcs_err_cnt++;
+break;

+case 7:
+sp->mac_control.stats_info->sw_stat.
+rx_buf_size_err_cnt++;
+break;
+
+case 8:
+sp->mac_control.stats_info->sw_stat.
+rx_rxd_corrupt_cnt++;
+break;
+
+case 15:
+sp->mac_control.stats_info->sw_stat.
+rx_unkn_err_cnt++;
+break;
+}
 /*
 * Drop the packet if bad transfer code. Exception being
 * 0x5, which could be due to unsupported IPv6 extension header.
@@ -6672,10 +6878,12 @@ static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp)
 * Note that in this case, since checksum will be incorrect,
 * stack will validate the same.
 */
-if (err && ((err >> 48) != 0x5)) {
+if (err != 0x5) {
 DBG_PRINT(ERR_DBG, "%s: Rx error Value: 0x%llx\n",
 dev->name, err);
 sp->stats.rx_crc_errors++;
+sp->mac_control.stats_info->sw_stat.mem_freed
++= skb->truesize;
 dev_kfree_skb(skb);
 atomic_dec(&sp->rx_bufs_left[ring_no]);
 rxdp->Host_Control = 0;
@@ -6685,7 +6893,6 @@ static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp)

 /* Updating statistics */
 rxdp->Host_Control = 0;
-sp->stats.rx_packets++;
 if (sp->rxd_mode == RXD_MODE_1) {
 int len = RXD_GET_BUFFER0_SIZE_1(rxdp->Control_2);

@@ -6789,7 +6996,7 @@ static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp)
 } else {
 skb->ip_summed = CHECKSUM_NONE;
 }
-
+sp->mac_control.stats_info->sw_stat.mem_freed += skb->truesize;
 if (!sp->lro) {
 skb->protocol = eth_type_trans(skb, dev);
 if ((sp->vlgrp && RXD_GET_VLAN_TAG(rxdp->Control_2) &&
@@ -6838,12 +7045,21 @@ static void s2io_link(struct s2io_nic * sp, int link)
 if (link == LINK_DOWN) {
 DBG_PRINT(ERR_DBG, "%s: Link down\n", dev->name);
 netif_carrier_off(dev);
+if(sp->mac_control.stats_info->sw_stat.link_up_cnt)
+sp->mac_control.stats_info->sw_stat.link_up_time =
+jiffies - sp->start_time;
+sp->mac_control.stats_info->sw_stat.link_down_cnt++;
 } else {
 DBG_PRINT(ERR_DBG, "%s: Link Up\n", dev->name);
+if (sp->mac_control.stats_info->sw_stat.link_down_cnt)
+sp->mac_control.stats_info->sw_stat.link_down_time =
+jiffies - sp->start_time;
+sp->mac_control.stats_info->sw_stat.link_up_cnt++;
 netif_carrier_on(dev);
 }
 }
 sp->last_link_state = link;
+sp->start_time = jiffies;
 }

 /**
diff --git a/drivers/net/s2io.h b/drivers/net/s2io.h
index 24a3a80a2293..54baa0b8ec7c 100644
--- a/drivers/net/s2io.h
+++ b/drivers/net/s2io.h
@@ -98,6 +98,29 @@ struct swStat {
 /* Other statistics */
 unsigned long long mem_alloc_fail_cnt;
 unsigned long long watchdog_timer_cnt;
+unsigned long long mem_allocated;
+unsigned long long mem_freed;
+unsigned long long link_up_cnt;
+unsigned long long link_down_cnt;
+unsigned long long link_up_time;
+unsigned long long link_down_time;
+
+/* Transfer Code statistics */
+unsigned long long tx_buf_abort_cnt;
+unsigned long long tx_desc_abort_cnt;
+unsigned long long tx_parity_err_cnt;
+unsigned long long tx_link_loss_cnt;
+unsigned long long tx_list_proc_err_cnt;
+
+unsigned long long rx_parity_err_cnt;
+unsigned long long rx_abort_cnt;
+unsigned long long rx_parity_abort_cnt;
+unsigned long long rx_rda_fail_cnt;
+unsigned long long rx_unkn_prot_cnt;
+unsigned long long rx_fcs_err_cnt;
+unsigned long long rx_buf_size_err_cnt;
+unsigned long long rx_rxd_corrupt_cnt;
+unsigned long long rx_unkn_err_cnt;
 };

 /* Xpak releated alarm and warnings */
@@ -827,6 +850,7 @@ struct s2io_nic {
 #define LINK_UP 2

 int task_flag;
+unsigned long long start_time;
 #define CARD_DOWN 1
 #define CARD_UP 2
 atomic_t card_state;