Diffstat (limited to 'drivers/net/s2io.c')
-rw-r--r--  drivers/net/s2io.c | 753
1 file changed, 403 insertions(+), 350 deletions(-)
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c
index d303d162974f..9c4935407f26 100644
--- a/drivers/net/s2io.c
+++ b/drivers/net/s2io.c
@@ -30,6 +30,8 @@
30 * in the driver. 30 * in the driver.
31 * rx_ring_sz: This defines the number of descriptors each ring can have. This 31 * rx_ring_sz: This defines the number of descriptors each ring can have. This
32 * is also an array of size 8. 32 * is also an array of size 8.
33 * rx_ring_mode: This defines the operation mode of all 8 rings. The valid
34 * values are 1, 2 and 3.
33 * tx_fifo_num: This defines the number of Tx FIFOs thats used int the driver. 35 * tx_fifo_num: This defines the number of Tx FIFOs thats used int the driver.
34 * tx_fifo_len: This too is an array of 8. Each element defines the number of 36 * tx_fifo_len: This too is an array of 8. Each element defines the number of
35 * Tx descriptors that can be associated with each corresponding FIFO. 37 * Tx descriptors that can be associated with each corresponding FIFO.
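The new rx_ring_mode module parameter turns the old compile-time CONFIG_2BUFF_MODE choice into a runtime one. Below is a minimal sketch of how the three parameter values map onto the driver's descriptor modes, mirroring the probe-time hunk near the end of this patch; the numeric RXD_MODE_* values and the fallback for out-of-range input are assumptions of the sketch.

/* Illustrative sketch of the rx_ring_mode -> descriptor-mode mapping
 * added by this patch (see the s2io_init_nic hunk near the end).
 * The numeric enum values below are assumptions for this sketch. */
#include <stdio.h>

enum rxd_mode { RXD_MODE_1 = 0, RXD_MODE_3A = 1, RXD_MODE_3B = 2 };

static enum rxd_mode mode_from_param(unsigned int rx_ring_mode)
{
        switch (rx_ring_mode) {
        case 1: return RXD_MODE_1;   /* 1-buffer mode (default)          */
        case 2: return RXD_MODE_3B;  /* 2-buffer mode                    */
        case 3: return RXD_MODE_3A;  /* 3-buffer mode, header separation */
        default: return RXD_MODE_1;  /* sketch only: fall back to mode 1 */
        }
}

int main(void)
{
        for (unsigned int p = 1; p <= 3; p++)
                printf("rx_ring_mode=%u -> rxd_mode %d\n", p, mode_from_param(p));
        return 0;
}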
@@ -65,12 +67,15 @@
65#include "s2io.h" 67#include "s2io.h"
66#include "s2io-regs.h" 68#include "s2io-regs.h"
67 69
68#define DRV_VERSION "Version 2.0.9.1" 70#define DRV_VERSION "Version 2.0.9.3"
69 71
70/* S2io Driver name & version. */ 72/* S2io Driver name & version. */
71static char s2io_driver_name[] = "Neterion"; 73static char s2io_driver_name[] = "Neterion";
72static char s2io_driver_version[] = DRV_VERSION; 74static char s2io_driver_version[] = DRV_VERSION;
73 75
76int rxd_size[4] = {32,48,48,64};
77int rxd_count[4] = {127,85,85,63};
78
74static inline int RXD_IS_UP2DT(RxD_t *rxdp) 79static inline int RXD_IS_UP2DT(RxD_t *rxdp)
75{ 80{
76 int ret; 81 int ret;
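The rxd_size[]/rxd_count[] tables replace the fixed MAX_RXDS_PER_BLOCK constant: each Rx block is one SIZE_OF_BLOCK allocation, so the usable descriptors per block follow from the per-mode descriptor size. A standalone check of that arithmetic, assuming a 4096-byte block and that the tail of each block is reserved for the link to the next block:

/* Rough sketch of where rxd_count[] comes from, assuming each Rx block
 * is one 4096-byte page (SIZE_OF_BLOCK) whose tail carries the pointer
 * or marker linking it to the next block. */
#include <stdio.h>

int main(void)
{
        const int rxd_size[4] = { 32, 48, 48, 64 };     /* bytes per RxD, per mode */
        const int page = 4096;

        for (int mode = 0; mode < 4; mode++) {
                int fit   = page / rxd_size[mode];      /* whole RxDs per page     */
                int spare = page % rxd_size[mode];      /* bytes left at the end   */
                /* If the page divides exactly, one slot is given up for the
                 * block link; otherwise the remainder is large enough for it. */
                int count = spare ? fit : fit - 1;
                printf("mode %d: %2d-byte RxD -> %3d usable per block\n",
                       mode, rxd_size[mode], count);
        }
        return 0;                                       /* prints 127, 85, 85, 63  */
}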
@@ -104,7 +109,7 @@ static inline int rx_buffer_level(nic_t * sp, int rxb_size, int ring)
104 mac_control = &sp->mac_control; 109 mac_control = &sp->mac_control;
105 if ((mac_control->rings[ring].pkt_cnt - rxb_size) > 16) { 110 if ((mac_control->rings[ring].pkt_cnt - rxb_size) > 16) {
106 level = LOW; 111 level = LOW;
107 if (rxb_size <= MAX_RXDS_PER_BLOCK) { 112 if (rxb_size <= rxd_count[sp->rxd_mode]) {
108 level = PANIC; 113 level = PANIC;
109 } 114 }
110 } 115 }
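rx_buffer_level() now keys its panic threshold off the per-mode descriptor count instead of the old constant. A rough sketch of the watermark logic, with the level names and the 16-descriptor slack taken from the hunk above (the initial level value is an assumption):

/* Sketch of the replenish-watermark logic: LOW once more than 16 RxDs
 * sit empty, PANIC once fewer than one block's worth remain filled.
 * rxds_per_block is rxd_count[sp->rxd_mode] from the table above. */
enum { OK_LEVEL = 0, LOW = 1, PANIC = 2 };

static int buffer_level(int pkt_cnt, int rxb_size, int rxds_per_block)
{
        int level = OK_LEVEL;

        if ((pkt_cnt - rxb_size) > 16) {        /* more than 16 RxDs empty */
                level = LOW;
                if (rxb_size <= rxds_per_block) /* under one block filled  */
                        level = PANIC;
        }
        return level;
}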
@@ -296,6 +301,7 @@ static unsigned int rx_ring_sz[MAX_RX_RINGS] =
296 {[0 ...(MAX_RX_RINGS - 1)] = 0 }; 301 {[0 ...(MAX_RX_RINGS - 1)] = 0 };
297static unsigned int rts_frm_len[MAX_RX_RINGS] = 302static unsigned int rts_frm_len[MAX_RX_RINGS] =
298 {[0 ...(MAX_RX_RINGS - 1)] = 0 }; 303 {[0 ...(MAX_RX_RINGS - 1)] = 0 };
304static unsigned int rx_ring_mode = 1;
299static unsigned int use_continuous_tx_intrs = 1; 305static unsigned int use_continuous_tx_intrs = 1;
300static unsigned int rmac_pause_time = 65535; 306static unsigned int rmac_pause_time = 65535;
301static unsigned int mc_pause_threshold_q0q3 = 187; 307static unsigned int mc_pause_threshold_q0q3 = 187;
@@ -304,6 +310,7 @@ static unsigned int shared_splits;
304static unsigned int tmac_util_period = 5; 310static unsigned int tmac_util_period = 5;
305static unsigned int rmac_util_period = 5; 311static unsigned int rmac_util_period = 5;
306static unsigned int bimodal = 0; 312static unsigned int bimodal = 0;
313static unsigned int l3l4hdr_size = 128;
307#ifndef CONFIG_S2IO_NAPI 314#ifndef CONFIG_S2IO_NAPI
308static unsigned int indicate_max_pkts; 315static unsigned int indicate_max_pkts;
309#endif 316#endif
@@ -357,10 +364,8 @@ static int init_shared_mem(struct s2io_nic *nic)
357 int i, j, blk_cnt, rx_sz, tx_sz; 364 int i, j, blk_cnt, rx_sz, tx_sz;
358 int lst_size, lst_per_page; 365 int lst_size, lst_per_page;
359 struct net_device *dev = nic->dev; 366 struct net_device *dev = nic->dev;
360#ifdef CONFIG_2BUFF_MODE
361 unsigned long tmp; 367 unsigned long tmp;
362 buffAdd_t *ba; 368 buffAdd_t *ba;
363#endif
364 369
365 mac_info_t *mac_control; 370 mac_info_t *mac_control;
366 struct config_param *config; 371 struct config_param *config;
@@ -458,7 +463,8 @@ static int init_shared_mem(struct s2io_nic *nic)
458 /* Allocation and initialization of RXDs in Rings */ 463 /* Allocation and initialization of RXDs in Rings */
459 size = 0; 464 size = 0;
460 for (i = 0; i < config->rx_ring_num; i++) { 465 for (i = 0; i < config->rx_ring_num; i++) {
461 if (config->rx_cfg[i].num_rxd % (MAX_RXDS_PER_BLOCK + 1)) { 466 if (config->rx_cfg[i].num_rxd %
467 (rxd_count[nic->rxd_mode] + 1)) {
462 DBG_PRINT(ERR_DBG, "%s: RxD count of ", dev->name); 468 DBG_PRINT(ERR_DBG, "%s: RxD count of ", dev->name);
463 DBG_PRINT(ERR_DBG, "Ring%d is not a multiple of ", 469 DBG_PRINT(ERR_DBG, "Ring%d is not a multiple of ",
464 i); 470 i);
@@ -467,11 +473,15 @@ static int init_shared_mem(struct s2io_nic *nic)
467 } 473 }
468 size += config->rx_cfg[i].num_rxd; 474 size += config->rx_cfg[i].num_rxd;
469 mac_control->rings[i].block_count = 475 mac_control->rings[i].block_count =
470 config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1); 476 config->rx_cfg[i].num_rxd /
471 mac_control->rings[i].pkt_cnt = 477 (rxd_count[nic->rxd_mode] + 1 );
472 config->rx_cfg[i].num_rxd - mac_control->rings[i].block_count; 478 mac_control->rings[i].pkt_cnt = config->rx_cfg[i].num_rxd -
479 mac_control->rings[i].block_count;
473 } 480 }
474 size = (size * (sizeof(RxD_t))); 481 if (nic->rxd_mode == RXD_MODE_1)
482 size = (size * (sizeof(RxD1_t)));
483 else
484 size = (size * (sizeof(RxD3_t)));
475 rx_sz = size; 485 rx_sz = size;
476 486
477 for (i = 0; i < config->rx_ring_num; i++) { 487 for (i = 0; i < config->rx_ring_num; i++) {
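With the per-mode table, each ring's num_rxd must be a multiple of rxd_count + 1; the block count and usable packet count then fall out, and the descriptor-memory size depends on whether the mode uses RxD1_t or RxD3_t. A worked example, assuming 2-buffer mode (rxd_count = 85, 48-byte descriptors) and a ring configured as 8 blocks:

/* Worked example of the ring accounting in the hunk above. */
#include <stdio.h>

int main(void)
{
        int rxd_count = 85;                        /* per block, 2/3-buffer modes */
        int num_rxd   = 8 * (rxd_count + 1);       /* must be a multiple of 86    */
        int blocks    = num_rxd / (rxd_count + 1); /* -> 8 Rx blocks              */
        int pkt_cnt   = num_rxd - blocks;          /* usable RxDs: 688 - 8 = 680  */

        printf("num_rxd=%d blocks=%d pkt_cnt=%d rxd_bytes=%d\n",
               num_rxd, blocks, pkt_cnt,
               num_rxd * 48 /* sizeof(RxD3_t) assumed to be 48 */);
        return 0;
}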
@@ -486,15 +496,15 @@ static int init_shared_mem(struct s2io_nic *nic)
486 mac_control->rings[i].nic = nic; 496 mac_control->rings[i].nic = nic;
487 mac_control->rings[i].ring_no = i; 497 mac_control->rings[i].ring_no = i;
488 498
489 blk_cnt = 499 blk_cnt = config->rx_cfg[i].num_rxd /
490 config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1); 500 (rxd_count[nic->rxd_mode] + 1);
491 /* Allocating all the Rx blocks */ 501 /* Allocating all the Rx blocks */
492 for (j = 0; j < blk_cnt; j++) { 502 for (j = 0; j < blk_cnt; j++) {
493#ifndef CONFIG_2BUFF_MODE 503 rx_block_info_t *rx_blocks;
494 size = (MAX_RXDS_PER_BLOCK + 1) * (sizeof(RxD_t)); 504 int l;
495#else 505
496 size = SIZE_OF_BLOCK; 506 rx_blocks = &mac_control->rings[i].rx_blocks[j];
497#endif 507 size = SIZE_OF_BLOCK; //size is always page size
498 tmp_v_addr = pci_alloc_consistent(nic->pdev, size, 508 tmp_v_addr = pci_alloc_consistent(nic->pdev, size,
499 &tmp_p_addr); 509 &tmp_p_addr);
500 if (tmp_v_addr == NULL) { 510 if (tmp_v_addr == NULL) {
@@ -504,11 +514,24 @@ static int init_shared_mem(struct s2io_nic *nic)
504 * memory that was alloced till the 514 * memory that was alloced till the
505 * failure happened. 515 * failure happened.
506 */ 516 */
507 mac_control->rings[i].rx_blocks[j].block_virt_addr = 517 rx_blocks->block_virt_addr = tmp_v_addr;
508 tmp_v_addr;
509 return -ENOMEM; 518 return -ENOMEM;
510 } 519 }
511 memset(tmp_v_addr, 0, size); 520 memset(tmp_v_addr, 0, size);
521 rx_blocks->block_virt_addr = tmp_v_addr;
522 rx_blocks->block_dma_addr = tmp_p_addr;
523 rx_blocks->rxds = kmalloc(sizeof(rxd_info_t)*
524 rxd_count[nic->rxd_mode],
525 GFP_KERNEL);
526 for (l=0; l<rxd_count[nic->rxd_mode];l++) {
527 rx_blocks->rxds[l].virt_addr =
528 rx_blocks->block_virt_addr +
529 (rxd_size[nic->rxd_mode] * l);
530 rx_blocks->rxds[l].dma_addr =
531 rx_blocks->block_dma_addr +
532 (rxd_size[nic->rxd_mode] * l);
533 }
534
512 mac_control->rings[i].rx_blocks[j].block_virt_addr = 535 mac_control->rings[i].rx_blocks[j].block_virt_addr =
513 tmp_v_addr; 536 tmp_v_addr;
514 mac_control->rings[i].rx_blocks[j].block_dma_addr = 537 mac_control->rings[i].rx_blocks[j].block_dma_addr =
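Each Rx block now also carries a kmalloc'd rxds[] table so any descriptor's virtual and DMA addresses can be looked up by index rather than recomputed from the block pointer at every use. A simplified sketch of that table construction, with the types and allocator reduced to userspace equivalents:

/* Sketch of the per-block descriptor table built above: the l-th RxD of a
 * block lives at a fixed stride from the block base, in both the CPU and
 * the DMA address space. */
#include <stdint.h>
#include <stdlib.h>

struct rxd_info { void *virt_addr; uint64_t dma_addr; };

static struct rxd_info *build_rxd_table(void *blk_virt, uint64_t blk_dma,
                                        int rxd_size, int rxd_count)
{
        struct rxd_info *rxds = calloc(rxd_count, sizeof(*rxds));

        if (!rxds)
                return NULL;
        for (int l = 0; l < rxd_count; l++) {
                rxds[l].virt_addr = (char *)blk_virt + (uint64_t)rxd_size * l;
                rxds[l].dma_addr  = blk_dma + (uint64_t)rxd_size * l;
        }
        return rxds;
}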
@@ -528,62 +551,58 @@ static int init_shared_mem(struct s2io_nic *nic)
528 blk_cnt].block_dma_addr; 551 blk_cnt].block_dma_addr;
529 552
530 pre_rxd_blk = (RxD_block_t *) tmp_v_addr; 553 pre_rxd_blk = (RxD_block_t *) tmp_v_addr;
531 pre_rxd_blk->reserved_1 = END_OF_BLOCK; /* last RxD
532 * marker.
533 */
534#ifndef CONFIG_2BUFF_MODE
535 pre_rxd_blk->reserved_2_pNext_RxD_block = 554 pre_rxd_blk->reserved_2_pNext_RxD_block =
536 (unsigned long) tmp_v_addr_next; 555 (unsigned long) tmp_v_addr_next;
537#endif
538 pre_rxd_blk->pNext_RxD_Blk_physical = 556 pre_rxd_blk->pNext_RxD_Blk_physical =
539 (u64) tmp_p_addr_next; 557 (u64) tmp_p_addr_next;
540 } 558 }
541 } 559 }
542 560 if (nic->rxd_mode >= RXD_MODE_3A) {
543#ifdef CONFIG_2BUFF_MODE 561 /*
544 /* 562 * Allocation of Storages for buffer addresses in 2BUFF mode
545 * Allocation of Storages for buffer addresses in 2BUFF mode 563 * and the buffers as well.
546 * and the buffers as well. 564 */
547 */ 565 for (i = 0; i < config->rx_ring_num; i++) {
548 for (i = 0; i < config->rx_ring_num; i++) { 566 blk_cnt = config->rx_cfg[i].num_rxd /
549 blk_cnt = 567 (rxd_count[nic->rxd_mode]+ 1);
550 config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1); 568 mac_control->rings[i].ba =
551 mac_control->rings[i].ba = kmalloc((sizeof(buffAdd_t *) * blk_cnt), 569 kmalloc((sizeof(buffAdd_t *) * blk_cnt),
552 GFP_KERNEL); 570 GFP_KERNEL);
553 if (!mac_control->rings[i].ba) 571 if (!mac_control->rings[i].ba)
554 return -ENOMEM;
555 for (j = 0; j < blk_cnt; j++) {
556 int k = 0;
557 mac_control->rings[i].ba[j] = kmalloc((sizeof(buffAdd_t) *
558 (MAX_RXDS_PER_BLOCK + 1)),
559 GFP_KERNEL);
560 if (!mac_control->rings[i].ba[j])
561 return -ENOMEM; 572 return -ENOMEM;
562 while (k != MAX_RXDS_PER_BLOCK) { 573 for (j = 0; j < blk_cnt; j++) {
563 ba = &mac_control->rings[i].ba[j][k]; 574 int k = 0;
564 575 mac_control->rings[i].ba[j] =
565 ba->ba_0_org = (void *) kmalloc 576 kmalloc((sizeof(buffAdd_t) *
566 (BUF0_LEN + ALIGN_SIZE, GFP_KERNEL); 577 (rxd_count[nic->rxd_mode] + 1)),
567 if (!ba->ba_0_org) 578 GFP_KERNEL);
568 return -ENOMEM; 579 if (!mac_control->rings[i].ba[j])
569 tmp = (unsigned long) ba->ba_0_org;
570 tmp += ALIGN_SIZE;
571 tmp &= ~((unsigned long) ALIGN_SIZE);
572 ba->ba_0 = (void *) tmp;
573
574 ba->ba_1_org = (void *) kmalloc
575 (BUF1_LEN + ALIGN_SIZE, GFP_KERNEL);
576 if (!ba->ba_1_org)
577 return -ENOMEM; 580 return -ENOMEM;
578 tmp = (unsigned long) ba->ba_1_org; 581 while (k != rxd_count[nic->rxd_mode]) {
579 tmp += ALIGN_SIZE; 582 ba = &mac_control->rings[i].ba[j][k];
580 tmp &= ~((unsigned long) ALIGN_SIZE); 583
581 ba->ba_1 = (void *) tmp; 584 ba->ba_0_org = (void *) kmalloc
582 k++; 585 (BUF0_LEN + ALIGN_SIZE, GFP_KERNEL);
586 if (!ba->ba_0_org)
587 return -ENOMEM;
588 tmp = (unsigned long)ba->ba_0_org;
589 tmp += ALIGN_SIZE;
590 tmp &= ~((unsigned long) ALIGN_SIZE);
591 ba->ba_0 = (void *) tmp;
592
593 ba->ba_1_org = (void *) kmalloc
594 (BUF1_LEN + ALIGN_SIZE, GFP_KERNEL);
595 if (!ba->ba_1_org)
596 return -ENOMEM;
597 tmp = (unsigned long) ba->ba_1_org;
598 tmp += ALIGN_SIZE;
599 tmp &= ~((unsigned long) ALIGN_SIZE);
600 ba->ba_1 = (void *) tmp;
601 k++;
602 }
583 } 603 }
584 } 604 }
585 } 605 }
586#endif
587 606
588 /* Allocation and initialization of Statistics block */ 607 /* Allocation and initialization of Statistics block */
589 size = sizeof(StatInfo_t); 608 size = sizeof(StatInfo_t);
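The ba_0/ba_1 staging buffers must sit on a 128-byte boundary, which plain kmalloc() does not guarantee here, so the code over-allocates by ALIGN_SIZE, keeps the original pointer for kfree(), and rounds the working pointer up. A standalone sketch of that idiom, assuming ALIGN_SIZE is 127 (the alignment minus one):

/* The align-by-overallocation idiom used for ba_0/ba_1 above: allocate
 * len + ALIGN_SIZE bytes, keep the original pointer for freeing, and use
 * a pointer rounded up to the next (ALIGN_SIZE + 1)-byte boundary. */
#include <stdlib.h>

#define ALIGN_SIZE 127UL          /* assumed value: 128-byte alignment */

static void *alloc_aligned(size_t len, void **org)
{
        unsigned long tmp;

        *org = malloc(len + ALIGN_SIZE);
        if (!*org)
                return NULL;
        tmp  = (unsigned long)*org;
        tmp += ALIGN_SIZE;
        tmp &= ~ALIGN_SIZE;        /* round up to a 128-byte boundary */
        return (void *)tmp;
}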
@@ -669,11 +688,7 @@ static void free_shared_mem(struct s2io_nic *nic)
669 kfree(mac_control->fifos[i].list_info); 688 kfree(mac_control->fifos[i].list_info);
670 } 689 }
671 690
672#ifndef CONFIG_2BUFF_MODE
673 size = (MAX_RXDS_PER_BLOCK + 1) * (sizeof(RxD_t));
674#else
675 size = SIZE_OF_BLOCK; 691 size = SIZE_OF_BLOCK;
676#endif
677 for (i = 0; i < config->rx_ring_num; i++) { 692 for (i = 0; i < config->rx_ring_num; i++) {
678 blk_cnt = mac_control->rings[i].block_count; 693 blk_cnt = mac_control->rings[i].block_count;
679 for (j = 0; j < blk_cnt; j++) { 694 for (j = 0; j < blk_cnt; j++) {
@@ -685,30 +700,31 @@ static void free_shared_mem(struct s2io_nic *nic)
685 break; 700 break;
686 pci_free_consistent(nic->pdev, size, 701 pci_free_consistent(nic->pdev, size,
687 tmp_v_addr, tmp_p_addr); 702 tmp_v_addr, tmp_p_addr);
703 kfree(mac_control->rings[i].rx_blocks[j].rxds);
688 } 704 }
689 } 705 }
690 706
691#ifdef CONFIG_2BUFF_MODE 707 if (nic->rxd_mode >= RXD_MODE_3A) {
692 /* Freeing buffer storage addresses in 2BUFF mode. */ 708 /* Freeing buffer storage addresses in 2BUFF mode. */
693 for (i = 0; i < config->rx_ring_num; i++) { 709 for (i = 0; i < config->rx_ring_num; i++) {
694 blk_cnt = 710 blk_cnt = config->rx_cfg[i].num_rxd /
695 config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1); 711 (rxd_count[nic->rxd_mode] + 1);
696 for (j = 0; j < blk_cnt; j++) { 712 for (j = 0; j < blk_cnt; j++) {
697 int k = 0; 713 int k = 0;
698 if (!mac_control->rings[i].ba[j]) 714 if (!mac_control->rings[i].ba[j])
699 continue; 715 continue;
700 while (k != MAX_RXDS_PER_BLOCK) { 716 while (k != rxd_count[nic->rxd_mode]) {
701 buffAdd_t *ba = &mac_control->rings[i].ba[j][k]; 717 buffAdd_t *ba =
702 kfree(ba->ba_0_org); 718 &mac_control->rings[i].ba[j][k];
703 kfree(ba->ba_1_org); 719 kfree(ba->ba_0_org);
704 k++; 720 kfree(ba->ba_1_org);
721 k++;
722 }
723 kfree(mac_control->rings[i].ba[j]);
705 } 724 }
706 kfree(mac_control->rings[i].ba[j]);
707 }
708 if (mac_control->rings[i].ba)
709 kfree(mac_control->rings[i].ba); 725 kfree(mac_control->rings[i].ba);
726 }
710 } 727 }
711#endif
712 728
713 if (mac_control->stats_mem) { 729 if (mac_control->stats_mem) {
714 pci_free_consistent(nic->pdev, 730 pci_free_consistent(nic->pdev,
@@ -1895,20 +1911,19 @@ static int start_nic(struct s2io_nic *nic)
1895 val64 = readq(&bar0->prc_ctrl_n[i]); 1911 val64 = readq(&bar0->prc_ctrl_n[i]);
1896 if (nic->config.bimodal) 1912 if (nic->config.bimodal)
1897 val64 |= PRC_CTRL_BIMODAL_INTERRUPT; 1913 val64 |= PRC_CTRL_BIMODAL_INTERRUPT;
1898#ifndef CONFIG_2BUFF_MODE 1914 if (nic->rxd_mode == RXD_MODE_1)
1899 val64 |= PRC_CTRL_RC_ENABLED; 1915 val64 |= PRC_CTRL_RC_ENABLED;
1900#else 1916 else
1901 val64 |= PRC_CTRL_RC_ENABLED | PRC_CTRL_RING_MODE_3; 1917 val64 |= PRC_CTRL_RC_ENABLED | PRC_CTRL_RING_MODE_3;
1902#endif
1903 writeq(val64, &bar0->prc_ctrl_n[i]); 1918 writeq(val64, &bar0->prc_ctrl_n[i]);
1904 } 1919 }
1905 1920
1906#ifdef CONFIG_2BUFF_MODE 1921 if (nic->rxd_mode == RXD_MODE_3B) {
1907 /* Enabling 2 buffer mode by writing into Rx_pa_cfg reg. */ 1922 /* Enabling 2 buffer mode by writing into Rx_pa_cfg reg. */
1908 val64 = readq(&bar0->rx_pa_cfg); 1923 val64 = readq(&bar0->rx_pa_cfg);
1909 val64 |= RX_PA_CFG_IGNORE_L2_ERR; 1924 val64 |= RX_PA_CFG_IGNORE_L2_ERR;
1910 writeq(val64, &bar0->rx_pa_cfg); 1925 writeq(val64, &bar0->rx_pa_cfg);
1911#endif 1926 }
1912 1927
1913 /* 1928 /*
1914 * Enabling MC-RLDRAM. After enabling the device, we timeout 1929 * Enabling MC-RLDRAM. After enabling the device, we timeout
@@ -2091,6 +2106,41 @@ static void stop_nic(struct s2io_nic *nic)
2091 } 2106 }
2092} 2107}
2093 2108
2109int fill_rxd_3buf(nic_t *nic, RxD_t *rxdp, struct sk_buff *skb)
2110{
2111 struct net_device *dev = nic->dev;
2112 struct sk_buff *frag_list;
2113 u64 tmp;
2114
2115 /* Buffer-1 receives L3/L4 headers */
2116 ((RxD3_t*)rxdp)->Buffer1_ptr = pci_map_single
2117 (nic->pdev, skb->data, l3l4hdr_size + 4,
2118 PCI_DMA_FROMDEVICE);
2119
2120 /* skb_shinfo(skb)->frag_list will have L4 data payload */
2121 skb_shinfo(skb)->frag_list = dev_alloc_skb(dev->mtu + ALIGN_SIZE);
2122 if (skb_shinfo(skb)->frag_list == NULL) {
2123 DBG_PRINT(ERR_DBG, "%s: dev_alloc_skb failed\n ", dev->name);
2124 return -ENOMEM ;
2125 }
2126 frag_list = skb_shinfo(skb)->frag_list;
2127 frag_list->next = NULL;
2128 tmp = (u64) frag_list->data;
2129 tmp += ALIGN_SIZE;
2130 tmp &= ~ALIGN_SIZE;
2131 frag_list->data = (void *) tmp;
2132 frag_list->tail = (void *) tmp;
2133
2134 /* Buffer-2 receives L4 data payload */
2135 ((RxD3_t*)rxdp)->Buffer2_ptr = pci_map_single(nic->pdev,
2136 frag_list->data, dev->mtu,
2137 PCI_DMA_FROMDEVICE);
2138 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(l3l4hdr_size + 4);
2139 rxdp->Control_2 |= SET_BUFFER2_SIZE_3(dev->mtu);
2140
2141 return SUCCESS;
2142}
2143
2094/** 2144/**
2095 * fill_rx_buffers - Allocates the Rx side skbs 2145 * fill_rx_buffers - Allocates the Rx side skbs
2096 * @nic: device private variable 2146 * @nic: device private variable
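fill_rxd_3buf() above programs the header-separation split used in 3-buffer mode: Buffer1 receives the L3/L4 headers into skb->data, Buffer2 receives the payload into an skb chained on skb_shinfo(skb)->frag_list, and Buffer0 (mapped in fill_rx_buffers()) keeps the small fixed staging area. A sketch of the resulting layout, using the l3l4hdr_size default from this patch and an example MTU:

/* Buffer layout programmed by fill_rxd_3buf() above (3-buffer mode).
 * BUF0_LEN is the small per-descriptor staging buffer mapped separately
 * in fill_rx_buffers(); its exact value is not shown in this hunk. */
#include <stdio.h>

int main(void)
{
        unsigned int l3l4hdr_size = 128;   /* module parameter default     */
        unsigned int mtu          = 1500;  /* example MTU for this sketch  */

        printf("Buffer0: staging area, BUF0_LEN bytes\n");
        printf("Buffer1: L3/L4 headers, %u bytes, lands in skb->data\n",
               l3l4hdr_size + 4);
        printf("Buffer2: L4 payload, %u bytes, lands in the frag_list skb\n",
               mtu);
        return 0;
}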
@@ -2118,18 +2168,12 @@ int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
2118 struct sk_buff *skb; 2168 struct sk_buff *skb;
2119 RxD_t *rxdp; 2169 RxD_t *rxdp;
2120 int off, off1, size, block_no, block_no1; 2170 int off, off1, size, block_no, block_no1;
2121 int offset, offset1;
2122 u32 alloc_tab = 0; 2171 u32 alloc_tab = 0;
2123 u32 alloc_cnt; 2172 u32 alloc_cnt;
2124 mac_info_t *mac_control; 2173 mac_info_t *mac_control;
2125 struct config_param *config; 2174 struct config_param *config;
2126#ifdef CONFIG_2BUFF_MODE
2127 RxD_t *rxdpnext;
2128 int nextblk;
2129 u64 tmp; 2175 u64 tmp;
2130 buffAdd_t *ba; 2176 buffAdd_t *ba;
2131 dma_addr_t rxdpphys;
2132#endif
2133#ifndef CONFIG_S2IO_NAPI 2177#ifndef CONFIG_S2IO_NAPI
2134 unsigned long flags; 2178 unsigned long flags;
2135#endif 2179#endif
@@ -2139,8 +2183,6 @@ int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
2139 config = &nic->config; 2183 config = &nic->config;
2140 alloc_cnt = mac_control->rings[ring_no].pkt_cnt - 2184 alloc_cnt = mac_control->rings[ring_no].pkt_cnt -
2141 atomic_read(&nic->rx_bufs_left[ring_no]); 2185 atomic_read(&nic->rx_bufs_left[ring_no]);
2142 size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
2143 HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
2144 2186
2145 while (alloc_tab < alloc_cnt) { 2187 while (alloc_tab < alloc_cnt) {
2146 block_no = mac_control->rings[ring_no].rx_curr_put_info. 2188 block_no = mac_control->rings[ring_no].rx_curr_put_info.
@@ -2149,159 +2191,145 @@ int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
2149 block_index; 2191 block_index;
2150 off = mac_control->rings[ring_no].rx_curr_put_info.offset; 2192 off = mac_control->rings[ring_no].rx_curr_put_info.offset;
2151 off1 = mac_control->rings[ring_no].rx_curr_get_info.offset; 2193 off1 = mac_control->rings[ring_no].rx_curr_get_info.offset;
2152#ifndef CONFIG_2BUFF_MODE
2153 offset = block_no * (MAX_RXDS_PER_BLOCK + 1) + off;
2154 offset1 = block_no1 * (MAX_RXDS_PER_BLOCK + 1) + off1;
2155#else
2156 offset = block_no * (MAX_RXDS_PER_BLOCK) + off;
2157 offset1 = block_no1 * (MAX_RXDS_PER_BLOCK) + off1;
2158#endif
2159 2194
2160 rxdp = mac_control->rings[ring_no].rx_blocks[block_no]. 2195 rxdp = mac_control->rings[ring_no].
2161 block_virt_addr + off; 2196 rx_blocks[block_no].rxds[off].virt_addr;
2162 if ((offset == offset1) && (rxdp->Host_Control)) { 2197
2163 DBG_PRINT(INTR_DBG, "%s: Get and Put", dev->name); 2198 if ((block_no == block_no1) && (off == off1) &&
2199 (rxdp->Host_Control)) {
2200 DBG_PRINT(INTR_DBG, "%s: Get and Put",
2201 dev->name);
2164 DBG_PRINT(INTR_DBG, " info equated\n"); 2202 DBG_PRINT(INTR_DBG, " info equated\n");
2165 goto end; 2203 goto end;
2166 } 2204 }
2167#ifndef CONFIG_2BUFF_MODE 2205 if (off && (off == rxd_count[nic->rxd_mode])) {
2168 if (rxdp->Control_1 == END_OF_BLOCK) {
2169 mac_control->rings[ring_no].rx_curr_put_info. 2206 mac_control->rings[ring_no].rx_curr_put_info.
2170 block_index++; 2207 block_index++;
2208 if (mac_control->rings[ring_no].rx_curr_put_info.
2209 block_index == mac_control->rings[ring_no].
2210 block_count)
2211 mac_control->rings[ring_no].rx_curr_put_info.
2212 block_index = 0;
2213 block_no = mac_control->rings[ring_no].
2214 rx_curr_put_info.block_index;
2215 if (off == rxd_count[nic->rxd_mode])
2216 off = 0;
2171 mac_control->rings[ring_no].rx_curr_put_info. 2217 mac_control->rings[ring_no].rx_curr_put_info.
2172 block_index %= mac_control->rings[ring_no].block_count; 2218 offset = off;
2173 block_no = mac_control->rings[ring_no].rx_curr_put_info. 2219 rxdp = mac_control->rings[ring_no].
2174 block_index; 2220 rx_blocks[block_no].block_virt_addr;
2175 off++;
2176 off %= (MAX_RXDS_PER_BLOCK + 1);
2177 mac_control->rings[ring_no].rx_curr_put_info.offset =
2178 off;
2179 rxdp = (RxD_t *) ((unsigned long) rxdp->Control_2);
2180 DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n", 2221 DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n",
2181 dev->name, rxdp); 2222 dev->name, rxdp);
2182 } 2223 }
2183#ifndef CONFIG_S2IO_NAPI 2224#ifndef CONFIG_S2IO_NAPI
2184 spin_lock_irqsave(&nic->put_lock, flags); 2225 spin_lock_irqsave(&nic->put_lock, flags);
2185 mac_control->rings[ring_no].put_pos = 2226 mac_control->rings[ring_no].put_pos =
2186 (block_no * (MAX_RXDS_PER_BLOCK + 1)) + off; 2227 (block_no * (rxd_count[nic->rxd_mode] + 1)) + off;
2187 spin_unlock_irqrestore(&nic->put_lock, flags); 2228 spin_unlock_irqrestore(&nic->put_lock, flags);
2188#endif 2229#endif
2189#else 2230 if ((rxdp->Control_1 & RXD_OWN_XENA) &&
2190 if (rxdp->Host_Control == END_OF_BLOCK) { 2231 ((nic->rxd_mode >= RXD_MODE_3A) &&
2191 mac_control->rings[ring_no].rx_curr_put_info. 2232 (rxdp->Control_2 & BIT(0)))) {
2192 block_index++;
2193 mac_control->rings[ring_no].rx_curr_put_info.block_index
2194 %= mac_control->rings[ring_no].block_count;
2195 block_no = mac_control->rings[ring_no].rx_curr_put_info
2196 .block_index;
2197 off = 0;
2198 DBG_PRINT(INTR_DBG, "%s: block%d at: 0x%llx\n",
2199 dev->name, block_no,
2200 (unsigned long long) rxdp->Control_1);
2201 mac_control->rings[ring_no].rx_curr_put_info.offset =
2202 off;
2203 rxdp = mac_control->rings[ring_no].rx_blocks[block_no].
2204 block_virt_addr;
2205 }
2206#ifndef CONFIG_S2IO_NAPI
2207 spin_lock_irqsave(&nic->put_lock, flags);
2208 mac_control->rings[ring_no].put_pos = (block_no *
2209 (MAX_RXDS_PER_BLOCK + 1)) + off;
2210 spin_unlock_irqrestore(&nic->put_lock, flags);
2211#endif
2212#endif
2213
2214#ifndef CONFIG_2BUFF_MODE
2215 if (rxdp->Control_1 & RXD_OWN_XENA)
2216#else
2217 if (rxdp->Control_2 & BIT(0))
2218#endif
2219 {
2220 mac_control->rings[ring_no].rx_curr_put_info. 2233 mac_control->rings[ring_no].rx_curr_put_info.
2221 offset = off; 2234 offset = off;
2222 goto end; 2235 goto end;
2223 } 2236 }
2224#ifdef CONFIG_2BUFF_MODE 2237 /* calculate size of skb based on ring mode */
2225 /* 2238 size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
2226 * RxDs Spanning cache lines will be replenished only 2239 HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
2227 * if the succeeding RxD is also owned by Host. It 2240 if (nic->rxd_mode == RXD_MODE_1)
2228 * will always be the ((8*i)+3) and ((8*i)+6) 2241 size += NET_IP_ALIGN;
2229 * descriptors for the 48 byte descriptor. The offending 2242 else if (nic->rxd_mode == RXD_MODE_3B)
2230 * decsriptor is of-course the 3rd descriptor. 2243 size = dev->mtu + ALIGN_SIZE + BUF0_LEN + 4;
2231 */ 2244 else
2232 rxdpphys = mac_control->rings[ring_no].rx_blocks[block_no]. 2245 size = l3l4hdr_size + ALIGN_SIZE + BUF0_LEN + 4;
2233 block_dma_addr + (off * sizeof(RxD_t));
2234 if (((u64) (rxdpphys)) % 128 > 80) {
2235 rxdpnext = mac_control->rings[ring_no].rx_blocks[block_no].
2236 block_virt_addr + (off + 1);
2237 if (rxdpnext->Host_Control == END_OF_BLOCK) {
2238 nextblk = (block_no + 1) %
2239 (mac_control->rings[ring_no].block_count);
2240 rxdpnext = mac_control->rings[ring_no].rx_blocks
2241 [nextblk].block_virt_addr;
2242 }
2243 if (rxdpnext->Control_2 & BIT(0))
2244 goto end;
2245 }
2246#endif
2247 2246
2248#ifndef CONFIG_2BUFF_MODE 2247 /* allocate skb */
2249 skb = dev_alloc_skb(size + NET_IP_ALIGN); 2248 skb = dev_alloc_skb(size);
2250#else 2249 if(!skb) {
2251 skb = dev_alloc_skb(dev->mtu + ALIGN_SIZE + BUF0_LEN + 4);
2252#endif
2253 if (!skb) {
2254 DBG_PRINT(ERR_DBG, "%s: Out of ", dev->name); 2250 DBG_PRINT(ERR_DBG, "%s: Out of ", dev->name);
2255 DBG_PRINT(ERR_DBG, "memory to allocate SKBs\n"); 2251 DBG_PRINT(ERR_DBG, "memory to allocate SKBs\n");
2256 if (first_rxdp) { 2252 if (first_rxdp) {
2257 wmb(); 2253 wmb();
2258 first_rxdp->Control_1 |= RXD_OWN_XENA; 2254 first_rxdp->Control_1 |= RXD_OWN_XENA;
2259 } 2255 }
2260 return -ENOMEM; 2256 return -ENOMEM ;
2257 }
2258 if (nic->rxd_mode == RXD_MODE_1) {
2259 /* 1 buffer mode - normal operation mode */
2260 memset(rxdp, 0, sizeof(RxD1_t));
2261 skb_reserve(skb, NET_IP_ALIGN);
2262 ((RxD1_t*)rxdp)->Buffer0_ptr = pci_map_single
2263 (nic->pdev, skb->data, size, PCI_DMA_FROMDEVICE);
2264 rxdp->Control_2 &= (~MASK_BUFFER0_SIZE_1);
2265 rxdp->Control_2 |= SET_BUFFER0_SIZE_1(size);
2266
2267 } else if (nic->rxd_mode >= RXD_MODE_3A) {
2268 /*
2269 * 2 or 3 buffer mode -
2270 * Both 2 buffer mode and 3 buffer mode provides 128
2271 * byte aligned receive buffers.
2272 *
2273 * 3 buffer mode provides header separation where in
2274 * skb->data will have L3/L4 headers where as
2275 * skb_shinfo(skb)->frag_list will have the L4 data
2276 * payload
2277 */
2278
2279 memset(rxdp, 0, sizeof(RxD3_t));
2280 ba = &mac_control->rings[ring_no].ba[block_no][off];
2281 skb_reserve(skb, BUF0_LEN);
2282 tmp = (u64)(unsigned long) skb->data;
2283 tmp += ALIGN_SIZE;
2284 tmp &= ~ALIGN_SIZE;
2285 skb->data = (void *) (unsigned long)tmp;
2286 skb->tail = (void *) (unsigned long)tmp;
2287
2288 ((RxD3_t*)rxdp)->Buffer0_ptr =
2289 pci_map_single(nic->pdev, ba->ba_0, BUF0_LEN,
2290 PCI_DMA_FROMDEVICE);
2291 rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
2292 if (nic->rxd_mode == RXD_MODE_3B) {
2293 /* Two buffer mode */
2294
2295 /*
2296 * Buffer2 will have L3/L4 header plus
2297 * L4 payload
2298 */
2299 ((RxD3_t*)rxdp)->Buffer2_ptr = pci_map_single
2300 (nic->pdev, skb->data, dev->mtu + 4,
2301 PCI_DMA_FROMDEVICE);
2302
2303 /* Buffer-1 will be dummy buffer not used */
2304 ((RxD3_t*)rxdp)->Buffer1_ptr =
2305 pci_map_single(nic->pdev, ba->ba_1, BUF1_LEN,
2306 PCI_DMA_FROMDEVICE);
2307 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
2308 rxdp->Control_2 |= SET_BUFFER2_SIZE_3
2309 (dev->mtu + 4);
2310 } else {
2311 /* 3 buffer mode */
2312 if (fill_rxd_3buf(nic, rxdp, skb) == -ENOMEM) {
2313 dev_kfree_skb_irq(skb);
2314 if (first_rxdp) {
2315 wmb();
2316 first_rxdp->Control_1 |=
2317 RXD_OWN_XENA;
2318 }
2319 return -ENOMEM ;
2320 }
2321 }
2322 rxdp->Control_2 |= BIT(0);
2261 } 2323 }
2262#ifndef CONFIG_2BUFF_MODE
2263 skb_reserve(skb, NET_IP_ALIGN);
2264 memset(rxdp, 0, sizeof(RxD_t));
2265 rxdp->Buffer0_ptr = pci_map_single
2266 (nic->pdev, skb->data, size, PCI_DMA_FROMDEVICE);
2267 rxdp->Control_2 &= (~MASK_BUFFER0_SIZE);
2268 rxdp->Control_2 |= SET_BUFFER0_SIZE(size);
2269 rxdp->Host_Control = (unsigned long) (skb); 2324 rxdp->Host_Control = (unsigned long) (skb);
2270 if (alloc_tab & ((1 << rxsync_frequency) - 1)) 2325 if (alloc_tab & ((1 << rxsync_frequency) - 1))
2271 rxdp->Control_1 |= RXD_OWN_XENA; 2326 rxdp->Control_1 |= RXD_OWN_XENA;
2272 off++; 2327 off++;
2273 off %= (MAX_RXDS_PER_BLOCK + 1); 2328 if (off == (rxd_count[nic->rxd_mode] + 1))
2274 mac_control->rings[ring_no].rx_curr_put_info.offset = off; 2329 off = 0;
2275#else
2276 ba = &mac_control->rings[ring_no].ba[block_no][off];
2277 skb_reserve(skb, BUF0_LEN);
2278 tmp = ((unsigned long) skb->data & ALIGN_SIZE);
2279 if (tmp)
2280 skb_reserve(skb, (ALIGN_SIZE + 1) - tmp);
2281
2282 memset(rxdp, 0, sizeof(RxD_t));
2283 rxdp->Buffer2_ptr = pci_map_single
2284 (nic->pdev, skb->data, dev->mtu + BUF0_LEN + 4,
2285 PCI_DMA_FROMDEVICE);
2286 rxdp->Buffer0_ptr =
2287 pci_map_single(nic->pdev, ba->ba_0, BUF0_LEN,
2288 PCI_DMA_FROMDEVICE);
2289 rxdp->Buffer1_ptr =
2290 pci_map_single(nic->pdev, ba->ba_1, BUF1_LEN,
2291 PCI_DMA_FROMDEVICE);
2292
2293 rxdp->Control_2 = SET_BUFFER2_SIZE(dev->mtu + 4);
2294 rxdp->Control_2 |= SET_BUFFER0_SIZE(BUF0_LEN);
2295 rxdp->Control_2 |= SET_BUFFER1_SIZE(1); /* dummy. */
2296 rxdp->Control_2 |= BIT(0); /* Set Buffer_Empty bit. */
2297 rxdp->Host_Control = (u64) ((unsigned long) (skb));
2298 if (alloc_tab & ((1 << rxsync_frequency) - 1))
2299 rxdp->Control_1 |= RXD_OWN_XENA;
2300 off++;
2301 mac_control->rings[ring_no].rx_curr_put_info.offset = off; 2330 mac_control->rings[ring_no].rx_curr_put_info.offset = off;
2302#endif
2303 rxdp->Control_2 |= SET_RXD_MARKER;
2304 2331
2332 rxdp->Control_2 |= SET_RXD_MARKER;
2305 if (!(alloc_tab & ((1 << rxsync_frequency) - 1))) { 2333 if (!(alloc_tab & ((1 << rxsync_frequency) - 1))) {
2306 if (first_rxdp) { 2334 if (first_rxdp) {
2307 wmb(); 2335 wmb();
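fill_rx_buffers() now picks the skb size at run time instead of via #ifdef: 1-buffer mode needs the whole frame plus the IP-alignment pad, 2-buffer mode the frame plus alignment slack and the Buffer0 area, and 3-buffer mode only the header portion, since the payload skb comes from fill_rxd_3buf(). A sketch of that sizing; the header-size constants and the BUF0_LEN, ALIGN_SIZE and NET_IP_ALIGN values are assumptions of the sketch:

/* Per-mode skb sizing as computed in fill_rx_buffers() above, assuming
 * BUF0_LEN = 40, ALIGN_SIZE = 127, NET_IP_ALIGN = 2 and
 * Ethernet/802.2/SNAP headers of 14 + 3 + 5 bytes. */
#include <stdio.h>

enum rxd_mode { RXD_MODE_1, RXD_MODE_3A, RXD_MODE_3B };

static unsigned int rx_skb_size(enum rxd_mode mode, unsigned int mtu,
                                unsigned int l3l4hdr_size)
{
        unsigned int size = mtu + 14 + 3 + 5;   /* full frame headroom      */

        if (mode == RXD_MODE_1)
                return size + 2;                /* + NET_IP_ALIGN           */
        if (mode == RXD_MODE_3B)
                return mtu + 127 + 40 + 4;      /* frame + align + Buffer0  */
        return l3l4hdr_size + 127 + 40 + 4;     /* headers only, 3-buffer   */
}

int main(void)
{
        printf("mode1=%u mode3B=%u mode3A=%u\n",
               rx_skb_size(RXD_MODE_1, 1500, 128),
               rx_skb_size(RXD_MODE_3B, 1500, 128),
               rx_skb_size(RXD_MODE_3A, 1500, 128));
        return 0;
}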
@@ -2326,6 +2354,67 @@ int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
2326 return SUCCESS; 2354 return SUCCESS;
2327} 2355}
2328 2356
2357static void free_rxd_blk(struct s2io_nic *sp, int ring_no, int blk)
2358{
2359 struct net_device *dev = sp->dev;
2360 int j;
2361 struct sk_buff *skb;
2362 RxD_t *rxdp;
2363 mac_info_t *mac_control;
2364 buffAdd_t *ba;
2365
2366 mac_control = &sp->mac_control;
2367 for (j = 0 ; j < rxd_count[sp->rxd_mode]; j++) {
2368 rxdp = mac_control->rings[ring_no].
2369 rx_blocks[blk].rxds[j].virt_addr;
2370 skb = (struct sk_buff *)
2371 ((unsigned long) rxdp->Host_Control);
2372 if (!skb) {
2373 continue;
2374 }
2375 if (sp->rxd_mode == RXD_MODE_1) {
2376 pci_unmap_single(sp->pdev, (dma_addr_t)
2377 ((RxD1_t*)rxdp)->Buffer0_ptr,
2378 dev->mtu +
2379 HEADER_ETHERNET_II_802_3_SIZE
2380 + HEADER_802_2_SIZE +
2381 HEADER_SNAP_SIZE,
2382 PCI_DMA_FROMDEVICE);
2383 memset(rxdp, 0, sizeof(RxD1_t));
2384 } else if(sp->rxd_mode == RXD_MODE_3B) {
2385 ba = &mac_control->rings[ring_no].
2386 ba[blk][j];
2387 pci_unmap_single(sp->pdev, (dma_addr_t)
2388 ((RxD3_t*)rxdp)->Buffer0_ptr,
2389 BUF0_LEN,
2390 PCI_DMA_FROMDEVICE);
2391 pci_unmap_single(sp->pdev, (dma_addr_t)
2392 ((RxD3_t*)rxdp)->Buffer1_ptr,
2393 BUF1_LEN,
2394 PCI_DMA_FROMDEVICE);
2395 pci_unmap_single(sp->pdev, (dma_addr_t)
2396 ((RxD3_t*)rxdp)->Buffer2_ptr,
2397 dev->mtu + 4,
2398 PCI_DMA_FROMDEVICE);
2399 memset(rxdp, 0, sizeof(RxD3_t));
2400 } else {
2401 pci_unmap_single(sp->pdev, (dma_addr_t)
2402 ((RxD3_t*)rxdp)->Buffer0_ptr, BUF0_LEN,
2403 PCI_DMA_FROMDEVICE);
2404 pci_unmap_single(sp->pdev, (dma_addr_t)
2405 ((RxD3_t*)rxdp)->Buffer1_ptr,
2406 l3l4hdr_size + 4,
2407 PCI_DMA_FROMDEVICE);
2408 pci_unmap_single(sp->pdev, (dma_addr_t)
2409 ((RxD3_t*)rxdp)->Buffer2_ptr, dev->mtu,
2410 PCI_DMA_FROMDEVICE);
2411 memset(rxdp, 0, sizeof(RxD3_t));
2412 }
2413 dev_kfree_skb(skb);
2414 atomic_dec(&sp->rx_bufs_left[ring_no]);
2415 }
2416}
2417
2329/** 2418/**
2330 * free_rx_buffers - Frees all Rx buffers 2419 * free_rx_buffers - Frees all Rx buffers
2331 * @sp: device private variable. 2420 * @sp: device private variable.
@@ -2338,77 +2427,17 @@ int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
2338static void free_rx_buffers(struct s2io_nic *sp) 2427static void free_rx_buffers(struct s2io_nic *sp)
2339{ 2428{
2340 struct net_device *dev = sp->dev; 2429 struct net_device *dev = sp->dev;
2341 int i, j, blk = 0, off, buf_cnt = 0; 2430 int i, blk = 0, buf_cnt = 0;
2342 RxD_t *rxdp;
2343 struct sk_buff *skb;
2344 mac_info_t *mac_control; 2431 mac_info_t *mac_control;
2345 struct config_param *config; 2432 struct config_param *config;
2346#ifdef CONFIG_2BUFF_MODE
2347 buffAdd_t *ba;
2348#endif
2349 2433
2350 mac_control = &sp->mac_control; 2434 mac_control = &sp->mac_control;
2351 config = &sp->config; 2435 config = &sp->config;
2352 2436
2353 for (i = 0; i < config->rx_ring_num; i++) { 2437 for (i = 0; i < config->rx_ring_num; i++) {
2354 for (j = 0, blk = 0; j < config->rx_cfg[i].num_rxd; j++) { 2438 for (blk = 0; blk < rx_ring_sz[i]; blk++)
2355 off = j % (MAX_RXDS_PER_BLOCK + 1); 2439 free_rxd_blk(sp,i,blk);
2356 rxdp = mac_control->rings[i].rx_blocks[blk].
2357 block_virt_addr + off;
2358
2359#ifndef CONFIG_2BUFF_MODE
2360 if (rxdp->Control_1 == END_OF_BLOCK) {
2361 rxdp =
2362 (RxD_t *) ((unsigned long) rxdp->
2363 Control_2);
2364 j++;
2365 blk++;
2366 }
2367#else
2368 if (rxdp->Host_Control == END_OF_BLOCK) {
2369 blk++;
2370 continue;
2371 }
2372#endif
2373
2374 if (!(rxdp->Control_1 & RXD_OWN_XENA)) {
2375 memset(rxdp, 0, sizeof(RxD_t));
2376 continue;
2377 }
2378 2440
2379 skb =
2380 (struct sk_buff *) ((unsigned long) rxdp->
2381 Host_Control);
2382 if (skb) {
2383#ifndef CONFIG_2BUFF_MODE
2384 pci_unmap_single(sp->pdev, (dma_addr_t)
2385 rxdp->Buffer0_ptr,
2386 dev->mtu +
2387 HEADER_ETHERNET_II_802_3_SIZE
2388 + HEADER_802_2_SIZE +
2389 HEADER_SNAP_SIZE,
2390 PCI_DMA_FROMDEVICE);
2391#else
2392 ba = &mac_control->rings[i].ba[blk][off];
2393 pci_unmap_single(sp->pdev, (dma_addr_t)
2394 rxdp->Buffer0_ptr,
2395 BUF0_LEN,
2396 PCI_DMA_FROMDEVICE);
2397 pci_unmap_single(sp->pdev, (dma_addr_t)
2398 rxdp->Buffer1_ptr,
2399 BUF1_LEN,
2400 PCI_DMA_FROMDEVICE);
2401 pci_unmap_single(sp->pdev, (dma_addr_t)
2402 rxdp->Buffer2_ptr,
2403 dev->mtu + BUF0_LEN + 4,
2404 PCI_DMA_FROMDEVICE);
2405#endif
2406 dev_kfree_skb(skb);
2407 atomic_dec(&sp->rx_bufs_left[i]);
2408 buf_cnt++;
2409 }
2410 memset(rxdp, 0, sizeof(RxD_t));
2411 }
2412 mac_control->rings[i].rx_curr_put_info.block_index = 0; 2441 mac_control->rings[i].rx_curr_put_info.block_index = 0;
2413 mac_control->rings[i].rx_curr_get_info.block_index = 0; 2442 mac_control->rings[i].rx_curr_get_info.block_index = 0;
2414 mac_control->rings[i].rx_curr_put_info.offset = 0; 2443 mac_control->rings[i].rx_curr_put_info.offset = 0;
@@ -2514,7 +2543,7 @@ static void rx_intr_handler(ring_info_t *ring_data)
2514{ 2543{
2515 nic_t *nic = ring_data->nic; 2544 nic_t *nic = ring_data->nic;
2516 struct net_device *dev = (struct net_device *) nic->dev; 2545 struct net_device *dev = (struct net_device *) nic->dev;
2517 int get_block, get_offset, put_block, put_offset, ring_bufs; 2546 int get_block, put_block, put_offset;
2518 rx_curr_get_info_t get_info, put_info; 2547 rx_curr_get_info_t get_info, put_info;
2519 RxD_t *rxdp; 2548 RxD_t *rxdp;
2520 struct sk_buff *skb; 2549 struct sk_buff *skb;
@@ -2533,21 +2562,22 @@ static void rx_intr_handler(ring_info_t *ring_data)
2533 get_block = get_info.block_index; 2562 get_block = get_info.block_index;
2534 put_info = ring_data->rx_curr_put_info; 2563 put_info = ring_data->rx_curr_put_info;
2535 put_block = put_info.block_index; 2564 put_block = put_info.block_index;
2536 ring_bufs = get_info.ring_len+1; 2565 rxdp = ring_data->rx_blocks[get_block].rxds[get_info.offset].virt_addr;
2537 rxdp = ring_data->rx_blocks[get_block].block_virt_addr +
2538 get_info.offset;
2539 get_offset = (get_block * (MAX_RXDS_PER_BLOCK + 1)) +
2540 get_info.offset;
2541#ifndef CONFIG_S2IO_NAPI 2566#ifndef CONFIG_S2IO_NAPI
2542 spin_lock(&nic->put_lock); 2567 spin_lock(&nic->put_lock);
2543 put_offset = ring_data->put_pos; 2568 put_offset = ring_data->put_pos;
2544 spin_unlock(&nic->put_lock); 2569 spin_unlock(&nic->put_lock);
2545#else 2570#else
2546 put_offset = (put_block * (MAX_RXDS_PER_BLOCK + 1)) + 2571 put_offset = (put_block * (rxd_count[nic->rxd_mode] + 1)) +
2547 put_info.offset; 2572 put_info.offset;
2548#endif 2573#endif
2549 while (RXD_IS_UP2DT(rxdp) && 2574 while (RXD_IS_UP2DT(rxdp)) {
2550 (((get_offset + 1) % ring_bufs) != put_offset)) { 2575 /* If your are next to put index then it's FIFO full condition */
2576 if ((get_block == put_block) &&
2577 (get_info.offset + 1) == put_info.offset) {
2578 DBG_PRINT(ERR_DBG, "%s: Ring Full\n",dev->name);
2579 break;
2580 }
2551 skb = (struct sk_buff *) ((unsigned long)rxdp->Host_Control); 2581 skb = (struct sk_buff *) ((unsigned long)rxdp->Host_Control);
2552 if (skb == NULL) { 2582 if (skb == NULL) {
2553 DBG_PRINT(ERR_DBG, "%s: The skb is ", 2583 DBG_PRINT(ERR_DBG, "%s: The skb is ",
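rx_intr_handler() no longer flattens the get and put positions into a single modular offset; it simply stops when the descriptor it is about to consume sits immediately behind the one the fill side will write next. A tiny sketch of that full-ring test:

/* Sketch of the get/put bookkeeping used by rx_intr_handler() above:
 * consumption stops when the next descriptor to read is the one the
 * fill side will write next (same block, get + 1 == put). */
#include <stdbool.h>

struct ring_pos { int block; int offset; };

static bool ring_full(struct ring_pos get, struct ring_pos put)
{
        return get.block == put.block && get.offset + 1 == put.offset;
}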
@@ -2556,46 +2586,52 @@ static void rx_intr_handler(ring_info_t *ring_data)
2556 spin_unlock(&nic->rx_lock); 2586 spin_unlock(&nic->rx_lock);
2557 return; 2587 return;
2558 } 2588 }
2559#ifndef CONFIG_2BUFF_MODE 2589 if (nic->rxd_mode == RXD_MODE_1) {
2560 pci_unmap_single(nic->pdev, (dma_addr_t) 2590 pci_unmap_single(nic->pdev, (dma_addr_t)
2561 rxdp->Buffer0_ptr, 2591 ((RxD1_t*)rxdp)->Buffer0_ptr,
2562 dev->mtu + 2592 dev->mtu +
2563 HEADER_ETHERNET_II_802_3_SIZE + 2593 HEADER_ETHERNET_II_802_3_SIZE +
2564 HEADER_802_2_SIZE + 2594 HEADER_802_2_SIZE +
2565 HEADER_SNAP_SIZE, 2595 HEADER_SNAP_SIZE,
2566 PCI_DMA_FROMDEVICE); 2596 PCI_DMA_FROMDEVICE);
2567#else 2597 } else if (nic->rxd_mode == RXD_MODE_3B) {
2568 pci_unmap_single(nic->pdev, (dma_addr_t) 2598 pci_unmap_single(nic->pdev, (dma_addr_t)
2569 rxdp->Buffer0_ptr, 2599 ((RxD3_t*)rxdp)->Buffer0_ptr,
2570 BUF0_LEN, PCI_DMA_FROMDEVICE); 2600 BUF0_LEN, PCI_DMA_FROMDEVICE);
2571 pci_unmap_single(nic->pdev, (dma_addr_t) 2601 pci_unmap_single(nic->pdev, (dma_addr_t)
2572 rxdp->Buffer1_ptr, 2602 ((RxD3_t*)rxdp)->Buffer1_ptr,
2573 BUF1_LEN, PCI_DMA_FROMDEVICE); 2603 BUF1_LEN, PCI_DMA_FROMDEVICE);
2574 pci_unmap_single(nic->pdev, (dma_addr_t) 2604 pci_unmap_single(nic->pdev, (dma_addr_t)
2575 rxdp->Buffer2_ptr, 2605 ((RxD3_t*)rxdp)->Buffer2_ptr,
2576 dev->mtu + BUF0_LEN + 4, 2606 dev->mtu + 4,
2577 PCI_DMA_FROMDEVICE); 2607 PCI_DMA_FROMDEVICE);
2578#endif 2608 } else {
2609 pci_unmap_single(nic->pdev, (dma_addr_t)
2610 ((RxD3_t*)rxdp)->Buffer0_ptr, BUF0_LEN,
2611 PCI_DMA_FROMDEVICE);
2612 pci_unmap_single(nic->pdev, (dma_addr_t)
2613 ((RxD3_t*)rxdp)->Buffer1_ptr,
2614 l3l4hdr_size + 4,
2615 PCI_DMA_FROMDEVICE);
2616 pci_unmap_single(nic->pdev, (dma_addr_t)
2617 ((RxD3_t*)rxdp)->Buffer2_ptr,
2618 dev->mtu, PCI_DMA_FROMDEVICE);
2619 }
2579 rx_osm_handler(ring_data, rxdp); 2620 rx_osm_handler(ring_data, rxdp);
2580 get_info.offset++; 2621 get_info.offset++;
2581 ring_data->rx_curr_get_info.offset = 2622 ring_data->rx_curr_get_info.offset = get_info.offset;
2582 get_info.offset; 2623 rxdp = ring_data->rx_blocks[get_block].
2583 rxdp = ring_data->rx_blocks[get_block].block_virt_addr + 2624 rxds[get_info.offset].virt_addr;
2584 get_info.offset; 2625 if (get_info.offset == rxd_count[nic->rxd_mode]) {
2585 if (get_info.offset &&
2586 (!(get_info.offset % MAX_RXDS_PER_BLOCK))) {
2587 get_info.offset = 0; 2626 get_info.offset = 0;
2588 ring_data->rx_curr_get_info.offset 2627 ring_data->rx_curr_get_info.offset = get_info.offset;
2589 = get_info.offset;
2590 get_block++; 2628 get_block++;
2591 get_block %= ring_data->block_count; 2629 if (get_block == ring_data->block_count)
2592 ring_data->rx_curr_get_info.block_index 2630 get_block = 0;
2593 = get_block; 2631 ring_data->rx_curr_get_info.block_index = get_block;
2594 rxdp = ring_data->rx_blocks[get_block].block_virt_addr; 2632 rxdp = ring_data->rx_blocks[get_block].block_virt_addr;
2595 } 2633 }
2596 2634
2597 get_offset = (get_block * (MAX_RXDS_PER_BLOCK + 1)) +
2598 get_info.offset;
2599#ifdef CONFIG_S2IO_NAPI 2635#ifdef CONFIG_S2IO_NAPI
2600 nic->pkts_to_process -= 1; 2636 nic->pkts_to_process -= 1;
2601 if (!nic->pkts_to_process) 2637 if (!nic->pkts_to_process)
@@ -5538,16 +5574,7 @@ static int rx_osm_handler(ring_info_t *ring_data, RxD_t * rxdp)
5538 ((unsigned long) rxdp->Host_Control); 5574 ((unsigned long) rxdp->Host_Control);
5539 int ring_no = ring_data->ring_no; 5575 int ring_no = ring_data->ring_no;
5540 u16 l3_csum, l4_csum; 5576 u16 l3_csum, l4_csum;
5541#ifdef CONFIG_2BUFF_MODE 5577
5542 int buf0_len = RXD_GET_BUFFER0_SIZE(rxdp->Control_2);
5543 int buf2_len = RXD_GET_BUFFER2_SIZE(rxdp->Control_2);
5544 int get_block = ring_data->rx_curr_get_info.block_index;
5545 int get_off = ring_data->rx_curr_get_info.offset;
5546 buffAdd_t *ba = &ring_data->ba[get_block][get_off];
5547 unsigned char *buff;
5548#else
5549 u16 len = (u16) ((RXD_GET_BUFFER0_SIZE(rxdp->Control_2)) >> 48);;
5550#endif
5551 skb->dev = dev; 5578 skb->dev = dev;
5552 if (rxdp->Control_1 & RXD_T_CODE) { 5579 if (rxdp->Control_1 & RXD_T_CODE) {
5553 unsigned long long err = rxdp->Control_1 & RXD_T_CODE; 5580 unsigned long long err = rxdp->Control_1 & RXD_T_CODE;
@@ -5564,19 +5591,36 @@ static int rx_osm_handler(ring_info_t *ring_data, RxD_t * rxdp)
5564 rxdp->Host_Control = 0; 5591 rxdp->Host_Control = 0;
5565 sp->rx_pkt_count++; 5592 sp->rx_pkt_count++;
5566 sp->stats.rx_packets++; 5593 sp->stats.rx_packets++;
5567#ifndef CONFIG_2BUFF_MODE 5594 if (sp->rxd_mode == RXD_MODE_1) {
5568 sp->stats.rx_bytes += len; 5595 int len = RXD_GET_BUFFER0_SIZE_1(rxdp->Control_2);
5569#else
5570 sp->stats.rx_bytes += buf0_len + buf2_len;
5571#endif
5572 5596
5573#ifndef CONFIG_2BUFF_MODE 5597 sp->stats.rx_bytes += len;
5574 skb_put(skb, len); 5598 skb_put(skb, len);
5575#else 5599
5576 buff = skb_push(skb, buf0_len); 5600 } else if (sp->rxd_mode >= RXD_MODE_3A) {
5577 memcpy(buff, ba->ba_0, buf0_len); 5601 int get_block = ring_data->rx_curr_get_info.block_index;
5578 skb_put(skb, buf2_len); 5602 int get_off = ring_data->rx_curr_get_info.offset;
5579#endif 5603 int buf0_len = RXD_GET_BUFFER0_SIZE_3(rxdp->Control_2);
5604 int buf2_len = RXD_GET_BUFFER2_SIZE_3(rxdp->Control_2);
5605 unsigned char *buff = skb_push(skb, buf0_len);
5606
5607 buffAdd_t *ba = &ring_data->ba[get_block][get_off];
5608 sp->stats.rx_bytes += buf0_len + buf2_len;
5609 memcpy(buff, ba->ba_0, buf0_len);
5610
5611 if (sp->rxd_mode == RXD_MODE_3A) {
5612 int buf1_len = RXD_GET_BUFFER1_SIZE_3(rxdp->Control_2);
5613
5614 skb_put(skb, buf1_len);
5615 skb->len += buf2_len;
5616 skb->data_len += buf2_len;
5617 skb->truesize += buf2_len;
5618 skb_put(skb_shinfo(skb)->frag_list, buf2_len);
5619 sp->stats.rx_bytes += buf1_len;
5620
5621 } else
5622 skb_put(skb, buf2_len);
5623 }
5580 5624
5581 if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) && 5625 if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) &&
5582 (sp->rx_csum)) { 5626 (sp->rx_csum)) {
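In the 2/3-buffer modes the Buffer0 contents are copied back in front of the data with skb_push()/memcpy(), and in 3-buffer mode the payload stays in the frag_list skb, so the parent skb's len, data_len and truesize are adjusted by hand while skb_put() is applied to the fragment. A sketch of that bookkeeping on plain counters:

/* Sketch of the length accounting done above for 3-buffer mode: the
 * parent skb only holds buf0 + buf1 bytes linearly, while buf2 bytes sit
 * in the frag_list skb yet still count toward skb->len / data_len. */
struct fake_skb { unsigned int len, data_len, truesize; };

static void account_3buf(struct fake_skb *skb, struct fake_skb *frag,
                         unsigned int buf0, unsigned int buf1, unsigned int buf2)
{
        skb->len      += buf0 + buf1;   /* skb_push(buf0) + skb_put(buf1)  */
        skb->len      += buf2;          /* payload held by the fragment    */
        skb->data_len += buf2;
        skb->truesize += buf2;
        frag->len     += buf2;          /* skb_put() on the frag_list skb  */
}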
@@ -5712,6 +5756,7 @@ MODULE_VERSION(DRV_VERSION);
5712 5756
5713module_param(tx_fifo_num, int, 0); 5757module_param(tx_fifo_num, int, 0);
5714module_param(rx_ring_num, int, 0); 5758module_param(rx_ring_num, int, 0);
5759module_param(rx_ring_mode, int, 0);
5715module_param_array(tx_fifo_len, uint, NULL, 0); 5760module_param_array(tx_fifo_len, uint, NULL, 0);
5716module_param_array(rx_ring_sz, uint, NULL, 0); 5761module_param_array(rx_ring_sz, uint, NULL, 0);
5717module_param_array(rts_frm_len, uint, NULL, 0); 5762module_param_array(rts_frm_len, uint, NULL, 0);
@@ -5723,6 +5768,7 @@ module_param(shared_splits, int, 0);
5723module_param(tmac_util_period, int, 0); 5768module_param(tmac_util_period, int, 0);
5724module_param(rmac_util_period, int, 0); 5769module_param(rmac_util_period, int, 0);
5725module_param(bimodal, bool, 0); 5770module_param(bimodal, bool, 0);
5771module_param(l3l4hdr_size, int , 0);
5726#ifndef CONFIG_S2IO_NAPI 5772#ifndef CONFIG_S2IO_NAPI
5727module_param(indicate_max_pkts, int, 0); 5773module_param(indicate_max_pkts, int, 0);
5728#endif 5774#endif
@@ -5844,6 +5890,13 @@ Defaulting to INTA\n");
5844 sp->pdev = pdev; 5890 sp->pdev = pdev;
5845 sp->high_dma_flag = dma_flag; 5891 sp->high_dma_flag = dma_flag;
5846 sp->device_enabled_once = FALSE; 5892 sp->device_enabled_once = FALSE;
5893 if (rx_ring_mode == 1)
5894 sp->rxd_mode = RXD_MODE_1;
5895 if (rx_ring_mode == 2)
5896 sp->rxd_mode = RXD_MODE_3B;
5897 if (rx_ring_mode == 3)
5898 sp->rxd_mode = RXD_MODE_3A;
5899
5847 sp->intr_type = dev_intr_type; 5900 sp->intr_type = dev_intr_type;
5848 5901
5849 if ((pdev->device == PCI_DEVICE_ID_HERC_WIN) || 5902 if ((pdev->device == PCI_DEVICE_ID_HERC_WIN) ||
@@ -5896,7 +5949,7 @@ Defaulting to INTA\n");
5896 config->rx_ring_num = rx_ring_num; 5949 config->rx_ring_num = rx_ring_num;
5897 for (i = 0; i < MAX_RX_RINGS; i++) { 5950 for (i = 0; i < MAX_RX_RINGS; i++) {
5898 config->rx_cfg[i].num_rxd = rx_ring_sz[i] * 5951 config->rx_cfg[i].num_rxd = rx_ring_sz[i] *
5899 (MAX_RXDS_PER_BLOCK + 1); 5952 (rxd_count[sp->rxd_mode] + 1);
5900 config->rx_cfg[i].ring_priority = i; 5953 config->rx_cfg[i].ring_priority = i;
5901 } 5954 }
5902 5955
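rx_ring_sz[] is interpreted as a number of Rx blocks, so the same module-parameter value now yields a different descriptor count per ring depending on the selected mode. A worked example using the rxd_count[] table added earlier in this patch:

/* Worked example of the hunk above: num_rxd = blocks * (rxd_count + 1). */
#include <stdio.h>

int main(void)
{
        const int rxd_count[4] = { 127, 85, 85, 63 };
        int rx_ring_sz = 4;                        /* blocks requested for a ring */

        for (int mode = 0; mode < 3; mode++)
                printf("mode %d: num_rxd = %d\n", mode,
                       rx_ring_sz * (rxd_count[mode] + 1));
        return 0;                                  /* prints 512, 344, 344 */
}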
@@ -6091,9 +6144,6 @@ Defaulting to INTA\n");
6091 DBG_PRINT(ERR_DBG, "(rev %d), Version %s", 6144 DBG_PRINT(ERR_DBG, "(rev %d), Version %s",
6092 get_xena_rev_id(sp->pdev), 6145 get_xena_rev_id(sp->pdev),
6093 s2io_driver_version); 6146 s2io_driver_version);
6094#ifdef CONFIG_2BUFF_MODE
6095 DBG_PRINT(ERR_DBG, ", Buffer mode %d",2);
6096#endif
6097 switch(sp->intr_type) { 6147 switch(sp->intr_type) {
6098 case INTA: 6148 case INTA:
6099 DBG_PRINT(ERR_DBG, ", Intr type INTA"); 6149 DBG_PRINT(ERR_DBG, ", Intr type INTA");
@@ -6126,9 +6176,6 @@ Defaulting to INTA\n");
6126 DBG_PRINT(ERR_DBG, "(rev %d), Version %s", 6176 DBG_PRINT(ERR_DBG, "(rev %d), Version %s",
6127 get_xena_rev_id(sp->pdev), 6177 get_xena_rev_id(sp->pdev),
6128 s2io_driver_version); 6178 s2io_driver_version);
6129#ifdef CONFIG_2BUFF_MODE
6130 DBG_PRINT(ERR_DBG, ", Buffer mode %d",2);
6131#endif
6132 switch(sp->intr_type) { 6179 switch(sp->intr_type) {
6133 case INTA: 6180 case INTA:
6134 DBG_PRINT(ERR_DBG, ", Intr type INTA"); 6181 DBG_PRINT(ERR_DBG, ", Intr type INTA");
@@ -6149,6 +6196,12 @@ Defaulting to INTA\n");
6149 sp->def_mac_addr[0].mac_addr[4], 6196 sp->def_mac_addr[0].mac_addr[4],
6150 sp->def_mac_addr[0].mac_addr[5]); 6197 sp->def_mac_addr[0].mac_addr[5]);
6151 } 6198 }
6199 if (sp->rxd_mode == RXD_MODE_3B)
6200 DBG_PRINT(ERR_DBG, "%s: 2-Buffer mode support has been "
6201 "enabled\n",dev->name);
6202 if (sp->rxd_mode == RXD_MODE_3A)
6203 DBG_PRINT(ERR_DBG, "%s: 3-Buffer mode support has been "
6204 "enabled\n",dev->name);
6152 6205
6153 /* Initialize device name */ 6206 /* Initialize device name */
6154 strcpy(sp->name, dev->name); 6207 strcpy(sp->name, dev->name);