author    Ananda Raju <Ananda.Raju@neterion.com>  2005-10-31 16:55:31 -0500
committer Jeff Garzik <jgarzik@pobox.com>         2005-11-05 14:40:27 -0500
commit    da6971d8ece2ec9762509e20dda6808335b5a10b (patch)
tree      9ae532e7ab6314400fb601f6b7c5e6042166429f
parent    29b09fcc341ede8dc08c900b132903fdd0231400 (diff)
[PATCH] S2io: Multi buffer mode support
This patch provides dynamic 2 buffer-mode and 3 buffer-mode options. Previously, 2 buffer-mode was a compile-time option; with this patch applied, the driver can be loaded in 2 buffer-mode via a module-load parameter, i.e.

	#insmod s2io.ko rx_ring_mode=2

This patch also provides 3 buffer-mode, which adds header separation: skb->data will hold the L2/L3/L4 headers and "skb_shinfo(skb)->frag_list->data" will hold the L4 payload. The driver can be loaded in 3 buffer-mode with the same module-load parameter, i.e.

	#insmod s2io.ko rx_ring_mode=3

Signed-off-by: Ananda Raju <ananda.raju@neterion.com>
Signed-off-by: Jeff Garzik <jgarzik@pobox.com>
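[Editor's note] For reference, the run-time mode selection this patch introduces can be summarized with a small table-driven sketch. The rx_ring_mode-to-RXD_MODE_* mapping and the per-mode descriptor geometry (rxd_size[], rxd_count[]) are taken directly from the patch below; the standalone program itself is only an illustration, not driver code.

	#include <stdio.h>

	/* From the patch: s2io.h defines RXD_MODE_1 = 0, RXD_MODE_3A = 1,
	 * RXD_MODE_3B = 2; s2io.c sizes each descriptor flavour and counts
	 * the usable RxDs per 4KB block. */
	enum { RXD_MODE_1 = 0, RXD_MODE_3A = 1, RXD_MODE_3B = 2 };

	static const int rxd_size[4]  = { 32, 48, 48, 64 };
	static const int rxd_count[4] = { 127, 85, 85, 63 };

	/* Mirrors the rx_ring_mode handling in the probe path below. */
	static int rxd_mode_from_param(int rx_ring_mode)
	{
		if (rx_ring_mode == 2)
			return RXD_MODE_3B;	/* 2 buffer mode */
		if (rx_ring_mode == 3)
			return RXD_MODE_3A;	/* 3 buffer mode */
		return RXD_MODE_1;		/* 1 buffer mode (default) */
	}

	int main(void)
	{
		int param;

		for (param = 1; param <= 3; param++) {
			int mode = rxd_mode_from_param(param);
			printf("rx_ring_mode=%d -> rxd_mode %d: %d-byte RxDs, "
			       "%d RxDs per block\n",
			       param, mode, rxd_size[mode], rxd_count[mode]);
		}
		return 0;
	}

Note that ring sizes are computed in multiples of rxd_count[mode] + 1, since one descriptor slot per 4KB block is consumed by the link to the next block.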
-rw-r--r--  drivers/net/Kconfig |  11
-rw-r--r--  drivers/net/s2io.c  | 762
-rw-r--r--  drivers/net/s2io.h  |  91
3 files changed, 455 insertions(+), 409 deletions(-)
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index bb3bda312bbe..1958d9e16a3a 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -2258,17 +2258,6 @@ config S2IO_NAPI
 
 	  If in doubt, say N.
 
-config 2BUFF_MODE
-	bool "Use 2 Buffer Mode on Rx side."
-	depends on S2IO
-	---help---
-	On enabling the 2 buffer mode, the received frame will be
-	split into 2 parts before being DMA'ed to the hosts memory.
-	The parts are the ethernet header and ethernet payload.
-	This is useful on systems where DMA'ing to to unaligned
-	physical memory loactions comes with a heavy price.
-	If not sure please say N.
-
 endmenu
 
 if !UML
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c
index 3f5e93aad5c7..9c4935407f26 100644
--- a/drivers/net/s2io.c
+++ b/drivers/net/s2io.c
@@ -30,6 +30,8 @@
  * in the driver.
  * rx_ring_sz: This defines the number of descriptors each ring can have. This
  * is also an array of size 8.
+ * rx_ring_mode: This defines the operation mode of all 8 rings. The valid
+ * values are 1, 2 and 3.
  * tx_fifo_num: This defines the number of Tx FIFOs thats used int the driver.
  * tx_fifo_len: This too is an array of 8. Each element defines the number of
  * Tx descriptors that can be associated with each corresponding FIFO.
@@ -65,12 +67,15 @@
 #include "s2io.h"
 #include "s2io-regs.h"
 
-#define DRV_VERSION "Version 2.0.9.1"
+#define DRV_VERSION "Version 2.0.9.3"
 
 /* S2io Driver name & version. */
 static char s2io_driver_name[] = "Neterion";
 static char s2io_driver_version[] = DRV_VERSION;
 
+int rxd_size[4] = {32,48,48,64};
+int rxd_count[4] = {127,85,85,63};
+
 static inline int RXD_IS_UP2DT(RxD_t *rxdp)
 {
 	int ret;
@@ -104,7 +109,7 @@ static inline int rx_buffer_level(nic_t * sp, int rxb_size, int ring)
 	mac_control = &sp->mac_control;
 	if ((mac_control->rings[ring].pkt_cnt - rxb_size) > 16) {
 		level = LOW;
-		if (rxb_size <= MAX_RXDS_PER_BLOCK) {
+		if (rxb_size <= rxd_count[sp->rxd_mode]) {
 			level = PANIC;
 		}
 	}
@@ -296,6 +301,7 @@ static unsigned int rx_ring_sz[MAX_RX_RINGS] =
     {[0 ...(MAX_RX_RINGS - 1)] = 0 };
 static unsigned int rts_frm_len[MAX_RX_RINGS] =
     {[0 ...(MAX_RX_RINGS - 1)] = 0 };
+static unsigned int rx_ring_mode = 1;
 static unsigned int use_continuous_tx_intrs = 1;
 static unsigned int rmac_pause_time = 65535;
 static unsigned int mc_pause_threshold_q0q3 = 187;
@@ -304,6 +310,7 @@ static unsigned int shared_splits;
 static unsigned int tmac_util_period = 5;
 static unsigned int rmac_util_period = 5;
 static unsigned int bimodal = 0;
+static unsigned int l3l4hdr_size = 128;
 #ifndef CONFIG_S2IO_NAPI
 static unsigned int indicate_max_pkts;
 #endif
@@ -357,10 +364,8 @@ static int init_shared_mem(struct s2io_nic *nic)
 	int i, j, blk_cnt, rx_sz, tx_sz;
 	int lst_size, lst_per_page;
 	struct net_device *dev = nic->dev;
-#ifdef CONFIG_2BUFF_MODE
 	unsigned long tmp;
 	buffAdd_t *ba;
-#endif
 
 	mac_info_t *mac_control;
 	struct config_param *config;
@@ -458,7 +463,8 @@ static int init_shared_mem(struct s2io_nic *nic)
 	/* Allocation and initialization of RXDs in Rings */
 	size = 0;
 	for (i = 0; i < config->rx_ring_num; i++) {
-		if (config->rx_cfg[i].num_rxd % (MAX_RXDS_PER_BLOCK + 1)) {
+		if (config->rx_cfg[i].num_rxd %
+		    (rxd_count[nic->rxd_mode] + 1)) {
 			DBG_PRINT(ERR_DBG, "%s: RxD count of ", dev->name);
 			DBG_PRINT(ERR_DBG, "Ring%d is not a multiple of ",
 				  i);
@@ -467,11 +473,15 @@ static int init_shared_mem(struct s2io_nic *nic)
 		}
 		size += config->rx_cfg[i].num_rxd;
 		mac_control->rings[i].block_count =
-			config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
-		mac_control->rings[i].pkt_cnt =
-			config->rx_cfg[i].num_rxd - mac_control->rings[i].block_count;
+			config->rx_cfg[i].num_rxd /
+			(rxd_count[nic->rxd_mode] + 1 );
+		mac_control->rings[i].pkt_cnt = config->rx_cfg[i].num_rxd -
+			mac_control->rings[i].block_count;
 	}
-	size = (size * (sizeof(RxD_t)));
+	if (nic->rxd_mode == RXD_MODE_1)
+		size = (size * (sizeof(RxD1_t)));
+	else
+		size = (size * (sizeof(RxD3_t)));
 	rx_sz = size;
 
 	for (i = 0; i < config->rx_ring_num; i++) {
@@ -486,15 +496,15 @@ static int init_shared_mem(struct s2io_nic *nic)
 		mac_control->rings[i].nic = nic;
 		mac_control->rings[i].ring_no = i;
 
-		blk_cnt =
-		    config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
+		blk_cnt = config->rx_cfg[i].num_rxd /
+				(rxd_count[nic->rxd_mode] + 1);
 		/*  Allocating all the Rx blocks */
 		for (j = 0; j < blk_cnt; j++) {
-#ifndef CONFIG_2BUFF_MODE
-			size = (MAX_RXDS_PER_BLOCK + 1) * (sizeof(RxD_t));
-#else
-			size = SIZE_OF_BLOCK;
-#endif
+			rx_block_info_t *rx_blocks;
+			int l;
+
+			rx_blocks = &mac_control->rings[i].rx_blocks[j];
+			size = SIZE_OF_BLOCK; //size is always page size
 			tmp_v_addr = pci_alloc_consistent(nic->pdev, size,
 							  &tmp_p_addr);
 			if (tmp_v_addr == NULL) {
@@ -504,11 +514,24 @@ static int init_shared_mem(struct s2io_nic *nic)
 				 * memory that was alloced till the
 				 * failure happened.
 				 */
-				mac_control->rings[i].rx_blocks[j].block_virt_addr =
-				    tmp_v_addr;
+				rx_blocks->block_virt_addr = tmp_v_addr;
 				return -ENOMEM;
 			}
 			memset(tmp_v_addr, 0, size);
+			rx_blocks->block_virt_addr = tmp_v_addr;
+			rx_blocks->block_dma_addr = tmp_p_addr;
+			rx_blocks->rxds = kmalloc(sizeof(rxd_info_t)*
+						  rxd_count[nic->rxd_mode],
+						  GFP_KERNEL);
+			for (l=0; l<rxd_count[nic->rxd_mode];l++) {
+				rx_blocks->rxds[l].virt_addr =
+					rx_blocks->block_virt_addr +
+					(rxd_size[nic->rxd_mode] * l);
+				rx_blocks->rxds[l].dma_addr =
+					rx_blocks->block_dma_addr +
+					(rxd_size[nic->rxd_mode] * l);
+			}
+
 			mac_control->rings[i].rx_blocks[j].block_virt_addr =
 				tmp_v_addr;
 			mac_control->rings[i].rx_blocks[j].block_dma_addr =
@@ -528,62 +551,58 @@ static int init_shared_mem(struct s2io_nic *nic)
 						    blk_cnt].block_dma_addr;
 
 		pre_rxd_blk = (RxD_block_t *) tmp_v_addr;
-		pre_rxd_blk->reserved_1 = END_OF_BLOCK;	/* last RxD
-							 * marker.
-							 */
-#ifndef CONFIG_2BUFF_MODE
 		pre_rxd_blk->reserved_2_pNext_RxD_block =
 		    (unsigned long) tmp_v_addr_next;
-#endif
 		pre_rxd_blk->pNext_RxD_Blk_physical =
 		    (u64) tmp_p_addr_next;
 	}
 }
-
-#ifdef CONFIG_2BUFF_MODE
-	/*
-	 * Allocation of Storages for buffer addresses in 2BUFF mode
-	 * and the buffers as well.
-	 */
-	for (i = 0; i < config->rx_ring_num; i++) {
-		blk_cnt =
-		    config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
-		mac_control->rings[i].ba = kmalloc((sizeof(buffAdd_t *) * blk_cnt),
-				     GFP_KERNEL);
-		if (!mac_control->rings[i].ba)
-			return -ENOMEM;
-		for (j = 0; j < blk_cnt; j++) {
-			int k = 0;
-			mac_control->rings[i].ba[j] = kmalloc((sizeof(buffAdd_t) *
-						(MAX_RXDS_PER_BLOCK + 1)),
-					     GFP_KERNEL);
-			if (!mac_control->rings[i].ba[j])
-				return -ENOMEM;
-			while (k != MAX_RXDS_PER_BLOCK) {
-				ba = &mac_control->rings[i].ba[j][k];
-
-				ba->ba_0_org = (void *) kmalloc
-				    (BUF0_LEN + ALIGN_SIZE, GFP_KERNEL);
-				if (!ba->ba_0_org)
-					return -ENOMEM;
-				tmp = (unsigned long) ba->ba_0_org;
-				tmp += ALIGN_SIZE;
-				tmp &= ~((unsigned long) ALIGN_SIZE);
-				ba->ba_0 = (void *) tmp;
-
-				ba->ba_1_org = (void *) kmalloc
-				    (BUF1_LEN + ALIGN_SIZE, GFP_KERNEL);
-				if (!ba->ba_1_org)
-					return -ENOMEM;
-				tmp = (unsigned long) ba->ba_1_org;
-				tmp += ALIGN_SIZE;
-				tmp &= ~((unsigned long) ALIGN_SIZE);
-				ba->ba_1 = (void *) tmp;
-				k++;
+	if (nic->rxd_mode >= RXD_MODE_3A) {
+		/*
+		 * Allocation of Storages for buffer addresses in 2BUFF mode
+		 * and the buffers as well.
+		 */
+		for (i = 0; i < config->rx_ring_num; i++) {
+			blk_cnt = config->rx_cfg[i].num_rxd /
+				(rxd_count[nic->rxd_mode]+ 1);
+			mac_control->rings[i].ba =
+				kmalloc((sizeof(buffAdd_t *) * blk_cnt),
+					GFP_KERNEL);
+			if (!mac_control->rings[i].ba)
+				return -ENOMEM;
+			for (j = 0; j < blk_cnt; j++) {
+				int k = 0;
+				mac_control->rings[i].ba[j] =
+					kmalloc((sizeof(buffAdd_t) *
+						(rxd_count[nic->rxd_mode] + 1)),
+						GFP_KERNEL);
+				if (!mac_control->rings[i].ba[j])
+					return -ENOMEM;
+				while (k != rxd_count[nic->rxd_mode]) {
+					ba = &mac_control->rings[i].ba[j][k];
+
+					ba->ba_0_org = (void *) kmalloc
+					    (BUF0_LEN + ALIGN_SIZE, GFP_KERNEL);
+					if (!ba->ba_0_org)
+						return -ENOMEM;
+					tmp = (unsigned long)ba->ba_0_org;
+					tmp += ALIGN_SIZE;
+					tmp &= ~((unsigned long) ALIGN_SIZE);
+					ba->ba_0 = (void *) tmp;
+
+					ba->ba_1_org = (void *) kmalloc
+					    (BUF1_LEN + ALIGN_SIZE, GFP_KERNEL);
+					if (!ba->ba_1_org)
+						return -ENOMEM;
+					tmp = (unsigned long) ba->ba_1_org;
+					tmp += ALIGN_SIZE;
+					tmp &= ~((unsigned long) ALIGN_SIZE);
+					ba->ba_1 = (void *) tmp;
+					k++;
+				}
 			}
 		}
 	}
-#endif
 
 	/* Allocation and initialization of Statistics block */
 	size = sizeof(StatInfo_t);
@@ -669,11 +688,7 @@ static void free_shared_mem(struct s2io_nic *nic)
 		kfree(mac_control->fifos[i].list_info);
 	}
 
-#ifndef CONFIG_2BUFF_MODE
-	size = (MAX_RXDS_PER_BLOCK + 1) * (sizeof(RxD_t));
-#else
 	size = SIZE_OF_BLOCK;
-#endif
 	for (i = 0; i < config->rx_ring_num; i++) {
 		blk_cnt = mac_control->rings[i].block_count;
 		for (j = 0; j < blk_cnt; j++) {
@@ -685,29 +700,31 @@ static void free_shared_mem(struct s2io_nic *nic)
 				break;
 			pci_free_consistent(nic->pdev, size,
 					    tmp_v_addr, tmp_p_addr);
+			kfree(mac_control->rings[i].rx_blocks[j].rxds);
 		}
 	}
 
-#ifdef CONFIG_2BUFF_MODE
-	/* Freeing buffer storage addresses in 2BUFF mode. */
-	for (i = 0; i < config->rx_ring_num; i++) {
-		blk_cnt =
-		    config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
-		for (j = 0; j < blk_cnt; j++) {
-			int k = 0;
-			if (!mac_control->rings[i].ba[j])
-				continue;
-			while (k != MAX_RXDS_PER_BLOCK) {
-				buffAdd_t *ba = &mac_control->rings[i].ba[j][k];
-				kfree(ba->ba_0_org);
-				kfree(ba->ba_1_org);
-				k++;
-			}
-			kfree(mac_control->rings[i].ba[j]);
-		}
-		kfree(mac_control->rings[i].ba);
-	}
-#endif
+	if (nic->rxd_mode >= RXD_MODE_3A) {
+		/* Freeing buffer storage addresses in 2BUFF mode. */
+		for (i = 0; i < config->rx_ring_num; i++) {
+			blk_cnt = config->rx_cfg[i].num_rxd /
+				(rxd_count[nic->rxd_mode] + 1);
+			for (j = 0; j < blk_cnt; j++) {
+				int k = 0;
+				if (!mac_control->rings[i].ba[j])
+					continue;
+				while (k != rxd_count[nic->rxd_mode]) {
+					buffAdd_t *ba =
+						&mac_control->rings[i].ba[j][k];
+					kfree(ba->ba_0_org);
+					kfree(ba->ba_1_org);
+					k++;
+				}
+				kfree(mac_control->rings[i].ba[j]);
+			}
+			kfree(mac_control->rings[i].ba);
+		}
+	}
 
 	if (mac_control->stats_mem) {
 		pci_free_consistent(nic->pdev,
@@ -1894,20 +1911,19 @@ static int start_nic(struct s2io_nic *nic)
 		val64 = readq(&bar0->prc_ctrl_n[i]);
 		if (nic->config.bimodal)
 			val64 |= PRC_CTRL_BIMODAL_INTERRUPT;
-#ifndef CONFIG_2BUFF_MODE
-		val64 |= PRC_CTRL_RC_ENABLED;
-#else
-		val64 |= PRC_CTRL_RC_ENABLED | PRC_CTRL_RING_MODE_3;
-#endif
+		if (nic->rxd_mode == RXD_MODE_1)
+			val64 |= PRC_CTRL_RC_ENABLED;
+		else
+			val64 |= PRC_CTRL_RC_ENABLED | PRC_CTRL_RING_MODE_3;
 		writeq(val64, &bar0->prc_ctrl_n[i]);
 	}
 
-#ifdef CONFIG_2BUFF_MODE
-	/* Enabling 2 buffer mode by writing into Rx_pa_cfg reg. */
-	val64 = readq(&bar0->rx_pa_cfg);
-	val64 |= RX_PA_CFG_IGNORE_L2_ERR;
-	writeq(val64, &bar0->rx_pa_cfg);
-#endif
+	if (nic->rxd_mode == RXD_MODE_3B) {
+		/* Enabling 2 buffer mode by writing into Rx_pa_cfg reg. */
+		val64 = readq(&bar0->rx_pa_cfg);
+		val64 |= RX_PA_CFG_IGNORE_L2_ERR;
+		writeq(val64, &bar0->rx_pa_cfg);
+	}
 
 	/*
 	 * Enabling MC-RLDRAM. After enabling the device, we timeout
@@ -2090,6 +2106,41 @@ static void stop_nic(struct s2io_nic *nic)
 	}
 }
 
+int fill_rxd_3buf(nic_t *nic, RxD_t *rxdp, struct sk_buff *skb)
+{
+	struct net_device *dev = nic->dev;
+	struct sk_buff *frag_list;
+	u64 tmp;
+
+	/* Buffer-1 receives L3/L4 headers */
+	((RxD3_t*)rxdp)->Buffer1_ptr = pci_map_single
+			(nic->pdev, skb->data, l3l4hdr_size + 4,
+			PCI_DMA_FROMDEVICE);
+
+	/* skb_shinfo(skb)->frag_list will have L4 data payload */
+	skb_shinfo(skb)->frag_list = dev_alloc_skb(dev->mtu + ALIGN_SIZE);
+	if (skb_shinfo(skb)->frag_list == NULL) {
+		DBG_PRINT(ERR_DBG, "%s: dev_alloc_skb failed\n ", dev->name);
+		return -ENOMEM ;
+	}
+	frag_list = skb_shinfo(skb)->frag_list;
+	frag_list->next = NULL;
+	tmp = (u64) frag_list->data;
+	tmp += ALIGN_SIZE;
+	tmp &= ~ALIGN_SIZE;
+	frag_list->data = (void *) tmp;
+	frag_list->tail = (void *) tmp;
+
+	/* Buffer-2 receives L4 data payload */
+	((RxD3_t*)rxdp)->Buffer2_ptr = pci_map_single(nic->pdev,
+				frag_list->data, dev->mtu,
+				PCI_DMA_FROMDEVICE);
+	rxdp->Control_2 |= SET_BUFFER1_SIZE_3(l3l4hdr_size + 4);
+	rxdp->Control_2 |= SET_BUFFER2_SIZE_3(dev->mtu);
+
+	return SUCCESS;
+}
+
 /**
  *  fill_rx_buffers - Allocates the Rx side skbs
  *  @nic:  device private variable
@@ -2117,18 +2168,12 @@ int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
 	struct sk_buff *skb;
 	RxD_t *rxdp;
 	int off, off1, size, block_no, block_no1;
-	int offset, offset1;
 	u32 alloc_tab = 0;
 	u32 alloc_cnt;
 	mac_info_t *mac_control;
 	struct config_param *config;
-#ifdef CONFIG_2BUFF_MODE
-	RxD_t *rxdpnext;
-	int nextblk;
 	u64 tmp;
 	buffAdd_t *ba;
-	dma_addr_t rxdpphys;
-#endif
 #ifndef CONFIG_S2IO_NAPI
 	unsigned long flags;
 #endif
@@ -2138,8 +2183,6 @@ int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
 	config = &nic->config;
 	alloc_cnt = mac_control->rings[ring_no].pkt_cnt -
 	    atomic_read(&nic->rx_bufs_left[ring_no]);
-	size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
-	    HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
 
 	while (alloc_tab < alloc_cnt) {
 		block_no = mac_control->rings[ring_no].rx_curr_put_info.
@@ -2148,159 +2191,145 @@ int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
 		    block_index;
 		off = mac_control->rings[ring_no].rx_curr_put_info.offset;
 		off1 = mac_control->rings[ring_no].rx_curr_get_info.offset;
-#ifndef CONFIG_2BUFF_MODE
-		offset = block_no * (MAX_RXDS_PER_BLOCK + 1) + off;
-		offset1 = block_no1 * (MAX_RXDS_PER_BLOCK + 1) + off1;
-#else
-		offset = block_no * (MAX_RXDS_PER_BLOCK) + off;
-		offset1 = block_no1 * (MAX_RXDS_PER_BLOCK) + off1;
-#endif
 
-		rxdp = mac_control->rings[ring_no].rx_blocks[block_no].
-			block_virt_addr + off;
-		if ((offset == offset1) && (rxdp->Host_Control)) {
-			DBG_PRINT(INTR_DBG, "%s: Get and Put", dev->name);
+		rxdp = mac_control->rings[ring_no].
+				rx_blocks[block_no].rxds[off].virt_addr;
+
+		if ((block_no == block_no1) && (off == off1) &&
+					(rxdp->Host_Control)) {
+			DBG_PRINT(INTR_DBG, "%s: Get and Put",
+				  dev->name);
 			DBG_PRINT(INTR_DBG, " info equated\n");
 			goto end;
 		}
-#ifndef CONFIG_2BUFF_MODE
-		if (rxdp->Control_1 == END_OF_BLOCK) {
+		if (off && (off == rxd_count[nic->rxd_mode])) {
 			mac_control->rings[ring_no].rx_curr_put_info.
 			    block_index++;
+			if (mac_control->rings[ring_no].rx_curr_put_info.
+			    block_index == mac_control->rings[ring_no].
+					block_count)
+				mac_control->rings[ring_no].rx_curr_put_info.
+					block_index = 0;
+			block_no = mac_control->rings[ring_no].
+					rx_curr_put_info.block_index;
+			if (off == rxd_count[nic->rxd_mode])
+				off = 0;
 			mac_control->rings[ring_no].rx_curr_put_info.
-			    block_index %= mac_control->rings[ring_no].block_count;
-			block_no = mac_control->rings[ring_no].rx_curr_put_info.
-			    block_index;
-			off++;
-			off %= (MAX_RXDS_PER_BLOCK + 1);
-			mac_control->rings[ring_no].rx_curr_put_info.offset =
-			    off;
-			rxdp = (RxD_t *) ((unsigned long) rxdp->Control_2);
+				offset = off;
+			rxdp = mac_control->rings[ring_no].
+				rx_blocks[block_no].block_virt_addr;
 			DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n",
 				  dev->name, rxdp);
 		}
 #ifndef CONFIG_S2IO_NAPI
 		spin_lock_irqsave(&nic->put_lock, flags);
 		mac_control->rings[ring_no].put_pos =
-		    (block_no * (MAX_RXDS_PER_BLOCK + 1)) + off;
+		    (block_no * (rxd_count[nic->rxd_mode] + 1)) + off;
 		spin_unlock_irqrestore(&nic->put_lock, flags);
 #endif
-#else
-		if (rxdp->Host_Control == END_OF_BLOCK) {
+		if ((rxdp->Control_1 & RXD_OWN_XENA) &&
+			((nic->rxd_mode >= RXD_MODE_3A) &&
+				(rxdp->Control_2 & BIT(0)))) {
 			mac_control->rings[ring_no].rx_curr_put_info.
-			    block_index++;
-			mac_control->rings[ring_no].rx_curr_put_info.block_index
-			    %= mac_control->rings[ring_no].block_count;
-			block_no = mac_control->rings[ring_no].rx_curr_put_info
-			    .block_index;
-			off = 0;
-			DBG_PRINT(INTR_DBG, "%s: block%d at: 0x%llx\n",
-				  dev->name, block_no,
-				  (unsigned long long) rxdp->Control_1);
-			mac_control->rings[ring_no].rx_curr_put_info.offset =
-			    off;
-			rxdp = mac_control->rings[ring_no].rx_blocks[block_no].
-			    block_virt_addr;
-		}
-#ifndef CONFIG_S2IO_NAPI
-		spin_lock_irqsave(&nic->put_lock, flags);
-		mac_control->rings[ring_no].put_pos = (block_no *
-			(MAX_RXDS_PER_BLOCK + 1)) + off;
-		spin_unlock_irqrestore(&nic->put_lock, flags);
-#endif
-#endif
-
-#ifndef CONFIG_2BUFF_MODE
-		if (rxdp->Control_1 & RXD_OWN_XENA)
-#else
-		if (rxdp->Control_2 & BIT(0))
-#endif
-		{
-			mac_control->rings[ring_no].rx_curr_put_info.
-			    offset = off;
+					offset = off;
 			goto end;
 		}
-#ifdef CONFIG_2BUFF_MODE
-		/*
-		 * RxDs Spanning cache lines will be replenished only
-		 * if the succeeding RxD is also owned by Host. It
-		 * will always be the ((8*i)+3) and ((8*i)+6)
-		 * descriptors for the 48 byte descriptor. The offending
-		 * decsriptor is of-course the 3rd descriptor.
-		 */
-		rxdpphys = mac_control->rings[ring_no].rx_blocks[block_no].
-		    block_dma_addr + (off * sizeof(RxD_t));
-		if (((u64) (rxdpphys)) % 128 > 80) {
-			rxdpnext = mac_control->rings[ring_no].rx_blocks[block_no].
-			    block_virt_addr + (off + 1);
-			if (rxdpnext->Host_Control == END_OF_BLOCK) {
-				nextblk = (block_no + 1) %
-				    (mac_control->rings[ring_no].block_count);
-				rxdpnext = mac_control->rings[ring_no].rx_blocks
-				    [nextblk].block_virt_addr;
-			}
-			if (rxdpnext->Control_2 & BIT(0))
-				goto end;
-		}
-#endif
+		/* calculate size of skb based on ring mode */
+		size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
+				HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
+		if (nic->rxd_mode == RXD_MODE_1)
+			size += NET_IP_ALIGN;
+		else if (nic->rxd_mode == RXD_MODE_3B)
+			size = dev->mtu + ALIGN_SIZE + BUF0_LEN + 4;
+		else
+			size = l3l4hdr_size + ALIGN_SIZE + BUF0_LEN + 4;
 
-#ifndef CONFIG_2BUFF_MODE
-		skb = dev_alloc_skb(size + NET_IP_ALIGN);
-#else
-		skb = dev_alloc_skb(dev->mtu + ALIGN_SIZE + BUF0_LEN + 4);
-#endif
-		if (!skb) {
+		/* allocate skb */
+		skb = dev_alloc_skb(size);
+		if(!skb) {
 			DBG_PRINT(ERR_DBG, "%s: Out of ", dev->name);
 			DBG_PRINT(ERR_DBG, "memory to allocate SKBs\n");
 			if (first_rxdp) {
 				wmb();
 				first_rxdp->Control_1 |= RXD_OWN_XENA;
 			}
-			return -ENOMEM;
+			return -ENOMEM ;
+		}
+		if (nic->rxd_mode == RXD_MODE_1) {
+			/* 1 buffer mode - normal operation mode */
+			memset(rxdp, 0, sizeof(RxD1_t));
+			skb_reserve(skb, NET_IP_ALIGN);
+			((RxD1_t*)rxdp)->Buffer0_ptr = pci_map_single
+			    (nic->pdev, skb->data, size, PCI_DMA_FROMDEVICE);
+			rxdp->Control_2 &= (~MASK_BUFFER0_SIZE_1);
+			rxdp->Control_2 |= SET_BUFFER0_SIZE_1(size);
+
+		} else if (nic->rxd_mode >= RXD_MODE_3A) {
+			/*
+			 * 2 or 3 buffer mode -
+			 * Both 2 buffer mode and 3 buffer mode provides 128
+			 * byte aligned receive buffers.
+			 *
+			 * 3 buffer mode provides header separation where in
+			 * skb->data will have L3/L4 headers where as
+			 * skb_shinfo(skb)->frag_list will have the L4 data
+			 * payload
+			 */
+
+			memset(rxdp, 0, sizeof(RxD3_t));
+			ba = &mac_control->rings[ring_no].ba[block_no][off];
+			skb_reserve(skb, BUF0_LEN);
+			tmp = (u64)(unsigned long) skb->data;
+			tmp += ALIGN_SIZE;
+			tmp &= ~ALIGN_SIZE;
+			skb->data = (void *) (unsigned long)tmp;
+			skb->tail = (void *) (unsigned long)tmp;
+
+			((RxD3_t*)rxdp)->Buffer0_ptr =
+				pci_map_single(nic->pdev, ba->ba_0, BUF0_LEN,
+					   PCI_DMA_FROMDEVICE);
+			rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
+			if (nic->rxd_mode == RXD_MODE_3B) {
+				/* Two buffer mode */
+
+				/*
+				 * Buffer2 will have L3/L4 header plus
+				 * L4 payload
+				 */
+				((RxD3_t*)rxdp)->Buffer2_ptr = pci_map_single
+				(nic->pdev, skb->data, dev->mtu + 4,
+						PCI_DMA_FROMDEVICE);
+
+				/* Buffer-1 will be dummy buffer not used */
+				((RxD3_t*)rxdp)->Buffer1_ptr =
+				pci_map_single(nic->pdev, ba->ba_1, BUF1_LEN,
+					PCI_DMA_FROMDEVICE);
+				rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
+				rxdp->Control_2 |= SET_BUFFER2_SIZE_3
+							(dev->mtu + 4);
+			} else {
+				/* 3 buffer mode */
+				if (fill_rxd_3buf(nic, rxdp, skb) == -ENOMEM) {
+					dev_kfree_skb_irq(skb);
+					if (first_rxdp) {
+						wmb();
+						first_rxdp->Control_1 |=
+							RXD_OWN_XENA;
+					}
+					return -ENOMEM ;
+				}
+			}
+			rxdp->Control_2 |= BIT(0);
 		}
-#ifndef CONFIG_2BUFF_MODE
-		skb_reserve(skb, NET_IP_ALIGN);
-		memset(rxdp, 0, sizeof(RxD_t));
-		rxdp->Buffer0_ptr = pci_map_single
-		    (nic->pdev, skb->data, size, PCI_DMA_FROMDEVICE);
-		rxdp->Control_2 &= (~MASK_BUFFER0_SIZE);
-		rxdp->Control_2 |= SET_BUFFER0_SIZE(size);
 		rxdp->Host_Control = (unsigned long) (skb);
 		if (alloc_tab & ((1 << rxsync_frequency) - 1))
 			rxdp->Control_1 |= RXD_OWN_XENA;
 		off++;
-		off %= (MAX_RXDS_PER_BLOCK + 1);
-		mac_control->rings[ring_no].rx_curr_put_info.offset = off;
-#else
-		ba = &mac_control->rings[ring_no].ba[block_no][off];
-		skb_reserve(skb, BUF0_LEN);
-		tmp = ((unsigned long) skb->data & ALIGN_SIZE);
-		if (tmp)
-			skb_reserve(skb, (ALIGN_SIZE + 1) - tmp);
-
-		memset(rxdp, 0, sizeof(RxD_t));
-		rxdp->Buffer2_ptr = pci_map_single
-		    (nic->pdev, skb->data, dev->mtu + BUF0_LEN + 4,
-		     PCI_DMA_FROMDEVICE);
-		rxdp->Buffer0_ptr =
-		    pci_map_single(nic->pdev, ba->ba_0, BUF0_LEN,
-				   PCI_DMA_FROMDEVICE);
-		rxdp->Buffer1_ptr =
-		    pci_map_single(nic->pdev, ba->ba_1, BUF1_LEN,
-				   PCI_DMA_FROMDEVICE);
-
-		rxdp->Control_2 = SET_BUFFER2_SIZE(dev->mtu + 4);
-		rxdp->Control_2 |= SET_BUFFER0_SIZE(BUF0_LEN);
-		rxdp->Control_2 |= SET_BUFFER1_SIZE(1);	/* dummy. */
-		rxdp->Control_2 |= BIT(0);	/* Set Buffer_Empty bit. */
-		rxdp->Host_Control = (u64) ((unsigned long) (skb));
-		if (alloc_tab & ((1 << rxsync_frequency) - 1))
-			rxdp->Control_1 |= RXD_OWN_XENA;
-		off++;
+		if (off == (rxd_count[nic->rxd_mode] + 1))
+			off = 0;
 		mac_control->rings[ring_no].rx_curr_put_info.offset = off;
-#endif
-		rxdp->Control_2 |= SET_RXD_MARKER;
 
+		rxdp->Control_2 |= SET_RXD_MARKER;
 		if (!(alloc_tab & ((1 << rxsync_frequency) - 1))) {
 			if (first_rxdp) {
 				wmb();
@@ -2325,6 +2354,67 @@ int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
 	return SUCCESS;
 }
 
+static void free_rxd_blk(struct s2io_nic *sp, int ring_no, int blk)
+{
+	struct net_device *dev = sp->dev;
+	int j;
+	struct sk_buff *skb;
+	RxD_t *rxdp;
+	mac_info_t *mac_control;
+	buffAdd_t *ba;
+
+	mac_control = &sp->mac_control;
+	for (j = 0 ; j < rxd_count[sp->rxd_mode]; j++) {
+		rxdp = mac_control->rings[ring_no].
+				rx_blocks[blk].rxds[j].virt_addr;
+		skb = (struct sk_buff *)
+			((unsigned long) rxdp->Host_Control);
+		if (!skb) {
+			continue;
+		}
+		if (sp->rxd_mode == RXD_MODE_1) {
+			pci_unmap_single(sp->pdev, (dma_addr_t)
+				 ((RxD1_t*)rxdp)->Buffer0_ptr,
+				 dev->mtu +
+				 HEADER_ETHERNET_II_802_3_SIZE
+				 + HEADER_802_2_SIZE +
+				 HEADER_SNAP_SIZE,
+				 PCI_DMA_FROMDEVICE);
+			memset(rxdp, 0, sizeof(RxD1_t));
+		} else if(sp->rxd_mode == RXD_MODE_3B) {
+			ba = &mac_control->rings[ring_no].
+				ba[blk][j];
+			pci_unmap_single(sp->pdev, (dma_addr_t)
+				 ((RxD3_t*)rxdp)->Buffer0_ptr,
+				 BUF0_LEN,
+				 PCI_DMA_FROMDEVICE);
+			pci_unmap_single(sp->pdev, (dma_addr_t)
+				 ((RxD3_t*)rxdp)->Buffer1_ptr,
+				 BUF1_LEN,
+				 PCI_DMA_FROMDEVICE);
+			pci_unmap_single(sp->pdev, (dma_addr_t)
+				 ((RxD3_t*)rxdp)->Buffer2_ptr,
+				 dev->mtu + 4,
+				 PCI_DMA_FROMDEVICE);
+			memset(rxdp, 0, sizeof(RxD3_t));
+		} else {
+			pci_unmap_single(sp->pdev, (dma_addr_t)
+				((RxD3_t*)rxdp)->Buffer0_ptr, BUF0_LEN,
+				PCI_DMA_FROMDEVICE);
+			pci_unmap_single(sp->pdev, (dma_addr_t)
+				((RxD3_t*)rxdp)->Buffer1_ptr,
+				l3l4hdr_size + 4,
+				PCI_DMA_FROMDEVICE);
+			pci_unmap_single(sp->pdev, (dma_addr_t)
+				((RxD3_t*)rxdp)->Buffer2_ptr, dev->mtu,
+				PCI_DMA_FROMDEVICE);
+			memset(rxdp, 0, sizeof(RxD3_t));
+		}
+		dev_kfree_skb(skb);
+		atomic_dec(&sp->rx_bufs_left[ring_no]);
+	}
+}
+
 /**
  *  free_rx_buffers - Frees all Rx buffers
  *  @sp: device private variable.
@@ -2337,77 +2427,17 @@ int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
 static void free_rx_buffers(struct s2io_nic *sp)
 {
 	struct net_device *dev = sp->dev;
-	int i, j, blk = 0, off, buf_cnt = 0;
-	RxD_t *rxdp;
-	struct sk_buff *skb;
+	int i, blk = 0, buf_cnt = 0;
 	mac_info_t *mac_control;
 	struct config_param *config;
-#ifdef CONFIG_2BUFF_MODE
-	buffAdd_t *ba;
-#endif
 
 	mac_control = &sp->mac_control;
 	config = &sp->config;
 
 	for (i = 0; i < config->rx_ring_num; i++) {
-		for (j = 0, blk = 0; j < config->rx_cfg[i].num_rxd; j++) {
-			off = j % (MAX_RXDS_PER_BLOCK + 1);
-			rxdp = mac_control->rings[i].rx_blocks[blk].
-				block_virt_addr + off;
-
-#ifndef CONFIG_2BUFF_MODE
-			if (rxdp->Control_1 == END_OF_BLOCK) {
-				rxdp =
-				    (RxD_t *) ((unsigned long) rxdp->
-					       Control_2);
-				j++;
-				blk++;
-			}
-#else
-			if (rxdp->Host_Control == END_OF_BLOCK) {
-				blk++;
-				continue;
-			}
-#endif
+		for (blk = 0; blk < rx_ring_sz[i]; blk++)
+			free_rxd_blk(sp,i,blk);
 
-			if (!(rxdp->Control_1 & RXD_OWN_XENA)) {
-				memset(rxdp, 0, sizeof(RxD_t));
-				continue;
-			}
-
-			skb =
-			    (struct sk_buff *) ((unsigned long) rxdp->
-						Host_Control);
-			if (skb) {
-#ifndef CONFIG_2BUFF_MODE
-				pci_unmap_single(sp->pdev, (dma_addr_t)
-						 rxdp->Buffer0_ptr,
-						 dev->mtu +
-						 HEADER_ETHERNET_II_802_3_SIZE
-						 + HEADER_802_2_SIZE +
-						 HEADER_SNAP_SIZE,
-						 PCI_DMA_FROMDEVICE);
-#else
-				ba = &mac_control->rings[i].ba[blk][off];
-				pci_unmap_single(sp->pdev, (dma_addr_t)
-						 rxdp->Buffer0_ptr,
-						 BUF0_LEN,
-						 PCI_DMA_FROMDEVICE);
-				pci_unmap_single(sp->pdev, (dma_addr_t)
-						 rxdp->Buffer1_ptr,
-						 BUF1_LEN,
-						 PCI_DMA_FROMDEVICE);
-				pci_unmap_single(sp->pdev, (dma_addr_t)
-						 rxdp->Buffer2_ptr,
-						 dev->mtu + BUF0_LEN + 4,
-						 PCI_DMA_FROMDEVICE);
-#endif
-				dev_kfree_skb(skb);
-				atomic_dec(&sp->rx_bufs_left[i]);
-				buf_cnt++;
-			}
-			memset(rxdp, 0, sizeof(RxD_t));
-		}
 		mac_control->rings[i].rx_curr_put_info.block_index = 0;
 		mac_control->rings[i].rx_curr_get_info.block_index = 0;
 		mac_control->rings[i].rx_curr_put_info.offset = 0;
@@ -2513,7 +2543,7 @@ static void rx_intr_handler(ring_info_t *ring_data)
 {
 	nic_t *nic = ring_data->nic;
 	struct net_device *dev = (struct net_device *) nic->dev;
-	int get_block, get_offset, put_block, put_offset, ring_bufs;
+	int get_block, put_block, put_offset;
 	rx_curr_get_info_t get_info, put_info;
 	RxD_t *rxdp;
 	struct sk_buff *skb;
@@ -2532,21 +2562,22 @@ static void rx_intr_handler(ring_info_t *ring_data)
 	get_block = get_info.block_index;
 	put_info = ring_data->rx_curr_put_info;
 	put_block = put_info.block_index;
-	ring_bufs = get_info.ring_len+1;
-	rxdp = ring_data->rx_blocks[get_block].block_virt_addr +
-		get_info.offset;
-	get_offset = (get_block * (MAX_RXDS_PER_BLOCK + 1)) +
-		get_info.offset;
+	rxdp = ring_data->rx_blocks[get_block].rxds[get_info.offset].virt_addr;
 #ifndef CONFIG_S2IO_NAPI
 	spin_lock(&nic->put_lock);
 	put_offset = ring_data->put_pos;
 	spin_unlock(&nic->put_lock);
 #else
-	put_offset = (put_block * (MAX_RXDS_PER_BLOCK + 1)) +
+	put_offset = (put_block * (rxd_count[nic->rxd_mode] + 1)) +
 		put_info.offset;
 #endif
-	while (RXD_IS_UP2DT(rxdp) &&
-	       (((get_offset + 1) % ring_bufs) != put_offset)) {
+	while (RXD_IS_UP2DT(rxdp)) {
+		/* If your are next to put index then it's FIFO full condition */
+		if ((get_block == put_block) &&
+		    (get_info.offset + 1) == put_info.offset) {
+			DBG_PRINT(ERR_DBG, "%s: Ring Full\n",dev->name);
+			break;
+		}
 		skb = (struct sk_buff *) ((unsigned long)rxdp->Host_Control);
 		if (skb == NULL) {
 			DBG_PRINT(ERR_DBG, "%s: The skb is ",
@@ -2555,46 +2586,52 @@ static void rx_intr_handler(ring_info_t *ring_data)
 			spin_unlock(&nic->rx_lock);
 			return;
 		}
-#ifndef CONFIG_2BUFF_MODE
-		pci_unmap_single(nic->pdev, (dma_addr_t)
-				 rxdp->Buffer0_ptr,
-				 dev->mtu +
-				 HEADER_ETHERNET_II_802_3_SIZE +
-				 HEADER_802_2_SIZE +
-				 HEADER_SNAP_SIZE,
-				 PCI_DMA_FROMDEVICE);
-#else
-		pci_unmap_single(nic->pdev, (dma_addr_t)
-				 rxdp->Buffer0_ptr,
-				 BUF0_LEN, PCI_DMA_FROMDEVICE);
-		pci_unmap_single(nic->pdev, (dma_addr_t)
-				 rxdp->Buffer1_ptr,
-				 BUF1_LEN, PCI_DMA_FROMDEVICE);
-		pci_unmap_single(nic->pdev, (dma_addr_t)
-				 rxdp->Buffer2_ptr,
-				 dev->mtu + BUF0_LEN + 4,
-				 PCI_DMA_FROMDEVICE);
-#endif
+		if (nic->rxd_mode == RXD_MODE_1) {
+			pci_unmap_single(nic->pdev, (dma_addr_t)
+				 ((RxD1_t*)rxdp)->Buffer0_ptr,
+				 dev->mtu +
+				 HEADER_ETHERNET_II_802_3_SIZE +
+				 HEADER_802_2_SIZE +
+				 HEADER_SNAP_SIZE,
+				 PCI_DMA_FROMDEVICE);
+		} else if (nic->rxd_mode == RXD_MODE_3B) {
+			pci_unmap_single(nic->pdev, (dma_addr_t)
+				 ((RxD3_t*)rxdp)->Buffer0_ptr,
+				 BUF0_LEN, PCI_DMA_FROMDEVICE);
+			pci_unmap_single(nic->pdev, (dma_addr_t)
+				 ((RxD3_t*)rxdp)->Buffer1_ptr,
+				 BUF1_LEN, PCI_DMA_FROMDEVICE);
+			pci_unmap_single(nic->pdev, (dma_addr_t)
+				 ((RxD3_t*)rxdp)->Buffer2_ptr,
+				 dev->mtu + 4,
+				 PCI_DMA_FROMDEVICE);
+		} else {
+			pci_unmap_single(nic->pdev, (dma_addr_t)
+				 ((RxD3_t*)rxdp)->Buffer0_ptr, BUF0_LEN,
+				 PCI_DMA_FROMDEVICE);
+			pci_unmap_single(nic->pdev, (dma_addr_t)
+				 ((RxD3_t*)rxdp)->Buffer1_ptr,
+				 l3l4hdr_size + 4,
+				 PCI_DMA_FROMDEVICE);
+			pci_unmap_single(nic->pdev, (dma_addr_t)
+				 ((RxD3_t*)rxdp)->Buffer2_ptr,
+				 dev->mtu, PCI_DMA_FROMDEVICE);
+		}
 		rx_osm_handler(ring_data, rxdp);
 		get_info.offset++;
-		ring_data->rx_curr_get_info.offset =
-			get_info.offset;
-		rxdp = ring_data->rx_blocks[get_block].block_virt_addr +
-			get_info.offset;
-		if (get_info.offset &&
-		    (!(get_info.offset % MAX_RXDS_PER_BLOCK))) {
+		ring_data->rx_curr_get_info.offset = get_info.offset;
+		rxdp = ring_data->rx_blocks[get_block].
+				rxds[get_info.offset].virt_addr;
+		if (get_info.offset == rxd_count[nic->rxd_mode]) {
 			get_info.offset = 0;
-			ring_data->rx_curr_get_info.offset
-				= get_info.offset;
+			ring_data->rx_curr_get_info.offset = get_info.offset;
 			get_block++;
-			get_block %= ring_data->block_count;
-			ring_data->rx_curr_get_info.block_index
-				= get_block;
+			if (get_block == ring_data->block_count)
+				get_block = 0;
+			ring_data->rx_curr_get_info.block_index = get_block;
 			rxdp = ring_data->rx_blocks[get_block].block_virt_addr;
 		}
 
-		get_offset = (get_block * (MAX_RXDS_PER_BLOCK + 1)) +
-			get_info.offset;
 #ifdef CONFIG_S2IO_NAPI
 		nic->pkts_to_process -= 1;
 		if (!nic->pkts_to_process)
@@ -3044,7 +3081,7 @@ int s2io_set_swapper(nic_t * sp)
 
 int wait_for_msix_trans(nic_t *nic, int i)
 {
-	XENA_dev_config_t __iomem *bar0 = nic->bar0;
+	XENA_dev_config_t *bar0 = (XENA_dev_config_t *) nic->bar0;
 	u64 val64;
 	int ret = 0, cnt = 0;
 
@@ -3065,7 +3102,7 @@ int wait_for_msix_trans(nic_t *nic, int i)
 
 void restore_xmsi_data(nic_t *nic)
 {
-	XENA_dev_config_t __iomem *bar0 = nic->bar0;
+	XENA_dev_config_t *bar0 = (XENA_dev_config_t *) nic->bar0;
 	u64 val64;
 	int i;
 
@@ -3083,7 +3120,7 @@ void restore_xmsi_data(nic_t *nic)
 
 void store_xmsi_data(nic_t *nic)
 {
-	XENA_dev_config_t __iomem *bar0 = nic->bar0;
+	XENA_dev_config_t *bar0 = (XENA_dev_config_t *) nic->bar0;
 	u64 val64, addr, data;
 	int i;
 
@@ -3106,7 +3143,7 @@ void store_xmsi_data(nic_t *nic)
 
 int s2io_enable_msi(nic_t *nic)
 {
-	XENA_dev_config_t __iomem *bar0 = nic->bar0;
+	XENA_dev_config_t *bar0 = (XENA_dev_config_t *) nic->bar0;
 	u16 msi_ctrl, msg_val;
 	struct config_param *config = &nic->config;
 	struct net_device *dev = nic->dev;
@@ -3156,7 +3193,7 @@ int s2io_enable_msi(nic_t *nic)
 
 int s2io_enable_msi_x(nic_t *nic)
 {
-	XENA_dev_config_t __iomem *bar0 = nic->bar0;
+	XENA_dev_config_t *bar0 = (XENA_dev_config_t *) nic->bar0;
 	u64 tx_mat, rx_mat;
 	u16 msi_control;	/* Temp variable */
 	int ret, i, j, msix_indx = 1;
@@ -5537,16 +5574,7 @@ static int rx_osm_handler(ring_info_t *ring_data, RxD_t * rxdp)
 	    ((unsigned long) rxdp->Host_Control);
 	int ring_no = ring_data->ring_no;
 	u16 l3_csum, l4_csum;
-#ifdef CONFIG_2BUFF_MODE
-	int buf0_len = RXD_GET_BUFFER0_SIZE(rxdp->Control_2);
-	int buf2_len = RXD_GET_BUFFER2_SIZE(rxdp->Control_2);
-	int get_block = ring_data->rx_curr_get_info.block_index;
-	int get_off = ring_data->rx_curr_get_info.offset;
-	buffAdd_t *ba = &ring_data->ba[get_block][get_off];
-	unsigned char *buff;
-#else
-	u16 len = (u16) ((RXD_GET_BUFFER0_SIZE(rxdp->Control_2)) >> 48);;
-#endif
+
 	skb->dev = dev;
 	if (rxdp->Control_1 & RXD_T_CODE) {
 		unsigned long long err = rxdp->Control_1 & RXD_T_CODE;
@@ -5563,19 +5591,36 @@ static int rx_osm_handler(ring_info_t *ring_data, RxD_t * rxdp)
 	rxdp->Host_Control = 0;
 	sp->rx_pkt_count++;
 	sp->stats.rx_packets++;
-#ifndef CONFIG_2BUFF_MODE
-	sp->stats.rx_bytes += len;
-#else
-	sp->stats.rx_bytes += buf0_len + buf2_len;
-#endif
+	if (sp->rxd_mode == RXD_MODE_1) {
+		int len = RXD_GET_BUFFER0_SIZE_1(rxdp->Control_2);
 
-#ifndef CONFIG_2BUFF_MODE
-	skb_put(skb, len);
-#else
-	buff = skb_push(skb, buf0_len);
-	memcpy(buff, ba->ba_0, buf0_len);
-	skb_put(skb, buf2_len);
-#endif
+		sp->stats.rx_bytes += len;
+		skb_put(skb, len);
+
+	} else if (sp->rxd_mode >= RXD_MODE_3A) {
+		int get_block = ring_data->rx_curr_get_info.block_index;
+		int get_off = ring_data->rx_curr_get_info.offset;
+		int buf0_len = RXD_GET_BUFFER0_SIZE_3(rxdp->Control_2);
+		int buf2_len = RXD_GET_BUFFER2_SIZE_3(rxdp->Control_2);
+		unsigned char *buff = skb_push(skb, buf0_len);
+
+		buffAdd_t *ba = &ring_data->ba[get_block][get_off];
+		sp->stats.rx_bytes += buf0_len + buf2_len;
+		memcpy(buff, ba->ba_0, buf0_len);
+
+		if (sp->rxd_mode == RXD_MODE_3A) {
+			int buf1_len = RXD_GET_BUFFER1_SIZE_3(rxdp->Control_2);
+
+			skb_put(skb, buf1_len);
+			skb->len += buf2_len;
+			skb->data_len += buf2_len;
+			skb->truesize += buf2_len;
+			skb_put(skb_shinfo(skb)->frag_list, buf2_len);
+			sp->stats.rx_bytes += buf1_len;
+
+		} else
+			skb_put(skb, buf2_len);
+	}
 
 	if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) &&
 	    (sp->rx_csum)) {
@@ -5711,6 +5756,7 @@ MODULE_VERSION(DRV_VERSION);
 
 module_param(tx_fifo_num, int, 0);
 module_param(rx_ring_num, int, 0);
+module_param(rx_ring_mode, int, 0);
 module_param_array(tx_fifo_len, uint, NULL, 0);
 module_param_array(rx_ring_sz, uint, NULL, 0);
 module_param_array(rts_frm_len, uint, NULL, 0);
@@ -5722,6 +5768,7 @@ module_param(shared_splits, int, 0);
 module_param(tmac_util_period, int, 0);
 module_param(rmac_util_period, int, 0);
 module_param(bimodal, bool, 0);
+module_param(l3l4hdr_size, int , 0);
 #ifndef CONFIG_S2IO_NAPI
 module_param(indicate_max_pkts, int, 0);
 #endif
@@ -5843,6 +5890,13 @@ Defaulting to INTA\n");
 	sp->pdev = pdev;
 	sp->high_dma_flag = dma_flag;
 	sp->device_enabled_once = FALSE;
+	if (rx_ring_mode == 1)
+		sp->rxd_mode = RXD_MODE_1;
+	if (rx_ring_mode == 2)
+		sp->rxd_mode = RXD_MODE_3B;
+	if (rx_ring_mode == 3)
+		sp->rxd_mode = RXD_MODE_3A;
+
 	sp->intr_type = dev_intr_type;
 
 	if ((pdev->device == PCI_DEVICE_ID_HERC_WIN) ||
@@ -5895,7 +5949,7 @@ Defaulting to INTA\n");
 	config->rx_ring_num = rx_ring_num;
 	for (i = 0; i < MAX_RX_RINGS; i++) {
 		config->rx_cfg[i].num_rxd = rx_ring_sz[i] *
-		    (MAX_RXDS_PER_BLOCK + 1);
+		    (rxd_count[sp->rxd_mode] + 1);
 		config->rx_cfg[i].ring_priority = i;
 	}
 
@@ -6090,9 +6144,6 @@ Defaulting to INTA\n");
 		DBG_PRINT(ERR_DBG, "(rev %d), Version %s",
 			  get_xena_rev_id(sp->pdev),
 			  s2io_driver_version);
-#ifdef CONFIG_2BUFF_MODE
-		DBG_PRINT(ERR_DBG, ", Buffer mode %d",2);
-#endif
 		switch(sp->intr_type) {
 			case INTA:
 				DBG_PRINT(ERR_DBG, ", Intr type INTA");
@@ -6125,9 +6176,6 @@ Defaulting to INTA\n");
 		DBG_PRINT(ERR_DBG, "(rev %d), Version %s",
 			  get_xena_rev_id(sp->pdev),
 			  s2io_driver_version);
-#ifdef CONFIG_2BUFF_MODE
-		DBG_PRINT(ERR_DBG, ", Buffer mode %d",2);
-#endif
 		switch(sp->intr_type) {
 			case INTA:
 				DBG_PRINT(ERR_DBG, ", Intr type INTA");
@@ -6148,6 +6196,12 @@ Defaulting to INTA\n");
 			  sp->def_mac_addr[0].mac_addr[4],
 			  sp->def_mac_addr[0].mac_addr[5]);
 	}
+	if (sp->rxd_mode == RXD_MODE_3B)
+		DBG_PRINT(ERR_DBG, "%s: 2-Buffer mode support has been "
+			  "enabled\n",dev->name);
+	if (sp->rxd_mode == RXD_MODE_3A)
+		DBG_PRINT(ERR_DBG, "%s: 3-Buffer mode support has been "
+			  "enabled\n",dev->name);
 
 	/* Initialize device name */
 	strcpy(sp->name, dev->name);
diff --git a/drivers/net/s2io.h b/drivers/net/s2io.h
index 1cc24b56760e..419aad7f10e7 100644
--- a/drivers/net/s2io.h
+++ b/drivers/net/s2io.h
@@ -418,7 +418,7 @@ typedef struct list_info_hold {
 	void *list_virt_addr;
 } list_info_hold_t;
 
-/* Rx descriptor structure */
+/* Rx descriptor structure for 1 buffer mode */
 typedef struct _RxD_t {
 	u64 Host_Control;	/* reserved for host */
 	u64 Control_1;
@@ -439,49 +439,54 @@ typedef struct _RxD_t {
 #define SET_RXD_MARKER		vBIT(THE_RXD_MARK, 0, 2)
 #define GET_RXD_MARKER(ctrl)	((ctrl & SET_RXD_MARKER) >> 62)
 
-#ifndef CONFIG_2BUFF_MODE
-#define MASK_BUFFER0_SIZE	vBIT(0x3FFF,2,14)
-#define SET_BUFFER0_SIZE(val)	vBIT(val,2,14)
-#else
-#define MASK_BUFFER0_SIZE	vBIT(0xFF,2,14)
-#define MASK_BUFFER1_SIZE	vBIT(0xFFFF,16,16)
-#define MASK_BUFFER2_SIZE	vBIT(0xFFFF,32,16)
-#define SET_BUFFER0_SIZE(val)	vBIT(val,8,8)
-#define SET_BUFFER1_SIZE(val)	vBIT(val,16,16)
-#define SET_BUFFER2_SIZE(val)	vBIT(val,32,16)
-#endif
-
 #define MASK_VLAN_TAG		vBIT(0xFFFF,48,16)
 #define SET_VLAN_TAG(val)	vBIT(val,48,16)
 #define SET_NUM_TAG(val)	vBIT(val,16,32)
 
-#ifndef CONFIG_2BUFF_MODE
-#define RXD_GET_BUFFER0_SIZE(Control_2) (u64)((Control_2 & vBIT(0x3FFF,2,14)))
-#else
-#define RXD_GET_BUFFER0_SIZE(Control_2) (u8)((Control_2 & MASK_BUFFER0_SIZE) \
-							>> 48)
-#define RXD_GET_BUFFER1_SIZE(Control_2) (u16)((Control_2 & MASK_BUFFER1_SIZE) \
-							>> 32)
-#define RXD_GET_BUFFER2_SIZE(Control_2) (u16)((Control_2 & MASK_BUFFER2_SIZE) \
-							>> 16)
+
+} RxD_t;
+/* Rx descriptor structure for 1 buffer mode */
+typedef struct _RxD1_t {
+	struct _RxD_t h;
+
+#define MASK_BUFFER0_SIZE_1	vBIT(0x3FFF,2,14)
+#define SET_BUFFER0_SIZE_1(val)	vBIT(val,2,14)
+#define RXD_GET_BUFFER0_SIZE_1(_Control_2) \
+	(u16)((_Control_2 & MASK_BUFFER0_SIZE_1) >> 48)
+	u64 Buffer0_ptr;
+} RxD1_t;
+/* Rx descriptor structure for 3 or 2 buffer mode */
+
+typedef struct _RxD3_t {
+	struct _RxD_t h;
+
+#define MASK_BUFFER0_SIZE_3	vBIT(0xFF,2,14)
+#define MASK_BUFFER1_SIZE_3	vBIT(0xFFFF,16,16)
+#define MASK_BUFFER2_SIZE_3	vBIT(0xFFFF,32,16)
+#define SET_BUFFER0_SIZE_3(val)	vBIT(val,8,8)
+#define SET_BUFFER1_SIZE_3(val)	vBIT(val,16,16)
+#define SET_BUFFER2_SIZE_3(val)	vBIT(val,32,16)
+#define RXD_GET_BUFFER0_SIZE_3(Control_2) \
+	(u8)((Control_2 & MASK_BUFFER0_SIZE_3) >> 48)
+#define RXD_GET_BUFFER1_SIZE_3(Control_2) \
+	(u16)((Control_2 & MASK_BUFFER1_SIZE_3) >> 32)
+#define RXD_GET_BUFFER2_SIZE_3(Control_2) \
+	(u16)((Control_2 & MASK_BUFFER2_SIZE_3) >> 16)
 #define BUF0_LEN	40
 #define BUF1_LEN	1
-#endif
 
 	u64 Buffer0_ptr;
-#ifdef CONFIG_2BUFF_MODE
 	u64 Buffer1_ptr;
 	u64 Buffer2_ptr;
-#endif
-} RxD_t;
+} RxD3_t;
+
 
 /* Structure that represents the Rx descriptor block which contains
  * 128 Rx descriptors.
  */
-#ifndef CONFIG_2BUFF_MODE
 typedef struct _RxD_block {
-#define MAX_RXDS_PER_BLOCK	127
-	RxD_t rxd[MAX_RXDS_PER_BLOCK];
+#define MAX_RXDS_PER_BLOCK_1	127
+	RxD1_t rxd[MAX_RXDS_PER_BLOCK_1];
 
 	u64 reserved_0;
 #define END_OF_BLOCK	0xFEFFFFFFFFFFFFFFULL
@@ -492,18 +497,13 @@ typedef struct _RxD_block {
 					 * the upper 32 bits should
 					 * be 0 */
 } RxD_block_t;
-#else
-typedef struct _RxD_block {
-#define MAX_RXDS_PER_BLOCK	85
-	RxD_t rxd[MAX_RXDS_PER_BLOCK];
 
-#define END_OF_BLOCK	0xFEFFFFFFFFFFFFFFULL
-	u64 reserved_1;		/* 0xFEFFFFFFFFFFFFFF to mark last Rxd
-				 * in this blk */
-	u64 pNext_RxD_Blk_physical;	/* Phy ponter to next blk. */
-} RxD_block_t;
 #define SIZE_OF_BLOCK	4096
 
+#define RXD_MODE_1	0
+#define RXD_MODE_3A	1
+#define RXD_MODE_3B	2
+
 /* Structure to hold virtual addresses of Buf0 and Buf1 in
  * 2buf mode. */
 typedef struct bufAdd {
@@ -512,7 +512,6 @@ typedef struct bufAdd {
 	void *ba_0;
 	void *ba_1;
 } buffAdd_t;
-#endif
 
 /* Structure which stores all the MAC control parameters */
 
@@ -539,10 +538,17 @@ typedef struct {
 
 typedef tx_curr_get_info_t tx_curr_put_info_t;
 
+
+typedef struct rxd_info {
+	void *virt_addr;
+	dma_addr_t dma_addr;
+}rxd_info_t;
+
 /* Structure that holds the Phy and virt addresses of the Blocks */
 typedef struct rx_block_info {
-	RxD_t *block_virt_addr;
+	void *block_virt_addr;
 	dma_addr_t block_dma_addr;
+	rxd_info_t *rxds;
 } rx_block_info_t;
 
 /* pre declaration of the nic structure */
@@ -578,10 +584,8 @@ typedef struct ring_info {
 	int put_pos;
 #endif
 
-#ifdef CONFIG_2BUFF_MODE
 	/* Buffer Address store. */
 	buffAdd_t **ba;
-#endif
 	nic_t *nic;
 } ring_info_t;
 
@@ -647,8 +651,6 @@ typedef struct {
 
 /* Default Tunable parameters of the NIC. */
 #define DEFAULT_FIFO_LEN	4096
-#define SMALL_RXD_CNT	30 * (MAX_RXDS_PER_BLOCK+1)
-#define LARGE_RXD_CNT	100 * (MAX_RXDS_PER_BLOCK+1)
 #define SMALL_BLK_CNT	30
 #define LARGE_BLK_CNT	100
 
@@ -678,6 +680,7 @@ struct msix_info_st {
 
 /* Structure representing one instance of the NIC */
 struct s2io_nic {
+	int rxd_mode;
 #ifdef CONFIG_S2IO_NAPI
 	/*
 	 * Count of packets to be processed in a given iteration, it will be indicated
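
[Editor's note] As a closing illustration, the put/get cursor arithmetic that replaces the old END_OF_BLOCK pointer-chasing can be modeled in isolation. The wrap rules below follow the get side (rx_intr_handler()) after this patch: the offset wraps once rxd_count[rxd_mode] descriptors of a block have been consumed, and the block index wraps at block_count. The ring_cursor type and helper name are hypothetical, for illustration only.

	#include <stdio.h>

	static const int rxd_count[4] = { 127, 85, 85, 63 };

	/* Hypothetical stand-in for rx_curr_get_info in the driver. */
	struct ring_cursor {
		int block_index;
		int offset;
	};

	/* Advance one RxD the way rx_intr_handler() does after this patch. */
	static void ring_advance(struct ring_cursor *c, int rxd_mode,
				 int block_count)
	{
		c->offset++;
		if (c->offset == rxd_count[rxd_mode]) {
			c->offset = 0;
			c->block_index++;
			if (c->block_index == block_count)
				c->block_index = 0;
		}
	}

	int main(void)
	{
		struct ring_cursor c = { 0, 0 };
		int i;

		/* Walk a 2-block ring in 2 buffer mode (RXD_MODE_3B). */
		for (i = 0; i < 2 * rxd_count[2]; i++)
			ring_advance(&c, 2, 2);
		printf("back at block %d, offset %d\n", c.block_index, c.offset);
		return 0;
	}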