Diffstat (limited to 'drivers/net/s2io.c')
-rw-r--r--  drivers/net/s2io.c | 542
1 file changed, 159 insertions(+), 383 deletions(-)
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c
index afef6c0c59fe..2be0a0f1b48f 100644
--- a/drivers/net/s2io.c
+++ b/drivers/net/s2io.c
@@ -32,12 +32,12 @@
32 * rx_ring_sz: This defines the number of receive blocks each ring can have. 32 * rx_ring_sz: This defines the number of receive blocks each ring can have.
33 * This is also an array of size 8. 33 * This is also an array of size 8.
34 * rx_ring_mode: This defines the operation mode of all 8 rings. The valid 34 * rx_ring_mode: This defines the operation mode of all 8 rings. The valid
35 * values are 1, 2 and 3. 35 * values are 1, 2.
36 * tx_fifo_num: This defines the number of Tx FIFOs that are used in the driver. 36 * tx_fifo_num: This defines the number of Tx FIFOs that are used in the driver.
37 * tx_fifo_len: This too is an array of 8. Each element defines the number of 37 * tx_fifo_len: This too is an array of 8. Each element defines the number of
38 * Tx descriptors that can be associated with each corresponding FIFO. 38 * Tx descriptors that can be associated with each corresponding FIFO.
39 * intr_type: This defines the type of interrupt. The values can be 0(INTA), 39 * intr_type: This defines the type of interrupt. The values can be 0(INTA),
40 * 1(MSI), 2(MSI_X). Default value is '0(INTA)' 40 * 2(MSI_X). Default value is '0(INTA)'
41 * lro: Specifies whether to enable Large Receive Offload (LRO) or not. 41 * lro: Specifies whether to enable Large Receive Offload (LRO) or not.
42 * Possible values '1' for enable '0' for disable. Default is '0' 42 * Possible values '1' for enable '0' for disable. Default is '0'
43 * lro_max_pkts: This parameter defines maximum number of packets can be 43 * lro_max_pkts: This parameter defines maximum number of packets can be
@@ -84,14 +84,14 @@
84#include "s2io.h" 84#include "s2io.h"
85#include "s2io-regs.h" 85#include "s2io-regs.h"
86 86
87#define DRV_VERSION "2.0.23.1" 87#define DRV_VERSION "2.0.25.1"
88 88
89/* S2io Driver name & version. */ 89/* S2io Driver name & version. */
90static char s2io_driver_name[] = "Neterion"; 90static char s2io_driver_name[] = "Neterion";
91static char s2io_driver_version[] = DRV_VERSION; 91static char s2io_driver_version[] = DRV_VERSION;
92 92
93static int rxd_size[4] = {32,48,48,64}; 93static int rxd_size[2] = {32,48};
94static int rxd_count[4] = {127,85,85,63}; 94static int rxd_count[2] = {127,85};
95 95
96static inline int RXD_IS_UP2DT(struct RxD_t *rxdp) 96static inline int RXD_IS_UP2DT(struct RxD_t *rxdp)
97{ 97{
@@ -282,6 +282,7 @@ static char ethtool_driver_stats_keys[][ETH_GSTRING_LEN] = {
282 ("lro_flush_due_to_max_pkts"), 282 ("lro_flush_due_to_max_pkts"),
283 ("lro_avg_aggr_pkts"), 283 ("lro_avg_aggr_pkts"),
284 ("mem_alloc_fail_cnt"), 284 ("mem_alloc_fail_cnt"),
285 ("pci_map_fail_cnt"),
285 ("watchdog_timer_cnt"), 286 ("watchdog_timer_cnt"),
286 ("mem_allocated"), 287 ("mem_allocated"),
287 ("mem_freed"), 288 ("mem_freed"),
@@ -426,7 +427,7 @@ S2IO_PARM_INT(bimodal, 0);
426S2IO_PARM_INT(l3l4hdr_size, 128); 427S2IO_PARM_INT(l3l4hdr_size, 128);
427/* Frequency of Rx desc syncs expressed as power of 2 */ 428/* Frequency of Rx desc syncs expressed as power of 2 */
428S2IO_PARM_INT(rxsync_frequency, 3); 429S2IO_PARM_INT(rxsync_frequency, 3);
429/* Interrupt type. Values can be 0(INTA), 1(MSI), 2(MSI_X) */ 430/* Interrupt type. Values can be 0(INTA), 2(MSI_X) */
430S2IO_PARM_INT(intr_type, 0); 431S2IO_PARM_INT(intr_type, 0);
431/* Large receive offload feature */ 432/* Large receive offload feature */
432S2IO_PARM_INT(lro, 0); 433S2IO_PARM_INT(lro, 0);
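For reference, S2IO_PARM_INT above is the driver's shorthand for declaring a load-time module parameter. A minimal sketch of how such a helper is typically written (the real macro is defined earlier in s2io.c and may differ in type or permission bits):

#include <linux/moduleparam.h>

/* Illustrative only: approximates the S2IO_PARM_INT helper used above. */
#define S2IO_PARM_INT(X, def_val)		\
	static unsigned int X = def_val;	\
	module_param(X, uint, 0)

/*
 * S2IO_PARM_INT(intr_type, 0) then expands to roughly
 *     static unsigned int intr_type = 0;
 *     module_param(intr_type, uint, 0);
 * so "modprobe s2io intr_type=2" selects MSI-X at load time.
 */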
@@ -701,7 +702,7 @@ static int init_shared_mem(struct s2io_nic *nic)
701 (u64) tmp_p_addr_next; 702 (u64) tmp_p_addr_next;
702 } 703 }
703 } 704 }
704 if (nic->rxd_mode >= RXD_MODE_3A) { 705 if (nic->rxd_mode == RXD_MODE_3B) {
705 /* 706 /*
706 * Allocation of Storages for buffer addresses in 2BUFF mode 707 * Allocation of Storages for buffer addresses in 2BUFF mode
707 * and the buffers as well. 708 * and the buffers as well.
@@ -870,7 +871,7 @@ static void free_shared_mem(struct s2io_nic *nic)
870 } 871 }
871 } 872 }
872 873
873 if (nic->rxd_mode >= RXD_MODE_3A) { 874 if (nic->rxd_mode == RXD_MODE_3B) {
874 /* Freeing buffer storage addresses in 2BUFF mode. */ 875 /* Freeing buffer storage addresses in 2BUFF mode. */
875 for (i = 0; i < config->rx_ring_num; i++) { 876 for (i = 0; i < config->rx_ring_num; i++) {
876 blk_cnt = config->rx_cfg[i].num_rxd / 877 blk_cnt = config->rx_cfg[i].num_rxd /
@@ -2233,44 +2234,6 @@ static void stop_nic(struct s2io_nic *nic)
2233 writeq(val64, &bar0->adapter_control); 2234 writeq(val64, &bar0->adapter_control);
2234} 2235}
2235 2236
2236static int fill_rxd_3buf(struct s2io_nic *nic, struct RxD_t *rxdp, struct \
2237 sk_buff *skb)
2238{
2239 struct net_device *dev = nic->dev;
2240 struct sk_buff *frag_list;
2241 void *tmp;
2242
2243 /* Buffer-1 receives L3/L4 headers */
2244 ((struct RxD3*)rxdp)->Buffer1_ptr = pci_map_single
2245 (nic->pdev, skb->data, l3l4hdr_size + 4,
2246 PCI_DMA_FROMDEVICE);
2247
2248 /* skb_shinfo(skb)->frag_list will have L4 data payload */
2249 skb_shinfo(skb)->frag_list = dev_alloc_skb(dev->mtu + ALIGN_SIZE);
2250 if (skb_shinfo(skb)->frag_list == NULL) {
2251 nic->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++;
2252 DBG_PRINT(INFO_DBG, "%s: dev_alloc_skb failed\n ", dev->name);
2253 return -ENOMEM ;
2254 }
2255 frag_list = skb_shinfo(skb)->frag_list;
2256 skb->truesize += frag_list->truesize;
2257 nic->mac_control.stats_info->sw_stat.mem_allocated
2258 += frag_list->truesize;
2259 frag_list->next = NULL;
2260 tmp = (void *)ALIGN((long)frag_list->data, ALIGN_SIZE + 1);
2261 frag_list->data = tmp;
2262 skb_reset_tail_pointer(frag_list);
2263
2264 /* Buffer-2 receives L4 data payload */
2265 ((struct RxD3*)rxdp)->Buffer2_ptr = pci_map_single(nic->pdev,
2266 frag_list->data, dev->mtu,
2267 PCI_DMA_FROMDEVICE);
2268 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(l3l4hdr_size + 4);
2269 rxdp->Control_2 |= SET_BUFFER2_SIZE_3(dev->mtu);
2270
2271 return SUCCESS;
2272}
2273
2274/** 2237/**
2275 * fill_rx_buffers - Allocates the Rx side skbs 2238 * fill_rx_buffers - Allocates the Rx side skbs
2276 * @nic: device private variable 2239 * @nic: device private variable
@@ -2307,6 +2270,9 @@ static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
2307 unsigned long flags; 2270 unsigned long flags;
2308 struct RxD_t *first_rxdp = NULL; 2271 struct RxD_t *first_rxdp = NULL;
2309 u64 Buffer0_ptr = 0, Buffer1_ptr = 0; 2272 u64 Buffer0_ptr = 0, Buffer1_ptr = 0;
2273 struct RxD1 *rxdp1;
2274 struct RxD3 *rxdp3;
2275 struct swStat *stats = &nic->mac_control.stats_info->sw_stat;
2310 2276
2311 mac_control = &nic->mac_control; 2277 mac_control = &nic->mac_control;
2312 config = &nic->config; 2278 config = &nic->config;
@@ -2359,7 +2325,7 @@ static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
2359 (block_no * (rxd_count[nic->rxd_mode] + 1)) + off; 2325 (block_no * (rxd_count[nic->rxd_mode] + 1)) + off;
2360 } 2326 }
2361 if ((rxdp->Control_1 & RXD_OWN_XENA) && 2327 if ((rxdp->Control_1 & RXD_OWN_XENA) &&
2362 ((nic->rxd_mode >= RXD_MODE_3A) && 2328 ((nic->rxd_mode == RXD_MODE_3B) &&
2363 (rxdp->Control_2 & BIT(0)))) { 2329 (rxdp->Control_2 & BIT(0)))) {
2364 mac_control->rings[ring_no].rx_curr_put_info. 2330 mac_control->rings[ring_no].rx_curr_put_info.
2365 offset = off; 2331 offset = off;
@@ -2370,10 +2336,8 @@ static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
2370 HEADER_802_2_SIZE + HEADER_SNAP_SIZE; 2336 HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
2371 if (nic->rxd_mode == RXD_MODE_1) 2337 if (nic->rxd_mode == RXD_MODE_1)
2372 size += NET_IP_ALIGN; 2338 size += NET_IP_ALIGN;
2373 else if (nic->rxd_mode == RXD_MODE_3B)
2374 size = dev->mtu + ALIGN_SIZE + BUF0_LEN + 4;
2375 else 2339 else
2376 size = l3l4hdr_size + ALIGN_SIZE + BUF0_LEN + 4; 2340 size = dev->mtu + ALIGN_SIZE + BUF0_LEN + 4;
2377 2341
2378 /* allocate skb */ 2342 /* allocate skb */
2379 skb = dev_alloc_skb(size); 2343 skb = dev_alloc_skb(size);
@@ -2392,33 +2356,35 @@ static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
2392 += skb->truesize; 2356 += skb->truesize;
2393 if (nic->rxd_mode == RXD_MODE_1) { 2357 if (nic->rxd_mode == RXD_MODE_1) {
2394 /* 1 buffer mode - normal operation mode */ 2358 /* 1 buffer mode - normal operation mode */
2359 rxdp1 = (struct RxD1*)rxdp;
2395 memset(rxdp, 0, sizeof(struct RxD1)); 2360 memset(rxdp, 0, sizeof(struct RxD1));
2396 skb_reserve(skb, NET_IP_ALIGN); 2361 skb_reserve(skb, NET_IP_ALIGN);
2397 ((struct RxD1*)rxdp)->Buffer0_ptr = pci_map_single 2362 rxdp1->Buffer0_ptr = pci_map_single
2398 (nic->pdev, skb->data, size - NET_IP_ALIGN, 2363 (nic->pdev, skb->data, size - NET_IP_ALIGN,
2399 PCI_DMA_FROMDEVICE); 2364 PCI_DMA_FROMDEVICE);
2365 if( (rxdp1->Buffer0_ptr == 0) ||
2366 (rxdp1->Buffer0_ptr ==
2367 DMA_ERROR_CODE))
2368 goto pci_map_failed;
2369
2400 rxdp->Control_2 = 2370 rxdp->Control_2 =
2401 SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN); 2371 SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN);
2402 2372
2403 } else if (nic->rxd_mode >= RXD_MODE_3A) { 2373 } else if (nic->rxd_mode == RXD_MODE_3B) {
2404 /* 2374 /*
2405 * 2 or 3 buffer mode - 2375 * 2 buffer mode -
2406 * Both 2 buffer mode and 3 buffer mode provides 128 2376 * 2 buffer mode provides 128
2407 * byte aligned receive buffers. 2377 * byte aligned receive buffers.
2408 *
2409 * 3 buffer mode provides header separation where in
2410 * skb->data will have L3/L4 headers where as
2411 * skb_shinfo(skb)->frag_list will have the L4 data
2412 * payload
2413 */ 2378 */
2414 2379
2380 rxdp3 = (struct RxD3*)rxdp;
2415 /* save buffer pointers to avoid frequent dma mapping */ 2381 /* save buffer pointers to avoid frequent dma mapping */
2416 Buffer0_ptr = ((struct RxD3*)rxdp)->Buffer0_ptr; 2382 Buffer0_ptr = rxdp3->Buffer0_ptr;
2417 Buffer1_ptr = ((struct RxD3*)rxdp)->Buffer1_ptr; 2383 Buffer1_ptr = rxdp3->Buffer1_ptr;
2418 memset(rxdp, 0, sizeof(struct RxD3)); 2384 memset(rxdp, 0, sizeof(struct RxD3));
2419 /* restore the buffer pointers for dma sync*/ 2385 /* restore the buffer pointers for dma sync*/
2420 ((struct RxD3*)rxdp)->Buffer0_ptr = Buffer0_ptr; 2386 rxdp3->Buffer0_ptr = Buffer0_ptr;
2421 ((struct RxD3*)rxdp)->Buffer1_ptr = Buffer1_ptr; 2387 rxdp3->Buffer1_ptr = Buffer1_ptr;
2422 2388
2423 ba = &mac_control->rings[ring_no].ba[block_no][off]; 2389 ba = &mac_control->rings[ring_no].ba[block_no][off];
2424 skb_reserve(skb, BUF0_LEN); 2390 skb_reserve(skb, BUF0_LEN);
@@ -2428,14 +2394,18 @@ static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
2428 skb->data = (void *) (unsigned long)tmp; 2394 skb->data = (void *) (unsigned long)tmp;
2429 skb_reset_tail_pointer(skb); 2395 skb_reset_tail_pointer(skb);
2430 2396
2431 if (!(((struct RxD3*)rxdp)->Buffer0_ptr)) 2397 if (!(rxdp3->Buffer0_ptr))
2432 ((struct RxD3*)rxdp)->Buffer0_ptr = 2398 rxdp3->Buffer0_ptr =
2433 pci_map_single(nic->pdev, ba->ba_0, BUF0_LEN, 2399 pci_map_single(nic->pdev, ba->ba_0, BUF0_LEN,
2434 PCI_DMA_FROMDEVICE); 2400 PCI_DMA_FROMDEVICE);
2435 else 2401 else
2436 pci_dma_sync_single_for_device(nic->pdev, 2402 pci_dma_sync_single_for_device(nic->pdev,
2437 (dma_addr_t) ((struct RxD3*)rxdp)->Buffer0_ptr, 2403 (dma_addr_t) rxdp3->Buffer0_ptr,
2438 BUF0_LEN, PCI_DMA_FROMDEVICE); 2404 BUF0_LEN, PCI_DMA_FROMDEVICE);
2405 if( (rxdp3->Buffer0_ptr == 0) ||
2406 (rxdp3->Buffer0_ptr == DMA_ERROR_CODE))
2407 goto pci_map_failed;
2408
2439 rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN); 2409 rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
2440 if (nic->rxd_mode == RXD_MODE_3B) { 2410 if (nic->rxd_mode == RXD_MODE_3B) {
2441 /* Two buffer mode */ 2411 /* Two buffer mode */
@@ -2444,33 +2414,30 @@ static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
2444 * Buffer2 will have L3/L4 header plus 2414 * Buffer2 will have L3/L4 header plus
2445 * L4 payload 2415 * L4 payload
2446 */ 2416 */
2447 ((struct RxD3*)rxdp)->Buffer2_ptr = pci_map_single 2417 rxdp3->Buffer2_ptr = pci_map_single
2448 (nic->pdev, skb->data, dev->mtu + 4, 2418 (nic->pdev, skb->data, dev->mtu + 4,
2449 PCI_DMA_FROMDEVICE); 2419 PCI_DMA_FROMDEVICE);
2450 2420
2451 /* Buffer-1 will be dummy buffer. Not used */ 2421 if( (rxdp3->Buffer2_ptr == 0) ||
2452 if (!(((struct RxD3*)rxdp)->Buffer1_ptr)) { 2422 (rxdp3->Buffer2_ptr == DMA_ERROR_CODE))
2453 ((struct RxD3*)rxdp)->Buffer1_ptr = 2423 goto pci_map_failed;
2424
2425 rxdp3->Buffer1_ptr =
2454 pci_map_single(nic->pdev, 2426 pci_map_single(nic->pdev,
2455 ba->ba_1, BUF1_LEN, 2427 ba->ba_1, BUF1_LEN,
2456 PCI_DMA_FROMDEVICE); 2428 PCI_DMA_FROMDEVICE);
2429 if( (rxdp3->Buffer1_ptr == 0) ||
2430 (rxdp3->Buffer1_ptr == DMA_ERROR_CODE)) {
2431 pci_unmap_single
2432 (nic->pdev,
2433 (dma_addr_t)rxdp3->Buffer2_ptr,
2434 dev->mtu + 4,
2435 PCI_DMA_FROMDEVICE);
2436 goto pci_map_failed;
2457 } 2437 }
2458 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1); 2438 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
2459 rxdp->Control_2 |= SET_BUFFER2_SIZE_3 2439 rxdp->Control_2 |= SET_BUFFER2_SIZE_3
2460 (dev->mtu + 4); 2440 (dev->mtu + 4);
2461 } else {
2462 /* 3 buffer mode */
2463 if (fill_rxd_3buf(nic, rxdp, skb) == -ENOMEM) {
2464 nic->mac_control.stats_info->sw_stat.\
2465 mem_freed += skb->truesize;
2466 dev_kfree_skb_irq(skb);
2467 if (first_rxdp) {
2468 wmb();
2469 first_rxdp->Control_1 |=
2470 RXD_OWN_XENA;
2471 }
2472 return -ENOMEM ;
2473 }
2474 } 2441 }
2475 rxdp->Control_2 |= BIT(0); 2442 rxdp->Control_2 |= BIT(0);
2476 } 2443 }
@@ -2505,6 +2472,11 @@ static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
2505 } 2472 }
2506 2473
2507 return SUCCESS; 2474 return SUCCESS;
2475pci_map_failed:
2476 stats->pci_map_fail_cnt++;
2477 stats->mem_freed += skb->truesize;
2478 dev_kfree_skb_irq(skb);
2479 return -ENOMEM;
2508} 2480}
2509 2481
2510static void free_rxd_blk(struct s2io_nic *sp, int ring_no, int blk) 2482static void free_rxd_blk(struct s2io_nic *sp, int ring_no, int blk)
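The pci_map_failed path added above follows a common pattern for handling PCI DMA mapping failures: check the address returned by pci_map_single(), and on failure count the event, release the skb, and return -ENOMEM so the caller can retry on a later refill. A stand-alone sketch of the idea, with hypothetical ex_* names, assuming DMA_ERROR_CODE is the per-architecture "mapping failed" value defined in s2io.h:

#include <linux/pci.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>

/* Hypothetical types for illustration; the driver uses its own RxD/swStat. */
struct ex_stats { unsigned long pci_map_fail_cnt, mem_freed; };
struct ex_desc  { dma_addr_t buf; };

static int ex_fill_rx_buffer(struct pci_dev *pdev, struct ex_desc *desc,
			     struct ex_stats *stats, struct sk_buff *skb,
			     int len)
{
	desc->buf = pci_map_single(pdev, skb->data, len, PCI_DMA_FROMDEVICE);
	if ((desc->buf == 0) || (desc->buf == DMA_ERROR_CODE)) {
		/* Mapping failed: count it, drop the skb, let the caller retry. */
		stats->pci_map_fail_cnt++;
		stats->mem_freed += skb->truesize;
		dev_kfree_skb_irq(skb);
		return -ENOMEM;
	}
	return 0;
}

The Tx path in s2io_xmit() applies the same check, additionally stopping the queue before dropping the packet.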
@@ -2515,6 +2487,8 @@ static void free_rxd_blk(struct s2io_nic *sp, int ring_no, int blk)
2515 struct RxD_t *rxdp; 2487 struct RxD_t *rxdp;
2516 struct mac_info *mac_control; 2488 struct mac_info *mac_control;
2517 struct buffAdd *ba; 2489 struct buffAdd *ba;
2490 struct RxD1 *rxdp1;
2491 struct RxD3 *rxdp3;
2518 2492
2519 mac_control = &sp->mac_control; 2493 mac_control = &sp->mac_control;
2520 for (j = 0 ; j < rxd_count[sp->rxd_mode]; j++) { 2494 for (j = 0 ; j < rxd_count[sp->rxd_mode]; j++) {
@@ -2526,40 +2500,30 @@ static void free_rxd_blk(struct s2io_nic *sp, int ring_no, int blk)
2526 continue; 2500 continue;
2527 } 2501 }
2528 if (sp->rxd_mode == RXD_MODE_1) { 2502 if (sp->rxd_mode == RXD_MODE_1) {
2503 rxdp1 = (struct RxD1*)rxdp;
2529 pci_unmap_single(sp->pdev, (dma_addr_t) 2504 pci_unmap_single(sp->pdev, (dma_addr_t)
2530 ((struct RxD1*)rxdp)->Buffer0_ptr, 2505 rxdp1->Buffer0_ptr,
2531 dev->mtu + 2506 dev->mtu +
2532 HEADER_ETHERNET_II_802_3_SIZE 2507 HEADER_ETHERNET_II_802_3_SIZE
2533 + HEADER_802_2_SIZE + 2508 + HEADER_802_2_SIZE +
2534 HEADER_SNAP_SIZE, 2509 HEADER_SNAP_SIZE,
2535 PCI_DMA_FROMDEVICE); 2510 PCI_DMA_FROMDEVICE);
2536 memset(rxdp, 0, sizeof(struct RxD1)); 2511 memset(rxdp, 0, sizeof(struct RxD1));
2537 } else if(sp->rxd_mode == RXD_MODE_3B) { 2512 } else if(sp->rxd_mode == RXD_MODE_3B) {
2513 rxdp3 = (struct RxD3*)rxdp;
2538 ba = &mac_control->rings[ring_no]. 2514 ba = &mac_control->rings[ring_no].
2539 ba[blk][j]; 2515 ba[blk][j];
2540 pci_unmap_single(sp->pdev, (dma_addr_t) 2516 pci_unmap_single(sp->pdev, (dma_addr_t)
2541 ((struct RxD3*)rxdp)->Buffer0_ptr, 2517 rxdp3->Buffer0_ptr,
2542 BUF0_LEN, 2518 BUF0_LEN,
2543 PCI_DMA_FROMDEVICE);
2544 pci_unmap_single(sp->pdev, (dma_addr_t)
2545 ((struct RxD3*)rxdp)->Buffer1_ptr,
2546 BUF1_LEN,
2547 PCI_DMA_FROMDEVICE);
2548 pci_unmap_single(sp->pdev, (dma_addr_t)
2549 ((struct RxD3*)rxdp)->Buffer2_ptr,
2550 dev->mtu + 4,
2551 PCI_DMA_FROMDEVICE);
2552 memset(rxdp, 0, sizeof(struct RxD3));
2553 } else {
2554 pci_unmap_single(sp->pdev, (dma_addr_t)
2555 ((struct RxD3*)rxdp)->Buffer0_ptr, BUF0_LEN,
2556 PCI_DMA_FROMDEVICE); 2519 PCI_DMA_FROMDEVICE);
2557 pci_unmap_single(sp->pdev, (dma_addr_t) 2520 pci_unmap_single(sp->pdev, (dma_addr_t)
2558 ((struct RxD3*)rxdp)->Buffer1_ptr, 2521 rxdp3->Buffer1_ptr,
2559 l3l4hdr_size + 4, 2522 BUF1_LEN,
2560 PCI_DMA_FROMDEVICE); 2523 PCI_DMA_FROMDEVICE);
2561 pci_unmap_single(sp->pdev, (dma_addr_t) 2524 pci_unmap_single(sp->pdev, (dma_addr_t)
2562 ((struct RxD3*)rxdp)->Buffer2_ptr, dev->mtu, 2525 rxdp3->Buffer2_ptr,
2526 dev->mtu + 4,
2563 PCI_DMA_FROMDEVICE); 2527 PCI_DMA_FROMDEVICE);
2564 memset(rxdp, 0, sizeof(struct RxD3)); 2528 memset(rxdp, 0, sizeof(struct RxD3));
2565 } 2529 }
@@ -2756,6 +2720,8 @@ static void rx_intr_handler(struct ring_info *ring_data)
2756 struct sk_buff *skb; 2720 struct sk_buff *skb;
2757 int pkt_cnt = 0; 2721 int pkt_cnt = 0;
2758 int i; 2722 int i;
2723 struct RxD1* rxdp1;
2724 struct RxD3* rxdp3;
2759 2725
2760 spin_lock(&nic->rx_lock); 2726 spin_lock(&nic->rx_lock);
2761 if (atomic_read(&nic->card_state) == CARD_DOWN) { 2727 if (atomic_read(&nic->card_state) == CARD_DOWN) {
@@ -2796,32 +2762,23 @@ static void rx_intr_handler(struct ring_info *ring_data)
2796 return; 2762 return;
2797 } 2763 }
2798 if (nic->rxd_mode == RXD_MODE_1) { 2764 if (nic->rxd_mode == RXD_MODE_1) {
2765 rxdp1 = (struct RxD1*)rxdp;
2799 pci_unmap_single(nic->pdev, (dma_addr_t) 2766 pci_unmap_single(nic->pdev, (dma_addr_t)
2800 ((struct RxD1*)rxdp)->Buffer0_ptr, 2767 rxdp1->Buffer0_ptr,
2801 dev->mtu + 2768 dev->mtu +
2802 HEADER_ETHERNET_II_802_3_SIZE + 2769 HEADER_ETHERNET_II_802_3_SIZE +
2803 HEADER_802_2_SIZE + 2770 HEADER_802_2_SIZE +
2804 HEADER_SNAP_SIZE, 2771 HEADER_SNAP_SIZE,
2805 PCI_DMA_FROMDEVICE); 2772 PCI_DMA_FROMDEVICE);
2806 } else if (nic->rxd_mode == RXD_MODE_3B) { 2773 } else if (nic->rxd_mode == RXD_MODE_3B) {
2774 rxdp3 = (struct RxD3*)rxdp;
2807 pci_dma_sync_single_for_cpu(nic->pdev, (dma_addr_t) 2775 pci_dma_sync_single_for_cpu(nic->pdev, (dma_addr_t)
2808 ((struct RxD3*)rxdp)->Buffer0_ptr, 2776 rxdp3->Buffer0_ptr,
2809 BUF0_LEN, PCI_DMA_FROMDEVICE); 2777 BUF0_LEN, PCI_DMA_FROMDEVICE);
2810 pci_unmap_single(nic->pdev, (dma_addr_t)
2811 ((struct RxD3*)rxdp)->Buffer2_ptr,
2812 dev->mtu + 4,
2813 PCI_DMA_FROMDEVICE);
2814 } else {
2815 pci_dma_sync_single_for_cpu(nic->pdev, (dma_addr_t)
2816 ((struct RxD3*)rxdp)->Buffer0_ptr, BUF0_LEN,
2817 PCI_DMA_FROMDEVICE);
2818 pci_unmap_single(nic->pdev, (dma_addr_t)
2819 ((struct RxD3*)rxdp)->Buffer1_ptr,
2820 l3l4hdr_size + 4,
2821 PCI_DMA_FROMDEVICE);
2822 pci_unmap_single(nic->pdev, (dma_addr_t) 2778 pci_unmap_single(nic->pdev, (dma_addr_t)
2823 ((struct RxD3*)rxdp)->Buffer2_ptr, 2779 rxdp3->Buffer2_ptr,
2824 dev->mtu, PCI_DMA_FROMDEVICE); 2780 dev->mtu + 4,
2781 PCI_DMA_FROMDEVICE);
2825 } 2782 }
2826 prefetch(skb->data); 2783 prefetch(skb->data);
2827 rx_osm_handler(ring_data, rxdp); 2784 rx_osm_handler(ring_data, rxdp);
@@ -3425,23 +3382,8 @@ static void s2io_reset(struct s2io_nic * sp)
3425 /* Back up the PCI-X CMD reg, dont want to lose MMRBC, OST settings */ 3382 /* Back up the PCI-X CMD reg, dont want to lose MMRBC, OST settings */
3426 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER, &(pci_cmd)); 3383 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER, &(pci_cmd));
3427 3384
3428 if (sp->device_type == XFRAME_II_DEVICE) {
3429 int ret;
3430 ret = pci_set_power_state(sp->pdev, 3);
3431 if (!ret)
3432 ret = pci_set_power_state(sp->pdev, 0);
3433 else {
3434 DBG_PRINT(ERR_DBG,"%s PME based SW_Reset failed!\n",
3435 __FUNCTION__);
3436 goto old_way;
3437 }
3438 msleep(20);
3439 goto new_way;
3440 }
3441old_way:
3442 val64 = SW_RESET_ALL; 3385 val64 = SW_RESET_ALL;
3443 writeq(val64, &bar0->sw_reset); 3386 writeq(val64, &bar0->sw_reset);
3444new_way:
3445 if (strstr(sp->product_name, "CX4")) { 3387 if (strstr(sp->product_name, "CX4")) {
3446 msleep(750); 3388 msleep(750);
3447 } 3389 }
@@ -3731,56 +3673,6 @@ static void store_xmsi_data(struct s2io_nic *nic)
3731 } 3673 }
3732} 3674}
3733 3675
3734int s2io_enable_msi(struct s2io_nic *nic)
3735{
3736 struct XENA_dev_config __iomem *bar0 = nic->bar0;
3737 u16 msi_ctrl, msg_val;
3738 struct config_param *config = &nic->config;
3739 struct net_device *dev = nic->dev;
3740 u64 val64, tx_mat, rx_mat;
3741 int i, err;
3742
3743 val64 = readq(&bar0->pic_control);
3744 val64 &= ~BIT(1);
3745 writeq(val64, &bar0->pic_control);
3746
3747 err = pci_enable_msi(nic->pdev);
3748 if (err) {
3749 DBG_PRINT(ERR_DBG, "%s: enabling MSI failed\n",
3750 nic->dev->name);
3751 return err;
3752 }
3753
3754 /*
3755 * Enable MSI and use MSI-1 in stead of the standard MSI-0
3756 * for interrupt handling.
3757 */
3758 pci_read_config_word(nic->pdev, 0x4c, &msg_val);
3759 msg_val ^= 0x1;
3760 pci_write_config_word(nic->pdev, 0x4c, msg_val);
3761 pci_read_config_word(nic->pdev, 0x4c, &msg_val);
3762
3763 pci_read_config_word(nic->pdev, 0x42, &msi_ctrl);
3764 msi_ctrl |= 0x10;
3765 pci_write_config_word(nic->pdev, 0x42, msi_ctrl);
3766
3767 /* program MSI-1 into all usable Tx_Mat and Rx_Mat fields */
3768 tx_mat = readq(&bar0->tx_mat0_n[0]);
3769 for (i=0; i<config->tx_fifo_num; i++) {
3770 tx_mat |= TX_MAT_SET(i, 1);
3771 }
3772 writeq(tx_mat, &bar0->tx_mat0_n[0]);
3773
3774 rx_mat = readq(&bar0->rx_mat);
3775 for (i=0; i<config->rx_ring_num; i++) {
3776 rx_mat |= RX_MAT_SET(i, 1);
3777 }
3778 writeq(rx_mat, &bar0->rx_mat);
3779
3780 dev->irq = nic->pdev->irq;
3781 return 0;
3782}
3783
3784static int s2io_enable_msi_x(struct s2io_nic *nic) 3676static int s2io_enable_msi_x(struct s2io_nic *nic)
3785{ 3677{
3786 struct XENA_dev_config __iomem *bar0 = nic->bar0; 3678 struct XENA_dev_config __iomem *bar0 = nic->bar0;
@@ -4001,6 +3893,7 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
4001 struct mac_info *mac_control; 3893 struct mac_info *mac_control;
4002 struct config_param *config; 3894 struct config_param *config;
4003 int offload_type; 3895 int offload_type;
3896 struct swStat *stats = &sp->mac_control.stats_info->sw_stat;
4004 3897
4005 mac_control = &sp->mac_control; 3898 mac_control = &sp->mac_control;
4006 config = &sp->config; 3899 config = &sp->config;
@@ -4085,11 +3978,18 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
4085 txdp->Buffer_Pointer = pci_map_single(sp->pdev, 3978 txdp->Buffer_Pointer = pci_map_single(sp->pdev,
4086 sp->ufo_in_band_v, 3979 sp->ufo_in_band_v,
4087 sizeof(u64), PCI_DMA_TODEVICE); 3980 sizeof(u64), PCI_DMA_TODEVICE);
3981 if((txdp->Buffer_Pointer == 0) ||
3982 (txdp->Buffer_Pointer == DMA_ERROR_CODE))
3983 goto pci_map_failed;
4088 txdp++; 3984 txdp++;
4089 } 3985 }
4090 3986
4091 txdp->Buffer_Pointer = pci_map_single 3987 txdp->Buffer_Pointer = pci_map_single
4092 (sp->pdev, skb->data, frg_len, PCI_DMA_TODEVICE); 3988 (sp->pdev, skb->data, frg_len, PCI_DMA_TODEVICE);
3989 if((txdp->Buffer_Pointer == 0) ||
3990 (txdp->Buffer_Pointer == DMA_ERROR_CODE))
3991 goto pci_map_failed;
3992
4093 txdp->Host_Control = (unsigned long) skb; 3993 txdp->Host_Control = (unsigned long) skb;
4094 txdp->Control_1 |= TXD_BUFFER0_SIZE(frg_len); 3994 txdp->Control_1 |= TXD_BUFFER0_SIZE(frg_len);
4095 if (offload_type == SKB_GSO_UDP) 3995 if (offload_type == SKB_GSO_UDP)
@@ -4146,6 +4046,13 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
4146 spin_unlock_irqrestore(&sp->tx_lock, flags); 4046 spin_unlock_irqrestore(&sp->tx_lock, flags);
4147 4047
4148 return 0; 4048 return 0;
4049pci_map_failed:
4050 stats->pci_map_fail_cnt++;
4051 netif_stop_queue(dev);
4052 stats->mem_freed += skb->truesize;
4053 dev_kfree_skb(skb);
4054 spin_unlock_irqrestore(&sp->tx_lock, flags);
4055 return 0;
4149} 4056}
4150 4057
4151static void 4058static void
@@ -4186,39 +4093,6 @@ static int s2io_chk_rx_buffers(struct s2io_nic *sp, int rng_n)
4186 return 0; 4093 return 0;
4187} 4094}
4188 4095
4189static irqreturn_t s2io_msi_handle(int irq, void *dev_id)
4190{
4191 struct net_device *dev = (struct net_device *) dev_id;
4192 struct s2io_nic *sp = dev->priv;
4193 int i;
4194 struct mac_info *mac_control;
4195 struct config_param *config;
4196
4197 atomic_inc(&sp->isr_cnt);
4198 mac_control = &sp->mac_control;
4199 config = &sp->config;
4200 DBG_PRINT(INTR_DBG, "%s: MSI handler\n", __FUNCTION__);
4201
4202 /* If Intr is because of Rx Traffic */
4203 for (i = 0; i < config->rx_ring_num; i++)
4204 rx_intr_handler(&mac_control->rings[i]);
4205
4206 /* If Intr is because of Tx Traffic */
4207 for (i = 0; i < config->tx_fifo_num; i++)
4208 tx_intr_handler(&mac_control->fifos[i]);
4209
4210 /*
4211 * If the Rx buffer count is below the panic threshold then
4212 * reallocate the buffers from the interrupt handler itself,
4213 * else schedule a tasklet to reallocate the buffers.
4214 */
4215 for (i = 0; i < config->rx_ring_num; i++)
4216 s2io_chk_rx_buffers(sp, i);
4217
4218 atomic_dec(&sp->isr_cnt);
4219 return IRQ_HANDLED;
4220}
4221
4222static irqreturn_t s2io_msix_ring_handle(int irq, void *dev_id) 4096static irqreturn_t s2io_msix_ring_handle(int irq, void *dev_id)
4223{ 4097{
4224 struct ring_info *ring = (struct ring_info *)dev_id; 4098 struct ring_info *ring = (struct ring_info *)dev_id;
@@ -4927,19 +4801,17 @@ static void s2io_ethtool_gringparam(struct net_device *dev,
4927 ering->rx_max_pending = MAX_RX_DESC_1; 4801 ering->rx_max_pending = MAX_RX_DESC_1;
4928 else if (sp->rxd_mode == RXD_MODE_3B) 4802 else if (sp->rxd_mode == RXD_MODE_3B)
4929 ering->rx_max_pending = MAX_RX_DESC_2; 4803 ering->rx_max_pending = MAX_RX_DESC_2;
4930 else if (sp->rxd_mode == RXD_MODE_3A)
4931 ering->rx_max_pending = MAX_RX_DESC_3;
4932 4804
4933 ering->tx_max_pending = MAX_TX_DESC; 4805 ering->tx_max_pending = MAX_TX_DESC;
4934 for (i = 0 ; i < sp->config.tx_fifo_num ; i++) { 4806 for (i = 0 ; i < sp->config.tx_fifo_num ; i++)
4935 tx_desc_count += sp->config.tx_cfg[i].fifo_len; 4807 tx_desc_count += sp->config.tx_cfg[i].fifo_len;
4936 } 4808
4937 DBG_PRINT(INFO_DBG,"\nmax txds : %d\n",sp->config.max_txds); 4809 DBG_PRINT(INFO_DBG,"\nmax txds : %d\n",sp->config.max_txds);
4938 ering->tx_pending = tx_desc_count; 4810 ering->tx_pending = tx_desc_count;
4939 rx_desc_count = 0; 4811 rx_desc_count = 0;
4940 for (i = 0 ; i < sp->config.rx_ring_num ; i++) { 4812 for (i = 0 ; i < sp->config.rx_ring_num ; i++)
4941 rx_desc_count += sp->config.rx_cfg[i].num_rxd; 4813 rx_desc_count += sp->config.rx_cfg[i].num_rxd;
4942 } 4814
4943 ering->rx_pending = rx_desc_count; 4815 ering->rx_pending = rx_desc_count;
4944 4816
4945 ering->rx_mini_max_pending = 0; 4817 ering->rx_mini_max_pending = 0;
@@ -5923,6 +5795,7 @@ static void s2io_get_ethtool_stats(struct net_device *dev,
5923 else 5795 else
5924 tmp_stats[i++] = 0; 5796 tmp_stats[i++] = 0;
5925 tmp_stats[i++] = stat_info->sw_stat.mem_alloc_fail_cnt; 5797 tmp_stats[i++] = stat_info->sw_stat.mem_alloc_fail_cnt;
5798 tmp_stats[i++] = stat_info->sw_stat.pci_map_fail_cnt;
5926 tmp_stats[i++] = stat_info->sw_stat.watchdog_timer_cnt; 5799 tmp_stats[i++] = stat_info->sw_stat.watchdog_timer_cnt;
5927 tmp_stats[i++] = stat_info->sw_stat.mem_allocated; 5800 tmp_stats[i++] = stat_info->sw_stat.mem_allocated;
5928 tmp_stats[i++] = stat_info->sw_stat.mem_freed; 5801 tmp_stats[i++] = stat_info->sw_stat.mem_freed;
@@ -6266,9 +6139,10 @@ static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp,
6266 u64 *temp2, int size) 6139 u64 *temp2, int size)
6267{ 6140{
6268 struct net_device *dev = sp->dev; 6141 struct net_device *dev = sp->dev;
6269 struct sk_buff *frag_list; 6142 struct swStat *stats = &sp->mac_control.stats_info->sw_stat;
6270 6143
6271 if ((sp->rxd_mode == RXD_MODE_1) && (rxdp->Host_Control == 0)) { 6144 if ((sp->rxd_mode == RXD_MODE_1) && (rxdp->Host_Control == 0)) {
6145 struct RxD1 *rxdp1 = (struct RxD1 *)rxdp;
6272 /* allocate skb */ 6146 /* allocate skb */
6273 if (*skb) { 6147 if (*skb) {
6274 DBG_PRINT(INFO_DBG, "SKB is not NULL\n"); 6148 DBG_PRINT(INFO_DBG, "SKB is not NULL\n");
@@ -6277,7 +6151,7 @@ static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp,
6277 * using same mapped address for the Rxd 6151 * using same mapped address for the Rxd
6278 * buffer pointer 6152 * buffer pointer
6279 */ 6153 */
6280 ((struct RxD1*)rxdp)->Buffer0_ptr = *temp0; 6154 rxdp1->Buffer0_ptr = *temp0;
6281 } else { 6155 } else {
6282 *skb = dev_alloc_skb(size); 6156 *skb = dev_alloc_skb(size);
6283 if (!(*skb)) { 6157 if (!(*skb)) {
@@ -6294,18 +6168,23 @@ static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp,
6294 * such it will be used for next rxd whose 6168 * such it will be used for next rxd whose
6295 * Host Control is NULL 6169 * Host Control is NULL
6296 */ 6170 */
6297 ((struct RxD1*)rxdp)->Buffer0_ptr = *temp0 = 6171 rxdp1->Buffer0_ptr = *temp0 =
6298 pci_map_single( sp->pdev, (*skb)->data, 6172 pci_map_single( sp->pdev, (*skb)->data,
6299 size - NET_IP_ALIGN, 6173 size - NET_IP_ALIGN,
6300 PCI_DMA_FROMDEVICE); 6174 PCI_DMA_FROMDEVICE);
6175 if( (rxdp1->Buffer0_ptr == 0) ||
6176 (rxdp1->Buffer0_ptr == DMA_ERROR_CODE)) {
6177 goto memalloc_failed;
6178 }
6301 rxdp->Host_Control = (unsigned long) (*skb); 6179 rxdp->Host_Control = (unsigned long) (*skb);
6302 } 6180 }
6303 } else if ((sp->rxd_mode == RXD_MODE_3B) && (rxdp->Host_Control == 0)) { 6181 } else if ((sp->rxd_mode == RXD_MODE_3B) && (rxdp->Host_Control == 0)) {
6182 struct RxD3 *rxdp3 = (struct RxD3 *)rxdp;
6304 /* Two buffer Mode */ 6183 /* Two buffer Mode */
6305 if (*skb) { 6184 if (*skb) {
6306 ((struct RxD3*)rxdp)->Buffer2_ptr = *temp2; 6185 rxdp3->Buffer2_ptr = *temp2;
6307 ((struct RxD3*)rxdp)->Buffer0_ptr = *temp0; 6186 rxdp3->Buffer0_ptr = *temp0;
6308 ((struct RxD3*)rxdp)->Buffer1_ptr = *temp1; 6187 rxdp3->Buffer1_ptr = *temp1;
6309 } else { 6188 } else {
6310 *skb = dev_alloc_skb(size); 6189 *skb = dev_alloc_skb(size);
6311 if (!(*skb)) { 6190 if (!(*skb)) {
@@ -6318,73 +6197,47 @@ static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp,
6318 } 6197 }
6319 sp->mac_control.stats_info->sw_stat.mem_allocated 6198 sp->mac_control.stats_info->sw_stat.mem_allocated
6320 += (*skb)->truesize; 6199 += (*skb)->truesize;
6321 ((struct RxD3*)rxdp)->Buffer2_ptr = *temp2 = 6200 rxdp3->Buffer2_ptr = *temp2 =
6322 pci_map_single(sp->pdev, (*skb)->data, 6201 pci_map_single(sp->pdev, (*skb)->data,
6323 dev->mtu + 4, 6202 dev->mtu + 4,
6324 PCI_DMA_FROMDEVICE); 6203 PCI_DMA_FROMDEVICE);
6325 ((struct RxD3*)rxdp)->Buffer0_ptr = *temp0 = 6204 if( (rxdp3->Buffer2_ptr == 0) ||
6205 (rxdp3->Buffer2_ptr == DMA_ERROR_CODE)) {
6206 goto memalloc_failed;
6207 }
6208 rxdp3->Buffer0_ptr = *temp0 =
6326 pci_map_single( sp->pdev, ba->ba_0, BUF0_LEN, 6209 pci_map_single( sp->pdev, ba->ba_0, BUF0_LEN,
6327 PCI_DMA_FROMDEVICE); 6210 PCI_DMA_FROMDEVICE);
6211 if( (rxdp3->Buffer0_ptr == 0) ||
6212 (rxdp3->Buffer0_ptr == DMA_ERROR_CODE)) {
6213 pci_unmap_single (sp->pdev,
6214 (dma_addr_t)(*skb)->data,
6215 dev->mtu + 4, PCI_DMA_FROMDEVICE);
6216 goto memalloc_failed;
6217 }
6328 rxdp->Host_Control = (unsigned long) (*skb); 6218 rxdp->Host_Control = (unsigned long) (*skb);
6329 6219
6330 /* Buffer-1 will be dummy buffer not used */ 6220 /* Buffer-1 will be dummy buffer not used */
6331 ((struct RxD3*)rxdp)->Buffer1_ptr = *temp1 = 6221 rxdp3->Buffer1_ptr = *temp1 =
6332 pci_map_single(sp->pdev, ba->ba_1, BUF1_LEN, 6222 pci_map_single(sp->pdev, ba->ba_1, BUF1_LEN,
6333 PCI_DMA_FROMDEVICE);
6334 }
6335 } else if ((rxdp->Host_Control == 0)) {
6336 /* Three buffer mode */
6337 if (*skb) {
6338 ((struct RxD3*)rxdp)->Buffer0_ptr = *temp0;
6339 ((struct RxD3*)rxdp)->Buffer1_ptr = *temp1;
6340 ((struct RxD3*)rxdp)->Buffer2_ptr = *temp2;
6341 } else {
6342 *skb = dev_alloc_skb(size);
6343 if (!(*skb)) {
6344 DBG_PRINT(INFO_DBG, "%s: Out of ", dev->name);
6345 DBG_PRINT(INFO_DBG, "memory to allocate ");
6346 DBG_PRINT(INFO_DBG, "3 buf mode SKBs\n");
6347 sp->mac_control.stats_info->sw_stat. \
6348 mem_alloc_fail_cnt++;
6349 return -ENOMEM;
6350 }
6351 sp->mac_control.stats_info->sw_stat.mem_allocated
6352 += (*skb)->truesize;
6353 ((struct RxD3*)rxdp)->Buffer0_ptr = *temp0 =
6354 pci_map_single(sp->pdev, ba->ba_0, BUF0_LEN,
6355 PCI_DMA_FROMDEVICE);
6356 /* Buffer-1 receives L3/L4 headers */
6357 ((struct RxD3*)rxdp)->Buffer1_ptr = *temp1 =
6358 pci_map_single( sp->pdev, (*skb)->data,
6359 l3l4hdr_size + 4,
6360 PCI_DMA_FROMDEVICE); 6223 PCI_DMA_FROMDEVICE);
6361 /* 6224 if( (rxdp3->Buffer1_ptr == 0) ||
6362 * skb_shinfo(skb)->frag_list will have L4 6225 (rxdp3->Buffer1_ptr == DMA_ERROR_CODE)) {
6363 * data payload 6226 pci_unmap_single (sp->pdev,
6364 */ 6227 (dma_addr_t)rxdp3->Buffer2_ptr,
6365 skb_shinfo(*skb)->frag_list = dev_alloc_skb(dev->mtu + 6228 dev->mtu + 4, PCI_DMA_FROMDEVICE);
6366 ALIGN_SIZE); 6229 goto memalloc_failed;
6367 if (skb_shinfo(*skb)->frag_list == NULL) {
6368 DBG_PRINT(ERR_DBG, "%s: dev_alloc_skb \
6369 failed\n ", dev->name);
6370 sp->mac_control.stats_info->sw_stat. \
6371 mem_alloc_fail_cnt++;
6372 return -ENOMEM ;
6373 } 6230 }
6374 frag_list = skb_shinfo(*skb)->frag_list;
6375 frag_list->next = NULL;
6376 sp->mac_control.stats_info->sw_stat.mem_allocated
6377 += frag_list->truesize;
6378 /*
6379 * Buffer-2 receives L4 data payload
6380 */
6381 ((struct RxD3*)rxdp)->Buffer2_ptr = *temp2 =
6382 pci_map_single( sp->pdev, frag_list->data,
6383 dev->mtu, PCI_DMA_FROMDEVICE);
6384 } 6231 }
6385 } 6232 }
6386 return 0; 6233 return 0;
6234 memalloc_failed:
6235 stats->pci_map_fail_cnt++;
6236 stats->mem_freed += (*skb)->truesize;
6237 dev_kfree_skb(*skb);
6238 return -ENOMEM;
6387} 6239}
6240
6388static void set_rxd_buffer_size(struct s2io_nic *sp, struct RxD_t *rxdp, 6241static void set_rxd_buffer_size(struct s2io_nic *sp, struct RxD_t *rxdp,
6389 int size) 6242 int size)
6390{ 6243{
@@ -6395,10 +6248,6 @@ static void set_rxd_buffer_size(struct s2io_nic *sp, struct RxD_t *rxdp,
6395 rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN); 6248 rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
6396 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1); 6249 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
6397 rxdp->Control_2 |= SET_BUFFER2_SIZE_3( dev->mtu + 4); 6250 rxdp->Control_2 |= SET_BUFFER2_SIZE_3( dev->mtu + 4);
6398 } else {
6399 rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
6400 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(l3l4hdr_size + 4);
6401 rxdp->Control_2 |= SET_BUFFER2_SIZE_3(dev->mtu);
6402 } 6251 }
6403} 6252}
6404 6253
@@ -6420,8 +6269,6 @@ static int rxd_owner_bit_reset(struct s2io_nic *sp)
6420 size += NET_IP_ALIGN; 6269 size += NET_IP_ALIGN;
6421 else if (sp->rxd_mode == RXD_MODE_3B) 6270 else if (sp->rxd_mode == RXD_MODE_3B)
6422 size = dev->mtu + ALIGN_SIZE + BUF0_LEN + 4; 6271 size = dev->mtu + ALIGN_SIZE + BUF0_LEN + 4;
6423 else
6424 size = l3l4hdr_size + ALIGN_SIZE + BUF0_LEN + 4;
6425 6272
6426 for (i = 0; i < config->rx_ring_num; i++) { 6273 for (i = 0; i < config->rx_ring_num; i++) {
6427 blk_cnt = config->rx_cfg[i].num_rxd / 6274 blk_cnt = config->rx_cfg[i].num_rxd /
@@ -6431,7 +6278,7 @@ static int rxd_owner_bit_reset(struct s2io_nic *sp)
6431 for (k = 0; k < rxd_count[sp->rxd_mode]; k++) { 6278 for (k = 0; k < rxd_count[sp->rxd_mode]; k++) {
6432 rxdp = mac_control->rings[i]. 6279 rxdp = mac_control->rings[i].
6433 rx_blocks[j].rxds[k].virt_addr; 6280 rx_blocks[j].rxds[k].virt_addr;
6434 if(sp->rxd_mode >= RXD_MODE_3A) 6281 if(sp->rxd_mode == RXD_MODE_3B)
6435 ba = &mac_control->rings[i].ba[j][k]; 6282 ba = &mac_control->rings[i].ba[j][k];
6436 if (set_rxd_buffer_pointer(sp, rxdp, ba, 6283 if (set_rxd_buffer_pointer(sp, rxdp, ba,
6437 &skb,(u64 *)&temp0_64, 6284 &skb,(u64 *)&temp0_64,
@@ -6458,9 +6305,7 @@ static int s2io_add_isr(struct s2io_nic * sp)
6458 struct net_device *dev = sp->dev; 6305 struct net_device *dev = sp->dev;
6459 int err = 0; 6306 int err = 0;
6460 6307
6461 if (sp->intr_type == MSI) 6308 if (sp->intr_type == MSI_X)
6462 ret = s2io_enable_msi(sp);
6463 else if (sp->intr_type == MSI_X)
6464 ret = s2io_enable_msi_x(sp); 6309 ret = s2io_enable_msi_x(sp);
6465 if (ret) { 6310 if (ret) {
6466 DBG_PRINT(ERR_DBG, "%s: Defaulting to INTA\n", dev->name); 6311 DBG_PRINT(ERR_DBG, "%s: Defaulting to INTA\n", dev->name);
@@ -6471,16 +6316,6 @@ static int s2io_add_isr(struct s2io_nic * sp)
6471 store_xmsi_data(sp); 6316 store_xmsi_data(sp);
6472 6317
6473 /* After proper initialization of H/W, register ISR */ 6318 /* After proper initialization of H/W, register ISR */
6474 if (sp->intr_type == MSI) {
6475 err = request_irq((int) sp->pdev->irq, s2io_msi_handle,
6476 IRQF_SHARED, sp->name, dev);
6477 if (err) {
6478 pci_disable_msi(sp->pdev);
6479 DBG_PRINT(ERR_DBG, "%s: MSI registration failed\n",
6480 dev->name);
6481 return -1;
6482 }
6483 }
6484 if (sp->intr_type == MSI_X) { 6319 if (sp->intr_type == MSI_X) {
6485 int i, msix_tx_cnt=0,msix_rx_cnt=0; 6320 int i, msix_tx_cnt=0,msix_rx_cnt=0;
6486 6321
@@ -6567,14 +6402,6 @@ static void s2io_rem_isr(struct s2io_nic * sp)
6567 pci_disable_msix(sp->pdev); 6402 pci_disable_msix(sp->pdev);
6568 } else { 6403 } else {
6569 free_irq(sp->pdev->irq, dev); 6404 free_irq(sp->pdev->irq, dev);
6570 if (sp->intr_type == MSI) {
6571 u16 val;
6572
6573 pci_disable_msi(sp->pdev);
6574 pci_read_config_word(sp->pdev, 0x4c, &val);
6575 val ^= 0x1;
6576 pci_write_config_word(sp->pdev, 0x4c, val);
6577 }
6578 } 6405 }
6579 /* Waiting till all Interrupt handlers are complete */ 6406 /* Waiting till all Interrupt handlers are complete */
6580 cnt = 0; 6407 cnt = 0;
@@ -6907,6 +6734,7 @@ static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp)
6907 } 6734 }
6908 6735
6909 /* Updating statistics */ 6736 /* Updating statistics */
6737 sp->stats.rx_packets++;
6910 rxdp->Host_Control = 0; 6738 rxdp->Host_Control = 0;
6911 if (sp->rxd_mode == RXD_MODE_1) { 6739 if (sp->rxd_mode == RXD_MODE_1) {
6912 int len = RXD_GET_BUFFER0_SIZE_1(rxdp->Control_2); 6740 int len = RXD_GET_BUFFER0_SIZE_1(rxdp->Control_2);
@@ -6914,7 +6742,7 @@ static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp)
6914 sp->stats.rx_bytes += len; 6742 sp->stats.rx_bytes += len;
6915 skb_put(skb, len); 6743 skb_put(skb, len);
6916 6744
6917 } else if (sp->rxd_mode >= RXD_MODE_3A) { 6745 } else if (sp->rxd_mode == RXD_MODE_3B) {
6918 int get_block = ring_data->rx_curr_get_info.block_index; 6746 int get_block = ring_data->rx_curr_get_info.block_index;
6919 int get_off = ring_data->rx_curr_get_info.offset; 6747 int get_off = ring_data->rx_curr_get_info.offset;
6920 int buf0_len = RXD_GET_BUFFER0_SIZE_3(rxdp->Control_2); 6748 int buf0_len = RXD_GET_BUFFER0_SIZE_3(rxdp->Control_2);
@@ -6924,18 +6752,7 @@ static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp)
6924 struct buffAdd *ba = &ring_data->ba[get_block][get_off]; 6752 struct buffAdd *ba = &ring_data->ba[get_block][get_off];
6925 sp->stats.rx_bytes += buf0_len + buf2_len; 6753 sp->stats.rx_bytes += buf0_len + buf2_len;
6926 memcpy(buff, ba->ba_0, buf0_len); 6754 memcpy(buff, ba->ba_0, buf0_len);
6927 6755 skb_put(skb, buf2_len);
6928 if (sp->rxd_mode == RXD_MODE_3A) {
6929 int buf1_len = RXD_GET_BUFFER1_SIZE_3(rxdp->Control_2);
6930
6931 skb_put(skb, buf1_len);
6932 skb->len += buf2_len;
6933 skb->data_len += buf2_len;
6934 skb_put(skb_shinfo(skb)->frag_list, buf2_len);
6935 sp->stats.rx_bytes += buf1_len;
6936
6937 } else
6938 skb_put(skb, buf2_len);
6939 } 6756 }
6940 6757
6941 if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) && ((!sp->lro) || 6758 if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) && ((!sp->lro) ||
@@ -7131,7 +6948,7 @@ static int s2io_verify_parm(struct pci_dev *pdev, u8 *dev_intr_type)
7131 *dev_intr_type = INTA; 6948 *dev_intr_type = INTA;
7132 } 6949 }
7133#else 6950#else
7134 if (*dev_intr_type > MSI_X) { 6951 if ((*dev_intr_type != INTA) && (*dev_intr_type != MSI_X)) {
7135 DBG_PRINT(ERR_DBG, "s2io: Wrong intr_type requested. " 6952 DBG_PRINT(ERR_DBG, "s2io: Wrong intr_type requested. "
7136 "Defaulting to INTA\n"); 6953 "Defaulting to INTA\n");
7137 *dev_intr_type = INTA; 6954 *dev_intr_type = INTA;
@@ -7145,10 +6962,10 @@ static int s2io_verify_parm(struct pci_dev *pdev, u8 *dev_intr_type)
7145 *dev_intr_type = INTA; 6962 *dev_intr_type = INTA;
7146 } 6963 }
7147 6964
7148 if (rx_ring_mode > 3) { 6965 if ((rx_ring_mode != 1) && (rx_ring_mode != 2)) {
7149 DBG_PRINT(ERR_DBG, "s2io: Requested ring mode not supported\n"); 6966 DBG_PRINT(ERR_DBG, "s2io: Requested ring mode not supported\n");
7150 DBG_PRINT(ERR_DBG, "s2io: Defaulting to 3-buffer mode\n"); 6967 DBG_PRINT(ERR_DBG, "s2io: Defaulting to 1-buffer mode\n");
7151 rx_ring_mode = 3; 6968 rx_ring_mode = 1;
7152 } 6969 }
7153 return SUCCESS; 6970 return SUCCESS;
7154} 6971}
@@ -7240,28 +7057,10 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
7240 pci_disable_device(pdev); 7057 pci_disable_device(pdev);
7241 return -ENOMEM; 7058 return -ENOMEM;
7242 } 7059 }
7243 if (dev_intr_type != MSI_X) { 7060 if ((ret = pci_request_regions(pdev, s2io_driver_name))) {
7244 if (pci_request_regions(pdev, s2io_driver_name)) { 7061 DBG_PRINT(ERR_DBG, "%s: Request Regions failed - %x \n", __FUNCTION__, ret);
7245 DBG_PRINT(ERR_DBG, "Request Regions failed\n"); 7062 pci_disable_device(pdev);
7246 pci_disable_device(pdev); 7063 return -ENODEV;
7247 return -ENODEV;
7248 }
7249 }
7250 else {
7251 if (!(request_mem_region(pci_resource_start(pdev, 0),
7252 pci_resource_len(pdev, 0), s2io_driver_name))) {
7253 DBG_PRINT(ERR_DBG, "bar0 Request Regions failed\n");
7254 pci_disable_device(pdev);
7255 return -ENODEV;
7256 }
7257 if (!(request_mem_region(pci_resource_start(pdev, 2),
7258 pci_resource_len(pdev, 2), s2io_driver_name))) {
7259 DBG_PRINT(ERR_DBG, "bar1 Request Regions failed\n");
7260 release_mem_region(pci_resource_start(pdev, 0),
7261 pci_resource_len(pdev, 0));
7262 pci_disable_device(pdev);
7263 return -ENODEV;
7264 }
7265 } 7064 }
7266 7065
7267 dev = alloc_etherdev(sizeof(struct s2io_nic)); 7066 dev = alloc_etherdev(sizeof(struct s2io_nic));
@@ -7288,8 +7087,6 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
7288 sp->rxd_mode = RXD_MODE_1; 7087 sp->rxd_mode = RXD_MODE_1;
7289 if (rx_ring_mode == 2) 7088 if (rx_ring_mode == 2)
7290 sp->rxd_mode = RXD_MODE_3B; 7089 sp->rxd_mode = RXD_MODE_3B;
7291 if (rx_ring_mode == 3)
7292 sp->rxd_mode = RXD_MODE_3A;
7293 7090
7294 sp->intr_type = dev_intr_type; 7091 sp->intr_type = dev_intr_type;
7295 7092
@@ -7565,10 +7362,6 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
7565 DBG_PRINT(ERR_DBG, "%s: 2-Buffer receive mode enabled\n", 7362 DBG_PRINT(ERR_DBG, "%s: 2-Buffer receive mode enabled\n",
7566 dev->name); 7363 dev->name);
7567 break; 7364 break;
7568 case RXD_MODE_3A:
7569 DBG_PRINT(ERR_DBG, "%s: 3-Buffer receive mode enabled\n",
7570 dev->name);
7571 break;
7572 } 7365 }
7573 7366
7574 if (napi) 7367 if (napi)
@@ -7577,9 +7370,6 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
7577 case INTA: 7370 case INTA:
7578 DBG_PRINT(ERR_DBG, "%s: Interrupt type INTA\n", dev->name); 7371 DBG_PRINT(ERR_DBG, "%s: Interrupt type INTA\n", dev->name);
7579 break; 7372 break;
7580 case MSI:
7581 DBG_PRINT(ERR_DBG, "%s: Interrupt type MSI\n", dev->name);
7582 break;
7583 case MSI_X: 7373 case MSI_X:
7584 DBG_PRINT(ERR_DBG, "%s: Interrupt type MSI-X\n", dev->name); 7374 DBG_PRINT(ERR_DBG, "%s: Interrupt type MSI-X\n", dev->name);
7585 break; 7375 break;
@@ -7619,14 +7409,7 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
7619 mem_alloc_failed: 7409 mem_alloc_failed:
7620 free_shared_mem(sp); 7410 free_shared_mem(sp);
7621 pci_disable_device(pdev); 7411 pci_disable_device(pdev);
7622 if (dev_intr_type != MSI_X) 7412 pci_release_regions(pdev);
7623 pci_release_regions(pdev);
7624 else {
7625 release_mem_region(pci_resource_start(pdev, 0),
7626 pci_resource_len(pdev, 0));
7627 release_mem_region(pci_resource_start(pdev, 2),
7628 pci_resource_len(pdev, 2));
7629 }
7630 pci_set_drvdata(pdev, NULL); 7413 pci_set_drvdata(pdev, NULL);
7631 free_netdev(dev); 7414 free_netdev(dev);
7632 7415
@@ -7661,14 +7444,7 @@ static void __devexit s2io_rem_nic(struct pci_dev *pdev)
7661 free_shared_mem(sp); 7444 free_shared_mem(sp);
7662 iounmap(sp->bar0); 7445 iounmap(sp->bar0);
7663 iounmap(sp->bar1); 7446 iounmap(sp->bar1);
7664 if (sp->intr_type != MSI_X) 7447 pci_release_regions(pdev);
7665 pci_release_regions(pdev);
7666 else {
7667 release_mem_region(pci_resource_start(pdev, 0),
7668 pci_resource_len(pdev, 0));
7669 release_mem_region(pci_resource_start(pdev, 2),
7670 pci_resource_len(pdev, 2));
7671 }
7672 pci_set_drvdata(pdev, NULL); 7448 pci_set_drvdata(pdev, NULL);
7673 free_netdev(dev); 7449 free_netdev(dev);
7674 pci_disable_device(pdev); 7450 pci_disable_device(pdev);