author     Veena Parat <Veena.Parat@neterion.com>    2007-07-23 02:20:51 -0400
committer  Jeff Garzik <jeff@garzik.org>             2007-07-30 15:56:03 -0400
commit     6d517a27d5b376c769f48213044b658042b5f07a
tree       c77c169c3edf4eef67dc465145a02b060356d6c0
parent     2c6a3f72688acbc640b3be8083dac0e90354f0cf
S2IO: Removing 3 buffer mode support from the driver

- Removed 3 buffer mode support from the driver - an unused feature
- Incorporated Jeff Garzik's comments on elimination of inline typecasting
- Code cleanup: removed a few extra spaces

Signed-off-by: Veena Parat <veena.parat@neterion.com>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
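
The typecasting cleanup noted above replaces repeated inline casts of the generic descriptor pointer with a single cast into a typed local; that pattern runs through most hunks below. A minimal standalone sketch of the before/after shape — the struct fields here are stand-ins, not the driver's exact descriptor layout:

#include <stdio.h>

struct RxD_t { unsigned long long Control_1, Control_2; };
struct RxD3  { unsigned long long Control_1, Control_2,
               Buffer0_ptr, Buffer1_ptr, Buffer2_ptr; };

/* Old style: the cast is repeated at every field access. */
static void fill_before(struct RxD_t *rxdp, unsigned long long pa)
{
        ((struct RxD3 *)rxdp)->Buffer0_ptr = pa;
        ((struct RxD3 *)rxdp)->Buffer2_ptr = pa + 64;
}

/* New style: cast once into a typed local, then use plain accesses. */
static void fill_after(struct RxD_t *rxdp, unsigned long long pa)
{
        struct RxD3 *rxdp3 = (struct RxD3 *)rxdp;

        rxdp3->Buffer0_ptr = pa;
        rxdp3->Buffer2_ptr = pa + 64;
}

int main(void)
{
        struct RxD3 d = {0};

        fill_before((struct RxD_t *)&d, 0x1000);
        fill_after((struct RxD_t *)&d, 0x2000);
        printf("Buffer0_ptr = %#llx\n", d.Buffer0_ptr);
        return 0;
}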
-rw-r--r--  drivers/net/s2io.c | 287
-rw-r--r--  drivers/net/s2io.h |   3
2 files changed, 73 insertions(+), 217 deletions(-)
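
For orientation before the diff: the two receive modes that survive differ in how a frame is split across per-descriptor buffers. A rough compilable sketch of the layouts, sized to match the rxd_size table in the first s2io.c hunk; the field order is an assumption (the hardware-visible layout lives in s2io.h):

typedef unsigned long long u64;

/* 1-buffer mode (RXD_MODE_1): one buffer holds the whole frame.
 * 4 x 8 bytes = 32, matching rxd_size[0] below. */
struct RxD1_sketch {
        u64 Host_Control;       /* driver cookie: the skb pointer */
        u64 Control_1;
        u64 Control_2;          /* carries the buffer size field */
        u64 Buffer0_ptr;        /* dev->mtu + Ethernet/802.2/SNAP headers */
};

/* 2-buffer mode (RXD_MODE_3B): 6 x 8 bytes = 48, matching rxd_size[1].
 * The removed 3-buffer mode reused this same descriptor but pointed
 * Buffer1 at L3/L4 headers and Buffer2 at the L4 payload held in
 * skb_shinfo(skb)->frag_list. */
struct RxD3_sketch {
        u64 Host_Control;
        u64 Control_1;
        u64 Control_2;          /* packs all three buffer sizes */
        u64 Buffer0_ptr;        /* BUF0_LEN header area, 128-byte aligned */
        u64 Buffer1_ptr;        /* dummy in 2-buffer mode, size 1 */
        u64 Buffer2_ptr;        /* L3/L4 headers + payload, dev->mtu + 4 */
};

int main(void)
{
        return !(sizeof(struct RxD1_sketch) == 32 &&
                 sizeof(struct RxD3_sketch) == 48);
}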
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c
index afef6c0c59fe..6bfb191634e7 100644
--- a/drivers/net/s2io.c
+++ b/drivers/net/s2io.c
@@ -32,7 +32,7 @@
  * rx_ring_sz: This defines the number of receive blocks each ring can have.
  *     This is also an array of size 8.
  * rx_ring_mode: This defines the operation mode of all 8 rings. The valid
- *     values are 1, 2 and 3.
+ *     values are 1, 2.
  * tx_fifo_num: This defines the number of Tx FIFOs thats used int the driver.
  * tx_fifo_len: This too is an array of 8. Each element defines the number of
  *     Tx descriptors that can be associated with each corresponding FIFO.
@@ -90,8 +90,8 @@
 static char s2io_driver_name[] = "Neterion";
 static char s2io_driver_version[] = DRV_VERSION;
 
-static int rxd_size[4] = {32,48,48,64};
-static int rxd_count[4] = {127,85,85,63};
+static int rxd_size[2] = {32,48};
+static int rxd_count[2] = {127,85};
 
 static inline int RXD_IS_UP2DT(struct RxD_t *rxdp)
 {
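
The per-mode tables above shrink from four entries to two. Indexing them with nic->rxd_mode stays valid only because the s2io.h hunk at the end of this patch renumbers RXD_MODE_3B from 2 to 1; a small compilable demo of that assumption:

#include <stdio.h>

#define RXD_MODE_1  0   /* one-buffer mode */
#define RXD_MODE_3B 1   /* two-buffer mode, renumbered from 2 */

static int rxd_size[2]  = {32, 48};     /* descriptor bytes per mode */
static int rxd_count[2] = {127, 85};    /* descriptors per 4096-byte block */

int main(void)
{
        int mode = RXD_MODE_3B;

        printf("mode %d: %d-byte RxDs, %d per block\n",
               mode, rxd_size[mode], rxd_count[mode]);
        return 0;
}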
@@ -701,7 +701,7 @@ static int init_shared_mem(struct s2io_nic *nic)
                                         (u64) tmp_p_addr_next;
                         }
                 }
-                if (nic->rxd_mode >= RXD_MODE_3A) {
+                if (nic->rxd_mode == RXD_MODE_3B) {
                         /*
                          * Allocation of Storages for buffer addresses in 2BUFF mode
                          * and the buffers as well.
@@ -870,7 +870,7 @@ static void free_shared_mem(struct s2io_nic *nic)
                 }
         }
 
-        if (nic->rxd_mode >= RXD_MODE_3A) {
+        if (nic->rxd_mode == RXD_MODE_3B) {
                 /* Freeing buffer storage addresses in 2BUFF mode. */
                 for (i = 0; i < config->rx_ring_num; i++) {
                         blk_cnt = config->rx_cfg[i].num_rxd /
@@ -2233,44 +2233,6 @@ static void stop_nic(struct s2io_nic *nic)
         writeq(val64, &bar0->adapter_control);
 }
 
-static int fill_rxd_3buf(struct s2io_nic *nic, struct RxD_t *rxdp, struct \
-                                sk_buff *skb)
-{
-        struct net_device *dev = nic->dev;
-        struct sk_buff *frag_list;
-        void *tmp;
-
-        /* Buffer-1 receives L3/L4 headers */
-        ((struct RxD3*)rxdp)->Buffer1_ptr = pci_map_single
-                        (nic->pdev, skb->data, l3l4hdr_size + 4,
-                        PCI_DMA_FROMDEVICE);
-
-        /* skb_shinfo(skb)->frag_list will have L4 data payload */
-        skb_shinfo(skb)->frag_list = dev_alloc_skb(dev->mtu + ALIGN_SIZE);
-        if (skb_shinfo(skb)->frag_list == NULL) {
-                nic->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++;
-                DBG_PRINT(INFO_DBG, "%s: dev_alloc_skb failed\n ", dev->name);
-                return -ENOMEM ;
-        }
-        frag_list = skb_shinfo(skb)->frag_list;
-        skb->truesize += frag_list->truesize;
-        nic->mac_control.stats_info->sw_stat.mem_allocated
-                += frag_list->truesize;
-        frag_list->next = NULL;
-        tmp = (void *)ALIGN((long)frag_list->data, ALIGN_SIZE + 1);
-        frag_list->data = tmp;
-        skb_reset_tail_pointer(frag_list);
-
-        /* Buffer-2 receives L4 data payload */
-        ((struct RxD3*)rxdp)->Buffer2_ptr = pci_map_single(nic->pdev,
-                        frag_list->data, dev->mtu,
-                        PCI_DMA_FROMDEVICE);
-        rxdp->Control_2 |= SET_BUFFER1_SIZE_3(l3l4hdr_size + 4);
-        rxdp->Control_2 |= SET_BUFFER2_SIZE_3(dev->mtu);
-
-        return SUCCESS;
-}
-
 /**
  *  fill_rx_buffers -  Allocates the Rx side skbs
  *  @nic:  device private variable
@@ -2307,6 +2269,8 @@ static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
         unsigned long flags;
         struct RxD_t *first_rxdp = NULL;
         u64 Buffer0_ptr = 0, Buffer1_ptr = 0;
+        struct RxD1 *rxdp1;
+        struct RxD3 *rxdp3;
 
         mac_control = &nic->mac_control;
         config = &nic->config;
@@ -2359,7 +2323,7 @@ static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
                             (block_no * (rxd_count[nic->rxd_mode] + 1)) + off;
                 }
                 if ((rxdp->Control_1 & RXD_OWN_XENA) &&
-                        ((nic->rxd_mode >= RXD_MODE_3A) &&
+                        ((nic->rxd_mode == RXD_MODE_3B) &&
                                 (rxdp->Control_2 & BIT(0)))) {
                         mac_control->rings[ring_no].rx_curr_put_info.
                                         offset = off;
@@ -2370,10 +2334,8 @@ static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
                         HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
                 if (nic->rxd_mode == RXD_MODE_1)
                         size += NET_IP_ALIGN;
-                else if (nic->rxd_mode == RXD_MODE_3B)
-                        size = dev->mtu + ALIGN_SIZE + BUF0_LEN + 4;
                 else
-                        size = l3l4hdr_size + ALIGN_SIZE + BUF0_LEN + 4;
+                        size = dev->mtu + ALIGN_SIZE + BUF0_LEN + 4;
 
                 /* allocate skb */
                 skb = dev_alloc_skb(size);
@@ -2392,33 +2354,30 @@ static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
                                 += skb->truesize;
                 if (nic->rxd_mode == RXD_MODE_1) {
                         /* 1 buffer mode - normal operation mode */
+                        rxdp1 = (struct RxD1*)rxdp;
                         memset(rxdp, 0, sizeof(struct RxD1));
                         skb_reserve(skb, NET_IP_ALIGN);
-                        ((struct RxD1*)rxdp)->Buffer0_ptr = pci_map_single
+                        rxdp1->Buffer0_ptr = pci_map_single
                             (nic->pdev, skb->data, size - NET_IP_ALIGN,
                                 PCI_DMA_FROMDEVICE);
                         rxdp->Control_2 =
                                 SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN);
 
-                } else if (nic->rxd_mode >= RXD_MODE_3A) {
+                } else if (nic->rxd_mode == RXD_MODE_3B) {
                         /*
-                         * 2 or 3 buffer mode -
-                         * Both 2 buffer mode and 3 buffer mode provides 128
+                         * 2 buffer mode -
+                         * 2 buffer mode provides 128
                          * byte aligned receive buffers.
-                         *
-                         * 3 buffer mode provides header separation where in
-                         * skb->data will have L3/L4 headers where as
-                         * skb_shinfo(skb)->frag_list will have the L4 data
-                         * payload
                          */
 
+                        rxdp3 = (struct RxD3*)rxdp;
                         /* save buffer pointers to avoid frequent dma mapping */
-                        Buffer0_ptr = ((struct RxD3*)rxdp)->Buffer0_ptr;
-                        Buffer1_ptr = ((struct RxD3*)rxdp)->Buffer1_ptr;
+                        Buffer0_ptr = rxdp3->Buffer0_ptr;
+                        Buffer1_ptr = rxdp3->Buffer1_ptr;
                         memset(rxdp, 0, sizeof(struct RxD3));
                         /* restore the buffer pointers for dma sync*/
-                        ((struct RxD3*)rxdp)->Buffer0_ptr = Buffer0_ptr;
-                        ((struct RxD3*)rxdp)->Buffer1_ptr = Buffer1_ptr;
+                        rxdp3->Buffer0_ptr = Buffer0_ptr;
+                        rxdp3->Buffer1_ptr = Buffer1_ptr;
 
                         ba = &mac_control->rings[ring_no].ba[block_no][off];
                         skb_reserve(skb, BUF0_LEN);
@@ -2428,13 +2387,13 @@ static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
                         skb->data = (void *) (unsigned long)tmp;
                         skb_reset_tail_pointer(skb);
 
-                        if (!(((struct RxD3*)rxdp)->Buffer0_ptr))
-                                ((struct RxD3*)rxdp)->Buffer0_ptr =
+                        if (!(rxdp3->Buffer0_ptr))
+                                rxdp3->Buffer0_ptr =
                                    pci_map_single(nic->pdev, ba->ba_0, BUF0_LEN,
                                            PCI_DMA_FROMDEVICE);
                         else
                                 pci_dma_sync_single_for_device(nic->pdev,
-                                    (dma_addr_t) ((struct RxD3*)rxdp)->Buffer0_ptr,
+                                    (dma_addr_t) rxdp3->Buffer0_ptr,
                                     BUF0_LEN, PCI_DMA_FROMDEVICE);
                         rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
                         if (nic->rxd_mode == RXD_MODE_3B) {
@@ -2444,13 +2403,13 @@ static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
                                  * Buffer2 will have L3/L4 header plus
                                  * L4 payload
                                  */
-                                ((struct RxD3*)rxdp)->Buffer2_ptr = pci_map_single
+                                rxdp3->Buffer2_ptr = pci_map_single
                                 (nic->pdev, skb->data, dev->mtu + 4,
                                                 PCI_DMA_FROMDEVICE);
 
                                 /* Buffer-1 will be dummy buffer. Not used */
-                                if (!(((struct RxD3*)rxdp)->Buffer1_ptr)) {
-                                        ((struct RxD3*)rxdp)->Buffer1_ptr =
+                                if (!(rxdp3->Buffer1_ptr)) {
+                                        rxdp3->Buffer1_ptr =
                                                 pci_map_single(nic->pdev,
                                                 ba->ba_1, BUF1_LEN,
                                                 PCI_DMA_FROMDEVICE);
@@ -2458,19 +2417,6 @@ static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
                                 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
                                 rxdp->Control_2 |= SET_BUFFER2_SIZE_3
                                                                 (dev->mtu + 4);
-                        } else {
-                                /* 3 buffer mode */
-                                if (fill_rxd_3buf(nic, rxdp, skb) == -ENOMEM) {
-                                        nic->mac_control.stats_info->sw_stat.\
-                                        mem_freed += skb->truesize;
-                                        dev_kfree_skb_irq(skb);
-                                        if (first_rxdp) {
-                                                wmb();
-                                                first_rxdp->Control_1 |=
-                                                        RXD_OWN_XENA;
-                                        }
-                                        return -ENOMEM ;
-                                }
                         }
                         rxdp->Control_2 |= BIT(0);
                 }
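
In the surviving 2-buffer fill path the three buffer sizes are packed into the descriptor's Control_2 word through the SET_BUFFER*_SIZE_3 macros, with Buffer1 programmed as a 1-byte dummy. A hedged sketch of the packing; the shift positions and the BUF0_LEN stand-in are invented for illustration and are not the Xframe hardware layout:

#include <stdio.h>

typedef unsigned long long u64;

/* Hypothetical field positions -- NOT the real Xframe bit layout. */
#define SET_BUFFER0_SIZE_3(sz)  ((u64)(sz) << 32)
#define SET_BUFFER1_SIZE_3(sz)  ((u64)(sz) << 16)
#define SET_BUFFER2_SIZE_3(sz)  ((u64)(sz) <<  0)

int main(void)
{
        int buf0_len = 40;      /* stand-in for BUF0_LEN: synced header area */
        int mtu = 1500;
        u64 control_2;

        control_2  = SET_BUFFER0_SIZE_3(buf0_len);      /* header buffer */
        control_2 |= SET_BUFFER1_SIZE_3(1);             /* dummy Buffer1 */
        control_2 |= SET_BUFFER2_SIZE_3(mtu + 4);       /* payload buffer */
        printf("Control_2 = %#llx\n", control_2);
        return 0;
}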
@@ -2515,6 +2461,8 @@ static void free_rxd_blk(struct s2io_nic *sp, int ring_no, int blk)
         struct RxD_t *rxdp;
         struct mac_info *mac_control;
         struct buffAdd *ba;
+        struct RxD1 *rxdp1;
+        struct RxD3 *rxdp3;
 
         mac_control = &sp->mac_control;
         for (j = 0 ; j < rxd_count[sp->rxd_mode]; j++) {
@@ -2526,40 +2474,30 @@ static void free_rxd_blk(struct s2io_nic *sp, int ring_no, int blk)
                         continue;
                 }
                 if (sp->rxd_mode == RXD_MODE_1) {
+                        rxdp1 = (struct RxD1*)rxdp;
                         pci_unmap_single(sp->pdev, (dma_addr_t)
-                                 ((struct RxD1*)rxdp)->Buffer0_ptr,
+                                 rxdp1->Buffer0_ptr,
                                  dev->mtu +
                                  HEADER_ETHERNET_II_802_3_SIZE
                                  + HEADER_802_2_SIZE +
                                  HEADER_SNAP_SIZE,
                                  PCI_DMA_FROMDEVICE);
                         memset(rxdp, 0, sizeof(struct RxD1));
                 } else if(sp->rxd_mode == RXD_MODE_3B) {
+                        rxdp3 = (struct RxD3*)rxdp;
                         ba = &mac_control->rings[ring_no].
                                 ba[blk][j];
                         pci_unmap_single(sp->pdev, (dma_addr_t)
-                                 ((struct RxD3*)rxdp)->Buffer0_ptr,
+                                 rxdp3->Buffer0_ptr,
                                  BUF0_LEN,
-                                 PCI_DMA_FROMDEVICE);
-                        pci_unmap_single(sp->pdev, (dma_addr_t)
-                                 ((struct RxD3*)rxdp)->Buffer1_ptr,
-                                 BUF1_LEN,
-                                 PCI_DMA_FROMDEVICE);
-                        pci_unmap_single(sp->pdev, (dma_addr_t)
-                                 ((struct RxD3*)rxdp)->Buffer2_ptr,
-                                 dev->mtu + 4,
-                                 PCI_DMA_FROMDEVICE);
-                        memset(rxdp, 0, sizeof(struct RxD3));
-                } else {
-                        pci_unmap_single(sp->pdev, (dma_addr_t)
-                                ((struct RxD3*)rxdp)->Buffer0_ptr, BUF0_LEN,
                                  PCI_DMA_FROMDEVICE);
                         pci_unmap_single(sp->pdev, (dma_addr_t)
-                                ((struct RxD3*)rxdp)->Buffer1_ptr,
-                                l3l4hdr_size + 4,
+                                 rxdp3->Buffer1_ptr,
+                                 BUF1_LEN,
                                  PCI_DMA_FROMDEVICE);
                         pci_unmap_single(sp->pdev, (dma_addr_t)
-                                ((struct RxD3*)rxdp)->Buffer2_ptr, dev->mtu,
+                                 rxdp3->Buffer2_ptr,
+                                 dev->mtu + 4,
                                  PCI_DMA_FROMDEVICE);
                         memset(rxdp, 0, sizeof(struct RxD3));
                 }
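
Worth noting in the hunk above: Buffer2 is now unmapped with dev->mtu + 4, the same length fill_rx_buffers mapped it with, whereas the deleted 3-buffer branch used its own l3l4hdr_size/dev->mtu lengths. The DMA API requires the unmap length and direction to match the original mapping; a standalone sketch of that rule with stubbed, hypothetical map/unmap helpers:

#include <assert.h>

typedef unsigned long long dma_addr_t;

static unsigned long mapped_len;        /* length recorded at map time */

static dma_addr_t map_single(void *cpu_addr, unsigned long len)
{
        mapped_len = len;
        return (dma_addr_t)(unsigned long)cpu_addr;
}

static void unmap_single(dma_addr_t handle, unsigned long len)
{
        (void)handle;
        assert(len == mapped_len);      /* must match the mapping exactly */
}

int main(void)
{
        char payload[1500 + 4];         /* dev->mtu + 4, as mapped at fill time */

        dma_addr_t h = map_single(payload, sizeof(payload));
        unmap_single(h, sizeof(payload));       /* dev->mtu + 4 again, not dev->mtu */
        return 0;
}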
@@ -2756,6 +2694,8 @@ static void rx_intr_handler(struct ring_info *ring_data)
         struct sk_buff *skb;
         int pkt_cnt = 0;
         int i;
+        struct RxD1* rxdp1;
+        struct RxD3* rxdp3;
 
         spin_lock(&nic->rx_lock);
         if (atomic_read(&nic->card_state) == CARD_DOWN) {
@@ -2796,32 +2736,23 @@ static void rx_intr_handler(struct ring_info *ring_data)
                         return;
                 }
                 if (nic->rxd_mode == RXD_MODE_1) {
+                        rxdp1 = (struct RxD1*)rxdp;
                         pci_unmap_single(nic->pdev, (dma_addr_t)
-                                 ((struct RxD1*)rxdp)->Buffer0_ptr,
+                                 rxdp1->Buffer0_ptr,
                                  dev->mtu +
                                  HEADER_ETHERNET_II_802_3_SIZE +
                                  HEADER_802_2_SIZE +
                                  HEADER_SNAP_SIZE,
                                  PCI_DMA_FROMDEVICE);
                 } else if (nic->rxd_mode == RXD_MODE_3B) {
+                        rxdp3 = (struct RxD3*)rxdp;
                         pci_dma_sync_single_for_cpu(nic->pdev, (dma_addr_t)
-                                 ((struct RxD3*)rxdp)->Buffer0_ptr,
+                                 rxdp3->Buffer0_ptr,
                                  BUF0_LEN, PCI_DMA_FROMDEVICE);
-                        pci_unmap_single(nic->pdev, (dma_addr_t)
-                                 ((struct RxD3*)rxdp)->Buffer2_ptr,
-                                 dev->mtu + 4,
-                                 PCI_DMA_FROMDEVICE);
-                } else {
-                        pci_dma_sync_single_for_cpu(nic->pdev, (dma_addr_t)
-                                 ((struct RxD3*)rxdp)->Buffer0_ptr, BUF0_LEN,
-                                 PCI_DMA_FROMDEVICE);
-                        pci_unmap_single(nic->pdev, (dma_addr_t)
-                                 ((struct RxD3*)rxdp)->Buffer1_ptr,
-                                 l3l4hdr_size + 4,
-                                 PCI_DMA_FROMDEVICE);
                         pci_unmap_single(nic->pdev, (dma_addr_t)
-                                 ((struct RxD3*)rxdp)->Buffer2_ptr,
-                                 dev->mtu, PCI_DMA_FROMDEVICE);
+                                 rxdp3->Buffer2_ptr,
+                                 dev->mtu + 4,
+                                 PCI_DMA_FROMDEVICE);
                 }
                 prefetch(skb->data);
                 rx_osm_handler(ring_data, rxdp);
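
The receive completion above also shows the two buffer lifetimes in 2-buffer mode: the small per-descriptor header buffer (ba->ba_0) stays mapped for the life of the ring and is only handed back to the CPU with pci_dma_sync_single_for_cpu(), while the payload buffer leaves with the skb and must be fully unmapped. A sketch of that split, with simplified stand-in helpers rather than the real PCI DMA calls:

typedef unsigned long long u64;

/* Stand-ins for the DMA calls; real signatures differ. */
static void sync_for_cpu(u64 handle, int len)  { (void)handle; (void)len; }
static void unmap_single(u64 handle, int len)  { (void)handle; (void)len; }

/* Buffer0 is ring-owned and recycled on the next fill: sync only.
 * Buffer2 travels onward inside the skb: tear the mapping down. */
static void reclaim_rx_buffers(u64 buf0, u64 buf2, int buf0_len, int mtu)
{
        sync_for_cpu(buf0, buf0_len);
        unmap_single(buf2, mtu + 4);
}

int main(void)
{
        reclaim_rx_buffers(0x1000, 0x2000, 40, 1500);
        return 0;
}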
@@ -4927,8 +4858,6 @@ static void s2io_ethtool_gringparam(struct net_device *dev,
                 ering->rx_max_pending = MAX_RX_DESC_1;
         else if (sp->rxd_mode == RXD_MODE_3B)
                 ering->rx_max_pending = MAX_RX_DESC_2;
-        else if (sp->rxd_mode == RXD_MODE_3A)
-                ering->rx_max_pending = MAX_RX_DESC_3;
 
         ering->tx_max_pending = MAX_TX_DESC;
         for (i = 0 ; i < sp->config.tx_fifo_num ; i++) {
@@ -6266,9 +6195,9 @@ static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp,
                                 u64 *temp2, int size)
 {
         struct net_device *dev = sp->dev;
-        struct sk_buff *frag_list;
 
         if ((sp->rxd_mode == RXD_MODE_1) && (rxdp->Host_Control == 0)) {
+                struct RxD1 *rxdp1 = (struct RxD1 *)rxdp;
                 /* allocate skb */
                 if (*skb) {
                         DBG_PRINT(INFO_DBG, "SKB is not NULL\n");
@@ -6277,7 +6206,7 @@ static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp,
                          * using same mapped address for the Rxd
                          * buffer pointer
                          */
-                        ((struct RxD1*)rxdp)->Buffer0_ptr = *temp0;
+                        rxdp1->Buffer0_ptr = *temp0;
                 } else {
                         *skb = dev_alloc_skb(size);
                         if (!(*skb)) {
@@ -6294,18 +6223,19 @@ static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp,
                          * such it will be used for next rxd whose
                          * Host Control is NULL
                          */
-                        ((struct RxD1*)rxdp)->Buffer0_ptr = *temp0 =
+                        rxdp1->Buffer0_ptr = *temp0 =
                                 pci_map_single( sp->pdev, (*skb)->data,
                                         size - NET_IP_ALIGN,
                                         PCI_DMA_FROMDEVICE);
                         rxdp->Host_Control = (unsigned long) (*skb);
                 }
         } else if ((sp->rxd_mode == RXD_MODE_3B) && (rxdp->Host_Control == 0)) {
+                struct RxD3 *rxdp3 = (struct RxD3 *)rxdp;
                 /* Two buffer Mode */
                 if (*skb) {
-                        ((struct RxD3*)rxdp)->Buffer2_ptr = *temp2;
-                        ((struct RxD3*)rxdp)->Buffer0_ptr = *temp0;
-                        ((struct RxD3*)rxdp)->Buffer1_ptr = *temp1;
+                        rxdp3->Buffer2_ptr = *temp2;
+                        rxdp3->Buffer0_ptr = *temp0;
+                        rxdp3->Buffer1_ptr = *temp1;
                 } else {
                         *skb = dev_alloc_skb(size);
                         if (!(*skb)) {
@@ -6318,69 +6248,19 @@ static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp,
                         }
                         sp->mac_control.stats_info->sw_stat.mem_allocated
                                 += (*skb)->truesize;
-                        ((struct RxD3*)rxdp)->Buffer2_ptr = *temp2 =
+                        rxdp3->Buffer2_ptr = *temp2 =
                                 pci_map_single(sp->pdev, (*skb)->data,
                                         dev->mtu + 4,
                                         PCI_DMA_FROMDEVICE);
-                        ((struct RxD3*)rxdp)->Buffer0_ptr = *temp0 =
+                        rxdp3->Buffer0_ptr = *temp0 =
                                 pci_map_single( sp->pdev, ba->ba_0, BUF0_LEN,
                                         PCI_DMA_FROMDEVICE);
                         rxdp->Host_Control = (unsigned long) (*skb);
 
                         /* Buffer-1 will be dummy buffer not used */
-                        ((struct RxD3*)rxdp)->Buffer1_ptr = *temp1 =
+                        rxdp3->Buffer1_ptr = *temp1 =
                                 pci_map_single(sp->pdev, ba->ba_1, BUF1_LEN,
-                                        PCI_DMA_FROMDEVICE);
-                }
-        } else if ((rxdp->Host_Control == 0)) {
-                /* Three buffer mode */
-                if (*skb) {
-                        ((struct RxD3*)rxdp)->Buffer0_ptr = *temp0;
-                        ((struct RxD3*)rxdp)->Buffer1_ptr = *temp1;
-                        ((struct RxD3*)rxdp)->Buffer2_ptr = *temp2;
-                } else {
-                        *skb = dev_alloc_skb(size);
-                        if (!(*skb)) {
-                                DBG_PRINT(INFO_DBG, "%s: Out of ", dev->name);
-                                DBG_PRINT(INFO_DBG, "memory to allocate ");
-                                DBG_PRINT(INFO_DBG, "3 buf mode SKBs\n");
-                                sp->mac_control.stats_info->sw_stat. \
-                                mem_alloc_fail_cnt++;
-                                return -ENOMEM;
-                        }
-                        sp->mac_control.stats_info->sw_stat.mem_allocated
-                                += (*skb)->truesize;
-                        ((struct RxD3*)rxdp)->Buffer0_ptr = *temp0 =
-                                pci_map_single(sp->pdev, ba->ba_0, BUF0_LEN,
-                                        PCI_DMA_FROMDEVICE);
-                        /* Buffer-1 receives L3/L4 headers */
-                        ((struct RxD3*)rxdp)->Buffer1_ptr = *temp1 =
-                                pci_map_single( sp->pdev, (*skb)->data,
-                                        l3l4hdr_size + 4,
                                         PCI_DMA_FROMDEVICE);
-                        /*
-                         * skb_shinfo(skb)->frag_list will have L4
-                         * data payload
-                         */
-                        skb_shinfo(*skb)->frag_list = dev_alloc_skb(dev->mtu +
-                                ALIGN_SIZE);
-                        if (skb_shinfo(*skb)->frag_list == NULL) {
-                                DBG_PRINT(ERR_DBG, "%s: dev_alloc_skb \
-                                        failed\n ", dev->name);
-                                sp->mac_control.stats_info->sw_stat. \
-                                mem_alloc_fail_cnt++;
-                                return -ENOMEM ;
-                        }
-                        frag_list = skb_shinfo(*skb)->frag_list;
-                        frag_list->next = NULL;
-                        sp->mac_control.stats_info->sw_stat.mem_allocated
-                                += frag_list->truesize;
-                        /*
-                         * Buffer-2 receives L4 data payload
-                         */
-                        ((struct RxD3*)rxdp)->Buffer2_ptr = *temp2 =
-                                pci_map_single( sp->pdev, frag_list->data,
-                                        dev->mtu, PCI_DMA_FROMDEVICE);
                 }
         }
         return 0;
@@ -6395,10 +6275,6 @@ static void set_rxd_buffer_size(struct s2io_nic *sp, struct RxD_t *rxdp,
                 rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
                 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
                 rxdp->Control_2 |= SET_BUFFER2_SIZE_3( dev->mtu + 4);
-        } else {
-                rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
-                rxdp->Control_2 |= SET_BUFFER1_SIZE_3(l3l4hdr_size + 4);
-                rxdp->Control_2 |= SET_BUFFER2_SIZE_3(dev->mtu);
         }
 }
 
@@ -6420,8 +6296,6 @@ static int rxd_owner_bit_reset(struct s2io_nic *sp)
                 size += NET_IP_ALIGN;
         else if (sp->rxd_mode == RXD_MODE_3B)
                 size = dev->mtu + ALIGN_SIZE + BUF0_LEN + 4;
-        else
-                size = l3l4hdr_size + ALIGN_SIZE + BUF0_LEN + 4;
 
         for (i = 0; i < config->rx_ring_num; i++) {
                 blk_cnt = config->rx_cfg[i].num_rxd /
@@ -6431,7 +6305,7 @@ static int rxd_owner_bit_reset(struct s2io_nic *sp)
                         for (k = 0; k < rxd_count[sp->rxd_mode]; k++) {
                                 rxdp = mac_control->rings[i].
                                         rx_blocks[j].rxds[k].virt_addr;
-                                if(sp->rxd_mode >= RXD_MODE_3A)
+                                if(sp->rxd_mode == RXD_MODE_3B)
                                         ba = &mac_control->rings[i].ba[j][k];
                                 if (set_rxd_buffer_pointer(sp, rxdp, ba,
                                                 &skb,(u64 *)&temp0_64,
@@ -6914,7 +6788,7 @@ static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp)
                 sp->stats.rx_bytes += len;
                 skb_put(skb, len);
 
-        } else if (sp->rxd_mode >= RXD_MODE_3A) {
+        } else if (sp->rxd_mode == RXD_MODE_3B) {
                 int get_block = ring_data->rx_curr_get_info.block_index;
                 int get_off = ring_data->rx_curr_get_info.offset;
                 int buf0_len = RXD_GET_BUFFER0_SIZE_3(rxdp->Control_2);
@@ -6924,18 +6798,7 @@ static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp)
                 struct buffAdd *ba = &ring_data->ba[get_block][get_off];
                 sp->stats.rx_bytes += buf0_len + buf2_len;
                 memcpy(buff, ba->ba_0, buf0_len);
-
-                if (sp->rxd_mode == RXD_MODE_3A) {
-                        int buf1_len = RXD_GET_BUFFER1_SIZE_3(rxdp->Control_2);
-
-                        skb_put(skb, buf1_len);
-                        skb->len += buf2_len;
-                        skb->data_len += buf2_len;
-                        skb_put(skb_shinfo(skb)->frag_list, buf2_len);
-                        sp->stats.rx_bytes += buf1_len;
-
-                } else
-                        skb_put(skb, buf2_len);
+                skb_put(skb, buf2_len);
         }
 
         if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) && ((!sp->lro) ||
@@ -7145,10 +7008,10 @@ static int s2io_verify_parm(struct pci_dev *pdev, u8 *dev_intr_type)
                 *dev_intr_type = INTA;
         }
 
-        if (rx_ring_mode > 3) {
+        if ((rx_ring_mode != 1) && (rx_ring_mode != 2)) {
                 DBG_PRINT(ERR_DBG, "s2io: Requested ring mode not supported\n");
-                DBG_PRINT(ERR_DBG, "s2io: Defaulting to 3-buffer mode\n");
-                rx_ring_mode = 3;
+                DBG_PRINT(ERR_DBG, "s2io: Defaulting to 1-buffer mode\n");
+                rx_ring_mode = 1;
         }
         return SUCCESS;
 }
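
The parameter check above is the user-visible behaviour change: rx_ring_mode 3 is no longer accepted, and out-of-range values now fall back to 1-buffer mode instead of 3-buffer mode. A standalone rendering of the new logic, with DBG_PRINT stood in by fprintf:

#include <stdio.h>

static int verify_rx_ring_mode(int rx_ring_mode)
{
        if ((rx_ring_mode != 1) && (rx_ring_mode != 2)) {
                fprintf(stderr, "s2io: Requested ring mode not supported\n");
                fprintf(stderr, "s2io: Defaulting to 1-buffer mode\n");
                rx_ring_mode = 1;
        }
        return rx_ring_mode;
}

int main(void)
{
        printf("%d\n", verify_rx_ring_mode(3));  /* prints 1: rejected */
        printf("%d\n", verify_rx_ring_mode(2));  /* prints 2: 2-buffer mode */
        return 0;
}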
@@ -7288,8 +7151,6 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
         sp->rxd_mode = RXD_MODE_1;
         if (rx_ring_mode == 2)
                 sp->rxd_mode = RXD_MODE_3B;
-        if (rx_ring_mode == 3)
-                sp->rxd_mode = RXD_MODE_3A;
 
         sp->intr_type = dev_intr_type;
 
@@ -7565,10 +7426,6 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
                 DBG_PRINT(ERR_DBG, "%s: 2-Buffer receive mode enabled\n",
                           dev->name);
                 break;
-        case RXD_MODE_3A:
-                DBG_PRINT(ERR_DBG, "%s: 3-Buffer receive mode enabled\n",
-                          dev->name);
-                break;
         }
 
         if (napi)
diff --git a/drivers/net/s2io.h b/drivers/net/s2io.h
index 3887fe63a908..ba443f6b0137 100644
--- a/drivers/net/s2io.h
+++ b/drivers/net/s2io.h
@@ -575,8 +575,7 @@ struct RxD_block {
 #define SIZE_OF_BLOCK 4096
 
 #define RXD_MODE_1 0 /* One Buffer mode */
-#define RXD_MODE_3A 1 /* Three Buffer mode */
-#define RXD_MODE_3B 2 /* Two Buffer mode */
+#define RXD_MODE_3B 1 /* Two Buffer mode */
 
 /* Structure to hold virtual addresses of Buf0 and Buf1 in
  * 2buf mode. */