author     Sreenivasa Honnur <Sreenivasa.Honnur@neterion.com>   2008-04-28 21:08:45 -0400
committer  Jeff Garzik <jgarzik@redhat.com>                     2008-04-29 01:55:30 -0400
commit     0425b46a4beef234c522f183d5c2934edbb0f625 (patch)
tree       429f3eca8dde628cd6d4b9ceca421222445c9adf /drivers/net
parent     dfd44151e8888b964b7f2400f26794154a58c86b (diff)
S2io: Enable multi ring support
- Separate ring-specific data.
- Initialize all configured rings with equal priority.
- Update the boundary check on the number of Rx rings.
- Update the per-ring rx_bytes and rx_packets statistics.
- Move the lro struct from struct s2io_nic to struct ring_info.
- Access the respective Rx ring directly in fill_rx_buffers.
- Move rx_bufs_left from struct s2io_nic to struct ring_info.
- Add per-ring variables: rxd_mode, rxd_count, dev, pdev.
Signed-off-by: Surjit Reang <surjit.reang@neterion.com>
Signed-off-by: Sreenivasa Honnur <sreenivasa.honnur@neterion.com>
Signed-off-by: Ramkrishna Vepa <ram.vepa@neterion.com>
Signed-off-by: Jeff Garzik <jgarzik@redhat.com>
Diffstat (limited to 'drivers/net')
-rw-r--r--   drivers/net/s2io.c   335
-rw-r--r--   drivers/net/s2io.h    82
2 files changed, 226 insertions, 191 deletions
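
At a glance, the patch moves the state needed on the Rx hot path out of struct s2io_nic and into each ring, so the fill and interrupt paths work from a single ring pointer. The following is a condensed sketch of the per-ring fields involved, not the driver's full structure (the real definitions are in the s2io.h hunks at the end of the diff):

```c
/*
 * Simplified sketch only: per-ring state introduced by this patch, field
 * list condensed from the s2io.h hunks below; existing descriptor
 * bookkeeping is omitted.
 */
struct ring_info {
	int			ring_no;	/* ring index */
	u32			rx_bufs_left;	/* was atomic_t rx_bufs_left[] in struct s2io_nic */
	u8			lro;		/* LRO enabled on this ring */
	struct lro		lro0_n[MAX_LRO_SESSIONS]; /* per-ring LRO sessions */
	int			rxd_mode;	/* copy of sp->rxd_mode */
	int			rxd_count;	/* RxDs per block for that mode */
	unsigned		mtu;		/* copy of dev->mtu, set in s2io_card_up() */
	struct s2io_nic		*nic;		/* back pointers copied at init time */
	struct net_device	*dev;
	struct pci_dev		*pdev;
	unsigned long		rx_packets;	/* per-ring stats, summed in s2io_get_stats() */
	unsigned long		rx_bytes;
	/* ... rx_blocks[], ba, rx_curr_get/put_info, pkt_cnt, ... */
};

/* Rx hot-path entry points now take the ring instead of (nic, ring_no): */
static int fill_rx_buffers(struct ring_info *ring);
static int s2io_chk_rx_buffers(struct ring_info *ring);
```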
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c
index 157fd932e951..74cc80cc49b4 100644
--- a/drivers/net/s2io.c
+++ b/drivers/net/s2io.c
@@ -809,6 +809,7 @@ static int init_shared_mem(struct s2io_nic *nic) | |||
809 | config->rx_cfg[i].num_rxd - 1; | 809 | config->rx_cfg[i].num_rxd - 1; |
810 | mac_control->rings[i].nic = nic; | 810 | mac_control->rings[i].nic = nic; |
811 | mac_control->rings[i].ring_no = i; | 811 | mac_control->rings[i].ring_no = i; |
812 | mac_control->rings[i].lro = lro_enable; | ||
812 | 813 | ||
813 | blk_cnt = config->rx_cfg[i].num_rxd / | 814 | blk_cnt = config->rx_cfg[i].num_rxd / |
814 | (rxd_count[nic->rxd_mode] + 1); | 815 | (rxd_count[nic->rxd_mode] + 1); |
@@ -1560,113 +1561,112 @@ static int init_nic(struct s2io_nic *nic) | |||
1560 | writeq(val64, &bar0->tx_fifo_partition_0); | 1561 | writeq(val64, &bar0->tx_fifo_partition_0); |
1561 | 1562 | ||
1562 | /* Filling the Rx round robin registers as per the | 1563 | /* Filling the Rx round robin registers as per the |
1563 | * number of Rings and steering based on QoS. | 1564 | * number of Rings and steering based on QoS with |
1564 | */ | 1565 | * equal priority. |
1566 | */ | ||
1565 | switch (config->rx_ring_num) { | 1567 | switch (config->rx_ring_num) { |
1566 | case 1: | 1568 | case 1: |
1569 | val64 = 0x0; | ||
1570 | writeq(val64, &bar0->rx_w_round_robin_0); | ||
1571 | writeq(val64, &bar0->rx_w_round_robin_1); | ||
1572 | writeq(val64, &bar0->rx_w_round_robin_2); | ||
1573 | writeq(val64, &bar0->rx_w_round_robin_3); | ||
1574 | writeq(val64, &bar0->rx_w_round_robin_4); | ||
1575 | |||
1567 | val64 = 0x8080808080808080ULL; | 1576 | val64 = 0x8080808080808080ULL; |
1568 | writeq(val64, &bar0->rts_qos_steering); | 1577 | writeq(val64, &bar0->rts_qos_steering); |
1569 | break; | 1578 | break; |
1570 | case 2: | 1579 | case 2: |
1571 | val64 = 0x0000010000010000ULL; | 1580 | val64 = 0x0001000100010001ULL; |
1572 | writeq(val64, &bar0->rx_w_round_robin_0); | 1581 | writeq(val64, &bar0->rx_w_round_robin_0); |
1573 | val64 = 0x0100000100000100ULL; | ||
1574 | writeq(val64, &bar0->rx_w_round_robin_1); | 1582 | writeq(val64, &bar0->rx_w_round_robin_1); |
1575 | val64 = 0x0001000001000001ULL; | ||
1576 | writeq(val64, &bar0->rx_w_round_robin_2); | 1583 | writeq(val64, &bar0->rx_w_round_robin_2); |
1577 | val64 = 0x0000010000010000ULL; | ||
1578 | writeq(val64, &bar0->rx_w_round_robin_3); | 1584 | writeq(val64, &bar0->rx_w_round_robin_3); |
1579 | val64 = 0x0100000000000000ULL; | 1585 | val64 = 0x0001000100000000ULL; |
1580 | writeq(val64, &bar0->rx_w_round_robin_4); | 1586 | writeq(val64, &bar0->rx_w_round_robin_4); |
1581 | 1587 | ||
1582 | val64 = 0x8080808040404040ULL; | 1588 | val64 = 0x8080808040404040ULL; |
1583 | writeq(val64, &bar0->rts_qos_steering); | 1589 | writeq(val64, &bar0->rts_qos_steering); |
1584 | break; | 1590 | break; |
1585 | case 3: | 1591 | case 3: |
1586 | val64 = 0x0001000102000001ULL; | 1592 | val64 = 0x0001020001020001ULL; |
1587 | writeq(val64, &bar0->rx_w_round_robin_0); | 1593 | writeq(val64, &bar0->rx_w_round_robin_0); |
1588 | val64 = 0x0001020000010001ULL; | 1594 | val64 = 0x0200010200010200ULL; |
1589 | writeq(val64, &bar0->rx_w_round_robin_1); | 1595 | writeq(val64, &bar0->rx_w_round_robin_1); |
1590 | val64 = 0x0200000100010200ULL; | 1596 | val64 = 0x0102000102000102ULL; |
1591 | writeq(val64, &bar0->rx_w_round_robin_2); | 1597 | writeq(val64, &bar0->rx_w_round_robin_2); |
1592 | val64 = 0x0001000102000001ULL; | 1598 | val64 = 0x0001020001020001ULL; |
1593 | writeq(val64, &bar0->rx_w_round_robin_3); | 1599 | writeq(val64, &bar0->rx_w_round_robin_3); |
1594 | val64 = 0x0001020000000000ULL; | 1600 | val64 = 0x0200010200000000ULL; |
1595 | writeq(val64, &bar0->rx_w_round_robin_4); | 1601 | writeq(val64, &bar0->rx_w_round_robin_4); |
1596 | 1602 | ||
1597 | val64 = 0x8080804040402020ULL; | 1603 | val64 = 0x8080804040402020ULL; |
1598 | writeq(val64, &bar0->rts_qos_steering); | 1604 | writeq(val64, &bar0->rts_qos_steering); |
1599 | break; | 1605 | break; |
1600 | case 4: | 1606 | case 4: |
1601 | val64 = 0x0001020300010200ULL; | 1607 | val64 = 0x0001020300010203ULL; |
1602 | writeq(val64, &bar0->rx_w_round_robin_0); | 1608 | writeq(val64, &bar0->rx_w_round_robin_0); |
1603 | val64 = 0x0100000102030001ULL; | ||
1604 | writeq(val64, &bar0->rx_w_round_robin_1); | 1609 | writeq(val64, &bar0->rx_w_round_robin_1); |
1605 | val64 = 0x0200010000010203ULL; | ||
1606 | writeq(val64, &bar0->rx_w_round_robin_2); | 1610 | writeq(val64, &bar0->rx_w_round_robin_2); |
1607 | val64 = 0x0001020001000001ULL; | ||
1608 | writeq(val64, &bar0->rx_w_round_robin_3); | 1611 | writeq(val64, &bar0->rx_w_round_robin_3); |
1609 | val64 = 0x0203000100000000ULL; | 1612 | val64 = 0x0001020300000000ULL; |
1610 | writeq(val64, &bar0->rx_w_round_robin_4); | 1613 | writeq(val64, &bar0->rx_w_round_robin_4); |
1611 | 1614 | ||
1612 | val64 = 0x8080404020201010ULL; | 1615 | val64 = 0x8080404020201010ULL; |
1613 | writeq(val64, &bar0->rts_qos_steering); | 1616 | writeq(val64, &bar0->rts_qos_steering); |
1614 | break; | 1617 | break; |
1615 | case 5: | 1618 | case 5: |
1616 | val64 = 0x0001000203000102ULL; | 1619 | val64 = 0x0001020304000102ULL; |
1617 | writeq(val64, &bar0->rx_w_round_robin_0); | 1620 | writeq(val64, &bar0->rx_w_round_robin_0); |
1618 | val64 = 0x0001020001030004ULL; | 1621 | val64 = 0x0304000102030400ULL; |
1619 | writeq(val64, &bar0->rx_w_round_robin_1); | 1622 | writeq(val64, &bar0->rx_w_round_robin_1); |
1620 | val64 = 0x0001000203000102ULL; | 1623 | val64 = 0x0102030400010203ULL; |
1621 | writeq(val64, &bar0->rx_w_round_robin_2); | 1624 | writeq(val64, &bar0->rx_w_round_robin_2); |
1622 | val64 = 0x0001020001030004ULL; | 1625 | val64 = 0x0400010203040001ULL; |
1623 | writeq(val64, &bar0->rx_w_round_robin_3); | 1626 | writeq(val64, &bar0->rx_w_round_robin_3); |
1624 | val64 = 0x0001000000000000ULL; | 1627 | val64 = 0x0203040000000000ULL; |
1625 | writeq(val64, &bar0->rx_w_round_robin_4); | 1628 | writeq(val64, &bar0->rx_w_round_robin_4); |
1626 | 1629 | ||
1627 | val64 = 0x8080404020201008ULL; | 1630 | val64 = 0x8080404020201008ULL; |
1628 | writeq(val64, &bar0->rts_qos_steering); | 1631 | writeq(val64, &bar0->rts_qos_steering); |
1629 | break; | 1632 | break; |
1630 | case 6: | 1633 | case 6: |
1631 | val64 = 0x0001020304000102ULL; | 1634 | val64 = 0x0001020304050001ULL; |
1632 | writeq(val64, &bar0->rx_w_round_robin_0); | 1635 | writeq(val64, &bar0->rx_w_round_robin_0); |
1633 | val64 = 0x0304050001020001ULL; | 1636 | val64 = 0x0203040500010203ULL; |
1634 | writeq(val64, &bar0->rx_w_round_robin_1); | 1637 | writeq(val64, &bar0->rx_w_round_robin_1); |
1635 | val64 = 0x0203000100000102ULL; | 1638 | val64 = 0x0405000102030405ULL; |
1636 | writeq(val64, &bar0->rx_w_round_robin_2); | 1639 | writeq(val64, &bar0->rx_w_round_robin_2); |
1637 | val64 = 0x0304000102030405ULL; | 1640 | val64 = 0x0001020304050001ULL; |
1638 | writeq(val64, &bar0->rx_w_round_robin_3); | 1641 | writeq(val64, &bar0->rx_w_round_robin_3); |
1639 | val64 = 0x0001000200000000ULL; | 1642 | val64 = 0x0203040500000000ULL; |
1640 | writeq(val64, &bar0->rx_w_round_robin_4); | 1643 | writeq(val64, &bar0->rx_w_round_robin_4); |
1641 | 1644 | ||
1642 | val64 = 0x8080404020100804ULL; | 1645 | val64 = 0x8080404020100804ULL; |
1643 | writeq(val64, &bar0->rts_qos_steering); | 1646 | writeq(val64, &bar0->rts_qos_steering); |
1644 | break; | 1647 | break; |
1645 | case 7: | 1648 | case 7: |
1646 | val64 = 0x0001020001020300ULL; | 1649 | val64 = 0x0001020304050600ULL; |
1647 | writeq(val64, &bar0->rx_w_round_robin_0); | 1650 | writeq(val64, &bar0->rx_w_round_robin_0); |
1648 | val64 = 0x0102030400010203ULL; | 1651 | val64 = 0x0102030405060001ULL; |
1649 | writeq(val64, &bar0->rx_w_round_robin_1); | 1652 | writeq(val64, &bar0->rx_w_round_robin_1); |
1650 | val64 = 0x0405060001020001ULL; | 1653 | val64 = 0x0203040506000102ULL; |
1651 | writeq(val64, &bar0->rx_w_round_robin_2); | 1654 | writeq(val64, &bar0->rx_w_round_robin_2); |
1652 | val64 = 0x0304050000010200ULL; | 1655 | val64 = 0x0304050600010203ULL; |
1653 | writeq(val64, &bar0->rx_w_round_robin_3); | 1656 | writeq(val64, &bar0->rx_w_round_robin_3); |
1654 | val64 = 0x0102030000000000ULL; | 1657 | val64 = 0x0405060000000000ULL; |
1655 | writeq(val64, &bar0->rx_w_round_robin_4); | 1658 | writeq(val64, &bar0->rx_w_round_robin_4); |
1656 | 1659 | ||
1657 | val64 = 0x8080402010080402ULL; | 1660 | val64 = 0x8080402010080402ULL; |
1658 | writeq(val64, &bar0->rts_qos_steering); | 1661 | writeq(val64, &bar0->rts_qos_steering); |
1659 | break; | 1662 | break; |
1660 | case 8: | 1663 | case 8: |
1661 | val64 = 0x0001020300040105ULL; | 1664 | val64 = 0x0001020304050607ULL; |
1662 | writeq(val64, &bar0->rx_w_round_robin_0); | 1665 | writeq(val64, &bar0->rx_w_round_robin_0); |
1663 | val64 = 0x0200030106000204ULL; | ||
1664 | writeq(val64, &bar0->rx_w_round_robin_1); | 1666 | writeq(val64, &bar0->rx_w_round_robin_1); |
1665 | val64 = 0x0103000502010007ULL; | ||
1666 | writeq(val64, &bar0->rx_w_round_robin_2); | 1667 | writeq(val64, &bar0->rx_w_round_robin_2); |
1667 | val64 = 0x0304010002060500ULL; | ||
1668 | writeq(val64, &bar0->rx_w_round_robin_3); | 1668 | writeq(val64, &bar0->rx_w_round_robin_3); |
1669 | val64 = 0x0103020400000000ULL; | 1669 | val64 = 0x0001020300000000ULL; |
1670 | writeq(val64, &bar0->rx_w_round_robin_4); | 1670 | writeq(val64, &bar0->rx_w_round_robin_4); |
1671 | 1671 | ||
1672 | val64 = 0x8040201008040201ULL; | 1672 | val64 = 0x8040201008040201ULL; |
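
With every configured ring given equal priority, the new rx_w_round_robin values above follow a single rule: the five registers hold 36 one-byte ring-selection entries (the low four bytes of the fifth register are left zero), and entry i selects ring i % rx_ring_num. The rts_qos_steering values, which spread the eight QoS priorities over the rings, are unchanged by this patch. A small stand-alone C sketch (entry layout as inferred from this hunk, not driver code) reproduces the constants for any ring count:

```c
/* Stand-alone illustration: entry i of 36 selects ring (i % rings);
 * the low 4 bytes of the 5th register stay zero (inferred layout). */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int rings = 3;			/* any value from 1 to 8 */
	uint64_t reg[5] = { 0 };
	int i;

	for (i = 0; i < 36; i++)
		reg[i / 8] |= (uint64_t)(i % rings) << (56 - 8 * (i % 8));

	for (i = 0; i < 5; i++)
		printf("rx_w_round_robin_%d = 0x%016llxULL\n",
		       i, (unsigned long long)reg[i]);
	return 0;
}
```

Running it with rings = 3, for example, prints the case 3 values above (0x0001020001020001, 0x0200010200010200, ...), and rings = 1 yields the all-zero writes of case 1.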
@@ -2499,8 +2499,7 @@ static void stop_nic(struct s2io_nic *nic) | |||
2499 | 2499 | ||
2500 | /** | 2500 | /** |
2501 | * fill_rx_buffers - Allocates the Rx side skbs | 2501 | * fill_rx_buffers - Allocates the Rx side skbs |
2502 | * @nic: device private variable | 2502 | * @ring_info: per ring structure |
2503 | * @ring_no: ring number | ||
2504 | * Description: | 2503 | * Description: |
2505 | * The function allocates Rx side skbs and puts the physical | 2504 | * The function allocates Rx side skbs and puts the physical |
2506 | * address of these buffers into the RxD buffer pointers, so that the NIC | 2505 | * address of these buffers into the RxD buffer pointers, so that the NIC |
@@ -2518,103 +2517,94 @@ static void stop_nic(struct s2io_nic *nic) | |||
2518 | * SUCCESS on success or an appropriate -ve value on failure. | 2517 | * SUCCESS on success or an appropriate -ve value on failure. |
2519 | */ | 2518 | */ |
2520 | 2519 | ||
2521 | static int fill_rx_buffers(struct s2io_nic *nic, int ring_no) | 2520 | static int fill_rx_buffers(struct ring_info *ring) |
2522 | { | 2521 | { |
2523 | struct net_device *dev = nic->dev; | ||
2524 | struct sk_buff *skb; | 2522 | struct sk_buff *skb; |
2525 | struct RxD_t *rxdp; | 2523 | struct RxD_t *rxdp; |
2526 | int off, off1, size, block_no, block_no1; | 2524 | int off, size, block_no, block_no1; |
2527 | u32 alloc_tab = 0; | 2525 | u32 alloc_tab = 0; |
2528 | u32 alloc_cnt; | 2526 | u32 alloc_cnt; |
2529 | struct mac_info *mac_control; | ||
2530 | struct config_param *config; | ||
2531 | u64 tmp; | 2527 | u64 tmp; |
2532 | struct buffAdd *ba; | 2528 | struct buffAdd *ba; |
2533 | struct RxD_t *first_rxdp = NULL; | 2529 | struct RxD_t *first_rxdp = NULL; |
2534 | u64 Buffer0_ptr = 0, Buffer1_ptr = 0; | 2530 | u64 Buffer0_ptr = 0, Buffer1_ptr = 0; |
2531 | int rxd_index = 0; | ||
2535 | struct RxD1 *rxdp1; | 2532 | struct RxD1 *rxdp1; |
2536 | struct RxD3 *rxdp3; | 2533 | struct RxD3 *rxdp3; |
2537 | struct swStat *stats = &nic->mac_control.stats_info->sw_stat; | 2534 | struct swStat *stats = &ring->nic->mac_control.stats_info->sw_stat; |
2538 | 2535 | ||
2539 | mac_control = &nic->mac_control; | 2536 | alloc_cnt = ring->pkt_cnt - ring->rx_bufs_left; |
2540 | config = &nic->config; | ||
2541 | alloc_cnt = mac_control->rings[ring_no].pkt_cnt - | ||
2542 | atomic_read(&nic->rx_bufs_left[ring_no]); | ||
2543 | 2537 | ||
2544 | block_no1 = mac_control->rings[ring_no].rx_curr_get_info.block_index; | 2538 | block_no1 = ring->rx_curr_get_info.block_index; |
2545 | off1 = mac_control->rings[ring_no].rx_curr_get_info.offset; | ||
2546 | while (alloc_tab < alloc_cnt) { | 2539 | while (alloc_tab < alloc_cnt) { |
2547 | block_no = mac_control->rings[ring_no].rx_curr_put_info. | 2540 | block_no = ring->rx_curr_put_info.block_index; |
2548 | block_index; | ||
2549 | off = mac_control->rings[ring_no].rx_curr_put_info.offset; | ||
2550 | 2541 | ||
2551 | rxdp = mac_control->rings[ring_no]. | 2542 | off = ring->rx_curr_put_info.offset; |
2552 | rx_blocks[block_no].rxds[off].virt_addr; | 2543 | |
2544 | rxdp = ring->rx_blocks[block_no].rxds[off].virt_addr; | ||
2545 | |||
2546 | rxd_index = off + 1; | ||
2547 | if (block_no) | ||
2548 | rxd_index += (block_no * ring->rxd_count); | ||
2553 | 2549 | ||
2554 | if ((block_no == block_no1) && (off == off1) && | 2550 | if ((block_no == block_no1) && |
2555 | (rxdp->Host_Control)) { | 2551 | (off == ring->rx_curr_get_info.offset) && |
2552 | (rxdp->Host_Control)) { | ||
2556 | DBG_PRINT(INTR_DBG, "%s: Get and Put", | 2553 | DBG_PRINT(INTR_DBG, "%s: Get and Put", |
2557 | dev->name); | 2554 | ring->dev->name); |
2558 | DBG_PRINT(INTR_DBG, " info equated\n"); | 2555 | DBG_PRINT(INTR_DBG, " info equated\n"); |
2559 | goto end; | 2556 | goto end; |
2560 | } | 2557 | } |
2561 | if (off && (off == rxd_count[nic->rxd_mode])) { | 2558 | if (off && (off == ring->rxd_count)) { |
2562 | mac_control->rings[ring_no].rx_curr_put_info. | 2559 | ring->rx_curr_put_info.block_index++; |
2563 | block_index++; | 2560 | if (ring->rx_curr_put_info.block_index == |
2564 | if (mac_control->rings[ring_no].rx_curr_put_info. | 2561 | ring->block_count) |
2565 | block_index == mac_control->rings[ring_no]. | 2562 | ring->rx_curr_put_info.block_index = 0; |
2566 | block_count) | 2563 | block_no = ring->rx_curr_put_info.block_index; |
2567 | mac_control->rings[ring_no].rx_curr_put_info. | 2564 | off = 0; |
2568 | block_index = 0; | 2565 | ring->rx_curr_put_info.offset = off; |
2569 | block_no = mac_control->rings[ring_no]. | 2566 | rxdp = ring->rx_blocks[block_no].block_virt_addr; |
2570 | rx_curr_put_info.block_index; | ||
2571 | if (off == rxd_count[nic->rxd_mode]) | ||
2572 | off = 0; | ||
2573 | mac_control->rings[ring_no].rx_curr_put_info. | ||
2574 | offset = off; | ||
2575 | rxdp = mac_control->rings[ring_no]. | ||
2576 | rx_blocks[block_no].block_virt_addr; | ||
2577 | DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n", | 2567 | DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n", |
2578 | dev->name, rxdp); | 2568 | ring->dev->name, rxdp); |
2569 | |||
2579 | } | 2570 | } |
2580 | 2571 | ||
2581 | if ((rxdp->Control_1 & RXD_OWN_XENA) && | 2572 | if ((rxdp->Control_1 & RXD_OWN_XENA) && |
2582 | ((nic->rxd_mode == RXD_MODE_3B) && | 2573 | ((ring->rxd_mode == RXD_MODE_3B) && |
2583 | (rxdp->Control_2 & s2BIT(0)))) { | 2574 | (rxdp->Control_2 & s2BIT(0)))) { |
2584 | mac_control->rings[ring_no].rx_curr_put_info. | 2575 | ring->rx_curr_put_info.offset = off; |
2585 | offset = off; | ||
2586 | goto end; | 2576 | goto end; |
2587 | } | 2577 | } |
2588 | /* calculate size of skb based on ring mode */ | 2578 | /* calculate size of skb based on ring mode */ |
2589 | size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE + | 2579 | size = ring->mtu + HEADER_ETHERNET_II_802_3_SIZE + |
2590 | HEADER_802_2_SIZE + HEADER_SNAP_SIZE; | 2580 | HEADER_802_2_SIZE + HEADER_SNAP_SIZE; |
2591 | if (nic->rxd_mode == RXD_MODE_1) | 2581 | if (ring->rxd_mode == RXD_MODE_1) |
2592 | size += NET_IP_ALIGN; | 2582 | size += NET_IP_ALIGN; |
2593 | else | 2583 | else |
2594 | size = dev->mtu + ALIGN_SIZE + BUF0_LEN + 4; | 2584 | size = ring->mtu + ALIGN_SIZE + BUF0_LEN + 4; |
2595 | 2585 | ||
2596 | /* allocate skb */ | 2586 | /* allocate skb */ |
2597 | skb = dev_alloc_skb(size); | 2587 | skb = dev_alloc_skb(size); |
2598 | if(!skb) { | 2588 | if(!skb) { |
2599 | DBG_PRINT(INFO_DBG, "%s: Out of ", dev->name); | 2589 | DBG_PRINT(INFO_DBG, "%s: Out of ", ring->dev->name); |
2600 | DBG_PRINT(INFO_DBG, "memory to allocate SKBs\n"); | 2590 | DBG_PRINT(INFO_DBG, "memory to allocate SKBs\n"); |
2601 | if (first_rxdp) { | 2591 | if (first_rxdp) { |
2602 | wmb(); | 2592 | wmb(); |
2603 | first_rxdp->Control_1 |= RXD_OWN_XENA; | 2593 | first_rxdp->Control_1 |= RXD_OWN_XENA; |
2604 | } | 2594 | } |
2605 | nic->mac_control.stats_info->sw_stat. \ | 2595 | stats->mem_alloc_fail_cnt++; |
2606 | mem_alloc_fail_cnt++; | 2596 | |
2607 | return -ENOMEM ; | 2597 | return -ENOMEM ; |
2608 | } | 2598 | } |
2609 | nic->mac_control.stats_info->sw_stat.mem_allocated | 2599 | stats->mem_allocated += skb->truesize; |
2610 | += skb->truesize; | 2600 | |
2611 | if (nic->rxd_mode == RXD_MODE_1) { | 2601 | if (ring->rxd_mode == RXD_MODE_1) { |
2612 | /* 1 buffer mode - normal operation mode */ | 2602 | /* 1 buffer mode - normal operation mode */ |
2613 | rxdp1 = (struct RxD1*)rxdp; | 2603 | rxdp1 = (struct RxD1*)rxdp; |
2614 | memset(rxdp, 0, sizeof(struct RxD1)); | 2604 | memset(rxdp, 0, sizeof(struct RxD1)); |
2615 | skb_reserve(skb, NET_IP_ALIGN); | 2605 | skb_reserve(skb, NET_IP_ALIGN); |
2616 | rxdp1->Buffer0_ptr = pci_map_single | 2606 | rxdp1->Buffer0_ptr = pci_map_single |
2617 | (nic->pdev, skb->data, size - NET_IP_ALIGN, | 2607 | (ring->pdev, skb->data, size - NET_IP_ALIGN, |
2618 | PCI_DMA_FROMDEVICE); | 2608 | PCI_DMA_FROMDEVICE); |
2619 | if( (rxdp1->Buffer0_ptr == 0) || | 2609 | if( (rxdp1->Buffer0_ptr == 0) || |
2620 | (rxdp1->Buffer0_ptr == | 2610 | (rxdp1->Buffer0_ptr == |
@@ -2623,8 +2613,8 @@ static int fill_rx_buffers(struct s2io_nic *nic, int ring_no) | |||
2623 | 2613 | ||
2624 | rxdp->Control_2 = | 2614 | rxdp->Control_2 = |
2625 | SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN); | 2615 | SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN); |
2626 | 2616 | rxdp->Host_Control = (unsigned long) (skb); | |
2627 | } else if (nic->rxd_mode == RXD_MODE_3B) { | 2617 | } else if (ring->rxd_mode == RXD_MODE_3B) { |
2628 | /* | 2618 | /* |
2629 | * 2 buffer mode - | 2619 | * 2 buffer mode - |
2630 | * 2 buffer mode provides 128 | 2620 | * 2 buffer mode provides 128 |
@@ -2640,7 +2630,7 @@ static int fill_rx_buffers(struct s2io_nic *nic, int ring_no) | |||
2640 | rxdp3->Buffer0_ptr = Buffer0_ptr; | 2630 | rxdp3->Buffer0_ptr = Buffer0_ptr; |
2641 | rxdp3->Buffer1_ptr = Buffer1_ptr; | 2631 | rxdp3->Buffer1_ptr = Buffer1_ptr; |
2642 | 2632 | ||
2643 | ba = &mac_control->rings[ring_no].ba[block_no][off]; | 2633 | ba = &ring->ba[block_no][off]; |
2644 | skb_reserve(skb, BUF0_LEN); | 2634 | skb_reserve(skb, BUF0_LEN); |
2645 | tmp = (u64)(unsigned long) skb->data; | 2635 | tmp = (u64)(unsigned long) skb->data; |
2646 | tmp += ALIGN_SIZE; | 2636 | tmp += ALIGN_SIZE; |
@@ -2650,10 +2640,10 @@ static int fill_rx_buffers(struct s2io_nic *nic, int ring_no) | |||
2650 | 2640 | ||
2651 | if (!(rxdp3->Buffer0_ptr)) | 2641 | if (!(rxdp3->Buffer0_ptr)) |
2652 | rxdp3->Buffer0_ptr = | 2642 | rxdp3->Buffer0_ptr = |
2653 | pci_map_single(nic->pdev, ba->ba_0, BUF0_LEN, | 2643 | pci_map_single(ring->pdev, ba->ba_0, |
2654 | PCI_DMA_FROMDEVICE); | 2644 | BUF0_LEN, PCI_DMA_FROMDEVICE); |
2655 | else | 2645 | else |
2656 | pci_dma_sync_single_for_device(nic->pdev, | 2646 | pci_dma_sync_single_for_device(ring->pdev, |
2657 | (dma_addr_t) rxdp3->Buffer0_ptr, | 2647 | (dma_addr_t) rxdp3->Buffer0_ptr, |
2658 | BUF0_LEN, PCI_DMA_FROMDEVICE); | 2648 | BUF0_LEN, PCI_DMA_FROMDEVICE); |
2659 | if( (rxdp3->Buffer0_ptr == 0) || | 2649 | if( (rxdp3->Buffer0_ptr == 0) || |
@@ -2661,7 +2651,7 @@ static int fill_rx_buffers(struct s2io_nic *nic, int ring_no) | |||
2661 | goto pci_map_failed; | 2651 | goto pci_map_failed; |
2662 | 2652 | ||
2663 | rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN); | 2653 | rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN); |
2664 | if (nic->rxd_mode == RXD_MODE_3B) { | 2654 | if (ring->rxd_mode == RXD_MODE_3B) { |
2665 | /* Two buffer mode */ | 2655 | /* Two buffer mode */ |
2666 | 2656 | ||
2667 | /* | 2657 | /* |
@@ -2669,39 +2659,42 @@ static int fill_rx_buffers(struct s2io_nic *nic, int ring_no) | |||
2669 | * L4 payload | 2659 | * L4 payload |
2670 | */ | 2660 | */ |
2671 | rxdp3->Buffer2_ptr = pci_map_single | 2661 | rxdp3->Buffer2_ptr = pci_map_single |
2672 | (nic->pdev, skb->data, dev->mtu + 4, | 2662 | (ring->pdev, skb->data, ring->mtu + 4, |
2673 | PCI_DMA_FROMDEVICE); | 2663 | PCI_DMA_FROMDEVICE); |
2674 | 2664 | ||
2675 | if( (rxdp3->Buffer2_ptr == 0) || | 2665 | if( (rxdp3->Buffer2_ptr == 0) || |
2676 | (rxdp3->Buffer2_ptr == DMA_ERROR_CODE)) | 2666 | (rxdp3->Buffer2_ptr == DMA_ERROR_CODE)) |
2677 | goto pci_map_failed; | 2667 | goto pci_map_failed; |
2678 | 2668 | ||
2679 | rxdp3->Buffer1_ptr = | 2669 | if (!rxdp3->Buffer1_ptr) |
2680 | pci_map_single(nic->pdev, | 2670 | rxdp3->Buffer1_ptr = |
2671 | pci_map_single(ring->pdev, | ||
2681 | ba->ba_1, BUF1_LEN, | 2672 | ba->ba_1, BUF1_LEN, |
2682 | PCI_DMA_FROMDEVICE); | 2673 | PCI_DMA_FROMDEVICE); |
2674 | |||
2683 | if( (rxdp3->Buffer1_ptr == 0) || | 2675 | if( (rxdp3->Buffer1_ptr == 0) || |
2684 | (rxdp3->Buffer1_ptr == DMA_ERROR_CODE)) { | 2676 | (rxdp3->Buffer1_ptr == DMA_ERROR_CODE)) { |
2685 | pci_unmap_single | 2677 | pci_unmap_single |
2686 | (nic->pdev, | 2678 | (ring->pdev, |
2687 | (dma_addr_t)rxdp3->Buffer2_ptr, | 2679 | (dma_addr_t)(unsigned long) |
2688 | dev->mtu + 4, | 2680 | skb->data, |
2681 | ring->mtu + 4, | ||
2689 | PCI_DMA_FROMDEVICE); | 2682 | PCI_DMA_FROMDEVICE); |
2690 | goto pci_map_failed; | 2683 | goto pci_map_failed; |
2691 | } | 2684 | } |
2692 | rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1); | 2685 | rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1); |
2693 | rxdp->Control_2 |= SET_BUFFER2_SIZE_3 | 2686 | rxdp->Control_2 |= SET_BUFFER2_SIZE_3 |
2694 | (dev->mtu + 4); | 2687 | (ring->mtu + 4); |
2695 | } | 2688 | } |
2696 | rxdp->Control_2 |= s2BIT(0); | 2689 | rxdp->Control_2 |= s2BIT(0); |
2690 | rxdp->Host_Control = (unsigned long) (skb); | ||
2697 | } | 2691 | } |
2698 | rxdp->Host_Control = (unsigned long) (skb); | ||
2699 | if (alloc_tab & ((1 << rxsync_frequency) - 1)) | 2692 | if (alloc_tab & ((1 << rxsync_frequency) - 1)) |
2700 | rxdp->Control_1 |= RXD_OWN_XENA; | 2693 | rxdp->Control_1 |= RXD_OWN_XENA; |
2701 | off++; | 2694 | off++; |
2702 | if (off == (rxd_count[nic->rxd_mode] + 1)) | 2695 | if (off == (ring->rxd_count + 1)) |
2703 | off = 0; | 2696 | off = 0; |
2704 | mac_control->rings[ring_no].rx_curr_put_info.offset = off; | 2697 | ring->rx_curr_put_info.offset = off; |
2705 | 2698 | ||
2706 | rxdp->Control_2 |= SET_RXD_MARKER; | 2699 | rxdp->Control_2 |= SET_RXD_MARKER; |
2707 | if (!(alloc_tab & ((1 << rxsync_frequency) - 1))) { | 2700 | if (!(alloc_tab & ((1 << rxsync_frequency) - 1))) { |
@@ -2711,7 +2704,7 @@ static int fill_rx_buffers(struct s2io_nic *nic, int ring_no) | |||
2711 | } | 2704 | } |
2712 | first_rxdp = rxdp; | 2705 | first_rxdp = rxdp; |
2713 | } | 2706 | } |
2714 | atomic_inc(&nic->rx_bufs_left[ring_no]); | 2707 | ring->rx_bufs_left += 1; |
2715 | alloc_tab++; | 2708 | alloc_tab++; |
2716 | } | 2709 | } |
2717 | 2710 | ||
@@ -2783,7 +2776,7 @@ static void free_rxd_blk(struct s2io_nic *sp, int ring_no, int blk) | |||
2783 | } | 2776 | } |
2784 | sp->mac_control.stats_info->sw_stat.mem_freed += skb->truesize; | 2777 | sp->mac_control.stats_info->sw_stat.mem_freed += skb->truesize; |
2785 | dev_kfree_skb(skb); | 2778 | dev_kfree_skb(skb); |
2786 | atomic_dec(&sp->rx_bufs_left[ring_no]); | 2779 | mac_control->rings[ring_no].rx_bufs_left -= 1; |
2787 | } | 2780 | } |
2788 | } | 2781 | } |
2789 | 2782 | ||
@@ -2814,7 +2807,7 @@ static void free_rx_buffers(struct s2io_nic *sp) | |||
2814 | mac_control->rings[i].rx_curr_get_info.block_index = 0; | 2807 | mac_control->rings[i].rx_curr_get_info.block_index = 0; |
2815 | mac_control->rings[i].rx_curr_put_info.offset = 0; | 2808 | mac_control->rings[i].rx_curr_put_info.offset = 0; |
2816 | mac_control->rings[i].rx_curr_get_info.offset = 0; | 2809 | mac_control->rings[i].rx_curr_get_info.offset = 0; |
2817 | atomic_set(&sp->rx_bufs_left[i], 0); | 2810 | mac_control->rings[i].rx_bufs_left = 0; |
2818 | DBG_PRINT(INIT_DBG, "%s:Freed 0x%x Rx Buffers on ring%d\n", | 2811 | DBG_PRINT(INIT_DBG, "%s:Freed 0x%x Rx Buffers on ring%d\n", |
2819 | dev->name, buf_cnt, i); | 2812 | dev->name, buf_cnt, i); |
2820 | } | 2813 | } |
@@ -2864,7 +2857,7 @@ static int s2io_poll(struct napi_struct *napi, int budget) | |||
2864 | netif_rx_complete(dev, napi); | 2857 | netif_rx_complete(dev, napi); |
2865 | 2858 | ||
2866 | for (i = 0; i < config->rx_ring_num; i++) { | 2859 | for (i = 0; i < config->rx_ring_num; i++) { |
2867 | if (fill_rx_buffers(nic, i) == -ENOMEM) { | 2860 | if (fill_rx_buffers(&mac_control->rings[i]) == -ENOMEM) { |
2868 | DBG_PRINT(INFO_DBG, "%s:Out of memory", dev->name); | 2861 | DBG_PRINT(INFO_DBG, "%s:Out of memory", dev->name); |
2869 | DBG_PRINT(INFO_DBG, " in Rx Poll!!\n"); | 2862 | DBG_PRINT(INFO_DBG, " in Rx Poll!!\n"); |
2870 | break; | 2863 | break; |
@@ -2877,7 +2870,7 @@ static int s2io_poll(struct napi_struct *napi, int budget) | |||
2877 | 2870 | ||
2878 | no_rx: | 2871 | no_rx: |
2879 | for (i = 0; i < config->rx_ring_num; i++) { | 2872 | for (i = 0; i < config->rx_ring_num; i++) { |
2880 | if (fill_rx_buffers(nic, i) == -ENOMEM) { | 2873 | if (fill_rx_buffers(&mac_control->rings[i]) == -ENOMEM) { |
2881 | DBG_PRINT(INFO_DBG, "%s:Out of memory", dev->name); | 2874 | DBG_PRINT(INFO_DBG, "%s:Out of memory", dev->name); |
2882 | DBG_PRINT(INFO_DBG, " in Rx Poll!!\n"); | 2875 | DBG_PRINT(INFO_DBG, " in Rx Poll!!\n"); |
2883 | break; | 2876 | break; |
@@ -2928,7 +2921,7 @@ static void s2io_netpoll(struct net_device *dev) | |||
2928 | rx_intr_handler(&mac_control->rings[i]); | 2921 | rx_intr_handler(&mac_control->rings[i]); |
2929 | 2922 | ||
2930 | for (i = 0; i < config->rx_ring_num; i++) { | 2923 | for (i = 0; i < config->rx_ring_num; i++) { |
2931 | if (fill_rx_buffers(nic, i) == -ENOMEM) { | 2924 | if (fill_rx_buffers(&mac_control->rings[i]) == -ENOMEM) { |
2932 | DBG_PRINT(INFO_DBG, "%s:Out of memory", dev->name); | 2925 | DBG_PRINT(INFO_DBG, "%s:Out of memory", dev->name); |
2933 | DBG_PRINT(INFO_DBG, " in Rx Netpoll!!\n"); | 2926 | DBG_PRINT(INFO_DBG, " in Rx Netpoll!!\n"); |
2934 | break; | 2927 | break; |
@@ -2953,8 +2946,6 @@ static void s2io_netpoll(struct net_device *dev) | |||
2953 | */ | 2946 | */ |
2954 | static void rx_intr_handler(struct ring_info *ring_data) | 2947 | static void rx_intr_handler(struct ring_info *ring_data) |
2955 | { | 2948 | { |
2956 | struct s2io_nic *nic = ring_data->nic; | ||
2957 | struct net_device *dev = (struct net_device *) nic->dev; | ||
2958 | int get_block, put_block; | 2949 | int get_block, put_block; |
2959 | struct rx_curr_get_info get_info, put_info; | 2950 | struct rx_curr_get_info get_info, put_info; |
2960 | struct RxD_t *rxdp; | 2951 | struct RxD_t *rxdp; |
@@ -2977,33 +2968,34 @@ static void rx_intr_handler(struct ring_info *ring_data) | |||
2977 | */ | 2968 | */ |
2978 | if ((get_block == put_block) && | 2969 | if ((get_block == put_block) && |
2979 | (get_info.offset + 1) == put_info.offset) { | 2970 | (get_info.offset + 1) == put_info.offset) { |
2980 | DBG_PRINT(INTR_DBG, "%s: Ring Full\n",dev->name); | 2971 | DBG_PRINT(INTR_DBG, "%s: Ring Full\n", |
2972 | ring_data->dev->name); | ||
2981 | break; | 2973 | break; |
2982 | } | 2974 | } |
2983 | skb = (struct sk_buff *) ((unsigned long)rxdp->Host_Control); | 2975 | skb = (struct sk_buff *) ((unsigned long)rxdp->Host_Control); |
2984 | if (skb == NULL) { | 2976 | if (skb == NULL) { |
2985 | DBG_PRINT(ERR_DBG, "%s: The skb is ", | 2977 | DBG_PRINT(ERR_DBG, "%s: The skb is ", |
2986 | dev->name); | 2978 | ring_data->dev->name); |
2987 | DBG_PRINT(ERR_DBG, "Null in Rx Intr\n"); | 2979 | DBG_PRINT(ERR_DBG, "Null in Rx Intr\n"); |
2988 | return; | 2980 | return; |
2989 | } | 2981 | } |
2990 | if (nic->rxd_mode == RXD_MODE_1) { | 2982 | if (ring_data->rxd_mode == RXD_MODE_1) { |
2991 | rxdp1 = (struct RxD1*)rxdp; | 2983 | rxdp1 = (struct RxD1*)rxdp; |
2992 | pci_unmap_single(nic->pdev, (dma_addr_t) | 2984 | pci_unmap_single(ring_data->pdev, (dma_addr_t) |
2993 | rxdp1->Buffer0_ptr, | 2985 | rxdp1->Buffer0_ptr, |
2994 | dev->mtu + | 2986 | ring_data->mtu + |
2995 | HEADER_ETHERNET_II_802_3_SIZE + | 2987 | HEADER_ETHERNET_II_802_3_SIZE + |
2996 | HEADER_802_2_SIZE + | 2988 | HEADER_802_2_SIZE + |
2997 | HEADER_SNAP_SIZE, | 2989 | HEADER_SNAP_SIZE, |
2998 | PCI_DMA_FROMDEVICE); | 2990 | PCI_DMA_FROMDEVICE); |
2999 | } else if (nic->rxd_mode == RXD_MODE_3B) { | 2991 | } else if (ring_data->rxd_mode == RXD_MODE_3B) { |
3000 | rxdp3 = (struct RxD3*)rxdp; | 2992 | rxdp3 = (struct RxD3*)rxdp; |
3001 | pci_dma_sync_single_for_cpu(nic->pdev, (dma_addr_t) | 2993 | pci_dma_sync_single_for_cpu(ring_data->pdev, (dma_addr_t) |
3002 | rxdp3->Buffer0_ptr, | 2994 | rxdp3->Buffer0_ptr, |
3003 | BUF0_LEN, PCI_DMA_FROMDEVICE); | 2995 | BUF0_LEN, PCI_DMA_FROMDEVICE); |
3004 | pci_unmap_single(nic->pdev, (dma_addr_t) | 2996 | pci_unmap_single(ring_data->pdev, (dma_addr_t) |
3005 | rxdp3->Buffer2_ptr, | 2997 | rxdp3->Buffer2_ptr, |
3006 | dev->mtu + 4, | 2998 | ring_data->mtu + 4, |
3007 | PCI_DMA_FROMDEVICE); | 2999 | PCI_DMA_FROMDEVICE); |
3008 | } | 3000 | } |
3009 | prefetch(skb->data); | 3001 | prefetch(skb->data); |
@@ -3012,7 +3004,7 @@ static void rx_intr_handler(struct ring_info *ring_data) | |||
3012 | ring_data->rx_curr_get_info.offset = get_info.offset; | 3004 | ring_data->rx_curr_get_info.offset = get_info.offset; |
3013 | rxdp = ring_data->rx_blocks[get_block]. | 3005 | rxdp = ring_data->rx_blocks[get_block]. |
3014 | rxds[get_info.offset].virt_addr; | 3006 | rxds[get_info.offset].virt_addr; |
3015 | if (get_info.offset == rxd_count[nic->rxd_mode]) { | 3007 | if (get_info.offset == rxd_count[ring_data->rxd_mode]) { |
3016 | get_info.offset = 0; | 3008 | get_info.offset = 0; |
3017 | ring_data->rx_curr_get_info.offset = get_info.offset; | 3009 | ring_data->rx_curr_get_info.offset = get_info.offset; |
3018 | get_block++; | 3010 | get_block++; |
@@ -3022,19 +3014,21 @@ static void rx_intr_handler(struct ring_info *ring_data) | |||
3022 | rxdp = ring_data->rx_blocks[get_block].block_virt_addr; | 3014 | rxdp = ring_data->rx_blocks[get_block].block_virt_addr; |
3023 | } | 3015 | } |
3024 | 3016 | ||
3025 | nic->pkts_to_process -= 1; | 3017 | if(ring_data->nic->config.napi){ |
3026 | if ((napi) && (!nic->pkts_to_process)) | 3018 | ring_data->nic->pkts_to_process -= 1; |
3027 | break; | 3019 | if (!ring_data->nic->pkts_to_process) |
3020 | break; | ||
3021 | } | ||
3028 | pkt_cnt++; | 3022 | pkt_cnt++; |
3029 | if ((indicate_max_pkts) && (pkt_cnt > indicate_max_pkts)) | 3023 | if ((indicate_max_pkts) && (pkt_cnt > indicate_max_pkts)) |
3030 | break; | 3024 | break; |
3031 | } | 3025 | } |
3032 | if (nic->lro) { | 3026 | if (ring_data->lro) { |
3033 | /* Clear all LRO sessions before exiting */ | 3027 | /* Clear all LRO sessions before exiting */ |
3034 | for (i=0; i<MAX_LRO_SESSIONS; i++) { | 3028 | for (i=0; i<MAX_LRO_SESSIONS; i++) { |
3035 | struct lro *lro = &nic->lro0_n[i]; | 3029 | struct lro *lro = &ring_data->lro0_n[i]; |
3036 | if (lro->in_use) { | 3030 | if (lro->in_use) { |
3037 | update_L3L4_header(nic, lro); | 3031 | update_L3L4_header(ring_data->nic, lro); |
3038 | queue_rx_frame(lro->parent, lro->vlan_tag); | 3032 | queue_rx_frame(lro->parent, lro->vlan_tag); |
3039 | clear_lro_session(lro); | 3033 | clear_lro_session(lro); |
3040 | } | 3034 | } |
@@ -4333,10 +4327,10 @@ s2io_alarm_handle(unsigned long data) | |||
4333 | mod_timer(&sp->alarm_timer, jiffies + HZ / 2); | 4327 | mod_timer(&sp->alarm_timer, jiffies + HZ / 2); |
4334 | } | 4328 | } |
4335 | 4329 | ||
4336 | static int s2io_chk_rx_buffers(struct s2io_nic *sp, int rng_n) | 4330 | static int s2io_chk_rx_buffers(struct ring_info *ring) |
4337 | { | 4331 | { |
4338 | if (fill_rx_buffers(sp, rng_n) == -ENOMEM) { | 4332 | if (fill_rx_buffers(ring) == -ENOMEM) { |
4339 | DBG_PRINT(INFO_DBG, "%s:Out of memory", sp->dev->name); | 4333 | DBG_PRINT(INFO_DBG, "%s:Out of memory", ring->dev->name); |
4340 | DBG_PRINT(INFO_DBG, " in Rx Intr!!\n"); | 4334 | DBG_PRINT(INFO_DBG, " in Rx Intr!!\n"); |
4341 | } | 4335 | } |
4342 | return 0; | 4336 | return 0; |
@@ -4351,7 +4345,7 @@ static irqreturn_t s2io_msix_ring_handle(int irq, void *dev_id) | |||
4351 | return IRQ_HANDLED; | 4345 | return IRQ_HANDLED; |
4352 | 4346 | ||
4353 | rx_intr_handler(ring); | 4347 | rx_intr_handler(ring); |
4354 | s2io_chk_rx_buffers(sp, ring->ring_no); | 4348 | s2io_chk_rx_buffers(ring); |
4355 | 4349 | ||
4356 | return IRQ_HANDLED; | 4350 | return IRQ_HANDLED; |
4357 | } | 4351 | } |
@@ -4809,7 +4803,7 @@ static irqreturn_t s2io_isr(int irq, void *dev_id) | |||
4809 | */ | 4803 | */ |
4810 | if (!config->napi) { | 4804 | if (!config->napi) { |
4811 | for (i = 0; i < config->rx_ring_num; i++) | 4805 | for (i = 0; i < config->rx_ring_num; i++) |
4812 | s2io_chk_rx_buffers(sp, i); | 4806 | s2io_chk_rx_buffers(&mac_control->rings[i]); |
4813 | } | 4807 | } |
4814 | writeq(sp->general_int_mask, &bar0->general_int_mask); | 4808 | writeq(sp->general_int_mask, &bar0->general_int_mask); |
4815 | readl(&bar0->general_int_status); | 4809 | readl(&bar0->general_int_status); |
@@ -4866,6 +4860,7 @@ static struct net_device_stats *s2io_get_stats(struct net_device *dev) | |||
4866 | struct s2io_nic *sp = dev->priv; | 4860 | struct s2io_nic *sp = dev->priv; |
4867 | struct mac_info *mac_control; | 4861 | struct mac_info *mac_control; |
4868 | struct config_param *config; | 4862 | struct config_param *config; |
4863 | int i; | ||
4869 | 4864 | ||
4870 | 4865 | ||
4871 | mac_control = &sp->mac_control; | 4866 | mac_control = &sp->mac_control; |
@@ -4885,6 +4880,13 @@ static struct net_device_stats *s2io_get_stats(struct net_device *dev) | |||
4885 | sp->stats.rx_length_errors = | 4880 | sp->stats.rx_length_errors = |
4886 | le64_to_cpu(mac_control->stats_info->rmac_long_frms); | 4881 | le64_to_cpu(mac_control->stats_info->rmac_long_frms); |
4887 | 4882 | ||
4883 | /* collect per-ring rx_packets and rx_bytes */ | ||
4884 | sp->stats.rx_packets = sp->stats.rx_bytes = 0; | ||
4885 | for (i = 0; i < config->rx_ring_num; i++) { | ||
4886 | sp->stats.rx_packets += mac_control->rings[i].rx_packets; | ||
4887 | sp->stats.rx_bytes += mac_control->rings[i].rx_bytes; | ||
4888 | } | ||
4889 | |||
4888 | return (&sp->stats); | 4890 | return (&sp->stats); |
4889 | } | 4891 | } |
4890 | 4892 | ||
@@ -7157,7 +7159,9 @@ static int s2io_card_up(struct s2io_nic * sp) | |||
7157 | config = &sp->config; | 7159 | config = &sp->config; |
7158 | 7160 | ||
7159 | for (i = 0; i < config->rx_ring_num; i++) { | 7161 | for (i = 0; i < config->rx_ring_num; i++) { |
7160 | if ((ret = fill_rx_buffers(sp, i))) { | 7162 | mac_control->rings[i].mtu = dev->mtu; |
7163 | ret = fill_rx_buffers(&mac_control->rings[i]); | ||
7164 | if (ret) { | ||
7161 | DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n", | 7165 | DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n", |
7162 | dev->name); | 7166 | dev->name); |
7163 | s2io_reset(sp); | 7167 | s2io_reset(sp); |
@@ -7165,7 +7169,7 @@ static int s2io_card_up(struct s2io_nic * sp) | |||
7165 | return -ENOMEM; | 7169 | return -ENOMEM; |
7166 | } | 7170 | } |
7167 | DBG_PRINT(INFO_DBG, "Buf in ring:%d is %d:\n", i, | 7171 | DBG_PRINT(INFO_DBG, "Buf in ring:%d is %d:\n", i, |
7168 | atomic_read(&sp->rx_bufs_left[i])); | 7172 | mac_control->rings[i].rx_bufs_left); |
7169 | } | 7173 | } |
7170 | 7174 | ||
7171 | /* Initialise napi */ | 7175 | /* Initialise napi */ |
@@ -7300,7 +7304,7 @@ static void s2io_tx_watchdog(struct net_device *dev) | |||
7300 | static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp) | 7304 | static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp) |
7301 | { | 7305 | { |
7302 | struct s2io_nic *sp = ring_data->nic; | 7306 | struct s2io_nic *sp = ring_data->nic; |
7303 | struct net_device *dev = (struct net_device *) sp->dev; | 7307 | struct net_device *dev = (struct net_device *) ring_data->dev; |
7304 | struct sk_buff *skb = (struct sk_buff *) | 7308 | struct sk_buff *skb = (struct sk_buff *) |
7305 | ((unsigned long) rxdp->Host_Control); | 7309 | ((unsigned long) rxdp->Host_Control); |
7306 | int ring_no = ring_data->ring_no; | 7310 | int ring_no = ring_data->ring_no; |
@@ -7377,19 +7381,19 @@ static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp) | |||
7377 | sp->mac_control.stats_info->sw_stat.mem_freed | 7381 | sp->mac_control.stats_info->sw_stat.mem_freed |
7378 | += skb->truesize; | 7382 | += skb->truesize; |
7379 | dev_kfree_skb(skb); | 7383 | dev_kfree_skb(skb); |
7380 | atomic_dec(&sp->rx_bufs_left[ring_no]); | 7384 | ring_data->rx_bufs_left -= 1; |
7381 | rxdp->Host_Control = 0; | 7385 | rxdp->Host_Control = 0; |
7382 | return 0; | 7386 | return 0; |
7383 | } | 7387 | } |
7384 | } | 7388 | } |
7385 | 7389 | ||
7386 | /* Updating statistics */ | 7390 | /* Updating statistics */ |
7387 | sp->stats.rx_packets++; | 7391 | ring_data->rx_packets++; |
7388 | rxdp->Host_Control = 0; | 7392 | rxdp->Host_Control = 0; |
7389 | if (sp->rxd_mode == RXD_MODE_1) { | 7393 | if (sp->rxd_mode == RXD_MODE_1) { |
7390 | int len = RXD_GET_BUFFER0_SIZE_1(rxdp->Control_2); | 7394 | int len = RXD_GET_BUFFER0_SIZE_1(rxdp->Control_2); |
7391 | 7395 | ||
7392 | sp->stats.rx_bytes += len; | 7396 | ring_data->rx_bytes += len; |
7393 | skb_put(skb, len); | 7397 | skb_put(skb, len); |
7394 | 7398 | ||
7395 | } else if (sp->rxd_mode == RXD_MODE_3B) { | 7399 | } else if (sp->rxd_mode == RXD_MODE_3B) { |
@@ -7400,13 +7404,13 @@ static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp) | |||
7400 | unsigned char *buff = skb_push(skb, buf0_len); | 7404 | unsigned char *buff = skb_push(skb, buf0_len); |
7401 | 7405 | ||
7402 | struct buffAdd *ba = &ring_data->ba[get_block][get_off]; | 7406 | struct buffAdd *ba = &ring_data->ba[get_block][get_off]; |
7403 | sp->stats.rx_bytes += buf0_len + buf2_len; | 7407 | ring_data->rx_bytes += buf0_len + buf2_len; |
7404 | memcpy(buff, ba->ba_0, buf0_len); | 7408 | memcpy(buff, ba->ba_0, buf0_len); |
7405 | skb_put(skb, buf2_len); | 7409 | skb_put(skb, buf2_len); |
7406 | } | 7410 | } |
7407 | 7411 | ||
7408 | if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) && ((!sp->lro) || | 7412 | if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) && ((!ring_data->lro) || |
7409 | (sp->lro && (!(rxdp->Control_1 & RXD_FRAME_IP_FRAG)))) && | 7413 | (ring_data->lro && (!(rxdp->Control_1 & RXD_FRAME_IP_FRAG)))) && |
7410 | (sp->rx_csum)) { | 7414 | (sp->rx_csum)) { |
7411 | l3_csum = RXD_GET_L3_CKSUM(rxdp->Control_1); | 7415 | l3_csum = RXD_GET_L3_CKSUM(rxdp->Control_1); |
7412 | l4_csum = RXD_GET_L4_CKSUM(rxdp->Control_1); | 7416 | l4_csum = RXD_GET_L4_CKSUM(rxdp->Control_1); |
@@ -7417,14 +7421,14 @@ static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp) | |||
7417 | * a flag in the RxD. | 7421 | * a flag in the RxD. |
7418 | */ | 7422 | */ |
7419 | skb->ip_summed = CHECKSUM_UNNECESSARY; | 7423 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
7420 | if (sp->lro) { | 7424 | if (ring_data->lro) { |
7421 | u32 tcp_len; | 7425 | u32 tcp_len; |
7422 | u8 *tcp; | 7426 | u8 *tcp; |
7423 | int ret = 0; | 7427 | int ret = 0; |
7424 | 7428 | ||
7425 | ret = s2io_club_tcp_session(skb->data, &tcp, | 7429 | ret = s2io_club_tcp_session(ring_data, |
7426 | &tcp_len, &lro, | 7430 | skb->data, &tcp, &tcp_len, &lro, |
7427 | rxdp, sp); | 7431 | rxdp, sp); |
7428 | switch (ret) { | 7432 | switch (ret) { |
7429 | case 3: /* Begin anew */ | 7433 | case 3: /* Begin anew */ |
7430 | lro->parent = skb; | 7434 | lro->parent = skb; |
@@ -7486,7 +7490,7 @@ send_up: | |||
7486 | queue_rx_frame(skb, RXD_GET_VLAN_TAG(rxdp->Control_2)); | 7490 | queue_rx_frame(skb, RXD_GET_VLAN_TAG(rxdp->Control_2)); |
7487 | dev->last_rx = jiffies; | 7491 | dev->last_rx = jiffies; |
7488 | aggregate: | 7492 | aggregate: |
7489 | atomic_dec(&sp->rx_bufs_left[ring_no]); | 7493 | sp->mac_control.rings[ring_no].rx_bufs_left -= 1; |
7490 | return SUCCESS; | 7494 | return SUCCESS; |
7491 | } | 7495 | } |
7492 | 7496 | ||
@@ -7603,12 +7607,14 @@ static int s2io_verify_parm(struct pci_dev *pdev, u8 *dev_intr_type, | |||
7603 | tx_steering_type = NO_STEERING; | 7607 | tx_steering_type = NO_STEERING; |
7604 | } | 7608 | } |
7605 | 7609 | ||
7606 | if ( rx_ring_num > 8) { | 7610 | if (rx_ring_num > MAX_RX_RINGS) { |
7607 | DBG_PRINT(ERR_DBG, "s2io: Requested number of Rx rings not " | 7611 | DBG_PRINT(ERR_DBG, "s2io: Requested number of rx rings not " |
7608 | "supported\n"); | 7612 | "supported\n"); |
7609 | DBG_PRINT(ERR_DBG, "s2io: Default to 8 Rx rings\n"); | 7613 | DBG_PRINT(ERR_DBG, "s2io: Default to %d rx rings\n", |
7610 | rx_ring_num = 8; | 7614 | MAX_RX_RINGS); |
7615 | rx_ring_num = MAX_RX_RINGS; | ||
7611 | } | 7616 | } |
7617 | |||
7612 | if (*dev_intr_type != INTA) | 7618 | if (*dev_intr_type != INTA) |
7613 | napi = 0; | 7619 | napi = 0; |
7614 | 7620 | ||
@@ -7836,10 +7842,15 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre) | |||
7836 | 7842 | ||
7837 | /* Rx side parameters. */ | 7843 | /* Rx side parameters. */ |
7838 | config->rx_ring_num = rx_ring_num; | 7844 | config->rx_ring_num = rx_ring_num; |
7839 | for (i = 0; i < MAX_RX_RINGS; i++) { | 7845 | for (i = 0; i < config->rx_ring_num; i++) { |
7840 | config->rx_cfg[i].num_rxd = rx_ring_sz[i] * | 7846 | config->rx_cfg[i].num_rxd = rx_ring_sz[i] * |
7841 | (rxd_count[sp->rxd_mode] + 1); | 7847 | (rxd_count[sp->rxd_mode] + 1); |
7842 | config->rx_cfg[i].ring_priority = i; | 7848 | config->rx_cfg[i].ring_priority = i; |
7849 | mac_control->rings[i].rx_bufs_left = 0; | ||
7850 | mac_control->rings[i].rxd_mode = sp->rxd_mode; | ||
7851 | mac_control->rings[i].rxd_count = rxd_count[sp->rxd_mode]; | ||
7852 | mac_control->rings[i].pdev = sp->pdev; | ||
7853 | mac_control->rings[i].dev = sp->dev; | ||
7843 | } | 7854 | } |
7844 | 7855 | ||
7845 | for (i = 0; i < rx_ring_num; i++) { | 7856 | for (i = 0; i < rx_ring_num; i++) { |
@@ -7854,10 +7865,6 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre) | |||
7854 | mac_control->mc_pause_threshold_q4q7 = mc_pause_threshold_q4q7; | 7865 | mac_control->mc_pause_threshold_q4q7 = mc_pause_threshold_q4q7; |
7855 | 7866 | ||
7856 | 7867 | ||
7857 | /* Initialize Ring buffer parameters. */ | ||
7858 | for (i = 0; i < config->rx_ring_num; i++) | ||
7859 | atomic_set(&sp->rx_bufs_left[i], 0); | ||
7860 | |||
7861 | /* initialize the shared memory used by the NIC and the host */ | 7868 | /* initialize the shared memory used by the NIC and the host */ |
7862 | if (init_shared_mem(sp)) { | 7869 | if (init_shared_mem(sp)) { |
7863 | DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n", | 7870 | DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n", |
@@ -8077,6 +8084,9 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre) | |||
8077 | DBG_PRINT(ERR_DBG, "%s: Using %d Tx fifo(s)\n", dev->name, | 8084 | DBG_PRINT(ERR_DBG, "%s: Using %d Tx fifo(s)\n", dev->name, |
8078 | sp->config.tx_fifo_num); | 8085 | sp->config.tx_fifo_num); |
8079 | 8086 | ||
8087 | DBG_PRINT(ERR_DBG, "%s: Using %d Rx ring(s)\n", dev->name, | ||
8088 | sp->config.rx_ring_num); | ||
8089 | |||
8080 | switch(sp->config.intr_type) { | 8090 | switch(sp->config.intr_type) { |
8081 | case INTA: | 8091 | case INTA: |
8082 | DBG_PRINT(ERR_DBG, "%s: Interrupt type INTA\n", dev->name); | 8092 | DBG_PRINT(ERR_DBG, "%s: Interrupt type INTA\n", dev->name); |
@@ -8391,8 +8401,9 @@ static int verify_l3_l4_lro_capable(struct lro *l_lro, struct iphdr *ip, | |||
8391 | } | 8401 | } |
8392 | 8402 | ||
8393 | static int | 8403 | static int |
8394 | s2io_club_tcp_session(u8 *buffer, u8 **tcp, u32 *tcp_len, struct lro **lro, | 8404 | s2io_club_tcp_session(struct ring_info *ring_data, u8 *buffer, u8 **tcp, |
8395 | struct RxD_t *rxdp, struct s2io_nic *sp) | 8405 | u32 *tcp_len, struct lro **lro, struct RxD_t *rxdp, |
8406 | struct s2io_nic *sp) | ||
8396 | { | 8407 | { |
8397 | struct iphdr *ip; | 8408 | struct iphdr *ip; |
8398 | struct tcphdr *tcph; | 8409 | struct tcphdr *tcph; |
@@ -8410,7 +8421,7 @@ s2io_club_tcp_session(u8 *buffer, u8 **tcp, u32 *tcp_len, struct lro **lro, | |||
8410 | tcph = (struct tcphdr *)*tcp; | 8421 | tcph = (struct tcphdr *)*tcp; |
8411 | *tcp_len = get_l4_pyld_length(ip, tcph); | 8422 | *tcp_len = get_l4_pyld_length(ip, tcph); |
8412 | for (i=0; i<MAX_LRO_SESSIONS; i++) { | 8423 | for (i=0; i<MAX_LRO_SESSIONS; i++) { |
8413 | struct lro *l_lro = &sp->lro0_n[i]; | 8424 | struct lro *l_lro = &ring_data->lro0_n[i]; |
8414 | if (l_lro->in_use) { | 8425 | if (l_lro->in_use) { |
8415 | if (check_for_socket_match(l_lro, ip, tcph)) | 8426 | if (check_for_socket_match(l_lro, ip, tcph)) |
8416 | continue; | 8427 | continue; |
@@ -8448,7 +8459,7 @@ s2io_club_tcp_session(u8 *buffer, u8 **tcp, u32 *tcp_len, struct lro **lro, | |||
8448 | } | 8459 | } |
8449 | 8460 | ||
8450 | for (i=0; i<MAX_LRO_SESSIONS; i++) { | 8461 | for (i=0; i<MAX_LRO_SESSIONS; i++) { |
8451 | struct lro *l_lro = &sp->lro0_n[i]; | 8462 | struct lro *l_lro = &ring_data->lro0_n[i]; |
8452 | if (!(l_lro->in_use)) { | 8463 | if (!(l_lro->in_use)) { |
8453 | *lro = l_lro; | 8464 | *lro = l_lro; |
8454 | ret = 3; /* Begin anew */ | 8465 | ret = 3; /* Begin anew */ |
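
Before the s2io.h changes below, note why the LRO table travels with the ring: each Rx ring, and therefore each MSI-X ring handler, now searches and updates only its own lro0_n[] slots, presumably so rings can aggregate independently instead of sharing one adapter-wide table; this is also why s2io_club_tcp_session() grows the ring_data argument above. A minimal sketch of that per-ring lookup (simplified types, hypothetical helper name):

```c
/* Sketch only: mirrors the "find a free session" loop in the hunk above. */
#define MAX_LRO_SESSIONS 32

struct lro {
	int in_use;
	/* ... parent skb, header pointers, sequence state ... */
};

struct ring_info {
	struct lro lro0_n[MAX_LRO_SESSIONS];	/* sessions owned by this ring */
	/* ... other per-ring fields ... */
};

/* Hypothetical helper: pick an unused session belonging to this ring. */
static struct lro *s2io_find_free_lro(struct ring_info *ring)
{
	int i;

	for (i = 0; i < MAX_LRO_SESSIONS; i++)
		if (!ring->lro0_n[i].in_use)
			return &ring->lro0_n[i];
	return NULL;	/* no free slot: caller falls back to plain receive (sketch assumption) */
}
```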
diff --git a/drivers/net/s2io.h b/drivers/net/s2io.h
index ce53a02105f2..0709ebae9139 100644
--- a/drivers/net/s2io.h
+++ b/drivers/net/s2io.h
@@ -678,11 +678,53 @@ struct rx_block_info { | |||
678 | struct rxd_info *rxds; | 678 | struct rxd_info *rxds; |
679 | }; | 679 | }; |
680 | 680 | ||
681 | /* Data structure to represent a LRO session */ | ||
682 | struct lro { | ||
683 | struct sk_buff *parent; | ||
684 | struct sk_buff *last_frag; | ||
685 | u8 *l2h; | ||
686 | struct iphdr *iph; | ||
687 | struct tcphdr *tcph; | ||
688 | u32 tcp_next_seq; | ||
689 | __be32 tcp_ack; | ||
690 | int total_len; | ||
691 | int frags_len; | ||
692 | int sg_num; | ||
693 | int in_use; | ||
694 | __be16 window; | ||
695 | u16 vlan_tag; | ||
696 | u32 cur_tsval; | ||
697 | __be32 cur_tsecr; | ||
698 | u8 saw_ts; | ||
699 | } ____cacheline_aligned; | ||
700 | |||
681 | /* Ring specific structure */ | 701 | /* Ring specific structure */ |
682 | struct ring_info { | 702 | struct ring_info { |
683 | /* The ring number */ | 703 | /* The ring number */ |
684 | int ring_no; | 704 | int ring_no; |
685 | 705 | ||
706 | /* per-ring buffer counter */ | ||
707 | u32 rx_bufs_left; | ||
708 | |||
709 | #define MAX_LRO_SESSIONS 32 | ||
710 | struct lro lro0_n[MAX_LRO_SESSIONS]; | ||
711 | u8 lro; | ||
712 | |||
713 | /* copy of sp->rxd_mode flag */ | ||
714 | int rxd_mode; | ||
715 | |||
716 | /* Number of rxds per block for the rxd_mode */ | ||
717 | int rxd_count; | ||
718 | |||
719 | /* copy of sp pointer */ | ||
720 | struct s2io_nic *nic; | ||
721 | |||
722 | /* copy of sp->dev pointer */ | ||
723 | struct net_device *dev; | ||
724 | |||
725 | /* copy of sp->pdev pointer */ | ||
726 | struct pci_dev *pdev; | ||
727 | |||
686 | /* | 728 | /* |
687 | * Place holders for the virtual and physical addresses of | 729 | * Place holders for the virtual and physical addresses of |
688 | * all the Rx Blocks | 730 | * all the Rx Blocks |
@@ -703,10 +745,16 @@ struct ring_info { | |||
703 | */ | 745 | */ |
704 | struct rx_curr_get_info rx_curr_get_info; | 746 | struct rx_curr_get_info rx_curr_get_info; |
705 | 747 | ||
748 | /* interface MTU value */ | ||
749 | unsigned mtu; | ||
750 | |||
706 | /* Buffer Address store. */ | 751 | /* Buffer Address store. */ |
707 | struct buffAdd **ba; | 752 | struct buffAdd **ba; |
708 | struct s2io_nic *nic; | 753 | |
709 | }; | 754 | /* per-Ring statistics */ |
755 | unsigned long rx_packets; | ||
756 | unsigned long rx_bytes; | ||
757 | } ____cacheline_aligned; | ||
710 | 758 | ||
711 | /* Fifo specific structure */ | 759 | /* Fifo specific structure */ |
712 | struct fifo_info { | 760 | struct fifo_info { |
@@ -813,26 +861,6 @@ struct msix_info_st { | |||
813 | u64 data; | 861 | u64 data; |
814 | }; | 862 | }; |
815 | 863 | ||
816 | /* Data structure to represent a LRO session */ | ||
817 | struct lro { | ||
818 | struct sk_buff *parent; | ||
819 | struct sk_buff *last_frag; | ||
820 | u8 *l2h; | ||
821 | struct iphdr *iph; | ||
822 | struct tcphdr *tcph; | ||
823 | u32 tcp_next_seq; | ||
824 | __be32 tcp_ack; | ||
825 | int total_len; | ||
826 | int frags_len; | ||
827 | int sg_num; | ||
828 | int in_use; | ||
829 | __be16 window; | ||
830 | u16 vlan_tag; | ||
831 | u32 cur_tsval; | ||
832 | __be32 cur_tsecr; | ||
833 | u8 saw_ts; | ||
834 | } ____cacheline_aligned; | ||
835 | |||
836 | /* These flags represent the devices temporary state */ | 864 | /* These flags represent the devices temporary state */ |
837 | enum s2io_device_state_t | 865 | enum s2io_device_state_t |
838 | { | 866 | { |
@@ -872,8 +900,6 @@ struct s2io_nic { | |||
872 | /* Space to back up the PCI config space */ | 900 | /* Space to back up the PCI config space */ |
873 | u32 config_space[256 / sizeof(u32)]; | 901 | u32 config_space[256 / sizeof(u32)]; |
874 | 902 | ||
875 | atomic_t rx_bufs_left[MAX_RX_RINGS]; | ||
876 | |||
877 | #define PROMISC 1 | 903 | #define PROMISC 1 |
878 | #define ALL_MULTI 2 | 904 | #define ALL_MULTI 2 |
879 | 905 | ||
@@ -950,8 +976,6 @@ struct s2io_nic { | |||
950 | #define XFRAME_II_DEVICE 2 | 976 | #define XFRAME_II_DEVICE 2 |
951 | u8 device_type; | 977 | u8 device_type; |
952 | 978 | ||
953 | #define MAX_LRO_SESSIONS 32 | ||
954 | struct lro lro0_n[MAX_LRO_SESSIONS]; | ||
955 | unsigned long clubbed_frms_cnt; | 979 | unsigned long clubbed_frms_cnt; |
956 | unsigned long sending_both; | 980 | unsigned long sending_both; |
957 | u8 lro; | 981 | u8 lro; |
@@ -1118,9 +1142,9 @@ static int do_s2io_add_mc(struct s2io_nic *sp, u8 *addr); | |||
1118 | static int do_s2io_add_mac(struct s2io_nic *sp, u64 addr, int offset); | 1142 | static int do_s2io_add_mac(struct s2io_nic *sp, u64 addr, int offset); |
1119 | static int do_s2io_delete_unicast_mc(struct s2io_nic *sp, u64 addr); | 1143 | static int do_s2io_delete_unicast_mc(struct s2io_nic *sp, u64 addr); |
1120 | 1144 | ||
1121 | static int | 1145 | static int s2io_club_tcp_session(struct ring_info *ring_data, u8 *buffer, |
1122 | s2io_club_tcp_session(u8 *buffer, u8 **tcp, u32 *tcp_len, struct lro **lro, | 1146 | u8 **tcp, u32 *tcp_len, struct lro **lro, struct RxD_t *rxdp, |
1123 | struct RxD_t *rxdp, struct s2io_nic *sp); | 1147 | struct s2io_nic *sp); |
1124 | static void clear_lro_session(struct lro *lro); | 1148 | static void clear_lro_session(struct lro *lro); |
1125 | static void queue_rx_frame(struct sk_buff *skb, u16 vlan_tag); | 1149 | static void queue_rx_frame(struct sk_buff *skb, u16 vlan_tag); |
1126 | static void update_L3L4_header(struct s2io_nic *sp, struct lro *lro); | 1150 | static void update_L3L4_header(struct s2io_nic *sp, struct lro *lro); |