author     Sivakumar Subramani <Sivakumar.Subramani@neterion.com>    2007-01-31 13:28:08 -0500
committer  Jeff Garzik <jeff@garzik.org>                             2007-02-05 16:58:51 -0500
commit     db874e65ae93861461f83658fdec08368252cd2e
tree       4e48b384d51a87f937caf4d945b1593a3abd8fef /drivers
parent     7517c1b78759921daa679f1efba5d5dc0c81930e
s2io: Make NAPI and UFO module loadable parameters
This patch adds two module load parameters, napi and ufo. Previously
NAPI was a compile-time option; with these changes NAPI can be enabled
or disabled at load time through the napi parameter. The ufo parameter
is introduced to enable or disable the UFO (UDP Fragmentation Offload)
feature.
Signed-off-by: Sivakumar Subramani <sivakumar.subramani@neterion.com>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
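For context, the two new knobs are declared with the driver's S2IO_PARM_INT() helper (see the first hunk below). A minimal sketch of what such a declaration amounts to is given here; the exact macro expansion and the 0 permission value are assumptions for illustration, not copied from s2io.c:

	#include <linux/module.h>
	#include <linux/moduleparam.h>

	/* Declare an unsigned int module parameter with a default value and
	 * no sysfs visibility (perm 0). S2IO_PARM_INT(napi, 1) is assumed to
	 * expand along these lines. */
	#define S2IO_PARM_INT(X, def_val) \
		static unsigned int X = def_val; \
		module_param(X, uint, 0)

	S2IO_PARM_INT(napi, 1);	/* NAPI polling on by default */
	S2IO_PARM_INT(ufo, 0);	/* UFO off by default */

With the parameters in place the behaviour is chosen at load time, e.g. "modprobe s2io napi=0" to fall back to the plain interrupt path, or "modprobe s2io ufo=1" to enable UDP Fragmentation Offload on Xframe II hardware, instead of rebuilding with or without CONFIG_S2IO_NAPI.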
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/net/s2io.c  162
-rw-r--r--  drivers/net/s2io.h    8
2 files changed, 77 insertions, 93 deletions
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c
index 36937cb1fd66..063040bc62d7 100644
--- a/drivers/net/s2io.c
+++ b/drivers/net/s2io.c
@@ -401,9 +401,10 @@ S2IO_PARM_INT(lro, 0);
  * aggregation happens until we hit max IP pkt size(64K)
  */
 S2IO_PARM_INT(lro_max_pkts, 0xFFFF);
-#ifndef CONFIG_S2IO_NAPI
 S2IO_PARM_INT(indicate_max_pkts, 0);
-#endif
+
+S2IO_PARM_INT(napi, 1);
+S2IO_PARM_INT(ufo, 0);
 
 static unsigned int tx_fifo_len[MAX_TX_FIFOS] =
     {DEFAULT_FIFO_0_LEN, [1 ...(MAX_TX_FIFOS - 1)] = DEFAULT_FIFO_1_7_LEN};
@@ -2274,9 +2275,7 @@ static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
 	struct config_param *config;
 	u64 tmp;
 	buffAdd_t *ba;
-#ifndef CONFIG_S2IO_NAPI
 	unsigned long flags;
-#endif
 	RxD_t *first_rxdp = NULL;
 
 	mac_control = &nic->mac_control;
@@ -2320,12 +2319,15 @@ static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
 			DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n",
 				  dev->name, rxdp);
 		}
-#ifndef CONFIG_S2IO_NAPI
-		spin_lock_irqsave(&nic->put_lock, flags);
-		mac_control->rings[ring_no].put_pos =
-		    (block_no * (rxd_count[nic->rxd_mode] + 1)) + off;
-		spin_unlock_irqrestore(&nic->put_lock, flags);
-#endif
+		if(!napi) {
+			spin_lock_irqsave(&nic->put_lock, flags);
+			mac_control->rings[ring_no].put_pos =
+			(block_no * (rxd_count[nic->rxd_mode] + 1)) + off;
+			spin_unlock_irqrestore(&nic->put_lock, flags);
+		} else {
+			mac_control->rings[ring_no].put_pos =
+			(block_no * (rxd_count[nic->rxd_mode] + 1)) + off;
+		}
 		if ((rxdp->Control_1 & RXD_OWN_XENA) &&
 		    ((nic->rxd_mode >= RXD_MODE_3A) &&
 			(rxdp->Control_2 & BIT(0)))) {
@@ -2568,7 +2570,6 @@ static void free_rx_buffers(struct s2io_nic *sp)
  * 0 on success and 1 if there are No Rx packets to be processed.
  */
 
-#if defined(CONFIG_S2IO_NAPI)
 static int s2io_poll(struct net_device *dev, int *budget)
 {
 	nic_t *nic = dev->priv;
@@ -2633,7 +2634,6 @@ no_rx:
 	atomic_dec(&nic->isr_cnt);
 	return 1;
 }
-#endif
 
 #ifdef CONFIG_NET_POLL_CONTROLLER
 /**
@@ -2707,9 +2707,7 @@ static void rx_intr_handler(ring_info_t *ring_data)
 	rx_curr_get_info_t get_info, put_info;
 	RxD_t *rxdp;
 	struct sk_buff *skb;
-#ifndef CONFIG_S2IO_NAPI
 	int pkt_cnt = 0;
-#endif
 	int i;
 
 	spin_lock(&nic->rx_lock);
@@ -2725,16 +2723,18 @@ static void rx_intr_handler(ring_info_t *ring_data)
 	put_info = ring_data->rx_curr_put_info;
 	put_block = put_info.block_index;
 	rxdp = ring_data->rx_blocks[get_block].rxds[get_info.offset].virt_addr;
-#ifndef CONFIG_S2IO_NAPI
-	spin_lock(&nic->put_lock);
-	put_offset = ring_data->put_pos;
-	spin_unlock(&nic->put_lock);
-#else
-	put_offset = (put_block * (rxd_count[nic->rxd_mode] + 1)) +
-		put_info.offset;
-#endif
+	if (!napi) {
+		spin_lock(&nic->put_lock);
+		put_offset = ring_data->put_pos;
+		spin_unlock(&nic->put_lock);
+	} else
+		put_offset = ring_data->put_pos;
+
 	while (RXD_IS_UP2DT(rxdp)) {
-		/* If your are next to put index then it's FIFO full condition */
+		/*
+		 * If your are next to put index then it's
+		 * FIFO full condition
+		 */
 		if ((get_block == put_block) &&
 		    (get_info.offset + 1) == put_info.offset) {
 			DBG_PRINT(INTR_DBG, "%s: Ring Full\n",dev->name);
@@ -2792,15 +2792,12 @@ static void rx_intr_handler(ring_info_t *ring_data)
 			rxdp = ring_data->rx_blocks[get_block].block_virt_addr;
 		}
 
-#ifdef CONFIG_S2IO_NAPI
 		nic->pkts_to_process -= 1;
-		if (!nic->pkts_to_process)
+		if ((napi) && (!nic->pkts_to_process))
 			break;
-#else
 		pkt_cnt++;
 		if ((indicate_max_pkts) && (pkt_cnt > indicate_max_pkts))
 			break;
-#endif
 	}
 	if (nic->lro) {
 		/* Clear all LRO sessions before exiting */
@@ -4193,26 +4190,26 @@ static irqreturn_t s2io_isr(int irq, void *dev_id)
 	org_mask = readq(&bar0->general_int_mask);
 	writeq(val64, &bar0->general_int_mask);
 
-#ifdef CONFIG_S2IO_NAPI
-	if (reason & GEN_INTR_RXTRAFFIC) {
-		if (netif_rx_schedule_prep(dev)) {
-			writeq(val64, &bar0->rx_traffic_mask);
-			__netif_rx_schedule(dev);
+	if (napi) {
+		if (reason & GEN_INTR_RXTRAFFIC) {
+			if (netif_rx_schedule_prep(dev)) {
+				writeq(val64, &bar0->rx_traffic_mask);
+				__netif_rx_schedule(dev);
+			}
+		}
+	} else {
+		/*
+		 * Rx handler is called by default, without checking for the
+		 * cause of interrupt.
+		 * rx_traffic_int reg is an R1 register, writing all 1's
+		 * will ensure that the actual interrupt causing bit get's
+		 * cleared and hence a read can be avoided.
+		 */
+		writeq(val64, &bar0->rx_traffic_int);
+		for (i = 0; i < config->rx_ring_num; i++) {
+			rx_intr_handler(&mac_control->rings[i]);
 		}
 	}
-#else
-	/*
-	 * Rx handler is called by default, without checking for the
-	 * cause of interrupt.
-	 * rx_traffic_int reg is an R1 register, writing all 1's
-	 * will ensure that the actual interrupt causing bit get's
-	 * cleared and hence a read can be avoided.
-	 */
-	writeq(val64, &bar0->rx_traffic_int);
-	for (i = 0; i < config->rx_ring_num; i++) {
-		rx_intr_handler(&mac_control->rings[i]);
-	}
-#endif
 
 	/*
 	 * tx_traffic_int reg is an R1 register, writing all 1's
@@ -4231,11 +4228,14 @@ static irqreturn_t s2io_isr(int irq, void *dev_id)
 	 * reallocate the buffers from the interrupt handler itself,
 	 * else schedule a tasklet to reallocate the buffers.
 	 */
-#ifndef CONFIG_S2IO_NAPI
-	for (i = 0; i < config->rx_ring_num; i++)
-		s2io_chk_rx_buffers(sp, i);
-#endif
-	writeq(org_mask, &bar0->general_int_mask);
+	if (!napi) {
+		for (i = 0; i < config->rx_ring_num; i++)
+			s2io_chk_rx_buffers(sp, i);
+	}
+
+	writeq(0, &bar0->general_int_mask);
+	readl(&bar0->general_int_status);
+
 	atomic_dec(&sp->isr_cnt);
 	return IRQ_HANDLED;
 }
@@ -6578,23 +6578,20 @@ static int rx_osm_handler(ring_info_t *ring_data, RxD_t * rxdp)
 
 	if (!sp->lro) {
 		skb->protocol = eth_type_trans(skb, dev);
-#ifdef CONFIG_S2IO_NAPI
 		if (sp->vlgrp && RXD_GET_VLAN_TAG(rxdp->Control_2)) {
 			/* Queueing the vlan frame to the upper layer */
-			vlan_hwaccel_receive_skb(skb, sp->vlgrp,
-				RXD_GET_VLAN_TAG(rxdp->Control_2));
-		} else {
-			netif_receive_skb(skb);
-		}
-#else
-		if (sp->vlgrp && RXD_GET_VLAN_TAG(rxdp->Control_2)) {
-			/* Queueing the vlan frame to the upper layer */
-			vlan_hwaccel_rx(skb, sp->vlgrp,
-				RXD_GET_VLAN_TAG(rxdp->Control_2));
+			if (napi)
+				vlan_hwaccel_receive_skb(skb, sp->vlgrp,
+					RXD_GET_VLAN_TAG(rxdp->Control_2));
+			else
+				vlan_hwaccel_rx(skb, sp->vlgrp,
+					RXD_GET_VLAN_TAG(rxdp->Control_2));
 		} else {
-			netif_rx(skb);
+			if (napi)
+				netif_receive_skb(skb);
+			else
+				netif_rx(skb);
 		}
-#endif
 	} else {
 send_up:
 		queue_rx_frame(skb);
@@ -6695,13 +6692,9 @@ static int s2io_verify_parm(struct pci_dev *pdev, u8 *dev_intr_type)
 		DBG_PRINT(ERR_DBG, "s2io: Default to 8 Rx rings\n");
 		rx_ring_num = 8;
 	}
-#ifdef CONFIG_S2IO_NAPI
-	if (*dev_intr_type != INTA) {
-		DBG_PRINT(ERR_DBG, "s2io: NAPI cannot be enabled when "
-			  "MSI/MSI-X is enabled. Defaulting to INTA\n");
-		*dev_intr_type = INTA;
-	}
-#endif
+	if (*dev_intr_type != INTA)
+		napi = 0;
+
 #ifndef CONFIG_PCI_MSI
 	if (*dev_intr_type != INTA) {
 		DBG_PRINT(ERR_DBG, "s2io: This kernel does not support"
@@ -6962,10 +6955,8 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
 	 * will use eth_mac_addr() for dev->set_mac_address
 	 * mac address will be set every time dev->open() is called
 	 */
-#if defined(CONFIG_S2IO_NAPI)
 	dev->poll = s2io_poll;
 	dev->weight = 32;
-#endif
 
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	dev->poll_controller = s2io_netpoll;
@@ -6976,7 +6967,7 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
 	dev->features |= NETIF_F_HIGHDMA;
 	dev->features |= NETIF_F_TSO;
 	dev->features |= NETIF_F_TSO6;
-	if (sp->device_type & XFRAME_II_DEVICE) {
+	if ((sp->device_type & XFRAME_II_DEVICE) && (ufo)) {
 		dev->features |= NETIF_F_UFO;
 		dev->features |= NETIF_F_HW_CSUM;
 	}
@@ -7057,9 +7048,9 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
 
 	/* Initialize spinlocks */
 	spin_lock_init(&sp->tx_lock);
-#ifndef CONFIG_S2IO_NAPI
-	spin_lock_init(&sp->put_lock);
-#endif
+
+	if (!napi)
+		spin_lock_init(&sp->put_lock);
 	spin_lock_init(&sp->rx_lock);
 
 	/*
@@ -7120,9 +7111,9 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
 			  dev->name);
 		break;
 	}
-#ifdef CONFIG_S2IO_NAPI
-	DBG_PRINT(ERR_DBG, "%s: NAPI enabled\n", dev->name);
-#endif
+
+	if (napi)
+		DBG_PRINT(ERR_DBG, "%s: NAPI enabled\n", dev->name);
 	switch(sp->intr_type) {
 	case INTA:
 		DBG_PRINT(ERR_DBG, "%s: Interrupt type INTA\n", dev->name);
@@ -7137,7 +7128,9 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
 	if (sp->lro)
 		DBG_PRINT(ERR_DBG, "%s: Large receive offload enabled\n",
 			  dev->name);
-
+	if (ufo)
+		DBG_PRINT(ERR_DBG, "%s: UDP Fragmentation Offload(UFO)"
+			  " enabled\n", dev->name);
 	/* Initialize device name */
 	sprintf(sp->name, "%s Neterion %s", dev->name, sp->product_name);
 
@@ -7539,11 +7532,10 @@ static void queue_rx_frame(struct sk_buff *skb)
 	struct net_device *dev = skb->dev;
 
 	skb->protocol = eth_type_trans(skb, dev);
-#ifdef CONFIG_S2IO_NAPI
-	netif_receive_skb(skb);
-#else
-	netif_rx(skb);
-#endif
+	if (napi)
+		netif_receive_skb(skb);
+	else
+		netif_rx(skb);
 }
 
 static void lro_append_pkt(nic_t *sp, lro_t *lro, struct sk_buff *skb,
diff --git a/drivers/net/s2io.h b/drivers/net/s2io.h
index 3b0bafd273c8..577fa3ad214e 100644
--- a/drivers/net/s2io.h
+++ b/drivers/net/s2io.h
@@ -616,10 +616,8 @@ typedef struct ring_info {
 	 */
 	rx_curr_get_info_t rx_curr_get_info;
 
-#ifndef CONFIG_S2IO_NAPI
 	/* Index to the absolute position of the put pointer of Rx ring */
 	int put_pos;
-#endif
 
 	/* Buffer Address store. */
 	buffAdd_t **ba;
@@ -738,13 +736,11 @@ typedef struct lro {
 /* Structure representing one instance of the NIC */
 struct s2io_nic {
 	int rxd_mode;
-#ifdef CONFIG_S2IO_NAPI
 	/*
 	 * Count of packets to be processed in a given iteration, it will be indicated
 	 * by the quota field of the device structure when NAPI is enabled.
 	 */
 	int pkts_to_process;
-#endif
 	struct net_device *dev;
 	mac_info_t mac_control;
 	struct config_param config;
@@ -775,9 +771,7 @@ struct s2io_nic {
 	atomic_t rx_bufs_left[MAX_RX_RINGS];
 
 	spinlock_t tx_lock;
-#ifndef CONFIG_S2IO_NAPI
 	spinlock_t put_lock;
-#endif
 
 #define PROMISC 1
 #define ALL_MULTI 2
@@ -985,9 +979,7 @@ static void s2io_tasklet(unsigned long dev_addr);
 static void s2io_set_multicast(struct net_device *dev);
 static int rx_osm_handler(ring_info_t *ring_data, RxD_t * rxdp);
 static void s2io_link(nic_t * sp, int link);
-#if defined(CONFIG_S2IO_NAPI)
 static int s2io_poll(struct net_device *dev, int *budget);
-#endif
 static void s2io_init_pci(nic_t * sp);
 static int s2io_set_mac_addr(struct net_device *dev, u8 * addr);
 static void s2io_alarm_handle(unsigned long data);