author     Sreenivasa Honnur <Sreenivasa.Honnur@neterion.com>   2008-02-20 16:44:07 -0500
committer  Jeff Garzik <jeff@garzik.org>                        2008-03-17 07:49:26 -0400
commit     3a3d5756ac552ee2d2cca6ba0912f0ff328e8357
tree       cf815d61905778d7fdded30b17b3a17e4406dda2   /drivers/net
parent     19a3da6c6e1e74ecac129a079139aaebb63fe6c8
S2io: Multiqueue network device support implementation
- Resubmit #3
Multiqueue network device support implementation.
- Added a loadable parameter "multiq" to enable/disable multiqueue support;
  it is disabled by default.
- skb->queue_mapping is not used for queue/FIFO selection; FIFO selection is
  based on skb->priority.
- Added per-FIFO flags FIFO_QUEUE_START and FIFO_QUEUE_STOP. These flags are
  checked before starting or stopping the netif queue and updated accordingly.
- In tx_intr_handler, added a check to ensure that free TXDs are available
  before waking up the queue.
- Added helper functions for queue manipulation (start/stop/wakeup) that
  invoke the appropriate netif_ functions.
- Calling netif_start/stop on link up/down respectively.
- As per Andi Kleen's review comments, using the skb->priority field for FIFO
  selection (see the sketch below the sign-offs).
Signed-off-by: Surjit Reang <surjit.reang@neterion.com>
Signed-off-by: Ramkrishna Vepa <ram.vepa@neterion.com>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
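
The FIFO-selection rule described above is small enough to model outside the
driver. The following self-contained C sketch is not part of the patch: the
fifo_mapping table contents and the select_tx_fifo() helper are invented for
illustration; only the masking expression, skb->priority & (MAX_TX_FIFOS - 1),
is taken from the patch itself.

#include <stdio.h>

#define MAX_TX_FIFOS 8

/* hypothetical fold-down of 8 priorities onto 4 configured FIFOs */
static const int fifo_mapping[MAX_TX_FIFOS] = { 0, 0, 1, 1, 2, 2, 3, 3 };

static int select_tx_fifo(unsigned int skb_priority)
{
        /* same masking the patch applies in s2io_xmit() */
        return fifo_mapping[skb_priority & (MAX_TX_FIFOS - 1)];
}

int main(void)
{
        unsigned int prio;

        /* priorities above 7 wrap around because of the mask */
        for (prio = 0; prio < 10; prio++)
                printf("skb->priority %u -> fifo %d\n",
                       prio, select_tx_fifo(prio));
        return 0;
}

Note that when multiq=1 the patch forces tx_fifo_num to MAX_TX_FIFOS, so the
real fifo_map row used is presumably a one-to-one mapping of priorities 0-7 to
the eight FIFOs; the table above shows a hypothetical 4-FIFO configuration
instead.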
Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/s2io.c  206
-rw-r--r--  drivers/net/s2io.h   10
2 files changed, 186 insertions(+), 30 deletions(-)
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c
index c72787adeba3..3ddc0aae60b6 100644
--- a/drivers/net/s2io.c
+++ b/drivers/net/s2io.c
@@ -50,6 +50,8 @@
  * Possible values '1' for enable , '0' for disable.
  * Default is '2' - which means disable in promisc mode
  * and enable in non-promiscuous mode.
+ * multiq: This parameter used to enable/disable MULTIQUEUE support.
+ * Possible values '1' for enable and '0' for disable. Default is '0'
  ************************************************************************/

 #include <linux/module.h>
@@ -458,8 +460,7 @@ MODULE_VERSION(DRV_VERSION);
 /* Module Loadable parameters. */
 S2IO_PARM_INT(tx_fifo_num, 1);
 S2IO_PARM_INT(rx_ring_num, 1);
-
-
+S2IO_PARM_INT(multiq, 0);
 S2IO_PARM_INT(rx_ring_mode, 1);
 S2IO_PARM_INT(use_continuous_tx_intrs, 1);
 S2IO_PARM_INT(rmac_pause_time, 0x100);
@@ -533,6 +534,101 @@ static struct pci_driver s2io_driver = {
 /* A simplifier macro used both by init and free shared_mem Fns(). */
 #define TXD_MEM_PAGE_CNT(len, per_each) ((len+per_each - 1) / per_each)

+/* netqueue manipulation helper functions */
+static inline void s2io_stop_all_tx_queue(struct s2io_nic *sp)
+{
+        int i;
+#ifdef CONFIG_NETDEVICES_MULTIQUEUE
+        if (sp->config.multiq) {
+                for (i = 0; i < sp->config.tx_fifo_num; i++)
+                        netif_stop_subqueue(sp->dev, i);
+        } else
+#endif
+        {
+                for (i = 0; i < sp->config.tx_fifo_num; i++)
+                        sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_STOP;
+                netif_stop_queue(sp->dev);
+        }
+}
+
+static inline void s2io_stop_tx_queue(struct s2io_nic *sp, int fifo_no)
+{
+#ifdef CONFIG_NETDEVICES_MULTIQUEUE
+        if (sp->config.multiq)
+                netif_stop_subqueue(sp->dev, fifo_no);
+        else
+#endif
+        {
+                sp->mac_control.fifos[fifo_no].queue_state =
+                        FIFO_QUEUE_STOP;
+                netif_stop_queue(sp->dev);
+        }
+}
+
+static inline void s2io_start_all_tx_queue(struct s2io_nic *sp)
+{
+        int i;
+#ifdef CONFIG_NETDEVICES_MULTIQUEUE
+        if (sp->config.multiq) {
+                for (i = 0; i < sp->config.tx_fifo_num; i++)
+                        netif_start_subqueue(sp->dev, i);
+        } else
+#endif
+        {
+                for (i = 0; i < sp->config.tx_fifo_num; i++)
+                        sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_START;
+                netif_start_queue(sp->dev);
+        }
+}
+
+static inline void s2io_start_tx_queue(struct s2io_nic *sp, int fifo_no)
+{
+#ifdef CONFIG_NETDEVICES_MULTIQUEUE
+        if (sp->config.multiq)
+                netif_start_subqueue(sp->dev, fifo_no);
+        else
+#endif
+        {
+                sp->mac_control.fifos[fifo_no].queue_state =
+                        FIFO_QUEUE_START;
+                netif_start_queue(sp->dev);
+        }
+}
+
+static inline void s2io_wake_all_tx_queue(struct s2io_nic *sp)
+{
+        int i;
+#ifdef CONFIG_NETDEVICES_MULTIQUEUE
+        if (sp->config.multiq) {
+                for (i = 0; i < sp->config.tx_fifo_num; i++)
+                        netif_wake_subqueue(sp->dev, i);
+        } else
+#endif
+        {
+                for (i = 0; i < sp->config.tx_fifo_num; i++)
+                        sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_START;
+                netif_wake_queue(sp->dev);
+        }
+}
+
+static inline void s2io_wake_tx_queue(
+        struct fifo_info *fifo, int cnt, u8 multiq)
+{
+
+#ifdef CONFIG_NETDEVICES_MULTIQUEUE
+        if (multiq) {
+                if (cnt && __netif_subqueue_stopped(fifo->dev, fifo->fifo_no))
+                        netif_wake_subqueue(fifo->dev, fifo->fifo_no);
+        } else
+#endif
+        if (cnt && (fifo->queue_state == FIFO_QUEUE_STOP)) {
+                if (netif_queue_stopped(fifo->dev)) {
+                        fifo->queue_state = FIFO_QUEUE_START;
+                        netif_wake_queue(fifo->dev);
+                }
+        }
+}
+
 /**
  * init_shared_mem - Allocation and Initialization of Memory
  * @nic: Device private variable.
@@ -614,6 +710,7 @@ static int init_shared_mem(struct s2io_nic *nic)
                mac_control->fifos[i].fifo_no = i;
                mac_control->fifos[i].nic = nic;
                mac_control->fifos[i].max_txds = MAX_SKB_FRAGS + 2;
+               mac_control->fifos[i].dev = dev;

                for (j = 0; j < page_num; j++) {
                        int k = 0;
@@ -2972,10 +3069,10 @@ static void rx_intr_handler(struct ring_info *ring_data)
 static void tx_intr_handler(struct fifo_info *fifo_data)
 {
        struct s2io_nic *nic = fifo_data->nic;
-       struct net_device *dev = (struct net_device *) nic->dev;
        struct tx_curr_get_info get_info, put_info;
-       struct sk_buff *skb;
+       struct sk_buff *skb = NULL;
        struct TxD *txdlp;
+       int pkt_cnt = 0;
        unsigned long flags = 0;
        u8 err_mask;

@@ -3036,6 +3133,7 @@ static void tx_intr_handler(struct fifo_info *fifo_data)
                        DBG_PRINT(ERR_DBG, "in Tx Free Intr\n");
                        return;
                }
+               pkt_cnt++;

                /* Updating the statistics block */
                nic->stats.tx_bytes += skb->len;
@@ -3051,8 +3149,7 @@ static void tx_intr_handler(struct fifo_info *fifo_data)
                        get_info.offset;
        }

-       if (netif_queue_stopped(dev))
-               netif_wake_queue(dev);
+       s2io_wake_tx_queue(fifo_data, pkt_cnt, nic->config.multiq);

        spin_unlock_irqrestore(&fifo_data->tx_lock, flags);
 }
@@ -3933,8 +4030,7 @@ static int s2io_open(struct net_device *dev)
                err = -ENODEV;
                goto hw_init_failed;
        }
-
-       netif_start_queue(dev);
+       s2io_start_all_tx_queue(sp);
        return 0;

 hw_init_failed:
@@ -3979,8 +4075,7 @@ static int s2io_close(struct net_device *dev)
        if (!is_s2io_card_up(sp))
                return 0;

-       netif_stop_queue(dev);
-
+       s2io_stop_all_tx_queue(sp);
        /* delete all populated mac entries */
        for (offset = 1; offset < config->max_mc_addr; offset++) {
                tmp64 = do_s2io_read_unicast_mc(sp, offset);
@@ -4016,7 +4111,6 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
        struct TxFIFO_element __iomem *tx_fifo;
        unsigned long flags = 0;
        u16 vlan_tag = 0;
-       int vlan_priority = 0;
        struct fifo_info *fifo = NULL;
        struct mac_info *mac_control;
        struct config_param *config;
@@ -4043,14 +4137,29 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)

        queue = 0;
        /* Get Fifo number to Transmit based on vlan priority */
-       if (sp->vlgrp && vlan_tx_tag_present(skb)) {
+       if (sp->vlgrp && vlan_tx_tag_present(skb))
                vlan_tag = vlan_tx_tag_get(skb);
-               vlan_priority = vlan_tag >> 13;
-               queue = config->fifo_mapping[vlan_priority];
-       }
+
+       /* get fifo number based on skb->priority value */
+       queue = config->fifo_mapping[skb->priority & (MAX_TX_FIFOS - 1)];

        fifo = &mac_control->fifos[queue];
        spin_lock_irqsave(&fifo->tx_lock, flags);
+#ifdef CONFIG_NETDEVICES_MULTIQUEUE
+       if (sp->config.multiq) {
+               if (__netif_subqueue_stopped(dev, fifo->fifo_no)) {
+                       spin_unlock_irqrestore(&fifo->tx_lock, flags);
+                       return NETDEV_TX_BUSY;
+               }
+       } else
+#endif
+       if (unlikely(fifo->queue_state == FIFO_QUEUE_STOP)) {
+               if (netif_queue_stopped(dev)) {
+                       spin_unlock_irqrestore(&fifo->tx_lock, flags);
+                       return NETDEV_TX_BUSY;
+               }
+       }
+
        put_off = (u16) fifo->tx_curr_put_info.offset;
        get_off = (u16) fifo->tx_curr_get_info.offset;
        txdp = (struct TxD *) fifo->list_info[put_off].list_virt_addr;
@@ -4060,7 +4169,7 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
        if (txdp->Host_Control ||
                        ((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
                DBG_PRINT(TX_DBG, "Error in xmit, No free TXDs.\n");
-               netif_stop_queue(dev);
+               s2io_stop_tx_queue(sp, fifo->fifo_no);
                dev_kfree_skb(skb);
                spin_unlock_irqrestore(&fifo->tx_lock, flags);
                return 0;
@@ -4080,7 +4189,7 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
        txdp->Control_1 |= TXD_LIST_OWN_XENA;
        txdp->Control_2 |= TXD_INT_NUMBER(fifo->fifo_no);

-       if (sp->vlgrp && vlan_tx_tag_present(skb)) {
+       if (vlan_tag) {
                txdp->Control_2 |= TXD_VLAN_ENABLE;
                txdp->Control_2 |= TXD_VLAN_TAG(vlan_tag);
        }
@@ -4166,7 +4275,7 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
                DBG_PRINT(TX_DBG,
                        "No free TxDs for xmit, Put: 0x%x Get:0x%x\n",
                        put_off, get_off);
-               netif_stop_queue(dev);
+               s2io_stop_tx_queue(sp, fifo->fifo_no);
        }
        mac_control->stats_info->sw_stat.mem_allocated += skb->truesize;
        dev->trans_start = jiffies;
@@ -4175,7 +4284,7 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
        return 0;
 pci_map_failed:
        stats->pci_map_fail_cnt++;
-       netif_stop_queue(dev);
+       s2io_stop_tx_queue(sp, fifo->fifo_no);
        stats->mem_freed += skb->truesize;
        dev_kfree_skb(skb);
        spin_unlock_irqrestore(&fifo->tx_lock, flags);
@@ -4587,7 +4696,7 @@ static void s2io_handle_errors(void * dev_id)
                return;

 reset:
-       netif_stop_queue(dev);
+       s2io_stop_all_tx_queue(sp);
        schedule_work(&sp->rst_timer_task);
        sw_stat->soft_reset_cnt++;
        return;
@@ -6574,16 +6683,15 @@ static int s2io_change_mtu(struct net_device *dev, int new_mtu)

        dev->mtu = new_mtu;
        if (netif_running(dev)) {
+               s2io_stop_all_tx_queue(sp);
                s2io_card_down(sp);
-               netif_stop_queue(dev);
                ret = s2io_card_up(sp);
                if (ret) {
                        DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
                                __FUNCTION__);
                        return ret;
                }
-               if (netif_queue_stopped(dev))
-                       netif_wake_queue(dev);
+               s2io_wake_all_tx_queue(sp);
        } else { /* Device is down */
                struct XENA_dev_config __iomem *bar0 = sp->bar0;
                u64 val64 = new_mtu;
@@ -6691,7 +6799,7 @@ static void s2io_set_link(struct work_struct *work)
                        } else {
                                DBG_PRINT(ERR_DBG, "%s: Error: ", dev->name);
                                DBG_PRINT(ERR_DBG, "device is not Quiescent\n");
-                               netif_stop_queue(dev);
+                               s2io_stop_all_tx_queue(nic);
                        }
                }
                val64 = readq(&bar0->adapter_control);
@@ -7181,7 +7289,7 @@ static void s2io_restart_nic(struct work_struct *work)
                DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
                        dev->name);
        }
-       netif_wake_queue(dev);
+       s2io_wake_all_tx_queue(sp);
        DBG_PRINT(ERR_DBG, "%s: was reset by Tx watchdog timer\n",
                dev->name);
 out_unlock:
@@ -7460,6 +7568,7 @@ static void s2io_link(struct s2io_nic * sp, int link)
                init_tti(sp, link);
                if (link == LINK_DOWN) {
                        DBG_PRINT(ERR_DBG, "%s: Link down\n", dev->name);
+                       s2io_stop_all_tx_queue(sp);
                        netif_carrier_off(dev);
                        if(sp->mac_control.stats_info->sw_stat.link_up_cnt)
                                sp->mac_control.stats_info->sw_stat.link_up_time =
@@ -7472,6 +7581,7 @@ static void s2io_link(struct s2io_nic * sp, int link)
                                        jiffies - sp->start_time;
                                sp->mac_control.stats_info->sw_stat.link_up_cnt++;
                                netif_carrier_on(dev);
+                               s2io_wake_all_tx_queue(sp);
                        }
                }
                sp->last_link_state = link;
@@ -7508,7 +7618,8 @@ static void s2io_init_pci(struct s2io_nic * sp)
        pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
 }

-static int s2io_verify_parm(struct pci_dev *pdev, u8 *dev_intr_type)
+static int s2io_verify_parm(struct pci_dev *pdev, u8 *dev_intr_type,
+                               u8 *dev_multiq)
 {
        if ((tx_fifo_num > MAX_TX_FIFOS) ||
                (tx_fifo_num < FIFO_DEFAULT_NUM)) {
@@ -7522,6 +7633,18 @@ static int s2io_verify_parm(struct pci_dev *pdev, u8 *dev_intr_type)
                DBG_PRINT(ERR_DBG, "tx fifos\n");
        }

+#ifndef CONFIG_NETDEVICES_MULTIQUEUE
+       if (multiq) {
+               DBG_PRINT(ERR_DBG, "s2io: Multiqueue support not enabled\n");
+               multiq = 0;
+       }
+#endif
+       /* if multiqueue is enabled configure all fifos */
+       if (multiq) {
+               tx_fifo_num = MAX_TX_FIFOS;
+               *dev_multiq = multiq;
+       }
+
        if ( rx_ring_num > 8) {
                DBG_PRINT(ERR_DBG, "s2io: Requested number of Rx rings not "
                        "supported\n");
@@ -7613,9 +7736,11 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
        struct config_param *config;
        int mode;
        u8 dev_intr_type = intr_type;
+       u8 dev_multiq = 0;
        DECLARE_MAC_BUF(mac);

-       if ((ret = s2io_verify_parm(pdev, &dev_intr_type)))
+       ret = s2io_verify_parm(pdev, &dev_intr_type, &dev_multiq);
+       if (ret)
                return ret;

        if ((ret = pci_enable_device(pdev))) {
@@ -7646,7 +7771,11 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
                pci_disable_device(pdev);
                return -ENODEV;
        }
-
+#ifdef CONFIG_NETDEVICES_MULTIQUEUE
+       if (dev_multiq)
+               dev = alloc_etherdev_mq(sizeof(struct s2io_nic), MAX_TX_FIFOS);
+       else
+#endif
        dev = alloc_etherdev(sizeof(struct s2io_nic));
        if (dev == NULL) {
                DBG_PRINT(ERR_DBG, "Device allocation failed\n");
@@ -7698,6 +7827,7 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)

        /* Tx side parameters. */
        config->tx_fifo_num = tx_fifo_num;
+       config->multiq = dev_multiq;
        for (i = 0; i < MAX_TX_FIFOS; i++) {
                config->tx_cfg[i].fifo_len = tx_fifo_len[i];
                config->tx_cfg[i].fifo_priority = i;
@@ -7705,7 +7835,7 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)

        /* mapping the QoS priority to the configured fifos */
        for (i = 0; i < MAX_TX_FIFOS; i++)
-               config->fifo_mapping[i] = fifo_map[config->tx_fifo_num][i];
+               config->fifo_mapping[i] = fifo_map[config->tx_fifo_num - 1][i];

        config->tx_intr_type = TXD_INT_TYPE_UTILZ;
        for (i = 0; i < config->tx_fifo_num; i++) {
@@ -7810,7 +7940,10 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
                dev->features |= NETIF_F_UFO;
                dev->features |= NETIF_F_HW_CSUM;
        }
-
+#ifdef CONFIG_NETDEVICES_MULTIQUEUE
+       if (config->multiq)
+               dev->features |= NETIF_F_MULTI_QUEUE;
+#endif
        dev->tx_timeout = &s2io_tx_watchdog;
        dev->watchdog_timeo = WATCH_DOG_TIMEOUT;
        INIT_WORK(&sp->rst_timer_task, s2io_restart_nic);
@@ -7959,6 +8092,10 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)

        if (napi)
                DBG_PRINT(ERR_DBG, "%s: NAPI enabled\n", dev->name);
+
+       DBG_PRINT(ERR_DBG, "%s: Using %d Tx fifo(s)\n", dev->name,
+               sp->config.tx_fifo_num);
+
        switch(sp->config.intr_type) {
        case INTA:
                DBG_PRINT(ERR_DBG, "%s: Interrupt type INTA\n", dev->name);
@@ -7967,6 +8104,15 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
                DBG_PRINT(ERR_DBG, "%s: Interrupt type MSI-X\n", dev->name);
                break;
        }
+       if (sp->config.multiq) {
+               for (i = 0; i < sp->config.tx_fifo_num; i++)
+                       mac_control->fifos[i].multiq = config->multiq;
+               DBG_PRINT(ERR_DBG, "%s: Multiqueue support enabled\n",
+                       dev->name);
+       } else
+               DBG_PRINT(ERR_DBG, "%s: Multiqueue support disabled\n",
+                       dev->name);
+
        if (sp->lro)
                DBG_PRINT(ERR_DBG, "%s: Large receive offload enabled\n",
                        dev->name);
diff --git a/drivers/net/s2io.h b/drivers/net/s2io.h
index 64b88eb48287..fdc0a94d0a89 100644
--- a/drivers/net/s2io.h
+++ b/drivers/net/s2io.h
@@ -464,6 +464,7 @@ struct config_param {
        int max_mc_addr;        /* xena=64 herc=256 */
        int max_mac_addr;       /* xena=16 herc=64 */
        int mc_start_offset;    /* xena=16 herc=64 */
+       u8 multiq;
 };

 /* Structure representing MAC Addrs */
@@ -720,6 +721,15 @@ struct fifo_info {
         * the buffers
         */
        struct tx_curr_get_info tx_curr_get_info;
+#define FIFO_QUEUE_START 0
+#define FIFO_QUEUE_STOP 1
+       int queue_state;
+
+       /* copy of sp->dev pointer */
+       struct net_device *dev;
+
+       /* copy of multiq status */
+       u8 multiq;

        /* Per fifo lock */
        spinlock_t tx_lock;
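
For completeness, the start/stop handshake that these new fifo_info fields
implement can be reduced to a few lines of ordinary C. The sketch below is
illustrative only; demo_fifo, demo_stop_tx_queue and demo_wake_tx_queue are
invented stand-ins that mirror the logic of s2io_stop_tx_queue() and
s2io_wake_tx_queue() in the non-multiqueue path: the xmit side marks the FIFO
stopped when it runs out of TXDs, and the completion side wakes it only when
at least one descriptor was freed.

#include <stdio.h>

#define FIFO_QUEUE_START 0
#define FIFO_QUEUE_STOP  1

struct demo_fifo {
        int queue_state;        /* mirrors fifo_info::queue_state */
};

static void demo_stop_tx_queue(struct demo_fifo *fifo)
{
        /* xmit path: no free TXDs, so stop and remember that we stopped */
        fifo->queue_state = FIFO_QUEUE_STOP;
        printf("netif_stop_queue()\n");         /* stand-in for the real call */
}

static void demo_wake_tx_queue(struct demo_fifo *fifo, int freed_cnt)
{
        /* completion path: wake only if TXDs were freed and we had stopped */
        if (freed_cnt && fifo->queue_state == FIFO_QUEUE_STOP) {
                fifo->queue_state = FIFO_QUEUE_START;
                printf("netif_wake_queue()\n"); /* stand-in for the real call */
        }
}

int main(void)
{
        struct demo_fifo fifo = { FIFO_QUEUE_START };

        demo_stop_tx_queue(&fifo);      /* xmit ran out of TXDs */
        demo_wake_tx_queue(&fifo, 0);   /* nothing freed yet: stays stopped */
        demo_wake_tx_queue(&fifo, 4);   /* 4 TXDs freed: queue wakes */
        return 0;
}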