Diffstat (limited to 'drivers/net/s2io.c')
-rw-r--r--	drivers/net/s2io.c	444
1 file changed, 354 insertions(+), 90 deletions(-)
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c
index c72787adeba3..6d8e5c4cf858 100644
--- a/drivers/net/s2io.c
+++ b/drivers/net/s2io.c
@@ -50,6 +50,8 @@
  * Possible values '1' for enable , '0' for disable.
  * Default is '2' - which means disable in promisc mode
  * and enable in non-promiscuous mode.
+ * multiq: This parameter used to enable/disable MULTIQUEUE support.
+ * Possible values '1' for enable and '0' for disable. Default is '0'
  ************************************************************************/
 
 #include <linux/module.h>
@@ -84,7 +86,7 @@
 #include "s2io.h"
 #include "s2io-regs.h"
 
-#define DRV_VERSION "2.0.26.15-2"
+#define DRV_VERSION "2.0.26.19"
 
 /* S2io Driver name & version. */
 static char s2io_driver_name[] = "Neterion";
@@ -386,6 +388,26 @@ static void s2io_vlan_rx_register(struct net_device *dev,
 /* A flag indicating whether 'RX_PA_CFG_STRIP_VLAN_TAG' bit is set or not */
 static int vlan_strip_flag;
 
+/* Unregister the vlan */
+static void s2io_vlan_rx_kill_vid(struct net_device *dev, unsigned long vid)
+{
+	int i;
+	struct s2io_nic *nic = dev->priv;
+	unsigned long flags[MAX_TX_FIFOS];
+	struct mac_info *mac_control = &nic->mac_control;
+	struct config_param *config = &nic->config;
+
+	for (i = 0; i < config->tx_fifo_num; i++)
+		spin_lock_irqsave(&mac_control->fifos[i].tx_lock, flags[i]);
+
+	if (nic->vlgrp)
+		vlan_group_set_device(nic->vlgrp, vid, NULL);
+
+	for (i = config->tx_fifo_num - 1; i >= 0; i--)
+		spin_unlock_irqrestore(&mac_control->fifos[i].tx_lock,
+				flags[i]);
+}
+
 /*
  * Constants to be programmed into the Xena's registers, to configure
  * the XAUI.
@@ -456,10 +478,9 @@ MODULE_VERSION(DRV_VERSION);
 
 
 /* Module Loadable parameters. */
-S2IO_PARM_INT(tx_fifo_num, 1);
+S2IO_PARM_INT(tx_fifo_num, FIFO_DEFAULT_NUM);
 S2IO_PARM_INT(rx_ring_num, 1);
-
-
+S2IO_PARM_INT(multiq, 0);
 S2IO_PARM_INT(rx_ring_mode, 1);
 S2IO_PARM_INT(use_continuous_tx_intrs, 1);
 S2IO_PARM_INT(rmac_pause_time, 0x100);
@@ -469,6 +490,8 @@ S2IO_PARM_INT(shared_splits, 0);
 S2IO_PARM_INT(tmac_util_period, 5);
 S2IO_PARM_INT(rmac_util_period, 5);
 S2IO_PARM_INT(l3l4hdr_size, 128);
+/* 0 is no steering, 1 is Priority steering, 2 is Default steering */
+S2IO_PARM_INT(tx_steering_type, TX_DEFAULT_STEERING);
 /* Frequency of Rx desc syncs expressed as power of 2 */
 S2IO_PARM_INT(rxsync_frequency, 3);
 /* Interrupt type. Values can be 0(INTA), 2(MSI_X) */
@@ -533,6 +556,101 @@ static struct pci_driver s2io_driver = {
 /* A simplifier macro used both by init and free shared_mem Fns(). */
 #define TXD_MEM_PAGE_CNT(len, per_each) ((len+per_each - 1) / per_each)
 
+/* netqueue manipulation helper functions */
+static inline void s2io_stop_all_tx_queue(struct s2io_nic *sp)
+{
+	int i;
+#ifdef CONFIG_NETDEVICES_MULTIQUEUE
+	if (sp->config.multiq) {
+		for (i = 0; i < sp->config.tx_fifo_num; i++)
+			netif_stop_subqueue(sp->dev, i);
+	} else
+#endif
+	{
+		for (i = 0; i < sp->config.tx_fifo_num; i++)
+			sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_STOP;
+		netif_stop_queue(sp->dev);
+	}
+}
+
+static inline void s2io_stop_tx_queue(struct s2io_nic *sp, int fifo_no)
+{
+#ifdef CONFIG_NETDEVICES_MULTIQUEUE
+	if (sp->config.multiq)
+		netif_stop_subqueue(sp->dev, fifo_no);
+	else
+#endif
+	{
+		sp->mac_control.fifos[fifo_no].queue_state =
+			FIFO_QUEUE_STOP;
+		netif_stop_queue(sp->dev);
+	}
+}
+
+static inline void s2io_start_all_tx_queue(struct s2io_nic *sp)
+{
+	int i;
+#ifdef CONFIG_NETDEVICES_MULTIQUEUE
+	if (sp->config.multiq) {
+		for (i = 0; i < sp->config.tx_fifo_num; i++)
+			netif_start_subqueue(sp->dev, i);
+	} else
+#endif
+	{
+		for (i = 0; i < sp->config.tx_fifo_num; i++)
+			sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_START;
+		netif_start_queue(sp->dev);
+	}
+}
+
+static inline void s2io_start_tx_queue(struct s2io_nic *sp, int fifo_no)
+{
+#ifdef CONFIG_NETDEVICES_MULTIQUEUE
+	if (sp->config.multiq)
+		netif_start_subqueue(sp->dev, fifo_no);
+	else
+#endif
+	{
+		sp->mac_control.fifos[fifo_no].queue_state =
+			FIFO_QUEUE_START;
+		netif_start_queue(sp->dev);
+	}
+}
+
+static inline void s2io_wake_all_tx_queue(struct s2io_nic *sp)
+{
+	int i;
+#ifdef CONFIG_NETDEVICES_MULTIQUEUE
+	if (sp->config.multiq) {
+		for (i = 0; i < sp->config.tx_fifo_num; i++)
+			netif_wake_subqueue(sp->dev, i);
+	} else
+#endif
+	{
+		for (i = 0; i < sp->config.tx_fifo_num; i++)
+			sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_START;
+		netif_wake_queue(sp->dev);
+	}
+}
+
+static inline void s2io_wake_tx_queue(
+	struct fifo_info *fifo, int cnt, u8 multiq)
+{
+
+#ifdef CONFIG_NETDEVICES_MULTIQUEUE
+	if (multiq) {
+		if (cnt && __netif_subqueue_stopped(fifo->dev, fifo->fifo_no))
+			netif_wake_subqueue(fifo->dev, fifo->fifo_no);
+	} else
+#endif
+	if (cnt && (fifo->queue_state == FIFO_QUEUE_STOP)) {
+		if (netif_queue_stopped(fifo->dev)) {
+			fifo->queue_state = FIFO_QUEUE_START;
+			netif_wake_queue(fifo->dev);
+		}
+	}
+}
+
 /**
  * init_shared_mem - Allocation and Initialization of Memory
  * @nic: Device private variable.
@@ -614,6 +732,7 @@ static int init_shared_mem(struct s2io_nic *nic)
 		mac_control->fifos[i].fifo_no = i;
 		mac_control->fifos[i].nic = nic;
 		mac_control->fifos[i].max_txds = MAX_SKB_FRAGS + 2;
+		mac_control->fifos[i].dev = dev;
 
 		for (j = 0; j < page_num; j++) {
 			int k = 0;
@@ -2948,7 +3067,7 @@ static void rx_intr_handler(struct ring_info *ring_data)
 			struct lro *lro = &nic->lro0_n[i];
 			if (lro->in_use) {
 				update_L3L4_header(nic, lro);
-				queue_rx_frame(lro->parent);
+				queue_rx_frame(lro->parent, lro->vlan_tag);
 				clear_lro_session(lro);
 			}
 		}
@@ -2972,10 +3091,10 @@ static void rx_intr_handler(struct ring_info *ring_data)
 static void tx_intr_handler(struct fifo_info *fifo_data)
 {
 	struct s2io_nic *nic = fifo_data->nic;
-	struct net_device *dev = (struct net_device *) nic->dev;
 	struct tx_curr_get_info get_info, put_info;
-	struct sk_buff *skb;
+	struct sk_buff *skb = NULL;
 	struct TxD *txdlp;
+	int pkt_cnt = 0;
 	unsigned long flags = 0;
 	u8 err_mask;
 
@@ -3036,6 +3155,7 @@ static void tx_intr_handler(struct fifo_info *fifo_data)
 			DBG_PRINT(ERR_DBG, "in Tx Free Intr\n");
 			return;
 		}
+		pkt_cnt++;
 
 		/* Updating the statistics block */
 		nic->stats.tx_bytes += skb->len;
@@ -3051,8 +3171,7 @@ static void tx_intr_handler(struct fifo_info *fifo_data)
 			get_info.offset;
 	}
 
-	if (netif_queue_stopped(dev))
-		netif_wake_queue(dev);
+	s2io_wake_tx_queue(fifo_data, pkt_cnt, nic->config.multiq);
 
 	spin_unlock_irqrestore(&fifo_data->tx_lock, flags);
 }
@@ -3933,8 +4052,7 @@ static int s2io_open(struct net_device *dev)
 		err = -ENODEV;
 		goto hw_init_failed;
 	}
-
-	netif_start_queue(dev);
+	s2io_start_all_tx_queue(sp);
 	return 0;
 
 hw_init_failed:
@@ -3979,8 +4097,7 @@ static int s2io_close(struct net_device *dev)
 	if (!is_s2io_card_up(sp))
 		return 0;
 
-	netif_stop_queue(dev);
-
+	s2io_stop_all_tx_queue(sp);
 	/* delete all populated mac entries */
 	for (offset = 1; offset < config->max_mc_addr; offset++) {
 		tmp64 = do_s2io_read_unicast_mc(sp, offset);
@@ -4016,11 +4133,12 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
 	struct TxFIFO_element __iomem *tx_fifo;
 	unsigned long flags = 0;
 	u16 vlan_tag = 0;
-	int vlan_priority = 0;
 	struct fifo_info *fifo = NULL;
 	struct mac_info *mac_control;
 	struct config_param *config;
+	int do_spin_lock = 1;
 	int offload_type;
+	int enable_per_list_interrupt = 0;
 	struct swStat *stats = &sp->mac_control.stats_info->sw_stat;
 
 	mac_control = &sp->mac_control;
@@ -4042,15 +4160,67 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
 	}
 
 	queue = 0;
-	/* Get Fifo number to Transmit based on vlan priority */
-	if (sp->vlgrp && vlan_tx_tag_present(skb)) {
+	if (sp->vlgrp && vlan_tx_tag_present(skb))
 		vlan_tag = vlan_tx_tag_get(skb);
-		vlan_priority = vlan_tag >> 13;
-		queue = config->fifo_mapping[vlan_priority];
+	if (sp->config.tx_steering_type == TX_DEFAULT_STEERING) {
+		if (skb->protocol == htons(ETH_P_IP)) {
+			struct iphdr *ip;
+			struct tcphdr *th;
+			ip = ip_hdr(skb);
+
+			if ((ip->frag_off & htons(IP_OFFSET|IP_MF)) == 0) {
+				th = (struct tcphdr *)(((unsigned char *)ip) +
+						ip->ihl*4);
+
+				if (ip->protocol == IPPROTO_TCP) {
+					queue_len = sp->total_tcp_fifos;
+					queue = (ntohs(th->source) +
+							ntohs(th->dest)) &
+					    sp->fifo_selector[queue_len - 1];
+					if (queue >= queue_len)
+						queue = queue_len - 1;
+				} else if (ip->protocol == IPPROTO_UDP) {
+					queue_len = sp->total_udp_fifos;
+					queue = (ntohs(th->source) +
+							ntohs(th->dest)) &
+					    sp->fifo_selector[queue_len - 1];
+					if (queue >= queue_len)
+						queue = queue_len - 1;
+					queue += sp->udp_fifo_idx;
+					if (skb->len > 1024)
+						enable_per_list_interrupt = 1;
+					do_spin_lock = 0;
+				}
+			}
+		}
+	} else if (sp->config.tx_steering_type == TX_PRIORITY_STEERING)
+		/* get fifo number based on skb->priority value */
+		queue = config->fifo_mapping
+			[skb->priority & (MAX_TX_FIFOS - 1)];
+	fifo = &mac_control->fifos[queue];
+
+	if (do_spin_lock)
+		spin_lock_irqsave(&fifo->tx_lock, flags);
+	else {
+		if (unlikely(!spin_trylock_irqsave(&fifo->tx_lock, flags)))
+			return NETDEV_TX_LOCKED;
+	}
+
+#ifdef CONFIG_NETDEVICES_MULTIQUEUE
+	if (sp->config.multiq) {
+		if (__netif_subqueue_stopped(dev, fifo->fifo_no)) {
+			spin_unlock_irqrestore(&fifo->tx_lock, flags);
+			return NETDEV_TX_BUSY;
+		}
+	} else
+#endif
+	if (unlikely(fifo->queue_state == FIFO_QUEUE_STOP)) {
+		if (netif_queue_stopped(dev)) {
+			spin_unlock_irqrestore(&fifo->tx_lock, flags);
+			return NETDEV_TX_BUSY;
+		}
 	}
 
-	fifo = &mac_control->fifos[queue];
-	spin_lock_irqsave(&fifo->tx_lock, flags);
 	put_off = (u16) fifo->tx_curr_put_info.offset;
 	get_off = (u16) fifo->tx_curr_get_info.offset;
 	txdp = (struct TxD *) fifo->list_info[put_off].list_virt_addr;
@@ -4060,7 +4230,7 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
 	if (txdp->Host_Control ||
 			((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
 		DBG_PRINT(TX_DBG, "Error in xmit, No free TXDs.\n");
-		netif_stop_queue(dev);
+		s2io_stop_tx_queue(sp, fifo->fifo_no);
 		dev_kfree_skb(skb);
 		spin_unlock_irqrestore(&fifo->tx_lock, flags);
 		return 0;
@@ -4079,8 +4249,10 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
 	txdp->Control_1 |= TXD_GATHER_CODE_FIRST;
 	txdp->Control_1 |= TXD_LIST_OWN_XENA;
 	txdp->Control_2 |= TXD_INT_NUMBER(fifo->fifo_no);
-
-	if (sp->vlgrp && vlan_tx_tag_present(skb)) {
+	if (enable_per_list_interrupt)
+		if (put_off & (queue_len >> 5))
+			txdp->Control_2 |= TXD_INT_TYPE_PER_LIST;
+	if (vlan_tag) {
 		txdp->Control_2 |= TXD_VLAN_ENABLE;
 		txdp->Control_2 |= TXD_VLAN_TAG(vlan_tag);
 	}
@@ -4166,7 +4338,7 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
 		DBG_PRINT(TX_DBG,
 			"No free TxDs for xmit, Put: 0x%x Get:0x%x\n",
 			put_off, get_off);
-		netif_stop_queue(dev);
+		s2io_stop_tx_queue(sp, fifo->fifo_no);
 	}
 	mac_control->stats_info->sw_stat.mem_allocated += skb->truesize;
 	dev->trans_start = jiffies;
@@ -4175,7 +4347,7 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
 	return 0;
 pci_map_failed:
 	stats->pci_map_fail_cnt++;
-	netif_stop_queue(dev);
+	s2io_stop_tx_queue(sp, fifo->fifo_no);
 	stats->mem_freed += skb->truesize;
 	dev_kfree_skb(skb);
 	spin_unlock_irqrestore(&fifo->tx_lock, flags);
@@ -4587,7 +4759,7 @@ static void s2io_handle_errors(void * dev_id)
 		return;
 
 reset:
-	netif_stop_queue(dev);
+	s2io_stop_all_tx_queue(sp);
 	schedule_work(&sp->rst_timer_task);
 	sw_stat->soft_reset_cnt++;
 	return;
@@ -6574,16 +6746,15 @@ static int s2io_change_mtu(struct net_device *dev, int new_mtu)
 
 	dev->mtu = new_mtu;
 	if (netif_running(dev)) {
+		s2io_stop_all_tx_queue(sp);
 		s2io_card_down(sp);
-		netif_stop_queue(dev);
 		ret = s2io_card_up(sp);
 		if (ret) {
 			DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
 				  __FUNCTION__);
 			return ret;
 		}
-		if (netif_queue_stopped(dev))
-			netif_wake_queue(dev);
+		s2io_wake_all_tx_queue(sp);
 	} else { /* Device is down */
 		struct XENA_dev_config __iomem *bar0 = sp->bar0;
 		u64 val64 = new_mtu;
@@ -6691,7 +6862,7 @@ static void s2io_set_link(struct work_struct *work)
 		} else {
 			DBG_PRINT(ERR_DBG, "%s: Error: ", dev->name);
 			DBG_PRINT(ERR_DBG, "device is not Quiescent\n");
-			netif_stop_queue(dev);
+			s2io_stop_all_tx_queue(nic);
 		}
 	}
 	val64 = readq(&bar0->adapter_control);
@@ -7181,7 +7352,7 @@ static void s2io_restart_nic(struct work_struct *work)
 		DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
 			  dev->name);
 	}
-	netif_wake_queue(dev);
+	s2io_wake_all_tx_queue(sp);
 	DBG_PRINT(ERR_DBG, "%s: was reset by Tx watchdog timer\n",
 		  dev->name);
 out_unlock:
@@ -7371,7 +7542,8 @@ static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp)
 				{
 					lro_append_pkt(sp, lro,
 						skb, tcp_len);
-					queue_rx_frame(lro->parent);
+					queue_rx_frame(lro->parent,
+							lro->vlan_tag);
 					clear_lro_session(lro);
 					sp->mac_control.stats_info->
 						sw_stat.flush_max_pkts++;
@@ -7382,7 +7554,8 @@ static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp)
 						lro->frags_len;
 					sp->mac_control.stats_info->
 						sw_stat.sending_both++;
-					queue_rx_frame(lro->parent);
+					queue_rx_frame(lro->parent,
+							lro->vlan_tag);
 					clear_lro_session(lro);
 					goto send_up;
 				case 0: /* sessions exceeded */
@@ -7408,31 +7581,12 @@ static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp)
 			 */
 			skb->ip_summed = CHECKSUM_NONE;
 		}
-	} else {
+	} else
 		skb->ip_summed = CHECKSUM_NONE;
-	}
+
 	sp->mac_control.stats_info->sw_stat.mem_freed += skb->truesize;
-	if (!sp->lro) {
-		skb->protocol = eth_type_trans(skb, dev);
-		if ((sp->vlgrp && RXD_GET_VLAN_TAG(rxdp->Control_2) &&
-			vlan_strip_flag)) {
-			/* Queueing the vlan frame to the upper layer */
-			if (napi)
-				vlan_hwaccel_receive_skb(skb, sp->vlgrp,
-					RXD_GET_VLAN_TAG(rxdp->Control_2));
-			else
-				vlan_hwaccel_rx(skb, sp->vlgrp,
-					RXD_GET_VLAN_TAG(rxdp->Control_2));
-		} else {
-			if (napi)
-				netif_receive_skb(skb);
-			else
-				netif_rx(skb);
-		}
-	} else {
 send_up:
-		queue_rx_frame(skb);
-	}
+	queue_rx_frame(skb, RXD_GET_VLAN_TAG(rxdp->Control_2));
 	dev->last_rx = jiffies;
 aggregate:
 	atomic_dec(&sp->rx_bufs_left[ring_no]);
@@ -7460,6 +7614,7 @@ static void s2io_link(struct s2io_nic * sp, int link)
 		init_tti(sp, link);
 		if (link == LINK_DOWN) {
 			DBG_PRINT(ERR_DBG, "%s: Link down\n", dev->name);
+			s2io_stop_all_tx_queue(sp);
 			netif_carrier_off(dev);
 			if(sp->mac_control.stats_info->sw_stat.link_up_cnt)
 				sp->mac_control.stats_info->sw_stat.link_up_time =
@@ -7472,6 +7627,7 @@ static void s2io_link(struct s2io_nic * sp, int link)
 					jiffies - sp->start_time;
 			sp->mac_control.stats_info->sw_stat.link_up_cnt++;
 			netif_carrier_on(dev);
+			s2io_wake_all_tx_queue(sp);
 		}
 	}
 	sp->last_link_state = link;
@@ -7508,20 +7664,48 @@ static void s2io_init_pci(struct s2io_nic * sp)
 	pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
 }
 
-static int s2io_verify_parm(struct pci_dev *pdev, u8 *dev_intr_type)
+static int s2io_verify_parm(struct pci_dev *pdev, u8 *dev_intr_type,
+	u8 *dev_multiq)
 {
 	if ((tx_fifo_num > MAX_TX_FIFOS) ||
-		(tx_fifo_num < FIFO_DEFAULT_NUM)) {
+		(tx_fifo_num < 1)) {
 		DBG_PRINT(ERR_DBG, "s2io: Requested number of tx fifos "
 			  "(%d) not supported\n", tx_fifo_num);
-		tx_fifo_num =
-			((tx_fifo_num > MAX_TX_FIFOS)? MAX_TX_FIFOS :
-			((tx_fifo_num < FIFO_DEFAULT_NUM) ? FIFO_DEFAULT_NUM :
-			tx_fifo_num));
+
+		if (tx_fifo_num < 1)
+			tx_fifo_num = 1;
+		else
+			tx_fifo_num = MAX_TX_FIFOS;
+
 		DBG_PRINT(ERR_DBG, "s2io: Default to %d ", tx_fifo_num);
 		DBG_PRINT(ERR_DBG, "tx fifos\n");
 	}
 
+#ifndef CONFIG_NETDEVICES_MULTIQUEUE
+	if (multiq) {
+		DBG_PRINT(ERR_DBG, "s2io: Multiqueue support not enabled\n");
+		multiq = 0;
+	}
+#endif
+	if (multiq)
+		*dev_multiq = multiq;
+
+	if (tx_steering_type && (1 == tx_fifo_num)) {
+		if (tx_steering_type != TX_DEFAULT_STEERING)
+			DBG_PRINT(ERR_DBG,
+				"s2io: Tx steering is not supported with "
+				"one fifo. Disabling Tx steering.\n");
+		tx_steering_type = NO_STEERING;
+	}
+
+	if ((tx_steering_type < NO_STEERING) ||
+		(tx_steering_type > TX_DEFAULT_STEERING)) {
+		DBG_PRINT(ERR_DBG, "s2io: Requested transmit steering not "
+			 "supported\n");
+		DBG_PRINT(ERR_DBG, "s2io: Disabling transmit steering\n");
+		tx_steering_type = NO_STEERING;
+	}
+
 	if ( rx_ring_num > 8) {
 		DBG_PRINT(ERR_DBG, "s2io: Requested number of Rx rings not "
 			  "supported\n");
@@ -7613,9 +7797,11 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
 	struct config_param *config;
 	int mode;
 	u8 dev_intr_type = intr_type;
+	u8 dev_multiq = 0;
 	DECLARE_MAC_BUF(mac);
 
-	if ((ret = s2io_verify_parm(pdev, &dev_intr_type)))
+	ret = s2io_verify_parm(pdev, &dev_intr_type, &dev_multiq);
+	if (ret)
 		return ret;
 
 	if ((ret = pci_enable_device(pdev))) {
@@ -7646,7 +7832,11 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
 		pci_disable_device(pdev);
 		return -ENODEV;
 	}
-
+#ifdef CONFIG_NETDEVICES_MULTIQUEUE
+	if (dev_multiq)
+		dev = alloc_etherdev_mq(sizeof(struct s2io_nic), tx_fifo_num);
+	else
+#endif
 	dev = alloc_etherdev(sizeof(struct s2io_nic));
 	if (dev == NULL) {
 		DBG_PRINT(ERR_DBG, "Device allocation failed\n");
@@ -7695,17 +7885,45 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
 	config = &sp->config;
 
 	config->napi = napi;
+	config->tx_steering_type = tx_steering_type;
 
 	/* Tx side parameters. */
-	config->tx_fifo_num = tx_fifo_num;
-	for (i = 0; i < MAX_TX_FIFOS; i++) {
+	if (config->tx_steering_type == TX_PRIORITY_STEERING)
+		config->tx_fifo_num = MAX_TX_FIFOS;
+	else
+		config->tx_fifo_num = tx_fifo_num;
+
+	/* Initialize the fifos used for tx steering */
+	if (config->tx_fifo_num < 5) {
+		if (config->tx_fifo_num == 1)
+			sp->total_tcp_fifos = 1;
+		else
+			sp->total_tcp_fifos = config->tx_fifo_num - 1;
+		sp->udp_fifo_idx = config->tx_fifo_num - 1;
+		sp->total_udp_fifos = 1;
+		sp->other_fifo_idx = sp->total_tcp_fifos - 1;
+	} else {
+		sp->total_tcp_fifos = (tx_fifo_num - FIFO_UDP_MAX_NUM -
+				       FIFO_OTHER_MAX_NUM);
+		sp->udp_fifo_idx = sp->total_tcp_fifos;
+		sp->total_udp_fifos = FIFO_UDP_MAX_NUM;
+		sp->other_fifo_idx = sp->udp_fifo_idx + FIFO_UDP_MAX_NUM;
+	}
+
+	config->multiq = dev_multiq;
+	for (i = 0; i < config->tx_fifo_num; i++) {
 		config->tx_cfg[i].fifo_len = tx_fifo_len[i];
 		config->tx_cfg[i].fifo_priority = i;
 	}
 
 	/* mapping the QoS priority to the configured fifos */
 	for (i = 0; i < MAX_TX_FIFOS; i++)
-		config->fifo_mapping[i] = fifo_map[config->tx_fifo_num][i];
+		config->fifo_mapping[i] = fifo_map[config->tx_fifo_num - 1][i];
+
+	/* map the hashing selector table to the configured fifos */
+	for (i = 0; i < config->tx_fifo_num; i++)
+		sp->fifo_selector[i] = fifo_selector[i];
+
 
 	config->tx_intr_type = TXD_INT_TYPE_UTILZ;
 	for (i = 0; i < config->tx_fifo_num; i++) {
@@ -7790,6 +8008,7 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
 	SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
 	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
 	dev->vlan_rx_register = s2io_vlan_rx_register;
+	dev->vlan_rx_kill_vid = (void *)s2io_vlan_rx_kill_vid;
 
 	/*
 	 * will use eth_mac_addr() for dev->set_mac_address
@@ -7810,7 +8029,10 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
 		dev->features |= NETIF_F_UFO;
 		dev->features |= NETIF_F_HW_CSUM;
 	}
-
+#ifdef CONFIG_NETDEVICES_MULTIQUEUE
+	if (config->multiq)
+		dev->features |= NETIF_F_MULTI_QUEUE;
+#endif
 	dev->tx_timeout = &s2io_tx_watchdog;
 	dev->watchdog_timeo = WATCH_DOG_TIMEOUT;
 	INIT_WORK(&sp->rst_timer_task, s2io_restart_nic);
@@ -7959,6 +8181,10 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
 
 	if (napi)
 		DBG_PRINT(ERR_DBG, "%s: NAPI enabled\n", dev->name);
+
+	DBG_PRINT(ERR_DBG, "%s: Using %d Tx fifo(s)\n", dev->name,
+		  sp->config.tx_fifo_num);
+
 	switch(sp->config.intr_type) {
 	case INTA:
 		DBG_PRINT(ERR_DBG, "%s: Interrupt type INTA\n", dev->name);
@@ -7967,6 +8193,29 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
 		DBG_PRINT(ERR_DBG, "%s: Interrupt type MSI-X\n", dev->name);
 		break;
 	}
+	if (sp->config.multiq) {
+		for (i = 0; i < sp->config.tx_fifo_num; i++)
+			mac_control->fifos[i].multiq = config->multiq;
+		DBG_PRINT(ERR_DBG, "%s: Multiqueue support enabled\n",
+			dev->name);
+	} else
+		DBG_PRINT(ERR_DBG, "%s: Multiqueue support disabled\n",
+			dev->name);
+
+	switch (sp->config.tx_steering_type) {
+	case NO_STEERING:
+		DBG_PRINT(ERR_DBG, "%s: No steering enabled for"
+			" transmit\n", dev->name);
+		break;
+	case TX_PRIORITY_STEERING:
+		DBG_PRINT(ERR_DBG, "%s: Priority steering enabled for"
+			" transmit\n", dev->name);
+		break;
+	case TX_DEFAULT_STEERING:
+		DBG_PRINT(ERR_DBG, "%s: Default steering enabled for"
+			" transmit\n", dev->name);
+	}
+
 	if (sp->lro)
 		DBG_PRINT(ERR_DBG, "%s: Large receive offload enabled\n",
 			  dev->name);
@@ -8061,7 +8310,8 @@ module_init(s2io_starter);
 module_exit(s2io_closer);
 
 static int check_L2_lro_capable(u8 *buffer, struct iphdr **ip,
-		struct tcphdr **tcp, struct RxD_t *rxdp)
+		struct tcphdr **tcp, struct RxD_t *rxdp,
+		struct s2io_nic *sp)
 {
 	int ip_off;
 	u8 l2_type = (u8)((rxdp->Control_1 >> 37) & 0x7), ip_len;
@@ -8072,19 +8322,20 @@ static int check_L2_lro_capable(u8 *buffer, struct iphdr **ip,
 		return -1;
 	}
 
-	/* TODO:
-	 * By default the VLAN field in the MAC is stripped by the card, if this
-	 * feature is turned off in rx_pa_cfg register, then the ip_off field
-	 * has to be shifted by a further 2 bytes
-	 */
-	switch (l2_type) {
-		case 0: /* DIX type */
-		case 4: /* DIX type with VLAN */
-			ip_off = HEADER_ETHERNET_II_802_3_SIZE;
-			break;
+	/* Checking for DIX type or DIX type with VLAN */
+	if ((l2_type == 0)
+		|| (l2_type == 4)) {
+		ip_off = HEADER_ETHERNET_II_802_3_SIZE;
+		/*
+		 * If vlan stripping is disabled and the frame is VLAN tagged,
+		 * shift the offset by the VLAN header size bytes.
+		 */
+		if ((!vlan_strip_flag) &&
+			(rxdp->Control_1 & RXD_FRAME_VLAN_TAG))
+			ip_off += HEADER_VLAN_SIZE;
+	} else {
 		/* LLC, SNAP etc are considered non-mergeable */
-		default:
-			return -1;
+		return -1;
 	}
 
 	*ip = (struct iphdr *)((u8 *)buffer + ip_off);
@@ -8111,7 +8362,7 @@ static inline int get_l4_pyld_length(struct iphdr *ip, struct tcphdr *tcp)
 }
 
 static void initiate_new_session(struct lro *lro, u8 *l2h,
-	struct iphdr *ip, struct tcphdr *tcp, u32 tcp_pyld_len)
+	struct iphdr *ip, struct tcphdr *tcp, u32 tcp_pyld_len, u16 vlan_tag)
 {
 	DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
 	lro->l2h = l2h;
@@ -8122,6 +8373,7 @@ static void initiate_new_session(struct lro *lro, u8 *l2h,
 	lro->sg_num = 1;
 	lro->total_len = ntohs(ip->tot_len);
 	lro->frags_len = 0;
+	lro->vlan_tag = vlan_tag;
 	/*
 	 * check if we saw TCP timestamp. Other consistency checks have
 	 * already been done.
@@ -8253,15 +8505,16 @@ s2io_club_tcp_session(u8 *buffer, u8 **tcp, u32 *tcp_len, struct lro **lro,
 	struct iphdr *ip;
 	struct tcphdr *tcph;
 	int ret = 0, i;
+	u16 vlan_tag = 0;
 
 	if (!(ret = check_L2_lro_capable(buffer, &ip, (struct tcphdr **)tcp,
-					 rxdp))) {
+					 rxdp, sp))) {
 		DBG_PRINT(INFO_DBG,"IP Saddr: %x Daddr: %x\n",
 			  ip->saddr, ip->daddr);
-	} else {
+	} else
 		return ret;
-	}
 
+	vlan_tag = RXD_GET_VLAN_TAG(rxdp->Control_2);
 	tcph = (struct tcphdr *)*tcp;
 	*tcp_len = get_l4_pyld_length(ip, tcph);
 	for (i=0; i<MAX_LRO_SESSIONS; i++) {
@@ -8321,7 +8574,8 @@ s2io_club_tcp_session(u8 *buffer, u8 **tcp, u32 *tcp_len, struct lro **lro,
 
 	switch (ret) {
 		case 3:
-			initiate_new_session(*lro, buffer, ip, tcph, *tcp_len);
+			initiate_new_session(*lro, buffer, ip, tcph, *tcp_len,
+				vlan_tag);
 			break;
 		case 2:
 			update_L3L4_header(sp, *lro);
@@ -8349,15 +8603,25 @@ static void clear_lro_session(struct lro *lro)
 	memset(lro, 0, lro_struct_size);
 }
 
-static void queue_rx_frame(struct sk_buff *skb)
+static void queue_rx_frame(struct sk_buff *skb, u16 vlan_tag)
 {
 	struct net_device *dev = skb->dev;
+	struct s2io_nic *sp = dev->priv;
 
 	skb->protocol = eth_type_trans(skb, dev);
-	if (napi)
-		netif_receive_skb(skb);
-	else
-		netif_rx(skb);
+	if (sp->vlgrp && vlan_tag
+		&& (vlan_strip_flag)) {
+		/* Queueing the vlan frame to the upper layer */
+		if (sp->config.napi)
+			vlan_hwaccel_receive_skb(skb, sp->vlgrp, vlan_tag);
+		else
+			vlan_hwaccel_rx(skb, sp->vlgrp, vlan_tag);
+	} else {
+		if (sp->config.napi)
+			netif_receive_skb(skb);
+		else
+			netif_rx(skb);
+	}
 }
 
 static void lro_append_pkt(struct s2io_nic *sp, struct lro *lro,