Diffstat (limited to 'drivers/net/vxge/vxge-main.c')
-rw-r--r-- drivers/net/vxge/vxge-main.c | 1449
1 file changed, 938 insertions(+), 511 deletions(-)
diff --git a/drivers/net/vxge/vxge-main.c b/drivers/net/vxge/vxge-main.c
index c7c5605b3728..8ab870a2ad02 100644
--- a/drivers/net/vxge/vxge-main.c
+++ b/drivers/net/vxge/vxge-main.c
@@ -50,6 +50,9 @@
 #include <net/ip.h>
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
+#include <linux/firmware.h>
+#include <linux/net_tstamp.h>
+#include <linux/prefetch.h>
 #include "vxge-main.h"
 #include "vxge-reg.h"
 
@@ -138,11 +141,10 @@ static inline void VXGE_COMPLETE_ALL_RX(struct vxgedev *vdev)
  * This function is called during interrupt context to notify link up state
  * change.
  */
-void
-vxge_callback_link_up(struct __vxge_hw_device *hldev)
+static void vxge_callback_link_up(struct __vxge_hw_device *hldev)
 {
 	struct net_device *dev = hldev->ndev;
-	struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev);
+	struct vxgedev *vdev = netdev_priv(dev);
 
 	vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
 		vdev->ndev->name, __func__, __LINE__);
@@ -162,11 +164,10 @@ vxge_callback_link_up(struct __vxge_hw_device *hldev)
  * This function is called during interrupt context to notify link down state
  * change.
  */
-void
-vxge_callback_link_down(struct __vxge_hw_device *hldev)
+static void vxge_callback_link_down(struct __vxge_hw_device *hldev)
 {
 	struct net_device *dev = hldev->ndev;
-	struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev);
+	struct vxgedev *vdev = netdev_priv(dev);
 
 	vxge_debug_entryexit(VXGE_TRACE,
 		"%s: %s:%d", vdev->ndev->name, __func__, __LINE__);
@@ -185,7 +186,7 @@ vxge_callback_link_down(struct __vxge_hw_device *hldev)
  *
  * Allocate SKB.
  */
-static struct sk_buff*
+static struct sk_buff *
 vxge_rx_alloc(void *dtrh, struct vxge_ring *ring, const int skb_size)
 {
 	struct net_device *dev;
@@ -304,22 +305,14 @@ vxge_rx_complete(struct vxge_ring *ring, struct sk_buff *skb, u16 vlan,
304 "%s: %s:%d skb protocol = %d", 305 "%s: %s:%d skb protocol = %d",
305 ring->ndev->name, __func__, __LINE__, skb->protocol); 306 ring->ndev->name, __func__, __LINE__, skb->protocol);
306 307
307 if (ring->gro_enable) { 308 if (ring->vlgrp && ext_info->vlan &&
308 if (ring->vlgrp && ext_info->vlan && 309 (ring->vlan_tag_strip ==
309 (ring->vlan_tag_strip == 310 VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE))
310 VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE)) 311 vlan_gro_receive(ring->napi_p, ring->vlgrp,
311 vlan_gro_receive(ring->napi_p, ring->vlgrp, 312 ext_info->vlan, skb);
312 ext_info->vlan, skb); 313 else
313 else 314 napi_gro_receive(ring->napi_p, skb);
314 napi_gro_receive(ring->napi_p, skb); 315
315 } else {
316 if (ring->vlgrp && vlan &&
317 (ring->vlan_tag_strip ==
318 VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE))
319 vlan_hwaccel_receive_skb(skb, ring->vlgrp, vlan);
320 else
321 netif_receive_skb(skb);
322 }
323 vxge_debug_entryexit(VXGE_TRACE, 316 vxge_debug_entryexit(VXGE_TRACE,
324 "%s: %s:%d Exiting...", ring->ndev->name, __func__, __LINE__); 317 "%s: %s:%d Exiting...", ring->ndev->name, __func__, __LINE__);
325} 318}
@@ -354,12 +347,12 @@ static inline void vxge_post(int *dtr_cnt, void **first_dtr,
  * If the interrupt is because of a received frame or if the receive ring
  * contains fresh as yet un-processed frames, this function is called.
  */
-enum vxge_hw_status
+static enum vxge_hw_status
 vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr,
 		 u8 t_code, void *userdata)
 {
 	struct vxge_ring *ring = (struct vxge_ring *)userdata;
 	struct net_device *dev = ring->ndev;
 	unsigned int dma_sizes;
 	void *first_dtr = NULL;
 	int dtr_cnt = 0;
@@ -371,9 +364,6 @@ vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr,
 	struct vxge_hw_ring_rxd_info ext_info;
 	vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
 		ring->ndev->name, __func__, __LINE__);
-	ring->pkts_processed = 0;
-
-	vxge_hw_ring_replenish(ringh);
 
 	do {
 		prefetch((char *)dtr + L1_CACHE_BYTES);
@@ -403,7 +393,6 @@ vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr,
 
 		prefetch((char *)skb + L1_CACHE_BYTES);
 		if (unlikely(t_code)) {
-
 			if (vxge_hw_ring_handle_tcode(ringh, dtr, t_code) !=
 				VXGE_HW_OK) {
 
@@ -426,9 +415,7 @@ vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr,
 		}
 
 		if (pkt_length > VXGE_LL_RX_COPY_THRESHOLD) {
-
 			if (vxge_rx_alloc(dtr, ring, data_size) != NULL) {
-
 				if (!vxge_rx_map(dtr, ring)) {
 					skb_put(skb, pkt_length);
 
@@ -496,12 +483,29 @@ vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr,
 
 		if ((ext_info.proto & VXGE_HW_FRAME_PROTO_TCP_OR_UDP) &&
 		    !(ext_info.proto & VXGE_HW_FRAME_PROTO_IP_FRAG) &&
-		    ring->rx_csum && /* Offload Rx side CSUM */
+		    (dev->features & NETIF_F_RXCSUM) && /* Offload Rx side CSUM */
 		    ext_info.l3_cksum == VXGE_HW_L3_CKSUM_OK &&
 		    ext_info.l4_cksum == VXGE_HW_L4_CKSUM_OK)
 			skb->ip_summed = CHECKSUM_UNNECESSARY;
 		else
-			skb->ip_summed = CHECKSUM_NONE;
+			skb_checksum_none_assert(skb);
+
+
+		if (ring->rx_hwts) {
+			struct skb_shared_hwtstamps *skb_hwts;
+			u32 ns = *(u32 *)(skb->head + pkt_length);
+
+			skb_hwts = skb_hwtstamps(skb);
+			skb_hwts->hwtstamp = ns_to_ktime(ns);
+			skb_hwts->syststamp.tv64 = 0;
+		}
+
+		/* rth_hash_type and rth_it_hit are non-zero regardless of
+		 * whether rss is enabled.  Only the rth_value is zero/non-zero
+		 * if rss is disabled/enabled, so key off of that.
+		 */
+		if (ext_info.rth_value)
+			skb->rxhash = ext_info.rth_value;
 
 		vxge_rx_complete(ring, skb, ext_info.vlan,
 			pkt_length, &ext_info);
@@ -531,7 +535,7 @@ vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr,
  * freed and frees all skbs whose data have already DMA'ed into the NICs
  * internal memory.
  */
-enum vxge_hw_status
+static enum vxge_hw_status
 vxge_xmit_compl(struct __vxge_hw_fifo *fifo_hw, void *dtr,
 		enum vxge_hw_fifo_tcode t_code, void *userdata,
 		struct sk_buff ***skb_ptr, int nr_skb, int *more)
@@ -650,6 +654,65 @@ static enum vxge_hw_status vxge_search_mac_addr_in_list(
 	return FALSE;
 }
 
+static int vxge_mac_list_add(struct vxge_vpath *vpath, struct macInfo *mac)
+{
+	struct vxge_mac_addrs *new_mac_entry;
+	u8 *mac_address = NULL;
+
+	if (vpath->mac_addr_cnt >= VXGE_MAX_LEARN_MAC_ADDR_CNT)
+		return TRUE;
+
+	new_mac_entry = kzalloc(sizeof(struct vxge_mac_addrs), GFP_ATOMIC);
+	if (!new_mac_entry) {
+		vxge_debug_mem(VXGE_ERR,
+			"%s: memory allocation failed",
+			VXGE_DRIVER_NAME);
+		return FALSE;
+	}
+
+	list_add(&new_mac_entry->item, &vpath->mac_addr_list);
+
+	/* Copy the new mac address to the list */
+	mac_address = (u8 *)&new_mac_entry->macaddr;
+	memcpy(mac_address, mac->macaddr, ETH_ALEN);
+
+	new_mac_entry->state = mac->state;
+	vpath->mac_addr_cnt++;
+
+	/* Is this a multicast address */
+	if (0x01 & mac->macaddr[0])
+		vpath->mcast_addr_cnt++;
+
+	return TRUE;
+}
+
+/* Add a mac address to DA table */
+static enum vxge_hw_status
+vxge_add_mac_addr(struct vxgedev *vdev, struct macInfo *mac)
+{
+	enum vxge_hw_status status = VXGE_HW_OK;
+	struct vxge_vpath *vpath;
+	enum vxge_hw_vpath_mac_addr_add_mode duplicate_mode;
+
+	if (0x01 & mac->macaddr[0]) /* multicast address */
+		duplicate_mode = VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE;
+	else
+		duplicate_mode = VXGE_HW_VPATH_MAC_ADDR_REPLACE_DUPLICATE;
+
+	vpath = &vdev->vpaths[mac->vpath_no];
+	status = vxge_hw_vpath_mac_addr_add(vpath->handle, mac->macaddr,
+						mac->macmask, duplicate_mode);
+	if (status != VXGE_HW_OK) {
+		vxge_debug_init(VXGE_ERR,
+			"DA config add entry failed for vpath:%d",
+			vpath->device_id);
+	} else
+		if (FALSE == vxge_mac_list_add(vpath, mac))
+			status = -EPERM;
+
+	return status;
+}
+
 static int vxge_learn_mac(struct vxgedev *vdev, u8 *mac_header)
 {
 	struct macInfo mac_info;
@@ -660,7 +723,7 @@ static int vxge_learn_mac(struct vxgedev *vdev, u8 *mac_header)
 	struct vxge_vpath *vpath = NULL;
 	struct __vxge_hw_device *hldev;
 
-	hldev = (struct __vxge_hw_device *) pci_get_drvdata(vdev->pdev);
+	hldev = pci_get_drvdata(vdev->pdev);
 
 	mac_address = (u8 *)&mac_addr;
 	memcpy(mac_address, mac_header, ETH_ALEN);
@@ -759,7 +822,7 @@ vxge_xmit(struct sk_buff *skb, struct net_device *dev)
 		return NETDEV_TX_OK;
 	}
 
-	vdev = (struct vxgedev *)netdev_priv(dev);
+	vdev = netdev_priv(dev);
 
 	if (unlikely(!is_vxge_card_up(vdev))) {
 		vxge_debug_tx(VXGE_ERR,
@@ -822,7 +885,7 @@ vxge_xmit(struct sk_buff *skb, struct net_device *dev)
 		dev->name, __func__, __LINE__,
 		fifo_hw, dtr, dtr_priv);
 
-	if (vdev->vlgrp && vlan_tx_tag_present(skb)) {
+	if (vlan_tx_tag_present(skb)) {
 		u16 vlan_tag = vlan_tx_tag_get(skb);
 		vxge_hw_fifo_txdl_vlan_set(dtr, vlan_tag);
 	}
@@ -995,6 +1058,50 @@ vxge_tx_term(void *dtrh, enum vxge_hw_txdl_state state, void *userdata)
995 "%s:%d Exiting...", __func__, __LINE__); 1058 "%s:%d Exiting...", __func__, __LINE__);
996} 1059}
997 1060
1061static int vxge_mac_list_del(struct vxge_vpath *vpath, struct macInfo *mac)
1062{
1063 struct list_head *entry, *next;
1064 u64 del_mac = 0;
1065 u8 *mac_address = (u8 *) (&del_mac);
1066
1067 /* Copy the mac address to delete from the list */
1068 memcpy(mac_address, mac->macaddr, ETH_ALEN);
1069
1070 list_for_each_safe(entry, next, &vpath->mac_addr_list) {
1071 if (((struct vxge_mac_addrs *)entry)->macaddr == del_mac) {
1072 list_del(entry);
1073 kfree((struct vxge_mac_addrs *)entry);
1074 vpath->mac_addr_cnt--;
1075
1076 /* Is this a multicast address */
1077 if (0x01 & mac->macaddr[0])
1078 vpath->mcast_addr_cnt--;
1079 return TRUE;
1080 }
1081 }
1082
1083 return FALSE;
1084}
1085
1086/* delete a mac address from DA table */
1087static enum vxge_hw_status
1088vxge_del_mac_addr(struct vxgedev *vdev, struct macInfo *mac)
1089{
1090 enum vxge_hw_status status = VXGE_HW_OK;
1091 struct vxge_vpath *vpath;
1092
1093 vpath = &vdev->vpaths[mac->vpath_no];
1094 status = vxge_hw_vpath_mac_addr_delete(vpath->handle, mac->macaddr,
1095 mac->macmask);
1096 if (status != VXGE_HW_OK) {
1097 vxge_debug_init(VXGE_ERR,
1098 "DA config delete entry failed for vpath:%d",
1099 vpath->device_id);
1100 } else
1101 vxge_mac_list_del(vpath, mac);
1102 return status;
1103}
1104
998/** 1105/**
999 * vxge_set_multicast 1106 * vxge_set_multicast
1000 * @dev: pointer to the device structure 1107 * @dev: pointer to the device structure
@@ -1024,7 +1131,7 @@ static void vxge_set_multicast(struct net_device *dev)
 	vxge_debug_entryexit(VXGE_TRACE,
 		"%s:%d", __func__, __LINE__);
 
-	vdev = (struct vxgedev *)netdev_priv(dev);
+	vdev = netdev_priv(dev);
 	hldev = (struct __vxge_hw_device *)vdev->devh;
 
 	if (unlikely(!is_vxge_card_up(vdev)))
@@ -1084,7 +1191,7 @@ static void vxge_set_multicast(struct net_device *dev)
 		/* Delete previous MC's */
 		for (i = 0; i < mcast_cnt; i++) {
 			list_for_each_safe(entry, next, list_head) {
-				mac_entry = (struct vxge_mac_addrs *) entry;
+				mac_entry = (struct vxge_mac_addrs *)entry;
 				/* Copy the mac address to delete */
 				mac_address = (u8 *)&mac_entry->macaddr;
 				memcpy(mac_info.macaddr, mac_address, ETH_ALEN);
@@ -1127,7 +1234,7 @@ _set_all_mcast:
 		/* Delete previous MC's */
 		for (i = 0; i < mcast_cnt; i++) {
 			list_for_each_safe(entry, next, list_head) {
-				mac_entry = (struct vxge_mac_addrs *) entry;
+				mac_entry = (struct vxge_mac_addrs *)entry;
 				/* Copy the mac address to delete */
 				mac_address = (u8 *)&mac_entry->macaddr;
 				memcpy(mac_info.macaddr, mac_address, ETH_ALEN);
@@ -1174,14 +1281,14 @@ static int vxge_set_mac_addr(struct net_device *dev, void *p)
 {
 	struct sockaddr *addr = p;
 	struct vxgedev *vdev;
 	struct __vxge_hw_device *hldev;
 	enum vxge_hw_status status = VXGE_HW_OK;
 	struct macInfo mac_info_new, mac_info_old;
 	int vpath_idx = 0;
 
 	vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
 
-	vdev = (struct vxgedev *)netdev_priv(dev);
+	vdev = netdev_priv(dev);
 	hldev = vdev->devh;
 
 	if (!is_valid_ether_addr(addr->sa_data))
@@ -1246,7 +1353,7 @@ static int vxge_set_mac_addr(struct net_device *dev, void *p)
  *
  * Enables the interrupts for the vpath
 */
-void vxge_vpath_intr_enable(struct vxgedev *vdev, int vp_id)
+static void vxge_vpath_intr_enable(struct vxgedev *vdev, int vp_id)
 {
 	struct vxge_vpath *vpath = &vdev->vpaths[vp_id];
 	int msix_id = 0;
@@ -1279,11 +1386,16 @@ void vxge_vpath_intr_enable(struct vxgedev *vdev, int vp_id)
  *
  * Disables the interrupts for the vpath
 */
-void vxge_vpath_intr_disable(struct vxgedev *vdev, int vp_id)
+static void vxge_vpath_intr_disable(struct vxgedev *vdev, int vp_id)
 {
 	struct vxge_vpath *vpath = &vdev->vpaths[vp_id];
+	struct __vxge_hw_device *hldev;
 	int msix_id;
 
+	hldev = pci_get_drvdata(vdev->pdev);
+
+	vxge_hw_vpath_wait_receive_idle(hldev, vpath->device_id);
+
 	vxge_hw_vpath_intr_disable(vpath->handle);
 
 	if (vdev->config.intr_type == INTA)
@@ -1300,6 +1412,95 @@ void vxge_vpath_intr_disable(struct vxgedev *vdev, int vp_id)
 	}
 }
 
+/* list all mac addresses from DA table */
+static enum vxge_hw_status
+vxge_search_mac_addr_in_da_table(struct vxge_vpath *vpath, struct macInfo *mac)
+{
+	enum vxge_hw_status status = VXGE_HW_OK;
+	unsigned char macmask[ETH_ALEN];
+	unsigned char macaddr[ETH_ALEN];
+
+	status = vxge_hw_vpath_mac_addr_get(vpath->handle,
+				macaddr, macmask);
+	if (status != VXGE_HW_OK) {
+		vxge_debug_init(VXGE_ERR,
+			"DA config list entry failed for vpath:%d",
+			vpath->device_id);
+		return status;
+	}
+
+	while (memcmp(mac->macaddr, macaddr, ETH_ALEN)) {
+		status = vxge_hw_vpath_mac_addr_get_next(vpath->handle,
+				macaddr, macmask);
+		if (status != VXGE_HW_OK)
+			break;
+	}
+
+	return status;
+}
+
+/* Store all mac addresses from the list to the DA table */
+static enum vxge_hw_status vxge_restore_vpath_mac_addr(struct vxge_vpath *vpath)
+{
+	enum vxge_hw_status status = VXGE_HW_OK;
+	struct macInfo mac_info;
+	u8 *mac_address = NULL;
+	struct list_head *entry, *next;
+
+	memset(&mac_info, 0, sizeof(struct macInfo));
+
+	if (vpath->is_open) {
+		list_for_each_safe(entry, next, &vpath->mac_addr_list) {
+			mac_address =
+				(u8 *)&
+				((struct vxge_mac_addrs *)entry)->macaddr;
+			memcpy(mac_info.macaddr, mac_address, ETH_ALEN);
+			((struct vxge_mac_addrs *)entry)->state =
+				VXGE_LL_MAC_ADDR_IN_DA_TABLE;
+			/* does this mac address already exist in da table? */
+			status = vxge_search_mac_addr_in_da_table(vpath,
+				&mac_info);
+			if (status != VXGE_HW_OK) {
+				/* Add this mac address to the DA table */
+				status = vxge_hw_vpath_mac_addr_add(
+					vpath->handle, mac_info.macaddr,
+					mac_info.macmask,
+				    VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE);
+				if (status != VXGE_HW_OK) {
+					vxge_debug_init(VXGE_ERR,
+					    "DA add entry failed for vpath:%d",
+					    vpath->device_id);
+					((struct vxge_mac_addrs *)entry)->state
+						= VXGE_LL_MAC_ADDR_IN_LIST;
+				}
+			}
+		}
+	}
+
+	return status;
+}
+
+/* Store all vlan ids from the list to the vid table */
+static enum vxge_hw_status
+vxge_restore_vpath_vid_table(struct vxge_vpath *vpath)
+{
+	enum vxge_hw_status status = VXGE_HW_OK;
+	struct vxgedev *vdev = vpath->vdev;
+	u16 vid;
+
+	if (vdev->vlgrp && vpath->is_open) {
+
+		for (vid = 0; vid < VLAN_N_VID; vid++) {
+			if (!vlan_group_get_device(vdev->vlgrp, vid))
+				continue;
+			/* Add these vlan to the vid table */
+			status = vxge_hw_vpath_vid_add(vpath->handle, vid);
+		}
+	}
+
+	return status;
+}
+
 /*
  * vxge_reset_vpath
  * @vdev: pointer to vdev
@@ -1377,6 +1578,36 @@ static int vxge_reset_vpath(struct vxgedev *vdev, int vp_id)
 	return ret;
 }
 
+/* Configure CI */
+static void vxge_config_ci_for_tti_rti(struct vxgedev *vdev)
+{
+	int i = 0;
+
+	/* Enable CI for RTI */
+	if (vdev->config.intr_type == MSI_X) {
+		for (i = 0; i < vdev->no_of_vpath; i++) {
+			struct __vxge_hw_ring *hw_ring;
+
+			hw_ring = vdev->vpaths[i].ring.handle;
+			vxge_hw_vpath_dynamic_rti_ci_set(hw_ring);
+		}
+	}
+
+	/* Enable CI for TTI */
+	for (i = 0; i < vdev->no_of_vpath; i++) {
+		struct __vxge_hw_fifo *hw_fifo = vdev->vpaths[i].fifo.handle;
+		vxge_hw_vpath_tti_ci_set(hw_fifo);
+		/*
+		 * For Inta (with or without napi), Set CI ON for only one
+		 * vpath. (Have only one free running timer).
+		 */
+		if ((vdev->config.intr_type == INTA) && (i == 0))
+			break;
+	}
+
+	return;
+}
+
 static int do_vxge_reset(struct vxgedev *vdev, int event)
 {
 	enum vxge_hw_status status;
@@ -1395,12 +1626,16 @@ static int do_vxge_reset(struct vxgedev *vdev, int event)
 	}
 
 	if (event == VXGE_LL_FULL_RESET) {
+		netif_carrier_off(vdev->ndev);
+
 		/* wait for all the vpath reset to complete */
 		for (vp_id = 0; vp_id < vdev->no_of_vpath; vp_id++) {
 			while (test_bit(vp_id, &vdev->vp_reset))
 				msleep(50);
 		}
 
+		netif_carrier_on(vdev->ndev);
+
 		/* if execution mode is set to debug, don't reset the adapter */
 		if (unlikely(vdev->exec_mode)) {
 			vxge_debug_init(VXGE_ERR,
@@ -1413,6 +1648,7 @@ static int do_vxge_reset(struct vxgedev *vdev, int event)
 	}
 
 	if (event == VXGE_LL_FULL_RESET) {
+		vxge_hw_device_wait_receive_idle(vdev->devh);
 		vxge_hw_device_intr_disable(vdev->devh);
 
 		switch (vdev->cric_err_event) {
@@ -1537,6 +1773,9 @@ static int do_vxge_reset(struct vxgedev *vdev, int event)
 		netif_tx_wake_all_queues(vdev->ndev);
 	}
 
+	/* configure CI */
+	vxge_config_ci_for_tti_rti(vdev);
+
 out:
 	vxge_debug_entryexit(VXGE_TRACE,
 		"%s:%d Exiting...", __func__, __LINE__);
@@ -1553,9 +1792,14 @@ out:
  *
  * driver may reset the chip on events of serr, eccerr, etc
  */
-int vxge_reset(struct vxgedev *vdev)
+static void vxge_reset(struct work_struct *work)
 {
-	return do_vxge_reset(vdev, VXGE_LL_FULL_RESET);
+	struct vxgedev *vdev = container_of(work, struct vxgedev, reset_task);
+
+	if (!netif_running(vdev->ndev))
+		return;
+
+	do_vxge_reset(vdev, VXGE_LL_FULL_RESET);
 }
 
 /**
@@ -1572,22 +1816,29 @@ int vxge_reset(struct vxgedev *vdev)
  */
 static int vxge_poll_msix(struct napi_struct *napi, int budget)
 {
-	struct vxge_ring *ring =
-		container_of(napi, struct vxge_ring, napi);
+	struct vxge_ring *ring = container_of(napi, struct vxge_ring, napi);
+	int pkts_processed;
 	int budget_org = budget;
-	ring->budget = budget;
 
+	ring->budget = budget;
+	ring->pkts_processed = 0;
 	vxge_hw_vpath_poll_rx(ring->handle);
+	pkts_processed = ring->pkts_processed;
 
 	if (ring->pkts_processed < budget_org) {
 		napi_complete(napi);
+
 		/* Re enable the Rx interrupts for the vpath */
 		vxge_hw_channel_msix_unmask(
 			(struct __vxge_hw_channel *)ring->handle,
 			ring->rx_vector_no);
+		mmiowb();
 	}
 
-	return ring->pkts_processed;
+	/* Return the copied local variable: if the interrupt fires right
+	 * after the vector is unmasked above, it can preempt this NAPI
+	 * thread and change ring->pkts_processed. */
+	return pkts_processed;
 }
 
 static int vxge_poll_inta(struct napi_struct *napi, int budget)
@@ -1598,12 +1849,12 @@ static int vxge_poll_inta(struct napi_struct *napi, int budget)
 	int budget_org = budget;
 	struct vxge_ring *ring;
 
-	struct __vxge_hw_device *hldev = (struct __vxge_hw_device *)
-		pci_get_drvdata(vdev->pdev);
+	struct __vxge_hw_device *hldev = pci_get_drvdata(vdev->pdev);
 
 	for (i = 0; i < vdev->no_of_vpath; i++) {
 		ring = &vdev->vpaths[i].ring;
 		ring->budget = budget;
+		ring->pkts_processed = 0;
 		vxge_hw_vpath_poll_rx(ring->handle);
 		pkts_processed += ring->pkts_processed;
 		budget -= ring->pkts_processed;
@@ -1635,11 +1886,11 @@ static int vxge_poll_inta(struct napi_struct *napi, int budget)
  */
 static void vxge_netpoll(struct net_device *dev)
 {
 	struct __vxge_hw_device *hldev;
 	struct vxgedev *vdev;
 
-	vdev = (struct vxgedev *)netdev_priv(dev);
-	hldev = (struct __vxge_hw_device *)pci_get_drvdata(vdev->pdev);
+	vdev = netdev_priv(dev);
+	hldev = pci_get_drvdata(vdev->pdev);
 
 	vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
 
@@ -1679,15 +1930,6 @@ static enum vxge_hw_status vxge_rth_configure(struct vxgedev *vdev)
 		mtable[index] = index % vdev->no_of_vpath;
 	}
 
-	/* Fill RTH hash types */
-	hash_types.hash_type_tcpipv4_en = vdev->config.rth_hash_type_tcpipv4;
-	hash_types.hash_type_ipv4_en = vdev->config.rth_hash_type_ipv4;
-	hash_types.hash_type_tcpipv6_en = vdev->config.rth_hash_type_tcpipv6;
-	hash_types.hash_type_ipv6_en = vdev->config.rth_hash_type_ipv6;
-	hash_types.hash_type_tcpipv6ex_en =
-			vdev->config.rth_hash_type_tcpipv6ex;
-	hash_types.hash_type_ipv6ex_en = vdev->config.rth_hash_type_ipv6ex;
-
 	/* set indirection table, bucket-to-vpath mapping */
 	status = vxge_hw_vpath_rts_rth_itable_set(vdev->vp_handles,
 						vdev->no_of_vpath,
@@ -1700,19 +1942,27 @@ static enum vxge_hw_status vxge_rth_configure(struct vxgedev *vdev)
 		return status;
 	}
 
+	/* Fill RTH hash types */
+	hash_types.hash_type_tcpipv4_en = vdev->config.rth_hash_type_tcpipv4;
+	hash_types.hash_type_ipv4_en = vdev->config.rth_hash_type_ipv4;
+	hash_types.hash_type_tcpipv6_en = vdev->config.rth_hash_type_tcpipv6;
+	hash_types.hash_type_ipv6_en = vdev->config.rth_hash_type_ipv6;
+	hash_types.hash_type_tcpipv6ex_en =
+			vdev->config.rth_hash_type_tcpipv6ex;
+	hash_types.hash_type_ipv6ex_en = vdev->config.rth_hash_type_ipv6ex;
+
 	/*
 	 * Because the itable_set() method uses the active_table field
 	 * for the target virtual path the RTH config should be updated
 	 * for all VPATHs. The h/w only uses the lowest numbered VPATH
 	 * when steering frames.
 	 */
 	for (index = 0; index < vdev->no_of_vpath; index++) {
 		status = vxge_hw_vpath_rts_rth_set(
 				vdev->vpaths[index].handle,
 				vdev->config.rth_algorithm,
 				&hash_types,
 				vdev->config.rth_bkt_sz);
-
 		if (status != VXGE_HW_OK) {
 			vxge_debug_init(VXGE_ERR,
 				"RTH configuration failed for vpath:%d",
@@ -1724,197 +1974,6 @@ static enum vxge_hw_status vxge_rth_configure(struct vxgedev *vdev)
 	return status;
 }
 
-int vxge_mac_list_add(struct vxge_vpath *vpath, struct macInfo *mac)
-{
-	struct vxge_mac_addrs *new_mac_entry;
-	u8 *mac_address = NULL;
-
-	if (vpath->mac_addr_cnt >= VXGE_MAX_LEARN_MAC_ADDR_CNT)
-		return TRUE;
-
-	new_mac_entry = kzalloc(sizeof(struct vxge_mac_addrs), GFP_ATOMIC);
-	if (!new_mac_entry) {
-		vxge_debug_mem(VXGE_ERR,
-			"%s: memory allocation failed",
-			VXGE_DRIVER_NAME);
-		return FALSE;
-	}
-
-	list_add(&new_mac_entry->item, &vpath->mac_addr_list);
-
-	/* Copy the new mac address to the list */
-	mac_address = (u8 *)&new_mac_entry->macaddr;
-	memcpy(mac_address, mac->macaddr, ETH_ALEN);
-
-	new_mac_entry->state = mac->state;
-	vpath->mac_addr_cnt++;
-
-	/* Is this a multicast address */
-	if (0x01 & mac->macaddr[0])
-		vpath->mcast_addr_cnt++;
-
-	return TRUE;
-}
-
-/* Add a mac address to DA table */
-enum vxge_hw_status vxge_add_mac_addr(struct vxgedev *vdev, struct macInfo *mac)
-{
-	enum vxge_hw_status status = VXGE_HW_OK;
-	struct vxge_vpath *vpath;
-	enum vxge_hw_vpath_mac_addr_add_mode duplicate_mode;
-
-	if (0x01 & mac->macaddr[0]) /* multicast address */
-		duplicate_mode = VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE;
-	else
-		duplicate_mode = VXGE_HW_VPATH_MAC_ADDR_REPLACE_DUPLICATE;
-
-	vpath = &vdev->vpaths[mac->vpath_no];
-	status = vxge_hw_vpath_mac_addr_add(vpath->handle, mac->macaddr,
-						mac->macmask, duplicate_mode);
-	if (status != VXGE_HW_OK) {
-		vxge_debug_init(VXGE_ERR,
-			"DA config add entry failed for vpath:%d",
-			vpath->device_id);
-	} else
-		if (FALSE == vxge_mac_list_add(vpath, mac))
-			status = -EPERM;
-
-	return status;
-}
-
-int vxge_mac_list_del(struct vxge_vpath *vpath, struct macInfo *mac)
-{
-	struct list_head *entry, *next;
-	u64 del_mac = 0;
-	u8 *mac_address = (u8 *) (&del_mac);
-
-	/* Copy the mac address to delete from the list */
-	memcpy(mac_address, mac->macaddr, ETH_ALEN);
-
-	list_for_each_safe(entry, next, &vpath->mac_addr_list) {
-		if (((struct vxge_mac_addrs *)entry)->macaddr == del_mac) {
-			list_del(entry);
-			kfree((struct vxge_mac_addrs *)entry);
-			vpath->mac_addr_cnt--;
-
-			/* Is this a multicast address */
-			if (0x01 & mac->macaddr[0])
-				vpath->mcast_addr_cnt--;
-			return TRUE;
-		}
-	}
-
-	return FALSE;
-}
-/* delete a mac address from DA table */
-enum vxge_hw_status vxge_del_mac_addr(struct vxgedev *vdev, struct macInfo *mac)
-{
-	enum vxge_hw_status status = VXGE_HW_OK;
-	struct vxge_vpath *vpath;
-
-	vpath = &vdev->vpaths[mac->vpath_no];
-	status = vxge_hw_vpath_mac_addr_delete(vpath->handle, mac->macaddr,
-						mac->macmask);
-	if (status != VXGE_HW_OK) {
-		vxge_debug_init(VXGE_ERR,
-			"DA config delete entry failed for vpath:%d",
-			vpath->device_id);
-	} else
-		vxge_mac_list_del(vpath, mac);
-	return status;
-}
-
-/* list all mac addresses from DA table */
-enum vxge_hw_status
-static vxge_search_mac_addr_in_da_table(struct vxge_vpath *vpath,
-					struct macInfo *mac)
-{
-	enum vxge_hw_status status = VXGE_HW_OK;
-	unsigned char macmask[ETH_ALEN];
-	unsigned char macaddr[ETH_ALEN];
-
-	status = vxge_hw_vpath_mac_addr_get(vpath->handle,
-				macaddr, macmask);
-	if (status != VXGE_HW_OK) {
-		vxge_debug_init(VXGE_ERR,
-			"DA config list entry failed for vpath:%d",
-			vpath->device_id);
-		return status;
-	}
-
-	while (memcmp(mac->macaddr, macaddr, ETH_ALEN)) {
-
-		status = vxge_hw_vpath_mac_addr_get_next(vpath->handle,
-				macaddr, macmask);
-		if (status != VXGE_HW_OK)
-			break;
-	}
-
-	return status;
-}
-
-/* Store all vlan ids from the list to the vid table */
-enum vxge_hw_status vxge_restore_vpath_vid_table(struct vxge_vpath *vpath)
-{
-	enum vxge_hw_status status = VXGE_HW_OK;
-	struct vxgedev *vdev = vpath->vdev;
-	u16 vid;
-
-	if (vdev->vlgrp && vpath->is_open) {
-
-		for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
-			if (!vlan_group_get_device(vdev->vlgrp, vid))
-				continue;
-			/* Add these vlan to the vid table */
-			status = vxge_hw_vpath_vid_add(vpath->handle, vid);
-		}
-	}
-
-	return status;
-}
-
-/* Store all mac addresses from the list to the DA table */
-enum vxge_hw_status vxge_restore_vpath_mac_addr(struct vxge_vpath *vpath)
-{
-	enum vxge_hw_status status = VXGE_HW_OK;
-	struct macInfo mac_info;
-	u8 *mac_address = NULL;
-	struct list_head *entry, *next;
-
-	memset(&mac_info, 0, sizeof(struct macInfo));
-
-	if (vpath->is_open) {
-
-		list_for_each_safe(entry, next, &vpath->mac_addr_list) {
-			mac_address =
-				(u8 *)&
-				((struct vxge_mac_addrs *)entry)->macaddr;
-			memcpy(mac_info.macaddr, mac_address, ETH_ALEN);
-			((struct vxge_mac_addrs *)entry)->state =
-				VXGE_LL_MAC_ADDR_IN_DA_TABLE;
-			/* does this mac address already exist in da table? */
-			status = vxge_search_mac_addr_in_da_table(vpath,
-				&mac_info);
-			if (status != VXGE_HW_OK) {
-				/* Add this mac address to the DA table */
-				status = vxge_hw_vpath_mac_addr_add(
-					vpath->handle, mac_info.macaddr,
-					mac_info.macmask,
-				    VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE);
-				if (status != VXGE_HW_OK) {
-					vxge_debug_init(VXGE_ERR,
-					    "DA add entry failed for vpath:%d",
-					    vpath->device_id);
-					((struct vxge_mac_addrs *)entry)->state
-						= VXGE_LL_MAC_ADDR_IN_LIST;
-				}
-			}
-		}
-	}
-
-	return status;
-}
-
 /* reset vpaths */
 enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev)
 {
@@ -1948,7 +2007,7 @@ enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev)
 }
 
 /* close vpaths */
-void vxge_close_vpaths(struct vxgedev *vdev, int index)
+static void vxge_close_vpaths(struct vxgedev *vdev, int index)
 {
 	struct vxge_vpath *vpath;
 	int i;
@@ -1966,7 +2025,7 @@ void vxge_close_vpaths(struct vxgedev *vdev, int index)
 }
 
 /* open vpaths */
-int vxge_open_vpaths(struct vxgedev *vdev)
+static int vxge_open_vpaths(struct vxgedev *vdev)
 {
 	struct vxge_hw_vpath_attr attr;
 	enum vxge_hw_status status;
@@ -1976,8 +2035,23 @@ int vxge_open_vpaths(struct vxgedev *vdev)
 
 	for (i = 0; i < vdev->no_of_vpath; i++) {
 		vpath = &vdev->vpaths[i];
-
 		vxge_assert(vpath->is_configured);
+
+		if (!vdev->titan1) {
+			struct vxge_hw_vp_config *vcfg;
+			vcfg = &vdev->devh->config.vp_config[vpath->device_id];
+
+			vcfg->rti.urange_a = RTI_T1A_RX_URANGE_A;
+			vcfg->rti.urange_b = RTI_T1A_RX_URANGE_B;
+			vcfg->rti.urange_c = RTI_T1A_RX_URANGE_C;
+			vcfg->tti.uec_a = TTI_T1A_TX_UFC_A;
+			vcfg->tti.uec_b = TTI_T1A_TX_UFC_B;
+			vcfg->tti.uec_c = TTI_T1A_TX_UFC_C(vdev->mtu);
+			vcfg->tti.uec_d = TTI_T1A_TX_UFC_D(vdev->mtu);
+			vcfg->tti.ltimer_val = VXGE_T1A_TTI_LTIMER_VAL;
+			vcfg->tti.rtimer_val = VXGE_T1A_TTI_RTIMER_VAL;
+		}
+
 		attr.vp_id = vpath->device_id;
 		attr.fifo_attr.callback = vxge_xmit_compl;
 		attr.fifo_attr.txdl_term = vxge_tx_term;
@@ -1992,6 +2066,7 @@ int vxge_open_vpaths(struct vxgedev *vdev)
 
 		vpath->ring.ndev = vdev->ndev;
 		vpath->ring.pdev = vdev->pdev;
+
 		status = vxge_hw_vpath_open(vdev->devh, &attr, &vpath->handle);
 		if (status == VXGE_HW_OK) {
 			vpath->fifo.handle =
@@ -2010,20 +2085,19 @@ int vxge_open_vpaths(struct vxgedev *vdev)
 				netdev_get_tx_queue(vdev->ndev, 0);
 			vpath->fifo.indicate_max_pkts =
 				vdev->config.fifo_indicate_max_pkts;
+			vpath->fifo.tx_vector_no = 0;
 			vpath->ring.rx_vector_no = 0;
-			vpath->ring.rx_csum = vdev->rx_csum;
+			vpath->ring.rx_hwts = vdev->rx_hwts;
 			vpath->is_open = 1;
 			vdev->vp_handles[i] = vpath->handle;
-			vpath->ring.gro_enable = vdev->config.gro_enable;
 			vpath->ring.vlan_tag_strip = vdev->vlan_tag_strip;
 			vdev->stats.vpaths_open++;
 		} else {
 			vdev->stats.vpath_open_fail++;
-			vxge_debug_init(VXGE_ERR,
-				"%s: vpath: %d failed to open "
-				"with status: %d",
-				vdev->ndev->name, vpath->device_id,
-				status);
+			vxge_debug_init(VXGE_ERR, "%s: vpath: %d failed to "
+					"open with status: %d",
+					vdev->ndev->name, vpath->device_id,
+					status);
 			vxge_close_vpaths(vdev, 0);
 			return -EPERM;
 		}
@@ -2031,9 +2105,65 @@ int vxge_open_vpaths(struct vxgedev *vdev)
 		vp_id = vpath->handle->vpath->vp_id;
 		vdev->vpaths_deployed |= vxge_mBIT(vp_id);
 	}
+
 	return VXGE_HW_OK;
 }
 
+/**
+ * adaptive_coalesce_tx_interrupts - Changes the interrupt coalescing
+ * if the interrupts are not within a range
+ * @fifo: pointer to transmit fifo structure
+ * Description: The function changes the boundary timer and restriction
+ * timer values depending on the traffic
+ * Return Value: None
+ */
+static void adaptive_coalesce_tx_interrupts(struct vxge_fifo *fifo)
+{
+	fifo->interrupt_count++;
+	if (jiffies > fifo->jiffies + HZ / 100) {
+		struct __vxge_hw_fifo *hw_fifo = fifo->handle;
+
+		fifo->jiffies = jiffies;
+		if (fifo->interrupt_count > VXGE_T1A_MAX_TX_INTERRUPT_COUNT &&
+		    hw_fifo->rtimer != VXGE_TTI_RTIMER_ADAPT_VAL) {
+			hw_fifo->rtimer = VXGE_TTI_RTIMER_ADAPT_VAL;
+			vxge_hw_vpath_dynamic_tti_rtimer_set(hw_fifo);
+		} else if (hw_fifo->rtimer != 0) {
+			hw_fifo->rtimer = 0;
+			vxge_hw_vpath_dynamic_tti_rtimer_set(hw_fifo);
+		}
+		fifo->interrupt_count = 0;
+	}
+}
+
+/**
+ * adaptive_coalesce_rx_interrupts - Changes the interrupt coalescing
+ * if the interrupts are not within a range
+ * @ring: pointer to receive ring structure
+ * Description: The function increases or decreases the packet count within
+ * the range of traffic utilization, if the interrupts due to this ring are
+ * not within a fixed range.
+ * Return Value: Nothing
+ */
+static void adaptive_coalesce_rx_interrupts(struct vxge_ring *ring)
+{
+	ring->interrupt_count++;
+	if (jiffies > ring->jiffies + HZ / 100) {
+		struct __vxge_hw_ring *hw_ring = ring->handle;
+
+		ring->jiffies = jiffies;
+		if (ring->interrupt_count > VXGE_T1A_MAX_INTERRUPT_COUNT &&
+		    hw_ring->rtimer != VXGE_RTI_RTIMER_ADAPT_VAL) {
+			hw_ring->rtimer = VXGE_RTI_RTIMER_ADAPT_VAL;
+			vxge_hw_vpath_dynamic_rti_rtimer_set(hw_ring);
+		} else if (hw_ring->rtimer != 0) {
+			hw_ring->rtimer = 0;
+			vxge_hw_vpath_dynamic_rti_rtimer_set(hw_ring);
+		}
+		ring->interrupt_count = 0;
+	}
+}
+
 /*
  * vxge_isr_napi
  * @irq: the irq of the device.
@@ -2050,21 +2180,20 @@ static irqreturn_t vxge_isr_napi(int irq, void *dev_id)
 	struct __vxge_hw_device *hldev;
 	u64 reason;
 	enum vxge_hw_status status;
-	struct vxgedev *vdev = (struct vxgedev *) dev_id;;
+	struct vxgedev *vdev = (struct vxgedev *)dev_id;
 
 	vxge_debug_intr(VXGE_TRACE, "%s:%d", __func__, __LINE__);
 
 	dev = vdev->ndev;
-	hldev = (struct __vxge_hw_device *)pci_get_drvdata(vdev->pdev);
+	hldev = pci_get_drvdata(vdev->pdev);
 
 	if (pci_channel_offline(vdev->pdev))
 		return IRQ_NONE;
 
 	if (unlikely(!is_vxge_card_up(vdev)))
-		return IRQ_NONE;
+		return IRQ_HANDLED;
 
-	status = vxge_hw_device_begin_irq(hldev, vdev->exec_mode,
-		&reason);
+	status = vxge_hw_device_begin_irq(hldev, vdev->exec_mode, &reason);
 	if (status == VXGE_HW_OK) {
 		vxge_hw_device_mask_all(hldev);
 
@@ -2095,24 +2224,39 @@ static irqreturn_t vxge_isr_napi(int irq, void *dev_id)
 
 #ifdef CONFIG_PCI_MSI
 
-static irqreturn_t
-vxge_tx_msix_handle(int irq, void *dev_id)
+static irqreturn_t vxge_tx_msix_handle(int irq, void *dev_id)
 {
 	struct vxge_fifo *fifo = (struct vxge_fifo *)dev_id;
 
+	adaptive_coalesce_tx_interrupts(fifo);
+
+	vxge_hw_channel_msix_mask((struct __vxge_hw_channel *)fifo->handle,
+				  fifo->tx_vector_no);
+
+	vxge_hw_channel_msix_clear((struct __vxge_hw_channel *)fifo->handle,
+				   fifo->tx_vector_no);
+
 	VXGE_COMPLETE_VPATH_TX(fifo);
 
+	vxge_hw_channel_msix_unmask((struct __vxge_hw_channel *)fifo->handle,
+				    fifo->tx_vector_no);
+
+	mmiowb();
+
 	return IRQ_HANDLED;
 }
 
-static irqreturn_t
-vxge_rx_msix_napi_handle(int irq, void *dev_id)
+static irqreturn_t vxge_rx_msix_napi_handle(int irq, void *dev_id)
 {
 	struct vxge_ring *ring = (struct vxge_ring *)dev_id;
 
-	/* MSIX_IDX for Rx is 1 */
+	adaptive_coalesce_rx_interrupts(ring);
+
 	vxge_hw_channel_msix_mask((struct __vxge_hw_channel *)ring->handle,
 				  ring->rx_vector_no);
+
+	vxge_hw_channel_msix_clear((struct __vxge_hw_channel *)ring->handle,
+				   ring->rx_vector_no);
 
 	napi_schedule(&ring->napi);
 	return IRQ_HANDLED;
@@ -2129,14 +2273,20 @@ vxge_alarm_msix_handle(int irq, void *dev_id)
 		VXGE_HW_VPATH_MSIX_ACTIVE) + VXGE_ALARM_MSIX_ID;
 
 	for (i = 0; i < vdev->no_of_vpath; i++) {
+		/* Reduce the chance of losing alarm interrupts by masking
+		 * the vector. A pending bit will be set if an alarm is
+		 * generated and on unmask the interrupt will be fired.
+		 */
 		vxge_hw_vpath_msix_mask(vdev->vpaths[i].handle, msix_id);
+		vxge_hw_vpath_msix_clear(vdev->vpaths[i].handle, msix_id);
+		mmiowb();
 
 		status = vxge_hw_vpath_alarm_process(vdev->vpaths[i].handle,
 			vdev->exec_mode);
 		if (status == VXGE_HW_OK) {
-
 			vxge_hw_vpath_msix_unmask(vdev->vpaths[i].handle,
 					msix_id);
+			mmiowb();
 			continue;
 		}
 		vxge_debug_intr(VXGE_ERR,
@@ -2159,8 +2309,8 @@ start:
 	/* Alarm MSIX Vectors count */
 	vdev->intr_cnt++;
 
-	vdev->entries = kzalloc(vdev->intr_cnt * sizeof(struct msix_entry),
-				GFP_KERNEL);
+	vdev->entries = kcalloc(vdev->intr_cnt, sizeof(struct msix_entry),
+				GFP_KERNEL);
 	if (!vdev->entries) {
 		vxge_debug_init(VXGE_ERR,
 			"%s: memory allocation failed",
@@ -2169,9 +2319,9 @@ start:
 		goto alloc_entries_failed;
 	}
 
-	vdev->vxge_entries =
-		kzalloc(vdev->intr_cnt * sizeof(struct vxge_msix_entry),
-			GFP_KERNEL);
+	vdev->vxge_entries = kcalloc(vdev->intr_cnt,
+				     sizeof(struct vxge_msix_entry),
+				     GFP_KERNEL);
 	if (!vdev->vxge_entries) {
 		vxge_debug_init(VXGE_ERR, "%s: memory allocation failed",
 			VXGE_DRIVER_NAME);
@@ -2255,6 +2405,9 @@ static int vxge_enable_msix(struct vxgedev *vdev)
 		vpath->ring.rx_vector_no = (vpath->device_id *
 					VXGE_HW_VPATH_MSIX_ACTIVE) + 1;
 
+		vpath->fifo.tx_vector_no = (vpath->device_id *
+					VXGE_HW_VPATH_MSIX_ACTIVE);
+
 		vxge_hw_vpath_msix_set(vpath->handle, tim_msix_id,
 				VXGE_ALARM_MSIX_ID);
 	}
@@ -2289,8 +2442,8 @@ static void vxge_rem_msix_isr(struct vxgedev *vdev)
 
 static void vxge_rem_isr(struct vxgedev *vdev)
 {
 	struct __vxge_hw_device *hldev;
-	hldev = (struct __vxge_hw_device *) pci_get_drvdata(vdev->pdev);
+	hldev = pci_get_drvdata(vdev->pdev);
 
 #ifdef CONFIG_PCI_MSI
 	if (vdev->config.intr_type == MSI_X) {
@@ -2430,8 +2583,9 @@ INTA_MODE:
2430 "%s:vxge:INTA", vdev->ndev->name); 2583 "%s:vxge:INTA", vdev->ndev->name);
2431 vxge_hw_device_set_intr_type(vdev->devh, 2584 vxge_hw_device_set_intr_type(vdev->devh,
2432 VXGE_HW_INTR_MODE_IRQLINE); 2585 VXGE_HW_INTR_MODE_IRQLINE);
2433 vxge_hw_vpath_tti_ci_set(vdev->devh, 2586
2434 vdev->vpaths[0].device_id); 2587 vxge_hw_vpath_tti_ci_set(vdev->vpaths[0].fifo.handle);
2588
2435 ret = request_irq((int) vdev->pdev->irq, 2589 ret = request_irq((int) vdev->pdev->irq,
2436 vxge_isr_napi, 2590 vxge_isr_napi,
2437 IRQF_SHARED, vdev->desc[0], vdev); 2591 IRQF_SHARED, vdev->desc[0], vdev);
@@ -2507,6 +2661,40 @@ static void vxge_poll_vp_lockup(unsigned long data)
 	mod_timer(&vdev->vp_lockup_timer, jiffies + HZ / 1000);
 }
 
+static u32 vxge_fix_features(struct net_device *dev, u32 features)
+{
+	u32 changed = dev->features ^ features;
+
+	/* Enabling RTH requires some of the logic in vxge_device_register and a
+	 * vpath reset.  Due to these restrictions, only allow modification
+	 * while the interface is down.
+	 */
+	if ((changed & NETIF_F_RXHASH) && netif_running(dev))
+		features ^= NETIF_F_RXHASH;
+
+	return features;
+}
+
+static int vxge_set_features(struct net_device *dev, u32 features)
+{
+	struct vxgedev *vdev = netdev_priv(dev);
+	u32 changed = dev->features ^ features;
+
+	if (!(changed & NETIF_F_RXHASH))
+		return 0;
+
+	/* !netif_running() ensured by vxge_fix_features() */
+
+	vdev->devh->config.rth_en = !!(features & NETIF_F_RXHASH);
+	if (vxge_reset_all_vpaths(vdev) != VXGE_HW_OK) {
+		dev->features = features ^ NETIF_F_RXHASH;
+		vdev->devh->config.rth_en = !!(dev->features & NETIF_F_RXHASH);
+		return -EIO;
+	}
+
+	return 0;
+}
+
 /**
  * vxge_open
  * @dev: pointer to the device structure.
@@ -2517,8 +2705,7 @@ static void vxge_poll_vp_lockup(unsigned long data)
  * Return value: '0' on success and an appropriate (-)ve integer as
  * defined in errno.h file on failure.
  */
-int
-vxge_open(struct net_device *dev)
+static int vxge_open(struct net_device *dev)
 {
 	enum vxge_hw_status status;
 	struct vxgedev *vdev;
@@ -2527,11 +2714,12 @@ vxge_open(struct net_device *dev)
 	int ret = 0;
 	int i;
 	u64 val64, function_mode;
+
 	vxge_debug_entryexit(VXGE_TRACE,
 		"%s: %s:%d", dev->name, __func__, __LINE__);
 
-	vdev = (struct vxgedev *)netdev_priv(dev);
-	hldev = (struct __vxge_hw_device *) pci_get_drvdata(vdev->pdev);
+	vdev = netdev_priv(dev);
+	hldev = pci_get_drvdata(vdev->pdev);
 	function_mode = vdev->config.device_hw_info.function_mode;
 
 	/* make sure you have link off by default every time Nic is
@@ -2586,6 +2774,8 @@ vxge_open(struct net_device *dev)
 			goto out2;
 		}
 	}
+	printk(KERN_INFO "%s: Receive Hashing Offload %s\n", dev->name,
+	       hldev->config.rth_en ? "enabled" : "disabled");
 
 	for (i = 0; i < vdev->no_of_vpath; i++) {
 		vpath = &vdev->vpaths[i];
@@ -2623,7 +2813,7 @@ vxge_open(struct net_device *dev)
 	}
 
 	/* Enable vpath to sniff all unicast/multicast traffic that not
-	 * addressed to them. We allow promiscous mode for PF only
+	 * addressed to them. We allow promiscuous mode for PF only
 	 */
 
 	val64 = 0;
@@ -2671,9 +2861,10 @@ vxge_open(struct net_device *dev)
 		vxge_os_timer(vdev->vp_reset_timer,
 			vxge_poll_vp_reset, vdev, (HZ/2));
 
-	if (vdev->vp_lockup_timer.function == NULL)
-		vxge_os_timer(vdev->vp_lockup_timer,
-			vxge_poll_vp_lockup, vdev, (HZ/2));
+	/* There is no need to check for RxD leak and RxD lookup on Titan1A */
+	if (vdev->titan1 && vdev->vp_lockup_timer.function == NULL)
+		vxge_os_timer(vdev->vp_lockup_timer, vxge_poll_vp_lockup, vdev,
+			      HZ / 2);
 
 	set_bit(__VXGE_STATE_CARD_UP, &vdev->state);
 
@@ -2698,6 +2889,10 @@ vxge_open(struct net_device *dev)
 	}
 
 	netif_tx_start_all_queues(vdev->ndev);
+
+	/* configure CI */
+	vxge_config_ci_for_tti_rti(vdev);
+
 	goto out0;
 
 out2:
@@ -2720,8 +2915,8 @@ out0:
 	return ret;
 }
 
-/* Loop throught the mac address list and delete all the entries */
-void vxge_free_mac_add_list(struct vxge_vpath *vpath)
+/* Loop through the mac address list and delete all the entries */
+static void vxge_free_mac_add_list(struct vxge_vpath *vpath)
 {
 
 	struct list_head *entry, *next;
@@ -2745,7 +2940,7 @@ static void vxge_napi_del_all(struct vxgedev *vdev)
 	}
 }
 
-int do_vxge_close(struct net_device *dev, int do_io)
+static int do_vxge_close(struct net_device *dev, int do_io)
 {
 	enum vxge_hw_status status;
 	struct vxgedev *vdev;
@@ -2755,8 +2950,8 @@ int do_vxge_close(struct net_device *dev, int do_io)
 	vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
 		dev->name, __func__, __LINE__);
 
-	vdev = (struct vxgedev *)netdev_priv(dev);
-	hldev = (struct __vxge_hw_device *) pci_get_drvdata(vdev->pdev);
+	vdev = netdev_priv(dev);
+	hldev = pci_get_drvdata(vdev->pdev);
 
 	if (unlikely(!is_vxge_card_up(vdev)))
 		return 0;
@@ -2766,7 +2961,6 @@ int do_vxge_close(struct net_device *dev, int do_io)
 	while (test_and_set_bit(__VXGE_STATE_RESET_CARD, &vdev->state))
 		msleep(50);
 
-	clear_bit(__VXGE_STATE_CARD_UP, &vdev->state);
 	if (do_io) {
 		/* Put the vpath back in normal mode */
 		vpath_vector = vxge_mBIT(vdev->vpaths[0].device_id);
@@ -2777,7 +2971,6 @@ int do_vxge_close(struct net_device *dev, int do_io)
 				struct vxge_hw_mrpcim_reg,
 				rts_mgr_cbasin_cfg),
 			&val64);
-
 		if (status == VXGE_HW_OK) {
 			val64 &= ~vpath_vector;
 			status = vxge_hw_mgmt_reg_write(vdev->devh,
@@ -2789,7 +2982,7 @@ int do_vxge_close(struct net_device *dev, int do_io)
 					val64);
 		}
 
-		/* Remove the function 0 from promiscous mode */
+		/* Remove the function 0 from promiscuous mode */
 		vxge_hw_mgmt_reg_write(vdev->devh,
 			vxge_hw_mgmt_reg_type_mrpcim,
 			0,
@@ -2806,10 +2999,17 @@ int do_vxge_close(struct net_device *dev, int do_io)
 
 		smp_wmb();
 	}
-	del_timer_sync(&vdev->vp_lockup_timer);
+
+	if (vdev->titan1)
+		del_timer_sync(&vdev->vp_lockup_timer);
 
 	del_timer_sync(&vdev->vp_reset_timer);
 
+	if (do_io)
+		vxge_hw_device_wait_receive_idle(hldev);
+
+	clear_bit(__VXGE_STATE_CARD_UP, &vdev->state);
+
 	/* Disable napi */
 	if (vdev->config.intr_type != MSI_X)
 		napi_disable(&vdev->napi);
@@ -2826,8 +3026,6 @@ int do_vxge_close(struct net_device *dev, int do_io)
2826 if (do_io) 3026 if (do_io)
2827 vxge_hw_device_intr_disable(vdev->devh); 3027 vxge_hw_device_intr_disable(vdev->devh);
2828 3028
2829 mdelay(1000);
2830
2831 vxge_rem_isr(vdev); 3029 vxge_rem_isr(vdev);
2832 3030
2833 vxge_napi_del_all(vdev); 3031 vxge_napi_del_all(vdev);
@@ -2856,8 +3054,7 @@ int do_vxge_close(struct net_device *dev, int do_io)
2856 * Return value: '0' on success and an appropriate (-)ve integer as 3054 * Return value: '0' on success and an appropriate (-)ve integer as
2857 * defined in errno.h file on failure. 3055 * defined in errno.h file on failure.
2858 */ 3056 */
2859int 3057static int vxge_close(struct net_device *dev)
2860vxge_close(struct net_device *dev)
2861{ 3058{
2862 do_vxge_close(dev, 1); 3059 do_vxge_close(dev, 1);
2863 return 0; 3060 return 0;
@@ -2914,34 +3111,24 @@ static int vxge_change_mtu(struct net_device *dev, int new_mtu)
2914} 3111}
2915 3112
2916/** 3113/**
2917 * vxge_get_stats 3114 * vxge_get_stats64
2918 * @dev: pointer to the device structure 3115 * @dev: pointer to the device structure
 3116 * @net_stats: pointer to struct rtnl_link_stats64
2919 * 3117 *
2920 * Updates the device statistics structure. This function updates the device
2921 * statistics structure in the net_device structure and returns a pointer
2922 * to the same.
2923 */ 3118 */
2924static struct net_device_stats * 3119static struct rtnl_link_stats64 *
2925vxge_get_stats(struct net_device *dev) 3120vxge_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats)
2926{ 3121{
2927 struct vxgedev *vdev; 3122 struct vxgedev *vdev = netdev_priv(dev);
2928 struct net_device_stats *net_stats;
2929 int k; 3123 int k;
2930 3124
2931 vdev = netdev_priv(dev); 3125 /* net_stats already zeroed by caller */
2932
2933 net_stats = &vdev->stats.net_stats;
2934
2935 memset(net_stats, 0, sizeof(struct net_device_stats));
2936
2937 for (k = 0; k < vdev->no_of_vpath; k++) { 3126 for (k = 0; k < vdev->no_of_vpath; k++) {
2938 net_stats->rx_packets += vdev->vpaths[k].ring.stats.rx_frms; 3127 net_stats->rx_packets += vdev->vpaths[k].ring.stats.rx_frms;
2939 net_stats->rx_bytes += vdev->vpaths[k].ring.stats.rx_bytes; 3128 net_stats->rx_bytes += vdev->vpaths[k].ring.stats.rx_bytes;
2940 net_stats->rx_errors += vdev->vpaths[k].ring.stats.rx_errors; 3129 net_stats->rx_errors += vdev->vpaths[k].ring.stats.rx_errors;
2941 net_stats->multicast += vdev->vpaths[k].ring.stats.rx_mcast; 3130 net_stats->multicast += vdev->vpaths[k].ring.stats.rx_mcast;
2942 net_stats->rx_dropped += 3131 net_stats->rx_dropped += vdev->vpaths[k].ring.stats.rx_dropped;
2943 vdev->vpaths[k].ring.stats.rx_dropped;
2944
2945 net_stats->tx_packets += vdev->vpaths[k].fifo.stats.tx_frms; 3132 net_stats->tx_packets += vdev->vpaths[k].fifo.stats.tx_frms;
2946 net_stats->tx_bytes += vdev->vpaths[k].fifo.stats.tx_bytes; 3133 net_stats->tx_bytes += vdev->vpaths[k].fifo.stats.tx_bytes;
2947 net_stats->tx_errors += vdev->vpaths[k].fifo.stats.tx_errors; 3134 net_stats->tx_errors += vdev->vpaths[k].fifo.stats.tx_errors;
@@ -2950,6 +3137,92 @@ vxge_get_stats(struct net_device *dev)
2950 return net_stats; 3137 return net_stats;
2951} 3138}
2952 3139
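For context on the stats conversion: the core zeroes the caller-supplied rtnl_link_stats64 in dev_get_stats() before invoking ndo_get_stats64, which is why the driver-side memset could be dropped. A minimal sketch of a consumer, assuming only the standard netdev API (the helper name is illustrative, not part of this commit):

#include <linux/netdevice.h>

/* Sketch only -- not part of this commit. */
static void example_dump_rx(struct net_device *dev)
{
	struct rtnl_link_stats64 storage;

	/* dev_get_stats() zeroes 'storage', then reaches
	 * vxge_get_stats64() through ndo_get_stats64. */
	dev_get_stats(dev, &storage);
	pr_info("%s: rx_packets=%llu\n", dev->name,
		(unsigned long long)storage.rx_packets);
}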
3140static enum vxge_hw_status vxge_timestamp_config(struct __vxge_hw_device *devh)
3141{
3142 enum vxge_hw_status status;
3143 u64 val64;
3144
 3145 /* The timestamp is passed to the driver via the FCS, so FCS
 3146 * stripping by the adapter must stay disabled. Since this is
 3147 * already required for the driver to load (due to a hardware bug),
 3148 * there is no need to do anything special here.
3149 */
3150 val64 = VXGE_HW_XMAC_TIMESTAMP_EN |
3151 VXGE_HW_XMAC_TIMESTAMP_USE_LINK_ID(0) |
3152 VXGE_HW_XMAC_TIMESTAMP_INTERVAL(0);
3153
3154 status = vxge_hw_mgmt_reg_write(devh,
3155 vxge_hw_mgmt_reg_type_mrpcim,
3156 0,
3157 offsetof(struct vxge_hw_mrpcim_reg,
3158 xmac_timestamp),
3159 val64);
3160 vxge_hw_device_flush_io(devh);
3161 devh->config.hwts_en = VXGE_HW_HWTS_ENABLE;
3162 return status;
3163}
3164
3165static int vxge_hwtstamp_ioctl(struct vxgedev *vdev, void __user *data)
3166{
3167 struct hwtstamp_config config;
3168 int i;
3169
3170 if (copy_from_user(&config, data, sizeof(config)))
3171 return -EFAULT;
3172
3173 /* reserved for future extensions */
3174 if (config.flags)
3175 return -EINVAL;
3176
3177 /* Transmit HW Timestamp not supported */
3178 switch (config.tx_type) {
3179 case HWTSTAMP_TX_OFF:
3180 break;
3181 case HWTSTAMP_TX_ON:
3182 default:
3183 return -ERANGE;
3184 }
3185
3186 switch (config.rx_filter) {
3187 case HWTSTAMP_FILTER_NONE:
3188 vdev->rx_hwts = 0;
3189 config.rx_filter = HWTSTAMP_FILTER_NONE;
3190 break;
3191
3192 case HWTSTAMP_FILTER_ALL:
3193 case HWTSTAMP_FILTER_SOME:
3194 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
3195 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
3196 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
3197 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
3198 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
3199 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
3200 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
3201 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
3202 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
3203 case HWTSTAMP_FILTER_PTP_V2_EVENT:
3204 case HWTSTAMP_FILTER_PTP_V2_SYNC:
3205 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
3206 if (vdev->devh->config.hwts_en != VXGE_HW_HWTS_ENABLE)
3207 return -EFAULT;
3208
3209 vdev->rx_hwts = 1;
3210 config.rx_filter = HWTSTAMP_FILTER_ALL;
3211 break;
3212
3213 default:
3214 return -ERANGE;
3215 }
3216
3217 for (i = 0; i < vdev->no_of_vpath; i++)
3218 vdev->vpaths[i].ring.rx_hwts = vdev->rx_hwts;
3219
3220 if (copy_to_user(data, &config, sizeof(config)))
3221 return -EFAULT;
3222
3223 return 0;
3224}
3225
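Usage note for the new SIOCSHWTSTAMP path: userspace drives it through the standard hwtstamp_config ioctl. A hedged sketch, assuming an AF_INET socket in 'sock' (the helper name and the lack of error handling are illustrative):

#include <string.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/sockios.h>
#include <linux/net_tstamp.h>

static int enable_rx_hwts(int sock, const char *ifname)
{
	struct hwtstamp_config cfg;
	struct ifreq ifr;

	memset(&cfg, 0, sizeof(cfg));
	cfg.tx_type = HWTSTAMP_TX_OFF;		/* TX stamping is rejected above */
	cfg.rx_filter = HWTSTAMP_FILTER_ALL;	/* PTP filters are coerced to ALL */

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&cfg;

	return ioctl(sock, SIOCSHWTSTAMP, &ifr);
}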
2953/** 3226/**
2954 * vxge_ioctl 3227 * vxge_ioctl
2955 * @dev: Device pointer. 3228 * @dev: Device pointer.
@@ -2962,7 +3235,20 @@ vxge_get_stats(struct net_device *dev)
2962 */ 3235 */
2963static int vxge_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) 3236static int vxge_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2964{ 3237{
2965 return -EOPNOTSUPP; 3238 struct vxgedev *vdev = netdev_priv(dev);
3239 int ret;
3240
3241 switch (cmd) {
3242 case SIOCSHWTSTAMP:
3243 ret = vxge_hwtstamp_ioctl(vdev, rq->ifr_data);
3244 if (ret)
3245 return ret;
3246 break;
3247 default:
3248 return -EOPNOTSUPP;
3249 }
3250
3251 return 0;
2966} 3252}
2967 3253
2968/** 3254/**
@@ -2973,18 +3259,17 @@ static int vxge_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2973 * This function is triggered if the Tx Queue is stopped 3259 * This function is triggered if the Tx Queue is stopped
2974 * for a pre-defined amount of time when the Interface is still up. 3260 * for a pre-defined amount of time when the Interface is still up.
2975 */ 3261 */
2976static void 3262static void vxge_tx_watchdog(struct net_device *dev)
2977vxge_tx_watchdog(struct net_device *dev)
2978{ 3263{
2979 struct vxgedev *vdev; 3264 struct vxgedev *vdev;
2980 3265
2981 vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__); 3266 vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
2982 3267
2983 vdev = (struct vxgedev *)netdev_priv(dev); 3268 vdev = netdev_priv(dev);
2984 3269
2985 vdev->cric_err_event = VXGE_HW_EVENT_RESET_START; 3270 vdev->cric_err_event = VXGE_HW_EVENT_RESET_START;
2986 3271
2987 vxge_reset(vdev); 3272 schedule_work(&vdev->reset_task);
2988 vxge_debug_entryexit(VXGE_TRACE, 3273 vxge_debug_entryexit(VXGE_TRACE,
2989 "%s:%d Exiting...", __func__, __LINE__); 3274 "%s:%d Exiting...", __func__, __LINE__);
2990} 3275}
@@ -3008,7 +3293,7 @@ vxge_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
3008 3293
3009 vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__); 3294 vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
3010 3295
3011 vdev = (struct vxgedev *)netdev_priv(dev); 3296 vdev = netdev_priv(dev);
3012 3297
3013 vpath = &vdev->vpaths[0]; 3298 vpath = &vdev->vpaths[0];
3014 if ((NULL == grp) && (vpath->is_open)) { 3299 if ((NULL == grp) && (vpath->is_open)) {
@@ -3057,7 +3342,7 @@ vxge_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
3057 struct vxge_vpath *vpath; 3342 struct vxge_vpath *vpath;
3058 int vp_id; 3343 int vp_id;
3059 3344
3060 vdev = (struct vxgedev *)netdev_priv(dev); 3345 vdev = netdev_priv(dev);
3061 3346
3062 /* Add these vlan to the vid table */ 3347 /* Add these vlan to the vid table */
3063 for (vp_id = 0; vp_id < vdev->no_of_vpath; vp_id++) { 3348 for (vp_id = 0; vp_id < vdev->no_of_vpath; vp_id++) {
@@ -3084,7 +3369,7 @@ vxge_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
3084 3369
3085 vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__); 3370 vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
3086 3371
3087 vdev = (struct vxgedev *)netdev_priv(dev); 3372 vdev = netdev_priv(dev);
3088 3373
3089 vlan_group_set_device(vdev->vlgrp, vid, NULL); 3374 vlan_group_set_device(vdev->vlgrp, vid, NULL);
3090 3375
@@ -3102,29 +3387,28 @@ vxge_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
3102static const struct net_device_ops vxge_netdev_ops = { 3387static const struct net_device_ops vxge_netdev_ops = {
3103 .ndo_open = vxge_open, 3388 .ndo_open = vxge_open,
3104 .ndo_stop = vxge_close, 3389 .ndo_stop = vxge_close,
3105 .ndo_get_stats = vxge_get_stats, 3390 .ndo_get_stats64 = vxge_get_stats64,
3106 .ndo_start_xmit = vxge_xmit, 3391 .ndo_start_xmit = vxge_xmit,
3107 .ndo_validate_addr = eth_validate_addr, 3392 .ndo_validate_addr = eth_validate_addr,
3108 .ndo_set_multicast_list = vxge_set_multicast, 3393 .ndo_set_multicast_list = vxge_set_multicast,
3109
3110 .ndo_do_ioctl = vxge_ioctl, 3394 .ndo_do_ioctl = vxge_ioctl,
3111
3112 .ndo_set_mac_address = vxge_set_mac_addr, 3395 .ndo_set_mac_address = vxge_set_mac_addr,
3113 .ndo_change_mtu = vxge_change_mtu, 3396 .ndo_change_mtu = vxge_change_mtu,
3397 .ndo_fix_features = vxge_fix_features,
3398 .ndo_set_features = vxge_set_features,
3114 .ndo_vlan_rx_register = vxge_vlan_rx_register, 3399 .ndo_vlan_rx_register = vxge_vlan_rx_register,
3115 .ndo_vlan_rx_kill_vid = vxge_vlan_rx_kill_vid, 3400 .ndo_vlan_rx_kill_vid = vxge_vlan_rx_kill_vid,
3116 .ndo_vlan_rx_add_vid = vxge_vlan_rx_add_vid, 3401 .ndo_vlan_rx_add_vid = vxge_vlan_rx_add_vid,
3117
3118 .ndo_tx_timeout = vxge_tx_watchdog, 3402 .ndo_tx_timeout = vxge_tx_watchdog,
3119#ifdef CONFIG_NET_POLL_CONTROLLER 3403#ifdef CONFIG_NET_POLL_CONTROLLER
3120 .ndo_poll_controller = vxge_netpoll, 3404 .ndo_poll_controller = vxge_netpoll,
3121#endif 3405#endif
3122}; 3406};
3123 3407
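The .ndo_fix_features/.ndo_set_features hooks registered above belong to the u32 feature-mask generation of the netdev API. A hedged sketch of the expected callback shape only; the RXHASH policy shown is an assumption, not necessarily this driver's:

#include <linux/netdevice.h>

/* Sketch: refuse an RXHASH toggle while the interface is running, on the
 * assumption that RTH reprogramming needs the interface down. */
static u32 example_fix_features(struct net_device *dev, u32 features)
{
	if ((features ^ dev->features) & NETIF_F_RXHASH &&
	    netif_running(dev))
		features ^= NETIF_F_RXHASH;
	return features;
}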
3124int __devinit vxge_device_register(struct __vxge_hw_device *hldev, 3408static int __devinit vxge_device_register(struct __vxge_hw_device *hldev,
3125 struct vxge_config *config, 3409 struct vxge_config *config,
3126 int high_dma, int no_of_vpath, 3410 int high_dma, int no_of_vpath,
3127 struct vxgedev **vdev_out) 3411 struct vxgedev **vdev_out)
3128{ 3412{
3129 struct net_device *ndev; 3413 struct net_device *ndev;
3130 enum vxge_hw_status status = VXGE_HW_OK; 3414 enum vxge_hw_status status = VXGE_HW_OK;
@@ -3158,12 +3442,21 @@ int __devinit vxge_device_register(struct __vxge_hw_device *hldev,
3158 vdev->devh = hldev; 3442 vdev->devh = hldev;
3159 vdev->pdev = hldev->pdev; 3443 vdev->pdev = hldev->pdev;
3160 memcpy(&vdev->config, config, sizeof(struct vxge_config)); 3444 memcpy(&vdev->config, config, sizeof(struct vxge_config));
3161 vdev->rx_csum = 1; /* Enable Rx CSUM by default. */ 3445 vdev->rx_hwts = 0;
3446 vdev->titan1 = (vdev->pdev->revision == VXGE_HW_TITAN1_PCI_REVISION);
3162 3447
3163 SET_NETDEV_DEV(ndev, &vdev->pdev->dev); 3448 SET_NETDEV_DEV(ndev, &vdev->pdev->dev);
3164 3449
3165 ndev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX | 3450 ndev->hw_features = NETIF_F_RXCSUM | NETIF_F_SG |
3166 NETIF_F_HW_VLAN_FILTER; 3451 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
3452 NETIF_F_TSO | NETIF_F_TSO6 |
3453 NETIF_F_HW_VLAN_TX;
3454 if (vdev->config.rth_steering != NO_STEERING)
3455 ndev->hw_features |= NETIF_F_RXHASH;
3456
3457 ndev->features |= ndev->hw_features |
3458 NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
3459
3167 /* Driver entry points */ 3460 /* Driver entry points */
3168 ndev->irq = vdev->pdev->irq; 3461 ndev->irq = vdev->pdev->irq;
3169 ndev->base_addr = (unsigned long) hldev->bar0; 3462 ndev->base_addr = (unsigned long) hldev->bar0;
@@ -3171,8 +3464,9 @@ int __devinit vxge_device_register(struct __vxge_hw_device *hldev,
3171 ndev->netdev_ops = &vxge_netdev_ops; 3464 ndev->netdev_ops = &vxge_netdev_ops;
3172 3465
3173 ndev->watchdog_timeo = VXGE_LL_WATCH_DOG_TIMEOUT; 3466 ndev->watchdog_timeo = VXGE_LL_WATCH_DOG_TIMEOUT;
3467 INIT_WORK(&vdev->reset_task, vxge_reset);
3174 3468
3175 initialize_ethtool_ops(ndev); 3469 vxge_initialize_ethtool_ops(ndev);
3176 3470
3177 /* Allocate memory for vpath */ 3471 /* Allocate memory for vpath */
3178 vdev->vpaths = kzalloc((sizeof(struct vxge_vpath)) * 3472 vdev->vpaths = kzalloc((sizeof(struct vxge_vpath)) *
@@ -3181,13 +3475,10 @@ int __devinit vxge_device_register(struct __vxge_hw_device *hldev,
3181 vxge_debug_init(VXGE_ERR, 3475 vxge_debug_init(VXGE_ERR,
3182 "%s: vpath memory allocation failed", 3476 "%s: vpath memory allocation failed",
3183 vdev->ndev->name); 3477 vdev->ndev->name);
3184 ret = -ENODEV; 3478 ret = -ENOMEM;
3185 goto _out1; 3479 goto _out1;
3186 } 3480 }
3187 3481
3188 ndev->features |= NETIF_F_SG;
3189
3190 ndev->features |= NETIF_F_HW_CSUM;
3191 vxge_debug_init(vxge_hw_device_trace_level_get(hldev), 3482 vxge_debug_init(vxge_hw_device_trace_level_get(hldev),
3192 "%s : checksuming enabled", __func__); 3483 "%s : checksuming enabled", __func__);
3193 3484
@@ -3197,16 +3488,11 @@ int __devinit vxge_device_register(struct __vxge_hw_device *hldev,
3197 "%s : using High DMA", __func__); 3488 "%s : using High DMA", __func__);
3198 } 3489 }
3199 3490
3200 ndev->features |= NETIF_F_TSO | NETIF_F_TSO6; 3491 ret = register_netdev(ndev);
3201 3492 if (ret) {
3202 if (vdev->config.gro_enable)
3203 ndev->features |= NETIF_F_GRO;
3204
3205 if (register_netdev(ndev)) {
3206 vxge_debug_init(vxge_hw_device_trace_level_get(hldev), 3493 vxge_debug_init(vxge_hw_device_trace_level_get(hldev),
3207 "%s: %s : device registration failed!", 3494 "%s: %s : device registration failed!",
3208 ndev->name, __func__); 3495 ndev->name, __func__);
3209 ret = -ENODEV;
3210 goto _out2; 3496 goto _out2;
3211 } 3497 }
3212 3498
@@ -3223,6 +3509,7 @@ int __devinit vxge_device_register(struct __vxge_hw_device *hldev,
3223 "%s: Ethernet device registered", 3509 "%s: Ethernet device registered",
3224 ndev->name); 3510 ndev->name);
3225 3511
3512 hldev->ndev = ndev;
3226 *vdev_out = vdev; 3513 *vdev_out = vdev;
3227 3514
3228 /* Resetting the Device stats */ 3515 /* Resetting the Device stats */
@@ -3257,36 +3544,34 @@ _out0:
3257 * 3544 *
3258 * This function will unregister and free network device 3545 * This function will unregister and free network device
3259 */ 3546 */
3260void 3547static void vxge_device_unregister(struct __vxge_hw_device *hldev)
3261vxge_device_unregister(struct __vxge_hw_device *hldev)
3262{ 3548{
3263 struct vxgedev *vdev; 3549 struct vxgedev *vdev;
3264 struct net_device *dev; 3550 struct net_device *dev;
3265 char buf[IFNAMSIZ]; 3551 char buf[IFNAMSIZ];
3266#if ((VXGE_DEBUG_INIT & VXGE_DEBUG_MASK) || \
3267 (VXGE_DEBUG_ENTRYEXIT & VXGE_DEBUG_MASK))
3268 u32 level_trace;
3269#endif
3270 3552
3271 dev = hldev->ndev; 3553 dev = hldev->ndev;
3272 vdev = netdev_priv(dev); 3554 vdev = netdev_priv(dev);
3273#if ((VXGE_DEBUG_INIT & VXGE_DEBUG_MASK) || \
3274 (VXGE_DEBUG_ENTRYEXIT & VXGE_DEBUG_MASK))
3275 level_trace = vdev->level_trace;
3276#endif
3277 vxge_debug_entryexit(level_trace,
3278 "%s: %s:%d", vdev->ndev->name, __func__, __LINE__);
3279 3555
3280 memcpy(buf, vdev->ndev->name, IFNAMSIZ); 3556 vxge_debug_entryexit(vdev->level_trace, "%s: %s:%d", vdev->ndev->name,
3557 __func__, __LINE__);
3558
3559 strncpy(buf, dev->name, IFNAMSIZ);
3560
3561 flush_work_sync(&vdev->reset_task);
3281 3562
3282 /* in 2.6 will call stop() if device is up */ 3563 /* in 2.6 will call stop() if device is up */
3283 unregister_netdev(dev); 3564 unregister_netdev(dev);
3284 3565
3285 flush_scheduled_work(); 3566 kfree(vdev->vpaths);
3567
3568 /* we are safe to free it now */
3569 free_netdev(dev);
3286 3570
3287 vxge_debug_init(level_trace, "%s: ethernet device unregistered", buf); 3571 vxge_debug_init(vdev->level_trace, "%s: ethernet device unregistered",
3288 vxge_debug_entryexit(level_trace, 3572 buf);
3289 "%s: %s:%d Exiting...", buf, __func__, __LINE__); 3573 vxge_debug_entryexit(vdev->level_trace, "%s: %s:%d Exiting...", buf,
3574 __func__, __LINE__);
3290} 3575}
3291 3576
3292/* 3577/*
@@ -3300,7 +3585,7 @@ vxge_callback_crit_err(struct __vxge_hw_device *hldev,
3300 enum vxge_hw_event type, u64 vp_id) 3585 enum vxge_hw_event type, u64 vp_id)
3301{ 3586{
3302 struct net_device *dev = hldev->ndev; 3587 struct net_device *dev = hldev->ndev;
3303 struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev); 3588 struct vxgedev *vdev = netdev_priv(dev);
3304 struct vxge_vpath *vpath = NULL; 3589 struct vxge_vpath *vpath = NULL;
3305 int vpath_idx; 3590 int vpath_idx;
3306 3591
@@ -3523,9 +3808,9 @@ static int __devinit vxge_config_vpaths(
3523 device_config->vp_config[i].tti.timer_ac_en = 3808 device_config->vp_config[i].tti.timer_ac_en =
3524 VXGE_HW_TIM_TIMER_AC_ENABLE; 3809 VXGE_HW_TIM_TIMER_AC_ENABLE;
3525 3810
3526 /* For msi-x with napi (each vector 3811 /* For msi-x with napi (each vector has a handler of its own) -
3527 has a handler of its own) - 3812 * Set CI to OFF for all vpaths
3528 Set CI to OFF for all vpaths */ 3813 */
3529 device_config->vp_config[i].tti.timer_ci_en = 3814 device_config->vp_config[i].tti.timer_ci_en =
3530 VXGE_HW_TIM_TIMER_CI_DISABLE; 3815 VXGE_HW_TIM_TIMER_CI_DISABLE;
3531 3816
@@ -3555,10 +3840,13 @@ static int __devinit vxge_config_vpaths(
3555 3840
3556 device_config->vp_config[i].ring.ring_blocks = 3841 device_config->vp_config[i].ring.ring_blocks =
3557 VXGE_HW_DEF_RING_BLOCKS; 3842 VXGE_HW_DEF_RING_BLOCKS;
3843
3558 device_config->vp_config[i].ring.buffer_mode = 3844 device_config->vp_config[i].ring.buffer_mode =
3559 VXGE_HW_RING_RXD_BUFFER_MODE_1; 3845 VXGE_HW_RING_RXD_BUFFER_MODE_1;
3846
3560 device_config->vp_config[i].ring.rxds_limit = 3847 device_config->vp_config[i].ring.rxds_limit =
3561 VXGE_HW_DEF_RING_RXDS_LIMIT; 3848 VXGE_HW_DEF_RING_RXDS_LIMIT;
3849
3562 device_config->vp_config[i].ring.scatter_mode = 3850 device_config->vp_config[i].ring.scatter_mode =
3563 VXGE_HW_RING_SCATTER_MODE_A; 3851 VXGE_HW_RING_SCATTER_MODE_A;
3564 3852
@@ -3635,9 +3923,10 @@ static void __devinit vxge_device_config_init(
3635 break; 3923 break;
3636 3924
3637 case MSI_X: 3925 case MSI_X:
3638 device_config->intr_mode = VXGE_HW_INTR_MODE_MSIX; 3926 device_config->intr_mode = VXGE_HW_INTR_MODE_MSIX_ONE_SHOT;
3639 break; 3927 break;
3640 } 3928 }
3929
3641 /* Timer period between device poll */ 3930 /* Timer period between device poll */
3642 device_config->device_poll_millis = VXGE_TIMER_DELAY; 3931 device_config->device_poll_millis = VXGE_TIMER_DELAY;
3643 3932
@@ -3649,16 +3938,10 @@ static void __devinit vxge_device_config_init(
3649 3938
3650 vxge_debug_ll_config(VXGE_TRACE, "%s : Device Config Params ", 3939 vxge_debug_ll_config(VXGE_TRACE, "%s : Device Config Params ",
3651 __func__); 3940 __func__);
3652 vxge_debug_ll_config(VXGE_TRACE, "dma_blockpool_initial : %d",
3653 device_config->dma_blockpool_initial);
3654 vxge_debug_ll_config(VXGE_TRACE, "dma_blockpool_max : %d",
3655 device_config->dma_blockpool_max);
3656 vxge_debug_ll_config(VXGE_TRACE, "intr_mode : %d", 3941 vxge_debug_ll_config(VXGE_TRACE, "intr_mode : %d",
3657 device_config->intr_mode); 3942 device_config->intr_mode);
3658 vxge_debug_ll_config(VXGE_TRACE, "device_poll_millis : %d", 3943 vxge_debug_ll_config(VXGE_TRACE, "device_poll_millis : %d",
3659 device_config->device_poll_millis); 3944 device_config->device_poll_millis);
3660 vxge_debug_ll_config(VXGE_TRACE, "rts_mac_en : %d",
3661 device_config->rts_mac_en);
3662 vxge_debug_ll_config(VXGE_TRACE, "rth_en : %d", 3945 vxge_debug_ll_config(VXGE_TRACE, "rth_en : %d",
3663 device_config->rth_en); 3946 device_config->rth_en);
3664 vxge_debug_ll_config(VXGE_TRACE, "rth_it_type : %d", 3947 vxge_debug_ll_config(VXGE_TRACE, "rth_it_type : %d",
@@ -3734,22 +4017,10 @@ static void __devinit vxge_print_parm(struct vxgedev *vdev, u64 vpath_mask)
3734 vdev->config.tx_steering_type = 0; 4017 vdev->config.tx_steering_type = 0;
3735 } 4018 }
3736 4019
3737 if (vdev->config.gro_enable) {
3738 vxge_debug_init(VXGE_ERR,
3739 "%s: Generic receive offload enabled",
3740 vdev->ndev->name);
3741 } else
3742 vxge_debug_init(VXGE_TRACE,
3743 "%s: Generic receive offload disabled",
3744 vdev->ndev->name);
3745
3746 if (vdev->config.addr_learn_en) 4020 if (vdev->config.addr_learn_en)
3747 vxge_debug_init(VXGE_TRACE, 4021 vxge_debug_init(VXGE_TRACE,
3748 "%s: MAC Address learning enabled", vdev->ndev->name); 4022 "%s: MAC Address learning enabled", vdev->ndev->name);
3749 4023
3750 vxge_debug_init(VXGE_TRACE,
3751 "%s: Rx doorbell mode enabled", vdev->ndev->name);
3752
3753 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) { 4024 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
3754 if (!vxge_bVALn(vpath_mask, i, 1)) 4025 if (!vxge_bVALn(vpath_mask, i, 1))
3755 continue; 4026 continue;
@@ -3762,14 +4033,6 @@ static void __devinit vxge_print_parm(struct vxgedev *vdev, u64 vpath_mask)
3762 ((struct __vxge_hw_device *)(vdev->devh))-> 4033 ((struct __vxge_hw_device *)(vdev->devh))->
3763 config.vp_config[i].rpa_strip_vlan_tag 4034 config.vp_config[i].rpa_strip_vlan_tag
3764 ? "Enabled" : "Disabled"); 4035 ? "Enabled" : "Disabled");
3765 vxge_debug_init(VXGE_TRACE,
3766 "%s: Ring blocks : %d", vdev->ndev->name,
3767 ((struct __vxge_hw_device *)(vdev->devh))->
3768 config.vp_config[i].ring.ring_blocks);
3769 vxge_debug_init(VXGE_TRACE,
3770 "%s: Fifo blocks : %d", vdev->ndev->name,
3771 ((struct __vxge_hw_device *)(vdev->devh))->
3772 config.vp_config[i].fifo.fifo_blocks);
3773 vxge_debug_ll_config(VXGE_TRACE, 4036 vxge_debug_ll_config(VXGE_TRACE,
3774 "%s: Max frags : %d", vdev->ndev->name, 4037 "%s: Max frags : %d", vdev->ndev->name,
3775 ((struct __vxge_hw_device *)(vdev->devh))-> 4038 ((struct __vxge_hw_device *)(vdev->devh))->
@@ -3809,8 +4072,7 @@ static int vxge_pm_resume(struct pci_dev *pdev)
3809static pci_ers_result_t vxge_io_error_detected(struct pci_dev *pdev, 4072static pci_ers_result_t vxge_io_error_detected(struct pci_dev *pdev,
3810 pci_channel_state_t state) 4073 pci_channel_state_t state)
3811{ 4074{
3812 struct __vxge_hw_device *hldev = 4075 struct __vxge_hw_device *hldev = pci_get_drvdata(pdev);
3813 (struct __vxge_hw_device *) pci_get_drvdata(pdev);
3814 struct net_device *netdev = hldev->ndev; 4076 struct net_device *netdev = hldev->ndev;
3815 4077
3816 netif_device_detach(netdev); 4078 netif_device_detach(netdev);
@@ -3839,8 +4101,7 @@ static pci_ers_result_t vxge_io_error_detected(struct pci_dev *pdev,
3839 */ 4101 */
3840static pci_ers_result_t vxge_io_slot_reset(struct pci_dev *pdev) 4102static pci_ers_result_t vxge_io_slot_reset(struct pci_dev *pdev)
3841{ 4103{
3842 struct __vxge_hw_device *hldev = 4104 struct __vxge_hw_device *hldev = pci_get_drvdata(pdev);
3843 (struct __vxge_hw_device *) pci_get_drvdata(pdev);
3844 struct net_device *netdev = hldev->ndev; 4105 struct net_device *netdev = hldev->ndev;
3845 4106
3846 struct vxgedev *vdev = netdev_priv(netdev); 4107 struct vxgedev *vdev = netdev_priv(netdev);
@@ -3851,7 +4112,7 @@ static pci_ers_result_t vxge_io_slot_reset(struct pci_dev *pdev)
3851 } 4112 }
3852 4113
3853 pci_set_master(pdev); 4114 pci_set_master(pdev);
3854 vxge_reset(vdev); 4115 do_vxge_reset(vdev, VXGE_LL_FULL_RESET);
3855 4116
3856 return PCI_ERS_RESULT_RECOVERED; 4117 return PCI_ERS_RESULT_RECOVERED;
3857} 4118}
@@ -3865,8 +4126,7 @@ static pci_ers_result_t vxge_io_slot_reset(struct pci_dev *pdev)
3865 */ 4126 */
3866static void vxge_io_resume(struct pci_dev *pdev) 4127static void vxge_io_resume(struct pci_dev *pdev)
3867{ 4128{
3868 struct __vxge_hw_device *hldev = 4129 struct __vxge_hw_device *hldev = pci_get_drvdata(pdev);
3869 (struct __vxge_hw_device *) pci_get_drvdata(pdev);
3870 struct net_device *netdev = hldev->ndev; 4130 struct net_device *netdev = hldev->ndev;
3871 4131
3872 if (netif_running(netdev)) { 4132 if (netif_running(netdev)) {
@@ -3910,6 +4170,157 @@ static inline u32 vxge_get_num_vfs(u64 function_mode)
3910 return num_functions; 4170 return num_functions;
3911} 4171}
3912 4172
4173int vxge_fw_upgrade(struct vxgedev *vdev, char *fw_name, int override)
4174{
4175 struct __vxge_hw_device *hldev = vdev->devh;
4176 u32 maj, min, bld, cmaj, cmin, cbld;
4177 enum vxge_hw_status status;
4178 const struct firmware *fw;
4179 int ret;
4180
4181 ret = request_firmware(&fw, fw_name, &vdev->pdev->dev);
4182 if (ret) {
4183 vxge_debug_init(VXGE_ERR, "%s: Firmware file '%s' not found",
4184 VXGE_DRIVER_NAME, fw_name);
4185 goto out;
4186 }
4187
4188 /* Load the new firmware onto the adapter */
4189 status = vxge_update_fw_image(hldev, fw->data, fw->size);
4190 if (status != VXGE_HW_OK) {
4191 vxge_debug_init(VXGE_ERR,
4192 "%s: FW image download to adapter failed '%s'.",
4193 VXGE_DRIVER_NAME, fw_name);
4194 ret = -EIO;
4195 goto out;
4196 }
4197
4198 /* Read the version of the new firmware */
4199 status = vxge_hw_upgrade_read_version(hldev, &maj, &min, &bld);
4200 if (status != VXGE_HW_OK) {
4201 vxge_debug_init(VXGE_ERR,
4202 "%s: Upgrade read version failed '%s'.",
4203 VXGE_DRIVER_NAME, fw_name);
4204 ret = -EIO;
4205 goto out;
4206 }
4207
4208 cmaj = vdev->config.device_hw_info.fw_version.major;
4209 cmin = vdev->config.device_hw_info.fw_version.minor;
4210 cbld = vdev->config.device_hw_info.fw_version.build;
 4211 /* It's possible the version in /lib/firmware is not the latest.
 4212 * If so, we could get into a loop of trying to upgrade to the latest
 4213 * version and flashing the older one.
4214 */
4215 if (VXGE_FW_VER(maj, min, bld) == VXGE_FW_VER(cmaj, cmin, cbld) &&
4216 !override) {
4217 ret = -EINVAL;
4218 goto out;
4219 }
4220
4221 printk(KERN_NOTICE "Upgrade to firmware version %d.%d.%d commencing\n",
4222 maj, min, bld);
4223
4224 /* Flash the adapter with the new firmware */
4225 status = vxge_hw_flash_fw(hldev);
4226 if (status != VXGE_HW_OK) {
4227 vxge_debug_init(VXGE_ERR, "%s: Upgrade commit failed '%s'.",
4228 VXGE_DRIVER_NAME, fw_name);
4229 ret = -EIO;
4230 goto out;
4231 }
4232
4233 printk(KERN_NOTICE "Upgrade of firmware successful! Adapter must be "
4234 "hard reset before using, thus requiring a system reboot or a "
4235 "hotplug event.\n");
4236
4237out:
4238 release_firmware(fw);
4239 return ret;
4240}
4241
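The VXGE_FW_VER() comparisons above and below rely on the macro packing major/minor/build into one monotonically comparable value. A sketch of the assumed shape; the authoritative definition lives in vxge-main.h:

/* Assumed packing -- see vxge-main.h for the real macro. */
#define VXGE_FW_VER(maj, min, bld) \
	(((maj) << 16) + ((min) << 8) + (bld))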
4242static int vxge_probe_fw_update(struct vxgedev *vdev)
4243{
4244 u32 maj, min, bld;
4245 int ret, gpxe = 0;
4246 char *fw_name;
4247
4248 maj = vdev->config.device_hw_info.fw_version.major;
4249 min = vdev->config.device_hw_info.fw_version.minor;
4250 bld = vdev->config.device_hw_info.fw_version.build;
4251
4252 if (VXGE_FW_VER(maj, min, bld) == VXGE_CERT_FW_VER)
4253 return 0;
4254
4255 /* Ignore the build number when determining if the current firmware is
4256 * "too new" to load the driver
4257 */
4258 if (VXGE_FW_VER(maj, min, 0) > VXGE_CERT_FW_VER) {
4259 vxge_debug_init(VXGE_ERR, "%s: Firmware newer than last known "
4260 "version, unable to load driver\n",
4261 VXGE_DRIVER_NAME);
4262 return -EINVAL;
4263 }
4264
 4265 /* Firmware versions 1.4.4 and older cannot be upgraded and are too
 4266 * old to work with this driver.
4267 */
4268 if (VXGE_FW_VER(maj, min, bld) <= VXGE_FW_DEAD_VER) {
4269 vxge_debug_init(VXGE_ERR, "%s: Firmware %d.%d.%d cannot be "
4270 "upgraded\n", VXGE_DRIVER_NAME, maj, min, bld);
4271 return -EINVAL;
4272 }
4273
 4274 /* Determine whether a gPXE EPROM image is present to pick the firmware file */
4275 if (VXGE_FW_VER(maj, min, bld) >= VXGE_EPROM_FW_VER) {
4276 int i;
4277 for (i = 0; i < VXGE_HW_MAX_ROM_IMAGES; i++)
4278 if (vdev->devh->eprom_versions[i]) {
4279 gpxe = 1;
4280 break;
4281 }
4282 }
4283 if (gpxe)
4284 fw_name = "vxge/X3fw-pxe.ncf";
4285 else
4286 fw_name = "vxge/X3fw.ncf";
4287
4288 ret = vxge_fw_upgrade(vdev, fw_name, 0);
 4289 /* -EINVAL and -ENOENT mean no flash happened, so probe continues; any
 4290 * other result, even a successful flash (hard reset needed), aborts load
 4291 */
4292 if (ret != -EINVAL && ret != -ENOENT)
4293 return -EIO;
4294 else
4295 ret = 0;
4296
4297 if (VXGE_FW_VER(VXGE_CERT_FW_VER_MAJOR, VXGE_CERT_FW_VER_MINOR, 0) >
4298 VXGE_FW_VER(maj, min, 0)) {
4299 vxge_debug_init(VXGE_ERR, "%s: Firmware %d.%d.%d is too old to"
4300 " be used with this driver.\n"
4301 "Please get the latest version from "
4302 "ftp://ftp.s2io.com/pub/X3100-Drivers/FIRMWARE",
4303 VXGE_DRIVER_NAME, maj, min, bld);
4304 return -EINVAL;
4305 }
4306
4307 return ret;
4308}
4309
4310static int __devinit is_sriov_initialized(struct pci_dev *pdev)
4311{
4312 int pos;
4313 u16 ctrl;
4314
4315 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
4316 if (pos) {
4317 pci_read_config_word(pdev, pos + PCI_SRIOV_CTRL, &ctrl);
4318 if (ctrl & PCI_SRIOV_CTRL_VFE)
4319 return 1;
4320 }
4321 return 0;
4322}
4323
3913/** 4324/**
3914 * vxge_probe 4325 * vxge_probe
3915 * @pdev : structure containing the PCI related information of the device. 4326 * @pdev : structure containing the PCI related information of the device.
@@ -3924,7 +4335,7 @@ static inline u32 vxge_get_num_vfs(u64 function_mode)
3924static int __devinit 4335static int __devinit
3925vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre) 4336vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
3926{ 4337{
3927 struct __vxge_hw_device *hldev; 4338 struct __vxge_hw_device *hldev;
3928 enum vxge_hw_status status; 4339 enum vxge_hw_status status;
3929 int ret; 4340 int ret;
3930 int high_dma = 0; 4341 int high_dma = 0;
@@ -3947,9 +4358,10 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
3947 attr.pdev = pdev; 4358 attr.pdev = pdev;
3948 4359
3949 /* In SRIOV-17 mode, functions of the same adapter 4360 /* In SRIOV-17 mode, functions of the same adapter
3950 * can be deployed on different buses */ 4361 * can be deployed on different buses
3951 if ((!pdev->is_virtfn) && ((bus != pdev->bus->number) || 4362 */
3952 (device != PCI_SLOT(pdev->devfn)))) 4363 if (((bus != pdev->bus->number) || (device != PCI_SLOT(pdev->devfn))) &&
4364 !pdev->is_virtfn)
3953 new_device = 1; 4365 new_device = 1;
3954 4366
3955 bus = pdev->bus->number; 4367 bus = pdev->bus->number;
@@ -3967,6 +4379,7 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
3967 driver_config->config_dev_cnt = 0; 4379 driver_config->config_dev_cnt = 0;
3968 driver_config->total_dev_cnt = 0; 4380 driver_config->total_dev_cnt = 0;
3969 } 4381 }
4382
 3970 /* Now make the CPU-based number-of-vpaths calculation 4383
 3971 * applicable to individual functions as well. 4384
3972 */ 4385 */
@@ -3989,11 +4402,11 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
3989 goto _exit0; 4402 goto _exit0;
3990 } 4403 }
3991 4404
3992 ll_config = kzalloc(sizeof(*ll_config), GFP_KERNEL); 4405 ll_config = kzalloc(sizeof(struct vxge_config), GFP_KERNEL);
3993 if (!ll_config) { 4406 if (!ll_config) {
3994 ret = -ENOMEM; 4407 ret = -ENOMEM;
3995 vxge_debug_init(VXGE_ERR, 4408 vxge_debug_init(VXGE_ERR,
3996 "ll_config : malloc failed %s %d", 4409 "device_config : malloc failed %s %d",
3997 __FILE__, __LINE__); 4410 __FILE__, __LINE__);
3998 goto _exit0; 4411 goto _exit0;
3999 } 4412 }
@@ -4037,10 +4450,10 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
4037 goto _exit1; 4450 goto _exit1;
4038 } 4451 }
4039 4452
4040 if (pci_request_regions(pdev, VXGE_DRIVER_NAME)) { 4453 ret = pci_request_region(pdev, 0, VXGE_DRIVER_NAME);
4454 if (ret) {
4041 vxge_debug_init(VXGE_ERR, 4455 vxge_debug_init(VXGE_ERR,
4042 "%s : request regions failed", __func__); 4456 "%s : request regions failed", __func__);
4043 ret = -ENODEV;
4044 goto _exit1; 4457 goto _exit1;
4045 } 4458 }
4046 4459
@@ -4068,16 +4481,6 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
4068 goto _exit3; 4481 goto _exit3;
4069 } 4482 }
4070 4483
4071 if (ll_config->device_hw_info.fw_version.major !=
4072 VXGE_DRIVER_FW_VERSION_MAJOR) {
4073 vxge_debug_init(VXGE_ERR,
4074 "%s: Incorrect firmware version."
4075 "Please upgrade the firmware to version 1.x.x",
4076 VXGE_DRIVER_NAME);
4077 ret = -EINVAL;
4078 goto _exit3;
4079 }
4080
4081 vpath_mask = ll_config->device_hw_info.vpath_mask; 4484 vpath_mask = ll_config->device_hw_info.vpath_mask;
4082 if (vpath_mask == 0) { 4485 if (vpath_mask == 0) {
4083 vxge_debug_ll_config(VXGE_TRACE, 4486 vxge_debug_ll_config(VXGE_TRACE,
@@ -4106,14 +4509,13 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
4106 num_vfs = vxge_get_num_vfs(function_mode) - 1; 4509 num_vfs = vxge_get_num_vfs(function_mode) - 1;
4107 4510
4108 /* Enable SRIOV mode, if firmware has SRIOV support and if it is a PF */ 4511 /* Enable SRIOV mode, if firmware has SRIOV support and if it is a PF */
4109 if (is_sriov(function_mode) && (max_config_dev > 1) && 4512 if (is_sriov(function_mode) && !is_sriov_initialized(pdev) &&
4110 (ll_config->intr_type != INTA) && 4513 (ll_config->intr_type != INTA)) {
4111 (is_privileged == VXGE_HW_OK)) { 4514 ret = pci_enable_sriov(pdev, num_vfs);
4112 ret = pci_enable_sriov(pdev, ((max_config_dev - 1) < num_vfs)
4113 ? (max_config_dev - 1) : num_vfs);
4114 if (ret) 4515 if (ret)
4115 vxge_debug_ll_config(VXGE_ERR, 4516 vxge_debug_ll_config(VXGE_ERR,
4116 "Failed in enabling SRIOV mode: %d\n", ret); 4517 "Failed in enabling SRIOV mode: %d\n", ret);
4518 /* No need to fail out, as an error here is non-fatal */
4117 } 4519 }
4118 4520
4119 /* 4521 /*
@@ -4141,46 +4543,93 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
4141 goto _exit3; 4543 goto _exit3;
4142 } 4544 }
4143 4545
4546 if (VXGE_FW_VER(ll_config->device_hw_info.fw_version.major,
4547 ll_config->device_hw_info.fw_version.minor,
4548 ll_config->device_hw_info.fw_version.build) >=
4549 VXGE_EPROM_FW_VER) {
4550 struct eprom_image img[VXGE_HW_MAX_ROM_IMAGES];
4551
4552 status = vxge_hw_vpath_eprom_img_ver_get(hldev, img);
4553 if (status != VXGE_HW_OK) {
4554 vxge_debug_init(VXGE_ERR, "%s: Reading of EPROM failed",
4555 VXGE_DRIVER_NAME);
4556 /* This is a non-fatal error, continue */
4557 }
4558
4559 for (i = 0; i < VXGE_HW_MAX_ROM_IMAGES; i++) {
4560 hldev->eprom_versions[i] = img[i].version;
4561 if (!img[i].is_valid)
4562 break;
4563 vxge_debug_init(VXGE_TRACE, "%s: EPROM %d, version "
4564 "%d.%d.%d.%d", VXGE_DRIVER_NAME, i,
4565 VXGE_EPROM_IMG_MAJOR(img[i].version),
4566 VXGE_EPROM_IMG_MINOR(img[i].version),
4567 VXGE_EPROM_IMG_FIX(img[i].version),
4568 VXGE_EPROM_IMG_BUILD(img[i].version));
4569 }
4570 }
4571
4144 /* if FCS stripping is not disabled in MAC fail driver load */ 4572 /* if FCS stripping is not disabled in MAC fail driver load */
4145 if (vxge_hw_vpath_strip_fcs_check(hldev, vpath_mask) != VXGE_HW_OK) { 4573 status = vxge_hw_vpath_strip_fcs_check(hldev, vpath_mask);
4146 vxge_debug_init(VXGE_ERR, 4574 if (status != VXGE_HW_OK) {
4147 "%s: FCS stripping is not disabled in MAC" 4575 vxge_debug_init(VXGE_ERR, "%s: FCS stripping is enabled in MAC"
4148 " failing driver load", VXGE_DRIVER_NAME); 4576 " failing driver load", VXGE_DRIVER_NAME);
4149 ret = -EINVAL; 4577 ret = -EINVAL;
4150 goto _exit4; 4578 goto _exit4;
4151 } 4579 }
4152 4580
 4581 /* Always enable HWTS. This will always cause the FCS to be invalid,
 4582 * because HWTS uses the FCS as the location of the timestamp. The HW
 4583 * FCS checking will still correctly determine if there is a valid
 4584 * checksum, and the FCS is removed by the driver anyway, so no
 4585 * functionality is lost. Since HWTS is always enabled, we now simply
 4586 * use the ioctl call to set whether or not the driver should pay
 4587 * attention to the HWTS.
4588 */
4589 if (is_privileged == VXGE_HW_OK) {
4590 status = vxge_timestamp_config(hldev);
4591 if (status != VXGE_HW_OK) {
4592 vxge_debug_init(VXGE_ERR, "%s: HWTS enable failed",
4593 VXGE_DRIVER_NAME);
4594 ret = -EFAULT;
4595 goto _exit4;
4596 }
4597 }
4598
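With HWTS force-enabled here and rx_hwts toggled through the ioctl, the receive completion path is expected to surface the FCS-carried stamp via the usual skb hook. A heavily hedged sketch; the helper name and the assumption that the stamp is already in nanoseconds are illustrative:

#include <linux/skbuff.h>
#include <linux/ktime.h>

/* Sketch only: publish a hardware stamp to the stack once
 * ring->rx_hwts is set. */
static inline void example_stamp_skb(struct sk_buff *skb, u64 stamp_ns)
{
	skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(stamp_ns);
}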
4153 vxge_hw_device_debug_set(hldev, VXGE_ERR, VXGE_COMPONENT_LL); 4599 vxge_hw_device_debug_set(hldev, VXGE_ERR, VXGE_COMPONENT_LL);
4154 4600
4155 /* set private device info */ 4601 /* set private device info */
4156 pci_set_drvdata(pdev, hldev); 4602 pci_set_drvdata(pdev, hldev);
4157 4603
4158 ll_config->gro_enable = VXGE_GRO_ALWAYS_AGGREGATE;
4159 ll_config->fifo_indicate_max_pkts = VXGE_FIFO_INDICATE_MAX_PKTS; 4604 ll_config->fifo_indicate_max_pkts = VXGE_FIFO_INDICATE_MAX_PKTS;
4160 ll_config->addr_learn_en = addr_learn_en; 4605 ll_config->addr_learn_en = addr_learn_en;
4161 ll_config->rth_algorithm = RTH_ALG_JENKINS; 4606 ll_config->rth_algorithm = RTH_ALG_JENKINS;
4162 ll_config->rth_hash_type_tcpipv4 = VXGE_HW_RING_HASH_TYPE_TCP_IPV4; 4607 ll_config->rth_hash_type_tcpipv4 = 1;
4163 ll_config->rth_hash_type_ipv4 = VXGE_HW_RING_HASH_TYPE_NONE; 4608 ll_config->rth_hash_type_ipv4 = 0;
4164 ll_config->rth_hash_type_tcpipv6 = VXGE_HW_RING_HASH_TYPE_NONE; 4609 ll_config->rth_hash_type_tcpipv6 = 0;
4165 ll_config->rth_hash_type_ipv6 = VXGE_HW_RING_HASH_TYPE_NONE; 4610 ll_config->rth_hash_type_ipv6 = 0;
4166 ll_config->rth_hash_type_tcpipv6ex = VXGE_HW_RING_HASH_TYPE_NONE; 4611 ll_config->rth_hash_type_tcpipv6ex = 0;
4167 ll_config->rth_hash_type_ipv6ex = VXGE_HW_RING_HASH_TYPE_NONE; 4612 ll_config->rth_hash_type_ipv6ex = 0;
4168 ll_config->rth_bkt_sz = RTH_BUCKET_SIZE; 4613 ll_config->rth_bkt_sz = RTH_BUCKET_SIZE;
4169 ll_config->tx_pause_enable = VXGE_PAUSE_CTRL_ENABLE; 4614 ll_config->tx_pause_enable = VXGE_PAUSE_CTRL_ENABLE;
4170 ll_config->rx_pause_enable = VXGE_PAUSE_CTRL_ENABLE; 4615 ll_config->rx_pause_enable = VXGE_PAUSE_CTRL_ENABLE;
4171 4616
4172 if (vxge_device_register(hldev, ll_config, high_dma, no_of_vpath, 4617 ret = vxge_device_register(hldev, ll_config, high_dma, no_of_vpath,
4173 &vdev)) { 4618 &vdev);
4619 if (ret) {
4174 ret = -EINVAL; 4620 ret = -EINVAL;
4175 goto _exit4; 4621 goto _exit4;
4176 } 4622 }
4177 4623
4624 ret = vxge_probe_fw_update(vdev);
4625 if (ret)
4626 goto _exit5;
4627
4178 vxge_hw_device_debug_set(hldev, VXGE_TRACE, VXGE_COMPONENT_LL); 4628 vxge_hw_device_debug_set(hldev, VXGE_TRACE, VXGE_COMPONENT_LL);
4179 VXGE_COPY_DEBUG_INFO_TO_LL(vdev, vxge_hw_device_error_level_get(hldev), 4629 VXGE_COPY_DEBUG_INFO_TO_LL(vdev, vxge_hw_device_error_level_get(hldev),
4180 vxge_hw_device_trace_level_get(hldev)); 4630 vxge_hw_device_trace_level_get(hldev));
4181 4631
4182 /* set private HW device info */ 4632 /* set private HW device info */
4183 hldev->ndev = vdev->ndev;
4184 vdev->mtu = VXGE_HW_DEFAULT_MTU; 4633 vdev->mtu = VXGE_HW_DEFAULT_MTU;
4185 vdev->bar0 = attr.bar0; 4634 vdev->bar0 = attr.bar0;
4186 vdev->max_vpath_supported = max_vpath_supported; 4635 vdev->max_vpath_supported = max_vpath_supported;
@@ -4274,15 +4723,13 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
4274 4723
4275 /* Copy the station mac address to the list */ 4724 /* Copy the station mac address to the list */
4276 for (i = 0; i < vdev->no_of_vpath; i++) { 4725 for (i = 0; i < vdev->no_of_vpath; i++) {
4277 entry = (struct vxge_mac_addrs *) 4726 entry = kzalloc(sizeof(struct vxge_mac_addrs), GFP_KERNEL);
4278 kzalloc(sizeof(struct vxge_mac_addrs),
4279 GFP_KERNEL);
4280 if (NULL == entry) { 4727 if (NULL == entry) {
4281 vxge_debug_init(VXGE_ERR, 4728 vxge_debug_init(VXGE_ERR,
4282 "%s: mac_addr_list : memory allocation failed", 4729 "%s: mac_addr_list : memory allocation failed",
4283 vdev->ndev->name); 4730 vdev->ndev->name);
4284 ret = -EPERM; 4731 ret = -EPERM;
4285 goto _exit5; 4732 goto _exit6;
4286 } 4733 }
4287 macaddr = (u8 *)&entry->macaddr; 4734 macaddr = (u8 *)&entry->macaddr;
4288 memcpy(macaddr, vdev->ndev->dev_addr, ETH_ALEN); 4735 memcpy(macaddr, vdev->ndev->dev_addr, ETH_ALEN);
@@ -4322,25 +4769,26 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
4322 kfree(ll_config); 4769 kfree(ll_config);
4323 return 0; 4770 return 0;
4324 4771
4325_exit5: 4772_exit6:
4326 for (i = 0; i < vdev->no_of_vpath; i++) 4773 for (i = 0; i < vdev->no_of_vpath; i++)
4327 vxge_free_mac_add_list(&vdev->vpaths[i]); 4774 vxge_free_mac_add_list(&vdev->vpaths[i]);
4328 4775_exit5:
4329 vxge_device_unregister(hldev); 4776 vxge_device_unregister(hldev);
4330_exit4: 4777_exit4:
4331 pci_disable_sriov(pdev); 4778 pci_set_drvdata(pdev, NULL);
4332 vxge_hw_device_terminate(hldev); 4779 vxge_hw_device_terminate(hldev);
4780 pci_disable_sriov(pdev);
4333_exit3: 4781_exit3:
4334 iounmap(attr.bar0); 4782 iounmap(attr.bar0);
4335_exit2: 4783_exit2:
4336 pci_release_regions(pdev); 4784 pci_release_region(pdev, 0);
4337_exit1: 4785_exit1:
4338 pci_disable_device(pdev); 4786 pci_disable_device(pdev);
4339_exit0: 4787_exit0:
4340 kfree(ll_config); 4788 kfree(ll_config);
4341 kfree(device_config); 4789 kfree(device_config);
4342 driver_config->config_dev_cnt--; 4790 driver_config->config_dev_cnt--;
4343 pci_set_drvdata(pdev, NULL); 4791 driver_config->total_dev_cnt--;
4344 return ret; 4792 return ret;
4345} 4793}
4346 4794
@@ -4350,61 +4798,39 @@ _exit0:
4350 * Description: This function is called by the Pci subsystem to release a 4798 * Description: This function is called by the Pci subsystem to release a
4351 * PCI device and free up all resource held up by the device. 4799 * PCI device and free up all resource held up by the device.
4352 */ 4800 */
4353static void __devexit 4801static void __devexit vxge_remove(struct pci_dev *pdev)
4354vxge_remove(struct pci_dev *pdev)
4355{ 4802{
4356 struct __vxge_hw_device *hldev; 4803 struct __vxge_hw_device *hldev;
4357 struct vxgedev *vdev = NULL; 4804 struct vxgedev *vdev;
4358 struct net_device *dev; 4805 int i;
4359 int i = 0;
4360#if ((VXGE_DEBUG_INIT & VXGE_DEBUG_MASK) || \
4361 (VXGE_DEBUG_ENTRYEXIT & VXGE_DEBUG_MASK))
4362 u32 level_trace;
4363#endif
4364
4365 hldev = (struct __vxge_hw_device *) pci_get_drvdata(pdev);
4366 4806
4807 hldev = pci_get_drvdata(pdev);
4367 if (hldev == NULL) 4808 if (hldev == NULL)
4368 return; 4809 return;
4369 dev = hldev->ndev;
4370 vdev = netdev_priv(dev);
4371 4810
4372#if ((VXGE_DEBUG_INIT & VXGE_DEBUG_MASK) || \ 4811 vdev = netdev_priv(hldev->ndev);
4373 (VXGE_DEBUG_ENTRYEXIT & VXGE_DEBUG_MASK))
4374 level_trace = vdev->level_trace;
4375#endif
4376 vxge_debug_entryexit(level_trace,
4377 "%s:%d", __func__, __LINE__);
4378 4812
4379 vxge_debug_init(level_trace, 4813 vxge_debug_entryexit(vdev->level_trace, "%s:%d", __func__, __LINE__);
4380 "%s : removing PCI device...", __func__); 4814 vxge_debug_init(vdev->level_trace, "%s : removing PCI device...",
4381 vxge_device_unregister(hldev); 4815 __func__);
4382 4816
4383 for (i = 0; i < vdev->no_of_vpath; i++) { 4817 for (i = 0; i < vdev->no_of_vpath; i++)
4384 vxge_free_mac_add_list(&vdev->vpaths[i]); 4818 vxge_free_mac_add_list(&vdev->vpaths[i]);
4385 vdev->vpaths[i].mcast_addr_cnt = 0;
4386 vdev->vpaths[i].mac_addr_cnt = 0;
4387 }
4388
4389 kfree(vdev->vpaths);
4390
4391 iounmap(vdev->bar0);
4392
4393 pci_disable_sriov(pdev);
4394
4395 /* we are safe to free it now */
4396 free_netdev(dev);
4397
4398 vxge_debug_init(level_trace,
4399 "%s:%d Device unregistered", __func__, __LINE__);
4400 4819
4820 vxge_device_unregister(hldev);
4821 pci_set_drvdata(pdev, NULL);
4822 /* Do not call pci_disable_sriov here, as it will break child devices */
4401 vxge_hw_device_terminate(hldev); 4823 vxge_hw_device_terminate(hldev);
4402 4824 iounmap(vdev->bar0);
4825 pci_release_region(pdev, 0);
4403 pci_disable_device(pdev); 4826 pci_disable_device(pdev);
4404 pci_release_regions(pdev); 4827 driver_config->config_dev_cnt--;
4405 pci_set_drvdata(pdev, NULL); 4828 driver_config->total_dev_cnt--;
4406 vxge_debug_entryexit(level_trace, 4829
4407 "%s:%d Exiting...", __func__, __LINE__); 4830 vxge_debug_init(vdev->level_trace, "%s:%d Device unregistered",
4831 __func__, __LINE__);
4832 vxge_debug_entryexit(vdev->level_trace, "%s:%d Exiting...", __func__,
4833 __LINE__);
4408} 4834}
4409 4835
4410static struct pci_error_handlers vxge_err_handler = { 4836static struct pci_error_handlers vxge_err_handler = {
@@ -4440,6 +4866,10 @@ vxge_starter(void)
4440 return -ENOMEM; 4866 return -ENOMEM;
4441 4867
4442 ret = pci_register_driver(&vxge_driver); 4868 ret = pci_register_driver(&vxge_driver);
4869 if (ret) {
4870 kfree(driver_config);
4871 goto err;
4872 }
4443 4873
4444 if (driver_config->config_dev_cnt && 4874 if (driver_config->config_dev_cnt &&
4445 (driver_config->config_dev_cnt != driver_config->total_dev_cnt)) 4875 (driver_config->config_dev_cnt != driver_config->total_dev_cnt))
@@ -4447,10 +4877,7 @@ vxge_starter(void)
4447 "%s: Configured %d of %d devices", 4877 "%s: Configured %d of %d devices",
4448 VXGE_DRIVER_NAME, driver_config->config_dev_cnt, 4878 VXGE_DRIVER_NAME, driver_config->config_dev_cnt,
4449 driver_config->total_dev_cnt); 4879 driver_config->total_dev_cnt);
4450 4880err:
4451 if (ret)
4452 kfree(driver_config);
4453
4454 return ret; 4881 return ret;
4455} 4882}
4456 4883