author     Jon Mason <jon.mason@exar.com>          2010-07-15 04:47:24 -0400
committer  David S. Miller <davem@davemloft.net>   2010-07-15 23:46:21 -0400
commit     7adf7d1b0d50075e252aa82505fb473af38c3f20
tree       211e18105dff4f52f0632460731d34ea679c0118 /drivers/net/vxge
parent     d03848e057cb33ab4261264903b5ebee0738a8dc
vxge: Fix multicast issues
Fix an error in the multicast flag check, add calls to restore the
multicast and promiscuous mode settings after change_mtu, and clean up
style by shortening function calls through a temporary vpath variable.
Signed-off-by: Jon Mason <jon.mason@exar.com>
Signed-off-by: Sreenivasa Honnur <sreenivasa.honnur@exar.com>
Signed-off-by: Ramkrishna Vepa <ramkrishna.vepa@exar.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
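
The core logic error fixed here is easiest to see in isolation: both
branches of the original check tested that IFF_ALLMULTI was set, so the
disable path was unreachable, and the disable loop also set all_multi_flg
to 1 instead of 0. Below is a minimal, compilable sketch of the corrected
decision; set_all_multi() and its parameters are hypothetical stand-ins
for the driver state, not vxge code:

#include <stdio.h>

#define IFF_ALLMULTI 0x200	/* same value as in <linux/if.h> */

/* Sketch of the fixed enable/disable decision in vxge_set_multicast().
 * "flags" stands for dev->flags, "all_multi_flg" for the cached driver
 * state; the per-vpath hardware calls are reduced to printfs.
 */
static void set_all_multi(unsigned int flags, int *all_multi_flg)
{
	if ((flags & IFF_ALLMULTI) && !*all_multi_flg) {
		printf("enable all-multicast on every vpath\n");
		*all_multi_flg = 1;
	} else if (!(flags & IFF_ALLMULTI) && *all_multi_flg) {
		/* The pre-fix code tested (flags & IFF_ALLMULTI) here
		 * too, making this branch unreachable, and then set the
		 * flag back to 1 rather than 0. */
		printf("disable all-multicast on every vpath\n");
		*all_multi_flg = 0;
	}
}

int main(void)
{
	int all_multi = 0;

	set_all_multi(IFF_ALLMULTI, &all_multi);	/* -> enable  */
	set_all_multi(0, &all_multi);			/* -> disable */
	return 0;
}

Toggling the flag now drives both transitions, which is what the
corrected "} else if (!(dev->flags & IFF_ALLMULTI) && ..." branch in
vxge_set_multicast() achieves in the diff below.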
Diffstat (limited to 'drivers/net/vxge')
 drivers/net/vxge/vxge-main.c | 270
 1 file changed, 146 insertions(+), 124 deletions(-)
diff --git a/drivers/net/vxge/vxge-main.c b/drivers/net/vxge/vxge-main.c
index e78703d9e381..66d914c1ccb9 100644
--- a/drivers/net/vxge/vxge-main.c
+++ b/drivers/net/vxge/vxge-main.c
@@ -1088,7 +1088,8 @@ static void vxge_set_multicast(struct net_device *dev)
 	struct netdev_hw_addr *ha;
 	struct vxgedev *vdev;
 	int i, mcast_cnt = 0;
 	struct __vxge_hw_device *hldev;
+	struct vxge_vpath *vpath;
 	enum vxge_hw_status status = VXGE_HW_OK;
 	struct macInfo mac_info;
 	int vpath_idx = 0;
@@ -1108,46 +1109,48 @@ static void vxge_set_multicast(struct net_device *dev)
 
 	if ((dev->flags & IFF_ALLMULTI) && (!vdev->all_multi_flg)) {
 		for (i = 0; i < vdev->no_of_vpath; i++) {
-			vxge_assert(vdev->vpaths[i].is_open);
-			status = vxge_hw_vpath_mcast_enable(
-					vdev->vpaths[i].handle);
+			vpath = &vdev->vpaths[i];
+			vxge_assert(vpath->is_open);
+			status = vxge_hw_vpath_mcast_enable(vpath->handle);
+			if (status != VXGE_HW_OK)
+				vxge_debug_init(VXGE_ERR, "failed to enable "
+						"multicast, status %d", status);
 			vdev->all_multi_flg = 1;
 		}
-	} else if ((dev->flags & IFF_ALLMULTI) && (vdev->all_multi_flg)) {
+	} else if (!(dev->flags & IFF_ALLMULTI) && (vdev->all_multi_flg)) {
 		for (i = 0; i < vdev->no_of_vpath; i++) {
-			vxge_assert(vdev->vpaths[i].is_open);
-			status = vxge_hw_vpath_mcast_disable(
-					vdev->vpaths[i].handle);
-			vdev->all_multi_flg = 1;
+			vpath = &vdev->vpaths[i];
+			vxge_assert(vpath->is_open);
+			status = vxge_hw_vpath_mcast_disable(vpath->handle);
+			if (status != VXGE_HW_OK)
+				vxge_debug_init(VXGE_ERR, "failed to disable "
+						"multicast, status %d", status);
+			vdev->all_multi_flg = 0;
 		}
 	}
 
-	if (status != VXGE_HW_OK)
-		vxge_debug_init(VXGE_ERR,
-			"failed to %s multicast, status %d",
-			dev->flags & IFF_ALLMULTI ?
-			"enable" : "disable", status);
 
 	if (!vdev->config.addr_learn_en) {
-		if (dev->flags & IFF_PROMISC) {
-			for (i = 0; i < vdev->no_of_vpath; i++) {
-				vxge_assert(vdev->vpaths[i].is_open);
+		for (i = 0; i < vdev->no_of_vpath; i++) {
+			vpath = &vdev->vpaths[i];
+			vxge_assert(vpath->is_open);
+
+			if (dev->flags & IFF_PROMISC)
 				status = vxge_hw_vpath_promisc_enable(
-						vdev->vpaths[i].handle);
-			}
-		} else {
-			for (i = 0; i < vdev->no_of_vpath; i++) {
-				vxge_assert(vdev->vpaths[i].is_open);
+					vpath->handle);
+			else
 				status = vxge_hw_vpath_promisc_disable(
-						vdev->vpaths[i].handle);
-			}
+					vpath->handle);
+			if (status != VXGE_HW_OK)
+				vxge_debug_init(VXGE_ERR, "failed to %s promisc"
+					", status %d", dev->flags&IFF_PROMISC ?
+					"enable" : "disable", status);
 		}
 	}
 
 	memset(&mac_info, 0, sizeof(struct macInfo));
 	/* Update individual M_CAST address list */
 	if ((!vdev->all_multi_flg) && netdev_mc_count(dev)) {
-
 		mcast_cnt = vdev->vpaths[0].mcast_addr_cnt;
 		list_head = &vdev->vpaths[0].mac_addr_list;
 		if ((netdev_mc_count(dev) +
@@ -1157,14 +1160,7 @@ static void vxge_set_multicast(struct net_device *dev)
 
 		/* Delete previous MC's */
 		for (i = 0; i < mcast_cnt; i++) {
-			if (!list_empty(list_head))
-				mac_entry = (struct vxge_mac_addrs *)
-					list_first_entry(list_head,
-						struct vxge_mac_addrs,
-						item);
-
 			list_for_each_safe(entry, next, list_head) {
-
 				mac_entry = (struct vxge_mac_addrs *) entry;
 				/* Copy the mac address to delete */
 				mac_address = (u8 *)&mac_entry->macaddr;
@@ -1207,9 +1203,7 @@ _set_all_mcast:
 		mcast_cnt = vdev->vpaths[0].mcast_addr_cnt;
 		/* Delete previous MC's */
 		for (i = 0; i < mcast_cnt; i++) {
-
 			list_for_each_safe(entry, next, list_head) {
-
 				mac_entry = (struct vxge_mac_addrs *) entry;
 				/* Copy the mac address to delete */
 				mac_address = (u8 *)&mac_entry->macaddr;
@@ -1229,9 +1223,10 @@ _set_all_mcast:
 
 		/* Enable all multicast */
 		for (i = 0; i < vdev->no_of_vpath; i++) {
-			vxge_assert(vdev->vpaths[i].is_open);
-			status = vxge_hw_vpath_mcast_enable(
-					vdev->vpaths[i].handle);
+			vpath = &vdev->vpaths[i];
+			vxge_assert(vpath->is_open);
+
+			status = vxge_hw_vpath_mcast_enable(vpath->handle);
 			if (status != VXGE_HW_OK) {
 				vxge_debug_init(VXGE_ERR,
 					"%s:%d Enabling all multicasts failed",
@@ -1392,6 +1387,7 @@ void vxge_vpath_intr_disable(struct vxgedev *vdev, int vp_id)
 static int vxge_reset_vpath(struct vxgedev *vdev, int vp_id)
 {
 	enum vxge_hw_status status = VXGE_HW_OK;
+	struct vxge_vpath *vpath = &vdev->vpaths[vp_id];
 	int ret = 0;
 
 	/* check if device is down already */
@@ -1402,12 +1398,10 @@ static int vxge_reset_vpath(struct vxgedev *vdev, int vp_id)
 	if (test_bit(__VXGE_STATE_RESET_CARD, &vdev->state))
 		return 0;
 
-	if (vdev->vpaths[vp_id].handle) {
-		if (vxge_hw_vpath_reset(vdev->vpaths[vp_id].handle)
-				== VXGE_HW_OK) {
+	if (vpath->handle) {
+		if (vxge_hw_vpath_reset(vpath->handle) == VXGE_HW_OK) {
 			if (is_vxge_card_up(vdev) &&
-				vxge_hw_vpath_recover_from_reset(
-					vdev->vpaths[vp_id].handle)
+				vxge_hw_vpath_recover_from_reset(vpath->handle)
 				!= VXGE_HW_OK) {
 				vxge_debug_init(VXGE_ERR,
 					"vxge_hw_vpath_recover_from_reset"
@@ -1423,11 +1417,20 @@ static int vxge_reset_vpath(struct vxgedev *vdev, int vp_id)
 	} else
 		return VXGE_HW_FAIL;
 
-	vxge_restore_vpath_mac_addr(&vdev->vpaths[vp_id]);
-	vxge_restore_vpath_vid_table(&vdev->vpaths[vp_id]);
+	vxge_restore_vpath_mac_addr(vpath);
+	vxge_restore_vpath_vid_table(vpath);
 
 	/* Enable all broadcast */
-	vxge_hw_vpath_bcast_enable(vdev->vpaths[vp_id].handle);
+	vxge_hw_vpath_bcast_enable(vpath->handle);
+
+	/* Enable all multicast */
+	if (vdev->all_multi_flg) {
+		status = vxge_hw_vpath_mcast_enable(vpath->handle);
+		if (status != VXGE_HW_OK)
+			vxge_debug_init(VXGE_ERR,
+				"%s:%d Enabling multicast failed",
+				__func__, __LINE__);
+	}
 
 	/* Enable the interrupts */
 	vxge_vpath_intr_enable(vdev, vp_id);
@@ -1435,17 +1438,17 @@ static int vxge_reset_vpath(struct vxgedev *vdev, int vp_id)
 	smp_wmb();
 
 	/* Enable the flow of traffic through the vpath */
-	vxge_hw_vpath_enable(vdev->vpaths[vp_id].handle);
+	vxge_hw_vpath_enable(vpath->handle);
 
 	smp_wmb();
-	vxge_hw_vpath_rx_doorbell_init(vdev->vpaths[vp_id].handle);
-	vdev->vpaths[vp_id].ring.last_status = VXGE_HW_OK;
+	vxge_hw_vpath_rx_doorbell_init(vpath->handle);
+	vpath->ring.last_status = VXGE_HW_OK;
 
 	/* Vpath reset done */
 	clear_bit(vp_id, &vdev->vp_reset);
 
 	/* Start the vpath queue */
-	vxge_wake_tx_queue(&vdev->vpaths[vp_id].fifo);
+	vxge_wake_tx_queue(&vpath->fifo);
 
 	return ret;
 }
@@ -1479,9 +1482,9 @@ static int do_vxge_reset(struct vxgedev *vdev, int event)
 			vxge_debug_init(VXGE_ERR,
 				"%s: execution mode is debug, returning..",
 				vdev->ndev->name);
-		clear_bit(__VXGE_STATE_CARD_UP, &vdev->state);
-		netif_tx_stop_all_queues(vdev->ndev);
-		return 0;
+			clear_bit(__VXGE_STATE_CARD_UP, &vdev->state);
+			netif_tx_stop_all_queues(vdev->ndev);
+			return 0;
 		}
 	}
 
@@ -1628,8 +1631,7 @@ out:
  */
 int vxge_reset(struct vxgedev *vdev)
 {
-	do_vxge_reset(vdev, VXGE_LL_FULL_RESET);
-	return 0;
+	return do_vxge_reset(vdev, VXGE_LL_FULL_RESET);
 }
 
 /**
@@ -1992,17 +1994,17 @@ enum vxge_hw_status vxge_restore_vpath_mac_addr(struct vxge_vpath *vpath)
 /* reset vpaths */
 enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev)
 {
-	int i;
 	enum vxge_hw_status status = VXGE_HW_OK;
+	struct vxge_vpath *vpath;
+	int i;
 
-	for (i = 0; i < vdev->no_of_vpath; i++)
-		if (vdev->vpaths[i].handle) {
-			if (vxge_hw_vpath_reset(vdev->vpaths[i].handle)
-					== VXGE_HW_OK) {
+	for (i = 0; i < vdev->no_of_vpath; i++) {
+		vpath = &vdev->vpaths[i];
+		if (vpath->handle) {
+			if (vxge_hw_vpath_reset(vpath->handle) == VXGE_HW_OK) {
 				if (is_vxge_card_up(vdev) &&
-					vxge_hw_vpath_recover_from_reset(
-						vdev->vpaths[i].handle)
-					!= VXGE_HW_OK) {
+					vxge_hw_vpath_recover_from_reset(
+						vpath->handle) != VXGE_HW_OK) {
 					vxge_debug_init(VXGE_ERR,
 						"vxge_hw_vpath_recover_"
 						"from_reset failed for vpath: "
@@ -2016,83 +2018,87 @@ enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev)
 				return status;
 			}
 		}
+	}
+
 	return status;
 }
 
 /* close vpaths */
 void vxge_close_vpaths(struct vxgedev *vdev, int index)
 {
+	struct vxge_vpath *vpath;
 	int i;
+
 	for (i = index; i < vdev->no_of_vpath; i++) {
-		if (vdev->vpaths[i].handle && vdev->vpaths[i].is_open) {
-			vxge_hw_vpath_close(vdev->vpaths[i].handle);
+		vpath = &vdev->vpaths[i];
+
+		if (vpath->handle && vpath->is_open) {
+			vxge_hw_vpath_close(vpath->handle);
 			vdev->stats.vpaths_open--;
 		}
-		vdev->vpaths[i].is_open = 0;
-		vdev->vpaths[i].handle = NULL;
+		vpath->is_open = 0;
+		vpath->handle = NULL;
 	}
 }
 
 /* open vpaths */
 int vxge_open_vpaths(struct vxgedev *vdev)
 {
+	struct vxge_hw_vpath_attr attr;
 	enum vxge_hw_status status;
-	int i;
+	struct vxge_vpath *vpath;
 	u32 vp_id = 0;
-	struct vxge_hw_vpath_attr attr;
+	int i;
 
 	for (i = 0; i < vdev->no_of_vpath; i++) {
-		vxge_assert(vdev->vpaths[i].is_configured);
-		attr.vp_id = vdev->vpaths[i].device_id;
+		vpath = &vdev->vpaths[i];
+
+		vxge_assert(vpath->is_configured);
+		attr.vp_id = vpath->device_id;
 		attr.fifo_attr.callback = vxge_xmit_compl;
 		attr.fifo_attr.txdl_term = vxge_tx_term;
 		attr.fifo_attr.per_txdl_space = sizeof(struct vxge_tx_priv);
-		attr.fifo_attr.userdata = (void *)&vdev->vpaths[i].fifo;
+		attr.fifo_attr.userdata = &vpath->fifo;
 
 		attr.ring_attr.callback = vxge_rx_1b_compl;
 		attr.ring_attr.rxd_init = vxge_rx_initial_replenish;
 		attr.ring_attr.rxd_term = vxge_rx_term;
 		attr.ring_attr.per_rxd_space = sizeof(struct vxge_rx_priv);
-		attr.ring_attr.userdata = (void *)&vdev->vpaths[i].ring;
+		attr.ring_attr.userdata = &vpath->ring;
 
-		vdev->vpaths[i].ring.ndev = vdev->ndev;
-		vdev->vpaths[i].ring.pdev = vdev->pdev;
-		status = vxge_hw_vpath_open(vdev->devh, &attr,
-				&(vdev->vpaths[i].handle));
+		vpath->ring.ndev = vdev->ndev;
+		vpath->ring.pdev = vdev->pdev;
+		status = vxge_hw_vpath_open(vdev->devh, &attr, &vpath->handle);
 		if (status == VXGE_HW_OK) {
-			vdev->vpaths[i].fifo.handle =
+			vpath->fifo.handle =
 			    (struct __vxge_hw_fifo *)attr.fifo_attr.userdata;
-			vdev->vpaths[i].ring.handle =
+			vpath->ring.handle =
 			    (struct __vxge_hw_ring *)attr.ring_attr.userdata;
-			vdev->vpaths[i].fifo.tx_steering_type =
+			vpath->fifo.tx_steering_type =
 				vdev->config.tx_steering_type;
-			vdev->vpaths[i].fifo.ndev = vdev->ndev;
-			vdev->vpaths[i].fifo.pdev = vdev->pdev;
-			vdev->vpaths[i].fifo.indicate_max_pkts =
+			vpath->fifo.ndev = vdev->ndev;
+			vpath->fifo.pdev = vdev->pdev;
+			vpath->fifo.indicate_max_pkts =
 				vdev->config.fifo_indicate_max_pkts;
-			vdev->vpaths[i].ring.rx_vector_no = 0;
-			vdev->vpaths[i].ring.rx_csum = vdev->rx_csum;
-			vdev->vpaths[i].is_open = 1;
-			vdev->vp_handles[i] = vdev->vpaths[i].handle;
-			vdev->vpaths[i].ring.gro_enable =
-				vdev->config.gro_enable;
-			vdev->vpaths[i].ring.vlan_tag_strip =
-				vdev->vlan_tag_strip;
+			vpath->ring.rx_vector_no = 0;
+			vpath->ring.rx_csum = vdev->rx_csum;
+			vpath->is_open = 1;
+			vdev->vp_handles[i] = vpath->handle;
+			vpath->ring.gro_enable = vdev->config.gro_enable;
+			vpath->ring.vlan_tag_strip = vdev->vlan_tag_strip;
 			vdev->stats.vpaths_open++;
 		} else {
 			vdev->stats.vpath_open_fail++;
 			vxge_debug_init(VXGE_ERR,
 				"%s: vpath: %d failed to open "
 				"with status: %d",
-				vdev->ndev->name, vdev->vpaths[i].device_id,
+				vdev->ndev->name, vpath->device_id,
 				status);
 			vxge_close_vpaths(vdev, 0);
 			return -EPERM;
 		}
 
-		vp_id =
-		    ((struct __vxge_hw_vpath_handle *)vdev->vpaths[i].handle)->
-		    vpath->vp_id;
+		vp_id = vpath->handle->vpath->vp_id;
 		vdev->vpaths_deployed |= vxge_mBIT(vp_id);
 	}
 	return VXGE_HW_OK;
@@ -2266,7 +2272,6 @@ start:
 		vdev->vxge_entries[j].in_use = 0;
 
 	ret = pci_enable_msix(vdev->pdev, vdev->entries, vdev->intr_cnt);
-
 	if (ret > 0) {
 		vxge_debug_init(VXGE_ERR,
 			"%s: MSI-X enable failed for %d vectors, ret: %d",
@@ -2312,17 +2317,16 @@ static int vxge_enable_msix(struct vxgedev *vdev)
 	ret = vxge_alloc_msix(vdev);
 	if (!ret) {
 		for (i = 0; i < vdev->no_of_vpath; i++) {
+			struct vxge_vpath *vpath = &vdev->vpaths[i];
 
-			/* If fifo or ring are not enabled
-			   the MSIX vector for that should be set to 0
-			   Hence initializeing this array to all 0s.
-			*/
-			vdev->vpaths[i].ring.rx_vector_no =
-				(vdev->vpaths[i].device_id *
-					VXGE_HW_VPATH_MSIX_ACTIVE) + 1;
+			/* If fifo or ring are not enabled, the MSIX vector for
+			 * it should be set to 0.
+			 */
+			vpath->ring.rx_vector_no = (vpath->device_id *
+				VXGE_HW_VPATH_MSIX_ACTIVE) + 1;
 
-			vxge_hw_vpath_msix_set(vdev->vpaths[i].handle,
-				tim_msix_id, VXGE_ALARM_MSIX_ID);
+			vxge_hw_vpath_msix_set(vpath->handle, tim_msix_id,
+					       VXGE_ALARM_MSIX_ID);
 		}
 	}
 
@@ -2537,9 +2541,10 @@ static void vxge_poll_vp_reset(unsigned long data)
 static void vxge_poll_vp_lockup(unsigned long data)
 {
 	struct vxgedev *vdev = (struct vxgedev *)data;
-	int i;
-	struct vxge_ring *ring;
 	enum vxge_hw_status status = VXGE_HW_OK;
+	struct vxge_vpath *vpath;
+	struct vxge_ring *ring;
+	int i;
 
 	for (i = 0; i < vdev->no_of_vpath; i++) {
 		ring = &vdev->vpaths[i].ring;
@@ -2553,13 +2558,13 @@ static void vxge_poll_vp_lockup(unsigned long data)
 
 			/* schedule vpath reset */
 			if (!test_and_set_bit(i, &vdev->vp_reset)) {
+				vpath = &vdev->vpaths[i];
 
 				/* disable interrupts for this vpath */
 				vxge_vpath_intr_disable(vdev, i);
 
 				/* stop the queue for this vpath */
-				vxge_stop_tx_queue(&vdev->vpaths[i].
-							fifo);
+				vxge_stop_tx_queue(&vpath->fifo);
 				continue;
 			}
 		}
@@ -2588,6 +2593,7 @@ vxge_open(struct net_device *dev)
 	enum vxge_hw_status status;
 	struct vxgedev *vdev;
 	struct __vxge_hw_device *hldev;
+	struct vxge_vpath *vpath;
 	int ret = 0;
 	int i;
 	u64 val64, function_mode;
@@ -2626,15 +2632,17 @@ vxge_open(struct net_device *dev)
 		netif_napi_add(dev, &vdev->napi, vxge_poll_inta,
 			vdev->config.napi_weight);
 		napi_enable(&vdev->napi);
-		for (i = 0; i < vdev->no_of_vpath; i++)
-			vdev->vpaths[i].ring.napi_p = &vdev->napi;
+		for (i = 0; i < vdev->no_of_vpath; i++) {
+			vpath = &vdev->vpaths[i];
+			vpath->ring.napi_p = &vdev->napi;
+		}
 	} else {
 		for (i = 0; i < vdev->no_of_vpath; i++) {
-			netif_napi_add(dev, &vdev->vpaths[i].ring.napi,
+			vpath = &vdev->vpaths[i];
+			netif_napi_add(dev, &vpath->ring.napi,
 			    vxge_poll_msix, vdev->config.napi_weight);
-			napi_enable(&vdev->vpaths[i].ring.napi);
-			vdev->vpaths[i].ring.napi_p =
-				&vdev->vpaths[i].ring.napi;
+			napi_enable(&vpath->ring.napi);
+			vpath->ring.napi_p = &vpath->ring.napi;
 		}
 	}
 
@@ -2651,9 +2659,10 @@ vxge_open(struct net_device *dev)
 	}
 
 	for (i = 0; i < vdev->no_of_vpath; i++) {
+		vpath = &vdev->vpaths[i];
+
 		/* set initial mtu before enabling the device */
-		status = vxge_hw_vpath_mtu_set(vdev->vpaths[i].handle,
-						vdev->mtu);
+		status = vxge_hw_vpath_mtu_set(vpath->handle, vdev->mtu);
 		if (status != VXGE_HW_OK) {
 			vxge_debug_init(VXGE_ERR,
 				"%s: fatal: can not set new MTU", dev->name);
@@ -2667,10 +2676,21 @@ vxge_open(struct net_device *dev)
 		"%s: MTU is %d", vdev->ndev->name, vdev->mtu);
 	VXGE_DEVICE_DEBUG_LEVEL_SET(VXGE_ERR, VXGE_COMPONENT_LL, vdev);
 
-	/* Reprogram the DA table with populated mac addresses */
-	for (i = 0; i < vdev->no_of_vpath; i++) {
-		vxge_restore_vpath_mac_addr(&vdev->vpaths[i]);
-		vxge_restore_vpath_vid_table(&vdev->vpaths[i]);
+	/* Restore the DA, VID table and also multicast and promiscuous mode
+	 * states
+	 */
+	if (vdev->all_multi_flg) {
+		for (i = 0; i < vdev->no_of_vpath; i++) {
+			vpath = &vdev->vpaths[i];
+			vxge_restore_vpath_mac_addr(vpath);
+			vxge_restore_vpath_vid_table(vpath);
+
+			status = vxge_hw_vpath_mcast_enable(vpath->handle);
+			if (status != VXGE_HW_OK)
+				vxge_debug_init(VXGE_ERR,
+					"%s:%d Enabling multicast failed",
+					__func__, __LINE__);
+		}
 	}
 
 	/* Enable vpath to sniff all unicast/multicast traffic that not
@@ -2699,14 +2719,14 @@ vxge_open(struct net_device *dev)
 
 	/* Enabling Bcast and mcast for all vpath */
 	for (i = 0; i < vdev->no_of_vpath; i++) {
-		status = vxge_hw_vpath_bcast_enable(vdev->vpaths[i].handle);
+		vpath = &vdev->vpaths[i];
+		status = vxge_hw_vpath_bcast_enable(vpath->handle);
 		if (status != VXGE_HW_OK)
 			vxge_debug_init(VXGE_ERR,
 				"%s : Can not enable bcast for vpath "
 				"id %d", dev->name, i);
 		if (vdev->config.addr_learn_en) {
-			status =
-			    vxge_hw_vpath_mcast_enable(vdev->vpaths[i].handle);
+			status = vxge_hw_vpath_mcast_enable(vpath->handle);
 			if (status != VXGE_HW_OK)
 				vxge_debug_init(VXGE_ERR,
 					"%s : Can not enable mcast for vpath "
@@ -2741,9 +2761,11 @@ vxge_open(struct net_device *dev)
 	smp_wmb();
 
 	for (i = 0; i < vdev->no_of_vpath; i++) {
-		vxge_hw_vpath_enable(vdev->vpaths[i].handle);
+		vpath = &vdev->vpaths[i];
+
+		vxge_hw_vpath_enable(vpath->handle);
 		smp_wmb();
-		vxge_hw_vpath_rx_doorbell_init(vdev->vpaths[i].handle);
+		vxge_hw_vpath_rx_doorbell_init(vpath->handle);
 	}
 
 	netif_tx_start_all_queues(vdev->ndev);
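
A second pattern worth noting from the hunks above: a vpath reset
(including the close/open cycle behind change_mtu) clears the adapter's
filter state, so vxge_reset_vpath() and vxge_open() now re-apply the
cached all_multi_flg after restoring the DA and VID tables. A small
compilable sketch of that ordering follows; the vpath struct and HW
calls are hypothetical stand-ins for the vxge HW API, not driver code:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for the vxge vpath handle and HW calls. */
struct vpath { int id; };

static int hw_bcast_enable(struct vpath *vp)
{
	printf("vpath %d: broadcast enabled\n", vp->id);
	return 0;
}

static int hw_mcast_enable(struct vpath *vp)
{
	printf("vpath %d: multicast enabled\n", vp->id);
	return 0;
}

/* Mirror of the ordering the patch adds to vxge_reset_vpath(): hardware
 * forgets its filters across a reset, so the driver's cached state (here
 * all_multi_flg) is the source of truth and is re-applied afterwards.
 */
static void restore_after_reset(struct vpath *vp, bool all_multi_flg)
{
	/* DA and VID table restore would happen first, then: */
	hw_bcast_enable(vp);
	if (all_multi_flg && hw_mcast_enable(vp) != 0)
		fprintf(stderr, "vpath %d: enabling multicast failed\n",
			vp->id);
}

int main(void)
{
	struct vpath vp = { .id = 0 };

	restore_after_reset(&vp, true);
	return 0;
}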