author		Rasesh Mody <rmody@brocade.com>		2010-10-05 11:46:05 -0400
committer	David S. Miller <davem@davemloft.net>	2010-10-05 23:39:39 -0400
commit		b7ee31c5af7f04b67d8b8e4f3b2bcb8bcfced8a3
tree		a50e868a3a0d6dcc9c86ca488db91f77eaa3ab56 /drivers/net/bna/bna_ctrl.c
parent		e2fa6f2ef6e48666b78d4b0f00914b06bb19d298
bna: scope and dead code cleanup
As suggested by Stephen Hemminger:
1) Made functions and data structures static wherever possible.
2) Removed unused code.
Signed-off-by: Debashis Dutt <ddutt@brocade.com>
Signed-off-by: Rasesh Mody <rmody@brocade.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/bna/bna_ctrl.c')
-rw-r--r--	drivers/net/bna/bna_ctrl.c	559
1 file changed, 98 insertions(+), 461 deletions(-)
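For readers skimming the diff: the cleanup applies the standard C scope-narrowing idiom. A function or table referenced only inside this translation unit is given internal linkage with the "static" keyword, so its symbol stops leaking into the driver-wide namespace and the compiler can warn once it becomes unused. A minimal, self-contained sketch of the pattern (illustrative only; not bna driver code):

    /* scope_demo.c - illustration of the "make it static" cleanup pattern */
    #include <stdio.h>

    /* Internal linkage: only this file can call it, and -Wunused-function
     * can flag it once its last caller is removed. */
    static void print_link_status(int link_up)
    {
            printf("link %s\n", link_up ? "up" : "down");
    }

    int main(void)
    {
            print_link_status(1);
            return 0;
    }

Symbols that other files in the driver still call (for example bna_rx_mode_set() and bna_stats_get() below) keep external linkage, which is why only part of the file gains static in this patch.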
diff --git a/drivers/net/bna/bna_ctrl.c b/drivers/net/bna/bna_ctrl.c
index ddd922f210c7..07b26598546e 100644
--- a/drivers/net/bna/bna_ctrl.c
+++ b/drivers/net/bna/bna_ctrl.c
@@ -19,6 +19,46 @@
 #include "bfa_sm.h"
 #include "bfa_wc.h"

+static void bna_device_cb_port_stopped(void *arg, enum bna_cb_status status);
+
+static void
+bna_port_cb_link_up(struct bna_port *port, struct bfi_ll_aen *aen,
+			int status)
+{
+	int i;
+	u8 prio_map;
+
+	port->llport.link_status = BNA_LINK_UP;
+	if (aen->cee_linkup)
+		port->llport.link_status = BNA_CEE_UP;
+
+	/* Compute the priority */
+	prio_map = aen->prio_map;
+	if (prio_map) {
+		for (i = 0; i < 8; i++) {
+			if ((prio_map >> i) & 0x1)
+				break;
+		}
+		port->priority = i;
+	} else
+		port->priority = 0;
+
+	/* Dispatch events */
+	bna_tx_mod_cee_link_status(&port->bna->tx_mod, aen->cee_linkup);
+	bna_tx_mod_prio_changed(&port->bna->tx_mod, port->priority);
+	port->link_cbfn(port->bna->bnad, port->llport.link_status);
+}
+
+static void
+bna_port_cb_link_down(struct bna_port *port, int status)
+{
+	port->llport.link_status = BNA_LINK_DOWN;
+
+	/* Dispatch events */
+	bna_tx_mod_cee_link_status(&port->bna->tx_mod, BNA_LINK_DOWN);
+	port->link_cbfn(port->bna->bnad, BNA_LINK_DOWN);
+}
+
 /**
  * MBOX
  */
@@ -96,7 +136,7 @@ bna_ll_isr(void *llarg, struct bfi_mbmsg *msg)
 		bna_mbox_aen_callback(bna, msg);
 }

-void
+static void
 bna_err_handler(struct bna *bna, u32 intr_status)
 {
 	u32 init_halt;
@@ -140,7 +180,7 @@ bna_mbox_send(struct bna *bna, struct bna_mbox_qe *mbox_qe)
 	}
 }

-void
+static void
 bna_mbox_flush_q(struct bna *bna, struct list_head *q)
 {
 	struct bna_mbox_qe *mb_qe = NULL;
@@ -166,18 +206,18 @@ bna_mbox_flush_q(struct bna *bna, struct list_head *q)
 	bna->mbox_mod.state = BNA_MBOX_FREE;
 }

-void
+static void
 bna_mbox_mod_start(struct bna_mbox_mod *mbox_mod)
 {
 }

-void
+static void
 bna_mbox_mod_stop(struct bna_mbox_mod *mbox_mod)
 {
 	bna_mbox_flush_q(mbox_mod->bna, &mbox_mod->posted_q);
 }

-void
+static void
 bna_mbox_mod_init(struct bna_mbox_mod *mbox_mod, struct bna *bna)
 {
 	bfa_nw_ioc_mbox_regisr(&bna->device.ioc, BFI_MC_LL, bna_ll_isr, bna);
@@ -187,7 +227,7 @@ bna_mbox_mod_init(struct bna_mbox_mod *mbox_mod, struct bna *bna)
 	mbox_mod->bna = bna;
 }

-void
+static void
 bna_mbox_mod_uninit(struct bna_mbox_mod *mbox_mod)
 {
 	mbox_mod->bna = NULL;
@@ -538,7 +578,7 @@ bna_fw_cb_llport_down(void *arg, int status)
 	bfa_fsm_send_event(llport, LLPORT_E_FWRESP_DOWN);
 }

-void
+static void
 bna_port_cb_llport_stopped(struct bna_port *port,
 				enum bna_cb_status status)
 {
@@ -591,7 +631,7 @@ bna_llport_fail(struct bna_llport *llport)
 	bfa_fsm_send_event(llport, LLPORT_E_FAIL);
 }

-int
+static int
 bna_llport_state_get(struct bna_llport *llport)
 {
 	return bfa_sm_to_state(llport_sm_table, llport->fsm);
@@ -1109,7 +1149,7 @@ bna_port_cb_chld_stopped(void *arg)
 	bfa_fsm_send_event(port, PORT_E_CHLD_STOPPED);
 }

-void
+static void
 bna_port_init(struct bna_port *port, struct bna *bna)
 {
 	port->bna = bna;
@@ -1137,7 +1177,7 @@ bna_port_init(struct bna_port *port, struct bna *bna)
 	bna_llport_init(&port->llport, bna);
 }

-void
+static void
 bna_port_uninit(struct bna_port *port)
 {
 	bna_llport_uninit(&port->llport);
@@ -1147,13 +1187,13 @@ bna_port_uninit(struct bna_port *port)
 	port->bna = NULL;
 }

-int
+static int
 bna_port_state_get(struct bna_port *port)
 {
 	return bfa_sm_to_state(port_sm_table, port->fsm);
 }

-void
+static void
 bna_port_start(struct bna_port *port)
 {
 	port->flags |= BNA_PORT_F_DEVICE_READY;
@@ -1161,7 +1201,7 @@ bna_port_start(struct bna_port *port)
 	bfa_fsm_send_event(port, PORT_E_START);
 }

-void
+static void
 bna_port_stop(struct bna_port *port)
 {
 	port->stop_cbfn = bna_device_cb_port_stopped;
@@ -1171,7 +1211,7 @@ bna_port_stop(struct bna_port *port)
 	bfa_fsm_send_event(port, PORT_E_STOP);
 }

-void
+static void
 bna_port_fail(struct bna_port *port)
 {
 	port->flags &= ~BNA_PORT_F_DEVICE_READY;
@@ -1190,44 +1230,6 @@ bna_port_cb_rx_stopped(struct bna_port *port, enum bna_cb_status status)
 	bfa_wc_down(&port->chld_stop_wc);
 }

-void
-bna_port_cb_link_up(struct bna_port *port, struct bfi_ll_aen *aen,
-			int status)
-{
-	int i;
-	u8 prio_map;
-
-	port->llport.link_status = BNA_LINK_UP;
-	if (aen->cee_linkup)
-		port->llport.link_status = BNA_CEE_UP;
-
-	/* Compute the priority */
-	prio_map = aen->prio_map;
-	if (prio_map) {
-		for (i = 0; i < 8; i++) {
-			if ((prio_map >> i) & 0x1)
-				break;
-		}
-		port->priority = i;
-	} else
-		port->priority = 0;
-
-	/* Dispatch events */
-	bna_tx_mod_cee_link_status(&port->bna->tx_mod, aen->cee_linkup);
-	bna_tx_mod_prio_changed(&port->bna->tx_mod, port->priority);
-	port->link_cbfn(port->bna->bnad, port->llport.link_status);
-}
-
-void
-bna_port_cb_link_down(struct bna_port *port, int status)
-{
-	port->llport.link_status = BNA_LINK_DOWN;
-
-	/* Dispatch events */
-	bna_tx_mod_cee_link_status(&port->bna->tx_mod, BNA_LINK_DOWN);
-	port->link_cbfn(port->bna->bnad, BNA_LINK_DOWN);
-}
-
 int
 bna_port_mtu_get(struct bna_port *port)
 {
@@ -1293,54 +1295,6 @@ bna_port_mac_get(struct bna_port *port, mac_t *mac)
 }

 /**
- * Should be called only when port is disabled
- */
-void
-bna_port_type_set(struct bna_port *port, enum bna_port_type type)
-{
-	port->type = type;
-	port->llport.type = type;
-}
-
-/**
- * Should be called only when port is disabled
- */
-void
-bna_port_linkcbfn_set(struct bna_port *port,
-		void (*linkcbfn)(struct bnad *, enum bna_link_status))
-{
-	port->link_cbfn = linkcbfn;
-}
-
-void
-bna_port_admin_up(struct bna_port *port)
-{
-	struct bna_llport *llport = &port->llport;
-
-	if (llport->flags & BNA_LLPORT_F_ENABLED)
-		return;
-
-	llport->flags |= BNA_LLPORT_F_ENABLED;
-
-	if (llport->flags & BNA_LLPORT_F_RX_ENABLED)
-		bfa_fsm_send_event(llport, LLPORT_E_UP);
-}
-
-void
-bna_port_admin_down(struct bna_port *port)
-{
-	struct bna_llport *llport = &port->llport;
-
-	if (!(llport->flags & BNA_LLPORT_F_ENABLED))
-		return;
-
-	llport->flags &= ~BNA_LLPORT_F_ENABLED;
-
-	if (llport->flags & BNA_LLPORT_F_RX_ENABLED)
-		bfa_fsm_send_event(llport, LLPORT_E_DOWN);
-}
-
-/**
  * DEVICE
  */
 #define enable_mbox_intr(_device)\
@@ -1357,7 +1311,7 @@ do {\
 	bnad_cb_device_disable_mbox_intr((_device)->bna->bnad);\
 } while (0)

-const struct bna_chip_regs_offset reg_offset[] =
+static const struct bna_chip_regs_offset reg_offset[] =
 {{HOST_PAGE_NUM_FN0, HOSTFN0_INT_STATUS,
 	HOSTFN0_INT_MASK, HOST_MSIX_ERR_INDEX_FN0},
 {HOST_PAGE_NUM_FN1, HOSTFN1_INT_STATUS,
@@ -1642,7 +1596,34 @@ static struct bfa_ioc_cbfn bfa_iocll_cbfn = {
 	bna_device_cb_iocll_reset
 };

-void
+/* device */
+static void
+bna_adv_device_init(struct bna_device *device, struct bna *bna,
+			struct bna_res_info *res_info)
+{
+	u8 *kva;
+	u64 dma;
+
+	device->bna = bna;
+
+	kva = res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.mdl[0].kva;
+
+	/**
+	 * Attach common modules (Diag, SFP, CEE, Port) and claim respective
+	 * DMA memory.
+	 */
+	BNA_GET_DMA_ADDR(
+		&res_info[BNA_RES_MEM_T_COM].res_u.mem_info.mdl[0].dma, dma);
+	kva = res_info[BNA_RES_MEM_T_COM].res_u.mem_info.mdl[0].kva;
+
+	bfa_nw_cee_attach(&bna->cee, &device->ioc, bna);
+	bfa_nw_cee_mem_claim(&bna->cee, kva, dma);
+	kva += bfa_nw_cee_meminfo();
+	dma += bfa_nw_cee_meminfo();
+
+}
+
+static void
 bna_device_init(struct bna_device *device, struct bna *bna,
 		struct bna_res_info *res_info)
 {
@@ -1681,7 +1662,7 @@ bna_device_init(struct bna_device *device, struct bna *bna,
 	bfa_fsm_set_state(device, bna_device_sm_stopped);
 }

-void
+static void
 bna_device_uninit(struct bna_device *device)
 {
 	bna_mbox_mod_uninit(&device->bna->mbox_mod);
@@ -1691,7 +1672,7 @@ bna_device_uninit(struct bna_device *device)
 	device->bna = NULL;
 }

-void
+static void
 bna_device_cb_port_stopped(void *arg, enum bna_cb_status status)
 {
 	struct bna_device *device = (struct bna_device *)arg;
@@ -1699,7 +1680,7 @@ bna_device_cb_port_stopped(void *arg, enum bna_cb_status status)
 	bfa_fsm_send_event(device, DEVICE_E_PORT_STOPPED);
 }

-int
+static int
 bna_device_status_get(struct bna_device *device)
 {
 	return device->fsm == (bfa_fsm_t)bna_device_sm_ready;
@@ -1733,24 +1714,13 @@ bna_device_disable(struct bna_device *device, enum bna_cleanup_type type)
 	bfa_fsm_send_event(device, DEVICE_E_DISABLE);
 }

-int
+static int
 bna_device_state_get(struct bna_device *device)
 {
 	return bfa_sm_to_state(device_sm_table, device->fsm);
 }

-u32 bna_dim_vector[BNA_LOAD_T_MAX][BNA_BIAS_T_MAX] = {
-	{12, 20},
-	{10, 18},
-	{8, 16},
-	{6, 12},
-	{4, 8},
-	{3, 6},
-	{2, 4},
-	{1, 2},
-};
-
-u32 bna_napi_dim_vector[BNA_LOAD_T_MAX][BNA_BIAS_T_MAX] = {
+const u32 bna_napi_dim_vector[BNA_LOAD_T_MAX][BNA_BIAS_T_MAX] = {
 	{12, 12},
 	{6, 10},
 	{5, 10},
@@ -1761,36 +1731,9 @@ u32 bna_napi_dim_vector[BNA_LOAD_T_MAX][BNA_BIAS_T_MAX] = {
 	{1, 2},
 };

-/* device */
-void
-bna_adv_device_init(struct bna_device *device, struct bna *bna,
-			struct bna_res_info *res_info)
-{
-	u8 *kva;
-	u64 dma;
-
-	device->bna = bna;
-
-	kva = res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.mdl[0].kva;
-
-	/**
-	 * Attach common modules (Diag, SFP, CEE, Port) and claim respective
-	 * DMA memory.
-	 */
-	BNA_GET_DMA_ADDR(
-		&res_info[BNA_RES_MEM_T_COM].res_u.mem_info.mdl[0].dma, dma);
-	kva = res_info[BNA_RES_MEM_T_COM].res_u.mem_info.mdl[0].kva;
-
-	bfa_nw_cee_attach(&bna->cee, &device->ioc, bna);
-	bfa_nw_cee_mem_claim(&bna->cee, kva, dma);
-	kva += bfa_nw_cee_meminfo();
-	dma += bfa_nw_cee_meminfo();
-
-}
-
 /* utils */

-void
+static void
 bna_adv_res_req(struct bna_res_info *res_info)
 {
 	/* DMA memory for COMMON_MODULE */
@@ -2044,36 +1987,6 @@ bna_fw_stats_get(struct bna *bna)
 	bna->stats.txf_bmap[1] = bna->tx_mod.txf_bmap[1];
 }

-static void
-bna_fw_cb_stats_clr(void *arg, int status)
-{
-	struct bna *bna = (struct bna *)arg;
-
-	bfa_q_qe_init(&bna->mbox_qe.qe);
-
-	memset(bna->stats.sw_stats, 0, sizeof(struct bna_sw_stats));
-	memset(bna->stats.hw_stats, 0, sizeof(struct bfi_ll_stats));
-
-	bnad_cb_stats_clr(bna->bnad);
-}
-
-static void
-bna_fw_stats_clr(struct bna *bna)
-{
-	struct bfi_ll_stats_req ll_req;
-
-	bfi_h2i_set(ll_req.mh, BFI_MC_LL, BFI_LL_H2I_STATS_CLEAR_REQ, 0);
-	ll_req.stats_mask = htons(BFI_LL_STATS_ALL);
-	ll_req.rxf_id_mask[0] = htonl(0xffffffff);
-	ll_req.rxf_id_mask[1] = htonl(0xffffffff);
-	ll_req.txf_id_mask[0] = htonl(0xffffffff);
-	ll_req.txf_id_mask[1] = htonl(0xffffffff);
-
-	bna_mbox_qe_fill(&bna->mbox_qe, &ll_req, sizeof(ll_req),
-			bna_fw_cb_stats_clr, bna);
-	bna_mbox_send(bna, &bna->mbox_qe);
-}
-
 void
 bna_stats_get(struct bna *bna)
 {
@@ -2083,22 +1996,8 @@ bna_stats_get(struct bna *bna)
 		bnad_cb_stats_get(bna->bnad, BNA_CB_FAIL, &bna->stats);
 }

-void
-bna_stats_clr(struct bna *bna)
-{
-	if (bna_device_status_get(&bna->device))
-		bna_fw_stats_clr(bna);
-	else {
-		memset(&bna->stats.sw_stats, 0,
-			sizeof(struct bna_sw_stats));
-		memset(bna->stats.hw_stats, 0,
-			sizeof(struct bfi_ll_stats));
-		bnad_cb_stats_clr(bna->bnad);
-	}
-}
-
 /* IB */
-void
+static void
 bna_ib_coalescing_timeo_set(struct bna_ib *ib, u8 coalescing_timeo)
 {
 	ib->ib_config.coalescing_timeo = coalescing_timeo;
@@ -2157,7 +2056,7 @@ rxf_fltr_mbox_cmd(struct bna_rxf *rxf, u8 cmd, enum bna_status status)
 	bna_mbox_send(rxf->rx->bna, &rxf->mbox_qe);
 }

-void
+static void
 __rxf_default_function_config(struct bna_rxf *rxf, enum bna_status status)
 {
 	struct bna_rx_fndb_ram *rx_fndb_ram;
@@ -2553,7 +2452,7 @@ rxf_reset_packet_filter_allmulti(struct bna_rxf *rxf)
  * 0 = no h/w change
  * 1 = need h/w change
  */
-int
+static int
 rxf_promisc_enable(struct bna_rxf *rxf)
 {
 	struct bna *bna = rxf->rx->bna;
@@ -2584,7 +2483,7 @@ rxf_promisc_enable(struct bna_rxf *rxf)
  * 0 = no h/w change
  * 1 = need h/w change
  */
-int
+static int
 rxf_promisc_disable(struct bna_rxf *rxf)
 {
 	struct bna *bna = rxf->rx->bna;
@@ -2623,7 +2522,7 @@ rxf_promisc_disable(struct bna_rxf *rxf)
  * 0 = no h/w change
  * 1 = need h/w change
  */
-int
+static int
 rxf_default_enable(struct bna_rxf *rxf)
 {
 	struct bna *bna = rxf->rx->bna;
@@ -2654,7 +2553,7 @@ rxf_default_enable(struct bna_rxf *rxf)
  * 0 = no h/w change
  * 1 = need h/w change
  */
-int
+static int
 rxf_default_disable(struct bna_rxf *rxf)
 {
 	struct bna *bna = rxf->rx->bna;
@@ -2693,7 +2592,7 @@ rxf_default_disable(struct bna_rxf *rxf)
  * 0 = no h/w change
  * 1 = need h/w change
  */
-int
+static int
 rxf_allmulti_enable(struct bna_rxf *rxf)
 {
 	int ret = 0;
@@ -2721,7 +2620,7 @@ rxf_allmulti_enable(struct bna_rxf *rxf)
  * 0 = no h/w change
  * 1 = need h/w change
  */
-int
+static int
 rxf_allmulti_disable(struct bna_rxf *rxf)
 {
 	int ret = 0;
@@ -2746,159 +2645,6 @@ rxf_allmulti_disable(struct bna_rxf *rxf)
 }

 /* RxF <- bnad */
-void
-bna_rx_mcast_delall(struct bna_rx *rx,
-		void (*cbfn)(struct bnad *, struct bna_rx *,
-				enum bna_cb_status))
-{
-	struct bna_rxf *rxf = &rx->rxf;
-	struct list_head *qe;
-	struct bna_mac *mac;
-	int need_hw_config = 0;
-
-	/* Purge all entries from pending_add_q */
-	while (!list_empty(&rxf->mcast_pending_add_q)) {
-		bfa_q_deq(&rxf->mcast_pending_add_q, &qe);
-		mac = (struct bna_mac *)qe;
-		bfa_q_qe_init(&mac->qe);
-		bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
-	}
-
-	/* Schedule all entries in active_q for deletion */
-	while (!list_empty(&rxf->mcast_active_q)) {
-		bfa_q_deq(&rxf->mcast_active_q, &qe);
-		mac = (struct bna_mac *)qe;
-		bfa_q_qe_init(&mac->qe);
-		list_add_tail(&mac->qe, &rxf->mcast_pending_del_q);
-		need_hw_config = 1;
-	}
-
-	if (need_hw_config) {
-		rxf->cam_fltr_cbfn = cbfn;
-		rxf->cam_fltr_cbarg = rx->bna->bnad;
-		bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
-		return;
-	}
-
-	if (cbfn)
-		(*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS);
-}
-
-/* RxF <- Rx */
-void
-bna_rx_receive_resume(struct bna_rx *rx,
-		void (*cbfn)(struct bnad *, struct bna_rx *,
-				enum bna_cb_status))
-{
-	struct bna_rxf *rxf = &rx->rxf;
-
-	if (rxf->rxf_oper_state == BNA_RXF_OPER_STATE_PAUSED) {
-		rxf->oper_state_cbfn = cbfn;
-		rxf->oper_state_cbarg = rx->bna->bnad;
-		bfa_fsm_send_event(rxf, RXF_E_RESUME);
-	} else if (cbfn)
-		(*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS);
-}
-
-void
-bna_rx_receive_pause(struct bna_rx *rx,
-		void (*cbfn)(struct bnad *, struct bna_rx *,
-				enum bna_cb_status))
-{
-	struct bna_rxf *rxf = &rx->rxf;
-
-	if (rxf->rxf_oper_state == BNA_RXF_OPER_STATE_RUNNING) {
-		rxf->oper_state_cbfn = cbfn;
-		rxf->oper_state_cbarg = rx->bna->bnad;
-		bfa_fsm_send_event(rxf, RXF_E_PAUSE);
-	} else if (cbfn)
-		(*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS);
-}
-
-/* RxF <- bnad */
-enum bna_cb_status
-bna_rx_ucast_add(struct bna_rx *rx, u8 *addr,
-		void (*cbfn)(struct bnad *, struct bna_rx *,
-				enum bna_cb_status))
-{
-	struct bna_rxf *rxf = &rx->rxf;
-	struct list_head *qe;
-	struct bna_mac *mac;
-
-	/* Check if already added */
-	list_for_each(qe, &rxf->ucast_active_q) {
-		mac = (struct bna_mac *)qe;
-		if (BNA_MAC_IS_EQUAL(mac->addr, addr)) {
-			if (cbfn)
-				(*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS);
-			return BNA_CB_SUCCESS;
-		}
-	}
-
-	/* Check if pending addition */
-	list_for_each(qe, &rxf->ucast_pending_add_q) {
-		mac = (struct bna_mac *)qe;
-		if (BNA_MAC_IS_EQUAL(mac->addr, addr)) {
-			if (cbfn)
-				(*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS);
-			return BNA_CB_SUCCESS;
-		}
-	}
-
-	mac = bna_ucam_mod_mac_get(&rxf->rx->bna->ucam_mod);
-	if (mac == NULL)
-		return BNA_CB_UCAST_CAM_FULL;
-	bfa_q_qe_init(&mac->qe);
-	memcpy(mac->addr, addr, ETH_ALEN);
-	list_add_tail(&mac->qe, &rxf->ucast_pending_add_q);
-
-	rxf->cam_fltr_cbfn = cbfn;
-	rxf->cam_fltr_cbarg = rx->bna->bnad;
-
-	bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
-
-	return BNA_CB_SUCCESS;
-}
-
-/* RxF <- bnad */
-enum bna_cb_status
-bna_rx_ucast_del(struct bna_rx *rx, u8 *addr,
-		void (*cbfn)(struct bnad *, struct bna_rx *,
-				enum bna_cb_status))
-{
-	struct bna_rxf *rxf = &rx->rxf;
-	struct list_head *qe;
-	struct bna_mac *mac;
-
-	list_for_each(qe, &rxf->ucast_pending_add_q) {
-		mac = (struct bna_mac *)qe;
-		if (BNA_MAC_IS_EQUAL(mac->addr, addr)) {
-			list_del(qe);
-			bfa_q_qe_init(qe);
-			bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod, mac);
-			if (cbfn)
-				(*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS);
-			return BNA_CB_SUCCESS;
-		}
-	}
-
-	list_for_each(qe, &rxf->ucast_active_q) {
-		mac = (struct bna_mac *)qe;
-		if (BNA_MAC_IS_EQUAL(mac->addr, addr)) {
-			list_del(qe);
-			bfa_q_qe_init(qe);
-			list_add_tail(qe, &rxf->ucast_pending_del_q);
-			rxf->cam_fltr_cbfn = cbfn;
-			rxf->cam_fltr_cbarg = rx->bna->bnad;
-			bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
-			return BNA_CB_SUCCESS;
-		}
-	}
-
-	return BNA_CB_INVALID_MAC;
-}
-
-/* RxF <- bnad */
 enum bna_cb_status
 bna_rx_mode_set(struct bna_rx *rx, enum bna_rxmode new_mode,
 		enum bna_rxmode bitmask,
@@ -2978,39 +2724,6 @@ err_return:
 	return BNA_CB_FAIL;
 }

-/* RxF <- bnad */
-void
-bna_rx_rss_enable(struct bna_rx *rx)
-{
-	struct bna_rxf *rxf = &rx->rxf;
-
-	rxf->rxf_flags |= BNA_RXF_FL_RSS_CONFIG_PENDING;
-	rxf->rss_status = BNA_STATUS_T_ENABLED;
-	bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
-}
-
-/* RxF <- bnad */
-void
-bna_rx_rss_disable(struct bna_rx *rx)
-{
-	struct bna_rxf *rxf = &rx->rxf;
-
-	rxf->rxf_flags |= BNA_RXF_FL_RSS_CONFIG_PENDING;
-	rxf->rss_status = BNA_STATUS_T_DISABLED;
-	bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
-}
-
-/* RxF <- bnad */
-void
-bna_rx_rss_reconfig(struct bna_rx *rx, struct bna_rxf_rss *rss_config)
-{
-	struct bna_rxf *rxf = &rx->rxf;
-	rxf->rxf_flags |= BNA_RXF_FL_RSS_CONFIG_PENDING;
-	rxf->rss_status = BNA_STATUS_T_ENABLED;
-	rxf->rss_cfg = *rss_config;
-	bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
-}
-
 void
 /* RxF <- bnad */
 bna_rx_vlanfilter_enable(struct bna_rx *rx)
@@ -3024,68 +2737,8 @@ bna_rx_vlanfilter_enable(struct bna_rx *rx)
 	}
 }

-/* RxF <- bnad */
-void
-bna_rx_vlanfilter_disable(struct bna_rx *rx)
-{
-	struct bna_rxf *rxf = &rx->rxf;
-
-	if (rxf->vlan_filter_status == BNA_STATUS_T_ENABLED) {
-		rxf->rxf_flags |= BNA_RXF_FL_VLAN_CONFIG_PENDING;
-		rxf->vlan_filter_status = BNA_STATUS_T_DISABLED;
-		bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
-	}
-}
-
 /* Rx */

-struct bna_rxp *
-bna_rx_get_rxp(struct bna_rx *rx, int vector)
-{
-	struct bna_rxp *rxp;
-	struct list_head *qe;
-
-	list_for_each(qe, &rx->rxp_q) {
-		rxp = (struct bna_rxp *)qe;
-		if (rxp->vector == vector)
-			return rxp;
-	}
-	return NULL;
-}
-
-/*
- * bna_rx_rss_rit_set()
- * Sets the Q ids for the specified msi-x vectors in the RIT.
- * Maximum rit size supported is 64, which should be the max size of the
- * vectors array.
- */
-
-void
-bna_rx_rss_rit_set(struct bna_rx *rx, unsigned int *vectors, int nvectors)
-{
-	int i;
-	struct bna_rxp *rxp;
-	struct bna_rxq *q0 = NULL, *q1 = NULL;
-	struct bna *bna;
-	struct bna_rxf *rxf;
-
-	/* Build the RIT contents for this RX */
-	bna = rx->bna;
-
-	rxf = &rx->rxf;
-	for (i = 0; i < nvectors; i++) {
-		rxp = bna_rx_get_rxp(rx, vectors[i]);
-
-		GET_RXQS(rxp, q0, q1);
-		rxf->rit_segment->rit[i].large_rxq_id = q0->rxq_id;
-		rxf->rit_segment->rit[i].small_rxq_id = (q1 ? q1->rxq_id : 0);
-	}
-
-	rxf->rit_segment->rit_size = nvectors;
-
-	/* Subsequent call to enable/reconfig RSS will update the RIT in h/w */
-}
-
 /* Rx <- bnad */
 void
 bna_rx_coalescing_timeo_set(struct bna_rx *rx, int coalescing_timeo)
@@ -3102,7 +2755,7 @@ bna_rx_coalescing_timeo_set(struct bna_rx *rx, int coalescing_timeo)

 /* Rx <- bnad */
 void
-bna_rx_dim_reconfig(struct bna *bna, u32 vector[][BNA_BIAS_T_MAX])
+bna_rx_dim_reconfig(struct bna *bna, const u32 vector[][BNA_BIAS_T_MAX])
 {
 	int i, j;

@@ -3165,22 +2818,6 @@ bna_rx_dim_update(struct bna_ccb *ccb)

 /* Tx */
 /* TX <- bnad */
-enum bna_cb_status
-bna_tx_prio_set(struct bna_tx *tx, int prio,
-		void (*cbfn)(struct bnad *, struct bna_tx *,
-				enum bna_cb_status))
-{
-	if (tx->flags & BNA_TX_F_PRIO_LOCK)
-		return BNA_CB_FAIL;
-	else {
-		tx->prio_change_cbfn = cbfn;
-		bna_tx_prio_changed(tx, prio);
-	}
-
-	return BNA_CB_SUCCESS;
-}
-
-/* TX <- bnad */
 void
 bna_tx_coalescing_timeo_set(struct bna_tx *tx, int coalescing_timeo)
 {