diff options
author | Sathya Perla <sathya.perla@emulex.com> | 2012-02-09 13:05:27 -0500 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2012-02-13 00:47:40 -0500 |
commit | 10ef9ab4329edd08bccc7a8d34b96b85714195ce (patch) | |
tree | 0651c5e33e1ca021241d6a42318b6dd402659c54 /drivers/net/ethernet/emulex/benet/be_ethtool.c | |
parent | 23677ce3172fcb93522a1df077d21019e73ee1e3 (diff) |
be2net: event queue re-design
v2: Fixed up the bad typecasting pointed out by David...
In the current design 8 TXQs are serviced by 1 EQ, while each RSS queue
is serviced by a separate EQ. This is being changed as follows:
- Up to 8 EQs will be used (based on the availability of MSI-X vectors).
Each EQ will handle 1 RSS and 1 TX ring. The default non-RSS RX queue and
MCC queue are handled by the last EQ.
- On cards which provide support, up to 8 RSS rings will be used, instead
of the current limit of 4.
The new design allows spreading the TX multi-queue completion processing
across multiple CPUs unlike the previous design.
Signed-off-by: Sathya Perla <sathya.perla@emulex.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/ethernet/emulex/benet/be_ethtool.c')
-rw-r--r-- | drivers/net/ethernet/emulex/benet/be_ethtool.c | 96 |
1 file changed, 24 insertions, 72 deletions
diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c index dc1383c396c0..30ce17806916 100644 --- a/drivers/net/ethernet/emulex/benet/be_ethtool.c +++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c | |||
@@ -37,7 +37,6 @@ enum {DRVSTAT_TX, DRVSTAT_RX, DRVSTAT}; | |||
37 | FIELDINFO(struct be_drv_stats, field) | 37 | FIELDINFO(struct be_drv_stats, field) |
38 | 38 | ||
39 | static const struct be_ethtool_stat et_stats[] = { | 39 | static const struct be_ethtool_stat et_stats[] = { |
40 | {DRVSTAT_INFO(tx_events)}, | ||
41 | {DRVSTAT_INFO(rx_crc_errors)}, | 40 | {DRVSTAT_INFO(rx_crc_errors)}, |
42 | {DRVSTAT_INFO(rx_alignment_symbol_errors)}, | 41 | {DRVSTAT_INFO(rx_alignment_symbol_errors)}, |
43 | {DRVSTAT_INFO(rx_pause_frames)}, | 42 | {DRVSTAT_INFO(rx_pause_frames)}, |
@@ -126,8 +125,6 @@ static const struct be_ethtool_stat et_stats[] = { | |||
126 | static const struct be_ethtool_stat et_rx_stats[] = { | 125 | static const struct be_ethtool_stat et_rx_stats[] = { |
127 | {DRVSTAT_RX_INFO(rx_bytes)},/* If moving this member see above note */ | 126 | {DRVSTAT_RX_INFO(rx_bytes)},/* If moving this member see above note */ |
128 | {DRVSTAT_RX_INFO(rx_pkts)}, /* If moving this member see above note */ | 127 | {DRVSTAT_RX_INFO(rx_pkts)}, /* If moving this member see above note */ |
129 | {DRVSTAT_RX_INFO(rx_polls)}, | ||
130 | {DRVSTAT_RX_INFO(rx_events)}, | ||
131 | {DRVSTAT_RX_INFO(rx_compl)}, | 128 | {DRVSTAT_RX_INFO(rx_compl)}, |
132 | {DRVSTAT_RX_INFO(rx_mcast_pkts)}, | 129 | {DRVSTAT_RX_INFO(rx_mcast_pkts)}, |
133 | /* Number of page allocation failures while posting receive buffers | 130 | /* Number of page allocation failures while posting receive buffers |
@@ -154,7 +151,6 @@ static const struct be_ethtool_stat et_tx_stats[] = { | |||
154 | {DRVSTAT_TX_INFO(tx_reqs)}, | 151 | {DRVSTAT_TX_INFO(tx_reqs)}, |
155 | /* Number of TX work request blocks DMAed to HW */ | 152 | /* Number of TX work request blocks DMAed to HW */ |
156 | {DRVSTAT_TX_INFO(tx_wrbs)}, | 153 | {DRVSTAT_TX_INFO(tx_wrbs)}, |
157 | {DRVSTAT_TX_INFO(tx_compl)}, | ||
158 | /* Number of times the TX queue was stopped due to lack | 154 | /* Number of times the TX queue was stopped due to lack |
159 | * of spaces in the TXQ. | 155 | * of spaces in the TXQ. |
160 | */ | 156 | */ |
@@ -290,86 +286,42 @@ be_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *buf) | |||
290 | } | 286 | } |
291 | } | 287 | } |
292 | 288 | ||
293 | static int | 289 | static int be_get_coalesce(struct net_device *netdev, |
294 | be_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *coalesce) | 290 | struct ethtool_coalesce *et) |
295 | { | 291 | { |
296 | struct be_adapter *adapter = netdev_priv(netdev); | 292 | struct be_adapter *adapter = netdev_priv(netdev); |
297 | struct be_eq_obj *rx_eq = &adapter->rx_obj[0].rx_eq; | 293 | struct be_eq_obj *eqo = &adapter->eq_obj[0]; |
298 | struct be_eq_obj *tx_eq = &adapter->tx_eq; | 294 | |
299 | 295 | ||
300 | coalesce->rx_coalesce_usecs = rx_eq->cur_eqd; | 296 | et->rx_coalesce_usecs = eqo->cur_eqd; |
301 | coalesce->rx_coalesce_usecs_high = rx_eq->max_eqd; | 297 | et->rx_coalesce_usecs_high = eqo->max_eqd; |
302 | coalesce->rx_coalesce_usecs_low = rx_eq->min_eqd; | 298 | et->rx_coalesce_usecs_low = eqo->min_eqd; |
303 | 299 | ||
304 | coalesce->tx_coalesce_usecs = tx_eq->cur_eqd; | 300 | et->tx_coalesce_usecs = eqo->cur_eqd; |
305 | coalesce->tx_coalesce_usecs_high = tx_eq->max_eqd; | 301 | et->tx_coalesce_usecs_high = eqo->max_eqd; |
306 | coalesce->tx_coalesce_usecs_low = tx_eq->min_eqd; | 302 | et->tx_coalesce_usecs_low = eqo->min_eqd; |
307 | 303 | ||
308 | coalesce->use_adaptive_rx_coalesce = rx_eq->enable_aic; | 304 | et->use_adaptive_rx_coalesce = eqo->enable_aic; |
309 | coalesce->use_adaptive_tx_coalesce = tx_eq->enable_aic; | 305 | et->use_adaptive_tx_coalesce = eqo->enable_aic; |
310 | 306 | ||
311 | return 0; | 307 | return 0; |
312 | } | 308 | } |
313 | 309 | ||
314 | /* | 310 | /* TX attributes are ignored. Only RX attributes are considered |
315 | * This routine is used to set interrup coalescing delay | 311 | * eqd cmd is issued in the worker thread. |
316 | */ | 312 | */ |
317 | static int | 313 | static int be_set_coalesce(struct net_device *netdev, |
318 | be_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *coalesce) | 314 | struct ethtool_coalesce *et) |
319 | { | 315 | { |
320 | struct be_adapter *adapter = netdev_priv(netdev); | 316 | struct be_adapter *adapter = netdev_priv(netdev); |
321 | struct be_rx_obj *rxo; | 317 | struct be_eq_obj *eqo; |
322 | struct be_eq_obj *rx_eq; | 318 | int i; |
323 | struct be_eq_obj *tx_eq = &adapter->tx_eq; | 319 | |
324 | u32 rx_max, rx_min, rx_cur; | 320 | for_all_evt_queues(adapter, eqo, i) { |
325 | int status = 0, i; | 321 | eqo->enable_aic = et->use_adaptive_rx_coalesce; |
326 | u32 tx_cur; | 322 | eqo->max_eqd = min(et->rx_coalesce_usecs_high, BE_MAX_EQD); |
327 | 323 | eqo->min_eqd = min(et->rx_coalesce_usecs_low, eqo->max_eqd); | |
328 | if (coalesce->use_adaptive_tx_coalesce == 1) | 324 | eqo->eqd = et->rx_coalesce_usecs; |
329 | return -EINVAL; | ||
330 | |||
331 | for_all_rx_queues(adapter, rxo, i) { | ||
332 | rx_eq = &rxo->rx_eq; | ||
333 | |||
334 | if (!rx_eq->enable_aic && coalesce->use_adaptive_rx_coalesce) | ||
335 | rx_eq->cur_eqd = 0; | ||
336 | rx_eq->enable_aic = coalesce->use_adaptive_rx_coalesce; | ||
337 | |||
338 | rx_max = coalesce->rx_coalesce_usecs_high; | ||
339 | rx_min = coalesce->rx_coalesce_usecs_low; | ||
340 | rx_cur = coalesce->rx_coalesce_usecs; | ||
341 | |||
342 | if (rx_eq->enable_aic) { | ||
343 | if (rx_max > BE_MAX_EQD) | ||
344 | rx_max = BE_MAX_EQD; | ||
345 | if (rx_min > rx_max) | ||
346 | rx_min = rx_max; | ||
347 | rx_eq->max_eqd = rx_max; | ||
348 | rx_eq->min_eqd = rx_min; | ||
349 | if (rx_eq->cur_eqd > rx_max) | ||
350 | rx_eq->cur_eqd = rx_max; | ||
351 | if (rx_eq->cur_eqd < rx_min) | ||
352 | rx_eq->cur_eqd = rx_min; | ||
353 | } else { | ||
354 | if (rx_cur > BE_MAX_EQD) | ||
355 | rx_cur = BE_MAX_EQD; | ||
356 | if (rx_eq->cur_eqd != rx_cur) { | ||
357 | status = be_cmd_modify_eqd(adapter, rx_eq->q.id, | ||
358 | rx_cur); | ||
359 | if (!status) | ||
360 | rx_eq->cur_eqd = rx_cur; | ||
361 | } | ||
362 | } | ||
363 | } | ||
364 | |||
365 | tx_cur = coalesce->tx_coalesce_usecs; | ||
366 | |||
367 | if (tx_cur > BE_MAX_EQD) | ||
368 | tx_cur = BE_MAX_EQD; | ||
369 | if (tx_eq->cur_eqd != tx_cur) { | ||
370 | status = be_cmd_modify_eqd(adapter, tx_eq->q.id, tx_cur); | ||
371 | if (!status) | ||
372 | tx_eq->cur_eqd = tx_cur; | ||
373 | } | 325 | } |
374 | 326 | ||
375 | return 0; | 327 | return 0; |