author     Ron Mercer <ron.mercer@qlogic.com>      2009-08-27 07:02:09 -0400
committer  David S. Miller <davem@davemloft.net>   2009-08-29 02:22:28 -0400
commit     b2014ff8ac314f58d6542ec4ea7b576a2de21c8b
tree       86710e2716e5b36120ea27b94d2e79d4326f7477 /drivers/net/qlge/qlge_ethtool.c
parent     b7f1d43a2ba1b63abbb1dcd966ab1edb9f62f636
qlge: Get rid of 'default' rx_ring type.
Currently we have three types of RX rings:

1) Default ring - services the rx_ring for broadcast/multicast, and handles firmware events and errors.
2) TX completion ring - handles only outbound completions.
3) RSS ring - handles only inbound completions.

This patch gets rid of the default ring type and moves its functionality into the first RSS ring. This makes better use of MSIX vectors, since they are a limited resource on some platforms. (See the ring-layout sketch after the diff.)

Signed-off-by: Ron Mercer <ron.mercer@qlogic.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/qlge/qlge_ethtool.c')
-rw-r--r--  drivers/net/qlge/qlge_ethtool.c | 16 ++++++++--------
1 file changed, 8 insertions(+), 8 deletions(-)
diff --git a/drivers/net/qlge/qlge_ethtool.c b/drivers/net/qlge/qlge_ethtool.c
index eb6a9ee640ed..68f9bd280f86 100644
--- a/drivers/net/qlge/qlge_ethtool.c
+++ b/drivers/net/qlge/qlge_ethtool.c
@@ -49,10 +49,11 @@ static int ql_update_ring_coalescing(struct ql_adapter *qdev)
 	/* Skip the default queue, and update the outbound handler
 	 * queues if they changed.
 	 */
-	cqicb = (struct cqicb *)&qdev->rx_ring[1];
+	cqicb = (struct cqicb *)&qdev->rx_ring[qdev->rss_ring_count];
 	if (le16_to_cpu(cqicb->irq_delay) != qdev->tx_coalesce_usecs ||
-	    le16_to_cpu(cqicb->pkt_delay) != qdev->tx_max_coalesced_frames) {
-		for (i = 1; i < qdev->rss_ring_first_cq_id; i++, rx_ring++) {
+	    le16_to_cpu(cqicb->pkt_delay) !=
+	    qdev->tx_max_coalesced_frames) {
+		for (i = qdev->rss_ring_count; i < qdev->rx_ring_count; i++) {
 			rx_ring = &qdev->rx_ring[i];
 			cqicb = (struct cqicb *)rx_ring;
 			cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs);
@@ -70,12 +71,11 @@ static int ql_update_ring_coalescing(struct ql_adapter *qdev)
 	}
 
 	/* Update the inbound (RSS) handler queues if they changed. */
-	cqicb = (struct cqicb *)&qdev->rx_ring[qdev->rss_ring_first_cq_id];
+	cqicb = (struct cqicb *)&qdev->rx_ring[0];
 	if (le16_to_cpu(cqicb->irq_delay) != qdev->rx_coalesce_usecs ||
-	    le16_to_cpu(cqicb->pkt_delay) != qdev->rx_max_coalesced_frames) {
-		for (i = qdev->rss_ring_first_cq_id;
-		     i <= qdev->rss_ring_first_cq_id + qdev->rss_ring_count;
-		     i++) {
+	    le16_to_cpu(cqicb->pkt_delay) !=
+	    qdev->rx_max_coalesced_frames) {
+		for (i = 0; i < qdev->rss_ring_count; i++, rx_ring++) {
 			rx_ring = &qdev->rx_ring[i];
 			cqicb = (struct cqicb *)rx_ring;
 			cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs);
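
For readers tracing the new indexing: after this patch the inbound (RSS) completion rings occupy rx_ring[0] through rx_ring[rss_ring_count - 1], with rx_ring[0] also absorbing the old default ring's duties (broadcast/multicast, firmware events, errors), and the outbound (TX) completion rings occupy the remaining slots up to rx_ring_count. The sketch below only illustrates that layout; ql_sketch_ring_layout() is a hypothetical helper, not code from the patch, while struct ql_adapter, rss_ring_count and rx_ring_count are the driver fields used in the hunks above.

/* Hypothetical illustration of the rx_ring layout after this patch. */
static void ql_sketch_ring_layout(struct ql_adapter *qdev)
{
	int i;

	/* Inbound (RSS) completion rings now start at index 0;
	 * rx_ring[0] also handles the former default-ring work.
	 */
	for (i = 0; i < qdev->rss_ring_count; i++)
		pr_info("rx_ring[%d]: inbound (RSS) completions\n", i);

	/* Outbound (TX) completion rings follow the RSS rings. */
	for (i = qdev->rss_ring_count; i < qdev->rx_ring_count; i++)
		pr_info("rx_ring[%d]: outbound (TX) completions\n", i);
}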