author     Ron Mercer <ron.mercer@qlogic.com>    2009-08-27 07:02:09 -0400
committer  David S. Miller <davem@davemloft.net> 2009-08-29 02:22:28 -0400
commit     b2014ff8ac314f58d6542ec4ea7b576a2de21c8b (patch)
tree       86710e2716e5b36120ea27b94d2e79d4326f7477
parent     b7f1d43a2ba1b63abbb1dcd966ab1edb9f62f636 (diff)
qlge: Get rid of 'default' rx_ring type.
Currently we have three types of RX rings:

1) Default ring - services rx_ring for broadcast/multicast, handles
   firmware events, and errors.
2) TX completion ring - handles only outbound completions.
3) RSS ring - handles only inbound completions.

This patch gets rid of the default ring type and moves its functionality
into the first RSS ring. This makes better use of MSIX vectors since they
are a limited resource on some platforms.

Signed-off-by: Ron Mercer <ron.mercer@qlogic.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--  drivers/net/qlge/qlge.h          |  4
-rw-r--r--  drivers/net/qlge/qlge_dbg.c      |  2
-rw-r--r--  drivers/net/qlge/qlge_ethtool.c  | 16
-rw-r--r--  drivers/net/qlge/qlge_main.c     | 70
4 files changed, 28 insertions, 64 deletions
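For orientation before the hunks: a standalone C sketch (example counts, not driver code) of the rx_ring[] index layout the patch ends up with. The inbound (RSS) rings occupy the low indices, the outbound (TX completion) rings follow them, and RSS ring 0 absorbs the broadcast/multicast and firmware-event work the default ring used to handle.

#include <stdio.h>

/* Hypothetical helper, not part of the driver: classifies a ring index
 * under the post-patch layout.
 */
static const char *ring_role(int i, int rss_ring_count, int rx_ring_count)
{
    if (i < rss_ring_count)
        return i == 0 ? "RSS ring (also bcast/mcast + firmware events)"
                      : "RSS ring";
    if (i < rx_ring_count)
        return "TX completion ring";
    return "out of range";
}

int main(void)
{
    int rss = 4, total = 8, i;  /* e.g. 4 online CPUs */

    for (i = 0; i < total; i++)
        printf("rx_ring[%d]: %s\n", i, ring_role(i, rss, total));
    return 0;
}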
diff --git a/drivers/net/qlge/qlge.h b/drivers/net/qlge/qlge.h
index 6ed5317ab1c0..ed5dbca01bd1 100644
--- a/drivers/net/qlge/qlge.h
+++ b/drivers/net/qlge/qlge.h
@@ -1287,7 +1287,7 @@ struct rx_ring {
     u32 sbq_free_cnt;    /* free buffer desc cnt */
 
     /* Misc. handler elements. */
-    u32 type;            /* Type of queue, tx, rx, or default. */
+    u32 type;            /* Type of queue, tx, rx. */
     u32 irq;             /* Which vector this ring is assigned. */
     u32 cpu;             /* Which CPU this should run on. */
     char name[IFNAMSIZ + 5];
@@ -1486,11 +1486,9 @@ struct ql_adapter {
     struct intr_context intr_context[MAX_RX_RINGS];
 
     int tx_ring_count;    /* One per online CPU. */
-    u32 rss_ring_first_cq_id;/* index of first inbound (rss) rx_ring */
     u32 rss_ring_count;   /* One per online CPU. */
     /*
      * rx_ring_count =
-     * one default queue +
      * (CPU count * outbound completion rx_ring) +
      * (CPU count * inbound (RSS) completion rx_ring)
      */
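The revised comment implies the new accounting below; a quick self-contained check with example values (hypothetical, not driver code):

#include <assert.h>

int main(void)
{
    int cpu_cnt = 4;                /* example: 4 online CPUs */
    int tx_ring_count = cpu_cnt;    /* one per online CPU */
    int rss_ring_count = cpu_cnt;   /* one per online CPU */
    int rx_ring_count = tx_ring_count + rss_ring_count;

    assert(rx_ring_count == 8);     /* previously 9, incl. default ring */
    return 0;
}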
diff --git a/drivers/net/qlge/qlge_dbg.c b/drivers/net/qlge/qlge_dbg.c
index 40a70c36f5ae..aa88cb3f41c7 100644
--- a/drivers/net/qlge/qlge_dbg.c
+++ b/drivers/net/qlge/qlge_dbg.c
@@ -418,8 +418,6 @@ void ql_dump_qdev(struct ql_adapter *qdev)
     printk(KERN_ERR PFX "qdev->intr_count = %d.\n", qdev->intr_count);
     printk(KERN_ERR PFX "qdev->tx_ring = %p.\n",
            qdev->tx_ring);
-    printk(KERN_ERR PFX "qdev->rss_ring_first_cq_id = %d.\n",
-           qdev->rss_ring_first_cq_id);
     printk(KERN_ERR PFX "qdev->rss_ring_count = %d.\n",
            qdev->rss_ring_count);
     printk(KERN_ERR PFX "qdev->rx_ring = %p.\n", qdev->rx_ring);
diff --git a/drivers/net/qlge/qlge_ethtool.c b/drivers/net/qlge/qlge_ethtool.c
index eb6a9ee640ed..68f9bd280f86 100644
--- a/drivers/net/qlge/qlge_ethtool.c
+++ b/drivers/net/qlge/qlge_ethtool.c
@@ -49,10 +49,11 @@ static int ql_update_ring_coalescing(struct ql_adapter *qdev)
     /* Skip the default queue, and update the outbound handler
      * queues if they changed.
      */
-    cqicb = (struct cqicb *)&qdev->rx_ring[1];
+    cqicb = (struct cqicb *)&qdev->rx_ring[qdev->rss_ring_count];
     if (le16_to_cpu(cqicb->irq_delay) != qdev->tx_coalesce_usecs ||
-        le16_to_cpu(cqicb->pkt_delay) != qdev->tx_max_coalesced_frames) {
-        for (i = 1; i < qdev->rss_ring_first_cq_id; i++, rx_ring++) {
+        le16_to_cpu(cqicb->pkt_delay) !=
+        qdev->tx_max_coalesced_frames) {
+        for (i = qdev->rss_ring_count; i < qdev->rx_ring_count; i++) {
             rx_ring = &qdev->rx_ring[i];
             cqicb = (struct cqicb *)rx_ring;
             cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs);
@@ -70,12 +71,11 @@ static int ql_update_ring_coalescing(struct ql_adapter *qdev)
     }
 
     /* Update the inbound (RSS) handler queues if they changed. */
-    cqicb = (struct cqicb *)&qdev->rx_ring[qdev->rss_ring_first_cq_id];
+    cqicb = (struct cqicb *)&qdev->rx_ring[0];
     if (le16_to_cpu(cqicb->irq_delay) != qdev->rx_coalesce_usecs ||
-        le16_to_cpu(cqicb->pkt_delay) != qdev->rx_max_coalesced_frames) {
-        for (i = qdev->rss_ring_first_cq_id;
-             i <= qdev->rss_ring_first_cq_id + qdev->rss_ring_count;
-             i++) {
+        le16_to_cpu(cqicb->pkt_delay) !=
+        qdev->rx_max_coalesced_frames) {
+        for (i = 0; i < qdev->rss_ring_count; i++, rx_ring++) {
             rx_ring = &qdev->rx_ring[i];
             cqicb = (struct cqicb *)rx_ring;
             cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs);
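With rss_ring_first_cq_id gone, the two coalescing loops now walk disjoint, zero-based ranges. A minimal standalone sketch of those ranges (example counts; the real cqicb register programming is elided):

#include <stdio.h>

int main(void)
{
    int rss_ring_count = 4, rx_ring_count = 8, i;

    /* Inbound (RSS) rings occupy [0, rss_ring_count). */
    for (i = 0; i < rss_ring_count; i++)
        printf("rx_ring[%d]: apply rx coalescing values\n", i);

    /* Outbound (TX completion) rings follow at
     * [rss_ring_count, rx_ring_count).
     */
    for (i = rss_ring_count; i < rx_ring_count; i++)
        printf("rx_ring[%d]: apply tx coalescing values\n", i);
    return 0;
}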
diff --git a/drivers/net/qlge/qlge_main.c b/drivers/net/qlge/qlge_main.c
index 3a271afdd8f5..89ea9c7a58c8 100644
--- a/drivers/net/qlge/qlge_main.c
+++ b/drivers/net/qlge/qlge_main.c
@@ -370,9 +370,7 @@ static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
             cam_output = (CAM_OUT_ROUTE_NIC |
                           (qdev->
                            func << CAM_OUT_FUNC_SHIFT) |
-                          (qdev->
-                           rss_ring_first_cq_id <<
-                           CAM_OUT_CQ_ID_SHIFT));
+                          (0 << CAM_OUT_CQ_ID_SHIFT));
             if (qdev->vlgrp)
                 cam_output |= CAM_OUT_RV;
             /* route to NIC core */
@@ -1649,8 +1647,7 @@ static void ql_process_mac_rx_intr(struct ql_adapter *qdev,
 
     qdev->stats.rx_packets++;
     qdev->stats.rx_bytes += skb->len;
-    skb_record_rx_queue(skb,
-        rx_ring->cq_id - qdev->rss_ring_first_cq_id);
+    skb_record_rx_queue(skb, rx_ring->cq_id);
     if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
         if (qdev->vlgrp &&
             (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) &&
@@ -2044,7 +2041,7 @@ static irqreturn_t qlge_isr(int irq, void *dev_id)
             ql_disable_completion_interrupt(qdev,
                                             intr_context->
                                             intr);
-            if (i < qdev->rss_ring_first_cq_id)
+            if (i >= qdev->rss_ring_count)
                 queue_delayed_work_on(rx_ring->cpu,
                                       qdev->q_workqueue,
                                       &rx_ring->rx_work,
@@ -2908,28 +2905,20 @@ static void ql_resolve_queues_to_irqs(struct ql_adapter *qdev)
                 INTR_EN_TYPE_READ | INTR_EN_IHD_MASK | INTR_EN_IHD |
                 i;
 
-            if (i == 0) {
+            if (i < qdev->rss_ring_count) {
                 /*
-                 * Default queue handles bcast/mcast plus
-                 * async events.  Needs buffers.
+                 * Inbound queues handle unicast frames only.
                  */
-                intr_context->handler = qlge_isr;
-                sprintf(intr_context->name, "%s-default-queue",
-                        qdev->ndev->name);
-            } else if (i < qdev->rss_ring_first_cq_id) {
+                intr_context->handler = qlge_msix_rx_isr;
+                sprintf(intr_context->name, "%s-rx-%d",
+                        qdev->ndev->name, i);
+            } else {
                 /*
                  * Outbound queue is for outbound completions only.
                  */
                 intr_context->handler = qlge_msix_tx_isr;
                 sprintf(intr_context->name, "%s-tx-%d",
                         qdev->ndev->name, i);
-            } else {
-                /*
-                 * Inbound queues handle unicast frames only.
-                 */
-                intr_context->handler = qlge_msix_rx_isr;
-                sprintf(intr_context->name, "%s-rx-%d",
-                        qdev->ndev->name, i);
             }
         }
     } else {
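The net effect on MSI-X naming: there is no longer a dedicated "%s-default-queue" vector served by qlge_isr; the first rss_ring_count vectors get the rx handler and the rest get the tx handler. A standalone sketch of the resulting names (example counts and ifname; qlge_msix_rx_isr/qlge_msix_tx_isr are the driver's handlers, represented here by name only):

#include <stdio.h>

int main(void)
{
    const char *ifname = "eth0";
    int rss_ring_count = 4, rx_ring_count = 8, i;
    char name[32];

    for (i = 0; i < rx_ring_count; i++) {
        if (i < rss_ring_count)    /* inbound (RSS) vectors */
            snprintf(name, sizeof(name), "%s-rx-%d", ifname, i);
        else                       /* outbound completion vectors */
            snprintf(name, sizeof(name), "%s-tx-%d", ifname, i);
        printf("vector %d: %s\n", i, name);
    }
    return 0;
}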
@@ -3062,7 +3051,7 @@ static int ql_start_rss(struct ql_adapter *qdev)
 
     memset((void *)ricb, 0, sizeof(*ricb));
 
-    ricb->base_cq = qdev->rss_ring_first_cq_id | RSS_L4K;
+    ricb->base_cq = RSS_L4K;
     ricb->flags =
         (RSS_L6K | RSS_LI | RSS_LB | RSS_LM | RSS_RI4 | RSS_RI6 | RSS_RT4 |
          RSS_RT6);
@@ -3264,7 +3253,7 @@ static int ql_adapter_initialize(struct ql_adapter *qdev)
     }
 
     /* Start NAPI for the RSS queues. */
-    for (i = qdev->rss_ring_first_cq_id; i < qdev->rx_ring_count; i++) {
+    for (i = 0; i < qdev->rss_ring_count; i++) {
         QPRINTK(qdev, IFUP, DEBUG, "Enabling NAPI for rx_ring[%d].\n",
             i);
         napi_enable(&qdev->rx_ring[i].napi);
@@ -3355,7 +3344,7 @@ static int ql_adapter_down(struct ql_adapter *qdev)
          * environment.  Outbound completion processing
          * is done in interrupt context.
          */
-        if (i >= qdev->rss_ring_first_cq_id) {
+        if (i <= qdev->rss_ring_count) {
             napi_disable(&rx_ring->napi);
         } else {
             cancel_delayed_work_sync(&rx_ring->rx_work);
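One detail worth flagging in the hunk above: the new bound is `i <= qdev->rss_ring_count`, while the companion loops in this patch use `<`; as written, `<=` also sends the first outbound ring down the napi_disable() path. Whether that is intentional is not stated in the patch. A tiny standalone comparison of what each bound selects (example counts):

#include <stdio.h>

int main(void)
{
    int rss_ring_count = 4, rx_ring_count = 8, i;

    for (i = 0; i < rx_ring_count; i++)
        printf("ring %d: '<' selects %s, '<=' selects %s\n", i,
               i < rss_ring_count ? "yes" : "no",
               i <= rss_ring_count ? "yes" : "no");
    return 0;
}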
@@ -3370,7 +3359,7 @@ static int ql_adapter_down(struct ql_adapter *qdev)
 
     /* Call netif_napi_del() from common point.
      */
-    for (i = qdev->rss_ring_first_cq_id; i < qdev->rx_ring_count; i++)
+    for (i = 0; i < qdev->rss_ring_count; i++)
         netif_napi_del(&qdev->rx_ring[i].napi);
 
     ql_free_rx_buffers(qdev);
@@ -3476,8 +3465,6 @@ static int ql_configure_rings(struct ql_adapter *qdev)
     qdev->tx_ring_count = cpu_cnt;
     /* Allocate inbound completion (RSS) ring for each CPU. */
     qdev->rss_ring_count = cpu_cnt;
-    /* cq_id for the first inbound ring handler. */
-    qdev->rss_ring_first_cq_id = cpu_cnt + 1;
     /*
      * qdev->rx_ring_count:
      * Total number of rx_rings.  This includes the one
@@ -3485,7 +3472,7 @@ static int ql_configure_rings(struct ql_adapter *qdev)
      * handler rx_rings, and the number of inbound
      * completion handler rx_rings.
      */
-    qdev->rx_ring_count = qdev->tx_ring_count + qdev->rss_ring_count + 1;
+    qdev->rx_ring_count = qdev->tx_ring_count + qdev->rss_ring_count;
 
     for (i = 0; i < qdev->tx_ring_count; i++) {
         tx_ring = &qdev->tx_ring[i];
@@ -3500,7 +3487,7 @@ static int ql_configure_rings(struct ql_adapter *qdev)
          * The completion queue ID for the tx rings start
          * immediately after the default Q ID, which is zero.
          */
-        tx_ring->cq_id = i + 1;
+        tx_ring->cq_id = i + qdev->rss_ring_count;
     }
 
     for (i = 0; i < qdev->rx_ring_count; i++) {
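TX completion queue IDs now start immediately after the RSS rings rather than after a default queue at CQ 0 (the retained context comment above predates this change). A standalone sketch of the new mapping (example counts, not driver code):

#include <stdio.h>

int main(void)
{
    int rss_ring_count = 4, tx_ring_count = 4, i;

    for (i = 0; i < tx_ring_count; i++)
        printf("tx_ring[%d] completes on cq_id %d\n",
               i, i + rss_ring_count);    /* was i + 1 */
    return 0;
}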
@@ -3509,11 +3496,8 @@ static int ql_configure_rings(struct ql_adapter *qdev)
         rx_ring->qdev = qdev;
         rx_ring->cq_id = i;
         rx_ring->cpu = i % cpu_cnt;    /* CPU to run handler on. */
-        if (i == 0) {    /* Default queue at index 0. */
-            /*
-             * Default queue handles bcast/mcast plus
-             * async events.  Needs buffers.
-             */
+        if (i < qdev->rss_ring_count) {
+            /* Inbound completions (RSS) queues */
             rx_ring->cq_len = qdev->rx_ring_size;
             rx_ring->cq_size =
                 rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
@@ -3525,8 +3509,8 @@ static int ql_configure_rings(struct ql_adapter *qdev)
             rx_ring->sbq_size =
                 rx_ring->sbq_len * sizeof(__le64);
             rx_ring->sbq_buf_size = SMALL_BUFFER_SIZE * 2;
-            rx_ring->type = DEFAULT_Q;
-        } else if (i < qdev->rss_ring_first_cq_id) {
+            rx_ring->type = RX_Q;
+        } else {
             /*
              * Outbound queue handles outbound completions only.
              */
@@ -3541,22 +3525,6 @@ static int ql_configure_rings(struct ql_adapter *qdev)
             rx_ring->sbq_size = 0;
             rx_ring->sbq_buf_size = 0;
             rx_ring->type = TX_Q;
-        } else {    /* Inbound completions (RSS) queues */
-            /*
-             * Inbound queues handle unicast frames only.
-             */
-            rx_ring->cq_len = qdev->rx_ring_size;
-            rx_ring->cq_size =
-                rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
-            rx_ring->lbq_len = NUM_LARGE_BUFFERS;
-            rx_ring->lbq_size =
-                rx_ring->lbq_len * sizeof(__le64);
-            rx_ring->lbq_buf_size = LARGE_BUFFER_SIZE;
-            rx_ring->sbq_len = NUM_SMALL_BUFFERS;
-            rx_ring->sbq_size =
-                rx_ring->sbq_len * sizeof(__le64);
-            rx_ring->sbq_buf_size = SMALL_BUFFER_SIZE * 2;
-            rx_ring->type = RX_Q;
         }
     }
     return 0;