aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/net/ehea/ehea_main.c
diff options
context:
space:
mode:
authorBreno Leitao <leitao@linux.vnet.ibm.com>2010-10-05 09:16:22 -0400
committerDavid S. Miller <davem@davemloft.net>2010-10-05 23:10:32 -0400
commit5b27d42755fa6536a89f32b107fb2a53267696c2 (patch)
treea626834a26e639f0f210a735200c46ed113723e0 /drivers/net/ehea/ehea_main.c
parent9ed51657f6ea2a08582d6a9be5404b044972b7e0 (diff)
ehea: using wait queues instead of msleep on ehea_flush_sq
This patch just removes a msleep loop and changes it to a wait queue, making the code cleaner. Signed-off-by: Breno Leitao <leitao@linux.vnet.ibm.com> Acked-by: David Howells <dhowells@redhat.com> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/ehea/ehea_main.c')
-rw-r--r--drivers/net/ehea/ehea_main.c19
1 file changed, 12 insertions, 7 deletions
diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c
index 190fb691d20..7897bdf76e6 100644
--- a/drivers/net/ehea/ehea_main.c
+++ b/drivers/net/ehea/ehea_main.c
@@ -888,6 +888,7 @@ static struct ehea_cqe *ehea_proc_cqes(struct ehea_port_res *pr, int my_quota)
888 pr->queue_stopped = 0; 888 pr->queue_stopped = 0;
889 } 889 }
890 spin_unlock_irqrestore(&pr->netif_queue, flags); 890 spin_unlock_irqrestore(&pr->netif_queue, flags);
891 wake_up(&pr->port->swqe_avail_wq);
891 892
892 return cqe; 893 return cqe;
893} 894}
@@ -2652,6 +2653,8 @@ static int ehea_open(struct net_device *dev)
2652 netif_start_queue(dev); 2653 netif_start_queue(dev);
2653 } 2654 }
2654 2655
2656 init_waitqueue_head(&port->swqe_avail_wq);
2657
2655 mutex_unlock(&port->port_lock); 2658 mutex_unlock(&port->port_lock);
2656 2659
2657 return ret; 2660 return ret;
@@ -2724,13 +2727,15 @@ static void ehea_flush_sq(struct ehea_port *port)
2724 for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) { 2727 for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
2725 struct ehea_port_res *pr = &port->port_res[i]; 2728 struct ehea_port_res *pr = &port->port_res[i];
2726 int swqe_max = pr->sq_skba_size - 2 - pr->swqe_ll_count; 2729 int swqe_max = pr->sq_skba_size - 2 - pr->swqe_ll_count;
2727 int k = 0; 2730 int ret;
2728 while (atomic_read(&pr->swqe_avail) < swqe_max) { 2731
2729 msleep(5); 2732 ret = wait_event_timeout(port->swqe_avail_wq,
2730 if (++k == 20) { 2733 atomic_read(&pr->swqe_avail) >= swqe_max,
2731 ehea_error("WARNING: sq not flushed completely"); 2734 msecs_to_jiffies(100));
2732 break; 2735
2733 } 2736 if (!ret) {
2737 ehea_error("WARNING: sq not flushed completely");
2738 break;
2734 } 2739 }
2735 } 2740 }
2736} 2741}