author	Thomas Klein <osstklei@de.ibm.com>	2008-04-04 09:04:53 -0400
committer	Jeff Garzik <jgarzik@redhat.com>	2008-04-12 04:47:50 -0400
commit	44fb3126d7e6fb411775551b4653643f1d28ebe9 (patch)
tree	e7f8e112de20e501b5e73bbd38b530a77e954169 /drivers/net/ehea
parent	5a81f14330ce70bc256b624593768fd1b4170d27 (diff)
ehea: Fix DLPAR memory add support
This patch fixes two weaknesses in send/receive packet handling which may lead to kernel panics during DLPAR memory add operations.

Signed-off-by: Thomas Klein <tklein@de.ibm.com>
Signed-off-by: Jeff Garzik <jgarzik@redhat.com>
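In short, as the hunks in ehea_main.c below show: ehea_refill_rq1() now updates the RQ1 ring index only when it was actually asked to post new WQEs (nr_of_wqes > 0), and a new helper, ehea_flush_sq(), is called from ehea_rereg_mrs() to drain pending send WQEs before the queue pairs are stopped for memory re-registration.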
Diffstat (limited to 'drivers/net/ehea')
-rw-r--r--	drivers/net/ehea/ehea.h	| 3
-rw-r--r--	drivers/net/ehea/ehea_main.c	| 24
2 files changed, 24 insertions(+), 3 deletions(-)
diff --git a/drivers/net/ehea/ehea.h b/drivers/net/ehea/ehea.h
index 93b7fb246960..a8d3280923e8 100644
--- a/drivers/net/ehea/ehea.h
+++ b/drivers/net/ehea/ehea.h
@@ -40,7 +40,7 @@
 #include <asm/io.h>
 
 #define DRV_NAME	"ehea"
-#define DRV_VERSION	"EHEA_0089"
+#define DRV_VERSION	"EHEA_0090"
 
 /* eHEA capability flags */
 #define DLPAR_PORT_ADD_REM 1
@@ -371,6 +371,7 @@ struct ehea_port_res {
 	struct ehea_q_skb_arr rq2_skba;
 	struct ehea_q_skb_arr rq3_skba;
 	struct ehea_q_skb_arr sq_skba;
+	int sq_skba_size;
 	spinlock_t netif_queue;
 	int queue_stopped;
 	int swqe_refill_th;
diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c
index 07c742dd3f09..f460b623c077 100644
--- a/drivers/net/ehea/ehea_main.c
+++ b/drivers/net/ehea/ehea_main.c
@@ -349,7 +349,8 @@ static void ehea_refill_rq1(struct ehea_port_res *pr, int index, int nr_of_wqes)
 	pr->rq1_skba.os_skbs = 0;
 
 	if (unlikely(test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))) {
-		pr->rq1_skba.index = index;
+		if (nr_of_wqes > 0)
+			pr->rq1_skba.index = index;
 		pr->rq1_skba.os_skbs = fill_wqes;
 		return;
 	}
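Without the nr_of_wqes > 0 guard, a call made only to hand back outstanding skbs (nr_of_wqes == 0) while __EHEA_STOP_XFER is set would still overwrite pr->rq1_skba.index with the caller's stale index, leaving the RQ1 ring position inconsistent when transfers resume; presumably this is one of the two weaknesses the commit message refers to.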
@@ -1464,7 +1465,9 @@ static int ehea_init_port_res(struct ehea_port *port, struct ehea_port_res *pr,
 		 init_attr->act_nr_rwqes_rq2,
 		 init_attr->act_nr_rwqes_rq3);
 
-	ret = ehea_init_q_skba(&pr->sq_skba, init_attr->act_nr_send_wqes + 1);
+	pr->sq_skba_size = init_attr->act_nr_send_wqes + 1;
+
+	ret = ehea_init_q_skba(&pr->sq_skba, pr->sq_skba_size);
 	ret |= ehea_init_q_skba(&pr->rq1_skba, init_attr->act_nr_rwqes_rq1 + 1);
 	ret |= ehea_init_q_skba(&pr->rq2_skba, init_attr->act_nr_rwqes_rq2 + 1);
 	ret |= ehea_init_q_skba(&pr->rq3_skba, init_attr->act_nr_rwqes_rq3 + 1);
@@ -2621,6 +2624,22 @@ void ehea_purge_sq(struct ehea_qp *orig_qp)
 	}
 }
 
+void ehea_flush_sq(struct ehea_port *port)
+{
+	int i;
+
+	for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
+		struct ehea_port_res *pr = &port->port_res[i];
+		int swqe_max = pr->sq_skba_size - 2 - pr->swqe_ll_count;
+		int k = 0;
+		while (atomic_read(&pr->swqe_avail) < swqe_max) {
+			msleep(5);
+			if (++k == 20)
+				break;
+		}
+	}
+}
+
 int ehea_stop_qps(struct net_device *dev)
 {
 	struct ehea_port *port = netdev_priv(dev);
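The new ehea_flush_sq() polls each port's swqe_avail counter until it climbs back to swqe_max (sq_skba_size - 2 - pr->swqe_ll_count), sleeping 5 ms between checks and giving up after 20 tries, i.e. at most roughly 100 ms per queue. For readers outside kernel context, here is a minimal, hypothetical userspace analogue of that bounded drain loop; flush_queue() and msleep_approx() are invented names, and only the polling structure mirrors the patch:

	#define _POSIX_C_SOURCE 200809L
	#include <stdatomic.h>
	#include <time.h>

	/* Sleep approximately 'ms' milliseconds (stand-in for the kernel's msleep()). */
	static void msleep_approx(int ms)
	{
		struct timespec ts = {
			.tv_sec  = ms / 1000,
			.tv_nsec = (long)(ms % 1000) * 1000000L,
		};
		nanosleep(&ts, NULL);
	}

	/*
	 * Wait until 'avail' climbs back to 'max', i.e. all outstanding work
	 * has completed, but give up after 20 x 5 ms so a stuck queue cannot
	 * block the caller indefinitely -- the same bound ehea_flush_sq() uses.
	 */
	static void flush_queue(atomic_int *avail, int max)
	{
		int k = 0;

		while (atomic_load(avail) < max) {
			msleep_approx(5);
			if (++k == 20)
				break;
		}
	}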
@@ -2845,6 +2864,7 @@ static void ehea_rereg_mrs(struct work_struct *work)
 	if (dev->flags & IFF_UP) {
 		down(&port->port_lock);
 		netif_stop_queue(dev);
+		ehea_flush_sq(port);
 		ret = ehea_stop_qps(dev);
 		if (ret) {
 			up(&port->port_lock);
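The call-site ordering is the point of the fix: netif_stop_queue() prevents new transmits from being posted, ehea_flush_sq() then gives in-flight send WQEs up to ~100 ms per queue to complete, and only then are the QPs stopped -- presumably so that no send WQE still references memory regions while they are re-registered during the DLPAR memory add.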