author    Andre Detsch <adetsch@br.ibm.com>    2010-08-17 01:49:12 -0400
committer David S. Miller <davem@davemloft.net>    2010-08-19 02:50:51 -0400
commit    2928db4c3c62552d3caf9ab53ccc6f7ae9865a23 (patch)
tree      17e22bd8583f780a93001220df2b9f178b4f87f9 /drivers/net/ehea
parent    96ac4f6b326450af496e82347e207b0fca7a9090 (diff)
ehea: Fix synchronization between HW and SW send queue

When memory is added to / removed from a partition via the Memory DLPAR
mechanism, the eHEA driver has to do a couple of things to reflect the
memory change in its own IO address translation tables. This involves
stopping and restarting the HW queues. During this operation, it is
possible that the HW and SW pointers into these queues get out of sync.
This results in a situation where packets that are attached to a send
queue are not transmitted immediately, but delayed until X further
packets have been put on the queue. This patch detects such loss of
synchronization, and resets the eHEA port when needed.

Signed-off-by: Jan-Bernd Themann <themann@de.ibm.com>
Signed-off-by: Andre Detsch <adetsch@br.ibm.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
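In outline, the fix is a marker/completion handshake: post a throwaway WQE
carrying a magic cookie, then wait for its completion to come back. The
fragment below is a condensed paraphrase of the check_sqs() routine added
by the diff that follows; check_one_sq() is a hypothetical single-queue
rendering for illustration only, and it omits the immediate-data length
and swqe_avail bookkeeping of the real code:

    /* Condensed paraphrase of check_sqs() for a single send queue; the
     * real routine in the diff below loops over every SQ of the port. */
    static void check_one_sq(struct ehea_port_res *pr)
    {
        struct ehea_swqe *swqe;
        int swqe_index, k = 0;

        /* Post a marker WQE tagged with a magic cookie.  PURGE lets the
         * HW discard it; SIGNALLED_COMPLETION still forces a CQE. */
        swqe = ehea_get_swqe(pr->qp, &swqe_index);
        memset(swqe, 0, SWQE_HEADER_SIZE);
        swqe->wr_id = SWQE_RESTART_CHECK;
        swqe->tx_control |= EHEA_SWQE_PURGE |
                            EHEA_SWQE_SIGNALLED_COMPLETION |
                            EHEA_SWQE_IMM_DATA_PRESENT;
        ehea_post_swqe(pr->qp, swqe);

        /* ehea_proc_cqes() sets sq_restart_flag once it sees the cookie
         * come back.  No completion within ~500 ms means the HW and SW
         * pointers have diverged, so schedule a port reset. */
        while (pr->sq_restart_flag == 0) {
            msleep(5);
            if (++k == 100) {
                ehea_schedule_port_reset(pr->port);
                return;
            }
        }
    }

The matching half of the handshake sits in ehea_proc_cqes(), which compares
cqe->wr_id against the SWQE_RESTART_CHECK cookie and sets sq_restart_flag
before the normal completion handling runs.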
Diffstat (limited to 'drivers/net/ehea')
-rw-r--r--  drivers/net/ehea/ehea.h      |  3 +-
-rw-r--r--  drivers/net/ehea/ehea_main.c | 60 ++++++++++++++++++++++++++-
2 files changed, 61 insertions(+), 2 deletions(-)
diff --git a/drivers/net/ehea/ehea.h b/drivers/net/ehea/ehea.h
index 0060e422f171..2ce67f6152cd 100644
--- a/drivers/net/ehea/ehea.h
+++ b/drivers/net/ehea/ehea.h
@@ -40,7 +40,7 @@
 #include <asm/io.h>
 
 #define DRV_NAME        "ehea"
-#define DRV_VERSION     "EHEA_0105"
+#define DRV_VERSION     "EHEA_0106"
 
 /* eHEA capability flags */
 #define DLPAR_PORT_ADD_REM 1
@@ -400,6 +400,7 @@ struct ehea_port_res {
         u32 poll_counter;
         struct net_lro_mgr lro_mgr;
         struct net_lro_desc lro_desc[MAX_LRO_DESCRIPTORS];
+        int sq_restart_flag;
 };
 
 
diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c
index 3beba70b7dea..adb5994c125f 100644
--- a/drivers/net/ehea/ehea_main.c
+++ b/drivers/net/ehea/ehea_main.c
@@ -776,6 +776,53 @@ static int ehea_proc_rwqes(struct net_device *dev,
         return processed;
 }
 
+#define SWQE_RESTART_CHECK 0xdeadbeaff00d0000ull
+
+static void reset_sq_restart_flag(struct ehea_port *port)
+{
+        int i;
+
+        for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
+                struct ehea_port_res *pr = &port->port_res[i];
+                pr->sq_restart_flag = 0;
+        }
+}
+
+static void check_sqs(struct ehea_port *port)
+{
+        struct ehea_swqe *swqe;
+        int swqe_index;
+        int i, k;
+
+        for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
+                struct ehea_port_res *pr = &port->port_res[i];
+                k = 0;
+                swqe = ehea_get_swqe(pr->qp, &swqe_index);
+                memset(swqe, 0, SWQE_HEADER_SIZE);
+                atomic_dec(&pr->swqe_avail);
+
+                swqe->tx_control |= EHEA_SWQE_PURGE;
+                swqe->wr_id = SWQE_RESTART_CHECK;
+                swqe->tx_control |= EHEA_SWQE_SIGNALLED_COMPLETION;
+                swqe->tx_control |= EHEA_SWQE_IMM_DATA_PRESENT;
+                swqe->immediate_data_length = 80;
+
+                ehea_post_swqe(pr->qp, swqe);
+
+                while (pr->sq_restart_flag == 0) {
+                        msleep(5);
+                        if (++k == 100) {
+                                ehea_error("HW/SW queues out of sync");
+                                ehea_schedule_port_reset(pr->port);
+                                return;
+                        }
+                }
+        }
+
+        return;
+}
+
+
 static struct ehea_cqe *ehea_proc_cqes(struct ehea_port_res *pr, int my_quota)
 {
         struct sk_buff *skb;
@@ -793,6 +840,13 @@ static struct ehea_cqe *ehea_proc_cqes(struct ehea_port_res *pr, int my_quota)
 
                 cqe_counter++;
                 rmb();
+
+                if (cqe->wr_id == SWQE_RESTART_CHECK) {
+                        pr->sq_restart_flag = 1;
+                        swqe_av++;
+                        break;
+                }
+
                 if (cqe->status & EHEA_CQE_STAT_ERR_MASK) {
                         ehea_error("Bad send completion status=0x%04X",
                                    cqe->status);
@@ -2675,8 +2729,10 @@ static void ehea_flush_sq(struct ehea_port *port)
                 int k = 0;
                 while (atomic_read(&pr->swqe_avail) < swqe_max) {
                         msleep(5);
-                        if (++k == 20)
+                        if (++k == 20) {
+                                ehea_error("WARNING: sq not flushed completely");
                                 break;
+                        }
                 }
         }
 }
@@ -2917,6 +2973,7 @@ static void ehea_rereg_mrs(struct work_struct *work)
                         port_napi_disable(port);
                         mutex_unlock(&port->port_lock);
                 }
+                reset_sq_restart_flag(port);
         }
 
         /* Unregister old memory region */
@@ -2951,6 +3008,7 @@
                         mutex_lock(&port->port_lock);
                         port_napi_enable(port);
                         ret = ehea_restart_qps(dev);
+                        check_sqs(port);
                         if (!ret)
                                 netif_wake_queue(dev);
                         mutex_unlock(&port->port_lock);