about summary refs log tree commit diff stats
path: root/drivers/net/ehea/ehea_main.c
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/net/ehea/ehea_main.c')
-rw-r--r-- drivers/net/ehea/ehea_main.c | 60
1 files changed, 59 insertions, 1 deletions
diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c
index 897719b49f96..a333b42111b8 100644
--- a/drivers/net/ehea/ehea_main.c
+++ b/drivers/net/ehea/ehea_main.c
@@ -776,6 +776,53 @@ static int ehea_proc_rwqes(struct net_device *dev,
776 return processed; 776 return processed;
777} 777}
778 778
779#define SWQE_RESTART_CHECK 0xdeadbeaff00d0000ull
780
781static void reset_sq_restart_flag(struct ehea_port *port)
782{
783 int i;
784
785 for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
786 struct ehea_port_res *pr = &port->port_res[i];
787 pr->sq_restart_flag = 0;
788 }
789}
790
791static void check_sqs(struct ehea_port *port)
792{
793 struct ehea_swqe *swqe;
794 int swqe_index;
795 int i, k;
796
797 for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
798 struct ehea_port_res *pr = &port->port_res[i];
799 k = 0;
800 swqe = ehea_get_swqe(pr->qp, &swqe_index);
801 memset(swqe, 0, SWQE_HEADER_SIZE);
802 atomic_dec(&pr->swqe_avail);
803
804 swqe->tx_control |= EHEA_SWQE_PURGE;
805 swqe->wr_id = SWQE_RESTART_CHECK;
806 swqe->tx_control |= EHEA_SWQE_SIGNALLED_COMPLETION;
807 swqe->tx_control |= EHEA_SWQE_IMM_DATA_PRESENT;
808 swqe->immediate_data_length = 80;
809
810 ehea_post_swqe(pr->qp, swqe);
811
812 while (pr->sq_restart_flag == 0) {
813 msleep(5);
814 if (++k == 100) {
815 ehea_error("HW/SW queues out of sync");
816 ehea_schedule_port_reset(pr->port);
817 return;
818 }
819 }
820 }
821
822 return;
823}
824
825
779static struct ehea_cqe *ehea_proc_cqes(struct ehea_port_res *pr, int my_quota) 826static struct ehea_cqe *ehea_proc_cqes(struct ehea_port_res *pr, int my_quota)
780{ 827{
781 struct sk_buff *skb; 828 struct sk_buff *skb;
@@ -793,6 +840,13 @@ static struct ehea_cqe *ehea_proc_cqes(struct ehea_port_res *pr, int my_quota)
793 840
794 cqe_counter++; 841 cqe_counter++;
795 rmb(); 842 rmb();
843
844 if (cqe->wr_id == SWQE_RESTART_CHECK) {
845 pr->sq_restart_flag = 1;
846 swqe_av++;
847 break;
848 }
849
796 if (cqe->status & EHEA_CQE_STAT_ERR_MASK) { 850 if (cqe->status & EHEA_CQE_STAT_ERR_MASK) {
797 ehea_error("Bad send completion status=0x%04X", 851 ehea_error("Bad send completion status=0x%04X",
798 cqe->status); 852 cqe->status);
@@ -2675,8 +2729,10 @@ static void ehea_flush_sq(struct ehea_port *port)
2675 int k = 0; 2729 int k = 0;
2676 while (atomic_read(&pr->swqe_avail) < swqe_max) { 2730 while (atomic_read(&pr->swqe_avail) < swqe_max) {
2677 msleep(5); 2731 msleep(5);
2678 if (++k == 20) 2732 if (++k == 20) {
2733 ehea_error("WARNING: sq not flushed completely");
2679 break; 2734 break;
2735 }
2680 } 2736 }
2681 } 2737 }
2682} 2738}
@@ -2917,6 +2973,7 @@ static void ehea_rereg_mrs(struct work_struct *work)
2917 port_napi_disable(port); 2973 port_napi_disable(port);
2918 mutex_unlock(&port->port_lock); 2974 mutex_unlock(&port->port_lock);
2919 } 2975 }
2976 reset_sq_restart_flag(port);
2920 } 2977 }
2921 2978
2922 /* Unregister old memory region */ 2979 /* Unregister old memory region */
@@ -2951,6 +3008,7 @@ static void ehea_rereg_mrs(struct work_struct *work)
2951 mutex_lock(&port->port_lock); 3008 mutex_lock(&port->port_lock);
2952 port_napi_enable(port); 3009 port_napi_enable(port);
2953 ret = ehea_restart_qps(dev); 3010 ret = ehea_restart_qps(dev);
3011 check_sqs(port);
2954 if (!ret) 3012 if (!ret)
2955 netif_wake_queue(dev); 3013 netif_wake_queue(dev);
2956 mutex_unlock(&port->port_lock); 3014 mutex_unlock(&port->port_lock);