Diffstat (limited to 'drivers/net/ehea')
-rw-r--r--  drivers/net/ehea/ehea_main.c | 31
-rw-r--r--  drivers/net/ehea/ehea_qmr.c  | 43
-rw-r--r--  drivers/net/ehea/ehea_qmr.h  | 14
3 files changed, 62 insertions(+), 26 deletions(-)
diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c
index 3f445efa9482..c35d1e3631d1 100644
--- a/drivers/net/ehea/ehea_main.c
+++ b/drivers/net/ehea/ehea_main.c
@@ -791,11 +791,17 @@ static struct ehea_cqe *ehea_proc_cqes(struct ehea_port_res *pr, int my_quota)
 		cqe_counter++;
 		rmb();
 		if (cqe->status & EHEA_CQE_STAT_ERR_MASK) {
-			ehea_error("Send Completion Error: Resetting port");
+			ehea_error("Bad send completion status=0x%04X",
+				   cqe->status);
+
 			if (netif_msg_tx_err(pr->port))
 				ehea_dump(cqe, sizeof(*cqe), "Send CQE");
-			ehea_schedule_port_reset(pr->port);
-			break;
+
+			if (cqe->status & EHEA_CQE_STAT_RESET_MASK) {
+				ehea_error("Resetting port");
+				ehea_schedule_port_reset(pr->port);
+				break;
+			}
 		}
 
 		if (netif_msg_tx_done(pr->port))
@@ -901,6 +907,8 @@ static irqreturn_t ehea_qp_aff_irq_handler(int irq, void *param)
 	struct ehea_eqe *eqe;
 	struct ehea_qp *qp;
 	u32 qp_token;
+	u64 resource_type, aer, aerr;
+	int reset_port = 0;
 
 	eqe = ehea_poll_eq(port->qp_eq);
 
@@ -910,11 +918,24 @@ static irqreturn_t ehea_qp_aff_irq_handler(int irq, void *param)
 			   eqe->entry, qp_token);
 
 		qp = port->port_res[qp_token].qp;
-		ehea_error_data(port->adapter, qp->fw_handle);
+
+		resource_type = ehea_error_data(port->adapter, qp->fw_handle,
+						&aer, &aerr);
+
+		if (resource_type == EHEA_AER_RESTYPE_QP) {
+			if ((aer & EHEA_AER_RESET_MASK) ||
+			    (aerr & EHEA_AERR_RESET_MASK))
+				reset_port = 1;
+		} else
+			reset_port = 1;   /* Reset in case of CQ or EQ error */
+
 		eqe = ehea_poll_eq(port->qp_eq);
 	}
 
-	ehea_schedule_port_reset(port);
+	if (reset_port) {
+		ehea_error("Resetting port");
+		ehea_schedule_port_reset(port);
+	}
 
 	return IRQ_HANDLED;
 }
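
For illustration only, a minimal stand-alone C sketch of the reset decision this hunk introduces in ehea_qp_aff_irq_handler(): QP errors are filtered through the new AER/AERR masks, while CQ and EQ errors always trigger a port reset. The helper name needs_port_reset() and the main() harness are invented for the sketch; the constants are copied from the ehea_qmr.h hunk further below.

/* Stand-alone sketch, not driver code: mirrors the reset decision above. */
#include <stdint.h>
#include <stdio.h>

#define EHEA_AER_RESTYPE_QP   0x8
#define EHEA_AER_RESTYPE_CQ   0x4
#define EHEA_AER_RESTYPE_EQ   0x3

/* Affiliated-error bits that warrant a port reset (from ehea_qmr.h) */
#define EHEA_AER_RESET_MASK   0xFFFFFFFFFEFFFFFFULL
#define EHEA_AERR_RESET_MASK  0xFFFFFFFFFFFFFFFFULL

/* Returns nonzero when the affiliated error should reset the port. */
static int needs_port_reset(uint64_t resource_type, uint64_t aer, uint64_t aerr)
{
	if (resource_type == EHEA_AER_RESTYPE_QP)
		return (aer & EHEA_AER_RESET_MASK) ||
		       (aerr & EHEA_AERR_RESET_MASK);
	return 1;	/* reset in case of CQ or EQ error */
}

int main(void)
{
	/* A QP error whose only AER bit is masked out does not reset the port. */
	uint64_t aer = 0x0000000001000000ULL, aerr = 0;

	printf("reset=%d\n", needs_port_reset(EHEA_AER_RESTYPE_QP, aer, aerr));
	return 0;
}
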
diff --git a/drivers/net/ehea/ehea_qmr.c b/drivers/net/ehea/ehea_qmr.c
index a1b4c7e56367..89128b6373e3 100644
--- a/drivers/net/ehea/ehea_qmr.c
+++ b/drivers/net/ehea/ehea_qmr.c
@@ -229,14 +229,14 @@ u64 ehea_destroy_cq_res(struct ehea_cq *cq, u64 force)
 
 int ehea_destroy_cq(struct ehea_cq *cq)
 {
-	u64 hret;
+	u64 hret, aer, aerr;
 	if (!cq)
 		return 0;
 
 	hcp_epas_dtor(&cq->epas);
 	hret = ehea_destroy_cq_res(cq, NORMAL_FREE);
 	if (hret == H_R_STATE) {
-		ehea_error_data(cq->adapter, cq->fw_handle);
+		ehea_error_data(cq->adapter, cq->fw_handle, &aer, &aerr);
 		hret = ehea_destroy_cq_res(cq, FORCE_FREE);
 	}
 
@@ -357,7 +357,7 @@ u64 ehea_destroy_eq_res(struct ehea_eq *eq, u64 force)
 
 int ehea_destroy_eq(struct ehea_eq *eq)
 {
-	u64 hret;
+	u64 hret, aer, aerr;
 	if (!eq)
 		return 0;
 
@@ -365,7 +365,7 @@ int ehea_destroy_eq(struct ehea_eq *eq)
 
 	hret = ehea_destroy_eq_res(eq, NORMAL_FREE);
 	if (hret == H_R_STATE) {
-		ehea_error_data(eq->adapter, eq->fw_handle);
+		ehea_error_data(eq->adapter, eq->fw_handle, &aer, &aerr);
 		hret = ehea_destroy_eq_res(eq, FORCE_FREE);
 	}
 
@@ -540,7 +540,7 @@ u64 ehea_destroy_qp_res(struct ehea_qp *qp, u64 force)
 
 int ehea_destroy_qp(struct ehea_qp *qp)
 {
-	u64 hret;
+	u64 hret, aer, aerr;
 	if (!qp)
 		return 0;
 
@@ -548,7 +548,7 @@ int ehea_destroy_qp(struct ehea_qp *qp)
 
 	hret = ehea_destroy_qp_res(qp, NORMAL_FREE);
 	if (hret == H_R_STATE) {
-		ehea_error_data(qp->adapter, qp->fw_handle);
+		ehea_error_data(qp->adapter, qp->fw_handle, &aer, &aerr);
 		hret = ehea_destroy_qp_res(qp, FORCE_FREE);
 	}
 
@@ -986,42 +986,45 @@ void print_error_data(u64 *data)
 	if (length > EHEA_PAGESIZE)
 		length = EHEA_PAGESIZE;
 
-	if (type == 0x8) /* Queue Pair */
+	if (type == EHEA_AER_RESTYPE_QP)
 		ehea_error("QP (resource=%llX) state: AER=0x%llX, AERR=0x%llX, "
 			   "port=%llX", resource, data[6], data[12], data[22]);
-
-	if (type == 0x4) /* Completion Queue */
+	else if (type == EHEA_AER_RESTYPE_CQ)
 		ehea_error("CQ (resource=%llX) state: AER=0x%llX", resource,
 			   data[6]);
-
-	if (type == 0x3) /* Event Queue */
+	else if (type == EHEA_AER_RESTYPE_EQ)
 		ehea_error("EQ (resource=%llX) state: AER=0x%llX", resource,
 			   data[6]);
 
 	ehea_dump(data, length, "error data");
 }
 
-void ehea_error_data(struct ehea_adapter *adapter, u64 res_handle)
+u64 ehea_error_data(struct ehea_adapter *adapter, u64 res_handle,
+		    u64 *aer, u64 *aerr)
 {
 	unsigned long ret;
 	u64 *rblock;
+	u64 type = 0;
 
 	rblock = (void *)get_zeroed_page(GFP_KERNEL);
 	if (!rblock) {
 		ehea_error("Cannot allocate rblock memory.");
-		return;
+		goto out;
 	}
 
-	ret = ehea_h_error_data(adapter->handle,
-				res_handle,
-				rblock);
+	ret = ehea_h_error_data(adapter->handle, res_handle, rblock);
 
-	if (ret == H_R_STATE)
-		ehea_error("No error data is available: %llX.", res_handle);
-	else if (ret == H_SUCCESS)
+	if (ret == H_SUCCESS) {
+		type = EHEA_BMASK_GET(ERROR_DATA_TYPE, rblock[2]);
+		*aer = rblock[6];
+		*aerr = rblock[12];
 		print_error_data(rblock);
-	else
+	} else if (ret == H_R_STATE) {
+		ehea_error("No error data available: %llX.", res_handle);
+	} else
 		ehea_error("Error data could not be fetched: %llX", res_handle);
 
 	free_page((unsigned long)rblock);
+out:
+	return type;
 }
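
As a quick reference for the named resource types that ehea_error_data() now reports, here is a hedged, stand-alone sketch of the per-type dispatch used by print_error_data(). printf() stands in for ehea_error(), the data[] indices (6, 12, 22) match the driver code above, and the function name report_error() plus the sample buffer in main() are invented for the example.

/* Stand-alone sketch of the resource-type dispatch in print_error_data(). */
#include <stdio.h>
#include <stdint.h>

#define EHEA_AER_RESTYPE_QP 0x8
#define EHEA_AER_RESTYPE_CQ 0x4
#define EHEA_AER_RESTYPE_EQ 0x3

/* Prints the per-resource error summary, as the driver does via ehea_error(). */
static void report_error(uint64_t type, uint64_t resource, const uint64_t *data)
{
	if (type == EHEA_AER_RESTYPE_QP)
		printf("QP (resource=%llX) state: AER=0x%llX, AERR=0x%llX, port=%llX\n",
		       (unsigned long long)resource, (unsigned long long)data[6],
		       (unsigned long long)data[12], (unsigned long long)data[22]);
	else if (type == EHEA_AER_RESTYPE_CQ)
		printf("CQ (resource=%llX) state: AER=0x%llX\n",
		       (unsigned long long)resource, (unsigned long long)data[6]);
	else if (type == EHEA_AER_RESTYPE_EQ)
		printf("EQ (resource=%llX) state: AER=0x%llX\n",
		       (unsigned long long)resource, (unsigned long long)data[6]);
}

int main(void)
{
	uint64_t rblock[23] = { 0 };	/* stand-in for the firmware error block */

	rblock[6] = 0x1;		/* AER */
	rblock[12] = 0x2;		/* AERR */
	rblock[22] = 0x0;		/* logical port */
	report_error(EHEA_AER_RESTYPE_QP, 0xdeadbeef, rblock);
	return 0;
}
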
diff --git a/drivers/net/ehea/ehea_qmr.h b/drivers/net/ehea/ehea_qmr.h
index 0817c1e74a19..882c50c9c34f 100644
--- a/drivers/net/ehea/ehea_qmr.h
+++ b/drivers/net/ehea/ehea_qmr.h
@@ -154,6 +154,9 @@ struct ehea_rwqe {
 #define EHEA_CQE_STAT_ERR_IP           0x2000
 #define EHEA_CQE_STAT_ERR_CRC          0x1000
 
+/* Defines which bad send cqe stati lead to a port reset */
+#define EHEA_CQE_STAT_RESET_MASK       0x0002
+
 struct ehea_cqe {
 	u64 wr_id;		/* work request ID from WQE */
 	u8 type;
@@ -187,6 +190,14 @@ struct ehea_cqe {
 #define EHEA_EQE_SM_MECH_NUMBER   EHEA_BMASK_IBM(48, 55)
 #define EHEA_EQE_SM_PORT_NUMBER   EHEA_BMASK_IBM(56, 63)
 
+#define EHEA_AER_RESTYPE_QP  0x8
+#define EHEA_AER_RESTYPE_CQ  0x4
+#define EHEA_AER_RESTYPE_EQ  0x3
+
+/* Defines which affiliated errors lead to a port reset */
+#define EHEA_AER_RESET_MASK   0xFFFFFFFFFEFFFFFFULL
+#define EHEA_AERR_RESET_MASK  0xFFFFFFFFFFFFFFFFULL
+
 struct ehea_eqe {
 	u64 entry;
 };
@@ -379,7 +390,8 @@ int ehea_gen_smr(struct ehea_adapter *adapter, struct ehea_mr *old_mr,
 
 int ehea_rem_mr(struct ehea_mr *mr);
 
-void ehea_error_data(struct ehea_adapter *adapter, u64 res_handle);
+u64 ehea_error_data(struct ehea_adapter *adapter, u64 res_handle,
+		    u64 *aer, u64 *aerr);
 
 int ehea_add_sect_bmap(unsigned long pfn, unsigned long nr_pages);
 int ehea_rem_sect_bmap(unsigned long pfn, unsigned long nr_pages);