author	Grant Likely <grant.likely@secretlab.ca>	2010-05-22 02:36:56 -0400
committer	Grant Likely <grant.likely@secretlab.ca>	2010-05-22 02:36:56 -0400
commit	cf9b59e9d3e008591d1f54830f570982bb307a0d (patch)
tree	113478ce8fd8c832ba726ffdf59b82cb46356476 /drivers/net/ehea
parent	44504b2bebf8b5823c59484e73096a7d6574471d (diff)
parent	f4b87dee923342505e1ddba8d34ce9de33e75050 (diff)

Merge remote branch 'origin' into secretlab/next-devicetree

Merging in current state of Linus' tree to deal with merge conflicts
and build failures in vio.c after merge.

Conflicts:
	drivers/i2c/busses/i2c-cpm.c
	drivers/i2c/busses/i2c-mpc.c
	drivers/net/gianfar.c

Also fixed up one line in arch/powerpc/kernel/vio.c to use the correct
node pointer.

Signed-off-by: Grant Likely <grant.likely@secretlab.ca>
Diffstat (limited to 'drivers/net/ehea')
-rw-r--r--	drivers/net/ehea/ehea.h      |  2
-rw-r--r--	drivers/net/ehea/ehea_main.c | 78
-rw-r--r--	drivers/net/ehea/ehea_qmr.c  | 43
-rw-r--r--	drivers/net/ehea/ehea_qmr.h  | 14
4 files changed, 89 insertions, 48 deletions
diff --git a/drivers/net/ehea/ehea.h b/drivers/net/ehea/ehea.h
index fa311a950996..0630980a2722 100644
--- a/drivers/net/ehea/ehea.h
+++ b/drivers/net/ehea/ehea.h
@@ -40,7 +40,7 @@
 #include <asm/io.h>
 
 #define DRV_NAME	"ehea"
-#define DRV_VERSION	"EHEA_0102"
+#define DRV_VERSION	"EHEA_0103"
 
 /* eHEA capability flags */
 #define DLPAR_PORT_ADD_REM 1
diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c
index b23173864c60..f547894ff48f 100644
--- a/drivers/net/ehea/ehea_main.c
+++ b/drivers/net/ehea/ehea_main.c
@@ -794,11 +794,17 @@ static struct ehea_cqe *ehea_proc_cqes(struct ehea_port_res *pr, int my_quota)
 		cqe_counter++;
 		rmb();
 		if (cqe->status & EHEA_CQE_STAT_ERR_MASK) {
-			ehea_error("Send Completion Error: Resetting port");
+			ehea_error("Bad send completion status=0x%04X",
+				   cqe->status);
+
 			if (netif_msg_tx_err(pr->port))
 				ehea_dump(cqe, sizeof(*cqe), "Send CQE");
-			ehea_schedule_port_reset(pr->port);
-			break;
+
+			if (cqe->status & EHEA_CQE_STAT_RESET_MASK) {
+				ehea_error("Resetting port");
+				ehea_schedule_port_reset(pr->port);
+				break;
+			}
 		}
 
 		if (netif_msg_tx_done(pr->port))
@@ -817,7 +823,7 @@ static struct ehea_cqe *ehea_proc_cqes(struct ehea_port_res *pr, int my_quota)
 		quota--;
 
 		cqe = ehea_poll_cq(send_cq);
-	};
+	}
 
 	ehea_update_feca(send_cq, cqe_counter);
 	atomic_add(swqe_av, &pr->swqe_avail);
@@ -904,6 +910,8 @@ static irqreturn_t ehea_qp_aff_irq_handler(int irq, void *param)
 	struct ehea_eqe *eqe;
 	struct ehea_qp *qp;
 	u32 qp_token;
+	u64 resource_type, aer, aerr;
+	int reset_port = 0;
 
 	eqe = ehea_poll_eq(port->qp_eq);
 
@@ -913,11 +921,24 @@ static irqreturn_t ehea_qp_aff_irq_handler(int irq, void *param)
 			   eqe->entry, qp_token);
 
 		qp = port->port_res[qp_token].qp;
-		ehea_error_data(port->adapter, qp->fw_handle);
+
+		resource_type = ehea_error_data(port->adapter, qp->fw_handle,
+						&aer, &aerr);
+
+		if (resource_type == EHEA_AER_RESTYPE_QP) {
+			if ((aer & EHEA_AER_RESET_MASK) ||
+			    (aerr & EHEA_AERR_RESET_MASK))
+				reset_port = 1;
+		} else
+			reset_port = 1;   /* Reset in case of CQ or EQ error */
+
 		eqe = ehea_poll_eq(port->qp_eq);
 	}
 
-	ehea_schedule_port_reset(port);
+	if (reset_port) {
+		ehea_error("Resetting port");
+		ehea_schedule_port_reset(port);
+	}
 
 	return IRQ_HANDLED;
 }
@@ -1621,7 +1642,7 @@ static void write_swqe2_TSO(struct sk_buff *skb,
 {
 	struct ehea_vsgentry *sg1entry = &swqe->u.immdata_desc.sg_entry;
 	u8 *imm_data = &swqe->u.immdata_desc.immediate_data[0];
-	int skb_data_size = skb->len - skb->data_len;
+	int skb_data_size = skb_headlen(skb);
 	int headersize;
 
 	/* Packet is TCP with TSO enabled */
@@ -1632,7 +1653,7 @@ static void write_swqe2_TSO(struct sk_buff *skb,
 	 */
 	headersize = ETH_HLEN + ip_hdrlen(skb) + tcp_hdrlen(skb);
 
-	skb_data_size = skb->len - skb->data_len;
+	skb_data_size = skb_headlen(skb);
 
 	if (skb_data_size >= headersize) {
 		/* copy immediate data */
@@ -1654,7 +1675,7 @@ static void write_swqe2_TSO(struct sk_buff *skb,
 static void write_swqe2_nonTSO(struct sk_buff *skb,
 			       struct ehea_swqe *swqe, u32 lkey)
 {
-	int skb_data_size = skb->len - skb->data_len;
+	int skb_data_size = skb_headlen(skb);
 	u8 *imm_data = &swqe->u.immdata_desc.immediate_data[0];
 	struct ehea_vsgentry *sg1entry = &swqe->u.immdata_desc.sg_entry;
 
@@ -1863,7 +1884,6 @@ static void ehea_promiscuous(struct net_device *dev, int enable)
 	port->promisc = enable;
 out:
 	free_page((unsigned long)cb7);
-	return;
 }
 
 static u64 ehea_multicast_reg_helper(struct ehea_port *port, u64 mc_mac_addr,
@@ -1970,7 +1990,7 @@ static void ehea_add_multicast_entry(struct ehea_port *port, u8 *mc_mac_addr)
 static void ehea_set_multicast_list(struct net_device *dev)
 {
 	struct ehea_port *port = netdev_priv(dev);
-	struct dev_mc_list *k_mcl_entry;
+	struct netdev_hw_addr *ha;
 	int ret;
 
 	if (dev->flags & IFF_PROMISC) {
@@ -2001,13 +2021,12 @@ static void ehea_set_multicast_list(struct net_device *dev)
 			goto out;
 		}
 
-		netdev_for_each_mc_addr(k_mcl_entry, dev)
-			ehea_add_multicast_entry(port, k_mcl_entry->dmi_addr);
+		netdev_for_each_mc_addr(ha, dev)
+			ehea_add_multicast_entry(port, ha->addr);
 
 	}
 out:
 	ehea_update_bcmc_registrations();
-	return;
 }
 
 static int ehea_change_mtu(struct net_device *dev, int new_mtu)
@@ -2111,8 +2130,8 @@ static void ehea_xmit3(struct sk_buff *skb, struct net_device *dev,
 	} else {
 		/* first copy data from the skb->data buffer ... */
 		skb_copy_from_linear_data(skb, imm_data,
-					  skb->len - skb->data_len);
-		imm_data += skb->len - skb->data_len;
+					  skb_headlen(skb));
+		imm_data += skb_headlen(skb);
 
 		/* ... then copy data from the fragments */
 		for (i = 0; i < nfrags; i++) {
@@ -2223,7 +2242,7 @@ static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		}
 		spin_unlock_irqrestore(&pr->netif_queue, flags);
 	}
-	dev->trans_start = jiffies;
+	dev->trans_start = jiffies; /* NETIF_F_LLTX driver :( */
 	spin_unlock(&pr->xmit_lock);
 
 	return NETDEV_TX_OK;
@@ -2320,7 +2339,6 @@ static void ehea_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
 		ehea_error("modify_ehea_port failed");
 out:
 	free_page((unsigned long)cb1);
-	return;
 }
 
 int ehea_activate_qp(struct ehea_adapter *adapter, struct ehea_qp *qp)
@@ -2863,7 +2881,6 @@ static void ehea_reset_port(struct work_struct *work)
 	netif_wake_queue(dev);
 out:
 	mutex_unlock(&port->port_lock);
-	return;
 }
 
 static void ehea_rereg_mrs(struct work_struct *work)
@@ -2871,7 +2888,6 @@ static void ehea_rereg_mrs(struct work_struct *work)
 	int ret, i;
 	struct ehea_adapter *adapter;
 
-	mutex_lock(&dlpar_mem_lock);
 	ehea_info("LPAR memory changed - re-initializing driver");
 
 	list_for_each_entry(adapter, &adapter_list, list)
@@ -2941,7 +2957,6 @@ static void ehea_rereg_mrs(struct work_struct *work)
 	}
 	ehea_info("re-initializing driver complete");
 out:
-	mutex_unlock(&dlpar_mem_lock);
 	return;
 }
 
@@ -3241,7 +3256,7 @@ static int ehea_setup_ports(struct ehea_adapter *adapter)
 		ehea_remove_adapter_mr(adapter);
 
 		i++;
-	};
+	}
 	return 0;
 }
 
@@ -3260,7 +3275,7 @@ static struct device_node *ehea_get_eth_dn(struct ehea_adapter *adapter,
 		if (dn_log_port_id)
 			if (*dn_log_port_id == logical_port_id)
 				return eth_dn;
-	};
+	}
 
 	return NULL;
 }
@@ -3524,7 +3539,14 @@ void ehea_crash_handler(void)
3524static int ehea_mem_notifier(struct notifier_block *nb, 3539static int ehea_mem_notifier(struct notifier_block *nb,
3525 unsigned long action, void *data) 3540 unsigned long action, void *data)
3526{ 3541{
3542 int ret = NOTIFY_BAD;
3527 struct memory_notify *arg = data; 3543 struct memory_notify *arg = data;
3544
3545 if (!mutex_trylock(&dlpar_mem_lock)) {
3546 ehea_info("ehea_mem_notifier must not be called parallelized");
3547 goto out;
3548 }
3549
3528 switch (action) { 3550 switch (action) {
3529 case MEM_CANCEL_OFFLINE: 3551 case MEM_CANCEL_OFFLINE:
3530 ehea_info("memory offlining canceled"); 3552 ehea_info("memory offlining canceled");
@@ -3533,14 +3555,14 @@ static int ehea_mem_notifier(struct notifier_block *nb,
 		ehea_info("memory is going online");
 		set_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
 		if (ehea_add_sect_bmap(arg->start_pfn, arg->nr_pages))
-			return NOTIFY_BAD;
+			goto out_unlock;
 		ehea_rereg_mrs(NULL);
 		break;
 	case MEM_GOING_OFFLINE:
 		ehea_info("memory is going offline");
 		set_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
 		if (ehea_rem_sect_bmap(arg->start_pfn, arg->nr_pages))
-			return NOTIFY_BAD;
+			goto out_unlock;
 		ehea_rereg_mrs(NULL);
 		break;
 	default:
@@ -3548,8 +3570,12 @@ static int ehea_mem_notifier(struct notifier_block *nb,
 	}
 
 	ehea_update_firmware_handles();
+	ret = NOTIFY_OK;
 
-	return NOTIFY_OK;
+out_unlock:
+	mutex_unlock(&dlpar_mem_lock);
+out:
+	return ret;
 }
 
 static struct notifier_block ehea_mem_nb = {
diff --git a/drivers/net/ehea/ehea_qmr.c b/drivers/net/ehea/ehea_qmr.c
index a1b4c7e56367..89128b6373e3 100644
--- a/drivers/net/ehea/ehea_qmr.c
+++ b/drivers/net/ehea/ehea_qmr.c
@@ -229,14 +229,14 @@ u64 ehea_destroy_cq_res(struct ehea_cq *cq, u64 force)
 
 int ehea_destroy_cq(struct ehea_cq *cq)
 {
-	u64 hret;
+	u64 hret, aer, aerr;
 	if (!cq)
 		return 0;
 
 	hcp_epas_dtor(&cq->epas);
 	hret = ehea_destroy_cq_res(cq, NORMAL_FREE);
 	if (hret == H_R_STATE) {
-		ehea_error_data(cq->adapter, cq->fw_handle);
+		ehea_error_data(cq->adapter, cq->fw_handle, &aer, &aerr);
 		hret = ehea_destroy_cq_res(cq, FORCE_FREE);
 	}
 
@@ -357,7 +357,7 @@ u64 ehea_destroy_eq_res(struct ehea_eq *eq, u64 force)
 
 int ehea_destroy_eq(struct ehea_eq *eq)
 {
-	u64 hret;
+	u64 hret, aer, aerr;
 	if (!eq)
 		return 0;
 
@@ -365,7 +365,7 @@ int ehea_destroy_eq(struct ehea_eq *eq)
 
 	hret = ehea_destroy_eq_res(eq, NORMAL_FREE);
 	if (hret == H_R_STATE) {
-		ehea_error_data(eq->adapter, eq->fw_handle);
+		ehea_error_data(eq->adapter, eq->fw_handle, &aer, &aerr);
 		hret = ehea_destroy_eq_res(eq, FORCE_FREE);
 	}
 
@@ -540,7 +540,7 @@ u64 ehea_destroy_qp_res(struct ehea_qp *qp, u64 force)
 
 int ehea_destroy_qp(struct ehea_qp *qp)
 {
-	u64 hret;
+	u64 hret, aer, aerr;
 	if (!qp)
 		return 0;
 
@@ -548,7 +548,7 @@ int ehea_destroy_qp(struct ehea_qp *qp)
 
 	hret = ehea_destroy_qp_res(qp, NORMAL_FREE);
 	if (hret == H_R_STATE) {
-		ehea_error_data(qp->adapter, qp->fw_handle);
+		ehea_error_data(qp->adapter, qp->fw_handle, &aer, &aerr);
 		hret = ehea_destroy_qp_res(qp, FORCE_FREE);
 	}
 
@@ -986,42 +986,45 @@ void print_error_data(u64 *data)
 	if (length > EHEA_PAGESIZE)
 		length = EHEA_PAGESIZE;
 
-	if (type == 0x8) /* Queue Pair */
+	if (type == EHEA_AER_RESTYPE_QP)
 		ehea_error("QP (resource=%llX) state: AER=0x%llX, AERR=0x%llX, "
 			   "port=%llX", resource, data[6], data[12], data[22]);
-
-	if (type == 0x4) /* Completion Queue */
+	else if (type == EHEA_AER_RESTYPE_CQ)
 		ehea_error("CQ (resource=%llX) state: AER=0x%llX", resource,
 			   data[6]);
-
-	if (type == 0x3) /* Event Queue */
+	else if (type == EHEA_AER_RESTYPE_EQ)
 		ehea_error("EQ (resource=%llX) state: AER=0x%llX", resource,
 			   data[6]);
 
 	ehea_dump(data, length, "error data");
 }
 
-void ehea_error_data(struct ehea_adapter *adapter, u64 res_handle)
+u64 ehea_error_data(struct ehea_adapter *adapter, u64 res_handle,
+		    u64 *aer, u64 *aerr)
 {
 	unsigned long ret;
 	u64 *rblock;
+	u64 type = 0;
 
 	rblock = (void *)get_zeroed_page(GFP_KERNEL);
 	if (!rblock) {
 		ehea_error("Cannot allocate rblock memory.");
-		return;
+		goto out;
 	}
 
-	ret = ehea_h_error_data(adapter->handle,
-				res_handle,
-				rblock);
+	ret = ehea_h_error_data(adapter->handle, res_handle, rblock);
 
-	if (ret == H_R_STATE)
-		ehea_error("No error data is available: %llX.", res_handle);
-	else if (ret == H_SUCCESS)
+	if (ret == H_SUCCESS) {
+		type = EHEA_BMASK_GET(ERROR_DATA_TYPE, rblock[2]);
+		*aer = rblock[6];
+		*aerr = rblock[12];
 		print_error_data(rblock);
-	else
+	} else if (ret == H_R_STATE) {
+		ehea_error("No error data available: %llX.", res_handle);
+	} else
 		ehea_error("Error data could not be fetched: %llX", res_handle);
 
 	free_page((unsigned long)rblock);
+out:
+	return type;
 }
diff --git a/drivers/net/ehea/ehea_qmr.h b/drivers/net/ehea/ehea_qmr.h
index 0817c1e74a19..882c50c9c34f 100644
--- a/drivers/net/ehea/ehea_qmr.h
+++ b/drivers/net/ehea/ehea_qmr.h
@@ -154,6 +154,9 @@ struct ehea_rwqe {
 #define EHEA_CQE_STAT_ERR_IP    0x2000
 #define EHEA_CQE_STAT_ERR_CRC   0x1000
 
+/* Defines which bad send cqe stati lead to a port reset */
+#define EHEA_CQE_STAT_RESET_MASK 0x0002
+
 struct ehea_cqe {
 	u64 wr_id;		/* work request ID from WQE */
 	u8 type;
@@ -187,6 +190,14 @@ struct ehea_cqe {
 #define EHEA_EQE_SM_MECH_NUMBER EHEA_BMASK_IBM(48, 55)
 #define EHEA_EQE_SM_PORT_NUMBER EHEA_BMASK_IBM(56, 63)
 
+#define EHEA_AER_RESTYPE_QP  0x8
+#define EHEA_AER_RESTYPE_CQ  0x4
+#define EHEA_AER_RESTYPE_EQ  0x3
+
+/* Defines which affiliated errors lead to a port reset */
+#define EHEA_AER_RESET_MASK   0xFFFFFFFFFEFFFFFFULL
+#define EHEA_AERR_RESET_MASK  0xFFFFFFFFFFFFFFFFULL
+
 struct ehea_eqe {
 	u64 entry;
 };
@@ -379,7 +390,8 @@ int ehea_gen_smr(struct ehea_adapter *adapter, struct ehea_mr *old_mr,
 
 int ehea_rem_mr(struct ehea_mr *mr);
 
-void ehea_error_data(struct ehea_adapter *adapter, u64 res_handle);
+u64 ehea_error_data(struct ehea_adapter *adapter, u64 res_handle,
+		    u64 *aer, u64 *aerr);
 
 int ehea_add_sect_bmap(unsigned long pfn, unsigned long nr_pages);
 int ehea_rem_sect_bmap(unsigned long pfn, unsigned long nr_pages);