author     Linus Torvalds <torvalds@linux-foundation.org>  2010-05-21 00:04:44 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2010-05-21 00:04:44 -0400
commit     f8965467f366fd18f01feafb5db10512d7b4422c (patch)
tree       3706a9cd779859271ca61b85c63a1bc3f82d626e /drivers/net/ehea
parent     a26272e5200765691e67d6780e52b32498fdb659 (diff)
parent     2ec8c6bb5d8f3a62a79f463525054bae1e3d4487 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next-2.6: (1674 commits)
  qlcnic: adding co maintainer
  ixgbe: add support for active DA cables
  ixgbe: dcb, do not tag tc_prio_control frames
  ixgbe: fix ixgbe_tx_is_paused logic
  ixgbe: always enable vlan strip/insert when DCB is enabled
  ixgbe: remove some redundant code in setting FCoE FIP filter
  ixgbe: fix wrong offset to fc_frame_header in ixgbe_fcoe_ddp
  ixgbe: fix header len when unsplit packet overflows to data buffer
  ipv6: Never schedule DAD timer on dead address
  ipv6: Use POSTDAD state
  ipv6: Use state_lock to protect ifa state
  ipv6: Replace inet6_ifaddr->dead with state
  cxgb4: notify upper drivers if the device is already up when they load
  cxgb4: keep interrupts available when the ports are brought down
  cxgb4: fix initial addition of MAC address
  cnic: Return SPQ credit to bnx2x after ring setup and shutdown.
  cnic: Convert cnic_local_flags to atomic ops.
  can: Fix SJA1000 command register writes on SMP systems
  bridge: fix build for CONFIG_SYSFS disabled
  ARCNET: Limit com20020 PCI ID matches for SOHARD cards
  ...

Fix up various conflicts with pcmcia tree drivers/net/{pcmcia/3c589_cs.c,
wireless/orinoco/orinoco_cs.c and wireless/orinoco/spectrum_cs.c} and
feature removal (Documentation/feature-removal-schedule.txt).

Also fix a non-content conflict due to pm_qos_requirement getting renamed
in the PM tree (now pm_qos_request) in net/mac80211/scan.c
Diffstat (limited to 'drivers/net/ehea')
-rw-r--r--  drivers/net/ehea/ehea.h        2
-rw-r--r--  drivers/net/ehea/ehea_main.c  78
-rw-r--r--  drivers/net/ehea/ehea_qmr.c   43
-rw-r--r--  drivers/net/ehea/ehea_qmr.h   14
4 files changed, 89 insertions, 48 deletions
diff --git a/drivers/net/ehea/ehea.h b/drivers/net/ehea/ehea.h
index fa311a95099..0630980a272 100644
--- a/drivers/net/ehea/ehea.h
+++ b/drivers/net/ehea/ehea.h
@@ -40,7 +40,7 @@
 #include <asm/io.h>
 
 #define DRV_NAME	"ehea"
-#define DRV_VERSION	"EHEA_0102"
+#define DRV_VERSION	"EHEA_0103"
 
 /* eHEA capability flags */
 #define DLPAR_PORT_ADD_REM 1
diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c
index 809ccc9ff09..02698a1c80b 100644
--- a/drivers/net/ehea/ehea_main.c
+++ b/drivers/net/ehea/ehea_main.c
@@ -791,11 +791,17 @@ static struct ehea_cqe *ehea_proc_cqes(struct ehea_port_res *pr, int my_quota)
 		cqe_counter++;
 		rmb();
 		if (cqe->status & EHEA_CQE_STAT_ERR_MASK) {
-			ehea_error("Send Completion Error: Resetting port");
+			ehea_error("Bad send completion status=0x%04X",
+				   cqe->status);
+
 			if (netif_msg_tx_err(pr->port))
 				ehea_dump(cqe, sizeof(*cqe), "Send CQE");
-			ehea_schedule_port_reset(pr->port);
-			break;
+
+			if (cqe->status & EHEA_CQE_STAT_RESET_MASK) {
+				ehea_error("Resetting port");
+				ehea_schedule_port_reset(pr->port);
+				break;
+			}
 		}
 
 		if (netif_msg_tx_done(pr->port))
@@ -814,7 +820,7 @@ static struct ehea_cqe *ehea_proc_cqes(struct ehea_port_res *pr, int my_quota)
 		quota--;
 
 		cqe = ehea_poll_cq(send_cq);
-	};
+	}
 
 	ehea_update_feca(send_cq, cqe_counter);
 	atomic_add(swqe_av, &pr->swqe_avail);
@@ -901,6 +907,8 @@ static irqreturn_t ehea_qp_aff_irq_handler(int irq, void *param)
 	struct ehea_eqe *eqe;
 	struct ehea_qp *qp;
 	u32 qp_token;
+	u64 resource_type, aer, aerr;
+	int reset_port = 0;
 
 	eqe = ehea_poll_eq(port->qp_eq);
 
@@ -910,11 +918,24 @@ static irqreturn_t ehea_qp_aff_irq_handler(int irq, void *param)
 			   eqe->entry, qp_token);
 
 		qp = port->port_res[qp_token].qp;
-		ehea_error_data(port->adapter, qp->fw_handle);
+
+		resource_type = ehea_error_data(port->adapter, qp->fw_handle,
+						&aer, &aerr);
+
+		if (resource_type == EHEA_AER_RESTYPE_QP) {
+			if ((aer & EHEA_AER_RESET_MASK) ||
+			    (aerr & EHEA_AERR_RESET_MASK))
+				reset_port = 1;
+		} else
+			reset_port = 1;   /* Reset in case of CQ or EQ error */
+
 		eqe = ehea_poll_eq(port->qp_eq);
 	}
 
-	ehea_schedule_port_reset(port);
+	if (reset_port) {
+		ehea_error("Resetting port");
+		ehea_schedule_port_reset(port);
+	}
 
 	return IRQ_HANDLED;
 }
@@ -1618,7 +1639,7 @@ static void write_swqe2_TSO(struct sk_buff *skb,
 {
 	struct ehea_vsgentry *sg1entry = &swqe->u.immdata_desc.sg_entry;
 	u8 *imm_data = &swqe->u.immdata_desc.immediate_data[0];
-	int skb_data_size = skb->len - skb->data_len;
+	int skb_data_size = skb_headlen(skb);
 	int headersize;
 
 	/* Packet is TCP with TSO enabled */
@@ -1629,7 +1650,7 @@ static void write_swqe2_TSO(struct sk_buff *skb,
 	 */
 	headersize = ETH_HLEN + ip_hdrlen(skb) + tcp_hdrlen(skb);
 
-	skb_data_size = skb->len - skb->data_len;
+	skb_data_size = skb_headlen(skb);
 
 	if (skb_data_size >= headersize) {
 		/* copy immediate data */
@@ -1651,7 +1672,7 @@ static void write_swqe2_TSO(struct sk_buff *skb,
 static void write_swqe2_nonTSO(struct sk_buff *skb,
 			       struct ehea_swqe *swqe, u32 lkey)
 {
-	int skb_data_size = skb->len - skb->data_len;
+	int skb_data_size = skb_headlen(skb);
 	u8 *imm_data = &swqe->u.immdata_desc.immediate_data[0];
 	struct ehea_vsgentry *sg1entry = &swqe->u.immdata_desc.sg_entry;
 
@@ -1860,7 +1881,6 @@ static void ehea_promiscuous(struct net_device *dev, int enable)
 	port->promisc = enable;
 out:
 	free_page((unsigned long)cb7);
-	return;
 }
 
 static u64 ehea_multicast_reg_helper(struct ehea_port *port, u64 mc_mac_addr,
@@ -1967,7 +1987,7 @@ static void ehea_add_multicast_entry(struct ehea_port *port, u8 *mc_mac_addr)
 static void ehea_set_multicast_list(struct net_device *dev)
 {
 	struct ehea_port *port = netdev_priv(dev);
-	struct dev_mc_list *k_mcl_entry;
+	struct netdev_hw_addr *ha;
 	int ret;
 
 	if (dev->flags & IFF_PROMISC) {
@@ -1998,13 +2018,12 @@ static void ehea_set_multicast_list(struct net_device *dev)
 			goto out;
 		}
 
-		netdev_for_each_mc_addr(k_mcl_entry, dev)
-			ehea_add_multicast_entry(port, k_mcl_entry->dmi_addr);
+		netdev_for_each_mc_addr(ha, dev)
+			ehea_add_multicast_entry(port, ha->addr);
 
 	}
 out:
 	ehea_update_bcmc_registrations();
-	return;
 }
 
 static int ehea_change_mtu(struct net_device *dev, int new_mtu)
@@ -2108,8 +2127,8 @@ static void ehea_xmit3(struct sk_buff *skb, struct net_device *dev,
 	} else {
 		/* first copy data from the skb->data buffer ... */
 		skb_copy_from_linear_data(skb, imm_data,
-					  skb->len - skb->data_len);
-		imm_data += skb->len - skb->data_len;
+					  skb_headlen(skb));
+		imm_data += skb_headlen(skb);
 
 		/* ... then copy data from the fragments */
 		for (i = 0; i < nfrags; i++) {
@@ -2220,7 +2239,7 @@ static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		}
 		spin_unlock_irqrestore(&pr->netif_queue, flags);
 	}
-	dev->trans_start = jiffies;
+	dev->trans_start = jiffies; /* NETIF_F_LLTX driver :( */
 	spin_unlock(&pr->xmit_lock);
 
 	return NETDEV_TX_OK;
@@ -2317,7 +2336,6 @@ static void ehea_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
 		ehea_error("modify_ehea_port failed");
 out:
 	free_page((unsigned long)cb1);
-	return;
 }
 
 int ehea_activate_qp(struct ehea_adapter *adapter, struct ehea_qp *qp)
@@ -2860,7 +2878,6 @@ static void ehea_reset_port(struct work_struct *work)
 	netif_wake_queue(dev);
 out:
 	mutex_unlock(&port->port_lock);
-	return;
 }
 
 static void ehea_rereg_mrs(struct work_struct *work)
@@ -2868,7 +2885,6 @@ static void ehea_rereg_mrs(struct work_struct *work)
 	int ret, i;
 	struct ehea_adapter *adapter;
 
-	mutex_lock(&dlpar_mem_lock);
 	ehea_info("LPAR memory changed - re-initializing driver");
 
 	list_for_each_entry(adapter, &adapter_list, list)
@@ -2938,7 +2954,6 @@ static void ehea_rereg_mrs(struct work_struct *work)
 	}
 	ehea_info("re-initializing driver complete");
 out:
-	mutex_unlock(&dlpar_mem_lock);
 	return;
 }
 
@@ -3238,7 +3253,7 @@ static int ehea_setup_ports(struct ehea_adapter *adapter)
 		ehea_remove_adapter_mr(adapter);
 
 		i++;
-	};
+	}
 	return 0;
 }
 
@@ -3257,7 +3272,7 @@ static struct device_node *ehea_get_eth_dn(struct ehea_adapter *adapter,
 		if (dn_log_port_id)
 			if (*dn_log_port_id == logical_port_id)
 				return eth_dn;
-	};
+	}
 
 	return NULL;
 }
@@ -3521,7 +3536,14 @@ void ehea_crash_handler(void)
 static int ehea_mem_notifier(struct notifier_block *nb,
 			     unsigned long action, void *data)
 {
+	int ret = NOTIFY_BAD;
 	struct memory_notify *arg = data;
+
+	if (!mutex_trylock(&dlpar_mem_lock)) {
+		ehea_info("ehea_mem_notifier must not be called parallelized");
+		goto out;
+	}
+
 	switch (action) {
 	case MEM_CANCEL_OFFLINE:
 		ehea_info("memory offlining canceled");
@@ -3530,14 +3552,14 @@ static int ehea_mem_notifier(struct notifier_block *nb,
 		ehea_info("memory is going online");
 		set_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
 		if (ehea_add_sect_bmap(arg->start_pfn, arg->nr_pages))
-			return NOTIFY_BAD;
+			goto out_unlock;
 		ehea_rereg_mrs(NULL);
 		break;
 	case MEM_GOING_OFFLINE:
 		ehea_info("memory is going offline");
 		set_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
 		if (ehea_rem_sect_bmap(arg->start_pfn, arg->nr_pages))
-			return NOTIFY_BAD;
+			goto out_unlock;
 		ehea_rereg_mrs(NULL);
 		break;
 	default:
@@ -3545,8 +3567,12 @@ static int ehea_mem_notifier(struct notifier_block *nb,
 	}
 
 	ehea_update_firmware_handles();
+	ret = NOTIFY_OK;
 
-	return NOTIFY_OK;
+out_unlock:
+	mutex_unlock(&dlpar_mem_lock);
+out:
+	return ret;
 }
 
 static struct notifier_block ehea_mem_nb = {
diff --git a/drivers/net/ehea/ehea_qmr.c b/drivers/net/ehea/ehea_qmr.c
index a1b4c7e5636..89128b6373e 100644
--- a/drivers/net/ehea/ehea_qmr.c
+++ b/drivers/net/ehea/ehea_qmr.c
@@ -229,14 +229,14 @@ u64 ehea_destroy_cq_res(struct ehea_cq *cq, u64 force)
 
 int ehea_destroy_cq(struct ehea_cq *cq)
 {
-	u64 hret;
+	u64 hret, aer, aerr;
 	if (!cq)
 		return 0;
 
 	hcp_epas_dtor(&cq->epas);
 	hret = ehea_destroy_cq_res(cq, NORMAL_FREE);
 	if (hret == H_R_STATE) {
-		ehea_error_data(cq->adapter, cq->fw_handle);
+		ehea_error_data(cq->adapter, cq->fw_handle, &aer, &aerr);
 		hret = ehea_destroy_cq_res(cq, FORCE_FREE);
 	}
 
@@ -357,7 +357,7 @@ u64 ehea_destroy_eq_res(struct ehea_eq *eq, u64 force)
 
 int ehea_destroy_eq(struct ehea_eq *eq)
 {
-	u64 hret;
+	u64 hret, aer, aerr;
 	if (!eq)
 		return 0;
 
@@ -365,7 +365,7 @@ int ehea_destroy_eq(struct ehea_eq *eq)
 
 	hret = ehea_destroy_eq_res(eq, NORMAL_FREE);
 	if (hret == H_R_STATE) {
-		ehea_error_data(eq->adapter, eq->fw_handle);
+		ehea_error_data(eq->adapter, eq->fw_handle, &aer, &aerr);
 		hret = ehea_destroy_eq_res(eq, FORCE_FREE);
 	}
 
@@ -540,7 +540,7 @@ u64 ehea_destroy_qp_res(struct ehea_qp *qp, u64 force)
 
 int ehea_destroy_qp(struct ehea_qp *qp)
 {
-	u64 hret;
+	u64 hret, aer, aerr;
 	if (!qp)
 		return 0;
 
@@ -548,7 +548,7 @@ int ehea_destroy_qp(struct ehea_qp *qp)
 
 	hret = ehea_destroy_qp_res(qp, NORMAL_FREE);
 	if (hret == H_R_STATE) {
-		ehea_error_data(qp->adapter, qp->fw_handle);
+		ehea_error_data(qp->adapter, qp->fw_handle, &aer, &aerr);
 		hret = ehea_destroy_qp_res(qp, FORCE_FREE);
 	}
 
@@ -986,42 +986,45 @@ void print_error_data(u64 *data)
 	if (length > EHEA_PAGESIZE)
 		length = EHEA_PAGESIZE;
 
-	if (type == 0x8) /* Queue Pair */
+	if (type == EHEA_AER_RESTYPE_QP)
 		ehea_error("QP (resource=%llX) state: AER=0x%llX, AERR=0x%llX, "
 			   "port=%llX", resource, data[6], data[12], data[22]);
-
-	if (type == 0x4) /* Completion Queue */
+	else if (type == EHEA_AER_RESTYPE_CQ)
 		ehea_error("CQ (resource=%llX) state: AER=0x%llX", resource,
 			   data[6]);
-
-	if (type == 0x3) /* Event Queue */
+	else if (type == EHEA_AER_RESTYPE_EQ)
 		ehea_error("EQ (resource=%llX) state: AER=0x%llX", resource,
 			   data[6]);
 
 	ehea_dump(data, length, "error data");
 }
 
-void ehea_error_data(struct ehea_adapter *adapter, u64 res_handle)
+u64 ehea_error_data(struct ehea_adapter *adapter, u64 res_handle,
+		    u64 *aer, u64 *aerr)
 {
 	unsigned long ret;
 	u64 *rblock;
+	u64 type = 0;
 
 	rblock = (void *)get_zeroed_page(GFP_KERNEL);
 	if (!rblock) {
 		ehea_error("Cannot allocate rblock memory.");
-		return;
+		goto out;
 	}
 
-	ret = ehea_h_error_data(adapter->handle,
-				res_handle,
-				rblock);
+	ret = ehea_h_error_data(adapter->handle, res_handle, rblock);
 
-	if (ret == H_R_STATE)
-		ehea_error("No error data is available: %llX.", res_handle);
-	else if (ret == H_SUCCESS)
+	if (ret == H_SUCCESS) {
+		type = EHEA_BMASK_GET(ERROR_DATA_TYPE, rblock[2]);
+		*aer = rblock[6];
+		*aerr = rblock[12];
 		print_error_data(rblock);
-	else
+	} else if (ret == H_R_STATE) {
+		ehea_error("No error data available: %llX.", res_handle);
+	} else
 		ehea_error("Error data could not be fetched: %llX", res_handle);
 
 	free_page((unsigned long)rblock);
+out:
+	return type;
 }
diff --git a/drivers/net/ehea/ehea_qmr.h b/drivers/net/ehea/ehea_qmr.h
index 0817c1e74a1..882c50c9c34 100644
--- a/drivers/net/ehea/ehea_qmr.h
+++ b/drivers/net/ehea/ehea_qmr.h
@@ -154,6 +154,9 @@ struct ehea_rwqe {
 #define EHEA_CQE_STAT_ERR_IP       0x2000
 #define EHEA_CQE_STAT_ERR_CRC      0x1000
 
+/* Defines which bad send cqe stati lead to a port reset */
+#define EHEA_CQE_STAT_RESET_MASK   0x0002
+
 struct ehea_cqe {
 	u64 wr_id;		/* work request ID from WQE */
 	u8 type;
@@ -187,6 +190,14 @@ struct ehea_cqe {
 #define EHEA_EQE_SM_MECH_NUMBER    EHEA_BMASK_IBM(48, 55)
 #define EHEA_EQE_SM_PORT_NUMBER    EHEA_BMASK_IBM(56, 63)
 
+#define EHEA_AER_RESTYPE_QP  0x8
+#define EHEA_AER_RESTYPE_CQ  0x4
+#define EHEA_AER_RESTYPE_EQ  0x3
+
+/* Defines which affiliated errors lead to a port reset */
+#define EHEA_AER_RESET_MASK   0xFFFFFFFFFEFFFFFFULL
+#define EHEA_AERR_RESET_MASK  0xFFFFFFFFFFFFFFFFULL
+
 struct ehea_eqe {
 	u64 entry;
 };
@@ -379,7 +390,8 @@ int ehea_gen_smr(struct ehea_adapter *adapter, struct ehea_mr *old_mr,
 
 int ehea_rem_mr(struct ehea_mr *mr);
 
-void ehea_error_data(struct ehea_adapter *adapter, u64 res_handle);
+u64 ehea_error_data(struct ehea_adapter *adapter, u64 res_handle,
+		    u64 *aer, u64 *aerr);
 
 int ehea_add_sect_bmap(unsigned long pfn, unsigned long nr_pages);
 int ehea_rem_sect_bmap(unsigned long pfn, unsigned long nr_pages);