path: root/drivers/net/ehea/ehea_main.c
Diffstat (limited to 'drivers/net/ehea/ehea_main.c')
 drivers/net/ehea/ehea_main.c | 69 ++++++++++++++++++++++++++++++++++++++++++++++++++-------------------
 1 file changed, 50 insertions(+), 19 deletions(-)
diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c
index b004eaba3d7b..33a41e29ec83 100644
--- a/drivers/net/ehea/ehea_main.c
+++ b/drivers/net/ehea/ehea_main.c
@@ -32,6 +32,7 @@
 #include <linux/udp.h>
 #include <linux/if.h>
 #include <linux/list.h>
+#include <linux/slab.h>
 #include <linux/if_ether.h>
 #include <linux/notifier.h>
 #include <linux/reboot.h>
@@ -790,11 +791,17 @@ static struct ehea_cqe *ehea_proc_cqes(struct ehea_port_res *pr, int my_quota)
 		cqe_counter++;
 		rmb();
 		if (cqe->status & EHEA_CQE_STAT_ERR_MASK) {
-			ehea_error("Send Completion Error: Resetting port");
+			ehea_error("Bad send completion status=0x%04X",
+				   cqe->status);
+
 			if (netif_msg_tx_err(pr->port))
 				ehea_dump(cqe, sizeof(*cqe), "Send CQE");
-			ehea_schedule_port_reset(pr->port);
-			break;
+
+			if (cqe->status & EHEA_CQE_STAT_RESET_MASK) {
+				ehea_error("Resetting port");
+				ehea_schedule_port_reset(pr->port);
+				break;
+			}
 		}
 
 		if (netif_msg_tx_done(pr->port))
@@ -900,6 +907,8 @@ static irqreturn_t ehea_qp_aff_irq_handler(int irq, void *param)
 	struct ehea_eqe *eqe;
 	struct ehea_qp *qp;
 	u32 qp_token;
+	u64 resource_type, aer, aerr;
+	int reset_port = 0;
 
 	eqe = ehea_poll_eq(port->qp_eq);
 
@@ -909,11 +918,24 @@ static irqreturn_t ehea_qp_aff_irq_handler(int irq, void *param)
 			   eqe->entry, qp_token);
 
 		qp = port->port_res[qp_token].qp;
-		ehea_error_data(port->adapter, qp->fw_handle);
+
+		resource_type = ehea_error_data(port->adapter, qp->fw_handle,
+						&aer, &aerr);
+
+		if (resource_type == EHEA_AER_RESTYPE_QP) {
+			if ((aer & EHEA_AER_RESET_MASK) ||
+			    (aerr & EHEA_AERR_RESET_MASK))
+				reset_port = 1;
+		} else
+			reset_port = 1;   /* Reset in case of CQ or EQ error */
+
 		eqe = ehea_poll_eq(port->qp_eq);
 	}
 
-	ehea_schedule_port_reset(port);
+	if (reset_port) {
+		ehea_error("Resetting port");
+		ehea_schedule_port_reset(port);
+	}
 
 	return IRQ_HANDLED;
 }
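Note on the hunk above: the new call site implies that ehea_error_data() changed from a void helper to one that returns the type of the failing resource and hands the affiliated error registers back through pointer arguments. A plausible prototype, inferred purely from this call site (the real declaration lives in the ehea headers, which are not part of this diff):

	/* Inferred from the call site above, not copied from the actual header */
	u64 ehea_error_data(struct ehea_adapter *adapter, u64 res_handle,
			    u64 *aer, u64 *aerr);

With that in hand, the interrupt handler only schedules a port reset for QP errors whose AER/AERR bits include a reset condition, or unconditionally for CQ/EQ errors, instead of resetting on every affiliated event.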
@@ -1617,7 +1639,7 @@ static void write_swqe2_TSO(struct sk_buff *skb,
 {
 	struct ehea_vsgentry *sg1entry = &swqe->u.immdata_desc.sg_entry;
 	u8 *imm_data = &swqe->u.immdata_desc.immediate_data[0];
-	int skb_data_size = skb->len - skb->data_len;
+	int skb_data_size = skb_headlen(skb);
 	int headersize;
 
 	/* Packet is TCP with TSO enabled */
@@ -1628,7 +1650,7 @@ static void write_swqe2_TSO(struct sk_buff *skb,
 	 */
 	headersize = ETH_HLEN + ip_hdrlen(skb) + tcp_hdrlen(skb);
 
-	skb_data_size = skb->len - skb->data_len;
+	skb_data_size = skb_headlen(skb);
 
 	if (skb_data_size >= headersize) {
 		/* copy immediate data */
@@ -1650,7 +1672,7 @@ static void write_swqe2_TSO(struct sk_buff *skb,
 static void write_swqe2_nonTSO(struct sk_buff *skb,
 			       struct ehea_swqe *swqe, u32 lkey)
 {
-	int skb_data_size = skb->len - skb->data_len;
+	int skb_data_size = skb_headlen(skb);
 	u8 *imm_data = &swqe->u.immdata_desc.immediate_data[0];
 	struct ehea_vsgentry *sg1entry = &swqe->u.immdata_desc.sg_entry;
 
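The skb->len - skb->data_len replacements throughout this file are purely cosmetic. skb_headlen() is the stock helper for the length of an skb's linear (non-paged) area, defined in include/linux/skbuff.h as:

	static inline unsigned int skb_headlen(const struct sk_buff *skb)
	{
		/* linear data = total length minus the length held in page fragments */
		return skb->len - skb->data_len;
	}

so the generated code is identical; only readability changes.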
@@ -1966,7 +1988,7 @@ static void ehea_add_multicast_entry(struct ehea_port *port, u8 *mc_mac_addr)
 static void ehea_set_multicast_list(struct net_device *dev)
 {
 	struct ehea_port *port = netdev_priv(dev);
-	struct dev_mc_list *k_mcl_entry;
+	struct netdev_hw_addr *ha;
 	int ret;
 
 	if (dev->flags & IFF_PROMISC) {
@@ -1997,8 +2019,8 @@ static void ehea_set_multicast_list(struct net_device *dev)
 			goto out;
 		}
 
-		netdev_for_each_mc_addr(k_mcl_entry, dev)
-			ehea_add_multicast_entry(port, k_mcl_entry->dmi_addr);
+		netdev_for_each_mc_addr(ha, dev)
+			ehea_add_multicast_entry(port, ha->addr);
 
 	}
 out:
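This hunk tracks the removal of struct dev_mc_list from the networking core: multicast addresses now live on dev->mc as struct netdev_hw_addr entries, which netdev_for_each_mc_addr() walks. The relevant part of that structure (fields abridged; see include/linux/netdevice.h for the full definition):

	struct netdev_hw_addr {
		struct list_head	list;
		unsigned char		addr[MAX_ADDR_LEN];	/* replaces dmi_addr */
		/* refcount and sync state omitted */
	};

so ha->addr is the drop-in replacement for k_mcl_entry->dmi_addr.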
@@ -2107,8 +2129,8 @@ static void ehea_xmit3(struct sk_buff *skb, struct net_device *dev,
 	} else {
 		/* first copy data from the skb->data buffer ... */
 		skb_copy_from_linear_data(skb, imm_data,
-					  skb->len - skb->data_len);
-		imm_data += skb->len - skb->data_len;
+					  skb_headlen(skb));
+		imm_data += skb_headlen(skb);
 
 		/* ... then copy data from the fragments */
 		for (i = 0; i < nfrags; i++) {
@@ -2219,7 +2241,7 @@ static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		}
 		spin_unlock_irqrestore(&pr->netif_queue, flags);
 	}
-	dev->trans_start = jiffies;
+	dev->trans_start = jiffies; /* NETIF_F_LLTX driver :( */
 	spin_unlock(&pr->xmit_lock);
 
 	return NETDEV_TX_OK;
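The added comment marks this as a NETIF_F_LLTX trans_start update: ehea advertises NETIF_F_LLTX in its feature flags (set elsewhere in this file, outside this diff), so the core never takes the TX queue lock for it and the driver both serializes transmit itself (pr->xmit_lock above) and keeps dev->trans_start current for the TX watchdog. The assumed shape of that feature setup, for context only:

	/* assumed sketch of ehea's port setup, not shown in this diff */
	dev->features |= NETIF_F_LLTX;	/* driver provides its own xmit locking */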
@@ -2867,7 +2889,6 @@ static void ehea_rereg_mrs(struct work_struct *work)
 	int ret, i;
 	struct ehea_adapter *adapter;
 
-	mutex_lock(&dlpar_mem_lock);
 	ehea_info("LPAR memory changed - re-initializing driver");
 
 	list_for_each_entry(adapter, &adapter_list, list)
@@ -2937,7 +2958,6 @@ static void ehea_rereg_mrs(struct work_struct *work)
 	}
 	ehea_info("re-initializing driver complete");
 out:
-	mutex_unlock(&dlpar_mem_lock);
 	return;
 }
 
@@ -3520,7 +3540,14 @@ void ehea_crash_handler(void)
 static int ehea_mem_notifier(struct notifier_block *nb,
 			     unsigned long action, void *data)
 {
+	int ret = NOTIFY_BAD;
 	struct memory_notify *arg = data;
+
+	if (!mutex_trylock(&dlpar_mem_lock)) {
+		ehea_info("ehea_mem_notifier must not be called parallelized");
+		goto out;
+	}
+
 	switch (action) {
 	case MEM_CANCEL_OFFLINE:
 		ehea_info("memory offlining canceled");
@@ -3529,14 +3556,14 @@ static int ehea_mem_notifier(struct notifier_block *nb,
 		ehea_info("memory is going online");
 		set_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
 		if (ehea_add_sect_bmap(arg->start_pfn, arg->nr_pages))
-			return NOTIFY_BAD;
+			goto out_unlock;
 		ehea_rereg_mrs(NULL);
 		break;
 	case MEM_GOING_OFFLINE:
 		ehea_info("memory is going offline");
 		set_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
 		if (ehea_rem_sect_bmap(arg->start_pfn, arg->nr_pages))
-			return NOTIFY_BAD;
+			goto out_unlock;
 		ehea_rereg_mrs(NULL);
 		break;
 	default:
@@ -3544,8 +3571,12 @@ static int ehea_mem_notifier(struct notifier_block *nb,
 	}
 
 	ehea_update_firmware_handles();
+	ret = NOTIFY_OK;
 
-	return NOTIFY_OK;
+out_unlock:
+	mutex_unlock(&dlpar_mem_lock);
+out:
+	return ret;
 }
 
 static struct notifier_block ehea_mem_nb = {
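Taken together, the last three hunks move ownership of dlpar_mem_lock from ehea_rereg_mrs() up into the memory notifier. The notifier grabs it with mutex_trylock(), which returns 1 on success and 0 when the mutex is already held, so a concurrent notification fails fast with NOTIFY_BAD instead of blocking inside the notifier chain; every exit after a successful trylock funnels through out_unlock so the mutex is always released. Reduced to its essentials, the pattern is:

	if (!mutex_trylock(&dlpar_mem_lock))	/* 0 means it is already locked; do not wait */
		return NOTIFY_BAD;
	/* ... update the section bitmap and re-register memory regions ... */
	mutex_unlock(&dlpar_mem_lock);
	return NOTIFY_OK;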