author:    Grant Likely <grant.likely@secretlab.ca>  2010-05-22 02:36:56 -0400
committer: Grant Likely <grant.likely@secretlab.ca>  2010-05-22 02:36:56 -0400
commit:    cf9b59e9d3e008591d1f54830f570982bb307a0d
tree:      113478ce8fd8c832ba726ffdf59b82cb46356476 /drivers/net/ehea/ehea_main.c
parent:    44504b2bebf8b5823c59484e73096a7d6574471d
parent:    f4b87dee923342505e1ddba8d34ce9de33e75050
Merge remote branch 'origin' into secretlab/next-devicetree
Merging in current state of Linus' tree to deal with merge conflicts and
build failures in vio.c after merge.
Conflicts:
	drivers/i2c/busses/i2c-cpm.c
	drivers/i2c/busses/i2c-mpc.c
	drivers/net/gianfar.c
Also fixed up one line in arch/powerpc/kernel/vio.c to use the
correct node pointer.
Signed-off-by: Grant Likely <grant.likely@secretlab.ca>
Diffstat (limited to 'drivers/net/ehea/ehea_main.c')
-rw-r--r-- | drivers/net/ehea/ehea_main.c | 78
1 file changed, 52 insertions(+), 26 deletions(-)
```diff
diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c
index b23173864c60..f547894ff48f 100644
--- a/drivers/net/ehea/ehea_main.c
+++ b/drivers/net/ehea/ehea_main.c
@@ -794,11 +794,17 @@ static struct ehea_cqe *ehea_proc_cqes(struct ehea_port_res *pr, int my_quota)
 		cqe_counter++;
 		rmb();
 		if (cqe->status & EHEA_CQE_STAT_ERR_MASK) {
-			ehea_error("Send Completion Error: Resetting port");
+			ehea_error("Bad send completion status=0x%04X",
+				   cqe->status);
+
 			if (netif_msg_tx_err(pr->port))
 				ehea_dump(cqe, sizeof(*cqe), "Send CQE");
-			ehea_schedule_port_reset(pr->port);
-			break;
+
+			if (cqe->status & EHEA_CQE_STAT_RESET_MASK) {
+				ehea_error("Resetting port");
+				ehea_schedule_port_reset(pr->port);
+				break;
+			}
 		}
 
 		if (netif_msg_tx_done(pr->port))
@@ -817,7 +823,7 @@ static struct ehea_cqe *ehea_proc_cqes(struct ehea_port_res *pr, int my_quota)
 		quota--;
 
 		cqe = ehea_poll_cq(send_cq);
-	};
+	}
 
 	ehea_update_feca(send_cq, cqe_counter);
 	atomic_add(swqe_av, &pr->swqe_avail);
@@ -904,6 +910,8 @@ static irqreturn_t ehea_qp_aff_irq_handler(int irq, void *param)
 	struct ehea_eqe *eqe;
 	struct ehea_qp *qp;
 	u32 qp_token;
+	u64 resource_type, aer, aerr;
+	int reset_port = 0;
 
 	eqe = ehea_poll_eq(port->qp_eq);
 
@@ -913,11 +921,24 @@ static irqreturn_t ehea_qp_aff_irq_handler(int irq, void *param)
 		       eqe->entry, qp_token);
 
 		qp = port->port_res[qp_token].qp;
-		ehea_error_data(port->adapter, qp->fw_handle);
+
+		resource_type = ehea_error_data(port->adapter, qp->fw_handle,
+						&aer, &aerr);
+
+		if (resource_type == EHEA_AER_RESTYPE_QP) {
+			if ((aer & EHEA_AER_RESET_MASK) ||
+			    (aerr & EHEA_AERR_RESET_MASK))
+				reset_port = 1;
+		} else
+			reset_port = 1;   /* Reset in case of CQ or EQ error */
+
 		eqe = ehea_poll_eq(port->qp_eq);
 	}
 
-	ehea_schedule_port_reset(port);
+	if (reset_port) {
+		ehea_error("Resetting port");
+		ehea_schedule_port_reset(port);
+	}
 
 	return IRQ_HANDLED;
 }
```
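The hunk above also changes the contract of ehea_error_data(): it now reports the affiliated error registers through the `aer`/`aerr` out-parameters and returns the type of the failing resource (the implementation lives in ehea_qmr.c, outside this file's diff). The reset policy it enables can be restated as a small predicate; the helper below is a hypothetical rewrite of the logic in the hunk, not code from the commit:

```c
/* Hypothetical restatement of the reset decision in the hunk above: a QP
 * error forces a port reset only when a reset bit is set in the affiliated
 * error registers, while any CQ or EQ error always resets the port.
 */
static int ehea_should_reset_port(u64 resource_type, u64 aer, u64 aerr)
{
	if (resource_type != EHEA_AER_RESTYPE_QP)
		return 1;	/* reset in case of CQ or EQ error */

	return (aer & EHEA_AER_RESET_MASK) || (aerr & EHEA_AERR_RESET_MASK);
}
```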
```diff
@@ -1621,7 +1642,7 @@ static void write_swqe2_TSO(struct sk_buff *skb,
 {
 	struct ehea_vsgentry *sg1entry = &swqe->u.immdata_desc.sg_entry;
 	u8 *imm_data = &swqe->u.immdata_desc.immediate_data[0];
-	int skb_data_size = skb->len - skb->data_len;
+	int skb_data_size = skb_headlen(skb);
 	int headersize;
 
 	/* Packet is TCP with TSO enabled */
@@ -1632,7 +1653,7 @@ static void write_swqe2_TSO(struct sk_buff *skb,
 	 */
 	headersize = ETH_HLEN + ip_hdrlen(skb) + tcp_hdrlen(skb);
 
-	skb_data_size = skb->len - skb->data_len;
+	skb_data_size = skb_headlen(skb);
 
 	if (skb_data_size >= headersize) {
 		/* copy immediate data */
@@ -1654,7 +1675,7 @@ static void write_swqe2_TSO(struct sk_buff *skb,
 static void write_swqe2_nonTSO(struct sk_buff *skb,
 			       struct ehea_swqe *swqe, u32 lkey)
 {
-	int skb_data_size = skb->len - skb->data_len;
+	int skb_data_size = skb_headlen(skb);
 	u8 *imm_data = &swqe->u.immdata_desc.immediate_data[0];
 	struct ehea_vsgentry *sg1entry = &swqe->u.immdata_desc.sg_entry;
```
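These three hunks (plus the ehea_xmit3 hunk below) replace the open-coded `skb->len - skb->data_len` with `skb_headlen()`. Behavior is unchanged: the helper in `include/linux/skbuff.h` is defined as exactly that subtraction, and it names the quantity being computed, the length of the skb's linear buffer as opposed to the bytes held in paged fragments:

```c
/* From include/linux/skbuff.h: skb->len is the total payload length and
 * skb->data_len counts only the bytes stored in paged fragments, so the
 * difference is the number of bytes in the linear buffer at skb->data.
 */
static inline unsigned int skb_headlen(const struct sk_buff *skb)
{
	return skb->len - skb->data_len;
}
```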
```diff
@@ -1863,7 +1884,6 @@ static void ehea_promiscuous(struct net_device *dev, int enable)
 	port->promisc = enable;
 out:
 	free_page((unsigned long)cb7);
-	return;
 }
 
 static u64 ehea_multicast_reg_helper(struct ehea_port *port, u64 mc_mac_addr,
@@ -1970,7 +1990,7 @@ static void ehea_add_multicast_entry(struct ehea_port *port, u8 *mc_mac_addr)
 static void ehea_set_multicast_list(struct net_device *dev)
 {
 	struct ehea_port *port = netdev_priv(dev);
-	struct dev_mc_list *k_mcl_entry;
+	struct netdev_hw_addr *ha;
 	int ret;
 
 	if (dev->flags & IFF_PROMISC) {
@@ -2001,13 +2021,12 @@ static void ehea_set_multicast_list(struct net_device *dev)
 			goto out;
 		}
 
-		netdev_for_each_mc_addr(k_mcl_entry, dev)
-			ehea_add_multicast_entry(port, k_mcl_entry->dmi_addr);
+		netdev_for_each_mc_addr(ha, dev)
+			ehea_add_multicast_entry(port, ha->addr);
 
 	}
 out:
 	ehea_update_bcmc_registrations();
-	return;
 }
 
 static int ehea_change_mtu(struct net_device *dev, int new_mtu)
```
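The two multicast hunks above come in through the merge from Linus' tree, which brought the tree-wide conversion of the device multicast list from `struct dev_mc_list` to `struct netdev_hw_addr`; the per-entry MAC moved from `dmi_addr` to `addr`. A minimal sketch of the post-conversion walk, where `handle_mc_addr()` and `register_all_mc_addrs()` are hypothetical stand-ins for driver helpers such as ehea_add_multicast_entry():

```c
#include <linux/netdevice.h>

/* Hypothetical per-address hook standing in for a driver-specific
 * registration helper such as ehea_add_multicast_entry(). */
static void handle_mc_addr(struct net_device *dev, const u8 *mac)
{
	/* program the address into the hardware filters here */
}

/* The post-conversion iteration: multicast entries are now
 * struct netdev_hw_addr, and the MAC bytes live in ha->addr
 * (the old field was k_mcl_entry->dmi_addr). */
static void register_all_mc_addrs(struct net_device *dev)
{
	struct netdev_hw_addr *ha;

	netdev_for_each_mc_addr(ha, dev)
		handle_mc_addr(dev, ha->addr);
}
```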
```diff
@@ -2111,8 +2130,8 @@ static void ehea_xmit3(struct sk_buff *skb, struct net_device *dev,
 	} else {
 		/* first copy data from the skb->data buffer ... */
 		skb_copy_from_linear_data(skb, imm_data,
-					  skb->len - skb->data_len);
-		imm_data += skb->len - skb->data_len;
+					  skb_headlen(skb));
+		imm_data += skb_headlen(skb);
 
 		/* ... then copy data from the fragments */
 		for (i = 0; i < nfrags; i++) {
@@ -2223,7 +2242,7 @@ static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		}
 		spin_unlock_irqrestore(&pr->netif_queue, flags);
 	}
-	dev->trans_start = jiffies;
+	dev->trans_start = jiffies; /* NETIF_F_LLTX driver :( */
 	spin_unlock(&pr->xmit_lock);
 
 	return NETDEV_TX_OK;
@@ -2320,7 +2339,6 @@ static void ehea_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
 		ehea_error("modify_ehea_port failed");
 out:
 	free_page((unsigned long)cb1);
-	return;
 }
 
 int ehea_activate_qp(struct ehea_adapter *adapter, struct ehea_qp *qp)
@@ -2863,7 +2881,6 @@ static void ehea_reset_port(struct work_struct *work)
 	netif_wake_queue(dev);
 out:
 	mutex_unlock(&port->port_lock);
-	return;
 }
 
 static void ehea_rereg_mrs(struct work_struct *work)
@@ -2871,7 +2888,6 @@ static void ehea_rereg_mrs(struct work_struct *work)
 	int ret, i;
 	struct ehea_adapter *adapter;
 
-	mutex_lock(&dlpar_mem_lock);
 	ehea_info("LPAR memory changed - re-initializing driver");
 
 	list_for_each_entry(adapter, &adapter_list, list)
@@ -2941,7 +2957,6 @@ static void ehea_rereg_mrs(struct work_struct *work)
 	}
 	ehea_info("re-initializing driver complete");
 out:
-	mutex_unlock(&dlpar_mem_lock);
 	return;
 }
 
@@ -3241,7 +3256,7 @@ static int ehea_setup_ports(struct ehea_adapter *adapter)
 		ehea_remove_adapter_mr(adapter);
 
 		i++;
-	};
+	}
 	return 0;
 }
 
@@ -3260,7 +3275,7 @@ static struct device_node *ehea_get_eth_dn(struct ehea_adapter *adapter,
 		if (dn_log_port_id)
 			if (*dn_log_port_id == logical_port_id)
 				return eth_dn;
-	};
+	}
 
 	return NULL;
 }
@@ -3524,7 +3539,14 @@ void ehea_crash_handler(void)
 static int ehea_mem_notifier(struct notifier_block *nb,
 			     unsigned long action, void *data)
 {
+	int ret = NOTIFY_BAD;
 	struct memory_notify *arg = data;
+
+	if (!mutex_trylock(&dlpar_mem_lock)) {
+		ehea_info("ehea_mem_notifier must not be called parallelized");
+		goto out;
+	}
+
 	switch (action) {
 	case MEM_CANCEL_OFFLINE:
 		ehea_info("memory offlining canceled");
@@ -3533,14 +3555,14 @@ static int ehea_mem_notifier(struct notifier_block *nb,
 		ehea_info("memory is going online");
 		set_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
 		if (ehea_add_sect_bmap(arg->start_pfn, arg->nr_pages))
-			return NOTIFY_BAD;
+			goto out_unlock;
 		ehea_rereg_mrs(NULL);
 		break;
 	case MEM_GOING_OFFLINE:
 		ehea_info("memory is going offline");
 		set_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
 		if (ehea_rem_sect_bmap(arg->start_pfn, arg->nr_pages))
-			return NOTIFY_BAD;
+			goto out_unlock;
 		ehea_rereg_mrs(NULL);
 		break;
 	default:
@@ -3548,8 +3570,12 @@ static int ehea_mem_notifier(struct notifier_block *nb,
 	}
 
 	ehea_update_firmware_handles();
+	ret = NOTIFY_OK;
 
-	return NOTIFY_OK;
+out_unlock:
+	mutex_unlock(&dlpar_mem_lock);
+out:
+	return ret;
 }
 
 static struct notifier_block ehea_mem_nb = {
```
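Taken together, the ehea_rereg_mrs() hunks and the notifier hunks move `dlpar_mem_lock` up one level: instead of ehea_rereg_mrs() taking the mutex around its own body, the memory-hotplug notifier now serializes the entire callback, and it uses `mutex_trylock()` so a concurrent invocation fails fast with NOTIFY_BAD rather than sleeping inside the notifier chain. A minimal, self-contained sketch of that pattern, with hypothetical names (`my_mem_lock`, `my_mem_notifier`):

```c
#include <linux/mutex.h>
#include <linux/notifier.h>

static DEFINE_MUTEX(my_mem_lock);

/* Trylock-guarded notifier: if another callback is already in flight,
 * refuse it outright instead of blocking the notifier chain. */
static int my_mem_notifier(struct notifier_block *nb,
			   unsigned long action, void *data)
{
	int ret = NOTIFY_BAD;

	if (!mutex_trylock(&my_mem_lock))
		return NOTIFY_BAD;

	/* ... act on the event; on failure, skip the success path so the
	 * mutex is still released, as the hunk above does with its
	 * out_unlock label ... */
	ret = NOTIFY_OK;

	mutex_unlock(&my_mem_lock);
	return ret;
}
```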