Diffstat (limited to 'drivers/infiniband')
-rw-r--r--  drivers/infiniband/core/iwcm.c                  |    1
-rw-r--r--  drivers/infiniband/core/mad.c                   |   35
-rw-r--r--  drivers/infiniband/core/mad_priv.h              |    3
-rw-r--r--  drivers/infiniband/core/multicast.c             |   10
-rw-r--r--  drivers/infiniband/core/sa_query.c              |    7
-rw-r--r--  drivers/infiniband/core/smi.c                   |    8
-rw-r--r--  drivers/infiniband/core/uverbs_main.c           |   10
-rw-r--r--  drivers/infiniband/hw/amso1100/c2.c             |    6
-rw-r--r--  drivers/infiniband/hw/amso1100/c2_provider.c    |   24
-rw-r--r--  drivers/infiniband/hw/cxgb3/cxio_hal.c          |    5
-rw-r--r--  drivers/infiniband/hw/cxgb3/cxio_wr.h           |    6
-rw-r--r--  drivers/infiniband/hw/cxgb3/iwch.c              |   37
-rw-r--r--  drivers/infiniband/hw/cxgb3/iwch_cm.c           |   68
-rw-r--r--  drivers/infiniband/hw/cxgb3/iwch_cm.h           |    9
-rw-r--r--  drivers/infiniband/hw/cxgb3/iwch_mem.c          |   21
-rw-r--r--  drivers/infiniband/hw/cxgb3/iwch_provider.c     |   52
-rw-r--r--  drivers/infiniband/hw/cxgb3/iwch_qp.c           |    1
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_main.c          |    8
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_reqs.c          |    6
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_sqp.c           |   47
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_file_ops.c    |    2
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_mad.c         |    2
-rw-r--r--  drivers/infiniband/hw/mlx4/main.c               |   12
-rw-r--r--  drivers/infiniband/hw/mlx4/mlx4_ib.h            |    1
-rw-r--r--  drivers/infiniband/hw/mlx4/qp.c                 |   12
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_catas.c       |    1
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_config_reg.h  |    2
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_dev.h         |    1
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_eq.c          |   17
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_main.c        |    8
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_provider.c    |    3
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_provider.h    |    1
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_qp.c          |   12
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_reset.c       |    1
-rw-r--r--  drivers/infiniband/hw/nes/nes.h                 |    2
-rw-r--r--  drivers/infiniband/hw/nes/nes_cm.c              |  128
-rw-r--r--  drivers/infiniband/hw/nes/nes_cm.h              |    2
-rw-r--r--  drivers/infiniband/hw/nes/nes_hw.c              |  767
-rw-r--r--  drivers/infiniband/hw/nes/nes_hw.h              |  103
-rw-r--r--  drivers/infiniband/hw/nes/nes_utils.c           |    5
-rw-r--r--  drivers/infiniband/hw/nes/nes_verbs.c           |  204
-rw-r--r--  drivers/infiniband/hw/nes/nes_verbs.h           |   16
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_cm.c         |    1
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_ib.c         |    1
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_main.c       |    7
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_multicast.c  |   21
46 files changed, 1212 insertions(+), 484 deletions(-)
diff --git a/drivers/infiniband/core/iwcm.c b/drivers/infiniband/core/iwcm.c
index 8f9509e1ebf7..55d093a36ae4 100644
--- a/drivers/infiniband/core/iwcm.c
+++ b/drivers/infiniband/core/iwcm.c
@@ -362,6 +362,7 @@ static void destroy_cm_id(struct iw_cm_id *cm_id)
		 * In either case, must tell the provider to reject.
		 */
		cm_id_priv->state = IW_CM_STATE_DESTROYING;
+		cm_id->device->iwcm->reject(cm_id, NULL, 0);
		break;
	case IW_CM_STATE_CONN_SENT:
	case IW_CM_STATE_DESTROYING:
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index de922a04ca2d..7522008fda86 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -2,6 +2,7 @@
 * Copyright (c) 2004-2007 Voltaire, Inc. All rights reserved.
 * Copyright (c) 2005 Intel Corporation. All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies Ltd. All rights reserved.
+ * Copyright (c) 2009 HNR Consulting. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
@@ -45,14 +46,21 @@ MODULE_DESCRIPTION("kernel IB MAD API");
MODULE_AUTHOR("Hal Rosenstock");
MODULE_AUTHOR("Sean Hefty");

+int mad_sendq_size = IB_MAD_QP_SEND_SIZE;
+int mad_recvq_size = IB_MAD_QP_RECV_SIZE;
+
+module_param_named(send_queue_size, mad_sendq_size, int, 0444);
+MODULE_PARM_DESC(send_queue_size, "Size of send queue in number of work requests");
+module_param_named(recv_queue_size, mad_recvq_size, int, 0444);
+MODULE_PARM_DESC(recv_queue_size, "Size of receive queue in number of work requests");
+
static struct kmem_cache *ib_mad_cache;

static struct list_head ib_mad_port_list;
static u32 ib_mad_client_id = 0;

/* Port list lock */
-static spinlock_t ib_mad_port_list_lock;
-
+static DEFINE_SPINLOCK(ib_mad_port_list_lock);

/* Forward declarations */
static int method_in_use(struct ib_mad_mgmt_method_table **method,
@@ -1974,7 +1982,7 @@ static void adjust_timeout(struct ib_mad_agent_private *mad_agent_priv)
	unsigned long delay;

	if (list_empty(&mad_agent_priv->wait_list)) {
-		cancel_delayed_work(&mad_agent_priv->timed_work);
+		__cancel_delayed_work(&mad_agent_priv->timed_work);
	} else {
		mad_send_wr = list_entry(mad_agent_priv->wait_list.next,
					 struct ib_mad_send_wr_private,
@@ -1983,7 +1991,7 @@ static void adjust_timeout(struct ib_mad_agent_private *mad_agent_priv)
		if (time_after(mad_agent_priv->timeout,
			       mad_send_wr->timeout)) {
			mad_agent_priv->timeout = mad_send_wr->timeout;
-			cancel_delayed_work(&mad_agent_priv->timed_work);
+			__cancel_delayed_work(&mad_agent_priv->timed_work);
			delay = mad_send_wr->timeout - jiffies;
			if ((long)delay <= 0)
				delay = 1;
@@ -2023,7 +2031,7 @@ static void wait_for_response(struct ib_mad_send_wr_private *mad_send_wr)

	/* Reschedule a work item if we have a shorter timeout */
	if (mad_agent_priv->wait_list.next == &mad_send_wr->agent_list) {
-		cancel_delayed_work(&mad_agent_priv->timed_work);
+		__cancel_delayed_work(&mad_agent_priv->timed_work);
		queue_delayed_work(mad_agent_priv->qp_info->port_priv->wq,
				   &mad_agent_priv->timed_work, delay);
	}
@@ -2736,8 +2744,8 @@ static int create_mad_qp(struct ib_mad_qp_info *qp_info,
	qp_init_attr.send_cq = qp_info->port_priv->cq;
	qp_init_attr.recv_cq = qp_info->port_priv->cq;
	qp_init_attr.sq_sig_type = IB_SIGNAL_ALL_WR;
-	qp_init_attr.cap.max_send_wr = IB_MAD_QP_SEND_SIZE;
-	qp_init_attr.cap.max_recv_wr = IB_MAD_QP_RECV_SIZE;
+	qp_init_attr.cap.max_send_wr = mad_sendq_size;
+	qp_init_attr.cap.max_recv_wr = mad_recvq_size;
	qp_init_attr.cap.max_send_sge = IB_MAD_SEND_REQ_MAX_SG;
	qp_init_attr.cap.max_recv_sge = IB_MAD_RECV_REQ_MAX_SG;
	qp_init_attr.qp_type = qp_type;
@@ -2752,8 +2760,8 @@ static int create_mad_qp(struct ib_mad_qp_info *qp_info,
		goto error;
	}
	/* Use minimum queue sizes unless the CQ is resized */
-	qp_info->send_queue.max_active = IB_MAD_QP_SEND_SIZE;
-	qp_info->recv_queue.max_active = IB_MAD_QP_RECV_SIZE;
+	qp_info->send_queue.max_active = mad_sendq_size;
+	qp_info->recv_queue.max_active = mad_recvq_size;
	return 0;

error:
@@ -2792,7 +2800,7 @@ static int ib_mad_port_open(struct ib_device *device,
	init_mad_qp(port_priv, &port_priv->qp_info[0]);
	init_mad_qp(port_priv, &port_priv->qp_info[1]);

-	cq_size = (IB_MAD_QP_SEND_SIZE + IB_MAD_QP_RECV_SIZE) * 2;
+	cq_size = (mad_sendq_size + mad_recvq_size) * 2;
	port_priv->cq = ib_create_cq(port_priv->device,
				     ib_mad_thread_completion_handler,
				     NULL, port_priv, cq_size, 0);
@@ -2984,7 +2992,11 @@ static int __init ib_mad_init_module(void)
{
	int ret;

-	spin_lock_init(&ib_mad_port_list_lock);
+	mad_recvq_size = min(mad_recvq_size, IB_MAD_QP_MAX_SIZE);
+	mad_recvq_size = max(mad_recvq_size, IB_MAD_QP_MIN_SIZE);
+
+	mad_sendq_size = min(mad_sendq_size, IB_MAD_QP_MAX_SIZE);
+	mad_sendq_size = max(mad_sendq_size, IB_MAD_QP_MIN_SIZE);

	ib_mad_cache = kmem_cache_create("ib_mad",
					 sizeof(struct ib_mad_private),
@@ -3021,4 +3033,3 @@ static void __exit ib_mad_cleanup_module(void)

module_init(ib_mad_init_module);
module_exit(ib_mad_cleanup_module);
-
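
The two module parameters introduced above are read-only after load (permissions 0444), and ib_mad_init_module() clamps whatever value the user passes into [IB_MAD_QP_MIN_SIZE, IB_MAD_QP_MAX_SIZE] (defined in mad_priv.h just below) with a min()/max() pair. A minimal sketch of the same pattern in a standalone module — all names here are illustrative, not from the patch:

#include <linux/kernel.h>
#include <linux/module.h>

#define DEMO_QP_MIN_SIZE 64
#define DEMO_QP_MAX_SIZE 8192

static int demo_sendq_size = 128;
module_param_named(send_queue_size, demo_sendq_size, int, 0444);
MODULE_PARM_DESC(send_queue_size, "Size of send queue in number of work requests");

static int __init demo_init(void)
{
	/* clamp the user-supplied value into a sane range, as mad.c now does */
	demo_sendq_size = min(demo_sendq_size, DEMO_QP_MAX_SIZE);
	demo_sendq_size = max(demo_sendq_size, DEMO_QP_MIN_SIZE);
	return 0;
}
module_init(demo_init);
MODULE_LICENSE("GPL");
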
diff --git a/drivers/infiniband/core/mad_priv.h b/drivers/infiniband/core/mad_priv.h
index 05ce331733b0..9430ab4969c5 100644
--- a/drivers/infiniband/core/mad_priv.h
+++ b/drivers/infiniband/core/mad_priv.h
@@ -2,6 +2,7 @@
 * Copyright (c) 2004, 2005, Voltaire, Inc. All rights reserved.
 * Copyright (c) 2005 Intel Corporation. All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
+ * Copyright (c) 2009 HNR Consulting. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
@@ -49,6 +50,8 @@
/* QP and CQ parameters */
#define IB_MAD_QP_SEND_SIZE	128
#define IB_MAD_QP_RECV_SIZE	512
+#define IB_MAD_QP_MIN_SIZE	64
+#define IB_MAD_QP_MAX_SIZE	8192
#define IB_MAD_SEND_REQ_MAX_SG	2
#define IB_MAD_RECV_REQ_MAX_SG	1

diff --git a/drivers/infiniband/core/multicast.c b/drivers/infiniband/core/multicast.c
index 107f170c57cd..8d82ba171353 100644
--- a/drivers/infiniband/core/multicast.c
+++ b/drivers/infiniband/core/multicast.c
@@ -106,6 +106,8 @@ struct mcast_group {
	struct ib_sa_query	*query;
	int			query_id;
	u16			pkey_index;
+	u8			leave_state;
+	int			retries;
};

struct mcast_member {
@@ -350,6 +352,7 @@ static int send_leave(struct mcast_group *group, u8 leave_state)

	rec = group->rec;
	rec.join_state = leave_state;
+	group->leave_state = leave_state;

	ret = ib_sa_mcmember_rec_query(&sa_client, port->dev->device,
				       port->port_num, IB_SA_METHOD_DELETE, &rec,
@@ -542,7 +545,11 @@ static void leave_handler(int status, struct ib_sa_mcmember_rec *rec,
{
	struct mcast_group *group = context;

-	mcast_work_handler(&group->work);
+	if (status && group->retries > 0 &&
+	    !send_leave(group, group->leave_state))
+		group->retries--;
+	else
+		mcast_work_handler(&group->work);
}

static struct mcast_group *acquire_group(struct mcast_port *port,
@@ -565,6 +572,7 @@ static struct mcast_group *acquire_group(struct mcast_port *port,
	if (!group)
		return NULL;

+	group->retries = 3;
	group->port = port;
	group->rec.mgid = *mgid;
	group->pkey_index = MCAST_INVALID_PKEY_INDEX;
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
index 1865049e80f7..82543716d59e 100644
--- a/drivers/infiniband/core/sa_query.c
+++ b/drivers/infiniband/core/sa_query.c
@@ -109,10 +109,10 @@ static struct ib_client sa_client = {
	.remove = ib_sa_remove_one
};

-static spinlock_t idr_lock;
+static DEFINE_SPINLOCK(idr_lock);
static DEFINE_IDR(query_idr);

-static spinlock_t tid_lock;
+static DEFINE_SPINLOCK(tid_lock);
static u32 tid;

#define PATH_REC_FIELD(field) \
@@ -1077,9 +1077,6 @@ static int __init ib_sa_init(void)
{
	int ret;

-	spin_lock_init(&idr_lock);
-	spin_lock_init(&tid_lock);
-
	get_random_bytes(&tid, sizeof tid);

	ret = ib_register_client(&sa_client);
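
The spinlock conversions in this patch (ib_mad_port_list_lock above, idr_lock and tid_lock here, map_lock in uverbs_main.c below) all follow one pattern: a file-scope lock that was declared and then initialized at runtime with spin_lock_init() becomes a DEFINE_SPINLOCK(), which declares and statically initializes it in one step, so the init-time call can simply be deleted. Side by side:

/* before: the lock is unusable until the module init function has run */
static spinlock_t idr_lock;

static int __init ib_sa_init(void)
{
	spin_lock_init(&idr_lock);
	/* ... */
}

/* after: initialized at compile time, nothing to do at module load */
static DEFINE_SPINLOCK(idr_lock);
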
diff --git a/drivers/infiniband/core/smi.c b/drivers/infiniband/core/smi.c
index 87236753bce9..5855e4405d9b 100644
--- a/drivers/infiniband/core/smi.c
+++ b/drivers/infiniband/core/smi.c
@@ -52,6 +52,10 @@ enum smi_action smi_handle_dr_smp_send(struct ib_smp *smp,
	hop_cnt = smp->hop_cnt;

	/* See section 14.2.2.2, Vol 1 IB spec */
+	/* C14-6 -- valid hop_cnt values are from 0 to 63 */
+	if (hop_cnt >= IB_SMP_MAX_PATH_HOPS)
+		return IB_SMI_DISCARD;
+
	if (!ib_get_smp_direction(smp)) {
		/* C14-9:1 */
		if (hop_cnt && hop_ptr == 0) {
@@ -133,6 +137,10 @@ enum smi_action smi_handle_dr_smp_recv(struct ib_smp *smp, u8 node_type,
	hop_cnt = smp->hop_cnt;

	/* See section 14.2.2.2, Vol 1 IB spec */
+	/* C14-6 -- valid hop_cnt values are from 0 to 63 */
+	if (hop_cnt >= IB_SMP_MAX_PATH_HOPS)
+		return IB_SMI_DISCARD;
+
	if (!ib_get_smp_direction(smp)) {
		/* C14-9:1 -- sender should have incremented hop_ptr */
		if (hop_cnt && hop_ptr == 0)
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index eb36a81dd09b..d3fff9e008a3 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -73,7 +73,7 @@ DEFINE_IDR(ib_uverbs_cq_idr);
DEFINE_IDR(ib_uverbs_qp_idr);
DEFINE_IDR(ib_uverbs_srq_idr);

-static spinlock_t map_lock;
+static DEFINE_SPINLOCK(map_lock);
static struct ib_uverbs_device *dev_table[IB_UVERBS_MAX_DEVICES];
static DECLARE_BITMAP(dev_map, IB_UVERBS_MAX_DEVICES);

@@ -584,14 +584,16 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,

	if (hdr.command < 0 ||
	    hdr.command >= ARRAY_SIZE(uverbs_cmd_table) ||
-	    !uverbs_cmd_table[hdr.command] ||
-	    !(file->device->ib_dev->uverbs_cmd_mask & (1ull << hdr.command)))
+	    !uverbs_cmd_table[hdr.command])
		return -EINVAL;

	if (!file->ucontext &&
	    hdr.command != IB_USER_VERBS_CMD_GET_CONTEXT)
		return -EINVAL;

+	if (!(file->device->ib_dev->uverbs_cmd_mask & (1ull << hdr.command)))
+		return -ENOSYS;
+
	return uverbs_cmd_table[hdr.command](file, buf + sizeof hdr,
					     hdr.in_words * 4, hdr.out_words * 4);
}
@@ -836,8 +838,6 @@ static int __init ib_uverbs_init(void)
{
	int ret;

-	spin_lock_init(&map_lock);
-
	ret = register_chrdev_region(IB_UVERBS_BASE_DEV, IB_UVERBS_MAX_DEVICES,
				     "infiniband_verbs");
	if (ret) {
diff --git a/drivers/infiniband/hw/amso1100/c2.c b/drivers/infiniband/hw/amso1100/c2.c
index 0cfbb6d2f762..8250740c94b0 100644
--- a/drivers/infiniband/hw/amso1100/c2.c
+++ b/drivers/infiniband/hw/amso1100/c2.c
@@ -86,11 +86,7 @@ MODULE_DEVICE_TABLE(pci, c2_pci_table);

static void c2_print_macaddr(struct net_device *netdev)
{
-	pr_debug("%s: MAC %02X:%02X:%02X:%02X:%02X:%02X, "
-		"IRQ %u\n", netdev->name,
-		netdev->dev_addr[0], netdev->dev_addr[1], netdev->dev_addr[2],
-		netdev->dev_addr[3], netdev->dev_addr[4], netdev->dev_addr[5],
-		netdev->irq);
+	pr_debug("%s: MAC %pM, IRQ %u\n", netdev->name, netdev->dev_addr, netdev->irq);
}

static void c2_set_rxbufsize(struct c2_port *c2_port)
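
%pM is the kernel's printk extension for Ethernet addresses: it consumes a pointer to a 6-byte buffer and renders it as colon-separated hex, replacing the six explicit %02X conversions deleted above. One detail worth knowing: %pM emits lowercase hex digits, so the debug output changes case relative to the old %02X format. For example:

u8 mac[6] = { 0x00, 0x07, 0x43, 0xab, 0xcd, 0xef };
unsigned int irq = 17;	/* illustrative value */

pr_debug("MAC %pM, IRQ %u\n", mac, irq);	/* prints "MAC 00:07:43:ab:cd:ef, IRQ 17" */
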
diff --git a/drivers/infiniband/hw/amso1100/c2_provider.c b/drivers/infiniband/hw/amso1100/c2_provider.c
index f1948fad85d7..ad723bd8bf49 100644
--- a/drivers/infiniband/hw/amso1100/c2_provider.c
+++ b/drivers/infiniband/hw/amso1100/c2_provider.c
@@ -780,11 +780,11 @@ int c2_register_device(struct c2_dev *dev)
	/* Register pseudo network device */
	dev->pseudo_netdev = c2_pseudo_netdev_init(dev);
	if (!dev->pseudo_netdev)
-		goto out3;
+		goto out;

	ret = register_netdev(dev->pseudo_netdev);
	if (ret)
-		goto out2;
+		goto out_free_netdev;

	pr_debug("%s:%u\n", __func__, __LINE__);
	strlcpy(dev->ibdev.name, "amso%d", IB_DEVICE_NAME_MAX);
@@ -851,6 +851,10 @@ int c2_register_device(struct c2_dev *dev)
	dev->ibdev.post_recv = c2_post_receive;

	dev->ibdev.iwcm = kmalloc(sizeof(*dev->ibdev.iwcm), GFP_KERNEL);
+	if (dev->ibdev.iwcm == NULL) {
+		ret = -ENOMEM;
+		goto out_unregister_netdev;
+	}
	dev->ibdev.iwcm->add_ref = c2_add_ref;
	dev->ibdev.iwcm->rem_ref = c2_rem_ref;
	dev->ibdev.iwcm->get_qp = c2_get_qp;
@@ -862,23 +866,25 @@ int c2_register_device(struct c2_dev *dev)

	ret = ib_register_device(&dev->ibdev);
	if (ret)
-		goto out1;
+		goto out_free_iwcm;

	for (i = 0; i < ARRAY_SIZE(c2_dev_attributes); ++i) {
		ret = device_create_file(&dev->ibdev.dev,
					 c2_dev_attributes[i]);
		if (ret)
-			goto out0;
+			goto out_unregister_ibdev;
	}
-	goto out3;
+	goto out;

-out0:
+out_unregister_ibdev:
	ib_unregister_device(&dev->ibdev);
-out1:
+out_free_iwcm:
+	kfree(dev->ibdev.iwcm);
+out_unregister_netdev:
	unregister_netdev(dev->pseudo_netdev);
-out2:
+out_free_netdev:
	free_netdev(dev->pseudo_netdev);
-out3:
+out:
	pr_debug("%s:%u ret=%d\n", __func__, __LINE__, ret);
	return ret;
}
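
The c2_provider.c relabeling (out0..out3 become out, out_free_netdev, out_unregister_netdev, out_free_iwcm, out_unregister_ibdev) is the standard kernel unwind idiom, and it is what lets the newly added kmalloc() failure check slot in cleanly: each acquisition failure jumps to the label that releases everything acquired so far, and the labels appear in reverse order of acquisition. Schematically — hypothetical helper names, not from the driver:

static int demo_register(struct demo_dev *dev)
{
	int ret;

	ret = acquire_a(dev);
	if (ret)
		goto out;
	ret = acquire_b(dev);		/* on failure, only A needs undoing */
	if (ret)
		goto out_release_a;
	ret = acquire_c(dev);		/* on failure, B then A need undoing */
	if (ret)
		goto out_release_b;
	return 0;

out_release_b:
	release_b(dev);
out_release_a:
	release_a(dev);
out:
	return ret;
}
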
diff --git a/drivers/infiniband/hw/cxgb3/cxio_hal.c b/drivers/infiniband/hw/cxgb3/cxio_hal.c
index 62f9cf2f94ec..72ed3396b721 100644
--- a/drivers/infiniband/hw/cxgb3/cxio_hal.c
+++ b/drivers/infiniband/hw/cxgb3/cxio_hal.c
@@ -852,7 +852,9 @@ int cxio_rdma_init(struct cxio_rdev *rdev_p, struct t3_rdma_init_attr *attr)
	wqe->qpcaps = attr->qpcaps;
	wqe->ulpdu_size = cpu_to_be16(attr->tcp_emss);
	wqe->rqe_count = cpu_to_be16(attr->rqe_count);
-	wqe->flags_rtr_type = cpu_to_be16(attr->flags|V_RTR_TYPE(attr->rtr_type));
+	wqe->flags_rtr_type = cpu_to_be16(attr->flags |
+					  V_RTR_TYPE(attr->rtr_type) |
+					  V_CHAN(attr->chan));
	wqe->ord = cpu_to_be32(attr->ord);
	wqe->ird = cpu_to_be32(attr->ird);
	wqe->qp_dma_addr = cpu_to_be64(attr->qp_dma_addr);
@@ -1032,6 +1034,7 @@ err3:
err2:
	cxio_hal_destroy_ctrl_qp(rdev_p);
err1:
+	rdev_p->t3cdev_p->ulp = NULL;
	list_del(&rdev_p->entry);
	return err;
}
diff --git a/drivers/infiniband/hw/cxgb3/cxio_wr.h b/drivers/infiniband/hw/cxgb3/cxio_wr.h
index 32e3b1461d81..a197a5b7ac7f 100644
--- a/drivers/infiniband/hw/cxgb3/cxio_wr.h
+++ b/drivers/infiniband/hw/cxgb3/cxio_wr.h
@@ -327,6 +327,11 @@ enum rdma_init_rtr_types {
#define V_RTR_TYPE(x)	((x) << S_RTR_TYPE)
#define G_RTR_TYPE(x)	((((x) >> S_RTR_TYPE)) & M_RTR_TYPE)

+#define S_CHAN		4
+#define M_CHAN		0x3
+#define V_CHAN(x)	((x) << S_CHAN)
+#define G_CHAN(x)	((((x) >> S_CHAN)) & M_CHAN)
+
struct t3_rdma_init_attr {
	u32 tid;
	u32 qpid;
@@ -346,6 +351,7 @@ struct t3_rdma_init_attr {
	u16 flags;
	u16 rqe_count;
	u32 irs;
+	u32 chan;
};

struct t3_rdma_init_wr {
350 356
351struct t3_rdma_init_wr { 357struct t3_rdma_init_wr {
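
The S_/M_/V_/G_ quartet added for CHAN follows the cxgb3 register-field convention: S_CHAN is the field's bit offset, M_CHAN its width mask (two bits), V_CHAN() shifts a value into place, and G_CHAN() extracts it back out. The V_CHAN(attr->chan) OR'd into flags_rtr_type in cxio_rdma_init() above is the producer side. A quick illustration of how the pair round-trips:

u16 word = 0;
u32 chan;

word |= V_CHAN(2);	/* place channel 2 into bits 5:4 */
/* ... flags and the RTR type are OR'd into the same word alongside ... */

chan = G_CHAN(word);	/* recovers exactly the 2 that was inserted */
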
diff --git a/drivers/infiniband/hw/cxgb3/iwch.c b/drivers/infiniband/hw/cxgb3/iwch.c
index 26fc0a4eaa74..b0ea0105ddf6 100644
--- a/drivers/infiniband/hw/cxgb3/iwch.c
+++ b/drivers/infiniband/hw/cxgb3/iwch.c
@@ -51,7 +51,7 @@ cxgb3_cpl_handler_func t3c_handlers[NUM_CPL_CMDS];

static void open_rnic_dev(struct t3cdev *);
static void close_rnic_dev(struct t3cdev *);
-static void iwch_err_handler(struct t3cdev *, u32, u32);
+static void iwch_event_handler(struct t3cdev *, u32, u32);

struct cxgb3_client t3c_client = {
	.name = "iw_cxgb3",
@@ -59,7 +59,7 @@ struct cxgb3_client t3c_client = {
	.remove = close_rnic_dev,
	.handlers = t3c_handlers,
	.redirect = iwch_ep_redirect,
-	.err_handler = iwch_err_handler
+	.event_handler = iwch_event_handler
};

static LIST_HEAD(dev_list);
@@ -105,11 +105,9 @@ static void rnic_init(struct iwch_dev *rnicp)
static void open_rnic_dev(struct t3cdev *tdev)
{
	struct iwch_dev *rnicp;
-	static int vers_printed;

	PDBG("%s t3cdev %p\n", __func__, tdev);
-	if (!vers_printed++)
-		printk(KERN_INFO MOD "Chelsio T3 RDMA Driver - version %s\n",
-		       DRV_VERSION);
+	printk_once(KERN_INFO MOD "Chelsio T3 RDMA Driver - version %s\n",
+		    DRV_VERSION);
	rnicp = (struct iwch_dev *)ib_alloc_device(sizeof(*rnicp));
	if (!rnicp) {
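
printk_once() (from <linux/kernel.h>) encapsulates exactly the pattern this hunk deletes: a static guard variable that lets the message fire only on the first call. The two forms are equivalent; sketched:

/* open-coded, as the driver used to do */
static int vers_printed;
if (!vers_printed++)
	printk(KERN_INFO "Chelsio T3 RDMA Driver - version %s\n", DRV_VERSION);

/* with the helper; the static flag lives inside the macro expansion */
printk_once(KERN_INFO "Chelsio T3 RDMA Driver - version %s\n", DRV_VERSION);

The same conversion appears again in mlx4/main.c further down.
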
@@ -162,21 +160,36 @@ static void close_rnic_dev(struct t3cdev *tdev)
	mutex_unlock(&dev_mutex);
}

-static void iwch_err_handler(struct t3cdev *tdev, u32 status, u32 error)
+static void iwch_event_handler(struct t3cdev *tdev, u32 evt, u32 port_id)
{
	struct cxio_rdev *rdev = tdev->ulp;
-	struct iwch_dev *rnicp = rdev_to_iwch_dev(rdev);
+	struct iwch_dev *rnicp;
	struct ib_event event;
+	u32 portnum = port_id + 1;

-	if (status == OFFLOAD_STATUS_DOWN) {
+	if (!rdev)
+		return;
+	rnicp = rdev_to_iwch_dev(rdev);
+	switch (evt) {
+	case OFFLOAD_STATUS_DOWN: {
		rdev->flags = CXIO_ERROR_FATAL;
-
-		event.device = &rnicp->ibdev;
		event.event = IB_EVENT_DEVICE_FATAL;
-		event.element.port_num = 0;
-		ib_dispatch_event(&event);
+		break;
+	}
+	case OFFLOAD_PORT_DOWN: {
+		event.event = IB_EVENT_PORT_ERR;
+		break;
+	}
+	case OFFLOAD_PORT_UP: {
+		event.event = IB_EVENT_PORT_ACTIVE;
+		break;
+	}
	}

+	event.device = &rnicp->ibdev;
+	event.element.port_num = portnum;
+	ib_dispatch_event(&event);
+
	return;
}

diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c
index 52d7bb0c2a12..66b41351910a 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_cm.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c
@@ -286,7 +286,7 @@ void __free_ep(struct kref *kref)
	ep = container_of(container_of(kref, struct iwch_ep_common, kref),
			  struct iwch_ep, com);
	PDBG("%s ep %p state %s\n", __func__, ep, states[state_read(&ep->com)]);
-	if (ep->com.flags & RELEASE_RESOURCES) {
+	if (test_bit(RELEASE_RESOURCES, &ep->com.flags)) {
		cxgb3_remove_tid(ep->com.tdev, (void *)ep, ep->hwtid);
		dst_release(ep->dst);
		l2t_release(L2DATA(ep->com.tdev), ep->l2t);
@@ -297,7 +297,7 @@ void __free_ep(struct kref *kref)
static void release_ep_resources(struct iwch_ep *ep)
{
	PDBG("%s ep %p tid %d\n", __func__, ep, ep->hwtid);
-	ep->com.flags |= RELEASE_RESOURCES;
+	set_bit(RELEASE_RESOURCES, &ep->com.flags);
	put_ep(&ep->com);
}

@@ -786,10 +786,12 @@ static void connect_request_upcall(struct iwch_ep *ep)
	event.private_data_len = ep->plen;
	event.private_data = ep->mpa_pkt + sizeof(struct mpa_message);
	event.provider_data = ep;
-	if (state_read(&ep->parent_ep->com) != DEAD)
+	if (state_read(&ep->parent_ep->com) != DEAD) {
+		get_ep(&ep->com);
		ep->parent_ep->com.cm_id->event_handler(
						ep->parent_ep->com.cm_id,
						&event);
+	}
	put_ep(&ep->parent_ep->com);
	ep->parent_ep = NULL;
}
@@ -1156,8 +1158,7 @@ static int abort_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
	 * We get 2 abort replies from the HW.  The first one must
	 * be ignored except for scribbling that we need one more.
	 */
-	if (!(ep->com.flags & ABORT_REQ_IN_PROGRESS)) {
-		ep->com.flags |= ABORT_REQ_IN_PROGRESS;
+	if (!test_and_set_bit(ABORT_REQ_IN_PROGRESS, &ep->com.flags)) {
		return CPL_RET_BUF_DONE;
	}

@@ -1477,10 +1478,14 @@ static int peer_close(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
		/*
		 * We're gonna mark this puppy DEAD, but keep
		 * the reference on it until the ULP accepts or
-		 * rejects the CR.
+		 * rejects the CR. Also wake up anyone waiting
+		 * in rdma connection migration (see iwch_accept_cr()).
		 */
		__state_set(&ep->com, CLOSING);
-		get_ep(&ep->com);
+		ep->com.rpl_done = 1;
+		ep->com.rpl_err = -ECONNRESET;
+		PDBG("waking up ep %p\n", ep);
+		wake_up(&ep->com.waitq);
		break;
	case MPA_REP_SENT:
		__state_set(&ep->com, CLOSING);
@@ -1561,8 +1566,7 @@ static int peer_abort(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
	 * We get 2 peer aborts from the HW.  The first one must
	 * be ignored except for scribbling that we need one more.
	 */
-	if (!(ep->com.flags & PEER_ABORT_IN_PROGRESS)) {
-		ep->com.flags |= PEER_ABORT_IN_PROGRESS;
+	if (!test_and_set_bit(PEER_ABORT_IN_PROGRESS, &ep->com.flags)) {
		return CPL_RET_BUF_DONE;
	}

@@ -1589,9 +1593,13 @@ static int peer_abort(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
		/*
		 * We're gonna mark this puppy DEAD, but keep
		 * the reference on it until the ULP accepts or
-		 * rejects the CR.
+		 * rejects the CR. Also wake up anyone waiting
+		 * in rdma connection migration (see iwch_accept_cr()).
		 */
-		get_ep(&ep->com);
+		ep->com.rpl_done = 1;
+		ep->com.rpl_err = -ECONNRESET;
+		PDBG("waking up ep %p\n", ep);
+		wake_up(&ep->com.waitq);
		break;
	case MORIBUND:
	case CLOSING:
@@ -1797,6 +1805,7 @@ int iwch_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
		err = send_mpa_reject(ep, pdata, pdata_len);
		err = iwch_ep_disconnect(ep, 0, GFP_KERNEL);
	}
+	put_ep(&ep->com);
	return 0;
}

@@ -1810,8 +1819,10 @@ int iwch_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
	struct iwch_qp *qp = get_qhp(h, conn_param->qpn);

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
-	if (state_read(&ep->com) == DEAD)
-		return -ECONNRESET;
+	if (state_read(&ep->com) == DEAD) {
+		err = -ECONNRESET;
+		goto err;
+	}

	BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD);
	BUG_ON(!qp);
@@ -1819,15 +1830,14 @@ int iwch_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
	if ((conn_param->ord > qp->rhp->attr.max_rdma_read_qp_depth) ||
	    (conn_param->ird > qp->rhp->attr.max_rdma_reads_per_qp)) {
		abort_connection(ep, NULL, GFP_KERNEL);
-		return -EINVAL;
+		err = -EINVAL;
+		goto err;
	}

	cm_id->add_ref(cm_id);
	ep->com.cm_id = cm_id;
	ep->com.qp = qp;

-	ep->com.rpl_done = 0;
-	ep->com.rpl_err = 0;
	ep->ird = conn_param->ird;
	ep->ord = conn_param->ord;

@@ -1836,8 +1846,6 @@ int iwch_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)

	PDBG("%s %d ird %d ord %d\n", __func__, __LINE__, ep->ird, ep->ord);

-	get_ep(&ep->com);
-
	/* bind QP to EP and move to RTS */
	attrs.mpa_attr = ep->mpa_attr;
	attrs.max_ird = ep->ird;
@@ -1855,30 +1863,31 @@ int iwch_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
	err = iwch_modify_qp(ep->com.qp->rhp,
			     ep->com.qp, mask, &attrs, 1);
	if (err)
-		goto err;
+		goto err1;

	/* if needed, wait for wr_ack */
	if (iwch_rqes_posted(qp)) {
		wait_event(ep->com.waitq, ep->com.rpl_done);
		err = ep->com.rpl_err;
		if (err)
-			goto err;
+			goto err1;
	}

	err = send_mpa_reply(ep, conn_param->private_data,
			     conn_param->private_data_len);
	if (err)
-		goto err;
+		goto err1;


	state_set(&ep->com, FPDU_MODE);
	established_upcall(ep);
	put_ep(&ep->com);
	return 0;
-err:
+err1:
	ep->com.cm_id = NULL;
	ep->com.qp = NULL;
	cm_id->rem_ref(cm_id);
+err:
	put_ep(&ep->com);
	return err;
}
@@ -2097,14 +2106,17 @@ int iwch_ep_disconnect(struct iwch_ep *ep, int abrupt, gfp_t gfp)
		ep->com.state = CLOSING;
		start_ep_timer(ep);
	}
+	set_bit(CLOSE_SENT, &ep->com.flags);
	break;
case CLOSING:
-	close = 1;
-	if (abrupt) {
-		stop_ep_timer(ep);
-		ep->com.state = ABORTING;
-	} else
-		ep->com.state = MORIBUND;
+	if (!test_and_set_bit(CLOSE_SENT, &ep->com.flags)) {
+		close = 1;
+		if (abrupt) {
+			stop_ep_timer(ep);
+			ep->com.state = ABORTING;
+		} else
+			ep->com.state = MORIBUND;
+	}
	break;
case MORIBUND:
case ABORTING:
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.h b/drivers/infiniband/hw/cxgb3/iwch_cm.h
index 43c0aea7eadc..b9efadfffb4f 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_cm.h
+++ b/drivers/infiniband/hw/cxgb3/iwch_cm.h
@@ -145,9 +145,10 @@ enum iwch_ep_state {
};

enum iwch_ep_flags {
-	PEER_ABORT_IN_PROGRESS	= (1 << 0),
-	ABORT_REQ_IN_PROGRESS	= (1 << 1),
-	RELEASE_RESOURCES	= (1 << 2),
+	PEER_ABORT_IN_PROGRESS	= 0,
+	ABORT_REQ_IN_PROGRESS	= 1,
+	RELEASE_RESOURCES	= 2,
+	CLOSE_SENT		= 3,
};

struct iwch_ep_common {
@@ -162,7 +163,7 @@ struct iwch_ep_common {
	wait_queue_head_t waitq;
	int rpl_done;
	int rpl_err;
-	u32 flags;
+	unsigned long flags;
};

struct iwch_listen_ep {
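
This header change is what enables the test_bit()/set_bit()/test_and_set_bit() conversions in iwch_cm.c above: the enum values turn from masks (1 << n) into plain bit numbers, and the field widens from u32 to unsigned long, because the kernel's atomic bitops (<linux/bitops.h>) take a bit index and operate on unsigned long words. The payoff is atomicity: `ep->com.flags |= MASK` is a non-atomic read-modify-write, while test_and_set_bit() sets the bit and returns its previous value in one atomic step, which is what the "ignore the first of two HW abort replies" logic relies on. A sketch:

unsigned long flags = 0;

/* atomically set bit ABORT_REQ_IN_PROGRESS; returns its old value */
if (!test_and_set_bit(ABORT_REQ_IN_PROGRESS, &flags)) {
	/* first abort reply from HW: swallow it, wait for the second */
}

if (test_bit(RELEASE_RESOURCES, &flags)) {
	/* tear down the endpoint */
}
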
diff --git a/drivers/infiniband/hw/cxgb3/iwch_mem.c b/drivers/infiniband/hw/cxgb3/iwch_mem.c
index ec49a5cbdebb..e1ec65ebb016 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_mem.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_mem.c
@@ -39,7 +39,7 @@
#include "iwch.h"
#include "iwch_provider.h"

-static void iwch_finish_mem_reg(struct iwch_mr *mhp, u32 stag)
+static int iwch_finish_mem_reg(struct iwch_mr *mhp, u32 stag)
{
	u32 mmid;

@@ -47,14 +47,15 @@ static void iwch_finish_mem_reg(struct iwch_mr *mhp, u32 stag)
	mhp->attr.stag = stag;
	mmid = stag >> 8;
	mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
-	insert_handle(mhp->rhp, &mhp->rhp->mmidr, mhp, mmid);
	PDBG("%s mmid 0x%x mhp %p\n", __func__, mmid, mhp);
+	return insert_handle(mhp->rhp, &mhp->rhp->mmidr, mhp, mmid);
}

int iwch_register_mem(struct iwch_dev *rhp, struct iwch_pd *php,
		      struct iwch_mr *mhp, int shift)
{
	u32 stag;
+	int ret;

	if (cxio_register_phys_mem(&rhp->rdev,
				   &stag, mhp->attr.pdid,
@@ -66,9 +67,11 @@ int iwch_register_mem(struct iwch_dev *rhp, struct iwch_pd *php,
				   mhp->attr.pbl_size, mhp->attr.pbl_addr))
		return -ENOMEM;

-	iwch_finish_mem_reg(mhp, stag);
-
-	return 0;
+	ret = iwch_finish_mem_reg(mhp, stag);
+	if (ret)
+		cxio_dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
+			       mhp->attr.pbl_addr);
+	return ret;
}

int iwch_reregister_mem(struct iwch_dev *rhp, struct iwch_pd *php,
@@ -77,6 +80,7 @@ int iwch_reregister_mem(struct iwch_dev *rhp, struct iwch_pd *php,
			int npages)
{
	u32 stag;
+	int ret;

	/* We could support this... */
	if (npages > mhp->attr.pbl_size)
@@ -93,9 +97,12 @@ int iwch_reregister_mem(struct iwch_dev *rhp, struct iwch_pd *php,
				   mhp->attr.pbl_size, mhp->attr.pbl_addr))
		return -ENOMEM;

-	iwch_finish_mem_reg(mhp, stag);
+	ret = iwch_finish_mem_reg(mhp, stag);
+	if (ret)
+		cxio_dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
+			       mhp->attr.pbl_addr);

-	return 0;
+	return ret;
}

int iwch_alloc_pbl(struct iwch_mr *mhp, int npages)
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c
index e2a63214008a..6895523779d0 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c
@@ -195,7 +195,11 @@ static struct ib_cq *iwch_create_cq(struct ib_device *ibdev, int entries, int ve
	spin_lock_init(&chp->lock);
	atomic_set(&chp->refcnt, 1);
	init_waitqueue_head(&chp->wait);
-	insert_handle(rhp, &rhp->cqidr, chp, chp->cq.cqid);
+	if (insert_handle(rhp, &rhp->cqidr, chp, chp->cq.cqid)) {
+		cxio_destroy_cq(&chp->rhp->rdev, &chp->cq);
+		kfree(chp);
+		return ERR_PTR(-ENOMEM);
+	}

	if (ucontext) {
		struct iwch_mm_entry *mm;
@@ -750,7 +754,11 @@ static struct ib_mw *iwch_alloc_mw(struct ib_pd *pd)
	mhp->attr.stag = stag;
	mmid = (stag) >> 8;
	mhp->ibmw.rkey = stag;
-	insert_handle(rhp, &rhp->mmidr, mhp, mmid);
+	if (insert_handle(rhp, &rhp->mmidr, mhp, mmid)) {
+		cxio_deallocate_window(&rhp->rdev, mhp->attr.stag);
+		kfree(mhp);
+		return ERR_PTR(-ENOMEM);
+	}
	PDBG("%s mmid 0x%x mhp %p stag 0x%x\n", __func__, mmid, mhp, stag);
	return &(mhp->ibmw);
}
@@ -778,37 +786,43 @@ static struct ib_mr *iwch_alloc_fast_reg_mr(struct ib_pd *pd, int pbl_depth)
	struct iwch_mr *mhp;
	u32 mmid;
	u32 stag = 0;
-	int ret;
+	int ret = 0;

	php = to_iwch_pd(pd);
	rhp = php->rhp;
	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
	if (!mhp)
-		return ERR_PTR(-ENOMEM);
+		goto err;

	mhp->rhp = rhp;
	ret = iwch_alloc_pbl(mhp, pbl_depth);
-	if (ret) {
-		kfree(mhp);
-		return ERR_PTR(ret);
-	}
+	if (ret)
+		goto err1;
	mhp->attr.pbl_size = pbl_depth;
	ret = cxio_allocate_stag(&rhp->rdev, &stag, php->pdid,
				 mhp->attr.pbl_size, mhp->attr.pbl_addr);
-	if (ret) {
-		iwch_free_pbl(mhp);
-		kfree(mhp);
-		return ERR_PTR(ret);
-	}
+	if (ret)
+		goto err2;
	mhp->attr.pdid = php->pdid;
	mhp->attr.type = TPT_NON_SHARED_MR;
	mhp->attr.stag = stag;
	mhp->attr.state = 1;
	mmid = (stag) >> 8;
	mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
-	insert_handle(rhp, &rhp->mmidr, mhp, mmid);
+	if (insert_handle(rhp, &rhp->mmidr, mhp, mmid))
+		goto err3;
+
	PDBG("%s mmid 0x%x mhp %p stag 0x%x\n", __func__, mmid, mhp, stag);
	return &(mhp->ibmr);
+err3:
+	cxio_dereg_mem(&rhp->rdev, stag, mhp->attr.pbl_size,
+		       mhp->attr.pbl_addr);
+err2:
+	iwch_free_pbl(mhp);
+err1:
+	kfree(mhp);
+err:
+	return ERR_PTR(ret);
}

static struct ib_fast_reg_page_list *iwch_alloc_fastreg_pbl(
@@ -961,7 +975,13 @@ static struct ib_qp *iwch_create_qp(struct ib_pd *pd,
	spin_lock_init(&qhp->lock);
	init_waitqueue_head(&qhp->wait);
	atomic_set(&qhp->refcnt, 1);
-	insert_handle(rhp, &rhp->qpidr, qhp, qhp->wq.qpid);
+
+	if (insert_handle(rhp, &rhp->qpidr, qhp, qhp->wq.qpid)) {
+		cxio_destroy_qp(&rhp->rdev, &qhp->wq,
+				ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
+		kfree(qhp);
+		return ERR_PTR(-ENOMEM);
+	}

	if (udata) {

@@ -1418,6 +1438,7 @@ int iwch_register_device(struct iwch_dev *dev)
bail2:
	ib_unregister_device(&dev->ibdev);
bail1:
+	kfree(dev->ibdev.iwcm);
	return ret;
}

@@ -1430,5 +1451,6 @@ void iwch_unregister_device(struct iwch_dev *dev)
		device_remove_file(&dev->ibdev.dev,
				   iwch_class_attributes[i]);
	ib_unregister_device(&dev->ibdev);
+	kfree(dev->ibdev.iwcm);
	return;
}
diff --git a/drivers/infiniband/hw/cxgb3/iwch_qp.c b/drivers/infiniband/hw/cxgb3/iwch_qp.c
index 27bbdc8e773a..6e8653471941 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_qp.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_qp.c
@@ -889,6 +889,7 @@ static int rdma_init(struct iwch_dev *rhp, struct iwch_qp *qhp,
	init_attr.qp_dma_size = (1UL << qhp->wq.size_log2);
	init_attr.rqe_count = iwch_rqes_posted(qhp);
	init_attr.flags = qhp->attr.mpa_attr.initiator ? MPA_INITIATOR : 0;
+	init_attr.chan = qhp->ep->l2t->smt_idx;
	if (peer2peer) {
		init_attr.rtr_type = RTR_READ;
		if (init_attr.ord == 0 && qhp->attr.mpa_attr.initiator)
diff --git a/drivers/infiniband/hw/ehca/ehca_main.c b/drivers/infiniband/hw/ehca/ehca_main.c
index fab18a2c74a8..5b635aa5947e 100644
--- a/drivers/infiniband/hw/ehca/ehca_main.c
+++ b/drivers/infiniband/hw/ehca/ehca_main.c
@@ -52,7 +52,7 @@
#include "ehca_tools.h"
#include "hcp_if.h"

-#define HCAD_VERSION "0028"
+#define HCAD_VERSION "0029"

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Christoph Raisch <raisch@de.ibm.com>");
@@ -64,7 +64,7 @@ static int ehca_hw_level = 0;
static int ehca_poll_all_eqs = 1;

int ehca_debug_level   = 0;
-int ehca_nr_ports      = 2;
+int ehca_nr_ports      = -1;
int ehca_use_hp_mr     = 0;
int ehca_port_act_time = 30;
int ehca_static_rate   = -1;
@@ -95,8 +95,8 @@ MODULE_PARM_DESC(hw_level,
95 "Hardware level (0: autosensing (default), " 95 "Hardware level (0: autosensing (default), "
96 "0x10..0x14: eHCA, 0x20..0x23: eHCA2)"); 96 "0x10..0x14: eHCA, 0x20..0x23: eHCA2)");
97MODULE_PARM_DESC(nr_ports, 97MODULE_PARM_DESC(nr_ports,
98 "number of connected ports (-1: autodetect, 1: port one only, " 98 "number of connected ports (-1: autodetect (default), "
99 "2: two ports (default)"); 99 "1: port one only, 2: two ports)");
100MODULE_PARM_DESC(use_hp_mr, 100MODULE_PARM_DESC(use_hp_mr,
101 "Use high performance MRs (default: no)"); 101 "Use high performance MRs (default: no)");
102MODULE_PARM_DESC(port_act_time, 102MODULE_PARM_DESC(port_act_time,
diff --git a/drivers/infiniband/hw/ehca/ehca_reqs.c b/drivers/infiniband/hw/ehca/ehca_reqs.c
index 5a3d96f84c79..8fd88cd828fd 100644
--- a/drivers/infiniband/hw/ehca/ehca_reqs.c
+++ b/drivers/infiniband/hw/ehca/ehca_reqs.c
@@ -786,7 +786,11 @@ repoll:
	wc->slid = cqe->rlid;
	wc->dlid_path_bits = cqe->dlid;
	wc->src_qp = cqe->remote_qp_number;
-	wc->wc_flags = cqe->w_completion_flags;
+	/*
+	 * HW has "Immed data present" and "GRH present" in bits 6 and 5.
+	 * SW defines those in bits 1 and 0, so we can just shift and mask.
+	 */
+	wc->wc_flags = (cqe->w_completion_flags >> 5) & 3;
	wc->ex.imm_data = cpu_to_be32(cqe->immediate_data);
	wc->sl = cqe->service_level;

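
To see the ehca_reqs.c remap concretely: the hardware reports "immediate data present" in bit 6 and "GRH present" in bit 5, while ib_verbs.h defines IB_WC_WITH_IMM as bit 1 and IB_WC_GRH as bit 0, so a single shift-and-mask relocates both flags at once:

u32 hw_flags = (1 << 6) | (1 << 5);	/* both HW flags set: 0x60 */
u32 wc_flags = (hw_flags >> 5) & 3;	/* 0x3 == IB_WC_WITH_IMM | IB_WC_GRH */
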
diff --git a/drivers/infiniband/hw/ehca/ehca_sqp.c b/drivers/infiniband/hw/ehca/ehca_sqp.c
index c568b28f4e20..8c1213f8916a 100644
--- a/drivers/infiniband/hw/ehca/ehca_sqp.c
+++ b/drivers/infiniband/hw/ehca/ehca_sqp.c
@@ -125,14 +125,30 @@ struct ib_perf {
	u8 data[192];
} __attribute__ ((packed));

+/* TC/SL/FL packed into 32 bits, as in ClassPortInfo */
+struct tcslfl {
+	u32 tc:8;
+	u32 sl:4;
+	u32 fl:20;
+} __attribute__ ((packed));
+
+/* IP Version/TC/FL packed into 32 bits, as in GRH */
+struct vertcfl {
+	u32 ver:4;
+	u32 tc:8;
+	u32 fl:20;
+} __attribute__ ((packed));

static int ehca_process_perf(struct ib_device *ibdev, u8 port_num,
+			     struct ib_wc *in_wc, struct ib_grh *in_grh,
			     struct ib_mad *in_mad, struct ib_mad *out_mad)
{
	struct ib_perf *in_perf = (struct ib_perf *)in_mad;
	struct ib_perf *out_perf = (struct ib_perf *)out_mad;
	struct ib_class_port_info *poi =
		(struct ib_class_port_info *)out_perf->data;
+	struct tcslfl *tcslfl =
+		(struct tcslfl *)&poi->redirect_tcslfl;
	struct ehca_shca *shca =
		container_of(ibdev, struct ehca_shca, ib_device);
	struct ehca_sport *sport = &shca->sport[port_num - 1];
@@ -158,10 +174,29 @@ static int ehca_process_perf(struct ib_device *ibdev, u8 port_num,
	poi->base_version = 1;
	poi->class_version = 1;
	poi->resp_time_value = 18;
-	poi->redirect_lid = sport->saved_attr.lid;
-	poi->redirect_qp = sport->pma_qp_nr;
+
+	/* copy local routing information from WC where applicable */
+	tcslfl->sl = in_wc->sl;
+	poi->redirect_lid =
+		sport->saved_attr.lid | in_wc->dlid_path_bits;
+	poi->redirect_qp = sport->pma_qp_nr;
	poi->redirect_qkey = IB_QP1_QKEY;
-	poi->redirect_pkey = IB_DEFAULT_PKEY_FULL;
+
+	ehca_query_pkey(ibdev, port_num, in_wc->pkey_index,
+			&poi->redirect_pkey);
+
+	/* if request was globally routed, copy route info */
+	if (in_grh) {
+		struct vertcfl *vertcfl =
+			(struct vertcfl *)&in_grh->version_tclass_flow;
+		memcpy(poi->redirect_gid, in_grh->dgid.raw,
+		       sizeof(poi->redirect_gid));
+		tcslfl->tc = vertcfl->tc;
+		tcslfl->fl = vertcfl->fl;
+	} else
+		/* else only fill in default GID */
+		ehca_query_gid(ibdev, port_num, 0,
+			       (union ib_gid *)&poi->redirect_gid);

	ehca_dbg(ibdev, "ehca_pma_lid=%x ehca_pma_qp=%x",
		 sport->saved_attr.lid, sport->pma_qp_nr);
@@ -183,8 +218,7 @@ perf_reply:

int ehca_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
		     struct ib_wc *in_wc, struct ib_grh *in_grh,
-		     struct ib_mad *in_mad,
-		     struct ib_mad *out_mad)
+		     struct ib_mad *in_mad, struct ib_mad *out_mad)
{
	int ret;

@@ -196,7 +230,8 @@ int ehca_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
		return IB_MAD_RESULT_SUCCESS;

	ehca_dbg(ibdev, "port_num=%x src_qp=%x", port_num, in_wc->src_qp);
-	ret = ehca_process_perf(ibdev, port_num, in_mad, out_mad);
+	ret = ehca_process_perf(ibdev, port_num, in_wc, in_grh,
+				in_mad, out_mad);

	return ret;
}
diff --git a/drivers/infiniband/hw/ipath/ipath_file_ops.c b/drivers/infiniband/hw/ipath/ipath_file_ops.c
index 23173982b32c..38a287006612 100644
--- a/drivers/infiniband/hw/ipath/ipath_file_ops.c
+++ b/drivers/infiniband/hw/ipath/ipath_file_ops.c
@@ -1616,7 +1616,7 @@ static int try_alloc_port(struct ipath_devdata *dd, int port,
		pd->port_cnt = 1;
		port_fp(fp) = pd;
		pd->port_pid = get_pid(task_pid(current));
-		strncpy(pd->port_comm, current->comm, sizeof(pd->port_comm));
+		strlcpy(pd->port_comm, current->comm, sizeof(pd->port_comm));
		ipath_stats.sps_ports++;
		ret = 0;
	} else
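
The strncpy() to strlcpy() switch matters whenever the destination is later consumed as a C string: strncpy() does not NUL-terminate if the source is at least as long as the buffer, whereas strlcpy() always terminates, truncating if necessary. (The ipath_mad.c hunk just below goes the other way, to memcpy(), since smp->data there is a fixed 64-byte field to be filled outright rather than a string.) Sketch:

char buf[8];

strncpy(buf, "12345678", sizeof(buf));	/* all 8 bytes used, no NUL: not a string */
strlcpy(buf, "12345678", sizeof(buf));	/* buf holds "1234567" plus the terminating NUL */
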
diff --git a/drivers/infiniband/hw/ipath/ipath_mad.c b/drivers/infiniband/hw/ipath/ipath_mad.c
index 16a702d46018..ceb98ee78666 100644
--- a/drivers/infiniband/hw/ipath/ipath_mad.c
+++ b/drivers/infiniband/hw/ipath/ipath_mad.c
@@ -60,7 +60,7 @@ static int recv_subn_get_nodedescription(struct ib_smp *smp,
	if (smp->attr_mod)
		smp->status |= IB_SMP_INVALID_FIELD;

-	strncpy(smp->data, ibdev->node_desc, sizeof(smp->data));
+	memcpy(smp->data, ibdev->node_desc, sizeof(smp->data));

	return reply(smp);
}
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index ae3d7590346e..3cb3f47a10b8 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -342,6 +342,9 @@ static struct ib_ucontext *mlx4_ib_alloc_ucontext(struct ib_device *ibdev,
	struct mlx4_ib_alloc_ucontext_resp resp;
	int err;

+	if (!dev->ib_active)
+		return ERR_PTR(-EAGAIN);
+
	resp.qp_tab_size      = dev->dev->caps.num_qps;
	resp.bf_reg_size      = dev->dev->caps.bf_reg_size;
	resp.bf_regs_per_page = dev->dev->caps.bf_regs_per_page;
@@ -540,15 +543,11 @@ static struct device_attribute *mlx4_class_attributes[] = {

static void *mlx4_ib_add(struct mlx4_dev *dev)
{
-	static int mlx4_ib_version_printed;
	struct mlx4_ib_dev *ibdev;
	int num_ports = 0;
	int i;

-	if (!mlx4_ib_version_printed) {
-		printk(KERN_INFO "%s", mlx4_ib_version);
-		++mlx4_ib_version_printed;
-	}
+	printk_once(KERN_INFO "%s", mlx4_ib_version);

	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
		num_ports++;
@@ -673,6 +672,8 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
		goto err_reg;
	}

+	ibdev->ib_active = true;
+
	return ibdev;

err_reg:
@@ -729,6 +730,7 @@ static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr,
		break;

	case MLX4_DEV_EVENT_CATASTROPHIC_ERROR:
+		ibdev->ib_active = false;
		ibev.event = IB_EVENT_DEVICE_FATAL;
		break;

diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
index 8a7dd6795fa0..3486d7675e56 100644
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -175,6 +175,7 @@ struct mlx4_ib_dev {
	spinlock_t		sm_lock;

	struct mutex		cap_mask_mutex;
+	bool			ib_active;
};

static inline struct mlx4_ib_dev *to_mdev(struct ib_device *ibdev)
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index c4a02648c8af..219b10397b4d 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -615,10 +615,12 @@ static enum mlx4_qp_state to_mlx4_state(enum ib_qp_state state)
615} 615}
616 616
617static void mlx4_ib_lock_cqs(struct mlx4_ib_cq *send_cq, struct mlx4_ib_cq *recv_cq) 617static void mlx4_ib_lock_cqs(struct mlx4_ib_cq *send_cq, struct mlx4_ib_cq *recv_cq)
618 __acquires(&send_cq->lock) __acquires(&recv_cq->lock)
618{ 619{
619 if (send_cq == recv_cq) 620 if (send_cq == recv_cq) {
620 spin_lock_irq(&send_cq->lock); 621 spin_lock_irq(&send_cq->lock);
621 else if (send_cq->mcq.cqn < recv_cq->mcq.cqn) { 622 __acquire(&recv_cq->lock);
623 } else if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {
622 spin_lock_irq(&send_cq->lock); 624 spin_lock_irq(&send_cq->lock);
623 spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING); 625 spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING);
624 } else { 626 } else {
@@ -628,10 +630,12 @@ static void mlx4_ib_lock_cqs(struct mlx4_ib_cq *send_cq, struct mlx4_ib_cq *recv
628} 630}
629 631
630static void mlx4_ib_unlock_cqs(struct mlx4_ib_cq *send_cq, struct mlx4_ib_cq *recv_cq) 632static void mlx4_ib_unlock_cqs(struct mlx4_ib_cq *send_cq, struct mlx4_ib_cq *recv_cq)
633 __releases(&send_cq->lock) __releases(&recv_cq->lock)
631{ 634{
632 if (send_cq == recv_cq) 635 if (send_cq == recv_cq) {
636 __release(&recv_cq->lock);
633 spin_unlock_irq(&send_cq->lock); 637 spin_unlock_irq(&send_cq->lock);
634 else if (send_cq->mcq.cqn < recv_cq->mcq.cqn) { 638 } else if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {
635 spin_unlock(&recv_cq->lock); 639 spin_unlock(&recv_cq->lock);
636 spin_unlock_irq(&send_cq->lock); 640 spin_unlock_irq(&send_cq->lock);
637 } else { 641 } else {
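
The __acquires/__releases markers above are sparse annotations, and the bare __acquire(&recv_cq->lock)/__release(&recv_cq->lock) calls in the send_cq == recv_cq branches are no-ops that keep sparse's lock-context counting balanced when only one real lock covers the pair. The deadlock-avoidance rule itself is unchanged: distinct CQ locks are always taken in ascending cqn order, so no two paths can lock the same pair in opposite orders. A userspace sketch of the same ordering discipline:

#include <pthread.h>

struct cq {
        unsigned int cqn;               /* stable, unique ordering key */
        pthread_mutex_t lock;
};

/* Same object: lock once. Distinct objects: ascending cqn, no ABBA. */
static void lock_cq_pair(struct cq *send, struct cq *recv)
{
        if (send == recv) {
                pthread_mutex_lock(&send->lock);
        } else if (send->cqn < recv->cqn) {
                pthread_mutex_lock(&send->lock);
                pthread_mutex_lock(&recv->lock);
        } else {
                pthread_mutex_lock(&recv->lock);
                pthread_mutex_lock(&send->lock);
        }
}

static void unlock_cq_pair(struct cq *send, struct cq *recv)
{
        if (send == recv) {
                pthread_mutex_unlock(&send->lock);
        } else if (send->cqn < recv->cqn) {
                pthread_mutex_unlock(&recv->lock);
                pthread_mutex_unlock(&send->lock);
        } else {
                pthread_mutex_unlock(&send->lock);
                pthread_mutex_unlock(&recv->lock);
        }
}

int main(void)
{
        struct cq a = { 1, PTHREAD_MUTEX_INITIALIZER };
        struct cq b = { 2, PTHREAD_MUTEX_INITIALIZER };

        lock_cq_pair(&a, &b);
        unlock_cq_pair(&a, &b);
        return 0;
}
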
diff --git a/drivers/infiniband/hw/mthca/mthca_catas.c b/drivers/infiniband/hw/mthca/mthca_catas.c
index 65ad359fdf16..056b2a4c6970 100644
--- a/drivers/infiniband/hw/mthca/mthca_catas.c
+++ b/drivers/infiniband/hw/mthca/mthca_catas.c
@@ -88,6 +88,7 @@ static void handle_catas(struct mthca_dev *dev)
88 event.device = &dev->ib_dev; 88 event.device = &dev->ib_dev;
89 event.event = IB_EVENT_DEVICE_FATAL; 89 event.event = IB_EVENT_DEVICE_FATAL;
90 event.element.port_num = 0; 90 event.element.port_num = 0;
91 dev->active = false;
91 92
92 ib_dispatch_event(&event); 93 ib_dispatch_event(&event);
93 94
diff --git a/drivers/infiniband/hw/mthca/mthca_config_reg.h b/drivers/infiniband/hw/mthca/mthca_config_reg.h
index 75671f75cac4..155bc66395be 100644
--- a/drivers/infiniband/hw/mthca/mthca_config_reg.h
+++ b/drivers/infiniband/hw/mthca/mthca_config_reg.h
@@ -34,8 +34,6 @@
34#ifndef MTHCA_CONFIG_REG_H 34#ifndef MTHCA_CONFIG_REG_H
35#define MTHCA_CONFIG_REG_H 35#define MTHCA_CONFIG_REG_H
36 36
37#include <asm/page.h>
38
39#define MTHCA_HCR_BASE 0x80680 37#define MTHCA_HCR_BASE 0x80680
40#define MTHCA_HCR_SIZE 0x0001c 38#define MTHCA_HCR_SIZE 0x0001c
41#define MTHCA_ECR_BASE 0x80700 39#define MTHCA_ECR_BASE 0x80700
diff --git a/drivers/infiniband/hw/mthca/mthca_dev.h b/drivers/infiniband/hw/mthca/mthca_dev.h
index 9ef611f6dd36..7e6a6d64ad4e 100644
--- a/drivers/infiniband/hw/mthca/mthca_dev.h
+++ b/drivers/infiniband/hw/mthca/mthca_dev.h
@@ -357,6 +357,7 @@ struct mthca_dev {
357 struct ib_ah *sm_ah[MTHCA_MAX_PORTS]; 357 struct ib_ah *sm_ah[MTHCA_MAX_PORTS];
358 spinlock_t sm_lock; 358 spinlock_t sm_lock;
359 u8 rate[MTHCA_MAX_PORTS]; 359 u8 rate[MTHCA_MAX_PORTS];
360 bool active;
360}; 361};
361 362
362#ifdef CONFIG_INFINIBAND_MTHCA_DEBUG 363#ifdef CONFIG_INFINIBAND_MTHCA_DEBUG
diff --git a/drivers/infiniband/hw/mthca/mthca_eq.c b/drivers/infiniband/hw/mthca/mthca_eq.c
index 90e4e450a120..8c31fa36e95e 100644
--- a/drivers/infiniband/hw/mthca/mthca_eq.c
+++ b/drivers/infiniband/hw/mthca/mthca_eq.c
@@ -829,27 +829,34 @@ int mthca_init_eq_table(struct mthca_dev *dev)
829 829
830 if (dev->mthca_flags & MTHCA_FLAG_MSI_X) { 830 if (dev->mthca_flags & MTHCA_FLAG_MSI_X) {
831 static const char *eq_name[] = { 831 static const char *eq_name[] = {
832 [MTHCA_EQ_COMP] = DRV_NAME " (comp)", 832 [MTHCA_EQ_COMP] = DRV_NAME "-comp",
833 [MTHCA_EQ_ASYNC] = DRV_NAME " (async)", 833 [MTHCA_EQ_ASYNC] = DRV_NAME "-async",
834 [MTHCA_EQ_CMD] = DRV_NAME " (cmd)" 834 [MTHCA_EQ_CMD] = DRV_NAME "-cmd"
835 }; 835 };
836 836
837 for (i = 0; i < MTHCA_NUM_EQ; ++i) { 837 for (i = 0; i < MTHCA_NUM_EQ; ++i) {
838 snprintf(dev->eq_table.eq[i].irq_name,
839 IB_DEVICE_NAME_MAX,
840 "%s@pci:%s", eq_name[i],
841 pci_name(dev->pdev));
838 err = request_irq(dev->eq_table.eq[i].msi_x_vector, 842 err = request_irq(dev->eq_table.eq[i].msi_x_vector,
839 mthca_is_memfree(dev) ? 843 mthca_is_memfree(dev) ?
840 mthca_arbel_msi_x_interrupt : 844 mthca_arbel_msi_x_interrupt :
841 mthca_tavor_msi_x_interrupt, 845 mthca_tavor_msi_x_interrupt,
842 0, eq_name[i], dev->eq_table.eq + i); 846 0, dev->eq_table.eq[i].irq_name,
847 dev->eq_table.eq + i);
843 if (err) 848 if (err)
844 goto err_out_cmd; 849 goto err_out_cmd;
845 dev->eq_table.eq[i].have_irq = 1; 850 dev->eq_table.eq[i].have_irq = 1;
846 } 851 }
847 } else { 852 } else {
853 snprintf(dev->eq_table.eq[0].irq_name, IB_DEVICE_NAME_MAX,
854 DRV_NAME "@pci:%s", pci_name(dev->pdev));
848 err = request_irq(dev->pdev->irq, 855 err = request_irq(dev->pdev->irq,
849 mthca_is_memfree(dev) ? 856 mthca_is_memfree(dev) ?
850 mthca_arbel_interrupt : 857 mthca_arbel_interrupt :
851 mthca_tavor_interrupt, 858 mthca_tavor_interrupt,
852 IRQF_SHARED, DRV_NAME, dev); 859 IRQF_SHARED, dev->eq_table.eq[0].irq_name, dev);
853 if (err) 860 if (err)
854 goto err_out_cmd; 861 goto err_out_cmd;
855 dev->eq_table.have_irq = 1; 862 dev->eq_table.have_irq = 1;
diff --git a/drivers/infiniband/hw/mthca/mthca_main.c b/drivers/infiniband/hw/mthca/mthca_main.c
index 13da9f1d24c0..b01b28987874 100644
--- a/drivers/infiniband/hw/mthca/mthca_main.c
+++ b/drivers/infiniband/hw/mthca/mthca_main.c
@@ -1116,6 +1116,8 @@ static int __mthca_init_one(struct pci_dev *pdev, int hca_type)
1116 pci_set_drvdata(pdev, mdev); 1116 pci_set_drvdata(pdev, mdev);
1117 mdev->hca_type = hca_type; 1117 mdev->hca_type = hca_type;
1118 1118
1119 mdev->active = true;
1120
1119 return 0; 1121 return 0;
1120 1122
1121err_unregister: 1123err_unregister:
@@ -1215,15 +1217,11 @@ int __mthca_restart_one(struct pci_dev *pdev)
1215static int __devinit mthca_init_one(struct pci_dev *pdev, 1217static int __devinit mthca_init_one(struct pci_dev *pdev,
1216 const struct pci_device_id *id) 1218 const struct pci_device_id *id)
1217{ 1219{
1218 static int mthca_version_printed = 0;
1219 int ret; 1220 int ret;
1220 1221
1221 mutex_lock(&mthca_device_mutex); 1222 mutex_lock(&mthca_device_mutex);
1222 1223
1223 if (!mthca_version_printed) { 1224 printk_once(KERN_INFO "%s", mthca_version);
1224 printk(KERN_INFO "%s", mthca_version);
1225 ++mthca_version_printed;
1226 }
1227 1225
1228 if (id->driver_data >= ARRAY_SIZE(mthca_hca_table)) { 1226 if (id->driver_data >= ARRAY_SIZE(mthca_hca_table)) {
1229 printk(KERN_ERR PFX "%s has invalid driver data %lx\n", 1227 printk(KERN_ERR PFX "%s has invalid driver data %lx\n",
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
index 87ad889e367b..bcf7a4014820 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.c
+++ b/drivers/infiniband/hw/mthca/mthca_provider.c
@@ -334,6 +334,9 @@ static struct ib_ucontext *mthca_alloc_ucontext(struct ib_device *ibdev,
334 struct mthca_ucontext *context; 334 struct mthca_ucontext *context;
335 int err; 335 int err;
336 336
337 if (!(to_mdev(ibdev)->active))
338 return ERR_PTR(-EAGAIN);
339
337 memset(&uresp, 0, sizeof uresp); 340 memset(&uresp, 0, sizeof uresp);
338 341
339 uresp.qp_tab_size = to_mdev(ibdev)->limits.num_qps; 342 uresp.qp_tab_size = to_mdev(ibdev)->limits.num_qps;
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.h b/drivers/infiniband/hw/mthca/mthca_provider.h
index c621f8794b88..90f4c4d2e983 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.h
+++ b/drivers/infiniband/hw/mthca/mthca_provider.h
@@ -113,6 +113,7 @@ struct mthca_eq {
113 int nent; 113 int nent;
114 struct mthca_buf_list *page_list; 114 struct mthca_buf_list *page_list;
115 struct mthca_mr mr; 115 struct mthca_mr mr;
116 char irq_name[IB_DEVICE_NAME_MAX];
116}; 117};
117 118
118struct mthca_av; 119struct mthca_av;
diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c
index f5081bfde6db..c10576fa60c1 100644
--- a/drivers/infiniband/hw/mthca/mthca_qp.c
+++ b/drivers/infiniband/hw/mthca/mthca_qp.c
@@ -1319,10 +1319,12 @@ int mthca_alloc_qp(struct mthca_dev *dev,
1319} 1319}
1320 1320
1321static void mthca_lock_cqs(struct mthca_cq *send_cq, struct mthca_cq *recv_cq) 1321static void mthca_lock_cqs(struct mthca_cq *send_cq, struct mthca_cq *recv_cq)
1322 __acquires(&send_cq->lock) __acquires(&recv_cq->lock)
1322{ 1323{
1323 if (send_cq == recv_cq) 1324 if (send_cq == recv_cq) {
1324 spin_lock_irq(&send_cq->lock); 1325 spin_lock_irq(&send_cq->lock);
1325 else if (send_cq->cqn < recv_cq->cqn) { 1326 __acquire(&recv_cq->lock);
1327 } else if (send_cq->cqn < recv_cq->cqn) {
1326 spin_lock_irq(&send_cq->lock); 1328 spin_lock_irq(&send_cq->lock);
1327 spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING); 1329 spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING);
1328 } else { 1330 } else {
@@ -1332,10 +1334,12 @@ static void mthca_lock_cqs(struct mthca_cq *send_cq, struct mthca_cq *recv_cq)
1332} 1334}
1333 1335
1334static void mthca_unlock_cqs(struct mthca_cq *send_cq, struct mthca_cq *recv_cq) 1336static void mthca_unlock_cqs(struct mthca_cq *send_cq, struct mthca_cq *recv_cq)
1337 __releases(&send_cq->lock) __releases(&recv_cq->lock)
1335{ 1338{
1336 if (send_cq == recv_cq) 1339 if (send_cq == recv_cq) {
1340 __release(&recv_cq->lock);
1337 spin_unlock_irq(&send_cq->lock); 1341 spin_unlock_irq(&send_cq->lock);
1338 else if (send_cq->cqn < recv_cq->cqn) { 1342 } else if (send_cq->cqn < recv_cq->cqn) {
1339 spin_unlock(&recv_cq->lock); 1343 spin_unlock(&recv_cq->lock);
1340 spin_unlock_irq(&send_cq->lock); 1344 spin_unlock_irq(&send_cq->lock);
1341 } else { 1345 } else {
diff --git a/drivers/infiniband/hw/mthca/mthca_reset.c b/drivers/infiniband/hw/mthca/mthca_reset.c
index acb6817f6060..2a13a163d337 100644
--- a/drivers/infiniband/hw/mthca/mthca_reset.c
+++ b/drivers/infiniband/hw/mthca/mthca_reset.c
@@ -30,7 +30,6 @@
30 * SOFTWARE. 30 * SOFTWARE.
31 */ 31 */
32 32
33#include <linux/init.h>
34#include <linux/errno.h> 33#include <linux/errno.h>
35#include <linux/pci.h> 34#include <linux/pci.h>
36#include <linux/delay.h> 35#include <linux/delay.h>
diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
index bf1720f7f35f..bcc6abc4faff 100644
--- a/drivers/infiniband/hw/nes/nes.h
+++ b/drivers/infiniband/hw/nes/nes.h
@@ -523,7 +523,7 @@ int nes_cm_disconn(struct nes_qp *);
523void nes_cm_disconn_worker(void *); 523void nes_cm_disconn_worker(void *);
524 524
525/* nes_verbs.c */ 525/* nes_verbs.c */
526int nes_hw_modify_qp(struct nes_device *, struct nes_qp *, u32, u32); 526int nes_hw_modify_qp(struct nes_device *, struct nes_qp *, u32, u32, u32);
527int nes_modify_qp(struct ib_qp *, struct ib_qp_attr *, int, struct ib_udata *); 527int nes_modify_qp(struct ib_qp *, struct ib_qp_attr *, int, struct ib_udata *);
528struct nes_ib_device *nes_init_ofa_device(struct net_device *); 528struct nes_ib_device *nes_init_ofa_device(struct net_device *);
529void nes_destroy_ofa_device(struct nes_ib_device *); 529void nes_destroy_ofa_device(struct nes_ib_device *);
diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
index 114b802771ad..73473db19863 100644
--- a/drivers/infiniband/hw/nes/nes_cm.c
+++ b/drivers/infiniband/hw/nes/nes_cm.c
@@ -2450,19 +2450,16 @@ static int nes_cm_init_tsa_conn(struct nes_qp *nesqp, struct nes_cm_node *cm_nod
2450 */ 2450 */
2451int nes_cm_disconn(struct nes_qp *nesqp) 2451int nes_cm_disconn(struct nes_qp *nesqp)
2452{ 2452{
2453 unsigned long flags; 2453 struct disconn_work *work;
2454
2455 spin_lock_irqsave(&nesqp->lock, flags);
2456 if (nesqp->disconn_pending == 0) {
2457 nesqp->disconn_pending++;
2458 spin_unlock_irqrestore(&nesqp->lock, flags);
2459 /* init our disconnect work element, to */
2460 INIT_WORK(&nesqp->disconn_work, nes_disconnect_worker);
2461 2454
2462 queue_work(g_cm_core->disconn_wq, &nesqp->disconn_work); 2455 work = kzalloc(sizeof *work, GFP_ATOMIC);
2463 } else 2456 if (!work)
2464 spin_unlock_irqrestore(&nesqp->lock, flags); 2457 return -ENOMEM; /* Timer will clean up */
2465 2458
2459 nes_add_ref(&nesqp->ibqp);
2460 work->nesqp = nesqp;
2461 INIT_WORK(&work->work, nes_disconnect_worker);
2462 queue_work(g_cm_core->disconn_wq, &work->work);
2466 return 0; 2463 return 0;
2467} 2464}
2468 2465
@@ -2472,11 +2469,14 @@ int nes_cm_disconn(struct nes_qp *nesqp)
2472 */ 2469 */
2473static void nes_disconnect_worker(struct work_struct *work) 2470static void nes_disconnect_worker(struct work_struct *work)
2474{ 2471{
2475 struct nes_qp *nesqp = container_of(work, struct nes_qp, disconn_work); 2472 struct disconn_work *dwork = container_of(work, struct disconn_work, work);
2473 struct nes_qp *nesqp = dwork->nesqp;
2476 2474
2475 kfree(dwork);
2477 nes_debug(NES_DBG_CM, "processing AEQE id 0x%04X for QP%u.\n", 2476 nes_debug(NES_DBG_CM, "processing AEQE id 0x%04X for QP%u.\n",
2478 nesqp->last_aeq, nesqp->hwqp.qp_id); 2477 nesqp->last_aeq, nesqp->hwqp.qp_id);
2479 nes_cm_disconn_true(nesqp); 2478 nes_cm_disconn_true(nesqp);
2479 nes_rem_ref(&nesqp->ibqp);
2480} 2480}
2481 2481
2482 2482
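
nes_cm_disconn() now allocates a one-shot work item with GFP_ATOMIC (this path can run in atomic context), pins the QP with nes_add_ref() so it cannot be freed while the work is queued, and returns -ENOMEM on allocation failure, leaving retry to the CM timer; the worker frees the item and drops the reference when done. A userspace analogue of the allocate/pin/queue/release pattern, with a thread standing in for the workqueue:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct qp {
        int refcount;                   /* illustrative; the driver uses atomics */
};

struct disconn_work {
        struct qp *qp;
};

static void *disconnect_worker(void *arg)
{
        struct disconn_work *work = arg;
        struct qp *qp = work->qp;

        free(work);                     /* work item freed first, as above */
        printf("disconnect qp, ref=%d\n", qp->refcount);
        qp->refcount--;                 /* drop the reference taken below */
        return NULL;
}

static int queue_disconnect(struct qp *qp)
{
        pthread_t t;
        struct disconn_work *work = calloc(1, sizeof(*work));

        if (!work)
                return -1;              /* caller retries via its timer */
        qp->refcount++;                 /* pin the QP across the async work */
        work->qp = qp;
        pthread_create(&t, NULL, disconnect_worker, work);
        pthread_join(t, NULL);          /* joined only to keep the demo simple */
        return 0;
}

int main(void)
{
        struct qp qp = { .refcount = 1 };

        return queue_disconnect(&qp);
}
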
@@ -2493,7 +2493,12 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
2493 u16 last_ae; 2493 u16 last_ae;
2494 u8 original_hw_tcp_state; 2494 u8 original_hw_tcp_state;
2495 u8 original_ibqp_state; 2495 u8 original_ibqp_state;
2496 u8 issued_disconnect_reset = 0; 2496 enum iw_cm_event_type disconn_status = IW_CM_EVENT_STATUS_OK;
2497 int issue_disconn = 0;
2498 int issue_close = 0;
2499 int issue_flush = 0;
2500 u32 flush_q = NES_CQP_FLUSH_RQ;
2501 struct ib_event ibevent;
2497 2502
2498 if (!nesqp) { 2503 if (!nesqp) {
2499 nes_debug(NES_DBG_CM, "disconnect_worker nesqp is NULL\n"); 2504 nes_debug(NES_DBG_CM, "disconnect_worker nesqp is NULL\n");
@@ -2517,24 +2522,55 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
2517 original_ibqp_state = nesqp->ibqp_state; 2522 original_ibqp_state = nesqp->ibqp_state;
2518 last_ae = nesqp->last_aeq; 2523 last_ae = nesqp->last_aeq;
2519 2524
2525 if (nesqp->term_flags) {
2526 issue_disconn = 1;
2527 issue_close = 1;
2528 nesqp->cm_id = NULL;
2529 if (nesqp->flush_issued == 0) {
2530 nesqp->flush_issued = 1;
2531 issue_flush = 1;
2532 }
2533 } else if ((original_hw_tcp_state == NES_AEQE_TCP_STATE_CLOSE_WAIT) ||
2534 ((original_ibqp_state == IB_QPS_RTS) &&
2535 (last_ae == NES_AEQE_AEID_LLP_CONNECTION_RESET))) {
2536 issue_disconn = 1;
2537 if (last_ae == NES_AEQE_AEID_LLP_CONNECTION_RESET)
2538 disconn_status = IW_CM_EVENT_STATUS_RESET;
2539 }
2540
2541 if (((original_hw_tcp_state == NES_AEQE_TCP_STATE_CLOSED) ||
2542 (original_hw_tcp_state == NES_AEQE_TCP_STATE_TIME_WAIT) ||
2543 (last_ae == NES_AEQE_AEID_RDMAP_ROE_BAD_LLP_CLOSE) ||
2544 (last_ae == NES_AEQE_AEID_LLP_CONNECTION_RESET))) {
2545 issue_close = 1;
2546 nesqp->cm_id = NULL;
2547 if (nesqp->flush_issued == 0) {
2548 nesqp->flush_issued = 1;
2549 issue_flush = 1;
2550 }
2551 }
2552
2553 spin_unlock_irqrestore(&nesqp->lock, flags);
2520 2554
2521 nes_debug(NES_DBG_CM, "set ibqp_state=%u\n", nesqp->ibqp_state); 2555 if ((issue_flush) && (nesqp->destroyed == 0)) {
2556 /* Flush the queue(s) */
2557 if (nesqp->hw_iwarp_state >= NES_AEQE_IWARP_STATE_TERMINATE)
2558 flush_q |= NES_CQP_FLUSH_SQ;
2559 flush_wqes(nesvnic->nesdev, nesqp, flush_q, 1);
2522 2560
2523 if ((nesqp->cm_id) && (cm_id->event_handler)) { 2561 if (nesqp->term_flags) {
2524 if ((original_hw_tcp_state == NES_AEQE_TCP_STATE_CLOSE_WAIT) || 2562 ibevent.device = nesqp->ibqp.device;
2525 ((original_ibqp_state == IB_QPS_RTS) && 2563 ibevent.event = nesqp->terminate_eventtype;
2526 (last_ae == NES_AEQE_AEID_LLP_CONNECTION_RESET))) { 2564 ibevent.element.qp = &nesqp->ibqp;
2565 nesqp->ibqp.event_handler(&ibevent, nesqp->ibqp.qp_context);
2566 }
2567 }
2568
2569 if ((cm_id) && (cm_id->event_handler)) {
2570 if (issue_disconn) {
2527 atomic_inc(&cm_disconnects); 2571 atomic_inc(&cm_disconnects);
2528 cm_event.event = IW_CM_EVENT_DISCONNECT; 2572 cm_event.event = IW_CM_EVENT_DISCONNECT;
2529 if (last_ae == NES_AEQE_AEID_LLP_CONNECTION_RESET) { 2573 cm_event.status = disconn_status;
2530 cm_event.status = IW_CM_EVENT_STATUS_RESET;
2531 nes_debug(NES_DBG_CM, "Generating a CM "
2532 "Disconnect Event (status reset) for "
2533 "QP%u, cm_id = %p. \n",
2534 nesqp->hwqp.qp_id, cm_id);
2535 } else
2536 cm_event.status = IW_CM_EVENT_STATUS_OK;
2537
2538 cm_event.local_addr = cm_id->local_addr; 2574 cm_event.local_addr = cm_id->local_addr;
2539 cm_event.remote_addr = cm_id->remote_addr; 2575 cm_event.remote_addr = cm_id->remote_addr;
2540 cm_event.private_data = NULL; 2576 cm_event.private_data = NULL;
@@ -2547,29 +2583,14 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
2547 nesqp->hwqp.sq_tail, cm_id, 2583 nesqp->hwqp.sq_tail, cm_id,
2548 atomic_read(&nesqp->refcount)); 2584 atomic_read(&nesqp->refcount));
2549 2585
2550 spin_unlock_irqrestore(&nesqp->lock, flags);
2551 ret = cm_id->event_handler(cm_id, &cm_event); 2586 ret = cm_id->event_handler(cm_id, &cm_event);
2552 if (ret) 2587 if (ret)
2553 nes_debug(NES_DBG_CM, "OFA CM event_handler " 2588 nes_debug(NES_DBG_CM, "OFA CM event_handler "
2554 "returned, ret=%d\n", ret); 2589 "returned, ret=%d\n", ret);
2555 spin_lock_irqsave(&nesqp->lock, flags);
2556 } 2590 }
2557 2591
2558 nesqp->disconn_pending = 0; 2592 if (issue_close) {
2559 /* There might have been another AE while the lock was released */
2560 original_hw_tcp_state = nesqp->hw_tcp_state;
2561 original_ibqp_state = nesqp->ibqp_state;
2562 last_ae = nesqp->last_aeq;
2563
2564 if ((issued_disconnect_reset == 0) && (nesqp->cm_id) &&
2565 ((original_hw_tcp_state == NES_AEQE_TCP_STATE_CLOSED) ||
2566 (original_hw_tcp_state == NES_AEQE_TCP_STATE_TIME_WAIT) ||
2567 (last_ae == NES_AEQE_AEID_RDMAP_ROE_BAD_LLP_CLOSE) ||
2568 (last_ae == NES_AEQE_AEID_LLP_CONNECTION_RESET))) {
2569 atomic_inc(&cm_closes); 2593 atomic_inc(&cm_closes);
2570 nesqp->cm_id = NULL;
2571 nesqp->in_disconnect = 0;
2572 spin_unlock_irqrestore(&nesqp->lock, flags);
2573 nes_disconnect(nesqp, 1); 2594 nes_disconnect(nesqp, 1);
2574 2595
2575 cm_id->provider_data = nesqp; 2596 cm_id->provider_data = nesqp;
@@ -2588,28 +2609,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
2588 } 2609 }
2589 2610
2590 cm_id->rem_ref(cm_id); 2611 cm_id->rem_ref(cm_id);
2591
2592 spin_lock_irqsave(&nesqp->lock, flags);
2593 if (nesqp->flush_issued == 0) {
2594 nesqp->flush_issued = 1;
2595 spin_unlock_irqrestore(&nesqp->lock, flags);
2596 flush_wqes(nesvnic->nesdev, nesqp,
2597 NES_CQP_FLUSH_RQ, 1);
2598 } else
2599 spin_unlock_irqrestore(&nesqp->lock, flags);
2600 } else {
2601 cm_id = nesqp->cm_id;
2602 spin_unlock_irqrestore(&nesqp->lock, flags);
2603 /* check to see if the inbound reset beat the outbound reset */
2604 if ((!cm_id) && (last_ae==NES_AEQE_AEID_RESET_SENT)) {
2605 nes_debug(NES_DBG_CM, "QP%u: Decing refcount "
2606 "due to inbound reset beating the "
2607 "outbound reset.\n", nesqp->hwqp.qp_id);
2608 }
2609 } 2612 }
2610 } else {
2611 nesqp->disconn_pending = 0;
2612 spin_unlock_irqrestore(&nesqp->lock, flags);
2613 } 2613 }
2614 2614
2615 return 0; 2615 return 0;
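
The nes_cm_disconn_true() rework above replaces repeated lock/unlock cycles around each callback with a single pass: issue_disconn, issue_close, issue_flush, and flush_q are all decided while nesqp->lock is held, the lock is dropped exactly once, and only then are the flushes issued and the IB/CM event handlers called, since those upcalls should never run under the QP spinlock. The idiom in miniature:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct conn {
        pthread_mutex_t lock;
        int state;                      /* stand-in for tcp/ibqp state */
};

static void handle_event(struct conn *c)
{
        bool issue_disconn, issue_close;

        /* 1. Decide everything while holding the lock. */
        pthread_mutex_lock(&c->lock);
        issue_disconn = (c->state == 1);
        issue_close = (c->state >= 1);
        pthread_mutex_unlock(&c->lock);

        /* 2. Act with the lock dropped: callbacks may block or re-enter. */
        if (issue_disconn)
                puts("disconnect callback");
        if (issue_close)
                puts("close callback");
}

int main(void)
{
        struct conn c = { PTHREAD_MUTEX_INITIALIZER, 1 };

        handle_event(&c);
        return 0;
}
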
diff --git a/drivers/infiniband/hw/nes/nes_cm.h b/drivers/infiniband/hw/nes/nes_cm.h
index 8b7e7c0e496e..90e8e4d8a5ce 100644
--- a/drivers/infiniband/hw/nes/nes_cm.h
+++ b/drivers/infiniband/hw/nes/nes_cm.h
@@ -410,8 +410,6 @@ struct nes_cm_ops {
410int schedule_nes_timer(struct nes_cm_node *, struct sk_buff *, 410int schedule_nes_timer(struct nes_cm_node *, struct sk_buff *,
411 enum nes_timer_type, int, int); 411 enum nes_timer_type, int, int);
412 412
413int nes_cm_disconn(struct nes_qp *);
414
415int nes_accept(struct iw_cm_id *, struct iw_cm_conn_param *); 413int nes_accept(struct iw_cm_id *, struct iw_cm_conn_param *);
416int nes_reject(struct iw_cm_id *, const void *, u8); 414int nes_reject(struct iw_cm_id *, const void *, u8);
417int nes_connect(struct iw_cm_id *, struct iw_cm_conn_param *); 415int nes_connect(struct iw_cm_id *, struct iw_cm_conn_param *);
diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c
index 4a84d02ece06..63a1a8e1e8a3 100644
--- a/drivers/infiniband/hw/nes/nes_hw.c
+++ b/drivers/infiniband/hw/nes/nes_hw.c
@@ -74,6 +74,8 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
74static void process_critical_error(struct nes_device *nesdev); 74static void process_critical_error(struct nes_device *nesdev);
75static void nes_process_mac_intr(struct nes_device *nesdev, u32 mac_number); 75static void nes_process_mac_intr(struct nes_device *nesdev, u32 mac_number);
76static unsigned int nes_reset_adapter_ne020(struct nes_device *nesdev, u8 *OneG_Mode); 76static unsigned int nes_reset_adapter_ne020(struct nes_device *nesdev, u8 *OneG_Mode);
77static void nes_terminate_timeout(unsigned long context);
78static void nes_terminate_start_timer(struct nes_qp *nesqp);
77 79
78#ifdef CONFIG_INFINIBAND_NES_DEBUG 80#ifdef CONFIG_INFINIBAND_NES_DEBUG
79static unsigned char *nes_iwarp_state_str[] = { 81static unsigned char *nes_iwarp_state_str[] = {
@@ -2903,6 +2905,417 @@ static void nes_cqp_ce_handler(struct nes_device *nesdev, struct nes_hw_cq *cq)
2903} 2905}
2904 2906
2905 2907
2908static u8 *locate_mpa(u8 *pkt, u32 aeq_info)
2909{
2910 u16 pkt_len;
2911
2912 if (aeq_info & NES_AEQE_Q2_DATA_ETHERNET) {
2913 /* skip over ethernet header */
2914 pkt_len = be16_to_cpu(*(u16 *)(pkt + ETH_HLEN - 2));
2915 pkt += ETH_HLEN;
2916
2917 /* Skip over IP and TCP headers */
2918 pkt += 4 * (pkt[0] & 0x0f);
2919 pkt += 4 * ((pkt[12] >> 4) & 0x0f);
2920 }
2921 return pkt;
2922}
2923
2924/* Determine if incoming error pkt is rdma layer */
2925static u32 iwarp_opcode(struct nes_qp *nesqp, u32 aeq_info)
2926{
2927 u8 *pkt;
2928 u16 *mpa;
2929 u32 opcode = 0xffffffff;
2930
2931 if (aeq_info & NES_AEQE_Q2_DATA_WRITTEN) {
2932 pkt = nesqp->hwqp.q2_vbase + BAD_FRAME_OFFSET;
2933 mpa = (u16 *)locate_mpa(pkt, aeq_info);
2934 opcode = be16_to_cpu(mpa[1]) & 0xf;
2935 }
2936
2937 return opcode;
2938}
2939
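/*
 * Illustration (not part of the patch): locate_mpa() above steps past the
 * variable-length headers using standard IPv4/TCP length fields - the IPv4
 * IHL is the low nibble of byte 0 and the TCP data offset is the high
 * nibble of byte 12, both counted in 32-bit words, so IHL = 5 means a
 * 20-byte IP header. Reduced to a self-contained helper:
 */
#include <stdint.h>

static const uint8_t *skip_ip_tcp(const uint8_t *pkt)
{
        pkt += 4 * (pkt[0] & 0x0f);             /* IPv4 header length  */
        pkt += 4 * ((pkt[12] >> 4) & 0x0f);     /* TCP data offset     */
        return pkt;                             /* MPA/DDP payload     */
}
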
2940/* Build iWARP terminate header */
2941static int nes_bld_terminate_hdr(struct nes_qp *nesqp, u16 async_event_id, u32 aeq_info)
2942{
2943 u8 *pkt = nesqp->hwqp.q2_vbase + BAD_FRAME_OFFSET;
2944 u16 ddp_seg_len;
2945 int copy_len = 0;
2946 u8 is_tagged = 0;
2947 u8 flush_code = 0;
2948 struct nes_terminate_hdr *termhdr;
2949
2950 termhdr = (struct nes_terminate_hdr *)nesqp->hwqp.q2_vbase;
2951 memset(termhdr, 0, 64);
2952
2953 if (aeq_info & NES_AEQE_Q2_DATA_WRITTEN) {
2954
2955 /* Use data from offending packet to fill in ddp & rdma hdrs */
2956 pkt = locate_mpa(pkt, aeq_info);
2957 ddp_seg_len = be16_to_cpu(*(u16 *)pkt);
2958 if (ddp_seg_len) {
2959 copy_len = 2;
2960 termhdr->hdrct = DDP_LEN_FLAG;
2961 if (pkt[2] & 0x80) {
2962 is_tagged = 1;
2963 if (ddp_seg_len >= TERM_DDP_LEN_TAGGED) {
2964 copy_len += TERM_DDP_LEN_TAGGED;
2965 termhdr->hdrct |= DDP_HDR_FLAG;
2966 }
2967 } else {
2968 if (ddp_seg_len >= TERM_DDP_LEN_UNTAGGED) {
2969 copy_len += TERM_DDP_LEN_UNTAGGED;
2970 termhdr->hdrct |= DDP_HDR_FLAG;
2971 }
2972
2973 if (ddp_seg_len >= (TERM_DDP_LEN_UNTAGGED + TERM_RDMA_LEN)) {
2974 if ((pkt[3] & RDMA_OPCODE_MASK) == RDMA_READ_REQ_OPCODE) {
2975 copy_len += TERM_RDMA_LEN;
2976 termhdr->hdrct |= RDMA_HDR_FLAG;
2977 }
2978 }
2979 }
2980 }
2981 }
2982
2983 switch (async_event_id) {
2984 case NES_AEQE_AEID_AMP_UNALLOCATED_STAG:
2985 switch (iwarp_opcode(nesqp, aeq_info)) {
2986 case IWARP_OPCODE_WRITE:
2987 flush_code = IB_WC_LOC_PROT_ERR;
2988 termhdr->layer_etype = (LAYER_DDP << 4) | DDP_TAGGED_BUFFER;
2989 termhdr->error_code = DDP_TAGGED_INV_STAG;
2990 break;
2991 default:
2992 flush_code = IB_WC_REM_ACCESS_ERR;
2993 termhdr->layer_etype = (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT;
2994 termhdr->error_code = RDMAP_INV_STAG;
2995 }
2996 break;
2997 case NES_AEQE_AEID_AMP_INVALID_STAG:
2998 flush_code = IB_WC_REM_ACCESS_ERR;
2999 termhdr->layer_etype = (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT;
3000 termhdr->error_code = RDMAP_INV_STAG;
3001 break;
3002 case NES_AEQE_AEID_AMP_BAD_QP:
3003 flush_code = IB_WC_LOC_QP_OP_ERR;
3004 termhdr->layer_etype = (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER;
3005 termhdr->error_code = DDP_UNTAGGED_INV_QN;
3006 break;
3007 case NES_AEQE_AEID_AMP_BAD_STAG_KEY:
3008 case NES_AEQE_AEID_AMP_BAD_STAG_INDEX:
3009 switch (iwarp_opcode(nesqp, aeq_info)) {
3010 case IWARP_OPCODE_SEND_INV:
3011 case IWARP_OPCODE_SEND_SE_INV:
3012 flush_code = IB_WC_REM_OP_ERR;
3013 termhdr->layer_etype = (LAYER_RDMA << 4) | RDMAP_REMOTE_OP;
3014 termhdr->error_code = RDMAP_CANT_INV_STAG;
3015 break;
3016 default:
3017 flush_code = IB_WC_REM_ACCESS_ERR;
3018 termhdr->layer_etype = (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT;
3019 termhdr->error_code = RDMAP_INV_STAG;
3020 }
3021 break;
3022 case NES_AEQE_AEID_AMP_BOUNDS_VIOLATION:
3023 if (aeq_info & (NES_AEQE_Q2_DATA_ETHERNET | NES_AEQE_Q2_DATA_MPA)) {
3024 flush_code = IB_WC_LOC_PROT_ERR;
3025 termhdr->layer_etype = (LAYER_DDP << 4) | DDP_TAGGED_BUFFER;
3026 termhdr->error_code = DDP_TAGGED_BOUNDS;
3027 } else {
3028 flush_code = IB_WC_REM_ACCESS_ERR;
3029 termhdr->layer_etype = (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT;
3030 termhdr->error_code = RDMAP_INV_BOUNDS;
3031 }
3032 break;
3033 case NES_AEQE_AEID_AMP_RIGHTS_VIOLATION:
3034 case NES_AEQE_AEID_AMP_INVALIDATE_NO_REMOTE_ACCESS_RIGHTS:
3035 case NES_AEQE_AEID_PRIV_OPERATION_DENIED:
3036 flush_code = IB_WC_REM_ACCESS_ERR;
3037 termhdr->layer_etype = (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT;
3038 termhdr->error_code = RDMAP_ACCESS;
3039 break;
3040 case NES_AEQE_AEID_AMP_TO_WRAP:
3041 flush_code = IB_WC_REM_ACCESS_ERR;
3042 termhdr->layer_etype = (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT;
3043 termhdr->error_code = RDMAP_TO_WRAP;
3044 break;
3045 case NES_AEQE_AEID_AMP_BAD_PD:
3046 switch (iwarp_opcode(nesqp, aeq_info)) {
3047 case IWARP_OPCODE_WRITE:
3048 flush_code = IB_WC_LOC_PROT_ERR;
3049 termhdr->layer_etype = (LAYER_DDP << 4) | DDP_TAGGED_BUFFER;
3050 termhdr->error_code = DDP_TAGGED_UNASSOC_STAG;
3051 break;
3052 case IWARP_OPCODE_SEND_INV:
3053 case IWARP_OPCODE_SEND_SE_INV:
3054 flush_code = IB_WC_REM_ACCESS_ERR;
3055 termhdr->layer_etype = (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT;
3056 termhdr->error_code = RDMAP_CANT_INV_STAG;
3057 break;
3058 default:
3059 flush_code = IB_WC_REM_ACCESS_ERR;
3060 termhdr->layer_etype = (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT;
3061 termhdr->error_code = RDMAP_UNASSOC_STAG;
3062 }
3063 break;
3064 case NES_AEQE_AEID_LLP_RECEIVED_MARKER_AND_LENGTH_FIELDS_DONT_MATCH:
3065 flush_code = IB_WC_LOC_LEN_ERR;
3066 termhdr->layer_etype = (LAYER_MPA << 4) | DDP_LLP;
3067 termhdr->error_code = MPA_MARKER;
3068 break;
3069 case NES_AEQE_AEID_LLP_RECEIVED_MPA_CRC_ERROR:
3070 flush_code = IB_WC_GENERAL_ERR;
3071 termhdr->layer_etype = (LAYER_MPA << 4) | DDP_LLP;
3072 termhdr->error_code = MPA_CRC;
3073 break;
3074 case NES_AEQE_AEID_LLP_SEGMENT_TOO_LARGE:
3075 case NES_AEQE_AEID_LLP_SEGMENT_TOO_SMALL:
3076 flush_code = IB_WC_LOC_LEN_ERR;
3077 termhdr->layer_etype = (LAYER_DDP << 4) | DDP_CATASTROPHIC;
3078 termhdr->error_code = DDP_CATASTROPHIC_LOCAL;
3079 break;
3080 case NES_AEQE_AEID_DDP_LCE_LOCAL_CATASTROPHIC:
3081 case NES_AEQE_AEID_DDP_NO_L_BIT:
3082 flush_code = IB_WC_FATAL_ERR;
3083 termhdr->layer_etype = (LAYER_DDP << 4) | DDP_CATASTROPHIC;
3084 termhdr->error_code = DDP_CATASTROPHIC_LOCAL;
3085 break;
3086 case NES_AEQE_AEID_DDP_INVALID_MSN_GAP_IN_MSN:
3087 case NES_AEQE_AEID_DDP_INVALID_MSN_RANGE_IS_NOT_VALID:
3088 flush_code = IB_WC_GENERAL_ERR;
3089 termhdr->layer_etype = (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER;
3090 termhdr->error_code = DDP_UNTAGGED_INV_MSN_RANGE;
3091 break;
3092 case NES_AEQE_AEID_DDP_UBE_DDP_MESSAGE_TOO_LONG_FOR_AVAILABLE_BUFFER:
3093 flush_code = IB_WC_LOC_LEN_ERR;
3094 termhdr->layer_etype = (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER;
3095 termhdr->error_code = DDP_UNTAGGED_INV_TOO_LONG;
3096 break;
3097 case NES_AEQE_AEID_DDP_UBE_INVALID_DDP_VERSION:
3098 flush_code = IB_WC_GENERAL_ERR;
3099 if (is_tagged) {
3100 termhdr->layer_etype = (LAYER_DDP << 4) | DDP_TAGGED_BUFFER;
3101 termhdr->error_code = DDP_TAGGED_INV_DDP_VER;
3102 } else {
3103 termhdr->layer_etype = (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER;
3104 termhdr->error_code = DDP_UNTAGGED_INV_DDP_VER;
3105 }
3106 break;
3107 case NES_AEQE_AEID_DDP_UBE_INVALID_MO:
3108 flush_code = IB_WC_GENERAL_ERR;
3109 termhdr->layer_etype = (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER;
3110 termhdr->error_code = DDP_UNTAGGED_INV_MO;
3111 break;
3112 case NES_AEQE_AEID_DDP_UBE_INVALID_MSN_NO_BUFFER_AVAILABLE:
3113 flush_code = IB_WC_REM_OP_ERR;
3114 termhdr->layer_etype = (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER;
3115 termhdr->error_code = DDP_UNTAGGED_INV_MSN_NO_BUF;
3116 break;
3117 case NES_AEQE_AEID_DDP_UBE_INVALID_QN:
3118 flush_code = IB_WC_GENERAL_ERR;
3119 termhdr->layer_etype = (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER;
3120 termhdr->error_code = DDP_UNTAGGED_INV_QN;
3121 break;
3122 case NES_AEQE_AEID_RDMAP_ROE_INVALID_RDMAP_VERSION:
3123 flush_code = IB_WC_GENERAL_ERR;
3124 termhdr->layer_etype = (LAYER_RDMA << 4) | RDMAP_REMOTE_OP;
3125 termhdr->error_code = RDMAP_INV_RDMAP_VER;
3126 break;
3127 case NES_AEQE_AEID_RDMAP_ROE_UNEXPECTED_OPCODE:
3128 flush_code = IB_WC_LOC_QP_OP_ERR;
3129 termhdr->layer_etype = (LAYER_RDMA << 4) | RDMAP_REMOTE_OP;
3130 termhdr->error_code = RDMAP_UNEXPECTED_OP;
3131 break;
3132 default:
3133 flush_code = IB_WC_FATAL_ERR;
3134 termhdr->layer_etype = (LAYER_RDMA << 4) | RDMAP_REMOTE_OP;
3135 termhdr->error_code = RDMAP_UNSPECIFIED;
3136 break;
3137 }
3138
3139 if (copy_len)
3140 memcpy(termhdr + 1, pkt, copy_len);
3141
3142 if ((flush_code) && ((NES_AEQE_INBOUND_RDMA & aeq_info) == 0)) {
3143 if (aeq_info & NES_AEQE_SQ)
3144 nesqp->term_sq_flush_code = flush_code;
3145 else
3146 nesqp->term_rq_flush_code = flush_code;
3147 }
3148
3149 return sizeof(struct nes_terminate_hdr) + copy_len;
3150}
3151
3152static void nes_terminate_connection(struct nes_device *nesdev, struct nes_qp *nesqp,
3153 struct nes_hw_aeqe *aeqe, enum ib_event_type eventtype)
3154{
3155 u64 context;
3156 unsigned long flags;
3157 u32 aeq_info;
3158 u16 async_event_id;
3159 u8 tcp_state;
3160 u8 iwarp_state;
3161 u32 termlen = 0;
3162 u32 mod_qp_flags = NES_CQP_QP_IWARP_STATE_TERMINATE |
3163 NES_CQP_QP_TERM_DONT_SEND_FIN;
3164 struct nes_adapter *nesadapter = nesdev->nesadapter;
3165
3166 if (nesqp->term_flags & NES_TERM_SENT)
3167 return; /* Sanity check */
3168
3169 aeq_info = le32_to_cpu(aeqe->aeqe_words[NES_AEQE_MISC_IDX]);
3170 tcp_state = (aeq_info & NES_AEQE_TCP_STATE_MASK) >> NES_AEQE_TCP_STATE_SHIFT;
3171 iwarp_state = (aeq_info & NES_AEQE_IWARP_STATE_MASK) >> NES_AEQE_IWARP_STATE_SHIFT;
3172 async_event_id = (u16)aeq_info;
3173
3174 context = (unsigned long)nesadapter->qp_table[le32_to_cpu(
3175 aeqe->aeqe_words[NES_AEQE_COMP_QP_CQ_ID_IDX]) - NES_FIRST_QPN];
3176 if (!context) {
3177 WARN_ON(!context);
3178 return;
3179 }
3180
3181 nesqp = (struct nes_qp *)(unsigned long)context;
3182 spin_lock_irqsave(&nesqp->lock, flags);
3183 nesqp->hw_iwarp_state = iwarp_state;
3184 nesqp->hw_tcp_state = tcp_state;
3185 nesqp->last_aeq = async_event_id;
3186 nesqp->terminate_eventtype = eventtype;
3187 spin_unlock_irqrestore(&nesqp->lock, flags);
3188
3189 if (nesadapter->send_term_ok)
3190 termlen = nes_bld_terminate_hdr(nesqp, async_event_id, aeq_info);
3191 else
3192 mod_qp_flags |= NES_CQP_QP_TERM_DONT_SEND_TERM_MSG;
3193
3194 nes_terminate_start_timer(nesqp);
3195 nesqp->term_flags |= NES_TERM_SENT;
3196 nes_hw_modify_qp(nesdev, nesqp, mod_qp_flags, termlen, 0);
3197}
3198
3199static void nes_terminate_send_fin(struct nes_device *nesdev,
3200 struct nes_qp *nesqp, struct nes_hw_aeqe *aeqe)
3201{
3202 u32 aeq_info;
3203 u16 async_event_id;
3204 u8 tcp_state;
3205 u8 iwarp_state;
3206 unsigned long flags;
3207
3208 aeq_info = le32_to_cpu(aeqe->aeqe_words[NES_AEQE_MISC_IDX]);
3209 tcp_state = (aeq_info & NES_AEQE_TCP_STATE_MASK) >> NES_AEQE_TCP_STATE_SHIFT;
3210 iwarp_state = (aeq_info & NES_AEQE_IWARP_STATE_MASK) >> NES_AEQE_IWARP_STATE_SHIFT;
3211 async_event_id = (u16)aeq_info;
3212
3213 spin_lock_irqsave(&nesqp->lock, flags);
3214 nesqp->hw_iwarp_state = iwarp_state;
3215 nesqp->hw_tcp_state = tcp_state;
3216 nesqp->last_aeq = async_event_id;
3217 spin_unlock_irqrestore(&nesqp->lock, flags);
3218
3219 /* Send the fin only */
3220 nes_hw_modify_qp(nesdev, nesqp, NES_CQP_QP_IWARP_STATE_TERMINATE |
3221 NES_CQP_QP_TERM_DONT_SEND_TERM_MSG, 0, 0);
3222}
3223
3224/* Cleanup after a terminate sent or received */
3225static void nes_terminate_done(struct nes_qp *nesqp, int timeout_occurred)
3226{
3227 u32 next_iwarp_state = NES_CQP_QP_IWARP_STATE_ERROR;
3228 unsigned long flags;
3229 struct nes_vnic *nesvnic = to_nesvnic(nesqp->ibqp.device);
3230 struct nes_device *nesdev = nesvnic->nesdev;
3231 u8 first_time = 0;
3232
3233 spin_lock_irqsave(&nesqp->lock, flags);
3234 if (nesqp->hte_added) {
3235 nesqp->hte_added = 0;
3236 next_iwarp_state |= NES_CQP_QP_DEL_HTE;
3237 }
3238
3239 first_time = (nesqp->term_flags & NES_TERM_DONE) == 0;
3240 nesqp->term_flags |= NES_TERM_DONE;
3241 spin_unlock_irqrestore(&nesqp->lock, flags);
3242
3243 /* Make sure we go through this only once */
3244 if (first_time) {
3245 if (timeout_occurred == 0)
3246 del_timer(&nesqp->terminate_timer);
3247 else
3248 next_iwarp_state |= NES_CQP_QP_RESET;
3249
3250 nes_hw_modify_qp(nesdev, nesqp, next_iwarp_state, 0, 0);
3251 nes_cm_disconn(nesqp);
3252 }
3253}
3254
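/*
 * Illustration (not part of the patch): nes_terminate_done() above relies
 * on a test-and-set under the QP lock so the ERROR transition, timer
 * cancel, and nes_cm_disconn() run exactly once even if the close-complete
 * AE and the terminate timeout race. The idiom in isolation:
 */
#include <stdbool.h>

#define TERM_DONE 0x1

static bool mark_done_once(unsigned int *term_flags)
{
        /* caller holds the QP lock */
        bool first_time = !(*term_flags & TERM_DONE);

        *term_flags |= TERM_DONE;
        return first_time;      /* only the first caller does the cleanup */
}
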
3255static void nes_terminate_received(struct nes_device *nesdev,
3256 struct nes_qp *nesqp, struct nes_hw_aeqe *aeqe)
3257{
3258 u32 aeq_info;
3259 u8 *pkt;
3260 u32 *mpa;
3261 u8 ddp_ctl;
3262 u8 rdma_ctl;
3263 u16 aeq_id = 0;
3264
3265 aeq_info = le32_to_cpu(aeqe->aeqe_words[NES_AEQE_MISC_IDX]);
3266 if (aeq_info & NES_AEQE_Q2_DATA_WRITTEN) {
3267 /* Terminate is not a performance path so the silicon */
3268 /* did not validate the frame - do it now */
3269 pkt = nesqp->hwqp.q2_vbase + BAD_FRAME_OFFSET;
3270 mpa = (u32 *)locate_mpa(pkt, aeq_info);
3271 ddp_ctl = (be32_to_cpu(mpa[0]) >> 8) & 0xff;
3272 rdma_ctl = be32_to_cpu(mpa[0]) & 0xff;
3273 if ((ddp_ctl & 0xc0) != 0x40)
3274 aeq_id = NES_AEQE_AEID_DDP_LCE_LOCAL_CATASTROPHIC;
3275 else if ((ddp_ctl & 0x03) != 1)
3276 aeq_id = NES_AEQE_AEID_DDP_UBE_INVALID_DDP_VERSION;
3277 else if (be32_to_cpu(mpa[2]) != 2)
3278 aeq_id = NES_AEQE_AEID_DDP_UBE_INVALID_QN;
3279 else if (be32_to_cpu(mpa[3]) != 1)
3280 aeq_id = NES_AEQE_AEID_DDP_INVALID_MSN_GAP_IN_MSN;
3281 else if (be32_to_cpu(mpa[4]) != 0)
3282 aeq_id = NES_AEQE_AEID_DDP_UBE_INVALID_MO;
3283 else if ((rdma_ctl & 0xc0) != 0x40)
3284 aeq_id = NES_AEQE_AEID_RDMAP_ROE_INVALID_RDMAP_VERSION;
3285
3286 if (aeq_id) {
3287 /* Bad terminate recvd - send back a terminate */
3288 aeq_info = (aeq_info & 0xffff0000) | aeq_id;
3289 aeqe->aeqe_words[NES_AEQE_MISC_IDX] = cpu_to_le32(aeq_info);
3290 nes_terminate_connection(nesdev, nesqp, aeqe, IB_EVENT_QP_FATAL);
3291 return;
3292 }
3293 }
3294
3295 nesqp->term_flags |= NES_TERM_RCVD;
3296 nesqp->terminate_eventtype = IB_EVENT_QP_FATAL;
3297 nes_terminate_start_timer(nesqp);
3298 nes_terminate_send_fin(nesdev, nesqp, aeqe);
3299}
3300
3301/* Timeout routine in case terminate fails to complete */
3302static void nes_terminate_timeout(unsigned long context)
3303{
3304 struct nes_qp *nesqp = (struct nes_qp *)(unsigned long)context;
3305
3306 nes_terminate_done(nesqp, 1);
3307}
3308
3309/* Set a timer in case hw cannot complete the terminate sequence */
3310static void nes_terminate_start_timer(struct nes_qp *nesqp)
3311{
3312 init_timer(&nesqp->terminate_timer);
3313 nesqp->terminate_timer.function = nes_terminate_timeout;
3314 nesqp->terminate_timer.expires = jiffies + HZ;
3315 nesqp->terminate_timer.data = (unsigned long)nesqp;
3316 add_timer(&nesqp->terminate_timer);
3317}
3318
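/*
 * Note: this 2009-era code uses the legacy init_timer() interface with an
 * unsigned long data cookie. On modern kernels (roughly v4.15 and later)
 * the same one-second terminate timer would be written with the
 * timer_setup()/from_timer() API, sketched here rather than taken from
 * the patch:
 *
 *      static void nes_terminate_timeout(struct timer_list *t)
 *      {
 *              struct nes_qp *nesqp = from_timer(nesqp, t, terminate_timer);
 *
 *              nes_terminate_done(nesqp, 1);
 *      }
 *
 *      timer_setup(&nesqp->terminate_timer, nes_terminate_timeout, 0);
 *      mod_timer(&nesqp->terminate_timer, jiffies + HZ);
 */
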
2906/** 3319/**
2907 * nes_process_iwarp_aeqe 3320 * nes_process_iwarp_aeqe
2908 */ 3321 */
@@ -2910,28 +3323,27 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
2910 struct nes_hw_aeqe *aeqe) 3323 struct nes_hw_aeqe *aeqe)
2911{ 3324{
2912 u64 context; 3325 u64 context;
2913 u64 aeqe_context = 0;
2914 unsigned long flags; 3326 unsigned long flags;
2915 struct nes_qp *nesqp; 3327 struct nes_qp *nesqp;
3328 struct nes_hw_cq *hw_cq;
3329 struct nes_cq *nescq;
2916 int resource_allocated; 3330 int resource_allocated;
2917 /* struct iw_cm_id *cm_id; */
2918 struct nes_adapter *nesadapter = nesdev->nesadapter; 3331 struct nes_adapter *nesadapter = nesdev->nesadapter;
2919 struct ib_event ibevent;
2920 /* struct iw_cm_event cm_event; */
2921 u32 aeq_info; 3332 u32 aeq_info;
2922 u32 next_iwarp_state = 0; 3333 u32 next_iwarp_state = 0;
2923 u16 async_event_id; 3334 u16 async_event_id;
2924 u8 tcp_state; 3335 u8 tcp_state;
2925 u8 iwarp_state; 3336 u8 iwarp_state;
3337 int must_disconn = 1;
3338 int must_terminate = 0;
3339 struct ib_event ibevent;
2926 3340
2927 nes_debug(NES_DBG_AEQ, "\n"); 3341 nes_debug(NES_DBG_AEQ, "\n");
2928 aeq_info = le32_to_cpu(aeqe->aeqe_words[NES_AEQE_MISC_IDX]); 3342 aeq_info = le32_to_cpu(aeqe->aeqe_words[NES_AEQE_MISC_IDX]);
2929 if ((NES_AEQE_INBOUND_RDMA&aeq_info) || (!(NES_AEQE_QP&aeq_info))) { 3343 if ((NES_AEQE_INBOUND_RDMA & aeq_info) || (!(NES_AEQE_QP & aeq_info))) {
2930 context = le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_CTXT_LOW_IDX]); 3344 context = le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_CTXT_LOW_IDX]);
2931 context += ((u64)le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_CTXT_HIGH_IDX])) << 32; 3345 context += ((u64)le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_CTXT_HIGH_IDX])) << 32;
2932 } else { 3346 } else {
2933 aeqe_context = le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_CTXT_LOW_IDX]);
2934 aeqe_context += ((u64)le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_CTXT_HIGH_IDX])) << 32;
2935 context = (unsigned long)nesadapter->qp_table[le32_to_cpu( 3347 context = (unsigned long)nesadapter->qp_table[le32_to_cpu(
2936 aeqe->aeqe_words[NES_AEQE_COMP_QP_CQ_ID_IDX]) - NES_FIRST_QPN]; 3348 aeqe->aeqe_words[NES_AEQE_COMP_QP_CQ_ID_IDX]) - NES_FIRST_QPN];
2937 BUG_ON(!context); 3349 BUG_ON(!context);
@@ -2948,7 +3360,11 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
2948 3360
2949 switch (async_event_id) { 3361 switch (async_event_id) {
2950 case NES_AEQE_AEID_LLP_FIN_RECEIVED: 3362 case NES_AEQE_AEID_LLP_FIN_RECEIVED:
2951 nesqp = *((struct nes_qp **)&context); 3363 nesqp = (struct nes_qp *)(unsigned long)context;
3364
3365 if (nesqp->term_flags)
3366 return; /* Ignore it, wait for close complete */
3367
2952 if (atomic_inc_return(&nesqp->close_timer_started) == 1) { 3368 if (atomic_inc_return(&nesqp->close_timer_started) == 1) {
2953 nesqp->cm_id->add_ref(nesqp->cm_id); 3369 nesqp->cm_id->add_ref(nesqp->cm_id);
2954 schedule_nes_timer(nesqp->cm_node, (struct sk_buff *)nesqp, 3370 schedule_nes_timer(nesqp->cm_node, (struct sk_buff *)nesqp,
@@ -2959,18 +3375,24 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
2959 nesqp->hwqp.qp_id, atomic_read(&nesqp->refcount), 3375 nesqp->hwqp.qp_id, atomic_read(&nesqp->refcount),
2960 async_event_id, nesqp->last_aeq, tcp_state); 3376 async_event_id, nesqp->last_aeq, tcp_state);
2961 } 3377 }
3378
2962 if ((tcp_state != NES_AEQE_TCP_STATE_CLOSE_WAIT) || 3379 if ((tcp_state != NES_AEQE_TCP_STATE_CLOSE_WAIT) ||
2963 (nesqp->ibqp_state != IB_QPS_RTS)) { 3380 (nesqp->ibqp_state != IB_QPS_RTS)) {
2964 /* FIN Received but tcp state or IB state moved on, 3381 /* FIN Received but tcp state or IB state moved on,
2965 should expect a close complete */ 3382 should expect a close complete */
2966 return; 3383 return;
2967 } 3384 }
3385
2968 case NES_AEQE_AEID_LLP_CLOSE_COMPLETE: 3386 case NES_AEQE_AEID_LLP_CLOSE_COMPLETE:
3387 nesqp = (struct nes_qp *)(unsigned long)context;
3388 if (nesqp->term_flags) {
3389 nes_terminate_done(nesqp, 0);
3390 return;
3391 }
3392
2969 case NES_AEQE_AEID_LLP_CONNECTION_RESET: 3393 case NES_AEQE_AEID_LLP_CONNECTION_RESET:
2970 case NES_AEQE_AEID_TERMINATE_SENT:
2971 case NES_AEQE_AEID_RDMAP_ROE_BAD_LLP_CLOSE:
2972 case NES_AEQE_AEID_RESET_SENT: 3394 case NES_AEQE_AEID_RESET_SENT:
2973 nesqp = *((struct nes_qp **)&context); 3395 nesqp = (struct nes_qp *)(unsigned long)context;
2974 if (async_event_id == NES_AEQE_AEID_RESET_SENT) { 3396 if (async_event_id == NES_AEQE_AEID_RESET_SENT) {
2975 tcp_state = NES_AEQE_TCP_STATE_CLOSED; 3397 tcp_state = NES_AEQE_TCP_STATE_CLOSED;
2976 } 3398 }
@@ -2982,12 +3404,7 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
2982 if ((tcp_state == NES_AEQE_TCP_STATE_CLOSED) || 3404 if ((tcp_state == NES_AEQE_TCP_STATE_CLOSED) ||
2983 (tcp_state == NES_AEQE_TCP_STATE_TIME_WAIT)) { 3405 (tcp_state == NES_AEQE_TCP_STATE_TIME_WAIT)) {
2984 nesqp->hte_added = 0; 3406 nesqp->hte_added = 0;
2985 spin_unlock_irqrestore(&nesqp->lock, flags); 3407 next_iwarp_state = NES_CQP_QP_IWARP_STATE_ERROR | NES_CQP_QP_DEL_HTE;
2986 nes_debug(NES_DBG_AEQ, "issuing hw modifyqp for QP%u to remove hte\n",
2987 nesqp->hwqp.qp_id);
2988 nes_hw_modify_qp(nesdev, nesqp,
2989 NES_CQP_QP_IWARP_STATE_ERROR | NES_CQP_QP_DEL_HTE, 0);
2990 spin_lock_irqsave(&nesqp->lock, flags);
2991 } 3408 }
2992 3409
2993 if ((nesqp->ibqp_state == IB_QPS_RTS) && 3410 if ((nesqp->ibqp_state == IB_QPS_RTS) &&
@@ -2999,151 +3416,106 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
2999 nesqp->hw_iwarp_state = NES_AEQE_IWARP_STATE_CLOSING; 3416 nesqp->hw_iwarp_state = NES_AEQE_IWARP_STATE_CLOSING;
3000 break; 3417 break;
3001 case NES_AEQE_IWARP_STATE_TERMINATE: 3418 case NES_AEQE_IWARP_STATE_TERMINATE:
3002 next_iwarp_state = NES_CQP_QP_IWARP_STATE_TERMINATE; 3419 must_disconn = 0; /* terminate path takes care of disconn */
3003 nesqp->hw_iwarp_state = NES_AEQE_IWARP_STATE_TERMINATE; 3420 if (nesqp->term_flags == 0)
3004 if (async_event_id == NES_AEQE_AEID_RDMAP_ROE_BAD_LLP_CLOSE) { 3421 must_terminate = 1;
3005 next_iwarp_state |= 0x02000000;
3006 nesqp->hw_tcp_state = NES_AEQE_TCP_STATE_CLOSED;
3007 }
3008 break; 3422 break;
3009 default:
3010 next_iwarp_state = 0;
3011 }
3012 spin_unlock_irqrestore(&nesqp->lock, flags);
3013 if (next_iwarp_state) {
3014 nes_debug(NES_DBG_AEQ, "issuing hw modifyqp for QP%u. next state = 0x%08X,"
3015 " also added another reference\n",
3016 nesqp->hwqp.qp_id, next_iwarp_state);
3017 nes_hw_modify_qp(nesdev, nesqp, next_iwarp_state, 0);
3018 } 3423 }
3019 nes_cm_disconn(nesqp);
3020 } else { 3424 } else {
3021 if (async_event_id == NES_AEQE_AEID_LLP_FIN_RECEIVED) { 3425 if (async_event_id == NES_AEQE_AEID_LLP_FIN_RECEIVED) {
3022 /* FIN Received but ib state not RTS, 3426 /* FIN Received but ib state not RTS,
3023 close complete will be on its way */ 3427 close complete will be on its way */
3024 spin_unlock_irqrestore(&nesqp->lock, flags); 3428 must_disconn = 0;
3025 return;
3026 }
3027 spin_unlock_irqrestore(&nesqp->lock, flags);
3028 if (async_event_id == NES_AEQE_AEID_RDMAP_ROE_BAD_LLP_CLOSE) {
3029 next_iwarp_state = NES_CQP_QP_IWARP_STATE_TERMINATE | 0x02000000;
3030 nesqp->hw_tcp_state = NES_AEQE_TCP_STATE_CLOSED;
3031 nes_debug(NES_DBG_AEQ, "issuing hw modifyqp for QP%u. next state = 0x%08X,"
3032 " also added another reference\n",
3033 nesqp->hwqp.qp_id, next_iwarp_state);
3034 nes_hw_modify_qp(nesdev, nesqp, next_iwarp_state, 0);
3035 } 3429 }
3036 nes_cm_disconn(nesqp);
3037 } 3430 }
3038 break;
3039 case NES_AEQE_AEID_LLP_TERMINATE_RECEIVED:
3040 nesqp = *((struct nes_qp **)&context);
3041 spin_lock_irqsave(&nesqp->lock, flags);
3042 nesqp->hw_iwarp_state = iwarp_state;
3043 nesqp->hw_tcp_state = tcp_state;
3044 nesqp->last_aeq = async_event_id;
3045 spin_unlock_irqrestore(&nesqp->lock, flags); 3431 spin_unlock_irqrestore(&nesqp->lock, flags);
3046 nes_debug(NES_DBG_AEQ, "Processing an NES_AEQE_AEID_LLP_TERMINATE_RECEIVED" 3432
3047 " event on QP%u \n Q2 Data:\n", 3433 if (must_terminate)
3048 nesqp->hwqp.qp_id); 3434 nes_terminate_connection(nesdev, nesqp, aeqe, IB_EVENT_QP_FATAL);
3049 if (nesqp->ibqp.event_handler) { 3435 else if (must_disconn) {
3050 ibevent.device = nesqp->ibqp.device; 3436 if (next_iwarp_state) {
3051 ibevent.element.qp = &nesqp->ibqp; 3437 nes_debug(NES_DBG_AEQ, "issuing hw modifyqp for QP%u. next state = 0x%08X\n",
3052 ibevent.event = IB_EVENT_QP_FATAL; 3438 nesqp->hwqp.qp_id, next_iwarp_state);
3053 nesqp->ibqp.event_handler(&ibevent, nesqp->ibqp.qp_context); 3439 nes_hw_modify_qp(nesdev, nesqp, next_iwarp_state, 0, 0);
3054 } 3440 }
3055 if ((tcp_state == NES_AEQE_TCP_STATE_CLOSE_WAIT) ||
3056 ((nesqp->ibqp_state == IB_QPS_RTS)&&
3057 (async_event_id == NES_AEQE_AEID_LLP_CONNECTION_RESET))) {
3058 nes_cm_disconn(nesqp); 3441 nes_cm_disconn(nesqp);
3059 } else {
3060 nesqp->in_disconnect = 0;
3061 wake_up(&nesqp->kick_waitq);
3062 } 3442 }
3063 break; 3443 break;
3064 case NES_AEQE_AEID_LLP_TOO_MANY_RETRIES: 3444
3065 nesqp = *((struct nes_qp **)&context); 3445 case NES_AEQE_AEID_TERMINATE_SENT:
3066 spin_lock_irqsave(&nesqp->lock, flags); 3446 nesqp = (struct nes_qp *)(unsigned long)context;
3067 nesqp->hw_iwarp_state = NES_AEQE_IWARP_STATE_ERROR; 3447 nes_terminate_send_fin(nesdev, nesqp, aeqe);
3068 nesqp->hw_tcp_state = NES_AEQE_TCP_STATE_CLOSED;
3069 nesqp->last_aeq = async_event_id;
3070 if (nesqp->cm_id) {
3071 nes_debug(NES_DBG_AEQ, "Processing an NES_AEQE_AEID_LLP_TOO_MANY_RETRIES"
3072 " event on QP%u, remote IP = 0x%08X \n",
3073 nesqp->hwqp.qp_id,
3074 ntohl(nesqp->cm_id->remote_addr.sin_addr.s_addr));
3075 } else {
3076 nes_debug(NES_DBG_AEQ, "Processing an NES_AEQE_AEID_LLP_TOO_MANY_RETRIES"
3077 " event on QP%u \n",
3078 nesqp->hwqp.qp_id);
3079 }
3080 spin_unlock_irqrestore(&nesqp->lock, flags);
3081 next_iwarp_state = NES_CQP_QP_IWARP_STATE_ERROR | NES_CQP_QP_RESET;
3082 nes_hw_modify_qp(nesdev, nesqp, next_iwarp_state, 0);
3083 if (nesqp->ibqp.event_handler) {
3084 ibevent.device = nesqp->ibqp.device;
3085 ibevent.element.qp = &nesqp->ibqp;
3086 ibevent.event = IB_EVENT_QP_FATAL;
3087 nesqp->ibqp.event_handler(&ibevent, nesqp->ibqp.qp_context);
3088 }
3089 break; 3448 break;
3090 case NES_AEQE_AEID_AMP_BAD_STAG_INDEX: 3449
3091 if (NES_AEQE_INBOUND_RDMA&aeq_info) { 3450 case NES_AEQE_AEID_LLP_TERMINATE_RECEIVED:
3092 nesqp = nesadapter->qp_table[le32_to_cpu( 3451 nesqp = (struct nes_qp *)(unsigned long)context;
3093 aeqe->aeqe_words[NES_AEQE_COMP_QP_CQ_ID_IDX])-NES_FIRST_QPN]; 3452 nes_terminate_received(nesdev, nesqp, aeqe);
3094 } else {
3095 /* TODO: get the actual WQE and mask off wqe index */
3096 context &= ~((u64)511);
3097 nesqp = *((struct nes_qp **)&context);
3098 }
3099 spin_lock_irqsave(&nesqp->lock, flags);
3100 nesqp->hw_iwarp_state = iwarp_state;
3101 nesqp->hw_tcp_state = tcp_state;
3102 nesqp->last_aeq = async_event_id;
3103 spin_unlock_irqrestore(&nesqp->lock, flags);
3104 nes_debug(NES_DBG_AEQ, "Processing an NES_AEQE_AEID_AMP_BAD_STAG_INDEX event on QP%u\n",
3105 nesqp->hwqp.qp_id);
3106 if (nesqp->ibqp.event_handler) {
3107 ibevent.device = nesqp->ibqp.device;
3108 ibevent.element.qp = &nesqp->ibqp;
3109 ibevent.event = IB_EVENT_QP_ACCESS_ERR;
3110 nesqp->ibqp.event_handler(&ibevent, nesqp->ibqp.qp_context);
3111 }
3112 break; 3453 break;
3454
3455 case NES_AEQE_AEID_AMP_BAD_STAG_KEY:
3456 case NES_AEQE_AEID_AMP_BAD_STAG_INDEX:
3113 case NES_AEQE_AEID_AMP_UNALLOCATED_STAG: 3457 case NES_AEQE_AEID_AMP_UNALLOCATED_STAG:
3114 nesqp = *((struct nes_qp **)&context); 3458 case NES_AEQE_AEID_AMP_INVALID_STAG:
3115 spin_lock_irqsave(&nesqp->lock, flags); 3459 case NES_AEQE_AEID_AMP_RIGHTS_VIOLATION:
3116 nesqp->hw_iwarp_state = iwarp_state; 3460 case NES_AEQE_AEID_AMP_INVALIDATE_NO_REMOTE_ACCESS_RIGHTS:
3117 nesqp->hw_tcp_state = tcp_state;
3118 nesqp->last_aeq = async_event_id;
3119 spin_unlock_irqrestore(&nesqp->lock, flags);
3120 nes_debug(NES_DBG_AEQ, "Processing an NES_AEQE_AEID_AMP_UNALLOCATED_STAG event on QP%u\n",
3121 nesqp->hwqp.qp_id);
3122 if (nesqp->ibqp.event_handler) {
3123 ibevent.device = nesqp->ibqp.device;
3124 ibevent.element.qp = &nesqp->ibqp;
3125 ibevent.event = IB_EVENT_QP_ACCESS_ERR;
3126 nesqp->ibqp.event_handler(&ibevent, nesqp->ibqp.qp_context);
3127 }
3128 break;
3129 case NES_AEQE_AEID_PRIV_OPERATION_DENIED: 3461 case NES_AEQE_AEID_PRIV_OPERATION_DENIED:
3130 nesqp = nesadapter->qp_table[le32_to_cpu(aeqe->aeqe_words 3462 case NES_AEQE_AEID_DDP_UBE_DDP_MESSAGE_TOO_LONG_FOR_AVAILABLE_BUFFER:
3131 [NES_AEQE_COMP_QP_CQ_ID_IDX])-NES_FIRST_QPN]; 3463 case NES_AEQE_AEID_AMP_BOUNDS_VIOLATION:
3132 spin_lock_irqsave(&nesqp->lock, flags); 3464 case NES_AEQE_AEID_AMP_TO_WRAP:
3133 nesqp->hw_iwarp_state = iwarp_state; 3465 nesqp = (struct nes_qp *)(unsigned long)context;
3134 nesqp->hw_tcp_state = tcp_state; 3466 nes_terminate_connection(nesdev, nesqp, aeqe, IB_EVENT_QP_ACCESS_ERR);
3135 nesqp->last_aeq = async_event_id; 3467 break;
3136 spin_unlock_irqrestore(&nesqp->lock, flags); 3468
3137 nes_debug(NES_DBG_AEQ, "Processing an NES_AEQE_AEID_PRIV_OPERATION_DENIED event on QP%u," 3469 case NES_AEQE_AEID_LLP_SEGMENT_TOO_LARGE:
3138 " nesqp = %p, AE reported %p\n", 3470 case NES_AEQE_AEID_LLP_SEGMENT_TOO_SMALL:
3139 nesqp->hwqp.qp_id, nesqp, *((struct nes_qp **)&context)); 3471 case NES_AEQE_AEID_DDP_UBE_INVALID_MO:
3140 if (nesqp->ibqp.event_handler) { 3472 case NES_AEQE_AEID_DDP_UBE_INVALID_QN:
3141 ibevent.device = nesqp->ibqp.device; 3473 nesqp = (struct nes_qp *)(unsigned long)context;
3142 ibevent.element.qp = &nesqp->ibqp; 3474 if (iwarp_opcode(nesqp, aeq_info) > IWARP_OPCODE_TERM) {
3143 ibevent.event = IB_EVENT_QP_ACCESS_ERR; 3475 aeq_info &= 0xffff0000;
3144 nesqp->ibqp.event_handler(&ibevent, nesqp->ibqp.qp_context); 3476 aeq_info |= NES_AEQE_AEID_RDMAP_ROE_UNEXPECTED_OPCODE;
3477 aeqe->aeqe_words[NES_AEQE_MISC_IDX] = cpu_to_le32(aeq_info);
3145 } 3478 }
3479
3480 case NES_AEQE_AEID_RDMAP_ROE_BAD_LLP_CLOSE:
3481 case NES_AEQE_AEID_LLP_TOO_MANY_RETRIES:
3482 case NES_AEQE_AEID_DDP_UBE_INVALID_MSN_NO_BUFFER_AVAILABLE:
3483 case NES_AEQE_AEID_LLP_RECEIVED_MPA_CRC_ERROR:
3484 case NES_AEQE_AEID_AMP_BAD_QP:
3485 case NES_AEQE_AEID_LLP_RECEIVED_MARKER_AND_LENGTH_FIELDS_DONT_MATCH:
3486 case NES_AEQE_AEID_DDP_LCE_LOCAL_CATASTROPHIC:
3487 case NES_AEQE_AEID_DDP_NO_L_BIT:
3488 case NES_AEQE_AEID_DDP_INVALID_MSN_GAP_IN_MSN:
3489 case NES_AEQE_AEID_DDP_INVALID_MSN_RANGE_IS_NOT_VALID:
3490 case NES_AEQE_AEID_DDP_UBE_INVALID_DDP_VERSION:
3491 case NES_AEQE_AEID_RDMAP_ROE_INVALID_RDMAP_VERSION:
3492 case NES_AEQE_AEID_RDMAP_ROE_UNEXPECTED_OPCODE:
3493 case NES_AEQE_AEID_AMP_BAD_PD:
3494 case NES_AEQE_AEID_AMP_FASTREG_SHARED:
3495 case NES_AEQE_AEID_AMP_FASTREG_VALID_STAG:
3496 case NES_AEQE_AEID_AMP_FASTREG_MW_STAG:
3497 case NES_AEQE_AEID_AMP_FASTREG_INVALID_RIGHTS:
3498 case NES_AEQE_AEID_AMP_FASTREG_PBL_TABLE_OVERFLOW:
3499 case NES_AEQE_AEID_AMP_FASTREG_INVALID_LENGTH:
3500 case NES_AEQE_AEID_AMP_INVALIDATE_SHARED:
3501 case NES_AEQE_AEID_AMP_INVALIDATE_MR_WITH_BOUND_WINDOWS:
3502 case NES_AEQE_AEID_AMP_MWBIND_VALID_STAG:
3503 case NES_AEQE_AEID_AMP_MWBIND_OF_MR_STAG:
3504 case NES_AEQE_AEID_AMP_MWBIND_TO_ZERO_BASED_STAG:
3505 case NES_AEQE_AEID_AMP_MWBIND_TO_MW_STAG:
3506 case NES_AEQE_AEID_AMP_MWBIND_INVALID_RIGHTS:
3507 case NES_AEQE_AEID_AMP_MWBIND_INVALID_BOUNDS:
3508 case NES_AEQE_AEID_AMP_MWBIND_TO_INVALID_PARENT:
3509 case NES_AEQE_AEID_AMP_MWBIND_BIND_DISABLED:
3510 case NES_AEQE_AEID_BAD_CLOSE:
3511 case NES_AEQE_AEID_RDMA_READ_WHILE_ORD_ZERO:
3512 case NES_AEQE_AEID_STAG_ZERO_INVALID:
3513 case NES_AEQE_AEID_ROE_INVALID_RDMA_READ_REQUEST:
3514 case NES_AEQE_AEID_ROE_INVALID_RDMA_WRITE_OR_READ_RESP:
3515 nesqp = (struct nes_qp *)(unsigned long)context;
3516 nes_terminate_connection(nesdev, nesqp, aeqe, IB_EVENT_QP_FATAL);
3146 break; 3517 break;
3518
3147 case NES_AEQE_AEID_CQ_OPERATION_ERROR: 3519 case NES_AEQE_AEID_CQ_OPERATION_ERROR:
3148 context <<= 1; 3520 context <<= 1;
3149 nes_debug(NES_DBG_AEQ, "Processing an NES_AEQE_AEID_CQ_OPERATION_ERROR event on CQ%u, %p\n", 3521 nes_debug(NES_DBG_AEQ, "Processing an NES_AEQE_AEID_CQ_OPERATION_ERROR event on CQ%u, %p\n",
@@ -3153,83 +3525,19 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
3153 if (resource_allocated) { 3525 if (resource_allocated) {
3154 printk(KERN_ERR PFX "%s: Processing an NES_AEQE_AEID_CQ_OPERATION_ERROR event on CQ%u\n", 3526 printk(KERN_ERR PFX "%s: Processing an NES_AEQE_AEID_CQ_OPERATION_ERROR event on CQ%u\n",
3155 __func__, le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_QP_CQ_ID_IDX])); 3527 __func__, le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_QP_CQ_ID_IDX]));
3528 hw_cq = (struct nes_hw_cq *)(unsigned long)context;
3529 if (hw_cq) {
3530 nescq = container_of(hw_cq, struct nes_cq, hw_cq);
3531 if (nescq->ibcq.event_handler) {
3532 ibevent.device = nescq->ibcq.device;
3533 ibevent.event = IB_EVENT_CQ_ERR;
3534 ibevent.element.cq = &nescq->ibcq;
3535 nescq->ibcq.event_handler(&ibevent, nescq->ibcq.cq_context);
3536 }
3537 }
3156 } 3538 }
3157 break; 3539 break;
3158 case NES_AEQE_AEID_DDP_UBE_DDP_MESSAGE_TOO_LONG_FOR_AVAILABLE_BUFFER: 3540
3159 nesqp = nesadapter->qp_table[le32_to_cpu(
3160 aeqe->aeqe_words[NES_AEQE_COMP_QP_CQ_ID_IDX])-NES_FIRST_QPN];
3161 spin_lock_irqsave(&nesqp->lock, flags);
3162 nesqp->hw_iwarp_state = iwarp_state;
3163 nesqp->hw_tcp_state = tcp_state;
3164 nesqp->last_aeq = async_event_id;
3165 spin_unlock_irqrestore(&nesqp->lock, flags);
3166 nes_debug(NES_DBG_AEQ, "Processing an NES_AEQE_AEID_DDP_UBE_DDP_MESSAGE_TOO_LONG"
3167 "_FOR_AVAILABLE_BUFFER event on QP%u\n",
3168 nesqp->hwqp.qp_id);
3169 if (nesqp->ibqp.event_handler) {
3170 ibevent.device = nesqp->ibqp.device;
3171 ibevent.element.qp = &nesqp->ibqp;
3172 ibevent.event = IB_EVENT_QP_ACCESS_ERR;
3173 nesqp->ibqp.event_handler(&ibevent, nesqp->ibqp.qp_context);
3174 }
3175 /* tell cm to disconnect, cm will queue work to thread */
3176 nes_cm_disconn(nesqp);
3177 break;
3178 case NES_AEQE_AEID_DDP_UBE_INVALID_MSN_NO_BUFFER_AVAILABLE:
3179 nesqp = *((struct nes_qp **)&context);
3180 spin_lock_irqsave(&nesqp->lock, flags);
3181 nesqp->hw_iwarp_state = iwarp_state;
3182 nesqp->hw_tcp_state = tcp_state;
3183 nesqp->last_aeq = async_event_id;
3184 spin_unlock_irqrestore(&nesqp->lock, flags);
3185 nes_debug(NES_DBG_AEQ, "Processing an NES_AEQE_AEID_DDP_UBE_INVALID_MSN"
3186 "_NO_BUFFER_AVAILABLE event on QP%u\n",
3187 nesqp->hwqp.qp_id);
3188 if (nesqp->ibqp.event_handler) {
3189 ibevent.device = nesqp->ibqp.device;
3190 ibevent.element.qp = &nesqp->ibqp;
3191 ibevent.event = IB_EVENT_QP_FATAL;
3192 nesqp->ibqp.event_handler(&ibevent, nesqp->ibqp.qp_context);
3193 }
3194 /* tell cm to disconnect, cm will queue work to thread */
3195 nes_cm_disconn(nesqp);
3196 break;
3197 case NES_AEQE_AEID_LLP_RECEIVED_MPA_CRC_ERROR:
3198 nesqp = *((struct nes_qp **)&context);
3199 spin_lock_irqsave(&nesqp->lock, flags);
3200 nesqp->hw_iwarp_state = iwarp_state;
3201 nesqp->hw_tcp_state = tcp_state;
3202 nesqp->last_aeq = async_event_id;
3203 spin_unlock_irqrestore(&nesqp->lock, flags);
3204 nes_debug(NES_DBG_AEQ, "Processing an NES_AEQE_AEID_LLP_RECEIVED_MPA_CRC_ERROR"
3205 " event on QP%u \n Q2 Data:\n",
3206 nesqp->hwqp.qp_id);
3207 if (nesqp->ibqp.event_handler) {
3208 ibevent.device = nesqp->ibqp.device;
3209 ibevent.element.qp = &nesqp->ibqp;
3210 ibevent.event = IB_EVENT_QP_FATAL;
3211 nesqp->ibqp.event_handler(&ibevent, nesqp->ibqp.qp_context);
3212 }
3213 /* tell cm to disconnect, cm will queue work to thread */
3214 nes_cm_disconn(nesqp);
3215 break;
3216 /* TODO: additional AEs need to be here */
3217 case NES_AEQE_AEID_AMP_BOUNDS_VIOLATION:
3218 nesqp = *((struct nes_qp **)&context);
3219 spin_lock_irqsave(&nesqp->lock, flags);
3220 nesqp->hw_iwarp_state = iwarp_state;
3221 nesqp->hw_tcp_state = tcp_state;
3222 nesqp->last_aeq = async_event_id;
3223 spin_unlock_irqrestore(&nesqp->lock, flags);
3224 if (nesqp->ibqp.event_handler) {
3225 ibevent.device = nesqp->ibqp.device;
3226 ibevent.element.qp = &nesqp->ibqp;
3227 ibevent.event = IB_EVENT_QP_ACCESS_ERR;
3228 nesqp->ibqp.event_handler(&ibevent,
3229 nesqp->ibqp.qp_context);
3230 }
3231 nes_cm_disconn(nesqp);
3232 break;
3233 default: 3541 default:
3234 nes_debug(NES_DBG_AEQ, "Processing an iWARP related AE for QP, misc = 0x%04X\n", 3542 nes_debug(NES_DBG_AEQ, "Processing an iWARP related AE for QP, misc = 0x%04X\n",
3235 async_event_id); 3543 async_event_id);
@@ -3238,7 +3546,6 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
3238 3546
3239} 3547}
3240 3548
3241
3242/** 3549/**
3243 * nes_iwarp_ce_handler 3550 * nes_iwarp_ce_handler
3244 */ 3551 */
@@ -3373,6 +3680,8 @@ void flush_wqes(struct nes_device *nesdev, struct nes_qp *nesqp,
3373{ 3680{
3374 struct nes_cqp_request *cqp_request; 3681 struct nes_cqp_request *cqp_request;
3375 struct nes_hw_cqp_wqe *cqp_wqe; 3682 struct nes_hw_cqp_wqe *cqp_wqe;
3683 u32 sq_code = (NES_IWARP_CQE_MAJOR_FLUSH << 16) | NES_IWARP_CQE_MINOR_FLUSH;
3684 u32 rq_code = (NES_IWARP_CQE_MAJOR_FLUSH << 16) | NES_IWARP_CQE_MINOR_FLUSH;
3376 int ret; 3685 int ret;
3377 3686
3378 cqp_request = nes_get_cqp_request(nesdev); 3687 cqp_request = nes_get_cqp_request(nesdev);
@@ -3389,6 +3698,24 @@ void flush_wqes(struct nes_device *nesdev, struct nes_qp *nesqp,
3389 cqp_wqe = &cqp_request->cqp_wqe; 3698 cqp_wqe = &cqp_request->cqp_wqe;
3390 nes_fill_init_cqp_wqe(cqp_wqe, nesdev); 3699 nes_fill_init_cqp_wqe(cqp_wqe, nesdev);
3391 3700
3701 /* If wqe in error was identified, set code to be put into cqe */
3702 if ((nesqp->term_sq_flush_code) && (which_wq & NES_CQP_FLUSH_SQ)) {
3703 which_wq |= NES_CQP_FLUSH_MAJ_MIN;
3704 sq_code = (CQE_MAJOR_DRV << 16) | nesqp->term_sq_flush_code;
3705 nesqp->term_sq_flush_code = 0;
3706 }
3707
3708 if ((nesqp->term_rq_flush_code) && (which_wq & NES_CQP_FLUSH_RQ)) {
3709 which_wq |= NES_CQP_FLUSH_MAJ_MIN;
3710 rq_code = (CQE_MAJOR_DRV << 16) | nesqp->term_rq_flush_code;
3711 nesqp->term_rq_flush_code = 0;
3712 }
3713
3714 if (which_wq & NES_CQP_FLUSH_MAJ_MIN) {
3715 cqp_wqe->wqe_words[NES_CQP_QP_WQE_FLUSH_SQ_CODE] = cpu_to_le32(sq_code);
3716 cqp_wqe->wqe_words[NES_CQP_QP_WQE_FLUSH_RQ_CODE] = cpu_to_le32(rq_code);
3717 }
3718
3392 cqp_wqe->wqe_words[NES_CQP_WQE_OPCODE_IDX] = 3719 cqp_wqe->wqe_words[NES_CQP_WQE_OPCODE_IDX] =
3393 cpu_to_le32(NES_CQP_FLUSH_WQES | which_wq); 3720 cpu_to_le32(NES_CQP_FLUSH_WQES | which_wq);
3394 cqp_wqe->wqe_words[NES_CQP_WQE_ID_IDX] = cpu_to_le32(nesqp->hwqp.qp_id); 3721 cqp_wqe->wqe_words[NES_CQP_WQE_ID_IDX] = cpu_to_le32(nesqp->hwqp.qp_id);
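flush_wqes() can now stamp a driver-chosen completion code into the flushed CQEs: setting NES_CQP_FLUSH_MAJ_MIN appears to tell the CQP to take the codes from WQE words 8 and 9 (NES_CQP_QP_WQE_FLUSH_SQ_CODE/RQ_CODE) instead of the default flush code. The 32-bit code packs a major code in the high half and a minor code in the low half, which is exactly how nes_poll_cq() later unpacks it:

	/* Sketch of the packing convention used above and in nes_poll_cq() */
	u32 code  = (CQE_MAJOR_DRV << 16) | nesqp->term_sq_flush_code;
	u16 major = code >> 16;		/* NES_IWARP_CQE_MAJOR_FLUSH or CQE_MAJOR_DRV */
	u16 minor = code & 0xffff;	/* flush minor code, or an ib_wc status */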
diff --git a/drivers/infiniband/hw/nes/nes_hw.h b/drivers/infiniband/hw/nes/nes_hw.h
index c3654c6383fe..f28a41ba9fa1 100644
--- a/drivers/infiniband/hw/nes/nes_hw.h
+++ b/drivers/infiniband/hw/nes/nes_hw.h
@@ -241,6 +241,7 @@ enum nes_cqp_stag_wqeword_idx {
241}; 241};
242 242
243#define NES_CQP_OP_IWARP_STATE_SHIFT 28 243#define NES_CQP_OP_IWARP_STATE_SHIFT 28
244#define NES_CQP_OP_TERMLEN_SHIFT 28
244 245
245enum nes_cqp_qp_bits { 246enum nes_cqp_qp_bits {
246 NES_CQP_QP_ARP_VALID = (1<<8), 247 NES_CQP_QP_ARP_VALID = (1<<8),
@@ -265,12 +266,16 @@ enum nes_cqp_qp_bits {
265 NES_CQP_QP_IWARP_STATE_TERMINATE = (5<<NES_CQP_OP_IWARP_STATE_SHIFT), 266 NES_CQP_QP_IWARP_STATE_TERMINATE = (5<<NES_CQP_OP_IWARP_STATE_SHIFT),
266 NES_CQP_QP_IWARP_STATE_ERROR = (6<<NES_CQP_OP_IWARP_STATE_SHIFT), 267 NES_CQP_QP_IWARP_STATE_ERROR = (6<<NES_CQP_OP_IWARP_STATE_SHIFT),
267 NES_CQP_QP_IWARP_STATE_MASK = (7<<NES_CQP_OP_IWARP_STATE_SHIFT), 268 NES_CQP_QP_IWARP_STATE_MASK = (7<<NES_CQP_OP_IWARP_STATE_SHIFT),
269 NES_CQP_QP_TERM_DONT_SEND_FIN = (1<<24),
270 NES_CQP_QP_TERM_DONT_SEND_TERM_MSG = (1<<25),
268 NES_CQP_QP_RESET = (1<<31), 271 NES_CQP_QP_RESET = (1<<31),
269}; 272};
270 273
271enum nes_cqp_qp_wqe_word_idx { 274enum nes_cqp_qp_wqe_word_idx {
272 NES_CQP_QP_WQE_CONTEXT_LOW_IDX = 6, 275 NES_CQP_QP_WQE_CONTEXT_LOW_IDX = 6,
273 NES_CQP_QP_WQE_CONTEXT_HIGH_IDX = 7, 276 NES_CQP_QP_WQE_CONTEXT_HIGH_IDX = 7,
277 NES_CQP_QP_WQE_FLUSH_SQ_CODE = 8,
278 NES_CQP_QP_WQE_FLUSH_RQ_CODE = 9,
274 NES_CQP_QP_WQE_NEW_MSS_IDX = 15, 279 NES_CQP_QP_WQE_NEW_MSS_IDX = 15,
275}; 280};
276 281
@@ -361,6 +366,7 @@ enum nes_cqp_arp_bits {
361enum nes_cqp_flush_bits { 366enum nes_cqp_flush_bits {
362 NES_CQP_FLUSH_SQ = (1<<30), 367 NES_CQP_FLUSH_SQ = (1<<30),
363 NES_CQP_FLUSH_RQ = (1<<31), 368 NES_CQP_FLUSH_RQ = (1<<31),
369 NES_CQP_FLUSH_MAJ_MIN = (1<<28),
364}; 370};
365 371
366enum nes_cqe_opcode_bits { 372enum nes_cqe_opcode_bits {
@@ -633,11 +639,14 @@ enum nes_aeqe_bits {
633 NES_AEQE_INBOUND_RDMA = (1<<19), 639 NES_AEQE_INBOUND_RDMA = (1<<19),
634 NES_AEQE_IWARP_STATE_MASK = (7<<20), 640 NES_AEQE_IWARP_STATE_MASK = (7<<20),
635 NES_AEQE_TCP_STATE_MASK = (0xf<<24), 641 NES_AEQE_TCP_STATE_MASK = (0xf<<24),
642 NES_AEQE_Q2_DATA_WRITTEN = (0x3<<28),
636 NES_AEQE_VALID = (1<<31), 643 NES_AEQE_VALID = (1<<31),
637}; 644};
638 645
639#define NES_AEQE_IWARP_STATE_SHIFT 20 646#define NES_AEQE_IWARP_STATE_SHIFT 20
640#define NES_AEQE_TCP_STATE_SHIFT 24 647#define NES_AEQE_TCP_STATE_SHIFT 24
648#define NES_AEQE_Q2_DATA_ETHERNET (1<<28)
649#define NES_AEQE_Q2_DATA_MPA (1<<29)
641 650
642enum nes_aeqe_iwarp_state { 651enum nes_aeqe_iwarp_state {
643 NES_AEQE_IWARP_STATE_NON_EXISTANT = 0, 652 NES_AEQE_IWARP_STATE_NON_EXISTANT = 0,
@@ -751,6 +760,15 @@ enum nes_iwarp_sq_wqe_bits {
751 NES_IWARP_SQ_OP_NOP = 12, 760 NES_IWARP_SQ_OP_NOP = 12,
752}; 761};
753 762
763enum nes_iwarp_cqe_major_code {
764 NES_IWARP_CQE_MAJOR_FLUSH = 1,
765 NES_IWARP_CQE_MAJOR_DRV = 0x8000
766};
767
768enum nes_iwarp_cqe_minor_code {
769 NES_IWARP_CQE_MINOR_FLUSH = 1
770};
771
754#define NES_EEPROM_READ_REQUEST (1<<16) 772#define NES_EEPROM_READ_REQUEST (1<<16)
755#define NES_MAC_ADDR_VALID (1<<20) 773#define NES_MAC_ADDR_VALID (1<<20)
756 774
@@ -1119,6 +1137,7 @@ struct nes_adapter {
1119 u8 netdev_max; /* from host nic address count in EEPROM */ 1137 u8 netdev_max; /* from host nic address count in EEPROM */
1120 u8 port_count; 1138 u8 port_count;
1121 u8 virtwq; 1139 u8 virtwq;
1140 u8 send_term_ok;
1122 u8 et_use_adaptive_rx_coalesce; 1141 u8 et_use_adaptive_rx_coalesce;
1123 u8 adapter_fcn_count; 1142 u8 adapter_fcn_count;
1124 u8 pft_mcast_map[NES_PFT_SIZE]; 1143 u8 pft_mcast_map[NES_PFT_SIZE];
@@ -1217,6 +1236,90 @@ struct nes_ib_device {
1217 u32 num_pd; 1236 u32 num_pd;
1218}; 1237};
1219 1238
1239enum nes_hdrct_flags {
1240 DDP_LEN_FLAG = 0x80,
1241 DDP_HDR_FLAG = 0x40,
1242 RDMA_HDR_FLAG = 0x20
1243};
1244
1245enum nes_term_layers {
1246 LAYER_RDMA = 0,
1247 LAYER_DDP = 1,
1248 LAYER_MPA = 2
1249};
1250
1251enum nes_term_error_types {
1252 RDMAP_CATASTROPHIC = 0,
1253 RDMAP_REMOTE_PROT = 1,
1254 RDMAP_REMOTE_OP = 2,
1255 DDP_CATASTROPHIC = 0,
1256 DDP_TAGGED_BUFFER = 1,
1257 DDP_UNTAGGED_BUFFER = 2,
1258 DDP_LLP = 3
1259};
1260
1261enum nes_term_rdma_errors {
1262 RDMAP_INV_STAG = 0x00,
1263 RDMAP_INV_BOUNDS = 0x01,
1264 RDMAP_ACCESS = 0x02,
1265 RDMAP_UNASSOC_STAG = 0x03,
1266 RDMAP_TO_WRAP = 0x04,
1267 RDMAP_INV_RDMAP_VER = 0x05,
1268 RDMAP_UNEXPECTED_OP = 0x06,
1269 RDMAP_CATASTROPHIC_LOCAL = 0x07,
1270 RDMAP_CATASTROPHIC_GLOBAL = 0x08,
1271 RDMAP_CANT_INV_STAG = 0x09,
1272 RDMAP_UNSPECIFIED = 0xff
1273};
1274
1275enum nes_term_ddp_errors {
1276 DDP_CATASTROPHIC_LOCAL = 0x00,
1277 DDP_TAGGED_INV_STAG = 0x00,
1278 DDP_TAGGED_BOUNDS = 0x01,
1279 DDP_TAGGED_UNASSOC_STAG = 0x02,
1280 DDP_TAGGED_TO_WRAP = 0x03,
1281 DDP_TAGGED_INV_DDP_VER = 0x04,
1282 DDP_UNTAGGED_INV_QN = 0x01,
1283 DDP_UNTAGGED_INV_MSN_NO_BUF = 0x02,
1284 DDP_UNTAGGED_INV_MSN_RANGE = 0x03,
1285 DDP_UNTAGGED_INV_MO = 0x04,
1286 DDP_UNTAGGED_INV_TOO_LONG = 0x05,
1287 DDP_UNTAGGED_INV_DDP_VER = 0x06
1288};
1289
1290enum nes_term_mpa_errors {
1291 MPA_CLOSED = 0x01,
1292 MPA_CRC = 0x02,
1293 MPA_MARKER = 0x03,
1294 MPA_REQ_RSP = 0x04,
1295};
1296
1297struct nes_terminate_hdr {
1298 u8 layer_etype;
1299 u8 error_code;
1300 u8 hdrct;
1301 u8 rsvd;
1302};
1303
1304/* Used to determine how to fill in terminate error codes */
1305#define IWARP_OPCODE_WRITE 0
1306#define IWARP_OPCODE_READREQ 1
1307#define IWARP_OPCODE_READRSP 2
1308#define IWARP_OPCODE_SEND 3
1309#define IWARP_OPCODE_SEND_INV 4
1310#define IWARP_OPCODE_SEND_SE 5
1311#define IWARP_OPCODE_SEND_SE_INV 6
1312#define IWARP_OPCODE_TERM 7
1313
1314/* These values are used only during terminate processing */
1315#define TERM_DDP_LEN_TAGGED 14
1316#define TERM_DDP_LEN_UNTAGGED 18
1317#define TERM_RDMA_LEN 28
1318#define RDMA_OPCODE_MASK 0x0f
1319#define RDMA_READ_REQ_OPCODE 1
1320#define BAD_FRAME_OFFSET 64
1321#define CQE_MAJOR_DRV 0x8000
1322
1220#define nes_vlan_rx vlan_hwaccel_receive_skb 1323#define nes_vlan_rx vlan_hwaccel_receive_skb
1221#define nes_netif_rx netif_receive_skb 1324#define nes_netif_rx netif_receive_skb
1222 1325
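The new enums describe the iWARP terminate message this series teaches the driver to build. A hedged sketch of how a header might be filled, assuming the layer and error-type nibbles share layer_etype as (layer << 4) | etype; the code that actually assembles the message lives in nes_hw.c and is not part of this hunk:

	struct nes_terminate_hdr hdr = {
		.layer_etype = (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER,
		.error_code  = DDP_UNTAGGED_INV_MSN_NO_BUF,
		.hdrct       = DDP_LEN_FLAG | DDP_HDR_FLAG,	/* DDP hdr + length present */
	};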
diff --git a/drivers/infiniband/hw/nes/nes_utils.c b/drivers/infiniband/hw/nes/nes_utils.c
index a282031d15c7..9687c397ce1a 100644
--- a/drivers/infiniband/hw/nes/nes_utils.c
+++ b/drivers/infiniband/hw/nes/nes_utils.c
@@ -183,6 +183,9 @@ int nes_read_eeprom_values(struct nes_device *nesdev, struct nes_adapter *nesada
183 } else if (((major_ver == 2) && (minor_ver > 21)) || ((major_ver > 2) && (major_ver != 255))) { 183 } else if (((major_ver == 2) && (minor_ver > 21)) || ((major_ver > 2) && (major_ver != 255))) {
184 nesadapter->virtwq = 1; 184 nesadapter->virtwq = 1;
185 } 185 }
186 if (((major_ver == 3) && (minor_ver >= 16)) || (major_ver > 3))
187 nesadapter->send_term_ok = 1;
188
186 nesadapter->firmware_version = (((u32)(u8)(eeprom_data>>8)) << 16) + 189 nesadapter->firmware_version = (((u32)(u8)(eeprom_data>>8)) << 16) +
187 (u32)((u8)eeprom_data); 190 (u32)((u8)eeprom_data);
188 191
@@ -548,7 +551,7 @@ struct nes_cqp_request *nes_get_cqp_request(struct nes_device *nesdev)
548 spin_unlock_irqrestore(&nesdev->cqp.lock, flags); 551 spin_unlock_irqrestore(&nesdev->cqp.lock, flags);
549 } 552 }
550 if (cqp_request == NULL) { 553 if (cqp_request == NULL) {
551 cqp_request = kzalloc(sizeof(struct nes_cqp_request), GFP_KERNEL); 554 cqp_request = kzalloc(sizeof(struct nes_cqp_request), GFP_ATOMIC);
552 if (cqp_request) { 555 if (cqp_request) {
553 cqp_request->dynamic = 1; 556 cqp_request->dynamic = 1;
554 INIT_LIST_HEAD(&cqp_request->list); 557 INIT_LIST_HEAD(&cqp_request->list);
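Two independent changes here: send_term_ok gates terminate support on firmware 3.16 or newer, and nes_get_cqp_request() switches its fallback allocation to GFP_ATOMIC. A plausible motivation for the latter, not spelled out in the diff, is that CQP requests are now also taken from contexts that must not sleep, such as AEQ/terminate processing:

	/* Sketch: GFP_KERNEL may sleep, which is unsafe under a spinlock
	 * or in interrupt context; GFP_ATOMIC fails fast instead. */
	cqp_request = kzalloc(sizeof(*cqp_request), GFP_ATOMIC);
	if (!cqp_request)
		return NULL;	/* callers must tolerate allocation failure */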
diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
index 21e0fd336cf7..a680c42d6e8c 100644
--- a/drivers/infiniband/hw/nes/nes_verbs.c
+++ b/drivers/infiniband/hw/nes/nes_verbs.c
@@ -667,15 +667,32 @@ static int nes_query_device(struct ib_device *ibdev, struct ib_device_attr *prop
667 */ 667 */
668static int nes_query_port(struct ib_device *ibdev, u8 port, struct ib_port_attr *props) 668static int nes_query_port(struct ib_device *ibdev, u8 port, struct ib_port_attr *props)
669{ 669{
670 struct nes_vnic *nesvnic = to_nesvnic(ibdev);
671 struct net_device *netdev = nesvnic->netdev;
672
670 memset(props, 0, sizeof(*props)); 673 memset(props, 0, sizeof(*props));
671 674
672 props->max_mtu = IB_MTU_2048; 675 props->max_mtu = IB_MTU_4096;
673 props->active_mtu = IB_MTU_2048; 676
677 if (netdev->mtu >= 4096)
678 props->active_mtu = IB_MTU_4096;
679 else if (netdev->mtu >= 2048)
680 props->active_mtu = IB_MTU_2048;
681 else if (netdev->mtu >= 1024)
682 props->active_mtu = IB_MTU_1024;
683 else if (netdev->mtu >= 512)
684 props->active_mtu = IB_MTU_512;
685 else
686 props->active_mtu = IB_MTU_256;
687
674 props->lid = 1; 688 props->lid = 1;
675 props->lmc = 0; 689 props->lmc = 0;
676 props->sm_lid = 0; 690 props->sm_lid = 0;
677 props->sm_sl = 0; 691 props->sm_sl = 0;
678 props->state = IB_PORT_ACTIVE; 692 if (nesvnic->linkup)
693 props->state = IB_PORT_ACTIVE;
694 else
695 props->state = IB_PORT_DOWN;
679 props->phys_state = 0; 696 props->phys_state = 0;
680 props->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_REINIT_SUP | 697 props->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_REINIT_SUP |
681 IB_PORT_VENDOR_CLASS_SUP | IB_PORT_BOOT_MGMT_SUP; 698 IB_PORT_VENDOR_CLASS_SUP | IB_PORT_BOOT_MGMT_SUP;
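nes_query_port() now reports the real link state and derives active_mtu from the netdev MTU by rounding down to the nearest IB MTU step. The same ladder written as a helper (hypothetical name, not part of the patch):

	static enum ib_mtu nes_mtu_to_ib_mtu(unsigned int mtu)
	{
		if (mtu >= 4096)
			return IB_MTU_4096;
		if (mtu >= 2048)
			return IB_MTU_2048;
		if (mtu >= 1024)
			return IB_MTU_1024;
		if (mtu >= 512)
			return IB_MTU_512;
		return IB_MTU_256;
	}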
@@ -1506,12 +1523,45 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
1506 1523
1507 1524
1508/** 1525/**
1526 * nes_clean_cq
1527 */
1528static void nes_clean_cq(struct nes_qp *nesqp, struct nes_cq *nescq)
1529{
1530 u32 cq_head;
1531 u32 lo;
1532 u32 hi;
1533 u64 u64temp;
1534 unsigned long flags = 0;
1535
1536 spin_lock_irqsave(&nescq->lock, flags);
1537
1538 cq_head = nescq->hw_cq.cq_head;
1539 while (le32_to_cpu(nescq->hw_cq.cq_vbase[cq_head].cqe_words[NES_CQE_OPCODE_IDX]) & NES_CQE_VALID) {
1540 rmb();
1541 lo = le32_to_cpu(nescq->hw_cq.cq_vbase[cq_head].cqe_words[NES_CQE_COMP_COMP_CTX_LOW_IDX]);
1542 hi = le32_to_cpu(nescq->hw_cq.cq_vbase[cq_head].cqe_words[NES_CQE_COMP_COMP_CTX_HIGH_IDX]);
1543 u64temp = (((u64)hi) << 32) | ((u64)lo);
1544 u64temp &= ~(NES_SW_CONTEXT_ALIGN-1);
1545 if (u64temp == (u64)(unsigned long)nesqp) {
1546 /* Zero the context value so cqe will be ignored */
1547 nescq->hw_cq.cq_vbase[cq_head].cqe_words[NES_CQE_COMP_COMP_CTX_LOW_IDX] = 0;
1548 nescq->hw_cq.cq_vbase[cq_head].cqe_words[NES_CQE_COMP_COMP_CTX_HIGH_IDX] = 0;
1549 }
1550
1551 if (++cq_head >= nescq->hw_cq.cq_size)
1552 cq_head = 0;
1553 }
1554
1555 spin_unlock_irqrestore(&nescq->lock, flags);
1556}
1557
1558
1559/**
1509 * nes_destroy_qp 1560 * nes_destroy_qp
1510 */ 1561 */
1511static int nes_destroy_qp(struct ib_qp *ibqp) 1562static int nes_destroy_qp(struct ib_qp *ibqp)
1512{ 1563{
1513 struct nes_qp *nesqp = to_nesqp(ibqp); 1564 struct nes_qp *nesqp = to_nesqp(ibqp);
1514 /* struct nes_vnic *nesvnic = to_nesvnic(ibqp->device); */
1515 struct nes_ucontext *nes_ucontext; 1565 struct nes_ucontext *nes_ucontext;
1516 struct ib_qp_attr attr; 1566 struct ib_qp_attr attr;
1517 struct iw_cm_id *cm_id; 1567 struct iw_cm_id *cm_id;
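nes_clean_cq() walks the valid CQEs and zeroes the completion context of any entry that points at the dying QP, so a later poll ignores them. Note how the QP pointer is recovered: the two context words form a 64-bit value whose low bits (below NES_SW_CONTEXT_ALIGN) carry per-WQE index data, so they are masked off before the compare:

	/* Sketch: recovering the aligned QP pointer from a CQE context */
	u64 ctx = (((u64)hi) << 32) | (u64)lo;
	ctx &= ~((u64)NES_SW_CONTEXT_ALIGN - 1);	/* strip index bits */
	if (ctx == (u64)(unsigned long)nesqp)
		;	/* this CQE belongs to the QP being destroyed */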
@@ -1548,7 +1598,6 @@ static int nes_destroy_qp(struct ib_qp *ibqp)
1548 nes_debug(NES_DBG_QP, "OFA CM event_handler returned, ret=%d\n", ret); 1598 nes_debug(NES_DBG_QP, "OFA CM event_handler returned, ret=%d\n", ret);
1549 } 1599 }
1550 1600
1551
1552 if (nesqp->user_mode) { 1601 if (nesqp->user_mode) {
1553 if ((ibqp->uobject)&&(ibqp->uobject->context)) { 1602 if ((ibqp->uobject)&&(ibqp->uobject->context)) {
1554 nes_ucontext = to_nesucontext(ibqp->uobject->context); 1603 nes_ucontext = to_nesucontext(ibqp->uobject->context);
@@ -1560,6 +1609,13 @@ static int nes_destroy_qp(struct ib_qp *ibqp)
1560 } 1609 }
1561 if (nesqp->pbl_pbase) 1610 if (nesqp->pbl_pbase)
1562 kunmap(nesqp->page); 1611 kunmap(nesqp->page);
1612 } else {
1613 /* Clean any pending completions from the cq(s) */
1614 if (nesqp->nesscq)
1615 nes_clean_cq(nesqp, nesqp->nesscq);
1616
1617 if ((nesqp->nesrcq) && (nesqp->nesrcq != nesqp->nesscq))
1618 nes_clean_cq(nesqp, nesqp->nesrcq);
1563 } 1619 }
1564 1620
1565 nes_rem_ref(&nesqp->ibqp); 1621 nes_rem_ref(&nesqp->ibqp);
@@ -2884,7 +2940,7 @@ static int nes_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
2884 * nes_hw_modify_qp 2940 * nes_hw_modify_qp
2885 */ 2941 */
2886int nes_hw_modify_qp(struct nes_device *nesdev, struct nes_qp *nesqp, 2942int nes_hw_modify_qp(struct nes_device *nesdev, struct nes_qp *nesqp,
2887 u32 next_iwarp_state, u32 wait_completion) 2943 u32 next_iwarp_state, u32 termlen, u32 wait_completion)
2888{ 2944{
2889 struct nes_hw_cqp_wqe *cqp_wqe; 2945 struct nes_hw_cqp_wqe *cqp_wqe;
2890 /* struct iw_cm_id *cm_id = nesqp->cm_id; */ 2946 /* struct iw_cm_id *cm_id = nesqp->cm_id; */
@@ -2916,6 +2972,13 @@ int nes_hw_modify_qp(struct nes_device *nesdev, struct nes_qp *nesqp,
2916 set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_ID_IDX, nesqp->hwqp.qp_id); 2972 set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_ID_IDX, nesqp->hwqp.qp_id);
2917 set_wqe_64bit_value(cqp_wqe->wqe_words, NES_CQP_QP_WQE_CONTEXT_LOW_IDX, (u64)nesqp->nesqp_context_pbase); 2973 set_wqe_64bit_value(cqp_wqe->wqe_words, NES_CQP_QP_WQE_CONTEXT_LOW_IDX, (u64)nesqp->nesqp_context_pbase);
2918 2974
2975 /* If sending a terminate message, fill in the length (in words) */
2976 if (((next_iwarp_state & NES_CQP_QP_IWARP_STATE_MASK) == NES_CQP_QP_IWARP_STATE_TERMINATE) &&
2977 !(next_iwarp_state & NES_CQP_QP_TERM_DONT_SEND_TERM_MSG)) {
2978 termlen = ((termlen + 3) >> 2) << NES_CQP_OP_TERMLEN_SHIFT;
2979 set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_QP_WQE_NEW_MSS_IDX, termlen);
2980 }
2981
2919 atomic_set(&cqp_request->refcount, 2); 2982 atomic_set(&cqp_request->refcount, 2);
2920 nes_post_cqp_request(nesdev, cqp_request); 2983 nes_post_cqp_request(nesdev, cqp_request);
2921 2984
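When the new state is TERMINATE and a terminate message will actually be sent, the message length rides in the upper bits of the NEW_MSS word, expressed in 32-bit words rounded up. Terminate messages are small (a handful of words, see TERM_RDMA_LEN above), so the count fits above the shift:

	/* Sketch of the length encoding used above */
	u32 words = (termlen + 3) >> 2;			/* bytes -> words, round up */
	u32 field = words << NES_CQP_OP_TERMLEN_SHIFT;	/* shift is 28 */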
@@ -3086,6 +3149,9 @@ int nes_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
3086 } 3149 }
3087 nes_debug(NES_DBG_MOD_QP, "QP%u: new state = error\n", 3150 nes_debug(NES_DBG_MOD_QP, "QP%u: new state = error\n",
3088 nesqp->hwqp.qp_id); 3151 nesqp->hwqp.qp_id);
3152 if (nesqp->term_flags)
3153 del_timer(&nesqp->terminate_timer);
3154
3089 next_iwarp_state = NES_CQP_QP_IWARP_STATE_ERROR; 3155 next_iwarp_state = NES_CQP_QP_IWARP_STATE_ERROR;
3090 /* next_iwarp_state = (NES_CQP_QP_IWARP_STATE_TERMINATE | 0x02000000); */ 3156 /* next_iwarp_state = (NES_CQP_QP_IWARP_STATE_TERMINATE | 0x02000000); */
3091 if (nesqp->hte_added) { 3157 if (nesqp->hte_added) {
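Moving a QP to ERROR now cancels any outstanding terminate timer. A sketch of the presumed timer lifecycle, assuming conventional kernel timer usage; the setup and arm sites are in other hunks of this series and the handler name is hypothetical:

	setup_timer(&nesqp->terminate_timer, nes_terminate_timeout,
		    (unsigned long)nesqp);			/* once, at QP init */
	mod_timer(&nesqp->terminate_timer, jiffies + HZ);	/* when terminate starts */
	del_timer(&nesqp->terminate_timer);			/* here, on the ERROR transition */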
@@ -3163,7 +3229,7 @@ int nes_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
3163 3229
3164 if (issue_modify_qp) { 3230 if (issue_modify_qp) {
3165 nes_debug(NES_DBG_MOD_QP, "call nes_hw_modify_qp\n"); 3231 nes_debug(NES_DBG_MOD_QP, "call nes_hw_modify_qp\n");
3166 ret = nes_hw_modify_qp(nesdev, nesqp, next_iwarp_state, 1); 3232 ret = nes_hw_modify_qp(nesdev, nesqp, next_iwarp_state, 0, 1);
3167 if (ret) 3233 if (ret)
3168 nes_debug(NES_DBG_MOD_QP, "nes_hw_modify_qp (next_iwarp_state = 0x%08X)" 3234 nes_debug(NES_DBG_MOD_QP, "nes_hw_modify_qp (next_iwarp_state = 0x%08X)"
3169 " failed for QP%u.\n", 3235 " failed for QP%u.\n",
@@ -3328,6 +3394,12 @@ static int nes_post_send(struct ib_qp *ibqp, struct ib_send_wr *ib_wr,
3328 head = nesqp->hwqp.sq_head; 3394 head = nesqp->hwqp.sq_head;
3329 3395
3330 while (ib_wr) { 3396 while (ib_wr) {
3397 /* Check for QP error */
3398 if (nesqp->term_flags) {
3399 err = -EINVAL;
3400 break;
3401 }
3402
3331 /* Check for SQ overflow */ 3403 /* Check for SQ overflow */
3332 if (((head + (2 * qsize) - nesqp->hwqp.sq_tail) % qsize) == (qsize - 1)) { 3404 if (((head + (2 * qsize) - nesqp->hwqp.sq_tail) % qsize) == (qsize - 1)) {
3333 err = -EINVAL; 3405 err = -EINVAL;
@@ -3484,6 +3556,12 @@ static int nes_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *ib_wr,
3484 head = nesqp->hwqp.rq_head; 3556 head = nesqp->hwqp.rq_head;
3485 3557
3486 while (ib_wr) { 3558 while (ib_wr) {
3559 /* Check for QP error */
3560 if (nesqp->term_flags) {
3561 err = -EINVAL;
3562 break;
3563 }
3564
3487 if (ib_wr->num_sge > nesdev->nesadapter->max_sge) { 3565 if (ib_wr->num_sge > nesdev->nesadapter->max_sge) {
3488 err = -EINVAL; 3566 err = -EINVAL;
3489 break; 3567 break;
@@ -3547,7 +3625,6 @@ static int nes_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
3547{ 3625{
3548 u64 u64temp; 3626 u64 u64temp;
3549 u64 wrid; 3627 u64 wrid;
3550 /* u64 u64temp; */
3551 unsigned long flags = 0; 3628 unsigned long flags = 0;
3552 struct nes_vnic *nesvnic = to_nesvnic(ibcq->device); 3629 struct nes_vnic *nesvnic = to_nesvnic(ibcq->device);
3553 struct nes_device *nesdev = nesvnic->nesdev; 3630 struct nes_device *nesdev = nesvnic->nesdev;
@@ -3555,12 +3632,13 @@ static int nes_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
3555 struct nes_qp *nesqp; 3632 struct nes_qp *nesqp;
3556 struct nes_hw_cqe cqe; 3633 struct nes_hw_cqe cqe;
3557 u32 head; 3634 u32 head;
3558 u32 wq_tail; 3635 u32 wq_tail = 0;
3559 u32 cq_size; 3636 u32 cq_size;
3560 u32 cqe_count = 0; 3637 u32 cqe_count = 0;
3561 u32 wqe_index; 3638 u32 wqe_index;
3562 u32 u32temp; 3639 u32 u32temp;
3563 /* u32 counter; */ 3640 u32 move_cq_head = 1;
3641 u32 err_code;
3564 3642
3565 nes_debug(NES_DBG_CQ, "\n"); 3643 nes_debug(NES_DBG_CQ, "\n");
3566 3644
@@ -3570,29 +3648,40 @@ static int nes_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
3570 cq_size = nescq->hw_cq.cq_size; 3648 cq_size = nescq->hw_cq.cq_size;
3571 3649
3572 while (cqe_count < num_entries) { 3650 while (cqe_count < num_entries) {
3573 if (le32_to_cpu(nescq->hw_cq.cq_vbase[head].cqe_words[NES_CQE_OPCODE_IDX]) & 3651 if ((le32_to_cpu(nescq->hw_cq.cq_vbase[head].cqe_words[NES_CQE_OPCODE_IDX]) &
3574 NES_CQE_VALID) { 3652 NES_CQE_VALID) == 0)
3575 /* 3653 break;
3576 * Make sure we read CQ entry contents *after* 3654
3577 * we've checked the valid bit. 3655 /*
3578 */ 3656 * Make sure we read CQ entry contents *after*
3579 rmb(); 3657 * we've checked the valid bit.
3580 3658 */
3581 cqe = nescq->hw_cq.cq_vbase[head]; 3659 rmb();
3582 nescq->hw_cq.cq_vbase[head].cqe_words[NES_CQE_OPCODE_IDX] = 0; 3660
3583 u32temp = le32_to_cpu(cqe.cqe_words[NES_CQE_COMP_COMP_CTX_LOW_IDX]); 3661 cqe = nescq->hw_cq.cq_vbase[head];
3584 wqe_index = u32temp & 3662 u32temp = le32_to_cpu(cqe.cqe_words[NES_CQE_COMP_COMP_CTX_LOW_IDX]);
3585 (nesdev->nesadapter->max_qp_wr - 1); 3663 wqe_index = u32temp & (nesdev->nesadapter->max_qp_wr - 1);
3586 u32temp &= ~(NES_SW_CONTEXT_ALIGN-1); 3664 u32temp &= ~(NES_SW_CONTEXT_ALIGN-1);
3587 /* parse CQE, get completion context from WQE (either rq or sq */ 3665 /* parse CQE, get completion context from WQE (either rq or sq) */
3588 u64temp = (((u64)(le32_to_cpu(cqe.cqe_words[NES_CQE_COMP_COMP_CTX_HIGH_IDX])))<<32) | 3666 u64temp = (((u64)(le32_to_cpu(cqe.cqe_words[NES_CQE_COMP_COMP_CTX_HIGH_IDX])))<<32) |
3589 ((u64)u32temp); 3667 ((u64)u32temp);
3590 nesqp = *((struct nes_qp **)&u64temp); 3668
3669 if (u64temp) {
3670 nesqp = (struct nes_qp *)(unsigned long)u64temp;
3591 memset(entry, 0, sizeof *entry); 3671 memset(entry, 0, sizeof *entry);
3592 if (cqe.cqe_words[NES_CQE_ERROR_CODE_IDX] == 0) { 3672 if (cqe.cqe_words[NES_CQE_ERROR_CODE_IDX] == 0) {
3593 entry->status = IB_WC_SUCCESS; 3673 entry->status = IB_WC_SUCCESS;
3594 } else { 3674 } else {
3595 entry->status = IB_WC_WR_FLUSH_ERR; 3675 err_code = le32_to_cpu(cqe.cqe_words[NES_CQE_ERROR_CODE_IDX]);
3676 if (NES_IWARP_CQE_MAJOR_DRV == (err_code >> 16)) {
3677 entry->status = err_code & 0x0000ffff;
3678
3679 /* The rest of the cqe's will be marked as flushed */
3680 nescq->hw_cq.cq_vbase[head].cqe_words[NES_CQE_ERROR_CODE_IDX] =
3681 cpu_to_le32((NES_IWARP_CQE_MAJOR_FLUSH << 16) |
3682 NES_IWARP_CQE_MINOR_FLUSH);
3683 } else
3684 entry->status = IB_WC_WR_FLUSH_ERR;
3596 } 3685 }
3597 3686
3598 entry->qp = &nesqp->ibqp; 3687 entry->qp = &nesqp->ibqp;
@@ -3601,20 +3690,18 @@ static int nes_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
3601 if (le32_to_cpu(cqe.cqe_words[NES_CQE_OPCODE_IDX]) & NES_CQE_SQ) { 3690 if (le32_to_cpu(cqe.cqe_words[NES_CQE_OPCODE_IDX]) & NES_CQE_SQ) {
3602 if (nesqp->skip_lsmm) { 3691 if (nesqp->skip_lsmm) {
3603 nesqp->skip_lsmm = 0; 3692 nesqp->skip_lsmm = 0;
3604 wq_tail = nesqp->hwqp.sq_tail++; 3693 nesqp->hwqp.sq_tail++;
3605 } 3694 }
3606 3695
3607 /* Working on a SQ Completion*/ 3696 /* Working on a SQ Completion*/
3608 wq_tail = wqe_index; 3697 wrid = (((u64)(cpu_to_le32((u32)nesqp->hwqp.sq_vbase[wqe_index].
3609 nesqp->hwqp.sq_tail = (wqe_index+1)&(nesqp->hwqp.sq_size - 1);
3610 wrid = (((u64)(cpu_to_le32((u32)nesqp->hwqp.sq_vbase[wq_tail].
3611 wqe_words[NES_IWARP_SQ_WQE_COMP_SCRATCH_HIGH_IDX]))) << 32) | 3698 wqe_words[NES_IWARP_SQ_WQE_COMP_SCRATCH_HIGH_IDX]))) << 32) |
3612 ((u64)(cpu_to_le32((u32)nesqp->hwqp.sq_vbase[wq_tail]. 3699 ((u64)(cpu_to_le32((u32)nesqp->hwqp.sq_vbase[wqe_index].
3613 wqe_words[NES_IWARP_SQ_WQE_COMP_SCRATCH_LOW_IDX]))); 3700 wqe_words[NES_IWARP_SQ_WQE_COMP_SCRATCH_LOW_IDX])));
3614 entry->byte_len = le32_to_cpu(nesqp->hwqp.sq_vbase[wq_tail]. 3701 entry->byte_len = le32_to_cpu(nesqp->hwqp.sq_vbase[wqe_index].
3615 wqe_words[NES_IWARP_SQ_WQE_TOTAL_PAYLOAD_IDX]); 3702 wqe_words[NES_IWARP_SQ_WQE_TOTAL_PAYLOAD_IDX]);
3616 3703
3617 switch (le32_to_cpu(nesqp->hwqp.sq_vbase[wq_tail]. 3704 switch (le32_to_cpu(nesqp->hwqp.sq_vbase[wqe_index].
3618 wqe_words[NES_IWARP_SQ_WQE_MISC_IDX]) & 0x3f) { 3705 wqe_words[NES_IWARP_SQ_WQE_MISC_IDX]) & 0x3f) {
3619 case NES_IWARP_SQ_OP_RDMAW: 3706 case NES_IWARP_SQ_OP_RDMAW:
3620 nes_debug(NES_DBG_CQ, "Operation = RDMA WRITE.\n"); 3707 nes_debug(NES_DBG_CQ, "Operation = RDMA WRITE.\n");
@@ -3623,7 +3710,7 @@ static int nes_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
3623 case NES_IWARP_SQ_OP_RDMAR: 3710 case NES_IWARP_SQ_OP_RDMAR:
3624 nes_debug(NES_DBG_CQ, "Operation = RDMA READ.\n"); 3711 nes_debug(NES_DBG_CQ, "Operation = RDMA READ.\n");
3625 entry->opcode = IB_WC_RDMA_READ; 3712 entry->opcode = IB_WC_RDMA_READ;
3626 entry->byte_len = le32_to_cpu(nesqp->hwqp.sq_vbase[wq_tail]. 3713 entry->byte_len = le32_to_cpu(nesqp->hwqp.sq_vbase[wqe_index].
3627 wqe_words[NES_IWARP_SQ_WQE_RDMA_LENGTH_IDX]); 3714 wqe_words[NES_IWARP_SQ_WQE_RDMA_LENGTH_IDX]);
3628 break; 3715 break;
3629 case NES_IWARP_SQ_OP_SENDINV: 3716 case NES_IWARP_SQ_OP_SENDINV:
@@ -3634,33 +3721,54 @@ static int nes_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
3634 entry->opcode = IB_WC_SEND; 3721 entry->opcode = IB_WC_SEND;
3635 break; 3722 break;
3636 } 3723 }
3724
3725 nesqp->hwqp.sq_tail = (wqe_index+1)&(nesqp->hwqp.sq_size - 1);
3726 if ((entry->status != IB_WC_SUCCESS) && (nesqp->hwqp.sq_tail != nesqp->hwqp.sq_head)) {
3727 move_cq_head = 0;
3728 wq_tail = nesqp->hwqp.sq_tail;
3729 }
3637 } else { 3730 } else {
3638 /* Working on a RQ Completion*/ 3731 /* Working on a RQ Completion*/
3639 wq_tail = wqe_index;
3640 nesqp->hwqp.rq_tail = (wqe_index+1)&(nesqp->hwqp.rq_size - 1);
3641 entry->byte_len = le32_to_cpu(cqe.cqe_words[NES_CQE_PAYLOAD_LENGTH_IDX]); 3732 entry->byte_len = le32_to_cpu(cqe.cqe_words[NES_CQE_PAYLOAD_LENGTH_IDX]);
3642 wrid = ((u64)(le32_to_cpu(nesqp->hwqp.rq_vbase[wq_tail].wqe_words[NES_IWARP_RQ_WQE_COMP_SCRATCH_LOW_IDX]))) | 3733 wrid = ((u64)(le32_to_cpu(nesqp->hwqp.rq_vbase[wqe_index].wqe_words[NES_IWARP_RQ_WQE_COMP_SCRATCH_LOW_IDX]))) |
3643 ((u64)(le32_to_cpu(nesqp->hwqp.rq_vbase[wq_tail].wqe_words[NES_IWARP_RQ_WQE_COMP_SCRATCH_HIGH_IDX]))<<32); 3734 ((u64)(le32_to_cpu(nesqp->hwqp.rq_vbase[wqe_index].wqe_words[NES_IWARP_RQ_WQE_COMP_SCRATCH_HIGH_IDX]))<<32);
3644 entry->opcode = IB_WC_RECV; 3735 entry->opcode = IB_WC_RECV;
3736
3737 nesqp->hwqp.rq_tail = (wqe_index+1)&(nesqp->hwqp.rq_size - 1);
3738 if ((entry->status != IB_WC_SUCCESS) && (nesqp->hwqp.rq_tail != nesqp->hwqp.rq_head)) {
3739 move_cq_head = 0;
3740 wq_tail = nesqp->hwqp.rq_tail;
3741 }
3645 } 3742 }
3743
3646 entry->wr_id = wrid; 3744 entry->wr_id = wrid;
3745 entry++;
3746 cqe_count++;
3747 }
3647 3748
3749 if (move_cq_head) {
3750 nescq->hw_cq.cq_vbase[head].cqe_words[NES_CQE_OPCODE_IDX] = 0;
3648 if (++head >= cq_size) 3751 if (++head >= cq_size)
3649 head = 0; 3752 head = 0;
3650 cqe_count++;
3651 nescq->polled_completions++; 3753 nescq->polled_completions++;
3754
3652 if ((nescq->polled_completions > (cq_size / 2)) || 3755 if ((nescq->polled_completions > (cq_size / 2)) ||
3653 (nescq->polled_completions == 255)) { 3756 (nescq->polled_completions == 255)) {
3654 nes_debug(NES_DBG_CQ, "CQ%u Issuing CQE Allocate since more than half of cqes" 3757 nes_debug(NES_DBG_CQ, "CQ%u Issuing CQE Allocate since more than half of cqes"
3655 " are pending %u of %u.\n", 3758 " are pending %u of %u.\n",
3656 nescq->hw_cq.cq_number, nescq->polled_completions, cq_size); 3759 nescq->hw_cq.cq_number, nescq->polled_completions, cq_size);
3657 nes_write32(nesdev->regs+NES_CQE_ALLOC, 3760 nes_write32(nesdev->regs+NES_CQE_ALLOC,
3658 nescq->hw_cq.cq_number | (nescq->polled_completions << 16)); 3761 nescq->hw_cq.cq_number | (nescq->polled_completions << 16));
3659 nescq->polled_completions = 0; 3762 nescq->polled_completions = 0;
3660 } 3763 }
3661 entry++; 3764 } else {
3662 } else 3765 /* Update the wqe index and set status to flush */
3663 break; 3766 wqe_index = le32_to_cpu(cqe.cqe_words[NES_CQE_COMP_COMP_CTX_LOW_IDX]);
3767 wqe_index = (wqe_index & (~(nesdev->nesadapter->max_qp_wr - 1))) | wq_tail;
3768 nescq->hw_cq.cq_vbase[head].cqe_words[NES_CQE_COMP_COMP_CTX_LOW_IDX] =
3769 cpu_to_le32(wqe_index);
3770 move_cq_head = 1; /* ready for next pass */
3771 }
3664 } 3772 }
3665 3773
3666 if (nescq->polled_completions) { 3774 if (nescq->polled_completions) {
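The poll loop rework changes when the CQ head may advance: on a successful completion, or once a flushed queue has fully drained (wq_tail caught up with the head), the CQE is consumed as before; otherwise the CQ head stays put and the CQE's low context word is rewritten so the same slot reports the next pending WQE on a later poll. The retarget step in isolation (helper name illustrative; in the patch the logic is inline):

	/* Sketch: re-aim a flushed CQE at the advanced wq_tail */
	static void nes_retarget_cqe(struct nes_hw_cqe *cqe, u32 max_qp_wr,
				     u32 wq_tail)
	{
		u32 ctx = le32_to_cpu(cqe->cqe_words[NES_CQE_COMP_COMP_CTX_LOW_IDX]);

		ctx = (ctx & ~(max_qp_wr - 1)) | wq_tail;	/* keep pointer bits */
		cqe->cqe_words[NES_CQE_COMP_COMP_CTX_LOW_IDX] = cpu_to_le32(ctx);
	}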
diff --git a/drivers/infiniband/hw/nes/nes_verbs.h b/drivers/infiniband/hw/nes/nes_verbs.h
index 41c07f29f7c9..89822d75f82e 100644
--- a/drivers/infiniband/hw/nes/nes_verbs.h
+++ b/drivers/infiniband/hw/nes/nes_verbs.h
@@ -40,6 +40,10 @@ struct nes_device;
40#define NES_MAX_USER_DB_REGIONS 4096 40#define NES_MAX_USER_DB_REGIONS 4096
41#define NES_MAX_USER_WQ_REGIONS 4096 41#define NES_MAX_USER_WQ_REGIONS 4096
42 42
43#define NES_TERM_SENT 0x01
44#define NES_TERM_RCVD 0x02
45#define NES_TERM_DONE 0x04
46
43struct nes_ucontext { 47struct nes_ucontext {
44 struct ib_ucontext ibucontext; 48 struct ib_ucontext ibucontext;
45 struct nes_device *nesdev; 49 struct nes_device *nesdev;
@@ -119,6 +123,11 @@ struct nes_wq {
119 spinlock_t lock; 123 spinlock_t lock;
120}; 124};
121 125
126struct disconn_work {
127 struct work_struct work;
128 struct nes_qp *nesqp;
129};
130
122struct iw_cm_id; 131struct iw_cm_id;
123struct ietf_mpa_frame; 132struct ietf_mpa_frame;
124 133
@@ -127,7 +136,6 @@ struct nes_qp {
127 void *allocated_buffer; 136 void *allocated_buffer;
128 struct iw_cm_id *cm_id; 137 struct iw_cm_id *cm_id;
129 struct workqueue_struct *wq; 138 struct workqueue_struct *wq;
130 struct work_struct disconn_work;
131 struct nes_cq *nesscq; 139 struct nes_cq *nesscq;
132 struct nes_cq *nesrcq; 140 struct nes_cq *nesrcq;
133 struct nes_pd *nespd; 141 struct nes_pd *nespd;
@@ -155,9 +163,13 @@ struct nes_qp {
155 void *pbl_vbase; 163 void *pbl_vbase;
156 dma_addr_t pbl_pbase; 164 dma_addr_t pbl_pbase;
157 struct page *page; 165 struct page *page;
166 struct timer_list terminate_timer;
167 enum ib_event_type terminate_eventtype;
158 wait_queue_head_t kick_waitq; 168 wait_queue_head_t kick_waitq;
159 u16 in_disconnect; 169 u16 in_disconnect;
160 u16 private_data_len; 170 u16 private_data_len;
171 u16 term_sq_flush_code;
172 u16 term_rq_flush_code;
161 u8 active_conn; 173 u8 active_conn;
162 u8 skip_lsmm; 174 u8 skip_lsmm;
163 u8 user_mode; 175 u8 user_mode;
@@ -165,7 +177,7 @@ struct nes_qp {
165 u8 hw_iwarp_state; 177 u8 hw_iwarp_state;
166 u8 flush_issued; 178 u8 flush_issued;
167 u8 hw_tcp_state; 179 u8 hw_tcp_state;
168 u8 disconn_pending; 180 u8 term_flags;
169 u8 destroyed; 181 u8 destroyed;
170}; 182};
171#endif /* NES_VERBS_H */ 183#endif /* NES_VERBS_H */
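The nes_verbs.h changes replace the per-QP disconnect bookkeeping with terminate state: the disconnect work item moves out of struct nes_qp into a standalone struct disconn_work, disconn_pending becomes a small term_flags bitmask (NES_TERM_SENT/RCVD/DONE), and the QP gains a terminate timer plus the SQ/RQ flush codes consumed by flush_wqes() above. A hedged sketch of how the bits compose; the exact call sites are in other hunks of the series:

	nesqp->term_flags |= NES_TERM_SENT;		/* our terminate queued to hw */
	nesqp->term_flags |= NES_TERM_RCVD;		/* peer's terminate seen */
	if (nesqp->term_flags & NES_TERM_DONE)
		del_timer(&nesqp->terminate_timer);	/* exchange finished */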
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index 181b1f32325f..8f4b4fca2a1d 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -31,7 +31,6 @@
31 */ 31 */
32 32
33#include <rdma/ib_cm.h> 33#include <rdma/ib_cm.h>
34#include <rdma/ib_cache.h>
35#include <net/dst.h> 34#include <net/dst.h>
36#include <net/icmp.h> 35#include <net/icmp.h>
37#include <linux/icmpv6.h> 36#include <linux/icmpv6.h>
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index e7e5adf84e84..e35f4a0ea9d5 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -36,7 +36,6 @@
36#include <linux/delay.h> 36#include <linux/delay.h>
37#include <linux/dma-mapping.h> 37#include <linux/dma-mapping.h>
38 38
39#include <rdma/ib_cache.h>
40#include <linux/ip.h> 39#include <linux/ip.h>
41#include <linux/tcp.h> 40#include <linux/tcp.h>
42 41
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index e319d91f60a6..2bf5116deec4 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -604,8 +604,11 @@ static void neigh_add_path(struct sk_buff *skb, struct net_device *dev)
604 skb_queue_len(&neigh->queue)); 604 skb_queue_len(&neigh->queue));
605 goto err_drop; 605 goto err_drop;
606 } 606 }
607 } else 607 } else {
608 spin_unlock_irqrestore(&priv->lock, flags);
608 ipoib_send(dev, skb, path->ah, IPOIB_QPN(skb_dst(skb)->neighbour->ha)); 609 ipoib_send(dev, skb, path->ah, IPOIB_QPN(skb_dst(skb)->neighbour->ha));
610 return;
611 }
609 } else { 612 } else {
610 neigh->ah = NULL; 613 neigh->ah = NULL;
611 614
@@ -688,7 +691,9 @@ static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
688 ipoib_dbg(priv, "Send unicast ARP to %04x\n", 691 ipoib_dbg(priv, "Send unicast ARP to %04x\n",
689 be16_to_cpu(path->pathrec.dlid)); 692 be16_to_cpu(path->pathrec.dlid));
690 693
694 spin_unlock_irqrestore(&priv->lock, flags);
691 ipoib_send(dev, skb, path->ah, IPOIB_QPN(phdr->hwaddr)); 695 ipoib_send(dev, skb, path->ah, IPOIB_QPN(phdr->hwaddr));
696 return;
692 } else if ((path->query || !path_rec_start(dev, path)) && 697 } else if ((path->query || !path_rec_start(dev, path)) &&
693 skb_queue_len(&path->queue) < IPOIB_MAX_PATH_REC_QUEUE) { 698 skb_queue_len(&path->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
694 /* put pseudoheader back on for next time */ 699 /* put pseudoheader back on for next time */
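Both ipoib_main.c hunks, and the ipoib_multicast.c hunk below, apply the same fix: drop priv->lock before calling ipoib_send() and return, instead of sending with the lock held. ipoib_send() does real work (posting sends, reaping completions) and may take further locks, so calling it under priv->lock is a deadlock hazard; the pattern in each case is:

	spin_unlock_irqrestore(&priv->lock, flags);
	ipoib_send(dev, skb, path->ah, IPOIB_QPN(phdr->hwaddr));
	return;		/* do not fall through to code that expects the lock */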
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index a0e97532e714..25874fc680c9 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -720,7 +720,9 @@ out:
720 } 720 }
721 } 721 }
722 722
723 spin_unlock_irqrestore(&priv->lock, flags);
723 ipoib_send(dev, skb, mcast->ah, IB_MULTICAST_QPN); 724 ipoib_send(dev, skb, mcast->ah, IB_MULTICAST_QPN);
725 return;
724 } 726 }
725 727
726unlock: 728unlock:
@@ -758,6 +760,20 @@ void ipoib_mcast_dev_flush(struct net_device *dev)
758 } 760 }
759} 761}
760 762
763static int ipoib_mcast_addr_is_valid(const u8 *addr, unsigned int addrlen,
764 const u8 *broadcast)
765{
766 if (addrlen != INFINIBAND_ALEN)
767 return 0;
768 /* reserved QPN, prefix, scope */
769 if (memcmp(addr, broadcast, 6))
770 return 0;
771 /* signature lower, pkey */
772 if (memcmp(addr + 7, broadcast + 7, 3))
773 return 0;
774 return 1;
775}
776
761void ipoib_mcast_restart_task(struct work_struct *work) 777void ipoib_mcast_restart_task(struct work_struct *work)
762{ 778{
763 struct ipoib_dev_priv *priv = 779 struct ipoib_dev_priv *priv =
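ipoib_mcast_addr_is_valid() filters hardware multicast entries that cannot be mapped to an IPoIB MGID before the restart task tries to join them. For the 20-byte (INFINIBAND_ALEN) address, whose last 16 bytes are the MGID copied from dmi_addr + 4 below, the layout the memcmp offsets assume is roughly:

	/* Sketch of the offsets checked above */
	/* addr[0..3]  reserved QPN          \ memcmp(addr, broadcast, 6)  */
	/* addr[4..5]  MGID prefix + scope   /                             */
	/* addr[6]     deliberately not compared against the broadcast     */
	/* addr[7..9]  signature low + P_Key - memcmp(addr + 7, ..., 3)    */
	/* addr[4..19] the 16-byte MGID consumed by the caller             */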
@@ -791,6 +807,11 @@ void ipoib_mcast_restart_task(struct work_struct *work)
791 for (mclist = dev->mc_list; mclist; mclist = mclist->next) { 807 for (mclist = dev->mc_list; mclist; mclist = mclist->next) {
792 union ib_gid mgid; 808 union ib_gid mgid;
793 809
810 if (!ipoib_mcast_addr_is_valid(mclist->dmi_addr,
811 mclist->dmi_addrlen,
812 dev->broadcast))
813 continue;
814
794 memcpy(mgid.raw, mclist->dmi_addr + 4, sizeof mgid); 815 memcpy(mgid.raw, mclist->dmi_addr + 4, sizeof mgid);
795 816
796 mcast = __ipoib_mcast_find(dev, &mgid); 817 mcast = __ipoib_mcast_find(dev, &mgid);