28 files changed, 450 insertions(+), 156 deletions(-)
diff --git a/drivers/infiniband/core/uverbs.h b/drivers/infiniband/core/uverbs.h
index 5bcb2afd3dcb..0fcd7aa26fa2 100644
--- a/drivers/infiniband/core/uverbs.h
+++ b/drivers/infiniband/core/uverbs.h
@@ -188,6 +188,8 @@ IB_UVERBS_DECLARE_CMD(alloc_pd);
 IB_UVERBS_DECLARE_CMD(dealloc_pd);
 IB_UVERBS_DECLARE_CMD(reg_mr);
 IB_UVERBS_DECLARE_CMD(dereg_mr);
+IB_UVERBS_DECLARE_CMD(alloc_mw);
+IB_UVERBS_DECLARE_CMD(dealloc_mw);
 IB_UVERBS_DECLARE_CMD(create_comp_channel);
 IB_UVERBS_DECLARE_CMD(create_cq);
 IB_UVERBS_DECLARE_CMD(resize_cq);
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 0cb0007724a2..3983a0552775 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -48,6 +48,7 @@ struct uverbs_lock_class {
 
 static struct uverbs_lock_class pd_lock_class = { .name = "PD-uobj" };
 static struct uverbs_lock_class mr_lock_class = { .name = "MR-uobj" };
+static struct uverbs_lock_class mw_lock_class = { .name = "MW-uobj" };
 static struct uverbs_lock_class cq_lock_class = { .name = "CQ-uobj" };
 static struct uverbs_lock_class qp_lock_class = { .name = "QP-uobj" };
 static struct uverbs_lock_class ah_lock_class = { .name = "AH-uobj" };
@@ -1049,6 +1050,126 @@ ssize_t ib_uverbs_dereg_mr(struct ib_uverbs_file *file,
     return in_len;
 }
 
+ssize_t ib_uverbs_alloc_mw(struct ib_uverbs_file *file,
+                           const char __user *buf, int in_len,
+                           int out_len)
+{
+    struct ib_uverbs_alloc_mw      cmd;
+    struct ib_uverbs_alloc_mw_resp resp;
+    struct ib_uobject             *uobj;
+    struct ib_pd                  *pd;
+    struct ib_mw                  *mw;
+    int                            ret;
+
+    if (out_len < sizeof(resp))
+        return -ENOSPC;
+
+    if (copy_from_user(&cmd, buf, sizeof(cmd)))
+        return -EFAULT;
+
+    uobj = kmalloc(sizeof(*uobj), GFP_KERNEL);
+    if (!uobj)
+        return -ENOMEM;
+
+    init_uobj(uobj, 0, file->ucontext, &mw_lock_class);
+    down_write(&uobj->mutex);
+
+    pd = idr_read_pd(cmd.pd_handle, file->ucontext);
+    if (!pd) {
+        ret = -EINVAL;
+        goto err_free;
+    }
+
+    mw = pd->device->alloc_mw(pd, cmd.mw_type);
+    if (IS_ERR(mw)) {
+        ret = PTR_ERR(mw);
+        goto err_put;
+    }
+
+    mw->device  = pd->device;
+    mw->pd      = pd;
+    mw->uobject = uobj;
+    atomic_inc(&pd->usecnt);
+
+    uobj->object = mw;
+    ret = idr_add_uobj(&ib_uverbs_mw_idr, uobj);
+    if (ret)
+        goto err_unalloc;
+
+    memset(&resp, 0, sizeof(resp));
+    resp.rkey      = mw->rkey;
+    resp.mw_handle = uobj->id;
+
+    if (copy_to_user((void __user *)(unsigned long)cmd.response,
+                     &resp, sizeof(resp))) {
+        ret = -EFAULT;
+        goto err_copy;
+    }
+
+    put_pd_read(pd);
+
+    mutex_lock(&file->mutex);
+    list_add_tail(&uobj->list, &file->ucontext->mw_list);
+    mutex_unlock(&file->mutex);
+
+    uobj->live = 1;
+
+    up_write(&uobj->mutex);
+
+    return in_len;
+
+err_copy:
+    idr_remove_uobj(&ib_uverbs_mw_idr, uobj);
+
+err_unalloc:
+    ib_dealloc_mw(mw);
+
+err_put:
+    put_pd_read(pd);
+
+err_free:
+    put_uobj_write(uobj);
+    return ret;
+}
+
+ssize_t ib_uverbs_dealloc_mw(struct ib_uverbs_file *file,
+                             const char __user *buf, int in_len,
+                             int out_len)
+{
+    struct ib_uverbs_dealloc_mw cmd;
+    struct ib_mw               *mw;
+    struct ib_uobject          *uobj;
+    int                         ret = -EINVAL;
+
+    if (copy_from_user(&cmd, buf, sizeof(cmd)))
+        return -EFAULT;
+
+    uobj = idr_write_uobj(&ib_uverbs_mw_idr, cmd.mw_handle, file->ucontext);
+    if (!uobj)
+        return -EINVAL;
+
+    mw = uobj->object;
+
+    ret = ib_dealloc_mw(mw);
+    if (!ret)
+        uobj->live = 0;
+
+    put_uobj_write(uobj);
+
+    if (ret)
+        return ret;
+
+    idr_remove_uobj(&ib_uverbs_mw_idr, uobj);
+
+    mutex_lock(&file->mutex);
+    list_del(&uobj->list);
+    mutex_unlock(&file->mutex);
+
+    put_uobj(uobj);
+
+    return in_len;
+}
+
 ssize_t ib_uverbs_create_comp_channel(struct ib_uverbs_file *file,
                                       const char __user *buf, int in_len,
                                       int out_len)
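[Note] The two handlers above read and write fixed-size user buffers, which implies a user ABI. A minimal sketch of the structures they imply (field order, the reserved padding, and the __u64 response-pointer convention are assumptions; only the field names cmd.response, cmd.pd_handle, cmd.mw_type, cmd.mw_handle, resp.rkey, and resp.mw_handle actually appear in this patch):

    struct ib_uverbs_alloc_mw {
        __u64 response;     /* user address the kernel copies resp to */
        __u32 pd_handle;
        __u8  mw_type;      /* enum ib_mw_type */
        __u8  reserved[3];
    };

    struct ib_uverbs_alloc_mw_resp {
        __u32 rkey;
        __u32 mw_handle;
    };

    struct ib_uverbs_dealloc_mw {
        __u32 mw_handle;
    };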
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index 6f2ce6fa98f8..2c6f0f2ecd9d 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -87,6 +87,8 @@ static ssize_t (*uverbs_cmd_table[])(struct ib_uverbs_file *file,
     [IB_USER_VERBS_CMD_DEALLOC_PD]          = ib_uverbs_dealloc_pd,
     [IB_USER_VERBS_CMD_REG_MR]              = ib_uverbs_reg_mr,
     [IB_USER_VERBS_CMD_DEREG_MR]            = ib_uverbs_dereg_mr,
+    [IB_USER_VERBS_CMD_ALLOC_MW]            = ib_uverbs_alloc_mw,
+    [IB_USER_VERBS_CMD_DEALLOC_MW]          = ib_uverbs_dealloc_mw,
     [IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL] = ib_uverbs_create_comp_channel,
     [IB_USER_VERBS_CMD_CREATE_CQ]           = ib_uverbs_create_cq,
     [IB_USER_VERBS_CMD_RESIZE_CQ]           = ib_uverbs_resize_cq,
@@ -201,6 +203,15 @@ static int ib_uverbs_cleanup_ucontext(struct ib_uverbs_file *file,
         kfree(uobj);
     }
 
+    /* Remove MWs before QPs, in order to support type 2A MWs. */
+    list_for_each_entry_safe(uobj, tmp, &context->mw_list, list) {
+        struct ib_mw *mw = uobj->object;
+
+        idr_remove_uobj(&ib_uverbs_mw_idr, uobj);
+        ib_dealloc_mw(mw);
+        kfree(uobj);
+    }
+
     list_for_each_entry_safe(uobj, tmp, &context->qp_list, list) {
         struct ib_qp *qp = uobj->object;
         struct ib_uqp_object *uqp =
@@ -240,8 +251,6 @@ static int ib_uverbs_cleanup_ucontext(struct ib_uverbs_file *file,
         kfree(uevent);
     }
 
-    /* XXX Free MWs */
-
     list_for_each_entry_safe(uobj, tmp, &context->mr_list, list) {
         struct ib_mr *mr = uobj->object;
 
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index 30f199e8579f..a8fdd3381405 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -1099,18 +1099,19 @@ EXPORT_SYMBOL(ib_free_fast_reg_page_list);
 
 /* Memory windows */
 
-struct ib_mw *ib_alloc_mw(struct ib_pd *pd)
+struct ib_mw *ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type)
 {
     struct ib_mw *mw;
 
     if (!pd->device->alloc_mw)
         return ERR_PTR(-ENOSYS);
 
-    mw = pd->device->alloc_mw(pd);
+    mw = pd->device->alloc_mw(pd, type);
     if (!IS_ERR(mw)) {
         mw->device  = pd->device;
         mw->pd      = pd;
         mw->uobject = NULL;
+        mw->type    = type;
         atomic_inc(&pd->usecnt);
     }
 
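[Note] A minimal sketch of an in-kernel caller after this signature change (the wrapper function and its error handling are illustrative, not part of the patch; only IB_MW_TYPE_1 is exercised by the drivers updated below):

    static int mw_example(struct ib_pd *pd)
    {
        struct ib_mw *mw;

        mw = ib_alloc_mw(pd, IB_MW_TYPE_1);
        if (IS_ERR(mw))
            return PTR_ERR(mw);  /* e.g. -ENOSYS if the device lacks alloc_mw */

        /* ... bind the window to a registered MR, advertise mw->rkey ... */

        return ib_dealloc_mw(mw);
    }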
diff --git a/drivers/infiniband/hw/amso1100/c2.c b/drivers/infiniband/hw/amso1100/c2.c
index 7275e727e0f5..d53cf519f42a 100644
--- a/drivers/infiniband/hw/amso1100/c2.c
+++ b/drivers/infiniband/hw/amso1100/c2.c
@@ -1238,15 +1238,4 @@ static struct pci_driver c2_pci_driver = {
     .remove = c2_remove,
 };
 
-static int __init c2_init_module(void)
-{
-    return pci_register_driver(&c2_pci_driver);
-}
-
-static void __exit c2_exit_module(void)
-{
-    pci_unregister_driver(&c2_pci_driver);
-}
-
-module_init(c2_init_module);
-module_exit(c2_exit_module);
+module_pci_driver(c2_pci_driver);
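[Note] module_pci_driver() generates the same register/unregister boilerplate being deleted here; roughly (simplified from the kernel headers, not code added by this patch):

    #define module_pci_driver(__pci_driver) \
        module_driver(__pci_driver, pci_register_driver, pci_unregister_driver)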
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c
index 0bdf09aa6f42..074d5c25f50b 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c
@@ -738,7 +738,7 @@ static struct ib_mr *iwch_get_dma_mr(struct ib_pd *pd, int acc)
     return ibmr;
 }
 
-static struct ib_mw *iwch_alloc_mw(struct ib_pd *pd)
+static struct ib_mw *iwch_alloc_mw(struct ib_pd *pd, enum ib_mw_type type)
 {
     struct iwch_dev *rhp;
     struct iwch_pd *php;
@@ -747,6 +747,9 @@ static struct ib_mw *iwch_alloc_mw(struct ib_pd *pd)
     u32 stag = 0;
     int ret;
 
+    if (type != IB_MW_TYPE_1)
+        return ERR_PTR(-EINVAL);
+
     php = to_iwch_pd(pd);
     rhp = php->rhp;
     mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
diff --git a/drivers/infiniband/hw/cxgb3/iwch_qp.c b/drivers/infiniband/hw/cxgb3/iwch_qp.c
index 6de8463f453b..e5649e8b215d 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_qp.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_qp.c
@@ -567,18 +567,19 @@ int iwch_bind_mw(struct ib_qp *qp,
     if (mw_bind->send_flags & IB_SEND_SIGNALED)
         t3_wr_flags = T3_COMPLETION_FLAG;
 
-    sgl.addr = mw_bind->addr;
-    sgl.lkey = mw_bind->mr->lkey;
-    sgl.length = mw_bind->length;
+    sgl.addr = mw_bind->bind_info.addr;
+    sgl.lkey = mw_bind->bind_info.mr->lkey;
+    sgl.length = mw_bind->bind_info.length;
     wqe->bind.reserved = 0;
     wqe->bind.type = TPT_VATO;
 
     /* TBD: check perms */
-    wqe->bind.perms = iwch_ib_to_tpt_bind_access(mw_bind->mw_access_flags);
-    wqe->bind.mr_stag = cpu_to_be32(mw_bind->mr->lkey);
+    wqe->bind.perms = iwch_ib_to_tpt_bind_access(
+        mw_bind->bind_info.mw_access_flags);
+    wqe->bind.mr_stag = cpu_to_be32(mw_bind->bind_info.mr->lkey);
     wqe->bind.mw_stag = cpu_to_be32(mw->rkey);
-    wqe->bind.mw_len = cpu_to_be32(mw_bind->length);
-    wqe->bind.mw_va = cpu_to_be64(mw_bind->addr);
+    wqe->bind.mw_len = cpu_to_be32(mw_bind->bind_info.length);
+    wqe->bind.mw_va = cpu_to_be64(mw_bind->bind_info.addr);
     err = iwch_sgl2pbl_map(rhp, &sgl, 1, &pbl_addr, &page_size);
     if (err) {
         spin_unlock_irqrestore(&qhp->lock, flag);
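[Note] These call sites (and the matching ones in nes_verbs.c below) show the per-bind parameters moving into a nested bind_info struct. A sketch of the layout they imply (member order and any fields not touched in this diff are assumptions):

    struct ib_mw_bind_info {
        struct ib_mr *mr;
        u64           addr;
        u64           length;
        int           mw_access_flags;
    };

    struct ib_mw_bind {
        u64                    wr_id;
        int                    send_flags;
        struct ib_mw_bind_info bind_info;
    };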
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index c13745cde7fa..565bfb161c1a 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -143,14 +143,28 @@ static void connect_reply_upcall(struct c4iw_ep *ep, int status);
 static LIST_HEAD(timeout_list);
 static spinlock_t timeout_lock;
 
+static void deref_qp(struct c4iw_ep *ep)
+{
+    c4iw_qp_rem_ref(&ep->com.qp->ibqp);
+    clear_bit(QP_REFERENCED, &ep->com.flags);
+}
+
+static void ref_qp(struct c4iw_ep *ep)
+{
+    set_bit(QP_REFERENCED, &ep->com.flags);
+    c4iw_qp_add_ref(&ep->com.qp->ibqp);
+}
+
 static void start_ep_timer(struct c4iw_ep *ep)
 {
     PDBG("%s ep %p\n", __func__, ep);
     if (timer_pending(&ep->timer)) {
-        PDBG("%s stopped / restarted timer ep %p\n", __func__, ep);
-        del_timer_sync(&ep->timer);
-    } else
-        c4iw_get_ep(&ep->com);
+        pr_err("%s timer already started! ep %p\n",
+               __func__, ep);
+        return;
+    }
+    clear_bit(TIMEOUT, &ep->com.flags);
+    c4iw_get_ep(&ep->com);
     ep->timer.expires = jiffies + ep_timeout_secs * HZ;
     ep->timer.data = (unsigned long)ep;
     ep->timer.function = ep_timeout;
@@ -159,14 +173,10 @@ static void start_ep_timer(struct c4iw_ep *ep)
 
 static void stop_ep_timer(struct c4iw_ep *ep)
 {
-    PDBG("%s ep %p\n", __func__, ep);
-    if (!timer_pending(&ep->timer)) {
-        WARN(1, "%s timer stopped when its not running! "
-             "ep %p state %u\n", __func__, ep, ep->com.state);
-        return;
-    }
+    PDBG("%s ep %p stopping\n", __func__, ep);
     del_timer_sync(&ep->timer);
-    c4iw_put_ep(&ep->com);
+    if (!test_and_set_bit(TIMEOUT, &ep->com.flags))
+        c4iw_put_ep(&ep->com);
 }
 
 static int c4iw_l2t_send(struct c4iw_rdev *rdev, struct sk_buff *skb,
@@ -271,11 +281,13 @@ void _c4iw_free_ep(struct kref *kref)
 
     ep = container_of(kref, struct c4iw_ep, com.kref);
     PDBG("%s ep %p state %s\n", __func__, ep, states[state_read(&ep->com)]);
+    if (test_bit(QP_REFERENCED, &ep->com.flags))
+        deref_qp(ep);
     if (test_bit(RELEASE_RESOURCES, &ep->com.flags)) {
+        remove_handle(ep->com.dev, &ep->com.dev->hwtid_idr, ep->hwtid);
         cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, ep->hwtid);
         dst_release(ep->dst);
         cxgb4_l2t_release(ep->l2t);
-        remove_handle(ep->com.dev, &ep->com.dev->hwtid_idr, ep->hwtid);
     }
     kfree(ep);
 }
@@ -687,7 +699,7 @@ static int send_mpa_reject(struct c4iw_ep *ep, const void *pdata, u8 plen)
     memset(mpa, 0, sizeof(*mpa));
     memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
     mpa->flags = MPA_REJECT;
-    mpa->revision = mpa_rev;
+    mpa->revision = ep->mpa_attr.version;
     mpa->private_data_size = htons(plen);
 
     if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
@@ -863,7 +875,6 @@ static void close_complete_upcall(struct c4iw_ep *ep)
         ep->com.cm_id->event_handler(ep->com.cm_id, &event);
         ep->com.cm_id->rem_ref(ep->com.cm_id);
         ep->com.cm_id = NULL;
-        ep->com.qp = NULL;
         set_bit(CLOSE_UPCALL, &ep->com.history);
     }
 }
@@ -906,7 +917,6 @@ static void peer_abort_upcall(struct c4iw_ep *ep)
         ep->com.cm_id->event_handler(ep->com.cm_id, &event);
         ep->com.cm_id->rem_ref(ep->com.cm_id);
         ep->com.cm_id = NULL;
-        ep->com.qp = NULL;
         set_bit(ABORT_UPCALL, &ep->com.history);
     }
 }
@@ -946,7 +956,6 @@ static void connect_reply_upcall(struct c4iw_ep *ep, int status)
     if (status < 0) {
         ep->com.cm_id->rem_ref(ep->com.cm_id);
         ep->com.cm_id = NULL;
-        ep->com.qp = NULL;
     }
 }
 
@@ -1291,11 +1300,13 @@ static void process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb)
     if (mpa->revision > mpa_rev) {
         printk(KERN_ERR MOD "%s MPA version mismatch. Local = %d,"
                " Received = %d\n", __func__, mpa_rev, mpa->revision);
+        stop_ep_timer(ep);
         abort_connection(ep, skb, GFP_KERNEL);
         return;
     }
 
     if (memcmp(mpa->key, MPA_KEY_REQ, sizeof(mpa->key))) {
+        stop_ep_timer(ep);
         abort_connection(ep, skb, GFP_KERNEL);
         return;
     }
@@ -1306,6 +1317,7 @@ static void process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb)
      * Fail if there's too much private data.
      */
     if (plen > MPA_MAX_PRIVATE_DATA) {
+        stop_ep_timer(ep);
         abort_connection(ep, skb, GFP_KERNEL);
         return;
     }
@@ -1314,6 +1326,7 @@ static void process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb)
      * If plen does not account for pkt size
      */
     if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) {
+        stop_ep_timer(ep);
         abort_connection(ep, skb, GFP_KERNEL);
         return;
     }
@@ -1391,30 +1404,33 @@ static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb)
     skb_pull(skb, sizeof(*hdr));
     skb_trim(skb, dlen);
 
-    ep->rcv_seq += dlen;
-    BUG_ON(ep->rcv_seq != (ntohl(hdr->seq) + dlen));
-
     /* update RX credits */
     update_rx_credits(ep, dlen);
 
     switch (state_read(&ep->com)) {
     case MPA_REQ_SENT:
+        ep->rcv_seq += dlen;
         process_mpa_reply(ep, skb);
         break;
     case MPA_REQ_WAIT:
+        ep->rcv_seq += dlen;
         process_mpa_request(ep, skb);
         break;
-    case MPA_REP_SENT:
+    case FPDU_MODE: {
+        struct c4iw_qp_attributes attrs;
+        BUG_ON(!ep->com.qp);
+        if (status)
+            pr_err("%s Unexpected streaming data." \
+                   " qpid %u ep %p state %d tid %u status %d\n",
+                   __func__, ep->com.qp->wq.sq.qid, ep,
+                   state_read(&ep->com), ep->hwtid, status);
+        attrs.next_state = C4IW_QP_STATE_ERROR;
+        c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
+                       C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
+        c4iw_ep_disconnect(ep, 1, GFP_KERNEL);
         break;
+    }
     default:
-        pr_err("%s Unexpected streaming data." \
-               " ep %p state %d tid %u status %d\n",
-               __func__, ep, state_read(&ep->com), ep->hwtid, status);
-
-        /*
-         * The ep will timeout and inform the ULP of the failure.
-         * See ep_timeout().
-         */
         break;
     }
     return 0;
@@ -1437,6 +1453,7 @@ static int abort_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
     mutex_lock(&ep->com.mutex);
     switch (ep->com.state) {
     case ABORTING:
+        c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
         __state_set(&ep->com, DEAD);
         release = 1;
         break;
@@ -1475,11 +1492,11 @@ static void send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid)
         V_FW_OFLD_CONNECTION_WR_ASTID(atid));
     req->tcb.cplrxdataack_cplpassacceptrpl =
         htons(F_FW_OFLD_CONNECTION_WR_CPLRXDATAACK);
-    req->tcb.tx_max = jiffies;
+    req->tcb.tx_max = (__force __be32) jiffies;
     req->tcb.rcv_adv = htons(1);
     cxgb4_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx);
     wscale = compute_wscale(rcv_win);
-    req->tcb.opt0 = TCAM_BYPASS(1) |
+    req->tcb.opt0 = (__force __be64) (TCAM_BYPASS(1) |
         (nocong ? NO_CONG(1) : 0) |
         KEEP_ALIVE(1) |
         DELACK(1) |
@@ -1490,20 +1507,20 @@ static void send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid)
         SMAC_SEL(ep->smac_idx) |
         DSCP(ep->tos) |
         ULP_MODE(ULP_MODE_TCPDDP) |
-        RCV_BUFSIZ(rcv_win >> 10);
-    req->tcb.opt2 = PACE(1) |
+        RCV_BUFSIZ(rcv_win >> 10));
+    req->tcb.opt2 = (__force __be32) (PACE(1) |
         TX_QUEUE(ep->com.dev->rdev.lldi.tx_modq[ep->tx_chan]) |
         RX_CHANNEL(0) |
         CCTRL_ECN(enable_ecn) |
-        RSS_QUEUE_VALID | RSS_QUEUE(ep->rss_qid);
+        RSS_QUEUE_VALID | RSS_QUEUE(ep->rss_qid));
     if (enable_tcp_timestamps)
-        req->tcb.opt2 |= TSTAMPS_EN(1);
+        req->tcb.opt2 |= (__force __be32) TSTAMPS_EN(1);
     if (enable_tcp_sack)
-        req->tcb.opt2 |= SACK_EN(1);
+        req->tcb.opt2 |= (__force __be32) SACK_EN(1);
     if (wscale && enable_tcp_window_scaling)
-        req->tcb.opt2 |= WND_SCALE_EN(1);
-    req->tcb.opt0 = cpu_to_be64(req->tcb.opt0);
-    req->tcb.opt2 = cpu_to_be32(req->tcb.opt2);
+        req->tcb.opt2 |= (__force __be32) WND_SCALE_EN(1);
+    req->tcb.opt0 = cpu_to_be64((__force u64) req->tcb.opt0);
+    req->tcb.opt2 = cpu_to_be32((__force u32) req->tcb.opt2);
     set_wr_txq(skb, CPL_PRIORITY_CONTROL, ep->ctrlq_idx);
     set_bit(ACT_OFLD_CONN, &ep->com.history);
     c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
@@ -1993,6 +2010,7 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
 
     init_timer(&child_ep->timer);
     cxgb4_insert_tid(t, child_ep, hwtid);
+    insert_handle(dev, &dev->hwtid_idr, child_ep, child_ep->hwtid);
     accept_cr(child_ep, peer_ip, skb, req);
     set_bit(PASS_ACCEPT_REQ, &child_ep->com.history);
     goto out;
@@ -2018,7 +2036,6 @@ static int pass_establish(struct c4iw_dev *dev, struct sk_buff *skb)
          ntohs(req->tcp_opt));
 
     set_emss(ep, ntohs(req->tcp_opt));
-    insert_handle(dev, &dev->hwtid_idr, ep, ep->hwtid);
 
     dst_confirm(ep->dst);
     state_set(&ep->com, MPA_REQ_WAIT);
@@ -2163,7 +2180,7 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
         break;
     case MPA_REQ_SENT:
         stop_ep_timer(ep);
-        if (mpa_rev == 2 && ep->tried_with_mpa_v1)
+        if (mpa_rev == 1 || (mpa_rev == 2 && ep->tried_with_mpa_v1))
             connect_reply_upcall(ep, -ECONNRESET);
         else {
             /*
@@ -2235,9 +2252,8 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
 out:
     if (release)
         release_ep_resources(ep);
-
-    /* retry with mpa-v1 */
-    if (ep && ep->retry_with_mpa_v1) {
+    else if (ep->retry_with_mpa_v1) {
+        remove_handle(ep->com.dev, &ep->com.dev->hwtid_idr, ep->hwtid);
         cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, ep->hwtid);
         dst_release(ep->dst);
         cxgb4_l2t_release(ep->l2t);
@@ -2430,6 +2446,7 @@ int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
     cm_id->add_ref(cm_id);
     ep->com.cm_id = cm_id;
     ep->com.qp = qp;
+    ref_qp(ep);
 
     /* bind QP to EP and move to RTS */
     attrs.mpa_attr = ep->mpa_attr;
@@ -2460,7 +2477,6 @@ int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
     return 0;
 err1:
     ep->com.cm_id = NULL;
-    ep->com.qp = NULL;
     cm_id->rem_ref(cm_id);
 err:
     c4iw_put_ep(&ep->com);
@@ -2501,6 +2517,7 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
     ep->com.cm_id = cm_id;
     ep->com.qp = get_qhp(dev, conn_param->qpn);
     BUG_ON(!ep->com.qp);
+    ref_qp(ep);
     PDBG("%s qpn 0x%x qp %p cm_id %p\n", __func__, conn_param->qpn,
          ep->com.qp, cm_id);
 
@@ -2756,7 +2773,8 @@ static void active_ofld_conn_reply(struct c4iw_dev *dev, struct sk_buff *skb,
     struct c4iw_ep *ep;
     int atid = be32_to_cpu(req->tid);
 
-    ep = (struct c4iw_ep *)lookup_atid(dev->rdev.lldi.tids, req->tid);
+    ep = (struct c4iw_ep *)lookup_atid(dev->rdev.lldi.tids,
+                                       (__force u32) req->tid);
     if (!ep)
         return;
 
@@ -2800,7 +2818,7 @@ static void passive_ofld_conn_reply(struct c4iw_dev *dev, struct sk_buff *skb,
     struct cpl_pass_accept_req *cpl;
     int ret;
 
-    rpl_skb = (struct sk_buff *)cpu_to_be64(req->cookie);
+    rpl_skb = (struct sk_buff *)(unsigned long)req->cookie;
     BUG_ON(!rpl_skb);
     if (req->retval) {
         PDBG("%s passive open failure %d\n", __func__, req->retval);
@@ -2811,7 +2829,8 @@ static void passive_ofld_conn_reply(struct c4iw_dev *dev, struct sk_buff *skb,
     } else {
         cpl = (struct cpl_pass_accept_req *)cplhdr(rpl_skb);
         OPCODE_TID(cpl) = htonl(MK_OPCODE_TID(CPL_PASS_ACCEPT_REQ,
-                                              htonl(req->tid)));
+                                (__force u32) htonl(
+                                (__force u32) req->tid)));
         ret = pass_accept_req(dev, rpl_skb);
         if (!ret)
             kfree_skb(rpl_skb);
@@ -2857,10 +2876,10 @@ static void build_cpl_pass_accept_req(struct sk_buff *skb, int stid , u8 tos)
     struct tcp_options_received tmp_opt;
 
     /* Store values from cpl_rx_pkt in temporary location. */
-    vlantag = cpl->vlan;
-    len = cpl->len;
-    l2info = cpl->l2info;
-    hdr_len = cpl->hdr_len;
+    vlantag = (__force u16) cpl->vlan;
+    len = (__force u16) cpl->len;
+    l2info = (__force u32) cpl->l2info;
+    hdr_len = (__force u16) cpl->hdr_len;
     intf = cpl->iff;
 
     __skb_pull(skb, sizeof(*req) + sizeof(struct rss_header));
@@ -2871,19 +2890,24 @@ static void build_cpl_pass_accept_req(struct sk_buff *skb, int stid , u8 tos)
      */
     memset(&tmp_opt, 0, sizeof(tmp_opt));
     tcp_clear_options(&tmp_opt);
-    tcp_parse_options(skb, &tmp_opt, 0, 0, NULL);
+    tcp_parse_options(skb, &tmp_opt, NULL, 0, NULL);
 
     req = (struct cpl_pass_accept_req *)__skb_push(skb, sizeof(*req));
     memset(req, 0, sizeof(*req));
     req->l2info = cpu_to_be16(V_SYN_INTF(intf) |
-                     V_SYN_MAC_IDX(G_RX_MACIDX(htonl(l2info))) |
+                     V_SYN_MAC_IDX(G_RX_MACIDX(
+                     (__force int) htonl(l2info))) |
                      F_SYN_XACT_MATCH);
-    req->hdr_len = cpu_to_be32(V_SYN_RX_CHAN(G_RX_CHAN(htonl(l2info))) |
-                    V_TCP_HDR_LEN(G_RX_TCPHDR_LEN(htons(hdr_len))) |
-                    V_IP_HDR_LEN(G_RX_IPHDR_LEN(htons(hdr_len))) |
-                    V_ETH_HDR_LEN(G_RX_ETHHDR_LEN(htonl(l2info))));
-    req->vlan = vlantag;
-    req->len = len;
+    req->hdr_len = cpu_to_be32(V_SYN_RX_CHAN(G_RX_CHAN(
+                    (__force int) htonl(l2info))) |
+                    V_TCP_HDR_LEN(G_RX_TCPHDR_LEN(
+                    (__force int) htons(hdr_len))) |
+                    V_IP_HDR_LEN(G_RX_IPHDR_LEN(
+                    (__force int) htons(hdr_len))) |
+                    V_ETH_HDR_LEN(G_RX_ETHHDR_LEN(
+                    (__force int) htonl(l2info))));
+    req->vlan = (__force __be16) vlantag;
+    req->len = (__force __be16) len;
     req->tos_stid = cpu_to_be32(PASS_OPEN_TID(stid) |
                                 PASS_OPEN_TOS(tos));
     req->tcpopt.mss = htons(tmp_opt.mss_clamp);
@@ -2912,7 +2936,7 @@ static void send_fw_pass_open_req(struct c4iw_dev *dev, struct sk_buff *skb,
     req->op_compl = htonl(V_WR_OP(FW_OFLD_CONNECTION_WR) | FW_WR_COMPL(1));
     req->len16_pkd = htonl(FW_WR_LEN16(DIV_ROUND_UP(sizeof(*req), 16)));
     req->le.version_cpl = htonl(F_FW_OFLD_CONNECTION_WR_CPL);
-    req->le.filter = filter;
+    req->le.filter = (__force __be32) filter;
     req->le.lport = lport;
     req->le.pport = rport;
     req->le.u.ipv4.lip = laddr;
@@ -2938,7 +2962,7 @@ static void send_fw_pass_open_req(struct c4iw_dev *dev, struct sk_buff *skb,
      * TP will ignore any value > 0 for MSS index.
      */
     req->tcb.opt0 = cpu_to_be64(V_MSS_IDX(0xF));
-    req->cookie = cpu_to_be64((u64)skb);
+    req->cookie = (unsigned long)skb;
 
     set_wr_txq(req_skb, CPL_PRIORITY_CONTROL, port_id);
     cxgb4_ofld_send(dev->rdev.lldi.ports[0], req_skb);
@@ -2988,7 +3012,8 @@ static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb)
     /*
      * Calculate the server tid from filter hit index from cpl_rx_pkt.
      */
-    stid = cpu_to_be32(rss->hash_val) - dev->rdev.lldi.tids->sftid_base
+    stid = (__force int) cpu_to_be32((__force u32) rss->hash_val)
+           - dev->rdev.lldi.tids->sftid_base
            + dev->rdev.lldi.tids->nstids;
 
     lep = (struct c4iw_ep *)lookup_stid(dev->rdev.lldi.tids, stid);
@@ -3049,10 +3074,10 @@ static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb)
 
     step = dev->rdev.lldi.nrxq / dev->rdev.lldi.nchan;
     rss_qid = dev->rdev.lldi.rxq_ids[pi->port_id * step];
-    window = htons(tcph->window);
+    window = (__force u16) htons((__force u16)tcph->window);
 
     /* Calcuate filter portion for LE region. */
-    filter = cpu_to_be32(select_ntuple(dev, dst, e));
+    filter = (__force unsigned int) cpu_to_be32(select_ntuple(dev, dst, e));
 
     /*
      * Synthesize the cpl_pass_accept_req. We have everything except the
@@ -3175,11 +3200,16 @@ static DECLARE_WORK(skb_work, process_work);
 static void ep_timeout(unsigned long arg)
 {
     struct c4iw_ep *ep = (struct c4iw_ep *)arg;
+    int kickit = 0;
 
     spin_lock(&timeout_lock);
-    list_add_tail(&ep->entry, &timeout_list);
+    if (!test_and_set_bit(TIMEOUT, &ep->com.flags)) {
+        list_add_tail(&ep->entry, &timeout_list);
+        kickit = 1;
+    }
     spin_unlock(&timeout_lock);
-    queue_work(workq, &skb_work);
+    if (kickit)
+        queue_work(workq, &skb_work);
 }
 
 /*
@@ -3268,8 +3298,14 @@ static int peer_abort_intr(struct c4iw_dev *dev, struct sk_buff *skb)
 
     /*
      * Wake up any threads in rdma_init() or rdma_fini().
+     * However, if we are on MPAv2 and want to retry with MPAv1
+     * then, don't wake up yet.
      */
-    c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
+    if (mpa_rev == 2 && !ep->tried_with_mpa_v1) {
+        if (ep->com.state != MPA_REQ_SENT)
+            c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
+    } else
+        c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
     sched(dev, skb);
     return 0;
 }
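[Note] The new TIMEOUT flag closes a race in which both stop_ep_timer() and the expired timer could drop the reference held on behalf of the timer. Reduced to its core, the idiom is (illustrative helper, not a function added by this patch):

    static void put_timer_ref_once(struct c4iw_ep *ep)
    {
        /*
         * Whichever of stop_ep_timer()/ep_timeout() gets here first
         * claims the bit; the loser does nothing, so the reference is
         * dropped (or the timeout work queued) exactly once.
         * start_ep_timer() re-arms by clearing the bit.
         */
        if (!test_and_set_bit(TIMEOUT, &ep->com.flags))
            c4iw_put_ep(&ep->com);
    }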
diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c
index ba11c76c0b5a..80069ad595c1 100644
--- a/drivers/infiniband/hw/cxgb4/device.c
+++ b/drivers/infiniband/hw/cxgb4/device.c
@@ -533,7 +533,7 @@ static int c4iw_rdev_open(struct c4iw_rdev *rdev)
     PDBG("udb len 0x%x udb base %p db_reg %p gts_reg %p qpshift %lu "
          "qpmask 0x%x cqshift %lu cqmask 0x%x\n",
          (unsigned)pci_resource_len(rdev->lldi.pdev, 2),
-         (void *)pci_resource_start(rdev->lldi.pdev, 2),
+         (void *)(unsigned long)pci_resource_start(rdev->lldi.pdev, 2),
          rdev->lldi.db_reg,
          rdev->lldi.gts_reg,
          rdev->qpshift, rdev->qpmask,
@@ -797,7 +797,8 @@ static int c4iw_uld_rx_handler(void *handle, const __be64 *rsp,
          "RSS %#llx, FL %#llx, len %u\n",
          pci_name(ctx->lldi.pdev), gl->va,
          (unsigned long long)be64_to_cpu(*rsp),
-         (unsigned long long)be64_to_cpu(*(u64 *)gl->va),
+         (unsigned long long)be64_to_cpu(
+         *(__force __be64 *)gl->va),
          gl->tot_len);
 
     return 0;
diff --git a/drivers/infiniband/hw/cxgb4/ev.c b/drivers/infiniband/hw/cxgb4/ev.c
index cf2f6b47617a..1a840b2211dd 100644
--- a/drivers/infiniband/hw/cxgb4/ev.c
+++ b/drivers/infiniband/hw/cxgb4/ev.c
@@ -46,9 +46,11 @@ static void post_qp_event(struct c4iw_dev *dev, struct c4iw_cq *chp,
 
     if ((qhp->attr.state == C4IW_QP_STATE_ERROR) ||
         (qhp->attr.state == C4IW_QP_STATE_TERMINATE)) {
-        PDBG("%s AE received after RTS - "
-             "qp state %d qpid 0x%x status 0x%x\n", __func__,
-             qhp->attr.state, qhp->wq.sq.qid, CQE_STATUS(err_cqe));
+        pr_err("%s AE after RTS - qpid 0x%x opcode %d status 0x%x "\
+               "type %d wrid.hi 0x%x wrid.lo 0x%x\n",
+               __func__, CQE_QPID(err_cqe), CQE_OPCODE(err_cqe),
+               CQE_STATUS(err_cqe), CQE_TYPE(err_cqe),
+               CQE_WRID_HI(err_cqe), CQE_WRID_LOW(err_cqe));
         return;
     }
 
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
index 9c1644fb0259..4c07fc069766 100644
--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -716,6 +716,8 @@ enum c4iw_ep_flags {
     ABORT_REQ_IN_PROGRESS = 1,
     RELEASE_RESOURCES = 2,
     CLOSE_SENT = 3,
+    TIMEOUT = 4,
+    QP_REFERENCED = 5,
 };
 
 enum c4iw_ep_history {
@@ -866,7 +868,7 @@ struct ib_fast_reg_page_list *c4iw_alloc_fastreg_pbl(
                      int page_list_len);
 struct ib_mr *c4iw_alloc_fast_reg_mr(struct ib_pd *pd, int pbl_depth);
 int c4iw_dealloc_mw(struct ib_mw *mw);
-struct ib_mw *c4iw_alloc_mw(struct ib_pd *pd);
+struct ib_mw *c4iw_alloc_mw(struct ib_pd *pd, enum ib_mw_type type);
 struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start,
                                u64 length, u64 virt, int acc,
                                struct ib_udata *udata);
diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
index afd81790ab3c..903a92d6f91d 100644
--- a/drivers/infiniband/hw/cxgb4/mem.c
+++ b/drivers/infiniband/hw/cxgb4/mem.c
@@ -650,7 +650,7 @@ err:
     return ERR_PTR(err);
 }
 
-struct ib_mw *c4iw_alloc_mw(struct ib_pd *pd)
+struct ib_mw *c4iw_alloc_mw(struct ib_pd *pd, enum ib_mw_type type)
 {
     struct c4iw_dev *rhp;
     struct c4iw_pd *php;
@@ -659,6 +659,9 @@ struct ib_mw *c4iw_alloc_mw(struct ib_pd *pd)
     u32 stag = 0;
     int ret;
 
+    if (type != IB_MW_TYPE_1)
+        return ERR_PTR(-EINVAL);
+
     php = to_c4iw_pd(pd);
     rhp = php->rhp;
     mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index 05bfe53bff64..17ba4f8bc12d 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -1383,6 +1383,7 @@ err:
     qhp->ep = NULL;
     set_state(qhp, C4IW_QP_STATE_ERROR);
     free = 1;
+    abort = 1;
     wake_up(&qhp->wait);
     BUG_ON(!ep);
     flush_qp(qhp);
diff --git a/drivers/infiniband/hw/ehca/ehca_iverbs.h b/drivers/infiniband/hw/ehca/ehca_iverbs.h
index 8f7f282ead65..22f79afa7fc1 100644
--- a/drivers/infiniband/hw/ehca/ehca_iverbs.h
+++ b/drivers/infiniband/hw/ehca/ehca_iverbs.h
@@ -95,7 +95,7 @@ int ehca_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr);
 
 int ehca_dereg_mr(struct ib_mr *mr);
 
-struct ib_mw *ehca_alloc_mw(struct ib_pd *pd);
+struct ib_mw *ehca_alloc_mw(struct ib_pd *pd, enum ib_mw_type type);
 
 int ehca_bind_mw(struct ib_qp *qp, struct ib_mw *mw,
                  struct ib_mw_bind *mw_bind);
diff --git a/drivers/infiniband/hw/ehca/ehca_mrmw.c b/drivers/infiniband/hw/ehca/ehca_mrmw.c
index 87844869dcc2..bcfb0c183620 100644
--- a/drivers/infiniband/hw/ehca/ehca_mrmw.c
+++ b/drivers/infiniband/hw/ehca/ehca_mrmw.c
@@ -688,7 +688,7 @@ dereg_mr_exit0:
 
 /*----------------------------------------------------------------------*/
 
-struct ib_mw *ehca_alloc_mw(struct ib_pd *pd)
+struct ib_mw *ehca_alloc_mw(struct ib_pd *pd, enum ib_mw_type type)
 {
     struct ib_mw *ib_mw;
     u64 h_ret;
@@ -698,6 +698,9 @@ struct ib_mw *ehca_alloc_mw(struct ib_pd *pd)
         container_of(pd->device, struct ehca_shca, ib_device);
     struct ehca_mw_hipzout_parms hipzout;
 
+    if (type != IB_MW_TYPE_1)
+        return ERR_PTR(-EINVAL);
+
     e_mw = ehca_mw_new();
     if (!e_mw) {
         ib_mw = ERR_PTR(-ENOMEM);
diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
index 07e4fbad987a..8f67fe2e91e6 100644
--- a/drivers/infiniband/hw/nes/nes_verbs.c
+++ b/drivers/infiniband/hw/nes/nes_verbs.c
@@ -55,7 +55,8 @@ static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
 /**
  * nes_alloc_mw
  */
-static struct ib_mw *nes_alloc_mw(struct ib_pd *ibpd) {
+static struct ib_mw *nes_alloc_mw(struct ib_pd *ibpd, enum ib_mw_type type)
+{
     struct nes_pd *nespd = to_nespd(ibpd);
     struct nes_vnic *nesvnic = to_nesvnic(ibpd->device);
     struct nes_device *nesdev = nesvnic->nesdev;
@@ -71,6 +72,9 @@ static struct ib_mw *nes_alloc_mw(struct ib_pd *ibpd) {
     u32 driver_key = 0;
     u8 stag_key = 0;
 
+    if (type != IB_MW_TYPE_1)
+        return ERR_PTR(-EINVAL);
+
     get_random_bytes(&next_stag_index, sizeof(next_stag_index));
     stag_key = (u8)next_stag_index;
 
@@ -244,20 +248,19 @@ static int nes_bind_mw(struct ib_qp *ibqp, struct ib_mw *ibmw,
     if (ibmw_bind->send_flags & IB_SEND_SIGNALED)
         wqe_misc |= NES_IWARP_SQ_WQE_SIGNALED_COMPL;
 
-    if (ibmw_bind->mw_access_flags & IB_ACCESS_REMOTE_WRITE) {
+    if (ibmw_bind->bind_info.mw_access_flags & IB_ACCESS_REMOTE_WRITE)
         wqe_misc |= NES_CQP_STAG_RIGHTS_REMOTE_WRITE;
-    }
-    if (ibmw_bind->mw_access_flags & IB_ACCESS_REMOTE_READ) {
+    if (ibmw_bind->bind_info.mw_access_flags & IB_ACCESS_REMOTE_READ)
         wqe_misc |= NES_CQP_STAG_RIGHTS_REMOTE_READ;
-    }
 
     set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_MISC_IDX, wqe_misc);
-    set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_BIND_WQE_MR_IDX, ibmw_bind->mr->lkey);
+    set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_BIND_WQE_MR_IDX,
+                        ibmw_bind->bind_info.mr->lkey);
     set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_BIND_WQE_MW_IDX, ibmw->rkey);
     set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_BIND_WQE_LENGTH_LOW_IDX,
-                ibmw_bind->length);
+                ibmw_bind->bind_info.length);
     wqe->wqe_words[NES_IWARP_SQ_BIND_WQE_LENGTH_HIGH_IDX] = 0;
-    u64temp = (u64)ibmw_bind->addr;
+    u64temp = (u64)ibmw_bind->bind_info.addr;
     set_wqe_64bit_value(wqe->wqe_words, NES_IWARP_SQ_BIND_WQE_VA_FBO_LOW_IDX, u64temp);
 
     head++;
diff --git a/drivers/infiniband/hw/qib/qib_qp.c b/drivers/infiniband/hw/qib/qib_qp.c
index 35275099cafd..a6a2cc2ba260 100644
--- a/drivers/infiniband/hw/qib/qib_qp.c
+++ b/drivers/infiniband/hw/qib/qib_qp.c
@@ -268,8 +268,9 @@ static void remove_qp(struct qib_ibdev *dev, struct qib_qp *qp)
              qpp = &q->next)
             if (q == qp) {
                 atomic_dec(&qp->refcount);
-                *qpp = qp->next;
-                rcu_assign_pointer(qp->next, NULL);
+                rcu_assign_pointer(*qpp,
+                    rcu_dereference_protected(qp->next,
+                     lockdep_is_held(&dev->qpt_lock)));
                 break;
             }
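[Note] The qib change replaces a plain pointer store with a properly annotated RCU unlink: readers traverse the QP hash chain under rcu_read_lock(), so the successor must be published with rcu_assign_pointer(), and reading qp->next under the write-side lock is legitimized for sparse/lockdep by rcu_dereference_protected(). The general shape, as a sketch ("node" and "lock" are placeholders, not qib names):

    struct node {
        struct node __rcu *next;
    };

    /* Unlink the node *pprev points at while holding the write-side lock. */
    static void unlink_rcu(struct node __rcu **pprev, struct node *n,
                           spinlock_t *lock)
    {
        rcu_assign_pointer(*pprev,
                           rcu_dereference_protected(n->next,
                                                     lockdep_is_held(lock)));
    }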
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index 07ca6fd5546b..eb71aaa26a9a 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -117,6 +117,8 @@ enum {
 #define IPOIB_OP_CM     (0)
 #endif
 
+#define IPOIB_QPN_MASK ((__force u32) cpu_to_be32(0xFFFFFF))
+
 /* structs */
 
 struct ipoib_header {
@@ -760,4 +762,6 @@ extern int ipoib_debug_level;
 
 #define IPOIB_QPN(ha) (be32_to_cpup((__be32 *) ha) & 0xffffff)
 
+extern const char ipoib_driver_version[];
+
 #endif /* _IPOIB_H */
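[Note] IPOIB_QPN_MASK exists because ipoib_addr_hash() (changed below in ipoib_main.c) masks a raw 32-bit word of the neighbour hardware address, which is stored in network byte order. A sketch of why a plain CPU-order 0xFFFFFF would be wrong (illustrative function, not from the patch):

    static inline u32 qpn_word(const u8 *daddr)
    {
        const u32 *d32 = (const u32 *)daddr;

        /*
         * The QPN lives in address octets 1..3. On a little-endian host
         * those bytes land in bits 8..31 of d32[0], so the matching mask
         * is 0xFFFFFF00 -- exactly what cpu_to_be32(0xFFFFFF) produces.
         * On big-endian it degenerates to the plain 0x00FFFFFF.
         */
        return ((__force u32) cpu_to_be32(0xFFFFFF)) & d32[0];
    }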
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c b/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
index 29bc7b5724ac..c4b3940845e6 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
@@ -39,7 +39,24 @@
 static void ipoib_get_drvinfo(struct net_device *netdev,
                               struct ethtool_drvinfo *drvinfo)
 {
-    strncpy(drvinfo->driver, "ipoib", sizeof(drvinfo->driver) - 1);
+    struct ipoib_dev_priv *priv = netdev_priv(netdev);
+    struct ib_device_attr *attr;
+
+    attr = kmalloc(sizeof(*attr), GFP_KERNEL);
+    if (attr && !ib_query_device(priv->ca, attr))
+        snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+                 "%d.%d.%d", (int)(attr->fw_ver >> 32),
+                 (int)(attr->fw_ver >> 16) & 0xffff,
+                 (int)attr->fw_ver & 0xffff);
+    kfree(attr);
+
+    strlcpy(drvinfo->bus_info, dev_name(priv->ca->dma_device),
+            sizeof(drvinfo->bus_info));
+
+    strlcpy(drvinfo->version, ipoib_driver_version,
+            sizeof(drvinfo->version));
+
+    strlcpy(drvinfo->driver, "ib_ipoib", sizeof(drvinfo->driver));
 }
 
 static int ipoib_get_coalesce(struct net_device *dev,
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 6fdc9e78da0d..8534afd04e7c 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -49,9 +49,14 @@
 #include <linux/jhash.h>
 #include <net/arp.h>
 
+#define DRV_VERSION "1.0.0"
+
+const char ipoib_driver_version[] = DRV_VERSION;
+
 MODULE_AUTHOR("Roland Dreier");
 MODULE_DESCRIPTION("IP-over-InfiniBand net driver");
 MODULE_LICENSE("Dual BSD/GPL");
+MODULE_VERSION(DRV_VERSION);
 
 int ipoib_sendq_size __read_mostly = IPOIB_TX_RING_SIZE;
 int ipoib_recvq_size __read_mostly = IPOIB_RX_RING_SIZE;
@@ -505,6 +510,9 @@ static void path_rec_completion(int status,
 
     spin_unlock_irqrestore(&priv->lock, flags);
 
+    if (IS_ERR_OR_NULL(ah))
+        ipoib_del_neighs_by_gid(dev, path->pathrec.dgid.raw);
+
     if (old_ah)
         ipoib_put_ah(old_ah);
 
@@ -844,10 +852,10 @@ static u32 ipoib_addr_hash(struct ipoib_neigh_hash *htbl, u8 *daddr)
      * different subnets.
      */
     /* qpn octets[1:4) & port GUID octets[12:20) */
-    u32 *daddr_32 = (u32 *) daddr;
+    u32 *d32 = (u32 *) daddr;
     u32 hv;
 
-    hv = jhash_3words(daddr_32[3], daddr_32[4], 0xFFFFFF & daddr_32[0], 0);
+    hv = jhash_3words(d32[3], d32[4], IPOIB_QPN_MASK & d32[0], 0);
     return hv & htbl->mask;
 }
 
@@ -1688,6 +1696,8 @@ static void ipoib_remove_one(struct ib_device *device)
         return;
 
     dev_list = ib_get_client_data(device, &ipoib_client);
+    if (!dev_list)
+        return;
 
     list_for_each_entry_safe(priv, tmp, dev_list, list) {
         ib_unregister_event_handler(&priv->event_handler);
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h index ef7d3be46c31..5babdb35bda7 100644 --- a/drivers/infiniband/ulp/iser/iscsi_iser.h +++ b/drivers/infiniband/ulp/iser/iscsi_iser.h | |||
@@ -94,7 +94,7 @@ | |||
94 | 94 | ||
95 | /* support up to 512KB in one RDMA */ | 95 | /* support up to 512KB in one RDMA */ |
96 | #define ISCSI_ISER_SG_TABLESIZE (0x80000 >> SHIFT_4K) | 96 | #define ISCSI_ISER_SG_TABLESIZE (0x80000 >> SHIFT_4K) |
97 | #define ISER_DEF_CMD_PER_LUN 128 | 97 | #define ISER_DEF_CMD_PER_LUN ISCSI_DEF_XMIT_CMDS_MAX |
98 | 98 | ||
99 | /* QP settings */ | 99 | /* QP settings */ |
100 | /* Maximal bounds on received asynchronous PDUs */ | 100 | /* Maximal bounds on received asynchronous PDUs */ |
diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c index 2033a928d34d..be1edb04b085 100644 --- a/drivers/infiniband/ulp/iser/iser_memory.c +++ b/drivers/infiniband/ulp/iser/iser_memory.c | |||
@@ -369,10 +369,11 @@ int iser_reg_rdma_mem(struct iscsi_iser_task *iser_task, | |||
369 | regd_buf = &iser_task->rdma_regd[cmd_dir]; | 369 | regd_buf = &iser_task->rdma_regd[cmd_dir]; |
370 | 370 | ||
371 | aligned_len = iser_data_buf_aligned_len(mem, ibdev); | 371 | aligned_len = iser_data_buf_aligned_len(mem, ibdev); |
372 | if (aligned_len != mem->dma_nents) { | 372 | if (aligned_len != mem->dma_nents || |
373 | (!ib_conn->fmr_pool && mem->dma_nents > 1)) { | ||
373 | iscsi_conn->fmr_unalign_cnt++; | 374 | iscsi_conn->fmr_unalign_cnt++; |
374 | iser_warn("rdma alignment violation %d/%d aligned\n", | 375 | iser_warn("rdma alignment violation (%d/%d aligned) or FMR not supported\n", |
375 | aligned_len, mem->size); | 376 | aligned_len, mem->size); |
376 | iser_data_buf_dump(mem, ibdev); | 377 | iser_data_buf_dump(mem, ibdev); |
377 | 378 | ||
378 | /* unmap the command data before accessing it */ | 379 | /* unmap the command data before accessing it */ |
@@ -404,7 +405,7 @@ int iser_reg_rdma_mem(struct iscsi_iser_task *iser_task, | |||
404 | } else { /* use FMR for multiple dma entries */ | 405 | } else { /* use FMR for multiple dma entries */ |
405 | iser_page_vec_build(mem, ib_conn->page_vec, ibdev); | 406 | iser_page_vec_build(mem, ib_conn->page_vec, ibdev); |
406 | err = iser_reg_page_vec(ib_conn, ib_conn->page_vec, ®d_buf->reg); | 407 | err = iser_reg_page_vec(ib_conn, ib_conn->page_vec, ®d_buf->reg); |
407 | if (err) { | 408 | if (err && err != -EAGAIN) { |
408 | iser_data_buf_dump(mem, ibdev); | 409 | iser_data_buf_dump(mem, ibdev); |
409 | iser_err("mem->dma_nents = %d (dlength = 0x%x)\n", | 410 | iser_err("mem->dma_nents = %d (dlength = 0x%x)\n", |
410 | mem->dma_nents, | 411 | mem->dma_nents, |
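The widened test routes two cases through the same copy-and-align fallback: scatterlists with gaps that a single FMR mapping cannot express (the original check) and, new here, any multi-element list on a connection that has no FMR pool at all. A condensed sketch of the predicate, with have_fmr_pool standing in for ib_conn->fmr_pool != NULL:

/* true when the buffer must be copied into one contiguous region
 * before it can be registered */
static bool needs_copy_align(int aligned_len, int dma_nents,
			     bool have_fmr_pool)
{
	return aligned_len != dma_nents ||		/* gappy SG list  */
	       (!have_fmr_pool && dma_nents > 1);	/* no FMR support */
}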
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c index 95a49affee44..4debadc53106 100644 --- a/drivers/infiniband/ulp/iser/iser_verbs.c +++ b/drivers/infiniband/ulp/iser/iser_verbs.c | |||
@@ -242,10 +242,14 @@ static int iser_create_ib_conn_res(struct iser_conn *ib_conn) | |||
242 | IB_ACCESS_REMOTE_READ); | 242 | IB_ACCESS_REMOTE_READ); |
243 | 243 | ||
244 | ib_conn->fmr_pool = ib_create_fmr_pool(device->pd, ¶ms); | 244 | ib_conn->fmr_pool = ib_create_fmr_pool(device->pd, ¶ms); |
245 | if (IS_ERR(ib_conn->fmr_pool)) { | 245 | ret = PTR_ERR(ib_conn->fmr_pool); |
246 | ret = PTR_ERR(ib_conn->fmr_pool); | 246 | if (IS_ERR(ib_conn->fmr_pool) && ret != -ENOSYS) { |
247 | ib_conn->fmr_pool = NULL; | 247 | ib_conn->fmr_pool = NULL; |
248 | goto out_err; | 248 | goto out_err; |
249 | } else if (ret == -ENOSYS) { | ||
250 | ib_conn->fmr_pool = NULL; | ||
251 | iser_warn("FMRs are not supported, using unaligned mode\n"); | ||
252 | ret = 0; | ||
249 | } | 253 | } |
250 | 254 | ||
251 | memset(&init_attr, 0, sizeof init_attr); | 255 | memset(&init_attr, 0, sizeof init_attr); |
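Here -ENOSYS from ib_create_fmr_pool() stops being fatal: the connection proceeds with fmr_pool = NULL and relies on the fallback above. One wrinkle in the patch is that PTR_ERR() runs before IS_ERR(), so on success ret briefly holds a valid pointer reinterpreted as an integer; that works only because a valid kernel pointer never compares equal to -ENOSYS. Nesting the check avoids the reinterpretation entirely, as in this minimal sketch (same iser types assumed):

ib_conn->fmr_pool = ib_create_fmr_pool(device->pd, &params);
if (IS_ERR(ib_conn->fmr_pool)) {
	ret = PTR_ERR(ib_conn->fmr_pool);
	ib_conn->fmr_pool = NULL;
	if (ret != -ENOSYS)
		goto out_err;	/* hard failure */
	/* no FMR support on this device: continue without a pool */
	iser_warn("FMRs are not supported, using unaligned mode\n");
	ret = 0;
}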
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c index d5088ce78290..7ccf3284dda3 100644 --- a/drivers/infiniband/ulp/srp/ib_srp.c +++ b/drivers/infiniband/ulp/srp/ib_srp.c | |||
@@ -700,23 +700,24 @@ static int srp_reconnect_target(struct srp_target_port *target) | |||
700 | struct Scsi_Host *shost = target->scsi_host; | 700 | struct Scsi_Host *shost = target->scsi_host; |
701 | int i, ret; | 701 | int i, ret; |
702 | 702 | ||
703 | if (target->state != SRP_TARGET_LIVE) | ||
704 | return -EAGAIN; | ||
705 | |||
706 | scsi_target_block(&shost->shost_gendev); | 703 | scsi_target_block(&shost->shost_gendev); |
707 | 704 | ||
708 | srp_disconnect_target(target); | 705 | srp_disconnect_target(target); |
709 | /* | 706 | /* |
710 | * Now get a new local CM ID so that we avoid confusing the | 707 | * Now get a new local CM ID so that we avoid confusing the target in |
711 | * target in case things are really fouled up. | 708 | * case things are really fouled up. Doing so also ensures that all CM |
709 | * callbacks will have finished before a new QP is allocated. | ||
712 | */ | 710 | */ |
713 | ret = srp_new_cm_id(target); | 711 | ret = srp_new_cm_id(target); |
714 | if (ret) | 712 | /* |
715 | goto unblock; | 713 | * Whether or not creating a new CM ID succeeded, create a new |
716 | 714 | * QP. This guarantees that all completion callback function | |
717 | ret = srp_create_target_ib(target); | 715 | * invocations have finished before request resetting starts. |
718 | if (ret) | 716 | */ |
719 | goto unblock; | 717 | if (ret == 0) |
718 | ret = srp_create_target_ib(target); | ||
719 | else | ||
720 | srp_create_target_ib(target); | ||
720 | 721 | ||
721 | for (i = 0; i < SRP_CMD_SQ_SIZE; ++i) { | 722 | for (i = 0; i < SRP_CMD_SQ_SIZE; ++i) { |
722 | struct srp_request *req = &target->req_ring[i]; | 723 | struct srp_request *req = &target->req_ring[i]; |
@@ -728,11 +729,12 @@ static int srp_reconnect_target(struct srp_target_port *target) | |||
728 | for (i = 0; i < SRP_SQ_SIZE; ++i) | 729 | for (i = 0; i < SRP_SQ_SIZE; ++i) |
729 | list_add(&target->tx_ring[i]->list, &target->free_tx); | 730 | list_add(&target->tx_ring[i]->list, &target->free_tx); |
730 | 731 | ||
731 | ret = srp_connect_target(target); | 732 | if (ret == 0) |
733 | ret = srp_connect_target(target); | ||
732 | 734 | ||
733 | unblock: | ||
734 | scsi_target_unblock(&shost->shost_gendev, ret == 0 ? SDEV_RUNNING : | 735 | scsi_target_unblock(&shost->shost_gendev, ret == 0 ? SDEV_RUNNING : |
735 | SDEV_TRANSPORT_OFFLINE); | 736 | SDEV_TRANSPORT_OFFLINE); |
737 | target->transport_offline = !!ret; | ||
736 | 738 | ||
737 | if (ret) | 739 | if (ret) |
738 | goto err; | 740 | goto err; |
@@ -1352,6 +1354,12 @@ static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd) | |||
1352 | unsigned long flags; | 1354 | unsigned long flags; |
1353 | int len; | 1355 | int len; |
1354 | 1356 | ||
1357 | if (unlikely(target->transport_offline)) { | ||
1358 | scmnd->result = DID_NO_CONNECT << 16; | ||
1359 | scmnd->scsi_done(scmnd); | ||
1360 | return 0; | ||
1361 | } | ||
1362 | |||
1355 | spin_lock_irqsave(&target->lock, flags); | 1363 | spin_lock_irqsave(&target->lock, flags); |
1356 | iu = __srp_get_tx_iu(target, SRP_IU_CMD); | 1364 | iu = __srp_get_tx_iu(target, SRP_IU_CMD); |
1357 | if (!iu) | 1365 | if (!iu) |
@@ -1695,6 +1703,9 @@ static int srp_send_tsk_mgmt(struct srp_target_port *target, | |||
1695 | struct srp_iu *iu; | 1703 | struct srp_iu *iu; |
1696 | struct srp_tsk_mgmt *tsk_mgmt; | 1704 | struct srp_tsk_mgmt *tsk_mgmt; |
1697 | 1705 | ||
1706 | if (!target->connected || target->qp_in_error) | ||
1707 | return -1; | ||
1708 | |||
1698 | init_completion(&target->tsk_mgmt_done); | 1709 | init_completion(&target->tsk_mgmt_done); |
1699 | 1710 | ||
1700 | spin_lock_irq(&target->lock); | 1711 | spin_lock_irq(&target->lock); |
@@ -1736,7 +1747,7 @@ static int srp_abort(struct scsi_cmnd *scmnd) | |||
1736 | 1747 | ||
1737 | shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n"); | 1748 | shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n"); |
1738 | 1749 | ||
1739 | if (!req || target->qp_in_error || !srp_claim_req(target, req, scmnd)) | 1750 | if (!req || !srp_claim_req(target, req, scmnd)) |
1740 | return FAILED; | 1751 | return FAILED; |
1741 | srp_send_tsk_mgmt(target, req->index, scmnd->device->lun, | 1752 | srp_send_tsk_mgmt(target, req->index, scmnd->device->lun, |
1742 | SRP_TSK_ABORT_TASK); | 1753 | SRP_TSK_ABORT_TASK); |
@@ -1754,8 +1765,6 @@ static int srp_reset_device(struct scsi_cmnd *scmnd) | |||
1754 | 1765 | ||
1755 | shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n"); | 1766 | shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n"); |
1756 | 1767 | ||
1757 | if (target->qp_in_error) | ||
1758 | return FAILED; | ||
1759 | if (srp_send_tsk_mgmt(target, SRP_TAG_NO_REQ, scmnd->device->lun, | 1768 | if (srp_send_tsk_mgmt(target, SRP_TAG_NO_REQ, scmnd->device->lun, |
1760 | SRP_TSK_LUN_RESET)) | 1769 | SRP_TSK_LUN_RESET)) |
1761 | return FAILED; | 1770 | return FAILED; |
@@ -1972,7 +1981,6 @@ static int srp_add_target(struct srp_host *host, struct srp_target_port *target) | |||
1972 | spin_unlock(&host->target_lock); | 1981 | spin_unlock(&host->target_lock); |
1973 | 1982 | ||
1974 | target->state = SRP_TARGET_LIVE; | 1983 | target->state = SRP_TARGET_LIVE; |
1975 | target->connected = false; | ||
1976 | 1984 | ||
1977 | scsi_scan_target(&target->scsi_host->shost_gendev, | 1985 | scsi_scan_target(&target->scsi_host->shost_gendev, |
1978 | 0, target->scsi_id, SCAN_WILD_CARD, 0); | 1986 | 0, target->scsi_id, SCAN_WILD_CARD, 0); |
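Taken together, the SRP hunks replace the broad qp_in_error screening with narrower gates: srp_reconnect_target() always rebuilds the QP (even when getting a new CM ID fails, so completion callbacks are quiesced first), records the outcome in the new transport_offline flag, and srp_queuecommand() then fails fast instead of posting on a dead transport. Condensed, the producer/consumer pair looks like this (not a drop-in patch; names as in the diff):

/* producer: end of srp_reconnect_target() */
scsi_target_unblock(&shost->shost_gendev,
		    ret == 0 ? SDEV_RUNNING : SDEV_TRANSPORT_OFFLINE);
target->transport_offline = !!ret;

/* consumer: top of srp_queuecommand() */
if (unlikely(target->transport_offline)) {
	scmnd->result = DID_NO_CONNECT << 16;
	scmnd->scsi_done(scmnd);
	return 0;	/* command completed, not requeued */
}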
diff --git a/drivers/infiniband/ulp/srp/ib_srp.h b/drivers/infiniband/ulp/srp/ib_srp.h index de2d0b3c0bfe..66fbedda4571 100644 --- a/drivers/infiniband/ulp/srp/ib_srp.h +++ b/drivers/infiniband/ulp/srp/ib_srp.h | |||
@@ -140,6 +140,7 @@ struct srp_target_port { | |||
140 | unsigned int cmd_sg_cnt; | 140 | unsigned int cmd_sg_cnt; |
141 | unsigned int indirect_size; | 141 | unsigned int indirect_size; |
142 | bool allow_ext_sg; | 142 | bool allow_ext_sg; |
143 | bool transport_offline; | ||
143 | 144 | ||
144 | /* Everything above this point is used in the hot path of | 145 | /* Everything above this point is used in the hot path of |
145 | * command processing. Try to keep them packed into cachelines. | 146 | * command processing. Try to keep them packed into cachelines. |
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index 46bc045bbe15..98cc4b29fc5b 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h | |||
@@ -115,6 +115,8 @@ enum ib_device_cap_flags { | |||
115 | IB_DEVICE_XRC = (1<<20), | 115 | IB_DEVICE_XRC = (1<<20), |
116 | IB_DEVICE_MEM_MGT_EXTENSIONS = (1<<21), | 116 | IB_DEVICE_MEM_MGT_EXTENSIONS = (1<<21), |
117 | IB_DEVICE_BLOCK_MULTICAST_LOOPBACK = (1<<22), | 117 | IB_DEVICE_BLOCK_MULTICAST_LOOPBACK = (1<<22), |
118 | IB_DEVICE_MEM_WINDOW_TYPE_2A = (1<<23), | ||
119 | IB_DEVICE_MEM_WINDOW_TYPE_2B = (1<<24) | ||
118 | }; | 120 | }; |
119 | 121 | ||
120 | enum ib_atomic_cap { | 122 | enum ib_atomic_cap { |
@@ -715,6 +717,11 @@ enum ib_mig_state { | |||
715 | IB_MIG_ARMED | 717 | IB_MIG_ARMED |
716 | }; | 718 | }; |
717 | 719 | ||
720 | enum ib_mw_type { | ||
721 | IB_MW_TYPE_1 = 1, | ||
722 | IB_MW_TYPE_2 = 2 | ||
723 | }; | ||
724 | |||
718 | struct ib_qp_attr { | 725 | struct ib_qp_attr { |
719 | enum ib_qp_state qp_state; | 726 | enum ib_qp_state qp_state; |
720 | enum ib_qp_state cur_qp_state; | 727 | enum ib_qp_state cur_qp_state; |
@@ -758,6 +765,7 @@ enum ib_wr_opcode { | |||
758 | IB_WR_FAST_REG_MR, | 765 | IB_WR_FAST_REG_MR, |
759 | IB_WR_MASKED_ATOMIC_CMP_AND_SWP, | 766 | IB_WR_MASKED_ATOMIC_CMP_AND_SWP, |
760 | IB_WR_MASKED_ATOMIC_FETCH_AND_ADD, | 767 | IB_WR_MASKED_ATOMIC_FETCH_AND_ADD, |
768 | IB_WR_BIND_MW, | ||
761 | }; | 769 | }; |
762 | 770 | ||
763 | enum ib_send_flags { | 771 | enum ib_send_flags { |
@@ -780,6 +788,23 @@ struct ib_fast_reg_page_list { | |||
780 | unsigned int max_page_list_len; | 788 | unsigned int max_page_list_len; |
781 | }; | 789 | }; |
782 | 790 | ||
791 | /** | ||
792 | * struct ib_mw_bind_info - Parameters for a memory window bind operation. | ||
793 | * @mr: A memory region to bind the memory window to. | ||
794 | * @addr: The address where the memory window should begin. | ||
795 | * @length: The length of the memory window, in bytes. | ||
796 | * @mw_access_flags: Access flags from enum ib_access_flags for the window. | ||
797 | * | ||
798 | * This struct contains the shared parameters for type 1 and type 2 | ||
799 | * memory window bind operations. | ||
800 | */ | ||
801 | struct ib_mw_bind_info { | ||
802 | struct ib_mr *mr; | ||
803 | u64 addr; | ||
804 | u64 length; | ||
805 | int mw_access_flags; | ||
806 | }; | ||
807 | |||
783 | struct ib_send_wr { | 808 | struct ib_send_wr { |
784 | struct ib_send_wr *next; | 809 | struct ib_send_wr *next; |
785 | u64 wr_id; | 810 | u64 wr_id; |
@@ -823,6 +848,12 @@ struct ib_send_wr { | |||
823 | int access_flags; | 848 | int access_flags; |
824 | u32 rkey; | 849 | u32 rkey; |
825 | } fast_reg; | 850 | } fast_reg; |
851 | struct { | ||
852 | struct ib_mw *mw; | ||
853 | /* The new rkey for the memory window. */ | ||
854 | u32 rkey; | ||
855 | struct ib_mw_bind_info bind_info; | ||
856 | } bind_mw; | ||
826 | } wr; | 857 | } wr; |
827 | u32 xrc_remote_srq_num; /* XRC TGT QPs only */ | 858 | u32 xrc_remote_srq_num; /* XRC TGT QPs only */ |
828 | }; | 859 | }; |
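Unlike a type 1 window, a type 2 window is bound by posting a work request rather than by calling a verb: the new bind_mw arm of the union names the window, the rkey the consumer wants it to carry afterwards, and the shared bind_info. A hedged usage sketch on an RC QP — qp, mw, mr, buf_dma and buf_len are assumed to exist, and adopting the rkey right after a successful post is one plausible convention, not mandated by the interface:

struct ib_send_wr wr, *bad_wr;

memset(&wr, 0, sizeof(wr));
wr.opcode	= IB_WR_BIND_MW;
wr.wr_id	= 1;			/* caller-chosen cookie */
wr.send_flags	= IB_SEND_SIGNALED;
wr.wr.bind_mw.mw	= mw;
wr.wr.bind_mw.rkey	= ib_inc_rkey(mw->rkey);	/* consumer-supplied */
wr.wr.bind_mw.bind_info.mr		= mr;
wr.wr.bind_mw.bind_info.addr		= buf_dma;
wr.wr.bind_mw.bind_info.length		= buf_len;
wr.wr.bind_mw.bind_info.mw_access_flags	= IB_ACCESS_REMOTE_WRITE;

if (!ib_post_send(qp, &wr, &bad_wr))
	mw->rkey = wr.wr.bind_mw.rkey;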
@@ -839,7 +870,8 @@ enum ib_access_flags { | |||
839 | IB_ACCESS_REMOTE_WRITE = (1<<1), | 870 | IB_ACCESS_REMOTE_WRITE = (1<<1), |
840 | IB_ACCESS_REMOTE_READ = (1<<2), | 871 | IB_ACCESS_REMOTE_READ = (1<<2), |
841 | IB_ACCESS_REMOTE_ATOMIC = (1<<3), | 872 | IB_ACCESS_REMOTE_ATOMIC = (1<<3), |
842 | IB_ACCESS_MW_BIND = (1<<4) | 873 | IB_ACCESS_MW_BIND = (1<<4), |
874 | IB_ZERO_BASED = (1<<5) | ||
843 | }; | 875 | }; |
844 | 876 | ||
845 | struct ib_phys_buf { | 877 | struct ib_phys_buf { |
@@ -862,13 +894,16 @@ enum ib_mr_rereg_flags { | |||
862 | IB_MR_REREG_ACCESS = (1<<2) | 894 | IB_MR_REREG_ACCESS = (1<<2) |
863 | }; | 895 | }; |
864 | 896 | ||
897 | /** | ||
898 | * struct ib_mw_bind - Parameters for a type 1 memory window bind operation. | ||
899 | * @wr_id: Work request id. | ||
900 | * @send_flags: Flags from ib_send_flags enum. | ||
901 | * @bind_info: More parameters of the bind operation. | ||
902 | */ | ||
865 | struct ib_mw_bind { | 903 | struct ib_mw_bind { |
866 | struct ib_mr *mr; | 904 | u64 wr_id; |
867 | u64 wr_id; | 905 | int send_flags; |
868 | u64 addr; | 906 | struct ib_mw_bind_info bind_info; |
869 | u32 length; | ||
870 | int send_flags; | ||
871 | int mw_access_flags; | ||
872 | }; | 907 | }; |
873 | 908 | ||
874 | struct ib_fmr_attr { | 909 | struct ib_fmr_attr { |
@@ -991,6 +1026,7 @@ struct ib_mw { | |||
991 | struct ib_pd *pd; | 1026 | struct ib_pd *pd; |
992 | struct ib_uobject *uobject; | 1027 | struct ib_uobject *uobject; |
993 | u32 rkey; | 1028 | u32 rkey; |
1029 | enum ib_mw_type type; | ||
994 | }; | 1030 | }; |
995 | 1031 | ||
996 | struct ib_fmr { | 1032 | struct ib_fmr { |
@@ -1202,7 +1238,8 @@ struct ib_device { | |||
1202 | int num_phys_buf, | 1238 | int num_phys_buf, |
1203 | int mr_access_flags, | 1239 | int mr_access_flags, |
1204 | u64 *iova_start); | 1240 | u64 *iova_start); |
1205 | struct ib_mw * (*alloc_mw)(struct ib_pd *pd); | 1241 | struct ib_mw * (*alloc_mw)(struct ib_pd *pd, |
1242 | enum ib_mw_type type); | ||
1206 | int (*bind_mw)(struct ib_qp *qp, | 1243 | int (*bind_mw)(struct ib_qp *qp, |
1207 | struct ib_mw *mw, | 1244 | struct ib_mw *mw, |
1208 | struct ib_mw_bind *mw_bind); | 1245 | struct ib_mw_bind *mw_bind); |
@@ -2019,6 +2056,8 @@ int ib_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr); | |||
2019 | * ib_dereg_mr - Deregisters a memory region and removes it from the | 2056 | * ib_dereg_mr - Deregisters a memory region and removes it from the |
2020 | * HCA translation table. | 2057 | * HCA translation table. |
2021 | * @mr: The memory region to deregister. | 2058 | * @mr: The memory region to deregister. |
2059 | * | ||
2060 | * This function can fail if the memory region has memory windows bound to it. | ||
2022 | */ | 2061 | */ |
2023 | int ib_dereg_mr(struct ib_mr *mr); | 2062 | int ib_dereg_mr(struct ib_mr *mr); |
2024 | 2063 | ||
@@ -2071,10 +2110,22 @@ static inline void ib_update_fast_reg_key(struct ib_mr *mr, u8 newkey) | |||
2071 | } | 2110 | } |
2072 | 2111 | ||
2073 | /** | 2112 | /** |
2113 | * ib_inc_rkey - increments the key portion of the given rkey. Can be used | ||
2114 | * for calculating a new rkey for type 2 memory windows. | ||
2115 | * @rkey - the rkey to increment. | ||
2116 | */ | ||
2117 | static inline u32 ib_inc_rkey(u32 rkey) | ||
2118 | { | ||
2119 | const u32 mask = 0x000000ff; | ||
2120 | return ((rkey + 1) & mask) | (rkey & ~mask); | ||
2121 | } | ||
2122 | |||
2123 | /** | ||
2074 | * ib_alloc_mw - Allocates a memory window. | 2124 | * ib_alloc_mw - Allocates a memory window. |
2075 | * @pd: The protection domain associated with the memory window. | 2125 | * @pd: The protection domain associated with the memory window. |
2126 | * @type: The type of the memory window (1 or 2). | ||
2076 | */ | 2127 | */ |
2077 | struct ib_mw *ib_alloc_mw(struct ib_pd *pd); | 2128 | struct ib_mw *ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type); |
2078 | 2129 | ||
2079 | /** | 2130 | /** |
2080 | * ib_bind_mw - Posts a work request to the send queue of the specified | 2131 | * ib_bind_mw - Posts a work request to the send queue of the specified |
@@ -2084,6 +2135,10 @@ struct ib_mw *ib_alloc_mw(struct ib_pd *pd); | |||
2084 | * @mw: The memory window to bind. | 2135 | * @mw: The memory window to bind. |
2085 | * @mw_bind: Specifies information about the memory window, including | 2136 | * @mw_bind: Specifies information about the memory window, including |
2086 | * its address range, remote access rights, and associated memory region. | 2137 | * its address range, remote access rights, and associated memory region. |
2138 | * | ||
2139 | * If there is no immediate error, the function will update the rkey member | ||
2140 | * of the mw parameter to its new value. The bind operation can still fail | ||
2141 | * asynchronously. | ||
2087 | */ | 2142 | */ |
2088 | static inline int ib_bind_mw(struct ib_qp *qp, | 2143 | static inline int ib_bind_mw(struct ib_qp *qp, |
2089 | struct ib_mw *mw, | 2144 | struct ib_mw *mw, |
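ib_inc_rkey() steps only the low "key" byte of an rkey and preserves the upper 24 bits, so a rebound window keeps referring to the same hardware resource while its stale rkey is invalidated. A worked value, plus the matching allocation call:

/* rkey 0x012345FF: upper bits 0x012345, key byte 0xFF
 *   ((0x012345FF + 1) & 0xFF) = 0x00        key byte wraps
 *   (0x012345FF & ~0xFF)      = 0x01234500  upper bits kept
 *   ib_inc_rkey(0x012345FF)   = 0x01234500
 */

struct ib_mw *mw = ib_alloc_mw(pd, IB_MW_TYPE_2);

if (IS_ERR(mw))
	return PTR_ERR(mw);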
diff --git a/include/uapi/rdma/ib_user_verbs.h b/include/uapi/rdma/ib_user_verbs.h index 81aba3a73aa3..805711ea2005 100644 --- a/include/uapi/rdma/ib_user_verbs.h +++ b/include/uapi/rdma/ib_user_verbs.h | |||
@@ -261,6 +261,22 @@ struct ib_uverbs_dereg_mr { | |||
261 | __u32 mr_handle; | 261 | __u32 mr_handle; |
262 | }; | 262 | }; |
263 | 263 | ||
264 | struct ib_uverbs_alloc_mw { | ||
265 | __u64 response; | ||
266 | __u32 pd_handle; | ||
267 | __u8 mw_type; | ||
268 | __u8 reserved[3]; | ||
269 | }; | ||
270 | |||
271 | struct ib_uverbs_alloc_mw_resp { | ||
272 | __u32 mw_handle; | ||
273 | __u32 rkey; | ||
274 | }; | ||
275 | |||
276 | struct ib_uverbs_dealloc_mw { | ||
277 | __u32 mw_handle; | ||
278 | }; | ||
279 | |||
264 | struct ib_uverbs_create_comp_channel { | 280 | struct ib_uverbs_create_comp_channel { |
265 | __u64 response; | 281 | __u64 response; |
266 | }; | 282 | }; |
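Because these structs cross the user/kernel ABI boundary, the explicit reserved[3] matters: 8 + 4 + 1 + 3 = 16 bytes leaves no room for compiler-inserted padding, so 32- and 64-bit userspace see the same layout. An illustrative compile-time check of that invariant (not part of the patch; BUILD_BUG_ON is the kernel's static assert):

BUILD_BUG_ON(sizeof(struct ib_uverbs_alloc_mw)      != 16);
BUILD_BUG_ON(sizeof(struct ib_uverbs_alloc_mw_resp) !=  8);
BUILD_BUG_ON(sizeof(struct ib_uverbs_dealloc_mw)    !=  4);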
diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c index 745973b729af..93726560eaa8 100644 --- a/net/sunrpc/xprtrdma/verbs.c +++ b/net/sunrpc/xprtrdma/verbs.c | |||
@@ -1086,7 +1086,7 @@ rpcrdma_buffer_create(struct rpcrdma_buffer *buf, struct rpcrdma_ep *ep, | |||
1086 | case RPCRDMA_MEMWINDOWS: | 1086 | case RPCRDMA_MEMWINDOWS: |
1087 | /* Allocate one extra request's worth, for full cycling */ | 1087 | /* Allocate one extra request's worth, for full cycling */ |
1088 | for (i = (buf->rb_max_requests+1) * RPCRDMA_MAX_SEGS; i; i--) { | 1088 | for (i = (buf->rb_max_requests+1) * RPCRDMA_MAX_SEGS; i; i--) { |
1089 | r->r.mw = ib_alloc_mw(ia->ri_pd); | 1089 | r->r.mw = ib_alloc_mw(ia->ri_pd, IB_MW_TYPE_1); |
1090 | if (IS_ERR(r->r.mw)) { | 1090 | if (IS_ERR(r->r.mw)) { |
1091 | rc = PTR_ERR(r->r.mw); | 1091 | rc = PTR_ERR(r->r.mw); |
1092 | dprintk("RPC: %s: ib_alloc_mw" | 1092 | dprintk("RPC: %s: ib_alloc_mw" |
@@ -1673,12 +1673,12 @@ rpcrdma_register_memwin_external(struct rpcrdma_mr_seg *seg, | |||
1673 | 1673 | ||
1674 | *nsegs = 1; | 1674 | *nsegs = 1; |
1675 | rpcrdma_map_one(ia, seg, writing); | 1675 | rpcrdma_map_one(ia, seg, writing); |
1676 | param.mr = ia->ri_bind_mem; | 1676 | param.bind_info.mr = ia->ri_bind_mem; |
1677 | param.wr_id = 0ULL; /* no send cookie */ | 1677 | param.wr_id = 0ULL; /* no send cookie */ |
1678 | param.addr = seg->mr_dma; | 1678 | param.bind_info.addr = seg->mr_dma; |
1679 | param.length = seg->mr_len; | 1679 | param.bind_info.length = seg->mr_len; |
1680 | param.send_flags = 0; | 1680 | param.send_flags = 0; |
1681 | param.mw_access_flags = mem_priv; | 1681 | param.bind_info.mw_access_flags = mem_priv; |
1682 | 1682 | ||
1683 | DECR_CQCOUNT(&r_xprt->rx_ep); | 1683 | DECR_CQCOUNT(&r_xprt->rx_ep); |
1684 | rc = ib_bind_mw(ia->ri_id->qp, seg->mr_chunk.rl_mw->r.mw, ¶m); | 1684 | rc = ib_bind_mw(ia->ri_id->qp, seg->mr_chunk.rl_mw->r.mw, ¶m); |
@@ -1690,7 +1690,7 @@ rpcrdma_register_memwin_external(struct rpcrdma_mr_seg *seg, | |||
1690 | rpcrdma_unmap_one(ia, seg); | 1690 | rpcrdma_unmap_one(ia, seg); |
1691 | } else { | 1691 | } else { |
1692 | seg->mr_rkey = seg->mr_chunk.rl_mw->r.mw->rkey; | 1692 | seg->mr_rkey = seg->mr_chunk.rl_mw->r.mw->rkey; |
1693 | seg->mr_base = param.addr; | 1693 | seg->mr_base = param.bind_info.addr; |
1694 | seg->mr_nsegs = 1; | 1694 | seg->mr_nsegs = 1; |
1695 | } | 1695 | } |
1696 | return rc; | 1696 | return rc; |
@@ -1706,10 +1706,10 @@ rpcrdma_deregister_memwin_external(struct rpcrdma_mr_seg *seg, | |||
1706 | int rc; | 1706 | int rc; |
1707 | 1707 | ||
1708 | BUG_ON(seg->mr_nsegs != 1); | 1708 | BUG_ON(seg->mr_nsegs != 1); |
1709 | param.mr = ia->ri_bind_mem; | 1709 | param.bind_info.mr = ia->ri_bind_mem; |
1710 | param.addr = 0ULL; /* unbind */ | 1710 | param.bind_info.addr = 0ULL; /* unbind */ |
1711 | param.length = 0; | 1711 | param.bind_info.length = 0; |
1712 | param.mw_access_flags = 0; | 1712 | param.bind_info.mw_access_flags = 0; |
1713 | if (*r) { | 1713 | if (*r) { |
1714 | param.wr_id = (u64) (unsigned long) *r; | 1714 | param.wr_id = (u64) (unsigned long) *r; |
1715 | param.send_flags = IB_SEND_SIGNALED; | 1715 | param.send_flags = IB_SEND_SIGNALED; |
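After the rework, the per-segment parameters of a type 1 bind live in the nested bind_info while wr_id and send_flags stay at the top level. A sketch of the register path above using designated initializers — behaviour-identical to the field-by-field assignments, with seg, ia and mem_priv as in the surrounding code:

struct ib_mw_bind param = {
	.wr_id		= 0ULL,			/* no send cookie */
	.send_flags	= 0,
	.bind_info	= {
		.mr		= ia->ri_bind_mem,
		.addr		= seg->mr_dma,
		.length		= seg->mr_len,
		.mw_access_flags = mem_priv,
	},
};

rc = ib_bind_mw(ia->ri_id->qp, seg->mr_chunk.rl_mw->r.mw, &param);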