author		Linus Torvalds <torvalds@linux-foundation.org>	2013-02-26 14:41:08 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2013-02-26 14:41:08 -0500
commit		70a3a06d01ed9ca887316a881813cdefb8a20170 (patch)
tree		fbdb7982040ba77818e4b738d76eef8bb06fb47f /drivers/infiniband
parent		f6c0ffa8f0b0781f4954cb06f0a81d6c10c1b434 (diff)
parent		ef4e359d9b9e2dc022f79840fd207796b524a893 (diff)
Merge tag 'rdma-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband
Pull infiniband update from Roland Dreier:
"Main batch of InfiniBand/RDMA changes for 3.9:
- SRP error handling fixes from Bart Van Assche
- Implementation of memory windows for mlx4 from Shani Michaeli
- Lots of cxgb4 HW driver fixes from Vipul Pandya
- Make iSER work for virtual functions, other fixes from Or Gerlitz
- Fix for bug in qib HW driver from Mike Marciniszyn
- IPoIB fixes from me, Itai Garbi, Shlomo Pongratz, Yan Burman
- Various cleanups and warning fixes from Julia Lawall, Paul Bolle,
Wei Yongjun"
* tag 'rdma-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband: (41 commits)
IB/mlx4: Advertise MW support
IB/mlx4: Support memory window binding
mlx4: Implement memory windows allocation and deallocation
mlx4_core: Enable memory windows in {INIT, QUERY}_HCA
mlx4_core: Disable memory windows for virtual functions
IPoIB: Free ipoib neigh on path record failure so path rec queries are retried
IB/srp: Fail I/O requests if the transport is offline
IB/srp: Avoid endless SCSI error handling loop
IB/srp: Avoid sending a task management function needlessly
IB/srp: Track connection state properly
IB/mlx4: Remove redundant NULL check before kfree
IB/mlx4: Fix compiler warning about uninitialized 'vlan' variable
IB/mlx4: Convert is_xxx variables in build_mlx_header() to bool
IB/iser: Enable iser when FMRs are not supported
IB/iser: Avoid error prints on EAGAIN registration failures
IB/iser: Use proper define for the commands per LUN value advertised to SCSI ML
IB/uverbs: Implement memory windows support in uverbs
IB/core: Add "type 2" memory windows support
mlx4_core: Propagate MR deregistration failures to caller
mlx4_core: Rename MPT-related functions to have mpt_ prefix
...
Diffstat (limited to 'drivers/infiniband')
31 files changed, 521 insertions, 161 deletions
diff --git a/drivers/infiniband/core/uverbs.h b/drivers/infiniband/core/uverbs.h
index 5bcb2afd3dcb..0fcd7aa26fa2 100644
--- a/drivers/infiniband/core/uverbs.h
+++ b/drivers/infiniband/core/uverbs.h
@@ -188,6 +188,8 @@ IB_UVERBS_DECLARE_CMD(alloc_pd);
 IB_UVERBS_DECLARE_CMD(dealloc_pd);
 IB_UVERBS_DECLARE_CMD(reg_mr);
 IB_UVERBS_DECLARE_CMD(dereg_mr);
+IB_UVERBS_DECLARE_CMD(alloc_mw);
+IB_UVERBS_DECLARE_CMD(dealloc_mw);
 IB_UVERBS_DECLARE_CMD(create_comp_channel);
 IB_UVERBS_DECLARE_CMD(create_cq);
 IB_UVERBS_DECLARE_CMD(resize_cq);
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 0cb0007724a2..3983a0552775 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -48,6 +48,7 @@ struct uverbs_lock_class {
 
 static struct uverbs_lock_class pd_lock_class	= { .name = "PD-uobj" };
 static struct uverbs_lock_class mr_lock_class	= { .name = "MR-uobj" };
+static struct uverbs_lock_class mw_lock_class	= { .name = "MW-uobj" };
 static struct uverbs_lock_class cq_lock_class	= { .name = "CQ-uobj" };
 static struct uverbs_lock_class qp_lock_class	= { .name = "QP-uobj" };
 static struct uverbs_lock_class ah_lock_class	= { .name = "AH-uobj" };
@@ -1049,6 +1050,126 @@ ssize_t ib_uverbs_dereg_mr(struct ib_uverbs_file *file,
 	return in_len;
 }
 
+ssize_t ib_uverbs_alloc_mw(struct ib_uverbs_file *file,
+			   const char __user *buf, int in_len,
+			   int out_len)
+{
+	struct ib_uverbs_alloc_mw      cmd;
+	struct ib_uverbs_alloc_mw_resp resp;
+	struct ib_uobject             *uobj;
+	struct ib_pd                  *pd;
+	struct ib_mw                  *mw;
+	int                            ret;
+
+	if (out_len < sizeof(resp))
+		return -ENOSPC;
+
+	if (copy_from_user(&cmd, buf, sizeof(cmd)))
+		return -EFAULT;
+
+	uobj = kmalloc(sizeof(*uobj), GFP_KERNEL);
+	if (!uobj)
+		return -ENOMEM;
+
+	init_uobj(uobj, 0, file->ucontext, &mw_lock_class);
+	down_write(&uobj->mutex);
+
+	pd = idr_read_pd(cmd.pd_handle, file->ucontext);
+	if (!pd) {
+		ret = -EINVAL;
+		goto err_free;
+	}
+
+	mw = pd->device->alloc_mw(pd, cmd.mw_type);
+	if (IS_ERR(mw)) {
+		ret = PTR_ERR(mw);
+		goto err_put;
+	}
+
+	mw->device  = pd->device;
+	mw->pd      = pd;
+	mw->uobject = uobj;
+	atomic_inc(&pd->usecnt);
+
+	uobj->object = mw;
+	ret = idr_add_uobj(&ib_uverbs_mw_idr, uobj);
+	if (ret)
+		goto err_unalloc;
+
+	memset(&resp, 0, sizeof(resp));
+	resp.rkey      = mw->rkey;
+	resp.mw_handle = uobj->id;
+
+	if (copy_to_user((void __user *)(unsigned long)cmd.response,
+			 &resp, sizeof(resp))) {
+		ret = -EFAULT;
+		goto err_copy;
+	}
+
+	put_pd_read(pd);
+
+	mutex_lock(&file->mutex);
+	list_add_tail(&uobj->list, &file->ucontext->mw_list);
+	mutex_unlock(&file->mutex);
+
+	uobj->live = 1;
+
+	up_write(&uobj->mutex);
+
+	return in_len;
+
+err_copy:
+	idr_remove_uobj(&ib_uverbs_mw_idr, uobj);
+
+err_unalloc:
+	ib_dealloc_mw(mw);
+
+err_put:
+	put_pd_read(pd);
+
+err_free:
+	put_uobj_write(uobj);
+	return ret;
+}
+
+ssize_t ib_uverbs_dealloc_mw(struct ib_uverbs_file *file,
+			     const char __user *buf, int in_len,
+			     int out_len)
+{
+	struct ib_uverbs_dealloc_mw cmd;
+	struct ib_mw               *mw;
+	struct ib_uobject          *uobj;
+	int                         ret = -EINVAL;
+
+	if (copy_from_user(&cmd, buf, sizeof(cmd)))
+		return -EFAULT;
+
+	uobj = idr_write_uobj(&ib_uverbs_mw_idr, cmd.mw_handle, file->ucontext);
+	if (!uobj)
+		return -EINVAL;
+
+	mw = uobj->object;
+
+	ret = ib_dealloc_mw(mw);
+	if (!ret)
+		uobj->live = 0;
+
+	put_uobj_write(uobj);
+
+	if (ret)
+		return ret;
+
+	idr_remove_uobj(&ib_uverbs_mw_idr, uobj);
+
+	mutex_lock(&file->mutex);
+	list_del(&uobj->list);
+	mutex_unlock(&file->mutex);
+
+	put_uobj(uobj);
+
+	return in_len;
+}
+
 ssize_t ib_uverbs_create_comp_channel(struct ib_uverbs_file *file,
 				      const char __user *buf, int in_len,
 				      int out_len)
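The command and response layouts consumed above are fixed-size structures copied across the user/kernel boundary. A sketch of what they contain, inferred from the fields the two handlers touch; the authoritative layout, including padding, is the ib_uverbs_alloc_mw/ib_uverbs_dealloc_mw definitions in include/uapi/rdma/ib_user_verbs.h:

	/* Hypothetical reconstruction; field names follow the handler code above. */
	struct ib_uverbs_alloc_mw {
		__u64 response;   /* user-space address the response is copied to */
		__u32 pd_handle;  /* PD to allocate the window on */
		__u8  mw_type;    /* IB_MW_TYPE_1 or IB_MW_TYPE_2 */
	};

	struct ib_uverbs_alloc_mw_resp {
		__u32 rkey;       /* initial rkey of the new window */
		__u32 mw_handle;  /* handle later passed to DEALLOC_MW */
	};

	struct ib_uverbs_dealloc_mw {
		__u32 mw_handle;
	};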
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index 6f2ce6fa98f8..2c6f0f2ecd9d 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -87,6 +87,8 @@ static ssize_t (*uverbs_cmd_table[])(struct ib_uverbs_file *file,
 	[IB_USER_VERBS_CMD_DEALLOC_PD]		= ib_uverbs_dealloc_pd,
 	[IB_USER_VERBS_CMD_REG_MR]		= ib_uverbs_reg_mr,
 	[IB_USER_VERBS_CMD_DEREG_MR]		= ib_uverbs_dereg_mr,
+	[IB_USER_VERBS_CMD_ALLOC_MW]		= ib_uverbs_alloc_mw,
+	[IB_USER_VERBS_CMD_DEALLOC_MW]		= ib_uverbs_dealloc_mw,
 	[IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL] = ib_uverbs_create_comp_channel,
 	[IB_USER_VERBS_CMD_CREATE_CQ]		= ib_uverbs_create_cq,
 	[IB_USER_VERBS_CMD_RESIZE_CQ]		= ib_uverbs_resize_cq,
@@ -201,6 +203,15 @@ static int ib_uverbs_cleanup_ucontext(struct ib_uverbs_file *file,
 		kfree(uobj);
 	}
 
+	/* Remove MWs before QPs, in order to support type 2A MWs. */
+	list_for_each_entry_safe(uobj, tmp, &context->mw_list, list) {
+		struct ib_mw *mw = uobj->object;
+
+		idr_remove_uobj(&ib_uverbs_mw_idr, uobj);
+		ib_dealloc_mw(mw);
+		kfree(uobj);
+	}
+
 	list_for_each_entry_safe(uobj, tmp, &context->qp_list, list) {
 		struct ib_qp *qp = uobj->object;
 		struct ib_uqp_object *uqp =
@@ -240,8 +251,6 @@ static int ib_uverbs_cleanup_ucontext(struct ib_uverbs_file *file,
 		kfree(uevent);
 	}
 
-	/* XXX Free MWs */
-
 	list_for_each_entry_safe(uobj, tmp, &context->mr_list, list) {
 		struct ib_mr *mr = uobj->object;
 
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index 30f199e8579f..a8fdd3381405 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -1099,18 +1099,19 @@ EXPORT_SYMBOL(ib_free_fast_reg_page_list);
 
 /* Memory windows */
 
-struct ib_mw *ib_alloc_mw(struct ib_pd *pd)
+struct ib_mw *ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type)
 {
 	struct ib_mw *mw;
 
 	if (!pd->device->alloc_mw)
 		return ERR_PTR(-ENOSYS);
 
-	mw = pd->device->alloc_mw(pd);
+	mw = pd->device->alloc_mw(pd, type);
 	if (!IS_ERR(mw)) {
 		mw->device  = pd->device;
 		mw->pd      = pd;
 		mw->uobject = NULL;
+		mw->type    = type;
 		atomic_inc(&pd->usecnt);
 	}
 
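With the new type argument, an in-kernel user allocates and releases a window along these lines (a minimal sketch, not code from this merge; a type 2 window is then bound via a work request rather than a verb call):

	struct ib_mw *mw;

	mw = ib_alloc_mw(pd, IB_MW_TYPE_2);
	if (IS_ERR(mw))
		return PTR_ERR(mw);	/* -ENOSYS if the driver has no alloc_mw */
	/* ... post a bind work request and hand mw->rkey to the peer ... */
	ib_dealloc_mw(mw);		/* releases the reference taken on pd */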
diff --git a/drivers/infiniband/hw/amso1100/c2.c b/drivers/infiniband/hw/amso1100/c2.c
index 7275e727e0f5..d53cf519f42a 100644
--- a/drivers/infiniband/hw/amso1100/c2.c
+++ b/drivers/infiniband/hw/amso1100/c2.c
@@ -1238,15 +1238,4 @@ static struct pci_driver c2_pci_driver = {
 	.remove = c2_remove,
 };
 
-static int __init c2_init_module(void)
-{
-	return pci_register_driver(&c2_pci_driver);
-}
-
-static void __exit c2_exit_module(void)
-{
-	pci_unregister_driver(&c2_pci_driver);
-}
-
-module_init(c2_init_module);
-module_exit(c2_exit_module);
+module_pci_driver(c2_pci_driver);
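module_pci_driver() generates the same init/exit pair the hunk deletes; via module_driver() it expands to approximately:

	static int __init c2_pci_driver_init(void)
	{
		return pci_register_driver(&c2_pci_driver);
	}
	module_init(c2_pci_driver_init);

	static void __exit c2_pci_driver_exit(void)
	{
		pci_unregister_driver(&c2_pci_driver);
	}
	module_exit(c2_pci_driver_exit);

Only the generated function names differ, so the conversion is behavior-preserving.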
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c
index 145d82a64d0a..9c12da0cbd32 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c
@@ -738,7 +738,7 @@ static struct ib_mr *iwch_get_dma_mr(struct ib_pd *pd, int acc)
 	return ibmr;
 }
 
-static struct ib_mw *iwch_alloc_mw(struct ib_pd *pd)
+static struct ib_mw *iwch_alloc_mw(struct ib_pd *pd, enum ib_mw_type type)
 {
 	struct iwch_dev *rhp;
 	struct iwch_pd *php;
@@ -747,6 +747,9 @@ static struct ib_mw *iwch_alloc_mw(struct ib_pd *pd)
 	u32 stag = 0;
 	int ret;
 
+	if (type != IB_MW_TYPE_1)
+		return ERR_PTR(-EINVAL);
+
 	php = to_iwch_pd(pd);
 	rhp = php->rhp;
 	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
diff --git a/drivers/infiniband/hw/cxgb3/iwch_qp.c b/drivers/infiniband/hw/cxgb3/iwch_qp.c
index 6de8463f453b..e5649e8b215d 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_qp.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_qp.c
@@ -567,18 +567,19 @@ int iwch_bind_mw(struct ib_qp *qp,
 	if (mw_bind->send_flags & IB_SEND_SIGNALED)
 		t3_wr_flags = T3_COMPLETION_FLAG;
 
-	sgl.addr = mw_bind->addr;
-	sgl.lkey = mw_bind->mr->lkey;
-	sgl.length = mw_bind->length;
+	sgl.addr = mw_bind->bind_info.addr;
+	sgl.lkey = mw_bind->bind_info.mr->lkey;
+	sgl.length = mw_bind->bind_info.length;
 	wqe->bind.reserved = 0;
 	wqe->bind.type = TPT_VATO;
 
 	/* TBD: check perms */
-	wqe->bind.perms = iwch_ib_to_tpt_bind_access(mw_bind->mw_access_flags);
-	wqe->bind.mr_stag = cpu_to_be32(mw_bind->mr->lkey);
+	wqe->bind.perms = iwch_ib_to_tpt_bind_access(
+		mw_bind->bind_info.mw_access_flags);
+	wqe->bind.mr_stag = cpu_to_be32(mw_bind->bind_info.mr->lkey);
 	wqe->bind.mw_stag = cpu_to_be32(mw->rkey);
-	wqe->bind.mw_len = cpu_to_be32(mw_bind->length);
-	wqe->bind.mw_va = cpu_to_be64(mw_bind->addr);
+	wqe->bind.mw_len = cpu_to_be32(mw_bind->bind_info.length);
+	wqe->bind.mw_va = cpu_to_be64(mw_bind->bind_info.addr);
 	err = iwch_sgl2pbl_map(rhp, &sgl, 1, &pbl_addr, &page_size);
 	if (err) {
 		spin_unlock_irqrestore(&qhp->lock, flag);
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index c13745cde7fa..565bfb161c1a 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -143,14 +143,28 @@ static void connect_reply_upcall(struct c4iw_ep *ep, int status);
 static LIST_HEAD(timeout_list);
 static spinlock_t timeout_lock;
 
+static void deref_qp(struct c4iw_ep *ep)
+{
+	c4iw_qp_rem_ref(&ep->com.qp->ibqp);
+	clear_bit(QP_REFERENCED, &ep->com.flags);
+}
+
+static void ref_qp(struct c4iw_ep *ep)
+{
+	set_bit(QP_REFERENCED, &ep->com.flags);
+	c4iw_qp_add_ref(&ep->com.qp->ibqp);
+}
+
 static void start_ep_timer(struct c4iw_ep *ep)
 {
 	PDBG("%s ep %p\n", __func__, ep);
 	if (timer_pending(&ep->timer)) {
-		PDBG("%s stopped / restarted timer ep %p\n", __func__, ep);
-		del_timer_sync(&ep->timer);
-	} else
-		c4iw_get_ep(&ep->com);
+		pr_err("%s timer already started! ep %p\n",
+		       __func__, ep);
+		return;
+	}
+	clear_bit(TIMEOUT, &ep->com.flags);
+	c4iw_get_ep(&ep->com);
 	ep->timer.expires = jiffies + ep_timeout_secs * HZ;
 	ep->timer.data = (unsigned long)ep;
 	ep->timer.function = ep_timeout;
@@ -159,14 +173,10 @@ static void start_ep_timer(struct c4iw_ep *ep)
 
 static void stop_ep_timer(struct c4iw_ep *ep)
 {
-	PDBG("%s ep %p\n", __func__, ep);
-	if (!timer_pending(&ep->timer)) {
-		WARN(1, "%s timer stopped when its not running! "
-		     "ep %p state %u\n", __func__, ep, ep->com.state);
-		return;
-	}
+	PDBG("%s ep %p stopping\n", __func__, ep);
 	del_timer_sync(&ep->timer);
-	c4iw_put_ep(&ep->com);
+	if (!test_and_set_bit(TIMEOUT, &ep->com.flags))
+		c4iw_put_ep(&ep->com);
 }
 
 static int c4iw_l2t_send(struct c4iw_rdev *rdev, struct sk_buff *skb,
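The two timer hunks above, together with the ep_timeout() change further down in this file, make the endpoint reference held by the timer be dropped exactly once: whichever path first sets the new TIMEOUT flag owns the drop. The rule, condensed (a sketch of the idea, not driver code):

	/*
	 * stop_ep_timer() after del_timer_sync(), or ep_timeout() when the
	 * timer actually fires, race to flip TIMEOUT; test_and_set_bit()
	 * guarantees only the winner releases the reference.
	 */
	if (!test_and_set_bit(TIMEOUT, &ep->com.flags))
		c4iw_put_ep(&ep->com);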
@@ -271,11 +281,13 @@ void _c4iw_free_ep(struct kref *kref)
 
 	ep = container_of(kref, struct c4iw_ep, com.kref);
 	PDBG("%s ep %p state %s\n", __func__, ep, states[state_read(&ep->com)]);
+	if (test_bit(QP_REFERENCED, &ep->com.flags))
+		deref_qp(ep);
 	if (test_bit(RELEASE_RESOURCES, &ep->com.flags)) {
+		remove_handle(ep->com.dev, &ep->com.dev->hwtid_idr, ep->hwtid);
 		cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, ep->hwtid);
 		dst_release(ep->dst);
 		cxgb4_l2t_release(ep->l2t);
-		remove_handle(ep->com.dev, &ep->com.dev->hwtid_idr, ep->hwtid);
 	}
 	kfree(ep);
 }
@@ -687,7 +699,7 @@ static int send_mpa_reject(struct c4iw_ep *ep, const void *pdata, u8 plen)
 	memset(mpa, 0, sizeof(*mpa));
 	memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
 	mpa->flags = MPA_REJECT;
-	mpa->revision = mpa_rev;
+	mpa->revision = ep->mpa_attr.version;
 	mpa->private_data_size = htons(plen);
 
 	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
@@ -863,7 +875,6 @@ static void close_complete_upcall(struct c4iw_ep *ep)
 		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
 		ep->com.cm_id->rem_ref(ep->com.cm_id);
 		ep->com.cm_id = NULL;
-		ep->com.qp = NULL;
 		set_bit(CLOSE_UPCALL, &ep->com.history);
 	}
 }
@@ -906,7 +917,6 @@ static void peer_abort_upcall(struct c4iw_ep *ep)
 		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
 		ep->com.cm_id->rem_ref(ep->com.cm_id);
 		ep->com.cm_id = NULL;
-		ep->com.qp = NULL;
 		set_bit(ABORT_UPCALL, &ep->com.history);
 	}
 }
@@ -946,7 +956,6 @@ static void connect_reply_upcall(struct c4iw_ep *ep, int status)
 	if (status < 0) {
 		ep->com.cm_id->rem_ref(ep->com.cm_id);
 		ep->com.cm_id = NULL;
-		ep->com.qp = NULL;
 	}
 }
 
@@ -1291,11 +1300,13 @@ static void process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb)
 	if (mpa->revision > mpa_rev) {
 		printk(KERN_ERR MOD "%s MPA version mismatch. Local = %d,"
 		       " Received = %d\n", __func__, mpa_rev, mpa->revision);
+		stop_ep_timer(ep);
 		abort_connection(ep, skb, GFP_KERNEL);
 		return;
 	}
 
 	if (memcmp(mpa->key, MPA_KEY_REQ, sizeof(mpa->key))) {
+		stop_ep_timer(ep);
 		abort_connection(ep, skb, GFP_KERNEL);
 		return;
 	}
@@ -1306,6 +1317,7 @@ static void process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb)
 	 * Fail if there's too much private data.
 	 */
 	if (plen > MPA_MAX_PRIVATE_DATA) {
+		stop_ep_timer(ep);
 		abort_connection(ep, skb, GFP_KERNEL);
 		return;
 	}
@@ -1314,6 +1326,7 @@ static void process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb)
 	 * If plen does not account for pkt size
 	 */
 	if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) {
+		stop_ep_timer(ep);
 		abort_connection(ep, skb, GFP_KERNEL);
 		return;
 	}
@@ -1391,30 +1404,33 @@ static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb)
 	skb_pull(skb, sizeof(*hdr));
 	skb_trim(skb, dlen);
 
-	ep->rcv_seq += dlen;
-	BUG_ON(ep->rcv_seq != (ntohl(hdr->seq) + dlen));
-
 	/* update RX credits */
 	update_rx_credits(ep, dlen);
 
 	switch (state_read(&ep->com)) {
 	case MPA_REQ_SENT:
+		ep->rcv_seq += dlen;
 		process_mpa_reply(ep, skb);
 		break;
 	case MPA_REQ_WAIT:
+		ep->rcv_seq += dlen;
 		process_mpa_request(ep, skb);
 		break;
-	case MPA_REP_SENT:
+	case FPDU_MODE: {
+		struct c4iw_qp_attributes attrs;
+		BUG_ON(!ep->com.qp);
+		if (status)
+			pr_err("%s Unexpected streaming data." \
+			       " qpid %u ep %p state %d tid %u status %d\n",
+			       __func__, ep->com.qp->wq.sq.qid, ep,
+			       state_read(&ep->com), ep->hwtid, status);
+		attrs.next_state = C4IW_QP_STATE_ERROR;
+		c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
+			       C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
+		c4iw_ep_disconnect(ep, 1, GFP_KERNEL);
 		break;
+	}
 	default:
-		pr_err("%s Unexpected streaming data." \
-		       " ep %p state %d tid %u status %d\n",
-		       __func__, ep, state_read(&ep->com), ep->hwtid, status);
-
-		/*
-		 * The ep will timeout and inform the ULP of the failure.
-		 * See ep_timeout().
-		 */
 		break;
 	}
 	return 0;
@@ -1437,6 +1453,7 @@ static int abort_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
 	mutex_lock(&ep->com.mutex);
 	switch (ep->com.state) {
 	case ABORTING:
+		c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
 		__state_set(&ep->com, DEAD);
 		release = 1;
 		break;
@@ -1475,11 +1492,11 @@ static void send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid)
 			V_FW_OFLD_CONNECTION_WR_ASTID(atid));
 	req->tcb.cplrxdataack_cplpassacceptrpl =
 			htons(F_FW_OFLD_CONNECTION_WR_CPLRXDATAACK);
-	req->tcb.tx_max = jiffies;
+	req->tcb.tx_max = (__force __be32) jiffies;
 	req->tcb.rcv_adv = htons(1);
 	cxgb4_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx);
 	wscale = compute_wscale(rcv_win);
-	req->tcb.opt0 = TCAM_BYPASS(1) |
+	req->tcb.opt0 = (__force __be64) (TCAM_BYPASS(1) |
 		(nocong ? NO_CONG(1) : 0) |
 		KEEP_ALIVE(1) |
 		DELACK(1) |
@@ -1490,20 +1507,20 @@ static void send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid)
 		SMAC_SEL(ep->smac_idx) |
 		DSCP(ep->tos) |
 		ULP_MODE(ULP_MODE_TCPDDP) |
-		RCV_BUFSIZ(rcv_win >> 10);
-	req->tcb.opt2 = PACE(1) |
+		RCV_BUFSIZ(rcv_win >> 10));
+	req->tcb.opt2 = (__force __be32) (PACE(1) |
 		TX_QUEUE(ep->com.dev->rdev.lldi.tx_modq[ep->tx_chan]) |
 		RX_CHANNEL(0) |
 		CCTRL_ECN(enable_ecn) |
-		RSS_QUEUE_VALID | RSS_QUEUE(ep->rss_qid);
+		RSS_QUEUE_VALID | RSS_QUEUE(ep->rss_qid));
 	if (enable_tcp_timestamps)
-		req->tcb.opt2 |= TSTAMPS_EN(1);
+		req->tcb.opt2 |= (__force __be32) TSTAMPS_EN(1);
 	if (enable_tcp_sack)
-		req->tcb.opt2 |= SACK_EN(1);
+		req->tcb.opt2 |= (__force __be32) SACK_EN(1);
 	if (wscale && enable_tcp_window_scaling)
-		req->tcb.opt2 |= WND_SCALE_EN(1);
-	req->tcb.opt0 = cpu_to_be64(req->tcb.opt0);
-	req->tcb.opt2 = cpu_to_be32(req->tcb.opt2);
+		req->tcb.opt2 |= (__force __be32) WND_SCALE_EN(1);
+	req->tcb.opt0 = cpu_to_be64((__force u64) req->tcb.opt0);
+	req->tcb.opt2 = cpu_to_be32((__force u32) req->tcb.opt2);
 	set_wr_txq(skb, CPL_PRIORITY_CONTROL, ep->ctrlq_idx);
 	set_bit(ACT_OFLD_CONN, &ep->com.history);
 	c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
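The (__force ...) casts in this and the neighboring hunks exist only to silence sparse's endianness checker; the generated code is unchanged. The annotated pattern is: accumulate host-order bits in a field declared __be64/__be32, then byte-swap it in place once at the end. Schematically (FLAG_A and FLAG_B are placeholders):

	__be64 opt0;

	opt0 = (__force __be64) (FLAG_A | FLAG_B);   /* host-order scratch value */
	opt0 = cpu_to_be64((__force u64) opt0);      /* one swap to wire order */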
@@ -1993,6 +2010,7 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
 
 	init_timer(&child_ep->timer);
 	cxgb4_insert_tid(t, child_ep, hwtid);
+	insert_handle(dev, &dev->hwtid_idr, child_ep, child_ep->hwtid);
 	accept_cr(child_ep, peer_ip, skb, req);
 	set_bit(PASS_ACCEPT_REQ, &child_ep->com.history);
 	goto out;
@@ -2018,7 +2036,6 @@ static int pass_establish(struct c4iw_dev *dev, struct sk_buff *skb)
 	     ntohs(req->tcp_opt));
 
 	set_emss(ep, ntohs(req->tcp_opt));
-	insert_handle(dev, &dev->hwtid_idr, ep, ep->hwtid);
 
 	dst_confirm(ep->dst);
 	state_set(&ep->com, MPA_REQ_WAIT);
@@ -2163,7 +2180,7 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
 		break;
 	case MPA_REQ_SENT:
 		stop_ep_timer(ep);
-		if (mpa_rev == 2 && ep->tried_with_mpa_v1)
+		if (mpa_rev == 1 || (mpa_rev == 2 && ep->tried_with_mpa_v1))
 			connect_reply_upcall(ep, -ECONNRESET);
 		else {
 			/*
@@ -2235,9 +2252,8 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
 out:
 	if (release)
 		release_ep_resources(ep);
-
-	/* retry with mpa-v1 */
-	if (ep && ep->retry_with_mpa_v1) {
+	else if (ep->retry_with_mpa_v1) {
+		remove_handle(ep->com.dev, &ep->com.dev->hwtid_idr, ep->hwtid);
 		cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, ep->hwtid);
 		dst_release(ep->dst);
 		cxgb4_l2t_release(ep->l2t);
@@ -2430,6 +2446,7 @@ int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
 	cm_id->add_ref(cm_id);
 	ep->com.cm_id = cm_id;
 	ep->com.qp = qp;
+	ref_qp(ep);
 
 	/* bind QP to EP and move to RTS */
 	attrs.mpa_attr = ep->mpa_attr;
@@ -2460,7 +2477,6 @@ int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
 	return 0;
 err1:
 	ep->com.cm_id = NULL;
-	ep->com.qp = NULL;
 	cm_id->rem_ref(cm_id);
 err:
 	c4iw_put_ep(&ep->com);
@@ -2501,6 +2517,7 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
 	ep->com.cm_id = cm_id;
 	ep->com.qp = get_qhp(dev, conn_param->qpn);
 	BUG_ON(!ep->com.qp);
+	ref_qp(ep);
 	PDBG("%s qpn 0x%x qp %p cm_id %p\n", __func__, conn_param->qpn,
 	     ep->com.qp, cm_id);
 
@@ -2756,7 +2773,8 @@ static void active_ofld_conn_reply(struct c4iw_dev *dev, struct sk_buff *skb,
 	struct c4iw_ep *ep;
 	int atid = be32_to_cpu(req->tid);
 
-	ep = (struct c4iw_ep *)lookup_atid(dev->rdev.lldi.tids, req->tid);
+	ep = (struct c4iw_ep *)lookup_atid(dev->rdev.lldi.tids,
+					   (__force u32) req->tid);
 	if (!ep)
 		return;
 
@@ -2800,7 +2818,7 @@ static void passive_ofld_conn_reply(struct c4iw_dev *dev, struct sk_buff *skb,
 	struct cpl_pass_accept_req *cpl;
 	int ret;
 
-	rpl_skb = (struct sk_buff *)cpu_to_be64(req->cookie);
+	rpl_skb = (struct sk_buff *)(unsigned long)req->cookie;
 	BUG_ON(!rpl_skb);
 	if (req->retval) {
 		PDBG("%s passive open failure %d\n", __func__, req->retval);
@@ -2811,7 +2829,8 @@ static void passive_ofld_conn_reply(struct c4iw_dev *dev, struct sk_buff *skb,
 	} else {
 		cpl = (struct cpl_pass_accept_req *)cplhdr(rpl_skb);
 		OPCODE_TID(cpl) = htonl(MK_OPCODE_TID(CPL_PASS_ACCEPT_REQ,
-					htonl(req->tid)));
+					(__force u32) htonl(
+					(__force u32) req->tid)));
 		ret = pass_accept_req(dev, rpl_skb);
 		if (!ret)
 			kfree_skb(rpl_skb);
@@ -2857,10 +2876,10 @@ static void build_cpl_pass_accept_req(struct sk_buff *skb, int stid , u8 tos)
 	struct tcp_options_received tmp_opt;
 
 	/* Store values from cpl_rx_pkt in temporary location. */
-	vlantag = cpl->vlan;
-	len = cpl->len;
-	l2info = cpl->l2info;
-	hdr_len = cpl->hdr_len;
+	vlantag = (__force u16) cpl->vlan;
+	len = (__force u16) cpl->len;
+	l2info = (__force u32) cpl->l2info;
+	hdr_len = (__force u16) cpl->hdr_len;
 	intf = cpl->iff;
 
 	__skb_pull(skb, sizeof(*req) + sizeof(struct rss_header));
@@ -2871,19 +2890,24 @@ static void build_cpl_pass_accept_req(struct sk_buff *skb, int stid , u8 tos)
 	 */
 	memset(&tmp_opt, 0, sizeof(tmp_opt));
 	tcp_clear_options(&tmp_opt);
-	tcp_parse_options(skb, &tmp_opt, 0, 0, NULL);
+	tcp_parse_options(skb, &tmp_opt, NULL, 0, NULL);
 
 	req = (struct cpl_pass_accept_req *)__skb_push(skb, sizeof(*req));
 	memset(req, 0, sizeof(*req));
 	req->l2info = cpu_to_be16(V_SYN_INTF(intf) |
-			 V_SYN_MAC_IDX(G_RX_MACIDX(htonl(l2info))) |
+			 V_SYN_MAC_IDX(G_RX_MACIDX(
+			 (__force int) htonl(l2info))) |
 			 F_SYN_XACT_MATCH);
-	req->hdr_len = cpu_to_be32(V_SYN_RX_CHAN(G_RX_CHAN(htonl(l2info))) |
-			 V_TCP_HDR_LEN(G_RX_TCPHDR_LEN(htons(hdr_len))) |
-			 V_IP_HDR_LEN(G_RX_IPHDR_LEN(htons(hdr_len))) |
-			 V_ETH_HDR_LEN(G_RX_ETHHDR_LEN(htonl(l2info))));
-	req->vlan = vlantag;
-	req->len = len;
+	req->hdr_len = cpu_to_be32(V_SYN_RX_CHAN(G_RX_CHAN(
+			 (__force int) htonl(l2info))) |
+			 V_TCP_HDR_LEN(G_RX_TCPHDR_LEN(
+			 (__force int) htons(hdr_len))) |
+			 V_IP_HDR_LEN(G_RX_IPHDR_LEN(
+			 (__force int) htons(hdr_len))) |
+			 V_ETH_HDR_LEN(G_RX_ETHHDR_LEN(
+			 (__force int) htonl(l2info))));
+	req->vlan = (__force __be16) vlantag;
+	req->len = (__force __be16) len;
 	req->tos_stid = cpu_to_be32(PASS_OPEN_TID(stid) |
 			PASS_OPEN_TOS(tos));
 	req->tcpopt.mss = htons(tmp_opt.mss_clamp);
@@ -2912,7 +2936,7 @@ static void send_fw_pass_open_req(struct c4iw_dev *dev, struct sk_buff *skb,
 	req->op_compl = htonl(V_WR_OP(FW_OFLD_CONNECTION_WR) | FW_WR_COMPL(1));
 	req->len16_pkd = htonl(FW_WR_LEN16(DIV_ROUND_UP(sizeof(*req), 16)));
 	req->le.version_cpl = htonl(F_FW_OFLD_CONNECTION_WR_CPL);
-	req->le.filter = filter;
+	req->le.filter = (__force __be32) filter;
 	req->le.lport = lport;
 	req->le.pport = rport;
 	req->le.u.ipv4.lip = laddr;
@@ -2938,7 +2962,7 @@ static void send_fw_pass_open_req(struct c4iw_dev *dev, struct sk_buff *skb,
 	 * TP will ignore any value > 0 for MSS index.
 	 */
 	req->tcb.opt0 = cpu_to_be64(V_MSS_IDX(0xF));
-	req->cookie = cpu_to_be64((u64)skb);
+	req->cookie = (unsigned long)skb;
 
 	set_wr_txq(req_skb, CPL_PRIORITY_CONTROL, port_id);
 	cxgb4_ofld_send(dev->rdev.lldi.ports[0], req_skb);
@@ -2988,7 +3012,8 @@ static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb)
 	/*
 	 * Calculate the server tid from filter hit index from cpl_rx_pkt.
 	 */
-	stid = cpu_to_be32(rss->hash_val) - dev->rdev.lldi.tids->sftid_base
+	stid = (__force int) cpu_to_be32((__force u32) rss->hash_val)
+	       - dev->rdev.lldi.tids->sftid_base
 	       + dev->rdev.lldi.tids->nstids;
 
 	lep = (struct c4iw_ep *)lookup_stid(dev->rdev.lldi.tids, stid);
@@ -3049,10 +3074,10 @@ static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb)
 
 	step = dev->rdev.lldi.nrxq / dev->rdev.lldi.nchan;
 	rss_qid = dev->rdev.lldi.rxq_ids[pi->port_id * step];
-	window = htons(tcph->window);
+	window = (__force u16) htons((__force u16)tcph->window);
 
 	/* Calcuate filter portion for LE region. */
-	filter = cpu_to_be32(select_ntuple(dev, dst, e));
+	filter = (__force unsigned int) cpu_to_be32(select_ntuple(dev, dst, e));
 
 	/*
 	 * Synthesize the cpl_pass_accept_req. We have everything except the
@@ -3175,11 +3200,16 @@ static DECLARE_WORK(skb_work, process_work);
 static void ep_timeout(unsigned long arg)
 {
 	struct c4iw_ep *ep = (struct c4iw_ep *)arg;
+	int kickit = 0;
 
 	spin_lock(&timeout_lock);
-	list_add_tail(&ep->entry, &timeout_list);
+	if (!test_and_set_bit(TIMEOUT, &ep->com.flags)) {
+		list_add_tail(&ep->entry, &timeout_list);
+		kickit = 1;
+	}
 	spin_unlock(&timeout_lock);
-	queue_work(workq, &skb_work);
+	if (kickit)
+		queue_work(workq, &skb_work);
 }
 
 /*
@@ -3268,8 +3298,14 @@ static int peer_abort_intr(struct c4iw_dev *dev, struct sk_buff *skb)
 
 	/*
 	 * Wake up any threads in rdma_init() or rdma_fini().
+	 * However, if we are on MPAv2 and want to retry with MPAv1
+	 * then, don't wake up yet.
 	 */
-	c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
+	if (mpa_rev == 2 && !ep->tried_with_mpa_v1) {
+		if (ep->com.state != MPA_REQ_SENT)
+			c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
+	} else
+		c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
 	sched(dev, skb);
 	return 0;
 }
diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c
index ba11c76c0b5a..80069ad595c1 100644
--- a/drivers/infiniband/hw/cxgb4/device.c
+++ b/drivers/infiniband/hw/cxgb4/device.c
@@ -533,7 +533,7 @@ static int c4iw_rdev_open(struct c4iw_rdev *rdev)
 	PDBG("udb len 0x%x udb base %p db_reg %p gts_reg %p qpshift %lu "
 	     "qpmask 0x%x cqshift %lu cqmask 0x%x\n",
 	     (unsigned)pci_resource_len(rdev->lldi.pdev, 2),
-	     (void *)pci_resource_start(rdev->lldi.pdev, 2),
+	     (void *)(unsigned long)pci_resource_start(rdev->lldi.pdev, 2),
 	     rdev->lldi.db_reg,
 	     rdev->lldi.gts_reg,
 	     rdev->qpshift, rdev->qpmask,
@@ -797,7 +797,8 @@ static int c4iw_uld_rx_handler(void *handle, const __be64 *rsp,
 		       "RSS %#llx, FL %#llx, len %u\n",
 		       pci_name(ctx->lldi.pdev), gl->va,
 		       (unsigned long long)be64_to_cpu(*rsp),
-		       (unsigned long long)be64_to_cpu(*(u64 *)gl->va),
+		       (unsigned long long)be64_to_cpu(
+		       *(__force __be64 *)gl->va),
 		       gl->tot_len);
 
 	return 0;
diff --git a/drivers/infiniband/hw/cxgb4/ev.c b/drivers/infiniband/hw/cxgb4/ev.c
index cf2f6b47617a..1a840b2211dd 100644
--- a/drivers/infiniband/hw/cxgb4/ev.c
+++ b/drivers/infiniband/hw/cxgb4/ev.c
@@ -46,9 +46,11 @@ static void post_qp_event(struct c4iw_dev *dev, struct c4iw_cq *chp,
 
 	if ((qhp->attr.state == C4IW_QP_STATE_ERROR) ||
 	    (qhp->attr.state == C4IW_QP_STATE_TERMINATE)) {
-		PDBG("%s AE received after RTS - "
-		     "qp state %d qpid 0x%x status 0x%x\n", __func__,
-		     qhp->attr.state, qhp->wq.sq.qid, CQE_STATUS(err_cqe));
+		pr_err("%s AE after RTS - qpid 0x%x opcode %d status 0x%x "\
+		       "type %d wrid.hi 0x%x wrid.lo 0x%x\n",
+		       __func__, CQE_QPID(err_cqe), CQE_OPCODE(err_cqe),
+		       CQE_STATUS(err_cqe), CQE_TYPE(err_cqe),
+		       CQE_WRID_HI(err_cqe), CQE_WRID_LOW(err_cqe));
 		return;
 	}
 
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
index 9c1644fb0259..4c07fc069766 100644
--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -716,6 +716,8 @@ enum c4iw_ep_flags {
 	ABORT_REQ_IN_PROGRESS	= 1,
 	RELEASE_RESOURCES	= 2,
 	CLOSE_SENT		= 3,
+	TIMEOUT			= 4,
+	QP_REFERENCED		= 5,
 };
 
 enum c4iw_ep_history {
@@ -866,7 +868,7 @@ struct ib_fast_reg_page_list *c4iw_alloc_fastreg_pbl(
 					int page_list_len);
 struct ib_mr *c4iw_alloc_fast_reg_mr(struct ib_pd *pd, int pbl_depth);
 int c4iw_dealloc_mw(struct ib_mw *mw);
-struct ib_mw *c4iw_alloc_mw(struct ib_pd *pd);
+struct ib_mw *c4iw_alloc_mw(struct ib_pd *pd, enum ib_mw_type type);
 struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start,
 					    u64 length, u64 virt, int acc,
 					    struct ib_udata *udata);
diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
index afd81790ab3c..903a92d6f91d 100644
--- a/drivers/infiniband/hw/cxgb4/mem.c
+++ b/drivers/infiniband/hw/cxgb4/mem.c
@@ -650,7 +650,7 @@ err:
 	return ERR_PTR(err);
 }
 
-struct ib_mw *c4iw_alloc_mw(struct ib_pd *pd)
+struct ib_mw *c4iw_alloc_mw(struct ib_pd *pd, enum ib_mw_type type)
 {
 	struct c4iw_dev *rhp;
 	struct c4iw_pd *php;
@@ -659,6 +659,9 @@ struct ib_mw *c4iw_alloc_mw(struct ib_pd *pd)
 	u32 stag = 0;
 	int ret;
 
+	if (type != IB_MW_TYPE_1)
+		return ERR_PTR(-EINVAL);
+
 	php = to_c4iw_pd(pd);
 	rhp = php->rhp;
 	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index 05bfe53bff64..17ba4f8bc12d 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -1383,6 +1383,7 @@ err:
 	qhp->ep = NULL;
 	set_state(qhp, C4IW_QP_STATE_ERROR);
 	free = 1;
+	abort = 1;
 	wake_up(&qhp->wait);
 	BUG_ON(!ep);
 	flush_qp(qhp);
diff --git a/drivers/infiniband/hw/ehca/ehca_iverbs.h b/drivers/infiniband/hw/ehca/ehca_iverbs.h
index 8f7f282ead65..22f79afa7fc1 100644
--- a/drivers/infiniband/hw/ehca/ehca_iverbs.h
+++ b/drivers/infiniband/hw/ehca/ehca_iverbs.h
@@ -95,7 +95,7 @@ int ehca_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr);
 
 int ehca_dereg_mr(struct ib_mr *mr);
 
-struct ib_mw *ehca_alloc_mw(struct ib_pd *pd);
+struct ib_mw *ehca_alloc_mw(struct ib_pd *pd, enum ib_mw_type type);
 
 int ehca_bind_mw(struct ib_qp *qp, struct ib_mw *mw,
 		 struct ib_mw_bind *mw_bind);
diff --git a/drivers/infiniband/hw/ehca/ehca_mrmw.c b/drivers/infiniband/hw/ehca/ehca_mrmw.c
index 87844869dcc2..bcfb0c183620 100644
--- a/drivers/infiniband/hw/ehca/ehca_mrmw.c
+++ b/drivers/infiniband/hw/ehca/ehca_mrmw.c
@@ -688,7 +688,7 @@ dereg_mr_exit0:
 
 /*----------------------------------------------------------------------*/
 
-struct ib_mw *ehca_alloc_mw(struct ib_pd *pd)
+struct ib_mw *ehca_alloc_mw(struct ib_pd *pd, enum ib_mw_type type)
 {
 	struct ib_mw *ib_mw;
 	u64 h_ret;
@@ -698,6 +698,9 @@ struct ib_mw *ehca_alloc_mw(struct ib_pd *pd)
 		container_of(pd->device, struct ehca_shca, ib_device);
 	struct ehca_mw_hipzout_parms hipzout;
 
+	if (type != IB_MW_TYPE_1)
+		return ERR_PTR(-EINVAL);
+
 	e_mw = ehca_mw_new();
 	if (!e_mw) {
 		ib_mw = ERR_PTR(-ENOMEM);
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
index 0a903c129f0a..934792c477bc 100644
--- a/drivers/infiniband/hw/mlx4/mad.c
+++ b/drivers/infiniband/hw/mlx4/mad.c
@@ -1999,16 +1999,17 @@ int mlx4_ib_init_sriov(struct mlx4_ib_dev *dev)
 			goto demux_err;
 		err = mlx4_ib_alloc_demux_ctx(dev, &dev->sriov.demux[i], i + 1);
 		if (err)
-			goto demux_err;
+			goto free_pv;
 	}
 	mlx4_ib_master_tunnels(dev, 1);
 	return 0;
 
+free_pv:
+	free_pv_object(dev, mlx4_master_func_num(dev->dev), i + 1);
 demux_err:
-	while (i > 0) {
+	while (--i >= 0) {
 		free_pv_object(dev, mlx4_master_func_num(dev->dev), i + 1);
 		mlx4_ib_free_demux_ctx(&dev->sriov.demux[i]);
-		--i;
 	}
 	mlx4_ib_device_unregister_sysfs(dev);
 
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index e7d81c0d1ac5..23d734349d8e 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -137,6 +137,14 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
 		props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
 	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC)
 		props->device_cap_flags |= IB_DEVICE_XRC;
+	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW)
+		props->device_cap_flags |= IB_DEVICE_MEM_WINDOW;
+	if (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN) {
+		if (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_WIN_TYPE_2B)
+			props->device_cap_flags |= IB_DEVICE_MEM_WINDOW_TYPE_2B;
+		else
+			props->device_cap_flags |= IB_DEVICE_MEM_WINDOW_TYPE_2A;
+	}
 
 	props->vendor_id	   = be32_to_cpup((__be32 *) (out_mad->data + 36)) &
 		0xffffff;
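A consumer can key off these capability bits before choosing a window type. A minimal sketch using the core query verb (not code from this merge):

	struct ib_device_attr attr;
	struct ib_mw *mw;

	if (ib_query_device(ibdev, &attr))
		return -EIO;
	if (attr.device_cap_flags & (IB_DEVICE_MEM_WINDOW_TYPE_2A |
				     IB_DEVICE_MEM_WINDOW_TYPE_2B))
		mw = ib_alloc_mw(pd, IB_MW_TYPE_2);	/* type 2, rkey bound via WR */
	else if (attr.device_cap_flags & IB_DEVICE_MEM_WINDOW)
		mw = ib_alloc_mw(pd, IB_MW_TYPE_1);	/* classic type 1 window */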
@@ -1434,6 +1442,17 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
 		ibdev->ib_dev.dealloc_fmr	= mlx4_ib_fmr_dealloc;
 	}
 
+	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW ||
+	    dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN) {
+		ibdev->ib_dev.alloc_mw = mlx4_ib_alloc_mw;
+		ibdev->ib_dev.bind_mw = mlx4_ib_bind_mw;
+		ibdev->ib_dev.dealloc_mw = mlx4_ib_dealloc_mw;
+
+		ibdev->ib_dev.uverbs_cmd_mask |=
+			(1ull << IB_USER_VERBS_CMD_ALLOC_MW) |
+			(1ull << IB_USER_VERBS_CMD_DEALLOC_MW);
+	}
+
 	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) {
 		ibdev->ib_dev.alloc_xrcd = mlx4_ib_alloc_xrcd;
 		ibdev->ib_dev.dealloc_xrcd = mlx4_ib_dealloc_xrcd;
@@ -1601,8 +1620,7 @@ static void do_slave_init(struct mlx4_ib_dev *ibdev, int slave, int do_init)
 		spin_unlock_irqrestore(&ibdev->sriov.going_down_lock, flags);
 	}
 out:
-	if (dm)
-		kfree(dm);
+	kfree(dm);
 	return;
 }
 
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
index dcd845bc30f0..f61ec26500c4 100644
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -116,6 +116,11 @@ struct mlx4_ib_mr {
 	struct ib_umem	       *umem;
 };
 
+struct mlx4_ib_mw {
+	struct ib_mw		ibmw;
+	struct mlx4_mw		mmw;
+};
+
 struct mlx4_ib_fast_reg_page_list {
 	struct ib_fast_reg_page_list	ibfrpl;
 	__be64			       *mapped_page_list;
@@ -533,6 +538,11 @@ static inline struct mlx4_ib_mr *to_mmr(struct ib_mr *ibmr)
 	return container_of(ibmr, struct mlx4_ib_mr, ibmr);
 }
 
+static inline struct mlx4_ib_mw *to_mmw(struct ib_mw *ibmw)
+{
+	return container_of(ibmw, struct mlx4_ib_mw, ibmw);
+}
+
 static inline struct mlx4_ib_fast_reg_page_list *to_mfrpl(struct ib_fast_reg_page_list *ibfrpl)
 {
 	return container_of(ibfrpl, struct mlx4_ib_fast_reg_page_list, ibfrpl);
@@ -581,6 +591,10 @@ struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 				  u64 virt_addr, int access_flags,
 				  struct ib_udata *udata);
 int mlx4_ib_dereg_mr(struct ib_mr *mr);
+struct ib_mw *mlx4_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type);
+int mlx4_ib_bind_mw(struct ib_qp *qp, struct ib_mw *mw,
+		    struct ib_mw_bind *mw_bind);
+int mlx4_ib_dealloc_mw(struct ib_mw *mw);
 struct ib_mr *mlx4_ib_alloc_fast_reg_mr(struct ib_pd *pd,
 					int max_page_list_len);
 struct ib_fast_reg_page_list *mlx4_ib_alloc_fast_reg_page_list(struct ib_device *ibdev,
@@ -652,12 +666,12 @@ int __mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
 int mlx4_ib_resolve_grh(struct mlx4_ib_dev *dev, const struct ib_ah_attr *ah_attr,
 			u8 *mac, int *is_mcast, u8 port);
 
-static inline int mlx4_ib_ah_grh_present(struct mlx4_ib_ah *ah)
+static inline bool mlx4_ib_ah_grh_present(struct mlx4_ib_ah *ah)
 {
 	u8 port = be32_to_cpu(ah->av.ib.port_pd) >> 24 & 3;
 
 	if (rdma_port_get_link_layer(ah->ibah.device, port) == IB_LINK_LAYER_ETHERNET)
-		return 1;
+		return true;
 
 	return !!(ah->av.ib.g_slid & 0x80);
 }
diff --git a/drivers/infiniband/hw/mlx4/mr.c b/drivers/infiniband/hw/mlx4/mr.c index bbaf6176f207..e471f089ff00 100644 --- a/drivers/infiniband/hw/mlx4/mr.c +++ b/drivers/infiniband/hw/mlx4/mr.c | |||
@@ -41,9 +41,19 @@ static u32 convert_access(int acc) | |||
41 | (acc & IB_ACCESS_REMOTE_WRITE ? MLX4_PERM_REMOTE_WRITE : 0) | | 41 | (acc & IB_ACCESS_REMOTE_WRITE ? MLX4_PERM_REMOTE_WRITE : 0) | |
42 | (acc & IB_ACCESS_REMOTE_READ ? MLX4_PERM_REMOTE_READ : 0) | | 42 | (acc & IB_ACCESS_REMOTE_READ ? MLX4_PERM_REMOTE_READ : 0) | |
43 | (acc & IB_ACCESS_LOCAL_WRITE ? MLX4_PERM_LOCAL_WRITE : 0) | | 43 | (acc & IB_ACCESS_LOCAL_WRITE ? MLX4_PERM_LOCAL_WRITE : 0) | |
44 | (acc & IB_ACCESS_MW_BIND ? MLX4_PERM_BIND_MW : 0) | | ||
44 | MLX4_PERM_LOCAL_READ; | 45 | MLX4_PERM_LOCAL_READ; |
45 | } | 46 | } |
46 | 47 | ||
48 | static enum mlx4_mw_type to_mlx4_type(enum ib_mw_type type) | ||
49 | { | ||
50 | switch (type) { | ||
51 | case IB_MW_TYPE_1: return MLX4_MW_TYPE_1; | ||
52 | case IB_MW_TYPE_2: return MLX4_MW_TYPE_2; | ||
53 | default: return -1; | ||
54 | } | ||
55 | } | ||
56 | |||
47 | struct ib_mr *mlx4_ib_get_dma_mr(struct ib_pd *pd, int acc) | 57 | struct ib_mr *mlx4_ib_get_dma_mr(struct ib_pd *pd, int acc) |
48 | { | 58 | { |
49 | struct mlx4_ib_mr *mr; | 59 | struct mlx4_ib_mr *mr; |
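to_mlx4_type() maps the two ib_mw_type values onto the matching mlx4 enum; the -1 default is unreachable for anything uverbs can pass down, though it is returned through an enum mlx4_mw_type. A hedged sketch of an explicitly checked variant (this is an alternative shape, not what the driver does):

static int to_mlx4_type_checked(enum ib_mw_type type,
                                enum mlx4_mw_type *out)
{
    switch (type) {
    case IB_MW_TYPE_1:
        *out = MLX4_MW_TYPE_1;
        return 0;
    case IB_MW_TYPE_2:
        *out = MLX4_MW_TYPE_2;
        return 0;
    default:
        return -EINVAL;    /* reject unknown window types */
    }
}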
@@ -68,7 +78,7 @@ struct ib_mr *mlx4_ib_get_dma_mr(struct ib_pd *pd, int acc) | |||
68 | return &mr->ibmr; | 78 | return &mr->ibmr; |
69 | 79 | ||
70 | err_mr: | 80 | err_mr: |
71 | mlx4_mr_free(to_mdev(pd->device)->dev, &mr->mmr); | 81 | (void) mlx4_mr_free(to_mdev(pd->device)->dev, &mr->mmr); |
72 | 82 | ||
73 | err_free: | 83 | err_free: |
74 | kfree(mr); | 84 | kfree(mr); |
@@ -163,7 +173,7 @@ struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, | |||
163 | return &mr->ibmr; | 173 | return &mr->ibmr; |
164 | 174 | ||
165 | err_mr: | 175 | err_mr: |
166 | mlx4_mr_free(to_mdev(pd->device)->dev, &mr->mmr); | 176 | (void) mlx4_mr_free(to_mdev(pd->device)->dev, &mr->mmr); |
167 | 177 | ||
168 | err_umem: | 178 | err_umem: |
169 | ib_umem_release(mr->umem); | 179 | ib_umem_release(mr->umem); |
@@ -177,8 +187,11 @@ err_free: | |||
177 | int mlx4_ib_dereg_mr(struct ib_mr *ibmr) | 187 | int mlx4_ib_dereg_mr(struct ib_mr *ibmr) |
178 | { | 188 | { |
179 | struct mlx4_ib_mr *mr = to_mmr(ibmr); | 189 | struct mlx4_ib_mr *mr = to_mmr(ibmr); |
190 | int ret; | ||
180 | 191 | ||
181 | mlx4_mr_free(to_mdev(ibmr->device)->dev, &mr->mmr); | 192 | ret = mlx4_mr_free(to_mdev(ibmr->device)->dev, &mr->mmr); |
193 | if (ret) | ||
194 | return ret; | ||
182 | if (mr->umem) | 195 | if (mr->umem) |
183 | ib_umem_release(mr->umem); | 196 | ib_umem_release(mr->umem); |
184 | kfree(mr); | 197 | kfree(mr); |
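mlx4_mr_free() can now fail (MPT teardown is a firmware command), so mlx4_ib_dereg_mr() must check it before releasing the umem and the host structure; otherwise a failed deregistration would leave the HCA holding translations to pages the kernel has already reused. The ordering rule, sketched generically (hw_mr_free() and release_backing_pages() are hypothetical stand-ins):

static int example_dereg(struct hw_mr *hmr)
{
    int ret;

    ret = hw_mr_free(hmr);          /* tell the device first */
    if (ret)
        return ret;                 /* keep backing memory alive */

    release_backing_pages(hmr);     /* only now is this safe */
    kfree(hmr);
    return 0;
}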
@@ -186,6 +199,70 @@ int mlx4_ib_dereg_mr(struct ib_mr *ibmr) | |||
186 | return 0; | 199 | return 0; |
187 | } | 200 | } |
188 | 201 | ||
202 | struct ib_mw *mlx4_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type) | ||
203 | { | ||
204 | struct mlx4_ib_dev *dev = to_mdev(pd->device); | ||
205 | struct mlx4_ib_mw *mw; | ||
206 | int err; | ||
207 | |||
208 | mw = kmalloc(sizeof(*mw), GFP_KERNEL); | ||
209 | if (!mw) | ||
210 | return ERR_PTR(-ENOMEM); | ||
211 | |||
212 | err = mlx4_mw_alloc(dev->dev, to_mpd(pd)->pdn, | ||
213 | to_mlx4_type(type), &mw->mmw); | ||
214 | if (err) | ||
215 | goto err_free; | ||
216 | |||
217 | err = mlx4_mw_enable(dev->dev, &mw->mmw); | ||
218 | if (err) | ||
219 | goto err_mw; | ||
220 | |||
221 | mw->ibmw.rkey = mw->mmw.key; | ||
222 | |||
223 | return &mw->ibmw; | ||
224 | |||
225 | err_mw: | ||
226 | mlx4_mw_free(dev->dev, &mw->mmw); | ||
227 | |||
228 | err_free: | ||
229 | kfree(mw); | ||
230 | |||
231 | return ERR_PTR(err); | ||
232 | } | ||
233 | |||
234 | int mlx4_ib_bind_mw(struct ib_qp *qp, struct ib_mw *mw, | ||
235 | struct ib_mw_bind *mw_bind) | ||
236 | { | ||
237 | struct ib_send_wr wr; | ||
238 | struct ib_send_wr *bad_wr; | ||
239 | int ret; | ||
240 | |||
241 | memset(&wr, 0, sizeof(wr)); | ||
242 | wr.opcode = IB_WR_BIND_MW; | ||
243 | wr.wr_id = mw_bind->wr_id; | ||
244 | wr.send_flags = mw_bind->send_flags; | ||
245 | wr.wr.bind_mw.mw = mw; | ||
246 | wr.wr.bind_mw.bind_info = mw_bind->bind_info; | ||
247 | wr.wr.bind_mw.rkey = ib_inc_rkey(mw->rkey); | ||
248 | |||
249 | ret = mlx4_ib_post_send(qp, &wr, &bad_wr); | ||
250 | if (!ret) | ||
251 | mw->rkey = wr.wr.bind_mw.rkey; | ||
252 | |||
253 | return ret; | ||
254 | } | ||
255 | |||
256 | int mlx4_ib_dealloc_mw(struct ib_mw *ibmw) | ||
257 | { | ||
258 | struct mlx4_ib_mw *mw = to_mmw(ibmw); | ||
259 | |||
260 | mlx4_mw_free(to_mdev(ibmw->device)->dev, &mw->mmw); | ||
261 | kfree(mw); | ||
262 | |||
263 | return 0; | ||
264 | } | ||
265 | |||
189 | struct ib_mr *mlx4_ib_alloc_fast_reg_mr(struct ib_pd *pd, | 266 | struct ib_mr *mlx4_ib_alloc_fast_reg_mr(struct ib_pd *pd, |
190 | int max_page_list_len) | 267 | int max_page_list_len) |
191 | { | 268 | { |
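Taken together, the three new entry points give mlx4 the full memory-window life cycle: allocate a window against a PD, bind it to a registered MR by posting an IB_WR_BIND_MW work request, and eventually deallocate it. mlx4_ib_bind_mw() derives the new rkey with ib_inc_rkey(), which (as added alongside this series) increments only the low eight "key" bits so the window index stays fixed. A hedged kernel-consumer sketch using the core verbs (qp, pd, mr, iova and len are assumed to exist; a type 1 window is used to match the classic ib_bind_mw() semantics, while type 2 windows additionally get the MLX4_WQE_BIND_TYPE_2 flag in set_bind_seg()):

#include <linux/err.h>
#include <linux/string.h>
#include <rdma/ib_verbs.h>

static int demo_bind_window(struct ib_qp *qp, struct ib_pd *pd,
                            struct ib_mr *mr, u64 iova, u64 len)
{
    struct ib_mw_bind bind;
    struct ib_mw *mw;
    int err;

    mw = ib_alloc_mw(pd, IB_MW_TYPE_1);
    if (IS_ERR(mw))
        return PTR_ERR(mw);

    memset(&bind, 0, sizeof(bind));
    bind.send_flags                = IB_SEND_SIGNALED;
    bind.bind_info.mr              = mr;    /* previously registered MR */
    bind.bind_info.addr            = iova;
    bind.bind_info.length          = len;
    bind.bind_info.mw_access_flags = IB_ACCESS_REMOTE_READ |
                                     IB_ACCESS_REMOTE_WRITE;

    err = ib_bind_mw(qp, mw, &bind);    /* drives mlx4_ib_bind_mw() above */
    if (err) {
        ib_dealloc_mw(mw);
        return err;
    }

    /* mw->rkey now holds the incremented key to advertise to the peer */
    return 0;
}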
@@ -212,7 +289,7 @@ struct ib_mr *mlx4_ib_alloc_fast_reg_mr(struct ib_pd *pd, | |||
212 | return &mr->ibmr; | 289 | return &mr->ibmr; |
213 | 290 | ||
214 | err_mr: | 291 | err_mr: |
215 | mlx4_mr_free(dev->dev, &mr->mmr); | 292 | (void) mlx4_mr_free(dev->dev, &mr->mmr); |
216 | 293 | ||
217 | err_free: | 294 | err_free: |
218 | kfree(mr); | 295 | kfree(mr); |
@@ -291,7 +368,7 @@ struct ib_fmr *mlx4_ib_fmr_alloc(struct ib_pd *pd, int acc, | |||
291 | return &fmr->ibfmr; | 368 | return &fmr->ibfmr; |
292 | 369 | ||
293 | err_mr: | 370 | err_mr: |
294 | mlx4_mr_free(to_mdev(pd->device)->dev, &fmr->mfmr.mr); | 371 | (void) mlx4_mr_free(to_mdev(pd->device)->dev, &fmr->mfmr.mr); |
295 | 372 | ||
296 | err_free: | 373 | err_free: |
297 | kfree(fmr); | 374 | kfree(fmr); |
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c index 19e0637220b9..35cced2a4da8 100644 --- a/drivers/infiniband/hw/mlx4/qp.c +++ b/drivers/infiniband/hw/mlx4/qp.c | |||
@@ -104,6 +104,7 @@ static const __be32 mlx4_ib_opcode[] = { | |||
104 | [IB_WR_FAST_REG_MR] = cpu_to_be32(MLX4_OPCODE_FMR), | 104 | [IB_WR_FAST_REG_MR] = cpu_to_be32(MLX4_OPCODE_FMR), |
105 | [IB_WR_MASKED_ATOMIC_CMP_AND_SWP] = cpu_to_be32(MLX4_OPCODE_MASKED_ATOMIC_CS), | 105 | [IB_WR_MASKED_ATOMIC_CMP_AND_SWP] = cpu_to_be32(MLX4_OPCODE_MASKED_ATOMIC_CS), |
106 | [IB_WR_MASKED_ATOMIC_FETCH_AND_ADD] = cpu_to_be32(MLX4_OPCODE_MASKED_ATOMIC_FA), | 106 | [IB_WR_MASKED_ATOMIC_FETCH_AND_ADD] = cpu_to_be32(MLX4_OPCODE_MASKED_ATOMIC_FA), |
107 | [IB_WR_BIND_MW] = cpu_to_be32(MLX4_OPCODE_BIND_MW), | ||
107 | }; | 108 | }; |
108 | 109 | ||
109 | static struct mlx4_ib_sqp *to_msqp(struct mlx4_ib_qp *mqp) | 110 | static struct mlx4_ib_sqp *to_msqp(struct mlx4_ib_qp *mqp) |
@@ -1746,11 +1747,11 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr, | |||
1746 | int header_size; | 1747 | int header_size; |
1747 | int spc; | 1748 | int spc; |
1748 | int i; | 1749 | int i; |
1749 | int is_eth; | ||
1750 | int is_vlan = 0; | ||
1751 | int is_grh; | ||
1752 | u16 vlan; | ||
1753 | int err = 0; | 1750 | int err = 0; |
1751 | u16 vlan = 0xffff; | ||
1752 | bool is_eth; | ||
1753 | bool is_vlan = false; | ||
1754 | bool is_grh; | ||
1754 | 1755 | ||
1755 | send_size = 0; | 1756 | send_size = 0; |
1756 | for (i = 0; i < wr->num_sge; ++i) | 1757 | for (i = 0; i < wr->num_sge; ++i) |
@@ -1953,9 +1954,12 @@ static int mlx4_wq_overflow(struct mlx4_ib_wq *wq, int nreq, struct ib_cq *ib_cq | |||
1953 | 1954 | ||
1954 | static __be32 convert_access(int acc) | 1955 | static __be32 convert_access(int acc) |
1955 | { | 1956 | { |
1956 | return (acc & IB_ACCESS_REMOTE_ATOMIC ? cpu_to_be32(MLX4_WQE_FMR_PERM_ATOMIC) : 0) | | 1957 | return (acc & IB_ACCESS_REMOTE_ATOMIC ? |
1957 | (acc & IB_ACCESS_REMOTE_WRITE ? cpu_to_be32(MLX4_WQE_FMR_PERM_REMOTE_WRITE) : 0) | | 1958 | cpu_to_be32(MLX4_WQE_FMR_AND_BIND_PERM_ATOMIC) : 0) | |
1958 | (acc & IB_ACCESS_REMOTE_READ ? cpu_to_be32(MLX4_WQE_FMR_PERM_REMOTE_READ) : 0) | | 1959 | (acc & IB_ACCESS_REMOTE_WRITE ? |
1960 | cpu_to_be32(MLX4_WQE_FMR_AND_BIND_PERM_REMOTE_WRITE) : 0) | | ||
1961 | (acc & IB_ACCESS_REMOTE_READ ? | ||
1962 | cpu_to_be32(MLX4_WQE_FMR_AND_BIND_PERM_REMOTE_READ) : 0) | | ||
1959 | (acc & IB_ACCESS_LOCAL_WRITE ? cpu_to_be32(MLX4_WQE_FMR_PERM_LOCAL_WRITE) : 0) | | 1963 | (acc & IB_ACCESS_LOCAL_WRITE ? cpu_to_be32(MLX4_WQE_FMR_PERM_LOCAL_WRITE) : 0) | |
1960 | cpu_to_be32(MLX4_WQE_FMR_PERM_LOCAL_READ); | 1964 | cpu_to_be32(MLX4_WQE_FMR_PERM_LOCAL_READ); |
1961 | } | 1965 | } |
@@ -1981,12 +1985,28 @@ static void set_fmr_seg(struct mlx4_wqe_fmr_seg *fseg, struct ib_send_wr *wr) | |||
1981 | fseg->reserved[1] = 0; | 1985 | fseg->reserved[1] = 0; |
1982 | } | 1986 | } |
1983 | 1987 | ||
1988 | static void set_bind_seg(struct mlx4_wqe_bind_seg *bseg, struct ib_send_wr *wr) | ||
1989 | { | ||
1990 | bseg->flags1 = | ||
1991 | convert_access(wr->wr.bind_mw.bind_info.mw_access_flags) & | ||
1992 | cpu_to_be32(MLX4_WQE_FMR_AND_BIND_PERM_REMOTE_READ | | ||
1993 | MLX4_WQE_FMR_AND_BIND_PERM_REMOTE_WRITE | | ||
1994 | MLX4_WQE_FMR_AND_BIND_PERM_ATOMIC); | ||
1995 | bseg->flags2 = 0; | ||
1996 | if (wr->wr.bind_mw.mw->type == IB_MW_TYPE_2) | ||
1997 | bseg->flags2 |= cpu_to_be32(MLX4_WQE_BIND_TYPE_2); | ||
1998 | if (wr->wr.bind_mw.bind_info.mw_access_flags & IB_ZERO_BASED) | ||
1999 | bseg->flags2 |= cpu_to_be32(MLX4_WQE_BIND_ZERO_BASED); | ||
2000 | bseg->new_rkey = cpu_to_be32(wr->wr.bind_mw.rkey); | ||
2001 | bseg->lkey = cpu_to_be32(wr->wr.bind_mw.bind_info.mr->lkey); | ||
2002 | bseg->addr = cpu_to_be64(wr->wr.bind_mw.bind_info.addr); | ||
2003 | bseg->length = cpu_to_be64(wr->wr.bind_mw.bind_info.length); | ||
2004 | } | ||
2005 | |||
1984 | static void set_local_inv_seg(struct mlx4_wqe_local_inval_seg *iseg, u32 rkey) | 2006 | static void set_local_inv_seg(struct mlx4_wqe_local_inval_seg *iseg, u32 rkey) |
1985 | { | 2007 | { |
1986 | iseg->flags = 0; | 2008 | memset(iseg, 0, sizeof(*iseg)); |
1987 | iseg->mem_key = cpu_to_be32(rkey); | 2009 | iseg->mem_key = cpu_to_be32(rkey); |
1988 | iseg->guest_id = 0; | ||
1989 | iseg->pa = 0; | ||
1990 | } | 2010 | } |
1991 | 2011 | ||
1992 | static __always_inline void set_raddr_seg(struct mlx4_wqe_raddr_seg *rseg, | 2012 | static __always_inline void set_raddr_seg(struct mlx4_wqe_raddr_seg *rseg, |
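Two details in this hunk are worth noting: set_bind_seg() masks the converted access bits down to the three permissions that are meaningful on a bind, and set_local_inv_seg() replaces three explicit field clears with one memset() over the whole segment, which stays correct if the struct ever grows. A toy illustration of why the memset() form is the safer default for WQE segments (the struct is hypothetical):

#include <linux/string.h>
#include <linux/types.h>
#include <asm/byteorder.h>

struct demo_seg {
    __be32 flags;
    __be32 mem_key;
    u64    reserved[2];    /* must be zero on the wire */
};

static void set_demo_seg(struct demo_seg *seg, u32 key)
{
    /* Zero everything first: reserved or newly added fields can
     * never leak stale ring-buffer contents to the HCA. */
    memset(seg, 0, sizeof(*seg));
    seg->mem_key = cpu_to_be32(key);
}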
@@ -2291,6 +2311,13 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, | |||
2291 | size += sizeof (struct mlx4_wqe_fmr_seg) / 16; | 2311 | size += sizeof (struct mlx4_wqe_fmr_seg) / 16; |
2292 | break; | 2312 | break; |
2293 | 2313 | ||
2314 | case IB_WR_BIND_MW: | ||
2315 | ctrl->srcrb_flags |= | ||
2316 | cpu_to_be32(MLX4_WQE_CTRL_STRONG_ORDER); | ||
2317 | set_bind_seg(wqe, wr); | ||
2318 | wqe += sizeof(struct mlx4_wqe_bind_seg); | ||
2319 | size += sizeof(struct mlx4_wqe_bind_seg) / 16; | ||
2320 | break; | ||
2294 | default: | 2321 | default: |
2295 | /* No extra segments required for sends */ | 2322 | /* No extra segments required for sends */ |
2296 | break; | 2323 | break; |
diff --git a/drivers/infiniband/hw/mlx4/sysfs.c b/drivers/infiniband/hw/mlx4/sysfs.c index 5b2a01dfb907..97516eb363b7 100644 --- a/drivers/infiniband/hw/mlx4/sysfs.c +++ b/drivers/infiniband/hw/mlx4/sysfs.c | |||
@@ -732,7 +732,7 @@ int mlx4_ib_device_register_sysfs(struct mlx4_ib_dev *dev) | |||
732 | dev->ports_parent = | 732 | dev->ports_parent = |
733 | kobject_create_and_add("ports", | 733 | kobject_create_and_add("ports", |
734 | kobject_get(dev->iov_parent)); | 734 | kobject_get(dev->iov_parent)); |
735 | if (!dev->iov_parent) { | 735 | if (!dev->ports_parent) { |
736 | ret = -ENOMEM; | 736 | ret = -ENOMEM; |
737 | goto err_ports; | 737 | goto err_ports; |
738 | } | 738 | } |
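The sysfs fix is a classic copy-and-paste bug: the code allocated dev->ports_parent but tested dev->iov_parent, which had already been checked a few lines earlier, so an allocation failure here would have gone unnoticed until a later dereference. The corrected shape of the pattern, reduced to a hypothetical two-level kobject setup:

#include <linux/kobject.h>

static int demo_register(struct kobject *parent, struct kobject **iov,
                         struct kobject **ports)
{
    *iov = kobject_create_and_add("iov", parent);
    if (!*iov)
        return -ENOMEM;

    *ports = kobject_create_and_add("ports", *iov);
    if (!*ports) {               /* test the pointer just assigned */
        kobject_put(*iov);
        return -ENOMEM;
    }
    return 0;
}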
diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c index 07e4fbad987a..8f67fe2e91e6 100644 --- a/drivers/infiniband/hw/nes/nes_verbs.c +++ b/drivers/infiniband/hw/nes/nes_verbs.c | |||
@@ -55,7 +55,8 @@ static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev); | |||
55 | /** | 55 | /** |
56 | * nes_alloc_mw | 56 | * nes_alloc_mw |
57 | */ | 57 | */ |
58 | static struct ib_mw *nes_alloc_mw(struct ib_pd *ibpd) { | 58 | static struct ib_mw *nes_alloc_mw(struct ib_pd *ibpd, enum ib_mw_type type) |
59 | { | ||
59 | struct nes_pd *nespd = to_nespd(ibpd); | 60 | struct nes_pd *nespd = to_nespd(ibpd); |
60 | struct nes_vnic *nesvnic = to_nesvnic(ibpd->device); | 61 | struct nes_vnic *nesvnic = to_nesvnic(ibpd->device); |
61 | struct nes_device *nesdev = nesvnic->nesdev; | 62 | struct nes_device *nesdev = nesvnic->nesdev; |
@@ -71,6 +72,9 @@ static struct ib_mw *nes_alloc_mw(struct ib_pd *ibpd) { | |||
71 | u32 driver_key = 0; | 72 | u32 driver_key = 0; |
72 | u8 stag_key = 0; | 73 | u8 stag_key = 0; |
73 | 74 | ||
75 | if (type != IB_MW_TYPE_1) | ||
76 | return ERR_PTR(-EINVAL); | ||
77 | |||
74 | get_random_bytes(&next_stag_index, sizeof(next_stag_index)); | 78 | get_random_bytes(&next_stag_index, sizeof(next_stag_index)); |
75 | stag_key = (u8)next_stag_index; | 79 | stag_key = (u8)next_stag_index; |
76 | 80 | ||
@@ -244,20 +248,19 @@ static int nes_bind_mw(struct ib_qp *ibqp, struct ib_mw *ibmw, | |||
244 | if (ibmw_bind->send_flags & IB_SEND_SIGNALED) | 248 | if (ibmw_bind->send_flags & IB_SEND_SIGNALED) |
245 | wqe_misc |= NES_IWARP_SQ_WQE_SIGNALED_COMPL; | 249 | wqe_misc |= NES_IWARP_SQ_WQE_SIGNALED_COMPL; |
246 | 250 | ||
247 | if (ibmw_bind->mw_access_flags & IB_ACCESS_REMOTE_WRITE) { | 251 | if (ibmw_bind->bind_info.mw_access_flags & IB_ACCESS_REMOTE_WRITE) |
248 | wqe_misc |= NES_CQP_STAG_RIGHTS_REMOTE_WRITE; | 252 | wqe_misc |= NES_CQP_STAG_RIGHTS_REMOTE_WRITE; |
249 | } | 253 | if (ibmw_bind->bind_info.mw_access_flags & IB_ACCESS_REMOTE_READ) |
250 | if (ibmw_bind->mw_access_flags & IB_ACCESS_REMOTE_READ) { | ||
251 | wqe_misc |= NES_CQP_STAG_RIGHTS_REMOTE_READ; | 254 | wqe_misc |= NES_CQP_STAG_RIGHTS_REMOTE_READ; |
252 | } | ||
253 | 255 | ||
254 | set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_MISC_IDX, wqe_misc); | 256 | set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_MISC_IDX, wqe_misc); |
255 | set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_BIND_WQE_MR_IDX, ibmw_bind->mr->lkey); | 257 | set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_BIND_WQE_MR_IDX, |
258 | ibmw_bind->bind_info.mr->lkey); | ||
256 | set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_BIND_WQE_MW_IDX, ibmw->rkey); | 259 | set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_BIND_WQE_MW_IDX, ibmw->rkey); |
257 | set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_BIND_WQE_LENGTH_LOW_IDX, | 260 | set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_BIND_WQE_LENGTH_LOW_IDX, |
258 | ibmw_bind->length); | 261 | ibmw_bind->bind_info.length); |
259 | wqe->wqe_words[NES_IWARP_SQ_BIND_WQE_LENGTH_HIGH_IDX] = 0; | 262 | wqe->wqe_words[NES_IWARP_SQ_BIND_WQE_LENGTH_HIGH_IDX] = 0; |
260 | u64temp = (u64)ibmw_bind->addr; | 263 | u64temp = (u64)ibmw_bind->bind_info.addr; |
261 | set_wqe_64bit_value(wqe->wqe_words, NES_IWARP_SQ_BIND_WQE_VA_FBO_LOW_IDX, u64temp); | 264 | set_wqe_64bit_value(wqe->wqe_words, NES_IWARP_SQ_BIND_WQE_VA_FBO_LOW_IDX, u64temp); |
262 | 265 | ||
263 | head++; | 266 | head++; |
diff --git a/drivers/infiniband/hw/qib/qib_qp.c b/drivers/infiniband/hw/qib/qib_qp.c index 35275099cafd..a6a2cc2ba260 100644 --- a/drivers/infiniband/hw/qib/qib_qp.c +++ b/drivers/infiniband/hw/qib/qib_qp.c | |||
@@ -268,8 +268,9 @@ static void remove_qp(struct qib_ibdev *dev, struct qib_qp *qp) | |||
268 | qpp = &q->next) | 268 | qpp = &q->next) |
269 | if (q == qp) { | 269 | if (q == qp) { |
270 | atomic_dec(&qp->refcount); | 270 | atomic_dec(&qp->refcount); |
271 | *qpp = qp->next; | 271 | rcu_assign_pointer(*qpp, |
272 | rcu_assign_pointer(qp->next, NULL); | 272 | rcu_dereference_protected(qp->next, |
273 | lockdep_is_held(&dev->qpt_lock))); | ||
273 | break; | 274 | break; |
274 | } | 275 | } |
275 | } | 276 | } |
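The qib hunk tightens RCU discipline when unlinking a QP from its hash bucket: the store into *qpp must be published with rcu_assign_pointer(), since readers may be walking the list concurrently, and qp->next may only be read through rcu_dereference_protected() with a lockdep assertion that qpt_lock is held. A generic sketch of lock-protected removal from an RCU-traversed singly linked list (types hypothetical):

#include <linux/rcupdate.h>
#include <linux/spinlock.h>

struct node {
    struct node __rcu *next;
    int key;
};

/* Caller holds 'lock', which serializes all updaters. */
static void remove_node(struct node __rcu **head, struct node *victim,
                        spinlock_t *lock)
{
    struct node __rcu **pp = head;
    struct node *n;

    while ((n = rcu_dereference_protected(*pp, lockdep_is_held(lock)))) {
        if (n == victim) {
            /* Publish the unlink: concurrent readers see either
             * the old list or the new one, never a torn pointer. */
            rcu_assign_pointer(*pp,
                rcu_dereference_protected(n->next,
                                          lockdep_is_held(lock)));
            break;
        }
        pp = &n->next;
    }
    /* victim is freed only after a grace period, e.g. via kfree_rcu() */
}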
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h index 07ca6fd5546b..eb71aaa26a9a 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib.h +++ b/drivers/infiniband/ulp/ipoib/ipoib.h | |||
@@ -117,6 +117,8 @@ enum { | |||
117 | #define IPOIB_OP_CM (0) | 117 | #define IPOIB_OP_CM (0) |
118 | #endif | 118 | #endif |
119 | 119 | ||
120 | #define IPOIB_QPN_MASK ((__force u32) cpu_to_be32(0xFFFFFF)) | ||
121 | |||
120 | /* structs */ | 122 | /* structs */ |
121 | 123 | ||
122 | struct ipoib_header { | 124 | struct ipoib_header { |
@@ -760,4 +762,6 @@ extern int ipoib_debug_level; | |||
760 | 762 | ||
761 | #define IPOIB_QPN(ha) (be32_to_cpup((__be32 *) ha) & 0xffffff) | 763 | #define IPOIB_QPN(ha) (be32_to_cpup((__be32 *) ha) & 0xffffff) |
762 | 764 | ||
765 | extern const char ipoib_driver_version[]; | ||
766 | |||
763 | #endif /* _IPOIB_H */ | 767 | #endif /* _IPOIB_H */ |
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c b/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c index ca131335417b..c4b3940845e6 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c | |||
@@ -39,7 +39,24 @@ | |||
39 | static void ipoib_get_drvinfo(struct net_device *netdev, | 39 | static void ipoib_get_drvinfo(struct net_device *netdev, |
40 | struct ethtool_drvinfo *drvinfo) | 40 | struct ethtool_drvinfo *drvinfo) |
41 | { | 41 | { |
42 | strlcpy(drvinfo->driver, "ipoib", sizeof(drvinfo->driver)); | 42 | struct ipoib_dev_priv *priv = netdev_priv(netdev); |
43 | struct ib_device_attr *attr; | ||
44 | |||
45 | attr = kmalloc(sizeof(*attr), GFP_KERNEL); | ||
46 | if (attr && !ib_query_device(priv->ca, attr)) | ||
47 | snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), | ||
48 | "%d.%d.%d", (int)(attr->fw_ver >> 32), | ||
49 | (int)(attr->fw_ver >> 16) & 0xffff, | ||
50 | (int)attr->fw_ver & 0xffff); | ||
51 | kfree(attr); | ||
52 | |||
53 | strlcpy(drvinfo->bus_info, dev_name(priv->ca->dma_device), | ||
54 | sizeof(drvinfo->bus_info)); | ||
55 | |||
56 | strlcpy(drvinfo->version, ipoib_driver_version, | ||
57 | sizeof(drvinfo->version)); | ||
58 | |||
59 | strlcpy(drvinfo->driver, "ib_ipoib", sizeof(drvinfo->driver)); | ||
43 | } | 60 | } |
44 | 61 | ||
45 | static int ipoib_get_coalesce(struct net_device *dev, | 62 | static int ipoib_get_coalesce(struct net_device *dev, |
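ipoib_get_drvinfo() now reports real identity data instead of only a driver name: the firmware version is unpacked from the 64-bit attr->fw_ver that ib_query_device() fills in (major in the top 32 bits, minor and sub-minor in the next two 16-bit fields), bus_info comes from the underlying DMA device, and the driver string matches the module name ib_ipoib. The fw_ver unpacking on its own, assuming the same mlx4-style encoding:

#include <linux/kernel.h>

/* Firmware version packing assumed: major:32, minor:16, sub:16. */
static void demo_fw_string(u64 fw_ver, char *buf, size_t len)
{
    snprintf(buf, len, "%d.%d.%d",
             (int)(fw_ver >> 32),
             (int)(fw_ver >> 16) & 0xffff,
             (int)fw_ver & 0xffff);
}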
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c index 6fdc9e78da0d..8534afd04e7c 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_main.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c | |||
@@ -49,9 +49,14 @@ | |||
49 | #include <linux/jhash.h> | 49 | #include <linux/jhash.h> |
50 | #include <net/arp.h> | 50 | #include <net/arp.h> |
51 | 51 | ||
52 | #define DRV_VERSION "1.0.0" | ||
53 | |||
54 | const char ipoib_driver_version[] = DRV_VERSION; | ||
55 | |||
52 | MODULE_AUTHOR("Roland Dreier"); | 56 | MODULE_AUTHOR("Roland Dreier"); |
53 | MODULE_DESCRIPTION("IP-over-InfiniBand net driver"); | 57 | MODULE_DESCRIPTION("IP-over-InfiniBand net driver"); |
54 | MODULE_LICENSE("Dual BSD/GPL"); | 58 | MODULE_LICENSE("Dual BSD/GPL"); |
59 | MODULE_VERSION(DRV_VERSION); | ||
55 | 60 | ||
56 | int ipoib_sendq_size __read_mostly = IPOIB_TX_RING_SIZE; | 61 | int ipoib_sendq_size __read_mostly = IPOIB_TX_RING_SIZE; |
57 | int ipoib_recvq_size __read_mostly = IPOIB_RX_RING_SIZE; | 62 | int ipoib_recvq_size __read_mostly = IPOIB_RX_RING_SIZE; |
@@ -505,6 +510,9 @@ static void path_rec_completion(int status, | |||
505 | 510 | ||
506 | spin_unlock_irqrestore(&priv->lock, flags); | 511 | spin_unlock_irqrestore(&priv->lock, flags); |
507 | 512 | ||
513 | if (IS_ERR_OR_NULL(ah)) | ||
514 | ipoib_del_neighs_by_gid(dev, path->pathrec.dgid.raw); | ||
515 | |||
508 | if (old_ah) | 516 | if (old_ah) |
509 | ipoib_put_ah(old_ah); | 517 | ipoib_put_ah(old_ah); |
510 | 518 | ||
@@ -844,10 +852,10 @@ static u32 ipoib_addr_hash(struct ipoib_neigh_hash *htbl, u8 *daddr) | |||
844 | * different subnets. | 852 | * different subnets. |
845 | */ | 853 | */ |
846 | /* qpn octets[1:4) & port GUID octets[12:20) */ | 854 | /* qpn octets[1:4) & port GUID octets[12:20) */ |
847 | u32 *daddr_32 = (u32 *) daddr; | 855 | u32 *d32 = (u32 *) daddr; |
848 | u32 hv; | 856 | u32 hv; |
849 | 857 | ||
850 | hv = jhash_3words(daddr_32[3], daddr_32[4], 0xFFFFFF & daddr_32[0], 0); | 858 | hv = jhash_3words(d32[3], d32[4], IPOIB_QPN_MASK & d32[0], 0); |
851 | return hv & htbl->mask; | 859 | return hv & htbl->mask; |
852 | } | 860 | } |
853 | 861 | ||
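The hash rewrite is subtler than it looks: daddr holds the hardware address in wire (big-endian) order and is hashed as raw 32-bit words, so stripping the flags byte out of the first word needs a mask that is itself byte-swapped on little-endian hosts. That is what IPOIB_QPN_MASK, defined above as cpu_to_be32(0xFFFFFF), provides; a plain 0xFFFFFF & d32[0] would keep the flags byte and drop the low byte of the QPN on little-endian machines. Sketched (ipoib guarantees the address bytes are word-aligned here):

#include <linux/types.h>
#include <asm/byteorder.h>

#define IPOIB_QPN_MASK ((__force u32) cpu_to_be32(0xFFFFFF))

/* ha[0] is a flags byte, ha[1..3] the 24-bit QPN, all wire order. */
static u32 demo_qpn_bits(const u8 *ha)
{
    u32 first_word = *(const u32 *)ha;    /* raw, no byte swap */

    return first_word & IPOIB_QPN_MASK;   /* flags byte cleared */
}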
@@ -1688,6 +1696,8 @@ static void ipoib_remove_one(struct ib_device *device) | |||
1688 | return; | 1696 | return; |
1689 | 1697 | ||
1690 | dev_list = ib_get_client_data(device, &ipoib_client); | 1698 | dev_list = ib_get_client_data(device, &ipoib_client); |
1699 | if (!dev_list) | ||
1700 | return; | ||
1691 | 1701 | ||
1692 | list_for_each_entry_safe(priv, tmp, dev_list, list) { | 1702 | list_for_each_entry_safe(priv, tmp, dev_list, list) { |
1693 | ib_unregister_event_handler(&priv->event_handler); | 1703 | ib_unregister_event_handler(&priv->event_handler); |
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h index ef7d3be46c31..5babdb35bda7 100644 --- a/drivers/infiniband/ulp/iser/iscsi_iser.h +++ b/drivers/infiniband/ulp/iser/iscsi_iser.h | |||
@@ -94,7 +94,7 @@ | |||
94 | 94 | ||
95 | /* support up to 512KB in one RDMA */ | 95 | /* support up to 512KB in one RDMA */ |
96 | #define ISCSI_ISER_SG_TABLESIZE (0x80000 >> SHIFT_4K) | 96 | #define ISCSI_ISER_SG_TABLESIZE (0x80000 >> SHIFT_4K) |
97 | #define ISER_DEF_CMD_PER_LUN 128 | 97 | #define ISER_DEF_CMD_PER_LUN ISCSI_DEF_XMIT_CMDS_MAX |
98 | 98 | ||
99 | /* QP settings */ | 99 | /* QP settings */ |
100 | /* Maximal bounds on received asynchronous PDUs */ | 100 | /* Maximal bounds on received asynchronous PDUs */ |
diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c index 2033a928d34d..be1edb04b085 100644 --- a/drivers/infiniband/ulp/iser/iser_memory.c +++ b/drivers/infiniband/ulp/iser/iser_memory.c | |||
@@ -369,10 +369,11 @@ int iser_reg_rdma_mem(struct iscsi_iser_task *iser_task, | |||
369 | regd_buf = &iser_task->rdma_regd[cmd_dir]; | 369 | regd_buf = &iser_task->rdma_regd[cmd_dir]; |
370 | 370 | ||
371 | aligned_len = iser_data_buf_aligned_len(mem, ibdev); | 371 | aligned_len = iser_data_buf_aligned_len(mem, ibdev); |
372 | if (aligned_len != mem->dma_nents) { | 372 | if (aligned_len != mem->dma_nents || |
373 | (!ib_conn->fmr_pool && mem->dma_nents > 1)) { | ||
373 | iscsi_conn->fmr_unalign_cnt++; | 374 | iscsi_conn->fmr_unalign_cnt++; |
374 | iser_warn("rdma alignment violation %d/%d aligned\n", | 375 | iser_warn("rdma alignment violation (%d/%d aligned) or FMR not supported\n", |
375 | aligned_len, mem->size); | 376 | aligned_len, mem->size); |
376 | iser_data_buf_dump(mem, ibdev); | 377 | iser_data_buf_dump(mem, ibdev); |
377 | 378 | ||
378 | /* unmap the command data before accessing it */ | 379 | /* unmap the command data before accessing it */ |
@@ -404,7 +405,7 @@ int iser_reg_rdma_mem(struct iscsi_iser_task *iser_task, | |||
404 | } else { /* use FMR for multiple dma entries */ | 405 | } else { /* use FMR for multiple dma entries */ |
405 | iser_page_vec_build(mem, ib_conn->page_vec, ibdev); | 406 | iser_page_vec_build(mem, ib_conn->page_vec, ibdev); |
406 | err = iser_reg_page_vec(ib_conn, ib_conn->page_vec, ®d_buf->reg); | 407 | err = iser_reg_page_vec(ib_conn, ib_conn->page_vec, ®d_buf->reg); |
407 | if (err) { | 408 | if (err && err != -EAGAIN) { |
408 | iser_data_buf_dump(mem, ibdev); | 409 | iser_data_buf_dump(mem, ibdev); |
409 | iser_err("mem->dma_nents = %d (dlength = 0x%x)\n", | 410 | iser_err("mem->dma_nents = %d (dlength = 0x%x)\n", |
410 | mem->dma_nents, | 411 | mem->dma_nents, |
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c index 95a49affee44..4debadc53106 100644 --- a/drivers/infiniband/ulp/iser/iser_verbs.c +++ b/drivers/infiniband/ulp/iser/iser_verbs.c | |||
@@ -242,10 +242,14 @@ static int iser_create_ib_conn_res(struct iser_conn *ib_conn) | |||
242 | IB_ACCESS_REMOTE_READ); | 242 | IB_ACCESS_REMOTE_READ); |
243 | 243 | ||
244 | ib_conn->fmr_pool = ib_create_fmr_pool(device->pd, ¶ms); | 244 | ib_conn->fmr_pool = ib_create_fmr_pool(device->pd, ¶ms); |
245 | if (IS_ERR(ib_conn->fmr_pool)) { | 245 | ret = PTR_ERR(ib_conn->fmr_pool); |
246 | ret = PTR_ERR(ib_conn->fmr_pool); | 246 | if (IS_ERR(ib_conn->fmr_pool) && ret != -ENOSYS) { |
247 | ib_conn->fmr_pool = NULL; | 247 | ib_conn->fmr_pool = NULL; |
248 | goto out_err; | 248 | goto out_err; |
249 | } else if (ret == -ENOSYS) { | ||
250 | ib_conn->fmr_pool = NULL; | ||
251 | iser_warn("FMRs are not supported, using unaligned mode\n"); | ||
252 | ret = 0; | ||
249 | } | 253 | } |
250 | 254 | ||
251 | memset(&init_attr, 0, sizeof init_attr); | 255 | memset(&init_attr, 0, sizeof init_attr); |
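This iser change turns a missing-FMR provider into a supported configuration rather than a connection failure: ib_create_fmr_pool() returns ERR_PTR(-ENOSYS) when the device has no FMR support (mlx4 virtual functions, for example), and that one error is now absorbed, leaving fmr_pool NULL so the registration path falls back to unaligned mode. The general ERR_PTR triage pattern, with hypothetical names:

#include <linux/err.h>
#include <linux/printk.h>

struct demo_pool;
struct demo_conn {
    struct demo_pool *pool;
};

struct demo_pool *demo_create_pool(struct demo_conn *conn); /* hypothetical */

static int demo_setup_pool(struct demo_conn *conn)
{
    conn->pool = demo_create_pool(conn);
    if (IS_ERR(conn->pool)) {
        int ret = PTR_ERR(conn->pool);

        conn->pool = NULL;     /* never leave an ERR_PTR behind */
        if (ret != -ENOSYS)
            return ret;        /* real failure */
        pr_warn("pool unsupported, using fallback path\n");
    }
    return 0;
}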
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c index d5088ce78290..7ccf3284dda3 100644 --- a/drivers/infiniband/ulp/srp/ib_srp.c +++ b/drivers/infiniband/ulp/srp/ib_srp.c | |||
@@ -700,23 +700,24 @@ static int srp_reconnect_target(struct srp_target_port *target) | |||
700 | struct Scsi_Host *shost = target->scsi_host; | 700 | struct Scsi_Host *shost = target->scsi_host; |
701 | int i, ret; | 701 | int i, ret; |
702 | 702 | ||
703 | if (target->state != SRP_TARGET_LIVE) | ||
704 | return -EAGAIN; | ||
705 | |||
706 | scsi_target_block(&shost->shost_gendev); | 703 | scsi_target_block(&shost->shost_gendev); |
707 | 704 | ||
708 | srp_disconnect_target(target); | 705 | srp_disconnect_target(target); |
709 | /* | 706 | /* |
710 | * Now get a new local CM ID so that we avoid confusing the | 707 | * Now get a new local CM ID so that we avoid confusing the target in |
711 | * target in case things are really fouled up. | 708 | * case things are really fouled up. Doing so also ensures that all CM |
709 | * callbacks will have finished before a new QP is allocated. | ||
712 | */ | 710 | */ |
713 | ret = srp_new_cm_id(target); | 711 | ret = srp_new_cm_id(target); |
714 | if (ret) | 712 | /* |
715 | goto unblock; | 713 | * Whether or not creating a new CM ID succeeded, create a new |
716 | 714 | * QP. This guarantees that all completion callback function | |
717 | ret = srp_create_target_ib(target); | 715 | * invocations have finished before request resetting starts. |
718 | if (ret) | 716 | */ |
719 | goto unblock; | 717 | if (ret == 0) |
718 | ret = srp_create_target_ib(target); | ||
719 | else | ||
720 | srp_create_target_ib(target); | ||
720 | 721 | ||
721 | for (i = 0; i < SRP_CMD_SQ_SIZE; ++i) { | 722 | for (i = 0; i < SRP_CMD_SQ_SIZE; ++i) { |
722 | struct srp_request *req = &target->req_ring[i]; | 723 | struct srp_request *req = &target->req_ring[i]; |
@@ -728,11 +729,12 @@ static int srp_reconnect_target(struct srp_target_port *target) | |||
728 | for (i = 0; i < SRP_SQ_SIZE; ++i) | 729 | for (i = 0; i < SRP_SQ_SIZE; ++i) |
729 | list_add(&target->tx_ring[i]->list, &target->free_tx); | 730 | list_add(&target->tx_ring[i]->list, &target->free_tx); |
730 | 731 | ||
731 | ret = srp_connect_target(target); | 732 | if (ret == 0) |
733 | ret = srp_connect_target(target); | ||
732 | 734 | ||
733 | unblock: | ||
734 | scsi_target_unblock(&shost->shost_gendev, ret == 0 ? SDEV_RUNNING : | 735 | scsi_target_unblock(&shost->shost_gendev, ret == 0 ? SDEV_RUNNING : |
735 | SDEV_TRANSPORT_OFFLINE); | 736 | SDEV_TRANSPORT_OFFLINE); |
737 | target->transport_offline = !!ret; | ||
736 | 738 | ||
737 | if (ret) | 739 | if (ret) |
738 | goto err; | 740 | goto err; |
@@ -1352,6 +1354,12 @@ static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd) | |||
1352 | unsigned long flags; | 1354 | unsigned long flags; |
1353 | int len; | 1355 | int len; |
1354 | 1356 | ||
1357 | if (unlikely(target->transport_offline)) { | ||
1358 | scmnd->result = DID_NO_CONNECT << 16; | ||
1359 | scmnd->scsi_done(scmnd); | ||
1360 | return 0; | ||
1361 | } | ||
1362 | |||
1355 | spin_lock_irqsave(&target->lock, flags); | 1363 | spin_lock_irqsave(&target->lock, flags); |
1356 | iu = __srp_get_tx_iu(target, SRP_IU_CMD); | 1364 | iu = __srp_get_tx_iu(target, SRP_IU_CMD); |
1357 | if (!iu) | 1365 | if (!iu) |
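Failing fast in queuecommand while the transport is offline keeps commands out of a dead connection and keeps the SCSI midlayer's error handler from spinning on it: the host byte of scmnd->result occupies bits 16-23, so DID_NO_CONNECT << 16 followed by an immediate done callback completes the command as unreachable instead of returning a busy status. The encoding, spelled out for the 3.9-era API used here:

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>

/* result layout: driver(24-31) | host(16-23) | msg(8-15) | status(0-7) */
static void demo_fail_offline(struct scsi_cmnd *scmnd)
{
    scmnd->result = DID_NO_CONNECT << 16;    /* host byte only */
    scmnd->scsi_done(scmnd);                 /* complete immediately */
}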
@@ -1695,6 +1703,9 @@ static int srp_send_tsk_mgmt(struct srp_target_port *target, | |||
1695 | struct srp_iu *iu; | 1703 | struct srp_iu *iu; |
1696 | struct srp_tsk_mgmt *tsk_mgmt; | 1704 | struct srp_tsk_mgmt *tsk_mgmt; |
1697 | 1705 | ||
1706 | if (!target->connected || target->qp_in_error) | ||
1707 | return -1; | ||
1708 | |||
1698 | init_completion(&target->tsk_mgmt_done); | 1709 | init_completion(&target->tsk_mgmt_done); |
1699 | 1710 | ||
1700 | spin_lock_irq(&target->lock); | 1711 | spin_lock_irq(&target->lock); |
@@ -1736,7 +1747,7 @@ static int srp_abort(struct scsi_cmnd *scmnd) | |||
1736 | 1747 | ||
1737 | shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n"); | 1748 | shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n"); |
1738 | 1749 | ||
1739 | if (!req || target->qp_in_error || !srp_claim_req(target, req, scmnd)) | 1750 | if (!req || !srp_claim_req(target, req, scmnd)) |
1740 | return FAILED; | 1751 | return FAILED; |
1741 | srp_send_tsk_mgmt(target, req->index, scmnd->device->lun, | 1752 | srp_send_tsk_mgmt(target, req->index, scmnd->device->lun, |
1742 | SRP_TSK_ABORT_TASK); | 1753 | SRP_TSK_ABORT_TASK); |
@@ -1754,8 +1765,6 @@ static int srp_reset_device(struct scsi_cmnd *scmnd) | |||
1754 | 1765 | ||
1755 | shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n"); | 1766 | shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n"); |
1756 | 1767 | ||
1757 | if (target->qp_in_error) | ||
1758 | return FAILED; | ||
1759 | if (srp_send_tsk_mgmt(target, SRP_TAG_NO_REQ, scmnd->device->lun, | 1768 | if (srp_send_tsk_mgmt(target, SRP_TAG_NO_REQ, scmnd->device->lun, |
1760 | SRP_TSK_LUN_RESET)) | 1769 | SRP_TSK_LUN_RESET)) |
1761 | return FAILED; | 1770 | return FAILED; |
@@ -1972,7 +1981,6 @@ static int srp_add_target(struct srp_host *host, struct srp_target_port *target) | |||
1972 | spin_unlock(&host->target_lock); | 1981 | spin_unlock(&host->target_lock); |
1973 | 1982 | ||
1974 | target->state = SRP_TARGET_LIVE; | 1983 | target->state = SRP_TARGET_LIVE; |
1975 | target->connected = false; | ||
1976 | 1984 | ||
1977 | scsi_scan_target(&target->scsi_host->shost_gendev, | 1985 | scsi_scan_target(&target->scsi_host->shost_gendev, |
1978 | 0, target->scsi_id, SCAN_WILD_CARD, 0); | 1986 | 0, target->scsi_id, SCAN_WILD_CARD, 0); |
diff --git a/drivers/infiniband/ulp/srp/ib_srp.h b/drivers/infiniband/ulp/srp/ib_srp.h index de2d0b3c0bfe..66fbedda4571 100644 --- a/drivers/infiniband/ulp/srp/ib_srp.h +++ b/drivers/infiniband/ulp/srp/ib_srp.h | |||
@@ -140,6 +140,7 @@ struct srp_target_port { | |||
140 | unsigned int cmd_sg_cnt; | 140 | unsigned int cmd_sg_cnt; |
141 | unsigned int indirect_size; | 141 | unsigned int indirect_size; |
142 | bool allow_ext_sg; | 142 | bool allow_ext_sg; |
143 | bool transport_offline; | ||
143 | 144 | ||
144 | /* Everything above this point is used in the hot path of | 145 | /* Everything above this point is used in the hot path of |
145 | * command processing. Try to keep them packed into cachelines. | 146 | * command processing. Try to keep them packed into cachelines. |