 drivers/infiniband/core/umem.c                 | 19
 drivers/infiniband/hw/ipath/ipath_user_pages.c |  6
 drivers/infiniband/hw/ocrdma/ocrdma_ah.c       | 43
 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c    |  6
 drivers/infiniband/hw/qib/qib_debugfs.c        |  3
 drivers/infiniband/hw/qib/qib_qp.c             |  8
 drivers/infiniband/hw/qib/qib_user_pages.c     |  6
 drivers/infiniband/ulp/ipoib/ipoib_multicast.c | 10
 drivers/infiniband/ulp/iser/iscsi_iser.c       | 19
 drivers/infiniband/ulp/iser/iscsi_iser.h       |  2
 drivers/infiniband/ulp/iser/iser_verbs.c       | 24
 include/rdma/ib_umem.h                         |  1
 12 files changed, 85 insertions(+), 62 deletions(-)
diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
index a3a2e9c1639b..df0c4f605a21 100644
--- a/drivers/infiniband/core/umem.c
+++ b/drivers/infiniband/core/umem.c
@@ -105,6 +105,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
 	umem->length    = size;
 	umem->offset    = addr & ~PAGE_MASK;
 	umem->page_size = PAGE_SIZE;
+	umem->pid       = get_task_pid(current, PIDTYPE_PID);
 	/*
 	 * We ask for writable memory if any access flags other than
 	 * "remote read" are set.  "Local write" and "remote write"
@@ -198,6 +199,7 @@ out:
 	if (ret < 0) {
 		if (need_release)
 			__ib_umem_release(context->device, umem, 0);
+		put_pid(umem->pid);
 		kfree(umem);
 	} else
 		current->mm->pinned_vm = locked;
@@ -230,15 +232,19 @@ void ib_umem_release(struct ib_umem *umem)
 {
 	struct ib_ucontext *context = umem->context;
 	struct mm_struct *mm;
+	struct task_struct *task;
 	unsigned long diff;
 
 	__ib_umem_release(umem->context->device, umem, 1);
 
-	mm = get_task_mm(current);
-	if (!mm) {
-		kfree(umem);
-		return;
-	}
+	task = get_pid_task(umem->pid, PIDTYPE_PID);
+	put_pid(umem->pid);
+	if (!task)
+		goto out;
+	mm = get_task_mm(task);
+	put_task_struct(task);
+	if (!mm)
+		goto out;
 
 	diff = PAGE_ALIGN(umem->length + umem->offset) >> PAGE_SHIFT;
 
@@ -262,9 +268,10 @@ void ib_umem_release(struct ib_umem *umem)
 	} else
 		down_write(&mm->mmap_sem);
 
-	current->mm->pinned_vm -= diff;
+	mm->pinned_vm -= diff;
 	up_write(&mm->mmap_sem);
 	mmput(mm);
+out:
 	kfree(umem);
 }
 EXPORT_SYMBOL(ib_umem_release);
diff --git a/drivers/infiniband/hw/ipath/ipath_user_pages.c b/drivers/infiniband/hw/ipath/ipath_user_pages.c
index dc66c4506916..1da1252dcdb3 100644
--- a/drivers/infiniband/hw/ipath/ipath_user_pages.c
+++ b/drivers/infiniband/hw/ipath/ipath_user_pages.c
@@ -54,7 +54,7 @@ static void __ipath_release_user_pages(struct page **p, size_t num_pages,
 
 /* call with current->mm->mmap_sem held */
 static int __ipath_get_user_pages(unsigned long start_page, size_t num_pages,
-				  struct page **p, struct vm_area_struct **vma)
+				  struct page **p)
 {
 	unsigned long lock_limit;
 	size_t got;
@@ -74,7 +74,7 @@ static int __ipath_get_user_pages(unsigned long start_page, size_t num_pages,
 		ret = get_user_pages(current, current->mm,
 				     start_page + got * PAGE_SIZE,
 				     num_pages - got, 1, 1,
-				     p + got, vma);
+				     p + got, NULL);
 		if (ret < 0)
 			goto bail_release;
 	}
@@ -165,7 +165,7 @@ int ipath_get_user_pages(unsigned long start_page, size_t num_pages,
 
 	down_write(&current->mm->mmap_sem);
 
-	ret = __ipath_get_user_pages(start_page, num_pages, p, NULL);
+	ret = __ipath_get_user_pages(start_page, num_pages, p);
 
 	up_write(&current->mm->mmap_sem);
 
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_ah.c b/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
index 40f8536c10b0..ac02ce4e8040 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
@@ -38,7 +38,7 @@
 #define OCRDMA_VID_PCP_SHIFT	0xD
 
 static inline int set_av_attr(struct ocrdma_dev *dev, struct ocrdma_ah *ah,
-			struct ib_ah_attr *attr, int pdid)
+			struct ib_ah_attr *attr, union ib_gid *sgid, int pdid)
 {
 	int status = 0;
 	u16 vlan_tag; bool vlan_enabled = false;
@@ -49,8 +49,7 @@ static inline int set_av_attr(struct ocrdma_dev *dev, struct ocrdma_ah *ah,
 	memset(&eth, 0, sizeof(eth));
 	memset(&grh, 0, sizeof(grh));
 
-	ah->sgid_index = attr->grh.sgid_index;
-
+	/* VLAN */
 	vlan_tag = attr->vlan_id;
 	if (!vlan_tag || (vlan_tag > 0xFFF))
 		vlan_tag = dev->pvid;
@@ -65,15 +64,14 @@ static inline int set_av_attr(struct ocrdma_dev *dev, struct ocrdma_ah *ah,
 		eth.eth_type = cpu_to_be16(OCRDMA_ROCE_ETH_TYPE);
 		eth_sz = sizeof(struct ocrdma_eth_basic);
 	}
+	/* MAC */
 	memcpy(&eth.smac[0], &dev->nic_info.mac_addr[0], ETH_ALEN);
-	memcpy(&eth.dmac[0], attr->dmac, ETH_ALEN);
 	status = ocrdma_resolve_dmac(dev, attr, &eth.dmac[0]);
 	if (status)
 		return status;
-	status = ocrdma_query_gid(&dev->ibdev, 1, attr->grh.sgid_index,
-			(union ib_gid *)&grh.sgid[0]);
-	if (status)
-		return status;
+	ah->sgid_index = attr->grh.sgid_index;
+	memcpy(&grh.sgid[0], sgid->raw, sizeof(union ib_gid));
+	memcpy(&grh.dgid[0], attr->grh.dgid.raw, sizeof(attr->grh.dgid.raw));
 
 	grh.tclass_flow = cpu_to_be32((6 << 28) |
 				      (attr->grh.traffic_class << 24) |
@@ -81,8 +79,7 @@ static inline int set_av_attr(struct ocrdma_dev *dev, struct ocrdma_ah *ah,
 	/* 0x1b is next header value in GRH */
 	grh.pdid_hoplimit = cpu_to_be32((pdid << 16) |
 			(0x1b << 8) | attr->grh.hop_limit);
-
-	memcpy(&grh.dgid[0], attr->grh.dgid.raw, sizeof(attr->grh.dgid.raw));
+	/* Eth HDR */
 	memcpy(&ah->av->eth_hdr, &eth, eth_sz);
 	memcpy((u8 *)ah->av + eth_sz, &grh, sizeof(struct ocrdma_grh));
 	if (vlan_enabled)
@@ -98,6 +95,8 @@ struct ib_ah *ocrdma_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr)
 	struct ocrdma_ah *ah;
 	struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
 	struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
+	union ib_gid sgid;
+	u8 zmac[ETH_ALEN];
 
 	if (!(attr->ah_flags & IB_AH_GRH))
 		return ERR_PTR(-EINVAL);
@@ -111,7 +110,27 @@ struct ib_ah *ocrdma_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr)
 	status = ocrdma_alloc_av(dev, ah);
 	if (status)
 		goto av_err;
-	status = set_av_attr(dev, ah, attr, pd->id);
+
+	status = ocrdma_query_gid(&dev->ibdev, 1, attr->grh.sgid_index, &sgid);
+	if (status) {
+		pr_err("%s(): Failed to query sgid, status = %d\n",
+		       __func__, status);
+		goto av_conf_err;
+	}
+
+	memset(&zmac, 0, ETH_ALEN);
+	if (pd->uctx &&
+	    memcmp(attr->dmac, &zmac, ETH_ALEN)) {
+		status = rdma_addr_find_dmac_by_grh(&sgid, &attr->grh.dgid,
+					attr->dmac, &attr->vlan_id);
+		if (status) {
+			pr_err("%s(): Failed to resolve dmac from gid."
+				"status = %d\n", __func__, status);
+			goto av_conf_err;
+		}
+	}
+
+	status = set_av_attr(dev, ah, attr, &sgid, pd->id);
 	if (status)
 		goto av_conf_err;
 
@@ -145,7 +164,7 @@ int ocrdma_query_ah(struct ib_ah *ibah, struct ib_ah_attr *attr)
 	struct ocrdma_av *av = ah->av;
 	struct ocrdma_grh *grh;
 	attr->ah_flags |= IB_AH_GRH;
-	if (ah->av->valid & Bit(1)) {
+	if (ah->av->valid & OCRDMA_AV_VALID) {
 		grh = (struct ocrdma_grh *)((u8 *)ah->av +
 				sizeof(struct ocrdma_eth_vlan));
 		attr->sl = be16_to_cpu(av->eth_hdr.vlan_tag) >> 13;
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
index acb434d16903..8f5f2577f288 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
@@ -101,7 +101,7 @@ int ocrdma_query_device(struct ib_device *ibdev, struct ib_device_attr *attr)
 	attr->max_srq_sge = dev->attr.max_srq_sge;
 	attr->max_srq_wr = dev->attr.max_rqe;
 	attr->local_ca_ack_delay = dev->attr.local_ca_ack_delay;
-	attr->max_fast_reg_page_list_len = 0;
+	attr->max_fast_reg_page_list_len = dev->attr.max_pages_per_frmr;
 	attr->max_pkeys = 1;
 	return 0;
 }
@@ -2846,11 +2846,9 @@ int ocrdma_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags cq_flags)
 	if (cq->first_arm) {
 		ocrdma_ring_cq_db(dev, cq_id, arm_needed, sol_needed, 0);
 		cq->first_arm = false;
-		goto skip_defer;
 	}
-	cq->deferred_arm = true;
 
-skip_defer:
+	cq->deferred_arm = true;
 	cq->deferred_sol = sol_needed;
 	spin_unlock_irqrestore(&cq->cq_lock, flags);
 
diff --git a/drivers/infiniband/hw/qib/qib_debugfs.c b/drivers/infiniband/hw/qib/qib_debugfs.c
index 799a0c3bffc4..6abd3ed3cd51 100644
--- a/drivers/infiniband/hw/qib/qib_debugfs.c
+++ b/drivers/infiniband/hw/qib/qib_debugfs.c
@@ -193,6 +193,7 @@ static void *_qp_stats_seq_start(struct seq_file *s, loff_t *pos)
 	struct qib_qp_iter *iter;
 	loff_t n = *pos;
 
+	rcu_read_lock();
 	iter = qib_qp_iter_init(s->private);
 	if (!iter)
 		return NULL;
@@ -224,7 +225,7 @@ static void *_qp_stats_seq_next(struct seq_file *s, void *iter_ptr,
 
 static void _qp_stats_seq_stop(struct seq_file *s, void *iter_ptr)
 {
-	/* nothing for now */
+	rcu_read_unlock();
 }
 
 static int _qp_stats_seq_show(struct seq_file *s, void *iter_ptr)
diff --git a/drivers/infiniband/hw/qib/qib_qp.c b/drivers/infiniband/hw/qib/qib_qp.c
index 7fcc150d603c..6ddc0264aad2 100644
--- a/drivers/infiniband/hw/qib/qib_qp.c
+++ b/drivers/infiniband/hw/qib/qib_qp.c
@@ -1325,7 +1325,6 @@ int qib_qp_iter_next(struct qib_qp_iter *iter)
 	struct qib_qp *pqp = iter->qp;
 	struct qib_qp *qp;
 
-	rcu_read_lock();
 	for (; n < dev->qp_table_size; n++) {
 		if (pqp)
 			qp = rcu_dereference(pqp->next);
@@ -1333,18 +1332,11 @@ int qib_qp_iter_next(struct qib_qp_iter *iter)
 			qp = rcu_dereference(dev->qp_table[n]);
 		pqp = qp;
 		if (qp) {
-			if (iter->qp)
-				atomic_dec(&iter->qp->refcount);
-			atomic_inc(&qp->refcount);
-			rcu_read_unlock();
 			iter->qp = qp;
 			iter->n = n;
 			return 0;
 		}
 	}
-	rcu_read_unlock();
-	if (iter->qp)
-		atomic_dec(&iter->qp->refcount);
 	return ret;
 }
 
diff --git a/drivers/infiniband/hw/qib/qib_user_pages.c b/drivers/infiniband/hw/qib/qib_user_pages.c
index 2bc1d2b96298..74f90b2619f6 100644
--- a/drivers/infiniband/hw/qib/qib_user_pages.c
+++ b/drivers/infiniband/hw/qib/qib_user_pages.c
@@ -52,7 +52,7 @@ static void __qib_release_user_pages(struct page **p, size_t num_pages,
  * Call with current->mm->mmap_sem held.
  */
 static int __qib_get_user_pages(unsigned long start_page, size_t num_pages,
-				struct page **p, struct vm_area_struct **vma)
+				struct page **p)
 {
 	unsigned long lock_limit;
 	size_t got;
@@ -69,7 +69,7 @@ static int __qib_get_user_pages(unsigned long start_page, size_t num_pages,
 		ret = get_user_pages(current, current->mm,
 				     start_page + got * PAGE_SIZE,
 				     num_pages - got, 1, 1,
-				     p + got, vma);
+				     p + got, NULL);
 		if (ret < 0)
 			goto bail_release;
 	}
@@ -136,7 +136,7 @@ int qib_get_user_pages(unsigned long start_page, size_t num_pages,
 
 	down_write(&current->mm->mmap_sem);
 
-	ret = __qib_get_user_pages(start_page, num_pages, p, NULL);
+	ret = __qib_get_user_pages(start_page, num_pages, p);
 
 	up_write(&current->mm->mmap_sem);
 
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index d4e005720d01..ffb83b5f7e80 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -529,21 +529,13 @@ void ipoib_mcast_join_task(struct work_struct *work)
 			  port_attr.state);
 		return;
 	}
+	priv->local_lid = port_attr.lid;
 
 	if (ib_query_gid(priv->ca, priv->port, 0, &priv->local_gid))
 		ipoib_warn(priv, "ib_query_gid() failed\n");
 	else
 		memcpy(priv->dev->dev_addr + 4, priv->local_gid.raw, sizeof (union ib_gid));
 
-	{
-		struct ib_port_attr attr;
-
-		if (!ib_query_port(priv->ca, priv->port, &attr))
-			priv->local_lid = attr.lid;
-		else
-			ipoib_warn(priv, "ib_query_port failed\n");
-	}
-
 	if (!priv->broadcast) {
 		struct ipoib_mcast *broadcast;
 
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
index 61ee91d88380..93ce62fe1594 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
@@ -344,7 +344,6 @@ iscsi_iser_conn_bind(struct iscsi_cls_session *cls_session,
 		       int is_leading)
 {
 	struct iscsi_conn *conn = cls_conn->dd_data;
-	struct iscsi_session *session;
 	struct iser_conn *ib_conn;
 	struct iscsi_endpoint *ep;
 	int error;
@@ -363,9 +362,17 @@ iscsi_iser_conn_bind(struct iscsi_cls_session *cls_session,
 	}
 	ib_conn = ep->dd_data;
 
-	session = conn->session;
-	if (iser_alloc_rx_descriptors(ib_conn, session))
-		return -ENOMEM;
+	mutex_lock(&ib_conn->state_mutex);
+	if (ib_conn->state != ISER_CONN_UP) {
+		error = -EINVAL;
+		iser_err("iser_conn %p state is %d, teardown started\n",
+			 ib_conn, ib_conn->state);
+		goto out;
+	}
+
+	error = iser_alloc_rx_descriptors(ib_conn, conn->session);
+	if (error)
+		goto out;
 
 	/* binds the iSER connection retrieved from the previously
 	 * connected ep_handle to the iSCSI layer connection. exchanges
@@ -375,7 +382,9 @@ iscsi_iser_conn_bind(struct iscsi_cls_session *cls_session,
 	conn->dd_data = ib_conn;
 	ib_conn->iscsi_conn = conn;
 
-	return 0;
+out:
+	mutex_unlock(&ib_conn->state_mutex);
+	return error;
 }
 
 static int
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h
index c877dad381cb..9f0e0e34d6ca 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.h
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.h
@@ -69,7 +69,7 @@
 
 #define DRV_NAME	"iser"
 #define PFX		DRV_NAME ": "
-#define DRV_VER		"1.4"
+#define DRV_VER		"1.4.1"
 
 #define iser_dbg(fmt, arg...)				\
 	do {						\
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index 3ef167f97d6f..3bfec4bbda52 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -73,7 +73,7 @@ static int iser_create_device_ib_res(struct iser_device *device)
 {
 	struct iser_cq_desc *cq_desc;
 	struct ib_device_attr *dev_attr = &device->dev_attr;
-	int ret, i, j;
+	int ret, i;
 
 	ret = ib_query_device(device->ib_device, dev_attr);
 	if (ret) {
@@ -125,16 +125,20 @@ static int iser_create_device_ib_res(struct iser_device *device)
 					  iser_cq_event_callback,
 					  (void *)&cq_desc[i],
 					  ISER_MAX_RX_CQ_LEN, i);
-		if (IS_ERR(device->rx_cq[i]))
+		if (IS_ERR(device->rx_cq[i])) {
+			device->rx_cq[i] = NULL;
 			goto cq_err;
+		}
 
 		device->tx_cq[i] = ib_create_cq(device->ib_device,
 					  NULL, iser_cq_event_callback,
 					  (void *)&cq_desc[i],
 					  ISER_MAX_TX_CQ_LEN, i);
 
-		if (IS_ERR(device->tx_cq[i]))
+		if (IS_ERR(device->tx_cq[i])) {
+			device->tx_cq[i] = NULL;
 			goto cq_err;
+		}
 
 		if (ib_req_notify_cq(device->rx_cq[i], IB_CQ_NEXT_COMP))
 			goto cq_err;
@@ -160,14 +164,14 @@ static int iser_create_device_ib_res(struct iser_device *device)
 handler_err:
 	ib_dereg_mr(device->mr);
 dma_mr_err:
-	for (j = 0; j < device->cqs_used; j++)
-		tasklet_kill(&device->cq_tasklet[j]);
+	for (i = 0; i < device->cqs_used; i++)
+		tasklet_kill(&device->cq_tasklet[i]);
 cq_err:
-	for (j = 0; j < i; j++) {
-		if (device->tx_cq[j])
-			ib_destroy_cq(device->tx_cq[j]);
-		if (device->rx_cq[j])
-			ib_destroy_cq(device->rx_cq[j]);
+	for (i = 0; i < device->cqs_used; i++) {
+		if (device->tx_cq[i])
+			ib_destroy_cq(device->tx_cq[i]);
+		if (device->rx_cq[i])
+			ib_destroy_cq(device->rx_cq[i]);
 	}
 	ib_dealloc_pd(device->pd);
 pd_err:
diff --git a/include/rdma/ib_umem.h b/include/rdma/ib_umem.h
index 1ea0b65c4cfb..a2bf41e0bde9 100644
--- a/include/rdma/ib_umem.h
+++ b/include/rdma/ib_umem.h
@@ -47,6 +47,7 @@ struct ib_umem {
 	int			writable;
 	int			hugetlb;
 	struct work_struct	work;
+	struct pid	       *pid;
 	struct mm_struct       *mm;
 	unsigned long		diff;
 	struct sg_table sg_head;