-rw-r--r--  drivers/infiniband/core/cma.c                |  17
-rw-r--r--  drivers/infiniband/core/ucma.c               |  11
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_hca.c        |   8
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_irq.c        |   2
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_iverbs.h     |   4
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_main.c       |  10
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_mrmw.c       |   4
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_qp.c         |   4
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_cq.c       |   8
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_memfree.c  |   2
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_qp.c       |  26
-rw-r--r--  drivers/infiniband/ulp/iser/iscsi_iser.c     |   4
-rw-r--r--  drivers/infiniband/ulp/iser/iser_initiator.c |  26
13 files changed, 79 insertions(+), 47 deletions(-)
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 533193d4e5df..9e0ab048c878 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -1088,10 +1088,21 @@ static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event)
 		*sin = iw_event->local_addr;
 		sin = (struct sockaddr_in *) &id_priv->id.route.addr.dst_addr;
 		*sin = iw_event->remote_addr;
-		if (iw_event->status)
-			event.event = RDMA_CM_EVENT_REJECTED;
-		else
+		switch (iw_event->status) {
+		case 0:
 			event.event = RDMA_CM_EVENT_ESTABLISHED;
+			break;
+		case -ECONNRESET:
+		case -ECONNREFUSED:
+			event.event = RDMA_CM_EVENT_REJECTED;
+			break;
+		case -ETIMEDOUT:
+			event.event = RDMA_CM_EVENT_UNREACHABLE;
+			break;
+		default:
+			event.event = RDMA_CM_EVENT_CONNECT_ERROR;
+			break;
+		}
 		break;
 	case IW_CM_EVENT_ESTABLISHED:
 		event.event = RDMA_CM_EVENT_ESTABLISHED;
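
The hunk above widens a binary established/rejected check into a full mapping from iWARP connect status codes to RDMA CM events. A minimal standalone sketch of that mapping, with a local enum standing in for the kernel's rdma_cm_event_type values:

    #include <errno.h>

    enum cm_event {                     /* stand-ins for RDMA_CM_EVENT_* */
            CM_ESTABLISHED,
            CM_REJECTED,
            CM_UNREACHABLE,
            CM_CONNECT_ERROR,
    };

    /* 0 means the connection came up; reset/refused mean the peer rejected
     * us; a timeout means the peer never answered; anything else is a
     * generic connect error. */
    static enum cm_event map_iw_status(int status)
    {
            switch (status) {
            case 0:             return CM_ESTABLISHED;
            case -ECONNRESET:
            case -ECONNREFUSED: return CM_REJECTED;
            case -ETIMEDOUT:    return CM_UNREACHABLE;
            default:            return CM_CONNECT_ERROR;
            }
    }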
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index 81a5cdc5733a..e2e8d329b443 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -209,10 +209,21 @@ static int ucma_event_handler(struct rdma_cm_id *cm_id,
 	if (event->event == RDMA_CM_EVENT_CONNECT_REQUEST) {
 		if (!ctx->backlog) {
 			ret = -EDQUOT;
+			kfree(uevent);
 			goto out;
 		}
 		ctx->backlog--;
+	} else if (!ctx->uid) {
+		/*
+		 * We ignore events for new connections until userspace has set
+		 * their context.  This can only happen if an error occurs on a
+		 * new connection before the user accepts it.  This is okay,
+		 * since the accept will just fail later.
+		 */
+		kfree(uevent);
+		goto out;
 	}
+
 	list_add_tail(&uevent->list, &ctx->file->event_list);
 	wake_up_interruptible(&ctx->file->poll_wait);
 out:
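
Both kfree() additions enforce the same ownership rule: once allocated, a uevent must either be queued on the file's event list or freed before the goto out; previously the backlog-full path leaked it. A compact userspace sketch of the rule, using hypothetical minimal types:

    #include <stdlib.h>

    struct uevent { struct uevent *next; };
    struct ctx    { int backlog; void *uid; struct uevent *event_list; };

    /* Every early exit frees the event; only the queued path hands
     * ownership to the list, so no path can leak it. */
    static int queue_event(struct ctx *ctx, struct uevent *ue, int connect_req)
    {
            if (connect_req && !ctx->backlog) {
                    free(ue);               /* over backlog: drop */
                    return -1;              /* stands in for -EDQUOT */
            }
            if (!connect_req && !ctx->uid) {
                    free(ue);               /* ctx not yet claimed by userspace */
                    return 0;
            }
            ue->next = ctx->event_list;     /* the list now owns the event */
            ctx->event_list = ue;
            return 0;
    }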
diff --git a/drivers/infiniband/hw/ehca/ehca_hca.c b/drivers/infiniband/hw/ehca/ehca_hca.c
index e1b618c5f685..b7be950ab47c 100644
--- a/drivers/infiniband/hw/ehca/ehca_hca.c
+++ b/drivers/infiniband/hw/ehca/ehca_hca.c
@@ -50,7 +50,7 @@ int ehca_query_device(struct ib_device *ibdev, struct ib_device_attr *props)
 			      ib_device);
 	struct hipz_query_hca *rblock;
 
-	rblock = ehca_alloc_fw_ctrlblock();
+	rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
 	if (!rblock) {
 		ehca_err(&shca->ib_device, "Can't allocate rblock memory.");
 		return -ENOMEM;
@@ -110,7 +110,7 @@ int ehca_query_port(struct ib_device *ibdev,
 			      ib_device);
 	struct hipz_query_port *rblock;
 
-	rblock = ehca_alloc_fw_ctrlblock();
+	rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
 	if (!rblock) {
 		ehca_err(&shca->ib_device, "Can't allocate rblock memory.");
 		return -ENOMEM;
@@ -179,7 +179,7 @@ int ehca_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
 		return -EINVAL;
 	}
 
-	rblock = ehca_alloc_fw_ctrlblock();
+	rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
 	if (!rblock) {
 		ehca_err(&shca->ib_device, "Can't allocate rblock memory.");
 		return -ENOMEM;
@@ -212,7 +212,7 @@ int ehca_query_gid(struct ib_device *ibdev, u8 port,
 		return -EINVAL;
 	}
 
-	rblock = ehca_alloc_fw_ctrlblock();
+	rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
 	if (!rblock) {
 		ehca_err(&shca->ib_device, "Can't allocate rblock memory.");
 		return -ENOMEM;
diff --git a/drivers/infiniband/hw/ehca/ehca_irq.c b/drivers/infiniband/hw/ehca/ehca_irq.c
index c3ea746e9045..e7209afb4250 100644
--- a/drivers/infiniband/hw/ehca/ehca_irq.c
+++ b/drivers/infiniband/hw/ehca/ehca_irq.c
@@ -138,7 +138,7 @@ int ehca_error_data(struct ehca_shca *shca, void *data,
 	u64 *rblock;
 	unsigned long block_count;
 
-	rblock = ehca_alloc_fw_ctrlblock();
+	rblock = ehca_alloc_fw_ctrlblock(GFP_ATOMIC);
 	if (!rblock) {
 		ehca_err(&shca->ib_device, "Cannot allocate rblock memory.");
 		ret = -ENOMEM;
diff --git a/drivers/infiniband/hw/ehca/ehca_iverbs.h b/drivers/infiniband/hw/ehca/ehca_iverbs.h
index 3720e3032cce..cd7789f0d08e 100644
--- a/drivers/infiniband/hw/ehca/ehca_iverbs.h
+++ b/drivers/infiniband/hw/ehca/ehca_iverbs.h
@@ -180,10 +180,10 @@ int ehca_mmap_register(u64 physical,void **mapped,
 int ehca_munmap(unsigned long addr, size_t len);
 
 #ifdef CONFIG_PPC_64K_PAGES
-void *ehca_alloc_fw_ctrlblock(void);
+void *ehca_alloc_fw_ctrlblock(gfp_t flags);
 void ehca_free_fw_ctrlblock(void *ptr);
 #else
-#define ehca_alloc_fw_ctrlblock() ((void *) get_zeroed_page(GFP_KERNEL))
+#define ehca_alloc_fw_ctrlblock(flags) ((void *) get_zeroed_page(flags))
 #define ehca_free_fw_ctrlblock(ptr) free_page((unsigned long)(ptr))
 #endif
 
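
With the prototype and the non-64K-page macro both taking a gfp_t, the allocation context becomes the caller's choice: the process-context callers in this patch pass GFP_KERNEL, while ehca_error_data() in ehca_irq.c passes GFP_ATOMIC because it can be reached from interrupt context, where sleeping is forbidden. A kernel-style sketch of the pattern (hypothetical function names, not the driver code):

    #include <linux/gfp.h>

    static void *alloc_ctrlblock(gfp_t flags)
    {
            return (void *)get_zeroed_page(flags);  /* zeroed page or NULL */
    }

    static void *from_process_context(void)
    {
            return alloc_ctrlblock(GFP_KERNEL);     /* may sleep to reclaim */
    }

    static void *from_interrupt_context(void)
    {
            return alloc_ctrlblock(GFP_ATOMIC);     /* never sleeps; may fail */
    }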
diff --git a/drivers/infiniband/hw/ehca/ehca_main.c b/drivers/infiniband/hw/ehca/ehca_main.c
index cc47e4c13a18..6574fbbaead5 100644
--- a/drivers/infiniband/hw/ehca/ehca_main.c
+++ b/drivers/infiniband/hw/ehca/ehca_main.c
@@ -106,9 +106,9 @@ static struct timer_list poll_eqs_timer;
 #ifdef CONFIG_PPC_64K_PAGES
 static struct kmem_cache *ctblk_cache = NULL;
 
-void *ehca_alloc_fw_ctrlblock(void)
+void *ehca_alloc_fw_ctrlblock(gfp_t flags)
 {
-	void *ret = kmem_cache_zalloc(ctblk_cache, GFP_KERNEL);
+	void *ret = kmem_cache_zalloc(ctblk_cache, flags);
 	if (!ret)
 		ehca_gen_err("Out of memory for ctblk");
 	return ret;
@@ -206,7 +206,7 @@ int ehca_sense_attributes(struct ehca_shca *shca)
 	u64 h_ret;
 	struct hipz_query_hca *rblock;
 
-	rblock = ehca_alloc_fw_ctrlblock();
+	rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
 	if (!rblock) {
 		ehca_gen_err("Cannot allocate rblock memory.");
 		return -ENOMEM;
@@ -258,7 +258,7 @@ static int init_node_guid(struct ehca_shca *shca)
 	int ret = 0;
 	struct hipz_query_hca *rblock;
 
-	rblock = ehca_alloc_fw_ctrlblock();
+	rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
 	if (!rblock) {
 		ehca_err(&shca->ib_device, "Can't allocate rblock memory.");
 		return -ENOMEM;
@@ -469,7 +469,7 @@ static ssize_t ehca_show_##name(struct device *dev, \
 									\
 	shca = dev->driver_data;					\
 									\
-	rblock = ehca_alloc_fw_ctrlblock();				\
+	rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL);			\
 	if (!rblock) {							\
 		dev_err(dev, "Can't allocate rblock memory.");		\
 		return 0;						\
diff --git a/drivers/infiniband/hw/ehca/ehca_mrmw.c b/drivers/infiniband/hw/ehca/ehca_mrmw.c
index 0a5e2214cc5f..cfb362a1029c 100644
--- a/drivers/infiniband/hw/ehca/ehca_mrmw.c
+++ b/drivers/infiniband/hw/ehca/ehca_mrmw.c
@@ -1013,7 +1013,7 @@ int ehca_reg_mr_rpages(struct ehca_shca *shca,
 	u32 i;
 	u64 *kpage;
 
-	kpage = ehca_alloc_fw_ctrlblock();
+	kpage = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
 	if (!kpage) {
 		ehca_err(&shca->ib_device, "kpage alloc failed");
 		ret = -ENOMEM;
@@ -1124,7 +1124,7 @@ inline int ehca_rereg_mr_rereg1(struct ehca_shca *shca,
 	ehca_mrmw_map_acl(acl, &hipz_acl);
 	ehca_mrmw_set_pgsize_hipz_acl(&hipz_acl);
 
-	kpage = ehca_alloc_fw_ctrlblock();
+	kpage = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
 	if (!kpage) {
 		ehca_err(&shca->ib_device, "kpage alloc failed");
 		ret = -ENOMEM;
diff --git a/drivers/infiniband/hw/ehca/ehca_qp.c b/drivers/infiniband/hw/ehca/ehca_qp.c
index c6c9cef203e3..34b85556d01e 100644
--- a/drivers/infiniband/hw/ehca/ehca_qp.c
+++ b/drivers/infiniband/hw/ehca/ehca_qp.c
@@ -807,7 +807,7 @@ static int internal_modify_qp(struct ib_qp *ibqp,
 	unsigned long spl_flags = 0;
 
 	/* do query_qp to obtain current attr values */
-	mqpcb = ehca_alloc_fw_ctrlblock();
+	mqpcb = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
 	if (!mqpcb) {
 		ehca_err(ibqp->device, "Could not get zeroed page for mqpcb "
 			 "ehca_qp=%p qp_num=%x ", my_qp, ibqp->qp_num);
@@ -1273,7 +1273,7 @@ int ehca_query_qp(struct ib_qp *qp,
 		return -EINVAL;
 	}
 
-	qpcb = ehca_alloc_fw_ctrlblock();
+	qpcb = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
 	if (!qpcb) {
 		ehca_err(qp->device,"Out of memory for qpcb "
 			 "ehca_qp=%p qp_num=%x", my_qp, qp->qp_num);
diff --git a/drivers/infiniband/hw/mthca/mthca_cq.c b/drivers/infiniband/hw/mthca/mthca_cq.c
index 283d50b76c3d..1159c8a0f2c5 100644
--- a/drivers/infiniband/hw/mthca/mthca_cq.c
+++ b/drivers/infiniband/hw/mthca/mthca_cq.c
@@ -54,6 +54,10 @@ enum {
 	MTHCA_CQ_ENTRY_SIZE = 0x20
 };
 
+enum {
+	MTHCA_ATOMIC_BYTE_LEN = 8
+};
+
 /*
  * Must be packed because start is 64 bits but only aligned to 32 bits.
  */
@@ -599,11 +603,11 @@ static inline int mthca_poll_one(struct mthca_dev *dev,
 			break;
 		case MTHCA_OPCODE_ATOMIC_CS:
 			entry->opcode    = IB_WC_COMP_SWAP;
-			entry->byte_len  = be32_to_cpu(cqe->byte_cnt);
+			entry->byte_len  = MTHCA_ATOMIC_BYTE_LEN;
 			break;
 		case MTHCA_OPCODE_ATOMIC_FA:
 			entry->opcode    = IB_WC_FETCH_ADD;
-			entry->byte_len  = be32_to_cpu(cqe->byte_cnt);
+			entry->byte_len  = MTHCA_ATOMIC_BYTE_LEN;
 			break;
 		case MTHCA_OPCODE_BIND_MW:
 			entry->opcode    = IB_WC_BIND_MW;
diff --git a/drivers/infiniband/hw/mthca/mthca_memfree.c b/drivers/infiniband/hw/mthca/mthca_memfree.c
index 15cc2f6eb475..6b19645d946c 100644
--- a/drivers/infiniband/hw/mthca/mthca_memfree.c
+++ b/drivers/infiniband/hw/mthca/mthca_memfree.c
@@ -232,7 +232,7 @@ void *mthca_table_find(struct mthca_icm_table *table, int obj)
 
 	list_for_each_entry(chunk, &icm->chunk_list, list) {
 		for (i = 0; i < chunk->npages; ++i) {
-			if (chunk->mem[i].length >= offset) {
+			if (chunk->mem[i].length > offset) {
 				page = chunk->mem[i].page;
 				goto out;
 			}
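
The comparison change fixes an off-by-one: as the loop walks the chunk's entries it subtracts each entry's length from offset, and an entry of length len covers relative offsets 0 through len - 1, so it contains the target only when length > offset; with >=, an offset exactly equal to an entry's length wrongly matched that entry instead of the next one. The same logic in miniature, with hypothetical types:

    #include <stddef.h>

    struct extent { unsigned long length; void *page; };

    static void *find_page(struct extent *ext, int n, unsigned long offset)
    {
            int i;

            for (i = 0; i < n; ++i) {
                    if (ext[i].length > offset)     /* '>=' was the bug */
                            return ext[i].page;
                    offset -= ext[i].length;        /* skip past this extent */
            }
            return NULL;                            /* offset beyond the table */
    }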
diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c
index d844a2569b47..5f5214c0337d 100644
--- a/drivers/infiniband/hw/mthca/mthca_qp.c
+++ b/drivers/infiniband/hw/mthca/mthca_qp.c
@@ -429,13 +429,18 @@ int mthca_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_m
 {
 	struct mthca_dev *dev = to_mdev(ibqp->device);
 	struct mthca_qp *qp = to_mqp(ibqp);
-	int err;
-	struct mthca_mailbox *mailbox;
+	int err = 0;
+	struct mthca_mailbox *mailbox = NULL;
 	struct mthca_qp_param *qp_param;
 	struct mthca_qp_context *context;
 	int mthca_state;
 	u8 status;
 
+	if (qp->state == IB_QPS_RESET) {
+		qp_attr->qp_state = IB_QPS_RESET;
+		goto done;
+	}
+
 	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
 	if (IS_ERR(mailbox))
 		return PTR_ERR(mailbox);
@@ -454,7 +459,6 @@ int mthca_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_m
 	mthca_state = be32_to_cpu(context->flags) >> 28;
 
 	qp_attr->qp_state            = to_ib_qp_state(mthca_state);
-	qp_attr->cur_qp_state        = qp_attr->qp_state;
 	qp_attr->path_mtu            = context->mtu_msgmax >> 5;
 	qp_attr->path_mig_state      =
 		to_ib_mig_state((be32_to_cpu(context->flags) >> 11) & 0x3);
@@ -464,11 +468,6 @@ int mthca_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_m
 	qp_attr->dest_qp_num         = be32_to_cpu(context->remote_qpn) & 0xffffff;
 	qp_attr->qp_access_flags     =
 		to_ib_qp_access_flags(be32_to_cpu(context->params2));
-	qp_attr->cap.max_send_wr     = qp->sq.max;
-	qp_attr->cap.max_recv_wr     = qp->rq.max;
-	qp_attr->cap.max_send_sge    = qp->sq.max_gs;
-	qp_attr->cap.max_recv_sge    = qp->rq.max_gs;
-	qp_attr->cap.max_inline_data = qp->max_inline_data;
 
 	if (qp->transport == RC || qp->transport == UC) {
 		to_ib_ah_attr(dev, &qp_attr->ah_attr, &context->pri_path);
@@ -495,7 +494,16 @@ int mthca_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_m
 	qp_attr->retry_cnt           = (be32_to_cpu(context->params1) >> 16) & 0x7;
 	qp_attr->rnr_retry           = context->pri_path.rnr_retry >> 5;
 	qp_attr->alt_timeout         = context->alt_path.ackto >> 3;
-	qp_init_attr->cap            = qp_attr->cap;
+
+done:
+	qp_attr->cur_qp_state        = qp_attr->qp_state;
+	qp_attr->cap.max_send_wr     = qp->sq.max;
+	qp_attr->cap.max_recv_wr     = qp->rq.max;
+	qp_attr->cap.max_send_sge    = qp->sq.max_gs;
+	qp_attr->cap.max_recv_sge    = qp->rq.max_gs;
+	qp_attr->cap.max_inline_data = qp->max_inline_data;
+
+	qp_init_attr->cap            = qp_attr->cap;
 
 out:
 	mthca_free_mailbox(dev, mailbox);
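
Taken together, the mthca_qp.c hunks restructure mthca_query_qp() so a QP in the RESET state never triggers a firmware QUERY_QP (its hardware context is not valid then), while cur_qp_state and the cap fields, which come from driver bookkeeping rather than from the firmware context, are now filled in on every path. A condensed sketch of the resulting control flow, with hypothetical types and helpers:

    enum { QPS_RESET };

    struct qp   { int state; int max_wr; };
    struct attr { int qp_state; int cur_qp_state; int max_send_wr; };

    static int query_qp(struct qp *qp, struct attr *attr)
    {
            int err = 0;

            if (qp->state == QPS_RESET) {
                    attr->qp_state = QPS_RESET;  /* skip the firmware query */
                    goto done;
            }

            /* ... mailbox allocation and the firmware QUERY_QP command
             * run here, filling attr->qp_state from the returned context ... */

    done:
            /* driver-tracked attributes, valid in every QP state */
            attr->cur_qp_state = attr->qp_state;
            attr->max_send_wr  = qp->max_wr;
            return err;
    }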
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
index 9b2041e25d59..dd221eda3ea6 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
@@ -177,7 +177,7 @@ iscsi_iser_mtask_xmit(struct iscsi_conn *conn,
 	 *  - if yes, the mtask is recycled at iscsi_complete_pdu
 	 *  - if no,  the mtask is recycled at iser_snd_completion
 	 */
-	if (error && error != -EAGAIN)
+	if (error && error != -ENOBUFS)
 		iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
 
 	return error;
@@ -241,7 +241,7 @@ iscsi_iser_ctask_xmit(struct iscsi_conn *conn,
 		error = iscsi_iser_ctask_xmit_unsol_data(conn, ctask);
 
  iscsi_iser_ctask_xmit_exit:
-	if (error && error != -EAGAIN)
+	if (error && error != -ENOBUFS)
 		iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
 	return error;
 }
diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
index e73c87b9be43..0a7d1ab60e6d 100644
--- a/drivers/infiniband/ulp/iser/iser_initiator.c
+++ b/drivers/infiniband/ulp/iser/iser_initiator.c
@@ -304,18 +304,14 @@ int iser_conn_set_full_featured_mode(struct iscsi_conn *conn)
 static int
 iser_check_xmit(struct iscsi_conn *conn, void *task)
 {
-	int rc = 0;
 	struct iscsi_iser_conn *iser_conn = conn->dd_data;
 
-	write_lock_bh(conn->recv_lock);
 	if (atomic_read(&iser_conn->ib_conn->post_send_buf_count) ==
 	    ISER_QP_MAX_REQ_DTOS) {
-		iser_dbg("%ld can't xmit task %p, suspending tx\n",jiffies,task);
-		set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
-		rc = -EAGAIN;
+		iser_dbg("%ld can't xmit task %p\n",jiffies,task);
+		return -ENOBUFS;
 	}
-	write_unlock_bh(conn->recv_lock);
-	return rc;
+	return 0;
 }
 
 
@@ -340,7 +336,7 @@ int iser_send_command(struct iscsi_conn *conn,
 		return -EPERM;
 	}
 	if (iser_check_xmit(conn, ctask))
-		return -EAGAIN;
+		return -ENOBUFS;
 
 	edtl = ntohl(hdr->data_length);
 
@@ -426,7 +422,7 @@ int iser_send_data_out(struct iscsi_conn *conn,
 	}
 
 	if (iser_check_xmit(conn, ctask))
-		return -EAGAIN;
+		return -ENOBUFS;
 
 	itt = ntohl(hdr->itt);
 	data_seg_len = ntoh24(hdr->dlength);
@@ -498,7 +494,7 @@ int iser_send_control(struct iscsi_conn *conn,
 	}
 
 	if (iser_check_xmit(conn,mtask))
-		return -EAGAIN;
+		return -ENOBUFS;
 
 	/* build the tx desc regd header and add it to the tx desc dto */
 	mdesc->type = ISCSI_TX_CONTROL;
@@ -605,6 +601,7 @@ void iser_snd_completion(struct iser_desc *tx_desc)
 	struct iscsi_iser_conn *iser_conn = ib_conn->iser_conn;
 	struct iscsi_conn *conn = iser_conn->iscsi_conn;
 	struct iscsi_mgmt_task *mtask;
+	int resume_tx = 0;
 
 	iser_dbg("Initiator, Data sent dto=0x%p\n", dto);
 
@@ -613,15 +610,16 @@ void iser_snd_completion(struct iser_desc *tx_desc)
 	if (tx_desc->type == ISCSI_TX_DATAOUT)
 		kmem_cache_free(ig.desc_cache, tx_desc);
 
+	if (atomic_read(&iser_conn->ib_conn->post_send_buf_count) ==
+	    ISER_QP_MAX_REQ_DTOS)
+		resume_tx = 1;
+
 	atomic_dec(&ib_conn->post_send_buf_count);
 
-	write_lock(conn->recv_lock);
-	if (conn->suspend_tx) {
+	if (resume_tx) {
 		iser_dbg("%ld resuming tx\n",jiffies);
-		clear_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
 		scsi_queue_work(conn->session->host, &conn->xmitwork);
 	}
-	write_unlock(conn->recv_lock);
 
 	if (tx_desc->type == ISCSI_TX_CONTROL) {
 		/* this arithmetic is legal by libiscsi dd_data allocation */
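
The net effect of the iser changes is lockless transmit flow control: the recv_lock/ISCSI_SUSPEND_BIT dance is gone, senders fail with -ENOBUFS when post_send_buf_count has reached ISER_QP_MAX_REQ_DTOS (an error iscsi_iser.c now deliberately does not escalate to a connection failure), and the send-completion path requeues the transmit work exactly when it observes the full-to-not-full transition. A userspace sketch of the counter protocol, with C11 atomics standing in for the kernel's atomic_t:

    #include <errno.h>
    #include <stdatomic.h>

    #define MAX_DTOS 8                  /* stands in for ISER_QP_MAX_REQ_DTOS */
    static atomic_int post_send_buf_count;

    /* Transmit path: refuse new sends while the send queue is full;
     * the caller simply retries later via the transmit workqueue. */
    static int check_xmit(void)
    {
            if (atomic_load(&post_send_buf_count) == MAX_DTOS)
                    return -ENOBUFS;
            return 0;
    }

    /* Completion path: the completion that observes the queue at its
     * high-water mark before decrementing rekicks the transmit work. */
    static void snd_completion(void (*queue_xmit_work)(void))
    {
            int resume = atomic_load(&post_send_buf_count) == MAX_DTOS;

            atomic_fetch_sub(&post_send_buf_count, 1);
            if (resume)
                    queue_xmit_work();
    }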