aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/infiniband/hw
diff options
context:
space:
mode:
authorStefan Roscher <stefan.roscher@de.ibm.com>2007-09-11 09:29:39 -0400
committerRoland Dreier <rolandd@cisco.com>2007-10-09 22:59:08 -0400
commit5281a4b8a0c6bac0c070913ec25868faa06a3115 (patch)
tree49e379688fca00a0a3eb4f031aab90929f784f34 /drivers/infiniband/hw
parent441633b968a5be0ef9be7c37ae24c35eda5b730d (diff)
IB/ehca: Support more than 4k QPs for userspace and kernelspace
Signed-off-by: Joachim Fenkes <fenkes@de.ibm.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
Diffstat (limited to 'drivers/infiniband/hw')
-rw-r--r--drivers/infiniband/hw/ehca/ehca_cq.c7
-rw-r--r--drivers/infiniband/hw/ehca/ehca_main.c2
-rw-r--r--drivers/infiniband/hw/ehca/ehca_qp.c9
-rw-r--r--drivers/infiniband/hw/ehca/ehca_uverbs.c22
4 files changed, 25 insertions, 15 deletions
diff --git a/drivers/infiniband/hw/ehca/ehca_cq.c b/drivers/infiniband/hw/ehca/ehca_cq.c
index 81aff36101ba..a6f17e488c77 100644
--- a/drivers/infiniband/hw/ehca/ehca_cq.c
+++ b/drivers/infiniband/hw/ehca/ehca_cq.c
@@ -166,7 +166,6 @@ struct ib_cq *ehca_create_cq(struct ib_device *device, int cqe, int comp_vector,
166 write_lock_irqsave(&ehca_cq_idr_lock, flags); 166 write_lock_irqsave(&ehca_cq_idr_lock, flags);
167 ret = idr_get_new(&ehca_cq_idr, my_cq, &my_cq->token); 167 ret = idr_get_new(&ehca_cq_idr, my_cq, &my_cq->token);
168 write_unlock_irqrestore(&ehca_cq_idr_lock, flags); 168 write_unlock_irqrestore(&ehca_cq_idr_lock, flags);
169
170 } while (ret == -EAGAIN); 169 } while (ret == -EAGAIN);
171 170
172 if (ret) { 171 if (ret) {
@@ -176,6 +175,12 @@ struct ib_cq *ehca_create_cq(struct ib_device *device, int cqe, int comp_vector,
176 goto create_cq_exit1; 175 goto create_cq_exit1;
177 } 176 }
178 177
178 if (my_cq->token > 0x1FFFFFF) {
179 cq = ERR_PTR(-ENOMEM);
180 ehca_err(device, "Invalid number of cq. device=%p", device);
181 goto create_cq_exit2;
182 }
183
179 /* 184 /*
180 * CQs maximum depth is 4GB-64, but we need additional 20 as buffer 185 * CQs maximum depth is 4GB-64, but we need additional 20 as buffer
181 * for receiving errors CQEs. 186 * for receiving errors CQEs.
diff --git a/drivers/infiniband/hw/ehca/ehca_main.c b/drivers/infiniband/hw/ehca/ehca_main.c
index db041dfb4129..991690739170 100644
--- a/drivers/infiniband/hw/ehca/ehca_main.c
+++ b/drivers/infiniband/hw/ehca/ehca_main.c
@@ -380,7 +380,7 @@ int ehca_init_device(struct ehca_shca *shca)
380 strlcpy(shca->ib_device.name, "ehca%d", IB_DEVICE_NAME_MAX); 380 strlcpy(shca->ib_device.name, "ehca%d", IB_DEVICE_NAME_MAX);
381 shca->ib_device.owner = THIS_MODULE; 381 shca->ib_device.owner = THIS_MODULE;
382 382
383 shca->ib_device.uverbs_abi_ver = 7; 383 shca->ib_device.uverbs_abi_ver = 8;
384 shca->ib_device.uverbs_cmd_mask = 384 shca->ib_device.uverbs_cmd_mask =
385 (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) | 385 (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
386 (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) | 386 (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
diff --git a/drivers/infiniband/hw/ehca/ehca_qp.c b/drivers/infiniband/hw/ehca/ehca_qp.c
index 87b32ab6caf6..bfae1c228b2f 100644
--- a/drivers/infiniband/hw/ehca/ehca_qp.c
+++ b/drivers/infiniband/hw/ehca/ehca_qp.c
@@ -557,7 +557,6 @@ static struct ehca_qp *internal_create_qp(
557 write_lock_irqsave(&ehca_qp_idr_lock, flags); 557 write_lock_irqsave(&ehca_qp_idr_lock, flags);
558 ret = idr_get_new(&ehca_qp_idr, my_qp, &my_qp->token); 558 ret = idr_get_new(&ehca_qp_idr, my_qp, &my_qp->token);
559 write_unlock_irqrestore(&ehca_qp_idr_lock, flags); 559 write_unlock_irqrestore(&ehca_qp_idr_lock, flags);
560
561 } while (ret == -EAGAIN); 560 } while (ret == -EAGAIN);
562 561
563 if (ret) { 562 if (ret) {
@@ -566,11 +565,17 @@ static struct ehca_qp *internal_create_qp(
566 goto create_qp_exit0; 565 goto create_qp_exit0;
567 } 566 }
568 567
568 if (my_qp->token > 0x1FFFFFF) {
569 ret = -EINVAL;
570 ehca_err(pd->device, "Invalid number of qp");
571 goto create_qp_exit1;
572 }
573
569 parms.servicetype = ibqptype2servicetype(qp_type); 574 parms.servicetype = ibqptype2servicetype(qp_type);
570 if (parms.servicetype < 0) { 575 if (parms.servicetype < 0) {
571 ret = -EINVAL; 576 ret = -EINVAL;
572 ehca_err(pd->device, "Invalid qp_type=%x", qp_type); 577 ehca_err(pd->device, "Invalid qp_type=%x", qp_type);
573 goto create_qp_exit0; 578 goto create_qp_exit1;
574 } 579 }
575 580
576 if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) 581 if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR)
diff --git a/drivers/infiniband/hw/ehca/ehca_uverbs.c b/drivers/infiniband/hw/ehca/ehca_uverbs.c
index 4bc687fdf531..3340f49f0211 100644
--- a/drivers/infiniband/hw/ehca/ehca_uverbs.c
+++ b/drivers/infiniband/hw/ehca/ehca_uverbs.c
@@ -164,7 +164,7 @@ static int ehca_mmap_cq(struct vm_area_struct *vma, struct ehca_cq *cq,
164 int ret; 164 int ret;
165 165
166 switch (rsrc_type) { 166 switch (rsrc_type) {
167 case 1: /* galpa fw handle */ 167 case 0: /* galpa fw handle */
168 ehca_dbg(cq->ib_cq.device, "cq_num=%x fw", cq->cq_number); 168 ehca_dbg(cq->ib_cq.device, "cq_num=%x fw", cq->cq_number);
169 ret = ehca_mmap_fw(vma, &cq->galpas, &cq->mm_count_galpa); 169 ret = ehca_mmap_fw(vma, &cq->galpas, &cq->mm_count_galpa);
170 if (unlikely(ret)) { 170 if (unlikely(ret)) {
@@ -175,7 +175,7 @@ static int ehca_mmap_cq(struct vm_area_struct *vma, struct ehca_cq *cq,
175 } 175 }
176 break; 176 break;
177 177
178 case 2: /* cq queue_addr */ 178 case 1: /* cq queue_addr */
179 ehca_dbg(cq->ib_cq.device, "cq_num=%x queue", cq->cq_number); 179 ehca_dbg(cq->ib_cq.device, "cq_num=%x queue", cq->cq_number);
180 ret = ehca_mmap_queue(vma, &cq->ipz_queue, &cq->mm_count_queue); 180 ret = ehca_mmap_queue(vma, &cq->ipz_queue, &cq->mm_count_queue);
181 if (unlikely(ret)) { 181 if (unlikely(ret)) {
@@ -201,7 +201,7 @@ static int ehca_mmap_qp(struct vm_area_struct *vma, struct ehca_qp *qp,
201 int ret; 201 int ret;
202 202
203 switch (rsrc_type) { 203 switch (rsrc_type) {
204 case 1: /* galpa fw handle */ 204 case 0: /* galpa fw handle */
205 ehca_dbg(qp->ib_qp.device, "qp_num=%x fw", qp->ib_qp.qp_num); 205 ehca_dbg(qp->ib_qp.device, "qp_num=%x fw", qp->ib_qp.qp_num);
206 ret = ehca_mmap_fw(vma, &qp->galpas, &qp->mm_count_galpa); 206 ret = ehca_mmap_fw(vma, &qp->galpas, &qp->mm_count_galpa);
207 if (unlikely(ret)) { 207 if (unlikely(ret)) {
@@ -212,7 +212,7 @@ static int ehca_mmap_qp(struct vm_area_struct *vma, struct ehca_qp *qp,
212 } 212 }
213 break; 213 break;
214 214
215 case 2: /* qp rqueue_addr */ 215 case 1: /* qp rqueue_addr */
216 ehca_dbg(qp->ib_qp.device, "qp_num=%x rqueue", 216 ehca_dbg(qp->ib_qp.device, "qp_num=%x rqueue",
217 qp->ib_qp.qp_num); 217 qp->ib_qp.qp_num);
218 ret = ehca_mmap_queue(vma, &qp->ipz_rqueue, 218 ret = ehca_mmap_queue(vma, &qp->ipz_rqueue,
@@ -225,7 +225,7 @@ static int ehca_mmap_qp(struct vm_area_struct *vma, struct ehca_qp *qp,
225 } 225 }
226 break; 226 break;
227 227
228 case 3: /* qp squeue_addr */ 228 case 2: /* qp squeue_addr */
229 ehca_dbg(qp->ib_qp.device, "qp_num=%x squeue", 229 ehca_dbg(qp->ib_qp.device, "qp_num=%x squeue",
230 qp->ib_qp.qp_num); 230 qp->ib_qp.qp_num);
231 ret = ehca_mmap_queue(vma, &qp->ipz_squeue, 231 ret = ehca_mmap_queue(vma, &qp->ipz_squeue,
@@ -249,10 +249,10 @@ static int ehca_mmap_qp(struct vm_area_struct *vma, struct ehca_qp *qp,
249 249
250int ehca_mmap(struct ib_ucontext *context, struct vm_area_struct *vma) 250int ehca_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
251{ 251{
252 u64 fileoffset = vma->vm_pgoff << PAGE_SHIFT; 252 u64 fileoffset = vma->vm_pgoff;
253 u32 idr_handle = fileoffset >> 32; 253 u32 idr_handle = fileoffset & 0x1FFFFFF;
254 u32 q_type = (fileoffset >> 28) & 0xF; /* CQ, QP,... */ 254 u32 q_type = (fileoffset >> 27) & 0x1; /* CQ, QP,... */
255 u32 rsrc_type = (fileoffset >> 24) & 0xF; /* sq,rq,cmnd_window */ 255 u32 rsrc_type = (fileoffset >> 25) & 0x3; /* sq,rq,cmnd_window */
256 u32 cur_pid = current->tgid; 256 u32 cur_pid = current->tgid;
257 u32 ret; 257 u32 ret;
258 struct ehca_cq *cq; 258 struct ehca_cq *cq;
@@ -261,7 +261,7 @@ int ehca_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
261 struct ib_uobject *uobject; 261 struct ib_uobject *uobject;
262 262
263 switch (q_type) { 263 switch (q_type) {
264 case 1: /* CQ */ 264 case 0: /* CQ */
265 read_lock(&ehca_cq_idr_lock); 265 read_lock(&ehca_cq_idr_lock);
266 cq = idr_find(&ehca_cq_idr, idr_handle); 266 cq = idr_find(&ehca_cq_idr, idr_handle);
267 read_unlock(&ehca_cq_idr_lock); 267 read_unlock(&ehca_cq_idr_lock);
@@ -289,7 +289,7 @@ int ehca_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
289 } 289 }
290 break; 290 break;
291 291
292 case 2: /* QP */ 292 case 1: /* QP */
293 read_lock(&ehca_qp_idr_lock); 293 read_lock(&ehca_qp_idr_lock);
294 qp = idr_find(&ehca_qp_idr, idr_handle); 294 qp = idr_find(&ehca_qp_idr, idr_handle);
295 read_unlock(&ehca_qp_idr_lock); 295 read_unlock(&ehca_qp_idr_lock);