author		Hoang-Nam Nguyen <hnguyen@linux.vnet.ibm.com>	2007-01-23 18:13:35 -0500
committer	Roland Dreier <rolandd@cisco.com>	2007-02-04 17:11:57 -0500
commit		4c34bdf58c0a3b305ebd9b5e74011ca1fd6d964d (patch)
tree		6e0a9639148b5bdb6b344ac048a4bc4453d5550f /drivers/infiniband
parent		1f12667021c542236b1f10eb5d8b2d8f3a79ab48 (diff)
IB/ehca: Remove use of do_mmap()
This patch removes do_mmap() from ehca:

- Call remap_pfn_range() for the hardware register block
- Use vm_insert_page() to register memory allocated for completion
  queues and queue pairs
- The actual mmap() call/trigger is now controlled by user space,
  i.e. libehca

Signed-off-by: Hoang-Nam Nguyen <hnguyen@de.ibm.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
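For orientation, the pattern the driver moves to looks roughly like the
sketch below: remap_pfn_range() for a single uncached hardware register
page, and vm_insert_page() for kernel-allocated queue pages. This is an
illustrative sketch only, not code from this patch; the demo_* names are
hypothetical, and it assumes a page-aligned queue buffer obtained from
__get_free_pages().

    #include <linux/mm.h>

    static unsigned long demo_reg_phys;  /* assumed MMIO register page address */
    static void *demo_queue;             /* assumed page-aligned queue buffer */
    static unsigned long demo_queue_len; /* assumed multiple of PAGE_SIZE */

    /* mmap handler sketch: offset 0 selects the register block,
     * anything else selects the queue memory */
    static int demo_mmap(struct vm_area_struct *vma)
    {
    	unsigned long ofs;
    	int ret;

    	if (vma->vm_pgoff == 0) {
    		/* hardware register block: map one uncached physical page */
    		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
    		return remap_pfn_range(vma, vma->vm_start,
    				       demo_reg_phys >> PAGE_SHIFT,
    				       PAGE_SIZE, vma->vm_page_prot);
    	}

    	/* queue memory: insert the kernel pages one at a time */
    	for (ofs = 0; ofs < demo_queue_len; ofs += PAGE_SIZE) {
    		ret = vm_insert_page(vma, vma->vm_start + ofs,
    				     virt_to_page(demo_queue + ofs));
    		if (ret)
    			return ret;
    	}
    	return 0;
    }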
Diffstat (limited to 'drivers/infiniband')
 drivers/infiniband/hw/ehca/ehca_classes.h |  15
 drivers/infiniband/hw/ehca/ehca_cq.c      |  65
 drivers/infiniband/hw/ehca/ehca_iverbs.h  |   8
 drivers/infiniband/hw/ehca/ehca_main.c    |   6
 drivers/infiniband/hw/ehca/ehca_qp.c      |  78
 drivers/infiniband/hw/ehca/ehca_uverbs.c  | 395
 6 files changed, 204 insertions, 363 deletions
diff --git a/drivers/infiniband/hw/ehca/ehca_classes.h b/drivers/infiniband/hw/ehca/ehca_classes.h
index 1c722032319c..c3580bfb3c58 100644
--- a/drivers/infiniband/hw/ehca/ehca_classes.h
+++ b/drivers/infiniband/hw/ehca/ehca_classes.h
@@ -119,13 +119,14 @@ struct ehca_qp {
 	struct ipz_qp_handle ipz_qp_handle;
 	struct ehca_pfqp pf;
 	struct ib_qp_init_attr init_attr;
-	u64 uspace_squeue;
-	u64 uspace_rqueue;
-	u64 uspace_fwh;
 	struct ehca_cq *send_cq;
 	struct ehca_cq *recv_cq;
 	unsigned int sqerr_purgeflag;
 	struct hlist_node list_entries;
+	/* mmap counter for resources mapped into user space */
+	u32 mm_count_squeue;
+	u32 mm_count_rqueue;
+	u32 mm_count_galpa;
 };
 
 /* must be power of 2 */
@@ -142,13 +143,14 @@ struct ehca_cq {
 	struct ipz_cq_handle ipz_cq_handle;
 	struct ehca_pfcq pf;
 	spinlock_t cb_lock;
-	u64 uspace_queue;
-	u64 uspace_fwh;
 	struct hlist_head qp_hashtab[QP_HASHTAB_LEN];
 	struct list_head entry;
 	u32 nr_callbacks;
 	spinlock_t task_lock;
 	u32 ownpid;
+	/* mmap counter for resources mapped into user space */
+	u32 mm_count_queue;
+	u32 mm_count_galpa;
 };
 
 enum ehca_mr_flag {
@@ -283,7 +285,6 @@ extern int ehca_port_act_time;
 extern int ehca_use_hp_mr;
 
 struct ipzu_queue_resp {
-	u64 queue; /* points to first queue entry */
 	u32 qe_size; /* queue entry size */
 	u32 act_nr_of_sg;
 	u32 queue_length; /* queue length allocated in bytes */
@@ -296,7 +297,6 @@ struct ehca_create_cq_resp {
 	u32 cq_number;
 	u32 token;
 	struct ipzu_queue_resp ipz_queue;
-	struct h_galpas galpas;
 };
 
 struct ehca_create_qp_resp {
@@ -309,7 +309,6 @@ struct ehca_create_qp_resp {
 	u32 dummy; /* padding for 8 byte alignment */
 	struct ipzu_queue_resp ipz_squeue;
 	struct ipzu_queue_resp ipz_rqueue;
-	struct h_galpas galpas;
 };
 
 struct ehca_alloc_cq_parms {
diff --git a/drivers/infiniband/hw/ehca/ehca_cq.c b/drivers/infiniband/hw/ehca/ehca_cq.c
index 6074c897f51c..9291a86ca053 100644
--- a/drivers/infiniband/hw/ehca/ehca_cq.c
+++ b/drivers/infiniband/hw/ehca/ehca_cq.c
@@ -267,7 +267,6 @@ struct ib_cq *ehca_create_cq(struct ib_device *device, int cqe,
 	if (context) {
 		struct ipz_queue *ipz_queue = &my_cq->ipz_queue;
 		struct ehca_create_cq_resp resp;
-		struct vm_area_struct *vma;
 		memset(&resp, 0, sizeof(resp));
 		resp.cq_number = my_cq->cq_number;
 		resp.token = my_cq->token;
@@ -276,40 +275,14 @@ struct ib_cq *ehca_create_cq(struct ib_device *device, int cqe,
 		resp.ipz_queue.queue_length = ipz_queue->queue_length;
 		resp.ipz_queue.pagesize = ipz_queue->pagesize;
 		resp.ipz_queue.toggle_state = ipz_queue->toggle_state;
-		ret = ehca_mmap_nopage(((u64)(my_cq->token) << 32) | 0x12000000,
-				       ipz_queue->queue_length,
-				       (void**)&resp.ipz_queue.queue,
-				       &vma);
-		if (ret) {
-			ehca_err(device, "Could not mmap queue pages");
-			cq = ERR_PTR(ret);
-			goto create_cq_exit4;
-		}
-		my_cq->uspace_queue = resp.ipz_queue.queue;
-		resp.galpas = my_cq->galpas;
-		ret = ehca_mmap_register(my_cq->galpas.user.fw_handle,
-					 (void**)&resp.galpas.kernel.fw_handle,
-					 &vma);
-		if (ret) {
-			ehca_err(device, "Could not mmap fw_handle");
-			cq = ERR_PTR(ret);
-			goto create_cq_exit5;
-		}
-		my_cq->uspace_fwh = (u64)resp.galpas.kernel.fw_handle;
 		if (ib_copy_to_udata(udata, &resp, sizeof(resp))) {
 			ehca_err(device, "Copy to udata failed.");
-			goto create_cq_exit6;
+			goto create_cq_exit4;
 		}
 	}
 
 	return cq;
 
-create_cq_exit6:
-	ehca_munmap(my_cq->uspace_fwh, EHCA_PAGESIZE);
-
-create_cq_exit5:
-	ehca_munmap(my_cq->uspace_queue, my_cq->ipz_queue.queue_length);
-
 create_cq_exit4:
 	ipz_queue_dtor(&my_cq->ipz_queue);
 
@@ -333,7 +306,6 @@ create_cq_exit1:
 int ehca_destroy_cq(struct ib_cq *cq)
 {
 	u64 h_ret;
-	int ret;
 	struct ehca_cq *my_cq = container_of(cq, struct ehca_cq, ib_cq);
 	int cq_num = my_cq->cq_number;
 	struct ib_device *device = cq->device;
@@ -343,6 +315,20 @@ int ehca_destroy_cq(struct ib_cq *cq)
 	u32 cur_pid = current->tgid;
 	unsigned long flags;
 
+	if (cq->uobject) {
+		if (my_cq->mm_count_galpa || my_cq->mm_count_queue) {
+			ehca_err(device, "Resources still referenced in "
+				 "user space cq_num=%x", my_cq->cq_number);
+			return -EINVAL;
+		}
+		if (my_cq->ownpid != cur_pid) {
+			ehca_err(device, "Invalid caller pid=%x ownpid=%x "
+				 "cq_num=%x",
+				 cur_pid, my_cq->ownpid, my_cq->cq_number);
+			return -EINVAL;
+		}
+	}
+
 	spin_lock_irqsave(&ehca_cq_idr_lock, flags);
 	while (my_cq->nr_callbacks) {
 		spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
@@ -353,25 +339,6 @@ int ehca_destroy_cq(struct ib_cq *cq)
 	idr_remove(&ehca_cq_idr, my_cq->token);
 	spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
 
-	if (my_cq->uspace_queue && my_cq->ownpid != cur_pid) {
-		ehca_err(device, "Invalid caller pid=%x ownpid=%x",
-			 cur_pid, my_cq->ownpid);
-		return -EINVAL;
-	}
-
-	/* un-mmap if vma alloc */
-	if (my_cq->uspace_queue ) {
-		ret = ehca_munmap(my_cq->uspace_queue,
-				  my_cq->ipz_queue.queue_length);
-		if (ret)
-			ehca_err(device, "Could not munmap queue ehca_cq=%p "
-				 "cq_num=%x", my_cq, cq_num);
-		ret = ehca_munmap(my_cq->uspace_fwh, EHCA_PAGESIZE);
-		if (ret)
-			ehca_err(device, "Could not munmap fwh ehca_cq=%p "
-				 "cq_num=%x", my_cq, cq_num);
-	}
-
 	h_ret = hipz_h_destroy_cq(adapter_handle, my_cq, 0);
 	if (h_ret == H_R_STATE) {
 		/* cq in err: read err data and destroy it forcibly */
@@ -400,7 +367,7 @@ int ehca_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata)
 	struct ehca_cq *my_cq = container_of(cq, struct ehca_cq, ib_cq);
 	u32 cur_pid = current->tgid;
 
-	if (my_cq->uspace_queue && my_cq->ownpid != cur_pid) {
+	if (cq->uobject && my_cq->ownpid != cur_pid) {
 		ehca_err(cq->device, "Invalid caller pid=%x ownpid=%x",
 			 cur_pid, my_cq->ownpid);
 		return -EINVAL;
diff --git a/drivers/infiniband/hw/ehca/ehca_iverbs.h b/drivers/infiniband/hw/ehca/ehca_iverbs.h
index cd7789f0d08e..95fd59fb4528 100644
--- a/drivers/infiniband/hw/ehca/ehca_iverbs.h
+++ b/drivers/infiniband/hw/ehca/ehca_iverbs.h
@@ -171,14 +171,6 @@ int ehca_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);
 
 void ehca_poll_eqs(unsigned long data);
 
-int ehca_mmap_nopage(u64 foffset,u64 length,void **mapped,
-		     struct vm_area_struct **vma);
-
-int ehca_mmap_register(u64 physical,void **mapped,
-		       struct vm_area_struct **vma);
-
-int ehca_munmap(unsigned long addr, size_t len);
-
 #ifdef CONFIG_PPC_64K_PAGES
 void *ehca_alloc_fw_ctrlblock(gfp_t flags);
 void ehca_free_fw_ctrlblock(void *ptr);
diff --git a/drivers/infiniband/hw/ehca/ehca_main.c b/drivers/infiniband/hw/ehca/ehca_main.c
index 6574fbbaead5..1155bcf48212 100644
--- a/drivers/infiniband/hw/ehca/ehca_main.c
+++ b/drivers/infiniband/hw/ehca/ehca_main.c
@@ -52,7 +52,7 @@
 MODULE_LICENSE("Dual BSD/GPL");
 MODULE_AUTHOR("Christoph Raisch <raisch@de.ibm.com>");
 MODULE_DESCRIPTION("IBM eServer HCA InfiniBand Device Driver");
-MODULE_VERSION("SVNEHCA_0019");
+MODULE_VERSION("SVNEHCA_0020");
 
 int ehca_open_aqp1 = 0;
 int ehca_debug_level = 0;
@@ -288,7 +288,7 @@ int ehca_init_device(struct ehca_shca *shca)
 	strlcpy(shca->ib_device.name, "ehca%d", IB_DEVICE_NAME_MAX);
 	shca->ib_device.owner = THIS_MODULE;
 
-	shca->ib_device.uverbs_abi_ver = 5;
+	shca->ib_device.uverbs_abi_ver = 6;
 	shca->ib_device.uverbs_cmd_mask =
 		(1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
 		(1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
@@ -790,7 +790,7 @@ int __init ehca_module_init(void)
 	int ret;
 
 	printk(KERN_INFO "eHCA Infiniband Device Driver "
-	       "(Rel.: SVNEHCA_0019)\n");
+	       "(Rel.: SVNEHCA_0020)\n");
 	idr_init(&ehca_qp_idr);
 	idr_init(&ehca_cq_idr);
 	spin_lock_init(&ehca_qp_idr_lock);
diff --git a/drivers/infiniband/hw/ehca/ehca_qp.c b/drivers/infiniband/hw/ehca/ehca_qp.c
index 34b85556d01e..95efef921f1d 100644
--- a/drivers/infiniband/hw/ehca/ehca_qp.c
+++ b/drivers/infiniband/hw/ehca/ehca_qp.c
@@ -637,7 +637,6 @@ struct ib_qp *ehca_create_qp(struct ib_pd *pd,
 		struct ipz_queue *ipz_rqueue = &my_qp->ipz_rqueue;
 		struct ipz_queue *ipz_squeue = &my_qp->ipz_squeue;
 		struct ehca_create_qp_resp resp;
-		struct vm_area_struct * vma;
 		memset(&resp, 0, sizeof(resp));
 
 		resp.qp_num = my_qp->real_qp_num;
@@ -651,59 +650,21 @@ struct ib_qp *ehca_create_qp(struct ib_pd *pd,
 		resp.ipz_rqueue.queue_length = ipz_rqueue->queue_length;
 		resp.ipz_rqueue.pagesize = ipz_rqueue->pagesize;
 		resp.ipz_rqueue.toggle_state = ipz_rqueue->toggle_state;
-		ret = ehca_mmap_nopage(((u64)(my_qp->token) << 32) | 0x22000000,
-				       ipz_rqueue->queue_length,
-				       (void**)&resp.ipz_rqueue.queue,
-				       &vma);
-		if (ret) {
-			ehca_err(pd->device, "Could not mmap rqueue pages");
-			goto create_qp_exit3;
-		}
-		my_qp->uspace_rqueue = resp.ipz_rqueue.queue;
 		/* squeue properties */
 		resp.ipz_squeue.qe_size = ipz_squeue->qe_size;
 		resp.ipz_squeue.act_nr_of_sg = ipz_squeue->act_nr_of_sg;
 		resp.ipz_squeue.queue_length = ipz_squeue->queue_length;
 		resp.ipz_squeue.pagesize = ipz_squeue->pagesize;
 		resp.ipz_squeue.toggle_state = ipz_squeue->toggle_state;
-		ret = ehca_mmap_nopage(((u64)(my_qp->token) << 32) | 0x23000000,
-				       ipz_squeue->queue_length,
-				       (void**)&resp.ipz_squeue.queue,
-				       &vma);
-		if (ret) {
-			ehca_err(pd->device, "Could not mmap squeue pages");
-			goto create_qp_exit4;
-		}
-		my_qp->uspace_squeue = resp.ipz_squeue.queue;
-		/* fw_handle */
-		resp.galpas = my_qp->galpas;
-		ret = ehca_mmap_register(my_qp->galpas.user.fw_handle,
-					 (void**)&resp.galpas.kernel.fw_handle,
-					 &vma);
-		if (ret) {
-			ehca_err(pd->device, "Could not mmap fw_handle");
-			goto create_qp_exit5;
-		}
-		my_qp->uspace_fwh = (u64)resp.galpas.kernel.fw_handle;
-
 		if (ib_copy_to_udata(udata, &resp, sizeof resp)) {
 			ehca_err(pd->device, "Copy to udata failed");
 			ret = -EINVAL;
-			goto create_qp_exit6;
+			goto create_qp_exit3;
 		}
 	}
 
 	return &my_qp->ib_qp;
 
-create_qp_exit6:
-	ehca_munmap(my_qp->uspace_fwh, EHCA_PAGESIZE);
-
-create_qp_exit5:
-	ehca_munmap(my_qp->uspace_squeue, my_qp->ipz_squeue.queue_length);
-
-create_qp_exit4:
-	ehca_munmap(my_qp->uspace_rqueue, my_qp->ipz_rqueue.queue_length);
-
 create_qp_exit3:
 	ipz_queue_dtor(&my_qp->ipz_rqueue);
 	ipz_queue_dtor(&my_qp->ipz_squeue);
@@ -931,7 +892,7 @@ static int internal_modify_qp(struct ib_qp *ibqp,
 	     my_qp->qp_type == IB_QPT_SMI) &&
 	    statetrans == IB_QPST_SQE2RTS) {
 		/* mark next free wqe if kernel */
-		if (my_qp->uspace_squeue == 0) {
+		if (!ibqp->uobject) {
 			struct ehca_wqe *wqe;
 			/* lock send queue */
 			spin_lock_irqsave(&my_qp->spinlock_s, spl_flags);
@@ -1417,11 +1378,18 @@ int ehca_destroy_qp(struct ib_qp *ibqp)
 	enum ib_qp_type qp_type;
 	unsigned long flags;
 
-	if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context &&
-	    my_pd->ownpid != cur_pid) {
-		ehca_err(ibqp->device, "Invalid caller pid=%x ownpid=%x",
-			 cur_pid, my_pd->ownpid);
-		return -EINVAL;
+	if (ibqp->uobject) {
+		if (my_qp->mm_count_galpa ||
+		    my_qp->mm_count_rqueue || my_qp->mm_count_squeue) {
+			ehca_err(ibqp->device, "Resources still referenced in "
+				 "user space qp_num=%x", ibqp->qp_num);
+			return -EINVAL;
+		}
+		if (my_pd->ownpid != cur_pid) {
+			ehca_err(ibqp->device, "Invalid caller pid=%x ownpid=%x",
+				 cur_pid, my_pd->ownpid);
+			return -EINVAL;
+		}
 	}
 
 	if (my_qp->send_cq) {
@@ -1439,24 +1407,6 @@ int ehca_destroy_qp(struct ib_qp *ibqp)
 	idr_remove(&ehca_qp_idr, my_qp->token);
 	spin_unlock_irqrestore(&ehca_qp_idr_lock, flags);
 
-	/* un-mmap if vma alloc */
-	if (my_qp->uspace_rqueue) {
-		ret = ehca_munmap(my_qp->uspace_rqueue,
-				  my_qp->ipz_rqueue.queue_length);
-		if (ret)
-			ehca_err(ibqp->device, "Could not munmap rqueue "
-				 "qp_num=%x", qp_num);
-		ret = ehca_munmap(my_qp->uspace_squeue,
-				  my_qp->ipz_squeue.queue_length);
-		if (ret)
-			ehca_err(ibqp->device, "Could not munmap squeue "
-				 "qp_num=%x", qp_num);
-		ret = ehca_munmap(my_qp->uspace_fwh, EHCA_PAGESIZE);
-		if (ret)
-			ehca_err(ibqp->device, "Could not munmap fwh qp_num=%x",
-				 qp_num);
-	}
-
 	h_ret = hipz_h_destroy_qp(shca->ipz_hca_handle, my_qp);
 	if (h_ret != H_SUCCESS) {
 		ehca_err(ibqp->device, "hipz_h_destroy_qp() failed rc=%lx "
diff --git a/drivers/infiniband/hw/ehca/ehca_uverbs.c b/drivers/infiniband/hw/ehca/ehca_uverbs.c
index e08764e4aef2..73db920b6945 100644
--- a/drivers/infiniband/hw/ehca/ehca_uverbs.c
+++ b/drivers/infiniband/hw/ehca/ehca_uverbs.c
@@ -68,105 +68,183 @@ int ehca_dealloc_ucontext(struct ib_ucontext *context)
 	return 0;
 }
 
-struct page *ehca_nopage(struct vm_area_struct *vma,
-			 unsigned long address, int *type)
-{
-	struct page *mypage = NULL;
-	u64 fileoffset = vma->vm_pgoff << PAGE_SHIFT;
-	u32 idr_handle = fileoffset >> 32;
-	u32 q_type = (fileoffset >> 28) & 0xF;	  /* CQ, QP,...        */
-	u32 rsrc_type = (fileoffset >> 24) & 0xF; /* sq,rq,cmnd_window */
-	u32 cur_pid = current->tgid;
-	unsigned long flags;
-	struct ehca_cq *cq;
-	struct ehca_qp *qp;
-	struct ehca_pd *pd;
-	u64 offset;
-	void *vaddr;
-
-	switch (q_type) {
-	case 1: /* CQ */
-		spin_lock_irqsave(&ehca_cq_idr_lock, flags);
-		cq = idr_find(&ehca_cq_idr, idr_handle);
-		spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
-
-		/* make sure this mmap really belongs to the authorized user */
-		if (!cq) {
-			ehca_gen_err("cq is NULL ret=NOPAGE_SIGBUS");
-			return NOPAGE_SIGBUS;
-		}
-
-		if (cq->ownpid != cur_pid) {
-			ehca_err(cq->ib_cq.device,
-				 "Invalid caller pid=%x ownpid=%x",
-				 cur_pid, cq->ownpid);
-			return NOPAGE_SIGBUS;
-		}
-
-		if (rsrc_type == 2) {
-			ehca_dbg(cq->ib_cq.device, "cq=%p cq queuearea", cq);
-			offset = address - vma->vm_start;
-			vaddr = ipz_qeit_calc(&cq->ipz_queue, offset);
-			ehca_dbg(cq->ib_cq.device, "offset=%lx vaddr=%p",
-				 offset, vaddr);
-			mypage = virt_to_page(vaddr);
-		}
-		break;
-
-	case 2: /* QP */
-		spin_lock_irqsave(&ehca_qp_idr_lock, flags);
-		qp = idr_find(&ehca_qp_idr, idr_handle);
-		spin_unlock_irqrestore(&ehca_qp_idr_lock, flags);
-
-		/* make sure this mmap really belongs to the authorized user */
-		if (!qp) {
-			ehca_gen_err("qp is NULL ret=NOPAGE_SIGBUS");
-			return NOPAGE_SIGBUS;
-		}
-
-		pd = container_of(qp->ib_qp.pd, struct ehca_pd, ib_pd);
-		if (pd->ownpid != cur_pid) {
-			ehca_err(qp->ib_qp.device,
-				 "Invalid caller pid=%x ownpid=%x",
-				 cur_pid, pd->ownpid);
-			return NOPAGE_SIGBUS;
-		}
-
-		if (rsrc_type == 2) { /* rqueue */
-			ehca_dbg(qp->ib_qp.device, "qp=%p qp rqueuearea", qp);
-			offset = address - vma->vm_start;
-			vaddr = ipz_qeit_calc(&qp->ipz_rqueue, offset);
-			ehca_dbg(qp->ib_qp.device, "offset=%lx vaddr=%p",
-				 offset, vaddr);
-			mypage = virt_to_page(vaddr);
-		} else if (rsrc_type == 3) { /* squeue */
-			ehca_dbg(qp->ib_qp.device, "qp=%p qp squeuearea", qp);
-			offset = address - vma->vm_start;
-			vaddr = ipz_qeit_calc(&qp->ipz_squeue, offset);
-			ehca_dbg(qp->ib_qp.device, "offset=%lx vaddr=%p",
-				 offset, vaddr);
-			mypage = virt_to_page(vaddr);
-		}
-		break;
-
-	default:
-		ehca_gen_err("bad queue type %x", q_type);
-		return NOPAGE_SIGBUS;
-	}
-
-	if (!mypage) {
-		ehca_gen_err("Invalid page adr==NULL ret=NOPAGE_SIGBUS");
-		return NOPAGE_SIGBUS;
-	}
-	get_page(mypage);
-
-	return mypage;
-}
-
-static struct vm_operations_struct ehcau_vm_ops = {
-	.nopage = ehca_nopage,
-};
-
+static void ehca_mm_open(struct vm_area_struct *vma)
+{
+	u32 *count = (u32*)vma->vm_private_data;
+	if (!count) {
+		ehca_gen_err("Invalid vma struct vm_start=%lx vm_end=%lx",
+			     vma->vm_start, vma->vm_end);
+		return;
+	}
+	(*count)++;
+	if (!(*count))
+		ehca_gen_err("Use count overflow vm_start=%lx vm_end=%lx",
+			     vma->vm_start, vma->vm_end);
+	ehca_gen_dbg("vm_start=%lx vm_end=%lx count=%x",
+		     vma->vm_start, vma->vm_end, *count);
+}
+
+static void ehca_mm_close(struct vm_area_struct *vma)
+{
+	u32 *count = (u32*)vma->vm_private_data;
+	if (!count) {
+		ehca_gen_err("Invalid vma struct vm_start=%lx vm_end=%lx",
+			     vma->vm_start, vma->vm_end);
+		return;
+	}
+	(*count)--;
+	ehca_gen_dbg("vm_start=%lx vm_end=%lx count=%x",
+		     vma->vm_start, vma->vm_end, *count);
+}
+
+static struct vm_operations_struct vm_ops = {
+	.open = ehca_mm_open,
+	.close = ehca_mm_close,
+};
+
+static int ehca_mmap_fw(struct vm_area_struct *vma, struct h_galpas *galpas,
+			u32 *mm_count)
+{
+	int ret;
+	u64 vsize, physical;
+
+	vsize = vma->vm_end - vma->vm_start;
+	if (vsize != EHCA_PAGESIZE) {
+		ehca_gen_err("invalid vsize=%lx", vma->vm_end - vma->vm_start);
+		return -EINVAL;
+	}
+
+	physical = galpas->user.fw_handle;
+	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+	ehca_gen_dbg("vsize=%lx physical=%lx", vsize, physical);
+	/* VM_IO | VM_RESERVED are set by remap_pfn_range() */
+	ret = remap_pfn_range(vma, vma->vm_start, physical >> PAGE_SHIFT,
+			      vsize, vma->vm_page_prot);
+	if (unlikely(ret)) {
+		ehca_gen_err("remap_pfn_range() failed ret=%x", ret);
+		return -ENOMEM;
+	}
+
+	vma->vm_private_data = mm_count;
+	(*mm_count)++;
+	vma->vm_ops = &vm_ops;
+
+	return 0;
+}
+
+static int ehca_mmap_queue(struct vm_area_struct *vma, struct ipz_queue *queue,
+			   u32 *mm_count)
+{
+	int ret;
+	u64 start, ofs;
+	struct page *page;
+
+	vma->vm_flags |= VM_RESERVED;
+	start = vma->vm_start;
+	for (ofs = 0; ofs < queue->queue_length; ofs += PAGE_SIZE) {
+		u64 virt_addr = (u64)ipz_qeit_calc(queue, ofs);
+		page = virt_to_page(virt_addr);
+		ret = vm_insert_page(vma, start, page);
+		if (unlikely(ret)) {
+			ehca_gen_err("vm_insert_page() failed rc=%x", ret);
+			return ret;
+		}
+		start += PAGE_SIZE;
+	}
+	vma->vm_private_data = mm_count;
+	(*mm_count)++;
+	vma->vm_ops = &vm_ops;
+
+	return 0;
+}
+
+static int ehca_mmap_cq(struct vm_area_struct *vma, struct ehca_cq *cq,
+			u32 rsrc_type)
+{
+	int ret;
+
+	switch (rsrc_type) {
+	case 1: /* galpa fw handle */
+		ehca_dbg(cq->ib_cq.device, "cq_num=%x fw", cq->cq_number);
+		ret = ehca_mmap_fw(vma, &cq->galpas, &cq->mm_count_galpa);
+		if (unlikely(ret)) {
+			ehca_err(cq->ib_cq.device,
+				 "ehca_mmap_fw() failed rc=%x cq_num=%x",
+				 ret, cq->cq_number);
+			return ret;
+		}
+		break;
+
+	case 2: /* cq queue_addr */
+		ehca_dbg(cq->ib_cq.device, "cq_num=%x queue", cq->cq_number);
+		ret = ehca_mmap_queue(vma, &cq->ipz_queue, &cq->mm_count_queue);
+		if (unlikely(ret)) {
+			ehca_err(cq->ib_cq.device,
+				 "ehca_mmap_queue() failed rc=%x cq_num=%x",
+				 ret, cq->cq_number);
+			return ret;
+		}
+		break;
+
+	default:
+		ehca_err(cq->ib_cq.device, "bad resource type=%x cq_num=%x",
+			 rsrc_type, cq->cq_number);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int ehca_mmap_qp(struct vm_area_struct *vma, struct ehca_qp *qp,
+			u32 rsrc_type)
+{
+	int ret;
+
+	switch (rsrc_type) {
+	case 1: /* galpa fw handle */
+		ehca_dbg(qp->ib_qp.device, "qp_num=%x fw", qp->ib_qp.qp_num);
+		ret = ehca_mmap_fw(vma, &qp->galpas, &qp->mm_count_galpa);
+		if (unlikely(ret)) {
+			ehca_err(qp->ib_qp.device,
+				 "remap_pfn_range() failed ret=%x qp_num=%x",
+				 ret, qp->ib_qp.qp_num);
+			return -ENOMEM;
+		}
+		break;
+
+	case 2: /* qp rqueue_addr */
+		ehca_dbg(qp->ib_qp.device, "qp_num=%x rqueue",
+			 qp->ib_qp.qp_num);
+		ret = ehca_mmap_queue(vma, &qp->ipz_rqueue, &qp->mm_count_rqueue);
+		if (unlikely(ret)) {
+			ehca_err(qp->ib_qp.device,
+				 "ehca_mmap_queue(rq) failed rc=%x qp_num=%x",
+				 ret, qp->ib_qp.qp_num);
+			return ret;
+		}
+		break;
+
+	case 3: /* qp squeue_addr */
+		ehca_dbg(qp->ib_qp.device, "qp_num=%x squeue",
+			 qp->ib_qp.qp_num);
+		ret = ehca_mmap_queue(vma, &qp->ipz_squeue, &qp->mm_count_squeue);
+		if (unlikely(ret)) {
+			ehca_err(qp->ib_qp.device,
+				 "ehca_mmap_queue(sq) failed rc=%x qp_num=%x",
+				 ret, qp->ib_qp.qp_num);
+			return ret;
+		}
+		break;
+
+	default:
+		ehca_err(qp->ib_qp.device, "bad resource type=%x qp=num=%x",
+			 rsrc_type, qp->ib_qp.qp_num);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
 int ehca_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
 {
 	u64 fileoffset = vma->vm_pgoff << PAGE_SHIFT;
@@ -175,7 +253,6 @@ int ehca_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
 	u32 rsrc_type = (fileoffset >> 24) & 0xF; /* sq,rq,cmnd_window */
 	u32 cur_pid = current->tgid;
 	u32 ret;
-	u64 vsize, physical;
 	unsigned long flags;
 	struct ehca_cq *cq;
 	struct ehca_qp *qp;
@@ -201,44 +278,12 @@ int ehca_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
 		if (!cq->ib_cq.uobject || cq->ib_cq.uobject->context != context)
 			return -EINVAL;
 
-		switch (rsrc_type) {
-		case 1: /* galpa fw handle */
-			ehca_dbg(cq->ib_cq.device, "cq=%p cq triggerarea", cq);
-			vma->vm_flags |= VM_RESERVED;
-			vsize = vma->vm_end - vma->vm_start;
-			if (vsize != EHCA_PAGESIZE) {
-				ehca_err(cq->ib_cq.device, "invalid vsize=%lx",
-					 vma->vm_end - vma->vm_start);
-				return -EINVAL;
-			}
-
-			physical = cq->galpas.user.fw_handle;
-			vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-			vma->vm_flags |= VM_IO | VM_RESERVED;
-
-			ehca_dbg(cq->ib_cq.device,
-				 "vsize=%lx physical=%lx", vsize, physical);
-			ret = remap_pfn_range(vma, vma->vm_start,
-					      physical >> PAGE_SHIFT, vsize,
-					      vma->vm_page_prot);
-			if (ret) {
-				ehca_err(cq->ib_cq.device,
-					 "remap_pfn_range() failed ret=%x",
-					 ret);
-				return -ENOMEM;
-			}
-			break;
-
-		case 2: /* cq queue_addr */
-			ehca_dbg(cq->ib_cq.device, "cq=%p cq q_addr", cq);
-			vma->vm_flags |= VM_RESERVED;
-			vma->vm_ops = &ehcau_vm_ops;
-			break;
-
-		default:
-			ehca_err(cq->ib_cq.device, "bad resource type %x",
-				 rsrc_type);
-			return -EINVAL;
+		ret = ehca_mmap_cq(vma, cq, rsrc_type);
+		if (unlikely(ret)) {
+			ehca_err(cq->ib_cq.device,
+				 "ehca_mmap_cq() failed rc=%x cq_num=%x",
+				 ret, cq->cq_number);
+			return ret;
 		}
 		break;
 
@@ -262,50 +307,12 @@ int ehca_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
 		if (!qp->ib_qp.uobject || qp->ib_qp.uobject->context != context)
 			return -EINVAL;
 
-		switch (rsrc_type) {
-		case 1: /* galpa fw handle */
-			ehca_dbg(qp->ib_qp.device, "qp=%p qp triggerarea", qp);
-			vma->vm_flags |= VM_RESERVED;
-			vsize = vma->vm_end - vma->vm_start;
-			if (vsize != EHCA_PAGESIZE) {
-				ehca_err(qp->ib_qp.device, "invalid vsize=%lx",
-					 vma->vm_end - vma->vm_start);
-				return -EINVAL;
-			}
-
-			physical = qp->galpas.user.fw_handle;
-			vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-			vma->vm_flags |= VM_IO | VM_RESERVED;
-
-			ehca_dbg(qp->ib_qp.device, "vsize=%lx physical=%lx",
-				 vsize, physical);
-			ret = remap_pfn_range(vma, vma->vm_start,
-					      physical >> PAGE_SHIFT, vsize,
-					      vma->vm_page_prot);
-			if (ret) {
-				ehca_err(qp->ib_qp.device,
-					 "remap_pfn_range() failed ret=%x",
-					 ret);
-				return -ENOMEM;
-			}
-			break;
-
-		case 2: /* qp rqueue_addr */
-			ehca_dbg(qp->ib_qp.device, "qp=%p qp rqueue_addr", qp);
-			vma->vm_flags |= VM_RESERVED;
-			vma->vm_ops = &ehcau_vm_ops;
-			break;
-
-		case 3: /* qp squeue_addr */
-			ehca_dbg(qp->ib_qp.device, "qp=%p qp squeue_addr", qp);
-			vma->vm_flags |= VM_RESERVED;
-			vma->vm_ops = &ehcau_vm_ops;
-			break;
-
-		default:
-			ehca_err(qp->ib_qp.device, "bad resource type %x",
-				 rsrc_type);
-			return -EINVAL;
+		ret = ehca_mmap_qp(vma, qp, rsrc_type);
+		if (unlikely(ret)) {
+			ehca_err(qp->ib_qp.device,
+				 "ehca_mmap_qp() failed rc=%x qp_num=%x",
+				 ret, qp->ib_qp.qp_num);
+			return ret;
 		}
 		break;
 
@@ -316,77 +323,3 @@ int ehca_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
 
 	return 0;
 }
-
-int ehca_mmap_nopage(u64 foffset, u64 length, void **mapped,
-		     struct vm_area_struct **vma)
-{
-	down_write(&current->mm->mmap_sem);
-	*mapped = (void*)do_mmap(NULL,0, length, PROT_WRITE,
-				 MAP_SHARED | MAP_ANONYMOUS,
-				 foffset);
-	up_write(&current->mm->mmap_sem);
-	if (!(*mapped)) {
-		ehca_gen_err("couldn't mmap foffset=%lx length=%lx",
-			     foffset, length);
-		return -EINVAL;
-	}
-
-	*vma = find_vma(current->mm, (u64)*mapped);
-	if (!(*vma)) {
-		down_write(&current->mm->mmap_sem);
-		do_munmap(current->mm, 0, length);
-		up_write(&current->mm->mmap_sem);
-		ehca_gen_err("couldn't find vma queue=%p", *mapped);
-		return -EINVAL;
-	}
-	(*vma)->vm_flags |= VM_RESERVED;
-	(*vma)->vm_ops = &ehcau_vm_ops;
-
-	return 0;
-}
-
-int ehca_mmap_register(u64 physical, void **mapped,
-		       struct vm_area_struct **vma)
-{
-	int ret;
-	unsigned long vsize;
-	/* ehca hw supports only 4k page */
-	ret = ehca_mmap_nopage(0, EHCA_PAGESIZE, mapped, vma);
-	if (ret) {
-		ehca_gen_err("could'nt mmap physical=%lx", physical);
-		return ret;
-	}
-
-	(*vma)->vm_flags |= VM_RESERVED;
-	vsize = (*vma)->vm_end - (*vma)->vm_start;
-	if (vsize != EHCA_PAGESIZE) {
-		ehca_gen_err("invalid vsize=%lx",
-			     (*vma)->vm_end - (*vma)->vm_start);
-		return -EINVAL;
-	}
-
-	(*vma)->vm_page_prot = pgprot_noncached((*vma)->vm_page_prot);
-	(*vma)->vm_flags |= VM_IO | VM_RESERVED;
-
-	ret = remap_pfn_range((*vma), (*vma)->vm_start,
-			      physical >> PAGE_SHIFT, vsize,
-			      (*vma)->vm_page_prot);
-	if (ret) {
-		ehca_gen_err("remap_pfn_range() failed ret=%x", ret);
-		return -ENOMEM;
-	}
-
-	return 0;
-
-}
-
-int ehca_munmap(unsigned long addr, size_t len) {
-	int ret = 0;
-	struct mm_struct *mm = current->mm;
-	if (mm) {
-		down_write(&mm->mmap_sem);
-		ret = do_munmap(mm, addr, len);
-		up_write(&mm->mmap_sem);
-	}
-	return ret;
-}
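With the kernel-initiated do_mmap() gone, a userspace library such as
libehca must issue the mmap() itself, encoding the target resource into
the file offset exactly as ehca_mmap() decodes it above: the idr token in
the upper 32 bits, q_type in bits 28-31 (1 = CQ, 2 = QP) and rsrc_type in
bits 24-27 (1 = galpa fw handle, 2 = queue, 3 = send queue). A hedged
userspace sketch of that trigger follows; map_cq_queue() and its
parameters are assumptions for illustration, not actual libehca source,
and it assumes a 64-bit off_t (_FILE_OFFSET_BITS=64 on 32-bit builds):

    #include <stdint.h>
    #include <sys/mman.h>

    static void *map_cq_queue(int fd, uint32_t token, size_t queue_length)
    {
    	/* q_type 1 = CQ, rsrc_type 2 = queue pages */
    	off_t offset = ((off_t)token << 32) |
    		       ((off_t)1 << 28) |
    		       ((off_t)2 << 24);

    	return mmap(NULL, queue_length, PROT_READ | PROT_WRITE,
    		    MAP_SHARED, fd, offset);
    }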