author	Roland Dreier <rolandd@cisco.com>	2005-07-07 20:57:10 -0400
committer	Linus Torvalds <torvalds@g5.osdl.org>	2005-07-07 21:23:47 -0400
commit	e2773c062e41f710d8ef1e8a790c7e558aff663d
tree	1e0c19d85c429fbd19f2996d3f2a88652e8820c3
parent	404865516ce6b6d7ee37c4eb4ee77d78b38e669a
[PATCH] IB uverbs: core API extensions
First of a series of patches which add support for direct userspace access to InfiniBand hardware -- so-called "userspace verbs." I believe these patches are ready to merge, but a final review would be useful.

These patches should incorporate all of the feedback from the discussion when I posted an earlier version back in April (see http://lkml.org/lkml/2005/4/4/267 for the start of the thread). In particular, memory pinned for use by userspace is accounted for in current->mm->vm_locked and requests to pin memory are checked against RLIMIT_MEMLOCK.

This patch:

Modify the ib_verbs.h header file with changes required for InfiniBand userspace verbs support. We add a few structures to keep track of userspace context, and extend the driver API so that low-level drivers know when they're creating resources that will be used from userspace.

Signed-off-by: Roland Dreier <rolandd@cisco.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
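To illustrate the "drivers know when they're creating resources for userspace" point, here is a minimal sketch, not taken from this patch, of how a low-level driver's alloc_pd method could use the extended (device, context, udata) signature and the new ib_copy_to_udata() helper. The names struct my_pd, struct my_dev, to_mdev(), my_pd_alloc() and my_pd_free() are hypothetical; only ib_pd, ib_ucontext, ib_udata and ib_copy_to_udata() come from the header changes below, and the assumption that the in-kernel path passes a NULL context is mine.

	/*
	 * Hypothetical driver code -- not part of this patch.  A driver-private
	 * alloc_pd that notices whether the PD is being created on behalf of a
	 * userspace process (context != NULL) and, if so, returns its PD number
	 * to userspace through the ib_udata response buffer.
	 */
	struct my_pd {
		struct ib_pd	ibpd;	/* generic PD embedded in the driver's PD */
		u32		pdn;	/* device-specific PD number (hypothetical) */
	};

	static struct ib_pd *my_alloc_pd(struct ib_device *ibdev,
					 struct ib_ucontext *context,
					 struct ib_udata *udata)
	{
		struct my_pd *pd;
		int err;

		pd = kmalloc(sizeof *pd, GFP_KERNEL);
		if (!pd)
			return ERR_PTR(-ENOMEM);

		err = my_pd_alloc(to_mdev(ibdev), &pd->pdn);	/* hypothetical HW call */
		if (err) {
			kfree(pd);
			return ERR_PTR(err);
		}

		if (context) {
			/* Created from userspace: hand the PD number back through
			   the response buffer supplied by the uverbs layer. */
			if (ib_copy_to_udata(udata, &pd->pdn, sizeof pd->pdn)) {
				my_pd_free(to_mdev(ibdev), pd->pdn);
				kfree(pd);
				return ERR_PTR(-EFAULT);
			}
		}

		return &pd->ibpd;
	}

The same pattern presumably applies to the other extended methods (create_cq, create_qp, reg_user_mr): a NULL context/udata means a kernel consumer, while a non-NULL pair means the resource belongs to a userspace process and any driver-specific handles must travel through ib_udata rather than direct pointer returns.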
-rw-r--r--	drivers/infiniband/include/ib_verbs.h	124
1 file changed, 106 insertions(+), 18 deletions(-)
diff --git a/drivers/infiniband/include/ib_verbs.h b/drivers/infiniband/include/ib_verbs.h
index cf01f044a223..e5bd9a10c201 100644
--- a/drivers/infiniband/include/ib_verbs.h
+++ b/drivers/infiniband/include/ib_verbs.h
@@ -4,6 +4,7 @@
  * Copyright (c) 2004 Intel Corporation. All rights reserved.
  * Copyright (c) 2004 Topspin Corporation. All rights reserved.
  * Copyright (c) 2004 Voltaire Corporation. All rights reserved.
+ * Copyright (c) 2005 Cisco Systems. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses. You may choose to be licensed under the terms of the GNU
@@ -41,7 +42,10 @@
 
 #include <linux/types.h>
 #include <linux/device.h>
+
 #include <asm/atomic.h>
+#include <asm/scatterlist.h>
+#include <asm/uaccess.h>
 
 union ib_gid {
 	u8	raw[16];
@@ -544,7 +548,7 @@ struct ib_send_wr {
 	int			num_sge;
 	enum ib_wr_opcode	opcode;
 	int			send_flags;
-	u32			imm_data;
+	__be32			imm_data;
 	union {
 		struct {
 			u64	remote_addr;
@@ -618,29 +622,86 @@ struct ib_fmr_attr {
 	u8	page_size;
 };
 
+struct ib_ucontext {
+	struct ib_device       *device;
+	struct list_head	pd_list;
+	struct list_head	mr_list;
+	struct list_head	mw_list;
+	struct list_head	cq_list;
+	struct list_head	qp_list;
+	struct list_head	srq_list;
+	struct list_head	ah_list;
+	spinlock_t		lock;
+};
+
+struct ib_uobject {
+	u64			user_handle;	/* handle given to us by userspace */
+	struct ib_ucontext     *context;	/* associated user context */
+	struct list_head	list;		/* link to context's list */
+	u32			id;		/* index into kernel idr */
+};
+
+struct ib_umem {
+	unsigned long		user_base;
+	unsigned long		virt_base;
+	size_t			length;
+	int			offset;
+	int			page_size;
+	int			writable;
+	struct list_head	chunk_list;
+};
+
+struct ib_umem_chunk {
+	struct list_head	list;
+	int			nents;
+	int			nmap;
+	struct scatterlist	page_list[0];
+};
+
+struct ib_udata {
+	void __user *inbuf;
+	void __user *outbuf;
+	size_t       inlen;
+	size_t       outlen;
+};
+
+#define IB_UMEM_MAX_PAGE_CHUNK						\
+	((PAGE_SIZE - offsetof(struct ib_umem_chunk, page_list)) /	\
+	 ((void *) &((struct ib_umem_chunk *) 0)->page_list[1] -	\
+	  (void *) &((struct ib_umem_chunk *) 0)->page_list[0]))
+
+struct ib_umem_object {
+	struct ib_uobject	uobject;
+	struct ib_umem		umem;
+};
+
 struct ib_pd {
 	struct ib_device       *device;
-	atomic_t		usecnt; /* count all resources */
+	struct ib_uobject      *uobject;
+	atomic_t		usecnt; /* count all resources */
 };
 
 struct ib_ah {
 	struct ib_device	*device;
 	struct ib_pd		*pd;
+	struct ib_uobject	*uobject;
 };
 
 typedef void (*ib_comp_handler)(struct ib_cq *cq, void *cq_context);
 
 struct ib_cq {
 	struct ib_device       *device;
-	ib_comp_handler		comp_handler;
-	void                  (*event_handler)(struct ib_event *, void *);
-	void		       *cq_context;
-	int			cqe;
-	atomic_t		usecnt; /* count number of work queues */
+	struct ib_uobject      *uobject;
+	ib_comp_handler		comp_handler;
+	void                  (*event_handler)(struct ib_event *, void *);
+	void		       *cq_context;
+	int			cqe;
+	atomic_t		usecnt; /* count number of work queues */
 };
 
 struct ib_srq {
 	struct ib_device	*device;
+	struct ib_uobject	*uobject;
 	struct ib_pd		*pd;
 	void		       *srq_context;
 	atomic_t		usecnt;
@@ -652,6 +713,7 @@ struct ib_qp {
 	struct ib_cq	       *send_cq;
 	struct ib_cq	       *recv_cq;
 	struct ib_srq	       *srq;
+	struct ib_uobject      *uobject;
 	void		      (*event_handler)(struct ib_event *, void *);
 	void		       *qp_context;
 	u32			qp_num;
@@ -659,16 +721,18 @@ struct ib_qp {
 };
 
 struct ib_mr {
 	struct ib_device  *device;
 	struct ib_pd	  *pd;
-	u32		   lkey;
-	u32		   rkey;
-	atomic_t	   usecnt; /* count number of MWs */
+	struct ib_uobject *uobject;
+	u32		   lkey;
+	u32		   rkey;
+	atomic_t	   usecnt; /* count number of MWs */
 };
 
 struct ib_mw {
 	struct ib_device	*device;
 	struct ib_pd		*pd;
+	struct ib_uobject	*uobject;
 	u32			rkey;
 };
 
@@ -737,7 +801,14 @@ struct ib_device {
 	int		           (*modify_port)(struct ib_device *device,
 						  u8 port_num, int port_modify_mask,
 						  struct ib_port_modify *port_modify);
-	struct ib_pd *             (*alloc_pd)(struct ib_device *device);
+	struct ib_ucontext *       (*alloc_ucontext)(struct ib_device *device,
+						     struct ib_udata *udata);
+	int                        (*dealloc_ucontext)(struct ib_ucontext *context);
+	int                        (*mmap)(struct ib_ucontext *context,
+					   struct vm_area_struct *vma);
+	struct ib_pd *             (*alloc_pd)(struct ib_device *device,
+					       struct ib_ucontext *context,
+					       struct ib_udata *udata);
 	int                        (*dealloc_pd)(struct ib_pd *pd);
 	struct ib_ah *             (*create_ah)(struct ib_pd *pd,
 						struct ib_ah_attr *ah_attr);
@@ -747,7 +818,8 @@ struct ib_device {
 						 struct ib_ah_attr *ah_attr);
 	int                        (*destroy_ah)(struct ib_ah *ah);
 	struct ib_qp *             (*create_qp)(struct ib_pd *pd,
-						struct ib_qp_init_attr *qp_init_attr);
+						struct ib_qp_init_attr *qp_init_attr,
+						struct ib_udata *udata);
 	int                        (*modify_qp)(struct ib_qp *qp,
 						struct ib_qp_attr *qp_attr,
 						int qp_attr_mask);
@@ -762,8 +834,9 @@ struct ib_device {
 	int                        (*post_recv)(struct ib_qp *qp,
 						struct ib_recv_wr *recv_wr,
 						struct ib_recv_wr **bad_recv_wr);
-	struct ib_cq *             (*create_cq)(struct ib_device *device,
-						int cqe);
+	struct ib_cq *             (*create_cq)(struct ib_device *device, int cqe,
+						struct ib_ucontext *context,
+						struct ib_udata *udata);
 	int                        (*destroy_cq)(struct ib_cq *cq);
 	int                        (*resize_cq)(struct ib_cq *cq, int *cqe);
 	int                        (*poll_cq)(struct ib_cq *cq, int num_entries,
@@ -780,6 +853,10 @@ struct ib_device {
 						  int num_phys_buf,
 						  int mr_access_flags,
 						  u64 *iova_start);
+	struct ib_mr *             (*reg_user_mr)(struct ib_pd *pd,
+						  struct ib_umem *region,
+						  int mr_access_flags,
+						  struct ib_udata *udata);
 	int                        (*query_mr)(struct ib_mr *mr,
 					       struct ib_mr_attr *mr_attr);
 	int                        (*dereg_mr)(struct ib_mr *mr);
@@ -817,6 +894,7 @@ struct ib_device {
 						      struct ib_mad *in_mad,
 						      struct ib_mad *out_mad);
 
+	struct module               *owner;
 	struct class_device          class_dev;
 	struct kobject               ports_parent;
 	struct list_head             port_list;
@@ -852,6 +930,16 @@ void *ib_get_client_data(struct ib_device *device, struct ib_client *client);
 void ib_set_client_data(struct ib_device *device, struct ib_client *client,
 			void *data);
 
+static inline int ib_copy_from_udata(void *dest, struct ib_udata *udata, size_t len)
+{
+	return copy_from_user(dest, udata->inbuf, len) ? -EFAULT : 0;
+}
+
+static inline int ib_copy_to_udata(struct ib_udata *udata, void *src, size_t len)
+{
+	return copy_to_user(udata->outbuf, src, len) ? -EFAULT : 0;
+}
+
 int ib_register_event_handler  (struct ib_event_handler *event_handler);
 int ib_unregister_event_handler(struct ib_event_handler *event_handler);
 void ib_dispatch_event(struct ib_event *event);
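
For the opposite direction, user to kernel, here is a sketch (again not from this patch) of how a driver's create_cq method might use ib_copy_from_udata() to read a device-specific command passed down by its userspace library. struct my_create_cq_cmd, struct my_cq and all field names are hypothetical; only the create_cq signature, ib_ucontext, ib_udata and ib_copy_from_udata() are defined by the header above.

	/*
	 * Hypothetical driver code -- not part of this patch.  Reads a
	 * vendor-specific request out of the ib_udata input buffer when the CQ
	 * is being created on behalf of a userspace process.
	 */
	struct my_create_cq_cmd {
		u64	buf_addr;	/* userspace address of the CQ buffer (hypothetical) */
		u32	db_index;	/* doorbell record index (hypothetical) */
		u32	reserved;
	};

	struct my_cq {
		struct ib_cq	ibcq;	/* generic CQ embedded in the driver's CQ */
		/* ... driver-private CQ state ... */
	};

	static struct ib_cq *my_create_cq(struct ib_device *ibdev, int cqe,
					  struct ib_ucontext *context,
					  struct ib_udata *udata)
	{
		struct my_create_cq_cmd cmd;
		struct my_cq *cq;

		if (context) {
			/* Userspace caller: pull the request from the input buffer. */
			if (ib_copy_from_udata(&cmd, udata, sizeof cmd))
				return ERR_PTR(-EFAULT);
		}

		cq = kmalloc(sizeof *cq, GFP_KERNEL);
		if (!cq)
			return ERR_PTR(-ENOMEM);

		/*
		 * ... program the hardware CQ, using cmd.buf_addr/cmd.db_index when
		 * context != NULL, or kernel-allocated buffers otherwise ...
		 */

		cq->ibcq.cqe = cqe;
		return &cq->ibcq;
	}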