author     Linus Torvalds <torvalds@woody.osdl.org>  2006-12-12 21:52:31 -0500
committer  Linus Torvalds <torvalds@woody.osdl.org>  2006-12-12 21:52:31 -0500
commit     b1ef951e8199d1c59f14dbe0fa22974ed57a3b48 (patch)
tree       86a16643358339c23e3d7a9e608fcc90a18d2c84
parent     775ba7ad491a154f99871fe603f03366e84ae159 (diff)
parent     82b399133b6ebf667ee635fc69ef26b61eede4bc (diff)
Merge branch 'for-linus' of master.kernel.org:/pub/scm/linux/kernel/git/roland/infiniband
* 'for-linus' of master.kernel.org:/pub/scm/linux/kernel/git/roland/infiniband:
  IPoIB: Make sure struct ipoib_neigh.queue is always initialized
  IB/iser: Use the new verbs DMA mapping functions
  IB/srp: Use new verbs IB DMA mapping functions
  IPoIB: Use the new verbs DMA mapping functions
  IB/core: Use the new verbs DMA mapping functions
  IB/ipath: Implement new verbs DMA mapping functions
  IB: Add DMA mapping functions to allow device drivers to interpose
  RDMA/cma: Export rdma cm interface to userspace
  RDMA/cma: Add support for RDMA_PS_UDP
  RDMA/cma: Allow early transition to RTS to handle lost CM messages
  RDMA/cma: Report connect info with connect events
  RDMA/cma: Remove unneeded qp_type parameter from rdma_cm
  IB/ipath: Fix IRQ for PCI Express HCAs
  RDMA/amso1100: Fix memory leak in c2_qp_modify()
  IB/iser: Remove unused "write-only" variables
  IB/ipath: Remove unused "write-only" variables
  IB/fmr: ib_flush_fmr_pool() may wait too long
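The headline change in this pull is the new ib_dma_*() mapping layer: instead of calling dma_map_single() on device->dma_device directly, the core and the ULPs now map through the struct ib_device itself, so drivers such as ipath can interpose software DMA handling, and unmap handles are kept in plain u64 fields rather than DECLARE_PCI_UNMAP_ADDR. A minimal sketch of the converted pattern, mirroring the mad.c hunks below; the error check assumes the ib_dma_mapping_error() helper added to include/rdma/ib_verbs.h in the same series, and the example_* names are illustrative, not from the patch:

#include <rdma/ib_verbs.h>

/* Map a send buffer through the device's DMA ops; keep the handle in a
 * plain u64 for the later unmap (as mad_priv.h now does). */
static int example_map_buf(struct ib_device *dev, void *buf, size_t len,
			   u64 *mapping)
{
	u64 addr = ib_dma_map_single(dev, buf, len, DMA_TO_DEVICE);

	if (ib_dma_mapping_error(dev, addr))	/* assumed helper from this series */
		return -ENOMEM;

	*mapping = addr;
	return 0;
}

static void example_unmap_buf(struct ib_device *dev, u64 mapping, size_t len)
{
	ib_dma_unmap_single(dev, mapping, len, DMA_TO_DEVICE);
}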
-rw-r--r--  drivers/infiniband/core/Makefile              |   6
-rw-r--r--  drivers/infiniband/core/cm.c                  |   4
-rw-r--r--  drivers/infiniband/core/cma.c                 | 416
-rw-r--r--  drivers/infiniband/core/fmr_pool.c            |  12
-rw-r--r--  drivers/infiniband/core/mad.c                 |  90
-rw-r--r--  drivers/infiniband/core/mad_priv.h            |   6
-rw-r--r--  drivers/infiniband/core/ucma.c                | 874
-rw-r--r--  drivers/infiniband/core/uverbs_marshall.c     |   5
-rw-r--r--  drivers/infiniband/core/uverbs_mem.c          |  12
-rw-r--r--  drivers/infiniband/hw/amso1100/c2_qp.c        |  13
-rw-r--r--  drivers/infiniband/hw/ipath/Makefile          |   1
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_dma.c       | 189
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_driver.c    |   4
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_file_ops.c  |   5
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_iba6110.c   |   3
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_iba6120.c   |   8
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_init_chip.c |   3
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_intr.c      |   3
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_keys.c      |   8
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_mr.c        |   7
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_sysfs.c     |   3
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_verbs.c     |   1
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_verbs.h     |   2
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib.h          |   4
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_ib.c       |  75
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_main.c     |   3
-rw-r--r--  drivers/infiniband/ulp/iser/iscsi_iser.h      |   2
-rw-r--r--  drivers/infiniband/ulp/iser/iser_initiator.c  |   4
-rw-r--r--  drivers/infiniband/ulp/iser/iser_memory.c     | 125
-rw-r--r--  drivers/infiniband/ulp/srp/ib_srp.c           |  81
-rw-r--r--  drivers/infiniband/ulp/srp/ib_srp.h           |   2
-rw-r--r--  include/rdma/ib_marshall.h                    |   5
-rw-r--r--  include/rdma/ib_verbs.h                       | 253
-rw-r--r--  include/rdma/rdma_cm.h                        |  62
-rw-r--r--  include/rdma/rdma_cm_ib.h                     |   3
-rw-r--r--  include/rdma/rdma_user_cm.h                   | 206
36 files changed, 2146 insertions(+), 354 deletions(-)
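Before the hunks themselves, a hedged sketch of the consumer side implied by the cma.c rework: event handlers now receive a filled-in struct rdma_cm_event (the old cma_notify_user() tuple of type/status/data/len is gone), connect parameters ride in event->param.conn or event->param.ud for the new RDMA_PS_UDP port space, and returning non-zero from the handler still destroys the CM ID. The handler name and messages below are illustrative only:

#include <rdma/rdma_cm.h>

static int example_cma_handler(struct rdma_cm_id *id,
			       struct rdma_cm_event *event)
{
	switch (event->event) {
	case RDMA_CM_EVENT_CONNECT_REQUEST:
		/* connection parameters now arrive in the event itself */
		printk(KERN_INFO "connect req: %d bytes private data\n",
		       event->param.conn.private_data_len);
		return 0;	/* accept later via rdma_accept() */
	case RDMA_CM_EVENT_ESTABLISHED:
		if (id->ps == RDMA_PS_UDP)	/* new UD port space */
			printk(KERN_INFO "remote qpn %u qkey 0x%x\n",
			       event->param.ud.qp_num,
			       event->param.ud.qkey);
		return 0;
	default:
		return 0;	/* non-zero here would destroy the id */
	}
}

/* usage: id = rdma_create_id(example_cma_handler, NULL, RDMA_PS_UDP); */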
diff --git a/drivers/infiniband/core/Makefile b/drivers/infiniband/core/Makefile
index 163d991eb8c9..50fb1cd447b7 100644
--- a/drivers/infiniband/core/Makefile
+++ b/drivers/infiniband/core/Makefile
@@ -1,9 +1,11 @@
 infiniband-$(CONFIG_INFINIBAND_ADDR_TRANS) := ib_addr.o rdma_cm.o
+user_access-$(CONFIG_INFINIBAND_ADDR_TRANS) := rdma_ucm.o
 
 obj-$(CONFIG_INFINIBAND) += ib_core.o ib_mad.o ib_sa.o \
 				ib_cm.o iw_cm.o $(infiniband-y)
 obj-$(CONFIG_INFINIBAND_USER_MAD) += ib_umad.o
-obj-$(CONFIG_INFINIBAND_USER_ACCESS) += ib_uverbs.o ib_ucm.o
+obj-$(CONFIG_INFINIBAND_USER_ACCESS) += ib_uverbs.o ib_ucm.o \
+				$(user_access-y)
 
 ib_core-y := packer.o ud_header.o verbs.o sysfs.o \
 				device.o fmr_pool.o cache.o
@@ -18,6 +20,8 @@ iw_cm-y := iwcm.o
 
 rdma_cm-y := cma.o
 
+rdma_ucm-y := ucma.o
+
 ib_addr-y := addr.o
 
 ib_umad-y := user_mad.o
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index 79c937bf6962..d446998b12a4 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -3289,6 +3289,10 @@ static int cm_init_qp_rts_attr(struct cm_id_private *cm_id_priv,
 
 	spin_lock_irqsave(&cm_id_priv->lock, flags);
 	switch (cm_id_priv->id.state) {
+	/* Allow transition to RTS before sending REP */
+	case IB_CM_REQ_RCVD:
+	case IB_CM_MRA_REQ_SENT:
+
 	case IB_CM_REP_RCVD:
 	case IB_CM_MRA_REP_SENT:
 	case IB_CM_REP_SENT:
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 985a6b564d8f..533193d4e5df 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -70,6 +70,7 @@ static DEFINE_MUTEX(lock);
 static struct workqueue_struct *cma_wq;
 static DEFINE_IDR(sdp_ps);
 static DEFINE_IDR(tcp_ps);
+static DEFINE_IDR(udp_ps);
 
 struct cma_device {
 	struct list_head list;
@@ -133,7 +134,6 @@ struct rdma_id_private {
 
 	u32 seq_num;
 	u32 qp_num;
-	enum ib_qp_type qp_type;
 	u8 srq;
 };
 
@@ -392,7 +392,6 @@ int rdma_create_qp(struct rdma_cm_id *id, struct ib_pd *pd,
 
 	id->qp = qp;
 	id_priv->qp_num = qp->qp_num;
-	id_priv->qp_type = qp->qp_type;
 	id_priv->srq = (qp->srq != NULL);
 	return 0;
 err:
@@ -510,9 +509,17 @@ static inline int cma_any_addr(struct sockaddr *addr)
 	return cma_zero_addr(addr) || cma_loopback_addr(addr);
 }
 
+static inline __be16 cma_port(struct sockaddr *addr)
+{
+	if (addr->sa_family == AF_INET)
+		return ((struct sockaddr_in *) addr)->sin_port;
+	else
+		return ((struct sockaddr_in6 *) addr)->sin6_port;
+}
+
 static inline int cma_any_port(struct sockaddr *addr)
 {
-	return !((struct sockaddr_in *) addr)->sin_port;
+	return !cma_port(addr);
 }
 
 static int cma_get_net_info(void *hdr, enum rdma_port_space ps,
@@ -594,20 +601,6 @@ static inline int cma_user_data_offset(enum rdma_port_space ps)
 	}
 }
 
-static int cma_notify_user(struct rdma_id_private *id_priv,
-			   enum rdma_cm_event_type type, int status,
-			   void *data, u8 data_len)
-{
-	struct rdma_cm_event event;
-
-	event.event = type;
-	event.status = status;
-	event.private_data = data;
-	event.private_data_len = data_len;
-
-	return id_priv->id.event_handler(&id_priv->id, &event);
-}
-
 static void cma_cancel_route(struct rdma_id_private *id_priv)
 {
 	switch (rdma_node_get_transport(id_priv->id.device->node_type)) {
@@ -776,63 +769,61 @@ static int cma_verify_rep(struct rdma_id_private *id_priv, void *data)
 	return 0;
 }
 
-static int cma_rtu_recv(struct rdma_id_private *id_priv)
+static void cma_set_rep_event_data(struct rdma_cm_event *event,
+				   struct ib_cm_rep_event_param *rep_data,
+				   void *private_data)
 {
-	int ret;
-
-	ret = cma_modify_qp_rts(&id_priv->id);
-	if (ret)
-		goto reject;
-
-	return 0;
-reject:
-	cma_modify_qp_err(&id_priv->id);
-	ib_send_cm_rej(id_priv->cm_id.ib, IB_CM_REJ_CONSUMER_DEFINED,
-		       NULL, 0, NULL, 0);
-	return ret;
+	event->param.conn.private_data = private_data;
+	event->param.conn.private_data_len = IB_CM_REP_PRIVATE_DATA_SIZE;
+	event->param.conn.responder_resources = rep_data->responder_resources;
+	event->param.conn.initiator_depth = rep_data->initiator_depth;
+	event->param.conn.flow_control = rep_data->flow_control;
+	event->param.conn.rnr_retry_count = rep_data->rnr_retry_count;
+	event->param.conn.srq = rep_data->srq;
+	event->param.conn.qp_num = rep_data->remote_qpn;
 }
 
 static int cma_ib_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
 {
 	struct rdma_id_private *id_priv = cm_id->context;
-	enum rdma_cm_event_type event;
-	u8 private_data_len = 0;
-	int ret = 0, status = 0;
+	struct rdma_cm_event event;
+	int ret = 0;
 
 	atomic_inc(&id_priv->dev_remove);
 	if (!cma_comp(id_priv, CMA_CONNECT))
 		goto out;
 
+	memset(&event, 0, sizeof event);
 	switch (ib_event->event) {
 	case IB_CM_REQ_ERROR:
 	case IB_CM_REP_ERROR:
-		event = RDMA_CM_EVENT_UNREACHABLE;
-		status = -ETIMEDOUT;
+		event.event = RDMA_CM_EVENT_UNREACHABLE;
+		event.status = -ETIMEDOUT;
 		break;
 	case IB_CM_REP_RECEIVED:
-		status = cma_verify_rep(id_priv, ib_event->private_data);
-		if (status)
-			event = RDMA_CM_EVENT_CONNECT_ERROR;
+		event.status = cma_verify_rep(id_priv, ib_event->private_data);
+		if (event.status)
+			event.event = RDMA_CM_EVENT_CONNECT_ERROR;
 		else if (id_priv->id.qp && id_priv->id.ps != RDMA_PS_SDP) {
-			status = cma_rep_recv(id_priv);
-			event = status ? RDMA_CM_EVENT_CONNECT_ERROR :
-					 RDMA_CM_EVENT_ESTABLISHED;
+			event.status = cma_rep_recv(id_priv);
+			event.event = event.status ? RDMA_CM_EVENT_CONNECT_ERROR :
+						     RDMA_CM_EVENT_ESTABLISHED;
 		} else
-			event = RDMA_CM_EVENT_CONNECT_RESPONSE;
-		private_data_len = IB_CM_REP_PRIVATE_DATA_SIZE;
+			event.event = RDMA_CM_EVENT_CONNECT_RESPONSE;
+		cma_set_rep_event_data(&event, &ib_event->param.rep_rcvd,
+				       ib_event->private_data);
 		break;
 	case IB_CM_RTU_RECEIVED:
-		status = cma_rtu_recv(id_priv);
-		event = status ? RDMA_CM_EVENT_CONNECT_ERROR :
-				 RDMA_CM_EVENT_ESTABLISHED;
+	case IB_CM_USER_ESTABLISHED:
+		event.event = RDMA_CM_EVENT_ESTABLISHED;
 		break;
 	case IB_CM_DREQ_ERROR:
-		status = -ETIMEDOUT; /* fall through */
+		event.status = -ETIMEDOUT; /* fall through */
 	case IB_CM_DREQ_RECEIVED:
 	case IB_CM_DREP_RECEIVED:
 		if (!cma_comp_exch(id_priv, CMA_CONNECT, CMA_DISCONNECT))
 			goto out;
-		event = RDMA_CM_EVENT_DISCONNECTED;
+		event.event = RDMA_CM_EVENT_DISCONNECTED;
 		break;
 	case IB_CM_TIMEWAIT_EXIT:
 	case IB_CM_MRA_RECEIVED:
@@ -840,9 +831,10 @@ static int cma_ib_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
 		goto out;
 	case IB_CM_REJ_RECEIVED:
 		cma_modify_qp_err(&id_priv->id);
-		status = ib_event->param.rej_rcvd.reason;
-		event = RDMA_CM_EVENT_REJECTED;
-		private_data_len = IB_CM_REJ_PRIVATE_DATA_SIZE;
+		event.status = ib_event->param.rej_rcvd.reason;
+		event.event = RDMA_CM_EVENT_REJECTED;
+		event.param.conn.private_data = ib_event->private_data;
+		event.param.conn.private_data_len = IB_CM_REJ_PRIVATE_DATA_SIZE;
 		break;
 	default:
 		printk(KERN_ERR "RDMA CMA: unexpected IB CM event: %d",
@@ -850,8 +842,7 @@ static int cma_ib_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
 		goto out;
 	}
 
-	ret = cma_notify_user(id_priv, event, status, ib_event->private_data,
-			      private_data_len);
+	ret = id_priv->id.event_handler(&id_priv->id, &event);
 	if (ret) {
 		/* Destroy the CM ID by returning a non-zero value. */
 		id_priv->cm_id.ib = NULL;
@@ -865,8 +856,8 @@ out:
 	return ret;
 }
 
-static struct rdma_id_private *cma_new_id(struct rdma_cm_id *listen_id,
-					  struct ib_cm_event *ib_event)
+static struct rdma_id_private *cma_new_conn_id(struct rdma_cm_id *listen_id,
+					       struct ib_cm_event *ib_event)
 {
 	struct rdma_id_private *id_priv;
 	struct rdma_cm_id *id;
@@ -913,9 +904,61 @@ err:
 	return NULL;
 }
 
+static struct rdma_id_private *cma_new_udp_id(struct rdma_cm_id *listen_id,
+					      struct ib_cm_event *ib_event)
+{
+	struct rdma_id_private *id_priv;
+	struct rdma_cm_id *id;
+	union cma_ip_addr *src, *dst;
+	__u16 port;
+	u8 ip_ver;
+	int ret;
+
+	id = rdma_create_id(listen_id->event_handler, listen_id->context,
+			    listen_id->ps);
+	if (IS_ERR(id))
+		return NULL;
+
+
+	if (cma_get_net_info(ib_event->private_data, listen_id->ps,
+			     &ip_ver, &port, &src, &dst))
+		goto err;
+
+	cma_save_net_info(&id->route.addr, &listen_id->route.addr,
+			  ip_ver, port, src, dst);
+
+	ret = rdma_translate_ip(&id->route.addr.src_addr,
+				&id->route.addr.dev_addr);
+	if (ret)
+		goto err;
+
+	id_priv = container_of(id, struct rdma_id_private, id);
+	id_priv->state = CMA_CONNECT;
+	return id_priv;
+err:
+	rdma_destroy_id(id);
+	return NULL;
+}
+
+static void cma_set_req_event_data(struct rdma_cm_event *event,
+				   struct ib_cm_req_event_param *req_data,
+				   void *private_data, int offset)
+{
+	event->param.conn.private_data = private_data + offset;
+	event->param.conn.private_data_len = IB_CM_REQ_PRIVATE_DATA_SIZE - offset;
+	event->param.conn.responder_resources = req_data->responder_resources;
+	event->param.conn.initiator_depth = req_data->initiator_depth;
+	event->param.conn.flow_control = req_data->flow_control;
+	event->param.conn.retry_count = req_data->retry_count;
+	event->param.conn.rnr_retry_count = req_data->rnr_retry_count;
+	event->param.conn.srq = req_data->srq;
+	event->param.conn.qp_num = req_data->remote_qpn;
+}
+
 static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
 {
 	struct rdma_id_private *listen_id, *conn_id;
+	struct rdma_cm_event event;
 	int offset, ret;
 
 	listen_id = cm_id->context;
@@ -925,7 +968,19 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
 		goto out;
 	}
 
-	conn_id = cma_new_id(&listen_id->id, ib_event);
+	memset(&event, 0, sizeof event);
+	offset = cma_user_data_offset(listen_id->id.ps);
+	event.event = RDMA_CM_EVENT_CONNECT_REQUEST;
+	if (listen_id->id.ps == RDMA_PS_UDP) {
+		conn_id = cma_new_udp_id(&listen_id->id, ib_event);
+		event.param.ud.private_data = ib_event->private_data + offset;
+		event.param.ud.private_data_len =
+				IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE - offset;
+	} else {
+		conn_id = cma_new_conn_id(&listen_id->id, ib_event);
+		cma_set_req_event_data(&event, &ib_event->param.req_rcvd,
+				       ib_event->private_data, offset);
+	}
 	if (!conn_id) {
 		ret = -ENOMEM;
 		goto out;
@@ -942,10 +997,7 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
 	cm_id->context = conn_id;
 	cm_id->cm_handler = cma_ib_handler;
 
-	offset = cma_user_data_offset(listen_id->id.ps);
-	ret = cma_notify_user(conn_id, RDMA_CM_EVENT_CONNECT_REQUEST, 0,
-			      ib_event->private_data + offset,
-			      IB_CM_REQ_PRIVATE_DATA_SIZE - offset);
+	ret = conn_id->id.event_handler(&conn_id->id, &event);
 	if (!ret)
 		goto out;
 
@@ -964,8 +1016,7 @@ out:
 
 static __be64 cma_get_service_id(enum rdma_port_space ps, struct sockaddr *addr)
 {
-	return cpu_to_be64(((u64)ps << 16) +
-	       be16_to_cpu(((struct sockaddr_in *) addr)->sin_port));
+	return cpu_to_be64(((u64)ps << 16) + be16_to_cpu(cma_port(addr)));
 }
 
 static void cma_set_compare_data(enum rdma_port_space ps, struct sockaddr *addr,
@@ -1021,15 +1072,16 @@ static void cma_set_compare_data(enum rdma_port_space ps, struct sockaddr *addr,
 static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event)
 {
 	struct rdma_id_private *id_priv = iw_id->context;
-	enum rdma_cm_event_type event = 0;
+	struct rdma_cm_event event;
 	struct sockaddr_in *sin;
 	int ret = 0;
 
+	memset(&event, 0, sizeof event);
 	atomic_inc(&id_priv->dev_remove);
 
 	switch (iw_event->event) {
 	case IW_CM_EVENT_CLOSE:
-		event = RDMA_CM_EVENT_DISCONNECTED;
+		event.event = RDMA_CM_EVENT_DISCONNECTED;
 		break;
 	case IW_CM_EVENT_CONNECT_REPLY:
 		sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr;
@@ -1037,20 +1089,21 @@ static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event)
 		sin = (struct sockaddr_in *) &id_priv->id.route.addr.dst_addr;
 		*sin = iw_event->remote_addr;
 		if (iw_event->status)
-			event = RDMA_CM_EVENT_REJECTED;
+			event.event = RDMA_CM_EVENT_REJECTED;
 		else
-			event = RDMA_CM_EVENT_ESTABLISHED;
+			event.event = RDMA_CM_EVENT_ESTABLISHED;
 		break;
 	case IW_CM_EVENT_ESTABLISHED:
-		event = RDMA_CM_EVENT_ESTABLISHED;
+		event.event = RDMA_CM_EVENT_ESTABLISHED;
 		break;
 	default:
 		BUG_ON(1);
 	}
 
-	ret = cma_notify_user(id_priv, event, iw_event->status,
-			      iw_event->private_data,
-			      iw_event->private_data_len);
+	event.status = iw_event->status;
+	event.param.conn.private_data = iw_event->private_data;
+	event.param.conn.private_data_len = iw_event->private_data_len;
+	ret = id_priv->id.event_handler(&id_priv->id, &event);
 	if (ret) {
 		/* Destroy the CM ID by returning a non-zero value. */
 		id_priv->cm_id.iw = NULL;
@@ -1071,6 +1124,7 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id,
 	struct rdma_id_private *listen_id, *conn_id;
 	struct sockaddr_in *sin;
 	struct net_device *dev = NULL;
+	struct rdma_cm_event event;
 	int ret;
 
 	listen_id = cm_id->context;
@@ -1124,9 +1178,11 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id,
 	sin = (struct sockaddr_in *) &new_cm_id->route.addr.dst_addr;
 	*sin = iw_event->remote_addr;
 
-	ret = cma_notify_user(conn_id, RDMA_CM_EVENT_CONNECT_REQUEST, 0,
-			      iw_event->private_data,
-			      iw_event->private_data_len);
+	memset(&event, 0, sizeof event);
+	event.event = RDMA_CM_EVENT_CONNECT_REQUEST;
+	event.param.conn.private_data = iw_event->private_data;
+	event.param.conn.private_data_len = iw_event->private_data_len;
+	ret = conn_id->id.event_handler(&conn_id->id, &event);
 	if (ret) {
 		/* User wants to destroy the CM ID */
 		conn_id->cm_id.iw = NULL;
@@ -1515,8 +1571,9 @@ static void addr_handler(int status, struct sockaddr *src_addr,
 			 struct rdma_dev_addr *dev_addr, void *context)
 {
 	struct rdma_id_private *id_priv = context;
-	enum rdma_cm_event_type event;
+	struct rdma_cm_event event;
 
+	memset(&event, 0, sizeof event);
 	atomic_inc(&id_priv->dev_remove);
 
 	/*
@@ -1536,14 +1593,15 @@ static void addr_handler(int status, struct sockaddr *src_addr,
 	if (status) {
 		if (!cma_comp_exch(id_priv, CMA_ADDR_RESOLVED, CMA_ADDR_BOUND))
 			goto out;
-		event = RDMA_CM_EVENT_ADDR_ERROR;
+		event.event = RDMA_CM_EVENT_ADDR_ERROR;
+		event.status = status;
 	} else {
 		memcpy(&id_priv->id.route.addr.src_addr, src_addr,
 		       ip_addr_size(src_addr));
-		event = RDMA_CM_EVENT_ADDR_RESOLVED;
+		event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
 	}
 
-	if (cma_notify_user(id_priv, event, status, NULL, 0)) {
+	if (id_priv->id.event_handler(&id_priv->id, &event)) {
 		cma_exch(id_priv, CMA_DESTROYING);
 		cma_release_remove(id_priv);
 		cma_deref_id(id_priv);
@@ -1733,6 +1791,9 @@ static int cma_get_port(struct rdma_id_private *id_priv)
 	case RDMA_PS_TCP:
 		ps = &tcp_ps;
 		break;
+	case RDMA_PS_UDP:
+		ps = &udp_ps;
+		break;
 	default:
 		return -EPROTONOSUPPORT;
 	}
@@ -1821,6 +1882,110 @@ static int cma_format_hdr(void *hdr, enum rdma_port_space ps,
 	return 0;
 }
 
+static int cma_sidr_rep_handler(struct ib_cm_id *cm_id,
+				struct ib_cm_event *ib_event)
+{
+	struct rdma_id_private *id_priv = cm_id->context;
+	struct rdma_cm_event event;
+	struct ib_cm_sidr_rep_event_param *rep = &ib_event->param.sidr_rep_rcvd;
+	int ret = 0;
+
+	memset(&event, 0, sizeof event);
+	atomic_inc(&id_priv->dev_remove);
+	if (!cma_comp(id_priv, CMA_CONNECT))
+		goto out;
+
+	switch (ib_event->event) {
+	case IB_CM_SIDR_REQ_ERROR:
+		event.event = RDMA_CM_EVENT_UNREACHABLE;
+		event.status = -ETIMEDOUT;
+		break;
+	case IB_CM_SIDR_REP_RECEIVED:
+		event.param.ud.private_data = ib_event->private_data;
+		event.param.ud.private_data_len = IB_CM_SIDR_REP_PRIVATE_DATA_SIZE;
+		if (rep->status != IB_SIDR_SUCCESS) {
+			event.event = RDMA_CM_EVENT_UNREACHABLE;
+			event.status = ib_event->param.sidr_rep_rcvd.status;
+			break;
+		}
+		if (rep->qkey != RDMA_UD_QKEY) {
+			event.event = RDMA_CM_EVENT_UNREACHABLE;
+			event.status = -EINVAL;
+			break;
+		}
+		ib_init_ah_from_path(id_priv->id.device, id_priv->id.port_num,
+				     id_priv->id.route.path_rec,
+				     &event.param.ud.ah_attr);
+		event.param.ud.qp_num = rep->qpn;
+		event.param.ud.qkey = rep->qkey;
+		event.event = RDMA_CM_EVENT_ESTABLISHED;
+		event.status = 0;
+		break;
+	default:
+		printk(KERN_ERR "RDMA CMA: unexpected IB CM event: %d",
+		       ib_event->event);
+		goto out;
+	}
+
+	ret = id_priv->id.event_handler(&id_priv->id, &event);
+	if (ret) {
+		/* Destroy the CM ID by returning a non-zero value. */
+		id_priv->cm_id.ib = NULL;
+		cma_exch(id_priv, CMA_DESTROYING);
+		cma_release_remove(id_priv);
+		rdma_destroy_id(&id_priv->id);
+		return ret;
+	}
+out:
+	cma_release_remove(id_priv);
+	return ret;
+}
+
+static int cma_resolve_ib_udp(struct rdma_id_private *id_priv,
+			      struct rdma_conn_param *conn_param)
+{
+	struct ib_cm_sidr_req_param req;
+	struct rdma_route *route;
+	int ret;
+
+	req.private_data_len = sizeof(struct cma_hdr) +
+			       conn_param->private_data_len;
+	req.private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
+	if (!req.private_data)
+		return -ENOMEM;
+
+	if (conn_param->private_data && conn_param->private_data_len)
+		memcpy((void *) req.private_data + sizeof(struct cma_hdr),
+		       conn_param->private_data, conn_param->private_data_len);
+
+	route = &id_priv->id.route;
+	ret = cma_format_hdr((void *) req.private_data, id_priv->id.ps, route);
+	if (ret)
+		goto out;
+
+	id_priv->cm_id.ib = ib_create_cm_id(id_priv->id.device,
+					    cma_sidr_rep_handler, id_priv);
+	if (IS_ERR(id_priv->cm_id.ib)) {
+		ret = PTR_ERR(id_priv->cm_id.ib);
+		goto out;
+	}
+
+	req.path = route->path_rec;
+	req.service_id = cma_get_service_id(id_priv->id.ps,
+					    &route->addr.dst_addr);
+	req.timeout_ms = 1 << (CMA_CM_RESPONSE_TIMEOUT - 8);
+	req.max_cm_retries = CMA_MAX_CM_RETRIES;
+
+	ret = ib_send_cm_sidr_req(id_priv->cm_id.ib, &req);
+	if (ret) {
+		ib_destroy_cm_id(id_priv->cm_id.ib);
+		id_priv->cm_id.ib = NULL;
+	}
+out:
+	kfree(req.private_data);
+	return ret;
+}
+
 static int cma_connect_ib(struct rdma_id_private *id_priv,
 			  struct rdma_conn_param *conn_param)
 {
@@ -1860,7 +2025,7 @@ static int cma_connect_ib(struct rdma_id_private *id_priv,
 	req.service_id = cma_get_service_id(id_priv->id.ps,
 					    &route->addr.dst_addr);
 	req.qp_num = id_priv->qp_num;
-	req.qp_type = id_priv->qp_type;
+	req.qp_type = IB_QPT_RC;
 	req.starting_psn = id_priv->seq_num;
 	req.responder_resources = conn_param->responder_resources;
 	req.initiator_depth = conn_param->initiator_depth;
@@ -1937,13 +2102,15 @@ int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
 
 	if (!id->qp) {
 		id_priv->qp_num = conn_param->qp_num;
-		id_priv->qp_type = conn_param->qp_type;
 		id_priv->srq = conn_param->srq;
 	}
 
 	switch (rdma_node_get_transport(id->device->node_type)) {
 	case RDMA_TRANSPORT_IB:
-		ret = cma_connect_ib(id_priv, conn_param);
+		if (id->ps == RDMA_PS_UDP)
+			ret = cma_resolve_ib_udp(id_priv, conn_param);
+		else
+			ret = cma_connect_ib(id_priv, conn_param);
 		break;
 	case RDMA_TRANSPORT_IWARP:
 		ret = cma_connect_iw(id_priv, conn_param);
@@ -1966,11 +2133,25 @@ static int cma_accept_ib(struct rdma_id_private *id_priv,
 			 struct rdma_conn_param *conn_param)
 {
 	struct ib_cm_rep_param rep;
-	int ret;
+	struct ib_qp_attr qp_attr;
+	int qp_attr_mask, ret;
 
-	ret = cma_modify_qp_rtr(&id_priv->id);
-	if (ret)
-		return ret;
+	if (id_priv->id.qp) {
+		ret = cma_modify_qp_rtr(&id_priv->id);
+		if (ret)
+			goto out;
+
+		qp_attr.qp_state = IB_QPS_RTS;
+		ret = ib_cm_init_qp_attr(id_priv->cm_id.ib, &qp_attr,
+					 &qp_attr_mask);
+		if (ret)
+			goto out;
+
+		qp_attr.max_rd_atomic = conn_param->initiator_depth;
+		ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
+		if (ret)
+			goto out;
+	}
 
 	memset(&rep, 0, sizeof rep);
 	rep.qp_num = id_priv->qp_num;
@@ -1985,7 +2166,9 @@ static int cma_accept_ib(struct rdma_id_private *id_priv,
 	rep.rnr_retry_count = conn_param->rnr_retry_count;
 	rep.srq = id_priv->srq ? 1 : 0;
 
-	return ib_send_cm_rep(id_priv->cm_id.ib, &rep);
+	ret = ib_send_cm_rep(id_priv->cm_id.ib, &rep);
+out:
+	return ret;
 }
 
 static int cma_accept_iw(struct rdma_id_private *id_priv,
@@ -2010,6 +2193,24 @@ static int cma_accept_iw(struct rdma_id_private *id_priv,
 	return iw_cm_accept(id_priv->cm_id.iw, &iw_param);
 }
 
+static int cma_send_sidr_rep(struct rdma_id_private *id_priv,
+			     enum ib_cm_sidr_status status,
+			     const void *private_data, int private_data_len)
+{
+	struct ib_cm_sidr_rep_param rep;
+
+	memset(&rep, 0, sizeof rep);
+	rep.status = status;
+	if (status == IB_SIDR_SUCCESS) {
+		rep.qp_num = id_priv->qp_num;
+		rep.qkey = RDMA_UD_QKEY;
+	}
+	rep.private_data = private_data;
+	rep.private_data_len = private_data_len;
+
+	return ib_send_cm_sidr_rep(id_priv->cm_id.ib, &rep);
+}
+
 int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
 {
 	struct rdma_id_private *id_priv;
@@ -2021,13 +2222,16 @@ int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
 
 	if (!id->qp && conn_param) {
 		id_priv->qp_num = conn_param->qp_num;
-		id_priv->qp_type = conn_param->qp_type;
 		id_priv->srq = conn_param->srq;
 	}
 
 	switch (rdma_node_get_transport(id->device->node_type)) {
 	case RDMA_TRANSPORT_IB:
-		if (conn_param)
+		if (id->ps == RDMA_PS_UDP)
+			ret = cma_send_sidr_rep(id_priv, IB_SIDR_SUCCESS,
+						conn_param->private_data,
+						conn_param->private_data_len);
+		else if (conn_param)
 			ret = cma_accept_ib(id_priv, conn_param);
 		else
 			ret = cma_rep_recv(id_priv);
@@ -2051,6 +2255,27 @@ reject:
 }
 EXPORT_SYMBOL(rdma_accept);
 
+int rdma_notify(struct rdma_cm_id *id, enum ib_event_type event)
+{
+	struct rdma_id_private *id_priv;
+	int ret;
+
+	id_priv = container_of(id, struct rdma_id_private, id);
+	if (!cma_comp(id_priv, CMA_CONNECT))
+		return -EINVAL;
+
+	switch (id->device->node_type) {
+	case RDMA_NODE_IB_CA:
+		ret = ib_cm_notify(id_priv->cm_id.ib, event);
+		break;
+	default:
+		ret = 0;
+		break;
+	}
+	return ret;
+}
+EXPORT_SYMBOL(rdma_notify);
+
 int rdma_reject(struct rdma_cm_id *id, const void *private_data,
 		u8 private_data_len)
 {
@@ -2063,9 +2288,13 @@ int rdma_reject(struct rdma_cm_id *id, const void *private_data,
 
 	switch (rdma_node_get_transport(id->device->node_type)) {
 	case RDMA_TRANSPORT_IB:
-		ret = ib_send_cm_rej(id_priv->cm_id.ib,
-				     IB_CM_REJ_CONSUMER_DEFINED, NULL, 0,
-				     private_data, private_data_len);
+		if (id->ps == RDMA_PS_UDP)
+			ret = cma_send_sidr_rep(id_priv, IB_SIDR_REJECT,
+						private_data, private_data_len);
+		else
+			ret = ib_send_cm_rej(id_priv->cm_id.ib,
+					     IB_CM_REJ_CONSUMER_DEFINED, NULL,
+					     0, private_data, private_data_len);
 		break;
 	case RDMA_TRANSPORT_IWARP:
 		ret = iw_cm_reject(id_priv->cm_id.iw,
@@ -2136,6 +2365,7 @@ static void cma_add_one(struct ib_device *device)
 
 static int cma_remove_id_dev(struct rdma_id_private *id_priv)
 {
+	struct rdma_cm_event event;
 	enum cma_state state;
 
 	/* Record that we want to remove the device */
@@ -2150,8 +2380,9 @@ static int cma_remove_id_dev(struct rdma_id_private *id_priv)
 	if (!cma_comp(id_priv, CMA_DEVICE_REMOVAL))
 		return 0;
 
-	return cma_notify_user(id_priv, RDMA_CM_EVENT_DEVICE_REMOVAL,
-			       0, NULL, 0);
+	memset(&event, 0, sizeof event);
+	event.event = RDMA_CM_EVENT_DEVICE_REMOVAL;
+	return id_priv->id.event_handler(&id_priv->id, &event);
 }
 
 static void cma_process_remove(struct cma_device *cma_dev)
@@ -2233,6 +2464,7 @@ static void cma_cleanup(void)
 	destroy_workqueue(cma_wq);
 	idr_destroy(&sdp_ps);
 	idr_destroy(&tcp_ps);
+	idr_destroy(&udp_ps);
 }
 
 module_init(cma_init);
diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c
index 86a3b2d401db..8926a2bd4a87 100644
--- a/drivers/infiniband/core/fmr_pool.c
+++ b/drivers/infiniband/core/fmr_pool.c
@@ -394,20 +394,12 @@ EXPORT_SYMBOL(ib_destroy_fmr_pool);
  */
 int ib_flush_fmr_pool(struct ib_fmr_pool *pool)
 {
-	int serial;
-
-	atomic_inc(&pool->req_ser);
-	/*
-	 * It's OK if someone else bumps req_ser again here -- we'll
-	 * just wait a little longer.
-	 */
-	serial = atomic_read(&pool->req_ser);
+	int serial = atomic_inc_return(&pool->req_ser);
 
 	wake_up_process(pool->thread);
 
 	if (wait_event_interruptible(pool->force_wait,
-				     atomic_read(&pool->flush_ser) -
-				     atomic_read(&pool->req_ser) >= 0))
+				     atomic_read(&pool->flush_ser) - serial >= 0))
 		return -EINTR;
 
 	return 0;
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index 15f38d94b3a8..5ed141ebd1c8 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -998,17 +998,17 @@ int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr)
 
 	mad_agent = mad_send_wr->send_buf.mad_agent;
 	sge = mad_send_wr->sg_list;
-	sge[0].addr = dma_map_single(mad_agent->device->dma_device,
-				     mad_send_wr->send_buf.mad,
-				     sge[0].length,
-				     DMA_TO_DEVICE);
-	pci_unmap_addr_set(mad_send_wr, header_mapping, sge[0].addr);
+	sge[0].addr = ib_dma_map_single(mad_agent->device,
+					mad_send_wr->send_buf.mad,
+					sge[0].length,
+					DMA_TO_DEVICE);
+	mad_send_wr->header_mapping = sge[0].addr;
 
-	sge[1].addr = dma_map_single(mad_agent->device->dma_device,
-				     ib_get_payload(mad_send_wr),
-				     sge[1].length,
-				     DMA_TO_DEVICE);
-	pci_unmap_addr_set(mad_send_wr, payload_mapping, sge[1].addr);
+	sge[1].addr = ib_dma_map_single(mad_agent->device,
+					ib_get_payload(mad_send_wr),
+					sge[1].length,
+					DMA_TO_DEVICE);
+	mad_send_wr->payload_mapping = sge[1].addr;
 
 	spin_lock_irqsave(&qp_info->send_queue.lock, flags);
 	if (qp_info->send_queue.count < qp_info->send_queue.max_active) {
@@ -1026,12 +1026,12 @@ int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr)
 	}
 	spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
 	if (ret) {
-		dma_unmap_single(mad_agent->device->dma_device,
-				 pci_unmap_addr(mad_send_wr, header_mapping),
-				 sge[0].length, DMA_TO_DEVICE);
-		dma_unmap_single(mad_agent->device->dma_device,
-				 pci_unmap_addr(mad_send_wr, payload_mapping),
-				 sge[1].length, DMA_TO_DEVICE);
+		ib_dma_unmap_single(mad_agent->device,
+				    mad_send_wr->header_mapping,
+				    sge[0].length, DMA_TO_DEVICE);
+		ib_dma_unmap_single(mad_agent->device,
+				    mad_send_wr->payload_mapping,
+				    sge[1].length, DMA_TO_DEVICE);
 	}
 	return ret;
 }
@@ -1850,11 +1850,11 @@ static void ib_mad_recv_done_handler(struct ib_mad_port_private *port_priv,
 	mad_priv_hdr = container_of(mad_list, struct ib_mad_private_header,
 				    mad_list);
 	recv = container_of(mad_priv_hdr, struct ib_mad_private, header);
-	dma_unmap_single(port_priv->device->dma_device,
-			 pci_unmap_addr(&recv->header, mapping),
-			 sizeof(struct ib_mad_private) -
-			 sizeof(struct ib_mad_private_header),
-			 DMA_FROM_DEVICE);
+	ib_dma_unmap_single(port_priv->device,
+			    recv->header.mapping,
+			    sizeof(struct ib_mad_private) -
+			    sizeof(struct ib_mad_private_header),
+			    DMA_FROM_DEVICE);
 
 	/* Setup MAD receive work completion from "normal" work completion */
 	recv->header.wc = *wc;
@@ -2080,12 +2080,12 @@ static void ib_mad_send_done_handler(struct ib_mad_port_private *port_priv,
 	qp_info = send_queue->qp_info;
 
 retry:
-	dma_unmap_single(mad_send_wr->send_buf.mad_agent->device->dma_device,
-			 pci_unmap_addr(mad_send_wr, header_mapping),
-			 mad_send_wr->sg_list[0].length, DMA_TO_DEVICE);
-	dma_unmap_single(mad_send_wr->send_buf.mad_agent->device->dma_device,
-			 pci_unmap_addr(mad_send_wr, payload_mapping),
-			 mad_send_wr->sg_list[1].length, DMA_TO_DEVICE);
+	ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device,
+			    mad_send_wr->header_mapping,
+			    mad_send_wr->sg_list[0].length, DMA_TO_DEVICE);
+	ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device,
+			    mad_send_wr->payload_mapping,
+			    mad_send_wr->sg_list[1].length, DMA_TO_DEVICE);
 	queued_send_wr = NULL;
 	spin_lock_irqsave(&send_queue->lock, flags);
 	list_del(&mad_list->list);
@@ -2528,13 +2528,12 @@ static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
 			break;
 		}
 	}
-	sg_list.addr = dma_map_single(qp_info->port_priv->
-					device->dma_device,
-				      &mad_priv->grh,
-				      sizeof *mad_priv -
-					sizeof mad_priv->header,
-				      DMA_FROM_DEVICE);
-	pci_unmap_addr_set(&mad_priv->header, mapping, sg_list.addr);
+	sg_list.addr = ib_dma_map_single(qp_info->port_priv->device,
+					 &mad_priv->grh,
+					 sizeof *mad_priv -
+					 sizeof mad_priv->header,
+					 DMA_FROM_DEVICE);
+	mad_priv->header.mapping = sg_list.addr;
 	recv_wr.wr_id = (unsigned long)&mad_priv->header.mad_list;
 	mad_priv->header.mad_list.mad_queue = recv_queue;
 
@@ -2549,12 +2548,11 @@ static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
 		list_del(&mad_priv->header.mad_list.list);
 		recv_queue->count--;
 		spin_unlock_irqrestore(&recv_queue->lock, flags);
-		dma_unmap_single(qp_info->port_priv->device->dma_device,
-				 pci_unmap_addr(&mad_priv->header,
-						mapping),
-				 sizeof *mad_priv -
-				 sizeof mad_priv->header,
-				 DMA_FROM_DEVICE);
+		ib_dma_unmap_single(qp_info->port_priv->device,
+				    mad_priv->header.mapping,
+				    sizeof *mad_priv -
+				    sizeof mad_priv->header,
+				    DMA_FROM_DEVICE);
 		kmem_cache_free(ib_mad_cache, mad_priv);
 		printk(KERN_ERR PFX "ib_post_recv failed: %d\n", ret);
 		break;
@@ -2586,11 +2584,11 @@ static void cleanup_recv_queue(struct ib_mad_qp_info *qp_info)
 		/* Remove from posted receive MAD list */
 		list_del(&mad_list->list);
 
-		dma_unmap_single(qp_info->port_priv->device->dma_device,
-				 pci_unmap_addr(&recv->header, mapping),
-				 sizeof(struct ib_mad_private) -
-				 sizeof(struct ib_mad_private_header),
-				 DMA_FROM_DEVICE);
+		ib_dma_unmap_single(qp_info->port_priv->device,
+				    recv->header.mapping,
+				    sizeof(struct ib_mad_private) -
+				    sizeof(struct ib_mad_private_header),
+				    DMA_FROM_DEVICE);
 		kmem_cache_free(ib_mad_cache, recv);
 	}
 
diff --git a/drivers/infiniband/core/mad_priv.h b/drivers/infiniband/core/mad_priv.h
index d5548e73e068..de89717f49fe 100644
--- a/drivers/infiniband/core/mad_priv.h
+++ b/drivers/infiniband/core/mad_priv.h
@@ -73,7 +73,7 @@ struct ib_mad_private_header {
 	struct ib_mad_list_head mad_list;
 	struct ib_mad_recv_wc recv_wc;
 	struct ib_wc wc;
-	DECLARE_PCI_UNMAP_ADDR(mapping)
+	u64 mapping;
 } __attribute__ ((packed));
 
 struct ib_mad_private {
@@ -126,8 +126,8 @@ struct ib_mad_send_wr_private {
 	struct list_head agent_list;
 	struct ib_mad_agent_private *mad_agent_priv;
 	struct ib_mad_send_buf send_buf;
-	DECLARE_PCI_UNMAP_ADDR(header_mapping)
-	DECLARE_PCI_UNMAP_ADDR(payload_mapping)
+	u64 header_mapping;
+	u64 payload_mapping;
 	struct ib_send_wr send_wr;
 	struct ib_sge sg_list[IB_MAD_SEND_REQ_MAX_SG];
 	__be64 tid;
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
new file mode 100644
index 000000000000..81a5cdc5733a
--- /dev/null
+++ b/drivers/infiniband/core/ucma.c
@@ -0,0 +1,874 @@
1/*
2 * Copyright (c) 2005-2006 Intel Corporation. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#include <linux/completion.h>
34#include <linux/mutex.h>
35#include <linux/poll.h>
36#include <linux/idr.h>
37#include <linux/in.h>
38#include <linux/in6.h>
39#include <linux/miscdevice.h>
40
41#include <rdma/rdma_user_cm.h>
42#include <rdma/ib_marshall.h>
43#include <rdma/rdma_cm.h>
44
45MODULE_AUTHOR("Sean Hefty");
46MODULE_DESCRIPTION("RDMA Userspace Connection Manager Access");
47MODULE_LICENSE("Dual BSD/GPL");
48
49enum {
50 UCMA_MAX_BACKLOG = 128
51};
52
53struct ucma_file {
54 struct mutex mut;
55 struct file *filp;
56 struct list_head ctx_list;
57 struct list_head event_list;
58 wait_queue_head_t poll_wait;
59};
60
61struct ucma_context {
62 int id;
63 struct completion comp;
64 atomic_t ref;
65 int events_reported;
66 int backlog;
67
68 struct ucma_file *file;
69 struct rdma_cm_id *cm_id;
70 u64 uid;
71
72 struct list_head list;
73};
74
75struct ucma_event {
76 struct ucma_context *ctx;
77 struct list_head list;
78 struct rdma_cm_id *cm_id;
79 struct rdma_ucm_event_resp resp;
80};
81
82static DEFINE_MUTEX(mut);
83static DEFINE_IDR(ctx_idr);
84
85static inline struct ucma_context *_ucma_find_context(int id,
86 struct ucma_file *file)
87{
88 struct ucma_context *ctx;
89
90 ctx = idr_find(&ctx_idr, id);
91 if (!ctx)
92 ctx = ERR_PTR(-ENOENT);
93 else if (ctx->file != file)
94 ctx = ERR_PTR(-EINVAL);
95 return ctx;
96}
97
98static struct ucma_context *ucma_get_ctx(struct ucma_file *file, int id)
99{
100 struct ucma_context *ctx;
101
102 mutex_lock(&mut);
103 ctx = _ucma_find_context(id, file);
104 if (!IS_ERR(ctx))
105 atomic_inc(&ctx->ref);
106 mutex_unlock(&mut);
107 return ctx;
108}
109
110static void ucma_put_ctx(struct ucma_context *ctx)
111{
112 if (atomic_dec_and_test(&ctx->ref))
113 complete(&ctx->comp);
114}
115
116static struct ucma_context *ucma_alloc_ctx(struct ucma_file *file)
117{
118 struct ucma_context *ctx;
119 int ret;
120
121 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
122 if (!ctx)
123 return NULL;
124
125 atomic_set(&ctx->ref, 1);
126 init_completion(&ctx->comp);
127 ctx->file = file;
128
129 do {
130 ret = idr_pre_get(&ctx_idr, GFP_KERNEL);
131 if (!ret)
132 goto error;
133
134 mutex_lock(&mut);
135 ret = idr_get_new(&ctx_idr, ctx, &ctx->id);
136 mutex_unlock(&mut);
137 } while (ret == -EAGAIN);
138
139 if (ret)
140 goto error;
141
142 list_add_tail(&ctx->list, &file->ctx_list);
143 return ctx;
144
145error:
146 kfree(ctx);
147 return NULL;
148}
149
150static void ucma_copy_conn_event(struct rdma_ucm_conn_param *dst,
151 struct rdma_conn_param *src)
152{
153 if (src->private_data_len)
154 memcpy(dst->private_data, src->private_data,
155 src->private_data_len);
156 dst->private_data_len = src->private_data_len;
157 dst->responder_resources =src->responder_resources;
158 dst->initiator_depth = src->initiator_depth;
159 dst->flow_control = src->flow_control;
160 dst->retry_count = src->retry_count;
161 dst->rnr_retry_count = src->rnr_retry_count;
162 dst->srq = src->srq;
163 dst->qp_num = src->qp_num;
164}
165
166static void ucma_copy_ud_event(struct rdma_ucm_ud_param *dst,
167 struct rdma_ud_param *src)
168{
169 if (src->private_data_len)
170 memcpy(dst->private_data, src->private_data,
171 src->private_data_len);
172 dst->private_data_len = src->private_data_len;
173 ib_copy_ah_attr_to_user(&dst->ah_attr, &src->ah_attr);
174 dst->qp_num = src->qp_num;
175 dst->qkey = src->qkey;
176}
177
178static void ucma_set_event_context(struct ucma_context *ctx,
179 struct rdma_cm_event *event,
180 struct ucma_event *uevent)
181{
182 uevent->ctx = ctx;
183 uevent->resp.uid = ctx->uid;
184 uevent->resp.id = ctx->id;
185}
186
187static int ucma_event_handler(struct rdma_cm_id *cm_id,
188 struct rdma_cm_event *event)
189{
190 struct ucma_event *uevent;
191 struct ucma_context *ctx = cm_id->context;
192 int ret = 0;
193
194 uevent = kzalloc(sizeof(*uevent), GFP_KERNEL);
195 if (!uevent)
196 return event->event == RDMA_CM_EVENT_CONNECT_REQUEST;
197
198 uevent->cm_id = cm_id;
199 ucma_set_event_context(ctx, event, uevent);
200 uevent->resp.event = event->event;
201 uevent->resp.status = event->status;
202 if (cm_id->ps == RDMA_PS_UDP)
203 ucma_copy_ud_event(&uevent->resp.param.ud, &event->param.ud);
204 else
205 ucma_copy_conn_event(&uevent->resp.param.conn,
206 &event->param.conn);
207
208 mutex_lock(&ctx->file->mut);
209 if (event->event == RDMA_CM_EVENT_CONNECT_REQUEST) {
210 if (!ctx->backlog) {
211 ret = -EDQUOT;
212 goto out;
213 }
214 ctx->backlog--;
215 }
216 list_add_tail(&uevent->list, &ctx->file->event_list);
217 wake_up_interruptible(&ctx->file->poll_wait);
218out:
219 mutex_unlock(&ctx->file->mut);
220 return ret;
221}
222
223static ssize_t ucma_get_event(struct ucma_file *file, const char __user *inbuf,
224 int in_len, int out_len)
225{
226 struct ucma_context *ctx;
227 struct rdma_ucm_get_event cmd;
228 struct ucma_event *uevent;
229 int ret = 0;
230 DEFINE_WAIT(wait);
231
232 if (out_len < sizeof uevent->resp)
233 return -ENOSPC;
234
235 if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
236 return -EFAULT;
237
238 mutex_lock(&file->mut);
239 while (list_empty(&file->event_list)) {
240 if (file->filp->f_flags & O_NONBLOCK) {
241 ret = -EAGAIN;
242 break;
243 }
244
245 if (signal_pending(current)) {
246 ret = -ERESTARTSYS;
247 break;
248 }
249
250 prepare_to_wait(&file->poll_wait, &wait, TASK_INTERRUPTIBLE);
251 mutex_unlock(&file->mut);
252 schedule();
253 mutex_lock(&file->mut);
254 finish_wait(&file->poll_wait, &wait);
255 }
256
257 if (ret)
258 goto done;
259
260 uevent = list_entry(file->event_list.next, struct ucma_event, list);
261
262 if (uevent->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST) {
263 ctx = ucma_alloc_ctx(file);
264 if (!ctx) {
265 ret = -ENOMEM;
266 goto done;
267 }
268 uevent->ctx->backlog++;
269 ctx->cm_id = uevent->cm_id;
270 ctx->cm_id->context = ctx;
271 uevent->resp.id = ctx->id;
272 }
273
274 if (copy_to_user((void __user *)(unsigned long)cmd.response,
275 &uevent->resp, sizeof uevent->resp)) {
276 ret = -EFAULT;
277 goto done;
278 }
279
280 list_del(&uevent->list);
281 uevent->ctx->events_reported++;
282 kfree(uevent);
283done:
284 mutex_unlock(&file->mut);
285 return ret;
286}
287
288static ssize_t ucma_create_id(struct ucma_file *file,
289 const char __user *inbuf,
290 int in_len, int out_len)
291{
292 struct rdma_ucm_create_id cmd;
293 struct rdma_ucm_create_id_resp resp;
294 struct ucma_context *ctx;
295 int ret;
296
297 if (out_len < sizeof(resp))
298 return -ENOSPC;
299
300 if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
301 return -EFAULT;
302
303 mutex_lock(&file->mut);
304 ctx = ucma_alloc_ctx(file);
305 mutex_unlock(&file->mut);
306 if (!ctx)
307 return -ENOMEM;
308
309 ctx->uid = cmd.uid;
310 ctx->cm_id = rdma_create_id(ucma_event_handler, ctx, cmd.ps);
311 if (IS_ERR(ctx->cm_id)) {
312 ret = PTR_ERR(ctx->cm_id);
313 goto err1;
314 }
315
316 resp.id = ctx->id;
317 if (copy_to_user((void __user *)(unsigned long)cmd.response,
318 &resp, sizeof(resp))) {
319 ret = -EFAULT;
320 goto err2;
321 }
322 return 0;
323
324err2:
325 rdma_destroy_id(ctx->cm_id);
326err1:
327 mutex_lock(&mut);
328 idr_remove(&ctx_idr, ctx->id);
329 mutex_unlock(&mut);
330 kfree(ctx);
331 return ret;
332}
333
334static void ucma_cleanup_events(struct ucma_context *ctx)
335{
336 struct ucma_event *uevent, *tmp;
337
338 list_for_each_entry_safe(uevent, tmp, &ctx->file->event_list, list) {
339 if (uevent->ctx != ctx)
340 continue;
341
342 list_del(&uevent->list);
343
344 /* clear incoming connections. */
345 if (uevent->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST)
346 rdma_destroy_id(uevent->cm_id);
347
348 kfree(uevent);
349 }
350}
351
352static int ucma_free_ctx(struct ucma_context *ctx)
353{
354 int events_reported;
355
356 /* No new events will be generated after destroying the id. */
357 rdma_destroy_id(ctx->cm_id);
358
359 /* Cleanup events not yet reported to the user. */
360 mutex_lock(&ctx->file->mut);
361 ucma_cleanup_events(ctx);
362 list_del(&ctx->list);
363 mutex_unlock(&ctx->file->mut);
364
365 events_reported = ctx->events_reported;
366 kfree(ctx);
367 return events_reported;
368}
369
370static ssize_t ucma_destroy_id(struct ucma_file *file, const char __user *inbuf,
371 int in_len, int out_len)
372{
373 struct rdma_ucm_destroy_id cmd;
374 struct rdma_ucm_destroy_id_resp resp;
375 struct ucma_context *ctx;
376 int ret = 0;
377
378 if (out_len < sizeof(resp))
379 return -ENOSPC;
380
381 if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
382 return -EFAULT;
383
384 mutex_lock(&mut);
385 ctx = _ucma_find_context(cmd.id, file);
386 if (!IS_ERR(ctx))
387 idr_remove(&ctx_idr, ctx->id);
388 mutex_unlock(&mut);
389
390 if (IS_ERR(ctx))
391 return PTR_ERR(ctx);
392
393 ucma_put_ctx(ctx);
394 wait_for_completion(&ctx->comp);
395 resp.events_reported = ucma_free_ctx(ctx);
396
397 if (copy_to_user((void __user *)(unsigned long)cmd.response,
398 &resp, sizeof(resp)))
399 ret = -EFAULT;
400
401 return ret;
402}
403
404static ssize_t ucma_bind_addr(struct ucma_file *file, const char __user *inbuf,
405 int in_len, int out_len)
406{
407 struct rdma_ucm_bind_addr cmd;
408 struct ucma_context *ctx;
409 int ret;
410
411 if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
412 return -EFAULT;
413
414 ctx = ucma_get_ctx(file, cmd.id);
415 if (IS_ERR(ctx))
416 return PTR_ERR(ctx);
417
418 ret = rdma_bind_addr(ctx->cm_id, (struct sockaddr *) &cmd.addr);
419 ucma_put_ctx(ctx);
420 return ret;
421}
422
423static ssize_t ucma_resolve_addr(struct ucma_file *file,
424 const char __user *inbuf,
425 int in_len, int out_len)
426{
427 struct rdma_ucm_resolve_addr cmd;
428 struct ucma_context *ctx;
429 int ret;
430
431 if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
432 return -EFAULT;
433
434 ctx = ucma_get_ctx(file, cmd.id);
435 if (IS_ERR(ctx))
436 return PTR_ERR(ctx);
437
438 ret = rdma_resolve_addr(ctx->cm_id, (struct sockaddr *) &cmd.src_addr,
439 (struct sockaddr *) &cmd.dst_addr,
440 cmd.timeout_ms);
441 ucma_put_ctx(ctx);
442 return ret;
443}
444
445static ssize_t ucma_resolve_route(struct ucma_file *file,
446 const char __user *inbuf,
447 int in_len, int out_len)
448{
449 struct rdma_ucm_resolve_route cmd;
450 struct ucma_context *ctx;
451 int ret;
452
453 if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
454 return -EFAULT;
455
456 ctx = ucma_get_ctx(file, cmd.id);
457 if (IS_ERR(ctx))
458 return PTR_ERR(ctx);
459
460 ret = rdma_resolve_route(ctx->cm_id, cmd.timeout_ms);
461 ucma_put_ctx(ctx);
462 return ret;
463}
464
465static void ucma_copy_ib_route(struct rdma_ucm_query_route_resp *resp,
466 struct rdma_route *route)
467{
468 struct rdma_dev_addr *dev_addr;
469
470 resp->num_paths = route->num_paths;
471 switch (route->num_paths) {
472 case 0:
473 dev_addr = &route->addr.dev_addr;
474 ib_addr_get_dgid(dev_addr,
475 (union ib_gid *) &resp->ib_route[0].dgid);
476 ib_addr_get_sgid(dev_addr,
477 (union ib_gid *) &resp->ib_route[0].sgid);
478 resp->ib_route[0].pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr));
479 break;
480 case 2:
481 ib_copy_path_rec_to_user(&resp->ib_route[1],
482 &route->path_rec[1]);
483 /* fall through */
484 case 1:
485 ib_copy_path_rec_to_user(&resp->ib_route[0],
486 &route->path_rec[0]);
487 break;
488 default:
489 break;
490 }
491}
492
493static ssize_t ucma_query_route(struct ucma_file *file,
494 const char __user *inbuf,
495 int in_len, int out_len)
496{
497 struct rdma_ucm_query_route cmd;
498 struct rdma_ucm_query_route_resp resp;
499 struct ucma_context *ctx;
500 struct sockaddr *addr;
501 int ret = 0;
502
503 if (out_len < sizeof(resp))
504 return -ENOSPC;
505
506 if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
507 return -EFAULT;
508
509 ctx = ucma_get_ctx(file, cmd.id);
510 if (IS_ERR(ctx))
511 return PTR_ERR(ctx);
512
513 memset(&resp, 0, sizeof resp);
514 addr = &ctx->cm_id->route.addr.src_addr;
515 memcpy(&resp.src_addr, addr, addr->sa_family == AF_INET ?
516 sizeof(struct sockaddr_in) :
517 sizeof(struct sockaddr_in6));
518 addr = &ctx->cm_id->route.addr.dst_addr;
519 memcpy(&resp.dst_addr, addr, addr->sa_family == AF_INET ?
520 sizeof(struct sockaddr_in) :
521 sizeof(struct sockaddr_in6));
522 if (!ctx->cm_id->device)
523 goto out;
524
525 resp.node_guid = ctx->cm_id->device->node_guid;
526 resp.port_num = ctx->cm_id->port_num;
527 switch (rdma_node_get_transport(ctx->cm_id->device->node_type)) {
528 case RDMA_TRANSPORT_IB:
529 ucma_copy_ib_route(&resp, &ctx->cm_id->route);
530 break;
531 default:
532 break;
533 }
534
535out:
536 if (copy_to_user((void __user *)(unsigned long)cmd.response,
537 &resp, sizeof(resp)))
538 ret = -EFAULT;
539
540 ucma_put_ctx(ctx);
541 return ret;
542}
543
544static void ucma_copy_conn_param(struct rdma_conn_param *dst,
545 struct rdma_ucm_conn_param *src)
546{
547 dst->private_data = src->private_data;
548 dst->private_data_len = src->private_data_len;
549	dst->responder_resources = src->responder_resources;
550 dst->initiator_depth = src->initiator_depth;
551 dst->flow_control = src->flow_control;
552 dst->retry_count = src->retry_count;
553 dst->rnr_retry_count = src->rnr_retry_count;
554 dst->srq = src->srq;
555 dst->qp_num = src->qp_num;
556}
557
558static ssize_t ucma_connect(struct ucma_file *file, const char __user *inbuf,
559 int in_len, int out_len)
560{
561 struct rdma_ucm_connect cmd;
562 struct rdma_conn_param conn_param;
563 struct ucma_context *ctx;
564 int ret;
565
566 if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
567 return -EFAULT;
568
569 if (!cmd.conn_param.valid)
570 return -EINVAL;
571
572 ctx = ucma_get_ctx(file, cmd.id);
573 if (IS_ERR(ctx))
574 return PTR_ERR(ctx);
575
576 ucma_copy_conn_param(&conn_param, &cmd.conn_param);
577 ret = rdma_connect(ctx->cm_id, &conn_param);
578 ucma_put_ctx(ctx);
579 return ret;
580}
581
582static ssize_t ucma_listen(struct ucma_file *file, const char __user *inbuf,
583 int in_len, int out_len)
584{
585 struct rdma_ucm_listen cmd;
586 struct ucma_context *ctx;
587 int ret;
588
589 if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
590 return -EFAULT;
591
592 ctx = ucma_get_ctx(file, cmd.id);
593 if (IS_ERR(ctx))
594 return PTR_ERR(ctx);
595
596 ctx->backlog = cmd.backlog > 0 && cmd.backlog < UCMA_MAX_BACKLOG ?
597 cmd.backlog : UCMA_MAX_BACKLOG;
598 ret = rdma_listen(ctx->cm_id, ctx->backlog);
599 ucma_put_ctx(ctx);
600 return ret;
601}
602
603static ssize_t ucma_accept(struct ucma_file *file, const char __user *inbuf,
604 int in_len, int out_len)
605{
606 struct rdma_ucm_accept cmd;
607 struct rdma_conn_param conn_param;
608 struct ucma_context *ctx;
609 int ret;
610
611 if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
612 return -EFAULT;
613
614 ctx = ucma_get_ctx(file, cmd.id);
615 if (IS_ERR(ctx))
616 return PTR_ERR(ctx);
617
618 if (cmd.conn_param.valid) {
619 ctx->uid = cmd.uid;
620 ucma_copy_conn_param(&conn_param, &cmd.conn_param);
621 ret = rdma_accept(ctx->cm_id, &conn_param);
622 } else
623 ret = rdma_accept(ctx->cm_id, NULL);
624
625 ucma_put_ctx(ctx);
626 return ret;
627}
628
629static ssize_t ucma_reject(struct ucma_file *file, const char __user *inbuf,
630 int in_len, int out_len)
631{
632 struct rdma_ucm_reject cmd;
633 struct ucma_context *ctx;
634 int ret;
635
636 if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
637 return -EFAULT;
638
639 ctx = ucma_get_ctx(file, cmd.id);
640 if (IS_ERR(ctx))
641 return PTR_ERR(ctx);
642
643 ret = rdma_reject(ctx->cm_id, cmd.private_data, cmd.private_data_len);
644 ucma_put_ctx(ctx);
645 return ret;
646}
647
648static ssize_t ucma_disconnect(struct ucma_file *file, const char __user *inbuf,
649 int in_len, int out_len)
650{
651 struct rdma_ucm_disconnect cmd;
652 struct ucma_context *ctx;
653 int ret;
654
655 if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
656 return -EFAULT;
657
658 ctx = ucma_get_ctx(file, cmd.id);
659 if (IS_ERR(ctx))
660 return PTR_ERR(ctx);
661
662 ret = rdma_disconnect(ctx->cm_id);
663 ucma_put_ctx(ctx);
664 return ret;
665}
666
667static ssize_t ucma_init_qp_attr(struct ucma_file *file,
668 const char __user *inbuf,
669 int in_len, int out_len)
670{
671 struct rdma_ucm_init_qp_attr cmd;
672 struct ib_uverbs_qp_attr resp;
673 struct ucma_context *ctx;
674 struct ib_qp_attr qp_attr;
675 int ret;
676
677 if (out_len < sizeof(resp))
678 return -ENOSPC;
679
680 if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
681 return -EFAULT;
682
683 ctx = ucma_get_ctx(file, cmd.id);
684 if (IS_ERR(ctx))
685 return PTR_ERR(ctx);
686
687 resp.qp_attr_mask = 0;
688 memset(&qp_attr, 0, sizeof qp_attr);
689 qp_attr.qp_state = cmd.qp_state;
690 ret = rdma_init_qp_attr(ctx->cm_id, &qp_attr, &resp.qp_attr_mask);
691 if (ret)
692 goto out;
693
694 ib_copy_qp_attr_to_user(&resp, &qp_attr);
695 if (copy_to_user((void __user *)(unsigned long)cmd.response,
696 &resp, sizeof(resp)))
697 ret = -EFAULT;
698
699out:
700 ucma_put_ctx(ctx);
701 return ret;
702}
703
704static ssize_t ucma_notify(struct ucma_file *file, const char __user *inbuf,
705 int in_len, int out_len)
706{
707 struct rdma_ucm_notify cmd;
708 struct ucma_context *ctx;
709 int ret;
710
711 if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
712 return -EFAULT;
713
714 ctx = ucma_get_ctx(file, cmd.id);
715 if (IS_ERR(ctx))
716 return PTR_ERR(ctx);
717
718 ret = rdma_notify(ctx->cm_id, (enum ib_event_type) cmd.event);
719 ucma_put_ctx(ctx);
720 return ret;
721}
722
723static ssize_t (*ucma_cmd_table[])(struct ucma_file *file,
724 const char __user *inbuf,
725 int in_len, int out_len) = {
726 [RDMA_USER_CM_CMD_CREATE_ID] = ucma_create_id,
727 [RDMA_USER_CM_CMD_DESTROY_ID] = ucma_destroy_id,
728 [RDMA_USER_CM_CMD_BIND_ADDR] = ucma_bind_addr,
729 [RDMA_USER_CM_CMD_RESOLVE_ADDR] = ucma_resolve_addr,
730	[RDMA_USER_CM_CMD_RESOLVE_ROUTE] = ucma_resolve_route,
731 [RDMA_USER_CM_CMD_QUERY_ROUTE] = ucma_query_route,
732 [RDMA_USER_CM_CMD_CONNECT] = ucma_connect,
733 [RDMA_USER_CM_CMD_LISTEN] = ucma_listen,
734 [RDMA_USER_CM_CMD_ACCEPT] = ucma_accept,
735 [RDMA_USER_CM_CMD_REJECT] = ucma_reject,
736 [RDMA_USER_CM_CMD_DISCONNECT] = ucma_disconnect,
737 [RDMA_USER_CM_CMD_INIT_QP_ATTR] = ucma_init_qp_attr,
738 [RDMA_USER_CM_CMD_GET_EVENT] = ucma_get_event,
739 [RDMA_USER_CM_CMD_GET_OPTION] = NULL,
740 [RDMA_USER_CM_CMD_SET_OPTION] = NULL,
741 [RDMA_USER_CM_CMD_NOTIFY] = ucma_notify,
742};
743
744static ssize_t ucma_write(struct file *filp, const char __user *buf,
745 size_t len, loff_t *pos)
746{
747 struct ucma_file *file = filp->private_data;
748 struct rdma_ucm_cmd_hdr hdr;
749 ssize_t ret;
750
751 if (len < sizeof(hdr))
752 return -EINVAL;
753
754 if (copy_from_user(&hdr, buf, sizeof(hdr)))
755 return -EFAULT;
756
757 if (hdr.cmd < 0 || hdr.cmd >= ARRAY_SIZE(ucma_cmd_table))
758 return -EINVAL;
759
760 if (hdr.in + sizeof(hdr) > len)
761 return -EINVAL;
762
763 if (!ucma_cmd_table[hdr.cmd])
764 return -ENOSYS;
765
766 ret = ucma_cmd_table[hdr.cmd](file, buf + sizeof(hdr), hdr.in, hdr.out);
767 if (!ret)
768 ret = len;
769
770 return ret;
771}
772
773static unsigned int ucma_poll(struct file *filp, struct poll_table_struct *wait)
774{
775 struct ucma_file *file = filp->private_data;
776 unsigned int mask = 0;
777
778 poll_wait(filp, &file->poll_wait, wait);
779
780 if (!list_empty(&file->event_list))
781 mask = POLLIN | POLLRDNORM;
782
783 return mask;
784}
785
786static int ucma_open(struct inode *inode, struct file *filp)
787{
788 struct ucma_file *file;
789
790 file = kmalloc(sizeof *file, GFP_KERNEL);
791 if (!file)
792 return -ENOMEM;
793
794 INIT_LIST_HEAD(&file->event_list);
795 INIT_LIST_HEAD(&file->ctx_list);
796 init_waitqueue_head(&file->poll_wait);
797 mutex_init(&file->mut);
798
799 filp->private_data = file;
800 file->filp = filp;
801 return 0;
802}
803
804static int ucma_close(struct inode *inode, struct file *filp)
805{
806 struct ucma_file *file = filp->private_data;
807 struct ucma_context *ctx, *tmp;
808
809 mutex_lock(&file->mut);
810 list_for_each_entry_safe(ctx, tmp, &file->ctx_list, list) {
811 mutex_unlock(&file->mut);
812
813 mutex_lock(&mut);
814 idr_remove(&ctx_idr, ctx->id);
815 mutex_unlock(&mut);
816
817 ucma_free_ctx(ctx);
818 mutex_lock(&file->mut);
819 }
820 mutex_unlock(&file->mut);
821 kfree(file);
822 return 0;
823}
824
825static struct file_operations ucma_fops = {
826 .owner = THIS_MODULE,
827 .open = ucma_open,
828 .release = ucma_close,
829 .write = ucma_write,
830 .poll = ucma_poll,
831};
832
833static struct miscdevice ucma_misc = {
834 .minor = MISC_DYNAMIC_MINOR,
835 .name = "rdma_cm",
836 .fops = &ucma_fops,
837};
838
839static ssize_t show_abi_version(struct device *dev,
840 struct device_attribute *attr,
841 char *buf)
842{
843 return sprintf(buf, "%d\n", RDMA_USER_CM_ABI_VERSION);
844}
845static DEVICE_ATTR(abi_version, S_IRUGO, show_abi_version, NULL);
846
847static int __init ucma_init(void)
848{
849 int ret;
850
851 ret = misc_register(&ucma_misc);
852 if (ret)
853 return ret;
854
855 ret = device_create_file(ucma_misc.this_device, &dev_attr_abi_version);
856 if (ret) {
857 printk(KERN_ERR "rdma_ucm: couldn't create abi_version attr\n");
858 goto err;
859 }
860 return 0;
861err:
862 misc_deregister(&ucma_misc);
863 return ret;
864}
865
866static void __exit ucma_cleanup(void)
867{
868 device_remove_file(ucma_misc.this_device, &dev_attr_abi_version);
869 misc_deregister(&ucma_misc);
870 idr_destroy(&ctx_idr);
871}
872
873module_init(ucma_init);
874module_exit(ucma_cleanup);
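
The ucma.c file above exposes the RDMA CM to userspace as the "rdma_cm" misc device, driven entirely through write(): each request is a struct rdma_ucm_cmd_hdr followed by a command-specific struct, validated and dispatched through ucma_cmd_table[] in ucma_write(). A hypothetical userspace sketch of that ABI follows; the rdma_ucm_* layouts, command index, port-space value, and device-node path are assumptions based on the rdma_user_cm.h added elsewhere in this series, and real applications would normally go through a userspace CM library rather than the raw device node.

/* Sketch only: struct layouts, the command index, the port-space value,
 * and the device path are assumptions, not taken verbatim from this diff. */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

struct rdma_ucm_cmd_hdr {               /* assumed layout */
	uint32_t cmd;
	uint16_t in;                    /* bytes that follow this header */
	uint16_t out;                   /* size of the response buffer */
};

struct rdma_ucm_create_id {             /* assumed layout */
	uint64_t uid;                   /* opaque context echoed in events */
	uint64_t response;              /* user pointer for the response */
	uint16_t ps;                    /* port space */
	uint8_t  reserved[6];
};

struct rdma_ucm_create_id_resp {        /* assumed layout */
	uint32_t id;                    /* kernel context id (ctx->id) */
};

static int create_cm_id(int fd)
{
	struct {
		struct rdma_ucm_cmd_hdr   hdr;
		struct rdma_ucm_create_id cmd;
	} msg;
	struct rdma_ucm_create_id_resp resp;

	memset(&msg, 0, sizeof msg);
	memset(&resp, 0, sizeof resp);
	msg.hdr.cmd = 0;                /* RDMA_USER_CM_CMD_CREATE_ID, assumed */
	msg.hdr.in  = sizeof msg.cmd;   /* ucma_write() checks hdr.in + hdr size */
	msg.hdr.out = sizeof resp;
	msg.cmd.response = (uintptr_t) &resp;
	msg.cmd.ps = 0x0106;            /* RDMA_PS_TCP, value assumed */

	if (write(fd, &msg, sizeof msg) != (ssize_t) sizeof msg)
		return -1;
	return (int) resp.id;
}

int main(void)
{
	int fd = open("/dev/infiniband/rdma_cm", O_RDWR); /* udev-dependent */

	if (fd < 0)
		return 1;
	printf("cm_id = %d\n", create_cm_id(fd));
	close(fd);
	return 0;
}

On success ucma_write() returns the full length written, so a short return value from write() indicates a dispatch failure.
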
diff --git a/drivers/infiniband/core/uverbs_marshall.c b/drivers/infiniband/core/uverbs_marshall.c
index ce46b13ae02b..5440da0e59b4 100644
--- a/drivers/infiniband/core/uverbs_marshall.c
+++ b/drivers/infiniband/core/uverbs_marshall.c
@@ -32,8 +32,8 @@
32 32
33#include <rdma/ib_marshall.h> 33#include <rdma/ib_marshall.h>
34 34
35static void ib_copy_ah_attr_to_user(struct ib_uverbs_ah_attr *dst, 35void ib_copy_ah_attr_to_user(struct ib_uverbs_ah_attr *dst,
36 struct ib_ah_attr *src) 36 struct ib_ah_attr *src)
37{ 37{
38 memcpy(dst->grh.dgid, src->grh.dgid.raw, sizeof src->grh.dgid); 38 memcpy(dst->grh.dgid, src->grh.dgid.raw, sizeof src->grh.dgid);
39 dst->grh.flow_label = src->grh.flow_label; 39 dst->grh.flow_label = src->grh.flow_label;
@@ -47,6 +47,7 @@ static void ib_copy_ah_attr_to_user(struct ib_uverbs_ah_attr *dst,
47 dst->is_global = src->ah_flags & IB_AH_GRH ? 1 : 0; 47 dst->is_global = src->ah_flags & IB_AH_GRH ? 1 : 0;
48 dst->port_num = src->port_num; 48 dst->port_num = src->port_num;
49} 49}
50EXPORT_SYMBOL(ib_copy_ah_attr_to_user);
50 51
51void ib_copy_qp_attr_to_user(struct ib_uverbs_qp_attr *dst, 52void ib_copy_qp_attr_to_user(struct ib_uverbs_qp_attr *dst,
52 struct ib_qp_attr *src) 53 struct ib_qp_attr *src)
diff --git a/drivers/infiniband/core/uverbs_mem.c b/drivers/infiniband/core/uverbs_mem.c
index db12cc0841df..c95fe952abd5 100644
--- a/drivers/infiniband/core/uverbs_mem.c
+++ b/drivers/infiniband/core/uverbs_mem.c
@@ -52,8 +52,8 @@ static void __ib_umem_release(struct ib_device *dev, struct ib_umem *umem, int d
52 int i; 52 int i;
53 53
54 list_for_each_entry_safe(chunk, tmp, &umem->chunk_list, list) { 54 list_for_each_entry_safe(chunk, tmp, &umem->chunk_list, list) {
55 dma_unmap_sg(dev->dma_device, chunk->page_list, 55 ib_dma_unmap_sg(dev, chunk->page_list,
56 chunk->nents, DMA_BIDIRECTIONAL); 56 chunk->nents, DMA_BIDIRECTIONAL);
57 for (i = 0; i < chunk->nents; ++i) { 57 for (i = 0; i < chunk->nents; ++i) {
58 if (umem->writable && dirty) 58 if (umem->writable && dirty)
59 set_page_dirty_lock(chunk->page_list[i].page); 59 set_page_dirty_lock(chunk->page_list[i].page);
@@ -136,10 +136,10 @@ int ib_umem_get(struct ib_device *dev, struct ib_umem *mem,
136 chunk->page_list[i].length = PAGE_SIZE; 136 chunk->page_list[i].length = PAGE_SIZE;
137 } 137 }
138 138
139 chunk->nmap = dma_map_sg(dev->dma_device, 139 chunk->nmap = ib_dma_map_sg(dev,
140 &chunk->page_list[0], 140 &chunk->page_list[0],
141 chunk->nents, 141 chunk->nents,
142 DMA_BIDIRECTIONAL); 142 DMA_BIDIRECTIONAL);
143 if (chunk->nmap <= 0) { 143 if (chunk->nmap <= 0) {
144 for (i = 0; i < chunk->nents; ++i) 144 for (i = 0; i < chunk->nents; ++i)
145 put_page(chunk->page_list[i].page); 145 put_page(chunk->page_list[i].page);
diff --git a/drivers/infiniband/hw/amso1100/c2_qp.c b/drivers/infiniband/hw/amso1100/c2_qp.c
index 179d005ed4a5..420c1380f5c3 100644
--- a/drivers/infiniband/hw/amso1100/c2_qp.c
+++ b/drivers/infiniband/hw/amso1100/c2_qp.c
@@ -161,8 +161,10 @@ int c2_qp_modify(struct c2_dev *c2dev, struct c2_qp *qp,
161 161
162 if (attr_mask & IB_QP_STATE) { 162 if (attr_mask & IB_QP_STATE) {
163 /* Ensure the state is valid */ 163 /* Ensure the state is valid */
164 if (attr->qp_state < 0 || attr->qp_state > IB_QPS_ERR) 164 if (attr->qp_state < 0 || attr->qp_state > IB_QPS_ERR) {
165 return -EINVAL; 165 err = -EINVAL;
166 goto bail0;
167 }
166 168
167 wr.next_qp_state = cpu_to_be32(to_c2_state(attr->qp_state)); 169 wr.next_qp_state = cpu_to_be32(to_c2_state(attr->qp_state));
168 170
@@ -184,9 +186,10 @@ int c2_qp_modify(struct c2_dev *c2dev, struct c2_qp *qp,
184 if (attr->cur_qp_state != IB_QPS_RTR && 186 if (attr->cur_qp_state != IB_QPS_RTR &&
185 attr->cur_qp_state != IB_QPS_RTS && 187 attr->cur_qp_state != IB_QPS_RTS &&
186 attr->cur_qp_state != IB_QPS_SQD && 188 attr->cur_qp_state != IB_QPS_SQD &&
187 attr->cur_qp_state != IB_QPS_SQE) 189 attr->cur_qp_state != IB_QPS_SQE) {
188 return -EINVAL; 190 err = -EINVAL;
189 else 191 goto bail0;
192 } else
190 wr.next_qp_state = 193 wr.next_qp_state =
191 cpu_to_be32(to_c2_state(attr->cur_qp_state)); 194 cpu_to_be32(to_c2_state(attr->cur_qp_state));
192 195
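
The two c2_qp.c hunks above are the "Fix memory leak in c2_qp_modify()" change from the merge log: the function allocates a verbs-request structure before validating the attributes, so the early "return -EINVAL" paths leaked it. A minimal sketch of the corrected shape, with the vq_req_alloc()/vq_req_free() names assumed from the amso1100 driver:

/* Sketch under assumed names; see the real c2_qp_modify() for details. */
static int c2_qp_modify_shape(struct c2_dev *c2dev, struct ib_qp_attr *attr,
			      int attr_mask)
{
	struct c2_vq_req *vq_req;
	int err = 0;

	vq_req = vq_req_alloc(c2dev);
	if (!vq_req)
		return -ENOMEM;

	if ((attr_mask & IB_QP_STATE) &&
	    (attr->qp_state < 0 || attr->qp_state > IB_QPS_ERR)) {
		err = -EINVAL;
		goto bail0;     /* was "return -EINVAL", leaking vq_req */
	}

	/* ... build and post the request, wait for the reply ... */

bail0:
	vq_req_free(c2dev, vq_req);
	return err;
}
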
diff --git a/drivers/infiniband/hw/ipath/Makefile b/drivers/infiniband/hw/ipath/Makefile
index 7dc10551cf18..ec2e603ea241 100644
--- a/drivers/infiniband/hw/ipath/Makefile
+++ b/drivers/infiniband/hw/ipath/Makefile
@@ -6,6 +6,7 @@ obj-$(CONFIG_INFINIBAND_IPATH) += ib_ipath.o
6ib_ipath-y := \ 6ib_ipath-y := \
7 ipath_cq.o \ 7 ipath_cq.o \
8 ipath_diag.o \ 8 ipath_diag.o \
9 ipath_dma.o \
9 ipath_driver.o \ 10 ipath_driver.o \
10 ipath_eeprom.o \ 11 ipath_eeprom.o \
11 ipath_file_ops.o \ 12 ipath_file_ops.o \
diff --git a/drivers/infiniband/hw/ipath/ipath_dma.c b/drivers/infiniband/hw/ipath/ipath_dma.c
new file mode 100644
index 000000000000..6e0f2b8918ce
--- /dev/null
+++ b/drivers/infiniband/hw/ipath/ipath_dma.c
@@ -0,0 +1,189 @@
1/*
2 * Copyright (c) 2006 QLogic, Corporation. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#include <rdma/ib_verbs.h>
34
35#include "ipath_verbs.h"
36
37#define BAD_DMA_ADDRESS ((u64) 0)
38
39/*
40 * The following functions implement driver specific replacements
41 * for the ib_dma_*() functions.
42 *
43 * These functions return kernel virtual addresses instead of
44 * device bus addresses since the driver uses the CPU to copy
45 * data instead of using hardware DMA.
46 */
47
48static int ipath_mapping_error(struct ib_device *dev, u64 dma_addr)
49{
50 return dma_addr == BAD_DMA_ADDRESS;
51}
52
53static u64 ipath_dma_map_single(struct ib_device *dev,
54 void *cpu_addr, size_t size,
55 enum dma_data_direction direction)
56{
57 BUG_ON(!valid_dma_direction(direction));
58 return (u64) cpu_addr;
59}
60
61static void ipath_dma_unmap_single(struct ib_device *dev,
62 u64 addr, size_t size,
63 enum dma_data_direction direction)
64{
65 BUG_ON(!valid_dma_direction(direction));
66}
67
68static u64 ipath_dma_map_page(struct ib_device *dev,
69 struct page *page,
70 unsigned long offset,
71 size_t size,
72 enum dma_data_direction direction)
73{
74 u64 addr;
75
76 BUG_ON(!valid_dma_direction(direction));
77
78 if (offset + size > PAGE_SIZE) {
79 addr = BAD_DMA_ADDRESS;
80 goto done;
81 }
82
83 addr = (u64) page_address(page);
84 if (addr)
85 addr += offset;
86 /* TODO: handle highmem pages */
87
88done:
89 return addr;
90}
91
92static void ipath_dma_unmap_page(struct ib_device *dev,
93 u64 addr, size_t size,
94 enum dma_data_direction direction)
95{
96 BUG_ON(!valid_dma_direction(direction));
97}
98
 99static int ipath_map_sg(struct ib_device *dev, struct scatterlist *sg, int nents,
100 enum dma_data_direction direction)
101{
102 u64 addr;
103 int i;
104 int ret = nents;
105
106 BUG_ON(!valid_dma_direction(direction));
107
108 for (i = 0; i < nents; i++) {
109 addr = (u64) page_address(sg[i].page);
110 /* TODO: handle highmem pages */
111 if (!addr) {
112 ret = 0;
113 break;
114 }
115 }
116 return ret;
117}
118
119static void ipath_unmap_sg(struct ib_device *dev,
120 struct scatterlist *sg, int nents,
121 enum dma_data_direction direction)
122{
123 BUG_ON(!valid_dma_direction(direction));
124}
125
126static u64 ipath_sg_dma_address(struct ib_device *dev, struct scatterlist *sg)
127{
128 u64 addr = (u64) page_address(sg->page);
129
130 if (addr)
131 addr += sg->offset;
132 return addr;
133}
134
135static unsigned int ipath_sg_dma_len(struct ib_device *dev,
136 struct scatterlist *sg)
137{
138 return sg->length;
139}
140
141static void ipath_sync_single_for_cpu(struct ib_device *dev,
142 u64 addr,
143 size_t size,
144 enum dma_data_direction dir)
145{
146}
147
148static void ipath_sync_single_for_device(struct ib_device *dev,
149 u64 addr,
150 size_t size,
151 enum dma_data_direction dir)
152{
153}
154
155static void *ipath_dma_alloc_coherent(struct ib_device *dev, size_t size,
156 u64 *dma_handle, gfp_t flag)
157{
158 struct page *p;
159 void *addr = NULL;
160
161 p = alloc_pages(flag, get_order(size));
162 if (p)
163 addr = page_address(p);
164 if (dma_handle)
165 *dma_handle = (u64) addr;
166 return addr;
167}
168
169static void ipath_dma_free_coherent(struct ib_device *dev, size_t size,
170	void *cpu_addr, u64 dma_handle)
171{
172 free_pages((unsigned long) cpu_addr, get_order(size));
173}
174
175struct ib_dma_mapping_ops ipath_dma_mapping_ops = {
176 ipath_mapping_error,
177 ipath_dma_map_single,
178 ipath_dma_unmap_single,
179 ipath_dma_map_page,
180 ipath_dma_unmap_page,
181 ipath_map_sg,
182 ipath_unmap_sg,
183 ipath_sg_dma_address,
184 ipath_sg_dma_len,
185 ipath_sync_single_for_cpu,
186 ipath_sync_single_for_device,
187 ipath_dma_alloc_coherent,
188 ipath_dma_free_coherent
189};
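
These ops only take effect because the ib_dma_*() entry points added to ib_verbs.h in this series check ib_device->dma_ops before falling back to the generic DMA API; that indirection is how a driver like ipath interposes. The wrapper shape, paraphrased from the ib_verbs.h hunk of this merge (not shown here):

static inline u64 ib_dma_map_single(struct ib_device *dev,
				    void *cpu_addr, size_t size,
				    enum dma_data_direction direction)
{
	if (dev->dma_ops)
		return dev->dma_ops->map_single(dev, cpu_addr, size,
						direction);
	return dma_map_single(dev->dma_device, cpu_addr, size, direction);
}

Because ipath copies data with the CPU, its map_single simply returns the kernel virtual address; BAD_DMA_ADDRESS is 0 rather than an all-ones cookie since a NULL page_address() (e.g. for highmem, still marked TODO above) is the only failure it can report.
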
diff --git a/drivers/infiniband/hw/ipath/ipath_driver.c b/drivers/infiniband/hw/ipath/ipath_driver.c
index 1aeddb48e355..ae7f21a0cdc0 100644
--- a/drivers/infiniband/hw/ipath/ipath_driver.c
+++ b/drivers/infiniband/hw/ipath/ipath_driver.c
@@ -1825,8 +1825,6 @@ void ipath_write_kreg_port(const struct ipath_devdata *dd, ipath_kreg regno,
1825 */ 1825 */
1826void ipath_shutdown_device(struct ipath_devdata *dd) 1826void ipath_shutdown_device(struct ipath_devdata *dd)
1827{ 1827{
1828 u64 val;
1829
1830 ipath_dbg("Shutting down the device\n"); 1828 ipath_dbg("Shutting down the device\n");
1831 1829
1832 dd->ipath_flags |= IPATH_LINKUNK; 1830 dd->ipath_flags |= IPATH_LINKUNK;
@@ -1849,7 +1847,7 @@ void ipath_shutdown_device(struct ipath_devdata *dd)
1849 */ 1847 */
1850 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, 0ULL); 1848 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, 0ULL);
1851 /* flush it */ 1849 /* flush it */
1852 val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch); 1850 ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
1853 /* 1851 /*
1854 * enough for anything that's going to trickle out to have actually 1852 * enough for anything that's going to trickle out to have actually
1855 * done so. 1853 * done so.
diff --git a/drivers/infiniband/hw/ipath/ipath_file_ops.c b/drivers/infiniband/hw/ipath/ipath_file_ops.c
index 340f27e3ebff..b932bcb67a5e 100644
--- a/drivers/infiniband/hw/ipath/ipath_file_ops.c
+++ b/drivers/infiniband/hw/ipath/ipath_file_ops.c
@@ -699,7 +699,6 @@ static int ipath_manage_rcvq(struct ipath_portdata *pd, unsigned subport,
699 int start_stop) 699 int start_stop)
700{ 700{
701 struct ipath_devdata *dd = pd->port_dd; 701 struct ipath_devdata *dd = pd->port_dd;
702 u64 tval;
703 702
704 ipath_cdbg(PROC, "%sabling rcv for unit %u port %u:%u\n", 703 ipath_cdbg(PROC, "%sabling rcv for unit %u port %u:%u\n",
705 start_stop ? "en" : "dis", dd->ipath_unit, 704 start_stop ? "en" : "dis", dd->ipath_unit,
@@ -729,7 +728,7 @@ static int ipath_manage_rcvq(struct ipath_portdata *pd, unsigned subport,
729 ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl, 728 ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
730 dd->ipath_rcvctrl); 729 dd->ipath_rcvctrl);
731 /* now be sure chip saw it before we return */ 730 /* now be sure chip saw it before we return */
732 tval = ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch); 731 ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
733 if (start_stop) { 732 if (start_stop) {
734 /* 733 /*
735 * And try to be sure that tail reg update has happened too. 734 * And try to be sure that tail reg update has happened too.
@@ -738,7 +737,7 @@ static int ipath_manage_rcvq(struct ipath_portdata *pd, unsigned subport,
738 * in memory copy, since we could overwrite an update by the 737 * in memory copy, since we could overwrite an update by the
739 * chip if we did. 738 * chip if we did.
740 */ 739 */
741 tval = ipath_read_ureg32(dd, ur_rcvhdrtail, pd->port_port); 740 ipath_read_ureg32(dd, ur_rcvhdrtail, pd->port_port);
742 } 741 }
743 /* always; new head should be equal to new tail; see above */ 742 /* always; new head should be equal to new tail; see above */
744bail: 743bail:
diff --git a/drivers/infiniband/hw/ipath/ipath_iba6110.c b/drivers/infiniband/hw/ipath/ipath_iba6110.c
index e57c7a351cb5..7468477ba837 100644
--- a/drivers/infiniband/hw/ipath/ipath_iba6110.c
+++ b/drivers/infiniband/hw/ipath/ipath_iba6110.c
@@ -1447,7 +1447,7 @@ static void ipath_ht_tidtemplate(struct ipath_devdata *dd)
1447static int ipath_ht_early_init(struct ipath_devdata *dd) 1447static int ipath_ht_early_init(struct ipath_devdata *dd)
1448{ 1448{
1449 u32 __iomem *piobuf; 1449 u32 __iomem *piobuf;
1450 u32 pioincr, val32, egrsize; 1450 u32 pioincr, val32;
1451 int i; 1451 int i;
1452 1452
1453 /* 1453 /*
@@ -1467,7 +1467,6 @@ static int ipath_ht_early_init(struct ipath_devdata *dd)
1467 * errors interrupts if we ever see one). 1467 * errors interrupts if we ever see one).
1468 */ 1468 */
1469 dd->ipath_rcvegrbufsize = dd->ipath_piosize2k; 1469 dd->ipath_rcvegrbufsize = dd->ipath_piosize2k;
1470 egrsize = dd->ipath_rcvegrbufsize;
1471 1470
1472 /* 1471 /*
1473 * the min() check here is currently a nop, but it may not 1472 * the min() check here is currently a nop, but it may not
diff --git a/drivers/infiniband/hw/ipath/ipath_iba6120.c b/drivers/infiniband/hw/ipath/ipath_iba6120.c
index 6af89683f710..ae8bf9950c6d 100644
--- a/drivers/infiniband/hw/ipath/ipath_iba6120.c
+++ b/drivers/infiniband/hw/ipath/ipath_iba6120.c
@@ -602,7 +602,7 @@ static void ipath_pe_init_hwerrors(struct ipath_devdata *dd)
602 */ 602 */
603static int ipath_pe_bringup_serdes(struct ipath_devdata *dd) 603static int ipath_pe_bringup_serdes(struct ipath_devdata *dd)
604{ 604{
605 u64 val, tmp, config1, prev_val; 605 u64 val, config1, prev_val;
606 int ret = 0; 606 int ret = 0;
607 607
608 ipath_dbg("Trying to bringup serdes\n"); 608 ipath_dbg("Trying to bringup serdes\n");
@@ -633,7 +633,7 @@ static int ipath_pe_bringup_serdes(struct ipath_devdata *dd)
633 | INFINIPATH_SERDC0_L1PWR_DN; 633 | INFINIPATH_SERDC0_L1PWR_DN;
634 ipath_write_kreg(dd, dd->ipath_kregs->kr_serdesconfig0, val); 634 ipath_write_kreg(dd, dd->ipath_kregs->kr_serdesconfig0, val);
635 /* be sure chip saw it */ 635 /* be sure chip saw it */
636 tmp = ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch); 636 ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
637 udelay(5); /* need pll reset set at least for a bit */ 637 udelay(5); /* need pll reset set at least for a bit */
638 /* 638 /*
639 * after PLL is reset, set the per-lane Resets and TxIdle and 639 * after PLL is reset, set the per-lane Resets and TxIdle and
@@ -647,7 +647,7 @@ static int ipath_pe_bringup_serdes(struct ipath_devdata *dd)
647 "and txidle (%llx)\n", (unsigned long long) val); 647 "and txidle (%llx)\n", (unsigned long long) val);
648 ipath_write_kreg(dd, dd->ipath_kregs->kr_serdesconfig0, val); 648 ipath_write_kreg(dd, dd->ipath_kregs->kr_serdesconfig0, val);
649 /* be sure chip saw it */ 649 /* be sure chip saw it */
650 tmp = ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch); 650 ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
651 /* need PLL reset clear for at least 11 usec before lane 651 /* need PLL reset clear for at least 11 usec before lane
652 * resets cleared; give it a few more to be sure */ 652 * resets cleared; give it a few more to be sure */
653 udelay(15); 653 udelay(15);
@@ -851,12 +851,12 @@ static int ipath_setup_pe_config(struct ipath_devdata *dd,
851 int pos, ret; 851 int pos, ret;
852 852
853 dd->ipath_msi_lo = 0; /* used as a flag during reset processing */ 853 dd->ipath_msi_lo = 0; /* used as a flag during reset processing */
854 dd->ipath_irq = pdev->irq;
855 ret = pci_enable_msi(dd->pcidev); 854 ret = pci_enable_msi(dd->pcidev);
856 if (ret) 855 if (ret)
857 ipath_dev_err(dd, "pci_enable_msi failed: %d, " 856 ipath_dev_err(dd, "pci_enable_msi failed: %d, "
858 "interrupts may not work\n", ret); 857 "interrupts may not work\n", ret);
859 /* continue even if it fails, we may still be OK... */ 858 /* continue even if it fails, we may still be OK... */
859 dd->ipath_irq = pdev->irq;
860 860
861 if ((pos = pci_find_capability(dd->pcidev, PCI_CAP_ID_MSI))) { 861 if ((pos = pci_find_capability(dd->pcidev, PCI_CAP_ID_MSI))) {
862 u16 control; 862 u16 control;
diff --git a/drivers/infiniband/hw/ipath/ipath_init_chip.c b/drivers/infiniband/hw/ipath/ipath_init_chip.c
index d819cca524cd..d4f6b5239ef8 100644
--- a/drivers/infiniband/hw/ipath/ipath_init_chip.c
+++ b/drivers/infiniband/hw/ipath/ipath_init_chip.c
@@ -347,10 +347,9 @@ done:
347static int init_chip_reset(struct ipath_devdata *dd, 347static int init_chip_reset(struct ipath_devdata *dd,
348 struct ipath_portdata **pdp) 348 struct ipath_portdata **pdp)
349{ 349{
350 struct ipath_portdata *pd;
351 u32 rtmp; 350 u32 rtmp;
352 351
353 *pdp = pd = dd->ipath_pd[0]; 352 *pdp = dd->ipath_pd[0];
354 /* ensure chip does no sends or receives while we re-initialize */ 353 /* ensure chip does no sends or receives while we re-initialize */
355 dd->ipath_control = dd->ipath_sendctrl = dd->ipath_rcvctrl = 0U; 354 dd->ipath_control = dd->ipath_sendctrl = dd->ipath_rcvctrl = 0U;
356 ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl, 0); 355 ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl, 0);
diff --git a/drivers/infiniband/hw/ipath/ipath_intr.c b/drivers/infiniband/hw/ipath/ipath_intr.c
index 5652a550d442..72b9e279d19d 100644
--- a/drivers/infiniband/hw/ipath/ipath_intr.c
+++ b/drivers/infiniband/hw/ipath/ipath_intr.c
@@ -598,10 +598,9 @@ static int handle_errors(struct ipath_devdata *dd, ipath_err_t errs)
598 * on close 598 * on close
599 */ 599 */
600 if (errs & INFINIPATH_E_RRCVHDRFULL) { 600 if (errs & INFINIPATH_E_RRCVHDRFULL) {
601 int any;
602 u32 hd, tl; 601 u32 hd, tl;
603 ipath_stats.sps_hdrqfull++; 602 ipath_stats.sps_hdrqfull++;
604 for (any = i = 0; i < dd->ipath_cfgports; i++) { 603 for (i = 0; i < dd->ipath_cfgports; i++) {
605 struct ipath_portdata *pd = dd->ipath_pd[i]; 604 struct ipath_portdata *pd = dd->ipath_pd[i];
606 if (i == 0) { 605 if (i == 0) {
607 hd = dd->ipath_port0head; 606 hd = dd->ipath_port0head;
diff --git a/drivers/infiniband/hw/ipath/ipath_keys.c b/drivers/infiniband/hw/ipath/ipath_keys.c
index 9a6cbd05adcd..851763d7d2db 100644
--- a/drivers/infiniband/hw/ipath/ipath_keys.c
+++ b/drivers/infiniband/hw/ipath/ipath_keys.c
@@ -134,7 +134,7 @@ int ipath_lkey_ok(struct ipath_qp *qp, struct ipath_sge *isge,
134 */ 134 */
135 if (sge->lkey == 0) { 135 if (sge->lkey == 0) {
136 isge->mr = NULL; 136 isge->mr = NULL;
137 isge->vaddr = bus_to_virt(sge->addr); 137 isge->vaddr = (void *) sge->addr;
138 isge->length = sge->length; 138 isge->length = sge->length;
139 isge->sge_length = sge->length; 139 isge->sge_length = sge->length;
140 ret = 1; 140 ret = 1;
@@ -202,12 +202,12 @@ int ipath_rkey_ok(struct ipath_qp *qp, struct ipath_sge_state *ss,
202 int ret; 202 int ret;
203 203
204 /* 204 /*
205 * We use RKEY == zero for physical addresses 205 * We use RKEY == zero for kernel virtual addresses
206 * (see ipath_get_dma_mr). 206 * (see ipath_get_dma_mr and ipath_dma.c).
207 */ 207 */
208 if (rkey == 0) { 208 if (rkey == 0) {
209 sge->mr = NULL; 209 sge->mr = NULL;
210 sge->vaddr = phys_to_virt(vaddr); 210 sge->vaddr = (void *) vaddr;
211 sge->length = len; 211 sge->length = len;
212 sge->sge_length = len; 212 sge->sge_length = len;
213 ss->sg_list = NULL; 213 ss->sg_list = NULL;
diff --git a/drivers/infiniband/hw/ipath/ipath_mr.c b/drivers/infiniband/hw/ipath/ipath_mr.c
index a0673c1eef71..8cc8598d6c69 100644
--- a/drivers/infiniband/hw/ipath/ipath_mr.c
+++ b/drivers/infiniband/hw/ipath/ipath_mr.c
@@ -54,6 +54,8 @@ static inline struct ipath_fmr *to_ifmr(struct ib_fmr *ibfmr)
54 * @acc: access flags 54 * @acc: access flags
55 * 55 *
56 * Returns the memory region on success, otherwise returns an errno. 56 * Returns the memory region on success, otherwise returns an errno.
57 * Note that all DMA addresses should be created via the
58 * struct ib_dma_mapping_ops functions (see ipath_dma.c).
57 */ 59 */
58struct ib_mr *ipath_get_dma_mr(struct ib_pd *pd, int acc) 60struct ib_mr *ipath_get_dma_mr(struct ib_pd *pd, int acc)
59{ 61{
@@ -149,8 +151,7 @@ struct ib_mr *ipath_reg_phys_mr(struct ib_pd *pd,
149 m = 0; 151 m = 0;
150 n = 0; 152 n = 0;
151 for (i = 0; i < num_phys_buf; i++) { 153 for (i = 0; i < num_phys_buf; i++) {
152 mr->mr.map[m]->segs[n].vaddr = 154 mr->mr.map[m]->segs[n].vaddr = (void *) buffer_list[i].addr;
153 phys_to_virt(buffer_list[i].addr);
154 mr->mr.map[m]->segs[n].length = buffer_list[i].size; 155 mr->mr.map[m]->segs[n].length = buffer_list[i].size;
155 mr->mr.length += buffer_list[i].size; 156 mr->mr.length += buffer_list[i].size;
156 n++; 157 n++;
@@ -347,7 +348,7 @@ int ipath_map_phys_fmr(struct ib_fmr *ibfmr, u64 * page_list,
347 n = 0; 348 n = 0;
348 ps = 1 << fmr->page_shift; 349 ps = 1 << fmr->page_shift;
349 for (i = 0; i < list_len; i++) { 350 for (i = 0; i < list_len; i++) {
350 fmr->mr.map[m]->segs[n].vaddr = phys_to_virt(page_list[i]); 351 fmr->mr.map[m]->segs[n].vaddr = (void *) page_list[i];
351 fmr->mr.map[m]->segs[n].length = ps; 352 fmr->mr.map[m]->segs[n].length = ps;
352 if (++n == IPATH_SEGSZ) { 353 if (++n == IPATH_SEGSZ) {
353 m++; 354 m++;
diff --git a/drivers/infiniband/hw/ipath/ipath_sysfs.c b/drivers/infiniband/hw/ipath/ipath_sysfs.c
index 182de34f9f47..ffa6318ad0cc 100644
--- a/drivers/infiniband/hw/ipath/ipath_sysfs.c
+++ b/drivers/infiniband/hw/ipath/ipath_sysfs.c
@@ -215,7 +215,6 @@ static ssize_t store_mlid(struct device *dev,
215 size_t count) 215 size_t count)
216{ 216{
217 struct ipath_devdata *dd = dev_get_drvdata(dev); 217 struct ipath_devdata *dd = dev_get_drvdata(dev);
218 int unit;
219 u16 mlid; 218 u16 mlid;
220 int ret; 219 int ret;
221 220
@@ -223,8 +222,6 @@ static ssize_t store_mlid(struct device *dev,
223 if (ret < 0 || mlid < IPATH_MULTICAST_LID_BASE) 222 if (ret < 0 || mlid < IPATH_MULTICAST_LID_BASE)
224 goto invalid; 223 goto invalid;
225 224
226 unit = dd->ipath_unit;
227
228 dd->ipath_mlid = mlid; 225 dd->ipath_mlid = mlid;
229 226
230 goto bail; 227 goto bail;
diff --git a/drivers/infiniband/hw/ipath/ipath_verbs.c b/drivers/infiniband/hw/ipath/ipath_verbs.c
index acdee33ee1f8..2aaacdb7e52a 100644
--- a/drivers/infiniband/hw/ipath/ipath_verbs.c
+++ b/drivers/infiniband/hw/ipath/ipath_verbs.c
@@ -1599,6 +1599,7 @@ int ipath_register_ib_device(struct ipath_devdata *dd)
1599 dev->detach_mcast = ipath_multicast_detach; 1599 dev->detach_mcast = ipath_multicast_detach;
1600 dev->process_mad = ipath_process_mad; 1600 dev->process_mad = ipath_process_mad;
1601 dev->mmap = ipath_mmap; 1601 dev->mmap = ipath_mmap;
1602 dev->dma_ops = &ipath_dma_mapping_ops;
1602 1603
1603 snprintf(dev->node_desc, sizeof(dev->node_desc), 1604 snprintf(dev->node_desc, sizeof(dev->node_desc),
1604 IPATH_IDSTR " %s", init_utsname()->nodename); 1605 IPATH_IDSTR " %s", init_utsname()->nodename);
diff --git a/drivers/infiniband/hw/ipath/ipath_verbs.h b/drivers/infiniband/hw/ipath/ipath_verbs.h
index 8039f6e5f0c8..c0c8d5b24a7d 100644
--- a/drivers/infiniband/hw/ipath/ipath_verbs.h
+++ b/drivers/infiniband/hw/ipath/ipath_verbs.h
@@ -812,4 +812,6 @@ extern unsigned int ib_ipath_max_srq_wrs;
812 812
813extern const u32 ib_ipath_rnr_table[]; 813extern const u32 ib_ipath_rnr_table[];
814 814
815extern struct ib_dma_mapping_ops ipath_dma_mapping_ops;
816
815#endif /* IPATH_VERBS_H */ 817#endif /* IPATH_VERBS_H */
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index 99547996aba2..07deee8f81ce 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -105,12 +105,12 @@ struct ipoib_mcast;
105 105
106struct ipoib_rx_buf { 106struct ipoib_rx_buf {
107 struct sk_buff *skb; 107 struct sk_buff *skb;
108 dma_addr_t mapping; 108 u64 mapping;
109}; 109};
110 110
111struct ipoib_tx_buf { 111struct ipoib_tx_buf {
112 struct sk_buff *skb; 112 struct sk_buff *skb;
113 DECLARE_PCI_UNMAP_ADDR(mapping) 113 u64 mapping;
114}; 114};
115 115
116/* 116/*
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index f10fba5d3265..59d9594ed6d9 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -109,9 +109,8 @@ static int ipoib_ib_post_receive(struct net_device *dev, int id)
109 ret = ib_post_recv(priv->qp, &param, &bad_wr); 109 ret = ib_post_recv(priv->qp, &param, &bad_wr);
110 if (unlikely(ret)) { 110 if (unlikely(ret)) {
111 ipoib_warn(priv, "receive failed for buf %d (%d)\n", id, ret); 111 ipoib_warn(priv, "receive failed for buf %d (%d)\n", id, ret);
112 dma_unmap_single(priv->ca->dma_device, 112 ib_dma_unmap_single(priv->ca, priv->rx_ring[id].mapping,
113 priv->rx_ring[id].mapping, 113 IPOIB_BUF_SIZE, DMA_FROM_DEVICE);
114 IPOIB_BUF_SIZE, DMA_FROM_DEVICE);
115 dev_kfree_skb_any(priv->rx_ring[id].skb); 114 dev_kfree_skb_any(priv->rx_ring[id].skb);
116 priv->rx_ring[id].skb = NULL; 115 priv->rx_ring[id].skb = NULL;
117 } 116 }
@@ -123,7 +122,7 @@ static int ipoib_alloc_rx_skb(struct net_device *dev, int id)
123{ 122{
124 struct ipoib_dev_priv *priv = netdev_priv(dev); 123 struct ipoib_dev_priv *priv = netdev_priv(dev);
125 struct sk_buff *skb; 124 struct sk_buff *skb;
126 dma_addr_t addr; 125 u64 addr;
127 126
128 skb = dev_alloc_skb(IPOIB_BUF_SIZE + 4); 127 skb = dev_alloc_skb(IPOIB_BUF_SIZE + 4);
129 if (!skb) 128 if (!skb)
@@ -136,10 +135,9 @@ static int ipoib_alloc_rx_skb(struct net_device *dev, int id)
136 */ 135 */
137 skb_reserve(skb, 4); 136 skb_reserve(skb, 4);
138 137
139 addr = dma_map_single(priv->ca->dma_device, 138 addr = ib_dma_map_single(priv->ca, skb->data, IPOIB_BUF_SIZE,
140 skb->data, IPOIB_BUF_SIZE, 139 DMA_FROM_DEVICE);
141 DMA_FROM_DEVICE); 140 if (unlikely(ib_dma_mapping_error(priv->ca, addr))) {
142 if (unlikely(dma_mapping_error(addr))) {
143 dev_kfree_skb_any(skb); 141 dev_kfree_skb_any(skb);
144 return -EIO; 142 return -EIO;
145 } 143 }
@@ -174,7 +172,7 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
174 struct ipoib_dev_priv *priv = netdev_priv(dev); 172 struct ipoib_dev_priv *priv = netdev_priv(dev);
175 unsigned int wr_id = wc->wr_id & ~IPOIB_OP_RECV; 173 unsigned int wr_id = wc->wr_id & ~IPOIB_OP_RECV;
176 struct sk_buff *skb; 174 struct sk_buff *skb;
177 dma_addr_t addr; 175 u64 addr;
178 176
179 ipoib_dbg_data(priv, "recv completion: id %d, op %d, status: %d\n", 177 ipoib_dbg_data(priv, "recv completion: id %d, op %d, status: %d\n",
180 wr_id, wc->opcode, wc->status); 178 wr_id, wc->opcode, wc->status);
@@ -193,8 +191,8 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
193 ipoib_warn(priv, "failed recv event " 191 ipoib_warn(priv, "failed recv event "
194 "(status=%d, wrid=%d vend_err %x)\n", 192 "(status=%d, wrid=%d vend_err %x)\n",
195 wc->status, wr_id, wc->vendor_err); 193 wc->status, wr_id, wc->vendor_err);
196 dma_unmap_single(priv->ca->dma_device, addr, 194 ib_dma_unmap_single(priv->ca, addr,
197 IPOIB_BUF_SIZE, DMA_FROM_DEVICE); 195 IPOIB_BUF_SIZE, DMA_FROM_DEVICE);
198 dev_kfree_skb_any(skb); 196 dev_kfree_skb_any(skb);
199 priv->rx_ring[wr_id].skb = NULL; 197 priv->rx_ring[wr_id].skb = NULL;
200 return; 198 return;
@@ -212,8 +210,7 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
212 ipoib_dbg_data(priv, "received %d bytes, SLID 0x%04x\n", 210 ipoib_dbg_data(priv, "received %d bytes, SLID 0x%04x\n",
213 wc->byte_len, wc->slid); 211 wc->byte_len, wc->slid);
214 212
215 dma_unmap_single(priv->ca->dma_device, addr, 213 ib_dma_unmap_single(priv->ca, addr, IPOIB_BUF_SIZE, DMA_FROM_DEVICE);
216 IPOIB_BUF_SIZE, DMA_FROM_DEVICE);
217 214
218 skb_put(skb, wc->byte_len); 215 skb_put(skb, wc->byte_len);
219 skb_pull(skb, IB_GRH_BYTES); 216 skb_pull(skb, IB_GRH_BYTES);
@@ -261,10 +258,8 @@ static void ipoib_ib_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
261 258
262 tx_req = &priv->tx_ring[wr_id]; 259 tx_req = &priv->tx_ring[wr_id];
263 260
264 dma_unmap_single(priv->ca->dma_device, 261 ib_dma_unmap_single(priv->ca, tx_req->mapping,
265 pci_unmap_addr(tx_req, mapping), 262 tx_req->skb->len, DMA_TO_DEVICE);
266 tx_req->skb->len,
267 DMA_TO_DEVICE);
268 263
269 ++priv->stats.tx_packets; 264 ++priv->stats.tx_packets;
270 priv->stats.tx_bytes += tx_req->skb->len; 265 priv->stats.tx_bytes += tx_req->skb->len;
@@ -311,7 +306,7 @@ void ipoib_ib_completion(struct ib_cq *cq, void *dev_ptr)
311static inline int post_send(struct ipoib_dev_priv *priv, 306static inline int post_send(struct ipoib_dev_priv *priv,
312 unsigned int wr_id, 307 unsigned int wr_id,
313 struct ib_ah *address, u32 qpn, 308 struct ib_ah *address, u32 qpn,
314 dma_addr_t addr, int len) 309 u64 addr, int len)
315{ 310{
316 struct ib_send_wr *bad_wr; 311 struct ib_send_wr *bad_wr;
317 312
@@ -330,7 +325,7 @@ void ipoib_send(struct net_device *dev, struct sk_buff *skb,
330{ 325{
331 struct ipoib_dev_priv *priv = netdev_priv(dev); 326 struct ipoib_dev_priv *priv = netdev_priv(dev);
332 struct ipoib_tx_buf *tx_req; 327 struct ipoib_tx_buf *tx_req;
333 dma_addr_t addr; 328 u64 addr;
334 329
335 if (unlikely(skb->len > dev->mtu + INFINIBAND_ALEN)) { 330 if (unlikely(skb->len > dev->mtu + INFINIBAND_ALEN)) {
336 ipoib_warn(priv, "packet len %d (> %d) too long to send, dropping\n", 331 ipoib_warn(priv, "packet len %d (> %d) too long to send, dropping\n",
@@ -353,21 +348,20 @@ void ipoib_send(struct net_device *dev, struct sk_buff *skb,
353 */ 348 */
354 tx_req = &priv->tx_ring[priv->tx_head & (ipoib_sendq_size - 1)]; 349 tx_req = &priv->tx_ring[priv->tx_head & (ipoib_sendq_size - 1)];
355 tx_req->skb = skb; 350 tx_req->skb = skb;
356 addr = dma_map_single(priv->ca->dma_device, skb->data, skb->len, 351 addr = ib_dma_map_single(priv->ca, skb->data, skb->len,
357 DMA_TO_DEVICE); 352 DMA_TO_DEVICE);
358 if (unlikely(dma_mapping_error(addr))) { 353 if (unlikely(ib_dma_mapping_error(priv->ca, addr))) {
359 ++priv->stats.tx_errors; 354 ++priv->stats.tx_errors;
360 dev_kfree_skb_any(skb); 355 dev_kfree_skb_any(skb);
361 return; 356 return;
362 } 357 }
363 pci_unmap_addr_set(tx_req, mapping, addr); 358 tx_req->mapping = addr;
364 359
365 if (unlikely(post_send(priv, priv->tx_head & (ipoib_sendq_size - 1), 360 if (unlikely(post_send(priv, priv->tx_head & (ipoib_sendq_size - 1),
366 address->ah, qpn, addr, skb->len))) { 361 address->ah, qpn, addr, skb->len))) {
367 ipoib_warn(priv, "post_send failed\n"); 362 ipoib_warn(priv, "post_send failed\n");
368 ++priv->stats.tx_errors; 363 ++priv->stats.tx_errors;
369 dma_unmap_single(priv->ca->dma_device, addr, skb->len, 364 ib_dma_unmap_single(priv->ca, addr, skb->len, DMA_TO_DEVICE);
370 DMA_TO_DEVICE);
371 dev_kfree_skb_any(skb); 365 dev_kfree_skb_any(skb);
372 } else { 366 } else {
373 dev->trans_start = jiffies; 367 dev->trans_start = jiffies;
@@ -538,24 +532,27 @@ int ipoib_ib_dev_stop(struct net_device *dev)
538 while ((int) priv->tx_tail - (int) priv->tx_head < 0) { 532 while ((int) priv->tx_tail - (int) priv->tx_head < 0) {
539 tx_req = &priv->tx_ring[priv->tx_tail & 533 tx_req = &priv->tx_ring[priv->tx_tail &
540 (ipoib_sendq_size - 1)]; 534 (ipoib_sendq_size - 1)];
541 dma_unmap_single(priv->ca->dma_device, 535 ib_dma_unmap_single(priv->ca,
542 pci_unmap_addr(tx_req, mapping), 536 tx_req->mapping,
543 tx_req->skb->len, 537 tx_req->skb->len,
544 DMA_TO_DEVICE); 538 DMA_TO_DEVICE);
545 dev_kfree_skb_any(tx_req->skb); 539 dev_kfree_skb_any(tx_req->skb);
546 ++priv->tx_tail; 540 ++priv->tx_tail;
547 } 541 }
548 542
549 for (i = 0; i < ipoib_recvq_size; ++i) 543 for (i = 0; i < ipoib_recvq_size; ++i) {
550 if (priv->rx_ring[i].skb) { 544 struct ipoib_rx_buf *rx_req;
551 dma_unmap_single(priv->ca->dma_device, 545
552 pci_unmap_addr(&priv->rx_ring[i], 546 rx_req = &priv->rx_ring[i];
553 mapping), 547 if (!rx_req->skb)
554 IPOIB_BUF_SIZE, 548 continue;
555 DMA_FROM_DEVICE); 549 ib_dma_unmap_single(priv->ca,
556 dev_kfree_skb_any(priv->rx_ring[i].skb); 550 rx_req->mapping,
557 priv->rx_ring[i].skb = NULL; 551 IPOIB_BUF_SIZE,
558 } 552 DMA_FROM_DEVICE);
553 dev_kfree_skb_any(rx_req->skb);
554 rx_req->skb = NULL;
555 }
559 556
560 goto timeout; 557 goto timeout;
561 } 558 }
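
The ipoib_ib.c conversion above is the ULP-side template the rest of the series follows: keep the handle as a plain u64 (rather than pci_unmap_addr_set(), which can compile away on some configurations) and route every mapping call through the ib_dma_*() wrappers keyed on the struct ib_device. Condensed from the send path above, with field names as in ipoib_ib.c:

	u64 addr;

	addr = ib_dma_map_single(priv->ca, skb->data, skb->len,
				 DMA_TO_DEVICE);
	if (unlikely(ib_dma_mapping_error(priv->ca, addr))) {
		++priv->stats.tx_errors;
		dev_kfree_skb_any(skb);
		return;
	}
	tx_req->mapping = addr;         /* plain u64 */
	/* ... post the send; on failure or at completion time: ... */
	ib_dma_unmap_single(priv->ca, addr, skb->len, DMA_TO_DEVICE);
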
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index c09280243726..705eb1d0e554 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -497,8 +497,6 @@ static void neigh_add_path(struct sk_buff *skb, struct net_device *dev)
497 return; 497 return;
498 } 498 }
499 499
500 skb_queue_head_init(&neigh->queue);
501
502 /* 500 /*
503 * We can only be called from ipoib_start_xmit, so we're 501 * We can only be called from ipoib_start_xmit, so we're
504 * inside tx_lock -- no need to save/restore flags. 502 * inside tx_lock -- no need to save/restore flags.
@@ -806,6 +804,7 @@ struct ipoib_neigh *ipoib_neigh_alloc(struct neighbour *neighbour)
806 804
807 neigh->neighbour = neighbour; 805 neigh->neighbour = neighbour;
808 *to_ipoib_neigh(neighbour) = neigh; 806 *to_ipoib_neigh(neighbour) = neigh;
807 skb_queue_head_init(&neigh->queue);
809 808
810 return neigh; 809 return neigh;
811} 810}
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h
index 234e5b061a75..cae8c96a55f8 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.h
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.h
@@ -182,7 +182,7 @@ struct iser_regd_buf {
182 struct iser_mem_reg reg; /* memory registration info */ 182 struct iser_mem_reg reg; /* memory registration info */
183 void *virt_addr; 183 void *virt_addr;
184 struct iser_device *device; /* device->device for dma_unmap */ 184 struct iser_device *device; /* device->device for dma_unmap */
185 dma_addr_t dma_addr; /* if non zero, addr for dma_unmap */ 185 u64 dma_addr; /* if non zero, addr for dma_unmap */
186 enum dma_data_direction direction; /* direction for dma_unmap */ 186 enum dma_data_direction direction; /* direction for dma_unmap */
187 unsigned int data_size; 187 unsigned int data_size;
188 atomic_t ref_count; /* refcount, freed when dec to 0 */ 188 atomic_t ref_count; /* refcount, freed when dec to 0 */
diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
index 9b3d79c796c8..e73c87b9be43 100644
--- a/drivers/infiniband/ulp/iser/iser_initiator.c
+++ b/drivers/infiniband/ulp/iser/iser_initiator.c
@@ -487,10 +487,8 @@ int iser_send_control(struct iscsi_conn *conn,
487 struct iscsi_iser_conn *iser_conn = conn->dd_data; 487 struct iscsi_iser_conn *iser_conn = conn->dd_data;
488 struct iser_desc *mdesc = mtask->dd_data; 488 struct iser_desc *mdesc = mtask->dd_data;
489 struct iser_dto *send_dto = NULL; 489 struct iser_dto *send_dto = NULL;
490 unsigned int itt;
491 unsigned long data_seg_len; 490 unsigned long data_seg_len;
492 int err = 0; 491 int err = 0;
493 unsigned char opcode;
494 struct iser_regd_buf *regd_buf; 492 struct iser_regd_buf *regd_buf;
495 struct iser_device *device; 493 struct iser_device *device;
496 494
@@ -512,8 +510,6 @@ int iser_send_control(struct iscsi_conn *conn,
512 510
513 iser_reg_single(device, send_dto->regd[0], DMA_TO_DEVICE); 511 iser_reg_single(device, send_dto->regd[0], DMA_TO_DEVICE);
514 512
515 itt = ntohl(mtask->hdr->itt);
516 opcode = mtask->hdr->opcode & ISCSI_OPCODE_MASK;
517 data_seg_len = ntoh24(mtask->hdr->dlength); 513 data_seg_len = ntoh24(mtask->hdr->dlength);
518 514
519 if (data_seg_len > 0) { 515 if (data_seg_len > 0) {
diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c
index 3aedd59b8a84..fc9f1fd0ae54 100644
--- a/drivers/infiniband/ulp/iser/iser_memory.c
+++ b/drivers/infiniband/ulp/iser/iser_memory.c
@@ -52,7 +52,7 @@
52 */ 52 */
53int iser_regd_buff_release(struct iser_regd_buf *regd_buf) 53int iser_regd_buff_release(struct iser_regd_buf *regd_buf)
54{ 54{
55 struct device *dma_device; 55 struct ib_device *dev;
56 56
57 if ((atomic_read(&regd_buf->ref_count) == 0) || 57 if ((atomic_read(&regd_buf->ref_count) == 0) ||
58 atomic_dec_and_test(&regd_buf->ref_count)) { 58 atomic_dec_and_test(&regd_buf->ref_count)) {
@@ -61,8 +61,8 @@ int iser_regd_buff_release(struct iser_regd_buf *regd_buf)
61 iser_unreg_mem(&regd_buf->reg); 61 iser_unreg_mem(&regd_buf->reg);
62 62
63 if (regd_buf->dma_addr) { 63 if (regd_buf->dma_addr) {
64 dma_device = regd_buf->device->ib_device->dma_device; 64 dev = regd_buf->device->ib_device;
65 dma_unmap_single(dma_device, 65 ib_dma_unmap_single(dev,
66 regd_buf->dma_addr, 66 regd_buf->dma_addr,
67 regd_buf->data_size, 67 regd_buf->data_size,
68 regd_buf->direction); 68 regd_buf->direction);
@@ -84,12 +84,12 @@ void iser_reg_single(struct iser_device *device,
84 struct iser_regd_buf *regd_buf, 84 struct iser_regd_buf *regd_buf,
85 enum dma_data_direction direction) 85 enum dma_data_direction direction)
86{ 86{
87 dma_addr_t dma_addr; 87 u64 dma_addr;
88 88
89 dma_addr = dma_map_single(device->ib_device->dma_device, 89 dma_addr = ib_dma_map_single(device->ib_device,
90 regd_buf->virt_addr, 90 regd_buf->virt_addr,
91 regd_buf->data_size, direction); 91 regd_buf->data_size, direction);
92 BUG_ON(dma_mapping_error(dma_addr)); 92 BUG_ON(ib_dma_mapping_error(device->ib_device, dma_addr));
93 93
94 regd_buf->reg.lkey = device->mr->lkey; 94 regd_buf->reg.lkey = device->mr->lkey;
95 regd_buf->reg.len = regd_buf->data_size; 95 regd_buf->reg.len = regd_buf->data_size;
@@ -107,7 +107,7 @@ int iser_start_rdma_unaligned_sg(struct iscsi_iser_cmd_task *iser_ctask,
107 enum iser_data_dir cmd_dir) 107 enum iser_data_dir cmd_dir)
108{ 108{
109 int dma_nents; 109 int dma_nents;
110 struct device *dma_device; 110 struct ib_device *dev;
111 char *mem = NULL; 111 char *mem = NULL;
112 struct iser_data_buf *data = &iser_ctask->data[cmd_dir]; 112 struct iser_data_buf *data = &iser_ctask->data[cmd_dir];
113 unsigned long cmd_data_len = data->data_len; 113 unsigned long cmd_data_len = data->data_len;
@@ -147,17 +147,12 @@ int iser_start_rdma_unaligned_sg(struct iscsi_iser_cmd_task *iser_ctask,
147 147
148 iser_ctask->data_copy[cmd_dir].copy_buf = mem; 148 iser_ctask->data_copy[cmd_dir].copy_buf = mem;
149 149
150 dma_device = iser_ctask->iser_conn->ib_conn->device->ib_device->dma_device; 150 dev = iser_ctask->iser_conn->ib_conn->device->ib_device;
151 151 dma_nents = ib_dma_map_sg(dev,
152 if (cmd_dir == ISER_DIR_OUT) 152 &iser_ctask->data_copy[cmd_dir].sg_single,
153 dma_nents = dma_map_sg(dma_device, 153 1,
154 &iser_ctask->data_copy[cmd_dir].sg_single, 154 (cmd_dir == ISER_DIR_OUT) ?
155 1, DMA_TO_DEVICE); 155 DMA_TO_DEVICE : DMA_FROM_DEVICE);
156 else
157 dma_nents = dma_map_sg(dma_device,
158 &iser_ctask->data_copy[cmd_dir].sg_single,
159 1, DMA_FROM_DEVICE);
160
161 BUG_ON(dma_nents == 0); 156 BUG_ON(dma_nents == 0);
162 157
163 iser_ctask->data_copy[cmd_dir].dma_nents = dma_nents; 158 iser_ctask->data_copy[cmd_dir].dma_nents = dma_nents;
@@ -170,19 +165,16 @@ int iser_start_rdma_unaligned_sg(struct iscsi_iser_cmd_task *iser_ctask,
170void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_cmd_task *iser_ctask, 165void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_cmd_task *iser_ctask,
171 enum iser_data_dir cmd_dir) 166 enum iser_data_dir cmd_dir)
172{ 167{
173 struct device *dma_device; 168 struct ib_device *dev;
174 struct iser_data_buf *mem_copy; 169 struct iser_data_buf *mem_copy;
175 unsigned long cmd_data_len; 170 unsigned long cmd_data_len;
176 171
177 dma_device = iser_ctask->iser_conn->ib_conn->device->ib_device->dma_device; 172 dev = iser_ctask->iser_conn->ib_conn->device->ib_device;
178 mem_copy = &iser_ctask->data_copy[cmd_dir]; 173 mem_copy = &iser_ctask->data_copy[cmd_dir];
179 174
180 if (cmd_dir == ISER_DIR_OUT) 175 ib_dma_unmap_sg(dev, &mem_copy->sg_single, 1,
181 dma_unmap_sg(dma_device, &mem_copy->sg_single, 1, 176 (cmd_dir == ISER_DIR_OUT) ?
182 DMA_TO_DEVICE); 177 DMA_TO_DEVICE : DMA_FROM_DEVICE);
183 else
184 dma_unmap_sg(dma_device, &mem_copy->sg_single, 1,
185 DMA_FROM_DEVICE);
186 178
187 if (cmd_dir == ISER_DIR_IN) { 179 if (cmd_dir == ISER_DIR_IN) {
188 char *mem; 180 char *mem;
@@ -231,11 +223,12 @@ void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_cmd_task *iser_ctask,
231 * consecutive elements. Also, it handles one entry SG. 223 * consecutive elements. Also, it handles one entry SG.
232 */ 224 */
233static int iser_sg_to_page_vec(struct iser_data_buf *data, 225static int iser_sg_to_page_vec(struct iser_data_buf *data,
234 struct iser_page_vec *page_vec) 226 struct iser_page_vec *page_vec,
227 struct ib_device *ibdev)
235{ 228{
236 struct scatterlist *sg = (struct scatterlist *)data->buf; 229 struct scatterlist *sg = (struct scatterlist *)data->buf;
237 dma_addr_t first_addr, last_addr, page; 230 u64 first_addr, last_addr, page;
238 int start_aligned, end_aligned; 231 int end_aligned;
239 unsigned int cur_page = 0; 232 unsigned int cur_page = 0;
240 unsigned long total_sz = 0; 233 unsigned long total_sz = 0;
241 int i; 234 int i;
@@ -244,19 +237,21 @@ static int iser_sg_to_page_vec(struct iser_data_buf *data,
244 page_vec->offset = (u64) sg[0].offset & ~MASK_4K; 237 page_vec->offset = (u64) sg[0].offset & ~MASK_4K;
245 238
246 for (i = 0; i < data->dma_nents; i++) { 239 for (i = 0; i < data->dma_nents; i++) {
247 total_sz += sg_dma_len(&sg[i]); 240 unsigned int dma_len = ib_sg_dma_len(ibdev, &sg[i]);
241
242 total_sz += dma_len;
248 243
249 first_addr = sg_dma_address(&sg[i]); 244 first_addr = ib_sg_dma_address(ibdev, &sg[i]);
250 last_addr = first_addr + sg_dma_len(&sg[i]); 245 last_addr = first_addr + dma_len;
251 246
252 start_aligned = !(first_addr & ~MASK_4K);
253 end_aligned = !(last_addr & ~MASK_4K); 247 end_aligned = !(last_addr & ~MASK_4K);
254 248
255 /* continue to collect page fragments till aligned or SG ends */ 249 /* continue to collect page fragments till aligned or SG ends */
256 while (!end_aligned && (i + 1 < data->dma_nents)) { 250 while (!end_aligned && (i + 1 < data->dma_nents)) {
257 i++; 251 i++;
258 total_sz += sg_dma_len(&sg[i]); 252 dma_len = ib_sg_dma_len(ibdev, &sg[i]);
259 last_addr = sg_dma_address(&sg[i]) + sg_dma_len(&sg[i]); 253 total_sz += dma_len;
254 last_addr = ib_sg_dma_address(ibdev, &sg[i]) + dma_len;
260 end_aligned = !(last_addr & ~MASK_4K); 255 end_aligned = !(last_addr & ~MASK_4K);
261 } 256 }
262 257
@@ -288,10 +283,11 @@ static int iser_sg_to_page_vec(struct iser_data_buf *data,
288 * the number of entries which are aligned correctly. Supports the case where 283 * the number of entries which are aligned correctly. Supports the case where
289 * consecutive SG elements are actually fragments of the same physical page. 284 * consecutive SG elements are actually fragments of the same physical page.
290 */ 285 */
291static unsigned int iser_data_buf_aligned_len(struct iser_data_buf *data) 286static unsigned int iser_data_buf_aligned_len(struct iser_data_buf *data,
287 struct ib_device *ibdev)
292{ 288{
293 struct scatterlist *sg; 289 struct scatterlist *sg;
294 dma_addr_t end_addr, next_addr; 290 u64 end_addr, next_addr;
295 int i, cnt; 291 int i, cnt;
296 unsigned int ret_len = 0; 292 unsigned int ret_len = 0;
297 293
@@ -303,12 +299,12 @@ static unsigned int iser_data_buf_aligned_len(struct iser_data_buf *data)
303 (unsigned long)page_to_phys(sg[i].page), 299 (unsigned long)page_to_phys(sg[i].page),
304 (unsigned long)sg[i].offset, 300 (unsigned long)sg[i].offset,
305 (unsigned long)sg[i].length); */ 301 (unsigned long)sg[i].length); */
306 end_addr = sg_dma_address(&sg[i]) + 302 end_addr = ib_sg_dma_address(ibdev, &sg[i]) +
307 sg_dma_len(&sg[i]); 303 ib_sg_dma_len(ibdev, &sg[i]);
308 /* iser_dbg("Checking sg iobuf end address " 304 /* iser_dbg("Checking sg iobuf end address "
309 "0x%08lX\n", end_addr); */ 305 "0x%08lX\n", end_addr); */
310 if (i + 1 < data->dma_nents) { 306 if (i + 1 < data->dma_nents) {
311 next_addr = sg_dma_address(&sg[i+1]); 307 next_addr = ib_sg_dma_address(ibdev, &sg[i+1]);
312 /* are i, i+1 fragments of the same page? */ 308 /* are i, i+1 fragments of the same page? */
313 if (end_addr == next_addr) 309 if (end_addr == next_addr)
314 continue; 310 continue;
@@ -325,7 +321,8 @@ static unsigned int iser_data_buf_aligned_len(struct iser_data_buf *data)
325 return ret_len; 321 return ret_len;
326} 322}
327 323
328static void iser_data_buf_dump(struct iser_data_buf *data) 324static void iser_data_buf_dump(struct iser_data_buf *data,
325 struct ib_device *ibdev)
329{ 326{
330 struct scatterlist *sg = (struct scatterlist *)data->buf; 327 struct scatterlist *sg = (struct scatterlist *)data->buf;
331 int i; 328 int i;
@@ -333,9 +330,9 @@ static void iser_data_buf_dump(struct iser_data_buf *data)
333 for (i = 0; i < data->dma_nents; i++) 330 for (i = 0; i < data->dma_nents; i++)
334 iser_err("sg[%d] dma_addr:0x%lX page:0x%p " 331 iser_err("sg[%d] dma_addr:0x%lX page:0x%p "
335 "off:0x%x sz:0x%x dma_len:0x%x\n", 332 "off:0x%x sz:0x%x dma_len:0x%x\n",
336 i, (unsigned long)sg_dma_address(&sg[i]), 333 i, (unsigned long)ib_sg_dma_address(ibdev, &sg[i]),
337 sg[i].page, sg[i].offset, 334 sg[i].page, sg[i].offset,
338 sg[i].length,sg_dma_len(&sg[i])); 335 sg[i].length, ib_sg_dma_len(ibdev, &sg[i]));
339} 336}
340 337
341static void iser_dump_page_vec(struct iser_page_vec *page_vec) 338static void iser_dump_page_vec(struct iser_page_vec *page_vec)
@@ -349,7 +346,8 @@ static void iser_dump_page_vec(struct iser_page_vec *page_vec)
349} 346}
350 347
351static void iser_page_vec_build(struct iser_data_buf *data, 348static void iser_page_vec_build(struct iser_data_buf *data,
352 struct iser_page_vec *page_vec) 349 struct iser_page_vec *page_vec,
350 struct ib_device *ibdev)
353{ 351{
354 int page_vec_len = 0; 352 int page_vec_len = 0;
355 353
@@ -357,14 +355,14 @@ static void iser_page_vec_build(struct iser_data_buf *data,
357 page_vec->offset = 0; 355 page_vec->offset = 0;
358 356
359 iser_dbg("Translating sg sz: %d\n", data->dma_nents); 357 iser_dbg("Translating sg sz: %d\n", data->dma_nents);
360 page_vec_len = iser_sg_to_page_vec(data,page_vec); 358 page_vec_len = iser_sg_to_page_vec(data, page_vec, ibdev);
361 iser_dbg("sg len %d page_vec_len %d\n", data->dma_nents,page_vec_len); 359 iser_dbg("sg len %d page_vec_len %d\n", data->dma_nents,page_vec_len);
362 360
363 page_vec->length = page_vec_len; 361 page_vec->length = page_vec_len;
364 362
365 if (page_vec_len * SIZE_4K < page_vec->data_size) { 363 if (page_vec_len * SIZE_4K < page_vec->data_size) {
366 iser_err("page_vec too short to hold this SG\n"); 364 iser_err("page_vec too short to hold this SG\n");
367 iser_data_buf_dump(data); 365 iser_data_buf_dump(data, ibdev);
368 iser_dump_page_vec(page_vec); 366 iser_dump_page_vec(page_vec);
369 BUG(); 367 BUG();
370 } 368 }
@@ -375,13 +373,12 @@ int iser_dma_map_task_data(struct iscsi_iser_cmd_task *iser_ctask,
375 enum iser_data_dir iser_dir, 373 enum iser_data_dir iser_dir,
376 enum dma_data_direction dma_dir) 374 enum dma_data_direction dma_dir)
377{ 375{
378 struct device *dma_device; 376 struct ib_device *dev;
379 377
380 iser_ctask->dir[iser_dir] = 1; 378 iser_ctask->dir[iser_dir] = 1;
381 dma_device = 379 dev = iser_ctask->iser_conn->ib_conn->device->ib_device;
382 iser_ctask->iser_conn->ib_conn->device->ib_device->dma_device;
383 380
384 data->dma_nents = dma_map_sg(dma_device, data->buf, data->size, dma_dir); 381 data->dma_nents = ib_dma_map_sg(dev, data->buf, data->size, dma_dir);
385 if (data->dma_nents == 0) { 382 if (data->dma_nents == 0) {
386 iser_err("dma_map_sg failed!!!\n"); 383 iser_err("dma_map_sg failed!!!\n");
387 return -EINVAL; 384 return -EINVAL;
@@ -391,20 +388,19 @@ int iser_dma_map_task_data(struct iscsi_iser_cmd_task *iser_ctask,
391 388
392void iser_dma_unmap_task_data(struct iscsi_iser_cmd_task *iser_ctask) 389void iser_dma_unmap_task_data(struct iscsi_iser_cmd_task *iser_ctask)
393{ 390{
394 struct device *dma_device; 391 struct ib_device *dev;
395 struct iser_data_buf *data; 392 struct iser_data_buf *data;
396 393
397 dma_device = 394 dev = iser_ctask->iser_conn->ib_conn->device->ib_device;
398 iser_ctask->iser_conn->ib_conn->device->ib_device->dma_device;
399 395
400 if (iser_ctask->dir[ISER_DIR_IN]) { 396 if (iser_ctask->dir[ISER_DIR_IN]) {
401 data = &iser_ctask->data[ISER_DIR_IN]; 397 data = &iser_ctask->data[ISER_DIR_IN];
402 dma_unmap_sg(dma_device, data->buf, data->size, DMA_FROM_DEVICE); 398 ib_dma_unmap_sg(dev, data->buf, data->size, DMA_FROM_DEVICE);
403 } 399 }
404 400
405 if (iser_ctask->dir[ISER_DIR_OUT]) { 401 if (iser_ctask->dir[ISER_DIR_OUT]) {
406 data = &iser_ctask->data[ISER_DIR_OUT]; 402 data = &iser_ctask->data[ISER_DIR_OUT];
407 dma_unmap_sg(dma_device, data->buf, data->size, DMA_TO_DEVICE); 403 ib_dma_unmap_sg(dev, data->buf, data->size, DMA_TO_DEVICE);
408 } 404 }
409} 405}
410 406
@@ -419,6 +415,7 @@ int iser_reg_rdma_mem(struct iscsi_iser_cmd_task *iser_ctask,
419{ 415{
420 struct iser_conn *ib_conn = iser_ctask->iser_conn->ib_conn; 416 struct iser_conn *ib_conn = iser_ctask->iser_conn->ib_conn;
421 struct iser_device *device = ib_conn->device; 417 struct iser_device *device = ib_conn->device;
418 struct ib_device *ibdev = device->ib_device;
422 struct iser_data_buf *mem = &iser_ctask->data[cmd_dir]; 419 struct iser_data_buf *mem = &iser_ctask->data[cmd_dir];
423 struct iser_regd_buf *regd_buf; 420 struct iser_regd_buf *regd_buf;
424 int aligned_len; 421 int aligned_len;
@@ -428,11 +425,11 @@ int iser_reg_rdma_mem(struct iscsi_iser_cmd_task *iser_ctask,
428 425
429 regd_buf = &iser_ctask->rdma_regd[cmd_dir]; 426 regd_buf = &iser_ctask->rdma_regd[cmd_dir];
430 427
431 aligned_len = iser_data_buf_aligned_len(mem); 428 aligned_len = iser_data_buf_aligned_len(mem, ibdev);
432 if (aligned_len != mem->dma_nents) { 429 if (aligned_len != mem->dma_nents) {
433 iser_err("rdma alignment violation %d/%d aligned\n", 430 iser_err("rdma alignment violation %d/%d aligned\n",
434 aligned_len, mem->size); 431 aligned_len, mem->size);
435 iser_data_buf_dump(mem); 432 iser_data_buf_dump(mem, ibdev);
436 433
437 /* unmap the command data before accessing it */ 434 /* unmap the command data before accessing it */
438 iser_dma_unmap_task_data(iser_ctask); 435 iser_dma_unmap_task_data(iser_ctask);
@@ -450,8 +447,8 @@ int iser_reg_rdma_mem(struct iscsi_iser_cmd_task *iser_ctask,
450 447
451 regd_buf->reg.lkey = device->mr->lkey; 448 regd_buf->reg.lkey = device->mr->lkey;
452 regd_buf->reg.rkey = device->mr->rkey; 449 regd_buf->reg.rkey = device->mr->rkey;
453 regd_buf->reg.len = sg_dma_len(&sg[0]); 450 regd_buf->reg.len = ib_sg_dma_len(ibdev, &sg[0]);
454 regd_buf->reg.va = sg_dma_address(&sg[0]); 451 regd_buf->reg.va = ib_sg_dma_address(ibdev, &sg[0]);
455 regd_buf->reg.is_fmr = 0; 452 regd_buf->reg.is_fmr = 0;
456 453
457 iser_dbg("PHYSICAL Mem.register: lkey: 0x%08X rkey: 0x%08X " 454 iser_dbg("PHYSICAL Mem.register: lkey: 0x%08X rkey: 0x%08X "
@@ -461,10 +458,10 @@ int iser_reg_rdma_mem(struct iscsi_iser_cmd_task *iser_ctask,
461 (unsigned long)regd_buf->reg.va, 458 (unsigned long)regd_buf->reg.va,
462 (unsigned long)regd_buf->reg.len); 459 (unsigned long)regd_buf->reg.len);
463 } else { /* use FMR for multiple dma entries */ 460 } else { /* use FMR for multiple dma entries */
464 iser_page_vec_build(mem, ib_conn->page_vec); 461 iser_page_vec_build(mem, ib_conn->page_vec, ibdev);
465 err = iser_reg_page_vec(ib_conn, ib_conn->page_vec, &regd_buf->reg); 462 err = iser_reg_page_vec(ib_conn, ib_conn->page_vec, &regd_buf->reg);
466 if (err) { 463 if (err) {
467 iser_data_buf_dump(mem); 464 iser_data_buf_dump(mem, ibdev);
468 iser_err("mem->dma_nents = %d (dlength = 0x%x)\n", mem->dma_nents, 465 iser_err("mem->dma_nents = %d (dlength = 0x%x)\n", mem->dma_nents,
469 ntoh24(iser_ctask->desc.iscsi_header.dlength)); 466 ntoh24(iser_ctask->desc.iscsi_header.dlength));
470 iser_err("page_vec: data_size = 0x%x, length = %d, offset = 0x%x\n", 467 iser_err("page_vec: data_size = 0x%x, length = %d, offset = 0x%x\n",
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index a6289595557b..e9b6a6f07dd7 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -122,9 +122,8 @@ static struct srp_iu *srp_alloc_iu(struct srp_host *host, size_t size,
122 if (!iu->buf) 122 if (!iu->buf)
123 goto out_free_iu; 123 goto out_free_iu;
124 124
125 iu->dma = dma_map_single(host->dev->dev->dma_device, 125 iu->dma = ib_dma_map_single(host->dev->dev, iu->buf, size, direction);
126 iu->buf, size, direction); 126 if (ib_dma_mapping_error(host->dev->dev, iu->dma))
127 if (dma_mapping_error(iu->dma))
128 goto out_free_buf; 127 goto out_free_buf;
129 128
130 iu->size = size; 129 iu->size = size;
@@ -145,8 +144,7 @@ static void srp_free_iu(struct srp_host *host, struct srp_iu *iu)
145 if (!iu) 144 if (!iu)
146 return; 145 return;
147 146
148 dma_unmap_single(host->dev->dev->dma_device, 147 ib_dma_unmap_single(host->dev->dev, iu->dma, iu->size, iu->direction);
149 iu->dma, iu->size, iu->direction);
150 kfree(iu->buf); 148 kfree(iu->buf);
151 kfree(iu); 149 kfree(iu);
152} 150}
@@ -482,8 +480,8 @@ static void srp_unmap_data(struct scsi_cmnd *scmnd,
482 scat = &req->fake_sg; 480 scat = &req->fake_sg;
483 } 481 }
484 482
485 dma_unmap_sg(target->srp_host->dev->dev->dma_device, scat, nents, 483 ib_dma_unmap_sg(target->srp_host->dev->dev, scat, nents,
486 scmnd->sc_data_direction); 484 scmnd->sc_data_direction);
487} 485}
488 486
489static void srp_remove_req(struct srp_target_port *target, struct srp_request *req) 487static void srp_remove_req(struct srp_target_port *target, struct srp_request *req)
@@ -595,23 +593,26 @@ static int srp_map_fmr(struct srp_target_port *target, struct scatterlist *scat,
595 int i, j; 593 int i, j;
596 int ret; 594 int ret;
597 struct srp_device *dev = target->srp_host->dev; 595 struct srp_device *dev = target->srp_host->dev;
596 struct ib_device *ibdev = dev->dev;
598 597
599 if (!dev->fmr_pool) 598 if (!dev->fmr_pool)
600 return -ENODEV; 599 return -ENODEV;
601 600
602 if ((sg_dma_address(&scat[0]) & ~dev->fmr_page_mask) && 601 if ((ib_sg_dma_address(ibdev, &scat[0]) & ~dev->fmr_page_mask) &&
603 mellanox_workarounds && !memcmp(&target->ioc_guid, mellanox_oui, 3)) 602 mellanox_workarounds && !memcmp(&target->ioc_guid, mellanox_oui, 3))
604 return -EINVAL; 603 return -EINVAL;
605 604
606 len = page_cnt = 0; 605 len = page_cnt = 0;
607 for (i = 0; i < sg_cnt; ++i) { 606 for (i = 0; i < sg_cnt; ++i) {
608 if (sg_dma_address(&scat[i]) & ~dev->fmr_page_mask) { 607 unsigned int dma_len = ib_sg_dma_len(ibdev, &scat[i]);
608
609 if (ib_sg_dma_address(ibdev, &scat[i]) & ~dev->fmr_page_mask) {
609 if (i > 0) 610 if (i > 0)
610 return -EINVAL; 611 return -EINVAL;
611 else 612 else
612 ++page_cnt; 613 ++page_cnt;
613 } 614 }
614 if ((sg_dma_address(&scat[i]) + sg_dma_len(&scat[i])) & 615 if ((ib_sg_dma_address(ibdev, &scat[i]) + dma_len) &
615 ~dev->fmr_page_mask) { 616 ~dev->fmr_page_mask) {
616 if (i < sg_cnt - 1) 617 if (i < sg_cnt - 1)
617 return -EINVAL; 618 return -EINVAL;
@@ -619,7 +620,7 @@ static int srp_map_fmr(struct srp_target_port *target, struct scatterlist *scat,
619 ++page_cnt; 620 ++page_cnt;
620 } 621 }
621 622
622 len += sg_dma_len(&scat[i]); 623 len += dma_len;
623 } 624 }
624 625
625 page_cnt += len >> dev->fmr_page_shift; 626 page_cnt += len >> dev->fmr_page_shift;
@@ -631,10 +632,14 @@ static int srp_map_fmr(struct srp_target_port *target, struct scatterlist *scat,
631 return -ENOMEM; 632 return -ENOMEM;
632 633
633 page_cnt = 0; 634 page_cnt = 0;
634 for (i = 0; i < sg_cnt; ++i) 635 for (i = 0; i < sg_cnt; ++i) {
635 for (j = 0; j < sg_dma_len(&scat[i]); j += dev->fmr_page_size) 636 unsigned int dma_len = ib_sg_dma_len(ibdev, &scat[i]);
637
638 for (j = 0; j < dma_len; j += dev->fmr_page_size)
636 dma_pages[page_cnt++] = 639 dma_pages[page_cnt++] =
637 (sg_dma_address(&scat[i]) & dev->fmr_page_mask) + j; 640 (ib_sg_dma_address(ibdev, &scat[i]) &
641 dev->fmr_page_mask) + j;
642 }
638 643
639 req->fmr = ib_fmr_pool_map_phys(dev->fmr_pool, 644 req->fmr = ib_fmr_pool_map_phys(dev->fmr_pool,
640 dma_pages, page_cnt, io_addr); 645 dma_pages, page_cnt, io_addr);
@@ -644,7 +649,8 @@ static int srp_map_fmr(struct srp_target_port *target, struct scatterlist *scat,
644 goto out; 649 goto out;
645 } 650 }
646 651
647 buf->va = cpu_to_be64(sg_dma_address(&scat[0]) & ~dev->fmr_page_mask); 652 buf->va = cpu_to_be64(ib_sg_dma_address(ibdev, &scat[0]) &
653 ~dev->fmr_page_mask);
648 buf->key = cpu_to_be32(req->fmr->fmr->rkey); 654 buf->key = cpu_to_be32(req->fmr->fmr->rkey);
649 buf->len = cpu_to_be32(len); 655 buf->len = cpu_to_be32(len);
650 656
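
The rewritten loop above folds each SG element into the FMR page list one fmr_page_size chunk at a time, rounding each address down to a page boundary with fmr_page_mask. The same arithmetic as a standalone sketch; build_fmr_pages() is illustrative, with the mask and size lifted from the srp_device fields:

#include <rdma/ib_verbs.h>

/* Sketch: emit one page-aligned DMA address per fmr_page_size chunk
 * of every SG element; returns the number of pages emitted. */
static int build_fmr_pages(struct ib_device *ibdev, struct scatterlist *scat,
			   int sg_cnt, u64 *dma_pages,
			   u64 fmr_page_mask, unsigned int fmr_page_size)
{
	int i, page_cnt = 0;
	unsigned int j;

	for (i = 0; i < sg_cnt; ++i) {
		unsigned int dma_len = ib_sg_dma_len(ibdev, &scat[i]);

		for (j = 0; j < dma_len; j += fmr_page_size)
			dma_pages[page_cnt++] =
				(ib_sg_dma_address(ibdev, &scat[i]) &
				 fmr_page_mask) + j;
	}
	return page_cnt;
}

Caching ib_sg_dma_len() once per element, as the patch does, also avoids re-invoking a driver's dma_len hook on every inner-loop iteration.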
@@ -663,6 +669,8 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,
663 struct srp_cmd *cmd = req->cmd->buf; 669 struct srp_cmd *cmd = req->cmd->buf;
664 int len, nents, count; 670 int len, nents, count;
665 u8 fmt = SRP_DATA_DESC_DIRECT; 671 u8 fmt = SRP_DATA_DESC_DIRECT;
672 struct srp_device *dev;
673 struct ib_device *ibdev;
666 674
667 if (!scmnd->request_buffer || scmnd->sc_data_direction == DMA_NONE) 675 if (!scmnd->request_buffer || scmnd->sc_data_direction == DMA_NONE)
668 return sizeof (struct srp_cmd); 676 return sizeof (struct srp_cmd);
@@ -687,8 +695,10 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,
687 sg_init_one(scat, scmnd->request_buffer, scmnd->request_bufflen); 695 sg_init_one(scat, scmnd->request_buffer, scmnd->request_bufflen);
688 } 696 }
689 697
690 count = dma_map_sg(target->srp_host->dev->dev->dma_device, 698 dev = target->srp_host->dev;
691 scat, nents, scmnd->sc_data_direction); 699 ibdev = dev->dev;
700
701 count = ib_dma_map_sg(ibdev, scat, nents, scmnd->sc_data_direction);
692 702
693 fmt = SRP_DATA_DESC_DIRECT; 703 fmt = SRP_DATA_DESC_DIRECT;
694 len = sizeof (struct srp_cmd) + sizeof (struct srp_direct_buf); 704 len = sizeof (struct srp_cmd) + sizeof (struct srp_direct_buf);
@@ -702,9 +712,9 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,
702 */ 712 */
703 struct srp_direct_buf *buf = (void *) cmd->add_data; 713 struct srp_direct_buf *buf = (void *) cmd->add_data;
704 714
705 buf->va = cpu_to_be64(sg_dma_address(scat)); 715 buf->va = cpu_to_be64(ib_sg_dma_address(ibdev, scat));
706 buf->key = cpu_to_be32(target->srp_host->dev->mr->rkey); 716 buf->key = cpu_to_be32(dev->mr->rkey);
707 buf->len = cpu_to_be32(sg_dma_len(scat)); 717 buf->len = cpu_to_be32(ib_sg_dma_len(ibdev, scat));
708 } else if (srp_map_fmr(target, scat, count, req, 718 } else if (srp_map_fmr(target, scat, count, req,
709 (void *) cmd->add_data)) { 719 (void *) cmd->add_data)) {
710 /* 720 /*
@@ -722,13 +732,14 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,
722 count * sizeof (struct srp_direct_buf); 732 count * sizeof (struct srp_direct_buf);
723 733
724 for (i = 0; i < count; ++i) { 734 for (i = 0; i < count; ++i) {
735 unsigned int dma_len = ib_sg_dma_len(ibdev, &scat[i]);
736
725 buf->desc_list[i].va = 737 buf->desc_list[i].va =
726 cpu_to_be64(sg_dma_address(&scat[i])); 738 cpu_to_be64(ib_sg_dma_address(ibdev, &scat[i]));
727 buf->desc_list[i].key = 739 buf->desc_list[i].key =
728 cpu_to_be32(target->srp_host->dev->mr->rkey); 740 cpu_to_be32(dev->mr->rkey);
729 buf->desc_list[i].len = 741 buf->desc_list[i].len = cpu_to_be32(dma_len);
730 cpu_to_be32(sg_dma_len(&scat[i])); 742 datalen += dma_len;
731 datalen += sg_dma_len(&scat[i]);
732 } 743 }
733 744
734 if (scmnd->sc_data_direction == DMA_TO_DEVICE) 745 if (scmnd->sc_data_direction == DMA_TO_DEVICE)
@@ -808,13 +819,15 @@ static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp)
808 819
809static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc) 820static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc)
810{ 821{
822 struct ib_device *dev;
811 struct srp_iu *iu; 823 struct srp_iu *iu;
812 u8 opcode; 824 u8 opcode;
813 825
814 iu = target->rx_ring[wc->wr_id & ~SRP_OP_RECV]; 826 iu = target->rx_ring[wc->wr_id & ~SRP_OP_RECV];
815 827
816 dma_sync_single_for_cpu(target->srp_host->dev->dev->dma_device, iu->dma, 828 dev = target->srp_host->dev->dev;
817 target->max_ti_iu_len, DMA_FROM_DEVICE); 829 ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_ti_iu_len,
830 DMA_FROM_DEVICE);
818 831
819 opcode = *(u8 *) iu->buf; 832 opcode = *(u8 *) iu->buf;
820 833
@@ -850,8 +863,8 @@ static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc)
850 break; 863 break;
851 } 864 }
852 865
853 dma_sync_single_for_device(target->srp_host->dev->dev->dma_device, iu->dma, 866 ib_dma_sync_single_for_device(dev, iu->dma, target->max_ti_iu_len,
854 target->max_ti_iu_len, DMA_FROM_DEVICE); 867 DMA_FROM_DEVICE);
855} 868}
856 869
857static void srp_completion(struct ib_cq *cq, void *target_ptr) 870static void srp_completion(struct ib_cq *cq, void *target_ptr)
@@ -969,6 +982,7 @@ static int srp_queuecommand(struct scsi_cmnd *scmnd,
969 struct srp_request *req; 982 struct srp_request *req;
970 struct srp_iu *iu; 983 struct srp_iu *iu;
971 struct srp_cmd *cmd; 984 struct srp_cmd *cmd;
985 struct ib_device *dev;
972 int len; 986 int len;
973 987
974 if (target->state == SRP_TARGET_CONNECTING) 988 if (target->state == SRP_TARGET_CONNECTING)
@@ -985,8 +999,9 @@ static int srp_queuecommand(struct scsi_cmnd *scmnd,
985 if (!iu) 999 if (!iu)
986 goto err; 1000 goto err;
987 1001
988 dma_sync_single_for_cpu(target->srp_host->dev->dev->dma_device, iu->dma, 1002 dev = target->srp_host->dev->dev;
989 srp_max_iu_len, DMA_TO_DEVICE); 1003 ib_dma_sync_single_for_cpu(dev, iu->dma, srp_max_iu_len,
1004 DMA_TO_DEVICE);
990 1005
991 req = list_entry(target->free_reqs.next, struct srp_request, list); 1006 req = list_entry(target->free_reqs.next, struct srp_request, list);
992 1007
@@ -1018,8 +1033,8 @@ static int srp_queuecommand(struct scsi_cmnd *scmnd,
1018 goto err_unmap; 1033 goto err_unmap;
1019 } 1034 }
1020 1035
1021 dma_sync_single_for_device(target->srp_host->dev->dev->dma_device, iu->dma, 1036 ib_dma_sync_single_for_device(dev, iu->dma, srp_max_iu_len,
1022 srp_max_iu_len, DMA_TO_DEVICE); 1037 DMA_TO_DEVICE);
1023 1038
1024 if (__srp_post_send(target, iu, len)) { 1039 if (__srp_post_send(target, iu, len)) {
1025 printk(KERN_ERR PFX "Send failed\n"); 1040 printk(KERN_ERR PFX "Send failed\n");
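
srp_handle_recv() and srp_queuecommand() now bracket every CPU access to a long-lived IU buffer with the new sync wrappers, giving a device with a private mapping scheme the chance to flush or invalidate around the access. The shape of that bracket as a sketch; the memset stands in for building the request:

#include <linux/string.h>
#include <rdma/ib_verbs.h>
#include "ib_srp.h"		/* struct srp_iu */

/* Sketch: claim the buffer for the CPU, touch it, then hand it back
 * to the device before posting a send that references iu->dma. */
static void touch_iu(struct ib_device *dev, struct srp_iu *iu, size_t len)
{
	ib_dma_sync_single_for_cpu(dev, iu->dma, len, DMA_TO_DEVICE);
	memset(iu->buf, 0, len);		/* CPU builds the request */
	ib_dma_sync_single_for_device(dev, iu->dma, len, DMA_TO_DEVICE);
}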
diff --git a/drivers/infiniband/ulp/srp/ib_srp.h b/drivers/infiniband/ulp/srp/ib_srp.h
index d4e35ef51374..868a540ef7cd 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.h
+++ b/drivers/infiniband/ulp/srp/ib_srp.h
@@ -161,7 +161,7 @@ struct srp_target_port {
161}; 161};
162 162
163struct srp_iu { 163struct srp_iu {
164 dma_addr_t dma; 164 u64 dma;
165 void *buf; 165 void *buf;
166 size_t size; 166 size_t size;
167 enum dma_data_direction direction; 167 enum dma_data_direction direction;
diff --git a/include/rdma/ib_marshall.h b/include/rdma/ib_marshall.h
index 66bf4d7d0dfb..db037205c9e8 100644
--- a/include/rdma/ib_marshall.h
+++ b/include/rdma/ib_marshall.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2005 Intel Corporation. All rights reserved. 2 * Copyright (c) 2005-2006 Intel Corporation. All rights reserved.
3 * 3 *
4 * This software is available to you under a choice of one of two 4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU 5 * licenses. You may choose to be licensed under the terms of the GNU
@@ -41,6 +41,9 @@
41void ib_copy_qp_attr_to_user(struct ib_uverbs_qp_attr *dst, 41void ib_copy_qp_attr_to_user(struct ib_uverbs_qp_attr *dst,
42 struct ib_qp_attr *src); 42 struct ib_qp_attr *src);
43 43
44void ib_copy_ah_attr_to_user(struct ib_uverbs_ah_attr *dst,
45 struct ib_ah_attr *src);
46
44void ib_copy_path_rec_to_user(struct ib_user_path_rec *dst, 47void ib_copy_path_rec_to_user(struct ib_user_path_rec *dst,
45 struct ib_sa_path_rec *src); 48 struct ib_sa_path_rec *src);
46 49
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 8eacc3510993..fd2353fa7e12 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -43,6 +43,8 @@
43 43
44#include <linux/types.h> 44#include <linux/types.h>
45#include <linux/device.h> 45#include <linux/device.h>
46#include <linux/mm.h>
47#include <linux/dma-mapping.h>
46 48
47#include <asm/atomic.h> 49#include <asm/atomic.h>
48#include <asm/scatterlist.h> 50#include <asm/scatterlist.h>
@@ -848,6 +850,49 @@ struct ib_cache {
848 u8 *lmc_cache; 850 u8 *lmc_cache;
849}; 851};
850 852
853struct ib_dma_mapping_ops {
854 int (*mapping_error)(struct ib_device *dev,
855 u64 dma_addr);
856 u64 (*map_single)(struct ib_device *dev,
857 void *ptr, size_t size,
858 enum dma_data_direction direction);
859 void (*unmap_single)(struct ib_device *dev,
860 u64 addr, size_t size,
861 enum dma_data_direction direction);
862 u64 (*map_page)(struct ib_device *dev,
863 struct page *page, unsigned long offset,
864 size_t size,
865 enum dma_data_direction direction);
866 void (*unmap_page)(struct ib_device *dev,
867 u64 addr, size_t size,
868 enum dma_data_direction direction);
869 int (*map_sg)(struct ib_device *dev,
870 struct scatterlist *sg, int nents,
871 enum dma_data_direction direction);
872 void (*unmap_sg)(struct ib_device *dev,
873 struct scatterlist *sg, int nents,
874 enum dma_data_direction direction);
875 u64 (*dma_address)(struct ib_device *dev,
876 struct scatterlist *sg);
877 unsigned int (*dma_len)(struct ib_device *dev,
878 struct scatterlist *sg);
879 void (*sync_single_for_cpu)(struct ib_device *dev,
880 u64 dma_handle,
881 size_t size,
882 enum dma_data_direction dir);
883 void (*sync_single_for_device)(struct ib_device *dev,
884 u64 dma_handle,
885 size_t size,
886 enum dma_data_direction dir);
887 void *(*alloc_coherent)(struct ib_device *dev,
888 size_t size,
889 u64 *dma_handle,
890 gfp_t flag);
891 void (*free_coherent)(struct ib_device *dev,
892 size_t size, void *cpu_addr,
893 u64 dma_handle);
894};
895
851struct iw_cm_verbs; 896struct iw_cm_verbs;
852 897
853struct ib_device { 898struct ib_device {
@@ -992,6 +1037,8 @@ struct ib_device {
992 struct ib_mad *in_mad, 1037 struct ib_mad *in_mad,
993 struct ib_mad *out_mad); 1038 struct ib_mad *out_mad);
994 1039
1040 struct ib_dma_mapping_ops *dma_ops;
1041
995 struct module *owner; 1042 struct module *owner;
996 struct class_device class_dev; 1043 struct class_device class_dev;
997 struct kobject ports_parent; 1044 struct kobject ports_parent;
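
A driver that bypasses the generic DMA API (ipath, in this series) supplies a struct ib_dma_mapping_ops and points dma_ops at it; every other device leaves dma_ops NULL and the wrappers below fall through to the dma_*() calls. A skeletal sketch, assuming a device whose DMA addresses are plain kernel virtual addresses; the names and the zero bad-address sentinel are illustrative:

#include <rdma/ib_verbs.h>

#define SKETCH_BAD_DMA_ADDR ((u64) 0)

static int sketch_mapping_error(struct ib_device *dev, u64 dma_addr)
{
	return dma_addr == SKETCH_BAD_DMA_ADDR;
}

static u64 sketch_map_single(struct ib_device *dev, void *cpu_addr,
			     size_t size, enum dma_data_direction direction)
{
	/* the "DMA address" is just the kernel virtual address */
	return (u64) (unsigned long) cpu_addr;
}

static void sketch_unmap_single(struct ib_device *dev, u64 addr,
				size_t size, enum dma_data_direction direction)
{
	/* nothing to undo */
}

static struct ib_dma_mapping_ops sketch_dma_ops = {
	.mapping_error	= sketch_mapping_error,
	.map_single	= sketch_map_single,
	.unmap_single	= sketch_unmap_single,
	/* .map_page, .map_sg, .dma_address, .dma_len, the sync and
	 * coherent ops follow the same shape */
};

/* at device registration time: ibdev->dma_ops = &sketch_dma_ops; */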
@@ -1395,10 +1442,216 @@ static inline int ib_req_ncomp_notif(struct ib_cq *cq, int wc_cnt)
1395 * usable for DMA. 1442 * usable for DMA.
1396 * @pd: The protection domain associated with the memory region. 1443 * @pd: The protection domain associated with the memory region.
1397 * @mr_access_flags: Specifies the memory access rights. 1444 * @mr_access_flags: Specifies the memory access rights.
1445 *
1446 * Note that the ib_dma_*() functions defined below must be used
1447 * to create/destroy addresses used with the Lkey or Rkey returned
1448 * by ib_get_dma_mr().
1398 */ 1449 */
1399struct ib_mr *ib_get_dma_mr(struct ib_pd *pd, int mr_access_flags); 1450struct ib_mr *ib_get_dma_mr(struct ib_pd *pd, int mr_access_flags);
1400 1451
1401/** 1452/**
1453 * ib_dma_mapping_error - check a DMA addr for error
1454 * @dev: The device for which the dma_addr was created
1455 * @dma_addr: The DMA address to check
1456 */
1457static inline int ib_dma_mapping_error(struct ib_device *dev, u64 dma_addr)
1458{
1459 return dev->dma_ops ?
1460 dev->dma_ops->mapping_error(dev, dma_addr) :
1461 dma_mapping_error(dma_addr);
1462}
1463
1464/**
1465 * ib_dma_map_single - Map a kernel virtual address to DMA address
1466 * @dev: The device for which the dma_addr is to be created
1467 * @cpu_addr: The kernel virtual address
1468 * @size: The size of the region in bytes
1469 * @direction: The direction of the DMA
1470 */
1471static inline u64 ib_dma_map_single(struct ib_device *dev,
1472 void *cpu_addr, size_t size,
1473 enum dma_data_direction direction)
1474{
1475 return dev->dma_ops ?
1476 dev->dma_ops->map_single(dev, cpu_addr, size, direction) :
1477 dma_map_single(dev->dma_device, cpu_addr, size, direction);
1478}
1479
1480/**
1481 * ib_dma_unmap_single - Destroy a mapping created by ib_dma_map_single()
1482 * @dev: The device for which the DMA address was created
1483 * @addr: The DMA address
1484 * @size: The size of the region in bytes
1485 * @direction: The direction of the DMA
1486 */
1487static inline void ib_dma_unmap_single(struct ib_device *dev,
1488 u64 addr, size_t size,
1489 enum dma_data_direction direction)
1490{
1491 dev->dma_ops ?
1492 dev->dma_ops->unmap_single(dev, addr, size, direction) :
1493 dma_unmap_single(dev->dma_device, addr, size, direction);
1494}
1495
1496/**
1497 * ib_dma_map_page - Map a physical page to DMA address
1498 * @dev: The device for which the dma_addr is to be created
1499 * @page: The page to be mapped
1500 * @offset: The offset within the page
1501 * @size: The size of the region in bytes
1502 * @direction: The direction of the DMA
1503 */
1504static inline u64 ib_dma_map_page(struct ib_device *dev,
1505 struct page *page,
1506 unsigned long offset,
1507 size_t size,
1508 enum dma_data_direction direction)
1509{
1510 return dev->dma_ops ?
1511 dev->dma_ops->map_page(dev, page, offset, size, direction) :
1512 dma_map_page(dev->dma_device, page, offset, size, direction);
1513}
1514
1515/**
1516 * ib_dma_unmap_page - Destroy a mapping created by ib_dma_map_page()
1517 * @dev: The device for which the DMA address was created
1518 * @addr: The DMA address
1519 * @size: The size of the region in bytes
1520 * @direction: The direction of the DMA
1521 */
1522static inline void ib_dma_unmap_page(struct ib_device *dev,
1523 u64 addr, size_t size,
1524 enum dma_data_direction direction)
1525{
1526 dev->dma_ops ?
1527 dev->dma_ops->unmap_page(dev, addr, size, direction) :
1528 dma_unmap_page(dev->dma_device, addr, size, direction);
1529}
1530
1531/**
1532 * ib_dma_map_sg - Map a scatter/gather list to DMA addresses
1533 * @dev: The device for which the DMA addresses are to be created
1534 * @sg: The array of scatter/gather entries
1535 * @nents: The number of scatter/gather entries
1536 * @direction: The direction of the DMA
1537 */
1538static inline int ib_dma_map_sg(struct ib_device *dev,
1539 struct scatterlist *sg, int nents,
1540 enum dma_data_direction direction)
1541{
1542 return dev->dma_ops ?
1543 dev->dma_ops->map_sg(dev, sg, nents, direction) :
1544 dma_map_sg(dev->dma_device, sg, nents, direction);
1545}
1546
1547/**
1548 * ib_dma_unmap_sg - Unmap a scatter/gather list of DMA addresses
1549 * @dev: The device for which the DMA addresses were created
1550 * @sg: The array of scatter/gather entries
1551 * @nents: The number of scatter/gather entries
1552 * @direction: The direction of the DMA
1553 */
1554static inline void ib_dma_unmap_sg(struct ib_device *dev,
1555 struct scatterlist *sg, int nents,
1556 enum dma_data_direction direction)
1557{
1558 dev->dma_ops ?
1559 dev->dma_ops->unmap_sg(dev, sg, nents, direction) :
1560 dma_unmap_sg(dev->dma_device, sg, nents, direction);
1561}
1562
1563/**
1564 * ib_sg_dma_address - Return the DMA address from a scatter/gather entry
1565 * @dev: The device for which the DMA addresses were created
1566 * @sg: The scatter/gather entry
1567 */
1568static inline u64 ib_sg_dma_address(struct ib_device *dev,
1569 struct scatterlist *sg)
1570{
1571 return dev->dma_ops ?
1572 dev->dma_ops->dma_address(dev, sg) : sg_dma_address(sg);
1573}
1574
1575/**
1576 * ib_sg_dma_len - Return the DMA length from a scatter/gather entry
1577 * @dev: The device for which the DMA addresses were created
1578 * @sg: The scatter/gather entry
1579 */
1580static inline unsigned int ib_sg_dma_len(struct ib_device *dev,
1581 struct scatterlist *sg)
1582{
1583 return dev->dma_ops ?
1584 dev->dma_ops->dma_len(dev, sg) : sg_dma_len(sg);
1585}
1586
1587/**
1588 * ib_dma_sync_single_for_cpu - Prepare DMA region to be accessed by CPU
1589 * @dev: The device for which the DMA address was created
1590 * @addr: The DMA address
1591 * @size: The size of the region in bytes
1592 * @dir: The direction of the DMA
1593 */
1594static inline void ib_dma_sync_single_for_cpu(struct ib_device *dev,
1595 u64 addr,
1596 size_t size,
1597 enum dma_data_direction dir)
1598{
1599 dev->dma_ops ?
1600 dev->dma_ops->sync_single_for_cpu(dev, addr, size, dir) :
1601 dma_sync_single_for_cpu(dev->dma_device, addr, size, dir);
1602}
1603
1604/**
1605 * ib_dma_sync_single_for_device - Prepare DMA region to be accessed by device
1606 * @dev: The device for which the DMA address was created
1607 * @addr: The DMA address
1608 * @size: The size of the region in bytes
1609 * @dir: The direction of the DMA
1610 */
1611static inline void ib_dma_sync_single_for_device(struct ib_device *dev,
1612 u64 addr,
1613 size_t size,
1614 enum dma_data_direction dir)
1615{
1616 dev->dma_ops ?
1617 dev->dma_ops->sync_single_for_device(dev, addr, size, dir) :
1618 dma_sync_single_for_device(dev->dma_device, addr, size, dir);
1619}
1620
1621/**
1622 * ib_dma_alloc_coherent - Allocate memory and map it for DMA
1623 * @dev: The device for which the DMA address is requested
1624 * @size: The size of the region to allocate in bytes
1625 * @dma_handle: A pointer for returning the DMA address of the region
1626 * @flag: memory allocator flags
1627 */
1628static inline void *ib_dma_alloc_coherent(struct ib_device *dev,
1629 size_t size,
1630 u64 *dma_handle,
1631 gfp_t flag)
1632{
1633 return dev->dma_ops ?
1634 dev->dma_ops->alloc_coherent(dev, size, dma_handle, flag) :
1635 dma_alloc_coherent(dev->dma_device, size, dma_handle, flag);
1636}
1637
1638/**
1639 * ib_dma_free_coherent - Free memory allocated by ib_dma_alloc_coherent()
1640 * @dev: The device for which the DMA addresses were allocated
1641 * @size: The size of the region
1642 * @cpu_addr: the address returned by ib_dma_alloc_coherent()
1643 * @dma_handle: the DMA address returned by ib_dma_alloc_coherent()
1644 */
1645static inline void ib_dma_free_coherent(struct ib_device *dev,
1646 size_t size, void *cpu_addr,
1647 u64 dma_handle)
1648{
1649 dev->dma_ops ?
1650 dev->dma_ops->free_coherent(dev, size, cpu_addr, dma_handle) :
1651 dma_free_coherent(dev->dma_device, size, cpu_addr, dma_handle);
1652}
1653
1654/**
1402 * ib_reg_phys_mr - Prepares a virtually addressed memory region for use 1655 * ib_reg_phys_mr - Prepares a virtually addressed memory region for use
1403 * by an HCA. 1656 * by an HCA.
1404 * @pd: The protection domain assigned to the registered region.
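
For a ULP, the note above works out to: any address handed to the HCA with the lkey or rkey from ib_get_dma_mr() must come from the ib_dma_*() wrappers, never from dma_map_*() directly. A minimal usage sketch with hypothetical names:

#include <rdma/ib_verbs.h>

/* Sketch: map a kernel buffer for a send work request. The returned
 * handle is paired with the lkey from ib_get_dma_mr(). */
static int map_for_send(struct ib_device *dev, void *buf, size_t len,
			u64 *dma_out)
{
	u64 dma = ib_dma_map_single(dev, buf, len, DMA_TO_DEVICE);

	if (ib_dma_mapping_error(dev, dma))
		return -ENOMEM;
	*dma_out = dma;
	return 0;
}

/* after the send completes:
 *	ib_dma_unmap_single(dev, dma, len, DMA_TO_DEVICE);
 */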
diff --git a/include/rdma/rdma_cm.h b/include/rdma/rdma_cm.h
index deb5a0a4cee5..36cd8a8526a0 100644
--- a/include/rdma/rdma_cm.h
+++ b/include/rdma/rdma_cm.h
@@ -77,11 +77,34 @@ struct rdma_route {
77 int num_paths; 77 int num_paths;
78}; 78};
79 79
80struct rdma_conn_param {
81 const void *private_data;
82 u8 private_data_len;
83 u8 responder_resources;
84 u8 initiator_depth;
85 u8 flow_control;
86 u8 retry_count; /* ignored when accepting */
87 u8 rnr_retry_count;
88 /* Fields below ignored if a QP is created on the rdma_cm_id. */
89 u8 srq;
90 u32 qp_num;
91};
92
93struct rdma_ud_param {
94 const void *private_data;
95 u8 private_data_len;
96 struct ib_ah_attr ah_attr;
97 u32 qp_num;
98 u32 qkey;
99};
100
80struct rdma_cm_event { 101struct rdma_cm_event {
81 enum rdma_cm_event_type event; 102 enum rdma_cm_event_type event;
82 int status; 103 int status;
83 void *private_data; 104 union {
84 u8 private_data_len; 105 struct rdma_conn_param conn;
106 struct rdma_ud_param ud;
107 } param;
85}; 108};
86 109
87struct rdma_cm_id; 110struct rdma_cm_id;
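
With private_data folded into per-transport parameter structs, an event handler selects the union member by event type and port space. A sketch, under the assumption that connect requests carry param.conn and that for RDMA_PS_UDP the remote QP information arrives in param.ud with the established event:

#include <rdma/rdma_cm.h>

/* Sketch: pull private data out of the new event layout. */
static int sketch_cm_handler(struct rdma_cm_id *id,
			     struct rdma_cm_event *event)
{
	const void *pdata = NULL;
	u8 pdata_len = 0;

	switch (event->event) {
	case RDMA_CM_EVENT_CONNECT_REQUEST:
		pdata	  = event->param.conn.private_data;
		pdata_len = event->param.conn.private_data_len;
		break;
	case RDMA_CM_EVENT_ESTABLISHED:
		/* on a UD port space this carries ah_attr/qp_num/qkey too */
		pdata	  = event->param.ud.private_data;
		pdata_len = event->param.ud.private_data_len;
		break;
	default:
		break;
	}
	pr_debug("event %d, %u private data bytes at %p\n",
		 event->event, pdata_len, pdata);
	return 0;
}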
@@ -204,25 +227,17 @@ void rdma_destroy_qp(struct rdma_cm_id *id);
204int rdma_init_qp_attr(struct rdma_cm_id *id, struct ib_qp_attr *qp_attr, 227int rdma_init_qp_attr(struct rdma_cm_id *id, struct ib_qp_attr *qp_attr,
205 int *qp_attr_mask); 228 int *qp_attr_mask);
206 229
207struct rdma_conn_param {
208 const void *private_data;
209 u8 private_data_len;
210 u8 responder_resources;
211 u8 initiator_depth;
212 u8 flow_control;
213 u8 retry_count; /* ignored when accepting */
214 u8 rnr_retry_count;
215 /* Fields below ignored if a QP is created on the rdma_cm_id. */
216 u8 srq;
217 u32 qp_num;
218 enum ib_qp_type qp_type;
219};
220
221/** 230/**
222 * rdma_connect - Initiate an active connection request. 231 * rdma_connect - Initiate an active connection request.
232 * @id: Connection identifier to connect.
233 * @conn_param: Connection information used for connected QPs.
223 * 234 *
224 * Users must have resolved a route for the rdma_cm_id to connect with 235 * Users must have resolved a route for the rdma_cm_id to connect with
225 * by having called rdma_resolve_route before calling this routine. 236 * by having called rdma_resolve_route before calling this routine.
237 *
238 * This call will either connect to a remote QP or obtain remote QP
239 * information for unconnected rdma_cm_id's. The actual operation is
240 * based on the rdma_cm_id's port space.
226 */ 241 */
227int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param); 242int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param);
228 243
@@ -253,6 +268,21 @@ int rdma_listen(struct rdma_cm_id *id, int backlog);
253int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param); 268int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param);
254 269
255/** 270/**
271 * rdma_notify - Notifies the RDMA CM of an asynchronous event that has
272 * occurred on the connection.
273 * @id: Connection identifier to transition to established.
274 * @event: Asynchronous event.
275 *
276 * This routine should be invoked by users to notify the CM of relevant
277 * communication events. Events that should be reported to the CM and
278 * when to report them are:
279 *
280 * IB_EVENT_COMM_EST - Used when a message is received on a connected
281 * QP before an RTU has been received.
282 */
283int rdma_notify(struct rdma_cm_id *id, enum ib_event_type event);
284
285/**
256 * rdma_reject - Called to reject a connection request or response. 286 * rdma_reject - Called to reject a connection request or response.
257 */ 287 */
258int rdma_reject(struct rdma_cm_id *id, const void *private_data, 288int rdma_reject(struct rdma_cm_id *id, const void *private_data,
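
The lost-CM-message case motivates rdma_notify(): when data arrives on a connected QP before the RTU does, the QP raises IB_EVENT_COMM_EST, and the QP's owner forwards the event so the CM can transition to established anyway. A sketch of the forwarding, wired into a QP event handler; the names are illustrative:

#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>

/* Sketch: QP event handler registered via ib_qp_init_attr, with the
 * rdma_cm_id stashed in the qp_context pointer. */
static void sketch_qp_event(struct ib_event *event, void *context)
{
	struct rdma_cm_id *id = context;

	if (event->event == IB_EVENT_COMM_EST)
		rdma_notify(id, IB_EVENT_COMM_EST);
}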
diff --git a/include/rdma/rdma_cm_ib.h b/include/rdma/rdma_cm_ib.h
index e8c3af1804d4..9b176df1d667 100644
--- a/include/rdma/rdma_cm_ib.h
+++ b/include/rdma/rdma_cm_ib.h
@@ -44,4 +44,7 @@
44int rdma_set_ib_paths(struct rdma_cm_id *id, 44int rdma_set_ib_paths(struct rdma_cm_id *id,
45 struct ib_sa_path_rec *path_rec, int num_paths); 45 struct ib_sa_path_rec *path_rec, int num_paths);
46 46
47/* Global qkey for UD QPs and multicast groups. */
48#define RDMA_UD_QKEY 0x01234567
49
47#endif /* RDMA_CM_IB_H */ 50#endif /* RDMA_CM_IB_H */
diff --git a/include/rdma/rdma_user_cm.h b/include/rdma/rdma_user_cm.h
new file mode 100644
index 000000000000..9572ab8eeac1
--- /dev/null
+++ b/include/rdma/rdma_user_cm.h
@@ -0,0 +1,206 @@
1/*
2 * Copyright (c) 2005-2006 Intel Corporation. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#ifndef RDMA_USER_CM_H
34#define RDMA_USER_CM_H
35
36#include <linux/types.h>
37#include <linux/in6.h>
38#include <rdma/ib_user_verbs.h>
39#include <rdma/ib_user_sa.h>
40
41#define RDMA_USER_CM_ABI_VERSION 3
42
43#define RDMA_MAX_PRIVATE_DATA 256
44
45enum {
46 RDMA_USER_CM_CMD_CREATE_ID,
47 RDMA_USER_CM_CMD_DESTROY_ID,
48 RDMA_USER_CM_CMD_BIND_ADDR,
49 RDMA_USER_CM_CMD_RESOLVE_ADDR,
50 RDMA_USER_CM_CMD_RESOLVE_ROUTE,
51 RDMA_USER_CM_CMD_QUERY_ROUTE,
52 RDMA_USER_CM_CMD_CONNECT,
53 RDMA_USER_CM_CMD_LISTEN,
54 RDMA_USER_CM_CMD_ACCEPT,
55 RDMA_USER_CM_CMD_REJECT,
56 RDMA_USER_CM_CMD_DISCONNECT,
57 RDMA_USER_CM_CMD_INIT_QP_ATTR,
58 RDMA_USER_CM_CMD_GET_EVENT,
59 RDMA_USER_CM_CMD_GET_OPTION,
60 RDMA_USER_CM_CMD_SET_OPTION,
61 RDMA_USER_CM_CMD_NOTIFY
62};
63
64/*
65 * command ABI structures.
66 */
67struct rdma_ucm_cmd_hdr {
68 __u32 cmd;
69 __u16 in;
70 __u16 out;
71};
72
73struct rdma_ucm_create_id {
74 __u64 uid;
75 __u64 response;
76 __u16 ps;
77 __u8 reserved[6];
78};
79
80struct rdma_ucm_create_id_resp {
81 __u32 id;
82};
83
84struct rdma_ucm_destroy_id {
85 __u64 response;
86 __u32 id;
87 __u32 reserved;
88};
89
90struct rdma_ucm_destroy_id_resp {
91 __u32 events_reported;
92};
93
94struct rdma_ucm_bind_addr {
95 __u64 response;
96 struct sockaddr_in6 addr;
97 __u32 id;
98};
99
100struct rdma_ucm_resolve_addr {
101 struct sockaddr_in6 src_addr;
102 struct sockaddr_in6 dst_addr;
103 __u32 id;
104 __u32 timeout_ms;
105};
106
107struct rdma_ucm_resolve_route {
108 __u32 id;
109 __u32 timeout_ms;
110};
111
112struct rdma_ucm_query_route {
113 __u64 response;
114 __u32 id;
115 __u32 reserved;
116};
117
118struct rdma_ucm_query_route_resp {
119 __u64 node_guid;
120 struct ib_user_path_rec ib_route[2];
121 struct sockaddr_in6 src_addr;
122 struct sockaddr_in6 dst_addr;
123 __u32 num_paths;
124 __u8 port_num;
125 __u8 reserved[3];
126};
127
128struct rdma_ucm_conn_param {
129 __u32 qp_num;
130 __u32 reserved;
131 __u8 private_data[RDMA_MAX_PRIVATE_DATA];
132 __u8 private_data_len;
133 __u8 srq;
134 __u8 responder_resources;
135 __u8 initiator_depth;
136 __u8 flow_control;
137 __u8 retry_count;
138 __u8 rnr_retry_count;
139 __u8 valid;
140};
141
142struct rdma_ucm_ud_param {
143 __u32 qp_num;
144 __u32 qkey;
145 struct ib_uverbs_ah_attr ah_attr;
146 __u8 private_data[RDMA_MAX_PRIVATE_DATA];
147 __u8 private_data_len;
148 __u8 reserved[7];
149};
150
151struct rdma_ucm_connect {
152 struct rdma_ucm_conn_param conn_param;
153 __u32 id;
154 __u32 reserved;
155};
156
157struct rdma_ucm_listen {
158 __u32 id;
159 __u32 backlog;
160};
161
162struct rdma_ucm_accept {
163 __u64 uid;
164 struct rdma_ucm_conn_param conn_param;
165 __u32 id;
166 __u32 reserved;
167};
168
169struct rdma_ucm_reject {
170 __u32 id;
171 __u8 private_data_len;
172 __u8 reserved[3];
173 __u8 private_data[RDMA_MAX_PRIVATE_DATA];
174};
175
176struct rdma_ucm_disconnect {
177 __u32 id;
178};
179
180struct rdma_ucm_init_qp_attr {
181 __u64 response;
182 __u32 id;
183 __u32 qp_state;
184};
185
186struct rdma_ucm_notify {
187 __u32 id;
188 __u32 event;
189};
190
191struct rdma_ucm_get_event {
192 __u64 response;
193};
194
195struct rdma_ucm_event_resp {
196 __u64 uid;
197 __u32 id;
198 __u32 event;
199 __u32 status;
200 union {
201 struct rdma_ucm_conn_param conn;
202 struct rdma_ucm_ud_param ud;
203 } param;
204};
205
206#endif /* RDMA_USER_CM_H */
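
Each ucma command is written to the rdma_cm character device as a struct rdma_ucm_cmd_hdr immediately followed by the command body, with 'response' pointing at a user buffer of 'out' bytes. A userspace sketch of CREATE_ID under those assumptions; the device path is the conventional udev location, not something this header defines:

#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <rdma/rdma_user_cm.h>

int main(void)
{
	struct {
		struct rdma_ucm_cmd_hdr hdr;
		struct rdma_ucm_create_id cmd;
	} msg;
	struct rdma_ucm_create_id_resp resp;
	int fd = open("/dev/infiniband/rdma_cm", O_RDWR);

	if (fd < 0)
		return 1;

	memset(&msg, 0, sizeof msg);
	msg.hdr.cmd	 = RDMA_USER_CM_CMD_CREATE_ID;
	msg.hdr.in	 = sizeof msg.cmd;
	msg.hdr.out	 = sizeof resp;
	msg.cmd.uid	 = 0;	/* caller's cookie, echoed back in events */
	msg.cmd.response = (__u64) (unsigned long) &resp;
	msg.cmd.ps	 = 0x0106;	/* RDMA_PS_TCP per rdma_cm.h */

	if (write(fd, &msg, sizeof msg) != sizeof msg) {
		close(fd);
		return 1;
	}
	printf("new id: %u\n", resp.id);
	close(fd);
	return 0;
}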