-rw-r--r--	drivers/infiniband/hw/mlx5/mlx5_ib.h	 2
-rw-r--r--	drivers/infiniband/hw/mlx5/qp.c     	71
-rw-r--r--	include/linux/mlx5/qp.h             	 3
-rw-r--r--	include/rdma/ib_umem.h              	 5
4 files changed, 80 insertions(+), 1 deletion(-)
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index 53d19e6e69a4..14a0311eaa1c 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -503,6 +503,8 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 int mlx5_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
 		      struct ib_recv_wr **bad_wr);
 void *mlx5_get_send_wqe(struct mlx5_ib_qp *qp, int n);
+int mlx5_ib_read_user_wqe(struct mlx5_ib_qp *qp, int send, int wqe_index,
+			  void *buffer, u32 length);
 struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev, int entries,
 				int vector, struct ib_ucontext *context,
 				struct ib_udata *udata);
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 36e2cfe1c2fe..9783c3342dbf 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -101,6 +101,77 @@ void *mlx5_get_send_wqe(struct mlx5_ib_qp *qp, int n)
 	return get_wqe(qp, qp->sq.offset + (n << MLX5_IB_SQ_STRIDE));
 }
 
+/**
+ * mlx5_ib_read_user_wqe() - Copy a user-space WQE to kernel space.
+ *
+ * @qp: QP to copy from.
+ * @send: copy from the send queue when non-zero, use the receive queue
+ *	  otherwise.
+ * @wqe_index: index to start copying from. For send work queues, the
+ *	       wqe_index is in units of MLX5_SEND_WQE_BB.
+ *	       For receive work queues, it is the index of the work queue
+ *	       element in the queue.
+ * @buffer: destination buffer.
+ * @length: maximum number of bytes to copy.
+ *
+ * Copies at least a single WQE, but may copy more data.
+ *
+ * Return: the number of bytes copied, or an error code.
+ */
+int mlx5_ib_read_user_wqe(struct mlx5_ib_qp *qp, int send, int wqe_index,
+			  void *buffer, u32 length)
+{
+	struct ib_device *ibdev = qp->ibqp.device;
+	struct mlx5_ib_dev *dev = to_mdev(ibdev);
+	struct mlx5_ib_wq *wq = send ? &qp->sq : &qp->rq;
+	size_t offset;
+	size_t wq_end;
+	struct ib_umem *umem = qp->umem;
+	u32 first_copy_length;
+	int wqe_length;
+	int ret;
+
+	if (wq->wqe_cnt == 0) {
+		mlx5_ib_dbg(dev, "mlx5_ib_read_user_wqe for a QP with wqe_cnt == 0. qp_type: 0x%x\n",
+			    qp->ibqp.qp_type);
+		return -EINVAL;
+	}
+
+	offset = wq->offset + ((wqe_index % wq->wqe_cnt) << wq->wqe_shift);
+	wq_end = wq->offset + (wq->wqe_cnt << wq->wqe_shift);
+
+	if (send && length < sizeof(struct mlx5_wqe_ctrl_seg))
+		return -EINVAL;
+
+	if (offset > umem->length ||
+	    (send && offset + sizeof(struct mlx5_wqe_ctrl_seg) > umem->length))
+		return -EINVAL;
+
+	first_copy_length = min_t(u32, offset + length, wq_end) - offset;
+	ret = ib_umem_copy_from(buffer, umem, offset, first_copy_length);
+	if (ret)
+		return ret;
+
+	if (send) {
+		struct mlx5_wqe_ctrl_seg *ctrl = buffer;
+		int ds = be32_to_cpu(ctrl->qpn_ds) & MLX5_WQE_CTRL_DS_MASK;
+
+		wqe_length = ds * MLX5_WQE_DS_UNITS;
+	} else {
+		wqe_length = 1 << wq->wqe_shift;
+	}
+
+	if (wqe_length <= first_copy_length)
+		return first_copy_length;
+
+	ret = ib_umem_copy_from(buffer + first_copy_length, umem, wq->offset,
+				wqe_length - first_copy_length);
+	if (ret)
+		return ret;
+
+	return wqe_length;
+}
+
 static void mlx5_ib_qp_event(struct mlx5_core_qp *qp, int type)
 {
 	struct ib_qp *ibqp = &to_mibqp(qp)->ibqp;
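
To make the intended use concrete, here is an illustrative caller sketch; it is not part of this patch. dump_first_send_wqe(), the 1024-byte buffer size, and the pr_debug() message are assumptions of mine; only mlx5_ib_read_user_wqe() and the MLX5_WQE_* constants come from the patch. The buffer is sized generously on purpose: per the kernel-doc above, the function may copy a complete WQE even when that exceeds the requested length, so the destination must be able to hold a full WQE.

static int dump_first_send_wqe(struct mlx5_ib_qp *qp)
{
	struct mlx5_wqe_ctrl_seg *ctrl;
	void *buf;
	int ret;

	/* Assumed upper bound on a send WQE size; the destination must
	 * be able to hold a complete WQE, not just the requested length. */
	buf = kzalloc(1024, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* send=1, wqe_index=0: read the first send-queue WQE. */
	ret = mlx5_ib_read_user_wqe(qp, 1, 0, buf, 1024);
	if (ret < 0)
		goto out;	/* e.g. -EINVAL for a queue with wqe_cnt == 0 */

	ctrl = buf;
	pr_debug("WQE 0: ds=%u, %d bytes copied\n",
		 be32_to_cpu(ctrl->qpn_ds) & MLX5_WQE_CTRL_DS_MASK, ret);
	ret = 0;
out:
	kfree(buf);
	return ret;
}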
diff --git a/include/linux/mlx5/qp.h b/include/linux/mlx5/qp.h
index 3fa075daeb1d..67f4b9660b06 100644
--- a/include/linux/mlx5/qp.h
+++ b/include/linux/mlx5/qp.h
@@ -189,6 +189,9 @@ struct mlx5_wqe_ctrl_seg {
 	__be32	imm;
 };
 
+#define MLX5_WQE_CTRL_DS_MASK	0x3f
+#define MLX5_WQE_DS_UNITS	16
+
 struct mlx5_wqe_xrc_seg {
 	__be32	xrc_srqn;
 	u8	rsvd[12];
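
These two constants decode the WQE size from the control segment: the low six bits of qpn_ds count 16-byte data segments. A minimal sketch, with an assumed helper name (mlx5_wqe_size_bytes() is not part of the patch):

/* With ds == 4, the WQE spans 4 * 16 = 64 bytes, i.e. exactly one
 * 64-byte send WQE basic block (MLX5_SEND_WQE_BB). */
static inline u32 mlx5_wqe_size_bytes(const struct mlx5_wqe_ctrl_seg *ctrl)
{
	u32 ds = be32_to_cpu(ctrl->qpn_ds) & MLX5_WQE_CTRL_DS_MASK;

	return ds * MLX5_WQE_DS_UNITS;
}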
diff --git a/include/rdma/ib_umem.h b/include/rdma/ib_umem.h
index 45bb04bc88cd..a51f4091489a 100644
--- a/include/rdma/ib_umem.h
+++ b/include/rdma/ib_umem.h
@@ -98,7 +98,10 @@ static inline struct ib_umem *ib_umem_get(struct ib_ucontext *context,
 }
 static inline void ib_umem_release(struct ib_umem *umem) { }
 static inline int ib_umem_page_count(struct ib_umem *umem) { return 0; }
-
+static inline int ib_umem_copy_from(void *dst, struct ib_umem *umem, size_t offset,
+				    size_t length) {
+	return -EINVAL;
+}
 #endif /* CONFIG_INFINIBAND_USER_MEM */
 
 #endif /* IB_UMEM_H */