author | Roland Dreier <rolandd@cisco.com> | 2007-07-18 14:47:55 -0400 |
---|---|---|
committer | Roland Dreier <rolandd@cisco.com> | 2007-07-18 14:47:55 -0400 |
commit | 0fbfa6a9062c71b62ec216c0294b676b76e41661 (patch) | |
tree | c249dfa059fe5c01ef2f892b3e68ddd5f29442b8 /drivers/infiniband | |
parent | d420d9e32f4bd741b2f0b7227a91941107f96b47 (diff) | |
IB/mlx4: Factor out setting other WQE segments
Factor code to set remote address, atomic and datagram segments out of
mlx4_ib_post_send() into small helper functions. This doesn't change
the generated code in any significant way, and makes the source easier
on the eyes.
Signed-off-by: Roland Dreier <rolandd@cisco.com>
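To make the hunks below easier to follow, here is a small standalone sketch of the pattern this patch applies. It is a hypothetical userspace illustration, not the kernel code: demo_raddr_seg and demo_set_raddr_seg() are simplified stand-ins for mlx4_wqe_raddr_seg and set_raddr_seg(), and the byte-order conversions the driver does with cpu_to_be64()/cpu_to_be32() are only noted in comments. What it shows is the shape of the refactor: each helper fills exactly one segment through a typed pointer, and the caller only advances the write pointer past the segment and accumulates the WQE size in 16-byte units.

```c
/*
 * Hypothetical, simplified illustration of the refactoring pattern in the
 * diff below: fill one segment per helper, let the caller advance the
 * pointer.  demo_raddr_seg and demo_set_raddr_seg() are stand-ins, not the
 * real mlx4_wqe_raddr_seg / set_raddr_seg(); the kernel code also byte-swaps
 * fields with cpu_to_be64()/cpu_to_be32(), which is omitted here.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct demo_raddr_seg {                 /* 16 bytes, like a WQE segment */
	uint64_t raddr;
	uint32_t rkey;
	uint32_t reserved;
};

/* In the spirit of set_raddr_seg(): set every field of one segment. */
static void demo_set_raddr_seg(struct demo_raddr_seg *rseg,
			       uint64_t remote_addr, uint32_t rkey)
{
	rseg->raddr    = remote_addr;   /* kernel: cpu_to_be64(remote_addr) */
	rseg->rkey     = rkey;          /* kernel: cpu_to_be32(rkey)        */
	rseg->reserved = 0;
}

int main(void)
{
	unsigned char wqe_buf[64];      /* pretend work-queue entry buffer   */
	void *wqe = wqe_buf;
	int size = 0;                   /* WQE size, counted in 16-byte units */

	memset(wqe_buf, 0, sizeof wqe_buf);

	/* The caller stays short: fill the segment, then step past it. */
	demo_set_raddr_seg(wqe, 0x1000, 0x42);
	wqe   = (unsigned char *)wqe + sizeof(struct demo_raddr_seg);
	size += sizeof(struct demo_raddr_seg) / 16;

	printf("wrote %d sixteen-byte chunk(s)\n", size);
	return 0;
}
```

In the patch itself, set_raddr_seg() is marked __always_inline while set_atomic_seg() and set_datagram_seg() are ordinary static functions; either way the helpers are small enough that, as the commit message notes, the generated code does not change in any significant way.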
Diffstat (limited to 'drivers/infiniband')
-rw-r--r-- | drivers/infiniband/hw/mlx4/qp.c | 67 |
1 file changed, 36 insertions(+), 31 deletions(-)
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index a59c7f04ca18..b5a24fbef70d 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -1183,6 +1183,35 @@ static int mlx4_wq_overflow(struct mlx4_ib_wq *wq, int nreq, struct ib_cq *ib_cq
 	return cur + nreq >= wq->max_post;
 }
 
+static __always_inline void set_raddr_seg(struct mlx4_wqe_raddr_seg *rseg,
+					  u64 remote_addr, u32 rkey)
+{
+	rseg->raddr    = cpu_to_be64(remote_addr);
+	rseg->rkey     = cpu_to_be32(rkey);
+	rseg->reserved = 0;
+}
+
+static void set_atomic_seg(struct mlx4_wqe_atomic_seg *aseg, struct ib_send_wr *wr)
+{
+	if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
+		aseg->swap_add = cpu_to_be64(wr->wr.atomic.swap);
+		aseg->compare  = cpu_to_be64(wr->wr.atomic.compare_add);
+	} else {
+		aseg->swap_add = cpu_to_be64(wr->wr.atomic.compare_add);
+		aseg->compare  = 0;
+	}
+
+}
+
+static void set_datagram_seg(struct mlx4_wqe_datagram_seg *dseg,
+			     struct ib_send_wr *wr)
+{
+	memcpy(dseg->av, &to_mah(wr->wr.ud.ah)->av, sizeof (struct mlx4_av));
+	dseg->dqpn = cpu_to_be32(wr->wr.ud.remote_qpn);
+	dseg->qkey = cpu_to_be32(wr->wr.ud.remote_qkey);
+
+}
+
 static void set_data_seg(struct mlx4_wqe_data_seg *dseg,
 			 struct ib_sge *sg)
 {
@@ -1246,26 +1275,13 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 		switch (wr->opcode) {
 		case IB_WR_ATOMIC_CMP_AND_SWP:
 		case IB_WR_ATOMIC_FETCH_AND_ADD:
-			((struct mlx4_wqe_raddr_seg *) wqe)->raddr =
-				cpu_to_be64(wr->wr.atomic.remote_addr);
-			((struct mlx4_wqe_raddr_seg *) wqe)->rkey =
-				cpu_to_be32(wr->wr.atomic.rkey);
-			((struct mlx4_wqe_raddr_seg *) wqe)->reserved = 0;
-
+			set_raddr_seg(wqe, wr->wr.atomic.remote_addr,
+				      wr->wr.atomic.rkey);
 			wqe += sizeof (struct mlx4_wqe_raddr_seg);
 
-			if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
-				((struct mlx4_wqe_atomic_seg *) wqe)->swap_add =
-					cpu_to_be64(wr->wr.atomic.swap);
-				((struct mlx4_wqe_atomic_seg *) wqe)->compare =
-					cpu_to_be64(wr->wr.atomic.compare_add);
-			} else {
-				((struct mlx4_wqe_atomic_seg *) wqe)->swap_add =
-					cpu_to_be64(wr->wr.atomic.compare_add);
-				((struct mlx4_wqe_atomic_seg *) wqe)->compare = 0;
-			}
-
+			set_atomic_seg(wqe, wr);
 			wqe += sizeof (struct mlx4_wqe_atomic_seg);
+
 			size += (sizeof (struct mlx4_wqe_raddr_seg) +
 				 sizeof (struct mlx4_wqe_atomic_seg)) / 16;
 
@@ -1274,15 +1290,10 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 		case IB_WR_RDMA_READ:
 		case IB_WR_RDMA_WRITE:
 		case IB_WR_RDMA_WRITE_WITH_IMM:
-			((struct mlx4_wqe_raddr_seg *) wqe)->raddr =
-				cpu_to_be64(wr->wr.rdma.remote_addr);
-			((struct mlx4_wqe_raddr_seg *) wqe)->rkey =
-				cpu_to_be32(wr->wr.rdma.rkey);
-			((struct mlx4_wqe_raddr_seg *) wqe)->reserved = 0;
-
+			set_raddr_seg(wqe, wr->wr.rdma.remote_addr,
+				      wr->wr.rdma.rkey);
 			wqe += sizeof (struct mlx4_wqe_raddr_seg);
 			size += sizeof (struct mlx4_wqe_raddr_seg) / 16;
-
 			break;
 
 		default:
@@ -1292,13 +1303,7 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 			break;
 
 		case IB_QPT_UD:
-			memcpy(((struct mlx4_wqe_datagram_seg *) wqe)->av,
-			       &to_mah(wr->wr.ud.ah)->av, sizeof (struct mlx4_av));
-			((struct mlx4_wqe_datagram_seg *) wqe)->dqpn =
-				cpu_to_be32(wr->wr.ud.remote_qpn);
-			((struct mlx4_wqe_datagram_seg *) wqe)->qkey =
-				cpu_to_be32(wr->wr.ud.remote_qkey);
-
+			set_datagram_seg(wqe, wr);
 			wqe += sizeof (struct mlx4_wqe_datagram_seg);
 			size += sizeof (struct mlx4_wqe_datagram_seg) / 16;
 			break;