author	Sean Hefty <sean.hefty@intel.com>	2005-08-14 00:05:57 -0400
committer	Roland Dreier <rolandd@cisco.com>	2005-08-26 23:37:35 -0400
commit	97f52eb438be7caebe026421545619d8a0c1398a
tree	1085acb833b691e9cc7ef607e4b4ac8cbd81e03f /drivers/infiniband/hw/mthca/mthca_qp.c
parent	92a6b34bf4d0d11c54b2a6bdd6240f98cb326200
[PATCH] IB: sparse endianness cleanup
Fix sparse warnings. Use __be* where appropriate.

Signed-off-by: Sean Hefty <sean.hefty@intel.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
Diffstat (limited to 'drivers/infiniband/hw/mthca/mthca_qp.c')
-rw-r--r--	drivers/infiniband/hw/mthca/mthca_qp.c	187
1 file changed, 93 insertions(+), 94 deletions(-)
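For background on the checker this patch targets: sparse treats __be16/__be32/__be64 as "__bitwise" types, so assigning a plain integer to an annotated field, or vice versa, produces a warning when the kernel is checked with sparse (make C=2) and endianness checking is enabled. A minimal sketch of the idea, not taken from this patch:

	__be32 wire;                 /* big-endian, as the device sees it */
	u32 cpu = 0x12345678;        /* native CPU byte order */

	wire = cpu;                  /* sparse: incorrect type in assignment */
	wire = cpu_to_be32(cpu);     /* OK: explicit conversion to wire order */
	cpu  = be32_to_cpu(wire);    /* OK: convert back before arithmetic */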
diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c
index 2f429815d195..8fbb4f1f5398 100644
--- a/drivers/infiniband/hw/mthca/mthca_qp.c
+++ b/drivers/infiniband/hw/mthca/mthca_qp.c
@@ -97,62 +97,62 @@ enum {
 };
 
 struct mthca_qp_path {
-	u32 port_pkey;
+	__be32 port_pkey;
 	u8 rnr_retry;
 	u8 g_mylmc;
-	u16 rlid;
+	__be16 rlid;
 	u8 ackto;
 	u8 mgid_index;
 	u8 static_rate;
 	u8 hop_limit;
-	u32 sl_tclass_flowlabel;
+	__be32 sl_tclass_flowlabel;
 	u8 rgid[16];
 } __attribute__((packed));
 
 struct mthca_qp_context {
-	u32 flags;
-	u32 tavor_sched_queue;	/* Reserved on Arbel */
+	__be32 flags;
+	__be32 tavor_sched_queue;	/* Reserved on Arbel */
 	u8 mtu_msgmax;
 	u8 rq_size_stride;	/* Reserved on Tavor */
 	u8 sq_size_stride;	/* Reserved on Tavor */
 	u8 rlkey_arbel_sched_queue;	/* Reserved on Tavor */
-	u32 usr_page;
-	u32 local_qpn;
-	u32 remote_qpn;
+	__be32 usr_page;
+	__be32 local_qpn;
+	__be32 remote_qpn;
 	u32 reserved1[2];
 	struct mthca_qp_path pri_path;
 	struct mthca_qp_path alt_path;
-	u32 rdd;
-	u32 pd;
-	u32 wqe_base;
-	u32 wqe_lkey;
-	u32 params1;
-	u32 reserved2;
-	u32 next_send_psn;
-	u32 cqn_snd;
-	u32 snd_wqe_base_l;	/* Next send WQE on Tavor */
-	u32 snd_db_index;	/* (debugging only entries) */
-	u32 last_acked_psn;
-	u32 ssn;
-	u32 params2;
-	u32 rnr_nextrecvpsn;
-	u32 ra_buff_indx;
-	u32 cqn_rcv;
-	u32 rcv_wqe_base_l;	/* Next recv WQE on Tavor */
-	u32 rcv_db_index;	/* (debugging only entries) */
-	u32 qkey;
-	u32 srqn;
-	u32 rmsn;
-	u16 rq_wqe_counter;	/* reserved on Tavor */
-	u16 sq_wqe_counter;	/* reserved on Tavor */
+	__be32 rdd;
+	__be32 pd;
+	__be32 wqe_base;
+	__be32 wqe_lkey;
+	__be32 params1;
+	__be32 reserved2;
+	__be32 next_send_psn;
+	__be32 cqn_snd;
+	__be32 snd_wqe_base_l;	/* Next send WQE on Tavor */
+	__be32 snd_db_index;	/* (debugging only entries) */
+	__be32 last_acked_psn;
+	__be32 ssn;
+	__be32 params2;
+	__be32 rnr_nextrecvpsn;
+	__be32 ra_buff_indx;
+	__be32 cqn_rcv;
+	__be32 rcv_wqe_base_l;	/* Next recv WQE on Tavor */
+	__be32 rcv_db_index;	/* (debugging only entries) */
+	__be32 qkey;
+	__be32 srqn;
+	__be32 rmsn;
+	__be16 rq_wqe_counter;	/* reserved on Tavor */
+	__be16 sq_wqe_counter;	/* reserved on Tavor */
 	u32 reserved3[18];
 } __attribute__((packed));
 
 struct mthca_qp_param {
-	u32 opt_param_mask;
+	__be32 opt_param_mask;
 	u32 reserved1;
 	struct mthca_qp_context context;
 	u32 reserved2[62];
 } __attribute__((packed));
 
 enum {
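The structs above mirror layouts the HCA reads directly out of memory, and the device expects every multi-byte field in big-endian order regardless of host byte order; the __be* annotations let sparse prove that each store goes through an explicit conversion. An illustrative store against the annotated struct (qpn and pd_num are hypothetical host-order locals, not from this file):

	struct mthca_qp_context ctx;
	u32 qpn = 0x000123;          /* hypothetical QP number */
	u32 pd_num = 0x01;           /* hypothetical protection domain */

	memset(&ctx, 0, sizeof ctx);
	ctx.local_qpn = cpu_to_be32(qpn);
	ctx.pd        = cpu_to_be32(pd_num);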
@@ -191,62 +191,62 @@ enum {
 };
 
 struct mthca_next_seg {
-	u32 nda_op;	/* [31:6] next WQE [4:0] next opcode */
-	u32 ee_nds;	/* [31:8] next EE [7] DBD [6] F [5:0] next WQE size */
-	u32 flags;	/* [3] CQ [2] Event [1] Solicit */
-	u32 imm;	/* immediate data */
+	__be32 nda_op;	/* [31:6] next WQE [4:0] next opcode */
+	__be32 ee_nds;	/* [31:8] next EE [7] DBD [6] F [5:0] next WQE size */
+	__be32 flags;	/* [3] CQ [2] Event [1] Solicit */
+	__be32 imm;	/* immediate data */
 };
 
 struct mthca_tavor_ud_seg {
 	u32 reserved1;
-	u32 lkey;
-	u64 av_addr;
+	__be32 lkey;
+	__be64 av_addr;
 	u32 reserved2[4];
-	u32 dqpn;
-	u32 qkey;
+	__be32 dqpn;
+	__be32 qkey;
 	u32 reserved3[2];
 };
 
 struct mthca_arbel_ud_seg {
-	u32 av[8];
-	u32 dqpn;
-	u32 qkey;
+	__be32 av[8];
+	__be32 dqpn;
+	__be32 qkey;
 	u32 reserved[2];
 };
 
 struct mthca_bind_seg {
-	u32 flags;	/* [31] Atomic [30] rem write [29] rem read */
+	__be32 flags;	/* [31] Atomic [30] rem write [29] rem read */
 	u32 reserved;
-	u32 new_rkey;
-	u32 lkey;
-	u64 addr;
-	u64 length;
+	__be32 new_rkey;
+	__be32 lkey;
+	__be64 addr;
+	__be64 length;
 };
 
 struct mthca_raddr_seg {
-	u64 raddr;
-	u32 rkey;
+	__be64 raddr;
+	__be32 rkey;
 	u32 reserved;
 };
 
 struct mthca_atomic_seg {
-	u64 swap_add;
-	u64 compare;
+	__be64 swap_add;
+	__be64 compare;
 };
 
 struct mthca_data_seg {
-	u32 byte_count;
-	u32 lkey;
-	u64 addr;
+	__be32 byte_count;
+	__be32 lkey;
+	__be64 addr;
 };
 
 struct mthca_mlx_seg {
-	u32 nda_op;
-	u32 nds;
-	u32 flags;	/* [17] VL15 [16] SLR [14:12] static rate
+	__be32 nda_op;
+	__be32 nds;
+	__be32 flags;	/* [17] VL15 [16] SLR [14:12] static rate
 			   [11:8] SL [3] C [2] E */
-	u16 rlid;
-	u16 vcrc;
+	__be16 rlid;
+	__be16 vcrc;
 };
 
 static const u8 mthca_opcode[] = {
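The WQE segment structs get the same treatment, since the HCA parses posted work queue entries as big-endian words. For example, the post_send paths in this file copy each scatter/gather entry into a data segment with stores of this shape (the helper name set_data_seg is ours for illustration; the driver open-codes the three assignments):

	static void set_data_seg(struct mthca_data_seg *dseg,
				 struct ib_sge *sge)
	{
		dseg->byte_count = cpu_to_be32(sge->length);
		dseg->lkey       = cpu_to_be32(sge->lkey);
		dseg->addr       = cpu_to_be64(sge->addr);
	}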
@@ -1459,6 +1459,7 @@ static int build_mlx_header(struct mthca_dev *dev, struct mthca_sqp *sqp,
 {
 	int header_size;
 	int err;
+	u16 pkey;
 
 	ib_ud_header_init(256, /* assume a MAD */
 			  sqp->ud_header.grh_present,
@@ -1469,8 +1470,8 @@ static int build_mlx_header(struct mthca_dev *dev, struct mthca_sqp *sqp,
 		return err;
 	mlx->flags &= ~cpu_to_be32(MTHCA_NEXT_SOLICIT | 1);
 	mlx->flags |= cpu_to_be32((!sqp->qp.ibqp.qp_num ? MTHCA_MLX_VL15 : 0) |
-				  (sqp->ud_header.lrh.destination_lid == 0xffff ?
-				   MTHCA_MLX_SLR : 0) |
+				  (sqp->ud_header.lrh.destination_lid ==
+				   IB_LID_PERMISSIVE ? MTHCA_MLX_SLR : 0) |
 				  (sqp->ud_header.lrh.service_level << 8));
 	mlx->rlid = sqp->ud_header.lrh.destination_lid;
 	mlx->vcrc = 0;
@@ -1490,18 +1491,16 @@ static int build_mlx_header(struct mthca_dev *dev, struct mthca_sqp *sqp,
 	}
 
 	sqp->ud_header.lrh.virtual_lane = !sqp->qp.ibqp.qp_num ? 15 : 0;
-	if (sqp->ud_header.lrh.destination_lid == 0xffff)
-		sqp->ud_header.lrh.source_lid = 0xffff;
+	if (sqp->ud_header.lrh.destination_lid == IB_LID_PERMISSIVE)
+		sqp->ud_header.lrh.source_lid = IB_LID_PERMISSIVE;
 	sqp->ud_header.bth.solicited_event = !!(wr->send_flags & IB_SEND_SOLICITED);
 	if (!sqp->qp.ibqp.qp_num)
 		ib_get_cached_pkey(&dev->ib_dev, sqp->port,
-				   sqp->pkey_index,
-				   &sqp->ud_header.bth.pkey);
+				   sqp->pkey_index, &pkey);
 	else
 		ib_get_cached_pkey(&dev->ib_dev, sqp->port,
-				   wr->wr.ud.pkey_index,
-				   &sqp->ud_header.bth.pkey);
-	cpu_to_be16s(&sqp->ud_header.bth.pkey);
+				   wr->wr.ud.pkey_index, &pkey);
+	sqp->ud_header.bth.pkey = cpu_to_be16(pkey);
 	sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->wr.ud.remote_qpn);
 	sqp->ud_header.bth.psn = cpu_to_be32((sqp->send_psn++) & ((1 << 24) - 1));
 	sqp->ud_header.deth.qkey = cpu_to_be32(wr->wr.ud.remote_qkey & 0x80000000 ?
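The rework in this hunk is the one structural change: ib_get_cached_pkey() fills a host-order u16, while bth.pkey is an on-the-wire field (annotated __be16 by the matching header cleanup in this series), so passing &sqp->ud_header.bth.pkey directly hands a __be16 pointer to a function expecting u16 *, and converting in place with cpu_to_be16s() mixes the types the same way. Fetching into a local and converting once keeps the types honest; the pattern, slightly simplified (port and index stand in for the two call sites' arguments):

	u16 pkey;                                    /* host byte order */

	ib_get_cached_pkey(&dev->ib_dev, port, index, &pkey);
	sqp->ud_header.bth.pkey = cpu_to_be16(pkey); /* wire byte order */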
@@ -1744,7 +1743,7 @@ int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 
 out:
 	if (likely(nreq)) {
-		u32 doorbell[2];
+		__be32 doorbell[2];
 
 		doorbell[0] = cpu_to_be32(((qp->sq.next_ind << qp->sq.wqe_shift) +
 					   qp->send_wqe_offset) | f0 | op0);
@@ -1845,7 +1844,7 @@ int mthca_tavor_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
 
 out:
 	if (likely(nreq)) {
-		u32 doorbell[2];
+		__be32 doorbell[2];
 
 		doorbell[0] = cpu_to_be32((qp->rq.next_ind << qp->rq.wqe_shift) | size0);
 		doorbell[1] = cpu_to_be32((qp->qpn << 8) | nreq);
@@ -2066,7 +2065,7 @@ int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 
 out:
 	if (likely(nreq)) {
-		u32 doorbell[2];
+		__be32 doorbell[2];
 
 		doorbell[0] = cpu_to_be32((nreq << 24) |
 					  ((qp->sq.head & 0xffff) << 8) |
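The three doorbell hunks above (Tavor send, Tavor receive, Arbel send) follow one pattern: the two-word doorbell is assembled in big-endian form and written to the HCA's memory-mapped doorbell register, so __be32 doorbell[2] matches what actually crosses the bus. A sketch of the Tavor send-side shape using this file's own helpers (first_word abbreviates the WQE address/opcode word built above it):

	__be32 doorbell[2];

	doorbell[0] = cpu_to_be32(first_word);
	doorbell[1] = cpu_to_be32((qp->qpn << 8) | nreq);
	mthca_write64(doorbell, dev->kar + MTHCA_SEND_DOORBELL,
		      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));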
@@ -2176,7 +2175,7 @@ out:
 }
 
 int mthca_free_err_wqe(struct mthca_dev *dev, struct mthca_qp *qp, int is_send,
-		       int index, int *dbd, u32 *new_wqe)
+		       int index, int *dbd, __be32 *new_wqe)
 {
 	struct mthca_next_seg *next;
 