diff options
author		Steve Wise <swise@opengridcomputing.com>	2010-04-21 18:30:06 -0400
committer	Roland Dreier <rolandd@cisco.com>		2010-04-21 18:30:06 -0400
commit		cfdda9d764362ab77b11a410bb928400e6520d57 (patch)
tree		3634e5aca12414d40f4e50a3d73543cc479b525f /drivers/infiniband/hw/cxgb4/t4.h
parent		0eddb519b9127c73d53db4bf3ec1d45b13f844d1 (diff)
RDMA/cxgb4: Add driver for Chelsio T4 RNIC
Add an RDMA/iWARP driver for Chelsio T4 Ethernet adapters.
Signed-off-by: Steve Wise <swise@opengridcomputing.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
Diffstat (limited to 'drivers/infiniband/hw/cxgb4/t4.h')
-rw-r--r--	drivers/infiniband/hw/cxgb4/t4.h	536
1 file changed, 536 insertions, 0 deletions
diff --git a/drivers/infiniband/hw/cxgb4/t4.h b/drivers/infiniband/hw/cxgb4/t4.h
new file mode 100644
index 000000000000..3f0d2172efda
--- /dev/null
+++ b/drivers/infiniband/hw/cxgb4/t4.h
@@ -0,0 +1,536 @@
/*
 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *   Redistribution and use in source and binary forms, with or
 *   without modification, are permitted provided that the following
 *   conditions are met:
 *
 *    - Redistributions of source code must retain the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer.
 *    - Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials
 *      provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef __T4_H__
#define __T4_H__

#include "t4_hw.h"
#include "t4_regs.h"
#include "t4_msg.h"
#include "t4fw_ri_api.h"

#define T4_MAX_READ_DEPTH 16
#define T4_QID_BASE 1024
#define T4_MAX_QIDS 256
#define T4_MAX_NUM_QP (1<<16)
#define T4_MAX_NUM_CQ (1<<15)
#define T4_MAX_NUM_PD (1<<15)
#define T4_MAX_PBL_SIZE 256
#define T4_MAX_RQ_SIZE 1024
#define T4_MAX_SQ_SIZE 1024
#define T4_MAX_QP_DEPTH (T4_MAX_RQ_SIZE-1)
#define T4_MAX_CQ_DEPTH 8192
#define T4_MAX_NUM_STAG (1<<15)
#define T4_MAX_MR_SIZE (~0ULL - 1)
#define T4_PAGESIZE_MASK 0xffff000 /* 4KB-128MB */
#define T4_STAG_UNSET 0xffffffff
#define T4_FW_MAJ 0
#define T4_EQ_STATUS_ENTRIES (L1_CACHE_BYTES > 64 ? 2 : 1)

struct t4_status_page {
	__be32 rsvd1;	/* flit 0 - hw owns */
	__be16 rsvd2;
	__be16 qid;
	__be16 cidx;
	__be16 pidx;
	u8 qp_err;	/* flit 1 - sw owns */
	u8 db_off;
};
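
/*
 * Editorial note: the status page is not a separate allocation; it
 * occupies the slot just past the last WQE of each queue (queue[size]),
 * which is why helpers such as t4_wq_in_error() and t4_disable_wq_db()
 * below index one entry beyond the queue size.
 */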

#define T4_EQ_SIZE 64

#define T4_SQ_NUM_SLOTS 4
#define T4_SQ_NUM_BYTES (T4_EQ_SIZE * T4_SQ_NUM_SLOTS)
#define T4_MAX_SEND_SGE ((T4_SQ_NUM_BYTES - sizeof(struct fw_ri_send_wr) - \
			sizeof(struct fw_ri_isgl)) / sizeof(struct fw_ri_sge))
#define T4_MAX_SEND_INLINE ((T4_SQ_NUM_BYTES - sizeof(struct fw_ri_send_wr) - \
			sizeof(struct fw_ri_immd)))
#define T4_MAX_WRITE_INLINE ((T4_SQ_NUM_BYTES - \
			sizeof(struct fw_ri_rdma_write_wr) - \
			sizeof(struct fw_ri_immd)))
#define T4_MAX_WRITE_SGE ((T4_SQ_NUM_BYTES - \
			sizeof(struct fw_ri_rdma_write_wr) - \
			sizeof(struct fw_ri_isgl)) / sizeof(struct fw_ri_sge))
#define T4_MAX_FR_IMMD ((T4_SQ_NUM_BYTES - sizeof(struct fw_ri_fr_nsmr_wr) - \
			sizeof(struct fw_ri_immd)))
#define T4_MAX_FR_DEPTH 255

#define T4_RQ_NUM_SLOTS 2
#define T4_RQ_NUM_BYTES (T4_EQ_SIZE * T4_RQ_NUM_SLOTS)
#define T4_MAX_RECV_SGE ((T4_RQ_NUM_BYTES - sizeof(struct fw_ri_recv_wr) - \
			sizeof(struct fw_ri_isgl)) / sizeof(struct fw_ri_sge))
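
/*
 * Editorial note: every WQE occupies a whole number of 64-byte equeue
 * slots, so an SQ entry spans T4_SQ_NUM_BYTES (4 * 64 = 256 bytes) and
 * an RQ entry T4_RQ_NUM_BYTES (2 * 64 = 128 bytes).  The limits above
 * are simply "whatever remains after the WR and SGL headers, divided by
 * the per-SGE size".  Purely for illustration, with hypothetical sizes
 * sizeof(struct fw_ri_send_wr) == 32, sizeof(struct fw_ri_isgl) == 8
 * and sizeof(struct fw_ri_sge) == 16, T4_MAX_SEND_SGE would evaluate
 * to (256 - 32 - 8) / 16 = 13.
 */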

union t4_wr {
	struct fw_ri_res_wr res;
	struct fw_ri_wr ri;
	struct fw_ri_rdma_write_wr write;
	struct fw_ri_send_wr send;
	struct fw_ri_rdma_read_wr read;
	struct fw_ri_bind_mw_wr bind;
	struct fw_ri_fr_nsmr_wr fr;
	struct fw_ri_inv_lstag_wr inv;
	struct t4_status_page status;
	__be64 flits[T4_EQ_SIZE / sizeof(__be64) * T4_SQ_NUM_SLOTS];
};

union t4_recv_wr {
	struct fw_ri_recv_wr recv;
	struct t4_status_page status;
	__be64 flits[T4_EQ_SIZE / sizeof(__be64) * T4_RQ_NUM_SLOTS];
};

static inline void init_wr_hdr(union t4_wr *wqe, u16 wrid,
			       enum fw_wr_opcodes opcode, u8 flags, u8 len16)
{
	int slots_used;

	wqe->send.opcode = (u8)opcode;
	wqe->send.flags = flags;
	wqe->send.wrid = wrid;
	wqe->send.r1[0] = 0;
	wqe->send.r1[1] = 0;
	wqe->send.r1[2] = 0;
	wqe->send.len16 = len16;

	/* zero the first flit of any unused trailing slots */
	slots_used = DIV_ROUND_UP(len16 * 16, T4_EQ_SIZE);
	while (slots_used < T4_SQ_NUM_SLOTS) {
		wqe->flits[slots_used * T4_EQ_SIZE / sizeof(__be64)] = 0;
		slots_used++;
	}
}
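
/*
 * Usage sketch (editorial, not part of the original patch): a post-send
 * path would build its WR in the slot at the SQ producer index, record
 * the wr_id for completion processing, then advance the queue and ring
 * the doorbell (helpers defined further down in this file).  "wr" here
 * stands for the caller's ib_send_wr and "len16" for the WR length in
 * 16-byte units:
 *
 *	union t4_wr *wqe = &wq->sq.queue[wq->sq.pidx];
 *
 *	init_wr_hdr(wqe, wq->sq.pidx, FW_RI_SEND_WR,
 *		    FW_RI_COMPLETION_FLAG, len16);
 *	... fill in the send payload or SGL ...
 *	wq->sq.sw_sq[wq->sq.pidx].wr_id = wr->wr_id;
 *	t4_sq_produce(wq);
 *	t4_ring_sq_db(wq, 1);
 */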

/* CQE/AE status codes */
#define T4_ERR_SUCCESS 0x0
#define T4_ERR_STAG 0x1			/* STAG invalid: either the */
					/* STAG is off limit, is 0, */
					/* or the STAG_key mismatches */
#define T4_ERR_PDID 0x2			/* PDID mismatch */
#define T4_ERR_QPID 0x3			/* QPID mismatch */
#define T4_ERR_ACCESS 0x4		/* invalid access rights */
#define T4_ERR_WRAP 0x5			/* wrap error */
#define T4_ERR_BOUND 0x6		/* base and bounds violation */
#define T4_ERR_INVALIDATE_SHARED_MR 0x7	/* attempt to invalidate a */
					/* shared memory region */
#define T4_ERR_INVALIDATE_MR_WITH_MW_BOUND 0x8 /* attempt to invalidate an */
					/* MR with an MW bound to it */
#define T4_ERR_ECC 0x9			/* ECC error detected */
#define T4_ERR_ECC_PSTAG 0xA		/* ECC error detected when */
					/* reading PSTAG for a MW */
					/* Invalidate */
#define T4_ERR_PBL_ADDR_BOUND 0xB	/* pbl addr out of bounds: */
					/* software error */
#define T4_ERR_SWFLUSH 0xC		/* SW flushed */
#define T4_ERR_CRC 0x10			/* CRC error */
#define T4_ERR_MARKER 0x11		/* marker error */
#define T4_ERR_PDU_LEN_ERR 0x12		/* invalid PDU length */
#define T4_ERR_OUT_OF_RQE 0x13		/* out of RQEs */
#define T4_ERR_DDP_VERSION 0x14		/* wrong DDP version */
#define T4_ERR_RDMA_VERSION 0x15	/* wrong RDMA version */
#define T4_ERR_OPCODE 0x16		/* invalid RDMA opcode */
#define T4_ERR_DDP_QUEUE_NUM 0x17	/* invalid DDP queue number */
#define T4_ERR_MSN 0x18			/* MSN error */
#define T4_ERR_TBIT 0x19		/* tag bit not set correctly */
#define T4_ERR_MO 0x1A			/* MO not 0 for TERMINATE */
					/* or READ_REQ */
#define T4_ERR_MSN_GAP 0x1B
#define T4_ERR_MSN_RANGE 0x1C
#define T4_ERR_IRD_OVERFLOW 0x1D
#define T4_ERR_RQE_ADDR_BOUND 0x1E	/* RQE addr out of bounds: */
					/* software error */
#define T4_ERR_INTERNAL_ERR 0x1F	/* internal error (opcode */
					/* mismatch) */
/*
 * CQE defs
 */
struct t4_cqe {
	__be32 header;
	__be32 len;
	union {
		struct {
			__be32 stag;
			__be32 msn;
		} rcqe;
		struct {
			u32 nada1;
			u16 nada2;
			u16 cidx;
		} scqe;
		struct {
			__be32 wrid_hi;
			__be32 wrid_low;
		} gen;
	} u;
	__be64 reserved;
	__be64 bits_type_ts;
};

/* macros for flit 0 of the cqe */

#define S_CQE_QPID 12
#define M_CQE_QPID 0xFFFFF
#define G_CQE_QPID(x) ((((x) >> S_CQE_QPID)) & M_CQE_QPID)
#define V_CQE_QPID(x) ((x)<<S_CQE_QPID)

#define S_CQE_SWCQE 11
#define M_CQE_SWCQE 0x1
#define G_CQE_SWCQE(x) ((((x) >> S_CQE_SWCQE)) & M_CQE_SWCQE)
#define V_CQE_SWCQE(x) ((x)<<S_CQE_SWCQE)

#define S_CQE_STATUS 5
#define M_CQE_STATUS 0x1F
#define G_CQE_STATUS(x) ((((x) >> S_CQE_STATUS)) & M_CQE_STATUS)
#define V_CQE_STATUS(x) ((x)<<S_CQE_STATUS)

#define S_CQE_TYPE 4
#define M_CQE_TYPE 0x1
#define G_CQE_TYPE(x) ((((x) >> S_CQE_TYPE)) & M_CQE_TYPE)
#define V_CQE_TYPE(x) ((x)<<S_CQE_TYPE)

#define S_CQE_OPCODE 0
#define M_CQE_OPCODE 0xF
#define G_CQE_OPCODE(x) ((((x) >> S_CQE_OPCODE)) & M_CQE_OPCODE)
#define V_CQE_OPCODE(x) ((x)<<S_CQE_OPCODE)

#define SW_CQE(x) (G_CQE_SWCQE(be32_to_cpu((x)->header)))
#define CQE_QPID(x) (G_CQE_QPID(be32_to_cpu((x)->header)))
#define CQE_TYPE(x) (G_CQE_TYPE(be32_to_cpu((x)->header)))
#define SQ_TYPE(x) (CQE_TYPE((x)))
#define RQ_TYPE(x) (!CQE_TYPE((x)))
#define CQE_STATUS(x) (G_CQE_STATUS(be32_to_cpu((x)->header)))
#define CQE_OPCODE(x) (G_CQE_OPCODE(be32_to_cpu((x)->header)))

#define CQE_SEND_OPCODE(x) ( \
	(G_CQE_OPCODE(be32_to_cpu((x)->header)) == FW_RI_SEND) || \
	(G_CQE_OPCODE(be32_to_cpu((x)->header)) == FW_RI_SEND_WITH_SE) || \
	(G_CQE_OPCODE(be32_to_cpu((x)->header)) == FW_RI_SEND_WITH_INV) || \
	(G_CQE_OPCODE(be32_to_cpu((x)->header)) == FW_RI_SEND_WITH_SE_INV))

#define CQE_LEN(x) (be32_to_cpu((x)->len))

/* used for RQ completion processing */
#define CQE_WRID_STAG(x) (be32_to_cpu((x)->u.rcqe.stag))
#define CQE_WRID_MSN(x) (be32_to_cpu((x)->u.rcqe.msn))

/* used for SQ completion processing */
#define CQE_WRID_SQ_IDX(x) ((x)->u.scqe.cidx)

/* generic accessor macros */
#define CQE_WRID_HI(x) ((x)->u.gen.wrid_hi)
#define CQE_WRID_LOW(x) ((x)->u.gen.wrid_low)

/* macros for flit 3 of the cqe */
#define S_CQE_GENBIT 63
#define M_CQE_GENBIT 0x1
#define G_CQE_GENBIT(x) (((x) >> S_CQE_GENBIT) & M_CQE_GENBIT)
#define V_CQE_GENBIT(x) ((x)<<S_CQE_GENBIT)

#define S_CQE_OVFBIT 62
#define M_CQE_OVFBIT 0x1
#define G_CQE_OVFBIT(x) ((((x) >> S_CQE_OVFBIT)) & M_CQE_OVFBIT)

#define S_CQE_IQTYPE 60
#define M_CQE_IQTYPE 0x3
#define G_CQE_IQTYPE(x) ((((x) >> S_CQE_IQTYPE)) & M_CQE_IQTYPE)

#define M_CQE_TS 0x0fffffffffffffffULL
#define G_CQE_TS(x) ((x) & M_CQE_TS)

#define CQE_OVFBIT(x) ((unsigned)G_CQE_OVFBIT(be64_to_cpu((x)->bits_type_ts)))
#define CQE_GENBIT(x) ((unsigned)G_CQE_GENBIT(be64_to_cpu((x)->bits_type_ts)))
#define CQE_TS(x) (G_CQE_TS(be64_to_cpu((x)->bits_type_ts)))
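
/*
 * Editorial example: a poll path decodes a completion entirely through
 * the accessors above, e.g. for a successful inbound send:
 *
 *	if (RQ_TYPE(cqe) && CQE_SEND_OPCODE(cqe) &&
 *	    CQE_STATUS(cqe) == T4_ERR_SUCCESS)
 *		bytes_received = CQE_LEN(cqe);
 */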

struct t4_swsqe {
	u64 wr_id;
	struct t4_cqe cqe;
	int read_len;
	int opcode;
	int complete;
	int signaled;
	u16 idx;
};

struct t4_sq {
	union t4_wr *queue;
	dma_addr_t dma_addr;
	DECLARE_PCI_UNMAP_ADDR(mapping);
	struct t4_swsqe *sw_sq;
	struct t4_swsqe *oldest_read;
	u64 udb;
	size_t memsize;
	u32 qid;
	u16 in_use;
	u16 size;
	u16 cidx;
	u16 pidx;
};

struct t4_swrqe {
	u64 wr_id;
};

struct t4_rq {
	union t4_recv_wr *queue;
	dma_addr_t dma_addr;
	DECLARE_PCI_UNMAP_ADDR(mapping);
	struct t4_swrqe *sw_rq;
	u64 udb;
	size_t memsize;
	u32 qid;
	u32 msn;
	u32 rqt_hwaddr;
	u16 rqt_size;
	u16 in_use;
	u16 size;
	u16 cidx;
	u16 pidx;
};

struct t4_wq {
	struct t4_sq sq;
	struct t4_rq rq;
	void __iomem *db;
	void __iomem *gts;
	struct c4iw_rdev *rdev;
};

static inline int t4_rqes_posted(struct t4_wq *wq)
{
	return wq->rq.in_use;
}

static inline int t4_rq_empty(struct t4_wq *wq)
{
	return wq->rq.in_use == 0;
}

static inline int t4_rq_full(struct t4_wq *wq)
{
	return wq->rq.in_use == (wq->rq.size - 1);
}

static inline u32 t4_rq_avail(struct t4_wq *wq)
{
	return wq->rq.size - 1 - wq->rq.in_use;
}

static inline void t4_rq_produce(struct t4_wq *wq)
{
	wq->rq.in_use++;
	if (++wq->rq.pidx == wq->rq.size)
		wq->rq.pidx = 0;
}

static inline void t4_rq_consume(struct t4_wq *wq)
{
	wq->rq.in_use--;
	wq->rq.msn++;
	if (++wq->rq.cidx == wq->rq.size)
		wq->rq.cidx = 0;
}

static inline int t4_sq_empty(struct t4_wq *wq)
{
	return wq->sq.in_use == 0;
}

static inline int t4_sq_full(struct t4_wq *wq)
{
	return wq->sq.in_use == (wq->sq.size - 1);
}

static inline u32 t4_sq_avail(struct t4_wq *wq)
{
	return wq->sq.size - 1 - wq->sq.in_use;
}

static inline void t4_sq_produce(struct t4_wq *wq)
{
	wq->sq.in_use++;
	if (++wq->sq.pidx == wq->sq.size)
		wq->sq.pidx = 0;
}

static inline void t4_sq_consume(struct t4_wq *wq)
{
	wq->sq.in_use--;
	if (++wq->sq.cidx == wq->sq.size)
		wq->sq.cidx = 0;
}

static inline void t4_ring_sq_db(struct t4_wq *wq, u16 inc)
{
	inc *= T4_SQ_NUM_SLOTS;
	wmb();
	writel(QID(wq->sq.qid) | PIDX(inc), wq->db);
}

static inline void t4_ring_rq_db(struct t4_wq *wq, u16 inc)
{
	inc *= T4_RQ_NUM_SLOTS;
	wmb();
	writel(QID(wq->rq.qid) | PIDX(inc), wq->db);
}
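
/*
 * Editorial usage sketch: posting a receive mirrors the SQ path --
 * write the fw_ri_recv_wr into the slot at rq.pidx, remember the
 * caller's wr_id, then produce and ring:
 *
 *	wq->rq.sw_rq[wq->rq.pidx].wr_id = wr->wr_id;
 *	t4_rq_produce(wq);
 *	t4_ring_rq_db(wq, 1);
 */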

static inline int t4_wq_in_error(struct t4_wq *wq)
{
	return wq->sq.queue[wq->sq.size].status.qp_err;
}

static inline void t4_set_wq_in_error(struct t4_wq *wq)
{
	wq->sq.queue[wq->sq.size].status.qp_err = 1;
	wq->rq.queue[wq->rq.size].status.qp_err = 1;
}

static inline void t4_disable_wq_db(struct t4_wq *wq)
{
	wq->sq.queue[wq->sq.size].status.db_off = 1;
	wq->rq.queue[wq->rq.size].status.db_off = 1;
}

static inline void t4_enable_wq_db(struct t4_wq *wq)
{
	wq->sq.queue[wq->sq.size].status.db_off = 0;
	wq->rq.queue[wq->rq.size].status.db_off = 0;
}

static inline int t4_wq_db_enabled(struct t4_wq *wq)
{
	return !wq->sq.queue[wq->sq.size].status.db_off;
}

struct t4_cq {
	struct t4_cqe *queue;
	dma_addr_t dma_addr;
	DECLARE_PCI_UNMAP_ADDR(mapping);
	struct t4_cqe *sw_queue;
	void __iomem *gts;
	struct c4iw_rdev *rdev;
	u64 ugts;
	size_t memsize;
	u64 timestamp;
	u32 cqid;
	u16 size; /* including status page */
	u16 cidx;
	u16 sw_pidx;
	u16 sw_cidx;
	u16 sw_in_use;
	u16 cidx_inc;
	u8 gen;
	u8 error;
};

static inline int t4_arm_cq(struct t4_cq *cq, int se)
{
	u32 val;

	val = SEINTARM(se) | CIDXINC(cq->cidx_inc) | TIMERREG(6) |
	      INGRESSQID(cq->cqid);
	cq->cidx_inc = 0;
	writel(val, cq->gts);
	return 0;
}
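
/*
 * Editorial note: "se" selects solicited-event-only notification (as a
 * verbs consumer would request with IB_CQ_SOLICITED), and the GTS write
 * also returns the cidx_inc credits accumulated by t4_hwcq_consume().
 * E.g. arming for the next solicited completion only:
 *
 *	t4_arm_cq(cq, 1);
 */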

static inline void t4_swcq_produce(struct t4_cq *cq)
{
	cq->sw_in_use++;
	if (++cq->sw_pidx == cq->size)
		cq->sw_pidx = 0;
}

static inline void t4_swcq_consume(struct t4_cq *cq)
{
	cq->sw_in_use--;
	if (++cq->sw_cidx == cq->size)
		cq->sw_cidx = 0;
}

static inline void t4_hwcq_consume(struct t4_cq *cq)
{
	cq->cidx_inc++;
	if (++cq->cidx == cq->size) {
		cq->cidx = 0;
		cq->gen ^= 1;	/* queue wrapped: flip the expected gen bit */
	}
}

static inline int t4_valid_cqe(struct t4_cq *cq, struct t4_cqe *cqe)
{
	return (CQE_GENBIT(cqe) == cq->gen);
}

static inline int t4_next_hw_cqe(struct t4_cq *cq, struct t4_cqe **cqe)
{
	int ret = 0;

	if (t4_valid_cqe(cq, &cq->queue[cq->cidx])) {
		*cqe = &cq->queue[cq->cidx];
		cq->timestamp = CQE_TS(*cqe);
	} else if (CQE_TS(&cq->queue[cq->cidx]) > cq->timestamp)
		/* a newer timestamp in a stale slot means the hardware
		 * overwrote entries we never polled */
		ret = -EOVERFLOW;
	else
		ret = -ENODATA;
	if (ret == -EOVERFLOW) {
		printk(KERN_ERR MOD "cq overflow cqid %u\n", cq->cqid);
		cq->error = 1;
	}
	return ret;
}

static inline struct t4_cqe *t4_next_sw_cqe(struct t4_cq *cq)
{
	if (cq->sw_in_use)
		return &cq->sw_queue[cq->sw_cidx];
	return NULL;
}

static inline int t4_next_cqe(struct t4_cq *cq, struct t4_cqe **cqe)
{
	int ret = 0;

	if (cq->error)
		ret = -ENODATA;
	else if (cq->sw_in_use)
		*cqe = &cq->sw_queue[cq->sw_cidx];
	else
		ret = t4_next_hw_cqe(cq, cqe);
	return ret;
}
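
/*
 * Editorial sketch of a poll loop built from these helpers: fetch the
 * next entry, process it, then retire it from whichever queue (software
 * or hardware) it came from:
 *
 *	struct t4_cqe *cqe;
 *
 *	while (!t4_next_cqe(cq, &cqe)) {
 *		... process cqe ...
 *		if (SW_CQE(cqe))
 *			t4_swcq_consume(cq);
 *		else
 *			t4_hwcq_consume(cq);
 *	}
 */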

static inline int t4_cq_in_error(struct t4_cq *cq)
{
	return ((struct t4_status_page *)&cq->queue[cq->size])->qp_err;
}

static inline void t4_set_cq_in_error(struct t4_cq *cq)
{
	((struct t4_status_page *)&cq->queue[cq->size])->qp_err = 1;
}
#endif