author     Bryan O'Sullivan <bos@pathscale.com>       2006-03-29 18:23:33 -0500
committer  Roland Dreier <rolandd@cisco.com>          2006-03-31 16:14:20 -0500
commit     aa735edf5dffbe43463c3d1218912fa54a8ec724 (patch)
tree       f5ac10f3ff06a3e7dfc6a2e68be0064fbd8a1847 /drivers/infiniband
parent     889ab795a34247c8085e65648051e34f9fec952c (diff)
IB/ipath: infiniband header files
These header files are used by the layered InfiniBand driver.
Signed-off-by: Bryan O'Sullivan <bos@pathscale.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
Diffstat (limited to 'drivers/infiniband')
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_verbs.h   697
-rw-r--r--  drivers/infiniband/hw/ipath/verbs_debug.h   107
2 files changed, 804 insertions, 0 deletions
diff --git a/drivers/infiniband/hw/ipath/ipath_verbs.h b/drivers/infiniband/hw/ipath/ipath_verbs.h
new file mode 100644
index 000000000000..b824632b2a8c
--- /dev/null
+++ b/drivers/infiniband/hw/ipath/ipath_verbs.h
@@ -0,0 +1,697 @@
/*
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef IPATH_VERBS_H
#define IPATH_VERBS_H

#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <rdma/ib_pack.h>

#include "ipath_layer.h"
#include "verbs_debug.h"

#define QPN_MAX			(1 << 24)
#define QPNMAP_ENTRIES		(QPN_MAX / PAGE_SIZE / BITS_PER_BYTE)
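
/*
 * Worked example: with 4 KiB pages and 8 bits per byte,
 * QPNMAP_ENTRIES is (1 << 24) / 4096 / 8 = 512, and each allocated
 * bitmap page tracks PAGE_SIZE * BITS_PER_BYTE = 32768 QP numbers.
 */
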
/*
 * Increment this value if any changes that break userspace ABI
 * compatibility are made.
 */
#define IPATH_UVERBS_ABI_VERSION	1

/*
 * Define an ib_cq_notify value that is not valid so we know when CQ
 * notifications are armed.
 */
#define IB_CQ_NONE	(IB_CQ_NEXT_COMP + 1)

#define IB_RNR_NAK			0x20
#define IB_NAK_PSN_ERROR		0x60
#define IB_NAK_INVALID_REQUEST		0x61
#define IB_NAK_REMOTE_ACCESS_ERROR	0x62
#define IB_NAK_REMOTE_OPERATIONAL_ERROR 0x63
#define IB_NAK_INVALID_RD_REQUEST	0x64

#define IPATH_POST_SEND_OK		0x01
#define IPATH_POST_RECV_OK		0x02
#define IPATH_PROCESS_RECV_OK		0x04
#define IPATH_PROCESS_SEND_OK		0x08

/* IB Performance Manager status values */
#define IB_PMA_SAMPLE_STATUS_DONE	0x00
#define IB_PMA_SAMPLE_STATUS_STARTED	0x01
#define IB_PMA_SAMPLE_STATUS_RUNNING	0x02

/* Mandatory IB performance counter select values. */
#define IB_PMA_PORT_XMIT_DATA	__constant_htons(0x0001)
#define IB_PMA_PORT_RCV_DATA	__constant_htons(0x0002)
#define IB_PMA_PORT_XMIT_PKTS	__constant_htons(0x0003)
#define IB_PMA_PORT_RCV_PKTS	__constant_htons(0x0004)
#define IB_PMA_PORT_XMIT_WAIT	__constant_htons(0x0005)

struct ib_reth {
	__be64 vaddr;
	__be32 rkey;
	__be32 length;
} __attribute__ ((packed));

struct ib_atomic_eth {
	__be64 vaddr;
	__be32 rkey;
	__be64 swap_data;
	__be64 compare_data;
} __attribute__ ((packed));

struct ipath_other_headers {
	__be32 bth[3];
	union {
		struct {
			__be32 deth[2];
			__be32 imm_data;
		} ud;
		struct {
			struct ib_reth reth;
			__be32 imm_data;
		} rc;
		struct {
			__be32 aeth;
			__be64 atomic_ack_eth;
		} at;
		__be32 imm_data;
		__be32 aeth;
		struct ib_atomic_eth atomic_eth;
	} u;
} __attribute__ ((packed));

/*
 * Note that UD packets with a GRH header are 8+40+12+8 = 68 bytes
 * long (72 w/ imm_data).  Only the first 56 bytes of the IB header
 * will be in the eager header buffer.  The remaining 12 or 16 bytes
 * are in the data buffer.
 */
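/*
 * For reference, the 68-byte figure breaks down by header as
 * LRH (8) + GRH (40) + BTH (12) + DETH (8); the optional 4-byte
 * immediate-data field brings the total to 72.
 */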
struct ipath_ib_header {
	__be16 lrh[4];
	union {
		struct {
			struct ib_grh grh;
			struct ipath_other_headers oth;
		} l;
		struct ipath_other_headers oth;
	} u;
} __attribute__ ((packed));

/*
 * There is one struct ipath_mcast for each multicast GID.
 * All attached QPs are then stored as a list of
 * struct ipath_mcast_qp.
 */
struct ipath_mcast_qp {
	struct list_head list;
	struct ipath_qp *qp;
};

struct ipath_mcast {
	struct rb_node rb_node;
	union ib_gid mgid;
	struct list_head qp_list;
	wait_queue_head_t wait;
	atomic_t refcount;
};

/* Memory region */
struct ipath_mr {
	struct ib_mr ibmr;
	struct ipath_mregion mr;	/* must be last */
};

/* Fast memory region */
struct ipath_fmr {
	struct ib_fmr ibfmr;
	u8 page_shift;
	struct ipath_mregion mr;	/* must be last */
};

/* Protection domain */
struct ipath_pd {
	struct ib_pd ibpd;
	int user;		/* non-zero if created from user space */
};

/* Address Handle */
struct ipath_ah {
	struct ib_ah ibah;
	struct ib_ah_attr attr;
};

/*
 * Quick description of our CQ/QP locking scheme:
 *
 * We have one global lock that protects dev->cq/qp_table.  Each
 * struct ipath_cq/qp also has its own lock.  An individual qp lock
 * may be taken inside of an individual cq lock.  Both cqs attached to
 * a qp may be locked, with the send cq locked first.  No other
 * nesting should be done.
 *
 * Each struct ipath_cq/qp also has an atomic_t ref count.  The
 * pointer from the cq/qp_table to the struct counts as one reference.
 * This reference also is good for access through the consumer API, so
 * modifying the CQ/QP etc doesn't need to take another reference.
 * Access because of a completion being polled does need a reference.
 *
 * Finally, each struct ipath_cq/qp has a wait_queue_head_t for the
 * destroy function to sleep on.
 *
 * This means that access from the consumer API requires nothing but
 * taking the struct's lock.
 *
 * Access because of a completion event should go as follows:
 * - lock cq/qp_table and look up struct
 * - increment ref count in struct
 * - drop cq/qp_table lock
 * - lock struct, do your thing, and unlock struct
 * - decrement ref count; if zero, wake up waiters
 *
 * To destroy a CQ/QP, we can do the following:
 * - lock cq/qp_table, remove pointer, unlock cq/qp_table lock
 * - decrement ref count
 * - wait_event until ref count is zero
 *
 * It is the consumer's responsibility to make sure that no QP
 * operations (WQE posting or state modification) are pending when the
 * QP is destroyed.  Also, the consumer must make sure that calls to
 * qp_modify are serialized.
 *
 * Possible optimizations (wait for profile data to see if/where we
 * have locks bouncing between CPUs):
 * - split cq/qp table lock into n separate (cache-aligned) locks,
 *   indexed (say) by the page in the table
 */

struct ipath_cq {
	struct ib_cq ibcq;
	struct tasklet_struct comptask;
	spinlock_t lock;
	u8 notify;
	u8 triggered;
	u32 head;		/* new records added to the head */
	u32 tail;		/* poll_cq() reads from here. */
	struct ib_wc *queue;	/* this is actually ibcq.cqe + 1 */
};
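
/*
 * Illustrative helper (an assumption, not a declaration from this
 * header): the ring implied by head/tail holds ibcq.cqe + 1 entries,
 * so head == tail can mean "empty" while a full ring still leaves the
 * two indices distinct.  An index wraps to 0 after reaching ibcq.cqe.
 */
static inline u32 example_cq_next_index(struct ipath_cq *cq, u32 index)
{
	return index == (u32) cq->ibcq.cqe ? 0 : index + 1;
}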

/*
 * Send work request queue entry.
 * The size of the sg_list is determined when the QP is created and stored
 * in qp->s_max_sge.
 */
struct ipath_swqe {
	struct ib_send_wr wr;	/* don't use wr.sg_list */
	u32 psn;		/* first packet sequence number */
	u32 lpsn;		/* last packet sequence number */
	u32 ssn;		/* send sequence number */
	u32 length;		/* total length of data in sg_list */
	struct ipath_sge sg_list[0];
};

/*
 * Receive work request queue entry.
 * The size of the sg_list is determined when the QP is created and stored
 * in qp->r_max_sge.
 */
struct ipath_rwqe {
	u64 wr_id;
	u32 length;		/* total length of data in sg_list */
	u8 num_sge;
	struct ipath_sge sg_list[0];
};

struct ipath_rq {
	spinlock_t lock;
	u32 head;		/* new work requests posted to the head */
	u32 tail;		/* receives pull requests from here. */
	u32 size;		/* size of RWQE array */
	u8 max_sge;
	struct ipath_rwqe *wq;	/* RWQE array */
};

struct ipath_srq {
	struct ib_srq ibsrq;
	struct ipath_rq rq;
	/* send signal when number of RWQEs < limit */
	u32 limit;
};

/*
 * Variables prefixed with s_ are for the requester (sender).
 * Variables prefixed with r_ are for the responder (receiver).
 * Variables prefixed with ack_ are for responder replies.
 *
 * Common variables are protected by both r_rq.lock and s_lock, in that
 * order; taking both only happens in modify_qp() or when changing the
 * QP state.
 */
struct ipath_qp {
	struct ib_qp ibqp;
	struct ipath_qp *next;		/* link list for QPN hash table */
	struct list_head piowait;	/* link for wait PIO buf */
	struct list_head timerwait;	/* link for waiting for timeouts */
	struct ib_ah_attr remote_ah_attr;
	struct ipath_ib_header s_hdr;	/* next packet header to send */
	atomic_t refcount;
	wait_queue_head_t wait;
	struct tasklet_struct s_task;
	struct ipath_sge_state *s_cur_sge;
	struct ipath_sge_state s_sge;	/* current send request data */
	/* current RDMA read send data */
	struct ipath_sge_state s_rdma_sge;
	struct ipath_sge_state r_sge;	/* current receive data */
	spinlock_t s_lock;
	unsigned long s_flags;
	u32 s_hdrwords;		/* size of s_hdr in 32 bit words */
	u32 s_cur_size;		/* size of send packet in bytes */
	u32 s_len;		/* total length of s_sge */
	u32 s_rdma_len;		/* total length of s_rdma_sge */
	u32 s_next_psn;		/* PSN for next request */
	u32 s_last_psn;		/* last response PSN processed */
	u32 s_psn;		/* current packet sequence number */
	u32 s_rnr_timeout;	/* number of milliseconds for RNR timeout */
	u32 s_ack_psn;		/* PSN for next ACK or RDMA_READ */
	u64 s_ack_atomic;	/* data for atomic ACK */
	u64 r_wr_id;		/* ID for current receive WQE */
	u64 r_atomic_data;	/* data for last atomic op */
	u32 r_atomic_psn;	/* PSN of last atomic op */
	u32 r_len;		/* total length of r_sge */
	u32 r_rcv_len;		/* receive data len processed */
	u32 r_psn;		/* expected rcv packet sequence number */
	u8 state;		/* QP state */
	u8 s_state;		/* opcode of last packet sent */
	u8 s_ack_state;		/* opcode of packet to ACK */
	u8 s_nak_state;		/* non-zero if NAK is pending */
	u8 r_state;		/* opcode of last packet received */
	u8 r_reuse_sge;		/* for UC receive errors */
	u8 r_sge_inx;		/* current index into sg_list */
	u8 s_max_sge;		/* size of s_wq->sg_list */
	u8 qp_access_flags;
	u8 s_retry_cnt;		/* number of times to retry */
	u8 s_rnr_retry_cnt;
	u8 s_min_rnr_timer;
	u8 s_retry;		/* requester retry counter */
	u8 s_rnr_retry;		/* requester RNR retry counter */
	u8 s_pkey_index;	/* PKEY index to use */
	enum ib_mtu path_mtu;
	atomic_t msn;		/* message sequence number */
	u32 remote_qpn;
	u32 qkey;		/* QKEY for this QP (for UD or RD) */
	u32 s_size;		/* send work queue size */
	u32 s_head;		/* new entries added here */
	u32 s_tail;		/* next entry to process */
	u32 s_cur;		/* current work queue entry */
	u32 s_last;		/* last un-ACK'ed entry */
	u32 s_ssn;		/* SSN of tail entry */
	u32 s_lsn;		/* limit sequence number (credit) */
	struct ipath_swqe *s_wq;	/* send work queue */
	struct ipath_rq r_rq;		/* receive work queue */
};

/*
 * Bit definitions for s_flags.
 */
#define IPATH_S_BUSY		0
#define IPATH_S_SIGNAL_REQ_WR	1

/*
 * Since struct ipath_swqe is not a fixed size, we can't simply index into
 * struct ipath_qp.s_wq.  This function does the array index computation.
 */
static inline struct ipath_swqe *get_swqe_ptr(struct ipath_qp *qp,
					      unsigned n)
{
	return (struct ipath_swqe *)((char *)qp->s_wq +
				     (sizeof(struct ipath_swqe) +
				      qp->s_max_sge *
				      sizeof(struct ipath_sge)) * n);
}

/*
 * Since struct ipath_rwqe is not a fixed size, we can't simply index into
 * struct ipath_rq.wq.  This function does the array index computation.
 */
static inline struct ipath_rwqe *get_rwqe_ptr(struct ipath_rq *rq,
					      unsigned n)
{
	return (struct ipath_rwqe *)
		((char *) rq->wq +
		 (sizeof(struct ipath_rwqe) +
		  rq->max_sge * sizeof(struct ipath_sge)) * n);
}
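
/*
 * Illustrative only: the stride computation both helpers above rely
 * on, written out as a hypothetical helper.  A queue slot is the
 * fixed-size header plus max_sge scatter/gather entries, so entry n
 * starts at stride * n bytes from the queue base.
 */
static inline size_t example_wqe_stride(size_t fixed_size, unsigned max_sge)
{
	return fixed_size + max_sge * sizeof(struct ipath_sge);
}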

/*
 * QPN-map pages start out as NULL, they get allocated upon
 * first use and are never deallocated.  This way,
 * large bitmaps are not allocated unless large numbers of QPs are used.
 */
struct qpn_map {
	atomic_t n_free;
	void *page;
};

struct ipath_qp_table {
	spinlock_t lock;
	u32 last;		/* last QP number allocated */
	u32 max;		/* size of the hash table */
	u32 nmaps;		/* size of the map table */
	struct ipath_qp **table;
	/* bit map of free numbers */
	struct qpn_map map[QPNMAP_ENTRIES];
};
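
/*
 * A minimal sketch (not from the original header) of the
 * completion-event access pattern described in the locking notes
 * above.  The hash scheme (qpn % qpt->max) and the choice of s_lock
 * as the per-QP lock are assumptions for illustration only.
 */
static inline void example_completion_access(struct ipath_qp_table *qpt,
					     u32 qpn)
{
	struct ipath_qp *qp;
	unsigned long flags;

	/* lock qp_table and look up struct */
	spin_lock_irqsave(&qpt->lock, flags);
	for (qp = qpt->table[qpn % qpt->max]; qp; qp = qp->next)
		if (qp->ibqp.qp_num == qpn)
			break;
	/* increment ref count in struct */
	if (qp)
		atomic_inc(&qp->refcount);
	/* drop qp_table lock */
	spin_unlock_irqrestore(&qpt->lock, flags);
	if (!qp)
		return;

	/* lock struct, do your thing, and unlock struct */
	spin_lock_irqsave(&qp->s_lock, flags);
	/* ... process the completion ... */
	spin_unlock_irqrestore(&qp->s_lock, flags);

	/* decrement ref count; if zero, wake up waiters */
	if (atomic_dec_and_test(&qp->refcount))
		wake_up(&qp->wait);
}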

struct ipath_lkey_table {
	spinlock_t lock;
	u32 next;		/* next unused index (speeds search) */
	u32 gen;		/* generation count */
	u32 max;		/* size of the table */
	struct ipath_mregion **table;
};

struct ipath_opcode_stats {
	u64 n_packets;		/* number of packets */
	u64 n_bytes;		/* total number of bytes */
};

struct ipath_ibdev {
	struct ib_device ibdev;
	struct list_head dev_list;
	struct ipath_devdata *dd;
	int ib_unit;		/* This is the device number */
	u16 sm_lid;		/* in host order */
	u8 sm_sl;
	u8 mkeyprot_resv_lmc;
	/* non-zero when timer is set */
	unsigned long mkey_lease_timeout;

	/* The following fields are really per port. */
	struct ipath_qp_table qp_table;
	struct ipath_lkey_table lk_table;
	struct list_head pending[3];	/* FIFO of QPs waiting for ACKs */
	struct list_head piowait;	/* list for wait PIO buf */
	/* list of QPs waiting for RNR timer */
	struct list_head rnrwait;
	spinlock_t pending_lock;
	__be64 sys_image_guid;	/* in network order */
	__be64 gid_prefix;	/* in network order */
	__be64 mkey;
	u64 ipath_sword;	/* total dwords sent (sample result) */
	u64 ipath_rword;	/* total dwords received (sample result) */
	u64 ipath_spkts;	/* total packets sent (sample result) */
	u64 ipath_rpkts;	/* total packets received (sample result) */
	/* # of ticks no data sent (sample result) */
	u64 ipath_xmit_wait;
	u64 rcv_errors;		/* # of packets with SW detected rcv errs */
	u64 n_unicast_xmit;	/* total unicast packets sent */
	u64 n_unicast_rcv;	/* total unicast packets received */
	u64 n_multicast_xmit;	/* total multicast packets sent */
	u64 n_multicast_rcv;	/* total multicast packets received */
	u64 n_symbol_error_counter;	/* starting count for PMA */
	u64 n_link_error_recovery_counter;	/* starting count for PMA */
	u64 n_link_downed_counter;	/* starting count for PMA */
	u64 n_port_rcv_errors;	/* starting count for PMA */
	u64 n_port_rcv_remphys_errors;	/* starting count for PMA */
	u64 n_port_xmit_discards;	/* starting count for PMA */
	u64 n_port_xmit_data;	/* starting count for PMA */
	u64 n_port_rcv_data;	/* starting count for PMA */
	u64 n_port_xmit_packets;	/* starting count for PMA */
	u64 n_port_rcv_packets;	/* starting count for PMA */
	u32 n_pkey_violations;	/* starting count for PMA */
	u32 n_rc_resends;
	u32 n_rc_acks;
	u32 n_rc_qacks;
	u32 n_seq_naks;
	u32 n_rdma_seq;
	u32 n_rnr_naks;
	u32 n_other_naks;
	u32 n_timeouts;
	u32 n_pkt_drops;
	u32 n_wqe_errs;
	u32 n_rdma_dup_busy;
	u32 n_piowait;
	u32 n_no_piobuf;
	u32 port_cap_flags;
	u32 pma_sample_start;
	u32 pma_sample_interval;
	__be16 pma_counter_select[5];
	u16 pma_tag;
	u16 qkey_violations;
	u16 mkey_violations;
	u16 mkey_lease_period;
	u16 pending_index;	/* which pending queue is active */
	u8 pma_sample_status;
	u8 subnet_timeout;
	u8 link_width_enabled;
	u8 vl_high_limit;
	struct ipath_opcode_stats opstats[128];
};

struct ipath_ucontext {
	struct ib_ucontext ibucontext;
};

static inline struct ipath_mr *to_imr(struct ib_mr *ibmr)
{
	return container_of(ibmr, struct ipath_mr, ibmr);
}

static inline struct ipath_fmr *to_ifmr(struct ib_fmr *ibfmr)
{
	return container_of(ibfmr, struct ipath_fmr, ibfmr);
}

static inline struct ipath_pd *to_ipd(struct ib_pd *ibpd)
{
	return container_of(ibpd, struct ipath_pd, ibpd);
}

static inline struct ipath_ah *to_iah(struct ib_ah *ibah)
{
	return container_of(ibah, struct ipath_ah, ibah);
}

static inline struct ipath_cq *to_icq(struct ib_cq *ibcq)
{
	return container_of(ibcq, struct ipath_cq, ibcq);
}

static inline struct ipath_srq *to_isrq(struct ib_srq *ibsrq)
{
	return container_of(ibsrq, struct ipath_srq, ibsrq);
}

static inline struct ipath_qp *to_iqp(struct ib_qp *ibqp)
{
	return container_of(ibqp, struct ipath_qp, ibqp);
}

static inline struct ipath_ibdev *to_idev(struct ib_device *ibdev)
{
	return container_of(ibdev, struct ipath_ibdev, ibdev);
}

int ipath_process_mad(struct ib_device *ibdev,
		      int mad_flags,
		      u8 port_num,
		      struct ib_wc *in_wc,
		      struct ib_grh *in_grh,
		      struct ib_mad *in_mad, struct ib_mad *out_mad);

static inline struct ipath_ucontext *to_iucontext(struct ib_ucontext
						  *ibucontext)
{
	return container_of(ibucontext, struct ipath_ucontext, ibucontext);
}

/*
 * Compare the lower 24 bits of the two values.
 * Returns an integer less than, equal to, or greater than zero.
 */
static inline int ipath_cmp24(u32 a, u32 b)
{
	return (((int) a) - ((int) b)) << 8;
}
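
/*
 * Worked examples for the shift trick above: the left shift by 8
 * moves bit 23 of the 32-bit difference into the sign bit, so only
 * the low 24 bits decide the sign.  Thus ipath_cmp24(1, 0xffffff) ==
 * 0x200 (> 0: PSN 1 follows 0xffffff modulo 2^24), while
 * ipath_cmp24(0, 1) == -256 (< 0).
 */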

struct ipath_mcast *ipath_mcast_find(union ib_gid *mgid);

int ipath_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid);

int ipath_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid);

int ipath_mcast_tree_empty(void);

__be32 ipath_compute_aeth(struct ipath_qp *qp);

struct ipath_qp *ipath_lookup_qpn(struct ipath_qp_table *qpt, u32 qpn);

struct ib_qp *ipath_create_qp(struct ib_pd *ibpd,
			      struct ib_qp_init_attr *init_attr,
			      struct ib_udata *udata);

int ipath_destroy_qp(struct ib_qp *ibqp);

int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		    int attr_mask);

int ipath_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		   int attr_mask, struct ib_qp_init_attr *init_attr);

void ipath_free_all_qps(struct ipath_qp_table *qpt);

int ipath_init_qp_table(struct ipath_ibdev *idev, int size);

void ipath_sqerror_qp(struct ipath_qp *qp, struct ib_wc *wc);

void ipath_error_qp(struct ipath_qp *qp);

void ipath_get_credit(struct ipath_qp *qp, u32 aeth);

void ipath_do_rc_send(unsigned long data);

void ipath_do_uc_send(unsigned long data);

void ipath_cq_enter(struct ipath_cq *cq, struct ib_wc *entry, int sig);

int ipath_rkey_ok(struct ipath_ibdev *dev, struct ipath_sge_state *ss,
		  u32 len, u64 vaddr, u32 rkey, int acc);

int ipath_lkey_ok(struct ipath_lkey_table *rkt, struct ipath_sge *isge,
		  struct ib_sge *sge, int acc);

void ipath_copy_sge(struct ipath_sge_state *ss, void *data, u32 length);

void ipath_skip_sge(struct ipath_sge_state *ss, u32 length);

int ipath_post_rc_send(struct ipath_qp *qp, struct ib_send_wr *wr);

void ipath_uc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
		  int has_grh, void *data, u32 tlen, struct ipath_qp *qp);

void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
		  int has_grh, void *data, u32 tlen, struct ipath_qp *qp);

void ipath_restart_rc(struct ipath_qp *qp, u32 psn, struct ib_wc *wc);

void ipath_ud_loopback(struct ipath_qp *sqp, struct ipath_sge_state *ss,
		       u32 length, struct ib_send_wr *wr, struct ib_wc *wc);

int ipath_post_ud_send(struct ipath_qp *qp, struct ib_send_wr *wr);

void ipath_ud_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
		  int has_grh, void *data, u32 tlen, struct ipath_qp *qp);

int ipath_alloc_lkey(struct ipath_lkey_table *rkt,
		     struct ipath_mregion *mr);

void ipath_free_lkey(struct ipath_lkey_table *rkt, u32 lkey);

int ipath_post_srq_receive(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
			   struct ib_recv_wr **bad_wr);

struct ib_srq *ipath_create_srq(struct ib_pd *ibpd,
				struct ib_srq_init_attr *srq_init_attr,
				struct ib_udata *udata);

int ipath_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
		     enum ib_srq_attr_mask attr_mask);

int ipath_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr);

int ipath_destroy_srq(struct ib_srq *ibsrq);

int ipath_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry);

struct ib_cq *ipath_create_cq(struct ib_device *ibdev, int entries,
			      struct ib_ucontext *context,
			      struct ib_udata *udata);

int ipath_destroy_cq(struct ib_cq *ibcq);

int ipath_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify notify);

int ipath_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata);

struct ib_mr *ipath_get_dma_mr(struct ib_pd *pd, int acc);

struct ib_mr *ipath_reg_phys_mr(struct ib_pd *pd,
				struct ib_phys_buf *buffer_list,
				int num_phys_buf, int acc, u64 *iova_start);

struct ib_mr *ipath_reg_user_mr(struct ib_pd *pd, struct ib_umem *region,
				int mr_access_flags,
				struct ib_udata *udata);

int ipath_dereg_mr(struct ib_mr *ibmr);

struct ib_fmr *ipath_alloc_fmr(struct ib_pd *pd, int mr_access_flags,
			       struct ib_fmr_attr *fmr_attr);

int ipath_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
		       int list_len, u64 iova);

int ipath_unmap_fmr(struct list_head *fmr_list);

int ipath_dealloc_fmr(struct ib_fmr *ibfmr);

void ipath_no_bufs_available(struct ipath_qp *qp, struct ipath_ibdev *dev);

void ipath_insert_rnr_queue(struct ipath_qp *qp);

int ipath_get_rwqe(struct ipath_qp *qp, int wr_id_only);

void ipath_ruc_loopback(struct ipath_qp *sqp, struct ib_wc *wc);

extern const enum ib_wc_opcode ib_ipath_wc_opcode[];

extern const u8 ipath_cvt_physportstate[];

extern const int ib_ipath_state_ops[];

extern unsigned int ib_ipath_lkey_table_size;

extern const u32 ib_ipath_rnr_table[];

#endif /* IPATH_VERBS_H */
diff --git a/drivers/infiniband/hw/ipath/verbs_debug.h b/drivers/infiniband/hw/ipath/verbs_debug.h
new file mode 100644
index 000000000000..40d693cf3f94
--- /dev/null
+++ b/drivers/infiniband/hw/ipath/verbs_debug.h
@@ -0,0 +1,107 @@
/*
 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef _VERBS_DEBUG_H
#define _VERBS_DEBUG_H

/*
 * This file contains tracing code for the ib_ipath kernel module.
 */
#ifndef _VERBS_DEBUGGING	/* tracing enabled or not */
#define _VERBS_DEBUGGING 1
#endif

extern unsigned ib_ipath_debug;

#define _VERBS_ERROR(fmt,...) \
	do { \
		printk(KERN_ERR "%s: " fmt, "ib_ipath", ##__VA_ARGS__); \
	} while(0)

#define _VERBS_UNIT_ERROR(unit,fmt,...) \
	do { \
		printk(KERN_ERR "%s: " fmt, "ib_ipath", ##__VA_ARGS__); \
	} while(0)

#if _VERBS_DEBUGGING

/*
 * Mask values for debugging.  The scheme allows us to compile out any
 * of the debug tracing stuff, and if compiled in, to enable or
 * disable dynamically.
 * This can be set at modprobe time also:
 *   modprobe ib_ipath ib_ipath_debug=3
 */

#define __VERBS_INFO	0x1	/* generic low verbosity stuff */
#define __VERBS_DBG	0x2	/* generic debug */
#define __VERBS_VDBG	0x4	/* verbose debug */
#define __VERBS_SMADBG	0x8000	/* sma packet debug */

#define _VERBS_INFO(fmt,...) \
	do { \
		if (unlikely(ib_ipath_debug & __VERBS_INFO)) \
			printk(KERN_INFO "%s: " fmt, "ib_ipath", \
			       ##__VA_ARGS__); \
	} while(0)

#define _VERBS_DBG(fmt,...) \
	do { \
		if (unlikely(ib_ipath_debug & __VERBS_DBG)) \
			printk(KERN_DEBUG "%s: " fmt, __func__, \
			       ##__VA_ARGS__); \
	} while(0)

#define _VERBS_VDBG(fmt,...) \
	do { \
		if (unlikely(ib_ipath_debug & __VERBS_VDBG)) \
			printk(KERN_DEBUG "%s: " fmt, __func__, \
			       ##__VA_ARGS__); \
	} while(0)

#define _VERBS_SMADBG(fmt,...) \
	do { \
		if (unlikely(ib_ipath_debug & __VERBS_SMADBG)) \
			printk(KERN_DEBUG "%s: " fmt, __func__, \
			       ##__VA_ARGS__); \
	} while(0)

#else /* ! _VERBS_DEBUGGING */

#define _VERBS_INFO(fmt,...)
#define _VERBS_DBG(fmt,...)
#define _VERBS_VDBG(fmt,...)
#define _VERBS_SMADBG(fmt,...)

#endif /* _VERBS_DEBUGGING */

#endif /* _VERBS_DEBUG_H */
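
As a usage sketch (the call sites and messages below are hypothetical, not
part of this commit), a .c file in the ib_ipath module would emit trace
output like this:

	_VERBS_INFO("device %d active\n", dev->ib_unit);
	_VERBS_DBG("posting %u SGEs\n", wr->num_sge);

With _VERBS_DEBUGGING compiled in, these print only when the matching bit
is set in ib_ipath_debug; for example, loading with
"modprobe ib_ipath ib_ipath_debug=7" enables the info, debug, and verbose
masks (0x1 | 0x2 | 0x4).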