Diffstat (limited to 'drivers/infiniband/hw/ipath/ipath_verbs.h')
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_verbs.h | 252
1 file changed, 167 insertions, 85 deletions
diff --git a/drivers/infiniband/hw/ipath/ipath_verbs.h b/drivers/infiniband/hw/ipath/ipath_verbs.h
index 2df684727dc1..09bbb3f9a217 100644
--- a/drivers/infiniband/hw/ipath/ipath_verbs.h
+++ b/drivers/infiniband/hw/ipath/ipath_verbs.h
@@ -38,10 +38,10 @@
 #include <linux/spinlock.h>
 #include <linux/kernel.h>
 #include <linux/interrupt.h>
+#include <linux/kref.h>
 #include <rdma/ib_pack.h>
 
 #include "ipath_layer.h"
-#include "verbs_debug.h"
 
 #define QPN_MAX (1 << 24)
 #define QPNMAP_ENTRIES (QPN_MAX / PAGE_SIZE / BITS_PER_BYTE)
@@ -50,7 +50,7 @@
  * Increment this value if any changes that break userspace ABI
  * compatibility are made.
  */
-#define IPATH_UVERBS_ABI_VERSION 1
+#define IPATH_UVERBS_ABI_VERSION 2
 
 /*
  * Define an ib_cq_notify value that is not valid so we know when CQ
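Bumping IPATH_UVERBS_ABI_VERSION is what lets the matching userspace provider refuse to run against a kernel driver whose mmap'ed queue layout it does not understand. A hedged sketch of the usual check on the user side follows; the function name and the way the kernel's version reaches userspace are illustrative, not the actual libipathverbs code.

/* Hypothetical user-side check; real providers compare against the ABI
 * version the kernel reports when the device context is created. */
#define IPATH_UVERBS_ABI_VERSION 2

static int check_abi(unsigned kernel_abi_version)
{
	if (kernel_abi_version != IPATH_UVERBS_ABI_VERSION)
		return -1;	/* shared queue layouts may not match */
	return 0;
}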
@@ -152,19 +152,6 @@ struct ipath_mcast {
 	int n_attached;
 };
 
-/* Memory region */
-struct ipath_mr {
-	struct ib_mr ibmr;
-	struct ipath_mregion mr;	/* must be last */
-};
-
-/* Fast memory region */
-struct ipath_fmr {
-	struct ib_fmr ibfmr;
-	u8 page_shift;
-	struct ipath_mregion mr;	/* must be last */
-};
-
 /* Protection domain */
 struct ipath_pd {
 	struct ib_pd ibpd;
@@ -178,58 +165,89 @@ struct ipath_ah {
 };
 
 /*
- * Quick description of our CQ/QP locking scheme:
- *
- * We have one global lock that protects dev->cq/qp_table.  Each
- * struct ipath_cq/qp also has its own lock.  An individual qp lock
- * may be taken inside of an individual cq lock.  Both cqs attached to
- * a qp may be locked, with the send cq locked first.  No other
- * nesting should be done.
- *
- * Each struct ipath_cq/qp also has an atomic_t ref count.  The
- * pointer from the cq/qp_table to the struct counts as one reference.
- * This reference also is good for access through the consumer API, so
- * modifying the CQ/QP etc doesn't need to take another reference.
- * Access because of a completion being polled does need a reference.
- *
- * Finally, each struct ipath_cq/qp has a wait_queue_head_t for the
- * destroy function to sleep on.
- *
- * This means that access from the consumer API requires nothing but
- * taking the struct's lock.
- *
- * Access because of a completion event should go as follows:
- * - lock cq/qp_table and look up struct
- * - increment ref count in struct
- * - drop cq/qp_table lock
- * - lock struct, do your thing, and unlock struct
- * - decrement ref count; if zero, wake up waiters
- *
- * To destroy a CQ/QP, we can do the following:
- * - lock cq/qp_table, remove pointer, unlock cq/qp_table lock
- * - decrement ref count
- * - wait_event until ref count is zero
- *
- * It is the consumer's responsibilty to make sure that no QP
- * operations (WQE posting or state modification) are pending when the
- * QP is destroyed.  Also, the consumer must make sure that calls to
- * qp_modify are serialized.
- *
- * Possible optimizations (wait for profile data to see if/where we
- * have locks bouncing between CPUs):
- * - split cq/qp table lock into n separate (cache-aligned) locks,
- *   indexed (say) by the page in the table
+ * This structure is used by ipath_mmap() to validate an offset
+ * when an mmap() request is made.  The vm_area_struct then uses
+ * this as its vm_private_data.
+ */
+struct ipath_mmap_info {
+	struct ipath_mmap_info *next;
+	struct ib_ucontext *context;
+	void *obj;
+	struct kref ref;
+	unsigned size;
+	unsigned mmap_cnt;
+};
+
+/*
+ * This structure is used to contain the head pointer, tail pointer,
+ * and completion queue entries as a single memory allocation so
+ * it can be mmap'ed into user space.
  */
+struct ipath_cq_wc {
+	u32 head;		/* index of next entry to fill */
+	u32 tail;		/* index of next ib_poll_cq() entry */
+	struct ib_wc queue[1];	/* this is actually size ibcq.cqe + 1 */
+};
 
+/*
+ * The completion queue structure.
+ */
 struct ipath_cq {
 	struct ib_cq ibcq;
 	struct tasklet_struct comptask;
 	spinlock_t lock;
 	u8 notify;
 	u8 triggered;
-	u32 head;		/* new records added to the head */
-	u32 tail;		/* poll_cq() reads from here. */
-	struct ib_wc *queue;	/* this is actually ibcq.cqe + 1 */
+	struct ipath_cq_wc *queue;
+	struct ipath_mmap_info *ip;
+};
+
+/*
+ * A segment is a linear region of low physical memory.
+ * XXX Maybe we should use phys addr here and kmap()/kunmap().
+ * Used by the verbs layer.
+ */
+struct ipath_seg {
+	void *vaddr;
+	size_t length;
+};
+
+/* The number of ipath_segs that fit in a page. */
+#define IPATH_SEGSZ (PAGE_SIZE / sizeof (struct ipath_seg))
+
+struct ipath_segarray {
+	struct ipath_seg segs[IPATH_SEGSZ];
+};
+
+struct ipath_mregion {
+	u64 user_base;		/* User's address for this region */
+	u64 iova;		/* IB start address of this region */
+	size_t length;
+	u32 lkey;
+	u32 offset;		/* offset (bytes) to start of region */
+	int access_flags;
+	u32 max_segs;		/* number of ipath_segs in all the arrays */
+	u32 mapsz;		/* size of the map array */
+	struct ipath_segarray *map[0];	/* the segments */
+};
+
+/*
+ * These keep track of the copy progress within a memory region.
+ * Used by the verbs layer.
+ */
+struct ipath_sge {
+	struct ipath_mregion *mr;
+	void *vaddr;		/* current pointer into the segment */
+	u32 sge_length;		/* length of the SGE */
+	u32 length;		/* remaining length of the segment */
+	u16 m;			/* current index: mr->map[m] */
+	u16 n;			/* current index: mr->map[m]->segs[n] */
+};
+
+/* Memory region */
+struct ipath_mr {
+	struct ib_mr ibmr;
+	struct ipath_mregion mr;	/* must be last */
 };
 
 /*
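The new ipath_cq_wc layout above puts the producer index, consumer index, and the work-completion entries in one allocation precisely so the whole ring can be handed to user space through ipath_mmap(). The sketch below is a hedged, self-contained user-space model of that layout and its head/tail arithmetic; the wc_entry type and the cq_ring_* helper names are stand-ins for illustration, not driver code.

#include <stdio.h>
#include <stdlib.h>

/* Stand-in for struct ib_wc; the driver stores real ib_wc entries. */
struct wc_entry {
	unsigned long long wr_id;
	int status;
};

/* Same idea as struct ipath_cq_wc: indices plus a trailing entry array. */
struct cq_ring {
	unsigned int head;		/* next entry the producer fills */
	unsigned int tail;		/* next entry the consumer polls */
	struct wc_entry queue[1];	/* really cqe + 1 entries */
};

/* One allocation holds the header and cqe + 1 entries. */
static struct cq_ring *cq_ring_alloc(unsigned int cqe)
{
	return calloc(1, sizeof(struct cq_ring) + cqe * sizeof(struct wc_entry));
}

/* Producer side: drop the entry if advancing head would meet tail. */
static int cq_ring_enter(struct cq_ring *r, unsigned int cqe,
			 const struct wc_entry *e)
{
	unsigned int next = r->head + 1 > cqe ? 0 : r->head + 1;

	if (next == r->tail)
		return -1;	/* ring full */
	r->queue[r->head] = *e;
	r->head = next;
	return 0;
}

/* Consumer side: a poll only reads an entry and advances tail. */
static int cq_ring_poll(struct cq_ring *r, unsigned int cqe, struct wc_entry *e)
{
	if (r->tail == r->head)
		return 0;	/* ring empty */
	*e = r->queue[r->tail];
	r->tail = r->tail + 1 > cqe ? 0 : r->tail + 1;
	return 1;
}

int main(void)
{
	unsigned int cqe = 4;
	struct cq_ring *r = cq_ring_alloc(cqe);
	struct wc_entry in = { .wr_id = 42, .status = 0 }, out;

	if (!r)
		return 1;
	cq_ring_enter(r, cqe, &in);
	if (cq_ring_poll(r, cqe, &out))
		printf("polled wr_id %llu\n", out.wr_id);
	free(r);
	return 0;
}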
@@ -248,32 +266,50 @@ struct ipath_swqe {
 
 /*
  * Receive work request queue entry.
- * The size of the sg_list is determined when the QP is created and stored
- * in qp->r_max_sge.
+ * The size of the sg_list is determined when the QP (or SRQ) is created
+ * and stored in qp->r_rq.max_sge (or srq->rq.max_sge).
  */
 struct ipath_rwqe {
 	u64 wr_id;
-	u32 length;		/* total length of data in sg_list */
 	u8 num_sge;
-	struct ipath_sge sg_list[0];
+	struct ib_sge sg_list[0];
 };
 
-struct ipath_rq {
-	spinlock_t lock;
+/*
+ * This structure is used to contain the head pointer, tail pointer,
+ * and receive work queue entries as a single memory allocation so
+ * it can be mmap'ed into user space.
+ * Note that the wq array elements are variable size so you can't
+ * just index into the array to get the N'th element;
+ * use get_rwqe_ptr() instead.
+ */
+struct ipath_rwq {
 	u32 head;		/* new work requests posted to the head */
 	u32 tail;		/* receives pull requests from here. */
+	struct ipath_rwqe wq[0];
+};
+
+struct ipath_rq {
+	struct ipath_rwq *wq;
+	spinlock_t lock;
 	u32 size;		/* size of RWQE array */
 	u8 max_sge;
-	struct ipath_rwqe *wq;	/* RWQE array */
 };
 
 struct ipath_srq {
 	struct ib_srq ibsrq;
 	struct ipath_rq rq;
+	struct ipath_mmap_info *ip;
 	/* send signal when number of RWQEs < limit */
 	u32 limit;
 };
 
+struct ipath_sge_state {
+	struct ipath_sge *sg_list;	/* next SGE to be used if any */
+	struct ipath_sge sge;		/* progress state for the current SGE */
+	u8 num_sge;
+};
+
 /*
  * Variables prefixed with s_ are for the requester (sender).
  * Variables prefixed with r_ are for the responder (receiver).
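Because struct ipath_rwqe now ends in a variable-length ib_sge array, the receive queue is laid out as one block: the ipath_rwq header followed by size entries of sizeof(struct ipath_rwqe) + max_sge * sizeof(struct ib_sge) bytes each, which is what makes a single mmap() of the queue possible. Entries are then located with get_rwqe_ptr() (see the sketch after that function below). A small hedged user-space model of the size computation, using stand-in types rather than the kernel ones:

#include <stdio.h>

/* Stand-ins for struct ib_sge, struct ipath_rwqe, and struct ipath_rwq. */
struct sge { unsigned long long addr; unsigned int length, lkey; };
struct rwqe { unsigned long long wr_id; unsigned char num_sge;
	      struct sge sg_list[]; };
struct rwq { unsigned int head, tail; /* variable-size entries follow */ };

int main(void)
{
	unsigned int size = 128;	/* number of RWQEs */
	unsigned int max_sge = 4;	/* fixed when the QP/SRQ is created */
	size_t stride = sizeof(struct rwqe) + max_sge * sizeof(struct sge);
	size_t total = sizeof(struct rwq) + (size_t)size * stride;

	printf("each RWQE occupies %zu bytes, whole queue %zu bytes\n",
	       stride, total);
	return 0;
}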
@@ -293,6 +329,7 @@ struct ipath_qp {
 	atomic_t refcount;
 	wait_queue_head_t wait;
 	struct tasklet_struct s_task;
+	struct ipath_mmap_info *ip;
 	struct ipath_sge_state *s_cur_sge;
 	struct ipath_sge_state s_sge;	/* current send request data */
 	/* current RDMA read send data */
@@ -334,6 +371,7 @@ struct ipath_qp {
 	u8 s_retry;		/* requester retry counter */
 	u8 s_rnr_retry;		/* requester RNR retry counter */
 	u8 s_pkey_index;	/* PKEY index to use */
+	u8 timeout;		/* Timeout for this QP */
 	enum ib_mtu path_mtu;
 	u32 remote_qpn;
 	u32 qkey;		/* QKEY for this QP (for UD or RD) */
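The new timeout field carries the standard IB local ACK timeout exponent, where the actual wait is 4.096 microseconds times 2 raised to the field value, so a value of 14 is roughly 67 ms. A tiny self-contained conversion sketch (the helper name is illustrative, not from the driver):

#include <stdio.h>

/* IB local ACK timeout: 4.096 us * 2^timeout (timeout is a 5-bit field). */
static unsigned long long timeout_to_ns(unsigned char timeout)
{
	return 4096ULL << (timeout & 0x1f);
}

int main(void)
{
	unsigned char t;

	for (t = 10; t <= 14; t++)
		printf("timeout %u -> %llu ns\n", t, timeout_to_ns(t));
	return 0;
}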
@@ -345,7 +383,8 @@ struct ipath_qp {
 	u32 s_ssn;		/* SSN of tail entry */
 	u32 s_lsn;		/* limit sequence number (credit) */
 	struct ipath_swqe *s_wq;	/* send work queue */
 	struct ipath_rq r_rq;		/* receive work queue */
+	struct ipath_sge r_sg_list[0];	/* verified SGEs */
 };
 
 /*
@@ -369,15 +408,15 @@ static inline struct ipath_swqe *get_swqe_ptr(struct ipath_qp *qp,
 
 /*
  * Since struct ipath_rwqe is not a fixed size, we can't simply index into
- * struct ipath_rq.wq.  This function does the array index computation.
+ * struct ipath_rwq.wq.  This function does the array index computation.
  */
 static inline struct ipath_rwqe *get_rwqe_ptr(struct ipath_rq *rq,
 					      unsigned n)
 {
 	return (struct ipath_rwqe *)
-		((char *) rq->wq +
+		((char *) rq->wq->wq +
 		 (sizeof(struct ipath_rwqe) +
-		  rq->max_sge * sizeof(struct ipath_sge)) * n);
+		  rq->max_sge * sizeof(struct ib_sge)) * n);
 }
 
 /*
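get_rwqe_ptr() above is the kernel-side version of the stride arithmetic sketched earlier: entry n begins n strides past the head/tail header. A hedged user-space check of the same arithmetic, again with stand-in types rather than the driver's:

#include <assert.h>
#include <stdlib.h>

struct sge { unsigned long long addr; unsigned int length, lkey; };
struct rwqe { unsigned long long wr_id; unsigned char num_sge;
	      struct sge sg_list[]; };
struct rwq { unsigned int head, tail; };

/* Mirror of get_rwqe_ptr(): index into variable-size entries by hand. */
static struct rwqe *rwqe_ptr(struct rwq *wq, unsigned int max_sge, unsigned n)
{
	size_t stride = sizeof(struct rwqe) + max_sge * sizeof(struct sge);

	return (struct rwqe *)((char *)(wq + 1) + stride * n);
}

int main(void)
{
	unsigned int size = 8, max_sge = 3, n;
	size_t stride = sizeof(struct rwqe) + max_sge * sizeof(struct sge);
	struct rwq *wq = calloc(1, sizeof(*wq) + size * stride);

	if (!wq)
		return 1;
	for (n = 0; n < size; n++)
		rwqe_ptr(wq, max_sge, n)->wr_id = n;	/* write each entry */
	for (n = 0; n < size; n++)
		assert(rwqe_ptr(wq, max_sge, n)->wr_id == n);
	free(wq);
	return 0;
}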
@@ -417,6 +456,7 @@ struct ipath_ibdev {
 	struct ib_device ibdev;
 	struct list_head dev_list;
 	struct ipath_devdata *dd;
+	struct ipath_mmap_info *pending_mmaps;
 	int ib_unit;		/* This is the device number */
 	u16 sm_lid;		/* in host order */
 	u8 sm_sl;
@@ -435,11 +475,20 @@
 	__be64 sys_image_guid;	/* in network order */
 	__be64 gid_prefix;	/* in network order */
 	__be64 mkey;
+
 	u32 n_pds_allocated;	/* number of PDs allocated for device */
+	spinlock_t n_pds_lock;
 	u32 n_ahs_allocated;	/* number of AHs allocated for device */
+	spinlock_t n_ahs_lock;
 	u32 n_cqs_allocated;	/* number of CQs allocated for device */
+	spinlock_t n_cqs_lock;
+	u32 n_qps_allocated;	/* number of QPs allocated for device */
+	spinlock_t n_qps_lock;
 	u32 n_srqs_allocated;	/* number of SRQs allocated for device */
+	spinlock_t n_srqs_lock;
 	u32 n_mcast_grps_allocated; /* number of mcast groups allocated */
+	spinlock_t n_mcast_grps_lock;
+
 	u64 ipath_sword;	/* total dwords sent (sample result) */
 	u64 ipath_rword;	/* total dwords received (sample result) */
 	u64 ipath_spkts;	/* total packets sent (sample result) */
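Giving each allocation counter its own spinlock lets every verbs object type enforce its module-parameter limit without serializing on one device-wide lock. The fragment below is only a sketch of the usual reserve/release pattern around such a counter, written against n_cqs_allocated and ib_ipath_max_cqs; the real ipath_create_cq()/ipath_destroy_cq() bodies may differ, and the function names here are hypothetical.

/* Kernel-style sketch; assumes the usual kernel headers are available. */
static int example_reserve_cq(struct ipath_ibdev *dev)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&dev->n_cqs_lock, flags);
	if (dev->n_cqs_allocated == ib_ipath_max_cqs)
		ret = -ENOMEM;		/* module-parameter limit reached */
	else
		dev->n_cqs_allocated++;
	spin_unlock_irqrestore(&dev->n_cqs_lock, flags);

	return ret;
}

static void example_release_cq(struct ipath_ibdev *dev)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->n_cqs_lock, flags);
	dev->n_cqs_allocated--;
	spin_unlock_irqrestore(&dev->n_cqs_lock, flags);
}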
@@ -494,8 +543,19 @@ struct ipath_ibdev {
 	struct ipath_opcode_stats opstats[128];
 };
 
-struct ipath_ucontext {
-	struct ib_ucontext ibucontext;
+struct ipath_verbs_counters {
+	u64 symbol_error_counter;
+	u64 link_error_recovery_counter;
+	u64 link_downed_counter;
+	u64 port_rcv_errors;
+	u64 port_rcv_remphys_errors;
+	u64 port_xmit_discards;
+	u64 port_xmit_data;
+	u64 port_rcv_data;
+	u64 port_xmit_packets;
+	u64 port_rcv_packets;
+	u32 local_link_integrity_errors;
+	u32 excessive_buffer_overrun_errors;
 };
 
 static inline struct ipath_mr *to_imr(struct ib_mr *ibmr)
@@ -503,11 +563,6 @@ static inline struct ipath_mr *to_imr(struct ib_mr *ibmr)
 	return container_of(ibmr, struct ipath_mr, ibmr);
 }
 
-static inline struct ipath_fmr *to_ifmr(struct ib_fmr *ibfmr)
-{
-	return container_of(ibfmr, struct ipath_fmr, ibfmr);
-}
-
 static inline struct ipath_pd *to_ipd(struct ib_pd *ibpd)
 {
 	return container_of(ibpd, struct ipath_pd, ibpd);
@@ -545,12 +600,6 @@ int ipath_process_mad(struct ib_device *ibdev,
 		      struct ib_grh *in_grh,
 		      struct ib_mad *in_mad, struct ib_mad *out_mad);
 
-static inline struct ipath_ucontext *to_iucontext(struct ib_ucontext
-						  *ibucontext)
-{
-	return container_of(ibucontext, struct ipath_ucontext, ibucontext);
-}
-
 /*
  * Compare the lower 24 bits of the two values.
  * Returns an integer <, ==, or > than zero.
@@ -562,6 +611,13 @@ static inline int ipath_cmp24(u32 a, u32 b)
 
 struct ipath_mcast *ipath_mcast_find(union ib_gid *mgid);
 
+int ipath_snapshot_counters(struct ipath_devdata *dd, u64 *swords,
+			    u64 *rwords, u64 *spkts, u64 *rpkts,
+			    u64 *xmit_wait);
+
+int ipath_get_counters(struct ipath_devdata *dd,
+		       struct ipath_verbs_counters *cntrs);
+
 int ipath_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid);
 
 int ipath_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid);
@@ -579,7 +635,7 @@ struct ib_qp *ipath_create_qp(struct ib_pd *ibpd,
 int ipath_destroy_qp(struct ib_qp *ibqp);
 
 int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
-		    int attr_mask);
+		    int attr_mask, struct ib_udata *udata);
 
 int ipath_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 		   int attr_mask, struct ib_qp_init_attr *init_attr);
@@ -592,6 +648,9 @@ void ipath_sqerror_qp(struct ipath_qp *qp, struct ib_wc *wc);
 
 void ipath_get_credit(struct ipath_qp *qp, u32 aeth);
 
+int ipath_verbs_send(struct ipath_devdata *dd, u32 hdrwords,
+		     u32 *hdr, u32 len, struct ipath_sge_state *ss);
+
 void ipath_cq_enter(struct ipath_cq *cq, struct ib_wc *entry, int sig);
 
 int ipath_rkey_ok(struct ipath_ibdev *dev, struct ipath_sge_state *ss,
@@ -638,7 +697,8 @@ struct ib_srq *ipath_create_srq(struct ib_pd *ibpd,
 				struct ib_udata *udata);
 
 int ipath_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
-		     enum ib_srq_attr_mask attr_mask);
+		     enum ib_srq_attr_mask attr_mask,
+		     struct ib_udata *udata);
 
 int ipath_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr);
 
@@ -680,6 +740,10 @@ int ipath_unmap_fmr(struct list_head *fmr_list);
 
 int ipath_dealloc_fmr(struct ib_fmr *ibfmr);
 
+void ipath_release_mmap_info(struct kref *ref);
+
+int ipath_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);
+
 void ipath_no_bufs_available(struct ipath_qp *qp, struct ipath_ibdev *dev);
 
 void ipath_insert_rnr_queue(struct ipath_qp *qp);
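ipath_release_mmap_info() takes a struct kref, which marks it as the release callback for the kref embedded in struct ipath_mmap_info: the object has to outlive both the verbs object that created it and any user mapping still holding it, which is exactly the pattern kref exists for. A hedged kernel-style sketch of that lifetime pattern follows; it is not the driver's actual create/mmap/destroy code, only the reference pairing it implies.

/* Hedged sketch of the kref lifetime pattern.  Assumed: ip->ref is the
 * struct kref embedded in struct ipath_mmap_info (see above). */
static void example_mmap_info_lifetime(struct ipath_mmap_info *ip)
{
	kref_init(&ip->ref);		/* creator holds one reference */

	kref_get(&ip->ref);		/* e.g. taken when a mapping is made */

	/* each put drops one reference; the last one calls the release */
	kref_put(&ip->ref, ipath_release_mmap_info);
	kref_put(&ip->ref, ipath_release_mmap_info);
}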
@@ -700,6 +764,22 @@ int ipath_make_rc_req(struct ipath_qp *qp, struct ipath_other_headers *ohdr,
 
 int ipath_make_uc_req(struct ipath_qp *qp, struct ipath_other_headers *ohdr,
 		      u32 pmtu, u32 *bth0p, u32 *bth2p);
+int ipath_register_ib_device(struct ipath_devdata *);
+
+void ipath_unregister_ib_device(struct ipath_ibdev *);
+
+void ipath_ib_rcv(struct ipath_ibdev *, void *, void *, u32);
+
+int ipath_ib_piobufavail(struct ipath_ibdev *);
+
+void ipath_ib_timer(struct ipath_ibdev *);
+
+unsigned ipath_get_npkeys(struct ipath_devdata *);
+
+u32 ipath_get_cr_errpkey(struct ipath_devdata *);
+
+unsigned ipath_get_pkey(struct ipath_devdata *, unsigned);
+
 extern const enum ib_wc_opcode ib_ipath_wc_opcode[];
 
 extern const u8 ipath_cvt_physportstate[];
@@ -714,6 +794,8 @@ extern unsigned int ib_ipath_max_cqs;
 
 extern unsigned int ib_ipath_max_qp_wrs;
 
+extern unsigned int ib_ipath_max_qps;
+
 extern unsigned int ib_ipath_max_sges;
 
 extern unsigned int ib_ipath_max_mcast_grps;