Diffstat (limited to 'include/rdma')

 include/rdma/ib_fmr_pool.h   |  2
 include/rdma/ib_mad.h        | 48
 include/rdma/ib_user_verbs.h | 79
 include/rdma/ib_verbs.h      | 38
 4 files changed, 139 insertions, 28 deletions
diff --git a/include/rdma/ib_fmr_pool.h b/include/rdma/ib_fmr_pool.h
index 86b7e93f198b..4ace54cd0cce 100644
--- a/include/rdma/ib_fmr_pool.h
+++ b/include/rdma/ib_fmr_pool.h
@@ -43,6 +43,7 @@ struct ib_fmr_pool;
 /**
  * struct ib_fmr_pool_param - Parameters for creating FMR pool
  * @max_pages_per_fmr:Maximum number of pages per map request.
+ * @page_shift: Log2 of sizeof "pages" mapped by this fmr
  * @access:Access flags for FMRs in pool.
  * @pool_size:Number of FMRs to allocate for pool.
  * @dirty_watermark:Flush is triggered when @dirty_watermark dirty
@@ -55,6 +56,7 @@ struct ib_fmr_pool;
  */
 struct ib_fmr_pool_param {
 	int                     max_pages_per_fmr;
+	int                     page_shift;
 	enum ib_access_flags    access;
 	int                     pool_size;
 	int                     dirty_watermark;
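
A minimal usage sketch for the extended pool parameters, assuming a valid protection domain and the usual ib_create_fmr_pool() entry point; the sizes and access flags below are arbitrary placeholders, not values from the patch.

#include <rdma/ib_verbs.h>
#include <rdma/ib_fmr_pool.h>

/* Sketch: create an FMR pool whose mappings are built from 4 KB "pages",
 * using the new page_shift field.  All numeric values are placeholders. */
static struct ib_fmr_pool *example_create_fmr_pool(struct ib_pd *pd)
{
	struct ib_fmr_pool_param params = {
		.max_pages_per_fmr = 16,	/* up to 16 pages per map request */
		.page_shift        = 12,	/* log2(4096) => 4 KB pages */
		.access            = IB_ACCESS_LOCAL_WRITE |
				     IB_ACCESS_REMOTE_READ,
		.pool_size         = 32,	/* FMRs preallocated in the pool */
		.dirty_watermark   = 8,		/* flush once 8 FMRs are dirty */
	};

	return ib_create_fmr_pool(pd, &params);
}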
diff --git a/include/rdma/ib_mad.h b/include/rdma/ib_mad.h
index 2c133506742b..51ab8eddb295 100644
--- a/include/rdma/ib_mad.h
+++ b/include/rdma/ib_mad.h
@@ -33,7 +33,7 @@
  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
  *
- * $Id: ib_mad.h 2775 2005-07-02 13:42:12Z halr $
+ * $Id: ib_mad.h 5596 2006-03-03 01:00:07Z sean.hefty $
  */
 
 #if !defined( IB_MAD_H )
@@ -208,15 +208,23 @@ struct ib_class_port_info
 /**
  * ib_mad_send_buf - MAD data buffer and work request for sends.
  * @next: A pointer used to chain together MADs for posting.
- * @mad: References an allocated MAD data buffer.
+ * @mad: References an allocated MAD data buffer for MADs that do not have
+ *   RMPP active.  For MADs using RMPP, references the common and management
+ *   class specific headers.
  * @mad_agent: MAD agent that allocated the buffer.
  * @ah: The address handle to use when sending the MAD.
  * @context: User-controlled context fields.
+ * @hdr_len: Indicates the size of the data header of the MAD.  This length
+ *   includes the common MAD, RMPP, and class specific headers.
+ * @data_len: Indicates the total size of user-transferred data.
+ * @seg_count: The number of RMPP segments allocated for this send.
+ * @seg_size: Size of each RMPP segment.
  * @timeout_ms: Time to wait for a response.
  * @retries: Number of times to retry a request for a response.
  *
  * Users are responsible for initializing the MAD buffer itself, with the
- * exception of specifying the payload length field in any RMPP MAD.
+ * exception of any RMPP header.  Additional segment buffer space allocated
+ * beyond data_len is padding.
  */
 struct ib_mad_send_buf {
 	struct ib_mad_send_buf	*next;
@@ -224,6 +232,10 @@ struct ib_mad_send_buf {
 	struct ib_mad_agent	*mad_agent;
 	struct ib_ah		*ah;
 	void			*context[2];
+	int			hdr_len;
+	int			data_len;
+	int			seg_count;
+	int			seg_size;
 	int			timeout_ms;
 	int			retries;
 };
@@ -299,7 +311,7 @@ typedef void (*ib_mad_snoop_handler)(struct ib_mad_agent *mad_agent,
 * @mad_recv_wc: Received work completion information on the received MAD.
 *
 * MADs received in response to a send request operation will be handed to
- * the user after the send operation completes.  All data buffers given
+ * the user before the send operation completes.  All data buffers given
 * to registered agents through this routine are owned by the receiving
 * client, except for snooping agents.  Clients snooping MADs should not
 * modify the data referenced by @mad_recv_wc.
@@ -485,17 +497,6 @@ int ib_unregister_mad_agent(struct ib_mad_agent *mad_agent);
 int ib_post_send_mad(struct ib_mad_send_buf *send_buf,
 		     struct ib_mad_send_buf **bad_send_buf);
 
-/**
- * ib_coalesce_recv_mad - Coalesces received MAD data into a single buffer.
- * @mad_recv_wc: Work completion information for a received MAD.
- * @buf: User-provided data buffer to receive the coalesced buffers.  The
- *   referenced buffer should be at least the size of the mad_len specified
- *   by @mad_recv_wc.
- *
- * This call copies a chain of received MAD segments into a single data buffer,
- * removing duplicated headers.
- */
-void ib_coalesce_recv_mad(struct ib_mad_recv_wc *mad_recv_wc, void *buf);
 
 /**
  * ib_free_recv_mad - Returns data buffers used to receive a MAD.
@@ -590,9 +591,10 @@ int ib_process_mad_wc(struct ib_mad_agent *mad_agent,
 * with an initialized work request structure.  Users may modify the returned
 * MAD data buffer before posting the send.
 *
- * The returned data buffer will be cleared.  Users are responsible for
- * initializing the common MAD and any class specific headers.  If @rmpp_active
- * is set, the RMPP header will be initialized for sending.
+ * The returned MAD header, class specific headers, and any padding will be
+ * cleared.  Users are responsible for initializing the common MAD header,
+ * any class specific header, and MAD data area.
+ * If @rmpp_active is set, the RMPP header will be initialized for sending.
 */
 struct ib_mad_send_buf * ib_create_send_mad(struct ib_mad_agent *mad_agent,
					     u32 remote_qpn, u16 pkey_index,
@@ -601,6 +603,16 @@ struct ib_mad_send_buf * ib_create_send_mad(struct ib_mad_agent *mad_agent,
					     gfp_t gfp_mask);
 
 /**
+ * ib_get_rmpp_segment - returns the data buffer for a given RMPP segment.
+ * @send_buf: Previously allocated send data buffer.
+ * @seg_num: number of segment to return
+ *
+ * This routine returns a pointer to the data buffer of an RMPP MAD.
+ * Users must provide synchronization to @send_buf around this call.
+ */
+void *ib_get_rmpp_segment(struct ib_mad_send_buf *send_buf, int seg_num);
+
+/**
  * ib_free_send_mad - Returns data buffers used to send a MAD.
  * @send_buf: Previously allocated send data buffer.
  */
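
As a usage illustration (not part of the patch), a hedged sketch of sending a segmented MAD with the reworked API: ib_create_send_mad() sizes the buffer from hdr_len/data_len, and ib_get_rmpp_segment() exposes each segment's data area. The IB_MGMT_SA_HDR header size, the choice of management class, and 1-based segment numbering are assumptions; callers must also initialize the common MAD and class headers in send_buf->mad before posting.

#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <rdma/ib_mad.h>

/* Sketch only: post a large, RMPP-segmented MAD.  Numeric choices and the
 * header length constant are examples, not requirements of the API. */
static int example_rmpp_send(struct ib_mad_agent *agent, struct ib_ah *ah,
			     u32 remote_qpn, u16 pkey_index,
			     u8 *payload, int payload_len)
{
	struct ib_mad_send_buf *send_buf;
	int copied = 0;
	int seg, ret;

	send_buf = ib_create_send_mad(agent, remote_qpn, pkey_index,
				      1,		/* rmpp_active */
				      IB_MGMT_SA_HDR,	/* hdr_len (example) */
				      payload_len,	/* data_len */
				      GFP_KERNEL);
	if (IS_ERR(send_buf))
		return PTR_ERR(send_buf);

	send_buf->ah = ah;

	/* Caller fills in the common MAD and class headers in send_buf->mad,
	 * then copies user data into each allocated RMPP segment
	 * (segments assumed to be numbered from 1). */
	for (seg = 1; seg <= send_buf->seg_count && copied < payload_len; seg++) {
		int chunk = min(send_buf->seg_size, payload_len - copied);

		memcpy(ib_get_rmpp_segment(send_buf, seg),
		       payload + copied, chunk);
		copied += chunk;
	}

	ret = ib_post_send_mad(send_buf, NULL);
	if (ret)
		ib_free_send_mad(send_buf);
	return ret;
}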
diff --git a/include/rdma/ib_user_verbs.h b/include/rdma/ib_user_verbs.h
index 5ff1490c08db..338ed4333063 100644
--- a/include/rdma/ib_user_verbs.h
+++ b/include/rdma/ib_user_verbs.h
@@ -1,7 +1,8 @@
 /*
  * Copyright (c) 2005 Topspin Communications.  All rights reserved.
- * Copyright (c) 2005 Cisco Systems.  All rights reserved.
+ * Copyright (c) 2005, 2006 Cisco Systems.  All rights reserved.
  * Copyright (c) 2005 PathScale, Inc.  All rights reserved.
+ * Copyright (c) 2006 Mellanox Technologies.  All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses.  You may choose to be licensed under the terms of the GNU
@@ -43,7 +44,7 @@
  * Increment this value if any changes that break userspace ABI
  * compatibility are made.
  */
-#define IB_USER_VERBS_ABI_VERSION	4
+#define IB_USER_VERBS_ABI_VERSION	6
 
 enum {
 	IB_USER_VERBS_CMD_GET_CONTEXT,
@@ -265,6 +266,17 @@ struct ib_uverbs_create_cq_resp {
 	__u32 cqe;
 };
 
+struct ib_uverbs_resize_cq {
+	__u64 response;
+	__u32 cq_handle;
+	__u32 cqe;
+	__u64 driver_data[0];
+};
+
+struct ib_uverbs_resize_cq_resp {
+	__u32 cqe;
+};
+
 struct ib_uverbs_poll_cq {
 	__u64 response;
 	__u32 cq_handle;
@@ -338,6 +350,7 @@ struct ib_uverbs_create_qp_resp {
 	__u32 max_send_sge;
 	__u32 max_recv_sge;
 	__u32 max_inline_data;
+	__u32 reserved;
 };
 
 /*
@@ -359,6 +372,47 @@ struct ib_uverbs_qp_dest {
 	__u8  port_num;
 };
 
+struct ib_uverbs_query_qp {
+	__u64 response;
+	__u32 qp_handle;
+	__u32 attr_mask;
+	__u64 driver_data[0];
+};
+
+struct ib_uverbs_query_qp_resp {
+	struct ib_uverbs_qp_dest dest;
+	struct ib_uverbs_qp_dest alt_dest;
+	__u32 max_send_wr;
+	__u32 max_recv_wr;
+	__u32 max_send_sge;
+	__u32 max_recv_sge;
+	__u32 max_inline_data;
+	__u32 qkey;
+	__u32 rq_psn;
+	__u32 sq_psn;
+	__u32 dest_qp_num;
+	__u32 qp_access_flags;
+	__u16 pkey_index;
+	__u16 alt_pkey_index;
+	__u8  qp_state;
+	__u8  cur_qp_state;
+	__u8  path_mtu;
+	__u8  path_mig_state;
+	__u8  en_sqd_async_notify;
+	__u8  max_rd_atomic;
+	__u8  max_dest_rd_atomic;
+	__u8  min_rnr_timer;
+	__u8  port_num;
+	__u8  timeout;
+	__u8  retry_cnt;
+	__u8  rnr_retry;
+	__u8  alt_port_num;
+	__u8  alt_timeout;
+	__u8  sq_sig_all;
+	__u8  reserved[5];
+	__u64 driver_data[0];
+};
+
 struct ib_uverbs_modify_qp {
 	struct ib_uverbs_qp_dest dest;
 	struct ib_uverbs_qp_dest alt_dest;
@@ -415,7 +469,7 @@ struct ib_uverbs_sge {
 };
 
 struct ib_uverbs_send_wr {
-	__u64 wr_id; 
+	__u64 wr_id;
 	__u32 num_sge;
 	__u32 opcode;
 	__u32 send_flags;
@@ -489,7 +543,7 @@ struct ib_uverbs_post_srq_recv_resp {
 
 struct ib_uverbs_global_route {
 	__u8  dgid[16];
-	__u32 flow_label; 
+	__u32 flow_label;
 	__u8  sgid_index;
 	__u8  hop_limit;
 	__u8  traffic_class;
@@ -551,6 +605,9 @@ struct ib_uverbs_create_srq {
 
 struct ib_uverbs_create_srq_resp {
 	__u32 srq_handle;
+	__u32 max_wr;
+	__u32 max_sge;
+	__u32 reserved;
 };
 
 struct ib_uverbs_modify_srq {
@@ -561,6 +618,20 @@ struct ib_uverbs_modify_srq {
 	__u64 driver_data[0];
 };
 
+struct ib_uverbs_query_srq {
+	__u64 response;
+	__u32 srq_handle;
+	__u32 reserved;
+	__u64 driver_data[0];
+};
+
+struct ib_uverbs_query_srq_resp {
+	__u32 max_wr;
+	__u32 max_sge;
+	__u32 srq_limit;
+	__u32 reserved;
+};
+
 struct ib_uverbs_destroy_srq {
 	__u64 response;
 	__u32 srq_handle;
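
For context on how the new commands are reached from userspace, a speculative sketch of issuing RESIZE_CQ over the uverbs write() interface follows. The cmd_hdr layout mirrored below, the word-count convention (32-bit words, header included), and the availability of ib_user_verbs.h definitions to userspace are assumptions about the broader uverbs ABI; applications would normally go through libibverbs rather than this raw form.

#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <rdma/ib_user_verbs.h>	/* or libibverbs' private copy of the ABI */

struct cmd_hdr {			/* assumed mirror of the kernel command header */
	uint32_t command;
	uint16_t in_words;
	uint16_t out_words;
};

static int example_resize_cq(int uverbs_fd, uint32_t cq_handle, uint32_t new_cqe)
{
	struct {
		struct cmd_hdr			hdr;
		struct ib_uverbs_resize_cq	cmd;
	} req;
	struct ib_uverbs_resize_cq_resp resp;

	memset(&req, 0, sizeof req);
	req.hdr.command   = IB_USER_VERBS_CMD_RESIZE_CQ;
	req.hdr.in_words  = sizeof req / 4;	/* 32-bit words, header included */
	req.hdr.out_words = sizeof resp / 4;
	req.cmd.response  = (uintptr_t) &resp;	/* kernel writes the result here */
	req.cmd.cq_handle = cq_handle;
	req.cmd.cqe       = new_cqe;

	if (write(uverbs_fd, &req, sizeof req) != (ssize_t) sizeof req)
		return -1;

	return resp.cqe;			/* CQ size actually granted */
}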
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 22fc886b9695..c1ad6273ac6c 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -5,7 +5,7 @@
  * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
  * Copyright (c) 2004 Voltaire Corporation.  All rights reserved.
  * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
- * Copyright (c) 2005 Cisco Systems.  All rights reserved.
+ * Copyright (c) 2005, 2006 Cisco Systems.  All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses.  You may choose to be licensed under the terms of the GNU
@@ -222,11 +222,13 @@ struct ib_port_attr {
 };
 
 enum ib_device_modify_flags {
-	IB_DEVICE_MODIFY_SYS_IMAGE_GUID = 1
+	IB_DEVICE_MODIFY_SYS_IMAGE_GUID = 1 << 0,
+	IB_DEVICE_MODIFY_NODE_DESC      = 1 << 1
 };
 
 struct ib_device_modify {
 	u64	sys_image_guid;
+	char	node_desc[64];
 };
 
 enum ib_port_modify_flags {
@@ -649,7 +651,7 @@ struct ib_mw_bind {
 struct ib_fmr_attr {
 	int	max_pages;
 	int	max_maps;
-	u8	page_size;
+	u8	page_shift;
 };
 
 struct ib_ucontext {
@@ -880,7 +882,8 @@ struct ib_device {
 						struct ib_ucontext *context,
 						struct ib_udata *udata);
 	int                        (*destroy_cq)(struct ib_cq *cq);
-	int                        (*resize_cq)(struct ib_cq *cq, int cqe);
+	int                        (*resize_cq)(struct ib_cq *cq, int cqe,
+						struct ib_udata *udata);
 	int                        (*poll_cq)(struct ib_cq *cq, int num_entries,
 					      struct ib_wc *wc);
 	int                        (*peek_cq)(struct ib_cq *cq, int wc_cnt);
@@ -950,6 +953,7 @@ struct ib_device {
 	u64			     uverbs_cmd_mask;
 	int			     uverbs_abi_ver;
 
+	char			     node_desc[64];
 	__be64			     node_guid;
 	u8			     node_type;
 	u8			     phys_port_cnt;
@@ -986,6 +990,24 @@ static inline int ib_copy_to_udata(struct ib_udata *udata, void *src, size_t len
 	return copy_to_user(udata->outbuf, src, len) ? -EFAULT : 0;
 }
 
+/**
+ * ib_modify_qp_is_ok - Check that the supplied attribute mask
+ * contains all required attributes and no attributes not allowed for
+ * the given QP state transition.
+ * @cur_state: Current QP state
+ * @next_state: Next QP state
+ * @type: QP type
+ * @mask: Mask of supplied QP attributes
+ *
+ * This function is a helper function that a low-level driver's
+ * modify_qp method can use to validate the consumer's input.  It
+ * checks that cur_state and next_state are valid QP states, that a
+ * transition from cur_state to next_state is allowed by the IB spec,
+ * and that the attribute mask supplied is allowed for the transition.
+ */
+int ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
+		       enum ib_qp_type type, enum ib_qp_attr_mask mask);
+
 int ib_register_event_handler  (struct ib_event_handler *event_handler);
 int ib_unregister_event_handler(struct ib_event_handler *event_handler);
 void ib_dispatch_event(struct ib_event *event);
@@ -1078,7 +1100,9 @@ int ib_destroy_ah(struct ib_ah *ah);
 * ib_create_srq - Creates a SRQ associated with the specified protection
 *   domain.
 * @pd: The protection domain associated with the SRQ.
- * @srq_init_attr: A list of initial attributes required to create the SRQ.
+ * @srq_init_attr: A list of initial attributes required to create the
+ *   SRQ.  If SRQ creation succeeds, then the attributes are updated to
+ *   the actual capabilities of the created SRQ.
 *
 * srq_attr->max_wr and srq_attr->max_sge are read the determine the
 * requested size of the SRQ, and set to the actual values allocated
@@ -1137,7 +1161,9 @@ static inline int ib_post_srq_recv(struct ib_srq *srq,
 * ib_create_qp - Creates a QP associated with the specified protection
 *   domain.
 * @pd: The protection domain associated with the QP.
- * @qp_init_attr: A list of initial attributes required to create the QP.
+ * @qp_init_attr: A list of initial attributes required to create the
+ *   QP.  If QP creation succeeds, then the attributes are updated to
+ *   the actual capabilities of the created QP.
 */
 struct ib_qp *ib_create_qp(struct ib_pd *pd,
			    struct ib_qp_init_attr *qp_init_attr);
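
To illustrate the intended use of the newly exported helper, a rough sketch of a provider driver's modify_qp path follows. The function name, the tracked_state parameter (the state the driver last programmed), and the surrounding error handling are hypothetical; the exact provider method signature is driver-specific.

#include <linux/errno.h>
#include <rdma/ib_verbs.h>

/* Hypothetical driver-side validation using ib_modify_qp_is_ok(). */
static int example_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
			     int attr_mask, enum ib_qp_state tracked_state)
{
	enum ib_qp_state cur_state, new_state;

	/* Honor IB_QP_CUR_STATE if the consumer supplied it. */
	cur_state = attr_mask & IB_QP_CUR_STATE ? attr->cur_qp_state
						: tracked_state;
	new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;

	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask))
		return -EINVAL;

	/* ...program the adapter for the cur_state -> new_state transition... */
	return 0;
}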