Diffstat (limited to 'include/rdma/ib_verbs.h'):
 include/rdma/ib_verbs.h | 222
 1 file changed, 124 insertions(+), 98 deletions(-)
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 7845fae6f2df..9a68a19532ba 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -137,6 +137,8 @@ enum ib_device_cap_flags {
 	IB_DEVICE_BLOCK_MULTICAST_LOOPBACK	= (1<<22),
 	IB_DEVICE_MEM_WINDOW_TYPE_2A		= (1<<23),
 	IB_DEVICE_MEM_WINDOW_TYPE_2B		= (1<<24),
+	IB_DEVICE_RC_IP_CSUM			= (1<<25),
+	IB_DEVICE_RAW_IP_CSUM			= (1<<26),
 	IB_DEVICE_MANAGED_FLOW_STEERING		= (1<<29),
 	IB_DEVICE_SIGNATURE_HANDOVER		= (1<<30),
 	IB_DEVICE_ON_DEMAND_PAGING		= (1<<31),
@@ -474,7 +476,7 @@ enum ib_event_type {
 	IB_EVENT_GID_CHANGE,
 };
 
-__attribute_const__ const char *ib_event_msg(enum ib_event_type event);
+const char *__attribute_const__ ib_event_msg(enum ib_event_type event);
 
 struct ib_event {
 	struct ib_device	*device;
@@ -697,7 +699,6 @@ struct ib_ah_attr {
 	u8			ah_flags;
 	u8			port_num;
 	u8			dmac[ETH_ALEN];
-	u16			vlan_id;
 };
 
 enum ib_wc_status {
@@ -725,7 +726,7 @@ enum ib_wc_status {
 	IB_WC_GENERAL_ERR
 };
 
-__attribute_const__ const char *ib_wc_status_msg(enum ib_wc_status status);
+const char *__attribute_const__ ib_wc_status_msg(enum ib_wc_status status);
 
 enum ib_wc_opcode {
 	IB_WC_SEND,
@@ -736,7 +737,7 @@ enum ib_wc_opcode {
 	IB_WC_BIND_MW,
 	IB_WC_LSO,
 	IB_WC_LOCAL_INV,
-	IB_WC_FAST_REG_MR,
+	IB_WC_REG_MR,
 	IB_WC_MASKED_COMP_SWAP,
 	IB_WC_MASKED_FETCH_ADD,
 /*
@@ -873,7 +874,6 @@ enum ib_qp_create_flags {
 	IB_QP_CREATE_RESERVED_END		= 1 << 31,
 };
 
-
 /*
  * Note: users may not call ib_close_qp or ib_destroy_qp from the event_handler
  * callback to destroy the passed in QP.
@@ -957,10 +957,10 @@ enum ib_qp_attr_mask {
 	IB_QP_PATH_MIG_STATE		= (1<<18),
 	IB_QP_CAP			= (1<<19),
 	IB_QP_DEST_QPN			= (1<<20),
-	IB_QP_SMAC			= (1<<21),
-	IB_QP_ALT_SMAC			= (1<<22),
-	IB_QP_VID			= (1<<23),
-	IB_QP_ALT_VID			= (1<<24),
+	IB_QP_RESERVED1			= (1<<21),
+	IB_QP_RESERVED2			= (1<<22),
+	IB_QP_RESERVED3			= (1<<23),
+	IB_QP_RESERVED4			= (1<<24),
 };
 
 enum ib_qp_state {
@@ -1010,10 +1010,6 @@ struct ib_qp_attr {
 	u8			rnr_retry;
 	u8			alt_port_num;
 	u8			alt_timeout;
-	u8			smac[ETH_ALEN];
-	u8			alt_smac[ETH_ALEN];
-	u16			vlan_id;
-	u16			alt_vlan_id;
 };
 
 enum ib_wr_opcode {
@@ -1028,7 +1024,7 @@ enum ib_wr_opcode {
 	IB_WR_SEND_WITH_INV,
 	IB_WR_RDMA_READ_WITH_INV,
 	IB_WR_LOCAL_INV,
-	IB_WR_FAST_REG_MR,
+	IB_WR_REG_MR,
 	IB_WR_MASKED_ATOMIC_CMP_AND_SWP,
 	IB_WR_MASKED_ATOMIC_FETCH_AND_ADD,
 	IB_WR_BIND_MW,
@@ -1066,12 +1062,6 @@ struct ib_sge {
 	u32	lkey;
 };
 
-struct ib_fast_reg_page_list {
-	struct ib_device       *device;
-	u64		       *page_list;
-	unsigned int		max_page_list_len;
-};
-
 /**
  * struct ib_mw_bind_info - Parameters for a memory window bind operation.
  * @mr: A memory region to bind the memory window to.
@@ -1100,54 +1090,89 @@ struct ib_send_wr {
 		__be32		imm_data;
 		u32		invalidate_rkey;
 	} ex;
-	union {
-		struct {
-			u64	remote_addr;
-			u32	rkey;
-		} rdma;
-		struct {
-			u64	remote_addr;
-			u64	compare_add;
-			u64	swap;
-			u64	compare_add_mask;
-			u64	swap_mask;
-			u32	rkey;
-		} atomic;
-		struct {
-			struct ib_ah *ah;
-			void	*header;
-			int	hlen;
-			int	mss;
-			u32	remote_qpn;
-			u32	remote_qkey;
-			u16	pkey_index; /* valid for GSI only */
-			u8	port_num;   /* valid for DR SMPs on switch only */
-		} ud;
-		struct {
-			u64				iova_start;
-			struct ib_fast_reg_page_list   *page_list;
-			unsigned int			page_shift;
-			unsigned int			page_list_len;
-			u32				length;
-			int				access_flags;
-			u32				rkey;
-		} fast_reg;
-		struct {
-			struct ib_mw		*mw;
-			/* The new rkey for the memory window. */
-			u32			 rkey;
-			struct ib_mw_bind_info	 bind_info;
-		} bind_mw;
-		struct {
-			struct ib_sig_attrs    *sig_attrs;
-			struct ib_mr	       *sig_mr;
-			int			access_flags;
-			struct ib_sge	       *prot;
-		} sig_handover;
-	} wr;
-	u32			xrc_remote_srq_num;	/* XRC TGT QPs only */
 };
 
+struct ib_rdma_wr {
+	struct ib_send_wr	wr;
+	u64			remote_addr;
+	u32			rkey;
+};
+
+static inline struct ib_rdma_wr *rdma_wr(struct ib_send_wr *wr)
+{
+	return container_of(wr, struct ib_rdma_wr, wr);
+}
+
+struct ib_atomic_wr {
+	struct ib_send_wr	wr;
+	u64			remote_addr;
+	u64			compare_add;
+	u64			swap;
+	u64			compare_add_mask;
+	u64			swap_mask;
+	u32			rkey;
+};
+
+static inline struct ib_atomic_wr *atomic_wr(struct ib_send_wr *wr)
+{
+	return container_of(wr, struct ib_atomic_wr, wr);
+}
+
+struct ib_ud_wr {
+	struct ib_send_wr	wr;
+	struct ib_ah		*ah;
+	void			*header;
+	int			hlen;
+	int			mss;
+	u32			remote_qpn;
+	u32			remote_qkey;
+	u16			pkey_index; /* valid for GSI only */
+	u8			port_num;   /* valid for DR SMPs on switch only */
+};
+
+static inline struct ib_ud_wr *ud_wr(struct ib_send_wr *wr)
+{
+	return container_of(wr, struct ib_ud_wr, wr);
+}
+
+struct ib_reg_wr {
+	struct ib_send_wr	wr;
+	struct ib_mr		*mr;
+	u32			key;
+	int			access;
+};
+
+static inline struct ib_reg_wr *reg_wr(struct ib_send_wr *wr)
+{
+	return container_of(wr, struct ib_reg_wr, wr);
+}
+
+struct ib_bind_mw_wr {
+	struct ib_send_wr	wr;
+	struct ib_mw		*mw;
+	/* The new rkey for the memory window. */
+	u32			rkey;
+	struct ib_mw_bind_info	bind_info;
+};
+
+static inline struct ib_bind_mw_wr *bind_mw_wr(struct ib_send_wr *wr)
+{
+	return container_of(wr, struct ib_bind_mw_wr, wr);
+}
+
+struct ib_sig_handover_wr {
+	struct ib_send_wr	wr;
+	struct ib_sig_attrs    *sig_attrs;
+	struct ib_mr	       *sig_mr;
+	int			access_flags;
+	struct ib_sge	       *prot;
+};
+
+static inline struct ib_sig_handover_wr *sig_handover_wr(struct ib_send_wr *wr)
+{
+	return container_of(wr, struct ib_sig_handover_wr, wr);
+}
+
 struct ib_recv_wr {
 	struct ib_recv_wr      *next;
 	u64			wr_id;
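(Illustrative note, not part of the patch: with struct ib_send_wr reduced to the common fields, each opcode-specific WR embeds it as its first member and providers downcast via the container_of() helpers above. A minimal sketch of how a ULP would post an RDMA WRITE through the new struct ib_rdma_wr; the function name and its parameters are made up for the example, which assumes the caller already has a connected QP, a populated SGE, and the peer's rkey.)

#include <rdma/ib_verbs.h>

static int example_post_rdma_write(struct ib_qp *qp, struct ib_sge *sge,
				   u64 remote_addr, u32 rkey)
{
	/* Opcode-specific fields live next to the embedded common WR. */
	struct ib_rdma_wr wr = {
		.wr = {
			.opcode	    = IB_WR_RDMA_WRITE,
			.send_flags = IB_SEND_SIGNALED,
			.sg_list    = sge,
			.num_sge    = 1,
		},
		.remote_addr = remote_addr,
		.rkey	     = rkey,
	};
	struct ib_send_wr *bad_wr;

	/* The provider recovers the typed WR with rdma_wr(&wr.wr). */
	return ib_post_send(qp, &wr.wr, &bad_wr);
}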
@@ -1334,6 +1359,9 @@ struct ib_mr {
 	struct ib_uobject *uobject;
 	u32		   lkey;
 	u32		   rkey;
+	u64		   iova;
+	u32		   length;
+	unsigned int	   page_size;
 	atomic_t	   usecnt; /* count number of MWs */
 };
 
@@ -1718,9 +1746,9 @@ struct ib_device {
 	struct ib_mr *		   (*alloc_mr)(struct ib_pd *pd,
 					       enum ib_mr_type mr_type,
 					       u32 max_num_sg);
-	struct ib_fast_reg_page_list * (*alloc_fast_reg_page_list)(struct ib_device *device,
-								    int page_list_len);
-	void			   (*free_fast_reg_page_list)(struct ib_fast_reg_page_list *page_list);
+	int			   (*map_mr_sg)(struct ib_mr *mr,
+						struct scatterlist *sg,
+						int sg_nents);
 	int			   (*rereg_phys_mr)(struct ib_mr *mr,
 						    int mr_rereg_mask,
 						    struct ib_pd *pd,
@@ -2176,7 +2204,8 @@ static inline bool rdma_cap_roce_gid_table(const struct ib_device *device,
 }
 
 int ib_query_gid(struct ib_device *device,
-		 u8 port_num, int index, union ib_gid *gid);
+		 u8 port_num, int index, union ib_gid *gid,
+		 struct ib_gid_attr *attr);
 
 int ib_query_pkey(struct ib_device *device,
 		  u8 port_num, u16 index, u16 *pkey);
@@ -2190,7 +2219,7 @@ int ib_modify_port(struct ib_device *device,
 		   struct ib_port_modify *port_modify);
 
 int ib_find_gid(struct ib_device *device, union ib_gid *gid,
-		u8 *port_num, u16 *index);
+		struct net_device *ndev, u8 *port_num, u16 *index);
 
 int ib_find_pkey(struct ib_device *device,
 		 u8 port_num, u16 pkey, u16 *index);
@@ -2829,33 +2858,6 @@ struct ib_mr *ib_alloc_mr(struct ib_pd *pd,
 			  u32 max_num_sg);
 
 /**
- * ib_alloc_fast_reg_page_list - Allocates a page list array
- * @device - ib device pointer.
- * @page_list_len - size of the page list array to be allocated.
- *
- * This allocates and returns a struct ib_fast_reg_page_list * and a
- * page_list array that is at least page_list_len in size. The actual
- * size is returned in max_page_list_len. The caller is responsible
- * for initializing the contents of the page_list array before posting
- * a send work request with the IB_WC_FAST_REG_MR opcode.
- *
- * The page_list array entries must be translated using one of the
- * ib_dma_*() functions just like the addresses passed to
- * ib_map_phys_fmr(). Once the ib_post_send() is issued, the struct
- * ib_fast_reg_page_list must not be modified by the caller until the
- * IB_WC_FAST_REG_MR work request completes.
- */
-struct ib_fast_reg_page_list *ib_alloc_fast_reg_page_list(
-			struct ib_device *device, int page_list_len);
-
-/**
- * ib_free_fast_reg_page_list - Deallocates a previously allocated
- *   page list array.
- * @page_list - struct ib_fast_reg_page_list pointer to be deallocated.
- */
-void ib_free_fast_reg_page_list(struct ib_fast_reg_page_list *page_list);
-
-/**
  * ib_update_fast_reg_key - updates the key portion of the fast_reg MR
  *   R_Key and L_Key.
  * @mr - struct ib_mr pointer to be updated.
@@ -3023,4 +3025,28 @@ struct net_device *ib_get_net_dev_by_params(struct ib_device *dev, u8 port,
 					    u16 pkey, const union ib_gid *gid,
 					    const struct sockaddr *addr);
 
+int ib_map_mr_sg(struct ib_mr *mr,
+		 struct scatterlist *sg,
+		 int sg_nents,
+		 unsigned int page_size);
+
+static inline int
+ib_map_mr_sg_zbva(struct ib_mr *mr,
+		  struct scatterlist *sg,
+		  int sg_nents,
+		  unsigned int page_size)
+{
+	int n;
+
+	n = ib_map_mr_sg(mr, sg, sg_nents, page_size);
+	mr->iova = 0;
+
+	return n;
+}
+
+int ib_sg_to_pages(struct ib_mr *mr,
+		   struct scatterlist *sgl,
+		   int sg_nents,
+		   int (*set_page)(struct ib_mr *, u64));
+
 #endif /* IB_VERBS_H */
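(Illustrative note, not part of the patch: a minimal sketch of how a ULP might drive the new registration path — ib_map_mr_sg() to lay a scatterlist over an MR allocated earlier with ib_alloc_mr(), followed by an IB_WR_REG_MR work request through struct ib_reg_wr. The function name and its parameters are hypothetical, PAGE_SIZE is just one plausible page_size argument, and error handling is trimmed.)

#include <linux/errno.h>
#include <linux/string.h>
#include <rdma/ib_verbs.h>

static int example_fast_reg(struct ib_qp *qp, struct ib_mr *mr,
			    struct scatterlist *sg, int sg_nents)
{
	struct ib_reg_wr reg_wr;
	struct ib_send_wr *bad_wr;
	int n;

	/* Lay the SG list out over the MR in page_size chunks. */
	n = ib_map_mr_sg(mr, sg, sg_nents, PAGE_SIZE);
	if (n < sg_nents)
		return n < 0 ? n : -EINVAL;

	/* Refresh the key byte so stale remote references are caught. */
	ib_update_fast_reg_key(mr, ib_inc_rkey(mr->rkey));

	memset(&reg_wr, 0, sizeof(reg_wr));
	reg_wr.wr.opcode     = IB_WR_REG_MR;
	reg_wr.wr.send_flags = IB_SEND_SIGNALED;
	reg_wr.mr            = mr;
	reg_wr.key           = mr->rkey;
	reg_wr.access        = IB_ACCESS_LOCAL_WRITE |
			       IB_ACCESS_REMOTE_READ |
			       IB_ACCESS_REMOTE_WRITE;

	return ib_post_send(qp, &reg_wr.wr, &bad_wr);
}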