Diffstat (limited to 'net/rds/iw.h')
-rw-r--r--	net/rds/iw.h	398
1 file changed, 0 insertions, 398 deletions
diff --git a/net/rds/iw.h b/net/rds/iw.h
deleted file mode 100644
index 5af01d1758b3..000000000000
--- a/net/rds/iw.h
+++ /dev/null
@@ -1,398 +0,0 @@
-#ifndef _RDS_IW_H
-#define _RDS_IW_H
-
-#include <linux/interrupt.h>
-#include <rdma/ib_verbs.h>
-#include <rdma/rdma_cm.h>
-#include "rds.h"
-#include "rdma_transport.h"
-
-#define RDS_FASTREG_SIZE 20
-#define RDS_FASTREG_POOL_SIZE 2048
-
-#define RDS_IW_MAX_SGE 8
-#define RDS_IW_RECV_SGE 2
-
-#define RDS_IW_DEFAULT_RECV_WR 1024
-#define RDS_IW_DEFAULT_SEND_WR 256
-
-#define RDS_IW_SUPPORTED_PROTOCOLS 0x00000003 /* minor versions supported */
-
-extern struct list_head rds_iw_devices;
-
-/*
- * IB posts RDS_FRAG_SIZE fragments of pages to the receive queues to
- * try and minimize the amount of memory tied up both the device and
- * socket receive queues.
- */
-/* page offset of the final full frag that fits in the page */
-#define RDS_PAGE_LAST_OFF (((PAGE_SIZE / RDS_FRAG_SIZE) - 1) * RDS_FRAG_SIZE)
-struct rds_page_frag {
-	struct list_head f_item;
-	struct page *f_page;
-	unsigned long f_offset;
-	dma_addr_t f_mapped;
-};
-
-struct rds_iw_incoming {
-	struct list_head ii_frags;
-	struct rds_incoming ii_inc;
-};
-
-struct rds_iw_connect_private {
-	/* Add new fields at the end, and don't permute existing fields. */
-	__be32 dp_saddr;
-	__be32 dp_daddr;
-	u8 dp_protocol_major;
-	u8 dp_protocol_minor;
-	__be16 dp_protocol_minor_mask; /* bitmask */
-	__be32 dp_reserved1;
-	__be64 dp_ack_seq;
-	__be32 dp_credit; /* non-zero enables flow ctl */
-};
-
-struct rds_iw_scatterlist {
-	struct scatterlist *list;
-	unsigned int len;
-	int dma_len;
-	unsigned int dma_npages;
-	unsigned int bytes;
-};
-
-struct rds_iw_mapping {
-	spinlock_t m_lock; /* protect the mapping struct */
-	struct list_head m_list;
-	struct rds_iw_mr *m_mr;
-	uint32_t m_rkey;
-	struct rds_iw_scatterlist m_sg;
-};
-
-struct rds_iw_send_work {
-	struct rds_message *s_rm;
-
-	/* We should really put these into a union: */
-	struct rm_rdma_op *s_op;
-	struct rds_iw_mapping *s_mapping;
-	struct ib_mr *s_mr;
-	unsigned char s_remap_count;
-
-	union {
-		struct ib_send_wr s_send_wr;
-		struct ib_rdma_wr s_rdma_wr;
-		struct ib_reg_wr s_reg_wr;
-	};
-	struct ib_sge s_sge[RDS_IW_MAX_SGE];
-	unsigned long s_queued;
-};
-
-struct rds_iw_recv_work {
-	struct rds_iw_incoming *r_iwinc;
-	struct rds_page_frag *r_frag;
-	struct ib_recv_wr r_wr;
-	struct ib_sge r_sge[2];
-};
-
-struct rds_iw_work_ring {
-	u32 w_nr;
-	u32 w_alloc_ptr;
-	u32 w_alloc_ctr;
-	u32 w_free_ptr;
-	atomic_t w_free_ctr;
-};
-
-struct rds_iw_device;
-
-struct rds_iw_connection {
-
-	struct list_head iw_node;
-	struct rds_iw_device *rds_iwdev;
-	struct rds_connection *conn;
-
-	/* alphabet soup, IBTA style */
-	struct rdma_cm_id *i_cm_id;
-	struct ib_pd *i_pd;
-	struct ib_mr *i_mr;
-	struct ib_cq *i_send_cq;
-	struct ib_cq *i_recv_cq;
-
-	/* tx */
-	struct rds_iw_work_ring i_send_ring;
-	struct rds_message *i_rm;
-	struct rds_header *i_send_hdrs;
-	u64 i_send_hdrs_dma;
-	struct rds_iw_send_work *i_sends;
-
-	/* rx */
-	struct tasklet_struct i_recv_tasklet;
-	struct mutex i_recv_mutex;
-	struct rds_iw_work_ring i_recv_ring;
-	struct rds_iw_incoming *i_iwinc;
-	u32 i_recv_data_rem;
-	struct rds_header *i_recv_hdrs;
-	u64 i_recv_hdrs_dma;
-	struct rds_iw_recv_work *i_recvs;
-	struct rds_page_frag i_frag;
-	u64 i_ack_recv; /* last ACK received */
-
-	/* sending acks */
-	unsigned long i_ack_flags;
-#ifdef KERNEL_HAS_ATOMIC64
-	atomic64_t i_ack_next; /* next ACK to send */
-#else
-	spinlock_t i_ack_lock; /* protect i_ack_next */
-	u64 i_ack_next; /* next ACK to send */
-#endif
-	struct rds_header *i_ack;
-	struct ib_send_wr i_ack_wr;
-	struct ib_sge i_ack_sge;
-	u64 i_ack_dma;
-	unsigned long i_ack_queued;
-
-	/* Flow control related information
-	 *
-	 * Our algorithm uses a pair variables that we need to access
-	 * atomically - one for the send credits, and one posted
-	 * recv credits we need to transfer to remote.
-	 * Rather than protect them using a slow spinlock, we put both into
-	 * a single atomic_t and update it using cmpxchg
-	 */
-	atomic_t i_credits;
-
-	/* Protocol version specific information */
-	unsigned int i_flowctl:1; /* enable/disable flow ctl */
-	unsigned int i_dma_local_lkey:1;
-	unsigned int i_fastreg_posted:1; /* fastreg posted on this connection */
-	/* Batched completions */
-	unsigned int i_unsignaled_wrs;
-	long i_unsignaled_bytes;
-};
-
-/* This assumes that atomic_t is at least 32 bits */
-#define IB_GET_SEND_CREDITS(v) ((v) & 0xffff)
-#define IB_GET_POST_CREDITS(v) ((v) >> 16)
-#define IB_SET_SEND_CREDITS(v) ((v) & 0xffff)
-#define IB_SET_POST_CREDITS(v) ((v) << 16)
-
-struct rds_iw_cm_id {
-	struct list_head list;
-	struct rdma_cm_id *cm_id;
-};
-
-struct rds_iw_device {
-	struct list_head list;
-	struct list_head cm_id_list;
-	struct list_head conn_list;
-	struct ib_device *dev;
-	struct ib_pd *pd;
-	struct ib_mr *mr;
-	struct rds_iw_mr_pool *mr_pool;
-	int max_sge;
-	unsigned int max_wrs;
-	unsigned int dma_local_lkey:1;
-	spinlock_t spinlock; /* protect the above */
-};
-
-/* bits for i_ack_flags */
-#define IB_ACK_IN_FLIGHT 0
-#define IB_ACK_REQUESTED 1
-
-/* Magic WR_ID for ACKs */
-#define RDS_IW_ACK_WR_ID ((u64)0xffffffffffffffffULL)
-#define RDS_IW_REG_WR_ID ((u64)0xefefefefefefefefULL)
-#define RDS_IW_LOCAL_INV_WR_ID ((u64)0xdfdfdfdfdfdfdfdfULL)
-
-struct rds_iw_statistics {
-	uint64_t s_iw_connect_raced;
-	uint64_t s_iw_listen_closed_stale;
-	uint64_t s_iw_tx_cq_call;
-	uint64_t s_iw_tx_cq_event;
-	uint64_t s_iw_tx_ring_full;
-	uint64_t s_iw_tx_throttle;
-	uint64_t s_iw_tx_sg_mapping_failure;
-	uint64_t s_iw_tx_stalled;
-	uint64_t s_iw_tx_credit_updates;
-	uint64_t s_iw_rx_cq_call;
-	uint64_t s_iw_rx_cq_event;
-	uint64_t s_iw_rx_ring_empty;
-	uint64_t s_iw_rx_refill_from_cq;
-	uint64_t s_iw_rx_refill_from_thread;
-	uint64_t s_iw_rx_alloc_limit;
-	uint64_t s_iw_rx_credit_updates;
-	uint64_t s_iw_ack_sent;
-	uint64_t s_iw_ack_send_failure;
-	uint64_t s_iw_ack_send_delayed;
-	uint64_t s_iw_ack_send_piggybacked;
-	uint64_t s_iw_ack_received;
-	uint64_t s_iw_rdma_mr_alloc;
-	uint64_t s_iw_rdma_mr_free;
-	uint64_t s_iw_rdma_mr_used;
-	uint64_t s_iw_rdma_mr_pool_flush;
-	uint64_t s_iw_rdma_mr_pool_wait;
-	uint64_t s_iw_rdma_mr_pool_depleted;
-};
-
-extern struct workqueue_struct *rds_iw_wq;
-
-/*
- * Fake ib_dma_sync_sg_for_{cpu,device} as long as ib_verbs.h
- * doesn't define it.
- */
-static inline void rds_iw_dma_sync_sg_for_cpu(struct ib_device *dev,
-		struct scatterlist *sg, unsigned int sg_dma_len, int direction)
-{
-	unsigned int i;
-
-	for (i = 0; i < sg_dma_len; ++i) {
-		ib_dma_sync_single_for_cpu(dev,
-				ib_sg_dma_address(dev, &sg[i]),
-				ib_sg_dma_len(dev, &sg[i]),
-				direction);
-	}
-}
-#define ib_dma_sync_sg_for_cpu rds_iw_dma_sync_sg_for_cpu
-
-static inline void rds_iw_dma_sync_sg_for_device(struct ib_device *dev,
-		struct scatterlist *sg, unsigned int sg_dma_len, int direction)
-{
-	unsigned int i;
-
-	for (i = 0; i < sg_dma_len; ++i) {
-		ib_dma_sync_single_for_device(dev,
-				ib_sg_dma_address(dev, &sg[i]),
-				ib_sg_dma_len(dev, &sg[i]),
-				direction);
-	}
-}
-#define ib_dma_sync_sg_for_device rds_iw_dma_sync_sg_for_device
-
-static inline u32 rds_iw_local_dma_lkey(struct rds_iw_connection *ic)
-{
-	return ic->i_dma_local_lkey ? ic->i_cm_id->device->local_dma_lkey : ic->i_mr->lkey;
-}
-
-/* ib.c */
-extern struct rds_transport rds_iw_transport;
-extern struct ib_client rds_iw_client;
-
-extern unsigned int fastreg_pool_size;
-extern unsigned int fastreg_message_size;
-
-extern spinlock_t iw_nodev_conns_lock;
-extern struct list_head iw_nodev_conns;
-
-/* ib_cm.c */
-int rds_iw_conn_alloc(struct rds_connection *conn, gfp_t gfp);
-void rds_iw_conn_free(void *arg);
-int rds_iw_conn_connect(struct rds_connection *conn);
-void rds_iw_conn_shutdown(struct rds_connection *conn);
-void rds_iw_state_change(struct sock *sk);
-int rds_iw_listen_init(void);
-void rds_iw_listen_stop(void);
-void __rds_iw_conn_error(struct rds_connection *conn, const char *, ...);
-int rds_iw_cm_handle_connect(struct rdma_cm_id *cm_id,
-			     struct rdma_cm_event *event);
-int rds_iw_cm_initiate_connect(struct rdma_cm_id *cm_id);
-void rds_iw_cm_connect_complete(struct rds_connection *conn,
-				struct rdma_cm_event *event);
-
-
-#define rds_iw_conn_error(conn, fmt...) \
-	__rds_iw_conn_error(conn, KERN_WARNING "RDS/IW: " fmt)
-
-/* ib_rdma.c */
-int rds_iw_update_cm_id(struct rds_iw_device *rds_iwdev, struct rdma_cm_id *cm_id);
-void rds_iw_add_conn(struct rds_iw_device *rds_iwdev, struct rds_connection *conn);
-void rds_iw_remove_conn(struct rds_iw_device *rds_iwdev, struct rds_connection *conn);
-void __rds_iw_destroy_conns(struct list_head *list, spinlock_t *list_lock);
-static inline void rds_iw_destroy_nodev_conns(void)
-{
-	__rds_iw_destroy_conns(&iw_nodev_conns, &iw_nodev_conns_lock);
-}
-static inline void rds_iw_destroy_conns(struct rds_iw_device *rds_iwdev)
-{
-	__rds_iw_destroy_conns(&rds_iwdev->conn_list, &rds_iwdev->spinlock);
-}
-struct rds_iw_mr_pool *rds_iw_create_mr_pool(struct rds_iw_device *);
-void rds_iw_get_mr_info(struct rds_iw_device *rds_iwdev, struct rds_info_rdma_connection *iinfo);
-void rds_iw_destroy_mr_pool(struct rds_iw_mr_pool *);
-void *rds_iw_get_mr(struct scatterlist *sg, unsigned long nents,
-		    struct rds_sock *rs, u32 *key_ret);
-void rds_iw_sync_mr(void *trans_private, int dir);
-void rds_iw_free_mr(void *trans_private, int invalidate);
-void rds_iw_flush_mrs(void);
-
-/* ib_recv.c */
-int rds_iw_recv_init(void);
-void rds_iw_recv_exit(void);
-int rds_iw_recv(struct rds_connection *conn);
-int rds_iw_recv_refill(struct rds_connection *conn, gfp_t kptr_gfp,
-		       gfp_t page_gfp, int prefill);
-void rds_iw_inc_free(struct rds_incoming *inc);
-int rds_iw_inc_copy_to_user(struct rds_incoming *inc, struct iov_iter *to);
-void rds_iw_recv_cq_comp_handler(struct ib_cq *cq, void *context);
-void rds_iw_recv_tasklet_fn(unsigned long data);
-void rds_iw_recv_init_ring(struct rds_iw_connection *ic);
-void rds_iw_recv_clear_ring(struct rds_iw_connection *ic);
-void rds_iw_recv_init_ack(struct rds_iw_connection *ic);
-void rds_iw_attempt_ack(struct rds_iw_connection *ic);
-void rds_iw_ack_send_complete(struct rds_iw_connection *ic);
-u64 rds_iw_piggyb_ack(struct rds_iw_connection *ic);
-
-/* ib_ring.c */
-void rds_iw_ring_init(struct rds_iw_work_ring *ring, u32 nr);
-void rds_iw_ring_resize(struct rds_iw_work_ring *ring, u32 nr);
-u32 rds_iw_ring_alloc(struct rds_iw_work_ring *ring, u32 val, u32 *pos);
-void rds_iw_ring_free(struct rds_iw_work_ring *ring, u32 val);
-void rds_iw_ring_unalloc(struct rds_iw_work_ring *ring, u32 val);
-int rds_iw_ring_empty(struct rds_iw_work_ring *ring);
-int rds_iw_ring_low(struct rds_iw_work_ring *ring);
-u32 rds_iw_ring_oldest(struct rds_iw_work_ring *ring);
-u32 rds_iw_ring_completed(struct rds_iw_work_ring *ring, u32 wr_id, u32 oldest);
-extern wait_queue_head_t rds_iw_ring_empty_wait;
-
-/* ib_send.c */
-void rds_iw_xmit_complete(struct rds_connection *conn);
-int rds_iw_xmit(struct rds_connection *conn, struct rds_message *rm,
-		unsigned int hdr_off, unsigned int sg, unsigned int off);
-void rds_iw_send_cq_comp_handler(struct ib_cq *cq, void *context);
-void rds_iw_send_init_ring(struct rds_iw_connection *ic);
-void rds_iw_send_clear_ring(struct rds_iw_connection *ic);
-int rds_iw_xmit_rdma(struct rds_connection *conn, struct rm_rdma_op *op);
-void rds_iw_send_add_credits(struct rds_connection *conn, unsigned int credits);
-void rds_iw_advertise_credits(struct rds_connection *conn, unsigned int posted);
-int rds_iw_send_grab_credits(struct rds_iw_connection *ic, u32 wanted,
-			     u32 *adv_credits, int need_posted, int max_posted);
-
-/* ib_stats.c */
-DECLARE_PER_CPU(struct rds_iw_statistics, rds_iw_stats);
-#define rds_iw_stats_inc(member) rds_stats_inc_which(rds_iw_stats, member)
-unsigned int rds_iw_stats_info_copy(struct rds_info_iterator *iter,
-				    unsigned int avail);
-
-/* ib_sysctl.c */
-int rds_iw_sysctl_init(void);
-void rds_iw_sysctl_exit(void);
-extern unsigned long rds_iw_sysctl_max_send_wr;
-extern unsigned long rds_iw_sysctl_max_recv_wr;
-extern unsigned long rds_iw_sysctl_max_unsig_wrs;
-extern unsigned long rds_iw_sysctl_max_unsig_bytes;
-extern unsigned long rds_iw_sysctl_max_recv_allocation;
-extern unsigned int rds_iw_sysctl_flow_control;
-
-/*
- * Helper functions for getting/setting the header and data SGEs in
- * RDS packets (not RDMA)
- */
-static inline struct ib_sge *
-rds_iw_header_sge(struct rds_iw_connection *ic, struct ib_sge *sge)
-{
-	return &sge[0];
-}
-
-static inline struct ib_sge *
-rds_iw_data_sge(struct rds_iw_connection *ic, struct ib_sge *sge)
-{
-	return &sge[1];
-}
-
-#endif
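
The flow-control comment in struct rds_iw_connection above notes that the send credits and the posted-receive credits to advertise to the peer are packed into a single atomic_t and updated with cmpxchg. The following is a minimal userspace sketch, not part of the removed header, showing how the IB_GET_*/IB_SET_* credit macros (copied verbatim from the file) split and recombine that 32-bit value; the numbers 42 and 7 are arbitrary example values.

/* Illustration only: exercise the credit macros outside the kernel. */
#include <stdint.h>
#include <stdio.h>

#define IB_GET_SEND_CREDITS(v) ((v) & 0xffff)
#define IB_GET_POST_CREDITS(v) ((v) >> 16)
#define IB_SET_SEND_CREDITS(v) ((v) & 0xffff)
#define IB_SET_POST_CREDITS(v) ((v) << 16)

int main(void)
{
	/* Low 16 bits carry send credits; high 16 bits carry posted
	 * receive credits to be transferred to the remote side. */
	uint32_t credits = IB_SET_SEND_CREDITS(42) | IB_SET_POST_CREDITS(7);

	printf("send=%u post=%u\n",
	       (unsigned)IB_GET_SEND_CREDITS(credits),
	       (unsigned)IB_GET_POST_CREDITS(credits));
	return 0;
}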
