Diffstat (limited to 'net/rds/iw.h')
-rw-r--r-- | net/rds/iw.h | 395
1 file changed, 395 insertions, 0 deletions
diff --git a/net/rds/iw.h b/net/rds/iw.h
new file mode 100644
index 000000000000..0ddda34f2a1c
--- /dev/null
+++ b/net/rds/iw.h
@@ -0,0 +1,395 @@
#ifndef _RDS_IW_H
#define _RDS_IW_H

#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include "rds.h"
#include "rdma_transport.h"

#define RDS_FASTREG_SIZE		20
#define RDS_FASTREG_POOL_SIZE		2048

#define RDS_IW_MAX_SGE			8
#define RDS_IW_RECV_SGE			2

#define RDS_IW_DEFAULT_RECV_WR		1024
#define RDS_IW_DEFAULT_SEND_WR		256

#define RDS_IW_SUPPORTED_PROTOCOLS	0x00000003	/* minor versions supported */

extern struct list_head rds_iw_devices;

/*
 * IB posts RDS_FRAG_SIZE fragments of pages to the receive queues to
 * try to minimize the amount of memory tied up in both the device and
 * socket receive queues.
 */
/* page offset of the final full frag that fits in the page */
#define RDS_PAGE_LAST_OFF (((PAGE_SIZE / RDS_FRAG_SIZE) - 1) * RDS_FRAG_SIZE)
struct rds_page_frag {
	struct list_head f_item;
	struct page *f_page;
	unsigned long f_offset;
	dma_addr_t f_mapped;
};

struct rds_iw_incoming {
	struct list_head ii_frags;
	struct rds_incoming ii_inc;
};

struct rds_iw_connect_private {
	/* Add new fields at the end, and don't permute existing fields. */
	__be32 dp_saddr;
	__be32 dp_daddr;
	u8 dp_protocol_major;
	u8 dp_protocol_minor;
	__be16 dp_protocol_minor_mask;	/* bitmask */
	__be32 dp_reserved1;
	__be64 dp_ack_seq;
	__be32 dp_credit;	/* non-zero enables flow ctl */
};

struct rds_iw_scatterlist {
	struct scatterlist *list;
	unsigned int len;
	int dma_len;
	unsigned int dma_npages;
	unsigned int bytes;
};

struct rds_iw_mapping {
	spinlock_t m_lock;	/* protect the mapping struct */
	struct list_head m_list;
	struct rds_iw_mr *m_mr;
	uint32_t m_rkey;
	struct rds_iw_scatterlist m_sg;
};

struct rds_iw_send_work {
	struct rds_message *s_rm;

	/* We should really put these into a union: */
	struct rds_rdma_op *s_op;
	struct rds_iw_mapping *s_mapping;
	struct ib_mr *s_mr;
	struct ib_fast_reg_page_list *s_page_list;
	unsigned char s_remap_count;

	struct ib_send_wr s_wr;
	struct ib_sge s_sge[RDS_IW_MAX_SGE];
	unsigned long s_queued;
};

struct rds_iw_recv_work {
	struct rds_iw_incoming *r_iwinc;
	struct rds_page_frag *r_frag;
	struct ib_recv_wr r_wr;
	struct ib_sge r_sge[2];
};

struct rds_iw_work_ring {
	u32 w_nr;
	u32 w_alloc_ptr;
	u32 w_alloc_ctr;
	u32 w_free_ptr;
	atomic_t w_free_ctr;
};

struct rds_iw_device;

struct rds_iw_connection {

	struct list_head iw_node;
	struct rds_iw_device *rds_iwdev;
	struct rds_connection *conn;

	/* alphabet soup, IBTA style */
	struct rdma_cm_id *i_cm_id;
	struct ib_pd *i_pd;
	struct ib_mr *i_mr;
	struct ib_cq *i_send_cq;
	struct ib_cq *i_recv_cq;

	/* tx */
	struct rds_iw_work_ring i_send_ring;
	struct rds_message *i_rm;
	struct rds_header *i_send_hdrs;
	u64 i_send_hdrs_dma;
	struct rds_iw_send_work *i_sends;

	/* rx */
	struct mutex i_recv_mutex;
	struct rds_iw_work_ring i_recv_ring;
	struct rds_iw_incoming *i_iwinc;
	u32 i_recv_data_rem;
	struct rds_header *i_recv_hdrs;
	u64 i_recv_hdrs_dma;
	struct rds_iw_recv_work *i_recvs;
	struct rds_page_frag i_frag;
	u64 i_ack_recv;	/* last ACK received */

	/* sending acks */
	unsigned long i_ack_flags;
	u64 i_ack_next;	/* next ACK to send */
	struct rds_header *i_ack;
	struct ib_send_wr i_ack_wr;
	struct ib_sge i_ack_sge;
	u64 i_ack_dma;
	unsigned long i_ack_queued;

	/* Flow control related information
	 *
	 * Our algorithm uses a pair of variables that we need to access
	 * atomically - one for the send credits, and one for the posted
	 * recv credits we need to transfer to the remote.
	 * Rather than protect them with a slow spinlock, we put both into
	 * a single atomic_t and update it using cmpxchg.
	 */
	atomic_t i_credits;

	/* Protocol version specific information */
	unsigned int i_flowctl:1;		/* enable/disable flow ctl */
	unsigned int i_dma_local_lkey:1;
	unsigned int i_fastreg_posted:1;	/* fastreg posted on this connection */
	/* Batched completions */
	unsigned int i_unsignaled_wrs;
	long i_unsignaled_bytes;
};

/* This assumes that atomic_t is at least 32 bits */
#define IB_GET_SEND_CREDITS(v)	((v) & 0xffff)
#define IB_GET_POST_CREDITS(v)	((v) >> 16)
#define IB_SET_SEND_CREDITS(v)	((v) & 0xffff)
#define IB_SET_POST_CREDITS(v)	((v) << 16)
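/*
 * Illustrative sketch, not part of the original header: one way the credit
 * macros above can be paired with atomic_cmpxchg() to move both counters
 * without a spinlock, as the comment in struct rds_iw_connection describes.
 * The helper name and its use here are hypothetical.
 */
static inline void rds_iw_example_add_credits(atomic_t *credits,
					      unsigned int send,
					      unsigned int posted)
{
	int oldval, newval;

	do {
		oldval = atomic_read(credits);
		newval = oldval + IB_SET_SEND_CREDITS(send) +
			 IB_SET_POST_CREDITS(posted);
		/* retry if another CPU changed the counters under us */
	} while (atomic_cmpxchg(credits, oldval, newval) != oldval);
}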

struct rds_iw_cm_id {
	struct list_head list;
	struct rdma_cm_id *cm_id;
};

struct rds_iw_device {
	struct list_head list;
	struct list_head cm_id_list;
	struct list_head conn_list;
	struct ib_device *dev;
	struct ib_pd *pd;
	struct ib_mr *mr;
	struct rds_iw_mr_pool *mr_pool;
	int page_shift;
	int max_sge;
	unsigned int max_wrs;
	unsigned int dma_local_lkey:1;
	spinlock_t spinlock;	/* protect the above */
};

/* bits for i_ack_flags */
#define IB_ACK_IN_FLIGHT	0
#define IB_ACK_REQUESTED	1

/* Magic WR_ID for ACKs */
#define RDS_IW_ACK_WR_ID	((u64)0xffffffffffffffffULL)
#define RDS_IW_FAST_REG_WR_ID	((u64)0xefefefefefefefefULL)
#define RDS_IW_LOCAL_INV_WR_ID	((u64)0xdfdfdfdfdfdfdfdfULL)
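/*
 * Illustrative sketch, not part of the original header: the magic WR_IDs
 * above let a completion handler tell ACK, fast-reg and local-invalidate
 * completions apart from ordinary ring entries.  The helper name below is
 * hypothetical.
 */
static inline int rds_iw_wc_is_magic(const struct ib_wc *wc)
{
	return wc->wr_id == RDS_IW_ACK_WR_ID ||
	       wc->wr_id == RDS_IW_FAST_REG_WR_ID ||
	       wc->wr_id == RDS_IW_LOCAL_INV_WR_ID;
}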

struct rds_iw_statistics {
	uint64_t	s_iw_connect_raced;
	uint64_t	s_iw_listen_closed_stale;
	uint64_t	s_iw_tx_cq_call;
	uint64_t	s_iw_tx_cq_event;
	uint64_t	s_iw_tx_ring_full;
	uint64_t	s_iw_tx_throttle;
	uint64_t	s_iw_tx_sg_mapping_failure;
	uint64_t	s_iw_tx_stalled;
	uint64_t	s_iw_tx_credit_updates;
	uint64_t	s_iw_rx_cq_call;
	uint64_t	s_iw_rx_cq_event;
	uint64_t	s_iw_rx_ring_empty;
	uint64_t	s_iw_rx_refill_from_cq;
	uint64_t	s_iw_rx_refill_from_thread;
	uint64_t	s_iw_rx_alloc_limit;
	uint64_t	s_iw_rx_credit_updates;
	uint64_t	s_iw_ack_sent;
	uint64_t	s_iw_ack_send_failure;
	uint64_t	s_iw_ack_send_delayed;
	uint64_t	s_iw_ack_send_piggybacked;
	uint64_t	s_iw_ack_received;
	uint64_t	s_iw_rdma_mr_alloc;
	uint64_t	s_iw_rdma_mr_free;
	uint64_t	s_iw_rdma_mr_used;
	uint64_t	s_iw_rdma_mr_pool_flush;
	uint64_t	s_iw_rdma_mr_pool_wait;
	uint64_t	s_iw_rdma_mr_pool_depleted;
};

extern struct workqueue_struct *rds_iw_wq;

/*
 * Fake ib_dma_sync_sg_for_{cpu,device} as long as ib_verbs.h
 * doesn't define it.
 */
static inline void rds_iw_dma_sync_sg_for_cpu(struct ib_device *dev,
		struct scatterlist *sg, unsigned int sg_dma_len, int direction)
{
	unsigned int i;

	for (i = 0; i < sg_dma_len; ++i) {
		ib_dma_sync_single_for_cpu(dev,
				ib_sg_dma_address(dev, &sg[i]),
				ib_sg_dma_len(dev, &sg[i]),
				direction);
	}
}
#define ib_dma_sync_sg_for_cpu	rds_iw_dma_sync_sg_for_cpu

static inline void rds_iw_dma_sync_sg_for_device(struct ib_device *dev,
		struct scatterlist *sg, unsigned int sg_dma_len, int direction)
{
	unsigned int i;

	for (i = 0; i < sg_dma_len; ++i) {
		ib_dma_sync_single_for_device(dev,
				ib_sg_dma_address(dev, &sg[i]),
				ib_sg_dma_len(dev, &sg[i]),
				direction);
	}
}
#define ib_dma_sync_sg_for_device	rds_iw_dma_sync_sg_for_device

static inline u32 rds_iw_local_dma_lkey(struct rds_iw_connection *ic)
{
	return ic->i_dma_local_lkey ? ic->i_cm_id->device->local_dma_lkey : ic->i_mr->lkey;
}

/* ib.c */
extern struct rds_transport rds_iw_transport;
extern void rds_iw_add_one(struct ib_device *device);
extern void rds_iw_remove_one(struct ib_device *device);
extern struct ib_client rds_iw_client;

extern unsigned int fastreg_pool_size;
extern unsigned int fastreg_message_size;

extern spinlock_t iw_nodev_conns_lock;
extern struct list_head iw_nodev_conns;

/* ib_cm.c */
int rds_iw_conn_alloc(struct rds_connection *conn, gfp_t gfp);
void rds_iw_conn_free(void *arg);
int rds_iw_conn_connect(struct rds_connection *conn);
void rds_iw_conn_shutdown(struct rds_connection *conn);
void rds_iw_state_change(struct sock *sk);
int __init rds_iw_listen_init(void);
void rds_iw_listen_stop(void);
void __rds_iw_conn_error(struct rds_connection *conn, const char *, ...);
int rds_iw_cm_handle_connect(struct rdma_cm_id *cm_id,
			     struct rdma_cm_event *event);
int rds_iw_cm_initiate_connect(struct rdma_cm_id *cm_id);
void rds_iw_cm_connect_complete(struct rds_connection *conn,
				struct rdma_cm_event *event);


#define rds_iw_conn_error(conn, fmt...) \
	__rds_iw_conn_error(conn, KERN_WARNING "RDS/IW: " fmt)

/* ib_rdma.c */
int rds_iw_update_cm_id(struct rds_iw_device *rds_iwdev, struct rdma_cm_id *cm_id);
int rds_iw_add_conn(struct rds_iw_device *rds_iwdev, struct rds_connection *conn);
void rds_iw_remove_nodev_conns(void);
void rds_iw_remove_conns(struct rds_iw_device *rds_iwdev);
struct rds_iw_mr_pool *rds_iw_create_mr_pool(struct rds_iw_device *);
void rds_iw_get_mr_info(struct rds_iw_device *rds_iwdev, struct rds_info_rdma_connection *iinfo);
void rds_iw_destroy_mr_pool(struct rds_iw_mr_pool *);
void *rds_iw_get_mr(struct scatterlist *sg, unsigned long nents,
		    struct rds_sock *rs, u32 *key_ret);
void rds_iw_sync_mr(void *trans_private, int dir);
void rds_iw_free_mr(void *trans_private, int invalidate);
void rds_iw_flush_mrs(void);
void rds_iw_remove_cm_id(struct rds_iw_device *rds_iwdev, struct rdma_cm_id *cm_id);

/* ib_recv.c */
int __init rds_iw_recv_init(void);
void rds_iw_recv_exit(void);
int rds_iw_recv(struct rds_connection *conn);
int rds_iw_recv_refill(struct rds_connection *conn, gfp_t kptr_gfp,
		       gfp_t page_gfp, int prefill);
void rds_iw_inc_purge(struct rds_incoming *inc);
void rds_iw_inc_free(struct rds_incoming *inc);
int rds_iw_inc_copy_to_user(struct rds_incoming *inc, struct iovec *iov,
			    size_t size);
void rds_iw_recv_cq_comp_handler(struct ib_cq *cq, void *context);
void rds_iw_recv_init_ring(struct rds_iw_connection *ic);
void rds_iw_recv_clear_ring(struct rds_iw_connection *ic);
void rds_iw_recv_init_ack(struct rds_iw_connection *ic);
void rds_iw_attempt_ack(struct rds_iw_connection *ic);
void rds_iw_ack_send_complete(struct rds_iw_connection *ic);
u64 rds_iw_piggyb_ack(struct rds_iw_connection *ic);

/* ib_ring.c */
void rds_iw_ring_init(struct rds_iw_work_ring *ring, u32 nr);
void rds_iw_ring_resize(struct rds_iw_work_ring *ring, u32 nr);
u32 rds_iw_ring_alloc(struct rds_iw_work_ring *ring, u32 val, u32 *pos);
void rds_iw_ring_free(struct rds_iw_work_ring *ring, u32 val);
void rds_iw_ring_unalloc(struct rds_iw_work_ring *ring, u32 val);
int rds_iw_ring_empty(struct rds_iw_work_ring *ring);
int rds_iw_ring_low(struct rds_iw_work_ring *ring);
u32 rds_iw_ring_oldest(struct rds_iw_work_ring *ring);
u32 rds_iw_ring_completed(struct rds_iw_work_ring *ring, u32 wr_id, u32 oldest);
extern wait_queue_head_t rds_iw_ring_empty_wait;

/* ib_send.c */
void rds_iw_xmit_complete(struct rds_connection *conn);
int rds_iw_xmit(struct rds_connection *conn, struct rds_message *rm,
		unsigned int hdr_off, unsigned int sg, unsigned int off);
void rds_iw_send_cq_comp_handler(struct ib_cq *cq, void *context);
void rds_iw_send_init_ring(struct rds_iw_connection *ic);
void rds_iw_send_clear_ring(struct rds_iw_connection *ic);
int rds_iw_xmit_rdma(struct rds_connection *conn, struct rds_rdma_op *op);
void rds_iw_send_add_credits(struct rds_connection *conn, unsigned int credits);
void rds_iw_advertise_credits(struct rds_connection *conn, unsigned int posted);
int rds_iw_send_grab_credits(struct rds_iw_connection *ic, u32 wanted,
			     u32 *adv_credits, int need_posted);

/* ib_stats.c */
DECLARE_PER_CPU(struct rds_iw_statistics, rds_iw_stats);
#define rds_iw_stats_inc(member) rds_stats_inc_which(rds_iw_stats, member)
unsigned int rds_iw_stats_info_copy(struct rds_info_iterator *iter,
				    unsigned int avail);

/* ib_sysctl.c */
int __init rds_iw_sysctl_init(void);
void rds_iw_sysctl_exit(void);
extern unsigned long rds_iw_sysctl_max_send_wr;
extern unsigned long rds_iw_sysctl_max_recv_wr;
extern unsigned long rds_iw_sysctl_max_unsig_wrs;
extern unsigned long rds_iw_sysctl_max_unsig_bytes;
extern unsigned long rds_iw_sysctl_max_recv_allocation;
extern unsigned int rds_iw_sysctl_flow_control;
extern ctl_table rds_iw_sysctl_table[];

/*
 * Helper functions for getting/setting the header and data SGEs in
 * RDS packets (not RDMA)
 */
static inline struct ib_sge *
rds_iw_header_sge(struct rds_iw_connection *ic, struct ib_sge *sge)
{
	return &sge[0];
}

static inline struct ib_sge *
rds_iw_data_sge(struct rds_iw_connection *ic, struct ib_sge *sge)
{
	return &sge[1];
}

static inline void rds_iw_set_64bit(u64 *ptr, u64 val)
{
#if BITS_PER_LONG == 64
	*ptr = val;
#else
	set_64bit(ptr, val);
#endif
}

#endif