author    Or Gerlitz <ogerlitz@voltaire.com>    2010-02-08 08:17:42 -0500
committer Roland Dreier <rolandd@cisco.com>     2010-02-24 12:41:10 -0500
commit    bcc60c381d857ced653e912cbe6121294773e147 (patch)
tree      543a2d483a1110f9666ae5503d9e3c53a8782e0c /drivers/infiniband/ulp/iser/iscsi_iser.h
parent    1cef4659850eeb862c248c7670e404d7a1711ed1 (diff)
IB/iser: New receive buffer posting logic
Currently, the recv buffer posting logic is based on the transactional nature of iSER, which allows a buffer to be posted before a PDU is sent. Change this to post only when the number of outstanding recv buffers falls below a water mark, and to post in batches, thus simplifying and optimizing the data path. Use a pre-allocated ring of recv buffers instead of allocating from a kmem cache. The login response buffer gets special treatment: it must be 8K, unlike the 128-byte buffers used for every other purpose.

Signed-off-by: Or Gerlitz <ogerlitz@voltaire.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
Diffstat (limited to 'drivers/infiniband/ulp/iser/iscsi_iser.h')
-rw-r--r--  drivers/infiniband/ulp/iser/iscsi_iser.h | 40
1 file changed, 33 insertions(+), 7 deletions(-)
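Before the diff itself, here is a minimal sketch of the water-mark idea described above, assuming a caller that knows how many receive buffers are currently outstanding (the "posted" argument is a stand-in; this header does not show where that count lives). It uses only the macros and the iser_post_recvm() prototype introduced by this patch:

/*
 * Illustrative only -- not part of the patch.  "posted" stands in for
 * whatever count of outstanding recv buffers the iser .c files keep.
 */
static void iser_replenish_rx_ring(struct iser_conn *ib_conn, int posted)
{
	int count;

	/* nothing to do while enough buffers are still posted */
	if (posted > ISER_MIN_POSTED_RX)
		return;

	/*
	 * Repost a whole batch rather than one buffer per completion.
	 * rx_wr[] holds at most ISER_MIN_POSTED_RX work requests, so a
	 * single call cannot chain more than that.
	 */
	count = min(ISER_QP_MAX_RECV_DTOS - posted, ISER_MIN_POSTED_RX);
	if (iser_post_recvm(ib_conn, count))
		iser_err("failed to post %d rx buffers\n", count);
}

Since the water mark is a quarter of the receive ring (ISCSI_DEF_XMIT_CMDS_MAX >> 2), receives are normally reposted in batches of that size instead of one ib_post_recv() call per received PDU.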
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h
index e8dfdcfa1daf..83effb610594 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.h
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.h
@@ -102,9 +102,9 @@
 #define ISER_MAX_TX_MISC_PDUS		6 /* NOOP_OUT(2), TEXT(1),         *
 					   * SCSI_TMFUNC(2), LOGOUT(1) */
 
-#define ISER_QP_MAX_RECV_DTOS	(ISCSI_DEF_XMIT_CMDS_MAX + \
-				 ISER_MAX_RX_MISC_PDUS    + \
-				 ISER_MAX_TX_MISC_PDUS)
+#define ISER_QP_MAX_RECV_DTOS	(ISCSI_DEF_XMIT_CMDS_MAX)
+
+#define ISER_MIN_POSTED_RX	(ISCSI_DEF_XMIT_CMDS_MAX >> 2)
 
 /* the max TX (send) WR supported by the iSER QP is defined by                 *
  * max_send_wr = T * (1 + D) + C ; D is how many inflight dataouts we expect   *
@@ -132,6 +132,12 @@ struct iser_hdr {
 	__be64  read_va;
 } __attribute__((packed));
 
+/* Constant PDU lengths calculations */
+#define ISER_HEADERS_LEN	(sizeof(struct iser_hdr) + sizeof(struct iscsi_hdr))
+
+#define ISER_RECV_DATA_SEG_LEN	128
+#define ISER_RX_PAYLOAD_SIZE	(ISER_HEADERS_LEN + ISER_RECV_DATA_SEG_LEN)
+#define ISER_RX_LOGIN_SIZE	(ISER_HEADERS_LEN + ISCSI_DEF_MAX_RECV_SEG_LEN)
 
 /* Length of an object name string */
 #define ISER_OBJECT_NAME_SIZE		64
@@ -212,7 +218,6 @@ struct iser_dto {
 };
 
 enum iser_desc_type {
-	ISCSI_RX,
 	ISCSI_TX_CONTROL ,
 	ISCSI_TX_SCSI_COMMAND,
 	ISCSI_TX_DATAOUT
@@ -228,6 +233,17 @@ struct iser_desc {
 	struct iser_dto		     dto;
 };
 
+#define ISER_RX_PAD_SIZE	(256 - (ISER_RX_PAYLOAD_SIZE + \
+					sizeof(u64) + sizeof(struct ib_sge)))
+struct iser_rx_desc {
+	struct iser_hdr		     iser_header;
+	struct iscsi_hdr	     iscsi_header;
+	char			     data[ISER_RECV_DATA_SEG_LEN];
+	u64			     dma_addr;
+	struct ib_sge		     rx_sg;
+	char			     pad[ISER_RX_PAD_SIZE];
+} __attribute__((packed));
+
 struct iser_device {
 	struct ib_device             *ib_device;
 	struct ib_pd	             *pd;
@@ -256,6 +272,12 @@ struct iser_conn {
 	struct iser_page_vec         *page_vec;    /* represents SG to fmr maps*
 						    * maps serialized as tx is*/
 	struct list_head	     conn_list;     /* entry in ig conn list */
+
+	char			     *login_buf;
+	u64			     login_dma;
+	unsigned int		     rx_desc_head;
+	struct iser_rx_desc	     *rx_descs;
+	struct ib_recv_wr	     rx_wr[ISER_MIN_POSTED_RX];
 };
 
 struct iscsi_iser_conn {
@@ -319,8 +341,9 @@ void iser_conn_put(struct iser_conn *ib_conn);
 
 void iser_conn_terminate(struct iser_conn *ib_conn);
 
-void iser_rcv_completion(struct iser_desc *desc,
-			 unsigned long dto_xfer_len);
+void iser_rcv_completion(struct iser_rx_desc *desc,
+			 unsigned long dto_xfer_len,
+			 struct iser_conn *ib_conn);
 
 void iser_snd_completion(struct iser_desc *desc);
 
@@ -332,6 +355,8 @@ void iser_dto_buffs_release(struct iser_dto *dto);
 
 int iser_regd_buff_release(struct iser_regd_buf *regd_buf);
 
+void iser_free_rx_descriptors(struct iser_conn *ib_conn);
+
 void iser_reg_single(struct iser_device *device,
 		     struct iser_regd_buf *regd_buf,
 		     enum dma_data_direction direction);
@@ -353,7 +378,8 @@ int iser_reg_page_vec(struct iser_conn *ib_conn,
 
 void iser_unreg_mem(struct iser_mem_reg *mem_reg);
 
-int iser_post_recv(struct iser_desc *rx_desc);
+int iser_post_recvl(struct iser_conn *ib_conn);
+int iser_post_recvm(struct iser_conn *ib_conn, int count);
 int iser_post_send(struct iser_desc *tx_desc);
 
 int iser_conn_state_comp(struct iser_conn *ib_conn,
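For completeness, a hedged sketch of how the new per-connection fields (rx_descs, rx_desc_head and rx_wr[]) could be tied together inside iser_post_recvm(). The real implementation lives in the iser .c files, which are not part of this diff; ib_conn->qp and the use of wr_id to carry the descriptor pointer are assumptions here, while the fields, macros and the ib_post_recv() verb are taken as-is. Note that ISER_RX_PAD_SIZE pads each iser_rx_desc out to 256 bytes, so the ring entries are uniformly sized and cheap to index:

/*
 * Illustrative sketch only -- not the actual implementation.
 * Assumes ib_conn->qp is the connection's queue pair and that each
 * rx_sg was set up (addr/length/lkey) when the ring was allocated.
 */
int iser_post_recvm_sketch(struct iser_conn *ib_conn, int count)
{
	struct ib_recv_wr *wr, *bad_wr;
	unsigned int head = ib_conn->rx_desc_head;
	int i, ret;

	/* rx_wr[] can chain at most ISER_MIN_POSTED_RX work requests */
	if (count < 1 || count > ISER_MIN_POSTED_RX)
		return -EINVAL;

	for (i = 0; i < count; i++) {
		struct iser_rx_desc *rx_desc = &ib_conn->rx_descs[head];

		wr = &ib_conn->rx_wr[i];
		wr->wr_id   = (unsigned long)rx_desc; /* returned in the CQE */
		wr->sg_list = &rx_desc->rx_sg;
		wr->num_sge = 1;
		wr->next    = wr + 1;                 /* chain to the next WR */

		head = (head + 1) % ISER_QP_MAX_RECV_DTOS;
	}
	ib_conn->rx_wr[count - 1].next = NULL;        /* terminate the chain */

	ret = ib_post_recv(ib_conn->qp, ib_conn->rx_wr, &bad_wr);
	if (!ret)
		ib_conn->rx_desc_head = head;
	return ret;
}

The login response does not go through this ring: its PDU can be up to ISCSI_DEF_MAX_RECV_SEG_LEN (8K) rather than the 128-byte ISER_RECV_DATA_SEG_LEN, which is why the patch adds the separate login_buf/login_dma pair and the dedicated iser_post_recvl() entry point.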