author    J. Bruce Fields <bfields@citi.umich.edu>    2008-07-03 16:24:06 -0400
committer J. Bruce Fields <bfields@citi.umich.edu>    2008-07-03 16:24:06 -0400
commit    e86322f611eef95aafaf726fd3965e5b211f1985 (patch)
tree      28547e26df4fc6ae671dc8cc6912a53717e4db08 /include/linux/sunrpc
parent    b001a1b6aa960949a24c2cdc28257dfcc9428d74 (diff)
parent    8948896c9e098c6fd31a6a698a598a7cbd7fa40e (diff)
Merge branch 'for-bfields' of git://linux-nfs.org/~tomtucker/xprt-switch-2.6 into for-2.6.27
Diffstat (limited to 'include/linux/sunrpc')
-rw-r--r--    include/linux/sunrpc/svc_rdma.h    44
1 file changed, 34 insertions, 10 deletions
diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
index c11bbcc081f9..ef2e3a20bf3b 100644
--- a/include/linux/sunrpc/svc_rdma.h
+++ b/include/linux/sunrpc/svc_rdma.h
@@ -71,7 +71,8 @@ extern atomic_t rdma_stat_sq_prod;
  * completes.
  */
 struct svc_rdma_op_ctxt {
-	struct svc_rdma_op_ctxt *next;
+	struct svc_rdma_op_ctxt *read_hdr;
+	int hdr_count;
 	struct xdr_buf arg;
 	struct list_head dto_q;
 	enum ib_wr_opcode wr_op;
@@ -85,7 +86,31 @@ struct svc_rdma_op_ctxt {
 	struct page *pages[RPCSVC_MAXPAGES];
 };
 
-#define RDMACTXT_F_READ_DONE	1
+/*
+ * NFS_ requests are mapped on the client side by the chunk lists in
+ * the RPCRDMA header. During the fetching of the RPC from the client
+ * and the writing of the reply to the client, the memory in the
+ * client and the memory in the server must be mapped as contiguous
+ * vaddr/len for access by the hardware. These data structures keep
+ * these mappings.
+ *
+ * For an RDMA_WRITE, the 'sge' maps the RPC REPLY. For RDMA_READ, the
+ * 'sge' in the svc_rdma_req_map maps the server side RPC reply and the
+ * 'ch' field maps the read-list of the RPCRDMA header to the 'sge'
+ * mapping of the reply.
+ */
+struct svc_rdma_chunk_sge {
+	int start;		/* sge no for this chunk */
+	int count;		/* sge count for this chunk */
+};
+struct svc_rdma_req_map {
+	unsigned long count;
+	union {
+		struct kvec sge[RPCSVC_MAXPAGES];
+		struct svc_rdma_chunk_sge ch[RPCSVC_MAXPAGES];
+	};
+};
+
 #define RDMACTXT_F_LAST_CTXT	2
 
 struct svcxprt_rdma {
@@ -93,7 +118,6 @@ struct svcxprt_rdma {
 	struct rdma_cm_id *sc_cm_id;	/* RDMA connection id */
 	struct list_head sc_accept_q;	/* Conn. waiting accept */
 	int sc_ord;			/* RDMA read limit */
-	wait_queue_head_t sc_read_wait;
 	int sc_max_sge;
 
 	int sc_sq_depth;		/* Depth of SQ */
@@ -104,11 +128,8 @@ struct svcxprt_rdma {
 
 	struct ib_pd *sc_pd;
 
-	struct svc_rdma_op_ctxt *sc_ctxt_head;
-	int sc_ctxt_cnt;
-	int sc_ctxt_bump;
-	int sc_ctxt_max;
-	spinlock_t sc_ctxt_lock;
+	atomic_t sc_dma_used;
+	atomic_t sc_ctxt_used;
 	struct list_head sc_rq_dto_q;
 	spinlock_t sc_rq_dto_lock;
 	struct ib_qp *sc_qp;
@@ -123,6 +144,7 @@ struct svcxprt_rdma {
 	struct list_head sc_dto_q;	/* DTO tasklet I/O pending Q */
 	struct list_head sc_read_complete_q;
 	spinlock_t sc_read_complete_lock;
+	struct work_struct sc_work;
 };
 /* sc_flags */
 #define RDMAXPRT_RQ_PENDING	1
@@ -164,13 +186,15 @@ extern int svc_rdma_sendto(struct svc_rqst *);
 
 /* svc_rdma_transport.c */
 extern int svc_rdma_send(struct svcxprt_rdma *, struct ib_send_wr *);
-extern int svc_rdma_send_error(struct svcxprt_rdma *, struct rpcrdma_msg *,
+extern void svc_rdma_send_error(struct svcxprt_rdma *, struct rpcrdma_msg *,
 			       enum rpcrdma_errcode);
 struct page *svc_rdma_get_page(void);
 extern int svc_rdma_post_recv(struct svcxprt_rdma *);
 extern int svc_rdma_create_listen(struct svc_serv *, int, struct sockaddr *);
 extern struct svc_rdma_op_ctxt *svc_rdma_get_context(struct svcxprt_rdma *);
 extern void svc_rdma_put_context(struct svc_rdma_op_ctxt *, int);
+extern struct svc_rdma_req_map *svc_rdma_get_req_map(void);
+extern void svc_rdma_put_req_map(struct svc_rdma_req_map *);
 extern void svc_sq_reap(struct svcxprt_rdma *);
 extern void svc_rq_reap(struct svcxprt_rdma *);
 extern struct svc_xprt_class svc_rdma_class;
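
A minimal sketch of how the svc_rdma_req_map introduced above might be filled on the send path, assuming the reply xdr_buf has already been built; the helper name sketch_map_reply() and the single-entry layout are illustrative, not taken from this patch:

	#include <linux/sunrpc/svc_rdma.h>

	static unsigned long sketch_map_reply(struct xdr_buf *xdr)
	{
		struct svc_rdma_req_map *vec;
		unsigned long count;

		/* Scratch map from the transport code; paired with the put below. */
		vec = svc_rdma_get_req_map();

		/* Record the reply head as the first contiguous vaddr/len pair;
		 * the page list and tail would be appended the same way before
		 * the entries are turned into an RDMA_WRITE sge list. */
		vec->sge[0].iov_base = xdr->head[0].iov_base;
		vec->sge[0].iov_len = xdr->head[0].iov_len;
		vec->count = 1;

		count = vec->count;
		svc_rdma_put_req_map(vec);
		return count;
	}

Since sge[] and ch[] share storage in the union, one pooled scratch object serves whichever mapping view a given caller needs.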
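
The per-transport context bookkeeping also changes shape above: the sc_ctxt_head free list, its counters, and sc_ctxt_lock give way to plain atomics. A hedged sketch of the accounting this implies; the real svc_rdma_get_context()/svc_rdma_put_context() in svc_rdma_transport.c allocate and recycle contexts differently, and the sketch_* names are hypothetical:

	#include <linux/list.h>
	#include <linux/slab.h>
	#include <linux/sunrpc/svc_rdma.h>

	static struct svc_rdma_op_ctxt *sketch_get_context(struct svcxprt_rdma *xprt)
	{
		struct svc_rdma_op_ctxt *ctxt;

		ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
		if (!ctxt)
			return NULL;
		/* One outstanding context per get; dropped again in the put. */
		atomic_inc(&xprt->sc_ctxt_used);
		INIT_LIST_HEAD(&ctxt->dto_q);
		return ctxt;
	}

	static void sketch_put_context(struct svcxprt_rdma *xprt,
				       struct svc_rdma_op_ctxt *ctxt)
	{
		kfree(ctxt);
		atomic_dec(&xprt->sc_ctxt_used);
	}

sc_dma_used presumably plays the same role for outstanding DMA mappings, letting a leak show up as a non-zero counter at transport teardown.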