author	Linus Torvalds <torvalds@linux-foundation.org>	2009-06-22 15:53:06 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2009-06-22 15:53:06 -0400
commit	df36b439c5fedefe013d4449cb6a50d15e2f4d70 (patch)
tree	537c58db778cbf11b74e28091f89d1b8139fb84d /net/sunrpc/backchannel_rqst.c
parent	a9b011f5ac57cbaedb32a8149f3d39d7b2c1f0e0 (diff)
parent	e9f029855865e917821ef6034b31e340a4cfc815 (diff)
Merge branch 'for-2.6.31' of git://git.linux-nfs.org/projects/trondmy/nfs-2.6
* 'for-2.6.31' of git://git.linux-nfs.org/projects/trondmy/nfs-2.6: (128 commits)
nfs41: sunrpc: xprt_alloc_bc_request() should not use spin_lock_bh()
nfs41: Move initialization of nfs4_opendata seq_res to nfs4_init_opendata_res
nfs: remove unnecessary NFS_INO_INVALID_ACL checks
NFS: More "sloppy" parsing problems
NFS: Invalid mount option values should always fail, even with "sloppy"
NFS: Remove unused XDR decoder functions
NFS: Update MNT and MNT3 reply decoding functions
NFS: add XDR decoder for mountd version 3 auth-flavor lists
NFS: add new file handle decoders to in-kernel mountd client
NFS: Add separate mountd status code decoders for each mountd version
NFS: remove unused function in fs/nfs/mount_clnt.c
NFS: Use xdr_stream-based XDR encoder for MNT's dirpath argument
NFS: Clean up MNT program definitions
lockd: Don't bother with RPC ping for NSM upcalls
lockd: Update NSM state from SM_MON replies
NFS: Fix false error return from nfs_callback_up() if ipv6.ko is not available
NFS: Return error code from nfs_callback_up() to user space
NFS: Do not display the setting of the "intr" mount option
NFS: add support for splice writes
nfs41: Backchannel: CB_SEQUENCE validation
...
Diffstat (limited to 'net/sunrpc/backchannel_rqst.c')
-rw-r--r--	net/sunrpc/backchannel_rqst.c	281
1 file changed, 281 insertions(+), 0 deletions(-)
diff --git a/net/sunrpc/backchannel_rqst.c b/net/sunrpc/backchannel_rqst.c
new file mode 100644
index 000000000000..553621fb2c41
--- /dev/null
+++ b/net/sunrpc/backchannel_rqst.c
@@ -0,0 +1,281 @@
/******************************************************************************

(c) 2007 Network Appliance, Inc. All Rights Reserved.
(c) 2009 NetApp. All Rights Reserved.

NetApp provides this source code under the GPL v2 License.
The GPL v2 license is available at
http://opensource.org/licenses/gpl-license.php.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/

#include <linux/tcp.h>
#include <linux/sunrpc/xprt.h>

#ifdef RPC_DEBUG
#define RPCDBG_FACILITY	RPCDBG_TRANS
#endif

#if defined(CONFIG_NFS_V4_1)

/*
 * Helper routines that track the number of preallocation elements
 * on the transport.
 */
static inline int xprt_need_to_requeue(struct rpc_xprt *xprt)
{
	return xprt->bc_alloc_count > 0;
}

static inline void xprt_inc_alloc_count(struct rpc_xprt *xprt, unsigned int n)
{
	xprt->bc_alloc_count += n;
}

static inline int xprt_dec_alloc_count(struct rpc_xprt *xprt, unsigned int n)
{
	return xprt->bc_alloc_count -= n;
}

/*
 * Free the preallocated rpc_rqst structure and the memory
 * buffers hanging off of it.
 */
static void xprt_free_allocation(struct rpc_rqst *req)
{
	struct xdr_buf *xbufp;

	dprintk("RPC: free allocations for req= %p\n", req);
	BUG_ON(test_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state));
	xbufp = &req->rq_private_buf;
	free_page((unsigned long)xbufp->head[0].iov_base);
	xbufp = &req->rq_snd_buf;
	free_page((unsigned long)xbufp->head[0].iov_base);
	list_del(&req->rq_bc_pa_list);
	kfree(req);
}

/*
 * Preallocate up to min_reqs structures and related buffers for use
 * by the backchannel.  This function can be called multiple times
 * when creating new sessions that use the same rpc_xprt.  The
 * preallocated buffers are added to the pool of resources used by
 * the rpc_xprt.  Any one of these resources may be used by an
 * incoming callback request.  It's up to the higher levels in the
 * stack to enforce that the maximum number of session slots is not
 * being exceeded.
 *
 * Some callback arguments can be large.  For example, a pNFS server
 * may return multiple deviceids.  The list can be unbounded, but the
 * client has the ability to tell the server the maximum size of its
 * callback requests.  Each deviceID is 16 bytes, so allocating one page
 * for the arguments leaves enough room for a number of these deviceIDs
 * (a 4096-byte page holds up to 256 of them).  The NFS client indicates
 * to the pNFS server that its callback requests can be up to 4096 bytes
 * in size.
 */
int xprt_setup_backchannel(struct rpc_xprt *xprt, unsigned int min_reqs)
{
	struct page *page_rcv = NULL, *page_snd = NULL;
	struct xdr_buf *xbufp = NULL;
	struct rpc_rqst *req, *tmp;
	struct list_head tmp_list;
	int i;

	dprintk("RPC: setup backchannel transport\n");

	/*
	 * We use a temporary list to keep track of the preallocated
	 * buffers.  Once we're done building the list we splice it
	 * into the backchannel preallocation list off of the rpc_xprt
	 * struct.  This helps minimize the amount of time the list
	 * lock is held on the rpc_xprt struct.  It also makes cleanup
	 * easier in case of memory allocation errors.
	 */
	INIT_LIST_HEAD(&tmp_list);
	for (i = 0; i < min_reqs; i++) {
		/* Pre-allocate one backchannel rpc_rqst */
		req = kzalloc(sizeof(struct rpc_rqst), GFP_KERNEL);
		if (req == NULL) {
			printk(KERN_ERR "Failed to create bc rpc_rqst\n");
			goto out_free;
		}

		/* Add the allocated buffer to the tmp list */
		dprintk("RPC: adding req= %p\n", req);
		list_add(&req->rq_bc_pa_list, &tmp_list);

		req->rq_xprt = xprt;
		INIT_LIST_HEAD(&req->rq_list);
		INIT_LIST_HEAD(&req->rq_bc_list);

		/* Preallocate one XDR receive buffer */
		page_rcv = alloc_page(GFP_KERNEL);
		if (page_rcv == NULL) {
			printk(KERN_ERR "Failed to create bc receive xbuf\n");
			goto out_free;
		}
		xbufp = &req->rq_rcv_buf;
		xbufp->head[0].iov_base = page_address(page_rcv);
		xbufp->head[0].iov_len = PAGE_SIZE;
		xbufp->tail[0].iov_base = NULL;
		xbufp->tail[0].iov_len = 0;
		xbufp->page_len = 0;
		xbufp->len = PAGE_SIZE;
		xbufp->buflen = PAGE_SIZE;

		/* Preallocate one XDR send buffer */
		page_snd = alloc_page(GFP_KERNEL);
		if (page_snd == NULL) {
			printk(KERN_ERR "Failed to create bc snd xbuf\n");
			goto out_free;
		}

		xbufp = &req->rq_snd_buf;
		xbufp->head[0].iov_base = page_address(page_snd);
		xbufp->head[0].iov_len = 0;
		xbufp->tail[0].iov_base = NULL;
		xbufp->tail[0].iov_len = 0;
		xbufp->page_len = 0;
		xbufp->len = 0;
		xbufp->buflen = PAGE_SIZE;
	}

	/*
	 * Add the temporary list to the backchannel preallocation list
	 */
	spin_lock_bh(&xprt->bc_pa_lock);
	list_splice(&tmp_list, &xprt->bc_pa_list);
	xprt_inc_alloc_count(xprt, min_reqs);
	spin_unlock_bh(&xprt->bc_pa_lock);

	dprintk("RPC: setup backchannel transport done\n");
	return 0;

out_free:
	/*
	 * Memory allocation failed, free the temporary list
	 */
	list_for_each_entry_safe(req, tmp, &tmp_list, rq_bc_pa_list)
		xprt_free_allocation(req);

	dprintk("RPC: setup backchannel transport failed\n");
	return -ENOMEM;
}
EXPORT_SYMBOL(xprt_setup_backchannel);

/*
 * Destroys the backchannel preallocated structures.
 * Since these structures may have been allocated by multiple calls
 * to xprt_setup_backchannel, we only destroy up to the maximum number
 * of reqs specified by the caller.
 * @xprt:	the transport holding the preallocated structures
 * @max_reqs:	the maximum number of preallocated structures to destroy
 */
void xprt_destroy_backchannel(struct rpc_xprt *xprt, unsigned int max_reqs)
{
	struct rpc_rqst *req = NULL, *tmp = NULL;

	dprintk("RPC: destroy backchannel transport\n");

	BUG_ON(max_reqs == 0);
	spin_lock_bh(&xprt->bc_pa_lock);
	xprt_dec_alloc_count(xprt, max_reqs);
	list_for_each_entry_safe(req, tmp, &xprt->bc_pa_list, rq_bc_pa_list) {
		dprintk("RPC: req=%p\n", req);
		xprt_free_allocation(req);
		if (--max_reqs == 0)
			break;
	}
	spin_unlock_bh(&xprt->bc_pa_lock);

	dprintk("RPC: backchannel list empty= %s\n",
		list_empty(&xprt->bc_pa_list) ? "true" : "false");
}
EXPORT_SYMBOL(xprt_destroy_backchannel);

/*
 * One or more rpc_rqst structures have been preallocated during the
 * backchannel setup.  Buffer space for the send and private XDR buffers
 * has been preallocated as well.  Use xprt_alloc_bc_request to obtain
 * one of these preallocated requests, and xprt_free_bc_request to
 * return it.
 *
 * We know that we're called in soft interrupt context, so the plain
 * spin_lock suffices; there is no need for the bottom-half-disabling
 * spin_lock_bh variant, since bottom halves are already disabled here.
 *
 * Return an available rpc_rqst, or NULL if none are available.
 */
struct rpc_rqst *xprt_alloc_bc_request(struct rpc_xprt *xprt)
{
	struct rpc_rqst *req;

	dprintk("RPC: allocate a backchannel request\n");
	spin_lock(&xprt->bc_pa_lock);
	if (!list_empty(&xprt->bc_pa_list)) {
		req = list_first_entry(&xprt->bc_pa_list, struct rpc_rqst,
				rq_bc_pa_list);
		list_del(&req->rq_bc_pa_list);
	} else {
		req = NULL;
	}
	spin_unlock(&xprt->bc_pa_lock);

	if (req != NULL) {
		set_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state);
		req->rq_reply_bytes_recvd = 0;
		req->rq_bytes_sent = 0;
		memcpy(&req->rq_private_buf, &req->rq_rcv_buf,
			sizeof(req->rq_private_buf));
	}
	dprintk("RPC: backchannel req=%p\n", req);
	return req;
}

/*
 * Return the preallocated rpc_rqst structure and XDR buffers
 * associated with this request.
 */
void xprt_free_bc_request(struct rpc_rqst *req)
{
	struct rpc_xprt *xprt = req->rq_xprt;

	dprintk("RPC: free backchannel req=%p\n", req);

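	/*
	 * clear_bit() does not imply a memory barrier, so the
	 * smp_mb__*_clear_bit() pair below orders the RPC_BC_PA_IN_USE
	 * update with respect to the surrounding accesses to this
	 * request.
	 */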
	smp_mb__before_clear_bit();
	BUG_ON(!test_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state));
	clear_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state);
	smp_mb__after_clear_bit();

	if (!xprt_need_to_requeue(xprt)) {
		/*
		 * The last remaining session was destroyed while this
		 * entry was in use.  Free the entry and don't attempt
		 * to add it back to the list because there is no need
		 * for any more preallocated entries.
		 */
		dprintk("RPC: Last session removed req=%p\n", req);
		xprt_free_allocation(req);
		return;
	}

	/*
	 * Return it to the list of preallocations so that it
	 * may be reused by a new callback request.
	 */
	spin_lock_bh(&xprt->bc_pa_lock);
	list_add(&req->rq_bc_pa_list, &xprt->bc_pa_list);
	spin_unlock_bh(&xprt->bc_pa_lock);
}

#endif	/* CONFIG_NFS_V4_1 */
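
For orientation, here is a minimal caller-side sketch of how the exports in this file pair up across a session's lifetime. It is not part of this commit: the function example_session_setup, its slots parameter, and the error-handling shape are illustrative assumptions; only xprt_setup_backchannel, xprt_alloc_bc_request, xprt_free_bc_request, and xprt_destroy_backchannel come from the code above.

#include <linux/sunrpc/xprt.h>

/*
 * Hypothetical illustration (not part of this commit): preallocate
 * backchannel resources when a session is created and release them
 * when it is torn down.
 */
static int example_session_setup(struct rpc_xprt *xprt, unsigned int slots)
{
	int err;

	/* Reserve one rpc_rqst plus send/receive pages per session slot. */
	err = xprt_setup_backchannel(xprt, slots);
	if (err)
		return err;

	/*
	 * While the session is live, the transport layer calls
	 * xprt_alloc_bc_request() for each incoming callback and
	 * xprt_free_bc_request() once the callback has been processed.
	 */

	/* On teardown, release up to the same number of entries. */
	xprt_destroy_backchannel(xprt, slots);
	return 0;
}

The setup/destroy pair is symmetric by design: because xprt_setup_backchannel can run once per session on a shared rpc_xprt, each caller destroys only as many entries as it added.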