path: root/fs/nfs/direct.c
author    Linus Torvalds <torvalds@g5.osdl.org>  2006-03-25 12:18:27 -0500
committer Linus Torvalds <torvalds@g5.osdl.org>  2006-03-25 12:18:27 -0500
commit    53846a21c1766326bb14ce8ab6e997a0c120675d
tree      37b04485e29844b4e734479181276a2f4d2447e4
parent    2e9abdd9bad485970b37cd53a82f92702054984c
parent    1ebbe2b20091d306453a5cf480a87e6cd28ae76f
Merge git://git.linux-nfs.org/pub/linux/nfs-2.6
* git://git.linux-nfs.org/pub/linux/nfs-2.6: (103 commits)
  SUNRPC,RPCSEC_GSS: spkm3--fix config dependencies
  SUNRPC,RPCSEC_GSS: spkm3: import contexts using NID_cast5_cbc
  LOCKD: Make nlmsvc_traverse_shares return void
  LOCKD: nlmsvc_traverse_blocks return is unused
  SUNRPC,RPCSEC_GSS: fix krb5 sequence numbers.
  NFSv4: Dont list system.nfs4_acl for filesystems that don't support it.
  SUNRPC,RPCSEC_GSS: remove unnecessary kmalloc of a checksum
  SUNRPC: Ensure rpc_call_async() always calls tk_ops->rpc_release()
  SUNRPC: Fix memory barriers for req->rq_received
  NFS: Fix a race in nfs_sync_inode()
  NFS: Clean up nfs_flush_list()
  NFS: Fix a race with PG_private and nfs_release_page()
  NFSv4: Ensure the callback daemon flushes signals
  SUNRPC: Fix a 'Busy inodes' error in rpc_pipefs
  NFS, NLM: Allow blocking locks to respect signals
  NFS: Make nfs_fhget() return appropriate error values
  NFSv4: Fix an oops in nfs4_fill_super
  lockd: blocks should hold a reference to the nlm_file
  NFSv4: SETCLIENTID_CONFIRM should handle NFS4ERR_DELAY/NFS4ERR_RESOURCE
  NFSv4: Send the delegation stateid for SETATTR calls
  ...
Diffstat (limited to 'fs/nfs/direct.c')
-rw-r--r--  fs/nfs/direct.c  949
1 file changed, 535 insertions(+), 414 deletions(-)
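
The fs/nfs/direct.c side of this merge is a rewrite of the client's direct I/O engine: the old per-request wait queue and atomic counters give way to a spinlock-protected outstanding counter plus a struct completion, which lets a single nfs_direct_req serve both synchronous callers and aio ("support O_DIRECT with aio" in the changelog below). What follows is a minimal userspace sketch of that completion scheme only; the direct_req type, the mutex/condvar pair, and the simulated byte counts are illustrative stand-ins for the kernel's nfs_direct_req, spinlock, and RPC results, not the kernel API.

/*
 * Illustrative userspace model of the completion scheme in this diff.
 * Field names mirror the patch (lock, outstanding, count, error), but
 * the types and the inline "I/O completions" are hypothetical stand-ins.
 */
#include <pthread.h>
#include <stdio.h>

struct direct_req {
	pthread_mutex_t lock;	/* protects the completion state below */
	int outstanding;	/* i/os we're waiting for */
	long count;		/* bytes actually processed */
	int error;		/* first reported error, if any */
	pthread_cond_t wait;	/* stands in for struct completion */
	int done;
};

/* Mirrors nfs_direct_read_result(): fold one I/O's result into the
 * request; only the caller that retires the last outstanding I/O
 * signals completion, so the waiter never wakes up early. */
static void direct_io_done(struct direct_req *dreq, long result)
{
	pthread_mutex_lock(&dreq->lock);
	if (result >= 0)
		dreq->count += result;
	else
		dreq->error = (int) result;
	if (--dreq->outstanding) {
		pthread_mutex_unlock(&dreq->lock);
		return;		/* more i/os still in flight */
	}
	dreq->done = 1;		/* last one: complete the request */
	pthread_cond_broadcast(&dreq->wait);
	pthread_mutex_unlock(&dreq->lock);
}

/* Mirrors nfs_direct_wait(): collect the final error or byte count. */
static long direct_wait(struct direct_req *dreq)
{
	long result;

	pthread_mutex_lock(&dreq->lock);
	while (!dreq->done)
		pthread_cond_wait(&dreq->wait, &dreq->lock);
	result = dreq->error ? dreq->error : dreq->count;
	pthread_mutex_unlock(&dreq->lock);
	return result;
}

int main(void)
{
	struct direct_req dreq = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.wait = PTHREAD_COND_INITIALIZER,
		.outstanding = 3,	/* e.g. three rsize-sized READs */
	};

	/* Retire the "RPCs" inline; real completions arrive asynchronously. */
	direct_io_done(&dreq, 16384);
	direct_io_done(&dreq, 16384);
	direct_io_done(&dreq, 4096);
	printf("direct read moved %ld bytes\n", direct_wait(&dreq));
	return 0;
}

For an aio request the kernel skips the wait entirely: nfs_direct_wait() returns -EIOCBQUEUED, and the last completing RPC calls aio_complete() from nfs_direct_complete() instead.
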
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index 4ae2f3b33fef..0f583cb16ddb 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -7,11 +7,11 @@
  *
  * There are important applications whose performance or correctness
  * depends on uncached access to file data.  Database clusters
- * (multiple copies of the same instance running on separate hosts) 
+ * (multiple copies of the same instance running on separate hosts)
  * implement their own cache coherency protocol that subsumes file
- * system cache protocols.  Applications that process datasets 
- * considerably larger than the client's memory do not always benefit 
- * from a local cache.  A streaming video server, for instance, has no 
+ * system cache protocols.  Applications that process datasets
+ * considerably larger than the client's memory do not always benefit
+ * from a local cache.  A streaming video server, for instance, has no
  * need to cache the contents of a file.
  *
  * When an application requests uncached I/O, all read and write requests
@@ -34,6 +34,7 @@
  * 08 Jun 2003	Port to 2.5 APIs  --cel
  * 31 Mar 2004	Handle direct I/O without VFS support  --cel
  * 15 Sep 2004	Parallel async reads  --cel
+ * 04 May 2005	support O_DIRECT with aio  --cel
  *
  */
 
@@ -54,10 +55,10 @@
 #include <asm/uaccess.h>
 #include <asm/atomic.h>
 
+#include "iostat.h"
+
 #define NFSDBG_FACILITY		NFSDBG_VFS
-#define MAX_DIRECTIO_SIZE	(4096UL << PAGE_SHIFT)
 
-static void nfs_free_user_pages(struct page **pages, int npages, int do_dirty);
 static kmem_cache_t *nfs_direct_cachep;
 
 /*
@@ -65,38 +66,78 @@ static kmem_cache_t *nfs_direct_cachep;
  */
 struct nfs_direct_req {
 	struct kref		kref;		/* release manager */
-	struct list_head	list;		/* nfs_read_data structs */
-	wait_queue_head_t	wait;		/* wait for i/o completion */
+
+	/* I/O parameters */
+	struct list_head	list,		/* nfs_read/write_data structs */
+				rewrite_list;	/* saved nfs_write_data structs */
+	struct nfs_open_context	*ctx;		/* file open context info */
+	struct kiocb *		iocb;		/* controlling i/o request */
+	struct inode *		inode;		/* target file of i/o */
+	unsigned long		user_addr;	/* location of user's buffer */
+	size_t			user_count;	/* total bytes to move */
+	loff_t			pos;		/* starting offset in file */
 	struct page **		pages;		/* pages in our buffer */
 	unsigned int		npages;		/* count of pages */
-	atomic_t		complete,	/* i/os we're waiting for */
-				count,		/* bytes actually processed */
+
+	/* completion state */
+	spinlock_t		lock;		/* protect completion state */
+	int			outstanding;	/* i/os we're waiting for */
+	ssize_t			count,		/* bytes actually processed */
 				error;		/* any reported error */
+	struct completion	completion;	/* wait for i/o completion */
+
+	/* commit state */
+	struct nfs_write_data *	commit_data;	/* special write_data for commits */
+	int			flags;
+#define NFS_ODIRECT_DO_COMMIT		(1)	/* an unstable reply was received */
+#define NFS_ODIRECT_RESCHED_WRITES	(2)	/* write verification failed */
+	struct nfs_writeverf	verf;		/* unstable write verifier */
 };
 
+static void nfs_direct_write_schedule(struct nfs_direct_req *dreq, int sync);
+static void nfs_direct_write_complete(struct nfs_direct_req *dreq, struct inode *inode);
 
 /**
- * nfs_get_user_pages - find and set up pages underlying user's buffer
- * rw: direction (read or write)
- * user_addr: starting address of this segment of user's buffer
- * count: size of this segment
- * @pages: returned array of page struct pointers underlying user's buffer
+ * nfs_direct_IO - NFS address space operation for direct I/O
+ * @rw: direction (read or write)
+ * @iocb: target I/O control block
+ * @iov: array of vectors that define I/O buffer
+ * @pos: offset in file to begin the operation
+ * @nr_segs: size of iovec array
+ *
+ * The presence of this routine in the address space ops vector means
+ * the NFS client supports direct I/O.  However, we shunt off direct
+ * read and write requests before the VFS gets them, so this method
+ * should never be called.
  */
-static inline int
-nfs_get_user_pages(int rw, unsigned long user_addr, size_t size,
-		struct page ***pages)
+ssize_t nfs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov, loff_t pos, unsigned long nr_segs)
+{
+	struct dentry *dentry = iocb->ki_filp->f_dentry;
+
+	dprintk("NFS: nfs_direct_IO (%s) off/no(%Ld/%lu) EINVAL\n",
+			dentry->d_name.name, (long long) pos, nr_segs);
+
+	return -EINVAL;
+}
+
+static void nfs_free_user_pages(struct page **pages, int npages, int do_dirty)
+{
+	int i;
+	for (i = 0; i < npages; i++) {
+		struct page *page = pages[i];
+		if (do_dirty && !PageCompound(page))
+			set_page_dirty_lock(page);
+		page_cache_release(page);
+	}
+	kfree(pages);
+}
+
+static inline int nfs_get_user_pages(int rw, unsigned long user_addr, size_t size, struct page ***pages)
 {
 	int result = -ENOMEM;
 	unsigned long page_count;
 	size_t array_size;
 
-	/* set an arbitrary limit to prevent type overflow */
-	/* XXX: this can probably be as large as INT_MAX */
-	if (size > MAX_DIRECTIO_SIZE) {
-		*pages = NULL;
-		return -EFBIG;
-	}
-
 	page_count = (user_addr + size + PAGE_SIZE - 1) >> PAGE_SHIFT;
 	page_count -= user_addr >> PAGE_SHIFT;
 
@@ -108,75 +149,117 @@ nfs_get_user_pages(int rw, unsigned long user_addr, size_t size,
 					page_count, (rw == READ), 0,
 					*pages, NULL);
 		up_read(&current->mm->mmap_sem);
-		/*
-		 * If we got fewer pages than expected from get_user_pages(),
-		 * the user buffer runs off the end of a mapping; return EFAULT.
-		 */
-		if (result >= 0 && result < page_count) {
-			nfs_free_user_pages(*pages, result, 0);
+		if (result != page_count) {
+			/*
+			 * If we got fewer pages than expected from
+			 * get_user_pages(), the user buffer runs off the
+			 * end of a mapping; return EFAULT.
+			 */
+			if (result >= 0) {
+				nfs_free_user_pages(*pages, result, 0);
+				result = -EFAULT;
+			} else
+				kfree(*pages);
 			*pages = NULL;
-			result = -EFAULT;
 		}
 	}
 	return result;
 }
 
-/**
- * nfs_free_user_pages - tear down page struct array
- * @pages: array of page struct pointers underlying target buffer
- * @npages: number of pages in the array
- * @do_dirty: dirty the pages as we release them
- */
-static void
-nfs_free_user_pages(struct page **pages, int npages, int do_dirty)
+static inline struct nfs_direct_req *nfs_direct_req_alloc(void)
 {
-	int i;
-	for (i = 0; i < npages; i++) {
-		struct page *page = pages[i];
-		if (do_dirty && !PageCompound(page))
-			set_page_dirty_lock(page);
-		page_cache_release(page);
-	}
-	kfree(pages);
+	struct nfs_direct_req *dreq;
+
+	dreq = kmem_cache_alloc(nfs_direct_cachep, SLAB_KERNEL);
+	if (!dreq)
+		return NULL;
+
+	kref_init(&dreq->kref);
+	init_completion(&dreq->completion);
+	INIT_LIST_HEAD(&dreq->list);
+	INIT_LIST_HEAD(&dreq->rewrite_list);
+	dreq->iocb = NULL;
+	dreq->ctx = NULL;
+	spin_lock_init(&dreq->lock);
+	dreq->outstanding = 0;
+	dreq->count = 0;
+	dreq->error = 0;
+	dreq->flags = 0;
+
+	return dreq;
 }
 
-/**
- * nfs_direct_req_release - release nfs_direct_req structure for direct read
- * @kref: kref object embedded in an nfs_direct_req structure
- *
- */
 static void nfs_direct_req_release(struct kref *kref)
 {
 	struct nfs_direct_req *dreq = container_of(kref, struct nfs_direct_req, kref);
+
+	if (dreq->ctx != NULL)
+		put_nfs_open_context(dreq->ctx);
 	kmem_cache_free(nfs_direct_cachep, dreq);
 }
 
-/**
- * nfs_direct_read_alloc - allocate nfs_read_data structures for direct read
- * @count: count of bytes for the read request
- * @rsize: local rsize setting
+/*
+ * Collects and returns the final error value/byte-count.
+ */
+static ssize_t nfs_direct_wait(struct nfs_direct_req *dreq)
+{
+	ssize_t result = -EIOCBQUEUED;
+
+	/* Async requests don't wait here */
+	if (dreq->iocb)
+		goto out;
+
+	result = wait_for_completion_interruptible(&dreq->completion);
+
+	if (!result)
+		result = dreq->error;
+	if (!result)
+		result = dreq->count;
+
+out:
+	kref_put(&dreq->kref, nfs_direct_req_release);
+	return (ssize_t) result;
+}
+
+/*
+ * We must hold a reference to all the pages in this direct read request
+ * until the RPCs complete.  This could be long *after* we are woken up in
+ * nfs_direct_wait (for instance, if someone hits ^C on a slow server).
  *
+ * In addition, synchronous I/O uses a stack-allocated iocb.  Thus we
+ * can't trust the iocb is still valid here if this is a synchronous
+ * request.  If the waiter is woken prematurely, the iocb is long gone.
+ */
+static void nfs_direct_complete(struct nfs_direct_req *dreq)
+{
+	nfs_free_user_pages(dreq->pages, dreq->npages, 1);
+
+	if (dreq->iocb) {
+		long res = (long) dreq->error;
+		if (!res)
+			res = (long) dreq->count;
+		aio_complete(dreq->iocb, res, 0);
+	}
+	complete_all(&dreq->completion);
+
+	kref_put(&dreq->kref, nfs_direct_req_release);
+}
+
+/*
  * Note we also set the number of requests we have in the dreq when we are
  * done.  This prevents races with I/O completion so we will always wait
  * until all requests have been dispatched and completed.
  */
-static struct nfs_direct_req *nfs_direct_read_alloc(size_t nbytes, unsigned int rsize)
+static struct nfs_direct_req *nfs_direct_read_alloc(size_t nbytes, size_t rsize)
 {
 	struct list_head *list;
 	struct nfs_direct_req *dreq;
-	unsigned int reads = 0;
 	unsigned int rpages = (rsize + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
 
-	dreq = kmem_cache_alloc(nfs_direct_cachep, SLAB_KERNEL);
+	dreq = nfs_direct_req_alloc();
 	if (!dreq)
 		return NULL;
 
-	kref_init(&dreq->kref);
-	init_waitqueue_head(&dreq->wait);
-	INIT_LIST_HEAD(&dreq->list);
-	atomic_set(&dreq->count, 0);
-	atomic_set(&dreq->error, 0);
-
 	list = &dreq->list;
 	for(;;) {
 		struct nfs_read_data *data = nfs_readdata_alloc(rpages);
@@ -196,72 +279,70 @@ static struct nfs_direct_req *nfs_direct_read_alloc(size_t nbytes, unsigned int
 		list_add(&data->pages, list);
 
 		data->req = (struct nfs_page *) dreq;
-		reads++;
+		dreq->outstanding++;
 		if (nbytes <= rsize)
 			break;
 		nbytes -= rsize;
 	}
 	kref_get(&dreq->kref);
-	atomic_set(&dreq->complete, reads);
 	return dreq;
 }
 
-/**
- * nfs_direct_read_result - handle a read reply for a direct read request
- * @data: address of NFS READ operation control block
- * @status: status of this NFS READ operation
- *
- * We must hold a reference to all the pages in this direct read request
- * until the RPCs complete.  This could be long *after* we are woken up in
- * nfs_direct_read_wait (for instance, if someone hits ^C on a slow server).
- */
-static void nfs_direct_read_result(struct nfs_read_data *data, int status)
+static void nfs_direct_read_result(struct rpc_task *task, void *calldata)
 {
+	struct nfs_read_data *data = calldata;
 	struct nfs_direct_req *dreq = (struct nfs_direct_req *) data->req;
 
-	if (likely(status >= 0))
-		atomic_add(data->res.count, &dreq->count);
+	if (nfs_readpage_result(task, data) != 0)
+		return;
+
+	spin_lock(&dreq->lock);
+
+	if (likely(task->tk_status >= 0))
+		dreq->count += data->res.count;
 	else
-		atomic_set(&dreq->error, status);
+		dreq->error = task->tk_status;
 
-	if (unlikely(atomic_dec_and_test(&dreq->complete))) {
-		nfs_free_user_pages(dreq->pages, dreq->npages, 1);
-		wake_up(&dreq->wait);
-		kref_put(&dreq->kref, nfs_direct_req_release);
+	if (--dreq->outstanding) {
+		spin_unlock(&dreq->lock);
+		return;
 	}
+
+	spin_unlock(&dreq->lock);
+	nfs_direct_complete(dreq);
 }
 
-/**
- * nfs_direct_read_schedule - dispatch NFS READ operations for a direct read
- * @dreq: address of nfs_direct_req struct for this request
- * @inode: target inode
- * @ctx: target file open context
- * @user_addr: starting address of this segment of user's buffer
- * @count: size of this segment
- * @file_offset: offset in file to begin the operation
- *
+static const struct rpc_call_ops nfs_read_direct_ops = {
+	.rpc_call_done = nfs_direct_read_result,
+	.rpc_release = nfs_readdata_release,
+};
+
+/*
  * For each nfs_read_data struct that was allocated on the list, dispatch
  * an NFS READ operation
  */
-static void nfs_direct_read_schedule(struct nfs_direct_req *dreq,
-		struct inode *inode, struct nfs_open_context *ctx,
-		unsigned long user_addr, size_t count, loff_t file_offset)
+static void nfs_direct_read_schedule(struct nfs_direct_req *dreq)
 {
+	struct nfs_open_context *ctx = dreq->ctx;
+	struct inode *inode = ctx->dentry->d_inode;
 	struct list_head *list = &dreq->list;
 	struct page **pages = dreq->pages;
+	size_t count = dreq->user_count;
+	loff_t pos = dreq->pos;
+	size_t rsize = NFS_SERVER(inode)->rsize;
 	unsigned int curpage, pgbase;
-	unsigned int rsize = NFS_SERVER(inode)->rsize;
 
 	curpage = 0;
-	pgbase = user_addr & ~PAGE_MASK;
+	pgbase = dreq->user_addr & ~PAGE_MASK;
 	do {
 		struct nfs_read_data *data;
-		unsigned int bytes;
+		size_t bytes;
 
 		bytes = rsize;
 		if (count < rsize)
 			bytes = count;
 
+		BUG_ON(list_empty(list));
 		data = list_entry(list->next, struct nfs_read_data, pages);
 		list_del_init(&data->pages);
 
@@ -269,7 +350,7 @@ static void nfs_direct_read_schedule(struct nfs_direct_req *dreq,
 		data->cred = ctx->cred;
 		data->args.fh = NFS_FH(inode);
 		data->args.context = ctx;
-		data->args.offset = file_offset;
+		data->args.offset = pos;
 		data->args.pgbase = pgbase;
 		data->args.pages = &pages[curpage];
 		data->args.count = bytes;
@@ -277,77 +358,38 @@ static void nfs_direct_read_schedule(struct nfs_direct_req *dreq,
 		data->res.eof = 0;
 		data->res.count = bytes;
 
+		rpc_init_task(&data->task, NFS_CLIENT(inode), RPC_TASK_ASYNC,
+				&nfs_read_direct_ops, data);
 		NFS_PROTO(inode)->read_setup(data);
 
 		data->task.tk_cookie = (unsigned long) inode;
-		data->complete = nfs_direct_read_result;
 
 		lock_kernel();
 		rpc_execute(&data->task);
 		unlock_kernel();
 
-		dfprintk(VFS, "NFS: %4d initiated direct read call (req %s/%Ld, %u bytes @ offset %Lu)\n",
+		dfprintk(VFS, "NFS: %5u initiated direct read call (req %s/%Ld, %zu bytes @ offset %Lu)\n",
 				data->task.tk_pid,
 				inode->i_sb->s_id,
 				(long long)NFS_FILEID(inode),
 				bytes,
 				(unsigned long long)data->args.offset);
 
-		file_offset += bytes;
+		pos += bytes;
 		pgbase += bytes;
 		curpage += pgbase >> PAGE_SHIFT;
 		pgbase &= ~PAGE_MASK;
 
 		count -= bytes;
 	} while (count != 0);
+	BUG_ON(!list_empty(list));
 }
 
-/**
- * nfs_direct_read_wait - wait for I/O completion for direct reads
- * @dreq: request on which we are to wait
- * @intr: whether or not this wait can be interrupted
- *
- * Collects and returns the final error value/byte-count.
- */
-static ssize_t nfs_direct_read_wait(struct nfs_direct_req *dreq, int intr)
-{
-	int result = 0;
-
-	if (intr) {
-		result = wait_event_interruptible(dreq->wait,
-					(atomic_read(&dreq->complete) == 0));
-	} else {
-		wait_event(dreq->wait, (atomic_read(&dreq->complete) == 0));
-	}
-
-	if (!result)
-		result = atomic_read(&dreq->error);
-	if (!result)
-		result = atomic_read(&dreq->count);
-
-	kref_put(&dreq->kref, nfs_direct_req_release);
-	return (ssize_t) result;
-}
-
-/**
- * nfs_direct_read_seg - Read in one iov segment.  Generate separate
- *                        read RPCs for each "rsize" bytes.
- * @inode: target inode
- * @ctx: target file open context
- * @user_addr: starting address of this segment of user's buffer
- * @count: size of this segment
- * @file_offset: offset in file to begin the operation
- * @pages: array of addresses of page structs defining user's buffer
- * @nr_pages: number of pages in the array
- *
- */
-static ssize_t nfs_direct_read_seg(struct inode *inode,
-		struct nfs_open_context *ctx, unsigned long user_addr,
-		size_t count, loff_t file_offset, struct page **pages,
-		unsigned int nr_pages)
+static ssize_t nfs_direct_read(struct kiocb *iocb, unsigned long user_addr, size_t count, loff_t pos, struct page **pages, unsigned int nr_pages)
 {
 	ssize_t result;
 	sigset_t oldset;
+	struct inode *inode = iocb->ki_filp->f_mapping->host;
 	struct rpc_clnt *clnt = NFS_CLIENT(inode);
 	struct nfs_direct_req *dreq;
 
@@ -355,284 +397,350 @@ static ssize_t nfs_direct_read_seg(struct inode *inode,
 	if (!dreq)
 		return -ENOMEM;
 
+	dreq->user_addr = user_addr;
+	dreq->user_count = count;
+	dreq->pos = pos;
 	dreq->pages = pages;
 	dreq->npages = nr_pages;
+	dreq->inode = inode;
+	dreq->ctx = get_nfs_open_context((struct nfs_open_context *)iocb->ki_filp->private_data);
+	if (!is_sync_kiocb(iocb))
+		dreq->iocb = iocb;
 
+	nfs_add_stats(inode, NFSIOS_DIRECTREADBYTES, count);
 	rpc_clnt_sigmask(clnt, &oldset);
-	nfs_direct_read_schedule(dreq, inode, ctx, user_addr, count,
-				 file_offset);
-	result = nfs_direct_read_wait(dreq, clnt->cl_intr);
+	nfs_direct_read_schedule(dreq);
+	result = nfs_direct_wait(dreq);
 	rpc_clnt_sigunmask(clnt, &oldset);
 
 	return result;
 }
 
-/**
- * nfs_direct_read - For each iov segment, map the user's buffer
- * then generate read RPCs.
- * @inode: target inode
- * @ctx: target file open context
- * @iov: array of vectors that define I/O buffer
- * file_offset: offset in file to begin the operation
- * nr_segs: size of iovec array
- *
- * We've already pushed out any non-direct writes so that this read
- * will see them when we read from the server.
- */
-static ssize_t
-nfs_direct_read(struct inode *inode, struct nfs_open_context *ctx,
-		const struct iovec *iov, loff_t file_offset,
-		unsigned long nr_segs)
+static void nfs_direct_free_writedata(struct nfs_direct_req *dreq)
 {
-	ssize_t tot_bytes = 0;
-	unsigned long seg = 0;
-
-	while ((seg < nr_segs) && (tot_bytes >= 0)) {
-		ssize_t result;
-		int page_count;
-		struct page **pages;
-		const struct iovec *vec = &iov[seg++];
-		unsigned long user_addr = (unsigned long) vec->iov_base;
-		size_t size = vec->iov_len;
-
-		page_count = nfs_get_user_pages(READ, user_addr, size, &pages);
-		if (page_count < 0) {
-			nfs_free_user_pages(pages, 0, 0);
-			if (tot_bytes > 0)
-				break;
-			return page_count;
-		}
+	list_splice_init(&dreq->rewrite_list, &dreq->list);
+	while (!list_empty(&dreq->list)) {
+		struct nfs_write_data *data = list_entry(dreq->list.next, struct nfs_write_data, pages);
+		list_del(&data->pages);
+		nfs_writedata_release(data);
+	}
+}
 
-		result = nfs_direct_read_seg(inode, ctx, user_addr, size,
-					     file_offset, pages, page_count);
+#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
+static void nfs_direct_write_reschedule(struct nfs_direct_req *dreq)
+{
+	struct list_head *pos;
 
-		if (result <= 0) {
-			if (tot_bytes > 0)
-				break;
-			return result;
-		}
-		tot_bytes += result;
-		file_offset += result;
-		if (result < size)
-			break;
+	list_splice_init(&dreq->rewrite_list, &dreq->list);
+	list_for_each(pos, &dreq->list)
+		dreq->outstanding++;
+	dreq->count = 0;
+
+	nfs_direct_write_schedule(dreq, FLUSH_STABLE);
+}
+
+static void nfs_direct_commit_result(struct rpc_task *task, void *calldata)
+{
+	struct nfs_write_data *data = calldata;
+	struct nfs_direct_req *dreq = (struct nfs_direct_req *) data->req;
+
+	/* Call the NFS version-specific code */
+	if (NFS_PROTO(data->inode)->commit_done(task, data) != 0)
+		return;
+	if (unlikely(task->tk_status < 0)) {
+		dreq->error = task->tk_status;
+		dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
+	}
+	if (memcmp(&dreq->verf, &data->verf, sizeof(data->verf))) {
+		dprintk("NFS: %5u commit verify failed\n", task->tk_pid);
+		dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
 	}
 
-	return tot_bytes;
+	dprintk("NFS: %5u commit returned %d\n", task->tk_pid, task->tk_status);
+	nfs_direct_write_complete(dreq, data->inode);
 }
 
-/**
- * nfs_direct_write_seg - Write out one iov segment.  Generate separate
- * write RPCs for each "wsize" bytes, then commit.
- * @inode: target inode
- * @ctx: target file open context
- * user_addr: starting address of this segment of user's buffer
- * count: size of this segment
- * file_offset: offset in file to begin the operation
- * @pages: array of addresses of page structs defining user's buffer
- * nr_pages: size of pages array
- */
-static ssize_t nfs_direct_write_seg(struct inode *inode,
-		struct nfs_open_context *ctx, unsigned long user_addr,
-		size_t count, loff_t file_offset, struct page **pages,
-		int nr_pages)
+static const struct rpc_call_ops nfs_commit_direct_ops = {
+	.rpc_call_done = nfs_direct_commit_result,
+	.rpc_release = nfs_commit_release,
+};
+
+static void nfs_direct_commit_schedule(struct nfs_direct_req *dreq)
 {
-	const unsigned int wsize = NFS_SERVER(inode)->wsize;
-	size_t request;
-	int curpage, need_commit;
-	ssize_t result, tot_bytes;
-	struct nfs_writeverf first_verf;
-	struct nfs_write_data *wdata;
-
-	wdata = nfs_writedata_alloc(NFS_SERVER(inode)->wpages);
-	if (!wdata)
-		return -ENOMEM;
+	struct nfs_write_data *data = dreq->commit_data;
+	struct rpc_task *task = &data->task;
 
-	wdata->inode = inode;
-	wdata->cred = ctx->cred;
-	wdata->args.fh = NFS_FH(inode);
-	wdata->args.context = ctx;
-	wdata->args.stable = NFS_UNSTABLE;
-	if (IS_SYNC(inode) || NFS_PROTO(inode)->version == 2 || count <= wsize)
-		wdata->args.stable = NFS_FILE_SYNC;
-	wdata->res.fattr = &wdata->fattr;
-	wdata->res.verf = &wdata->verf;
+	data->inode = dreq->inode;
+	data->cred = dreq->ctx->cred;
 
-	nfs_begin_data_update(inode);
-retry:
-	need_commit = 0;
-	tot_bytes = 0;
-	curpage = 0;
-	request = count;
-	wdata->args.pgbase = user_addr & ~PAGE_MASK;
-	wdata->args.offset = file_offset;
-	do {
-		wdata->args.count = request;
-		if (wdata->args.count > wsize)
-			wdata->args.count = wsize;
-		wdata->args.pages = &pages[curpage];
+	data->args.fh = NFS_FH(data->inode);
+	data->args.offset = dreq->pos;
+	data->args.count = dreq->user_count;
+	data->res.count = 0;
+	data->res.fattr = &data->fattr;
+	data->res.verf = &data->verf;
 
-		dprintk("NFS: direct write: c=%u o=%Ld ua=%lu, pb=%u, cp=%u\n",
-			wdata->args.count, (long long) wdata->args.offset,
-			user_addr + tot_bytes, wdata->args.pgbase, curpage);
+	rpc_init_task(&data->task, NFS_CLIENT(dreq->inode), RPC_TASK_ASYNC,
+				&nfs_commit_direct_ops, data);
+	NFS_PROTO(data->inode)->commit_setup(data, 0);
 
-		lock_kernel();
-		result = NFS_PROTO(inode)->write(wdata);
-		unlock_kernel();
+	data->task.tk_priority = RPC_PRIORITY_NORMAL;
+	data->task.tk_cookie = (unsigned long)data->inode;
+	/* Note: task.tk_ops->rpc_release will free dreq->commit_data */
+	dreq->commit_data = NULL;
 
-		if (result <= 0) {
-			if (tot_bytes > 0)
-				break;
-			goto out;
-		}
+	dprintk("NFS: %5u initiated commit call\n", task->tk_pid);
 
-		if (tot_bytes == 0)
-			memcpy(&first_verf.verifier, &wdata->verf.verifier,
-				sizeof(first_verf.verifier));
-		if (wdata->verf.committed != NFS_FILE_SYNC) {
-			need_commit = 1;
-			if (memcmp(&first_verf.verifier, &wdata->verf.verifier,
-					sizeof(first_verf.verifier)))
-				goto sync_retry;
-		}
+	lock_kernel();
+	rpc_execute(&data->task);
+	unlock_kernel();
+}
 
-		tot_bytes += result;
+static void nfs_direct_write_complete(struct nfs_direct_req *dreq, struct inode *inode)
+{
+	int flags = dreq->flags;
 
-		/* in case of a short write: stop now, let the app recover */
-		if (result < wdata->args.count)
+	dreq->flags = 0;
+	switch (flags) {
+		case NFS_ODIRECT_DO_COMMIT:
+			nfs_direct_commit_schedule(dreq);
 			break;
+		case NFS_ODIRECT_RESCHED_WRITES:
+			nfs_direct_write_reschedule(dreq);
+			break;
+		default:
+			nfs_end_data_update(inode);
+			if (dreq->commit_data != NULL)
+				nfs_commit_free(dreq->commit_data);
+			nfs_direct_free_writedata(dreq);
+			nfs_direct_complete(dreq);
+	}
+}
 
-		wdata->args.offset += result;
-		wdata->args.pgbase += result;
-		curpage += wdata->args.pgbase >> PAGE_SHIFT;
-		wdata->args.pgbase &= ~PAGE_MASK;
-		request -= result;
-	} while (request != 0);
+static void nfs_alloc_commit_data(struct nfs_direct_req *dreq)
+{
+	dreq->commit_data = nfs_commit_alloc(0);
+	if (dreq->commit_data != NULL)
+		dreq->commit_data->req = (struct nfs_page *) dreq;
+}
+#else
+static inline void nfs_alloc_commit_data(struct nfs_direct_req *dreq)
+{
+	dreq->commit_data = NULL;
+}
 
-	/*
-	 * Commit data written so far, even in the event of an error
-	 */
-	if (need_commit) {
-		wdata->args.count = tot_bytes;
-		wdata->args.offset = file_offset;
+static void nfs_direct_write_complete(struct nfs_direct_req *dreq, struct inode *inode)
+{
+	nfs_end_data_update(inode);
+	nfs_direct_free_writedata(dreq);
+	nfs_direct_complete(dreq);
+}
+#endif
 
-		lock_kernel();
-		result = NFS_PROTO(inode)->commit(wdata);
-		unlock_kernel();
+static struct nfs_direct_req *nfs_direct_write_alloc(size_t nbytes, size_t wsize)
+{
+	struct list_head *list;
+	struct nfs_direct_req *dreq;
+	unsigned int wpages = (wsize + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+
+	dreq = nfs_direct_req_alloc();
+	if (!dreq)
+		return NULL;
+
+	list = &dreq->list;
+	for(;;) {
+		struct nfs_write_data *data = nfs_writedata_alloc(wpages);
 
-		if (result < 0 || memcmp(&first_verf.verifier,
-					&wdata->verf.verifier,
-					sizeof(first_verf.verifier)) != 0)
-			goto sync_retry;
+		if (unlikely(!data)) {
+			while (!list_empty(list)) {
+				data = list_entry(list->next,
+						struct nfs_write_data, pages);
+				list_del(&data->pages);
+				nfs_writedata_free(data);
+			}
+			kref_put(&dreq->kref, nfs_direct_req_release);
+			return NULL;
+		}
+
+		INIT_LIST_HEAD(&data->pages);
+		list_add(&data->pages, list);
+
+		data->req = (struct nfs_page *) dreq;
+		dreq->outstanding++;
+		if (nbytes <= wsize)
+			break;
+		nbytes -= wsize;
 	}
-	result = tot_bytes;
 
-out:
-	nfs_end_data_update(inode);
-	nfs_writedata_free(wdata);
-	return result;
+	nfs_alloc_commit_data(dreq);
 
-sync_retry:
-	wdata->args.stable = NFS_FILE_SYNC;
-	goto retry;
+	kref_get(&dreq->kref);
+	return dreq;
 }
 
-/**
- * nfs_direct_write - For each iov segment, map the user's buffer
- * then generate write and commit RPCs.
- * @inode: target inode
- * @ctx: target file open context
- * @iov: array of vectors that define I/O buffer
- * file_offset: offset in file to begin the operation
- * nr_segs: size of iovec array
- *
- * Upon return, generic_file_direct_IO invalidates any cached pages
- * that non-direct readers might access, so they will pick up these
- * writes immediately.
- */
-static ssize_t nfs_direct_write(struct inode *inode,
-		struct nfs_open_context *ctx, const struct iovec *iov,
-		loff_t file_offset, unsigned long nr_segs)
+static void nfs_direct_write_result(struct rpc_task *task, void *calldata)
 {
-	ssize_t tot_bytes = 0;
-	unsigned long seg = 0;
-
-	while ((seg < nr_segs) && (tot_bytes >= 0)) {
-		ssize_t result;
-		int page_count;
-		struct page **pages;
-		const struct iovec *vec = &iov[seg++];
-		unsigned long user_addr = (unsigned long) vec->iov_base;
-		size_t size = vec->iov_len;
-
-		page_count = nfs_get_user_pages(WRITE, user_addr, size, &pages);
-		if (page_count < 0) {
-			nfs_free_user_pages(pages, 0, 0);
-			if (tot_bytes > 0)
-				break;
-			return page_count;
-		}
+	struct nfs_write_data *data = calldata;
+	struct nfs_direct_req *dreq = (struct nfs_direct_req *) data->req;
+	int status = task->tk_status;
+
+	if (nfs_writeback_done(task, data) != 0)
+		return;
+
+	spin_lock(&dreq->lock);
 
-		result = nfs_direct_write_seg(inode, ctx, user_addr, size,
-					      file_offset, pages, page_count);
-		nfs_free_user_pages(pages, page_count, 0);
+	if (likely(status >= 0))
+		dreq->count += data->res.count;
+	else
+		dreq->error = task->tk_status;
 
-		if (result <= 0) {
-			if (tot_bytes > 0)
+	if (data->res.verf->committed != NFS_FILE_SYNC) {
+		switch (dreq->flags) {
+			case 0:
+				memcpy(&dreq->verf, &data->verf, sizeof(dreq->verf));
+				dreq->flags = NFS_ODIRECT_DO_COMMIT;
 				break;
-			return result;
+			case NFS_ODIRECT_DO_COMMIT:
+				if (memcmp(&dreq->verf, &data->verf, sizeof(dreq->verf))) {
+					dprintk("NFS: %5u write verify failed\n", task->tk_pid);
+					dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
+				}
 		}
-		tot_bytes += result;
-		file_offset += result;
-		if (result < size)
-			break;
 	}
-	return tot_bytes;
+	/* In case we have to resend */
+	data->args.stable = NFS_FILE_SYNC;
+
+	spin_unlock(&dreq->lock);
 }
 
-/**
- * nfs_direct_IO - NFS address space operation for direct I/O
- * rw: direction (read or write)
- * @iocb: target I/O control block
- * @iov: array of vectors that define I/O buffer
- * file_offset: offset in file to begin the operation
- * nr_segs: size of iovec array
- *
+/*
+ * NB: Return the value of the first error return code.  Subsequent
+ * errors after the first one are ignored.
  */
-ssize_t
-nfs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
-	loff_t file_offset, unsigned long nr_segs)
+static void nfs_direct_write_release(void *calldata)
 {
-	ssize_t result = -EINVAL;
-	struct file *file = iocb->ki_filp;
-	struct nfs_open_context *ctx;
-	struct dentry *dentry = file->f_dentry;
-	struct inode *inode = dentry->d_inode;
+	struct nfs_write_data *data = calldata;
+	struct nfs_direct_req *dreq = (struct nfs_direct_req *) data->req;
 
-	/*
-	 * No support for async yet
-	 */
-	if (!is_sync_kiocb(iocb))
-		return result;
-
-	ctx = (struct nfs_open_context *)file->private_data;
-	switch (rw) {
-	case READ:
-		dprintk("NFS: direct_IO(read) (%s) off/no(%Lu/%lu)\n",
-				dentry->d_name.name, file_offset, nr_segs);
-
-		result = nfs_direct_read(inode, ctx, iov,
-				file_offset, nr_segs);
-		break;
-	case WRITE:
-		dprintk("NFS: direct_IO(write) (%s) off/no(%Lu/%lu)\n",
-				dentry->d_name.name, file_offset, nr_segs);
-
-		result = nfs_direct_write(inode, ctx, iov,
-				file_offset, nr_segs);
-		break;
-	default:
-		break;
+	spin_lock(&dreq->lock);
+	if (--dreq->outstanding) {
+		spin_unlock(&dreq->lock);
+		return;
 	}
+	spin_unlock(&dreq->lock);
+
+	nfs_direct_write_complete(dreq, data->inode);
+}
+
+static const struct rpc_call_ops nfs_write_direct_ops = {
+	.rpc_call_done = nfs_direct_write_result,
+	.rpc_release = nfs_direct_write_release,
+};
+
+/*
+ * For each nfs_write_data struct that was allocated on the list, dispatch
+ * an NFS WRITE operation
+ */
+static void nfs_direct_write_schedule(struct nfs_direct_req *dreq, int sync)
+{
+	struct nfs_open_context *ctx = dreq->ctx;
+	struct inode *inode = ctx->dentry->d_inode;
+	struct list_head *list = &dreq->list;
+	struct page **pages = dreq->pages;
+	size_t count = dreq->user_count;
+	loff_t pos = dreq->pos;
+	size_t wsize = NFS_SERVER(inode)->wsize;
+	unsigned int curpage, pgbase;
+
+	curpage = 0;
+	pgbase = dreq->user_addr & ~PAGE_MASK;
+	do {
+		struct nfs_write_data *data;
+		size_t bytes;
+
+		bytes = wsize;
+		if (count < wsize)
+			bytes = count;
+
+		BUG_ON(list_empty(list));
+		data = list_entry(list->next, struct nfs_write_data, pages);
+		list_move_tail(&data->pages, &dreq->rewrite_list);
+
+		data->inode = inode;
+		data->cred = ctx->cred;
+		data->args.fh = NFS_FH(inode);
+		data->args.context = ctx;
+		data->args.offset = pos;
+		data->args.pgbase = pgbase;
+		data->args.pages = &pages[curpage];
+		data->args.count = bytes;
+		data->res.fattr = &data->fattr;
+		data->res.count = bytes;
+		data->res.verf = &data->verf;
+
+		rpc_init_task(&data->task, NFS_CLIENT(inode), RPC_TASK_ASYNC,
+				&nfs_write_direct_ops, data);
+		NFS_PROTO(inode)->write_setup(data, sync);
+
+		data->task.tk_priority = RPC_PRIORITY_NORMAL;
+		data->task.tk_cookie = (unsigned long) inode;
+
+		lock_kernel();
+		rpc_execute(&data->task);
+		unlock_kernel();
+
+		dfprintk(VFS, "NFS: %5u initiated direct write call (req %s/%Ld, %zu bytes @ offset %Lu)\n",
+				data->task.tk_pid,
+				inode->i_sb->s_id,
+				(long long)NFS_FILEID(inode),
+				bytes,
+				(unsigned long long)data->args.offset);
+
+		pos += bytes;
+		pgbase += bytes;
+		curpage += pgbase >> PAGE_SHIFT;
+		pgbase &= ~PAGE_MASK;
+
+		count -= bytes;
+	} while (count != 0);
+	BUG_ON(!list_empty(list));
+}
+
+static ssize_t nfs_direct_write(struct kiocb *iocb, unsigned long user_addr, size_t count, loff_t pos, struct page **pages, int nr_pages)
+{
+	ssize_t result;
+	sigset_t oldset;
+	struct inode *inode = iocb->ki_filp->f_mapping->host;
+	struct rpc_clnt *clnt = NFS_CLIENT(inode);
+	struct nfs_direct_req *dreq;
+	size_t wsize = NFS_SERVER(inode)->wsize;
+	int sync = 0;
+
+	dreq = nfs_direct_write_alloc(count, wsize);
+	if (!dreq)
+		return -ENOMEM;
+	if (dreq->commit_data == NULL || count < wsize)
+		sync = FLUSH_STABLE;
+
+	dreq->user_addr = user_addr;
+	dreq->user_count = count;
+	dreq->pos = pos;
+	dreq->pages = pages;
+	dreq->npages = nr_pages;
+	dreq->inode = inode;
+	dreq->ctx = get_nfs_open_context((struct nfs_open_context *)iocb->ki_filp->private_data);
+	if (!is_sync_kiocb(iocb))
+		dreq->iocb = iocb;
+
+	nfs_add_stats(inode, NFSIOS_DIRECTWRITTENBYTES, count);
+
+	nfs_begin_data_update(inode);
+
+	rpc_clnt_sigmask(clnt, &oldset);
+	nfs_direct_write_schedule(dreq, sync);
+	result = nfs_direct_wait(dreq);
+	rpc_clnt_sigunmask(clnt, &oldset);
+
 	return result;
 }
 
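Before the wrapper hunks below, it is worth spelling out how the rewritten write path chooses among COMMIT, rewrite, and plain completion: the first unstable WRITE reply caches the server's verifier in dreq->verf and sets NFS_ODIRECT_DO_COMMIT; any later mismatch, whether in a WRITE or in the COMMIT reply, sets NFS_ODIRECT_RESCHED_WRITES, and nfs_direct_write_complete() then resends everything with FLUSH_STABLE. Here is a minimal standalone sketch of that verifier bookkeeping, using simplified stand-in types rather than the kernel's nfs_writeverf and RPC structures.

/* Standalone sketch of the unstable-write verifier bookkeeping added
 * in the hunks above; all types are simplified stand-ins. */
#include <stdio.h>
#include <string.h>

enum { DO_COMMIT = 1, RESCHED_WRITES = 2 };	/* mirrors NFS_ODIRECT_* */

struct verf { unsigned char data[8]; };		/* stands in for nfs_writeverf */

struct wreq {
	int flags;
	struct verf verf;	/* verifier from the first unstable reply */
};

/* Mirrors the switch in nfs_direct_write_result(): record the first
 * verifier, then demand that every later reply matches it. */
static void note_unstable_reply(struct wreq *req, const struct verf *v)
{
	switch (req->flags) {
	case 0:
		memcpy(&req->verf, v, sizeof(req->verf));
		req->flags = DO_COMMIT;		/* arm a COMMIT for later */
		break;
	case DO_COMMIT:
		if (memcmp(&req->verf, v, sizeof(req->verf)))
			req->flags = RESCHED_WRITES;	/* verifier changed */
		break;
	}
}

int main(void)
{
	struct wreq req = { 0 };
	struct verf boot1 = { { 1 } }, boot2 = { { 2 } };

	note_unstable_reply(&req, &boot1);	/* first reply: commit later */
	note_unstable_reply(&req, &boot1);	/* same verifier: still commit */
	note_unstable_reply(&req, &boot2);	/* changed: reschedule writes */
	printf("flags = %d (2 == reschedule)\n", req.flags);
	return 0;
}

A changed verifier means the server rebooted and may have discarded uncommitted data, so a COMMIT would be unsafe; the only correct recovery is to rewrite the data with FLUSH_STABLE, which is exactly what nfs_direct_write_reschedule() does.
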
@@ -640,49 +748,40 @@ nfs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
  * nfs_file_direct_read - file direct read operation for NFS files
  * @iocb: target I/O control block
  * @buf: user's buffer into which to read data
- * count: number of bytes to read
- * pos: byte offset in file where reading starts
+ * @count: number of bytes to read
+ * @pos: byte offset in file where reading starts
  *
  * We use this function for direct reads instead of calling
  * generic_file_aio_read() in order to avoid gfar's check to see if
  * the request starts before the end of the file.  For that check
  * to work, we must generate a GETATTR before each direct read, and
  * even then there is a window between the GETATTR and the subsequent
- * READ where the file size could change.  So our preference is simply
+ * READ where the file size could change.  Our preference is simply
  * to do all reads the application wants, and the server will take
  * care of managing the end of file boundary.
  *
  * This function also eliminates unnecessarily updating the file's
  * atime locally, as the NFS server sets the file's atime, and this
  * client must read the updated atime from the server back into its
  * cache.
  */
-ssize_t
-nfs_file_direct_read(struct kiocb *iocb, char __user *buf, size_t count, loff_t pos)
+ssize_t nfs_file_direct_read(struct kiocb *iocb, char __user *buf, size_t count, loff_t pos)
 {
 	ssize_t retval = -EINVAL;
-	loff_t *ppos = &iocb->ki_pos;
+	int page_count;
+	struct page **pages;
 	struct file *file = iocb->ki_filp;
-	struct nfs_open_context *ctx =
-		(struct nfs_open_context *) file->private_data;
 	struct address_space *mapping = file->f_mapping;
-	struct inode *inode = mapping->host;
-	struct iovec iov = {
-		.iov_base = buf,
-		.iov_len = count,
-	};
 
 	dprintk("nfs: direct read(%s/%s, %lu@%Ld)\n",
 		file->f_dentry->d_parent->d_name.name,
 		file->f_dentry->d_name.name,
 		(unsigned long) count, (long long) pos);
 
-	if (!is_sync_kiocb(iocb))
-		goto out;
 	if (count < 0)
 		goto out;
 	retval = -EFAULT;
-	if (!access_ok(VERIFY_WRITE, iov.iov_base, iov.iov_len))
+	if (!access_ok(VERIFY_WRITE, buf, count))
 		goto out;
 	retval = 0;
 	if (!count)
@@ -692,9 +791,16 @@ nfs_file_direct_read(struct kiocb *iocb, char __user *buf, size_t count, loff_t
 	if (retval)
 		goto out;
 
-	retval = nfs_direct_read(inode, ctx, &iov, pos, 1);
+	retval = nfs_get_user_pages(READ, (unsigned long) buf,
+						count, &pages);
+	if (retval < 0)
+		goto out;
+	page_count = retval;
+
+	retval = nfs_direct_read(iocb, (unsigned long) buf, count, pos,
+						pages, page_count);
 	if (retval > 0)
-		*ppos = pos + retval;
+		iocb->ki_pos = pos + retval;
 
 out:
 	return retval;
@@ -704,8 +810,8 @@ out:
  * nfs_file_direct_write - file direct write operation for NFS files
  * @iocb: target I/O control block
  * @buf: user's buffer from which to write data
- * count: number of bytes to write
- * pos: byte offset in file where writing starts
+ * @count: number of bytes to write
+ * @pos: byte offset in file where writing starts
  *
  * We use this function for direct writes instead of calling
  * generic_file_aio_write() in order to avoid taking the inode
@@ -725,28 +831,19 @@ out:
  * Note that O_APPEND is not supported for NFS direct writes, as there
  * is no atomic O_APPEND write facility in the NFS protocol.
  */
-ssize_t
-nfs_file_direct_write(struct kiocb *iocb, const char __user *buf, size_t count, loff_t pos)
+ssize_t nfs_file_direct_write(struct kiocb *iocb, const char __user *buf, size_t count, loff_t pos)
 {
 	ssize_t retval;
+	int page_count;
+	struct page **pages;
 	struct file *file = iocb->ki_filp;
-	struct nfs_open_context *ctx =
-		(struct nfs_open_context *) file->private_data;
 	struct address_space *mapping = file->f_mapping;
-	struct inode *inode = mapping->host;
-	struct iovec iov = {
-		.iov_base = (char __user *)buf,
-	};
 
 	dfprintk(VFS, "nfs: direct write(%s/%s, %lu@%Ld)\n",
 		file->f_dentry->d_parent->d_name.name,
 		file->f_dentry->d_name.name,
 		(unsigned long) count, (long long) pos);
 
-	retval = -EINVAL;
-	if (!is_sync_kiocb(iocb))
-		goto out;
-
 	retval = generic_write_checks(file, &pos, &count, 0);
 	if (retval)
 		goto out;
@@ -757,19 +854,35 @@ nfs_file_direct_write(struct kiocb *iocb, const char __user *buf, size_t count,
 	retval = 0;
 	if (!count)
 		goto out;
-	iov.iov_len = count,
 
 	retval = -EFAULT;
-	if (!access_ok(VERIFY_READ, iov.iov_base, iov.iov_len))
+	if (!access_ok(VERIFY_READ, buf, count))
 		goto out;
 
 	retval = nfs_sync_mapping(mapping);
 	if (retval)
 		goto out;
 
-	retval = nfs_direct_write(inode, ctx, &iov, pos, 1);
+	retval = nfs_get_user_pages(WRITE, (unsigned long) buf,
+						count, &pages);
+	if (retval < 0)
+		goto out;
+	page_count = retval;
+
+	retval = nfs_direct_write(iocb, (unsigned long) buf, count,
+					pos, pages, page_count);
+
+	/*
+	 * XXX: nfs_end_data_update() already ensures this file's
+	 *      cached data is subsequently invalidated.  Do we really
+	 *      need to call invalidate_inode_pages2() again here?
+	 *
+	 *      For aio writes, this invalidation will almost certainly
+	 *      occur before the writes complete.  Kind of racey.
+	 */
 	if (mapping->nrpages)
 		invalidate_inode_pages2(mapping);
+
 	if (retval > 0)
 		iocb->ki_pos = pos + retval;
 
@@ -777,6 +890,10 @@ out:
 	return retval;
 }
 
+/**
+ * nfs_init_directcache - create a slab cache for nfs_direct_req structures
+ *
+ */
 int nfs_init_directcache(void)
 {
 	nfs_direct_cachep = kmem_cache_create("nfs_direct_cache",
@@ -790,6 +907,10 @@ int nfs_init_directcache(void)
 	return 0;
 }
 
+/**
+ * nfs_destroy_directcache - destroy the slab cache for nfs_direct_req structures
+ *
+ */
 void nfs_destroy_directcache(void)
 {
 	if (kmem_cache_destroy(nfs_direct_cachep))