Diffstat (limited to 'fs/nfs/direct.c')
-rw-r--r--  fs/nfs/direct.c | 435
1 file changed, 189 insertions, 246 deletions
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index 3c72b0c07283..377839bed172 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -38,7 +38,6 @@
  *
  */
 
-#include <linux/config.h>
 #include <linux/errno.h>
 #include <linux/sched.h>
 #include <linux/kernel.h>
@@ -68,25 +67,19 @@ struct nfs_direct_req {
	struct kref kref;		/* release manager */
 
	/* I/O parameters */
-	struct list_head list,		/* nfs_read/write_data structs */
-			rewrite_list;	/* saved nfs_write_data structs */
	struct nfs_open_context *ctx;	/* file open context info */
	struct kiocb * iocb;		/* controlling i/o request */
	struct inode * inode;		/* target file of i/o */
-	unsigned long user_addr;	/* location of user's buffer */
-	size_t user_count;		/* total bytes to move */
-	loff_t pos;			/* starting offset in file */
-	struct page ** pages;		/* pages in our buffer */
-	unsigned int npages;		/* count of pages */
 
	/* completion state */
+	atomic_t io_count;		/* i/os we're waiting for */
	spinlock_t lock;		/* protect completion state */
-	int outstanding;		/* i/os we're waiting for */
	ssize_t count,			/* bytes actually processed */
		error;			/* any reported error */
	struct completion completion;	/* wait for i/o completion */
 
	/* commit state */
+	struct list_head rewrite_list;	/* saved nfs_write_data structs */
	struct nfs_write_data * commit_data;	/* special write_data for commits */
	int flags;
 #define NFS_ODIRECT_DO_COMMIT (1)	/* an unstable reply was received */
@@ -94,8 +87,18 @@ struct nfs_direct_req {
	struct nfs_writeverf verf;	/* unstable write verifier */
 };
 
-static void nfs_direct_write_schedule(struct nfs_direct_req *dreq, int sync);
 static void nfs_direct_write_complete(struct nfs_direct_req *dreq, struct inode *inode);
+static const struct rpc_call_ops nfs_write_direct_ops;
+
+static inline void get_dreq(struct nfs_direct_req *dreq)
+{
+	atomic_inc(&dreq->io_count);
+}
+
+static inline int put_dreq(struct nfs_direct_req *dreq)
+{
+	return atomic_dec_and_test(&dreq->io_count);
+}
 
 /**
  * nfs_direct_IO - NFS address space operation for direct I/O
@@ -119,50 +122,21 @@ ssize_t nfs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov, loff_
	return -EINVAL;
 }
 
-static void nfs_free_user_pages(struct page **pages, int npages, int do_dirty)
+static void nfs_direct_dirty_pages(struct page **pages, int npages)
 {
	int i;
	for (i = 0; i < npages; i++) {
		struct page *page = pages[i];
-		if (do_dirty && !PageCompound(page))
+		if (!PageCompound(page))
			set_page_dirty_lock(page);
-		page_cache_release(page);
	}
-	kfree(pages);
 }
 
-static inline int nfs_get_user_pages(int rw, unsigned long user_addr, size_t size, struct page ***pages)
+static void nfs_direct_release_pages(struct page **pages, int npages)
 {
-	int result = -ENOMEM;
-	unsigned long page_count;
-	size_t array_size;
-
-	page_count = (user_addr + size + PAGE_SIZE - 1) >> PAGE_SHIFT;
-	page_count -= user_addr >> PAGE_SHIFT;
-
-	array_size = (page_count * sizeof(struct page *));
-	*pages = kmalloc(array_size, GFP_KERNEL);
-	if (*pages) {
-		down_read(&current->mm->mmap_sem);
-		result = get_user_pages(current, current->mm, user_addr,
-					page_count, (rw == READ), 0,
-					*pages, NULL);
-		up_read(&current->mm->mmap_sem);
-		if (result != page_count) {
-			/*
-			 * If we got fewer pages than expected from
-			 * get_user_pages(), the user buffer runs off the
-			 * end of a mapping; return EFAULT.
-			 */
-			if (result >= 0) {
-				nfs_free_user_pages(*pages, result, 0);
-				result = -EFAULT;
-			} else
-				kfree(*pages);
-			*pages = NULL;
-		}
-	}
-	return result;
+	int i;
+	for (i = 0; i < npages; i++)
+		page_cache_release(pages[i]);
 }
 
 static inline struct nfs_direct_req *nfs_direct_req_alloc(void)
@@ -174,13 +148,13 @@ static inline struct nfs_direct_req *nfs_direct_req_alloc(void)
		return NULL;
 
	kref_init(&dreq->kref);
+	kref_get(&dreq->kref);
	init_completion(&dreq->completion);
-	INIT_LIST_HEAD(&dreq->list);
	INIT_LIST_HEAD(&dreq->rewrite_list);
	dreq->iocb = NULL;
	dreq->ctx = NULL;
	spin_lock_init(&dreq->lock);
-	dreq->outstanding = 0;
+	atomic_set(&dreq->io_count, 0);
	dreq->count = 0;
	dreq->error = 0;
	dreq->flags = 0;
@@ -221,18 +195,11 @@ out:
 }
 
 /*
- * We must hold a reference to all the pages in this direct read request
- * until the RPCs complete. This could be long *after* we are woken up in
- * nfs_direct_wait (for instance, if someone hits ^C on a slow server).
- *
- * In addition, synchronous I/O uses a stack-allocated iocb. Thus we
- * can't trust the iocb is still valid here if this is a synchronous
- * request. If the waiter is woken prematurely, the iocb is long gone.
+ * Synchronous I/O uses a stack-allocated iocb. Thus we can't trust
+ * the iocb is still valid here if this is a synchronous request.
  */
 static void nfs_direct_complete(struct nfs_direct_req *dreq)
 {
-	nfs_free_user_pages(dreq->pages, dreq->npages, 1);
-
	if (dreq->iocb) {
		long res = (long) dreq->error;
		if (!res)
@@ -245,48 +212,10 @@ static void nfs_direct_complete(struct nfs_direct_req *dreq)
 }
 
 /*
- * Note we also set the number of requests we have in the dreq when we are
- * done. This prevents races with I/O completion so we will always wait
- * until all requests have been dispatched and completed.
+ * We must hold a reference to all the pages in this direct read request
+ * until the RPCs complete. This could be long *after* we are woken up in
+ * nfs_direct_wait (for instance, if someone hits ^C on a slow server).
  */
-static struct nfs_direct_req *nfs_direct_read_alloc(size_t nbytes, size_t rsize)
-{
-	struct list_head *list;
-	struct nfs_direct_req *dreq;
-	unsigned int rpages = (rsize + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
-
-	dreq = nfs_direct_req_alloc();
-	if (!dreq)
-		return NULL;
-
-	list = &dreq->list;
-	for(;;) {
-		struct nfs_read_data *data = nfs_readdata_alloc(rpages);
-
-		if (unlikely(!data)) {
-			while (!list_empty(list)) {
-				data = list_entry(list->next,
-						struct nfs_read_data, pages);
-				list_del(&data->pages);
-				nfs_readdata_free(data);
-			}
-			kref_put(&dreq->kref, nfs_direct_req_release);
-			return NULL;
-		}
-
-		INIT_LIST_HEAD(&data->pages);
-		list_add(&data->pages, list);
-
-		data->req = (struct nfs_page *) dreq;
-		dreq->outstanding++;
-		if (nbytes <= rsize)
-			break;
-		nbytes -= rsize;
-	}
-	kref_get(&dreq->kref);
-	return dreq;
-}
-
 static void nfs_direct_read_result(struct rpc_task *task, void *calldata)
 {
	struct nfs_read_data *data = calldata;
@@ -295,6 +224,9 @@ static void nfs_direct_read_result(struct rpc_task *task, void *calldata)
	if (nfs_readpage_result(task, data) != 0)
		return;
 
+	nfs_direct_dirty_pages(data->pagevec, data->npages);
+	nfs_direct_release_pages(data->pagevec, data->npages);
+
	spin_lock(&dreq->lock);
 
	if (likely(task->tk_status >= 0))
@@ -302,13 +234,10 @@ static void nfs_direct_read_result(struct rpc_task *task, void *calldata)
	else
		dreq->error = task->tk_status;
 
-	if (--dreq->outstanding) {
-		spin_unlock(&dreq->lock);
-		return;
-	}
-
	spin_unlock(&dreq->lock);
-	nfs_direct_complete(dreq);
+
+	if (put_dreq(dreq))
+		nfs_direct_complete(dreq);
 }
 
 static const struct rpc_call_ops nfs_read_direct_ops = {
@@ -317,41 +246,56 @@ static const struct rpc_call_ops nfs_read_direct_ops = {
 };
 
 /*
- * For each nfs_read_data struct that was allocated on the list, dispatch
- * an NFS READ operation
+ * For each rsize'd chunk of the user's buffer, dispatch an NFS READ
+ * operation. If nfs_readdata_alloc() or get_user_pages() fails,
+ * bail and stop sending more reads. Read length accounting is
+ * handled automatically by nfs_direct_read_result(). Otherwise, if
+ * no requests have been sent, just return an error.
  */
-static void nfs_direct_read_schedule(struct nfs_direct_req *dreq)
+static ssize_t nfs_direct_read_schedule(struct nfs_direct_req *dreq, unsigned long user_addr, size_t count, loff_t pos)
 {
	struct nfs_open_context *ctx = dreq->ctx;
	struct inode *inode = ctx->dentry->d_inode;
-	struct list_head *list = &dreq->list;
-	struct page **pages = dreq->pages;
-	size_t count = dreq->user_count;
-	loff_t pos = dreq->pos;
	size_t rsize = NFS_SERVER(inode)->rsize;
-	unsigned int curpage, pgbase;
+	unsigned int pgbase;
+	int result;
+	ssize_t started = 0;
+
+	get_dreq(dreq);
 
-	curpage = 0;
-	pgbase = dreq->user_addr & ~PAGE_MASK;
	do {
		struct nfs_read_data *data;
		size_t bytes;
 
-		bytes = rsize;
-		if (count < rsize)
-			bytes = count;
+		pgbase = user_addr & ~PAGE_MASK;
+		bytes = min(rsize,count);
 
-		BUG_ON(list_empty(list));
-		data = list_entry(list->next, struct nfs_read_data, pages);
-		list_del_init(&data->pages);
+		result = -ENOMEM;
+		data = nfs_readdata_alloc(pgbase + bytes);
+		if (unlikely(!data))
+			break;
+
+		down_read(&current->mm->mmap_sem);
+		result = get_user_pages(current, current->mm, user_addr,
+					data->npages, 1, 0, data->pagevec, NULL);
+		up_read(&current->mm->mmap_sem);
+		if (unlikely(result < data->npages)) {
+			if (result > 0)
+				nfs_direct_release_pages(data->pagevec, result);
+			nfs_readdata_release(data);
+			break;
+		}
+
+		get_dreq(dreq);
 
+		data->req = (struct nfs_page *) dreq;
		data->inode = inode;
		data->cred = ctx->cred;
		data->args.fh = NFS_FH(inode);
		data->args.context = ctx;
		data->args.offset = pos;
		data->args.pgbase = pgbase;
-		data->args.pages = &pages[curpage];
+		data->args.pages = data->pagevec;
		data->args.count = bytes;
		data->res.fattr = &data->fattr;
		data->res.eof = 0;
@@ -374,33 +318,37 @@ static void nfs_direct_read_schedule(struct nfs_direct_req *dreq)
				bytes,
				(unsigned long long)data->args.offset);
 
+		started += bytes;
+		user_addr += bytes;
		pos += bytes;
+		/* FIXME: Remove this unnecessary math from final patch */
		pgbase += bytes;
-		curpage += pgbase >> PAGE_SHIFT;
		pgbase &= ~PAGE_MASK;
+		BUG_ON(pgbase != (user_addr & ~PAGE_MASK));
 
		count -= bytes;
	} while (count != 0);
-	BUG_ON(!list_empty(list));
+
+	if (put_dreq(dreq))
+		nfs_direct_complete(dreq);
+
+	if (started)
+		return 0;
+	return result < 0 ? (ssize_t) result : -EFAULT;
 }
 
-static ssize_t nfs_direct_read(struct kiocb *iocb, unsigned long user_addr, size_t count, loff_t pos, struct page **pages, unsigned int nr_pages)
+static ssize_t nfs_direct_read(struct kiocb *iocb, unsigned long user_addr, size_t count, loff_t pos)
 {
-	ssize_t result;
+	ssize_t result = 0;
	sigset_t oldset;
	struct inode *inode = iocb->ki_filp->f_mapping->host;
	struct rpc_clnt *clnt = NFS_CLIENT(inode);
	struct nfs_direct_req *dreq;
 
-	dreq = nfs_direct_read_alloc(count, NFS_SERVER(inode)->rsize);
+	dreq = nfs_direct_req_alloc();
	if (!dreq)
		return -ENOMEM;
 
-	dreq->user_addr = user_addr;
-	dreq->user_count = count;
-	dreq->pos = pos;
-	dreq->pages = pages;
-	dreq->npages = nr_pages;
	dreq->inode = inode;
	dreq->ctx = get_nfs_open_context((struct nfs_open_context *)iocb->ki_filp->private_data);
	if (!is_sync_kiocb(iocb))
@@ -408,8 +356,9 @@ static ssize_t nfs_direct_read(struct kiocb *iocb, unsigned long user_addr, size
 
	nfs_add_stats(inode, NFSIOS_DIRECTREADBYTES, count);
	rpc_clnt_sigmask(clnt, &oldset);
-	nfs_direct_read_schedule(dreq);
-	result = nfs_direct_wait(dreq);
+	result = nfs_direct_read_schedule(dreq, user_addr, count, pos);
+	if (!result)
+		result = nfs_direct_wait(dreq);
	rpc_clnt_sigunmask(clnt, &oldset);
 
	return result;
@@ -417,10 +366,10 @@ static ssize_t nfs_direct_read(struct kiocb *iocb, unsigned long user_addr, size
 
 static void nfs_direct_free_writedata(struct nfs_direct_req *dreq)
 {
-	list_splice_init(&dreq->rewrite_list, &dreq->list);
-	while (!list_empty(&dreq->list)) {
-		struct nfs_write_data *data = list_entry(dreq->list.next, struct nfs_write_data, pages);
+	while (!list_empty(&dreq->rewrite_list)) {
+		struct nfs_write_data *data = list_entry(dreq->rewrite_list.next, struct nfs_write_data, pages);
		list_del(&data->pages);
+		nfs_direct_release_pages(data->pagevec, data->npages);
		nfs_writedata_release(data);
	}
 }
@@ -428,14 +377,51 @@ static void nfs_direct_free_writedata(struct nfs_direct_req *dreq)
 #if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
 static void nfs_direct_write_reschedule(struct nfs_direct_req *dreq)
 {
-	struct list_head *pos;
+	struct inode *inode = dreq->inode;
+	struct list_head *p;
+	struct nfs_write_data *data;
 
-	list_splice_init(&dreq->rewrite_list, &dreq->list);
-	list_for_each(pos, &dreq->list)
-		dreq->outstanding++;
	dreq->count = 0;
+	get_dreq(dreq);
+
+	list_for_each(p, &dreq->rewrite_list) {
+		data = list_entry(p, struct nfs_write_data, pages);
+
+		get_dreq(dreq);
 
-	nfs_direct_write_schedule(dreq, FLUSH_STABLE);
+		/*
+		 * Reset data->res.
+		 */
+		nfs_fattr_init(&data->fattr);
+		data->res.count = data->args.count;
+		memset(&data->verf, 0, sizeof(data->verf));
+
+		/*
+		 * Reuse data->task; data->args should not have changed
+		 * since the original request was sent.
+		 */
+		rpc_init_task(&data->task, NFS_CLIENT(inode), RPC_TASK_ASYNC,
+				&nfs_write_direct_ops, data);
+		NFS_PROTO(inode)->write_setup(data, FLUSH_STABLE);
+
+		data->task.tk_priority = RPC_PRIORITY_NORMAL;
+		data->task.tk_cookie = (unsigned long) inode;
+
+		/*
+		 * We're called via an RPC callback, so BKL is already held.
+		 */
+		rpc_execute(&data->task);
+
+		dprintk("NFS: %5u rescheduled direct write call (req %s/%Ld, %u bytes @ offset %Lu)\n",
+				data->task.tk_pid,
+				inode->i_sb->s_id,
+				(long long)NFS_FILEID(inode),
+				data->args.count,
+				(unsigned long long)data->args.offset);
+	}
+
+	if (put_dreq(dreq))
+		nfs_direct_write_complete(dreq, inode);
 }
 
 static void nfs_direct_commit_result(struct rpc_task *task, void *calldata)
@@ -472,8 +458,8 @@ static void nfs_direct_commit_schedule(struct nfs_direct_req *dreq)
	data->cred = dreq->ctx->cred;
 
	data->args.fh = NFS_FH(data->inode);
-	data->args.offset = dreq->pos;
-	data->args.count = dreq->user_count;
+	data->args.offset = 0;
+	data->args.count = 0;
	data->res.count = 0;
	data->res.fattr = &data->fattr;
	data->res.verf = &data->verf;
@@ -517,7 +503,7 @@ static void nfs_direct_write_complete(struct nfs_direct_req *dreq, struct inode
 
 static void nfs_alloc_commit_data(struct nfs_direct_req *dreq)
 {
-	dreq->commit_data = nfs_commit_alloc(0);
+	dreq->commit_data = nfs_commit_alloc();
	if (dreq->commit_data != NULL)
		dreq->commit_data->req = (struct nfs_page *) dreq;
 }
@@ -535,47 +521,6 @@ static void nfs_direct_write_complete(struct nfs_direct_req *dreq, struct inode
 }
 #endif
 
-static struct nfs_direct_req *nfs_direct_write_alloc(size_t nbytes, size_t wsize)
-{
-	struct list_head *list;
-	struct nfs_direct_req *dreq;
-	unsigned int wpages = (wsize + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
-
-	dreq = nfs_direct_req_alloc();
-	if (!dreq)
-		return NULL;
-
-	list = &dreq->list;
-	for(;;) {
-		struct nfs_write_data *data = nfs_writedata_alloc(wpages);
-
-		if (unlikely(!data)) {
-			while (!list_empty(list)) {
-				data = list_entry(list->next,
-						struct nfs_write_data, pages);
-				list_del(&data->pages);
-				nfs_writedata_free(data);
-			}
-			kref_put(&dreq->kref, nfs_direct_req_release);
-			return NULL;
-		}
-
-		INIT_LIST_HEAD(&data->pages);
-		list_add(&data->pages, list);
-
-		data->req = (struct nfs_page *) dreq;
-		dreq->outstanding++;
-		if (nbytes <= wsize)
-			break;
-		nbytes -= wsize;
-	}
-
-	nfs_alloc_commit_data(dreq);
-
-	kref_get(&dreq->kref);
-	return dreq;
-}
-
 static void nfs_direct_write_result(struct rpc_task *task, void *calldata)
 {
	struct nfs_write_data *data = calldata;
@@ -605,8 +550,6 @@ static void nfs_direct_write_result(struct rpc_task *task, void *calldata)
			}
		}
	}
-	/* In case we have to resend */
-	data->args.stable = NFS_FILE_SYNC;
 
	spin_unlock(&dreq->lock);
 }
@@ -620,14 +563,8 @@ static void nfs_direct_write_release(void *calldata)
	struct nfs_write_data *data = calldata;
	struct nfs_direct_req *dreq = (struct nfs_direct_req *) data->req;
 
-	spin_lock(&dreq->lock);
-	if (--dreq->outstanding) {
-		spin_unlock(&dreq->lock);
-		return;
-	}
-	spin_unlock(&dreq->lock);
-
-	nfs_direct_write_complete(dreq, data->inode);
+	if (put_dreq(dreq))
+		nfs_direct_write_complete(dreq, data->inode);
 }
 
 static const struct rpc_call_ops nfs_write_direct_ops = {
@@ -636,41 +573,58 @@ static const struct rpc_call_ops nfs_write_direct_ops = {
 };
 
 /*
- * For each nfs_write_data struct that was allocated on the list, dispatch
- * an NFS WRITE operation
+ * For each wsize'd chunk of the user's buffer, dispatch an NFS WRITE
+ * operation. If nfs_writedata_alloc() or get_user_pages() fails,
+ * bail and stop sending more writes. Write length accounting is
+ * handled automatically by nfs_direct_write_result(). Otherwise, if
+ * no requests have been sent, just return an error.
  */
-static void nfs_direct_write_schedule(struct nfs_direct_req *dreq, int sync)
+static ssize_t nfs_direct_write_schedule(struct nfs_direct_req *dreq, unsigned long user_addr, size_t count, loff_t pos, int sync)
 {
	struct nfs_open_context *ctx = dreq->ctx;
	struct inode *inode = ctx->dentry->d_inode;
-	struct list_head *list = &dreq->list;
-	struct page **pages = dreq->pages;
-	size_t count = dreq->user_count;
-	loff_t pos = dreq->pos;
	size_t wsize = NFS_SERVER(inode)->wsize;
-	unsigned int curpage, pgbase;
+	unsigned int pgbase;
+	int result;
+	ssize_t started = 0;
+
+	get_dreq(dreq);
 
-	curpage = 0;
-	pgbase = dreq->user_addr & ~PAGE_MASK;
	do {
		struct nfs_write_data *data;
		size_t bytes;
 
-		bytes = wsize;
-		if (count < wsize)
-			bytes = count;
+		pgbase = user_addr & ~PAGE_MASK;
+		bytes = min(wsize,count);
+
+		result = -ENOMEM;
+		data = nfs_writedata_alloc(pgbase + bytes);
+		if (unlikely(!data))
+			break;
+
+		down_read(&current->mm->mmap_sem);
+		result = get_user_pages(current, current->mm, user_addr,
+					data->npages, 0, 0, data->pagevec, NULL);
+		up_read(&current->mm->mmap_sem);
+		if (unlikely(result < data->npages)) {
+			if (result > 0)
+				nfs_direct_release_pages(data->pagevec, result);
+			nfs_writedata_release(data);
+			break;
+		}
+
+		get_dreq(dreq);
 
-		BUG_ON(list_empty(list));
-		data = list_entry(list->next, struct nfs_write_data, pages);
		list_move_tail(&data->pages, &dreq->rewrite_list);
 
+		data->req = (struct nfs_page *) dreq;
		data->inode = inode;
		data->cred = ctx->cred;
		data->args.fh = NFS_FH(inode);
		data->args.context = ctx;
		data->args.offset = pos;
		data->args.pgbase = pgbase;
-		data->args.pages = &pages[curpage];
+		data->args.pages = data->pagevec;
		data->args.count = bytes;
		data->res.fattr = &data->fattr;
		data->res.count = bytes;
@@ -694,19 +648,29 @@ static void nfs_direct_write_schedule(struct nfs_direct_req *dreq, int sync)
				bytes,
				(unsigned long long)data->args.offset);
 
+		started += bytes;
+		user_addr += bytes;
		pos += bytes;
+
+		/* FIXME: Remove this useless math from the final patch */
		pgbase += bytes;
-		curpage += pgbase >> PAGE_SHIFT;
		pgbase &= ~PAGE_MASK;
+		BUG_ON(pgbase != (user_addr & ~PAGE_MASK));
 
		count -= bytes;
	} while (count != 0);
-	BUG_ON(!list_empty(list));
+
+	if (put_dreq(dreq))
+		nfs_direct_write_complete(dreq, inode);
+
+	if (started)
+		return 0;
+	return result < 0 ? (ssize_t) result : -EFAULT;
 }
 
-static ssize_t nfs_direct_write(struct kiocb *iocb, unsigned long user_addr, size_t count, loff_t pos, struct page **pages, int nr_pages)
+static ssize_t nfs_direct_write(struct kiocb *iocb, unsigned long user_addr, size_t count, loff_t pos)
 {
-	ssize_t result;
+	ssize_t result = 0;
	sigset_t oldset;
	struct inode *inode = iocb->ki_filp->f_mapping->host;
	struct rpc_clnt *clnt = NFS_CLIENT(inode);
@@ -714,17 +678,14 @@ static ssize_t nfs_direct_write(struct kiocb *iocb, unsigned long user_addr, siz
	size_t wsize = NFS_SERVER(inode)->wsize;
	int sync = 0;
 
-	dreq = nfs_direct_write_alloc(count, wsize);
+	dreq = nfs_direct_req_alloc();
	if (!dreq)
		return -ENOMEM;
+	nfs_alloc_commit_data(dreq);
+
	if (dreq->commit_data == NULL || count < wsize)
		sync = FLUSH_STABLE;
 
-	dreq->user_addr = user_addr;
-	dreq->user_count = count;
-	dreq->pos = pos;
-	dreq->pages = pages;
-	dreq->npages = nr_pages;
	dreq->inode = inode;
	dreq->ctx = get_nfs_open_context((struct nfs_open_context *)iocb->ki_filp->private_data);
	if (!is_sync_kiocb(iocb))
@@ -735,8 +696,9 @@ static ssize_t nfs_direct_write(struct kiocb *iocb, unsigned long user_addr, siz
	nfs_begin_data_update(inode);
 
	rpc_clnt_sigmask(clnt, &oldset);
-	nfs_direct_write_schedule(dreq, sync);
-	result = nfs_direct_wait(dreq);
+	result = nfs_direct_write_schedule(dreq, user_addr, count, pos, sync);
+	if (!result)
+		result = nfs_direct_wait(dreq);
	rpc_clnt_sigunmask(clnt, &oldset);
 
	return result;
@@ -766,8 +728,6 @@ static ssize_t nfs_direct_write(struct kiocb *iocb, unsigned long user_addr, siz
 ssize_t nfs_file_direct_read(struct kiocb *iocb, char __user *buf, size_t count, loff_t pos)
 {
	ssize_t retval = -EINVAL;
-	int page_count;
-	struct page **pages;
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
 
@@ -789,14 +749,7 @@ ssize_t nfs_file_direct_read(struct kiocb *iocb, char __user *buf, size_t count,
	if (retval)
		goto out;
 
-	retval = nfs_get_user_pages(READ, (unsigned long) buf,
-					count, &pages);
-	if (retval < 0)
-		goto out;
-	page_count = retval;
-
-	retval = nfs_direct_read(iocb, (unsigned long) buf, count, pos,
-					pages, page_count);
+	retval = nfs_direct_read(iocb, (unsigned long) buf, count, pos);
	if (retval > 0)
		iocb->ki_pos = pos + retval;
 
@@ -832,8 +785,6 @@ ssize_t nfs_file_direct_write(struct kiocb *iocb, const char __user *buf, size_t
 ssize_t nfs_file_direct_write(struct kiocb *iocb, const char __user *buf, size_t count, loff_t pos)
 {
	ssize_t retval;
-	int page_count;
-	struct page **pages;
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
 
@@ -861,14 +812,7 @@ ssize_t nfs_file_direct_write(struct kiocb *iocb, const char __user *buf, size_t
	if (retval)
		goto out;
 
-	retval = nfs_get_user_pages(WRITE, (unsigned long) buf,
-					count, &pages);
-	if (retval < 0)
-		goto out;
-	page_count = retval;
-
-	retval = nfs_direct_write(iocb, (unsigned long) buf, count,
-					pos, pages, page_count);
+	retval = nfs_direct_write(iocb, (unsigned long) buf, count, pos);
 
	/*
	 * XXX: nfs_end_data_update() already ensures this file's
@@ -892,7 +836,7 @@ out:
  * nfs_init_directcache - create a slab cache for nfs_direct_req structures
  *
  */
-int nfs_init_directcache(void)
+int __init nfs_init_directcache(void)
 {
	nfs_direct_cachep = kmem_cache_create("nfs_direct_cache",
				sizeof(struct nfs_direct_req),
@@ -906,11 +850,10 @@ int nfs_init_directcache(void)
 }
 
 /**
- * nfs_init_directcache - destroy the slab cache for nfs_direct_req structures
+ * nfs_destroy_directcache - destroy the slab cache for nfs_direct_req structures
  *
  */
 void nfs_destroy_directcache(void)
 {
-	if (kmem_cache_destroy(nfs_direct_cachep))
-		printk(KERN_INFO "nfs_direct_cache: not all structures were freed\n");
+	kmem_cache_destroy(nfs_direct_cachep);
 }
